seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
39279840802 | from astropy.io import fits
from astropy.convolution import convolve, Box1DKernel
import scipy as sp
import matplotlib
import matplotlib.pyplot as plt
import glob
'''
O 436
B 582
A 745
F 766
G 596
K 759
M 306
'''
'''
O 476, 8773, 9818
B 96, 378, 462, 489, 492
A 17, 114, 120, 136
F 52, 158
G 25, 27, 30, 85
K 61, 65
M 256, 291, 300
'''
# Index of one representative spectrum per MK class, picked from the
# candidate lists above (order: O, B, A, F, G, K, M).
i = [476, 378, 17, 158, 30, 61, 256]
# Class labels reversed so M ends up at the bottom of the stack, O on top.
c = ['O', 'B', 'A', 'F', 'G', 'K', 'M'][::-1]
# Wavelength of interest in Angstroms (near the Na D doublet).
loc = 5891
# NOTE(review): glob.glob() returns files in arbitrary order, so the indices
# above are only meaningful for one particular filesystem state -- confirm.
files = [glob.glob('/data2/cpb405/Training_2/*.fits')[j] for j in i][::-1]
# Portrait canvas with a ~1:sqrt(2) (A4-like) aspect ratio.
fig, ax = plt.subplots(figsize = (5,0.9*5*sp.sqrt(2)))
# Mark the hydrogen Balmer lines: H-alpha, H-beta, H-gamma.
ax.axvline(6565, c = 'r', alpha = 0.1)
ax.text(6600, 7, 'Ha', color = 'r')
ax.axvline(4862, c = 'r', alpha = 0.1)
ax.text(4900, 7, 'Hb', color = 'r')
ax.axvline(4342, c = 'r', alpha = 0.1)
ax.text(4400, 7, 'Hg', color = 'r')
for idx in range(len(files)):
    with fits.open(files[idx]) as hdulist:
        flux = hdulist[0].data[0]
        # COEFF0/COEFF1: presumably the log10(wavelength) zero point and
        # step of the spectral axis (LAMOST-style header) -- TODO confirm.
        init = hdulist[0].header['COEFF0']
        disp = hdulist[0].header['COEFF1']
        CLS = hdulist[0].header['CLASS']
        SCLS = hdulist[0].header['SUBCLASS'][0]
        #print('{}, {}, {}'.format(idx, CLS, SCLS))
        # Rebuild the wavelength grid from the log-linear header solution.
        wavelength = 10**sp.arange(init, init+disp*(len(flux)-0.9), disp)
        # Drop the last 100 samples (noisy red end of the spectrum).
        wavelength = wavelength[:-100]
        flux = flux[:-100]
        flux = sp.array(flux)
        wi = sp.searchsorted(wavelength, loc)
        #wi = -1
        # Normalise to the peak, then offset by idx to stack spectra vertically.
        flux = flux/sp.amax(flux)
        ax.plot(wavelength, flux + idx, label = c[idx], c = '#1f77b4')
        ax.annotate(c[idx], xy = (wavelength[sp.argmax(flux)]-75, idx+1.03))
ax.set_title('Stellar Spectra')
ax.set_xlabel('Wavelength \ Angstroms')
ax.set_ylabel('Normalised Flux')
plt.yticks([]," ")
#ax.set_yticklabels([])
#ax.get_yaxis().set_visible(False)
plt.tight_layout()
plt.savefig('MK.pdf')
plt.show() | grd349/LearningLAMOST | Matt/RegressorRF/Figures/plot_class.py | plot_class.py | py | 1,790 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "glob.glob",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "scipy.sqrt",
... |
34638872728 | import requests
from bs4 import BeautifulSoup
""" https://www.youtube.com/watch?v=PzWIdcFY9YQ """
# Fetch the XML sitemap and collect every <loc> URL it lists.
url = 'https://url.com/sitemap.xml'
sitemapsoup = BeautifulSoup(requests.get(url).content, 'lxml')
sitemapurls = sitemapsoup.find_all("loc")
xml_urls = [sitemapurl.text for sitemapurl in sitemapurls]
count = 0   # pages that declared a canonical link
cerror = 0  # pages without one
# `with` guarantees the report file is closed even if a request raises
# (the original open()/close() pair leaked the handle on any exception).
with open("FILEPATH/data.txt", "w") as mydata:
    for websiteurls in xml_urls:
        source = BeautifulSoup(requests.get(websiteurls).text, 'html.parser')
        # BUG FIX: the original incremented `count` and wrote "yes!" BEFORE
        # checking the canonical tag existed, so missing tags both inflated
        # the count and produced a mixed "yes!/no!" record via a bare except.
        canonical = source.find('link', {'rel': 'canonical'})
        if canonical is not None and canonical.get('href') is not None:
            count += 1
            mydata.write("yes!")
            mydata.write("\n")
            mydata.write(canonical['href'])
            mydata.write("\n")
            print(count)
        else:
            mydata.write("no!")
            mydata.write(websiteurls)
            cerror += 1
            print(cerror)
| martamc-sp/PythonforSEO | lessons/4-urls-canonical.py | 4-urls-canonical.py | py | 824 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "bs4.BeautifulSoup",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.get",
"li... |
15560736212 | #!/usr/bin/env python3
# This is a simple script that takes in an scurve file produced by
# csvcolumn_to_scurve and produces a png graph of the scurve.
import argparse
import csv
import matplotlib.pyplot as plt
import numpy as np
FIELDS = ['N/total', 'New/Old']


def get_data(input_file):
    """Yield (N/total, New/Old) float pairs from an scurve CSV file.

    `input_file` is any iterable of CSV text lines whose header row names
    the two columns listed in FIELDS.
    """
    # FIELDS is only read here, so the `global FIELDS` declaration the
    # original carried was unnecessary and has been dropped.
    for row in csv.DictReader(input_file):
        yield (float(row[FIELDS[0]]), float(row[FIELDS[1]]))
def main():
    """Read an scurve CSV (from csvcolumn_to_scurve) and save a PNG graph.

    CLI: input_csv_file output_file, plus optional overrides for the
    titles, the y-axis bounds and the y tick density.
    """
    p = argparse.ArgumentParser()
    p.add_argument('input_csv_file', type=argparse.FileType('r'))
    p.add_argument('output_file', type=str)
    p.add_argument('-y-axis-num-tick-marks', type=int,
                   help='The number of y tick marks to use above/below zero.')
    p.add_argument('-y-axis-min', type=float,
                   help='Override the min y axis that we use')
    p.add_argument('-y-axis-max', type=float,
                   help='Override the max y axis that we use')
    p.add_argument('-title', type=str,
                   help='Title of the graph')
    p.add_argument('-x-axis-title', type=str,
                   help='The title to use on the x-axis of the graph')
    p.add_argument('-y-axis-title', type=str,
                   help='The title to use on the y-axis of the graph')
    args = p.parse_args()

    data = np.array(list(get_data(args.input_csv_file)))
    assert np.all(data >= 0)
    x = data[:, 0]
    y = data[:, 1]

    x_axis_title = args.x_axis_title or FIELDS[0]
    y_axis_title = args.y_axis_title or FIELDS[1]
    title = args.title or "{} vs {}".format(x_axis_title, y_axis_title)

    fig, ax = plt.subplots()
    fig.set_size_inches(18.5, 18.5)
    fig.suptitle(title, fontsize=20)
    ax.set_xlabel(x_axis_title, fontsize=20)
    ax.set_ylabel(y_axis_title, fontsize=20)
    ax.plot(x, y)
    ax.scatter(x, y)
    # To get good bounds, we:
    #
    # 1. Re-center our data at 0 by subtracting 1. This will give us the %
    # difference in between new and old (i.e. (new - old)/old)
    #
    # 2. Then we take the maximum absolute delta from zero and round to a
    # multiple of 5 away from zero. Lets call this value limit.
    #
    # 3. We set [min_y, max_y] = [1.0 - limit, 1.0 + limit]
    recentered_data = y - 1.0
    max_magnitude = int(np.max(np.abs(recentered_data)) * 100.0)
    y_limit = float(((max_magnitude // 5) + 1) * 5) * 0.01
    ax.set_xlim(0.0, 1.0)
    # BUG FIX: the original used `args.y_axis_min or <default>`, which
    # ignored an explicit 0.0 override because 0.0 is falsy. Compare
    # against None (argparse's "not supplied" value) instead.
    y_min = args.y_axis_min if args.y_axis_min is not None else 1.0 - y_limit
    y_max = args.y_axis_max if args.y_axis_max is not None else 1.0 + y_limit
    assert y_min <= y_max
    ax.set_ylim(y_min, y_max)
    ax.grid(True)
    ax.xaxis.set_ticks(np.arange(0.0, 1.0, 0.05))
    if args.y_axis_num_tick_marks:
        y_delta = y_max - y_min
        y_tickmark_frequency = y_delta / float(args.y_axis_num_tick_marks)
        ax.yaxis.set_ticks(np.arange(y_min, y_max, y_tickmark_frequency))
    plt.savefig(args.output_file)
if __name__ == "__main__":
main()
| apple/swift | utils/dev-scripts/scurve_printer.py | scurve_printer.py | py | 2,875 | python | en | code | 64,554 | github-code | 36 | [
{
"api_name": "csv.DictReader",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.array"... |
16989917132 | from logging import Logger
from typing import List
from pypika import Table # type: ignore
from pypika import PostgreSQLQuery as Q
from app.models.mart import engine_mart
from app.models.askue import AccountPoint
from app.models.mart import RegPointModel, RsPointModel, BalanceModel, BalanceRegModel
from sqlalchemy.engine import Transaction
from pypika.functions import Max # type: ignore
class DalMart:
"""
Класс DAL работы с БД Data Mart объектов
"""
    def __init__(self, logger: Logger):
        # Keep the injected application logger; every DB error in this
        # class is logged through it rather than raised to callers.
        self._logger = logger
def get_max_rv_point_list(self, point_table: str) -> int:
rv = 0
try:
p = Table(point_table)
q = (Q.from_(p)
.select(Max(p.rv)))
sql = q.get_sql()
self._logger.debug(f'SQL: {sql}')
rv = engine_mart.scalar(sql)
if rv is None:
rv = 0
except Exception as e:
self._logger.error(e)
return rv
    def insert_points(self, points: List[AccountPoint], dest_table: str) -> None:
        """
        Upsert a batch of account points into `dest_table`.

        For 'askue_reg_point' the DisplayName is expected to hold 4
        backslash-separated fields (res, fes, ps, vl); any other table is
        treated as a distribution-grid point with 6 fields plus extra
        meter attributes.  Rows whose DisplayName does not match the
        format are skipped with a warning.  Each branch converts rows to
        models first, then upserts them inside a single transaction that
        is rolled back on the first DB error.
        """
        con = engine_mart.connect()
        self._logger.debug(f'DalMart.insert_point() dest_table:{dest_table}')
        if dest_table == 'askue_reg_point':
            data_result: List[RegPointModel] = []
            tran: Transaction = con.begin()
            try:
                for p in points:
                    # DisplayName layout: res\fes\ps\vl
                    reg_string = p.DisplayName.split('\\')
                    if len(reg_string) < 4:
                        self._logger.warning(f"Имя объекта ({p.DisplayName}) не соответствует формату")
                        continue
                    reg_object = RegPointModel(id_point=p.Id, display_name=p.DisplayName, res=reg_string[0],
                                               fes=reg_string[1], ps=reg_string[2], vl=reg_string[3], rv=p.Rv)
                    data_result.append(reg_object)
            except Exception as e:
                # Conversion is best-effort: rows converted so far are still
                # written below even if a later row blows up.
                self._logger.error(f'convert to model failed {e}')
            try:
                for elem in data_result:
                    d = Table(dest_table)
                    # PostgreSQL upsert: INSERT ... ON CONFLICT(id) DO UPDATE,
                    # refreshing every mutable column of the existing row.
                    q = Q.into(d).insert(elem.Id, elem.DisplayName, elem.Res, elem.Fes, elem.Ps, elem.Vl, elem.Rv) \
                        .on_conflict(d.id) \
                        .do_update(d.object_name, elem.DisplayName) \
                        .do_update(d.fes, elem.Fes) \
                        .do_update(d.res, elem.Res) \
                        .do_update(d.ps, elem.Ps) \
                        .do_update(d.vl, elem.Vl) \
                        .do_update(d.rv, elem.Rv)
                    sql = q.get_sql()
                    self._logger.debug(f'SQL: {sql}')
                    con.execute(sql)
                tran.commit()
            except Exception as e:
                self._logger.error(f'DalMart.insert_point() {e}')
                tran.rollback()
        else:
            data_result: List[RsPointModel] = []
            tran: Transaction = con.begin()
            try:
                for p in points:
                    # DisplayName layout: res\fes\ps\vl\tp\sch
                    rs_string = p.DisplayName.split('\\')
                    if len(rs_string) < 6:
                        self._logger.warning(f"Имя объекта ({p.DisplayName}) не соответствует формату")
                        continue
                    rs_object = RsPointModel(id_point=p.Id, display_name=p.DisplayName, res=rs_string[0],
                                             fes=rs_string[1], ps=rs_string[2], vl=rs_string[3], tp=rs_string[4],
                                             sch=rs_string[5], ktt=p.Ktt, str_ra=p.Str_ra, rxx=p.Rxx, region=p.Locality,
                                             number_point=p.Number_point, driver=p.Driver, rv=p.Rv, country=p.Country)
                    data_result.append(rs_object)
            except Exception as e:
                self._logger.error(f'convert to model failed {e}')
            try:
                for elem in data_result:
                    d = Table(dest_table)
                    # Same upsert pattern, with the full set of meter columns.
                    q = Q.into(d).insert(elem.Id, elem.DisplayName, elem.Res, elem.Fes, elem.Ps, elem.Vl,
                                         elem.Tp, elem.Sch, elem.Rv, elem.Str_ra, elem.Rxx, elem.Ktt, elem.Region,
                                         elem.Number_point, elem.Driver, elem.Country) \
                        .on_conflict(d.id) \
                        .do_update(d.object_name, elem.DisplayName) \
                        .do_update(d.fes, elem.Fes) \
                        .do_update(d.res, elem.Res) \
                        .do_update(d.ps, elem.Ps) \
                        .do_update(d.vl, elem.Vl) \
                        .do_update(d.tp, elem.Tp) \
                        .do_update(d.sch, elem.Sch) \
                        .do_update(d.rv, elem.Rv) \
                        .do_update(d.str_ra, elem.Str_ra) \
                        .do_update(d.rxx, elem.Rxx) \
                        .do_update(d.ktt, elem.Ktt) \
                        .do_update(d.locality, elem.Region) \
                        .do_update(d.number_point, elem.Number_point) \
                        .do_update(d.driver, elem.Driver) \
                        .do_update(d.country, elem.Country)
                    sql = q.get_sql()
                    self._logger.debug(f'SQL: {sql}')
                    con.execute(sql)
                tran.commit()
            except Exception as e:
                self._logger.error(f'DalMart.insert_point() {e}')
                tran.rollback()
def read_rs_points(self) -> List[RsPointModel]:
"""
Выполняет чтение всех точек учета распределительных сетей
:return: массив точек учета
"""
p = Table('askue_rs_point', alias='p')
q = (Q.from_(p)
.select(p.id, p.object_name, p.fes, p.res, p.ps, p.vl, p.tp, p.sch, p.rv, p.str_ra, p.rxx, p.ktt,
p.locality, p.number_point, p.driver, p.country))
sql_query = q.get_sql()
return_values: List[RsPointModel] = []
try:
self._logger.debug(f'SQL: {sql_query}')
result = engine_mart.execute(sql_query)
for row in result:
data = RsPointModel(id_point=row['id'], display_name=row['object_name'], fes=row['fes'], res=row['res'],
ps=row['ps'], vl=row['vl'], tp=row['tp'], sch=row['sch'], rv=row['rv'],
str_ra=row['str_ra'], rxx=row['rxx'], ktt=row['ktt'], region=row['locality'],
number_point=row['number_point'], driver=row['driver'], country=row['country'])
return_values.append(data)
except Exception as e:
self._logger.error(e)
return return_values
def read_reg_points(self) -> List[RegPointModel]:
"""
Выполняет чтение всех точек учета распределительных сетей
:return: массив точек учета
"""
p = Table('askue_reg_point', alias='p')
q = (Q.from_(p)
.select(p.id, p.object_name, p.fes, p.res, p.ps, p.vl, p.rv))
sql_query = q.get_sql()
return_values: List[RegPointModel] = []
try:
self._logger.debug(f'SQL: {sql_query}')
result = engine_mart.execute(sql_query)
for row in result:
data = RegPointModel(row['id'], row['object_name'], row['fes'], row['res'], row['ps'], row['vl'],
row['rv'])
return_values.append(data)
except Exception as e:
self._logger.error(e)
return return_values
    def insert_balance_calc(self, points: List[BalanceModel]):
        """
        Upsert every balance calculation row into the calc_balance table.

        All rows are written inside one transaction; on the first failing
        statement the whole batch is rolled back and the error is logged.
        """
        con = engine_mart.connect()
        self._logger.debug("insert_balance_calc()... start")
        tran: Transaction = con.begin()
        try:
            for elem in points:
                d = Table('calc_balance')
                # PostgreSQL upsert: INSERT ... ON CONFLICT(id) DO UPDATE,
                # refreshing every column of the existing row.
                q = Q.into(d).insert(elem.Id, elem.Id_tu, elem.Dtp, elem.Locality, elem.NameOfAccountingPoint,
                                     elem.STrRa,
                                     elem.Pxx,
                                     elem.LossXX, elem.Ktt, elem.HeadOfCounter, elem.StartPeriod,
                                     elem.QSlim, elem.Time_Start_Write, elem.Country, elem.Driver) \
                    .on_conflict(d.id) \
                    .do_update(d.id_tu, elem.Id_tu) \
                    .do_update(d.dtp, elem.Dtp) \
                    .do_update(d.locality, elem.Locality) \
                    .do_update(d.name_of_accounting_point, elem.NameOfAccountingPoint) \
                    .do_update(d.str_ra, elem.STrRa) \
                    .do_update(d.pxx, elem.Pxx) \
                    .do_update(d.loss_xx, elem.LossXX) \
                    .do_update(d.ktt, elem.Ktt) \
                    .do_update(d.head_of_counter, elem.HeadOfCounter) \
                    .do_update(d.start_period, elem.StartPeriod) \
                    .do_update(d.q_slim, elem.QSlim) \
                    .do_update(d.time_start_write, elem.Time_Start_Write) \
                    .do_update(d.country, elem.Country) \
                    .do_update(d.driver, elem.Driver)
                sql = q.get_sql()
                self._logger.debug(f'SQL: {sql}')
                con.execute(sql)
            tran.commit()
        except Exception as e:
            self._logger.error(f'DalMart.insert_balance_calc() {e}')
            tran.rollback()
    def insert_balance_reg_calc(self, points: List[BalanceRegModel]):
        """
        Upsert every regional balance calculation row into calc_reg_balance.

        One transaction for the whole batch; rolled back on the first
        failing statement.
        """
        con = engine_mart.connect()
        # NOTE(review): debug message says insert_balance_calc -- a
        # copy/paste artifact from the method above.
        self._logger.debug("insert_balance_calc()... start")
        tran: Transaction = con.begin()
        try:
            for elem in points:
                d = Table('calc_reg_balance')
                # INSERT ... ON CONFLICT(id) DO UPDATE upsert.
                q = Q.into(d).insert(elem.Id, elem.Id_tu, elem.StartPeriod, elem.Time_Start_Write) \
                    .on_conflict(d.id) \
                    .do_update(d.id_tu, elem.Id_tu) \
                    .do_update(d.start_period, elem.StartPeriod) \
                    .do_update(d.time_start_write, elem.Time_Start_Write)
                sql = q.get_sql()
                self._logger.debug(f'SQL: {sql}')
                con.execute(sql)
            tran.commit()
        except Exception as e:
            self._logger.error(f'DalMart.insert_balance_reg_calc() {e}')
tran.rollback() | giveyourtears/electroComputationServer | app/jobs/balance/data_mart_layer.py | data_mart_layer.py | py | 10,828 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "logging.Logger",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "pypika.Table",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pypika.PostgreSQLQuery.from_",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pypika.Postg... |
10331225638 | import json
import requests
class SSEStatsOnTime(object):
    """
    Real-time daily trading-quota scraper for the Shanghai exchange.

    http://www.sse.com.cn/services/hkexsc/home/
    """
    def __init__(self):
        # JSON endpoint publishing the daily quota and remaining balance.
        self.url = 'http://yunhq.sse.com.cn:32041//v1/hkp/status/amount_status'
        # NOTE(review): headers are prepared but never passed to
        # requests.get() below -- presumably intended for the request.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
        }
    def get_balance_info(self):
        """Fetch the quota snapshot; returns a dict on HTTP 200, else None (implicit)."""
        resp = requests.get(self.url)
        if resp.status_code == 200:
            datas = json.loads(resp.text)
            item = dict()
            # Exchange category marker (Shanghai).
            item['Category'] = "SH"
            # Current timestamp: 'date' arrives as YYYYMMDD.
            m_today = str(datas['date'])
            m_today = "-".join([m_today[:4], m_today[4:6], m_today[6:8]])
            m_time = str(datas['status'][0][1])
            # Distinguish 2-digit hours from 1-digit ones, i.e. samples taken
            # at/before 9 o'clock versus 10 o'clock and later.
            if len(m_time) >= 9:  # e.g. {'date': 20200417, 'status': [[100547000, 100547000], [417, 418], ['3 ', '111 '], 42000000000, 41207590461, '2']}
                m_time = ":".join([m_time[:2], m_time[2:4], m_time[4:6]])
            else:  # e.g. {'date': 20200417, 'status': [[94338000, 94337000], [417, 418], ['3 ', '111 '], 42000000000, 41543482907, '2']}
                m_time = ":".join([m_time[:1], m_time[1:3], m_time[3:5]])
            _time = " ".join([m_today, m_time])
            item['Time'] = _time
            # Quota for the day.
            item['DailyLimit'] = datas['status'][3]
            # Remaining balance for the day.
            item['Balance'] = datas['status'][4]
            # print(item)
            return item
if __name__ == "__main__":
sse = SSEStatsOnTime()
sse.get_balance_info()
| wilsonkrum/DataFactory | hkland_flow/stock_hu_ontime.py | stock_hu_ontime.py | py | 1,796 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 19,
"usage_type": "call"
}
] |
43156623065 | #!/usr/bin/env python
import unittest
import mock
from quadcopter_brain import QuadcopterBrain
class TestQuadcopterBrain(unittest.TestCase):
    """Unit tests for QuadcopterBrain with the hardware interfaces mocked."""

    @mock.patch('landing_site.LandingSite')
    @mock.patch('quadcopter.Quadcopter')
    def setUp(self, quadcopter_mock, landing_site_mock):
        # Patching during construction means the brain holds mock objects;
        # grab them so the tests can assert on their calls.
        self.quadcopter_brain = QuadcopterBrain()
        self.quadcopter_mock = self.quadcopter_brain.quadcopter
        self.landing_site_mock = self.quadcopter_brain.landing_site

    @mock.patch('rospy.sleep')
    @mock.patch('waypoint_tools.WaypointTools.build_waypoint')
    def test_go_to_waypoints(self, build_waypoint_mock, sleep_mock):
        # Each raw datum must be built into a waypoint and sent in order.
        waypoint_data = [0, 1]
        build_waypoint_mock.side_effect = [10, 11]
        self.quadcopter_brain.go_to_waypoints(waypoint_data)
        expected = [mock.call(0), mock.call(1)]
        self.assertEqual(build_waypoint_mock.call_args_list, expected)
        expected = [mock.call(10), mock.call(11)]
        self.assertEqual(
            self.quadcopter_mock.send_waypoint.call_args_list, expected)

    @mock.patch('quadcopter_brain.QuadcopterBrain.go_to_waypoints')
    def test_fly_path(self, go_to_waypoints_mock):
        # fly_path = launch, traverse all waypoints, land.
        waypoint_data = [0, 1]
        self.quadcopter_brain.fly_path(waypoint_data)
        self.quadcopter_mock.launch.assert_called_once_with()
        go_to_waypoints_mock.assert_called_once_with(waypoint_data)
        self.quadcopter_mock.land.assert_called_once_with()

    @mock.patch('quadcopter_brain.QuadcopterBrain.go_to_waypoints')
    def test_go_to_waypoint_given_metered_offset(self, go_to_waypoint_mock):
        # East/north offsets in meters must be converted to lat/long deltas.
        delta_east = 10  # Meters
        delta_north = -10  # Meters
        self.quadcopter_brain.quadcopter.current_lat = 42.0
        self.quadcopter_brain.quadcopter.current_long = -71.0
        self.quadcopter_brain.quadcopter.current_rel_alt = 4.5
        self.quadcopter_brain.go_to_waypoint_given_metered_offset(delta_east,
                                                                  delta_north)
        called_waypoint = go_to_waypoint_mock.call_args[0][0][0]
        actual_waypoint = {"latitude": 41.999912, "longitude": -70.999877,
                           "altitude": 4.5}  # Taken from google maps
        self.assertAlmostEqual(called_waypoint["latitude"],
                               actual_waypoint["latitude"], 6)
        self.assertAlmostEqual(called_waypoint["longitude"],
                               actual_waypoint["longitude"], 6)
        self.assertAlmostEqual(called_waypoint["altitude"],
                               actual_waypoint["altitude"])
        wait_time = go_to_waypoint_mock.call_args[0][1]
        self.assertAlmostEqual(wait_time, 15)
        # Second case: altitude delta and custom sleep time are honored.
        delta_east = -10  # Meters
        delta_north = 10  # Meters
        delta_alt = 2  # Meters
        sleep_time = 10  # Seconds
        self.quadcopter_brain.go_to_waypoint_given_metered_offset(delta_east,
                                                                  delta_north,
                                                                  delta_alt,
                                                                  sleep_time)
        called_waypoint = go_to_waypoint_mock.call_args[0][0][0]
        actual_waypoint = {"latitude": 42, "longitude": -71,
                           "altitude": 6.5}  # Taken from google maps
        self.assertNotEqual(called_waypoint["latitude"],
                            actual_waypoint["latitude"], 6)
        self.assertNotEqual(called_waypoint["longitude"],
                            actual_waypoint["longitude"], 6)
        self.assertAlmostEqual(called_waypoint["altitude"],
                               actual_waypoint["altitude"])
        wait_time = go_to_waypoint_mock.call_args[0][1]
        self.assertAlmostEqual(wait_time, 10)

    # # Ask Kyle what's up
    # @mock.patch('rospy.sleep')
    # def test_find_landing_site(self, sleep_mock):
    #     # Test what happens when seen
    #     self.landing_site_mock.in_view = True
    #     self.landing_site_mock.lat_long.result = (-42, 71)
    #     res = self.quadcopter_brain.find_landing_site()
    #     self.assertEqual(res, (True, -42, 71))
    #     # Test what happens when not seen
    #     self.landing_site_mock.in_view = False
    #     self.landing_site_mock.lat_long.result = (-42, 71)
    #     res = self.quadcopter_brain.find_landing_site()
    #     self.assertEqual(res, (False, 0, 0))
    #     # Test what happens when seen after a few tries
    #     in_view_mock = mock.PropertyMock(side_effect=[False, False, True])
    #     type(self.landing_site).in_view = in_view_mock
    #     res = self.quadcopter_brain.find_landing_site()
    #     expected = [mock.call(0.1), mock.call(0.1)]
    #     self.assertEqual(res, (True, -42, 71))
    #     self.assertEqual(sleep_mock.call_args_list, expected)

    @mock.patch('quadcopter_brain.QuadcopterBrain.go_to_waypoints')
    @mock.patch('quadcopter_brain.QuadcopterBrain.find_landing_site')
    def test_land_on_fiducial_simple(self, find_mock, go_to_mock):
        # Fiducial found during landing
        find_mock.return_value = True, 42, 71
        self.quadcopter_brain.land_on_fiducial_simple()
        wpt = {'latitude': 42,
               'longitude': 71,
               'altitude': 1.0}
        go_to_mock.assert_called_once_with([wpt])
        self.quadcopter_mock.land.assert_called_once_with()
        # Fiducial not found during landing
        go_to_mock.reset_mock()
        self.quadcopter_mock.land.reset_mock()
        find_mock.return_value = False, 0, 0
        self.quadcopter_brain.land_on_fiducial_simple()
        assert not go_to_mock.called
        self.quadcopter_mock.land.assert_called_once_with()

    @mock.patch('quadcopter_brain.QuadcopterBrain.find_landing_site')
    @mock.patch('quadcopter_brain.QuadcopterBrain.go_to_waypoints')
    def test_find_landing_site_at_waypoints(self, go_to_mock, find_site_mock):
        # Site never found: every waypoint should be visited and probed.
        waypoint_data = [0, 1]
        find_site_mock.return_value = False, 0, 0
        res = \
            self.quadcopter_brain.find_landing_site_at_waypoints(waypoint_data)
        go_to_expected = [mock.call([pt]) for pt in waypoint_data]
        self.assertEqual(go_to_mock.call_args_list, go_to_expected)
        find_site_expected = [mock.call(15) for point in waypoint_data]
        self.assertEqual(find_site_mock.call_args_list, find_site_expected)
        self.assertEqual(res, (False, 0, 0))
        # Site found at the first waypoint: the search stops early.
        go_to_mock.reset_mock()
        find_site_mock.reset_mock()
        find_site_mock.return_value = True, 42.0, -71.0
        res = \
            self.quadcopter_brain.find_landing_site_at_waypoints(waypoint_data)
        go_to_mock.assert_called_once_with([0])
        find_site_mock.assert_called_once_with(15)
        self.assertEqual(res, (True, 42.0, -71.0))
if __name__ == '__main__':
unittest.main()
| vpreston/mission_runner | quadcopter_brain/src/quadcopter_brain/test_quadcopter_brain.py | test_quadcopter_brain.py | py | 6,854 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "quadcopter_brain.QuadcopterBrain",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "mo... |
11525663576 | import sqlite3
con = sqlite3.connect('example.db')
cursor = con.cursor()

# ROBUSTNESS: create the target table on a fresh database so the script is
# runnable out of the box (schema inferred from the rows inserted below).
cursor.execute(
    "CREATE TABLE IF NOT EXISTS person (name TEXT, age INTEGER, email TEXT)")

# (name, age, email) rows to bulk-insert.
persons = [("kiran", 21, "kiran@gmail.com"),
           ("anu", 29, "anu@yahoo.com"),
           ("sathis", 65, "satish@rediff.com")]

# executemany binds each tuple against the parameterised INSERT -- one
# prepared statement, no string concatenation.
cursor.executemany("INSERT INTO person values (?, ?, ?)", persons)
print(cursor.rowcount)

con.commit()
con.close()
| avinash431/IntroductionToPython | databases/database-3.py | database-3.py | py | 325 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 2,
"usage_type": "call"
}
] |
28987537714 | import threading as td
import RPi.GPIO as GPIO
import datetime as dt
import time
from helpers import TimeMeasure
import elemental_api_class as liveapi
class StreamAvailController:
    """Maps a GPIO contact-closure trigger onto Elemental Live ad-avail cues.

    A rising edge on `gpi_trigger` starts a cue on `event_id`; a falling
    edge stops it.  After every successful start/stop the stream is locked
    for `lock_interval` seconds to debounce rapid re-triggers; when the
    lock expires the pin level is re-checked so a missed edge is replayed.
    """

    def __init__(self, gpi_trigger, event_id, elemental_ip, lock_interval=3, in_cue=False):
        self.gpi_trigger = gpi_trigger
        self.event_id = event_id
        self.elemental_api = liveapi.Elemental_api(elemental_ip)
        self.lock_interval = lock_interval
        self.in_cue = in_cue
        self.stream_locked = False
        self.splice_counter = 0     # successful cue starts
        self.interrupt_counter = 0  # GPIO edges observed
        self.reaction_time = TimeMeasure()

    def __str__(self):
        return "GPI: {}, event_id: {}, in_cue: {}".format(self.gpi_trigger, self.event_id, self.in_cue)

    def start_cue(self):
        """Start the ad avail unless debounce-locked.

        Returns the API response, or the sentinel 1 when locked.
        """
        if self.stream_locked:
            return 1
        response = self.elemental_api.start_cue(self.event_id)
        self.in_cue = True
        self.lock_stream()
        print("3. Starting cue")
        return response

    def stop_cue(self):
        """Stop the ad avail unless debounce-locked (sentinel 1 when locked)."""
        if self.stream_locked:
            return 1
        response = self.elemental_api.stop_cue(self.event_id)
        self.in_cue = False
        self.lock_stream()
        print("3. Stopping cue")
        return response

    def start_stop_avail(self, gpi_triggered):
        """GPIO edge callback: start or stop the avail based on pin level."""
        time.sleep(0.001)  # let the contact settle before sampling
        edge = GPIO.input(gpi_triggered)  # 1 = rising edge, 0 = falling edge
        self.reaction_time.start_measure()
        self.interrupt_counter += 1
        print('--------------------------------------------\n')
        print("1.{} / {} Event detcted / Number: {}".format(dt.datetime.now(), edge, self.interrupt_counter))
        print("2. Stream is in cue: {}".format(self.in_cue))
        # Rising edge detected and stream is NOT in cue => start the avail.
        if edge and not self.in_cue:
            response = self.start_cue()
            # BUG FIX: the original used `response is 1`; identity comparison
            # with an int literal is implementation-defined -- use equality.
            if response == 1:
                print('Stream is locked!')
                return 0
            self.reaction_time.end_measure()
            self.splice_counter += 1
            print('4. AD STARTED: Splice count:{} / Event Num: {}\n'.format(self.splice_counter, self.interrupt_counter))
            print(response.text)
            self.reaction_time.print_measure()
            print('--------------------------------------------\n')
            return 0
        # Falling edge detected and stream IS in cue => stop the avail.
        elif not edge and self.in_cue:
            response = self.stop_cue()
            if response == 1:
                print('Stream is locked!')
                return 0
            # CONSISTENCY: measure only after the lock check, mirroring the
            # start branch (the original measured before it).
            self.reaction_time.end_measure()
            print('4. AD STOPPED: Splice count:{} / Event Num: {}\n'.format(self.splice_counter, self.interrupt_counter))
            print(response.text)
            self.reaction_time.print_measure()
            print('--------------------------------------------\n')
            return 0
        return 0

    def lock_stream(self):
        """Debounce: block further cue changes for lock_interval seconds."""
        self.stream_locked = True
        unlock_timer = td.Timer(self.lock_interval, self.unlock_stream)
        unlock_timer.start()

    def unlock_stream(self):
        """Timer callback: release the lock and replay any missed edge."""
        self.stream_locked = False
        # Locked while in an avail (pin went high): if the pin has since
        # dropped we missed the falling edge -- stop the cue now.
        if self.in_cue:
            return 0 if GPIO.input(int(self.gpi_trigger)) else self.stop_cue()
        # Locked while out of an avail (pin went low): if the pin is high
        # again we missed the rising edge -- start the cue now.
        elif not self.in_cue:
            return 0 if not GPIO.input(int(self.gpi_trigger)) else self.start_cue()
| Hristiyan-Andreev/gpi_0.7_hw_reworked | s_av_ctrl.py | s_av_ctrl.py | py | 4,401 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "elemental_api_class.Elemental_api",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "helpers.TimeMeasure",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "RPi.... |
73223886825 | #!/usr/bin/env python3
import argparse
import cv2
import pic
import sys
import time
from PIL import *
def clearscreen(n):
print('\033[1A\033[K'*n, end='')
def main(filename, resize, colors=None, webcam=False, invert=False,
scale=(1, 1), nosleep=False):
vc = cv2.VideoCapture(filename)
tpf = 1.0/vc.get(cv2.CAP_PROP_FPS)
ei = pic.EmojiImage(colors=colors, invert=invert, scale=scale)
rval = False
height = 0
# Get the first frame to read the properties.
if vc.isOpened():
rval, frame = vc.read()
ei.fromarray(frame)
res, height = ei.make(resize)
print(res, end='')
while rval:
start = time.time()
clearscreen(height*scale[1])
rval, frame = vc.read()
if rval:
ei.fromarray(frame)
res, height = ei.make(resize)
print(res, end='')
# determine if we need to sleep. Not really that accurate, but i'm
# lazy and this is good enough.
diff = time.time()-start
if webcam is False and nosleep is False and diff < tpf:
time.sleep(tpf-diff)
vc.release()
| bahorn/emojipic | emojipic/ani.py | ani.py | py | 1,143 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_FPS",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pic.EmojiImage",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "time.time",
... |
37635424970 | # Given an array nums which consists of non-negative integers and an integer m, you can split the array into m non-empty continuous subarrays.
# Write an algorithm to minimize the largest sum among these m subarrays.
# Example 1:
# Input: nums = [7,2,5,10,8], m = 2
# Output: 18
# Explanation:
# There are four ways to split nums into two subarrays.
# The best way is to split it into [7,2,5] and [10,8],
# where the largest sum among the two subarrays is only 18.
# Example 2:
# Input: nums = [1,2,3,4,5], m = 2
# Output: 9
# Example 3:
# Input: nums = [1,4,4], m = 3
# Output: 4
# Constraints:
# 1 <= nums.length <= 1000
# 0 <= nums[i] <= 106
# 1 <= m <= min(50, nums.length)
from functools import cache
class Solution(object):
def splitArray(self, nums, m):
"""
:type nums: List[int]
:type m: int
:rtype: int
"""
prefix_sum = [0]
for n in nums:
prefix_sum.append(prefix_sum[-1]+n)
@cache
def min_max_subarray_sum(ind, splits):
if splits == 1:
return prefix_sum[-1]-prefix_sum[ind]
if splits == len(nums)-ind:
return max(nums[ind:])
min_max = float("inf")
acc_sum = 0
for end in range(ind, len(nums)-splits+1):
acc_sum += nums[end]
if acc_sum > min_max:
break
next_min_max = min_max_subarray_sum(end+1, splits-1)
cur_min_max = max(acc_sum, next_min_max)
min_max = min(min_max, cur_min_max)
return min_max
return min_max_subarray_sum(0, m)
| sunnyyeti/Leetcode-solutions | 410 Split Array Largest Sum.py | 410 Split Array Largest Sum.py | py | 1,656 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "functools.cache",
"line_number": 44,
"usage_type": "name"
}
] |
40961271099 | import argparse
import os
import shutil
from subprocess import run
from probar_entrega1 import probar
import pandas as pd
BASE_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__)))
def bajar_repositorio(info_grupo):
print("Cloning", info_grupo['grupo'])
grupo_path = os.path.join(BASE_PATH, info_grupo.grupo)
if os.path.exists(grupo_path):
shutil.rmtree(grupo_path)
if info_grupo.cvs == 'git':
cmd = '{cvs} clone {cvs}@{servicio}:{url} {grupo}'.format(**info_grupo.to_dict())
elif info_grupo.cvs == 'hg':
cmd = '{cvs} clone ssh://{cvs}@{servicio}/{url} {grupo}'.format(**info_grupo.to_dict())
print("About to execute:", cmd)
run(cmd, shell=True)
def correr_pruebas(info_grupo):
probar(grupo=info_grupo.grupo)
def main(grupo=None, mantener_repositorio=False):
grupos = pd.read_csv('repos.config', sep='|')
if grupo is not None:
grupos = grupos[grupos.grupo == grupo]
for _, info_grupo in grupos.iterrows():
print("#"*160)
print("#"*160)
print("Grupo ", info_grupo.grupo)
if mantener_repositorio:
print("Se saltea la actualización del repositorio")
else:
bajar_repositorio(info_grupo)
correr_pruebas(info_grupo)
print("#"*160)
print("#"*160)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--grupo', help='Grupo en particular')
parser.add_argument('--mantener_repositorio', action='store_true', help='Evita volver a clonar el repo')
args = parser.parse_args()
main(args.grupo, args.mantener_repositorio)
| ucse-ia/ucse_ia | 2018/corrector.py | corrector.py | py | 1,647 | python | es | code | 5 | github-code | 36 | [
{
"api_name": "os.path.realpath",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"lin... |
26552867136 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/4/12 23:29
# @Author : DZQ
# @File : main.py
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtWidgets import *
import xlrd
from threading import Thread
import json
from BaiduIndexSpider import BaiduIndexSpider
from workbook_handle import WorkBook
import math
import time
import random
class MainThread(QThread):
    """Background worker that crawls Baidu Index data and saves Excel files.

    Emits `_my_signal` with progress text for the UI and `_keyword_signal`
    with the subset of keywords that Baidu actually indexes.
    """

    _my_signal = pyqtSignal(str)
    _keyword_signal = pyqtSignal(list)

    def __init__(self, keywords: list, filePath):
        super(MainThread, self).__init__()
        self.spider = BaiduIndexSpider(keywords)
        self.filePath = filePath    # directory where output<N>.xls files go
        self.keywords = keywords
        self.workbookNum = 1        # suffix of the next output workbook

    def split_keywords(self, keywords: list) -> [list]:
        """Split keywords into chunks of at most 10 (API batch size, presumably)."""
        return [keywords[i * 10: (i + 1) * 10] for i in range(math.ceil(len(keywords) / 10))]

    def spider_craw(self):
        """Crawl one keyword batch and save it to output<N>.xls."""
        self._my_signal.emit("正在进行第{}个爬虫".format(self.workbookNum))
        for each in self.spider.get_all_country():
            try:
                self.workbook.write_cell(each)
            # BUG FIX: narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are not swallowed; a malformed record is
            # still skipped best-effort.
            except Exception:
                pass
        self._my_signal.emit("已爬取完全国信息")
        self.workbook.init_province_cols()
        self._my_signal.emit("开始爬取各省市信息")
        year = 2011
        for each in self.spider.get_index():
            try:
                self.workbook.write_cell(each)
            except Exception:
                pass
            try:
                # Report progress once per year boundary in the data.
                date = int(each['date'].split("-")[0])
                if date > year:
                    self._my_signal.emit("爬取到{}年了".format(date))
                    year = date
            except Exception:
                pass
        self._my_signal.emit("爬虫结束,正在保存excel")
        filePath = self.filePath + "/output{}.xls".format(self.workbookNum)
        self.workbookNum += 1
        self.workbook.workbook.save(filePath)
        self._my_signal.emit("保存Excel完成")

    def run(self) -> None:
        """QThread entry point: validate login and keywords, then crawl batches."""
        if not self.spider.is_login():
            self._my_signal.emit("Cookie过期")
            return
        real_keywords = list()
        self._my_signal.emit("正在判断关键词是否被收录")
        for each in self.keywords:
            if self.spider.is_keyword(each):
                real_keywords.append(each)
        if len(real_keywords) == 0:
            self._my_signal.emit("没有可以爬取的关键词")
            return
        self._keyword_signal.emit(real_keywords)
        self.keywords_list = self.split_keywords(real_keywords)
        self._my_signal.emit("关键词被分解成了{}个组\n".format(len(self.keywords_list)))
        self._my_signal.emit("开始爬虫")
        for each_keyword_list in self.keywords_list:
            self.workbook = WorkBook()
            self.spider.set_keywords(each_keyword_list)
            self.spider_craw()
            # Random pause between batches to avoid rate limiting.
            time.sleep(random.uniform(30, 35))
class Ui_MainWindow(QtWidgets.QMainWindow):
    """Main window of the crawler GUI: pick a keyword spreadsheet, save the
    Baidu cookie to config.json, and launch a MainThread crawl while status
    messages stream into the msgBox text browser."""
    def __init__(self):
        super(Ui_MainWindow, self).__init__()
        self.setupUi(self)
        self.retranslateUi(self)
    def open_file(self):
        """Ask for the keyword Excel file; its directory becomes the output
        directory, and the keywords are loaded immediately."""
        filePath = QFileDialog.getOpenFileName(self, '选择文件', '', 'Excel files(*.xlsx , *.xls)')
        self.inputFilePath = filePath[0]
        self.filePathText.setPlainText(self.inputFilePath)
        # Keep everything up to the last path segment as the output directory.
        filePathList = filePath[0].split("/")[:-1]
        outputFilePath = "/".join(filePathList)
        self.outputFilePath = outputFilePath
        self.get_keywords()
    def print_keyword(self, keywords):
        """Append each keyword to the message box (run from a worker thread)."""
        for each in keywords:
            self.msgBox.append(each)
    def handle_signal(self, info):
        """Slot for MainThread._my_signal: show one status message."""
        self.msgBox.append(info)
    def start_spider(self):
        """Start the crawl in a background MainThread.
        NOTE(review): self.keywords and self.outputFilePath only exist after
        a file was chosen in open_file(); clicking the start button first
        raises AttributeError — confirm the intended flow.
        """
        if len(self.keywords) == 0:
            self.msgBox.append("没有可以爬取的关键词")
            return
        self.thread = MainThread(self.keywords, self.outputFilePath)
        self.thread._my_signal.connect(self.handle_signal)
        self.thread._keyword_signal.connect(self.handle_list_signal)
        self.thread.start()
    def save_cookie(self):
        """Persist the pasted Baidu cookie into ./config.json."""
        cookie = self.cookieText.toPlainText()
        if len(cookie) < 10:
            self.msgBox.append("Cookie信息太短")
            return
        config = json.loads(open("./config.json", "r", encoding="utf8").read())
        config['cookie'] = cookie
        json.dump(config, open("./config.json", "w", encoding="utf8"), ensure_ascii=False)
        self.msgBox.append("Cookie保存成功")
    def handle_list_signal(self, info):
        """Slot for MainThread._keyword_signal: list the indexed keywords."""
        self.msgBox.append("获取到以下可以爬取的关键词:")
        thread = Thread(target=self.print_keyword, args=(info,))
        thread.start()
        thread.join()
        self.msgBox.append("共获得{}个被收录的关键词".format(len(info)))
    def get_keywords(self):
        """Read the first column of the selected spreadsheet into self.keywords."""
        excelFile = xlrd.open_workbook(self.inputFilePath)
        sheet = excelFile.sheet_by_index(0)
        row_num = sheet.nrows
        keywords = list()
        for i in range(row_num):
            value = str(sheet.cell_value(i, 0)).strip()
            if len(value) > 0:
                keywords.append(value)
        self.keywords = keywords
        thread = Thread(target=self.print_keyword, args=(keywords,))
        thread.start()
        thread.join()
        self.msgBox.append("共获取到{}个关键词".format(len(keywords)))
    def setupUi(self, MainWindow):
        """Qt-Designer-style widget construction: geometry, fonts, and
        signal/slot wiring for the file picker, cookie box, start button
        and message log."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1050, 744)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.filePathLabel = QtWidgets.QLabel(self.centralwidget)
        self.filePathLabel.setGeometry(QtCore.QRect(30, 50, 101, 51))
        font = QtGui.QFont()
        font.setFamily("楷体")
        font.setPointSize(12)
        self.filePathLabel.setFont(font)
        self.filePathLabel.setObjectName("filePathLabel")
        self.filePathText = QtWidgets.QPlainTextEdit(self.centralwidget)
        self.filePathText.setGeometry(QtCore.QRect(180, 50, 631, 51))
        font = QtGui.QFont()
        font.setFamily("Times New Roman")
        font.setPointSize(12)
        self.filePathText.setFont(font)
        self.filePathText.setObjectName("filePathText")
        self.filePathBtn = QtWidgets.QPushButton(self.centralwidget)
        self.filePathBtn.setGeometry(QtCore.QRect(830, 60, 141, 41))
        font = QtGui.QFont()
        font.setFamily("楷体")
        font.setPointSize(12)
        self.filePathBtn.setFont(font)
        self.filePathBtn.setObjectName("filePathBtn")
        self.startSpiderBtn = QtWidgets.QPushButton(self.centralwidget)
        self.startSpiderBtn.setGeometry(QtCore.QRect(390, 150, 201, 61))
        font = QtGui.QFont()
        font.setFamily("楷体")
        font.setPointSize(12)
        self.startSpiderBtn.setFont(font)
        self.startSpiderBtn.setObjectName("startSpiderBtn")
        self.cookieLabel = QtWidgets.QLabel(self.centralwidget)
        self.cookieLabel.setGeometry(QtCore.QRect(40, 290, 81, 41))
        font = QtGui.QFont()
        font.setFamily("Times New Roman")
        font.setPointSize(12)
        self.cookieLabel.setFont(font)
        self.cookieLabel.setObjectName("cookieLabel")
        self.cookieText = QtWidgets.QPlainTextEdit(self.centralwidget)
        self.cookieText.setGeometry(QtCore.QRect(180, 270, 631, 81))
        self.cookieText.setObjectName("cookieText")
        self.cookieBtn = QtWidgets.QPushButton(self.centralwidget)
        self.cookieBtn.setGeometry(QtCore.QRect(840, 290, 141, 41))
        font = QtGui.QFont()
        font.setFamily("Times New Roman")
        font.setPointSize(12)
        self.cookieBtn.setFont(font)
        self.cookieBtn.setObjectName("cookieBtn")
        self.msgBox = QtWidgets.QTextBrowser(self.centralwidget)
        self.msgBox.setGeometry(QtCore.QRect(160, 380, 681, 301))
        font = QtGui.QFont()
        font.setFamily("楷体")
        font.setPointSize(12)
        self.msgBox.setFont(font)
        self.msgBox.setObjectName("msgBox")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1352, 30))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        # Button wiring: choose file, save cookie, start the crawl.
        self.filePathBtn.clicked.connect(self.open_file)
        self.cookieBtn.clicked.connect(self.save_cookie)
        self.startSpiderBtn.clicked.connect(self.start_spider)
    def retranslateUi(self, MainWindow):
        """Set all user-visible captions for the window and its widgets."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "百度指数爬虫"))
        self.filePathLabel.setText(_translate("MainWindow", "文件目录"))
        self.filePathBtn.setText(_translate("MainWindow", "选择文件"))
        self.startSpiderBtn.setText(_translate("MainWindow", "启动爬虫"))
        self.cookieLabel.setText(_translate("MainWindow", "Cookie"))
        self.cookieBtn.setText(_translate("MainWindow", "更新Cookie"))
if __name__ == '__main__':
    # Build the Qt application, attach the generated UI to a main window and
    # hand control to the Qt event loop until the window closes.
    # NOTE(review): Ui_MainWindow.__init__ already calls setupUi(self), so
    # setupUi runs twice (once on `ui`, once on `MainWindow`) — confirm intent.
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| dzqann/BaiduIndex | main.py | main.py | py | 9,586 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "PyQt5.QtCore.QThread",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "... |
8525512026 | import tensorflow as tf
import os
import sys
import data_generation
import networks
import scipy.io as sio
import param
import util
import truncated_vgg
from keras.backend.tensorflow_backend import set_session
from keras.optimizers import Adam
import scipy.misc
def train(model_name, gpu_id):
    """Train the posewarp generator network.

    Builds the network, streams batches from the train/test feeds and
    periodically checkpoints weights under params['model_save_dir']/<model_name>.

    NOTE(review): `train_image`/`test_image` are computed every iteration but
    never used or written anywhere — confirm whether they were meant to be
    saved to the output directory.
    """
    with tf.Session() as sess:
        params = param.get_general_params()
        network_dir = params['model_save_dir'] + '/' + model_name
        # Creates models directory if not exist.
        if not os.path.isdir(network_dir):
            os.mkdir(network_dir)
        train_feed = data_generation.create_feed(params, params['data_dir'], 'train')
        test_feed = data_generation.create_feed(params, params['data_dir'], 'test')
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        set_session(tf.Session(config=config))
        # Frozen VGG used only to compute the perceptual loss.
        vgg_model = truncated_vgg.vgg_norm()
        networks.make_trainable(vgg_model, False)
        response_weights = sio.loadmat('../data/vgg_activation_distribution_train.mat')
        model = networks.network_posewarp(params)
        model.compile(optimizer=Adam(lr=1e-4), loss=[networks.vgg_loss(vgg_model, response_weights, 12)])
        n_iters = params['n_training_iter']
        # NOTE(review): hard-coded absolute Windows path for TensorBoard logs —
        # should probably come from params.
        summary_writer = tf.summary.FileWriter("D:\Proyectos\JEJU2018\Code\posewarp-cvpr2018\code\logs", graph=sess.graph)
        # Fixed sample batches used to snapshot qualitative progress.
        tr_x, tr_y = next(train_feed)
        te_x, te_y = next(test_feed)
        # Prepare output directories if they don't exist.
        output_dir = '../output/' + model_name + '/'
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)
        scipy.misc.imsave('../output/tr_orig_image.png', tr_x[0][0, :, :, :])
        scipy.misc.imsave('../output/tr_targ_image.png', tr_y[0, :, :, :])
        scipy.misc.imsave('../output/te_orig_image.png', te_x[0][0, :, :, :])
        scipy.misc.imsave('../output/te_targ_image.png', te_y[0, :, :, :])
        for step in range(0, n_iters):
            x, y = next(train_feed)
            train_loss = model.train_on_batch(x, y)
            util.printProgress(step, 0, train_loss)
            # out = sess.run(conv, feed_dict={"input_1:0" : x[0]})
            # plt.matshow(out[0, :, :, 0])
            # plt.show()
            # Fetch graph tensors by name: generated image, input/target
            # images and the source mask.
            gen = tf.get_default_graph().get_tensor_by_name("loss/add_2_loss/lambda_5/add:0")
            inp = tf.get_default_graph().get_tensor_by_name("in_img0:0")
            out = tf.get_default_graph().get_tensor_by_name("in_img1:0")
            p_s = tf.get_default_graph().get_tensor_by_name("mask_src/truediv:0")
            # p_t = tf.get_default_graph().get_tensor_by_name("in_pose1:0")
            image_summary_1 = tf.summary.image('images', [inp[0, :, :, :], out[0, :, :, :], gen[0, :, :, :]],
                                               max_outputs=100)
            # image_summary_2 = tf.summary.image('pose', [tf.reduce_sum(p_s[0, :, :, :], 2, keepdims=True)], max_outputs=100)
            image_summary_1 = sess.run(image_summary_1,
                                       feed_dict={"in_img0:0": x[0], "in_pose0:0": x[1], "in_pose1:0": x[2],
                                                  "mask_prior:0": x[3], "trans_in:0": x[4], "in_img1:0": y})
            # image_summary_2 = sess.run(image_summary_2, feed_dict={"in_img0:0" : x[0], "in_pose0:0" : x[1], "in_pose1:0" : x[2],
            #                                      "mask_prior:0" : x[3], "trans_in:0" : x[4], "in_img1:0" : y})
            summary_writer.add_summary(image_summary_1)
            # summary_writer.add_summary(image_summary_2)
            train_image = sess.run(gen, feed_dict={"in_img0:0": tr_x[0], "in_pose0:0": tr_x[1], "in_pose1:0": tr_x[2],
                                                   "mask_prior:0": tr_x[3], "trans_in:0": tr_x[4], "in_img1:0": tr_y})
            test_image = sess.run(gen, feed_dict={"in_img0:0": te_x[0], "in_pose0:0": te_x[1], "in_pose1:0": te_x[2],
                                                  "mask_prior:0": te_x[3], "trans_in:0": te_x[4], "in_img1:0": te_y})
            if step > 0 and step % params['model_save_interval'] == 0:
                model.save_weights(network_dir + '/' + str(step) + '.h5')
if __name__ == "__main__":
    # Usage: python posewarp_train.py <model_name> <gpu_id>
    args = sys.argv
    if len(args) == 3:
        train(args[1], args[2])
    else:
        print("Need model name and gpu id as command line arguments.")
| TZebin/Deep-Learning-Camp-JEJU2018 | Code/posewarp-cvpr2018/code/posewarp_train.py | posewarp_train.py | py | 4,468 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.Session",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "param.get_general_params",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
26987164949 | import datetime
from django import forms
from django.core.exceptions import ValidationError
from .models import TimeLog, Subject, Tag
class DateForm(forms.Form):
    """Date-range filter form bounded by the user's earliest/latest record dates.

    ``min_date``/``max_date`` are popped from the constructor kwargs; when the
    user has no records yet (both are None) the date inputs are disabled.
    """

    start = forms.DateField(label='From')
    end = forms.DateField(label='To')

    def __init__(self, *args, **kwargs):
        self.min_date = kwargs.pop('min_date')
        self.max_date = kwargs.pop('max_date')
        super().__init__(*args, **kwargs)
        # Mutate self.fields (per-instance copies) instead of base_fields:
        # base_fields is shared by every instance of the class, so widget
        # attrs set there leak across forms/requests (e.g. a stale
        # 'disabled' flag would stick once any user without records hit this).
        if self.min_date and self.max_date:
            for field_name in ('start', 'end'):
                self.fields[field_name].widget.attrs['min'] = self.min_date.isoformat()
                self.fields[field_name].widget.attrs['max'] = self.max_date.isoformat()
        else:
            # User has no record yet: disable both date inputs.
            for field_name in ('start', 'end'):
                self.fields[field_name].widget.attrs['disabled'] = True

    def clean(self):
        """Validate the range: start <= end and both inside [min_date, max_date]."""
        # min_date being None means the user has no record at all.
        if self.min_date is None:
            raise ValidationError("You don't have any record!")
        cleaned_data = super().clean()
        start = cleaned_data.get('start')
        end = cleaned_data.get('end')
        if start and end:
            if start > end:
                raise ValidationError('Your selected start date is greater then selected end date')
            if not (self.min_date <= start <= self.max_date and self.min_date <= end <= self.max_date):
                raise ValidationError(f'Your records date are between {self.min_date} and {self.max_date}')
        return cleaned_data
class TimeLogForm(forms.ModelForm):
    """Form for logging time against one of the user's subjects.

    Duration is entered as separate hour/minute spinners and converted into a
    total minute count in clean(); a single date is capped at 24 hours across
    all of the user's entries for that date.
    """
    hours = forms.IntegerField(min_value=0, max_value=24)
    minutes = forms.IntegerField(min_value=0, max_value=59)
    def __init__(self, *args, **kwargs):
        # The owner of the new time log, injected by the view.
        self.registrant_user = kwargs.pop('registrant_user', None)
        super().__init__(*args, **kwargs)
        # add registrant user's subjects and tags to the corresponding field choices
        self.fields['subject'].queryset = self.registrant_user.subject_set.all()
        self.fields['tags'].queryset = self.registrant_user.tag_set.all()
        # add html attribute to the widget of fields
        self.fields['subject'].widget.attrs['class'] = 'form-select'
        self.fields['tags'].widget.attrs['class'] = 'form-select'
        self.fields['tags'].widget.attrs['size'] = '3'
        self.fields['date'].widget.attrs['class'] = 'form-control'
        self.fields['hours'].widget.attrs['class'] = 'form-control'
        self.fields['minutes'].widget.attrs['class'] = 'form-control'
        self.fields['description'].widget.attrs['class'] = 'form-control'
    class Meta:
        model = TimeLog
        # user and duration are filled in programmatically, not by the form.
        exclude = ['user', 'duration']
        widgets = {
            # NOTE(review): datetime.date.today is passed as a callable widget
            # attr — verify it renders as today's date rather than a repr.
            'date': forms.DateInput(attrs={'type': 'date', 'max': datetime.date.today}),
        }
    def clean(self):
        """Derive `duration` (minutes) and enforce the 24h-per-day budget."""
        clean_data = super().clean()
        hours = clean_data.get('hours')
        minutes = clean_data.get('minutes')
        date = clean_data.get('date')
        # calculate and check if the duration is valid
        if hours is not None and minutes is not None and date:
            # calculate duration minutes based on hours and minutes
            duration = (hours * 60) + minutes
            if duration == 0:
                raise ValidationError("Both hour and minute fields can not be 0.")
            if duration > 1440:
                raise ValidationError("One day is 24 hours!")
            # check the particular date's durations doesn't exceed 24 hours
            previous_durations_total = 0
            for timelog in self.registrant_user.timelogs.filter(date=date):
                previous_durations_total += timelog.duration
            if (previous_durations_total + duration) > 1440:
                # Tell the user how much of the day is still unallocated.
                remaind_hours = (1440 - previous_durations_total) // 60
                remaind_miuntes = (1440 - previous_durations_total) % 60
                if remaind_miuntes or remaind_hours:
                    raise ValidationError(f'Your remaind duration for '
                                          f'{date} is {remaind_hours} hours and {remaind_miuntes} minutes.')
                else:
                    raise ValidationError(f'There is no time left for {date}')
            clean_data['duration'] = duration
        return clean_data
class SubjectForm(forms.ModelForm):
    """ModelForm for creating a Subject; names are lower-cased and duplicates
    (within the user's existing subjects) are rejected."""

    def __init__(self, *args, **kwargs):
        self.user_subjects = kwargs.pop('user_subjects')
        super().__init__(*args, **kwargs)
        # Apply the Bootstrap class to every visible widget.
        for bound_field in self.visible_fields():
            bound_field.field.widget.attrs['class'] = 'form-control'

    class Meta:
        model = Subject
        fields = ['name', 'description']

    def clean(self):
        """Lower-case the name and reject it if the user already has it."""
        cleaned = super().clean()
        name = cleaned.get('name')
        if name:
            lowered = name.lower()
            existing_names = {subject.name for subject in self.user_subjects}
            if lowered in existing_names:
                raise ValidationError(f'{lowered} already exists.')
            cleaned['name'] = lowered
        return cleaned
class TagForm(forms.ModelForm):
    """ModelForm for creating a Tag; names are lower-cased and duplicates
    (within the user's existing tags) are rejected."""

    def __init__(self, *args, **kwargs):
        self.user_tags = kwargs.pop('user_tags')
        super().__init__(*args, **kwargs)
        # Bootstrap styling for the single name input.
        self.fields['name'].widget.attrs['class'] = 'form-control'

    class Meta:
        model = Tag
        fields = ['name']

    def clean(self):
        """Lower-case the name and reject it if the user already has it."""
        cleaned = super().clean()
        name = cleaned.get('name')
        if name:
            lowered = name.lower()
            if lowered in {tag.name for tag in self.user_tags}:
                raise ValidationError(f'{lowered} already exists.')
            cleaned['name'] = lowered
        return cleaned
{
"api_name": "django.forms.Form",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.forms.DateField",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.forms... |
18844787736 | import sys,time,unittest
from selenium.webdriver.common.by import By
from selenium import webdriver
sys.path.append(".//")
sys.path.append(sys.path[0].split("ATQ项目")[0] + 'ATQ项目\\02.方法模块')
import Function_temp as F
class ATQtest(unittest.TestCase):
    """Selenium UI test for the ATQ system: logs in, then imports a test-case
    spreadsheet through the case-management screen."""
    # Class-level driver objects shared by all tests.
    # NOTE(review): these browsers launch at class-definition time, i.e.
    # merely importing this module opens two Firefox windows; dr1 (headless)
    # is never used afterwards — confirm it is needed.
    option =webdriver.FirefoxOptions()
    option.set_headless()
    dr1 = webdriver.Firefox(firefox_options=option)# headless instance
    dr = webdriver.Firefox()# visible instance driven via F
    F.driver=dr
    def setUp(self,driver = dr):
        self.driver =driver# browser under test
        self.base_url = "http://11.8.127.248:8080/atq/frame.jsp"# site URL
        self.username='sunhongbin'# login user name
    def test_01_login(self):
        #data
        print("登录ATQ!")
        username = self.username
        #page_element
        用户名输入框=(By.NAME,'loginId')
        密码输入框=(By.NAME,'password')
        登录按钮=(By.LINK_TEXT,'登录')
        #Script
        driver = self.driver
        self.driver.get(self.base_url)
        F.find_element(用户名输入框).clear()
        F.find_element(用户名输入框).send_keys(username)
        F.find_element(密码输入框).send_keys('123456')
        F.find_element(登录按钮).click()
        # wait for the post-login page to settle
        time.sleep(3)
    def test_02_case_import(self):
        #data
        print("进入案例管理页面!")
        driver = self.driver
        #case_path=self.case_path
        #page
        案例管理菜单=(By.XPATH,"//span[contains(text(),'案例管理')]")
        需求管理菜单=(By.XPATH,"//span[contains(text(),'需求管理')]")
        xf=(By.XPATH,"/html/body/div[3]/div/div/div[2]/div[2]/div/iframe")
        加载提示=(By.XPATH,"//div[contains(text(),'Loading')]")
        导入案例按钮=(By.XPATH,"//span[text()='导入案例']")
        #Script
        print("导入案例!")
        F.find_element(案例管理菜单).click()
        time.sleep(2)
        # switch into the case-management iframe
        #driver.switch_to.frame(F.find_element(xf).find_element())
        F.switch_to.frame(xf)
        print("切换成功?")
        time.sleep(5)
        if F.find_element(需求管理菜单):
            F.find_element(需求管理菜单).highlight()
            print(F.find_element(需求管理菜单))
            print("失败")
        else:
            print("成功")
        print("等待【案例管理_案例列表】页面加载...")
        print(F.find_elements(加载提示))
        # Poll (up to 30 tries, 3s apart) until the Loading overlays are gone,
        # then click the import button.
        for i in range(0,30):
            # NOTE(review): chaining .find_elements() onto the result of
            # F.find_elements(...) looks wrong — verify the Function_temp API.
            refresh=F.find_elements(加载提示).find_elements()
            print()
            if len(refresh)<2:
                print("【案例管理_案例列表】页面加载完成!")
                F.find_element(导入案例按钮).click()
                break
            else:
                print(i)
                time.sleep(3)
        time.sleep(2)
        # file-upload input
        upload=driver.find_element_by_id("upload")
        # NOTE(review): case_path is undefined here (the assignment above is
        # commented out) — this raises NameError at runtime.
        upload.send_keys(case_path)
        value=upload.get_attribute('value')
        if value!="":
            print("文件上传成功!")
            driver.find_element_by_xpath("//span[text()='确定']").click()
            time.sleep(2)
            # If the "importing" indicator is present, wait 30s up front.
            if len(driver.find_elements_by_xpath("//div[contains(text(),'导入中,请稍候')]"))>0:
                print("导入中,请耐心等候...")
                time.sleep(30)
            # After that, keep looking for the completion popup until found.
            # Back to the main frame first.
            driver.switch_to.default_content()
            for i in range(0,100):
                try:
                    text=driver.find_element_by_xpath("/html/body/div[10]/div[2]").text
                    print("案例导入完毕!")
                    break
                except:
                    time.sleep(0.01)
            time.sleep(5)
            # Poll the Loading element until the case-directory tree is ready.
            self.driver.switch_to.frame(xf)
            print("等待【案例管理_案例目录】页面加载...")
            for i in range(0,100):
                refresh= driver.find_elements_by_xpath("//div[contains(text(),'Loading')]")
                if len(refresh)>0:
                    time.sleep(2)
                else:
                    print("【案例管理_案例目录】页面加载完成!")
                    break
            # Read the expected directory names back out of the spreadsheet
            # and check the tree contains them, proving the import succeeded.
            # NOTE(review): openpyxl is not imported at the top of this file.
            ex=openpyxl.load_workbook(case_path)
            sh=ex[ex.sheetnames[0]]
            print("案例目录检查...")
            if self.isElementExist("by.xpath","//span[text()='"+sh['C2'].value+"']/../../../following-sibling::tr[1]//span[text()='"+sh['D2'].value+"']"):
                print("案例目录检查完毕,案例目录存在,案例导入成功!")
            else:print("案例目录检查完毕,未发现案例目录,案例导入失败!")
            # Back to the main frame.
            driver.switch_to.default_content()
        time.sleep(5)
    def tearDown(self):
        pass
        #self.driver.quit()  # multiple tests share the driver, so the browser is deliberately not closed in tearDown
if __name__ == "__main__":
    # Run tests in method-name order (test_01 login before test_02 import).
    unittest.main()
{
"api_name": "sys.path.append",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_numbe... |
3745893127 | """empty message
Revision ID: 9c5fa6db20f1
Revises: ar399258p714
Create Date: 2023-03-06 13:56:47.958406
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9c5fa6db20f1'  # identifier of this migration
down_revision = 'ar399258p714'  # migration this one builds on
branch_labels = None
depends_on = None
def upgrade():
    """Add column_start/column_end to `finding` and include them in the
    per-branch uniqueness constraint."""
    for new_column in ('column_start', 'column_end'):
        op.add_column('finding', sa.Column(new_column, sa.Integer(), nullable=False, server_default=sa.text("0")))
    op.drop_constraint('uc_finding_per_branch', 'finding', type_='unique')
    unique_columns = ['commit_id', 'branch_id', 'rule_name', 'file_path',
                      'line_number', 'column_start', 'column_end']
    op.create_unique_constraint('uc_finding_per_branch', 'finding', unique_columns)
def downgrade():
    """Drop the column coordinates and restore the original uniqueness key."""
    op.drop_constraint('uc_finding_per_branch', 'finding', type_='unique')
    original_columns = ['commit_id', 'branch_id', 'rule_name', 'file_path', 'line_number']
    op.create_unique_constraint('uc_finding_per_branch', 'finding', original_columns)
    for added_column in ('column_start', 'column_end'):
        op.drop_column('finding', added_column)
| abnamro/repository-scanner | components/resc-backend/alembic/versions/9c5fa6db20f1_finding_column.py | 9c5fa6db20f1_finding_column.py | py | 1,202 | python | en | code | 137 | github-code | 36 | [
{
"api_name": "alembic.op.add_column",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer... |
33911980837 | #!/usr/bin/env python3
import queries
import connection_handler
from IPython import embed
import mysql.connector
import grammar_format
from dotenv import load_dotenv
from managers.word_manager import Word_Manager
import re
load_dotenv()
class Phrase_Manager:
    """Parses a phrase from the user, decides whether it is a question or a
    statement, and can teach/store new phrases in the MySQL backing store.

    NOTE(review): the default parameter values are the *string* "None", not
    the None singleton — confirm callers always pass real values.
    """
    def __init__(self, phrase="None", person="None", person_manager="None"):
        self.person = person
        self.person_manager = person_manager
        lower_phrase = phrase.lower()
        # Position of '?' decides question vs statement (-1 means no '?').
        result = lower_phrase.find("?")
        self.new_phrase = self.remove_bad_chars(lower_phrase)
        self.parsed_phrase = self.new_phrase.split()
        if lower_phrase == 'teach':
            self.teach_phrase()
        else:
            if result == -1:
                print("That is a statement")
            else:
                self.get_question_format()
    def get_question_format(self):
        """Build a part-of-speech string for the parsed phrase.
        NOTE(review): question_format is built but never returned or stored.
        """
        question_format = ''
        for word in self.parsed_phrase:
            word_manager = Word_Manager(word)
            question_format += grammar_format.assign_part_of_speech(
                word_manager.word)
    def check_for_assigning_attribute(self):
        """If the phrase is possessive and names a known person attribute,
        persist the new value."""
        if self.possessive and self.check_for_attribute():
            self.assign_attribute()
    def check_for_attribute(self):
        """Extract the attribute named just before 'is' (special-casing
        first/last name) and report whether the person has it."""
        attribute_reference = self.parsed_phrase.index("is")
        attribute_index = attribute_reference - 1
        self.attribute = self.parsed_phrase[attribute_index]
        if self.attribute == 'name':
            # Prefix with 'first'/'last' when present, e.g. "first_name".
            try:
                first_or_last = self.parsed_phrase[attribute_index - 1]
                self.attribute = first_or_last + "_" + self.attribute
            except Exception as e:
                print("Exception has occured 48: " + str(e))
        self.get_new_value()
        if hasattr(self.person, self.attribute):
            return True
        else:
            return False
    def get_new_value(self):
        """Take the word after 'is' as the new attribute value, capitalizing names."""
        self.new_value_index = self.parsed_phrase.index("is") + 1
        self.new_value = self.parsed_phrase[self.new_value_index]
        if self.attribute == 'first_name' or self.attribute == 'last_name':
            self.new_value = self.new_value.capitalize()
    def assign_attribute(self):
        """Delegate the attribute update to the person manager."""
        self.person_manager.update_person(
            self.person.id, self.attribute, self.new_value)
        print("Updated!")
    def determine_if_possessive(self):
        """Set self.possessive by checking the first word against the DB."""
        self.establish_new_connection()
        word = self.parsed_phrase[0]
        try:
            self.cursor.execute(queries.check_possessive(word))
        except Exception as e:
            print("Exception has occured 40: " + str(e))
        result = self.cursor.fetchall()
        self.list_result = [list(i) for i in result]
        if 'is' in self.parsed_phrase:
            if self.check_exists_result(self.list_result):
                self.possessive = True
            else:
                self.possessive = False
    # def handle_question(self):
    #     phrases = self.get_questions()
    def get_questions(self):
        """Fetch and print all stored question phrases."""
        self.establish_new_connection()
        try:
            self.cursor.execute(queries.get_questions())
        except Exception as e:
            print("Exception has occured 40: " + str(e))
        result = self.cursor.fetchall()
        self.list_result = [list(i) for i in result]
        print("Results: " + str(self.list_result))
    def save_new_phrase(self, phrase):
        """Insert the phrase and its person/phrase matrix row, then close."""
        self.establish_new_connection()
        try:
            self.cursor.execute(queries.save_new_phrase(
                phrase, self.person.id))
            phrase_id = self.cursor.lastrowid
        except Exception as e:
            print("Exception has occured 54: " + str(e))
        self.cnx.commit()
        try:
            self.cursor.execute(queries.save_person_phrase_matrix(
                phrase_id, self.person.id))
        except Exception as e:
            print("Exception has occured 61: " + str(e))
        self.cnx.commit()
        self.cursor.close()
        self.cnx.close()
    def remove_bad_chars(self, phrase):
        """Strip punctuation characters the parser cannot handle."""
        bad_chars = [';', ':', '!', "*", "?"]
        for i in bad_chars:
            phrase = phrase.replace(i, '')
        return phrase
    def teach_phrase(self):
        """Prompt for a new phrase and learn it when it is not already known.
        NOTE(review): check_if_known() below returns None, so this branch
        always falls through to learn_phrase() — confirm intent.
        """
        self.phrase = input(
            f"What new phrase would you like to teach me?")
        if self.check_if_known():
            print(f"I already know the phrase {self.phrase}")
        else:
            self.learn_phrase()
    def learn_phrase(self):
        """Ask for the phrase's meaning and persist it."""
        self.definition = input(
            f"What does the phrase {self.phrase} mean? ")
        print("Thanks! I'll remember that.")
        self.save_new_phrase()
    # NOTE(review): this second definition shadows the learn_phrase above,
    # so the interactive version is unreachable — confirm which is intended.
    def learn_phrase(self, phrase):
        print(f"I'm now learning the phrase: {phrase}")
    def check_if_known(self):
        """NOTE(review): has no return statement (always None), and calls
        phrase_known/phrase_not_known which are not defined in this class."""
        if self.check_for_phrase():
            self.phrase_known()
        else:
            self.phrase_not_known()
    def check_for_phrase(self):
        """Query for self.phrase; NOTE(review): result of check_exists_result
        is discarded, so this implicitly returns None."""
        try:
            self.cursor.execute(queries.check_for_phrase(self.phrase))
        except Exception as e:
            print("Exception has occured 102: " + str(e))
        result = self.cursor.fetchall()
        self.check_exists_result(result)
    def check_exists_result(self, result):
        """True when the first count column of *result* is positive."""
        result_list = [list(i) for i in result]
        number_returned = result_list[0][0]
        if int(number_returned) > 0:
            return True
            # NOTE(review): unreachable — follows a return statement.
            self.update_phrase()
        else:
            return False
    def update_phrase(self):
        """NOTE(review): references an undefined name `phrase` and
        `self.person.person_id` (elsewhere `self.person.id` is used)."""
        try:
            self.cursor.execute(queries.update_phrase(
                phrase, self.person.person_id))
        except Exception as e:
            print("Exception has occured: 120 " + str(e))
        self.cnx.commit()
        self.cursor.close()
        self.cnx.close()
    def establish_new_connection(self):
        """Open a fresh MySQL connection/cursor pair onto self.cnx/self.cursor."""
        connection = connection_handler.establish_connection()
        self.cnx = connection[0]
        self.cursor = connection[1]
    @staticmethod
    def is_confirmation(word_or_phrase):
        """NOTE(review): calls the *instance* method establish_new_connection
        without an instance and uses a bare `cursor` name — both will fail."""
        Phrase_Manager.establish_new_connection()
        try:
            cursor.execute(queries.check_for_confirmation(word_or_phrase))
        except Exception as e:
            print("Exception has occured 144: " + str(e))
        result = cursor.fetchall()
        if Phrase_Manager.confirmation_exists(result):
            return True
        else:
            return False
    @staticmethod
    def confirmation_exists(result):
        """True when the first count column of *result* is positive."""
        result_list = [list(i) for i in result]
        number_returned = result_list[0][0]
        if int(number_returned) > 0:
            return True
        else:
            return False
| aburk3/Brain | managers/phrase_manager.py | phrase_manager.py | py | 6,593 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "managers.word_manager.Word_Manager",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "grammar_format.assign_part_of_speech",
"line_number": 39,
"usage_type": "call"
},... |
4889121541 | from utils import parseDate, checkDateInTheFuture, checkDateFromNotTooBig, s3Query
from http_response import okResponse, badRequestResponse
from typing import Union
import os
import boto3
# S3 bucket and object (province-level covid CSV) from the Lambda environment.
BucketName = os.environ.get('BUCKET_NAME')
FileName = os.environ.get('PROV_FILE_NAME')
s3 = boto3.client('s3')
# Oldest allowed `date-from`: this many months back from today.
maxMonths = 5
def lambda_handler(event, context):
    """AWS Lambda entry point: validate the `prov`/`date-from` query-string
    parameters and return the matching province data, 400 on bad input, or
    500 on any unexpected failure."""
    try:
        params = event['queryStringParameters']
        if params is None or 'prov' not in params:
            return badRequestResponse('Province is missing')
        prov = params['prov']
        dateFrom = parseDate(params, 'date-from')
        if not checkDateFromNotTooBig(maxMonths, dateFrom):
            return badRequestResponse(f'date-from should be max {maxMonths} months in the past')
        if checkDateInTheFuture(dateFrom):
            return badRequestResponse(f'date-from should not be in the future')
        message = queryData(prov, dateFrom)
        return okResponse(message)
    except Exception as e:
        print(e)
        return {"statusCode": 500}
def queryData(prov: str, dateFrom: str) -> str:
    """Run an S3 Select query returning one province's rows newer than *dateFrom*.

    Both values originate from the HTTP query string, so single quotes are
    doubled before interpolation to keep them from terminating the SQL string
    literals (S3 Select has no parameterized queries). For legitimate inputs
    (no quotes) the generated query is unchanged.
    """
    safe_prov = prov.replace("'", "''")
    safe_date_from = dateFrom.replace("'", "''")
    query = f"""
    SELECT
        denominazione_regione AS region,
        denominazione_provincia AS province,
        sigla_provincia AS province_initials,
        totale_casi AS total,
        data AS reporting_date
    FROM s3object s
    WHERE sigla_provincia ='{safe_prov}' AND data > '{safe_date_from}'
    """
    return s3Query(s3, query, BucketName, FileName)
| menalb/covid-data-app | api/bucketquery/coviddata/app_prov.py | app_prov.py | py | 1,582 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_num... |
3674524668 | #Importing pyplot submodule
import matplotlib.pyplot as plt
import numpy as np
# Fruit names and their prices (per kg).
x = np.array(['Apples','Bananas','Lichi','Pineapple'])
y = np.array([100,45,60,90])
#using bar() to represent data in bar graph
plt.subplot(1,2,2)
plt.bar(x,y, width = 0.5)
plt.title('Vertical')
# NOTE(review): show() here displays the first figure; in non-interactive
# mode the barh() below is drawn on a fresh figure, so the two subplots
# never share one window — confirm whether a single figure was intended.
plt.show()
# barh() draws the bars horizontally; it takes `height` instead of `width`.
plt.subplot(1,2,1)
plt.barh(x,y, height = 0.5)
plt.title('Horizontal')
plt.suptitle('Prices of fruits per kg')
plt.show()
| manudeepsinha/daily_commit | 2020/12/Python/23_matplotlib_bar.py | 23_matplotlib_bar.py | py | 524 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
... |
16157216808 | import pandas as pd
from django.contrib.auth.decorators import login_required
from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponse
from django.urls import reverse
from researcher_UI.models import Administration
@login_required
def download_links(request, study_obj, administrations=None):
    """Download only the associated administration links instead of the whole
    data spreadsheet.

    Builds a CSV attachment with one row per administration containing the
    study name, subject id, repeat number, administration id and the full
    form URL.
    """
    response = HttpResponse(content_type="text/csv")  # Format response as a CSV
    response["Content-Disposition"] = (
        "attachment; filename=" + study_obj.name + "_links.csv"
    )  # Name CSV

    if administrations is None:
        administrations = Administration.objects.filter(study=study_obj)

    admin_data = pd.DataFrame.from_records(administrations.values()).rename(
        columns={
            "id": "administration_id",
            "study_id": "study_name",
            "url_hash": "link",
        }
    )  # Grab variables from administration objects

    admin_data = admin_data[
        ["study_name", "subject_id", "repeat_num", "administration_id", "link"]
    ]  # Organize columns

    admin_data[
        "study_name"
    ] = study_obj.name  # Replace study ID number with actual study name

    # Recreate administration links and add them to dataframe
    test_url = "".join(
        [
            "http://",
            get_current_site(request).domain,
            reverse("administer_cdi_form", args=["a" * 64]),
        ]
    ).replace("a" * 64 + "/", "")
    admin_data["link"] = test_url + admin_data["link"]

    if study_obj.instrument.language in ["English"] and study_obj.instrument.form in [
        "WS",
        "WG",
    ]:
        # Append a citation row for the 3rd edition instruments.
        # DataFrame.append was removed in pandas 2.0 — build the row with
        # concat instead (remaining columns are left empty, as before).
        citation_row = pd.DataFrame(
            [{"study_name": "3rd Edition (Marchman et al., 2023)"}]
        )
        admin_data = pd.concat([admin_data, citation_row], ignore_index=True)

    admin_data.to_csv(
        response, encoding="utf-8", index=False
    )  # Convert dataframe into a CSV

    # Return CSV
    return response
| langcog/web-cdi | webcdi/researcher_UI/utils/download/download_links.py | download_links.py | py | 1,991 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "django.http.HttpResponse",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "researcher_UI.models.Administration.objects.filter",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "researcher_UI.models.Administration.objects",
"line_number": 19,
... |
3238104451 | import numpy as np
import pandas as pd
#from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import xlwt
import KMeans
import Visualization
# Baseline experiment: run plain K-means (K=4, fixed initial centres) on the
# 2-D coordinates read from the Douban training CSV and visualise the result.
plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
points = []
center_points = []
K = 4
file="D:\\experiment\\第三次豆瓣\\测试3\\train\\douban_train_zuobiao.csv"
data=pd.read_csv(file)
train_data = np.array(data)  # np.ndarray: each row becomes one list
#print(train_data)
all_list=train_data.tolist()  # convert to a plain Python list
#print(all_list)
# Columns 2 and 3 of each row hold the point coordinates.
for item in all_list:
    print(item[2])
    print(item[3])
    print("-----------------------")
    point = [item[2], item[3]]
    points.append(point)
#print(type(points))  # each point stored in the list
points = np.array(points)  # convert to array form
#print(points)
# Fixed initial cluster centres (one per cluster, K=4).
center_points=[[18.26227416, -42.2997346], [16.23449381, -36.77185165], [58.35130569, 34.61516792], [-4.43906712, -56.93233191]]
kmeans = KMeans.KMeans(points, center_points, K)  # K-means clustering
center_points, kmeans_cluster = kmeans.find_cluster_by_kmeans()  # obtain the K-means clusters
for i in kmeans_cluster:
    print(i)
data1 = np.array(center_points)  # np.ndarray conversion of the final centres
data2 = np.array(kmeans_cluster)  # np.ndarray conversion of the cluster assignments
# print(train_data)
visual = Visualization.Visualization(center_points, kmeans_cluster)
visual.visual()
| JiaoZixun/Recommend_By_Canopy-K-means | recommend——豆瓣/对比实验——K-means聚类.py | 对比实验——K-means聚类.py | py | 1,454 | python | en | code | 18 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 11,
"usage_type": "attribute"
},
{
... |
30662323647 | from datetime import datetime, timedelta
from pytz import timezone
from dateutil.relativedelta import relativedelta
# Constructing datetimes directly and from strings.
data_1 = datetime(2023, 10, 30, 17, 10, 59)
print(data_1)
data_str = "2023-10-30 17:18:59"
data_str_formatter = "%Y-%m-%d %H:%M:%S"
data_2 = datetime.strptime(data_str, data_str_formatter)
print(data_2)
# Current time, naive and timezone-aware.
data_3 = datetime.now()
print(data_3)
data_4 = datetime.now(timezone("Asia/Tokyo"))
print(data_4)
# BUG FIX: passing a pytz timezone via tzinfo= attaches the zone's historical
# LMT offset (+09:19 for Asia/Tokyo) instead of JST (+09:00). pytz requires
# localize() to attach the correct offset to a naive datetime.
data_5 = timezone("Asia/Tokyo").localize(datetime(2023, 10, 30, 14, 20, 36))
print(data_5)
# timedelta arithmetic between parsed datetimes.
fmt_1 = "%d/%m/%Y %H:%M:%S"
data_6 = datetime.strptime("31/10/2023 14:39:30", fmt_1)
data_7 = datetime.strptime("10/10/2023 14:39:30", fmt_1)
delta_1 = data_6 - data_7
delta_2 = timedelta(days=20)
print(data_7 - delta_2)
print(delta_1)
# relativedelta arithmetic (note: 365 * 20 days is not exactly 20 years
# because of leap days; relativedelta(years=20) would be exact).
fmt_2 = "%d/%m/%Y %H:%M:%S"
data_8 = datetime.strptime("10/10/2001 15:35:26", fmt_2)
relative_delta_1 = data_8 - relativedelta(days=365 * 20)
print(relative_delta_1)
# Formatting back to a string.
data_9 = datetime.now()
fmt_3 = "%d/%m/%Y"
print(data_9.strftime(fmt_3))
| juannaee/WorkSpace-Python-Intermediario | SEÇÃO 4/datetime/main1.py | main1.py | py | 994 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "datetim... |
42669964568 | import glob
import cv2
import numpy as np
from tqdm import tqdm
class Calibration(object):
    """Single-camera intrinsic calibration from 9x6 chessboard images.

    Loads every *.jpg under the given directory, detects the chessboard's
    inner corners, and runs cv2.calibrateCamera to estimate the camera
    matrix, distortion coefficients and per-view extrinsics (rvecs/tvecs).
    """

    def __init__(self, targetfilepath):
        # termination criteria
        # (for cornerSubPix: stop after 30 iterations or epsilon 1e-5)
        self.criteria = (cv2.TERM_CRITERIA_EPS +
                         cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-5)
        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
        self.worldPoints = np.zeros((9 * 6, 3), np.float32)  # all zeros: template for the 3-D coordinates of the chessboard corners
        self.worldPoints[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
        # Fixes the base of the world coordinate frame; the top-left corner is the origin (0,0,0).
        # The Z coordinate stays 0 because the chessboard is planar.
        # Arrays to store object points and image points from all the images.
        self.objectPoints = []  # 3d point in real world space (one set appended per accepted view, used for calibration)
        self.imagePoints = []  # 2d points in image plane.
        self.cameraMatrix = None
        self.distortion = None
        self.img_shape = None
        self.rvecs = None
        self.tvecs = None
        # Cleaned-up per-view extrinsics; rotation is sometimes kept as a 3x3 matrix instead of a vector.
        self.targetRvecs = []
        self.targetTvecs = []
        self.readfile(targetfilepath)

    def readfile(self, targetfilepath):
        """Detect chessboard corners in every .jpg under *targetfilepath*,
        collect the 2-D/3-D correspondences, then run calibrate()."""
        targetimagefile = glob.glob(targetfilepath + '\\*.jpg')  # NOTE: Windows-style path separator
        #targetimagefile.sort()
        print("start loading files")
        for i in tqdm(range(len(targetimagefile))):
            # print(targetimagefile[i])
            imgmat = cv2.imread(targetimagefile[i])
            # convert to grayscale
            imggray = cv2.cvtColor(imgmat, cv2.COLOR_BGR2GRAY)
            # Find the chess board corners
            # returns: success flag, list of corner points
            # args: image, inner-corner pattern size, flags
            ret, corners = cv2.findChessboardCorners(imggray, (9, 6), None)

            if ret is True:
                # If found, add object points, image points (after refining them)
                cv2.cornerSubPix(imggray, corners, (11, 11), (-1, -1), self.criteria)  # sub-pixel refinement of the detected corners
                self.imagePoints.append(corners)
                self.objectPoints.append(self.worldPoints)

                # ret = cv2.drawChessboardCorners(imgmat, (9, 6), corners, ret)
                # cv2.imshow("test", imgmat)
                # cv2.waitKey(1)
        self.img_shape = cv2.cvtColor(cv2.imread(targetimagefile[0]), cv2.COLOR_BGR2GRAY).shape[::-1]
        self.calibrate(len(targetimagefile))

    def calibrate(self, target_lenght):
        """Run cv2.calibrateCamera on the collected point correspondences
        and store the resulting intrinsics/extrinsics on the instance."""
        print("enter 1d calibration")
        ret, self.cameraMatrix, self.distortion, self.rvecs, self.tvecs = cv2.calibrateCamera(
            objectPoints=self.objectPoints,
            imagePoints=self.imagePoints,
            imageSize=self.img_shape,
            cameraMatrix=self.cameraMatrix,
            distCoeffs=self.distortion,
            rvecs=self.rvecs,
            tvecs=self.tvecs)
        print(ret)
        # Calibration of a single camera returns:
        # success flag, camera matrix, distortion coefficients, rotation and translation vector"s"
        # Each R|t is the transform between the world origin and the 2-D view's image centre (cx,cy).
        print("exit 1d calibration")
        # for i in tqdm(range(target_lenght)):
        #     dst, _ = cv2.Rodrigues(self.rvecs[i])
        #     self.targetRvecs.append(dst)
        #     self.targetTvecs.append(self.tvecs[i])
        # print("Rodrigues eqs is solved")
        # for i, (r, t) in enumerate(zip(self.rvecs, self.tvecs)):
        #     dst, _ = cv2.Rodrigues(r)  # rotation vectors use a different representation, so conversion is needed; see the Rodrigues transform.
        #     print(i, "th rotation: \n", dst)
        #     print(i, "th translation: \n", t)
| Team-AllyHyeseongKim/vision-utils-calibrator-depth-map-deblur-odometry- | custom_lib/cail/calibrator.py | calibrator.py | py | 3,920 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "cv2.TERM_CRITERIA_EPS",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "cv2.TERM_CRITERIA_MAX_ITER",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 15,
"usage_type": "call"
},
{
"api_name":... |
71578877863 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
import vtk
def main():
    """Build a vtkConvexPointSet cell from 13 points, render it as an
    unstructured grid with glyphed vertices, and start an interactive
    render window."""
    cps = vtk.vtkConvexPointSet()
    points = vtk.vtkPoints()
    # 8 unit-cube corners followed by 5 extra points on the z=0 face
    # (edge midpoints and the face centre).
    points.InsertNextPoint(0, 0, 0)
    points.InsertNextPoint(1, 0, 0)
    points.InsertNextPoint(1, 1, 0)
    points.InsertNextPoint(0, 1, 0)
    points.InsertNextPoint(0, 0, 1)
    points.InsertNextPoint(1, 0, 1)
    points.InsertNextPoint(1, 1, 1)
    points.InsertNextPoint(0, 1, 1)
    points.InsertNextPoint(0.5, 0, 0)
    points.InsertNextPoint(1, 0.5, 0)
    points.InsertNextPoint(0.5, 1, 0)
    points.InsertNextPoint(0, 0.5, 0)
    points.InsertNextPoint(0.5, 0.5, 0)
    # Register all 13 point ids with the convex point set cell.
    for i in range(0, 13):
        cps.GetPointIds().InsertId(i, i)
    # Wrap the single cell in an unstructured grid.
    ug = vtk.vtkUnstructuredGrid()
    ug.Allocate(1, 1)
    ug.InsertNextCell(cps.GetCellType(), cps.GetPointIds())
    ug.SetPoints(points)
    colors = vtk.vtkNamedColors()
    mapper = vtk.vtkDataSetMapper()
    mapper.SetInputData(ug)
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    actor.GetProperty().SetColor(colors.GetColor3d("Tomato"))
    actor.GetProperty().SetLineWidth(3)
    actor.GetProperty().EdgeVisibilityOn()
    # Glyph the points
    sphere = vtk.vtkSphereSource()
    sphere.SetPhiResolution(21)
    sphere.SetThetaResolution(21)
    sphere.SetRadius(.03)
    # Create a polydata to store everything in
    polyData = vtk.vtkPolyData()
    polyData.SetPoints(points)
    pointMapper = vtk.vtkGlyph3DMapper()
    pointMapper.SetInputData(polyData)
    pointMapper.SetSourceConnection(sphere.GetOutputPort())
    pointActor = vtk.vtkActor()
    pointActor.SetMapper(pointMapper)
    pointActor.GetProperty().SetColor(colors.GetColor3d("Peacock"))
    # Create a renderer, render window, and interactor
    renderer = vtk.vtkRenderer()
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.SetWindowName("Convex Point Set")
    renderWindow.AddRenderer(renderer)
    renderWindowInteractor = vtk.vtkRenderWindowInteractor()
    renderWindowInteractor.SetRenderWindow(renderWindow)
    # Add the actors to the scene
    renderer.AddActor(actor)
    renderer.AddActor(pointActor)
    renderer.SetBackground(colors.GetColor3d("Silver"))
    renderer.ResetCamera()
    renderer.GetActiveCamera().Azimuth(210)
    renderer.GetActiveCamera().Elevation(30)
    renderer.ResetCameraClippingRange()
    # Render and interact
    renderWindow.SetSize(640, 480)
    renderWindow.Render()
    renderWindowInteractor.Start()
# Standard script entry point.
if __name__ == '__main__':
    main()
| lorensen/VTKExamples | src/Python/GeometricObjects/ConvexPointSet.py | ConvexPointSet.py | py | 2,484 | python | en | code | 319 | github-code | 36 | [
{
"api_name": "vtk.vtkConvexPointSet",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "vtk.vtkPoints",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "vtk.vtkUnstructuredGrid",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "vtk.vtkName... |
25305879977 | __author__ = 'diegopinheiro'
from common.attribute import Attribute
import math
import numpy
class AttributeConverter:
    """Converts between an attribute's categories and their one-hot
    (power-of-two) binary-list representations."""

    @staticmethod
    def get_representation(attribute=None, category=None):
        """Return *category* encoded as a one-hot bit list.

        The list is as wide as the number of categories; numpy's binary
        representation puts the most significant bit first, so category
        index i sets the bit at position (width - 1 - i).
        """
        # BUG FIX: the default used to be a mutable `Attribute()` instance,
        # created once at definition time and shared by every call; `None`
        # is the safe sentinel (callers always pass a real attribute).
        number_representation = AttributeConverter.get_number_representation(attribute=attribute)
        category_index = attribute.categories.index(category)
        category_position = int(math.pow(2, category_index))
        return [int(bit) for bit in list(numpy.binary_repr(category_position, width=number_representation))]

    @staticmethod
    def get_number_representation(attribute=None):
        """Return the representation width: one bit per category."""
        return len(attribute.categories)

    @staticmethod
    def get_attribute_category(attribute=None,
                               representation=None):
        """Inverse of get_representation: decode a one-hot bit list back
        into the category it encodes."""
        # BUG FIX: the default used to be the `Attribute` class itself
        # (inconsistent with the other methods); normalised to None.
        representation = "".join([str(i) for i in representation])
        category_position = int(representation, 2)
        category_index = 0
        # Position 1 encodes index 0; normalise it so log2 is only taken
        # for positions >= 2.
        if category_position == 1:
            category_position = 0
        if category_position != 0:
            category_index = int(math.log(category_position, 2))
return attribute.categories[category_index] | diegompin/genetic_algorithm | common/attribute_converter.py | attribute_converter.py | py | 1,144 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "common.attribute.Attribute",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "math.pow",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.binary_repr",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "common.attribut... |
36289714812 | from kafka.admin import KafkaAdminClient, ConfigResource, ConfigResourceType
# Tutorial script: dump broker and topic metadata via KafkaAdminClient.
TOPIC_NAME = "kafka.client.tutorial"
BOOTSTRAP_SERVER_HOST = "kafka_tutorial:9092"  # host and port of the Kafka cluster server
admin_client = KafkaAdminClient(
    bootstrap_servers=BOOTSTRAP_SERVER_HOST
)
print("== Get broker information")
# return type dict
describe_cluster = admin_client.describe_cluster()
for node in describe_cluster.get('brokers'):
    print(f"node : {node}")
    # NOTE(review): the broker metadata dict is passed as `configs` and the
    # node id as `name` - confirm this matches kafka-python's ConfigResource
    # contract (it normally takes a dict of config entries, not node metadata).
    cr = ConfigResource(ConfigResourceType.BROKER, name=node.get('node_id'), configs=node)
    describe_config = admin_client.describe_configs([cr])
    for config_i in describe_config:
        print(f"\tconfig:\t:{config_i}")
print("== End broker information")
print("== Get topic information")
describe_topic = admin_client.describe_topics([TOPIC_NAME])
for info_i in describe_topic:
    for k, v in info_i.items():
        print(f'{k}\t{v}')
    print('==================================================================')
print("== End topic information")
admin_client.close() | 2h-kim/kafka-personal-study | simple-kafka-admin-client/kafka-admin-client.py | kafka-admin-client.py | py | 1,069 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "kafka.admin.KafkaAdminClient",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "kafka.admin.ConfigResource",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "kafka.admin.ConfigResourceType.BROKER",
"line_number": 16,
"usage_type": "attribut... |
3835970469 | import json
import pandas as pd
import numpy as np
import filenames
# Collect the values of the second-to-last column of each "forpoison" CSV,
# grouped by architecture (arm/mips) and label (malware/benign), and dump
# the result to a single JSON file.
myjson = {
    "arm": {
        "malware": [],
        "benign": []
    },
    "mips": {
        "malware": [],
        # BUG FIX: this key was misspelled "bening"; the data below was
        # written under a separate, correctly spelled "benign" key while the
        # empty "bening" list leaked into the output JSON.
        "benign": []
    }
}

df_arm_malware_forpoison = pd.read_csv(filenames.forpoison_arm_malware, header=None, index_col=False)
myjson["arm"]["malware"] = \
    df_arm_malware_forpoison[df_arm_malware_forpoison.columns[-2]].tolist()

df_arm_benign_forpoison = pd.read_csv(filenames.forpoison_arm_benign, header=None, index_col=False)
myjson["arm"]["benign"] = \
    df_arm_benign_forpoison[df_arm_benign_forpoison.columns[-2]].tolist()

df_mips_malware_forpoison = pd.read_csv(filenames.forpoison_mips_malware, header=None, index_col=False)
myjson["mips"]["malware"] = \
    df_mips_malware_forpoison[df_mips_malware_forpoison.columns[-2]].tolist()

df_mips_benign_forpoison = pd.read_csv(filenames.forpoison_mips_benign, header=None, index_col=False)
myjson["mips"]["benign"] = \
    df_mips_benign_forpoison[df_mips_benign_forpoison.columns[-2]].tolist()

with open(filenames.poisonJSON, "w") as f:
    json.dump(myjson, f)
| ZsZs88/Poisoning | filepicker.py | filepicker.py | py | 1,111 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "filenames.forpoison_arm_malware",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "... |
34916305882 | import numpy as np
import matplotlib.pyplot as plt
# Read the per-step cross-track-error log written by the controller build
# and plot it.
with open("../buildxcode/cte.txt") as f:
    data = f.read()

# BUG FIX: split('\n') on a newline-terminated file yields an empty trailing
# entry and float('') raises ValueError; splitlines() plus a blank filter is
# robust to trailing/blank lines.
x = [float(i) for i in data.splitlines() if i.strip()]

fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_title("CTE")
ax1.set_ylabel('CTE')
ax1.plot(x, 'b')
plt.show()
| suprnrdy/CarND-PID-Control-Project-master | src/plotCTE.py | plotCTE.py | py | 341 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "mat... |
25056282946 | from django import forms
from django.core.exceptions import ValidationError
from manager.models import Accountancy
from manager.wallet_operations import wallet_choice, wallet_data_parse, change_wallet_balance
class AccountancyForm(forms.ModelForm):
    """ModelForm that validates an accountancy amount and applies the
    corresponding balance change to the linked wallet object."""

    class Meta:
        model = Accountancy
        fields = ()

    def clean(self):
        """Validate the amount and stage the wallet balance change.

        Raises:
            ValidationError: if the submitted amount is negative.
        """
        amount = float(self.data["amount"])
        if amount < 0:
            raise ValidationError("Amount can't be negative.")
        wallet_type, previous_amount = wallet_data_parse(self.data)
        # Pick the wallet object matching whichever foreign key is set.
        _, self.wallet_obj = wallet_choice(
            wallet_type,
            self.instance.card_id or self.instance.cash_id or self.instance.cryptocurrency_id
        )
        # NOTE(review): operator precedence makes this parse as
        # `(amount - float(previous_amount)) if previous_amount else 0`,
        # so when there is no previous amount the delta passed on is 0 rather
        # than the full amount. Confirm against wallet_data_parse /
        # change_wallet_balance whether that is intended.
        amount = amount - float(previous_amount) if previous_amount else 0
        self.wallet_obj = change_wallet_balance(
            self.instance.IO, self.wallet_obj, amount
        )
        return super().clean()

    def save(self, commit=True):
        """Persist the accountancy record and the updated wallet balance."""
        accountancy = super(AccountancyForm, self).save(commit=False)
        # NOTE(review): clean() is invoked again here, re-running the balance
        # computation above - verify the double application is intended.
        self.clean()
        if commit:
            accountancy.amount = float(self.data["amount"])
            accountancy.save()
            self.wallet_obj.save()
        return super().save(commit)
class AccountancySearchForm(forms.Form):
    """Single optional text field used to search accountancy entries by type."""

    IO_type = forms.CharField(
        max_length=50,
        required=False,
        label="",
        widget=forms.TextInput(attrs={
            "placeholder": "Search by type ...",
            "class": "small_plate _comforta_bold text_shadow"
        })
    )
| AndriyKy/zlatnic | manager/forms.py | forms.py | py | 1,544 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "manager.models.Accountancy",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "dja... |
13962486159 | from django.db import IntegrityError
from django.utils.timezone import make_aware
from datetime import datetime
import logging
from .utils import get_extras
class DatabaseHandler(logging.Handler):
    """
    A log handler to store logs into the database.

    Currently, only log entries that belong to an event are stored in the database.
    All other log entries are available in the log files / via syslog.
    """

    def __init__(self, *args, **kwargs):
        # The LogEntry model is resolved lazily in emit() - see comment there.
        self._logentry_model = None
        super(DatabaseHandler, self).__init__(*args, **kwargs)

    def emit(self, record):
        """Persist *record* as a LogEntry row.

        Records without an `event` attribute are skipped; `helper` and
        `user` attributes are stored when present.
        """
        # the handler is initialized by django before the database setup, so the import would fail
        # therefore, we do it here dynamically when necessary - but only once
        if not self._logentry_model:
            from .models import LogEntry
            self._logentry_model = LogEntry

        # get the event, helper and user if they are stored in the entry
        event = record.event if hasattr(record, "event") else None
        if not event:
            return
        helper = record.helper if hasattr(record, "helper") else None
        user = record.user if hasattr(record, "user") else None

        # create the entry
        entry = self._logentry_model(
            timestamp=make_aware(datetime.fromtimestamp(record.created)),
            level=record.levelname,
            message=record.getMessage(),
            event=event,
            helper=helper,
            user=user,
            extras=get_extras(record),
            module=record.name,
        )

        try:
            entry.save()
        except ValueError:
            # if the event is deleted, we cannot save. we only store logs for existing events,
            # so we can discard this event (deletions are still logged via syslog / in files if container is used)
            pass
        except IntegrityError:
            # if a helper is deleted, the helper object is still there while we prepare the entry.
            # on save, the helper may already be deleted, so we have a foreign key error.
            entry.helper = None
            entry.save()
| helfertool/helfertool | src/toollog/handlers.py | handlers.py | py | 2,156 | python | en | code | 52 | github-code | 36 | [
{
"api_name": "logging.Handler",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "models.LogEntry",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.utils.timezone.make_aware",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": ... |
5216435364 | from pydantic import BaseModel, EmailStr
from datetime import datetime
from typing import Optional
class User(BaseModel):
    """Full user record as exposed by the API (response schema)."""
    user_id : int
    username : str
    forenames: str
    surname : str
    email : EmailStr
    bio : Optional[str]
    display_name : Optional[str]
    created_at: datetime
class UserRegistrationRequest(BaseModel):
    """Payload for registering a new user account."""
    username : str
    forenames: str
    surname: str
    email: EmailStr
    password: str
class UserUpdateRequest(BaseModel):
    """Full (PUT-style) profile update payload: every field required."""
    forenames: str
    surname : str
    email : EmailStr
    bio : str
    display_name : str
class UserPartialUpdateRequest(BaseModel):
    """Partial (PATCH-style) profile update payload: every field optional."""
    forenames: Optional[str]
    surname : Optional[str]
    email : Optional[EmailStr]
    bio : Optional[str]
    display_name : Optional[str]
    class Config:
        # Reject unknown fields in the payload.
extra = "forbid" | willpennell/r-shows | user-management-service/app/schemas/user_schemas.py | user_schemas.py | py | 802 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pydantic.BaseModel",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "pydantic.EmailStr",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Optional",... |
15991913621 | #!/usr/bin/python
from Constants import Constants as cnt
from CoinDaemon import CoinDaemon
from bitcoinrpc.authproxy import AuthServiceProxy
class Wallet:
    """Provides a high-level abstraction of a coin wallet and simplifies
    the process of making JSON API RPC calls to the coin wallet
    daemon"""

    # Class-level defaults; instances overwrite most of these in __init__.
    walletPath = ""
    walletName = ""
    walletNumber = 0
    walletNumberStr = '%0*d' % (6, walletNumber)  # zero-padded 6-digit wallet number
    walletPort = 00000
    walletBalance = 0.0
    walletProxy = None  # AuthServiceProxy, set by test_wallet() once the RPC API is up

    def __init__(self, a_str, b_int, c_int, d_str):
        """Args: wallet.dat path, wallet number, RPC port, coin/daemon name."""
        self.walletPath = a_str
        self.walletNumber = b_int
        self.walletPort = c_int
        self.walletName = d_str

    def get_balance(self):
        """Return the current wallet balance via the RPC proxy.

        walletProxy is only connected once test_wallet() has run; calling
        this before that fails.
        """
        # BUG FIX: this previously returned self.daemonAPI.getbalance(), but
        # no attribute named daemonAPI exists anywhere on this class - the
        # RPC handle is stored in walletProxy.
        return self.walletProxy.getbalance()

    def test_wallet(self):
        """Function that tests the wallet and returns an int depending
        on the result:
            0 - wallet is empty
            1 - wallet has a balance and is unlocked
            2 - wallet has a balance but is locked (keypoolrefill failed)
        """
        import os, shutil, time, socket
        from bitcoinrpc.authproxy import JSONRPCException
        # Copy the wallet file into the daemon's data directory before start.
        copyPath = cnt.HOME_DIR + "." + self.walletName + "/wallet.dat"
        shutil.copy(self.walletPath, copyPath)
        theDaemon = CoinDaemon(self.walletName, self.walletPort, self.walletPath)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        daemonTimer = 0
        RPCTimer = 0
        # Wait for any previous daemon instance to stop, then start ours.
        while(theDaemon.is_running()):
            print("[LOG] Waited " + str(daemonTimer) + " seconds for " + self.walletName + " daemon to stop...")
            daemonTimer = daemonTimer + 1
            time.sleep(1)
        else:
            theDaemon.start_daemon()
        # Poll until the RPC port accepts connections.
        while not(sock.connect_ex(('localhost', int(self.walletPort))) == 0):
            print("[LOG] Waited " + str(RPCTimer) + " seconds for RPC API...")
            RPCTimer = RPCTimer + 10
            time.sleep(10)
        else:
            # NOTE(review): if walletPort is an int (as __init__'s c_int name
            # suggests) this string concatenation raises TypeError - confirm
            # walletPort is passed as a string by callers.
            self.walletProxy = AuthServiceProxy("http://" + cnt.USER + ":" + cnt.PASSWORD + "@127.0.0.1:" + self.walletPort)
            print("[LOG] RPC API Up!")
            self.walletBalance = self.walletProxy.getbalance()
            if(self.walletBalance == 0):
                #~ print("[LOG] Wallet tested - " + self.walletPath + " - EMPTY")
                theDaemon.stop_daemon()
                return 0
            else:
                try:
                    # keypoolrefill fails with a JSON-RPC error on locked wallets.
                    print(self.walletProxy.keypoolrefill())
                except JSONRPCException:
                    #~ print("[LOG] LOCKED Wallet tested - " + self.walletPath + " - balance: " + str(self.walletBalance))
                    theDaemon.stop_daemon()
                    return 2
                else:
                    #~ print("[LOG] Wallet tested - " + self.walletPath + " - balance: " + str(self.walletBalance))
                    theDaemon.stop_daemon()
                    return 1
| chriscassidy561/coinScript | Wallet.py | Wallet.py | py | 2,828 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Constants.Constants.HOME_DIR",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "Constants.Constants",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "shutil.copy",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "Coi... |
30952198138 | import requests
class Polyline:
    """Matches pairs of ride requests whose routes (HERE API encoded
    polylines) overlap enough to share a ride, and computes each rider's
    fare share."""

    def __init__(self) -> None:
        self.users = dict()      # ID -> [name, phone number]
        self.polylines = []      # pending, unmatched [ID, encoded polyline]
        self.matches = []        # [ID, matched partner ID, fare share % for ID]

    def add(self, ID: int, name: str, ph_no: int, source: list, destination: list) -> None:
        """Register a ride request ([lat, lng] endpoints) and try to match
        it against the pending requests."""
        polyline = Polyline.__get_polyline(source, destination)
        self.polylines.append([ID, polyline])
        self.users[ID] = [name, ph_no]
        self.__match_polylines()

    def check_status(self, ID) -> list:
        """Pop and return [partner name, partner phone, fare share %] for
        *ID*, or [-1, -1] when no match is available yet."""
        try:
            idx = next(i for i, j in enumerate(self.matches) if j[0] == ID)
            match, share = self.matches[idx][1], self.matches[idx][2]
            match = self.users[match] + [share]
            del self.matches[idx]
        except (StopIteration, KeyError):
            # Narrowed from a bare `except`: StopIteration = no match entry,
            # KeyError = partner missing from the user table.
            match = [-1, -1]
        return match

    def remove(self, ID: int):
        """Withdraw a pending (still unmatched) request."""
        try:
            idx = next(i for i, j in enumerate(self.polylines) if j[0] == ID)
            del self.polylines[idx]
        except StopIteration:  # narrowed from a bare `except`
            print('No such ID')

    @staticmethod
    def LCSubStr(string1: str, string2: str) -> str:
        """Return the longest common substring of the two strings, or the
        sentinel "-1" when they share nothing.

        Space-optimised dynamic programming over two rolling rows.
        """
        m = len(string1)
        n = len(string2)
        result = 0
        end = 0
        # BUG FIX: the rows must span len(string2) + 1 columns; they were
        # sized by len(string1), raising IndexError whenever string2 was
        # the longer string.
        length = [[0 for j in range(n + 1)] for i in range(2)]
        currRow = 0
        for i in range(0, m + 1):
            for j in range(0, n + 1):
                if (i == 0 or j == 0):
                    length[currRow][j] = 0
                elif (string1[i - 1] == string2[j - 1]):
                    length[currRow][j] = length[1 - currRow][j - 1] + 1
                    if (length[currRow][j] > result):
                        result = length[currRow][j]
                        end = i - 1
                else:
                    length[currRow][j] = 0
            currRow = 1 - currRow
        if (result == 0):
            return "-1"
        return string1[end - result + 1: end + 1]

    def _is_matching(self, polyline1: str, polyline2: str) -> bool:
        """Heuristic match: true when one route is contained in the other,
        or their longest common stretch covers more than half of both.
        The first 2 characters are treated as a common encoding header."""
        if len(polyline1) < len(polyline2):
            polyline1, polyline2 = polyline2, polyline1
        lcs = self.LCSubStr(polyline1, polyline2)
        com_head_len = 2
        n_lcs = len(lcs)
        n_line1 = len(polyline1) - com_head_len
        n_line2 = len(polyline2) - com_head_len
        # Assuming common header is 1st 2 chars
        if lcs == polyline1[:com_head_len] or lcs == "-1":
            return False
        # Fully contained
        elif (lcs == polyline1[com_head_len: ] or lcs == polyline2[com_head_len: ]):
            return True
        # LCS more than 50% of both:
        elif (n_lcs > n_line1/2 and n_lcs > n_line2/2):
            return True
        else:
            return False

    def __match_polylines(self):
        """Try to pair the newest request with any pending one; on success
        record both riders' fare shares and drop both from the queue."""
        if len(self.polylines) < 2:
            return
        id1, polyline1 = self.polylines[-1]
        for idx, (id2, polyline2) in enumerate(self.polylines[:-1]):
            if self._is_matching(polyline1, polyline2):
                # share of polyline1
                share = self._get_fare_share(polyline1, polyline2)
                del self.polylines[-1]
                del self.polylines[idx]
                self.matches.append([id1, id2, share])
                self.matches.append([id2, id1, 100 - share])
                # BUG FIX: stop after the first match; continuing the loop
                # could pair the (already removed) new request again and
                # record duplicate match entries.
                return

    def _get_fare_share(self, polyline1: str, polyline2: str):
        """Return polyline1's fare share (int percent), proportional to its
        route length relative to the combined length of both routes."""
        is_poly_swapped = False
        if len(polyline1) < len(polyline2):
            is_poly_swapped = True
            polyline1, polyline2 = polyline2, polyline1
        com_head_len = 2
        n_line1 = len(polyline1) - com_head_len
        n_line2 = len(polyline2) - com_head_len
        fare_share = min(1, n_line1/(n_line1+n_line2)) * 100
        if is_poly_swapped:
            fare_share = 100 - fare_share
        return int(fare_share)

    @staticmethod
    def __get_polyline(source: list, destination: list) -> str:
        """Fetch the encoded route polyline for [lat, lng] -> [lat, lng]
        from the HERE Routing API v8."""
        # SECURITY: the API key is hard-coded; move it to configuration or
        # an environment variable before shipping.
        # BUG FIX: origin/destination previously reused index 0 twice
        # (lat,lat); the second coordinate must be the longitude (index 1).
        api = f'https://router.hereapi.com/v8/routes?transportMode=car&origin={source[0]},{source[1]}&destination={destination[0]},{destination[1]}&return=polyline,summary&apikey=DCt7LzSN9sR8IGVpnTjD3CtQWYu55oinzBdFfD9idAE'
        polyline = requests.get(api)
        return polyline.json()['routes'][0]['sections'][0]['polyline']
if __name__ == '__main__':
    # Smoke test with two requests sharing the same endpoints.
    # NOTE: performs live calls to the HERE routing API.
    polyline = Polyline()
    polyline.add(123, "person1", 9988776655, [52.5308,13.3847], [52.5264,13.3686])
    polyline.add(122, "person2", 9977553311, [52.5308,13.3847], [52.5264,13.3686])
    print(polyline.check_status(122))
| Sivaram46/pool-ride | polyline.py | polyline.py | py | 4,465 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 123,
"usage_type": "call"
}
] |
34598487595 |
# IntesisHome Inegration with Domoticz
#
# Author: CV8R
#
"""
<plugin key="BasePlug" name="IntesisBox WMP-1 Protocol" author="CV8R" version="0.0.9" >
<description>
<h2>IntesisBox WMP-1</h2><br/>
<ul style="list-style-type:square">
<li>IntesisBox WMP-1 interface for air conditioners into IP based control systems</li>
</ul>
<ul style="list-style-type:square">
<h3>Configuration</h3><br/>
<li>IP Address and Port number default 3310 </li>
</ul>
</description>
<params>
<param field="Address" label="IP Address" width="200px" required="true" default=""/>
<param field="Port" label="Port" width="30px" required="true" default="3310"/>
<param field="Mode1" label="Debug" width="75px">
<options>
<option label="True" value="Debug"/>
<option label="False" value="Normal" default="true" />
</options>
</param>
</params>
</plugin>
"""
from typing import List
# Global var definitions (shared between the Domoticz callbacks)
InitHeartbeatCount = 0
unitmode = "N/A"  # last MODE value reported by the unit (see onMessage)
oustandingPings = -1  # NOTE(review): "oustanding" is a typo for "outstanding", kept for consistency with the rest of the file
lastHeartbeat = 0
# Limits as Global vars - set-point temperature limits in tenths of a degree,
# updated from the unit's LIMITS:SETPTEMP message in onMessage
minTempLimit = 180
maxTempLimit = 280
import Domoticz
import base64
import datetime
import re
class BasePlugin:
enabled = True
powerOn = 0
runCounter = 0
WMPConn = None
oustandingPings = 0
lastHeartbeat = datetime.datetime.now()
    def __init__(self):
        # No per-instance setup needed; state lives in the class attributes.
        #self.var = 123
        return
    def onStart(self):
        """Domoticz plugin start-up: configure logging/heartbeat and create
        the device set on first run."""
        Domoticz.Log("onStart called")
        Domoticz.Heartbeat(20) # Set heartbeat interval slower than default
        if Parameters["Mode1"] == "Debug":
            Domoticz.Debugging(1)
        # First run only: Devices is empty, so create the full device set.
        if (len(Devices) == 0):
            Domoticz.Device(Name="Power", Unit=1, Image=16, TypeName="Switch", Used=1).Create()
            Domoticz.Device(Name="Ambient Temp", Unit=2, TypeName="Temperature", Used=1).Create()
            # Selector levels map to the WMP MODE values handled in onMessage.
            Options = {"LevelActions" : "|||||",
                       "LevelNames" : "|Auto|Heat|Dry|Cool|Fan",
                       "LevelOffHidden" : "true",
                       "SelectorStyle" : "0"}
            Domoticz.Device(Name="Mode", Unit=3, TypeName="Selector Switch", Image=16, Options=Options, Used=1).Create()
            # Selector levels map to the WMP FANSP values handled in onMessage.
            Options = {"LevelActions" : "||||",
                       "LevelNames" : "|Auto|L1|L2|L3",
                       "LevelOffHidden" : "true",
                       "SelectorStyle" : "0"}
            Domoticz.Device(Name="Fan Speed", Unit=4, TypeName="Selector Switch", Image=7, Options=Options, Used=1).Create()
            Domoticz.Device(Name="Set Temp", Unit=5, Type=242, Subtype=1, Image=16, Used=1).Create()
            Domoticz.Device(Name="Error LED", Unit=6, Image=13, TypeName="Switch", Used=1).Create()
            Domoticz.Device(Name="Error Text", Unit=7, TypeName="Text", Used=1).Create()
            Domoticz.Log("Device created.")
        DumpConfigToLog()
    def onStop(self):
        """Called by Domoticz when the plugin is stopped."""
        Domoticz.Log("onStop called")
    def onConnect(self, Connection, Status, Description):
        """Handle the result of the WMP TCP connection attempt; on success
        request the unit's ID."""
        Domoticz.Log("onConnect called")
        # NOTE(review): ConnectState is declared global but never defined or
        # used in the visible code - confirm it is needed.
        global ConnectState
        Domoticz.Log("Connecting")
        if (Connection == self.WMPConn):
            if (Status == 0):
                Domoticz.Log("Connected successfully to: " + Connection.Address + ":" + Connection.Port)
                self.WMPConn.Send('ID\n') # Get ID at startup
            else:
                # "Only one usage..." means another process holds the socket;
                # just log and retry later. Either way drop the connection ref.
                if (Description.find("Only one usage of each socket address") > 0):
                    Domoticz.Log(Connection.Address + ":" + Connection.Port + " is busy, waiting.")
                else:
                    Domoticz.Log("Failed to connect (" + str(Status) + ") to: " + Connection.Address + ":" + Connection.Port + " with error: " + Description)
                self.WMPConn = None
def onMessage(self, Connection, Data):
Domoticz.Debug("onMessage called")
global unitmode
global oustandingPings
global lastHeartbeat
global minTempLimit
global maxTempLimit
strData = Data.decode("utf-8", "ignore")
Domoticz.Debug("onMessage called with Data: '" + str(strData) + "'")
#msgDataListRaw = re.split(r':+|,', strData) # type: List[str]
msgDataListRaw = re.split(r':+|,+|\[+|\]', strData) # split string to list of strings
msgDataList = list(filter(None, msgDataListRaw)) # Remove consecutive delimiters note: filter does not return a list, use list to turn into list
# Dump stripped messages in to Domoticz Log
count = 0
for msgData in msgDataList:
Domoticz.Debug("Stripped Message[" + str(count) + "] = " + msgData ) # Log the messages incoming and their stripped count
count = count + 1
Domoticz.Debug("Resetting Ping to 0")
oustandingPings = 0 # Reset ping counter onmessage for making sure connection is up in Heartbeat
# Is it a status update
if (msgDataList[0] == 'ACK'):
Domoticz.Debug("Message Acknowledged with response: " + msgDataList[0])
elif (msgDataList[0] == 'ERR'):
Domoticz.Error("WMP Message ########## SENDING MESSAGE ERROR ########## with response: " + msgDataList[0])
Devices[6].Update(nValue=1, sValue="100") # Set the Error LED switch to ON to flag for a send error
elif (msgDataList[0] == 'LIMITS'): #Get the limits from the AC unit
DataValues = '|'.join(msgDataList[2:])
if (msgDataList[1] == 'ONOFF'): #Get the ONOFF limits from the AC unit
Domoticz.Log("ONOFF Limits from unit: " + DataValues)
elif (msgDataList[1] == 'MODE'): #Get the MODE limits from the AC unit
Domoticz.Log("MODE Limits from unit: " + DataValues)
elif (msgDataList[1] == 'FANSP'): #Get the FANSP limits from the AC unit
Domoticz.Log("FANSP Limits from unit: " + DataValues)
elif (msgDataList[1] == 'VANEUD'): #Get the VANEUD limits from the AC unit
Domoticz.Log("VANEUD Limits from unit: " + DataValues)
elif (msgDataList[1] == 'VANELR'): #Get the VANELR limits from the AC unit
Domoticz.Log("VANELR Limits from unit: " + DataValues)
elif (msgDataList[1] == 'SETPTEMP'): #Get the SETPTEMP temp limits from the AC unit
Domoticz.Debug("SETPTEMP Temp limit values from unit: " + DataValues)
minTempLimit = int(msgDataList[2])
maxTempLimit = int(msgDataList[3])
Domoticz.Status("Min Temp Limit: " + str(minTempLimit) + " Max Temp Limit: " + str(maxTempLimit))
if (msgDataList[0] == 'CHN'):
Domoticz.Debug("Status Update - Unit: " + msgDataList[1] + " Function: " + msgDataList[2] + " Value = " + msgDataList[3])
# Update the status to Domoticz
if (msgDataList[2] == 'ONOFF'):
if (msgDataList[3] == 'ON'):
Domoticz.Status("Update status to On")
Devices[1].Update(nValue=1, sValue="100") # AC Power
elif (msgDataList[3] == 'OFF'):
Domoticz.Status("Update status to Off")
Devices[1].Update(nValue=0, sValue="0")
elif (msgDataList[2] == 'AMBTEMP'):
ambtemp = str(float(msgDataList[3])/10)
Domoticz.Log("Ambient temp")
Domoticz.Debug("Current ambient temp: " + ambtemp + " Degrees")
Devices[2].Update(nValue=0, sValue=ambtemp)
#Domoticz.Debug("Resetting Ping to 0") # using AMBTEMP
#oustandingPings = 0 # Reset ping counter for making sure connection is up in Heartbeat
elif (msgDataList[2] == 'SETPTEMP'):
settemp = str(int(msgDataList[3])/10)
if (unitmode != 'FAN'):
Domoticz.Status("Set temp is set to: " + settemp + " Degrees")
Devices[5].Update(nValue=1, sValue=settemp) # Update the temp display in the set temp device
else:
Domoticz.Debug("FAN MODE setting temp to not display")
Devices[5].Update(nValue=1, sValue="22") # N/A to have a temp displayed
elif (msgDataList[2] == 'MODE'):
unitmode = msgDataList[3]
if (unitmode == "AUTO"):
Domoticz.Status("Mode to: " + unitmode)
Devices[3].Update(nValue=1, sValue="10") # Auto
elif (unitmode == "HEAT"):
Domoticz.Status("Mode to: " + unitmode)
Devices[3].Update(nValue=1, sValue="20") # Heat
elif (unitmode == "DRY"):
Domoticz.Status("Mode to: " + unitmode)
Devices[3].Update(nValue=1, sValue="30") # Dry
elif (unitmode == "COOL"):
Domoticz.Status("Mode to: " + unitmode)
Devices[3].Update(nValue=1, sValue="40") # Cool
elif (unitmode == "FAN"):
Domoticz.Status("Mode to: " + unitmode)
Devices[3].Update(nValue=1, sValue="50") # Fan
Devices[3].Refresh()
elif (msgDataList[2] == 'FANSP'):
fspeed = msgDataList[3]
if (fspeed == "AUTO"):
Domoticz.Status("Fan Speed to: " + fspeed)
Devices[4].Update(nValue=1, sValue="10") # Fan Auto
elif (fspeed == "1"):
Domoticz.Status("Fan Speed to: " + fspeed)
Devices[4].Update(nValue=1, sValue="20") # Fan Level 1
elif (fspeed == "2"):
Domoticz.Status("Fan Speed to: " + fspeed)
Devices[4].Update(nValue=1, sValue="30") # Fan Level 2
elif (fspeed == "3"):
Domoticz.Status("Fan Speed to: " + fspeed)
Devices[4].Update(nValue=1, sValue="40") # Fan Level 3
Devices[4].Refresh()
elif (msgDataList[2] == 'VANEUD'):
vaneud = msgDataList[3]
Domoticz.Status("Vane Up/Down: " + vaneud)
elif (msgDataList[2] == 'VANELR'):
vanelr = msgDataList[3]
Domoticz.Status("Vane Left/Right: " + vanelr)
elif (msgDataList[2] == 'ERRSTATUS'):
errorstatus = msgDataList[3]
if (errorstatus != "OK"):
Domoticz.Status("Error Status: " + errorstatus)
Devices[6].Update(nValue=1, sValue="100") # Set the Error LED switch to ON to flag for an ERROR
elif (errorstatus == "OK"):
Domoticz.Status("Error Status: " + errorstatus)
Devices[6].Update(nValue=0, sValue="0") # Set the Error LED switch to OFF to clear ERROR
elif (msgDataList[2] == 'ERRCODE'):
errorcode = msgDataList[3]
Domoticz.Status("Error Code: " + errorcode)
Devices[7].Update(nValue=1, sValue=errorcode) # Set error text
else:
Domoticz.Error("Unrecognised status command")
def onCommand(self, Unit, Command, Level, Hue):
Domoticz.Log("onCommand called for Unit " + str(Unit) + ": Parameter '" + str(Command) + "', Level: " + str(Level))
if (Unit == 1):
if (Command == "On"):
Domoticz.Status("Sending Power ON")
self.powerOn = 1
self.WMPConn.Send('SET,1:ONOFF,ON\n')
elif(Command == "Off"):
Domoticz.Status("Sending Power OFF")
self.powerOn = 0
self.WMPConn.Send('SET,1:ONOFF,OFF\n')
elif (Unit == 3):
if (Command == "Set Level"):
Domoticz.Debug("Sending Mode")
if (str(Level) == '10'):
Domoticz.Status("Sending Mode Auto")
self.WMPConn.Send('SET,1:MODE,auto\n')
elif (str(Level) == '20'):
Domoticz.Status("Sending Mode Heat")
self.WMPConn.Send('SET,1:MODE,heat\n')
elif (str(Level) == '30'):
Domoticz.Status("Sending Mode Dry")
self.WMPConn.Send('SET,1:MODE,dry\n')
elif (str(Level) == '40'):
Domoticz.Status("Sending Mode Cool")
self.WMPConn.Send('SET,1:MODE,cool\n')
elif (str(Level) == '50'):
Domoticz.Status("Sending Mode Fan")
self.WMPConn.Send('SET,1:MODE,fan\n')
self.WMPConn.Send('LIMITS:SETPTEMP\n') # Check temp limits again when changing modes
elif (Unit == 4):
if (Command == "Set Level"):
Domoticz.Debug("Sending Fan Speed")
if (str(Level) == '10'):
Domoticz.Status("Sending Fan Speed Auto")
self.WMPConn.Send('SET,1:FANSP,AUTO\n')
elif (str(Level) == '20'):
Domoticz.Status("Sending Fan Speed Level 1")
self.WMPConn.Send('SET,1:FANSP,1\n')
elif (str(Level) == '30'):
Domoticz.Status("Sending Fan Speed Level 2")
self.WMPConn.Send('SET,1:FANSP,2\n')
elif (str(Level) == '40'):
Domoticz.Status("Sending Fan Speed Level 3")
self.WMPConn.Send('SET,1:FANSP,3\n')
elif (Unit == 5):
if (Command == "Set Level"):
settemp = Level
Domoticz.Debug("String of Set Temp raw value = " + str(Level))
settemp = round((int((float(settemp) * 10)))/5)*5 #includes complex rounding to nearest 5
Domoticz.Debug("Set Temp converted value = " + str(settemp))
if settemp < minTempLimit: #Adjusting for minLimit of unit
Domoticz.Status("Set temp point less than min limit setting to min value = " + str(minTempLimit / 10) + " Degrees")
settemp = minTempLimit #Send the minimum of unit
if settemp > maxTempLimit: #Adjusting for minLimit of unit
Domoticz.Status("Set temp point greater than max limit setting to max value = " + str(maxTempLimit / 10) + " Degrees")
settemp = maxTempLimit
Domoticz.Status("Setting Temp to: " + str(settemp / 10) + " Degrees")
Domoticz.Debug("Sending Set Temp to: " + str(settemp))
self.WMPConn.Send('SET,1:SETPTEMP,' + str(settemp) + '\n')
elif (Unit == 6):
if (Command == "Off"):
Domoticz.Log("User cleared the ERROR Status LED")
Devices[6].Update(nValue=0, sValue="0") # Set the Error LED switch to Off
else:
Domoticz.Error("No command available to send")
def onNotification(self, Name, Subject, Text, Status, Priority, Sound, ImageFile):
Domoticz.Log("Notification: " + Name + "," + Subject + "," + Text + "," + Status + "," + str(Priority) + "," + Sound + "," + ImageFile)
def onDisconnect(self, Connection):
Domoticz.Log("onDisconnect called")
self.WMPConn = None
    def onHeartbeat(self):
        """Periodic driver: (re)connect, run staged start-up queries, then
        poll AMBTEMP as a keep-alive and drop unresponsive connections.

        Relies on three module-level globals (defined near the top of the
        file, outside this chunk -- TODO confirm their initial values):
          InitHeartbeatCount - counts heartbeats since the last connect so
                               the initial GET/LIMITS queries can be spread
                               over several beats (sending all at once
                               causes some replies to be missed).
          oustandingPings    - heartbeats since the last reply (reset in
                               the AMBTEMP status handler).
          lastHeartbeat      - timestamp of the most recent heartbeat.
        """
        global InitHeartbeatCount # Counter for first heartbeats
        global oustandingPings # Counter for the Pings for check alive using AMBTEMP
        global lastHeartbeat
        Domoticz.Debug("onHeartbeat called")
        Domoticz.Debug("onHeartbeat called, last response seen " + str(oustandingPings) + " heartbeats ago.")
        Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount))
        lastHeartbeat = datetime.datetime.now()
        if (self.WMPConn == None):
            # No connection object: reset the counters and start a connect cycle.
            Domoticz.Log("Connect to WMP")
            InitHeartbeatCount = 0 # reset heartbeat count
            oustandingPings = -1 # reset ping count (pre-increment value)
            self.handleConnect()
        else:
            if (self.WMPConn.Name == "WMP_Connection") and (self.WMPConn.Connected()):
                oustandingPings = oustandingPings + 1 # Increment Ping Counter, reset at AMBTEMP Status
                if InitHeartbeatCount <= 6:
                    InitHeartbeatCount = InitHeartbeatCount + 1
                    Domoticz.Debug("Heartbeat Init Count Incremented now = " + str(InitHeartbeatCount))
                # Start-up queries are staggered over beats 1, 3, 4, 5 and 6
                # (2 is deliberately idle) so the unit does not drop replies.
                if InitHeartbeatCount == 1: #Need to delay these initial messages or some are missed
                    Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting ONOFF")
                    self.WMPConn.Send('GET,1:ONOFF\n')
                    Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting MODE")
                    self.WMPConn.Send('GET,1:MODE\n')
                    Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting SETPTEMP")
                    self.WMPConn.Send('GET,1:SETPTEMP\n')
                    Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting FANSP")
                    self.WMPConn.Send('GET,1:FANSP\n')
                if InitHeartbeatCount == 3:
                    Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting VANEUD")
                    self.WMPConn.Send('GET,1:VANEUD\n')
                    Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting VANELR")
                    self.WMPConn.Send('GET,1:VANELR\n')
                    Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting ERRSTATUS")
                    self.WMPConn.Send('GET,1:ERRSTATUS\n')
                if InitHeartbeatCount == 4:
                    Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting ERRCODE")
                    self.WMPConn.Send('GET,1:ERRCODE\n')
                if InitHeartbeatCount == 5:
                    Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting LIMITS ONOFF")
                    self.WMPConn.Send('LIMITS:ONOFF\n')
                    Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting LIMITS MODE")
                    self.WMPConn.Send('LIMITS:MODE\n')
                    Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting LIMITS FANSP")
                    self.WMPConn.Send('LIMITS:FANSP\n')
                    Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting LIMITS VANEUD")
                    self.WMPConn.Send('LIMITS:VANEUD\n')
                if InitHeartbeatCount == 6:
                    Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting LIMITS VANELR")
                    self.WMPConn.Send('LIMITS:VANELR\n')
                    Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount) + " Getting LIMITS SETPTEMP")
                    self.WMPConn.Send('LIMITS:SETPTEMP\n')
                    # Init is done: slow the heartbeat back down (was 5s in handleConnect).
                    Domoticz.Heartbeat(20) # Extending heartbeat at last Limit
                if InitHeartbeatCount == 7: # when count gets to this number and is connected, it will not increment and commence AMBTEMP Heartbeats
                    Domoticz.Debug("Getting Ambient Temp")
                    self.WMPConn.Send('GET,1:AMBTEMP\n') # Get AMBTEMP at Heartbeat to confirm connected
            # Runs for any existing connection object, connected or not.
            if (oustandingPings == 3):
                Domoticz.Log(self.WMPConn.Name + " has not responded to 3 heartbeats terminating connection.")
                if (self.WMPConn.Connected()):
                    self.WMPConn.Disconnect()
                Domoticz.Debug("Heartbeat Init Count = " + str(InitHeartbeatCount))
                # Clearing the handle makes the next heartbeat reconnect from scratch.
                self.WMPConn = None
def handleConnect(self):
self.WMPConn = None
Domoticz.Debug("Settings shorter heartbeat to speed up initialisation")
Domoticz.Heartbeat(5) # Setting the inital hearbeat timeout used for delaying startup messages - extended in onHeartbeat after counter reached
self.WMPConn = Domoticz.Connection(Name="WMP_Connection", Transport="TCP/IP", Protocol="Line", Address=Parameters["Address"], Port=Parameters["Port"])
self.WMPConn.Connect()
# Module-scope plugin singleton; the hook functions below forward to it.
# (A bare "global" at module level is a no-op, kept from the standard
# Domoticz plugin template.)
global _plugin
_plugin = BasePlugin()
def onStart():
    """Module-level Domoticz hook: delegate to the plugin singleton."""
    _plugin.onStart()
def onStop():
    """Module-level Domoticz hook: delegate to the plugin singleton."""
    _plugin.onStop()
def onConnect(Connection, Status, Description):
    """Module-level Domoticz hook: delegate to the plugin singleton."""
    _plugin.onConnect(Connection, Status, Description)
def onMessage(Connection, Data):
    """Module-level Domoticz hook: delegate to the plugin singleton."""
    _plugin.onMessage(Connection, Data)
def onCommand(Unit, Command, Level, Hue):
    """Module-level Domoticz hook: delegate to the plugin singleton."""
    _plugin.onCommand(Unit, Command, Level, Hue)
def onNotification(Name, Subject, Text, Status, Priority, Sound, ImageFile):
    """Module-level Domoticz hook: delegate to the plugin singleton."""
    _plugin.onNotification(Name, Subject, Text, Status, Priority, Sound, ImageFile)
def onDisconnect(Connection):
    """Module-level Domoticz hook: delegate to the plugin singleton."""
    _plugin.onDisconnect(Connection)
def onHeartbeat():
    """Module-level Domoticz hook: delegate to the plugin singleton."""
    _plugin.onHeartbeat()
# Generic helper functions
def DumpConfigToLog():
    """Write every non-empty plugin Parameter and all Devices to the debug log."""
    for key in Parameters:
        if Parameters[key] != "":
            Domoticz.Debug("'" + key + "':'" + str(Parameters[key]) + "'")
    Domoticz.Debug("Device count: " + str(len(Devices)))
    for unit in Devices:
        device = Devices[unit]
        Domoticz.Debug("Device: " + str(unit) + " - " + str(device))
        Domoticz.Debug("Device ID: '" + str(device.ID) + "'")
        Domoticz.Debug("Device Name: '" + device.Name + "'")
        Domoticz.Debug("Device nValue: " + str(device.nValue))
        Domoticz.Debug("Device sValue: '" + device.sValue + "'")
        Domoticz.Debug("Device LastLevel: " + str(device.LastLevel))
| luismalddonado/IntesishomewithDomoticz | plugin.py | plugin.py | py | 18,692 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "Domoticz.Log",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "Domoticz.He... |
7774676634 | from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
url = input('Enter URL: ')
count2 = input('Enter count: ')
position = input('Enter position: ')
count2 = int(count2)
position = int(position)
html = urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, "html.parser")
tags = soup('a')
count = 0
names_list = []
while (count < count2):
names_list.append(soup.find_all('a')[17].get_text())
url = soup.find_all('a')[position-1]["href"]
print ("Retrieving: ", url)
html = urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, "html.parser")
tags = soup('a')
count +=1
print (names_list)
length = int(len(names_list))
print ("The answer to the assignment for this execution is",(names_list[length-1]))
| laurmvan/SI206-Fall2017 | HW6/HW6_PartB.py | HW6_PartB.py | py | 875 | python | en | code | null | github-code | 36 | [
{
"api_name": "ssl.create_default_context",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "ssl.CERT_NONE",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "bs... |
6751769066 | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QWidget
from PyQt5.QtGui import QDoubleValidator
from PyQt5.QtCore import pyqtSignal, pyqtSlot
from selfdefineformat.views.editcheckbox import Ui_Form
class EditCheckBoxModule(QWidget, Ui_Form):
edited = pyqtSignal()
def __init__(self, element, parent=None):
super(EditCheckBoxModule, self).__init__(parent)
self.setupUi(self)
self.element = element
self.set_detail()
self.set_validator()
def set_validator(self):
doubleValitor = QDoubleValidator()
doubleValitor.setBottom(0)
doubleValitor.setDecimals(2)
doubleValitor.setNotation(QDoubleValidator.StandardNotation)
self.lineEdit_width.setValidator(doubleValitor)
self.lineEdit_height.setValidator(doubleValitor)
self.lineEdit_fontsize.setValidator(doubleValitor)
def set_detail(self):
if self.element.tagName() != 'CheckBox':
self.close()
return
self.lineEdit_text.setText(self.element.attribute("name"))
self.lineEdit_fontsize.setText(self.element.attribute("size"))
self.lineEdit_width.setText(self.element.attribute("width"))
self.lineEdit_height.setText(self.element.attribute("height"))
self.lineEdit_id.setText(self.element.attribute("ID"))
try:
self.style = int(self.element.attribute("style"))
except ValueError:
self.style = 0
format = '{:03b}'.format(self.style)
self.checkBox_bold.setCheckState(2 if format[0] != '0' else 0)
self.checkBox_italic.setCheckState(2 if format[1] != '0' else 0)
self.checkBox_underline.setCheckState(2 if format[2] != '0' else 0)
@pyqtSlot(str)
def on_lineEdit_id_textEdited(self, p_str):
self.element.setAttribute("ID", p_str)
self.edited.emit()
@pyqtSlot(str)
def on_lineEdit_fontsize_textEdited(self, p_str):
self.element.setAttribute("size", p_str)
self.edited.emit()
@pyqtSlot(str)
def on_lineEdit_width_textEdited(self, p_str):
self.element.setAttribute("width", p_str)
self.edited.emit()
@pyqtSlot(str)
def on_lineEdit_height_textEdited(self, p_str):
self.element.setAttribute("height", p_str)
self.edited.emit()
@pyqtSlot(str)
def on_lineEdit_text_textEdited(self, p_str):
self.element.setAttribute("name", p_str)
self.edited.emit()
@pyqtSlot(bool)
def on_checkBox_bold_toggled(self, p_bool):
if p_bool:
self.element.setAttribute("style", self.style + 4)
self.style += 4
else:
self.element.setAttribute("style", self.style - 4)
self.style -= 4
self.edited.emit()
@pyqtSlot(bool)
def on_checkBox_italic_toggled(self, p_bool):
if p_bool:
self.element.setAttribute("style", self.style + 2)
self.style += 2
else:
self.element.setAttribute("style", self.style - 2)
self.style -= 2
self.edited.emit()
@pyqtSlot(bool)
def on_checkBox_underline_toggled(self, p_bool):
if p_bool:
self.element.setAttribute("style", self.style + 1)
self.style += 1
else:
self.element.setAttribute("style", self.style - 1)
self.style -= 1
self.edited.emit()
@pyqtSlot(bool)
def on_checkBox_default_toggled(self, p_bool):
if p_bool:
self.element.setAttribute("default", 2)
else:
self.element.setAttribute("style", 0)
self.edited.emit()
| zxcvbnmz0x/gmpsystem | selfdefineformat/modules/editcheckboxmodule.py | editcheckboxmodule.py | py | 3,645 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "selfdefineformat.views.editcheckbox.Ui_Form",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 10,
"usage_type": "call"
},
... |
459210709 | import requests
import json
import ntpath
class Jira:
"""Common JIRA API methods.
JIRA's REST APIs provide access to resources (data entities) via URI paths. To use a REST API, your application will
make an HTTP request and parse the response. The JIRA REST API uses JSON as its communication format, and the
standard HTTP methods like GET, PUT, POST and DELETE (see API descriptions below for which methods are available for
each resource). URIs for JIRA's REST API resource have the following structure:
http://host:port/context/rest/api-name/api-version/resource-name
Currently there are two API names available:
auth - for authentication-related operations, and
api - for everything else.
The current API version is 2. However, there is also a symbolic version, called latest, which resolves to the latest
version supported by the given JIRA instance. As an example, if you wanted to retrieve the JSON representation of
issue JRA-9 from Atlassian's public issue tracker, you would access:
https://jira.atlassian.com/rest/api/latest/issue/JRA-9
"""
def __init__(self, base_http_url, project_key, auth):
"""Initialize JIRA object with base_http_url, project_key, and auth.
:param base_http_url: URL for JIRA site - http://host:port/ (i.e. http://localhost:8080/)
:type base_http_url: str
:param project_key: The project matching the projectKey supplied in the resource path as shown in URL.
:type project_key: str
:param auth: Tuple of username and password for authentication.
:type auth: tuple[str, str]
"""
self.base_http_url = base_http_url
self.project_key = project_key
self.auth = auth
def create_issue(self, summary, description='', issue_type='Task'):
"""Create Issue in Jira
Create issue with summary, description, and issue type.
:param summary: A brief summary of the issue. This will be the "title" shown next to the issue id on the boards.
:type summary: str
:param description: More details about the issue.
:type description: str
:param issue_type: Choose one of the predefined issue types for your project ('Bug', 'Task', 'Story', and
'Epic' by default.)
:type issue_type: str
:return: Response from the POST request.
STATUS 201: Success - application/json Returns a link to the created issue. \n
STATUS 400: Error - STATUS 400Returned if the input is invalid (e.g. missing required fields, invalid
field values, and so forth).
:rtype: list[requests.Response, str]
"""
# create Jira issue
url = self.base_http_url + 'rest/api/2/issue'
headers = {'Content-Type': 'application/json'}
data = {
"fields": {
"project": {
"key": self.project_key
},
"summary": summary,
"description": description,
"issuetype": {
"name": issue_type
}
}
}
r = requests.post(url, auth=self.auth, headers=headers, data=json.dumps(data))
return r
def get_issues(self, issue_id=None, max_results=10, start_at=0):
"""Get specific or list of Jira issue(s).
Get specific issue by setting the issue_id. Get a list of issues by leaving the issue_id blank and setting the
limit for the pagination size (default=10).
:param issue_id: JIRA will attempt to identify the issue by the issueIdOrKey path parameter. This can be an
issue id, or an issue key.
:type issue_id: str
:param max_results: The "maxResults" parameter indicates how many results to return per page.
:type max_results: int
:param start_at: Item that should be used as the first item in the page of results.
:type start_at: int
:return:
STATUS 200: Success - application/jsonReturns a full representation of a JIRA issue in JSON format.
STATUS 404: Error - Returned if the requested issue is not found, or the user does not have permission to
view it.
:rtype: list[requests.Response, str]
"""
if issue_id is None:
url = self.base_http_url + 'rest/api/2/issue?maxResults=' + str(max_results) + '&startAt' + str(start_at)
else:
url = self.base_http_url + 'rest/api/2/issue/' + str(issue_id)
headers = {'Content-Type': 'application/json'}
r = requests.get(url, auth=self.auth, headers=headers)
return r
def add_attachment(self, issue_id, attachments):
"""Add attachments to Jira issue
:param issue_id: JIRA will attempt to identify the issue by the issueIdOrKey path parameter. This can be an
issue id, or an issue key.
:type issue_id: str
:param attachments: List of string paths to attachments to be uploaded and added to an issue.
:type attachments: list[str]
:return:
STATUS 200: Success - application/json
STATUS 403: Error - Returned if attachments is disabled or if you don't have permission to add attachments
to this issue.
STATUS 404: Error - Returned if the requested issue is not found, the user does not have permission to
view it, or if the attachments exceeds the maximum configured attachment size.
:rtype: list[requests.Response, str]
"""
# add attachments to Jira issue
url = self.base_http_url + 'rest/api/2/issue/' + issue_id + '/attachments'
headers = {'X-Atlassian-Token': 'no-check'}
r = []
filenames = []
# POST request for attachments
if attachments:
for file in attachments:
upload = open(file, 'rb')
filenames.append(ntpath.basename(file))
r.append(requests.post(url, auth=self.auth, headers=headers, files={'file': upload}))
upload.close()
else:
r.append('ERROR: No attachments to add.')
# verify attachments were attached
if attachments:
jira_attachments = self.get_issues(issue_id).json()['fields']['attachment']
for filename in filenames:
if not any(d['filename'] == filename for d in jira_attachments):
# does not exist
r.append('ERROR: File ' + filename + ' was not attached.')
return r
| mjlabe/python-atlassian-server-api | atlassian_server_api/jira.py | jira.py | py | 6,655 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.post",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "ntpath.basename",
"line_nu... |
70537611943 | import elasticsearch
import json
import luigi
from elasticsearch.helpers import bulk
from luigi.contrib import esindex
from nfl import scraper
class IngestData(luigi.Task):
category = luigi.Parameter()
year = luigi.Parameter()
def output(self):
target = luigi.LocalTarget("output/{0}/{1}.json".format(self.year, self.category))
target.category = self.category
target.year = self.year
return target
def run(self):
with self.output().open('w') as f:
f.write(json.dumps(scraper.scrape_year_category(self.year, self.category), indent=2))
class ExportToES(luigi.Task):
def __init__(self):
self.host = "localhost"
self.port = "9200"
self.index = "demo5"
super(ExportToES, self).__init__()
def _init_connection(self):
return elasticsearch.Elasticsearch(
host=self.host,
port=self.port
)
def requires(self):
for year in range(2000, 2015):
for c in ['KICK_RETURNS', 'KICKING', 'PASSING', 'PUNTING', 'RECEIVING', 'RUSHING', 'SACKS', 'SCORING', 'TACKLES', 'TOUCHDOWNS']:
yield IngestData(c, year)
def output(self):
return esindex.ElasticsearchTarget(host=self.host, port=self.port, index=self.index, doc_type="report", update_id="_id")
def docs(self):
for inputFile in self.input():
with inputFile.open('r') as f:
for element in json.loads(f.read()):
element["_type"] = inputFile.category
element["_index"] = self.index
yield element
def run(self):
es = self._init_connection()
bulk(es, self.docs())
self.output().touch()
| jasonmotylinski/luigi-presentation | pipeline-py/luigi/luigipipeline/demo5.py | demo5.py | py | 1,753 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "luigi.Task",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "luigi.Parameter",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "luigi.Parameter",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "luigi.LocalTarget",
... |
74189117224 | #reverses the process of the spritesheetCreator script
from PIL import Image
import sys
#sets all the variables we need to accurately crop the images
imageAmount = 0
i = 1
width = 0
height = 0
maxWidth = 0
maxHeight = 0
counter = 0
row = 0
column = 0
#searches for the file saved by the spritesheet script
try:
with open("imageAtlas.txt", "r") as imageSearch:
data = imageSearch.read()
except IOError:
sys.exit()
imageSearch.close()
#changes the data of the atlas to something more readable
newData = data.replace('[', '').replace('(', '').replace(',', '').replace(')', '').replace(']', '')
#takes the new data and finds out how many images were in the spritesheet
imageAmount = (newData.count(" "))/2+1
#saves the data into a list
listNew = newData.split(' ')
imageList = zip(listNew[::2],listNew[1::2])
#uses the same math in the spritesheet script to figure out how many images per row there will be
while(((len(imageList)/i) != i)) :
i = i + 1
if(((len(imageList)/i) < i)) :
break
maxFramesPerRow = i
#opens the spritesheet and finds the size of the largest image
spritesheet = Image.open("spritesheet.png")
maxWidth, maxHeight = max(imageList)
#loops through the amount of images sets the size of the current image using the atlas
#the counter then checks if the row has ended if it had it moves onto the next row
#each image will be its original size the space between them being what the largest one was
#this is figued out because the space is constant
#saves each image into the current script folder
for x in range(imageAmount):
width, height = imageList[x]
if(counter == maxFramesPerRow):
row = row + 1
column = 0
counter = 0
area = (0, int(maxHeight)*row, int(width), int(height)+(int(maxHeight)*row))
cropped_img = spritesheet.crop(area)
cropped_img.save("image" + str(x+1) + ".png", "PNG")
else:
if(x is not 0):
column = column+1
area = (int(maxWidth)*column, int(maxHeight)*row, int(width)+(int(maxWidth)*column), int(height)+(int(maxHeight)*row))
cropped_img = spritesheet.crop(area)
cropped_img.save("image" + str(x+1) + ".png", "PNG")
counter = counter+1
| IrrationalThinking/portfolio | Example/reversal.py | reversal.py | py | 2,172 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.exit",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 36,
"usage_type": "name"
}
] |
74088296424 | """
Normally step 1 to align trimmed reads
e.g.
"""
import os
import sys
from tqdm import trange
from joblib import Parallel, delayed
import re
input_root = sys.argv[1]
output_root = sys.argv[2]
genome_dir = sys.argv[3]
subdirs = []
for subdir, dirs, files in os.walk(input_root):
for file in files:
subdirs.append(subdir)
subdirs = sorted(set(subdirs))
if not os.path.isdir(output_root):
os.system(f'mkdir -p {output_root}')
def process(pair_dir):
name = re.search(".*/(.*$)", pair_dir).group(1)
if os.path.exists(f"{output_root}/{name}_Aligned.out.bam"):
return
os.system(
(f'STAR --runThreadN 4 --genomeDir {genome_dir} --readFilesIn {pair_dir}/*gz '
f'--readFilesCommand zcat --outFileNamePrefix {output_root}/{name}_ --quantMode TranscriptomeSAM '
'--outSAMtype BAM Unsorted'))
Parallel(n_jobs=2)(delayed(process)(subdirs[i]) for i in trange(len(subdirs)))
| ZhaoxiangSimonCai/BioInfoScripts | RNA_workflows/star_by_folders.py | star_by_folders.py | py | 931 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number"... |
16793418016 | import pygame
from battle import time, MOVE_DOWN_FREQ, BOARD_HEIGHT, BOARD_WIDTH, BLANK, POISON, SHAPES, BOX_SIZE, \
TEMPLATE_HEIGHT, TEMPLATE_WIDTH, MOVE_SIDE_WAYS_FREQ
from utils import get_new_piece, get_blank_board, calculate_level_and_fall_frequency, is_valid_position
from pygame.locals import *
BOARD_OFFSET = [int(((BOX_SIZE * BOARD_WIDTH) / 2) + 60) * -1, int(((BOX_SIZE * BOARD_WIDTH) / 2) - 47)]
LEFT_CONTROLS = (K_q, K_w, K_a, K_s, K_d, K_SPACE)
RIGHT_CONTROLS = (K_UP, K_DOWN, K_LEFT, K_RIGHT, K_INSERT, K_HOME)
CONTROLS = (LEFT_CONTROLS, RIGHT_CONTROLS)
class Player(object):
board = None
border_color = None
board_offset = 0
bangs = 3.0
now = None
last_move_down_time = now
last_move_sideways_time = now
last_fall_time = now
moving_down = False # Note: there is no moving_up variable
moving_left = False
moving_right = False
score = 0
level = 0
turn = 0
fall_frequency = 0
falling_piece = None
next_piece = None
controls = tuple()
game_over = False
def __init__(self, now=None, player_num=0, single_player=True):
if not now:
self.now = time.time()
else:
self.now = now
self.board = get_blank_board()
self.last_move_down_time = self.last_fall_time = self.last_move_sideways_time = self.now
self.update_level()
self.turn = 1
if single_player:
self.controls = LEFT_CONTROLS + RIGHT_CONTROLS
else:
self.controls = CONTROLS[player_num]
self.board_offset = BOARD_OFFSET[player_num]
def update_level(self):
self.level, self.fall_frequency = calculate_level_and_fall_frequency(self.score)
def update_falling_piece(self, now):
if self.game_over:
return
self.falling_piece = self.next_piece
self.turn += 1
self.next_piece = get_new_piece(self.turn)
self.last_fall_time = now
def remove_completed_line(self):
"""
Remove any completed lines on the board, move everything above them
down, and return the number of complete lines.
"""
if self.game_over:
return
num_lines_removes = 0
y = BOARD_HEIGHT - 1 # Start y at the bottom of the board
while y >= 0:
complete, bonus = self.is_completed_line_with_bonus(y)
if complete:
# Remove the line and pull boxes down by one line.
for pull_down_y in range(y, 0, -1):
for x in range(BOARD_WIDTH):
self.board[x][pull_down_y] = self.board[x][pull_down_y - 1]
# Set very top line to blank
for x in range(BOARD_WIDTH):
self.board[x][0] = BLANK
num_lines_removes += 1
if bonus:
num_lines_removes += 4
# Note on the next iteration of the loop, y is the same.
# This is so that is the line that was pulled down is also
# complete, it will be removed.
else:
y -= 1
if num_lines_removes:
self.score += num_lines_removes
self.update_level()
self.bangs += num_lines_removes * .25 # One new bang every four lines
def is_completed_line_with_bonus(self, y):
"""
Return True is the line filled with boxes with no gaps.
"""
bonus = True
block_color = None
for x in range(BOARD_WIDTH):
if self.board[x][y] in (BLANK, POISON):
return False, False
if block_color is None:
block_color = self.board[x][y]
if bonus:
bonus = block_color == self.board[x][y]
return True, bonus
def handle_event(self, event_type, key):
if key not in self.controls or self.game_over:
return
if event_type == KEYUP:
if key in (K_LEFT, K_a):
self.moving_left = False
elif key in (K_RIGHT, K_d):
self.moving_right = False
elif key in (K_DOWN, K_s):
self.moving_down = False
elif event_type == KEYDOWN:
# moving the block sideways
if key in (K_LEFT, K_a) and is_valid_position(self.board, self.falling_piece, adj_x=-1):
self.falling_piece['x'] -= 1
self.moving_left = True
self.moving_right = False
self.last_move_sideways_time = self.now
elif key in (K_RIGHT, K_d) and is_valid_position(self.board, self.falling_piece, adj_x=1):
self.falling_piece['x'] += 1
self.moving_left = False
self.moving_right = True
self.last_move_sideways_time = self.now
# Rotating the block (if there is room to rotate)
elif key in (K_UP, K_w):
self.falling_piece['rotation'] = (self.falling_piece['rotation'] + 1) % len(SHAPES[self.falling_piece['shape']])
if not is_valid_position(self.board, self.falling_piece):
self.falling_piece['rotation'] = (self.falling_piece['rotation'] - 1) % len(SHAPES[self.falling_piece['shape']])
elif key == K_q:
self.falling_piece['rotation'] = (self.falling_piece['rotation'] - 1) % len(SHAPES[self.falling_piece['shape']])
if not is_valid_position(self.board, self.falling_piece):
self.falling_piece['rotation'] = (self.falling_piece['rotation'] + 1) % len(SHAPES[self.falling_piece['shape']])
# Make the block fall faster with the down key
elif key in (K_DOWN, K_s):
self.moving_down = True
if is_valid_position(self.board, self.falling_piece, adj_y=1):
self.falling_piece['y'] += 1
self.last_move_down_time = self.now
# Move the current block all the way down
elif key == K_SPACE:
self.moving_down = False
self.moving_left = False
self.moving_right = False
for i in range(1, BOARD_HEIGHT):
if not is_valid_position(self.board, self.falling_piece, adj_y=i):
break
self.falling_piece['y'] += i - 1
    def calculate_moves(self, now):
        """Advance the piece one tick: auto-repeat held moves and apply gravity.

        `now` is the current timestamp; all repeats are rate-limited by
        comparing it against the `last_*_time` markers (frequencies come from
        the module constants MOVE_SIDE_WAYS_FREQ / MOVE_DOWN_FREQ and the
        per-player `fall_frequency`). When the piece can no longer move down
        it is fused into the board and `falling_piece` is set to None so the
        caller can spawn a new one.
        """
        if self.game_over:
            return
        # Handling moving the block because of user input
        if (self.moving_left or self.moving_right) and now - self.last_move_sideways_time > MOVE_SIDE_WAYS_FREQ:
            if self.moving_left and is_valid_position(self.board, self.falling_piece, adj_x=-1):
                self.falling_piece['x'] -= 1
            elif self.moving_right and is_valid_position(self.board, self.falling_piece, adj_x=1):
                self.falling_piece['x'] += 1
            self.last_move_sideways_time = now
        if self.moving_down and now - self.last_move_down_time > MOVE_DOWN_FREQ and is_valid_position(self.board, self.falling_piece, adj_y=1):
            self.falling_piece['y'] += 1
            self.last_move_down_time = now
        # Let the piece fall if it is time to fall
        if now - self.last_fall_time > self.fall_frequency:
            # See if the piece has landed.
            if not is_valid_position(self.board, self.falling_piece, adj_y=1):
                # falling piece has landed, set it on the self.board
                self.add_to_board(self.falling_piece)
                self.remove_completed_line()
                self.falling_piece = None
            else:
                # piece did not land just move it down one block
                self.falling_piece['y'] += 1
            self.last_fall_time = now
def add_to_board(self, piece):
"""
Fill in the board based on piece's location, shape, and rotation
"""
for x in range(TEMPLATE_WIDTH):
for y in range(TEMPLATE_HEIGHT):
if SHAPES[piece['shape']][piece['rotation']][x][y] != BLANK:
self.board[x + piece['x']][y + piece['y']] = piece['color']
| dadisi/battle-tetro | battle/player.py | player.py | py | 8,220 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "battle.BOX_SIZE",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "battle.BOARD_WIDTH",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "battle.time.time",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "battle.time",
"... |
73692413545 | import mock
import testtools
from stackalytics.dashboard import helpers
class TestHelpers(testtools.TestCase):
    """Unit tests for the dashboard helper functions."""

    @mock.patch('time.time')
    def test_get_current_company(self, mock_time_time):
        # Freeze "now" so the first affiliation is still active.
        now = 1234567890
        mock_time_time.return_value = now
        user = {
            'user_id': 'smith',
            'user_name': 'John Smith',
            'companies': [
                {'company_name': 'Current', 'end_date': now + 1},
                {'company_name': 'TheCompany', 'end_date': 0},
            ],
        }
        self.assertEqual('Current', helpers.get_current_company(user))

    @mock.patch('stackalytics.dashboard.helpers.make_link')
    def test_extend_user(self, mock_make_link):
        company_link = mock.Mock()
        mock_make_link.return_value = company_link
        user = {
            'user_id': 'smith',
            'user_name': 'John Smith',
            'companies': [
                {'company_name': 'TheCompany', 'end_date': 0},
            ],
        }
        # extend_user should add id/company_link/text on top of the input.
        expected = {
            'user_id': 'smith',
            'user_name': 'John Smith',
            'companies': [
                {'company_name': 'TheCompany', 'end_date': 0},
            ],
            'id': 'smith',
            'company_link': company_link,
            'text': 'John Smith',
        }
        self.assertEqual(expected, helpers.extend_user(user))
        mock_make_link.assert_called_once_with('TheCompany', '/', mock.ANY)

    @mock.patch('time.time')
    @mock.patch('stackalytics.dashboard.helpers.make_link')
    def test_extend_user_current_company(self, mock_make_link, mock_time_time):
        company_link = mock.Mock()
        mock_make_link.return_value = company_link
        now = 1234567890
        mock_time_time.return_value = now
        user = {
            'user_id': 'smith',
            'user_name': 'John Smith',
            'companies': [
                {'company_name': 'Current', 'end_date': now + 1},
                {'company_name': 'TheCompany', 'end_date': 0},
            ],
        }
        # The link must be built for the *currently active* affiliation.
        helpers.extend_user(user)
        mock_make_link.assert_called_once_with('Current', '/', mock.ANY)
| Mirantis/stackalytics | stackalytics/tests/unit/test_helpers.py | test_helpers.py | py | 2,389 | python | en | code | 12 | github-code | 36 | [
{
"api_name": "testtools.TestCase",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "stackalytics.dashboard.helpers.get_current_company",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "stackalytics.dashboard.helpers",
"line_number": 26,
"usage_type... |
26744639927 | import argparse
import os
import time
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import DataLoader
from data_loader import CSV_PNG_Dataset, CSV_PNG_Dataset_2D, PNG_PNG_Dataset
from netArchitecture.VGG import VGGModel, VGGModel_2D
from netArchitecture.ResNet import ResNet18_2D
from visualize import Visualizations
import logging
# Module logger; deliberately disabled by default (debug aid only).
logger = logging.getLogger("In train.py")
logger.setLevel(logging.DEBUG)
logger.disabled = True
#parse parameters
# ---- Command-line configuration --------------------------------------------
parser = argparse.ArgumentParser(description='train deep color extraction model')
parser.add_argument('--mode', default=2, type=int)  ## mode 0: lab 3D histogram; mode 1: lab 2D histogram as input; mode 2: original images with png format
parser.add_argument('--backbone', default="resnet18", type=str)  ## "resnet18" or "vgg"
parser.add_argument('--net_name', default="resnet18+ASPP+2D", type=str)
# FIX: default must be the *string* 'True' — the value is compared to 'True'
# below, so a bool default silently disabled ASPP when the flag was omitted.
parser.add_argument('--with_aspp', default='True', choices=('True', 'False'))
parser.add_argument('--trained_model_config', default="", type=str)  # used for resuming to train network
parser.add_argument('--legend_width', default=512, type=int)
parser.add_argument('--legend_height', default=20, type=int)
parser.add_argument('--image_width', default=512, type=int)
parser.add_argument('--image_height', default=256, type=int)
parser.add_argument('--cuda_device', default=0, type=int)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--train_bs', default=8, type=int)
parser.add_argument('--num_epochs', default=15, type=int)
parser.add_argument('--color_space', default="Lab", type=str)  # possible value: "Lab", "Rgb"
parser.add_argument('--loss_function', default="MSE", type=str)
parser.add_argument('--prefix', default="", type=str)
opt = parser.parse_args()

# IS_LABEL_NORMALIZED = opt.is_label_normalized == 'True'
WITH_ASPP = opt.with_aspp == 'True'
LEARNING_RATE = opt.lr
BATCH_SIZE = opt.train_bs
NUM_EPOCHS = opt.num_epochs
NET_NAME = opt.net_name
CUDA_DEVICE = opt.cuda_device
MODE = opt.mode
BACKBONE = opt.backbone
COLOR_SPACE = opt.color_space
TRAINED_MODEL_CONFIG = opt.trained_model_config
TRAINED_EPOCH = 0  # bumped below when resuming from a checkpoint
LOSS_FUNCTION = opt.loss_function
PREFIX = opt.prefix  # used in inference.py
IS_LABEL_NORMALIZED = True
IS_NOT_DEBUG = True  # also controls train-loader shuffling
USE_VISDOM = True

# Input geometry per mode; mode 0 (lab 3D histogram) is the fallback default.
IMAGE_WIDTH = 32
IMAGE_HEIGHT = 32
IMAGE_CHANNEL = 32
if (MODE == 1):  # lab 2D histogram
    IMAGE_WIDTH = opt.image_width
    IMAGE_HEIGHT = opt.image_height
    IMAGE_CHANNEL = 1
elif (MODE == 2):  # lab original images
    IMAGE_WIDTH = opt.image_width
    IMAGE_HEIGHT = opt.image_height
    IMAGE_CHANNEL = 3
LABEL_WIDTH = opt.legend_width
LABEL_HEIGHT = opt.legend_height
LABEL_CHANNEL = 3

# Run identifier: derived from the hyper-parameters, or — when resuming —
# the config string of the checkpoint being resumed.
config = "Net_{}__mode_{}__backbone_{}_colorspace_{}__labelnormalized_{}__lossfun_{}__woaspp_{}__lheight_{}__bs_{}__ep_{}__lr_{}".\
    format(NET_NAME, MODE, BACKBONE, COLOR_SPACE, IS_LABEL_NORMALIZED, LOSS_FUNCTION, WITH_ASPP, LABEL_HEIGHT, BATCH_SIZE, NUM_EPOCHS, LEARNING_RATE ) \
    if (TRAINED_MODEL_CONFIG == "") else TRAINED_MODEL_CONFIG
# path for save and load netArchitecture
model_dir = "models"
if not os.path.exists(model_dir):
    os.makedirs(model_dir)
model_path = os.path.join(model_dir, config)
torch.cuda.set_device(CUDA_DEVICE)
# define dataset
# if MODE == 0:
# Default datasets (mode 0): CSV feature input, PNG legend labels.
train_set = CSV_PNG_Dataset(
    label_paras ={'width':LABEL_WIDTH,'height':LABEL_HEIGHT,'channel':LABEL_CHANNEL},
    image_paras={'width':IMAGE_WIDTH, 'height':IMAGE_HEIGHT, 'channel':IMAGE_CHANNEL},
    is_label_normalized= IS_LABEL_NORMALIZED
)
eval_set = CSV_PNG_Dataset(
    label_paras ={'width':LABEL_WIDTH,'height':LABEL_HEIGHT,'channel':LABEL_CHANNEL},
    image_paras={'width':IMAGE_WIDTH, 'height':IMAGE_HEIGHT, 'channel':IMAGE_CHANNEL},
    file_list="./dataset/evaluation.txt",
    is_label_normalized= IS_LABEL_NORMALIZED
)
# Modes 1/2 rebind both datasets; the mode-0 objects above are then discarded.
if MODE == 1:
    train_set = CSV_PNG_Dataset_2D(
        image_paras={'width':IMAGE_WIDTH,'height':IMAGE_HEIGHT,'channel':IMAGE_CHANNEL},
        label_paras={'width': LABEL_WIDTH, 'height': LABEL_HEIGHT, 'channel': LABEL_CHANNEL},
        color_space=COLOR_SPACE,
        is_label_normalized=IS_LABEL_NORMALIZED)
    eval_set = CSV_PNG_Dataset_2D(
        file_list="./dataset/evaluation.txt", # here change to evaluation.txt
        image_paras={'width': IMAGE_WIDTH, 'height': IMAGE_HEIGHT, 'channel': IMAGE_CHANNEL},
        label_paras={'width': LABEL_WIDTH, 'height': LABEL_HEIGHT, 'channel': LABEL_CHANNEL},
        color_space=COLOR_SPACE,
        is_label_normalized=IS_LABEL_NORMALIZED)
elif MODE == 2:
    train_set = PNG_PNG_Dataset(image_paras={'width':IMAGE_WIDTH,'height':IMAGE_HEIGHT,'channel':IMAGE_CHANNEL},
                            label_paras ={'width':LABEL_WIDTH,'height':LABEL_HEIGHT,'channel':LABEL_CHANNEL},
                            color_space=COLOR_SPACE, is_label_normalized=IS_LABEL_NORMALIZED)
    eval_set = PNG_PNG_Dataset(image_paras={'width':IMAGE_WIDTH,'height':IMAGE_HEIGHT,'channel':IMAGE_CHANNEL},
                            label_paras ={'width':LABEL_WIDTH,'height':LABEL_HEIGHT,'channel':LABEL_CHANNEL},file_list="./dataset/evaluation.txt",
                            color_space=COLOR_SPACE, is_label_normalized=IS_LABEL_NORMALIZED)
# drop_last keeps batch shapes constant; eval runs one sample at a time.
train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=IS_NOT_DEBUG, num_workers=2, drop_last=True)
eval_loader = DataLoader(eval_set, batch_size=1, shuffle=False)
# define net, criterion and optimizer
# Mode-0 default backbone; replaced below for 2D modes.
net = VGGModel(input_channel=IMAGE_CHANNEL, label_height=LABEL_HEIGHT, label_width=LABEL_WIDTH)
if MODE == 2 or MODE == 1:
    if BACKBONE == "vgg":
        net = VGGModel_2D(input_channel=IMAGE_CHANNEL, label_height=LABEL_HEIGHT, label_width=LABEL_WIDTH, with_aspp=WITH_ASPP)
    elif BACKBONE == "resnet18":
        print("resnet18")
        net = ResNet18_2D(input_channel=IMAGE_CHANNEL, label_height=LABEL_HEIGHT, label_width=LABEL_WIDTH, with_aspp=WITH_ASPP)
test_loss_for_each_epoch = [] # used for recording avg mean of each epoch in testing phrase
loss_for_each_epoch = [] # used for recording avg mean of each epoch in training phrase
time_used_cumulation = []
# Resume: restore weights plus the loss/time bookkeeping saved with them.
if TRAINED_MODEL_CONFIG != "":
    checkpoint = torch.load(model_path)
    net.load_state_dict(checkpoint['model_state_dict'])
    TRAINED_EPOCH = checkpoint['epoch'] + 1
    time_used_cumulation = checkpoint['time_used']
    loss_for_each_epoch = checkpoint['loss_for_each_epoch']
print('#netArchitecture parameters:', sum(param.numel() for param in net.parameters()))
if torch.cuda.is_available():
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug(torch.cuda.current_device())
    ts = time.time()
    net.cuda()
    print("finished loading netArchitecture params to cuda, time elapsed: {}".format(time.time() - ts))
optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE)
vis = Visualizations(env=config)
# NOTE(review): `criterian` stays undefined for any other LOSS_FUNCTION value
# and would raise a NameError at first use.
if LOSS_FUNCTION == "MSE":
    criterian = nn.MSELoss()
elif LOSS_FUNCTION == "BCE":
    criterian = nn.BCELoss()
sigmoid = torch.nn.Sigmoid()
def train():
    """Train `net` for the remaining epochs.

    Logs running losses to visdom every 100 global iterations, appends
    per-epoch means to the module-level history lists, checkpoints the model
    (weights + loss/time history) after every epoch, and then runs a
    validation pass via the module-level eval().
    """
    # Resume accumulated wall-clock time when continuing from a checkpoint.
    if len(time_used_cumulation) == 0:
        time_used = 0.0
    else:
        time_used = time_used_cumulation[-1]
    iter_100_loss_values = []
    for epoch in range(NUM_EPOCHS - TRAINED_EPOCH):
        tm_start_each_epoch = time.time()
        true_epoch = epoch + TRAINED_EPOCH
        net.train()
        loss_values = [] # used for visdom to visualize
        epoch_loss_value_in_one_epoch = [] # used for recording all loss values in an epoch and computing the mean of them
        for iter, batch in enumerate(train_loader):
            # Anomaly detection is enabled every step — useful for debugging
            # NaNs but slows training down.
            torch.autograd.set_detect_anomaly(True)
            images, labels = batch['image'], batch['label']
            if torch.cuda.is_available():
                images = images.cuda()
                labels = labels.cuda()
            preds = net(images)
            if IS_LABEL_NORMALIZED:
                # Squash predictions into [0, 1] to match normalised labels.
                preds = sigmoid(preds)
            if LOSS_FUNCTION == "MSE":
                loss = criterian(labels, preds)
            elif LOSS_FUNCTION == "BCE":
                loss = criterian(preds, labels.detach())
            loss_values.append(loss.item())
            epoch_loss_value_in_one_epoch.append(loss.item())
            optimizer.zero_grad()
            with torch.autograd.detect_anomaly():
                loss.backward()
            optimizer.step()
            if iter % 10 == 0:
                print("epoch{}, iter{}, loss: {}".format(true_epoch, iter, loss.item()))
            niter = true_epoch * len(train_loader) + iter
            # Every 100 global steps: record the window mean and push plots.
            if niter % 100 == 0:
                iter_100_loss_values.append(np.mean(loss_values))
                if USE_VISDOM:
                    vis.plot_loss(np.mean(loss_values), niter)
                    vis.plot_ground_truth(labels, COLOR_SPACE, caption="groud_truth_in epoch{}, iter{}".format(true_epoch, iter))
                    vis.plot_test_pred(preds, COLOR_SPACE, caption="pred_in epoch{}, iter{}".format(true_epoch, iter))
                    if MODE == 2:
                        vis.plot_ground_truth(images,COLOR_SPACE, win="original images", caption="image in epoch{}, iter{}".format(true_epoch, iter))
                loss_values.clear()
                vis.save()
        time_used = time_used + time.time() - tm_start_each_epoch
        time_used_cumulation.append(time_used)
        loss_for_each_epoch.append(np.mean(epoch_loss_value_in_one_epoch))
        epoch_loss_value_in_one_epoch.clear()
        # Checkpoint after every epoch (weights plus bookkeeping for resume).
        torch.save({
            'epoch': true_epoch,
            'model_state_dict': net.state_dict(),
            'time_used': time_used_cumulation,
            'loss_for_each_epoch':loss_for_each_epoch
        }, model_path)
        # Module-level eval(), not the builtin: validation pass per epoch.
        eval(epoch)
def eval(epoch):
    """Run one full pass over the validation loader and record the mean loss.

    Appends the epoch's mean validation loss to `test_loss_for_each_epoch`.
    NOTE: the name shadows the builtin ``eval``; it is kept because both
    ``train()`` and the ``__main__`` guard call it by this name.

    Args:
        epoch: current epoch index (currently unused; kept for the call sites).
    """
    net.eval()
    loss_value_in_epoch = []
    # Inference only: disabling autograd avoids building graphs and saves
    # memory (the original ran validation with gradients enabled).
    with torch.no_grad():
        for batch_idx, batch in enumerate(eval_loader):
            images, labels = batch['image'], batch['label']
            if torch.cuda.is_available():
                images = images.cuda()
                labels = labels.cuda()
            preds = net(images)
            if IS_LABEL_NORMALIZED:
                # Squash predictions into [0, 1] to match normalised labels.
                preds = sigmoid(preds)
            if LOSS_FUNCTION == "MSE":
                loss = criterian(labels, preds)
            elif LOSS_FUNCTION == "BCE":
                loss = criterian(preds, labels.detach())
            loss_value_in_epoch.append(loss.item())
            if USE_VISDOM:
                vis.plot_ground_truth(labels, COLOR_SPACE, win="evaluate_ground_truth")
                vis.plot_test_pred(preds, COLOR_SPACE, win="evaluate_test_pred")
    test_loss_for_each_epoch.append(np.mean(loss_value_in_epoch))
if __name__ == "__main__":
    # Sanity-check validation once before starting training.
    eval(0)
    train()
| yuanlinping/deep_colormap_extraction | train.py | train.py | py | 10,586 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.... |
74113768742 | import click
# import pickle
# import cv2
from recognize import process, recognize, draw, show
@click.command()
@click.argument('image')
@click.option('--encodings', '-e', default='encodings.pickle', help='path to db of BTS facial encodings.')
@click.option('--detection', default='cnn', help='Which face detection model to use. Options are "hog" or "cnn".')
@click.option('--tolerance', default=0.4, help='Tolerance level: (0...1); lower is more accurate, higher for better performance')
def main(image, encodings, detection, tolerance):
    """Recognize known faces in an image, annotate them, and display the result."""
    recognized_names = []
    # process() loads the encodings DB and the target image.
    loaded_encodings, loaded_image = process(image, encodings)
    # recognize() fills `recognized_names` in place and returns face boxes.
    face_boxes = recognize(loaded_image, recognized_names, loaded_encodings, detection, tolerance)
    draw(loaded_image, face_boxes, recognized_names)
    show(loaded_image)
# Script entry point: click parses argv and invokes main().
if __name__ == '__main__':
    main()
| cache-monet/bts_recognition | image.py | image.py | py | 741 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "recognize.process",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "recognize.recognize",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "recognize.draw",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "recognize.show",... |
2259081804 | import pyperclip as pc
# Roster to poll, in order.
names = ["Apple", "Banana", "Cherry", "Dog", "Elephant"]

present = []
absent = []
not_audio = []

# Ask about each student once: p = present, a = absent, n = not on audio.
# FIX: the original stopped on a hard-coded `number == 38`, which overruns
# this roster and raised IndexError; iterate the list directly instead.
for question in names:
    answer = input(question + ": ")
    if answer == "p":
        present.append(question)
    if answer == "a":
        absent.append(question)
    if answer == "n":
        not_audio.append(question)

# Corrections: comma-separated names to un-mark as absent. Unknown or blank
# entries are ignored (the original crashed with ValueError on them).
correction = input("Names to be removed: ")
for name in correction.split(","):
    name = name.strip()
    if name in absent:
        absent.remove(name)

# Numbered absentee list, one "<n>.<name>" per line.
absentees = ''
for counter, student in enumerate(absent, start=1):
    absentees += f"{counter}.{student}\n"

# Numbered list of students without audio; "Nil" when everyone is connected.
not_audio_names = ''
for counter, student in enumerate(not_audio, start=1):
    not_audio_names += f"{counter}.{student}\n"
if not_audio_names == '':
    not_audio_names = "Nil"

total = len(names)  # was hard-coded to 5; derive it from the roster
absent_length = len(absent)
message = (
    "Good Morning Ma'am, the attendance of IX-G for today is as follows: "
    f"\nAbsentees: \n{absentees} \nStudents not connected to audio:\n{not_audio_names}."
    f"\nThe total strength today was {total - absent_length}/{total}."
)
# Copy to clipboard for easy pasting, and echo for confirmation.
pc.copy(message)
print(message)
| PythonGeek07/Attendance-_Bot | main.py | main.py | py | 1,288 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyperclip.copy",
"line_number": 47,
"usage_type": "call"
}
] |
73915818985 | from .base import RegexVocabulary, left_pad, NoWildcardsVocabulary, NoRangeFillVocabulary, NoCheckVocabulary,\
ProcedureVocabulary, ModifierVocabulary
import re
from itertools import product
# Decomposes a code into (leading letters, digits, trailing letters).
_hcpcs_split_regex = re.compile('^([A-Z]*)([0-9]+)([A-Z]*)$')
def hcpcs_split(code):
    """Split an HCPCS code into its leading-letter part and numeric part."""
    parts = _hcpcs_split_regex.match(code).groups()
    # parts[2] (any trailing letters) is deliberately not returned here.
    return parts[0], parts[1]
def hcpcs_join(letter_part, number_part):
    """Rebuild a 5-character HCPCS code by zero-padding the numeric part."""
    width = 5 - len(letter_part)
    padded_number = ('%.' + str(width) + 'd') % int(number_part)
    return letter_part + padded_number
class HCPCS(RegexVocabulary, NoCheckVocabulary, ProcedureVocabulary):
    """Vocabulary for 5-character HCPCS procedure codes (with '*' wildcards)."""
    vocab_name = 'HCPCS'

    def __init__(self):
        # FIX: raw string — '\*' and '\d' are regex escapes, not Python string
        # escapes; the non-raw form triggers DeprecationWarning (and will be a
        # SyntaxError in future Pythons). The pattern text is unchanged.
        RegexVocabulary.__init__(self, r'([\*ABCDEGHJKLMPQRSTVX\d][\d\*]{3}[FMTU\d\*])|([\d\*]{1,4}[FMTU\d\*])|([\d\*]{1,5})', ignore_case=True)

    def _fill_range(self, lower, upper):
        """Enumerate every code from *lower* to *upper* inclusive.

        Both endpoints must share the same leading/trailing letter affixes;
        only the numeric middle is swept.
        """
        lower_start_letter, lower_number, lower_end_letter = _hcpcs_split_regex.match(lower).groups()
        upper_start_letter, upper_number, upper_end_letter = _hcpcs_split_regex.match(upper).groups()
        assert lower_start_letter == upper_start_letter
        assert lower_end_letter == upper_end_letter
        # Numeric field width is whatever the affixes leave of the 5 chars
        # (loop-invariant; the original recomputed it every iteration).
        width = 5 - len(lower_start_letter) - len(lower_end_letter)
        result = []
        for num in range(int(lower_number), int(upper_number) + 1):
            result.append(lower_start_letter + left_pad(str(num), width) + lower_end_letter)
        return result

    # Allowed characters at each of the five code positions, used when
    # expanding '*' wildcards in _match_pattern.
    _places = ['ABCDEGHJKLMPQRSTVX0123456789'] + \
              3 * ['0123456789'] + \
              ['FMTU0123456789']

    def _match_pattern(self, pattern):
        """Expand a 5-char pattern, replacing each '*' with its position's alphabet."""
        options = []
        for i, item in enumerate(pattern):
            if item == '*':
                options.append(self._places[i])
            else:
                options.append([item])
        return map(''.join, product(*options))

    def _standardize(self, code):
        """Normalise to upper case and left-pad to the full 5-character width."""
        return left_pad(code.strip().upper(), 5)
class HCPCSModifier(RegexVocabulary, NoWildcardsVocabulary, NoRangeFillVocabulary, NoCheckVocabulary, ModifierVocabulary):
    """Vocabulary for two-character HCPCS modifier codes."""
    vocab_name = 'HCPCSMOD'

    def __init__(self):
        # FIX: raw string so '\d' reads as a regex escape, not an invalid
        # Python string escape (silences DeprecationWarning; same pattern).
        RegexVocabulary.__init__(self, r'[A-Za-z\d]{2}')

    def _standardize(self, code):
        """Normalise to upper case; modifiers are always exactly two characters."""
        result = code.strip().upper()
        assert len(result) == 2
        return result
| modusdatascience/clinvoc | clinvoc/hcpcs.py | hcpcs.py | py | 2,356 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "base.RegexVocabulary",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "base.NoCheckVocabulary",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "base.ProcedureV... |
10183845847 | from utils import evaluation_utils, embedding_utils
from semanticgraph import io
from parsing import legacy_sp_models as sp_models
from models import baselines
import numpy as np
from sacred import Experiment
import json
import torch
from torch import nn
from torch.autograd import Variable
from tqdm import *
import ast
from models.factory import get_model
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
# Single-device setup: everything runs on cuda:0 when available.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
ex = Experiment("test")
# Fixed NumPy seed for reproducible shuffling/initialisation.
np.random.seed(1)
# Presumably the index of the "P0" (no-relation) class; passed as
# empty_label to evaluate_instance_based below — TODO confirm.
p0_index = 1
def to_np(x):
    """Return a torch tensor's values as a NumPy array on the CPU."""
    detached = x.data
    return detached.cpu().numpy()
@ex.config
def main_config():
    """ Main Configurations """
    # Sacred captures every local below as a named configuration entry, so
    # these variable names must match main()'s parameter names.
    device_id = 0
    #
    model_name = "GPGNN"
    data_folder = "data/gpgnn_data/"
    save_folder = "data/models/"
    model_params = "model_params.json"
    word_embeddings = "glove.6B.50d.txt"
    train_set = "test_train.json" #"train.json"
    val_set = "test_val.json" #"validation.json"
    # a file to store property2idx
    # if is None use model_name.property2idx
    property_index = None
    learning_rate = 1e-3
    shuffle_data = True
    save_model = True
    grad_clip = 0.25
    # Restrict training to the configured GPU.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(device_id)
@ex.automain
def main(model_params, model_name, data_folder, word_embeddings, train_set, val_set, property_index, learning_rate, shuffle_data, save_folder, save_model, grad_clip):
    """Train a relation-extraction model and validate it each epoch.

    All parameters are injected by sacred from main_config. Saves the
    property2idx mapping up front and a weight checkpoint every 5 epochs.
    """
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    # `model_params` arrives as a path and is rebound to the loaded dict.
    with open(model_params) as f:
        model_params = json.load(f)
    embeddings, word2idx = embedding_utils.load(data_folder + word_embeddings)
    print("Loaded embeddings:", embeddings.shape)

    def check_data(data):
        # Warn (but don't fail) on graphs missing their vertex set.
        for g in data:
            if(not 'vertexSet' in g):
                print("vertexSet missed\n")

    training_data, _ = io.load_relation_graphs_from_file(data_folder + train_set, load_vertices=True)
    val_data, _ = io.load_relation_graphs_from_file(data_folder + val_set, load_vertices=True)
    check_data(training_data)
    check_data(val_data)
    if property_index:
        print("Reading the property index from parameter")
        # FIX: was `open(save_folder + args.property_index)` — `args` is never
        # defined in this module, so this branch always raised NameError.
        with open(save_folder + property_index) as f:
            property2idx = ast.literal_eval(f.read())
    else:
        # Build the relation-label index from the training data, plus the
        # special "P0" (no relation) label.
        _, property2idx = embedding_utils.init_random({e["kbID"] for g in training_data
                                                       for e in g["edgeSet"]} | {"P0"}, 1, add_all_zeroes=True, add_unknown=True)
    max_sent_len = max(len(g["tokens"]) for g in training_data)
    print("Max sentence length:", max_sent_len)
    # Override the observed maximum with a fixed padding length.
    max_sent_len = 36
    print("Max sentence length set to: {}".format(max_sent_len))

    # Pick the tensorisation function matching the model architecture.
    graphs_to_indices = sp_models.to_indices
    if model_name == "ContextAware":
        graphs_to_indices = sp_models.to_indices_with_real_entities_and_entity_nums_with_vertex_padding
    elif model_name == "PCNN":
        graphs_to_indices = sp_models.to_indices_with_relative_positions_and_pcnn_mask
    elif model_name == "CNN":
        graphs_to_indices = sp_models.to_indices_with_relative_positions
    elif model_name == "GPGNN":
        graphs_to_indices = sp_models.to_indices_with_real_entities_and_entity_nums_with_vertex_padding

    _, position2idx = embedding_utils.init_random(np.arange(-max_sent_len, max_sent_len), 1, add_all_zeroes=True)

    train_as_indices = list(graphs_to_indices(training_data, word2idx, property2idx, max_sent_len, embeddings=embeddings, position2idx=position2idx))
    training_data = None  # free the raw graphs once tensorised
    n_out = len(property2idx)
    print("N_out:", n_out)
    val_as_indices = list(graphs_to_indices(val_data, word2idx, property2idx, max_sent_len, embeddings=embeddings, position2idx=position2idx))
    val_data = None

    print("Save property dictionary.")
    with open(save_folder + model_name + ".property2idx", 'w') as outfile:
        outfile.write(str(property2idx))

    print("Training the model")
    print("Initialize the model")
    model = get_model(model_name)(model_params, embeddings, max_sent_len, n_out).to(device)
    # ignore_index=0 skips padding positions in the loss.
    loss_func = nn.CrossEntropyLoss(ignore_index=0).to(device)
    opt = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=learning_rate, weight_decay=model_params['weight_decay'])

    indices = np.arange(train_as_indices[0].shape[0])
    step = 0
    for train_epoch in range(model_params['nb_epoch']):
        if(shuffle_data):
            np.random.shuffle(indices)
        f1 = 0
        for i in tqdm(range(int(train_as_indices[0].shape[0] / model_params['batch_size']))):
            opt.zero_grad()
            sentence_input = train_as_indices[0][indices[i * model_params['batch_size']: (i + 1) * model_params['batch_size']]]
            entity_markers = train_as_indices[1][indices[i * model_params['batch_size']: (i + 1) * model_params['batch_size']]]
            labels = train_as_indices[2][indices[i * model_params['batch_size']: (i + 1) * model_params['batch_size']]]
            # GPGNN/PCNN take an extra third input (entity info / PCNN mask).
            if model_name == "GPGNN":
                output = model(Variable(torch.from_numpy(sentence_input.astype(int))).to(device),
                               Variable(torch.from_numpy(entity_markers.astype(int))).to(device),
                               train_as_indices[3][indices[i * model_params['batch_size']: (i + 1) * model_params['batch_size']]])
            elif model_name == "PCNN":
                output = model(Variable(torch.from_numpy(sentence_input.astype(int))).to(device),
                               Variable(torch.from_numpy(entity_markers.astype(int))).to(device),
                               Variable(torch.from_numpy(np.array(train_as_indices[3][i * model_params['batch_size']: (i + 1) * model_params['batch_size']])).float(), requires_grad=False).to(device))
            else:
                output = model(Variable(torch.from_numpy(sentence_input.astype(int))).to(device),
                               Variable(torch.from_numpy(entity_markers.astype(int))).to(device))
            loss = loss_func(output, Variable(torch.from_numpy(labels.astype(int))).view(-1).to(device))
            loss.backward()
            torch.nn.utils.clip_grad_norm(model.parameters(), grad_clip)
            opt.step()

            # Micro-averaged F1 over non-padding positions only.
            _, predicted = torch.max(output, dim=1)
            labels = labels.reshape(-1).tolist()
            predicted = predicted.data.tolist()
            p_indices = np.array(labels) != 0
            predicted = np.array(predicted)[p_indices].tolist()
            labels = np.array(labels)[p_indices].tolist()
            _, _, add_f1 = evaluation_utils.evaluate_instance_based(predicted, labels, empty_label=p0_index)
            f1 += add_f1
        print("Train f1: ", f1 / (train_as_indices[0].shape[0] / model_params['batch_size']))

        # Validation pass (no shuffling; `volatile` disables autograd on
        # the legacy torch versions this was written for).
        val_f1 = 0
        for i in tqdm(range(int(val_as_indices[0].shape[0] / model_params['batch_size']))):
            sentence_input = val_as_indices[0][i * model_params['batch_size']: (i + 1) * model_params['batch_size']]
            entity_markers = val_as_indices[1][i * model_params['batch_size']: (i + 1) * model_params['batch_size']]
            labels = val_as_indices[2][i * model_params['batch_size']: (i + 1) * model_params['batch_size']]
            if model_name == "GPGNN":
                output = model(Variable(torch.from_numpy(sentence_input.astype(int)), volatile=True).to(device),
                               Variable(torch.from_numpy(entity_markers.astype(int)), volatile=True).to(device),
                               val_as_indices[3][i * model_params['batch_size']: (i + 1) * model_params['batch_size']])
            elif model_name == "PCNN":
                output = model(Variable(torch.from_numpy(sentence_input.astype(int)), volatile=True).to(device),
                               Variable(torch.from_numpy(entity_markers.astype(int)), volatile=True).to(device),
                               Variable(torch.from_numpy(np.array(val_as_indices[3][i * model_params['batch_size']: (i + 1) * model_params['batch_size']])).float(), volatile=True).to(device))
            else:
                output = model(Variable(torch.from_numpy(sentence_input.astype(int)), volatile=True).to(device),
                               Variable(torch.from_numpy(entity_markers.astype(int)), volatile=True).to(device))
            _, predicted = torch.max(output, dim=1)
            labels = labels.reshape(-1).tolist()
            predicted = predicted.data.tolist()
            p_indices = np.array(labels) != 0
            predicted = np.array(predicted)[p_indices].tolist()
            labels = np.array(labels)[p_indices].tolist()
            _, _, add_f1 = evaluation_utils.evaluate_instance_based(
                predicted, labels, empty_label=p0_index)
            val_f1 += add_f1
        print("Validation f1: ", val_f1 /
              (val_as_indices[0].shape[0] / model_params['batch_size']))

        # save model
        if (train_epoch % 5 == 0 and save_model):
            torch.save(model.state_dict(), "{0}{1}-{2}.out".format(save_folder, model_name, str(train_epoch)))
        step = step + 1
| jack139/gp-gnn_test | train.py | train.py | py | 9,172 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
... |
6884335052 | import time
import torch
# Higher print precision helps when eyeballing loss/logit values in logs.
torch.set_printoptions(precision=7)
from addict import Dict as adict
from torch.nn import functional as F
from zerovl.core import DistHook, HookMode, WandbHook
from zerovl.core.hooks.log import LogHook
from zerovl.core.runners.builder import RUNNER
from zerovl.core.runners.epoch_runner import EpochRunner
from zerovl.utils.dist import generate_local_groups
from zerovl.tasks.clip.hooks import *
from zerovl.utils.misc import calc_topk_accuracy
from zerovl.utils import all_gather_group, logger, ENV
# apex (mixed precision) is optional; fall back silently when unavailable.
try:
    from apex import amp
except ImportError:
    pass
import numpy as np
import random
def setup_seed(seed):
    """Seed every RNG source and force deterministic cuDNN kernels.

    Needed for stable decoupled gradient accumulation (DGA).
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
@RUNNER.register_obj
class CLIP_BSGS_Runner(EpochRunner):
    """Epoch-based runner for CLIP training (BSGS variant).

    Registered with RUNNER so it can be instantiated by name from config.

    Args:
        cfg (adict): global config.
    """
    def __init__(self, cfg, data_loaders, model):
        # Base EpochRunner setup, then CLIP-specific state.
        logger.info("CLIP runner initiated")
        super(CLIP_BSGS_Runner, self).__init__(cfg, data_loaders, model)
        self._init_clip_runner()
        if data_loaders['train_dataset']:
            # Weight each training dataset by its share of the total sample
            # count; consumed by debias_train's weighted dataset sampling.
            num_samples = [len(dataset) for dataset in data_loaders['train_dataset']]
            total_num = sum(num_samples)
            self.sample_weights = [num_sample / total_num for num_sample in num_samples]
        else:
            self.sample_weights = None
    def _init_clip_runner(self):
        """Derive CLIP-specific schedule, batch-size and process-group state from cfg."""
        self.train_type = self.cfg.data.train_type
        self.total_steps = self.train_steps * self.max_epochs
        self.warmup_steps = int(self.total_steps * self.cfg.optim.lr.warmup_proportion)
        self.dist_name = self.cfg.dist.name
        self.fp16 = self.cfg.dist.fp16
        # Global batch sizes are split evenly across all processes.
        self.batch_size_train = self.cfg.data.batch_size_train // ENV.size
        self.batch_size_val = self.cfg.data.batch_size_val // ENV.size
        self.batch_size = self.cfg.data.batch_size // ENV.size
        # The larger batch sizes must be exact multiples of the smaller ones.
        assert self.batch_size_val % self.batch_size_train == 0
        assert self.batch_size % self.batch_size_val == 0
        # Negative group_size means "one group spanning all processes".
        group_size = self.cfg.loss.group_size
        if group_size < 0:
            group_size = ENV.size
        group, group_rank = generate_local_groups(group_size)
        self.rank = group_rank
        self.group = group
        if self.cfg.runner.stable_random != "none":
            assert self.cfg.data.batch_size_train == self.cfg.data.batch_size_val
            # set random seed for sampling from the same dataset.
            self.rng = np.random.default_rng(2021)
    def init_hook(self):
        """Register training/eval hooks; priorities control their firing order."""
        self.register_hook(ClipOptimizerHook(self),
                           priority='very_high', hook_mode=HookMode.TRAIN)
        self.register_hook(DistHook(self),
                           priority='very_high', hook_mode=HookMode.TRAIN)
        self.register_hook(ClipCheckpointHook(self),
                           priority='low', hook_mode=HookMode.TRAIN)
        self.register_hook(LogHook(self),
                           priority='very_low')
        # single_eval runs retrieval evaluation on one process only.
        if self.cfg.data.single_eval:
            self.register_hook(RetrievalLocalEvalHook(self),
                               priority='very_low', hook_mode=HookMode.TRAIN)
        else:
            self.register_hook(RetrievalEvalHook(self),
                               priority='very_low', hook_mode=HookMode.TRAIN)
        if self.cfg.wandb.enable:
            self.register_hook(WandbHook(self),
                               priority='lowest', hook_mode=HookMode.TRAIN)
def input_preprocess(self, batch, mode='train'):
batch = {k: v.cuda(ENV.device, non_blocking=True) for k,v in batch.items() if k not in ['caption', 'name']}
return batch
def create_batch_dict(self, batch, mode='train'):
batch_dict = adict()
if mode == 'train':
batch_dict['image'], batch_dict['input_ids'], batch_dict['attention_mask'], \
batch_dict['caption'] = batch
else:
batch_dict['image'], batch_dict['input_ids'], batch_dict['attention_mask'], \
batch_dict['caption'], batch_dict['image_id'], batch_dict['caption_id'] = batch
return batch_dict
    def train(self, data_iter, epoch_state, train_steps=None):
        """Run one training epoch over the first train loader, with periodic validation."""
        if data_iter is None:
            return
        self.model.train()
        # Only the first loader is used here; multi-dataset training goes
        # through sequential_train / debias_train instead.
        data_iter = data_iter[0]
        self.call_hook('_before_train_epoch', epoch_state)
        for batch in data_iter:
            step_state = adict()
            batch = self.create_batch_dict(batch)
            batch = self.input_preprocess(batch)
            # NOTE(review): the cap is checked after the batch was already
            # built and moved to GPU, so the last fetched batch is discarded.
            if train_steps and epoch_state.inner_step > train_steps:
                break
            self.call_hook('_before_train_step', epoch_state, step_state)
            step_state.batch_output = self.batch_processor(batch)
            self.call_hook('_after_train_step', epoch_state, step_state)
            # Periodic validation over every configured validation set.
            if self.val_dataloader_list and self.val_interval_steps > 0 and \
                ((self.step + 1) % self.val_interval_steps == 0 or (self.step + 1) == self.total_steps):
                for val_dataloader, val_steps, val_dataset_name in zip(self.val_dataloader_list, self.val_steps_list, self.cfg.data.valid_name):
                    self.val(val_dataloader, val_steps, val_dataset_name)
                self.model.train()
            self.step += 1
            epoch_state.inner_step += 1
        self.call_hook('_after_train_epoch', epoch_state)
    def sequential_train(self, data_iters, epoch_state, train_steps=None):
        """Run one training epoch over several loaders consumed back to back."""
        if data_iters is None:
            return
        self.model.train()
        self.call_hook('_before_train_epoch', epoch_state)
        for data_iter in data_iters:
            for batch in data_iter:
                step_state = adict()
                batch = self.create_batch_dict(batch)
                batch = self.input_preprocess(batch)
                # NOTE(review): `break` only leaves the inner loop, so the
                # step cap restarts for each subsequent loader; the log line
                # below looks like a leftover debug message.
                if train_steps and epoch_state.inner_step > train_steps:
                    logger.emph('breaked??')
                    break
                self.call_hook('_before_train_step', epoch_state, step_state)
                step_state.batch_output = self.batch_processor(batch)
                self.call_hook('_after_train_step', epoch_state, step_state)
                # Periodic validation over every configured validation set.
                if self.val_dataloader_list and self.val_interval_steps > 0 and \
                    ((self.step + 1) % self.val_interval_steps == 0 or (self.step + 1) == self.total_steps):
                    for val_dataloader, val_steps, val_dataset_name in zip(self.val_dataloader_list, self.val_steps_list, self.cfg.data.valid_name):
                        self.val(val_dataloader, val_steps, val_dataset_name)
                    self.model.train()
                self.step += 1
                epoch_state.inner_step += 1
        self.call_hook('_after_train_epoch', epoch_state)
    def debias_train(self, data_loaders, epoch_state, train_steps=None):
        """Run one epoch drawing each step's batch from one of several
        loaders, chosen at random with probabilities ``self.sample_weights``
        (dataset-debiasing sampling). Exhausted loaders are restarted.
        """
        if data_loaders is None:
            return
        self.model.train()
        data_iters = [iter(data_loader) for data_loader in data_loaders]
        num_datasets = len(data_loaders)
        self.call_hook('_before_train_epoch', epoch_state)
        for i in range(train_steps):
            # Pick which dataset supplies this step's batch.
            iter_index = self.rng.choice(num_datasets, p=self.sample_weights)
            try:
                data_iter = data_iters[iter_index]
                batch = next(data_iter)
            except StopIteration:
                # Loader exhausted: recreate its iterator and retry once.
                data_iters[iter_index] = iter(data_loaders[iter_index])
                data_iter = data_iters[iter_index]
                batch = next(data_iter)
            step_state = adict()
            batch = self.create_batch_dict(batch)
            batch = self.input_preprocess(batch)
            if train_steps and epoch_state.inner_step > train_steps:
                break
            self.call_hook('_before_train_step', epoch_state, step_state)
            step_state.batch_output = self.batch_processor(batch)
            self.call_hook('_after_train_step', epoch_state, step_state)
            # Step-interval validation (also triggered on the final step).
            if self.val_dataloader_list and self.val_interval_steps > 0 and \
                ((self.step + 1) % self.val_interval_steps == 0 or (self.step + 1) == self.total_steps):
                for val_dataloader, val_steps, val_dataset_name in zip(self.val_dataloader_list, self.val_steps_list, self.cfg.data.valid_name):
                    self.val(val_dataloader, val_steps, val_dataset_name)
                self.model.train()
            self.step += 1
            epoch_state.inner_step += 1
        self.call_hook('_after_train_epoch', epoch_state)
    def val(self, data_loader, val_steps=None, val_dataset_name=None):
        """Run one validation pass over ``data_loader``.

        Switches the model to eval mode (the caller restores train mode),
        optionally limited to ``val_steps`` batches. With
        ``cfg.data.single_eval`` only rank 0 evaluates.
        """
        if data_loader is None:
            return
        self.model.eval()
        if self.cfg.data.single_eval and ENV.rank != 0: return
        epoch_state = adict()
        epoch_state.inner_step = 0
        epoch_state.data_loader = data_loader
        epoch_state.val_steps = val_steps
        epoch_state.dataset_name = val_dataset_name
        self.call_hook('_before_val_epoch', epoch_state)
        for batch in data_loader:
            # init step state dict
            step_state = adict()
            batch = self.create_batch_dict(batch, mode='valid')
            batch = self.input_preprocess(batch, mode='valid')
            if val_steps and epoch_state.inner_step >= val_steps:
                break
            self.call_hook('_before_val_step', epoch_state, step_state)
            with torch.no_grad():
                # embeddings=True: return raw embeddings instead of a loss.
                step_state.batch_output = self.batch_processor(batch, embeddings=True)
            self.call_hook('_after_val_step', epoch_state, step_state)
            epoch_state.inner_step += 1
        self.call_hook('_after_val_epoch', epoch_state)
    def run(self):
        """Main training loop: dispatch epochs by ``train_type`` and run
        epoch-interval validation until ``max_epochs`` is reached.
        """
        # Logging for start running
        logger.info(f'=> Start Running')
        # data loaders
        train_dataloader = self.train_dataloader
        val_dataloader_list = self.val_dataloader_list
        val_steps_list = self.val_steps_list
        self.call_hook('before_run')
        inner_step = 0
        if self.checkpoint:
            # Resume mid-epoch from a checkpoint.
            inner_step = self.checkpoint['meta']['inner_step']
        while self.epoch < self.max_epochs:
            # init train epoch state dict
            epoch_state = adict()
            epoch_state.inner_step = inner_step
            epoch_state.data_loader = train_dataloader
            # reset inner_step after first epoch from resume
            inner_step = 0
            if self.train_type == 'shuffle':
                self.train(train_dataloader, epoch_state, self.train_steps)
            elif self.train_type == 'sequential':
                self.sequential_train(train_dataloader, epoch_state, self.train_steps)
            elif self.train_type == 'debias':
                self.debias_train(train_dataloader, epoch_state, self.train_steps)
            else:
                raise NotImplementedError
            self.epoch += 1
            # Epoch-interval validation (only when step-interval is off).
            if self.epoch % self.val_interval == 0 and val_dataloader_list and self.val_interval_steps < 0:
                for val_dataloader, val_steps in zip(val_dataloader_list, val_steps_list):
                    # NOTE(review): bare except silently falls back to the
                    # raw loader — consider catching AttributeError only.
                    try:
                        val_data_iter = val_dataloader.get_iterator(0, 0)
                    except:
                        val_data_iter = val_dataloader
                    self.val(val_data_iter, val_steps)
        time.sleep(1) # wait for some hooks like loggers to finish
        self.call_hook('after_run')
    def batch_processor(self, data_batch, embeddings=False):
        """Two-pass, memory-efficient contrastive step (gradient caching).

        Pass 1 (no_grad): encode the batch in sub-batches of
        ``batch_size_train``, all-gather embeddings across ranks, compute the
        InfoNCE loss/accuracies and the analytic gradients w.r.t. the local
        embeddings (``left_I``/``left_T``). Pass 2: re-encode the same
        sub-batches WITH grad (random state replayed via ``setup_seed``) and
        backprop the cached gradients through ``(left * emb).sum()``.

        With ``embeddings=True`` (validation) it instead returns the raw
        image/text embeddings plus ids, with no backward pass.
        """
        if self.cfg.runner.stable_random != "none":
            # Seed from the global step so pass 2 replays pass 1 exactly
            # (dropout/augmentation must match between the two passes).
            stable_random_seed = self.step
            setup_seed(stable_random_seed)
        mixup_kwargs = {}
        if self.model.module.use_mixup and not embeddings:
            mixup_kwargs = self.model.module.get_mixup_kwargs(mixup_kwargs)
        with torch.no_grad():
            if embeddings:
                # Validation path: one full forward, return embeddings + ids.
                _image_embeddings, _text_embeddings, temp = self.model(data_batch, embeddings='all', **mixup_kwargs)
                output = {'image_embeddings': _image_embeddings,
                          'text_embeddings': _text_embeddings,
                          'image_id': data_batch['image_id'],
                          'caption_id': data_batch['caption_id']
                          }
                return output
            # ---- Pass 1: gradient-free encoding in sub-batches ----
            image_embeddings_local, text_embeddings_local = [], []
            for _idx_l in range(0, self.batch_size, self.batch_size_train):
                _data_batch = {"image": data_batch["image"][_idx_l: _idx_l + self.batch_size_train],
                               "input_ids": data_batch["input_ids"][_idx_l: _idx_l + self.batch_size_train],
                               "attention_mask": data_batch["attention_mask"][_idx_l: _idx_l + self.batch_size_train]
                               }
                if self.scaler:
                    with torch.cuda.amp.autocast():
                        # (i', d), (t', d)
                        _image_embeddings, _text_embeddings, temp = self.model(_data_batch, embeddings='all', **mixup_kwargs)
                else:
                    # (i', d), (t', d)
                    _image_embeddings, _text_embeddings, temp = self.model(_data_batch, embeddings='all', **mixup_kwargs)
                image_embeddings_local.append(_image_embeddings)
                text_embeddings_local.append(_text_embeddings)
            # (i, d), (t, d)
            image_embeddings_local = torch.cat(image_embeddings_local, dim = 0)
            text_embeddings_local = torch.cat(text_embeddings_local, dim = 0)
            temp_sqrt = torch.sqrt(temp)
            # Gather every rank's embeddings for the global similarity matrix.
            # (i, d)
            image_embeddings_global = torch.cat(all_gather_group(image_embeddings_local, self.group), 0)
            # (t, d)
            text_embeddings_global = torch.cat(all_gather_group(text_embeddings_local, self.group), 0)
            s_i2t_nm = image_embeddings_global @ text_embeddings_local.T
            s_i2t_mn = image_embeddings_local @ text_embeddings_global.T
            # (i, t'), (i', t)
            s_i2t_nm /= temp
            s_i2t_mn /= temp
            # Matching pairs sit on this rank's diagonal slice. (i), (t)
            targets_i2t = torch.arange(self.batch_size * ENV.rank, self.batch_size * (ENV.rank + 1), device = ENV.device)
            targets_t2i = torch.arange(self.batch_size * ENV.rank, self.batch_size * (ENV.rank + 1), device = ENV.device)
            # Scalar loss computed for logging only (no autograd here).
            loss = 0.5 * (F.cross_entropy(s_i2t_mn, targets_i2t) + F.cross_entropy(s_i2t_nm.T, targets_t2i)).cpu().item()
            y_i2t = torch.eye(self.cfg.data.batch_size, device=image_embeddings_local.device)
            if self.model.module.use_mixup and not embeddings:
                # Mixup: soft targets mix the identity with a flipped block
                # diagonal pairing each sample with its mixup partner.
                y_i2t_flip = torch.block_diag(*[torch.eye(self.batch_size_train).flip(0)] * (self.cfg.data.batch_size // self.batch_size_train)).to(device=image_embeddings_local.device)
                alpha = mixup_kwargs['image_alpha'] if 'image_alpha' in mixup_kwargs else mixup_kwargs['text_alpha']
                y_i2t = alpha * y_i2t + (1-alpha) * y_i2t_flip
            y_i2t = y_i2t[self.batch_size * ENV.rank: self.batch_size * (ENV.rank + 1), :]
            # Softmax denominators, assembled globally. (i'), (t')
            s_i2t_esum_local = torch.sum(torch.exp(s_i2t_mn), dim = 1)
            s_t2i_esum_local = torch.sum(torch.exp(s_i2t_nm.T), dim = 1)
            # (i), (t)
            s_i2t_esum = torch.cat(all_gather_group(s_i2t_esum_local, self.group), 0).unsqueeze(dim = 1)
            s_t2i_esum = torch.cat(all_gather_group(s_t2i_esum_local, self.group), 0).unsqueeze(dim = 1)
            p_i2t_mn = torch.exp(s_i2t_mn) / s_i2t_esum[self.batch_size * ENV.rank: self.batch_size * (ENV.rank + 1), :]
            p_t2i_nm = torch.exp(s_i2t_mn.T) / s_t2i_esum
            # Analytic dLoss/dEmbedding for the local image embeddings.
            left_I = (p_i2t_mn + p_t2i_nm.T - 2 * y_i2t) @ text_embeddings_global
            p_i2t_nm = torch.exp(s_i2t_nm) / s_i2t_esum
            p_t2i_mn = torch.exp(s_i2t_nm.T) / s_t2i_esum[self.batch_size * ENV.rank: self.batch_size * (ENV.rank + 1), :]
            left_T = (p_i2t_nm.T + p_t2i_mn - 2 * y_i2t) @ image_embeddings_global
            # (i, d) = (1) * ((i, t) @ (t, d))
            left_I /= temp_sqrt
            left_T /= temp_sqrt
            i2t_acc = calc_topk_accuracy(p_i2t_mn, targets_i2t)[0] # (1)
            t2i_acc = calc_topk_accuracy(p_t2i_mn, targets_t2i)[0] # (1)
        # ---- Pass 2: re-encode with grad and apply the cached gradients ----
        if self.cfg.runner.stable_random != "none":
            # Replay the same random state as pass 1.
            setup_seed(stable_random_seed)
        for _idx_l in range(0, self.batch_size, self.batch_size_train):
            _data_batch = {"image": data_batch["image"][_idx_l: _idx_l + self.batch_size_train],
                           "input_ids": data_batch["input_ids"][_idx_l: _idx_l + self.batch_size_train],
                           "attention_mask": data_batch["attention_mask"][_idx_l: _idx_l + self.batch_size_train]
                           }
            # (i', d), (t', d)
            _left_I = left_I[_idx_l: _idx_l + self.batch_size_train]
            _left_T = left_T[_idx_l: _idx_l + self.batch_size_train]
            if self.scaler:
                with torch.cuda.amp.autocast():
                    # (i', d), (t', d)
                    _image_embeddings, _text_embeddings, temp = self.model(_data_batch, embeddings='all', **mixup_kwargs)
            else:
                # (i', d), (t', d)
                _image_embeddings, _text_embeddings, temp = self.model(_data_batch, embeddings='all', **mixup_kwargs)
            temp_sqrt = torch.sqrt(temp)
            # Surrogate whose gradient equals the cached left_I/left_T. (i')
            loss_temp_i = _left_I * _image_embeddings
            loss_temp_t = _left_T * _text_embeddings
            loss_temp = (loss_temp_i + loss_temp_t).sum() / 2 / self.batch_size
            loss_temp = loss_temp / temp_sqrt
            if self.dist_name == 'apex':
                with amp.scale_loss(loss_temp, self.optimizer) as scaled_loss:
                    scaled_loss.backward()
            elif self.dist_name == 'torch' and self.fp16:
                self.scaler.scale(loss_temp).backward()
            else:
                loss_temp.backward()
        output = {'loss': loss,
                  'temperature': temp,
                  'i2t_acc': i2t_acc,
                  't2i_acc': t2i_acc,
                  'lr': self.optimizer.param_groups[0]['lr']
                  }
        self.state.log_metrics.add_store('i2t_acc', i2t_acc)
        self.state.log_metrics.add_store('t2i_acc', t2i_acc)
        self.state.log_metrics.add_store('loss', loss)
        return output
| zerovl/ZeroVL | zerovl/tasks/clip/clip_bsgs_runner.py | clip_bsgs_runner.py | py | 18,843 | python | en | code | 39 | github-code | 36 | [
{
"api_name": "torch.set_printoptions",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "torch.manual_seed",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.cuda.manual_seed_all",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "to... |
26628181853 | import pickle
from flask import Flask,request,app,jsonify,url_for,render_template
import nltk, re, string
from nltk.corpus import stopwords, twitter_samples
from sklearn.linear_model import LogisticRegression
import pickle
from sklearn.feature_extraction.text import CountVectorizer
from Utilities import process_tweet
from nltk.corpus import stopwords
app=Flask(__name__)
# NOTE(review): hard-coded secret key — load from env/config in production.
app.config['SECRET_KEY'] = 'mysecretkey'
# Pre-trained logistic-regression sentiment model and the CountVectorizer
# fitted alongside it at training time.
logistic=pickle.load(open('model.pkl','rb'))
cv=pickle.load(open('cv.pkl','rb'))
def predict_sentiment(tweet):
    """Classify a raw tweet string as positive/negative/neutral sentiment.

    The tweet is cleaned with ``process_tweet``, vectorised with the fitted
    ``CountVectorizer`` and scored by the pre-trained logistic model.
    Returns a human-readable sentiment label.
    """
    tweet = process_tweet(tweet)
    tweet = cv.transform([tweet])
    label = logistic.predict(tweet)  # predict once instead of twice
    if label == 1:
        sentiment = 'Positive Sentiment'
    elif label == 0:
        sentiment = 'Negative Sentiment'  # fixed misspelling ("Negetive")
    else:
        sentiment = 'Neutral Sentiment'
    return sentiment
@app.route('/',methods=['GET','POST'])
def index():
    # Landing page containing the tweet submission form.
    return render_template('index.html')
@app.route('/prediction',methods=['GET','POST'])
def prediction():
    # Classify the submitted tweet and render the result page.
    sentiment=predict_sentiment(request.form['tweet'])
    print(sentiment)
    return render_template('prediction.html', prediction_text="Your tweet is of {}".format(sentiment))
if __name__=="__main__":
    # debug=True enables the reloader/debugger; disable in production.
    app.run(debug=True)
| Sourav9827/Sentiment-Analysis | app.py | app.py | py | 1,224 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.app",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.app.config",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "flask.app",
"line_num... |
35383679834 | #!/usr/bin/env python3
from sys import exit
from collections import Counter
import random
from statistics import mean
from TALinputs import TALinput
from multilanguage import Env, Lang, TALcolors
import mastermind_utilities as Utilities
# METADATA OF THIS TAL_SERVICE:
# Service parameters parsed from the TALight invocation.
args_list = [
    ('max_num_attempts',int),
    ('num_matches',int),
    ('num_pegs',int),
    ('num_colors',int),
]
ENV =Env(args_list)
TAc =TALcolors(ENV)
# Lang renders localized feedback; the lambda evaluates f-string templates.
LANG=Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
# START CODING YOUR SERVICE:
# Use a random seed unless the client supplied a specific one.
if ENV["seed"] == 'random_seed':
    seed = random.randint(100000,999999)
else:
    seed = int(ENV["seed"])
print(LANG.render_feedback("assigned-instance", f"# The assigned instance is:\n# number of pegs: {ENV['num_pegs']}\n# number of colors: {ENV['num_colors']}\n# Seed: "), end="")
TAc.print(seed, "yellow")
print(LANG.render_feedback("prompt", f"# Enter your first attempt which must be a sequence of {ENV['num_pegs']} colors separated by spaces.\n# example: \n# 1 4 3 \n# The server will respond with as many 'b' as the colors in the correct position and as many 'w' as the correct colors. \n"))
maxNumAttempts = ENV["max_num_attempts"]
numPegs = ENV["num_pegs"]
numColors = ENV["num_colors"]
# Attempts used in each *won* match, for the final statistics.
sumAttempts = []
matchWin = 0
matchDone = 0
# Play the requested number of matches, each with a fresh random code.
while matchDone < ENV["num_matches"]:
    matchDone += 1
    seed = random.randint(100000, 999999)
    print(LANG.render_feedback("new-match", f"# match {matchDone} of {ENV['num_matches']}. Seed: "), end="")
    TAc.print(seed, "yellow")
    secretCode = Utilities.generateRandomPegsList(numPegs, numColors, seed)
    count = 0
    bufferOld = None
    buffer = None
    while count < maxNumAttempts:
        count += 1
        bufferOld = buffer
        # Read one guess: numPegs tokens, each a digit in [1, numColors].
        buffer = TALinput(
            str,
            num_tokens=numPegs,
            regex=r"^([1-" + str(numColors) + "])$",
            regex_explained="a sequence of number from 1 to " + str(numColors) + " separated by spaces. An example is: '4 2 1'.",
            TAc=TAc
        )
        guessedCode = [int(i) for i in buffer]
        # rightColor = correct colors in wrong spots ('w'),
        # rightPositonAndColor = exact matches ('b').
        rightColor, rightPositonAndColor = Utilities.calculateScore(secretCode, guessedCode)
        result = Utilities.getStringOfResult(rightColor, rightPositonAndColor)
        print(result)
        if rightPositonAndColor == numPegs and rightColor == 0:
            TAc.print(LANG.render_feedback("right-secret-code", f"# You found the secret code in {count} attempts.\n"), "green", ["bold"])
            sumAttempts.append(count)
            matchWin += 1
            break
    # Attempts exhausted: re-score the last guess to decide win/lose.
    if count >= maxNumAttempts:
        guessedCode = [int(i) for i in buffer]
        rightColor, rightPositonAndColor = Utilities.calculateScore(secretCode, guessedCode)
        if rightPositonAndColor == numPegs:
            TAc.print(LANG.render_feedback("right-secret-code", f"# You found the secret code in {count} attempts.\n"), "green", ["bold"])
            sumAttempts.append(count)
            matchWin += 1
        else:
            TAc.print(LANG.render_feedback("wrong-secret-code", f"# You didn't find the secret code, the secret code is [{' '.join(map(str, secretCode))}].\n"), "red", ["bold"])
print('#end')
# Guard the statistics: mean()/max() raise on an empty list when the player
# won no matches at all.
if sumAttempts:
    print(LANG.render_feedback("matches-statistics", f"# Statistics:\n# Matches won: {matchWin}/{ENV['num_matches']}\n# avg number of attempts (over won matches): {mean(sumAttempts)}\n# maximum number of attempts (over won matches): {max(sumAttempts)}"))
else:
    print(LANG.render_feedback("matches-statistics", f"# Statistics:\n# Matches won: {matchWin}/{ENV['num_matches']}"))
| romeorizzi/TALight | example_problems/tutorial/mastermind/services/eval_driver.py | eval_driver.py | py | 3,432 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "multilanguage.Env",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "multilanguage.TALcolors",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "multilanguage.Lang",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "random.r... |
18433559527 | import pygame
import random
import time
import turtle
# Class thể hiện đối tượng Câu hỏi
# Một đối tượng Question gồm có 2 fields:
# - question: đề bài
# - answer: đáp án
class Question:
    """One quiz item: ``question`` is the prompt text, ``answer`` the
    expected reply (stored as given, compared as a string by the game)."""
    def __init__(self, question, answer):
        self.question = question
        self.answer = answer
# Class thể hiện trạng thái hiện tại của trò chơi
class GameState:
    """Mutable state of the current play-through."""
    # Current score.
    score = 0
    # Current round number (1-based).
    roundnum = 1
    # Restart the stopwatch: store the current wall-clock time.
    def reset_timer(self):
        self.start_time = time.time()
    # Return the answer time in seconds: current clock time minus the
    # stored start_time.
    def get_timer(self):
        return time.time() - self.start_time
# The singleton object holding the game's current state.
state = GameState()
# Use pygame to play audio.
def play_music(file):
    # Play background music (streamed; one track at a time).
    pygame.mixer.init()
    pygame.mixer.music.load(file)
    pygame.mixer.music.play()
def play_sound(file):
    # Play a short sound effect (loaded fully into memory).
    pygame.mixer.init()
    sound = pygame.mixer.Sound(file)
    sound.play()
# Draw the character image; a dedicated turtle pen keeps it separate from
# the other drawings.
avatar = turtle.Turtle()
def draw_avatar(image):
    # turtle.addshape must be called before an image can be used as a shape.
    turtle.addshape(image)
    avatar.clear()
    avatar.penup()
    avatar.setposition(350, -100)
    # Note: turtle can only display images in .gif format.
    avatar.shape(image)
# Dedicated pen for drawing the elapsed-time display.
pen_timer = turtle.Turtle()
def draw_timer():
    # Hide the turtle cursor.
    pen_timer.hideturtle()
    # Lift the pen.
    pen_timer.penup()
    # Clear first, so successive values are not drawn on top of each other.
    pen_timer.clear()
    # Set the color.
    pen_timer.color('green')
    # Position the pen.
    pen_timer.setposition(-240, 170)
    # Write the elapsed seconds on screen.
    pen_timer.write(round(state.get_timer()), font=get_font(20))
    # Redraw the timer again after 1000 ms (1 second).
    turtle.Screen().ontimer(draw_timer, 1000)
# Khai báo dữ liệu câu hỏi và đáp án
def read_data(round_num):
    """Load the hand-written questions and answers for ``round_num``.

    Question files are named ``r<round>q<i>.txt`` and answer files
    ``r<round>a<i>.txt``; all are read as UTF-8 (the texts are Vietnamese).
    Returns a list of Question objects.
    """
    # Number of questions per round.
    num_questions = 3
    data = []
    # range(1, x + 1) iterates 1, 2, ..., x — files are numbered from 1.
    for i in range(1, num_questions + 1):
        # Context managers guarantee the files are closed even on error
        # (the original left them open if read() raised).
        with open('r' + str(round_num) + 'q' + str(i) + '.txt', 'r', encoding='utf-8') as f:
            question = f.read()
        with open('r' + str(round_num) + 'a' + str(i) + '.txt', 'r', encoding='utf-8') as f:
            answer = f.read()
        # Wrap the pair in a Question and collect it.
        data.append(Question(question, answer))
    return data
# Sinh ra các câu hỏi tính nhẩm ngẫu nhiên Siêu Trí Tuệ
# Generate random mental-arithmetic questions ("Sieu Tri Tue").
def generate_math_questions(round_num):
    # The question list starts empty.
    data = []
    # Number of questions to generate.
    num_questions = 3
    # Two operators: addition and multiplication.
    operators = ["+", "x"]
    # Operand bounds per round. NOTE(review): despite the names, these are
    # value bounds, not digit counts (round 1 -> 1..9, 2 -> 10..99, ...).
    if round_num == 1:
        max_digits = 9
        min_digits = 1
    elif round_num == 2:
        max_digits = 99
        min_digits = 10
    else:
        max_digits = 999
        min_digits = 100
    for i in range(num_questions):
        # Pick two random operands inside the round's range.
        a = random.randint(min_digits, max_digits)
        b = random.randint(min_digits, max_digits)
        # Pick a random operator.
        op = random.choice(operators)
        # Build the question text.
        question = str(a) + " " + op + " " + str(b) + " = ?"
        # Compute the answer.
        if op == "+":
            answer = a + b
        elif op == "x":
            answer = a * b
        # Add the question to the list.
        data.append(Question(question, str(answer)))
    # Return the generated list of questions.
    return data
# Trả về font chữ với kích thước được cho.
def get_font(font_size):
    """Return an (family, size, style) tuple suitable for turtle's write()."""
    family, style = "Arial", "normal"
    return (family, font_size, style)
# Khởi tạo cây bút chuyên dùng để vẽ Điểm số.
# Dedicated pen for drawing the score panel.
pen_score = turtle.Turtle()
def draw_score():
    # Hide the turtle cursor.
    pen_score.hideturtle()
    # Lift the pen.
    pen_score.penup()
    pen_score.clear()
    # Round label, top-right in red.
    pen_score.color('red')
    pen_score.setposition(300, 175)
    temp ="ROUND: "+ str(state.roundnum)
    pen_score.write(temp, font=get_font(30))
    # Score value, just below in white.
    pen_score.color('white')
    pen_score.setposition(340, 110)
    pen_score.write(state.score, font=get_font(40))
# Dedicated pen for drawing the round banner.
pen_round = turtle.Turtle()
def draw_round_number(round_num):
    """Draw the given round number in the top-right corner.

    Fix: the original ignored its ``round_num`` parameter and always
    rendered the global ``state.roundnum`` instead.
    """
    pen_round.hideturtle()
    pen_round.penup()
    pen_round.clear()
    pen_round.color('red')
    pen_round.setposition(300, 175)
    temp ="ROUND: "+ str(round_num)
    pen_round.write(temp, font=get_font(30))
def ask_question(question):
    # Show one question on screen and in the console, start the timer,
    # collect the player's answer via a dialog and grade it.
    print("***************************")
    print(question.question)
    turtle.clear()
    turtle.hideturtle()
    turtle.penup()
    turtle.setposition(-240, 20)
    turtle.write(question.question, font=get_font(15))
    draw_score()
    draw_avatar('KimNguu-normal.gif')
    # Start timing from the moment the question is displayed.
    state.reset_timer()
    result = turtle.textinput("Siêu Lập Trình", "Câu trả lời của bạn là gì?\n")
    check_result(result, question.answer)
def check_result(result, answer):
    # Grade the player's answer: +10 for a correct answer plus a +5 bonus
    # for answering in under 5 seconds; play feedback sound and avatar.
    time_taken = state.get_timer()
    if time_taken < 5:
        bonus = 5
    else:
        bonus = 0
    # NOTE(review): reads the module-level loop variable ``round_number``
    # rather than a parameter — confirm this coupling is intended.
    state.roundnum =round_number
    if result == answer:
        state.score += 10 + bonus
        play_sound("correct_answer.wav")
        draw_avatar('KimNguu-correct.gif')
        print("Đúng rồi")
    else:
        play_sound("wrong_answer.wav")
        draw_avatar('KimNguu-wrong.gif')
        print("Sai rồi")
    time.sleep(0.5)
    print("Thời gian trả lời câu hỏi là:", round(time_taken), "giây")
    if bonus > 0:
        print("Bạn nhận được điểm thưởng là", bonus, "vì trả lời nhanh")
    print("Điểm hiện tại của bạn là: ", state.score)
def setup_turtle():
    # Create the 1200x600 game window with the background image and title.
    screen = turtle.Screen()
    screen.setup(1200, 600)
    screen.bgpic('background.gif')
    turtle.title("Siêu lập trình")
# Set up the game window.
setup_turtle()
# Start the background music.
play_music("music.wav")
# Start and draw the timer.
state.reset_timer()
draw_timer()
# Play three rounds; each mixes file-based and generated math questions.
round_number = 1
while round_number < 4:
    #draw_round_number(round_number)
    data = read_data(round_number) + generate_math_questions(round_number)
    for question in data:
        ask_question(question)
    round_number += 1
| aitomatic/contrib | src/aito/util/finalproject.py | finalproject.py | py | 7,485 | python | vi | code | 2 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.init",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number... |
11580214091 | import os
import time
import random
import sys
import time
import math
from datetime import datetime
from getpass import getpass
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
Hour = 3600  # seconds per hour
Minute = 60  # seconds per minute
TotalNum=1  # running total of footprints left across all repeats
NumForOnePage = 16  # profiles per search page; the page index wraps here
def IntTryParse(val):
    """Return int(val) when the value parses as an integer; otherwise
    return val unchanged (C#-style TryParse)."""
    try:
        parsed = int(val)
    except ValueError:
        parsed = val
    return parsed
def LeaveFootPrint(repeatStr):
    # Visit 500-700 random profile pages to leave "footprints" (profile
    # views). ``repeatStr`` is the progress label printed with each visit.
    global TotalNum
    num=1
    currentNum = 1
    maxNum = random.randint(500,700)
    print("Leave %s footprints"%str(maxNum))
    while currentNum < maxNum:
        src= "https://pairs.lv/#/search/one/%s"%str(num)
        driver.get(src)
        print("Current: {0}/{1}, Total: {2}, {3}".format(str(currentNum), maxNum, str(TotalNum), repeatStr))
        # Wrap the page index back to 1 after NumForOnePage profiles.
        if num == NumForOnePage:
            num = 0
        num += 1
        currentNum += 1
        TotalNum += 1
        # Random 3-7 s delay between visits.
        time.sleep(random.randint(3,7))
def ShowElapsedTime(startTime):
    # Print the elapsed time since ``startTime`` as hours/minutes/seconds
    # (Japanese message).
    elapsed_time = time.time() - startTime
    hour = math.floor(elapsed_time / Hour)
    elapsedHour = hour * Hour
    minite = math.floor((elapsed_time - elapsedHour) / Minute)
    # NOTE(review): slicing the float string to 2 chars can yield e.g.
    # "5." for 5.3 s — consider formatting with %.0f instead.
    sec = str(elapsed_time - elapsedHour - minite * Minute)[:2]
    print("所要時間は「%s時間%s分%s秒」"%(str(hour), str(minite), str(sec)))
def TakeRest():
    """Pause 15-30 minutes, parking the browser on the search page.

    Fix: the end time is now computed with timedelta so the minutes roll
    over correctly (the original printed e.g. "9:75" when
    ``minute + minutesToRest`` exceeded 59).
    """
    from datetime import timedelta  # local import; only needed here
    minutesToRest = random.randint(15,30)
    print("Take a rest for {0} minutes".format(str(minutesToRest)))
    endTime = datetime.now() + timedelta(minutes=minutesToRest)
    print("will end %s:%s"%(str(endTime.hour), str(endTime.minute)))
    driver.get("https://pairs.lv/#/search/grid/1")
    time.sleep(minutesToRest * 60)
def GetRepeatString(counter, maxRepeatNum):
    """Build the progress label for the repeat loop.

    0 repeats -> "none" (Japanese), a positive limit -> "counter/limit",
    a negative limit -> "infinite" (Japanese).
    """
    if maxRepeatNum == 0:
        suffix = "なし"
    elif maxRepeatNum < 0:
        suffix = "無限"
    else:
        suffix = "{0}/{1}".format(str(counter), str(maxRepeatNum))
    return "Repeat: " + suffix
# Launch Chrome and open the pairs login page.
driver = webdriver.Chrome(r'./chromedriver.exe')
driver.get("https://pairs.lv/#/login")
# Wait up to 10 s for the registration button, then click it via JS.
element = WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.ID, "registerBtn1"))
)
if element is not None:
    print(element)
    driver.execute_script("arguments[0].click();", element)
key = input('pairsのトップページが出たらrepeat回数を指定してください(マイナスの値は無限ループ)')
# Re-prompt until the user types an integer.
while not isinstance(IntTryParse(key), int):
    print("数字を入力してください")
    key = input()
print("Start!")
maxRepeatNum = int(key)
counter = 1
while True:
    startTime = time.time()
    print("%s回目"%str(counter))
    LeaveFootPrint(GetRepeatString(counter, maxRepeatNum))
    ShowElapsedTime(startTime)
    # Non-negative limit: stop once exceeded; negative: loop forever.
    if (maxRepeatNum > -1 and counter > maxRepeatNum):
        print("End")
        break
    TakeRest()
    counter += 1
counter += 1 | tomo-jp/pairsBot | pairs_automation.py | pairs_automation.py | py | 2,928 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random.randint",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number... |
13988027867 | import numpy as np
from scipy.interpolate import interp1d
from skfmm import travel_time, distance
from scipy.signal import resample
def resample2d( x, shape=() ):
    """FFT-resample a 2-D array to ``shape`` = (rows, cols).

    Resamples axis 0 then axis 1 with scipy.signal.resample. Raises
    ValueError when ``shape`` is empty. The default is now an immutable
    tuple (the original used a mutable list default, a classic pitfall);
    the empty-default behaviour — raising — is unchanged.
    """
    if len(shape)==0:
        raise ValueError('shape should not be empty.')
    rows = resample(x, shape[0], axis=0)
    return resample(rows, shape[1], axis=1)
def transform_normal_scores(scores, nscore):
    """
    map standard quantiles to empirical probability distribution from
    dynamic rupture simulation. values outside the empirical distribution
    are mapped to the ends.
    """
    quantiles = nscore['nscore']
    values = nscore['x']
    # Out-of-range scores are clamped to the endpoints of the table.
    ends = (values.min(), values.max())
    interpolator = interp1d(quantiles, values, bounds_error=False, fill_value=ends)
    return interpolator(scores)
def linear_taper(n, inds=(0,-1), vals=(0.0,1.0) ):
    """
    Returns normalized coefficient for linear taper between (start, end) and
    values (start_value, end_value)

    Args:
        n (int) : length of taper
        inds (tuple) : start/end indexes of the taper; the default (0, -1)
                is a sentinel meaning "taper over the whole length"
        vals (tuple) : values corresponding to inds, default (0.0, 1.0)

    Returns:
        coef (ndarray) : coefficient {0 .. 1.0} of linear taper over indexes = inds with
                values = vals

    Changes vs. original: the redundant mid-function ``import numpy`` was
    removed (numpy is imported at module level), and the slice now uses
    ``inds[1]`` consistently instead of mixing ``inds[1]``/``inds[-1]``
    (identical for 2-tuples).
    """
    ix = np.arange(n)
    coef = np.ones(n)
    # Linear model through (inds[0], vals[0]) and (inds[1], vals[1]).
    delta_y = vals[1] - vals[0]
    if inds == (0,-1):
        # Sentinel default: spread the ramp over the full length.
        delta_x = n
    else:
        delta_x = inds[1] - inds[0]
    slope = delta_y / delta_x
    intercept = vals[0] - slope * inds[0]
    # NOTE: the end index is exclusive, so coef[inds[1]] keeps its 1.0.
    coef[inds[0]:inds[1]] = slope * ix[inds[0]:inds[1]] + intercept
    return coef
def boundary_taper( field, taper_width=10, free_surface=True, values=0 ):
    """
    Linearly taper ``field`` toward ``values`` inside a border of width
    ``taper_width`` cells.

    Args:
        field (2d ndarray) : rupture field to taper.
        taper_width (int) : width of the tapered border, in cells.
        free_surface (bool) : when True the top edge (free surface) is
            tapered too; when False the top edge is left untouched.
        values (sequence or int, optional) : end value(s) of the taper,
            as percentages; default 0.

    Returns:
        tapered_field (ndarray) : tapered field, same shape as ``field``.
    """
    ny, nx = field.shape
    # Build an interior of ones and linearly ramp it to ``values`` at the
    # requested edges; multiplying by the field applies the taper.
    if free_surface:
        inner_shape = (ny - 2 * taper_width, nx - 2 * taper_width)
        pad_spec = ((taper_width, taper_width), (taper_width, taper_width))
    else:
        inner_shape = (ny - taper_width, nx - 2 * taper_width)
        pad_spec = ((0, taper_width), (taper_width, taper_width))
    mask = np.pad(np.ones(inner_shape), pad_spec, 'linear_ramp', end_values=values)
    assert field.shape == mask.shape
    return field * mask
"""
Helping functions.
"""
def get_dip(nhat1, nhat2, nhat3):
    """Dip angle (degrees) at each grid point, from the components of the
    fault-normal vector (nhat1, nhat2, nhat3). Shapes must match."""
    nz, nx = nhat1.shape
    dip = np.ones([nz, nx])
    norm = lambda v: np.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2)
    for i in range(nz):
        for j in range(nx):
            n = (nhat1[i, j], nhat2[i, j], nhat3[i, j])
            nproj = (n[0], 0, n[2])  # normal projected onto the x1-x3 plane
            arg = (n[0] ** 2 + n[2] ** 2) / (norm(nproj) * norm(n))
            # Clamp floating-point overshoot past 1.0 before arccos.
            if np.isclose(1.0, arg):
                arg = 1.0
            theta = np.rad2deg(np.arccos(arg))
            dip[i, j] = 90 - theta
    return dip
def get_moment(slip, vs, rho, params):
    """Seismic moment per cell: mu * area * slip with mu = rho * vs**2 and
    area = dx**2 taken from params['dx']."""
    shear_modulus = rho * vs ** 2
    cell_area = params['dx'] ** 2
    return shear_modulus * cell_area * slip
def get_strike(nhat1, nhat3, mean_strike=270):
    # Per-cell strike (degrees) from the in-plane normal components,
    # resolved by quadrant and finally rotated so the average strike
    # equals ``mean_strike`` (the raw values are computed around 270).
    nz,nx = nhat1.shape
    strike = np.ones([nz,nx])
    for i in range(nz):
        for j in range(nx):
            nproj = (nhat1[i,j], 0, nhat3[i,j])
            x3 = (1,0,0)
            norm = lambda v: np.sqrt(v[0]**2+v[1]**2+v[2]**2)
            scaling = 1.0 / ( norm(x3) * norm( nproj) )
            # NOTE(review): the scaling factor multiplies the arccos result
            # rather than its argument — confirm this is intentional.
            theta = np.rad2deg(scaling * np.arccos(nproj[2]))
            # Resolve the angle by the quadrant of (nhat1, nhat3).
            if nhat1[i,j] > 0 and nhat3[i,j] > 0:
                strike[i,j] = 270 + theta
            elif nhat1[i,j] < 0 and nhat3[i,j] > 0:
                strike[i,j] = 270 - theta
            elif nhat1[i,j] < 0 and nhat3[i,j] < 0:
                # in 3rd quad
                strike[i,j] = 270 - theta
            elif nhat1[i,j] > 0 and nhat3[i,j] < 0:
                # in 4th quad
                strike[i,j] = theta - 90
    # rotate to different strike
    stk = strike - 270 + mean_strike
    return stk
def source_time_function():
    # TODO: placeholder — not implemented.
    pass
def compute_trup(vrup, params):
    # Rupture-onset times: fast-marching travel time from the hypocenter
    # (params['ihypo']) through the rupture-speed field ``vrup``, on an
    # (nz, nx) grid with spacing params['dx'].
    phi = np.ones( (params['nz'],params['nx']) ) #* params['dx']
    ihypo = params['ihypo']
    # The zero level set (source) is marked by a sign change at the hypocenter.
    phi[ ihypo[0], ihypo[1] ] = -1
    trup = travel_time( phi, speed=vrup, dx=params['dx'] )
    return np.array(trup)
def expand_bbp_velocity_model(velocity_model_bbp_format, nx, nz, dx):
    """Expand a layered BBP 1-D velocity model onto an (nz, nx) grid.

    BBP rows are [layer_thickness, vp, vs, rho, qp, qs]. Each discrete grid
    depth is mapped to its containing layer, whose vp/vs/rho values fill the
    corresponding grid row. Returns (vp, vs, rho), each of shape (nz, nx).
    """
    # Discrete depths of the grid rows.
    depths = np.linspace(0, (nz - 1) * dx, nz)
    # BBP gives layer thicknesses; cumulative sum yields layer bottom depths.
    layer_bottoms = np.cumsum(velocity_model_bbp_format[:, 0])
    # Find the containing layer for each depth.
    layer_idxs = np.searchsorted(layer_bottoms, depths, side='right')
    vp = np.zeros((nz, nx))
    vs = np.zeros((nz, nx))
    rho = np.zeros((nz, nx))
    for row, idx in enumerate(layer_idxs):
        vp[row, :] = velocity_model_bbp_format[idx, 1]
        vs[row, :] = velocity_model_bbp_format[idx, 2]
        rho[row, :] = velocity_model_bbp_format[idx, 3]
    return vp, vs, rho
if __name__ == "__main__":
    # Quick visual check: expand the central-Japan BBP 1-D model onto a
    # grid and plot the shear-wave speed field.
    from utils import plot_2d_image
    mod = np.loadtxt('./central_japan_bbp1d.txt')
    nx = 273
    nz = 136
    dx = 0.1
    _, vs, _ = expand_bbp_velocity_model(mod, nx, nz, dx)
    ax = plot_2d_image(vs, nx=nx, nz=nz, dx=dx,
            clabel = r'$c_s$ (km/s) ', xlabel="Distance (km)", ylabel="Distance (km)",
            surface_plot=False, contour_plot=False)
| wsavran/sokrg | krg_utils.py | krg_utils.py | py | 5,948 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "scipy.signal.resample",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "scipy.signal.resample",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "scipy.interpolate.interp1d",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": ... |
5547555689 | """
Tests for voting 13/01/2022.
"""
from sys import version
from collections import namedtuple
from brownie import interface, reverts
from scripts.vote_2022_01_13 import start_vote
from tx_tracing_helpers import *
from utils.config import (
lido_dao_lido_repo,
lido_dao_node_operators_registry_repo,
)
# Expected Aragon repo state for the Lido app BEFORE the vote:
# contract address, IPFS CID, ASCII-hex content URI and semantic version.
lido_old_app = {
    'address': '0xC7B5aF82B05Eb3b64F12241B04B2cF14469E39F7',
    'ipfsCid': 'QmbmPW5r9HMdyUARNJjjE7MNqBUGrXashwoWvqRZhc1t5b',
    'content_uri': '0x697066733a516d626d5057357239484d64795541524e4a6a6a45374d4e714255477258617368776f577671525a686331743562',
    'version': (2, 0, 0),
}
# Expected Lido app state AFTER the vote (only version and IPFS content
# change; the implementation address stays the same).
lido_new_app = {
    'address': '0xC7B5aF82B05Eb3b64F12241B04B2cF14469E39F7',
    'ipfsCid': 'QmQkJMtvu4tyJvWrPXJfjLfyTWn959iayyNjp7YqNzX7pS',
    'content_uri': '0x697066733a516d516b4a4d7476753474794a76577250584a666a4c667954576e393539696179794e6a703759714e7a58377053',
    'version': (2, 0, 1),
}
# Node Operators Registry app state BEFORE the vote.
nos_old_app = {
    'address': '0xec3567ae258639a0FF5A02F7eAF4E4aE4416C5fe',
    'ipfsCid': 'QmQExJkoyg7xWXJjLaYC75UAmsGY1STY41YTG3wEK7q8dd',
    'content_uri': '0x697066733a516d5145784a6b6f7967377857584a6a4c615943373555416d7347593153545934315954473377454b3771386464',
    'version': (2, 0, 0),
}
# Node Operators Registry app state AFTER the vote.
nos_new_app = {
    'address': '0xec3567ae258639a0FF5A02F7eAF4E4aE4416C5fe',
    'ipfsCid': 'Qma7PXHmEj4js2gjM9vtHPtqvuK82iS5EYPiJmzKLzU58G',
    'content_uri': '0x697066733a516d61375058486d456a346a7332676a4d3976744850747176754b3832695335455950694a6d7a4b4c7a55353847',
    'version': (2, 0, 1),
}
# A node operator to be added by the vote: display name, expected registry
# id and reward address.
NodeOperatorAdd = namedtuple(
    'NodeOperatorAdd', ['name', 'id', 'address']
)
# Operators the vote is expected to register, in id order.
NEW_NODE_OPERATORS = [
    # name, id, address
    NodeOperatorAdd(
        'Stakin', 14, '0xf6b0a1B771633DB40A3e21Cc49fD2FE35669eF46'
    ),
    NodeOperatorAdd(
        'ChainLayer', 15, '0xd5aC23b1adE91A054C4974264C9dbdDD0E52BB05'
    ),
    NodeOperatorAdd(
        'Simply Staking', 16, '0xFEf3C7aa6956D03dbad8959c59155c4A465DCacd'
    ),
    NodeOperatorAdd(
        'BridgeTower', 17, '0x40C20da8d0214A7eF33a84e287992858dB744e6d'
    ),
    NodeOperatorAdd(
        'Stakely', 18, '0x77d2CF58aa4da90b3AFCd283646568e4383193BF'
    ),
    NodeOperatorAdd(
        'InfStones', 19, '0x60bC65e1ccA448F98578F8d9f9AB64c3BA70a4c3'
    ),
    NodeOperatorAdd(
        'HashQuark', 20, '0x065dAAb531e7Cd50f900D644E8caE8A208eEa4E9'
    ),
    NodeOperatorAdd(
        'ConsenSys Codefi', 21, '0x5Bc5ec5130f66f13d5C21ac6811A7e624ED3C7c6'
    ),
]
def test_2022_01_13(
    helpers, accounts, ldo_holder,
    dao_voting, node_operators_registry,
    vote_id_from_env
):
    """Acceptance test for the 2022-01-13 vote.

    Verifies that the vote upgrades the Lido and NodeOperatorsRegistry
    aragonPM apps (version + ipfs content only) and registers the eight
    operators from NEW_NODE_OPERATORS.
    """
    ### LIDO APP
    lido_repo = interface.Repo(lido_dao_lido_repo)
    lido_old_app_from_chain = lido_repo.getLatest()
    # check old version of lido app is correct
    assert lido_old_app['address'] == lido_old_app_from_chain[1]
    assert lido_old_app['version'] == lido_old_app_from_chain[0]
    assert lido_old_app['content_uri'] == lido_old_app_from_chain[2]
    # check old ipfs link
    lido_old_ipfs = lido_old_app_from_chain[2][:].decode("ASCII")
    assert f"ipfs:{lido_old_app['ipfsCid']}" == lido_old_ipfs
    ### NOS APP
    nos_repo = interface.Repo(lido_dao_node_operators_registry_repo)
    nos_old_app_from_chain = nos_repo.getLatest()
    # check old version of node operators registry app is correct
    assert nos_old_app['address'] == nos_old_app_from_chain[1]
    assert nos_old_app['version'] == nos_old_app_from_chain[0]
    assert nos_old_app['content_uri'] == nos_old_app_from_chain[2]
    # check old ipfs link
    nos_old_ipfs = nos_old_app_from_chain[2][:].decode("ASCII")
    assert f"ipfs:{nos_old_app['ipfsCid']}" == nos_old_ipfs
    # Check that all NOs are unknown yet
    for node_operator in NEW_NODE_OPERATORS:
        with reverts('NODE_OPERATOR_NOT_FOUND'):
            node_operators_registry.getNodeOperator(
                node_operator.id, True
            )
    ##
    ## START VOTE
    ##
    vote_id = vote_id_from_env or start_vote({'from': ldo_holder}, silent=True)[0]
    tx: TransactionReceipt = helpers.execute_vote(
        vote_id=vote_id, accounts=accounts, dao_voting=dao_voting
    )
    ### LIDO APP
    # check only version and ipfs were changed
    lido_new_app_from_chain = lido_repo.getLatest()
    assert lido_new_app['address'] == lido_new_app_from_chain[1]
    assert lido_new_app['version'] == lido_new_app_from_chain[0]
    assert lido_new_app['content_uri'] == lido_new_app_from_chain[2]
    # check new ipfs link (was misleadingly stored in ``lido_old_ipfs``)
    lido_new_ipfs = lido_new_app_from_chain[2][:].decode("ASCII")
    assert f"ipfs:{lido_new_app['ipfsCid']}" == lido_new_ipfs
    ### NOS APP
    # check only version and ipfs were changed
    nos_new_app_from_chain = nos_repo.getLatest()
    assert nos_new_app['address'] == nos_new_app_from_chain[1]
    assert nos_new_app['version'] == nos_new_app_from_chain[0]
    assert nos_new_app['content_uri'] == nos_new_app_from_chain[2]
    # check new ipfs link
    nos_new_ipfs = nos_new_app_from_chain[2][:].decode("ASCII")
    assert f"ipfs:{nos_new_app['ipfsCid']}" == nos_new_ipfs
    # Check that all NOs were added
    for node_operator in NEW_NODE_OPERATORS:
        no = node_operators_registry.getNodeOperator(
            node_operator.id, True
        )
        message = f'Failed on {node_operator.name}'
        assert no[0] is True, message  # is active
        assert no[1] == node_operator.name, message  # name
        assert no[2] == node_operator.address, message  # rewards address
        assert no[3] == 0, message  # staking limit (message was missing here)
    ### validate vote events (does not work for some reason)
    # assert count_vote_items_by_events(tx) == 10, "Incorrect voting items count"
    # display_voting_events(tx)
| lidofinance/scripts | archive/tests/xtest_2022_01_13.py | xtest_2022_01_13.py | py | 5,965 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "collections.namedtuple",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "brownie.interface.Repo",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "utils.config.lido_dao_lido_repo",
"line_number": 82,
"usage_type": "argument"
},
{
... |
19534802491 | from functools import wraps
from flask import request, abort, g
from app.models import User
def login_required(f):
    """ This decorator rejects the request with 401 unless a valid
        Authorization token identifies a user; the user is stored on ``g``.
        CORS preflight (OPTIONS) requests bypass authentication.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        if request.method == 'OPTIONS':
            # Preflight requests carry no credentials.
            return f(*args, **kwargs)
        token = request.headers.get('Authorization')
        if not token:
            abort(401)
        user = User.verify_auth_token(token)
        if not user:
            abort(401)
        g.user = user
        return f(*args, **kwargs)
    return decorated
def roles_required(*role_names):
    """ Decorator factory: the wrapped view only runs for authenticated
        users holding every role in *role_names*. Responds 401 for a
        missing/invalid token and 403 for insufficient roles; OPTIONS
        requests bypass all checks.
    """
    def wrapper(func):
        @wraps(func)
        def decorated_view(*args, **kwargs):
            if request.method == 'OPTIONS':
                return func(*args, **kwargs)
            token = request.headers.get('Authorization')
            if not token:
                abort(401)
            user = User.verify_auth_token(token)
            if not user:
                abort(401)
            g.user = user
            # User must have the required roles
            if not user.has_roles(*role_names):
                # Redirect to the unauthorized page
                abort(403)
            # Call the actual view
            return func(*args, **kwargs)
        return decorated_view
    return wrapper
| Zokormazo/ngLlery-backend | app/decorators.py | decorators.py | py | 1,456 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.request.method",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "flask.request.headers.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "fla... |
5726494392 | from flask import Flask, jsonify, request
import json
import os
app = Flask(__name__)
# Load data from JSON file if it exists
def load_data(path='investment_funds.json'):
    """Load the fund list from *path*.

    If the file does not exist it is created holding an empty JSON array,
    and an empty list is returned. The *path* parameter generalizes the
    previously hard-coded filename (default keeps old behavior).
    """
    if os.path.exists(path):
        with open(path) as file:
            return json.load(file)
    with open(path, 'w') as file:
        json.dump([], file)
    return []
# create data to JSON file
def create_data(new_fund, investment_funds):
    """Assign the next free integer id to *new_fund*, append it to the
    in-memory list and persist the list to disk.

    Bug fix: the old implementation re-read the JSON file and scanned it
    incrementing ``max_id`` only when ``fund['id'] == max_id``, which
    produced wrong ids unless existing ids happened to be 0..n-1 in file
    order (and crashed if the file was missing). The next id is now
    derived from the in-memory list, which is the source of truth that
    gets saved afterwards. Debug prints removed.
    """
    next_id = max(
        (fund.get('id', -1) for fund in investment_funds), default=-1
    ) + 1
    new_fund['id'] = next_id
    investment_funds.append(new_fund)
    save_data(investment_funds)
# save data to JSON file
def save_data(investment_funds, path='investment_funds.json'):
    """Persist the fund list as pretty-printed JSON.

    *path* generalizes the previously hard-coded filename; its default
    keeps the old behavior for existing callers.
    """
    with open(path, 'w') as file:
        json.dump(investment_funds, file, indent=2)
# Load initial data
investment_funds = load_data()
# Endpoint to retrieve a list of all funds
@app.route('/funds', methods=['GET'])
def get_all_funds():
    """Return the full in-memory fund list as a JSON array."""
    return jsonify(investment_funds)
# Endpoint to create a new fund
@app.route('/funds', methods=['POST'])
def create_fund():
    """Create a fund from the JSON request body, persist it and return
    it with HTTP 201. Missing fields default to null."""
    data = request.get_json()
    fields = ('fund_id', 'fund_name', 'fund_manager', 'description',
              'nav', 'creation_date', 'performance')
    new_fund = {field: data.get(field) for field in fields}
    create_data(new_fund, investment_funds)
    return jsonify(new_fund), 201
# Endpoint to retrieve details of a specific fund using its ID
# Endpoint to retrieve details of a specific fund using its ID
@app.route('/funds/<int:fund_id>', methods=['GET'])
def get_fund_by_id(fund_id):
    """Return the fund with the given id, or a 404 error payload.

    Leftover debug ``print`` statements removed.
    """
    for fund in investment_funds:
        if fund['id'] == fund_id:
            return jsonify(fund)
    return jsonify({'error': 'fund not found'}), 404
# Endpoint to update the performance of a fund using its ID
# Endpoint to update the performance of a fund using its ID
@app.route('/funds/<int:fund_id>', methods=['PUT'])
def update_fund(fund_id):
    """Update the 'performance' field of the fund with the given id.

    Returns the updated fund, or 404 if the id is unknown. Fixes the
    old truthiness test (an empty matched dict counted as "not found")
    and removes leftover debug prints.
    """
    fund = next((f for f in investment_funds if f['id'] == fund_id), None)
    if fund is None:
        return jsonify({'error': 'Fund not found'}), 404
    data = request.get_json()
    fund['performance'] = data.get('performance')
    save_data(investment_funds)
    return jsonify(fund)
# Endpoint to delete a fund using its ID
# Endpoint to delete a fund using its ID
@app.route('/funds/<int:fund_id>', methods=['DELETE'])
def delete_fund(fund_id):
    """Delete the fund with the given id and persist the change.

    Bug fix: the old code returned a success message (with an empty
    ``deleted_fund``) even when the id did not exist; it now responds
    404 like the GET/PUT endpoints. Debug prints removed.
    """
    for fund in investment_funds:
        if fund['id'] == fund_id:
            investment_funds.remove(fund)
            save_data(investment_funds)
            return jsonify({'message': 'Fund deleted successfully',
                            'deleted_fund': fund})
    return jsonify({'error': 'Fund not found'}), 404
if __name__ == '__main__':
app.run(debug=True, port=5001)
| raqif/fund_management_system | app_json.py | app_json.py | py | 3,352 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 1... |
32929404532 | #!/usr/bin/env python
# coding: utf-8
import csv
from config import ADDRESSES_FILE
from models import Birthday, get_this_week_list
def parse_data_file(in_file=ADDRESSES_FILE):
    """
    :param in_file: str
        Path to the CSV file holding one birthday per row
    :return: generator of {}
        Rows of the CSV as dictionaries; empty rows are skipped
    """
    # Bug fix: the file handle was previously opened with a bare open()
    # and never closed; the with-block closes it when iteration finishes.
    with open(in_file, "r") as data_file:
        for row in csv.DictReader(data_file):
            if row:
                yield row
def send_cake_remainder(birthdays):
    """Sends the cake email to everybody whose birthday falls this week
    :param birthdays: list of birthdays
    :return: generator of the birthdays that were notified
    """
    for celebrant in list(get_this_week_list(birthdays)):
        celebrant.send_msg()
        yield celebrant
def send_birthday_remainder(birthdays):
    """Sends email remainder about people who turn to whoever does not turn
    :param birthdays: list of birthdays
    :return: generator of the Birthday objects that were reminded
    """
    upcoming = list(get_this_week_list(birthdays))
    if not upcoming:
        # nothing to announce this week -> no emails at all
        return
    for raw_row in birthdays:
        parsed = Birthday(raw_row)  # parse raw csv data
        if not parsed.is_this_week():
            parsed.send_remainder_msg(upcoming)
            yield parsed
def send_emails(addresses):
    """
    :param addresses: str
        Path to file containing addresses
    :return: [] of {}
        Summary of every cake email followed by every reminder email sent
    """
    birthdays = list(parse_data_file(in_file=addresses))
    summary = [
        {"reason": "cake", "message": sent.get_summary()}
        for sent in list(send_cake_remainder(birthdays))
    ]
    summary += [
        {"reason": "remind", "message": sent.get_summary()}
        for sent in list(send_birthday_remainder(birthdays))
    ]
    return summary
| raceup/happy-birthday-bot | hbb/bot.py | bot.py | py | 1,857 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "config.ADDRESSES_FILE",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "csv.DictReader",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "models.get_this_week_list",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "models... |
12896234781 | """
Provides RESTful URLs for Route objects
"""
from flask import Blueprint, request
from back_end.api import jsonify_decorator, token_decorator
from back_end.db import routes
from back_end.exceptions import InvalidContent
ROUTES = Blueprint('route', __name__)
@ROUTES.route('/<routeid>', methods=['GET'])
@jsonify_decorator
@token_decorator
def get_route(routeid, userid):
    """
    Returns the route in JSON format with the `routeid` provided in URL.

    :param routeid: route identifier taken from the URL
    :param userid: id of the calling user (presumably supplied by
        token_decorator -- confirm against back_end.api)
    """
    return routes.get_from_id(routeid, userid), 200
@ROUTES.route('/<routeid>/events', methods=['GET'])
@jsonify_decorator
@token_decorator
def get_events_from_plan(routeid, userid):
    """
    Returns the list of events in JSON format containing all events
    belonging to a route with the `routeid` provided in URL.

    :param routeid: route identifier taken from the URL
    :param userid: id of the calling user (presumably supplied by
        token_decorator -- confirm against back_end.api)
    """
    return routes.get_from_id(routeid, userid).events, 200
@ROUTES.route('/<routeid>/vote', methods=['POST'])
@jsonify_decorator
@token_decorator
def vote_route(routeid, userid):
    """
    Updates user's vote on the route specified by `routeid` provided in URL.
    The vote value is read from the JSON request body.
    Returns the updated route in JSON format.
    """
    body = request.get_json()
    if body is None:
        raise InvalidContent("A problem occurred when voting on the route")
    vote_value = body.get('vote')
    return routes.vote(routeid, userid, vote_value), 201
@ROUTES.route('', methods=['POST'])
@jsonify_decorator
@token_decorator
def create_route(userid):
    """
    Creates a route with the properties (planid, name, eventidList)
    taken from the JSON request body.
    Returns the created route in JSON format.
    """
    body = request.get_json()
    if body is None:
        raise InvalidContent("A problem occurred when creating the route")
    plan_id = body.get('planid')
    name = body.get('name')
    event_ids = body.get('eventidList')
    return routes.create(plan_id, name, event_ids, userid), 201
| vedantchokshi/various-plans | back_end/api/routes.py | routes.py | py | 1,842 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "back_end.db.routes.get_from_id",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "back_end.db.routes",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "bac... |
3985469839 | import os
from skimage import io
import copy
import numpy as np
import random
from glob import glob
import json
from sklearn.preprocessing import MultiLabelBinarizer
import torch
import torch.utils.data as data
from torchvision import transforms, datasets
from src.datasets.root_paths import DATA_ROOTS
# The 43 land-cover class names matched against each patch's JSON
# 'labels' metadata; their order fixes the multi-hot encoding produced
# by MultiLabelBinarizer(classes=CLASSES) in train_test_split below.
CLASSES = ['Sea and ocean',
           'Coniferous forest',
           'Mixed forest',
           'Moors and heathland',
           'Transitional woodland/shrub',
           'Sparsely vegetated areas',
           'Discontinuous urban fabric',
           'Non-irrigated arable land',
           'Pastures',
           'Complex cultivation patterns',
           'Broad-leaved forest',
           'Water bodies',
           'Land principally occupied by agriculture, with significant areas of natural vegetation',
           'Vineyards',
           'Agro-forestry areas',
           'Industrial or commercial units',
           'Airports',
           'Water courses',
           'Natural grassland',
           'Construction sites',
           'Sclerophyllous vegetation',
           'Peatbogs',
           'Rice fields',
           'Continuous urban fabric',
           'Olive groves',
           'Permanently irrigated land',
           'Mineral extraction sites',
           'Annual crops associated with permanent crops',
           'Dump sites',
           'Green urban areas',
           'Intertidal flats',
           'Bare rock',
           'Fruit trees and berry plantations',
           'Salt marshes',
           'Road and rail networks and associated land',
           'Estuaries',
           'Inland marshes',
           'Sport and leisure facilities',
           'Beaches, dunes, sands',
           'Coastal lagoons',
           'Salines',
           'Port areas',
           'Burnt areas']
class BaseBigEarthNet(data.Dataset):
    """Multi-label dataset of BigEarthNet patches.

    Each sample directory under ``root`` holds 12 ``.tif`` band images plus a
    JSON metadata file whose 'labels' field is multi-hot encoded over CLASSES.
    A seeded 80/20 train/test split is drawn over all sample directories.
    """
    NUM_CLASSES = 43
    MULTI_LABEL = True
    NUM_CHANNELS = 12
    FILTER_SIZE = 120

    def __init__(
        self,
        root=DATA_ROOTS["bigearthnet"],
        train=True,
        image_transforms=None,
        seed=42,
    ):
        super().__init__()
        self.root = root
        self.train = train
        self.image_transforms = image_transforms
        self.rs = np.random.RandomState(seed)
        train_paths, test_paths, train_labels, test_labels = self.train_test_split()
        if train:
            self.paths = train_paths
            self.labels = train_labels
        else:
            self.paths = test_paths
            self.labels = test_labels
        self.targets = copy.deepcopy(self.labels)

    def train_test_split(self, train_frac=0.8):
        """Return (train_paths, test_paths, train_labels, test_labels).

        Bug fix: ``os.listdir`` order is filesystem-dependent, so the same
        seed could previously produce different splits on different
        machines; sorting makes the shuffled split reproducible.
        """
        all_sample_paths = np.array(sorted(os.listdir(self.root)))
        num_samples = len(all_sample_paths)
        labels = []
        for sample_path in all_sample_paths:
            metadata_path = glob(os.path.join(self.root, sample_path, '*.json'))[0]
            # with-block closes the metadata file (the old bare open() leaked it)
            with open(metadata_path) as metadata_file:
                class_names = set(json.load(metadata_file)['labels'])
            labels.append(class_names)
        encoder = MultiLabelBinarizer(classes=CLASSES, sparse_output=False)
        encoded_labels = encoder.fit_transform(labels)
        indices = np.arange(num_samples)
        self.rs.shuffle(indices)
        split = int(num_samples * train_frac)
        train_indices = indices[:split]
        test_indices = indices[split:]
        return (all_sample_paths[train_indices], all_sample_paths[test_indices],
                encoded_labels[train_indices], encoded_labels[test_indices])

    def __getitem__(self, index):
        path = self.paths[index]
        label = self.labels[index]
        # sorted() gives a deterministic band order; glob's order is
        # filesystem-dependent, which previously scrambled the channels.
        img_paths = sorted(glob(os.path.join(self.root, path, '*.tif')))
        image = []
        for img_path in img_paths:
            # one of (1, 20, 20), (1, 60, 60), (1, 120, 120)
            img = np.asarray(io.imread_collection(img_path), dtype=np.float32)
            image.append(transforms.Resize(120)(torch.tensor(img)))
        image = torch.vstack(image)  # (12, 120, 120)
        if self.image_transforms:
            image = self.image_transforms(image)
        return image, label

    def __len__(self):
        return len(self.paths)
class BigEarthNet(BaseBigEarthNet):
    """Pair-sampling view over BaseBigEarthNet: each item yields two
    (independently transformed) copies of the same sample plus its label.
    """

    def __init__(
        self,
        root=DATA_ROOTS["bigearthnet"],
        train=True,
        image_transforms=None,
    ):
        # Bug fix: the old ``super().__init__()`` call ran
        # BaseBigEarthNet.__init__ with its *defaults*, loading and
        # splitting the entire default-root dataset a second time only to
        # discard it. All methods below delegate to ``self.dataset``, so
        # no base-class state is needed.
        self.dataset = BaseBigEarthNet(
            root=root,
            train=train,
            image_transforms=image_transforms,
        )

    def __getitem__(self, index):
        img_data, label = self.dataset.__getitem__(index)
        img2_data, _ = self.dataset.__getitem__(index)
        # (avoid naming this ``data``: it would shadow the module alias)
        item = [index, img_data.float(), img2_data.float(), label, label]
        return tuple(item)

    def __len__(self):
        return len(self.dataset)
| jbayrooti/divmaker | src/datasets/bigearthnet.py | bigearthnet.py | py | 5,152 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "src.datasets.root_paths.DATA_ROOTS",
"line_number": 67,
"usage_type": "name"
},
{
... |
10045098498 | import requests
import json
import datetime,time
from kafka import KafkaProducer
# Liste des cryptos à récupérer
# Configuration du Kafka Producer
producer = KafkaProducer(bootstrap_servers=['broker:29092'], value_serializer=lambda x: json.dumps(x).encode('utf-8'))
crypto_list = ["bitcoin", "ethereum", "ripple"]
start_time = int(time.time())
# convert to timestamp
while int(time.time()) < start_time + 200:
for crypto in crypto_list:
url = f"https://api.coingecko.com/api/v3/coins/markets?vs_currency=usd&ids={crypto}"
response = requests.get(url)
data = response.json()[0]
# print(data["current_price"])
current_price = data["current_price"]
date = datetime.datetime.now()
message = {"crypto": crypto, "value": current_price, "date": date.strftime("%d-%m-%Y %H:%M:%S")}
print(message)
producer.send('crypto-values-f', value=message)
# Récupération toutes les 5 minutes
time.sleep(30) | stdynv/ARCHIDISTR | CryptoProducer.py | CryptoProducer.py | py | 983 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "kafka.KafkaProducer",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number... |
6795419791 | from django.conf import settings
from rest_framework import viewsets, exceptions
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from utils.certification_helpers import CertificationWorkshopWrapper
from utils.drf.authentication import UsernameAuthentication
from ...models import Event
from ...helpers import EventPermissionHelper
from ...tasks import SummitRequestTask
from ..serializers import (
EventAvailableSerializer,
EventTypesSerializer,
EventChangeStatusSerializer,
EventDetailSerializer,
EventSerializer,
EventListSerializer,
RequestSummitSerializer,
)
from .custom_pagination import StandardResultsSetPagination
class EventViewSet(viewsets.ModelViewSet):
model = Event
permission_classes = (IsAuthenticated, )
pagination_class = StandardResultsSetPagination
authentication_classes = (JSONWebTokenAuthentication, UsernameAuthentication, )
lookup_field = 'uuid'
serializers = {
'default': EventListSerializer,
'create': EventSerializer,
'update': EventSerializer,
'change_status': EventChangeStatusSerializer,
'retrieve': EventDetailSerializer,
'permissions': EventAvailableSerializer,
'events_types': EventTypesSerializer,
'request_summit': RequestSummitSerializer,
}
def get_serializer_class(self):
return self.serializers.get(
self.action,
self.serializers['default'],
)
def get_queryset(self):
return self.model.objects.filter_by_user(self.request.user).distinct()
def check_edit_permissions(self):
event = self.get_object()
can_edit = self.request.user.has_perm(
settings.EVENT_PERMS_EDIT_EVENT,
event,
)
if not can_edit and self.request.user.uuid != event.created_by.uuid:
raise exceptions.PermissionDenied
def create(self, request, *args, **kwargs):
helper = EventPermissionHelper()
can_create = helper.has_perm(
request.user,
'create_{}'.format(request.data.get('category')),
)
if not can_create:
raise exceptions.PermissionDenied
return super().create(request, *args, **kwargs)
def perform_create(self, serializer):
serializer.save(user_from=self.request.user)
@action(methods=['get'], detail=False)
def permissions(self, request):
helper = EventPermissionHelper()
user_objects = helper.get_events_available(request.user)
serializer = EventAvailableSerializer(
list(filter(lambda x: x[0] in user_objects, settings.EVENT_TYPE_CHOICES)),
many=True,
)
return Response(serializer.data)
@action(methods=['get'], detail=False)
def events_types(self, request):
event_available_data = []
helper = EventPermissionHelper()
for event_type in settings.EVENT_TYPE_CHOICES:
event_available_data.append([
event_type[0],
event_type[1],
helper.has_perm(request.user, 'create_{}'.format(event_type[0])),
])
serializer = EventTypesSerializer(event_available_data, many=True)
return Response(serializer.data)
@action(methods=['put'], detail=True)
def change_status(self, request, uuid):
self.check_edit_permissions()
serializer = self.get_serializer(
instance=self.get_object(),
data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save(user_from=request.user)
return Response(serializer.data)
@action(methods=['post'], detail=False)
def request_summit(self, request):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
SummitRequestTask().s(
user_uuid=self.request.user.uuid.__str__(),
**serializer.validated_data,
).apply_async()
return Response(serializer.data)
def perform_destroy(self, instance):
self.check_edit_permissions()
instance.status = (self.request.user, settings.EVENT_CH_STATUS_DELETED)
@action(methods=['post'], detail=True, url_path='send-certificates')
def send_certificates(self, request, uuid):
event = self.get_object()
certification_wrapper = CertificationWorkshopWrapper(event)
certification_wrapper.release_group_credential(request.user, event)
return Response()
| tomasgarzon/exo-services | service-exo-events/event/api/views/event.py | event.py | py | 4,675 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.viewsets.ModelViewSet",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.viewsets",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "models.Event",
"line_number": 28,
"usage_type": "name"
},
{
"ap... |
37435918590 | #!/usr/bin/env python
# coding: utf-8
# In[5]:
import numpy as np
# In[1]:
import plotly
import matplotlib.pyplot as plt
# In[3]:
#Equations of motion:
#y = vt + .5a(t**2)
#x = vt
#y = x + 0.5a(x**2)/(v**2)
# In[6]:
#Let intial velocity v = 10 m/s, acceleration a = -9.8 m/s^2, and initial height h = 100 m
# In[11]:
def y(x, a, v, h):
    """Trajectory height at horizontal distance x for launch speed v,
    constant vertical acceleration a and initial height h.
    Works elementwise on numpy arrays as well as on scalars."""
    # 0.5 * a * t**2 with t = x / v (time to cover distance x)
    drop_term = a * x ** 2 / (2 * v ** 2)
    return h + x + drop_term
xlist = np.linspace(0, 100, num = 1000, endpoint=True, retstep=False)
ylist = y(xlist, -9.8, 10, 100)
plt.figure(num=0, dpi=120)
plt.plot(xlist, ylist, color='pink')
plt.xlabel('Distance x (m)')
plt.ylabel('Height y (m)')
plt.title("Projectile Motion")
# In[ ]:
# In[ ]:
| abbychriss/toy-projects | Projectile motion.py | Projectile motion.py | py | 679 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.linspace",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.... |
39238016680 | import cv2
import numpy as np
def get_seg_size(images, threshold=23, pmapxy=None):
    """Return one segmentation-size value per image.

    With ``pmapxy`` supplied, the size is the sum of the image weighted
    elementwise by the map; otherwise it is the count of non-zero pixels.

    Bug fix: the non-``pmapxy`` branch referenced an undefined variable
    ``thresh_image`` and raised NameError; it now counts the image's own
    non-zero pixels. ``threshold`` is kept for interface compatibility
    but is currently unused.
    """
    seg_sizes = []
    for image in images:
        if pmapxy is not None:
            weighted = np.multiply(image, pmapxy)
            seg_sizes.append(weighted.sum(1).sum(0))
        else:
            seg_sizes.append(cv2.countNonZero(image))
    return seg_sizes
def get_seg_perimeter(images):
    """Return, for each image, the number of edge pixels reported by
    Canny edge detection (thresholds 30/40)."""
    return [
        cv2.countNonZero(cv2.Canny(image, threshold1=30, threshold2=40))
        for image in images
    ]
{
"api_name": "numpy.multiply",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.countNonZero",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.Canny",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cv2.countNonZero",
"lin... |
34000667448 | import json, random, csv, sys
def check_input():
    """Parse ``-i <coded_file> [-o <out_file>]`` from sys.argv.

    :return: (coded_file, out_file) where out_file is None when the
        result should be printed to stdout instead of written to a file.
    Exits with status 1 on malformed arguments (the old bare ``exit()``
    reported success via status 0). Also replaces the bitwise ``&`` on
    booleans with short-circuiting ``and``.
    """
    argv = sys.argv
    if len(argv) == 3 and argv[1] == '-i':
        return argv[2], None
    if len(argv) == 5 and argv[1] == '-i' and argv[3] == '-o':
        return argv[2], argv[4]
    print("[Error] invalid parameters")
    sys.exit(1)
def write_data(data, file_name):
    """Dump *data* as JSON to *file_name*, appending '.json' when absent.

    Fixes: uses ``endswith`` instead of a substring test (``'.json' in``
    matched the extension anywhere in the name) and drops the redundant
    ``close()`` inside the with-block.
    """
    if not file_name.endswith('.json'):
        file_name = file_name + ".json"
    with open(file_name, 'w') as f_out:
        json.dump(data, f_out, indent=0)
def process(coded_file):
    """Count annotation codes from a coded TSV file.

    Each row is expected to carry a single-character code in its third
    column: c(ourse), f(ood), r(esidence) or o(ther); other codes and
    rows with fewer than three columns are ignored (the old code raised
    IndexError on short rows). The '.tsv' suffix is appended with
    ``endswith`` instead of the old anywhere-substring test, and the
    redundant ``close()`` inside the with-block is gone.

    :return: {} mapping category name to count
    """
    categories = {
        'c': "course-related",
        'f': "food-related",
        'r': "residence-related",
        'o': "other",
    }
    data = {name: 0 for name in categories.values()}
    if not coded_file.endswith('.tsv'):
        coded_file = coded_file + ".tsv"
    with open(coded_file) as f:
        for line in csv.reader(f, delimiter="\t"):
            if len(line) < 3:
                continue
            anno = line[2]
            if anno in categories:
                data[categories[anno]] += 1
    return data
def main():
    """Entry point: parse CLI args, count annotations, emit JSON either
    to stdout or to the requested output file."""
    coded_file, out_file = check_input()
    counts = process(coded_file)
    if out_file is None:
        print(json.dumps(counts, indent=0))
    else:
        write_data(counts, out_file)


if __name__ == '__main__':
    main()
| namdar-nejad/COMP-598 | A7/Code/src/analyze.py | analyze.py | py | 1,690 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number":... |
69826161703 | """
General Numerical Solver for the 1D Time-Dependent Schrodinger Equation.
Authors:
- Jake Vanderplas <vanderplas@astro.washington.edu>
- Andre Xuereb (imaginary time propagation, normalized wavefunction
For a theoretical description of the algorithm, please see
http://jakevdp.github.com/blog/2012/09/05/quantum-python/
License: BSD style
Please feel free to use and modify this, but keep the above information.
"""
import numpy as np
from scipy import fftpack
import matplotlib.pyplot as plt
from matplotlib import animation
import seaborn as sns
sns.set()
class Schrodinger(object):
    """
    Class which implements a numerical solution of the time-dependent
    Schrodinger equation for an arbitrary potential, via the split-step
    Fourier method: the wavefunction is alternately evolved by the
    potential term in x-space and the kinetic term in k-space.
    """
    def __init__(self, x, psi_x0, V_x, k0=None, hbar=1, m=1, t0=0.0):
        """
        Parameters
        ----------
        x : array_like, float
            Length-N array of evenly spaced spatial coordinates
        psi_x0 : array_like, complex
            Length-N array of the initial wave function at time t0
        V_x : array_like, float
            Length-N array giving the potential at each x
        k0 : float
            The minimum value of k.  Note that, because of the workings of the
            Fast Fourier Transform, the momentum wave-number will be defined
            in the range
              k0 < k < 2*pi / dx ,
            where dx = x[1]-x[0].  If you expect nonzero momentum outside this
            range, you must modify the inputs accordingly.  If not specified,
            k0 will be calculated such that the range is [-k0,k0]
        hbar : float
            Value of Planck's constant (default = 1)
        m : float
            Particle mass (default = 1)
        t0 : float
            Initial time (default = 0)
        """
        # Validation of array inputs
        self.x, psi_x0, self.V_x = map(np.asarray, (x, psi_x0, V_x))
        N = self.x.size
        assert self.x.shape == (N,)
        assert psi_x0.shape == (N,)
        assert self.V_x.shape == (N,)

        # Validate and set internal parameters
        assert hbar > 0
        assert m > 0
        self.hbar = hbar
        self.m = m
        self.t = t0
        self.dt_ = None
        self.N = len(x)
        self.dx = self.x[1] - self.x[0]
        self.dk = 2 * np.pi / (self.N * self.dx)

        # Set momentum scale (identity check replaces the old ``k0 == None``)
        if k0 is None:
            self.k0 = -0.5 * self.N * self.dk
        else:
            assert k0 < 0
            self.k0 = k0
        self.k = self.k0 + self.dk * np.arange(self.N)

        self.psi_x = psi_x0
        self.compute_k_from_x()

        # Variables which hold steps in evolution
        self.x_evolve_half = None
        self.x_evolve = None
        self.k_evolve = None

    def _set_psi_x(self, psi_x):
        # Store the wavefunction in "modulated" form so that the plain FFT
        # corresponds to the continuous Fourier transform on [x0, xN).
        assert psi_x.shape == self.x.shape
        self.psi_mod_x = (psi_x * np.exp(-1j * self.k[0] * self.x)
                          * self.dx / np.sqrt(2 * np.pi))
        self.psi_mod_x /= self.norm  # keep the wavefunction normalized
        self.compute_k_from_x()

    def _get_psi_x(self):
        return (self.psi_mod_x * np.exp(1j * self.k[0] * self.x)
                * np.sqrt(2 * np.pi) / self.dx)

    def _set_psi_k(self, psi_k):
        assert psi_k.shape == self.x.shape
        self.psi_mod_k = psi_k * np.exp(1j * self.x[0] * self.dk
                                        * np.arange(self.N))
        self.compute_x_from_k()
        self.compute_k_from_x()

    def _get_psi_k(self):
        return self.psi_mod_k * np.exp(-1j * self.x[0] * self.dk
                                       * np.arange(self.N))

    def _get_dt(self):
        return self.dt_

    def _set_dt(self, dt):
        # Precompute the split-step evolution factors; they only change
        # when dt changes.
        assert dt != 0
        if dt != self.dt_:
            self.dt_ = dt
            self.x_evolve_half = np.exp(-0.5 * 1j * self.V_x
                                        / self.hbar * self.dt)
            self.x_evolve = self.x_evolve_half * self.x_evolve_half
            self.k_evolve = np.exp(-0.5 * 1j * self.hbar / self.m
                                   * (self.k * self.k) * self.dt)

    def _get_norm(self):
        return self.wf_norm(self.psi_mod_x)

    psi_x = property(_get_psi_x, _set_psi_x)
    psi_k = property(_get_psi_k, _set_psi_k)
    norm = property(_get_norm)
    dt = property(_get_dt, _set_dt)

    def compute_k_from_x(self):
        self.psi_mod_k = fftpack.fft(self.psi_mod_x)

    def compute_x_from_k(self):
        self.psi_mod_x = fftpack.ifft(self.psi_mod_k)

    def wf_norm(self, wave_fn):
        """
        Returns the norm of a wave function.

        Parameters
        ----------
        wave_fn : array
            Length-N array of the wavefunction in the position representation
        """
        assert wave_fn.shape == self.x.shape
        return np.sqrt((abs(wave_fn) ** 2).sum() * 2 * np.pi / self.dx)

    def solve(self, dt, Nsteps=1, eps=1e-3, max_iter=1000):
        """
        Propagate the Schrodinger equation forward in imaginary
        time to find the ground state.

        Parameters
        ----------
        dt : float
            The small time interval over which to integrate
        Nsteps : float, optional
            The number of intervals to compute (default = 1)
        eps : float
            The criterion for convergence applied to the norm (default = 1e-3)
        max_iter : float
            Maximum number of iterations (default = 1000)
        """
        eps = abs(eps)
        assert eps > 0
        t0 = self.t
        old_psi = self.psi_x
        d_psi = 2 * eps
        num_iter = 0
        while (d_psi > eps) and (num_iter <= max_iter):
            num_iter += 1
            self.time_step(-1j * dt, Nsteps)  # imaginary-time propagation
            d_psi = self.wf_norm(self.psi_x - old_psi)
            old_psi = 1. * self.psi_x
        self.t = t0  # imaginary-time evolution must not advance real time

    def time_step(self, dt, Nsteps=1):
        """
        Perform a series of time-steps via the time-dependent Schrodinger
        Equation.

        Parameters
        ----------
        dt : float
            The small time interval over which to integrate
        Nsteps : float, optional
            The number of intervals to compute.  The total change in time at
            the end of this method will be dt * Nsteps (default = 1)
        """
        assert Nsteps >= 0
        self.dt = dt
        if Nsteps > 0:
            # Half-step in x, then alternate full k/x steps, ending with a
            # final half-step in x (standard split-operator scheme).
            self.psi_mod_x *= self.x_evolve_half
            for _ in range(Nsteps - 1):
                self.compute_k_from_x()
                self.psi_mod_k *= self.k_evolve
                self.compute_x_from_k()
                self.psi_mod_x *= self.x_evolve
            self.compute_k_from_x()
            self.psi_mod_k *= self.k_evolve
            self.compute_x_from_k()
            self.psi_mod_x *= self.x_evolve_half
            self.compute_k_from_x()
            self.psi_mod_x /= self.norm  # re-normalize after the sweep
            self.compute_k_from_x()
            self.t += dt * Nsteps
######################################################################
# Helper functions for gaussian wave-packets
def gauss_x(x, a, x0, k0):
    """
    a gaussian wave packet of width a, centered at x0, with momentum k0
    """
    amplitude = (a * np.sqrt(np.pi)) ** (-0.5)
    envelope = -0.5 * ((x - x0) * 1. / a) ** 2
    return amplitude * np.exp(envelope + 1j * x * k0)
def gauss_k(k, a, x0, k0):
    """
    analytical fourier transform of gauss_x(x), above
    """
    amplitude = (a / np.sqrt(np.pi)) ** 0.5
    phase = -0.5 * (a * (k - k0)) ** 2 - 1j * (k - k0) * x0
    return amplitude * np.exp(phase)
######################################################################
# Utility functions for running the animation
def theta(x):
    """
    Heaviside step function: 0.0 where x <= 0 and 1.0 where x > 0.
    Accepts scalars or array-likes and always returns a float64 ndarray.
    """
    mask = np.asarray(x) > 0
    return mask.astype(np.float64)
def square_barrier(x, width, height):
    """
    A rectangular potential barrier of the given height spanning
    0 < x <= width, built from two shifted step functions.
    """
    rising_edge = theta(x)
    falling_edge = theta(x - width)
    return height * (rising_edge - falling_edge)
######################################################################
# Create the animation
# specify time steps and duration
dt = 0.01
N_steps = 50
t_max = 120
frames = int(t_max / float(N_steps * dt))
# specify constants
hbar = 1.0 # planck's constant
m = 1.9 # particle mass
# specify range in x coordinate
N = 2 ** 11
dx = 0.1
x = dx * (np.arange(N) - 0.5 * N)
# specify potential
V0 = 1.5
L = hbar / np.sqrt(2 * m * V0)
a = 3 * L
x0 = -60 * L
V_x = square_barrier(x, a, V0)
V_x[x < -98] = 1E6
V_x[x > 98] = 1E6
# specify initial momentum and quantities derived from it
p0 = np.sqrt(2 * m * 0.2 * V0)
dp2 = p0 * p0 * 1. / 80
d = hbar / np.sqrt(2 * dp2)
k0 = p0 / hbar
v0 = p0 / m
psi_x0 = gauss_x(x, d, x0, k0)
# define the Schrodinger object which performs the calculations
S = Schrodinger(x=x,
psi_x0=psi_x0,
V_x=V_x,
hbar=hbar,
m=m,
k0=-28)
######################################################################
# Set up plot
fig = plt.figure('Quantum Tunneling')
# plotting limits
xlim = (-100, 100)
klim = (-5, 5)
# top axes show the x-space data
ymin = 0
ymax = V0
ax1 = fig.add_subplot(211, xlim=xlim,
ylim=(ymin - 0.2 * (ymax - ymin),
ymax + 0.2 * (ymax - ymin)))
psi_x_line, = ax1.plot([], [], c='r', label=r'$|\psi(x)|$')
V_x_line, = ax1.plot([], [], c='k', label=r'$V(x)$')
center_line = ax1.axvline(0, c='k', ls=':', label=r"$x_0 + v_0t$")
title = ax1.set_title("")
ax1.legend(prop=dict(size=12))
ax1.set_xlabel('$x$')
ax1.set_ylabel(r'$|\psi(x)|$')
# bottom axes show the k-space data
ymin = abs(S.psi_k).min()
ymax = abs(S.psi_k).max()
ax2 = fig.add_subplot(212, xlim=klim,
ylim=(ymin - 0.2 * (ymax - ymin),
ymax + 0.2 * (ymax - ymin)))
psi_k_line, = ax2.plot([], [], c='r', label=r'$|\psi(k)|$')
p0_line1 = ax2.axvline(-p0 / hbar, c='k', ls=':', label=r'$\pm p_0$')
p0_line2 = ax2.axvline(p0 / hbar, c='k', ls=':')
mV_line = ax2.axvline(np.sqrt(2 * V0) / hbar, c='k', ls='--',
label=r'$\sqrt{2mV_0}$')
ax2.legend(prop=dict(size=12))
ax2.set_xlabel('$k$')
ax2.set_ylabel(r'$|\psi(k)|$')
V_x_line.set_data(S.x, S.V_x)
######################################################################
# Functions to Animate the plot
def init():
    """Initializer for FuncAnimation: blank out every animated artist."""
    # Clearing the line data gives blitting a clean first frame.
    psi_x_line.set_data([], [])
    V_x_line.set_data([], [])
    center_line.set_data([], [])
    psi_k_line.set_data([], [])
    title.set_text("")
    # Return every artist that will be redrawn, as blitting requires.
    return (psi_x_line, V_x_line, center_line, psi_k_line, title)
def animate(i):
    """Advance the simulation by N_steps and refresh the plot artists.

    Parameters
    ----------
    i : int
        Frame index supplied by FuncAnimation (unused; the Schrodinger
        object keeps its own time state).
    """
    S.time_step(dt, N_steps)
    # |psi(x)| is scaled by 4 purely for visibility against V(x).
    psi_x_line.set_data(S.x, 4 * abs(S.psi_x))
    V_x_line.set_data(S.x, S.V_x)
    # Classical expectation of the packet center: x0 + v0 * t.
    center_line.set_data(2 * [x0 + S.t * p0 / m], [0, 1])
    psi_k_line.set_data(S.k, abs(S.psi_k))
    return (psi_x_line, V_x_line, center_line, psi_k_line, title)
# call the animator.
# blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=frames, interval=30, blit=True)
# uncomment the following line to save the video in mp4 format. This
# requires either mencoder or ffmpeg to be installed on your system
# anim.save('schrodinger_barrier.mp4', fps=15,
# extra_args=['-vcodec', 'libx264'])
plt.tight_layout()
plt.show() | akapet00/schrodinger | src/scripts/quantum_tunneling.py | quantum_tunneling.py | py | 11,191 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "seaborn.set",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line... |
38265076029 | from collections import OrderedDict
import fnmatch
import re
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
DEFAULT_CONFIGS = OrderedDict({
'ckdn': {
'metric_opts': {
'type': 'CKDN',
},
'metric_mode': 'FR',
},
'lpips': {
'metric_opts': {
'type': 'LPIPS',
'net': 'alex',
'version': '0.1',
},
'metric_mode': 'FR',
'lower_better': True,
},
'lpips-vgg': {
'metric_opts': {
'type': 'LPIPS',
'net': 'vgg',
'version': '0.1',
},
'metric_mode': 'FR',
'lower_better': True,
},
'dists': {
'metric_opts': {
'type': 'DISTS',
},
'metric_mode': 'FR',
'lower_better': True,
},
'ssim': {
'metric_opts': {
'type': 'SSIM',
'downsample': False,
'test_y_channel': True,
},
'metric_mode': 'FR',
},
'psnr': {
'metric_opts': {
'type': 'PSNR',
'test_y_channel': False,
},
'metric_mode': 'FR',
},
'fsim': {
'metric_opts': {
'type': 'FSIM',
'chromatic': True,
},
'metric_mode': 'FR',
},
'ms_ssim': {
'metric_opts': {
'type': 'MS_SSIM',
'downsample': False,
'test_y_channel': True,
'is_prod': True,
},
'metric_mode': 'FR',
},
'vif': {
'metric_opts': {
'type': 'VIF',
},
'metric_mode': 'FR',
},
'gmsd': {
'metric_opts': {
'type': 'GMSD',
'test_y_channel': True,
},
'metric_mode': 'FR',
'lower_better': True,
},
'nlpd': {
'metric_opts': {
'type': 'NLPD',
'channels': 1,
'test_y_channel': True,
},
'metric_mode': 'FR',
'lower_better': True,
},
'vsi': {
'metric_opts': {
'type': 'VSI',
},
'metric_mode': 'FR',
},
'cw_ssim': {
'metric_opts': {
'type': 'CW_SSIM',
'channels': 1,
'level': 4,
'ori': 8,
'test_y_channel': True,
},
'metric_mode': 'FR',
},
'mad': {
'metric_opts': {
'type': 'MAD',
'test_y_channel': True,
},
'metric_mode': 'FR',
'lower_better': True,
},
'niqe': {
'metric_opts': {
'type': 'NIQE',
'test_y_channel': True,
},
'metric_mode': 'NR',
'lower_better': True,
},
'ilniqe': {
'metric_opts': {
'type': 'ILNIQE',
},
'metric_mode': 'NR',
'lower_better': True,
},
'brisque': {
'metric_opts': {
'type': 'BRISQUE',
'test_y_channel': True,
},
'metric_mode': 'NR',
'lower_better': True,
},
'nrqm': {
'metric_opts': {
'type': 'NRQM',
},
'metric_mode': 'NR',
},
'pi': {
'metric_opts': {
'type': 'PI',
},
'metric_mode': 'NR',
'lower_better': True,
},
'musiq': {
'metric_opts': {
'type': 'MUSIQ',
'pretrained': 'koniq10k'
},
'metric_mode': 'NR',
},
'musiq-ava': {
'metric_opts': {
'type': 'MUSIQ',
'pretrained': 'ava'
},
'metric_mode': 'NR',
},
'musiq-koniq': {
'metric_opts': {
'type': 'MUSIQ',
'pretrained': 'koniq10k'
},
'metric_mode': 'NR',
},
'musiq-paq2piq': {
'metric_opts': {
'type': 'MUSIQ',
'pretrained': 'paq2piq'
},
'metric_mode': 'NR',
},
'musiq-spaq': {
'metric_opts': {
'type': 'MUSIQ',
'pretrained': 'spaq'
},
'metric_mode': 'NR',
},
'nima': {
'metric_opts': {
'type': 'NIMA',
'pretrained': 'ava'
},
'metric_mode': 'NR',
},
'pieapp': {
'metric_opts': {
'type': 'PieAPP',
},
'metric_mode': 'FR',
'lower_better': True,
},
'paq2piq': {
'metric_opts': {
'type': 'PAQ2PIQ',
},
'metric_mode': 'NR',
},
'dbcnn': {
'metric_opts': {
'type': 'DBCNN',
'pretrained': 'koniq'
},
'metric_mode': 'NR',
},
})
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def list_models(filter='', exclude_filters=''):
    """Return the list of available model names in natural sort order.

    Args:
        filter (str or list[str]): Wildcard pattern(s) (fnmatch syntax)
            selecting models to include.  Empty means "all models".
        exclude_filters (str or list[str]): Wildcard pattern(s) removing
            models after the include step.

    Returns:
        list[str]: Matching model names, naturally sorted.

    Example:
        list_models('*ssim*') -- returns all models whose name contains 'ssim'
    """
    all_models = DEFAULT_CONFIGS.keys()
    if filter:
        # Accept a single pattern or a sequence of patterns.
        include_filters = filter if isinstance(filter, (tuple, list)) else [filter]
        models = set()
        for pattern in include_filters:
            models.update(fnmatch.filter(all_models, pattern))
    else:
        models = set(all_models)
    if exclude_filters:
        if not isinstance(exclude_filters, (tuple, list)):
            exclude_filters = [exclude_filters]
        for pattern in exclude_filters:
            models.difference_update(fnmatch.filter(models, pattern))
    return sorted(models, key=_natural_key)
| Sskun04085/IQA_PyTorch | pyiqa/default_model_configs.py | default_model_configs.py | py | 6,004 | python | hi | code | 0 | github-code | 36 | [
{
"api_name": "collections.OrderedDict",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "fnmatch.filter",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "fnmatch.filter",
... |
16968956587 | # -*- coding: utf-8 -*-
from django.template.loader import render_to_string
from django.contrib.admin.utils import quote
def get_mptt_admin_node_template(instance):
    """
    Get MPTT admin node template name by model instance.

    :param instance: instance of mptt model
    :return: template name
    """
    model_name = type(instance).__name__.lower()
    return 'edw/admin/mptt/_{}_node.html'.format(model_name)
def mptt_admin_node_info_update_with_template(admin_instance, template, instance, node_info, context=None):
    '''
    Update MPTT admin node with a node label rendered from an html template.

    :param admin_instance: mptt admin instance
    :param template: template name to render
    :param instance: instance of mptt model
    :param node_info: jstree node info dict, updated in place
    :param context: additional context for the render (no longer mutated)
    :return: none
    '''
    pk_attname = admin_instance.model._meta.pk.attname
    pk = quote(getattr(instance, pk_attname))
    # Build a fresh context dict: the previous mutable-default argument
    # (context={}) was shared between calls, and a caller-supplied dict
    # was mutated by the update() below.
    render_context = dict(context) if context else {}
    render_context.update({
        'instance': instance,
        'node_info': node_info,
        'app_label': instance._meta.app_label.lower()
    })
    label = render_to_string(template, render_context)
    # NOTE(review): `pk` is already quoted above, so quote(pk) applies the
    # admin quoting twice -- kept as-is to preserve existing URLs; verify.
    node_info.update(
        url=admin_instance.get_admin_url('change', (quote(pk),)),
        move_url=admin_instance.get_admin_url('move', (quote(pk),)),
        label=label,
    )
| infolabs/django-edw | backend/edw/admin/mptt/utils.py | utils.py | py | 1,309 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "django.contrib.admin.utils.quote",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.template.loader.render_to_string",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.utils.quote",
"line_number": 38,
"usage_typ... |
10346160454 | import pandas as pd
from pulp import *
import itertools
df = pd.read_excel('beers.xlsx')
costs = df.\
assign(Warehouse_to_Bar = lambda x: x.Warehouse_to_Bar.astype(str)).\
set_index('Warehouse_to_Bar').\
to_dict()
warehouses = ['A','B']
bars = [str(x) for x in range(1,6)]
supply_ = [1000,4000]
demand_ = [500,900,1800,200,700]
supply = dict(zip(warehouses,supply_))
demand = dict(zip(bars, demand_))
routes = list(itertools.product(warehouses,bars))
'''
dv : how many beers for every warehouse - bar pair
obj : minimize dv * cost
1) supply : sum of beers leaving a warehouse <= supply
2) demand : sum of beers reaching a bar == demand
'''
# instantiate minimization problem
prob = LpProblem('Beer_Transportation', LpMinimize)
# define decision variables
decision_vars = LpVariable.dicts('routes',(warehouses,bars),0,None,LpInteger)
# define objective function
prob += lpSum([decision_vars[w][b] * costs[w][b] for w,b in routes])
# define supply constrains
for w in warehouses:
prob += lpSum([decision_vars[w][b] for b in bars]) <= supply[w]
# define demand constrains
for b in bars:
prob += lpSum([decision_vars[w][b] for w in warehouses]) >= demand[b]
# solve problem
prob.solve()
# The status of the solution is printed to the screen
print("Status:", LpStatus[prob.status])
# print out solution
for w in warehouses:
for b in bars:
if value(decision_vars[w][b]):
print(f'from {w} to {b} - {value(decision_vars[w][b])}')
| Takfes/notes-operations-research | src/A.misc/03.pulp-transportation/pulp-transportation.py | pulp-transportation.py | py | 1,493 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_excel",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 21,
"usage_type": "call"
}
] |
30731206475 | #######################################################################
# Necessaria a instalacao do biopython (pip install biopython)
from Bio import pairwise2
from Bio.pairwise2 import format_alignment
from Bio.Seq import Seq
def alinhamento(seqt, numseq):
    """Globally align consecutive pairs of DNA sequences with Biopython.

    The alignment scoring parameters are derived from the author's names
    converted to numbers through a fixed keyboard-based cipher, as
    required by the original assignment.

    :param seqt: list of sequences; consecutive pairs (seqt[k], seqt[k+1])
        are aligned and replaced in place by their aligned versions
    :param numseq: number of sequences in seqt
    :return: seqt with a final "Score Total: ..." string appended
    """
    nome1 = "guilherme"
    nome2 = "costa"
    nome3 = "oliveira"
    # Keyboard-layout cipher: each letter maps to a fixed number.
    dicionario = {"q":1, "w":2, "e":3, "r":4, "t":5, "y":6, "u":7, "i":8, "o":9, "p":10, "á":11, "ã":12,
                  "a":11, "s":9, "d":7, "f":5, "g":3, "h":1, "j":11, "k":9, "l":7, "ç":5, "é":3, "í":1,
                  "z":2, "x":4, "c":6, "v":8, "b":10, "n":12, "m":2, "ó":4, "õ":6, "ô":8, "ẫ":10, "ê":12}
    # Helper that converts a name into its numeric cipher form.
    def converte(x):
        for l in dicionario:
            x = x.replace(l, str(dicionario[l]))
        return x
    nome1 = converte(nome1)  # the first name
    nome1 = int(nome1)
    resto1 = nome1 % 3
    # Match (alpha), mismatch (beta) and gap-open (delta) scores are
    # chosen from the first name modulo 3.
    if resto1 == 0:
        alpha = 1
        beta = 0
        delta = -1
    elif resto1 == 1:
        alpha = 2
        beta = 0
        delta = -1
    elif resto1 == 2:
        alpha = 1
        beta = 0
        delta = -2
    nome2 = converte(nome2)
    nome2 = int(nome2)  # the last name
    pref_gap = nome2 % 3
    nome3 = converte(nome3)
    nome3 = int(nome3)  # the middle name
    gap_js = nome3 % 2
    # NOTE(review): pref_gap and gap_js are computed but never used below.
    score = 0
    scoreaux = 0
    count1 = 0
    count2 = 0
    n = numseq  # number of sequences
    k = 0
    scoretotal = 0
    while n > 0:
        if n == numseq:
            # 0.5 is subtracted from the score for every consecutive gap, so
            # alignments with separated gaps can reach a higher score; this
            # satisfies the requirement that gaps be kept apart.
            alignments = pairwise2.align.globalms(seqt[k], seqt[k + 1], alpha, beta, delta, -0.5)
            for alignment in alignments:
                if count1 == 0:
                    # First candidate: take it as the provisional best.
                    seqt[k] = alignment.seqA
                    seqt[k + 1] = alignment.seqB
                    score = alignment.score
                    count1 = count1 + 1
                else:
                    if alignment.score >= score:
                        # Keep the highest-scoring alignment seen so far.
                        seqt[k] = alignment.seqA
                        seqt[k + 1] = alignment.seqB
                        score = alignment.score
            n = n - 2
            k = k + 1
            scoretotal = scoretotal + alignment.score
        else:
            alignments = pairwise2.align.globalms(seqt[k], seqt[k + 1], alpha, beta, delta, -0.5)
            for alignment in alignments:
                if count2 == 0:
                    seqt[k] = alignment.seqA
                    seqt[k + 1] = alignment.seqB
                    scoreaux = alignment.score
                    count2 = count2 + 1
                else:
                    if alignment.score >= scoreaux:
                        seqt[k] = alignment.seqA
                        seqt[k + 1] = alignment.seqB
                        scoreaux = alignment.score
            n = n - 2
            k = k + 1
            scoretotal = scoretotal + alignment.score
    scoretotal = "Score Total: " + str(scoretotal)
    seqt.append(scoretotal)
    return seqt
{
"api_name": "Bio.pairwise2.align.globalms",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "Bio.pairwise2.align",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "Bio.pairwise2",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "B... |
2956310970 | import socket
import struct
import subprocess
import logging
from datetime import datetime
# Configuração do cliente
MULTICAST_IP = '224.0.0.1'
MULTICAST_PORT = 5004
CHUNK_SIZE = 1472 # Tamanho do pacote incluindo 4 bytes para o contador
CLIENT_INTERFACE_IP = '0.0.0.0' # Use o IP de interface apropriado se necessário
PACKETS_RECEIVED = 0
# Configuração do logging
log_filename = datetime.now().strftime("cliente_%H%M%S.txt")
logging.basicConfig(filename=log_filename,
filemode='w',
level=logging.DEBUG,
format='CLIENTE - %(asctime)s - %(levelname)s - %(message)s')
# Criar um socket UDP
client_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
logging.info("Socket criado.")
# Permitir múltiplos clientes na mesma máquina (para fins de teste)
client_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
logging.info("Socket configurado para permitir múltiplos clientes.")
# Vincular ao endereço do servidor
client_sock.bind((CLIENT_INTERFACE_IP, MULTICAST_PORT))
logging.info(f"Socket vinculado a {CLIENT_INTERFACE_IP}:{MULTICAST_PORT}.")
# Dizer ao sistema operacional para adicionar o socket ao grupo multicast
group = socket.inet_aton(MULTICAST_IP) + socket.inet_aton(CLIENT_INTERFACE_IP)
client_sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, group)
logging.info(f"Socket adicionado ao grupo multicast {MULTICAST_IP}.")
# Inicializar contadores
expected_packet_counter = None
lost_packets = 0
out_of_order_packets = 0
# Preparar comando subprocess para VLC
vlc_command = "vlc -" # O traço '-' diz ao VLC para aceitar entrada do stdin
# Iniciar VLC como um subprocesso
vlc_process = subprocess.Popen(["vlc", "fd://0"], stdin=subprocess.PIPE, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
logging.info("Processo VLC iniciado.")
logging.info("Iniciando loop de recebimento de pacotes.")
try:
while True:
# Receber pacote
data, address = client_sock.recvfrom(CHUNK_SIZE)
# Verificar pacote "fim-de-stream"
if data[4:] == b'END_OF_STREAM':
PACKETS_RECEIVED += 1
logging.info("Fim do stream detectado, redefinindo contador.")
expected_packet_counter = None
continue
# Extrair contador de pacotes
packet_counter, = struct.unpack('>I', data[:4])
video_data = data[4:]
# Se for o primeiro pacote ou se for o pacote esperado
if expected_packet_counter is None or packet_counter == expected_packet_counter:
PACKETS_RECEIVED += 1
# Atualiza o contador esperado para o próximo pacote
if expected_packet_counter is None:
logging.info(f"Primeiro pacote recebido com contador {packet_counter}")
expected_packet_counter = (packet_counter + 1) % (2 ** 32)
else:
if packet_counter < expected_packet_counter:
PACKETS_RECEIVED += 1
# Pacote fora de ordem
out_of_order_packets += 1
logging.warning(f"Pacote fora de ordem. Esperado: {expected_packet_counter}, Recebido: {packet_counter}")
else:
# Pacotes perdidos
lost_packets += packet_counter - expected_packet_counter
expected_packet_counter = (packet_counter + 1) % (2 ** 32)
if lost_packets > 0: # Registra apenas se houver pacotes perdidos
logging.warning(f"Pacotes perdidos. Esperado: {expected_packet_counter-1}, Recebido: {packet_counter}")
# Definir o próximo contador de pacote esperado
expected_packet_counter = (packet_counter + 1) % (2 ** 32)
# Escrever os dados do vídeo no stdin do VLC
vlc_process.stdin.write(video_data)
except KeyboardInterrupt:
logging.info("Cliente encerrando por KeyboardInterrupt.")
logging.info(f"Pacotes recebidos: {PACKETS_RECEIVED}")
finally:
# Fechar processo VLC
if vlc_process:
vlc_process.terminate()
logging.info("Processo VLC terminado.")
# Fechar o socket
client_sock.close()
logging.info("Socket fechado.")
# Logar estatísticas finais
logging.info(f"Total de pacotes perdidos: {lost_packets}. Total de pacotes fora de ordem: {out_of_order_packets}.")
| gpdolzan/R2LAST | cliente.py | cliente.py | py | 4,325 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "logging.basicConfig",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.D... |
29394077350 | import cv2
import numpy as np
import glob
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import numpy as np
from time import sleep
dis_test_label = np.load("D:/PythonFile/NestProject/Nest_Model/pos_process/test_data/dis_test_data_complex_exp_shuffle_3.npy")
dis_test_predict = np.load('D:/PythonFile/NestProject/Nest_Model/pos_process/prediction_data/predict_data_complex_exp_shuffle_3.npy')
dis_train_label = np.load("D:/NestData/3tx-32chirp-jaco-55times_all/data_usage/radar_pos_label_deleted.npy")
dis_train_predict = np.load('D:/PythonFile/NestProject/Nest_Model/pos_process/prediction_data/predict_data_complex_exp_shuffle_4(train).npy')
# dis_label = np.concatenate((label1, label2))
print(dis_train_label.shape, dis_test_predict.shape)
app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.show()
g = gl.GLGridItem()
w.addItem(g)
colors_1 = [1.0,0,0,0.5]
colors_2 = [0,1.0,0,0.5]
# dis_test_predict = dis_test_predict[:,0,:]
dis_test_label = dis_test_label/100
dis_test_predict = dis_test_predict/100
dis_train_label = dis_train_label/100
dis_train_predict = dis_train_predict/100
dis_train_label = dis_train_label[:500]
# dis_test_label = dis_test_label[:,1:3]
# dis_predict = dis_predict[:,1:3]
# print(dis_test_label.shape)
# sp0 = gl.GLScatterPlotItem(pos=dis_test_label[:], color=colors_1)
# w.addItem(sp0)
# sp1 = gl.GLScatterPlotItem(pos=dis_test_predict[:])
# w.addItem(sp1)
sp2 = gl.GLScatterPlotItem(pos=dis_train_label[:], color=colors_2)
w.addItem(sp2)
# sp3 = gl.GLScatterPlotItem(pos=dis_train_predict[:])
# w.addItem(sp3)
# i = 0
# def update():
# global i
# sp2 = gl.GLScatterPlotItem(pos=dis_train_label[i], color=colors_1)
# w.addItem(sp2)
# # sp3 = gl.GLScatterPlotItem(pos=dis_test_predict[i])
# # w.addItem(sp3)
# print(i)
# i += 1
# time = QtCore.QTimer()
# time.timeout.connect(update)
# time.start(5)
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| yangyongjx/LoRaNet | pos_process/reprojection_img.py | reprojection_img.py | py | 2,149 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 13,
... |
73819276585 | from transformers import BertForTokenClassification, BertJapaneseTokenizer, get_linear_schedule_with_warmup
from flask import Flask, render_template, request
import argparse
import json
from predict import predict
import torch
import sys
from pathlib import Path
base_dir = Path(__file__).resolve().parents[1]
sys.path.append(str(base_dir))
from utils import iob2json
device = 'cuda' if torch.cuda.is_available() else 'cpu'
app = Flask(__name__)
@app.route("/", methods=['POST'])
def index():
    """Tokenize the POSTed sentences, run NER prediction, and return
    the recognized entities as JSON.

    Expects form field "text" holding a JSON object whose keys are
    "0", "1", ... and whose values are the input sentences.
    """
    word = request.form["text"]
    input_json = json.loads(word)
    # Restore sentence order from the integer-string keys.
    input_x = [input_json[str(i)] for i in range(len(input_json))]
    input_x = [tokenizer.tokenize(t) for t in input_x]
    # Prepend [CLS] as BERT expects, then map tokens to vocabulary ids.
    input_x = [tokenizer.convert_tokens_to_ids(['[CLS]'] + x) for x in input_x]
    tags = predict(model, input_x, device)
    # Map predicted label ids back to IOB tag strings.
    labels = [[id2label[t] for t in tag] for tag in tags]
    # Drop the leading [CLS] token before pairing tokens with labels.
    # NOTE(review): labels may still include the [CLS] position while the
    # tokens drop it -- verify predict()'s output alignment.
    input_x = [tokenizer.convert_ids_to_tokens(t)[1:] for t in input_x]
    output = [zip(x, l) for x, l in zip(input_x, labels)]
    output = [iob2json.decode_iob(i) for i in output]
    return iob2json.create_json(output)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Train BERT')
    parser.add_argument('--model_dir', type=str, help='data path')
    args = parser.parse_args()
    # Character-level Japanese BERT tokenizer matching the trained model.
    tokenizer = BertJapaneseTokenizer.from_pretrained("bert-base-japanese-char")
    with open(args.model_dir + '/label_vocab.json', 'r') as f:
        label_vocab = json.load(f)
    # Inverse mapping: label id -> IOB tag string.
    id2label = {v:k for k, v in label_vocab.items()}
    model = BertForTokenClassification.from_pretrained('bert-base-japanese-char', num_labels=len(label_vocab))
    model_path = args.model_dir + '/final.model'
    model.load_state_dict(torch.load(model_path))
    # Serve the prediction endpoint defined above on all interfaces.
    app.run(port='8000', host='0.0.0.0', debug=True)
| ujiuji1259/NER | BERT/api.py | api.py | py | 1,783 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.is_available",
... |
29101674414 | # Debugging script to see how much GPS signal bounced around
import csv
import math
import numpy as np
from matplotlib import pyplot as plt
'''
# distance between points
dx_between_pts = []
prev_lat, prev_long = 0, 0
with open('2_2_23_gps.csv', mode ='r') as f:
csv_f = csv.reader(f)
for i, line in enumerate(csv_f):
# column/header line (e.g. "time, lat, long, alt")
if i == 0:
continue
lat, long = float(line[1]), float(line[2])
if prev_lat != 0:
dlat = lat - prev_lat
dlong = long - prev_long
dx = (dlat**2 + dlong**2) ** .5
if not math.isnan(dx):
dx_between_pts.append(dx)
prev_lat = lat
prev_long = long
dx_min = min(dx_between_pts)
dx_max = max(dx_between_pts)
print("max: ", dx_max, " min: ", dx_min)
dx_sorted = np.sort(dx_between_pts)
plt.plot(dx_sorted)
assert dx_sorted[0] == dx_min, f"{dx_sorted[0]} != {dx_min}"
assert dx_sorted[-1] == dx_max, f"{dx_sorted[-1]} != {dx_max}"
plt.show()
'''
#################
### Phone GPS ###
#################
import gpxpy
import gpxpy.gpx
gpx_file = open('phone_gps.gpx', 'r')
gpx = gpxpy.parse(gpx_file)
prev_lat, prev_long = 0, 0
dx_between_pts = []
for track in gpx.tracks:
for segment in track.segments:
for point in segment.points:
lat, long = point.latitude, point.longitude
if prev_lat != 0:
dlat = lat - prev_lat
dlong = long - prev_long
dx = (dlat**2 + dlong**2) ** .5
if not math.isnan(dx):
dx_between_pts.append(dx)
prev_lat = lat
prev_long = long
dx_min = min(dx_between_pts)
dx_max = max(dx_between_pts)
print("max: ", dx_max, " min: ", dx_min)
dx_sorted = np.sort(dx_between_pts)
plt.plot(dx_sorted)
assert dx_sorted[0] == dx_min, f"{dx_sorted[0]} != {dx_min}"
assert dx_sorted[-1] == dx_max, f"{dx_sorted[-1]} != {dx_max}"
plt.show()
'''
# Creating histogram
fig, ax = plt.subplots(figsize = (10, 7))
min_dx = min(dx_between_pts)
max_dx = max(dx_between_pts)
bin_width = (max_dx - min_dx) / 5
ax.hist(dx_between_pts, bins = np.arange(min_dx, max_dx, bin_width))
# Show plot
plt.show()
'''
| bainro/jackal_melodic | plotGPS.py | plotGPS.py | py | 2,117 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "gpxpy.parse",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "math.isnan",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.sort",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_... |
30630423030 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 16 19:35:10 2020
@author: isaacparker
"""
#Load libraries
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
from scipy.stats import lognorm, gaussian_kde
# Specify font used in plots
font = 'Adobe Myungjo Std'
math_font = 'cm'
import matplotlib as mpl
mpl.style.use('seaborn-white')
mpl.rcParams['font.family'] = font
mpl.rcParams['font.size'] = 18
mpl.rcParams['mathtext.fontset'] = 'cm'
#LOAD DATA
JV_exp = np.loadtxt('perov_JV_exp.txt',delimiter=',')
JV_exp = JV_exp
v_sweep = np.linspace (0,1.2,100)
power_exp= JV_exp[:,100*2:100*3]*v_sweep
eff_exp = np.max(power_exp, axis=1)/0.98
exp_condition = pd.read_excel('prcess_label.xlsx',index_col=0)
exp_condition = exp_condition.values
#Stack data and order
X_data = np.concatenate([eff_exp.reshape(-1,1), exp_condition],axis= 1)
p_index = []
X_data_re=[]
for i in [70,90,110,130]:
for j in [2,4,8]:
idx = np.intersect1d(np.where(X_data[:,1]==i) ,np.where(X_data[:,2]==j))
X_data_re.append(X_data[idx,:])
X_data_re = np.vstack(X_data_re)
#Remove data to have same # of samples:
X_data_re = np.delete(X_data_re, [0,15,21,13,14,10,12,17,12,9,7,4], 0)
X_data_re = np.insert(X_data_re, 36, [3.88, 90, 2], axis=0)
X_data_re = np.delete(X_data_re, [106,107,108,96,110,112], 0)
X_data_re = np.insert(X_data_re, 143, [5.77, 130, 8], axis=0)
# Compute efficiency and normalize
df_X1 = pd.DataFrame(X_data_re, columns=['Efficiency','Temperature','Ratio'])
df_X = df_X1.copy()
max_eff = df_X['Efficiency'].max()
# Normalize
df_X['Efficiency'] = df_X['Efficiency'] / max_eff
# Get mean and variance for empirical distribution
X_mean = df_X['Efficiency'].mean()
eff_data = df_X['Efficiency']
log_norm_var = eff_data.std()
# Lognormal distribution histogram
np.random.seed(6)
logn = lognorm(s=1*log_norm_var, scale = 0.7*(1-X_mean))
sample = logn.rvs (size=500)
sample[sample>1]= 1
plt.figure()
plt.hist((1-sample)*20,50,
density=True,
edgecolor='white',
linewidth=1.2,
# color='mediumseagreen',
# alpha=0.5)
# color=(60/255, 110/255, 135/255, 0.8))
color='k',
alpha=0.5)
plt.xlabel(r'Solar Cell Efficiency $\eta$ [%]', size=18, fontname=font)
plt.ylabel(r'Probability $p(\eta)$', size=18, fontname=font)
plt.xlim(left=0, right=20)
plt.yticks(np.arange(0, 0.25, step=0.1))
density = gaussian_kde((1-sample)*20)
xs = np.linspace(0,20,50)
density.covariance_factor = lambda : 1*log_norm_var
density._compute_covariance()
plt.plot(xs,density(xs),
# color=(60/255, 110/255, 135/255))
# color='mediumseagreen')
color='k')
plt.tight_layout()
plt.savefig('Fig1.png',dpi=300)
plt.show()
| PV-Lab/Data-Driven-PV | Figure_1.py | Figure_1.py | py | 2,792 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "matplotlib.style.use",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.style",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "m... |
from matplotlib import pyplot as plt

# Topic: temperature changes over one month.
# Concepts exercised here:
# 1. supplying x and y data
# 2. drawing with plt.plot()
# 3. setting the figure size with plt.figure()
# 4. saving to disk with plt.savefig()
# 5. descriptive text: xlabel(), ylabel(), title()
# 6. adjusting the x/y tick spacing: xticks(), yticks()
# 7. line styles via linestyle=
# 8. marking special points
# 9. adding a watermark

# Set the figure size.
plt.figure(figsize=(16, 8), dpi=80)
# range is half-open: [2, 26) stepping by 2.
x = range(2, 26, 2)
y = [15, 13, 14.5, 17, 20, 25, 26, 26, 24, 22, 18, 15]
# Pass x and y to plot() to draw the line chart.
plt.plot(x, y)
# Set the tick positions on the x and y axes.
# plt.xticks(x)
# plt.xticks(range(2, 25))
num = []
for i in range(2, 25):
    num.append(i)
    num.append(i + 0.5)
# plt.xticks(num[::3])
num = [i / 2 for i in range(4, 50)]
plt.xticks(num)
plt.yticks(range(min(y), max(y) + 1))
# Save the figure to disk.
# plt.savefig("./SaveDate/p1.png")
# Display the figure.
plt.show()

# About range:
# In Python 3, range() does not return a list but a lazily evaluated
# iterable of type range ("lazy evaluation"), which saves both memory
# and time.  A range object only stores start, stop and step plus the
# rule for generating the sequence, so it does not occupy the memory a
# full list would; elements are produced one at a time as needed rather
# than materialized up front.  Use list(range(...)) if an actual list
# is required.
| srguf/DataAnalysis | matplotlib/折线图plot/单线/折线图.py | 折线图.py | py | 1,705 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "mat... |
22530303098 | import numpy as np
import pytest
from gym.spaces import Box, Discrete
from gym.wrappers import AtariPreprocessing, StepAPICompatibility
from tests.testing_env import GenericTestEnv, old_step_fn
class AleTesting:
    """A testing implementation for the ALE object in atari games."""

    # Observation spaces matching the native Atari screen resolution.
    grayscale_obs_space = Box(low=0, high=255, shape=(210, 160), dtype=np.uint8, seed=1)
    rgb_obs_space = Box(low=0, high=255, shape=(210, 160, 3), dtype=np.uint8, seed=1)

    def lives(self) -> int:
        """Returns the number of lives in the atari game."""
        return 1

    def getScreenGrayscale(self, buffer: np.ndarray):
        """Updates the buffer with a random grayscale observation."""
        buffer[...] = self.grayscale_obs_space.sample()

    def getScreenRGB(self, buffer: np.ndarray):
        """Updates the buffer with a random rgb observation."""
        buffer[...] = self.rgb_obs_space.sample()
class AtariTestingEnv(GenericTestEnv):
    """A testing environment to replicate the atari (ale-py) environments."""

    def __init__(self):
        # Native Atari screen (210x160 RGB) with three discrete actions;
        # step_fn=old_step_fn presumably emulates the old step API that
        # ale-py uses -- TODO confirm against GenericTestEnv.
        super().__init__(
            observation_space=Box(
                low=0, high=255, shape=(210, 160, 3), dtype=np.uint8, seed=1
            ),
            action_space=Discrete(3, seed=1),
            step_fn=old_step_fn,
        )
        # AtariPreprocessing reads env.ale for lives and screen buffers.
        self.ale = AleTesting()

    def get_action_meanings(self):
        """Returns the meanings of each of the actions available to the agent. First index must be 'NOOP'."""
        return ["NOOP", "UP", "DOWN"]
@pytest.mark.parametrize(
    "env, obs_shape",
    [
        # Raw testing env: native Atari screen shape.
        (AtariTestingEnv(), (210, 160, 3)),
        # Grayscale preprocessing drops the channel axis.
        (
            AtariPreprocessing(
                StepAPICompatibility(AtariTestingEnv(), output_truncation_bool=True),
                screen_size=84,
                grayscale_obs=True,
                frame_skip=1,
                noop_max=0,
            ),
            (84, 84),
        ),
        # RGB preprocessing keeps the 3-channel axis.
        (
            AtariPreprocessing(
                StepAPICompatibility(AtariTestingEnv(), output_truncation_bool=True),
                screen_size=84,
                grayscale_obs=False,
                frame_skip=1,
                noop_max=0,
            ),
            (84, 84, 3),
        ),
        # Grayscale with an explicit singleton channel axis.
        (
            AtariPreprocessing(
                StepAPICompatibility(AtariTestingEnv(), output_truncation_bool=True),
                screen_size=84,
                grayscale_obs=True,
                frame_skip=1,
                noop_max=0,
                grayscale_newaxis=True,
            ),
            (84, 84, 1),
        ),
    ],
)
def test_atari_preprocessing_grayscale(env, obs_shape):
    """Check the observation-space shape that AtariPreprocessing produces
    for each grayscale/newaxis configuration, and that reset/step emit
    observations contained in that space."""
    assert env.observation_space.shape == obs_shape
    # It is not possible to test the outputs as we are not using actual observations.
    # todo: update when ale-py is compatible with the ci
    env = StepAPICompatibility(
        env, output_truncation_bool=True
    )  # using compatibility wrapper since ale-py uses old step API
    obs, _ = env.reset(seed=0)
    assert obs in env.observation_space
    obs, _, _, _, _ = env.step(env.action_space.sample())
    assert obs in env.observation_space
    env.close()
@pytest.mark.parametrize("grayscale", [True, False])
@pytest.mark.parametrize("scaled", [True, False])
def test_atari_preprocessing_scale(grayscale, scaled, max_test_steps=10):
    """Every observation must stay within [0, 1] when scaled, else [0, 255]."""
    base_env = StepAPICompatibility(AtariTestingEnv(), output_truncation_bool=True)
    env = AtariPreprocessing(
        base_env,
        screen_size=84,
        grayscale_obs=grayscale,
        scale_obs=scaled,
        frame_skip=1,
        noop_max=0,
    )
    upper_bound = 1 if scaled else 255

    def in_range(observation):
        return np.all(0 <= observation) and np.all(observation <= upper_bound)

    obs, _ = env.reset()
    assert in_range(obs)
    # max_test_steps is an arbitrary walk length; `max_test_steps + 1` mirrors
    # the original `step_i <= max_test_steps` loop bound.
    for _ in range(max_test_steps + 1):
        obs, _, terminated, truncated, _ = env.step(env.action_space.sample())
        assert in_range(obs)
        if terminated or truncated:
            break
    env.close()
| openai/gym | tests/wrappers/test_atari_preprocessing.py | test_atari_preprocessing.py | py | 4,102 | python | en | code | 33,110 | github-code | 36 | [
{
"api_name": "gym.spaces.Box",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "gym.spaces.Box",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"lin... |
33997460968 | from fbchat import *
from fbchat.models import *
from Credentials import *
import json
import requests
import re
import os
import time
from threading import Thread
import socket
def finalVerification(url):
    """Return True if *url* looks like a Chegg homework-help question link.

    The original ran ``re.search`` plus an explicit if/else; the pattern
    contains no regex metacharacters, so a plain substring test is
    equivalent — and this version always returns a real bool.
    """
    return "homework-help" in url
def question_id(url):
    """Extract the trailing question id from a Chegg URL.

    Returns the last 'q' in the URL plus everything after it,
    e.g. '...-q12345' -> 'q12345'.

    Fixes two defects of the original scan-from-the-end loop:
    * a 'q' at index 0 was never matched (the loop condition was ``i > 0``),
      silently returning None;
    * a URL with no 'q' at all also fell through to an implicit None.
    Both cases now return the sentinel "QUESTION ID ERROR", matching the
    intent of the original (dead) except branch.
    """
    idx = url.rfind("q")
    if idx == -1:
        return "QUESTION ID ERROR"
    return url[idx:]
def verifyURL(url):
    """Return True when a HEAD request to *url* does not come back as 404."""
    try:
        status = requests.head(url).status_code
        print(status)
        if status == 404:
            print("Bad website.")
            return False
    except Exception:
        # Any network / parsing failure counts as a bad URL.
        print("Bad website")
        return False
    return True
def isAnswered(url):
    """Check whether a screenshot for this question already exists on disk."""
    target = question_id(url) + ".png"
    for _root, _dirs, files in os.walk("./screenshots"):
        if target in files:
            return True
    return False
class CustomClient(Client):
    """fbchat client that answers Chegg links with screenshots of solutions."""
    def onMessage(self, mid, author_id, message, message_object, thread_id, thread_type, ts, metadata, msg):
        """fbchat callback: handle one incoming Messenger message."""
        # Shared across callbacks: per-user question quotas and the quota window start.
        global daily_limit, start_time
        def respond(text, msgType=None):
            # Reply into the group thread, or directly to the author for 1:1 chats.
            if thread_type == thread_type.GROUP:
                if msgType is None:
                    self.send(Message(text=text), thread_id=thread_id, thread_type=thread_type)
                elif msgType == "IMAGE":
                    self.sendLocalImage(text, thread_id=thread_id, thread_type=thread_type)
            elif thread_type == thread_type.USER:
                if msgType is None:
                    self.send(Message(text=text), thread_id=author_id, thread_type=thread_type)
                elif msgType == "IMAGE":
                    self.sendLocalImage(text, thread_id=author_id, thread_type=thread_type)
        def collectPNG():
            # NOTE: `socket` here is the global UDP socket *instance* created in
            # __main__, not the socket module.
            global que_position, socket, recent_call
            que_position += 1
            print("Started a thread to collect {}".format(question_id(message)))
            respond("You have {} new questions left. Approximate retrieval time: {:.0F} seconds".format(daily_limit[author_id], que_position*10 + 1*max(0, 25-(time.time()-recent_call)) + (que_position-1)*25))
            # Wait for our turn in the queue and for a 25 s gap since the last request.
            while que_position-1 != 0 or time.time() - recent_call < 25:
                time.sleep(0.1)
            socket.sendto(message.encode(), ("127.0.0.1", 5000))
            recent_call = time.time()
            print("Request sent to AnswerMe")
            started = time.time()
            # Poll up to 15 s for the screenshot produced by the AnswerMe service.
            while time.time() - started < 15:
                if os.path.exists("./screenshots/" + question_id(message) + ".png"):
                    respond("./screenshots/" + question_id(message) + ".png", "IMAGE")
                    que_position -= 1
                    return
            respond("Error: Timed out.")
        # Reset every user's quota once 24 hours have elapsed.
        if time.time() - start_time > 86400:
            start_time = time.time()
            daily_limit = {}
        print(message_object)
        if author_id != self.uid:
            # Messages tagged with "CHEGG" carry the question URL after the tag.
            if re.search("CHEGG", message):
                message = message.replace("CHEGG", "").strip()
                if verifyURL(message) and finalVerification(message):
                    respond("Your question {} is being processed.".format(question_id(message)))
                    if isAnswered(message):
                        # Cached screenshot exists — reply immediately, no quota spent.
                        respond("The question has been identified in Steve's data base.")
                        respond("./screenshots/" + question_id(message) + ".png", "IMAGE")
                    elif author_id in daily_limit and daily_limit[author_id] > 0 or author_id not in daily_limit:
                        # First question today starts the user at a quota of 4.
                        if author_id not in daily_limit:
                            daily_limit[author_id] = 4
                        daily_limit[author_id] -= 1
                        Thread(target=collectPNG).start()
                    else:
                        respond(
                            "You have asked too many questions today. Please wait {:.2f} minute(s) to ask more questions!".format(
                                (86400 - (time.time() - start_time)) / 60))
                else:
                    respond("Invalid URL. Please type in a correct link.")
class MessengerBot:
    """Thin wrapper that manages a CustomClient session and its listen loop."""

    def __init__(self):
        self.client = None

    def login(self, username, password):
        """Log in, reusing cached session cookies from session.json when present."""
        if self.client is not None:
            return
        saved_cookies = None
        try:
            with open("session.json") as file:
                saved_cookies = json.load(file)
            print("Loading session cookies...")
        except FileNotFoundError:
            print("First time logging in...")
        self.client = CustomClient(username, password, session_cookies=saved_cookies)
        print("Is logged in? {}".format(self.client.isLoggedIn()))
        if saved_cookies is None:
            # Persist the fresh session so the next run can skip a full login.
            with open("session.json", "w") as file:
                json.dump(self.client.getSession(), file)

    def listen(self):
        print("Listening")
        self.client.listen()
if __name__ == "__main__":
    # Fix: the global name `socket` is rebound below to a socket *instance*
    # (collectPNG relies on that global), which shadowed the socket module.
    # In the original, any exception after the first iteration made
    # `socket.socket(...)` raise AttributeError, which the bare except
    # swallowed — a busy infinite loop that never restarted the bot.
    # Keep a reference to the module so every retry can build a new socket.
    _socket_module = socket
    while True:
        try:
            # UDP socket used by collectPNG to talk to the AnswerMe service.
            socket = _socket_module.socket(_socket_module.AF_INET, _socket_module.SOCK_DGRAM)
            que_position = 0
            start_time = time.time()
            recent_call = 0
            daily_limit = {}
            bot = MessengerBot()
            bot.login(username, passwd)
            bot.listen()
        except Exception:
            # Crash-restart loop: rebuild state and log in again.
            continue
| namdao2000/MessengerBot | MessengerBot.py | MessengerBot.py | py | 5,534 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.search",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.head",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 76,
... |
5275367948 | import os
import json
import psycopg2
import psycopg2.extras
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
# Sentence-embedding model used to vectorize goal texts.
model = SentenceTransformer('distilbert-base-nli-mean-tokens')
# Cosine-similarity threshold above which two goals count as potential matches.
alpha = 0.9
# Find pairs of goals whose sentence embeddings are nearly identical.
def my_calc_similarity(list_of_goals, list_of_ids):
    """Return, for each goal, the ids of other goals with cosine similarity > alpha.

    Also prints each potential match pair once (for j <= partner index).
    """
    embeddings = model.encode(list_of_goals)
    # Pairwise cosine-similarity matrix over all goal embeddings.
    scores = cosine_similarity(embeddings)
    matches = [[] for _ in list_of_goals]
    for j, row in enumerate(scores):
        # Zero out self-similarity so a goal never matches itself.
        row[j] = 0
        candidates = [i for i, v in enumerate(row) if v > alpha]
        for other in candidates:
            matches[j].append(list_of_ids[other])
            # Only announce each pair once (when j is the smaller index).
            if j > other:
                continue
            print("Potential Match: Goals ", j, " and ", other)
            print("Potential Match: Goal ", j, ": ", list_of_goals[j], ". and Goal ", other, ": ", list_of_goals[other])
    return matches
# NOTE(review): connect_to_db is not defined in this file — presumably imported
# or defined elsewhere; verify before running this module standalone.
conn = connect_to_db()
# DictCursor lets rows be indexed by column name ('goal', 'id') below.
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cur.execute("SELECT * FROM public.goals;")
rows = cur.fetchall()
cur_goals_list = [row['goal'] for row in rows]
cur_goal_ids = [row['id'] for row in rows]
match_ids = my_calc_similarity(cur_goals_list,cur_goal_ids)
| kcirtapfromspace/cloudfoundry_circleci | src/bert/cleaned_bert_similarity.py | cleaned_bert_similarity.py | py | 1,797 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sentence_transformers.SentenceTransformer",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.pairwise.cosine_similarity",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "psycopg2.extras",
"line_number": 37,
"usage_type": "a... |
4778253189 | import os
import tempfile
import pytest
import warnings
import numpy as np
import onnxruntime as ort
import torch
from torch import nn as nn
from typing import Optional, Union, Tuple, List
import transformer_engine.pytorch as te
from transformer_engine.common import recipe
import transformer_engine_extensions as tex
from transformer_engine.pytorch.cpp_extensions import gemm, fp8_gemm, gelu, cast_to_fp8, cast_from_fp8
from transformer_engine.pytorch.module.base import get_workspace
import transformer_engine.pytorch.cpp_extensions as texcpp
import transformer_engine.pytorch.softmax as softmax_defs
from transformer_engine.pytorch.utils import get_default_init_method
from transformer_engine.pytorch.export import is_in_onnx_export_mode
from transformer_engine.pytorch.fp8 import FP8GlobalStateManager
# Global test configuration knobs.
# Enable this to serialize test inputs and outputs to file (as a Polygraphy RunResults instance).
SAVE_TEST_IO = bool(int(os.getenv("NVTE_ONNX_EXPORT_SAVE_TEST_IO", "0")))
if SAVE_TEST_IO:
    # Polygraphy is only required when test I/O serialization is requested.
    from polygraphy.json import save_json
    from polygraphy.comparator import RunResults
# The directory where generated ONNX test models are stored.
NVTE_TEST_ARTIFACTS_DIR = os.environ.get('NVTE_TEST_ARTIFACTS_DIR')
NVTE_TEST_ARTIFACTS_DIR = NVTE_TEST_ARTIFACTS_DIR or os.path.join(tempfile.gettempdir(), "./gen_onnx_models")
# The directory where this file is stored.
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))
# ScaledUpperTriangMaskedSoftmax is exported via ONNX::Trilu which was introduced in opset 14.
TRILU_OPSET = 14
# Opset used in the ONNX files generated by the tests.
OPSET = 17
assert OPSET >= TRILU_OPSET
# Shared library implementing custom FP8 Q/DQ operators for ONNX Runtime (ORT).
ORT_CUSTOM_OPS_LIB = os.path.join(TESTS_DIR, "./libcustom_ort_fp8_qdq_ops.so")
# Skip marker for FP8 tests on devices without FP8 support.
fp8_available, reason_for_no_fp8 = FP8GlobalStateManager.is_fp8_available()
skip_FP8 = pytest.mark.skipif(not fp8_available, reason=reason_for_no_fp8)
# Activation and normalization variants exercised by the parametrized tests.
supported_activations = ["gelu", "relu", "reglu", "geglu", "swiglu"]
all_normalizations = ["LayerNorm", "RMSNorm"]
@pytest.fixture()
def seed_default_rng():
    """Reseed torch's default PRNG from a non-deterministic source.

    NOTE(review): ``torch.random.seed()`` seeds *non-deterministically*;
    despite the fixture's name, runs are not reproducible. True
    reproducibility would require ``torch.manual_seed(<fixed>)`` — confirm
    the intended behavior before changing it.
    """
    torch.random.seed()
@pytest.fixture()
def set_max_seq_len(max_seq_len=128):
    """Configure the maximum sequence length usable for attention masking."""
    os.environ["NVTE_ONNX_KVCACHE_MAX_SEQ_LEN"] = str(max_seq_len)
def create_fp8_recipe():
    """Build the delayed-scaling FP8 recipe shared by all export tests."""
    fp8_format = recipe.Format.E4M3
    return recipe.DelayedScaling(margin=0, interval=1, fp8_format=fp8_format)
def do_export(
    model: torch.nn.Module,
    inp: torch.Tensor,
    fname: str,
    use_fp8: bool=True,
    opset: int=OPSET,
    input_names: List[str]=None,
    output_names: List[str]=None,
    dynamic_axes: List[str]=None
):
    """Export `model` to ONNX under an (optional) FP8 autocast context.

    The model is moved to CUDA, put in eval mode, and traced with
    ``torch.onnx.export``; the file is written to NVTE_TEST_ARTIFACTS_DIR/fname.
    ``inp`` may be a single tensor or a list/tuple of tensors; entries that
    are None are dropped (together with their names) before export.
    """
    fp8_recipe = create_fp8_recipe()
    input_names = input_names or ["input"]
    output_names = output_names or ["output"]
    with torch.inference_mode(), te.fp8_autocast(enabled=use_fp8, fp8_recipe=fp8_recipe), warnings.catch_warnings():
        # Tracing emits noisy TracerWarnings; silence them for the export.
        warnings.filterwarnings(
            action='ignore',
            category=torch.jit.TracerWarning,
            module=r'.*'
        )
        model.cuda().eval()
        os.makedirs(NVTE_TEST_ARTIFACTS_DIR, exist_ok=True)
        fname = os.path.join(NVTE_TEST_ARTIFACTS_DIR, fname)
        # Normalize the input(s) to a tuple; one name per input is required.
        inps = inp if isinstance(inp, list) or isinstance(inp, tuple) else (inp,)
        assert len(inps) == len(input_names)
        # Drop names of inputs that are None (optional inputs not supplied).
        inds_to_del = [i for i in range(len(inps)) if inps[i] is None]
        input_names = [input_names[i] for i in range(len(inps)) if i not in inds_to_del]
        with te.onnx_export(True):
            torch.onnx.export(
                model,
                inps,
                fname,
                verbose=True,
                dynamic_axes=dynamic_axes,
                opset_version=opset,
                input_names=input_names,
                output_names=output_names,
                do_constant_folding=True,
                operator_export_type=torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH)
def to_numpy(tensor):
    """Convert a torch tensor to numpy (bfloat16 goes via float32); pass through non-tensors."""
    if not isinstance(tensor, torch.Tensor):
        return tensor
    if tensor.dtype == torch.bfloat16:
        # numpy has no bfloat16 — widen to float32 first.
        tensor = tensor.type(torch.float32)
    return tensor.detach().cpu().numpy()
def set_layer_scale(module: torch.nn.Module, scale: float, num_gemms: int):
    """Initialize the FP8 quantization scales in module"""
    NB_SCALES_PER_GEMM = 3 # One scale per: input, weights, and output GEMM tensors.
    nb_total_scales = num_gemms * NB_SCALES_PER_GEMM
    # Allocate the module's FP8 metadata for `num_gemms` GEMMs.
    module.fp8_init(num_gemms)
    # NOTE(review): scale is set to 1/scale and scale_inv to scale — the
    # opposite orientation of create_meta above; confirm which convention
    # `scale` is meant to follow here.
    module.fp8_meta["scaling_fwd"].scale = torch.ones(
        nb_total_scales, dtype=torch.float32, device="cuda") / scale
    module.fp8_meta["scaling_fwd"].scale_inv = torch.ones(
        nb_total_scales, dtype=torch.float32, device="cuda") * scale
def te_infer(model: torch.nn.Module, inps: Union[Tuple[torch.tensor], torch.tensor], is_fp8: bool):
    """Transformer Engine forward propagation.

    Runs `model` in inference mode under fp8_autocast and always returns
    the outputs as a tuple (single outputs are wrapped).
    """
    fp8_recipe = create_fp8_recipe()
    with torch.inference_mode(), te.fp8_autocast(enabled=is_fp8, fp8_recipe=fp8_recipe), warnings.catch_warnings():
        te_outputs = model(*inps if isinstance(inps, tuple) else (inps,))
        # Normalize to a tuple so callers can zip against ONNX outputs.
        if not isinstance(te_outputs, tuple):
            te_outputs = (te_outputs,)
        return te_outputs
def compare_outputs(onnx_outputs, te_outputs, atol, rtol, max_errors_printed, allow_cnt_errors, fname):
    """ Compare ORT and TE outputs.

    Raises ValueError when the number of elementwise mismatches (per the
    np.isclose criterion) exceeds `allow_cnt_errors`; otherwise only prints
    a diagnostic report of up to `max_errors_printed` diverging values.
    """
    assert len(onnx_outputs) == len(te_outputs)
    # Compare ORT and PyTorch outputs.
    for onnx_output, te_output in zip(onnx_outputs, te_outputs):
        # np.isclose: abs(a - b) <= (atol + rtol * abs(b))
        te_output = to_numpy(te_output)
        onnx_output = to_numpy(onnx_output)
        # Boolean mask of elements that are NOT close.
        ac = ~np.isclose(onnx_output, te_output, atol=atol, rtol=rtol)
        mismatches = ac.nonzero()
        mismatched_ids = [loc for loc in zip(*mismatches)]
        if mismatched_ids:
            # Log some information in case of error.
            print("*" * 100)
            nb_errors = len(mismatched_ids)
            nb_vals = min(nb_errors, max_errors_printed)
            print(f"Detected {nb_errors} diverging values (output shape={onnx_output.shape})")
            print(f"Showing first {nb_vals} errors (ONNX -- TE):")
            abs_err = np.abs(onnx_output - te_output)
            errors = abs_err[mismatches]
            for loc in mismatched_ids[:nb_vals]:
                ref = te_output[loc]
                print(f"{onnx_output[loc]} -- {te_output[loc]} err={abs_err[loc]} > {atol + rtol * abs(ref)}")
            print(f"Max error: {np.max(errors)}")
            # A small number of outliers is tolerated (kernel differences).
            if nb_errors > allow_cnt_errors:
                raise ValueError(f"Output validation of {fname} failed with {nb_errors} errors")
def serialize_inputs_outputs(
    fname: str,
    inputs: Union[Tuple[torch.Tensor], torch.Tensor],
    te_outputs: List[torch.Tensor],
    input_names: Optional[List[str]] = None,
    output_names: Optional[List[str]] = None,
):
    """Save test inputs/outputs beside the ONNX file (Polygraphy format).

    No-op unless SAVE_TEST_IO is enabled; None-valued inputs/outputs are
    skipped, mirroring do_export's input pruning.
    """
    if not SAVE_TEST_IO:
        return
    fname = os.path.join(NVTE_TEST_ARTIFACTS_DIR, fname)
    input_names = input_names or ["input"]
    output_names = output_names or ["output"]
    inputs = inputs if isinstance(inputs, list) or isinstance(inputs, tuple) else (inputs,)
    named_inputs = zip(input_names, inputs)
    input_data = [{k: v.cpu() for k, v in named_inputs if v is not None}]
    # "<fname without .onnx>_inputs.json" holds the input feed dict.
    json_fname = fname[:-len(".onnx")] + "_inputs.json"
    save_json(input_data, json_fname, description="custom input data")
    # "<fname without .onnx>_output.json" holds the reference TE outputs.
    json_fname = fname[:-len(".onnx")] + "_output.json"
    named_outputs = zip(output_names, te_outputs)
    output_data = {k: v.detach().cpu() for k, v in named_outputs if v is not None}
    custom_outputs = RunResults()
    custom_outputs.add([output_data], runner_name="custom_runner")
    custom_outputs.save(json_fname)
def validate_result(
    fname: str,
    inps: Union[Tuple[torch.Tensor], torch.Tensor],
    model: torch.nn.Module,
    atol: float=1.e-8, # np.isclose default atol
    rtol: float=1.e-5, # np.isclose default rtol
    max_errors_printed: int=10,
    is_fp8: bool=False,
    allow_cnt_errors: int=0,
    input_names: List[str]=None,
    output_names: List[str]=None,
    te_outputs: List[torch.Tensor]=None,
):
    """Compare the outputs of a Transformer Engine (TE) module vs the outputs of its ONNX
    representation using ONNX Runtime (ORT) and ensure they are close.
    The purpose of the output comparison is to validate that TE models are converted to
    their correct ONNX representation by testing that TE and ORT outputs match within some
    small threshold (allowing for finite precision errors).
    Argument `allow_cnt_errors` reduces test failure noise due to spurious errors by ignoring,
    a very small number (0-3) of outliers. This is fine to do because these outliers are due to
    small kernel implementation differences between TE and ORT and do not imply an incorrect ONNX
    representation (the tests assume both ORT or TE kernels are correct).
    Argument `te_outputs` can be used to provide pre-computed TE outputs.
    """
    def create_ort_session(fname: str, is_fp8: bool):
        def load_custom_ops(session_opts: ort.SessionOptions):
            """For FP8 validation with ORT we need to load our custom FP8 Q/DQ extension."""
            if not os.path.exists(ORT_CUSTOM_OPS_LIB):
                raise FileNotFoundError(f"Unable to find {ORT_CUSTOM_OPS_LIB}")
            session_opts.register_custom_ops_library(ORT_CUSTOM_OPS_LIB)
            print("registered custom FP8 Q/DQ ops!")
        """Create an ONNX Runtime session for validation."""
        # Prefer CUDA execution, falling back to CPU.
        kwargs = {"providers": ['CUDAExecutionProvider', 'CPUExecutionProvider']}
        if is_fp8:
            sess_options = ort.SessionOptions()
            load_custom_ops(sess_options)
            kwargs["sess_options"] = sess_options
        s = ort.InferenceSession(fname, **kwargs)
        return s
    def create_ort_input_dict(session, inputs):
        # Map the session's declared input names onto the (non-None) inputs,
        # converted to numpy — mirrors do_export's name pruning.
        inputs = inputs if isinstance(inputs, list) or isinstance(inputs, tuple) else (inputs,)
        input_names = [x.name for x in session.get_inputs()]
        inps = [to_numpy(x) for x in inputs if x is not None]
        inp_dict = dict(zip(input_names, inps))
        return inp_dict
    input_names = input_names or ["input"]
    output_names = output_names or ["output"]
    # Run ORT session and TE model.
    fname = os.path.join(NVTE_TEST_ARTIFACTS_DIR, fname)
    if not te_outputs:
        te_outputs = te_infer(model, inps, is_fp8)
    ort_s = create_ort_session(fname, is_fp8)
    input_feed = create_ort_input_dict(ort_s, inps)
    onnx_outputs = ort_s.run(None, input_feed=input_feed)
    compare_outputs(onnx_outputs, te_outputs, atol, rtol, max_errors_printed, allow_cnt_errors, fname)
def create_meta(scale_factor: float, size: int=1):
    """Build an FP8TensorMeta with `size` slots: scale=scale_factor, scale_inv=1/scale_factor."""
    meta = tex.FP8TensorMeta()
    meta.amax_history = torch.zeros(1, size, dtype=torch.float32, device="cuda")
    meta.scale_inv = torch.ones(size, dtype=torch.float32, device="cuda") / scale_factor
    meta.scale = torch.ones(size, dtype=torch.float32, device="cuda") * scale_factor
    return meta
def dtype2str(dtype: torch.dtype, fake_bf16_io=False):
    """Map a torch dtype to the suffix used in generated ONNX file names."""
    if fake_bf16_io:
        # "fake BF16" mode only makes sense for bfloat16 runs.
        assert dtype == torch.bfloat16
        return "_fake_bf16"
    suffixes = {
        torch.float32: "_fp32",
        torch.float16: "_fp16",
        torch.bfloat16: "_bf16",
    }
    return suffixes[dtype]
def as_te_type(dtype: torch.dtype):
    """Translate a torch dtype into the matching transformer-engine DType."""
    mapping = {
        torch.float32: tex.DType.kFloat32,
        torch.float16: tex.DType.kFloat16,
        torch.bfloat16: tex.DType.kBFloat16,
    }
    return mapping[dtype]
def get_attn_mask_str(use_mask, attn_mask_type):
    """Return the file-name suffix describing the attention-mask configuration."""
    # See FusedScaleMaskSoftmax::forward_fused_softmax for logic behind names.
    if attn_mask_type is None:
        return "_mask" if use_mask else "_no-mask"
    if attn_mask_type == "causal":
        return "_causal-mask"
    if use_mask and attn_mask_type == "arbitrary":
        return "_arbitrary-mask"
    return "_arbitrary-no-mask"
"""
Test cases begin here.
"""
@skip_FP8
@pytest.mark.parametrize("scale_factor", [1, 224])
@pytest.mark.parametrize(
    "precision, atol", [
    [torch.float32, 1e-7],
    [torch.float16, 1e-7],
    [torch.bfloat16, 5e-3],
    ["fake-torch.bfloat16", 5e-3],
])
def test_export_cast_ops(seed_default_rng, scale_factor: float, atol: float, precision: torch.dtype):
    """Export an FP8 quantize/dequantize round-trip to ONNX and validate vs ORT."""
    fake_bf16_io = precision == "fake-torch.bfloat16"
    # reset precision to torch.bfloat16 after capturing fake BF16 mode
    precision = torch.bfloat16 if precision == "fake-torch.bfloat16" else precision
    class TestFP8_QDQ(nn.Module):
        # Casts input to FP8 (E4M3) and straight back to high precision.
        def __init__(self, fake_bf16_io):
            super().__init__()
            self.fp8_tensor = 0
            self.meta = create_meta(scale_factor)
            self.highprec_type = as_te_type(precision)
            self.fp8_type = tex.DType.kFloat8E4M3
            self.fake_bf16_io = fake_bf16_io
        def forward(self, inp):
            ret = cast_to_fp8(
                inp,
                self.meta,
                self.fp8_tensor,
                self.fp8_type)
            ret = cast_from_fp8(
                ret,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                self.highprec_type)
            if self.fake_bf16_io:
                ret = ret.type(torch.float32)
            return ret
    # Set dimensions (these are arbitrary).
    in_features = 64
    hidden_size = 256
    inp = torch.randn(hidden_size, in_features, device="cuda",
        dtype=torch.float if fake_bf16_io else precision)
    high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
    fname = f"te.cast_fp8_{scale_factor}{high_prec_str}.onnx"
    model = TestFP8_QDQ(fake_bf16_io)
    do_export(model, inp, fname)
    te_outputs = te_infer(model, inp, is_fp8=True)
    serialize_inputs_outputs(fname, inp, te_outputs)
    # ORT cannot consume real BF16 I/O; only validate otherwise.
    if fake_bf16_io or precision != torch.bfloat16:
        validate_result(fname, inp, model, atol=atol, is_fp8=True, te_outputs=te_outputs)
@skip_FP8
@pytest.mark.parametrize("scale_factor", [448])
@pytest.mark.parametrize(
    "precision, atol", [
    [torch.float32, 1e-5],
    [torch.float16, 1e-5],
    [torch.bfloat16, 5e-3],
    ["fake-torch.bfloat16", 5e-3]
])
def test_export_gelu_fp8(scale_factor: float, precision: torch.dtype, atol: float):
    """Export an FP8 GeLU (gelu -> FP8 -> high precision) to ONNX and validate vs ORT."""
    fake_bf16_io = precision == "fake-torch.bfloat16"
    # reset precision to torch.bfloat16 after capturing fake BF16 mode
    precision = torch.bfloat16 if precision == "fake-torch.bfloat16" else precision
    class TestFP8_Gelu(nn.Module):
        # Applies the fused GeLU-with-FP8-output kernel, then dequantizes.
        def __init__(self, fake_bf16_io):
            super().__init__()
            self.fp8_tensor = 0
            self.meta = create_meta(scale_factor)
            self.highprec_type = as_te_type(precision)
            self.fp8_type = tex.DType.kFloat8E4M3
            self.fake_bf16_io = fake_bf16_io
        def forward(self, inp):
            ret = gelu(
                inp,
                self.meta,
                self.fp8_tensor,
                self.fp8_type)
            ret = cast_from_fp8(
                ret,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                self.highprec_type)
            if self.fake_bf16_io:
                ret = ret.type(torch.float32)
            return ret
    # Set dimensions (these are arbitrary).
    in_features = 64
    hidden_size = 256
    inp = torch.randn(hidden_size, in_features, device="cuda",
        dtype=torch.float if fake_bf16_io else precision)
    high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
    fname = f"te.gelu_fp8_{scale_factor}{high_prec_str}.onnx"
    model = TestFP8_Gelu(fake_bf16_io)
    do_export(model, inp, fname)
    te_outputs = te_infer(model, inp, is_fp8=True)
    serialize_inputs_outputs(fname, inp, te_outputs)
    # allow_cnt_errors=2: tolerate a couple of kernel-difference outliers.
    if fake_bf16_io or precision != torch.bfloat16:
        validate_result(fname, inp, model, rtol=0, atol=atol, is_fp8=True, allow_cnt_errors=2, te_outputs=te_outputs)
@pytest.mark.parametrize("scale_factors",
    [(224, 224,),
])
@pytest.mark.parametrize(
    "precision, use_fp8, use_bias, use_gelu", [
    (torch.float32, False, False, False),
    (torch.float16, False, False, False),
    (torch.bfloat16, False, False, False),
    (torch.float32, False, True, False),
    (torch.float16, False, True, False),
    (torch.bfloat16, False, True, False),
    (torch.float32, False, True, True),
    (torch.float16, False, True, True),
    (torch.bfloat16, False, True, True),
    # For FP8 GEMM GeLU is not used.
    (torch.float32, True, False, False),
    (torch.float16, True, False, False),
    (torch.bfloat16, True, False, False),
    # When enabling bias we must use float16 or bfloat16 (because of kernel limitations)
    (torch.float16, True, True, False),
    (torch.bfloat16, True, True, False),
])
def test_export_gemm(
    seed_default_rng,
    precision, # Precision of inputs, weights, output and bias
    use_fp8,
    use_bias,
    use_gelu,
    scale_factors
):
    """Export a raw (FP8 or high-precision) GEMM to ONNX and validate vs ORT."""
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)
    class TestFP8_GEMM(nn.Module):
        # FP8 path: quantize input and weight, then run fp8_gemm.
        def __init__(self, precision, use_bias, gelu, scale_factors):
            super().__init__()
            self.use_bias = use_bias
            self.gelu = gelu
            self.precision = precision
            self.fp8_tensor_inp = tex.FP8FwdTensors.GEMM1_INPUT
            self.fp8_tensor_weight = tex.FP8FwdTensors.GEMM1_WEIGHT
            # One scale for the activation, one scale per output channel for weights.
            nb_inp_scales, nb_weight_scales = 1, out_features
            act_scale_factor, weight_scale_factor = scale_factors
            self.meta_inp = create_meta(act_scale_factor, nb_inp_scales)
            self.meta_weight = create_meta(weight_scale_factor, nb_weight_scales)
            bias_size = nb_weight_scales
            self.bias = torch.randn(bias_size, dtype=precision, device="cuda")
            self.gelu_input = torch.randn(hidden_size, out_features, dtype=precision, device="cuda")
            self.inp_type = tex.DType.kFloat8E4M3
            self.weights_type = tex.DType.kFloat8E4M3
            self.outp_type = precision
        def forward(self, inp, weight):
            inp_fp8 = cast_to_fp8(
                inp,
                self.meta_inp,
                self.fp8_tensor_inp,
                self.inp_type)
            weight_fp8 = cast_to_fp8(
                weight,
                self.meta_weight,
                self.fp8_tensor_weight,
                self.weights_type)
            ret, _ = fp8_gemm(
                weight_fp8,
                self.meta_weight.scale_inv,
                self.fp8_tensor_weight,
                self.inp_type,
                inp_fp8,
                self.meta_inp.scale_inv,
                self.fp8_tensor_inp,
                self.weights_type,
                self.outp_type,
                get_workspace(),
                bias=self.bias,
                use_bias=self.use_bias,
                use_split_accumulator=False)
            return ret
    class Test_GEMM(nn.Module):
        # High-precision path: plain gemm with optional bias/gelu fusion.
        def __init__(self, precision, use_bias=False, gelu=False):
            super().__init__()
            self.use_bias = use_bias
            self.gelu = gelu
            self.precision = precision
            bias_size = out_features
            self.bias = torch.randn(bias_size, dtype=precision, device="cuda")
            self.gelu_input = torch.randn(hidden_size, out_features, dtype=precision, device="cuda")
        def forward(self, inp, weight):
            outp_type = self.precision
            # note: due to logic in lines 104:116 and L129 in cpp_extensions.py
            # it appears either bias OR gelu can be activated, not both
            ret, _, _ = gemm(
                weight,
                inp,
                outp_type,
                get_workspace(),
                # test bias
                bias=self.bias,
                use_bias=self.use_bias,
                # test gelu
                gelu=self.gelu,
                gelu_input=self.gelu_input,
                grad=False, # only True for backward pass
                accumulate=False,
            )
            return ret
    # If gelu is applied then bias must be added, as defined by TE kernel.
    if use_gelu: assert use_bias
    # Set dimensions (these are arbitrary).
    out_features = 128
    hidden_size = 256
    in_features = 64
    inp = torch.randn(hidden_size, in_features, device="cuda", dtype=precision)
    weight = torch.randn(out_features, in_features, device="cuda", dtype=precision)
    fp8_str = "_fp8" if use_fp8 else ""
    bias_str = "_bias" if use_bias else ""
    gelu_str = "_gelu" if use_gelu else ""
    high_prec_str = dtype2str(precision)
    fname = f"te.gemm{fp8_str}{bias_str}{gelu_str}{high_prec_str}.onnx"
    input_names = ['input', 'weight']
    if use_fp8:
        model = TestFP8_GEMM(precision, use_bias, use_gelu, scale_factors)
        do_export(model, (inp, weight), fname, use_fp8, input_names=input_names)
        te_outputs = te_infer(model, (inp, weight), is_fp8=use_fp8)
        serialize_inputs_outputs(fname, (inp, weight), te_outputs, input_names=input_names)
        if precision != torch.bfloat16:
            validate_result(fname, (inp, weight), model, rtol=1e-2, atol=2e-2,
                is_fp8=True, input_names=input_names, te_outputs=te_outputs)
    else:
        model = Test_GEMM(precision, use_bias, use_gelu)
        do_export(model, (inp, weight), fname, use_fp8, input_names=input_names)
        te_outputs = te_infer(model, (inp, weight), is_fp8=use_fp8)
        serialize_inputs_outputs(fname, (inp, weight), te_outputs, input_names=input_names)
        if precision != torch.bfloat16:
            validate_result(fname, (inp, weight), model, rtol=1e-2, atol=2e-2,
                input_names=input_names, te_outputs=te_outputs)
@pytest.mark.parametrize("scale_factor", [448, 112])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
@pytest.mark.parametrize(
    "use_fp8, precision, atol", [
    [False, torch.float32, 1e-7],
    [False, torch.float16, 1e-7],
    [False, torch.bfloat16, 1e-7],
    [False, "fake-torch.bfloat16", 1e-7],
    [True, torch.float32, 1e-7],
    [True, torch.float16, 1e-7],
    [True, torch.bfloat16, 1e-2],
    [True, "fake-torch.bfloat16", 1e-2]
])
def test_export_layernorm(
    seed_default_rng,
    use_fp8: bool,
    scale_factor: float,
    precision: torch.dtype,
    zero_centered_gamma: bool,
    atol: float
):
    """Export te.LayerNorm (or the raw FP8 LayerNorm kernel) to ONNX and validate vs ORT."""
    fake_bf16_io = precision == "fake-torch.bfloat16"
    # reset precision to torch.bfloat16 after capturing fake BF16 mode
    precision = torch.bfloat16 if precision == "fake-torch.bfloat16" else precision
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)
    # Set dimensions (these are arbitrary).
    inp_shape = [64, 32]
    class Test_Layernorm(nn.Module):
        # High-precision path: the stock te.LayerNorm module.
        def __init__(self) -> None:
            super().__init__()
            eps = 1e-6 # An arbitrary small value
            dtype = torch.float if fake_bf16_io else precision
            # NOTE(review): zero_centered_gamma is hardcoded False here even
            # though the test is parametrized over it (only the FP8 path uses
            # the flag) — confirm whether the non-FP8 path should honor it.
            self.ln = te.LayerNorm(inp_shape[1], eps, params_dtype=dtype,
                zero_centered_gamma=False).eval().cuda()
        def forward(self, inp):
            ret = self.ln(inp)
            return ret
    class TestFP8_Layernorm(nn.Module):
        # FP8 path: layernorm_fwd_fp8_inf kernel followed by dequantization.
        def __init__(self) -> None:
            super().__init__()
            normalized_shape = torch.Size(inp.shape[1:])
            self.weight = torch.randn(*normalized_shape, device="cuda",
                dtype=torch.float32 if fake_bf16_io else precision)
            self.bias = torch.zeros(*normalized_shape, device="cuda",
                dtype=torch.float32 if fake_bf16_io else precision)
            self.eps = 1e-6 # An arbitrary small value
            self.fp8_tensor = tex.FP8FwdTensors.GEMM1_INPUT
            self.meta = create_meta(scale_factor)
            self.fp8_type = tex.DType.kFloat8E4M3
        def forward(self, inp):
            ret = texcpp.layernorm_fwd_fp8_inf(
                inp,
                self.weight,
                self.bias,
                self.eps,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                zero_centered_gamma)
            ret = cast_from_fp8(
                ret,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                as_te_type(precision))
            if fake_bf16_io:
                ret = ret.type(torch.float32)
            return ret
    inp = torch.randn(*inp_shape, device="cuda", dtype=torch.float32 if fake_bf16_io else precision)
    model = TestFP8_Layernorm() if use_fp8 else Test_Layernorm()
    high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
    fp8_str = f"_fp8-{scale_factor}" if use_fp8 else ""
    fname = f"te.layernorm{fp8_str}{high_prec_str}.onnx"
    do_export(model, inp, fname, use_fp8=use_fp8)
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp, te_outputs)
    # ORT cannot consume real BF16 I/O; only validate otherwise.
    if fake_bf16_io or precision != torch.bfloat16:
        validate_result(
            fname, inp, model, atol=atol, is_fp8=use_fp8, allow_cnt_errors=3, te_outputs=te_outputs)
@pytest.mark.parametrize("scale_factor", [448, 112])
@pytest.mark.parametrize(
    "use_fp8, precision, atol", [
    [False, torch.float32, 1e-7],
    [False, torch.float16, 1e-7],
    [False, torch.bfloat16, 1e-7],
    [False, "fake-torch.bfloat16", 1e-7],
    [True, torch.float32, 1e-7],
    [True, torch.float16, 1e-7],
    [True, torch.bfloat16, 1e-2],
    [True, "fake-torch.bfloat16", 1e-2]
])
def test_export_rmsnorm(
    seed_default_rng,
    use_fp8: bool,
    scale_factor: float,
    precision: torch.dtype,
    atol: float
):
    """Export te.RMSNorm (or the raw FP8 RMSNorm kernel) to ONNX and validate vs ORT."""
    fake_bf16_io = precision == "fake-torch.bfloat16"
    # reset precision to torch.bfloat16 after capturing fake BF16 mode
    precision = torch.bfloat16 if precision == "fake-torch.bfloat16" else precision
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)
    # Set dimensions (these are arbitrary).
    inp_shape = [64, 32]
    class Test_RMSnorm(nn.Module):
        # High-precision path: the stock te.RMSNorm module.
        def __init__(self) -> None:
            super().__init__()
            eps = 1e-6 # An arbitrary small value
            dtype = torch.float if fake_bf16_io else precision
            self.ln = te.RMSNorm(inp_shape[1], eps, params_dtype=dtype).eval().cuda()
        def forward(self, inp):
            ret = self.ln(inp)
            return ret
    class TestFP8_RMSnorm(nn.Module):
        # FP8 path: rmsnorm_fwd_fp8_inf kernel followed by dequantization.
        def __init__(self) -> None:
            super().__init__()
            normalized_shape = torch.Size(inp.shape[1:])
            self.weight = torch.randn(*normalized_shape, device="cuda",
                dtype=torch.float32 if fake_bf16_io else precision)
            self.eps = 1e-6 # An arbitrary small value
            self.fp8_tensor = tex.FP8FwdTensors.GEMM1_INPUT
            self.meta = create_meta(scale_factor)
            self.fp8_type = tex.DType.kFloat8E4M3
        def forward(self, inp):
            ret = texcpp.rmsnorm_fwd_fp8_inf(
                inp,
                self.weight,
                self.eps,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                False)
            ret = cast_from_fp8(
                ret,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                as_te_type(precision))
            if fake_bf16_io:
                ret = ret.type(torch.float32)
            return ret
    inp = torch.randn(*inp_shape, device="cuda", dtype=torch.float32 if fake_bf16_io else precision)
    model = TestFP8_RMSnorm() if use_fp8 else Test_RMSnorm()
    high_prec_str = dtype2str(precision, fake_bf16_io=fake_bf16_io)
    fp8_str = f"_fp8-{scale_factor}" if use_fp8 else ""
    # Fix: the original used "te.layernorm..." here, colliding with — and
    # overwriting — the artifacts produced by test_export_layernorm.
    fname = f"te.rmsnorm{fp8_str}{high_prec_str}.onnx"
    do_export(model, inp, fname, use_fp8=use_fp8)
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp, te_outputs)
    # ORT cannot consume real BF16 I/O; only validate otherwise.
    if fake_bf16_io or precision != torch.bfloat16:
        validate_result(
            fname, inp, model, atol=atol, is_fp8=use_fp8, allow_cnt_errors=3, te_outputs=te_outputs)
@pytest.mark.parametrize("scale_factor", [1])
@pytest.mark.parametrize("use_fp8", [False, True])
# Returning the bias is a TE fusion optimization we don't care about.
@pytest.mark.parametrize("return_bias", [False])
@pytest.mark.parametrize(
    "precision, use_bias",[
        (torch.float32, False),
        (torch.float32, True),
        (torch.float16, False),
        (torch.float16, True),
        # Todo: cannot configure BF16 when bias is disabled (ORT issue?)
        (torch.bfloat16, False),
        # Todo: cannot configure BF16 when bias is enabled (ORT issue?)
        (torch.bfloat16, True),
    ])
def test_export_linear(
    seed_default_rng,
    scale_factor: float,
    use_fp8: bool,
    use_bias: bool,
    return_bias: bool,
    precision: torch.dtype
):
    """Export te.Linear to ONNX and compare ONNX Runtime output against TE.

    The module is wrapped in a minimal nn.Module, exported (optionally under
    FP8 autocast), and the TE reference outputs are serialized next to the
    ONNX file. BF16 runs are export-only (see the early return below).
    """
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)
    # Set dimensions (these are arbitrary).
    in_features = 64
    out_features = 256
    hidden_size = 256
    class Test_Linear(nn.Module):
        # Thin wrapper so torch.onnx.export sees a single-tensor forward().
        def __init__(self,
                    in_features,
                    out_features,
                    use_bias,
                    return_bias,
                    precision
        ):
            super().__init__()
            self.linear = te.Linear(
                in_features,
                out_features,
                bias=use_bias,
                return_bias=return_bias,
                params_dtype=precision
            )
        def forward(self, inp):
            ret = self.linear(inp)
            return ret
    inp = torch.randn(hidden_size, in_features, device="cuda", dtype=precision)
    # Encode the configuration in the artifact name so files stay unique.
    fp8_str = "_fp8" if use_fp8 else ""
    bias_str = "_bias" if use_bias else ""
    high_prec_str = dtype2str(precision)
    fname = f"te.linear{fp8_str}{bias_str}{high_prec_str}.onnx"
    with te.fp8_autocast(enabled=use_fp8):
        model = Test_Linear(
            in_features,
            out_features,
            use_bias,
            return_bias,
            precision
        ).to(device='cuda')
        if use_fp8:
            # Give the FP8 scaling meta-data a known deterministic value.
            set_layer_scale(model.linear, scale_factor, num_gemms=1)
        do_export(model, inp, fname, use_fp8)
        te_outputs = te_infer(model, inp, is_fp8=use_fp8)
        serialize_inputs_outputs(fname, inp, te_outputs)
        if precision in (torch.bfloat16, ):
            # BF16 is exported but not validated (see ORT notes above).
            return
        if not use_fp8:
            validate_result(fname, inp, model, atol=1e-3, te_outputs=te_outputs)
        else:
            validate_result(fname, inp, model, atol=1e-3, is_fp8=use_fp8, te_outputs=te_outputs)
@pytest.mark.parametrize("scale_factor", [112])
@pytest.mark.parametrize("use_fp8", [False, True])
# Returning the bias is a TE fusion optimization we don't care about.
@pytest.mark.parametrize("return_bias", [False])
@pytest.mark.parametrize("return_layernorm_output", [False])
@pytest.mark.parametrize(
    "precision, use_bias",[
        (torch.float32, False),
        (torch.float32, True),
        (torch.float16, True),
        (torch.float16, False),
        (torch.bfloat16, True),
        (torch.bfloat16, False),
    ])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
@pytest.mark.parametrize("normalization", all_normalizations)
def test_export_layernorm_linear(
    seed_default_rng,
    scale_factor: float,
    use_fp8: bool,
    use_bias: bool,
    return_bias: bool,
    return_layernorm_output: bool,
    precision: torch.dtype,
    zero_centered_gamma: bool,
    normalization: str,
):
    """Export te.LayerNormLinear to ONNX and validate it against TE outputs.

    The module is exported (optionally under FP8 autocast), TE reference
    outputs are serialized next to the ONNX file, and ONNX Runtime results
    are compared numerically. BF16 runs are export-only (early return).
    """
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)
    if normalization == "RMSNorm" and zero_centered_gamma:
        pytest.skip("RMSNorm does not support zero_centered_gamma yet!")
    # Set dimensions (these are arbitrary).
    in_features = 64
    out_features = 256
    hidden_size = 256
    inp = torch.randn(in_features, out_features, device="cuda", dtype=precision)
    # Encode the configuration in the artifact name so files stay unique.
    fp8_str = "_fp8" if use_fp8 else ""
    bias_str = "_bias" if use_bias else ""
    high_prec_str = dtype2str(precision)
    fname = f"te.layernorm_linear{fp8_str}{bias_str}{high_prec_str}.onnx"
    with te.fp8_autocast(enabled=use_fp8):
        model = te.LayerNormLinear(
            hidden_size,
            3 * hidden_size,
            bias=use_bias,
            return_bias=return_bias,
            return_layernorm_output=return_layernorm_output,
            params_dtype=precision,
            zero_centered_gamma=zero_centered_gamma,
            normalization=normalization,
        ).to(device='cuda')
        if use_fp8:
            # Give the FP8 scaling meta-data a known deterministic value.
            set_layer_scale(model, scale_factor, num_gemms=1)
        do_export(model, inp, fname, use_fp8)
        te_outputs = te_infer(model, inp, is_fp8=use_fp8)
        serialize_inputs_outputs(fname, inp, te_outputs)
        if precision in (torch.bfloat16, ):
            # BF16 is exported but not validated by ONNX Runtime.
            return
        if not use_fp8:
            validate_result(fname, inp, model, atol=1e-3, te_outputs=te_outputs)
        else:
            # The BF16 early return above makes the previous
            # "elif precision != torch.bfloat16" guard always true here,
            # so it is now a plain else (behavior unchanged).
            # NOTE(review): atol=1e-6 is far tighter than the 1e-2/1e-3 used
            # by sibling FP8 tests -- confirm this tolerance is intentional.
            validate_result(fname, inp, model, atol=1e-6, is_fp8=use_fp8, te_outputs=te_outputs)
@pytest.mark.parametrize("scale_factor", [112])
@pytest.mark.parametrize("use_fp8", [False, True])
# Returning the bias is a TE fusion optimization we don't care about.
@pytest.mark.parametrize("return_bias", [False])
@pytest.mark.parametrize("return_layernorm_output", [False])
@pytest.mark.parametrize(
    "precision, use_bias",[
        (torch.float32, False),
        (torch.float32, True),
        (torch.float16, True),
        (torch.float16, False),
        (torch.bfloat16, True),
        (torch.bfloat16, False),
    ])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
@pytest.mark.parametrize("activation", supported_activations)
@pytest.mark.parametrize("normalization", all_normalizations)
def test_export_layernorm_mlp(
    seed_default_rng,
    scale_factor: float,
    use_fp8: bool,
    use_bias: bool,
    return_bias: bool,
    return_layernorm_output: bool,
    precision: torch.dtype,
    zero_centered_gamma: bool,
    activation: str,
    normalization: str,
):
    """Export te.LayerNormMLP to ONNX and validate it against TE outputs.

    Covers every supported activation and normalization; BF16 runs are
    export-only (early return below).
    """
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)
    if normalization == "RMSNorm" and zero_centered_gamma:
        pytest.skip("RMSNorm does not support zero_centered_gamma yet!")
    # Set dimensions (these are arbitrary).
    in_features = 64
    out_features = 256
    hidden_size = 256
    ffn_hidden_size = 256
    inp = torch.randn(in_features, out_features, device="cuda", dtype=precision)
    # Encode the configuration in the artifact name so files stay unique.
    fp8_str = "_fp8" if use_fp8 else ""
    bias_str = "_bias" if use_bias else ""
    high_prec_str = dtype2str(precision)
    fname = f"te.layernorm_mlp{fp8_str}{bias_str}{high_prec_str}_{activation}.onnx"
    with te.fp8_autocast(enabled=use_fp8):
        model = te.LayerNormMLP(
            hidden_size,
            ffn_hidden_size,
            bias=use_bias,
            return_bias=return_bias,
            return_layernorm_output=return_layernorm_output,
            params_dtype=precision,
            zero_centered_gamma=zero_centered_gamma,
            activation=activation,
            normalization=normalization,
        ).to(device='cuda')
        if use_fp8:
            # MLP has two GEMMs (fc1, fc2); seed both scaling metas.
            set_layer_scale(model, scale_factor, num_gemms=2)
        do_export(model, inp, fname, use_fp8)
        te_outputs = te_infer(model, inp, is_fp8=use_fp8)
        serialize_inputs_outputs(fname, inp, te_outputs)
        if precision in (torch.bfloat16, ):
            # BF16 is exported but not validated by ONNX Runtime.
            return
        # swiglu gets a much looser tolerance in the non-FP8 path.
        atol = 1e-6 if use_fp8 else (5e-1 if activation=="swiglu" else 1e-3)
        validate_result(fname, inp, model, atol=atol, is_fp8=use_fp8, te_outputs=te_outputs)
@skip_FP8
@pytest.mark.parametrize(
    "precision, use_mask, attn_mask_type", [
    (torch.float32, True, "arbitrary"), # calls forward_torch_softmax (apply user mask)
    (torch.float32, False, "no_mask"), # calls forward_torch_softmax (apply no mask)
    (torch.float16, False, "causal"), # calls forward_torch_softmax (apply dynamic onnx mask)
    (torch.float16, True, "arbitrary"), # calls forward_torch_softmax (apply user mask)
    (torch.float16, False, "no_mask"), # calls forward_torch_softmax (apply no mask)
    (torch.bfloat16, False, "causal"), # calls forward_torch_softmax (apply dynamic onnx mask)
    (torch.bfloat16, True, "arbitrary"), # calls forward_torch_softmax (apply user mask)
    (torch.bfloat16, False, "no_mask"), # calls forward_torch_softmax (apply no mask)
])
def test_export_core_attention(
    seed_default_rng,
    set_max_seq_len,
    precision: torch.dtype,
    use_mask: bool,
    attn_mask_type: str,
):
    """Export DotProductAttention to ONNX and validate against TE outputs.

    Exercises the three mask modes (arbitrary / causal / no_mask); BF16 is
    export-only (early return below).
    """
    # Set dimensions (these are arbitrary).
    seq_len, batch_size, num_attention_heads, kv_channels = (64, 4, 1, 64)
    qkv_size = (seq_len, batch_size, num_attention_heads, kv_channels)
    qkv_format = "sbhd"
    query_layer = torch.randn(qkv_size, dtype=precision, device="cuda")
    key_layer = torch.randn(qkv_size, dtype=precision, device="cuda")
    value_layer = torch.randn(qkv_size, dtype=precision, device="cuda")
    input_names = ["query", "key", "value", "attention_mask"]
    attention_mask = None
    if use_mask:
        # Generate a random mask with 50% probability for 0 or 1.
        probs = 0.5 * torch.ones(batch_size, 1, 1, seq_len, device="cuda", dtype=precision)
        attention_mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool)
    inp = (query_layer, key_layer, value_layer, attention_mask)
    mask_str = get_attn_mask_str(use_mask, attn_mask_type)
    high_prec_str = dtype2str(precision)
    fname = f"te.core_attention{mask_str}{high_prec_str}.onnx"
    model = te.attention.DotProductAttention(
        num_attention_heads=num_attention_heads,
        kv_channels=kv_channels,
        attention_dropout=0.5,
        qkv_format=qkv_format,
        attn_mask_type=attn_mask_type,
    ).to(device='cuda')
    do_export(model,
              inp,
              fname,
              input_names=input_names,
              use_fp8=True)
    te_outputs = te_infer(model, inp, is_fp8=True)
    serialize_inputs_outputs(fname, inp, te_outputs, input_names=input_names)
    if precision in (torch.bfloat16, ):
        # BF16 is exported but not validated by ONNX Runtime.
        return
    validate_result(fname, inp, model, is_fp8=True, atol=1e-2, input_names=input_names, te_outputs=te_outputs)
# Shared parametrizations for the multihead-attention and transformer-layer
# tests below.
test_configs_multihead_attention = [
    #"use_mask, attn_mask_type"
    (False, "no_mask"), # calls ScaledSoftmax
    (True, "arbitrary"), # calls ScaledMaskedSoftmax
]
# Cartesian product of input-LN on/off, self/cross attention, fused/unfused QKV.
test_configs_attention_type = [
    #"input_layernorm, attention_type, fuse_qkv_params"
    (True, "self", True),
    (False, "self", True),
    (True, "self", False),
    (False, "self", False),
    (True, "cross", True),
    (False, "cross", True),
    (True, "cross", False),
    (False, "cross", False),
]
@pytest.mark.parametrize("use_fp8", [False, True])
@pytest.mark.parametrize("use_mask, attn_mask_type", test_configs_multihead_attention)
@pytest.mark.parametrize("precision", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize("return_layernorm_output", [False])
@pytest.mark.parametrize("input_layernorm, attention_type, fuse_qkv_params", test_configs_attention_type)
def test_export_multihead_attention(
    seed_default_rng,
    set_max_seq_len,
    use_fp8: bool,
    use_mask: bool,
    attn_mask_type: str,
    precision: torch.dtype,
    return_layernorm_output: bool,
    input_layernorm: bool,
    attention_type: str,
    fuse_qkv_params: bool
):
    """Export te.MultiheadAttention to ONNX with dynamic seq/batch axes and
    validate against TE; additionally re-validates with a shorter sequence to
    emulate GPT generative-phase inference.
    """
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)
    hidden_size = 256
    sequence_length = 128
    batch_size = 4
    num_attention_heads = 32
    kv_channels = 8
    attention_dropout = 0.1
    layernorm_epsilon = 1e-5
    init_method = output_layer_init_method = get_default_init_method()
    # Positional arguments of te.MultiheadAttention, in declaration order.
    attention_args = (
        hidden_size,
        num_attention_heads,
        kv_channels,
        attention_dropout,
        layernorm_epsilon,
        init_method,
        output_layer_init_method,
    )
    hidden_states_context = torch.randn(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
    attention_mask = None
    if use_mask and attn_mask_type != "causal":
        # Generate a random mask with 50% probability for 0 or 1.
        probs = 0.5 * torch.ones(batch_size, 1, sequence_length, sequence_length, device="cuda", dtype=precision)
        attention_mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool)
    encoder_output = None
    if attention_type == "cross":
        encoder_output = torch.randn(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
    # Encode the configuration in the artifact name so files stay unique.
    fp8_str = "_fp8" if use_fp8 else ""
    dtype_str = dtype2str(precision)
    attn_type_str = "_self-attention" if attention_type == "self" else "_cross-attention"
    fuse_qkv_str = "_fused-qkv" if fuse_qkv_params else ""
    attn_mask_str = get_attn_mask_str(use_mask, attn_mask_type)
    input_ln_str = "_input-ln" if input_layernorm else ""
    fname = f"te.multihead_attention{fp8_str}{attn_mask_str}{attn_type_str}{input_ln_str}{fuse_qkv_str}{dtype_str}.onnx"
    model = te.MultiheadAttention(
        *attention_args,
        attn_mask_type=attn_mask_type,
        params_dtype=precision,
        return_layernorm_output=return_layernorm_output,
        input_layernorm=input_layernorm,
        attention_type=attention_type,
        fuse_qkv_params=fuse_qkv_params,
        return_bias=True,
    ).to(device='cuda')
    inp_context = (hidden_states_context, attention_mask, encoder_output)
    input_names = ["hidden_states", "attention_mask", "encoder_output"]
    output_names=["attention_output", "attention_bias"]
    # seq/batch are marked dynamic so the generative-phase check below can
    # feed a shorter sequence through the same exported model.
    do_export(model, inp_context, fname, use_fp8, input_names=input_names, output_names=output_names,
              dynamic_axes={"hidden_states": {0: "seq", 1:"bs"},
                            "attention_output": {0: "seq", 1:"bs"}})
    te_outputs = te_infer(model, inp_context, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp_context, te_outputs, input_names=input_names, output_names=output_names)
    if precision in (torch.bfloat16, ):
        # BF16 is exported but not validated by ONNX Runtime.
        return
    if not use_fp8:
        validate_result(fname, inp_context, model, atol=1e-3, input_names=input_names,
                        output_names=output_names, te_outputs=te_outputs)
    else:
        validate_result(fname, inp_context, model, atol=1e-2, is_fp8=use_fp8,
                        input_names=input_names, output_names=output_names, allow_cnt_errors=3,
                        te_outputs=te_outputs)
    # In GPT generative phase (inference) the input sequence is smaller than the maximum
    # allowed sequence length and we want to test this condition.
    # Pretend that we're in generative phase when it makes sense (causal mask and self-attention).
    is_generative_phase = (attn_mask_type == "causal" and attention_type == "self")
    if is_generative_phase:
        seq_len_offset = 8
        hidden_states_generative = torch.randn(sequence_length-seq_len_offset, batch_size, hidden_size, dtype=precision, device="cuda")
        inp_generative = (hidden_states_generative, attention_mask, encoder_output)
        # NOTE(review): unlike every other call in this file, these two
        # validate_result calls do not pass te_outputs -- presumably the
        # reference is recomputed internally; confirm against validate_result.
        if not use_fp8:
            validate_result(fname, inp_generative, model, atol=1e-3, input_names=input_names, output_names=output_names)
        else:
            validate_result(fname, inp_generative, model, atol=1e-2, is_fp8=use_fp8,
                input_names=input_names, output_names=output_names, allow_cnt_errors=3)
@pytest.mark.parametrize("use_fp8", [False, True])
@pytest.mark.parametrize("use_mask, attn_mask_type", test_configs_multihead_attention)
@pytest.mark.parametrize("output_layernorm", [
    #True, # TO DO: handle this
    False
])
@pytest.mark.parametrize("precision", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize("fuse_qkv_params", [False, True])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
@pytest.mark.parametrize("activation", supported_activations)
def test_export_transformer_layer(
    seed_default_rng,
    set_max_seq_len,
    use_fp8: bool,
    use_mask: bool,
    attn_mask_type: str,
    output_layernorm: bool,
    precision: torch.dtype,
    fuse_qkv_params: bool,
    zero_centered_gamma: bool,
    activation: str,
):
    """Export a full te.TransformerLayer to ONNX and validate against TE.

    BF16 runs are export-only (early return below).
    """
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)
    # Layer configuration
    hidden_size = 64
    sequence_length = 128
    batch_size = 1
    ffn_hidden_size = 256
    num_attention_heads = 4
    input_tensor = torch.rand(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
    input_names = ["input", "attention_mask"]
    attention_mask = None
    if use_mask and attn_mask_type != "causal":
        # Generate a random mask with 50% probability for 0 or 1.
        probs = 0.5 * torch.ones(batch_size, 1, sequence_length, sequence_length, device="cuda", dtype=precision)
        attention_mask = torch.bernoulli(probs).to("cuda", dtype=torch.bool)
    inp = (input_tensor, attention_mask)
    # Encode the configuration in the artifact name so files stay unique.
    fp8_str = "_fp8" if use_fp8 else ""
    fuse_qkv_params_str = "_fused-qkv" if fuse_qkv_params else ""
    high_prec_str = dtype2str(precision)
    attn_mask_str = get_attn_mask_str(use_mask, attn_mask_type)
    fname = f"te.transformer_layer{fp8_str}{attn_mask_str}{fuse_qkv_params_str}{high_prec_str}_{activation}.onnx"
    model = te.TransformerLayer(
        hidden_size,
        ffn_hidden_size,
        num_attention_heads,
        self_attn_mask_type=attn_mask_type,
        output_layernorm=output_layernorm,
        params_dtype=precision,
        fuse_qkv_params=fuse_qkv_params,
        zero_centered_gamma=zero_centered_gamma,
        activation=activation).to(device='cuda')
    do_export(model, inp, fname, use_fp8, input_names=input_names)
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp, te_outputs, input_names=input_names)
    if precision in (torch.bfloat16, ):
        # BF16 is exported but not validated by ONNX Runtime.
        return
    # FP8 and swiglu both get a loose 5e-1 tolerance; otherwise 1e-3.
    atol = 5e-1 if use_fp8 else (5e-1 if activation=="swiglu" else 1e-3)
    validate_result(fname, inp, model, atol=atol, is_fp8=use_fp8, input_names=input_names, te_outputs=te_outputs)
@pytest.mark.parametrize("use_fp8", [True])
@pytest.mark.parametrize("ln_scale_factor", [448*2])
@pytest.mark.parametrize("gemm_scale_factors", [(224, 224,),])
@pytest.mark.parametrize("precision", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize("zero_centered_gamma", [False, True])
def test_export_gemm_layernorm(
    seed_default_rng,
    use_fp8: bool,
    ln_scale_factor: float,
    gemm_scale_factors: Tuple[float, float],
    precision: torch.dtype,
    zero_centered_gamma: bool
):
    """This is a regression test for testing that all LN inputs have the same type.
    The test sets up GEMM with FP32 output which feeds into an LN that is configured
    with FP16 or BF16 weights and bias.
    """
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)
    # Note: both nested classes close over names defined *after* them
    # (TestFP8_GEMM, inp, the dims) -- this works because their __init__
    # only runs when the model is instantiated at the bottom of the test.
    class TestFP8_GemmLayernorm(nn.Module):
        # FP8 GEMM followed by an FP8 LayerNorm, then cast back to high precision.
        def __init__(self) -> None:
            super().__init__()
            normalized_shape = torch.Size(inp.shape[1:])
            self.weight = torch.randn(*normalized_shape, dtype=precision, device="cuda")
            self.bias = torch.zeros(*normalized_shape, dtype=precision, device="cuda")
            self.eps = 1e-6 # An arbitrary small value
            self.fp8_tensor = tex.FP8FwdTensors.GEMM1_INPUT
            self.meta = create_meta(ln_scale_factor)
            self.fp8_type = tex.DType.kFloat8E4M3
            self.gemm = TestFP8_GEMM(
                precision, use_bias=False, gelu=False, scale_factors=gemm_scale_factors)
        def forward(self, inp, weight):
            x = self.gemm(inp, weight)
            x = texcpp.layernorm_fwd_fp8_inf(
                x,
                self.weight,
                self.bias,
                self.eps,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                zero_centered_gamma)
            x = cast_from_fp8(
                x,
                self.meta,
                self.fp8_tensor,
                self.fp8_type,
                tex.DType.kFloat32 if precision == torch.float32 else tex.DType.kFloat16)
            return x
    out_features = 128
    hidden_size = 128
    in_features = 128
    class TestFP8_GEMM(nn.Module):
        # Casts activations and weights to FP8 and runs a single fp8_gemm.
        def __init__(self, precision, use_bias, gelu, scale_factors):
            super().__init__()
            self.use_bias = use_bias
            self.gelu = gelu
            self.precision = precision
            self.fp8_tensor_inp = tex.FP8FwdTensors.GEMM1_INPUT
            self.fp8_tensor_weight = tex.FP8FwdTensors.GEMM1_WEIGHT
            nb_inp_scales, nb_weight_scales = 1, out_features
            act_scale_factor, weight_scale_factor = scale_factors
            self.meta_inp = create_meta(act_scale_factor, nb_inp_scales)
            self.meta_weight = create_meta(weight_scale_factor, nb_weight_scales)
            bias_size = nb_weight_scales
            self.bias = torch.randn(bias_size, dtype=precision, device="cuda")
            self.gelu_input = torch.randn(hidden_size, out_features, dtype=precision, device="cuda")
            self.inp_type = tex.DType.kFloat8E4M3
            self.weights_type = tex.DType.kFloat8E4M3
            self.outp_type = precision
        def forward(self, inp, weight):
            inp_fp8 = cast_to_fp8(
                inp,
                self.meta_inp,
                self.fp8_tensor_inp,
                self.inp_type)
            weight_fp8 = cast_to_fp8(
                weight,
                self.meta_weight,
                self.fp8_tensor_weight,
                self.weights_type)
            ret, _ = fp8_gemm(
                weight_fp8,
                self.meta_weight.scale_inv,
                self.fp8_tensor_weight,
                self.inp_type,
                inp_fp8,
                self.meta_inp.scale_inv,
                self.fp8_tensor_inp,
                self.weights_type,
                self.outp_type,
                get_workspace(),
                bias=self.bias,
                use_bias=self.use_bias,
                use_split_accumulator=False)
            return ret
    inp = torch.randn(hidden_size, in_features, dtype=precision, device="cuda")
    weight = torch.randn(out_features, in_features, dtype=precision, device="cuda")
    model = TestFP8_GemmLayernorm()
    high_prec_str = dtype2str(precision)
    fp8_str = f"_fp8" if use_fp8 else ""
    fname = f"te.gemm_layernorm{fp8_str}{high_prec_str}.onnx"
    input_names = ['input', 'weight']
    do_export(model, (inp, weight), fname, use_fp8=use_fp8, input_names=input_names)
    te_outputs = te_infer(model, (inp, weight), is_fp8=use_fp8)
    serialize_inputs_outputs(fname, (inp, weight), te_outputs, input_names=input_names)
    if precision not in (torch.bfloat16, ):
        validate_result(
            fname, (inp, weight), model, atol=5e-2, is_fp8=use_fp8, allow_cnt_errors=2,
            input_names=input_names, te_outputs=te_outputs)
@skip_FP8
@pytest.mark.parametrize("use_fp8", [True, False])
@pytest.mark.parametrize("precision", [torch.float16, torch.bfloat16])
@pytest.mark.parametrize("zero_centered_gamma", [True])
def test_export_gpt_generation(
    seed_default_rng,
    set_max_seq_len,
    use_fp8: bool,
    precision: torch.dtype,
    zero_centered_gamma: bool,
):
    """Test that the ONNX model can correctly handle inputs with different shapes and that
    the attention mask is adjusted on-the-fly to different sequence lengths.
    """
    # Skip FP8 tests on non-hopper devices
    if use_fp8 and not fp8_available:
        pytest.skip(reason_for_no_fp8)
    # Layer configuration
    hidden_size = 64
    sequence_length = 128
    batch_size = 1
    ffn_hidden_size = 256
    num_attention_heads = 4
    attention_mask = None
    use_mask = True
    attn_mask_type = "causal"
    fuse_qkv_params = True
    output_layernorm = False
    # Encode the configuration in the artifact name so files stay unique.
    fp8_str = "_fp8" if use_fp8 else ""
    fuse_qkv_params_str = "_fused-qkv" if fuse_qkv_params else ""
    high_prec_str = dtype2str(precision)
    attn_mask_str = get_attn_mask_str(use_mask, attn_mask_type)
    fname = f"te.transformer_layer_generative{fp8_str}{attn_mask_str}{fuse_qkv_params_str}{high_prec_str}.onnx"
    model = te.TransformerLayer(
        hidden_size,
        ffn_hidden_size,
        num_attention_heads,
        self_attn_mask_type=attn_mask_type,
        output_layernorm=output_layernorm,
        params_dtype=precision,
        fuse_qkv_params=fuse_qkv_params,
        zero_centered_gamma=zero_centered_gamma).to(device='cuda')
    # "Context phase": use full input sequence length
    input_names = ["input"]
    output_names = ["output"]
    input_tensor = torch.rand(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
    inp = (input_tensor,)
    # seq/batch are dynamic so the same ONNX file can serve the shorter
    # generative-phase input below.
    do_export(model, inp, fname, use_fp8,
        input_names=input_names, output_names=output_names,
        dynamic_axes={"input": {0: "seq", 1:"bs"},
                      "output": {0: "seq", 1:"bs"}, })
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp, te_outputs, input_names=input_names, output_names=output_names)
    if precision not in (torch.bfloat16, ):
        validate_result(fname, inp, model, atol=6e-3, is_fp8=use_fp8, input_names=input_names,
                        te_outputs=te_outputs)
    # "Generative phase": use a single input (sequence len=1). For FP8 we need to pad the sequence to mult of 8.
    sequence_length = 1 if not use_fp8 else 8
    input_tensor = torch.rand(sequence_length, batch_size, hidden_size, dtype=precision, device="cuda")
    # attention_mask is None here, so the 2-tuple matches the single "input" name.
    inp = (input_tensor, attention_mask)
    te_outputs = te_infer(model, inp, is_fp8=use_fp8)
    serialize_inputs_outputs(fname, inp, te_outputs, input_names=input_names)
    if precision not in (torch.bfloat16, ):
        validate_result(fname, inp, model, atol=6e-3, is_fp8=use_fp8, input_names=input_names,
                        te_outputs=te_outputs)
@pytest.mark.parametrize("enabled", [True, False])
def test_export_ctx_manager(enabled):
    """Verify te.onnx_export toggles export mode only inside the context."""
    # Export mode must be off before entering the context manager.
    assert not is_in_onnx_export_mode()
    with te.onnx_export(enabled):
        # Inside the context the flag mirrors the requested setting.
        assert is_in_onnx_export_mode() == enabled
    # Leaving the context restores the disabled state.
    assert not is_in_onnx_export_mode()
| NVIDIA/TransformerEngine | tests/pytorch/test_onnx_export.py | test_onnx_export.py | py | 55,538 | python | en | code | 1,056 | github-code | 36 | [
{
"api_name": "os.getenv",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_num... |
36663492998 | import news_parser as Util
import datetime
import time
import DBHandler
from tensorflow.keras.models import load_model
import predict
# KOSPI-listed companies; populated from Util.GetCompanyList() in __main__.
CompanyList=[]
Headless = True # False : show the browser window, True : run headless
MakeCompanyList = False # whether to refresh the company-list CSV
# Database connection settings (placeholder Korean strings: host / account /
# password / database name).
host = '데이터베이스 주소'
ID= '계정명'
PW='비밀번호'
DB_name='DB이름'
def GetNewsInfo(driver):
    """Scrape the current news page and match headlines against CompanyList.

    Returns (headlines, news_info, body_text, article_urls, matched_companies).
    """
    # Parse headlines, outlet/post-time info, article bodies and article links.
    headlines, news_info, body_text, article_urls = Util.GetNews(driver)
    # Companies from the global KOSPI list that appear in the headlines.
    matched_companies = Util.GetCompanyFromNews(headlines, CompanyList)
    return headlines, news_info, body_text, article_urls, matched_companies
def GetPriceInfo(driver):
    """Fetch, print and return current index prices.

    Order: KTOP 30, KOSPI, KOSPI200, KOSDAQ, KOSDAQ150, KRX300.
    """
    names, prices, changes = Util.get_prices(driver)
    Util.PrintPrice(names, prices, changes)
    return names, prices, changes
def MakeCompanyFile(MakeCompanyList):
    """Regenerate the company-list CSV when the refresh flag is set.

    Bug fix: the ``MakeCompanyList`` flag was previously ignored, so the CSV
    was rebuilt unconditionally even though callers pass the module-level
    ``MakeCompanyList = False`` ("refresh company list") setting. The flag
    now actually gates the regeneration.
    """
    if MakeCompanyList:
        # Create the company CSV file.
        Util.MakeCompanyCSV()
# Main loop: set up three Selenium drivers (news / prices / graph image),
# then every ~30s scrape prices and news, classify headlines, and push the
# results into MySQL. Each stage swallows its own errors and, where possible,
# recreates its driver so the loop keeps running.
if __name__ == '__main__':
    print("Setting Interface...")
    CompanyList = Util.GetCompanyList() # load KOSPI-listed companies
    try: # Naver Finance
        NewsDriver = Util.News_get_driver(Headless)
    except Exception as ex:
        print("News Driver Err")
        print('에러가 발생 했습니다', ex)
    try : # Korea Exchange (KRX)
        PriceDriver = Util.NowPriceDriver(Headless)
    except Exception as ex:
        print("Price Driver Err")
        print('에러가 발생 했습니다', ex)
    try: # Daum Finance
        KospiImageDriver = Util.Get_KospiGraphDriver(Headless)
    except Exception as ex:
        print("KospiImage Driver Err")
        print('에러가 발생 했습니다', ex)
    MakeCompanyFile(MakeCompanyList) # refresh the company-list CSV
    DBController = DBHandler.MySqlController(host, ID, PW, DB_name)
    label=[]
    while(True):
        now = datetime.datetime.now()
        # Round-trip through unicode-escape so the Korean characters in the
        # strftime format survive -- presumably a platform-encoding
        # workaround; TODO confirm it is still needed.
        nowDatetime = now.strftime('%Y_%m_%d_%H시%M분%S초'.encode('unicode-escape').decode()).encode().decode('unicode-escape')
        nowDatehour = now.strftime('%Y_%m_%d_%H시%M분'.encode('unicode-escape').decode()).encode().decode('unicode-escape')
        try:
            NameList, PriceInfo, Fluctuation = Util.get_prices(PriceDriver)
            Util.PrintPrice(NameList,PriceInfo,Fluctuation)
            DBController.update_totalprice(PriceInfo,Fluctuation)
        except Exception as ex:
            # On failure, recreate the price driver and retry once.
            print("Price Info Err")
            print('에러가 발생 했습니다', ex)
            PriceDriver.quit()
            PriceDriver = Util.NowPriceDriver(Headless)
            NameList, PriceInfo, Fluctuation = Util.get_prices(PriceDriver)
        # print("========================================")
        try:
            headlines, news_info, Text,NewsUrl,CompanyFromNews = GetNewsInfo(NewsDriver) # extract companies from news
            print("News Updated...")
        except Exception as ex:
            print("News Update Err")
            NewsDriver.quit()
            NewsDriver = Util.News_get_driver(Headless)
            print('에러가 발생 했습니다', ex)
        try:
            Util.Write_News(headlines, CompanyFromNews, nowDatehour) # write per-company news data
        except Exception as ex:
            print("News Write Err")
            CompanyList = Util.GetCompanyList() # reload KOSPI-listed companies
        try:
            Util.GetKospiGraph(KospiImageDriver, PriceInfo, Fluctuation) # save KOSPI/KOSDAQ graph images
            print("Get Kospi Graph")
        except Exception as ex:
            KospiImageDriver.quit()
            KospiImageDriver = Util.Get_KospiGraphDriver(Headless)
            print("Graph Err")
            print('에러가 발생 했습니다', ex)
        try:
            # NOTE(review): ``model`` is never assigned anywhere in this file
            # (load_model is imported but never called), so this line raises
            # NameError and always falls into the except branch below --
            # presumably the Keras model should be loaded at startup; confirm.
            label = predict.classification(headlines, model)
            print("Get labels")
            DBController.UpdateNews(CompanyFromNews, headlines, Text, NewsUrl, news_info, label) # store latest 20 articles in DB
            DBController.InsertNewsHistory(CompanyFromNews, headlines, Text, NewsUrl, news_info, nowDatehour)
            print("DB Commit : News Updated, News History Inserted")
        except Exception as ex:
            print("Label Err")
            MakeCompanyFile(MakeCompanyList) # refresh the company-list CSV
            DBController = DBHandler.MySqlController(host, ID, PW, DB_name)
            print('에러가 발생 했습니다', ex)
        time.sleep(30)
        NewsDriver.refresh()
        PriceDriver.refresh()
        KospiImageDriver.refresh()
print("DONE") | woqls22/StockNews | BackEnd/PythonScripts/main.py | main.py | py | 4,929 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "news_parser.GetNews",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "news_parser.GetCompanyFromNews",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "news_parser.get_prices",
"line_number": 24,
"usage_type": "call"
},
{
"api_nam... |
7507981867 | import os, sys, random
from datetime import datetime
# Maps sys.platform values to display names for the "os" command.
platforms = {
    "darwin": "MacOS",
    "win32": "Windows32",
    # NOTE(review): sys.platform reports "win32" even on 64-bit Windows, so
    # this "win64" entry appears unreachable -- confirm intent.
    "win64": "Windows64",
    "linux": "Linux",
}
def run_tool():
    """Interactive REPL: timestamps each prompt and dispatches simple commands.

    Commands: ``os`` (print detected platform), ``clear`` (clear screen),
    ``a+b+...`` (integer addition), ``games`` (mini-games menu), ``quit``.
    Runs until ``quit`` is entered (sys.exit).
    """
    while True:
        now = datetime.now()
        current_time = now.strftime("%H:%M:%S")
        command = input(f"[{current_time}] > ")
        if command == "os":
            print(f"Detected operating system: {platforms.get(sys.platform, 'Unknown OS')}")
        elif command == "clear":
            # Windows has no "clear"; use "cls" there.
            os.system("cls" if sys.platform.startswith("win") else "clear")
        elif "+" in command:
            _add_numbers(command)
        elif command == "games":
            _games_menu()
        elif command == "quit":
            sys.exit()
        else:
            print(f'Unknown command: "{command}" ')
def _add_numbers(command):
    # Sum the "+"-separated integer operands; report bad operands instead of
    # crashing with an unhandled ValueError (previous behavior).
    try:
        print(sum(int(part) for part in command.split("+")))
    except ValueError:
        print("All operands must be integers.")
def _games_menu():
    # Show the mini-game menu and dispatch to the chosen game.
    print("""1. Guess the word
2. Rock paper scissor
3. Mad libs generator""")
    which = input("\nWhich game would you like to play? ")
    if which == "1":
        _play_word_guess()
    elif which == "2":
        _play_rock_paper_scissor()
    elif which == "3":
        _play_mad_libs()
def _play_word_guess():
    # Pick a random word from words.txt and let the user guess it from hints.
    try:
        with open("words.txt", "r") as file:
            words = [word.strip() for word in file]
    except FileNotFoundError:
        print("Can't find words.txt. Make sure it's in the same directory and not named something else.")
        return
    word_to_guess = random.choice(words)
    guess = input(f"What word has a length of {len(word_to_guess)} characters, starts with {word_to_guess[0]} and ends with {word_to_guess[-1]}? ")
    if guess != word_to_guess:
        print(f"Incorrect. The word was {word_to_guess}.")
    else:
        print("Correct!")
def _play_rock_paper_scissor():
    # Classic rock-paper-scissors against a uniformly random computer choice.
    choices = ["rock", "paper", "scissor"]
    user_choice = input("Enter your choice: ").lower()
    if user_choice not in choices:
        print("Invalid choice.")
        return
    computer_choice = random.choice(choices)
    if user_choice == computer_choice:
        print("Tie.")
        return
    # Each choice beats exactly one other; look up what the user's choice beats.
    beats = {"rock": "scissor", "scissor": "paper", "paper": "rock"}
    if beats[user_choice] == computer_choice:
        print(f"{user_choice.capitalize()} beats {computer_choice}. You win!")
    else:
        print(f"{computer_choice.capitalize()} beats {user_choice}. Computer wins!")
def _play_mad_libs():
    # Fill a fixed template with five user-supplied words; guard against an
    # IndexError when fewer than five words are entered (previous behavior).
    give_me_words = input("Enter 5 words: ").split()
    if len(give_me_words) < 5:
        print("Please enter exactly 5 words.")
        return
    print(f"The {give_me_words[0]} was slimy, it tasted like {give_me_words[1]}. I stayed at {give_me_words[2]} place. He made me {give_me_words[3]} in the morning. We packed up at {give_me_words[4]}.")
run_tool()
| Vincent2212/CommandLine | main.py | main.py | py | 3,462 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "sys.platform",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.system",... |
8114541825 | """
Defines a function wrapper that can be used to group calls to a 'listable'
function into batches.
"""
import sys
import traceback
import asyncio
from fsc.export import export
from . import wrap_to_coroutine
@export
class BatchSubmitter:
"""
Function wrapper that collects calls to a function of one parameter, and submits
it in batches to a function which can take a list of parameters.
Arguments
---------
func: Callable
Function or coroutine which is "listable", i.e. given a list of input
parameters it will return a list of results.
loop: EventLoop
The event loop on which the batch submitter runs. Uses
``asyncio.get_event_loop()`` by default.
timeout: float
Maximum time after which the batch submitter will submit all current
tasks, even if the minimum batch size is not reached.
sleep_time : float
Time the batch submitter will sleep between checking if the minimum
batch size has been reached.
wait_batch_size : int
Minimum batch size that will be submitted before the timeout has been
reached. Is set to the same value as ``max_batch_size`` unless
specified explicitly.
max_batch_size : int
The maximum size of a batch that will be submitted.
"""
def __init__(
self,
func,
*,
loop=None,
timeout=0.1,
sleep_time=0.,
wait_batch_size=None,
max_batch_size=1000
):
self._func = wrap_to_coroutine(func)
self._loop = loop or asyncio.get_event_loop()
self._timeout = timeout
self._sleep_time = sleep_time
if max_batch_size <= 0:
raise ValueError('max_batch_size must be positive')
self._max_batch_size = max_batch_size
if wait_batch_size is None:
wait_batch_size = self._max_batch_size
if wait_batch_size <= 0:
raise ValueError('wait_batch_size must be positive')
self._wait_batch_size = wait_batch_size
self._tasks = asyncio.Queue()
self._batches = dict()
self._submit_loop_task = None
self._last_call_time = None
async def __call__(self, x):
"""
Adds a task for the given input, and starts the submission loop if needed.
"""
fut = self._loop.create_future()
self._tasks.put_nowait((x, fut))
self._last_call_time = self._loop.time()
if self._submit_loop_task is None or self._submit_loop_task.done():
self._submit_loop_task = asyncio.Task(
self._submit_loop(), loop=self._loop
)
self._submit_loop_task.add_done_callback(self._abort_on_exception)
return await fut
async def _submit_loop(self):
"""
Waits for tasks and then creates the batches which evaluate the function.
"""
while self._tasks.qsize() > 0:
await self._wait_for_tasks()
self._launch_batch()
@staticmethod
def _abort_on_exception(fut):
"""
Callback that forces a SystemExit when there is an exception in the submit loop.
"""
try:
fut.result()
except Exception: # pylint: disable=broad-except
sys.exit(''.join(traceback.format_exception(*sys.exc_info())))
    async def _wait_for_tasks(self):
        """
        Waits until either the timeout has passed or the queue size is big enough.
        """
        assert self._tasks.qsize() > 0
        # Poll until `timeout` has elapsed since the most recent __call__;
        # return early as soon as a full wait_batch_size is queued.
        # NOTE(review): with the default sleep_time=0 this is effectively a
        # busy-wait that yields to the loop each iteration — confirm intended.
        while self._loop.time() - self._last_call_time < self._timeout:
            if self._tasks.qsize() >= self._wait_batch_size:
                return
            await asyncio.sleep(self._sleep_time)
def _launch_batch(self):
"""
Launch a calculation batch.
"""
inputs = []
futures = []
for _ in range(self._max_batch_size):
try:
key, fut = self._tasks.get_nowait()
inputs.append(key)
futures.append(fut)
except asyncio.QueueEmpty:
break
task = asyncio.ensure_future(self._func(inputs))
task.add_done_callback(self._process_finished_batch)
self._batches[task] = futures
    def _process_finished_batch(self, batch_future):
        """
        Assign the results / exceptions to the futures of all finished batches.
        """
        # Drop the bookkeeping entry first so a failure below cannot leak it.
        task_futures = self._batches.pop(batch_future)
        try:
            results = batch_future.result()
            # The batch function must return exactly one result per input; a
            # mismatch raises AssertionError, which the except below fans out
            # to every future of this batch.
            assert len(results) == len(task_futures)
            for fut, res in zip(task_futures, results):
                fut.set_result(res)
        except Exception as exc:  # pylint: disable=broad-except
            # NOTE(review): on Python 3.8+ CancelledError is a BaseException
            # and is NOT caught here, leaving the futures unresolved — confirm
            # whether batch cancellation can occur.
            for fut in task_futures:
                fut.set_exception(exc)
| FrescolinoGroup/pyasynctools | fsc/async_tools/_batch_submit.py | _batch_submit.py | py | 4,827 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "asyncio.get_event_loop",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "asyncio.Queue",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "asyncio.Task",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"lin... |
7667794703 | #Usage: python3 kptable-appendix-11b.py [-h] [--help]
import datetime
import pathlib
import pandas as pd
import xlsxwriter
from lukeghg.crf.crfxmlconstants import ch4co2eq, n2oco2eq, ctoco2
from lukeghg.crf.crfxmlfunctions import ConvertFloat,ConvertSign, ConvertToCO2, SumTwoValues, SumBiomassLs
from lukeghg.crf.crfxmlfunctions import PaddingList, GenerateRowTitleList
#These constants will come from CrfXMLConstants
#Please check the
#ch4co2eq=25.0
#n2oco2eq=298.0
#ctoco2=44.0/12.0
#nkset={'IE','NA','NO','NE'}
#Sort the Records based on YearName in ascending order
def SortKey(x):
    """Sort key for CRF XML records: the record's ``YearName`` attribute."""
    attributes = x.attrib
    return attributes['YearName']
#---------------------------------The main program begins--------------------------------------------------
def appendix11b(start, end, directory, file_name):
    """Create KP Table Appendix 11b: net emissions/removals from Article 3.3 activities.

    Reads the Afforestation/Reforestation and Deforestation partial results
    ('#'-commented csv files produced by the inventory) from ``directory``,
    converts C/CH4/N2O amounts to CO2 equivalents, writes the two tables into
    the text file ``file_name`` and finally mirrors that text file into an
    Excel workbook (same stem, '.xlsx' suffix) in the current directory.

    Arguments
    ---------
    start: int
        First inventory year of the time series (e.g. 1990).
    end: int
        Last inventory year; also used to size padded series.
    directory: str
        Directory containing the partial-result csv files.
    file_name: str
        Name of the '#'-delimited output text file.
    """
    # BUGFIX: the original declared `global ch4co2eq, n2oco2eq, ctoco2` and then
    # reused `n2oco2eq`/`ch4co2eq` as loop variables in the first table loop,
    # silently overwriting the module-level conversion constants for any later
    # caller.  The constants are now only read (no global statement needed) and
    # the loop variables are renamed.
    inventory_year=end
    #Output file, the table
    kptable_appendix_11b_file = file_name
    directory=directory+'/'
    #Table Appendix-11b Afforestation/Reforestation and Deforestation files
    #Deforestation "Conversion to water CH4" comes from NIR folder
    #(currently unused below; CH4 is taken from line 3 of KP4A2_FLtowaters_orgsoil.csv)
    kp4a2_fl_to_waters_ch4_org=directory+'KP4A2_FLtowaters_orgsoil_CH4.csv'
    #The rest of the files are from crf-folder.
    kp4a_agr_bm_gains_losses=directory+'KP4A_agr_bm_gains_losses.csv'
    #2015 file name change
    #2016 file name change
    #2015:kpa2_ar_under_D_gains='KP4A2_AR_und_defor_treebm_gains.csv'
    kpa2_ar_under_D_gains=directory+'KP4A2_AR_und_D_living_biomass_gains_trees.csv'
    kp4a2_sl_soil=directory+'KP4A2_SL_soil.csv'
    kp4a2_ar_under_d_soil=directory+'KP4A2_AR_und_defor_soils.csv'
    #2015 KP_MTT_UID.csv in two files: KP_defor_mineral.csv and KP_defor_organic.csv
    #kp_uid_mtt='KP_MTT_UID.csv'
    kp_defor_mineral=directory+'KP_defor_mineral.csv'
    kp_defor_organic=directory+'KP_defor_organic.csv'
    kp4a2_fl_to_wl_soil=directory+'KP4A2_FLtoWL_soils.csv'
    kp4a2_clglpewesl_deadwood=directory+'KP4A2_CLGLPEWESL_deadwood.csv'
    kp4a2_d_living_biomass_losses_trees=directory+'KP4A2_D_living_biomass_losses_trees.csv'
    kp4a2_fl_to_waters_org_soil=directory+'KP4A2_FLtowaters_orgsoil.csv'
    #2015 rename
    #kp4a2_d_mineralization='KP4A2_D_mineraalisationcl_gl_sl.csv'
    kp4a2_d_mineralization=directory+'KPA2_soil_leaching_N2O.csv'
    #2015 addition is Afforestation mineralization
    kp4_ar_mineralization=directory+'KP4_Affor_mineralization.csv'
    kp4a2_fl_to_wl_non_co2=directory+'KP4A2_FLtoWL_soils_nonCO2.csv'
    kp4_living_biomass_gains_trees=directory+'KP4_living_biomass_gains_trees.csv'
    kp4_ar_living_biomass_losses_trees=directory+'KP4A1_AR_living_biomass_losses_trees.csv'
    kp4a1_clglsl_mineral_soil=directory+'KP4A1_CLGLSL_mineral_soil.csv'
    kp4a1_ar_org_soil=directory+'KP4A1_AR_Organic_soil_C.csv'
    kp4a11_wildfires=directory+'KP4A11_wildfires.csv'
    kp4a1_clglpewesl_organic_soils_nonco2=directory+'KP4A1_CLGLPEWESL_organic_soils_nonco2.csv'
    kp4_hwp_ard=directory+'KP4_HWP-AR.csv'
    #Data for the two Tables in Appendix 1
    #1. Deforestation: Conversion to water CH4
    #Change in 2015: use third (CH4) line in kp4a2_fl_to_waters_org_soil
    #----------------------------------------
    f = open(kp4a2_fl_to_waters_org_soil)
    #Read to a list [[year,val1],[year,val2]....,[year,valN]]
    ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #Third line is CH4
    ls = ls[2]
    #Convert to CO2 (drop the leading row title first)
    ls.pop(0)
    fl_to_waters_ch4_co2_ls = [ConvertToCO2(ch4co2eq,x) for x in ls]
    f.close()
    #2. Agriculture Afforestation and Deforestation biomasses
    #---------------------------------------------------------
    f = open(kp4a_agr_bm_gains_losses)
    agr_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #Pick the deforestation, the first part in the file
    agr_d_ls = agr_ls[0:8]
    #Pick the Afforestation and Reforestation, rest of the file
    agr_ar_ls = agr_ls[8:len(agr_ls)]
    #Deforestation: Sum the biomassess: Cropland, Grassland, North and South Finland,
    #above ground and below ground
    agr_d_bm_ls=SumBiomassLs(agr_d_ls)
    #Afforestation: Sum the biomassess: Cropland, Grassland, North and South Finland,
    #above ground and below ground
    agr_ar_bm_ls=SumBiomassLs(agr_ar_ls)
    #Convert to CO2 and convert sign: if biomass increases -> emissions decrease and vice versa
    agr_d_co2_ls=[ConvertSign(ConvertToCO2(ctoco2,x)) for x in agr_d_bm_ls]
    agr_ar_co2_ls=[ConvertSign(ConvertToCO2(ctoco2,x)) for x in agr_ar_bm_ls]
    f.close()
    #3. Deforestation biomass losses in trees
    #----------------------------------------
    f = open(kp4a2_d_living_biomass_losses_trees)
    d_trees_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #Sum the biomasses, CL, GL, SETT, WL, North and South Finland, below and above ground
    trees_bm_ls = SumBiomassLs(d_trees_ls)
    #Convert to CO2 and convert sign: if biomass increases -> emissions decrease and vice versa
    trees_d_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in trees_bm_ls]
    f.close()
    #Deforestation Biomass: Afforestation/Reforestation under Deforestation, gains
    #---------------------------------------------------------------------
    f = open(kpa2_ar_under_D_gains)
    ar_under_d_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    ar_under_d_sum_ls = SumBiomassLs(ar_under_d_ls)
    #Convert to CO2 and convert sign: if biomass increases -> emissions decrease and vice versa
    ar_under_d_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in ar_under_d_sum_ls]
    f.close()
    #Deforestation: DOM+SOM Mineral soils
    #-----------------------------------
    f = open(kp4a2_sl_soil)
    d_sl_soil_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    d_sl_soil_sum_ls = SumBiomassLs(d_sl_soil_ls)
    d_sl_soil_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in d_sl_soil_sum_ls]
    f.close()
    f = open(kp4a2_ar_under_d_soil)
    d_ar_under_d_soil_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #Sum the both lines
    d_ar_under_d_min_soil_sum_ls = SumBiomassLs(d_ar_under_d_soil_ls)
    d_ar_under_d_min_soil_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in d_ar_under_d_min_soil_sum_ls]
    f.close()
    #Settlements are now in Mineral soil, take lines 9 and 10
    f = open(kp4a2_clglpewesl_deadwood)
    d_clglpewesl_deadwood_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    d_clglpewesl_deadwood_ls = d_clglpewesl_deadwood_ls[8:len(d_clglpewesl_deadwood_ls)]
    d_clglpewesl_deadwood_min_sum_ls = SumBiomassLs(d_clglpewesl_deadwood_ls)
    d_clglpewesl_deadwood_min_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in d_clglpewesl_deadwood_min_sum_ls]
    f.close()
    f = open(kp_defor_mineral)
    d_mtt_min_soil_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    d_mtt_min_soil_ls = d_mtt_min_soil_ls[0:4]
    d_mtt_min_soil_sum_ls = SumBiomassLs(d_mtt_min_soil_ls)
    d_mtt_min_soil_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in d_mtt_min_soil_sum_ls]
    f.close()
    d_dom_som_min_soil_deadwood_sum_co2_ls = [SumTwoValues(a,SumTwoValues(b,SumTwoValues(c,d))) for (a,b,c,d) in zip(d_sl_soil_co2_ls,
                                                                                                                     d_ar_under_d_min_soil_co2_ls,
                                                                                                                     d_clglpewesl_deadwood_min_co2_ls,
                                                                                                                     d_mtt_min_soil_co2_ls)]
    #Deforestation: DOM+SOM Organic soils + Deadwood
    #-----------------------------------------------
    f = open(kp4a2_fl_to_wl_soil)
    d_fl_to_wl_soil_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    d_fl_to_wl_soil_sum_ls = SumBiomassLs(d_fl_to_wl_soil_ls)
    d_fl_to_wl_soil_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in d_fl_to_wl_soil_sum_ls]
    f.close()
    f = open(kp4a2_clglpewesl_deadwood)
    d_clglpewesl_deadwood_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    d_clglpewesl_deadwood_ls = d_clglpewesl_deadwood_ls[0:8]
    d_clglpewesl_deadwood_org_sum_ls = SumBiomassLs(d_clglpewesl_deadwood_ls)
    d_clglpewesl_deadwood_org_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in d_clglpewesl_deadwood_org_sum_ls]
    f.close()
    f = open(kp_defor_organic)
    d_mtt_org_soil_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    d_mtt_org_soil_sum_ls = SumBiomassLs(d_mtt_org_soil_ls)
    d_mtt_org_soil_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in d_mtt_org_soil_sum_ls]
    f.close()
    d_dom_som_org_soil_deadwood_sum_co2_ls=[SumTwoValues(a,SumTwoValues(b,c)) for (a,b,c) in
                                            zip(d_fl_to_wl_soil_co2_ls,d_clglpewesl_deadwood_org_co2_ls,d_mtt_org_soil_co2_ls)]
    #Deforestation: Conversion to water CO2
    #Change in 2015: Lines 1 and 2 are C
    #--------------------------------------
    f = open(kp4a2_fl_to_waters_org_soil)
    d_fl_to_waters_org_soil_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #Lines 1,2 are C
    d_fl_to_waters_org_soil_ls = d_fl_to_waters_org_soil_ls[0:2]
    d_fl_to_waters_org_soil_sum_ls = SumBiomassLs(d_fl_to_waters_org_soil_ls)
    d_fl_to_waters_org_soil_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in d_fl_to_waters_org_soil_sum_ls]
    f.close()
    #Deforestation: Mineralization
    #-----------------------------
    f = open(kp4a2_d_mineralization)
    d_mineralization_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #2015 two last lines are N2O
    d_mineralization_ls = d_mineralization_ls[2:len(d_mineralization_ls)]
    d_mineralization_sum_ls = SumBiomassLs(d_mineralization_ls)
    d_mineralization_co2_ls = [ConvertToCO2(n2oco2eq,x) for x in d_mineralization_sum_ls]
    f.close()
    #Deforestation: Drained and rewetted organic soils N2O
    #-----------------------------------------------------
    f = open(kp4a2_fl_to_wl_non_co2)
    d_fl_to_wl_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    d_fl_to_wl_n2o_ls = d_fl_to_wl_ls[0:1]
    d_fl_to_wl_n2o_sum_ls = SumBiomassLs(d_fl_to_wl_n2o_ls)
    d_fl_to_wl_n2o_co2_ls = [ConvertToCO2(n2oco2eq,x) for x in d_fl_to_wl_n2o_sum_ls]
    f.close()
    #Deforestation: Drained and rewetted organic soils CH4
    #-----------------------------------------------------
    f = open(kp4a2_fl_to_wl_non_co2)
    d_fl_to_wl_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    d_fl_to_wl_ch4_ls = d_fl_to_wl_ls[1:2]
    d_fl_to_wl_ch4_sum_ls = SumBiomassLs(d_fl_to_wl_ch4_ls)
    d_fl_to_wl_ch4_co2_ls = [ConvertToCO2(ch4co2eq,x) for x in d_fl_to_wl_ch4_sum_ls]
    f.close()
    #Deforestation HWP
    #-----------------
    #HWP is IE
    d_hwp_ls = ['IE']*(inventory_year-1990+1)
    #4. Afforestation living biomass gains and losses trees
    #-------------------------------------------
    f = open(kp4_living_biomass_gains_trees)
    ar_trees_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    f.close()
    #Pick the Afforestation part, 2015 mineral and organic are added (not separately in the file)
    ar_bm_gains_trees_ls = ar_trees_ls[4:len(ar_trees_ls)]
    #Sum the biomasses, CL, CL, WLpeat, WLorg, Settlement, mineral, organic, South and North Finland
    ar_bm_sum_gains_trees_ls = SumBiomassLs(ar_bm_gains_trees_ls)
    f = open(kp4_ar_living_biomass_losses_trees)
    ar_bm_losses_trees_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    f.close()
    ar_bm_sum_losses_trees_ls = SumBiomassLs(ar_bm_losses_trees_ls)
    ar_bm_net_trees_ls = [SumTwoValues(x,y) for (x,y) in zip(ar_bm_sum_gains_trees_ls,ar_bm_sum_losses_trees_ls)]
    #Convert to CO2 and convert sign: if biomass increases -> emissions decrease and vice versa
    trees_ar_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in ar_bm_net_trees_ls]
    #5. Afforestation, DOM+SOM Mineral soils
    #----------------------------------------
    f = open(kp4a1_clglsl_mineral_soil)
    dom_som_min_soil_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    dom_som_min_soil_sum_ls = SumBiomassLs(dom_som_min_soil_ls)
    #Convert to CO2 and convert sign: if biomass increases -> emissions decrease and vice versa
    dom_som_min_soil_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in dom_som_min_soil_sum_ls]
    f.close()
    #6. Afforestation, DOM+SOM Organic soils
    #----------------------------------------
    f=open(kp4a1_ar_org_soil)
    dom_som_org_soil_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    dom_som_org_soil_sum_ls = SumBiomassLs(dom_som_org_soil_ls)
    #Convert to CO2 and convert sign: if biomass increases -> emissions decrease and vice versa
    dom_som_org_soil_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in dom_som_org_soil_sum_ls ]
    f.close()
    #7. Biomass burning
    #------------------
    f=open(kp4a11_wildfires)
    biomass_burning_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #CO2 South and North Finland
    bm_burning_co2_ls = biomass_burning_ls[0:2]
    bm_burning_co2_sum_ls = SumBiomassLs(bm_burning_co2_ls)
    #CH4 South and North Finland
    bm_burning_ch4_ls = biomass_burning_ls[2:4]
    bm_burning_ch4_sum_ls = SumBiomassLs(bm_burning_ch4_ls)
    #N2O South and North Finland
    bm_burning_n2o_ls = biomass_burning_ls[4:6]
    bm_burning_n2o_sum_ls = SumBiomassLs(bm_burning_n2o_ls)
    #Convert ch4 and n2o to co2eq and sum all three emissions
    biomass_burning_ch4co2eq_ls = [ConvertToCO2(ch4co2eq,x) for x in bm_burning_ch4_sum_ls]
    biomass_burning_n2oco2eq_ls = [ConvertToCO2(n2oco2eq,x) for x in bm_burning_n2o_sum_ls]
    biomass_burning_co2_sum_ls = [SumTwoValues(x,SumTwoValues(y,z)) for (x,y,z) in zip(bm_burning_co2_sum_ls,biomass_burning_ch4co2eq_ls,biomass_burning_n2oco2eq_ls)]
    f.close()
    #8. 2015 addition Mineralization
    #-------------------------------
    f=open(kp4_ar_mineralization)
    ar_mineralization_ls=[x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #South and North Finland
    ar_mineralization_no2_ls = ar_mineralization_ls[0:2]
    ar_mineralization_n2o_sum_ls=SumBiomassLs(ar_mineralization_no2_ls)
    ar_mineralization_n2o_co2_ls = [ConvertToCO2(n2oco2eq,x) for x in ar_mineralization_n2o_sum_ls]
    #9.Drained organic soils N2O
    #---------------------------
    f=open(kp4a1_clglpewesl_organic_soils_nonco2)
    drained_org_soils_nonco2_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #Two lines in the file, the first one is CH4
    drained_org_soils_sum_ch4_ls = drained_org_soils_nonco2_ls[0:1]
    drained_org_soils_sum_ch4_ls = SumBiomassLs(drained_org_soils_sum_ch4_ls)
    #Convert from CH4 to CO2
    drained_org_soils_sum_ch4co2eq_ls = [ConvertToCO2(ch4co2eq,x) for x in drained_org_soils_sum_ch4_ls]
    f.close()
    #10.Drained organic soils CH4
    #---------------------------
    f=open(kp4a1_clglpewesl_organic_soils_nonco2)
    drained_org_soils_nonco2_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #Two lines in the file, the second one is N2O
    drained_org_soils_sum_n2o_ls = drained_org_soils_nonco2_ls[1:2]
    drained_org_soils_sum_n2o_ls = SumBiomassLs(drained_org_soils_sum_n2o_ls)
    #Convert from N2O to CO2
    drained_org_soils_sum_n2oco2eq_ls = [ConvertToCO2(n2oco2eq,x) for x in drained_org_soils_sum_n2o_ls]
    f.close()
    #11.HWP afforestation
    #--------------------
    f=open(kp4_hwp_ard)
    hwp_ard_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #2015 the file structure is in 3 parts: 1) Initial stock, 2) Gains and losses 3) half-time information
    #The sum of gains and losses will go to the table
    #2016 the file structure is for each item: 1) half life, 2) init stock, 3) gains and 4) losses
    #Pick every fourth item starting from the first right place in the list
    hwp_ar_ls_gains = hwp_ard_ls[2::4]
    hwp_ar_ls_losses = hwp_ard_ls[3::4]
    hwp_ar_ls = hwp_ar_ls_gains+hwp_ar_ls_losses
    hwp_ar_sum_ls = SumBiomassLs(hwp_ar_ls)
    #Removals are good for the atmosphere, change the sign
    hwp_ar_sum_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in hwp_ar_sum_ls]
    #HWP does not have full time series from 1990; pad the missing years with 'IE'
    hwp_padding_ls = PaddingList(inventory_year,hwp_ar_sum_ls)
    hwp_padding_ls = ['IE']*len(hwp_padding_ls)
    hwp_ar_sum_co2_ls = hwp_padding_ls+hwp_ar_sum_ls
    f.close()
    #Create the two tables Afforestation/Reforestation and Deforestation
    #-------------------------------------------------------------------
    print("Creating first text file for Afforestation/Reforestation and Deforestation in", file_name)
    f1 = open(kptable_appendix_11b_file,'w')
    delim ='#'
    table_name="Appendix_11b"
    table_header="Net emissions and removals from the ativities under Articles 3.3\n"
    table1title1="Table 1_App_11b Net emissions and removals from Afforestation and Reforestation, kt CO2eq.\n"
    table2title2="Table 2_App_11b Net emissions and removals from Deforestation, ktCO2eq.\n"
    table1columns1=delim+"Biomass"+delim+"DOM+SOM Mineral soils"+delim+"DOM+SOM Organic soils"+delim+"Biomass burning"+delim+"Mineralization"+delim
    table1columns1=table1columns1+"Drained organic soils N2O"+delim+"Drained organic soils CH4"+delim+"HWP"+delim+"Total\n"
    table2columns2=delim+"Biomass"+delim+"DOM+SOM Mineral soils"+delim+"DOM+SOM Organic soils+Deadwood"+delim+"Conversion to water CO2"+delim+"Mineralization"+delim
    table2columns2=table2columns2+"Drained and rewetted organic soils CH4"+delim+"Drained and rewetted organic soils NO2"+delim
    table2columns2=table2columns2+"HWP"+delim+"Conversion to water CH4"+delim+"Total"+"#\n"
    #Row titles from start year to inventory year
    row_title_ls = GenerateRowTitleList(start,inventory_year)
    f1.write(table_name)
    f1.write(table_header)
    #Afforestation and Reforestation
    f1.write(table1title1)
    f1.write(table1columns1)
    #Loop variables renamed (drained_n2o_co2/drained_ch4_co2) so they no longer
    #shadow the module-level n2oco2eq/ch4co2eq conversion constants.
    for (year,agr_ar_co2,trees_ar_co2,dom_som_min,dom_som_org,bm_burning,ar_min_co2,drained_n2o_co2,drained_ch4_co2,hwp) in zip(row_title_ls,agr_ar_co2_ls,trees_ar_co2_ls,
                                                                                                                                dom_som_min_soil_co2_ls,
                                                                                                                                dom_som_org_soil_co2_ls,
                                                                                                                                biomass_burning_co2_sum_ls,
                                                                                                                                ar_mineralization_n2o_co2_ls,
                                                                                                                                drained_org_soils_sum_n2oco2eq_ls,
                                                                                                                                drained_org_soils_sum_ch4co2eq_ls,
                                                                                                                                hwp_ar_sum_co2_ls):
        total=SumTwoValues(SumTwoValues(SumTwoValues(SumTwoValues(SumTwoValues(SumTwoValues(SumTwoValues(SumTwoValues(agr_ar_co2,trees_ar_co2),
                                                                                                                      dom_som_min),dom_som_org),bm_burning),
                                                                                            ar_min_co2),drained_n2o_co2),drained_ch4_co2),hwp)
        f1.write(str(year)+delim+str(SumTwoValues(agr_ar_co2,trees_ar_co2))+"#"+str(dom_som_min)+"#"+str(dom_som_org)+"#"+str(bm_burning)+"#")
        f1.write(str(ar_min_co2)+"#"+str(drained_n2o_co2)+"#"+str(drained_ch4_co2)+"#"+str(hwp)+"#"+str(total)+"#\n")
    f1.write("Data from:"+"#"+kp4a_agr_bm_gains_losses+" Lines:9-"+str(len(agr_ls))+"#"+kp4a1_clglsl_mineral_soil+"#"+kp4a1_ar_org_soil+"#"+kp4a11_wildfires+"#")
    f1.write(kp4_ar_mineralization+" Lines:1,2"+delim+kp4a1_clglpewesl_organic_soils_nonco2+" Line:1"+"#"+kp4a1_clglpewesl_organic_soils_nonco2+" Line:2"+"#"+kp4_hwp_ard+"#\n")
    f1.write("#"+kp4_living_biomass_gains_trees+" Lines:5-"+str(len(ar_trees_ls))+"#"+"#"+"#"+"CO2 Lines:1-2,CH4 Lines:3-4,N2O Lines:5-6"+"####"+"Gains Lines:3,7,11 etc."+"#\n")
    f1.write("#"+kp4_ar_living_biomass_losses_trees+"#######"+"Losses Lines:4,8,12 etc."+"#\n")
    f1.write('\n\n')
    #Deforestation
    f1.write(table2title2)
    f1.write(table2columns2)
    for (year,agr_d_co2,trees_d_co2,ar_under_d_co2,
         d_sl_soil_co2,d_ar_under_d_min_soil_co2,d_mtt_min_soil_co2,d_clglpewesl_deadwood_min_co2,
         dom_som_org_soil_deadwood_co2,
         d_fl_to_waters_org_soil_co2,
         d_mineralization_co2,
         d_fl_to_wl_ch4_co2,d_fl_to_wl_n2o_co2,
         d_hwp,
         to_waters_ch4) in zip(row_title_ls,agr_d_co2_ls,trees_d_co2_ls,ar_under_d_co2_ls,
                               d_sl_soil_co2_ls,d_ar_under_d_min_soil_co2_ls,d_mtt_min_soil_co2_ls,d_clglpewesl_deadwood_min_co2_ls,
                               d_dom_som_org_soil_deadwood_sum_co2_ls,
                               d_fl_to_waters_org_soil_co2_ls,
                               d_mineralization_co2_ls,
                               d_fl_to_wl_ch4_co2_ls,d_fl_to_wl_n2o_co2_ls,
                               d_hwp_ls,
                               fl_to_waters_ch4_co2_ls):
        biomass = SumTwoValues(agr_d_co2,trees_d_co2)
        biomass = SumTwoValues(biomass,ar_under_d_co2)
        dom_som_min_soil=SumTwoValues(d_sl_soil_co2,d_ar_under_d_min_soil_co2)
        dom_som_min_soil=SumTwoValues(dom_som_min_soil,d_mtt_min_soil_co2)
        dom_som_min_soil=SumTwoValues(dom_som_min_soil,d_clglpewesl_deadwood_min_co2)
        total1 = SumTwoValues(SumTwoValues(SumTwoValues(SumTwoValues(agr_d_co2,trees_d_co2),ar_under_d_co2),d_sl_soil_co2),d_ar_under_d_min_soil_co2)
        total2 = SumTwoValues(SumTwoValues(d_mtt_min_soil_co2,d_clglpewesl_deadwood_min_co2),dom_som_org_soil_deadwood_co2)
        total3 = SumTwoValues(SumTwoValues(d_fl_to_waters_org_soil_co2,d_mineralization_co2),d_fl_to_wl_ch4_co2)
        total4 = SumTwoValues(SumTwoValues(d_fl_to_wl_n2o_co2,d_hwp),to_waters_ch4)
        #NOTE(review): the partial totals are combined with plain '+' here,
        #unlike table 1 which uses SumTwoValues throughout — confirm that
        #notation keys ('IE','NA',...) cannot reach this point.
        total = total1+total2+total3+total4
        f1.write(str(year)+delim+str(biomass)+delim+str(dom_som_min_soil)+delim+str(dom_som_org_soil_deadwood_co2)+delim+str(d_fl_to_waters_org_soil_co2)+delim+
                 str(d_mineralization_co2)+delim+str(d_fl_to_wl_ch4_co2)+delim+str(d_fl_to_wl_n2o_co2)+delim+str(d_hwp)+delim+str(to_waters_ch4)+"#"+str(total)+"#\n")
    f1.write("Data from:"+"#"+kp4a_agr_bm_gains_losses+" Lines:1-8"+delim+kp4a2_sl_soil+delim+kp4a2_fl_to_wl_soil+delim+kp4a2_fl_to_waters_org_soil+" Lines:1,2"+"#")
    f1.write(kp4a2_d_mineralization+delim+kp4a2_fl_to_wl_non_co2+" Line:2"+delim+kp4a2_fl_to_wl_non_co2+" Line:1"+delim+"No file"+delim)
    f1.write(kp4a2_fl_to_waters_org_soil+" Line:3"+"#\n")
    f1.write("#"+kp4a2_d_living_biomass_losses_trees+delim+kp4a2_ar_under_d_soil+delim+kp4a2_clglpewesl_deadwood+" Lines:1-8"+"#\n")
    f1.write("#"+kpa2_ar_under_D_gains+delim+kp_defor_mineral+delim+kp_defor_organic+"#\n")
    f1.write("#"+delim+kp4a2_clglpewesl_deadwood+" Lines:9-10"+"#\n")
    now = datetime.datetime.now()
    f1.write("Date produced: "+str(now)+"\n")
    f1.close()
    #Create excel
    p = pathlib.Path(file_name)
    stem = p.stem
    p_excel = pathlib.Path(stem+'.xlsx')
    print("Creating Excel file for Afforestation/Reforestation and Deforestation in", str(p_excel))
    #Define max number of columns, dataframe can adjust to it
    names=['col' + str(x) for x in range(12) ]
    df = pd.read_csv(file_name,engine='python',header=None,delimiter='#',keep_default_na=False,names=names,dtype=str)
    writer = pd.ExcelWriter(p_excel,engine='openpyxl')
    df_float = df.applymap(ConvertFloat)
    df_float.to_excel(writer,file_name,header=False)
    writer.close()
| jariperttunen/lukeghg | lukeghg/lukeghg/nir/kptableappendix11b.py | kptableappendix11b.py | py | 24,795 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "lukeghg.crf.crfxmlfunctions.ConvertToCO2",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "lukeghg.crf.crfxmlconstants.ch4co2eq",
"line_number": 71,
"usage_type": "argument"
},
{
"api_name": "lukeghg.crf.crfxmlfunctions.SumBiomassLs",
"line_number": 8... |
31180051801 | from dataclasses import dataclass
from datetime import datetime
from sqlalchemy_serializer import SerializerMixin
from app import db
@dataclass
class Result(db.Model, SerializerMixin):
    """Latest known state of a single Jenkins job, persisted via SQLAlchemy.

    The class is both a SQLAlchemy model (``db.Model``) and a dataclass: the
    bare annotations below tell ``@dataclass`` which fields make up the
    serialized representation, while the ``db.Column`` assignments define the
    actual table schema under the same names.
    """
    __tablename__ = 'results'
    # Fields exposed through the dataclass/serializer layer.
    name: str
    last_build: int
    last_result: str
    last_exception: str
    traceback: str
    url: str
    last_update: datetime
    # Table schema; ``id`` is intentionally not a dataclass field (no annotation).
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String())
    last_build = db.Column(db.Integer)
    last_result = db.Column(db.String())
    last_exception = db.Column(db.Text())
    traceback = db.Column(db.Text())
    url = db.Column(db.String())
    # NOTE(review): datetime.utcnow produces a naive timestamp — confirm that
    # all readers/writers agree on UTC.
    last_update = db.Column(db.DateTime, nullable=False,
                            default=datetime.utcnow)
| TomerCohen95/JenkinsViewer | models.py | models.py | py | 740 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "app.db.Model",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "app.db",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "sqlalchemy_serializer.SerializerMixin",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "datetime.... |
2986802119 | # https://towardsdatascience.com/a-detailed-guide-to-pytorchs-nn-transformer-module-c80afbc9ffb1
import math
from datetime import datetime
from os import path
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from src.data_management.datasets.better_crnn_dataset import GOBetterCRNNDataset
from src.helper.utils import PATH_TO_LOG_FOLDER, PATH_TO_MODEL_FOLDER, print_blue, print_green, print_red, print_yellow
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding ("Attention Is All You Need").

    Adds a fixed sin/cos position signal to token embeddings and applies
    dropout.  Expects sequence-first input of shape (seq_len, batch, dim_model).
    """
    def __init__(self, dim_model, dropout_p, max_len):
        super().__init__()
        self.dropout = nn.Dropout(dropout_p)
        # Column vector of positions [0, 1, ..., max_len-1].
        positions = torch.arange(0, max_len, dtype=torch.float).view(-1, 1)
        # Frequency term 1/10000^(2i/dim_model) for the even dimensions i.
        frequencies = torch.exp(
            torch.arange(0, dim_model, 2).float() * (-math.log(10000.0)) / dim_model
        )
        angles = positions * frequencies
        encoding = torch.zeros(max_len, dim_model)
        encoding[:, 0::2] = torch.sin(angles)  # PE(pos, 2i)
        encoding[:, 1::2] = torch.cos(angles)  # PE(pos, 2i + 1)
        # Shape (max_len, 1, dim_model) so it broadcasts over the batch axis.
        # Registered as a buffer: moves with the module but is not trained.
        self.register_buffer('pos_encoding', encoding.unsqueeze(0).transpose(0, 1))
    def forward(self, token_embedding: torch.tensor) -> torch.tensor:
        """Add the positional signal for the first seq_len positions, then dropout."""
        seq_len = token_embedding.size(0)
        return self.dropout(token_embedding + self.pos_encoding[:seq_len, :])
class GOTransformer(nn.Module):
    """
    Encoder-decoder transformer mapping pre-embedded feature sequences to
    token sequences.  Adapted from "A detailed guide to Pytorch's
    nn.Transformer() module.", by Daniel Melchor:
    https://medium.com/p/c80afbc9ffb1/

    Note: unlike the original article, the *source* side is NOT passed through
    the embedding layer in forward() — callers supply already-embedded float
    vectors of width dim_model.  Only the target token ids are embedded here.
    """
    # Constructor
    def __init__(
        self,
        num_tokens,
        dim_model,
        num_heads,
        num_encoder_layers,
        num_decoder_layers,
        dropout_p,
    ):
        super().__init__()
        # INFO
        self.model_type = 'Transformer'
        self.dim_model = dim_model
        # LAYERS
        # Embedding is only applied to target tokens (and SOS/EOS via embed_token).
        self.embedding = nn.Embedding(num_tokens, dim_model)
        self.positional_encoder = PositionalEncoding(
            dim_model=dim_model, dropout_p=dropout_p, max_len=5000
        )
        self.transformer = nn.Transformer(
            d_model=dim_model,
            nhead=num_heads,
            num_encoder_layers=num_encoder_layers,
            num_decoder_layers=num_decoder_layers,
            dropout=dropout_p,
        )
        # Projects decoder output back to per-token logits.
        self.linear = nn.Linear(dim_model, num_tokens)
    def forward(self, src, tgt, tgt_mask=None, src_pad_mask=None, tgt_pad_mask=None):
        # src: already-embedded floats, (batch_size, src sequence length, dim_model)
        # tgt: token ids, (batch_size, tgt sequence length)
        # The src embedding step is deliberately skipped (src arrives embedded):
        #src = self.embedding(src) * math.sqrt(self.dim_model)
        tgt = self.embed_token(tgt)
        src = self.positional_encoder(src)
        tgt = self.positional_encoder(tgt)
        # We could use the parameter batch_first=True, but our KDL version doesn't support it yet, so we permute
        # to obtain size (sequence length, batch_size, dim_model),
        src = src.permute(1,0,2)
        tgt = tgt.permute(1,0,2)
        # Transformer blocks - Out size = (sequence length, batch_size, num_tokens)
        transformer_out = self.transformer(src, tgt, tgt_mask=tgt_mask, src_key_padding_mask=src_pad_mask, tgt_key_padding_mask=tgt_pad_mask)
        linear_out = self.linear(transformer_out)
        return linear_out
    def get_tgt_mask(self, size) -> torch.tensor:
        # Generates a square matrix where each row allows one word more to be seen
        # (causal mask: 0 on/below the diagonal, -inf above it).
        mask = torch.tril(torch.ones(size, size) == 1) # Lower triangular matrix
        mask = mask.float()
        mask = mask.masked_fill(mask == 0, float('-inf')) # Convert zeros to -inf
        mask = mask.masked_fill(mask == 1, float(0.0)) # Convert ones to 0
        return mask
    def create_pad_mask(self, matrix: torch.tensor, pad_token: int) -> torch.tensor:
        # If matrix = [1,2,3,0,0,0] where pad_token=0, the result mask is
        # [False, False, False, True, True, True]
        return (matrix == pad_token)
    def embed_token(self, token):
        # Scaled embedding lookup as in the original transformer paper.
        return self.embedding(token) * math.sqrt(self.dim_model)
class GOTransformerTrainer:
    def __init__(self,
        sequence_length: int = 32,
        batch_size: int = 8,
        dim_model: int = 4096,
        epochs: int = 100) -> None:
        """Store the hyperparameters, pick a device and open a TensorBoard writer.

        Side effect: creates a timestamped run directory under the project's
        log folder.
        """
        # parameters
        self.sequence_length = sequence_length
        self.batch_size = batch_size
        self.dim_model = dim_model # (closest power of two to shape of data)
        self.epochs = epochs
        # SOS/EOS token ids and their embedded vectors; populated by build().
        self.SOS_TOKEN = None
        self.EOS_TOKEN = None
        self.SOS_TOKEN_EMBEDDED = None
        self.EOS_TOKEN_EMBEDDED = None
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.writer = SummaryWriter(path.join(PATH_TO_LOG_FOLDER, 'runs', f'transformer_{str(datetime.now())}'))
    def train(self):
        """Load the dataset, build the transformer and run the full training.

        Splits the dataset 80/20 into train/validation loaders, logs the
        hyperparameters to TensorBoard and delegates the epoch loop to
        ``self.fit`` (defined elsewhere in this class).
        """
        torch.autograd.set_detect_anomaly(True)
        dataset = GOBetterCRNNDataset(sequence_length=self.sequence_length)
        train_set, val_set = torch.utils.data.random_split(dataset, [round(len(dataset) * 0.8), round(len(dataset) * 0.2)])
        train_loader = torch.utils.data.DataLoader(train_set, batch_size=self.batch_size, shuffle=True)
        val_loader = torch.utils.data.DataLoader(val_set, batch_size=1, shuffle=True)
        print(f'Using device {self.device}')
        # 4 tokens: 2 data classes plus SOS (2) and EOS (3).
        model_params = {
            'num_tokens': 4,
            'dim_model': self.dim_model,
            'num_heads': 2,
            'num_encoder_layers': 3,
            'num_decoder_layers': 3,
            'dropout_p': 0.1
        }
        # NOTE(review): build() embeds SOS/EOS with a *separate* freshly
        # initialised model, not the one trained below — confirm intended.
        self.build(model_params)
        model = GOTransformer(**model_params).to(self.device)
        opt = torch.optim.SGD(model.parameters(), lr=0.01)
        loss_fn = nn.CrossEntropyLoss()
        self.writer.add_text('hyperparameters/batch_size', str(self.batch_size))
        self.writer.add_text('hyperparameters/sequence_length', str(self.sequence_length))
        self.writer.add_text('hyperparameters/dim_model', str(self.dim_model))
        self.writer.add_text('hyperparameters/epochs', str(self.epochs))
        time_before = datetime.now()
        self.fit(model, opt, loss_fn, train_loader, val_loader, self.epochs, self.writer)
        time_after = datetime.now()
        time_difference = time_after - time_before
        print(str(time_difference))
        self.writer.add_text('metrics/training_time', str(time_difference))
    def build(self, model_params):
        """Initialise the SOS/EOS token ids and their embedded vectors.

        NOTE(review): the model constructed here is local and discarded — its
        (random) embedding weights produce SOS_TOKEN_EMBEDDED/EOS_TOKEN_EMBEDDED,
        while train() trains a different GOTransformer instance.  Confirm the
        two are meant to differ.
        """
        model = GOTransformer(**model_params).to(self.device)
        self.SOS_TOKEN = torch.tensor([2]).to(self.device)
        self.EOS_TOKEN = torch.tensor([3]).to(self.device)
        self.SOS_TOKEN_EMBEDDED = model.embed_token(self.SOS_TOKEN)
        self.EOS_TOKEN_EMBEDDED = model.embed_token(self.EOS_TOKEN)
def add_sequence_tokens(self, batch):
if isinstance(batch[0], (torch.LongTensor, torch.cuda.LongTensor)):
return torch.stack([torch.concat((self.SOS_TOKEN, item, self.EOS_TOKEN)) for item in batch])
elif isinstance(batch[0], (torch.FloatTensor, torch.cuda.FloatTensor)):
return torch.stack([torch.concat((self.SOS_TOKEN_EMBEDDED, item, self.EOS_TOKEN_EMBEDDED)) for item in batch])
else:
return batch
def features_to_embedding_vectors(self, features):
# 192, 12, 115 -> 5, 52992
split_and_flattened = torch.reshape(features, (self.sequence_length, -1))
# 5, 52992 -> 5, 512
embedded = split_and_flattened[:, :self.dim_model]
return embedded * math.sqrt(self.dim_model)
def train_loop(self, model, opt, loss_fn, dataloader):
model.train()
total_loss = 0
for x, y in dataloader:
# convert from a multi-dimensional feature vector to a simple embedding-vector
x = torch.stack([self.features_to_embedding_vectors(item) for item in x])
x = x.to(self.device)
y = y.type(torch.long).to(self.device)
# prepend and append the sequence tokens
x = self.add_sequence_tokens(x)
y = self.add_sequence_tokens(y)
# Now we shift the tgt by one so with the <SOS> we predict the token at pos 1
y_input = y[:,:-1]
y_expected = y[:,1:]
# Get mask to mask out the next words
sequence_length = y_input.size(1)
tgt_mask = model.get_tgt_mask(sequence_length).to(self.device)
# Standard training except we pass in y_input and tgt_mask
pred = model(x, y_input, tgt_mask)
# Permute pred to have batch size first again
pred = pred.permute(1, 2, 0)
loss = loss_fn(pred, y_expected)
opt.zero_grad()
loss.backward(retain_graph=True)
opt.step()
total_loss += loss.detach().item()
return total_loss / len(dataloader)
def validation_loop(self, model, loss_fn, dataloader, epoch: int):
model.eval()
total_loss = 0
total_accuracy_complete = 0
total_accuracy_start = 0
c_time = 0
with torch.no_grad():
for x, y in dataloader:
# convert from a multi-dimensional feature vector to a simple embedding-vector
x = torch.stack([self.features_to_embedding_vectors(item) for item in x])
x = x.to(self.device)
y = y.type(torch.long).to(self.device)
# prepend and append the sequence tokens
x = self.add_sequence_tokens(x)
y = self.add_sequence_tokens(y)
# Now we shift the tgt by one so with the <SOS> we predict the token at pos 1
y_input = y[:,:-1]
y_expected = y[:,1:]
# Get mask to mask out the next words
sequence_length = y_input.size(1)
tgt_mask = model.get_tgt_mask(sequence_length).to(self.device)
# Standard training except we pass in y_input and src_mask
pred = model(x, y_input, tgt_mask)
# Permute pred to have batch size first again
pred = pred.permute(1, 2, 0)
# get accuracy
_, max_index = torch.max(pred, dim=1)
for sequence in max_index:
correct_complete = 0
correct_start = 0
for j in range(sequence_length):
if not sequence[j] in (0, 1):
sequence[j] = 0.5
_, max_index = torch.max(pred, dim=1)
sequence_length_dec = sequence_length - 1
for i in range(self.batch_size):
correct_complete = 0
correct_start = 0
for j in range(sequence_length_dec):
if max_index[i][j] == y_expected[i][j]:
correct_complete += 1
if correct_start == j:
correct_start += 1
total_accuracy_complete += correct_complete / sequence_length_dec
total_accuracy_start += correct_start / sequence_length_dec
self.writer.add_scalar(f'total_acc/epoch_{epoch}', correct_complete / sequence_length_dec, c_time)
self.writer.add_scalar(f'total_acc_start/epoch_{epoch}', correct_start / sequence_length_dec, c_time)
c_time += 1
loss = loss_fn(pred, y_expected)
total_loss += loss.detach().item()
total_loss /= len(dataloader)
total_accuracy_complete /= len(dataloader) * self.batch_size
total_accuracy_start /= len(dataloader) * self.batch_size
return total_loss, total_accuracy_complete, total_accuracy_start
def fit(self, model, opt, loss_fn, train_dataloader, val_dataloader, epochs, writer):
print_green('Training and validating model')
max_accuracy_start = 0.0
epoch_threshold = 20
for epoch in tqdm(range(epochs), 'Epochs'):
train_loss = self.train_loop(model, opt, loss_fn, train_dataloader)
validation_loss, acc_complete, acc_start = self.validation_loop(model, loss_fn, val_dataloader, epoch)
writer.add_scalar('loss/training', train_loss, epoch)
writer.add_scalar('loss/validation', validation_loss, epoch)
writer.add_scalar('accuracy/complete', acc_complete, epoch)
writer.add_scalar('accuracy/start', acc_start, epoch)
if epoch > epoch_threshold and acc_start > max_accuracy_start:
torch.save(model, f'{PATH_TO_MODEL_FOLDER}/transformer_{epoch}_{acc_start}_{datetime.now().strftime("%Y-%m-%d_%H:%M")}.pt')
max_accuracy_start = acc_start
def predict(self, model, x : list, y : list):
# convert from a multi-dimensional feature vector to a simple embedding-vector
x = torch.stack([self.features_to_embedding_vectors(item) for item in x])
x = x.to(self.device)
y = y.type(torch.long).to(self.device)
# prepend and append the sequence tokens
x = self.add_sequence_tokens(x)
y = self.add_sequence_tokens(y)
# Now we shift the tgt by one so with the <SOS> we predict the token at pos 1
y_input = y[:,:-1]
y_expected = y[:,1:]
# Get mask to mask out the next words
sequence_length = y_input.size(1)
tgt_mask = model.get_tgt_mask(sequence_length).to(self.device)
# Standard training except we pass in y_input and src_mask
pred = model(x, y_input, tgt_mask)
# Permute pred to have batch size first again
pred = pred.permute(1, 2, 0)
# get accuracy
_, max_index = torch.max(pred, dim=1)
for sequence in max_index:
for j in range(sequence_length):
if not sequence[j] in (0, 1):
sequence[j] = 0.5
return max_index
if __name__ == '__main__':
GOTransformerTrainer(
sequence_length=64,
dim_model=2048,
epochs=100,
batch_size=16
).train()
| felix-20/gravitational_oceans | src/ai_nets/transformer.py | transformer.py | py | 14,567 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
2039679921 | #!/usr/bin/env python3
import requests, json, dewiki, sys
class ArgvError(Exception):
def __init__(self):
Exception.__init__(self, "Put in only '1' Value")
class Search:
ep = 'https://en.wikipedia.org/w/api.php'
session = requests.Session()
#endpoint
text = ''
def __init__(self, keyword):
self.keyword = keyword
self.params = {
"action": "parse",
"format": "json",
"page": keyword,
"redirects": 1,
"prop": "wikitext",
"contentmodel" : "wikitext",
"formatversion": "2",
}
self.result()
def result(self):
data = self.session.get(url = self.ep, params = self.params).json()
try:
if 'error' in data.keys():
raise Exception("%s is not Exist!" % self.keyword)
except Exception as e:
print(e)
exit()
text = '\t\t' + dewiki.from_string(data['parse']['title']) + '\n\n'
for item in dewiki.from_string(data['parse']['wikitext']).split('\n'):
if (len(item) != 0 and item[0] == '*'):
text += '\t' + item.split('<ref>')[0] + '\n'
try:
if (text == None):
raise Exception("Failed Search")
except Exciption as e:
print (e)
exit()
with open(self.keyword + '.wiki', 'w+') as fd:
fd.write(text)
return text
def __str__(self):
return self.result()
if (__name__ == '__main__'):
try:
if (len(sys.argv) != 2):
raise ArgvError()
except ArgvError as e:
print(e)
exit()
search = Search(sys.argv[1])
| youngmoneee/django_piscine | day03/ex02/request_wikipedia.py | request_wikipedia.py | py | 1,638 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.Session",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "dewiki.from_string",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "dewiki.from_string",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sys.argv",
... |
18903389392 | from uuid import uuid4
from logging import getLogger
from uchicagoldrtoolsuite import log_aware
from uchicagoldrtoolsuite.core.lib.convenience import log_init_attempt, \
log_init_success
from .abc.transformer import Transformer
from ..structures.archive import Archive
from ..structures.stage import Stage
__author__ = "Tyler Danstrom, Brian Balsamo"
__email__ = " tdanstrom@uchicago.edu, balsamo@uchicago.edu"
__company__ = "The University of Chicago Library"
__copyright__ = "Copyright University of Chicago, 2016"
__publication__ = ""
__version__ = "0.0.1dev"
log = getLogger(__name__)
class ArchiveToStageTransformer(Transformer):
"""The StageToARrchiveTransformer takes an instance of a Stage structure
and copies its contents into an instance of an Archive structure
"""
@log_aware(log)
def __init__(self, origin_structure):
"""instantiates an instance of a StageToArchiveTansformer
It starts with the origin structure passed as a parameter
and sets an empty destination structure.
___Args__
1. origin_structure (Archive) : a fully realized instance of a
Archive structure
"""
log_init_attempt(self, log, locals())
self.origin_structure = origin_structure
self.destination_structure = None
log_init_success(self, log)
@log_aware(log)
def transform(self, stage_identifier=None):
"""returns a fully realized Archive structure containing the contents
of the origin Stage structure.
It copies the contents of the Stage structure into the new Archive
structure and sets the data attribute destination_structure before
returning said destination structure data attribute value.
"""
log.info("Transforming an Archive into a Stage")
if self.destination_structure is not None:
raise TypeError("a transformation already occured.")
if stage_identifier is None:
log.debug("No stage identifier provided, setting to a uuid")
stage_identifier = uuid4().hex
self.destination_structure = Stage(stage_identifier)
log.debug("Moving materialsuites into the Stage")
for n_materialsuite in self.origin_structure.materialsuite_list:
self.destination_structure.add_materialsuite(
n_materialsuite
)
log.debug("Moving accession records into the Stage")
for n_accessionrecord in self.origin_structure.accessionrecord_list:
self.destination_structure.add_accessionrecord(
n_accessionrecord
)
log.debug("Moving legalnotes into the Stage")
for n_legalnote in self.origin_structure.legalnote_list:
self.destination_structure.add_legalnote(
n_legalnote
)
log.debug("Moving adminnotes into the Stage")
for n_adminnote in self.origin_structure.adminnote_list:
self.destination_structure.add_adminnote(
n_adminnote
)
log.debug("Transformation complete, returning result")
return self.destination_structure
@log_aware(log)
def get_origin_structure(self):
"""returns the origin structure, in this case a fully-realized Stage structure
"""
return self._origin_structure
@log_aware(log)
def set_origin_structure(self, value):
"""sets the origin structure: it will only accept a Stage structure
"""
if isinstance(value, Archive):
self._origin_structure = value
else:
raise ValueError("ArchiveToStageTransformerr must have an " +
"instace of an Archive in origin_structure")
@log_aware(log)
def get_destination_structure(self):
"""returns the destination structure, or the structure created from
transform method
"""
return self._destination_structure
@log_aware(log)
def set_destination_structure(self, value):
"""sets the destination structure, an Archive structure
"""
self._destination_structure = value
@log_aware(log)
def __repr__(self):
return "< transform from archive {} to stage {}".\
format(id(self.origin_structure),
id(self.destination_structure))
destination_structure = property(get_destination_structure,
set_destination_structure)
origin_structure = property(get_origin_structure, set_origin_structure)
| uchicago-library/uchicagoldr-toolsuite | uchicagoldrtoolsuite/bit_level/lib/transformers/archivetostagetransformer.py | archivetostagetransformer.py | py | 4,571 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "abc.transformer.Transformer",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "uchicagoldrtoolsuite.core.lib.convenience.log_init_attempt",
"line_number": 37,
"usage_type... |
9084026058 | import requests
import json
import re
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import NoSuchElementException
# Create a new Firefox session
capabilities = DesiredCapabilities.FIREFOX.copy()
capabilities['marionette'] = False
driver = webdriver.Firefox(capabilities=capabilities)
driver.implicitly_wait(10)
#--------------------------------------- LOOKTIME -------------------------------------------------------#
# DESCRIPTION: Get the time (as a string) that a person spend in a place through Selenium
# PARAMETERS:
# INPUT: name: Name from a place with vicinity as a result from the request to the Google Places API
# OUTPUT: tiempo: Average time that a person spends in a place (Google's query)
#--------------------------------------------------------------------------------------------------------#
def looktime(name):
# Navigate to the application home page
driver.get('https://www.google.com')
search_field = driver.find_element_by_name('q')
search_field.clear()
print('looktime')
print(name)
# Enter search keyword and submit
search_field.send_keys(name)
search_field.submit()
# Currently on result page using find_elements_by_xpath method
# Exception in case don't find the time
try:
tiempo = driver.find_element_by_xpath("//div[@class='_B1k']/b").text
if len(tiempo) != 0:
return tiempo
except NoSuchElementException:
return False
#--------------------------------------- GET_TIME -------------------------------------------------------#
# DESCRIPTION: Tranform the time as a string into an integer (through regular expressions) and get
# the average time (in minutes)
# PARAMETERS:
# INPUT: tiempo: Average time as a string that a person spends in a place (Google's query)
# This is an output from the LOOKTIME function
# OUTPUT: fminutos: Average time in minutes as an integer
#--------------------------------------------------------------------------------------------------------#
def get_time(tiempo):
fminutos=0
count=0
# Get minutes
minutos = re.findall(r'\b\d+\bminutos|\d+-\d+\b|\d+\b min', tiempo)
if minutos:
if (str(minutos[0])).find('-') >= 1 :
min1,min2=minutos[0].split("-")
fminutos=int(min1)+int(min2)
count=count+2
else:
minutos2=[int (m) for m in minutos[0].split() if m.isdigit()]
fminutos=fminutos+int(minutos2[0])
count=count+1
# Get hours
tiempo = tiempo.replace(",",".")
horas = re.findall(r'\b\d+.\d+\b horas|\b\d+.\d+\b h|\b\d+\b horas|\b\d+\b h', tiempo)
horas2=[]
if horas:
for t in horas[0].split():
try:
horas2.append(float(t))
except ValueError:
pass
fminutos=fminutos+(int(horas2[0]*60))
count=count+1
# Get the average in case have the time more than once
if count >= 2:
fminutos=fminutos/2
horas2.clear()
# Take back the time in minutes
return fminutos
#------------------------------------------ MAIN --------------------------------------------------------#
# DESCRIPTION: Add to the puntos_de_interes.json file the name and the average time in minutes that
# a person spends in a point of interest
#
# INPUT FILE: puntos_de_interes.json
# This file was generated by data_processing.py program and already contains the
# Valencia's points of interest with population, traffic and tweets
#
# OUTPUT FILE: puntos_de_interes.json
# Update of the input file
#--------------------------------------------------------------------------------------------------------#
def main():
# Read the original JSON
path_input_file = 'puntos_de_interes.JSON'
with open(path_input_file, "r") as input_file:
data = json.load(input_file)
result = {}
result['type'] = data['type']
result['crs'] = data['crs']
result['features'] = []
# Keys to associate with each search
# Google Place API gives a list of types, on the other hand the accuracy of the results depends of
# the choice from the types
clave = {
'PID': ("train_station"),
'GTR': ("bus_station"),
'GSP': ("hospital"),
'TER': ("shopping_mall"), # Other type: department_store
'EDA': ("park"), # Other type: gym, stadium
}
cont=0
for feature in data['features']:
y = str(feature['geometry']['coordinates'][0])
x = str(feature['geometry']['coordinates'][1])
cal = clave.get(feature['properties']['califi'], ("")) # Make the relation with the key
# Request to Google Places API with a distance of 20 meters from the point of interest
r = requests.get('https://maps.googleapis.com/maps/api/place/nearbysearch/json?location='+x+','+y+'&rankby=distance&distance=20&type='+str(cal)+'&key=AIzaSyBydM3PpubE1x3_Et1e_ApoFRujEvbUer8')
# Other possible request, focusing on the radio
# 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?location='+x+','+y+'&radius=500&type='+str(cal)+'&key=AIzaSyCAJQUnW6GpmM5PmDa22kJuNFtOrwJTHhI'
# A JSON generated by the API request
data2 = json.loads(r.content.decode("utf8"))
for out in data2['results']:
# Get the first 5 results for each request to the Google Places API
if cont <= 4 :
# We only need the name and the vicinity to get the time with LOOKTIME function
z = str(out['name'] + ' ' + out['vicinity'])
print(z)
time = looktime(z)
if time:
time = get_time(time)
break
else:
cont = cont +1
continue
else:
z = str(data2['results'][0]['name'] + ' ' + data2['results'][0]['vicinity']) #In case thata any of the firts five results have time, by default sets the first one
time=0
# Update the puntos_de_interes.json file
feature['properties']['nombre'] = z # Add the 'nombre' property
feature['properties']['tiempo_medio'] = time # Add the 'tiempo_medio' property
# Screen output to view program execution
print("\n Name: " + z)
print("\n Tiempo: " + str(time))
result['features'].append(feature)
z = ""
time=""
cont=0
# Write the output file
path_output_file = 'puntos_de_interes.JSON'
with open(path_output_file, "w") as output_file:
json.dump((result), output_file, indent=3)
driver.quit()
if __name__ == "__main__":
main()
| Axelflg/get_info_JSON | Find_info.py | Find_info.py | py | 7,050 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.common.desired_capabilities.DesiredCapabilities.FIREFOX.copy",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.common.desired_capabilities.DesiredCapabilities.FIREFOX",
"line_number": 11,
"usage_type": "attribute"
},
{
... |
74039762662 | from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework import status
from rest_framework import serializers
from .models import *
from .permissions import ValidApiKey
import random
#Supporting Functions
def random_string(range_max,string=None,unique=False):
if not string:
string = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789?><.:;@#()'
random_string = ''
for i in range(0,range_max):
new_char = random.choice(string)
#If unique param is sent we want to remove the chosen character
if unique:
partitioned = string.rpartition(new_char)
string = partitioned[0] + partitioned[2]
random_string += new_char
return random_string
def get_results_for_word_length(word_length,words,special,combos):
#Get only one instance of each word
unique_words = list(set(words))
score = 0
for word in unique_words:
if word in combos:
if word_length == 9:
score = 150
else:
if word.find(special) != -1:
score += word_length*2
else:
score += word_length
return {
'result': {
'score' : score,
'scoredWords' : [word for word in unique_words if word in combos],
'unscoredWords' : [word for word in unique_words if word not in combos],
'wordsChecked' : words,
}
}
#Returns a dictionary object with the scores for the actual solution
def score_solution(word_list,special):
response_data = {
'result' : {},
'totalScore' : 0,
'allPossibleWords' : word_list,
}
for i in range(3,10):
result = get_results_for_word_length(i,[word for word in word_list if len(word) == i],special,word_list)
response_data['result'][str(i)+'letter'] = result['result']
response_data['totalScore'] += result['result']['score']
return response_data
#Serializers
class NonogramSerializer(serializers.ModelSerializer):
class Meta:
model = Nonogram
fields = ('id','word','combos')
#VIEWS
@api_view(['GET'])
def test(request):
print('I am a test', random_string(128))
return Response("hello", status=status.HTTP_200_OK)
@api_view(['GET'])
@permission_classes([ValidApiKey])
def get_nonogram(request):
words = Nonogram.objects.all().values('word','id')
random_word = random.choice(words)
#randomize the letters in the word
random_word['word'] = random_string(9,random_word['word'],True)
return Response(random_word, status=status.HTTP_200_OK)
############# SCORING RULES ##################
# must have 3 or more letters
# 1 point for letter
# If contains special character multiply by number of letters
# If nine letter words score 150
@api_view(['GET','POST'])
@permission_classes([ValidApiKey])
def score_word(request):
#Get the nonogram
try:
nonogram = Nonogram.objects.get(id=request.data['id'])
except Exception as e:
print(e)
return Response({'message':'Word does not exist'}, status=status.HTTP_404_NOT_FOUND)
#Collect from payload
word_list = request.data.get('word_list')
special_letter = request.data.get('special')
if not special_letter:
return Response({'message':'Please provide the special character'}, status=status.HTTP_400_BAD_REQUEST)
if word_list:
#Setup dictionary result object
payload = {
'id' : nonogram.id,
'solvedWord' : nonogram.word,
'sentWord' : request.data['word'],
'specialLetter' : special_letter,
'result' : {},
'totalScore' : 0,
'scoredWords' : [word for word in word_list if word in nonogram.combos],
'unscoredWords' : [word for word in word_list if word not in nonogram.combos],
'sentWords' : word_list,
'solution' : score_solution(nonogram.combos,special_letter),
}
for i in range(3,10):
result = get_results_for_word_length(i,[word for word in word_list if len(word) == i],special_letter,nonogram.combos)
payload['result'][str(i)+'letter'] = result['result']
payload['totalScore'] += result['result']['score']
else:
return Response({'message':'Please provide a word list'}, status=status.HTTP_400_BAD_REQUEST)
#words = Nonogram.objects.all().values('word')
return Response(payload, status=status.HTTP_200_OK)
@api_view(['GET','POST'])
@permission_classes([ValidApiKey])
def get_solution(request):
try:
nonogram = Nonogram.objects.get(id=request.data['id'])
except Exception as e:
print(e)
return Response({'message':'Word does not exist'}, status=status.HTTP_404_NOT_FOUND)
return Response(NonogramSerializer(nonogram).data, status=status.HTTP_200_OK)
@api_view(['GET','POST'])
@permission_classes([ValidApiKey])
def get_solution_with_score(request):
try:
nonogram = Nonogram.objects.get(id=request.data['id'])
except Exception as e:
print(e)
return Response({'message':'Word does not exist'}, status=status.HTTP_404_NOT_FOUND)
if request.data.get('special'):
special_letter = request.data['special']
else:
return Response({'message':'No speical letter has been sent.'}, status=status.HTTP_404_NOT_FOUND)
response_data = {
'id' : nonogram.id,
'word' : nonogram.word,
'solution' : score_solution(nonogram.combos,special_letter)
}
return Response(response_data, status=status.HTTP_200_OK)
| ChrisBriant/nonogram_backend | api/views.py | views.py | py | 5,695 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random.choice",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 65,
"usage_type": "name"
},
... |
19633985319 | from pydub import AudioSegment
from pydub.silence import split_on_silence
import os
import multiprocessing
def split_audio(filename, audio_folder='audio_folder', output_folder='segments'):
# Check if output folder exists and if not, create it
if not os.path.isdir(output_folder):
os.mkdir(output_folder)
filepath = os.path.join(audio_folder, filename)
audio_file = AudioSegment.from_mp3(filepath)
# Split track where the silence is 300 milliseconds or more and get chunks
chunks = split_on_silence(
audio_file,
# Must be silent for at least 300 milliseconds
min_silence_len=300,
# Consider it silent if quieter than -36 dBFS
silence_thresh=-36
)
# If chunks shorter than 2 seconds, append to the previous chunk
min_len = 2 * 1000 # 2 seconds in ms
chunks_corrected = []
for chunk in chunks:
if len(chunk) < min_len and chunks_corrected:
chunks_corrected[-1] += chunk
else:
chunks_corrected.append(chunk)
# Export all of the individual chunks as .mp3 files
for i, chunk in enumerate(chunks_corrected):
# Remove the last 4 characters of filename (.mp3)
out_file = os.path.join(output_folder, f"{filename[:-4]}_segment{i}.mp3")
chunk.export(out_file, format="mp3")
print(f"Finished splitting{out_file}")
def main(audio_folder):
pool = multiprocessing.Pool(multiprocessing.cpu_count())
audio_files = [f for f in os.listdir(audio_folder) if f.endswith('.mp3')]
pool.starmap(split_audio, [(f, audio_folder) for f in audio_files])
if __name__ == "__main__":
main(r"C:\Users\Harsh\Documents\gap\gapvoice\audio_preprocessing\mp3")
| harshbhatia66/BallsDeepLearning | DataPipeline/segment_audio.py | segment_audio.py | py | 1,714 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.isdir",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 11... |
11642826592 | from django.shortcuts import render
from django.views.generic import CreateView, UpdateView, ListView
from .models import Empresa
from .forms import EmpresaForm
from notafiscal.models import NotaFiscal
def empresa_list(request):
template_name ='empresa_list.html'
objects = Empresa.objects.all()
context ={'object_list' : objects}
return render(request, template_name,context)
def notafiscal_list2(request, empresa):
template_name ='notafiscal_list.html'
obj = Empresa.objects.filter(empresa = empresa)
context ={'object' : obj}
return render(request, template_name,context)
def empresa_add(request):
template_name='empresa_form.html'
return render(request,template_name)
class EmpresaCreate(CreateView):
model = Empresa
template_name='empresa_form.html'
form_class=EmpresaForm
class EmpresaList(ListView):
model = Empresa
template_name = 'empresa_list.html'
paginate_by = 10
class EmpresaUpdate(UpdateView):
model = Empresa
template_name='empresa_form.html'
form_class=EmpresaForm | vvalcristina/notafiscal | nfe/empresa/views.py | views.py | py | 1,063 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "models.Empresa.objects.all",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "models.Empresa.objects",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "models.Empresa",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": ... |
74144129703 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# =======================================
# Author : NareN
# git : https://github.com/DEVELByte
# =======================================
import logging
import os
import re
import argparse
from develbyte import create_app
logger = logging.getLogger("default")
def purge(directory, pattern):
for f in os.listdir(directory):
if re.search(pattern, f):
os.remove(os.path.join(directory, f))
def arguments():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--configName', '-c', help='pass a absolute path to the config file')
return parser.parse_args()
def print_config(config):
for _config in config.keys():
logger.info("{key: <50}: {value}".format(key=_config, value=config[_config]))
if __name__ == '__main__':
purge(".", "nohup.out")
args = arguments()
app = create_app(args.configName)
print_config(app.config)
app.run(port=app.config.PORT, threaded=True)
| im-naren/flask-starter-kit | runserver.py | runserver.py | py | 1,027 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number":... |
31292738689 | import os
import configparser
def num_cpus():
try:
return len(os.sched_getaffinity(0))
except AttributeError:
return os.cpu_count()
def max_workers():
return (num_cpus() * 2) + 1
# Read configuration file
config = configparser.ConfigParser()
config.read('config.ini')
# Set bind variable from configuration file
port = config['SETTINGS']['port']
bind = f"0.0.0.0:{port}"
logfile = "gunicorn.log"
workers = max_workers()
timeout = 300
| SaltisRS/ChatEndpoint-osrs | gunicorn_config.py | gunicorn_config.py | py | 467 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.sched_getaffinity",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.cpu_count",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "configparser.ConfigParser",
"line_number": 14,
"usage_type": "call"
}
] |
10978100631 | from gensim import corpora, models, similarities
import logging
from Preprocess.py import MyCorpus
processed_dir = 'Users/mlinegar/Data/LDA/BoW'
_num_topics = 10
dictionary = corpora.Dictionary.load(processed_dir + "firsttry.dict")
corpus = corpora.MmCorpus(processed_dir + "firsttry.mm")
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf(corpus)
lda = models.LdaModel(corpus_tfidf, id2word=dictionary, num_topics=_num_topics) | mlinegar/RedditScraping | Analysis/Tranformations.py | Tranformations.py | py | 435 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "gensim.corpora.Dictionary.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "gensim.corpora.Dictionary",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "gensim.corpora",
"line_number": 8,
"usage_type": "name"
},
{
"api_nam... |
2744613256 | from django import forms
from events.models import Event
from datetime import datetime
from django.contrib.admin import widgets
class EventInputForm(forms.ModelForm):
class Meta:
model = Event
fields = ['organization', 'name', 'start_time','end_time', 'description', 'mask_profile', 'contact_name', 'url']
mask_profile = forms.Select(choices=("Yes", "No"))
start_time = forms.DateTimeField(input_formats=["%Y/%m/%d %H:%M"])
end_time = forms.DateTimeField(input_formats=["%Y/%m/%d %H:%M"])
url = forms.URLField(required=False)
labels = {
'organization': 'Organization',
'name': 'Name',
'start_time': 'Event Start Time',
'end_time': 'Event End Time',
'description': 'Description',
'mask_profile': 'Mask Profile?',
'contact_name': 'Contact Name',
'url': 'Event URL'
}
widgets = {
'organization': forms.Select(attrs={'placeholder': 'Select Organization'}),
'name': forms.TextInput(attrs={'placeholder': 'Event Name'}),
# 'start_time': forms.DateTimeInput(format="%Y/%m/%d %H:%M"),
# 'end_time': forms.DateTimeInput(format="%Y/%m/%d %H:%M"),
# 'time': widgets.AdminSplitDateTime(),
'description': forms.Textarea(attrs={'placeholder': 'Event Description'}),
'contact_name': forms.TextInput(attrs={'placeholder': 'Name of contact'}),
# 'url': forms.URLInput()
}
| DigitalEmpowermentATX/DECAwebsite | digital_inclusion/events/forms.py | forms.py | py | 1,532 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "events.models.Event",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.form... |
28713706413 | # ----encoding='utf-8'-----
# Chentian's code paradise
import smtplib
import email
# 负责构造文本
from email.mime.text import MIMEText
# 负责构造图片
from email.mime.image import MIMEImage
# 负责将多个对象集合起来
from email.mime.multipart import MIMEMultipart
from email.header import Header
mail_host="smtp.163.com"
mail_sender="chentstudent@163.com"
mail_slience=""
mail_receivers=["2627601379@qq.com"]
mm=MIMEMultipart('related')
subject_content="""python邮件测试"""
mm["From"]="sender_name<chentstudent@163.com>"
mm["to"]="receiver_name<2627601379@qq.com>"
mm["subiect"]=Header(subject_content,"utf-8")
body_content="""你好,这是一个测试邮件,来自于陈添"""
message_text=MIMEMultipart(body_content,"plian","utf=8")
mm.attach(message_text)
imagine_data=open("FILE-20150725-1818LV48VS71BW2J.jpg","rb")
message_imagine=MIMEImage(imagine_data.read())
imagine_data.close()
mm.attach(message_imagine)
exc=MIMEText(open("大唐建设集团-2022年5月工资.xlsx","rb").read(),'base64','utf-8')
exc["Content-Disposition"]='attachment;filename="sample.xlex"'
mm.attach(exc)
stp=smtplib.SMTP()
stp.connect(mail_host,25)
stp.set_debuglevel(1)
stp.login(mail_sender,mail_license)
stp.sendmail(mail_sender, mail_receivers, mm.as_string())
print("邮件发送成功")
stp.quit()
| codefreshstudent/day8 | 测试文件夹/测试发邮件.py | 测试发邮件.py | py | 1,338 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "email.mime.multipart.MIMEMultipart",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "email.header.Header",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "email.mime.multipart.MIMEMultipart",
"line_number": 28,
"usage_type": "call"
},
... |
31888132367 | # -*- coding: utf-8 -*-
import argparse
import os
import schedule
import logging
import logging.config
import sys
import time
from redtrics.core.runner import Runner
logging.config.fileConfig(os.path.join(os.path.dirname(__file__), '..', 'etc', 'logging.ini'))
logger = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser(prog='redtrics-generate', description='RedMart Github Metrics')
parser.add_argument('--base', help='base/branch to run the metrics on', default='master')
parser.add_argument('--run-now', help='run only one time. Otherwise will run as scheduler', action="store_true")
args = parser.parse_args()
try:
runner = Runner(args.base)
if args.run_now:
runner.run()
else:
schedule.every().monday.at("00:30").do(runner.run)
while True:
schedule.run_pending()
time.sleep(1)
except Exception as e:
logger.error(e)
sys.exit(1)
| tuananh-nguyen/redtrics | redtrics/cli/app.py | app.py | py | 994 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.config.fileConfig",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.config",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
35377030158 | from clean_doc import clean_doc
from gensim.models.doc2vec import LabeledSentence
from gensim.models import Doc2Vec
import gensim.models.doc2vec
import gensim
import multiprocessing
cores = multiprocessing.cpu_count()
assert gensim.models.doc2vec.FAST_VERSION > -1, "this will be painfully slow otherwise"
class LabeledLineSentence(object):
def __init__(self, data):
self.data = data
def __iter__(self):
for text,url in self.data:
yield LabeledSentence(words = text , tags = url)
def doc2vec(data , alpha , window , min_alpha , min_count , epoch , model_path):
#Labeled Sentences
sentences = LabeledLineSentence(data)
model = Doc2Vec(alpha = alpha , window = window , min_alpha = min_alpha , min_count = min_count , workers=cores) # use fixed learning rate
model.build_vocab(sentences)
for epoch_count in range(epoch):# Change for good performance
model.train(sentences)
model.alpha -= 0.002 # decrease the learning rate
model.min_alpha = model.alpha # fix the learning rate, no decay
# store the model to mmap-able files
model.save(model_path)
| wrat/Semantic-Relationship-Between-News | Doc2vec_model/doc2vec.py | doc2vec.py | py | 1,124 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "multiprocessing.cpu_count",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "gensim.models",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "gensim.models.doc2vec.LabeledSentence",
"line_number": 18,
"usage_type": "call"
},
{
"... |
919257196 | import requests
from bs4 import BeautifulSoup
import re
from domains import CONTENT_AREA
from emoji import emojize
from urllib.parse import urlparse
# ChatGPT d2ee59b7-b368-4a5f-b3af-2e33b7f33b4a
example_url = [
"https://backlinko.com/actionable-seo-tips",
"https://www.semrush.com/blog/seo-tips/",
"https://www.wordstream.com/blog/ws/2021/03/05/seo-strategy",
"https://ahrefs.com/blog/seo-tips/",
"https://backlinko.com/actionable-seo-tips",
"https://developers.google.com/search/docs/fundamentals/seo-starter-guide",
"https://www.pcmag.com/how-to/easy-but-powerful-seo-tips-to-boost-traffic-to-your-website",
"https://www.searchenginejournal.com/seo-tips/374673/",
"https://www.bdc.ca/en/articles-tools/marketing-sales-export/marketing/seo-small-businesses-10-ways-rank-higher",
]
def get_status_code(url):
response = requests.get(url)
return response.status_code
def get_domain(url):
"""Get the domain of a URL"""
domain = url.split("//")[-1].split("/")[0].split(".")[0]
return domain
def is_valid_url(url):
"""
Check if the given URL is valid or not.
Parameters:
url (str): The URL to be checked.
Returns:
bool: True if the URL is valid, False otherwise.
"""
regex = re.compile(
r"^(https?://)?" # http:// or https:// (optional)
r"((([A-Z0-9][A-Z0-9-]{0,61}[A-Z0-9])|localhost)\.)+" # domain...
r"([A-Z]{2,6})" # domain extension
r"(:\d{1,5})?" # optional port
r"(\/.*)?$",
re.IGNORECASE,
) # path (optional)
return bool(regex.match(url))
def domain_disclaimer(url):
"""Display a disclaimer message if domain not defined in domains.py"""
domain = get_domain(url)
if domain not in CONTENT_AREA:
return emojize(
":folded_hands:Content area is undefined, result may not be valid.",
variant="emoji_type",
)
else:
return emojize(
":thumbs_up: Good news! The content area has already been defined, the result should be more valid.",
variant="emoji_type",
)
def get_title(url):
"""Get the title of a webpage"""
try:
# Make request to webpage
response = requests.get(url)
# Parse webpage content using Beautiful Soup
soup = BeautifulSoup(response.content, "html.parser")
# Get title of webpage
title = soup.title.string
return title
except:
return "Unable to get title"
def get_description(url):
"""Get the description of a webpage"""
try:
# Make request to webpage
response = requests.get(url)
# Parse webpage content using Beautiful Soup
soup = BeautifulSoup(response.content, "html.parser")
# Get description from meta tags
meta_tags = soup.find_all("meta")
description = ""
for tag in meta_tags:
if tag.get("name", None) == "description":
description = tag.get("content", None)
return description
except:
return "Unable to get description"
def get_content(url):
try:
# Check if domain is registered
parsed_url = urlparse(url)
domain = (
parsed_url.netloc.split(".")[-2]
if parsed_url.netloc.count(".") >= 2
else parsed_url.netloc
)
content_class = CONTENT_AREA.get(domain)
if content_class:
# Make request to webpage
response = requests.get(url)
# Parse webpage content using Beautiful Soup
soup = BeautifulSoup(response.content, "html.parser")
# Get content of webpage using class
content = soup.find("div", class_=content_class)
return content.get_text()
else:
# Make request to webpage
response = requests.get(url)
# Parse webpage content using Beautiful Soup
soup = BeautifulSoup(response.content, "html.parser")
# Get content of webpage using tag "body"
content = soup.find("body")
return content.get_text()
except:
return "Unable to get content"
def get_content_with_html(url):
"""Get the content of a webpage with HTML elements"""
try:
# Check if domain is registered
domain = get_domain(url)
content_class = CONTENT_AREA.get(domain)
if content_class:
# Make request to webpage
response = requests.get(url)
# Parse webpage content using Beautiful Soup
soup = BeautifulSoup(response.content, "html.parser")
# Get content of webpage using class
content = soup.find("div", class_=content_class)
return str(content)
else:
# Make request to webpage
response = requests.get(url)
# Parse webpage content using Beautiful Soup
soup = BeautifulSoup(response.content, "html.parser")
# Get content of webpage using tag "body"
content = soup.find("body")
return str(content)
except:
return "Unable to get content"
def get_h1(url):
"""Get the H1 of a webpage"""
try:
# Make request to webpage
response = requests.get(url)
# Parse webpage content using Beautiful Soup
soup = BeautifulSoup(response.content, "html.parser")
# Get H1 of webpage
h1 = soup.find("h1").text if soup.find("h1") else None
return h1
except:
return "Unable to get H1"
def get_headings(content_html):
soup = BeautifulSoup(content_html, "html.parser")
# Mencari semua elemen heading
headings = soup.find_all(["h1", "h2", "h3"])
# Inisialisasi list untuk menyimpan heading
all_headings = []
# Perulangan untuk setiap heading
for heading in headings:
# Menambahkan tag sesuai dengan tipe heading
if heading.name == "h1":
all_headings.append(f"<H1>{heading.text}")
elif heading.name == "h2":
all_headings.append(f"<H2>{heading.text}")
elif heading.name == "h3":
all_headings.append(f"<H3>{heading.text}")
# Mengembalikan list heading
return all_headings
def get_first_parapraph(content):
soup = BeautifulSoup(content, "html.parser")
first_pargraph = soup.find("p")
if first_pargraph:
return first_pargraph.text.strip()
else:
" "
| syahidmid/seoanalysis | scrapers/scrape.py | scrape.py | py | 6,496 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "re.IGNORECASE",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "domains.CONTENT_AREA",
... |
40372645232 | import datetime
from django.utils import timezone
from django.test import TestCase
from django.urls import reverse
from .models import Question
def create_question(question_text, days):
"""Create a question with the given `question_text` and published the given numer of `days`
offset to now (negative for questions published in the past, positive for questions that have yet to be published"""
time = timezone.now() + datetime.timedelta(days=days)
return Question.objects.create(question_text=question_text, pub_date=time)
class QuestionIndexViewTests(TestCase):
def test_not_questions(self):
"""
If no questions exists, an appropiate message is displayed
"""
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No hay encuestas")
def test_past_question(self):
"""
Questions with a past pub_date are displayed
"""
create_question(question_text="Past question", days=-30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question>']
)
def test_future_question(self):
"""
Questions with a past pub_date are NOT displayed
"""
create_question(question_text="Future question", days=30)
response = self.client.get(reverse('polls:index'))
self.assertContains(response, "No hay encuestas")
self.assertQuerysetEqual(
response.context['latest_question_list'],
[]
)
def test_future_and_past_question(self):
"""
Even if both past and future questions exist, only
past questions are displayed
"""
create_question(question_text="Future question", days=30)
create_question(question_text="Past question", days=-30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question>']
)
def test_two_past_questions(self):
create_question(question_text="Q1", days=-30)
create_question(question_text="Q2", days=-50)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Q1>', '<Question: Q2>']
)
class QuestionDetailViewTests(TestCase):
def test_future_question(self):
"""
Questions with a past pub_date are NOT displayed
"""
future_q = create_question(question_text="Future question", days=5)
url = reverse('polls:detail', args=(future_q.id,))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_past_question(self):
"""
Questions with a past pub_date are NOT displayed
"""
past_q = create_question(question_text="Past question", days=-5)
url = reverse('polls:detail', args=(past_q.id,))
response = self.client.get(url)
self.assertContains(response, past_q.question_text)
class QuestionModelTests(TestCase):
def test_was_published_recently_with_future_question(self):
"""
was_published_recently() returns False for questions whose pub_date is in the future.
"""
time = timezone.now() + datetime.timedelta(days=30)
future_question = Question(pub_date=time)
self.assertIs(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
"""
was_published_recently() returns False for questions whose pub_date is older than 1 day.
"""
time = timezone.now() - datetime.timedelta(days=1, seconds=1)
old_question = Question(pub_date=time)
self.assertIs(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
"""
was_published_recently() returns True for questions whose pub_date is within the last day.
"""
time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
recent_question = Question(pub_date=time)
self.assertIs(recent_question.was_published_recently(), True)
| gpuma/phuyu | polls/tests.py | tests.py | py | 4,413 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.utils.timezone.now",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "mo... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.