seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 โ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k โ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
45035468935 |
def parse(s):
    """Parse a fully-parenthesized single-digit expression and return its value.

    Raises AssertionError if characters remain after a complete expression.
    """
    value, remainder = parse_a(s)
    assert len(remainder) == 0
    return value
def parse_a(s):
    """Parse one expression at the head of ``s``.

    Grammar: expr := digit | '(' expr ('+'|'*') expr ')'
    Returns a tuple (value, remaining_string).
    """
    head = s[0]
    if head != '(':
        # Single decimal digit literal.
        assert 0 <= int(head) <= 9
        return int(head), s[1:]
    left, rest = parse_a(s[1:])
    operator = rest[0]
    if operator == '+':
        right, rest = parse_a(rest[1:])
        assert rest[0] == ')'
        return left + right, rest[1:]
    elif operator == '*':
        right, rest = parse_a(rest[1:])
        assert rest[0] == ')'
        return left * right, rest[1:]
def add(s):
    """Evaluate a flat "a + b" expression string and return the integer sum."""
    left_text, _, right_text = s.partition('+')
    return int(left_text.strip()) + int(right_text.strip())
def assertEquals(x, s):
    """Parse ``s`` and print an error message when the result differs from ``x``."""
    actual = parse(s)
    if x != actual:
        print("Error: expected " + str(x) + " but got " + str(actual))
if __name__ == "__main__":
    # Smoke tests for the parser.
    assertEquals(1 + 2, '(1+2)')
    assertEquals(1 * 2, '(1*2)')
    assertEquals(1 + (2 * 3), '(1+(2*3))')
    # NOTE(review): 0 + (2 * 3) == 6 but '(1+(2*3))' parses to 7 — presumably
    # an intentional demo of the error message; confirm before "fixing".
    assertEquals(0 + (2 * 3), '(1+(2*3))')
| almostimplemented/codename | calculator.py | calculator.py | py | 917 | python | en | code | 0 | github-code | 13 |
5903439255 | from tensorflow import keras
from ray.tune import track
class TuneReporterCallback(keras.callbacks.Callback):
    """Tune Callback for Keras.

    Reports intermediate Keras metrics to Ray Tune after every batch or
    after every epoch, depending on ``freq``.
    """

    def __init__(self, reporter=None, freq="batch", logs=None):
        """Initializer.

        Args:
            reporter (StatusReporter|tune.track.log|None): Tune object for
                returning results.
            freq (str): Sets the frequency of reporting intermediate results.
                One of ["batch", "epoch"].
            logs (dict|None): Unused; kept for backward compatibility.
        """
        self.reporter = reporter or track.log
        self.iteration = 0
        if freq not in ["batch", "epoch"]:
            raise ValueError("{} not supported as a frequency.".format(freq))
        self.freq = freq
        super(TuneReporterCallback, self).__init__()

    def _report(self, logs):
        """Add negated loss metrics and forward ``logs`` to the reporter.

        Tune maximizes its metric by default, so each ``*loss*`` entry is
        mirrored as ``neg_*`` with its sign flipped.
        """
        self.iteration += 1
        for metric in list(logs):
            if "loss" in metric and "neg_" not in metric:
                logs["neg_" + metric] = -logs[metric]
        # Older Keras versions report "acc"; newer ones report "accuracy".
        if "acc" in logs:
            self.reporter(keras_info=logs, mean_accuracy=logs["acc"])
        else:
            self.reporter(keras_info=logs, mean_accuracy=logs.get("accuracy"))

    def on_batch_end(self, batch, logs=None):
        # `logs=None` replaces the original mutable-default `logs={}`.
        if self.freq != "batch":
            return
        self._report(logs or {})

    def on_epoch_end(self, batch, logs=None):
        if self.freq != "epoch":
            return
        self._report(logs or {})
| zhuohan123/hoplite-rllib | python/ray/tune/integration/keras.py | keras.py | py | 1,692 | python | en | code | 2 | github-code | 13 |
17597189274 | # 프로그램 사용자로부터 자연수를 입력받아 0부터 자연수까지의 합계를 구할 것.
# print("아무 자연수를 입력해주세요!")
# inputNum = int(input("0부터 입력된 자연수까지 더한 값을 출력합니다. : "))
# count = 1
# totalSum1 = 0
# totalSum2 = 0
# ## while 사용
# while count <= inputNum :
# totalSum1 += count
# count += 1
# print("0부터" + str(inputNum)+"까지의 총 합은 " + str(totalSum1))
# ## for 사용
# for i in range(inputNum + 1) :
# totalSum2 += i
# print("0부터" + str(inputNum) + "까지의 총 합은 " + str(totalSum2))
#------------------------------------------------------------------------
# 프로그램 사용자로부터 -1을 입력받으면 종료되는 프로그램을 작성할 것.
# code = 0
# while code != -1 :
# print("현재 프로그램 상태 : on")
# code = int(input("종료하려면 -1을 입력해주세요. : "))
# print("프로그램 종료")
while True:
print("ํ์ฌ ํ๋ก๊ทธ๋จ ์ํ : on")
code = int(input("์ข
๋ฃํ๋ ค๋ฉด -1์ ์
๋ ฅํด์ฃผ์ธ์. : "))
if code == -1 :
print("ํ๋ก๊ทธ๋จ ์ข
๋ฃ")
break | junkue20/Inflearn_Python_Study | 9๊ฐ_๋ฐ๋ณต๋ฌธ์์ /quiz.py | quiz.py | py | 1,173 | python | ko | code | 0 | github-code | 13 |
30698282913 | from django.contrib import admin
from django.urls import path,include
from . import views
# URL routes for the student-details app of the library project.
urlpatterns = [
    # Landing page: list all students.
    path('',views.studentinfo,name="studentinfo"),
    # Delete the student with the given primary key.
    path('removestudent/<int:id>',views.removestudent,name="removestudent"),
    # path('filterbook',views.filterbook,name="filterbook"),
    # path('modifybook',views.modifybook,name="modifybook"),
    # # path('modifybook/<int:id>',views.modifybook,name="modifybook"),
    # Edit an existing student record.
    path('updatestudent/<int:id>',views.modify,name="modify"),
    path('addstudent',views.addstudent,name="addstudent"),
    path('showbookdetails/<int:id>',views.showbookdetails,name="showbookdetails"),
    # Pay a fine of `amount` for student `id`.
    path('payfine/<int:id>/<int:amount>',views.payfine,name="payfine"),
    # Issue the book with id `bid` to student `id`.
    path('issuebookforstudent/<int:id>/<str:bid>',views.issuebookforstudent,name="issuebookforstudent"),
]
| namansethi13/CollegeLibraryProject | Student_Details_App/urls.py | urls.py | py | 810 | python | en | code | 0 | github-code | 13 |
8815401830 | import json
import src.model_construction as mc
import src.data_management as dm
import pickle
import pandas as pd
class DataHandle:
    """
    Data Handle for loading and performing operations on input data.

    The Data Handle class allows data import and modifications of input data to an instance of the energyhub class.
    The constructor of the class takes a topology dictionary with time indices, networks, technologies and nodes as
    an input. An empty topology dict can be created by using the function
    :func:`~src.data_management.create_templates.create_empty_topology`
    """

    def __init__(self, topology):
        """
        Constructor

        Initializes a data handle class and completes demand data for each carrier used (i.e. sets it to zero for
        all time steps).

        :param dict topology: dictionary with time indices, networks, technologies, nodes. An empty dict can be
            created by using the function :func:`~src.data_management.create_templates.create_empty_topology`
        """
        self.topology = topology
        self.node_data = {}
        self.technology_data = {}
        self.network_data = {}

        # Initialize demand, prices, limits and emission factors to zero for
        # all time steps, carriers and nodes (deduplicated from seven nearly
        # identical DataFrame initializations).
        data_keys = ['demand',
                     'import_prices', 'import_limit', 'import_emissionfactors',
                     'export_prices', 'export_limit', 'export_emissionfactors']
        for nodename in self.topology['nodes']:
            self.node_data[nodename] = {}
            for key in data_keys:
                self.node_data[nodename][key] = pd.DataFrame(index=self.topology['timesteps'])
                for carrier in self.topology['carriers']:
                    self.node_data[nodename][key][carrier] = 0

    def read_climate_data_from_api(self, nodename, lon, lat, alt=10, dataset='JRC', year='typical_year', save_path=0):
        """
        Reads in climate data for a full year

        Reads in climate data for a full year from the specified source
        (`JRC PVGIS <https://re.jrc.ec.europa.eu/pvg_tools/en/>`_ or
        `ERA5 <https://cds.climate.copernicus.eu/cdsapp#!/home>`_). For access to the ERA5 api,
        an api key is required. Refer to `<https://cds.climate.copernicus.eu/api-how-to>`_

        :param str nodename: nodename as specified in the topology
        :param float lon: longitude of node - the api will read data for this location
        :param float lat: latitude of node - the api will read data for this location
        :param int alt: altitude of the node, only used by the JRC dataset
        :param str dataset: dataset to import from, can be JRC (only onshore) or ERA5 (global)
        :param int year: optional, needs to be in range of data available. If nothing is specified, a typical year
            will be loaded
        :param str save_path: Can save climate data for later use to the specified path (0 disables saving)
        :return: self at ``self.node_data[nodename]['climate_data']``
        :raises ValueError: if ``dataset`` is neither 'JRC' nor 'ERA5'
        """
        if dataset == 'JRC':
            data = dm.import_jrc_climate_data(lon, lat, year, alt)
        elif dataset == 'ERA5':
            data = dm.import_era5_climate_data(lon, lat, year)
        else:
            # Fail fast: the original fell through and crashed later with a NameError.
            raise ValueError('Unknown climate dataset: {}'.format(dataset))
        if save_path != 0:
            with open(save_path, 'wb') as handle:
                pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
        self.node_data[nodename]['climate_data'] = data

    def read_climate_data_from_file(self, nodename, file):
        """
        Reads climate data from file

        Reads previously saved climate data (imported and saved with :func:`~read_climate_data_from_api`) from a
        file to the respective node. This can save time, if api imports take too long.

        :param str nodename: nodename as specified in the topology
        :param str file: path of climate data file
        :return: self at ``self.node_data[nodename]['climate_data']``
        """
        with open(file, 'rb') as handle:
            data = pickle.load(handle)
        self.node_data[nodename]['climate_data'] = data

    def read_demand_data(self, nodename, carrier, demand_data):
        """
        Reads demand data for one carrier to node.

        Note that demand for all carriers not specified is zero.

        :param str nodename: node name as specified in the topology
        :param str carrier: carrier name as specified in the topology
        :param list demand_data: list of demand data. Needs to have the same length as number of time steps.
        :return: self at ``self.node_data[nodename]['demand'][carrier]``
        """
        self.node_data[nodename]['demand'][carrier] = demand_data

    def read_import_price_data(self, nodename, carrier, price_data):
        """
        Reads import price data of carrier to node

        Note that prices for all carriers not specified are zero.

        :param str nodename: node name as specified in the topology
        :param str carrier: carrier name as specified in the topology
        :param list price_data: list of price data for respective carrier. Needs to have the same length as
            number of time steps.
        :return: self at ``self.node_data[nodename]['import_prices'][carrier]``
        """
        self.node_data[nodename]['import_prices'][carrier] = price_data

    def read_export_price_data(self, nodename, carrier, price_data):
        """
        Reads export price data of carrier to node

        Note that prices for all carriers not specified are zero.

        :param str nodename: node name as specified in the topology
        :param str carrier: carrier name as specified in the topology
        :param list price_data: list of price data for respective carrier. Needs to have the same length as
            number of time steps.
        :return: self at ``self.node_data[nodename]['export_prices'][carrier]``
        """
        self.node_data[nodename]['export_prices'][carrier] = price_data

    def read_export_limit_data(self, nodename, carrier, export_limit_data):
        """
        Reads export limit data of carrier to node

        Note that limits for all carriers not specified are zero.
        (Docstring fixed: it previously was a copy of the export price method.)

        :param str nodename: node name as specified in the topology
        :param str carrier: carrier name as specified in the topology
        :param list export_limit_data: list of limit data for respective carrier. Needs to have the same length
            as number of time steps.
        :return: self at ``self.node_data[nodename]['export_limit'][carrier]``
        """
        self.node_data[nodename]['export_limit'][carrier] = export_limit_data

    def read_import_limit_data(self, nodename, carrier, import_limit_data):
        """
        Reads import limit data of carrier to node

        Note that limits for all carriers not specified are zero.
        (Docstring fixed: it previously was a copy of the export price method.)

        :param str nodename: node name as specified in the topology
        :param str carrier: carrier name as specified in the topology
        :param list import_limit_data: list of limit data for respective carrier. Needs to have the same length
            as number of time steps.
        :return: self at ``self.node_data[nodename]['import_limit'][carrier]``
        """
        self.node_data[nodename]['import_limit'][carrier] = import_limit_data

    def read_export_emissionfactor_data(self, nodename, carrier, export_emissionfactor_data):
        """
        Reads export emission factor data of carrier to node

        Note that emission factors for all carriers not specified are zero.

        :param str nodename: node name as specified in the topology
        :param str carrier: carrier name as specified in the topology
        :param list export_emissionfactor_data: list of emission data for respective carrier. Needs to have the
            same length as number of time steps.
        :return: self at ``self.node_data[nodename]['export_emissionfactors'][carrier]``
        """
        self.node_data[nodename]['export_emissionfactors'][carrier] = export_emissionfactor_data

    def read_import_emissionfactor_data(self, nodename, carrier, import_emissionfactor_data):
        """
        Reads import emission factor data of carrier to node

        Note that emission factors for all carriers not specified are zero.

        :param str nodename: node name as specified in the topology
        :param str carrier: carrier name as specified in the topology
        :param list import_emissionfactor_data: list of emission data for respective carrier. Needs to have the
            same length as number of time steps.
        :return: self at ``self.node_data[nodename]['import_emissionfactors'][carrier]``
        """
        self.node_data[nodename]['import_emissionfactors'][carrier] = import_emissionfactor_data

    def _load_and_fit_technology(self, nodename, tec):
        """
        Loads one technology JSON file, fits its performance and stores it.

        Shared helper for :func:`read_technology_data` and
        :func:`read_single_technology_data` (previously duplicated code).
        """
        with open('./data/technology_data/' + tec + '.json') as json_file:
            technology_data = json.load(json_file)
        # RES and STOR technologies additionally need the node's climate data
        # for the performance fit.
        if technology_data['TechnologyPerf']['tec_type'] in ('RES', 'STOR'):
            technology_data = mc.fit_tec_performance(
                technology_data, tec=tec,
                climate_data=self.node_data[nodename]['climate_data'])
        else:
            technology_data = mc.fit_tec_performance(technology_data)
        self.technology_data[nodename][tec] = technology_data

    def read_technology_data(self):
        """
        Writes technologies to self and fits performance functions

        Reads in technology data from JSON files located at ``./data/technology_data`` for all technologies
        specified in the topology.

        :return: self at ``self.technology_data[nodename][tec]``
        """
        for nodename in self.topology['technologies']:
            self.technology_data[nodename] = dict()
            for tec in self.topology['technologies'][nodename]:
                self._load_and_fit_technology(nodename, tec)

    def read_single_technology_data(self, nodename, technologies):
        """
        Reads technologies to DataHandle after it has been initialized.

        This function is only required if technologies are added to the model after the DataHandle has been
        initialized.
        """
        for tec in technologies:
            self._load_and_fit_technology(nodename, tec)

    def read_network_data(self):
        """
        Writes network to self and fits performance functions

        Reads in network data from JSON files located at ``./data/network_data`` for all networks specified in
        the topology.

        :return: self at ``self.network_data[netw]``
        """
        for netw in self.topology['networks']:
            with open('./data/network_data/' + netw + '.json') as json_file:
                network_data = json.load(json_file)
            network_data['distance'] = self.topology['networks'][netw]['distance']
            network_data['connection'] = self.topology['networks'][netw]['connection']
            network_data = mc.fit_netw_performance(network_data)
            self.network_data[netw] = network_data

    def pprint(self):
        """
        Prints a summary of the input data (excluding climate data)

        :return: None
        """
        for nodename in self.topology['nodes']:
            print('----- NODE ' + nodename + ' -----')
            for inst in self.node_data[nodename]:
                if not inst == 'climate_data':
                    print('\t ' + inst)
                    print('\t\t' + f"{'':<15}{'Mean':>10}{'Min':>10}{'Max':>10}")
                    for carrier in self.topology['carriers']:
                        print('\t\t' + f"{carrier:<15}"
                              f"{str(round(self.node_data[nodename][inst][carrier].mean(), 2)):>10}"
                              f"{str(round(self.node_data[nodename][inst][carrier].min(), 2)):>10}"
                              f"{str(round(self.node_data[nodename][inst][carrier].max(), 2)):>10}")

    def save(self, path):
        """
        Saves instance of DataHandle to path.

        The instance can later be loaded with :func:`load_data_handle`.

        :param str path: path to save to
        :return: None
        """
        with open(path, 'wb') as handle:
            pickle.dump(self, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load_data_handle(path):
    """
    Loads instance of DataHandle from path.

    :param str path: path to load from
    :return: instance of :class:`~DataHandle`
    """
    with open(path, 'rb') as handle:
        return pickle.load(handle)
19253462626 | from urllib.request import urlopen
from bs4 import BeautifulSoup
from urllib.parse import quote
import csv
#makes = ["DS", "GAC", "SERES", "ืืืืจื", "ืืืื", "ืืืคื", "ืืืืืืื", "ืืืืืงื", "ืืื ืคืื ืืื", "ืืืกืืื", "ืืืคื_ืจืืืืื", "ืืกืืื ืืจืืื", "ื.ื.ืื", "ืืืืืืง", "ืื ืืื", "ื'ืืื", "ืืืค", "ื'ื ืกืืก", "ืืจืืื ืืื", "ืืืฆืื", "ืืืื", "ืืืืืืกื", "ืืืืจ", "ืืื ืื", "ืืืืื", "ืืืืืื", "ืืกืื", "ืืืืืจ", "ืืื ืืื", "ืืื ืงืืื", "ืื ืืจืืืจ", "ืื ืฆืื", "ืืงืกืืก", "ืืืืื", "ืืื", "ืืืจืืื", "ืืื ื", "ืืืฆืืืืฉื", "ืืงืกืืก", "ืืจืฆืืก", "ื ืืกืื", "ืกืืื", "ืกืืืง-MG", "ืกืื ืืืื ื", "ืกืืืืจื", "ืกืืืืงื", "ืกืืื", "ืกืืืจืืื", "ืกืืืจื", "ืกืงืืื", "ืกืงืืืืื", "ืคืืืงืกืืืื", "ืคืื ืืืืง", "ืคืืจื", "ืคืืจืฉื", "ืคืืื", "ืคืื'ื", "ืคืจืืจื", "ืงืืืืืง", "ืงืืคืจื", "ืงืื", "ืงืจืืืืืจ", "ืจืื", "ืจื ื", "ืฉืืจืืื"]
base_url = "https://www.icar.co.il/"
# Landing page listing every car manufacturer (percent-encoded Hebrew path).
make_page_url = "https://www.icar.co.il/%D7%99%D7%A6%D7%A8%D7%A0%D7%99_%D7%A8%D7%9B%D7%91/"
make_page = urlopen(make_page_url)
soup_get_urls = BeautifulSoup(make_page, 'lxml')
url_list = soup_get_urls.find_all("div", {"class": "manufacture_common"})
# Relative links to each manufacturer page, e.g. '/<make>/'.
make_list_raw = [x.contents[1].attrs['href'] for x in url_list]
makes = [x.strip('/') for x in make_list_raw]
# Percent-encode the (Hebrew) make names so they form valid URLs.
make_urls = [base_url + quote(x) for x in make_list_raw]
makes_dict = dict(zip(makes, make_urls))
csvfile = open('icar_models.csv', 'w', encoding='utf-8', newline='')
csvwriter = csv.writer(csvfile)
# One CSV row per make: [make, model1, model2, ...].
for make in makes:
    page = urlopen(makes_dict.get(make))
    soup = BeautifulSoup(page, 'lxml')
    model_find = soup.find("div", {"class": "row cars manufatures"})
    model_contents = model_find.contents
    # Underscores in the slug correspond to spaces in the display name.
    make = make.replace('_', ' ')
    model_text = [model.text for model in model_contents if len(model) > 2]
    # Keep entries that mention the make, then strip the "<make> " prefix.
    models_raw = [x.strip('\n') for x in model_text if x.find(make) > 0]
    models = [x[(len(make) + 1):] for x in models_raw]
    out_list = [make]
    out_list.extend(models)
    csvwriter.writerow(out_list)
# NOTE(review): looks like a leftover debug/breakpoint statement — confirm
# before removing. The csvfile is also never explicitly closed.
b=5
29158564384 | #!/usr/bin/env python3
#coding=utf-8
import rospy
from sensor_msgs.msg import Joy
from robot_msgs.msg import user_control_msg
def JoyCallBack(msg):
    """Toggle user-control flags from joystick button presses and republish.

    Button 1 toggles ``serial_port_status``; button 0 toggles ``need_stop``.
    The updated control message is published on every joystick event.
    """
    global control_info
    # `1 - x` flips the flag between 0 and 1.
    if msg.buttons[1] ==1:
        control_info.serial_port_status = 1 - control_info.serial_port_status
    if msg.buttons[0] == 1:
        control_info.need_stop = 1 - control_info.need_stop
    con_pub.publish(control_info)
if __name__ == "__main__":
    rospy.init_node("controler_receiver_node")
    # Subscribe to joystick input and republish user-control commands.
    joy_sub = rospy.Subscriber("joy",Joy,JoyCallBack,queue_size= 10)
    con_pub = rospy.Publisher("User_Control_msg",user_control_msg,queue_size= 10)
    control_info = user_control_msg()
    # NOTE(review): both flags start at 1 — presumably "stopped" with the
    # serial port enabled; confirm the intended startup state.
    control_info.need_stop = 1
    control_info.serial_port_status = 1
    rospy.spin()
| sha236/ros_learn_note | src/ControllerReceiver/scripts/controler_receiver_node.py | controler_receiver_node.py | py | 755 | python | en | code | 0 | github-code | 13 |
43942460256 | from fields import *
class ModelCreator(type):
    """Metaclass that lets model instances be created with field kwargs.

    ``Model(field=value)`` validates each keyword against the class's
    declared ``ObjectField`` attributes and assigns the value on the new
    instance.

    Raises:
        TypeError: when a keyword does not name an existing attribute, or
            when the value's type does not match the field's declared type.
    """

    def __call__(cls, *args, **kwargs):
        new_class = type.__call__(cls, *args)
        # .items() replaces the Python-2-only .iteritems() call.
        for key, value in kwargs.items():
            if not hasattr(new_class, key):
                raise TypeError('No {} attr found'.format(key))
            attr = getattr(new_class, key)
            if issubclass(type(attr), ObjectField):
                if not issubclass(type(value), attr.type()):
                    # Typo fix: "Excepted" -> "Expected".
                    raise TypeError('Expected type {} not found'.format(key))
                setattr(new_class, key, value)
        return new_class
| EdwOK/IGI_Labs | Lab2/build/lib/task14/model_creator.py | model_creator.py | py | 629 | python | en | code | 0 | github-code | 13 |
39024066946 | from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
def send_welcome_email(name, receiver):
    """Send a welcome email to a newly joined user.

    Args:
        name: Display name passed into the email templates.
        receiver: Destination email address.
    """
    subject = 'Welcome to My Neighbourhood'
    sender = 'alfred.kahenya@student.moringaschool.com'
    # Render plain-text and HTML bodies from the same template context.
    context = {"name": name}
    plain_body = render_to_string('email/hoodmail.txt', context)
    html_body = render_to_string('email/hoodmail.html', context)
    email = EmailMultiAlternatives(subject, plain_body, sender, [receiver])
    email.attach_alternative(html_body, 'text/html')
    email.send()
22336307433 | spell = input()
command = input()
# Process spell-modifying commands until the terminating word is read.
while not command == 'Abracadabra':
    cmd_info = command.split()
    if cmd_info[0] == 'Abjuration':
        # Uppercase the whole spell.
        spell = spell.upper()
        print(spell)
    elif cmd_info[0] == 'Necromancy':
        # Lowercase the whole spell.
        spell = spell.lower()
        print(spell)
    elif cmd_info[0] == 'Illusion':
        # Replace the letter at the given index, if it is in range.
        index = int(cmd_info[1])
        letter = cmd_info[2]
        if 0 <= index < len(spell):
            spell = list(spell)
            spell[index] = letter
            spell = str(''.join(spell))
            print('Done!')
        else:
            print('The spell was too weak.')
    elif cmd_info[0] == 'Divination':
        # Substitute one substring for another (prints only when found).
        first_substring = cmd_info[1]
        second_substring = cmd_info[2]
        if first_substring in spell:
            spell = spell.replace(first_substring, second_substring)
            print(spell)
    elif cmd_info[0] == 'Alteration':
        # Delete a substring from the spell (prints only when found).
        substring = cmd_info[1]
        if substring in spell:
            spell = spell.replace(substring, '')
            print(spell)
    else:
        # Unknown command.
        print('The spell did not work!')
    command = input()
| DimitarDimitr0v/Python-Fundamentals | Practical Exam Preparation/13. HOGWARDS.py | 13. HOGWARDS.py | py | 1,105 | python | en | code | 2 | github-code | 13 |
3998647560 | # SPDX-License-Identifier: GPL-2.0
"Print out the distribution of the working set sizes of the given trace"
import argparse
import sys
import tempfile
import _damo_dist
import _damo_fmt_str
import _damon_result
def get_wss_dists(records, acc_thres, sz_thres, do_sort):
    """Compute per-target working-set-size distributions.

    A region counts toward a snapshot's working set only when it was sampled
    as accessed at least ``acc_thres`` times and is at least ``sz_thres``
    bytes large.

    Returns a dict mapping each record's target id to a list of per-snapshot
    working set sizes (ascending-sorted when ``do_sort`` is True).
    """
    dists = {}
    for record in records:
        sizes = []
        for snapshot in record.snapshots:
            total = sum(
                    region.size() for region in snapshot.regions
                    if region.nr_accesses.samples >= acc_thres
                    and region.size() >= sz_thres)
            sizes.append(total)
        if do_sort:
            sizes.sort()
        dists[record.target_id] = sizes
    return dists
def pr_wss_dists(wss_dists, percentiles, raw_number, nr_cols_bar, pr_all_wss):
    """Print working-set-size distributions, optionally with ASCII bars.

    For each target: prints the average wss, then either every wss value
    (``pr_all_wss``) or the requested percentiles, each with a proportional
    bar of '*' characters when ``nr_cols_bar`` > 0.
    """
    print('# <percentile> <wss>')
    for tid, wss_dist in wss_dists.items():
        print('# target_id\t%s' % tid)
        # NOTE(review): returns (not continue) on the first empty target,
        # skipping any remaining targets — confirm this is intended.
        if len(wss_dist) == 0:
            print('# no snapshot')
            return
        print('# avr:\t%s' % _damo_fmt_str.format_sz(
            sum(wss_dist) / len(wss_dist), raw_number))
        if pr_all_wss:
            for idx, wss in enumerate(wss_dist):
                print('%s %s' % (idx, _damo_fmt_str.format_sz(wss, raw_number)))
            return
        if nr_cols_bar > 0:
            # First pass: find the largest percentile value so bars can be
            # scaled to nr_cols_bar columns.
            max_sz = 0
            for percentile in percentiles:
                wss_idx = int(percentile / 100.0 * len(wss_dist))
                if wss_idx == len(wss_dist):
                    wss_idx -= 1
                wss = wss_dist[wss_idx]
                if max_sz <= wss:
                    max_sz = wss
            if max_sz > 0:
                sz_per_col = max_sz / nr_cols_bar
            else:
                sz_per_col = 1
        # Second pass: print one line per percentile.
        for percentile in percentiles:
            wss_idx = int(percentile / 100.0 * len(wss_dist))
            if wss_idx == len(wss_dist):
                wss_idx -= 1
            wss = wss_dist[wss_idx]
            line = '%3d %15s' % (percentile,
                    _damo_fmt_str.format_sz(wss, raw_number))
            if nr_cols_bar > 0:
                cols = int(wss / sz_per_col)
                remaining_cols = nr_cols_bar - cols
                line += ' |%s%s|' % ('*' * cols, ' ' * remaining_cols)
            print(line)
def set_argparser(parser):
    """Register the damo-wss command-line arguments on ``parser``."""
    parser.add_argument('--input', '-i', type=str, metavar='<file>',
            default='damon.data', help='input file name')
    parser.add_argument('--range', '-r', type=int, nargs=3,
            metavar=('<start>', '<stop>', '<step>'), default=[0,101,25],
            help='range of wss percentiles to print')
    parser.add_argument('--exclude_samples', type=int, default=20,
            metavar='<# samples>',
            help='number of first samples to be excluded')
    parser.add_argument('--acc_thres', '-t', type=int, default=1,
            metavar='<# accesses>',
            help='minimal number of accesses for treated as working set')
    parser.add_argument('--sz_thres', type=int, default=1,
            metavar='<size>',
            help='minimal size of region for treated as working set')
    parser.add_argument('--work_time', type=int, default=1,
            metavar='<micro-seconds>',
            help='supposed time for each unit of the work')
    parser.add_argument('--sortby', '-s', choices=['time', 'size'],
            help='the metric to be used for the sort of the working set sizes')
    parser.add_argument('--plot', '-p', type=str, metavar='<file>',
            help='plot the distribution to an image file')
    parser.add_argument('--nr_cols_bar', type=int, metavar='<num>',
            default=59,
            help='number of columns that is reserved for wss visualization')
    parser.add_argument('--raw_number', action='store_true',
            help='use machine-friendly raw numbers')
    parser.add_argument('--all_wss', action='store_true',
            help='Do not print percentile but all calculated wss')
    parser.description = 'Show distribution of working set size'
def main(args=None):
    """Entry point: parse args, load DAMON records, print/plot the wss distribution."""
    if not args:
        parser = argparse.ArgumentParser()
        set_argparser(parser)
        args = parser.parse_args()
    file_path = args.input
    percentiles = range(args.range[0], args.range[1], args.range[2])
    # Sort by size unless the user explicitly asked for time ordering.
    wss_sort = True
    if args.sortby == 'time':
        wss_sort = False
    raw_number = args.raw_number
    records, err = _damon_result.parse_records_file(file_path)
    if err != None:
        print('monitoring result file (%s) parsing failed (%s)' %
                (file_path, err))
        exit(1)
    _damon_result.adjust_records(records, args.work_time, args.exclude_samples)
    wss_dists = get_wss_dists(records, args.acc_thres, args.sz_thres, wss_sort)
    if args.plot:
        # Redirect stdout into a temp file that the plotter will consume;
        # force raw numbers and disable bars so the file is machine-parsable.
        orig_stdout = sys.stdout
        tmp_path = tempfile.mkstemp()[1]
        tmp_file = open(tmp_path, 'w')
        sys.stdout = tmp_file
        raw_number = True
        args.nr_cols_bar = 0
    pr_wss_dists(wss_dists, percentiles, raw_number, args.nr_cols_bar,
            args.all_wss)
    if args.plot:
        # Restore stdout and hand the captured data to the plotter.
        sys.stdout = orig_stdout
        tmp_file.flush()
        tmp_file.close()
        xlabel = 'runtime (percent)'
        if wss_sort:
            xlabel = 'percentile'
        err = _damo_dist.plot_dist(tmp_path, args.plot, xlabel,
                'working set size (bytes)')
        if err:
            print('plot failed (%s)' % err)
# Allow running this module directly as the damo-wss CLI tool.
if __name__ == '__main__':
    main()
| awslabs/damo | damo_wss.py | damo_wss.py | py | 5,602 | python | en | code | 119 | github-code | 13 |
333374614 | # This script scrapes the job title, company name, salary and job summary from the indeed.co.uk website for Python developer positions.
# Results are appended to a list and saved to a CSV file as a pandas data frame.
import requests
from bs4 import BeautifulSoup
import pandas as pd
def extract(page):
    """Fetch one indeed.co.uk results page and return its parsed soup.

    ``page`` is passed as the `start` query parameter (the result offset).
    """
    request_headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36'}
    target_url = f'https://uk.indeed.com/jobs?q=python+developer&start={page}'
    response = requests.get(target_url, request_headers)
    return BeautifulSoup(response.content, 'html.parser')
def transform(soup):
    """Append every job card found in ``soup`` to the global ``joblist``.

    Each posting becomes a dict with title, company, salary and summary.
    Postings without a salary snippet get an empty salary string.
    """
    for card in soup.find_all('div', class_='job_seen_beacon'):
        job_title = card.find('h2', class_='jobTitle').text.strip()
        company_name = card.find('span', class_='companyName').text.strip()
        try:
            salary = card.find('div', class_='salary-snippet').text.strip()
        except AttributeError:
            # find() returned None: this card has no salary snippet.
            # (Narrowed from a bare `except:` that hid real errors.)
            salary = ''
        summary = card.find('div', class_='job-snippet').text.strip().replace('\n', '')
        joblist.append({
            'title': job_title,
            'company': company_name,
            'salary': salary,
            'summary': summary,
        })
    return
joblist = []

# Each indeed results page holds 10 postings; the `start` query parameter is
# the result offset, so page i begins at offset i * 10.
# Change the range stop below to scrape a different number of pages.
for i in range(0, 10):
    print(f'Getting page, {i}')
    # BUG FIX: was extract(0), which re-scraped the first page every time.
    c = extract(i * 10)
    transform(c)

df = pd.DataFrame(joblist)
print(df.head())
df.to_csv('indeed_jobs_10_pages.csv')
| gitmichu/Python | webscraping/webscraping_indeed.py | webscraping_indeed.py | py | 1,569 | python | en | code | 0 | github-code | 13 |
27706486433 | import tensorflow as tf
import numpy as np
import time
import os
import numpy
import json
import Utility
import DataHandlers
import HackathonDataNeuralNetwork
import NeuralNetwork as NN
import DataUtility
from DataUtility import Gesture
import MenuUtility
import ResultAnalyses
# Training Parameters
N_EPOCH = 5000        # number of training epochs per run
learning_rate = 0.05  # gradient-descent step size
#os.environ["CUDA_VISIBLE_DEVICES"]="-1"  # uncomment to force CPU-only TensorFlow
class ActivationFunction:
    """Names of the supported activation functions plus an id lookup."""
    SIGMOID = "Sigmoid"
    RELU = "ReLu"
    SOFTMAX = "Softmax"

    @staticmethod
    def get_activation_function_by_id(id):
        """Map a numeric menu id to an activation-function name.

        0 -> Sigmoid, 1 -> ReLu, anything else -> Softmax.
        (Marked @staticmethod so instance calls don't pass self as `id`.)
        """
        if id == 0:
            return ActivationFunction.SIGMOID
        elif id == 1:
            return ActivationFunction.RELU
        else:
            return ActivationFunction.SOFTMAX
class ResultJsonName:
    """JSON key names used when (de)serializing gesture-recognition results."""
    FILENAME = "filename"
    RESULTS = "results"
    GESTURE = "gesture"
class NeuralNetwork:
    def __init__(self, session_folder, data_handler_type, is_hackathon):
        """Set up session paths and load metadata of the latest session.

        Args:
            session_folder: Root folder containing one sub-folder per session.
            data_handler_type: Which data-handler implementation to use.
            is_hackathon: Whether the hackathon dataset variant is active.
        """
        self.epoch_count = 0
        self.session_folder = session_folder
        self.data_handler_type = data_handler_type
        self.is_hackathon = is_hackathon
        self.set_default_sess_path()
        self.get_network_meta_data()
    def set_default_sess_path(self):
        """Point the handler at the last-listed session folder.

        NOTE(review): relies on os.listdir() ordering — presumably session
        folder names sort chronologically; confirm.
        """
        self.set_sess_path(os.listdir(self.session_folder)[-1])
    def change_dataset(self):
        """Toggle between the hackathon dataset and the regular one.

        Swaps the session folder and data-handler type to the other module's
        constants, then re-selects the default session path.
        """
        if self.is_hackathon:
            self.is_hackathon = False
            DataFile = NN
        else:
            self.is_hackathon = True
            DataFile = HackathonDataNeuralNetwork
        self.session_folder = DataFile.SESSION_FOLDERS
        self.data_handler_type = DataFile.DATA_HANDLER_TYPE
        self.set_default_sess_path()
    def get_number_of_gesture(self):
        """Return the number of output gestures (size of the network's last layer)."""
        return self.number_of_gestures
    def set_sess_path(self, sess_path_id):
        """Derive all session-related file paths from a session folder name.

        Silently ignores a missing metadata file so that a brand-new session
        folder can be selected before its network.meta exists.
        """
        self.sess_path = self.session_folder + "{}/".format(sess_path_id)
        self.file_path = self.sess_path + "network.meta"
        self.sess_model_path = self.sess_path + "emg_model"
        self.results_folder_path = self.sess_path + "results/"
        self.log_folder_path = self.sess_path + "log/"
        try:
            self.get_network_meta_data()
        except OSError as e:
            # No metadata yet for this session; keep previously loaded values.
            pass
    def set_layer_sizes(self, layer_sizes):
        """Store the per-layer neuron counts (input ... hidden ... output)."""
        self.layer_sizes = layer_sizes
    def set_layer_activation_functions(self, layer_activation_functions):
        """Store the activation-function name for each layer transition."""
        self.layer_activation_functions = layer_activation_functions
    def print_sess_info(self, session_path):
        """Print a human-readable summary of one session's network.meta file.

        File layout (one line each, space separated): layer sizes, epoch
        count, activation functions, wavelet level, feature flags.
        """
        meta_data_path = session_path + "/" + "network.meta"
        with open(meta_data_path, 'r') as metafile:
            layer_sizes = metafile.readline().split()[1:]
            # The last layer size equals the number of output gestures.
            print("{:20s}".format("Number of gestures:"), layer_sizes[-1])
            print("{:20s}".format("Layer sizes:"), layer_sizes)
            print("{:20s}".format("Epoch count:"), int(metafile.readline().split(" ")[1]))
            print("{:20s}".format("Activations:"), metafile.readline().split()[1:]) # layer_activation_functions
            print("{:20s}".format("Wavelet level:"), int(metafile.readline().split(" ")[1]))
            print("{:20s}".format("[MAV, RMS, WL]:"), [int(x) for x in metafile.readline().split()[1:]])
    def select_sess_path(self):
        """Interactively list sessions and switch to the one the user picks.

        Invalid (non-integer or out-of-range) input leaves the current
        session unchanged.
        """
        session_folder_list = os.listdir(self.session_folder)
        for i in range(len(session_folder_list)):
            print("{})".format(i), session_folder_list[i])
            self.print_sess_info(self.session_folder + session_folder_list[i])
            print()
        session_choice = input("Select a session to use: ")
        try:
            session_choice = int(session_choice)
        except ValueError:
            # Non-numeric input: force the out-of-range early return below.
            session_choice = -1
        if session_choice >= len(session_folder_list) or session_choice < 0:
            return
        self.set_sess_path(session_folder_list[int(session_choice)])
        self.get_network_meta_data()
    def create_network_meta_data_file(self):
        """Write a fresh network.meta describing the current configuration.

        Counterpart of get_network_meta_data(); the line order must match it.
        """
        file_path = self.sess_path + "network.meta"
        with open(file_path, 'w') as outfile:
            outfile.write("layer_sizes ")
            for layer_size in self.layer_sizes:
                outfile.write("{} ".format(layer_size))
            outfile.write("\n")
            # A new session always starts from epoch zero.
            outfile.write("Epoch_count 0\n")
            outfile.write("layer_activation_functions ")
            for activation_function in self.layer_activation_functions:
                outfile.write("{} ".format(activation_function))
            outfile.write("\n")
            outfile.write("Wavelet_level {}\n".format(self.wavelet_level))
            outfile.write("Features ")
            for feature in self.feature_function_check_list:
                outfile.write("{} ".format(feature))
            outfile.write("\n")
def get_network_meta_data(self):
with open(self.file_path, 'r') as metafile:
self.set_layer_sizes([int(x) for x in metafile.readline().split()[1:]])
self.number_of_gestures = self.layer_sizes[-1]
self.epoch_count = int(metafile.readline().split(" ")[1])
self.set_layer_activation_functions(metafile.readline().split()[1:])
self.wavelet_level = int(metafile.readline().split(" ")[1])
self.feature_function_check_list = [int(x) for x in metafile.readline().split()[1:]]
return (list(map(int, self.layer_sizes)), self.layer_activation_functions, self.epoch_count)
def update_epoch_count_network_meta_data(self, epoch_count):
self.epoch_count = epoch_count
with open(self.file_path, 'r') as metafile:
lines = metafile.readlines()
lines[1] = "Epoch_count {}\n".format(self.epoch_count)
with open(self.file_path, 'w') as metafile:
for line in lines:
metafile.write(line)
def create_emg_network_variables(self):
number_of_variables = len(self.layer_sizes) - 1
return_variables = []
bias_variables = []
for i in range(number_of_variables):
variable_name = "theta{}".format(i)
variable = tf.Variable(tf.random_uniform([self.layer_sizes[i], self.layer_sizes[i + 1]], -1, 1), name=variable_name)
return_variables.append(variable)
bias_name = "bias{}".format(i)
bias = tf.Variable(tf.zeros(self.layer_sizes[i + 1]), name=bias_name)
bias_variables.append(bias)
return (return_variables, bias_variables)
def create_emg_network_layers(self, input_placeholder, variables, bias_variables):
layers = []
current_layer = input_placeholder
number_of_variables = len(variables)
for i in range(number_of_variables):
theta = variables[i]
bias = bias_variables[i]
activation_function = self.layer_activation_functions[i]
if activation_function == ActivationFunction.SIGMOID:
layer = tf.sigmoid(tf.matmul(current_layer, theta) + bias)
elif activation_function == ActivationFunction.RELU:
layer = tf.add(tf.matmul(current_layer, theta), bias)
layer = tf.nn.relu(layer)
elif activation_function == ActivationFunction.SOFTMAX:
layer = tf.nn.softmax(tf.matmul(current_layer, theta) + bias)
layers.append(layer)
current_layer = layer
output = layers.pop()
return (layers, output)
def get_training_inputs_and_outputs(self, training_file_path):
inputs = []
outputs = []
with open(training_file_path, 'r') as training_data_file:
(training_size, n_inputs, n_outputs) = training_data_file.readline().split()
line_counter = 0
for line in training_data_file:
if line_counter % 2 == 0:
inputs.append([float(x) for x in line.split()])
else:
outputs.append([float(x) for x in line.split()])
line_counter += 1
return (inputs, outputs)
def get_training_meta_data(self, training_file_path):
with open(training_file_path, 'r') as training_data_file:
(training_size, n_inputs, n_outputs) = training_data_file.readline().split()
return(int(training_size), int(n_inputs), int(n_outputs))
def create_emg_training_file(self, file_list, training_file_path):
data_handler = DataHandlers.FileDataHandler(DataUtility.TRAINING_FILE_LIST[0])
data_handler.set_emg_wavelet_level(self.wavelet_level)
data_handler.set_feature_functions_list(self.feature_function_check_list)
n_input_nodes = len(data_handler.get_emg_data_features())
n_output_nodes = self.get_number_of_gesture()
size_of_training = len(file_list)
with open(training_file_path, 'w') as outfile:
outfile.write("{} ".format(size_of_training))
outfile.write("{} ".format(n_input_nodes))
outfile.write("{}\n".format(n_output_nodes))
for i in range(size_of_training):
data_file = file_list[i]
print("Training file progress: {}%".format(int(((i + 1) / size_of_training) * 100)), end="\r")
data_handler = self.data_handler_type(data_file)
data_handler.set_emg_wavelet_level(self.wavelet_level)
data_handler.set_feature_functions_list(self.feature_function_check_list)
emg_sums = data_handler.get_emg_data_features()
for i in range(n_input_nodes):
outfile.write(str(emg_sums[i]))
if i < n_input_nodes - 1:
outfile.write(" ")
else:
outfile.write("\n")
for gesture in range(n_output_nodes):
if gesture != data_file.gesture:
outfile.write("0")
else:
outfile.write("1")
if gesture < n_output_nodes - 1:
outfile.write(" ")
else:
outfile.write("\n")
print()
print("Finished")
print()
print("training size:", size_of_training)
print("Number of input neurons:", n_input_nodes)
print("Number of output neurons:", n_output_nodes)
print()
def create_emg_network(self):
sess_path_id = time.strftime("%Y-%m-%d-%H%M")
# new_sess_path = self.session_folder + "{}/".format(sess_path_id)
self.set_sess_path(sess_path_id)
if os.path.exists(self.sess_path):
run = input("A session with this name already exist, replace it? (y/n): ")
if not run == "y":
return
print("Create folder: {}".format(self.sess_path))
if not os.path.exists(self.sess_path):
os.makedirs(self.sess_path)
print("\nCreate EMG-training file")
training_file_path = self.sess_path + "training_file.data"
number_of_gestures = input("Number of gestures: ")
if Utility.is_int_input(number_of_gestures):
self.number_of_gestures = int(number_of_gestures)
number_of_gestures = self.get_number_of_gesture()
if not self.is_hackathon:
file_list = DataUtility.TRAINING_FILE_LIST
else:
file_list = HackathonDataNeuralNetwork.get_training_file_list(number_of_gestures)
wavelet_level = input("Use Wavelet Level: ")
if Utility.is_int_input(wavelet_level):
self.wavelet_level = int(wavelet_level)
print()
self.feature_function_check_list = [0, 0, 0]
feature_name_list = ["Mean Absoulute Value", "Root Mean Square", "Waveform Length"]
for i in range(len(feature_name_list)):
use_feature = input("Use {} (y/n): ".format(feature_name_list[i]))
if use_feature == 'y':
self.feature_function_check_list[i] = 1
print()
self.create_emg_training_file(file_list, training_file_path)
print("Create Network")
number_of_hidden_layers = int(input("Number of hidden layers: "))
print()
self.layer_sizes = [0] * (number_of_hidden_layers + 2)
self.layer_activation_functions = [0] * (number_of_hidden_layers + 1)
print("Activation function for each layer\n1) Sigmoid\n2) ReLu\n3) SoftMax")
for i in range(number_of_hidden_layers + 1):
activation_function_id = int(input("Connection {}: ".format(i)))
self.layer_activation_functions[i] = get_activation_function_by_id(activation_function_id)
# self.layer_activation_functions = [ActivationFunction.SIGMOID] * (number_of_hidden_layers + 1)
print("Number of neurons for each hidden layer")
for i in range(number_of_hidden_layers):
hidden_layer_id = i + 1
self.layer_sizes[hidden_layer_id] = int(input("Hidden layer {}: ".format(hidden_layer_id)))
(inputs, outputs) = self.get_training_inputs_and_outputs(training_file_path)
(training_size, n_inputs, n_outputs) = self.get_training_meta_data(training_file_path)
input_placeholder = tf.placeholder(tf.float32, shape=[training_size, n_inputs], name="input")
self.layer_sizes[0] = n_inputs
self.layer_sizes[-1] = n_outputs
(theta, bias) = self.create_emg_network_variables()
(layers, output) = self.create_emg_network_layers(input_placeholder, theta, bias)
init = tf.global_variables_initializer()
sess = tf.Session()
tf.summary.FileWriter(self.log_folder_path + "log_test", sess.graph)
sess.run(init)
saver = tf.train.Saver()
saver.save(sess, self.sess_model_path)
print("\n\nNetwork created")
print("Session path:", self.sess_model_path)
print("Layer sizes:", self.layer_sizes)
print("Layer activation functions:", self.layer_activation_functions)
tf.reset_default_graph()
print("\nCreate meta-data file")
self.create_network_meta_data_file() # Write meta data of session to file
self.print_sess_info(self.sess_path)
input("\nPress Enter to continue...")
def print_training_info(self, training_file_path):
os.system('cls')
print("Train Network")
print("Training file:", training_file_path)
print("Training session:", self.sess_path)
def train_emg_network(self):
training_file_path = self.sess_path + "training_file.data"
self.print_training_info(training_file_path)
(inputs, outputs) = self.get_training_inputs_and_outputs(training_file_path)
(training_size, n_inputs, n_outputs) = self.get_training_meta_data(training_file_path)
(sess_layer_sizes, layer_activation_functions, old_epoch_count) = self.get_network_meta_data()
if(n_inputs != sess_layer_sizes[0] or n_outputs != sess_layer_sizes[-1]):
print("Training file and session is not compatible!")
return
dummy = False
while not dummy:
n_steps = input("Number of steps: ")
dummy = Utility.is_int_input(n_steps)
n_steps = int(n_steps)
start_time = time.time()
current_time = time.time() - start_time
i = 0
global_step = old_epoch_count
self.print_training_info(training_file_path)
print("Number of steps:", n_steps)
print('Current time: {:.0f}h {:.0f}min {:.0f}sec'.format(0, 0, 0))
print('Estimated time: {:.0f}h {:.0f}min {:.0f}sec'.format(0, 0, 0))
print('Batch:', global_step)
print()
while global_step < n_steps:
self.continue_emg_network_training(inputs, outputs, n_inputs, n_outputs, training_size, n_steps, global_step)
self.print_training_info(training_file_path)
print("Number of steps:", n_steps)
print()
if global_step + N_EPOCH <= n_steps:
global_step += N_EPOCH
i += N_EPOCH
else:
global_step += ((n_steps - old_epoch_count) % N_EPOCH)
i += ((n_steps - old_epoch_count) % N_EPOCH)
current_time = time.time() - start_time
(hours, minutes, seconds) = Utility.second_to_HMS(current_time)
print('Current time: {:.0f}h {:.0f}min {:.0f}sec'.format(hours, minutes, seconds))
if i == 0:
estimated_time = 0
else:
estimated_time = (current_time / i) * (n_steps - old_epoch_count)
(hours, minutes, seconds) = Utility.second_to_HMS(estimated_time)
print('Estimated time: {:.0f}h {:.0f}min {:.0f}sec'.format(hours, minutes, seconds))
print('Batch:', global_step)
self.update_epoch_count_network_meta_data(global_step)
print()
print("Runtime:", "{0:.2f}".format(float(time.time() - start_time)) + "sec")
print("finished")
input("Press Enter to continue...")
def continue_emg_network_training(self, inputs, outputs, n_inputs, n_outputs, training_size, n_steps, epoch_count):
input_placeholder = tf.placeholder(tf.float32, shape=[training_size, n_inputs], name="input")
output_placeholder = tf.placeholder(tf.float32, shape=[training_size, n_outputs], name="output")
(theta, bias) = self.create_emg_network_variables()
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, self.sess_model_path)
(layer, output) = self.create_emg_network_layers(input_placeholder, theta, bias)
# Mean Squared Estimate - the simplist cost function (MSE)
cost = tf.reduce_mean(tf.square(outputs - output))
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
for i in range(N_EPOCH):
if epoch_count + i >= n_steps:
break
sess.run(train_step, feed_dict={input_placeholder: inputs, output_placeholder: outputs})
saver.save(sess, self.sess_model_path)
tf.reset_default_graph()
def test_emg_network(self):
self.get_network_meta_data()
print("Session path:", self.sess_path)
is_storeing_result = input("Write result to file (y/n)? ")
if is_storeing_result == 'y':
is_storeing_result = True
else:
is_storeing_result = False
summary_list = []
run_date = time.strftime("%Y-%m-%d-%H%M")
number_of_gestures = self.get_number_of_gesture()
if not self.is_hackathon:
file_list = DataUtility.TEST_FILE_LIST
else:
file_list = HackathonDataNeuralNetwork.get_test_file_list(number_of_gestures)
for test_file in file_list:
data_handler = self.data_handler_type(test_file)
data_handler.set_emg_wavelet_level(self.wavelet_level)
data_handler.set_feature_functions_list(self.feature_function_check_list)
start_time = time.time()
results = self.input_test_emg_network(data_handler)
end_time = time.time()
recognized_gesture = numpy.argmax(results)
print()
print("###########################################################")
self.print_results(results)
print()
print("Recognized:", Gesture.gesture_to_string(np.argmax(results)))
print("Correct gesture:", Gesture.gesture_to_string(test_file.gesture))
print("Analyse time: ", "%.2f" % float(end_time - start_time))
summary_list.append((test_file.gesture, recognized_gesture))
print()
print("File:", test_file.filename)
if is_storeing_result:
self.write_result_to_file(results, test_file.filename, test_file.gesture, run_date)
print("#############################################################")
print("Session path:", self.sess_path)
print("Summary List")
number_of_gestures = self.get_number_of_gesture()
success_list = []
for i in range(number_of_gestures):
success_list.append([0, 0])
for correct_gesture, recognized_gesture in summary_list:
success_list[correct_gesture][0] += 1
if correct_gesture == recognized_gesture:
success_list[correct_gesture][1] += 1
print(Gesture.gesture_to_string(correct_gesture), " -> ", Gesture.gesture_to_string(recognized_gesture))
print()
print("#############################################################")
print("Success Rate")
for i in range(number_of_gestures):
if success_list[i][0] != 0:
print('{:15s}\t{:4d} of {:4d} -> {:.2f}'.format(Gesture.gesture_to_string(i), success_list[i][1], success_list[i][0], 100 * success_list[i][1] / success_list[i][0]))
input("Press Enter to continue...")
def input_test_emg_network(self, input_data_handler):
input_data_handler.set_emg_wavelet_level(self.wavelet_level)
input_data_handler.set_feature_functions_list(self.feature_function_check_list)
test_inputs = [input_data_handler.get_emg_data_features()]
self.get_network_meta_data()
input_placeholder = tf.placeholder(tf.float32, shape=[1, self.layer_sizes[0]], name="input")
(theta, bias) = self.create_emg_network_variables()
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, self.sess_model_path)
# tf.summary.FileWriter(self.log_folder_path + "log_test", sess.graph)
output = self.create_emg_network_layers(input_placeholder, theta, bias)[1]
results = sess.run(output, feed_dict={input_placeholder: test_inputs})
tf.reset_default_graph()
return results[0]
def print_results(self, results):
for gesture in range(self.get_number_of_gesture()):
print('{}) {:15s}\t{:10f}'.format(gesture, Gesture.gesture_to_string(gesture), results[gesture]))
def write_result_to_file(self, results, file_name, correct_gesture, run_date):
results_folder_path = self.results_folder_path
json_file_path = results_folder_path + "raw_results_{}.json".format(run_date)
if not os.path.exists(results_folder_path):
os.makedirs(results_folder_path)
if os.path.isfile(json_file_path):
with open(json_file_path) as json_file:
json_data = json.load(json_file)
else:
json_data = json.loads('[]')
list_results = []
list_results = results.tolist()
json_object_result = '{{ "filename" : "{}", "gesture" : {}, "results" : {} }}'.format(file_name, correct_gesture, list_results)
json_data.append(json.loads(json_object_result))
with open(json_file_path, 'w') as outfile:
json.dump(json_data, outfile, sort_keys=True, indent=4, separators=(',', ': '))
def result_analyses(self):
if not os.path.exists(self.results_folder_path):
print("No results found!")
input("Press enter to continue...")
return
result_file_list = os.listdir(self.results_folder_path)
if len(result_file_list) == 0:
print("No results found!")
input("Press enter to continue...")
return
result_file_path = self.results_folder_path + result_file_list[-1]
if len(result_file_list) > 1:
for i in range(len(result_file_list)):
print("{})".format(i), result_file_list[i])
result_choice = int(input("Select a result file to use: "))
try:
if not (result_choice >= len(result_file_list) or result_choice < 0):
result_choice = int(result_choice)
result_file_path = self.results_folder_path + result_file_list[result_choice]
except ValueError:
pass
print("Result file: {}".format(result_file_path))
with open(result_file_path) as json_file:
json_result_data = json.load(json_file)
analyse_menu = [
MenuUtility.MenuItem("Raw success list", ResultAnalyses.raw_success_list),
MenuUtility.MenuItem("Filtered analyse", ResultAnalyses.filtered_analyse)
]
print("Analyses menu")
print("####################################################")
action = MenuUtility.print_menu(analyse_menu)
analyse_menu[action].function(self.get_number_of_gesture(), json_result_data)
input("Press enter to continue...")
| Tonychausan/MyoArmbandPython | src/NeuralNetworkUtility.py | NeuralNetworkUtility.py | py | 24,716 | python | en | code | 3 | github-code | 13 |
7041723430 | from o3seespy.base_model import OpenSeesObject, OpenSeesMultiCallObject, opy
from o3seespy.opensees_instance import OpenSeesInstance
from o3seespy.exceptions import ModelError
def set_node_mass(osi, node, x_mass, y_mass, rot_mass):
    """Assign x, y and rotational mass to *node* via the OpenSees ``mass`` command."""
    osi.to_process('mass', [node.tag, x_mass, y_mass, rot_mass])
class Mass(OpenSeesObject):
    """Nodal mass (OpenSees ``mass`` command) with optional y and rotational terms."""
    op_base_type = "mass"
    op_type = None

    def __init__(self, osi, node, x_mass, y_mass=None, rot_mass=None):
        """
        :param osi: OpenSeesInstance
        :param node: node to receive the mass
        :param x_mass: translational mass in the x-direction
        :param y_mass: translational mass in the y-direction (optional)
        :param rot_mass: rotational mass; defaults to 0.0 when the model has more than 2 DOFs
        """
        if osi.ndf > 2 and rot_mass is None:
            rot_mass = 0.0
        self.node = node
        self.x_mass = x_mass
        self.y_mass = y_mass
        self.rot_mass = rot_mass
        params = [self.node.tag, self.x_mass]
        if self.y_mass is not None:
            params.append(self.y_mass)
        if self.rot_mass is not None:
            if self.y_mass is None:
                # placeholder y-mass so the rotational term lands in the right slot
                params.append(0.0)
            params.append(self.rot_mass)
        self._parameters = params
        self.to_process(osi)
class Mass2D(OpenSeesObject):
    """Nodal mass for 2D models (OpenSees ``mass`` command).

    Behaviour is identical to :class:`Mass`.
    """
    op_base_type = "mass"
    op_type = None

    def __init__(self, osi, node, x_mass, y_mass=None, rot_mass=None):
        """
        :param osi: OpenSeesInstance
        :param node: node to receive the mass
        :param x_mass: translational mass in the x-direction
        :param y_mass: translational mass in the y-direction (optional)
        :param rot_mass: rotational mass; defaults to 0.0 when the model has more than 2 DOFs
        """
        if osi.ndf > 2 and rot_mass is None:
            rot_mass = 0.0
        self.node = node
        self.x_mass = x_mass
        self.y_mass = y_mass
        self.rot_mass = rot_mass
        self._parameters = [self.node.tag, self.x_mass]
        if self.y_mass is not None:
            self._parameters.append(self.y_mass)
        if self.rot_mass is not None:
            if self.y_mass is None:
                self._parameters.append(0.0)  # pad y-mass so rot_mass is positional-correct
            self._parameters.append(self.rot_mass)
        self.to_process(osi)
class Mass3D(OpenSeesObject):
    """Nodal mass for 3D models: three translational and three rotational terms."""
    op_base_type = "mass"
    op_type = None

    def __init__(self, osi, node, x, y, z, x_rot, y_rot, z_rot):
        """
        :param osi: OpenSeesInstance
        :param node: node to receive the mass
        :param x: translational mass in the x-direction
        :param y: translational mass in the y-direction
        :param z: translational mass in the z-direction
        :param x_rot: rotational mass about the x-axis
        :param y_rot: rotational mass about the y-axis
        :param z_rot: rotational mass about the z-axis
        """
        self.node = node
        self.x = x
        self.y = y
        self.z = z  # bug fix: was `self.z = y`, sending the y-mass as the z-mass
        self.x_rot = x_rot
        self.y_rot = y_rot
        self.z_rot = z_rot
        self._parameters = [self.node.tag, self.x, self.y, self.z, self.x_rot, self.y_rot, self.z_rot]
        self.to_process(osi)
def set_equal_dof(osi, node_1, node_2, dof):
    """Tie a single DOF of *node_2* to *node_1* (OpenSees ``equalDOF``)."""
    osi.to_process('equalDOF', [node_1.tag, node_2.tag, dof])
def set_equal_dofs(osi, node_1, node_2, dofs):
    """Tie several DOFs of *node_2* to *node_1* (OpenSees ``equalDOF``)."""
    osi.to_process('equalDOF', [node_1.tag, node_2.tag] + list(dofs))
def set_equal_dofs_mixed(osi, node_1, node_2, num_dof, rcdofs):  # TODO: validate
    """Create an ``equalDOF_Mixed`` constraint between two nodes.

    :param num_dof: number of constrained DOFs
    :param rcdofs: flat sequence of retained/constrained DOF values
        (presumably pairs - original carries a TODO to validate)
    """
    osi.to_process('equalDOF_Mixed', [node_1.tag, node_2.tag, num_dof, *rcdofs])
class EqualDOF(OpenSeesObject):
    """Constrain the listed DOFs of *c_node* to follow *r_node* (OpenSees ``equalDOF``)."""
    op_base_type = "equalDOF"
    op_type = None

    def __init__(self, osi, r_node, c_node, dofs):
        """
        :param osi: OpenSeesInstance
        :param r_node: retained node
        :param c_node: constrained node - moves with the retained node
        :param dofs: DOFs to tie together
        """
        self.r_node = r_node
        self.c_node = c_node
        self.dofs = dofs
        self._parameters = [r_node.tag, c_node.tag, *dofs]
        self.to_process(osi)
class EqualDOFMulti(OpenSeesMultiCallObject):
    """Tie the DOFs of many constrained nodes to retained node(s) in one object."""
    op_base_type = "equalDOF"
    op_type = None

    def __init__(self, osi, r_node, c_nodes, dofs):
        """
        :param osi: OpenSeesInstance
        :param r_node: retained node, or a sequence of retained nodes
            (one per entry in *c_nodes*)
        :param c_nodes: list of constrained nodes
        :param dofs: DOFs to tie together
        """
        self.r_node = r_node
        self.c_nodes = c_nodes
        self.dofs = dofs
        if hasattr(r_node, '__len__'):
            retained = self.r_node
        else:
            # a single retained node is paired with every constrained node
            retained = [self.r_node for _ in range(len(self.c_nodes))]
        self._multi_parameters = [[retained[i].tag, c_node.tag, *self.dofs]
                                  for i, c_node in enumerate(self.c_nodes)]
        self.to_process(osi)
class ModalDamping(OpenSeesObject):
    """Modal damping (OpenSees ``modalDamping`` command).

    Notes (carried over from the original author):
     * Run eigen() first; do not use the 'fullGenLapack' option in the eigen analysis
     * Cannot be used with Newmark_Explicit, but works with other explicit and implicit methods
     * Creates a full damping matrix, so use solver.FullGen (very slow) or sparse solvers
       such as UmfPack (with algorithm.KrylovNewton or algorithm.BFGS, not
       algorithm.NewtonRaphson; see
       https://portwooddigital.com/2019/09/12/be-careful-with-modal-damping/)
    """
    op_base_type = "modalDamping"
    op_type = None

    def __init__(self, osi, xis):
        """
        :param osi: OpenSeesInstance
        :param xis: array_like of damping ratios, one per mode (or just the first value)
        """
        self.xis = xis
        self._parameters = self.xis
        self.to_process(osi)
def set_rigid_diaphragm(osi, r_node, cnodes, perp_dir):
    """Create a rigid diaphragm tying *cnodes* to *r_node* (OpenSees ``rigidDiaphragm``).

    :param perp_dir: direction perpendicular to the diaphragm plane
    """
    osi.to_process('rigidDiaphragm', [perp_dir, r_node.tag] + [n.tag for n in cnodes])
def set_rigid_link(osi, r_node, c_node, rtype):
    """Create a rigid-link multi-point constraint between two nodes.

    :param osi: OpenSeesInstance
    :param r_node: retained node
    :param c_node: constrained node
    :param rtype: link type, either 'bar' or 'beam'
    """
    osi.to_process('rigidLink', [rtype, r_node.tag, c_node.tag])
class RigidLink(OpenSeesObject):
    """Rigid-link multi-point constraint between two nodes (OpenSees ``rigidLink``)."""
    op_base_type = 'rigidLink'
    op_type = None

    def __init__(self, osi, rtype, r_node, c_node):
        """
        :param osi: OpenSeesInstance
        :param rtype: link type, either 'bar' or 'beam'
        :param r_node: retained node
        :param c_node: constrained node
        """
        self.rtype = rtype
        self.r_node = r_node
        self.c_node = c_node
        self._parameters = [rtype, r_node.tag, c_node.tag]
        self.to_process(osi)
class Fix1DOF(OpenSeesObject):
    """Homogeneous single-point constraint for a 1-DOF node (OpenSees ``fix``)."""
    op_base_type = "fix"
    op_type = None

    def __init__(self, osi, node, x):
        """
        :param osi: OpenSeesInstance
        :param node: node to restrain
        :param x: 1 to fix, 0 to free the x-direction
        """
        self.node = node
        self.x = x
        self._parameters = [node.tag, x]
        self.to_process(osi)
class Fix1DOFMulti(OpenSeesMultiCallObject):
    """Apply a homogeneous SP constraint to many 1-DOF nodes in one object."""
    op_base_type = "fix"
    op_type = None

    def __init__(self, osi, nodes, x, is_none='raise'):
        """
        :param osi: OpenSeesInstance
        :param nodes: list of nodes to restrain
        :param x: 1 to fix, 0 to free the x-direction
        :param is_none: 'raise' to propagate an AttributeError from a node
            without a tag (e.g. None), any other value to skip that node
        """
        self.nodes = nodes
        self.x = x
        self._multi_parameters = []
        for nd in self.nodes:
            try:
                self._multi_parameters.append([nd.tag, self.x])
                self.to_process(osi)
            except AttributeError:
                if is_none == 'raise':
                    raise
class Fix2DOF(OpenSeesObject):
    """Homogeneous single-point constraint for a 2-DOF node (OpenSees ``fix``)."""
    op_base_type = "fix"
    op_type = None

    def __init__(self, osi, node, x, y):
        """
        :param osi: OpenSeesInstance
        :param node: node to restrain
        :param x: 1 to fix, 0 to free the x-direction
        :param y: 1 to fix, 0 to free the y-direction
        """
        self.node = node
        self.x = x
        self.y = y
        self._parameters = [node.tag, x, y]
        self.to_process(osi)
class Fix2DOFMulti(OpenSeesMultiCallObject):
    """Apply a homogeneous SP constraint to many 2-DOF nodes in one object."""
    op_base_type = "fix"
    op_type = None

    def __init__(self, osi, nodes, x, y, is_none='raise', already_fixed='raise'):
        """
        :param osi: OpenSeesInstance
        :param nodes: list of nodes to restrain
        :param x: 1 to fix, 0 to free the x-direction
        :param y: 1 to fix, 0 to free the y-direction
        :param is_none: 'raise' to propagate an AttributeError from a node
            without a tag (e.g. None), any other value to skip that node
        :param already_fixed: 'raise' to propagate a ValueError from the fix
            call, any other value to skip that node
        """
        self.nodes = nodes
        self.x = x
        self.y = y
        self._multi_parameters = []
        for nd in self.nodes:
            try:
                self._multi_parameters.append([nd.tag, self.x, self.y])
                self.to_process(osi)
            except AttributeError:
                if is_none == 'raise':
                    raise
            except ValueError:
                if already_fixed == 'raise':
                    raise
def add_fixity_to_dof(osi, dof, nodes):
    """Fix a single degree-of-freedom on every node in *nodes*.

    Picks the Fix* helper matching the model's DOFs-per-node (``osi.ndf``),
    builds a fixity vector with a 1 only at position *dof* (1-indexed), and
    applies it to each node.  ``None`` entries in *nodes* are skipped.

    :param osi: OpenSeesInstance; ``ndf``, ``state`` and ``commands`` are read here
    :param dof: 1-indexed degree of freedom to fix
    :param nodes: iterable of node objects (``None`` entries allowed)
    :raises ModelError: if ``osi.ndf`` is not 1, 2, 3 or 6
    """
    if osi.ndf == 1:
        fn = Fix1DOF
        arr = [0]
    elif osi.ndf == 2:
        fn = Fix2DOF
        arr = [0, 0]
    elif osi.ndf == 3:
        fn = Fix3DOF
        arr = [0, 0, 0]
    elif osi.ndf == 6:
        fn = Fix6DOF
        arr = [0, 0, 0, 0, 0, 0]
    else:
        raise ModelError("'create_fixities_for_dof' only supports ndf=1,2,3,6")
    arr[dof - 1] = 1  # fix only the requested DOF
    for node in nodes:
        if node is None:
            continue
        try:
            fn(osi, node, *arr)
        except ValueError:
            # A ValueError from the Fix* call is swallowed - presumably the
            # node is already fixed (TODO confirm).  When state == 3 the last
            # recorded command is dropped so the command log stays consistent.
            # NOTE(review): assumes state 3 means 'recording commands' - confirm.
            if osi.state == 3:
                osi.commands = osi.commands[:-1]
            pass
#
# class Fix2DOaFMulti(OpenSeesMultiCallObject):
# op_base_type = "fix"
# op_type = None
#
# def __init__(self, osi, nodes, x, y):
# """
# Create a homogeneous SP constraint.
#
# Parameters
# ----------
# osi: OpenSeesInstance
# nodes: list of OpenSeesObject.node.Node()
# x: int
# Fixity in x-direction
# y: int
# Fixity in y-direction
# """
# self.nodes = nodes
# self.x = x
# self.y = y
# self._multi_parameters = []
# for node in self.nodes:
# self._multi_parameters.append([node.tag, self.x, self.y])
# self.to_process(osi)
class Fix3DOF(OpenSeesObject):
    """Homogeneous single-point constraint for a 3-DOF node (OpenSees ``fix``)."""
    op_base_type = "fix"
    op_type = None

    def __init__(self, osi, node, x, y, z_rot):
        """
        :param osi: OpenSeesInstance
        :param node: node to restrain
        :param x: 1 to fix, 0 to free the x-direction
        :param y: 1 to fix, 0 to free the y-direction
        :param z_rot: 1 to fix, 0 to free rotation about the z-axis
        """
        self.node = node
        self.x = x
        self.y = y
        self.z_rot = z_rot
        self._parameters = [node.tag, x, y, z_rot]
        self.to_process(osi)
class Fix3DOFMulti(OpenSeesMultiCallObject):
    """Apply a homogeneous SP constraint to many 3-DOF nodes in one object."""
    op_base_type = "fix"
    op_type = None

    def __init__(self, osi, nodes, x, y, z_rot, is_none='raise', already_fixed='raise'):
        """
        :param osi: OpenSeesInstance
        :param nodes: list of nodes to restrain
        :param x: 1 to fix, 0 to free the x-direction
        :param y: 1 to fix, 0 to free the y-direction
        :param z_rot: 1 to fix, 0 to free rotation about the z-axis
        :param is_none: 'raise' to propagate an AttributeError from a node
            without a tag (e.g. None), any other value to skip that node
        :param already_fixed: 'raise' to propagate a ValueError from the fix
            call, any other value to skip that node
        """
        self.nodes = nodes
        self.x = x
        self.y = y
        self.z_rot = z_rot
        self._multi_parameters = []
        for nd in self.nodes:
            try:
                self._multi_parameters.append([nd.tag, self.x, self.y, self.z_rot])
                self.to_process(osi)
            except AttributeError:
                if is_none == 'raise':
                    raise
            except ValueError:
                if already_fixed == 'raise':
                    raise
class Fix4DOF(OpenSeesObject):
    """Homogeneous single-point constraint for a 4-DOF node (OpenSees ``fix``)."""
    op_base_type = "fix"
    op_type = None

    def __init__(self, osi, node, x, y, z, pp):
        """
        :param osi: OpenSeesInstance
        :param node: node to restrain
        :param x: 1 to fix, 0 to free the x-direction
        :param y: 1 to fix, 0 to free the y-direction
        :param z: 1 to fix, 0 to free the z-direction
        :param pp: 1 to fix, 0 to free the pore-pressure DOF
        """
        self.node = node
        self.x = x
        self.y = y
        self.z = z
        self.pp = pp
        self._parameters = [node.tag, x, y, z, pp]
        self.to_process(osi)
class Fix6DOF(OpenSeesObject):
    """Homogeneous single-point constraint for a 6-DOF node (OpenSees ``fix``)."""
    op_base_type = "fix"
    op_type = None

    def __init__(self, osi, node, x, y, z, x_rot, y_rot, z_rot):
        """
        :param osi: OpenSeesInstance
        :param node: node to restrain
        :param x: 1 to fix, 0 to free the x-direction
        :param y: 1 to fix, 0 to free the y-direction
        :param z: 1 to fix, 0 to free the z-direction
        :param x_rot: 1 to fix, 0 to free rotation about the x-axis
        :param y_rot: 1 to fix, 0 to free rotation about the y-axis
        :param z_rot: 1 to fix, 0 to free rotation about the z-axis
        """
        self.node = node
        self.x = x
        self.y = y
        self.z = z
        self.x_rot = x_rot
        self.y_rot = y_rot
        self.z_rot = z_rot
        self._parameters = [node.tag, x, y, z, x_rot, y_rot, z_rot]
        self.to_process(osi)
class Fix6DOFMulti(OpenSeesMultiCallObject):
    """Apply a homogeneous SP constraint to many 6-DOF nodes in one object.

    Brought in line with ``Fix2DOFMulti``/``Fix3DOFMulti`` by adding the
    ``already_fixed`` option (default 'raise' keeps the previous behaviour).
    """
    op_base_type = "fix"
    op_type = None

    def __init__(self, osi, nodes, x, y, z, x_rot, y_rot, z_rot, is_none='raise', already_fixed='raise'):
        """
        :param osi: OpenSeesInstance
        :param nodes: list of nodes to restrain
        :param x: 1 to fix, 0 to free the x-direction
        :param y: 1 to fix, 0 to free the y-direction
        :param z: 1 to fix, 0 to free the z-direction
        :param x_rot: 1 to fix, 0 to free rotation about the x-axis
        :param y_rot: 1 to fix, 0 to free rotation about the y-axis
        :param z_rot: 1 to fix, 0 to free rotation about the z-axis
        :param is_none: 'raise' to propagate an AttributeError from a node
            without a tag (e.g. None), any other value to skip that node
        :param already_fixed: 'raise' to propagate a ValueError from the fix
            call, any other value to skip that node
        """
        self.nodes = nodes
        self.x = x
        self.y = y
        self.z = z
        self.x_rot = x_rot
        self.y_rot = y_rot
        self.z_rot = z_rot
        self._multi_parameters = []
        for node in self.nodes:
            try:
                self._multi_parameters.append([node.tag, self.x, self.y, self.z, self.x_rot, self.y_rot, self.z_rot])
                self.to_process(osi)
            except AttributeError as e:
                if is_none == 'raise':
                    raise e
            except ValueError as e:
                if already_fixed == 'raise':
                    raise e
class Fix(OpenSeesObject):
    """Homogeneous SP constraint with an explicit list of fixity flags."""
    op_base_type = "fix"
    op_type = None

    def __init__(self, osi, node, fixities):
        """
        :param osi: OpenSeesInstance
        :param node: node to restrain
        :param fixities: iterable of 0/1 flags, one per DOF
        """
        self.node = node
        self.fixities = fixities
        self._parameters = [node.tag, *fixities]
        self.to_process(osi)
class Load(OpenSeesObject):
    """Nodal load (OpenSees ``load`` command)."""
    op_base_type = "load"
    op_type = None

    def __init__(self, osi, node, load_values):
        """
        :param osi: OpenSeesInstance
        :param node: node to load
        :param load_values: load component per DOF
        """
        self.node = node
        self.load_values = load_values
        self._parameters = [node.tag, *load_values]
        self.to_process(osi)
class EleLoad2DPoint(OpenSeesObject):
    """Point load on a 2D beam element (``eleLoad`` with '-beamPoint')."""
    op_base_type = "eleLoad"
    op_type = None

    def __init__(self, osi, ele, p_y, x, p_x=None):
        """
        :param osi: OpenSeesInstance
        :param ele: element to load
        :param p_y: transverse point load
        :param x: position of the load as a fraction of element length from node i
        :param p_x: optional axial point load
        """
        self.ele_tag = ele.tag
        self.x = float(x)
        self.p_y = float(p_y)
        self.p_x = p_x
        params = ['-ele', self.ele_tag, '-type', '-beamPoint', self.p_y, self.x]
        if p_x is not None:
            params.append(float(p_x))
        self._parameters = params
        self.to_process(osi)
class EleLoad2DUniform(OpenSeesObject):
    """Uniformly distributed load on a 2D beam element (``eleLoad`` with '-beamUniform')."""
    op_base_type = "eleLoad"
    op_type = None

    def __init__(self, osi, ele, w_y, w_x=None):
        """
        :param osi: OpenSeesInstance
        :param ele: element to load
        :param w_y: transverse distributed load
        :param w_x: optional axial distributed load
        """
        self.ele_tag = ele.tag
        self.w_y = float(w_y)
        self.w_x = w_x
        params = ['-ele', self.ele_tag, '-type', '-beamUniform', self.w_y]
        if w_x is not None:
            params.append(float(w_x))
        self._parameters = params
        self.to_process(osi)
class SP(OpenSeesObject):
    """Single-point constraint (OpenSees ``sp`` command)."""
    op_base_type = "sp"
    op_type = None

    def __init__(self, osi, node, dof, dof_values):
        """
        :param osi: OpenSeesInstance
        :param node: node to constrain
        :param dof: degree of freedom (coerced to int)
        :param dof_values: constraint value(s) for the DOF
        """
        self.node = node
        self.dof = int(dof)
        self.dof_values = dof_values
        self._parameters = [node.tag, self.dof, *dof_values]
        self.to_process(osi)
class ImposedMotion(OpenSeesObject):
    """Wraps the OpenSees ``ImposedMotion`` command for a node."""
    op_base_type = "ImposedMotion"
    op_type = None

    def __init__(self, osi, node, ory, g_motion_tag):
        """
        :param osi: OpenSeesInstance
        :param node: node to which the motion is applied
        :param ory: direction of the motion (presumably the DOF index - confirm)
        :param g_motion_tag: tag of the ground-motion object
        """
        self.node = node
        self.ory = ory
        self.g_motion_tag = g_motion_tag
        self._parameters = [node.tag, ory, g_motion_tag]
        self.to_process(osi)
def analyze(osi, num_inc=1, dt=None, dt_min=None, dt_max=None, jd=None):
    """Perform an analysis step (OpenSees ``analyze`` command).

    Returns 0 on success and a negative value on failure.

    :param osi: o3.OpenSeesInstance
    :param num_inc: number of analysis increments
    :param dt: time step (transient analysis only)
    :param dt_min: minimum allowable time step (VariableTransient analysis only)
    :param dt_max: maximum allowable time step (VariableTransient analysis only)
    :param jd: target number of integration steps (VariableTransient analysis
        only); the time step is scaled by n_iters / jd and the analysis
        continues until the total time increment >= num_inc * dt
    """
    if dt is None:
        params = [int(num_inc)]
    elif dt_min is None:
        params = [int(num_inc), float(dt)]
    else:
        params = [int(num_inc), float(dt), dt_min, dt_max, jd]
    return osi.to_process('analyze', params)
def loop_for_analyze_w_restart(osi, num_inc=1, dt=None, dt_min=None, dt_max=None, jd=None, alts=None, dtime=None):
    """Recursive fallback loop used by ``analyze_w_restart``.

    Retries the analysis with the first alternative in *alts*; on failure,
    recurses with the remaining alternatives.

    :param osi: OpenSeesInstance with a logfile configured (``osi.set_logfile``)
    :param alts: list of alternatives; each entry is indexed like ``(dt, alg)``
        where a non-None first item overrides the current time step
    :param dtime: total time to analyse; defaults to ``num_inc * dt``
    :raises ValueError: if no logfile has been configured on *osi*
    """
    if osi.logfile_name is None:
        raise ValueError('cannot find logfile, use osi.set_logfile')
    if len(alts) == 0:
        return 1  # failed - no alternatives left to try
    init_time = get_time(osi)
    curr_alt = alts[0]
    # alt: (dt, alg)
    if dtime is None:
        dtime = num_inc * dt
    prev_dt = dt  # NOTE(review): stored but never used - confirm intent
    if curr_alt[0] is not None:
        dt = curr_alt[0]
        num_inc = int(dtime / dt)
    fail = analyze(osi, num_inc=num_inc, dt=dt, dt_min=dt_min, dt_max=dt_max, jd=jd)
    if fail:
        curr_time = get_time(osi)
        # NOTE(review): dtime is set to the time elapsed in this attempt, not
        # the time remaining (dtime - elapsed) - confirm this is intended
        dtime = curr_time - init_time
        if dtime <= 0:
            return 0
        alts = alts[1:]
        fail = loop_for_analyze_w_restart(osi, num_inc=num_inc, dt=dt, dt_min=dt_min, dt_max=dt_max, jd=jd,
                                          alts=alts, dtime=dtime)
    # NOTE(review): the recursive result assigned to `fail` is discarded and 0
    # (success) is always returned - confirm this is intended
    return 0
class AlternativeAnalysis(object):
    """Container for an alternative time step to try when an analysis step fails."""

    def __init__(self, dt=None):
        # alternative time step (None -> keep the current step)
        self.dt = dt
def analyze_w_restart(osi, num_inc=1, dt=None, dt_min=None, dt_max=None, jd=None, alts=None, dtime=1, nfs=1):
    """Run a transient analysis, retrying with alternative settings on failure.

    Attempts up to *nfs* passes; each pass analyses the time remaining until
    ``num_inc * dt`` has elapsed, falling back to ``loop_for_analyze_w_restart``
    with the alternatives in *alts* when a pass fails.

    :param osi: OpenSeesInstance with a logfile configured (``osi.set_logfile``)
    :param num_inc: number of analysis increments for the full run
    :param dt: time step
    :param dt_min: minimum allowable time step (VariableTransient only)
    :param dt_max: maximum allowable time step (VariableTransient only)
    :param jd: target number of integration steps (VariableTransient only)
    :param alts: list of alternative settings for the fallback loop
    :param dtime: time increment passed through to the fallback loop
    :param nfs: maximum number of retry passes
    :raises ValueError: if no logfile has been configured on *osi*
    """
    if osi.logfile_name is None:
        raise ValueError('cannot find logfile, use osi.set_logfile')
    if alts is None:
        alts = []
    init_time = get_time(osi)
    tot_time = num_inc * dt
    for i in range(nfs):
        curr_loc_time = get_time(osi) - init_time
        if curr_loc_time >= tot_time:
            break
        # Bug fix: remaining increments = (remaining time) / dt.  The original
        # computed tot_time - (curr_loc_time / dt) due to missing parentheses,
        # mixing seconds with a step count.
        num_inc = int((tot_time - curr_loc_time) / dt)
        fail = analyze(osi, num_inc=num_inc, dt=dt, dt_min=dt_min, dt_max=dt_max, jd=jd)
        if fail:
            curr_loc_time = get_time(osi) - init_time
            if curr_loc_time >= tot_time:
                break
            num_inc = int((tot_time - curr_loc_time) / dt)  # same fix as above
            fail = loop_for_analyze_w_restart(osi, num_inc=num_inc, dt=dt, dt_min=dt_min, dt_max=dt_max, jd=jd,
                                              alts=alts, dtime=dtime)
        else:
            break
def get_node_disp(osi, node, dof=None, node_as_tag=False):
    """Return a node's displacement (all DOFs, or one DOF when `dof` is given).

    When `node_as_tag` is True, `node` is already an integer tag.
    """
    tag = node if node_as_tag else node.tag
    params = [tag] if dof is None else [tag, dof]
    return osi.to_process('nodeDisp', params)
def get_node_disps(osi, node, dofs, node_as_tag=False):
    """Return the displacement of `node` for each DOF in `dofs` (one query per DOF)."""
    tag = node if node_as_tag else node.tag
    return [osi.to_process('nodeDisp', [tag, dof]) for dof in dofs]
def get_all_node_disps(osi, dof):
    """Return the displacement in direction `dof` for every node (slow: one query per node)."""
    return [osi.to_process('nodeDisp', [tag, dof]) for tag in get_node_tags(osi)]
def get_node_vel(osi, node, dof):
    """Return the velocity of `node` in direction `dof`."""
    return osi.to_process('nodeVel', [node.tag, dof])
def get_node_accel(osi, node, dof):
    """Return the acceleration of `node` in direction `dof`."""
    return osi.to_process('nodeAccel', [node.tag, dof])
def gen_reactions(osi):
    """Issue the 'reactions' command so subsequent nodeReaction queries are current."""
    return osi.to_process('reactions', [])
def get_node_reaction(osi, node, dof):
    """Return the reaction at `node` in direction `dof` (call gen_reactions first)."""
    return osi.to_process('nodeReaction', [node.tag, dof])
def get_node_reactions(osi, node):
    """Return the reactions at `node` for all DOFs (call gen_reactions first)."""
    return osi.to_process('nodeReaction', [node.tag])
def get_node_unbalance(osi, node, dof=None):
    """Return the unbalanced force at `node` (all DOFs, or one DOF when given)."""
    params = [node.tag] if dof is None else [node.tag, dof]
    return osi.to_process('nodeUnbalance', params)
def get_node_pressure(osi, node):
    """Return the pressure at `node` (second parameter is hard-coded to 2 here)."""
    return osi.to_process('nodePressure', [node.tag, 2])
def get_ele_response(osi, ele, arg, extra_args=None):
    """Query an element response; any `extra_args` are inserted before `arg`."""
    if extra_args is None:
        return osi.to_process('eleResponse', [ele.tag, arg])
    return osi.to_process('eleResponse', [ele.tag, *extra_args, arg])
def remove_sp(osi, node, dof, pattern=None):
    """Remove a single-point constraint from `node` in direction `dof`.

    When `pattern` is given, only the constraint belonging to that load
    pattern is removed.
    """
    params = ['sp', node.tag, dof]
    if pattern is not None:
        params.append(pattern.tag)
    return osi.to_process('remove', params)
def remove_load_pattern(osi, load_pattern):
    """Remove a load pattern (and its loads) from the domain."""
    return osi.to_process('remove', ['loadPattern', load_pattern.tag])
def remove(osi, o3_obj):
    """Generic remover: delete any o3seespy object via its op_base_type and tag."""
    return osi.to_process('remove', [o3_obj.op_base_type, o3_obj.tag])
def remove_recorders(osi):
    """Detach every recorder from the model."""
    return osi.to_process('remove', ['recorders'])
def set_parameter(osi, value, eles=None, ele_range=None, args=None, ele_tag_range=None):
    """Set a parameter to `value` on elements.

    Exactly one of `eles` (objects), `ele_range` (objects) or `ele_tag_range`
    (raw tags) selects the elements; `args` (required, non-empty) names the
    parameter and is stringified before sending.
    """
    params = ['-val', value]
    if eles is not None:
        params += ['-ele'] + [x.tag for x in eles]
    elif ele_range is not None:
        params += ['-eleRange'] + [x.tag for x in ele_range]
    elif ele_tag_range is not None:
        params += ['-eleRange'] + list(ele_tag_range)
    if not args:
        raise ValueError("'args' can not be None in set_parameter")
    params += [str(x) for x in args]
    return osi.to_process('setParameter', params)
def set_time(osi, time):
    """Reset the analysis pseudo-time to `time` (no return value)."""
    osi.to_process('setTime', [time])
def get_time(osi):
    """Return the current analysis pseudo-time."""
    return osi.to_process('getTime', [])
def wipe_analysis(osi):
    """Issue the 'wipeAnalysis' command (no return value)."""
    osi.to_process('wipeAnalysis', [])
def wipe(osi=None):
    """Wipe the model; with no `osi` this falls back to the direct openseespy call."""
    if osi is None:
        return opy.wipe()
    osi.to_process('wipe', [])
def reset(osi):
    """Issue the 'reset' command (no return value)."""
    osi.to_process('reset', [])
def record(osi):
    """Make every recorder record the current state of the model."""
    return osi.to_process('record', [])
def load_constant(osi, time=None):
    """Hold the applied loads constant, optionally resetting the pseudo-time."""
    params = [] if time is None else ['-time', time]
    osi.to_process('loadConst', params)
def update_material_stage(osi, material, stage):
    """Send the 'updateMaterialStage' command for `material` at `stage`."""
    osi.to_process('updateMaterialStage', ['-material', material.tag, '-stage', stage])
def get_eigen(osi, solver='genBandArpack', n=1):
    """Run an eigen analysis and always return the eigenvalues as a list."""
    out = osi.to_process('eigen', [f'-{solver}', n])
    # A single eigenvalue may come back as a bare scalar; normalise to a list.
    return out if hasattr(out, '__len__') else [out]
def get_node_eigen_vector(osi, node, eigen_vector, dof):
    """Return one component of a node's eigenvector (mode `eigen_vector`, direction `dof`)."""
    return osi.to_process('nodeEigenvector', [node.tag, eigen_vector, dof])
def get_pid(osi):
    """Return the processor ID of the calling processor."""
    return osi.to_process('getPID', [])
def get_np(osi):
    """Return the total number of processors."""
    return osi.to_process('getNP', [])
def set_num_threads(osi, num):
    """Set the total number of threads (command name is 'setNumThread', singular)."""
    return osi.to_process('setNumThread', [num])
def get_num_threads(osi):
    """Return the total number of threads available."""
    return osi.to_process('getNumThread', [])
def get_node_dofs(osi, node):
    """Return the global DOF numbering of `node`.

    BUG FIX: parameters are now passed to `to_process` as a list, consistent
    with every other wrapper in this module (previously the bare tag was
    passed as the parameters argument).
    """
    return osi.to_process('nodeDOFs', [node.tag])
def get_node_tags(osi, mesh=None):
    """Return the tags of all nodes, optionally restricted to one mesh."""
    extra = [] if mesh is None else ['-mesh', mesh.tag]
    return osi.to_process('getNodeTags', extra)
def get_ele_tags(osi, mesh=None):
    """Return the tags of all elements, optionally restricted to one mesh."""
    extra = [] if mesh is None else ['-mesh', mesh.tag]
    return osi.to_process('getEleTags', extra)
def get_node_coords(osi, node, ndm=None, node_as_tag=False):
    """Return the coordinates of a node (optionally only `ndm` dimensions).

    When `node_as_tag` is True, `node` is already an integer tag.
    """
    tag = node if node_as_tag else node.tag
    params = [tag] if ndm is None else [tag, ndm]
    return osi.to_process('nodeCoord', params)
def get_all_node_coords(osi, ndm=None):
    """Return coordinates of every node (slow: one query per node)."""
    extra = [] if ndm is None else [ndm]
    return [osi.to_process('nodeCoord', [tag, *extra]) for tag in get_node_tags(osi)]
def get_all_node_coords_w_tag(osi, ndm=None):
    """Return [tag, x, y, ...] for every node (slow: one query per node)."""
    extra = [] if ndm is None else [ndm]
    return [[tag, *osi.to_process('nodeCoord', [tag, *extra])]
            for tag in get_node_tags(osi)]
def get_all_ele_node_tags(osi):
    """Return, per element, the list of its node tags (slow: one query per element)."""
    return [osi.to_process('eleNodes', [tag]) for tag in get_ele_tags(osi)]
def get_all_ele_node_tags_as_dict(osi):
    """Map each element tag to its node tags (slow: one query per element)."""
    tags = get_ele_tags(osi)
    if not hasattr(tags, '__len__'):
        tags = [tags]  # a single element comes back as a bare scalar
    return {tag: osi.to_process('eleNodes', [tag]) for tag in tags}
def get_all_ele_node_tags_by_n_nodes(osi):
    """Group element->node-tag mappings by the number of nodes per element."""
    grouped = {}
    for ele_tag in get_ele_tags(osi):
        node_tags = osi.to_process('eleNodes', [ele_tag])
        if node_tags is None:
            continue  # skip elements with no node-tag response
        grouped.setdefault(len(node_tags), {})[ele_tag] = node_tags
    return grouped
class Parameter(OpenSeesObject):
    """An OpenSees 'parameter' attached to an element (optionally a section/material)."""
    op_base_type = 'parameter'
    op_type = None

    def __init__(self, osi, ele, section=None, mat=None, pname=''):
        """
        Parameters
        ----------
        osi : OpenSeesInstance
            The instance the parameter is registered with (its `n_params`
            counter supplies the tag).
        ele : element object
            Element the parameter applies to.
        section : optional
            Section identifier to scope the parameter to.
        mat : optional
            Material identifier to scope the parameter to.
        pname : str
            Name of the parameter (e.g. a material property name).
        """
        self.ele = ele
        self.section = section
        self.mat = mat
        self.pname = pname
        # Tags are allocated sequentially from the instance's parameter counter.
        osi.n_params += 1
        self._tag = osi.n_params
        self._parameters = [self.tag, 'element', self.ele.tag]
        if self.section is not None:
            self._parameters += ['section', self.section]  # unsure if this is correct
        if self.mat is not None:
            self._parameters += ['material', self.mat]
        self._parameters.append(pname)
        self.to_process(osi)
def update_parameter(osi, param, value):
    """Set an existing Parameter object to a new value."""
    return osi.to_process('updateParameter', [param.tag, value])
def start(osi):
    """Start the timer."""
    return osi.to_process('start', [])
def stop(osi):
    """Stop the timer and print timing information."""
    return osi.to_process('stop', [])
def update_element_domain(osi):
    """Issue the 'updateElementDomain' command."""
    return osi.to_process('updateElementDomain', [])
def domain_change(osi):
    """Issue the 'domainChange' command."""
    return osi.to_process('domainChange', [])
def remesh(osi, alpha):
    """Issue the 'remesh' command with the given alpha value."""
    return osi.to_process('remesh', [alpha])
def set_element_rayleigh_damping_factors(osi, ele, alpha_m, beta_k, beta_k0, beta_kc):
    """Set per-element Rayleigh damping factors."""
    return osi.to_process('setElementRayleighDampingFactors',
                          [ele.tag, alpha_m, beta_k, beta_k0, beta_kc])
def set_ele_rayleigh_damp(osi, ele, alpha_m, beta_k, beta_k0, beta_kc):
    """Short-named alias of set_element_rayleigh_damping_factors."""
    return osi.to_process('setElementRayleighDampingFactors',
                          [ele.tag, alpha_m, beta_k, beta_k0, beta_kc])
def imposed_motion(osi, node, direct, gm):
    """Impose ground motion `gm` on `node` in direction `direct`."""
    return osi.to_process('imposedMotion', [node.tag, direct, gm.tag])
| o3seespy/o3seespy | o3seespy/command/common.py | common.py | py | 30,746 | python | en | code | 16 | github-code | 13 |
32122643205 | #-*-coding:utf-8-*-
import os
import io
import sys
import magic
from raids_h import rehis_zero_pic,rehis_zero
import numpy as np
from PIL import Image
import tensorflow as tf
import base64,time
from config import config_h
# Saved TensorFlow model location plus preprocessing/labelling constants.
_MODEL_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data/models/1547856517')
_IMAGE_SIZE = 64    # model input is a 64x64 RGB image
_BATCH_SIZE = 128   # the graph expects a fixed batch; one image is replicated to fill it
_LABEL_MAP = {0:'drawings', 1:'hentai', 2:'neutral', 3:'porn', 4:'sexy'}
def standardize(img):
    """Normalize an image array to zero mean and unit variance.

    NOTE(review): a constant image has std == 0 and will divide by zero.
    """
    return (img - np.mean(img)) / np.std(img)
def load_image(data):
    """Decode a base64-encoded image into a standardized 64x64 float32 RGB array."""
    raw = base64.b64decode(data)
    img = Image.open(io.BytesIO(raw))
    img = img.convert('RGB')
    img = img.resize((_IMAGE_SIZE, _IMAGE_SIZE))
    img.load()
    arr = np.asarray(img, dtype=np.float32)
    arr = standardize(arr)
    print('load_image')
    return arr
def predict(image_date):
    """Classify one base64-encoded image with the saved NSFW model.

    Returns {'class': best_label, 'probability': {label: prob, ...}}.

    NOTE(review): a fresh TF session is created and the SavedModel reloaded on
    every call -- very slow for batch use; consider hoisting the session.
    """
    with tf.Session() as sess:
        print('******')
        graph = tf.get_default_graph();
        tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], _MODEL_DIR)
        inputs = graph.get_tensor_by_name("input_tensor:0")
        probabilities_op = graph.get_tensor_by_name('softmax_tensor:0')
        class_index_op = graph.get_tensor_by_name('ArgMax:0')
        image_data = load_image(image_date)
        # The graph expects a fixed batch, so the single image is replicated;
        # only row 0 of the output is used.
        probabilities, class_index = sess.run([probabilities_op, class_index_op],
                                              feed_dict={inputs: [image_data] * _BATCH_SIZE})
        probabilities_dict = {_LABEL_MAP.get(i): l for i, l in enumerate(probabilities[0])}
        pre_label = _LABEL_MAP.get(class_index[0])
        result = {"class": pre_label, "probability": probabilities_dict}
        return result
# Misspelt name ('ckeck') is kept deliberately: it is re-read and aliased to
# check_key further down in this module.
ckeck_key = config_h.get_config('check','check_key')
# Disabled worker: '__main1__' never matches __name__, so this loop does not run.
if __name__ == '__main1__':
    while True:
        url = rehis_zero_pic.get('pic_check')
        if url:
            rehis_zero_pic.delete('pic_check')
            count=rehis_zero_pic.hlen(url)
            images=rehis_zero_pic.hscan(url,cursor=0,count=count)
            s = 0
            # Classification categories: {'drawings', 'hentai', 'neutral', 'porn', 'sexy'}
            for k,v in images[1].items():
                print(v)
                res = predict(v)
                print(res)
                pic_type=res['class']
                # Store the image under a key named after its predicted class.
                rehis_zero_pic.set(pic_type, k)
        time.sleep(1)
# Disabled CLI entry point: '__main1__' never matches __name__.
if __name__ == '__main1__':
    argv = sys.argv
    if len(argv) < 2:
        print("usage: python nsfw_predict <image_path>")
        # BUG FIX: previously execution fell through to argv[1] and raised
        # IndexError after printing the usage message.
        sys.exit(1)
    image_path = argv[1]
    print()
    res = predict(image_path)
    print(res)
# Re-read the config key and give it a correctly spelt alias used by the main loop.
ckeck_key = config_h.get_config('check','check_key')
check_key = ckeck_key
from tool.check_file import check_file  # text-content scorer used for 'text' items
if __name__ == '__main__':
    # Main worker loop: scan the pending-check hash and route each entry by its
    # type component ('image' / 'text' / 'video'), keys look like "<x>:<type>:<name>".
    while True:
        s=rehis_zero.hscan(check_key,cursor=0,count=10)
        # s = rehis_zero.hscan_iter(check_key,count=1)
        # print(s)
        for i in s[1]:
            key = i.decode('utf-8')
            check_list=key.split(':')
            if check_list[1] == 'image':
                try:
                    v=rehis_zero_pic.hget(check_key, key)
                    rehis_zero_pic.hdel(check_key,key)
                    res = predict(v)
                    if res['class'] == 'porn' or res['class'] == 'hentai':
                        # NOTE(review): the bucket name 'pron' looks like a typo
                        # for 'porn' but is a runtime key -- confirm before changing.
                        rehis_zero_pic.hset('pron',key,v)
                        rehis_zero_pic.hdel(check_key, key)
                    else:
                        rehis_zero_pic.hset('normal', key, v)
                        rehis_zero_pic.hdel(check_key,key)
                    # print(res,check_key)
                except Exception as e:
                    print(key, ':', e)
            elif check_list[1] == 'text':
                # NOTE(review): text is read from rehis_zero while images use
                # rehis_zero_pic -- confirm the two connections are intended.
                s = rehis_zero.hget(check_key,key)
                # rehis_zero_pic.hdel(check_key,key)
                try:
                    # Sniff the file type from the first 2 KiB.
                    file_title=s[0:2048]
                    file_type=magic.from_buffer(file_title)
                    print(file_type)
                    if 'text' in file_type:
                        print('่ฟไธชๆฏtext:', file_type)
                        print(type(s))
                        # print(s.decode('utf-8'))
                        # Write to a temp file so check_file can score it.
                        with open(f'tmp/{check_list[2]}','wb') as f:
                            f.write(s)
                        score=check_file(file=f'tmp/{check_list[2]}')
                        print(score,type(score))
                        score=int(score)
                        if score > 10:
                            rehis_zero.hset('porn',key,s)
                        else:
                            rehis_zero.hset('normal', key, s)
                        rehis_zero.hdel(check_key, key)
                    elif 'Word' in file_type:
                        # Word documents get the same score-and-bucket treatment.
                        with open(f'tmp/{check_list[2]}', 'wb') as f:
                            f.write(s)
                        score = check_file(file=f'tmp/{check_list[2]}')
                        print(score, type(score))
                        score = int(score)
                        if score > 10:
                            rehis_zero.hset('porn', key, s)
                        else:
                            rehis_zero.hset('normal', key, s)
                        rehis_zero.hdel(check_key, key)
                    elif 'at least v1.0 to extract' in file_type:
                        # libmagic's description for some archive formats.
                        with open(f'tmp/{check_list[2]}', 'wb') as f:
                            f.write(s)
                        score = check_file(file=f'tmp/{check_list[2]}')
                        print(score, type(score))
                        score = int(score)
                        if score > 10:
                            rehis_zero.hset('porn', key, s)
                        else:
                            rehis_zero.hset('normal', key, s)
                        rehis_zero.hdel(check_key, key)
                    else:
                        # Unknown type: drop it from the pending hash.
                        print('ๆช็ฅ็ฑปๅ',key)
                        rehis_zero.hdel(check_key, key)
                except Exception as e:
                    print('ๆช็ฅๅๅ ',e)
            elif check_list[1] == 'video':
                # Videos are not checked; just removed.
                print('ๅ ้ค่ง้ขๆไปถ:',key)
                rehis_zero.hdel(check_key, key)
            else:
                rehis_zero.hdel(check_key, key)
| g37502/nsfw-master | nsfw_predict.py | nsfw_predict.py | py | 6,377 | python | en | code | 1 | github-code | 13 |
10385640905 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import shutil
import sys
from itertools import groupby
import gzip
import sys
# Tab-separated feature-line schema: one name per column.
_CSV_COLUMNS = [
    'SVU_TYPE', 'SVV_VID', 'SVU_UID', 'SVA_UID',
    'SVDWELL', 'SVSHARE', 'SVJOIN', 'SVCOMMENT', 'SVEXPOSE', 'SVFOLLOW', 'SVLIKE',
    'SVU_COUNTRY', 'SVU_OS', 'SVV_COUNTRY',
    'SVV_DURATION', 'SVV_DWELL', 'SVV_JOIN', 'SVV_SHARE', 'SVV_CLICK', 'SVV_COMMENT', 'SVV_FOLLOW', 'SVV_LIKE',
    'SVV_CLICKRATE', 'SVV_LIKERATE', 'SVV_COMMENTRATE', 'SVV_SHARERATE', 'SVV_FOLLOWRATE', 'SVV_WATCHALLRATE'
]
# Per-column defaults (appears unused in this script; kept for schema parity).
_CSV_COLUMN_DEFAULTS = [
    [''], [''], [''], [''],
    [0], [0], [0], [0], [0], [0], [0],
    [''], [''], [''],
    [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0],
    [0.0], [0.0], [0.0], [0.0], [0.0], [0.0]
]
# Column positions of the sample-identifying fields.
_POS_INDEX = {
    'u_uid': _CSV_COLUMNS.index('SVU_UID'),
    'u_country': _CSV_COLUMNS.index('SVU_COUNTRY'),
}
# label name -> [column position, positive threshold]; thresholds are
# overwritten from the command-line flags in __main__.
label_conf_dict = {'SVDWELL': [_CSV_COLUMNS.index('SVDWELL'), 17],
                   'SVSHARE': [_CSV_COLUMNS.index('SVSHARE'), 1],
                   'SVFOLLOW': [_CSV_COLUMNS.index('SVFOLLOW'), 1],
                   'SVLIKE': [_CSV_COLUMNS.index('SVLIKE'), 1]}
# Command-line interface: per-label positive thresholds, input paths, and
# bookkeeping tags for the output rows.
parser = argparse.ArgumentParser()
parser.add_argument(
    '--dwell_label_thresh', type=int, default=1, help='dwell_label_thresh')
parser.add_argument(
    '--share_label_thresh', type=int, default=1, help='share_label_thresh')
parser.add_argument(
    '--like_label_thresh', type=int, default=1, help='like_label_thresh')
parser.add_argument(
    '--follow_label_thresh', type=int, default=1, help='follow_label_thresh')
parser.add_argument(
    '--input_feature_data', type=str, default='/tmp/census_data/adult.data',
    help='Path to the input_feature_data.')
parser.add_argument(
    '--input_score_data', type=str, default='/tmp/census_data/adult.data',
    help='Path to the input_score_data.')
parser.add_argument(
    '--time_tag', type=str, default='2018010100',
    help='time_tag for output')
parser.add_argument(
    '--project', type=str, default='model_name',
    help='project name')
def str2bool(v):
    """Parse a human-friendly boolean string; reject anything unrecognized."""
    word = v.lower()
    if word in ('yes', 'true', 't', 'y', '1'):
        return True
    if word in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
# Boolean flag parsed through str2bool so "--is_input_gzip false" works.
parser.add_argument("--is_input_gzip", type=str2bool, nargs='?',
                    const=True, default='false',
                    help="is_input_gzip")
# Dataset sizes (appears unused in this script; kept from the original template).
_NUM_EXAMPLES = {
    'train': 32561,
    'validation': 16281,
}
def translate(feature, predict):
    """Turn one raw feature line plus its score line into (sample_info, labels, score).

    `sample_info` is [uid, country]; `labels` maps each label name to 1.0/0.0
    depending on whether its column meets the configured threshold.
    """
    items = feature.split('\t')
    labels = {name: float(int(items[pos]) >= thresh)
              for name, (pos, thresh) in label_conf_dict.items()}
    score = round(float(predict), 6)
    info = [items[_POS_INDEX['u_uid']], items[_POS_INDEX['u_country']]]
    return info, labels, score
def read_input(features_path, predicts_path):
    """Read feature lines and prediction lines, pairing each into a translated sample.

    NOTE(review): gzip.open yields bytes on Python 3; rstrip('\n') on bytes
    would fail -- confirm the gzip path is exercised under Python 3.
    """
    if FLAGS.is_input_gzip:
        features = [line.rstrip('\n') for line in gzip.open(features_path)]
    else:
        features = [line.rstrip('\n') for line in open(features_path)]
    predicts = [line.rstrip('\n') for line in open(predicts_path)]
    # BUG FIX: map(translate, zip(features, predicts)) passed a single
    # (feature, predict) tuple to the two-argument translate() and raised
    # TypeError; unpack the pairs explicitly instead.
    return [translate(f, p) for f, p in zip(features, predicts)]
def formatdata(data, thr):
    """Bucket (uid, label, score) rows into [score, n_pos, n_neg] rows, highest score first.

    A row counts as positive when its label is >= `thr`.
    """
    buckets = {}
    for _uid, label, score in data:
        pos, neg = buckets.get(score, (0, 0))
        if label >= thr:
            pos += 1
        else:
            neg += 1
        buckets[score] = (pos, neg)
    rows = [[score, pos, neg] for score, (pos, neg) in buckets.items()]
    return sorted(rows, key=lambda x: -x[0])
def format_data_for_auc(data, key):
    """Like formatdata, but labels come from each row's label dict under `key`.

    Rows are (uid, label_dict, score); a row is positive when
    label_dict[key] >= 1.0.  Returns [score, n_pos, n_neg] rows sorted by
    descending score.
    """
    buckets = {}
    for _uid, label_dict, score in data:
        pos, neg = buckets.get(score, (0, 0))
        if label_dict[key] >= 1.0:
            pos += 1
        else:
            neg += 1
        buckets[score] = (pos, neg)
    rows = [[score, pos, neg] for score, (pos, neg) in buckets.items()]
    return sorted(rows, key=lambda x: -x[0])
def auc_origin(datalist):
    """Trapezoidal ROC AUC over [score, n_pos, n_neg] rows sorted by descending score."""
    total_pos = sum(row[1] for row in datalist)
    total_neg = sum(row[2] for row in datalist)
    x_prev = y_prev = 0
    x_cur = y_cur = 0
    cum_pos = cum_neg = 0
    area = 0
    for row in datalist:
        cum_pos += row[1]
        cum_neg += row[2]
        x_prev, y_prev = x_cur, y_cur
        if total_neg != 0 and total_pos != 0:
            x_cur = 1.0 * cum_neg / total_neg
            y_cur = 1.0 * cum_pos / total_pos
        # Only rows that move along the FPR axis contribute area.
        if row[2] != 0:
            area += (x_cur - x_prev) * (y_cur + y_prev) / 2
    return area
def auc_speedup(datalist):
    """Faster trapezoidal AUC: accumulate raw counts and normalise once at the end."""
    total_pos = sum(row[1] for row in datalist)
    total_neg = sum(row[2] for row in datalist)
    if total_neg == 0 or total_pos == 0:
        return 0  # AUC undefined with only one class present
    cum_pos = cum_neg = 0
    area = 0
    for row in datalist:
        x_prev = cum_neg
        y_prev = cum_pos
        cum_pos += row[1]
        cum_neg += row[2]
        if row[2] != 0:
            area += (cum_neg - x_prev) * (cum_pos + y_prev)
    # Deferred normalisation: divide by totals and halve for the trapezoid rule.
    return area * ((1.0 / total_pos) * (1.0 / total_neg) * 0.5)
def per_auc(data):
    """Compute, for every label, overall AUC plus per-user AUC aggregates.

    `data` rows are (uid, label_dict, score).  Returns
    {label: [all_auc, weighted_user_auc, per_user_auc]} where the per-user
    figures skip users whose samples are all-positive or all-negative
    (their AUC is undefined).
    """
    # groupby requires the data sorted by the grouping key (uid).
    sorted_data = sorted(data, key=lambda i: i[0])
    res = dict()
    for key in label_conf_dict.keys():
        user_auc_sum = 0
        users = 0
        user_auc_weighted_sum = 0
        samples = 0
        for uid, group in groupby(sorted_data, lambda i: i[0]):
            group = list(group)
            user_samples = len(group)
            # Skip single-class users: AUC is undefined for them.
            if all(i[1][key] < 1.0 for i in group):
                continue
            if all(i[1][key] >= 1.0 for i in group):
                continue
            user_format_data = format_data_for_auc(group, key)
            user_auc = auc_speedup(user_format_data)
            samples += user_samples
            user_auc_weighted_sum += user_samples * user_auc
            users += 1
            user_auc_sum += user_auc
        format_sort_data = format_data_for_auc(sorted_data, key)
        all_auc = auc_speedup(format_sort_data)
        weight_user_auc = 0.0 if samples == 0 else user_auc_weighted_sum / samples
        per_user_auc = 0.0 if users == 0 else user_auc_sum / users
        res[key] = [all_auc, weight_user_auc, per_user_auc]
    return res
# Per-country sample slices: i[0][1] holds each sample's user-country code.
USER_COUNTRY = {'COUNTRY_U': 'US', 'COUNTRY_I': 'IN', 'COUNTRY_O': 'OTHER'}
FIELD_CONF = [USER_COUNTRY]
KEYS = ['ALL', 'COUNTRY_U', 'COUNTRY_I', 'COUNTRY_O']
if __name__ == '__main__':
    FLAGS, unparsed = parser.parse_known_args()
    # Thresholds from the CLI override the defaults in label_conf_dict.
    label_conf_dict['SVDWELL'][1] = FLAGS.dwell_label_thresh
    label_conf_dict['SVLIKE'][1] = FLAGS.like_label_thresh
    label_conf_dict['SVSHARE'][1] = FLAGS.share_label_thresh
    label_conf_dict['SVFOLLOW'][1] = FLAGS.follow_label_thresh
    full_data = read_input(FLAGS.input_feature_data, FLAGS.input_score_data)
    data_dict = {}
    ava_data = [full_data]
    aucs = []
    for data in ava_data:
        # 'ALL' plus one slice per configured field value (here: user country).
        data_dict['ALL'] = [(i[0][0], i[1], i[2]) for i in data]
        for j, conf in enumerate(FIELD_CONF):
            # BUG FIX: dict.iteritems() is Python-2-only; the rest of this file
            # (print as a function, .items() below) targets Python 3.
            for k, v in conf.items():
                data_dict[k] = [(i[0][0], i[1], i[2]) for i in data if i[0][j + 1] == v]
        for k in KEYS:
            data_cate = data_dict[k]
            for auc_type, auc_list in per_auc(data_cate).items():
                sample_auc = auc_list[0]
                weighted_user_auc = auc_list[1]
                user_auc = auc_list[2]
                aucs.append('%s\t%s\t%s\t%s\t%.3f\t%.3f\t%.3f' % (
                    FLAGS.project, FLAGS.time_tag, k, auc_type, sample_auc, weighted_user_auc, user_auc))
    print('\n'.join(aucs))
| alever520/tensorflow-ctr | python/calAuc.py | calAuc.py | py | 8,133 | python | en | code | 0 | github-code | 13 |
# Advent of Code 2021 day 11 (part 2, presumably): simulate flashing octopuses
# on a grid until every cell flashes in the same step, then print that step.
input = open("input.txt")  # NOTE(review): shadows the builtin `input`; never closed
octo = {}
t = 0
# Parse the grid: octo[(row, col)] = energy level.
for i in input:
    i = i.strip()
    for j in range(len(i)):
        octo[(t,j)] = int(i[j])
    t += 1
s = 0
while True:
    # Step 1: every octopus gains one energy.
    for i in octo:
        octo[i] += 1
    flashed = []
    f = True
    # Step 2: cascade flashes; each cell above 9 flashes once and boosts
    # all 8 neighbours (and itself, which is harmless since it already flashed).
    while f:
        f = False
        for i in octo:
            if octo[i] > 9 and not i in flashed:
                f = True
                flashed.append(i)
                for k in range(-1, 2):
                    for l in range(-1, 2):
                        if (i[0]+l, i[1]+k) in octo:
                            octo[(i[0]+l, i[1]+k)] += 1
    # Step 3: reset flashed cells; stop when every cell flashed this step.
    all = True
    for i in octo:
        if octo[i] > 9:
            octo[i] = 0
        else:
            all = False
    s += 1
    if all:
        break
print(s)
# part 2: 364
| Lesley55/AdventOfCode | 2021/11/part2.py | part2.py | py | 779 | python | en | code | 1 | github-code | 13 |
10868033225 | import os
from flask import request, jsonify
import flask.ext.restless
from werkzeug import secure_filename
from wand.image import Image
from .models import app, db, Artwork, ArtworkImage
# Auto-generated REST endpoints (Flask-Restless) for the two SQLAlchemy models.
manager = flask.ext.restless.APIManager(app, flask_sqlalchemy_db=db)
manager.create_api(Artwork,
                   methods=['GET', 'POST', 'PUT', 'DELETE'])
manager.create_api(ArtworkImage,
                   methods=['GET', 'POST', 'PUT', 'DELETE'],
                   include_methods=['url'])
# Extensions (lowercase, without the dot) accepted by the upload endpoint.
SUPPORTED_IMAGE_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif', 'tif'])
def is_image(filename):
    """Return True when `filename` has a supported image extension.

    The comparison is now case-insensitive (previously 'photo.PNG' was
    rejected because the extension set holds lowercase entries only).
    """
    extension = os.path.splitext(filename)[1][1:].lower()
    return extension in SUPPORTED_IMAGE_EXTENSIONS
def get_full_image_path(filepath):
    """Join `filepath` onto the app's configured upload folder."""
    return os.path.join(app.config['UPLOAD_FOLDER'], filepath)
@app.route('/api/artwork_images/<int:id>/image', methods=['POST'])
def post_image(id):
    """Upload an image file for an existing ArtworkImage record.

    Saves the upload to the upload folder, converts it into the record's
    canonical file via process_img, then deletes the temporary upload.
    Returns a JSON message with status 200/400/500.
    """
    artwork_image = ArtworkImage.query.get(id)
    if artwork_image is None:
        message = 'No ArtworkImage with id {} was found.'.format(id)
        return jsonify({'message': message}), 400
    file = request.files['file']
    if file and is_image(file.filename):
        # secure_filename strips path components and unsafe characters.
        # NOTE(review): concurrent uploads with the same filename would
        # overwrite each other's temp file -- confirm acceptable.
        filename = secure_filename(file.filename)
        destination_path = get_full_image_path(filename)
        file.save(destination_path)
        if process_img(artwork_image, destination_path):
            os.remove(destination_path)  # temp upload no longer needed
            message = '{} successfully uploaded and processed into {}.'\
                .format(file.filename, artwork_image.filepath())
            return jsonify({'message': message})
        else:
            message = '{} could not be processed.' .format(file.filename)
            return jsonify({'message': message}), 500
    else:
        message = '{} is not a valid image (supported filetypes: {})'\
            .format(file.filename, ', '.join(SUPPORTED_IMAGE_EXTENSIONS))
        return jsonify({'message': message}), 400
def process_img(artwork_image, path):
    """Convert the uploaded file at `path` into the ArtworkImage's canonical file.

    Uses Wand/ImageMagick; the output format is implied by the destination
    filename's extension.  Returns True when the converted file exists.
    """
    destination_path = artwork_image.filepath()
    with Image(filename=path) as img:
        img.save(filename=destination_path)
    return os.path.isfile(destination_path)
| augustjd/maxsaunderspottery | sand/api.py | api.py | py | 2,153 | python | en | code | 0 | github-code | 13 |
12861554465 | # P.92 # ๊ทธ๋ฆฌ๋ # ์ฑ๊ณต!
# "This is Coding Test" book, p.92: greedy "Law of Big Numbers" problem.
# Input line 1: N M K (array length, number of additions, max consecutive
# uses of the same number).  Input line 2: N natural numbers.
# Goal: maximise the sum of M additions where the same number may be added
# at most K times in a row.
n, m, k = map(int, input().split(' '))
li = list(map(int, input().split(' ')))
li.sort()
# Greedy: alternate K copies of the largest number with one second-largest.
i = li[len(li)-1]  # largest value
j = li[len(li)-2]  # second-largest value
print(i, j)
res = 0
p = 0  # additions performed so far (must stop at m)
while p < m :
    for q in range(k) :
        res += i
        p += 1
        if p >= m :
            break
    if p >= m :
        break
    res += j
    p += 1
print(res)
# ์์ ์ฝ๋ ๋ฐ ํด๋ต ์ฝ๋ 1 : ๋ฐ๋ณต๋ฌธ ์ ๋ฆฌ
# while True :
# for i in range(k) : # ๊ฐ์ฅ ํฐ ์๋ฅผ k๋ฒ ๋ํ๊ธฐ
# if m == 0 : # m์ด 0์ด๋ผ๋ฉด ๋ฐ๋ณต๋ฌธ ํ์ถ
# break
# res += p
# m -= 1 # ๋ํ ๋๋ง๋ค 1์ฉ ๋นผ๊ธฐ
# if m == 0 : # m์ด 0์ด๋ผ๋ฉด ๋ฐ๋ณต๋ฌธ ํ์ถ
# break
# res += q # ๋ ๋ฒ์งธ๋ก ํฐ ์๋ฅผ ํ ๋ฒ ๋ํ๊ธฐ
# m -= 1 # ๋ํ ๋๋ง๋ค 1์ฉ ๋นผ๊ธฐ
# print(res)
# ์์ ์ฝ๋ ๋ฐ ํด๋ต ์ฝ๋ 2 : ์๊ฐ ์ด๊ณผ ๋ฌธ์ ํด๊ฒฐ, ๋ฐ๋ณต๋๋ ์์ด ํ์
# cnt = m / (k+1) * k # ๊ฐ์ฅ ํฐ ์๊ฐ ๋ฑ์ฅํ๋ ํ์
# cnt += m % (k+1) # ๋๋จธ์ง๊ฐ ์์ ๋ ๊ฐ์ฅ ํฐ ์๊ฐ ๋ํด์ง๋ ํ์ ์ถ๊ฐ
# res = 0
# res += (cnt) * p # ๊ฐ์ฅ ํฐ ์ ๋ํด์ฃผ๊ธฐ
# res += (m-cnt) * q # ์ด ๋ํ๋ ์ - ๊ฐ์ฅ ํฐ ์ ๋ฑ์ฅ ํ์ = ๋ ๋ฒ์จฐ ํฐ ์ ๋ฑ์ฅ ํ์
# print(res) | Nachtstolz/CTwithPython | 92.py | 92.py | py | 1,737 | python | ko | code | 0 | github-code | 13 |
26005045290 | import bgheatmaps as bgh
"""
This example shows how to use visualize a heatmap in 3D
"""
# Scalar value to display for each brain-region acronym.
values = dict(  # scalar values for each region
    TH=1,
    RSP=0.2,
    AI=0.4,
    SS=-3,
    MO=2.6,
    PVZ=-4,
    LZ=-3,
    VIS=2,
    AUD=0.3,
    RHP=-0.2,
    STR=0.5,
    CB=0.5,
    FRP=-1.7,
    HIP=3,
    PA=-4,
)
# Build and show the 3D heatmap: a 1000-um-thick frontal slicing plane at the
# given position, with the colormap clamped to [-5, 3].
scene = bgh.heatmap(
    values,
    position=(8000, 5000, 5000),
    orientation="frontal",  # or 'sagittal', or 'horizontal' or a tuple (x,y,z)
    thickness=1000,
    title="frontal",
    vmin=-5,
    vmax=3,
    format="3D",
).show()
| brainglobe/bg-heatmaps | examples/heatmap_3d.py | heatmap_3d.py | py | 565 | python | en | code | 20 | github-code | 13 |
32773348829 | import sys
sys.path.append("..")
import tkinter as tk
import tkinter.ttk as ttk
from Controller.ForumFunctions import *
from View.discussionFrame import *
class startDFrame:
    """Tkinter frame for submitting a new discussion-forum question."""

    def insertQuestion(self):
        """Persist the entered question/answer, then swap to the discussion frame."""
        ques=self.questionT.get("1.0",'end-1c')
        ans=self.answerT.get("1.0",'end-1c')
        # NOTE(review): the class itself (not an instance) is passed as `self`
        # to createQuestion -- confirm ForumFunctions expects that.
        f=ForumFunctions
        f.createQuestion(f,ques,ans)
        self.df.destroy()
        self.df2=discussionFrame(self.t)

    def __init__(self,top=None):
        """Build the frame: title entry, body entry, submit button, labels."""
        self.t=top  # kept so insertQuestion can rebuild the discussion frame
        self.df=tk.Frame(top)
        self.df.pack(expand=True, fill='both')
        # Single-line-ish text widget for the question title.
        self.questionT = tk.Text(self.df)
        self.questionT.place(relx=0.4, rely=0.134, relheight=0.108, relwidth=0.44
                , bordermode='ignore')
        self.questionT.configure(background="white")
        self.questionT.configure(font="TkTextFont")
        self.questionT.configure(foreground="black")
        self.questionT.configure(highlightbackground="#d9d9d9")
        self.questionT.configure(highlightcolor="black")
        self.questionT.configure(insertbackground="black")
        self.questionT.configure(selectbackground="#c4c4c4")
        self.questionT.configure(selectforeground="black")
        self.questionT.configure(width=264)
        self.questionT.configure(wrap="word")
        # Larger text widget for the question body.
        self.answerT = tk.Text(self.df)
        self.answerT.place(relx=0.083, rely=0.387, relheight=0.427
                , relwidth=0.773, bordermode='ignore')
        self.answerT.configure(background="white")
        self.answerT.configure(font="TkTextFont")
        self.answerT.configure(foreground="black")
        self.answerT.configure(highlightbackground="#d9d9d9")
        self.answerT.configure(highlightcolor="black")
        self.answerT.configure(insertbackground="black")
        self.answerT.configure(selectbackground="#c4c4c4")
        self.answerT.configure(selectforeground="black")
        self.answerT.configure(width=464)
        self.answerT.configure(wrap="word")
        # Submit button wired to insertQuestion.
        self.submitQ = ttk.Button(self.df,command=self.insertQuestion)
        self.submitQ.place(relx=0.375, rely=0.882, height=25, width=110
                , bordermode='ignore')
        self.submitQ.configure(takefocus="")
        self.submitQ.configure(text='''Submit your Query''')
        # Static labels for the two text widgets.
        self.TLabel1 = ttk.Label(self.df)
        self.TLabel1.place(relx=0.083, rely=0.151, height=39, width=165
                , bordermode='ignore')
        self.TLabel1.configure(background="#d9d9d9")
        self.TLabel1.configure(foreground="#000000")
        self.TLabel1.configure(font="TkDefaultFont")
        self.TLabel1.configure(relief="flat")
        self.TLabel1.configure(text='''Enter the title to your Query :''')
        self.TLabel1.configure(width=165)
        self.TLabel2 = ttk.Label(self.df)
        self.TLabel2.place(relx=0.083, rely=0.336, height=19, width=145
                , bordermode='ignore')
        self.TLabel2.configure(background="#d9d9d9")
        self.TLabel2.configure(foreground="#000000")
        self.TLabel2.configure(font="TkDefaultFont")
        self.TLabel2.configure(relief="flat")
        self.TLabel2.configure(text='''Elaborate your Problem:''')
        self.TLabel2.configure(width=145)
| lovelotey1600/DicussionForum-2.0 | View/startDFrame.py | startDFrame.py | py | 3,195 | python | en | code | 0 | github-code | 13 |
17190071125 | import copy
import inspect
import json
import math
from itertools import permutations, product
from random import Random
from unittest import TestCase, mock
import numpy as np
from .client import ApiClient, ApiError, NetworksResult, Query, QueryFields, \
TextType
class TestTextType(TestCase):
    """TextType is abstract and its repr delegates to __str__."""

    def test_init(self):
        # Direct instantiation of the abstract base must fail.
        with self.assertRaises(TypeError):
            TextType()

    def test_repr(self):
        class Foo(TextType):
            def __str__(self):
                return "foo"
        f = Foo()
        # repr() falls through to __str__.
        self.assertEqual(repr(f), "foo")
class TestQueryFields(TestCase):
    """QueryFields construction, GraphQL rendering, equality/hash and ordering."""

    def test_init(self):
        # A multi-key dict is an ambiguous field spec and is rejected.
        with self.assertRaises(ValueError):
            QueryFields({'a': 1, 'b': 2})

    def test_str(self):
        qf1 = QueryFields('bar')
        self.assertEqual(str(qf1), 'bar')
        # Dicts render as GraphQL sub-selections; fields render sorted.
        qf2 = QueryFields([{'foo': 'bar'}, 'fubar'])
        self.assertEqual(str(qf2), 'foo {bar} fubar')
        qf3 = QueryFields(['foo', 'bar'])
        self.assertEqual(str(qf3), 'bar foo')

    def test_eq(self):
        qf1 = QueryFields('bar')
        qf2 = QueryFields('bar')
        self.assertEqual(qf1, qf2)
        fields = ['fubar', 'xyz',
                  {'foo': 'bar'},
                  {'baz': ['baz', {'b': 'a'}]}]
        # Equality and hashing must be insensitive to field order.
        qfields = [QueryFields(fs) for fs in permutations(fields)]
        for first, second in permutations(qfields, 2):
            self.assertEqual(first, second)
            self.assertEqual(hash(first), hash(second))
            self.assertNotEqual(qf1, second)
            self.assertNotEqual(hash(qf1), hash(second))
            self.assertNotEqual(first, qf2)
            self.assertNotEqual(hash(first), hash(qf2))

    def test_lt(self):
        # Ordering follows the rendered field string ('bar' < 'foo').
        qf1 = QueryFields('foo')
        qf2 = QueryFields('bar')
        self.assertLess(qf2, qf1)
        self.assertGreater(qf1, qf2)
class TestQuery(TestCase):
    """Query rendering to JSON, equality/hashing, ordering and param export."""

    # GraphQL template with a {fields} placeholder filled per test.
    template = """
    query Foo($name: String) {{
        bar(name: $name) {{
            {fields}
        }}
    }}
    """

    def test_str(self):
        # str(Query) is the JSON payload with the cleandoc'd query text.
        exp_query = inspect.cleandoc(self.template.format(fields='fubar'))
        q = Query(self.template, {'name': 'baz'}, 'fubar')
        qs = json.dumps({'query': exp_query,
                         'variables': {'name': 'baz'}},
                        sort_keys=True)
        self.assertEqual(str(q), qs)
        fields2 = ['fubar', 'bar', {'baz': 'a'}]
        variables2 = {'fubar': [{'name': 'foo'}, {'name': 'baz'}],
                      'bar': 1}
        q2 = Query(self.template,
                   variables2,
                   fields2)
        # Fields render sorted: bar, baz {a}, fubar.
        exp_query2 = """
        query Foo($name: String) {
            bar(name: $name) {
                bar baz {a} fubar
            }
        }
        """
        qs2 = json.dumps({'query': inspect.cleandoc(exp_query2),
                          'variables': variables2},
                         sort_keys=True)
        self.assertEqual(str(q2), qs2)

    def test_eq(self):
        # Dicts preserve insertion order from Python 3.6
        q1 = Query(self.template, {'name': 'foo'}, ['bar', 'fubar'])
        q2 = Query(self.template, {'name': 'foo'}, ['fubar', 'bar'])
        self.assertEqual(q1, q2)
        q3 = Query(self.template,
                   {'fubar': {'baz': 3, 'bar': 2},
                    'foo': 1},
                   'bar')
        q4 = Query(self.template,
                   {'foo': 1,
                    'fubar': {'bar': 2, 'baz': 3}},
                   'bar')
        self.assertEqual(q3, q4)
        self.assertNotEqual(q1, q4)
        # Exhaustively shuffle args and fields: equality/hash must not depend
        # on insertion order anywhere in the structure.
        rand = Random(0)
        key_vals = [('a', 1), ('b', {'e': 2}), ('c', 'foo'), ('xyz', math.pi)]
        qargs = [{'name': dict(args)} for args in permutations(key_vals)]
        rand.shuffle(qargs)
        fields = ['fubar', 'xyz',
                  {'foo': 'bar'},
                  {'baz': ['baz', {'b': 'a'}]}]
        qfields = list(permutations(fields))
        rand.shuffle(qfields)
        prev_query = None
        for args, fields in product(qargs, qfields):
            q = Query('name', args, fields)
            if prev_query:
                self.assertEqual(q, prev_query)
                self.assertEqual(hash(q), hash(prev_query))
            prev_query = q

    def test_lt(self):
        # Ordering follows the rendered query text ('bar' < 'fubar').
        q1 = Query(self.template, {'name': 'foo'}, 'bar')
        q2 = Query(self.template, {'name': 'foo'}, 'fubar')
        self.assertLess(q1, q2)
        self.assertGreater(q2, q1)

    def test_as_params(self):
        # as_params() exports (query, variables) pairs for a GET request.
        args = {'name': 'foo'}
        q = Query(self.template, args, 'bar')
        exp_query = inspect.cleandoc(self.template.format(fields='bar'))
        params = [('query', exp_query),
                  ('variables', json.dumps(args))]
        self.assertListEqual(q.as_params(), params)
class TestNetworksResult(TestCase):
    """NetworksResult conversion to plain lists and pandas DataFrames."""

    def setUp(self):
        # Fixture: networks with 1, 1, 6 and 0 losses; the last has no optimizer.
        self._networks = [
            {'losses': [{'type': 'mse'}],
             'name': 'eriklindernoren/Keras-GAN/lsgan',
             'optimizer': {'learningRate': 0.00019999999494757503,
                           'type': 'Adam'}},
            {'losses': [{'type': 'categorical_crossentropy'}],
             'name': 'basveeling/wavenet/wavenet',
             'optimizer': {'learningRate': 0.0010000000474974513,
                           'type': 'SGD'}},
            {'losses': [{'type': 'mae'},
                        {'type': 'mae'},
                        {'type': 'mae'},
                        {'type': 'mse'},
                        {'type': 'mse'},
                        {'type': 'mae'}],
             'name': 'eriklindernoren/Keras-GAN/discogan',
             'optimizer': {'learningRate': 0.00019999999494757503,
                           'type': 'Adam'}},
            {'losses': [],
             'name': 'keras-team/keras-applications/inception_resnet_v2',
             'optimizer': None}
        ]

    def test_to_list(self):
        nr = NetworksResult(self._networks)
        list_res = nr.to_list()
        self.assertListEqual(list_res, self._networks)
        # Mutating the returned list must not leak into later calls.
        list_res[0]['name'] = 'foo'
        list_res_2 = nr.to_list()
        # Test deep copy
        self.assertNotEqual(list_res_2[0]['name'], 'foo')

    def test_to_data_frame(self):
        nets = copy.deepcopy(self._networks)
        nr = NetworksResult(self._networks)
        fields = ['name', 'loss', 'optimizer', 'learningRate']
        df = nr.to_data_frame(fields=fields)
        # Test deep copy
        self.assertListEqual(nr.to_list(), nets)
        # Test columns
        self.assertListEqual(list(df), fields)
        # One DataFrame row per loss (1 + 1 + 6 + 1 empty = 9 rows).
        self.assertEqual(df.shape, (9, 4))
        self.assertEqual(df.iloc[1]['name'], self._networks[1]['name'])
        self.assertEqual(df.iloc[1]['optimizer'],
                         self._networks[1]['optimizer']['type'])
        self.assertEqual(df.iloc[2]['name'], self._networks[2]['name'])
        self.assertEqual(df.iloc[3]['name'], self._networks[2]['name'])
        self.assertEqual(df.iloc[5]['name'], self._networks[2]['name'])
        self.assertEqual(df.iloc[5]['loss'], 'mse')
        # Missing optimizer fields come through as NaN.
        self.assertTrue(np.isnan(df.iloc[-1]['optimizer']))
        self.assertTrue(np.isnan(df.iloc[-1]['learningRate']))
        nets = [{'name': 'foo'}, {'name': 'bar'}]
        nr2 = NetworksResult(nets)
        df2 = nr2.to_data_frame()
        self.assertEqual(df2.iloc[1]['name'], 'bar')
class TestApiClient(TestCase):
    """Tests for ApiClient: endpoint, arg normalization, querying and caching."""
    def test_endpoint(self):
        # better double check
        self.assertEqual(ApiClient._default_endpoint,
                         'https://nndb-api.aughie.org/graphql')
    def test_normalize_args(self):
        """_normalize_args must deep-copy, drop Nones and sort list values."""
        args = {'foo': [{'c': 3}, {'a': 1}, {'b': 2}],
                'bar': {'d': 4},
                'baz': None}
        res = ApiClient._normalize_args(args)
        # Make sure deep copy is used.
        args['bar']['d'] = 5
        exp_res = {'bar': {'d': 4},
                   'foo': [{'a': 1}, {'b': 2}, {'c': 3}]}
        self.assertDictEqual(res, exp_res)
    @mock.patch('aughie.nndb.client.ApiClient._cache')
    @mock.patch('aughie.nndb.client.requests')
    def test_make_query_valid(self, requests_mock, cache_mock):
        """On a cache miss, _make_query hits the API and populates the cache."""
        cm = cache_mock
        rm = requests_mock
        rm.get.return_value = rm.Request
        rm.Request.json.return_value = {'data': 'query-res'}
        endpoint = 'api-endpoint'
        client = ApiClient(api_endpoint=endpoint)
        q = Query('{fields}', {'arg': True}, 'field')
        # KeyError from the cache simulates a cache miss.
        cm.get.side_effect = KeyError(str(q))
        res = client._make_query(q)
        rm.get.assert_called_once_with(endpoint,
                                       params=q.as_params())
        cm.get.assert_called_once_with(q)
        cm.set.assert_called_once_with(q, 'query-res')
        self.assertEqual(res, 'query-res')
    @mock.patch('aughie.nndb.client.ApiClient._cache')
    @mock.patch('aughie.nndb.client.requests')
    def test_make_query_no_cache(self, requests_mock, cache_mock):
        """With use_cache=False the cache is never consulted nor written."""
        cm = cache_mock
        rm = requests_mock
        rm.get.return_value = rm.Request
        rm.Request.json.return_value = {'data': 'query-res'}
        client = ApiClient(use_cache=False)
        q = Query('{fields}', {'arg': True}, 'field')
        res = client._make_query(q)
        rm.get.assert_called_once_with(ApiClient._default_endpoint,
                                       params=q.as_params())
        cm.get.assert_not_called()
        cm.set.assert_not_called()
        self.assertEqual(res, 'query-res')
    @mock.patch('aughie.nndb.client.requests')
    def test_make_query_graphql_error(self, requests_mock):
        """A GraphQL 'errors' payload is surfaced as ApiError."""
        r = requests_mock
        r.get.return_value = r.Request
        r.Request.json.return_value = {'errors': ["GraphQL error"]}
        client = ApiClient()
        q = Query('{fields}', {'arg': True}, 'field')
        with self.assertRaises(ApiError, msg='["GraphQL error"]'):
            client._make_query(q)
    @mock.patch('aughie.nndb.client.requests')
    def test_make_query_connection_error(self, requests_mock):
        """Transport-level ConnectionError is wrapped in ApiError."""
        r = requests_mock
        msg = 'Connection error'
        r.get.side_effect = ConnectionError(msg)
        client = ApiClient()
        q = Query('{fields}', {'arg': True}, 'field')
        with self.assertRaises(ApiError) as cm:
            client._make_query(q)
        self.assertEqual(str(cm.exception), msg)
    @mock.patch('aughie.nndb.client.ApiClient._make_query')
    def test_get_network(self, make_query_mock):
        """get_network builds the single-network query and wraps the result."""
        template = """
        query GetNetwork($name: String!) {{
            network(name: $name) {{
                {fields}
            }}
        }}
        """
        ret_val = {'name': 'network-name', 'losses': ['loss-type']}
        make_query_mock.return_value = {'network': ret_val}
        fields = ['name', {'losses': ['type']}]
        q = Query(template,
                  {'name': 'network-name'},
                  fields)
        client = ApiClient()
        res = client.get_network('network-name', fields=fields)
        make_query_mock.assert_called_once_with(q)
        self.assertListEqual(res.to_list(), [ret_val])
    @mock.patch('aughie.nndb.client.ApiClient._make_query')
    def test_get_networks(self, make_query_mock):
        """get_networks normalizes filter args (sorted losses) into the query."""
        template = """
        query GetNetworks($network: NetworkInput $optimizers: [OptimizerInput]
            $losses: [LossInput] $layers: [LayerInput]) {{
            networks(network: $network optimizers: $optimizers losses: $losses
                layers: $layers) {{
                {fields}
            }}
        }}
        """
        ret_val = [{'name': 'foo', 'optimizer': ['optimizer-type']}]
        make_query_mock.return_value = {'networks': ret_val}
        optimizers_arg = [{'hasDecay': True}]
        losses_arg = [{'type': 'mse'}, {'type': 'binary_crossentropy'}]
        layers_arg = [{'activation': 'relu'}]
        fields = ['name', {'optimizer': ['type']}]
        client = ApiClient()
        res = client.get_networks(optimizers=optimizers_arg,
                                  losses=losses_arg,
                                  layers=layers_arg,
                                  fields=fields)
        # The client is expected to sort list args by their JSON encoding.
        losses_arg_sorted = sorted(losses_arg,
                                   key=lambda el: json.dumps(el,
                                                             sort_keys=True))
        q = Query(template,
                  {'optimizers': optimizers_arg,
                   'losses': losses_arg_sorted,
                   'layers': layers_arg},
                  fields)
        make_query_mock.assert_called_once_with(q)
        self.assertListEqual(res.to_list(), ret_val)
| agostbiro/aughie-py | aughie/nndb/test_client.py | test_client.py | py | 12,603 | python | en | code | 0 | github-code | 13 |
36588296066 | class Solution:
def isRectangleCover(self, rectangles: List[List[int]]) -> bool:
corners = set()
area = 0
for r in rectangles:
area += (r[3]-r[1])*(r[2]-r[0])
for p in [(r[0],r[1]), (r[2],r[3]), (r[0],r[3]), (r[2],r[1])]:
if p in corners:
corners.remove(p)
else:
corners.add(p)
if len(corners) != 4:
return False
corners = sorted(list(corners))
return area == (corners[-1][0]-corners[0][0])*(corners[-1][1] - corners[0][1]) | ysonggit/leetcode_python | 0391_PerfectRectangle.py | 0391_PerfectRectangle.py | py | 583 | python | en | code | 1 | github-code | 13 |
28571945512 | import asyncore
from pysnmp.compat.pysnmp1x import session, error
class async_session(asyncore.dispatcher, session.session):
    """An asynchronous SNMP engine based on the asyncore.py classes.

    Send SNMP requests and receive responses asynchronously: the caller
    supplies a callback which is invoked from handle_read() when a
    response datagram arrives.
    """
    def __init__(self, agent, community,\
                 caller_fun, caller_data=None):
        # Make sure we get the callback function
        if not callable(caller_fun):
            raise error.BadArgument('Bad callback function')
        # Call parent classes constructors
        asyncore.dispatcher.__init__(self)
        session.session.__init__(self, agent, community)
        # Keep references to data and method objects supplied
        # by caller for callback on request completion.
        self.caller_data = caller_data
        self.caller_fun = caller_fun
    def open(self):
        """
        open()
        Create a socket (via the blocking session class) and hand it to
        the asyncore dispatcher so it is polled by the event loop.
        """
        asyncore.dispatcher.set_socket(self, session.session.open(self))
    def send_request(self, encoded_oids, encoded_vals, type='GETREQUEST'):
        """
        send_request(encoded_oids, encoded_vals[, type])
        Build and send SNMP message to remote SNMP process (as specified
        on async_session object creation) composed from encoded
        Object IDs along with their associated values.
        A callback function (as specified on async_session object creation)
        will be invoked on response arrival or request timeout.

        Note: 'type' is a PDU type name such as 'GETREQUEST'; the parameter
        name shadows the builtin but is kept for API compatibility.
        """
        # The request is kept on self so it could be re-sent/matched later.
        self.request = session.session.encode_request(self, type, \
                                                      encoded_oids, \
                                                      encoded_vals)
        session.session.send(self, self.request)
    def handle_read(self):
        """Read SNMP reply from socket.
        This does NOT time out so one needs to implement a mean of
        handling timed out requests (perhaps it's worth looking at
        medusa/event_loop.py for an interesting approach).
        """
        # 65536 is the maximum UDP datagram payload size.
        (self.response, self.addr) = self.recvfrom(65536)
        try:
            # There seems to be no point in delivering pysnmp exceptions
            # from here as they would arrive out of context...
            (encoded_oids, encoded_vals) = \
                session.session.decode_response(self, self.response)
        # Catch all known pysnmp exceptions and return a tuple of None's
        # as exceptions would then arrive out of context at this point.
        except error.PySNMPError:
            # Return a tuple of None's to indicate the failure
            (encoded_oids, encoded_vals) = (None, None)
        # Pass SNMP response along with references to caller specified data
        # and ourselves
        self.caller_fun(self, self.caller_data, encoded_oids, encoded_vals)
    def writable(self):
        """Objects of this class never expect write events
        """
        return 0
    def handle_connect(self):
        """Objects of this class never expect connect events
        """
        pass
| ag1455/OpenPLi-PC | pre/python/lib/python2.7/dist-packages/pysnmp/compat/pysnmp1x/asynsnmp.py | asynsnmp.py | py | 3,188 | python | en | code | 19 | github-code | 13 |
# coding=utf-8
import json
import os
import requests
import logging
from subprocess import call
from flask import Flask, request
app = Flask(__name__)
logging.basicConfig(level=logging.DEBUG)
path = "/root/.jenkins/workspace/"
@app.route('/', methods=['post'])
def build():
    """Handle a Git push webhook: sync the local checkout, then trigger Jenkins.

    Expects a GitLab/GitHub-style push payload with repository name/url and
    the pushed commit id in 'after'.  Clones or updates the repository under
    ``path`` at that commit, then POSTs a parameterized Jenkins build.

    Returns:
        The literal string 'success' (Flask response body).
    """
    json_payload = json.loads(request.data)
    logging.info(json_payload)
    repo = json_payload['repository']['name']
    url = json_payload['repository']['url']
    commit_id = json_payload['after']
    actual_path = "%s%s" % (path, repo)
    git_dir = "%s/.git" % actual_path
    logging.info("syncing %s (%s) at commit %s into %s",
                 repo, url, commit_id, actual_path)
    # SECURITY: repo/url/commit_id come from an untrusted webhook payload.
    # Run git with argument lists (no shell) so payload values cannot be
    # abused for shell command injection, unlike the previous os.system calls.
    if os.path.isdir(actual_path):
        call(["git", "--git-dir=%s" % git_dir,
              "--work-tree=%s" % actual_path, "pull"])
    else:
        call(["git", "clone", url, actual_path])
    call(["git", "--git-dir=%s" % git_dir,
          "--work-tree=%s" % actual_path, "checkout", commit_id])
    # ่ทๅJenkins - Crumb
    # requests.get("http://10.15.38.146:8080/jenkins/crumbIssuer/api/json")
    requests.post('http://10.15.38.146:8080/jenkins/job/ci-java-job/buildWithParameters',
                  headers={
                      "Content-Type": "application/x-www-form-urlencoded",
                      "Jenkins-Crumb": "a73ff726a735ad93a2b61b6b28b616bc"
                  },
                  data={
                      "branch": "dev",
                      "repo": "git@192.168.0.175:INF/zipkin_test.git",
                      "build_command": "mvn clean install -Dmaven.test.skip=true -U",
                      "notification_emails": "zhugongyi@niwodai.net"
                  })
    return 'success'
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True, port=8090)
| Rayeee/ci-java-trigger | trigger.py | trigger.py | py | 1,923 | python | en | code | 0 | github-code | 13 |
# -*- coding: utf-8 -*-
import re
class Item(object):
    """Abstract extraction rule: a CSS selector plus sanitizing hooks.

    Subclasses implement ``sanitize`` (value normalization) and ``extract``
    (pulling the value out of a parsed document).
    """

    def __init__(self, css, type_, use_parent=False, translate=False, sanitizer=None):
        self.css = css
        self.type = type_
        self.use_parent = use_parent
        self.translate = translate
        # A caller-supplied sanitizer shadows the class-level one; either
        # way the chosen callable is stored on the instance.
        self.sanitize = sanitizer if sanitizer is not None else self.sanitize

    def __repr__(self):
        return '<%s %s>' % (type(self).__name__, self.css)

    @staticmethod
    def sanitize(value, type_=None):
        """Normalize a raw extracted value; subclasses must implement."""
        raise NotImplementedError

    def extract(self, soup):
        """Extract this item's value from *soup*; subclasses must implement."""
        raise NotImplementedError
class StrItem(Item):
    """Extracts a stripped string, optionally from an attribute or recursively."""

    def __init__(self, css, use_parent=False, translate=False, sanitizer=None, attr=None,
                 recursive=False):
        super().__init__(css, str, use_parent, translate, sanitizer)
        self.attr = attr                # attribute name to read instead of text
        self.recursive = recursive      # include text of nested elements

    @staticmethod
    def sanitize(value, type_=None):
        """Strip *value*; return None for None or whitespace/punctuation-only."""
        if value is None:
            return value
        # Raw string: '\w' in a plain literal is an invalid escape sequence
        # (DeprecationWarning, SyntaxError in future Python versions).
        return value.strip() if re.search(r'\w+', value) else None

    def extract(self, soup):
        """Return the sanitized text (or attribute) of the selected element."""
        if not self.css:
            # No selector: read directly off the node we were handed.
            return self.sanitize(soup.get(self.attr) if self.attr else soup.get_text())
        elem = soup.select_one(self.css)
        if not elem:
            return None
        if self.recursive:
            return self.sanitize(elem.get(self.attr) if self.attr else elem.get_text())
        else:
            return self.sanitize(elem.get(self.attr) if self.attr
                                 else self.get_text_not_recursive(elem))

    @staticmethod
    def get_text_not_recursive(elem):
        """Concatenate only the element's own direct text nodes."""
        return ''.join(elem.find_all(text=True, recursive=False)).strip()
class IntItem(Item):
    """Extracts an integer, tolerating thousands separators ('1,234')."""

    def __init__(self, css, use_parent=False, translate=False, sanitizer=None, attr=None):
        super().__init__(css, int, use_parent, translate, sanitizer)
        self.attr = attr                # attribute name to read instead of text

    @staticmethod
    def sanitize(value, type_=None):
        """Parse the first digit run in *value*; 0 if none, None passes through."""
        if value is None:
            return value
        # Raw string: '\d' in a plain literal is an invalid escape sequence
        # (DeprecationWarning, SyntaxError in future Python versions).
        search = re.search(r'[\d,]+', value)
        return int(search.group().replace(',', '')) if search else 0

    def extract(self, soup):
        """Return the sanitized integer from the selected element."""
        if not self.css:
            return self.sanitize(soup.get(self.attr) if self.attr else soup.get_text())
        elem = soup.select_one(self.css)
        if not elem:
            return None
        return self.sanitize(elem.get(self.attr) if self.attr else elem.get_text())
class DictItem(Item):
    """Flattens a child Schema's extracted items into the parent dict."""

    # DictItem needs no selector of its own; css is just a placeholder.
    def __init__(self, child, css=None, type_=dict, use_parent=False, translate=False):
        super().__init__(css, type_, use_parent, translate)
        from . import Schema
        if child and not issubclass(child, Schema):
            raise AttributeError('Child parameter must be inheritance of Schema.')
        self.child = child

    @staticmethod
    def sanitize(value, type_=None):
        # DictItem delegates sanitizing to its child items.
        pass

    def extract(self, soup):
        """Extract every child item; nested DictItems merge into this level."""
        data = {}
        for child_key, child_item in self.child.get_items():
            value = child_item.extract(soup)
            if not value:
                continue
            if isinstance(child_item, DictItem):
                # Nested dicts are flattened rather than nested under the key.
                data.update(value)
                continue
            data[child_key] = value
        return data
class ListItem(Item):
    """Extracts a list of values (or dicts of child/attr values) per element."""

    def __init__(self, css, type_=list, use_parent=False, translate=False, sanitizer=None,
                 child=None, attrs=None):
        super().__init__(css, type_, use_parent, translate, sanitizer)
        self.child = child              # optional Schema describing each element
        self.attrs = attrs if attrs else {}  # {attr_name: type} to harvest

    @staticmethod
    def sanitize(value, type_=None):
        """Dispatch sanitizing by the requested element type."""
        if type_ == int:
            return IntItem.sanitize(value)
        elif type_ == str:
            return StrItem.sanitize(value)
        else:
            return value

    @staticmethod
    def is_values_full(values):
        """True iff no value is None or an empty container."""
        for value in values:
            if value in [None, [], {}]:
                return False
        return True

    def extract(self, soup):
        """Build one entry per matched element."""
        data = []
        elems = soup.select(self.css)
        if not elems:
            return data
        for elem in elems:
            if self.child or self.attrs:
                child_data = {}
                if self.child:
                    for child_key, child_item in self.child.get_items():
                        value = child_item.extract(elem)
                        if isinstance(child_item, DictItem):
                            # BUGFIX: was child_data[child_key].update(value),
                            # which raises KeyError on a fresh dict.  Merge the
                            # flattened dict, mirroring DictItem.extract.
                            child_data.update(value)
                        else:
                            child_data[child_key] = value
                elif self.attrs:
                    for attr, type_ in self.attrs.items():
                        if elem.get(attr):
                            child_data[attr] = self.sanitize(elem.get(attr), type_)
                    if child_data:
                        child_data['text'] = elem.get_text()
                # Keep the entry only when every harvested value is non-empty.
                if child_data and self.is_values_full(child_data.values()):
                    data.append(child_data)
            else:
                data.append(self.sanitize(elem.get_text()))
        return data
class HtmlItem(Item):
    """Extracts a raw HTML subtree, optionally pruning unwanted elements."""

    def __init__(self, css, use_parent=False, translate=False, sanitizer=None, remove_elems=None):
        super().__init__(css, 'html', use_parent, translate, sanitizer)
        self.remove_elems = remove_elems    # CSS selector of nodes to strip
        self.elem = None                    # set by extract(); used by to_string()

    @staticmethod
    def sanitize(value, type_=None):
        # HTML is kept verbatim.
        return value

    def extract(self, soup):
        """Select the subtree, strip remove_elems, and remember it for to_string."""
        if not self.css:
            return self.sanitize(soup)
        self.elem = soup.select_one(self.css)
        if not self.elem:
            return None
        if self.remove_elems:
            for remove_elem in self.elem.select(self.remove_elems):
                remove_elem.extract()
        return self.elem

    def to_string(self):
        """Serialize the extracted subtree with whitespace runs collapsed."""
        if not self.elem:
            raise NotImplementedError('Element not extracted yet')
        # Raw string: '\s' in a plain literal is an invalid escape sequence
        # (DeprecationWarning, SyntaxError in future Python versions).
        return re.sub(r'\s+', ' ', str(self.elem))
| jiyonghong/html-schema | item.py | item.py | py | 5,990 | python | en | code | 0 | github-code | 13 |
19571875129 | from Arrow import Arrow
class Machine:
    """A Turing machine loaded from a transitions file and a final-states file."""

    def __init__(self, name, fs_name):
        self.alphabet = {"1", "=", "*"}
        (self.start_state,
         self.final_states,
         self.transitions,
         self.states) = Machine.read_transitions(name, fs_name)

    @classmethod
    def read_transitions(cls, name, fs_name):
        """Parse the transition table and final-state set from two text files.

        Each transition line has six whitespace-separated tokens:
        ``src symbol _ dst replacement direction`` where direction '->'
        means move right and anything else means move left.
        """
        transitions = dict()
        states = set()
        start_state = None
        final_states = set()
        with open(name, 'r') as transitions_file:
            for line in transitions_file.readlines():
                src, symbol, _, dst, replacement, move = line.strip().split()
                move_dir = Arrow.Right if move == "->" else Arrow.Left
                if start_state is None:
                    # The first transition line names the start state.
                    start_state = src
                states.update((src, dst))
                transitions[(src, symbol)] = (dst, replacement, move_dir)
        with open(fs_name, 'r') as finals_file:
            # Final states are listed on a single whitespace-separated line.
            final_states |= set(finals_file.readline().split())
        return start_state, final_states, transitions, states
| KanashinDmitry/GoMUN | Machine.py | Machine.py | py | 1,096 | python | en | code | 0 | github-code | 13 |
19199008855 | from core import LessonFactory
from pathlib import Path
import shutil
def setup(lesson_path):
for name in Path(__file__).parent.glob('*'):
if not name.is_dir():
shutil.copy(name, Path(lesson_path) / name.name)
else:
shutil.copytree(name, Path(lesson_path) / name.name)
lesson = LessonFactory('binary_exploitation', setup=setup)
| zawadm321/embsec-challenges | binary_exploitation/__init__.py | __init__.py | py | 376 | python | en | code | 1 | github-code | 13 |
73174678098 | import nltk
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
analyzer = SentimentIntensityAnalyzer()
def Analyse(headlines):
    """Average VADER sentiment scores across an iterable of headlines.

    :param headlines: iterable of headline strings.
    :return: dict with keys 'neg', 'neu', 'pos' and 'compound', each the
        mean per-category VADER score rounded to 3 decimal places.  An
        empty input returns all zeros instead of raising ZeroDivisionError.
    """
    headlines = list(headlines)
    totals = {'neg': 0, 'neu': 0, 'pos': 0, 'compound': 0}
    if not headlines:
        # Robustness: avoid dividing by len(headlines) == 0 below.
        return totals
    for line in headlines:
        # analyse the headline using vader
        scores = analyzer.polarity_scores(line)
        for key in totals:
            totals[key] += scores[key]
    for key in totals:
        # find the average by dividing by number of headlines
        totals[key] = round(totals[key] / len(headlines), 3)
    return totals
| lucyvjordan/News-Sentiment-Analysis-Comparison | analysis.py | analysis.py | py | 947 | python | en | code | 0 | github-code | 13 |
10041772824 | import numpy as np
from sklearn import preprocessing
import pandas as pd
# Load the training data; schema assumed to contain the four categorical
# columns dropped below -- TODO confirm against train_weather.csv.
X = pd.read_csv('train_weather.csv')
# Select only the object (string/categorical) columns for encoding.
X_1 = X.select_dtypes(include=[object])
le = preprocessing.LabelEncoder()
# NOTE(review): apply() re-fits the same LabelEncoder per column, so each
# column gets an independent integer mapping.
X_2 = X_1.apply(le.fit_transform)
# One-hot expand the label-encoded integers into indicator columns.
# NOTE(review): fitting OneHotEncoder on integer codes is a legacy
# sklearn pattern; newer sklearn can one-hot the strings directly.
enc = preprocessing.OneHotEncoder()
enc.fit(X_2)
onehotlabels = enc.transform(X_2).toarray()
# Drop the original categorical columns and prepend the one-hot block.
X=X.drop(['IntersectionId','EntryHeading','ExitHeading','City'], axis=1)
Xtrain=X.values
Xtrain=np.c_[onehotlabels,Xtrain]
np.savetxt('enc_train.csv', Xtrain, delimiter=',')
| Loielaine/545project | onehotenc.py | onehotenc.py | py | 485 | python | en | code | 0 | github-code | 13 |
73483329296 | import pandas as pd
import json
def proxy(source_path=''):
    """Build a DataFrame of HTTP/HTTPS/FTP proxy URLs from a JSON-lines file.

    :param source_path: path to a file with one JSON object per line, each
        containing a 'host' key.  Parameterized (the original hard-coded
        ``open('')``, which can never succeed); the default keeps the
        original zero-argument call signature working.
    :return: pandas DataFrame with columns 'http', 'https' and 'ftp'.
    """
    http = []
    https = []
    ftp = []
    with open(source_path) as source:
        for line in source:
            record = json.loads(line)
            host = record['host']
            # Standard well-known ports per scheme.
            http.append("http://" + host + ":80")
            https.append("https://" + host + ":443")
            ftp.append("ftp://" + host + ":21")
    # Three rows -> transpose so each scheme becomes a column.
    df = pd.DataFrame([http, https, ftp]).T
    df.columns = ['http', 'https', 'ftp']
    return df
| MagnusXu/WebCrawler | CityRealty/proxy_logs_from_text.py | proxy_logs_from_text.py | py | 572 | python | en | code | 1 | github-code | 13 |
42147189140 | from test_services import logger
from test_services.atf import wsgw
from test_services.atf.common import (
Message,
Event,
Command,
)
from typing import (
Callable,
Type,
)
from test_services.config import settings
import asyncio
import threading
import queue
class MessageBus:
    """
    Internal Communications Bus of the service.
    Messages will be sent to supplied consumers.
    Consumers are functions that accept an Event or Command and act on it.
    Consumers will return a list of further generated Messages during consumption,
    which will be passed on to the MessageBus for processing.
    """
    def __init__(
            self,
            command_consumers: dict[Type[Command], Callable],
            event_consumers: dict[Type[Event], list[Callable]],
            consumer_count: int = settings.MESSAGEBUS_THREAD_COUNT,
    ):
        # One consumer callable per Command type; a list of callables per Event type.
        self.command_consumers = command_consumers
        self.event_consumers = event_consumers
        self.startup(consumer_count=consumer_count)
        # Lock signatures ("Type.field=value") currently being processed,
        # used for idempotency in __consume_command.
        self.lock_store = list()
    def startup(self, consumer_count: int):
        """
        This space is used to initialize needed external connections.
        Intended to be run during object initiation, and any time that
        API connection configuration data needs to be reset.
        """
        # ATF WSGW Connection (Lazy, REST-HTTP API)
        self.wsgw = wsgw.Wsgw(
            wsgw.WsgwConfig(
                api_url=settings.WSGW_API,
                api_un=settings.SYS_UN,
                api_pw=settings.SYS_PW,
                domain=settings.WSGW_DOMAIN,
                debug=settings.DEBUG,
            )
        )
        # Unbounded queue: add_to_queue relies on puts never blocking.
        self.message_q = queue.Queue()
        # start consumers/workers
        # each consumer spawns its own thread (daemon: they die with the process)
        for i in range(consumer_count):
            threading.Thread(target=self.consumer, daemon=True).start()
        logger.info(f'Message Bus initialized with {consumer_count} asynchronous consumers.')
    def add_to_queue(self, messages: list[Message]) -> list[Message]:
        """
        Pass a list of messages to have them added to the Message Queue.
        Returns a list of Messages that could not be added.
        An Empty List represents that all Messages were successfully
        added to the Message Queue.
        Note that there is no backoff logic implemented in this method.
        Any backoff or retry logic will need to be implemented in the
        calling code.
        """
        messages_not_added: list[Message] = []
        for message in messages:
            try:
                self.message_q.put(message)
            except queue.Full as e:
                # if the queue is going to be capped at some point then this
                # exception will need to be handled in the calling code and
                # likely paired with retry logic determined by business needs
                logger.critical(
                    f'Attempt to add Messages {str(messages)} encountered queue.Full exception. This is not expected behavior, design is for uncapped queue. Check code comments. Please review any recent changes to message_bus.message_q configuration when looking for the culprit of this error.')
                messages_not_added.append(message)
        return messages_not_added
    async def __engine(self):
        """
        This is the core execution loop. Grab a message, consume message to
        execute matching code, report done, repeat.
        """
        # Queue.get() blocks this worker until a Message is available.
        while True:
            message = self.message_q.get()
            await self.__consume(message)
            self.message_q.task_done()
    def consumer(self):
        """
        Each consumer uses asyncio to run the core execution loop, abstracted
        to __engine.  Runs in its own thread with its own event loop.
        """
        asyncio.run(self.__engine())
    async def __consume(self, message: Message):
        """
        This function will check the type of the Message and delegate
        work to the correct method.
        """
        # process (consume) the Message
        try:
            if isinstance(message, Command):
                await self.__consume_command(message)
            elif isinstance(message, Event):
                await self.__consume_event(message)
            else:
                raise ValueError('message must be an Event or Command.')
        except Exception as e:
            # Swallow after logging so one bad Message never kills a worker.
            logger.debug(f"{message.cid} Exception encountered while processing {str(message)}", exc_info=True)
    async def __consume_command(self, command: Command):
        """
        Pass Command to matching command_consumer function to perform
        any processing work. This is where the seam between calling the
        command_consumer and executing its internal code occurs, allowing
        extensibility by adding components to command_consumers file.
        """
        logger.info(f'{command.cid} consuming command {command}')
        consumer = '(not captured)'
        # deal with idempotency constraints
        #
        # REFACTOR_ALERT://
        # ASYNC contention not being dealt with here due to currently perceived
        # low expectation of triggering. Between the time of lock attainment
        # scanning, and actual lock attainment or insertion into lock_store, another
        # Consumer could also believe it can acquire the lock, and both Consumers would
        # end up thinking that they acquired the lock, and the Message would be double
        # processed
        if command.field_locks:
            attained_locks = set()
            for lock in command.field_locks:
                lock_sig = f"{command.__class__}.{lock}={command.__getattribute__(lock)}"
                if lock_sig in self.lock_store:
                    # already being processed, discard after logging
                    logger.debug(f"{command.cid} lock conflict trying to acquire lock on {lock_sig} for {command}")
                else:
                    # OK to process, adding lock signature to lock_store
                    attained_locks.add(lock_sig)
                    logger.debug(f"{command.cid} adding {lock_sig} to attained_locks for {command}")
            # All-or-nothing: a partial lock set means another worker holds
            # at least one of them, so the whole Command is discarded.
            if not len(attained_locks) == len(command.field_locks):
                logger.debug(
                    f"{command.cid} not all locks could be attained, discarding Message {command} attained_locks={attained_locks}")
                return
            else:
                for lock_sig in attained_locks:
                    self.lock_store.append(lock_sig)
                    logger.debug(f"{command.cid} adding lock_sig {lock_sig} to lock_store")
        try:
            consumer = self.command_consumers[type(command)]
            # Command consumers also receive the WSGW API handle.
            messages = await consumer(command, self.wsgw)
        except Exception as e:
            logger.error(f'{command.cid} Exception occurred while {str(consumer)} was consuming {command}',
                         exc_info=True)
        else:
            # Follow-up Messages produced by the consumer are re-queued.
            self.add_to_queue(messages)
        finally:
            # check for membership in idempotent set (IF in THEN remove)
            if command.field_locks:
                for lock in command.field_locks:
                    lock_sig = f"{command.__class__}.{lock}={command.__getattribute__(lock)}"
                    self.lock_store.remove(lock_sig)
                    logger.debug(f"{command.cid} removed lock signature from lock_store: {lock_sig}")
    async def __consume_event(self, event: Event):
        """
        Pass Event to matching event_consumer function to perform
        any processing work. This is where the seam between calling the
        event_consumer and executing its internal code occurs, allowing
        extensibility by adding components to event_consumers file.
        """
        logger.info(f'{event.cid} processing {event}')
        # Unlike Commands, every registered consumer sees each Event.
        for consumer in self.event_consumers[type(event)]:
            logger.debug(f'{event.cid} {consumer.__name__} is consuming {event}')
            try:
                messages = await consumer(event)
            except Exception as e:
                logger.error(f'{event.cid} Exception occurred while {consumer} was consuming {event}', exc_info=True)
            else:
                self.add_to_queue(messages)
17057136894 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
from alipay.aop.api.domain.OpenApiInvoiceLinePreviewedOrder import OpenApiInvoiceLinePreviewedOrder
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
from alipay.aop.api.domain.MultiCurrencyMoneyOpenApi import MultiCurrencyMoneyOpenApi
class OpenApiOutputInvoicePreviewedOrder(object):
def __init__(self):
self._buyer_address = None
self._buyer_bank_account = None
self._buyer_bank_name = None
self._buyer_invoice_title = None
self._buyer_ip_role_id = None
self._buyer_mid = None
self._buyer_tax_no = None
self._buyer_telephone = None
self._drawer = None
self._full_electronic_tag = None
self._invoice_amt = None
self._invoice_channel = None
self._invoice_code = None
self._invoice_date = None
self._invoice_id = None
self._invoice_lines = None
self._invoice_material = None
self._invoice_no = None
self._invoice_note = None
self._invoice_status = None
self._invoice_type = None
self._is_online = None
self._is_red = None
self._mail_status = None
self._memo = None
self._no_bill_invoice_flag = None
self._payee = None
self._recent_mail_id = None
self._red_amt = None
self._reviewer = None
self._seller_address = None
self._seller_bank_account = None
self._seller_bank_name = None
self._seller_company_name = None
self._seller_inst_id = None
self._seller_tax_no = None
self._seller_telephone = None
self._tax_amt = None
self._tnt_inst_id = None
self._type = None
@property
def buyer_address(self):
return self._buyer_address
@buyer_address.setter
def buyer_address(self, value):
self._buyer_address = value
@property
def buyer_bank_account(self):
return self._buyer_bank_account
@buyer_bank_account.setter
def buyer_bank_account(self, value):
self._buyer_bank_account = value
@property
def buyer_bank_name(self):
return self._buyer_bank_name
@buyer_bank_name.setter
def buyer_bank_name(self, value):
self._buyer_bank_name = value
@property
def buyer_invoice_title(self):
return self._buyer_invoice_title
@buyer_invoice_title.setter
def buyer_invoice_title(self, value):
self._buyer_invoice_title = value
@property
def buyer_ip_role_id(self):
return self._buyer_ip_role_id
@buyer_ip_role_id.setter
def buyer_ip_role_id(self, value):
self._buyer_ip_role_id = value
@property
def buyer_mid(self):
return self._buyer_mid
@buyer_mid.setter
def buyer_mid(self, value):
self._buyer_mid = value
@property
def buyer_tax_no(self):
return self._buyer_tax_no
@buyer_tax_no.setter
def buyer_tax_no(self, value):
self._buyer_tax_no = value
@property
def buyer_telephone(self):
return self._buyer_telephone
@buyer_telephone.setter
def buyer_telephone(self, value):
self._buyer_telephone = value
@property
def drawer(self):
return self._drawer
@drawer.setter
def drawer(self, value):
self._drawer = value
@property
def full_electronic_tag(self):
return self._full_electronic_tag
@full_electronic_tag.setter
def full_electronic_tag(self, value):
self._full_electronic_tag = value
@property
def invoice_amt(self):
return self._invoice_amt
@invoice_amt.setter
def invoice_amt(self, value):
if isinstance(value, MultiCurrencyMoneyOpenApi):
self._invoice_amt = value
else:
self._invoice_amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def invoice_channel(self):
return self._invoice_channel
@invoice_channel.setter
def invoice_channel(self, value):
self._invoice_channel = value
@property
def invoice_code(self):
return self._invoice_code
@invoice_code.setter
def invoice_code(self, value):
self._invoice_code = value
@property
def invoice_date(self):
return self._invoice_date
@invoice_date.setter
def invoice_date(self, value):
self._invoice_date = value
@property
def invoice_id(self):
return self._invoice_id
@invoice_id.setter
def invoice_id(self, value):
self._invoice_id = value
@property
def invoice_lines(self):
return self._invoice_lines
@invoice_lines.setter
def invoice_lines(self, value):
if isinstance(value, list):
self._invoice_lines = list()
for i in value:
if isinstance(i, OpenApiInvoiceLinePreviewedOrder):
self._invoice_lines.append(i)
else:
self._invoice_lines.append(OpenApiInvoiceLinePreviewedOrder.from_alipay_dict(i))
@property
def invoice_material(self):
return self._invoice_material
@invoice_material.setter
def invoice_material(self, value):
self._invoice_material = value
@property
def invoice_no(self):
return self._invoice_no
@invoice_no.setter
def invoice_no(self, value):
self._invoice_no = value
@property
def invoice_note(self):
return self._invoice_note
@invoice_note.setter
def invoice_note(self, value):
self._invoice_note = value
@property
def invoice_status(self):
return self._invoice_status
@invoice_status.setter
def invoice_status(self, value):
self._invoice_status = value
@property
def invoice_type(self):
return self._invoice_type
@invoice_type.setter
def invoice_type(self, value):
self._invoice_type = value
# --- Generated accessor boilerplate (Alipay SDK pattern): one getter/setter
# --- pair per serialized field, each backed by a matching _underscore slot.
@property
def is_online(self):
    return self._is_online
@is_online.setter
def is_online(self, value):
    self._is_online = value
@property
def is_red(self):
    return self._is_red
@is_red.setter
def is_red(self, value):
    self._is_red = value
@property
def mail_status(self):
    return self._mail_status
@mail_status.setter
def mail_status(self, value):
    self._mail_status = value
@property
def memo(self):
    return self._memo
@memo.setter
def memo(self, value):
    self._memo = value
@property
def no_bill_invoice_flag(self):
    return self._no_bill_invoice_flag
@no_bill_invoice_flag.setter
def no_bill_invoice_flag(self, value):
    self._no_bill_invoice_flag = value
@property
def payee(self):
    return self._payee
@payee.setter
def payee(self, value):
    self._payee = value
@property
def recent_mail_id(self):
    return self._recent_mail_id
@recent_mail_id.setter
def recent_mail_id(self, value):
    self._recent_mail_id = value
@property
def red_amt(self):
    return self._red_amt
@red_amt.setter
def red_amt(self, value):
    # Accepts either a ready MultiCurrencyMoneyOpenApi instance or a raw
    # dict, coercing the latter through from_alipay_dict.
    if isinstance(value, MultiCurrencyMoneyOpenApi):
        self._red_amt = value
    else:
        self._red_amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def reviewer(self):
    return self._reviewer
@reviewer.setter
def reviewer(self, value):
    self._reviewer = value
@property
def seller_address(self):
    return self._seller_address
@seller_address.setter
def seller_address(self, value):
    self._seller_address = value
@property
def seller_bank_account(self):
    return self._seller_bank_account
@seller_bank_account.setter
def seller_bank_account(self, value):
    self._seller_bank_account = value
@property
def seller_bank_name(self):
    return self._seller_bank_name
@seller_bank_name.setter
def seller_bank_name(self, value):
    self._seller_bank_name = value
@property
def seller_company_name(self):
    return self._seller_company_name
@seller_company_name.setter
def seller_company_name(self, value):
    self._seller_company_name = value
@property
def seller_inst_id(self):
    return self._seller_inst_id
@seller_inst_id.setter
def seller_inst_id(self, value):
    self._seller_inst_id = value
@property
def seller_tax_no(self):
    return self._seller_tax_no
@seller_tax_no.setter
def seller_tax_no(self, value):
    self._seller_tax_no = value
@property
def seller_telephone(self):
    return self._seller_telephone
@seller_telephone.setter
def seller_telephone(self, value):
    self._seller_telephone = value
@property
def tax_amt(self):
    return self._tax_amt
@tax_amt.setter
def tax_amt(self, value):
    # Same money-type coercion as red_amt.
    if isinstance(value, MultiCurrencyMoneyOpenApi):
        self._tax_amt = value
    else:
        self._tax_amt = MultiCurrencyMoneyOpenApi.from_alipay_dict(value)
@property
def tnt_inst_id(self):
    return self._tnt_inst_id
@tnt_inst_id.setter
def tnt_inst_id(self, value):
    self._tnt_inst_id = value
@property
def type(self):
    return self._type
@type.setter
def type(self, value):
    self._type = value
def to_alipay_dict(self):
    """Serialize this order into a plain dict for the Alipay OpenAPI.

    Falsy attributes are omitted.  Values exposing to_alipay_dict() are
    serialized recursively, and the 'invoice_lines' list is converted
    element by element *in place*, matching the generated-SDK behaviour.
    The repetitive per-field if-blocks of the generated code are replaced
    by one data-driven loop; field order and semantics are unchanged.
    """
    field_names = (
        'buyer_address', 'buyer_bank_account', 'buyer_bank_name',
        'buyer_invoice_title', 'buyer_ip_role_id', 'buyer_mid',
        'buyer_tax_no', 'buyer_telephone', 'drawer', 'full_electronic_tag',
        'invoice_amt', 'invoice_channel', 'invoice_code', 'invoice_date',
        'invoice_id', 'invoice_lines', 'invoice_material', 'invoice_no',
        'invoice_note', 'invoice_status', 'invoice_type', 'is_online',
        'is_red', 'mail_status', 'memo', 'no_bill_invoice_flag', 'payee',
        'recent_mail_id', 'red_amt', 'reviewer', 'seller_address',
        'seller_bank_account', 'seller_bank_name', 'seller_company_name',
        'seller_inst_id', 'seller_tax_no', 'seller_telephone', 'tax_amt',
        'tnt_inst_id', 'type',
    )
    params = dict()
    for name in field_names:
        value = getattr(self, name)
        if not value:
            continue  # falsy fields are not serialized (original behaviour)
        if name == 'invoice_lines' and isinstance(value, list):
            # Convert list elements in place, exactly as the generated code did.
            for i in range(0, len(value)):
                element = value[i]
                if hasattr(element, 'to_alipay_dict'):
                    value[i] = element.to_alipay_dict()
        if hasattr(value, 'to_alipay_dict'):
            params[name] = value.to_alipay_dict()
        else:
            params[name] = value
    return params
@staticmethod
def from_alipay_dict(d):
    """Build an OpenApiOutputInvoicePreviewedOrder from a response dict.

    Returns None for a falsy input.  Each present key is assigned via
    setattr, which routes through the class property setters exactly like
    the generated ``o.key = d['key']`` statements did (so money fields are
    still coerced by their setters).
    """
    if not d:
        return None
    o = OpenApiOutputInvoicePreviewedOrder()
    for key in (
        'buyer_address', 'buyer_bank_account', 'buyer_bank_name',
        'buyer_invoice_title', 'buyer_ip_role_id', 'buyer_mid',
        'buyer_tax_no', 'buyer_telephone', 'drawer', 'full_electronic_tag',
        'invoice_amt', 'invoice_channel', 'invoice_code', 'invoice_date',
        'invoice_id', 'invoice_lines', 'invoice_material', 'invoice_no',
        'invoice_note', 'invoice_status', 'invoice_type', 'is_online',
        'is_red', 'mail_status', 'memo', 'no_bill_invoice_flag', 'payee',
        'recent_mail_id', 'red_amt', 'reviewer', 'seller_address',
        'seller_bank_account', 'seller_bank_name', 'seller_company_name',
        'seller_inst_id', 'seller_tax_no', 'seller_telephone', 'tax_amt',
        'tnt_inst_id', 'type',
    ):
        if key in d:
            setattr(o, key, d[key])
    return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/OpenApiOutputInvoicePreviewedOrder.py | OpenApiOutputInvoicePreviewedOrder.py | py | 23,254 | python | en | code | 241 | github-code | 13 |
28301016588 | """
Plot the calculation steps for the eddy feedback parameter
2x2 plot showing seasonal means of
1. Zonal-mean zonal wind
2. Horizontal EP-Flux Divergence
3. Product of the anomalies (vs time) of the (1) and (2), such that the covariance is
the sum over time
4. (3) but normalised by the standard deviation of (1) and (2), such that the
correlation coefficient is the sum over time
"""
import iris
from iris.analysis import MEAN, STD_DEV
import numpy as np
import matplotlib.pyplot as plt
import iris.plot as iplt
import cmcrameri
from eddy_feedback import datadir, plotdir
from eddy_feedback.figures import label_axes
def main():
    """Create the 2x2 figure of eddy-feedback calculation steps from ERA5 data.

    Panels: (a) zonal-mean zonal wind, (b) horizontal EP-flux divergence,
    (c) the covariance of their anomalies, (d) the normalised (correlation)
    form.  Saves the figure to plotdir and shows it interactively.
    """
    # Subset choices: NH mid-latitudes, DJF months only.
    latitude = (25, 72)
    months = ["Dec", "Jan", "Feb"]
    path = datadir / "eddy_feedback/daily_mean"
    ep_flux = iris.load_cube(path / "era5_daily_EP-flux-divergence_NDJFM.nc")
    u_zm = iris.load_cube(path / "era5_daily_zonal-mean-zonal-wind_NDJFM.nc")
    ep_flux = extract_subset(
        ep_flux,
        latitude=latitude,
        months=months,
    )
    u_zm = extract_subset(
        u_zm,
        latitude=latitude,
        months=months,
    )
    ep_flux_mean, ep_flux_anom, ep_flux_std_dev = calc_stats(ep_flux)
    u_zm_mean, u_zm_anom, u_zm_std_dev = calc_stats(u_zm)
    # Summing the covariance term over time gives the feedback parameter;
    # dividing by both standard deviations yields the correlation form.
    covariance = ep_flux_anom * u_zm_anom
    correlation = covariance / (ep_flux_std_dev * u_zm_std_dev)
    fig, axes = plt.subplots(2, 2, figsize=(8, 5), sharex="all", sharey="all")
    coords = ["season_year", "latitude"]
    # plt.axes(...) selects the current axes that each make_plot call draws on.
    plt.axes(axes[0, 0])
    make_plot(
        cube=u_zm,
        cmap="cubehelix_r",
        coords=coords,
        cbar_label="m s$^{-1}$",
        title=r"$\bar{\mathbf{u}}$",
        vmin=0
    )
    plt.axes(axes[0, 1])
    # 'cmc.vik' etc. are registered by the (side-effect) cmcrameri import.
    make_plot(
        cube=ep_flux,
        cmap="cmc.vik",
        coords=coords,
        cbar_label="m s$^{-2}$",
        title=r"$\nabla_\phi \mathbf{F}$",
    )
    plt.axes(axes[1, 0])
    make_plot(
        cube=covariance,
        cmap="cmc.broc",
        coords=coords,
        cbar_label="m$^{2}$ s$^{-3}$",
        title="Covariance",
    )
    plt.axes(axes[1, 1])
    make_plot(
        cube=correlation,
        cmap="cmc.broc",
        coords=coords,
        cbar_label=" ",
        title="Correlation",
    )
    label_axes(axes.flatten())
    plt.savefig(plotdir / "fig1_eddy-feedback-parameter_calculation_steps.png")
    plt.show()
def extract_subset(cube, latitude, months, pressure_level=None):
    """Subset *cube* to the given months/latitude band and form seasonal means.

    latitude: (south, north) bounds passed to cube.intersection.
    pressure_level: None keeps all levels, "depth_average" takes the mean over
        levels, any other value extracts that single level.
    Returns means aggregated by 'season_year'; the [1:-1] slice drops the
    first and last seasons, which may be incomplete at the record edges.
    """
    cube = cube.extract(iris.Constraint(month=months))
    cube = cube.intersection(latitude=latitude, ignore_bounds=True)
    if pressure_level == "depth_average":
        cube = cube.collapsed("pressure_level", MEAN)
    elif pressure_level is not None:
        cube = cube.extract(iris.Constraint(pressure_level=pressure_level))
    return cube.aggregated_by(["season_year"], MEAN)[1:-1]
def calc_stats(cube):
    """Return (time-mean, anomaly, standard deviation) of *cube* over 'season_year'."""
    time_mean = cube.collapsed("season_year", MEAN)
    deviations = cube - time_mean
    spread = cube.collapsed("season_year", STD_DEV)
    return time_mean, deviations, spread
def make_plot(cube, cmap, coords, cbar_label, title, vmin=-1):
    """Draw a pcolormesh of *cube* with a colour range symmetric about zero.

    Passing vmin=0 keeps zero as the lower bound (one-signed fields); any
    other vmin is replaced by -max|data| so the scale is symmetric.
    """
    peak = np.abs(cube.data).max()
    lower = 0 if vmin == 0 else -peak
    iplt.pcolormesh(cube, vmin=lower, vmax=peak, cmap=cmap, coords=coords)
    plt.colorbar(label=cbar_label)
    plt.title(title)
if __name__ == '__main__':
    # Script entry point.
    main()
| leosaffin/eddy_feedback | eddy_feedback/figures/fig1_efp_era5_calculation_steps.py | fig1_efp_era5_calculation_steps.py | py | 3,299 | python | en | code | 0 | github-code | 13 |
27609694768 | #[Exemplo 1] Escreva um programa que leia vรกrios nรบmeros inteiros e sรณ pare quando o usuรกrio digitar o valor 999.
# [Example 1] Read integers repeatedly; stop when the user types 999
# and print the sum of everything entered before it.
# Fix: the initialisation previously read "mumero" (typo), leaving a dead
# variable; "numero" is the name actually used below.
numero = soma = 0
while True:
    numero = int(input('Digite um número [999 to exit]: '))
    if numero == 999:
        break
    soma += numero
print(f'A soma vale {soma}')
| DIANAMOTTA/SENAC-TECH | Python/PythonAula08/exemplo001.py | exemplo001.py | py | 331 | python | pt | code | 0 | github-code | 13 |
10844223935 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Assigmnet in IPP course - DFA minimization
Author: Jakub Lukac
E-mail: xlukac09@stud.fit.vutbr.cz
Created: 06-04-2016
Testing: python3.4.4
"""
import sys
import argparse
from fsa import *
# Argument Parser with custom error function
class ArgumentParser(argparse.ArgumentParser):
    """argparse.ArgumentParser that maps argument failures to task exit codes.

    The assignment spec requires exit code 2 for --input errors, 3 for
    --output errors and 1 for any other argument error.
    """
    def error(self, message):
        # Mirror argparse's default error report on stderr.
        self.print_usage(sys.stderr)
        print('{0}: error: {1}'.format(self.prog, message), file=sys.stderr)
        # argparse raises ArgumentError internally before calling error();
        # recover it to learn which option failed.
        exc = sys.exc_info()[1]  # get ArgumentError object
        if exc:
            if exc.args[0].dest == 'input':
                exit(2)
            elif exc.args[0].dest == 'output':
                exit(3)
        exit(1)
# parse arguments using an ArgumentParser object
parser = ArgumentParser(description='DFA minimization', add_help=False)
# Operations are mutually exclusive: -f, -m and --analyze-string.
oper = parser.add_mutually_exclusive_group()
# define arguments
parser.add_argument('--help', action='store_true',
                    help='show this help message and exit')
parser.add_argument('--input', type=argparse.FileType('r'), default=sys.stdin, metavar='FILE',
                    help='(default: stdin)')
parser.add_argument('--output', type=argparse.FileType('w'), default=sys.stdout, metavar='FILE',
                    help='(default: stdout)')
oper.add_argument('-f', '--find-non-finishing', action='store_true', dest='find_non_fin',
                  help='find non finishing state, only one is possible otherwise return "0"')
oper.add_argument('-m', '--minimize', action='store_true',
                  help='minimize DFA')
parser.add_argument('-i', '--case-insensitive', action='store_true', dest='icase',
                    help='states names and symbols are case insensitive')
# BONUS WHT
parser.add_argument('-w', '--white-char', action='store_true', dest='white_delim',
                    help='white char as separator')
# BONUS MST
oper.add_argument('--analyze-string', dest='analyze',
                  help='analyze string as input for DFA')
args = parser.parse_args()
# print help
if args.help:
    parser.print_help()
    # Combining --help with any operation flag is an error (exit code 1).
    if args.find_non_fin or args.minimize or args.icase or args.white_delim:
        exit(1)
    exit(0)
fsa_string = ''
# read FSA from input file, dropping everything after a '#' comment marker
for line in args.input:
    fsa_string += line.split('#', 1)[0] + ' '
# normalize FSA input
fsa_string = fsa_string.strip()
if args.icase:
    fsa_string = fsa_string.lower()
fsa = Fsa(args.white_delim)
try:
    fsa.parse(fsa_string)
except Exception as err:
    # Fsa.parse raises with (message, exit_code) — propagate the code.
    msg, code = err.args
    print('{prog}:'.format(prog=sys.argv[0]), 'error:', msg, file=sys.stderr)
    exit(code)
valid, msg = fsa.isvalid_fsa()
if not valid:
    # 62 is the assignment's exit code for a well-formed but invalid FSA.
    print('{prog}:'.format(prog=sys.argv[0]), 'error:', msg, file=sys.stderr)
    exit(62)
if args.find_non_fin:
    # output non-finishing state
    if fsa.non_terminating_state:
        args.output.write(fsa.non_terminating_state)
    else:
        args.output.write('0')
elif args.minimize:
    # minimize FSA using algorithm from course Formal Languages and Compilers
    fsa.minimization()
    fsa.write(file=args.output)
elif args.analyze:
    # BONUS MST: minimize first, then run the string through the DFA.
    fsa.minimization()
    result = fsa.analyze_str(args.analyze)
    if result < 0:
        # Negative result marks symbols outside the DFA alphabet.
        print('{prog}:'.format(prog=sys.argv[0]), 'error: String', args.analyze,
              'include some unknown symbols', file=sys.stderr)
        exit(1)
    elif result == 0:
        args.output.write('0')
    else:
        args.output.write('1')
else:
    # no option - just print normalized FSA
    fsa.write(file=args.output)
exit(0)
| cubolu/School-Projects | Python/IPP-MKA/mka.py | mka.py | py | 3,553 | python | en | code | 0 | github-code | 13 |
def solution(n):
    """Print the number of ways to build a staircase from *n* bricks with
    strictly decreasing step heights (delegates to calculate()).

    Fix: removed the dead local `total`, which was assigned but never used.
    """
    # The tallest possible first step uses n - 1 bricks.
    current_level = n - 1
    print('result: ', calculate(n, current_level, 0, 0))
def calculate(n, currentLevel, totalParam, levelParam):
    """Recursive search over strictly-decreasing step sequences summing to n.

    totalParam is the brick count accumulated so far; returns 1 when a
    combination hits n exactly, 0 on overshoot, -1 when no smaller step is
    available, otherwise the number of valid completions found.
    The print statements are debug traces left in by the author.
    """
    level = levelParam + 1
    print(level, currentLevel, totalParam, sep=' ')
    count = 0
    if totalParam == n:
        return 1
    if totalParam > n:
        return 0
    if currentLevel <= 1:
        # Steps must stay >= 1 and strictly decrease: dead end signal.
        return -1
    total = totalParam
    while currentLevel >= 1:
        # Commit currentLevel bricks to this step, then try smaller next steps.
        total = totalParam + currentLevel
        result = 0
        nextLevel = currentLevel - 1
        while result != -1:
            print('--------------')
            print(level, currentLevel, totalParam, sep=' ')
            total += nextLevel
            result = calculate(n, nextLevel, total, level)
            if result == 1:
                print('count')
                count += 1
            total -= nextLevel
            nextLevel -= 1
        total -= currentLevel
        currentLevel -= 1
    return count
# Worked examples of the expected decompositions:
# solution(3)
# 3 -> 2 -> 1
#   -> 1
# solution(5)
# 5 -> 4 -> 1
# 5 -> 3 -> 2
solution(6)
# 6 -> 5 -> 1
# 6 -> 4 -> 2
# 6 -> 3 -> 2 -> 1
# solution(7)
# 7 -> 6 -> 1
# 7 -> 5 -> 2
# 7 -> 4 -> 3
# 7 -> 4 -> 2 -> 1
# solution(20) | xuanchuong/google-foobar | the-grandest-staircase-of-them-all/solution.py | solution.py | py | 1,238 | python | en | code | 0 | github-code | 13 |
19480904711 | import csv
# Merge county rows from allCountyData1.csv with matching rows from
# conreport2019.csv (joined on "<County> County, <State>") and append the
# combined rows to data_weather1.csv.

filename = "allCountyData1.csv"

rows = []
# reading csv file
with open(filename, 'r') as csvfile:
    # creating a csv reader object
    csvreader = csv.reader(csvfile)
    # extracting each data row one by one
    for row in csvreader:
        rows.append(row)
    # get total number of rows
    print("Total no. of rows: %d" % (csvreader.line_num))

# Index county rows by their join key once, turning the original nested
# O(n * m) scan into an O(n + m) lookup while keeping the output order.
county_rows_by_key = {}
for r in rows:
    key = str(r[1]) + " County, " + str(r[3])
    county_rows_by_key.setdefault(key, []).append(r)

filename = "conreport2019.csv"
rows_fin = []
with open(filename, 'r') as csvfile1:
    csvreader = csv.reader(csvfile1)
    for row in csvreader:
        # Combine this report row with every county row that matches it.
        for r in county_rows_by_key.get(str(row[1]), []):
            rows_fin.append(r + row)

# NOTE: 'a+' appends, so repeated runs accumulate rows in the output file.
with open('data_weather1.csv', 'a+', newline='') as file:
    data_writer = csv.writer(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    for row_fin in rows_fin:
        data_writer.writerow(row_fin)
| ahmed-boutar/Understanding-the-Environmental-Factors-that-Contribute-to-Spread-of-COVID-19 | DatasetBuilding/SourceCombinations/weather.py | weather.py | py | 1,188 | python | en | code | 0 | github-code | 13 |
26186388762 | #!/usr/bin/env python
from functools import partial
import numpy as np
import recombination_utils as recomb
import vdj_recombination as vdj
def driver_function(n_trials=100, n_iterations=1000, sz_of_alphabet=256, sz_of_genome=10):
    """Run repeated VDJ-recombination searches against an all-zero antigen,
    printing '<trial>\t<stop iteration>' for each run."""
    antigen = np.zeros(sz_of_genome, dtype='int16')
    mutate = partial(recomb.get_mutants, sz_of_alphabet=sz_of_alphabet)
    loss = partial(recomb.mean_absolute_error, antigen=antigen)
    for trial in range(1, n_trials + 1):
        stop, repertoire, affinities, weights = vdj.loop_with_vdj_recombination(
            mutate_op=mutate,
            loss_function=loss,
            n_iterations=n_iterations)
        print(f"{trial}\t{stop}")
if __name__ == '__main__':
    # Script entry point.
    driver_function()
| gstqtfr/somatic_recombination | vdj_recombination_driver.py | vdj_recombination_driver.py | py | 813 | python | en | code | 0 | github-code | 13 |
17158732722 | import sys
# kernprof injects `profile` as a builtin when run via `kernprof -l`; define
# a no-op fallback so the script no longer crashes with NameError when run
# directly with plain Python.
try:
    profile
except NameError:
    def profile(func):
        return func


@profile
def solve():
    """Read a number triangle from stdin and print the maximum top-to-bottom
    path sum (BOJ 1932).

    Input: first line n, then rows of 1..n space-separated integers.
    Bottom-up DP: table[i][j] is the best path sum reaching row i, column j.
    """
    read = sys.stdin.readline
    n = int(read())
    table = [[] for _ in range(n)]
    table[0].append(int(read()))
    for i in range(1, n):
        line = list(map(int, read().split()))
        # Left edge: only reachable from the cell directly above.
        table[i].append(table[i - 1][0] + line[0])
        for j in range(1, i):
            # Interior cells take the better of the two parents above.
            table[i].append(max(table[i - 1][j - 1], table[i - 1][j]) + line[j])
        # Right edge: only reachable from the diagonal parent.
        table[i].append(table[i - 1][-1] + line[-1])
    print(max(table[-1]))
solve()  # entry point: reads the triangle from stdin and prints the answer
| jiyolla/study-for-coding-test | BOJwithDongbinNa/1932/1932_alt.py | 1932_alt.py | py | 476 | python | en | code | 0 | github-code | 13 |
36925802192 | import platform
from tools.Background_subtraction_KNN import BackgroundSubtractionKNN
if __name__ == '__main__':
    # Output window size. NOTE(review): 1980 looks like a typo for 1920 — confirm.
    window_size = (1980, 1080)
    source_name = input("Video file name (stored in the video_files folder: ")
    # NOTE(review): the two hard-coded assignments below discard the user's
    # input — they appear to be debug leftovers; remove to honour the prompt.
    source_name = 'GX011307'
    source_name = 'GH010731_cut_orig'
    os_name = platform.system()
    bs = BackgroundSubtractionKNN(source_name, window_size, os_name)
    bs.get_screenshot_tool()
# bs.subtractor(True) | RSantos94/vessel-impact-detection | tools/screencapture_tool.py | screencapture_tool.py | py | 448 | python | en | code | 1 | github-code | 13 |
22396749042 | import numpy as np
import pandas as pd
# pyburst
from pyburst.mcmc import burstfit, mcmc_versions, mcmc_tools, mcmc_plot
from pyburst.grids import grid_analyser
from pyburst.observations import obs_tools
from pyburst.plotting import plot_tools
"""
quick n dirty module for synthetic data in MCMC paper (2019)
"""
# ==========================
# from: synth5_7-9, run=2
# ==========================
# True parameter values the synthetic observations were generated from:
# per-epoch accretion rates (mdot1-3) and base heating (qb1-3), composition
# (x, z), Newtonian/GR masses, distance term (d_b) and anisotropy ratio.
param_used = {'mdot1': 0.102,
              'mdot2': 0.1371,
              'mdot3': 0.1497,
              'x': 0.6971,
              'z': 0.0061,
              'qb1': 0.1551,
              'qb2': 0.1549,
              'qb3': 0.1774,
              'm_nw': 2.02,
              'm_gr': 1.6918,
              'd_b': 7.05839,
              'xi_ratio': 1.0190}
def plot_posteriors(chain, source, version, discard, cap=None):
    """Plot posterior distributions with the true synthetic values overlaid."""
    true_values = get_truth_values(source, version)
    mcmc_plot.plot_posteriors(chain, source=source, version=version, discard=discard,
                              cap=cap, truth_values=true_values)
def plot_contours(chain, source, version, discard, cap=None):
    """Plot contour (corner) plots with the true synthetic values overlaid."""
    true_values = get_truth_values(source, version)
    mcmc_plot.plot_contours(chain, source=source, version=version, discard=discard,
                            cap=cap, truth_values=true_values)
def get_truth_values(source, version):
    """Map each formatted parameter label to the true value it was generated with."""
    pkeys = mcmc_versions.get_parameter(source, version, 'param_keys')
    labels = plot_tools.convert_mcmc_labels(param_keys=pkeys)
    return {labels[i]: param_used[key] for i, key in enumerate(pkeys)}
def generate_synth_data(source, batches, run, mc_source, mc_version,
                        reproduce=True, free_params=('m_gr', 'd_b', 'xi_ratio'),
                        u_fedd_frac=0.08, u_fper_frac=0.01, noise_mag=0.01,
                        introduce_noise=True):
    """Generate and save a synthetic burst-observation table.

    reproduce=True reuses the fixed `param_used` dict (paper values);
    otherwise new parameters are drawn.  Gaussian multiplicative noise of
    magnitude *noise_mag* is optionally applied before saving via
    obs_tools.save_summary.
    """
    if reproduce:
        print('Reusing same params')
        params = param_used
    else:
        print('Generating new random params!')
        params = generate_params(source, batches=batches, run=run,
                                 mc_source=mc_source, mc_version=mc_version,
                                 free_params=free_params)
    table = setup_synth_table(source, batches=batches, run=run, mc_source=mc_source,
                              mc_version=mc_version, free_params=free_params,
                              params=params, u_fedd_frac=u_fedd_frac,
                              u_fper_frac=u_fper_frac)
    if introduce_noise:
        add_noise(table, magnitude=noise_mag)
    # add epoch column
    epochs = np.arange(1, len(batches) + 1)
    table['epoch'] = epochs
    # Bolometric correction fixed to exactly 1 (no correction) for synth data.
    table['cbol'] = 1.0
    table['u_cbol'] = 0.0
    obs_tools.save_summary(table, source=source)
def add_noise(table, magnitude=0.01):
    """Multiply every column of *table* in place by 1 + N(0, magnitude) noise."""
    print(f'adding noise: sigma={magnitude}')
    row_count = len(table)
    for column in table:
        factors = 1 + magnitude * np.random.normal(size=row_count)
        table[column] *= factors
def setup_synth_table(source, batches, run, mc_source, mc_version,
                      free_params=('m_gr', 'd_b', 'xi_ratio'),
                      params=None, u_fedd_frac=0.08, u_fper_frac=0.01):
    """Sample burst properties for *params* and tabulate value/uncertainty pairs.

    Returns a DataFrame with one column per burst property plus a matching
    'u_<prop>' uncertainty column; bprop_sample interleaves (value, error)
    pairs along the second axis of its output array.
    """
    if params is None:
        params = generate_params(source, batches=batches, run=run,
                                 mc_source=mc_source, mc_version=mc_version,
                                 free_params=free_params)
    bfit = burstfit.BurstFit(mc_source, version=mc_version, debug=False,
                             u_fper_frac=u_fper_frac, u_fedd_frac=u_fedd_frac)
    mv = mcmc_versions.McmcVersion(mc_source, version=mc_version)
    bprops = bfit.bprop_sample(x=None, params=params)
    table = pd.DataFrame()
    pd.set_option("display.precision", 5)
    for i, key in enumerate(mv.bprops):
        # Even columns hold values, odd columns the paired uncertainties.
        bp_i = 2 * i
        u_i = bp_i + 1
        u_key = f'u_{key}'
        table[key] = bprops[:, bp_i]
        table[u_key] = bprops[:, u_i]
    return table
def generate_params(source, batches, run, mc_source, mc_version,
                    free_params=('m_gr', 'd_b', 'xi_ratio')):
    """Assemble a full MCMC parameter dict: grid-derived values per epoch
    plus randomly drawn free parameters.

    NOTE(review): assumes every interp key is either epoch-unique (one value
    per batch, suffixed 1..n) or global — confirm against McmcVersion.
    """
    synth_grid = grid_analyser.Kgrid(source, use_sub_cols=True)
    mv = mcmc_versions.McmcVersion(mc_source, version=mc_version)
    n_epochs = len(batches)
    pkeys = mv.param_keys
    params = dict.fromkeys(pkeys)
    # ===== Pull model params from kepler grid =====
    for key in mv.interp_keys:
        key = mv.param_aliases.get(key, key)
        if key in mv.epoch_unique:
            # Epoch-unique: one value per batch, stored as e.g. 'mdot1'...
            for i in range(n_epochs):
                grid_params = synth_grid.get_params(batches[i], run)
                e_key = f'{key}{i+1}'
                # Grid column names differ for some keys ('mdot' -> 'accrate').
                grid_key = {'mdot': 'accrate'}.get(key, key)
                params[e_key] = float(grid_params[grid_key])
        else:
            # Global parameter: taken from the first batch.
            grid_key = {'m_nw': 'mass'}.get(key, key)
            grid_params = synth_grid.get_params(batches[0], run)
            params[key] = float(grid_params[grid_key])
    # ===== Randomly generate free params =====
    for key in free_params:
        params[key] = mcmc_tools.get_random_params(key, n_models=1, mv=mv)[0]
    return params
| zacjohnston/pyburst | pyburst/synth/synth_new.py | synth_new.py | py | 5,323 | python | en | code | 3 | github-code | 13 |
3938832720 | # In this file a nn created and trained with tensorflow will check if a given array is sorted or not.
import pandas as pd
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense
from sklearn.metrics import accuracy_score
import numpy as np
import random
lines = 20  # number of training rows to generate
size = 10   # integers (features) per row
def constructLine():
    """Build one training row: `size` consecutive ints from a random offset,
    shuffled ~50% of the time, with a sorted-flag label (1/0) appended."""
    offset = random.randint(0, 9)
    sample = list(range(offset, offset + size))
    roll = random.randint(1, 10)
    if roll > 5:
        # Shuffled variant: labelled 0 (unsorted).
        random.shuffle(sample)
        sample.append(0)
    else:
        sample.append(1)
    return sample
# Build the training matrix: `lines` rows of `size` features plus a label.
baseList = []
for line in range(0, lines):
    baseList.append(constructLine())
arr = np.array(baseList)
cols = []
for i in range(0, size):
    cols.append("Value" + str(i))
cols.append("sorted")
df = pd.DataFrame(arr, columns=cols)
X = pd.get_dummies(df.drop(["sorted"], axis=1))
Y = df["sorted"]
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=.2)
# Build the neural network (binary classifier: sorted vs. unsorted).
model = Sequential()
model.add(Dense(units=32, activation='relu', input_dim=len(X_train.columns)))
model.add(Dense(units=64, activation='relu'))
model.add(Dense(units=64, activation='relu'))
model.add(Dense(units=64, activation='relu'))
model.add(Dense(units=1, activation='sigmoid'))
# NOTE(review): Keras usually expects metrics as a list, e.g. ['accuracy'];
# confirm the bare string works with the TF version in use.
model.compile(loss='binary_crossentropy', optimizer='sgd', metrics='accuracy')
# Train the model.
model.fit(X_train, y_train, epochs=200, batch_size=32)
# Use the model and check how good the prediction is (0.5 threshold).
y_hat = model.predict(X_test)
y_hat = [0 if val < 0.5 else 1 for val in y_hat]
print(accuracy_score(y_test, y_hat))
print(X_test)
print(y_hat) | b2aff6009/ai-playground | tensorflow/sorted.py | sorted.py | py | 1,634 | python | en | code | 1 | github-code | 13 |
74449699217 | import tkinter as tk
from tkinter import ttk, NSEW
from StockData.data_keys import ASSET_TYPES
import tkinter.messagebox
# NOTE(review): HEIGHT/WIDTH are not referenced in the visible code — the
# window geometry below is hard-coded as '1500x700'; confirm before removing.
HEIGHT = 700
WIDTH = 1400
def configButtonCommand(button, command):
    """Attach *command* as the click callback of an existing tkinter *button*."""
    button.config(command=command)
def throwError(title, message):
    """Show a modal tkinter error dialog with the given title and message."""
    tk.messagebox.showerror(title, message)
class Gui(tk.Frame):
    """Main window of the investment-portfolio application.

    Builds a left-hand data-entry panel (asset name/type/volume/price/
    currency plus action buttons) and a right-hand scrollable text area.
    Button commands are attached externally via configButtonCommand().
    """

    def __init__(self, master=None):
        # Create the root window lazily: the previous `master=tk.Tk()`
        # default was evaluated once at class-definition time, opening a
        # window on import and sharing it between all instances.
        if master is None:
            master = tk.Tk()
        tk.Frame.__init__(self, master)
        self.parent = master
        self.parent.title('Project: Investment Portfolio')
        self.parent.geometry('1500x700')
        self.parent.resizable(True, True)
        # Fix: protocol() needs the protocol name; without it the close
        # handler was never registered.
        self.parent.protocol("WM_DELETE_WINDOW", self.close)
        self.frame = tk.Frame(self.parent)
        self.frame.grid(row=0, column=0, columnspan=1, rowspan=1, sticky=NSEW)
        # Left panel: data entry widgets.
        self.data = tk.Label(self.frame, background='#FAF0E6')
        self.data.pack(side='left', fill='both')
        self.assetLabel = tk.Label(self.data, text='Asset shortcut name: ', bg='#FAF0E6')
        self.assetLabel.config(font=("Arial", 10))
        self.assetLabel.grid(row=0, column=0, padx=5, pady=5)
        self.asset = tk.Entry(self.data, width=20, justify='center')
        self.asset.grid(row=1, column=0)
        self.assetTypeLabel = tk.Label(self.data, text='Asset type: ', bg='#FAF0E6')
        self.assetTypeLabel.config(font=("Arial", 10))
        self.assetTypeLabel.grid(row=2, column=0, padx=5, pady=5)
        self.assetType = ttk.Combobox(self.data, width=17, justify='center', state='readonly')
        self.assetType['values'] = ASSET_TYPES
        self.assetType.grid(row=3, column=0)
        self.assetType.current()
        self.volumeLabel = tk.Label(self.data, text='Asset volume: ', bg='#FAF0E6')
        self.volumeLabel.config(font=("Arial", 10))
        self.volumeLabel.grid(row=4, column=0, padx=5, pady=5)
        self.volume = tk.Entry(self.data, width=20, justify='center')
        self.volume.grid(row=5, column=0)
        self.priceLabel = tk.Label(self.data, text='Asset average price: ', bg='#FAF0E6')
        self.priceLabel.config(font=("Arial", 10))
        self.priceLabel.grid(row=6, column=0, padx=5, pady=5)
        self.price = tk.Entry(self.data, width=20, justify='center')
        self.price.grid(row=7, column=0)
        self.currencyLabel = tk.Label(self.data, text='Purchase currency:', bg='#FAF0E6')
        self.currencyLabel.config(font=("Arial", 10))
        self.currencyLabel.grid(row=8, column=0, padx=5, pady=5)
        self.currency = ttk.Combobox(self.data, width=17, justify='center', state='readonly')
        self.currency['values'] = ('PLN', 'USD', 'EUR')
        self.currency.grid(row=9, column=0)
        self.currency.current()
        self.buttonAdd = tk.Button(self.data, text='Add new asset', height=2, width=20, bg='#DEB887')
        self.buttonAdd.grid(row=10, column=0, padx=5, pady=10)
        self.buttonShowWallet = tk.Button(self.data, text='Show wallet', height=2, width=20, bg='#DEB887')
        self.buttonShowWallet.grid(row=12, column=0, padx=5, pady=10)
        self.percentLabel = tk.Label(self.data, text='Percent of value:', bg='#FAF0E6')
        self.percentLabel.config(font=("Arial", 10))
        self.percentLabel.grid(row=13, column=0, padx=5, pady=5)
        self.percent = tk.Entry(self.data, width=15, justify='center')
        self.percent.grid(row=14, column=0, padx=5, pady=5)
        self.percent.insert(0, '10')
        self.buttonShowPortfolio = tk.Button(self.data, text='Show portfolio', height=2, width=20, bg='#DEB887')
        self.buttonShowPortfolio.grid(row=16, column=0, padx=5, pady=10)
        self.buttonClear = tk.Button(self.data, text='Clear', height=2, width=20, bg='#DEB887')
        self.buttonClear.grid(row=17, column=0, padx=5, pady=10)
        self.buttonClear.config(command=self.clearScreen)
        # Right panel: scrollable output area.
        self.screen = tk.Label(self.frame)
        self.screen.pack(side='right', fill='both')
        self.text = tk.Text(self.screen, height=100, width=160)
        self.text.grid(row=0, column=0, sticky="nsew")
        self.scrollbar = ttk.Scrollbar(self.screen, command=self.text.yview)
        self.scrollbar.grid(row=0, column=1, sticky='nsew')
        self.text['yscrollcommand'] = self.scrollbar.set

    def close(self):
        # Fix: destroy the root window (ends the mainloop) instead of only
        # destroying this Frame, which left the empty window open.
        self.parent.destroy()

    def clearScreen(self):
        """Wipe the output text area."""
        self.text.delete(1.0, 'end')
| LeviSforza/Cryptocurrency-Investment-Portfolio | Investment-Portfolio/investment_gui.py | investment_gui.py | py | 4,304 | python | en | code | 0 | github-code | 13 |
1165119927 | import pandas as pd
import torch
from sklearn.model_selection import train_test_split
from torch import optim
from torch.utils.data import DataLoader
import torch.nn as nn
from src.utils.data_util import MyDataset
from src.models.MLP import MLP
# ่ฏปๅๆๆๆฐๆฎ
all_data_path = '../../Data/processed/merged_data_KNN.csv'
all_data = pd.read_csv(all_data_path)
# ๅๅ็นๅพๅๆ ็ญพ
all_data_x = all_data.iloc[:, 1:-1]
all_data_y = all_data['label']
# ๅๅๆต่ฏ้ๅ่ฎญ็ป้
train_data, test_data = train_test_split(all_data, test_size=0.2, random_state=2023)
train_data = MyDataset(train_data)
test_data = MyDataset(test_data)
# ่ฎพ็ฝฎ่ถ
ๅๆฐ
input_size = 107 # ่พๅ
ฅ็นๅพ็็ปดๅบฆ
hidden_size1 = 64 # ้่ๅฑๅคงๅฐ
hidden_size2 = 32
output_size = 6 # ่พๅบ็ฑปๅซ็ๆฐ้
learning_rate = 0.0005
num_epochs = 50
batch_size = 32
# ๅๅงๅๆฐๆฎๅ ่ฝฝๅจ
train_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size)
# ๅๅงๅๆจกๅใๆๅคฑๅฝๆฐ
model = MLP(input_size, hidden_size1, hidden_size2, output_size)
criterion = nn.CrossEntropyLoss() # ไบคๅๆๅคฑๅฝๆฐ
optimizer = optim.Adam(model.parameters(), lr=learning_rate) # Adamไผๅๅจ
# ่ฎญ็ปๆจกๅ
for epoch in range(num_epochs):
total_loss = 0.0
for features, labels in train_loader:
optimizer.zero_grad() # ๆขฏๅบฆๆธ
้ถ
outputs = model(features) # ่ฎก็ฎ่พๅบ
loss = criterion(outputs, labels) # ่ฎก็ฎๆๅคฑๅฝๆฐ
loss.backward() # ๅๅไผ ๆญ
optimizer.step() # ๆดๆฐๅๆฐ
total_loss += loss.item()
print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, total_loss))
# ๆต่ฏ้ไธ่ฏไผฐ
test_loader = DataLoader(test_data, shuffle=True, batch_size=batch_size)
# ๅจๆต่ฏ้ไธ่ฏไผฐๆจกๅ
model.eval()
with torch.no_grad():
correct = 0
total = 0
for features, labels in test_loader:
outputs = model(features)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
accuracy = correct / total
print('Test Accuracy: {:.2f}%'.format(accuracy * 100))
| winter-fairy/SwC | src/run_model/Run_MLP.py | Run_MLP.py | py | 2,153 | python | en | code | 0 | github-code | 13 |
3387361118 | import os
import pandas as pd
import numpy as np
import torch
from . import config
from . import dispatcher
from . import model
TEST_DATA = os.environ.get("TEST_DATA")
MODEL = os.environ.get("MODEL")
def predict(model_path):
df = pd.read_csv(TEST_DATA)
df = df[["id", "excerpt"]]
test_idx = df["id"].values
test_excerpts = tuple(df["excerpt"].to_list())
predictions = None
for FOLD in range(5):
df = pd.read_csv(TEST_DATA)
predictor = model.CommonLitBertBaseModel(
dispatcher.MODELS[MODEL], config.DEVICE
)
predictor.load_state_dict(torch.load(f"{model_path}/{MODEL}_{FOLD}_5.pt"))
predictor.to(config.DEVICE)
preds = predictor.predict(test_excerpts)
if FOLD == 0:
predictions = preds
else:
predictions += preds
predictions /= 5
sub = pd.DataFrame(
np.column_stack((test_idx, predictions)), columns=["id", "target"]
)
return sub
if __name__ == "__main__":
submission = predict(model_path="models/")
submission.to_csv(f"results/{MODEL}_submission.csv", index=False)
| RohanAwhad/kaggle-commonlit-readability | src/predict.py | predict.py | py | 1,131 | python | en | code | 0 | github-code | 13 |
24574360723 | # Exercรญcio 084 do curso de Python - Curso em vรญdeo
# Faรงa um programa que leia nome e peso de vรกrias pessoas,
# guardando tudo em uma lista. No final, mostre:
# A) Quantas pessoas foram cadastradas.
# B) Uma listagem com as pessoas mais pesadas.
# C) Uma listagem com as pessoas mais leves.
# Meu Cรณdigo
print('=' * 37)
print(' ' * 4 + 'Lista composta e anรกlise de dados')
print('=' * 37)
pessoa = list()
dadotemp = list()
maior = menor = 0
while True:
dadotemp.append(str(input('Nome: ')))
dadotemp.append(float(input('Peso: ')))
if len(pessoa) == 0:
maior = menor = dadotemp[1]
else:
if dadotemp[1] > maior:
maior = dadotemp[1]
if dadotemp[1] < menor:
menor = dadotemp[1]
pessoa.append(dadotemp[:])
dadotemp.clear()
cont = str(input('Deseja Continuar? [S/N] ')).strip().upper()[0]
if cont == 'N':
break
print('=' * 37)
print(f'Lista Cadastrada: {pessoa}')
print(f'Quantidade de Pessoas cadastradas: {len(pessoa)}')
print(f'O maior peso foi de {maior}Kg, Peso de ', end='')
for p in pessoa:
if p[1] == maior:
print(f'{p[0]}...', end='')
print()
print(f'O menor peso foi de {menor}Kg, Peso de ', end='')
for p in pessoa:
if p[1] == menor:
print(f'{p[0]}...', end='')
# Correรงรฃ - OK
| felipecabraloliveira/Python | curso-de-python-curso-em-video/scripts/exercicios/ex084.py | ex084.py | py | 1,310 | python | pt | code | 0 | github-code | 13 |
69975975059 | """
In statistics, the mode of a set of values is the value that appears most often. Write
code that processes an array of survey data, where survey takers have responded to
a question with a number in the range 1โ10, to determine the mode of the data set.
For our purpose, if multiple modes exist, any may be chosen.
"""
# what if the array is huge?
# refactored version of find-mode.py to speed it up, it is a โstockโ version of โfind the highestโ
survey_data = [4, 7, 3, 8, 9, 7, 3, 9, 9, 3, 3, 10] # values are in the range 1โ10
# survey_data = [4, 7, 2, 8, 9, 7, 2, 9, 2, 2, 3, 10] # values are in the range 1โ10
print(survey_data)
# 1) initialize histogram with zeros (counters at 0)
histogram = [0] * len(survey_data)
# 2) place a value/counter at a corresponding index
# e.g. you counted 1s ("all ones") 3 times in the list, so histogram[0] returns 3
# (you can also leave index 0 with value 0 and add one element so that you could do histogram[0] = 3)
for i, v in enumerate(survey_data):
histogram[v - 1] += 1
print(histogram)
most_frequent = 0
for i, v in enumerate(survey_data):
if histogram[i] > histogram[most_frequent]:
most_frequent = i
most_frequent += 1 # 0-indexing
print('The most frequent value and/or index: ', most_frequent)
| chicocheco/tlp-python | find-mode-refactored.py | find-mode-refactored.py | py | 1,287 | python | en | code | 0 | github-code | 13 |
17040631054 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayFinanceFinassistantcoreBotchatQueryModel(object):
def __init__(self):
self._chat = None
self._question = None
self._session_id = None
self._user_type = None
@property
def chat(self):
return self._chat
@chat.setter
def chat(self, value):
self._chat = value
@property
def question(self):
return self._question
@question.setter
def question(self, value):
self._question = value
@property
def session_id(self):
return self._session_id
@session_id.setter
def session_id(self, value):
self._session_id = value
@property
def user_type(self):
return self._user_type
@user_type.setter
def user_type(self, value):
self._user_type = value
def to_alipay_dict(self):
params = dict()
if self.chat:
if hasattr(self.chat, 'to_alipay_dict'):
params['chat'] = self.chat.to_alipay_dict()
else:
params['chat'] = self.chat
if self.question:
if hasattr(self.question, 'to_alipay_dict'):
params['question'] = self.question.to_alipay_dict()
else:
params['question'] = self.question
if self.session_id:
if hasattr(self.session_id, 'to_alipay_dict'):
params['session_id'] = self.session_id.to_alipay_dict()
else:
params['session_id'] = self.session_id
if self.user_type:
if hasattr(self.user_type, 'to_alipay_dict'):
params['user_type'] = self.user_type.to_alipay_dict()
else:
params['user_type'] = self.user_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayFinanceFinassistantcoreBotchatQueryModel()
if 'chat' in d:
o.chat = d['chat']
if 'question' in d:
o.question = d['question']
if 'session_id' in d:
o.session_id = d['session_id']
if 'user_type' in d:
o.user_type = d['user_type']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayFinanceFinassistantcoreBotchatQueryModel.py | AlipayFinanceFinassistantcoreBotchatQueryModel.py | py | 2,311 | python | en | code | 241 | github-code | 13 |
6155126224 | #-------------------------------------------------------------------------------------------------------------------
# Training - Processing EUMETSAT Data and Products (MTG) - Example 6: Lightning Imager (LI) Data - Flash Area
# Author: Diego Souza (INPE/CGCT/DISSM)
#-------------------------------------------------------------------------------------------------------------------
#==================================================================================================================#
# REQUIRED MODULES
#==================================================================================================================#
import matplotlib.pyplot as plt # plotting library
import hdf5plugin # for reading compressed data, a decompression library is needed
import glob # unix style pathname pattern expansion
import os # miscellaneous operating system interfaces
import numpy as np # import the Numpy package
import cartopy, cartopy.crs as ccrs # produce maps and other geospatial data analyses
import cartopy.feature as cfeature # common drawing and filtering operations
import pyproj # python interface to PROJ (cartographic projections and coordinate transformations library)
from pyresample import geometry # classes for describing different geographic areas using a mesh of points or pixels
from matplotlib.offsetbox import OffsetImage # change the image size (zoom)
from matplotlib.offsetbox import AnnotationBbox # creates an annotation using an OffsetBox
from satpy import Scene # scene object to hold satellite data
from satpy.writers import get_enhanced_image # get an enhanced version of dataset as an XRImage instance
#==================================================================================================================#
# CREATE A CUSTOM AREA
#==================================================================================================================#
# image extent (min lon, min lat, max lon, max lat)
extent = [3.0, 43.00, 17.00, 57.00] # Germany
# pyproj definitions
P = pyproj.Proj(proj='eqc', ellps='WGS84', preserve_units=True)
G = pyproj.Geod(ellps='WGS84')
x1,y1 = P(extent[1],extent[0])
x2,y2 = P(extent[3],extent[2])
# define km per degree
km_per_degree = 111.32
# calculate the total number of degrees in lat and lon extent
deg_lon = extent[2] - extent[0]
deg_lat = extent[3] - extent[1]
# calculate the number of pixels (width and height)
resolution = 2.0
width = (km_per_degree * deg_lon) / resolution
height = (km_per_degree * deg_lat) / resolution
# creating an area definition on the fly
area_id = 'my_area'
description = 'custom area'
proj_id = 'my_area'
x_size = int(width)
y_size = int(height)
area_extent = (y1,x1,y2,x2)
proj_dict = {'a': 6378169.0, 'b': 6378169.0,'units': 'm', 'lon_0': 0.0,'proj': 'eqc', 'lat_0': 0.0}
area_def = geometry.AreaDefinition(area_id, description, proj_id, proj_dict, x_size, y_size, area_extent)
#==================================================================================================================#
# DATA READING AND MANIPULATION
#==================================================================================================================#
# initialise Scene
path_to_testdata = '../samples/mtg/L2_run_flashes_201306201500_201306201501/'
scn = Scene(filenames=glob.glob(os.path.join(path_to_testdata, '*BODY*.nc')), reader='li_l2_nc', reader_kwargs={'with_area_definition': True})
# load the datasets/composites of interest.
scn.load(["flash_area"], upper_right_corner='NE')
# resample the scene to a specified area
scn_resampled = scn.resample(area_def)
#==================================================================================================================#
# PLOT THE IMAGE
#==================================================================================================================#
# plot size (width x height, in inches)
plt.figure(figsize=(8,8))
# define the projection and add coastlines and gridlnes
ax = plt.axes(projection=ccrs.PlateCarree())
# define the image extent
img_extent = [extent[0], extent[2], extent[1], extent[3]]
# add some map elements to the plot
ax.add_feature(cfeature.LAND, facecolor='dimgray')
ax.add_feature(cfeature.OCEAN, facecolor='black')
# add coastlines, borders and gridlines
ax.coastlines(resolution='10m', color='turquoise', linewidth=1.0)
ax.add_feature(cartopy.feature.BORDERS, edgecolor='white', linewidth=0.5)
gl = ax.gridlines(crs=ccrs.PlateCarree(), color='white', alpha=1.0, linestyle='--', linewidth=0.15, xlocs=np.arange(-180, 180, 5), ylocs=np.arange(-90, 90, 5), draw_labels=True)
gl.top_labels = False
gl.right_labels = False
gl.xpadding = -5
gl.ypadding = -5
gl.ylabel_style = {'color': 'white', 'size': 6, 'weight': 'bold'}
gl.xlabel_style = {'color': 'white', 'size': 6, 'weight': 'bold'}
# add a logo to the plot
my_logo = plt.imread('../ancillary/eumetsat_logo.png')
imagebox = OffsetImage(my_logo, zoom = 0.2)
ab = AnnotationBbox(imagebox, (0.85, 0.95), xycoords="axes fraction", frameon = True, zorder=6)
ax.add_artist(ab)
# read the time and date
date = scn_resampled["flash_area"].attrs['start_time']
date = date.strftime('%Y-%m-%d %H:%M UTC')
# add a title
plt.title(f'MTG-I1 LI Scientific Test Data - Flash Area\n{date}' , fontweight='bold', fontsize=10, loc='left')
plt.title('Space Week Nordeste 2023', fontsize=10, loc='right')
#==================================================================================================================#
# SHOW THE PLOT
#==================================================================================================================#
# show the image
from satpy.writers import get_enhanced_image
rgb = np.moveaxis(get_enhanced_image(scn_resampled["flash_area"]).data.values, 0, -1)
im = plt.imshow(rgb, extent=img_extent, origin='upper', interpolation='none')
plt.show() | diegormsouza/spaceweek2023 | mtg/script_06_li.py | script_06_li.py | py | 6,224 | python | en | code | 2 | github-code | 13 |
600580298 | ds = [] # danh sรกch cรกc cแบงu thแปง vร phแปฅc vแปฅ
def chuc_nang_1():
while True:
chon = int(input("Bแบกn chแปn nhแบญp can bo (1) hay giao vien (2): "))
# nhแบญp thรดng tin chung
ma_so = input("Nhแบญp mรฃ sแป: ")
ho_ten = input("Nhแบญp hแป tรชn: ")
que_quan = input("Nhแบญp quรช quรกn: ")
nam_sinh = int(input("Nhแบญp nฤm sinh: "))
if chon == 1:
# nhแบญp CanBo
san_ken = int(input("Nhแบญp sแป thร nh tรญch: "))
ct = CanBo(ma_so, ho_ten, que_quan, nam_sinh, san_ken)
ds.append(ct)
else:
# nhแบญp GiaoVien
bai_bao = int(input("Nhแบญp sแป nฤm: "))
pv = GiaoVien(ma_so, ho_ten, que_quan, nam_sinh, bai_bao)
ds.append(pv)
tiep = input("Bแบกn cรณ nhแบญp nแปฏa khรดng? (C/K): ")
if tiep.upper() == "K":
break
def chuc_nang_3():
for x in ds:
if x.khen_thuong():
print(x)
def chuc_nang_4():
ma = input("Nhแบญp mรฃ cแบงn tรฌm: ")
for x in ds:
if x.ma_so.upper() == ma.upper():
print(x)
break
else:
print("Khรดng tรฌm thแบฅy")
from cau3.CanBo import NVSX
from Demo5.nhanvienvanphong import NVVP
def chuc_nang_5():
ma = input("Nhแบญp mรฃ cแบงn tรฌm: ")
for i in range(0, len(ds)):
if ds[i].ma_so.upper() == ma.upper():
# sแปญa ds[i]
ds[i].ho_ten = input("Nhแบญp hแป tรชn: ")
ds[i].que_quan = input("Nhแบญp quรช quรกn: ")
ds[i].nam_sinh = int(input("Nhแบญp nฤm sinh: "))
if isinstance(ds[i], CanBo):
ds[i].san_kien = int(input("Nhแบญp san kien: "))
else:
ds[i].bai_bao = int(input("Nhแบญp bai bao: "))
break
else:
print("Khรดng tรฌm thแบฅy")
def chuc_nang_7():
for x in ds:
if isinstance(x, CanBo):
if x.bai_bao() == False:
print(x)
def menu():
while True:
print("1. Nhแบญp vร o danh sรกch cแบงu thแปง vร phแปฅc vแปฅ\n"
"3. In ra man hinh danh sach can bo, giao vien ฤฦฐแปฃc khen thฦฐฦกng\n"
"4. Tรฌm vร in ra mร n hinh thรดng tin cua can bแป, giao vien co ma duoc nhap tu ban phim\n"
"5. Tรฌm vร sแปญa thรดng tin cแปงa can bo, giao vien cรณ mรฃ ma ฤฦฐแปฃc nhแบญp tแปซ bร n phรญm\n"
"7. In ra danh sรกch cรกc giao vien khรดng co bai bao\n"
"10. Thoรกt")
chon = int(input("Mแปi bแบกn chแปn chแปฉc nฤng: "))
if chon == 1:
chuc_nang_1()
elif chon == 3:
chuc_nang_3()
elif chon == 4:
chuc_nang_4()
elif chon == 5:
chuc_nang_5()
elif chon == 7:
chuc_nang_7()
elif chon == 10:
break
else:
print("Bแบกn chแปn sai, mแปi chแปn lแบกi")
# main
menu() | thaicanhth/PYTHON | cau3/qlttAPP.py | qlttAPP.py | py | 3,050 | python | vi | code | 0 | github-code | 13 |
36374510899 | n,k=[int(i) for i in input().split()]
a=[int(i) for i in input().split()]
mod=mod = (10**9)+7
p = [[a[0],0]]
for i in range(1,n):
while p[0][1] + k < i:
p.pop(0)
temp = p[0][0]*a[i]
while p[-1][0]>temp and len(p) != 1:
p.pop(-1)
p.append([temp,i])
print(p[-1][0]%mod)
| k0malSharma/Competitive-programming | CHRL4.py | CHRL4.py | py | 309 | python | en | code | 1 | github-code | 13 |
19717436517 | class GolfClub():
"""
GolfClub class
Attributes:
hand: str
example: 'left'
brand: str
example: 'TaylorMade'
"""
def __init__(self, hand, brand):
self.hand = hand
self.brand = brand
@property
def choose(self):
print(f'Grab the {self.hand} hand {self.brand} club')
@staticmethod
def swing():
print(f'Grib it and rip it')
if __name__ == "__main__":
g1 = GolfClub(hand='right', brand='Callaway')
print(g1.hand, g1.brand)
g1.choose
g2 = GolfClub(hand='left', brand='Ping')
print(g2.hand, g2.brand)
g2.swing()
| ekselan/lambdata-abw | abw_helpers/golf_clubs.py | golf_clubs.py | py | 644 | python | en | code | 0 | github-code | 13 |
38457300085 | from bs4 import BeautifulSoup
from urllib.parse import urljoin
from find_xss import find_xss_vulnerability
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.edge.options import Options
def get_page(url):
edge_options = Options()
edge_options.use_chromium = True
edge_options.add_argument("--headless")
driver = webdriver.Edge(options=edge_options)
driver.get(url)
page_source = driver.page_source
driver.quit()
return page_source
def find_links(html, base_url):
soup = BeautifulSoup(html, "html.parser")
links = []
for link in soup.find_all("a", href=True):
# if base_url in link:
new_url = urljoin(base_url, link["href"])
links.append(new_url)
return links
def scrape_page(main_url, max_pages=10):
visited_urls = set()
urls_to_visit = [main_url]
xss_found = False
print("Searching for XSS vulnerabilities... plz be patient, I am slow :')...")
while urls_to_visit and len(visited_urls) < max_pages:
current_url = urls_to_visit.pop(0)
if current_url in visited_urls:
continue
# print(f"Searching page: {current_url}")
visited_urls.add(current_url)
html = get_page(current_url)
if html:
new_links = find_links(html, current_url)
# print("number of urls found: ", len(urls_to_visit))
urls_to_visit.extend(new_links)
if find_xss_vulnerability(current_url) == True:
xss_found = True
if not xss_found:
print("No XSS vulnerabilities found") | ahnaftazwar368/ivf | scraping.py | scraping.py | py | 1,634 | python | en | code | 0 | github-code | 13 |
18887999310 | # Basic requirements
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Train - test split
from sklearn.model_selection import train_test_split
# For XGBoost
from xgboost import XGBClassifier
from scipy.stats import uniform
from sklearn.model_selection import RandomizedSearchCV
# For flair
from flair.nn import Classifier
from flair.data import Sentence
# For metrics
from sklearn.metrics import confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import average_precision_score
#from sklearn.metrics import make_scorer
from sklearn.metrics import precision_recall_curve
from sklearn import metrics
# For cross validatiion
from sklearn.model_selection import cross_validate
#from sklearn.model_selection import cross_val_score,KFold
from sklearn.model_selection import StratifiedKFold
# For Naive Bayes classifier
from sklearn.naive_bayes import GaussianNB
# For logistic regression
from sklearn.linear_model import LogisticRegression
# For non-linear SVM
from sklearn.svm import NuSVC
# For VADER
import nltk
#nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# for saving models
import joblib
def sa_train_test_split(reviews_csv):
'''
input : processed reviews_csv DataFrame
output : 2 DataFrames - train_data (70%), test_data(30%)
function : Splits input data into train and test
'''
# Specify shuffle = True, to shuffle the data before splitting to avoid bias due to order
# Stratify = sentiment, to ensure train and test have same ratio of postive to negative reviews
train_data, test_data = train_test_split(reviews_csv, test_size=0.3, shuffle=True, stratify=reviews_csv['Sentiment'], random_state=4263)
train_size = train_data['Sentiment'].value_counts().to_list()
test_size = test_data['Sentiment'].value_counts().to_list()
plotdata = pd.DataFrame({"positive":[train_size[0], test_size[0]],
"negative":[train_size[1], test_size[1]],
},
index=["Train", "Test"])
plotdata.plot(kind = "bar", rot = 0, title = 'Class Distribution in Train-Test Split')
return train_data, test_data
def evaluate_model_test(true_sent, predicted_sent, predicted_prob):
'''
input : List of true sentiment label, predicted sentiment label, predicted_probability
output : Print classification_report, scores and confusion matrix, pr_auc curve
function : Prints classification_report and evaluation metrics on test data
'''
print("TEST RESULTS")
print("Classification Report")
print(classification_report(true_sent, predicted_sent))
# Print Score
print("PR_AUC score: ", average_precision_score(true_sent, predicted_prob)) # pos_label=0
## Print Confusion Matrix
plt.figure(1)
labels = ['Negative (0)', 'Positive (1)']
cm = confusion_matrix(true_sent, predicted_sent)
disp = ConfusionMatrixDisplay(confusion_matrix = cm, display_labels = labels)
disp.plot(cmap = 'GnBu')
plt.title('Confusion Matrix')
plt.show()
## Plot PR_AUC Curve
plt.figure(2)
precision, recall, _ = precision_recall_curve(true_sent, predicted_prob)
# create precision-recall curve
plt.plot(recall, precision, marker='.', label = 'PR_AUC of Model')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('PR_AUC Curve')
plt.legend()
plt.show()
def train_XGB(train_data, test_data):
'''
input : 2 DataFrames 'train_data' and 'test_data'
output : None
function : Fits XGBoost Classifier on the train_data, and saves model. Evaluate performance on test_data
'''
# create a default XGBoost classifier
model = XGBClassifier(
random_state=42,
)
# Create the grid search parameter grid and scoring funcitons
param_grid = {
"learning_rate": [0.1],
"colsample_bytree": [0.8],
"subsample": [0.6],
"max_depth": [3],
"n_estimators": [400],
"reg_lambda": [1],
"gamma": [0.1],
}
# Define scoring metrics
score_metrics = ['accuracy', 'f1', 'f1_weighted', 'average_precision']
# create the Kfold object
num_folds = 5
kfold = StratifiedKFold(n_splits=num_folds)
# create the grid search object
n_iter = 1
grid = RandomizedSearchCV(
estimator=model,
param_distributions=param_grid,
cv=kfold,
scoring=score_metrics,
n_jobs=-1,
n_iter=n_iter,
refit='f1_weighted',
)
# fit grid search
best_model = grid.fit(train_data.iloc[: , :-1], train_data['Sentiment'])
# Save cross-validation results
cv_results = pd.DataFrame(best_model.cv_results_)
# save model
best_model.best_estimator_.save_model("root/models/sa/xgb_model.json")
print("CROSS VALIDATION RESULTS XGBoost")
print("Average Cross Validation score accuracy: ", cv_results['mean_test_accuracy'][0])
print("Average Cross Validation score F1: ", cv_results['mean_test_f1'][0])
print("Average Cross Validation score F1_weighted: ", cv_results['mean_test_f1_weighted'][0])
print("Average Cross Validation score pr_auc: ", cv_results['mean_test_average_precision'][0])
# print(best_model.best_estimator_)
# Predict on test data
xgb_probs = best_model.predict_proba(test_data.iloc[: , :-1])
xgb_sentiment = best_model.predict(test_data.iloc[: , :-1])
xgb_probs_df = pd.DataFrame(data = xgb_probs, columns = ['NEGATIVE', 'POSITIVE'])
# Evalaute goodness of fit of model on test data
evaluate_model_test(test_data['Sentiment'], xgb_sentiment, xgb_probs_df['POSITIVE'])
def bayes_classifier(train_data, test_data):
'''
input : 2 DataFrames 'train_data' and 'test_data'
output : None
function : Fits Naive Baye's Classifier on the train_data. Evaluates performance on test_data
'''
NB = GaussianNB()
# create the Kfold object
kf = StratifiedKFold(n_splits=5)
score_metrics = ['accuracy', 'f1', 'f1_weighted', 'average_precision']
cv_results = cross_validate(NB, train_data.iloc[: , :-1], train_data['Sentiment'], cv = kf, scoring = score_metrics)
# Print cross validation scores
print("CROSS VALIDATION RESULTS")
print("Average Cross Validation score accuracy :{}".format(cv_results['test_accuracy'].mean()))
print("Average Cross Validation score F1 :{}".format(cv_results['test_f1'].mean()))
print("Average Cross Validation score F1_weighted :{}".format(cv_results['test_f1_weighted'].mean()))
print("Average Cross Validation score pr_auc :{}".format(cv_results['test_average_precision'].mean()))
# Final fit
NB.fit(train_data.iloc[: , :-1], train_data['Sentiment'])
# Predict sentiment labels and probabilities
NB_pred_sentiment = NB.predict(test_data.iloc[: , :-1])
NB_pred_prob = NB.predict_proba(test_data.iloc[: , :-1])
NB_pred_prob_df = pd.DataFrame(data = NB_pred_prob, columns = ['NEGATIVE', 'POSITIVE'])
# Evaluate goodness-of-fit of model on test-data
evaluate_model_test(test_data['Sentiment'], NB_pred_sentiment, NB_pred_prob_df['POSITIVE'])
def logistic_regression(train_data, test_data):
'''
input : 2 DataFrames 'train_data' and 'test_data'
output : None
function : Fits Logistic Regression Model on the train_data. Evaluates performance on test_data
'''
logreg = LogisticRegression()
# create the Kfold object
kf = StratifiedKFold(n_splits=5)
score_metrics = ['accuracy', 'f1', 'f1_weighted', 'average_precision']
cv_results = cross_validate(logreg, train_data.iloc[: , :-1], train_data['Sentiment'], cv = kf, scoring = score_metrics)
# Print cross validation scores
print("CROSS VALIDATION RESULTS")
print("Average Cross Validation score accuracy :{}".format(cv_results['test_accuracy'].mean()))
print("Average Cross Validation score F1 :{}".format(cv_results['test_f1'].mean()))
print("Average Cross Validation score F1_weighted :{}".format(cv_results['test_f1_weighted'].mean()))
print("Average Cross Validation score pr_auc :{}".format(cv_results['test_average_precision'].mean()))
# Final fit
logreg.fit(train_data.iloc[: , :-1], train_data['Sentiment'])
# Predict sentiment labels and probabilities
logreg_pred_sentiment = logreg.predict(test_data.iloc[: , :-1])
logreg_pred_prob = logreg.predict_proba(test_data.iloc[: , :-1])
logreg_pred_prob_df = pd.DataFrame(data = logreg_pred_prob, columns = ['NEGATIVE', 'POSITIVE'])
# Evaluate goodness-of-fit of model on test-data
evaluate_model_test(test_data['Sentiment'], logreg_pred_sentiment, logreg_pred_prob_df['POSITIVE'])
def svc_model(train_data, test_data):
'''
input : 2 DataFrames 'train_data' and 'test_data'
output : None
function : Fits Support Vector Machine (SVM) Classifier on the train_data and saves model.
Evaluates performance on test_data
'''
svm = NuSVC(gamma="auto", probability = True)
# create the Kfold object
kf = StratifiedKFold(n_splits=5)
score_metrics = ['accuracy', 'f1', 'f1_weighted', 'average_precision']
cv_results = cross_validate(svm, train_data.iloc[: , :-1], train_data['Sentiment'], cv = kf, scoring = score_metrics)
# Print cross validation scores
print("CROSS VALIDATION RESULTS")
print("Average Cross Validation score accuracy :{}".format(cv_results['test_accuracy'].mean()))
print("Average Cross Validation score F1 :{}".format(cv_results['test_f1'].mean()))
print("Average Cross Validation score F1_weighted :{}".format(cv_results['test_f1_weighted'].mean()))
print("Average Cross Validation score pr_auc :{}".format(cv_results['test_average_precision'].mean()))
# Final fit
svm.fit(train_data.iloc[: , :-1], train_data['Sentiment'])
# Save model
joblib.dump(svm, 'root/models/sa/svm_model.pkl')
# Predict sentiment labels and probabilities
svm_pred_sentiment = svm.predict(test_data.iloc[: , :-1])
svm_pred_prob = svm.predict_proba(test_data.iloc[: , :-1])
svm_pred_prob_df = pd.DataFrame(data = svm_pred_prob, columns = ['NEGATIVE', 'POSITIVE'])
# Evaluate goodness-of-fit of model on test-data
evaluate_model_test(test_data['Sentiment'], svm_pred_sentiment, svm_pred_prob_df['POSITIVE'])
def vader(train_data, test_data):
'''
input : 2 DataFrames 'train_data' and 'test_data'
output : None
function : Evaluates performance of pretrained Vader Model on the train_data and test_data
'''
SIA = SentimentIntensityAnalyzer()
# Performance of Train Data
vader_train_results = pd.DataFrame()
vader_train_results['VADER_dict'] = train_data['Text'].apply(lambda text: SIA.polarity_scores(text))
vader_train_results['VADER_score'] = vader_train_results['VADER_dict'].apply(lambda sent_dict: sent_dict['compound'])
vader_train_results['VADER_label'] = 0
# If compound > 0 -> 1 else compund < 0 -> 0
vader_train_results.loc[vader_train_results['VADER_score'] > 0, 'VADER_label'] = 1
vader_train_results.loc[vader_train_results['VADER_score'] < 0, 'VADER_label'] = 0
vader_train_results['VADER_prob'] = vader_train_results['VADER_dict'].apply(lambda sent_dict: sent_dict['pos'])
print("TRAIN DATA RESULTS")
print("Train accuracy :{}".format(accuracy_score(train_data['Sentiment'], vader_train_results['VADER_label'])))
print("Train F1 :{}".format(f1_score(train_data['Sentiment'], vader_train_results['VADER_label'])))
print("Train F1_weighted :{}".format(f1_score(train_data['Sentiment'], vader_train_results['VADER_label'], average='weighted')))
print("Train pr_auc :{}".format(average_precision_score(train_data['Sentiment'], vader_train_results['VADER_prob'])))
# Performance of Test Data
vader_results = pd.DataFrame()
vader_results['VADER_dict'] = test_data['Text'].apply(lambda text: SIA.polarity_scores(text))
vader_results['VADER_score'] = vader_results['VADER_dict'].apply(lambda sent_dict: sent_dict['compound'])
vader_results['VADER_label'] = 0
# If compound > 0 -> 1 else compund < 0 -> 0
vader_results.loc[vader_results['VADER_score'] > 0, 'VADER_label'] = 1
vader_results.loc[vader_results['VADER_score'] < 0, 'VADER_label'] = 0
vader_results['VADER_prob'] = vader_results['VADER_dict'].apply(lambda sent_dict: sent_dict['pos'])
evaluate_model_test(test_data['Sentiment'], vader_results['VADER_label'], vader_results['VADER_prob'])
def final_svm_full_model(train_data):
'''
input : full preprocessed train data
output : final svm model
function : Refit svm model on comeplete training data and save the final model for future unseen predictions
'''
svm = NuSVC(gamma="auto", probability = True)
# create the Kfold object
kf = StratifiedKFold(n_splits=5)
score_metrics = ['accuracy', 'f1', 'f1_weighted', 'average_precision']
cv_results = cross_validate(svm, train_data.iloc[: , :-1], train_data['Sentiment'], cv = kf, scoring = score_metrics)
# Print cross validation scores
print("CROSS VALIDATION RESULTS")
print("Average Cross Validation score accuracy :{}".format(cv_results['test_accuracy'].mean()))
print("Average Cross Validation score F1 :{}".format(cv_results['test_f1'].mean()))
print("Average Cross Validation score F1_weighted :{}".format(cv_results['test_f1_weighted'].mean()))
print("Average Cross Validation score pr_auc :{}".format(cv_results['test_average_precision'].mean()))
# Final fit
svm.fit(train_data.iloc[: , :-1], train_data['Sentiment'])
# Save model
joblib.dump(svm, 'root/models/sa/final_svm_model.pkl')
if __name__ == "__main__":
input_data = pd.read_csv(rf"root/src/data/sa/features_train_sa.csv")
final_svm_full_model(input_data)
| nivii26/DSA4263-Voice-of-Customer-VOC-analysis | root/src/model/sa/sa_train.py | sa_train.py | py | 14,106 | python | en | code | 2 | github-code | 13 |
22544669558 | # ์ ํ์์ ํ๋ณํ๊ธฐ
import math
def solution(a, b):
    """Return 1 when the fraction a/b is a finite decimal, 2 otherwise."""
    divisor = math.gcd(a, b)
    denominator = b // divisor
    # Collect the prime factors of the reduced denominator: a fraction is a
    # finite decimal exactly when that set contains nothing besides 2 and 5.
    allowed = {2, 5}
    factors = set()
    while denominator != 1:
        if denominator % 2 == 0:
            factors.add(2)
            denominator //= 2
        elif denominator % 5 == 0:
            factors.add(5)
            denominator //= 5
        else:
            # Any leftover factor other than 2 or 5 makes the decimal repeat.
            factors.add(denominator)
            break
    return 1 if not factors - allowed else 2
# Without the set comparison: a/b is a finite decimal iff dividing the reduced
# denominator by all its factors of 2 and 5 leaves exactly 1.
def solution(a, b):
    """Return 1 when a/b is a finite decimal, 2 otherwise."""
    # Bug fix: the module is imported as `math`, so the bare `gcd(a, b)` call
    # raised NameError at runtime; it must be `math.gcd`.
    b //= math.gcd(a, b)
    while b % 2 == 0:
        b //= 2
    while b % 5 == 0:
        b //= 5
    return 1 if b == 1 else 2
| WeeYoungSeok/python_coding_study | programmers_100/problem_82.py | problem_82.py | py | 721 | python | ko | code | 0 | github-code | 13 |
39722694719 | # -*- coding: utf-8 -*-
# import matplotlib.pyplot as plt
import warnings
import numpy as np
from sklearn.linear_model import LinearRegression
from tabulate import tabulate
from helpers import *
from itertools import chain
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")

NUM_PARTITIONS = 10

# K-fold cross-validation of a linear regression: shuffle the labelled data,
# split it into NUM_PARTITIONS contiguous chunks, and score each chunk while
# training on the rest.
print("reading data...")
labels, data = load_data("data/real_data.txt")
np.random.shuffle(data)

CHUNK_SIZE = len(data) // NUM_PARTITIONS
table = []
for fold in range(NUM_PARTITIONS):
    print(f"Iteration {fold + 1}/{NUM_PARTITIONS}")
    lo = fold * CHUNK_SIZE
    hi = min(lo + CHUNK_SIZE, len(data))
    validation = data[lo:hi]
    train = np.concatenate([data[:lo], data[hi:len(data)]])

    # Column 0 holds the raw sample, columns 2+ hold the numeric targets.
    x_train = [extract_features(sample[0])[1] for sample in train]
    y_train = [np.array([float(v) for v in sample[2:]]) for sample in train]

    reg_model = LinearRegression()
    reg_model.fit(x_train, y_train)

    x_val = [extract_features(sample[0])[1] for sample in validation]
    y_val = [np.array([float(v) for v in sample[2:]]) for sample in validation]

    pred = reg_model.predict(x_val)
    table.append(calculate_error(pred, np.array(y_val)))

# Write per-target mean and standard deviation of the fold errors.
with open("linear_cross.csv", "w") as f:
    results = np.array(table)
    f.write(",".join(labels[2:]) + "\n")
    avg = np.mean(results, axis=0)
    std = np.std(results, axis=0)
    f.write(",".join(str(q) for q in avg) + "\n")
    f.write(",".join(str(q) for q in std) + "\n")

# plt.bar(labels[2:], avg, label="difference")
# plt.title("Predicted vs actual Difference")
# plt.xticks(rotation='vertical')
# plt.gcf().subplots_adjust(bottom=0.32)
# plt.legend()
# plt.show()
| seltzerfish/senior-design | developer_tools/cross_validate.py | cross_validate.py | py | 1,908 | python | en | code | 0 | github-code | 13 |
15538306213 | from dummy_data.dummy_data import products_list
def product_search(query_vector):
    """Return up to the three best-matching products for *query_vector*.

    Each product in the dummy catalogue is scored by weighted attribute
    matches (category weighs most, then name, then the remaining fields).
    Products with no matching attribute at all are excluded.

    Bug fix: the previous version kept the top scores and the candidate
    products in two separate structures that fell out of sync, and trimmed
    the result with pop(0) — evicting the *oldest* candidate rather than the
    lowest-scoring one — so it did not actually return the top three.
    """
    # (attribute, weight) pairs; higher weight == stronger match signal.
    weighted_fields = (
        ("product_category", 3),
        ("product_name", 2),
        ("size", 1),
        ("colour", 1),
        ("material", 1),
        ("brand", 1),
    )

    scored = []
    for product in products_list:
        score = sum(weight for field, weight in weighted_fields
                    if product[field] == query_vector[field])
        if score > 0:
            scored.append((score, product))

    # Highest score first; the stable sort keeps earlier catalogue entries on ties.
    scored.sort(key=lambda pair: pair[0], reverse=True)
    return [product for _, product in scored[:3]]
# product_search("เถดเทเถปเทเถธเท เถเถณเทเถธเท", None, None, None, None, None)
| Lakith-Rambukkanage/dm_rasa_chatbot | actions/search.py | search.py | py | 1,288 | python | en | code | 0 | github-code | 13 |
12860497482 | import pytest
from src.core.services import UsersService
user_service = UsersService()
@pytest.mark.django_db
def test_create_offer(user, client):
    """Creating an offer as an authenticated user should respond 201."""
    data = {
        "issue_year__gt": 2000,
        "mileage__lt": 90019
    }
    token = user_service.get_tokens_for_user(user)['access']
    client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
    response = client.post('/api/customers/create_offer', data)
    # Bug fix: the test previously *returned* the comparison, which pytest
    # silently ignores — the test could never fail. It must assert.
    assert response.status_code == 201
| ChainHokesss/whitesnake_project | CarshowroomProject/src/customers/tests.py | tests.py | py | 433 | python | en | code | 0 | github-code | 13 |
14728890588 | import torch
import torch.nn as nn
import torchvision
import sys
import math
'''
The encoder of ASTER, which is composed of Resnet like conv network, and a multi-layer Bidirectional LSTM network
to enlarge the feature context, capturing long-range dependencies in both directions.
'''
def conv3x3(in_planes, out_planes, stride=1):
    """Build a bias-free 3x3 convolution with padding 1 (spatial size kept at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """Build a bias-free pointwise (1x1) convolution."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Residual block: 1x1 conv (carries the stride) -> 3x3 conv, with an
    optional downsample module applied to the shortcut.

    NOTE(review): unlike the torchvision BasicBlock (3x3 + 3x3), conv1 here is
    a 1x1 conv — confirm this matches the intended ASTER architecture.
    """

    def __init__(self, in_planes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv1x1(in_planes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # When set, `downsample` projects the input so shapes match for the add.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Apply conv-bn-relu-conv-bn, add the (possibly projected) shortcut, relu."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet_ASTER(nn.Module):
    """ResNet-style encoder for ASTER text recognition.

    Reduces a (N, 1, 32, 140) grayscale input to a (N, 512, 1, 35) feature map
    via five residual stages, then (optionally) runs the width-major feature
    sequence through a bidirectional LSTM to enlarge the feature context.
    """

    def __init__(self, with_lstm=True, n_group=1):
        super(ResNet_ASTER, self).__init__()
        self.with_lstm = with_lstm
        self.n_group = n_group
        in_channels = 1
        # Stem: keeps spatial size, lifts to 32 channels.  Per-stage comments
        # give the spatial size for a 32x140 input.
        self.layer0 = nn.Sequential(  # 32 x 140
            nn.Conv2d(in_channels, 32, kernel_size=(3, 3), stride=1, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True)
        )
        self.inplanes = 32
        #self.linear = nn.Linear(256*2, 256)
        self.layer1 = self._make_layer(32, 3, [2, 2])  # 16x70
        self.layer2 = self._make_layer(64, 4, [2, 2])  # 8x35
        self.layer3 = self._make_layer(128, 6, [2, 1])  # 4x35
        self.layer4 = self._make_layer(256, 6, [2, 1])  # 2x35
        self.layer5 = self._make_layer(512, 3, [2, 1])  # 1x35
        # Projects both the per-step LSTM outputs and the final hidden state.
        self.linear = nn.Linear(512, 512)
        if with_lstm:
            # Bidirectional, so the concatenated hidden size is 2*256 = 512.
            self.rnn = nn.LSTM(512, hidden_size=256, bidirectional=True, num_layers=1, batch_first=True)
            self.out_planes = 512
        else:
            self.out_planes = 512
        # Kaiming init for convs, constant init for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, planes, blocks, stride):
        """Stack *blocks* BasicBlocks; the first block applies *stride* ([h, w])
        and a 1x1 projection shortcut when shape or channel count changes."""
        downsample = None
        if stride != [1, 1] or self.inplanes != planes:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes, stride),
                nn.BatchNorm2d(planes)
            )
        layers = []
        layers.append(BasicBlock(self.inplanes, planes, stride, downsample))
        self.inplanes = planes
        for _ in range(1, blocks):
            layers.append(BasicBlock(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Encode *x*.

        Returns:
            (rnn_output, rnn_hidden) when with_lstm: per-step features
            (N, 35, 512) and the projected last-step output (N, 512);
            otherwise the CNN feature sequence (N, 35, 512).
        """
        batch_size = x.size(0)
        x0 = self.layer0(x)
        x1 = self.layer1(x0)
        x2 = self.layer2(x1)
        x3 = self.layer3(x2)
        x4 = self.layer4(x3)
        x5 = self.layer5(x4)
        cnn_feature = x5.squeeze(2)  # x5 (N,c,h,w) --> (N,c,w); h is 1 here
        cnn_feature = cnn_feature.transpose(2, 1)  # (N,c,w) --> (N,w,c) (N,35,512)
        T = cnn_feature.size(1)
        #print('cnn_feature size: ', cnn_feature.size())
        if self.with_lstm:
            rnn_output, (rnn_hidden, c) = self.rnn(cnn_feature)  # [b, T, 512]
            # NOTE(review): rnn_hidden is overwritten with the projected last
            # time-step output, not the LSTM's own hidden state — confirm intent.
            rnn_hidden = self.linear(rnn_output[:, -1, :])
            rnn_output = self.linear(rnn_output)
            return rnn_output, rnn_hidden
        else:
            return cnn_feature
if __name__ == '__main__':
    # Smoke test: a batch of 3 grayscale 32x140 images through the encoder.
    x = torch.randn(3, 1, 32, 140)
    model = ResNet_ASTER()
    rnn_feature, rnn_hidden = model(x)
    print(rnn_feature.size())
    print(rnn_hidden.size())
| wiikycheng/Attention-based-OCR | Models/encoder.py | encoder.py | py | 4,445 | python | en | code | 0 | github-code | 13 |
11032725877 | import pandas as pd
import os
import numpy as np
class Dls_Raw_Results():
    """Aggregates raw DLS (Dynamic Light Scattering) CSV exports and
    regression-committee output spreadsheets, and cross-references them
    against formulation records.

    Attributes:
        folder: directory walked for raw DLS CSV exports.
        folder_stack: directories walked for regression output .xlsx files.
        column_selection: columns kept when merging regression outputs.
    """

    def __init__(self, folder=None, folder_stack=None, column_selection=None):
        self.folder = folder
        # File names discovered under `folder`.
        self.files = []
        # NOTE(review): collect_csv() calls self.dls_data.append(...) while this
        # is still None, which raises AttributeError — presumably it should be
        # initialised to an empty DataFrame. Confirm before relying on it.
        self.dls_data = None
        # Result of the most recent search().
        self.found = None
        self.folder_stack = folder_stack
        # Absolute .xlsx paths discovered under folder_stack.
        self.files_formulations = []
        self.column_selection = column_selection
        self.random_dataframe = pd.DataFrame()
        self.output_dataframe = pd.DataFrame()

    def collect_csv(self):
        '''
        This function will call on the folder that was initalised when the class was called, and 'walk'
        through it to collect all files, place them in a list. Once collected, it will check for only csv/CSV
        files. These csv files will be read in by Pandas (encoding is due to the DLS data being in Hebrew) and that
        data will be appended to the main data frame.
        :return: A dataframe of the DLS data
        '''
        # NOTE(review): DataFrame.append was removed in pandas >= 2.0, and
        # self.dls_data starts as None (see __init__) — confirm this method is
        # expected to work as-is.
        for (dirpath, dirnames, filenames) in os.walk(self.folder):
            self.files.extend(filenames)
        for f in self.files:
            if f.endswith(('.csv', '.CSV')):
                print(str(f))
                dw = pd.read_csv(os.path.join(self.folder, f), encoding="ISO-8859-8")
                self.dls_data = self.dls_data.append(dw, ignore_index=True)
        return self.dls_data

    def clean_dls_data(self):
        """Normalise the 'Sample Name' column with a series of regex rewrites,
        then report the unique sample names found."""
        self.dls_data['Sample Name'] = self.dls_data['Sample Name'].str.strip()  # Clean up white spaces
        self.dls_data['Sample Name'] = self.dls_data['Sample Name'].str.replace("(?=)(\s)(\d)", "",
                                                                                regex=True)  # Remove the numbers proceeding the D(value)
        self.dls_data['Sample Name'] = self.dls_data['Sample Name'].str.replace("(?<=\d)(?=\-)", " ",
                                                                                regex=True)
        self.dls_data['Sample Name'] = self.dls_data['Sample Name'].str.replace("(\s)(?<=)(\-)(?=)(\s)", "_",
                                                                                regex=True)  # Change the - into an _
        self.dls_data['Sample Name'] = self.dls_data['Sample Name'].str.replace("(?<=[A-Z]|\d)(\s)(?=\D)", "_",
                                                                                regex=True)  # Put an underscore between the GUID and D(value)
        self.dls_data['Sample Name'] = self.dls_data['Sample Name'].str.replace("(_FILTERED)", "",
                                                                                regex=True)  # Unique instance of putting the
        # word filtered in dls naming
        self.dls_data['Sample Name'] = self.dls_data['Sample Name'].str.replace("(_FILTERED\d+)", "", regex=True)
        self.dls_data['Sample Name'] = self.dls_data['Sample Name'].str.strip()  # For good measure
        self.list_dls_unique_scans = list(self.dls_data['Sample Name'].unique())
        print("Number of unique DLS scans:", self.dls_data['Sample Name'].nunique())
        print("DLS Samples Scanned: ", *iter(self.list_dls_unique_scans), sep=' | ')

    def regression_output_files(self):
        """Walk folder_stack for '_complete' directories, load the 'random' and
        active-learning ('al') output spreadsheets, and merge both into
        self.combined_data restricted to column_selection."""
        for folder_formulation in self.folder_stack:
            for (dirpath, dirnames, filenames) in os.walk(folder_formulation):
                # Only descend into directories marked complete.
                dirnames[:] = [d for d in dirnames if '_complete' in d]
                for file in filenames:
                    if file.endswith(('.xlsx', '.XLSX')):
                        self.files_formulations.append(os.path.join(dirpath, file))
        ###Need to remove 'cv_results' as these are large files and grinds the for loop to a halt
        self.files_formulations[:] = [d for d in self.files_formulations if 'cv_results' not in d]
        self.files_formulations[:] = [d for d in self.files_formulations if 'LiHa_Params' not in d]
        for f in self.files_formulations:
            if f.endswith(('.xlsx', '.XLSX')):
                print(f)
                if 'random_unlabeled' in f:
                    # print(str(f))
                    tdf_random = pd.read_excel(f)
                    tdf_random['location'] = 'random'
                    tdf_random['file_name'] = str(f)
                    self.random_dataframe = pd.concat([self.random_dataframe, tdf_random], ignore_index=True)
                    # self.dls_data = self.dls_data.append(dw, ignore_index=True)
                elif 'Ouput_Selection_max_std_sampling' in f:
                    # Need to hardcode the skiprows for Output
                    tdf_output = pd.read_excel(f, skiprows=np.arange(13))
                    tdf_output['location'] = 'al'
                    tdf_output['file_name'] = str(f)
                    self.output_dataframe = pd.concat([self.output_dataframe, tdf_output], ignore_index=True)
                    # header=0)
                    # print(tdf_output)
        # Align the two sources: the random sheet's index lives in 'Unnamed: 0'.
        self.random_dataframe.drop(columns=['original_index'], inplace=True)
        self.random_dataframe.rename(columns={'Unnamed: 0': 'original_index'}, inplace=True)
        self.output_dataframe.drop(columns=['sample_scoring'], inplace=True)
        self.combined_data = pd.concat([self.output_dataframe, self.random_dataframe])
        self.combined_data['original_index'] = self.combined_data['original_index'].astype(int)
        # NOTE(review): reset_index() is not in place and its result is
        # discarded here — confirm whether the index reset was intended.
        self.combined_data.reset_index(drop=True)
        self.combined_data = self.combined_data[self.column_selection]

    def search(self, formulation_id):
        """Look up a formulation: by sample-name substring in the raw DLS data
        when it is loaded, otherwise by original_index in combined_data."""
        if self.dls_data is not None:
            self.found = self.dls_data[self.dls_data['Sample Name'].str.contains(formulation_id)]
        else:
            self.found = self.combined_data[self.combined_data['original_index'] == int(formulation_id)]
        print(self.found)
        print(self.found.index)

    def search_via_formulation(self, formulation_data):
        """Left-merge combined_data against an Excel sheet of formulations on
        their shared columns and write the rows present in both to
        joined_files.xlsx."""
        formulation_data = pd.read_excel(formulation_data)
        # Compare column names first
        #formulation_data_selection = formulation_data[self.column_selection]
        formulation_column_selection = list(formulation_data.columns.intersection(self.combined_data.columns))
        temp_columns_combined_data = list(self.combined_data.columns)
        temp_columns_combined_data.remove('original_index')
        temp_columns_combined_data.remove('location')
        temp_columns_combined_data.remove('file_name')
        # Snap known floating-point artefacts to their intended values so the
        # exact-equality merge below can match those rows.
        formulation_data.loc[formulation_data['mw_cp_2'] == 430.6999999999999, 'mw_cp_2'] = 430.7
        formulation_data.loc[formulation_data['Ratio_2'] == 0.44999999999999996, 'Ratio_2'] = 0.45
        formulation_data.loc[formulation_data['Ratio_2'] == 0.4499999999999999, 'Ratio_2'] = 0.45
        formulation_data = formulation_data[formulation_column_selection]
        mask = self.combined_data.columns.isin(formulation_data.columns)
        df = self.combined_data[self.combined_data.columns[mask]]
        # Match dtypes so equal values compare equal during the merge.
        formulation_data = formulation_data.astype(df.dtypes)
        print('Combined Dataframe Column Info')
        print(self.combined_data.info())
        print('Formulation Dataframe Column Info')
        print(formulation_data.info())
        merge_dfs = self.combined_data.merge(formulation_data.drop_duplicates(), left_on=temp_columns_combined_data,
                                             right_on=list(formulation_data),
                                             how='left', indicator=True)
        # indicator=True adds '_merge'; 'both' marks rows found in both frames.
        found_results = merge_dfs[merge_dfs['_merge'] == 'both']
        found_results.drop_duplicates(inplace=True)
        file_name = os.path.join(r"/Users/calvin/Library/CloudStorage/OneDrive-Personal/Documents/2022/RegressorCommittee_Output/","joined_files.xlsx")
        print(found_results)
        found_results.to_excel(file_name)
##Test
folder_stack = [r'/Users/calvin/Library/CloudStorage/OneDrive-Personal/Documents/2022/RegressorCommittee_Output',
r'/Users/calvin/Library/CloudStorage/OneDrive-Personal/Documents/2022/RegressorCommittee_Output/AL_Output_Prev Iteration_EthanolDil_Issue',
r'/Users/calvin/Library/CloudStorage/OneDrive-Personal/Documents/2022/RegressorCommittee_Output/AL_Output - Rectified ETHANOLDILIssue',
r'/Users/calvin/Library/CloudStorage/OneDrive-Personal/Documents/2022/RegressorCommittee_Output/To be sorted',
r'/Users/calvin/Library/CloudStorage/OneDrive-Personal/Documents/2022/RegressorCommittee_Output/Random_Output',
r'/Users/calvin/Library/CloudStorage/OneDrive-Personal/Documents/2022/RegressorCommittee_Output/AL_Output_temp_out',
r'/Users/calvin/Library/CloudStorage/OneDrive-Personal/Documents/2022/RegressorCommittee_Output/AL_OUTPUT_Current']
test = Dls_Raw_Results(folder_stack=folder_stack, column_selection=['original_index',
'Concentration_1 (mM)',
'Ratio_1',
'Overall_Concentration_2',
'Ratio_2',
'Concentration_4',
'Ratio_4',
'Final_Vol',
'Lipid_Vol_Pcnt',
'Dispense_Speed_uls',
'mw_cp_1',
#'xlogp_cp_1',
#'complexity_cp_1',
#'heavy_atom_count_cp_1',
#'single_bond_cp_1',
#'double_bond_cp_1',
'mw_cp_2',
#'h_bond_acceptor_count_cp_2',
#'xlogp_cp_2',
#'complexity_cp_2',
#'heavy_atom_count_cp_2',
#'tpsa_cp_2',
#'ssr_cp_2',
#'single_bond_cp_2',
#'double_bond_cp_2',
#'aromatic_bond_cp_2',
'location',
'file_name'
])
test.regression_output_files()
#test.search(432852)
test.search_via_formulation(
formulation_data=r'/Users/calvin/Library/CloudStorage/OneDrive-Personal/Documents/2022/RegressorCommittee_Output/formulation_find/formulation_find.xlsx')
| calvinp0/AL_Master_ChemEng | dls_file_search_rnd_choose.py | dls_file_search_rnd_choose.py | py | 11,382 | python | en | code | 0 | github-code | 13 |
4179130338 | import uuid
from .entities import Role, UserRole, AppRole
from .errors import RoleNotFoundError
from .repositories import IUserRoleRepository, IAppRoleRepository
class UserRoleUseCase:
    """Business operations for querying and mutating per-user role assignments."""

    user_role_repository: IUserRoleRepository

    def __init__(self, user_role_repository: IUserRoleRepository):
        self.user_role_repository = user_role_repository

    def get_user_roles(self, user_id: uuid.UUID) -> list[Role]:
        """Return the implicit USER role followed by every stored role for the user."""
        stored = self.user_role_repository.get_user_roles(user_id)
        return [Role.USER] + [assignment.role for assignment in stored]

    def get_role_user_ids(self, role: Role) -> list[uuid.UUID]:
        """Return the ids of every user holding *role*."""
        assignments = self.user_role_repository.get_user_roles_by_role(role)
        return [assignment.user_id for assignment in assignments]

    def add_role(self, user_id: uuid.UUID, role: Role) -> UserRole:
        """Persist a new role assignment for the user and return it."""
        assignment = UserRole(user_id=user_id, role=role)
        self.user_role_repository.add(assignment)
        return assignment

    def remove_role(self, user_id: uuid.UUID, role: Role) -> UserRole:
        """Delete the user's role assignment; raise RoleNotFoundError when absent."""
        removed = self.user_role_repository.remove(user_id, role)
        if not removed:
            raise RoleNotFoundError()
        return removed
class AppRoleUseCase:
    """Business operations for querying and mutating per-app role assignments."""

    app_role_repository: IAppRoleRepository

    def __init__(self, app_role_repository: IAppRoleRepository):
        self.app_role_repository = app_role_repository

    def get_app_roles(self, app_id: uuid.UUID) -> list[Role]:
        """Return the implicit APP role followed by every stored role for the app."""
        stored = self.app_role_repository.get_app_roles(app_id)
        return [Role.APP] + [assignment.role for assignment in stored]

    def get_role_app_ids(self, role: Role) -> list[uuid.UUID]:
        """Return the ids of every app holding *role*."""
        assignments = self.app_role_repository.get_app_roles_by_role(role)
        return [assignment.app_id for assignment in assignments]

    def add_role(self, app_id: uuid.UUID, role: Role) -> AppRole:
        """Persist a new role assignment for the app and return it."""
        assignment = AppRole(app_id=app_id, role=role)
        self.app_role_repository.add(assignment)
        return assignment

    def remove_role(self, app_id: uuid.UUID, role: Role) -> AppRole:
        """Delete the app's role assignment; raise RoleNotFoundError when absent."""
        removed = self.app_role_repository.remove(app_id, role)
        if not removed:
            raise RoleNotFoundError()
        return removed
| KeepError/InnoID | Core/innoid_core/domain/modules/role/usecases.py | usecases.py | py | 2,383 | python | en | code | 0 | github-code | 13 |
41205420030 | import numpy as np
from LayerDense import LayerDense
from ActivationReLU import ActivationReLU
from ActivationSoftmax import ActivationSoftmax
from LossCategoricalCrossentropy import LossCategoricalCrossentropy
### TEST STUFF 1 ###
# from data_gen import data_gen
# X, y = data_gen().spiral_data(100, 3)
# layer1 = LayerDense(2,3)
# activation1 = ActivationReLU()
# layer2 = LayerDense(3,3)
# activation2 = ActivationSoftmax()
# layer1.forward(X)
# activation1.forward(layer1.output)
# layer2.forward(activation1.output)
# activation2.forward(layer2.output)
# loss_function = LossCategoricalCrossentropy()
# # Calculate accuracy from output of activation2 and targets
# # calculate values along first axis
# predictions = np.argmax(activation2.output, axis=1)
# if len(y.shape) == 2:
# y = np.argmax(y, axis=1)
# accuracy = np.mean(predictions == y)
# # Print accuracy
# print(activation2.output[:5])
# print(" Loss: ", loss_function.calculate(activation2.output, y))
# print('acc:', accuracy)
### TEST STUFF 2 ###
# Create dataset
from nnfs.datasets import vertical_data
X, y = vertical_data(samples=100, classes=3)
# Create model
dense1 = LayerDense(2, 3) # first dense layer, 2 inputs
activation1 = ActivationReLU()
dense2 = LayerDense(3, 3) # second dense layer, 3 inputs, 3 outputs
activation2 = ActivationSoftmax()
# Create loss function
loss_function = LossCategoricalCrossentropy()
# Helper variables
lowest_loss = 9999999 # some initial value
best_dense1_weights = dense1.weights.copy()
best_dense1_biases = dense1.biases.copy()
best_dense2_weights = dense2.weights.copy()
best_dense2_biases = dense2.biases.copy()
for iteration in range(10000):
# Update weights with some small random values
dense1.weights += 0.05 * np.random.randn(2, 3)
dense1.biases += 0.05 * np.random.randn(1, 3)
dense2.weights += 0.05 * np.random.randn(3, 3)
dense2.biases += 0.05 * np.random.randn(1, 3)
# Perform a forward pass of our training data through this layer
dense1.forward(X)
activation1.forward(dense1.output)
dense2.forward(activation1.output)
activation2.forward(dense2.output)
# Perform a forward pass through activation function
# it takes the output of second dense layer here and returns loss
loss = loss_function.calculate(activation2.output, y)
# Calculate accuracy from output of activation2 and targets
# calculate values along first axis
predictions = np.argmax(activation2.output, axis=1)
accuracy = np.mean(predictions == y)
# If loss is smaller - print and save weights and biases aside
if loss < lowest_loss:
print('New set of weights found, iteration:', iteration,
'loss:', loss, 'acc:', accuracy)
best_dense1_weights = dense1.weights.copy()
best_dense1_biases = dense1.biases.copy()
best_dense2_weights = dense2.weights.copy()
best_dense2_biases = dense2.biases.copy()
lowest_loss = loss
# Revert weights and biases
else:
dense1.weights = best_dense1_weights.copy()
dense1.biases = best_dense1_biases.copy()
dense2.weights = best_dense2_weights.copy()
dense2.biases = best_dense2_biases.copy() | bkstephen/ai_from_scratch | Python Version/main.py | main.py | py | 3,210 | python | en | code | 1 | github-code | 13 |
1070310682 | import argparse
import signal
import socket
import json
from happy_python.happy_log import HappyLogLevel
from common import hlog
__version__ = '0.0.1'
from miniim import LoginMessage
# noinspection PyUnusedLocal
def sigint_handler(sig, frame):
    """SIGINT handler: log a shutdown message and exit cleanly on Ctrl+C."""
    hlog.info('\n\nๆถๅฐ Ctrl+C ไฟกๅท๏ผ้ๅบ......')
    exit(0)
def main():
parser = argparse.ArgumentParser(prog='mini_im_client',
description='่ฟทไฝ IMๅฎขๆท็ซฏ',
usage='%(prog)s -H|-P|-l|-m|-v')
parser.add_argument('-H',
'--host',
help='๏ผๅฏ้๏ผๆๅก็ซฏๅฐๅ๏ผ้ป่ฎคๅผ๏ผlocalhost',
required=False,
type=str,
action='store',
default='localhost',
dest='host')
parser.add_argument('-P',
'--port',
help='๏ผๅฏ้๏ผๆๅก็ซฏ็ซฏๅฃ๏ผ้ป่ฎคๅผ๏ผ9999',
required=False,
type=int,
action='store',
default='9999',
dest='port')
parser.add_argument('-m',
'--message',
help='๏ผๅฟ
้๏ผๆถๆฏ',
required=True,
type=str,
action='store',
dest='message')
parser.add_argument('-l',
'--log-level',
help='๏ผๅฏ้๏ผๆฅๅฟ็บงๅซ๏ผ0๏ผCRITICAL๏ผ|1๏ผERROR๏ผ|2๏ผWARNING๏ผ|3๏ผINFO๏ผ|4๏ผDEBUG๏ผ|5๏ผTRACE๏ผ๏ผ้ป่ฎคๅผ๏ผ3',
type=int,
choices=HappyLogLevel.get_list(),
default=HappyLogLevel.INFO.value,
required=False,
dest='log_level')
parser.add_argument('-v',
'--version',
help='ๆพ็คบ็ๆฌไฟกๆฏ',
action='version',
version='%(prog)s/v' + __version__)
args = parser.parse_args()
hlog.set_level(args.log_level)
host, port = args.host, args.port
message = json.loads(args.message)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.connect((host, port))
lm = LoginMessage(user=message['user'], password=message['password'], client=message['client'])
frame = lm.dump_frame()
bb = frame.dump()
sock.sendall(bb)
received = str(sock.recv(1024), "utf-8")
hlog.info("Sent: {}".format(message))
hlog.info("Received: {}".format(received))
if __name__ == "__main__":
    # When running in the foreground, exit directly on CTRL+C (SIGINT).
    signal.signal(signal.SIGINT, sigint_handler)
    main()
| geekcampchina/MiniIM | python/client.py | client.py | py | 2,896 | python | en | code | 0 | github-code | 13 |
29354159663 | from django.http import HttpResponse
from .models import Img
from django.template import loader
from django.shortcuts import redirect
from django.shortcuts import get_object_or_404
from .forms import ImgUpdateForm
from django.contrib.auth.models import User
import random
# Create your views here.
def index(request):
    """Render the landing page with the display labels for every
    classification type and model type."""
    classification_labels = [label for _, label in Img.CLASSIFICATION_TYPES]
    model_labels = [label for _, label in Img.WA_LANDSCAPE_MODEL_TYPE]
    context = {
        'classification_list': classification_labels,
        'model_list': model_labels,
    }
    template = loader.get_template('classifier/index.html')
    return HttpResponse(template.render(context, request))
def classify_home(request):
    """Entry page for classification: link to the next unclassified image for
    each model type, or show the success page when nothing is left."""
    class_labels = [label for _, label in Img.CLASSIFICATION_TYPES]

    def next_unclassified_id(model_type):
        # .first() yields None on an empty queryset, so .id raises
        # AttributeError; treat that as "nothing left for this model".
        try:
            return Img.objects.filter(image_classification='', model_type=model_type).first().id
        except (IndexError, AttributeError):
            return None

    next_west = next_unclassified_id('WESTSIDE')
    next_east = next_unclassified_id('EASTSIDE')

    if not next_west and not next_east:
        done_template = loader.get_template('classifier/success.html')
        return HttpResponse(done_template.render({}, request))

    template = loader.get_template('classifier/classify_imgs.html')
    context = {
        'classification_list': class_labels,
        'next_item_to_classify_west': next_west,
        'next_item_to_classify_east': next_east,
    }
    return HttpResponse(template.render(context, request))
def classify_iter(request, model_type, img_id):
    """Show/process the classification form for one image, then redirect to
    the next unclassified image of the same model type (or the success page).
    """
    # need to change this to class based views in order to do the post easier
    classification_types = Img.CLASSIFICATION_TYPES
    # NOTE(review): uses the stored values (i[0]) here while index() exposes the
    # display labels (i[1]) — confirm the template expects raw values.
    class_list = [i[0] for i in classification_types]
    # template = loader.get_template('classifier/classify_imgs_iter.html')
    template = loader.get_template('classifier/img_update_form.html')
    img_obj = get_object_or_404(Img, pk=img_id, model_type=model_type)
    if request.method == 'POST':
        form = ImgUpdateForm(request.POST, instance=img_obj)
        if form.is_valid():
            # print(form.cleaned_data['image_classification'])
            # print(request.user.username)
            # Record who performed the classification before saving.
            form.instance.updated_by_user = request.user.username
            form.save()
            next_img = Img.objects.filter(image_classification='', model_type=model_type).first()
            if next_img:
                return redirect('classify_iter', img_id=next_img.id, model_type=next_img.model_type)
            if not next_img:
                # Nothing left to classify for this model: show completion page.
                template = loader.get_template('classifier/success.html')
                return HttpResponse(template.render({}, request))
    else:
        form = ImgUpdateForm()
    context = {
        'classification_list': class_list,
        'first_item': img_obj,
        'form':form,
    }
    return HttpResponse(template.render(context, request))
def summary(request):
    """Render classification progress: totals, per-label counts, and per-user
    counts with each user's share and last login."""
    imgs = Img.objects.all()
    num_classified = len([i for i in imgs if i.image_classification])
    num_total = len([i for i in imgs])
    # Per-classification counts keyed by stored value; '' means unclassified.
    img_count_dict = {i[0]:0 for i in Img.CLASSIFICATION_TYPES}
    users = {i.updated_by_user for i in imgs}
    user_count_dict = {i:0 for i in users}
    img_count_dict[''] = 0
    for img in imgs:
        img_count_dict[img.image_classification] += 1
        if img.image_classification:
            user_count_dict[img.updated_by_user] += 1
    # replace the empty one
    img_count_dict['not classified'] = img_count_dict.pop("")
    # For each user: [count, percentage of ALL images, last login
    # ('n/a' for the blank/unknown user, which has no User row)].
    user_percent_count_dict = {i:[
        user_count_dict[i],
        "{0:.0%}".format(user_count_dict[i]/num_total),
        User.objects.get(username=i).last_login if i != '' else 'n/a'] for i in user_count_dict
    }
    template = loader.get_template('classifier/summary.html')
    context = {
        'num_classified': num_classified,
        'num_total': num_total,
        'img_count_dict': img_count_dict.items(),
        'users': user_count_dict.items(),
        'user_with_percent': user_percent_count_dict.items(),
    }
    return HttpResponse(template.render(context, request))
def profile(request):
    """Render the profile page; include account details only when the
    requester is authenticated."""
    template = loader.get_template('classifier/profile.html')
    if not request.user.is_authenticated:
        return HttpResponse(template.render({}, request))
    context = {
        'username': request.user.username,
        'first_name': request.user.first_name,
        'last_name': request.user.last_name,
        'email': request.user.email,
    }
    return HttpResponse(template.render(context, request))
def review(request):
    """Render a review page with up to 100 randomly sampled images per
    classification for each model type (east/west).

    Performance fix: the previous version re-evaluated Img.objects.all()
    (a full-table query) once per classification per model — 2 * len(classes)
    scans; the table is now fetched once and filtered in memory.
    """
    template = loader.get_template('classifier/review.html')
    class_list = [value for value, _ in Img.CLASSIFICATION_TYPES]

    all_imgs = list(Img.objects.all())

    def sample_imgs(classification, model_type):
        """Up to 100 random images for one classification/model combination."""
        population = [img for img in all_imgs
                      if img.image_classification == classification
                      and img.model_type == model_type]
        try:
            return random.sample(population, k=100)
        except ValueError:
            # Sample larger than population: take everything.
            return population

    imgs_east = []
    imgs_west = []
    for classification in class_list:
        imgs_east += sample_imgs(classification, 'EASTSIDE')
        imgs_west += sample_imgs(classification, 'WESTSIDE')

    context = {
        'classification_list': class_list,
        'imgs_east': imgs_east,
        'imgs_west': imgs_west,
    }
    return HttpResponse(template.render(context, request))
def success(request):
    """Render the completion page shown when classification work is done."""
    template = loader.get_template('classifier/success.html')
    return HttpResponse(template.render({}, request))
71497028179 | # ์ธ์ด : Python
# ๋ ์ง : 2022.7.30
# ๋ฌธ์ : BOJ > ๊ฐ์ฅ ๊ธด ์ง์ ์ฐ์ํ ๋ถ๋ถ ์์ด (small)
# (https://www.acmicpc.net/problem/22857)
# ํฐ์ด : ์ค๋ฒ 3
# =================================================================
def solution():
    """Return the longest even subsequence length after deleting K elements.

    Reads the module globals N (array length), K (deletions) and arr.
    """
    # Longest even subsequence after deleting K elements
    # == longest window that contains at most K odd numbers.
    odd = 0  # number of odd values in the current window
    size = 0  # current window length
    max_size = 0  # best answer seen so far
    left, right = 0, 0
    while left <= right and right < N:
        if odd > K:
            if arr[left] % 2 == 1:
                odd -= 1
            left += 1
            size -= 1
        if arr[right] % 2 == 1:
            odd += 1
        right += 1
        size += 1
        max_size = max(max_size, size - odd)  # key point: all K odd deletions can be spent inside the window
    return max_size
# Read N (array length) and K (number of deletions), then the array itself.
N, K = map(int, input().split(" "))
arr = list(map(int, input().split(" ")))
result = solution()
print(result) | eunseo-kim/Algorithm | BOJ/์ฝ๋ฉํ
์คํธ ๋๋น ๋ฌธ์ ์ง with Baekjoon/DP/08_๊ฐ์ฅ ๊ธด ์ง์ ์ฐ์ํ ๋ถ๋ถ ์์ด(small).py | 08_๊ฐ์ฅ ๊ธด ์ง์ ์ฐ์ํ ๋ถ๋ถ ์์ด(small).py | py | 1,057 | python | ko | code | 1 | github-code | 13 |
12707952773 | import matplotlib.pyplot as mpl
from PySide2.QtWidgets import QDialog, QVBoxLayout
from matplotlib import gridspec
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from seedpod_ground_risk.pathfinding.moo_ga import *
# Dialog that plots the fatality-risk profile along a computed path together
# with summary statistics (total / average / max / min risk per hour).
class DataWindow(QDialog):
    """Matplotlib-backed QDialog showing per-path risk data."""

    # constructor
    def __init__(self, pathfinding_layer, grid, parent=None):
        super(DataWindow, self).__init__(parent)
        self.resize(1000, 500)
        self.figure = mpl.figure(figsize=(8, 4))
        self.pathfinding_layer = pathfinding_layer
        self.grid = grid
        self.canvas = FigureCanvas(self.figure)
        self.toolbar = NavigationToolbar(self.canvas, self)
        self.setWindowTitle("Path Data")
        layout = QVBoxLayout()
        layout.addWidget(self.toolbar)
        layout.addWidget(self.canvas)
        self.setLayout(layout)
        # Wide axis for the risk trace, narrow axis for the text summary.
        gs = gridspec.GridSpec(1, 2, width_ratios=[2.5, 1])
        ax1 = self.figure.add_subplot(gs[0])
        ax2 = self.figure.add_subplot(gs[1])
        path = self.pathfinding_layer.path
        ys = []
        # Sample the risk grid along each straight segment of the path.
        # NOTE(review): `line` comes from moo_ga's star import — presumably it
        # rasterises the segment into grid indices; confirm its semantics.
        for idx in range(len(path[:-1])):
            n0 = path[idx].position
            n1 = path[idx + 1].position
            l = line(n0[0], n0[1], n1[0], n1[1])
            ys.append(grid[l[0], l[1]])
        # Path length in metres after projecting to British National Grid.
        path_dist = self.pathfinding_layer.dataframe.to_crs('EPSG:27700').iloc[0].geometry.length
        ys = np.hstack(ys)
        x = np.linspace(0, path_dist, len(ys))
        ax1.plot(x, ys)
        ax1.set_xlabel('Distance [m]')
        ax1.set_ylabel('Risk of fatality [per hour]')
        p = self.create_info_patch(ys)
        data_txt = f"The total fatality risk over this path is \n{self.op_format(ys, sum)} per hour" \
                   f"\n\nThe average fatality risk over this path is \n{self.op_format(ys, np.average)} per hour" \
                   f"\n\nThe max fatality risk over this path is \n{self.op_format(ys, max)} per hour" \
                   f"\n\nThe min fatality risk over this path is \n{self.op_format(ys, min)} per hour"
        ax2.add_patch(p)
        ax2.axis('off')
        ax2.text(0.5, 0.5, data_txt,
                 horizontalalignment='center',
                 verticalalignment='center',
                 fontsize=10, color='black',
                 transform=ax2.transAxes, wrap=True)
        self.canvas.draw()
        self.show()

    def op_format(self, val, op):
        """Apply *op* (e.g. sum/np.average/max/min) to *val* and format the
        result in scientific notation with two decimals."""
        return "{:.2e}".format(op(val))

    def create_info_patch(self, ys):
        """Return an invisible rectangle covering the text axes.

        Note: *ys* is accepted but unused; `right`/`top` below are also unused.
        """
        import matplotlib.patches as pch
        left, width = 0, 1
        bottom, height = 0, 1
        right = left + width
        top = bottom + height
        p = pch.Rectangle(
            (left, bottom), width, height, color="white",
            fill=False, clip_on=False
        )
        return p
74525527058 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 22 11:01:27 2018
@author: Hugo
"""
def adigits(a, b, c):
    """Concatenate the string forms of a, b, c in descending sorted order
    and return the result as an int (e.g. 1, 2, 3 -> 321)."""
    pieces = sorted([str(a), str(b), str(c)], reverse=True)
    return int("".join(pieces))
# Smoke test: digits 1, 2, 3 arranged in descending order -> 321.
print(adigits(1,2,3))
| Hugomguima/FEUP | 1st_Year/1st_Semestre/Fpro/Python/saved files/adigits_2.py | adigits_2.py | py | 277 | python | en | code | 0 | github-code | 13 |
21141796983 | import random
import discord
from discord import app_commands
from discord.ext import commands
import os
# Bot token comes from the environment so it is never committed to source control.
secret_code = os.getenv('RB_TOKEN')
# NOTE(review): this Bot instance is immediately shadowed by `bot = db()` below,
# so the prefix and intents configured here are never used.
bot = commands.Bot(command_prefix='!', intents=discord.Intents.all())
class db(discord.Client):
    """Discord client that syncs the slash-command tree when it becomes ready."""
    def __init__(self):
        super().__init__(intents=discord.Intents.default())
        # Tracks whether the command tree has been synced with Discord.
        self.synced = False
    async def on_ready(self):
        # Push the registered slash commands to Discord on startup.
        # NOTE(review): relies on the module-level `tree` created after this class.
        await tree.sync()
        self.synced = True
        print('Bot is Online')
# Replace the prefix-command bot with the plain client and build its slash-command tree.
bot = db()
tree = app_commands.CommandTree(bot)
@tree.command(name='help', description='provides information about bot function')
async def self(interaction: discord.Interaction):
    # NOTE(review): naming the handler `self` is confusing (it shadows the
    # conventional instance name) but works since only the coroutine is registered.
    await interaction.response.send_message('Select user in server that you would like the bot to roast.\n'
                                           'A randomly selected phrase will be displayed along with the users name.')
@tree.command(name='roast', description='select user in server and a random roast '
                                        'will be selected for them adn displayed')
async def self(interaction: discord.Interaction, user: discord.User):
    # Pool of canned roast phrases; one is drawn uniformly at random per call.
    roasts = ['puts the milk in before the cereal',
              'sleeps with socks on',
              'smells kinda funny',
              'cant read',
              'cant ride a bike',
              'eats soup with a fork',
              'cant drive',
              'peaked in middle school',
              'has a face literally no one could love',
              'looks like they arent allowed near school zones',
              'has cooties',
              'cant tie their own shoes',
              'is a fringe friend',
              'has food allergies (loser)',
              'cant count past 5']
    phrase = random.choice(roasts)
    # Reply in-channel, naming the selected user.
    await interaction.response.send_message(f'{user} {phrase}')
def run_Roast_bot():
    """Start the Discord client; blocks until the process exits."""
    bot.run(secret_code)
# Launch the bot when this module is executed.
run_Roast_bot()
| RyBuck44/Roast_bot | r_bot.py | r_bot.py | py | 1,885 | python | en | code | 0 | github-code | 13 |
8242321430 | """\
Meshgrid motor explorer
"""
from __future__ import absolute_import, division, print_function
import random
import numbers
import collections
from .. import tools
from .. import meshgrid
from . import m_rand
# Extend the random-motor-explorer config with a meshgrid resolution option.
defcfg = m_rand.defcfg._deepcopy()
# NOTE(review): `collections.Iterable` was moved to `collections.abc` and the
# alias removed in Python 3.10 -- confirm the supported interpreter range.
defcfg._describe('res', instanceof=(numbers.Integral, collections.Iterable),
                 docstring='resolution of the meshgrid')
defcfg.classname = 'explorers.MeshgridMotorExplorer'
class MeshgridMotorExplorer(m_rand.RandomMotorExplorer):
    """\
    Motor explorer that biases sampling toward meshgrid bins already visited.
    Necessitate a sensory bounded environement.
    """
    defcfg = defcfg
    def __init__(self, cfg, **kwargs):
        super(MeshgridMotorExplorer, self).__init__(cfg)
        # Meshgrid over the motor channel bounds; records which bins hold samples.
        self._meshgrid = meshgrid.MeshGrid(self.cfg, [c.bounds for c in self.m_channels])
    def _explore(self):
        # pick a random bin
        if len(self._meshgrid._nonempty_bins) == 0:
            # Nothing explored yet: sample uniformly over the full motor space.
            m_signal = tools.random_signal(self.m_channels)
        else:
            # Re-sample inside a uniformly chosen, previously visited bin.
            m_bin = random.choice(self._meshgrid._nonempty_bins)
            m_signal = tools.random_signal(self.m_channels, bounds=m_bin.bounds)
        return {'m_signal': m_signal, 'from': 'motor.babbling.mesh'}
    def receive(self, exploration, feedback):
        # Record the executed motor vector and its sensory outcome in the grid.
        super(MeshgridMotorExplorer, self).receive(exploration, feedback)
        self._meshgrid.add(tools.to_vector(exploration['m_signal'], self.m_channels), feedback['s_signal'])
| benureau/explorers | explorers/algorithms/m_mesh.py | m_mesh.py | py | 1,392 | python | en | code | 0 | github-code | 13 |
19145970224 | import flax
import numpy as np
from jam.utils import checkpoint_utils
# Registry mapping torch state-dict key regexes to the converter functions below.
resnet_importer = checkpoint_utils.CheckpointTranslator()
def transpose_conv_weights(w):
    """Reorder a torch OIHW conv kernel into the HWIO layout used by Flax."""
    return np.transpose(w, (2, 3, 1, 0))
@resnet_importer.add(r"layer(\d)\.(\d+)\.conv(\d)\.(weight|bias)")
def block(key, val, layer, block, conv, weight_or_bias):
    """Translate a bottleneck conv weight/bias key into the Flax naming scheme."""
    slot_name = {"weight": "kernel", "bias": "bias"}[weight_or_bias]
    group_idx = int(layer) - 1
    conv_idx = int(conv) - 1
    if slot_name == "kernel":
        # Conv kernels also need the OIHW -> HWIO axis reorder.
        val = transpose_conv_weights(val)
    return f"block_group_{group_idx}/block_{block}/conv_{conv_idx}/{slot_name}", val
@resnet_importer.add(
    r"layer(\d)\.(\d+)\.bn(\d)\.(weight|bias|running_mean|running_var|num_batches_tracked)"
)
def bn(key, val, layer, block, bn, slot):
    """Translate a bottleneck batch-norm key into the Flax naming scheme."""
    slot_map = {
        "weight": "scale",
        "bias": "bias",
        "num_batches_tracked": "counter",
        "running_mean": "mean",
        "running_var": "var",
    }
    group_idx = int(layer) - 1
    bn_idx = int(bn) - 1
    return f"block_group_{group_idx}/block_{block}/batchnorm_{bn_idx}/{slot_map[slot]}", val
@resnet_importer.add(
    r"layer(\d)\.(\d+)\.downsample\.(\d)\.(weight|bias|running_mean|running_var|num_batches_tracked)"
)
def downsample(key, val, layer, block, conv, slot):
    """Translate shortcut-branch keys: index 0 is the 1x1 conv, 1 its batch-norm."""
    if int(conv) == 0:
        # Shortcut convolution parameters.
        newname = {
            "weight": "kernel",
            "bias": "bias",
        }[slot]
        if newname == "kernel":
            # Conv kernels need the OIHW -> HWIO axis reorder.
            val = transpose_conv_weights(val)
        newkey = (
            f"block_group_{int(layer) - 1}/block_{int(block)}/shortcut_conv/{newname}"
        )
    elif int(conv) == 1:
        # Shortcut batch-norm parameters and running statistics.
        newname = {
            "weight": "scale",
            "bias": "bias",
            "num_batches_tracked": "counter",
            "running_mean": "mean",
            "running_var": "var",
        }[slot]
        newkey = f"block_group_{int(layer) - 1}/block_{int(block)}/shortcut_batchnorm/{newname}"
    else:
        raise ValueError(f"Invalid conv number {conv}")
    return newkey, val
@resnet_importer.add(r"conv1\.weight")
def initial_conv(key, val):
    """Map the stem convolution kernel, reordering its axes for Flax."""
    kernel = transpose_conv_weights(val)
    return "initial_conv/kernel", kernel
@resnet_importer.add(r"bn1\.(weight|bias|running_mean|running_var|num_batches_tracked)")
def initial_bn(key, val, slot):
    """Map the stem batch-norm parameters and statistics to Flax names."""
    slot_map = {
        "weight": "scale",
        "bias": "bias",
        "num_batches_tracked": "counter",
        "running_mean": "mean",
        "running_var": "var",
    }
    return "initial_batchnorm/" + slot_map[slot], val
@resnet_importer.add(r"fc\.(weight|bias)")
def final_logits(key, val, slot):
    """Map the classifier head; torch Linear weights are (out, in) and need transposing."""
    name_map = {"weight": "logits/kernel", "bias": "logits/bias"}
    newkey = name_map[slot]
    if slot == "weight":
        # Flax dense kernels are stored (in, out).
        val = np.transpose(val, (1, 0))
    return newkey, val
def load_from_torch_checkpoint(state_dict):
    """Convert a torch ResNet state_dict into a nested Flax variables dict.

    Returns a tree with "params" and "batch_stats" collections; the torch
    `num_batches_tracked` counters are dropped since Flax has no equivalent.
    """
    converted_dict = resnet_importer.apply(
        state_dict=checkpoint_utils.as_numpy(state_dict)
    )
    converted_dict = {k: v for k, v in converted_dict.items()}
    converted_variables = {}
    for k, v in converted_dict.items():
        if "counter" in k:
            # Skip num_batches_tracked counters.
            pass
        elif "batchnorm" in k:
            if "scale" in k or "bias" in k:
                converted_variables[f"params/{k}"] = v
            else:
                # Running mean/var live under batch_stats, not params.
                converted_variables[f"batch_stats/{k}"] = v
        else:
            converted_variables[f"params/{k}"] = v
    return flax.traverse_util.unflatten_dict(converted_variables, sep="/")
| ethanluoyc/jam | src/jam/flax/resnet/convert_torch_checkpoint.py | convert_torch_checkpoint.py | py | 3,492 | python | en | code | 0 | github-code | 13 |
16131405673 | #!/usr/bin/python3
"""
Purpose:
"""
import json
import shutil
import urllib3
# Module-level connection pool shared by every request helper below.
http = urllib3.PoolManager()
# Download data
def download_data_from_url(url, filepath, chunk_size=1024):
    """Stream `url` to `filepath` in `chunk_size`-byte chunks.

    Fix: the connection is now released back to the pool even when reading
    or writing fails partway through (the original leaked it on error).
    """
    r = http.request("GET", url, preload_content=False)
    try:
        with open(filepath, "wb") as out:
            while True:
                data = r.read(chunk_size)
                if not data:
                    break
                out.write(data)
    finally:
        r.release_conn()  # If given, poolmanager will reuse this connection
def download_data_from_url2(url, filepath, chunk_size=1024):
    """Stream `url` to `filepath` using shutil.copyfileobj.

    Fixes: `chunk_size` was previously accepted but ignored (now passed as
    the copy buffer length); the connection is released even on error; the
    request is issued before the file is created so a failed request does
    not leave an empty file behind.
    """
    r = http.request("GET", url, preload_content=False)
    try:
        with open(filepath, "wb") as out:
            shutil.copyfileobj(r, out, chunk_size)
    finally:
        r.release_conn()
def uploading_data_to_url(filepath="file_name.txt", url="https://reqbin.com/post-online"):
    """Upload the contents of `filepath` as a multipart form field to `url`.

    Generalized from the hard-coded original; the defaults preserve the
    previous behaviour exactly. Prints the server-reported "files" entry
    from the JSON response.
    """
    with open(filepath) as f:
        file_data = f.read()
    # Send the file as a multipart/form-data request.
    resp = http.request(
        "POST",
        url,
        fields={
            "file": (filepath, file_data),
        },
    )
    print(json.loads(resp.data.decode("utf-8"))["files"])
| udhayprakash/PythonMaterial | python3/16_Web_Services/c_REST/a_consuming_APIs/j_using_urllib3/d_sending_files.py | d_sending_files.py | py | 1,072 | python | en | code | 7 | github-code | 13 |
32309208155 | import argparse
def parameter_parser():
    """
    A method to parse up command line parameters. By default it gives an embedding of the Bitcoin OTC dataset.
    The default hyperparameters give a good quality representation without grid search.
    Representations are sorted by node ID.
    """
    parser = argparse.ArgumentParser(description = "Run SGCN.")
    parser.add_argument("--data-path",
                        nargs = "?",
                        default = "./formated_data/Amazon_Instant_Video.formated",
                        help = "dataset cvx")
    parser.add_argument("--dimnode",
                        type = int,
                        nargs = "?",
                        default = "40",
                        help = "the latent dim of X , node features dim")
    parser.add_argument("--topk",
                        type = int,
                        nargs = "?",
                        default = "30",
                        help = "topk to recommendation")
    parser.add_argument("--embedding-path",
                        nargs = "?",
                        default = "./output/embedding/bitcoin_otc_sgcn.csv",
                        help = "Target embedding csv.")
    parser.add_argument("--regression-weights-path",
                        nargs = "?",
                        default = "./output/weights/bitcoin_otc_sgcn.csv",
                        help = "Regression weights csv.")
    parser.add_argument("--log-path",
                        nargs = "?",
                        default = "./logs/bitcoin_otc_logs.json",
                        help = "Log json.")
    parser.add_argument("--epochs",
                        type = int,
                        default = 100,
                        help = "Number of training epochs. Default is 100.")
    parser.add_argument("--reduction-iterations",
                        type = int,
                        default = 30,
                        help = "Number of SVD iterations. Default is 30.")
    parser.add_argument("--reduction-dimensions",
                        type = int,
                        default = 64,
                        help = "Number of SVD feature extraction dimensions. Default is 64.")
    parser.add_argument("--seed",
                        type = int,
                        default = 42,
                        help = "Random seed for sklearn pre-training. Default is 42.")
    parser.add_argument("--lamb",
                        type = float,
                        default = 1.0,
                        help = "Embedding regularization parameter. Default is 1.0.")
    parser.add_argument("--test-size",
                        type = float,
                        default = 0.2,
                        help = "Test dataset size. Default is 0.2.")
    # Fix: the help text previously claimed the default was 0.01 while the
    # actual default is 0.1.
    parser.add_argument("--learning-rate",
                        type = float,
                        default = 0.1,
                        help = "Learning rate. Default is 0.1.")
    parser.add_argument("--ydivx",
                        type = float,
                        default = 1,
                        help = "the Z and the Y dimision ratio")
    # Fix: the help text previously said "Learning rate" for the weight decay.
    parser.add_argument("--weight-decay",
                        type = float,
                        default = 10**-5,
                        help = "Weight decay. Default is 10^-5.")
    parser.add_argument("--super-mu",
                        type = float,
                        default = 0.5,
                        help = "the merge super parameter of sgcn.loss + second.loss")
    parser.add_argument("--model",
                        type = str,
                        default = "sgcn_mf",
                        help = "choose the basic model : sgcn_mf , mf")
    parser.add_argument("--layers",
                        nargs="+",
                        type=int,
                        help = "Layer dimensions separated by space. E.g. 32 32.")
    parser.add_argument("--deep-neurons",
                        nargs="+",
                        type=int,
                        help = "deep layers of the classification E.g. 32 32.")
    parser.add_argument("--spectral-features",
                        dest = "spectral_features",
                        action = "store_true")
    parser.add_argument("--general-features",
                        dest = "spectral_features",
                        action = "store_false")
    parser.add_argument("--mf_lfmdim",
                        type = int,
                        nargs = "?",
                        default = "50",
                        help = "the latent dim of MF model")
    parser.add_argument("--mf_learnrate",
                        type = float,
                        nargs = "?",
                        default = "0.005",
                        help = "learning rate of the ml model")
    # Defaults for the list-valued options (cannot be expressed inline above).
    parser.set_defaults(layers = [32, 32])
    parser.set_defaults(deep_neurons = [10, 1])
    return parser.parse_args()
| 2742195759/SGCN_MF | src/parser.py | parser.py | py | 4,866 | python | en | code | 0 | github-code | 13 |
74405832976 | import math
import matplotlib.pyplot as plt
# Plot the partial sums S_n of the series a_n = (2n)! / (3^n * (n!)^2).
N = 30
x = []
y = []
s = 0
# Fix: math.factorial requires an integral argument (float arguments were
# deprecated in Python 3.9 and rejected in newer versions), so keep n as an
# int; the true division below still produces float terms as before.
for i in range(1, N):
    n = i
    an = math.factorial(2 * n) / ((3 ** n) * (math.factorial(n) ** 2))
    print(an)
    s += an
    x.append(n)
    y.append(s)
print(f"\n Sn = {s}")
print(f"For n = {N}")
plt.scatter(x, y)
plt.grid()
plt.show()
| LorenFiorini/Competitive-Programming | SCHOOL/Calculus/hw6/1B.py | 1B.py | py | 309 | python | en | code | 2 | github-code | 13 |
31014002643 | '''
fuzz.py
----------
The 'main' file.
Imeplements both fuzz and fals algorithms and provides an option to
randomly simulate a system.
Please type
./fuzz.py --help
for usage details.
'''
from __future__ import print_function
import matplotlib.pyplot as plt
import logging
import numpy as np
import argparse
import pickle
import time
import sys as SYS
import os
import socket
import thread
import sysv_ipc
from signal import signal, SIGPIPE, SIG_DFL
import errno
import loadsystem
import traces
#precision=None, threshold=None, edgeitems=None, linewidth=None, suppress=True, nanstr=None, infstr=None, formatter=Nonu)
np.set_printoptions(suppress=True)
FORMAT = '[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s'
FORMAT2 = '%(levelname) -10s %(asctime)s %(module)s:\
%(lineno)s %(funcName)s() %(message)s'
serverAddr = 'cpfuzz.socket'
logging.basicConfig(filename='log.ff', filemode='w', format=FORMAT2,
level=logging.DEBUG)
logger = logging.getLogger(__name__)
def setup_shm():
# http://semanchuk.com/philip/sysv_ipc/#shared_memory
try:
memory = sysv_ipc.SharedMemory(sysv_ipc.IPC_PRIVATE, flags=sysv_ipc.IPC_CREAT|sysv_ipc.IPC_EXCL, mode=0o600, size=MAP_SIZE)
if memory.id < 0:
logger.info("shmget() failed")
atexit(remove_shm);
shm_str = str(memory.id)
os.environ['SHM_ENV_VAR'] = str(memory.id)
memory.attach()
trace_bits = memory.read()
return memory
# memory.remove()
except:
logger.info("setup_shm failed")
pass
def attach_shm():
'''seems not work'''
id_str = os.environ.get("SHM_ENV_VAR")
if id_str != None:
key = int(id_str)
print(id)
shm = ipc.SharedMemory(key, 0, 0)
#I found if we do not attach ourselves
#it will attach as ReadOnly.
shm.attach(0,0)
buf = shm.read(19)
print(buf)
shm.detach()
pass
def check_prop_violation(trace, prop):
idx = prop.final_cons.contains(trace.x_array)
return trace.x_array[idx], trace.t_array[idx]
def create_harness(sys,prop):
harness = open('harness.c','r').read()
num_dims = sys.num_dims
# read(STDIN_FILENO, (int*)iv.int_state_arr, int_state_arr_num);
stateAssign = []
if num_dims.si > 0:
stateAssign = ['iv.int_state_arr[{}]={};'.format(i,prop.initial_controller_state[i]) for i in range(num_dims.si)]
harness = harness.replace('INT_STATE_ARR','\n'.join(stateAssign))
# read(STDIN_FILENO, (double*)iv.float_state_arr, float_state_arr_num);
stateAssign = []
if num_dims.sf > 0:
stateAssign = ['iv.float_state_arr[{}]={};'.format(i,prop.initial_controller_state[i]) for i in range(num_dims.si,num_dims.s)]
harness = harness.replace('FLOAT_STATE_ARR','\n'.join(stateAssign))
compareRob = ['rob = MAX(rob, MAX(iv.x_arr[{index}] - {high}, ({low}) - iv.x_arr[{index}] ));'.format(high=prop.final_cons.h[i],low=prop.final_cons.l[i],index=i) for i in range(num_dims.x)]
harness = harness.replace('COMPARE_ROB','\n'.join(compareRob))
harness = harness.replace('inf','DBL_MAX')
f = open(sys.path+'/harness.c','w')
f.write(harness)
f.close()
os.system('cd '+sys.path+';make')
def create_corpus(sys,prop):
    """Write a random seed input ('seed_corpus/rand') for the fuzzer.

    The seed is the binary concatenation of a uniformly random controller
    input array (num_segments x ci doubles) followed by a random initial
    plant state drawn from the property's initial constraints.
    """
    num_segments = prop.num_segments # sample time period
    num_dims = sys.num_dims
    init_cons = prop.init_cons
    f = open('seed_corpus/rand','wb')
    # read(STDIN_FILENO, (double*)iv.input_arr, input_arr_num);
    if num_dims.ci > 0 and (prop.ci.h - prop.ci.l > 0).any():
        ci_lb = prop.ci.l
        ci_ub = prop.ci.h
        ci_array = ci_lb + (ci_ub - ci_lb) * np.random.random((num_segments, num_dims.ci)) # URandom controller input
        f.write(ci_array.tobytes('C') )
    # read(STDIN_FILENO, (double*)iv.x_arr, x_arr_num);
    if num_dims.x > 0:
        x_array = init_cons.l + np.random.rand(init_cons.dim) * (init_cons.h - init_cons.l) # random init state
        f.write(x_array.tobytes('C') )
    logger.info('URandom controller input num_segments:{},num_dims.ci:{},num_dims.si:{},num_dims.sf:{},num_dims.x{}'.format( num_segments, num_dims.ci,num_dims.si,num_dims.sf,num_dims.x))
    f.close()
    # Disabled alternative seeds at the corner points of the input box:
    # f = open('seed_corpus/min','wb')
    # # read(STDIN_FILENO, (double*)iv.input_arr, input_arr_num);
    # if num_dims.ci > 0 and (prop.ci.h - prop.ci.l > 0).any():
    #     for i in range(num_segments):
    #         f.write(prop.ci.l.tobytes('C') )
    # # read(STDIN_FILENO, (double*)iv.x_arr, x_arr_num);
    # if num_dims.x > 0:
    #     f.write(init_cons.l.tobytes('C') )
    # f.close()
    # f = open('seed_corpus/max','wb')
    # # read(STDIN_FILENO, (double*)iv.input_arr, input_arr_num);
    # if num_dims.ci > 0 and (prop.ci.h - prop.ci.l > 0).any():
    #     for i in range(num_segments):
    #         f.write(prop.ci.h.tobytes('C') )
    # # read(STDIN_FILENO, (double*)iv.x_arr, x_arr_num);
    # if num_dims.x > 0:
    #     f.write(init_cons.h.tobytes('C') )
    # f.close()
def setup_plant(sys,prop):
    """Serve plant simulations over a Unix-domain socket, forever.

    Protocol per connection: receive the initial plant state (x doubles),
    then repeatedly receive a control output (u doubles), advance the plant
    one delta_t step via sys.sim and send the new state back, until the
    client disconnects.
    NOTE(review): `sys` shadows the stdlib module; the `if sock < 0` style
    checks are C idioms -- under Python 3 comparing a socket object to an
    int would raise, so confirm which interpreter this targets (the file
    imports the Python-2-only `thread` module).
    """
    os.system("rm *.socket")
    #create sockert
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    if sock < 0:
        print ('socket error')
    # bind to a file
    if os.path.exists(serverAddr):
        os.unlink(serverAddr)
    if sock.bind(serverAddr):
        print( 'socket.bind error')
    #listen
    if sock.listen(5):
        print ('socket.listen error')
    start_time = time.time()
    # Ignore broken pipes from clients that vanish mid-write.
    signal(SIGPIPE,SIG_DFL)
    while True:
        # logger.info( 'waiting for connecting')
        #waiting for client connecting
        conn, clientAddr = sock.accept()
        t = 0.0
        dummy_val = 0.0
        d = np.array( prop.initial_discrete_state)
        pvt = np.array(sys.plant_pvt_init_data)
        try:
            # receive plant init state n = write(sockfd, iv.x_arr, x_arr_num);
            data = conn.recv(sys.num_dims.x * 8)
            x0 = np.frombuffer(data, dtype=np.float64)
            x = x0
            # receive control output n = write(sockfd, rv.output_arr, output_arr_num);
            data = conn.recv(sys.num_dims.u * 8)
            while data:
                uu = np.frombuffer(data, dtype=np.float64)
                # Advance the plant one control period.
                (t, x, d, pvt) = sys.sim(
                    (t, t + sys.delta_t),
                    x,
                    d,
                    pvt,
                    uu,
                    dummy_val,
                    property_checker=None,
                    property_violated_flag=None
                )
                # rob = min(rob,prop.final_cons.robustness(x))
                # if rob < 0:
                #     x_v = x
                # logger.info("t={},x={},u={}".format(t,x,uu))
                # send new state to controller n = read(sockfd, iv.x_arr, x_arr_num);
                data = x.tobytes('C')
                conn.sendall(data)
                try:
                    data = conn.recv(sys.num_dims.u * 8)
                except socket.error as e:
                    # Client closed its end; stop simulating this episode.
                    if e.errno == errno.ECONNRESET:
                        break
            # # send robustness to controller n = read(sockfd, &result, sizeof(double));
            # conn.sendall(np.array(rob,dtype = np.float64).tobytes('C'))
        finally:
            # if rob<0:
            #     stop_time = time.time()
            #     logger.info('time spent(s) = {}, rob = {}, x0 = {} -> xv = {}'.format(stop_time - start_time, rob, x0, x_v))
            #close the connection
            conn.close()
    os.unlink(serverAddr)
def main():
    """Parse CLI options, generate harness + seed corpus for the chosen
    system, emit a fuzz.sh launcher for afl-fuzz, then serve the plant."""
    logger.info('execution begins')
    usage = '%(prog)s <filename>'
    parser = argparse.ArgumentParser(description='CPFuzz', usage=usage)
    parser.add_argument('-f','--filename', default=None, metavar='file_path.tst')
    # uniform random fuzz
    parser.add_argument('-s', '--simulate', type=int, metavar='num-sims',
                        help='simulate')
    # todo: fuzz using robust value
    parser.add_argument('-x', '--robust', type=int, metavar='num-sims',
                        help='using mtl robust value')
    parser.add_argument('-p', '--plot', action='store_true',
                        help='enable plotting')
    parser.add_argument('--dump', action='store_true',
                        help='dump trace in mat file')
    parser.add_argument('--seed', type=int, metavar='seed_value',
                        help='seed for the random generator')
    args = parser.parse_args()
    if args.filename is None:
        print('No file to test. Please use --help')
        exit()
    else:
        filepath = args.filename
    if args.seed is not None:
        np.random.seed(args.seed)
    Options = type('Options', (), {})
    opts = Options()
    opts.plot = args.plot
    # Load the system description and property from the .tst file.
    sys, prop = loadsystem.parse(filepath)
    # if not os.path.exists(sys.path+"/fuzz-target"):
    create_harness(sys,prop)
    print("harness generated")
    create_corpus(sys,prop)
    # try:
    #     thread.start_new_thread( setup_plant, (sys,prop) )
    # except:
    #     print("Error: unable to start thread")
    num_segments = prop.num_segments
    num_dims = sys.num_dims
    # Pass ci=0 to the target when the controller-input box is degenerate.
    if (prop.ci.h - prop.ci.l > 0).any():
        cmd = 'afl-fuzz -P '+filepath+' -m none -i seed_corpus -o out -- '+ sys.path +'/fuzz-target %d %d %d %d %d %d' % (num_segments,num_dims.ci, num_dims.si,num_dims.sf,num_dims.x,num_dims.u)
    else:
        cmd = 'afl-fuzz -P '+filepath+' -m none -i seed_corpus -o out -- '+ sys.path +'/fuzz-target %d %d %d %d %d %d' % (num_segments, 0 , num_dims.si,num_dims.sf,num_dims.x,num_dims.u)
    f = open('fuzz.sh','w')
    f.write('#!/bin/sh\n'+cmd)
    f.close()
    # Blocks here serving plant simulations for the fuzzer.
    setup_plant(sys,prop)
if __name__ == '__main__':
    main()
| shangfute/CPFuzz | cpfuzz.py | cpfuzz.py | py | 9,847 | python | en | code | 0 | github-code | 13 |
40864957494 | """
Implementation of Stack and all operations.
Using:
1. List
"""
# Using List: append pushes onto the top (right end), pop removes from it (LIFO).
list1 = []
list1.append('1st')
list1.append('2nd')
list1.append('3rd')
list1.pop()
print(list1)
list1.pop()
print(list1)
# Using collections.deque collection: O(1) appends/pops at both ends.
from collections import deque
stack = deque('abcd')
stack.append('e')
stack.append('f')
print(stack)
stack.pop()
stack.pop()
print(stack)
| kundan123456/100DaysCodeChallengeDS | Day3/stack.py | stack.py | py | 390 | python | en | code | 0 | github-code | 13 |
71544155859 | # import the necessary packages
import os
# name of the dataset we will be using
DATASET = "cityscapes"
# build the dataset URL
DATASET_URL = f"http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/{DATASET}.tar.gz"
# define the batch size
TRAIN_BATCH_SIZE = 32
INFER_BATCH_SIZE = 8
# dataset specs
IMAGE_WIDTH = 256
IMAGE_HEIGHT = 256
IMAGE_CHANNELS = 3
# training specs
LEARNING_RATE = 2e-4
EPOCHS = 150
STEPS_PER_EPOCH = 100
# path to our base output directory
BASE_OUTPUT_PATH = "outputs"
# NOTE(review): BASE_IMAGE_PATH is empty while BASE_IMAGES_PATH (below) points
# inside the output dir; the grid image built from it therefore lands in the
# current working directory -- confirm that is intended.
BASE_IMAGE_PATH = ""
# GPU training pix2pix model paths
GENERATOR_MODEL = os.path.join(BASE_OUTPUT_PATH, "models", "generator")
# define the path to the inferred images and to the grid images
BASE_IMAGES_PATH = os.path.join(BASE_OUTPUT_PATH, "images")
GRID_IMAGE_PATH = os.path.join(BASE_IMAGE_PATH, "grid.png") | bashendixie/ml_toolset | ๆกไพ100 ไฝฟ็จPix2Pix่ฟ่กๅพๅ็ฟป่ฏ/config.py | config.py | py | 810 | python | en | code | 9 | github-code | 13 |
11354363791 | """
From Point-GNN
"""
import torch
from torch import nn
def multi_layer_fc_fn(Ks=[300, 64, 32, 64], num_classes=4, is_logits=False, num_layers=4):
    """Build an MLP of Linear/ReLU/BatchNorm1d stages.

    Hidden widths follow Ks; the head maps Ks[-1] -> num_classes. When
    is_logits is True the head is a bare Linear layer, otherwise it is
    followed by ReLU and BatchNorm1d like the hidden stages.
    """
    assert len(Ks) == num_layers
    layers = []
    for in_dim, out_dim in zip(Ks[:-1], Ks[1:]):
        layers.append(nn.Linear(in_dim, out_dim))
        layers.append(nn.ReLU())
        layers.append(nn.BatchNorm1d(out_dim))
    layers.append(nn.Linear(Ks[-1], num_classes))
    if not is_logits:
        layers.append(nn.ReLU())
        layers.append(nn.BatchNorm1d(num_classes))
    return nn.Sequential(*layers)
class ClassAwarePredictor(nn.Module):
    """Per-class prediction heads: one shared classifier plus one box
    regressor per object class."""
    def __init__(self, num_classes, box_encoding_len):
        super(ClassAwarePredictor, self).__init__()
        # Shared classification head over the point features.
        self.cls_fn = multi_layer_fc_fn(Ks=[64, 32], num_layers=2, num_classes=num_classes, is_logits=True)
        # One localization head per object class.
        self.loc_fns = nn.ModuleList(
            multi_layer_fc_fn(Ks=[64, 64, 32], num_layers=3, num_classes=box_encoding_len, is_logits=True)
            for _ in range(num_classes)
        )
        self.num_classes = num_classes
        self.box_encoding_len = box_encoding_len
    def forward(self, features):
        """Return (logits, box_encodings); box_encodings are stacked per class on dim 1."""
        logits = self.cls_fn(features)
        per_class = [loc_fn(features) for loc_fn in self.loc_fns]
        box_encodings = torch.stack(per_class, dim=1)
        return logits, box_encodings
| datong-new/Point-HGNN | head/plain_head.py | plain_head.py | py | 1,847 | python | en | code | 0 | github-code | 13 |
3039573711 | #! python3
# WebComicDownloader.py - Downloads the most recent comics from xkcd once a day if new comics have been uploaded
import requests, os, bs4
def main():
    """Entry point: sync the local folder with the newest xkcd comics."""
    downloadComics()
def downloadComics():
    """Download xkcd comics, walking backwards from the newest, until a
    comic already present in ./xkcdComics is reached.

    NOTE(review): comicNames is snapshotted once before the loop, so files
    downloaded during this run never satisfy the break condition; on a
    fresh/empty folder the loop only terminates when the site stops leading
    to new comics via its 'prev' link -- verify that is acceptable.
    """
    # Create folder for comics
    os.makedirs('./xkcdComics', exist_ok=True)
    # Check if we already have the most recent comics
    req = requests.get('https://xkcd.com/')
    req.raise_for_status()
    soup = bs4.BeautifulSoup(req.text, 'html.parser')
    if checkMostRecentComic(soup):
        print('Comics are up-to-date!')
        return
    # Download comics until most recent comic
    comicNames = os.listdir('./xkcdComics')
    while True:
        comicElem = soup.select('#comic img')
        if comicElem == []:
            print('Unable to retrieve comic image')
        else:
            fileName = os.path.basename(comicElem[0].get('src'))
            if fileName in comicNames:
                break
            # Stream the image to disk in 100 kB chunks.
            imgReq = requests.get('https:' + comicElem[0].get('src'))
            imgReq.raise_for_status()
            print(f'Downloading {fileName}...')
            comicFile = open(os.path.join('xkcdComics', fileName), 'wb')
            for chunk in imgReq.iter_content(100000):
                comicFile.write(chunk)
            comicFile.close()
        # Go to next comic
        nextURL = 'https://xkcd.com' + soup.select('a[rel="prev"]')[0].get('href')
        req = requests.get(nextURL)
        req.raise_for_status()
        soup = bs4.BeautifulSoup(req.text, 'html.parser')
def checkMostRecentComic(parsedHTML):
    """Return True when the newest comic shown in parsedHTML is already in
    ./xkcdComics, False otherwise.

    Exits the program when no comic image can be located in the page.
    """
    downloaded = os.listdir('./xkcdComics')
    comicElem = parsedHTML.select('#comic img')
    if comicElem == []:
        print('Unable to retrieve most recent image')
        exit(0)
    newestName = os.path.basename(comicElem[0].get('src'))
    return newestName in downloaded
if __name__ == '__main__':
main() | cjam3/AutomateTheBoringStuffPractice | Chapter 17/WebComicDownloader.py | WebComicDownloader.py | py | 2,241 | python | en | code | 0 | github-code | 13 |
26073616084 | import os
from numpy import *
import matplotlib.pyplot as plt
class Params():
    """Top-level simulation parameters for the rotating-drum MPM run;
    aggregates the grid, boundary, solid and output parameter objects."""
    def __init__(self,args):
        self.dt = 1e-4 # timestep (s)
        self.savetime = 1e-2 # (s)
        self.t_f = 100.0 #100.0 # 3*self.dt # final time (s)
        self.max_g = -9.81 # gravity (ms^-2)
        self.max_q = 0.
        self.theta = 0*pi/180. # slope angle (radians)
        self.Fr = float(args[2]) # Froude number
        self.r = 1.0 # radius of drum
        self.G = Grid_Params(self,args)
        self.B = Boundary_Params()
        self.O = Output_Params(self.G)#self.nt)
        self.S = [Solid_Params(self.G,self,args),]
        self.segregate_grid = True
        self.c = 1e-3 # inter-particle drag coefficient
        self.l = 10. # number of particle diameters for seg diffusion coeff
        # self.D = 1e-3 # segregation diffusion coefficient
        self.supername = 'im/drum/ny_' + str(self.G.ny) + '/Fr_' + str(self.Fr) + '/'
        # self.supername = 'im/drum_rough/wall_mu_' + str(self.B.wall_mu) + '/ny_' + str(self.G.ny) + '/Fr_' + str(self.Fr) + '/c_' +
        # self.supername = 'im/drum_no_slip/ny_' + str(self.G.ny) + '/Fr_' + str(self.Fr) + '/c_' + str(self.c) + '/l_' + str(self.l) + '/'
        self.pressure = 'lithostatic'
        # self.smooth_gamma_dot = True # smooth calculation of gamma_dot
        self.time_stepping = 'dynamic' # dynamic or static time steps
        self.normalise_phi = True
        self.CFL = 0.2 # stability criteria for determining timstep
        print(self.supername)
    def update_forces(self):
        # Rotate the gravity direction with the drum.
        # NOTE(review): reads self.t, which is not set in __init__ --
        # presumably assigned by the simulation driver; confirm.
        t_c = sqrt(4*pi**2*self.r/(abs(self.max_g)*self.Fr)) # rotation period
        self.theta = -(self.t/t_c)*2.*pi # slope angle (radians)
        self.g = self.max_g
class Grid_Params():
    """Background grid for the square drum: an nx-by-ny grid over
    [0, P.r] x [0, P.r], plus the two-point grain-size coordinate `s`."""
    def __init__(self,P,args):
        self.y_m = 0.0 # (m)
        self.y_M = P.r # (m)
        self.x_m = 0.0 # (m)
        self.x_M = P.r # (m)
        # Grid resolution comes from the command line; square grid.
        self.ny = int(args[1])
        self.nx = self.ny
        self.x = linspace(self.x_m,self.x_M,self.nx)
        self.y = linspace(self.y_m,self.y_M,self.ny)
        self.dx = self.x[1] - self.x[0] # grid spacing (m)
        self.dy = self.y[1] - self.y[0] # grid spacing (m)
        # self.s = array([0.5,1.0]) # s coordinate
        self.top_gap = 0.5*(self.y_M - self.y_m)
        self.R = 10.#float(args[2])
        self.s_M = 0.003 # 3mm beads, following https://journals.aps.org/pre/pdf/10.1103/PhysRevE.62.961
        # self.s_m = 0.1
        # Smallest grain size is s_M / R (size ratio R).
        self.s_m = self.s_M/self.R
        self.ns = 2
        self.s = array([self.s_m,self.s_M])
        # s_edges = linspace(self.s_m,self.s_M,self.ns+1)
        # self.s = (s_edges[1:] + s_edges[:-1])/2.
        self.ds = self.s[1]-self.s[0]
class Boundary_Params():
    """Flags describing which walls exist and whether they are rough."""
    def __init__(self):
        # Every side of the square drum is a solid wall.
        for side in ('bottom', 'top', 'right', 'left'):
            setattr(self, 'has_' + side, True)
        # Walls are rough.
        self.roughness = True
class Solid_Params():
    """Material parameters and initial material-point positions.

    Lays `pts_per_cell` points per grid cell over the lower half of the
    domain and stores the mu(I) rheology and elastic constants.
    """
    def __init__(self,G,P,args):
        self.X = []
        self.Y = []
        self.n = 0
        self.rho = 2700. # density (kg/m^3)
        self.packing = 0.6 # packing fraction
        self.rho_s = self.rho/self.packing # solid density
        self.PHI = []
        self.law = 'pouliquen'
        # # self.law = 'ken_simple'
        self.mu_0 = tan(deg2rad(20.9)) #0.3
        self.mu_1 = tan(deg2rad(32.76))
        self.delta_mu = self.mu_1 - self.mu_0
        self.I_0 = 0.279
        self.eta_max = 100.*self.rho*sqrt(-P.max_g*(G.y_M-G.y_m)**3)/1e2 # 10x WORKS BETTER
        # self.law = 'dp'
        # # self.beta = 0.5
        # self.s = 2.0
        self.E = 1e7
        self.nu = 0.4 # poissons ratio
        self.K = self.E/(3*(1-2*self.nu))
        self.G = self.E/(2*(1+self.nu))
        self.pts_per_cell = 3
        self.x = (G.nx-1)*self.pts_per_cell # particles in x direction
        self.y = (G.ny-1)//2*self.pts_per_cell + 2 # particles in y direction
        # Offset points half a sub-cell from the domain edges.
        gap = array((G.dx,G.dy))/(2*self.pts_per_cell)
        xp = linspace(G.x_m+gap[0],G.x_M-gap[0],self.x)
        yp = linspace(G.y_m+gap[1],(G.y_M+G.y_m)/2.-gap[1]/2.,self.y)
        X = tile(xp,self.y)
        Y = repeat(yp,self.x)
        for i in range(self.x*self.y):
            self.X.append(X[i])
            self.Y.append(Y[i])
            self.n += 1
        self.A = (G.y_M - G.y_m - G.top_gap)*(G.x_M - G.x_m)/self.n # area (m^2)
    # def critical_time(self,P): # pouliquen
    #     distance = minimum(P.G.dx,P.G.dy)
    #     t_ela = distance/sqrt(self.K/self.rho) # elasticity
    #     t_diff = distance**2/self.eta_max*self.rho # momentum diffusivity/viscosity
    #     return minimum(t_diff,t_ela)
    def critical_time(self,P): # dp
        """Stable-timestep estimate from the elastic wave speed."""
        distance = minimum(P.G.dx,P.G.dy)
        t_ela = distance/sqrt(self.K/self.rho) # elasticity
        return t_ela
class Output_Params():
    """Plot/figure settings and the callbacks that render output."""
    def __init__(self,G):
        # NOTE(review): the G argument is currently unused here.
        self.continuum_fig_size = [24,8]
        self.mp_fig_size = [18,4]
    def after_every_nth_timestep(self,P,G,L,plot):
        # Periodic output during the run.
        plot.draw_continuum(G,P)
        # plot.draw_material_points(L,P,G)
        plot.draw_gsd_mp(L,P,G)
        plot.save_u(L,P,G)
        plot.save_s_bar(L,P,G)
        plot.save_density(L,P,G)
        plot.save_phi_MP(L,P,G)
    def final_graphs(self,P,G,L,plot):
        # Output generated once at the end of the simulation.
        plot.draw_continuum(G,P)
        # plot.draw_material_points(L,P,G,'final')
        plot.draw_gsd_mp(L,P,G)
        plot.save_u(L,P,G)
        plot.save_s_bar(L,P,G)
        plot.save_density(L,P,G)
        plot.save_phi_MP(L,P,G)
| benjym/poly-mpm | inputs/bi_square_drum.py | bi_square_drum.py | py | 5,528 | python | en | code | 13 | github-code | 13 |
39399811440 | import re
from collections import Counter
def most_occr_element(word):
# re.findall will extract all the elements
# from the string and make a list
arr = re.findall(r'[0-9]+',word)
#Store Max Frequency
maxm = 0
#Max Elem of Most Frequency
max_elem = 0
# counter will store all the number with
# their frequencies
# c = counter((55, 2), (2, 1), (3, 1), (4, 1))
c = Counter(arr)
key = c.keys()
for x in list(key):
if c[x] >= maxm:
maxm = c[x]
max_elem = int(x)
return max_elem
# Sample input: the digit runs are 55, 55, 4, 3 and 2.
word = 'geek55of55gee4ksabc3dr2x'
print(most_occr_element(word)) | shank24/PythonCodingPractice | Code_Ground/geeksforgeeks/Regex/most_Occur_Number.py | most_Occur_Number.py | py | 636 | python | en | code | 0 | github-code | 13 |
16729066990 | import unittest
from jframework.modules.scan.synscan import Synscan
from jframework.modules.scan.ackscan import Ackscan
class ScanTest(unittest.TestCase):
    """Checks the default host/port configuration of the SYN and ACK scanners."""
    def setUp(self):
        # Fresh scanner instances for every test case.
        self.syn_scan = Synscan()
        self.ack_scan = Ackscan()
    def test_default_value_syn(self):
        self.assertEqual(self.syn_scan.host, "127.0.0.1")
        self.assertEqual(self.syn_scan.ports_list, [80])
    def test_default_value_ack(self):
        self.assertEqual(self.ack_scan.host, "127.0.0.1")
        self.assertEqual(self.ack_scan.ports_list, [80])
        self.assertIsNotNone(self.ack_scan.ports_list)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| Josue87/tfg-framework-python | framework/jframework/test/test_scan.py | test_scan.py | py | 658 | python | en | code | 0 | github-code | 13 |
9414892622 |
def run(wd, sampleGffDir, outputDir):
    """Group runs of non-core genes into putative mobile genetic elements (MGEs).

    Reads <outputDir>/vertices.csv (non-core regions per sample/chromosome),
    reads CDS coordinates from <sampleGffDir>/<sample>.gff for every sample,
    merges dense runs of non-core genes into MGEs using a sliding window of
    lookBackDepth genes, and writes "Within" edges between all gene pairs of
    each MGE to <outputDir>/within.csv.

    NOTE(review): the *wd* parameter is never used in this function — confirm
    with callers whether it can be dropped.
    """
    def sortGenomicFeatures(data):  # {sample, {chr, [ [start, end] ] }}
        # Sort every interval list in place by start coordinate.
        for sample in data.keys():
            for chr in data[sample].keys():
                data[sample][chr].sort(key=lambda e: e[0])
    # Non-core regions per sample, keyed by chromosome.
    sampleData={} #{sample, {chr, [ [start, end] ] }}
    for line in open(outputDir+"vertices.csv"): #vertices.csv
        # Vertex id format: sample::chr:start-end ("::" collapsed to ":").
        values=line.strip().replace("::",":").split(":")
        sample=values[0]
        chr=values[1]
        start=int(values[2].split("-")[0])
        end=int(values[2].split("-")[1])
        if sample in sampleData:
            if chr in sampleData[sample]:
                sampleData[sample][chr].append([start,end])
            else:
                sampleData[sample][chr]=[ [start,end] ]
        else:
            sampleData[sample]={chr: [ [start,end] ] }
    sortGenomicFeatures(sampleData)
    print("loading gff")
    # CDS features per sample, keyed by chromosome.
    gffData={} #{sample, {chr, [ [start,end] ] }}
    for sample in sampleData:
        for line in open(sampleGffDir+sample+".gff"):
            if line[0]!="#":
                values=line.strip().split("\t")
                if values[2]=="CDS":
                    chr=values[0]
                    # GFF columns 4 and 5 are the feature start/end.
                    [start, end]=[int(values[3]), int(values[4])]
                    if sample in gffData:
                        if chr in gffData[sample]:
                            gffData[sample][chr].append([start,end])
                        else:
                            gffData[sample][chr]=[ [start,end] ]
                    else:
                        gffData[sample]={chr: [ [start,end] ] }
    sortGenomicFeatures(gffData)
    print("Merging genomic features into MGEs")
    #find the density of non-core genes.
    lookBackDepth=6
    globalFeatureCounter=1
    mgeFeatures={} #{sample : {chr: {feature: [start, end]}}}
    for sample in gffData.keys():
        for chr in gffData[sample].keys():
            # 1 marks a gene that overlaps any non-core region, 0 a core gene.
            nonCoreGenes=[0]*len(gffData[sample][chr])
            if chr in sampleData[sample]:
                insideInsertion=False
                firstNonCoreFeatureIndex=0
                lastNonCoreFeatureIndex=0
                featureCounter=0
                for feature in gffData[sample][chr]:#check whether this feature overlaps any non-core region of sample/chr
                    #determine if gff feature is core or non-core
                    nonCoreGenes[featureCounter]=0
                    for startEnd in sampleData[sample][chr]:
                        if not( feature[1]<startEnd[0] or feature[0]>startEnd[1]):
                            #this is non-core gene
                            nonCoreGenes[featureCounter]=1
                            break
                    featureCounter+=1
                    if featureCounter>=lookBackDepth:
                        # Density of non-core genes in the trailing window.
                        nonCoreGenesCount=sum(nonCoreGenes[(featureCounter-lookBackDepth):featureCounter])
                        if nonCoreGenesCount>(lookBackDepth*0.75) and featureCounter!=len(gffData[sample][chr]):
                            if not insideInsertion:
                                #find the last core gene; the insert starts right after it
                                for i in range((featureCounter-lookBackDepth-1),featureCounter):
                                    if nonCoreGenes[i]==1: #first non-core gene among preceding genes
                                        firstNonCoreFeatureIndex=0 if i==0 else i #i=0 is corner case; normally the last examined feature is the first noncore
                                        break
                                insideInsertion=True
                        elif (insideInsertion and nonCoreGenesCount<(lookBackDepth*0.5) or (insideInsertion and featureCounter==len(gffData[sample][chr]))): #density dropped (or contig ended) - close the insertion.
                            if insideInsertion or featureCounter==len(gffData[sample][chr]): #the second accomodates the case where contig ends in an insertion.
                                #insertion sequence has ended a few genes earlier; find the last noncore gene
                                insideInsertion=False
                                if featureCounter==len(gffData[sample][chr]):
                                    lastNonCoreFeatureIndex=featureCounter-1
                                else: #find the index of the last non-core gene.
                                    for i in range(featureCounter-1,-1,-1):
                                        if (i-1>0 and nonCoreGenes[i]==1 and nonCoreGenes[i-1]==1) or i==0:
                                            lastNonCoreFeatureIndex=i
                                            break
                                # Record all genes of this insertion under one MGE id.
                                for k in range(firstNonCoreFeatureIndex,lastNonCoreFeatureIndex+1):
                                    if sample not in mgeFeatures:
                                        mgeFeatures[sample]={chr: {}}
                                    if (chr not in mgeFeatures[sample]):
                                        mgeFeatures[sample][chr]={globalFeatureCounter: []}
                                    if globalFeatureCounter not in mgeFeatures[sample][chr]:
                                        mgeFeatures[sample][chr][globalFeatureCounter]=[]
                                    mgeFeatures[sample][chr][globalFeatureCounter].append(gffData[sample][chr][k])
                                globalFeatureCounter+=1
                                #if globalFeatureCounter>20:
                                #    sys.exit()
            if sum(nonCoreGenes)>len(gffData[sample][chr])*0.5: #whole contig is probably MGE
                if sample not in mgeFeatures:
                    mgeFeatures[sample]={chr: {}}
                #here, any features already identified on the chromosome are completely replaced:
                #the whole chromosome is newly added to the dictionary to remove previous info. This avoids duplication of MGEs.
                mgeFeatures[sample][chr]={globalFeatureCounter: []}
                mgeFeatures[sample][chr][globalFeatureCounter]=gffData[sample][chr]
                globalFeatureCounter+=1
    print("Generating within MGE edges")
    # Emit an undirected "Within" edge for every pair of distinct genes that
    # share an MGE with more than 5 members; processedEdges de-duplicates
    # both orientations.
    output=open(outputDir+"within.csv","w")
    processedEdges=set()
    for sample in mgeFeatures:
        for chr in mgeFeatures[sample]:
            for feature in mgeFeatures[sample][chr].keys():
                for gene in mgeFeatures[sample][chr][feature]:
                    for targetGene in mgeFeatures[sample][chr][feature]:
                        if gene[0]!= targetGene[0] and gene[1]!= targetGene[1] and len(mgeFeatures[sample][chr][feature])>5:
                            sourceNode=sample+"::"+chr+":"+str(gene[0])+"-"+str(gene[1])
                            targetNode=sample+"::"+chr+":"+str(targetGene[0])+"-"+str(targetGene[1])
                            if (sourceNode+targetNode) not in processedEdges:
                                output.write(sourceNode+"\t"+targetNode+"\tWithin\t"+str(feature)+"\n")
                                processedEdges.add(sourceNode+targetNode)
                                processedEdges.add(targetNode+sourceNode)
    output.close()
| AntonS-bio/accessoryGenomeBuilder | groupGenesToMge.py | groupGenesToMge.py | py | 7,463 | python | en | code | 0 | github-code | 13 |
42658334759 | import unittest
import numpy as np
from PIL import Image
from operations import crop
class TestCrop(unittest.TestCase):
    """Unit tests for the crop operation."""

    @classmethod
    def setUpClass(cls):
        # The 3x3 RGBA fixture image shared by every test.
        cls.img = np.array(Image.open('tests/tiny_test.png'))

    def test_no_crop(self):
        """Cropping to the full bounds must return the image unchanged."""
        self.assertTrue(np.array_equal(crop(self.img, 0, 0, 3, 3), self.img))

    def test_centre_crop(self):
        """Cropping away every border leaves only the single centre pixel."""
        want = np.array([[[0, 0, 255, 255]]])
        got = crop(self.img, 1, 1, 2, 2)
        self.assertTrue(np.array_equal(got, want))

    def test_crop_to_right_edge(self):
        """Cropping to the last column keeps exactly that column."""
        want = np.array(
            [[[255, 0, 0, 255]],
             [[255, 255, 255, 255]],
             [[0, 0, 0, 255]]])
        self.assertTrue(np.array_equal(crop(self.img, 2, 0, 3, 3), want))


if __name__ == '__main__':
    unittest.main()
| CullenStClair/img-editor | tests/test_crop.py | test_crop.py | py | 1,134 | python | en | code | 1 | github-code | 13 |
38036565058 | import os
import os.path
import shutil
def installWebFiles(RTTLibDir, resultsBasePath):
    """Copy web assets from <RTTLibDir>/web into resultsBasePath.

    Only plain files are considered, and files that already exist in
    resultsBasePath are left untouched.
    """
    web_dir = os.path.join(RTTLibDir, 'web')
    for entry in os.listdir(web_dir):
        source = os.path.join(web_dir, entry)
        if not os.path.isfile(source):
            continue  # skip sub-directories
        if os.path.exists(os.path.join(resultsBasePath, entry)):
            continue  # already installed — never overwrite
        shutil.copy(source, resultsBasePath)
| rushioda/PIXELVALID_athena | athena/Tools/RunTimeTester/src/installWebFiles.py | installWebFiles.py | py | 578 | python | en | code | 1 | github-code | 13 |
23913481336 | import socket
from colors import print_green, print_red, print_orange
def scan_port(ip, port, no_warnings):
    """Attempt a TCP connect to (ip, port) and report whether it is open.

    :param ip: target host.
    :param port: TCP port number to probe.
    :param no_warnings: when truthy, closed ports are not reported.
    """
    try:
        # Bug fix: the original never closed the socket; `with` guarantees
        # it is released whether or not the connect succeeds.
        with socket.socket() as sock:
            sock.connect((ip, port))
        print_green(f"[+] {port} IS OPEN")
    except (OSError, OverflowError, ValueError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.
        if no_warnings:
            return
        print_red(f"[-] {port} IS CLOSE")
def scan(ip, port, scan_multiple_ports, no_warnings):
    """Scan a single port, or every port from 1..port when scan_multiple_ports is set."""
    print_orange(f"[*] Scanning {str(ip)}")
    port = int(port)
    targets = range(1, port + 1) if scan_multiple_ports else (port,)
    for target in targets:
        scan_port(ip, target, no_warnings)
| fadhilsaheer/scriptkiddie | tools/portscanner/scanner.py | scanner.py | py | 608 | python | en | code | 2 | github-code | 13 |
30816180147 | from time import sleep
from page_objects.basket_page import Basket
from page_objects.home_page import HomePage
from utilities.base_class import BaseClass
class TestForSale(BaseClass):
    """End-to-end shop flow: search a product, add it to the basket, browse a
    category, and verify the basket count."""

    def test_sale(self, load_data):
        loger = self.logger_object()
        home_page = HomePage(self.driver)
        try:
            home_page.refuse_notifications()
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Missing popup is expected.
            print("No notification")
        home_page.accept_cookiies()
        loger.info("Stop notification and accept cookies!")
        home_page.search_items(load_data["product"])
        home_page.click_search()
        loger.info("Search items")
        item_page = home_page.choose_item()
        sleep(2)
        item_page.add_item()
        loger.info("Select item and add it to the Basket!")
        item_page.continue_shoping()
        item_page.hover_menu()
        item_page.move_to_section()
        action_camera = item_page.select_link()
        loger.info("Open other products!")
        num = action_camera.displayed_quantity()
        containers = action_camera.count_items()
        loger.info("compare displayed quantity and items on the page")
        assert containers == num
        basket = Basket(self.driver)
        loger.info("Open the Basket and get the quantity of the items!")
        basket.open_basket()
        quantity = basket.get_quantity()
        # NOTE(review): hard-coded absolute path only works on one machine —
        # consider deriving the reports directory from the project root.
        self.driver.get_screenshot_as_file("/home/georgi/PycharmProjects/forSales/reports/basket.png")
        loger.info("compare received quantity with desired and close the Basket!")
        assert quantity == 1
        basket.close_basket()
| zGeorgi/for_sales | tests/test_for_sales.py | test_for_sales.py | py | 1,610 | python | en | code | 0 | github-code | 13 |
35466415428 | import os
import cv2
import pandas as pd
import torchvision
from torch.utils.data import Dataset, DataLoader
path_name = "../train_face_image"
# ่ฏปๅๅพ็ๆฐๆฎ
# image = []
# for dir_item in os.listdir(path_name):
# if dir_item is None:
# break
# else:
# image_path = os.path.relpath(os.path.join(path_name, dir_item)) # ๅช่ฆๅฎ็็ธๅฏน่ทฏๅพ
# image = cv2.imread(image_path)
class face_dataset(Dataset):  # must inherit from Dataset
    """Map-style dataset pairing face images with integer labels from a CSV.

    The CSV has no header; column 0 holds the image file name (relative to
    ``img_dir``) and column 1 holds the label.
    """

    def __init__(self, img_dir, csv_dir, transform):
        """
        :param img_dir: directory containing the image files
        :param csv_dir: path to the header-less CSV of (file name, label) rows
        :param transform: optional callable applied to each loaded image
        """
        super().__init__()
        self.image_dir = img_dir
        self.img_labels = pd.read_csv(csv_dir, header=None)
        self.transform = transform

    def __len__(self):
        # One sample per CSV row.
        return len(self.img_labels)

    def __getitem__(self, index):
        # Bug fix: the original did os.path.join(img_dir + name), which only
        # worked when img_dir ended with a slash; a real two-argument join
        # handles the separator itself (and is identical when the trailing
        # slash is present).
        img_path = os.path.join(self.image_dir, self.img_labels.iloc[index, 0])
        image = cv2.imread(img_path)
        label = self.img_labels.iloc[index, 1]
        if self.transform is not None:
            image = self.transform(image)
        return image, label
if __name__ == '__main__':
    # Smoke test: stream the training set in batches of 32 and print the
    # batch tensor shapes. drop_last discards the final partial batch.
    dataset = face_dataset(img_dir="../train_face_image/",
                           csv_dir="../person_csv/person_train.csv",
                           transform=torchvision.transforms.ToTensor())
    dataloader = DataLoader(dataset, batch_size=32, drop_last=True)
    for data in dataloader:
        imgs , target = data
        print(imgs.shape)
        # print(target)
# if __name__ == '__main__':
# dataset = face_dataset(img_dir="../test_face_image/",
# csv_dir="../person_csv/person_test.csv",
# transform=torchvision.transforms.ToTensor())
# dataloader = DataLoader(dataset, batch_size=32, drop_last=True)
# for data in dataloader:
# imgs , target = data
# print(imgs.shape)
# print(target) | DirgeDos/RecognizeFace | face_check/src/face_dataset.py | face_dataset.py | py | 2,002 | python | en | code | 3 | github-code | 13 |
27620605490 | import requests
from lxml import etree
from bs4 import BeautifulSoup
import json
def discount_for_steam_balance(steam_price, buff_price):
    """Ratio of the Buff price to the effective Steam payout (85% after fees), 3 d.p."""
    effective_steam = steam_price * 0.85
    return round(buff_price / effective_steam, 3)
def discount_for_buff_balance(steam_price, buff_price):
    """Ratio of the Buff price (after the 1% sale fee) to the Steam price, 3 d.p."""
    net_buff = buff_price * 0.99
    return round(net_buff / steam_price, 3)
# Query the Buff (buff.163.com) sell-order API for one CS:GO item and compare
# its Buff price against its Steam price.
url = r'https://buff.163.com/api/market/goods/sell_order?'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36 Edg/106.0.1370.37'
}
# Buff's internal id of the item being tracked.
goods_id = '835470'
params = {
    'game': 'csgo',
    'goods_id': goods_id,
    'page_num': '1',
    'sort_by': 'default',
    'mode': '',
    'allow_tradable_cooldown': 1,
    '_': ''
}
response = requests.get(url=url, headers=headers, params=params)
page_text = response.json()
items = []
# Cheapest current listing vs. the Steam reference price (CNY).
name = page_text['data']['goods_infos'][goods_id]['name']
buff_price = float(page_text['data']['items'][0]['price'])
steam_price = float(page_text['data']['goods_infos'][goods_id]['steam_price_cny'])
for_steam_balance = discount_for_steam_balance(steam_price, buff_price)
for_buff_balance = discount_for_buff_balance(steam_price, buff_price)
items.append([name, buff_price, steam_price, for_steam_balance, for_buff_balance])
# with open('./page_json.json', 'w', encoding='utf-8') as fp:
#     json.dump(page_text, fp=fp, ensure_ascii=False)
# tree = etree.HTML(page_text)
# price1 = tree.xpath('//div[@id="relative-goods"]/div/a/span/@data-price')
# title = tree.xpath('//div[@id="relative-goods"]/div/a/text()')
# print(price1)
# print(title)
# soup = BeautifulSoup(page_text, 'lxml')
# price2 = soup.select('#relative-goods > div > a > span')
# print(price2)
# print(name)
# print(buff_price)
# print(steam_price)
print(items)
| New-Heartbeat/spider-learn | buff้ฅฐๅไบคๆๆฐๆฎ็ฌๅ/prices_now.py | prices_now.py | py | 1,815 | python | en | code | 0 | github-code | 13 |
46976570344 | import FinanceDataReader as fdr
import pymysql
import numpy as np
import pandas as pd
import sqlalchemy
# ์ ๋ผ์ , 2018๋
# Fetch one ticker's OHLCV history (for a quick sanity check) and then load
# per-ticker Excel files into the MySQL table oneDay3M.
df = fdr.DataReader('215600', '2021-05')
print(df.info())
from sqlalchemy import create_engine
conn = pymysql.connect(host='localhost', port=3306, user='root',
                       password='1234', db='INVESTAR', charset='utf8')
with conn.cursor(pymysql.cursors.DictCursor) as curs:
    # Create the target table keyed by (code, Date) if it does not exist yet.
    sql = """
    CREATE TABLE IF NOT EXISTS oneDay3M(
        code VARCHAR(20),
        Date DATETIME,
        Open BIGINT(20),
        High BIGINT(20),
        Low BIGINT(20),
        Close BIGINT(20),
        Volume BIGINT(20),
        PRIMARY KEY (code, Date))
    """
    curs.execute(query=sql)
    # Load every known ticker code from company_info.
    sql = "SELECT code FROM investar.company_info;"
    curs.execute(query=sql)
    get_datas = curs.fetchall()
codes = [get_datas.pop()["code"] for _ in range(len(get_datas))]
start_i = 0
for code in codes:
    print(start_i)
    # One pre-exported Excel file of daily candles per ticker.
    path = f"C:/Users/ad/Desktop/Github/AI-Stock-Prediction/ImageData/oneDayXlsx/{code}.xlsx"
    df = pd.read_excel(path)
    with conn.cursor() as curs:
        for r in df.itertuples():
            # REPLACE makes the load idempotent on the (code, Date) key.
            sql = f"REPLACE INTO oneDay3M VALUES" \
                  f" ('{code}', '{r.Date}', {r.Open}, {r.High}, {r.Low}, {r.Close},{r.Volume})"
            curs.execute(sql)
    start_i += 1
    conn.commit()
| Deforeturn/AI-Stock-Prediction | ETC/test.py | test.py | py | 1,352 | python | en | code | 0 | github-code | 13 |
8488268133 | from django.core.management import BaseCommand
from theaters.models import Region, Theater
class Command(BaseCommand):
    """One-off seeding command: create every Region and its Theater rows."""

    def handle(self, *args, **options):
        # Region name -> list of theater (branch) names belonging to it.
        regions_names = {
            '서울': [
                '강남', '강남대로(씨티)', '강동', '군자', '동대문', '마곡', '목동', '상봉', '상암월드컵경기장', '성수', '센트럴', '송파파크하비오', '신촌', '약수',
                '이수', '창동', '코엑스', '홍대', '화곡', 'ARTNINE'
            ],
            '경기': [
                '고양스타필드', '김포한강신도시', '남양주', '동탄', '미사강변', '백석', '별내', '부천스타필드시티', '분당', '산본', '수원남문', '시흥배곧', '안산중앙',
                '안성', '오산', '용인기흥', '용인테크노밸리', '의정부민락', '일산', '일산벨라시타', '파주금촌', '파주야당', '파주운정', '판교', '평택', '하남스타필드'
            ],
            '인천': [
                '검단', '송도', '서창', '인천논현', '청라', '청라지젤'
            ],
            '대전/충청/세종': [
                '공주', '대전', '대전신성', '대전중앙로', '대전현대아울렛', '세종(조치원)', '세종나성', '세종청사', '오창', '유천', '진천', '천안', '청주사창', '충주',
                '화성봉담'
            ],
            '부산/대구/경상': [
                '거창', '경북도청', '경산하양', '경주', '구미강동', '김천', '남포항', '대구(칠성로)', '대구신세계(동대구)', '대구이시아', '대천', '마산', '문경',
                '부산극장', '부산대', '북대구(칠곡)', '사천', '삼천포', '수성', '수성라피에스타', '울산', '정관', '창원', '창원내서', '해운대(장산)'
            ],
            '광주/전라': [
                '광주상무', '광주하남', '남원', '목포하당(포르모)', '송천', '순천', '여수웅천', '전대(광주)', '첨단'
            ],
            '강원': [
                '남춘천', '속초', '원주', '원주센트럴'
            ],
            '제주': [
                '제주'
            ]
        }
        for region, name in regions_names.items():
            # Create the Region object (a no-op when it already exists) and
            # reuse the returned instance for every theater below.
            # (Fix: the original re-ran Region.objects.get(name=region) once
            # per theater inside the inner loop — one redundant query each.)
            r, _ = Region.objects.get_or_create(name=region)
            for n in name:
                # Create the Theater object linked to its region.
                Theater.objects.get_or_create(name=n, region=r)
        print('Region, Theater 객체들이 모두 생성되었습니다.')
| OmegaBox/OmegaBox_Server | app/theaters/management/commands/theater_datas.py | theater_datas.py | py | 2,647 | python | ko | code | 1 | github-code | 13 |
36562472156 | import xml.etree.cElementTree as ET
import os
import re
import configparser
import pyodbc
from hebrew_numbers import int_to_gematria
# Read settings.ini (next to this script): XML source directory, SQL Server
# connection details, and the name of the temporary CSV used for bulk insert.
config = configparser.ConfigParser()
config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'settings.ini'))
current_dir_path = config.get('XML','current_dir')
tanakh_dir_list = os.listdir(current_dir_path)
server = config.get("SQL", "server")
table_names = config.get("SQL", "table_names")
database_name = config.get("SQL", "database_name")
csv_file_name = config.get("CSV", "csv_file_name")
# Start from a clean slate: remove a leftover CSV from a previous run.
if os.path.exists(os.path.join(os.path.abspath(os.path.dirname(__file__)), csv_file_name)):
    os.remove(os.path.join(os.path.abspath(os.path.dirname(__file__)), csv_file_name))
# Single shared connection; autocommit so DDL/бulk statements apply immediately.
conn = pyodbc.connect('Driver={SQL Server};'
                      'Server=' + server + ';'
                      'Database=' + database_name + ';'
                      'Trusted_Connection=yes;',
                      autocommit=True, encoding='utf-8')
def execute_query(query):
    """Run *query* on the module-level connection and return the live cursor."""
    with conn:
        cur = conn.cursor()
        cur.execute(query)
    return cur
def write_csv_file(file_name, textline):
    """
    Append *textline* (terminated by '\r') to *file_name* next to this script.

    The rows are later BULK INSERTed into the TBL_MASSECHET_WORD table, whose
    loader expects '\r' as the row terminator.

    :param file_name: the csv file name, relative to this script's directory
    :param textline: the string that will be appended to the csv file
    """
    # Bug fix: the original ignored *file_name* entirely and always wrote to
    # the module-level csv_file_name; honour the parameter instead (the only
    # existing caller passes csv_file_name, so behavior is unchanged for it).
    path = os.path.join(os.path.abspath(os.path.dirname(__file__)), file_name)
    with open(path, "a", encoding='utf-8') as f:
        f.write(f"{textline}\r")
def get_perek_pasuk_id(chapter_num, verse_num, book):
    """Look up the PEREK_PASUK_ID for (book, chapter, verse) in the database.

    Returns '' when no row matches; when several rows match, the last one
    fetched wins.
    NOTE(review): the parameters are interpolated straight into the SQL —
    fine for trusted XML input, but consider parameterized queries.
    """
    execute_query(f"USE {database_name};")
    query = f"""SELECT PP.PEREK_PASUK_ID,A.PEREK_NUM, PA.PASUK_NUM, S.SEFER_ENGLISH_NAME  FROM TBL_TANAKH_PEREK_PASUK PP
                JOIN TBL_TANAKH_PASUK PA ON PA.PASUK_ID = PP.PASUK_ID
                JOIN
                (
                SELECT PE.SEFER_PEREK_ID,PE.SEFER_ID,P.PEREK_NUM FROM TBL_TANAKH_SEFER_PEREK PE
                JOIN TBL_TANAKH_PEREK P ON PE.PEREK_ID = P.PEREK_ID
                )AS A
                ON A.SEFER_PEREK_ID = PP.SEFER_PEREK_ID
                JOIN TBL_TANAKH_SEFER S ON S.SEFER_ID = A.SEFER_ID AND A.SEFER_PEREK_ID = PP.SEFER_PEREK_ID
                WHERE PA.PASUK_NUM = '{verse_num}' AND A.PEREK_NUM = '{chapter_num}' AND S.SEFER_ENGLISH_NAME = '{book}' """
    result_query = execute_query(query)
    perek_pasuk_id = ''
    for row in result_query:
        perek_pasuk_id = row[0]
    return perek_pasuk_id
def get_xml_word_and_attributes(xml):
    """Parse one Tanakh XML file and append one CSV row per <Word> element.

    For every word: resolve its PEREK_PASUK_ID from the database, record its
    position, the Ktiv/Kri flags, the bare Hebrew word, and the full word
    (with brackets/parentheses stripped).
    """
    tree = ET.ElementTree(file=current_dir_path + '\\' + xml)
    for elem in tree.findall('Word'):
        attributes = elem.attrib
        perek_pasuk_id = get_perek_pasuk_id(attributes.get('Chapter'),attributes.get('Verse'),attributes.get('Book'))
        word_position = attributes.get('WordSequence')
        # Ktiv/Kri arrive as the strings 'true'/'false'; store as 1/0.
        if attributes.get('Ktiv') == 'true':
            isKtiv = 1
        else:
            isKtiv = 0
        if attributes.get('Kri') == 'true':
            isKri = 1
        else:
            isKri = 0
        words = elem.text
        # full_word: drop bracket/parenthesis markers; word: keep only the
        # Hebrew letters aleph..tav.
        full_word = re.sub('[\[\]\(\)]+','',words)
        word = re.sub('[^א-ת]+','',words)
        # '|' is the field terminator expected by the BULK INSERT.
        textline = f"|{perek_pasuk_id}|{word_position}|{isKtiv}|{isKri}|{word}|{full_word}"
        print(textline)
        write_csv_file(csv_file_name,textline)
def bulk_insert_to_tbl(csv_file_name, tbl_name):
    """Bulk-load *csv_file_name* ('|'-separated, '\r'-terminated, UTF-8 rows)
    into *tbl_name*.

    IDENTITY_INSERT is switched on around the load so explicit identity
    values from the CSV are preserved.
    """
    execute_query(f"USE {database_name};")
    execute_query(f"SET IDENTITY_INSERT {tbl_name} ON;")
    query = f"BULK INSERT {tbl_name} \
    FROM '{csv_file_name}' \
    WITH \
    ( \
       FIELDTERMINATOR ='|' \
       , ROWTERMINATOR ='\r' \
       ,CODEPAGE = '65001' \
    );"
    # Bug fix: run the bulk insert while IDENTITY_INSERT is still ON;
    # the original executed SET ... OFF *before* the load it was guarding.
    execute_query(query)
    execute_query(f"SET IDENTITY_INSERT {tbl_name} OFF;")
def main():
    """Parse every Tanakh XML file and bulk-load the words into SQL Server."""
    for xml_file in tanakh_dir_list:
        get_xml_word_and_attributes(xml_file)
    bulk_insert_to_tbl(csv_file_name, table_names)
    # Clean up the temporary CSV once the bulk insert has finished.
    csv_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), csv_file_name)
    if os.path.exists(csv_path):
        os.remove(csv_path)


if __name__ == '__main__':
    main()
19880297275 | from django.urls import path
from . import views
# URL routes for the banking app: dashboard, customer management,
# transactions, and per-customer transaction history.
urlpatterns = [
    path('',views.index,name="index"),  # dashboard / landing page
    path('customers',views.customers,name="customers"),
    path('transactions',views.transactions,name="transactions"),
    path('sender_Profile',views.sender_Profile,name="sender_Profile"),
    path('history',views.history,name="history"),
    # <accountNo> is captured from the URL path and passed to the view.
    path('customers/customer_transaction/<accountNo>',views.customer_transaction,name="customer_transaction")
]
| Keyur3766/DjangoProjects | Banking_System/home/urls.py | urls.py | py | 459 | python | en | code | 0 | github-code | 13 |
16978526939 | import numpy as nump
import pdb
from newTree import *
def breadth_search():
    """Interactively configure and solve Missionaries & Cannibals.

    Prompts for counts and boat capacity, enumerates all possible boat moves
    of the form [missionaries, cannibals, boat], de-duplicates them, then
    hands the start/goal states to MyTree / find_solution.
    """
    ################ SETTING VALUES ####################################################################################
    m = 3  # missionaries
    c = 3  # cannibals
    b = 2  # boat capacity
    print("MISSIONARIES AND CANNIBALS")
    if input("Use Default Values? (y/n) ") == 'n':
        m = int(input("Number of Missionaries(Default = 3): ") or 3)
        c = int(input("Number of Cannibals(Default = 3): ") or 3)
        b = int(input("Boat Capacity(Default = 2): ") or 2)
        print("M: {} C: {} B: {} \n".format(m, c, b))
    if b < 2:
        b = 2
        print("\n Boat Cannot Have Capacity Smaller Than Two >:(\n Default Applied, Capacity = {}\n".format(b))
    # Base move set for a capacity-2 boat: [missionaries, cannibals, boat].
    possible = nump.array([[0, 1, 1], [1, 0, 1], [1, 1, 1], [0, 2, 1], [2, 0, 1]])
    if b > 2:
        # Extend the move set with the larger loads a bigger boat allows.
        holder = []
        i = 3
        while i < b+1:
            holder.append(i)
            possible = nump.append(possible, [[0, i, 1], [i, 0, 1]], axis=0)
            if i % 2 == 0:
                possible = nump.append(possible, [[int(i/2), int(i/2), 1]], axis=0)
            i += 1
        j = len(holder)
        for p in range(j):
            x = holder[p]
            k = 1
            while k < int(j):
                x = x - 1
                y = k
                k += 1
                possible = nump.append(possible, [[x, y, 1], [y, x, 1]], axis=0)
    x = len(possible)  # This stuff is for removing repeat move sets
    i = 0
    while i < x-1:
        temp = possible[i]
        x = len(possible)
        k = 0
        while k < x-1:
            if (possible[k] == temp).all() and i != k:
                possible = nump.delete(possible, k, 0)
                x -= 1
            k += 1
        i += 1
    print("Possible Moves:\n {} \n".format(possible))
    ####################################################################################################################
    # State layout: [m_left, c_left, boat_left, m_right, c_right, boat_right]
    node = nump.array([m, c, 1, 0, 0, 0])
    goal = nump.array([0, 0, 0, m, c, 1])
    mytree = MyTree(node, goal, possibles=possible)
    find_solution(mytree, goal)


breadth_search()
| NotEnoughMilk/CSC375--Artificial-Intelligence | Missionaries and Cannibals/otherOtherMain.py | otherOtherMain.py | py | 2,319 | python | en | code | 0 | github-code | 13 |
72489888017 | # 1 disc:
# 1 1 -> 2
# 2 discs:
# 1 0 -> 1
# 2 0 -> 2
# 3 1 -> 2
# 3 discs:
# 1 1 -> 2
# 2 1 -> 0
# 3 2 -> 0
# 4 1 -> 2
# 5 0 -> 1
# 6 0 -> 2
# 7 1 -> 2
# 4 discs:
# 1 0 -> 1
# 2 0 -> 2
# 3 1 -> 2
# 4 0 -> 1
# 5 2 -> 0
# 6 2 -> 1
# 7 0 -> 1
# 8 0 -> 2 # 3 discs:
# 9 1 -> 2 # 1 1 -> 2
# 10 1 -> 0 # 2 1 -> 0
# 11 2 -> 0 # 3 2 -> 0
# 12 1 -> 2 # 4 1 -> 2 # 2 discs:
# 13 0 -> 1 # 5 0 -> 1 # 1 0 -> 1
# 14 0 -> 2 # 6 0 -> 2 # 2 0 -> 2 # 1 disc:
# 15 1 -> 2 # 7 1 -> 2 # 3 1 -> 2 # 1 1 -> 2
# 4 discs:
# 1 0 -> 1
# 2 0 -> 2
# 3 1 -> 2
# 4 0 -> 1
# 5 2 -> 0
# 6 2 -> 1
# 7 0 -> 1
# 8 0 -> 2 # 3 discs:
# 9 1 -> 2 # 1 0 -> 2
# 10 1 -> 0 # 2 0 -> 1
# 11 2 -> 0 # 3 2 -> 1
# 12 1 -> 2 # 4 0 -> 2 # 2 discs:
# 13 0 -> 1 # 5 1 -> 0 # 1 0 -> 1
# 14 0 -> 2 # 6 1 -> 2 # 2 0 -> 2 # 1 disc:
# 15 1 -> 2 # 7 0 -> 2 # 3 1 -> 2 # 1 0 -> 2
from typing import Dict
from adt.stack import Stack
def foo(
    towers: Dict[str, Stack],
    n: int,
    from_pole: str,
    with_pole: str,
    to_pole: str
) -> Dict[str, Stack]:
    """Move the top *n* discs from *from_pole* to *to_pole* (Tower of Hanoi).

    *with_pole* names the spare pole. The stacks inside *towers* are mutated
    in place; the same dict is also returned for convenience.
    """
    if n == 1:
        # Base case: a single disc moves straight across.
        # Bug fix: the original printed the literal text
        # "pop from_pole -> push to_pole" on every move, never identifying
        # the poles involved; report the actual pole names instead.
        print(f"pop {from_pole} -> push {to_pole}")
        towers[to_pole].push(towers[from_pole].pop())
    else:
        # Park the n-1 smaller discs on the spare pole...
        towers = foo(towers, n - 1, from_pole, to_pole, with_pole)
        # ...move the largest disc across...
        print(f"pop {from_pole} -> push {to_pole}")
        towers[to_pole].push(towers[from_pole].pop())
        # ...then bring the n-1 discs back on top of it.
        towers = foo(towers, n - 1, with_pole, from_pole, to_pole)
    return towers
# Build the three poles and load `my_n` discs onto the starting pole
# (largest value pushed first, so disc 0 ends up on top).
f_pole = Stack()
w_pole = Stack()
t_pole = Stack()
my_n = 4
for i in reversed(range(my_n)):
    f_pole.push(i)
ts = {"from_pole": f_pole, "with_pole": w_pole, "to_pole": t_pole}
print("\nStart state:")
print(ts)
new_ts = foo(ts, n=my_n, from_pole="from_pole", with_pole="with_pole", to_pole="to_pole")
print("\nFinal state:")
print(new_ts)
| ldnicolasmay/RunestonePythonDS3 | src/Chapter05/towers_of_hanoi.py | towers_of_hanoi.py | py | 2,077 | python | en | code | 0 | github-code | 13 |
42648376482 | """Represents the snake AI with personality D.Va"""
import random
from .Graph import Graph
from .a_star import a_star_search, alt_a_star_search
import time
class DVA(object):
    """Represents the Battlesnake D.Va: personality data plus the A*-based
    move-selection AI. State is shared through the class-level BLACKBOARD.

    NOTE(review): BLACKBOARD, GRAPH and INIT are class attributes mutated at
    runtime, so all DVA instances share one state — confirm a single snake
    per process is intended.
    """
    # In case server is started after game has begun
    INIT = False
    NAME = 'D.Va'
    IMAGE_URL = 'static/d_va.png'
    COLOR = '#EE4BB5'
    # Canned taunt lines, grouped by game phase.
    TAUNTS = {
        'set_up': {
            'dva_online': 'D.Va online.',
            'into_the_fight': 'I can\'t wait to get into the fight!',
            'new_high_score': 'Let\'s shoot for a new high score!',
            'gameface_on': 'Alright. Gameface: On.',
            'keep_up_with_me': 'Think you can keep up with me?',
            'lead_the_way': 'MEKA leads the way!',
            'ready_player_one': 'Ready, player 1.',
        }
    }
    # AI Blackboard: per-turn world state refreshed by update().
    BLACKBOARD = {
        'snake': None,
        'snake_len': None,
        'snake_head_coord': None,
        'snake_tail_coord': None,
        'nearest_snake': None,
        'nearest_food': None,
        'food': None,
        'snakes': None,
        'enemy_snakes': None,
    }
    GRAPH = Graph()

    def __init__(self):
        return

    def get_name(self):
        """Return snake name"""
        return self.NAME

    def get_image_url(self):
        """Return snake image"""
        return self.IMAGE_URL

    def get_color(self):
        """Returns snake color"""
        return self.COLOR

    def get_taunt(self, category, key):
        """Return taunt based on category and key parameters"""
        return self.TAUNTS[category].get(key)

    def get_random_taunt(self, category):
        """Return random taunt based on category parameter"""
        random_key = random.choice(list(self.TAUNTS[category]))
        return self.TAUNTS[category].get(random_key)

    def get_move(self):
        """Returns the next move's relative direction ('up'/'down'/'left'/'right').

        Strategy: chase the nearest food when we can reach it before the
        nearest enemy, otherwise follow our own tail; fall back to the
        farthest reachable node when no safe path exists.
        """
        # start = time.time()
        snake_head = self.BLACKBOARD['snake_head_coord']
        snake_tail = self.BLACKBOARD['snake_tail_coord']
        nearest_food = self.BLACKBOARD['nearest_food']
        nearest_snake = self.BLACKBOARD['nearest_snake']
        (nearest_food_cost, nearest_food_coord) = nearest_food
        if nearest_snake is not None:
            (nearest_snake_cost, nearest_snake_object) = nearest_snake
            nearest_snake_head = (
                nearest_snake_object['coords'][0][0],
                nearest_snake_object['coords'][0][1]
            )
            # Cost from the food to the enemy head: used to judge whether we
            # can win the race to the food.
            nearest_food_nearest_snake_cost = self.GRAPH.cost(
                nearest_food_coord,
                nearest_snake_head
            )
        current_path_to_tail = self.__find_path(
            snake_head,
            snake_tail
        )
        if nearest_snake is None or nearest_food_cost < nearest_food_nearest_snake_cost:
            path = self.__find_path(
                snake_head,
                nearest_food_coord
            )
        else:
            path = current_path_to_tail
        # If no path to food exists, check if the next position can get back to the tail
        if len(path) > 0:
            next_coord = path[0]
            if len(current_path_to_tail) > 0:
                future_path_to_tail = self.__find_path(next_coord, current_path_to_tail[0])
                if len(future_path_to_tail) == 0:
                    path = current_path_to_tail
        elif len(path) == 0 and len(current_path_to_tail) > 0:
            path = current_path_to_tail
        if len(path) == 0:
            # Last resort: head for the farthest reachable node.
            coord_2 = self.GRAPH.farthest_node(self.BLACKBOARD['snake_head_coord'])
            path = self.__find_path(snake_head, coord_2)
        next_coord = path[0]
        # Translate the next coordinate into a relative direction.
        diff = (
            next_coord[0] - self.BLACKBOARD['snake_head_coord'][0],
            next_coord[1] - self.BLACKBOARD['snake_head_coord'][1]
        )
        # end = time.time()
        # print "get_move() runtime: %.3f" % (end - start)
        if diff == (0, 1):
            return 'down'
        elif diff == (0, -1):
            return 'up'
        elif diff == (1, 0):
            return 'right'
        else:
            return 'left'

    def init(self, data):
        """Initializes object based on Battlesnake game data"""
        self.GRAPH.init(data['width'], data['height'])
        return

    def update(self, data):
        """Updates object based on Battlesnake turn data"""
        # start = time.time()
        # Check if we're initialized, if not, init
        if not self.INIT:
            self.init(data)
        self.BLACKBOARD['snakes'] = data['snakes']
        self.BLACKBOARD['food'] = data['food']
        self.__update_self(data['you'], data['snakes'])
        # Update graph
        self.GRAPH.update(self.BLACKBOARD)
        nearest_snake = self.__find_nearest_snake()
        nearest_food = self.__find_nearest_food()
        if nearest_snake is not None:
            self.BLACKBOARD['nearest_snake'] = nearest_snake
        if nearest_food is not None:
            self.BLACKBOARD['nearest_food'] = nearest_food
        # end = time.time()
        # print "update() runtime: %.3f" % (end - start)
        return

    def __update_self(self, snake_id, snakes):
        """Updates snake based on Battlesnake turn data"""
        for snake in snakes:
            if snake_id == snake['id']:
                snake_len = len(snake['coords'])
                self.BLACKBOARD['snake'] = snake
                self.BLACKBOARD['snake_len'] = snake_len
                # coords[0] is the head, coords[-1] the tail.
                self.BLACKBOARD['snake_head_coord'] = (
                    snake['coords'][0][0],
                    snake['coords'][0][1]
                )
                self.BLACKBOARD['snake_tail_coord'] = (
                    snake['coords'][snake_len - 1][0],
                    snake['coords'][snake_len - 1][1]
                )
                # Everyone except ourselves is an enemy.
                self.BLACKBOARD['enemy_snakes'] = list(self.BLACKBOARD['snakes'])
                self.BLACKBOARD['enemy_snakes'].remove(snake)
        return

    def __find_nearest_snake(self):
        # Return (cost, snake) for the enemy whose head is cheapest to reach.
        coord_1 = self.BLACKBOARD['snake_head_coord']
        enemy_snakes = self.BLACKBOARD['enemy_snakes']
        lowest_cost = -1
        lowest_cost_snake = None
        for snake in enemy_snakes:
            coord_2 = snake['coords'][0]
            cost = self.GRAPH.cost(coord_1, coord_2)
            if lowest_cost == -1 or cost < lowest_cost:
                lowest_cost_snake = snake
                lowest_cost = cost
        return (lowest_cost, lowest_cost_snake)

    def __find_nearest_food(self):
        # Return (cost, coord) of the cheapest-to-reach food pellet.
        food = self.BLACKBOARD['food']
        coord_1 = self.BLACKBOARD['snake_head_coord']
        lowest_cost_coord = None
        lowest_cost = -1
        for food_coord in food:
            coord_2 = (food_coord[0], food_coord[1])
            cost = self.GRAPH.cost(
                coord_1,
                coord_2
            )
            if lowest_cost == -1 or lowest_cost > cost:
                lowest_cost_coord = coord_2
                lowest_cost = cost
        return (lowest_cost, lowest_cost_coord)

    def __find_path(self, node_1, node_2):
        """Updates the A* pathing logic.

        Returns the list of coordinates from node_1 (exclusive) to node_2
        (inclusive), or [] when node_2 is unreachable.
        """
        # Obtain path mapping based on graph and start/end points
        came_from = a_star_search(
            self.GRAPH,
            node_1,
            node_2
        )
        # Build path array based on path mapping
        path = []
        node = node_2
        while node != node_1:
            # If node is not in mapping, no path exists
            if node in came_from:
                path.append(node)
                node = came_from[node]
            else:
                # Set path to empty if no path exists and exit
                path = []
                break
        path.reverse()
        return path
| dlsteuer/battlesnake | snake/DVA.py | DVA.py | py | 7,768 | python | en | code | 0 | github-code | 13 |
17159071662 | import sys
def solve():
    """BOJ 9372: print, per test case, the number of edges in a spanning
    tree of n planets — which is always n - 1 (the graph is connected)."""
    read = sys.stdin.readline
    """
    def find(v):
        while v != parent[v]:
            parent[v] = parent[parent[v]]
            v = parent[v]
        return v
    def union(v1, v2):
        root_v1 = find(v1)
        root_v2 = find(v2)
        if root_v1 == root_v2:
            return False
        else:
            parent[root_v1] = root_v2
            size[root_v2] += size[root_v1]
            return True
    res = []
    for _ in range(int(read())):
        n, m = map(int, read().split())
        parent = [i for i in range(n)]
        size = [1 for _ in range(n)]
        ans = 0
        found = False
        for _ in range(m):
            a, b = map(int, read().split())
            if not found and union(a - 1, b - 1):
                ans += 1
                if size[b - 1] == n:
                    found = True
        res.append(str(ans))
    print('\n'.join(res))
    """
    # Realized after seeing the top-ranked solution: a spanning tree of a
    # connected graph always has exactly n - 1 edges, so the edge list above
    # (kept as dead code in the string) is unnecessary.
    # res = []
    for _ in range(int(read())):
        n, m = map(int, read().split())
        # Consume the m edge lines; their contents are irrelevant.
        for _ in range(m):
            read()
        # res.append(str(n - 1))
        print(n - 1)
    # print('\n'.join(res))


solve()
| jiyolla/study-for-coding-test | BOJwithDongbinNa/9372/9372.py | 9372.py | py | 1,255 | python | en | code | 0 | github-code | 13 |
71544480659 | from sklearn import svm
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
import pickle
from sklearn.ensemble import ExtraTreesClassifier
# data = pd.read_csv('data/train_for_duplicated_data.csv')
# data.drop_duplicates(keep='first', inplace=True)
# # Get the label column
# labels = data['target']
# # Drop the label column
# del data['target']
# Map each bacteria species name to an integer class id.
week_day_dict = {
    'Streptococcus_pyogenes' : 0,
    'Salmonella_enterica' : 1,
    'Enterococcus_hirae' : 2,
    'Escherichia_coli' : 3,
    'Campylobacter_jejuni' : 4,
    'Streptococcus_pneumoniae' : 5,
    'Staphylococcus_aureus' : 6,
    'Escherichia_fergusonii' : 7,
    'Bacteroides_fragilis' : 8,
    'Klebsiella_pneumoniae' : 9
}
data1 = pd.read_csv('withtarget/train.csv')
data2 = pd.read_csv('withtarget/test.csv')
# Merge the two frames
data3 = pd.concat([data1, data2])
# Drop the id column
del data3['row_id']
data3.drop_duplicates(keep='first', inplace=True)
print(data3.shape)
# Shuffle the data
# NOTE(review): DataFrame.sample(frac=1) returns a *new* shuffled frame and
# the result is discarded here, so these three calls do not actually shuffle
# data3 — assign the result back if shuffling is intended.
data3.sample(frac=1)
# Shuffle the data
data3.sample(frac=1)
# Shuffle the data
data3.sample(frac=1)
# Get the labels
labels = data3['target']
# Drop the label column
del data3['target']
print(data3.shape)
print(labels.shape)
# Convert string labels to integer ids for the classifier.
labels_arr = []
for lab in labels:
    labels_arr.append(week_day_dict[lab])
random = ExtraTreesClassifier(n_estimators=1000, n_jobs=4)
random.fit(data3, labels_arr)
# Save the model
s=pickle.dumps(random)
f=open('extra_trees_train_and_test_dup_v1.model', "wb+")
f.write(s)
f.close()
37865204092 | import numpy as np
import math
from typing import Tuple

import numpy as np
from matplotlib import pyplot as plt
def _array(mask, fill):
zero = np.zeros(mask.shape, dtype=np.uint8)
zero[mask] = fill
return zero
def colored(data, color: Tuple[int, int, int]):
r, g, b = color
data = data.astype(bool)
return np.dstack([_array(data, r), _array(data, g), _array(data, b)])
def red(data):
    """Render *data*'s truthy pixels in pure red."""
    return colored(data, (255, 0, 0))


def white(data):
    """Render *data*'s truthy pixels in white."""
    full = 255
    return colored(data, (full, full, full))
def gray(data):
    """Replicate a single-channel image into three identical RGB channels."""
    return np.dstack((data, data, data))


def toGray(data):
    """Linearly rescale *data* to the full 0-255 range and cast to uint8."""
    lo, hi = data.min(), data.max()
    scaled = np.interp(data, (lo, hi), (0, 255))
    return scaled.astype(np.uint8)
def layer(images, idx: int):
    """Display slice *idx* (last axis) of a 3-D volume in grayscale."""
    image = images[:, :, idx]
    # Rescale anything that is not already 8-bit before display.
    if not np.issubdtype(image.dtype, np.uint8):
        image = toGray(image)
    plt.figure()
    plt.axis("off")
    plt.imshow(image, cmap="gray")
def overlay(ct, label, alpha=0.4):
    """Draw *label* as a translucent red mask on top of the grayscale *ct* image."""
    plt.figure()
    plt.axis("off")
    plt.imshow(gray(ct), cmap="gray")
    plt.imshow(red(label), alpha=alpha)
def series(images, figsize=[12, 12], start_with=0, show_every=1):
    """
    Visualize this series of scans as a square grid of slices.

    @param images: pixel array of DICOM scans, slices along the last axis
    @param figsize: matplotlib figure size
    @param start_with: display images start with index
    @param show_every: display every n image
    """
    z = images.shape[-1]
    # Bug fix: `floor` and `sqrt` were never imported anywhere in this module
    # and raised NameError at call time; use the math module instead.
    rows = math.floor(math.sqrt((z - start_with) / show_every))
    fig, ax = plt.subplots(rows, rows, figsize=figsize)
    for i in range(rows * rows):
        idx = start_with + show_every * i
        if idx >= z:
            break
        plot = ax[int(i / rows), int(i % rows)]
        plot.set_title("slice {}".format(idx))
        plot.imshow(images[:, :, idx], cmap="gray")
        plot.axis("off")
    plt.show()
| unnamed42/metastasis-analyzer | jupyter/plot.py | plot.py | py | 1,820 | python | en | code | 0 | github-code | 13 |
27237347657 | #!/usr/bin/env python3
from graph.load_xml import load_graph
from graph.save_xml_v4 import save_graph
from lxml import etree
import sys
import os
# Defaults: read the XML from stdin, write the converted graph to stdout.
source=sys.stdin
sourcePath="[XML-file]"
dest=sys.stdout
if len(sys.argv) < 2:
    raise RuntimeError("This converts exactly one XML file. Please provide one path to an XML file")
source=sys.argv[1]
sys.stderr.write("Reading graph type from '{}'\n".format(source))
sourcePath=os.path.abspath(source)
# Optional second argument: destination path (otherwise stdout).
if len(sys.argv)>2:
    dest=sys.argv[2]
# LOAD XML: yields the graph type and, when present, a concrete instance.
(type,instance)=load_graph(source, sourcePath)
if instance:
    save_graph(instance, dest)
else:
    save_graph(type, dest)
| joshjennings98/fyp | graph_schema-4.2.0/tools/convert_v3_graph_to_v4.py | convert_v3_graph_to_v4.py | py | 631 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.