seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
17114111094 | """
# query database for xrefs, extra MODs, post to populate mod_corpus_association
# python process_mod_corpus_association_to_api.py
"""
import logging.config
from os import path
from agr_literature_service.api.database.main import get_db
from agr_literature_service.api.models import ModCorpusAssociationModel, ReferenceModel, ModModel
import time
from agr_literature_service.lit_processing.utils.sqlalchemy_utils import sqlalchemy_load_ref_xref
log_file_path = path.join(path.dirname(path.abspath(__file__)), '../../../logging.conf')
logging.config.fileConfig(log_file_path)
logger = logging.getLogger('literature logger')
def do_everything():
    """Populate mod_corpus_association from reference cross-references.

    Loads all valid reference xrefs, then for every reference whose xref
    prefix matches a MOD abbreviation, stages a ModCorpusAssociationModel
    row (corpus=True, mod_corpus_sort_source="dqm_files") linking the
    reference to that MOD.
    """
    # token = get_authentication_token()
    # headers = generate_headers(token)
    # api_server = environ.get('API_SERVER', 'localhost')
    # api_port = environ.get('API_PORT', '8080')
    # base_url = 'http://' + api_server + ':' + api_port + '/reference/mod_corpus_association/'
    # generate_cross_references_file('reference')  # this updates from references in the database, and takes 88 seconds. if updating this script, comment it out after running it once
    # ref_xref_valid maps agr curie -> xref prefix -> xref values; only the keys are used below
    xref_ref, ref_xref_valid, ref_xref_obsolete = sqlalchemy_load_ref_xref('reference')
    db_session = next(get_db())
    all_references_ids = db_session.query(ReferenceModel.curie, ReferenceModel.reference_id).all()
    # map reference curie -> primary-key id for O(1) lookup in the loop
    ref_curie_id_dict = {curie_id[0]: curie_id[1] for curie_id in all_references_ids}
    all_mods = db_session.query(ModModel).all()
    mod_abbreviation_id_dict = {mod.abbreviation: mod.mod_id for mod in all_mods}
    all_mod_abbr = set([mod.abbreviation for mod in all_mods])
    start = time.time()
    for agr in ref_xref_valid:
        for prefix in ref_xref_valid[agr]:
            if prefix in all_mod_abbr:
                if agr in ref_curie_id_dict:
                    mod_corpus_association = ModCorpusAssociationModel(reference_id=ref_curie_id_dict[agr],
                                                                       mod_id=mod_abbreviation_id_dict[prefix],
                                                                       corpus=True, mod_corpus_sort_source="dqm_files")
                    db_session.add(mod_corpus_association)
    # NOTE(review): commit placed after the loop — original indentation was
    # unavailable in this view; verify it was not a per-row commit.
    db_session.commit()
    end = time.time()
    logger.info("finished in " + str(end - start) + " seconds")
if __name__ == "__main__":
    """
    call main start function
    """
    logger.info("Starting process_mod_corpus_association_to_api.py")
    do_everything()
    logger.info("Ending process_mod_corpus_association_to_api.py")

# Usage: pipenv run python process_mod_corpus_association_to_api.py
| alliance-genome/agr_literature_service | agr_literature_service/lit_processing/oneoff_scripts/process_mod_corpus_association_to_api.py | process_mod_corpus_association_to_api.py | py | 2,621 | python | en | code | 1 | github-code | 13 |
28561344222 | from StringIO import StringIO
from bzrlib.errors import BinaryFile
from bzrlib.tests import TestCase, TestCaseInTempDir
from bzrlib.textfile import text_file, check_text_lines, check_text_path
class TextFile(TestCase):
    """Tests for text_file()/check_text_lines() binary detection (Python 2)."""

    def test_text_file(self):
        # Purely textual content round-trips unchanged.
        sample = StringIO('ab' * 2048)
        self.assertEqual(text_file(sample).read(), sample.getvalue())
        # A NUL byte inside the first 1024 characters flags the stream binary.
        sample = StringIO('a' * 1023 + '\x00')
        self.assertRaises(BinaryFile, text_file, sample)
        # A NUL byte past the first 1024 characters is not inspected.
        sample = StringIO('a' * 1024 + '\x00')
        self.assertEqual(text_file(sample).read(), sample.getvalue())

    def test_check_text_lines(self):
        check_text_lines(['ab' * 2048])
        self.assertRaises(BinaryFile, check_text_lines, ['a' * 1023 + '\x00'])
class TextPath(TestCaseInTempDir):
    """Tests check_text_path() against real files on disk (Python 2: file())."""

    def test_text_file(self):
        # text-only content passes the check
        with file('boo', 'wb') as f: f.write('ab' * 2048)
        check_text_path('boo')
        # a NUL byte makes the file binary and raises BinaryFile
        with file('boo', 'wb') as f: f.write('a' * 1023 + '\x00')
        self.assertRaises(BinaryFile, check_text_path, 'boo')
| ag1455/OpenPLi-PC | pre/python/lib/python2.7/dist-packages/bzrlib/tests/test_textfile.py | test_textfile.py | py | 1,029 | python | en | code | 19 | github-code | 13 |
20477380346 | from framework import utili
from framework.data_manager import data_manager_creator
from framework import register
from tensorflow.keras.models import load_model
class creator(register.base):
    """Factory for model managers, built from a config dict or restored
    from files previously saved on disk."""

    def create(self, data_manager, config):
        # Look up the registered constructor by its configured name and call it.
        factory = self.get_entry(config['name'])
        return factory(data_manager, config)

    def create_from_file(self, name):
        # A saved manager is a pickled store ("<name>") plus Keras weights
        # ("<name>.h5").
        store = utili.load_obj(name)
        data_manager = data_manager_creator.instance.create(store['data_manager_info'])
        manager = self.create(data_manager, store['config'])
        manager.set_model(load_model(name + '.h5'))
        return manager
# Module-level singleton used by callers to create model managers.
instance = creator()
| jian626/deepseq | framework/model_manager/model_manager_creator.py | model_manager_creator.py | py | 774 | python | en | code | 0 | github-code | 13 |
14078303328 | from comcrawl import IndexClient
import pandas as pd
import time
from concurrent.futures import ThreadPoolExecutor
from urlfetcher import df, fname
'''
websites = ["asmag", "AmericanMachinist", "assemblymag", "automationworld"]
# [ "aviationweek", "BestManufacturingPractices", "Chemengonline","ComputerWeekly", "Cordis.europ", "den" ]
'''
def fetch(website, filter, filtertype):
    """Download Common Crawl records for one website's URL list.

    Reads ``url/<website>.csv``, filters its URLs by the comma-separated
    terms in ``filter``: 'required' keeps URLs containing any term,
    'notrequired' drops them, anything else keeps every row.  Downloads the
    matching pages and returns them as a DataFrame; returns the string
    "empty" when the download fails.  Side effect: writes the elapsed time
    to ``timetaken/<website>CCtime.txt``.
    """
    start = time.time()
    client = IndexClient()
    dataframe = pd.read_csv(f"{fname}\\url\\{website}.csv")
    print(len(dataframe))
    print(len(df))
    dataframe["url"] = dataframe["url"].str.lower()
    my_list = filter.split(",")
    # all the filter parameters joined into one regex alternation
    pattern = '|'.join(my_list)
    # renamed local (was `filter`, shadowing both the builtin and the parameter)
    if filtertype == 'notrequired':
        filtered = dataframe[~dataframe['url'].str.contains(pattern)]
    elif filtertype == 'required':
        filtered = dataframe[dataframe['url'].str.contains(pattern)]
    else:
        filtered = dataframe
    print("after filter", len(filtered))
    try:
        client.results = filtered.to_dict("records")
        client.download(threads=4)
        htmldf = pd.DataFrame(client.results)
    except Exception:
        # was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed; best-effort "empty" result preserved
        return ("empty")
    end = time.time()
    timetaken = end - start
    with open(f'{fname}\\timetaken\\{website}CCtime.txt', 'w') as f:
        f.write(f"time taken is {timetaken}")
    return (htmldf)
def html_collector(df):
    """Run fetch() concurrently for every configured website and return the
    list of its results (DataFrames, or "empty" on failure)."""
    print("main")
    # Fan the per-website downloads out over a small thread pool; map() pairs
    # up the foldername/filters/filtertype columns row by row.
    with ThreadPoolExecutor(max_workers=10) as executor:
        pages = executor.map(fetch, df['foldername'], df['filters'], df['filtertype'])
        executor.shutdown(wait=True)  # block until every fetch has finished
        print("batch completed")
    return (list(pages))
# Script entry: download HTML for every configured website, then save each
# result as gzip-compressed JSON under html/.
temp = html_collector(df)
for j in range(0, len(temp)):
    temp1 = pd.DataFrame(temp[j])
    temp1.to_json(f"{fname}\\html\\{df['foldername'][j]}urldHTML.gz", compression="gzip")
    print("file created")
| mayankrichu/Textextractor | htmlfetcher.py | htmlfetcher.py | py | 2,042 | python | en | code | 0 | github-code | 13 |
4447573502 | from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from rest_framework.urlpatterns import format_suffix_patterns
from RESTservice import views
#
# # Create a router and register our viewsets with it
# Create a router and register our viewsets with it; the router generates
# the standard list/detail URL patterns for each model automatically.
router = DefaultRouter()
router.register(r'topics', views.TopicViewSet)
router.register(r'users', views.UserViewSet)
router.register(r'collectors', views.CollectorViewSet)
router.register(r'collections', views.CollectionViewSet)
router.register(r'collectables', views.CollectableViewSet)

urlpatterns = [
    # The auto made url patterns for my models
    url(r'^', include(router.urls)),
    # Image endpoints are plain views (not viewsets), so they are routed manually.
    url(r'^collectors/(?P<collector_name>.+)/image/$', views.CollectorImageView, name='collector_image_view'),
    url(r'^collectables/(?P<collectable_name>.+)/image/$', views.CollectableImageView, name='collectable_image_view')
]
# urlpatterns = format_suffix_patterns(urlpatterns)
# urlpatterns += [
#     url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
# ]
| owenstranathan/Collector | RESTservice/urls.py | urls.py | py | 1,022 | python | en | code | 0 | github-code | 13 |
22606959092 | from django.urls import path, re_path
from shortener import views
app_name = "shortener"
urlpatterns = [
    path('', views.IndexView.as_view(), name='index'),
    # Any other single path segment is treated as a short-URL key to resolve.
    re_path(r'^(?P<pk>[^\/]+)/$', views.URLResolver.as_view(), name='resolver'),
]
| rrr3try/shortener | shortener/urls.py | urls.py | py | 246 | python | en | code | 0 | github-code | 13 |
"""Epsilon-greedy 10-armed bandit on a non-stationary (random-walk q*) task,
comparing sample-average and constant-step-size action-value estimates."""
import numpy as np
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--alg', type=str, default="sample_avg", help="sample_avg | constant")
args = parser.parse_args()
print(args)  # NOTE: args.alg is parsed but unused — both learners always run

qstar = np.array([0.] * 10)      # true action values; drift by a random walk
mu, sigma = 0, 0.01              # mean and standard deviation of the walk step
q_avg = np.array([0.] * 10)      # estimates updated by sample averaging
q_const = np.array([0.] * 10)    # estimates updated with constant step size
n_avg = np.zeros(10, dtype=int)  # pull count per arm for the sample-average learner
epsilon = 0.1
alpha = 0.1
epoch = 10000

total_reward_avg = 0.
total_reward_const = 0.
avg_reward_avg = np.array([0.] * epoch)
avg_reward_const = np.array([0.] * epoch)
total_right_avg = 0.
total_right_const = 0.
avg_right_avg = np.array([0.] * epoch)
avg_right_const = np.array([0.] * epoch)

for i in range(epoch):
    s = np.random.normal(mu, sigma, 10)  # random walk of the true values
    qstar += s
    # epsilon-greedy action selection for the sample-average learner
    if np.random.rand(1)[0] < epsilon:
        action_avg = np.random.randint(10)
    else:
        # random tie breaking
        action_avg = np.random.choice(np.flatnonzero(q_avg == q_avg.max()))
    # epsilon-greedy action selection for the constant-step learner
    if np.random.rand(1)[0] < epsilon:
        action_const = np.random.randint(10)
    else:
        # random tie breaking
        action_const = np.random.choice(np.flatnonzero(q_const == q_const.max()))
    reward_avg = np.random.normal(qstar[action_avg], 1)
    reward_const = np.random.normal(qstar[action_const], 1)
    # BUG FIX: the incremental sample-average must divide by the number of
    # times *this arm* was selected, not by the global step count (i + 1).
    n_avg[action_avg] += 1
    q_avg[action_avg] += (reward_avg - q_avg[action_avg]) / n_avg[action_avg]
    q_const[action_const] += alpha * (reward_const - q_const[action_const])
    total_reward_avg += reward_avg
    total_reward_const += reward_const
    avg_reward_avg[i] = total_reward_avg / (i + 1)
    avg_reward_const[i] = total_reward_const / (i + 1)
    if q_avg.argmax() == qstar.argmax():
        total_right_avg += 1.
    if q_const.argmax() == qstar.argmax():
        total_right_const += 1.
    avg_right_avg[i] = total_right_avg / (i + 1)
    avg_right_const[i] = total_right_const / (i + 1)

print("qstar value: ", qstar)

import matplotlib.pyplot as plt

ax1 = plt.subplot(121)
ax1.set_title("average reward")
ax1.plot(avg_reward_avg, label='sample mean')
ax1.plot(avg_reward_const, label='constant step')
ax1.legend()
ax2 = plt.subplot(122)
ax2.set_title("optimal action")
ax2.plot(avg_right_avg, label='sample mean')
ax2.plot(avg_right_const, label='constant step')
ax2.legend()
plt.show()
# Module for creating player and boss objects,
# functions for choosing a boss based on the player's win count,
# and boss abilities applied before the fight with the player
import GameStrings
import PlayerStrings
import BossStrings
import Characters
import random
def player_get_stats(hero_name):
    """Create the global `player` instance for the chosen hero.

    Stats passed to Characters.Player are, in order: name, health, damage,
    critical-hit chance, dodge chance, lifesteal, regeneration, ability
    button text, icon.  The hero's description is attached afterwards.
    """
    global player
    # Mitya
    if hero_name == PlayerStrings.Mitya.name:
        player = Characters.Player(hero_name, 800, 100, 0, 0, 20, 0,
                                   GameStrings.ButtonText.mitya_skill,
                                   GameStrings.Icons.mitya)
        player.description = PlayerStrings.Mitya.description()
    # Sanya
    elif hero_name == PlayerStrings.Sanya.name:
        player = Characters.Player(hero_name, 1000, 200, 30, 0, 0, 0,
                                   GameStrings.ButtonText.sanya_skill,
                                   GameStrings.Icons.sanya)
        player.description = PlayerStrings.Sanya.description()
    # Toshik
    elif hero_name == PlayerStrings.Toshik.name:
        player = Characters.Player(hero_name, 1500, 100, 0, 0, 0, 0,
                                   GameStrings.ButtonText.toshik_skill,
                                   GameStrings.Icons.toshik)
        player.description = PlayerStrings.Toshik.description()
    # Kolya
    elif hero_name == PlayerStrings.Kolya.name:
        player = Characters.Player(hero_name, 1200, 100, 0, 0, 0, 0,
                                   GameStrings.ButtonText.kolya_skill,
                                   GameStrings.Icons.kolya)
        player.description = PlayerStrings.Kolya.description()
    # Temich
    elif hero_name == PlayerStrings.Temich.name:
        player = Characters.Player(hero_name, 800, 150, 0, 15, 0, 0,
                                   GameStrings.ButtonText.temich_skill,
                                   GameStrings.Icons.temich)
        player.description = PlayerStrings.Temich.description()
def boss_get_stats(boss_name):
# создание экземпляра босса с данными характеристиками:
# имя, здоровье, урон, шанс критической атаки, шанс уклонения,
# вампиризм, регенерация, описание босса
global boss
# Палыч
if boss_name == BossStrings.Palich.name:
boss = Characters.Boss(boss_name, 800, 200, 0, 0, 0, 0,
BossStrings.Palich.description)
# Чайковский
elif boss_name == BossStrings.Chaikovskii.name:
boss = Characters.Boss(boss_name, 600, 250, 0, 0, 0, 0,
BossStrings.Chaikovskii.description)
boss.resurrection = True
# Вив
elif boss_name == BossStrings.Viv.name:
boss = Characters.Boss(boss_name, 900, 100, 0, 0, 0, 0,
BossStrings.Viv.description)
# Саша Шлякин
elif boss_name == BossStrings.Sasha.name:
boss = Characters.Boss(boss_name, 1000, 200, 20, 0, 0, 0,
BossStrings.Sasha.description)
# Качаловская Тварь
elif boss_name == BossStrings.Tvar.name:
boss = Characters.Boss(boss_name, 800, 50, 0, 0, 0, 0,
BossStrings.Tvar.description)
boss.returnal_value = 50
# Рандом Рандомыч
elif boss_name == BossStrings.Randomich.name:
boss = Characters.Boss(boss_name, random.randint(100, 1001), random.randint(10, 301),
random.randint(0, 51), random.randint(0, 51), 0,
random.randint(0, 301), BossStrings.Randomich.description)
boss.returnal_value = random.randint(0, 51)
# Котенок-Тролль
elif boss_name == BossStrings.Kitty.name:
boss = Characters.Boss(boss_name, 1000, 200, 0, 0, 0, 0,
BossStrings.Kitty.description)
boss.end_skill_chance = 50
# Инквизиция
elif boss_name == BossStrings.Inkvisizia.name:
boss = Characters.Boss(boss_name, 500, 500, 50, 0, 0, 0,
BossStrings.Inkvisizia.description)
# Доктор Леха
elif boss_name == BossStrings.DocLeha.name:
boss = Characters.Boss(boss_name, 1500, 300, 0, 0, 0, 0,
BossStrings.DocLeha.description)
boss.end_skill_chance = 36
# Пьяный Леха
elif boss_name == BossStrings.DrunkLeha.name:
boss = Characters.Boss(boss_name, 1200, 100, 0, 0, 0, 0,
BossStrings.DocLeha.description)
# Мел
elif boss_name == BossStrings.Mel.name:
boss = Characters.Boss(boss_name, 50, 0, 0, 90, 0, 0,
BossStrings.Mel.description)
boss.skill_meter_level_up = 1
boss.skill_meter_level += boss.skill_meter_level_up
# Рыжий
elif boss_name == BossStrings.Redhead.name:
boss = Characters.Boss(boss_name, 2000, 100, 0, 0, 0, 300,
BossStrings.Redhead.description)
# Следователь
elif boss_name == BossStrings.Sledovatel.name:
boss = Characters.Boss(boss_name, 1500, 100, 0, 50, 0, 0,
BossStrings.Sledovatel.description)
# Донер Кебаб
elif boss_name == BossStrings.Doner.name:
boss = Characters.Boss(boss_name, 1800, 350, 0, 0, 0, 0,
BossStrings.Doner.description)
# Черный Стас
elif boss_name == BossStrings.BlackStas.name:
boss = Characters.Boss(boss_name, 1500, 300, 0, 30, 0, 0,
BossStrings.BlackStas.description)
# Дрон
elif boss_name == BossStrings.Dron.name:
boss = Characters.Boss(boss_name, 2000, 100, 0, 0, 0, 0,
BossStrings.Dron.description)
boss.skill_meter_level_up = 10
boss.skill_meter_level += boss.skill_meter_level_up
# Валера Гладиатор
elif boss_name == BossStrings.Glad.name:
boss = Characters.Boss(boss_name, 3000, 200, 0, 0, 0, 0,
BossStrings.Glad.description)
# Великая Шива
elif boss_name == BossStrings.Shiva.name:
boss = Characters.Boss(boss_name, 2000, 500, 0, 30, 0, 0,
BossStrings.Shiva.description)
# Король Макар
elif boss_name == BossStrings.Makar.name:
boss = Characters.Boss(boss_name, player.health, player.damage,
player.critical_chance, player.miss_chance,
player.lifesteal, player.regeneration,
BossStrings.Makar.description)
# Гомогомозеки
elif boss_name == BossStrings.Gomozeki.name:
boss = Characters.Boss(boss_name, 2000, 300, 20, 0, 0, 200,
BossStrings.Gomozeki.description) | AssBurger69/BurgerGame | CharactersGenerator.py | CharactersGenerator.py | py | 7,581 | python | en | code | 0 | github-code | 13 |
507204917 | import sqlite3
from models.models import Person
import uuid
import cv2
from PIL import Image as im
def create_db():
    """Create the SQLite database file and the `user` table if missing.

    Fixes a resource leak in the original: the connection is now always
    closed, and the DDL is explicitly committed.
    """
    conn = sqlite3.connect('database/Person.db', timeout=20)
    try:
        c = conn.cursor()
        c.execute("CREATE TABLE IF NOT EXISTS user ( id TEXT PRIMARY KEY, name TEXT NOT NULL, mobile_no INTEGER, adhar_no INTEGER, img BLOB NOT NULL, address TEXT , ptype TEXT, threat TEXT)")
        conn.commit()
    finally:
        conn.close()
def convertToBinaryData(imgfile):
    """Return the raw bytes of the file at path `imgfile`."""
    with open(imgfile, 'rb') as handle:
        contents = handle.read()
    return contents
def insertData(name, id, adhar_no, imgfile, address, mobile_no, ptype, threat):
    """Insert one person row (including the image file's bytes) into `user`.

    Parameters mirror the table columns; `imgfile` is a path whose raw
    bytes are stored in the BLOB column.  sqlite3 errors are reported to
    stdout rather than raised (best-effort behaviour preserved).
    """
    sqliteConnection = None  # fix: name must exist for `finally` if connect() raises
    try:
        sqliteConnection = sqlite3.connect('database/Person.db')
        cursor = sqliteConnection.cursor()
        print("Connected to Database")
        sqlite_insert_blob_query = """ INSERT INTO user
        (id, name, mobile_no, adhar_no, img, address, ptype, threat) VALUES (?, ?, ?, ?, ?, ?, ?, ?)"""
        img = convertToBinaryData(imgfile)
        cursor.execute(sqlite_insert_blob_query, (id, name, mobile_no, adhar_no, img, address, ptype, threat))
        sqliteConnection.commit()
        print("Image and file inserted successfully into the database")
        cursor.close()
    except sqlite3.Error as error:
        print("Failed to insert blob data into person table", error)
    finally:
        if sqliteConnection:
            sqliteConnection.close()
def retrieve(new_img):
    """Look up the person whose id equals `new_img` with its extension removed.

    Returns a dict with keys Id, Name, Mobile No, Adhar No, Address,
    Person Type and Threat (empty strings when no row matches), or None if
    a sqlite3 error occurred.
    """
    sqliteConnection = None  # fix: name must exist for `finally` if connect() raises
    try:
        sqliteConnection = sqlite3.connect('database/Person.db')
        cursor = sqliteConnection.cursor()
        print("Checking Record In Database ..........")
        cursor.execute(
            "SELECT id,name,mobile_no,adhar_no,address,ptype,threat FROM user WHERE id=:id",
            {'id': str(new_img).split('.')[0]})
        result = cursor.fetchall()
        out_dic = {'Id': '', 'Name': '', 'Mobile No': '', 'Adhar No': '',
                   'Address': '', 'Person Type': '', 'Threat': ''}
        for info in result:
            out_dic['Id'] = info[0]
            out_dic['Name'] = info[1]
            out_dic['Mobile No'] = info[2]
            out_dic['Adhar No'] = info[3]
            out_dic['Address'] = info[4]
            out_dic['Person Type'] = info[5]
            out_dic['Threat'] = info[6]
        sqliteConnection.commit()
        cursor.close()
        return out_dic
    except sqlite3.Error as error:
        print("Failed to fetch data from user table", error)
    finally:
        if sqliteConnection:
            sqliteConnection.close()
def writeTofile(data, filename):
    """Write the binary blob `data` to `filename` on disk."""
    with open(filename, 'wb') as out:
        out.write(data)
def all_image():
    """Dump every user's image BLOB to image_list/<id>.jpg.

    NOTE(review): assumes the image_list/ directory already exists — verify.
    """
    sqliteConnection = None  # fix: name must exist for `finally` if connect() raises
    try:
        sqliteConnection = sqlite3.connect('database/Person.db', timeout=20)
        cursor = sqliteConnection.cursor()
        print("Connected to Database")
        cursor.execute("SELECT id,img from user")
        users = cursor.fetchall()
        # (removed unused img_list accumulator from the original)
        for user_id, img in users:
            writeTofile(img, 'image_list/' + user_id + '.jpg')
    except sqlite3.Error as error:
        print("Failed to fetch data from the database", error)
    finally:
        if sqliteConnection:
            sqliteConnection.close()
# if __name__ == "__main__":
# Database Creation (runs at import time)
create_db()
# Loading all images from database to compare
all_image()
# Interactive insertion flow kept for reference:
# name=input("Enter the name:\t")
# id=str(uuid.uuid4())
# adhar_no=int(input("Enter the adhar no:\t"))
# mobile_no=int(input("Enter your mobile no:\t"))
# address=input("Enter your address:\t")
# ptype=input("Person type:\t")
# threat=input("Threat YES | NO:\t")
# imgfile=input("Enter the image file path:\t")
# per=Person(name, id, adhar_no, imgfile, address, mobile_no, ptype, threat)
# insertData(per.name, per.id, per.adhar_no, per.imgfile, per.address, per.mobile_no, per.ptype, per.threat)
| aadityapritam001/Face_Recognition | db.py | db.py | py | 4,640 | python | en | code | 0 | github-code | 13 |
5661371339 | from functools import partial
from qtpy import QtWidgets
from qtpy.QtCore import Qt
import matplotlib.colors as colors
from matplotlib.legend import Legend
from matplotlib.text import Text
from mslice.models.colors import to_hex, name_to_color
from mslice.models.units import get_sample_temperature_from_string
from mslice.presenters.plot_options_presenter import SlicePlotOptionsPresenter
from mslice.presenters.quick_options_presenter import quick_options, check_latex
from mslice.models.workspacemanager.workspace_provider import get_workspace_handle
from mslice.plotting.plot_window.iplot import IPlot
from mslice.plotting.plot_window.interactive_cut import InteractiveCut
from mslice.plotting.plot_window.plot_options import SlicePlotOptions
from mslice.plotting.plot_window.overplot_interface import (_update_overplot_lines, _update_powder_lines,
toggle_overplot_line, cif_file_powder_line)
from mslice.plotting.pyplot import GlobalFigureManager
from mslice.scripting import generate_script
from mslice.util.compat import legend_set_draggable
from mslice.util.intensity_correction import IntensityType, IntensityCache
from mslice.models.intensity_correction_algs import sample_temperature
from typing import Callable
DEFAULT_LABEL_SIZE = 10
DEFAULT_TITLE_SIZE = 12
class SlicePlot(IPlot):
    """Plot handler for a 2D slice figure: wires menu actions, manages
    intensity corrections, overplot/recoil lines and interactive cuts."""

    def __init__(self, figure_manager, slice_plotter_presenter, workspace_name):
        self.manager = figure_manager
        self.plot_window = figure_manager.window
        self._canvas = self.plot_window.canvas
        self._slice_plotter_presenter = slice_plotter_presenter
        self.ws_name = workspace_name
        self._arb_nuclei_rmm = None  # relative mass for the arbitrary-nuclei recoil line
        self._cif_file = None
        self._cif_path = None
        self._legends_shown = True
        self._legend_dict = {}
        # Interactive cuts
        self.icut = None
        self.icut_event = [None, None]
        self.setup_connections(self.plot_window)
        self.intensity = False
        self.intensity_type = IntensityType.SCATTERING_FUNCTION
        self.temp_dependent = False
        self.temp = None  # sample temperature once resolved (see _handle_temperature_input)
        self.default_options = None  # populated lazily by save_default_options()
def save_default_options(self):
    """Snapshot the current plot options so they can be restored later."""
    self.default_options = {
        'colorbar_label': self.colorbar_label,
        'colorbar_label_size': DEFAULT_LABEL_SIZE,
        'colorbar_log': self.colorbar_log,
        'colorbar_range': self.colorbar_range,
        'colorbar_range_font_size': DEFAULT_LABEL_SIZE,
        'intensity': self.intensity,
        'intensity_type': self.intensity_type,
        'temp': self.temp,
        'temp_dependent': self.temp_dependent,
        'title': self.ws_name,
        'title_size': DEFAULT_TITLE_SIZE,
        'x_label': r"$|Q|$ ($\mathrm{\AA}^{-1}$)",
        'x_label_size': DEFAULT_LABEL_SIZE,
        'x_grid': False,
        'x_range': self.x_range,
        'x_range_font_size': DEFAULT_LABEL_SIZE,
        'y_label': 'Energy Transfer (meV)',
        'y_label_size': DEFAULT_LABEL_SIZE,
        'y_grid': False,
        'y_range': self.y_range,
        'y_range_font_size': DEFAULT_LABEL_SIZE,
        'legend': True,
    }
def setup_connections(self, plot_window):
    """Enable the slice-specific menu items and connect their signals.

    Wires intensity-correction actions, recoil/powder overplot lines,
    interactive-cut controls and script generation to their handlers.
    """
    plot_window.redraw.connect(self._canvas.draw)
    plot_window.action_gen_script.setVisible(True)
    plot_window.action_gen_script_clipboard.setVisible(True)
    plot_window.menu_information.setDisabled(False)
    plot_window.menu_intensity.setDisabled(False)
    plot_window.action_toggle_legends.setVisible(True)
    plot_window.action_keep.setVisible(True)
    plot_window.action_make_current.setVisible(True)
    plot_window.action_save_image.setVisible(True)
    plot_window.action_plot_options.setVisible(True)
    plot_window.action_interactive_cuts.setVisible(True)
    plot_window.action_interactive_cuts.triggered.connect(self.toggle_interactive_cuts)
    # save/flip only become visible while an interactive cut is active
    plot_window.action_save_cut.setVisible(False)
    plot_window.action_save_cut.triggered.connect(self.save_icut)
    plot_window.action_flip_axis.setVisible(False)
    plot_window.action_flip_axis.triggered.connect(self.flip_icut)
    plot_window.action_waterfall.setVisible(False)
    # intensity corrections: (action, presenter method, temperature-dependent?)
    plot_window.action_sqe.triggered.connect(partial(self.show_intensity_plot, plot_window.action_sqe,
                                                     self._slice_plotter_presenter.show_scattering_function, False))
    plot_window.action_chi_qe.triggered.connect(partial(self.show_intensity_plot, plot_window.action_chi_qe,
                                                        self._slice_plotter_presenter.show_dynamical_susceptibility, True))
    plot_window.action_chi_qe_magnetic.triggered.connect(
        partial(self.show_intensity_plot, plot_window.action_chi_qe_magnetic,
                self._slice_plotter_presenter.show_dynamical_susceptibility_magnetic, True))
    plot_window.action_d2sig_dw_de.triggered.connect(partial(self.show_intensity_plot, plot_window.action_d2sig_dw_de,
                                                             self._slice_plotter_presenter.show_d2sigma, False))
    plot_window.action_symmetrised_sqe.triggered.connect(
        partial(self.show_intensity_plot, plot_window.action_symmetrised_sqe,
                self._slice_plotter_presenter.show_symmetrised, True))
    plot_window.action_gdos.triggered.connect(
        partial(self.show_intensity_plot, plot_window.action_gdos, self._slice_plotter_presenter.show_gdos, True))
    # recoil lines for fixed relative masses (H=1, D=2, He=4) plus arbitrary input
    plot_window.action_hydrogen.triggered.connect(
        partial(toggle_overplot_line, self, self._slice_plotter_presenter, 1, True))
    plot_window.action_deuterium.triggered.connect(
        partial(toggle_overplot_line, self, self._slice_plotter_presenter, 2, True))
    plot_window.action_helium.triggered.connect(
        partial(toggle_overplot_line, self, self._slice_plotter_presenter, 4, True))
    plot_window.action_arbitrary_nuclei.triggered.connect(self.arbitrary_recoil_line)
    # powder lines for common sample-environment materials
    plot_window.action_aluminium.triggered.connect(
        partial(toggle_overplot_line, self, self._slice_plotter_presenter, 'Aluminium', False))
    plot_window.action_copper.triggered.connect(
        partial(toggle_overplot_line, self, self._slice_plotter_presenter, 'Copper', False))
    plot_window.action_niobium.triggered.connect(
        partial(toggle_overplot_line, self, self._slice_plotter_presenter, 'Niobium', False))
    plot_window.action_tantalum.triggered.connect(
        partial(toggle_overplot_line, self, self._slice_plotter_presenter, 'Tantalum', False))
    plot_window.action_cif_file.triggered.connect(partial(cif_file_powder_line, self,
                                                          self._slice_plotter_presenter))
    plot_window.action_gen_script.triggered.connect(self.generate_script)
    plot_window.action_gen_script_clipboard.triggered.connect(lambda: self.generate_script(clipboard=True))
def disconnect(self, plot_window):
    """Disconnect every signal wired up in setup_connections (mirror order)."""
    plot_window.action_interactive_cuts.triggered.disconnect()
    plot_window.action_save_cut.triggered.disconnect()
    plot_window.action_flip_axis.triggered.disconnect()
    plot_window.action_sqe.triggered.disconnect()
    plot_window.action_chi_qe.triggered.disconnect()
    plot_window.action_chi_qe_magnetic.triggered.disconnect()
    plot_window.action_d2sig_dw_de.triggered.disconnect()
    plot_window.action_symmetrised_sqe.triggered.disconnect()
    plot_window.action_gdos.triggered.disconnect()
    plot_window.action_hydrogen.triggered.disconnect()
    plot_window.action_deuterium.triggered.disconnect()
    plot_window.action_helium.triggered.disconnect()
    plot_window.action_arbitrary_nuclei.triggered.disconnect()
    plot_window.action_aluminium.triggered.disconnect()
    plot_window.action_copper.triggered.disconnect()
    plot_window.action_niobium.triggered.disconnect()
    plot_window.action_tantalum.triggered.disconnect()
    plot_window.action_cif_file.triggered.disconnect()
    plot_window.action_gen_script.triggered.disconnect()
def window_closing(self):
    """Tear down any active interactive cut before the window closes."""
    if self.icut is not None:
        self.icut.clear()
        self.icut.window_closing()
        self.icut = None
def plot_options(self):
    """Open the full slice plot-options dialog."""
    SlicePlotOptionsPresenter(SlicePlotOptions(self.plot_window, redraw_signal=self.plot_window.redraw), self)
def plot_clicked(self, x, y):
    """Open the matching quick-options editor for a click at canvas pixel (x, y).

    The click position is compared with the thresholds from
    calc_figure_boundaries() to decide whether the x-range, y-range or
    colorbar-range region was hit.
    """
    bounds = self.calc_figure_boundaries()
    if bounds['x_label'] < y < bounds['title']:
        if bounds['y_label'] < x < bounds['colorbar_label']:
            if y < bounds['x_range']:
                quick_options('x_range', self, redraw_signal=self.plot_window.redraw)
            elif x < bounds['y_range']:
                quick_options('y_range', self, redraw_signal=self.plot_window.redraw)
        elif x > bounds['colorbar_range']:
            quick_options('colorbar_range', self, self.colorbar_log, redraw_signal=self.plot_window.redraw)
def object_clicked(self, target):
    """Open a quick-options editor for a clicked artist, then refresh.

    Legends are ignored; text artists get the redraw-signal variant.
    """
    if isinstance(target, Legend):
        return
    elif isinstance(target, Text):
        quick_options(target, self, redraw_signal=self.plot_window.redraw)
    else:
        quick_options(target, self)
    self.update_legend()
    self._canvas.draw()
def update_legend(self):
    """Rebuild the legend (if shown) and re-anchor the interactive-cut rect."""
    axes = self._canvas.figure.gca()
    if self._legends_shown:
        self._add_or_renew_legend(axes)
    if self._canvas.manager.plot_handler.icut is not None:
        self._canvas.manager.plot_handler.icut.rect.ax = axes
def _add_or_renew_legend(self, axes):
    """Show a draggable legend when labelled artists exist; otherwise drop
    any stale legend from the axes."""
    handles, labels = axes.get_legend_handles_labels()
    if not handles:
        stale = axes.get_legend()
        if stale:
            stale.remove()
        return
    # 'upper right' rather than 'best': 'best' is very slow for large datasets.
    axes.legend(handles, labels, fontsize='medium', loc='upper right')
    legend_set_draggable(axes.get_legend(), True)
def change_axis_scale(self, colorbar_range, logarithmic):
    """Apply a (vmin, vmax) range and linear/log norm to the slice colorbar.

    Non-positive limits are clamped to 0.001 for the log norm.  The
    colorbar is recreated, so its label is saved and restored.
    """
    current_axis = self._canvas.figure.gca()
    colormesh = current_axis.collections[0]
    vmin, vmax = colorbar_range
    if logarithmic:
        if vmin <= float(0):
            vmin = 0.001
        if vmax <= float(0):
            vmax = 0.001
        norm = colors.LogNorm(vmin, vmax)
    else:
        norm = colors.Normalize(vmin, vmax)
    label = self.colorbar_label
    colormesh.colorbar.remove()
    colormesh.set_clim((vmin, vmax))
    colormesh.set_norm(norm)
    self._canvas.figure.colorbar(colormesh)
    self.colorbar_label = label
def get_line_options(self, target):
    """Collect the style attributes of a matplotlib line into a dict."""
    return {
        'label': target.get_label(),
        'legend': None,
        'shown': None,
        'color': to_hex(target.get_color()),
        'style': target.get_linestyle(),
        'width': str(target.get_linewidth()),
        'marker': target.get_marker(),
        'error_bar': None,
    }
def set_line_options(self, line, line_options):
    """Apply a dict produced by get_line_options() back onto a line."""
    line.set_label(line_options['label'])
    line.set_linestyle(line_options['style'])
    line.set_marker(line_options['marker'])
    line.set_color(name_to_color(line_options['color']))
    line.set_linewidth(line_options['width'])
def calc_figure_boundaries(self):
    """Return pixel thresholds (fixed fractions of the canvas size) used by
    plot_clicked() to decide which plot element a click hit."""
    width, height = self._canvas.figure.get_size_inches() * self._canvas.figure.dpi
    return {
        'y_label': width * 0.07,
        'y_range': width * 0.12,
        'colorbar_range': width * 0.75,
        'colorbar_label': width * 0.86,
        'title': height * 0.9,
        'x_range': height * 0.09,
        'x_label': height * 0.05,
    }
def arbitrary_recoil_line(self):
    """Toggle a recoil overplot line for a user-supplied relative mass.

    When checking the action, prompt for the mass; if the dialog is
    cancelled the action's checked state is reverted.
    """
    recoil = True
    checked = self.plot_window.action_arbitrary_nuclei.isChecked()
    if checked:
        self._arb_nuclei_rmm, confirm = QtWidgets.QInputDialog.getInt(
            self.plot_window, 'Arbitrary Nuclei', 'Enter relative mass:', min=1)
        if confirm:
            toggle_overplot_line(self, self._slice_plotter_presenter, self._arb_nuclei_rmm, recoil, checked)
        else:
            self.plot_window.action_arbitrary_nuclei.setChecked(not checked)
    else:
        toggle_overplot_line(self, self._slice_plotter_presenter, self._arb_nuclei_rmm, recoil, checked)
def _reset_intensity(self):
    """Uncheck every entry in the intensity menu."""
    for action in self.plot_window.menu_intensity.actions():
        action.setChecked(False)
def selected_intensity(self):
    """Return the checked intensity-menu action, or None when none is checked."""
    return next((action for action in self.plot_window.menu_intensity.actions()
                 if action.isChecked()), None)
def set_intensity(self, intensity):
    """Make `intensity` the only checked action in the intensity menu."""
    self._reset_intensity()
    intensity.setChecked(True)
def show_intensity_plot(self, action, slice_plotter_method, temp_dependent):
    """Re-plot the slice with the intensity correction behind `action`.

    `slice_plotter_method` is the presenter call that performs the
    correction; `temp_dependent` marks corrections needing a sample
    temperature (obtained interactively on first use).  Colorbar scale
    and title are preserved across the re-plot, and an active interactive
    cut is refreshed afterwards.
    """
    last_active_figure_number, disable_make_current_after_plot = \
        self.manager.report_as_current_and_return_previous_status()
    if not self.default_options:
        self.save_default_options()
    self.default_options['temp_dependent'] = temp_dependent
    self.temp_dependent = temp_dependent
    self.default_options['intensity'] = True
    self.intensity = True
    # method names are "show_<description>"; strip the "show_" prefix
    self.default_options['intensity_type'] = \
        IntensityCache.get_intensity_type_from_desc(slice_plotter_method.__name__[5:])
    self.intensity_type = self.default_options['intensity_type']
    if action.isChecked():
        previous = self.selected_intensity()
        self.set_intensity(action)
        cbar_log = self.colorbar_log
        cbar_range = self.colorbar_range
        title = self.title
        if temp_dependent:
            if not self._run_temp_dependent(slice_plotter_method, previous):
                self.manager.reset_current_figure_as_previous(last_active_figure_number, disable_make_current_after_plot)
                return
        else:
            slice_plotter_method(self.ws_name)
        self.update_canvas(cbar_range, cbar_log, title)
    else:
        # re-selecting the active correction: keep it checked, restore focus
        action.setChecked(True)
        self.manager.reset_current_figure_as_previous(last_active_figure_number, disable_make_current_after_plot)
    if self.icut:
        self.icut.refresh_current_cut()
def update_canvas(self, cbar_range, cbar_log, title):
self.change_axis_scale(cbar_range, cbar_log)
self.title = title
self.manager.update_grid()
self._update_lines()
self._canvas.draw()
    def _run_temp_dependent(self, slice_plotter_method: Callable, previous: QtWidgets.QAction) -> bool:
        """Run *slice_plotter_method*, prompting for a sample temperature once
        if the first attempt raises ValueError.  Returns False on failure."""
        try:
            slice_plotter_method(self.ws_name)
        except ValueError: # sample temperature not yet set, get it and reattempt method
            if self._set_sample_temperature(previous):
                slice_plotter_method(self.ws_name)
            else: # failed to get sample temperature
                return False
        return True
    def _set_sample_temperature(self, previous: QtWidgets.QAction) -> bool:
        """Ask the user for a sample temperature; on cancel/invalid input fall
        back to the *previous* intensity selection.  Returns success."""
        try:
            temp_value_raw, field = self.ask_sample_temperature_field(str(self.ws_name))
            temperature_found = self._handle_temperature_input(temp_value_raw, field)
        except RuntimeError: # if cancel is clicked, go back to previous selection
            temperature_found = False
        if not temperature_found:
            self.set_intensity(previous)
        return temperature_found
    def _handle_temperature_input(self, temp_value_raw: str, field: bool) -> bool:
        """Resolve *temp_value_raw* to Kelvin (log field if *field*, else a
        literal value), validate it and store it on the presenter."""
        if field:
            temp_value = sample_temperature(self.ws_name, [temp_value_raw])
        else:
            temp_value = get_sample_temperature_from_string(temp_value_raw)
        if temp_value is None or temp_value < 0:
            self.plot_window.display_error("Invalid value entered for sample temperature. Enter a value in Kelvin \
or a sample log field.")
            return False
        self.default_options['temp'] = temp_value
        self.temp = temp_value
        self._slice_plotter_presenter.set_sample_temperature(self.ws_name, temp_value)
        return True
    def ask_sample_temperature_field(self, ws_name: str) -> "Tuple[str, bool]":
        """Show a dialog listing the workspace's log fields.

        Returns ``(entered_text, is_known_log_field)``; raises RuntimeError if
        the dialog is cancelled.  (The editable combo also accepts a literal
        Kelvin value, in which case the bool is False.)
        """
        text = 'Sample temperature not found. Select the sample temperature field or enter a value in Kelvin:'
        ws = get_workspace_handle(ws_name)
        try:
            keys = ws.raw_ws.run().keys()
        except AttributeError:
            # MD workspaces expose run() via an ExperimentInfo instead
            keys = ws.raw_ws.getExperimentInfo(0).run().keys()
        temp_field, confirm = QtWidgets.QInputDialog.getItem(self.plot_window, 'Sample Temperature', text, keys)
        if not confirm:
            raise RuntimeError("sample_temperature_dialog cancelled")
        else:
            return str(temp_field), temp_field in keys
    def _update_recoil_lines(self):
        """ Updates the recoil overplots lines when intensity type changes """
        # action -> [relative mass, recoil?, cif file]; masses 1/2/4 are H/D/He
        lines = {self.plot_window.action_hydrogen: [1, True, ''],
                 self.plot_window.action_deuterium: [2, True, ''],
                 self.plot_window.action_helium: [4, True, ''],
                 self.plot_window.action_arbitrary_nuclei: [self._arb_nuclei_rmm, True, '']}
        _update_overplot_lines(self._slice_plotter_presenter, self.ws_name, lines)
    def _update_lines(self):
        """ Updates the powder/recoil overplots lines when intensity type changes """
        self._update_recoil_lines()
        _update_powder_lines(self, self._slice_plotter_presenter)
        self.update_legend()
        self._canvas.draw()
    def toggle_interactive_cuts(self):
        """Flip both the interactive-cut toolbar state and the cut tool itself."""
        self.toggle_icut_button()
        self.toggle_icut()
    def toggle_icut_button(self):
        """Enable/disable toolbar actions around interactive-cut mode.

        Called *before* toggle_icut(), so ``self.icut is None`` means we are
        about to enter icut mode (and vice versa).
        """
        if not self.icut:
            # entering icut mode: disable interactions that conflict with it
            self.manager.picking_connected(False)
            if self.plot_window.action_zoom_in.isChecked():
                self.plot_window.action_zoom_in.setChecked(False)
                self.plot_window.action_zoom_in.triggered.emit(False)  # turn off zoom
            self.plot_window.action_zoom_in.setEnabled(False)
            self.plot_window.action_keep.trigger()
            self.plot_window.action_keep.setEnabled(False)
            self.plot_window.action_make_current.setEnabled(False)
            self.plot_window.action_flip_axis.setVisible(True)
        else:
            # leaving icut mode: restore normal toolbar state
            self.manager.picking_connected(True)
            self.plot_window.action_zoom_in.setEnabled(True)
            self.plot_window.action_keep.setEnabled(True)
            self.plot_window.action_make_current.setEnabled(True)
            self.plot_window.action_save_cut.setVisible(False)
            self.plot_window.action_flip_axis.setVisible(False)
            self._canvas.setCursor(Qt.ArrowCursor)
            self.icut.set_icut_intensity_category(self.intensity_type)
            self.icut.store_icut_cut_upon_toggle_and_reset()
    def toggle_icut(self):
        """Create the InteractiveCut tool, or tear it down if already active."""
        if self.icut is not None:
            self.icut.clear()
            self.icut = None
            GlobalFigureManager.enable_make_current()
        else:
            self.icut = InteractiveCut(self, self._canvas, self.ws_name)
    def save_icut(self):
        """Save the current interactive cut (icut mode must be active)."""
        self.icut.save_cut()
    def flip_icut(self):
        """Swap the axis the interactive cut integrates over."""
        self.icut.flip_axis()
    def get_slice_cache(self):
        """Return the presenter's cached slice for this workspace."""
        return self._slice_plotter_presenter.get_slice_cache(self.ws_name)
    def get_cached_workspace(self):
        """Return the cached workspace matching the current intensity type."""
        cached_slice = self.get_slice_cache()
        return getattr(cached_slice, self.intensity_type.name.lower())
    def update_workspaces(self):
        self._slice_plotter_presenter.update_displayed_workspaces()
    def on_newplot(self):
        # This callback should be activated by a call to pcolormesh
        # a fresh plot has no overplot lines, so uncheck all overplot actions
        self.plot_window.action_hydrogen.setChecked(False)
        self.plot_window.action_deuterium.setChecked(False)
        self.plot_window.action_helium.setChecked(False)
        self.plot_window.action_arbitrary_nuclei.setChecked(False)
        self.plot_window.action_aluminium.setChecked(False)
        self.plot_window.action_copper.setChecked(False)
        self.plot_window.action_niobium.setChecked(False)
        self.plot_window.action_tantalum.setChecked(False)
        self.plot_window.action_cif_file.setChecked(False)
def generate_script(self, clipboard=False):
try:
generate_script(self.ws_name, None, self, self.plot_window, clipboard)
except Exception as e:
self.plot_window.display_error(e.message)
    # --- colorbar appearance (delegated to the second matplotlib axes) ---
    @property
    def colorbar_label(self):
        """Colorbar axis label text."""
        return self._canvas.figure.get_axes()[1].get_ylabel()
    @colorbar_label.setter
    def colorbar_label(self, value):
        # picker=5 keeps the label clickable for interactive editing
        self._canvas.figure.get_axes()[1].set_ylabel(value, labelpad=20, rotation=270, picker=5)
    @property
    def colorbar_label_size(self):
        return self._canvas.figure.get_axes()[1].yaxis.label.get_size()
    @property
    def colorbar_range(self):
        """(vmin, vmax) colour limits of the plotted mesh."""
        return self._canvas.figure.gca().collections[0].get_clim()
    @colorbar_range.setter
    def colorbar_range(self, value):
        self.change_axis_scale(value, self.colorbar_log)
    @property
    def colorbar_range_font_size(self):
        return self._canvas.figure.get_axes()[1].get_yticklabels()[0].get_size()
    @colorbar_range_font_size.setter
    def colorbar_range_font_size(self, value):
        self._canvas.figure.get_axes()[1].tick_params(axis='y', which='both', labelsize=value)
    @property
    def colorbar_log(self):
        """True when the colour scale is logarithmic."""
        return isinstance(self._canvas.figure.gca().collections[0].norm, colors.LogNorm)
    @colorbar_log.setter
    def colorbar_log(self, value):
        self.change_axis_scale(self.colorbar_range, value)
    # --- titles and axis labels (validated as LaTeX before applying) ---
    @property
    def title(self):
        return self.manager.title
    @title.setter
    def title(self, value):
        if check_latex(value):
            self.manager.title = value
        else:
            self.plot_window.display_error("invalid latex string")
    @property
    def title_size(self):
        return self.manager.title_size
    @property
    def x_label(self):
        return self.manager.x_label
    @x_label.setter
    def x_label(self, value):
        if check_latex(value):
            self.manager.x_label = value
        else:
            self.plot_window.display_error("invalid latex string")
    @property
    def x_label_size(self):
        return self.manager.x_label_size
    @property
    def y_label(self):
        return self.manager.y_label
    @y_label.setter
    def y_label(self, value):
        if check_latex(value):
            self.manager.y_label = value
        else:
            self.plot_window.display_error("invalid latex string")
    @property
    def y_label_size(self):
        return self.manager.y_label_size
    # --- axis ranges, tick sizes, grids and legend (delegated to manager) ---
    @property
    def x_range(self):
        return self.manager.x_range
    @x_range.setter
    def x_range(self, value):
        self.manager.x_range = value
    @property
    def x_range_font_size(self):
        return self.manager.x_range_font_size
    @x_range_font_size.setter
    def x_range_font_size(self, font_size):
        self.manager.x_range_font_size = font_size
    @property
    def y_range(self):
        return self.manager.y_range
    @y_range.setter
    def y_range(self, value):
        self.manager.y_range = value
    @property
    def y_range_font_size(self):
        return self.manager.y_range_font_size
    @y_range_font_size.setter
    def y_range_font_size(self, font_size):
        self.manager.y_range_font_size = font_size
    @property
    def x_grid(self):
        return self.manager.x_grid
    @x_grid.setter
    def x_grid(self, value):
        self.manager.x_grid = value
    @property
    def y_grid(self):
        return self.manager.y_grid
    @y_grid.setter
    def y_grid(self, value):
        self.manager.y_grid = value
    @property
    def show_legends(self):
        return self._legends_shown
    @show_legends.setter
    def show_legends(self, value):
        self._legends_shown = value
    def is_changed(self, item):
        """Return True if attribute *item* differs from its saved default."""
        if self.default_options is None:
            return False
        return self.default_options[item] != getattr(self, item)
    @property
    def y_log(self): # needed for interface consistency with cut plot
        return False
    @staticmethod
    def _get_overplot_datum(): # needed for interface consistency with cut plot
        return 0
    def set_cross_cursor(self):
        """Switch the canvas cursor to a crosshair (used by picking modes)."""
        self._canvas.setCursor(Qt.CrossCursor)
| mantidproject/mslice | src/mslice/plotting/plot_window/slice_plot.py | slice_plot.py | py | 25,085 | python | en | code | 1 | github-code | 13 |
# Drive an LED from an (active-low) IR obstacle sensor on a Raspberry Pi:
# LED on while an object is detected, off otherwise.
import RPi.GPIO as GPIO
import time
# declare the sensor and led pin
sensor_pin = 23
led_pin = 26
# GPIO setup
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(sensor_pin, GPIO.IN)
GPIO.setup(led_pin, GPIO.OUT)
try:
    while True:
        if GPIO.input(sensor_pin):
            # If no object is near
            GPIO.output(led_pin, False)
            # poll at 5 Hz until the sensor goes low again
            while GPIO.input(sensor_pin):
                time.sleep(0.2)
        else:
            # If an object is detected
            GPIO.output(led_pin, True)
except KeyboardInterrupt:
    # Ctrl-C: release the GPIO pins before exiting
    GPIO.cleanup()
| donskytech/raspberrypi-projects | ir_sensor/ir_sensor.py | ir_sensor.py | py | 565 | python | en | code | 3 | github-code | 13 |
17194176065 | import pickle
import numpy as np
import streamlit as st
import utils
print(utils.load_model_from_disk)
# Load the two pre-trained models: MRNet (binary ACL tear) and kneeMRI
# (3-class tear grade).  The MRNet cutoff was tuned offline.
best_mrnet_model_name = 'MRNet_Model3'
best_mrnet_model_cutoff_threshold = 0.429860
mrnet_model = utils.load_model_from_disk(best_mrnet_model_name)
best_kneemri_model_name = 'kneeMRI_Model6'
kneemri_model = utils.load_model_from_disk(best_kneemri_model_name)
st.set_page_config(
    page_title="Medical Image Models",
)
st.title("DETECCIÓN DE DESGARRO DE LIGAMENTO CRUZADO ANTERIOR")
st.subheader("Basado en deep learning")
st.text('''
    Tomás Agustín Barak
    Instituto Politécnico Modelo
''')
# MRI volume upload: .npy (numpy dump) or .pck (pickled array)
mri_file = st.file_uploader("Subir RM",
                            type=['npy', 'pck'],
                            key="mri_file")
# human-readable labels for each model's output classes
mrnet_label = {0: 'Sano', 1: 'Desgarro de LCA'}
kneemri_label = {0: 'Sano', 1: 'Desgarro parcial de LCA', 2: 'Desgarro completo de LCA'}
if mri_file is not None:
    if mri_file.name.endswith('.npy'):
        mri_vol = np.load(mri_file)
    elif mri_file.name.endswith('.pck'):
        # NOTE(review): unpickling an uploaded file executes arbitrary code;
        # only safe if uploads are trusted
        mri_vol = pickle.load(mri_file)
    mri_vol = mri_vol.astype(np.float64)
    predict_button = st.button('Analizar')
    if predict_button:
        with st.spinner('Preprocesando...'):
            preprocessed_mri_vol = utils.preprocess_mri(mri_vol)
        with st.spinner('Analizando...'):
            mri_vol = np.expand_dims(mri_vol, axis=3) # Dimension extra para compatibilidad
            # mri_vol.shape
            # binary model: probability -> thresholded 0/1 label
            mrnet_pred_prob = mrnet_model.predict(np.array([preprocessed_mri_vol]))
            print(mrnet_pred_prob)
            mrnet_pred_label = (mrnet_pred_prob[0] >= best_mrnet_model_cutoff_threshold).astype('int')
            print(mrnet_pred_label)
            # grade model: argmax over the 3 class probabilities
            kneemri_pred_prob = kneemri_model.predict(np.array([preprocessed_mri_vol]))
            print(kneemri_pred_prob)
            kneemri_pred_label = kneemri_pred_prob[0].argmax(axis=-1)
            print(kneemri_pred_label)
        # when the two models disagree, trust whichever is more confident
        if mrnet_pred_label == 1 and kneemri_pred_label == 0:
            if mrnet_pred_prob[0] > kneemri_pred_prob[0][kneemri_pred_label]:
                st.write(f'Prediccion de desgarro del LCA: **{mrnet_label[mrnet_pred_label[0]]}**')
                st.warning("Posible desgarro del LCA, sin certeza sobre el grado del mismo.")
        elif mrnet_pred_label == 0 and kneemri_pred_label > 0:
            if mrnet_pred_prob[0] < kneemri_pred_prob[0][kneemri_pred_label]:
                st.write(f'Prediccion del grado de desgarro del LCA: **{kneemri_label[kneemri_pred_label]}**')
                st.warning("Posibilidad de desgarro del LCA")
        else:
            st.write(f'Prediccion de desgarro del LCA: **{mrnet_label[mrnet_pred_label[0]]}**')
            st.write(f'Prediccion del grado de desgarro del LCA: **{kneemri_label[kneemri_pred_label]}**')
    # slice browser: slider is 1-based for the user, 0-based for indexing
    slice_number = st.slider('Corte de la RM', min_value=1,
                             max_value=mri_vol.shape[0], value=1) - 1
    img = mri_vol[slice_number, :, :]
    # min-max normalise so st.image renders the slice as greyscale
    normalized_image_data = (img - img.min()) / (img.max() - img.min())
    with st.columns(3)[1]:
        st.image(normalized_image_data, width=300)
# st.error('Disclaimer : The model predictions are just for reference. Please consult your doctor for treatment.')
| tomasbarak/acl-tear-detector | src/app.py | app.py | py | 3,298 | python | en | code | 0 | github-code | 13 |
40518343024 | import time
#import RPi.GPIO as gpio
#gpio.setmode(gpio.BCM)
#gpio.setup(12, gpio.OUT)
#led= gpio.PWM(12,10000)
#led.start(0)
global x
from matplotlib import pyplot as plt
def EMG_V():
    """Emit a crude ECG-like waveform (P wave, QRS complex, ST segment) by
    plotting duty-cycle values; the commented-out lines are the original
    Raspberry Pi PWM LED driver this was adapted from.

    NOTE(review): plt.plot()/plt.show() are called once per sample, which
    opens a blocking window per point - presumably meant to accumulate points
    into one figure; confirm intended behaviour.
    """
    for i in range(0,5):
        time.sleep(0.01)
        #p segment
        for i in range(40,80):
            x=i
            plt.plot(x)
            plt.show()
            #time.sleep(0.005)
            print(i)
            time.sleep(0.001)
        print('peak p')
        for i in range(80,20,-1):
            #led.ChangeDutyCycle(i)
            x=i
            plt.plot(x)
            plt.show()
            #time.sleep(0.001)
        print('over p')
        #time pr
        time.sleep(0.01)
        #dip before qrs
        #led.ChangeDutyCycle(0)
        x=0
        plt.plot(x)
        plt.show()
        # qrs
        #time.sleep(0.05)
        #led.ChangeDutyCycle(100)
        x=100
        plt.plot(x)
        plt.show()
        print('qr completed')
        #time.sleep(0.02)
        #led.ChangeDutyCycle(0)
        x=0
        plt.plot(x)
        plt.show()
        #time.sleep(0.02)
        #st start
        #led.ChangeDutyCycle(45)
        plt.plot(45)
        plt.show()
        #time.sleep(0.01)
        #st
        print('start st')
        for i in range(45,80):
            #led.ChangeDutyCycle(i)
            # time.sleep(0.001)
            x=i
            plt.plot(x)
            plt.show()
            time.sleep(0.001)
        for i in range(50,45,-1):
            #led.ChangeDutyCycle(i)
            x=i
            plt.plot(x)
            plt.show()
            #time.sleep(0.001)
        print('end st')
# run the waveform generator immediately on import/execution
EMG_V()
#led.stop()
#gpio.cleanup()
| VipulAlgoSoul/Raspberrypi_oldrobotics | ecgP_001.py | ecgP_001.py | py | 1,729 | python | en | code | 1 | github-code | 13 |
71678517137 | import math
import numpy as np
from .PropagationRule import PropagationRule
from .SlopeFunctions import SlopeFunctions
from .WindFactorCalculator import WindFactorCalculator
class ExtendedRule(PropagationRule):
    """
    The extended model defined by the following paper:
    A. Hernández Encinas, L. Hernández Encinas, S. Hoya White, A. Martín del Rey, G. Rodríguez Sánchez,
    Simulation of forest fire fronts using cellular automata,
    Advances in Engineering Software,
    Volume 38, Issue 6,
    2007,
    Pages 372-378,
    ISSN 0965-9978,
    https://doi.org/10.1016/j.advengsoft.2006.09.002
    """
    def __init__(self, model):
        super().__init__(model)
        # Update height factors using a slope function
        for (cell, _, _) in self.model.grid.coord_iter():
            cell.update_height_factor(SlopeFunctions.slope_h2)
        # Define some support vectors to iterate the neighbors
        # von Neumann (edge) neighbours and diagonal neighbours, respectively
        self.v_adj = np.array([(0, 1), (1, 0), (0, -1), (-1, 0)])
        self.v_diag = np.array([(-1, 1), (1, 1), (1, -1), (-1, -1)])
    def apply(self, cell):
        """ Calculate the next state of the cell """
        # already fully burning, or cannot burn at all: state is fixed
        if cell.state == 1.0 or cell.rate_of_spread == 0.0:
            return cell.state
        # Compute the wind component
        cell.wind_component = WindFactorCalculator.compute_wind_factor(self.model, cell)
        # Calculate the next state using the formula given in the paper
        next_state = (cell.rate_of_spread / self.model.max_ros) * cell.state
        next_state += self.calculate_adj_term(cell)
        # diagonal contribution only matters while the cell is not saturated
        if next_state < 1.0:
            next_state += self.calculate_diag_term(cell)
        # Apply a discretization function
        return self.g(next_state)
    def calculate_adj_term(self, cell):
        """ Calculate the adjacent sum term of the formula """
        sum = 0.0
        for (a, b) in self.v_adj:
            x, y = cell.pos + np.array((a, b))
            if not self.model.grid.out_of_bounds((x, y)):
                neighbor = self.model.get_cell(x, y)
                sum += cell.wind_component * cell.get_height_factor(a, b) * neighbor.rate_of_spread * neighbor.state
        # normalised by the maximum rate of spread, as in the paper
        sum /= self.model.max_ros
        return sum
    def calculate_diag_term(self, cell):
        """ Calculate the diagonal sum term of the formula """
        sum = 0.0
        for (a, b) in self.v_diag:
            x, y = cell.pos + np.array((a, b))
            if not self.model.grid.out_of_bounds((x, y)):
                neighbor = self.model.get_cell(x, y)
                sum += cell.wind_component * cell.get_height_factor(a, b) * neighbor.rate_of_spread**2 * neighbor.state
        # pi/(4*R_max^2) prefactor from the paper's circular-front approximation
        sum *= math.pi / (4 * self.model.max_ros ** 2)
        return sum
    @staticmethod
    def g(value):
        """ The discretization function defined in the paper """
        return 0.0 if value < 1.0 else 1.0
| gdepianto/forest_fire_simulation | forest_fire/ExtendedRule.py | ExtendedRule.py | py | 2,898 | python | en | code | 0 | github-code | 13 |
20194327876 | import pygame as py
from assets import *
import sys
# UI CONSTANTS
HEIGHT = 720
WIDTH = 720
UI_BORDER = 10
NUM_COLS = 10
NUM_ROWS = 20
GRID_BORDER_WIDTH = 2
# GAME CONSTANTS
movespeed = .5        # seconds between automatic downward moves
deltamovespeed = 0    # time accumulated since the last automatic move
board = Board(NUM_ROWS, NUM_COLS, GRID_BORDER_WIDTH) #list to hold all the pieces on the playfield
board.set_current_piece(Piece(NUM_ROWS, NUM_COLS, GRID_BORDER_WIDTH))
def main():
    """Run the Tetris game loop: handle input, advance the piece on a timer,
    and redraw the board at 60 FPS until the window is closed."""
    global movespeed
    global deltamovespeed
    py.init()
    screen = py.display.set_mode((HEIGHT, WIDTH))
    # play area occupies 60% of the window width minus the UI border
    playSurface = py.Surface((WIDTH * .6 - UI_BORDER * 2, HEIGHT - UI_BORDER * 2))
    clock = py.time.Clock()
    py.display.set_caption('Tetris')
    running = True
    dt = 0
    font = py.font.Font(None, 36)
    while running:
        for event in py.event.get():
            if event.type == py.QUIT:
                running = False
                break
            # Check for KEYDOWN events
            if event.type == py.KEYDOWN:
                if event.key == py.K_UP:
                    board.rotate_90()
                elif event.key == py.K_DOWN:
                    board.rotate_90_counterclockwise()
                elif event.key == py.K_LEFT:
                    board.moveleft()
                elif event.key == py.K_RIGHT:
                    board.moveright()
        # check for time events
        # drop the piece one row every `movespeed` seconds of accumulated time
        if deltamovespeed >= movespeed:
            board.movedown()
            print("Piece is at: " + str(board.active_piece().x) + ", " + str(board.active_piece().y))
            deltamovespeed = 0
        else:
            deltamovespeed += dt
        DrawUI(playSurface)
        # board.get_current_piece().draw(playSurface)
        board.draw(playSurface)
        screen.blit(playSurface, (UI_BORDER, UI_BORDER))
        fps_text = font.render(f"FPS: {clock.get_fps():.2f}", True, (255, 255, 255))
        screen.blit(fps_text, (10, 10))
        py.display.flip()
        # dt is the last frame's duration in seconds (capped at 60 FPS)
        dt = clock.tick(60) / 1000
    py.quit()
    sys.exit()
def DrawUI(surface: py.Surface) -> None:
    """Clear *surface* and draw the NUM_COLS x NUM_ROWS background grid."""
    surface.fill((127,127,127))
    X_WIDTH = surface.get_width() / NUM_COLS
    Y_HEIGHT = surface.get_height() / NUM_ROWS
    # draw grid on surface that is NUM_COLS x NUM_ROWS
    for i in range(1,NUM_COLS):
        py.draw.line(surface, (120,120,120), (i * X_WIDTH, 0), (i * X_WIDTH, surface.get_height()), GRID_BORDER_WIDTH)
    for i in range(1,NUM_ROWS):
        py.draw.line(surface, (120,120,120), (0, i * Y_HEIGHT), (surface.get_width(), i * Y_HEIGHT), GRID_BORDER_WIDTH)
#print board to stdout
def printBoard():
    """Print an ASCII dump of the board to stdout (debug helper, currently
    only referenced from the commented-out call in __main__).

    NOTE(review): assumes `board` is iterable over pieces and that piece.x is
    the row index - neither is demonstrated here; confirm against Board/Piece.
    """
    matrix = [["0" for j in range(NUM_COLS)] for i in range(NUM_ROWS)]
    for piece in board:
        for i in range(len(piece.matrix)):
            for j in range(len(piece.matrix[i])):
                if piece.matrix[i][j] == 1:
                    matrix[piece.x + i][piece.y + j] = "1"
    for row in matrix:
        print(row)
if __name__ == '__main__':
# printBoard()
main() | lukedwards99/tetris-clone | game.py | game.py | py | 2,988 | python | en | code | 0 | github-code | 13 |
34617658639 | import time
import pandas as pd
def replace_chars(x):
    """Strip spaces and dots from *x*, map D->'3', L->'1', '-'->'2', and
    return the first 20 characters as a Series keyed game0..game19.

    Raises IndexError if fewer than 20 characters remain after cleaning.
    """
    cleaned = x.replace(' ', '').replace('.', '')
    cleaned = cleaned.translate(str.maketrans('DL-', '312'))
    return pd.Series({'game' + str(idx): cleaned[idx] for idx in range(20)})
def convert_game_to_cols(df):
    """Expand df['game'] into 20 int32 columns game0..game19 in {-1, 0, 1}.

    After replace_chars the mapping is L -> -1, '-' -> 0, D -> 1.
    NOTE(review): mutates *df* in place as well as returning it.
    """
    # print("Converting 'game' to a series of [-1, 1] columns...")
    t1 = time.time()
    #j = df['game'].str.replace('\s|\.', '', regex=True).str.translate(table).str.split('', expand=True)
    j = df['game'].apply(replace_chars)
    # shift digit codes 1/2/3 down to the symmetric range -1/0/1
    j = j.astype("int32") - 2
    #print(j)
    k = ['game' + str(i) for i in range(20)]
    df[k] = j
    #print(time.time() - t1)
return df | qwertyuu/python-ur-machine-learning | src/prep.py | prep.py | py | 655 | python | en | code | 0 | github-code | 13 |
13000140680 | import asyncio
from services.ascn_connection import second_request
import locale
# this function puts values from the second request to datalist for the further iterating to get info user needs
async def listing():
    """Fetch the second-request payload and return its values as a list,
    preserving the payload's key order."""
    payload = await second_request()
    return [payload[key] for key in payload]
# this function is searching data from datalist
async def get_info(period: str, entity: str):
    """Return the 'pl' payload of every record whose month matches *period*
    and whose company matches *entity*."""
    datalist = await listing()
    final_data = [d['pl'] for d in datalist if d['month'] == period and d['company'] == entity]
    return final_data
async def formatting(period: str, entity: str):
    """Localise the matched 'pl' amounts for display: lines whose label
    contains '%' are rendered as percentages, everything else as currency."""
    data = await get_info(period, entity)
    # NOTE(review): requires the ru_RU locale to be installed on the host,
    # otherwise setlocale raises locale.Error - confirm deployment environment
    locale.setlocale(locale.LC_ALL, 'ru_RU')
    for item in data:
        if '%' in item['plLine']:
            # percentage lines are stored as fractions, hence the * 100
            item['amount'] = locale.format_string('%.2f%%', float(item['amount']) * 100)
        else:
            item['amount'] = locale.currency(int(item['amount']), grouping=True)
    return data
async def parsed_data(period: str = 'Jan 21', entity: str = 'Итого'):
    """Build (print and return) the HTML-ish report for *period*/*entity*:
    a header followed by one "label: amount" line per P&L record."""
    data = await formatting(period, entity)
    # the first ''.join wraps a single f-string and is effectively a no-op
    result = ''.join(f"Выбранный период: <b>{period}</b>\nВыбранная компания: <b>{entity}</b>\n\n") + \
             ''.join([f"{obj['plLine']}: {obj['amount']}\n" for obj in data])
    print(result)
    return result
# print(parsed_data('Jan 21', 'Итого'))
# loop = asyncio.get_event_loop()
# loop.run_until_complete(parsed_data('Jan 21', 'Итого')) | moretobasco/firstOptiBot | services/opti_parser.py | opti_parser.py | py | 1,810 | python | en | code | 0 | github-code | 13 |
71415131219 | import re
import pickle
from tornado.escape import to_unicode
from tornado import web, escape
from tornado.web import RequestHandler
class FlashMixin(RequestHandler):
    """
    Extends Tornado's RequestHandler by adding flash functionality.
    (c) Bahman Movaqar http://www.bahmanm.com

    NOTE(security): flash objects are pickled into a plain (unsigned, client
    readable) cookie; unpickling client-controlled data is unsafe. Prefer
    Tornado's secure cookies or a JSON payload.
    """
    def _cookie_name(self, key):
        """Return the cookie name used to store the flash for *key*."""
        return key + '_flash_cookie' # change this to store/retrieve flash
                                     # cookies under a different name
    def _get_flash_cookie(self, key):
        """Return the raw flash cookie value for *key* (None if absent)."""
        return self.get_cookie(self._cookie_name(key))
    def has_flash(self, key):
        """
        Returns true if a flash cookie exists with a given key (string);
        false otherwise.
        """
        return self._get_flash_cookie(key) is not None
    def get_flash(self, key):
        """
        Returns the flash cookie under a given key after converting the
        cookie data into a Flash object.  The cookie is cleared on a
        successful read; None is returned when the cookie is missing or
        cannot be unpickled.
        """
        if not self.has_flash(key):
            return None
        flash = escape.url_unescape(self._get_flash_cookie(key))
        try:
            # NOTE(review): url_unescape returns str while pickle.loads wants
            # bytes on Python 3 - confirm the intended encoding round-trip
            flash_data = pickle.loads(flash)
            self.clear_cookie(self._cookie_name(key))
            return flash_data
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; only unpickling failures should
            # be treated as "no flash".
            return None
def set_flash(self, flash, key='error'):
"""
Stores a Flash object as a flash cookie under a given key.
"""
flash = pickle.dumps(flash)
self.set_cookie(self._cookie_name(key), escape.url_escape(flash)) | gruentee/tornado-tutorial | blog/handlers/mixins/message.py | message.py | py | 1,500 | python | en | code | 0 | github-code | 13 |
37130158991 | # paths
import os
import settings as S
from useful_functs import functions
# Build the checkpoint paths for a previously trained run.  Layout:
# <save_data_path>/<run_folder>/train_<epoch><fold>/<artefact>.pt
train_folder_name = "train_" + S.epoch_load + functions.string(S.fold_load)
run_folder_name = os.path.join(S.save_data_path, S.run_folder)
epoch_load = os.path.join(run_folder_name, train_folder_name)
PATH_load = os.path.join(epoch_load, "model.pt")                      # model weights
PATH_opt_load = os.path.join(epoch_load, "opt.pt")                    # optimiser state
PATH_scaler_load = os.path.join(epoch_load, "scaler.pt")              # AMP grad scaler
PATH_val_loss_load = os.path.join(epoch_load, "val_loss.pt")          # best validation loss
PATH_epochs_completed_load = os.path.join(epoch_load, "epochs_completed.pt")  # resume counter
# Paths load and save
#PATH_load = r'C:\Users\olive\OneDrive\Documents\CNN\Report\3d_model_unet_downsample_64features.pt'
#PATH_opt_load = r'C:\Users\olive\OneDrive\Documents\CNN\Report\3d_model_unet_downsample_opt_64features.pt'
#PATH_sigma_load = r'C:\Users\olive\OneDrive\Documents\CNN\Report\3d_model_unet_downsample_sigma_64features.pt'
#PATH_scaler_load = r'C:\Users\olive\OneDrive\Documents\CNN\Report\3d_model_unet_downsample_scaler_64features.pt'
#PATH_val_loss_load = r'C:\Users\olive\OneDrive\Documents\CNN\Report\3d_model_unet_downsample_val_loss_64features.pt'
#PATH_epochs_completed_load = r'C:\Users\olive\OneDrive\Documents\CNN\Report\3d_model_unet_downsample_val_loss_64features.pt'
| oubino/General-location-finding | paths.py | paths.py | py | 1,307 | python | en | code | 3 | github-code | 13 |
21767103539 | from django.db import models
from catalog.models import Order, Order_Item
class OrderSummary(Order):
    """Proxy of Order used purely to expose a separate "Order Report" entry
    in the Django admin (no extra table or fields)."""
    class Meta:
        proxy = True
        verbose_name = 'Order Report'
        verbose_name_plural = 'Order Reports'
class OrderItemSummary(Order_Item):
    """Proxy of Order_Item exposing a "Product Report" admin entry."""
    class Meta:
        proxy = True
        verbose_name = "Product Report"
        verbose_name_plural = "Product Reports"
| kwabena-aboah/POSsystem | summary/models.py | models.py | py | 387 | python | en | code | 0 | github-code | 13 |
28601573176 | from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
from main.constants import *
from django.contrib.auth.models import AbstractUser
from random_word import RandomWords
from django.utils import timezone
import shortuuid
import uuid
class User(AbstractUser, models.Model):
    """Custom user identified by a unique email plus a short random key."""
    email = models.EmailField(unique=True)
    # short public identifier; generated by save() on first persist
    key = models.CharField(max_length=24, unique=True, default='jaslj932e39jlwi')
    first_name = models.CharField(max_length=30, default="First Name")
    last_name = models.CharField(max_length=30, default="Last Name")
    def save(self, *args, **kwargs):
        # Bug fix: the key used to be regenerated on *every* save, silently
        # invalidating any previously issued/stored key.  Only generate one
        # while the instance still carries the placeholder default.
        if not self.key or self.key == 'jaslj932e39jlwi':
            self.key = shortuuid.ShortUUID().random(length=8)
        super().save(*args, **kwargs)
    def __str__(self):
        return self.email
class Startup(models.Model):
    """A startup owned by a User, carrying its aggregate scoring results."""
    startup_member = models.ForeignKey("User", related_name="startups", on_delete = models.CASCADE)
    name = models.CharField(max_length = 500)
    starting_date = models.DateTimeField(auto_now_add=True)
    no_of_employees = models.IntegerField(
        default=1,
        # validators=[MinValueValidator(1)]
    )
    # number of answers flagged as overselling (see Form thresholds)
    oversell_counter = models.IntegerField(
        default=0,
        validators=[MinValueValidator(0)]
    )
    overall_score = models.FloatField(default=0)
    # score after oversell-based penalties have been applied
    overall_skewed_score = models.FloatField(default=0)
    created_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        return self.startup_member.email + " | " + self.name
class Form(models.Model):
    """A scoring form made up of weighted categories of questions."""
    name = models.CharField(max_length=100)
    key = models.CharField(max_length=24, unique=True, default='sjkdlDSF@4234')
    date_created = models.DateTimeField(default=timezone.now)
    oversell_threshold = models.IntegerField(
        default=0,
        # validators=[MaxValueValidator(100), MinValueValidator(1)]
    )
    # NOTE(review): field name keeps the historical "percetage" typo -
    # renaming would require a migration and break existing queries
    percetage_of_oversell_threshold = models.IntegerField(default=70)
    percentage_decrease_in_overall_score = models.IntegerField(default=10)
    description = models.TextField(null=True, blank=True)
    def save(self, *args, **kwargs):
        # Bug fix: the key used to be regenerated on *every* save, silently
        # invalidating previously shared form keys.  Only generate one while
        # the instance still carries the placeholder default.
        if not self.key or self.key == 'sjkdlDSF@4234':
            self.key = shortuuid.ShortUUID().random(length=8)
        super().save(*args, **kwargs)
    class Meta:
        verbose_name_plural = "Form"
    def __str__(self):
        return self.name + " | " + str(self.date_created)
class Category(models.Model):
    """A weighted group of questions belonging to a Form."""
    form = models.ForeignKey("Form", on_delete=models.CASCADE, related_name="categories", default=1)
    kind = models.CharField(max_length = 50)
    description = models.TextField(blank = True)
    weightage_in_percentage = models.IntegerField(
        default=50,
        # validators=[MaxValueValidator(100), MinValueValidator(1)]
    )
    class Meta:
        # Bug fix: two separate `class Meta` blocks were declared; the second
        # silently replaced the first, dropping the unique_together
        # constraint.  All options are merged into a single Meta here.
        unique_together = ('form', 'kind',)
        ordering = ['kind']
        verbose_name_plural = "Cateogories"  # NOTE(review): typo kept - user-visible admin text
    def __str__(self):
        return self.kind
class Questions(models.Model):
    """A single question inside a Category."""
    category = models.ForeignKey("Category", on_delete=models.CASCADE, related_name="questions")
    text = models.CharField(max_length=10000)
    question_type = models.CharField(max_length=2, choices=QUESTION_PROFILE)
    question_class = models.CharField(max_length=1, choices=QUESTION_CLASS)
    required = models.BooleanField(default=False)
    is_horizontal = models.BooleanField(default=False)
    description = models.TextField(null=True, blank=True)
    # answer_key = models.CharField(max_length = 5000, blank = True)
    class Meta:
        # Bug fix: two `class Meta` blocks were declared; the second silently
        # replaced the first, dropping verbose_name_plural.  Merged here.
        verbose_name_plural = "Questions"
        unique_together = ('category', 'text',)
    def __str__(self):
        return self.text
class Choices(models.Model):
    """A selectable option for a question, carrying its score contribution."""
    question = models.ForeignKey("Questions", on_delete=models.CASCADE, related_name="choices")
    option = models.CharField(max_length=5000)
    points = models.DecimalField(max_digits=6, decimal_places=2)
    class Meta:
        verbose_name_plural = "Choices"
    def __str__(self):
        # return self.question.text + " | " + self.option
        return self.option
class FormResponse(models.Model):
    """One submission of a Form by a User."""
    key = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
    form = models.ForeignKey(Form, related_name="form_responses", on_delete=models.CASCADE)
    user = models.ForeignKey(User, related_name="user_form_responses", default=0, on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)
class Answer(models.Model):
    """A startup's answer to one question within a FormResponse; either free
    text (with a manually assigned score) or one/more selected choices."""
    startup = models.ForeignKey("Startup", related_name = "startup_answers", on_delete = models.CASCADE)
    form_response = models.ForeignKey(FormResponse, related_name="response_answers", on_delete=models.CASCADE)
    answer_to = models.ForeignKey(Questions, on_delete = models.CASCADE, related_name = "answer_to")
    text_response = models.CharField(max_length=5000, blank = True)
    text_score = models.IntegerField(blank = True, default=0)
    choices = models.ManyToManyField(Choices, related_name="choices", blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    class Meta:
        verbose_name_plural = "Answers"
    # def __str__(self):
    #     return str(self.choices.all())
class HeatMaps(models.Model):
    """A single heat-map data point (click/attention value) for a page."""
    x_coordinate = models.IntegerField(default=0)
    y_coordinate = models.IntegerField(default=0)
    value = models.IntegerField(default=0)
    page_name = models.CharField(max_length=30)
    # Bug fix: the old save() override assigned ``self.key``, but this model
    # declares no ``key`` field, so the value was never persisted - the
    # override was dead code and has been removed.
    def __str__(self):
        return self.page_name
| codelikeak47/matter_code | backend/main/models.py | models.py | py | 5,540 | python | en | code | 0 | github-code | 13 |
class RefreshIpOperation(object):
    # name of the resource attribute that mirrors the instance's public IP
    public_ip = "Public IP"

    def __init__(self, instance_service):
        """
        :param instance_service: Instance Service
        :type instance_service: cloudshell.cp.aws.domain.services.ec2.instance.InstanceService
        """
        self.instance_service = instance_service

    def refresh_ip(self, cloudshell_session, ec2_session, deployed_instance_id, private_ip_on_resource,
                   public_ip_on_resource, resource_fullname):
        """Sync the CloudShell resource's IPs with the live EC2 instance.

        Reads the instance's current public/private addresses and, for each
        one that differs from the value stored on the resource, pushes the
        AWS value back via the CloudShell API.

        :param cloudshell_session: CloudShellAPISession
        :param ec2_session: boto3 EC2 session/resource
        :param deployed_instance_id: id of the deployed EC2 instance
        :param private_ip_on_resource: private IP currently on the resource
        :param public_ip_on_resource: public IP currently on the resource
        :param resource_fullname: full name of the CloudShell resource
        """
        instance = self.instance_service.get_active_instance_by_id(ec2_session, deployed_instance_id)
        aws_public_ip = instance.public_ip_address
        aws_private_ip = instance.private_ip_address

        if aws_public_ip != public_ip_on_resource:
            # a stopped instance has no public IP; store an empty string then
            cloudshell_session.SetAttributeValue(resource_fullname,
                                                 RefreshIpOperation.public_ip,
                                                 "" if aws_public_ip is None else aws_public_ip)
        if aws_private_ip != private_ip_on_resource:
            cloudshell_session.UpdateResourceAddress(resource_fullname, aws_private_ip)
| AdamSharon/AWS-Shell | package/cloudshell/cp/aws/domain/ami_management/operations/refresh_ip_operation.py | refresh_ip_operation.py | py | 1,409 | python | en | code | null | github-code | 13 |
def dictionary_input_1d(n):
    """Read *n* "key value" lines from stdin and return them as a dict.

    NOTE(security): the value part is passed through eval(), which executes
    arbitrary expressions - never feed this untrusted input.
    """
    d1 = {}
    for i in range(n):
        a = input().split()
        d1[a[0]] = eval(a[1])
    return d1
""" This is used for 1 dimensional dictionary enter from user"""
#first method
def merge1(dict1,dict2):
    """Merge *dict1* into *dict2* in place and return the merged dict.

    Bug fix: dict.update() returns None, so the old
    ``return dict2.update(dict1)`` always returned None instead of the
    merged dictionary it documented.
    """
    dict2.update(dict1)
    return dict2
#second method
def merge2(dict1,dict2):
    """Return a new dict combining both inputs; dict2 wins on shared keys."""
    return {key: value for source in (dict1, dict2) for key, value in source.items()}
def merge3(dict1,dict2):
    """Return a new dict combining both inputs; dict2 wins on shared keys."""
    combined = dict(dict1)
    combined.update(dict2)
    return combined
if __name__ == "__main__":
dict1 =dictionary_input_1d(int(input()))
dict2 =dictionary_input_1d(int(input()))
print(merge1(dict1, dict2))
print(dict2)
print(merge2(dict1, dict2))
print(merge3(dict1, dict2)) | Rohit-saxena125/Python-code | Dictionary/Mergingdict.py | Mergingdict.py | py | 737 | python | en | code | 0 | github-code | 13 |
2062035670 | import pathlib
import typing
import warnings
import zipfile
import numpy as np
import pandas as pd
import requests
from nemdata import mmsdm, nemde
headers = {
"referer": "https://aemo.com.au/",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36",
}
def download_zipfile(
    file: "typing.Union[mmsdm.MMSDMFile, nemde.NEMDEFile]",
    chunk_size: int = 128,
) -> bool:
    """download zipfile from a url and write to `file.data_directory / raw.zip`

    Streams the response in *chunk_size*-byte chunks so large archives are
    never held fully in memory.  Returns True when the download succeeded
    (HTTP status < 400); returns False without writing anything otherwise.
    """
    request = requests.get(file.url, stream=True, headers=headers)
    is_data_available = request.ok
    if is_data_available:
        with open(file.zipfile_path, "wb") as fd:
            for chunk in request.iter_content(chunk_size=chunk_size):
                fd.write(chunk)
    return is_data_available
def unzip(path: pathlib.Path) -> None:
    """Extract every member of the zip archive at *path* into its parent directory."""
    with zipfile.ZipFile(path) as archive:
        archive.extractall(path.parent)
def add_interval_column(
    data: pd.DataFrame,
    table: "typing.Union[mmsdm.MMSDMTable, nemde.NEMDETable]",
) -> pd.DataFrame:
    """add the `interval-start` and `interval-end` columns

    `interval-end` mirrors the table's native interval column;
    `interval-start` is derived by subtracting each row's interval length
    in minutes (`frequency_minutes`).  Mutates *data* and returns it.
    """
    interval = data[table.interval_column]
    data.loc[:, "interval-end"] = interval
    if isinstance(table.frequency, int):
        # Fixed-frequency table: every row shares one interval length.
        data.loc[:, "frequency_minutes"] = table.frequency
    else:
        assert table.frequency
        # Variable frequency: rows before and after the transition datetime
        # carry different interval lengths.
        # NOTE(review): semantics inferred from attribute names — confirm
        # against the frequency object's definition.
        before_transition = (
            data.loc[:, "interval-end"]
            < table.frequency.transition_datetime_interval_end
        )
        data.loc[
            before_transition, "frequency_minutes"
        ] = table.frequency.frequency_minutes_before
        after_transition = (
            data.loc[:, "interval-end"]
            >= table.frequency.transition_datetime_interval_end
        )
        data.loc[
            after_transition, "frequency_minutes"
        ] = table.frequency.frequency_minutes_after
    # ignore performance warning about no vectorization
    with warnings.catch_warnings():
        warnings.simplefilter(action="ignore", category=pd.errors.PerformanceWarning)
        # Row-wise Timedelta construction (not vectorized, hence the filter).
        data.loc[:, "interval-start"] = interval - np.array(
            [pd.Timedelta(minutes=int(f)) for f in data["frequency_minutes"].values]
        )
    return data
| ADGEfficiency/nem-data | nemdata/utils.py | utils.py | py | 2,339 | python | en | code | 4 | github-code | 13 |
72694241297 | import os
import pymongo
if os.path.exists("env.py"):
import env
# static variables
MONGO_URI = os.environ.get("MONGO_URI")
DATABASE = "myFirstDB"
COLLECTION = "celebrities"
def mongo_connect(url):
    """Return a MongoClient connected to *url*, or None if the connection fails."""
    try:
        connection = pymongo.MongoClient(url)
        return connection
    except pymongo.errors.ConnectionFailure as e:
        # Bug fix: the original wrote `print("...%s") % e`, applying `%` to
        # print()'s None return value and raising a TypeError instead of
        # reporting the failure.  The format operator belongs on the string.
        print("ERROR: Could not connect to MongoDB: %s" % e)
def show_menu():
    """Print the CRUD menu and return the user's raw choice string."""
    print("")
    print("1. Add a record")
    print("2. Find a record by name")
    print("3. Edit a record")
    print("4. Delete a record")
    print("5. Exit")  # fixed: missing "." made this entry inconsistent with the others
    option = input("Enter option: ")
    return option
def get_record():
    """Prompt for a first/last name and return the matching document, or None."""
    print("")
    first = input("Enter first name > ")
    last = input("Enter last name > ")
    # Bug fix: `document` must exist even when the query raises, otherwise the
    # except path fell through to a NameError at `if not document` below.
    document = None
    try:
        document = collection.find_one({ "first": first.lower(), "last": last.lower() })
    except:
        print("ERROR: Error accessing the database")
    if not document:
        print("")
        print("WARNING: No results found!")
    return document
def add_record():
    """Prompt for a new celebrity's details and insert them as one document."""
    print("")
    # (field key, prompt, lowercase the stored value?) — dob is stored as typed.
    fields = (
        ("first", "Enter first name > ", True),
        ("last", "Enter last name > ", True),
        ("dob", "Enter date of birth > ", False),
        ("gender", "Enter gender > ", True),
        ("hair_color", "Enter hair color > ", True),
        ("occupation", "Enter occcupation > ", True),
        ("nationality", "Enter nationality > ", True),
    )
    new_document = {}
    for key, prompt, lowercase in fields:
        value = input(prompt)
        new_document[key] = value.lower() if lowercase else value
    try:
        collection.insert_one(new_document)
        print("")
        print("INFO: Document inserted")
    except:
        print("ERROR: Error accessing the database")
def find_record():
    """Look up one record by name and pretty-print its fields."""
    document = get_record()
    if not document:
        return
    print("")
    for field, value in document.items():
        if field == "_id":
            continue  # internal ObjectId is not shown to the user
        print(field.capitalize() + ": " + value.capitalize())
def edit_record():
    """Fetch a record by name and let the user overwrite each field."""
    document = get_record()
    if not document:
        return
    print("")
    changes = {}
    for field, current in document.items():
        if field == "_id":
            continue  # the ObjectId is not user-editable
        entered = input(field.capitalize() + " [" + current + "] > ")
        # Empty input keeps the existing value.
        changes[field] = entered if entered != "" else current
    try:
        collection.update_one(document, {"$set": changes})
        print("")
        print("INFO: Document updated")
    except:
        print("ERROR: Error accessing the database")
def delete_record():
    """Show a record, ask for confirmation, then delete it if confirmed."""
    document = get_record()
    if not document:
        return
    print("")
    for field, value in document.items():
        if field == "_id":
            continue
        print(field.capitalize() + ": " + value.capitalize())
    print("")
    confirmation = input("Is this the document you want to delete?\nY or N > ")
    print("")
    if confirmation.lower() != "y":
        print("INFO: Document not deleted")
        return
    try:
        collection.delete_one(document)
        print("INFO: Document deleted")
    except:
        print("ERROR: Error accessing the database")
def main_loop():
    """Drive the menu until the user chooses to exit (option 5)."""
    actions = {
        "1": add_record,
        "2": find_record,
        "3": edit_record,
        "4": delete_record,
    }
    while True:
        option = show_menu()
        if option == "5":
            connection.close()
            break
        handler = actions.get(option)
        if handler is not None:
            handler()
        else:
            print("Invalid option")
        print("")
# Connect once at import time, bind the working collection, and run the
# interactive menu loop until the user exits.
connection = mongo_connect(MONGO_URI)
collection = connection[DATABASE][COLLECTION]
main_loop()
| RicardoAzuul/CI-Go_humongous_with_MongoDB | mongo_project.py | mongo_project.py | py | 3,761 | python | en | code | 0 | github-code | 13 |
40021303655 | from question_model import Question
from data import question_data
from quizz_brain import QuizzBrain
# Build Question objects from the raw {"text", "answer"} records.
question_bank = [Question(entry["text"], entry["answer"]) for entry in question_data]
quizz = QuizzBrain(question_bank)
# Keep asking until the quiz brain runs out of questions.
while quizz.still_has_question():
    quizz.next_question()
print("You've completed the quiz")
print(f"Your final score is: {quizz.score}/{len(question_bank)}")
| olivska/quizz_true_false | main.py | main.py | py | 445 | python | en | code | 0 | github-code | 13 |
73783621778 | """
Configuration file for MLDaemon
"""
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn import tree, svm, preprocessing
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.neural_network import MLPClassifier
import os
# Mongo connection and on-disk layout for the daemon.
MONGO_HOST = "127.0.0.1"
MONGO_PORT = 27017
DATABASE_PATH = '/home/aquari/mldaemon/database'
OUTPUT_PATH = '/home/aquari/mldaemon/output'
LOG_PATH = '/home/aquari/mldaemon/log'
SLEEP_TIME = 24 * 60 * 60  # seconds between daemon runs (one day)
# ML Algorithms configurations
cv = 10  # number of cross-validation folds
dim_inic = 1000  # initial dimensionality to try
dim_max = 6000  # upper bound on dimensionality
steps = 1000  # dimensionality step size
apply_pca = False  # whether to run the decomposition before fitting
decomposition_class = PCA  # FactorAnalysis is the imported alternative
output_json_file = os.path.join(OUTPUT_PATH, 'output.json')
# instantiating classifiers
# Each entry is (display name, estimator instance, pickle path for the fitted model).
clfs = [
    ('Decision Tree', tree.DecisionTreeClassifier(), os.path.join(OUTPUT_PATH, 'tree.pkl')),
    ('Gaussian Naive Bayes', GaussianNB(), os.path.join(OUTPUT_PATH, 'gnb.pkl')),
    ('Neural Network MLP', MLPClassifier(solver='lbfgs', alpha=1e-5,
                                         hidden_layer_sizes=(15,), random_state=1), os.path.join(OUTPUT_PATH, 'nnmlp.pkl')),
    ('Linear SVM', svm.SVC(kernel='linear', C=1), os.path.join(OUTPUT_PATH, 'lsvm.pkl')),
]
| LascaTorbot/mldaemon | config.py | config.py | py | 1,138 | python | en | code | 0 | github-code | 13 |
43586801899 | from channels.layers import get_channel_layer
from django.contrib.auth import get_user_model
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from rest_framework import permissions, status
from rest_framework.generics import CreateAPIView, UpdateAPIView, get_object_or_404
from rest_framework.response import Response
from rest_framework.views import APIView
from core.methods import _user, create_unique_code
from shop.models import *
from . import serializers
from .serializers import PaymentSerializer
channel_layer = get_channel_layer()
User = get_user_model()
@method_decorator(csrf_exempt, name="dispatch")
class PaymentAPIView(APIView):
    """List the requesting user's payments (GET) or fetch one by billing id (POST).

    POST expects a `billing_id` matching a Payment's stripe_charge_id.
    """
    permission_classes = [permissions.AllowAny, ]
    serializer_class = PaymentSerializer
    def get(self, format=None, *args, **kwargs):
        """Return every Payment belonging to the requesting user."""
        user = _user(self.request)
        # billing_id = self.request.data.get('billing_id', None)
        queryset = Payment.objects.filter(user=user)
        serializer = PaymentSerializer(queryset, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
    def post(self, request, format=None):
        """Return the single Payment identified by `billing_id`, or 404."""
        user = _user(self.request)
        billing_id = self.request.data.get('billing_id', None)
        try:
            queryset = Payment.objects.get(user=user, stripe_charge_id=billing_id)
        except Payment.DoesNotExist:
            return Response({
                "detail": 'Billing detail does not exist.'
            }, status=status.HTTP_404_NOT_FOUND)
        serializer = PaymentSerializer(queryset)
        return Response(serializer.data, status=status.HTTP_200_OK)
class UserInstallmentPayDetailCreateAPIView(APIView):
    """Create (POST) or partially update (PATCH) a user's installment profile.

    PATCH branches on the payload: without `amount_paid` it updates
    delivery/contact details (address, shipping choice, mpesa number);
    with `amount_paid` it records a Payment and attaches it to the profile.
    """
    permission_classes = [permissions.AllowAny]
    serializer_class = serializers.UserInstallmentPayDetailSerializer
    def post(self, request, format=None):
        """Create an installment profile for an item; 404 on unknown item id."""
        installment_item_id = (request.data.get("installment_item", None))
        installment_item = get_object_or_404(ItemInstallmentDetail, pk=installment_item_id)
        data = {
            "installment_item": installment_item,
            "required_period": request.data.get("payment_period", None)
        }
        serializer = self.serializer_class(data=data, context={"request": request})
        if serializer.is_valid():
            # installment_item is not passed in validated data since in serializer is passed as a read_only field
            # installment_item = serializer.validated_data['installment_item']
            required_period = serializer.validated_data['required_period']
            serializer.save(
                required_period=required_period,
                installment_item=installment_item,
                # To Ensure that the instance is unique from others with paid_amount otherwise
                # it would replace the instance
                amount_paid=0
            )
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def patch(self, request, format=None):
        """Update profile details or record a payment, depending on payload keys.

        NOTE(review): the detail branches below each overwrite `data`, so only
        the last matching branch takes effect, and `data` stays None when no
        branch matches — confirm this is the intended contract.
        """
        data = None
        user = _user(request)
        pk = request.data.get("profile_id", None)
        try:
            installment_pay_detail = UserInstallmentPayDetail.objects.get(id=pk)
        except UserInstallmentPayDetail.DoesNotExist:
            return Response({"detail": "User payment details not found"}, status=status.HTTP_404_NOT_FOUND)
        if request.data.get("amount_paid", None) is None:
            # No Payment is made
            if request.data.get("address", None) and not request.data.get("create_address", None):
                address = get_object_or_404(Address, pk=request.data.get("id"))
                data = {"selected_address": address}
            if request.data.get("selected_shipping_charges", None):
                selected_shipping_charges = get_object_or_404(ShippingCharge, pk=request.data.get("id"))
                data = {"selected_shipping_charges": selected_shipping_charges}
            if request.data.get("mpesa", None):
                mpesa_no = request.data.get("mpesa_no", None)
                data = {"mpesa_no": mpesa_no}
            if request.data.get("create_address", None) and request.data.get("address", None):
                serializer = serializers.AddressSerializer(data=request.data, context={"request": request})
                if serializer.is_valid():
                    address = serializer.save()
                    print(address)
                    data = {"selected_address": address}
        else:
            # User has made payment
            amount_paid = request.data.get("amount_paid", None)
            mpesa_no = request.data.get("mpesa_no", None)
            pay_id = request.data.get("pay_id", None)
            describe = request.data.get("describe", None)
            color = request.data.get("color", None)
            size = request.data.get("size", None)
            # A Payment row records the charge; the human-readable strings
            # capture the shipping choice and installment item at pay time.
            payment = Payment.objects.create(
                user=user,
                stripe_charge_id=pay_id,
                amount=amount_paid,
                mpesa_no=mpesa_no,
                shipping=f'For installment ({installment_pay_detail.selected_shipping_charges.town.name}) - {installment_pay_detail.selected_shipping_charges.shipping_cost}',
                installment_item=f'Installment item: {installment_pay_detail.installment_item.item.title}, Period: {installment_pay_detail.installment_item.payment_period} Months'
            )
            data = {
                "amount_paid": amount_paid,
                "mpesa_no": mpesa_no,
                "color": color,
                "describe": describe,
                "size": size,
                "payment": payment,
            }
            # if mpesa_no:
            #     data["mpesa_no"] = mpesa_no
        serializer = self.serializer_class(instance=installment_pay_detail, data=data, partial=True, context={"request": request})
        if serializer.is_valid():
            serializer.save(**data)
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class UserInstallmentPayDetailUpdateAPIView(UpdateAPIView):
    """Recompute the installment schedule after a payment is recorded."""
    serializer_class = serializers.UserInstallmentPayDetailSerializer
    queryset = UserInstallmentPayDetail.objects.all()
    def perform_update(self, serializer):
        # This view should get installment_item from UserInstallmentPayDetail
        amount_paid = serializer.validated_data['amount_paid']  # read but currently unused
        # Remaining balance should be calculated
        remaining_balance = serializer.validated_data['remaining_balance']
        installment_item = serializer.instance.installment_item
        required_period = serializer.instance.required_period
        paid_for_period = serializer.instance.paid_for_period
        payment_due_date = serializer.instance.payment_due_date
        # Spread the larger of the outstanding balance and the item's deposit
        # over the periods still owed (+1 includes the current period).
        next_amount_to_pay = max(
            remaining_balance,
            installment_item.deposit_amount
        ) / (required_period - paid_for_period + 1)
        paid_for_period += 1
        # NOTE(review): `timedelta` is presumably provided by the star import
        # from shop.models — a 30-day step approximates one month.
        payment_due_date += timedelta(days=30)
        serializer.save(
            next_amount_to_pay=next_amount_to_pay,
            paid_for_period=paid_for_period,
            payment_due_date=payment_due_date
        )
| mwangihub/innovest-shop | shop/api/views_functions.py | views_functions.py | py | 7,378 | python | en | code | 0 | github-code | 13 |
26415138399 | import sqlite3
from xml.dom.minidom import Identified
from flask import Blueprint, render_template, request
from . import db
from flask_login import login_required, current_user
from fonction import get_familles, get_date
main = Blueprint('main', __name__)
@main.route('/')
def index():
return render_template('index.html')
@main.route('/acceuil')
@login_required
def profile():
return render_template('acceuil.html', name=current_user.name)
##################################################################
### partie sur les graphes###
##################################################################
database = sqlite3.connect('test.db')
cursor = database.cursor()
##############################
## on selctionne les nom dans la table famille
familles_liste = []
familles_liste.append('TOUTES')
for row in cursor.execute("SELECT nom from familles"):
familles_liste.append(row[0])
##########################
###############################
#on définit les dictionnaires###
liste_mois = ['TOUS', 'Janvier', 'Fevrier', 'Mars', 'Avril', 'Mai', 'Juin', 'Juillet', 'Aout', 'Septembre', \
'Octobre', 'Novembre', 'Decembre']
mois_valeur = {'Janvier':'01', 'Fevrier':'02', 'Mars':'03', 'Avril':'04', 'Mai':'05', 'Juin':'06',\
'Juillet':'07', 'Aout':'08','Septembre':'09', 'Octobre':'10', 'Novembre':'11', 'Decembre':'12'}
mois_nombre_de_jours = {'01':31, '02':29, '03':31, '04':30, '05':31, '06':30,\
'07':31, '08':31,'09':30, '10':31, '11':30, '12':31}
####
jours_total = [0,31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
for j in range(2, 13):
jours_total[j] += jours_total[j-1]
####################
#########################
## calcul effecutué pour le pleine lune
jour_commencement = 11
mois_commencement = 1
annee_commencement = 1990
pleine_lune = []
pleine_lune.append((jour_commencement, mois_commencement, annee_commencement))
while int(annee_commencement) <= 2020:
jour_commencement += 29
if mois_commencement==2 and jour_commencement > 28:
mois_commencement += 1
jour_commencement %= 28
elif jour_commencement > 30:
mois_commencement += 1
jour_commencement %= 30
if mois_commencement > 12:
annee_commencement += 1
mois_commencement %= 12
pleine_lune.append((jour_commencement, mois_commencement, annee_commencement))
#########################
annees_liste = []
for i in cursor.execute("SELECT DISTINCT date from velages"): #on selectionne les dates à partir de velages
annees_liste.append(i[0][6:10])
premiere_annee = annees_liste[0]
derniere_annee = annees_liste[-1]
annees_liste = list(range(int(premiere_annee), int(derniere_annee)+1))
graphes_types = ['Velages en fonction Jours', 'Pleine Lune', 'Distribution des races']
@main.route('/formulaire', methods=['get', 'post'])
def formulaire():
if request.method == 'POST':
famille= request.form["choix_famille"]
annee = request.form['annee']
mois = request.form['mois']
graph = request.form['graph_type']
pourcentage = request.form['pourcentage']
print("choix_famille:", famille, "start:", annee, "end:", mois , "graph:", graph, "choix du pourcentage: ", pourcentage)
database= sqlite3.connect('test.db')
cursor = database.cursor()
#graphique 1
if graph == 'Velages en fonction Jours':
if mois == 'TOUS':
jours_listes= list(range(1,366))
jour = [0]*367
else:
jours_listes = list(range(1, mois_nombre_de_jours[mois_valeur[mois]] + 1))
jour = [0]*(mois_nombre_de_jours[mois_valeur[mois]] + 1)
if famille == 'TOUTES':
for row in cursor.execute("SELECT date FROM velages"):
row = row[0]
if mois == 'TOUS':
if row[6:10] == annee:
jour[int(row[:2]) + jours_total[int(row[3:5])-1]] += 1
else:
if row[6:10] == annee and row[3:5] == mois_valeur[mois]:
jour[int(row[:2])] +=1
else:
for row in cursor.execute("SELECT velages.date FROM velages INNER JOIN animaux ON velages.mere_id = animaux.id INNER JOIN familles ON animaux.famille_id = familles.id WHERE familles.nom = '{}'".format(famille)):
row = row[0]
if mois == 'TOUS':
if row[6:10]== annee:
jour[int(row[:2]) + jours_total[int(row[3:5])-1]] +=1
else:
if row[6:10]== annee and row[3:5] == mois_valeur[mois]:
jour[int(row[:2])] += 1
return render_template('homeaffichage.html', familles= familles_liste, mois=liste_mois, annees=annees_liste, graphiques=graphes_types,choix_famille=famille, choix_mois= mois, choix_annee=annee, choix_graphes=graph, jour_liste= jours_listes, jours=jour[1:] )
#graphique 2
elif graph == 'Pleine Lune':
id_date = []
if famille == 'TOUTES':
for row in cursor.execute("SELECT velages.date, animaux_velages.animal_id FROM velages INNER JOIN animaux_velages ON velages.id = animaux_velages.velage_id"):
id_date.append(row)
else:
for row in cursor.execute("SELECT velages.date, animaux_velages.animal_id FROM velages INNER JOIN animaux_velages ON velages.id = animaux_velages.velage_id INNER JOIN animaux ON velages.mere_id = animaux.id INNER JOIN familles ON animaux.famille_id = familles.id WHERE familles.nom = '{}'".format(famille)):
id_date.append(row)
id_date_restant = []
if mois == 'TOUS':
for element in id_date:
if element[0][6:10] == annee: #on prend l'élement année
id_date_restant.append(element)
else:
for element in id_date:
if element[0][6:10] == annee and element[0][3:5]== mois_valeur[mois]:
id_date_restant.append(element)
pleine_lune_restant = []
for i in pleine_lune:
if i[2] == int(annee):
pleine_lune_restant.append(i)
longueur_iddaterestant= len(id_date_restant)
vaches_id=[]
vache_pleine_ou_pas= [0] * longueur_iddaterestant
x= 0
for element in id_date_restant:
vaches_id.append(element[1])
for i in pleine_lune_restant:
if int(element[0][3:5])-i[1] == 0 and abs(int(element[0][:2])-i[0]) <=1:
vache_pleine_ou_pas[x] +=1
break
x += 1
return render_template('homeaffichage.html', familles= familles_liste, mois=liste_mois, annees=annees_liste, graphiques=graphes_types,choix_famille=famille, choix_mois= mois, choix_annee=annee, choix_graphes=graph, vaches_id= vaches_id, vache_pleine_lune= vache_pleine_ou_pas)
#Graphique 3
elif graph == 'Distribution des races':
try:
pourcentage = float(pourcentage)
except:
return render_template('formulaire.html', familles=familles_liste, mois=liste_mois, annees=annees_liste, graphes=graphes_types, choix_famille=famille, choix_mois= mois, choix_annee=annee, choix_graphes=graph , pas_float=True)
races = ['Holstein', 'Blanc Bleu Belge', 'Jersey']
races_nr = [0] * 3
for type_id in range(3):
nr_de_vaches = cursor.execute("SELECT animal_id FROM animaux_types WHERE (type_id = {} AND pourcentage >= {})".format(type_id+1, pourcentage)).fetchall()
races_nr[type_id] = len(nr_de_vaches)
return render_template('homeaffichage.html', familles=familles_liste, mois=liste_mois, annees=annees_liste, graphes=graphes_types, choix_famille=famille, choix_mois= mois, choix_annee=annee, choix_graphes=graph, races=races, nr_races=races_nr, minpr=pourcentage)
return render_template('homeaffichage.html', familles=familles_liste, mois=liste_mois, annees=annees_liste, graphes=graphes_types, choix_famille=famille, choix_mois= mois, choix_annee=annee, choix_graphes=graph)
else:
return render_template('formulaire.html', familles=familles_liste, mois=liste_mois, annees=annees_liste, graphes=graphes_types) | uclthomasblaimont/projet2_the_farm | flask_auth_app/project/main.py | main.py | py | 8,555 | python | fr | code | 0 | github-code | 13 |
15303461336 | from typing import List
from copy import deepcopy
class Solution:
def canPlaceFlowers(self, flowerbed: List[int], n: int) -> bool:
planted = 0
for i in range(len(flowerbed)):
if (
flowerbed[i] == 0
and (i == 0 or flowerbed[i - 1] == 0)
and (i == len(flowerbed) - 1 or flowerbed[i + 1] == 0)
):
flowerbed[i] = 1
planted += 1
return planted >= n
testcases = []
testcases.append([[1, 0, 0, 0, 1], 1, True])
testcases.append([[1, 0, 0, 0, 1], 2, False])
solution = Solution()
for testcase in testcases:
testcase_copy = deepcopy(testcase)
output = getattr(solution, dir(solution)[-1])(*testcase[:-1])
if output != testcase[-1]:
getattr(solution, dir(solution)[-1])(*testcase_copy[:-1])
assert (
False
), f"testcase: {testcase[:-1]}, expected: {testcase[-1]}, output: {output}"
| JosephLYH/leetcode | leetcode/605.py | 605.py | py | 959 | python | en | code | 0 | github-code | 13 |
31942994330 | class ListNode:
def __init__(self, x):
self.val = x
self.next = None
# @lc code=start
class Solution:
def reverseList(self, head: ListNode) -> ListNode:
prev = None
while head:
tmp = head.next
head.next = prev
prev = head
head = tmp
return prev
# @lc code=end
| wylu/leetcodecn | src/python/p200to299/206.反转链表.py | 206.反转链表.py | py | 360 | python | en | code | 3 | github-code | 13 |
16644614637 | """
You are given a binary tree.
Write a function that can return the inorder traversal of node values.
Example:
Input:
3
\
1
/
5
Output: [3,5,1]
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
"""iterative solution"""
def inorder_traversal_iter(root):
return result
t1 = TreeNode(3)
t1.right = TreeNode(1)
t1.right.left = TreeNode(5)
print(inorder_traversal(t1)) | JeffreyAsuncion/CodingProblems_Python | Lambda/inorder_traversal_iterative.py | inorder_traversal_iterative.py | py | 538 | python | en | code | 0 | github-code | 13 |
14487118963 | import tensorflow as tf
import string
# Load the dataset
with open('TENSORFLOW/data.txt', 'r') as f:
data = f.read().splitlines()
# Preprocess the dataset
correct_words = set(data[::2])
incorrect_words = set(data[1::2])
all_words = correct_words.union(incorrect_words)
# Create a dictionary to map words to indices
word_to_idx = {word: i for i, word in enumerate(all_words)}
# Convert the words to numerical features
X = []
y = []
for word in correct_words:
X.append([1 if i == word_to_idx[word] else 0 for i in range(len(all_words))])
y.append(1)
for word in incorrect_words:
X.append([1 if i == word_to_idx[word] else 0 for i in range(len(all_words))])
y.append(0)
# Convert the data to NumPy arrays
X = tf.convert_to_tensor(X, dtype=tf.float32)
y = tf.convert_to_tensor(y, dtype=tf.float32)
# Split the data into training and testing sets
X_train, X_test = X[:800], X[800:]
y_train, y_test = y[:800], y[800:]
# Define the neural network model
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(64, activation='relu', input_shape=(len(all_words),)),
tf.keras.layers.Dense(32, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Train the model
model.fit(X_train, y_train, epochs=10, batch_size=32, validation_data=(X_test, y_test))
# Evaluate the model
model.evaluate(X_test, y_test)
# Save the model
model.save('spellcheck_model') | Thewildraptor777/School-Code | PYTHON/TENSORFLOW/model.py | model.py | py | 1,500 | python | en | code | 0 | github-code | 13 |
2867311378 | # -*- coding: utf-8 -*-
from unittest import TestCase
from jsonschema import RefResolver
from mock import patch, Mock
from pluct.resource import Resource, ObjectResource, ArrayResource
from pluct.session import Session
from pluct.schema import Schema
class BaseTestCase(TestCase):
    """Shared fixture: one Session per test plus Resource factory shortcuts."""
    def setUp(self):
        self.session = Session()
    def resource_from_data(self, url, data=None, schema=None, response=None):
        # Thin wrapper over Resource.from_data that injects the shared session.
        resource = Resource.from_data(
            url=url, data=data, schema=schema, session=self.session,
            response=response)
        return resource
    def resource_from_response(self, response, schema):
        # Thin wrapper over Resource.from_response with the shared session.
        resource = Resource.from_response(
            response, session=self.session, schema=schema)
        return resource
class ResourceInitTestCase(BaseTestCase):
    """Direct construction of the abstract Resource base class must fail."""
    def test_blocks_init_of_base_class(self):
        self.assertRaises(NotImplementedError, Resource)
class ResourceTestCase(BaseTestCase):
    """Behaviour of an ObjectResource built from data plus an object schema."""
    def setUp(self):
        super(ResourceTestCase, self).setUp()
        # Minimal app payload and a schema that validates it.
        self.data = {
            "name": "repos",
            "platform": "js",
        }
        self.raw_schema = {
            'type': "object",
            'required': ["platform"],
            'title': "some title",
            'properties': {
                'name': {'type': 'string'},
                'platform': {'type': 'string'}
            },
            'links': [
                {
                    "href": "/apps/{name}/log",
                    "method": "GET",
                    "rel": "log"
                },
                {
                    "href": "/apps/{name}/env",
                    "method": "GET",
                    "rel": "env"
                }
            ]}
        self.schema = Schema(
            href="url.com", raw_schema=self.raw_schema, session=self.session)
        self.url = "http://app.com/content"
        self.result = self.resource_from_data(
            url=self.url, data=self.data, schema=self.schema)
    def test_get_should_returns_a_resource(self):
        self.assertIsInstance(self.result, Resource)
    def test_missing_attribute(self):
        with self.assertRaises(AttributeError):
            self.result.not_found
    def test_str(self):
        expected = "<Pluct ObjectResource %s>" % self.data
        self.assertEqual(expected, str(self.result))
    def test_data(self):
        self.assertEqual(self.data, self.result.data)
    def test_response(self):
        # Built from data, not an HTTP response, so .response is absent.
        self.assertEqual(self.result.response, None)
    def test_iter(self):
        # Iterating a resource yields its data keys, like a dict.
        iterated = [i for i in self.result]
        self.assertEqual(iterated, list(self.data.keys()))
    def test_schema(self):
        self.assertEqual(self.schema.url, self.result.schema.url)
    def test_is_valid_schema_error(self):
        # Temporarily require a missing field, then restore the schema.
        old = self.result.schema['required']
        try:
            self.result.schema['required'] = ["ble"]
            self.assertFalse(self.result.is_valid())
        finally:
            self.result.schema.required = old
    def test_is_valid_invalid(self):
        data = {
            'doestnotexists': 'repos',
        }
        result = self.resource_from_data('/url', data=data, schema=self.schema)
        self.assertFalse(result.is_valid())
    def test_is_valid(self):
        self.assertTrue(self.result.is_valid())
    def test_resolve_pointer(self):
        self.assertEqual(self.result.resolve_pointer("/name"), "repos")
    def test_resource_should_be_instance_of_dict(self):
        self.assertIsInstance(self.result, dict)
    def test_resource_should_be_instance_of_schema(self):
        self.assertIsInstance(self.result, Resource)
    def test_is_valid_call_validate_with_resolver_instance(self):
        # is_valid() must hand jsonschema a RefResolver whose http(s)
        # handlers fetch remote refs through the resource's session.
        with patch('pluct.resources.validate') as mock_validate:
            self.result.is_valid()
            self.assertTrue(mock_validate.called)
            resolver = mock_validate.call_args[-1]['resolver']
            self.assertIsInstance(resolver, RefResolver)
            http_handler, https_handler = list(resolver.handlers.values())
            self.assertEqual(http_handler, self.result.session_request_json)
            self.assertEqual(https_handler, self.result.session_request_json)
    def test_session_request_json(self):
        # session_request_json must GET through the session and decode JSON.
        mock_request_return = Mock()
        with patch.object(self.result.session, 'request') as mock_request:
            mock_request.return_value = mock_request_return
            self.result.session_request_json(self.url)
            self.assertTrue(mock_request.called)
            self.assertTrue(mock_request_return.json.called)
class ParseResourceTestCase(BaseTestCase):
    """How nested array items are (or are not) wrapped as Resources."""
    def setUp(self):
        super(ParseResourceTestCase, self).setUp()
        # Schema for a single item inside the 'objects' array.
        self.item_schema = {
            'type': 'object',
            'properties': {
                'id': {
                    'type': 'integer'
                }
            },
            'links': [{
                "href": "http://localhost/foos/{id}/",
                "method": "GET",
                "rel": "item",
            }]
        }
        # Top-level schema: 'objects' declares item schemas, 'values' does not.
        self.raw_schema = {
            'title': "title",
            'type': "object",
            'properties': {
                'objects': {
                    'type': 'array',
                    'items': self.item_schema,
                },
                'values': {
                    'type': 'array'
                }
            }
        }
        self.schema = Schema(
            href="url.com", raw_schema=self.raw_schema, session=self.session)
    def test_wraps_array_objects_as_resources(self):
        data = {
            'objects': [
                {'id': 111}
            ]
        }
        app = self.resource_from_data(
            url="appurl.com", data=data, schema=self.schema)
        item = app['objects'][0]
        self.assertIsInstance(item, ObjectResource)
        self.assertEqual(item.data['id'], 111)
        self.assertEqual(item.schema, self.item_schema)
    def test_eq_operators(self):
        # A resource compares equal to the plain dict it was built from.
        data = {
            'objects': [
                {'id': 111}
            ]
        }
        app = self.resource_from_data(
            url="appurl.com", data=data, schema=self.schema)
        self.assertDictEqual(data, app)
    def test_wraps_array_objects_as_resources_even_without_items_key(self):
        data = {
            'values': [
                {'id': 1}
            ]
        }
        resource = self.resource_from_data(
            url="appurl.com", data=data, schema=self.schema)
        item = resource['values'][0]
        self.assertIsInstance(item, Resource)
        self.assertEqual(item.data['id'], 1)
    @patch("requests.get")
    def test_doesnt_wrap_non_objects_as_resources(self, get):
        # Scalars and plain lists inside an array stay unwrapped.
        data = {
            'values': [
                1,
                'string',
                ['array']
            ]
        }
        resource_list = self.resource_from_data(
            url="appurl.com", data=data, schema=self.schema)
        values = resource_list['values']
        self.assertEqual(values, data['values'])
class FromResponseTestCase(BaseTestCase):
    """Building resources from a (mocked) HTTP response object."""
    def setUp(self):
        super(FromResponseTestCase, self).setUp()
        self._response = Mock()
        self._response.url = 'http://example.com'
        content_type = 'application/json; profile=http://example.com/schema'
        self._response.headers = {
            'content-type': content_type
        }
        self.schema = Schema('/', raw_schema={}, session=self.session)
    def test_should_return_resource_from_response(self):
        self._response.json.return_value = {}
        returned_resource = self.resource_from_response(
            self._response, schema=self.schema)
        self.assertEqual(returned_resource.url, 'http://example.com')
        self.assertEqual(returned_resource.data, {})
    def test_should_return_resource_from_response_with_no_json_data(self):
        # Unparsable bodies must degrade to empty data, not raise.
        self._response.json = Mock(side_effect=ValueError())
        returned_resource = self.resource_from_response(
            self._response, schema=self.schema)
        self.assertEqual(returned_resource.url, 'http://example.com')
        self.assertEqual(returned_resource.data, {})
    def test_should_return_resource_from_response_with_response_data(self):
        # The originating response (and its headers) stays attached.
        self._response.json.return_value = {}
        returned_resource = self.resource_from_response(
            self._response, schema=self.schema)
        self.assertEqual(returned_resource.response, self._response)
        self.assertEqual(returned_resource.response.headers,
                         self._response.headers)
    def test_resource_with_an_array_without_schema(self):
        # An array property absent from the schema is kept verbatim.
        data = {
            'units': [
                {'name': 'someunit'}
            ],
            'name': 'registry',
        }
        s = Schema(
            href='url',
            raw_schema={
                'title': 'app schema',
                'type': 'object',
                'required': ['name'],
                'properties': {'name': {'type': 'string'}}
            },
            session=self.session)
        response = self.resource_from_data("url", data, s)
        self.assertDictEqual(data, response.data)
class ResourceFromDataTestCase(BaseTestCase):
    """Resource.from_data dispatches on the payload's Python type."""
    def test_should_create_array_resource_from_list(self):
        data = []
        resource = self.resource_from_data('/', data=data)
        self.assertIsInstance(resource, ArrayResource)
        self.assertEqual(resource.url, '/')
        self.assertEqual(resource.data, data)
        expected = "<Pluct ArrayResource %s>" % resource.data
        self.assertEqual(expected, str(resource))
    def test_should_create_object_resource_from_dict(self):
        data = {}
        resource = self.resource_from_data('/', data=data)
        self.assertIsInstance(resource, ObjectResource)
        self.assertEqual(resource.url, '/')
        self.assertEqual(resource.data, data)
| globocom/pluct | pluct/tests/test_resource.py | test_resource.py | py | 9,929 | python | en | code | 39 | github-code | 13 |
73668968656 | import cc_types
def create_type(cpp_type):
    # Thin wrapper: build the cc_types descriptor for a raw C++ type name.
    return cc_types.typen(cpp_type)
def create_lvalue(cpp_type, name):
    """Describe a named lvalue of the given C++ type."""
    return {
        'kind': 'lvalue',
        'type': create_type(cpp_type),
        'name': name,
    }
def create_struct(name, members, default_values=None, member_functions=None):
    """Describe a struct: its members, defaults, functions and type deps.

    Args:
        name: struct identifier.
        members: sequence of member descriptors; each must have a 'type' key,
            which is collected into the struct's dependency list.
        default_values: optional mapping of member default values.
        member_functions: optional list of member-function descriptors.

    Returns:
        A descriptor dict with kind 'struct'.
    """
    # Bug fix: the original used mutable default arguments ({} and []), so
    # every call omitting them shared the same objects — mutating one
    # returned descriptor's defaults leaked into all others.
    if default_values is None:
        default_values = {}
    if member_functions is None:
        member_functions = []
    dependencies = [member['type'] for member in members]
    return {
        'kind': 'struct',
        'name': name,
        'members': members,
        'member_functions': member_functions,
        'default_values': default_values,
        'deps': dependencies
    }
def create_function(name, arguments, returns, member_of=None, impl=None):
    """Describe a free or member function.

    Args:
        name: function identifier.
        arguments: sequence of argument descriptors, each with a 'type' key.
        returns: return-type descriptor.
        member_of: owning class/struct identifier, or None for a free function.
        impl: optional implementation object exposing a `.deps` list and a
            useful `str()` form.

    Returns:
        A descriptor dict with kind 'function'.
    """
    impl_deps = [] if impl is None else impl.deps
    # Dependencies are the argument types plus the implementation's own deps.
    dependencies = [argument['type'] for argument in arguments]
    return {
        'kind': 'function',
        'name': name,
        'args': tuple(arguments),
        'returns': returns,
        'impl': str(impl) if impl is not None else None,
        'deps': dependencies + impl_deps,
        # Bug fix: the original hard-coded None here, silently discarding
        # the caller-supplied `member_of`.
        'member_of': member_of,
    }
| jpanikulam/op_graph | op_graph/create.py | create.py | py | 1,156 | python | en | code | 2 | github-code | 13 |
15978452273 | """Given an array of integers temperatures represents the daily temperatures,
return an array answer such that answer[i] is the number of days you have to wait after the ith day to get a warmer temperature.
If there is no future day for which this is possible, keep answer[i] == 0 instead
"""
from stack_array_based import Stack
def dailyTemperatures(temperatures):
    """Return, for each day, the number of days until a warmer temperature.

    answer[i] is the wait (in days) after day i for a strictly warmer
    temperature, or 0 if no warmer day follows.  Monotonic-stack
    algorithm, O(n) time.

    Improvement: uses a plain list as the stack of unresolved day indices
    instead of the custom array-based Stack class — same results, fewer
    attribute lookups, and no project-local dependency inside the function.
    """
    answer = [0] * len(temperatures)
    pending = []  # indices of days still waiting for a warmer temperature
    for day, temp in enumerate(temperatures):
        # Resolve every pending day that is colder than today.
        while pending and temperatures[pending[-1]] < temp:
            prev = pending.pop()
            answer[prev] = day - prev
        pending.append(day)
    return answer
if __name__ == "__main__":
    # Smoke-check against the LeetCode sample inputs.
    for sample in ([73,74,75,71,69,72,76,73], [30,40,50,60], [30,60,90]):
        print(dailyTemperatures(sample))
| abood-74/Data-Structer-and-Algorithms | DS/Stack/Daily Temperatures.py | Daily Temperatures.py | py | 1,270 | python | en | code | 0 | github-code | 13 |
# Read days, hours, minutes and seconds from the user and
# compute the grand total expressed in seconds.
dias = int(input("Dias: "))
horas = int(input("Horas: "))
minutos = int(input("Minutos: "))
segundos = int(input("Segundos: "))
# Nested unit conversion: ((d*24 + h)*60 + m)*60 + s == 86400d + 3600h + 60m + s.
total = ((dias * 24 + horas) * 60 + minutos) * 60 + segundos
print("Total em segundo é: %d" % total) | Monamello/introducao-programacao-python-exercicios | capitulo_3/exercicio3-9.py | exercicio3-9.py | py | 426 | python | pt | code | 0 | github-code | 13 |
9521370707 | from __future__ import absolute_import
from datetime import datetime, timedelta
from mock import patch, MagicMock, call
from django.test import TestCase, override_settings
from silver.fixtures.factories import TransactionFactory
from silver.fixtures.test_fixtures import PAYMENT_PROCESSORS
from silver.utils.decorators import get_transaction_from_token
from silver.utils.payments import (get_payment_url, get_payment_complete_url,
_get_jwt_token)
@override_settings(PAYMENT_PROCESSORS=PAYMENT_PROCESSORS)
class TestPaymentsUtilMethods(TestCase):
    """Unit tests for silver's payment URL / JWT token helpers.

    _get_jwt_token is patched out where only the URL shape matters, so the
    URL tests do not depend on token contents or expiry.
    """

    def test_get_payment_url(self):
        transaction = TransactionFactory()
        expected_url = '/pay/token/'
        with patch('silver.utils.payments._get_jwt_token') as mocked_token:
            mocked_token.return_value = 'token'
            self.assertEqual(get_payment_url(transaction, None), expected_url)
            assert mocked_token.mock_calls == [call(transaction)]

    def test_get_payment_complete_url(self):
        transaction = TransactionFactory()
        expected_url = '/pay/token/complete?return_url=http://google.com'
        mocked_request = MagicMock(GET={'return_url': 'http://google.com'},
                                   versioning_scheme=None)
        mocked_request.build_absolute_uri.return_value = '/pay/token/complete'
        with patch('silver.utils.payments._get_jwt_token') as mocked_token:
            mocked_token.return_value = 'token'
            self.assertEqual(get_payment_complete_url(transaction, mocked_request),
                             expected_url)
            assert mocked_token.mock_calls == [call(transaction)]

    def test_get_transaction_from_token(self):
        transaction = TransactionFactory()
        mocked_view = MagicMock()
        token = _get_jwt_token(transaction)
        self.assertEqual(get_transaction_from_token(mocked_view)(None, token),
                         mocked_view())
        # Bug fix: `mocked_view.has_calls(...)` merely invoked an auto-created
        # child mock and asserted nothing; `assert_has_calls` actually checks.
        mocked_view.assert_has_calls([call(None, transaction, False), call()])

    def test_get_transaction_from_expired_token(self):
        transaction = TransactionFactory()
        mocked_view = MagicMock()
        with patch('silver.utils.payments.datetime') as mocked_datetime:
            # Pretend "now" was two years ago so the issued token is expired.
            mocked_datetime.utcnow.return_value = datetime.utcnow() - timedelta(days=2 * 365)
            token = _get_jwt_token(transaction)
        self.assertEqual(get_transaction_from_token(mocked_view)(None, token),
                         mocked_view())
        mocked_view.assert_has_calls([call(None, transaction, True), call()])
| silverapp/silver | silver/tests/unit/test_payments_util.py | test_payments_util.py | py | 2,574 | python | en | code | 292 | github-code | 13 |
17934531417 | #!/bin/python3
#https://www.hackerrank.com/challenges/three-month-preparation-kit-minimum-absolute-difference-in-an-array/problem?isFullScreen=true&h_l=interview&playlist_slugs%5B%5D=preparation-kits&playlist_slugs%5B%5D=three-month-preparation-kit&playlist_slugs%5B%5D=three-month-week-four
import math
import os
import random
import re
import sys
#
# Complete the 'minimumAbsoluteDifference' function below.
#
# The function is expected to return an INTEGER.
# The function accepts INTEGER_ARRAY arr as parameter.
#
def minimumAbsoluteDifference(arr):
    """Return the minimum absolute difference between any two elements of arr.

    After sorting, the closest pair is always adjacent, so one linear scan
    over consecutive pairs suffices (O(n log n) overall).  Works on a sorted
    copy, so the caller's list is no longer mutated.  Returns float('inf')
    for fewer than two elements (matching the original empty-scan result).

    Cleanup: removed the dead commented-out enumerate variant and the
    redundant recomputation of the difference in the update branch.
    """
    ordered = sorted(arr)
    # b - a is already non-negative because `ordered` is sorted ascending.
    return min(
        (b - a for a, b in zip(ordered, ordered[1:])),
        default=float('inf'),
    )
if __name__ == '__main__':
    # HackerRank harness: read n (implicit in the array) and the array,
    # then write the answer to the path given by OUTPUT_PATH.
    # Improvement: `with` guarantees the handle is closed even on error.
    with open(os.environ['OUTPUT_PATH'], 'w') as fptr:
        n = int(input().strip())
        arr = list(map(int, input().rstrip().split()))
        result = minimumAbsoluteDifference(arr)
        fptr.write(str(result) + '\n')
| antoniojsp/retos | absolute_diff.py | absolute_diff.py | py | 1,349 | python | en | code | 0 | github-code | 13 |
21462711099 | import numpy as np
class CustomerSimulator(object):
    """Poisson demand simulator driven by a logistic purchase-rate model.

    Args:
        params: sequence of 6 coefficients for the feature vector built in
            get_lambda.
        factor: scaling applied to the logistic rate (default 80).
    """

    def __init__(self, params, factor=80):
        self.params = np.asarray(params)
        self.num_comps = 1  # number of competitors used in the feature vector
        self.factor = factor

    def get_lambda(self, length, a, p, r):
        """Expected demand over `length` at own price `a`, competitor
        price `p` and reference price `r`."""
        features = np.array([
            1,
            # rank-style term: 1 if own price is strictly cheapest,
            # 1.5 on a price tie, 2 if the competitor undercuts us
            (1 + ((1 if p < a else 0) + (1 if p <= a else 0)) / 2),
            a - p,
            self.num_comps,
            (a + p) / 2,
            a - r
        ])
        # Hoisted linear score: the original evaluated np.dot twice.
        score = np.dot(self.params, features)
        return (length * np.exp(score) / (1 + np.exp(score))) * self.factor

    def __call__(self, length, own_price, comp_price, ref_price):
        """Draw one Poisson-distributed demand realisation (stochastic)."""
        return np.random.poisson(self.get_lambda(length, own_price, comp_price, ref_price))
# Default logistic coefficients, one per feature in get_lambda:
# (intercept, price-rank term, price gap a-p, #competitors, mean price,
# reference-price gap a-r).  The *_REF variant adds a small negative
# weight on the reference-price gap.
DEFAULT_PARAMS = [-3.89, -0.56, -0.01, 0.07, -0.03, 0]
DEFAULT_PARAMS_REF = [-3.89, -0.56, -0.01, 0.07, -0.03, -0.01]
# NOTE: instantiated at import time (module-level side effect).
DEFAULT_SIM = CustomerSimulator(DEFAULT_PARAMS)
16006896140 | from rpg.maze_tester.maze_exp import *
if __name__ == '__main__':
    exp = build_exp(base_config)
    # Sparse-reward AdroitHammer sweep over the full set of baseline/RPG variants.
    exp.add_exps(
        'hammer',
        dict(
            _base=['mbsac', 'mbsacrnd', 'rpgnormal', 'rpgdiscrete', 'mbsacrnd5', 'rpgnormal1', 'rpgdiscrete1'],
            env_cfg=dict(reward_type='sparse'),
        ),
        base=None, default_env='AdroitHammer',
        #names=['rnd', 'rl', 'rndx5', 'rnd001', 'rnd0005', 'rnd01'] + ['g0005', 'g005', 'g001', 'g01', 'g05', 'g1']
    )
    # python3 maze_tester/exp_1_11.py --exp hammer2 --runall remote --wandb True --seed 1,2,3 --cpu 5 --silent
    # also test group
    # Follow-up AdroitHammer sweep on a reduced variant set (adds rpgsac).
    exp.add_exps(
        'hammer2',
        dict(
            _base=['rpgnormal1', 'mbsacrnd', 'rpgsac'],
            env_cfg=dict(reward_type='sparse'),
        ),
        base=None, default_env='AdroitHammer',
        #names=['rnd', 'rl', 'rndx5', 'rnd001', 'rnd0005', 'rnd01'] + ['g0005', 'g005', 'g001', 'g01', 'g05', 'g1']
    )
    # Sparse-reward cabinet-opening task (EEArm) with the two main variants.
    exp.add_exps(
        'cabinet',
        dict(
            _base=['rpgnormal1', 'mbsacrnd'],
            env_cfg=dict(reward_type='sparse'),
        ),
        base=None, default_env='EEArm',
        #names=['rnd', 'rl', 'rndx5', 'rnd001', 'rnd0005', 'rnd01'] + ['g0005', 'g005', 'g001', 'g01', 'g05', 'g1']
    )
    # Meta-World stick-pull task; env_cfg n=5 -- presumably an env repeat/batch
    # count, confirm against the env config schema.
    exp.add_exps(
        'stickpull',
        dict(
            _base=['mbsacrnd', 'rpgnormal'],
            env_cfg=dict(reward_type='sparse', n=5),
        ),
        base=None, default_env='MWStickPull',
    )
exp.main() | haosulab/RPG | rpg/maze_tester/exp_1_11.py | exp_1_11.py | py | 1,509 | python | en | code | 18 | github-code | 13 |
36082893572 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Pomocny skript na vyhodnoceni, jestli je den nebo noc (a jestli se
behem foceni ma zapinat baterka nebo ne).
Pouziti:
./tma.py <OD> <DO>
./tma.py 20:00 6:30
Parametry <OD> <DO> rikaji, odkdy dokdy je tma. Po spusteni skriptu
si zjisti aktualni cas, a pokud se vleze do zadaneho intervalu, vrati
navratovou hodnotu 0. V opacnem pripade vraci 1.
Poznamka: rozpoznani navratove hodnoty v BASHi se da udelat takto:
~/bin/noc.py 20 7
RET=$? # v promenne $RET se ulozi navratova hodnota z noc.py
if [ $RET -eq 0 ] # test na hodnotu 0
then ~/bin/svetlo.sh 1 # reakce na hodnotu 0
fi
"""
import sys
from datetime import datetime, time
def parse_time(val):
    """Parse a string such as "6" or "20:45" into a datetime.time object.

    A bare hour ("6") is treated as "6:00".  Returns None for anything that
    is not a valid time (e.g. "sdkjfls" or "56:99").

    Fix: the bare `except:` is narrowed to ValueError, which is what
    time() raises for out-of-range hours/minutes; the original clause also
    silently hid unrelated errors.  (Docstring translated from Czech.)
    """
    parts = val.split(':')
    if len(parts) == 1:
        parts.append('0')  # bare hour -> minutes default to 0
    elif len(parts) != 2:
        return None
    # Drop non-numeric fields; a valid input keeps exactly two of them.
    parts = [int(i) for i in parts if i.isdigit()]
    if len(parts) != 2:
        return None
    try:
        return time(parts[0], parts[1])
    except ValueError:  # out-of-range values, e.g. "56:99"
        return None
if __name__ == '__main__':
    # Argument check: expects exactly two HH:MM values (dark-from, dark-to).
    if len(sys.argv) != 3:
        sys.exit('Zadej cas odkdy dokdy si myslis, ze je tma, napr. "%s 20:30 05:30"' % (sys.argv[0], ))

    time_from = parse_time(sys.argv[1])
    time_to = parse_time(sys.argv[2])
    if time_from is None:
        sys.exit('Chybny format casu od: %s. Zadej ho ve tvaru HH:MM, napr. 20:30.' % (sys.argv[1], ))
    if time_to is None:
        sys.exit('Chybny format casu do: %s. Zadej ho ve tvaru HH:MM, napr. 06:20.' % (sys.argv[2], ))

    # So -- is it day or night right now?
    # NOTE(review): the `or` is correct for night intervals spanning midnight
    # (e.g. 20:00-06:30, as in the module docstring); for a same-day interval
    # (from < to) it would match nearly everything -- confirm spanning-midnight
    # input is the intended use.
    now = datetime.now()
    if now.time() > time_from or now.time() < time_to:
        # It is dark -> exit status 0.
        sys.exit()

    # Daytime -> exit status 1.
    sys.exit(1)
| msgre/kvetinac | kv_pi/bin/noc.py | noc.py | py | 2,004 | python | cs | code | 8 | github-code | 13 |
15102313701 | from tqdm.auto import tqdm
import pandas as pd
import numpy as np
class CFG:
    """Static configuration: input locations for the train/valid parquet
    split and the per-aid feature directory (plain class attributes)."""
    train_path = "../inputs/train_valid/train_parquet"
    val_path = "../inputs/train_valid/test_parquet"
    val_label_path = "../inputs/train_valid/test_labels.parquet"
    aid_feat_path = "../inputs/aid_feats/"
df = pd.read_parquet(CFG.val_path)
# ts is in milliseconds: derive hour-of-day and minute-of-hour.
df["hour"] = df["ts"]//3600//1000%24
df["minute"] = df["ts"]//60//1000%60
for l in ["clicks", "carts", "orders"]:
    df[f"is_{l}"] = df["type"] == l
# Build one feature record per session.
recs = []
for s, g in tqdm(df.groupby("session")):
    dic = {"session" : s}
    dic["u_length"] = len(g)
    # Rate-style features are stored fixed-point, scaled by 10000.
    dic["u_aid_dup_rate"] = int(g["aid"].nunique()/len(g) * 10000)
    if len(g) == 1:
        dic["u_last_ts_diff"] = 0
    else:
        # Seconds between the last two events, +1 so multi-event sessions
        # never collide with the single-event sentinel 0.
        dic["u_last_ts_diff"] = (g["ts"].iloc[-1] - g["ts"].iloc[-2])//1000 + 1
    # Whole-session duration in minutes, and minutes-per-event density.
    dic["u_session_time_length"] = (g["ts"].iloc[-1] - g["ts"].iloc[0])//1000//60
    dic["u_time_density"] = int((dic["u_session_time_length"] / dic["u_length"]) * 10000)
    for l in ["clicks", "carts", "orders"]:
        dic[f"u_{l}"] = sum(g[f"is_{l}"])
        dic[f"u_{l}_rate"] = int(dic[f"u_{l}"] / len(g) * 10000)
    recs.append(dic)
df = pd.DataFrame(recs)
# NOTE(review): uint32 assumed wide enough for every column (session ids,
# counts, scaled rates) -- confirm session ids stay below 2**32.
df = df.astype(np.uint32)
df.to_parquet("../inputs/stats/session_feats.parquet")
21572581652 | '''
抓取百度贴吧---言情小说的基本内容
爬虫线路: requests - bs4
Python版本: 3.6
'''
import requests
import time
from bs4 import BeautifulSoup
def get_html(url):
    """Fetch *url* and return its body decoded as UTF-8.

    Raises on non-2xx responses; requests are capped at a 30 s timeout.
    """
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    response.encoding = 'utf-8'
    return response.text
def get_content(url):
    """Scrape one forum listing page; return a list of {'title': ...} dicts.

    Prints a running counter and each title as it is extracted.
    """
    page = get_html(url)
    soup = BeautifulSoup(page, 'lxml')
    thread_items = soup.find_all('li', attrs={'class': ' j_thread_list clearfix'})
    scraped = []
    for index, item in enumerate(thread_items, start=1):
        print(index)
        title = item.find('a', attrs={'class': 'j_th_tit '}).text.strip()
        print(title)
        scraped.append({'title': title})
    return scraped
def OutFile(comments):
    """Append the scraped titles to TTBT.txt, one per line.

    Fixes: the parameter shadowed the built-in `dict`; a dead counter
    (i=0; i+=1) is removed; the file is now opened with an explicit UTF-8
    encoding so Chinese titles survive regardless of platform default.
    """
    with open('TTBT.txt', 'a+', encoding='utf-8') as f:
        for cc in comments:
            f.write('1: {} \n'.format(cc['title']))
        print('当前页面爬取完成')
def main(base_url, deep):
    """Scrape `deep` listing pages (paginated 50 threads apart) and persist
    every title via OutFile."""
    # Build the paginated URL list up front.
    page_urls = [base_url + '&pn=' + str(50 * page) for page in range(deep)]
    print('所有的网页已经下载到本地! 开始筛选信息。。。。')
    # Scrape each page and append its titles to the output file.
    for url in page_urls:
        content = get_content(url)
        print(content)
        OutFile(content)
    print('所有的信息都已经保存完毕!')
# Tieba search results for the query "言情小说" (romance novels);
# `deep` is the number of listing pages to scrape.
base_url = 'https://tieba.baidu.com/f?ie=utf-8&kw=%E8%A8%80%E6%83%85%E5%B0%8F%E8%AF%B4&fr=search'
deep = 3
if __name__ == '__main__':
    main(base_url, deep)
| Carrie999/Python-spider | tiebaxiaoshuo.py | tiebaxiaoshuo.py | py | 1,629 | python | en | code | 6 | github-code | 13 |
43061439521 | import numpy as np
import math
import copy
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
import torchvision.datasets.cifar as cifar
from torchvision import datasets, transforms
from livelossplot import PlotLosses
import os
from .train_utils import test_binary_model
from .eval_utils import AverageVarMeter
import torch.nn.functional as F
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
def make_MIA_loader_with_target(st_model, dataloader,device,batch_size=128,shuffle=False):
# ori_in = []
ori_pred = []
ori_target = []
ori_ohy = [] #one hot y
st_model.to(device).eval()
with torch.no_grad():
for x,y in dataloader:
ori_in.append(x)
pred = st_model(x.to(device)).detach().cpu()
ori_pred.append(pred)
ori_target.append(y)
ori_ohy.append(F.one_hot(y,num_classes=10))
del x,pred,y
st_model.cpu()
ori_ohy = torch.cat(ori_ohy,dim=0)
ori_pred = torch.cat(ori_pred,dim=0)
data = torch.cat((ori_pred,ori_ohy),dim=1)
labels = torch.cat(ori_target,dim=0)
dataloader_ = DataLoader(TensorDataset(data,labels),batch_size=batch_size, shuffle=shuffle)
return dataloader_
def prepare_MIAattack_loader(st_model, st_trainloader, st_validloader, device, batch_size=128, shuffle = True):
"""
st_trainloader: data loader used for training st_model
st_validloader: data loader not used for training
assume that data laoders have hard labels
use fixed loader for mia attack
"""
ori_ohy = []
# ori_out = []
ori_pred = []
st_model.to(device)
st_model.eval()
with torch.no_grad():
for x, y in st_trainloader:
ori_ohy.append(F.one_hot(y,num_classes=10))
# ori_out.append(F.one_hot(y,num_classes=10))
pred = st_model(x.to(device)).detach().cpu()
ori_pred.append(pred)
del x, pred,y
for x, y in st_validloader:
ori_ohy.append(F.one_hot(y,num_classes=10))
pred = st_model(x.to(device)).detach().cpu()
ori_pred.append(pred)
del x, pred
st_model.cpu()
ori_ohy = torch.cat(ori_ohy,dim=0)
ori_pred = torch.cat(ori_pred,dim=0)
data = torch.cat((ori_pred,ori_ohy),dim=1)
print("data shape:", data.shape)
num_tr = _data_num(st_trainloader)
num_val = _data_num(st_validloader)
labels = torch.cat((torch.ones(num_tr), torch.zeros(num_val)))
print("number of data for training and valid:", num_tr, num_val)
mia_loader = DataLoader(TensorDataset(data, labels), batch_size = batch_size, shuffle=shuffle)
return mia_loader
def _data_num(dataloader):
if type(dataloader.dataset) == torch.utils.data.dataset.Subset:
return len(dataloader.dataset.indices)
elif type(dataloader.dataset) in [cifar.CIFAR100, cifar.CIFAR10]:
return len(dataloader.dataset.targets)
else:
print("not implemented yet")
return
def train_mia_model(mia_att_model, st_model, mia_loader, criterion, optimizer, epochs, device, mia_testloader = None, save_dir = "../results", save_model = "cifar_mia_model.pth"):
mia_att_model.to(device)
logs_clf = {}
best_acc = 0.0
liveloss_tr = PlotLosses()
for epoch in range(epochs):
mia_att_model.train()
for x,y in mia_loader:
x = x.to(device)
y = y.to(device)
mia_att_model.zero_grad()
out = mia_att_model(x)
loss = criterion(out.flatten(), y)
loss.backward()
optimizer.step()
del out, x, y, loss
torch.cuda.empty_cache()
logs_clf['loss'], logs_clf['acc']= test_binary_model(mia_att_model, mia_loader, criterion, device, 100.0, save_dir, save_model)
if mia_testloader is not None:
logs_clf['val_loss'], logs_clf['val_acc']= test_binary_model(mia_att_model, mia_testloader, criterion, device, 0.0, save_dir, save_model)
liveloss_tr.update(logs_clf)
liveloss_tr.send()
return mia_att_model, logs_clf
"""
Systematic Evaluation of Privacy Risks of Machine Learning Models
"""
class black_box_benchmarks(object):
def __init__(self, shadow_train_performance, shadow_test_performance,
target_train_performance, target_test_performance, num_classes):
'''
each input contains both model predictions (shape: num_data*num_classes) and ground-truth labels.
'''
self.num_classes = num_classes
self.s_tr_outputs, self.s_tr_labels = shadow_train_performance
self.s_te_outputs, self.s_te_labels = shadow_test_performance
self.t_tr_outputs, self.t_tr_labels = target_train_performance
self.t_te_outputs, self.t_te_labels = target_test_performance
self.s_tr_corr = (np.argmax(self.s_tr_outputs, axis=1)==self.s_tr_labels).astype(int)
self.s_te_corr = (np.argmax(self.s_te_outputs, axis=1)==self.s_te_labels).astype(int)
self.t_tr_corr = (np.argmax(self.t_tr_outputs, axis=1)==self.t_tr_labels).astype(int)
self.t_te_corr = (np.argmax(self.t_te_outputs, axis=1)==self.t_te_labels).astype(int)
self.s_tr_conf = np.array([self.s_tr_outputs[i, self.s_tr_labels[i]] for i in range(len(self.s_tr_labels))])
self.s_te_conf = np.array([self.s_te_outputs[i, self.s_te_labels[i]] for i in range(len(self.s_te_labels))])
self.t_tr_conf = np.array([self.t_tr_outputs[i, self.t_tr_labels[i]] for i in range(len(self.t_tr_labels))])
self.t_te_conf = np.array([self.t_te_outputs[i, self.t_te_labels[i]] for i in range(len(self.t_te_labels))])
self.s_tr_entr = self._entr_comp(self.s_tr_outputs)
self.s_te_entr = self._entr_comp(self.s_te_outputs)
self.t_tr_entr = self._entr_comp(self.t_tr_outputs)
self.t_te_entr = self._entr_comp(self.t_te_outputs)
self.s_tr_m_entr = self._m_entr_comp(self.s_tr_outputs, self.s_tr_labels)
self.s_te_m_entr = self._m_entr_comp(self.s_te_outputs, self.s_te_labels)
self.t_tr_m_entr = self._m_entr_comp(self.t_tr_outputs, self.t_tr_labels)
self.t_te_m_entr = self._m_entr_comp(self.t_te_outputs, self.t_te_labels)
def _log_value(self, probs, small_value=1e-30):
return -np.log(np.maximum(probs, small_value))
def _entr_comp(self, probs):
return np.sum(np.multiply(probs, self._log_value(probs)),axis=1)
def _m_entr_comp(self, probs, true_labels):
log_probs = self._log_value(probs)
reverse_probs = 1-probs
log_reverse_probs = self._log_value(reverse_probs)
modified_probs = np.copy(probs)
modified_probs[range(true_labels.size), true_labels] = reverse_probs[range(true_labels.size), true_labels]
modified_log_probs = np.copy(log_reverse_probs)
modified_log_probs[range(true_labels.size), true_labels] = log_probs[range(true_labels.size), true_labels]
return np.sum(np.multiply(modified_probs, modified_log_probs),axis=1)
def _thre_setting(self, tr_values, te_values):
value_list = np.concatenate((tr_values, te_values))
thre, max_acc = 0, 0
for value in value_list:
tr_ratio = np.sum(tr_values>=value)/(len(tr_values)+0.0)
te_ratio = np.sum(te_values<value)/(len(te_values)+0.0)
acc = 0.5*(tr_ratio + te_ratio)
if acc > max_acc:
thre, max_acc = value, acc
return thre
def _mem_inf_via_corr(self):
# perform membership inference attack based on whether the input is correctly classified or not
t_tr_acc = np.sum(self.t_tr_corr)/(len(self.t_tr_corr)+0.0)
t_te_acc = np.sum(self.t_te_corr)/(len(self.t_te_corr)+0.0)
mem_inf_acc = 0.5*(t_tr_acc + 1 - t_te_acc)
print('For membership inference attack via correctness, the attack acc is {acc1:.3f}, with train acc {acc2:.3f} and test acc {acc3:.3f}'.format(acc1=mem_inf_acc, acc2=t_tr_acc, acc3=t_te_acc) )
return
def _mem_inf_thre(self, v_name, s_tr_values, s_te_values, t_tr_values, t_te_values):
# perform membership inference attack by thresholding feature values: the feature can be prediction confidence,
# (negative) prediction entropy, and (negative) modified entropy
t_tr_mem, t_te_non_mem = 0, 0
for num in range(self.num_classes):
thre = self._thre_setting(s_tr_values[self.s_tr_labels==num], s_te_values[self.s_te_labels==num])
t_tr_mem += np.sum(t_tr_values[self.t_tr_labels==num]>=thre)
t_te_non_mem += np.sum(t_te_values[self.t_te_labels==num]<thre)
mem_inf_acc = 0.5*(t_tr_mem/(len(self.t_tr_labels)+0.0) + t_te_non_mem/(len(self.t_te_labels)+0.0))
print('For membership inference attack via {n}, the attack acc is {acc:.3f}'.format(n=v_name,acc=mem_inf_acc))
return
def _mem_inf_benchmarks(self, all_methods=True, benchmark_methods=[]):
if (all_methods) or ('correctness' in benchmark_methods):
self._mem_inf_via_corr()
if (all_methods) or ('confidence' in benchmark_methods):
self._mem_inf_thre('confidence', self.s_tr_conf, self.s_te_conf, self.t_tr_conf, self.t_te_conf)
if (all_methods) or ('entropy' in benchmark_methods):
self._mem_inf_thre('entropy', -self.s_tr_entr, -self.s_te_entr, -self.t_tr_entr, -self.t_te_entr)
if (all_methods) or ('modified entropy' in benchmark_methods):
self._mem_inf_thre('modified entropy', -self.s_tr_m_entr, -self.s_te_m_entr, -self.t_tr_m_entr, -self.t_te_m_entr)
return | psr6275/HEDL_inference | codes/utils/mia_utils.py | mia_utils.py | py | 10,204 | python | en | code | 0 | github-code | 13 |
559899514 | import pygame
from constants import *
from utils import *
from titlebar import Titlebar
from board import Board
from assetloader import AssetLoader
from diffselect import DifficultySelector
class Game:
board = None
solution = None
def __init__(self):
self.win_anim = 0
self.titlebar = Titlebar()
if Game.board is None:
Game.board = Board()
self.diffselect = DifficultySelector()
AssetLoader.init()
def draw(self, screen: pygame.Surface):
if AssetLoader.muted:
AssetLoader.sound_channels[0].set_volume(0)
else:
AssetLoader.sound_channels[0].set_volume(1)
Game.board.draw(screen)
if self.diffselect.shown:
screen.blit(AssetLoader.darken, (0, 0))
self.titlebar.draw(screen)
self.diffselect.draw(screen)
if Game.has_won():
if self.win_anim == 0:
AssetLoader.play_sound(AssetLoader.win_sound, 0.6)
Game.solution = None
self.win_anim += 1/FRAMERATE
else:
self.win_anim = 0
if 1.3 > self.win_anim > 0:
return
if 1.6 > self.win_anim > 1.3:
self.diffselect.shown = True
def handle_event(self, event: pygame.event.Event):
self.titlebar.handle_event(event)
if self.diffselect.shown:
handle = self.diffselect.handle_event(event)
if handle is not None:
self.diffselect.shown = False
Game.board.set_difficulty(handle)
Game.board.randomize_game()
return
Game.board.handle_event(event)
@staticmethod
def has_won():
return all([stack.complete for stack in Game.board.stacks if len(stack.cards)]) \
and any([len(stack.cards) for stack in Game.board.stacks])
| quasar098/kabufuda-solitaire | game.py | game.py | py | 1,866 | python | en | code | 2 | github-code | 13 |
32077973068 | #!/usr/bin/env python
"""Python script for assembly comparison.
"""
from __future__ import print_function
import os
import sys
import warnings
from optparse import OptionParser
from Bio import BiopythonExperimentalWarning
with warnings.catch_warnings():
warnings.simplefilter("ignore", BiopythonExperimentalWarning)
from Bio import SearchIO
from Bio import SeqIO
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.Graphics import GenomeDiagram
from Bio.Graphics.GenomeDiagram import CrossLink
from Bio.SeqFeature import FeatureLocation, SeqFeature
from reportlab.lib import colors
from reportlab.lib.units import cm
# TODO - make into command line options
SPACER = 10000
MIN_GAP_JAGGY = 1000 # Sigils
MIN_GAP = 20000
usage = """Basic usage: assembly_comparison.py assembly.fasta reference.fasta
If a reference GenBank file exists next to the reference FASTA file but
with the extension *.gbk, that will be loaded to draw any annotated genes.
There should be a (nucleotide) BLAST database next to the reference FASTA
file, created with some thing like this such that the BLAST database files
are named reference.fasta.n* and the database is referenced simply as
reference.fasta when calling blastn:
$ makeblastdb -dbtype nucl -in reference.fasta
The optional output filename is if you wish the tool to produce a copy of
the input assembly with the contigs reordered and in some cases reverse
complemented to match the mapping. WARNING - while generally the ordering
matches what you might expect for biologically meaningful output, corner
cases will not.
"""
def hack_ncbi_fasta_name(pipe_name):
    """Strip NCBI/Prokka pipe-wrapping from a sequence identifier.

    'gi|445210138|gb|CP003959.1|' -> 'CP003959.1'
    'gnl|Prokka|contig000001'     -> 'contig000001'

    Anything else is returned unchanged, so contig names from FASTA and
    GenBank files can be matched up.
    """
    fields = pipe_name.split("|")
    if pipe_name.startswith("gi|") and pipe_name.endswith("|"):
        return fields[3]
    if pipe_name.startswith("gnl|") and len(fields) == 3:
        return fields[2]
    return pipe_name
def sys_exit(msg, error_level=1):
    """Write *msg* (rstripped, newline-terminated) to stderr, then exit
    with the given status code."""
    print(msg.rstrip(), file=sys.stderr)
    sys.exit(error_level)
# Command-line interface: two positional FASTA files plus optional outputs.
parser = OptionParser(usage=usage)
parser.add_option(
    "-f",
    "--fasta",
    dest="fasta_filename",
    help="Write ordered FASTA file to FILE (default is off)",
    default=None,
    metavar="FILE",
)
parser.add_option(
    "-m",
    "--min-contig-len",
    dest="min_len",
    type="int",
    help="Minimum contig length for FASTA output (if no BLAST hit)",
    default=0,
)
parser.add_option(
    "-l",
    "--min-hit-len",
    dest="min_hit",
    type="int",
    help="Minimum BLAST hit length to consider",
    default=5000,
)
parser.add_option(
    "-o",
    "--output",
    dest="pdf_filename",
    help="Write PDF diagram to FILE (default automatic)",
    default=None,
    metavar="FILE",
)
parser.add_option(
    "-b",
    "--blast",
    dest="blast_filename",
    help="Use/write BLAST tabular output to FILE (default automatic)",
    default=None,
    metavar="FILE",
)
parser.add_option(
    "-u",
    "--unmapped",
    dest="unmapped",
    help="Show unmapped contigs on the outer tracks",
    action="store_true",
)
(options, args) = parser.parse_args()

if len(args) != 2:
    sys_exit("Requires two arguments!\n\n" + usage)
assembly_fasta, reference_fasta = args
output_fasta = options.fasta_filename
blast_file = options.blast_filename
diagram_pdf = options.pdf_filename
min_len = int(options.min_len)
min_hit = int(options.min_hit)

# Optional annotation file is found by swapping the reference's extension.
reference_genbank = os.path.splitext(reference_fasta)[0] + ".gbk"

# Default output stem "<assembly>_vs_<reference>", placed next to whichever
# explicit output was given (PDF, then BLAST, then FASTA), else the cwd.
output_stem = "%s_vs_%s" % (
    os.path.splitext(os.path.basename(assembly_fasta))[0],
    os.path.splitext(os.path.basename(reference_fasta))[0],
)
if diagram_pdf:
    output_stem = os.path.join(os.path.dirname(diagram_pdf), output_stem)
elif blast_file:
    output_stem = os.path.join(os.path.dirname(blast_file), output_stem)
elif output_fasta:
    output_stem = os.path.join(os.path.dirname(output_fasta), output_stem)
else:
    # Default to current directory
    pass

if not blast_file:
    blast_file = output_stem + ".blast.tsv"
if not diagram_pdf:
    diagram_pdf = output_stem + ".blast.pdf"

if not os.path.isfile(assembly_fasta):
    sys_exit("Assembly FASTA file not found: %r" % assembly_fasta)

if not os.path.isfile(reference_fasta):
    sys_exit("Reference FASTA file not found: %r" % reference_fasta)
def do_blast(query_fasta, db_fasta, blast_file):
    """Run blastn of query_fasta against db_fasta, writing tabular output
    (outfmt 6, e-value 1e-5) to blast_file.

    Requires a pre-built nucleotide BLAST database (*.nhr/.nin/.nsq files)
    next to db_fasta; exits via sys_exit if any of them is missing.
    """
    assert os.path.isfile(query_fasta)
    assert os.path.isfile(db_fasta)
    if not (
        os.path.isfile(db_fasta + ".nhr")
        and os.path.isfile(db_fasta + ".nin")
        and os.path.isfile(db_fasta + ".nsq")
    ):
        sys_exit("Missing BLAST database for %s" % db_fasta)
    cmd = NcbiblastnCommandline(
        query=query_fasta, db=db_fasta, out=blast_file, outfmt=6, evalue=1e-5
    )
    print(cmd)
    stdout, stderr = cmd()
    return
# Run BLAST only if the tabular output is not already cached on disk.
if not os.path.isfile(blast_file):
    do_blast(assembly_fasta, reference_fasta, blast_file)

contigs = SeqIO.index(assembly_fasta, "fasta")
blast_results = SearchIO.index(blast_file, "blast-tab")

# Total diagram length: all reference records laid end-to-end with an
# artificial SPACER gap between consecutive records.
max_len = 0
for record in SeqIO.parse(reference_fasta, "fasta"):
    max_len += SPACER + len(record)
max_len -= SPACER

# Prefer the annotated GenBank file (for gene features) when present.
if os.path.isfile(reference_genbank):
    reference_parser = SeqIO.parse(reference_genbank, "genbank")
else:
    reference_parser = SeqIO.parse(reference_fasta, "fasta")

if output_fasta:
    sys.stderr.write(
        "WARNING - Consider using order_assembly.py instead for FASTA output\n"
    )
    fasta_handle = open(output_fasta, "w")
    fasta_saved_count = 0
    fasta_short_dropped = 0

gd_diagram = GenomeDiagram.Diagram("Comparison")
gd_track_for_features = gd_diagram.new_track(
    1, name="reference", greytrack=False, height=0.5, start=0, end=max_len
)
gd_feature_set = gd_track_for_features.new_set()

# Add a dark grey background
# NOTE(review): `record` here is the *last* record from the length-counting
# loop above, so the background only spans that record's length -- looks
# like max_len was intended; confirm.
gd_feature_set.add_feature(
    SeqFeature(FeatureLocation(0, len(record))), sigil="BOX", color="grey", label=False
),

# Lay each reference record end-to-end, recording its start offset (used
# later to map BLAST hit coordinates onto the concatenated axis), with a
# JAGGY sigil drawn in each SPACER gap.
offset = 0
ref_offsets = dict()
for record in reference_parser:
    if offset > 0:
        # Add Jaggy
        # print("Adding jaggy from %i to %i" % (offset, offset+SPACER))
        gd_feature_set.add_feature(
            SeqFeature(FeatureLocation(offset, offset + SPACER)),
            sigil="JAGGY",
            color=colors.slategrey,
            border=colors.black,
        )
        offset += SPACER
    ref_offsets[hack_ncbi_fasta_name(record.id)] = offset
    # print("Adding %s to inner reference track at offset %i" % (record.id, offset))

    # Add feature for whole contig,
    loc = FeatureLocation(offset, offset + len(record), strand=0)
    gd_feature_set.add_feature(
        SeqFeature(loc),
        color=colors.grey,
        border=colors.black,
        label=True,
        name=record.id,
    )

    # Draw any annotated genes (only present when the GenBank file loaded).
    for feature in record.features:
        if feature.type != "gene":
            continue
        feature.location += offset
        gd_feature_set.add_feature(
            feature,
            sigil="BOX",
            color="lightblue",
            label=True,
            label_position="start",
            label_size=6,
            label_angle=0,
        )
    offset += len(record)
assert max_len == offset, "%r vs %r" % (max_len, offset)

gd_record_features = gd_track_for_features.new_set()
def reverse_complement_hsp_fragment(frag, query_length):
    """Return a copy of *frag* with query coordinates mapped onto the
    reverse-complemented query and the hit strand inverted."""
    rev = SearchIO.HSPFragment(hit_id=frag.hit_id, query_id=frag.query_id)
    rev.query_start = query_length - frag.query_end
    rev.query_end = query_length - frag.query_start
    rev.hit_start = frag.hit_start
    rev.hit_end = frag.hit_end
    # Flip +1 <-> -1; leave 0/None (strandless or unknown) untouched.
    rev.hit_strand = {1: -1, -1: 1}.get(frag.hit_strand, frag.hit_strand)
    return rev
def reverse_complement_hsp(hsp, query_length):
    """Reverse-complement every fragment of *hsp* (fragment order is
    reversed too), preserving the percent identity."""
    flipped_frags = [
        reverse_complement_hsp_fragment(frag, query_length)
        for frag in reversed(hsp.fragments)
    ]
    rev = SearchIO.HSP(fragments=flipped_frags)
    rev.ident_pct = hsp.ident_pct
    return rev
def filter_blast(blast_result, query_length):
    """Filter a contig's BLAST result and decide its orientation.

    Keeps only HSPs at least `min_hit` long (module-level option).  If most
    aligned bases sit on the minus strand the contig is flagged as flipped
    and every HSP is reverse-complemented so drawing stays consistent.

    Returns (offset, contig_id, hsps, flipped).
    """
    hsps = sorted(
        (h for h in blast_result.hsps if (h.query_end - h.query_start) >= min_hit),
        key=lambda h: h.hit_start,
    )
    minus_bases = sum(h.hit_end - h.hit_start for h in hsps if h.hit_strand == -1)
    plus_bases = sum(h.hit_end - h.hit_start for h in hsps if h.hit_strand != -1)
    flipped = minus_bases > plus_bases
    if flipped:
        # Reverse the contig's HSPs and re-sort by reference position.
        hsps = sorted(
            (reverse_complement_hsp(h, query_length) for h in hsps),
            key=lambda h: h.hit_start,
        )
    return make_offset(hsps, query_length), blast_result.id, hsps, flipped
def weighted_median(values_and_weights, tie_break=True):
    """Median of values with integer weights.

    *values_and_weights* is a sequence of (value, weight) pairs, expected
    to be ordered by value (the caller pre-sorts); each value is treated
    as repeated *weight* times.

    With tie_break=True the mean of the two middle elements is returned
    (a value possibly not present in the data); with tie_break=False the
    lower middle element is returned.

    Python 3 bug fixes: the original used map() purely for its side effect
    (map is lazy in Python 3, so the expanded list stayed empty and
    indexing raised), and used true division `count / 2` whose float
    result cannot index a list.  Both replaced with explicit loop /
    floor division.
    """
    expanded = []
    for value, weight in values_and_weights:
        expanded.extend([value] * weight)
    count = len(expanded)
    if tie_break:
        # This can give the mean of the mid-points, with the side effect of
        # sometimes using an artificial offset not present in the data.
        return (expanded[count // 2] + expanded[(count - 1) // 2]) / 2.0
    # Approximately the median - takes the lower of the mid two values.
    return expanded[count // 2]
def make_offset(blast_hsps, contig_len):
    """Choose a horizontal diagram offset for a contig from its BLAST hits.

    The offset is the HSP-length-weighted median of each hit's implied
    contig start on the concatenated reference axis (via the module-level
    ref_offsets table), clamped so the contig stays inside the diagram.
    Contigs with no hits default to offset 0.
    """
    if not blast_hsps:
        return 0
    # (implied start position, HSP length) pairs for the weighted median.
    positions = [
        (
            ref_offsets[hack_ncbi_fasta_name(hsp.hit_id)]
            + hsp.hit_start
            - hsp.query_start,
            hsp.hit_end - hsp.hit_start,
        )
        for hsp in blast_hsps
    ]
    offset = int(weighted_median(positions, tie_break=False))
    return min(max(0, offset), max_len - contig_len)
def add_jaggies(contig_seq, offset, gd_contig_features):
    """Add JAGGY features for any run of NNNN or XXXX in sequence."""
    seq = str(contig_seq).upper().replace("X", "N")
    gap_marker = "N" * MIN_GAP_JAGGY
    pos = 0
    while True:
        start = seq.find(gap_marker, pos)
        if start == -1:
            return
        # Extend to the end of the run of N's.
        end = start
        while end < len(seq) and seq[end] == "N":
            end += 1
        gd_contig_features.add_feature(
            SeqFeature(FeatureLocation(offset + start, offset + end)),
            sigil="JAGGY",
            color=colors.slategrey,
            border=colors.black,
        )
        # The character at ``end`` is not an N, so the next run cannot
        # start before end + 1.
        pos = end + 1
# ---------------------------------------------------------------------------
# Main placement pass: sort contigs by their reference position, then draw a
# grey feature (plus gap jaggies and cross-links) for each placed contig.
# ---------------------------------------------------------------------------
# Yes, this does end up parsing the entire FASTA file :(
contig_total_bp = sum(len(contigs[contig_id]) for contig_id in contigs)
# Sort the contigs by horizontal position on the diagram
# (yes, this does mean parsing the entire BLAST output)
# (and yes, also the FASTA file to get the query lengths)
blast_data = sorted(
    filter_blast(b, len(contigs[b.id])) for b in SearchIO.parse(blast_file, "blast-tab")
)
contigs_shown = set()
contigs_shown_bp = 0
contig_tracks = []
for offset, contig_id, blast_hsps, flipped in blast_data:
    # TODO - Use BLAST query length instead of parsing FASTA file?
    contig = contigs[contig_id]
    contig_len = len(contig)
    if not blast_hsps:
        # Works, but only if contig appears in BLAST output at all
        # contigs_not_shown_bp += contig_len
        continue
    contigs_shown.add(contig_id)
    contigs_shown_bp += contig_len
    if output_fasta:
        # Write the (possibly reverse-complemented) contig to the output FASTA.
        if contig_len < min_len:
            print(
                "Note %s had BLAST hit but was only length %i" % (contig_id, contig_len)
            )
        if flipped:
            SeqIO.write(
                contigs[contig_id].reverse_complement(
                    id=True, name=True, description="reversed"
                ),
                fasta_handle,
                "fasta",
            )
        else:
            # Fast provided don't need to take reverse complement
            fasta_handle.write(contigs.get_raw(contig_id))
        fasta_saved_count += 1
    if contig_len > max_len:
        print(
            "WARNING: Contig %s length %i, reference %i"
            % (contig_id, contig_len, max_len)
        )
        # Add entire track for the oversized contig...
        gd_track_for_contig = gd_diagram.new_track(
            3, name=contig_id, greytrack=False, height=0.5, start=0, end=max_len
        )
        gd_contig_features = gd_track_for_contig.new_set()
        contig_len = max_len
        # Do not add track to the pool for reuse, add red feature for whole
        # contig,
        loc = FeatureLocation(0, max_len, strand=0)
        gd_contig_features.add_feature(
            SeqFeature(loc),
            color=colors.red,
            border=colors.black,
            label=True,
            name=contig_id,
        )
        offset = 0
    else:
        offset = min(max(0, offset), max_len - contig_len)
        # Which track can we put this on?
        gd_track_for_contig = None
        gd_contig_features = None
        # print "%s needs offset %i" % (contig_id, offset)
        for track, fs in contig_tracks:
            # TODO - Can we calculate max of features instead of _used hack?
            if fs._used + MIN_GAP < offset:
                # Good, will fit in this track
                gd_track_for_contig = track
                gd_contig_features = fs
                break
        if not gd_track_for_contig:
            # print "Have %i tracks, adding one more" % len(contig_tracks)
            # 1 = references, 2 = gap, 3+ = contigs
            gd_track_for_contig = gd_diagram.new_track(
                3, name=contig_id, greytrack=False, height=0.5, start=0, end=max_len
            )
            gd_contig_features = gd_track_for_contig.new_set()
            contig_tracks.append((gd_track_for_contig, gd_contig_features))
    # Add feature for whole contig,
    loc = FeatureLocation(offset, offset + contig_len, strand=0)
    gd_contig_features.add_feature(
        SeqFeature(loc),
        color=colors.grey,
        border=colors.black,
        label=True,
        name=contig_id,
    )
    gd_contig_features._used = offset + contig_len
    if flipped:
        add_jaggies(contig.seq.reverse_complement(), offset, gd_contig_features)
    else:
        add_jaggies(contig.seq, offset, gd_contig_features)
    # print "%s (len %i) offset %i" % (contig_id, contig_len, offset)
    # Add cross-links,
    for hsp in blast_hsps:
        # print "%s:%i-%i hits %s:%i-%i" % (hsp.query_id, hsp.query_start, hsp.query_end,
        #                hsp.hit_id, hsp.hit_start, hsp.hit_end)
        # Colour scheme: green/purple for flipped contigs, blue/firebrick for
        # unflipped, split by the original hit strand.
        if flipped:
            if hsp.hit_strand == -1:
                flip = True
                color = colors.darkgreen
            else:
                flip = False
                color = colors.purple
        else:
            if hsp.hit_strand == -1:
                flip = True
                color = colors.blue
            else:
                flip = False
                color = colors.firebrick
        border = colors.lightgrey
        # Fade the colour based on percentage identify, 100% identity = 50%
        # transparent
        color = colors.Color(
            color.red, color.green, color.blue, alpha=(hsp.ident_pct / 200.0)
        )
        loc = FeatureLocation(
            offset + hsp.query_start, offset + hsp.query_end, strand=0
        )
        q = gd_contig_features.add_feature(SeqFeature(loc), color=color, border=border)
        r_offset = ref_offsets[hack_ncbi_fasta_name(hsp.hit_id)]
        loc = FeatureLocation(
            r_offset + hsp.hit_start, r_offset + hsp.hit_end, strand=0
        )
        h = gd_record_features.add_feature(SeqFeature(loc), color=color, border=border)
        gd_diagram.cross_track_links.append(CrossLink(q, h, color, border, flip))
# ---------------------------------------------------------------------------
# Second pass: contigs with no accepted BLAST hit are written to the optional
# FASTA output and (with --unmapped) drawn on extra outer tracks; finish with
# summary statistics and the circular PDF.
# ---------------------------------------------------------------------------
# Now add the unmatched contigs on outside
position = 0
gd_contig_features = None
unplaced = 0
for contig in SeqIO.parse(assembly_fasta, "fasta"):
    contig_id = contig.id
    if contig_id in contigs_shown:
        continue
    # print("Adding unmapped contig %s (len %i bp), offset now %i" % (contig_id, contig_len, position))
    unplaced += 1
    contig_len = len(contig)
    if output_fasta:
        # Unplaced contigs are written unmodified, but short ones are dropped.
        if min_len <= contig_len:
            fasta_handle.write(contigs.get_raw(contig_id))
            fasta_saved_count += 1
        else:
            fasta_short_dropped += 1
    if options.unmapped:
        if contig_len > max_len:
            print(
                "WARNING: Contig %s length %i, reference %i"
                % (contig_id, contig_len, max_len)
            )
            # Add entire track for the oversized contig...
            gd_track_for_contig = gd_diagram.new_track(
                max(gd_diagram.tracks) + 1,
                name=contig_id,
                greytrack=False,
                height=0.5,
                start=0,
                end=max_len,
            )
            gd_contig_features = gd_track_for_contig.new_set()
            contig_len = max_len
            # Do not add track to the pool for reuse, add red feature for whole
            # contig,
            loc = FeatureLocation(0, max_len, strand=0)
            gd_contig_features.add_feature(
                SeqFeature(loc),
                color=colors.red,
                border=colors.black,
                label=True,
                name=contig_id,
            )
        else:
            # Which track can we put this on?
            if (
                gd_contig_features is not None
                and position + MIN_GAP + contig_len < max_len
            ):
                # Good, will fit on current
                position += MIN_GAP
            else:
                # print("Having to add another track for %s (len %i bp)" % (contig_id, contig_len))
                gd_track_for_contig = gd_diagram.new_track(
                    max(gd_diagram.tracks) + 1,
                    name=contig_id,
                    greytrack=False,
                    height=0.5,
                    start=0,
                    end=max_len,
                )
                gd_contig_features = gd_track_for_contig.new_set()
                position = 0
            # Add feature for whole contig,
            loc = FeatureLocation(position, position + contig_len, strand=0)
            gd_contig_features.add_feature(
                SeqFeature(loc),
                color=colors.grey,
                border=colors.black,
                label=True,
                name=contig_id,
            )
            # Add jaggy sigils for any gaps
            add_jaggies(contig.seq, position, gd_contig_features)
            position += contig_len
# Sanity check: every contig was either placed in pass one or counted here.
assert unplaced == len(contigs) - len(
    contigs_shown
), "Only processed %i unplaced contigs, expected %i" % (
    unplaced,
    len(contigs) - len(contigs_shown),
)
print(
    "Placed: %i of the %i contigs/scaffolds, %i bp"
    % (len(contigs_shown), len(contigs), contigs_shown_bp)
)
print(
    "Unplaced: %i contigs/scaffolds, %i bp"
    % (len(contigs) - len(contigs_shown), contig_total_bp - contigs_shown_bp)
)
print(
    "i.e. Placed %0.f%% of the assembly" % (contigs_shown_bp * 100.0 / contig_total_bp)
)
if output_fasta:
    print("Wrote %i records to %r" % (fasta_saved_count, output_fasta))
    print("Dropped %i short records" % fasta_short_dropped)
    fasta_handle.close()
    if fasta_saved_count + fasta_short_dropped != len(contigs):
        sys_exit(
            "Should have written %i records!" % (len(contigs) - fasta_short_dropped)
        )
if not contigs_shown:
    print("Nothing to do for PDF")
    sys.exit(0)
# Render the final circular comparison figure.
page = (100 * cm, 100 * cm)
gd_diagram.draw(
    format="circular",
    circular=True,
    circle_core=0.5,
    pagesize=page,
    start=0,
    end=max_len,
)
gd_diagram.write(diagram_pdf, "PDF")
print("Saved %r" % diagram_pdf)
| peterjc/picobio | assembly_comparison/assembly_comparison.py | assembly_comparison.py | py | 20,141 | python | en | code | 44 | github-code | 13 |
177682708 | from django.conf.urls import url
from . import views
# URL routes for the activities app: activity CRUD, auth, scheduling and search.
# NOTE(review): ``django.conf.urls.url`` was deprecated in Django 3.1 and
# removed in 4.0 — migrate to ``django.urls.re_path`` (or ``path``) before
# upgrading.
urlpatterns = [
    url(r'^$',views.index, name = 'index'),
    # The capture group ([1-9][0-9]*) is the positive-integer activity id.
    url(r'^activity/([1-9][0-9]*)$', views.activity, name='activity'),
    url(r'^activity/submit/([1-9][0-9]*)$', views.activity_submit, name='activity-submit'),
    url(r'^activity/unsubmit/([1-9][0-9]*)$',views.activity_unsubmit,name='activity-unsubmit'),
    url(r'^activity/delete/([1-9][0-9]*)$', views.activity_delete, name='activity-delete'),
    url(r'^create', views.create, name='create'),
    # Authentication / account pages.
    url(r'^login', views.login, name='login'),
    url(r'^authenticate$', views.authenticate, name='authenticate'),
    url(r'^signup$', views.signup, name='signup'),
    url(r'^signup/submit$', views.signup_submit, name='signup-submit'),
    url(r'^logout$', views.logout, name='logout'),
    url(r'^homepage$', views.homepage, name='homepage'),
    url(r'^manage$',views.manage,name='manage'),
    url(r'^userinfo$',views.userinfo,name='userinfo'),
    url(r'^userinfo/submit$', views.userinfo_submit, name='userinfo-submit'),
    # Scheduling strategies (dynamic programming by priority / amount).
    url(r'^schedule/priority$',views.dp_schedule_by_priority,name='schedule-priority'),
    url(r'^schedule/amount$',views.dp_schedule_by_amount,name='schedule-amount'),
    url(r'^upload$', views.upload_file, name='upload'),
    url(r'^exception$',views.exception, name='exception'),
    url(r'^search$',views.search, name='search'),
    url(r'^search/result$',views.search_result, name='search-result'),
    url(r'^help$',views.help, name='help'),
]
| owen6314/scheduleWebsite | ActivitiesManager/urls.py | urls.py | py | 1,497 | python | en | code | 0 | github-code | 13 |
15496665205 | from tkinter import *
from tkinter import messagebox
from tkinter import ttk
import time
# Timestamp captured once at startup; it is written ahead of each saved note.
named_tuple = time.localtime()
time_string = time.strftime("%d/%m/%Y, %H:%M", named_tuple)
print(time_string)
# Main window configuration.
gui = Tk()
gui.title('My Note')
gui.geometry("600x400")
gui.configure(bg='#e6e5da')
# Shared fonts used by the widgets below.
Font = ('Bahnschrift',16)
Font2 = ('Bahnschrift',12)
def Clear():
    """Reset both entry fields (text and file name) to empty strings."""
    for field in (t_text, f_file):
        field.set('')
def File():
    """Save the entered text, prefixed by the startup timestamp, to the chosen file.

    The radio selection picks the open mode:
      1 -> 'r+' : echo the existing content, then write at end-of-file
      2 -> 'w'  : truncate and write
      else -> 'a' : append
    Afterwards an info dialog is shown and both entry fields are cleared.

    Fix: files are now opened with context managers so the handle is closed
    even if reading/writing raises (the original leaked the handle on error).
    """
    text = t_text.get()
    file_p = f_file.get()
    radio = r_radio.get()
    if radio == 1:
        with open(file_p, 'r+', encoding='utf-8') as f:
            # read() echoes the current content and leaves the file position
            # at EOF, so the writes below append.
            print(f.read())
            f.write(time_string + '\n')
            f.write('\t' + text + '\n')
    elif radio == 2:
        with open(file_p, 'w', encoding='utf-8') as f:
            f.write(time_string + '\n')
            f.write('\t' + text + '\n')
    else:
        with open(file_p, 'a', encoding='utf-8') as f:
            f.write(time_string + '\n')
            f.write('\t' + text + '\n')
    messagebox.showinfo('เเจ้งเตือน','\tต้องการบันทึกไฟล์ใช่ไหม\t')
    print('----บันทึกไฟล์สำเร็จ----')
    t_text.set('')
    f_file.set('')
t_text = StringVar()
E1 = ttk.Entry(gui,textvariable=t_text,width=20,font=Font).place(x=280,y=30)
f_file = StringVar()
E2 = ttk.Entry(gui,textvariable=f_file,width=20,font=Font).place(x=280,y=80)
L1 = Label (gui,text=' Enter Text ',bg='#e6e5da',font=Font,fg='#663f0b').place(x=60,y=30)
L2 = Label (gui,text=' Enter File Name ',bg='#e6e5da',font=Font,fg='#663f0b').place(x=60,y=80)
r_radio = IntVar()
R1 = Radiobutton(gui,text='อ่านไฟล์เเละเขียนไฟล์',variable=r_radio,value=1,bg='#e6e5da',font=Font,fg='#663f0b').place(x=270,y=140)
R2 = Radiobutton(gui,text='เขียนไฟล์',variable=r_radio,value=2,bg='#e6e5da',font=Font,fg='#663f0b').place(x=270,y=200)
R2 = Radiobutton(gui,text='ต่อท้ายไฟล์',variable=r_radio,value=3,bg='#e6e5da',font=Font,fg='#663f0b').place(x=270,y=260)
icon_b1 = PhotoImage(file='clear.png')
B1 = Button(gui,text=' Clear',width=100,image=icon_b1,bg='#66d8de',compound='left',font=Font2,fg='#663f0b',command=Clear).place(x=60,y=320)
icon_b2 = PhotoImage(file='ok.png')
B2 = Button(gui,text=' OK',width=100,image=icon_b2,bg='#66d8de',compound='left',font=Font2,fg='#663f0b',command=File).place(x=240,y=320)
icon_b3 = PhotoImage(file='cancle.png')
B3 = Button(gui,text=' Cancle ',width=100,image=icon_b3,bg='#66d8de',compound='left',font=Font2,fg='#663f0b',command=gui.destroy).place(x=430,y=320)
gui.mainloop()
| zawaneemakeng/GUI-Read-Write-Append | GUIReadWriteAppend/GUI.py | GUI.py | py | 2,600 | python | en | code | 0 | github-code | 13 |
37861908843 | from .voigtAstro import VoigtAstroP
from PyAstronomy import funcFit as fuf
from PyAstronomy.pyasl import convertDampingConstant
import numpy as np
class LyaTransmission(fuf.OneDFit):
    """
    Lyman alpha transmission profile including Deuterium absorption.

    The transmission is given by

    .. math::
        e^{-\\sigma N}\\, ,

    where N is the column density and :math:`\\sigma` is the wavelength-dependent
    cross-section.

    *Fit Parameters*:

    =====  =============================  =====
    N      Hydrogen column density        /cm^2
    b      Doppler parameter              km/s
    Dfrac  Deuterium fraction             --
    =====  =============================  =====

    Parameters
    ----------
    N : float, optional
        Hydrogen column density [/cm^2]. The default is 0.
    b : float, optional
        The Doppler parameter (corresponds to sqrt(2) times
        the velocity dispersion) to model thermal line width [km/s].
        The default is 10 km/s.
    D_fraction : float, optional
        Fractional abundance of Deuterium with respect to Hydrogen.
        The default is 1.5e-5.
    """

    def __init__(self, N=0.0, b=10.0, D_fraction=1.5e-5):
        fuf.OneDFit.__init__(self, ["N", "b", "Dfrac"], rootName="LyaTransmission")
        # Bug fix: the N argument was previously accepted but never stored,
        # so the column density silently stayed at the framework default.
        self["N"] = N
        self["b"] = b
        self["Dfrac"] = D_fraction
        # (Only) Einstein coefficient relevant for LyA
        elya = 6.258085e8
        # Hydrogen Lyman-alpha absorption profile (1215.67 A).
        self._absH = VoigtAstroP()
        self._absH["w0"] = 1215.67
        self._absH["b"] = self["b"]
        self._absH["gamma"] = convertDampingConstant(elya, 1215.67)
        self._absH["f"] = 0.416
        # Deuterium, its width is sqrt(2) times smaller
        self._absD = VoigtAstroP()
        self._absD["w0"] = 1215.34
        self._absD["b"] = self["b"] / np.sqrt(2.0)
        self._absD["gamma"] = convertDampingConstant(elya, 1215.67)
        self._absD["f"] = 0.416

    def evaluate(self, x):
        """
        Evaluate the transmission profile.

        Parameters
        ----------
        x : array of floats
            Contains the wavelength values in Angstrom.

        Returns
        -------
        Model : array of floats
            The line profile.
        """
        # Propagate the (possibly updated) Doppler parameter to both profiles
        # before evaluating.
        self._absH["b"] = self["b"]
        self._absD["b"] = self["b"] / np.sqrt(2.0)
        y = np.exp(-self["N"] * self._absH.evaluate(x)) * \
            np.exp(-self["N"] * self["Dfrac"] * self._absD.evaluate(x))
        return y
| sczesla/PyAstronomy | src/modelSuite/LyAProfile.py | LyAProfile.py | py | 2,392 | python | en | code | 134 | github-code | 13 |
37829991998 | # coding: utf-8
"""
OpenAPI Petstore
This is a sample server Petstore server. For this sample, you can use the api key `special-key` to test the authorization filters. # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://github.com/openapi-json-schema-tools/openapi-json-schema-generator
"""
from openapi_client.shared_imports.schema_imports import *
class Schema(
    schemas.DictSchema
):
    # Auto-generated model (OpenAPI JSON Schema Generator); presumably the
    # store-inventory map with int32 values — do not edit by hand.
    class Schema_:
        types = {frozendict.frozendict}
        AdditionalProperties = schemas.Int32Schema

    def __getitem__(self, name: str) -> Schema_.AdditionalProperties:
        # dict_instance[name] accessor
        return super().__getitem__(name)

    def __new__(
        cls,
        *args_: typing.Union[dict, frozendict.frozendict],
        configuration_: typing.Optional[schemas.schema_configuration.SchemaConfiguration] = None,
        **kwargs: typing.Union[Schema_.AdditionalProperties, decimal.Decimal, int],
    ) -> 'Schema':
        # Validating constructor; configuration_ can override schema
        # validation behavior for this instance.
        return super().__new__(
            cls,
            *args_,
            configuration_=configuration_,
            **kwargs,
        )
| openapi-json-schema-tools/openapi-python-client-comparison | petstore/openapi_json_schema_generator_python/src/openapi_client/paths/store_inventory/get/responses/response_200/content/application_json/schema.py | schema.py | py | 1,139 | python | en | code | 0 | github-code | 13 |
42153438001 | import pynput
import cv2
import numpy as np
import time
from mss import mss
import win32gui
import multiprocessing
import multiprocessing.shared_memory
import pickle
from tools import BoundingBox
from button_click import ButtonClicker
from fishing import Fisher
from avoidance import Avoidance
class Bot:
    """also known as: Brain. This is the storage/action center.

    Owns the frame loop, the shared-memory channels used to talk to the
    renderer process, and the current task (fishing, button clicking, ...).
    """
    def __init__(self):
        self.bounds = {}
        self.shared_bounds = {} # Used by renderer.
        self.views = {} # All of the views that can be rendered. They must be requested in self.render_areas
        self.find_bounds()
        # "B" = unsigned char; shared with the renderer process.
        self.paused = multiprocessing.Value("B", 0)
        self.frame_rate = 40
        self.seconds_per_frame = 1 / self.frame_rate
        self.frame_times = []
        self.frame_time_memory = 5 * self.frame_rate
        self.frame_num = 0
        self.frames_slow = 0
        # Where on the screen to render next frame.
        # The whitespace default is done to force it to use a larger buffer, fit bigger words.
        self.render_areas = multiprocessing.shared_memory.ShareableList([" "]*5, name="render areas")
        # TODO: Pass render debug info to renderer
        self.render_speed = 1 # How regularly, in frames, the screen refreshes.
        self.render_times = []
        self.render_time_memory = self.frame_time_memory
        self.render_num = 0
        self.renders_slow = 0
        self.lock = multiprocessing.Lock()
        #self.current_task = ButtonClicker(self)
        self.current_task = Fisher(self)
        # limbs
        self.hands = Hands()

    def start(self):
        """Start the hotkey listener and renderer process, then block in update()."""
        keyboard_listener = pynput.keyboard.Listener(on_press=self.debug_buttons)
        keyboard_listener.start()
        print("Press f1 to pause")
        # To get this working, I'm using both many forms of shared memory -
        # SharedMemory and ShareableList is used to communicate unknown or complex data
        # Value is used to communicate basic data, such as debugging values or the paused state.
        render_loop = multiprocessing.Process(target=render, args=(self.paused, self.lock), daemon=True)
        render_loop.start()
        self.update()

    def add_render_area(self, name):
        """Create the two shared-memory segments for a named screen area:
        its pickled BoundingBox ("<name> area") and its BGRA pixel buffer
        ("<name> render")."""
        b = self.bounds[name]
        self.shared_bounds[name] = multiprocessing.shared_memory.SharedMemory(name=f"{name} area", create=True, size=4096 * 2)
        p = pickle.dumps(b)
        self.shared_bounds[name].buf[:len(p)] = p[:]
        # 4 bytes per pixel (BGRA).
        size = b.width * b.height * 4
        self.views[name] = multiprocessing.shared_memory.SharedMemory(name=f"{name} render", create=True, size=size)

    def find_bounds(self):
        """Locate the game window and derive the square "window" and
        "minigame" bounding boxes, registering both as render areas."""
        # Find the window.
        window_handle = win32gui.FindWindow(None, "Adobe Flash Player 9")
        if window_handle == 0:
            raise RuntimeError("Cannot find game window.")
        bounds = BoundingBox(*win32gui.GetWindowRect(window_handle))
        b = BoundingBox()
        # Centre a square of the window's smaller dimension.
        mid = {"x": int((bounds.x1 + bounds.x2) / 2), "y": int((bounds.y1 + bounds.y2) / 2)}
        if bounds.width > bounds.height:
            size = bounds.height
        else:
            size = bounds.width
        b.y1 = int(mid["y"] - (size/2))
        b.y2 = int(mid["y"] + (size/2))
        b.x1 = int(mid["x"] - (size/2))
        b.x2 = int(mid["x"] + (size/2))
        self.bounds["window"] = b
        self.add_render_area("window")
        # Find the minigame area.
        # The fractions below are hand-tuned offsets within the window square.
        c = BoundingBox()
        c.x1 = int(b.x1 + (b.width * 0.03))
        c.x2 = int(b.x1 + (b.width * 0.776))
        c.y1 = int(b.y1 + (b.height * 0.14))
        c.y2 = int(b.y1 + (b.height * 0.67))
        self.bounds["minigame"] = c
        self.add_render_area("minigame")

        # canvas = take_screenshot(self.minigame_bounds)[0]
        # # cv2.rectangle(canvas,
        # #               (self.minigame_bounds.x1, self.minigame_bounds.y1),
        # #               (self.minigame_bounds.x2, self.minigame_bounds.y2),
        # #               (128, 128, 0))
        # # canvas = cv2.resize(canvas, (500, 500))
        # cv2.imshow("", canvas)
        # cv2.waitKey(0)

    def get_view(self, view_name) -> np.array:
        """Copy the latest rendered frame for *view_name* out of shared memory
        (returns None for a None name)."""
        if view_name is None:
            return None
        w = self.bounds[view_name].width
        h = self.bounds[view_name].height
        buffered_array = np.ndarray((h, w, 4), dtype=np.uint8, buffer=self.views[view_name].buf)
        # Copy so the caller is not reading a buffer the renderer may rewrite.
        new = buffered_array.copy()
        return new

    def debug_buttons(self, key):
        """Hotkey handler: F1 pause, F2 timing stats, F3 show views, F4 mouse info."""
        if key == pynput.keyboard.Key.f1:
            self.paused.value = not self.paused.value
            print("paused" if self.paused.value else "unpaused")
            return
        elif key == pynput.keyboard.Key.f2:
            if len(self.frame_times) > 0:
                average_time = 0
                for time in self.frame_times:
                    average_time += time
                average_time = 1 / (average_time / len(self.frame_times))
                print(f"FPS: {average_time}")
                print(f"Frame times: {self.frame_times}")
                print(f"Frames slow: {self.frames_slow / max(self.frame_num, 1) * 100}%")
            if len(self.render_times) > 0:
                print(f"Render times: {self.render_times}")
                print(f"Renders slow: {self.renders_slow / max(self.render_num, 1) * 100}%")
            print()
        elif key == pynput.keyboard.Key.f3:
            for area in self.render_areas:
                view = self.get_view(area)
                if view is not None:
                    cv2.imshow(f"{area} view", view)
            cv2.waitKey(0)
        elif key == pynput.keyboard.Key.f4:
            # TODO: Print positions relative to minigame, window.
            x = self.hands.mouse_x
            y = self.hands.mouse_y
            print(f"Screen pos:\nx: {self.hands.mouse_x}\ny: {self.hands.mouse_y}")
            b = self.bounds["window"]
            w_percent = (x - b.x1) / b.width
            h_percent = (y - b.y1) / b.height
            print(f"Relative to window area:\nx: {w_percent}%\ny: {h_percent}%")
            b = self.bounds["minigame"]
            w_percent = (x - b.x1) / b.width
            h_percent = (y - b.y1) / b.height
            print(f"Relative to minigame area:\nx: {w_percent}%\ny: {h_percent}%")
            print()

    def get_views(self):
        # NOTE(review): appears unused/leftover — captures a fixed 100x100 box
        # rather than the registered areas; confirm before relying on it.
        #self.window_view, self.minigame_view = take_screenshot(self.window_bounds, self.minigame_bounds)
        #self.minigame_view = take_screenshot(self.minigame_bounds)
        self.minigame_view = take_screenshot(BoundingBox(0, 0, 100, 100))

    def update(self):
        """Main frame loop: publish the task's desired views, tick the hands
        and the current task, then wait for the next frame window."""
        while True:
            if self.paused.value:
                self.hands.clear()
                time.sleep(0.2)
                continue
            # We floor the start here to more accurately calculate the time passed.
            frame_start = time.perf_counter()

            # Update which frames should be displayed.
            with self.lock:
                l = [None] * 5
                gathered_data = self.current_task.desired_views()
                l[:len(gathered_data)] = gathered_data[:]
                areas = multiprocessing.shared_memory.ShareableList(name="render areas")
                for i in range(len(l)):
                    areas[i] = l[i]

            self.hands.update()
            if self.current_task is not None:
                #self.current_task.debug()
                self.current_task.update()

            # Wait for next frame
            frame_slow, frame_time = frame_lock(frame_start, self.seconds_per_frame)
            # Update debugging stats.
            self.frame_num += 1
            self.frames_slow += 1 if frame_slow else 0
            if len(self.frame_times) == self.frame_time_memory:
                self.frame_times.pop(0)
            self.frame_times.append(frame_time)
class Hands:
    """The bot's hands, aka keyboard + mouse.

    Mouse clicks and key presses are scheduled in frames: each has a
    ``delay`` (frames until press) and a ``duration`` (frames until
    release), advanced one step per call to update().

    Fixes versus the original:
    * ``__init__`` set ``self.is_clicking = False``, shadowing (and breaking)
      the ``is_clicking()`` method — the attribute is removed.
    * ``click()`` tested ``duration >= 0`` and therefore always refused in
      the initial state (duration == 0) — now ``> 0``.
    * ``update()`` decremented the click duration every idle frame, letting
      it drift negative — it now only ticks an active click.
    """
    def __init__(self):
        self.mouse = pynput.mouse.Controller()
        self.keyboard = pynput.keyboard.Controller()
        # Pending/active left click: "delay" frames until press, then
        # "duration" frames until release.
        self.click_command = {"duration": 0, "delay": 0}
        self.clicking_for = 0  # How many frames we are clicking for.
        self.held_keys = dict()  # keycode -> {"duration": int, "delay": int}

    @property
    def mouse_x(self):
        """Current mouse x position on screen."""
        return self.mouse.position[0]

    @property
    def mouse_y(self):
        """Current mouse y position on screen."""
        return self.mouse.position[1]

    def update(self):
        """Advance all pending click/key timers by one frame, pressing and
        releasing buttons as their delays/durations expire."""
        cmd = self.click_command
        if cmd["delay"] > 0:
            cmd["delay"] -= 1
            if cmd["delay"] == 0:
                self.mouse.press(pynput.mouse.Button.left)
        elif cmd["duration"] > 0:
            cmd["duration"] -= 1
            if cmd["duration"] == 0:
                self.mouse.release(pynput.mouse.Button.left)

        keys_to_remove = []
        for keycode, timings in self.held_keys.items():
            # Count down delay, then held duration.
            if timings["delay"] > 0:
                timings["delay"] -= 1
                if timings["delay"] == 0:
                    self.keyboard.press(keycode)
            else:
                timings["duration"] -= 1
                if timings["duration"] <= 0:
                    self.keyboard.release(keycode)
                    keys_to_remove.append(keycode)
        # Remove keys that finished (cannot delete while iterating).
        for k in keys_to_remove:
            del self.held_keys[k]

    def clear(self):
        """Release anything currently pressed and drop all queued input."""
        if self.click_command["duration"] > 0 or self.click_command["delay"] > 0:
            self.mouse.release(pynput.mouse.Button.left)
        self.click_command["duration"] = 0
        self.click_command["delay"] = 0
        for keycode in self.held_keys:
            self.keyboard.release(keycode)
        self.held_keys = dict()

    def click(self, duration=1, delay=0) -> bool:
        """Schedule a left click held for *duration* frames after *delay*
        frames. Returns False if a click is already queued or active."""
        if self.click_command["duration"] > 0 or self.click_command["delay"] > 0:
            return False
        if delay == 0:
            self.mouse.press(pynput.mouse.Button.left)
        self.click_command["duration"] = duration
        self.click_command["delay"] = delay
        return True

    def is_clicking(self) -> bool:
        """True while a click is pressed (duration frames remaining)."""
        return self.click_command["duration"] > 0

    def move(self, x, y):
        """Move the mouse cursor to screen coordinates (x, y)."""
        self.mouse.position = (x, y)

    def press_key(self, key, duration=1, delay=0) -> bool:
        """Schedule *key* to be held for *duration* frames after *delay*
        frames. Returns False if that key is already scheduled."""
        if key in self.held_keys:
            return False
        if delay == 0:
            self.keyboard.press(key)
        self.held_keys[key] = {"duration": duration, "delay": delay}
        return True
def render(paused, lock: multiprocessing.Lock):
    """Renderer process loop: read the requested area names from the
    "render areas" ShareableList, screenshot each area, and publish the
    pixels into the per-area "<name> render" shared-memory buffers.

    NOTE(review): if a "<name> area" segment is missing, the name is skipped
    in render_areas but ``names[i]`` below still indexes the unfiltered list,
    so the screenshots could be written to the wrong buffers — confirm.
    """
    while True:
        if paused.value:
            time.sleep(0.1)
            continue
        render_start = time.perf_counter()
        with lock:
            # Snapshot the requested names (copy out of the shared list).
            names = multiprocessing.shared_memory.ShareableList(name="render areas")
            names = [x for x in names]
            render_areas = []
            for name in names:
                if name is None:
                    continue
                try:
                    render_area = multiprocessing.shared_memory.SharedMemory(name=f"{name} area")
                except FileNotFoundError:
                    print(f"Could not find shared memory called: {name}")
                    continue
                render_areas.append(pickle.loads(render_area.buf))  # Ignore this warning, it works.
                render_area.close()
            # render the areas
            renders = take_screenshot(*render_areas)
            for i in range(len(renders)):
                name = names[i]
                render = renders[i]
                out = multiprocessing.shared_memory.SharedMemory(name=f"{name} render")
                np_array = np.ndarray(render.shape, dtype=np.uint8, buffer=out.buf)
                np_array[:] = render[:]
                out.close()
        # Wait for next frame
        slow, time_taken = frame_lock(render_start, 0.025)
        #return
    # render_start = time.perf_counter()
    #
    # bot.get_views()
    #
    # # Wait for next frame
    # render_slow, render_time = bot.frame_lock(render_start, bot.seconds_per_frame * bot.render_speed)
    # # Update debugging stats.
    # bot.render_num += 1
    # bot.renders_slow += 1 if render_slow else 0
    # if len(bot.render_times) == bot.render_time_memory:
    #     bot.render_times.pop(0)
    # bot.render_times.append(render_time)
def take_screenshot(*args: BoundingBox) -> list[np.array]:
    """Grab one screen capture per bounding box, returned as BGRA numpy arrays."""
    captures = []
    with mss() as sct:
        for box in args:
            region = {"top": box.y1, "left": box.x1, "width": box.width, "height": box.height}
            captures.append(np.array(sct.grab(region)))
    return captures
def until_multiple(value, multiple, min_size=0.0):
    """Distance from *value* up to the next multiple of *multiple* that is at
    least *min_size* away.

    Returns (distance, skipped) where *skipped* counts how many whole
    multiples had to be added to satisfy *min_size*.
    """
    assert multiple > 0
    gap = multiple - (value % multiple)
    skipped = 0
    while gap < min_size:
        gap += multiple
        skipped += 1
    return gap, skipped
def pass_cycles(number):
    """Busy-wait by spinning roughly *number* loop iterations.

    Deliberately imprecise — used as a sub-millisecond timing aid where
    time.sleep is too coarse.
    """
    spun = 0
    while spun < number:
        spun += 1
def frame_lock(frame_start, seconds_per_frame) -> (bool, float):
    """Waits until the next frame 'cycle'
    args:
        frame_start - The time this frame started
        seconds_per_frame - How long each frame is
    returns:
        tuple(frame_slow, total_frame_time)
        frame_slow - Whether this took too long
        total_frame_time - How long the whole frame took, in seconds
    """
    frame_slow = False
    # What cycle did this frame start on?
    frame_cycle = frame_start - (frame_start % seconds_per_frame)
    # Are we already late on this frame?
    frame_end = time.perf_counter()
    time_passed = frame_end - frame_cycle
    if time_passed >= seconds_per_frame:
        frame_slow = True

    # Wait till next frame window.
    time_since_cycle = time.perf_counter() - frame_cycle
    # LET ME TAKE YOU ON A JOURNEY. Imagine, say, you wish to wait for... 5ms. The start of the next frame.
    # A simple task for a computer, that runs at at the billions of clock cycles per seconds, no?
    # no.
    # Indeed it is not up to the cpu but the OS. Most operating systems are only accurate to about 13ms.
    # Anything below this, and you might as well have just put 13ms in the first place.
    # Okay so we can't use time.sleep, what if instead we schedule something to run every frame time, every 25ms?
    # Ahh, a great idea! However, no. What you will find is that there is no multiprocessing solution
    # to repeatedly running a task at a set time. You'd think it'd be so simple
    # Every 25ms, check if a task has finished. If it has, restart it. If not, skip it.
    # Nope! Threads and processes are destroyed at their end, and there's only *one* way to temporarily suspend a thread.
    # Do you know what it is? That's right. TIME.SLEEP THAT INACCURATE BULLSHIT
    # In the end I found my solution in just using a shit tonne of `pass` calls to pass time
    # without lagging my whole pc.
    while time_since_cycle < seconds_per_frame:
        if seconds_per_frame - time_since_cycle > 0.015:
            time.sleep(0.00001)  # stops it lagging my pc 24/7. waits up to 15ms
        else:
            pass_cycles(2000)  # More granular control over time.
        # NOTE(review): this recomputes against frame_start, whereas the loop
        # was primed against frame_cycle above — the two differ by the time
        # already elapsed within the cycle; confirm which is intended.
        time_since_cycle = time.perf_counter() - frame_start

    frame_time = time.perf_counter() - frame_start
    return frame_slow, frame_time
def main():
    """Construct the Bot (locating the game window) and block in its loop."""
    bot = Bot()
    # Enters the main loop.
    bot.start()


def winEnumHandler(hwnd, ctx):
    # Debug helper: print the handle and title of every visible window.
    if win32gui.IsWindowVisible( hwnd ):
        print (hex(hwnd), win32gui.GetWindowText( hwnd ))
#win32gui.EnumWindows(winEnumHandler, None)

if __name__ == "__main__":
    main()
| CrunchyDuck/DUCCI-AITG | main.py | main.py | py | 16,084 | python | en | code | 0 | github-code | 13 |
38394257246 | """
Analyze Trades Streamlit App - see README.md for details
"""
from src import predictresult as analyze_pred
import streamlit as st
import numpy as np
import pandas as pd
# Streamlit demo widgets: table, line chart, map, slider and checkbox.
# tables
df = pd.DataFrame({"first column": [1, 2, 3, 4], "second column": [10, 20, 30, 40]})
st.dataframe(df.style.highlight_max(axis=0))

st.line_chart(df)

# map
# Random points scattered around San Francisco (lat 37.76, lon -122.4).
map_data = pd.DataFrame(
    np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], columns=["lat", "lon"]
)

st.map(map_data)

# slider
x = st.slider("x")
st.write(x, "square is", x * x)

# checkbox
if st.checkbox("Show dataframe"):
    chart_data = pd.DataFrame(np.random.randn(20, 3), columns=["a", "b", "c"])

    # Bare expression relies on Streamlit "magic" to display the dataframe.
    chart_data
| sws144/AnalyzeTradesApp | app.py | app.py | py | 671 | python | en | code | 0 | github-code | 13 |
34360459479 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import path
# Specify bounding polygon within source image
# Corner pixel coordinates [x, y] of the quad the source image is warped into.
top_left = [152,120]
top_right = [305,150]
bottom_right = [241,295]
bottom_left = [38,255]
def main():
    """Load the source/target images, warp the source into the target quad,
    and display the composite.

    Fix: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24; the
    builtin ``float`` is the documented equivalent.
    """
    guney = np.array(plt.imread("images/dyson.jpg"), dtype=float) * (1./255)
    bed = np.array(plt.imread("images/bed.png"), dtype=float)

    print("Bed Image Size: ", bed.shape)
    print("Guney Image Size: ", guney.shape)

    # Target quad in the bed image, and the matching source-image corners.
    img_points = [top_left, bottom_left, bottom_right, top_right]
    gun_points = [[0,0], [guney.shape[0]-1, 0], [guney.shape[0]-1, guney.shape[1]-1], [0,guney.shape[1]-1]]

    warped_pts,sample_pts = warp_pts(img_points, gun_points, bed.shape)
    projected_img = inverse_warping(bed, guney, sample_pts, warped_pts)

    plt.imshow(projected_img)
    plt.show()
def check_pts(p, pts):
    # True if the (x, y) pair *pts* lies inside matplotlib Path *p*
    # (contains_points returns a length-1 boolean array, which is truthy/falsy).
    return p.contains_points([pts])
def inpolygon(img_points, target_shape):
    """Return all integer (x, y) grid points that fall inside the polygon.

    x ranges over target_shape[0] and y over target_shape[1]; points are
    ordered with y varying slowest, matching the original row-by-row scan.

    Fix: the original issued one Path query per pixel (a Python double loop
    over the whole image); a single vectorized contains_points call does the
    same test for every grid point at once.
    """
    p = path.Path(img_points)
    xs, ys = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
    pts = np.column_stack((xs.ravel(), ys.ravel()))
    mask = p.contains_points(pts)
    return pts[mask]
def est_homography(img_pts, gun_pts):
    """Estimate the 3x3 homography mapping img_pts -> gun_pts via the DLT.

    Builds the 2n x 9 constraint matrix, takes its SVD, and normalizes the
    null-space vector so that H[2, 2] == 1.
    """
    rows = []
    for (xv, yv), (xl, yl) in zip(img_pts, gun_pts):
        rows.append([-xv, -yv, -1, 0, 0, 0, xv * xl, yv * xl, xl])
        rows.append([0, 0, 0, -xv, -yv, -1, xv * yl, yv * yl, yl])
    _, _, Vh = np.linalg.svd(np.array(rows))
    h = Vh[-1, :] / Vh[-1, -1]
    return h.reshape(3, 3)
def warp_pts(img_points, gun_points, target_shape):
    """Project every pixel inside the target quad through the homography.

    Returns (warped_pts, sample_pts): the source-image coordinates for each
    sampled target pixel, and the sampled target pixels themselves.
    """
    sample_pts = inpolygon(img_points, target_shape)
    H = est_homography(img_points, gun_points)
    warped = []
    for x, y in sample_pts:
        # Homogeneous transform, then perspective divide.
        u, v, w = np.dot(H, np.array([x, y, 1]).T)
        warped.append([u / w, v / w])
    return np.array(warped), sample_pts
def inverse_warping(bed, guney, sample_pts, warped_pts):
    """Copy source (guney) pixels onto the target image (bed) at each sampled point.

    NOTE(review): ``projected_img`` aliases ``bed``, so the target image is
    mutated in place and also returned. The index order differs between the
    two lookups (target indexed [y, x], source indexed [x-1, y-1]) — this
    looks deliberate but confirm against the coordinate conventions used by
    inpolygon/warp_pts.
    """
    # Round coordinates up to integer pixel indices.
    pts_final = np.array(np.ceil(sample_pts), dtype=int)
    pts_initial = np.array(np.ceil(warped_pts), dtype=int)
    nPts = sample_pts.shape[0]
    projected_img = bed
    # Process each RGB channel independently.
    for color in range(3):
        sub_img_final = bed[:,:,color]
        sub_img_initial = guney[:,:,color]
        for i in range(nPts):
            sub_img_final[pts_final[i][1],pts_final[i][0]] = sub_img_initial[pts_initial[i][0]-1,pts_initial[i][1]-1]
        projected_img[:,:,color] = sub_img_final
    return projected_img
# Script entry point.
if __name__ == "__main__":
    main()
| l-j-oneil/free_beers | free_beers.py | free_beers.py | py | 2,767 | python | en | code | 0 | github-code | 13 |
70099483537 | from Function import *
def secant(aa, bb, cc):
    """Interactively run the secant root-finding method on the function
    defined by Functions(aa, bb, cc) (see the project's Function module).

    Prompts (in Indonesian) for the two starting guesses x0 and x1, the
    error tolerance, and the maximum number of iterations; prints an
    iteration table and returns the final estimate, or None when
    f(x0)*f(x1) > 0 (no sign change between the starting points).
    """
    func = Functions(aa, bb, cc)
    x0 = float(input("Masukkan x0: "))
    x1 = float(input("Masukkan x1: "))
    error = float(input("Masukkan error: "))
    n = int(input("Masukkan banyak iterasi: "))
    i = 0
    # No sign change between the starting guesses: no bracketed root.
    if (func.fx(x0)*func.fx(x1) > 0):
        print("\nDiantara x0 dan x1 tidak ditemukan titik nol")
        return None
    print("\ni\t\txi\t\tf(xi)")
    print("%d\t\t%.3f\t%.3f" % (i, x0, func.fx(x0)))
    for i in range(0, n):
        y0 = func.fx(x0)
        y1 = func.fx(x1)
        print("%d\t\t%.3f\t%.3f" % (i+1, x1, y1))
        # Converged within tolerance.
        if abs(y1) <= error:
            break
        # Next iterate from the secant formula (implemented in Functions).
        x2 = func.Sct_nilx2(x0, x1, y0, y1)
        x0 = x1
        x1 = x2
    return x1
| caesarjalu/Metnum-Metode | Secant.py | Secant.py | py | 766 | python | en | code | 0 | github-code | 13 |
20702834627 | from django.contrib import admin
from django.urls import path, include
from pages import views
# URL routes for the ``pages`` app.  The ``<page>`` path parameter identifies
# which page the load/clear/delete views operate on; the remaining endpoints
# work on POSTed form data.
urlpatterns = [
    path('project/page/', views.page, name="index"),
    path('load_page/<page>', views.load_page, name="load_page"),
    path('loaded_page/', views.PageLoader.as_view(), name="loaded_page"),
    path('clear_output/<page>', views.clear_output, name="clear_output"),
    path('delete_page/<page>', views.delete_page, name="delete_page"),
    path('set_region_tei/', views.set_region_options, name="set_region_tei"),
    path('set_page_options/', views.set_page_options, name="set_page_options"),
    path('download_page/', views.download_page, name="download_page"),
    path('generate_page_output/', views.generate_page, name="generate_page_output"),
]
| maxnth/ocr2tei | pages/urls.py | urls.py | py | 767 | python | en | code | 0 | github-code | 13 |
4132816507 | import random
SHOP = {
"ether": {
"ether": 50,
"mid-ether": 100,
"hi-ether": 150,
"turbo-ether": 200
},
"potion": {
"potion": 25,
"mid-potion": 30,
"hi-potion": 70
},
"vest": {
"black-vest": 100,
"ruby-vest": 150,
"dark-vest": 200
},
"sword": {
"masamune": 500,
"wooden-sword": 200,
"thunder-blade": 400,
}
}
life = {
"HP": 500,
"MP": 90,
"Strength": 10
}
lavos_life = {
"HP": random.randint(600, 2000),
"MP": random.randint(100, 1000),
"Strength": random.randint(70, 500)
}
money_bank = 5000
item_bank = 0
item_type = ""
item_bucket = {}
i_name = ""
i_total = 0
user_score = 0
def report(chosen_chara):
    """Print the player's current stats when asked for a "report"; otherwise do nothing."""
    if chosen_chara != "report":
        return
    print("\n")
    for stat_name, stat_value in life.items():
        print(f"{stat_name}: {stat_value}")
    print("\n")
def shop_list(chosen_items):
    """Handle one shop interaction for the item category *chosen_items*.

    Prompts for an item type and a quantity, checks the player's funds and,
    on success, debits ``money_bank``, credits ``item_bank`` and records the
    purchase in ``item_bucket``.  Passing 'e' exits the shop.
    """
    global money_bank
    global item_bank
    global item_type
    if chosen_items in SHOP:
        print()
        print(f"You chose {chosen_items.capitalize()}!")
        print(f"What type of {chosen_items.capitalize()}?\n")
        for a, b in SHOP[chosen_items].items():
            print(f"{a.capitalize()}: {b} zeen")
        print()
        item_type = input("Type here: ").lower()
        item_c = input("How many: ")
        item_count = int(item_c)
        if item_type in SHOP[chosen_items].keys():
            # NOTE(review): 99 looks like an inventory cap, but this check
            # does not stop item_bank from exceeding 99 — confirm the rule.
            if item_bank != 99:
                print(f"You will buy {item_count} {item_type.capitalize()}\n")
                print(f"Your money before: {money_bank} zeen")
                total = int(item_count * SHOP[chosen_items][item_type])
                if money_bank >= total:
                    print(f"{item_count} {item_type.capitalize()}: {total} zeen")
                    money_bank = money_bank - total
                    item_bank = item_bank + item_count
                    print(f"Your money now: {money_bank} zeen")
                    print(f"Your items now: {item_bank} items\n")
                    # The quantity is stored as the raw input *string*.
                    item_bucket[item_type] = item_c
                else:
                    print(f"Sorry, you do not have enough money to buy {item_type.capitalize()}\n")
        elif item_type not in SHOP[chosen_items].keys():
            print("Sorry, the item that you want to buy is not in the list. Try again....\n")
    elif chosen_items == 'e':
        print("You exit shopping....\n")
    elif chosen_items not in SHOP:
        print("Sorry, the item that you want to buy is not in the list. Try again....\n")
def customize_life(items_name, items_total):
    """Apply the stat bonus of a purchased item to the player's ``life`` dict.

    ``items_name`` is the lower-case item key and ``items_total`` the quantity
    bought (int or numeric string).  Ethers/vests boost MP, potions boost HP
    and swords boost Strength; the updated stats are printed afterwards.
    """
    # Per-unit stat bonuses.  The original expressed these as three long
    # if/elif chains; lookup tables are equivalent and easier to extend.
    mp_bonus = {
        'ether': 50, 'black-vest': 50,
        'mid-ether': 100, 'ruby-vest': 100,
        'hi-ether': 150, 'dark-vest': 150,
        'turbo-ether': 200,
    }
    hp_bonus = {'potion': 35, 'mid-potion': 100, 'hi-potion': 175}
    strength_bonus = {'masamune': 100, 'wooden-sword': 30, 'thunder-blade': 70}
    # The original declared ``global i_name, i_total`` but never assigned
    # them in this function, so the dead declarations were removed.
    count = int(items_total)
    if items_name in mp_bonus:
        life['MP'] = life['MP'] + mp_bonus[items_name] * count
    if items_name in hp_bonus:
        life['HP'] = life['HP'] + hp_bonus[items_name] * count
    if items_name in strength_bonus:
        life['Strength'] = life['Strength'] + strength_bonus[items_name] * count
    for X, Y in life.items():
        print(X, Y)
    print()
def fight_lavos(lavos, you):
    """Print the Lavos-vs-player stat sheet and score the three stat duels.

    A point is added to the global ``user_score`` for each of HP, MP and
    Strength where the player's value beats Lavos's (see the reset note below).
    """
    global user_score
    print("\n*** FIGHT LAVOS ***\n")
    print("LAVOS\n")
    for K, V in lavos_life.items():
        print(f"{K}: {V}")
    print("\nVS.\n")
    print("YOU!\n")
    for X, y in life.items():
        print(f"{X}: {y}")
    # Both stat dicts are built in the order HP, MP, Strength, so iterating
    # values() yields the stats in that order.
    it = iter(lavos.values())
    it_u = iter(you.values())
    hp_u, mp_u, st_u = next(it_u), next(it_u), next(it_u)
    hp, mp, st = next(it), next(it), next(it)
    if hp_u > hp:
        user_score += 1
    if mp_u > mp:
        user_score += 1
    if st_u > st:
        user_score += 1
    else:
        # NOTE(review): this else belongs only to the Strength check, so
        # losing the Strength duel wipes any HP/MP points already scored —
        # verify this is intended and not a dangling-else bug.
        user_score = 0
    print()
    print(f"Your score is {user_score}")
print("🌟►CHRONO TRIGGERS◄🌟\n")
while True:
print(f"You have {money_bank} zeen in your Money Bank!\n"
f"Let's buy something to help you fight big boss, Lavos!\n")
print("""What do you want to buy?\n""")
for k, v in SHOP.items():
print(f"{k.capitalize()}")
item_buy = input("\nType here ('e' to exit shopping): ").lower()
shop_list(item_buy)
print(f"Your bucket:")
if item_buy != 'e':
for i_name, i_total in item_bucket.items():
print(f"{i_name.capitalize()} => {i_total}")
customize_life(i_name.casefold(), i_total)
if item_buy == 'e':
print('Thank you for shopping! Now, you are ready to fight with Lavos!')
fight_lavos(lavos_life, life)
if user_score == 3:
print("You win! Congratulations! You saved the earth!")
else:
print("You lose.... The future refused to change....")
print("\n*** Thank you for playing! ***\n")
break
| rachmifadillah/100DaysofCode | Chrono Triggers/chrono_triggers.py | chrono_triggers.py | py | 5,564 | python | en | code | 0 | github-code | 13 |
41318475545 | import os
from setuptools import setup
def packages(directory):
    """Return dotted package names for every directory under *directory*
    that contains an ``__init__.py`` (paths are '/'-separated)."""
    found = []
    for dirpath, _dirnames, _filenames in os.walk(directory):
        if os.path.isfile(os.path.join(dirpath, '__init__.py')):
            found.append(dirpath.replace('/', '.'))
    return found
setup(
name = "ngnos",
version = "1.3.0",
author = "ngNOS maintainers and contributors",
author_email = "maintainers@ngnos.com",
description = ("ngNOS configuration libraries."),
license = "LGPLv2+",
keywords = "ngnos",
url = "http://www.ngnos.com",
packages = packages('ngnos'),
long_description="ngNOS configuration libraries",
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Utilities",
"License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)",
],
entry_points={
"console_scripts": [
"config-mgmt = ngnos.config_mgmt:run",
],
},
)
| ngnos/ngnos-1x | python/setup.py | setup.py | py | 892 | python | en | code | 0 | github-code | 13 |
17706532480 | #_*_coding= utf-8
import pickle
import pymongo
import pandas as pd
def location_mul(list):
    """Run a positional-index lookup (``location``) for every term in *list*."""
    for term in list:
        location(term)
def location(str):
    """Print a positional index ``{term: {movie_id: [positions]}}`` for *str*
    over the plot text of every movie in the data set."""
    lines=[]
    # NOTE(review): ``read()`` is neither defined nor imported in this module,
    # so calling location() raises NameError as the file stands.
    data = read()
    movie_dic={}
    result_dic={}
    for movie_id in data:
        index = movie_id
        movie = data[movie_id]
        #print(index)
        docid = index
        #print(docid)
        #print(movie)
        # Concatenate all plot sentences into one searchable line of text.
        lineText=""
        for text in movie["plot"]:
            lineText = "%s %s"%(lineText,text)
        list=location_index(str,lineText)
        if len(list)>0:
            movie_dic[docid]=list
    result_dic[str] = movie_dic
    print(result_dic)
def location_index(str, text):
    """Return the 0-based token positions in *text* whose token equals *str*.

    Double quotes are stripped and line breaks are normalised to spaces
    before the text is split into whitespace-separated tokens.
    """
    # Bug fix: the original called ``text.replace(...)`` without assigning the
    # result (strings are immutable), so the cleaning never happened.  Line
    # breaks are replaced with a space rather than removed so that words on
    # adjacent lines are not fused together.
    text = text.replace('"', '').replace("\r\n", " ").replace("\n", " ").replace("\r", " ")
    result = []
    for position, token in enumerate(text.split()):
        if token == str:
            result.append(position)
    return result
#class positional_search(object):
# NOTE(review): credentials are hard-coded in the connection string below —
# move them to environment variables / configuration and rotate the exposed
# password.
client = pymongo.MongoClient(
    "mongodb://jack:jackmongodb@cluster0-shard-00-00-uagde.mongodb.net:27017,cluster0-shard-00-01-uagde.mongodb.net:27017,cluster0-shard-00-02-uagde.mongodb.net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin&retryWrites=true")
db = client['IMDBData']
collection = db['Movies_1']
#delf search(self,query):
# NOTE(review): ``result_dic`` is a local variable of location() and is not
# defined at module scope, so the next line raises NameError when this module
# is executed.
query = {result_dic[0]: {"$exists": True}}
cursor = collection.Movies_1.find(query)
db.collection.find()
#if __name__ == '__main__':
    #location('Princesses')
    #location('the')
    #location_mul(['tattoo','girl'])
| 2019-Spring-Information-Retrieval/backend | positionFinal.py | positionFinal.py | py | 1,754 | python | en | code | 0 | github-code | 13 |
5692699272 | import pickle as pk
from sklearn.metrics import accuracy_score, f1_score
from match import models
from util import map_item
path_sent = 'feat/ml/sent_test.pkl'
path_pair = 'feat/nn/pair_test.pkl'
path_label = 'feat/label_test.pkl'
with open(path_sent, 'rb') as f:
sents = pk.load(f)
with open(path_pair, 'rb') as f:
pairs = pk.load(f)
with open(path_label, 'rb') as f:
labels = pk.load(f)
def test(name, sents, labels, thre):
    """Evaluate model *name* on the test split and print its F1 and accuracy.

    The classic models ('svm', 'xgb') score a single feature matrix; the
    neural models score a (sent1s, sent2s) pair.  *thre* is the probability
    threshold used to binarize the positive-class scores into predictions.
    """
    model = map_item(name, models)
    if name == 'svm' or name == 'xgb':
        # Column 1 of predict_proba holds the positive-class probability.
        probs = model.predict_proba(sents)[:, 1]
    else:
        sent1s, sent2s = sents
        probs = model.predict([sent1s, sent2s])
    preds = probs > thre
    f1 = f1_score(labels, preds)
    print('\n%s f1: %.2f - acc: %.2f' % (name, f1, accuracy_score(labels, preds)))
if __name__ == '__main__':
test('svm', sents, labels, thre=0.2)
test('xgb', sents, labels, thre=0.2)
test('dnn', pairs, labels, thre=0.2)
test('cnn_1d', pairs, labels, thre=0.2)
test('cnn_2d', pairs, labels, thre=0.2)
test('rnn', pairs, labels, thre=0.2)
| CyanYoung/chinese_intent_match | eval.py | eval.py | py | 1,079 | python | en | code | 2 | github-code | 13 |
22336979933 | is_happy = False
class Flower():
    """A flower that becomes happy once it is watered with at least ``req`` water.

    Bug fix: happiness used to live in the module-level global ``is_happy``,
    so *all* Flower instances shared one flag and watering one flower made
    every flower report happy.  The flag is now per-instance state.
    """
    def __init__(self, name, req):
        self.name = name
        self.req = req          # minimum amount of water needed to be happy
        self.is_happy = False   # per-instance happiness flag
    def water(self, flower_water):
        """Mark the flower happy if it receives at least ``req`` water."""
        if flower_water >= self.req:
            self.is_happy = True
    def status(self):
        """Return a human-readable happiness message for this flower."""
        if self.is_happy:
            return f"{self.name} is happy"
        return f"{self.name} is not happy"
flower = Flower("Lilly", 100)
flower.water(50)
print(flower.status())
flower.water(100)
print(flower.status())
| DimitarGospodinov/softuni-python | Python OOP/Flower.py | Flower.py | py | 492 | python | en | code | 0 | github-code | 13 |
24297963134 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    """Initial migration: creates the ``Result`` model (runner identity, bib
    number, finish time, and optional links to the race and a claiming user)."""
    dependencies = [
        ('race', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Result',
            fields=[
                ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
                ('firstname', models.CharField(max_length=50)),
                ('lastname', models.CharField(max_length=50)),
                ('age', models.IntegerField(blank=True, null=True)),
                ('bib', models.IntegerField()),
                ('time', models.TimeField()),
                ('claim', models.BooleanField(default=False)),
                ('race', models.ForeignKey(to='race.Race')),
                ('user', models.ForeignKey(blank=True, null=True, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| garyditsch/grand_prix | results/migrations/0001_initial.py | 0001_initial.py | py | 1,067 | python | en | code | 0 | github-code | 13 |
71350180819 | from albumentations import DualTransform
from albumentations.augmentations import functional as F
from albumentations.augmentations.bbox_utils import normalize_bbox, denormalize_bbox
import cv2
class PadConstant(DualTransform):
    """Pad the sides of the image with a constant-valued border.

    Args:
        top (int): top padding in pixels.
        bottom (int): bottom padding in pixels.
        left (int): left padding in pixels.
        right (int): right padding in pixels.
        value (int, float, list of int, list of float): padding value for the
            image (cv2.BORDER_CONSTANT is always used).
        mask_value (int, float, list of int, list of float): padding value for
            the mask.
        p (float): probability of applying the transform. Default: 1.0.

    Targets:
        image, mask, bbox, keypoints

    Image types:
        uint8, float32
    """
    def __init__(
        self,
        top=0,
        bottom=0,
        left=0,
        right=0,
        value=None,
        mask_value=None,
        always_apply=False,
        p=1.0,
    ):
        super(PadConstant, self).__init__(always_apply, p)
        self.pad_top = top
        self.pad_bottom = bottom
        self.pad_left = left
        self.pad_right = right
        # Only constant-border padding is supported by this transform.
        self.border_mode = cv2.BORDER_CONSTANT
        self.value = value
        self.mask_value = mask_value
    def update_params(self, params, **kwargs):
        # Expose the pad amounts to the per-target apply_* methods below.
        params = super(PadConstant, self).update_params(params, **kwargs)
        params.update(
            {
                "pad_top": self.pad_top,
                "pad_bottom": self.pad_bottom,
                "pad_left": self.pad_left,
                "pad_right": self.pad_right,
            }
        )
        return params
    def apply(self, img, pad_top=0, pad_bottom=0, pad_left=0, pad_right=0, **params):
        # Pad the image, filling the border with ``value``.
        return F.pad_with_params(
            img,
            pad_top,
            pad_bottom,
            pad_left,
            pad_right,
            border_mode=self.border_mode,
            value=self.value,
        )
    def apply_to_mask(
        self, img, pad_top=0, pad_bottom=0, pad_left=0, pad_right=0, **params
    ):
        # Same padding as the image, but the border is filled with ``mask_value``.
        return F.pad_with_params(
            img,
            pad_top,
            pad_bottom,
            pad_left,
            pad_right,
            border_mode=self.border_mode,
            value=self.mask_value,
        )
    def apply_to_bbox(
        self,
        bbox,
        pad_top=0,
        pad_bottom=0,
        pad_left=0,
        pad_right=0,
        rows=0,
        cols=0,
        **params
    ):
        # Shift the box by the left/top offsets, then renormalize against the
        # padded image size.
        x_min, y_min, x_max, y_max = denormalize_bbox(bbox, rows, cols)
        bbox = x_min + pad_left, y_min + pad_top, x_max + pad_left, y_max + pad_top
        return normalize_bbox(
            bbox, rows + pad_top + pad_bottom, cols + pad_left + pad_right
        )
    def apply_to_keypoint(
        self, keypoint, pad_top=0, pad_bottom=0, pad_left=0, pad_right=0, **params
    ):
        # Keypoints only translate; angle and scale are unaffected by padding.
        x, y, angle, scale = keypoint
        return x + pad_left, y + pad_top, angle, scale
    def get_transform_init_args_names(self):
        # NOTE(review): "border_mode" is not an __init__ argument, and the pad
        # sizes are stored as self.pad_* while these names say top/bottom/
        # left/right.  Albumentations serialization resolves init-arg names
        # via getattr, so to_dict()/from_dict() round-tripping of this
        # transform likely fails — verify against the albumentations version
        # in use.
        return ("top", "bottom", "left", "right", "border_mode", "value", "mask_value")
| cafeal/SIGNATE_AIEdge2 | src/TF_CenterNet/transforms/pad_constant.py | pad_constant.py | py | 3,216 | python | en | code | 3 | github-code | 13 |
23650378000 | import pytest
from .app import app as app_
def test_get_content_type():
    """
    Check that the ``content_type`` of the quiz route's ``GET`` response
    is a correctly rendered HTML template.
    """
    with app_.test_client() as client:
        response = client.get('/quiz/')
    assert response.content_type == 'text/html; charset=utf-8'
def test_post_content_type():
    """
    Check that the ``content_type`` of the quiz route's ``POST`` response
    is JSON.
    """
    with app_.test_client() as client:
        response = client.post('/quiz/')
    assert response.content_type == 'application/json'
def test_post_dificulty_easy():
    """
    Check that the quiz difficulty `Fácil` (easy) exists.
    """
    with app_.test_client() as client:
        response = client.post("/quiz/")
        json_data = response.get_json()
    assert "Fácil" in json_data
def test_post_dificulty_intermediary():
    """
    Check that the quiz difficulty `Médio` (medium) exists.
    """
    with app_.test_client() as client:
        response = client.post("/quiz/")
        json_data = response.get_json()
    assert "Médio" in json_data
def test_post_dificulty_hard():
    """
    Check that the quiz difficulty `Difícil` (hard) exists.
    """
    with app_.test_client() as client:
        response = client.post("/quiz/")
        json_data = response.get_json()
    assert "Difícil" in json_data
@pytest.mark.parametrize("dificuldade", [
    "Difíci",
    "difícil",
    "Muito Difícil",
    "Fáci",
    "Fácill",
    "1",
    ""
])
def test_post_dificulty_border_case(dificuldade):
    """
    Check that difficulty names merely similar to the real ones
    (typos, wrong case, etc.) do not exist in the quiz.
    """
    with app_.test_client() as client:
        response = client.post("/quiz/")
        json_data = response.get_json()
    assert dificuldade not in json_data
12441992476 | import pytest
from gameprices.cli.mailalert import main as psnmailalert_main
from gameprices.test.commons import mailalert
from gameprices.test.test_psn import NO_SEARCH_FOR_CID_REASON
def test_mailfunc_not_existing():
    # A price line for a non-existent content id must not match any deal,
    # so the line should remain untouched in the alert file.
    wrong_line = "EP9000-CUSA07123_00-000000000000000,10.00,DE/de"
    mailalert(wrong_line, psnmailalert_main, should_remain_in_file=wrong_line)
@pytest.mark.skip(reason=NO_SEARCH_FOR_CID_REASON)
def test_mailfunc_existing_and_not_existing():
    # Mix an unmatchable alert (0.00 threshold) with a matchable one (100.00):
    # only the unmatchable line should survive in the alert file.
    unmatchable_price = "EP9000-CUSA07123_00-NIOHEU0000000000,0.00,DE/de"
    matchable_and_unmatchable_price = (
        unmatchable_price + "\nEP9000-CUSA07123_00-NIOHEU0000000000,100.00,DE/de"
    )
    mailalert(
        matchable_and_unmatchable_price,
        psnmailalert_main,
        should_remain_in_file=unmatchable_price + "\n",
    )
@pytest.mark.skip(reason=NO_SEARCH_FOR_CID_REASON)
def test_support_lines_without_store():
    # Alert lines may omit the trailing store segment (e.g. "DE/de").
    mailalert("EP0177-CUSA07010_00-SONICMANIA000000,100.00", psnmailalert_main)
| snipem/gameprices | gameprices/test/test_dealmailalert.py | test_dealmailalert.py | py | 989 | python | en | code | 65 | github-code | 13 |
12914079503 | import pandas as pd
from sqlalchemy import create_engine, text
from sqlalchemy.engine.url import URL
class Database:
    """Thin wrapper around an SQLAlchemy engine (MySQL, MSSQL or SQLite).

    Optionally imports a pandas DataFrame into the database as a table named
    ``dfname`` right after connecting.
    """
    def __init__(self, db_connection=None, dataframe=pd.DataFrame({"A": []}), dfname="DF_NAME"):
        """Create the engine described by ``db_connection``.

        ``db_connection`` is a dict with at least ``drivername`` plus
        driver-specific keys (``database``, ``host`` or ``path``).
        Raises ValueError when no connection description is given.
        """
        self.connection = None
        if db_connection is not None:
            # Bug fix: these two lookups used to happen *before* the None
            # check, so ``Database()`` crashed with TypeError instead of ever
            # reaching the intended ValueError below.
            self.driver_name = db_connection["drivername"]
            self.database_name = db_connection["database"]
            if db_connection["drivername"] == "mysql+mysqlconnector":
                # Force the legacy auth plugin expected by mysql-connector.
                self.engine = create_engine(URL(**db_connection), pool_pre_ping=True,
                                            connect_args={'auth_plugin': 'mysql_native_password'})
            elif db_connection['drivername'] == 'mssql+pyodbc':
                # Windows integrated auth; only host/database are used here.
                self.engine = create_engine(
                    'mssql+pyodbc://' + db_connection['host'] + '/' + db_connection['database'] +
                    '?trusted_connection=yes&driver=ODBC+Driver+13+for+SQL+Server')
            elif db_connection["drivername"] == "sqlite":
                self.engine = create_engine("sqlite:///" + db_connection["path"], pool_pre_ping=True)
            if not dataframe.empty:
                self.import_df(dataframe, dfname)
        else:
            raise ValueError("You need to pass a db connection or a dataframe")
    def import_df(self, dataframe, name):
        """Write *dataframe* into the database as table *name*, replacing it."""
        dataframe.to_sql(name=name, con=self.engine, if_exists="replace", index=False)
    def disconnect(self):
        """
        Close connection to self.engine
        """
        self.connection.close()
    def connect(self):
        """
        :return: Connection to self.engine
        """
        self.connection = self.engine.connect()
    def execute(self, statement):
        """Run a statement that returns no rows (DDL/DML)."""
        self.connect()
        if 'mssql' in self.engine.name:
            # The MSSQL path runs with autocommit enabled.
            with self.connection.execution_options(autocommit=True) as conn:
                conn.execute(text(statement))
        else:
            self.connection.execute(text(statement))
        self.disconnect()
    def execute_query(self, statement, as_df=False):
        """Run a SELECT and return its rows, optionally as a pandas DataFrame."""
        self.connect()
        result = self.connection.execute(text(statement))
        keys = result.keys()
        result = result.fetchall()
        self.disconnect()
        if as_df:
            return pd.DataFrame(result, columns=keys)
        else:
            return result
    def materializedView(self, desc, tablename, query):
        """Drop *tablename* and re-create it from *query* (CREATE TABLE AS).

        On MSSQL the caller must pass a query that already writes into
        *tablename* (e.g. SELECT ... INTO), since CTAS is not executed there.
        """
        print("MaterializedView: " + desc)
        self.execute('''drop table if exists {}'''.format(tablename))
        if 'mssql' in self.engine.name:
            self.execute(query)
        else:
            self.execute('''create table {} as '''.format(tablename) + query)
| SANElibDevTeam/SANElib | util/database.py | database.py | py | 2,667 | python | en | code | 7 | github-code | 13 |
36603777014 | from sys import *
setrecursionlimit(10**4)
# 위 코드 추가해서 통과!!!
def dfs(j_type, x, y, v, check, n):
    """Flood-fill: mark in ``check`` every cell 4-connected to (x, y)
    whose value in the n x n grid ``v`` equals ``j_type``."""
    if check[x][y]:
        return
    check[x][y] = 1
    # Visit neighbours in the same order as the original: up, right, down, left.
    for dx, dy in ((-1, 0), (0, 1), (1, 0), (0, -1)):
        nx, ny = x + dx, y + dy
        if 0 <= nx < n and 0 <= ny < n and v[nx][ny] == j_type:
            dfs(j_type, nx, ny, v, check, n)
def solution(v):
    """Count the number of connected regions for each cell value (0, 1, 2)
    in the n x n grid ``v``; returns a list of three counts."""
    n = len(v)
    visited = [[0] * n for _ in range(n)]
    counts = [0, 0, 0]
    for row in range(n):
        for col in range(n):
            if not visited[row][col]:
                # Each unvisited cell starts a brand-new region of its value.
                dfs(v[row][col], row, col, v, visited, n)
                counts[v[row][col]] += 1
    return counts
print(solution([[0,0,1,1],[1,1,1,1],[2,2,2,1],[0,0,0,2]]))
# print(solution([[0,0,1],[2,2,1],[0,0,0]]))
| majung2/CTpractice | python/2020하반기/2020WinterCoding/03.py | 03.py | py | 987 | python | en | code | 0 | github-code | 13 |
29876294147 | from __future__ import absolute_import
import shutil
from jens.maintenance import validate_directories
from jens.git_wrapper import clone
from jens.settings import Settings
from jens.errors import JensError
from jens.test.tools import create_fake_repository
from jens.test.testcases import JensTestCase
class MaintenanceTest(JensTestCase):
def setUp(self):
super(MaintenanceTest, self).setUp()
self.settings = Settings()
# validate_directories() expects both below to look
# like a Git repository.
(self.environments_bare, self.environments) = \
create_fake_repository(self.sandbox_path)
shutil.rmtree(self.settings.ENV_METADATADIR)
clone(self.settings.ENV_METADATADIR, self.environments_bare, \
branch='master')
(self.repositories_bare, self.repositories) = \
create_fake_repository(self.sandbox_path)
shutil.rmtree(self.settings.REPO_METADATADIR)
clone(self.settings.REPO_METADATADIR, self.repositories_bare, \
branch='master')
#### TESTS ####
def test_all_expected_directories_are_present_and_inited(self):
validate_directories()
def test_no_bares_dir(self):
shutil.rmtree(self.settings.BAREDIR)
self.assertRaisesRegex(JensError,
self.settings.BAREDIR, validate_directories)
def test_no_cache_dir(self):
shutil.rmtree(self.settings.CACHEDIR)
self.assertRaisesRegex(JensError,
self.settings.CACHEDIR, validate_directories)
def test_no_clones_dir(self):
shutil.rmtree(self.settings.CLONEDIR)
self.assertRaisesRegex(JensError,
self.settings.CLONEDIR, validate_directories)
def test_no_environments_dir(self):
shutil.rmtree(self.settings.ENVIRONMENTSDIR)
self.assertRaisesRegex(JensError,
self.settings.ENVIRONMENTSDIR, validate_directories)
def test_no_repometadata_dir(self):
shutil.rmtree(self.settings.REPO_METADATADIR)
self.assertRaisesRegex(JensError,
self.settings.REPO_METADATADIR, validate_directories)
def test_no_envmetadata_dir(self):
shutil.rmtree(self.settings.ENV_METADATADIR)
self.assertRaisesRegex(JensError,
self.settings.ENV_METADATADIR, validate_directories)
| cernops/jens | jens/test/test_maintenance.py | test_maintenance.py | py | 2,335 | python | en | code | 42 | github-code | 13 |
6217496754 | from django.contrib.auth.models import User
from django.db import models
from .api.APIFactory import APIFactory
from .api.CoinMarketCap import CoinMarketCap
class Crypto(models.Model):
    """Cryptocurrency selectable in the tracker (ticker symbol is the PK)."""
    symbol = models.CharField(max_length=10, primary_key=True)
    name = models.CharField(max_length=32)
    def __str__(self):
        return self.name
class Fiat(models.Model):
    """Fiat currency selectable in the tracker (currency symbol is the PK)."""
    symbol = models.CharField(max_length=10, primary_key=True)
    name = models.CharField(max_length=32)
    def __str__(self):
        return self.name
class UserProfile(models.Model):
    """Defines user account in the application"""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Display currency for converted amounts; defaults to US dollars.
    fiat = models.ForeignKey(Fiat, on_delete=models.CASCADE, default='USD')
    def __str__(self):
        return f"{self.user} - {self.fiat}"
class Rate(models.Model):
    """Conversion rate: price of 1 unit of ``crypto`` expressed in ``fiat``."""
    crypto = models.ForeignKey(Crypto, on_delete=models.CASCADE)
    fiat = models.ForeignKey(Fiat, on_delete=models.CASCADE)
    # None until the first rate refresh succeeds.
    rate = models.FloatField(null=True)
    percent_change_24h = models.FloatField(null=True)
    def __str__(self):
        return f"1 {self.crypto} -> {self.rate} {self.fiat}"
class Exchange(models.Model):
    """Exchange the tracker can integrate with (name is the primary key)."""
    name = models.CharField(max_length=64, primary_key=True)
    def __str__(self):
        return f"{self.name}"
class ExchangeAccount(models.Model):
    """API credentials linking one user profile to one exchange."""
    user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
    exchange = models.ForeignKey(Exchange, on_delete=models.CASCADE)
    # NOTE(review): the API key/secret columns are stored in plaintext —
    # consider encrypting these at rest.
    key = models.CharField(max_length=1024)
    secret = models.CharField(max_length=1024)
    def __str__(self):
        return f"{self.exchange}[{self.user.user}]"
class Balance(models.Model):
    """Abstract base: an amount of one cryptocurrency at a point in time."""
    crypto = models.ForeignKey(Crypto, on_delete=models.CASCADE)
    amount = models.FloatField(null=True, blank=True)
    # Updated on every save (auto_now), i.e. the last-refresh timestamp.
    date = models.DateTimeField(auto_now=True)
    def __str__(self):
        return f"{self.amount} {self.crypto}"
    class Meta:
        abstract = True
class ExchangeBalance(Balance):
    """Balance of a cryptocurrency held on a specific exchange account."""
    exchange_account = models.ForeignKey(ExchangeAccount, on_delete=models.CASCADE)
    def __str__(self):
        return f"{self.exchange_account}: {super().__str__()}"
class ManualBalance(Balance):
    """Balance of a cryptocurrency entered manually by the user."""
    user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
    def __str__(self):
        return f"{self.user}: {super().__str__()}"
def get_user_balances(user):
    """Aggregate a user's crypto holdings across exchanges and manual entries.

    Returns ``{crypto: {'amount', 'amount_fiat', 'percent_change_24h'}}``
    with fiat amounts converted via the user's preferred fiat currency.
    """
    total_balances = {}
    exchange_accounts = ExchangeAccount.objects.filter(user=user)
    exchange_balances = ExchangeBalance.objects.filter(exchange_account__in=exchange_accounts)
    manual_balances = ManualBalance.objects.filter(user=user)
    # NOTE(review): union() over querysets of two different models relies on
    # their column lists lining up (both derive from Balance) — confirm the
    # database backend supports this.
    balances = exchange_balances.union(manual_balances)
    user_fiat = user.fiat
    for balance in balances:
        crypto = balance.crypto
        amount = balance.amount
        try:
            rate = Rate.objects.get(crypto=crypto, fiat=user_fiat)
            amount_fiat = rate.rate * amount
            percent_change_24h = rate.percent_change_24h
        except (Rate.DoesNotExist, TypeError):
            # TypeError covers rate.rate or amount still being None.
            amount_fiat = 0
            percent_change_24h = 0
        # Skip rows whose amount has never been populated.
        if amount is None:
            continue
        if crypto in total_balances:
            total_balances[crypto]['amount'] += amount
            total_balances[crypto]['amount_fiat'] += amount_fiat
        else:
            total_balances[crypto] = {
                'amount': amount,
                'amount_fiat': amount_fiat,
                'percent_change_24h': percent_change_24h
            }
    return total_balances
def refresh_rates():
    """Pull current crypto prices from CoinMarketCap and upsert a Rate row
    per (crypto, fiat) pair for every fiat currency in the database."""
    cmc = CoinMarketCap()
    fiats = Fiat.objects.all()
    for fiat in fiats:
        rates = cmc.get_crypto_rates(fiat.symbol)
        for symbol, data_dict in rates.items():
            # Unknown symbols are added to the Crypto table on the fly.
            crypto, _ = Crypto.objects.get_or_create(symbol=symbol)
            rate, _ = Rate.objects.get_or_create(
                crypto=crypto,
                fiat=fiat
            )
            rate.rate = data_dict['price']
            rate.percent_change_24h = data_dict['percent_change_24h']
            rate.save()
def refresh_balances():
    """Re-sync stored balances for every configured exchange account."""
    for account in ExchangeAccount.objects.all():
        refresh_balance(account)
def refresh_balance(account):
    """Synchronize stored balances for one exchange account with the live API.

    Creates/updates an ``ExchangeBalance`` row per currency reported by the
    exchange and deletes this account's rows for currencies no longer held.
    Accounts whose API wrapper fails to return a balance are skipped.
    """
    api = APIFactory.create(account)
    try:
        balance = api.get_balance()
    except TypeError:
        print(f"Error: couldn't load balance for account - {account}, skipping")
        return
    for symbol, amount in balance.items():
        crypto, _ = Crypto.objects.get_or_create(symbol=symbol)
        exchange_balance, _ = ExchangeBalance.objects.get_or_create(exchange_account=account, crypto=crypto)
        exchange_balance.amount = amount
        exchange_balance.save()
    # Prune stale rows.  Bug fix: restrict the query to *this* account —
    # iterating ExchangeBalance.objects.all() (as before) deleted balances
    # belonging to other accounts/users whenever their symbol was absent
    # from this account's API response.
    for exchange_balance in ExchangeBalance.objects.filter(exchange_account=account):
        if exchange_balance.crypto.symbol not in balance:
            exchange_balance.delete()
| nazarpechka/bittracker | tracker/models.py | models.py | py | 5,315 | python | en | code | 0 | github-code | 13 |
11217897400 | #!/usr/bin/env python
# coding: utf-8
# In[3]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# In[4]:
train_csv = pd.read_csv("D:\\OneDrive\\Desktop\\projects\\spooky authentication\\spooky-author-identification\\train.csv")
train_csv.head()
# In[5]:
test_csv = pd.read_csv("D:\\OneDrive\\Desktop\\projects\\spooky authentication\\spooky-author-identification\\test.csv")
test_csv.head()
# In[6]:
test_csv.shape
# In[7]:
train_csv["author"].unique()
# In[8]:
sns.countplot("author",data=train_csv)
# ## Feature engineering
# ### ==> remove stop words
# In[9]:
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
# In[10]:
sw = stopwords.words('english')
# function to remove stop words
# In[11]:
def stopwords(text):
text = [word.lower() for word in text.split() if word.lower() not in sw]
return " ".join(text)
# In[12]:
train_csv['text'] = train_csv['text'].apply(stopwords)
train_csv.head(10)
# ### ==> Remove punctuations
# In[13]:
import string
punctuations = string.punctuation
punctuations
# function to remove punctuations
# In[14]:
def remove_punctuations(text):
for i in text:
if i in punctuations:
text = text.replace(i,"")
return text
# In[15]:
train_csv['text'] = train_csv['text'].apply(remove_punctuations)
train_csv.head()
# ### ==> Lemmatization
# In[16]:
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
# In[17]:
from nltk.corpus import wordnet
def get_wordnet_pos(word):
tag = nltk.pos_tag([word])[0][1][0].upper()
tag_dict = {"J": wordnet.ADJ,
"N": wordnet.NOUN,
"V": wordnet.VERB,
"R": wordnet.ADV}
return tag_dict.get(tag, wordnet.NOUN)
# In[18]:
def lemmatize(text):
text = [lemmatizer.lemmatize(word,get_wordnet_pos(word)) for word in text.split()]
return " ".join(text)
# In[19]:
train_csv['text']=train_csv['text'].apply(lemmatize)
train_csv.head()
# In[20]:
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
tfid_vectorizer = TfidfVectorizer("english")
tfid_vectorizer.fit(train_csv['text'])
dictionary = tfid_vectorizer.vocabulary_.items()
# In[21]:
vocab = []
count = []
# iterate through each vocab and count append the value to designated lists
for key, value in dictionary:
vocab.append(key)
count.append(value)
# store the count in panadas dataframe with vocab as index
vocab_after_stem = pd.Series(count, index=vocab)
# sort the dataframe
vocab_after_stem = vocab_after_stem.sort_values(ascending=False)
vocab_after_stem['zuro']
# plot of the top vocab
top_vacab = vocab_after_stem.head(20)
top_vacab.plot(kind = 'barh', figsize=(8,10), xlim= (19040, 19080))
# ### ==> Vectorizing
# In[22]:
tfid_matrix = tfid_vectorizer.transform(train_csv['text'])
# collect the tfid matrix in numpy array
array = tfid_matrix.todense()
# In[23]:
df = pd.DataFrame(array)
df.head(10)
# In[24]:
df['output'] = train_csv['author']
df['id'] = train_csv['id']
df.head(10)
# In[25]:
x=df.drop(columns = ['output','id'],axis=1)
y = df['output']
# In[26]:
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, log_loss
from sklearn.model_selection import GridSearchCV
# In[27]:
alpha_list1 = np.linspace(0.006, 0.1, 20)
alpha_list1 = np.around(alpha_list1, decimals=4)
alpha_list1
# In[28]:
parameter_grid = [{"alpha":alpha_list1}]
# In[29]:
classifier1 = MultinomialNB()
# gridsearch object using 4 fold cross validation and neg_log_loss as scoring paramter
gridsearch1 = GridSearchCV(classifier1,parameter_grid, scoring = 'neg_log_loss', cv = 4)
# fit the gridsearch
gridsearch1.fit(x, y)
# In[30]:
results1 = pd.DataFrame()
# collect alpha list
results1['alpha'] = gridsearch1.cv_results_['param_alpha'].data
# collect test scores
results1['neglogloss'] = gridsearch1.cv_results_['mean_test_score'].data
# In[31]:
plt.rcParams['figure.figsize'] = (12.0, 6.0)
plt.plot(results1['alpha'], -results1['neglogloss'])
plt.xlabel('alpha')
plt.ylabel('logloss')
plt.grid()
# In[32]:
print("Best parameter: ",gridsearch1.best_params_)
# In[33]:
print("Best score: ",gridsearch1.best_score_)
# In[34]:
tfid_matrix = tfid_vectorizer.transform(test_csv['text'])
# collect the tfid matrix in numpy array
array = tfid_matrix.todense()
# In[35]:
xtest = pd.DataFrame(array)
xtest.head(10)
# In[36]:
mb = MultinomialNB(alpha=0.0208)
mb.fit(x,y)
predictions = mb.predict(xtest)
# In[38]:
predictions
# In[40]:
mb.score(x,y)
# In[ ]:
| SampathPalivela/Spooky-Author-identification | Spooky Authentication .py | Spooky Authentication .py | py | 4,837 | python | en | code | 1 | github-code | 13 |
73117875857 | import itertools
class Solution:
def compare(self, n1, n2):
n1, n2 = int(n1), int(n2)
if n1 > n2:
return 1
elif n1 == n2:
return 0
else:
return -1
def compareVersion(self, version1: str, version2: str) -> int:
v1 = version1.split('.')
v2 = version2.split('.')
versions = [v1, v2]
versions = zip(*itertools.zip_longest(*versions, fillvalue = "0"))
v1, v2 = list(versions)
for i in range(len(v1)):
res = self.compare(v1[i], v2[i])
if res != 0:
return res
else:
continue
if res == 0:
return 0
| abaksy/leetcode-sol | 165/compareVersion.py | compareVersion.py | py | 719 | python | en | code | 1 | github-code | 13 |
71421319057 | #!/usr/bin/env python
#Finding a Spliced Motif
from Bio import SeqIO
# Read the two FASTA records: s (the full sequence) and t (the motif).
with open('data/rosalind_sseq.txt') as f:
    dna = []
    for record in SeqIO.parse(f, 'fasta'):
        dna.append(record.seq)
s, t = dna[0], dna[1]
# memo[i] holds the 1-based position in s of the i-th matched character of t,
# found by scanning greedily left to right from the previous match.
memo = [0]*len(t)
for i in range(len(t)):
    if i-1 >= 0:
        memo[i] = s[memo[i-1]:].find(t[i]) + memo[i-1] + 1
    else:
        memo[i] = s.find(t[i]) + 1
# Write the 1-based indices space-separated, as Rosalind expects.
with open('output.txt', 'w') as f:
    f.write(' '.join(map(str, memo)))
| dogaukinder/dogalind | sseq.py | sseq.py | py | 465 | python | en | code | 0 | github-code | 13 |
39398630615 | from propositions_dictionary import propositions
import random
import pyfiglet
# Game state: the loop runs until the player gives a wrong answer.
end_game = True
score = 0
while end_game == True :
    # Pick two *distinct* random propositions.  Bug fix: the original only
    # re-rolled once on a collision, so A and B could still be identical;
    # re-roll until they differ.
    A, value_A = random.choice(list(propositions.items()))
    B, value_B = random.choice(list(propositions.items()))
    while A == B :
        B, value_B = random.choice(list(propositions.items()))
    # Display the two propositions in an ASCII-art banner.
    ascii_banner = pyfiglet.figlet_format("Higer or Lower")
    print(ascii_banner)
    print(" Who's animal has killed most humans in 2022? \n \n")
    print(f"Compare A: {A} \n ")
    ascii_vs = pyfiglet.figlet_format("VS")
    print(ascii_vs)
    print(f"Against B: {B}\n")
    # Normalize the answer once.  Bug fix: the original only uppercased the
    # input in the outer validity check and then compared the raw input
    # against "A"/"B", so lowercase answers were silently ignored.
    choice = input("Enter A or B: ").upper()
    if choice == "A" :
        if value_A > value_B:
            print("Exact")
            score += 1
        else :
            print("Wrong answer")
            end_game = False
            ascii_score = pyfiglet.figlet_format(f"Final score : {score}")
            print(ascii_score)
    elif choice == "B" :
        if value_B > value_A:
            print("Exact")
            score += 1
        else :
            print("Wrong answer")
            end_game = False
            ascii_score = pyfiglet.figlet_format(f"Final score : {score}")
            print(ascii_score)
    else:
        # Bug fix: the original printed this unconditionally on every turn;
        # it is now only shown for an actually invalid answer.
        print("Invalid input. Please try again.")
| raych78/HigherOrLower | Game.py | Game.py | py | 2,053 | python | fr | code | 0 | github-code | 13 |
14450330336 | import json
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIRequestFactory, APIClient, APITestCase
from mixer.backend.django import mixer
from django.contrib.auth.models import User
from .views import UserModelViewSet
from .models import User
class TestUserModelViewSet(TestCase):
    """Exercise the user endpoints through three different DRF test clients."""

    # API test driven through APIRequestFactory.
    def test_get_list(self):
        request_factory = APIRequestFactory()
        list_request = request_factory.get('/api/users/')
        list_view = UserModelViewSet.as_view({'get': 'list'})
        response = list_view(list_request)
        print(response.data['results'])
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    # API test driven through APIClient.
    def test_get_detail(self):
        created_user = User.objects.create(username='admin', birthday_year=18)
        api_client = APIClient()
        response = api_client.get(f'/api/users/{created_user.id}/')
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    # API test using a mixer-generated fixture.
    def test_get_detail_mixer(self):
        generated_user = mixer.blend(User)
        response = self.client.get(f'/api/users/{generated_user.id}/')
        print(response.data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
# API test using APITestCase's built-in client
class TestProjectModelViewSet(APITestCase):
    """Smoke-test the project list endpoint via APITestCase's client."""

    def test_get_list(self):
        response = self.client.get('/api/projects/')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
| Alex-Ruf/django_rest | backend/users/tests.py | tests.py | py | 1,559 | python | en | code | 0 | github-code | 13 |
24810405446 | from __future__ import annotations
from typing import Dict, TYPE_CHECKING
import discord
import asyncio
import json
import re
import webcolors
from discord.app_commands import command, describe, guilds, checks
from discord.ext import commands
from utils import SERVER_ID, Role
import uuid
if TYPE_CHECKING:
from bot import TMS
listeners = {}


async def listen_for_response(follow_id: int, timeout: int):
    """
    Wait up to *timeout* seconds for the next message from a user.

    A registry entry is created here; the cog's on_message handler fills in
    its "message" slot, which we poll once per second.

    :param follow_id: the user ID to create the listener for
    :param timeout: seconds to wait before giving up
    :return: the found message, or None on timeout
    """
    my_id = str(uuid.uuid4())
    global listeners
    listeners[my_id] = {"follow_id": follow_id, "timeout": timeout, "message": None}
    try:
        for _ in range(timeout):
            await asyncio.sleep(1)
            found = listeners[my_id]["message"]
            if found is not None:
                return found
        return None
    finally:
        # BUG FIX: the original never removed finished entries, so the
        # global registry grew without bound and on_message kept scanning
        # dead listeners.
        del listeners[my_id]
class EmbedFieldManagerButton(discord.ui.Button["EmbedFieldManagerView"]):
    """One button in the embed-field editor.

    ``raw_name`` is the field property it edits ("name", "value", "inline")
    or the "complete" control; ``status`` picks the label/colour.
    """

    def __init__(self, view, name, raw_name, status):
        self.field_manager_view = view
        self.name = name
        self.raw_name = raw_name
        self.status = status
        if self.status == "add":
            super().__init__(label=f"Add {name}", style=discord.ButtonStyle.green)
        elif self.status == "edit":
            super().__init__(label=f"Edit {name}", style=discord.ButtonStyle.gray)
        elif self.status == "toggle":
            super().__init__(label=self.name, style=discord.ButtonStyle.blurple)
        elif self.status == "complete":
            super().__init__(label="Complete Field", style=discord.ButtonStyle.green)

    async def callback(self, interaction: discord.Interaction):
        """Handle a click: finish the field, toggle inline, or prompt for text."""
        if self.raw_name == "complete":
            # A field needs both a name and a value before it can be saved.
            if not (
                "name" in self.field_manager_view.field
                and "value" in self.field_manager_view.field
            ):
                help_message = await self.field_manager_view.channel.send(
                    "This field can not yet be completed, because you haven't defined both the field name and value."
                )
                await asyncio.sleep(10)
                await help_message.delete()
                self.field_manager_view.stop()
            else:
                self.field_manager_view.stopped_status = "completed"
                self.field_manager_view.stop()
            return

        if self.raw_name != "inline":
            await interaction.response.defer()
            info_message = await self.field_manager_view.channel.send(
                f"Please send the new value for the {self.raw_name}. The operation will be cancelled if no operation "
                f"was sent within 2 minutes."
            )
            response_message = await listen_for_response(
                follow_id=self.field_manager_view.user.id,
                timeout=120,
            )
            await info_message.delete()
            if response_message is None:
                # BUG FIX: the original fell through and called
                # response_message.delete() on None; also stop the view so
                # the caller's wait() returns.
                self.field_manager_view.stopped_status = "failed"
                self.field_manager_view.stop()
                return
            await response_message.delete()
            if not len(response_message.content):
                help_message = await self.field_manager_view.channel.send(
                    f"I couldn't find any text response in the message you just sent. Remember that for images, "
                    f"only URLs will work. I can't accept files for the {self.raw_name}!"
                )
                await asyncio.sleep(10)
                await help_message.delete()
                self.field_manager_view.stop()
                return
            # Enforce Discord's per-field length limits.
            limits = {"name": 256, "value": 1024}
            for k, v in limits.items():
                if self.raw_name == k and len(response_message.content) > v:
                    help_message = await self.field_manager_view.channel.send(
                        f"Unfortunately, you can not provide a {k} longer than {v} characters. Please try again!"
                    )
                    await help_message.delete(delay=10)
                    self.field_manager_view.stop()
                    return

        if self.raw_name == "inline":
            # Flip the inline flag in place.
            self.field_manager_view.field["inline"] = not self.field_manager_view.field[
                "inline"
            ]
        else:
            self.field_manager_view.field[self.raw_name] = response_message.content

        # Persist the edited field back into the fields list (append when
        # this is a brand-new field).
        if self.field_manager_view.index >= len(self.field_manager_view.fields):
            self.field_manager_view.fields.append(self.field_manager_view.field)
        else:
            self.field_manager_view.fields[
                self.field_manager_view.index
            ] = self.field_manager_view.field
        self.field_manager_view.embed_update = {
            "fields": self.field_manager_view.fields
        }
        self.field_manager_view.stop()
class EmbedFieldManagerView(discord.ui.View):
    """Sub-view for editing a single embed field (name / value / inline)."""

    stopped_status = None

    def __init__(self, interaction: discord.Interaction, fields, index):
        self.channel = interaction.channel
        self.user = interaction.user
        self.fields = fields
        self.index = index
        self.embed_update = {}
        super().__init__()
        # Editing an existing field starts from its current contents.
        self.field = self.fields[index] if index < len(self.fields) else {}
        # One button per text property; the label reflects whether the
        # property is already set.
        for proper_name, raw_name in (("Name", "name"), ("Value", "value")):
            state = "edit" if raw_name in self.field else "add"
            self.add_item(
                EmbedFieldManagerButton(self, proper_name, raw_name, status=state)
            )
        # Inline defaults to False for new fields.
        self.field.setdefault("inline", False)
        self.add_item(
            EmbedFieldManagerButton(
                self,
                f"Inline: {self.field['inline']} (Toggle)",
                "inline",
                status="toggle",
            )
        )
        self.add_item(
            EmbedFieldManagerButton(self, "Complete", "complete", status="complete")
        )
class EmbedButton(discord.ui.Button["EmbedView"]):
    """A button on the main EmbedView.

    ``update_value`` names either the embed property the button edits or a
    control action ("complete", "cancel", "import", "export", "add_field",
    "edit_field", "remove_field").
    """

    def __init__(self, view, text, style, row, update_value, help_message=""):
        super().__init__(label=text, style=style, row=row)
        self.embed_view = view
        self.update_value = update_value
        self.help_message = help_message

    async def callback(self, interaction: discord.Interaction):
        """Handle a click: finish/abort the session, switch to the field
        manager, or prompt the user for a new property value."""
        # --- session controls -------------------------------------------
        if self.update_value == "complete":
            self.embed_view.stopped_status = "completed"
            self.embed_view.stop()
            return
        if self.update_value == "cancel":
            self.embed_view.stopped_status = "aborted"
            self.embed_view.stop()
            return
        if self.update_value in ["import", "export"]:
            self.embed_view.embed_update[self.update_value] = True
            self.embed_view.stop()
            return
        # --- pre-condition checks ---------------------------------------
        if self.update_value in ["author_icon", "author_url"] and not any(
            value in self.embed_view.embed_dict
            for value in ["author_name", "authorName"]
        ):
            help_message = await self.embed_view.channel.send(
                "You can not set the author URL/icon without first setting the author name."
            )
            await help_message.delete(delay=10)
            self.embed_view.stop()
            return
        if self.update_value == "url" and "title" not in self.embed_view.embed_dict:
            help_message = await self.embed_view.channel.send(
                "You can not set the title URL without first setting the title."
            )
            await help_message.delete(delay=10)
            self.embed_view.stop()
            return
        # --- field management -------------------------------------------
        if self.update_value == "add_field":
            # Discord caps embeds at 25 fields.
            if (
                "fields" in self.embed_view.embed_dict
                and len(self.embed_view.embed_dict["fields"]) == 25
            ):
                help_message = await self.embed_view.channel.send(
                    "You can't have more than 25 embed fields! Don't be so selfish, keeping all of the embed fields "
                    "to yourself!"
                )
                await help_message.delete(delay=10)
                self.embed_view.stop()
                return
            self.embed_view.embed_update["add_field"] = {
                "index": len(self.embed_view.embed_dict["fields"])
                if "fields" in self.embed_view.embed_dict
                else 0
            }
            return self.embed_view.stop()
        if self.update_value in ["edit_field", "remove_field"]:
            # Check to see if any fields actually exist
            if "fields" not in self.embed_view.embed_dict or not len(
                self.embed_view.embed_dict["fields"]
            ):
                await self.embed_view.channel.send(
                    "It appears no fields exist in the embed currently."
                )
                self.embed_view.stopped_status = "failed"
                return self.embed_view.stop()
            await interaction.response.defer()
            fields = self.embed_view.embed_dict["fields"]
            min_num = 1
            max_num = len(fields)
            info_message = await self.embed_view.channel.send(
                f"Please type in the index of the field you would like to "
                f"{'edit' if self.update_value == 'edit_field' else 'remove'}. `1` refers to the first field, `2` to "
                f"the second, etc...\n\nThe minimum accepted value is `1` and the maximum accepted value is `"
                f"{len(fields)}`!"
            )
            while True:
                response_message = await listen_for_response(
                    follow_id=self.embed_view.user.id,
                    timeout=120,
                )
                # BUG FIX: delete the prompt once only (the original
                # re-deleted it on every loop pass, raising NotFound).
                if info_message is not None:
                    await info_message.delete()
                    info_message = None
                # BUG FIX: check for a timeout BEFORE deleting the (possibly
                # None) response message.
                if response_message is None:
                    self.embed_view.stopped_status = "failed"
                    await self.embed_view.channel.send(
                        "I couldn't find any content in your message. Aborting."
                    )
                    return self.embed_view.stop()
                await response_message.delete()
                if not response_message.content.isnumeric():
                    self.embed_view.stopped_status = "failed"
                    await self.embed_view.channel.send(
                        "It appears that your message did not solely contain a number. Please try again."
                    )
                    return self.embed_view.stop()
                if min_num <= int(response_message.content) <= max_num:
                    self.embed_view.embed_update[self.update_value] = {
                        "index": int(response_message.content) - 1
                    }
                    return self.embed_view.stop()
        # --- simple property update -------------------------------------
        await interaction.response.defer()
        info_message = await self.embed_view.channel.send(
            f"Please send the new value for the parameter. The operation will be cancelled if no operation was sent within 2 minutes.\n\n{self.help_message}"
        )
        response_message = await listen_for_response(
            follow_id=self.embed_view.user.id,
            timeout=120,
        )
        await info_message.delete()
        # BUG FIX: the original set stopped_status and then dereferenced the
        # None response anyway; stop cleanly instead.
        if response_message is None:
            self.embed_view.stopped_status = "failed"
            self.embed_view.stop()
            return
        await response_message.delete()
        if not len(response_message.content):
            help_message = await self.embed_view.channel.send(
                "I couldn't find any text response in the message you just sent. Remember that for images, only URLs "
                "will work. I can't accept files for any value!"
            )
            await asyncio.sleep(10)
            await help_message.delete()
            self.embed_view.stop()
            return
        # Enforce Discord's per-property length limits.
        limits = {
            "title": 256,
            "description": 4096,
            "footer_text": 2048,
            "author_name": 256,
        }
        for k, v in limits.items():
            if self.update_value == k and len(response_message.content) > v:
                help_message = await self.embed_view.channel.send(
                    f"Unfortunately, you provided a string that is longer than the allowable length for that value. Please provide a value that is less than {v} characters."
                )
                await help_message.delete(delay=10)
                self.embed_view.stop()
                return
        if self.update_value == "color" and not len(
            re.findall(r"#[0-9a-f]{6}", response_message.content.lower())
        ):
            help_message = await self.embed_view.channel.send(
                f"The color you provide must be a hex code. For example, `#abbb02` or `#222ddd`."
            )
            await help_message.delete(delay=10)
            self.embed_view.stop()
            return
        self.embed_view.embed_update[self.update_value] = response_message.content
        self.embed_view.stop()
class EmbedView(discord.ui.View):
    """Top-level embed editor: one button per editable embed property, plus
    field management and import/export/complete/abort controls.

    After the view stops, ``embed_update`` holds the single change the user
    requested and ``stopped_status`` (None / "completed" / "aborted" /
    "failed") tells the caller how the interaction ended.

    NOTE: the original declared mutable class-level defaults here
    (``embed_update = {}``, ``embed_dict = {}``); they were shared across
    instances and shadowed by __init__ anyway, so they were removed.
    """

    def __init__(self, embed_dict: Dict, interaction: discord.Interaction):
        super().__init__()
        self.embed_dict = embed_dict
        self.embed_update = {}
        self.user = interaction.user
        self.channel = interaction.channel
        self.stopped_status = None
        # Each association maps a button to the embed-dict keys it manages
        # (both snake_case and camelCase spellings are recognised).
        associations = [
            {
                "proper_name": "Title",
                "dict_values": ["title"],
                "row": 0,
                "help": "To remove the title, simply respond with `remove`.",
            },
            {"proper_name": "Description", "dict_values": ["description"], "row": 0},
            {
                "proper_name": "Title URL",
                "dict_values": ["url", "title_url", "titleUrl"],
                "row": 0,
                "help": "To remove the URL from the title, simply respond with `remove`.",
            },
            {
                "proper_name": "Color",
                "dict_values": ["color"],
                "row": 0,
                "help": "Please send the color formatted as a hex color. For Scioly.org-related color codes, see <https://scioly.org/wiki/index.php/Scioly.org:Design>. To remove the color, simply respond with `remove`.",
            },
            {
                "proper_name": "Thumbnail Image (from URL)",
                "dict_values": ["thumbnail_url", "thumbnailUrl"],
                "row": 1,
                "help": "Please note that only HTTPS URLs will work. To remove the thumbnail, respond simply with `remove`.",
            },
            {
                "proper_name": "Image (from URL)",
                "dict_values": ["image_url", "imageUrl"],
                "row": 1,
                "help": "Please note that only HTTPS URLs will work. To remove the image, simply respond with `remove`.",
            },
            {
                "proper_name": "Author Name",
                "dict_values": ["author_name", "authorName"],
                "row": 2,
                "help": "To remove the author name (and therefore, the author icon/URL), simply respond with `remove`.",
            },
            {
                "proper_name": "Author Icon (from URL)",
                "dict_values": ["author_icon", "authorIcon"],
                "row": 2,
                "help": "To remove the author icon, simply respond with `remove`.",
            },
            {
                "proper_name": "Author URL",
                "dict_values": ["author_url", "authorUrl"],
                "row": 2,
                "help": "To remove the URL link from the author value, simply respond with `remove`.",
            },
            {
                "proper_name": "Footer Text",
                "dict_values": ["footer_text", "footerText"],
                "row": 2,
                "help": "To remove the footer text, simply respond with `remove`.",
            },
            {
                "proper_name": "Footer Icon (from URL)",
                "dict_values": ["footer_icon", "footerIcon"],
                "row": 2,
                "help": "To remove the footer icon, simply respond with `remove`.",
            },
        ]
        for association in associations:
            # "Edit" (gray) when the property is already set, "Set" (green)
            # otherwise. (The original built a throwaway list just to test
            # membership; any() expresses the same check directly.)
            if any(
                dict_value in embed_dict
                for dict_value in association["dict_values"]
            ):
                label = f"Edit {association['proper_name']}"
                style = discord.ButtonStyle.gray
            else:
                label = f"Set {association['proper_name']}"
                style = discord.ButtonStyle.green
            self.add_item(
                EmbedButton(
                    self,
                    label,
                    style,
                    association["row"],
                    association["dict_values"][0],
                    association.get("help", ""),
                )
            )
        # Field operations
        self.add_item(
            EmbedButton(self, "Add Field", discord.ButtonStyle.green, 3, "add_field")
        )
        self.add_item(
            EmbedButton(self, "Edit Fields", discord.ButtonStyle.gray, 3, "edit_field")
        )
        self.add_item(
            EmbedButton(
                self, "Remove Field", discord.ButtonStyle.danger, 3, "remove_field"
            )
        )
        # Session controls
        self.add_item(
            EmbedButton(self, "Complete", discord.ButtonStyle.green, 4, "complete")
        )
        self.add_item(
            EmbedButton(self, "Abort", discord.ButtonStyle.danger, 4, "cancel")
        )
        self.add_item(
            EmbedButton(self, "Import", discord.ButtonStyle.blurple, 4, "import")
        )
        self.add_item(
            EmbedButton(self, "Export", discord.ButtonStyle.blurple, 4, "export")
        )
class EmbedCommands(commands.Cog, name="Embeds"):
    """Cog exposing /prepembed, an interactive embed builder for staff."""

    def __init__(self, bot: TMS):
        self.bot = bot
        print("Initialized embed cog.")

    @property
    def display_emoji(self) -> discord.PartialEmoji:
        return discord.PartialEmoji(name="\U00002604")

    @commands.Cog.listener()
    async def on_message(self, message: discord.Message):
        """Feed incoming messages to any active listen_for_response() waiters.

        BUG FIX: this method was missing @commands.Cog.listener(), so the
        event was never dispatched and every prompt in the embed builder
        timed out.
        """
        global listeners
        for listener in listeners.items():
            if message.author.id == listener[1]["follow_id"]:
                listeners[listener[0]]["message"] = message

    @staticmethod
    def _generate_embed(embed_dict: dict) -> discord.Embed:
        """Translate the loosely-typed embed_dict into a discord.Embed.

        Accepts both snake_case and camelCase keys so imported JSON from
        either convention renders the same way.
        """
        new_embed_dict = {}
        if "title" in embed_dict:
            new_embed_dict["title"] = embed_dict["title"]
        if "description" in embed_dict:
            new_embed_dict["description"] = embed_dict["description"]
        if "url" in embed_dict:
            new_embed_dict["url"] = embed_dict["url"]
        if "title_url" in embed_dict:
            new_embed_dict["url"] = embed_dict["title_url"]
        if "titleUrl" in embed_dict:
            new_embed_dict["url"] = embed_dict["titleUrl"]
        # Normalise the various color spellings into one "color" key.
        if "hexColor" in embed_dict:
            embed_dict["color"] = embed_dict["hexColor"]
        if "webColor" in embed_dict:
            try:
                embed_dict["color"] = webcolors.name_to_hex(embed_dict["webColor"])
            except Exception:  # narrowed from a bare except: unknown color name
                pass
        if "color" in embed_dict and isinstance(embed_dict["color"], discord.Color):
            new_embed_dict["color"] = embed_dict["color"]
        if "color" in embed_dict and isinstance(embed_dict["color"], str):
            if embed_dict["color"].startswith("#"):
                new_embed_dict["color"] = discord.Color(
                    int(embed_dict["color"][1:], 16)
                )
            elif len(embed_dict["color"]) <= 6:
                new_embed_dict["color"] = discord.Color(int(embed_dict["color"], 16))
        if not new_embed_dict:
            # Discord rejects fully-empty embeds, so give it a placeholder.
            new_embed_dict[
                "description"
            ] = "This embed contains nothing, so a blank description was set."
        response = discord.Embed(**new_embed_dict)
        if "thumbnail_url" in embed_dict:
            response.set_thumbnail(url=embed_dict["thumbnail_url"])
        if "thumbnailUrl" in embed_dict:
            response.set_thumbnail(url=embed_dict["thumbnailUrl"])
        if "authorName" in embed_dict or "author_name" in embed_dict:
            # Author name must be defined for other author attributes to work.
            author_dict = {}
            if "authorName" in embed_dict:
                author_dict["name"] = embed_dict["authorName"]
            if "author_name" in embed_dict:
                author_dict["name"] = embed_dict["author_name"]
            if "author_url" in embed_dict:
                author_dict["url"] = embed_dict["author_url"]
            if "authorUrl" in embed_dict:
                author_dict["url"] = embed_dict["authorUrl"]
            if "author_icon" in embed_dict:
                author_dict["icon_url"] = embed_dict["author_icon"]
            if "authorIcon" in embed_dict:
                author_dict["icon_url"] = embed_dict["authorIcon"]
            response.set_author(**author_dict)
        if "fields" in embed_dict:
            # If a field is malformed, skip the rest rather than crash the
            # whole preview.
            try:
                for field in embed_dict["fields"]:
                    response.add_field(**field)
            except Exception:  # narrowed from a bare except
                pass
        footer_dict = {}
        if "footer_text" in embed_dict:
            footer_dict["text"] = embed_dict["footer_text"]
        if "footerText" in embed_dict:
            footer_dict["text"] = embed_dict["footerText"]
        if "footer_icon" in embed_dict:
            footer_dict["icon_url"] = embed_dict["footer_icon"]
        if "footerIcon" in embed_dict:
            footer_dict["icon_url"] = embed_dict["footerIcon"]
        if "footerUrl" in embed_dict:
            footer_dict["icon_url"] = embed_dict["footerUrl"]
        if footer_dict:
            response.set_footer(**footer_dict)
        if "image_url" in embed_dict:
            response.set_image(url=embed_dict["image_url"])
        if "imageUrl" in embed_dict:
            response.set_image(url=embed_dict["imageUrl"])
        return response

    @command(description="Staff command. Assembles an embed in a particular channel.")
    @guilds(SERVER_ID)
    @checks.has_any_role(Role.SERVERLEADER, Role.FORMER_SL)
    @describe(channel="The channel to send the message to.")
    async def prepembed(
        self, interaction: discord.Interaction, channel: discord.TextChannel
    ):
        """Helps to create an embed to be sent to a channel."""
        embed_dict = {}
        await interaction.response.send_message("Initializing...")
        complete = False
        embed_field_manager = False
        embed_field_index = None
        response = None
        # State machine: render a preview, show a view, apply the one change
        # the user requested, and repeat until Complete/Abort/failure.
        while not complete:
            response = self._generate_embed(embed_dict)
            view = None
            if embed_field_manager:
                if "fields" not in embed_dict:
                    embed_dict["fields"] = []
                view = EmbedFieldManagerView(
                    interaction, embed_dict["fields"], embed_field_index
                )
            else:
                view = EmbedView(embed_dict, interaction)
            await interaction.edit_original_message(
                content=f"This embed will be sent to {channel.mention}:",
                embed=response,
                view=view,
            )
            await view.wait()
            if view.stopped_status is None:
                if isinstance(view, EmbedFieldManagerView):
                    embed_dict.update(view.embed_update)
                elif isinstance(view, EmbedView):
                    if any(
                        key in view.embed_update for key in ["add_field", "edit_field"]
                    ):
                        # Switch to field manager mode.
                        embed_field_manager = True
                        embed_field_index = view.embed_update[
                            list(view.embed_update.items())[0][0]
                        ]["index"]
                    if "remove_field" in view.embed_update:
                        embed_field_index = view.embed_update[
                            list(view.embed_update.items())[0][0]
                        ]["index"]
                        embed_dict["fields"].pop(embed_field_index)
                    if "import" in view.embed_update:
                        # Import a JSON file as the embed dict.
                        await interaction.edit_original_message(
                            content="Please send the JSON file containing the embed message as a `.json` file.",
                            view=None,
                            embed=None,
                        )
                        file_message = await listen_for_response(
                            follow_id=interaction.user.id,
                            timeout=120,
                        )
                        if file_message is None:
                            await interaction.edit_original_message(
                                content="No file was provided, so the operation was cancelled."
                            )
                            return
                        if (
                            not len(file_message.attachments)
                            or file_message.attachments[0].content_type
                            != "application/json"
                        ):
                            await interaction.edit_original_message(
                                content="I couldn't find a `.json` attachment on your message. Operation aborted."
                            )
                            # BUG FIX: the original fell through and indexed
                            # attachments[0] anyway, raising IndexError.
                            return
                        text = await file_message.attachments[0].read()
                        text = text.decode("utf-8")
                        jso = json.loads(text)
                        await file_message.delete()
                        if "author" in jso:
                            jso["author_name"] = interaction.user.name
                            # BUG FIX: Member.avatar_url_as() no longer exists
                            # in discord.py 2.x (required by the app_commands
                            # imports this file uses); use display_avatar.url.
                            jso["author_icon"] = interaction.user.display_avatar.url
                        embed_dict = jso
                    if "export" in view.embed_update:
                        # Export the embed dict as a JSON file.
                        with open("embed_export.json", "w+") as file:
                            json.dump(embed_dict, file)
                        await interaction.edit_original_message(
                            content="Here is the exported embed! The embed creator will return in approximately 15 "
                            "seconds.",
                            embed=None,
                            view=None,
                        )
                        file_message = await interaction.channel.send(
                            file=discord.File("embed_export.json")
                        )
                        await asyncio.sleep(15)
                        await file_message.delete()
                    removed = False
                    easy_removes = [
                        "title",
                        "url",
                        "color",
                        "thumbnail_url",
                        "image_url",
                        "footer_text",
                        "footer_url",
                        "author_url",
                        "author_icon",
                    ]
                    for removal in easy_removes:
                        if (
                            removal in view.embed_update
                            and view.embed_update[removal] == "remove"
                        ):
                            del embed_dict[removal]
                            removed = True
                    if (
                        "author_name" in view.embed_update
                        and view.embed_update["author_name"] == "remove"
                    ):
                        # Removing the author name also drops its URL/icon.
                        del embed_dict["author_name"]
                        del embed_dict["author_url"]
                        del embed_dict["author_icon"]
                        removed = True
                    if not removed and not any(
                        key in view.embed_update
                        for key in ["add_field", "edit_field", "import", "export"]
                    ):
                        # If a property was just removed, don't set it to the
                        # literal 'remove'; field/import/export updates are
                        # handled above.
                        embed_dict.update(view.embed_update)
            else:
                if view.stopped_status == "failed":
                    await interaction.edit_original_message(
                        content="An error has occurred. You may not have responded to my query in 2 minutes, or your "
                        "message may not have been formatted correctly. Operation cancelled.",
                        embed=None,
                        view=None,
                    )
                    return
                elif view.stopped_status == "aborted":
                    await interaction.edit_original_message(
                        content="The embed creation was aborted.", embed=None, view=None
                    )
                    return
                elif view.stopped_status == "completed":
                    if isinstance(view, EmbedFieldManagerView):
                        # Leave field-manager mode and persist the fields.
                        embed_dict.update(view.embed_update)
                        embed_field_manager = False
                    elif isinstance(view, EmbedView):
                        complete = True
        await channel.send(embed=response)
        await interaction.edit_original_message(
            content="The embed was successfully sent!", embed=None, view=None
        )
async def setup(bot: TMS):
    """discord.py extension entry point — register the embeds cog."""
    cog = EmbedCommands(bot)
    await bot.add_cog(cog)
| TMS-SciO/tms-scioly-bots | cogs/embed.py | embed.py | py | 32,226 | python | en | code | 1 | github-code | 13 |
1750283690 | from functions.validação import lerNota
from functions.visual import arredondamento
# Read the three grades, compute the rounded average and classify it.
notas = [lerNota(f'Nota {n + 1}: ') for n in range(3)]
media = arredondamento(sum(notas) / len(notas))

# Classification thresholds: >= 10 distinction, >= 7 pass, otherwise fail.
if media >= 10:
    situacao = 'Aprovado(a) com distinção'
elif media >= 7:
    situacao = 'Aprovado(a)'
else:
    situacao = 'Reprovado(a)'

separador = '-=' * 15
print(separador)
print(f'Média: {media}')
print(f'Situação: {situacao}')
print(separador)
| Noronha1612/wiki_python-brasil | Estruturas de Decisão/ex20.py | ex20.py | py | 442 | python | pt | code | 0 | github-code | 13 |
2005505321 | import os
from fastapi import FastAPI, Request
from fastapi.responses import FileResponse
from fastapi.middleware.cors import CORSMiddleware
from google.cloud import storage
from google.cloud import vision
from models import Parking
from send_pushover import send_pushover
from orm import access_table
from typing import Union
from pydantic import BaseModel
# Service-account credentials for the Google Cloud clients below
# (Storage + Vision). NOTE(review): the key file is expected in the
# working directory — confirm the deployment layout.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "bsid-user-group5-sa-key.json"
app = FastAPI()
# Allow any origin/method/header; tighten this for production use.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Shared Cloud Storage client reused by all endpoints.
storage_client = storage.Client()
@app.get("/")
def hello_world():
    """Trivial liveness endpoint."""
    greeting = {"Hello": "World"}
    return greeting
@app.get("/buckets/{bucket}/blobs")
def list_blobs_by_bucket(bucket):
    """Return the names of every blob stored in *bucket*."""
    return [item.name for item in storage_client.list_blobs(bucket)]
# Use FileResponse to return the file
@app.get("/buckets/{bucket}/{folder}/{file}")
def return_image_from_bucket(bucket: str, folder: str, file: str):
# Determine download_path
if not file.endswith(".jpg"):
file = file + ".jpg"
download_path = f"/root/adslXmlad/images/{file}"
os.makedirs(os.path.dirname(download_path), exist_ok=True)
# Download file
if not os.path.exists(download_path):
blob = storage_client.get_bucket(bucket).blob(f"{folder}/{file}")
blob.download_to_filename(download_path)
# download_path = f"gs://{bucket}/{folder}/{file}" # Can't support this
return FileResponse(download_path)
@app.get("/license_time/{license_plate}/{current_time}")
def return_parking_number(license_plate: str, current_time: int):
    """Return the parking number occupied by *license_plate* at *current_time*.

    current_time is compared against each entry's entry/exit times
    (timedeltas, via total_seconds()). Returns None when the plate was not
    parked at that moment.
    """
    parking_history = [
        entry
        for entry in access_table("query", "entry_records")
        if license_plate == entry["license_plate"]
    ]
    for entry in parking_history:
        if (
            entry["entry_time"].total_seconds()
            <= current_time
            <= entry["exit_time"].total_seconds()
        ):
            return entry["parking_number"]
    # BUG FIX: the original had an unreachable `return parking_history`
    # after this return; it has been removed.
    return None
@app.get("/image/{parking_number}/{entry_time}")
def return_image_from_public(parking_number: str, entry_time: int):
    """Serve the public scenario image for a parking spot at a given time."""
    bucket = "tsmchack2023-bsid-grp5-public-read-bucket"
    folder = "public_scenario_images"
    # Locate the first blob whose name carries the spot/time marker.
    marker = f"{parking_number}_{entry_time:04d}"
    file = None
    for blob in storage_client.list_blobs(bucket):
        if marker in blob.name:
            file = blob.name.split("/")[-1]
            break
    if file is None:
        return {"result": "File not found"}
    # Cache the image locally before serving it.
    local_path = f"/root/adslXmlad/images/{file}"
    os.makedirs(os.path.dirname(local_path), exist_ok=True)
    if not os.path.exists(local_path):
        source = storage_client.get_bucket(bucket).blob(f"{folder}/{file}")
        source.download_to_filename(local_path)
    return FileResponse(local_path)
@app.get("/image/{parking_number}/{entry_time}/vision")
def return_image_vision_from_public(parking_number: str, entry_time: int):
    """Run Vision OCR on the public scenario image for a spot/time pair."""
    bucket = "tsmchack2023-bsid-grp5-public-read-bucket"
    folder = "public_scenario_images"
    # Locate the first blob whose name carries the spot/time marker.
    marker = f"{parking_number}_{entry_time:04d}"
    file = None
    for blob in storage_client.list_blobs(bucket):
        if marker in blob.name:
            file = blob.name.split("/")[-1]
            break
    if file is None:
        return {"result": "File not found"}
    # Delegate the OCR call to the shared vision endpoint helper.
    return get_vision(bucket, folder, file)
@app.get("/vision/{bucket}/{folder}/{file}")
def get_vision(bucket: str, folder: str, file: str):
    """Run Google Vision text detection on a GCS image and return the text."""
    image_uri = f"gs://{bucket}/{folder}/{file}"
    client = vision.ImageAnnotatorClient()
    image = vision.Image()
    image.source.image_uri = image_uri
    response = client.text_detection(image=image)
    # Robustness fix: an image with no detectable text used to raise
    # IndexError on text_annotations[0]; return an empty result instead.
    if not response.text_annotations:
        return {"vision_result": ""}
    return {"vision_result": response.text_annotations[0].description}
@app.get("/{mode}/{table}/")
def access_table_api(
    mode: str,
    table: str,
    license_plate=None,
    entry_time=None,
    exit_time=None,
    eff_start_time=None,
    eff_end_time=None,
    parking_number=None,
):
    """Generic passthrough to access_table with optional filter parameters.

    Only truthy query parameters are forwarded (matching the original
    per-field `if value:` checks, which collapsed a 12-line if-chain).
    """
    candidates = {
        "license_plate": license_plate,
        "entry_time": entry_time,
        "exit_time": exit_time,
        "eff_start_time": eff_start_time,
        "eff_end_time": eff_end_time,
        "parking_number": parking_number,
    }
    data = {key: value for key, value in candidates.items() if value}
    return access_table(mode, table, data)
# Pushover API
@app.get("/pushover/{msg}")
def send_pushover_api(msg: str):
    """Forward *msg* to the Pushover notification helper."""
    send_pushover(msg)
    outcome = {"result": "success"}
    return outcome
@app.get("/parking/{parking_number}")
def get_parking_number_history(parking_number: str):
    """Return every entry record for a parking spot plus an occupancy colour."""
    history = []
    for entry in access_table("query", "entry_records"):
        if parking_number == entry["parking_number"]:
            history.append(entry)
    # "red" when the most recent record's exit time equals the 1439 sentinel
    # (presumably end-of-day in minutes — TODO confirm against the writer),
    # i.e. the spot is still occupied; "green" otherwise.
    occupied = bool(history) and history[-1]["exit_time"].total_seconds() == 1439
    color = "red" if occupied else "green"
    return {"parking_history": history, "color": color}
# run server
# uvicorn main:app --reload --host 0.0.0.0 --port 8000
| blacksnail789521/TSMC_parking_lot | main.py | main.py | py | 5,594 | python | en | code | 0 | github-code | 13 |
36330642485 | import re
import os
from functools import cmp_to_key
from hw2.tokenize_lemmatize import get_normal_form
from utilz.utils import get_tokenize_res
from utilz.const import limit as LIMIT
class WordInfo:
    """Aggregated statistics for one word across the corpus."""

    def __init__(self):
        # Documents the word occurs in, in processing order.
        self.documents = []
        # Total number of occurrences across all those documents.
        self.general_count = 0

    def append_document_info(self, document_number, document_word_count):
        """Record one more document containing the word."""
        self.documents.append(document_number)
        self.general_count = self.general_count + document_word_count
def read_lemmatization():
    """Parse hw2's lemmatization file into {normal_form: [variant, ...]}.

    Each line holds a normal form followed by its inflected variants,
    separated by whitespace.

    Fixes over the original: the file handle is now closed (it leaked),
    the builtin `map` is no longer shadowed, and str.split() replaces
    re.split(r'\\s+') — the latter produced a trailing '' token that the
    original compensated for by dropping the last element, which silently
    lost the final variant on a last line without a trailing newline.
    """
    mapping = {}
    with open("../hw2/lemmatization.txt", "r") as f:
        for line in f:
            words = line.split()
            if not words:
                continue  # skip blank lines
            mapping[words[0]] = words[1:]
    return mapping
def get_document_index(filename):
    """Return the integer formed by concatenating the digits in *filename*.

    Raises ValueError (int('')) when the name contains no digits, matching
    the original behaviour.
    """
    digits = "".join(ch for ch in filename if ch.isdigit())
    return int(digits)
def sort_index(index):
    """Return *index* reordered by descending total word frequency.

    A plain key function replaces the original cmp_to_key comparator;
    sorted() is stable, so ties keep their relative order either way.
    """
    return dict(
        sorted(index.items(), key=lambda item: item[1].general_count, reverse=True)
    )
def generate_word_map(map):
    """Build a {normal_form: WordInfo} index over the tokenized corpus.

    *map* (name kept for interface compatibility, though it shadows the
    builtin) maps each normal form to the list of its inflected variants.
    Documents with an id above LIMIT are skipped; document ids are the
    filenames minus their 4-character extension.
    """
    from collections import Counter  # local import: module imports untouched

    index = {}
    corpus_dir = '../data/doc-tokenize'
    for filename in os.listdir(corpus_dir):
        doc_id = filename[:-4]
        if int(doc_id) > LIMIT:
            continue
        tokens = get_tokenize_res(corpus_dir + '/' + filename)
        # PERF FIX: count every token once per document instead of calling
        # list.count() per variant (was O(tokens x variants) per lemma).
        token_counts = Counter(tokens)
        seen_lemmas = set()
        for word in tokens:
            normal_form = get_normal_form(word)
            if normal_form in map and normal_form not in seen_lemmas:
                seen_lemmas.add(normal_form)
                count = sum(token_counts[variant] for variant in map[normal_form])
                if normal_form not in index:
                    index[normal_form] = WordInfo()
                index[normal_form].append_document_info(doc_id, count)
        print("end of reading doc ", filename)
    return dict(sorted(index.items()))
def write_index(index, path="index.txt"):
    """Write the inverted index to disk, one word per line followed by the
    numbers of the documents that contain it.

    Args:
        index: mapping word -> object with a ``documents`` list.
        path: output file (default keeps the original hard-coded name).

    The line format ``<word>  <doc> <doc>...`` (double space after the
    word) is preserved exactly. A ``with`` block guarantees the file is
    closed even if a write fails (the original left it open on error).
    """
    with open(path, "w") as out:
        for word, doc_info in index.items():
            line = word + " "
            for doc in doc_info.documents:
                line += " " + str(doc)
            out.write(line + "\n")
def create_index():
    """Build the inverted index end to end: read the lemmatization map,
    scan the tokenized documents, sort by total frequency and write the
    result to index.txt.
    """
    # NOTE: `map` shadows the builtin of the same name (kept for
    # compatibility with the helper signatures).
    map = read_lemmatization()
    index = generate_word_map(map)
    sorted_index = sort_index(index)
    write_index(sorted_index)
if __name__ == '__main__':
create_index() | borisgk98/itits-infosearch | hw3/create_index.py | create_index.py | py | 2,599 | python | en | code | 0 | github-code | 13 |
14570745682 | #!/usr/bin/env python3
from Bio import Entrez
import progressbar
import sys
Entrez.email = sys.argv[1]
Entrez.api_key = sys.argv[2]
query = sys.argv[3]
def get_taxid(result):
    """Return the first linked taxonomy id from an Entrez elink record."""
    first_link = result['LinkSetDb'][0]["Link"][0]
    return first_link["Id"]
def format_taxa(text):
    """Render an efetch taxonomy record as 'Lineage; ScientificName; CommonName'.

    Missing fields become empty strings, preserving the original
    behaviour, but the bare ``except`` clauses (which would also swallow
    KeyboardInterrupt, NameError typos, etc.) are replaced by targeted
    lookups.
    """
    lineage = text.get("Lineage", "")
    sciname = text.get("ScientificName", "")
    try:
        cname = text['OtherNames']['GenbankCommonName']
    except (KeyError, TypeError):
        cname = ""
    return "; ".join((lineage, sciname, cname))
def get_accesions(query):
    """Collect every nucleotide accession matching `query`, paging through
    Entrez esearch results in batches of 10000.

    Returns:
        (accessions, webenv): the accession list and the Entrez history
        session id used by the follow-up fetch calls.
    """
    print("Searching for matching accessions")
    retmax = 10000
    handle = Entrez.esearch(db="nucleotide",
                            retmax=retmax,
                            term=query, usehistory="y",
                            idtype="acc")
    record = Entrez.read(handle)
    handle.close()
    total = int(record["Count"])
    accesions = set(record["IdList"])
    retstart = retmax
    webenv = record["WebEnv"]
    print("Found " + str(total) + " to download.", flush=True)
    with progressbar.ProgressBar(max_value=total) as bar:
        if retstart <= total:
            bar.update(retstart)
        else:
            bar.update(total)
        while retstart <= total:
            # BUG FIX: the paging requests previously hard-coded a
            # different search term ("(((12S OR MiFish) OR ...)"), so
            # every page after the first came from the wrong query.
            # Reuse the caller's `query` instead.
            handle = Entrez.esearch(db="nucleotide",
                                    retstart=retstart,
                                    retmax=retmax,
                                    term=query,
                                    usehistory="y",
                                    webenv=webenv,
                                    idtype="acc")
            record = Entrez.read(handle)
            handle.close()
            accesions = accesions.union(set(record["IdList"]))
            retstart += retmax
            if retstart <= total:
                bar.update(retstart)
            else:
                bar.update(total)
    return list(accesions), webenv
def get_taxids(accesions, webenv=None):
    """Map each nucleotide accession to its taxonomy id via Entrez elink,
    in batches of 1000, with a progress bar.

    NOTE(review): the loop condition ``retstart <= len(accesions)`` lets
    the final iteration issue an elink call with an empty id slice when
    retstart == len(accesions) — confirm Entrez tolerates that.
    """
    print("Fetching corresponding taxonomy ids", flush = True)
    taxids = []
    retmax = 1000
    total = len(accesions)
    with progressbar.ProgressBar(max_value=total) as bar:
        retstart = 0
        if retstart <= total:
            bar.update(retstart)
        else:
            bar.update(total)
        while retstart <= len(accesions):
            handle = Entrez.elink(db = "taxonomy",
                                  dbfrom = "nucleotide",
                                  id = accesions[retstart:min(len(accesions),retmax + retstart)],
                                  webenv = webenv)
            results = Entrez.read(handle)
            handle.close()
            retstart += retmax
            # one taxid per accession, taken from the first link of each record
            taxids += list(map(get_taxid, results))
            if retstart <= total:
                bar.update(retstart)
            else:
                bar.update(total)
    return taxids
def fetch_taxa(tids, webenv=None):
    """Download full taxonomy records for the given taxonomy ids in
    batches of 1000 and return {taxid: "Lineage; ScientificName; CommonName"}.
    """
    print("Downloading taxonomic information.", flush = True)
    taxa_info = {}
    retmax = 1000
    total = len(tids)
    with progressbar.ProgressBar(max_value=total) as bar:
        retstart = 0
        if retstart <= total:
            bar.update(retstart)
        else:
            bar.update(total)
        while retstart <= total:
            handle = Entrez.efetch(db = "taxonomy", webenv = webenv, id = tids[retstart:min(total,retmax + retstart)])
            result = Entrez.read(handle)
            handle.close()
            # index every record of the batch by its TaxId
            for i in result:
                taxa_info[i["TaxId"]] = format_taxa(i)
            retstart += retmax
            if retstart <= total:
                bar.update(retstart)
            else:
                bar.update(total)
    return taxa_info
def fetch_seq(accesion, webenv):
    """Download the FASTA sequences for `accesion` in batches of 100,
    appending them to 12Snmito.fasta and the accession/taxid pairs to
    12Snmito.tax (resume files). Returns (taxids, seqs) in accession order.
    """
    print("Downloading sequences", flush = True)
    seqs = []
    taxids = []
    retmax = 100
    total = len(accesion)
    with progressbar.ProgressBar(max_value=total) as bar:
        retstart = 0
        if retstart <= total:
            bar.update(retstart)
        else:
            bar.update(total)
        while retstart <= total:
            accbatch = accesion[retstart:min(total,retmax + retstart)]
            handle = Entrez.efetch(db = "nucleotide", webenv = webenv, rettype="fasta", retmode = "xml", id = accbatch)
            results = Entrez.read(handle)
            handle.close()
            seqs += list(map(lambda x: x["TSeq_sequence"], results))
            taxids += list(map(lambda x: x['TSeq_taxid'], results))
            # seqs/taxids grow by one batch per pass, so index retstart+i
            # addresses exactly the entries of the current batch
            with open("12Snmito.fasta" , "at") as seqout:
                with open("12Snmito.tax", "at") as taxout:
                    for i in range(len(accbatch)):
                        print(">" + accbatch[i], file = seqout)
                        print(seqs[retstart+i], file = seqout)
                        print(accbatch[i], end = "\t", file = taxout)
                        print(taxids[retstart+i], file = taxout)
            retstart += retmax
            if retstart <= total:
                bar.update(retstart)
            else:
                bar.update(total)
    return taxids, seqs
# --- driver: search, resume from previous runs, fetch, write taxonomy ---
accesions, webenv = get_accesions(query)
# Resume support: accessions already recorded in 12Snmito.tax are skipped.
already_got = []
taxids = []
with open("12Snmito.tax") as taxfile:
    for line in taxfile:
        already_got.append(line.split()[0])
        taxids.append(line.split()[1])
accesions = list(set(accesions) - set(already_got))
# NOTE(review): this overwrites the `taxids` read from the resume file
# with the taxids of the *newly downloaded* accessions, yet the loop
# below indexes both `already_got` and `taxids` with the same i — the
# pairing looks wrong whenever new sequences were fetched. Confirm.
taxids, seqs = fetch_seq(accesions, webenv)
tids = list(set(taxids))
taxa_info = {}
taxa_info.update(fetch_taxa(tids))
tids = list(set(tids) - set(taxa_info.keys()))
with open("12SnMito_full.tax", "wt") as ft:
    for i in range(len(taxids)):
        if taxids[i] in taxa_info:
            print(already_got[i] + "\t" + taxa_info[taxids[i]], file = ft)
        else:
            # fall back to resolving the taxonomy per accession
            try:
                print(already_got[i], taxids[i], i in tids)
                second_tid = get_taxids([already_got[i]])
                print(second_tid)
                gst = fetch_taxa(second_tid)
                print(gst)
                print(already_got[i] + "\t" + list(gst.values())[0], file = ft)
            except:
                # last resort: emit the accession itself as its taxonomy
                print(already_got[i] + "\t" + already_got[i], file = ft)
print("Creating files")
| dwthomas/NCBI-denovo-reference-database | download_ncbi.py | download_ncbi.py | py | 6,251 | python | en | code | 3 | github-code | 13 |
41243305994 | """ Enter docstring for this program
"""
import random
import matplotlib.pyplot as pyplot
def guessingGame1(limit):
    """Play the guessing game with uniform random guesses.

    Draws a secret number in [1, limit] and keeps guessing uniformly at
    random until it is hit; returns the number of guesses used.
    """
    secret = random.randrange(1, limit + 1)
    guess = 0
    attempts = 0
    while guess != secret:
        guess = random.randrange(1, limit + 1)
        attempts += 1
    return attempts
def monteCarlo(limit, trials):
    """Average number of guesses over `trials` simulated games with range `limit`."""
    total_guesses = 0
    for _ in range(trials):
        total_guesses += guessingGame1(limit)
    return total_guesses / trials
def main():
    """Prompt for a trial count and a guessing range, then run the
    Monte-Carlo simulation of the random-guess game.

    NOTE(review): the average returned by monteCarlo is discarded; it is
    probably meant to be printed — confirm against the assignment spec.
    """
    trials = int(input("Enter number of trials for simulation run: "))
    limit = int(input("enter max number for range: "))
    monteCarlo(limit, trials)

main()
| gsakkas/seq2parse | src/tests/parsing_test_37.py | parsing_test_37.py | py | 860 | python | en | code | 8 | github-code | 13 |
38901782236 | # -*- coding: utf-8 -*-
"""
LEAP™ TransmissionCache
=======================
Contributors: Christian Sargusingh
Date: 2020-06-06
Repository: https://github.com/cSDes1gn/LEAP/tree/master/src/tcs
README available in repository root
Version:
Class `TransmissionCache` defines a LIFO caching architecture saving a history of transmitted
frame data. This class also provides the spatial encoding and subsequent hardware mapping
required to send to the arduino microcontroller. As the TCU processes a transmission request,
it calls `cache_map()` sending it the raw binary frame data and the transmission direction
(access point). The `cache_map()` function uses our `SpatialCodec` object to determine the
encoded mapping for all access points and updates the cache with the new frame data. The
corresponding binary hardware mapping is returned to the TCU to send to the arduino serial
monitor. The `check()` function is used to verify APR codes with the contents of the
transmitter cache.
Dependencies
------------
>>> import logging.config
>>> import bitarray
>>> import numpy as np
>>> from tcs.codec.spatial_codec import SpatialCodec
Copyright © 2020 LEAP. All Rights Reserved.
"""
import logging.config
import os
import bitarray
import numpy as np
import math
from tcs.codec.spatial_codec import SpatialCodec
from tcs.event.registry import EventRegistry
class constants:
    """Static configuration: cache depth, LED pinout maps, access points."""
    # maximum number of frame entries retained in the transmission cache
    CACHE_SIZE = 10
    # Hardware mapping for LEAP™ v2 hardware
    # Bottom Layer 0    Middle Layer 1    Middle Layer 2    Top Layer 3
    # --------------    --------------    --------------    -----------
    # 12 13 14 15       28 29 30 31       44 45 46 47       60 61 62 63
    # 8  9  10 11       24 25 26 27       40 41 42 43       56 57 58 59
    # 4  5  6  7        20 21 22 23       36 37 38 39       52 53 54 55
    # 0  1  2  3        16 17 18 19       32 33 34 35       48 49 50 51
    HM = np.array([
        [[12,13,14,15],[8,9,10,11],[4,5,6,7],[0,1,2,3]],
        [[28,29,30,31],[24,25,26,27],[20,21,22,23],[16,17,18,19]],
        [[44,45,46,47],[40,41,42,43],[36,37,38,39],[32,33,34,35]],
        [[60,61,62,63],[56,57,58,59],[52,53,54,55],[48,49,50,51]]
    ])
    # Hardware Map for 2x2x2 LED Cube
    HM2 = np.array([[[2,3],[0,1]],[[6,7],[4,5]]])
    # number of access points (transmission directions, 0..3 clockwise)
    AP = 4
class TransmissionCache:
    """
    LIFO-capped cache of transmitted frames plus the spatial encoding used
    to map frame data onto the transmitter hardware.

    Attributes:
    - `_cache` (`list`): list of cached frame data 4 slots wide for each transmission direction.
    - `_spatial_codec` (`SpatialCodec`): TCU spatial encoder object
    """
    def __init__(self):
        """Initializes empty list for cached frames and stores a reference to the `SpatialCodec`
        object instantiated by the TCU.
        """
        # cube dimension comes from the environment (set by the launcher)
        cube_dim = int(os.environ['DIM'])
        self.log = logging.getLogger(__name__)
        self._spatial_codec = SpatialCodec(cube_dim, constants.HM)
        self._cache = list()
        # register this instance's validate() as the VALIDATE_APR handler
        with EventRegistry() as event:
            event.register('VALIDATE_APR', self.validate)
        self.log.info("%s successfully instantiated", __name__)
    def cache_map(self, bin_frame: bitarray, ap_index: int) -> bitarray:
        """Encode a frame for transmission and record its decodings.

        Encodes `bin_frame` for every access point, caches the decoded
        form of each (used later by validate()), and returns the binary
        hardware map for the requested access point `ap_index`.

        NOTE(review): if `ap_index` is not in range(constants.AP),
        `encoded_frame` is never assigned and the final return raises
        UnboundLocalError — confirm callers always pass 0..3.
        """
        # evict the oldest entry once the cache is full
        if len(self._cache) == constants.CACHE_SIZE:
            self._cache.pop(0) # pop the bottom of the cache
        cache_entry = list()
        # determine the decoded frame data of all access points
        for i in range(constants.AP):
            frame = self._spatial_codec.encode(bin_frame,i)
            if i == ap_index:
                encoded_frame = self._spatial_codec.hardware_map(frame)
            cache_entry.append(self._spatial_codec.decode(frame))
        self._cache.append(cache_entry)
        return encoded_frame
    def validate(self, apr: bitarray):
        """
        This function is an ISR bound to event:VALIDATE_APR. It references the cache and compares
        the apr code sent by a receiver for access point validation to find a match. If a match is
        found the APR_VALIDATED event is triggered with the index
        :param apr: decoded frame data cached by a receiver during calibration
        :returns:
        """
        # FIXME: If cache is a dict based approach the retrieval time is significantly faster
        with EventRegistry() as event:
            # linear scan: i over cached frames, j over access points
            for i in range(len(self._cache)):
                for j in range(len(self._cache[i])):
                    if apr == self._cache[i][j]:
                        # j identifies which access point the receiver saw
                        event.execute('APR_VALIDATED', j)
                        self.log.info("Validated APR key: %s", apr)
                        return
            self.log.info("Revoked APR key: %s", apr)
            event.execute('POST_REQUEST', False, "Access Point Registry invalid. Request declined.")
| LEAP-Systems/tesseract | legacy/codec/cache.py | cache.py | py | 5,815 | python | en | code | 0 | github-code | 13 |
36805441026 | #!/usr/bin/env python
import re, sys
from poserFile import *
def scalingData(source, actor_names):
    """Build a minimal Poser file containing only the scale channels of
    the selected actors.

    Parameters:
        source      -- parsed PoserFile to pull actors from
        actor_names -- names (without the ':N' suffix) of actors to keep,
                       or a false value to keep every actor

    NOTE(review): `.next()` on the select() iterator is Python 2 syntax
    (the whole script uses `file(...)` too); under Python 3 this would
    need `next(...)`. Confirm the target interpreter is Python 2.
    """
    wrapper_text = """\
{
version
    {
    number 4.1
    }
Figure
    {
    }
}"""
    content = PoserFile(wrapper_text.splitlines())
    # insertion point: the Figure section of the skeleton file above
    anchor = content.root.select('Figure').next()
    if actor_names:
        # keep only the requested actors; strip ':N' instance suffixes
        actors = list(actor for actor in source.actors
                      if re.sub(r':\d+$', '', actor.name) in actor_names)
    else:
        actors = list(source.actors)
    for actor in actors:
        # scale, scaleX, scaleY, scaleZ channels only
        anchor.prependSibling(actor.extractChannels('scale[XYZ]?'))
    return content
if __name__ == "__main__":
    # Usage: extractScaling.py <input poser file> <output poser file>
    # NOTE(review): `file(...)` is the removed Python 2 builtin (use
    # open() under Python 3) — consistent with the py2 style of the rest
    # of this script.
    source = PoserFile(file(sys.argv[1]))
    scalingData(source, None).writeTo(file(sys.argv[2], "w"))
| odf/pfool | Python/extractScaling.py | extractScaling.py | py | 822 | python | en | code | 1 | github-code | 13 |
12206536772 | #!/usr/bin/python3
"""
Author: Zixiang Ma
Email: ericma0824@gmail.com
Time created: Sept 20 2019
ReadMe
1. Throught Process
This script contains two phases. Phase 1 is meant for data manipulation where files are parsed
and converted into tuples or dictionary. In phase 2, the script counts the mathching tuple by first
generating all possible synonyms tuples using DFS and compare to the other list of tuples.
2. General Assumption
This script assumes that files are resonably large and can be read into an single process application.
3. Error
The application exit with code 1 on argument parsing error and exit with code 2 on file parsing error.
4. Dependency
This script is developed using Python 3.7.4
5. How to execute
To run this script, execute command: python3 syns.txt file1.txt file2.txt
To specify tuple size, execute command python3 syns.txt file1.txt file2.txt 5
"""
import sys, re, argparse
from collections import deque
def build_synonyms_dictionary(path):
    """Build a dictionary of synonym mappings.

    A line "jog run sprint" produces three entries:
    "jog"->["run","sprint"], "run"->["jog","sprint"],
    "sprint"->["jog","run"].

    Assumptions (as in the original):
    1. every line represents one synonym group;
    2. words are separated by single spaces.

    Arguments:
        path {string} -- path of the synonyms text file

    Returns:
        [dictionary] -- word -> list of its synonyms (lower-cased)

    Fix: the file is now opened with a context manager so it is closed
    even if parsing raises (the original leaked the handle on error).
    """
    dictionary = {}
    with open(path, 'r') as file:
        for line in file:
            # tokenization kept identical to the original implementation
            values = line.lower().replace('\n', '').split(' ')
            for i in range(len(values)):
                dictionary[values[i]] = values[:i] + values[i+1:]
    return dictionary
def build_tuples_from_file(path, tuple_size):
    """Return every consecutive `tuple_size`-word tuple from a text file.

    The text is lower-cased and stripped of punctuation (only
    alphanumerics and whitespace survive) before the sliding window is
    applied.

    Arguments:
        path {string} -- path to the text file
        tuple_size {integer} -- size of the sliding-window tuples

    Returns:
        [list] -- list of word tuples in document order
    """
    with open(path, 'r') as fh:
        raw = fh.read().lower()
    words = re.sub(r'([^\s\w]|_)+', '', raw).split()
    # prime the window with the first tuple_size words, then slide it
    window = deque(words[:tuple_size])
    tuples = [tuple(window)]
    for word in words[tuple_size:]:
        window.popleft()
        window.append(word)
        tuples.append(tuple(window))
    return tuples
def generate_synonmys_tuples_recusively(res, path, words, index):
    """Depth-first expansion of every synonym choice at each position.

    Appends one tuple to `res` per combination; `path` carries the words
    chosen so far and `index` is the position currently being expanded.

    Arguments:
        res {list} -- output list of tuples, extended in place
        path {list} -- prefix of words chosen so far
        words {list} -- per-position lists of candidate words
        index {integer} -- position currently being expanded
    """
    if index >= len(words):
        res.append(tuple(path))
        return
    for candidate in words[index]:
        generate_synonmys_tuples_recusively(res, path + [candidate], words, index + 1)
def build_synonyms_tuples(word_tuples, dictionary):
    """Return every tuple obtainable by replacing each word of
    `word_tuples` with any of its synonyms (the word itself included,
    listed after its synonyms, as in the original DFS ordering).

    Arguments:
        word_tuples {tuple} -- the original word tuple
        dictionary {dictionary} -- word -> list of synonyms

    Returns:
        [list] -- list of synonym tuples

    The hand-written recursive DFS is replaced by itertools.product,
    which enumerates exactly the same combinations in the same order
    (rightmost position varies fastest) with less code.
    """
    from itertools import product  # local import keeps the block self-contained

    choices = []
    for word in word_tuples:
        if word in dictionary:
            choices.append(dictionary[word] + [word])
        else:
            choices.append([word])
    return [tuple(combo) for combo in product(*choices)]
def count_mathcing_tuples(plagiarized, source, synonyms):
    """Count synonym-tuple matches between the two documents.

    For every tuple of the examined document, each synonym variant that
    also appears in `source` adds one to the total (so a single original
    tuple can contribute more than once).

    Arguments:
        plagiarized {list} -- tuples of the document examined for plagiarism
        source {list} -- tuples of the possible plagiarism source
        synonyms {dictionary} -- word -> list of synonyms

    Returns:
        [integer] -- number of synonym tuples found in `source`
    """
    matches = 0
    for candidate in plagiarized:
        for variant in build_synonyms_tuples(candidate, synonyms):
            if variant in source:
                matches += 1
    return matches
if __name__== "__main__":
    # CLI: syns.txt file_examined file_source [tuple_size]
    parser = argparse.ArgumentParser()
    parser.add_argument('synonyms', type=str,
                        help='path to synonmys text file')
    parser.add_argument('file_plagiarized', type=str,
                        help='path to the file examined for plagiarism')
    parser.add_argument('file_source', type=str,
                        help='path to file of possible plagiarism sources')
    parser.add_argument('tuple_size', type=int, default=3, nargs='?',
                        help='size of tuples used for matching, optional, default to 3.')
    try:
        options = parser.parse_args()
    except:
        # exit code 1: argument parsing error (see module docstring)
        parser.print_help()
        sys.exit(1)
    try:
        dictionary = build_synonyms_dictionary(options.synonyms)
        tuple_plagiarized = build_tuples_from_file(options.file_plagiarized, options.tuple_size)
        tuple_source = build_tuples_from_file(options.file_source, options.tuple_size)
    except Exception as e:
        # exit code 2: file parsing error (see module docstring)
        print(e)
        sys.exit(2)
    tuple_match_count = count_mathcing_tuples(tuple_plagiarized, tuple_source, dictionary)
    # percentage of examined tuples that have a synonym match in the source
    print("%f%%" % (100 * (tuple_match_count)/float(len(tuple_plagiarized))))
37248696444 | import torch
import jax
from jax import numpy as jnp
def onehot(labels, num_classes, on_value=1.0, off_value=0.0):
    """One-hot encode integer `labels` for torch tensors or jax arrays.

    Returns a float32 array/tensor of shape labels.shape + (num_classes,)
    with `on_value` at the label position and `off_value` elsewhere.

    Fix: the torch branch previously returned the raw int64 output of
    F.one_hot and silently ignored on_value/off_value, inconsistent with
    the jax branch; both branches now honour the same contract.
    """
    if torch.is_tensor(labels):
        x = torch.nn.functional.one_hot(labels, num_classes=num_classes)
        # x is {0,1}; map 1 -> on_value and 0 -> off_value, in float32,
        # matching the jax branch below.
        return x.to(torch.float32) * (on_value - off_value) + off_value
    x = (labels[..., None] == jnp.arange(num_classes)[None])
    x = jax.lax.select(
        x, jnp.full(x.shape, on_value), jnp.full(x.shape, off_value))
    x = x.astype(jnp.float32)
    return x
def cross_entropy_loss(logits, labels):
    """Mean softmax cross-entropy between `logits` and integer `labels`."""
    log_probs = jax.nn.log_softmax(logits)
    targets = onehot(labels, num_classes=logits.shape[-1])
    return -jnp.mean(jnp.sum(log_probs * targets, axis=1))
def compute_metrics(*, logits, labels):
    """Return {'loss', 'accuracy'} for a batch of logits vs integer labels."""
    predictions = jnp.argmax(logits, -1)
    return {
        'loss': cross_entropy_loss(logits=logits, labels=labels),
        'accuracy': jnp.mean(predictions == labels),
    }
18391427814 | import tornado.ioloop
from tornado.web import RequestHandler
import requests
import json
JOKE_URL = "https://official-joke-api.appspot.com/jokes/programming/random"
VALIDATE_KEY = "http://localhost:8080/api/validate?api_key={}"
class MainHandler(RequestHandler):
    """GET /api/joke?api_key=... — validate the key against the local
    validation service, then proxy a random programming joke from the
    public joke API."""

    async def get(self):
        api_key = self.get_argument("api_key")
        validate_res = requests.get(VALIDATE_KEY.format(api_key), timeout=3)
        if validate_res.status_code != 200:
            # FIX: previously this case wrote no response at all, so the
            # client got an empty 200 body when the validation service
            # was down.
            self.write({"success": False, "message": "validation service unavailable", "data": {}})
            return
        validate_body = json.loads(validate_res.content)
        if not validate_body.get("success"):
            self.write({"success": False, "message": "not a valid api_key", "data": {}})
            return
        res = requests.get(JOKE_URL, timeout=2)
        if res.status_code == 200:
            joke = json.loads(res.content)
            self.write({"success": True, "message": "found a random joke for you", "data": joke})
        else:
            self.write({"success": False, "message": "cant fetch joke, this is not a joke ", "data": {}})
def make_app():
    """Build the tornado application with the single joke route."""
    routes = [(r"/api/joke", MainHandler)]
    return tornado.web.Application(routes)
if __name__ == "__main__":
    try:
        # serve on all interfaces, port 8081
        app = make_app()
        app.listen(8081)
        tornado.ioloop.IOLoop.current().start()
    except KeyboardInterrupt:
        # Ctrl-C: stop the IOLoop instead of dumping a traceback
        print("shutting down server gracefully")
        tornado.ioloop.IOLoop.current().stop()
9087030040 | #https://www.acmicpc.net/problem/11000
#백준 11000번 강의실 배정(정렬)
#import sys
#input = sys.stdin.readline
import heapq
# BOJ 11000: minimum number of lecture rooms needed so that no two
# lectures overlap. Greedy over lectures sorted by (start, end), with a
# min-heap of the current rooms' end times.
n = int(input())
classes = []
for _ in range(n):
    classes.append(list(map(int, input().split())))
classes.sort(key = lambda x : (x[0],x[1]))
cnt = 1
lesson = []  # min-heap of end times of occupied rooms
heapq.heappush(lesson, classes[0][1])
for i in range(1,n):
    start, end = classes[i]
    if start >= lesson[0]:
        # earliest-finishing room is free again: reuse it
        heapq.heappop(lesson)
    else:
        # every room busy: open a new one
        cnt+=1
    heapq.heappush(lesson, end)
# NOTE(review): `cnt` (the answer) is never printed; BOJ expects the
# result on stdout — confirm a print(cnt) is missing.
| MinsangKong/DailyProblem | 06-01/2.py | 2.py | py | 516 | python | en | code | 0 | github-code | 13 |
33650008966 | '''
This Module read video file and process each frame.
processed frame again write to new video object file.
'''
import cv2
import numpy as np
import download
import config
import logging
import transformer
import result
logger=logging.getLogger(__name__)
def main():
    '''
    Download a video from youtube, apply gamma-correction brightness
    adjustment to every frame and write the result to a new video file.

    Raises:
        Exception: if the video cannot be opened or frame processing fails.
    '''
    try:
        url = config.get_conf_value('video','url')
        input_file_path = download.save_the_video(url)
        cap = cv2.VideoCapture(input_file_path)
        # check if the file was opened
        if not cap.isOpened():
            logger.error('video could not be opened')
            exit(-1)
        frame_width = int(cap.get(3))
        frame_height = int(cap.get(4))
        frame_size = (frame_width,frame_height)
        fps = cap.get(cv2.CAP_PROP_FPS)
        video_writer = result.get_video_writer(fps,frame_size)
        while True:
            # read frame
            ret, frame = cap.read()
            # check if frame is empty (end of stream)
            if not ret:
                break
            frame = transformer.apply_brightness_adjustment_gamma(frame)
            video_writer.write(frame)
            if cv2.waitKey(30)&0xFF == ord('q'):
                break
        # FIX: release the writer too — without it the output container
        # is never finalised and the file can be unplayable.
        video_writer.release()
        cap.release()
        cv2.destroyAllWindows()
    except Exception as e:
        # FIX: logger.error("oops: ", e) passed the exception as a
        # %-argument with no placeholder; logger.exception records the
        # message and the traceback correctly.
        logger.exception("oops: %s", e)
        raise Exception("Oops: something went wrong while processing video")
| shubham1809/pla-image-processing-brightness_adjust | playment/processing.py | processing.py | py | 1,706 | python | en | code | 0 | github-code | 13 |
23080422 | ############ Chaplygin gas
######## definition of Friedmann universe filled with CG
import numpy as np
def CG(a, t, A, B):
    """Friedmann equation da/dt for a universe filled with Chaplygin gas.

    `t` is unused but required by the odeint(f, y0, t, ...) signature.
    """
    inner = np.sqrt(A * a**6 + B)
    return np.sqrt(inner) / np.sqrt(3. * a)
###### parameters
# Chaplygin-gas constants (A from the equation of state, B the
# integration constant) and initial scale factor a0.
A = 1./3
B=0.3
a0 = 0.1
##### time
# integration grid: 101 samples on t in [0, 16]
t = np.linspace(0, 16, 101)
from scipy.integrate import odeint
# integrate the Friedmann equation da/dt = CG(a, t, A, B)
sol1 = odeint(CG, a0, t, args=(A, B))
import matplotlib.pyplot as plt
######### definition of density
def d1(a):
    """Chaplygin-gas energy density as a function of the scale factor.

    Uses the module-level constants A and B.
    """
    density = np.sqrt(A + B / a**6.)
    return density
# plot the density evaluated along the integrated scale factor a(t)
plt.plot(t, d1(sol1[:]) )
plt.xlabel(r'time ($t$)')
plt.ylabel(r'density ($\rho$)')
# NOTE(review): legend() without any labelled artists draws nothing and
# may warn — confirm whether a label= was intended on plot().
plt.legend()
axes = plt.gca()
axes.set_ylim([-2,20])
plt.show()
| rubbyaworka/The_codes_of_Chaplygin_gas_model | The_codes_of_Chaplygin_gas_models/CG_density_on_time.py | CG_density_on_time.py | py | 709 | python | en | code | 0 | github-code | 13 |
38028110388 | # Location & number of raw data file:
# ATLAS Athena jobOptions script: names like include(), Service() and
# Algorithm() are injected by the Athena framework at execution time.
#runnumber=1000613
runnumber= 1000536
#runnumber = 1000855
RawDataDir="/castor/cern.ch/atlas/testbeam/combined/2004/"
#RawDataDir="/data/calo"
RawDataFilePrefix="daq_SFI-51_calo"
#include( "AthenaCommon/Atlas.UnixStandardJob.py" )
include( "ByteStreamCnvSvc/TBEventSelector_jobOptions.py" )
# File location: point the bytestream input service at the raw data
ByteStreamInputSvc=Service("ByteStreamInputSvc")
ByteStreamInputSvc.InputDirectory += [RawDataDir]
ByteStreamInputSvc.FilePrefix += [RawDataFilePrefix]
ByteStreamInputSvc.RunNumber = [runnumber]
# schedule the BCID-checking algorithm
theApp.Dlls+=["TBCnv"]
theApp.TopAlg+=["TBCheckBCIDs"]
TBCheckBCIDs=Algorithm("TBCheckBCIDs")
TBCheckBCIDs.OutputLevel=DEBUG
# -- use root histos --
theApp.Dlls += [ "RootHistCnv" ]
theApp.HistogramPersistency = "ROOT"
NTupleSvc = Service( "NTupleSvc" )
#NTOutputFileString="FILE1 DATAFILE='%(file)s' OPT='NEW'" % {"file" : NTOutputFileName}
NTupleSvc.Output = [ "FILE1 DATAFILE='BCIDS.root' OPT='NEW'" ]
#NTupleSvc.Output = [NTOutputFileString]
#NTupleSvc.OutputLevel=DEBUG
# Number of events to be processed (default is 10)
theApp.EvtMax = 100
# Set output level threshold (2=DEBUG, 3=INFO, 4=WARNING, 5=ERROR, 6=FATAL )
MessageSvc = Service( "MessageSvc" )
MessageSvc.OutputLevel = ERROR
| rushioda/PIXELVALID_athena | athena/TestBeam/TBCnv/share/CheckBCIDs.py | CheckBCIDs.py | py | 1,380 | python | en | code | 1 | github-code | 13 |
20981852185 | import networkx as nx
import sys
import numpy as np
import matplotlib.colors as colors
import random
import matplotlib.pyplot as plt
def internal_density(G, partition):
    """Node-weighted average edge density over all communities.

    `partition` maps node -> community label; each community contributes
    its subgraph density weighted by its node count, normalised by the
    total number of nodes.
    """
    total = 0.
    n = G.number_of_nodes()
    for community in set(partition.values()):
        members = [node for node, com in partition.items() if com == community]
        sub = G.subgraph(members)
        total += sub.number_of_nodes() * nx.density(sub)
    return total / n
def edges_inside(G, partition):
    """Fraction of all edges of G that fall inside some community."""
    inside = 0.
    m = G.number_of_edges()
    for community in set(partition.values()):
        members = [node for node, com in partition.items() if com == community]
        inside += G.subgraph(members).number_of_edges()
    return inside / m
def TPR(G, partition):
    """Triangle participation ratio: node-weighted fraction of triads.

    NOTE(review): np.sum(nx.triangles(H).values()) only works where
    triangles() returns a plain dict whose .values() is a list
    (Python 2 / networkx 1.x); under Python 3 a dict view is not summable
    by np.sum — confirm the target environment.
    """
    val = 0.
    n = G.number_of_nodes()
    for com in set(partition.values()) :
        list_nodes = [nodes for nodes in partition.keys()
                                    if partition[nodes] == com]
        H = G.subgraph(list_nodes)
        ns = H.number_of_nodes()
        if ns > 3 :
            # nx.triangles counts each triangle 3 times (once per vertex);
            # the denominator ns*(ns-1)*(ns-2)/6*3 accounts for that.
            triangles = float(np.sum(nx.triangles(H).values()))/((ns)*(ns-1)*(ns-2)/6*3)
            val += ns*triangles
    return val/n
def expansion(G, partition):
    """Average number of edges per node pointing outside its community.

    NOTE(review): G.degree(list_nodes).values() assumes the dict return
    of networkx 1.x; networkx 2.x returns a DegreeView without .values()
    — confirm the installed networkx version.
    """
    val = 0.
    n = G.number_of_nodes()
    for com in set(partition.values()) :
        list_nodes = [nodes for nodes in partition.keys()
                                    if partition[nodes] == com]
        H = G.subgraph(list_nodes)
        ds = np.sum(G.degree(list_nodes).values())
        ns = H.number_of_nodes()
        ms = H.number_of_edges()
        # boundary edges = total degree of members minus twice the
        # internal edges
        cs = float(ds - 2*ms)
        val += cs
    return val/n
def cut_ratio(G, partition):
    """Fraction of existing boundary edges out of all possible edges
    leaving each cluster, averaged over nodes.

    NOTE(review): same networkx 1.x degree API caveat as expansion();
    also divides by (n - ns), which fails if a community spans the whole
    graph — confirm inputs always have >= 2 communities.
    """
    val = 0.
    n = G.number_of_nodes()
    for com in set(partition.values()) :
        list_nodes = [nodes for nodes in partition.keys()
                                    if partition[nodes] == com]
        H = G.subgraph(list_nodes)
        ds = np.sum(G.degree(list_nodes).values())
        ns = H.number_of_nodes()
        ms = H.number_of_edges()
        # boundary edges leaving the community
        cs = float(ds - 2*ms)
        val += cs / (n - ns)
    return val/n
def numofcommunities(partition):
    """Number of distinct communities in the node -> community mapping."""
    distinct_labels = set(partition.values())
    return len(distinct_labels)
def drawpart(G, partition, name):
    """Draw G with one randomly chosen colour per community and save the
    figure to `name`.

    NOTE(review): indexing colors.cnames.values() only works on Python 2
    (dict views are not subscriptable on Python 3); `size` and `count`
    are computed but never used.
    """
    size = float(len(set(partition.values())))
    pos = nx.spring_layout(G)
    count = 0
    for com in set(partition.values()) :
        count += 1
        color = colors.cnames.values()[random.randint(0,len(colors.cnames.values())-1)]
        list_nodes = [nodes for nodes in partition.keys()
                                    if partition[nodes] == com]
        nx.draw_networkx_nodes(G, pos, list_nodes, node_size = 20, node_color=color )
    nx.draw_networkx_edges(G,pos, alpha=0.5)
    plt.savefig(name)
    plt.clf()
71594429138 | from backend.stats.database import DBClient
from nitrotype import Racer
import datetime
dbclient = DBClient()
c = dbclient.db.racers
def create_profile(username):
    """Create and persist a fresh stats document for a NitroType racer.

    Fetches the current race count and average WPM from the NitroType API
    (network call), seeds the history arrays plus one hourly sample, and
    inserts the document into the `racers` collection.
    """
    racer = Racer(username)
    races = (racer.races)
    wpm = racer.wpm_average
    data = {'username': username, "races": [races], "wpms": [wpm], 'hourly': [[races,wpm, datetime.datetime.today().hour]]}
    c.insert_one(data)
    return data
def get_stats(username):
    """Return the stored stats document for `username`, creating it on
    first access.
    """
    data = c.find_one({"username": username})
    # `is None` instead of `== None`: identity is the correct (PEP 8)
    # test for a missing document.
    if data is None:
        data = create_profile(username)
    return data
def update_stats_all():
    """Append the latest races/WPM sample to every stored racer document.

    NOTE(review): one NitroType API request per racer with no error
    handling — a single failed lookup aborts the whole batch.
    """
    docs = c.find()
    for doc in docs:
        username = doc['username']
        racer = Racer(username)
        doc['races'].append(racer.races)
        doc['wpms'].append(racer.wpm_average)
        dbclient.update_array(c, {'username':username}, doc)
def update_stats_hourly():
    """Append an hourly [races, wpm, hour-of-day] sample to every racer
    document in the collection.
    """
    docs = c.find()
    for doc in docs:
        username = doc['username']
        racer = Racer(username)
        doc['hourly'].append([racer.races, racer.wpm_average, datetime.datetime.today().hour])
        dbclient.update_array(c, {'username':username}, doc)
15595409637 | import pandas as pd
import numpy as np
import os
def append_function(cant_ejec):
    """For each run i in [0, cant_ejec), stack the three per-run delta
    CSVs (dnn accuracy + both hidden-layer percentages) row-wise and feed
    the result to append_column.

    NOTE: DataFrame.append was removed in pandas 2.x; this mirrors the
    original usage and therefore assumes pandas < 2.
    """
    for run in np.arange(int(cant_ejec)):
        dnn = pd.read_csv('dnn_accuracy_delta' + str(run) + '.csv', header=None)
        hidden0 = pd.read_csv('perc_hidden0_delta' + str(run) + '.csv', header=None)
        hidden1 = pd.read_csv('perc_hidden1_delta' + str(run) + '.csv', header=None)
        stacked = dnn.append(hidden0, ignore_index=True)
        stacked = stacked.append(hidden1, ignore_index=True)
        append_column(stacked, run)
def append_column(dataframe, i):
    """Join `dataframe` as a new column group onto deltas.csv, creating
    the file on the first call.

    NOTE(review): the join suffixes are built from i-1 and i, so column
    names only stay unique across consecutive calls — confirm this is
    the intended disambiguation scheme.
    """
    if os.path.isfile('deltas.csv'):
        dff = pd.read_csv('deltas.csv', header=None)
        df_join = dff.join(dataframe, lsuffix=str(i-1)+'_exec', rsuffix=str(i)+'_exec')
        df_join.to_csv('deltas.csv', sep=',', header=None, index=None)
    else:
        # first run: the dataframe becomes the initial deltas.csv
        dataframe.to_csv('deltas.csv', sep=',', header=None, index=None)
# Entry point: merge the per-run delta CSVs of 50 executions into deltas.csv.
append_function(50)
| M2odrigo/model_config | data/test/append_function.py | append_function.py | py | 1,130 | python | en | code | 0 | github-code | 13 |
17041645184 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.InsPerson import InsPerson
from alipay.aop.api.domain.InsClaimPolicy import InsClaimPolicy
from alipay.aop.api.domain.InsPerson import InsPerson
class AlipayInsSceneClaimApplyModel(object):
    def __init__(self):
        """Initialise every request field of the claim-apply model to None.

        Fields mirror the Alipay insurance claim-apply API parameters and
        are populated through the property setters below.
        """
        self._accident_address = None
        self._accident_desc = None
        self._accident_time = None
        self._beneficiary = None
        self._bill_title = None
        self._biz_data = None
        self._claim_fee = None
        self._claim_policy_list = None
        self._out_biz_no = None
        self._out_request_no = None
        self._prod_code = None
        self._reporter = None
    # --- simple pass-through accessors for the accident fields ---------
    @property
    def accident_address(self):
        return self._accident_address
    @accident_address.setter
    def accident_address(self, value):
        self._accident_address = value
    @property
    def accident_desc(self):
        return self._accident_desc
    @accident_desc.setter
    def accident_desc(self, value):
        self._accident_desc = value
    @property
    def accident_time(self):
        return self._accident_time
    @accident_time.setter
    def accident_time(self, value):
        self._accident_time = value
    @property
    def beneficiary(self):
        return self._beneficiary
    @beneficiary.setter
    def beneficiary(self, value):
        # accepts either an InsPerson instance or a dict, which is
        # coerced through InsPerson.from_alipay_dict
        if isinstance(value, InsPerson):
            self._beneficiary = value
        else:
            self._beneficiary = InsPerson.from_alipay_dict(value)
    # --- billing / claim fields ----------------------------------------
    @property
    def bill_title(self):
        return self._bill_title
    @bill_title.setter
    def bill_title(self, value):
        self._bill_title = value
    @property
    def biz_data(self):
        return self._biz_data
    @biz_data.setter
    def biz_data(self, value):
        self._biz_data = value
    @property
    def claim_fee(self):
        return self._claim_fee
    @claim_fee.setter
    def claim_fee(self, value):
        self._claim_fee = value
    @property
    def claim_policy_list(self):
        return self._claim_policy_list
    @claim_policy_list.setter
    def claim_policy_list(self, value):
        # list elements may be InsClaimPolicy instances or dicts; dicts
        # are coerced. NOTE(review): a non-list value is silently
        # ignored — confirm that matches the SDK convention.
        if isinstance(value, list):
            self._claim_policy_list = list()
            for i in value:
                if isinstance(i, InsClaimPolicy):
                    self._claim_policy_list.append(i)
                else:
                    self._claim_policy_list.append(InsClaimPolicy.from_alipay_dict(i))
    # --- request bookkeeping fields ------------------------------------
    @property
    def out_biz_no(self):
        return self._out_biz_no
    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value
    @property
    def out_request_no(self):
        return self._out_request_no
    @out_request_no.setter
    def out_request_no(self, value):
        self._out_request_no = value
    @property
    def prod_code(self):
        return self._prod_code
    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value
    @property
    def reporter(self):
        return self._reporter
    @reporter.setter
    def reporter(self, value):
        # same InsPerson coercion as the beneficiary setter
        if isinstance(value, InsPerson):
            self._reporter = value
        else:
            self._reporter = InsPerson.from_alipay_dict(value)
def to_alipay_dict(self):
params = dict()
if self.accident_address:
if hasattr(self.accident_address, 'to_alipay_dict'):
params['accident_address'] = self.accident_address.to_alipay_dict()
else:
params['accident_address'] = self.accident_address
if self.accident_desc:
if hasattr(self.accident_desc, 'to_alipay_dict'):
params['accident_desc'] = self.accident_desc.to_alipay_dict()
else:
params['accident_desc'] = self.accident_desc
if self.accident_time:
if hasattr(self.accident_time, 'to_alipay_dict'):
params['accident_time'] = self.accident_time.to_alipay_dict()
else:
params['accident_time'] = self.accident_time
if self.beneficiary:
if hasattr(self.beneficiary, 'to_alipay_dict'):
params['beneficiary'] = self.beneficiary.to_alipay_dict()
else:
params['beneficiary'] = self.beneficiary
if self.bill_title:
if hasattr(self.bill_title, 'to_alipay_dict'):
params['bill_title'] = self.bill_title.to_alipay_dict()
else:
params['bill_title'] = self.bill_title
if self.biz_data:
if hasattr(self.biz_data, 'to_alipay_dict'):
params['biz_data'] = self.biz_data.to_alipay_dict()
else:
params['biz_data'] = self.biz_data
if self.claim_fee:
if hasattr(self.claim_fee, 'to_alipay_dict'):
params['claim_fee'] = self.claim_fee.to_alipay_dict()
else:
params['claim_fee'] = self.claim_fee
if self.claim_policy_list:
if isinstance(self.claim_policy_list, list):
for i in range(0, len(self.claim_policy_list)):
element = self.claim_policy_list[i]
if hasattr(element, 'to_alipay_dict'):
self.claim_policy_list[i] = element.to_alipay_dict()
if hasattr(self.claim_policy_list, 'to_alipay_dict'):
params['claim_policy_list'] = self.claim_policy_list.to_alipay_dict()
else:
params['claim_policy_list'] = self.claim_policy_list
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
if self.out_request_no:
if hasattr(self.out_request_no, 'to_alipay_dict'):
params['out_request_no'] = self.out_request_no.to_alipay_dict()
else:
params['out_request_no'] = self.out_request_no
if self.prod_code:
if hasattr(self.prod_code, 'to_alipay_dict'):
params['prod_code'] = self.prod_code.to_alipay_dict()
else:
params['prod_code'] = self.prod_code
if self.reporter:
if hasattr(self.reporter, 'to_alipay_dict'):
params['reporter'] = self.reporter.to_alipay_dict()
else:
params['reporter'] = self.reporter
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayInsSceneClaimApplyModel()
if 'accident_address' in d:
o.accident_address = d['accident_address']
if 'accident_desc' in d:
o.accident_desc = d['accident_desc']
if 'accident_time' in d:
o.accident_time = d['accident_time']
if 'beneficiary' in d:
o.beneficiary = d['beneficiary']
if 'bill_title' in d:
o.bill_title = d['bill_title']
if 'biz_data' in d:
o.biz_data = d['biz_data']
if 'claim_fee' in d:
o.claim_fee = d['claim_fee']
if 'claim_policy_list' in d:
o.claim_policy_list = d['claim_policy_list']
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
if 'out_request_no' in d:
o.out_request_no = d['out_request_no']
if 'prod_code' in d:
o.prod_code = d['prod_code']
if 'reporter' in d:
o.reporter = d['reporter']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayInsSceneClaimApplyModel.py | AlipayInsSceneClaimApplyModel.py | py | 7,688 | python | en | code | 241 | github-code | 13 |
39540885695 | # modified from http://deeplearning.net/tutorial/
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
class HiddenLayer(object):
    """Fully connected layer computing ``activation(dot(input, W) + b)``.

    When no weights are supplied, W is drawn uniformly from
    [-sqrt(6/(n_in+n_out)), +sqrt(6/(n_in+n_out))] -- the [Xavier10]
    heuristic for tanh, scaled by 4 for sigmoid -- and b starts at zero.
    """
    def __init__(self, rng, input, n_in, n_out, W=None, b=None,
                 activation=T.tanh):
        """Build the layer.

        :param rng: numpy.random.RandomState used to initialise W
        :param input: symbolic tensor of shape (n_examples, n_in)
        :param n_in: dimensionality of the input
        :param n_out: number of hidden units
        :param W: optional pre-built shared weight matrix (n_in, n_out)
        :param b: optional pre-built shared bias vector (n_out,)
        :param activation: elementwise nonlinearity, or None for linear output
        """
        self.input = input
        if W is None:
            # Uniform init; cast to floatX so the graph can run on GPU.
            bound = numpy.sqrt(6. / (n_in + n_out))
            initial_W = numpy.asarray(
                rng.uniform(low=-bound, high=bound, size=(n_in, n_out)),
                dtype=theano.config.floatX
            )
            if activation == theano.tensor.nnet.sigmoid:
                # [Xavier10]: sigmoid wants ~4x larger initial weights.
                initial_W *= 4
            W = theano.shared(value=initial_W, name='W', borrow=True)
        if b is None:
            b = theano.shared(
                value=numpy.zeros((n_out,), dtype=theano.config.floatX),
                name='b', borrow=True
            )
        self.W = W
        self.b = b
        pre_activation = T.dot(input, self.W) + self.b
        self.output = (pre_activation if activation is None
                       else activation(pre_activation))
        # parameters learned by gradient descent
        self.params = [self.W, self.b]
class MLP(object):
    """Feed-forward network: one tanh hidden layer plus a linear readout.

    Exposes ``y_pred`` (symbolic predictions), ``params`` (all trainable
    shared variables), ``cost`` (alias of :meth:`mse`) and ``l2`` (sum of
    squared weights of both layers, usable as a regularisation term).
    """
    def mse(self, y):
        """Mean squared error of ``y_pred`` against targets ``y``.

        ``y`` is reshaped to a column vector so it lines up with the
        predictions (assumes n_out == 1 -- confirm at call sites).
        """
        return T.mean((self.y_pred - T.reshape(y, (y.shape[0], 1))) ** 2)
    def __init__(self, rng, input, n_in, n_hidden, n_out):
        """Wire up the symbolic graph.

        :param rng: numpy.random.RandomState for weight initialisation
        :param input: symbolic minibatch tensor
        :param n_in: input dimensionality
        :param n_hidden: number of hidden units
        :param n_out: number of output units
        """
        self.hiddenLayer = HiddenLayer(
            rng=rng, input=input, n_in=n_in, n_out=n_hidden,
            activation=T.tanh
        )
        # Linear readout: weights and bias start at zero.
        self.W_out = theano.shared(
            value=numpy.zeros((n_hidden, n_out), dtype=theano.config.floatX),
            name='W_out', borrow=True
        )
        self.b_out = theano.shared(
            value=numpy.zeros((n_out,), dtype=theano.config.floatX),
            name='b_out', borrow=True
        )
        self.y_pred = T.dot(self.hiddenLayer.output, self.W_out) + self.b_out
        # All trainable parameters, hidden layer first.
        self.params = self.hiddenLayer.params + [self.W_out, self.b_out]
        # keep track of model input
        self.input = input
        self.cost = self.mse
        # L2 penalty over hidden-layer W and output W (biases excluded).
        self.l2 = (T.sum(self.hiddenLayer.params[0] ** 2)
                   + T.sum(self.W_out ** 2))
| aporcel/Machine-Learning | homework4/mlp.py | mlp.py | py | 5,059 | python | en | code | 1 | github-code | 13 |
14211299621 | from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from blog.models import article, category
from blog.serializers import BlogSerializer, BlogCategorySerializer
# Create your views here.
@csrf_exempt
def article_list(request):
    """GET: list all articles (id/title/authorFn only). POST: create one."""
    if request.method == 'GET':
        serializer = BlogSerializer(
            article.objects.all(), many=True,
            fields=['id', 'title', 'authorFn']
        )
        return JsonResponse({"articles": serializer.data}, safe=False)
    elif request.method == 'POST':
        payload = JSONParser().parse(request)
        serializer = BlogSerializer(data=payload)
        if not serializer.is_valid():
            return JsonResponse(serializer.errors, status=400)
        serializer.save()
        return JsonResponse(serializer.data, status=201)
@csrf_exempt
def article_detail(request, pk):
    """Retrieve (GET), update (PUT) or delete (DELETE) a single article."""
    try:
        blog = article.objects.get(pk=pk)
    except article.DoesNotExist:
        return HttpResponse(status=404)
    if request.method == 'GET':
        return JsonResponse(BlogSerializer(blog).data)
    if request.method == 'PUT':
        serializer = BlogSerializer(blog, data=JSONParser().parse(request))
        if not serializer.is_valid():
            return JsonResponse(serializer.errors, status=400)
        serializer.save()
        return JsonResponse(serializer.data)
    if request.method == 'DELETE':
        blog.delete()
        return HttpResponse(status=204)
def category_list(request):
    """GET: list all blog categories. Other methods get 405."""
    if request.method != 'GET':
        # The original fell through and implicitly returned None for
        # non-GET requests, making Django raise "The view didn't return an
        # HttpResponse"; answer 405 Method Not Allowed explicitly instead.
        return HttpResponse(status=405)
    serializer = BlogCategorySerializer(category.objects.all(), many=True)
    return JsonResponse({"categories": serializer.data}, safe=False)
def category_articles(request, pk):
    """GET: list the articles in category `pk` (id/title/authorFn only)."""
    if request.method != 'GET':
        # Explicit 405 instead of the original implicit None (server error).
        return HttpResponse(status=405)
    # Bug fix: the original wrapped the filter in
    # `except articles.DoesNotExist`, naming the very local being assigned,
    # so any failure raised NameError/UnboundLocalError instead of being
    # handled.  QuerySet.filter() never raises DoesNotExist (it returns an
    # empty queryset), so no try/except is needed at all.
    articles = article.objects.filter(category=pk)
    serializer = BlogSerializer(articles, many=True,
                                fields=['id', 'title', 'authorFn'])
    return JsonResponse({"articles": serializer.data}, safe=False)
34619302234 | import streamlit as st
# Streamlit feature tour: one small demo per widget/API, following the
# tutorial video linked below.
# url video: https://www.youtube.com/watch?v=_9WiB2PDO7k&list=PLJ39kWiJXSixyRMcn3lrbv8xI8ZZoYNZU
# Text/Title
st.title("Streamlit tutorial")
# Header/Subheader
st.header("This is a header")
st.subheader("This is a subheader")
# Text
st.text("Hello Streamlit")
# Markdown
st.markdown("### This is a Markdown")
# Error/Colorfull Text
st.success("Succesful")
st.info("Information")
st.warning("This is a warning")
st.error("This is an error Danger")
# NOTE(review): st.exception() below renders the given string styled like a
# raised exception; it does not actually raise anything.
#st.exception("NameError('name "three" not define')")
st.exception("NameError('name three not define')")
# Get help info about Python
st.help(range)
# Writing Text/Super Fxn
st.write("Text with write")
st.write(range(10))
# Images
from PIL import Image
img = Image.open("1080x1080.webp")
st.image(img, width=300, caption="Nienke and Nacho")
# Video
# video_file = open("example.mp4", "rb").read()
# st.video(video_file)
# Audio
# audio_file = open("example.mp3", "rb").read()
# st.audio(audio_file, format="audio/mp3")
# Widget
# Checkbox
if st.checkbox("Show/Hide"):
    st.text("Showing or Hiding Widget")
# Radio Buttons
status = st.radio("What is your status", ("Active", "Inactive"))
if status == "Active":
    st.success("You are active")
else:
    st.warning("Your are inactive")
# SelectBox
occupation = st.selectbox(
    "Your ocuppation", ["Programmer", "DataScientist", "Doctor", "Businessman"]
)
st.write("You selected this option: ", occupation)
# Multiselect
location = st.multiselect("Where do you work?", ["London", "USA", "Spain", "Portugal"])
st.write("You selected", len(location), "locations")
# Slider
level = st.slider("What is your level",1,5)
# Buttons
st.button("Simple button")
if st.button("About"):
    st.write("Streamlit is cool")
# Text Input
firstname = st.text_input("Enter your name", "Type here...")
if st.button("Submit"):
    result = firstname.title()
    st.success(result)
# Text Area
message = st.text_area("Enter your message", "Type here...")
if st.button("Message"):
    result = message.title()
    st.success(result)
# Date Input
import datetime
today = st.date_input("Today is ", datetime.datetime.now())
# Time
the_time = st.time_input("The time is", datetime.time())
# Displaying JSON
st.text("Display JSON")
st.json({"name":"Nacho", "gender":"male"})
# Display Raw code
st.text("Display Raw code")
st.code("import numpy as np")
# Display Raw code
with st.echo():
    # This will also show as a comment
    import pandas as pd
    df = pd.DataFrame()
# Progress bar
# NOTE(review): the loop stops at 50, so the bar appears half full --
# presumably intentional for the demo; confirm if a full bar is wanted.
import time
my_progress_bar = st.progress(0)
for p in range(50):
    my_progress_bar.progress(p + 1)
# Spinner
with st.spinner("Waiting..."):
    time.sleep(5)
    st.success("Finished!")
# Ballons
st.balloons()
# Sidebars
st.sidebar.header("About")
st.sidebar.text("This is a streamlit tutorial")
# Funtions
# NOTE(review): st.cache was deprecated in later Streamlit releases in
# favour of st.cache_data/st.cache_resource -- confirm the target version.
@st.cache
def run_fxn():
    return range(100)
st.write(run_fxn())
# Plot
#st.pyplot()
# Dataframe (df was created inside the st.echo() block above)
st.dataframe(df)
# Table
st.table(df) | joseignaciorm/share-streamlit-app | app.py | app.py | py | 2,973 | python | en | code | 0 | github-code | 13 |
72516562578 | import bittensor
import torch
import wandb
import datetime
import traceback
import sys
import os
from loguru import logger; logger = logger.opt(colors=True)
from torch.nn.utils import clip_grad_norm_
from datetime import datetime,timedelta
from threading import Lock
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
def serve( config, server):
    """Serve the `server` model as a bittensor miner.

    Registers the wallet, syncs the metagraph, exposes forward/backward
    text handlers through an axon (with stake-based priority and a
    stake+rate-limit blacklist), then loops forever: train on the dataset
    for `config.server.blocks_per_epoch` blocks, apply accumulated
    gradients, log/checkpoint, and set this miner's self-weight on chain.
    Runs until KeyboardInterrupt.
    """
    config.to_defaults()

    # Create Subtensor connection
    subtensor = bittensor.subtensor(config = config)

    # Load/Create our bittensor wallet.
    wallet = bittensor.wallet( config = config ).create().register()

    # Load/Sync/Save our metagraph.
    metagraph = bittensor.metagraph (
        subtensor = subtensor
    ).load().sync().save()

    # Instantiate the model we are going to serve on the network.
    # Creating a threading lock for updates to the model
    mutex = Lock()
    gp_server = server

    # Create our optimizer.
    optimizer = torch.optim.SGD(
        [ {"params": gp_server.parameters()} ],
        lr = config.server.learning_rate,
        momentum = config.server.momentum,
    )

    # hotkey -> time of last accepted request; used by blacklist()'s
    # per-caller rate limiting below.
    timecheck = {}

    # Define our forward function.
    def forward_text ( inputs_x ):
        r""" Forward function that is called when the axon recieves a forward request from other peers
            Args:
                inputs_x ( :obj:`torch.Tensor`, `required`):
                    torch inputs to be forward processed.

            Returns:
                outputs (:obj:`torch.FloatTensor`):
                    The nucleus's outputs as a torch tensor of shape [batch_size, sequence_len, __network_dim__]
        """
        return gp_server.encode_forward( inputs_x )

    # Define our backward function.
    def backward_text (inputs_x, grads_dy ):
        r"""Backwards function that is called when the axon recieves a backwards request from other peers.
            Updates the server parameters with gradients through the chain.

            Args:
                inputs_x ( :obj:`torch.Tensor`, `required`):
                    torch inputs from previous forward call.
                grads_dy ( :obj:`torch.Tensor`, `required`):
                    torch grads of forward output.
        """
        # -- normalized grads --
        grads_dy = grads_dy/(grads_dy.sum() + 0.00001)

        with mutex:
            outputs_y = gp_server.encode_forward( inputs_x )
            with torch.autograd.set_detect_anomaly(True):
                torch.autograd.backward (
                    tensors = [ outputs_y ],
                    grad_tensors = [ grads_dy ],
                    retain_graph=True
                )
            logger.info('Backwards axon gradient applied')

        gp_server.backward_gradients += inputs_x.size(0)

    def priority(pubkey:str, request_type:bittensor.proto.RequestType, inputs_x) -> float:
        r"""Calculates the priority on requests based on stake and size of input

            Args:
                pubkey ( str, `required`):
                    The public key of the caller.
                inputs_x ( :obj:`torch.Tensor`, `required`):
                    torch inputs to be forward processed.
                request_type ( bittensor.proto.RequestType, `required`):
                    the request type ('FORWARD' or 'BACKWARD').
        """
        uid = metagraph.hotkeys.index(pubkey)
        priority = metagraph.S[uid].item()/ sys.getsizeof(inputs_x)
        return priority

    def blacklist(pubkey:str, request_type:bittensor.proto.RequestType) -> bool:
        r"""Axon security blacklisting, used to blacklist message from low stake members

            Args:
                pubkey ( str, `required`):
                    The public key of the caller.
                request_type ( bittensor.proto.RequestType, `required`):
                    the request type ('FORWARD' or 'BACKWARD').
        """
        # Check for stake
        def stake_check():
            uid =metagraph.hotkeys.index(pubkey)
            if metagraph.S[uid].item() < config.server.blacklist.stake:
                return True
            else:
                return False

        # Check for time
        def time_check():
            current_time = datetime.now()
            if pubkey in timecheck.keys():
                prev_time = timecheck[pubkey]
                if current_time - prev_time >= timedelta(seconds=config.server.blacklist.time):
                    timecheck[pubkey] = current_time
                    return False
                else:
                    return True
            else:
                timecheck[pubkey] = current_time
                return False

        # Black list or not
        if stake_check() or time_check():
            return True
        else:
            return False

    # Create our axon server
    axon = bittensor.axon (
        wallet = wallet,
        forward_text = forward_text,
        backward_text = backward_text,
        blacklist= blacklist,
        priority = priority
    )

    # Training Data
    dataset = bittensor.dataset(config=config)

    # load our old model
    if config.server.restart != True:
        gp_server.load(config.server.full_path)

    if config.wandb.api_key != 'default':
        # --- Init Wandb.
        bittensor.wandb(
            config = config,
            cold_pubkey = wallet.coldkeypub.ss58_address,
            hot_pubkey = wallet.hotkey.ss58_address,
            root_dir = config.server.full_path
        )

    # -- Main Training loop --
    try:
        # -- serve axon to the network.
        axon.start().serve(subtensor=subtensor)

        # --- creating our chain weights
        chain_weights =torch.zeros(metagraph.n)
        uid = metagraph.hotkeys.index( wallet.hotkey.ss58_address )
        # the miner votes only for itself
        chain_weights[uid] = 1

        while True:
            # --- Run
            dataloader = iter(dataset.dataloader(epoch_length=100))
            current_block = subtensor.get_current_block()
            end_block = current_block + config.server.blocks_per_epoch
            # (sic) "interation" is the per-epoch iteration counter
            interation = 0

            # --- Training step.
            # NOTE(review): this inner loop busy-polls get_current_block()
            # while waiting for the chain to advance (one train step per
            # new block).
            while end_block >= current_block:
                if current_block != subtensor.get_current_block():
                    loss, _ = gp_server( next( dataloader ) )
                    if interation > 0 :
                        losses += loss
                    else:
                        losses = loss
                    interation += 1
                    current_block = subtensor.get_current_block()

            #Custom learning rate
            if gp_server.backward_gradients > 0:
                optimizer.param_groups[0]['lr'] = 1/(gp_server.backward_gradients)
            else:
                optimizer.param_groups[0]['lr'] = 0.1

            gp_server.backward_gradients = 0
            # --- Update parameters
            if interation != 0:
                with mutex:
                    logger.info('Backpropagation Started')
                    losses.backward()
                    clip_grad_norm_(gp_server.parameters(), 1.0)
                    optimizer.step()
                    optimizer.zero_grad()
                    logger.info('Backpropagation Successful: Model updated')

            # --- logging data
            wandb_data = {
                'block': end_block,
                'loss': losses.item()/interation,
                'stake': metagraph.S[ uid ].item(),
                'rank': metagraph.R[ uid ].item(),
                'incentive': metagraph.I[ uid ].item(),
            }

            # wandb syncing and update metagraph
            metagraph.sync().save()
            chain_weights =torch.zeros(metagraph.n)
            chain_weights[uid] = 1
            if config.wandb.api_key != 'default':
                wandb.log( wandb_data )
                logger.info(wandb_data)

            # save the model
            gp_server.save(config.server.full_path)

            # --- setting weights
            try:
                did_set = subtensor.timeout_set_weights(
                    timeout=10,
                    uids=metagraph.uids,
                    weights = chain_weights,
                    wait_for_inclusion = True,
                    wallet = wallet,
                )
                if did_set:
                    logger.success('Successfully set weights on the chain')
                else:
                    logger.error('Failed to set weights on chain. (Timeout)')
            except Exception as e:
                logger.error('Failure setting weights on chain with error: {}', e)

    except KeyboardInterrupt:
        # --- User ended session ----
        axon.stop()
    except Exception as e:
        # --- Unknown error ----
        logger.exception('Unknown exception: {} with traceback {}', e, traceback.format_exc())
| isabella618033/neurons | neurons/text/advanced_server/run.py | run.py | py | 8,940 | python | en | code | 1 | github-code | 13 |
17036383544 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayBossProdContractDownloadModel(object):
    """Request model carrying the identifiers for a contract-file download
    (business id, file key, source system id, tenant).

    ``to_alipay_dict``/``from_alipay_dict`` convert between this object and
    the plain-dict wire form used by the Alipay gateway.
    """

    # Single source of truth for the model's field names, in wire order.
    _FIELDS = ('business_id', 'file_key', 'source_system_id', 'tenant')

    def __init__(self):
        self._business_id = None
        self._file_key = None
        self._source_system_id = None
        self._tenant = None

    @property
    def business_id(self):
        return self._business_id

    @business_id.setter
    def business_id(self, value):
        self._business_id = value

    @property
    def file_key(self):
        return self._file_key

    @file_key.setter
    def file_key(self, value):
        self._file_key = value

    @property
    def source_system_id(self):
        return self._source_system_id

    @source_system_id.setter
    def source_system_id(self, value):
        self._source_system_id = value

    @property
    def tenant(self):
        return self._tenant

    @tenant.setter
    def tenant(self, value):
        self._tenant = value

    def to_alipay_dict(self):
        """Serialize to a plain dict: falsy fields are treated as unset and
        skipped; nested API models are expanded via their to_alipay_dict()."""
        params = dict()
        for field in AlipayBossProdContractDownloadModel._FIELDS:
            value = getattr(self, field)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[field] = value.to_alipay_dict()
                else:
                    params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Rebuild a model from a plain dict; returns None for falsy input."""
        if not d:
            return None
        o = AlipayBossProdContractDownloadModel()
        for field in AlipayBossProdContractDownloadModel._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayBossProdContractDownloadModel.py | AlipayBossProdContractDownloadModel.py | py | 2,439 | python | en | code | 241 | github-code | 13 |
13443425668 | import sys
from definitions import *
from units import *
def preprocess_line( line ):
    """Parse one stripped input line.

    Lines starting with '<' become UnitLine objects; everything else is
    whitespace-split and fed to TokenLine.
    """
    return UnitLine( line ) if line[ 0 ] == '<' else TokenLine( *line.split() )
# All print() output from here on lands in the generated assembly file.
sys.stdout = open( 'a.frisc', 'w' )

tlines = sys.stdin.readlines()
lines = [ t.rstrip() for t in tlines ]
# Pair each line with its indentation depth and its parsed form
# (UnitLine for '<...' lines, TokenLine otherwise -- see preprocess_line).
indented = [ ( len( line ) - len( line.lstrip() ), preprocess_line( line.lstrip() ) ) for line in lines ]

# Fold the flat (depth, item) list into a tree: deeper items become
# children of the nearest shallower unit still open on the stack.
stack = []
for depth, item in indented:
    while stack and stack[ -1 ].depth >= depth:
        top = stack.pop()
        stack[ -1 ].append( top )
    if isinstance( item, TokenLine ):
        stack[ -1 ].append( Token( depth, item ) )
    else:
        unit = ( get_unit( item.name ) )( depth )
        stack.append( unit )
# Collapse whatever is still open so stack[0] holds the tree root.
while len( stack ) > 1:
    top = stack.pop()
    stack[ -1 ].append( top )
# print( stack[ -1 ] )

# Code generation: walk the tree inside a fresh global scope, then emit the
# buffered FRISC program.  (Token/get_unit/Scope/FRISC presumably come from
# the star imports of `definitions`/`units` above -- confirm.)
global_scope = Scope()
FRISC.generate_header()
FRISC.generate_extra_operators()
stack[ -1 ].descend( global_scope )
FRISC.generate_main_call()
FRISC.generate_final_code()
FRISC.output_final_code()
| PPJ-Grupa/PPJ | Lab4_zj/GeneratorKoda.py | GeneratorKoda.py | py | 1,043 | python | en | code | 0 | github-code | 13 |
17061075054 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class UboVO(object):
    """Value object with ``duty``, ``invest_rate`` and ``name`` fields
    (per the class name, presumably an ultimate-beneficial-owner record).

    ``to_alipay_dict``/``from_alipay_dict`` convert between this object and
    the plain-dict wire form used by the Alipay gateway.
    """

    # Field names shared by the dict-conversion helpers below.
    _FIELDS = ('duty', 'invest_rate', 'name')

    def __init__(self):
        self._duty = None
        self._invest_rate = None
        self._name = None

    @property
    def duty(self):
        return self._duty

    @duty.setter
    def duty(self, value):
        self._duty = value

    @property
    def invest_rate(self):
        return self._invest_rate

    @invest_rate.setter
    def invest_rate(self, value):
        self._invest_rate = value

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    def to_alipay_dict(self):
        """Serialize to a plain dict: falsy fields are treated as unset and
        skipped; nested API models are expanded via their to_alipay_dict()."""
        params = dict()
        for field in UboVO._FIELDS:
            value = getattr(self, field)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[field] = value.to_alipay_dict()
                else:
                    params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Rebuild a UboVO from a plain dict; returns None for falsy input."""
        if not d:
            return None
        o = UboVO()
        for field in UboVO._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/UboVO.py | UboVO.py | py | 1,700 | python | en | code | 241 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.