seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
30022811113 | #_confirmed_users.py
_rsvp=[]
_friends=['jhony','mario','pedro','tony']
#I will confirm which user will come to my party
while _friends:
_dude=_friends.pop()
print(str(_dude).title()+" is comming to the party!")
_rsvp.append(_dude)
print("\n\n\tFriends RSVP:")
for friend in _rsvp:
print(str(friend).title())
| Jparedes20/python_work | _confirmed_users.py | _confirmed_users.py | py | 318 | python | en | code | 0 | github-code | 36 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models


# Auto-generated schema migration: clears Member's Meta options, sets a
# human-readable verbose name on UserKnowsPL, and adds Member.experience.
class Migration(migrations.Migration):

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        # Reset any previously declared Meta options (ordering, names, ...) on Member.
        migrations.AlterModelOptions(
            name='member',
            options={},
        ),
        # Friendlier label shown in the Django admin.
        migrations.AlterModelOptions(
            name='userknowspl',
            options={'verbose_name': 'User knows Programming Language'},
        ),
        # New integer field; default=0 backfills existing rows.
        # NOTE(review): the unit of "experience" (years? points?) is not
        # defined here -- confirm against core.models.
        migrations.AddField(
            model_name='member',
            name='experience',
            field=models.SmallIntegerField(default=0),
        ),
    ]
| MJafarMashhadi/CodeDescriptionInformationCollector | core/migrations/0002_auto_20160107_1255.py | 0002_auto_20160107_1255.py | py | 648 | python | en | code | 0 | github-code | 36 |
from DQNAgent import DQNAgent
from pathlib import Path

# Resume training from a previously saved model when one exists.
my_file = Path("my_model.hd5")
if my_file.is_file():
    Agent = DQNAgent("my_model.hd5")
else:
    Agent = DQNAgent()

# Training loop: gather experience and fit the network each iteration.
for i in range(1500):
    Agent.observe()
    Agent.train()

# Evaluate the trained agent over a fixed number of episodes.
# NOTE(review): loop nesting reconstructed from a whitespace-stripped dump;
# evaluation is assumed to follow training -- confirm against the repo.
PLAY_EPISODES = 10
rewards = 0
for _ in range(PLAY_EPISODES):
    rewards += Agent.play()
# BUG FIX: the mean reward was divided by 3 although 10 episodes are
# accumulated; divide by the actual episode count.
print(rewards / PLAY_EPISODES)

Agent.save_network("my_model.hd5")
| sojunator/DV2454Proj | Q_ml_keras.py | Q_ml_keras.py | py | 353 | python | en | code | 0 | github-code | 36 |
class Home:
    """Base home: every home has a standard room and a kitchen."""

    def room1(self):
        """Print the floor area of the 100 x 100 standard room."""
        area = 100 * 100
        print(f'area of room1 {area}')

    def kitchen(self):
        """Print the floor area of the 1222 x 4888 kitchen."""
        area = 1222 * 4888
        print(f'area of kitchen {area}')
class FirstHome(Home):
    """A home that adds a study room to the base rooms."""

    def studyRoom(self):
        """Print the floor area of the 100 x 150 study room."""
        area = 100 * 150
        print(f"area of study room: {area}")

    def display(self):
        """Print the areas of every room in this home."""
        for room in (self.room1, self.kitchen, self.studyRoom):
            room()
class SecondHome(Home):
    """A home that adds a work area to the base rooms."""

    def workArea(self):
        """Print the floor area of the 80 x 100 work area."""
        area = 80 * 100
        print(f"area of work area: {area}")

    def display(self):
        """Print the areas of every room in this home."""
        for room in (self.room1, self.kitchen, self.workArea):
            room()
# Demonstrate both concrete homes by printing every room area.
first_home = FirstHome()
print("Dimensions of first home is:")
first_home.display()

second_home = SecondHome()
print("Dimension of second home:")
second_home.display()
39305646033 | import asyncio
import logging
import os
import threading
import uuid
from time import sleep
from pywintypes import Time
from win32con import FILE_SHARE_DELETE, FILE_SHARE_READ, GENERIC_WRITE, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, \
FILE_SHARE_WRITE
from win32file import CreateFile, CloseHandle, SetFileTime
from AndroidFTPBackup.constants import BackupConstants
from AndroidFTPBackup.helpers.ConfigHelper import ConfigHelper
from AndroidFTPBackup.helpers.FileHelper import FileHelper
from AndroidFTPBackup.helpers.FtpHelper import FtpHelper
from AndroidFTPBackup.model.FtpFile import FtpFile
from AndroidFTPBackup.utils.BackupUtils import BackupUtils
class BackupHelper:
    """Runs one configured FTP backup of an Android device to local disk.

    Each instance handles a single backup config; the work runs on its own
    worker thread with a dedicated asyncio event loop.
    """

    logger = logging.getLogger(__name__)

    def __init__(self, backup, current_ip):
        # `backup` is the parsed backup configuration dict; `current_ip`
        # is the phone's current address on the LAN.
        self.backup = backup
        self.state = 'Started'
        self.ftp_client = None
        self.current_ip = current_ip
        self.backup_name = backup['config']['basic']['name']
        # Timestamps of the previous run; used for incremental copying.
        self.last_backup_end_time = BackupUtils.date_from_timestamp(backup['last_backup_end_time'])
        self.last_backup_start_time = BackupUtils.date_from_timestamp(backup['last_backup_start_time'])
        self.ftp_data = dict(port=backup['config']['ftp']['port'], userId=backup['config']['ftp']['userId'],
                             ip=current_ip, password=backup['config']['ftp']['password'])

    def start_backup(self, backup):
        """Kick off the backup on a fresh thread with its own event loop."""
        loop = asyncio.new_event_loop()
        p = threading.Thread(target=self.worker, args=(loop, backup,))
        p.start()

    def worker(self, loop, backup):
        """Thread entry point: run the async backup to completion."""
        asyncio.set_event_loop(loop)
        loop.run_until_complete(self.data_backup(backup))

    async def data_backup(self, backup):
        """Connect to the phone and back up every configured directory."""
        self.ftp_client = await self.connect_ftp()
        self.logger.info("Starting Backup for: {}".format(self.backup_name))
        self.logger.info("Last Backup started on: {}".format(self.last_backup_start_time))
        self.logger.info("Last Backup completed on: {}".format(self.last_backup_end_time))
        current_backup_start_time = BackupUtils.timestamp_from_date()
        for dir_data in backup['config']['dirs']['backupDirs']:
            await self.backup_folder(dir_data)
        current_backup_end_time = BackupUtils.timestamp_from_date()
        self.logger.info("Current Backup started on: {}".format(current_backup_start_time))
        self.logger.info("Current Backup completed on: {}".format(current_backup_end_time))
        self.state = 'Completed'
        await BackupUtils.send_message('Completed', self.backup_name, current_backup_end_time)
        # Persist the new run timestamps so the next run is incremental.
        await ConfigHelper.update_backup_time(self.backup_name, current_backup_start_time, current_backup_end_time)

    async def backup_folder(self, dir_config):
        """Recursively copy one remote directory, skipping unchanged files.

        Files whose mtime predates the last backup start are skipped;
        hidden directories (leading '.') are ignored.
        """
        source_path = dir_config['path']
        backup_location = dir_config['backupLocation']
        month_separated = dir_config['monthSeparated']
        recursive = dir_config['recursive']
        FileHelper.create_folder(backup_location)
        await BackupUtils.send_message('Scanning', self.backup_name, source_path, dir_config['backupLocation'])
        num_files = 0
        for file in await self.get_dir_list(source_path):
            file = FtpFile(file)
            # Raises if the user cancelled the backup meanwhile.
            await self.validate_process_status(self.backup_name)
            uuid_ = uuid.uuid4().__str__()
            if file.type == 'dir':
                if file.name[0] == '.':
                    continue
                # Recurse into the subdirectory with a derived config.
                sub_dir_config = dict(path=os.path.join(source_path, file.name),
                                      monthSeparated=month_separated, recursive=recursive,
                                      backupLocation=BackupUtils.get_backup_location(backup_location, file, recursive))
                await self.backup_folder(sub_dir_config)
                continue
            # Incremental: unchanged since the last run -> skip.
            if file.modify < self.last_backup_start_time:
                continue
            try:
                file_path, save = BackupUtils.get_file_path(backup_location, month_separated, file,
                                                            self.last_backup_end_time)
                if save:
                    if num_files == 0:
                        # First file of this directory: announce it once.
                        self.logger.info('Backing up: {}'.format(source_path))
                        await BackupUtils.send_message('Enter Directory', self.backup_name,
                                                       source_path, backup_location)
                    num_files += 1
                    await BackupUtils.send_message('Copying', self.backup_name, file.name, file.size, uuid_)
                    await self.create_file(file_path, file.name, file.modify.timestamp(), source_path)
                    await BackupUtils.send_message('Saved', self.backup_name, file_path, file.size, uuid_)
            except Exception as e:
                # Best-effort: report the failure and continue with the next file.
                self.logger.exception('Error saving: {}.'.format(file.name), e)
                await BackupUtils.send_message('Error', self.backup_name, file.name, e.__str__(), uuid_)

    async def validate_process_status(self, backup_name):
        """Abort the backup (via RuntimeError) if the user cancelled it."""
        if self.state == 'Cancelled':
            await BackupUtils.send_message('Cancelled', backup_name, BackupUtils.timestamp_from_date())
            raise RuntimeError('Backup stopped by user')

    async def create_file(self, file_name, current_file_name, time, file_path):
        """Download one remote file and stamp it with the remote mtime.

        Uses the win32 API to set creation/access/modify times, so the local
        copy mirrors the phone's timestamps (Windows-only).
        """
        current_file = os.path.join(file_path, current_file_name)
        with open(file_name, "wb") as file:
            await self.get_file(current_file, file.write)
        win_file = CreateFile(file_name, GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                              None, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, None)
        # noinspection PyUnresolvedReferences
        win_time = Time(time)
        SetFileTime(win_file, win_time, win_time, win_time)
        CloseHandle(win_file)
        self.logger.info('Created file {} with time {}'.format(file_name, win_time))

    async def get_dir_list(self, source_file_path):
        """List a remote directory via MLSD, retrying on connection errors."""
        try:
            return list(self.ftp_client.mlsd(source_file_path))
        except Exception as e:
            await self.retry('get_dir_list', e)
            return await self.get_dir_list(source_file_path)

    async def get_file(self, current_file, save_file_callback):
        """RETR one remote file into the callback, retrying on errors."""
        try:
            return self.ftp_client.retrbinary("RETR {}".format(current_file), save_file_callback)
        except Exception as e:
            await self.retry('get_file', e)
            return await self.get_file(current_file, save_file_callback)

    async def connect_ftp(self, retry_count=1):
        """Connect to the phone's FTP server, retrying until the cap is hit."""
        while True:
            try:
                return FtpHelper.connect_ftp(self.ftp_data)
            except Exception as e:
                await self.retry('connect_ftp', e, retry_count)

    async def retry(self, function, e, retry_count=1):
        """Common retry path: notify, wait, reconnect; cancel at the cap."""
        if retry_count > BackupConstants.MAX_RETRY_COUNT:
            self.state = 'Cancelled'
            await BackupUtils.send_message('Connection Failed', self.backup_name,
                                           'Retry Limit reached, Cancelling Backup.')
            raise RuntimeError('Retry Limit reached, Cancelling Backup.')
        await BackupUtils.send_message('Connection Failed', self.backup_name,
                                       'Retry Count: {}/{}'.format(retry_count, BackupConstants.MAX_RETRY_COUNT))
        self.logger.error('Possible disconnect, retrying... {} {} {}'.format(retry_count, function, str(e)))
        sleep(BackupConstants.RETRY_DELAY)
        self.ftp_client.close()
        self.ftp_client = await self.connect_ftp(retry_count + 1)
| SanketRevankar/AndroidFTP-DataBackup | AndroidFTPBackup/helpers/BackupHelper.py | BackupHelper.py | py | 7,605 | python | en | code | 3 | github-code | 36 |
38453029112 | import sys
from requests_html import HTMLSession
import time
def checker():
    """Parse the command line and dispatch the hashtag lookup."""
    if len(sys.argv) < 2:
        invalid_len_message()
        return
    try:
        flag = sys.argv[1]
        if flag.lower() != "--hashtag":
            error_message(flag)
            return
        # IndexError here (missing value) falls through to the usage hint.
        tag = sys.argv[2]
        if tag:
            # A leading/embedded '#' is stripped before the lookup.
            print(check_hashtags(tag.replace("#", "") if "#" in tag else tag))
    except IndexError:
        usage_message("photochecker.py --hashtag 'hashtag'")
def check_hashtags(hashtag):
    """Look up the newest Instagram post for *hashtag* on hashatit.com.

    Returns an "Author: ... | Link: ..." line on success, or an ERROR
    string when no photo is found or the post is not from Instagram.
    """
    session = HTMLSession()
    r = session.get(f'https://www.hashatit.com/hashtags/{hashtag}')
    time.sleep(2)  # give the rendered page a moment, matching the original pacing
    photo = r.html.find('.photo', first=True)
    if not photo:
        return f"""ERROR: No photos with the hashtag '{hashtag}' found."""
    screen = photo.find('.screen', first=True)
    if "instagram" not in screen.text.lower():
        return f"""ERROR: Hashatit couldn't retrieve posts in Instagram with the hashtag '{hashtag}' in it."""
    anchor = photo.find('a.image-link', first=True)
    post_link = list(anchor.absolute_links)[0]
    author = photo.find('a.favicon', first=True)
    return f"Author: {author.text} | Link: {post_link}"
def invalid_len_message():
    """Tell the user that a command-line argument is required."""
    print("""ERROR: You must pass in an argument. Run photochecker.py --hashtag 'hashtag'.""")
def error_message(arg):
    """Report an unrecognised command-line flag *arg*."""
    print(f"""ERROR: Argument '{arg}' does not exist. Run photochecker.py --hashtag 'hashtag'.""")
def usage_message(msg):
    """Print a usage hint built from *msg*."""
    print(f"USAGE: {msg}")
if __name__ == "__main__":
    # Entry point: only run the CLI when executed directly.
    checker()
class Solution(object):
    def gardenNoAdj(self, N, paths):
        """
        LeetCode 1042 "Flower Planting With No Adjacent".

        :type N: int
        :type paths: List[List[int]]
        :rtype: List[int]

        Every garden has at most 3 neighbours (problem guarantee), so a
        simple greedy assignment always finds a free colour among {1,2,3,4}.
        This replaces the original backtracking search, whose failure path
        restored only the immediate neighbours' candidate sets (deeper
        recursive removals were left mutated) and never reset
        ans[garden - 1] -- a latent bug on hard inputs.
        """
        # Build an adjacency map over gardens 1..N.
        neighbours = {i: [] for i in range(1, N + 1)}
        for s, t in paths:
            neighbours[s].append(t)
            neighbours[t].append(s)
        # 0 means "uncoloured"; never collides with colours 1..4.
        ans = [0] * N
        for garden in range(1, N + 1):
            taken = {ans[nb - 1] for nb in neighbours[garden]}
            # At most 3 neighbours -> at least one of the 4 colours is free.
            ans[garden - 1] = next(c for c in (1, 2, 3, 4) if c not in taken)
        return ans
if __name__=="__main__":
    # Smoke test: 4 gardens, every pair adjacent -> needs all 4 colours.
    print(Solution().gardenNoAdj(4, [[1,2],[2,3],[3,4],[4,1],[1,3],[2,4]]))
15287647409 | from django.test import TestCase
from users.forms import UserUpdateForm
class TestForms(TestCase):
    """Unit tests for the user-update form."""

    def test_update_form_valid_data(self):
        """A username plus e-mail should validate."""
        form = UserUpdateForm(data={
            'username': 'Praveen',
            'email': 'Praveen.t@gmail.com'
        })
        self.assertTrue(form.is_valid())

    def test_update_form_invalid_data(self):
        """An empty form is invalid.

        BUG FIX: this method previously reused the name
        test_update_form_valid_data, silently shadowing the first test so it
        never ran.
        """
        form = UserUpdateForm(data={})
        self.assertFalse(form.is_valid())
        # NOTE(review): expects exactly one error for a fully empty form --
        # confirm against UserUpdateForm's required fields.
        self.assertEquals(len(form.errors), 1)
#!/usr/bin/python
import re

# Experimental "longest rule matching" scan over a sample CFPL declaration.
line = " abc, b, c AS INT"
# Token alternation: operators, comparison pairs, keywords, '&', ',',
# identifiers, and double-quoted string literals.
# NOTE(review): single-char '>' '<' '=' precede '>=' '<=' '==' in the
# alternation, so re alone stops at one char; the loop below compensates by
# growing the candidate slice one character at a time.
token_regex = r'[()*/%+\-><=]|>=|<=|==|<>|VAR|AS|INT|CHAR|BOOL|FLOAT|AND|OR|NOT|START|STOP|&|,|[$_a-z][$_a-zA-Z0-9]*|\".*\"'
lex_ptr = 0
end = lex_ptr + 1
while end < len(line):
    possible_token = line[lex_ptr:end]
    matchObj = re.search(token_regex, possible_token)
    if matchObj:
        string_matched1 = matchObj.group()
        # try matching the next token (longest rule matching)
        possible_longer_token = line[lex_ptr:end + 1]
        matchObj = re.search(token_regex, possible_longer_token)
        if matchObj:
            string_matched2 = matchObj.group()
            print(string_matched1, " ", string_matched2)
            # Same match on the longer slice -> the token cannot grow further:
            # accept it and stop (this prototype scans a single token).
            if string_matched1 == string_matched2:
                lex_ptr = end + 1
                print(string_matched1)
                break
            else:
                end += 1
    # NOTE(review): if the short slice never matches, `end` is not advanced
    # on that path in the dump -- possible infinite loop; confirm upstream.
    end += 1
| jabinespbi/cfpl | compiler/test.py | test.py | py | 907 | python | en | code | 1 | github-code | 36 |
# Authors : Pranath Reddy, Amit Mishra
print(" _________ _____________ (_)____")
print("/ ___/ __ \/ ___/ __ `__ \/ / ___/")
print("/ /__/ /_/ (__ ) / / / / / / /__ ")
print("\___/\____/____/_/ /_/ /_/_/\___/ ")
print("A set of deep learning experiments on Cosmic Microwave Background Radiation Data")

import pandas as pd
import numpy as np

# Load the raw data; row 0 and column 0 are headers/indices and are dropped.
data = pd.read_csv('./Data.csv', header=None)
x = data.iloc[1:, 1:].values

# Zero out every value outside the pass band [112, 131].
# Vectorized boolean-mask assignment replaces the original element-by-element
# Python loop (same result, O(rows*cols) work done in C).
x[(x < 112) | (x > 131)] = 0

np.savetxt("nData.csv", x, delimiter=",")
| pranath-reddy/MLST-Cosmic | Data/Norm/Normalize.py | Normalize.py | py | 620 | python | en | code | 1 | github-code | 36 |
9098896290 | import os
import numpy as np
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFilter
from PIL import ImageFont
from tqdm import tqdm
train_dir = "dataset/train/"
test_dir = "dataset/test/"
# digit generation
def digit_generator(
    digit="1",
    font_name="/usr/share/fonts/truetype/custom/HindSiliguri-Regular.ttf",
    font_size=210,
    x_pos=50,
    y_pos=-20,
    color=(255, 255, 255),
):
    """Render a single digit in black onto a 256x256 canvas of *color*."""
    canvas = Image.new("RGB", (256, 256), color=color)
    pen = ImageDraw.Draw(canvas)
    face = ImageFont.truetype(font=font_name, size=font_size)
    pen.text((x_pos, y_pos), digit, fill=(0, 0, 0), font=face)
    return canvas
# train data generation
# train data generation
def train_datagen(
    fonts,
    color_list=[(255, 255, 255), (255, 255, 204), (0, 128, 128), (133, 193, 233)],
    color_names=["white", "yellow", "teal", "sky_blue"],
    image_count=100,
):
    """Generate Bengali-digit training images into dataset/train/<digit>/.

    Skips generation entirely when dataset/train/0 already has files.
    At most *image_count* images are written in total (the counter keeps
    incrementing past the cap, but saving stops).

    color_list is a list of tuples like (255,255,255) and color_names
    represents the corresponding names.
    ------------------------------------------------------------------------------------------------------
    Example:
    color_list = [(255,255,255), (255, 255, 204), (255, 153, 102), (102, 255, 51), (0, 0, 255), (255, 0, 102)]
    color_names = ['white', 'yellow', 'orange', 'green', 'blue', 'red']
    ------------------------------------------------------------------------------------------------------
    NOTE(review): the mutable default arguments are never mutated here, so
    sharing them across calls is harmless -- but worth confirming on review.
    """
    # Bengali glyphs paired index-for-index with their ASCII directory names.
    digits_bns = "০ ১ ২ ৩ ৪ ৫ ৬ ৭ ৮ ৯".split()
    digits_ens = "0 1 2 3 4 5 6 7 8 9".split()
    if len(os.listdir(train_dir + "0")) == 0:
        print("Generating training images...")
        img_cnt = 0
        for idx, font_name in tqdm(enumerate(fonts)):
            for jdx, (digit_bn, digit_en) in enumerate(zip(digits_bns, digits_ens)):
                for color, color_name in zip(color_list, color_names):
                    try:
                        img = digit_generator(
                            digit=digit_bn, font_name=font_name, color=color
                        )
                        img_cnt += 1
                        if img_cnt <= image_count:
                            # Filename encodes font index, digit index, color,
                            # and font family for traceability.
                            img.save(
                                "dataset/train/{}/{}_{}_{}_{}.jpg".format(
                                    digit_en,
                                    idx,
                                    jdx,
                                    color_name,
                                    font_name.split(".ttf")[0].split("/")[-1],
                                )
                            )
                    except Exception as e:
                        raise Exception("TrainImgGenError:", e)
    else:
        print("Directory is not empty: Not generating training images")
# test data generation
# test data generation
def test_datagen(
    fonts,
    color_list=[(255, 255, 255), (255, 255, 204), (0, 128, 128), (133, 193, 233)],
    color_names=["white", "yellow", "teal", "sky_blue"],
    image_count=100,
):
    """Generate Bengali-digit test images into dataset/test/<digit>/.

    Mirrors train_datagen but renders at font size 200 (train uses the
    digit_generator default of 210) so test glyphs differ slightly.
    Skips generation when dataset/test/0 already has files; at most
    *image_count* images are written in total.
    """
    font_size = 200
    # Bengali glyphs paired index-for-index with their ASCII directory names.
    digits_bns = "০ ১ ২ ৩ ৪ ৫ ৬ ৭ ৮ ৯".split()
    digits_ens = "0 1 2 3 4 5 6 7 8 9".split()
    if len(os.listdir(test_dir + "0")) == 0:
        print("Generating test images...")
        img_cnt = 0
        for idx, font_name in tqdm(enumerate(fonts)):
            for jdx, (digit_bn, digit_en) in enumerate(zip(digits_bns, digits_ens)):
                for color, color_name in zip(color_list, color_names):
                    try:
                        img = digit_generator(
                            digit=digit_bn,
                            font_name=font_name,
                            font_size=font_size,
                            color=color,
                        )
                        img_cnt += 1
                        if img_cnt <= image_count:
                            # Filename encodes font index, digit index, color,
                            # and font family for traceability.
                            img.save(
                                "dataset/test/{}/{}_{}_{}_{}.jpg".format(
                                    digit_en,
                                    idx,
                                    jdx,
                                    color_name,
                                    font_name.split(".ttf")[0].split("/")[-1],
                                )
                            )
                    except Exception as e:
                        raise Exception("TestImgGenError:", e)
    else:
        print("Directory is not empty: Not generating test images")
| rednafi/prinumco | digit_generation_src/digit_generation.py | digit_generation.py | py | 4,386 | python | en | code | 10 | github-code | 36 |
import mysql.connector

# Connect to the local MySQL server.
# BUG FIX: host was "local host" (embedded space -> invalid hostname).
mydb = mysql.connector.connect(
    host="localhost",
    user="root",
    password="",
    database="school"
)
mycursor = mydb.cursor()

mycursor.execute("CREATE DATABASE school")
# BUG FIX: the statement is "SHOW DATABASES" (plural) in MySQL.
mycursor.execute("SHOW DATABASES")
# BUG FIX: each fetched row is a 1-tuple, so the original `x == "school"`
# never matched; compare the first column and report once.
if any(row[0] == "school" for row in mycursor):
    print("database is present ")
else:
    print("not present ")

mycursor.execute("CREATE TABLE Teacher(name varchar(20),dept varchar(30))")
sql = "INSERT INTO Teacher(name, dept) VALUES(%s,%s)"
val = [("kannu", "etc"),
       ("melvita", "comp"),
       ("hrutwa", "etc"),
       ("neetal", "etc"),
       ("nishika", "comp"),
       ("raj", "etc"),
       ("musatf", "IT"),
       ("keegan", "etc"),
       ]
mycursor.executemany(sql, val)
mydb.commit()

mycursor.execute("CREATE TABLE student(name varchar(20),dept varchar(30))")
sq = "INSERT INTO student(name, dept) VALUES(%s,%s)"
va = [("anish", "IT"),
      ("kevin", "comp"),
      ("namrata", "IT"),
      ("vishvas", "etc"),
      ("ajay", "IT"),
      ("kiran", "etc"),
      ("joan", "IT"),
      ("ayushi", "etc"),
      ("ajaybind", "IT"),
      ("karishma", "IT"),
      ("jo", "IT"),
      ("ayushman", "etc"),
      ]
# BUG FIX: this executemany used (sql, val) and re-inserted the Teacher
# rows into student; it must use the student statement and rows.
mycursor.executemany(sq, va)
mydb.commit()

# BUG FIX: the subject column was declared as `subject(30)` (no type).
mycursor.execute("CREATE TABLE department(name varchar(20),subject varchar(30))")
sql = "INSERT INTO department(name,subject) VALUES(%s,%s)"
val = [("anish", "CAS"),
       ("kevin", "AC"),
       ("namrata", "SOFT COMPUTING "),
       ("vishvas", "CNN"),
       ("ajay", "CAS"),
       ("kiran", "CNN"),
       ("joan", "AC"),
       ("ayushi", "CNN"),
       ("ajaybind", "SOFT COMPUTING"),
       ("karishma", "ANN"),
       ("jo", "WAS"),
       ("ayushman", "CNN"),
       ]
mycursor.executemany(sql, val)
mydb.commit()

# Display the tables.
mycursor.execute("SELECT *FROM Teacher WHERE Dept = 'etc'")  # 5 rows
for row in mycursor.fetchall():
    print(row)

mycursor.execute("SELECT *FROM student WHERE Dept = 'IT'")  # 7 rows
for row in mycursor.fetchall():
    print(row)

mycursor.execute("SELECT *FROM department ")  # all rows
for row in mycursor.fetchall():
    print(row)
| DhanKumari/python_2 | database.py | database.py | py | 2,143 | python | en | code | 0 | github-code | 36 |
14821096504 | # Woman who habitually buys pastries before 5
import json
def find_customers_who_order_multiple_pastries_before_5am() -> list[str]:
    """
    Identifies customer ids of customers who
    placed orders between midnight and 5am
    """
    suspects = []
    with open('./noahs-jsonl/noahs-orders.jsonl', 'r') as orders_file:
        for raw_line in orders_file:
            order = json.loads(raw_line)
            # "ordered" looks like "YYYY-MM-DD HH:MM:SS" -> take the hour.
            hour = int(order["ordered"].split(" ")[1].split(":")[0])
            # Total quantity of bakery ("BKY" sku) items on the order.
            pastry_qty = sum(
                int(item["qty"])
                for item in order["items"]
                if "BKY" in item["sku"]
            )
            if hour < 5 and pastry_qty > 1:
                suspects.append(order["customerid"])
    return suspects
def find_customers_from_customer_ids(customer_ids: list[str]) -> list[dict]:
    """
    Gets a list of customers from
    a list of customer ids
    """
    matches = []
    with open('./noahs-jsonl/noahs-customers.jsonl', 'r') as customers_file:
        for raw_line in customers_file:
            record = json.loads(raw_line)
            cid = record["customerid"]
            if cid in customer_ids:
                # "freq" = how many qualifying orders carried this id.
                record["freq"] = customer_ids.count(cid)
                matches.append(record)
    return matches
if __name__ == "__main__":
    possible_customer_ids = find_customers_who_order_multiple_pastries_before_5am()
    possible_customers = find_customers_from_customer_ids(possible_customer_ids)
    # Sort so the most habitual early-morning pastry buyer comes first.
    possible_customers = sorted(possible_customers, key=lambda x: x["freq"], reverse=True)
    for customer in possible_customers:
        # First customer to be printed is the one who most habitually
        # purchases pastries early morning
        print(customer)
32022624457 | #Load variable from a file. Userstat.txt
#USerstat.txt has first line as whether user wants to choose more projects, and next lines has names of all projects user has chosen.
#We have to create a login function here for the user, else, how would the app know which userfile to download.?
#For now let's simply ask the user for his username, and not the whole login process
import urllib.request

# Prompt until a username that the server recognises is entered; success is
# detected by the userfile download URL opening without raising.
a = True
while (a):
    try:
        username = input("Please enter you username - ")
        a = False
        url = 'http://127.0.0.1:8000/download_userfile/' + username + '/'
        obj = urllib.request.urlopen(url)
    except:
        # NOTE(review): bare except treats ANY failure (network down, server
        # error) as "wrong username" -- consider narrowing to urllib.error.
        print("Oops! wrong username, Try again!")
        a = True
import sys
import time
from importlib import reload

'''zip = ZipFile('file.zip')
zip.extractall()'''

# Save the downloaded userfile locally. First line holds the user id plus a
# "choose more projects" flag; remaining lines are chosen project entries of
# the form "<project id>-<project name>[ (done)| (wait)]".
data = obj.read()
with open('Userstat_' + username + '.txt', "wb+") as code:
    code.write(data)
    code.close()  # NOTE(review): redundant inside `with`, kept as-is
# urllib.request.urlretrieve(url,'Userstat_'+username+'.txt') it's in python2

file = open('Userstat_' + username + '.txt', "r")
l = file.readlines()
uid = l[0][0]
l1 = l[1:]
file.close()

print("All of your chosen projects will run one by one")
print("Projects you have chosen are:")
# List projects and count how many are already marked (done).
ind = 1
donecount = 0
for i in l1:
    a = str(ind) + " - " + i[i.find('-') + 1:]
    print(a)
    if '(done)' in a:
        donecount += 1
    ind += 1
if donecount == len(l1):
    print("Congratulations!!\n All the tasks of all the projects you are contributing to,")
    print("Are done! Hurray!")

chind = int(input("Choose index of project to start with"))
print("Projects will be run from Project " + str(chind) + " in above order, one by one")
print("Note, the program will keep running until you close this application")
# originalsyspath = sys.path
'''for i in range(len(l)):
    prid=i[:i.find('-')-1]
    sys.path.insert(0, './'+prid+'_files')
'''

# Main loop: repeatedly run every unfinished project's projman.runproj.
# NOTE(review): nesting below is reconstructed from a whitespace-stripped
# dump; also `chind` indexes into `l` (which includes the header line)
# although it was chosen against `l1` -- likely an off-by-one upstream.
while (1 > 0):
    for j in range(chind, len(l)):
        i = l[j]
        if ' (done)' in i:
            continue
        elif ' (wait)' in i:
            print('Tasks for ' + i + ' are all assigned but not completed.')
            print('Tasks maybe available after about 60 seconds, so sleeping for 60 seconds....')
            time.sleep(60)
        # Project id prefixes the entry; its folder holds that project's projman.
        prid = i[:i.find('-')]
        sys.path.insert(0, './' + prid + '_files')
        print(sys.path)
        import projman
        reload(projman)  # re-import so the new path's projman wins
        print("Currently doing - " + i[i.find('-') + 1:] + " ...")
        projman.runproj(username)
        sys.path.remove('./' + prid + '_files')
        # Re-read the status file (runproj may have updated it) and re-count.
        file = open('Userstat_' + username + '.txt', "r")
        l = file.readlines()
        uid = l[0][0]
        l1 = l[1:]
        file.close()
        chind = 1
        donecount = 0
        for i in l1:
            a = str(ind) + " - " + i[i.find('-') + 1:]
            print(a)
            if '(done)' in a:
                donecount += 1
            ind += 1
        if donecount == len(l1):
            print("Congratulations!!\n All the tasks of all the projects you are contributing to,")
            print("Are done! Hurray!")
            break
    print("Note, the program will keep running until you close this application")
print("The program will now exit")
# print("Do you want to chose more projects?('f') Or do you want to delete projects from your list?('d')")
# chosen=input()
| snehalgupta/issc | pcapp/app.py | app.py | py | 2,900 | python | en | code | 0 | github-code | 36 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations


# Auto-generated schema migration: links ContactUs entries to a Customer.
class Migration(migrations.Migration):

    dependencies = [
        ('customer', '0009_auto_20151012_1449'),
        ('tab1', '0001_initial'),
    ]

    operations = [
        # Optional FK (blank=True, default=None) so existing anonymous
        # contact entries keep working; verbose_name is Chinese for "user".
        migrations.AddField(
            model_name='contactus',
            name='customer',
            field=models.ForeignKey(default=None, verbose_name='\u7528\u6237', blank=True, to='customer.Customer'),
        ),
    ]
| wlj459/unipub | tab1/migrations/0002_contactus_customer.py | 0002_contactus_customer.py | py | 505 | python | en | code | 1 | github-code | 36 |
28205621966 | from typing import Dict, List, Optional, Union
from fastapi import APIRouter, Depends, Response, status
from sqlalchemy.orm import Session
from src import oauth, schemas
from src.db.database import get_db
from src.services import post as post_service
# All blog-post endpoints live under /posts and share the "Blog Posts" tag.
router = APIRouter(prefix="/posts", tags=["Blog Posts"])
@router.get(
    "/",
    response_model=List[Dict[str, Union[schemas.PostOut, int]]],
    status_code=status.HTTP_200_OK,
)
async def get_all_post(
    limit: int = 10, skip: Optional[int] = None, db: Session = Depends(get_db)
):
    """List posts with pagination (`limit` page size, `skip` offset)."""
    return post_service.get_all_post(db, limit, skip)
@router.post("/", response_model=schemas.PostOut, status_code=status.HTTP_201_CREATED)
async def create_post(
    post: schemas.PostCreate,
    user_id: int = Depends(oauth.get_current_user),
    db: Session = Depends(get_db),
):
    """Create a post owned by the authenticated user (401 if not logged in)."""
    return post_service.create_new_post(post, db, user_id)
@router.get("/{id}", response_model=schemas.PostOut, status_code=status.HTTP_200_OK)
async def get(id: int, db: Session = Depends(get_db)):
    """Fetch one post by id; the service layer handles the missing-id case."""
    return post_service.get_by_id(post_id=id, db=db)
@router.delete("/{id}", status_code=status.HTTP_204_NO_CONTENT)
async def delete_post(
    id: int,
    user_id: int = Depends(oauth.get_current_user),
    db: Session = Depends(get_db),
):
    """Delete the authenticated user's post; returns an empty 204 response."""
    post_service.delete_post(id, user_id, db)
    return Response(status_code=status.HTTP_204_NO_CONTENT)
@router.put("/{id}", response_model=schemas.PostOut, status_code=status.HTTP_200_OK)
async def update_post(
    id: int,
    post: schemas.PostUpdate,
    user_id: int = Depends(oauth.get_current_user),
    db: Session = Depends(get_db),
):
    """Update the authenticated user's post and return the new version."""
    post = post_service.update_posts(id, post, user_id, db)
    return post
| hiteshsankhat/blog_post | backend/src/api/endpoints/posts.py | posts.py | py | 1,700 | python | en | code | 0 | github-code | 36 |
29178030064 | from ting_file_management.file_management import txt_importer
import sys
def process(path_file, instance):
    """Import *path_file* and enqueue its metadata, skipping duplicates."""
    # Skip if this exact path was already processed (already in the queue).
    for index in range(len(instance)):
        if instance.search(index)["nome_do_arquivo"] == path_file:
            return
    # Import the file's lines.
    file_lines = txt_importer(path_file)
    # Build the queue entry describing the file.
    file_data = {
        "nome_do_arquivo": path_file,
        "qtd_linhas": len(file_lines),
        "linhas_do_arquivo": file_lines,
    }
    # Add the entry to the queue.
    instance.enqueue(file_data)
    # Echo the processed data on stdout (no trailing newline, by design).
    sys.stdout.write(str(file_data))
def remove(instance):
    """Dequeue the oldest processed file and report its removal."""
    if not len(instance):
        print("Não há elementos")
        return
    dequeued = instance.dequeue()
    print(f"Arquivo {dequeued['nome_do_arquivo']} removido com sucesso")
def file_metadata(instance, position):
    """Print the queue entry at *position*, or an error to stderr."""
    try:
        print(instance.search(position))
    except IndexError:
        print("Posição inválida", file=sys.stderr)
| erickbxs/Python-google | ting_file_management/file_process.py | file_process.py | py | 1,132 | python | pt | code | 1 | github-code | 36 |
20948666331 | import os
import os.path as osp
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
from torch import optim
from torch.utils.tensorboard import SummaryWriter
from sklearn.metrics import confusion_matrix
from model.refinenet import Segmentor
from model.discriminator import Discriminator
from dataset import TrainDataset, TestDataset
from loss import CrossEntropyLoss2d, BCEWithLogitsLoss2d, FocalLoss
from metric import evaluate_conf_mat
from eval import evaluate
from utils import makedir, save_metrics
import settings
# def lr_poly_scheduler(optim_G, optim_D, init_lr, init_lr_D, lr_decay_iter, iter, max_iter, poly_power):
# if iter % lr_decay_iter or iter > max_iter:
# return
# # calculate new lr
# new_lr = init_lr * (1 - float(iter) / max_iter) ** poly_power
# new_lr_D = init_lr_D * (1 - float(iter) / max_iter) ** poly_power
# # set optim_G lr
# optim_G.param_groups[0]['lr'] = new_lr
# optim_G.param_groups[1]['lr'] = new_lr * 10
# # set optim_D lr
# optim_D.param_groups[0]['lr'] = new_lr_D
def make_D_label(label, D_output):
    """Build a constant label tensor shaped/typed like the discriminator output."""
    filled = torch.full_like(D_output, label)
    return filled.clone().detach().requires_grad_(True).cuda()
# def make_D_label2(label, ignore_mask):
# ignore_mask = np.expand_dims(ignore_mask, axis=1)
# D_label = (np.ones(ignore_mask.shape)*label)
# D_label[ignore_mask] = settings.BCE_IGNORE_LABEL
# # D_label = Variable(torch.FloatTensor(D_label)).cuda()
# D_label = torch.tensor(D_label, dtype=torch.float64, requires_grad=True).cuda()
# return D_label
def save_checkpoint(epoch, model_G, model_D, optim_G, optim_D, lr_scheduler_G, lr_scheduler_D):
    """Serialize the full training state (both nets, optimizers, schedulers)."""
    state = {
        'epoch': epoch,
        'model_G_state_dict': model_G.state_dict(),
        'model_D_state_dict': model_D.state_dict(),
        'optim_G_state_dict': optim_G.state_dict(),
        'optim_D_state_dict': optim_D.state_dict(),
        'lr_scheduler_G_state_dict': lr_scheduler_G.state_dict(),
        'lr_scheduler_D_state_dict': lr_scheduler_D.state_dict(),
    }
    print('saving a checkpoint in epoch {}'.format(epoch))
    target = osp.join(settings.CHECKPOINT_DIR, 'CHECKPOINT_' + str(epoch) + '.tar')
    torch.save(state, target)
def train_one_epoch(model_G, model_D, optim_G, optim_D, dataloader, test_dataloader, epoch,
                    upsample, ce_loss, bce_loss, writer, print_freq=5, eval_freq=settings.EVAL_FREQ):
    """Run one adversarial training epoch (segmentor G vs discriminator D).

    Per batch: (1) update G with CE loss plus an adversarial term that pushes
    D to label G's softmax output as ground truth; (2) update D to tell G's
    predictions (label 0) from one-hot ground truth (label 1). Metrics are
    accumulated and validation is run every `eval_freq` epochs.
    NOTE(review): indentation reconstructed from a whitespace-stripped dump.
    """
    max_iter = len(dataloader)
    # initialize losses
    loss_G_seg_values = []
    loss_adv_seg_values = []
    loss_D_values = []
    eval_trainval = False
    if epoch % eval_freq == 0:
        eval_trainval = True
    # confusion matrix ; to track metrics such as mIoU during training
    conf_mat = np.zeros((settings.NUM_CLASSES, settings.NUM_CLASSES))
    # labels for adversarial training
    pred_label = 0
    gt_label = 1
    for i_iter, batch in enumerate(dataloader):
        images, depths, labels = batch
        images = images.cuda()
        depths = depths.cuda()
        labels = labels.cuda()
        optim_G.zero_grad()
        optim_D.zero_grad()
        ####### train generator #######
        # disable accumulating grads in discriminator
        for param in model_D.parameters():
            param.requires_grad = False
        # get a mask where an elemnt is True for every pixel with ignore_label value
        ignore_mask = (labels == settings.IGNORE_LABEL)
        target_mask = torch.logical_not(ignore_mask)
        target_mask = target_mask.unsqueeze(dim=1)
        # get the output of generator (RGB-only or RGB-D "middle" fusion)
        if settings.MODALITY == 'rgb':
            predict = upsample(model_G(images))
        elif settings.MODALITY == 'middle':
            predict = upsample(model_G(images, depths))
        # calculate cross-entropy loss
        loss_G_seg = ce_loss(predict, labels)
        # calculate adversarial loss (G wants D to say "ground truth")
        D_output = upsample(model_D(F.softmax(predict, dim=1)))
        loss_adv = bce_loss(D_output, make_D_label(gt_label, D_output), target_mask)
        # accumulate loss, backward and store value
        loss_G = loss_G_seg + settings.LAMBDA_ADV_SEG * loss_adv
        loss_G.backward()
        loss_G_seg_values.append(loss_G_seg.data.cpu().numpy())
        loss_adv_seg_values.append(loss_adv.data.cpu().numpy())
        if eval_trainval:
            # get pred and gt to compute confusion matrix (valid pixels only)
            seg_pred = np.argmax(predict.detach().cpu().numpy(), axis=1)
            seg_gt = labels.cpu().numpy().copy()
            seg_pred = seg_pred[target_mask.squeeze(dim=1).cpu().numpy()]
            seg_gt = seg_gt[target_mask.squeeze(dim=1).cpu().numpy()]
            conf_mat += confusion_matrix(seg_gt, seg_pred, labels=np.arange(settings.NUM_CLASSES))
        ####### end of train generator #######
        ####### train discriminator #######
        # activate the gradient accumulation in D
        for param in model_D.parameters():
            param.requires_grad = True
        # detach from G so D's loss does not backprop into the segmentor
        predict = predict.detach()
        D_output = upsample(model_D(F.softmax(predict, dim=1)))
        loss_D = bce_loss(D_output, make_D_label(pred_label, D_output), target_mask)
        loss_D.backward()
        loss_D_values.append(loss_D.data.cpu().numpy())
        # pass ground truth to discriminator (one-hot NCHW encoding)
        gt = labels.clone().detach().cuda()
        gt_one_hot = F.one_hot(gt, num_classes=settings.NUM_CLASSES).permute(0,3,1,2).contiguous().float()
        D_output = upsample(model_D(gt_one_hot))
        loss_D = bce_loss(D_output, make_D_label(gt_label, D_output), target_mask)
        loss_D.backward()
        loss_D_values.append(loss_D.data.cpu().numpy())
        ####### end of train discriminator #######
        optim_G.step()
        optim_D.step()
        if i_iter % print_freq == 0 and i_iter != 0:
            # Log the running means since the last report, then reset them.
            loss_G_seg_value = np.mean(loss_G_seg_values)
            loss_G_seg_values = []
            loss_adv_seg_value = np.mean(loss_adv_seg_values)
            loss_adv_seg_values = []
            loss_D_value = np.mean(loss_D_values)
            loss_D_values = []
            writer.add_scalar('Loss_G_SEG/Train', loss_G_seg_value, i_iter+epoch*max_iter)
            writer.add_scalar('Loss_G_SEG_ADV/Train', loss_adv_seg_value, i_iter+epoch*max_iter)
            writer.add_scalar('Loss_D/Train', loss_D_value, i_iter+epoch*max_iter)
            writer.add_scalar('learning_rate_G/Train', optim_G.param_groups[0]['lr'], i_iter+epoch*max_iter)
            writer.add_scalar('learning_rate_D/Train', optim_D.param_groups[0]['lr'], i_iter+epoch*max_iter)
            print("epoch = {:3d}/{:3d}: iter = {:3d},\t loss_seg = {:.3f},\t loss_adv = {:.3f},\t loss_d = {:.3f}".format(
                epoch, settings.EPOCHS, i_iter, loss_G_seg_value, loss_adv_seg_value, loss_D_value))
    if eval_trainval:
        # End-of-epoch: log train metrics, run validation, restore train mode
        # (evaluate() presumably switches the model to eval mode).
        save_metrics(conf_mat, writer, epoch*max_iter, 'Train')
        conf_mat = evaluate(model_G, test_dataloader)
        save_metrics(conf_mat, writer, epoch*max_iter, 'Val')
        model_G.train()
def main():
    """Entry point for adversarial semantic-segmentation training.

    Builds the segmentor (generator) and discriminator, their optimizers and
    poly-decay LR schedulers, optionally resumes from a checkpoint, then runs
    `train_one_epoch` for each epoch, checkpointing periodically and at the end.
    All hyper-parameters come from the module-level `settings`.
    """
    # set torch and numpy seed for reproducibility
    torch.manual_seed(27)
    np.random.seed(27)
    # tensorboard writer
    writer = SummaryWriter(settings.TENSORBOARD_DIR)
    # makedir snapshot
    makedir(settings.CHECKPOINT_DIR)
    # enable cudnn
    torch.backends.cudnn.enabled = True
    # create segmentor network
    model_G = Segmentor(pretrained=settings.PRETRAINED, num_classes=settings.NUM_CLASSES,
                        modality=settings.MODALITY)
    model_G.train()
    model_G.cuda()
    torch.backends.cudnn.benchmark = True
    # create discriminator network
    model_D = Discriminator(settings.NUM_CLASSES)
    model_D.train()
    model_D.cuda()
    # dataset and dataloader
    dataset = TrainDataset()
    dataloader = data.DataLoader(dataset, batch_size=settings.BATCH_SIZE,
                                 shuffle=True, num_workers=settings.NUM_WORKERS,
                                 pin_memory=True, drop_last=True)
    test_dataset = TestDataset(data_root=settings.DATA_ROOT_VAL, data_list=settings.DATA_LIST_VAL)
    test_dataloader = data.DataLoader(test_dataset, batch_size=1, shuffle=False,
                                      num_workers=settings.NUM_WORKERS, pin_memory=True)
    # optimizer for generator network (segmentor)
    optim_G = optim.SGD(model_G.optim_parameters(settings.LR), lr=settings.LR,
                        momentum=settings.LR_MOMENTUM, weight_decay=settings.WEIGHT_DECAY)
    # lr scheduler for optimi_G: polynomial decay in the epoch index
    lr_lambda_G = lambda epoch: (1 - epoch / settings.EPOCHS) ** settings.LR_POLY_POWER
    lr_scheduler_G = optim.lr_scheduler.LambdaLR(optim_G, lr_lambda=lr_lambda_G)
    # optimizer for discriminator network
    optim_D = optim.Adam(model_D.parameters(), settings.LR_D)
    # lr scheduler for optimi_D
    lr_lambda_D = lambda epoch: (1 - epoch / settings.EPOCHS) ** settings.LR_POLY_POWER
    lr_scheduler_D = optim.lr_scheduler.LambdaLR(optim_D, lr_lambda=lr_lambda_D)
    # losses
    ce_loss = CrossEntropyLoss2d(ignore_index=settings.IGNORE_LABEL) # to use for segmentor
    bce_loss = BCEWithLogitsLoss2d() # to use for discriminator
    # upsampling for the network output
    upsample = nn.Upsample(size=(settings.CROP_SIZE, settings.CROP_SIZE), mode='bilinear', align_corners=True)
    # # labels for adversarial training
    # pred_label = 0
    # gt_label = 1
    # load the model to resume training
    last_epoch = -1
    if settings.RESUME_TRAIN:
        checkpoint = torch.load(settings.LAST_CHECKPOINT)
        model_G.load_state_dict(checkpoint['model_G_state_dict'])
        model_G.train()
        model_G.cuda()
        model_D.load_state_dict(checkpoint['model_D_state_dict'])
        model_D.train()
        model_D.cuda()
        optim_G.load_state_dict(checkpoint['optim_G_state_dict'])
        optim_D.load_state_dict(checkpoint['optim_D_state_dict'])
        lr_scheduler_G.load_state_dict(checkpoint['lr_scheduler_G_state_dict'])
        lr_scheduler_D.load_state_dict(checkpoint['lr_scheduler_D_state_dict'])
        last_epoch = checkpoint['epoch']
        # purge the logs after the last_epoch
        writer = SummaryWriter(settings.TENSORBOARD_DIR, purge_step=(last_epoch+1)*len(dataloader))
    # NOTE(review): the range goes to EPOCHS inclusive, so the schedulers step
    # EPOCHS+1 times; the lambda's (1 - epoch/EPOCHS) factor then goes
    # non-positive on the final steps -- confirm this is intended.
    for epoch in range(last_epoch+1, settings.EPOCHS+1):
        train_one_epoch(model_G, model_D, optim_G, optim_D, dataloader, test_dataloader, epoch,
                        upsample, ce_loss, bce_loss, writer, print_freq=5, eval_freq=settings.EVAL_FREQ)
        if epoch % settings.CHECKPOINT_FREQ == 0 and epoch != 0:
            save_checkpoint(epoch, model_G, model_D, optim_G, optim_D,
                            lr_scheduler_G, lr_scheduler_D)
        # save the final model (also closes the TensorBoard writer)
        if epoch >= settings.EPOCHS:
            print('saving the final model')
            save_checkpoint(epoch, model_G, model_D, optim_G, optim_D,
                            lr_scheduler_G, lr_scheduler_D)
            writer.close()
        lr_scheduler_G.step()
        lr_scheduler_D.step()
# Script entry point: run the full adversarial training driver.
if __name__ == "__main__":
    main()
# for i_iter in range(settings.MAX_ITER):
# # initialize losses
# loss_G_seg_value = 0
# loss_adv_seg_value = 0
# loss_D_value = 0
# # clear optim gradients and adjust learning rates
# optim_G.zero_grad()
# optim_D.zero_grad()
# lr_poly_scheduler(optim_G, optim_D, settings.LR, settings.LR_D, settings.LR_DECAY_ITER,
# i_iter, settings.MAX_ITER, settings.LR_POLY_POWER)
# ####### train generator #######
# # not accumulate grads in discriminator
# for param in model_D.parameters():
# param.requires_grad = False
# # get the batch of data
# try:
# _, batch = next(dataloader_iter)
# except:
# dataloader_iter = enumerate(dataloader)
# _, batch = next(dataloader_iter)
# images, depths, labels = batch
# images = images.cuda()
# depths = depths.cuda()
# labels = labels.cuda()
# # get a mask where is True for every pixel with ignore_label value
# ignore_mask = (labels == settings.IGNORE_LABEL)
# target_mask = torch.logical_not(ignore_mask)
# target_mask = target_mask.unsqueeze(dim=1)
# # get the output of generator
# if settings.MODALITY == 'rgb':
# predict = upsample(model_G(images))
# elif settings.MODALITY == 'middle':
# predict = upsample(model_G(images, depths))
# # calculate cross-entropy loss
# loss_G_seg = ce_loss(predict, labels)
# # calculate adversarial loss
# D_output = upsample(model_D(F.softmax(predict, dim=1)))
# loss_adv = bce_loss(D_output, make_D_label(gt_label, D_output), target_mask)
# # accumulate loss, backward and store value
# loss = loss_G_seg + settings.LAMBDA_ADV_SEG * loss_adv
# loss.backward()
# loss_G_seg_value += loss_G_seg.data.cpu().numpy()
# loss_adv_seg_value += loss_adv.data.cpu().numpy()
# ####### end of train generator #######
# ####### train discriminator #######
# # pass prediction to discriminator
# # reset the gradient accumulation
# for param in model_D.parameters():
# param.requires_grad = True
# # detach from G
# predict = predict.detach()
# D_output = upsample(model_D(F.softmax(predict, dim=1)))
# loss_D = bce_loss(D_output, make_D_label(pred_label, D_output), target_mask)
# loss_D.backward()
# loss_D_value += loss_D.data.cpu().numpy()
# # pass ground truth to discriminator
# gt = labels.clone().detach().cuda()
# gt_one_hot = F.one_hot(gt, num_classes=settings.NUM_CLASSES).permute(0,3,1,2).float()
# D_output = upsample(model_D(gt_one_hot))
# loss_D = bce_loss(D_output, make_D_label(gt_label, D_output), target_mask)
# loss_D.backward()
# loss_D_value += loss_D.data.cpu().numpy()
# ####### end of train discriminator #######
# optim_G.step()
# optim_D.step()
# # get pred and gt to compute confusion matrix
# seg_pred = np.argmax(predict.cpu().numpy(), axis=1)
# seg_gt = labels.cpu().numpy().copy()
# seg_pred = seg_pred[target_mask.squeeze(dim=1).cpu().numpy()]
# seg_gt = seg_gt[target_mask.squeeze(dim=1).cpu().numpy()]
# conf_mat += confusion_matrix(seg_gt, seg_pred, labels=np.arange(settings.NUM_CLASSES))
# ####### log ########
# if i_iter % ((settings.TRAIN_SIZE // settings.BATCH_SIZE)) == 0 and i_iter != 0:
# metrics = evaluate(conf_mat)
# writer.add_scalar('Pixel Accuracy/Train', metrics['pAcc'], i_iter)
# writer.add_scalar('Mean Accuracy/Train', metrics['mAcc'], i_iter)
# writer.add_scalar('mIoU/Train', metrics['mIoU'], i_iter)
# writer.add_scalar('fwavacc/Train', metrics['fIoU'], i_iter)
# conf_mat = np.zeros_like(conf_mat)
# writer.add_scalar('Loss_G_SEG/Train', loss_G_seg_value, i_iter)
# writer.add_scalar('Loss_D/Train', loss_D_value, i_iter)
# writer.add_scalar('Loss_G_SEG_adv/Train', loss_adv_seg_value, i_iter)
# writer.add_scalar('learning_rate_G/Train', optim_G.param_groups[0]['lr'], i_iter)
# writer.add_scalar('learning_rate_D/Train', optim_D.param_groups[0]['lr'], i_iter)
# print( "iter = {:6d}/{:6d},\t loss_seg = {:.3f}, loss_adv = {:.3f}, loss_D = {:.3f}".format(
# i_iter, settings.MAX_ITER,
# loss_G_seg_value,
# loss_adv_seg_value,
# loss_D_value))
# with open(settings.LOG_FILE, "a") as f:
# output_log = '{:6d},\t {:.8f},\t {:.8f},\t {:.8f}\n'.format(
# i_iter,
# loss_G_seg_value,
# loss_adv_seg_value,
# loss_D_value)
# f.write(output_log)
# # taking snapshot
# if i_iter >= settings.MAX_ITER:
# print('saving the final model ...')
# torch.save(model_G.state_dict(),osp.join(settings.CHECKPOINT_DIR, 'CHECKPOINT_'+str(settings.MAX_ITER)+'.pt'))
# torch.save(model_D.state_dict(),osp.join(settings.CHECKPOINT_DIR, 'CHECKPOINT_'+str(settings.MAX_ITER)+'_D.pt'))
# break
# if i_iter % settings.SAVE_EVERY == 0 and i_iter != 0:
# print('taking snapshot ...')
# torch.save(model_G.state_dict(),osp.join(settings.CHECKPOINT_DIR, 'CHECKPOINT_'+str(i_iter)+'.pt'))
# torch.save(model_D.state_dict(),osp.join(settings.CHECKPOINT_DIR, 'CHECKPOINT_'+str(i_iter)+'_D.pt'))
| ElhamGhelichkhan/semiseggan | train.py | train.py | py | 17,083 | python | en | code | 0 | github-code | 36 |
13989770988 | from collections import deque
class Solution:
    def wallsAndGates(self, rooms):
        """Fill each empty room in-place with the distance to its nearest gate.

        `rooms` is a 2-D grid where -1 is a wall, 0 is a gate, and any
        positive value (typically INF = 2**31 - 1) is an empty room.

        Improvement over the original: instead of one BFS per gate
        (O(gates * cells)), run a single multi-source BFS seeded with every
        gate at once, so each cell is relaxed at most once -- O(rows * cols).
        Returns None; the grid is mutated in place.
        """
        n = len(rooms)
        m = len(rooms[0]) if rooms else 0
        if 0 in (n, m):
            return
        # Seed the frontier with every gate; a gate's distance is already 0.
        queue = deque((i, j) for i in range(n) for j in range(m)
                      if rooms[i][j] == 0)
        while queue:
            x, y = queue.popleft()
            dist = rooms[x][y] + 1
            for i, j in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
                # Walls (-1) and cells already reached by an equal-or-shorter
                # path fail this test, so each cell enters the queue at most once.
                if 0 <= i < n and 0 <= j < m and rooms[i][j] > dist:
                    rooms[i][j] = dist
                    queue.append((i, j))
| dariomx/topcoder-srm | leetcode/zero-pass/facebook/walls-and-gates/Solution.py | Solution.py | py | 1,023 | python | en | code | 0 | github-code | 36 |
# Map calls to a fair 0/1 bit generator onto a uniform integer in [a, b].
# Each round draws just enough bits to cover b - a + 1 outcomes and uses
# rejection sampling: draws that fall outside the range are discarded and
# retried, so every accepted value is equally likely.
# Expected cost: O(log2(b - a + 1)) calls to func; O(1) space.
def prob_shift(a, b, func):
    span = b - a + 1
    # Bits needed to represent `span` distinct outcomes; 0 when span == 1.
    num_bits = (span - 1).bit_length()
    while True:
        candidate = 0
        for _ in range(num_bits):
            candidate = (candidate << 1) | func()
        # Accept only draws inside [0, span); otherwise retry the whole round.
        if candidate < span:
            return candidate + a
| aaryanDhakal22/EOPIP | 1-PrimitiveTypes/probability_shift.py | probability_shift.py | py | 784 | python | en | code | 0 | github-code | 36 |
37439980327 | import pandas as pd
from pathlib import Path
import numpy as np
from data_cleaning import clean_data
from data_preparation import data_preparation
# Input location of the raw test set and output directory for the submission.
INPUT_TEST_PATH = Path('data/raw/test.csv')
OUTPUT_PATH = Path('data/test_with_predicted_revenue/')
# Script entry point: clean and prepare the raw test data, apply the linear
# baseline fitted during EDA, and write the submission CSV.
if __name__ == "__main__":
    test_df = pd.read_csv(INPUT_TEST_PATH)
    test_cleaned_df = clean_data(df=test_df)
    test_prepared_df = data_preparation(df=test_cleaned_df)
    # Check for missing values -- fail fast if preparation left any NaNs.
    if test_prepared_df.isna().sum().any():
        print(test_prepared_df.isna().sum())
        raise ValueError("There is some NaN values in dataframe")
    # Apply linear baseline from EDA.
    # The model was fit on log-revenue, so exp() maps the linear prediction
    # back to the revenue scale.
    # NOTE(review): coefficients are hard-coded; presumably copied from an
    # EDA notebook fit -- confirm they match the current training data.
    y_pred = np.exp(0.426147 + 0.993024 * test_prepared_df['log_rev_24h'])
    test_prepared_df = test_prepared_df.assign(revenue_30d_total=y_pred)
    OUTPUT_PATH.mkdir(exist_ok=True, parents=True)
    test_prepared_df.to_csv(OUTPUT_PATH / 'linear_submission.csv', index=False)
| LlirikOknessu/brightika_test | linear_prod_version.py | linear_prod_version.py | py | 932 | python | en | code | 0 | github-code | 36 |
15502000063 | #help(print)
#print(input.__doc__)
#help(input)
def contagem(start: int, end: int, jump: int):
"""
-> Faz uma contagem e mostar na tela.
-> Caso o parametro jump seja 0, seu valor será trocado por 1.
:param start: inicio da contagem
:param end: fim da contagem
:param jump: passo da contagem
:return: sem retorno
"""
if jump == 0:
print('Erro! Não se pode contar e 0 em 0, o valor da contagem será de 1 em 1.')
jump = 1
if start < end:
n = start
while n <= end:
print(n, end=' ')
n += jump
else:
n = start
while n >= end:
print(n, end=' ')
n -= jump
def somar(a=0, b=0, c=0):
    """
    -> Adds three optional values and prints the result.
    :param a: first value
    :param b: second value
    :param c: third value
    :return: no return value
    """
    # print() inserts the separating space, matching the original f-string output.
    print('A soma vale', a + b + c)
def teste(b=0):
    """Scope-demonstration helper used by the lesson script.

    With b == 0 it reads the *global* name ``n`` (NameError if no global ``n``
    exists) and a local ``x``; otherwise it assigns the global ``a`` and shows
    that ``b`` and ``c`` are purely local.
    """
    if b == 0:
        x = 8
        # NOTE(review): relies on a module-level `n` being defined by the
        # caller's script -- raises NameError otherwise.
        print(f'Na função teste, n vale {n}')
        print(f'Na função teste, x vale {x}')
    else:
        # `global a` makes the assignment below visible at module level.
        global a
        a = 8
        b += 4
        c = 2
        print(f'A dentro vale {a}')
        print(f'B dentro vale {b}')
        print(f'C dentro vale {c}')
def newSum(a=0, b=0, c=0):
    """
    -> Adds three optional values and returns the result.
    :param a: first value
    :param b: second value
    :param c: third value
    :return: the sum of the three values
    """
    return a + b + c
def fatorial(n=1):
    """
    -> Computes the factorial of an integer.
    :param n: number (optional, default 1) whose factorial is computed
    :return: the factorial of n (1 when n <= 1)
    """
    result = 1
    # Multiply upwards from 2; an empty range leaves the result at 1,
    # matching the original's behaviour for n <= 1.
    for factor in range(2, n + 1):
        result *= factor
    return result
#help(contagem)
#contagem(0, 10, 2)
#help(somar)
#somar(3, 2, 5)
#somar(8, 4)
#somar()
#n = 2
#print(f'No programa principal, n vale {n}')
#teste()
#print(f'No programa principal, x vale {x}')
#a = 5
#teste(a)
#print(f'A fora vale {a}')
#r1 = newSum(3, 2, 5)
#r2 = newSum(1, 7)
#r3 = newSum(4)
#print(f'O resultado das somas são: {r1}, {r2}, {r3}')
# Read an integer from the user and report its factorial.
num = int(input('Digite um número: '))
print(f'O fatorial de {num} é {fatorial(num)}')
| JoaoGabsSR/EstudosDePython | Python-3 Mundo-3/aula21a.py | aula21a.py | py | 2,308 | python | pt | code | 0 | github-code | 36 |
#https://www.acmicpc.net/problem/1018
# Cut an M x N board down to some 8 x 8 sub-board and repaint squares so it
# becomes a valid chessboard; minimise the number of repainted squares.
# Ideas considered:
# 1 count painted squares in a dict and re-count while summing
# 2 encode w = 1,2 / b = 3,4 and compute from that
# 3 after "painting", mark wrong squares with 0/1 and count them  <- chosen
# Plan:
# 1. read the input as a 2-D array
# 2. build the reference chessboard
# 3. build two 0/1 mismatch grids, one per starting colour ('W' vs 'B')
# 4. slide an 8 x 8 window over both grids and keep the minimum sum
# read board dimensions
M, N = map(int, input().split())
# read the board rows as strings
board = []
for m in range(M):
    board += [input()]
#print(board)
# build the reference chessboard starting with 'W' in the top-left corner
real_board = [[0 for _ in range(N)]for _ in range(M)]
for m in range(M):
    for n in range(N):
        if m % 2 == 0: # even and odd rows start with opposite colours
            if n % 2 == 0:
                real_board[m][n] = 'W'
            else:
                real_board[m][n] = 'B'
        else:
            if n % 2 == 0:
                real_board[m][n] = 'B'
            else:
                real_board[m][n] = 'W'
#print(real_board)
# mark, per starting colour, which squares would need repainting
check_A = [[0 for _ in range(N)]for _ in range(M)] # pattern starting with W
check_B = [[0 for _ in range(N)]for _ in range(M)] # pattern starting with B
for m in range(M):
    for n in range(N):
        # a mismatch vs the W-start pattern is exactly a match vs the B-start one
        if board[m][n] != real_board[m][n]:
            check_A[m][n] = 1
        else:
            check_B[m][n] = 1
#print(check_A)
#print(check_B)
# brute-force every 8 x 8 window and keep the smallest repaint count
summyA = 0
summyB = 0
save = -1
for y in range(M - 8 + 1):
    for x in range(N - 8 + 1):
        for m in range(8):
            for n in range(8):
                summyA += check_A[m+y][n+x]
                summyB += check_B[m+y][n+x]
        if save == -1:
            save = min([summyA, summyB])
        else:
            save = min([summyA, summyB, save])
        summyA = 0
        summyB = 0
print(save)
| heisje/Algorithm | baekjoon/1018_체스판다시칠하기.py | 1018_체스판다시칠하기.py | py | 2,052 | python | ko | code | 0 | github-code | 36 |
6699760535 | from compat.legacy_model import LegacyModel
from customer.customers import CustomerObjectMap
import os
from common.io.interactive import query_yes_no
BASE_MODEL_DIR = r"E:\viNet_RnD\Deployment\Inference Models\Inception"
def create_candidate_model_legacy(class_map,
                                  network_version=None,
                                  dataset_version=None) -> None:
    """
    Interactively freeze and verify weights generated with the legacy tool.

    Prompts on stdin for a customer and, when loading the latest checkpoints,
    for a base graph from BASE_MODEL_DIR; the selected model is then
    operationalized, uploaded with the class map and verified. Otherwise an
    empty LegacyModel is built and only verification is run.

    @param class_map: a path to class map file
    @param network_version: network version string (optional)
    @param dataset_version: dataset version string (optional)
    @return: None
    """
    # select customer (interactive, by index)
    customers = list(CustomerObjectMap.keys())
    for i in range(len(customers)):
        print(f"{i}: {customers[i]}")
    print()
    customer = customers[int(input("Select customer (index):"))]
    freeze_latest = query_yes_no("Load latest checkpoints? ")
    if freeze_latest:
        # select base model from the fixed deployment directory
        models = os.listdir(BASE_MODEL_DIR)
        for i in range(len(models)):
            print(f"{i} - {models[i]}")
        input_graph_path = models[int(input("Enter net graph: "))]
        input_graph_path = os.path.join(BASE_MODEL_DIR, input_graph_path)
        # NOTE(review): assert is stripped under `python -O`; an explicit
        # check-and-raise would be more robust for a missing graph file.
        assert os.path.exists(input_graph_path)
        model = LegacyModel(input_graph_path,
                            customer=customer,
                            net_version=network_version,
                            dataset_version=dataset_version)
        model.operationalize()
        model.upload(class_map)
        model.run_verification(customer=customer)
    else:
        # no graph selected: build an empty model and only run verification
        model = LegacyModel("", customer=customer)
        model.run_verification()
# Script entry point: freeze/verify a legacy Inception model for one customer.
if __name__ == '__main__':
    version, dsv = '2.11', 2
    # NOTE(review): class-map path is hard-coded to a local Vattenfall 2.9
    # file -- confirm it matches the chosen network/dataset versions.
    classmap = r"E:\viNet_RnD\Deployment\Vattenfall\2.9\viNet_2.9_Vattenfall_ClassMap.txt"
    create_candidate_model_legacy(classmap,
                                  network_version=version,
                                  dataset_version=str(dsv))
| h3nok/MLIntro | Notebooks/cli/legacy_cli.py | legacy_cli.py | py | 1,995 | python | en | code | 0 | github-code | 36 |
9268710699 | import cv2
import numpy as np
import time
import os
from cobit_opencv_lane_detect import CobitOpencvLaneDetect
class CobitOpenCVGetData:
    """Plays back a prerecorded driving video and serves JPEG-encoded frames."""
    def __init__(self):
        self.cap = cv2.VideoCapture('data/car_video.avi')
        self.cv_detector = CobitOpencvLaneDetect()
        self.image = None  # last raw BGR frame read from the capture
        self.angle = None
        self.index = 0
        self.jpeg = None   # last JPEG-encoded frame
    def update(self):
        """Read the next frame and return (ok_flag, jpeg_bytes).

        NOTE(review): the result of cap.read() is not checked before
        imencode, so once the video ends (frame is None) this raises inside
        cv2.imencode -- callers must treat that as end-of-stream.
        """
        ret, self.image = self.cap.read()
        ret, self.jpeg = cv2.imencode('.jpg', self.image)
        return ret, self.jpeg.tobytes()
    '''
    if ret is False:
        self.image = np.zeros((240, 320, 3), np.uint8)
        cv2.putText(self.image, 'No frame', (40, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1)
        ret, self.jpeg = cv2.imencode('.jpg', self.image)
        return ret, self.jpeg.tobytes()
    else:
        self.image = np.zeros((240, 320, 3), np.uint8)
        ret, self.jpeg = cv2.imencode('.jpg', self.image)
        return ret, self.jpeg.tobytes()
    '''
    def remove_old_data(self):
        """Delete previously captured training images from data/."""
        os.system("rm data/*.png")
    def finish(self):
        """Release the video capture and close all OpenCV windows."""
        self.cap.release()
        cv2.destroyAllWindows()
if __name__ == '__main__':
    # Play back the capture: update() yields JPEG bytes, but cv2.imshow needs
    # an image array, so display the raw decoded frame kept on the object.
    cam = CobitOpenCVGetData()
    cam.remove_old_data()
    try:
        while True:
            ret, _jpeg_bytes = cam.update()
            print(ret)
            if not ret:
                break
            # Bug fix: the original passed the JPEG byte string to imshow,
            # which expects an image (numpy array) -- show cam.image instead.
            cv2.imshow("win", cam.image)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    except cv2.error:
        # cap.read() returns (False, None) at end of video and update() then
        # fails inside imencode; treat that as a normal end of stream.
        pass
    cam.finish()
| cobit-git/little-cobit-web-ctrl | cobit_opencv_get_data_backup.py | cobit_opencv_get_data_backup.py | py | 1,512 | python | en | code | 0 | github-code | 36 |
29045410309 | # Python
class Solution(object):
    def subtractProductAndSum(self, n):
        """Return (product of the decimal digits of n) - (sum of those digits).

        :type n: int
        :rtype: int
        """
        product = 1
        digit_sum = 0
        while n != 0:
            # Bug fix: the original used true division (temp / 10), which in
            # Python 3 yields floats and corrupts digit extraction (and may
            # loop for a long time); divmod does the intended integer step.
            n, digit = divmod(n, 10)
            product *= digit
            digit_sum += digit
        return product - digit_sum
| richard-dao/Other | LeetCode-Problems/Easy/Subtract-Product-and-Sum-Of-Integer.py | Subtract-Product-and-Sum-Of-Integer.py | py | 426 | python | en | code | 0 | github-code | 36 |
11360546161 | import sys
sys.stdin = open('0820.txt')
# Digit words listed in ascending numeric order (ZRO=0 ... NIN=9); iterating
# this list emits the counted words already sorted.
nnnn=["ZRO", "ONE", "TWO", "THR", "FOR", "FIV", "SIX", "SVN", "EGT", "NIN"]
T = int(input())
for tc in range(1, T+1):
    tn, N = input().split()
    text = input().split()
    result = []  # NOTE(review): unused; kept as-is
    # word -> digit lookup, the inverse of the nnnn list above
    data = {
        "ZRO": 0,
        "ONE": 1,
        "TWO": 2,
        "THR": 3,
        "FOR": 4,
        "FIV": 5,
        "SIX": 6,
        "SVN": 7,
        "EGT": 8,
        "NIN": 9
    }
    # counting sort: tally how many times each digit word appears
    number_list = [0]*10
    for i in text:
        number_list[int(data[i])] += 1
    i = 0
    print('#{}'.format(tc))
    # emit each word as many times as it was counted, smallest digit first
    for k in nnnn:
        for l in range(number_list[i]):
            print(k, end=" ")
        i += 1
    print()
print() | Jade-KR/TIL | 04_algo/수업/0820.py | 0820.py | py | 704 | python | ko | code | 0 | github-code | 36 |
3953501944 | from entity.incarnation import Incarnation
from entity import player
from time import monotonic
class Carrot(Incarnation):
    """
    Implementation of the incarnation Carrot, the epeeist. Inherits from incarnation.
    The light action is a single quick strike; the heavy action arms a burst of
    NUMBER_THRUST thrusts that special_action() then delivers one every
    COOLDOWN_THRUST seconds, with a different (final) attack on the last thrust.
    """
    COOLDOWN_THRUST = 0.4  # seconds between consecutive thrusts of the heavy action
    NUMBER_THRUST = 7      # total thrusts performed by one heavy action
    def __init__(self, owner_player: 'player.Player'):
        Incarnation.__init__(self, owner_player)
        self._remaining_thrusts: int = 0   # thrusts left in the current burst
        self._next_thrust_time: float = 0  # monotonic time gating the next thrust
    # GETTER
    @staticmethod
    def get_name() -> str:
        """Stable identifier of this incarnation."""
        return "carrot"
    @staticmethod
    def get_defense() -> float:
        """Defense value of this incarnation (how it is applied is up to the caller)."""
        return 0.7
    @staticmethod
    def get_action_cooldown() -> float:
        """Cooldown of the light action, in seconds."""
        return 0.5
    @staticmethod
    def get_heavy_action_cooldown() -> float:
        """Cooldown of the heavy action, in seconds."""
        return 8.0
    def action(self):
        """Light attack: one quick front strike plus its animation."""
        self._owner.front_attack(0.3, (10.0, 12.0), 0.2, 0.2)
        self._owner.push_animation("carrot:strike")
    def heavy_action(self):
        """Arm the thrust burst and block jumping for its whole duration."""
        self._remaining_thrusts = Carrot.NUMBER_THRUST
        self._next_thrust_time = monotonic() + 0.4
        self._owner.set_special_action(True, False)
        self._owner.block_jump_for(Carrot.COOLDOWN_THRUST * Carrot.NUMBER_THRUST)
        # self._owner.block_moves_for(Carrot.COOLDOWN_THRUST * Carrot.NUMBER_THRUST)
        self._owner.push_animation("carrot:thrust")
    def special_action(self):
        """Deliver the next pending thrust once its per-thrust cooldown elapsed.

        The very last thrust uses different front_attack parameters
        (presumably a stronger finisher -- confirm against front_attack's
        signature); the burst ends by clearing the special-action flag.
        """
        if self._remaining_thrusts > 0 and self._next_thrust_time <= monotonic():
            if self._remaining_thrusts == 1:
                self._owner.front_attack(1.7, (13, 15), 3, 3)
                # self._owner.get_stage().add_effect(EffectType.SMOKE, 1, self._owner.get_x(), self._owner.get_y())
            else:
                self._owner.front_attack(1.7, (8, 10), 0, 0, given_imune=0.0)
            self._next_thrust_time = monotonic() + Carrot.COOLDOWN_THRUST
            self._remaining_thrusts -= 1
            if self._remaining_thrusts <= 0:
                self._owner.set_special_action(False)
| mindstorm38/rutabagarre | src/entity/incarnation/carrot.py | carrot.py | py | 1,964 | python | en | code | 2 | github-code | 36 |
11071680416 | #!/usr/bin/env python
# coding: utf-8
# -- GongChen'xi
#
# 20220112
# In[1]:
import baostock as bs
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import os, sys
# In[2]:
def fetch_info(stock_num, info, start_date, end_date):
    """Query daily k-line fields for one stock via the baostock API.

    :param stock_num: baostock stock code
    :param info: comma-separated field list, e.g. 'date, close'
    :param start_date: first date of the range
    :param end_date: last date of the range
    :return: list of rows, each a list of string field values in `info` order
    """
    bs.login()
    rs = bs.query_history_k_data_plus(stock_num, info,
                                      start_date = start_date, end_date = end_date,
                                      frequency="d", adjustflag="3")
    data_list = []
    # Drain the cursor while the API reports no error for the current row.
    while (rs.error_code == '0') & rs.next():
        data_list.append(rs.get_row_data())
    bs.logout()
    return data_list
# In[3]:
# make dictionary of RSI6, RSI24
def _window_rsi(change, i, window):
    """RSI over the `window` rows of `change` that end at index i.

    `change` rows are [date, pctChg-as-string].  Returns the ratio of positive
    moves to total absolute movement, or 0.0 when the ratio is undefined
    (zero total movement, or an unparsable value), matching the original
    fallback behaviour.
    """
    try:
        total = 0.0
        gains = 0.0
        for j in range(window):
            pct = float(change[i - j][1])
            total += abs(pct)
            if pct > 0:
                gains += pct
        return gains / total
    except (ZeroDivisionError, ValueError):
        return 0.0
def get_rsi(stock_num, start_date, end_date):
    """Build {date: RSI6} and {date: RSI24} dictionaries for a stock.

    Fetches daily percent changes via fetch_info and starts at index 23 so
    the 24-day window is always fully populated.

    :return: (rsi6, rsi24) dictionaries keyed by date string
    """
    rsi6 = {}
    rsi24 = {}
    change = fetch_info(stock_num, 'date, pctChg', start_date, end_date)
    # Refactor: the two near-identical 6/24-day window loops of the original
    # now share a single helper, and the bare excepts are narrowed to the
    # exceptions the computation can actually raise.
    for i in range(23, len(change)):
        date = change[i][0]
        rsi6[date] = _window_rsi(change, i, 6)
        rsi24[date] = _window_rsi(change, i, 24)
    return rsi6, rsi24
# In[4]:
# make dictionary of close price
def get_price(stock_num, start_date, end_date):
    """Return {date: close price as float} for the given stock and range."""
    rows = fetch_info(stock_num, 'date, close', start_date, end_date)
    return {row[0]: float(row[1]) for row in rows}
# In[5]:
# make list of trading dates
def get_trading_dates(stock_num, start_date, end_date):
    """Return the chronological list of trading-date strings for the range."""
    rows = fetch_info(stock_num, 'date', start_date, end_date)
    return [row[0] for row in rows]
# In[6]:
def change_rate(buy_date, date, close_price):
    """Fractional change between the closes on buy_date and date."""
    bought_at = close_price[buy_date]
    return (close_price[date] - bought_at) / bought_at
def simulation_start_date(trading_date_list):
    """First simulated date: the 24th trading date, so a full 24-day window exists."""
    return trading_date_list[23]
def next_date(date, trading_date_list):
    """Return the trading date immediately after `date`.

    Returns None when `date` is not in the list and also -- bug fix -- when
    `date` is the final entry: the original indexed one past the end and
    raised IndexError in that case.  Uses list.index instead of a manual
    linear scan.
    """
    try:
        idx = trading_date_list.index(date)
    except ValueError:
        return None
    if idx + 1 < len(trading_date_list):
        return trading_date_list[idx + 1]
    return None
# In[ ]:
def initialize(stock_num, start_date, end_date):
    """Fetch closes and trading dates, and pick the simulation start date."""
    prices = get_price(stock_num, start_date, end_date)
    dates = get_trading_dates(stock_num, start_date, end_date)
    return prices, dates, simulation_start_date(dates)
# In[ ]:
# analysis
def analysis(record, close_price, show):
    """Summarise a back-test: average and accumulated yield, optional chart.

    :param record: iterable of (date, action) pairs, action in {'buy', 'sell'}
    :param close_price: {date: close} dict, assumed in chronological order
    :param show: when truthy, print the yields and plot price with trade markers
    :return: (average_yield, accumulated_yield)
    """
    # unzip the price dict into parallel close/date lists (insertion order)
    close = []
    date = []
    for i in close_price:
        close.append(close_price[i])
        date.append(i)
    # x-axis: one integer index per trading day
    t = []
    for i in range(len(close)):
        t.append(i)
    # positions and prices of buys/sells, for the scatter overlays below
    buy_price = []
    buy_t = []
    sell_price = []
    sell_t = []
    for i in record:
        for j in range(len(date)):
            if date[j] == i[0]:
                if i[1] == 'buy':
                    buy_price.append(close_price[date[j]])
                    buy_t.append(j)
                if i[1] == 'sell':
                    sell_price.append(close_price[date[j]])
                    sell_t.append(j)
    # flat [date, action, close] rows for the DataFrame / profit computation
    trade_list = []
    for i in record:
        for j in range(len(date)):
            if date[j] == i[0]:
                trade_list.append([i[0], i[1], close_price[date[j]]])
    trade = pd.DataFrame(trade_list, columns = ['date', 'action', 'close'])
    #print(trade, end = '\n\n')
    profit = []
    # drop a trailing unmatched trade so buys and sells pair up
    if len(trade_list) % 2 == 1:
        trade_list.pop()
    # per-round-trip yield: (sell close - buy close) / buy close
    for i in range(int(len(trade_list)/2)):
        profit.append((trade_list[2*i+1][2] - trade_list[2*i][2])/trade_list[2*i][2])
    profit = np.array(profit)
    # NOTE(review): with no complete round trip, `profit` is empty -- mean()
    # returns nan and the cumprod indexing below raises IndexError; this
    # presumably never happens for real records -- confirm.
    average = profit.mean()
    accumulated = ((profit + 1).cumprod() - 1)[-1]
    if show:
        print('On this pattern of trading,')
        print('Average yield: ', average)
        print('Accumulated yield: ', accumulated)
        plt.plot(t, close)
        plt.scatter(buy_t, buy_price, color = 'red')
        plt.scatter(sell_t, sell_price, color = 'green')
    return average, accumulated
| Chenxi-Gong/TradingPatternSimulation | simulation.py | simulation.py | py | 4,698 | python | en | code | 1 | github-code | 36 |
5185686033 | import re
import ssl
import requests
import urllib.request
from lxml import etree
from fake_useragent import UserAgent
from concurrent.futures import wait, ALL_COMPLETED
from .common import Anime, Seed, Subgroup
class Mikan:
def __init__(self, logger, config, executor):
self.url = config['URL']
self.ua = UserAgent()
self.logger = logger
self.executor = executor
self.seed_list = []
self.seed_list_download_sucess = []
self.seed_list_download_failed = []
self.img_list_download = []
def request_html(self, url):
try:
headers = {'User-Agent': self.ua.random}
res = requests.get(url=url, headers=headers, timeout=5)
res.raise_for_status()
res.encoding = res.apparent_encoding
html_doc = etree.HTML(res.text)
except Exception as e:
self.logger.warning("[SPIDER] request_html failed, url: {}, error: {}".format(url, e))
else:
self.logger.info("[SPIDER] request_html success, url: {}".format(url))
return html_doc
def download(self, url, path):
ssl._create_default_https_context = ssl._create_unverified_context
opener = urllib.request.build_opener()
opener.addheaders = [('User-Agent', self.ua.random)]
urllib.request.install_opener(opener)
try:
urllib.request.urlretrieve(url, path)
except Exception as e:
self.logger.warning("[SPIDER] download failed, url: {}, error: {}".format(url, e))
return False
else:
self.logger.info("[SPIDER] download success, url: {}".format(url))
return True
def get_anime_list(self):
html_doc = self.request_html(self.url)
if html_doc == None:
self.logger.warning("[SPIDER] get_anime_list failed, request_html failed, url: {}".format(self.url))
return
anime_list = []
for info in html_doc.xpath('//div[@class="sk-bangumi"]'):
update_day_ = info.xpath('.//@data-dayofweek')
anime_info = info.xpath('.//li')
for a in anime_info:
anime_name_ = a.xpath('.//@title')[0]
mikan_id_ = a.xpath('.//@data-bangumiid')[0]
img_url_ = a.xpath('.//@data-src')
anime_name = self.lxml_result_to_str(anime_name_)
mikan_id = int(self.lxml_result_to_str(mikan_id_))
img_url = self.lxml_result_to_str(img_url_)
update_day = int(self.lxml_result_to_str(update_day_))
if update_day == 7: # movie
anime_type = 1
update_day = 8
elif update_day == 8: # ova
anime_type = 2
update_day = 8
elif update_day == 0: # update on sunday
anime_type = 0
update_day = 7
else:
anime_type = 0
subscribe_status = 0
anime = Anime(anime_name, mikan_id, img_url, update_day, anime_type, subscribe_status)
anime_list.append(anime)
self.logger.info("[SPIDER] get_anime_list success, anime number: {}".format(len(anime_list)))
return anime_list
def get_subgroup_list(self, mikan_id):
url = "{}/Home/Bangumi/{}".format(self.url, mikan_id)
html_doc = self.request_html(url)
if html_doc == None:
self.logger.warning("[SPIDER] get_subgroup_list failed, request_html failed, url: {}".format(self.url))
return
subgroup_list = []
subgroup_id_ = html_doc.xpath('//li[@class="leftbar-item"]/span/a/@data-anchor')
subgroup_name_ = html_doc.xpath('//li[@class="leftbar-item"]/span/a/text()')
for i in range(len(subgroup_name_)):
subgroup_id = int(self.lxml_result_to_str(subgroup_id_[i])[1:])
subgroup_name = self.lxml_result_to_str(subgroup_name_[i])
subgroup = Subgroup(subgroup_id, subgroup_name)
subgroup_list.append(subgroup)
self.logger.info("[SPIDER] get_subgroup_list success, mikan_id: {}, subgroup number: {}".format(mikan_id, len(subgroup_list)))
return subgroup_list
def get_seed_list(self, mikan_id, subgroup_id, anime_type):
url = "{}/Home/ExpandEpisodeTable?bangumiId={}&subtitleGroupId={}&take=65".format(self.url, mikan_id, subgroup_id)
html_doc = self.request_html(url)
if html_doc == None:
self.logger.warning("[SPIDER] get_seed_list failed, request_html failed, url: {}".format(self.url))
return
seed_list = []
tr_list = html_doc.xpath('//tbody/tr')
for tr in tr_list:
seed_url_ = tr.xpath('.//a[last()]/@href')
seed_name_ = tr.xpath('.//a[@class="magnet-link-wrap"]/text()')
seed_size_ = tr.xpath('.//td[2]/text()')
seed_url = self.lxml_result_to_str(seed_url_)
seed_name = self.lxml_result_to_str(seed_name_)
seed_size = self.lxml_result_to_str(seed_size_).replace(' ', '')
if not self.if_1080(seed_name):
continue
if anime_type == 0:
episode_str = self.get_episode(seed_name)
if episode_str == "null":
continue
else:
episode_str = "01"
episode = int(episode_str)
seed_status = 0
seed = Seed(mikan_id, episode, seed_url, subgroup_id, seed_name, seed_status, seed_size)
seed_list.append(seed)
self.logger.info("[SPIDER] get_seed_list success, mikan_id: {}, subgroup_id: {}, anime_type: {}, seed number: {}".format(mikan_id, subgroup_id, anime_type, len(seed_list)))
return seed_list
# mikan.download_img("/images/Bangumi/202307/f94fdb7f.jpg", "static/img/anime_list")
def download_img(self, img_url, path):
url = "{}{}".format(self.url, img_url)
img_name = img_url.split('/')[4]
if not self.download(url, path + img_name):
self.logger.warning("[SPIDER] download_img failed, download failed, img_url: {}, path: {}".format(img_url, path))
return False
self.logger.info("[SPIDER] download_img success, img_url: {}, path: {}".format(img_url, path))
return True
# mikan.download_seed("/Download/20230913/dfe6eb7c5f780e90f74244a498949375c67143b0.torrent", "seed/")
def download_seed(self, seed_url, path):
url = "{}{}".format(self.url, seed_url)
torrent_name = seed_url.split('/')[3]
if not self.download(url, path + torrent_name):
self.logger.warning("[SPIDER] download_seed failed, download failed, seed_url: {}, path: {}".format(seed_url, path))
return False
self.logger.info("[SPIDER] download_seed sucess, seed_url: {}, path: {}".format(seed_url, path))
return True
def lxml_result_to_str(self, result):
result_str = ''
for a in result:
result_str += str(a)
return result_str
def get_episode(self, seed_name):
# 排除掉了合集
str_list = re.findall(r'\d{2}-\d{2}', seed_name)
if len(str_list) != 0:
return "null"
str_list = re.findall(r'\[\d{2}\]|\s\d{2}\s', seed_name)
if len(str_list) == 0:
str_list = re.findall(r'\[第\d+话\]', seed_name)
if len(str_list) == 0:
return "null"
else:
return str_list[0][2:-2]
episode_str = str_list[0][1:-1]
return episode_str
def if_1080(self, seed_name):
str_list = re.findall(r'1080', seed_name)
if len(str_list) == 0:
return False
return True
def get_seed_list_thread(self, args):
mikan_id, subgroup_id, anime_type = args
try:
seed_list = self.get_seed_list(mikan_id, subgroup_id, anime_type)
except Exception as e:
self.logger.warning("[SPIDER] get_seed_list_thread failed, mikan_id: {}, subgroup_id: {}, error: {}".format(mikan_id, subgroup_id, e))
else:
for s in seed_list:
self.seed_list.append(s)
def get_seed_list_task(self, mikan_id, subgroup_list, anime_type):
self.seed_list = []
task_list = []
for sub in subgroup_list:
subgroup_id = sub.subgroup_id
task = self.executor.submit(self.get_seed_list_thread, (mikan_id, subgroup_id, anime_type))
task_list.append(task)
wait(task_list, return_when=ALL_COMPLETED)
return self.seed_list
def download_seed_thread(self, args):
seed = args
seed_url = seed['seed_url']
path = seed['path']
try:
self.download_seed(seed_url, path)
except Exception as e:
self.logger.warning("[SPIDER] download_seed_thread failed, seed_url: {}, path: {}, error: {}".format(seed_url, path, e))
self.seed_list_download_failed.append(seed)
else:
self.seed_list_download_sucess.append(seed)
def download_seed_task(self, seed_list):
self.seed_list_download_sucess = []
self.seed_list_download_failed = []
task_list = []
for seed in seed_list:
task = self.executor.submit(self.download_seed_thread, seed)
task_list.append(task)
wait(task_list, return_when=ALL_COMPLETED)
return self.seed_list_download_sucess
def download_img_thread(self, args):
img = args
img_url = img['img_url']
path = img['path']
try:
self.download_img(img_url, path)
except Exception as e:
self.logger.warning("[SPIDER] download_img_thread failed, img_url: {}, path: {}".format(img_url, path))
else:
self.img_list_download.append(img)
    def download_img_task(self, img_list):
        """Download all cover images in parallel; return those that succeeded."""
        self.img_list_download = []
        task_list = []
        for img in img_list:
            task = self.executor.submit(self.download_img_thread, img)
            task_list.append(task)
        # Block until every image worker finishes.
        wait(task_list, return_when=ALL_COMPLETED)
        return self.img_list_download
def get_anime_list_by_conditon(self, year, broadcast_season):
if broadcast_season == 1:
seasonStr = '%E6%98%A5'
elif broadcast_season == 2:
seasonStr ='%E5%A4%8F'
elif broadcast_season == 3:
seasonStr = '%E7%A7%8B'
elif broadcast_season == 4:
seasonStr = '%E5%86%AC'
else:
self.logger.warning("[SPIDER] get_anime_list_by_conditon failed, year: {}, broadcast_season: {}".format(year, broadcast_season))
return
url = "{}/Home/BangumiCoverFlowByDayOfWeek?year={}&seasonStr={}".format(self.url, year, seasonStr)
html_doc = self.request_html(url)
if html_doc == None:
self.logger.warning("[SPIDER] get_anime_list failed, request_html failed, url: {}".format(self.url))
return
anime_list = []
for info in html_doc.xpath('//div[@class="sk-bangumi"]'):
update_day_ = info.xpath('.//@data-dayofweek')
anime_info = info.xpath('.//li')
for a in anime_info:
anime_name_ = a.xpath('.//@title')[0]
mikan_id_ = a.xpath('.//@data-bangumiid')[0]
img_url_ = a.xpath('.//@data-src')
anime_name = self.lxml_result_to_str(anime_name_)
mikan_id = int(self.lxml_result_to_str(mikan_id_))
img_url = self.lxml_result_to_str(img_url_)
update_day = int(self.lxml_result_to_str(update_day_))
if update_day == 7: # movie
anime_type = 1
update_day = 8
elif update_day == 8: # ova
anime_type = 2
update_day = 8
elif update_day == 0: # update on sunday
anime_type = 0
update_day = 7
else:
anime_type = 0
subscribe_status = 0
anime = Anime(anime_name, mikan_id, img_url, update_day, anime_type, subscribe_status)
anime_list.append(anime)
self.logger.info("[SPIDER] get_anime_list success, anime number: {}".format(len(anime_list)))
return anime_list | FortyWinters/autoAnime | src/lib/spider.py | spider.py | py | 12,664 | python | en | code | 1 | github-code | 36 |
34609634958 | """
279. Perfect Squares
Given an integer n, return the least number of perfect square numbers that sum
to n.
"""
def num_squares_naive(n, squares):
    """
    Naive recursive solution with exponential running time.

    Parameters
    ----------
    n : The input integer.
    squares : List of square numbers <= n.
    """
    if n == 0:
        return 0
    # Try every usable square and recurse on the remainder; if no square
    # fits (empty candidate list), propagate infinity just like the
    # min-over-nothing case in the loop formulation.
    candidates = [
        num_squares_naive(n - square, squares) + 1
        for square in squares
        if square <= n
    ]
    return min(candidates) if candidates else float('inf')
def num_squares_rec(n, squares, dp):
    """Memoized recursive solution.

    ``dp`` is a list sized n+1 whose entries start at -1 ("not computed");
    dp[i] is filled with the least number of squares summing to i.
    """
    if n == 0:
        dp[0] = 0
        return 0
    best = float('inf')
    for square in squares:
        if square > n:
            continue
        # Reuse a cached sub-result when available, otherwise recurse.
        cached = dp[n - square]
        if cached == -1:
            cached = num_squares_rec(n - square, squares, dp)
        best = min(best, cached + 1)
    dp[n] = best
    return best
def num_squares(n):
    """
    Bottom-up dynamic programming solution.

    dp[i] holds the least number of perfect squares summing to i;
    runs in O(n * sqrt(n)) time and O(n) space.
    """
    dp = [float('inf')] * (n + 1)
    dp[0] = 0
    for total in range(1, n + 1):
        root = 1
        while root * root <= total:
            dp[total] = min(dp[total], dp[total - root * root] + 1)
            root += 1
    return dp[n]
| wuihee/data-structures-and-algorithms | programming-paradigm/dynamic_programming/min_max_path/perfect_squares.py | perfect_squares.py | py | 1,258 | python | en | code | 0 | github-code | 36 |
37002642889 | import re
def strB2Q(ustring):
    """Convert full-width (zenkaku) characters to their half-width ASCII
    equivalents.

    The ideographic space (U+3000) maps to a normal space; full-width
    forms U+FF01..U+FF5E map to U+0021..U+007E. Any other character is
    copied through unchanged.
    """
    rstring = ''
    for uchar in ustring:
        inside_code = ord(uchar)
        if inside_code == 0x3000:
            # Bug fix: the converted space (0x20) lies below the
            # 0x21..0x7e window, so it must be emitted directly instead
            # of falling through to the range check (which previously
            # re-emitted the original U+3000).
            rstring += chr(0x0020)
            continue
        inside_code -= 0xfee0
        if not (0x0021 <= inside_code and inside_code <= 0x7e):
            rstring += uchar
            continue
        rstring += chr(inside_code)
    return rstring
def mathStyleText(latex):
    """Wrap known unit/function tokens in \\textrm{} so TeX renders them
    upright rather than as italic math variables."""
    # Replacement order matches the original mapping's insertion order.
    replacements = (
        ('min', '\\textrm{min}'),
        ('cm', '\\textrm{cm}'),
        ('m/s', '\\textrm{m/s}'),
        ('Rt', '\\textrm{Rt}'),
    )
    for token, wrapped in replacements:
        latex = latex.replace(token, wrapped)
    return latex
def fullstopBySci(ustring):
    """Replace the Chinese full stop '。' with an ASCII period."""
    return ustring.replace('。', '.')
def clearCR(ustring):
    """Strip every newline character from the string."""
    return ''.join(ustring.split('\n'))
def repairPunctMark(ustring):
    """Quote repair: rewrite pairs of the form “…" (full-width opening
    quote closed by a half-width double quote) as “…”."""
    def _close_quote(match):
        return '“{}”'.format(match.group(1))

    # Only short spans (up to 20 chars) are treated as a quoted phrase.
    return re.sub(r"\“([^”]{1,20})\"", _close_quote, ustring)
def buildCR(ustring):
    """Insert line breaks before sub-question numbers (1)..(6) and before
    multiple-choice labels A. / B. / C. / D., each followed by a space."""
    def _break_before(match):
        return '\n{} '.format(match.group(0))

    # Break before numbered sub-questions.
    ustring = re.sub(r"\([1-6]\)", _break_before, ustring)
    # Break before choice labels.
    ustring = re.sub(r"[A-D]\.", _break_before, ustring)
    return ustring
def buildMath(ustring):
    """Wrap runs of math-looking ASCII characters in $...$ delimiters.

    Pure punctuation/label tokens ('.', '(', ')', '()', ':', 'A.'..'D.')
    are passed through untouched; anything else is styled via
    mathStyleText and wrapped as inline TeX math.
    """
    def _build(matched):
        result = ''
        # Bug fix: a comma was missing between ':' and 'A.', which made
        # Python concatenate them into ':A.' and silently drop both
        # tokens from the pass-through list.
        if matched.group(0).strip() in ('.', '(', ')', '()', ':', 'A.', 'B.', 'C.', 'D.'):
            result = matched.group(0)
        else:
            result = ' $' + mathStyleText(matched.group(0)).strip() + '$ '
        return result
    # Character class covers letters, space, backslash, ()*+ and -..> (which
    # includes digits, '.', '/', ':', ';', '<', '=', '>').
    ustring = re.sub(
        r"[a-zA-Z\ \\\u0028-\u002b\u002d-\u003e]{1,50}", _build, ustring)
    return ustring
def mathChar2TeX(ustring):
    """Replace common Unicode math symbols with their TeX commands,
    each followed by a trailing space."""
    for symbol, command in (
        ('△', '\\triangle'),
        ('∠', '\\angle'),
        ('°', '\\degree'),
        ('≌', '\\cong'),
        ('⊥', '\\perp'),
    ):
        ustring = ustring.replace(symbol, command + ' ')
    return ustring
def mathChar2Unicode(ustring):
    """Collapse degree+C spellings ('° C' and '°C') into the single ℃ glyph."""
    # The spaced form must be handled first, otherwise '° C' would be
    # left behind once the tight form has been consumed.
    ustring = ustring.replace('° C', '℃')
    ustring = ustring.replace('°C', '℃')
    return ustring
def repairPunctSpace(ustring):
    """Normalise half-width commas and enumeration commas to full-width
    characters, removing surrounding spaces.

    Because this pass collapses spacing, it should run after the other
    repair passes.
    """
    for spaced, tight in (
        (' , ', ','),
        (', ', ','),
        (' ,', ','),
        (',', ','),
        (' 、 ', '、'),
        (' 、', '、'),
        ('、 ', '、'),
    ):
        ustring = ustring.replace(spaced, tight)
    return ustring
def repairErrorChinese(ustring):
    """Fix an OCR artefact where the character 一 is misread as '- -'."""
    return '一'.join(ustring.split('- -'))
if __name__ == '__main__':
    # Demo: run a sample exam question through the full clean-up pipeline.
    # a = strB2Q("你好pythonabdalduizxcvbnm")
    text = """
    [2021江苏无锡期中,偏难]两套完全相同(如图甲所示)
    的加热装置,两套装置的试管中分别装有少量的相等体
    积的M固体和N固体,它们的温度随加热时间变化的曲
    线如图乙所示,在35 min内M物质从固体熔化成了液体,
    N物质始终是固体,则下列说法正确的是
    """
    # Pipeline order: width/newline normalisation first, then symbol fixes,
    # then quote repair and structural breaks, with space repair last.
    print(repairPunctSpace( buildMath(buildCR(repairPunctMark(repairErrorChinese( mathChar2TeX( mathChar2Unicode(strB2Q(clearCR(text)) ) )))) )))
| zhangpeng96/Smart-String-Toolbox | ocr-optimize/simple_math.py | simple_math.py | py | 3,472 | python | en | code | 0 | github-code | 36 |
10900868041 | # Текстовая переменная
# Accumulator for the result message ("This number is ...")
res = "Это число "
# Read the number name (in Russian) from the user
txt = input("Введите название числа от 1 до 4: ")
# Normalise the input to lower case for comparison
txt = txt.lower()
# Map the Russian number name to its digit
# NOTE(review): the prompt says 1 to 4, but "четыре"/"четвёрка" is not
# handled and falls through to "not identified" — confirm intent.
if txt == "один" or txt == "единица":
    res += "1"
elif txt == "два" or txt == "двойка":
    res += "2"
elif txt == "три" or txt == "тройка":
    res += "3"
else:
    res += "не идентифицировано"
# Print the identification result
print(res)
| SetGecko/PonPbyEandT | Chapter_2/Listing02_10.py | Listing02_10.py | py | 623 | python | ru | code | 0 | github-code | 36 |
73122366504 | from django.db import models
from django.contrib.auth.models import User
from ckeditor_uploader.fields import RichTextUploadingField
# Create your models here.
class System(models.Model):
    """A vehicle system category (e.g. 'Chassis') that codes belong to."""
    name = models.CharField(max_length=20, verbose_name='System')
    # Creator of the entry; deleting the user cascades to their systems.
    owner = models.ForeignKey(User, verbose_name='Owner', on_delete=models.CASCADE)
    created_time = models.DateTimeField(auto_now_add=True, verbose_name='Created Time')
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = verbose_name_plural = 'System'
class CodeImg(models.Model):
    """Illustration image attached to a code."""
    code_name = models.CharField(max_length=200, verbose_name='Code')
    code_img = models.ImageField(verbose_name='Code Image')
    def photo_url(self):
        """Return the image URL, or a default placeholder when unset."""
        if self.code_img and hasattr(self.code_img, 'url'):
            return self.code_img.url
        else:
            return '/default/1.jpg'
    def __str__(self):
        return self.code_name
    class Meta:
        verbose_name = verbose_name_plural = 'Code IMG'
class DriveType(models.Model):
    """Truck drive/axle configuration (6x4, 6x2 or 4x2)."""
    # Symbolic constants backing the choice field below.
    DRIVE_6X4 = 1
    DRIVE_6X2 = 2
    DRIVE_4X2 = 3
    DRIVE_TYPE = (
        (DRIVE_6X4, '6X4'),
        (DRIVE_6X2, '6X2'),
        (DRIVE_4X2, '4X2'),
    )
    name = models.PositiveIntegerField(choices=DRIVE_TYPE, verbose_name='Drive Type')
    # NOTE(review): default=2 hard-codes a user primary key; confirm it exists.
    owner = models.ForeignKey(User, verbose_name='Owner', on_delete=models.CASCADE, default=2)
    created_time = models.DateTimeField(auto_now_add=True, verbose_name='Created Time')
    def __str__(self):
        # Show the human-readable choice label rather than the raw integer.
        return self.get_name_display()
    class Meta:
        verbose_name = verbose_name_plural = 'Drive Type'
class Packages(models.Model):
    """Optional equipment package; several labels encode prerequisite
    packages via the "only icw ..." (in combination with) suffix."""
    # Constants grouped by family (tens digit = package family).
    COMFORT_CLASSIC = 10
    COMFORT_TOP = 11
    COMFORT_TOP_PLUS = 12
    DRIVER_OPERATION_CLASSIC = 20
    DRIVER_OPERATION_TOP = 21
    EFFICIENCY_CLASSIC = 30
    EXPRESS_CLASSIC = 40
    EXPRESS_TOP = 41
    SAFETY_TOP = 1
    MOUNTAIN = 2
    LIGHT_WEIGHT = 3
    ROAD_STAR = 4
    SUMMER_PACKAGE = 5
    WINTER_PACKAGE = 6
    PACKAGES_TYPE = (
        (COMFORT_CLASSIC, 'Comfort Classic'),
        (COMFORT_TOP, 'Comfort Top (only icw Comfort Classic)'),
        (COMFORT_TOP_PLUS, 'Comfort Top Plus (only icw Comfort Top)'),
        (DRIVER_OPERATION_CLASSIC, 'Driver Operation Classic'),
        (DRIVER_OPERATION_TOP, 'Driver Operation Top (only icw Operation Classic)'),
        (EFFICIENCY_CLASSIC, 'Efficiency Classic'),
        (EXPRESS_CLASSIC, 'Express Classic'),
        (EXPRESS_TOP, 'Express Top (only icw Express Classic)'),
        (SAFETY_TOP, 'Safety Top (only icw Express Top)'),
        (MOUNTAIN, 'Mountain'),
        (LIGHT_WEIGHT, 'Light Weight'),
        (ROAD_STAR, 'Road Star'),
        (SUMMER_PACKAGE, 'Summer Package (only icw Comfort Top)'),
        (WINTER_PACKAGE, 'Winter Package (only icw Comfort Classic)'),
    )
    name = models.PositiveIntegerField(choices=PACKAGES_TYPE, verbose_name='Packages')
    # NOTE(review): default=2 hard-codes a user primary key; confirm it exists.
    owner = models.ForeignKey(User, verbose_name='Owner', on_delete=models.CASCADE, default=2)
    created_time = models.DateTimeField(auto_now_add=True, verbose_name='Created Time')
    def __str__(self):
        return self.get_name_display()
    class Meta:
        verbose_name = verbose_name_plural = 'Packages'
class CodesH6(models.Model):
    """Sales/option code for the H6 truck family, including compatibility
    restrictions, applicable drive types/packages and rich-text notes."""
    # Lifecycle status of the code.
    STATUS_ACTIVE = 1
    STATUS_DEACTIVE = 0
    STATUS_ITEMS = (
        (STATUS_ACTIVE, 'Active'),
        (STATUS_DEACTIVE, 'Deleted'),
    )
    # Brand/variant the code applies to.
    H6A = 1
    H6B = 2
    H6 = 3
    NA = 0
    BRAND_ITEMS = (
        (H6A, 'H6A'),
        (H6B, 'H6B'),
        (H6, 'H6'),
        (NA, 'NA'),
    )
    name = models.CharField(max_length=10, verbose_name='Code')
    title = models.CharField(max_length=200, verbose_name='Title')
    system = models.ManyToManyField(System, verbose_name='System')
    comments = models.CharField(max_length=300, verbose_name='Comments', blank=True)
    status = models.PositiveIntegerField(choices=STATUS_ITEMS, verbose_name='Status')
    # Free-text compatibility restrictions against other codes.
    restriction_with = models.CharField(max_length=500, verbose_name='With', blank=True)
    restriction_not_with = models.CharField(max_length=500, verbose_name='Not With', blank=True)
    brand = models.PositiveIntegerField(choices=BRAND_ITEMS, verbose_name='Brand', blank=True)
    drive_type = models.ManyToManyField(DriveType, verbose_name='Drive Type', blank=True)
    package = models.ManyToManyField(Packages, verbose_name='Package', blank=True)
    # Rich-text (CKEditor with uploads) documentation fields.
    brief = RichTextUploadingField(blank=True, verbose_name='In Brief')
    benefits = RichTextUploadingField(blank=True,verbose_name='Benefits and Arguments')
    personal_comments = RichTextUploadingField(blank=True, verbose_name='Personal Comments')
    knowledge = RichTextUploadingField(blank=True, verbose_name='Knowledge')
    # NOTE(review): default=3 hard-codes a user primary key; confirm it exists.
    owner = models.ForeignKey(User, verbose_name='Owner', on_delete=models.CASCADE, default=3)
    created_time = models.DateTimeField(auto_now_add=True, verbose_name='Created Time')
    def __str__(self):
        return self.name + '-' + self.title
    class Meta:
        verbose_name = verbose_name_plural = 'H6 Codes'
"""
命令行赋值一直不行,报错外键约束问题,原来owner default=4,但是数据库里面没有4
c=CodesH6(name='Dummy3',status=1,brand=1,owner_id=3)
c.save()
多对多不能直接add值,要用id,或者对象
c.system.add('Chassis')
"""
| ikofan/sh6 | codes/models.py | models.py | py | 5,282 | python | en | code | 0 | github-code | 36 |
35056056600 | from src.reduction import ReductionMethod
import cantera as ct
def main() -> None:
    """
    Run a mechanism reduction (DRG or DRGEP) and print the retained species.

    Edit the variables in this function to configure the reduction. Put all
    the state files in one folder and pass that folder path as ``file_dir``.
    Prints the number of species kept versus the detailed mechanism, then
    each retained species name. Automation of the resulting mechanism and
    its testing is to be implemented later.
    """
    # Detailed mechanism to be reduced (GRI-Mech 3.0).
    detailed_gas = ct.Solution("gri30.xml")
    # Folder containing the state files (use \\ to separate the folders).
    file_dir = "folder_path\\"
    # Reduction parameters.
    threshold = 0.05
    important_species = [ "OH", "CH4", "O2", "N2", "CO2", "H2O", "CO"]
    reduction_method = "DRGEP" # accepts "DRG" or "DRGEP"
    # Run the reduction and report the surviving species.
    red = ReductionMethod(detailed_gas, det_dir=file_dir)
    final_spc_list, rii_list = red.run_reduction(reduction_method, threshold, important_species)
    print(f" Final red_mech contains {len(final_spc_list)} species.\n (red/det) = ({len(final_spc_list)}/{detailed_gas.n_species})\n")
    for spc in final_spc_list:
        print(spc)
if __name__ == "__main__":
main()
| fingeraugusto/red_app | main_app.py | main_app.py | py | 1,201 | python | en | code | 0 | github-code | 36 |
42211653033 | import ipywidgets
from ipywidgets import *
from IPython.display import display, Markdown
all_options=[]
all_answers=[]
all_feedback=[]
options1=['ASIC, SoC, FPGA, MPSoC', 'FPGA, SoC, ASIC, MPSoC', 'SoC, ASIC, FPGA, MPSoC', 'ASIC, MPSoC, FPGA, SoC']
ans1='ASIC, SoC, FPGA, MPSoC'
fb1_a='Correct! We know ASICs are application specific. The other three descriptions can apply to an FPGA, but you can determine the correct answer from the mention of other components and multiple processors.'
fb1_b='Incorrect! Go back to the beginning of this notebook to review info on these four terms.'
fb1=[fb1_a, fb1_b, fb1_b, fb1_b]
all_options.append(options1); all_answers.append(ans1); all_feedback.append(fb1)
options2=['A','B','C']
ans2='B'
fb2_a='Incorrect; take a closer look at the diagram, and review to the gates discussed above.'
fb2_b='Correct! If B is changed to 1, the AND gate receives a 0 from the inverted B value. Because the AND gate is receiving to low inputs, its output is also low, and because A is also low, the OR gate will also be receiving two low inputs, making its output 0. On the flip side, switching either A or C to high, leaving the other two low, will result in D being high as well.'
fb2=[fb2_a, fb2_b, fb2_a]
all_options.append(options2); all_answers.append(ans2); all_feedback.append(fb2)
options3=['Verilog','JHDL','Ruby','VHDL']
ans3='Ruby'
fb3_a='Incorrect; look at the examples given in the notebook. Don\'t be afraid to look up a language that looks unfamiliar to you.'
fb3_b='Correct! Ruby is a high-level programming language that isn\'t used in designing hardware.'
fb3=[fb3_a, fb3_a, fb3_b, fb3_a]
all_options.append(options3); all_answers.append(ans3); all_feedback.append(fb3)
options4=['The size of the FPGA','The size of a feature on an FPGA','The maximum routing distance between IP','The physical size of a processor on an SoC']
ans4='The size of a feature on an FPGA'
fb4=['Incorrect; remember that an FPGA is a silicon component.',
'Correct! An FPGA \'feature\' refers to the elements on an FPGA, like a transistor, and smaller features means more can be fit in the same space, which is why you hear the number growing smaller as newer devices are developed. A higher number of features can imply (though not always) higher performance and power.',
'Incorrect; routing is not often measured and monitored in this way.',
'Incorrect; not all FPGA devices are SoCs.']
all_options.append(options4); all_answers.append(ans4); all_feedback.append(fb4)
options5=['A .tcl script','An HDL file', 'An IP', 'A bitstream']
ans5='A bitstream'
fb5_1='Incorrect; a tcl script is used to rebuild your design, as it includes commands for Vivado to use.'
fb5_2='Incorrect; HDL is used when developing the hardware, but is not loaded into the device.'
fb5_3='Incorrect; IP are building blocks in your hardware design.'
fb5_4='Correct! A bitstream is created based on your design, which is what is loaded onto the device in order for it to function as the designer intends. '
fb5=[fb5_1, fb5_2, fb5_3, fb5_4]
all_options.append(options5); all_answers.append(ans5); all_feedback.append(fb5)
def populate_questions():
    """Build one interactive widget per quiz question from the module-level
    all_options / all_answers / all_feedback lists and return them."""
    questions=[]
    for i in range(len(all_options)):
        questions.append(show_buttons(all_options[i], all_answers[i], all_feedback[i]))
    return questions
def show_buttons(options, answer, feedback):
    """Create the radio-button widget for one question, wired to mc_interact.

    NOTE(review): the ``answer`` parameter is unused here — the feedback
    list already encodes correctness per option; confirm it can be dropped.
    """
    radios=RadioButtons(
        description=' ',
        options=options,
        disabled=False,
        layout={'width': 'max-content'}
    )
    # fixed(...) freezes the extra arguments so only mc_val is interactive.
    interactive_q=interactive(mc_interact,mc_val=radios, options=fixed(options), feedback=fixed(feedback))
    return interactive_q
# interactive function, changing value: mc_val
def mc_interact(mc_val, options, feedback):
fb_text=feedback[options.index(mc_val)]
display(Markdown(fb_text)) | philipwu62/xilinx_XUP_notebooks | lib/fpga_widg.py | fpga_widg.py | py | 3,813 | python | en | code | 1 | github-code | 36 |
5514632684 | # -*- coding: UTF-8 -*-
from Courier import Courier,Order,debugFlag,OrdersPerSecond
import queue,sys,statistics
import time,json
import threading
def GetNextOrder(prepareTime):
    """Create a new Courier and an Order assigned to it.

    prepareTime: int, seconds the order takes to prepare.
    Returns the created Order.
    """
    courier = Courier();
    o=Order(courier,prepareTime);
    return o
if __name__ == '__main__':
    try:
        # Load the sample orders and dispatch them at a fixed rate.
        with open('dispatch_orders.json',encoding='utf-8') as f_in: #sample
            data = json.load(f_in)
        for seq,order in enumerate(data):
            prepareTime = int (order['prepTime'])
            print('Order %d new with prepareTime: %d'%(seq+1,prepareTime))
            GetNextOrder(prepareTime)
            time.sleep(1/int(OrdersPerSecond))# 2 order per second by default
        # Poll until every order is eatable and every courier has arrived,
        # then report the average wait times.
        while True:
            if not ( all(x.canEate==1 for x in Order.orders) and all(x.Arrived == 1 for x in Courier.couriers) ):
                print('qsize down: ',len([x.canEate for x in Order.orders if x.canEate==0])) if debugFlag == '1' else None
            else:
                print('Order Average Waittime(seconds): %.3f ,total %d orders' % (statistics.mean([x.waitTime.total_seconds() for x in Order.orders]),len(Order.orders)))
                print('Courier Average Waittime(seconds): %.3f ,total %d courier' % (statistics.mean([x.waitTime.total_seconds() for x in Courier.couriers]),len(Courier.couriers)))
                print()
                break
            time.sleep(3)
    except KeyboardInterrupt:
        print ("interruptted by Ctrl-c")
        sys.exit(1)
| slideclick/2021ccs | execu/main.py | main.py | py | 1,525 | python | en | code | 0 | github-code | 36 |
7755902959 | import logging
from typing import Any, Callable, Coroutine, Dict, List, Optional, Union
import attr
from geojson_pydantic.geometries import (
GeometryCollection,
LineString,
MultiLineString,
MultiPoint,
MultiPolygon,
Point,
Polygon,
)
from pydantic import validator
from pydantic.types import conint
from pystac.utils import str_to_datetime
from stac_fastapi.api.models import BaseSearchGetRequest, ItemCollectionUri
from stac_fastapi.pgstac.types.base_item_cache import BaseItemCache
from stac_fastapi.pgstac.types.search import PgstacSearch
from starlette.requests import Request
from pccommon.redis import cached_result
from pcstac.contants import CACHE_KEY_BASE_ITEM
DEFAULT_LIMIT = 250
logger = logging.getLogger(__name__)
class PCSearch(PgstacSearch):
    """Planetary Computer search model: raises the default result limit and
    restricts/validates the intersects and datetime fields."""
    # Increase the default limit for performance
    # Ignore "Illegal type annotation: call expression not allowed"
    limit: Optional[conint(ge=1, le=1000)] = DEFAULT_LIMIT # type:ignore
    # Can be removed when
    # https://github.com/stac-utils/stac-fastapi/issues/187 is closed
    intersects: Optional[
        Union[
            Point,
            MultiPoint,
            LineString,
            MultiLineString,
            Polygon,
            MultiPolygon,
            GeometryCollection,
        ]
    ]
    @validator("datetime")
    def validate_datetime(cls, v: str) -> str:
        """Validate datetime.
        Custom to allow for users to supply dates only.

        Accepts either an interval "begin/end" (where either side may be
        the open-ended ".."), or a single date, which is interpreted as
        the end of an open-ended interval. Raises ValueError when a bound
        does not parse or when begin > end.
        """
        if "/" in v:
            values = v.split("/")
        else:
            # Single date is interpreted as end date
            values = ["..", v]
        dates: List[str] = []
        for value in values:
            if value == "..":
                dates.append(value)
                continue
            # str_to_datetime raises for unparseable values; called here
            # purely for validation of each closed bound.
            str_to_datetime(value)
            dates.append(value)
        if ".." not in dates:
            # Both bounds are closed: enforce begin <= end.
            if str_to_datetime(dates[0]) > str_to_datetime(dates[1]):
                raise ValueError(
                    "Invalid datetime range, must match format (begin_date, end_date)"
                )
        return v
class RedisBaseItemCache(BaseItemCache):
    """
    Return the base item for the collection and cache by collection id.

    First check if the instance has a local cache of the base item, then
    try redis, and finally fetch from the database.
    """
    def __init__(
        self,
        fetch_base_item: Callable[[str], Coroutine[Any, Any, Dict[str, Any]]],
        request: Request,
    ):
        # Per-instance (request-lifetime) cache, keyed by collection id.
        self._base_items: dict = {}
        super().__init__(fetch_base_item, request)
    async def get(self, collection_id: str) -> Dict[str, Any]:
        """Return the base item for *collection_id*, consulting the local
        dict first and falling back to redis/database via cached_result."""
        async def _fetch() -> Dict[str, Any]:
            return await self._fetch_base_item(collection_id)
        if collection_id not in self._base_items:
            cache_key = f"{CACHE_KEY_BASE_ITEM}:{collection_id}"
            # cached_result checks redis before invoking _fetch.
            self._base_items[collection_id] = await cached_result(
                _fetch, cache_key, self._request
            )
        return self._base_items[collection_id]
@attr.s
class PCItemCollectionUri(ItemCollectionUri):
    """Item-collection GET request model with the raised default limit."""
    limit: Optional[int] = attr.ib(default=DEFAULT_LIMIT) # type:ignore
@attr.s
class PCSearchGetRequest(BaseSearchGetRequest):
    """Search GET request model with the raised default limit."""
    limit: Optional[int] = attr.ib(default=DEFAULT_LIMIT) # type:ignore
| microsoft/planetary-computer-apis | pcstac/pcstac/search.py | search.py | py | 3,330 | python | en | code | 88 | github-code | 36 |
7078442662 | def fiware_arguments(func):
    def wrapper(*args, **kwargs):
        # Build the parser via the wrapped factory, then append the common
        # FIWARE options before handing it back to the caller.
        parser = func(*args, **kwargs)
        parser.add_argument(
            '--fiwareservice',
            help='tenant/service to use when connecting Orion Context Brocker')
        parser.add_argument(
            '--fiwareservicepath',
            help='scope/path to use when connecting Orion Context Brocker')
        return parser
    return wrapper
| OkinawaOpenLaboratory/fiware-meteoroid-cli | meteoroid_cli/meteoroid/v1/libs/decorator.py | decorator.py | py | 422 | python | en | code | 5 | github-code | 36 |
71104421224 | #!/usr/bin/env python
# -*- coding=UTF-8 -*-
# Created at May 26 10:07 by BlahGeek@Gmail.com
import sys
if hasattr(sys, 'setdefaultencoding'):
sys.setdefaultencoding('UTF-8')
import os
import httplib2
import requests
from BeautifulSoup import BeautifulSoup
from .settings import COOKIR_PATH
BASE_URL = 'http://3g.renren.com/status/newstatus.do'
class RenRen:
    """Minimal RenRen mobile-site client that posts a status update using a
    cookie persisted at COOKIR_PATH.

    NOTE(review): `cookie.iteritems()` in postStatus is Python 2 only
    (consistent with the file's sys.setdefaultencoding guard); this class
    breaks on Python 3 — confirm target interpreter.
    """
    def __init__(self):
        # Restore the saved session cookie into a requests session.
        self.session = requests.Session()
        cookie = open(COOKIR_PATH).read()
        cookie = [x.strip() for x in cookie.split(';') if x]
        cookie = map(lambda x: x.split('=', 1), cookie)
        cookie = dict(cookie)
        self.session.cookies = requests.utils.cookiejar_from_dict(cookie)
    def postStatus(self, text):
        """Post *text* as a new status via the site's HTML form, then save
        the (possibly refreshed) session cookie back to disk."""
        # Scrape the status form to pick up its hidden anti-CSRF fields.
        soup = BeautifulSoup(self.session.get(BASE_URL).content)
        form = soup.find('form')
        assert(form is not None)
        values = map(lambda x: (x['name'], x['value']), form.findAll('input', type='hidden'))
        data = {'status': text}
        data.update(dict(values))
        req = self.session.post(form['action'], data)
        # save cookie
        with open(COOKIR_PATH, 'w') as f:
            cookie = requests.utils.dict_from_cookiejar(self.session.cookies)
            cookie = '; '.join([k+'='+v for k, v in cookie.iteritems()])
            f.write(cookie)
| blahgeek/treehole | treehole/renren.py | renren.py | py | 1,318 | python | en | code | 30 | github-code | 36 |
28128335578 | import os
import v2_draw_dynamic as main_app
import logging
import sys
def cmd(cmdstr):
    """Echo the shell command, then execute it via os.system."""
    print(cmdstr)
    os.system(cmdstr)
def main():
    '''
    if len(sys.argv) < 2:
        logging.error("please input msg log file")
        return
    '''
    # Redraw the dynamic plot forever; any exception is logged and the
    # loop continues rather than crashing.
    # NOTE(review): the msg.log path is hard-coded (the argv handling above
    # is disabled); confirm whether it should come from the command line.
    while True:
        try:
            main_app.draw_dynamic(r"D:\Programs\bin3(EC2)_R\log\010011112222\msg.log")
        except Exception as e:
            print("catch exception",e)
            logging.error(str(e))
main() | HZRelaper2020/show_log | v2_draw_dynamic_script.py | v2_draw_dynamic_script.py | py | 497 | python | en | code | 0 | github-code | 36 |
27479098786 |
import os
import pika # Importa a biblioteca pika para interagir com o RabbitMQ
import time # Importa a biblioteca time para controlar o tempo de sleep do loop
import socket # Importa socket para verificar a conectividade com a internet
import json # Importa json para manipular dados JSON
import random # Importa random para gerar números aleatórios
import math # Importa math para realizar operações matemáticas
import xml.etree.ElementTree as ET # Importa ElementTree para manipular dados XML
import glob # Importa glob para encontrar todos os caminhos que correspondem a um padrão específico
from datetime import datetime, timedelta # Importa datetime e timedelta para trabalhar com datas e horas
from dotenv import load_dotenv # Importa load_dotenv para carregar variáveis de ambiente do arquivo .env
# Carregar variáveis do .env
load_dotenv()
# Obtém as variáveis de ambiente para a URL do RabbitMQ e a chave de roteamento
RABBITMQ_URL = os.getenv("RABBITMQ_URL")
ROUTING_KEY = os.getenv("ROUTING_KEY")
class SensorSimulator:
    """Simulates periodic factory-sensor readings, logging each batch to a
    daily XML file and publishing it as JSON to a RabbitMQ queue."""
    def __init__(self, sensor_data):
        """
        Initialise the simulator with the supplied sensor definitions.

        :param sensor_data: dict mapping (machine_id, sensor_id) tuples to
            (min, max, mean) value tuples used when generating readings.
        """
        self.sensor_data = sensor_data
    def generate_value(self, min_val, max_val, mean_val, fluctuation=5):
        """
        Generate a random reading near the mean, clamped to [min_val, max_val].

        :param min_val: float, lowest allowed value.
        :param max_val: float, highest allowed value.
        :param mean_val: float, centre of the fluctuation window.
        :param fluctuation: float, allowed deviation around the mean.
        :return: float, the generated reading.
        """
        lower_bound = max(min_val, mean_val - fluctuation)
        upper_bound = min(max_val, mean_val + fluctuation)
        return random.uniform(lower_bound, upper_bound)
    def simulate_sensor_failure(self, prob_failure=0.01):
        """
        Randomly decide whether the sensor fails for this reading.

        :param prob_failure: float, failure probability per reading.
        :return: bool, True on failure, False otherwise.
        """
        return random.random() < prob_failure
    def log_data_to_xml(self, batch_data):
        """
        Append a batch of readings to today's XML log file.

        :param batch_data: list of reading dicts to persist.
        """
        date_str = datetime.now().strftime("%Y%m%d")
        log_filename = f"sensor_data_log_{date_str}.xml"
        # Reuse today's log file if it already exists, otherwise start fresh.
        if os.path.exists(log_filename):
            tree = ET.parse(log_filename)
            root = tree.getroot()
        else:
            root = ET.Element("SensorDataBatch")
        # One <SensorData> element per reading, one child tag per field.
        for data in batch_data:
            sensor_data = ET.SubElement(root, "SensorData")
            for key, value in data.items():
                ET.SubElement(sensor_data, key).text = str(value)
        # Rewrite the whole file including the appended batch.
        tree = ET.ElementTree(root)
        with open(log_filename, "wb") as file:
            tree.write(file)
        # Keep at most the 7 newest daily logs.
        self.clean_old_logs(log_file_prefix="sensor_data_log_", max_logs=7)
    def clean_old_logs(self, log_file_prefix, max_logs):
        """
        Delete old log files, keeping only the newest *max_logs* of them.

        :param log_file_prefix: str, filename prefix of the log files.
        :param max_logs: int, number of log files to retain.
        """
        log_files = sorted(glob.glob(f"{log_file_prefix}*.xml"))
        for log_file in log_files[:-max_logs]:
            os.remove(log_file)
    def send_to_rabbitmq(self, message):
        """
        Publish a message to the configured RabbitMQ queue.

        :param message: str, message body to send.
        """
        # A new connection per message is simple but costly; acceptable
        # at this publish rate (one batch every few minutes).
        connection = pika.BlockingConnection(pika.URLParameters(RABBITMQ_URL))
        channel = connection.channel()
        channel.queue_declare(queue=ROUTING_KEY, durable=True)
        # delivery_mode=2 marks the message as persistent.
        channel.basic_publish(exchange='',
                              routing_key=ROUTING_KEY,
                              body=message,
                              properties=pika.BasicProperties(
                                  delivery_mode=2,
                              ))
        print(f" [x] Enviado '{message}'")
        connection.close()
    def simulate(self):
        """
        Run the simulation loop forever: generate one reading per sensor,
        publish the batch to RabbitMQ and log it to XML. Never returns.
        """
        # Block until an internet connection is available.
        while not self.is_connected():
            print("Aguardando conexão com a internet...")
            time.sleep(5)
        specific_sensors = list(self.sensor_data.keys())
        # Main simulation loop.
        while True:
            batch_data = []
            start_timestamp = datetime.now()
            # Generate one reading for every configured sensor.
            for machine_id, sensor_id in specific_sensors:
                faixa_min, faixa_max, valor_medio = self.sensor_data[(machine_id, sensor_id)]
                # Small offset plus a sinusoidal drift based on the minute.
                valor_medio += 1
                valor_medio += 5 * math.sin(start_timestamp.minute / 5)
                # A failed sensor reports None instead of a value.
                if self.simulate_sensor_failure():
                    value = None
                else:
                    value = self.generate_value(faixa_min, faixa_max, valor_medio)
                # Spread timestamps over the next few minutes.
                timestamp = start_timestamp + timedelta(minutes=random.randint(0, 5))
                str_timestamp = timestamp.strftime('%Y-%m-%d %H:%M:%S')
                data = {
                    "timestamp": str_timestamp,
                    "CompanyId": "EMPRESA01",
                    "MachineId": machine_id,
                    "SensorId": sensor_id,
                    "Value": value
                }
                batch_data.append(data)
            # Publish to RabbitMQ and persist to the XML log.
            self.send_to_rabbitmq(json.dumps(batch_data))
            self.log_data_to_xml(batch_data)
            # Sample faster during business hours (09-17), slower otherwise.
            current_hour = datetime.now().hour
            if 9 <= current_hour <= 17:
                time.sleep(180)
            else:
                time.sleep(300)
    @staticmethod
    def is_connected():
        """
        Check for internet connectivity by opening a TCP connection.

        :return: bool, True if connected, False otherwise.
        """
        try:
            socket.create_connection(("www.google.com", 80))
            return True
        except OSError:
            pass
        return False
# Sensor definitions: (machine_id, sensor_id) -> (min, max, mean)
sensor_data = {
    ("M01", "S01"): (70, 100, 80),
    ("M01", "S02"): (500, 900, 700),
    ("M02", "S03"): (100, 140, 120),
    ("M03", "S04"): (500, 900, 700),
    ("M04", "S05"): (160, 210, 170),
    ("M05", "S06"): (70, 100, 80),
    ("M05", "S07"): (100, 140, 130),
    ("M06", "S08"): (7000, 12000, 10800),
    ("M06", "S09"): (100, 140, 130),
    ("M07", "S10"): (70, 100, 80),
    ("M07", "S11"): (7000, 12000, 10800),
    ("M07", "S16"): (100, 400, 201),
    ("M08", "S12"): (70, 100, 80),
    ("M08", "S13"): (1000, 3000, 2000),
    ("M09", "S14"): (1500, 1900, 1765),
    ("M10", "S15"): (1500, 1900, 1765)
}
# Start the simulation (blocks forever; runs on module import)
simulator = SensorSimulator(sensor_data)
simulator.simulate()
| elderofz1on/ZionArchive | Projetos/MachineSimulatorMQTT/sensor_simulator.py | sensor_simulator.py | py | 7,987 | python | pt | code | 0 | github-code | 36 |
10666273143 | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# 왜 틀리지?
# 삐빅 시간초과.. -> 아 input시간 차이 은근 크다!
import sys
input=sys.stdin.readline
N=int(input())
S={}
for _ in range(N):
a=int(input())
if a not in S:
S[a]=1
else:
S[a]+=1
# 여기서 리스트로 다시 저장하면 시간초과임
# for i in S:
# if max(S.values())==S[i]:
# M.append(int(i))
# M.sort()
# print(M[0])
# items를 써서 2차원 배열 형태로 만든 것....?
S=sorted(S.items(), key=lambda x:(-x[1], x[0]))
print(S[0][0])
# -
| chahyeonnaa/algorithm | 정렬/11652 카드.py | 11652 카드.py | py | 846 | python | ko | code | 0 | github-code | 36 |
21135250267 | # Databricks notebook source
# MAGIC %md
# MAGIC ## 2D Linear Regression
# MAGIC
# MAGIC #### Description
# MAGIC
# MAGIC This notebook is designed to provide a very basic insight into linear regression and how to utilise sklearn to perform it on datasets.
# MAGIC
# MAGIC In this notebook linear regression is performed on a dataset with 2 numeric variables, its aim is to explain the basic principles of linear regression before moving onto the second notebook which demonstrates linear regression on a multi-variable problem.
# MAGIC
# MAGIC Linear regression uses algebra to define the linear relationship between two or more variables. In 2-dimensional space, this linear relationship can be seen as the 'line-of-best-fit', a straight line that best represents the relationship between the 2 variables. This relationship holds as we add more variables though the line exists in higher dimensions and is hard to visualise through standard means.
# MAGIC
# MAGIC This linear relationship can then be used as a method for helping predicitions.
# MAGIC
# MAGIC #### SKLearn performance in databricks
# MAGIC
# MAGIC While SKLearn can be useful in certain situations, it is not designed to take advantage of cluster computing resources, which arguably is a major downside to using it inside databricks as you are not utilising the full processing power available to you.
# MAGIC
# MAGIC This is not us saying do not use sklearn, as it may well be appropriate for certain tasks. However, if you are performing tasks over large datasets and want to fully exploit the compute resources you have available, you should look into the Spark `MLlib` library.
# COMMAND ----------
# MAGIC %md
# MAGIC #### Retrieve the data
# MAGIC
# MAGIC In this example the toy datasets have already been created and added to the collab database to mimic an actual workflow. We will use a general function to get the database name; however, this can be replaced with a string.
# MAGIC
# MAGIC The utility functions are imported via the next command which runs the notebook stored in a different location. You can view these functions by navigating to the folder or you can also click the link in the next command. This can also be a useful way to tidy up your code and store frequently used functions in their own notebook to be imported into others.
# COMMAND ----------
# DBTITLE 1,Import python utility functions
# MAGIC %run ../../Wrangler-Utilities/Utilities-Python
# COMMAND ----------
import matplotlib.pyplot as plt
import pandas as pd
# get the table name
table_name = f"{get_collab_db()}.toy_2d_linear_regression"
# retrieve the dataframe
spark_df = spark.table(table_name)
# show the spark dataframe
display( spark_df )
# COMMAND ----------
# MAGIC %md
# MAGIC #### Understanding the Data
# MAGIC
# MAGIC As a first step before we move on to using creating potentially complex models, it may be useful to get some quick insights into the dataset. This way when moving forward we have a general appreciation of the contents of the dataset.
# MAGIC
# MAGIC There is many ways to do this, here we will show the inbuilt describe method and also how to create a simple plot of the data.
# MAGIC
# MAGIC *note: because this data is 2d in nature, plots are quite straightforward, more complex visualisation methods are needs for multivariable data*
# COMMAND ----------
# using .describe() gives us insight into some basic metrics of a dataframe
# we can also pass in column names e.g. .describe(['feature']), to isolate columns
display(spark_df.describe())
# COMMAND ----------
# to plot the data we must first convert it to a NumPy array or a Pandas dataframe
# Convert from spark dataframe to pandas dataframe
pandas_df = spark_df.toPandas()
# extract the feature and target columns
X = pandas_df['feature']
y = pandas_df['target']
# plot the data
plt.figure(figsize=(10, 5))
plt.scatter(X, y, marker='o')
plt.title("Plot of the Random Regression Dataset", fontsize="large")
plt.xlabel("Feature")
plt.ylabel("Target")
plt.show()
# COMMAND ----------
# MAGIC %md
# MAGIC #### Utilising SKLearn Linear Regression
# MAGIC In the above plot of the data we can see that there is a clear pattern in the data. But now suppose we want to model the exact linear relationship of this dataset.
# MAGIC
# MAGIC This is where we can utilise the sklearn LinearRegression function to aid us. To utilise the sklearn methods we must have a pandas dataframe not a spark dataframe.
# COMMAND ----------
# convert spark dataframe to pandas dataframe
pandas_df = spark_df.toPandas()
# extract the 2 features we want into seperate variables
X = pandas_df['feature']
y = pandas_df['target']
# split the data into training and test sets
# note the random_state variable is used so split is same every time, in practice ignore
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
# models require 2d Arrays so reformat
X_train = X_train.values.reshape(-1,1)
X_test = X_test.values.reshape(-1,1)
y_train = y_train.values.reshape(-1,1)
y_test = y_test.values.reshape(-1,1)
# fit a linear regression model ot the training data
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train) # training the algorithm
# generate our predicitions from the test data
y_pred = regressor.predict( X_test )
# COMMAND ----------
# MAGIC %md
# MAGIC #### Visualising our Predictions
# MAGIC
# MAGIC There is many different ways we can now visualise our predictions:
# MAGIC - We can plot a figure of the scattered test plots and our predicted line
# MAGIC - We can display a table showing the actual test values vs our predicted values
# MAGIC - We can then plot a figure of this table to visualise it
# MAGIC
# MAGIC These are just few examples of course there is many more ways to gain insight.
# COMMAND ----------
# we can extract the exact intercetp and coefficient of the slope
print("Intercept : ", regressor.intercept_)
print("Coefficient : ", regressor.coef_)
# plot the figure
plt.figure(figsize=(10, 5))
plt.scatter(X_test, y_test, color='gray')
plt.plot(X_test, y_pred, color='red', linewidth=2)
plt.title("Prediction vs Test data", fontsize="large")
plt.xlabel("Feature")
plt.ylabel("Target")
plt.show()
# COMMAND ----------
# create table view of actual values vs predictions
df = pd.DataFrame({'Actual': y_test.flatten(), 'Predicted': y_pred.flatten()})
display(df)
# COMMAND ----------
# visualise above table as a bar chart, note were only visualising the first 20
df1 = df.head(20)
df1.plot(kind='bar', figsize=(10,5))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='gray')
plt.show()
# COMMAND ----------
# MAGIC %md
# MAGIC ### Evaluating our Model
# MAGIC
# MAGIC The final step is to evaluate the performance of our model, this is important to compare how well different algorithms perform on a particular dataset.
# MAGIC
# MAGIC For regression, three evaluation metrics are commonly used:
# MAGIC - **Mean Absolute Error** is the mean of the absolute value of the errors
# MAGIC $$ MAE = \frac{1}{n} \sum^{n}_{j=1}|y_i - y_j| $$
# MAGIC - **Mean Squared Error** is the mean of th esquared errors
# MAGIC $$ MSE = \frac{1}{N} \sum^{n}_{j=1}(y_i - y_j)^2 $$
# MAGIC - **Root Mean Squared Error** is the square root of the mean squared errors
# MAGIC $$ MSE = \sqrt{\frac{1}{N} \sum^{n}_{j=1}(y_i - y_j)^2} $$
# COMMAND ----------
from sklearn.metrics import mean_absolute_error, mean_squared_error
import numpy as np
print('Mean Absolute Error :', mean_absolute_error(y_test, y_pred))
print('Mean Squared Error :', mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error :', np.sqrt(mean_squared_error(y_test, y_pred)))
| NHSDigital/sde_example_analysis | python/machine_learning_small_data/regression_simple.py | regression_simple.py | py | 7,838 | python | en | code | 1 | github-code | 36 |
25196942976 | import numpy as np
import argparse
### Arguments ###
parser = argparse.ArgumentParser()
parser.add_argument('input', type=str, help='Input text file')
parser.add_argument('fmap', type=str, help='Map with value and residue')
parser.add_argument('chain', type=str, help='Chain identifier to match residues')
parser.add_argument("--xvg", default=False, action=argparse.BooleanOptionalAction)
args = parser.parse_args()
def splitm(line):
'''
Correctly split pdb file
'''
return([line[0:6].strip(),line[6:11].strip(),line[12:16].strip(),line[16:17].strip(),line[17:20].strip(),
line[21:22].strip(),line[22:26].strip(),line[26:27].strip(),line[30:38].strip(),line[38:46].strip(),
line[46:54].strip(),line[54:60].strip(),line[60:66].strip(),line[76:78].strip(),line[78:80].strip()])
def create_dict(fname):
map = {}
with open(fname,'r') as f:
for line in f:
data = line.split()
if args.xvg:
if data[0] != '#' or data[0] != '@':
map[data[0]] = float(data[1])*100
else:
map[data[0][3:]] = float(data[1])*100
return map
def main(inp, fmap, chain):
print('Suggested spectrum')
print('spectrum b, 0xF4F3F3 0xD28288 0xF6A377 0xFBDF66 0xCFF66A 0x77FB74')
map = create_dict(fmap)
keys = map.keys()
pdb_format = "{:6s}{:5d} {:^4s}{:1s}{:3s} {:1s}{:4d}{:1s} {:8.3f}{:8.3f}{:8.3f}{:6.2f}{:6.2f} {:>2s}{:2s}\n"
out = inp.split('.')[0]+'_painted.pdb'
with open(inp,'r') as f:
with open(out, 'w') as fw:
for line in f:
data = splitm(line)
if data[0] == 'ATOM':
data[1] = int(data[1])
data[6] = int(data[6])
data[8] = float(data[8])
data[9] = float(data[9])
data[10] = float(data[10])
data[11] = float(data[11])
if data[5] == chain:
if str(data[6]) in keys:
data[12] = map[str(data[6])]
fw.write(pdb_format.format(*data))
else:
if len(data[12]) == 0:
data[12] = 0.0
fw.write(pdb_format.format(*data))
else:
fw.write(line)
else:
if len(data[12]) == 0:
data[12] = 0.0
fw.write(pdb_format.format(*data))
else:
fw.write(line)
else:
fw.write(line)
if __name__ == '__main__':
main(args.input, args.fmap, args.chain) | JAMelendezD/Contacts | paint_pdb.py | paint_pdb.py | py | 2,209 | python | en | code | 1 | github-code | 36 |
28515474927 | import urllib2, optparse
from opus_core.logger import logger
class LoadXSD(object):
'''
classdocs
'''
def __init__(self, source, destination):
'''
Constructor
'''
self.xsd_source = source
self.xsd_destination = destination
def load_and_store(self):
logger.log_status('Loading xsd file from: %s' %self.xsd_source)
response = urllib2.urlopen( self.xsd_source )
xsd = response.read()
logger.log_status('Store xsd file to: %s' %self.xsd_destination)
outfile = open( self.xsd_destination, 'w')
outfile.write( xsd )
outfile.close()
if __name__ == "__main__":
# default parameters are:
# --source=http://matsim.org/files/dtd/MATSim4UrbanSimConfigSchema.xsd
parser = optparse.OptionParser()
parser.add_option("-s", "--source", dest="xsd_source", action="store", type="string",
help="URL of xsd file")
parser.add_option("-d", "--destination", dest="xsd_destination", action="store", type="string",
help="Destination of xsd file")
(options, args) = parser.parse_args()
if options.xsd_source == None:
logger.log_error("Missing source location (url) to xsd file")
if options.xsd_destination == None:
logger.log_error("Missing destination location for xsd file")
load = LoadXSD( options.xsd_source, options.xsd_destination )
load.load_and_store()
| psrc/urbansim | opus_matsim/models/pyxb_xml_parser/load_xsd.py | load_xsd.py | py | 1,508 | python | en | code | 4 | github-code | 36 |
26743970117 | # -*- coding: utf-8 -*-
from linlp.algorithm.Viterbi import viterbiRecognitionSimply
from linlp.algorithm.viterbiMat.prob_trans_place import prob_trans as trans_p
from linlp.algorithm.viterbiMat.prob_emit_place import prob_emit as emit_p
def placeviterbiSimply(obs, DT, obsDT, debug):
if debug:
x = obs
obs = [('始##始', 'begin')] + obs + [('末##末', 'end')]
length = len(obs)
for no in range(length):
if (obs[no][1] == 'ns') and (obsDT.tree[obs[no][0]].get('total', 1001) <= 1000):
if DT.tree.get(obs[no][0]):
if len(obs[no][0]) < 3:
DT.tree[obs[no][0]].setdefault('H', 1)
DT.tree[obs[no][0]].setdefault('G', 1)
else:
DT.tree[obs[no][0]].update({'G': 1})
else:
if len(obs[no][0]) < 3:
DT.tree[obs[no][0]] = {'H': 1, 'G': 1}
else:
DT.tree[obs[no][0]] = {'G': 1}
elif obs[no][1].startswith('ns'):
obs[no] = ('未##地', obs[no][1])
elif obs[no][1].startswith('x'):
obs[no] = ('未##串', 'x')
elif obs[no][1].startswith('nr'):
obs[no] = ('未##人', obs[no][1])
elif obs[no][1].startswith('nt'):
obs[no] = ('未##团', obs[no][1])
elif obs[no][1].startswith('m'):
obs[no] = ('未##数', obs[no][1])
elif obs[no][1].startswith('t'):
obs[no] = ('未##时', obs[no][1])
elif not DT.tree.get(obs[no][0]): # 不在地名词典时
DT.tree[obs[no][0]] = {'Z': 21619956}
path = viterbiRecognitionSimply(obs, trans_p, emit_p, DT)
if debug:
s = ''
t = '['
l = len(x)
for i in range(l):
word = x[i]
s += '[' + word[0] + ' '
t += word[0]
for k, v in DT.tree[obs[i + 1][0]].items():
if k == 'total':
continue
s += k + ':' + str(v) + ' '
s += ']'
t += '/' + path[i + 1] + ', '
t += ']'
print('地名角色观察: %s' % s)
print('地名角色标注: %s' % t)
return path[1:-1]
| yuanlisky/linlp | linlp/recognition/PlaceRecognition.py | PlaceRecognition.py | py | 2,220 | python | en | code | 0 | github-code | 36 |
39901505337 | from collections import OrderedDict
from django.core.urlresolvers import resolve
def link_processor(request):
"""
This function provides, to all pages, a dict of links called "page_links".
These links contain {"name": "tag"} for a name of a page to a view tag.
These are used to automatically populate the sidebars.
"""
# These are names which go to url tags.
SIDEBAR_URLS = OrderedDict()
SIDEBAR_URLS["Home"] = "home"
SIDEBAR_URLS["Schools"] = "schools"
SIDEBAR_URLS["Professors"] = "professors"
SIDEBAR_URLS["Reviews"] = "reviews"
return {"page_links": SIDEBAR_URLS.items(),
"current_page_name": resolve(request.path_info).url_name,
}
| brhoades/sweaters-but-with-peer-reviews | middleware/links.py | links.py | py | 712 | python | en | code | 1 | github-code | 36 |
17770305064 | class leafNode:
def __init__(self, data) -> None:
self.leafData = self.numberOfLabelOccurrences(data) #para análise de treinamento
self.isLeaf = True
def numberOfLabelOccurrences(self, data):
# um dicionário (nao permite itens duplicados)
# para armazenar as labels e quantas vezes ocorrem
numberOfOccurrences = {}
for row in data:
rowLabel = row[-1] # label na última coluna
if rowLabel not in numberOfOccurrences:
numberOfOccurrences[rowLabel] = 0
numberOfOccurrences[rowLabel] += 1
# retorna o dicionario com a contagem de ocorrencias de cada label
return numberOfOccurrences | gabteo/bandeiras-covid | bandeiras-covid/leafNode.py | leafNode.py | py | 709 | python | pt | code | 0 | github-code | 36 |
4164917046 | # task 10.2 Напишіть програму, яка пропонує користувачу ввести свій вік, після чого виводить повідомлення про те чи вік є парним чи непарним числом.
# В програмі необхідно передбачити можливість введення від’ємного числа, і в цьому випадку згенерувати виняткову ситуацію.
# Головний код має викликати функцію, яка обробляє введену інформацію.
class AgeError(ValueError):
def __init__(self, data):
self.data = data
def __str__(self):
return repr(self.data)
def check_age(p_age):
if p_age < 0:
raise AgeError('Age is negative')
if p_age % 2 > 0:
return 'Odd age'
return 'Even age'
if __name__ == "__main__":
try:
age = int(input('Please enter your age:'))
print(check_age(age))
except AgeError as e:
print('Negative age is not allowed. ', e)
except Exception:
print("Seems like something else went wrong")
finally:
print("Program finished") | PythonCore051020/HW | HW10/RLysyy/task_10_2.py | task_10_2.py | py | 1,230 | python | uk | code | 0 | github-code | 36 |
26409275029 | #这个是我自己写的,过了前80个Case 过不了最后一个 超时了
#代码随想录的前两个答案也超时,只有那个用字典的不超时
'''
class Solution:
def findItinerary(self, tickets: List[List[str]]) -> List[str]:
self.result = []
tickets.sort()
used = [False] * len(tickets)
self.backtracking(['JFK'],used,tickets)
return self.result
def backtracking(self, path, used, tickets):
if len(path) == len(tickets) + 1:
self.result = path
# print('可以了')
return True
for index, ticket in enumerate(tickets):
if used[index] == False and tickets[index][0] == path[-1]:
#找没用过的机票且对得上上一个地方的
path.append(tickets[index][1])
used[index] = True
#向下递归
# print('path is',path)
# print('used is',used)
# print('继续递归')
if self.backtracking(path, used, tickets):
return True
path.pop()
used[index] = False
return False
'''
#代码随想录里的字典解法
#这道题看着这个Case来想就行了
#ticket = [[“jfk”,“kul”],["nrt","jfk"],["jfk","nrt"]]
class Solution:
from collections import defaultdict
def findItinerary(self, tickets: List[List[str]]) -> List[str]:
targets = defaultdict(list)
for ticket in tickets:
targets[ticket[0]].append(ticket[1])
#以上得到字典,key是出发地,value是一个list,list里装的是目的地
for value in targets.values():
value.sort(reverse=True)
#以上将Targets字典中的目的地按照逆序排序
self.result = []
self.backtracking("JFK", targets)
return self.result[::-1] #return result (逆序)
def backtracking(self, start, targets):
while targets[start]: #当某个出发机场有目的机场时
next_start = targets[start].pop() #找到下一个出发机场 并在targets里把用过的目的地去掉
self.backtracking(next_start, targets)
self.result.append(start) #当某个出发机场找不到目的机场时,放进result里
#result最后是要逆序返回的 belike jfk-nrt-jfk-kul 在result里是["kul"<-"jfk"<-"nrt"<-"jfk"]
#所以找不到出发机场的kul会最先进result,因为他左边没有东西了
| lpjjj1222/leetcode-notebook | 332. Reconstruct Itinerary.py | 332. Reconstruct Itinerary.py | py | 2,563 | python | zh | code | 0 | github-code | 36 |
14003011170 | import json
import requests
import random
import re
baseUrl = "http://jw1.yzu.edu.cn/"
session = requests.Session()
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded',
'Connection': 'keep-alive',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
'Accept-Encoding': 'gzip, deflate',
'Upgrade-Insecure-Requests': '1'}
def GetCode():
validcodeUrl = baseUrl + "validateCodeAction.do?random=+Math.random()"
validcode = session.get(validcodeUrl)
if (validcode.status_code == 200):
with open('codepic.png', 'wb') as f:
f.write(validcode.content)
print("验证码保存成功!")
else:
print("验证码保存失败!")
def Login(validcode):
data = {
"zjh": 191304324,
"mm": 191304324,
"v_yzm": validcode
}
res = session.post(baseUrl+"loginAction.do", data=data)
if "学分制综合教务" in res.text:
print("登录成功\n")
else:
print("登陆失败\n")
def GetSessionId():
res = session.get("http://jw1.yzu.edu.cn/dwr/engine.js")
start = res.text.find("dwr.engine._origScriptSessionId")
w1 = "dwr.engine._origScriptSessionId = "
w2 = ";"
pat = re.compile(w1+'(.*?)'+w2, re.S)
sessionId = pat.findall(res.text)[0][1:-1] + \
str(random.randint(0, 1000))
return sessionId
def ClassScript():
GetCode()
validCode = input("输入验证码:\n")
Login(validCode)
# queryClass = {
# "kch": 17038002,
# "kcm": "",
# "actionType": 3,
# "pageNumber": -1
# }
classData = {
"kcId": 17029015_01,
# "kcId": 17038002_01,
"preActionType": 3,
"actionType": 9
}
res = session.get(
"http://jw1.yzu.edu.cn/xkAction.do?actionType=-1&fajhh=3440")
# res = session.get(
# "http://jw1.yzu.edu.cn/xkAction.do?actionType=3&pageNumber=-1")
# res = session.post(baseUrl+"xkAction.do", data=queryClass)
# sessionId = GetSessionId()
# jSessionId = session.cookies["JSESSIONID"]
# payloadData = "callCount=1\npage=/xkAction.do?actionType=-1&fajhh=3440\nhttpSessionId=" + jSessionId + \
# "\nscriptSessionId="+sessionId + \
# "\nc0-scriptName=ajaxtool\nc0-methodName=reCall\nc0-id=0\nbatchId=0 "
# ajaxUrl = "http://jw1.yzu.edu.cn/dwr/call/plaincall/ajaxtool.reCall.dwr"
# res = session.post(ajaxUrl, data=payloadData)
res = session.post(
baseUrl+"xkAction.do", data=classData)
# res = session.post(
# baseUrl+"xkAction.do", data=classData)
print(res.text)
# file = open("text.html", mode="w")
# file.write(res.text)
ClassScript()
| Rickenbacker620/Codes | Python/stuff/urp.py | urp.py | py | 2,979 | python | en | code | 0 | github-code | 36 |
3587126567 | # -*- coding: utf-8 -*-
"""
Binarization
Feature binarization is the process of thresholding numerical features to
get boolean values.
"""
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import numpy as np
import pandas as pd
from scipy import signal,stats
from flask import Flask,request,jsonify
import json
import re
import os
import codecs
def py_configs(configpath,conf=None, delete=None):
if not os.path.exists(configpath):
obj = {}
else:
with codecs.open(configpath, 'r', 'utf-8') as f:
str1 = f.read()
# obj = obj.encode('utf8').decode('utf8')
if not str1:
obj = {}
try:
obj = json.loads(str1)
except:
#
obj = {}
if isinstance(delete, str):
obj.pop(delete)
with codecs.open(configpath, 'w', 'utf-8') as f:
str1 = jsonify(obj)
f.write(str1)
return obj
if isinstance(conf, dict):
for key in conf:
obj[key] = conf[key]
with codecs.open(configpath, 'w', 'utf-8') as f:
str1 = jsonify(obj)
f.write(str1)
elif isinstance(conf, str):
if conf in obj:
return obj[conf]
else:
return {}
return obj
configpath=os.path.join(os.path.dirname(__file__),'config.txt')
try:
config = py_configs(configpath)
Signal_SERVER = config["Signal_SERVER"]
Signal_PORT = config["Signal_PORT"]
except:
raise Exception("Configuration error")
app = Flask(__name__)
@app.route('/Data_preprocessing/scaling_data',methods=['POST'])
def daqfft():
try:
form_key=list(request.form.to_dict().keys())
file_key=list(request.files.to_dict().keys())
print('k: ',form_key)
keys=['file','operation']
for key in keys:
if key not in form_key or key not in file_key:
code = 2
output = {"code": code, "KeyError": str(key)}
output = json.dumps(output)
return output
operation = request.form['operation']
file_get = request.files.get('file')
X_train = pd.read_csv(file_get)
result=''
# Operation Binarization
if operation == 'Normalization':
binarizer = preprocessing.Binarizer().fit(X_train) # fit does nothing
bin_tran =binarizer.transform(X_train)
result= jsonify(bin_tran)
return result
except Exception as e:
print('Exception: ',e)
code = 1
result = {"code":code,"error":re.findall("'([\w\d _]+)'",str(type(e)))[0]}
result = jsonify(result)
return result
if __name__=="__main__":
app.run(host= Signal_SERVER, port=int(Signal_PORT))
| KaifangXu/APIs | Data_pre/Binarization.py | Binarization.py | py | 2,972 | python | en | code | 0 | github-code | 36 |
159278396 | # глобальная переменная доступная в любом месте
# локальние доступние токо в пределах етого блока
name = 'Tom'
a = 5
N = (100,)
def myfunc(b):
# global a
a = 10
for x in range(b):
n = x + 1 # здесь a=10 как локальная переменная если написать global
print(n, end=" ") # мы будем работать с той которой создали в начале
print() # и если мы напишем принт(а) то будет уже 10
# также можно писать глобал когда нет глобальной переменни
# тоесть когда мы не обозначи а вообше
myfunc(6)
print()
print(a)
def say_hi():
print('Hello', name)
def say_bye():
name = 'Bob'
print('Good bye', name)
say_hi()
say_bye()
print(name)
# nonlocal
x = 0
def outer():
x = 1
def inner():
# nonlocal x
x = 2
print('inner:', x)
# каждая функция взяла свой х(переменную)
inner() # каторий обьявлен в своей области видимости
print('outer:', x)
outer()
print('global:', x)
print()
print()
x1 = 0
def outer():
x1 = 1
def inner():
nonlocal x1
x1 = 2 # тоесть если мы напишем nonlocal
print('inner:', x1) # то мы будем работать с x
# с уровнем выше с глоб так нельзя
inner()
print('outer:', x1)
outer()
print('global:', x1)
print()
print()
x2 = 0
def outer():
global x2
x2 = 1
def inner():
x2 = 2
print('inner:', x2)
# вот что будет при global
inner()
print('outer:', x2)
outer()
print('global:', x2)
| Savag33/My_Projects_python | Scopes/global,local.py | global,local.py | py | 2,023 | python | ru | code | 1 | github-code | 36 |
9636350217 | #%%
import numpy.random as rnd
import pandas as pd
#%%
# Number of samples
number_of_samples = 1000
#%%
# Distribution
def coin_flip():
if rnd.random() >= 0.5: # Set distribtution
return True
else:
return False
#%%
def simulation(number_of_samples):
simulated_data = []
for sample in range(0,number_of_samples):
if coin_flip():
simulated_data.append(1)
else:
simulated_data.append(0)
return simulated_data
#%%
coin_simulation = simulation(number_of_samples)
# %%
coin_simulation[0:10]
# %%
coin_df = pd.DataFrame(coin_simulation)
# %%
coin_df.hist()
# %%
coin_df.describe()
# %%
def inside_circle(x,y):
if (x**2 + y**2)**(1/2) <= 1: # Set distribtution
return True
else:
return False
#%%
def pi_simulation(number_of_samples):
x_list=[]
y_list=[]
is_inside_circle=[]
for sample in range(0,number_of_samples):
x = rnd.random()
y = rnd.random()
x_list.append(x)
y_list.append(y)
if inside_circle(x,y):
is_inside_circle.append(1)
else:
is_inside_circle.append(0)
return [x_list,y_list,is_inside_circle]
# %%
circle_df = pd.DataFrame(pi_simulation(number_of_samples))
# %%
circle_df = circle_df.T
# %%
circle_df
# %%
circle_df.columns = ['x','y','inside']
# %%
circle_df
# %%
circle_df.plot.scatter(
x='x',
y='y',
c='inside',
cmap = 'jet'
)
# %%
def estimate_pi(is_inside_circle):
inside = 0
for point in is_inside_circle:
if point == 1:
inside += 1
pi = 4 * inside / len(is_inside_circle)
return pi
#%%
circle_df.plot.scatter(
x='x',
y='y',
c='inside',
cmap = 'jet',
title=f"Pi Estimate = {estimate_pi(circle_df['inside'])}"
)
# %%
def under_parabola(x,y):
if y < x**2:
return True
else:
return False
#%%
def parabola_simulator(number_of_samples):
x_list=[]
y_list=[]
under_para=[]
for sample in range(0,number_of_samples):
x = rnd.random()
y = rnd.random()
x_list.append(x)
y_list.append(y)
if under_parabola(x,y):
under_para.append(1)
else:
under_para.append(0)
return [x_list,y_list,under_para]
#%%
para_df = pd.DataFrame(parabola_simulator(number_of_samples))
#%%
para_df = para_df.T
para_df.columns = ['x','y','under']
# %%
para_df.plot.scatter(
x='x',
y='y',
c='under',
cmap = 'jet'
)
# %%
para_df.hist()
# %%
| jdwrhodes/PyBer_Analysis | Module-Work/coin_simulator.py | coin_simulator.py | py | 2,532 | python | en | code | 0 | github-code | 36 |
69982166183 | import pandas
df = pandas.read_csv("/home/garrett/Desktop/Git_Repositories/Python_Practice/Exercises/Exercise_11/world-cities-population.csv")
cent = dict([(i,[a]) for i, a, in zip(df['Country or Area'],df['City'])])
cent_am = ["Belize","Costa Rica","El Salvador","Guatemala","Honduras","Nicaragua","Panama"]
city_cent_am = list()
for i in cent_am:
city_cent_am.append(cent.get(i,("No cities listed from {0:}".format(i))))
print(city_cent_am) | GarrettMatthews/CS_1400 | Exercises/Exercise_11/central_america.py | central_america.py | py | 461 | python | en | code | 0 | github-code | 36 |
13961966989 | from django import forms
from django.db import transaction
from django.utils.translation import gettext as _
from ..models import Duplicate, HelperShift
class MergeDuplicatesForm(forms.Form):
def __init__(self, *args, **kwargs):
self.helpers = kwargs.pop("helpers")
super(MergeDuplicatesForm, self).__init__(*args, **kwargs)
self.fields["helpers_ignore"] = forms.ModelMultipleChoiceField(
queryset=self.helpers,
widget=forms.CheckboxSelectMultiple(attrs={"id": "helper_ignore"}),
required=False,
label="",
)
self.fields["helpers_selection"] = forms.ModelChoiceField(
queryset=self.helpers,
widget=forms.RadioSelect(attrs={"id": "helper_selection"}),
empty_label=None,
required=True,
label="",
)
def clean(self):
cleaned_data = super().clean()
remaining_helper = cleaned_data["helpers_selection"]
ignore_helpers = cleaned_data["helpers_ignore"]
# remaining helpers must not be ignored (this makes no sense)
if remaining_helper in ignore_helpers:
raise forms.ValidationError(_("The remaining helper must not be ignored."))
# check for overlapping shifts
if not self.check_merge_possible(ignore_helpers):
raise forms.ValidationError(_("Cannot merge helpers which have the same shift."))
@transaction.atomic
def merge(self):
"""
Merge the helpers and keep the data selected in the form.
"""
remaining_helper = self.cleaned_data["helpers_selection"]
ignore_helpers = self.cleaned_data["helpers_ignore"]
oldest_timestamp = remaining_helper.timestamp
# we check this again inside the atomic code block to ensure that no change happends after the
# validation and before the merge (= no new shifts were added)
if not self.check_merge_possible(ignore_helpers):
raise ValueError("Cannot merge helpers with same shifts")
# and then to the merge
for helper in self.helpers:
if helper == remaining_helper or helper in ignore_helpers:
continue
# merge shifts
for helpershift in HelperShift.objects.filter(helper=helper):
helpershift.helper = remaining_helper
helpershift.save()
# merged coordinated jobs
for job in helper.coordinated_jobs:
job.coordinators.add(remaining_helper)
# merge gifts
if remaining_helper.event.gifts:
remaining_helper.gifts.merge(helper.gifts)
# then create the duplicate entry so that old links in mails still work
Duplicate.objects.create(deleted=helper.id, existing=remaining_helper)
# the overall timestamp of the helper should be the oldest one
# (there are multiple timestamps saved: one per helper and one per shift)
if helper.timestamp < oldest_timestamp:
oldest_timestamp = helper.timestamp
# and delete the old helper
helper.delete()
# update timestamp
remaining_helper.timestamp = oldest_timestamp
remaining_helper.save()
return remaining_helper
def check_merge_possible(self, ignore_helpers=None):
"""
Check if the merge is possible.
It is not possible when multiple helpers have the same shift. If we would merge those helpers,
we would "loose" allocated seats and this is probably not intended.
"""
shifts = []
for helper in self.helpers:
# if we have ignored_helpers, check that
if ignore_helpers and helper in ignore_helpers:
continue
# compare all shifts
for shift in helper.shifts.all():
if shift in shifts:
return False
else:
shifts.append(shift)
return True
| helfertool/helfertool | src/registration/forms/duplicates.py | duplicates.py | py | 4,063 | python | en | code | 52 | github-code | 36 |
21159707752 | # --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
#Code starts here
#Reading the file
data=pd.read_csv(path)
#Renaming a column
data.rename(columns={'Total':'Total_Medals'},inplace=True)
#Printing the first five columns
print(data.head(10))
#Code ends here
# --------------
#Code starts here
#Creating new column 'Better_Event'
data['Better_Event'] = np.where(data['Total_Summer']>data['Total_Winter'], 'Summer', 'Winter')
data['Better_Event'] =np.where(data['Total_Summer'] ==data['Total_Winter'],'Both',data['Better_Event'])
#Finding the value with max count in 'Better_Event' column
better_event=data['Better_Event'].value_counts().index.values[0]
#Printing the better event
print('Better_Event=', better_event)
# --------------
#Code starts here
#Subsetting the dataframe
top_countries=data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']]
#Dropping the last column
top_countries=top_countries[:-1]
#Function for top 10
def top_ten(data, col):
#Creating a new list
country_list=[]
#Finding the top 10 values of 'col' column
country_list= list((data.nlargest(10,col)['Country_Name']))
#Returning the top 10 list
return country_list
#Calling the function for Top 10 in Summer
top_10_summer=top_ten(top_countries,'Total_Summer')
print("Top 10 Summer:\n",top_10_summer, "\n")
#Calling the function for Top 10 in Winter
top_10_winter=top_ten(top_countries,'Total_Winter')
print("Top 10 Winter:\n",top_10_winter, "\n")
#Calling the function for Top 10 in both the events
top_10=top_ten(top_countries,'Total_Medals')
print("Top 10:\n",top_10, "\n")
#Extracting common country names from all three lists
common=list(set(top_10_summer) & set(top_10_winter) & set(top_10))
print('Common Countries :\n', common, "\n")
#Code ends here
# --------------
#Code starts here
top_10_summer=top_ten(top_countries,'Total_Summer')
top_10_winter=top_ten(top_countries,'Total_Winter')
top_10=top_ten(top_countries,'Total_Medals')
df=pd.DataFrame([top_10_summer,top_10_winter,top_10])
summer_df= data[data['Country_Name'].isin(top_10_summer)]
winter_df= data[data['Country_Name'].isin(top_10_winter)]
top_df= data[data['Country_Name'].isin(top_10)]
summer_df.plot.bar('Country_Name','Total_Summer')
winter_df.plot.bar('Country_Name','Total_Winter')
top_df.plot.bar('Country_Name','Total_Medals')
# --------------
#Code starts here
summer_df= data[data['Country_Name'].isin(top_10_summer)]
summer_df['Golden_Ratio']=data['Gold_Summer']/data['Total_Summer']
summer_max_ratio=summer_df['Golden_Ratio'].max()
summer_country_gold=summer_df.loc[summer_df['Golden_Ratio'].idxmax(),'Country_Name']
winter_df= data[data['Country_Name'].isin(top_10_winter)]
winter_df['Golden_Ratio']=data['Gold_Winter']/data['Total_Winter']
winter_max_ratio=winter_df['Golden_Ratio'].max()
winter_country_gold=winter_df.loc[winter_df['Golden_Ratio'].idxmax(),'Country_Name']
top_df= data[data['Country_Name'].isin(top_10)]
top_df['Golden_Ratio']=data['Gold_Total']/data['Total_Medals']
top_max_ratio=top_df['Golden_Ratio'].max()
top_country_gold=summer_df.loc[top_df['Golden_Ratio'].idxmax(),'Country_Name']
# --------------
#Code starts here
data_1=data[:-1]
data_1['Total_Points']=(data['Gold_Total']*3)+(data['Silver_Total']*2)+(data['Bronze_Total'])
most_points=max(data_1['Total_Points'])
best_country=data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name']
print(best_country)
# --------------
#Code starts here
#Subsetting the dataframe
best=data[data['Country_Name']==best_country]
best.reset_index(drop = True, inplace = True)
best=best[['Gold_Total','Silver_Total','Bronze_Total']]
#Plotting bar plot
best.plot.bar(stacked=True)
#Changing the x-axis label
plt.xlabel('United States')
#Changing the y-axis label
plt.ylabel('Medals Tally')
#Rotating the ticks of X-axis
plt.xticks(rotation=45)
#Updating the graph legend
l=plt.legend()
l.get_texts()[0].set_text('Gold_Total :' + str(best['Gold_Total'].values))
l.get_texts()[1].set_text('Silver_Total :' + str(best['Silver_Total'].values))
l.get_texts()[2].set_text('Bronze_Total :' + str(best['Bronze_Total'].values))
#Code ends here
| nagnath001/olympic-hero | code.py | code.py | py | 4,367 | python | en | code | 0 | github-code | 36 |
72655723623 | import os
import sys
import math
from tqdm import tqdm
import pandas as pd
import numpy as np
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from util import argparser
def permutation_test(df, column, n_permutations=100000, batch_size=1000):
    """One-sided sign-flip permutation test on the mean of *df[column]*.

    Random +/-1 signs are applied to the values in batches; the p-value is
    the fraction of permuted means that are >= the observed mean.

    Returns:
        (p_value, total_permutations) — the total is rounded up to a whole
        number of batches.
    """
    batch = min(batch_size, n_permutations)
    observed = df[column].mean().item()
    values = df[column].values
    # Tile the values so one matrix multiply handles a whole batch of sign flips.
    tiled = np.repeat(values[:, np.newaxis], batch, axis=1)
    n_batches = math.ceil(n_permutations / batch)
    total = n_batches * batch
    exceed = 0
    for _ in range(n_batches):
        signs = np.random.randint(0, 2, size=(values.shape[0], batch)) * 2 - 1
        batch_means = np.mean(tiled * signs, axis=0)
        exceed += (batch_means >= observed).sum()
    return exceed / total, total
def remove_unused_cols(df):
    """Drop bookkeeping columns not needed for the analysis, in place.

    Returns the same (mutated) frame for chaining convenience.
    """
    for col in ('item_id', 'position', 'family_size', 'family_weight', 'length'):
        del df[col]
    return df
def get_macroarea_counts(df):
    """Attach a 'macroarea_count' column: how many rows share each
    (concept_id, token_idx) pair — i.e. in how many macroareas the cell occurs.
    """
    keys = ['concept_id', 'token_idx']
    counts = (df[['macroarea'] + keys]
              .groupby(keys)
              .agg('count')
              .reset_index()
              .rename(columns={'macroarea': 'macroarea_count'}))
    return pd.merge(df, counts, on=keys)
def get_tokens_means(df):
    """Average results down to one row per (macroarea, concept, token position).

    Means are taken hierarchically — over languages, then families, then
    macroareas — and the macroarea occurrence counts are attached at the end.
    """
    levels = [
        ['language_id', 'family', 'macroarea', 'concept_id', 'concept_name', 'token', 'token_idx'],
        ['family', 'macroarea', 'concept_id', 'concept_name', 'token', 'token_idx'],
        ['macroarea', 'concept_id', 'concept_name', 'token', 'token_idx'],
    ]
    result = df
    for keys in levels:
        result = result.groupby(keys).agg('mean').reset_index()
    return get_macroarea_counts(result)
def main():
    """Run sign-flip permutation tests per (macroarea, concept, token position).

    Reads the per-seed averaged results, keeps only concept--token cells
    whose macroarea_count is 4 (presumably present in all four macroareas —
    confirm against the dataset), and writes per-cell p-values to
    tokens_results.tsv.
    """
    args = argparser.parse_args(csv_folder='cv')
    context = 'onehot'  # which MI column ('mi-onehot') the test is run on
    fname = os.path.join(args.rfolder_base, 'avg_seed_results_per_pos.tsv')
    df = pd.read_csv(fname, sep='\t')
    remove_unused_cols(df)
    # df_concepts = df.groupby(['concept_id', 'concept_name']).agg('mean').reset_index()
    df_tokens = get_tokens_means(df)
    df_tokens.set_index(['macroarea', 'concept_id', 'token_idx'], inplace=True)
    df_tokens = df_tokens.sort_index()
    df_tokens = df_tokens[df_tokens.macroarea_count == 4]
    # Placeholder columns, filled in cell by cell in the loop below.
    df_tokens['p_value'] = -1
    df_tokens['n_permutations'] = -1
    df_tokens['n_instances'] = -1
    for macroarea, concept_id, token_idx in tqdm(df_tokens.index.unique(), desc='Concept--token permutation tests'):
        idx = (macroarea, concept_id, token_idx)
        # Test on the raw (un-averaged) rows belonging to this cell.
        df_temp = df[(df.macroarea == macroarea) & (df.concept_id == concept_id) & (df.token_idx == token_idx)]
        p_val, n_permutations = permutation_test(df_temp, 'mi-' + context, n_permutations=100000)
        # p_val, n_permutations = permutation_test_recursive(df_temp, 'mi-' + context, n_permutations=100000)
        df_tokens.loc[idx, 'p_value'] = p_val
        df_tokens.loc[idx, 'n_permutations'] = n_permutations
        df_tokens.loc[idx, 'n_instances'] = df_temp.shape[0]
    fname = os.path.join(args.rfolder_base, 'tokens_results.tsv')
    df_tokens.to_csv(fname, sep='\t')
if __name__ == '__main__':
    # Script entry point — run the full per-token permutation analysis.
    main()
| rycolab/form-meaning-associations | src/h04_analysis/get_results_per_token.py | get_results_per_token.py | py | 3,405 | python | en | code | 0 | github-code | 36 |
def solution(arr):
    """Return the maximum number of adventurer groups that can be formed.

    Greedy strategy: sort fear levels ascending (in place — the caller's
    list is mutated, preserving the original behaviour) and close a group
    as soon as its size reaches the current member's fear level.
    """
    arr.sort()
    groups = 0
    size = 0
    for fear in arr:
        size += 1
        if size >= fear:
            groups += 1
            size = 0
    return groups
if __name__ == '__main__':
    # Example from the problem statement; expected output: 2 groups.
    arr = [2, 3, 1, 2, 2]
    print(solution(arr))
| RyuMyunggi/algorithm | algorithm/greedy/q1_모험가길드.py | q1_모험가길드.py | py | 304 | python | en | code | 0 | github-code | 36 |
6529596584 | """
The `test.unit.sha_api.mybottle.sha_api_bottle_test` module provides unit tests for the `ShaApiBottle` class in
`sha_api.mybottle.sha_api_bottle`.
Classes:
TestShaApiBottle: A unit test class for the `ShaApiBottle` class.
"""
import json
import tempfile
import unittest
from bottle import ConfigDict # pylint: disable=no-name-in-module
from mock import MagicMock, patch
from sha_api.mybottle.sha_api_bottle import global_config, ShaApiBottle
from sha_api.sha_apid import ROUTES
import sha_api
class TestShaApiBottle(unittest.TestCase):
    """
    A unit test class for the `ShaApiBottle` class.
    Methods:
        setUp: Unit test initialization.
        test_error_handler: Tests to ensure the error handler returns a JSON value.
        test_global_config: Tests to ensure we get the correct values from a `ConfigDict` instance.
        test_sha_api_constructor: Tests that `ShaApiBottle` class can be properly instantiated without throwing any
            exceptions.
    """
    def setUp(self):
        """
        Initializes the unit test global configs
        """
        self.maxDiff = None  # pylint: disable=invalid-name
        # Temp files stand in for the config file and sqlite db so no real
        # paths are touched; delete=False keeps them reopenable by name.
        self.config_sample = tempfile.NamedTemporaryFile(delete=False)
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        self.os_environ = {u'SHA_API_CONFIG': self.config_sample.name}
        self.configdict_ns = ConfigDict().load_dict(
            {
                u'sha_api': {
                    u'test_variable': u'test_value'
                },
                u'sqlite': {
                    u'dbfile': self.dbfile.name
                }
            }
        )
        # Mirror the ConfigDict contents into the on-disk INI-style config file.
        with open(self.config_sample.name, 'w') as fout:
            fout.write(u"[sha_api]\ntest_variable = test_value\n[sqlite]\ndbfile = %s" % self.dbfile.name)
    def test_error_handler(self):
        """
        Tests to ensure the error handler returns a JSON value.
        """
        res = MagicMock()
        res_json = json.dumps({u'err_msg': u'Response Body'})
        res.body = u'Response Body'
        api = ShaApiBottle(ROUTES)
        # The handler must wrap the raw response body into an err_msg JSON object.
        self.assertEqual(api.default_error_handler(res), res_json)
    def test_global_config(self):
        """
        Tests to ensure we get the correct values from a `ConfigDict` instance.
        """
        self.assertEqual(global_config(self.configdict_ns, u'sqlite', u'dbfile', u'Not Found'), self.dbfile.name)
    def test_sha_api_constructor(self):
        """
        Tests that `ShaApiBottle` instance can be properly instantiated without throwing any exceptions.
        """
        # Branch 1: Nothing throws an error and all goes well
        try:
            api = ShaApiBottle(ROUTES)  # pylint: disable=unused-variable
        except Exception as err:  # pylint: disable=broad-except
            self.fail(u'ShaApiBottle sha_api instance failed to initialize: %s' % str(err))
        # Branch 2: When routes object is not a list we get a proper assert error
        with self.assertRaises(AssertionError) as err:
            api = ShaApiBottle(dict())
        self.assertEqual(str(err.exception), u'routes must be an array of route dicts to be passed to bottle.route')
        # Branch 3: When routes object is a list but it doesnt contain dict items we get a proper assert error
        with self.assertRaises(AssertionError) as err:
            api = ShaApiBottle([False])
        self.assertEqual(str(err.exception), u'route must be a dict that can be passed to bottle.route')
        # Branch 4: When environment variable specifies config file it is properly loaded.
        # (patch.dict swaps in the temp config path for the duration of the block)
        with patch.dict(u'sha_api.mybottle.sha_api_bottle.os.environ', values=self.os_environ):
            self.assertEqual(self.os_environ[u'SHA_API_CONFIG'],
                             sha_api.mybottle.sha_api_bottle.os.environ.get(u'SHA_API_CONFIG'))
            api = ShaApiBottle(ROUTES)
            self.assertEqual(api.config.get(u'sha_api.test_variable'), u'test_value')
            self.assertEqual(api.config.get(u'sqlite.dbfile'), self.dbfile.name)
        # Branch 5: When any portion of the db initialization fails it should just bubble up the exception
        with patch(u'sha_api.mybottle.sha_api_bottle.sqlite3.connect') as sqlite_connect:
            self.assertEqual(sqlite_connect, sha_api.mybottle.sha_api_bottle.sqlite3.connect)
            sqlite_connect.side_effect = Exception(u'sqlite exception')
            with self.assertRaises(Exception) as err:
                api = ShaApiBottle()
            self.assertEqual(str(err.exception), u'sqlite exception')
| ju2wheels/python_sample | python/test/unit/sha_api/mybottle/sha_api_bottle_test.py | sha_api_bottle_test.py | py | 4,614 | python | en | code | 0 | github-code | 36 |
14860521204 | #!/usr/bin/env python
'''
对测试集数据进行测试,统计所有数据平均的RRMSE,SNR和CC值
'''
import argparse
import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from utility.data import EEGData
from utility.conv_tasnet_v1 import TasNet
from utility.network import ResCNN, Novel_CNN2, Novel_CNN, fcNN
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import math
import scipy.io as io
from scipy.signal import butter, lfilter
# Numerical floor used to avoid divide-by-zero / log(0) in the metrics below.
EPS = 1e-8
parser = argparse.ArgumentParser('Evaluate separation performance using Conv-TasNet')
parser.add_argument('--model_path', type=str,
                    help='Path to model file created by training')
# NOTE(review): type=list on argparse splits a CLI string into characters;
# only the default is usable as-is — confirm before passing --data_dir.
parser.add_argument('--data_dir', type=list, default=['./data/mixdata/foldtrain.txt',
                                                      './data/mixdata/foldtest.txt'],
                    help='directory of fold')
parser.add_argument('--use_cuda', type=int, default=1,
                    help='Whether use GPU')
parser.add_argument('--batch_size', default=128, type=int,
                    help='Batch size')
parser.add_argument('--num_workers', default=0, type=int,
                    help='Num_workers')
# Network architecture
parser.add_argument('--N', default=256, type=int,
                    help='Encode dim')
parser.add_argument('--B', default=64, type=int,
                    help='Feature dim')
parser.add_argument('--sr', default=512, type=int,
                    help='Sample rate')
parser.add_argument('--L', default=16, type=int,
                    help='Length of the filters in samples (16=16ms at 1kHZ)')
parser.add_argument('--X', default=6, type=int,
                    help='Number of convolutional blocks in each repeat')
parser.add_argument('--R', default=3, type=int,
                    help='Number of repeats')
parser.add_argument('--P', default=3, type=int,
                    help='Kernel size in convolutional blocks')
parser.add_argument('--C', default=1, type=int,
                    help='Number of speakers')
def calc_corr(a, b):
    """Pearson correlation coefficient between two equal-length sequences.

    The 1/n factors of covariance and variance cancel, so raw sums are used.
    Raises ZeroDivisionError when either sequence has zero variance.
    """
    mean_a = sum(a) / len(a)
    mean_b = sum(b) / len(b)
    # Numerator: covariance times n (the n cancels with the denominator).
    num = sum((x - mean_a) * (y - mean_b) for x, y in zip(a, b))
    # Denominator: sqrt of the product of the (times-n) variances.
    den = math.sqrt(sum((x - mean_a) ** 2 for x in a) * sum((y - mean_b) ** 2 for y in b))
    return num / den
def butter_lowpass(data, cutoff, fs, order=5):
    """Forward-filter *data* with an order-*order* Butterworth low-pass filter.

    Args:
        data: signal to filter (filtered along the last axis by lfilter).
        cutoff: cut-off frequency in Hz.
        fs: sampling rate in Hz.
        order: filter order (default 5).

    Returns:
        The filtered signal as a numpy array.
    """
    # Express the cut-off as a fraction of the Nyquist frequency (fs / 2).
    normalized = cutoff / (0.5 * fs)
    b, a = butter(order, normalized, btype='low', analog=False)
    return lfilter(b, a, data)
def evaluate(args, snr_test=1):
    """Evaluate the denoising model on test samples at one SNR level.

    Loads the checkpoint at args.model_path, runs the test set slice whose
    SNR equals *snr_test*, saves the estimates to result/result_<snr>.mat,
    and returns per-sample metric lists (RRMSE, CC, output SNR, RRMSEspec).
    """
    # Load model
    package = torch.load(args.model_path)
    model = TasNet(args.N, args.B, args.sr, args.L, args.X, args.R, args.P, args.C)
    # model = fcNN(lenth=1024)
    # model = Novel_CNN2(len_signal=1024)
    # model = ResCNN(1024)
    model.load_state_dict(package['model'])
    # print(model)
    # for name in model.state_dict():
    # print(name)
    # print('encoder1:',model.state_dict()['encoder1.weight'])
    model.eval()
    if args.use_cuda:
        model.cuda()
    # Load data
    f_test = np.load('../eegdenoisenet/testdata512/test_eeg.npz')
    noiseEEG_test, EEG_test, SNRs_test = f_test['noiseEEG_test'], f_test['EEG_test'], f_test['SNRs_test']
    # Select only the samples recorded at the requested SNR level.
    idx = np.where(SNRs_test == snr_test)[0]
    # Comment out the next two lines to evaluate all SNR levels at once.
    noiseEEG_test = noiseEEG_test[idx]
    EEG_test = EEG_test[idx]
    evaluate_dataset = EEGData(noiseEEG_test, EEG_test)
    evaluate_loader = DataLoader(evaluate_dataset, batch_size=args.batch_size, shuffle=False,
                                 num_workers=args.num_workers)
    with torch.no_grad():
        Snr = 1.5  # NOTE(review): unused — confirm before removing
        RRMSEspec_total = []
        RRMSE_total = []
        CC_total = []
        SNR = []
        estimate_all = []
        for i, data in enumerate(evaluate_loader):
            # Get batch data
            eeg_mix = data['eeg_mix'].type(torch.float32)
            # emg = data['emg'].type(torch.float32)
            eeg_clean = data['eeg_clean'].type(torch.float32)
            # Low-passed copy of the mixture is fed to the model alongside the raw mix.
            lpeeg_mix = butter_lowpass(eeg_mix, 20, 500)
            # hpeeg_mix = butter_highpass(eeg_mix,100,500)
            eeg_mix = eeg_mix.type(torch.float32)
            lpeeg_mix = torch.from_numpy(lpeeg_mix).type(torch.float32)
            # hpeeg_mix = torch.from_numpy(hpeeg_mix).type(torch.float32)
            # lpeeg_clean = data['lpeeg_clean']
            # Forward
            if args.use_cuda:
                eeg_clean = eeg_clean.cuda()
                lpeeg_mix = lpeeg_mix.cuda()
                eeg_mix = eeg_mix.cuda()
            estimate_source1, estimate_source2 = model(lpeeg_mix, eeg_mix)
            # estimate_source2 = model(eeg_mix)
            # Output SNR of the estimate against the clean reference, in dB.
            e_noise = eeg_clean - estimate_source2.squeeze()
            snr = 10 * torch.log10(torch.sum(eeg_clean ** 2, dim=1) / (torch.sum(e_noise ** 2, dim=1) + EPS) + EPS)
            estimate2 = estimate_source2.cpu().numpy().squeeze()
            # emg = emg.numpy()
            eeg_clean = eeg_clean.cpu().numpy()
            estimate_all.append(estimate2)
            # eeg_mix = eeg_mix.numpy()
            for j in range(estimate2.shape[0]):
                eeg_snr = snr[j].item()
                SNR.append(eeg_snr)
                e_noise = eeg_clean[j, :] - estimate2[j, :]
                RRMSE = np.sqrt(np.sum(e_noise ** 2) / (np.sum(eeg_clean[j, :] ** 2) + EPS) + EPS)
                # TODO
                # Spectral RRMSE from the PSDs of the clean and estimated signals.
                Pxx_eeg, _ = mlab.psd(eeg_clean[j, :], NFFT=1024)
                Pxx_estimate, _ = mlab.psd(estimate2[j, :], NFFT=1024)
                # Pxx_eeg = Pxx_eeg[:201] # TODO
                # Pxx_estimate = Pxx_estimate[:201] # TODO
                RRMSEspec = np.sqrt(np.sum((Pxx_estimate - Pxx_eeg) ** 2) / (np.sum(Pxx_eeg ** 2) + EPS) + EPS)
                eeg_RRMSE = RRMSE
                # Correlation coefficient between clean and estimate.
                cc = calc_corr(eeg_clean[j, :], estimate2[j, :])
                RRMSEspec_total.append(RRMSEspec)
                RRMSE_total.append(eeg_RRMSE)
                CC_total.append(cc)
        estimate_all = np.vstack(estimate_all)
        io.savemat(os.path.join('result', f'result_{snr_test}.mat'),
                   {'estimate_EEG': estimate_all, 'eeg_mix': noiseEEG_test, 'eeg_clean': EEG_test})
    # plt.psd
    return RRMSE_total, CC_total, SNR, RRMSEspec_total
if __name__ == '__main__':
    args = parser.parse_args()
    # The checkpoint path is pinned here, overriding anything parsed above.
    args.model_path = '/mnt/DEV/han/eeg/DASTCN_grnFFT/checkpoint/EEGARNet_model/epoch89.pth.tar'
    out_dir = 'result'
    print(args)
    meanRRMSEs = []
    meanRRMSEspecs = []
    meanCCs = []
    meanSNRs = []
    # Evaluate at each test SNR level from -7 dB to 2 dB in 1 dB steps.
    for snr in np.linspace(-7, 2, 10):
        print(snr)
        RRMSE_total, CC_total, SNR, RRMSEspec_total = evaluate(args, snr_test=snr)
        meanRRMSE = np.round(np.mean(RRMSE_total), 4)
        meanRRMSEspec = np.round(np.mean(RRMSEspec_total), 4)
        meanSNR = np.round(np.mean(SNR), 4)
        varRRMSE = np.round(np.var(RRMSE_total), 4)
        meanCC = np.round(np.mean(CC_total), 4)
        varCC = np.round(np.var(CC_total), 4)
        # BUG FIX: the first label wrongly read 'meanRRMSEspec:' for both
        # lines, making the printed report ambiguous.
        print('meanRRMSE:', meanRRMSE)
        print('meanRRMSEspec:', meanRRMSEspec)
        print('meanCC:', meanCC)
        print('meanSNR:', meanSNR)
        print('*' * 10)
        meanRRMSEs.append(meanRRMSE)
        meanCCs.append(meanCC)
        meanSNRs.append(meanSNR)
        meanRRMSEspecs.append(meanRRMSEspec)
    os.makedirs(out_dir, exist_ok=True)
    io.savemat(os.path.join(out_dir, 'result_perSNR.mat'), {'meanRRMSEs': meanRRMSEs,
                                                            'meanCCs': meanCCs,
                                                            'meanSNRs': meanSNRs,
                                                            'meanRRMSEspec': meanRRMSEspecs})
| BaenRH/DSATCN | code/evaluate_perSNR.py | evaluate_perSNR.py | py | 8,151 | python | en | code | 0 | github-code | 36 |
5297089787 | import asyncio
import json
import logging
import logging.config
from dataclasses import dataclass
import yaml
from web3 import Web3
from web3._utils.filters import LogFilter
@dataclass
class FilterWrapper:
    """Bundles one oracle's AnswerUpdated event filter with logging metadata."""

    event_filter: LogFilter  # web3 filter yielding new AnswerUpdated events
    pair_name: str  # currency-pair label this oracle reports on
    oracle_address: str  # address of the watched oracle contract
    logger: logging.Logger  # logger shared by all oracles of the same pair
class BlockchainMonitor:
    """Polls configured price oracles and logs every AnswerUpdated event.

    Configuration is read at construction time from ``spec.yaml`` (connection
    URL and currency pairs), ``logging.yaml`` (logger setup) and ``abi.json``
    (the contract ABI).
    """

    def __init__(self):
        with open("spec.yaml", "r") as handle:
            spec = yaml.safe_load(handle)
        with open("logging.yaml", "r") as handle:
            logging.config.dictConfig(yaml.safe_load(handle))
        with open('abi.json', "r") as handle:
            abi = json.load(handle)
        w3 = Web3(Web3.HTTPProvider(spec['connection_settings']['alchemy_url']))
        self.filters = []
        loggers = {}
        for currency_pair in spec['currency_pairs']:
            for pair_name, pair_spec in currency_pair.items():
                # One logger per currency pair, shared by all of its oracles.
                pair_logger = loggers.setdefault(pair_name, logging.getLogger(pair_name))
                for oracle_address in pair_spec['oracles_addresses']:
                    contract = w3.eth.contract(address=oracle_address, abi=abi)
                    new_events = contract.events.AnswerUpdated.createFilter(fromBlock='latest')
                    self.filters.append(
                        FilterWrapper(new_events, pair_name, oracle_address, pair_logger))

    @staticmethod
    def __handle_event(event, filter_wrapper):
        """Log one AnswerUpdated event through the wrapper's logger."""
        filter_wrapper.logger.info(
            "Price changes in pair {}. Oracle address: {}. Current price: {}, block number: {}, tx hash: {}"
            .format(filter_wrapper.pair_name,
                    filter_wrapper.oracle_address,
                    event.args.current,
                    event.blockNumber,
                    event.transactionHash.hex()))

    async def __monitor(self, filter_wrapper, poll_interval):
        """Poll one event filter forever, logging each new entry."""
        filter_wrapper.logger.info("Start monitor pair {}. Oracle address: {}".format(
            filter_wrapper.pair_name, filter_wrapper.oracle_address))
        while True:
            for event in filter_wrapper.event_filter.get_new_entries():
                self.__handle_event(event, filter_wrapper)
            await asyncio.sleep(poll_interval)

    def monitor(self):
        """Schedule one polling task per filter and run the loop forever."""
        loop = asyncio.get_event_loop()
        try:
            for filter_wrapper in self.filters:
                asyncio.ensure_future(self.__monitor(filter_wrapper, 10))
            loop.run_forever()
        finally:
            loop.close()
if __name__ == "__main__":
BlockchainMonitor().monitor()
| dzahbarov/blockchain_monitor | monitor.py | monitor.py | py | 2,721 | python | en | code | 0 | github-code | 36 |
15860273253 | from __future__ import division
from builtins import str
import numpy as np
import pandas as pd
import seaborn as sns
from .helpers import *
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
def plot(results, subjgroup=None, subjname='Subject Group', listgroup=None,
listname='List', subjconds=None, listconds=None, plot_type=None,
plot_style=None, title=None, legend=True, xlim=None, ylim=None,
save_path=None, show=True, ax=None, **kwargs):
"""
General plot function that groups data by subject/list number and performs analysis.
Parameters
----------
results : quail.FriedEgg
Object containing results
subjgroup : list of strings or ints
String/int variables indicating how to group over subjects. Must be
the length of the number of subjects
subjname : string
Name of the subject grouping variable
listgroup : list of strings or ints
String/int variables indicating how to group over list. Must be
the length of the number of lists
listname : string
Name of the list grouping variable
subjconds : list
List of subject hues (str) to plot
listconds : list
List of list hues (str) to plot
plot_type : string
Specifies the type of plot. If list (default), the list groupings (listgroup)
will determine the plot grouping. If subject, the subject groupings
(subjgroup) will determine the plot grouping. If split (currenty just
works for accuracy plots), both listgroup and subjgroup will determine
the plot groupings
plot_style : string
Specifies the style of the plot. This currently works only for
accuracy and fingerprint plots. The plot style can be bar (default for
accruacy plot), violin (default for fingerprint plots) or swarm.
title : string
The title of the plot
legend : bool
If true (default), a legend is plotted.
ylim : list of numbers
A ymin/max can be specified by a list of the form [ymin, ymax]
xlim : list of numbers
A xmin/max can be specified by a list of the form [xmin, xmax]
save_path : str
Path to save out figure. Include the file extension, e.g.
save_path='figure.pdf'
show : bool
If False, do not show figure, but still return ax handle (default True).
ax : Matplotlib.Axes object or None
A plot object to draw to. If None, a new one is created and returned.
Returns
----------
ax : matplotlib.Axes.Axis
An axis handle for the figure
"""
def plot_acc(data, plot_style, plot_type, listname, subjname, **kwargs):
# set defaul style to bar
plot_style = plot_style if plot_style is not None else 'bar'
plot_type = plot_type if plot_type is not None else 'list'
if plot_style is 'bar':
plot_func = sns.barplot
elif plot_style is 'swarm':
plot_func = sns.swarmplot
elif plot_style is 'violin':
plot_func = sns.violinplot
if plot_type is 'list':
ax = plot_func(data=data, x=listname, y="Accuracy", **kwargs)
elif plot_type is 'subject':
ax = plot_func(data=data, x=subjname, y="Accuracy", **kwargs)
elif plot_type is 'split':
ax = plot_func(data=data, x=subjname, y="Accuracy", hue=listname, **kwargs)
return ax
def plot_temporal(data, plot_style, plot_type, listname, subjname, **kwargs):
# set default style to bar
plot_style = plot_style if plot_style is not None else 'bar'
plot_type = plot_type if plot_type is not None else 'list'
if plot_style is 'bar':
plot_func = sns.barplot
elif plot_style is 'swarm':
plot_func = sns.swarmplot
elif plot_style is 'violin':
plot_func = sns.violinplot
if plot_type is 'list':
ax = plot_func(data=data, x=listname, y="Temporal Clustering Score", **kwargs)
elif plot_type is 'subject':
ax = plot_func(data=data, x=subjname, y="Temporal Clustering Score", **kwargs)
elif plot_type is 'split':
ax = plot_func(data=data, x=subjname, y="Temporal Clustering Score", hue=listname, **kwargs)
return ax
def plot_fingerprint(data, plot_style, plot_type, listname, subjname, **kwargs):
# set default style to violin
plot_style = plot_style if plot_style is not None else 'violin'
plot_type = plot_type if plot_type is not None else 'list'
if plot_style is 'bar':
plot_func = sns.barplot
elif plot_style is 'swarm':
plot_func = sns.swarmplot
elif plot_style is 'violin':
plot_func = sns.violinplot
if plot_type is 'list':
ax = plot_func(data=tidy_data, x="Feature", y="Clustering Score", hue=listname, **kwargs)
elif plot_type is 'subject':
ax = plot_func(data=tidy_data, x="Feature", y="Clustering Score", hue=subjname, **kwargs)
else:
ax = plot_func(data=tidy_data, x="Feature", y="Clustering Score", **kwargs)
return ax
def plot_fingerprint_temporal(data, plot_style, plot_type, listname, subjname, **kwargs):
# set default style to violin
plot_style = plot_style if plot_style is not None else 'violin'
plot_type = plot_type if plot_type is not None else 'list'
if plot_style is 'bar':
plot_func = sns.barplot
elif plot_style is 'swarm':
plot_func = sns.swarmplot
elif plot_style is 'violin':
plot_func = sns.violinplot
order = list(tidy_data['Feature'].unique())
if plot_type is 'list':
ax = plot_func(data=data, x="Feature", y="Clustering Score", hue=listname, order=order, **kwargs)
elif plot_type is 'subject':
ax = plot_func(data=data, x="Feature", y="Clustering Score", hue=subjname, order=order, **kwargs)
else:
ax = plot_func(data=data, x="Feature", y="Clustering Score", order=order, **kwargs)
return ax
def plot_spc(data, plot_style, plot_type, listname, subjname, **kwargs):
plot_type = plot_type if plot_type is not None else 'list'
if plot_type is 'subject':
ax = sns.lineplot(data = data, x="Position", y="Proportion Recalled", hue=subjname, **kwargs)
elif plot_type is 'list':
ax = sns.lineplot(data = data, x="Position", y="Proportion Recalled", hue=listname, **kwargs)
ax.set_xlim(0, data['Position'].max())
return ax
def plot_pnr(data, plot_style, plot_type, listname, subjname, position, list_length, **kwargs):
plot_type = plot_type if plot_type is not None else 'list'
if plot_type is 'subject':
ax = sns.lineplot(data = data, x="Position", y='Probability of Recall: Position ' + str(position), hue=subjname, **kwargs)
elif plot_type is 'list':
ax = sns.lineplot(data = data, x="Position", y='Probability of Recall: Position ' + str(position), hue=listname, **kwargs)
ax.set_xlim(0,list_length-1)
return ax
def plot_lagcrp(data, plot_style, plot_type, listname, subjname, **kwargs):
plot_type = plot_type if plot_type is not None else 'list'
if plot_type is 'subject':
ax = sns.lineplot(data=data[data['Position']<0], x="Position", y="Conditional Response Probability", hue=subjname, **kwargs)
if 'ax' in kwargs:
del kwargs['ax']
sns.lineplot(data=data[data['Position']>0], x="Position", y="Conditional Response Probability", hue=subjname, ax=ax, legend=False, **kwargs)
elif plot_type is 'list':
ax = sns.lineplot(data=data[data['Position']<0], x="Position", y="Conditional Response Probability", hue=listname, **kwargs)
if 'ax' in kwargs:
del kwargs['ax']
sns.lineplot(data=data[data['Position']>0], x="Position", y="Conditional Response Probability", hue=listname, ax=ax, legend=False, **kwargs)
ax.set_xlim(-5,5)
return ax
# if no grouping, set default to iterate over each list independently
subjgroup = subjgroup if subjgroup is not None else results.data.index.levels[0].values
listgroup = listgroup if listgroup is not None else results.data.index.levels[1].values
if subjconds:
# make sure its a list
if type(subjconds) is not list:
subjconds=[subjconds]
# slice
idx = pd.IndexSlice
results.data = results.data.sort_index()
results.data = results.data.loc[idx[subjconds, :],:]
# filter subjgroup
subjgroup = filter(lambda x: x in subjconds, subjgroup)
if listconds:
# make sure its a list
if type(listconds) is not list:
listconds=[listconds]
# slice
idx = pd.IndexSlice
results.data = results.data.sort_index()
results.data = results.data.loc[idx[:, listconds],:]
# convert to tiny and format for plotting
tidy_data = format2tidy(results.data, subjname, listname, subjgroup, analysis=results.analysis, position=results.position)
if not ax==None:
kwargs['ax']=ax
#plot!
if results.analysis=='accuracy':
ax = plot_acc(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
elif results.analysis=='temporal':
ax = plot_temporal(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
elif results.analysis=='fingerprint':
ax = plot_fingerprint(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
elif results.analysis=='fingerprint_temporal':
ax = plot_fingerprint_temporal(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
elif results.analysis=='spc':
ax = plot_spc(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
elif results.analysis=='pfr' or results.analysis=='pnr':
ax = plot_pnr(tidy_data, plot_style, plot_type, listname, subjname, position=results.position, list_length=results.list_length, **kwargs)
elif results.analysis=='lagcrp':
ax = plot_lagcrp(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
else:
raise ValueError("Did not recognize analysis.")
# add title
if title:
plt.title(title)
if legend is False:
try:
ax.legend_.remove()
except:
pass
if xlim:
plt.xlim(xlim)
if ylim:
plt.ylim(ylim)
if save_path:
mpl.rcParams['pdf.fonttype'] = 42
plt.savefig(save_path)
return ax
| ContextLab/quail | quail/plot.py | plot.py | py | 10,790 | python | en | code | 18 | github-code | 36 |
26465084457 | import pigpio
from time import sleep
pi = pigpio.pi()
#set GPIO pins
channel = 17
light_on = 10; #seconds
frequency = 100000; #seconds
pi.set_mode(17,pigpio.INPUT)
pi.set_PWM_dutycycle(17,128)
pi.set_PWM_frequency(17,5000)
print(pi.get_PWM_frequency(17))
sleep(light_on)
pi.write(channel,0)
| Naveen175py/IC231_Lab2 | task4.py | task4.py | py | 312 | python | en | code | 0 | github-code | 36 |
72398212263 | from django import forms
from django.forms import Textarea
from .models import Comment, Post
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = ("text", "group", "image")
widgets = {
"text": Textarea(
attrs={"class": "form-control", "placeholder": "Текст нового поста"}
),
}
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ("text",)
widgets = {
"text": Textarea(
attrs={
"class": "form-control",
"placeholder": "Текст нового комментарий",
}
),
}
| EISerova/yatube-social-network | yatube/posts/forms.py | forms.py | py | 738 | python | en | code | 0 | github-code | 36 |
42600266577 | # Копирование найденных надежных и ненадежных аудиозаписей по Социуму за 2017-2018 на архивный диск
import openpyxl, traceback
import os, string, sys, shutil
from collections import Counter
from lib import l, fine_snils_, read_config
FIND_CATALOG = '/media/da3/asteriskBeagleAl/'  # NOTE(review): only referenced from commented-out code below — confirm before removing
#CHANGE_ON_WINDOWS = 'Z:/'
#OUTPUT_CATALOG = 'O:/Документы/Записи/'
OUTPUT_CATALOG = '/media/da3/backup/'  # archive-disk root the recordings are copied into
TRUSTREESTR = 'Надежные.xlsx'  # Excel reestr of reliable recordings
PROBLEMREESTR = 'Остальные.xlsx'  # Excel reestr of the remaining recordings
REESTRS = '/home/da3/Beagle/потеряшкиАудиозаписи/реестры/'  # NOTE(review): unused in this script — confirm before removing
def isSNILS(snils):
    """Return True when *snils* looks like a formatted SNILS number.

    Expected layout after whitespace normalisation: ``XXX-XXX-XXX CC`` —
    dashes at positions 3 and 7 and a space or underscore at position 11
    before the check digits.
    """
    if snils is None:
        return False
    # Collapse any run of whitespace to single spaces and trim the ends.
    # (The original replace-chain only collapsed up to ~8 consecutive spaces
    # and only handled '\n'; split/join handles any whitespace run.)
    t = ' '.join(str(snils).split())
    if len(t) > 11:
        return t[3] == '-' and t[7] == '-' and t[11] in (' ', '_')
    return False
def isAudio(audio):
    """Classify *audio* (a path-like value) by its record-name format.

    Returns a two-element list: ``['длинный', name]`` for
    dd.mm.yyyy_hh-mm-ss style names, ``['короткий', name]`` for
    yyyymmddhhmmss_digits style names (25 digits among the first 26
    characters with '_' at index 14), and ``['', audio]`` otherwise.
    *name* is the last path component with a trailing dot and a .mp3/.wav
    extension stripped.
    """
    if audio is not None:
        t = str(audio).replace('\n', ' ').replace('  ', ' ').replace('  ', ' ').replace('  ', ' ').strip()
        name = t.split('/')[-1]
        if name.endswith('.'):
            name = name[:-1]
        if name.endswith('.mp3') or name.endswith('.wav'):
            name = name[:-4]
        if len(name) > 26:
            long_fmt = (name[2] == '.' and name[5] == '.' and name[10] == '_'
                        and name[13] in '-_' and name[16] in '-_')
            if long_fmt:
                return ['длинный', name]
            digit_count = sum(1 for ch in name[:26] if ch in string.digits)
            if digit_count == 25 and name[14] == '_':
                return ['короткий', name]
    return ['', audio]
def isSocium(audio):
    """Return True when the file name encodes a 2017 or 2018 recording.

    Recognises the same two naming schemes as isAudio (long
    dd.mm.yyyy_hh-mm-ss and short yyyymmddhhmmss_digits) but does NOT
    strip the extension, and additionally requires the year field to be
    '2017' or '2018'.
    """
    if audio is None:
        return False
    t = str(audio).replace('\n', ' ').replace('  ', ' ').replace('  ', ' ').replace('  ', ' ').strip()
    name = t.split('/')[-1]
    if len(name) <= 26:
        return False
    if (name[2] == '.' and name[5] == '.' and name[10] == '_'
            and name[13] in '-_' and name[16] in '-_'
            and name[6:10] in ('2017', '2018')):
        return True
    digit_count = sum(1 for ch in name[:26] if ch in string.digits)
    if digit_count == 25 and name[14] == '_' and name[:4] in ('2017', '2018'):
        return True
    return False
def full_tb_write(*args):
    """Print a traceback to stdout for the active or supplied exception.

    Accepted call shapes: no args (inside an ``except`` block), a single
    exception instance, or a full ``(type, value, traceback)`` triple.
    Any other arity is silently ignored, matching the original behaviour.
    """
    if not args:
        _, _, tb = sys.exc_info()
    elif len(args) == 3:
        _, _, tb = args
    elif len(args) == 1:
        tb = args[0].__traceback__
    else:
        return
    traceback.print_tb(tb, file=sys.stdout)
def _collect_reestr(reestr_path):
    """Read one Excel reestr and index its audio records by SNILS.

    Returns ``(snilses, snilses_short)``: *snilses* maps each SNILS (as
    normalised by the project helper ``l``) to every distinct cell value
    (full audio path) found for it; *snilses_short* maps each SNILS to the
    distinct normalised file names (via isAudio) of the last row seen.
    """
    snilses = {}
    snilses_short = {}
    wb = openpyxl.load_workbook(filename=reestr_path, read_only=True)
    for sheetname in wb.sheetnames:
        sheet = wb[sheetname]
        if not sheet.max_row:
            # openpyxl sometimes cannot read dimensions of oddly-saved files.
            print('Файл', reestr_path, 'Excel некорректно сохранен OpenPyxl. Откройте и пересохраните его')
            continue
        for row in sheet.rows:
            snils = l(row[0].value)
            shorts = []
            for k, cell in enumerate(row):
                if k and cell.value:  # skip the SNILS column itself and empties
                    short = isAudio(cell.value)[1]
                    if short not in shorts:
                        shorts.append(short)
                    if snils in snilses:
                        if cell.value not in snilses[snils]:
                            snilses[snils].append(cell.value)
                    else:
                        snilses[snils] = [cell.value]
            snilses_short[snils] = shorts
    return snilses, snilses_short


def _copy_reestr(snilses, out_subdir):
    """Copy each SNILS's audio files into OUTPUT_CATALOG/<out_subdir>/<snils>/.

    Duplicate short names get a '-<n>' suffix, where n is how many copies
    of that name were already processed. Each SNILS is retried until a
    whole pass completes without raising (original best-effort behaviour).
    """
    for i, snils in enumerate(snilses):
        done = False
        while not done:
            try:
                target_dir = OUTPUT_CATALOG + out_subdir + '/' + fine_snils_(snils)
                if not os.path.exists(target_dir):
                    os.mkdir(target_dir)
                seen = []
                for audiofile in snilses[snils]:
                    short = isAudio(audiofile)[1]
                    if os.path.exists(audiofile):
                        if short in seen:
                            dest = target_dir + '/' + short + '-' + str(Counter(seen)[short]) + audiofile[-4:]
                        else:
                            dest = target_dir + '/' + short + audiofile[-4:]
                        if not os.path.exists(dest):
                            shutil.copy(audiofile, dest)
                        seen.append(short)
                    else:
                        print('!!! Нет исходного файла', audiofile)
                done = True
            except Exception as e:
                full_tb_write(e)
                print('Ошибка - пробуем ещё раз')
        print('Скопировано', i, 'из', len(snilses))


# FIX: the trusted and remaining sections were ~50 duplicated lines each;
# they are now two calls to shared helpers. Module-level names are kept
# for backward compatibility.
snilsesTrust, snilsesTrustShort = _collect_reestr(TRUSTREESTR)
_copy_reestr(snilsesTrust, 'Выгрузки')

print('\nТеперь Остальные\n')

snilsesProblem, snilsesProblemShort = _collect_reestr(PROBLEMREESTR)
_copy_reestr(snilsesProblem, 'Остальные')
| dekarh/asocium | asociumWrite.py | asociumWrite.py | py | 9,480 | python | en | code | 0 | github-code | 36 |
14141185614 | import requests
import json
import urllib.parse
from django.conf import settings
def current_place():
    """Geolocate the current machine by IP via the geojs.io service.

    Returns:
        tuple[str, str]: (latitude, longitude) of the current public IP,
        as the decimal strings returned by the API.
    """
    response = requests.get("https://get.geojs.io/v1/ip/geo.json")
    geo = response.json()
    return geo["latitude"], geo["longitude"]
def get_movie_theatre(latitude, longitude):
    """Return movie theatres within 2 km of the given coordinates using the
    Google Places Nearby Search API.

    Args:
        latitude (str): latitude of the current location.
        longitude (str): longitude of the current location.

    Returns:
        list[tuple[str, str]]: (name, name) pairs for each theatre found;
        empty when the API status is not "OK".
    """
    # Fixes in this revision: the URL was hand-concatenated without any
    # escaping, a data payload was sent with a GET request, and a debug
    # print leaked the result list to stdout. requests now builds and
    # percent-encodes the query string.
    params = {
        "language": "ja",
        "location": "{},{}".format(latitude, longitude),
        "radius": 2000,
        "type": "movie_theater",
        "key": settings.API_MAP,
    }
    response = requests.get(
        "https://maps.googleapis.com/maps/api/place/nearbysearch/json",
        params=params,
    )
    json_dict = response.json()
    movie_theatre = []
    if json_dict["status"] == "OK":
        for theatre in json_dict["results"]:
            movie_theatre.append((theatre["name"], theatre["name"]))
    return movie_theatre
def search_theatre(search_text):
    """Search movie theatres matching *search_text* via the Google Places
    Text Search API.

    Args:
        search_text (str): free-text query, e.g. a theatre or area name.

    Returns:
        list[tuple[str, str]]: (name, name) pairs for each matching theatre;
        empty when the API status is not "OK".
    """
    # Fixes in this revision: the docstring was a copy-paste describing
    # latitude/longitude, and the query text was manually quoted into a
    # hand-built URL. requests' params handles the encoding in one place.
    params = {
        "language": "ja",
        "type": "movie_theater",
        "query": search_text,
        "key": settings.API_MAP,
    }
    response = requests.get(
        "https://maps.googleapis.com/maps/api/place/textsearch/json",
        params=params,
    )
    json_dict = response.json()
    movie_theatre = []
    if json_dict["status"] == "OK":
        for theatre in json_dict["results"]:
            movie_theatre.append((theatre["name"], theatre["name"]))
    return movie_theatre
if __name__ == "__main__":
    # Manual smoke test: geolocate this machine, then exercise both
    # theatre-lookup helpers against the Google Places API.
    latitude, longitude = current_place()
    get_movie_theatre(latitude, longitude)
    search_theatre("TOHO 新宿")
search_theatre("TOHO 新宿") | nicenaito/theatreCheckIn | theatreplaces.py | theatreplaces.py | py | 2,354 | python | ja | code | 0 | github-code | 36 |
6846890085 | import pandas as pd
import sqlite3
def connect_sqlite(db_file):
    """Open *db_file* and return an open ``(connection, cursor)`` pair.

    Rows are returned as :class:`sqlite3.Row`, so columns are addressable
    by name. The caller owns the connection and must close it.
    """
    # The original wrapped this in `with sqlite3.connect(...)`, which only
    # commits an (empty) transaction on exit -- the sqlite3 connection
    # context manager never closes the connection -- so the `with` was
    # misleading and is dropped.
    conn = sqlite3.connect(db_file)
    conn.row_factory = sqlite3.Row
    cur = conn.cursor()
    return conn, cur
def get_dataframe(db_file, sql):
    """Run *sql* against the SQLite database at *db_file* and return the
    result as a pandas DataFrame.

    The connection and its cursor are always closed, even when the query
    fails -- the original leaked both on error.
    """
    conn, cur = connect_sqlite(db_file)
    try:
        return pd.read_sql(sql, conn)
    finally:
        cur.close()
        conn.close()
def req_count(df):
    '''
    Count requests per time bucket (unfinished stub).

    df : pandas dataframe
    '''
    # NOTE(review): this expression has no effect, and 'datatime' looks like
    # a typo for 'datetime' -- the function body was never completed. The
    # no-op line is kept byte-for-byte until the intended behaviour is known.
    df['datatime']
if __name__ == '__main__' :
    # Ad-hoc driver: load a day's log database and plot it.
    db_file = 'demklog_2017-01-21'
    sql = 'select * from demklog '
    df = get_dataframe(db_file, sql)
    #print df.head()
    # NOTE(review): df.plot() alone does not display a window without a
    # matplotlib show()/interactive backend -- confirm how this is run.
    df.plot()
#    df_tm = pd.TimeSeries( pd. to_datetime( df['timestamp'] ) )
#    print 'type df_tm=', type(df_tm),df_tm.head()
#    ddtest = df['timestamp']
#
#    # dd1.resample('M')
#    print type(ddtest), ddtest [800:810]
# print ddtest.resample('H') | tcref/helloworld | helloworld/tcref/src/main/webpy_rest/check_db/statistics.py | statistics.py | py | 895 | python | en | code | 0 | github-code | 36 |
28686029068 | import json
import pandas as pd
from os.path import join
# Root of the repository, relative to this script's location.
PROJECT_PATH = '../../'
# Load the raw thread-event log. The file has no header row; RootEventId is a
# nullable integer (root events of a thread have no parent).
event = pd.read_csv(join(PROJECT_PATH, 'data', 'LinearSearchThreadEvent.csv'),
                    header=None,
                    names=['Id', 'RootEventId', 'UserIdentifier', 'CreationDate', 'DiffSeconds', 'EventSource',
                           'EventTarget',
                           'Referrer', 'Url', 'Query', 'FragmentIdentifier'],
                    dtype={
                        'Id': 'int64',
                        'RootEventId': pd.Int64Dtype(),
                        'UserIdentifier': 'str',
                        'CreationDate': 'str',
                        'DiffSeconds': pd.Int64Dtype(),
                        'EventSource': 'str',
                        'EventTarget': 'str',
                        'Referrer': 'str',
                        'Url': 'str',
                        'Query': 'str',
                        'FragmentIdentifier': 'str'
                    })
event['CreationDate'] = pd.to_datetime(event['CreationDate'], format='%Y-%m-%d %H:%M:%S')
# Sort so events within a thread are chronological; rows with a missing
# RootEventId (the root events themselves) sort after the others.
event = event.sort_values(by=['RootEventId', 'CreationDate'], ascending=True)
# Map: root event id -> ordered list of event ids belonging to that thread.
REID2IDList = {}
for _, row in event.iterrows():
    if pd.isna(row['RootEventId']):
        # NOTE(review): a root event OVERWRITES any id list already
        # accumulated under the same key by its child events (which sort
        # before it) -- confirm dropping those children here is intended.
        REID2IDList[str(row['Id'])] = [str(row['Id'])]
    else:
        if str(row['RootEventId']) not in REID2IDList.keys():
            REID2IDList[str(row['RootEventId'])] = [str(row['Id'])]
        else:
            REID2IDList[str(row['RootEventId'])].append(str(row['Id']))
# Persist the mapping for downstream extraction steps.
with open(join(PROJECT_PATH, 'data', 'REID2IDList.json'), "w", encoding="utf-8") as fw:
    json.dump(REID2IDList, fw)
| kbcao/sequer | code/DatasetExtraction/eventList_extraction.py | eventList_extraction.py | py | 1,645 | python | en | code | 15 | github-code | 36 |
18903219922 | from logging import getLogger
from os.path import join
from configparser import NoOptionError
from uchicagoldrtoolsuite import log_aware
from uchicagoldrtoolsuite.core.app.abc.cliapp import CLIApp
from ..lib.writers.filesystemstagewriter import FileSystemStageWriter
from ..lib.readers.filesystemstagereader import FileSystemStageReader
from ..lib.processors.generictechnicalmetadatacreator import \
GenericTechnicalMetadataCreator
from ..lib.techmdcreators.fitscreator import FITsCreator
from ..lib.techmdcreators.apifitscreator import APIFITsCreator
__author__ = "Brian Balsamo"
__email__ = "balsamo@uchicago.edu"
__company__ = "The University of Chicago Library"
__copyright__ = "Copyright University of Chicago, 2016"
__publication__ = ""
__version__ = "0.0.1dev"
# Module-level logger, named after this module so logging configuration can
# target it hierarchically.
log = getLogger(__name__)
def launch():
    """Console entry point: build the CLI application with this module's
    metadata and hand control to it."""
    metadata = dict(
        __author__=__author__,
        __email__=__email__,
        __company__=__company__,
        __copyright__=__copyright__,
        __publication__=__publication__,
        __version__=__version__,
    )
    TechnicalMetadataCreator(**metadata).main()
class TechnicalMetadataCreator(CLIApp):
    """
    Creates technical metadata (FITs) for all the material suites in a stage.

    CLI flow: build the argument parser, resolve the staging environment
    (flag first, then config), assemble a data-transfer dict of FITS
    settings (config values overridden by CLI flags), read the stage,
    run the technical-metadata creator over it, and write the stage back.
    """
    @log_aware(log)
    def main(self):
        # Instantiate boilerplate parser
        self.spawn_parser(description="The UChicago LDR Tool Suite utility " +
                          "creating technical metadata for materials in " +
                          "a stage.",
                          epilog="{}\n".format(self.__copyright__) +
                          "{}\n".format(self.__author__) +
                          "{}".format(self.__email__))
        # Add application specific flags/arguments
        log.debug("Adding application specific cli app arguments")
        self.parser.add_argument("stage_id", help="The id of the stage",
                                 type=str, action='store')
        self.parser.add_argument("--skip_existing", help="Skip material " +
                                 "suites which already claim to have " +
                                 "technical metadata",
                                 action='store_true',
                                 default=False)
        self.parser.add_argument("--staging_env", help="The path to your " +
                                 "staging environment",
                                 type=str,
                                 default=None)
        self.parser.add_argument("--eq_detect", help="The equality " +
                                 "metric to use on writing, check " +
                                 "LDRItemCopier for supported schemes.",
                                 type=str, action='store',
                                 default="bytes")
        self.parser.add_argument("--fits_path", help="The path to the FITS " +
                                 "executable on this system. " +
                                 "Overrides any value found in configs.",
                                 type=str, action='store',
                                 default=None)
        self.parser.add_argument("--fits_api_url", help="The url of a FITS " +
                                 "Servlet examine endpoint. " +
                                 "Overrides any value found in configs.",
                                 type=str, action='store',
                                 default=None)
        self.parser.add_argument("--use_api", help="Use a FITS Servlet " +
                                 "instead of a local FITS install.",
                                 action="store_true",
                                 default=False)
        # Parse arguments into args namespace
        args = self.parser.parse_args()
        self.process_universal_args(args)
        # App code
        # Staging environment: --staging_env wins over the configured path.
        if args.staging_env:
            staging_env = args.staging_env
        else:
            staging_env = self.conf.get("Paths", "staging_environment_path")
        staging_env = self.expand_path(staging_env)
        # Data-transfer object of FITS settings: start from config values
        # (both optional), then let CLI flags override them.
        dto = {}
        try:
            dto['fits_path'] = self.conf.get("Paths", "fits_path")
        except NoOptionError:
            pass
        try:
            dto['fits_api_url'] = self.conf.get("URLs", "fits_api_url")
        except NoOptionError:
            pass
        if args.fits_api_url is not None:
            dto['fits_api_url'] = args.fits_api_url
        if args.fits_path is not None:
            dto['fits_path'] = args.fits_path
        # Load the stage from disk.
        reader = FileSystemStageReader(staging_env, args.stage_id)
        stage = reader.read()
        log.info("Stage: " + join(staging_env, args.stage_id))
        log.info("Processing...")
        # Choose the FITS backend: remote servlet vs. local executable.
        if args.use_api:
            techmd_processors = [APIFITsCreator]
        else:
            techmd_processors = [FITsCreator]
        techmd_creator = GenericTechnicalMetadataCreator(stage,
                                                         techmd_processors)
        techmd_creator.process(skip_existing=args.skip_existing,
                               data_transfer_obj=dto)
        log.info("Writing...")
        # Persist the (now metadata-enriched) stage back to disk.
        writer = FileSystemStageWriter(stage, staging_env,
                                       eq_detect=args.eq_detect)
        writer.write()
        log.info("Complete")
if __name__ == "__main__":
    # NOTE(review): unlike launch(), this constructs the app without the
    # module-metadata keyword arguments -- confirm CLIApp tolerates that.
    s = TechnicalMetadataCreator()
    s.main()
| uchicago-library/uchicagoldr-toolsuite | uchicagoldrtoolsuite/bit_level/app/technicalmetadatacreator.py | technicalmetadatacreator.py | py | 5,572 | python | en | code | 0 | github-code | 36 |
34343308888 | '''Write a program that sort a list in descending order '''
n = [9,2,8,1,10,34,1,4,37,2]
# Sort in place, largest first. The built-in Timsort (O(n log n)) replaces
# the original hand-rolled O(n^2) selection sort with swap-via-temp.
n.sort(reverse=True)
print(n)
| ABDULSABOOR1995/Python-List-Exercises | List Exercises/sorting.py | sorting.py | py | 258 | python | en | code | 2 | github-code | 36 |
38107931605 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    # Applies on top of migration 0002, which removed article_date.
    dependencies = [
        ('article', '0002_remove_article_article_date'),
    ]

    operations = [
        # Re-adds article_date as a non-null DateTimeField; the one-off
        # default backfills existing rows and is then discarded
        # (preserve_default=False).
        # NOTE(review): the default is a date, not a datetime -- Django will
        # coerce it; confirm the midnight timestamp is acceptable.
        migrations.AddField(
            model_name='article',
            name='article_date',
            field=models.DateTimeField(default=datetime.date(2014, 10, 21)),
            preserve_default=False,
        ),
    ]
| Evgeneus/blog-django-1.7 | article/migrations/0003_article_article_date.py | 0003_article_article_date.py | py | 494 | python | en | code | 0 | github-code | 36 |
72548079143 | #!/usr/bin/python
__author__ = "Evyatar Orbach"
__email__ = "evyataro@gmail.com"
'''Exercise 8
Make a two-player Rock-Paper-Scissors game.
(Hint: Ask for player plays (using input), compare them, print out a message of congratulations to the winner, and ask if the players want to start a new game)
Remember the rules:
Rock beats scissors
Scissors beats paper
Paper beats rock
'''
# Two players: a display name plus their current (not yet validated) play.
# NOTE: Python 2 script (uses raw_input).
name = [{"player": "", "choise": ""} for _ in range(2)]
stillWantToPlay = 'y'

VALID_PLAYS = ('rock', 'paper', 'scissors')
# What each play defeats: rock beats scissors, scissors beats paper,
# paper beats rock.
BEATS = {'rock': 'scissors', 'paper': 'rock', 'scissors': 'paper'}

for idx in range(2):
    name[idx]["player"] = raw_input("Player %d please enter your name\n" % int(idx + 1))

while stillWantToPlay == 'y':
    # Re-prompt each player until a recognised play is entered.
    for idx in range(2):
        while name[idx]["choise"] not in VALID_PLAYS:
            name[idx]["choise"] = raw_input("%s please choose Rock-Paper-Scissors\n" % name[idx]["player"]).lower()

    play0 = name[0]["choise"]
    play1 = name[1]["choise"]
    if play0 == play1:
        print("Teko!! Play again")
    elif BEATS[play0] == play1:
        print("congratulations to the winner - %s" % name[0]["player"])
    else:
        print("congratulations to the winner - %s" % name[1]["player"])

    # Clear the plays for the next round and ask whether to continue.
    name[0]["choise"] = ""
    name[1]["choise"] = ""
    stillWantToPlay = raw_input("Want to play again? Y/N\n").lower()
| orbache/pythonExercises | exercise8.py | exercise8.py | py | 1,946 | python | en | code | 0 | github-code | 36 |
34338550372 | # https://leetcode.com/problems/add-two-numbers/?tab=Description
# Definition for singly-linked list.
class ListNode(object):
    """Singly-linked list node: one value plus a pointer to the next node."""
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution(object):
    def addTwoNumbers(self, l1, l2):
        """
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode

        Sum two non-negative numbers stored as reversed digit lists,
        returning the result in the same representation.
        """
        # Walk both lists in lockstep, emitting one digit per step; the
        # loop condition absorbs the final carry, so no post-loop append
        # is needed.
        dummy = ListNode(0)
        tail = dummy
        carry = 0
        while l1 is not None or l2 is not None or carry:
            total = carry
            if l1 is not None:
                total += l1.val
                l1 = l1.next
            if l2 is not None:
                total += l2.val
                l2 = l2.next
            carry, digit = divmod(total, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        return dummy.next
| 0x0400/LeetCode | p2.py | p2.py | py | 976 | python | en | code | 0 | github-code | 36 |
75162026024 | """
Config file for Streamlit App
"""
from member import Member
# Cohort label and application title shown in the Streamlit UI.
PROMOTION = "Promotion Continue Data Analyst - Mars 2022"
TITLE = "UnhapPy Earth"
# Project team members
TEAM_MEMBERS = [
    Member(name = "Olga Fedorova",
           linkedin_url = "https://www.linkedin.com/in/olga-fedorova-665a4b63/",
           github_url = "https://github.com/OlgaFedorovaKukk"),
    Member(name = "Boris Baldassari",
           linkedin_url = "https://www.linkedin.com/in/borisbaldassari/",
           github_url = "https://github.com/borisbaldassari"),
    Member(name = "Nicolas Cristiano",
           linkedin_url="https://www.linkedin.com/in/nicolas-cristiano-7b8a23171/",
           github_url="https://github.com/Nic0C")]
# Data file paths (relative to the app's working directory)
globales_file = './data/unhappy_earth/temperatures_globales.csv'
hemispheres_file = './data/unhappy_earth/temperatures_hemispheres.csv'
countries_file = './data/unhappy_earth/temperatures_countries.csv'
co2_global = './data/unhappy_earth/co2_global.csv'
co2_countries = './data/unhappy_earth/co2_countries.csv'
temperatures_globales_file = './data/unhappy_earth/temperatures_globales.csv'
temperatures_hemispheres_file = './data/unhappy_earth/temperatures_hemispheres.csv'
temperatures_countries_file = './data/unhappy_earth/temperatures_countries.csv' | DataScientest-Studio/mar22CDA_unhapPy_earth_studio | config.py | config.py | py | 1,313 | python | en | code | 0 | github-code | 36 |
9366685884 | import mysql.connector
import csv
import git
import os
import subprocess
import sys
from mysql.connector import Error
from operator import itemgetter
from testClasses import *
from connectSQL import *
def getRanVersions():
    """Return the version strings of all snapshots already recorded in the
    local Sonar MySQL database.

    Errors are printed rather than raised; an empty list is returned when
    the query fails.
    """
    snapshots = []
    conn = None
    cursor = None
    try:
        conn = mysql.connector.connect(host='localhost',
                                       database='sonar',
                                       user='sonarUser',
                                       password='happify')
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM snapshots")
        # Fetch rows one at a time; stop BEFORE storing the terminating
        # None (the original appended it and filtered it out afterwards).
        row = cursor.fetchone()
        while row is not None:
            snapshots.append(row)
            row = cursor.fetchone()
    except Error as someError:
        print(someError)
    finally:
        # Guarded cleanup. The original raised NameError here when the
        # connection itself failed (cursor never assigned) and closed the
        # cursor twice on the success path.
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
    return [aSnapshot(item).version for item in snapshots]
def getTags(projectPath):
    """Change into *projectPath* and return its git tags, excluding
    pre-release tags (containing '-'), 0.x tags (containing '0.') and
    duplicates.
    """
    finalTags = []
    print("current dir: ", os.path.abspath(os.curdir))
    os.chdir(projectPath)
    # check_output returns bytes on Python 3; decode before splitting,
    # otherwise bytes.split("\n") raises TypeError.
    output = subprocess.check_output("git tag", shell=True).decode()
    newOutput = output.split("\n")
    print("new output: ", newOutput)
    for item in newOutput:
        if "-" not in item and "0." not in item and item not in finalTags:
            finalTags.append(item)
    print(finalTags)
    return finalTags
def getAllTags(projectPath):
    """Return every git tag of the current directory's repository as a
    (tag, commit-date) tuple, where the date is the 'YYYY-MM-DD' part of
    the tag's last commit timestamp.
    """
    # NOTE(review): the chdir is commented out, so projectPath is currently
    # unused and the CURRENT directory's repo is inspected -- confirm.
    finalTags = []
    fullTags = []
    print("current dir: ", os.path.abspath(os.curdir))
    #os.chdir(projectPath)
    # check_output returns bytes on Python 3; decode before splitting.
    output = subprocess.check_output("git tag", shell=True).decode()
    newOutput = output.split("\n")
    print("new output: ", newOutput)
    for item in newOutput:
        if item not in finalTags:
            finalTags.append(item)
    for thing in finalTags:
        fullDate = subprocess.check_output("git log -1 --format=%ai {}".format(thing), shell=True).decode()
        newDate = fullDate.split(" ")[0]
        fullTags.append((thing, newDate))
    return fullTags
def runSonarMaven(newOutput):
    """For each tag name in *newOutput*: check the tag out, build it with
    Maven, then run the Sonar analysis.

    A failing command is printed and the next tag is attempted. (The
    original also kept an unused loop counter, removed here.)
    """
    for item in newOutput:
        try:
            print("checking out:", item)
            subprocess.check_output("git checkout tags/" + item, shell=True)
            subprocess.check_output("mvn clean install", shell=True)
            subprocess.check_output("mvn sonar:sonar ", shell=True)
        except subprocess.CalledProcessError as someError:
            print(someError)
def main():
    """Compare the repo's tags with the versions already analysed and print
    the tags still to be run (the Sonar run itself is commented out)."""
    versionsToBeRan = []
    allVersions = getAllTags(sys.argv[1])
    ranVersions = getRanVersions()
    for item in allVersions:
        # NOTE(review): getAllTags yields (tag, date) tuples while
        # getRanVersions yields version strings, so str(item) -- e.g.
        # "('1.0', '2016-01-01')" -- can never match; confirm intended.
        if str(item) not in ranVersions:
            versionsToBeRan.append(item)
    print(versionsToBeRan)
    for vers in allVersions:
        print("hey", vers)
    #runSonarMaven(versionsToBeRan)
    #print(item.version)
if __name__ == '__main__':
    # Usage: python getTags.py <project_path>
    main()
| MaxMoede/DPDM | getTags.py | getTags.py | py | 2,613 | python | en | code | 0 | github-code | 36 |
21877552923 | import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.hashes import SHA256
from cryptography.hazmat.primitives.asymmetric import padding
def save_file(filename, content):
    """Write *content* (bytes) to *filename* inside this script's directory."""
    filepath = os.path.dirname(os.path.abspath(__file__))
    # 'with' guarantees the handle is flushed and closed even if the write
    # fails; os.path.join is portable, unlike manual '/' concatenation.
    with open(os.path.join(filepath, filename), "wb") as f:
        f.write(content)
def create_key():
    """Generate a 4096-bit RSA key pair and write both halves to disk as
    'private.pem' (PKCS8, unencrypted) and 'public.pem'
    (SubjectPublicKeyInfo)."""
    key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=4096,
        backend=default_backend()
    )

    # Private half. Swap NoEncryption for
    # serialization.BestAvailableEncryption(b'...') to password-protect it.
    private_pem = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption()
    )
    save_file("private.pem", private_pem)

    # Public half, derived from the private key.
    public_pem = key.public_key().public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo
    )
    save_file("public.pem", public_pem)
| Marcus11Dev/Blockchain_Lesson_Agent | create_Keys.py | create_Keys.py | py | 1,319 | python | en | code | 0 | github-code | 36 |
27271629688 | import time, random
import pygame
# Window dimensions in pixels.
screen_w = 800
screen_h = 600
# Create the window
screen = pygame.display.set_mode((screen_w,screen_h))
# Colour constants (RGB).
black = (0,0, 0)
red = (255,0,0)
green = (0, 200, 0)
class Game(object):
    """Holds global game state: the window, scoring and difficulty scaling."""

    def __init__(self):
        self.screen = pygame.display.set_mode((800,600))
        self.score = 0
        self.oldScore = 0
        self.speed = 5
        self.speedMultiplier = 1
        # Persistent clock: Clock.tick() only throttles relative to the
        # previous tick() on the SAME Clock object, so one instance must be
        # reused across frames.
        self.clock = pygame.time.Clock()

    def gameInit(self):
        """Initialise all imported pygame modules."""
        pygame.init()

    def gameName(self, name):
        """Set the window caption."""
        pygame.display.set_caption(name)

    def setFPS(self, fps):
        """Cap the frame rate.

        Bug fix: the original created a brand-new Clock on every call
        (pygame.time.Clock().tick(fps)); a fresh Clock's first tick() does
        not wait, so the frame rate was effectively uncapped.
        """
        self.clock.tick(fps)

    def screenColor(self):
        """Clear the window to the background colour."""
        self.screen.fill(green)

    def incSpeed(self, newSpeed):
        """Set the per-frame score increment."""
        self.speed = newSpeed

    def resetGame(self):
        """Archive the score and restore the starting difficulty."""
        self.oldScore = self.score
        self.score = 0
        self.speed = 5
        self.speedMultiplier = 1
# Player Class
class Player(object):
    """The player-controlled red square."""

    # Spawn position, shared by __init__ and reset().
    init_x = 150
    init_y = 405

    def __init__(self):
        self.x = Player.init_x
        self.y = Player.init_y
        self.width = 50
        self.height = 50
        self.vel = 10
        self.isJump = False
        self.jumpCount = 10

    def draw(self):
        """Draw the red square with a black outline on the module screen."""
        # Red square as player
        pygame.draw.rect(screen, red, [self.x, self.y, self.width, self.height])
        # Black contour defined by hitbox
        pygame.draw.rect(screen, black, (self.x, self.y, self.width, self.height), 3)

    def jump(self):
        """Advance one frame of the parabolic jump.

        y changes by 0.8 * jumpCount^2 each frame while jumpCount runs from
        10 down to -10: the player rises while jumpCount is positive and
        falls while it is negative. Past -10 the jump is rearmed.
        """
        if self.jumpCount >= -10:
            neg = 1
            if self.jumpCount < 0:
                neg = -1
            # Math equation
            self.y -= (self.jumpCount ** 2) * 0.8 * neg
            self.jumpCount -= 1
        else:
            # Landed: allow jumping again.
            self.isJump = False
            self.jumpCount = 10

    def setVel(self, vel):
        """Set the horizontal movement speed."""
        self.vel = vel

    def reset(self):
        """Respawn at the start position after death.

        Fix: reuse the init_x/init_y class constants instead of the
        duplicated magic numbers 150/405 the original hard-coded here.
        """
        self.x = Player.init_x
        self.y = Player.init_y
        self.width = 50
        self.height = 50
# Enemy Class
class Enemy(object):
    """A triangular obstacle that scrolls from right to left."""

    def __init__(self, x, y, width, height):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.end = screen_w
        self.vel = 10

    def draw(self):
        """Advance the enemy, wrap it past the right edge once it leaves the
        screen, and render a black triangle with a red outline."""
        self.move()
        if self.x < 0:
            self.x = self.end + random.randint(0, 350)
        # Vertex coordinates of the triangle, anchored at (x, y) and
        # extending up-and-left.
        pointlist = [
            (self.x - self.width / 2, self.y - self.width),
            (self.x - self.height / 2, self.y - self.height),
            (self.x, self.y),
            (self.x - self.width, self.y),
        ]
        pygame.draw.polygon(screen, red, pointlist, 3)
        pygame.draw.polygon(screen, black, pointlist, 0)

    def move(self):
        """Shift left by the current velocity."""
        self.x -= self.vel

    def setVel(self, newVel):
        """Set the scrolling speed."""
        self.vel = newVel

    def reset(self, offset):
        """Respawn off the right edge (plus *offset*) with default size and
        speed after the player dies."""
        self.x = random.randint(750, 850) + offset
        self.y = 454
        self.width = 30
        self.height = 30
        self.vel = 10
# Function to call all class related functions to upload the drawings to the screen
def redrawGameWindow(player, enemyList):
    """Render one frame: the player, every enemy, the red floor strip,
    then push the frame to the display."""
    player.draw()
    for enemy in enemyList:
        enemy.draw()
    # Draws the red base in the game
    pygame.draw.rect(screen, red, [0,456, screen_w, 200])
    # Updates the screen with the drawings applied
    pygame.display.update()
# Whenever the player is touches the enemy this function is called and displayes the message DEAD on screen
def printMSG(msg, x, y, size):
    """Blit *msg* in black at (x, y) using the default font at *size*."""
    # Define font and size
    font = pygame.font.Font(None, size)
    # Define what message to display and its color
    text_surface = font.render(msg, True, (0, 0, 0))
    # Print the message to screen using coordinates
    screen.blit(text_surface, (x,y))
# Collision calculation with enemies, when the square touches the triangles it will display message "DEAD"
def checkCollision(game, player, enemies):
    """Axis-aligned overlap test between the player square and each enemy.

    Enemy geometry extends up-and-left from its (x, y) anchor, hence the
    (enemy.x - enemy.width) / (enemy.y - enemy.height) bounds. On a hit:
    show "DEAD", pause one second, and reset player, enemies and game.
    """
    # NOTE(review): the reset block hard-codes exactly two enemies
    # (enemies[0]/enemies[1]) with fixed offsets -- confirm before adding
    # more enemies to the list.
    for enemy in enemies:
        if (player.x + player.width) >= (enemy.x - enemy.width) and player.x <= enemy.x:
            if (player.y + player.height) >= (enemy.y - enemy.height) and player.y <= enemy.y:
                printMSG("DEAD", 355, 250, 50)
                redrawGameWindow(player, enemies)
                time.sleep(1)
                # When collision occurs the game resets
                player.reset()
                enemies[0].reset(100)
                enemies[1].reset(450)
                game.resetGame()
# Increases and prints score as well as the old score
def scoreUpdate(game):
    """Add this frame's score increment and draw the current and previous
    scores in the top corners."""
    game.score += game.speed
    printMSG(("Score: " + str(game.score)), 50, 50, 40)
    printMSG(("Old Score: " + str(game.oldScore)), 500, 50, 40)
# Function that increases the speed every 1000 score
def speedUpdate(game, enemylist):
    """Every 2000 points, advance the difficulty tier and speed up every
    enemy by one."""
    threshold = 2000 * game.speedMultiplier
    if game.score >= threshold:
        game.speedMultiplier += 1
        for foe in enemylist:
            foe.setVel(foe.vel + 1)
def main():
    """Set up the game objects and run the frame loop until the window is
    closed: handle input, update score/speed, detect collisions, redraw."""
    # Game instance
    game = Game()
    game.gameInit()
    game.gameName("Running Game 2")
    # Player 1
    sq = Player()
    # Enemies 1 and 2
    ey = Enemy(random.randint(750,850),454,30,30)
    ey2 = Enemy(random.randint(1200,1400), 454, 30, 30)
    # Enemy list, if several add here
    enemyList = [ey, ey2]
    # Game condition
    running = True
    # Game loop
    while running:
        # Set screen color in RGB
        game.screenColor()
        # Continously check all events that are happening in the game
        for event in pygame.event.get():
            # Check if window is closed when the cross is pressed
            if event.type == pygame.QUIT:
                running = False
        # Variable for checking if any key is pressed
        keys = pygame.key.get_pressed()
        # Arrow key movements of player
        if keys[pygame.K_LEFT] and sq.x > 0:
            # Move player to the left with the given velocity when left key is pressed
            sq.x -= sq.vel
        if keys[pygame.K_RIGHT] and sq.x < screen_w - sq.width:
            sq.x += sq.vel
        # Jump function
        if not(sq.isJump):
            if keys[pygame.K_SPACE]:
                sq.isJump = True
        else:
            sq.jump()
        # Updates score every loop
        scoreUpdate(game)
        # Increases speed every 2000 score
        speedUpdate(game, enemyList)
        # Collision detection between player and enemies
        checkCollision(game, sq, enemyList)
        # Calling this function every loop to update the drawings to screen
        redrawGameWindow(sq, enemyList)
        # Frames per second
        game.setFPS(30)
if __name__ == "__main__":
    main()
# Shut pygame down once the game loop has exited.
pygame.quit()
| eliazz95/JumpingGame | firstGame.py | firstGame.py | py | 6,406 | python | en | code | 0 | github-code | 36 |
17111491784 | class Solution(object):
def twoSum(self,nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
hash_map = {}
for index, value in enumerate(nums):
hash_map[value] = index
for i, num in enumerate(nums):
j = hash_map.get(target - num)
if j is not None and i != j:
return [i,j]
def twoSum1(self,nums,target):
num_idx_dict = dict()
solution = []
for i,v in enumerate(nums):
dual = target - v
# dict 02
index_of_dual = num_idx_dict.get(dual,None)
if index_of_dual is not None and index_of_dual != i:
solution = [i,index_of_dual]
break
else:
# dict 01
num_idx_dict[v] = i
return solution
# Smoke test: 2 + 7 == 9 at indices 0 and 2.
assert Solution().twoSum([2, 17, 7, 15], 9) == [0,2]
#print(Solution().twoSum([2, 17, 7, 15], 9));
| yannweb/yanns_code | code/001.py | 001.py | py | 994 | python | en | code | 1 | github-code | 36 |
10650897169 | import frappe
import json
import re

import jwt

from frappe import _
from frappe.utils import getdate
from frappe.utils.data import cstr, cint, flt, format_date
from erpnext.regional.india.e_invoice.utils import (
	GSPConnector, raise_document_name_too_long_error, read_json, get_transaction_details,
	validate_mandatory_fields, get_doc_details, get_overseas_address_details, get_return_doc_reference,
	get_eway_bill_details, validate_totals, show_link_to_error_log, santize_einvoice_fields, safe_json_load,
	get_payment_details, validate_eligibility, update_item_taxes, get_invoice_value_details, get_party_details,
	update_other_charges,
)
from erpnext.regional.india.utils import get_gst_accounts, get_place_of_supply
GST_INVOICE_NUMBER_FORMAT = re.compile(r"^[a-zA-Z0-9\-/]+$") #alphanumeric and - /
def validate_einvoice_fields(doc):
	"""Guard Sales Invoice save/submit/cancel against edits that would
	invalidate an already-generated IRN (Invoice Reference Number)."""
	invoice_eligible = validate_eligibility(doc)

	if not invoice_eligible:
		return

	# Finbyz Changes Start: dont change posting date after irn generated
	if doc.irn and doc.docstatus == 1 and doc._action == 'submit':
		if str(doc.posting_date) != str(frappe.db.get_value("Sales Invoice",doc.name,"posting_date")):
			frappe.throw(_('You cannot edit the invoice after generating IRN'), title=_('Edit Not Allowed'))
	# Finbyz Changes End
	if doc.docstatus == 0 and doc._action == 'save':
		# Grand total must stay frozen once an IRN exists (unless the e-way
		# bill was cancelled).
		if doc.irn and not doc.eway_bill_cancelled and doc.grand_total != frappe.db.get_value("Sales Invoice",doc.name,"grand_total"):# Finbyz Changes:
			frappe.throw(_('You cannot edit the invoice after generating IRN'), title=_('Edit Not Allowed'))
		if len(doc.name) > 16 and doc.authority == 'Authorized':# Finbyz Changes
			raise_document_name_too_long_error()
	elif doc.docstatus == 1 and doc._action == 'submit' and not doc.irn and doc.irn_cancelled == 0: # finbyz
		frappe.throw(_('You must generate IRN before submitting the document.'), title=_('Missing IRN'))
	elif doc.irn and doc.docstatus == 2 and doc._action == 'cancel' and not doc.irn_cancelled:
		frappe.throw(_('You must cancel IRN before cancelling the document.'), title=_('Cancel Not Allowed'))
def make_einvoice(invoice):
	"""Assemble the GST e-invoice JSON payload for a Sales Invoice.

	Gathers transaction/seller/buyer/shipping/payment/e-way-bill fragments,
	renders them through the 'einv_template' schema, sanitizes the result
	and validates totals before returning the einvoice dict.
	"""
	validate_mandatory_fields(invoice)

	schema = read_json('einv_template')

	transaction_details = get_transaction_details(invoice)
	item_list = get_item_list(invoice)
	doc_details = get_doc_details(invoice)
	invoice_value_details = get_invoice_value_details(invoice)
	seller_details = get_party_details(invoice.company_address)

	# Overseas buyers use the export address format.
	if invoice.gst_category == 'Overseas':
		buyer_details = get_overseas_address_details(invoice.customer_address)
	else:
		buyer_details = get_party_details(invoice.customer_address)

	# Place of supply: state code from the computed value, falling back to
	# the first two digits of the billing GSTIN.
	place_of_supply = get_place_of_supply(invoice, invoice.doctype)
	if place_of_supply:
		place_of_supply = place_of_supply.split('-')[0]
	else:
		place_of_supply = invoice.billing_address_gstin[:2]
	buyer_details.update(dict(place_of_supply=place_of_supply))

	seller_details.update(dict(legal_name=invoice.company))
	buyer_details.update(dict(legal_name=invoice.billing_address_title or invoice.customer_name or invoice.customer)) # finbyz change add billing address title

	# Optional sections default to empty dicts so schema.format still works.
	shipping_details = payment_details = prev_doc_details = eway_bill_details = frappe._dict({})
	if invoice.shipping_address_name and invoice.customer_address != invoice.shipping_address_name:
		if invoice.gst_category == 'Overseas':
			shipping_details = get_overseas_address_details(invoice.shipping_address_name)
		else:
			shipping_details = get_party_details(invoice.shipping_address_name, is_shipping_address=True)

	if invoice.is_pos and invoice.base_paid_amount:
		payment_details = get_payment_details(invoice)

	if invoice.is_return and invoice.return_against:
		prev_doc_details = get_return_doc_reference(invoice)

	if invoice.transporter and flt(invoice.distance) and not invoice.is_return:
		eway_bill_details = get_eway_bill_details(invoice)

	# not yet implemented
	dispatch_details = period_details = export_details = frappe._dict({})

	einvoice = schema.format(
		transaction_details=transaction_details, doc_details=doc_details, dispatch_details=dispatch_details,
		seller_details=seller_details, buyer_details=buyer_details, shipping_details=shipping_details,
		item_list=item_list, invoice_value_details=invoice_value_details, payment_details=payment_details,
		period_details=period_details, prev_doc_details=prev_doc_details,
		export_details=export_details, eway_bill_details=eway_bill_details
	)

	try:
		einvoice = safe_json_load(einvoice)
		einvoice = santize_einvoice_fields(einvoice)
	except Exception:
		show_link_to_error_log(invoice, einvoice)

	validate_totals(einvoice)

	return einvoice
def get_item_list(invoice):
	"""Render the e-invoice JSON fragment for every Sales Invoice line item
	and return them joined as a comma-separated string.

	Each item dict is derived from the row, augmented with derived amounts
	(discount, unit rate, taxes, total) and formatted through the
	'einv_item_template' schema.
	"""
	item_list = []

	for d in invoice.items:
		einvoice_item_schema = read_json('einv_item_template')
		item = frappe._dict({})
		item.update(d.as_dict())

		item.sr_no = d.idx
		item.description = json.dumps(d.item_group or d.item_name)[1:-1] # finbyz change add item group

		item.qty = abs(item.qty)
		if invoice.apply_discount_on == 'Net Total' and invoice.discount_amount:
			item.discount_amount = abs(item.base_amount - item.base_net_amount)
		else:
			item.discount_amount = 0

		item.unit_rate = abs((abs(item.taxable_value) - item.discount_amount)/ item.qty)
		item.gross_amount = abs(item.taxable_value) + item.discount_amount
		item.taxable_value = abs(item.taxable_value)

		item.batch_expiry_date = frappe.db.get_value('Batch', d.batch_no, 'expiry_date') if d.batch_no else None
		item.batch_expiry_date = format_date(item.batch_expiry_date, 'dd/mm/yyyy') if item.batch_expiry_date else None
		#finbyz Changes
		if frappe.db.get_value('Item', d.item_code, 'is_stock_item') or frappe.db.get_value('Item', d.item_code, 'is_not_service_item'):
			item.is_service_item = 'N'
		else:
			item.is_service_item = 'Y'
		#finbyz changes end
		# Bug fix: this assignment was accidentally fused into the comment
		# above ("#finbyz changes enditem.serial_no = ..."), so serial_no was
		# never set before the item template was formatted. Restored per the
		# upstream ERPNext implementation.
		item.serial_no = ""

		item = update_item_taxes(invoice, item)

		item.total_value = abs(
			item.taxable_value + item.igst_amount + item.sgst_amount +
			item.cgst_amount + item.cess_amount + item.cess_nadv_amount + item.other_charges
		)
		einv_item = einvoice_item_schema.format(item=item)
		item_list.append(einv_item)

	return ', '.join(item_list)
# india utils.py
def validate_document_name(doc, method=None):
	"""Validate GST invoice number requirements.

	Enforced only for Indian companies on documents posted from FY 2021-22
	onward: max 16 characters (customised here to apply only when
	doc.authority == 'Authorized') and an alphanumeric/dash/slash charset.
	"""
	country = frappe.get_cached_value("Company", doc.company, "country")

	# Date was chosen as start of next FY to avoid irritating current users.
	if country != "India" or getdate(doc.posting_date) < getdate("2021-04-01"):
		return

	if len(doc.name) > 16 and doc.authority == 'Authorized': #finbyz
		frappe.throw(_("Maximum length of document number should be 16 characters as per GST rules. Please change the naming series."))

	if not GST_INVOICE_NUMBER_FORMAT.match(doc.name):
		frappe.throw(_("Document name should only contain alphanumeric values, dash(-) and slash(/) characters as per GST rules. Please change the naming series."))
| venku31/ceramic | ceramic/e_invoice_ceramic.py | e_invoice_ceramic.py | py | 6,890 | python | en | code | null | github-code | 36 |
3082899682 | import json
from hsfs import util
import humps
class TrainingDatasetSplit:
    """Metadata for one split of a training dataset.

    A split is either RANDOM (defined by a percentage) or TIME_SERIES
    (defined by a start/end time window); the split name is one of the
    TRAIN/VALIDATION/TEST constants. Instances round-trip to/from the
    feature-store REST API via to_dict()/from_response_json().
    """
    # Split-type discriminators.
    TIME_SERIES_SPLIT = "TIME_SERIES_SPLIT"
    RANDOM_SPLIT = "RANDOM_SPLIT"
    # Canonical split names.
    TRAIN = "train"
    VALIDATION = "validation"
    TEST = "test"

    def __init__(
        self,
        name,
        split_type,
        percentage=None,
        start_time=None,
        end_time=None,
        **kwargs
    ):
        # percentage applies to RANDOM_SPLIT; start/end_time to
        # TIME_SERIES_SPLIT. **kwargs swallows unknown REST fields.
        self._name = name
        self._percentage = percentage
        self._split_type = split_type
        self._start_time = start_time
        self._end_time = end_time

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        self._name = name

    @property
    def percentage(self):
        return self._percentage

    @percentage.setter
    def percentage(self, percentage):
        self._percentage = percentage

    @property
    def split_type(self):
        return self._split_type

    @split_type.setter
    def split_type(self, split_type):
        self._split_type = split_type

    @property
    def start_time(self):
        return self._start_time

    @start_time.setter
    def start_time(self, start_time):
        self._start_time = start_time

    @property
    def end_time(self):
        return self._end_time

    @end_time.setter
    def end_time(self, end_time):
        self._end_time = end_time

    def json(self):
        """Serialize to a JSON string using the feature-store encoder."""
        return json.dumps(self, cls=util.FeatureStoreEncoder)

    def to_dict(self):
        """Return the camelCase dict representation expected by the API."""
        return {
            "name": self._name,
            "percentage": self._percentage,
            "splitType": self._split_type,
            "startTime": self._start_time,
            "endTime": self._end_time,
        }

    @classmethod
    def from_response_json(cls, json_dict):
        """Build an instance from an API response dict (camelCase keys are
        decamelized; split_type defaults to RANDOM_SPLIT)."""
        json_decamelized = humps.decamelize(json_dict)
        return cls(
            name=json_decamelized["name"],
            split_type=json_decamelized.get(
                "split_type", TrainingDatasetSplit.RANDOM_SPLIT
            ),
            percentage=json_decamelized.get("percentage", None),
            start_time=json_decamelized.get("start_time", None),
            end_time=json_decamelized.get("end_time", None),
        )
| logicalclocks/feature-store-api | python/hsfs/training_dataset_split.py | training_dataset_split.py | py | 2,210 | python | en | code | 50 | github-code | 36 |
8492244725 | """
Checkboxes, are similar to radio buttons.
Square boxes that basically just relate to 0/1
We have this box tied to an int (0/1).
If you tie it to a string (on/off) you need to change a few things
We will copy this and create a string example on the next file.
"""
# PIL=Pillow. Terminal: pip3 install Pillow
# Needed: sudo apt-get install python3-pil.imagetk
from tkinter import *
from PIL import Image, ImageTk
#
root = Tk()
root.title("tkinter19_checkboxes")
root.geometry("400x400")
#
var = IntVar() # var for value of checkbox we want to assign a 0 or 1
# Create and place the actual checkbutton, and tie it to the var
c = Checkbutton(root, text="Check this box!", variable=var)
c.pack()
#
# Function will update the label, when button is clicked
# Label will change to 0 if button is clicked when box is NOT checked.
# Lavel will change to 1 if button is clicked when the box IS checked.
def show():
my_label = Label(root, text=var.get()).pack()
#
my_label = Label(root, text=var.get()).pack()
my_button = Button(root, text="Show Selection", command=show).pack()
# ==================
root.mainloop()
| ncterry/Python | Tkinter/tkinter19_checkboxes.py | tkinter19_checkboxes.py | py | 1,119 | python | en | code | 0 | github-code | 36 |
26316442971 | import pandas as pd
from sklearn.impute import KNNImputer
from imblearn.over_sampling import SMOTE
from matplotlib import pyplot as plt
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 1000)
df = pd.read_csv('./alzheimer.csv', skiprows=1, names=(
'Group', 'M/F', 'Age', 'EDUC', 'SES', 'MMSE', 'CDR', 'eTIV', 'nWBV', 'ASF'
))
print(df.describe())
X = df.copy()
print(X.columns)
y = df[['Group']]
del X['Group']
X = pd.get_dummies(X) # convert m/f to dummy columns
imputer = KNNImputer()
X = pd.DataFrame(imputer.fit_transform(X), columns=X.columns)
# scaler = StandardScaler()
# X = scaler.fit_transform(X)
smote = SMOTE()
X_smote, y_smote = smote.fit_resample(X, y)
demented = df[df['Group'] == 'Demented']
not_demented = df[df['Group'] == 'Nondemented']
converted = df[df['Group'] == 'Converted']
plt.figure(figsize=(25, 12))
plt.subplot(331)
plt.hist([demented['M/F'], not_demented['M/F'], converted['M/F']], rwidth=0.8)
plt.legend(['Demented', 'Not demented', 'Converted'])
plt.title("Frequency of demented, non-demented, and converted patients by gender")
plt.subplot(332)
plt.hist([demented['Age'], not_demented['Age'], converted['Age']], rwidth=0.8)
plt.legend(['Demented', 'Not demented', 'Converted'])
plt.title("Frequency of demented, non-demented, and converted patients by age")
plt.subplot(333)
plt.hist([demented['EDUC'], not_demented['EDUC'], converted['EDUC']], rwidth=0.8)
plt.legend(['Demented', 'Not demented', 'Converted'])
plt.title("Frequency of demented, non-demented, and converted patients by education")
plt.subplot(334)
plt.hist([demented['SES'], not_demented['SES'], converted['SES']], rwidth=0.8)
plt.legend(['Demented', 'Not demented', 'Converted'])
plt.title("Frequency of demented, non-demented, and converted patients by SES")
plt.subplot(335)
plt.hist([demented['MMSE'], not_demented['MMSE'], converted['MMSE']], rwidth=0.8)
plt.legend(['Demented', 'Not demented', 'Converted'])
plt.title("Frequency of demented, non-demented, and converted patients by MMSE")
plt.subplot(336)
plt.hist([demented['CDR'], not_demented['CDR'], converted['CDR']], rwidth=0.8)
plt.legend(['Demented', 'Not demented', 'Converted'])
plt.title("Frequency of demented, non-demented, and converted patients by CDR")
plt.subplot(337)
plt.hist([demented['eTIV'], not_demented['eTIV'], converted['eTIV']], rwidth=0.8)
plt.legend(['Demented', 'Not demented', 'Converted'])
plt.title("Frequency of demented, non-demented, and converted patients by eTIV")
plt.subplot(338)
plt.hist([demented['nWBV'], not_demented['nWBV'], converted['nWBV']], rwidth=0.8)
plt.legend(['Demented', 'Not demented', 'Converted'])
plt.title("Frequency of demented, non-demented, and converted patients by nWBV")
plt.subplot(339)
plt.hist([demented['ASF'], not_demented['ASF'], converted['ASF']], rwidth=0.8)
plt.legend(['Demented', 'Not demented', 'Converted'])
plt.title("Frequency of demented, non-demented, and converted patients by ASF")
plt.show()
"""
Notes
methods used:
- imputing using KNNImputer for missing values in SES and MMSE
- dummy variables for the categorical M/F column
- SMOTE due to the 'Converted' target value being a significant minority and consistently miscategorized without SMOTE
- StandardScaler was tested for logistic regression with little to no improvement in results
- TODO: try binning age
Feature selection:
- RFE (for basic logistic regression)
- FFS (for basic logistic regression)
- Feature importance (in random forest module)
- Based on the results from these methods, removed from logistic and bagging model:
- EDUC
- MMSE
- For mlxtend and random forest, removed:
- M/F
- EDUC
- nWBV
- For stacked model, TBD
- RFE results:
SES
CDR
nWBV
ASF
M/F_F
- FFS results:
feature ffs
7 ASF 1.430156
5 eTIV 2.684062
0 Age 14.149083
8 M/F_F 18.656900
9 M/F_M 18.656900
6 nWBV 28.090159
1 EDUC 28.760364
2 SES 41.344708
3 MMSE 170.239290
4 CDR 496.623041
- feature importance results:
importance feature
3 0.482053 CDR
2 0.236816 MMSE
1 0.091915 SES
4 0.067490 eTIV
5 0.066786 ASF
0 0.054942 Age
Data correlations w/ positive dementia:
- Males had a higher proportion
- Age, no obvious relationship
- Loose correlation with mid-level education
- Higher SES loosely correlated (socioeconomic status)
- Lower MMSE strongly correlated (mini mental state examination)
- Higher CDR strongly correlated (clinical dementia rating)
- eTIV, no obvious correlation (estimated intracranial volume)
- Lower nWBV strongly correlated (normalized whole brain volume)
- ASF, no obvious correlation (atlas scaling factor)
""" | dlepke/4948-a1 | data_exploration.py | data_exploration.py | py | 4,701 | python | en | code | 0 | github-code | 36 |
36947928289 | from __future__ import print_function
__revision__ = "src/engine/SCons/Tool/rpmutils.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import platform
import subprocess
import SCons.Util
# Start of rpmrc dictionaries (Marker, don't change or remove!)
os_canon = {
'AIX' : ['AIX','5'],
'AmigaOS' : ['AmigaOS','5'],
'BSD_OS' : ['bsdi','12'],
'CYGWIN32_95' : ['cygwin32','15'],
'CYGWIN32_NT' : ['cygwin32','14'],
'Darwin' : ['darwin','21'],
'FreeBSD' : ['FreeBSD','8'],
'HP-UX' : ['hpux10','6'],
'IRIX' : ['Irix','2'],
'IRIX64' : ['Irix64','10'],
'Linux' : ['Linux','1'],
'Linux/390' : ['OS/390','20'],
'Linux/ESA' : ['VM/ESA','20'],
'MacOSX' : ['macosx','21'],
'MiNT' : ['FreeMiNT','17'],
'NEXTSTEP' : ['NextStep','11'],
'OS/390' : ['OS/390','18'],
'OSF1' : ['osf1','7'],
'SCO_SV' : ['SCO_SV3.2v5.0.2','9'],
'SunOS4' : ['SunOS','4'],
'SunOS5' : ['solaris','3'],
'UNIX_SV' : ['MP_RAS','16'],
'VM/ESA' : ['VM/ESA','19'],
'machten' : ['machten','13'],
'osf3.2' : ['osf1','7'],
'osf4.0' : ['osf1','7'],
}
buildarch_compat = {
'alpha' : ['noarch'],
'alphaev5' : ['alpha'],
'alphaev56' : ['alphaev5'],
'alphaev6' : ['alphapca56'],
'alphaev67' : ['alphaev6'],
'alphapca56' : ['alphaev56'],
'amd64' : ['x86_64'],
'armv3l' : ['noarch'],
'armv4b' : ['noarch'],
'armv4l' : ['armv3l'],
'armv4tl' : ['armv4l'],
'armv5tejl' : ['armv5tel'],
'armv5tel' : ['armv4tl'],
'armv6l' : ['armv5tejl'],
'armv7l' : ['armv6l'],
'atariclone' : ['m68kmint','noarch'],
'atarist' : ['m68kmint','noarch'],
'atariste' : ['m68kmint','noarch'],
'ataritt' : ['m68kmint','noarch'],
'athlon' : ['i686'],
'falcon' : ['m68kmint','noarch'],
'geode' : ['i586'],
'hades' : ['m68kmint','noarch'],
'hppa1.0' : ['parisc'],
'hppa1.1' : ['hppa1.0'],
'hppa1.2' : ['hppa1.1'],
'hppa2.0' : ['hppa1.2'],
'i386' : ['noarch','fat'],
'i486' : ['i386'],
'i586' : ['i486'],
'i686' : ['i586'],
'ia32e' : ['x86_64'],
'ia64' : ['noarch'],
'm68k' : ['noarch'],
'milan' : ['m68kmint','noarch'],
'mips' : ['noarch'],
'mipsel' : ['noarch'],
'parisc' : ['noarch'],
'pentium3' : ['i686'],
'pentium4' : ['pentium3'],
'ppc' : ['noarch','fat'],
'ppc32dy4' : ['noarch'],
'ppc64' : ['noarch','fat'],
'ppc64iseries' : ['ppc64'],
'ppc64pseries' : ['ppc64'],
'ppc8260' : ['noarch'],
'ppc8560' : ['noarch'],
'ppciseries' : ['noarch'],
'ppcpseries' : ['noarch'],
's390' : ['noarch'],
's390x' : ['noarch'],
'sh3' : ['noarch'],
'sh4' : ['noarch'],
'sh4a' : ['sh4'],
'sparc' : ['noarch'],
'sparc64' : ['sparcv9v'],
'sparc64v' : ['sparc64'],
'sparcv8' : ['sparc'],
'sparcv9' : ['sparcv8'],
'sparcv9v' : ['sparcv9'],
'sun4c' : ['noarch'],
'sun4d' : ['noarch'],
'sun4m' : ['noarch'],
'sun4u' : ['noarch'],
'x86_64' : ['noarch'],
}
os_compat = {
'BSD_OS' : ['bsdi'],
'Darwin' : ['MacOSX'],
'FreeMiNT' : ['mint','MiNT','TOS'],
'IRIX64' : ['IRIX'],
'MiNT' : ['FreeMiNT','mint','TOS'],
'TOS' : ['FreeMiNT','MiNT','mint'],
'bsdi4.0' : ['bsdi'],
'hpux10.00' : ['hpux9.07'],
'hpux10.01' : ['hpux10.00'],
'hpux10.10' : ['hpux10.01'],
'hpux10.20' : ['hpux10.10'],
'hpux10.30' : ['hpux10.20'],
'hpux11.00' : ['hpux10.30'],
'hpux9.05' : ['hpux9.04'],
'hpux9.07' : ['hpux9.05'],
'mint' : ['FreeMiNT','MiNT','TOS'],
'ncr-sysv4.3' : ['ncr-sysv4.2'],
'osf4.0' : ['osf3.2','osf1'],
'solaris2.4' : ['solaris2.3'],
'solaris2.5' : ['solaris2.3','solaris2.4'],
'solaris2.6' : ['solaris2.3','solaris2.4','solaris2.5'],
'solaris2.7' : ['solaris2.3','solaris2.4','solaris2.5','solaris2.6'],
}
arch_compat = {
'alpha' : ['axp','noarch'],
'alphaev5' : ['alpha'],
'alphaev56' : ['alphaev5'],
'alphaev6' : ['alphapca56'],
'alphaev67' : ['alphaev6'],
'alphapca56' : ['alphaev56'],
'amd64' : ['x86_64','athlon','noarch'],
'armv3l' : ['noarch'],
'armv4b' : ['noarch'],
'armv4l' : ['armv3l'],
'armv4tl' : ['armv4l'],
'armv5tejl' : ['armv5tel'],
'armv5tel' : ['armv4tl'],
'armv6l' : ['armv5tejl'],
'armv7l' : ['armv6l'],
'atariclone' : ['m68kmint','noarch'],
'atarist' : ['m68kmint','noarch'],
'atariste' : ['m68kmint','noarch'],
'ataritt' : ['m68kmint','noarch'],
'athlon' : ['i686'],
'falcon' : ['m68kmint','noarch'],
'geode' : ['i586'],
'hades' : ['m68kmint','noarch'],
'hppa1.0' : ['parisc'],
'hppa1.1' : ['hppa1.0'],
'hppa1.2' : ['hppa1.1'],
'hppa2.0' : ['hppa1.2'],
'i370' : ['noarch'],
'i386' : ['noarch','fat'],
'i486' : ['i386'],
'i586' : ['i486'],
'i686' : ['i586'],
'ia32e' : ['x86_64','athlon','noarch'],
'ia64' : ['noarch'],
'milan' : ['m68kmint','noarch'],
'mips' : ['noarch'],
'mipsel' : ['noarch'],
'osfmach3_i386' : ['i486'],
'osfmach3_i486' : ['i486','osfmach3_i386'],
'osfmach3_i586' : ['i586','osfmach3_i486'],
'osfmach3_i686' : ['i686','osfmach3_i586'],
'osfmach3_ppc' : ['ppc'],
'parisc' : ['noarch'],
'pentium3' : ['i686'],
'pentium4' : ['pentium3'],
'powerpc' : ['ppc'],
'powerppc' : ['ppc'],
'ppc' : ['rs6000'],
'ppc32dy4' : ['ppc'],
'ppc64' : ['ppc'],
'ppc64iseries' : ['ppc64'],
'ppc64pseries' : ['ppc64'],
'ppc8260' : ['ppc'],
'ppc8560' : ['ppc'],
'ppciseries' : ['ppc'],
'ppcpseries' : ['ppc'],
'rs6000' : ['noarch','fat'],
's390' : ['noarch'],
's390x' : ['s390','noarch'],
'sh3' : ['noarch'],
'sh4' : ['noarch'],
'sh4a' : ['sh4'],
'sparc' : ['noarch'],
'sparc64' : ['sparcv9'],
'sparc64v' : ['sparc64'],
'sparcv8' : ['sparc'],
'sparcv9' : ['sparcv8'],
'sparcv9v' : ['sparcv9'],
'sun4c' : ['sparc'],
'sun4d' : ['sparc'],
'sun4m' : ['sparc'],
'sun4u' : ['sparc64'],
'x86_64' : ['amd64','athlon','noarch'],
}
buildarchtranslate = {
'alphaev5' : ['alpha'],
'alphaev56' : ['alpha'],
'alphaev6' : ['alpha'],
'alphaev67' : ['alpha'],
'alphapca56' : ['alpha'],
'amd64' : ['x86_64'],
'armv3l' : ['armv3l'],
'armv4b' : ['armv4b'],
'armv4l' : ['armv4l'],
'armv4tl' : ['armv4tl'],
'armv5tejl' : ['armv5tejl'],
'armv5tel' : ['armv5tel'],
'armv6l' : ['armv6l'],
'armv7l' : ['armv7l'],
'atariclone' : ['m68kmint'],
'atarist' : ['m68kmint'],
'atariste' : ['m68kmint'],
'ataritt' : ['m68kmint'],
'athlon' : ['i386'],
'falcon' : ['m68kmint'],
'geode' : ['i386'],
'hades' : ['m68kmint'],
'i386' : ['i386'],
'i486' : ['i386'],
'i586' : ['i386'],
'i686' : ['i386'],
'ia32e' : ['x86_64'],
'ia64' : ['ia64'],
'milan' : ['m68kmint'],
'osfmach3_i386' : ['i386'],
'osfmach3_i486' : ['i386'],
'osfmach3_i586' : ['i386'],
'osfmach3_i686' : ['i386'],
'osfmach3_ppc' : ['ppc'],
'pentium3' : ['i386'],
'pentium4' : ['i386'],
'powerpc' : ['ppc'],
'powerppc' : ['ppc'],
'ppc32dy4' : ['ppc'],
'ppc64iseries' : ['ppc64'],
'ppc64pseries' : ['ppc64'],
'ppc8260' : ['ppc'],
'ppc8560' : ['ppc'],
'ppciseries' : ['ppc'],
'ppcpseries' : ['ppc'],
's390' : ['s390'],
's390x' : ['s390x'],
'sh3' : ['sh3'],
'sh4' : ['sh4'],
'sh4a' : ['sh4'],
'sparc64v' : ['sparc64'],
'sparcv8' : ['sparc'],
'sparcv9' : ['sparc'],
'sparcv9v' : ['sparc'],
'sun4c' : ['sparc'],
'sun4d' : ['sparc'],
'sun4m' : ['sparc'],
'sun4u' : ['sparc64'],
'x86_64' : ['x86_64'],
}
optflags = {
'alpha' : ['-O2','-g','-mieee'],
'alphaev5' : ['-O2','-g','-mieee','-mtune=ev5'],
'alphaev56' : ['-O2','-g','-mieee','-mtune=ev56'],
'alphaev6' : ['-O2','-g','-mieee','-mtune=ev6'],
'alphaev67' : ['-O2','-g','-mieee','-mtune=ev67'],
'alphapca56' : ['-O2','-g','-mieee','-mtune=pca56'],
'amd64' : ['-O2','-g'],
'armv3l' : ['-O2','-g','-march=armv3'],
'armv4b' : ['-O2','-g','-march=armv4'],
'armv4l' : ['-O2','-g','-march=armv4'],
'armv4tl' : ['-O2','-g','-march=armv4t'],
'armv5tejl' : ['-O2','-g','-march=armv5te'],
'armv5tel' : ['-O2','-g','-march=armv5te'],
'armv6l' : ['-O2','-g','-march=armv6'],
'armv7l' : ['-O2','-g','-march=armv7'],
'atariclone' : ['-O2','-g','-fomit-frame-pointer'],
'atarist' : ['-O2','-g','-fomit-frame-pointer'],
'atariste' : ['-O2','-g','-fomit-frame-pointer'],
'ataritt' : ['-O2','-g','-fomit-frame-pointer'],
'athlon' : ['-O2','-g','-march=athlon'],
'falcon' : ['-O2','-g','-fomit-frame-pointer'],
'fat' : ['-O2','-g','-arch','i386','-arch','ppc'],
'geode' : ['-Os','-g','-m32','-march=geode'],
'hades' : ['-O2','-g','-fomit-frame-pointer'],
'hppa1.0' : ['-O2','-g','-mpa-risc-1-0'],
'hppa1.1' : ['-O2','-g','-mpa-risc-1-0'],
'hppa1.2' : ['-O2','-g','-mpa-risc-1-0'],
'hppa2.0' : ['-O2','-g','-mpa-risc-1-0'],
'i386' : ['-O2','-g','-march=i386','-mtune=i686'],
'i486' : ['-O2','-g','-march=i486'],
'i586' : ['-O2','-g','-march=i586'],
'i686' : ['-O2','-g','-march=i686'],
'ia32e' : ['-O2','-g'],
'ia64' : ['-O2','-g'],
'm68k' : ['-O2','-g','-fomit-frame-pointer'],
'milan' : ['-O2','-g','-fomit-frame-pointer'],
'mips' : ['-O2','-g'],
'mipsel' : ['-O2','-g'],
'parisc' : ['-O2','-g','-mpa-risc-1-0'],
'pentium3' : ['-O2','-g','-march=pentium3'],
'pentium4' : ['-O2','-g','-march=pentium4'],
'ppc' : ['-O2','-g','-fsigned-char'],
'ppc32dy4' : ['-O2','-g','-fsigned-char'],
'ppc64' : ['-O2','-g','-fsigned-char'],
'ppc8260' : ['-O2','-g','-fsigned-char'],
'ppc8560' : ['-O2','-g','-fsigned-char'],
'ppciseries' : ['-O2','-g','-fsigned-char'],
'ppcpseries' : ['-O2','-g','-fsigned-char'],
's390' : ['-O2','-g'],
's390x' : ['-O2','-g'],
'sh3' : ['-O2','-g'],
'sh4' : ['-O2','-g','-mieee'],
'sh4a' : ['-O2','-g','-mieee'],
'sparc' : ['-O2','-g','-m32','-mtune=ultrasparc'],
'sparc64' : ['-O2','-g','-m64','-mtune=ultrasparc'],
'sparc64v' : ['-O2','-g','-m64','-mtune=niagara'],
'sparcv8' : ['-O2','-g','-m32','-mtune=ultrasparc','-mv8'],
'sparcv9' : ['-O2','-g','-m32','-mtune=ultrasparc'],
'sparcv9v' : ['-O2','-g','-m32','-mtune=niagara'],
'x86_64' : ['-O2','-g'],
}
arch_canon = {
'IP' : ['sgi','7'],
'alpha' : ['alpha','2'],
'alphaev5' : ['alphaev5','2'],
'alphaev56' : ['alphaev56','2'],
'alphaev6' : ['alphaev6','2'],
'alphaev67' : ['alphaev67','2'],
'alphapca56' : ['alphapca56','2'],
'amd64' : ['amd64','1'],
'armv3l' : ['armv3l','12'],
'armv4b' : ['armv4b','12'],
'armv4l' : ['armv4l','12'],
'armv5tejl' : ['armv5tejl','12'],
'armv5tel' : ['armv5tel','12'],
'armv6l' : ['armv6l','12'],
'armv7l' : ['armv7l','12'],
'atariclone' : ['m68kmint','13'],
'atarist' : ['m68kmint','13'],
'atariste' : ['m68kmint','13'],
'ataritt' : ['m68kmint','13'],
'athlon' : ['athlon','1'],
'falcon' : ['m68kmint','13'],
'geode' : ['geode','1'],
'hades' : ['m68kmint','13'],
'i370' : ['i370','14'],
'i386' : ['i386','1'],
'i486' : ['i486','1'],
'i586' : ['i586','1'],
'i686' : ['i686','1'],
'ia32e' : ['ia32e','1'],
'ia64' : ['ia64','9'],
'm68k' : ['m68k','6'],
'm68kmint' : ['m68kmint','13'],
'milan' : ['m68kmint','13'],
'mips' : ['mips','4'],
'mipsel' : ['mipsel','11'],
'pentium3' : ['pentium3','1'],
'pentium4' : ['pentium4','1'],
'ppc' : ['ppc','5'],
'ppc32dy4' : ['ppc32dy4','5'],
'ppc64' : ['ppc64','16'],
'ppc64iseries' : ['ppc64iseries','16'],
'ppc64pseries' : ['ppc64pseries','16'],
'ppc8260' : ['ppc8260','5'],
'ppc8560' : ['ppc8560','5'],
'ppciseries' : ['ppciseries','5'],
'ppcpseries' : ['ppcpseries','5'],
'rs6000' : ['rs6000','8'],
's390' : ['s390','14'],
's390x' : ['s390x','15'],
'sh' : ['sh','17'],
'sh3' : ['sh3','17'],
'sh4' : ['sh4','17'],
'sh4a' : ['sh4a','17'],
'sparc' : ['sparc','3'],
'sparc64' : ['sparc64','2'],
'sparc64v' : ['sparc64v','2'],
'sparcv8' : ['sparcv8','3'],
'sparcv9' : ['sparcv9','3'],
'sparcv9v' : ['sparcv9v','3'],
'sun4' : ['sparc','3'],
'sun4c' : ['sparc','3'],
'sun4d' : ['sparc','3'],
'sun4m' : ['sparc','3'],
'sun4u' : ['sparc64','2'],
'x86_64' : ['x86_64','1'],
'xtensa' : ['xtensa','18'],
}
# End of rpmrc dictionaries (Marker, don't change or remove!)
def defaultMachine(use_rpm_default=True):
""" Return the canonicalized machine name. """
if use_rpm_default:
try:
# This should be the most reliable way to get the default arch
rmachine = subprocess.check_output(['rpm', '--eval=%_target_cpu'], shell=False).rstrip()
rmachine = SCons.Util.to_str(rmachine)
except Exception as e:
# Something went wrong, try again by looking up platform.machine()
return defaultMachine(False)
else:
rmachine = platform.machine()
# Try to lookup the string in the canon table
if rmachine in arch_canon:
rmachine = arch_canon[rmachine][0]
return rmachine
def defaultSystem():
""" Return the canonicalized system name. """
rsystem = platform.system()
# Try to lookup the string in the canon tables
if rsystem in os_canon:
rsystem = os_canon[rsystem][0]
return rsystem
def defaultNames():
""" Return the canonicalized machine and system name. """
return defaultMachine(), defaultSystem()
def updateRpmDicts(rpmrc, pyfile):
""" Read the given rpmrc file with RPM definitions and update the
info dictionaries in the file pyfile with it.
The arguments will usually be 'rpmrc.in' from a recent RPM source
tree, and 'rpmutils.py' referring to this script itself.
See also usage() below.
"""
try:
# Read old rpmutils.py file
with open(pyfile,"r") as f:
oldpy = f.readlines()
# Read current rpmrc.in file
with open(rpmrc,"r") as f:
rpm = f.readlines()
# Parse for data
data = {}
# Allowed section names that get parsed
sections = ['optflags',
'arch_canon',
'os_canon',
'buildarchtranslate',
'arch_compat',
'os_compat',
'buildarch_compat']
for l in rpm:
l = l.rstrip('\n').replace(':',' ')
# Skip comments
if l.lstrip().startswith('#'):
continue
tokens = l.strip().split()
if len(tokens):
key = tokens[0]
if key in sections:
# Have we met this section before?
if tokens[0] not in data:
# No, so insert it
data[key] = {}
# Insert data
data[key][tokens[1]] = tokens[2:]
# Write new rpmutils.py file
with open(pyfile,"w") as out:
pm = 0
for l in oldpy:
if pm:
if l.startswith('# End of rpmrc dictionaries'):
pm = 0
out.write(l)
else:
out.write(l)
if l.startswith('# Start of rpmrc dictionaries'):
pm = 1
# Write data sections to single dictionaries
for key, entries in data.items():
out.write("%s = {\n" % key)
for arch in sorted(entries.keys()):
out.write(" '%s' : ['%s'],\n" % (arch, "','".join(entries[arch])))
out.write("}\n\n")
except:
pass
def usage():
print("rpmutils.py rpmrc.in rpmutils.py")
def main():
import sys
if len(sys.argv) < 3:
usage()
sys.exit(0)
updateRpmDicts(sys.argv[1], sys.argv[2])
if __name__ == "__main__":
main()
| mongodb/mongo | src/third_party/scons-3.1.2/scons-local-3.1.2/SCons/Tool/rpmutils.py | rpmutils.py | py | 15,575 | python | en | code | 24,670 | github-code | 36 |
11732621418 | from collections import Counter
##研究health样本和IBD样本内的物种:(common,health only,IBD only)
#日后还要分析他们分别在多少个样本内出现以及abundance。
health_path="/home/zc/IDBdata/all_data/species-analysis/non-species-common0.1-ave.csv"
ibd_path="/home/zc/IDBdata/all_data/species-analysis/ibd-species-common0.1-ave.csv"
health_spe_set=set()
health_com_set=set()
ibd_spe_set=set()
ibd_com_set=set()
# read health file
health_file=open(health_path,'r')
for line in health_file:
line=line.strip()
l1=line.split('\t')
health_spe_set.add(l1[0])
health_file.close()
# read IBD file.
ibd_file=open(ibd_path,'r')
for line in ibd_file:
line=line.strip()
l1=line.split('\t')
ibd_spe_set.add(l1[0])
ibd_file.close()
## analysis the both.
##species交集
spe_intersection_set=ibd_spe_set.intersection(health_spe_set)
##species差集
health_spe_diff=health_spe_set.difference(ibd_spe_set)
ibd_spe_diff=ibd_spe_set.difference(health_spe_set)
#species并集
spe_union_set=ibd_spe_set.union(health_spe_set)
print("species----------------- species -------------------------species")
print("spe_intersection:"+str(len(spe_intersection_set)))
print("health_spe_diff:"+str(len(health_spe_diff)))
print("ibd_spe_diff:"+str(len(ibd_spe_diff)))
print("spe_union_set:"+str(len(spe_union_set)))
species_list=[ "Clostridium_hathewayi", "Clostridium_bolteae",
"Escherichia_coli","Eubacterium_rectale","Faecalibacterium_prausnitzii","Haemophilus_parainfluenzae",
"Klebsiella_pneumoniae","Roseburia_hominis", "Ruminococcus_torques", "Ruminococcus_gnavus",
"Bacteroides_fragilis","Bifidobacterium_adolescentis","Clostridium_leptum","Dialister_invisus","Prevotella_copri"]
print("no list common")
no_list_common=[]
for specie in species_list:
if specie not in spe_intersection_set:
no_list_common.append(specie)
print(len(no_list_common))
print(no_list_common)
print("no list ibd")
no_list_ibd=[]
for specie in species_list:
if specie not in ibd_spe_set:
no_list_ibd.append(specie)
print(len(no_list_ibd))
print(no_list_ibd)
print("no list nonibd")
no_list_nonibd=[]
for specie in species_list:
if specie not in health_spe_set:
no_list_nonibd.append(specie)
print(no_list_nonibd)
print(len(no_list_nonibd))
| zhangchen97/Metabolic-Network | analysis/onLy_species.py | onLy_species.py | py | 2,297 | python | en | code | 1 | github-code | 36 |
31056572667 | from django.contrib.auth.models import AbstractBaseUser, UserManager, \
PermissionsMixin
from django.core import validators
from django.db import models
from django.utils import timezone
from accounts import constants
class User(AbstractBaseUser, PermissionsMixin):
#: The Permission level for this user
permission = models.CharField(max_length=40, blank=True, null=True,
choices=constants.PERMISSION_CHOICES)
"""
An abstract base class implementing a fully featured User model with
admin-compliant permissions.
Username, password and email are required. Other fields are optional.
"""
username = models.CharField(
'username', max_length=30, unique=True, help_text=_(
'Required. 30 characters or fewer. Letters, digits and '
'@/./+/-/_ only.'),
validators=[
validators.RegexValidator(r'^[\w.@+-]+$', _(
'Enter a valid username.'), 'invalid')
])
first_name = models.CharField('first name', max_length=30, blank=True)
last_name = models.CharField('last name', max_length=30, blank=True)
email = models.EmailField('email address', blank=True)
is_staff = models.BooleanField(
'staff status', default=False,
help_text='Designates whether the user can log into this admin site.')
is_active = models.BooleanField('active', default=True)
date_joined = models.DateTimeField('date joined', default=timezone.now)
company = models.ForeignKey('company.Company', on_delete=models.SET_NULL)
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
def get_content_data(self):
content = {
'permission': self.permission,
'username': self.username,
'first_name': self.first_name,
'last_name': self.last_name,
'email': self.email,
'is_staff': self.is_staff,
'is_active': self.is_active,
'date_joined': self.date_joined,
}
return content
def __unicode__(self):
if self.first_name:
if self.last_name:
return "{0} {1}'s Profile".format(
self.first_name, self.last_name)
else:
return "{0}'s Profile".format(self.first_name)
else:
return "{0}'s Profile".format(self.username)
def save(self, *args, **kwargs):
super(User, self).save(*args, **kwargs)
| sublime1809/pto_tracker | accounts/models.py | models.py | py | 2,840 | python | en | code | 0 | github-code | 36 |
6536609411 | #!/usr/bin/env python3
# https://www.urionlinejudge.com.br/judge/en/problems/view/1019
def decompose(total, value):
decomposed = total // value
return total - decomposed * value, decomposed
def main():
SECONDS = int(input())
SECONDS, HOURS = decompose(SECONDS, 60 * 60)
SECONDS, MINUTES = decompose(SECONDS, 60)
print(HOURS, ':', MINUTES, ':', SECONDS, sep = '')
# Start the execution if it's the main script
if __name__ == "__main__":
main()
| rafaelpascoalrodrigues/programming-contests | uri-online-judge/python3/1019.py | 1019.py | py | 483 | python | en | code | 0 | github-code | 36 |
38033757546 | import dataclasses
import pickle
import re
from collections.abc import Hashable
from datetime import datetime
from pathlib import Path
from typing import Callable, ClassVar, Dict, FrozenSet, List, Optional, Set, Union
import pytest
from typing_extensions import Literal
import pydantic
from pydantic import BaseModel, Extra, ValidationError, validator
def test_simple():
    """Positional and keyword construction both coerce values to the field types."""

    @pydantic.dataclasses.dataclass
    class MyDataclass:
        a: int
        b: float

    # String inputs are parsed into int/float by the generated validators.
    first = MyDataclass('1', '2.5')
    assert first.a == 1
    assert first.b == 2.5

    # Keyword arguments may be supplied in any order.
    second = MyDataclass(b=10, a=20)
    assert second.a == 20
    assert second.b == 10
def test_model_name():
    """A field literally named ``model_name`` works both positionally and by keyword."""

    @pydantic.dataclasses.dataclass
    class MyDataClass:
        model_name: str

    # Construct the same value two ways and check both round-trip.
    for obj in (MyDataClass('foo'), MyDataClass(model_name='foo')):
        assert obj.model_name == 'foo'
def test_value_error():
    """An unparseable field value raises ValidationError pointing at that field."""

    @pydantic.dataclasses.dataclass
    class MyDataclass:
        a: int
        b: int

    with pytest.raises(ValidationError) as exc_info:
        MyDataclass(1, 'wrong')

    expected = [{'loc': ('b',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}]
    assert exc_info.value.errors() == expected
def test_frozen():
    """``frozen=True`` makes instances immutable after construction."""

    @pydantic.dataclasses.dataclass(frozen=True)
    class MyDataclass:
        a: int

    frozen_obj = MyDataclass(1)
    assert frozen_obj.a == 1

    # Any attribute assignment on a frozen dataclass raises AttributeError.
    with pytest.raises(AttributeError):
        frozen_obj.a = 7
def test_validate_assignment():
    """With ``validate_assignment``, attribute writes are coerced like init args."""

    class Config:
        validate_assignment = True

    @pydantic.dataclasses.dataclass(config=Config)
    class MyDataclass:
        a: int

    obj = MyDataclass(1)
    assert obj.a == 1

    obj.a = '7'  # the string is parsed to an int on assignment
    assert obj.a == 7
def test_validate_assignment_error():
    """Invalid values assigned under ``validate_assignment`` raise ValidationError."""

    @pydantic.dataclasses.dataclass(config=dict(validate_assignment=True))
    class MyDataclass:
        a: int

    obj = MyDataclass(1)

    with pytest.raises(ValidationError) as exc_info:
        obj.a = 'xxx'
    expected = [{'loc': ('a',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}]
    assert exc_info.value.errors() == expected
def test_not_validate_assignment():
    """Without ``validate_assignment``, attribute writes are stored verbatim."""

    @pydantic.dataclasses.dataclass
    class MyDataclass:
        a: int

    obj = MyDataclass(1)
    assert obj.a == 1

    obj.a = '7'  # no coercion on plain assignment: the raw string is kept
    assert obj.a == '7'
def test_validate_assignment_value_change():
    """Field validators run at construction time and again on each assignment."""

    class Config:
        validate_assignment = True

    @pydantic.dataclasses.dataclass(config=Config, frozen=False)
    class MyDataclass:
        a: int

        @validator('a')
        def double_a(cls, v):
            return v * 2

    obj = MyDataclass(2)
    assert obj.a == 4  # validator doubled the constructor argument

    obj.a = 3
    assert obj.a == 6  # validator also doubled the assigned value
def test_validate_assignment_extra():
    """Attributes that are not declared fields may still be set freely."""

    class Config:
        validate_assignment = True

    @pydantic.dataclasses.dataclass(config=Config, frozen=False)
    class MyDataclass:
        a: int

    obj = MyDataclass(1)
    assert obj.a == 1

    # Undeclared attributes bypass validation and keep whatever was assigned.
    obj.extra_field = 1.23
    assert obj.extra_field == 1.23
    obj.extra_field = 'bye'
    assert obj.extra_field == 'bye'
def test_post_init():
    """``__post_init__`` is invoked during construction."""
    post_init_called = False

    @pydantic.dataclasses.dataclass
    class MyDataclass:
        a: int

        def __post_init__(self):
            nonlocal post_init_called
            post_init_called = True

    obj = MyDataclass('1')
    assert obj.a == 1  # the string input was still coerced to int
    assert post_init_called
def test_post_init_validation():
    """Contrast a stdlib dataclass with its pydantic-wrapped counterpart.

    The stdlib class runs no validation, so ``__post_init__`` sees the raw
    string ``'2'`` and string repetition yields ``'22'``.  The pydantic wrapper
    runs ``__post_init__`` before parsing (also producing ``'22'``), then
    coerces the field to ``int`` (22), then runs ``__post_init_post_parse__``
    (22 + 1 == 23).
    """

    @dataclasses.dataclass
    class DC:
        a: int

        def __post_init__(self):
            # Runs on the raw input; for the plain dataclass this is str * int.
            self.a *= 2

        def __post_init_post_parse__(self):
            # Only honored by the pydantic wrapper, after coercion to int.
            self.a += 1

    PydanticDC = pydantic.dataclasses.dataclass(DC)
    assert DC(a='2').a == '22'
    assert PydanticDC(a='2').a == 23
def test_post_init_inheritance_chain():
    """A subclass ``__post_init__`` can chain to the parent's via ``super()``."""
    parent_post_init_called = False
    post_init_called = False

    @pydantic.dataclasses.dataclass
    class ParentDataclass:
        a: int

        def __post_init__(self):
            nonlocal parent_post_init_called
            parent_post_init_called = True

    @pydantic.dataclasses.dataclass
    class MyDataclass(ParentDataclass):
        b: int

        def __post_init__(self):
            super().__post_init__()
            nonlocal post_init_called
            post_init_called = True

    obj = MyDataclass(a=1, b=2)
    assert obj.a == 1
    assert obj.b == 2
    # Both hooks fired: the child's own, and the parent's via super().
    assert parent_post_init_called
    assert post_init_called
def test_post_init_post_parse():
    """``__post_init_post_parse__`` runs as part of construction."""
    hook_ran = False

    @pydantic.dataclasses.dataclass
    class MyDataclass:
        a: int

        def __post_init_post_parse__(self):
            nonlocal hook_ran
            hook_ran = True

    obj = MyDataclass('1')
    assert obj.a == 1
    assert hook_ran
def test_post_init_post_parse_types():
    """Pin down when nested values are converted relative to the two hooks.

    Inside ``__post_init__`` the nested field still holds the raw ``dict``
    that was passed in; by the time ``__post_init_post_parse__`` runs it has
    been parsed into a ``CustomType`` instance.
    """

    @pydantic.dataclasses.dataclass
    class CustomType:
        b: int

    @pydantic.dataclasses.dataclass
    class MyDataclass:
        a: CustomType

        def __post_init__(self):
            # Before parsing: the field is still the raw input dict.
            assert type(self.a) == dict

        def __post_init_post_parse__(self):
            # After parsing: the dict has become a CustomType instance.
            assert type(self.a) == CustomType

    d = MyDataclass(**{'a': {'b': 1}})
    assert d.a.b == 1
def test_post_init_assignment():
    """A ``field(init=False)`` attribute can be computed inside ``__post_init__``."""
    from dataclasses import field

    # Based on: https://docs.python.org/3/library/dataclasses.html#post-init-processing
    @pydantic.dataclasses.dataclass
    class C:
        a: float
        b: float
        c: float = field(init=False)

        def __post_init__(self):
            self.c = self.a + self.b

    obj = C(0.1, 0.2)
    assert obj.a == 0.1
    assert obj.b == 0.2
    # Plain float addition, so the usual binary-rounding artifact appears.
    assert obj.c == 0.30000000000000004
def test_inheritance():
    """Fields inherited from a parent dataclass are still validated on the child."""

    @pydantic.dataclasses.dataclass
    class A:
        a: str = None

    @pydantic.dataclasses.dataclass
    class B(A):
        b: int = None

    child = B(a='a', b=12)
    assert child.a == 'a'
    assert child.b == 12

    # The child's int field still rejects non-numeric input.
    with pytest.raises(ValidationError):
        B(a='a', b='b')
def test_validate_long_string_error():
    """``max_anystr_length`` from the config is enforced at construction time."""

    class Config:
        max_anystr_length = 3

    @pydantic.dataclasses.dataclass(config=Config)
    class MyDataclass:
        a: str

    with pytest.raises(ValidationError) as exc_info:
        MyDataclass('xxxx')

    expected = [
        {
            'loc': ('a',),
            'msg': 'ensure this value has at most 3 characters',
            'type': 'value_error.any_str.max_length',
            'ctx': {'limit_value': 3},
        }
    ]
    assert exc_info.value.errors() == expected
def test_validate_assigment_long_string_error():
    """String length limits also apply to assignments under ``validate_assignment``.

    NOTE(review): "assigment" in the test name is a pre-existing typo; kept so
    the test id stays stable.
    """

    class Config:
        max_anystr_length = 3
        validate_assignment = True

    @pydantic.dataclasses.dataclass(config=Config)
    class MyDataclass:
        a: str

    d = MyDataclass('xxx')
    with pytest.raises(ValidationError) as exc_info:
        d.a = 'xxxx'

    # The generated inner model's config must inherit from BaseModel.Config.
    assert issubclass(MyDataclass.__pydantic_model__.__config__, BaseModel.Config)
    assert exc_info.value.errors() == [
        {
            'loc': ('a',),
            'msg': 'ensure this value has at most 3 characters',
            'type': 'value_error.any_str.max_length',
            'ctx': {'limit_value': 3},
        }
    ]
def test_no_validate_assigment_long_string_error():
    """With ``validate_assignment`` off, over-length strings may be assigned freely."""

    class Config:
        max_anystr_length = 3
        validate_assignment = False

    @pydantic.dataclasses.dataclass(config=Config)
    class MyDataclass:
        a: str

    obj = MyDataclass('xxx')
    obj.a = 'xxxx'  # no validation runs on assignment, so the long value sticks
    assert obj.a == 'xxxx'
def test_nested_dataclass():
    """A nested pydantic dataclass field accepts an instance, a tuple, or a dict.

    Anything else is rejected with a ``type_error.dataclass`` whose context
    names the expected class.
    """

    @pydantic.dataclasses.dataclass
    class Nested:
        number: int

    @pydantic.dataclasses.dataclass
    class Outer:
        n: Nested

    # 1) An existing Nested instance is accepted as-is (value still coerced).
    navbar = Outer(n=Nested(number='1'))
    assert isinstance(navbar.n, Nested)
    assert navbar.n.number == 1

    # 2) A positional tuple is unpacked into Nested's fields.
    navbar = Outer(n=('2',))
    assert isinstance(navbar.n, Nested)
    assert navbar.n.number == 2

    # 3) A dict is treated as keyword arguments for Nested.
    navbar = Outer(n={'number': '3'})
    assert isinstance(navbar.n, Nested)
    assert navbar.n.number == 3

    # A bare string is none of the accepted shapes.
    with pytest.raises(ValidationError) as exc_info:
        Outer(n='not nested')
    assert exc_info.value.errors() == [
        {
            'loc': ('n',),
            'msg': 'instance of Nested, tuple or dict expected',
            'type': 'type_error.dataclass',
            'ctx': {'class_name': 'Nested'},
        }
    ]

    # Inner-field errors are reported with the full nested location.
    with pytest.raises(ValidationError) as exc_info:
        Outer(n=('x',))
    assert exc_info.value.errors() == [
        {'loc': ('n', 'number'), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
    ]
def test_arbitrary_types_allowed():
class Button:
def __init__(self, href: str):
self.href = href
class Config:
arbitrary_types_allowed = True
@pydantic.dataclasses.dataclass(config=Config)
class Navbar:
button: Button
btn = Button(href='a')
navbar = Navbar(button=btn)
assert navbar.button.href == 'a'
with pytest.raises(ValidationError) as exc_info:
Navbar(button=('b',))
assert exc_info.value.errors() == [
{
'loc': ('button',),
'msg': 'instance of Button expected',
'type': 'type_error.arbitrary_type',
'ctx': {'expected_arbitrary_type': 'Button'},
}
]
def test_nested_dataclass_model():
@pydantic.dataclasses.dataclass
class Nested:
number: int
class Outer(BaseModel):
n: Nested
navbar = Outer(n=Nested(number='1'))
assert navbar.n.number == 1
def test_fields():
@pydantic.dataclasses.dataclass
class User:
id: int
name: str = 'John Doe'
signup_ts: datetime = None
user = User(id=123)
fields = user.__pydantic_model__.__fields__
assert fields['id'].required is True
assert fields['id'].default is None
assert fields['name'].required is False
assert fields['name'].default == 'John Doe'
assert fields['signup_ts'].required is False
assert fields['signup_ts'].default is None
def test_default_factory_field():
    """``dataclasses.field(default_factory=...)`` is reflected on the model fields."""
    @pydantic.dataclasses.dataclass
    class User:
        id: int
        aliases: Dict[str, str] = dataclasses.field(default_factory=lambda: {'John': 'Joey'})
    user = User(id=123)
    fields = user.__pydantic_model__.__fields__
    assert fields['id'].required is True
    assert fields['id'].default is None
    assert fields['aliases'].required is False
    assert fields['aliases'].default_factory() == {'John': 'Joey'}
def test_default_factory_singleton_field():
    """A ``default_factory`` may return a shared singleton; pydantic must not copy it."""
    class MySingleton:
        pass
    class MyConfig:
        arbitrary_types_allowed = True
    MY_SINGLETON = MySingleton()
    @pydantic.dataclasses.dataclass(config=MyConfig)
    class Foo:
        singleton: MySingleton = dataclasses.field(default_factory=lambda: MY_SINGLETON)
    # Returning a singleton from a default_factory is supported
    assert Foo().singleton is Foo().singleton
def test_schema():
    """JSON schema generation covers defaults, factories, field metadata and pydantic.Field."""
    @pydantic.dataclasses.dataclass
    class User:
        id: int
        name: str = 'John Doe'
        aliases: Dict[str, str] = dataclasses.field(default_factory=lambda: {'John': 'Joey'})
        signup_ts: datetime = None
        age: Optional[int] = dataclasses.field(
            default=None, metadata=dict(title='The age of the user', description='do not lie!')
        )
        height: Optional[int] = pydantic.Field(None, title='The height in cm', ge=50, le=300)
    user = User(id=123)
    assert user.__pydantic_model__.schema() == {
        'title': 'User',
        'type': 'object',
        'properties': {
            'id': {'title': 'Id', 'type': 'integer'},
            'name': {'title': 'Name', 'default': 'John Doe', 'type': 'string'},
            'aliases': {
                'title': 'Aliases',
                'type': 'object',
                'additionalProperties': {'type': 'string'},
            },
            'signup_ts': {'title': 'Signup Ts', 'type': 'string', 'format': 'date-time'},
            'age': {
                'title': 'The age of the user',
                'description': 'do not lie!',
                'type': 'integer',
            },
            'height': {
                'title': 'The height in cm',
                'minimum': 50,
                'maximum': 300,
                'type': 'integer',
            },
        },
        'required': ['id'],
    }
def test_nested_schema():
    """Nested dataclasses are emitted as ``$ref`` entries under ``definitions``."""
    @pydantic.dataclasses.dataclass
    class Nested:
        number: int
    @pydantic.dataclasses.dataclass
    class Outer:
        n: Nested
    assert Outer.__pydantic_model__.schema() == {
        'title': 'Outer',
        'type': 'object',
        'properties': {'n': {'$ref': '#/definitions/Nested'}},
        'required': ['n'],
        'definitions': {
            'Nested': {
                'title': 'Nested',
                'type': 'object',
                'properties': {'number': {'title': 'Number', 'type': 'integer'}},
                'required': ['number'],
            }
        },
    }
def test_initvar():
    """``InitVar`` pseudo-fields are passed to ``__init__`` but never stored on the instance."""
    InitVar = dataclasses.InitVar
    @pydantic.dataclasses.dataclass
    class TestInitVar:
        x: int
        y: InitVar
    tiv = TestInitVar(1, 2)
    assert tiv.x == 1
    with pytest.raises(AttributeError):
        tiv.y
def test_derived_field_from_initvar():
    """An ``init=False`` field may be derived from an ``InitVar`` in ``__post_init__``."""
    InitVar = dataclasses.InitVar
    @pydantic.dataclasses.dataclass
    class DerivedWithInitVar:
        plusone: int = dataclasses.field(init=False)
        number: InitVar[int]
        def __post_init__(self, number):
            self.plusone = number + 1
    derived = DerivedWithInitVar(1)
    assert derived.plusone == 2
    # a non-numeric InitVar blows up inside __post_init__ with a plain TypeError
    with pytest.raises(TypeError):
        DerivedWithInitVar('Not A Number')
def test_initvars_post_init():
    """``__post_init__`` receives InitVars BEFORE validation: values are still raw strings."""
    @pydantic.dataclasses.dataclass
    class PathDataPostInit:
        path: Path
        base_path: dataclasses.InitVar[Optional[Path]] = None
        def __post_init__(self, base_path):
            if base_path is not None:
                self.path = base_path / self.path
    path_data = PathDataPostInit('world')
    assert 'path' in path_data.__dict__
    assert 'base_path' not in path_data.__dict__
    assert path_data.path == Path('world')
    # both operands are still plain strings here, so `/` fails
    with pytest.raises(TypeError) as exc_info:
        PathDataPostInit('world', base_path='/hello')
    assert str(exc_info.value) == "unsupported operand type(s) for /: 'str' and 'str'"
def test_initvars_post_init_post_parse():
    """``__post_init_post_parse__`` runs AFTER validation: InitVars are coerced to Path."""
    @pydantic.dataclasses.dataclass
    class PathDataPostInitPostParse:
        path: Path
        base_path: dataclasses.InitVar[Optional[Path]] = None
        def __post_init_post_parse__(self, base_path):
            if base_path is not None:
                self.path = base_path / self.path
    path_data = PathDataPostInitPostParse('world')
    assert 'path' in path_data.__dict__
    assert 'base_path' not in path_data.__dict__
    assert path_data.path == Path('world')
    assert PathDataPostInitPostParse('world', base_path='/hello').path == Path('/hello/world')
def test_classvar():
    """``ClassVar`` annotations are excluded from the generated ``__init__``."""
    @pydantic.dataclasses.dataclass
    class TestClassVar:
        klassvar: ClassVar = "I'm a Class variable"
        x: int
    tcv = TestClassVar(2)
    assert tcv.klassvar == "I'm a Class variable"
def test_frozenset_field():
    """``FrozenSet`` typed fields round-trip a frozenset unchanged."""
    @pydantic.dataclasses.dataclass
    class TestFrozenSet:
        set: FrozenSet[int]
    test_set = frozenset({1, 2, 3})
    object_under_test = TestFrozenSet(set=test_set)
    assert object_under_test.set == test_set
def test_inheritance_post_init():
    """A base class ``__post_init__`` is still invoked when constructing a subclass."""
    post_init_called = False
    @pydantic.dataclasses.dataclass
    class Base:
        a: int
        def __post_init__(self):
            nonlocal post_init_called
            post_init_called = True
    @pydantic.dataclasses.dataclass
    class Child(Base):
        b: int
    Child(a=1, b=2)
    assert post_init_called
def test_hashable_required():
    """A required ``Hashable`` field accepts None, rejects unhashables, and is mandatory."""
    @pydantic.dataclasses.dataclass
    class MyDataclass:
        v: Hashable
    MyDataclass(v=None)
    with pytest.raises(ValidationError) as exc_info:
        MyDataclass(v=[])
    assert exc_info.value.errors() == [
        {'loc': ('v',), 'msg': 'value is not a valid hashable', 'type': 'type_error.hashable'}
    ]
    # omitting a required field raises TypeError from the generated __init__
    with pytest.raises(TypeError) as exc_info:
        MyDataclass()
    assert "__init__() missing 1 required positional argument: 'v'" in str(exc_info.value)
@pytest.mark.parametrize('default', [1, None, ...])
def test_hashable_optional(default):
    """A ``Hashable`` field with any default (including Ellipsis) is optional to construct."""
    @pydantic.dataclasses.dataclass
    class MyDataclass:
        v: Hashable = default
    MyDataclass()
    MyDataclass(v=None)
def test_override_builtin_dataclass():
    """Wrapping a stdlib dataclass creates a validating subclass; the original stays lax."""
    @dataclasses.dataclass
    class File:
        hash: str
        name: Optional[str]
        size: int
        content: Optional[bytes] = None
    ValidFile = pydantic.dataclasses.dataclass(File)
    file = File(hash='xxx', name=b'whatever.txt', size='456')
    valid_file = ValidFile(hash='xxx', name=b'whatever.txt', size='456')
    # the raw stdlib dataclass performs no coercion...
    assert file.name == b'whatever.txt'
    assert file.size == '456'
    # ...while the wrapped one coerces bytes->str and str->int
    assert valid_file.name == 'whatever.txt'
    assert valid_file.size == 456
    assert isinstance(valid_file, File)
    assert isinstance(valid_file, ValidFile)
    with pytest.raises(ValidationError) as e:
        ValidFile(hash=[1], name='name', size=3)
    assert e.value.errors() == [{'loc': ('hash',), 'msg': 'str type expected', 'type': 'type_error.str'}]
def test_override_builtin_dataclass_2():
    """Stacking @pydantic.dataclass over @dataclasses.dataclass validates the subclass
    without affecting the stdlib base class."""
    @dataclasses.dataclass
    class Meta:
        modified_date: Optional[datetime]
        seen_count: int
    Meta(modified_date='not-validated', seen_count=0)
    @pydantic.dataclasses.dataclass
    @dataclasses.dataclass
    class File(Meta):
        filename: str
    # the base class must remain un-validated after decorating the subclass
    Meta(modified_date='still-not-validated', seen_count=0)
    f = File(filename=b'thefilename', modified_date='2020-01-01T00:00', seen_count='7')
    assert f.filename == 'thefilename'
    assert f.modified_date == datetime(2020, 1, 1, 0, 0)
    assert f.seen_count == 7
def test_override_builtin_dataclass_nested():
    """Stdlib dataclasses nested inside a wrapped one (or a BaseModel) are validated too."""
    @dataclasses.dataclass
    class Meta:
        modified_date: Optional[datetime]
        seen_count: int
    @dataclasses.dataclass
    class File:
        filename: str
        meta: Meta
    class Foo(BaseModel):
        file: File
    FileChecked = pydantic.dataclasses.dataclass(File)
    f = FileChecked(filename=b'thefilename', meta=Meta(modified_date='2020-01-01T00:00', seen_count='7'))
    assert f.filename == 'thefilename'
    assert f.meta.modified_date == datetime(2020, 1, 1, 0, 0)
    assert f.meta.seen_count == 7
    with pytest.raises(ValidationError) as e:
        FileChecked(filename=b'thefilename', meta=Meta(modified_date='2020-01-01T00:00', seen_count=['7']))
    assert e.value.errors() == [
        {'loc': ('meta', 'seen_count'), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
    ]
    foo = Foo.parse_obj(
        {
            'file': {
                'filename': b'thefilename',
                'meta': {'modified_date': '2020-01-01T00:00', 'seen_count': '7'},
            },
        }
    )
    assert foo.file.filename == 'thefilename'
    assert foo.file.meta.modified_date == datetime(2020, 1, 1, 0, 0)
    assert foo.file.meta.seen_count == 7
def test_override_builtin_dataclass_nested_schema():
    """Schema generation for a wrapped stdlib dataclass includes nested definitions."""
    @dataclasses.dataclass
    class Meta:
        modified_date: Optional[datetime]
        seen_count: int
    @dataclasses.dataclass
    class File:
        filename: str
        meta: Meta
    FileChecked = pydantic.dataclasses.dataclass(File)
    assert FileChecked.__pydantic_model__.schema() == {
        'definitions': {
            'Meta': {
                'properties': {
                    'modified_date': {'format': 'date-time', 'title': 'Modified ' 'Date', 'type': 'string'},
                    'seen_count': {'title': 'Seen Count', 'type': 'integer'},
                },
                'required': ['modified_date', 'seen_count'],
                'title': 'Meta',
                'type': 'object',
            }
        },
        'properties': {
            'filename': {'title': 'Filename', 'type': 'string'},
            'meta': {'$ref': '#/definitions/Meta'},
        },
        'required': ['filename', 'meta'],
        'title': 'File',
        'type': 'object',
    }
def test_inherit_builtin_dataclass():
    """A pydantic dataclass inheriting stdlib dataclasses validates inherited fields."""
    @dataclasses.dataclass
    class Z:
        z: int
    @dataclasses.dataclass
    class Y(Z):
        y: int
    @pydantic.dataclasses.dataclass
    class X(Y):
        x: int
    pika = X(x='2', y='4', z='3')
    assert pika.x == 2
    assert pika.y == 4
    assert pika.z == 3
def test_dataclass_arbitrary():
    """Arbitrary types inside a stdlib dataclass are accepted when the model allows them."""
    class ArbitraryType:
        def __init__(self):
            ...
    @dataclasses.dataclass
    class Test:
        foo: ArbitraryType
        bar: List[ArbitraryType]
    class TestModel(BaseModel):
        a: ArbitraryType
        b: Test
        class Config:
            arbitrary_types_allowed = True
    TestModel(a=ArbitraryType(), b=(ArbitraryType(), [ArbitraryType()]))
def test_forward_stdlib_dataclass_params():
    """``frozen=True`` on a stdlib dataclass is preserved when used as a model field."""
    @dataclasses.dataclass(frozen=True)
    class Item:
        name: str
    class Example(BaseModel):
        item: Item
        other: str
        class Config:
            arbitrary_types_allowed = True
    e = Example(item=Item(name='pika'), other='bulbi')
    e.other = 'bulbi2'
    # the nested frozen dataclass must still reject mutation
    with pytest.raises(dataclasses.FrozenInstanceError):
        e.item.name = 'pika2'
def test_pydantic_callable_field():
    """pydantic callable fields behaviour should be the same as stdlib dataclass"""
    def foo(arg1, arg2):
        return arg1, arg2
    def bar(x: int, y: float, z: str) -> bool:
        return str(x + y) == z
    class PydanticModel(BaseModel):
        required_callable: Callable
        required_callable_2: Callable[[int, float, str], bool]
        default_callable: Callable = foo
        default_callable_2: Callable[[int, float, str], bool] = bar
    @pydantic.dataclasses.dataclass
    class PydanticDataclass:
        required_callable: Callable
        required_callable_2: Callable[[int, float, str], bool]
        default_callable: Callable = foo
        default_callable_2: Callable[[int, float, str], bool] = bar
    @dataclasses.dataclass
    class StdlibDataclass:
        required_callable: Callable
        required_callable_2: Callable[[int, float, str], bool]
        default_callable: Callable = foo
        default_callable_2: Callable[[int, float, str], bool] = bar
    pyd_m = PydanticModel(required_callable=foo, required_callable_2=bar)
    pyd_dc = PydanticDataclass(required_callable=foo, required_callable_2=bar)
    std_dc = StdlibDataclass(required_callable=foo, required_callable_2=bar)
    # callables must be passed through by identity, never copied or wrapped
    assert (
        pyd_m.required_callable
        is pyd_m.default_callable
        is pyd_dc.required_callable
        is pyd_dc.default_callable
        is std_dc.required_callable
        is std_dc.default_callable
    )
    assert (
        pyd_m.required_callable_2
        is pyd_m.default_callable_2
        is pyd_dc.required_callable_2
        is pyd_dc.default_callable_2
        is std_dc.required_callable_2
        is std_dc.default_callable_2
    )
def test_pickle_overriden_builtin_dataclass(create_module):
    """A model holding a wrapped stdlib dataclass survives a pickle round-trip
    and keeps validate_assignment behaviour afterwards."""
    module = create_module(
        # language=Python
        """\
import dataclasses
import pydantic
@dataclasses.dataclass
class BuiltInDataclassForPickle:
    value: int
class ModelForPickle(pydantic.BaseModel):
    # pickle can only work with top level classes as it imports them
    dataclass: BuiltInDataclassForPickle
    class Config:
        validate_assignment = True
"""
    )
    obj = module.ModelForPickle(dataclass=module.BuiltInDataclassForPickle(value=5))
    pickled_obj = pickle.dumps(obj)
    restored_obj = pickle.loads(pickled_obj)
    assert restored_obj.dataclass.value == 5
    assert restored_obj == obj
    # ensure the restored dataclass is still a pydantic dataclass
    with pytest.raises(ValidationError, match='value\n +value is not a valid integer'):
        restored_obj.dataclass.value = 'value of a wrong type'
def test_config_field_info_create_model():
    """Per-field config metadata (``fields = {...}``) applies to dataclasses as to models."""
    # works
    class A1(BaseModel):
        a: str
        class Config:
            fields = {'a': {'description': 'descr'}}
    assert A1.schema()['properties'] == {'a': {'title': 'A', 'description': 'descr', 'type': 'string'}}
    @pydantic.dataclasses.dataclass(config=A1.Config)
    class A2:
        a: str
    assert A2.__pydantic_model__.schema()['properties'] == {
        'a': {'title': 'A', 'description': 'descr', 'type': 'string'}
    }
def gen_2162_dataclasses():
    """Yield (inner, outer) pairs mixing stdlib and pydantic frozen dataclasses
    in every nesting combination, as fixtures for test_issue_2162."""
    @dataclasses.dataclass(frozen=True)
    class StdLibFoo:
        a: str
        b: int
    @pydantic.dataclasses.dataclass(frozen=True)
    class PydanticFoo:
        a: str
        b: int
    @dataclasses.dataclass(frozen=True)
    class StdLibBar:
        c: StdLibFoo
    @pydantic.dataclasses.dataclass(frozen=True)
    class PydanticBar:
        c: PydanticFoo
    @dataclasses.dataclass(frozen=True)
    class StdLibBaz:
        c: PydanticFoo
    @pydantic.dataclasses.dataclass(frozen=True)
    class PydanticBaz:
        c: StdLibFoo
    foo = StdLibFoo(a='Foo', b=1)
    yield foo, StdLibBar(c=foo)
    foo = PydanticFoo(a='Foo', b=1)
    yield foo, PydanticBar(c=foo)
    foo = PydanticFoo(a='Foo', b=1)
    yield foo, StdLibBaz(c=foo)
    foo = StdLibFoo(a='Foo', b=1)
    yield foo, PydanticBaz(c=foo)
@pytest.mark.parametrize('foo,bar', gen_2162_dataclasses())
def test_issue_2162(foo, bar):
    """Regression for #2162: asdict/astuple/equality work across stdlib/pydantic nesting."""
    assert dataclasses.asdict(foo) == dataclasses.asdict(bar.c)
    assert dataclasses.astuple(foo) == dataclasses.astuple(bar.c)
    assert foo == bar.c
def test_issue_2383():
    """Regression for #2383: a custom ``__hash__`` on a stdlib dataclass is preserved."""
    @dataclasses.dataclass
    class A:
        s: str
        def __hash__(self):
            return 123
    class B(pydantic.BaseModel):
        a: A
    a = A('')
    b = B(a=a)
    assert hash(a) == 123
    assert hash(b.a) == 123
def test_issue_2398():
    """Regression for #2398: ``order=True`` comparison methods survive model wrapping."""
    @dataclasses.dataclass(order=True)
    class DC:
        num: int = 42
    class Model(pydantic.BaseModel):
        dc: DC
    real_dc = DC()
    model = Model(dc=real_dc)
    # This works as expected.
    assert real_dc <= real_dc
    assert model.dc <= model.dc
    assert real_dc <= model.dc
def test_issue_2424():
    """Regression for #2424: ``default_factory`` on a field of a pydantic dataclass
    that inherits from a stdlib dataclass must behave like the stdlib version."""
    @dataclasses.dataclass
    class Base:
        x: str
    @dataclasses.dataclass
    class Thing(Base):
        y: str = dataclasses.field(default_factory=str)
    # stdlib behaviour: the factory supplies '' when y is omitted
    assert Thing(x='hi').y == ''
    @pydantic.dataclasses.dataclass
    class ValidatedThing(Base):
        y: str = dataclasses.field(default_factory=str)
    # fix: the original duplicated the Thing assertion here; check ValidatedThing once
    assert ValidatedThing(x='hi').y == ''
def test_issue_2541():
    """Regression for #2541: nested frozen stdlib dataclasses parse and stay frozen."""
    @dataclasses.dataclass(frozen=True)
    class Infos:
        id: int
    @dataclasses.dataclass(frozen=True)
    class Item:
        name: str
        infos: Infos
    class Example(BaseModel):
        item: Item
    e = Example.parse_obj({'item': {'name': 123, 'infos': {'id': '1'}}})
    assert e.item.name == '123'
    assert e.item.infos.id == 1
    # frozen semantics must survive parsing
    with pytest.raises(dataclasses.FrozenInstanceError):
        e.item.infos.id = 2
def test_issue_2555():
    """Regression for #2555: schema generation for deeply nested stdlib dataclasses."""
    @dataclasses.dataclass
    class Span:
        first: int
        last: int
    @dataclasses.dataclass
    class LabeledSpan(Span):
        label: str
    @dataclasses.dataclass
    class BinaryRelation:
        subject: LabeledSpan
        object: LabeledSpan
        label: str
    @dataclasses.dataclass
    class Sentence:
        relations: BinaryRelation
    class M(pydantic.BaseModel):
        s: Sentence
    # the point is simply that schema generation does not raise
    assert M.schema()
def test_issue_2594():
    """Regression for #2594: an empty stdlib dataclass can be a validated field."""
    @dataclasses.dataclass
    class Empty:
        pass
    @pydantic.dataclasses.dataclass
    class M:
        e: Empty
    assert isinstance(M(e={}).e, Empty)
def test_schema_description_unset():
    """Without a docstring, no 'description' key appears in the generated schema."""
    @pydantic.dataclasses.dataclass
    class A:
        x: int
    assert 'description' not in A.__pydantic_model__.schema()
    @pydantic.dataclasses.dataclass
    @dataclasses.dataclass
    class B:
        x: int
    assert 'description' not in B.__pydantic_model__.schema()
def test_schema_description_set():
    """A class docstring becomes the schema 'description', for both a plain pydantic
    dataclass and one stacked on top of a stdlib dataclass."""
    @pydantic.dataclasses.dataclass
    class A:
        """my description"""
        x: int
    assert A.__pydantic_model__.schema()['description'] == 'my description'
    @pydantic.dataclasses.dataclass
    @dataclasses.dataclass
    class B:
        """my description"""
        x: int
    # fix: the original asserted on A again, leaving B's schema completely untested
    assert B.__pydantic_model__.schema()['description'] == 'my description'
def test_issue_3011():
    """Regression for #3011: a subclass instance is accepted where the base is annotated."""
    @dataclasses.dataclass
    class A:
        thing_a: str
    class B(A):
        thing_b: str
    class Config:
        arbitrary_types_allowed = True
    @pydantic.dataclasses.dataclass(config=Config)
    class C:
        thing: A
    b = B('Thing A')
    c = C(thing=b)
    assert c.thing.thing_a == 'Thing A'
def test_issue_3162():
    """Regression for #3162: the same stdlib dataclass used twice yields one definition."""
    @dataclasses.dataclass
    class User:
        id: int
        name: str
    class Users(BaseModel):
        user: User
        other_user: User
    assert Users.schema() == {
        'title': 'Users',
        'type': 'object',
        'properties': {'user': {'$ref': '#/definitions/User'}, 'other_user': {'$ref': '#/definitions/User'}},
        'required': ['user', 'other_user'],
        'definitions': {
            'User': {
                'title': 'User',
                'type': 'object',
                'properties': {'id': {'title': 'Id', 'type': 'integer'}, 'name': {'title': 'Name', 'type': 'string'}},
                'required': ['id', 'name'],
            }
        },
    }
def test_discrimated_union_basemodel_instance_value():
    """A discriminated Union field (discriminator via field metadata) accepts an
    already-constructed member and emits the OpenAPI discriminator in the schema."""
    @pydantic.dataclasses.dataclass
    class A:
        l: Literal['a']
    @pydantic.dataclasses.dataclass
    class B:
        l: Literal['b']
    @pydantic.dataclasses.dataclass
    class Top:
        sub: Union[A, B] = dataclasses.field(metadata=dict(discriminator='l'))
    t = Top(sub=A(l='a'))
    assert isinstance(t, Top)
    assert Top.__pydantic_model__.schema() == {
        'title': 'Top',
        'type': 'object',
        'properties': {
            'sub': {
                'title': 'Sub',
                'discriminator': {'propertyName': 'l', 'mapping': {'a': '#/definitions/A', 'b': '#/definitions/B'}},
                'anyOf': [{'$ref': '#/definitions/A'}, {'$ref': '#/definitions/B'}],
            }
        },
        'required': ['sub'],
        'definitions': {
            'A': {
                'title': 'A',
                'type': 'object',
                'properties': {'l': {'title': 'L', 'enum': ['a'], 'type': 'string'}},
                'required': ['l'],
            },
            'B': {
                'title': 'B',
                'type': 'object',
                'properties': {'l': {'title': 'L', 'enum': ['b'], 'type': 'string'}},
                'required': ['l'],
            },
        },
    }
def test_post_init_after_validation():
    """``post_init_call='after_validation'`` defers ``__post_init__`` until values are parsed,
    so the set survives a JSON round-trip (list -> set) before the assertion runs."""
    @dataclasses.dataclass
    class SetWrapper:
        set: Set[int]
        def __post_init__(self):
            assert isinstance(
                self.set, set
            ), f"self.set should be a set but it's {self.set!r} of type {type(self.set).__name__}"
    class Model(pydantic.BaseModel, post_init_call='after_validation'):
        set_wrapper: SetWrapper
    model = Model(set_wrapper=SetWrapper({1, 2, 3}))
    json_text = model.json()
    assert Model.parse_raw(json_text) == model
def test_keeps_custom_properties():
    """A custom ``__new__`` (and the attributes it sets) must be preserved by both the
    stdlib and the pydantic dataclass decorators."""
    class StandardClass:
        """Class which modifies instance creation."""
        a: str
        def __new__(cls, *args, **kwargs):
            instance = super().__new__(cls)
            instance._special_property = 1
            return instance
    StandardLibDataclass = dataclasses.dataclass(StandardClass)
    PydanticDataclass = pydantic.dataclasses.dataclass(StandardClass)
    test_string = 'string'
    # fix: renamed the misspelled local 'clases_to_test' and inlined the tuple
    for cls in (StandardLibDataclass, PydanticDataclass):
        instance = cls(a=test_string)
        assert instance._special_property == 1
        assert instance.a == test_string
def test_ignore_extra():
    """``Extra.ignore`` silently drops unknown keyword arguments."""
    @pydantic.dataclasses.dataclass(config=dict(extra=Extra.ignore))
    class Foo:
        x: int
    foo = Foo(**{'x': '1', 'y': '2'})
    # 'y' is discarded; only the declared field (plus the init marker) remains
    assert foo.__dict__ == {'x': 1, '__pydantic_initialised__': True}
def test_ignore_extra_subclass():
    """``Extra.ignore`` is honoured through inheritance as well."""
    @pydantic.dataclasses.dataclass(config=dict(extra=Extra.ignore))
    class Foo:
        x: int
    @pydantic.dataclasses.dataclass(config=dict(extra=Extra.ignore))
    class Bar(Foo):
        y: int
    bar = Bar(**{'x': '1', 'y': '2', 'z': '3'})
    assert bar.__dict__ == {'x': 1, 'y': 2, '__pydantic_initialised__': True}
def test_allow_extra():
    """``Extra.allow`` keeps unknown keyword arguments, unvalidated."""
    @pydantic.dataclasses.dataclass(config=dict(extra=Extra.allow))
    class Foo:
        x: int
    foo = Foo(**{'x': '1', 'y': '2'})
    # note 'y' stays a raw string: extras are stored but not coerced
    assert foo.__dict__ == {'x': 1, 'y': '2', '__pydantic_initialised__': True}
def test_allow_extra_subclass():
    """``Extra.allow`` is honoured through inheritance as well."""
    @pydantic.dataclasses.dataclass(config=dict(extra=Extra.allow))
    class Foo:
        x: int
    @pydantic.dataclasses.dataclass(config=dict(extra=Extra.allow))
    class Bar(Foo):
        y: int
    bar = Bar(**{'x': '1', 'y': '2', 'z': '3'})
    assert bar.__dict__ == {'x': 1, 'y': 2, 'z': '3', '__pydantic_initialised__': True}
def test_forbid_extra():
    """``Extra.forbid`` surfaces unknown kwargs as a plain TypeError from __init__."""
    @pydantic.dataclasses.dataclass(config=dict(extra=Extra.forbid))
    class Foo:
        x: int
    with pytest.raises(TypeError, match=re.escape("__init__() got an unexpected keyword argument 'y'")):
        Foo(**{'x': '1', 'y': '2'})
def test_post_init_allow_extra():
    """Extras coexist with ``__post_init__`` mutations of declared fields."""
    @pydantic.dataclasses.dataclass(config=dict(extra=Extra.allow))
    class Foobar:
        a: int
        b: str
        def __post_init__(self):
            self.a *= 2
    assert Foobar(a=1, b='a', c=4).__dict__ == {'a': 2, 'b': 'a', 'c': 4, '__pydantic_initialised__': True}
def test_self_reference_dataclass():
    """A dataclass may reference itself via a forward-reference string annotation."""
    @pydantic.dataclasses.dataclass
    class MyDataclass:
        self_reference: 'MyDataclass'
    assert MyDataclass.__pydantic_model__.__fields__['self_reference'].type_ is MyDataclass
| merlinepedra25/PYDANTIC | tests/test_dataclasses.py | test_dataclasses.py | py | 34,040 | python | en | code | 1 | github-code | 36 |
74788585384 | import operator
from itertools import tee, starmap, groupby
from typing import Literal
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def has_six_digits(i: int):
return 100_000 <= i <= 999_999
def is_nondecreasing_sequence(i: int):
pairs = pairwise(str(i))
goes_up = starmap(operator.le, pairs)
nondecreasing = all(goes_up)
return nondecreasing
def one_pair_is_not_triplet(i: int):
return 2 in {sum(1 for _ in g) for _, g in groupby(str(i))}
def has_pair(i: int):
pairs = pairwise(str(i))
is_equal = starmap(operator.eq, pairs)
return any(is_equal)
def moar_numbers(start_inclusive, stop_inclusive, part=Literal['a', 'b']):
current = start_inclusive
check_extra = part == 'b'
while current <= stop_inclusive:
current_fulfils_spec = (
has_six_digits(current) and
is_nondecreasing_sequence(current) and
has_pair(current) and
(not check_extra or one_pair_is_not_triplet(current))
)
if current_fulfils_spec:
yield current
current += 1
| el-hult/adventofcode2019 | day04/day4_lib.py | day4_lib.py | py | 1,183 | python | en | code | 0 | github-code | 36 |
6654095261 | from flask import Flask, render_template, request, redirect, session
from datetime import datetime
import random
app = Flask(__name__)
app.secret_key = 'JSaafE54!@#$%$#%^&*()_+'
@app.route('/')
def index():
#crear una variable session
if 'num_azar' not in session:
session['num_azar'] = random.randint(1,100)
session['resultado'] = ""
session['count'] = 0
print(session['num_azar'])
elif 'num_azar' in session:
if session['count'] > 0 and session['count']<= 4:
print("la session ya existe")
elif session['count'] > 4 and session['resultado'] != 'igual':
session['resultado'] = "perdiste"
print(session['num_azar'])
return render_template("index.html")
@app.route('/guess', methods=['POST'])
def guess():
numero_ingresado = int(request.form['numero'])
print(numero_ingresado)
if session['num_azar'] == numero_ingresado:
session['resultado'] = "igual"
print('paso por igual')
elif session['num_azar'] > numero_ingresado:
session['resultado'] = "mayor"
print('paso por mayor')
elif session['num_azar'] < numero_ingresado:
session['resultado'] = "menor"
print('paso por menor')
session['count'] += 1
return redirect('/')
@app.route('/play_again', methods=['POST'])
def play_again():
session.pop('num_azar')
session.pop('resultado')
session.pop('count')
return redirect('/')
@app.route('/ranking', methods=['POST'])
def ranking():
print(session['ranking'])
if 'ranking' not in session:
session['ranking'] = []
session['ranking'] = [
dict(nombre=request.form['nombre'], intentos=session['count'])]
print(request.form['nombre'])
print(session['ranking'])
return render_template("ranking.html", ranking=session['ranking'])
if __name__=="__main__":
app.run(debug=True) | cpinot/CodingDojo | python/flask/fundamentals/numeros_juegos_genial/server.py | server.py | py | 2,060 | python | en | code | 0 | github-code | 36 |
5232344019 | from openpyxl import load_workbook
wb = load_workbook("sample.xlsx")
ws = wb.active
# 번호 영어 수학
# 번호 (국어) 영어 수학
ws.move_range("B1:C11", rows=0, cols=1) # 0줄 밑으로, 1줄 오른쪽으로 이동
ws["B1"].value = "국어" # B1 셀에 '국어' 입력
# ws.move_range("C1:C11", rows=5, cols=-1) # 데이터 옮기면서 덮어씀
wb.save("sample_korean.xlsx") | OctoHoon/PythonStudy_rpa | rpa_basic/1_excel/9_move.py | 9_move.py | py | 402 | python | ko | code | 0 | github-code | 36 |
36968708533 | from collections import namedtuple
import os
# from unittest.mock import patch
import datetime
import random
import pytest
import hug
from bson.objectid import ObjectId
from pymongo import MongoClient
from pymongo.uri_parser import parse_uri
from helpers import clean_url, clean_email, hash_password
from db import DB
from middlewares import HostEnvMiddleware, MongoMiddleware
"""
API endpoints test
"""
TEST_MONGO_URL = os.environ.get('MONGODB_URI_TEST')
USERS = (
{'email': 'testuser1@email.com', 'api_key': 'apikey1'},
{'email': 'testuser2@email.com', 'api_key': 'apikey2'},
)
def create_fixtures():
"""
Creating user fixtures for tests
"""
remove_fixtures()
with MongoClient(TEST_MONGO_URL) as conn:
parsed = parse_uri(TEST_MONGO_URL)
db = conn[parsed['database']]
# adding user and one url for each user
for i, user in enumerate(USERS):
user_id = db.users.insert(user)
db.urls.insert({
'code': 'user{}'.format(i),
'short_url': 'http://ef.me/user{}'.format(i),
'long_url': 'http://user{}.com'.format(i),
'url_access': [],
'created_at': datetime.datetime.now(),
'created_by': user_id
})
def remove_fixtures():
"""
Removing fixtures
"""
with MongoClient(TEST_MONGO_URL) as conn:
parsed = parse_uri(TEST_MONGO_URL)
db = conn[parsed['database']]
emails = [i['email'] for i in USERS]
emails.append('testuser3@email.com')
query = {'email': {'$in': emails}}
user_ids = [i['_id'] for i in list(db.users.find(query, {'_id': 1}))]
# removing urls
db.urls.remove({'created_by': {'$in': user_ids}}, {'multi': True})
# removing users
db.users.remove({'_id': {'$in': user_ids}}, {'multi': True})
def setup():
"""
Creating initial fixtures for tests
"""
os.environ['MONGODB_URI'] = TEST_MONGO_URL
os.environ['HOST'] = 'http://ef.me'
create_fixtures()
def teardown():
"""
Clear fixtures for tests
"""
os.environ['MONGODB_URI'] = ''
os.environ['HOST'] = ''
remove_fixtures()
def test_short_url():
"""
test /api/short endpoint
"""
setup()
import api
# bad request without long_url query param
request_url = '/api/short'
headers = {'X-Api-Key': 'apikey1'}
response = hug.test.get(api, request_url, headers=headers)
assert response.data['error'] == 'long_url GET param missing'
# bad request without authentication header
response = hug.test.get(api, request_url)
assert response.status == '401 Unauthorized'
# bad request with inexistent authentication header
headers = {'X-Api-Key': 'not-exists'}
response = hug.test.get(api, request_url, headers=headers)
assert response.status == '401 Unauthorized'
# bad request with invalid url
headers = {'X-Api-Key': 'apikey1'}
response = hug.test.get(api, request_url, headers=headers,
long_url=(1, 2, 3))
assert response.data['error'] == 'long_url is not a valid URL'
# bad request with long code
headers = {'X-Api-Key': 'apikey1'}
response = hug.test.get(api, request_url, headers=headers,
long_url='www.google.com',
code='lllllllllllllllongggggggggg')
assert response.data['error'] == 'Code param must have a max length of 9'
# good request with code generating short_url
request_url = '/api/short'
headers = {'X-Api-Key': 'apikey1'}
response = hug.test.get(api, request_url, headers=headers,
long_url='www.google.com', code='abcd')
assert response.data['short_url'] == 'http://ef.me/abcd'
# good request for same long url will raise a 409 error
request_url = '/api/short'
headers = {'X-Api-Key': 'apikey1'}
response = hug.test.get(api, request_url, headers=headers,
long_url='www.google.com', code='abcd')
assert response.data['error'] == 'long_url already exists'
# good request without generating short_url
request_url = '/api/short'
headers = {'X-Api-Key': 'apikey1'}
response = hug.test.get(api, request_url, headers=headers,
long_url='www.google.com/123')
assert 'short_url' in response.data
teardown()
def test_expand_url():
"""
/api/expand endpoint tests
"""
setup()
import api
# bad request with missing short_url
request_url = '/api/expand'
headers = {'X-Api-Key': 'apikey1'}
response = hug.test.get(api, request_url, headers=headers)
assert response.data['error'] == 'short_url GET param missing'
# bad request with a not valid url
request_url = '/api/expand'
headers = {'X-Api-Key': 'apikey1'}
response = hug.test.get(api, request_url, headers=headers,
short_url=(1, 2, 3))
assert response.data['error'] == 'short_url is not a valid URL'
# bad request with a inexistent url
request_url = '/api/expand'
headers = {'X-Api-Key': 'apikey1'}
response = hug.test.get(api, request_url, headers=headers,
short_url='http://ef.me/noex')
assert response.data['error'] == 'short_url does not exist'
# valid request
request_url = '/api/expand'
headers = {'X-Api-Key': 'apikey1'}
response = hug.test.get(api, request_url, headers=headers,
short_url='http://ef.me/user0')
assert response.data['short_url'] == 'http://ef.me/user0'
assert response.data['long_url'] == 'http://user0.com'
teardown()
def test_go_to_url():
"""
testing /s/:code endpoint
"""
setup()
import api
# test not found
response = hug.test.get(api, '/s/123')
assert response.status == '404 Not Found'
# test 301 response
response = hug.test.get(api, '/s/user1')
assert response.status == '301 Moved Permanently'
teardown()
def test_create_user():
"""
testing /api/user endpoint
"""
setup()
import api
# bad request with no payload
response = hug.test.post(api, '/api/user')
assert response.data['error'] == 'Missing email on body request'
# bad request with bad email payload
payload = {'email': (1, 2, 3)}
response = hug.test.post(api, '/api/user', payload)
assert response.data['error'] == 'Email not valid'
# bad request with already added user
payload = {'email': 'testuser1@email.com'}
response = hug.test.post(api, '/api/user', payload)
assert response.data['error'] == 'User already exists'
# good request with valid payload
payload = {'email': 'testuser3@email.com'}
response = hug.test.post(api, '/api/user', payload)
assert response.status == '200 OK'
assert 'api_key' in response.data
teardown()
def test_get_user_urls():
"""
testing /api/urls endpoint
"""
setup()
import api
# bad request without auth
response = hug.test.get(api, '/api/urls')
assert response.status == '401 Unauthorized'
# get all urls from user1
headers = {'X-Api-Key': 'apikey1'}
response = hug.test.get(api, '/api/urls', headers=headers).data
assert len(response) == 1
assert response[0]['short_url'] == 'http://ef.me/user0'
assert response[0]['long_url'] == 'http://user0.com'
assert response[0]['code'] == 'user0'
assert response[0]['total_accesses'] == 0
# add one more access to url on user0 and check the results
hug.test.get(api, '/s/user0')
response = hug.test.get(api, '/api/urls', headers=headers).data
assert len(response) == 1
assert response[0]['total_accesses'] == 1
# test pagination
# adding more urls for user0 and retrieve it
for i in range(10):
code = random.randint(4, 99999)
resp = hug.test.get(api, '/api/short', headers=headers,
long_url='http://{}.com'.format(code))
assert resp.status == '201 Created'
response = hug.test.get(api, '/api/urls', headers=headers).data
assert len(response) == 5
# get page 2
response = hug.test.get(api, '/api/urls', headers=headers, page=2).data
assert len(response) == 5
# get page 3. Should have 1 url only
response = hug.test.get(api, '/api/urls', headers=headers, page=3).data
assert len(response) == 1
teardown()
def test_get_user_url():
    """
    test /api/urls/{code} endpoint

    Verifies auth is required, an owner can read their URL's details, and
    another user's URL is reported as missing rather than being exposed.
    """
    setup()
    import api
    # bad request without auth
    response = hug.test.get(api, '/api/urls/123')
    assert response.status == '401 Unauthorized'
    # good request with user url
    headers = {'X-Api-Key': 'apikey1'}
    response = hug.test.get(api, '/api/urls/user0', headers=headers)
    assert response.data['short_url'] == 'http://ef.me/user0'
    assert response.data['long_url'] == 'http://user0.com'
    assert response.data['total_accesses'] == 0
    # get url from other user returns 404
    headers = {'X-Api-Key': 'apikey1'}
    response = hug.test.get(api, '/api/urls/user1', headers=headers)
    assert response.data['error'] == 'URL does not exist'
    teardown()
"""
Helpers test
"""
def test_clean_url():
    """Exercise the clean_url helper with invalid and valid inputs."""
    canonical = 'http://google.com'
    # Non-string and empty values must be rejected with ValueError.
    for invalid in (123, ''):
        with pytest.raises(ValueError):
            clean_url(invalid)
    # Valid variants (already-clean, missing scheme, trailing slash) all
    # normalize to the same canonical URL.
    for candidate in (canonical, 'google.com', 'google.com/'):
        assert clean_url(candidate) == canonical
def test_clean_email():
    """Exercise the clean_email helper with invalid and valid inputs."""
    valid = 'test@email.com'
    # Malformed addresses and non-strings must be rejected with ValueError.
    for invalid in ('123', 123, '<<@>>'):
        with pytest.raises(ValueError):
            clean_email(invalid)
    # A well-formed address passes through unchanged.
    assert clean_email(valid) == valid
def test_hash_password():
    """hash_password must be deterministic for a fixed email/salt pair."""
    digest = hash_password('test@email.com', 'salt123')
    assert digest == 'd0088c5e26b377da76477cda8d7d2f2e5a3723176eb2a1ddf6c4719d567c3bfe7141f1998a1e3a3cbec86c96740d7d25bc954e2970d4974b66193a9ea210a8af'
"""
Middleware test
"""
def test_env_middleware():
    """
    testing HostEnvMiddleware: the HOST env var is copied into the request
    context, and an oversized HOST value makes process_request raise
    (presumably a length limit -- confirm in HostEnvMiddleware).
    """
    os.environ['HOST'] = 'http://bit.ly'
    # minimal stand-ins for falcon request/response objects
    fake_request = namedtuple('Request', 'context')
    fake_response = {}
    req = fake_request(context={})
    e = HostEnvMiddleware()
    e.process_request(req, fake_response)
    assert req.context['host'] == 'http://bit.ly'
    # an over-long host value is expected to raise
    os.environ['HOST'] = 'biggggggggggggghosttttttttttttttt.com'
    e = HostEnvMiddleware()
    req = fake_request(context={})
    with pytest.raises(Exception):
        e.process_request(req, fake_response)
    # reset so later tests are not affected
    os.environ['HOST'] = ''
def test_mongo_middleware():
    """
    testing MongoMiddleware: a DB wrapper built from MONGODB_URI is attached
    to the request context, and process_response clears it again.
    """
    os.environ['MONGODB_URI'] = TEST_MONGO_URL
    parsed = parse_uri(TEST_MONGO_URL)
    # minimal stand-ins for falcon request/response objects
    fake_request = namedtuple('Request', 'context')
    fake_response = {}
    req = fake_request(context={})
    m = MongoMiddleware()
    m.process_request(req, fake_response)
    # the wrapper targets the database named in the URI
    assert isinstance(req.context['db'], DB)
    assert req.context['db'].database == parsed['database']
    # process_response must release the handle
    m.process_response(req, {}, {})
    assert req.context['db'] is None
    # reset so later tests are not affected
    os.environ['MONGODB_URI'] = ''
"""
DB test
"""
def test_sanitize_query():
    """sanitize_query rejects non-dicts and converts _id strings to ObjectId."""
    # a non-dict query is rejected outright
    assert DB.sanitize_query('') is False
    # an empty dict passes through unchanged
    assert DB.sanitize_query({}) == {}
    # a string _id is upgraded to a bson ObjectId
    sanitized = DB.sanitize_query({'_id': '58d0211ea1711d51401aee4c'})
    assert sanitized == {'_id': ObjectId('58d0211ea1711d51401aee4c')}
| ellisonleao/ef-url-shortener | test_api.py | test_api.py | py | 11,725 | python | en | code | 1 | github-code | 36 |
# Constants describing the KEGG REST API operations (http://rest.kegg.jp/).
url = 'http://rest.kegg.jp/'  # Base URL for every KEGG REST request
find = 'find' # Searches databases for a given term
list = 'list' # Lists the entries in a database  # NOTE(review): shadows builtin 'list'; renaming would break importers
link = 'link' # Finds related entries in other databases
conv = 'conv' # Converts between KEGG identifiers and outside identifiers
info = 'info' # Gets information about the given database
get = 'get' # Returns the given database entry and its associated data  # NOTE(review): shadows no builtin, but mirrors dict.get naming
delimiter = '\t'  # KEGG responses are tab-separated
# Record fields recognized when parsing a KEGG flat-file entry.
fields = {"NAME", "DESCRIPTION", "CLASS", "MODULE", "DISEASE", "DRUG", \
"DBLINKS", "ORGANISM", "GENE", "COMPOUND", "KO_PATHWAY"}
| Arabidopsis-Information-Portal/Intern-Hello-World | services/common/vars.py | vars.py | py | 538 | python | en | code | 0 | github-code | 36 |
30626059692 |
# Simple interactive shopping-list builder.
izbor = int(input("""Zdravo, ovo je spisak za kupovinu odaberi jednu od sledecih opcija:
1. Dodaj stavku na spisak
2. izlaz
"""))

lista = [ ]

if izbor == 1:
    # Only prompt for the item count when the user actually chose to add
    # items; previously the count was requested even for option 2 (exit).
    n = int(input("Unesi broj stavki koje kupujemo : "))
    if n == 0:
        print ('Uneo si 0 stavki u spisak')
    for i in range(0, n):
        # Each entry is a [name, quantity] pair.
        element = [input("Unesi stvaku: "), int(input("Unesi kolicinu: "))]
        lista.append(element)
    print ('Vasa lista za kupovinu je',lista)
# AtCoder ABC086 C - Traveling: decide whether a plan visiting point
# (x, y) at time t for each row is feasible starting from (0, 0) at t=0.
n = int(input())
points = [[0, 0, 0]]
for _ in range(n):
    points.append(list(map(int, input().split())))

ok = True
for prev, cur in zip(points, points[1:]):
    dt = cur[0] - prev[0]
    dist = abs(cur[1] - prev[1]) + abs(cur[2] - prev[2])
    # Need enough time to cover the Manhattan distance, and matching parity.
    if dt < dist or dt % 2 != dist % 2:
        ok = False
        break

print('Yes' if ok else 'No')
| cocoinit23/atcoder | abc/abc086/C - Traveling.py | C - Traveling.py | py | 366 | python | en | code | 0 | github-code | 36 |
15903684309 | import os
import numpy as np
def octopuses(lines:list, step_count:int=100):
    """Simulate the AoC 2021 day-11 octopus flash cascade.

    Parameters
    ----------
    lines : list of str
        Rows of single digits describing the starting energy grid.
    step_count : int
        Number of steps to simulate; the return value is the total number
        of flashes.  Pass -1 to instead run until every cell flashes
        simultaneously and return that step number.
    """
    grid = np.array([[int(ch) for ch in row.strip()] for row in lines],
                    dtype='int32')
    rows, cols = grid.shape

    def _advance():
        # Raise every cell by one; cells already at 9 flash (reset to 0).
        flashed = []
        for r in range(rows):
            for c in range(cols):
                if grid[r, c] < 9:
                    grid[r, c] = grid[r, c] + 1
                else:
                    grid[r, c] = 0
                    flashed.append((r, c))
        return flashed

    def _bump(r, c):
        # A cell at 0 has already flashed this step and absorbs no energy.
        if grid[r, c] != 0:
            grid[r, c] = grid[r, c] + 1
            if grid[r, c] > 9:
                grid[r, c] = 0
                return True
        return False

    def _spread(r, c):
        # Propagate a flash at (r, c) to all 8 neighbours inside the grid.
        new_flashes = []
        for dr in (-1, 0, 1):
            for dc in (-1, 0, 1):
                if dr == 0 and dc == 0:
                    continue
                nr, nc = r + dr, c + dc
                if 0 <= nr < rows and 0 <= nc < cols and _bump(nr, nc):
                    new_flashes.append((nr, nc))
        return new_flashes

    total_flashes = 0
    steps_done = 0
    while steps_done < step_count or step_count == -1:
        steps_done += 1
        pending = _advance()
        total_flashes += len(pending)
        while pending:
            r, c = pending.pop()
            more = _spread(r, c)
            total_flashes += len(more)
            pending += more
        if step_count == -1 and np.max(grid) == 0:
            # Every octopus flashed this step: synchronization achieved.
            return steps_done
    return total_flashes
# Sample grid from the puzzle statement; known answers: 1656 flashes after
# 100 steps, first synchronized flash at step 195.
test_lines = [
    '5483143223',
    '2745854711',
    '5264556173',
    '6141336146',
    '6357385478',
    '4167524645',
    '2176841721',
    '6882881134',
    '4846848554',
    '5283751526',
]

# Puzzle input grid.
lines = [
    '6636827465',
    '6774248431',
    '4227386366',
    '7447452613',
    '6223122545',
    '2814388766',
    '6615551144',
    '4836235836',
    '5334783256',
    '4128344843',
]

# Sanity-check against the published sample answers before running the input.
assert octopuses(test_lines, 100) == 1656, "Function is wrong"
print("Part A:", octopuses(lines, 100))

assert octopuses(test_lines, -1) == 195, "Function is wrong"
print("Part B:", octopuses(lines, -1))
| coolafabbe/AdventOfCode2021 | Mikel/Day11/main.py | main.py | py | 2,449 | python | en | code | 0 | github-code | 36 |
75127577704 | import sys
from cravat import BaseAnnotator
from cravat import InvalidData
import sqlite3
import os
class CravatAnnotator(BaseAnnotator):
    """OpenCRAVAT annotator that looks up arrhythmia-variant (arrvars) data."""

    def annotate(self, input_data, secondary_data=None):
        """Return the arrvars columns for the variant in input_data.

        Parameters
        ----------
        input_data : dict with 'chrom', 'pos', 'ref_base', 'alt_base' keys.
        secondary_data : unused; kept for the BaseAnnotator interface.

        Returns
        -------
        dict of annotation columns plus a VanCART deep link, or None when
        the variant is not present in the arrvars table.
        """
        # Parameterized query instead of str.format to avoid quoting and
        # injection problems with interpolated values.
        q = ('select brs_penetrance, lqt_penetrance, brs_structure, '
             'lqt_structure, function, lqt, brs, unaff, other, var, hugo '
             'from arrvars where chrom = ? and pos = ? and ref = ? and alt = ?')
        self.cursor.execute(q, (input_data['chrom'], int(input_data['pos']),
                                input_data['ref_base'], input_data['alt_base']))
        row = self.cursor.fetchone()
        if not row:
            return None
        var = row[9] if row[9] is not None else ''
        hugo = row[10]
        link = 'https://oates.app.vumc.org/vancart/' + hugo + '/variant.php?q=' + var
        if link.endswith('='):
            # Empty variant id -> the deep link is useless; suppress it.
            link = ''
        return {
            'brs_penetrance': row[0],
            'lqt_penetrance': row[1],
            'brs_structure': row[2],
            'lqt_structure': row[3],
            'function': row[4],
            'lqt': row[5],
            'brs': row[6],
            'unaff': row[7],
            'other': row[8],
            'link': link,
        }

    def cleanup(self):
        """No per-run resources to release."""
        pass
if __name__ == '__main__':
    # Allow running the annotator standalone from the command line; the
    # base class parses sys.argv for its configuration.
    annotator = CravatAnnotator(sys.argv)
    annotator.run()
27515854785 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 23 17:32:15 2020
geometry based sieving
"""
import os
import pickle
from skimage import measure
import pandas as pd
import numpy as np
import nibabel as nib
from skimage import measure
from sklearn import neighbors
# PROPERTIES = ['area', 'extent', 'filled_area', 'inertia_tensor', 'major_axis_length', 'minor_axis_length'] # 3d compatible
# Default per-region geometric features (2D, per slice) fed to the LOF model.
PROPERTIES = ['area', 'eccentricity', 'extent',
              'inertia_tensor', 'major_axis_length', 'minor_axis_length',
              'moments_hu', 'perimeter', 'solidity']
def read_nifti(img_path):
    """Load a NIfTI scan from disk and return its voxel data.

    Parameters
    ----------
    img_path : string
        The path to the .nii scan.

    Returns
    -------
    A numpy array representing the scan.
    """
    return nib.load(img_path).get_fdata()
def label_2d(im):
    """Label connected components slice-by-slice along the last axis.

    Each z-slice of the binary volume is labeled independently with
    skimage.measure.label; labels are offset by a running counter so they
    remain unique across the whole volume.

    Parameters
    ----------
    im : numpy.ndarray
        3D binary image; slices are taken along axis 2.

    Returns
    -------
    numpy.ndarray of int with globally unique per-slice region labels.

    Raises
    ------
    Exception if a label is ever reused (internal consistency check).
    """
    labeled = im.copy()
    vals_seen = []
    adder = 0  # running offset so labels are never reused between slices
    for i in range(labeled.shape[2]):
        sli = im[:, :, i]
        labeled_slice = measure.label(sli)
        # Shift this slice's labels past every label used so far; background
        # (0) stays 0.
        labeled_slice = labeled_slice + (adder * (~np.isclose(labeled_slice, 0)))
        labeled[:, :, i] = labeled_slice
        the_max = labeled_slice.max()
        if the_max > 0:
            adder = the_max
        slice_vals = list(np.unique(labeled_slice))
        # A slice with no background pixels has no 0 entry to drop; the
        # previous unconditional remove(0) raised ValueError in that case.
        if 0 in slice_vals:
            slice_vals.remove(0)
        vals_seen.extend(slice_vals)
        if len(vals_seen) > len(np.unique(vals_seen)):
            raise Exception(f'Labels are not unique after slice {i}')
    return labeled.astype(int)
def generate_properties(im, props=PROPERTIES):
    """
    Generates geometric properties for shapes in the binary input image.

    Regions are measured per 2D slice (last axis) after labeling.

    Parameters
    ----------
    im : numpy.ndarray
        3D binary image.
    props : list of str, optional
        Region properties to compute (see skimage.measure.regionprops_table).

    Returns
    -------
    pandas.DataFrame with one row per 2D region across all slices.
    """
    labeled = label_2d(im)
    frames = []
    for i in range(labeled.shape[2]):
        sli = labeled[:, :, i]
        try:
            frames.append(pd.DataFrame(
                measure.regionprops_table(sli, properties=props)))
        except IndexError:
            pass  # happens when the slice has no regions in it
    # DataFrame.append was removed in pandas 2.0; concatenate the per-slice
    # tables instead (same duplicated per-slice index as the old append).
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames)
def standardize_data(data, params):
    """Return a copy of *data* with each column z-scored using given stats.

    Parameters
    ----------
    data : pandas.DataFrame
        Observations to transform; not modified.
    params : tuple
        (means, stddevs) lists, ordered like data.columns.
    """
    means, stddevs = params[0], params[1]
    result = data.copy()
    for column, mu, sigma in zip(result.columns, means, stddevs):
        result[column] = (result[column] - mu) / sigma
    return result
def train_and_save(training_data, outloc):
    """
    Trains a LOF model for novelty detection and pickles it.

    The data is standardized first (each column has its mean subtracted and
    is divided by its stddev); the same parameters are saved alongside the
    model so new observations can be transformed identically.

    Parameters
    ----------
    training_data : pandas.DataFrame
        Training observations, one row per region.
    outloc : str
        Path for the pickled object.

    Returns
    -------
    a tuple with length 2: the fitted model, and ([means], [stddevs]) used
    to transform the data.
    """
    means = []
    stddevs = []
    for col in training_data.columns:
        means.append(training_data[col].mean())
        stddevs.append(training_data[col].std())
    standard_data = standardize_data(training_data, (means, stddevs))
    lof = neighbors.LocalOutlierFactor(novelty=True)
    lof.fit(standard_data)
    out_obj = (lof, (means, stddevs))
    # Context manager so the file handle is closed promptly; the original
    # left the handle from open() unclosed.
    with open(outloc, 'wb') as f:
        pickle.dump(out_obj, f)
    return out_obj
def load_default_model():
    """Load the bundled default geometry-sieving LOF model.

    Returns
    -------
    tuple of (model, (means, stddevs)) as produced by train_and_save.
    """
    script_folder = os.path.dirname(os.path.realpath(__file__))
    repo_folder = os.path.dirname(script_folder)
    model_loc = os.path.join(repo_folder, 'bin', 'gbs_models', 'gbs_default.pkl')
    # Context manager so the pickle file handle is closed; the original
    # left the handle from open() unclosed.
    with open(model_loc, 'rb') as f:
        lof, params = pickle.load(f)
    return lof, params
def sieve_image(im, model_and_params=None, props=None):
    """Zero out regions of a binary image that a trained LOF model flags as
    outliers.

    Parameters
    ----------
    im : numpy.ndarray
        3D binary image.
    model_and_params : tuple, optional
        (model, (means, stddevs)); defaults to the bundled default model.
    props : list of str, optional
        Region properties to feed the model; defaults to PROPERTIES.

    Returns
    -------
    numpy.ndarray of int: *im* with outlier regions set to 0.
    """
    if model_and_params is None:
        model_and_params = load_default_model()
    if props is None:
        props = PROPERTIES
    model, params = model_and_params
    labeled = label_2d(im)
    # Copy before adding 'label': the original appended to the caller's list
    # (aliasing module-level PROPERTIES), growing it on every call.
    props_with_label = list(props) + ['label']
    observations = generate_properties(labeled, props=props_with_label)
    labels_only = pd.DataFrame(observations['label'])
    observations_drop = observations.drop(columns='label')
    standard_observations = standardize_data(observations_drop, params)
    labels_only['prediction'] = model.predict(standard_observations)
    # Labels the model scored as -1 (novel/outlier) get zeroed out.
    to_zero = [row['label'] for i, row in labels_only.iterrows()
               if row['prediction'] == -1]
    mask = np.isin(labeled, to_zero)
    new_im = im.copy()
    new_im[mask] = 0
    return new_im.astype(int)
| rsjones94/neurosegment | neurosegment/gbs.py | gbs.py | py | 5,678 | python | en | code | 2 | github-code | 36 |
33014513380 | import numpy as np
import random
import math
import matplotlib.pyplot as plt
def intersects(seg_a, seg_b):
    """Return True if 2D segments seg_a and seg_b intersect (endpoints
    inclusive).

    Each segment is a pair of (x, y) points. Uses signed-area (cross
    product) tests: the segments intersect iff each one's endpoints lie on
    opposite sides (or on) the other's supporting line.
    """
    ax0, ay0 = seg_a[0][0], seg_a[0][1]
    ax1, ay1 = seg_a[1][0], seg_a[1][1]
    bx0, by0 = seg_b[0][0], seg_b[0][1]
    bx1, by1 = seg_b[1][0], seg_b[1][1]
    dax, day = ax1 - ax0, ay1 - ay0
    dbx, dby = bx1 - bx0, by1 - by0
    # Endpoints of seg_a relative to seg_b's line ...
    side_a0 = dby * (bx1 - ax0) - dbx * (by1 - ay0)
    side_a1 = dby * (bx1 - ax1) - dbx * (by1 - ay1)
    # ... and endpoints of seg_b relative to seg_a's line.
    side_b0 = day * (ax1 - bx0) - dax * (ay1 - by0)
    side_b1 = day * (ax1 - bx1) - dax * (ay1 - by1)
    return (side_a0 * side_a1 <= 0) and (side_b0 * side_b1 <= 0)
def randomPath():
    """Build a random path of straight segments joined by random curves.

    Each segment has a random length in [20, 50) heading in the current
    direction; after each segment a curve of 45-180 degrees turns the
    heading left or right. Candidate segments that would intersect an
    existing one are re-drawn.

    NOTE(review): relies on Line and make_curve, which are not defined in
    this module -- confirm where they are meant to come from.

    Returns a list of Line objects describing the path.
    """
    pathList = []
    StartCord = (0, 0)
    startDir = 0
    ranStop = random.randint(0, 3)
    while ranStop > 0:
        ranLength = random.random() * 30 + 20
        newCord = (StartCord[0] + ranLength * math.cos(math.radians(startDir)),
                   StartCord[1] + ranLength * math.sin(math.radians(startDir)))
        candidate = [StartCord, newCord]
        # Reset the flag for every attempt; previously it stayed True after
        # the first collision, making `continue` loop forever.
        hasIntersect = False
        for seg in pathList:
            # Compare against each existing segment's endpoints. The
            # original indexed pathList with a Line object (TypeError).
            existing = [(seg.startX, seg.startY), (seg.endX, seg.endY)]
            if intersects(existing, candidate):
                hasIntersect = True
                break
        if hasIntersect:
            continue
        pathList.append(Line(StartCord, newCord))
        turnLeft = bool(random.getrandbits(1))
        newAngle = random.random() * 135 + 45
        newcurve = make_curve(newAngle, 10, start_point=[newCord[0], newCord[1]],
                              n_lines=20, left=turnLeft, start_degree=startDir)
        # Wrap the heading into [0, 360); the original reversed the modulo
        # operands (`360 % angle`), which computes the wrong value.
        if turnLeft:
            startDir = (startDir + newAngle) % 360
        else:
            startDir = (startDir - newAngle) % 360
        # Append the curve's Line objects directly; the original indexed
        # newcurve with its own elements (TypeError).
        pathList.extend(newcurve)
        StartCord = (newcurve[-1].endX, newcurve[-1].endY)
        # Count this completed segment; the original never decremented
        # ranStop, so the loop could never terminate.
        ranStop -= 1
    return pathList
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.