seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
23552489654 | from re import I
import numpy as np
import gym
import random
"""
常に観測値として1を返す環境
環境に対して取るべき行動が周期的に切り替わり、
それに応じて報酬が決定される。
"""
class StaticCyclicEnv0(gym.Env):
    """Environment that always returns 1 as its observation.

    The action the agent is expected to take switches cyclically over
    time, and the reward is determined by whether the chosen action
    matches the currently desired one.
    """

    def __init__(self, cycle, cycle_cnt_max, action_num, noise):
        """
        cycle: number of steps in one full rotation over all actions.
        cycle_cnt_max: number of full cycles before the episode ends.
        action_num: size of the discrete action space.
        noise: probability that a scheduled rule switch does NOT happen.
        """
        super().__init__()
        self.cycle = cycle
        assert self.cycle % action_num == 0 and self.cycle >= action_num, 'cycleはアクション数の整数倍でなくてはならない'
        self.action_num = action_num
        self.desired_action = -1
        self.cycle_cnt_max = cycle_cnt_max
        self.noise = noise
        self.action_space = gym.spaces.Discrete(self.action_num)
        # Bug fix: np.int was deprecated and removed in NumPy 1.24;
        # use the concrete np.int64 dtype instead.
        self.observation_space = gym.spaces.Box(
            low=1,
            high=1,
            shape=[1],
            dtype=np.int64
        )
        self.reward_range = [0, 1]
        self.reset()

    def reset(self):
        """Reset step/cycle counters and return the initial observation."""
        self.step_cnt = 0
        self.cycle_cnt = 0
        self.done = False
        self.info = {
            'bonus_cnt': 0,
            # Number of bonus opportunities over a lifetime.
            'bonus_max': self.cycle_cnt_max * self.action_num,
            # True on the first step and right after the rule switches.
            'is_bonus': True
        }
        return self.observe()

    def step(self, action):
        """Advance one step; reward is 1.0 iff *action* matches the target."""
        observation = self.observe()
        ### Step bookkeeping: at the start of each sub-cycle the desired
        ### action may switch (with probability 1 - noise).
        if self.step_cnt % (self.cycle // self.action_num) == 0:
            if random.random() >= self.noise:
                self.update_action()
            self.info['is_bonus'] = True
        else:
            self.info['is_bonus'] = False
        self.step_cnt += 1
        ### Reward computation
        if action == self.desired_action:
            reward = 1.0
        else:
            reward = 0.0
        ### Termination after cycle_cnt_max completed cycles
        if self.cycle_cnt >= self.cycle_cnt_max:
            self.done = True
        else:
            self.done = False
        return observation, reward, self.done, self.info

    def observe(self):
        """Constant observation: always 1."""
        return 1

    def update_action(self):
        """Advance the desired action, wrapping and counting full cycles."""
        self.desired_action += 1
        if self.desired_action == self.action_num:
            self.desired_action = 0
            self.cycle_cnt += 1
| kato-mahiro/periodic_task_experiment | myenvs/myenvs.py | myenvs.py | py | 2,339 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "gym.Env",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "gym.spaces.Discrete",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "gym.spaces",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "gym.spaces.Box",
... |
21098832887 | #!/usr/bin/python3
import os
import sys
import argparse
import re
if __name__ == '__main__':
    # Template placeholders -- fill these in for a concrete tool.
    infile_format = ''
    cmd_opts = []
    id_pat = r''
    alt_id_pat = r''

    parser = argparse.ArgumentParser(description = """ """)
    parser.add_argument('-i', '--infile',
                        help = f'Specifies path to input file in {infile_format} format.',
                        type = str)
    parser.add_argument('-d', '--id',
                        help = '''Identifier XXX''',
                        type = str)
    parser.add_argument('-c', '--command',
                        help = f'Command to be executed. One of {cmd_opts}.',
                        required = True)
    parser.add_argument('-o', '--option',
                        help = 'Set option XXX',
                        # Bug fix: argparse expects 'store_true'; the original
                        # 'store true' raised ValueError at parser build time.
                        action = 'store_true')
    args = vars(parser.parse_args())

    # Argument checks -- infile exists. Guard against -i being omitted:
    # args['infile'] is None then, and os.path.isfile(None) raises TypeError.
    if args['infile'] is None or not os.path.isfile(args['infile']):
        print(f'File {args["infile"]} not found.')
        sys.exit(0)  # NOTE(review): exits 0 on error -- consider a nonzero code
    if args['command']:
        if args['command'] not in cmd_opts:
            print(f'Error: {args["command"]} is not a valid Command.')
            sys.exit(0)
        if args['command'] in ['XXX']:
            # NOTE(review): re.match raises TypeError if -d was omitted
            # (args['id'] is None) -- confirm whether -d should be required.
            if not re.match(id_pat, args['id']):
                print(f'Error: {args["id"]} is not a valid GO Id.')
                sys.exit(0)
            # if args['command'] == 'getEntry':
            #     res = getEntry(filename = args['infile'], id = args['id'])
        elif args['command'] in ['XXX']:
            if not re.match(alt_id_pat, args['id']):
                print(f'Error: {args["id"]} is not a valid external identifier.')
                sys.exit(0)
print(args) | jonasfreimuth/dbp-exercises | templates/cli_template.py | cli_template.py | py | 1,556 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"l... |
16774999516 | from django_cas_ng import views as cas_views
from django_cas_ng.models import ProxyGrantingTicket, SessionTicket
from django_cas_ng.utils import get_protocol, get_redirect_url, get_cas_client
from django_cas_ng.signals import cas_user_logout
from django.http import JsonResponse, HttpRequest, HttpResponse, HttpResponseRedirect
from django.conf import settings
from django.contrib.auth import authenticate, login as auth_login, logout as auth_logout
from urllib import parse as urllib_parse
from rest_framework.response import Response
from rest_framework_jwt.settings import api_settings
from django.contrib.auth.models import update_last_login
from .models import User
JWT_PAYLOAD_HANDLER = api_settings.JWT_PAYLOAD_HANDLER
JWT_ENCODE_HANDLER = api_settings.JWT_ENCODE_HANDLER
class APILoginView(cas_views.LoginView):
    """CAS login that redirects to the SPA with a JWT on success."""

    def successful_login(self, request: HttpRequest, next_page: str) -> HttpResponse:
        """
        Called after CAS authentication succeeds.

        Looks up the campus account (email + @ui.ac.id suffix); accounts
        without an email are removed, inactive accounts are bounced, and
        valid accounts are redirected with a freshly minted JWT.

        :param request: current HTTP request (request.user is the CAS user)
        :param next_page: default post-login destination (unused here)
        :return: redirect to the frontend with the login outcome
        """
        campus_email = f'{request.user.email}@ui.ac.id'
        try:
            user = User.objects.get(email=campus_email)
        except User.DoesNotExist:
            user = request.user
        if user.email == "":
            # No email on record: discard the stray account and bounce.
            destination = settings.SUCCESS_SSO_AUTH_REDIRECT + 'not-login/'
            user.delete()
        elif not user.is_active:
            destination = settings.SUCCESS_SSO_AUTH_REDIRECT + 'not-login/'
        else:
            jwt_token = JWT_ENCODE_HANDLER(JWT_PAYLOAD_HANDLER(user))
            update_last_login(None, user)
            destination = settings.SUCCESS_SSO_AUTH_REDIRECT + 'login-sivitas/' + jwt_token
        return HttpResponseRedirect(destination)
class APILogoutView(cas_views.LogoutView):
    """CAS logout: fires the logout signal, clears local CAS tickets and
    the Django session, then redirects to the CAS server's logout page."""
    def get(self, request: HttpRequest) -> HttpResponse:
        """
        Log the user out locally and redirect to the CAS logout page.

        :param request: current HTTP request
        :return: redirect to the CAS logout URL, or straight to next_page
                 when CAS_LOGOUT_COMPLETELY is disabled
        """
        next_page = settings.SUCCESS_SSO_AUTH_REDIRECT
        # try to find the ticket matching current session for logout signal
        try:
            st = SessionTicket.objects.get(session_key=request.session.session_key)
            ticket = st.ticket
        except SessionTicket.DoesNotExist:
            ticket = None
        # send logout signal (before tickets/session are destroyed, so
        # receivers still see the ticket)
        cas_user_logout.send(
            sender="manual",
            user=request.user,
            session=request.session,
            ticket=ticket,
        )
        # clean current session ProxyGrantingTicket and SessionTicket
        ProxyGrantingTicket.objects.filter(session_key=request.session.session_key).delete()
        SessionTicket.objects.filter(session_key=request.session.session_key).delete()
        auth_logout(request)
        # NOTE(review): next_page is always truthy here (assigned above),
        # so the get_redirect_url fallback never fires -- confirm intent.
        next_page = next_page or get_redirect_url(request)
        if settings.CAS_LOGOUT_COMPLETELY:
            client = get_cas_client(request=request)
            return HttpResponseRedirect(client.get_logout_url(next_page))
        # This is in most cases pointless if not CAS_RENEW is set. The user will
        # simply be logged in again on next request requiring authorization.
        return HttpResponseRedirect(next_page)
| ferenica/sipraktikum-backend | authentication/cas_wrapper.py | cas_wrapper.py | py | 3,330 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework_jwt.settings.api_settings.JWT_PAYLOAD_HANDLER",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "rest_framework_jwt.settings.api_settings",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "rest_framework_jwt.settings.api_settin... |
11934438668 | from collections import Counter
from typing import Counter
def main():
    """Read t test cases from stdin; for each, print the value counts of a list."""
    case_count = int(input())
    for _ in range(case_count):
        n = int(input())  # declared list length; consumed but otherwise unused
        values = [int(token) for token in input().split()]
        print(Counter(values))


main()
| Misganaw-Berihun/CONTESTS | After_study_contest_4/Equalize_the_Array.py | Equalize_the_Array.py | py | 245 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Counter",
"line_number": 8,
"usage_type": "call"
}
] |
36189617286 | import requests
# Downloads the background images (bg.webp, bg1.webp ... bg8.webp) plus the
# home image from shiqianjiang.cn into the local "shiqianjiang/" directory,
# which is assumed to exist.
url_f = "https://shiqianjiang.cn/home/image/bg"
url_e = ".webp"
# Browser-like headers copied from a real request so the server serves the
# images normally.
headers = {
    'Accept': 'image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Connection': 'keep-alive',
    'Referer': 'https://shiqianjiang.cn/home/',
    'Sec-Fetch-Dest': 'image',
    'Sec-Fetch-Mode': 'no-cors',
    'Sec-Fetch-Site': 'same-origin',
    'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/103.0.0.0 Mobile Safari/537.36',
    'sec-ch-ua': '".Not/A)Brand";v="99", "Google Chrome";v="103", "Chromium";v="103"',
    'sec-ch-ua-mobile': '?1',
    'sec-ch-ua-platform': 'Android'
}
# The first background has no numeric suffix (bg.webp); the rest are bg1..bg8.
# NOTE(review): verify=False disables TLS certificate verification -- this is
# insecure and should only be used if the site's certificate is known-broken.
for i in range(9):
    a = str(i)
    if (i == 0):
        a = ''
    resp = requests.get(url_f + a + url_e, headers=headers, verify=False)
    with open("shiqianjiang/"+str(i)+".webp", mode="wb") as f:
        f.write(resp.content)
# Home page hero image.
url = "https://shiqianjiang.cn/home/image/home.webp"
resp = requests.get(url, headers=headers, verify=False)
with open("shiqianjiang/"+"home.webp", mode="wb") as f:
    f.write(resp.content)
# Avatar image (a PNG saved with a .webp name).
url = "https://shiqianjiang.cn/image/head.png"
resp = requests.get(url, headers=headers, verify=False)
with open("shiqianjiang/"+"head.webp", mode="wb") as f:
f.write(resp.content) | wuheyouzi/code | PycharmProjects/test/shiqianjiang/shiqianjiang.py | shiqianjiang.py | py | 1,346 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 35,
"usage_type": "call"
}
] |
36584541657 | import logging
from datetime import datetime
from pymongo import MongoClient, UpdateOne
class UrlRepository:
    """MongoDB-backed store of crawled URLs and their page content."""

    def __init__(self):
        mongo_client = MongoClient('mongodb://mongodb:27017/')
        mongo_db = mongo_client['crawler_db']
        self.collection = mongo_db['urls']
        try:
            # Ensure a unique index on `url`. create_index is idempotent,
            # so this is best-effort setup.
            self.collection.create_index([('url', 1)], unique=True)
        except Exception:
            # Deliberately ignored: the repository remains usable without
            # the index (upserts stay correct, just slower).
            pass

    def upsert_url(self, url, content):
        """Insert or update the document for *url* with fresh content.

        NOTE(review): last_modified is a naive local datetime -- confirm
        whether UTC (datetime.now(timezone.utc)) is expected.
        """
        update_query = {
            '$set': {
                'content': content,
                'last_modified': datetime.now()
            }
        }
        # A single update_one replaces the original one-element bulk_write;
        # same upsert semantics, less overhead.
        self.collection.update_one({'url': url}, update_query, upsert=True)

    def find_url(self, url):
        """Return the stored document for *url*, or None if absent."""
        query = {'url': url}
        return self.collection.find_one(query)
| HarrYoha/url_explorer | src/repositories/url_repository.py | url_repository.py | py | 838 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "pymongo.Up... |
74537088743 | # stdlib imports
import asyncio
import time
# project imports
import asyncio_cpu
import asyncio_io
if __name__ == "__main__":
start_time = time.time()
loop = asyncio.get_event_loop()
io_start = time.time()
api_data = loop.run_until_complete(asyncio_io.get_data())
print(f"\nDone. IO bound time: {round(time.time() - io_start, 2)}\n")
cpu_start = time.time()
asyncio.run(asyncio_cpu.process_response(api_data=api_data))
print(f"\nDone. CPU bound time: {round(time.time() - cpu_start, 2)}")
print(f"\nTotal time: {round(time.time() - start_time, 2)}")
| bdelate/talk-python-async | src/asyncio_main.py | asyncio_main.py | py | 592 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "asyncio_io.get_data",
... |
26336332994 | import pytest
from ui.locators import basic_locators
from base import BaseCase
class Test_Target(BaseCase):
    """UI tests for login/logout and profile editing on the ad platform."""

    # Shared test-account credentials.
    EMAIL = 'alena1997999@gmail.com'
    PASSWORD = 'tWz+H@&Gws#Yj7L'

    @pytest.mark.UI
    def test_login(self):
        """Logging in lands on the campaigns page."""
        self.log_in(self.EMAIL, self.PASSWORD)
        assert 'Кампании' in self.driver.title

    @pytest.mark.UI
    def test_logout(self):
        """Logging out returns to the public landing page."""
        self.log_in(self.EMAIL, self.PASSWORD)
        self.log_out()
        assert "Рекламная платформа" in self.driver.title

    @pytest.mark.UI
    def test_change_info(self):
        """Editing contact info shows the success notification."""
        self.log_in(self.EMAIL, self.PASSWORD)
        self.change_info("Anna", "89069474448", "anna1234@test.com")
        assert 'Информация успешно сохранена' in self.driver.page_source

    @pytest.mark.UI
    @pytest.mark.parametrize(
        'page, expected',
        [
            pytest.param(basic_locators.CHANGE_PAGE1, 'Контактная информация'),
            pytest.param(basic_locators.CHANGE_PAGE2, 'Лицевой счет'),
        ],
    )
    def test_change_page(self, page, expected):
        """Clicking a navigation locator opens the page with the expected title."""
        self.log_in(self.EMAIL, self.PASSWORD)
        self.click_on_element(page)
        assert expected in self.driver.title
| penguin7707/demo | code/test_hm1.py | test_hm1.py | py | 1,317 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "base.BaseCase",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "pytest.mark",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark",
"line... |
37941848319 | from kivy.uix.screenmanager import ScreenManager, Screen
from tasks.tsks import A
class Scrn_manger:
    """Switches between the "lists" screen and a per-list "tasks" screen.

    NOTE(review): sm/sc/a are class-level attributes shared by all
    instances -- presumably this class is used as a singleton; confirm.
    """
    sm = ScreenManager()
    name = ""
    sc = Screen(name="tasks")
    # Bug fix: this flag was never initialized, so `if self.a:` in
    # to_tasks raised AttributeError on the first call. True means the
    # tasks screen still needs its one-time setup.
    a = True
    # main.right.dodBtn.bind(on_press=main.dod)

    def to_lists(self, sc, a):
        """Return to the "lists" screen and tear down the tasks widgets.

        *sc* is the tasks Screen, *a* the A instance that built it.
        """
        self.sm.current = "lists"
        self.sm.remove_widget(sc)
        a.layout.clear_widgets()
        sc.clear_widgets()
        a.left.layout.clear_widgets()
        a.right.buttons.clear_widgets()
        a.right.add.add.clear_widgets()
        # Bug fix: the original assigned the local parameter `a`, which
        # had no effect; the flag read by to_tasks lives on the instance.
        self.a = True

    def to_tasks(self, name):
        """Build the tasks screen for list *name* and switch to it."""
        a = A(name)
        if self.a:
            self.a = False
            # a.right.delBtn.bind(on_release=a.right.fn.get_state)
        ly = a.a()
        a.bcbtn.bind(on_release=lambda i: self.to_lists(self.sc, a))
        self.sc.add_widget(ly)
        self.sm.add_widget(self.sc)
        self.sm.current = "tasks"
| domenSedlar/ToDoAppClient | scrn_mangr.py | scrn_mangr.py | py | 874 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "kivy.uix.screenmanager.ScreenManager",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "kivy.uix.screenmanager.Screen",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "tasks.tsks.A",
"line_number": 24,
"usage_type": "call"
}
] |
40047690606 | import csv
import os
import datetime
from datetime import date, datetime, timedelta
import matplotlib.pyplot as plt
from rich.console import Console
console = Console()
current_date = date.today().strftime("%d/%m/%Y")
# Generates unique ID for each new line in each csv file
# Generates unique ID for each new line in each csv file
def generate_id(file_name):
    """Return the next zero-padded record id for the CSV file *file_name*.

    The id is last row's id + 1, padded to at least two digits ("01",
    "02", ...). A file with only a header (or no rows) yields "01".

    Side effect: when the file already holds data rows, it is rewritten
    once without blank lines (the csv module can otherwise accumulate
    empty rows between appends).
    """
    with open(file_name, "r", newline="") as file:
        rows = [row for row in csv.reader(file) if row]
    if len(rows) <= 1:
        # Header only -- or an empty file, which previously crashed with
        # IndexError; the first data row gets id "01" either way.
        return "01"
    next_id = str(int(rows[-1][0]) + 1).zfill(2)
    # A single "w" open truncates implicitly; the original's separate
    # "w+"/truncate() pass was redundant.
    with open(file_name, "w", newline="") as newfile:
        csv.writer(newfile).writerows(rows)
    return next_id
# Returns total available stock of a product in inventory
def get_total_stock(product):
with open("data/inventory.csv", "r") as file:
total_count = 0
for line in file.readlines():
if product in line:
product_line = line.split(",")
quantity = int(product_line[5])
total_count += quantity
return total_count
# Adds new product to inventory file
# Adds new product to inventory file
def buy_product(name, price, exp_date, quantity):
    """Append a newly purchased product row to data/inventory.csv and
    report the purchase on the console."""
    row = [generate_id("data/inventory.csv"), name, current_date, price, exp_date, quantity]
    with open("data/inventory.csv", "a", newline="") as inventory:
        csv.writer(inventory).writerow(row)
    console.print(f"{quantity} pieces of {name} are added to the inventory.", style="#96fdca")
# Updates sold.csv and expired.csv files
def update_csv_file(product, sold_quantity, sell_price, sell_date, file_name):
id = generate_id(file_name)
product = {
"id": id,
"product_name": product["product_name"],
"buy_id": product["id"],
"buy_price": product["buy_price"],
"buy_date": product["buy_date"],
"exp_date": product["exp_date"],
"stock_quantity": product["quantity"],
"sell_date": sell_date,
"sell_price": sell_price,
"sold_quantity": sold_quantity
}
with open(file_name, "a+", newline="") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=[
"id","product_name","buy_id","buy_price","buy_date",
"exp_date","stock_quantity","sell_date",
"sell_price","sold_quantity"])
writer.writerow(product)
# Updates inventory.csv file
def update_inventory_file(inventory_dict):
csv_header = inventory_dict[0].keys()
updated_csv_file = "data/new_inventory.csv"
with open(updated_csv_file, "w") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csv_header)
writer.writeheader()
for product in inventory_dict[1:]:
writer.writerow(product)
old_csv_file = "data/inventory.csv"
if (os.path.exists(old_csv_file) and os.path.isfile(old_csv_file)):
os.remove(old_csv_file)
os.rename(updated_csv_file, old_csv_file)
# When a product is sold, this function checks if there is enough stock is available for selling
# by checking quantities and expiration dates.
# It automatically removes sold and/or expired stock.
def check_and_update_stock(product_name, sold_quantity, sell_price):
    """Consume *sold_quantity* of *product_name* from the inventory.

    Reads data/inventory.csv into a list of row dicts (index 0 is the
    header row), walks the batches of the product, deducts sold stock
    (logging sales to data/sold.csv) and drops expired batches (logging
    them to data/expired.csv). Returns the updated list of row dicts,
    which the caller writes back via update_inventory_file().
    """
    products_in_stock = []
    with open("data/inventory.csv", "r") as csvfile:
        readCSV = csv.reader(csvfile,delimiter=",")
        for row in readCSV:
            product = dict(id=row[0], product_name=row[1], buy_date=row[2], buy_price=row[3], exp_date=row[4], quantity=row[5])
            products_in_stock.append(product)
    # Carry of leftover quantity between batches when one batch alone
    # cannot cover the sale (negative after a partially consumed batch).
    updated_stock = 0
    # Skip index 0: it is the parsed header row.
    for product in products_in_stock[1:]:
        if product["product_name"] == product_name:
            exp = datetime.strptime(product["exp_date"], "%d/%m/%Y")
            cur = datetime.strptime(current_date, "%d/%m/%Y")
            if cur < exp:
                stock = int(product["quantity"])
                # Batch (plus any carry) covers the remaining sale: deduct,
                # log the sale, and finish.
                if stock >= sold_quantity and cur < exp:
                    stock = stock + updated_stock
                    stock = stock - sold_quantity
                    product["quantity"] = str(stock)
                    update_csv_file(product, sold_quantity, sell_price, current_date, "data/sold.csv")
                    if stock <= 0: products_in_stock.remove(product)
                    console.print(f"{sold_quantity} pieces of {product_name} are removed from inventory.", style="#FBDBDF")
                    return products_in_stock
                # Batch too small: consume it entirely and carry the
                # (negative) remainder into the next matching batch.
                elif stock < sold_quantity and cur < exp:
                    stock = stock + updated_stock
                    updated_stock = stock - sold_quantity
                    sold_quantity = 0
                    product["quantity"] = str(updated_stock)
                    if updated_stock <= 0: products_in_stock.remove(product)
            # Expired batch, but other stock of this product exists:
            # silently retire this batch and keep looking.
            elif cur >= exp and int(product["quantity"]) < get_total_stock(product["product_name"]):
                products_in_stock.remove(product)
                update_csv_file(product, 0, 0, "--", "data/expired.csv")
                continue
            # Expired and it was the only stock: report and abort the sale.
            else:
                console.print(f"Unfortunately your {product_name} stock is expired ðŸ¤", style="#fd9796")
                sold_quantity = 0
                sell_price = 0
                sell_date = "--"
                products_in_stock.remove(product)
                update_csv_file(product, sold_quantity, sell_price, sell_date, "data/expired.csv")
                return products_in_stock
# Removes products from inventory.
def sell_product(sold_product_name, sold_quantity, sell_price):
total_stock = get_total_stock(sold_product_name)
if total_stock >= sold_quantity:
update_inventory_file(check_and_update_stock(sold_product_name, sold_quantity, sell_price))
elif total_stock > 0:
return console.print(f"You do not have enough stock, you can sell a maximum of {total_stock} 😅", style="#fdca96")
else:
return console.print(f"You do not have any {sold_product_name} in stock 😟", style="#fd9796") | Juliazijd/winc_superpy | superpy/helpers/buy_sell_products.py | buy_sell_products.py | py | 6,563 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rich.console.Console",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "csv.reader",
... |
18659247749 | import numpy as np
import pandas as pd
from sklearn.model_selection import KFold, train_test_split
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from keras.utils import np_utils
import tensorflow as tf
from MB_nn import MB_nn
from keras.utils.np_utils import to_categorical
from sklearn.metrics import classification_report
# MB_true = {
# '2':[4],
# '4': [1, 2, 3, 5],
# '5':[0,4,6,3],
# '6':[5,3,35,34],
# '8':[7,34],
# '11':[10,34],
# '14': [13,33,36,35,12,20,32 ],
# '19':[18,31,20,23],
# '24': [23,17,31,25,30,22,29,16],
# '25':[24,29,16],
# '29': [26,28,25,30,16,24],
# '30': [16,17,31,15,29,24,32],
# '33': [14,12,20,32,34],
# '34': [33,7,10,35,8,11,9],
# '35':[36,34,6,14],
# }
# MB_pre = {'2': [4],
# '4': [1, 2, 3, 5],
# '5': [0, 3, 4, 6],
# '6': [34, 3, 35, 4, 5],
# '8': [34, 7],
# '11': [34, 10],
# '14': [32, 33, 35, 36],
# '19': [18, 20, 23, 31],
# '24': [16, 17, 23, 25, 29, 30, 31],
# '25': [24, 16, 29],
# '29': [24, 25, 26, 28, 30],
# '30': [17, 24, 31, 29, 15],
# '33': [32, 34, 14],
# '34': [33, 35, 6, 7, 8, 9, 10, 11],
# '35': [34, 36, 6, 14] }
def load_pred_MB(filename):
    """Read a CSV of predicted Markov blankets into {target: [node ids]}.

    Each column name is a target node; NaN cells (padding from columns of
    unequal length) are dropped and the remaining values are rendered as
    integer strings, e.g. {'2': ['1', '2', '4'], '4': ['1', '5', '8']}.
    """
    frame = pd.read_csv(filename)
    blankets = {}
    for column in frame:
        blankets[column] = [str(int(value)) for value in frame[column] if not np.isnan(value)]
    return blankets  # return {'2':[1,2,4], '4':[1,5,8]}
# Like 'Alarm.csv' and numerical column names
def load_mb_data(dataset, target, MB_dict):
df = pd.read_csv(dataset)
if target in MB_dict:
MB = MB_dict[target]
else:
print('Target Input Error!')
sys.exit()
X = df.loc[:, MB]
encoder = OneHotEncoder(sparse=False)
X = encoder.fit_transform(X) # X = pd.get_dummy(X)
y = df[target]
encoder = LabelEncoder()
y = encoder.fit_transform(y)
# Convert integers to dummy variables (i.e. one hot encoded)
y = np_utils.to_categorical(y)
return X, y
# Original dataset "ALARM.csv"
def load_all_data(target, data_name = 'ALARM.csv'):
#https://stackoverflow.com/questions/43515877/should-binary-features-be-one-hot-encoded
data_path = 'DATASET/' + data_name
df = pd.read_csv( data_path, index_col=False )
for col in df.columns:
if df[col].dtype == 'bool':
df[col] = df[col].map({True: 1, False: 0})
y = df.pop(target)
encoder = LabelEncoder()
y = encoder.fit_transform(y)
y = np_utils.to_categorical(y)
encoder = OneHotEncoder(sparse=False)
X = encoder.fit_transform(df)
return X, y
# '4' : 'LVEDVOLUME'
# '5' : 'LVFAILURE'
# '6' : 'STROKEVOLUME'
# '14' : 'TPR'
# '24' : 'INTUBATION'
# '29' : 'VENTTUBE'
# '30': 'VENTLUNG'
# '34' : 'HR'
def main(flag_MB = 0):
    """Train MB_nn with 10-fold CV and save per-fold cross-entropy losses.

    flag_MB=1 trains on the target's predicted Markov-blanket features
    (target node '34'); flag_MB=0 trains on all features (target 'HR').
    Results are written to '<MB|All>.csv'.
    """
    if flag_MB == 1:
        # MB features as input
        MB_dict = load_pred_MB('Pred_MB_MBOR.csv')
        # 'ALARM_SAMPLES.csv': numerical columns and rows
        X, y = load_mb_data('ALARM_SAMPLES.csv' ,'34', MB_dict)
        name = 'MB'
    else:
        # All features as input
        X, y = load_all_data('HR')
        name = 'All'
    input_shape = X.shape[1]
    num_class = len(y[0])
    res = []  # accumulates [fold, loss] rows for the results CSV
    no_epochs = 200
    seed = 15
    # Hold out 20% once; the 10 folds split the remaining training part.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state= seed)
    print(X_train)
    print(y_test.shape)
    kf = KFold(n_splits = 10, random_state = seed, shuffle = True )
    #https://www.kaggle.com/questions-and-answers/236902
    for fold, (train_idx, val_idx) in enumerate(kf.split(X_train)):
        train_X, val_X = X_train[train_idx], X_train[val_idx]
        train_Y, val_Y = y_train[train_idx], y_train[val_idx]
        MB_model = MB_nn(input_shape , num_class)
        MB_model.assign_data(train_X, train_Y, val_X , val_Y, X_test, y_test)
        # NOTE(review): initial_weights is captured but never used.
        initial_weights = MB_model.model.get_weights()
        optim = tf.keras.optimizers.Adam()
        MB_model.train(no_epochs, optim)
        # Choose the best weights on the validation data from 10 fold results
        MB_model.model.set_weights(MB_model.best_weights)
        y_pred = MB_model.model.predict(MB_model.X_test)
        ess = tf.keras.losses.CategoricalCrossentropy()
        Entropy_Loss = ess(y_test, y_pred).numpy()
        res.append([fold, Entropy_Loss])
        print (fold + 1, Entropy_Loss)
        # Per-fold classification report on the held-out test set.
        y_pred = np.argmax(y_pred, axis=1)
        y_test_temp = np.argmax(y_test, axis=1)
        report = classification_report(y_test_temp, y_pred, output_dict=True)
        df_classification_report = pd.DataFrame(report).transpose()
        #df_classification_report = df_classification_report.sort_values(by=['f1-score'], ascending=False)
        print(df_classification_report)
        MB_model.model.reset_states()
    df_results = pd.DataFrame(res, columns = ['Run', 'Entropy_Loss'])
    df_results.to_csv(f'{name}.csv', index = False)
    print (df_results['Entropy_Loss'].mean())
    print (df_results['Entropy_Loss'].std())
if __name__ == "__main__":
main(0) | EricXue92/MB_NN | main.py | main.py | py | 5,247 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.isnan",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.One... |
15130212540 | # A brief script to convert GIF files to RAW with Vector's screen dimensions. Use GIF files that are 184x96 for best results.
# Expected Python Version is 3.9.
import os,sys
#import struct
import array
from PIL import Image
#import Image
# Target display resolution for the raw frames (Vector's LCD); the
# commented values are earlier candidates.
SCREEN_WIDTH,SCREEN_HEIGHT = 184,96 #240,240 #180,240
SIZE = (SCREEN_WIDTH,SCREEN_HEIGHT)
def pack16bitRGB(pixel):
    """Pack a PIL pixel into a 16-bit RGB565 word.

    Accepts an RGBA tuple (alpha ignored), an RGB tuple, or a single
    grayscale intensity that is used for all three channels.
    """
    if isinstance(pixel, (tuple, list)) and len(pixel) == 4:
        r, g, b, _alpha = pixel
    elif isinstance(pixel, (tuple, list)) and len(pixel) == 3:
        r, g, b = pixel
    else:
        r = g = b = pixel
    # 5 bits red | 6 bits green | 5 bits blue.
    return (int(r >> 3) << 11) | (int(g >> 2) << 5) | (int(b >> 3) << 0)
def convert_to_raw(img):
    """Flatten *img* into a row-major list of RGB565 words sized to the
    target screen; pixels beyond the image bounds stay 0x0000."""
    bitmap = [0x0000] * (SCREEN_WIDTH * SCREEN_HEIGHT)
    width, height = img.size
    for row in range(height):
        for col in range(width):
            bitmap[row * SCREEN_WIDTH + col] = pack16bitRGB(img.getpixel((col, row)))
    return bitmap
RAW = 1  # NOTE(review): unused -- presumably a leftover output-format flag
def convert_frame_to_data(frame):
    """Convert one PIL frame into an array of packed 16-bit RGB565 words."""
    rgba_frame = frame.convert('RGBA')
    return array.array("H", convert_to_raw(rgba_frame))
def extractGifFrames(inGif):
    """Write every frame of the GIF *inGif* as packed RGB565 data to
    '<basename>.raw' in the current directory. Returns True."""
    img = Image.open(inGif)
    out_name = '%s.raw' % (os.path.basename(inGif),)
    with open(out_name, "wb+") as out:
        frame_index = 0
        while True:
            # newframe = frame.rotate(90).resize( SIZE, Image.ANTIALIAS).convert('RGBA')
            out.write(convert_frame_to_data(img).tobytes())
            frame_index += 1
            try:
                # Advance to the next GIF frame; EOFError marks the end.
                img.seek(frame_index)
            except EOFError:
                break
    return True
def convertImages(dirname, images):
    """Concatenate *images* (paths, in order) into a single packed-RGB565
    animation file '<dirname>/anim.raw' and report the frame count."""
    outfilename = '%s/anim.raw' % dirname
    frame_count = 0
    with open(outfilename, "wb+") as out:
        for filename in images:
            out.write(convert_frame_to_data(Image.open(filename)).tobytes())
            frame_count += 1
    print('wrote {} frames to {}'.format(frame_count, outfilename))
# CLI entry: one .gif argument extracts its frames; several image
# arguments are concatenated into a single animation file.
if len(sys.argv) == 1:
    print('error: pass in a .gif file or a folder of sequential images')
    exit(-1)
elif len(sys.argv) == 2:
    extractGifFrames(sys.argv[1])
else:
    images = sorted(sys.argv[1:])
    # Bug fix: report the number of images, not len(sys.argv) (which
    # also counts the program name).
    print('got {} images'.format(len(images)))
    # NOTE(review): output goes to the script's directory -- confirm
    # this is intended rather than the images' directory.
    convertImages(os.path.dirname(sys.argv[0]), images)
| digital-dream-labs/oskr-owners-manual | examples/change_boot_anim/gif_to_raw.py | gif_to_raw.py | py | 2,400 | python | en | code | 35 | github-code | 36 | [
{
"api_name": "array.array",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "os.path.basename",
"line_num... |
15058607422 | # Importing the ChoiceMC class
import sys
import os
try:
from ChoiceMC import ChoiceMC, loadResult
except ModuleNotFoundError:
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))
from ChoiceMC import ChoiceMC, loadResult
import matplotlib.pyplot as plt
import time
import numpy as np
# Setting up the variables to sweep over
g_sweep = np.linspace(0.01, 3, 20)
N=4
# Date-stamped output directory; all result files are written inside it.
time_str = "EntanglementSweep_N" + str(N) + "-"+str(time.gmtime().tm_year)+'-'+str(time.gmtime().tm_mon)+'-'+str(time.gmtime().tm_mday)
path = os.path.join(os.getcwd(), time_str)
try:
    os.mkdir(path)
except FileExistsError:
    pass
os.chdir(path)
# Creating arrays to store the S2 versus g data; columns are [g, value, std error].
entropy = np.zeros((len(g_sweep),3), float)
entropy_out = open("SecondRenyiEntropy_N"+str(N)+'.dat','w')
purity = np.zeros((len(g_sweep),3), float)
purity_out = open("Purity_N"+str(N)+'.dat','w')
# Ratio-trick outputs only exist for N > 2 (the trick needs >2 rotors).
if N > 2:
    entropy_RT = np.zeros((len(g_sweep),3), float)
    entropy_RT_out = open("SecondRenyiEntropy_RatioTrick_N"+str(N)+'.dat','w')
    purity_RT = np.zeros((len(g_sweep),3), float)
    purity_RT_out = open("Purity_RatioTrick_N"+str(N)+'.dat','w')
    acceptRatio_RT_dict = {}
    acceptRatio_RT_out = open("AcceptanceRatio_RatioTrick_N"+str(N)+'.dat','w')
    acceptRatioError_RT_dict = {}
    acceptRatioError_RT_out = open("AcceptanceRatioStdError_RatioTrick_N"+str(N)+'.dat','w')
# Main sweep: one PIGS Monte Carlo run per interaction strength g,
# recording the second Renyi entropy, purity, and (for N > 2) the
# ratio-trick variants plus their acceptance ratios.
for ig, g in enumerate(g_sweep):
    print("------------------------------------------------")
    print("Starting g = " + str(g))
    # Creating a ChoiceMC object for the current iteration
    PIMC = ChoiceMC(m_max=5, P=9, g=g, MC_steps=100000, N=N, PIGS=True, Nskip=100, Nequilibrate=100, T=0.25)
    # Creating the probability density matrix for each rotor
    PIMC.createFreeRhoMarx()
    # Creating the probability density matrix for nearest neighbour interactions
    PIMC.createRhoVij()
    # Performing MC integration
    PIMC.runMCReplica()
    # Storing and saving the data from the current run
    entropy[ig,:] = [g, PIMC.S2_MC, PIMC.S2_stdError_MC]
    entropy_out.write(str(g) + ' ' + str(PIMC.S2_MC) + ' ' + str(PIMC.S2_stdError_MC) + '\n')
    purity[ig,:] = [g, PIMC.purity_MC, PIMC.purity_stdError_MC]
    purity_out.write(str(g) + ' ' + str(PIMC.purity_MC) + ' ' + str(PIMC.purity_stdError_MC) + '\n')
    if N > 2:
        # Performing MC integration with the ratio trick
        PIMC.runMCReplica(ratioTrick=True)
        # Storing and saving the data from the current run
        entropy_RT[ig,:] = [g, PIMC.S2_MC, PIMC.S2_stdError_MC]
        entropy_RT_out.write(str(g) + ' ' + str(PIMC.S2_MC) + ' ' + str(PIMC.S2_stdError_MC) + '\n')
        purity_RT[ig,:] = [g, PIMC.purity_MC, PIMC.purity_stdError_MC]
        purity_RT_out.write(str(g) + ' ' + str(PIMC.purity_MC) + ' ' + str(PIMC.purity_stdError_MC) + '\n')
        # Per-partition acceptance ratios, keyed by g.
        acceptRatio_RT_dict.update({g: PIMC.AR_MC_arr})
        acceptRatioError_RT_dict.update({g: PIMC.AR_stdError_MC_arr})
        acceptRatio_RT_out.write(str(g))
        for AR in PIMC.AR_MC_arr:
            acceptRatio_RT_out.write(' ' + str(AR))
        acceptRatio_RT_out.write('\n')
        acceptRatioError_RT_out.write(str(g))
        for stdAR in PIMC.AR_stdError_MC_arr:
            acceptRatioError_RT_out.write( ' ' + str(stdAR))
        acceptRatioError_RT_out.write('\n')
    # Closing the remaining open plots
    plt.close('all')
if N==2:
    # Loading in ED results
    arrS2_ED = loadResult(os.path.join('ED', 'SecondRenyiEntropy_mMax5.dat'))
if N > 2:
    # Reshape the per-g acceptance-ratio dicts into a (partition, g, 3)
    # array with columns [g, value, std error].
    acceptRatio_RT = np.zeros((len(acceptRatio_RT_dict[g_sweep[0]]), len(g_sweep), 3))
    for ig, g in enumerate(acceptRatio_RT_dict):
        for iPartition, Partition in enumerate(acceptRatio_RT_dict[g]):
            acceptRatio_RT[iPartition,ig,:] = [g, acceptRatio_RT_dict[g][iPartition], acceptRatioError_RT_dict[g][iPartition]]
# Plotting: second Renyi entropy vs g.
S2_fig, S2_ax = plt.subplots(1, 1, figsize=(8,5))
S2_ax.errorbar(entropy[:,0], entropy[:,1], entropy[:,2], label='PIGS', fmt='.-', capsize=3, color='k')
if N == 2:
    S2_ax.plot(arrS2_ED[:,0], arrS2_ED[:,1], label='ED', marker='o', color='#d62728')
elif N > 2:
    S2_ax.errorbar(entropy_RT[:,0], entropy_RT[:,1], entropy_RT[:,2], label='PIGS:RT', fmt='.-', capsize=3, color='#1f77b4')
S2_ax.legend()
S2_ax.minorticks_on()
S2_ax.set_xlabel('g')
S2_ax.set_ylabel(r'$S_2$')
S2_ax.annotate('N = ' + str(N), xy=(0.5, 0.95), xycoords='axes fraction', horizontalalignment='center', verticalalignment='top')
S2_fig.tight_layout()
S2_fig.savefig("SecondRenyiEntropy_N" + str(N) + ".png")
# Plotting: purity vs g.
AR_fig, AR_ax = plt.subplots(1, 1, figsize=(8,5))
AR_ax.errorbar(purity[:,0], purity[:,1], purity[:,2], label='PIGS', fmt='.-', capsize=3, color='k')
if N > 2:
    AR_ax.errorbar(purity_RT[:,0], purity_RT[:,1], purity_RT[:,2], label='PIGS:RT', fmt='.-', capsize=3, color='#1f77b4')
AR_ax.minorticks_on()
AR_ax.legend()
AR_ax.set_xlabel('g')
AR_ax.set_ylabel('Purity')
AR_ax.annotate('N = ' + str(N), xy=(0.5, 0.95), xycoords='axes fraction', horizontalalignment='center', verticalalignment='top')
AR_fig.tight_layout()
AR_fig.savefig("Purity_N" + str(N) + ".png")
# Plotting: acceptance ratios vs g (figure handles are reused here).
AR_fig, AR_ax = plt.subplots(1, 1, figsize=(8,5))
AR_ax.errorbar(purity[:,0], purity[:,1], purity[:,2], label=r'$N_{S}/N_{U}$', fmt='.-', capsize=3)
if N > 2:
    AR_ax.errorbar(purity_RT[:,0], purity_RT[:,1], purity_RT[:,2], label=r'$N_{S}/N_{U}$: RT', fmt='.-', capsize=3)
    for i in range(np.shape(acceptRatio_RT)[0]):
        AR_ax.errorbar(acceptRatio_RT[i,:,0], acceptRatio_RT[i,:,1], acceptRatio_RT[i,:,2], label=r'$N_{'+str(i+1)+'}/N_{'+str(i)+'}$: RT', fmt='.-', capsize=3)
AR_ax.minorticks_on()
AR_ax.legend()
AR_ax.set_xlabel('g')
AR_ax.set_ylabel('Acceptance Ratio')
AR_ax.annotate('N = ' + str(N), xy=(0.5, 0.95), xycoords='axes fraction', horizontalalignment='center', verticalalignment='top')
AR_fig.tight_layout()
AR_fig.savefig("AcceptanceRatio_N" + str(N) + ".png")
# Flush and close all result files and figures.
entropy_out.close()
purity_out.close()
if N > 2:
    entropy_RT_out.close()
    purity_RT_out.close()
    acceptRatio_RT_out.close()
    acceptRatioError_RT_out.close()
plt.close('all')
| AndrewBright34/ChoiceMC | Parametric_Sweeps/ChoiceMC_Sweep_Entanglement.py | ChoiceMC_Sweep_Entanglement.py | py | 6,148 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
37728423811 | from tkinter import *
import pyttsx3
import PIL.ImageOps
from PIL import Image
import numpy as np
from PIL import EpsImagePlugin
import tensorflow as tf
import matplotlib.pyplot as plt
import threading
import random
import time
oldtext = ""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
EpsImagePlugin.gs_windows_binary = r'bin\gswin64c'
modelfilename = "modelallwords"
labellist = ["The Eiffel Tower", "The Great Wall of China", "The Mona Lisa", "aircraft carrier", "airplane",
"alarm clock", "ambulance", "angel", "animal migration", "ant", "anvil", "apple", "arm", "asparagus",
"axe", "backpack", "banana", "bandage", "barn", "baseball bat", "baseball", "basket", "basketball", "bat",
"bathtub", "beach", "bear", "beard", "bed", "bee", "belt", "bench", "bicycle", "binoculars", "bird",
"birthday cake", "blackberry", "blueberry", "book", "boomerang", "bottlecap", "bowtie", "bracelet",
"brain", "bread", "bridge", "broccoli", "broom", "bucket", "bulldozer", "bus", "bush", "butterfly",
"cactus", "cake", "calculator", "calendar", "camel", "camera", "camouflage", "campfire", "candle",
"cannon", "canoe", "car", "carrot", "castle", "cat", "ceiling fan", "cell phone", "cello", "chair",
"chandelier", "church", "circle", "clarinet", "clock", "cloud", "coffee cup", "compass", "computer",
"cookie", "cooler", "couch", "cow", "crab", "crayon", "crocodile", "crown", "cruise ship", "cup",
"diamond", "dishwasher", "diving board", "dog", "dolphin", "donut", "door", "dragon", "dresser", "drill",
"drums", "duck", "dumbbell", "ear", "elbow", "elephant", "envelope", "eraser", "eye", "eyeglasses", "face",
"fan", "feather", "fence", "finger", "fire hydrant", "fireplace", "firetruck", "fish", "flamingo",
"flashlight", "flip flops", "floor lamp", "flower", "flying saucer", "foot", "fork", "frog", "frying pan",
"garden hose", "garden", "giraffe", "goatee", "golf club", "grapes", "grass", "guitar", "hamburger",
"hammer", "hand", "harp", "hat", "headphones", "hedgehog", "helicopter", "helmet", "hexagon",
"hockey puck", "hockey stick", "horse", "hospital", "hot air balloon", "hot dog", "hot tub", "hourglass",
"house plant", "house", "hurricane", "ice cream", "jacket", "jail", "kangaroo", "key", "keyboard", "knee",
"knife", "ladder", "lantern", "laptop", "leaf", "leg", "light bulb", "lighter", "lighthouse", "lightning",
"line", "lion", "lipstick", "lobster", "lollipop", "mailbox", "map", "marker", "matches", "megaphone",
"mermaid", "microphone", "microwave", "monkey", "moon", "mosquito", "motorbike", "mountain", "mouse",
"moustache", "mouth", "mug", "mushroom", "nail", "necklace", "nose", "ocean", "octagon", "octopus",
"onion", "oven", "owl", "paint can", "paintbrush", "palm tree", "panda", "pants", "paper clip",
"parachute", "parrot", "passport", "peanut", "pear", "peas", "pencil", "penguin", "piano", "pickup truck",
"picture frame", "pig", "pillow", "pineapple", "pizza", "pliers", "police car", "pond", "pool", "popsicle",
"postcard", "potato", "power outlet", "purse", "rabbit", "raccoon", "radio", "rain", "rainbow", "rake",
"remote control", "rhinoceros", "rifle", "river", "roller coaster", "rollerskates", "sailboat", "sandwich",
"saw", "saxophone", "school bus", "scissors", "scorpion", "screwdriver", "sea turtle", "see saw", "shark",
"sheep", "shoe", "shorts", "shovel", "sink", "skateboard", "skull", "skyscraper", "sleeping bag",
"smiley face", "snail", "snake", "snorkel", "snowflake", "snowman", "soccer ball", "sock", "speedboat",
"spider", "spoon", "spreadsheet", "square", "squiggle", "squirrel", "stairs", "star", "steak", "stereo",
"stethoscope", "stitches", "stop sign", "stove", "strawberry", "streetlight", "string bean", "submarine",
"suitcase", "sun", "swan", "sweater", "swing set", "sword", "syringe", "t-shirt", "table", "teapot",
"teddy-bear", "telephone", "television", "tennis racquet", "tent", "tiger", "toaster", "toe", "toilet",
"tooth", "toothbrush", "toothpaste", "tornado", "tractor", "traffic light", "train", "tree", "triangle",
"trombone", "truck", "trumpet", "umbrella", "underwear", "van", "vase", "violin", "washing machine",
"watermelon", "waterslide", "whale", "wheel", "windmill", "wine bottle", "wine glass", "wristwatch",
"yoga", "zebra", "zigzag"]
print(len(labellist))
model = tf.keras.models.load_model("saved models/" + modelfilename)
randomword = ""
engine = pyttsx3.init()
engine.setProperty('rate',145)
scale = 0
class Paint(object):
    """Quick, Draw!-style game window.

    Shows a Tk canvas plus a prompted word; a self-rescheduling background
    loop exports the canvas, runs the global CNN ``model`` on a 28x28
    grayscale version of the sketch, speaks the top guesses through the
    global ``engine``, and advances to a new word once the prompt is
    recognised.
    """

    def __init__(self):
        """Build the UI, start the background guesser, and enter mainloop."""
        global scale
        self.root = Tk()
        self.root.title("Quick draw by: Gal Bareket")
        # Scale widget sizes relative to a 1080px-tall reference display.
        scale = 1080 / self.root.winfo_screenheight()
        print(scale)
        self.eraser_button = Button(self.root, text='erase', command=self.use_eraser, height=int(2/scale), width=int(30/scale))
        self.eraser_button.grid(row=0, column=1)
        self.skip_button = Button(self.root, text='skip', command=self.pickword, height=int(2/scale), width=int(30/scale))
        self.skip_button.grid(row=0, column=3)
        #self.showimage_button = Button(self.root, text='show image', command=self.showimage, height=2, width=30)
        #
        #self.showimage_button.grid(row=0, column=4)
        self.c = Canvas(self.root, bg='white', width=int(896 / scale), height=int(896 / scale))
        self.c.grid(row=2, columnspan=5)
        # label1 shows the spoken guess text; label2 shows the word to draw.
        self.label1 = Label(self.root, text="", bg="white", height=int(1/scale), width=int(60/scale), font=("Courier", int(20/scale)))
        self.label1.grid(row=4, columnspan=5)
        self.label2 = Label(self.root, text="", bg="white", height=int(1/scale), width=int(35/scale), font=("Courier", int(15/scale)), anchor="w")
        self.label2.grid(row=0, column=2)
        # Kick off the recognition loop off the Tk thread (save() reschedules itself).
        threading.Thread(target=lambda: self.save()).start()
        self.setup()
        self.root.mainloop()

    def setup(self):
        """Initialise drawing state, bind mouse events, and pick a first word."""
        self.old_x = None
        self.old_y = None
        self.color = "black"
        self.eraser_on = False
        self.c.bind('<B1-Motion>', self.paint)
        self.c.bind('<Button-1>', self.paint)
        self.c.bind('<ButtonRelease-1>', self.reset)
        self.pickword()

    def pickword(self):
        """Choose a new random prompt word and clear the canvas."""
        global randomword
        # BUG FIX: random.randint includes both endpoints, so the original
        # randint(0, len(labellist)) could return len(labellist) and raise
        # IndexError; randrange excludes the upper bound.
        randomword = labellist[random.randrange(len(labellist))]
        self.label2.configure(text="Draw: " + randomword)
        self.use_eraser()

    def save(self):
        """Export the canvas, classify it, announce guesses, and reschedule.

        Runs outside the Tk thread and reschedules itself every 0.25s via
        threading.Timer.  When the prompted word is among the confident
        guesses, waits briefly and moves on to a new word.
        """
        global oldtext
        self.c.postscript(file="drawnimage.eps")
        img = Image.open("drawnimage.eps")
        img = img.resize((28, 28))
        img = PIL.ImageOps.invert(img)
        img = img.convert('L')
        imgA = np.asarray(img)
        imgA = imgA.reshape(28, 28, 1).astype('float32')
        imgA /= 255.0  # model input is normalised to [0, 1]
        arr = model.predict(imgA[None, :, :, :])[0]
        # Indices of the three highest-probability classes, best first.
        indices = arr.argsort()[-3:][::-1]
        # Every class the model assigns more than 10% probability.
        predictionlist = []
        for i in arr.argsort():
            if arr[i] > 0.10:
                predictionlist.append(labellist[i])
        text = ""
        if randomword in predictionlist:
            text = "Oh i know it's " + randomword
        elif arr[indices[0]] > 0.10:
            for i in range(2):
                if arr[indices[i]] > 0.10:
                    if i == 0:
                        text = "I see " + labellist[indices[0]]
                    else:
                        text += ", " + labellist[indices[i]]
        else:
            # NOTE(review): this branch only runs when the top probability is
            # <= 0.10, so the > 0.5 test can never be true and text stays "" —
            # the intended condition was probably `< 0.5`; confirm before changing.
            if arr[indices[0]] > 0.5:
                text = "I am not sure what that is."
        for i in indices:
            print(labellist[i], str(int(arr[i] * 100)) + "%", end=",")
        print("----------")
        # Only update the label / speak when the guess text actually changed.
        if not oldtext == text:
            self.label1.config(text=text)
            engine.say(text.replace(",", " or "))
            engine.runAndWait()
            oldtext = text
        if randomword in predictionlist:
            time.sleep(2)
            self.pickword()
            time.sleep(2)
        threading.Timer(0.25, lambda: self.save()).start()

    def showimage(self):
        """Debug helper: display the 28x28 model input with matplotlib."""
        self.c.postscript(file="drawnimage.eps")
        img = Image.open("drawnimage.eps")
        img = img.resize((28, 28))
        img = PIL.ImageOps.invert(img)
        img = img.convert('L')
        img.save("image.png","PNG")
        imgA = np.asarray(img)
        imgA = imgA.reshape(28, 28, 1).astype('float32')
        imgA /= 255.0
        plt.imshow(imgA)
        plt.gray()
        plt.grid(False)
        plt.show()

    def use_eraser(self):
        """Clear the whole canvas."""
        self.c.delete("all")

    def activate_button(self, some_button, eraser_mode=False):
        """Toggle the pressed look between tool buttons.

        NOTE(review): self.active_button is never initialised anywhere, so a
        first call would raise AttributeError; this method is currently unused.
        """
        self.active_button.config(relief=RAISED)
        some_button.config(relief=SUNKEN)
        self.active_button = some_button
        self.eraser_on = eraser_mode

    def paint(self, event):
        """Draw (or erase) a thick, smoothed line segment following the mouse."""
        self.line_width = 40 / scale
        paint_color = 'white' if self.eraser_on else self.color
        # BUG FIX: compare against None instead of truthiness — coordinate 0
        # is falsy and previously restarted the stroke at the canvas edge.
        if self.old_x is not None and self.old_y is not None:
            self.c.create_line(self.old_x, self.old_y, event.x, event.y,
                               width=self.line_width, fill=paint_color,
                               capstyle=ROUND, smooth=TRUE, splinesteps=36)
        else:
            # First point of a stroke: draw a dot at the click position.
            self.c.create_line(event.x, event.y, event.x, event.y,
                               width=self.line_width, fill=paint_color,
                               capstyle=ROUND, smooth=TRUE, splinesteps=36)
        self.old_x = event.x
        self.old_y = event.y

    def reset(self, event):
        """End the current stroke on mouse release."""
        self.old_x, self.old_y = None, None
        # threading.Thread(target=self.save()).start()
if __name__ == '__main__':
    # Launch the GUI; Paint.__init__ blocks inside Tk's mainloop.
    Paint()
| galbb12/quick-draw-full-python-tkinter | quick draw.py | quick draw.py | py | 10,113 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "tensorflow.config.experimental.list_physical_devices",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tensorflow.config",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.config.experimental.set_memory_growth",
"line_number":... |
27182569232 | import asyncio
async def num(number):
    """Demo coroutine: log before and after a 1-second asyncio sleep,
    then return *number* converted to a string."""
    print("before calling coroutine")
    await asyncio.sleep(1)
    print('after calling coroutine')
    result = str(number)
    return result
# Drive the coroutine to completion and print its result.
# NOTE(review): asyncio.get_event_loop() outside a running loop is deprecated
# in modern Python; asyncio.run(num(5)) is the current idiom.
loop = asyncio.get_event_loop()
# n = num(5)
l= loop.run_until_complete(num(5))
print(l)
# Second loop fetch is unused except by the commented-out task experiment below.
loop = asyncio.get_event_loop()
# c = loop.create_task(num(5))
# u = loop.run_until_complete(c)
# print(u)
| sivanagarajumolabanti/Chromata | asyncbasic/asyncfuture.py | asyncfuture.py | py | 364 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "asyncio.sleep",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 15,
"usage_type": "call"
}
] |
38985342452 | #########################################################
### Train & Register Insurance Claims Model ###
#########################################################
###################
### Credentials ###
###################
import keyring
import getpass
import runpy
import os
from pathlib import Path
import urllib3
urllib3.disable_warnings()
### run script that contains username, password, hostname, working directory, and output directory
### ...OR define directly in this script
from password import hostname, port, wd, output_dir
runpy.run_path(path_name='password.py')
username = keyring.get_password('cas', 'username')
password = keyring.get_password('cas', username)
# username = getpass.getpass("Username: ")
# password = getpass.getpass("Password: ")
output_dir = os.getcwd()
metadata_output_dir = 'outputs'
###################
### Environment ###
###################
import swat
import pandas as pd
conn = swat.CAS(hostname, port, username, password, protocol="cas")
print(conn)
print(conn.serverstatus())
#############################
### Identify Table in CAS ###
#############################
### caslib and table to use in modeling
caslib = 'Public'
in_mem_tbl = 'pure_premium_raw_adj'
### load table in-memory if not already exists in-memory
if conn.table.tableExists(caslib=caslib, name=in_mem_tbl).exists<=0:
conn.table.loadTable(caslib=caslib, path=str(in_mem_tbl+str('.sashdat')),
casout={'name':in_mem_tbl, 'caslib':caslib, 'promote':True})
### show table to verify
conn.table.tableInfo(caslib=caslib, wildIgnore=False, name=in_mem_tbl)
########################
### Create Dataframe ###
########################
dm_inputdf = conn.CASTable(in_mem_tbl, caslib=caslib).to_frame()
### read csv from defined 'data_dir' directory
#data_dir = 'C:/Users/chparr/OneDrive - SAS/pure_premium'
#dm_inputdf = pd.read_csv(str(data_dir)+str('/')+in_mem_tbl+str('.csv'))
### print columns for review of model parameters
pd.set_option("display.max_rows", 1000)
print(dm_inputdf.dtypes)
########################
### Model Parameters ###
########################
### import python libraries
import numpy as np
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder, StandardScaler, KBinsDiscretizer, FunctionTransformer, PolynomialFeatures
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.linear_model import TweedieRegressor, GammaRegressor, LinearRegression
from sklearn.feature_selection import RFE, RFECV
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from sklearn.metrics import mean_tweedie_deviance, d2_absolute_error_score, mean_absolute_error, mean_squared_error
from sklearn.utils import shuffle
from scipy.sparse import csr_matrix
### power param
# 0: Normal
# 1: Poisson
# (1,2): Compound Poisson Gamma
# 2: Gamma
# 3: Inverse Guassian
tweedie_params = {
'power': 1.8,
'alpha': 0.1,
'fit_intercept': True,
'link': 'auto',
'tol': 0.0001,
'max_iter': 10000,
'warm_start': False
}
print(tweedie_params)
gamma_params = {
'alpha': 1,
'fit_intercept': True,
'tol': 0.0001,
'max_iter': 100,
'warm_start': False
}
print(gamma_params)
linear_params = {
'fit_intercept': True,
'copy_X': True,
'n_jobs': None,
'positive': False
}
print(linear_params)
### model manager information
model_name = 'tweedie_python'
project_name = 'Pure Premium'
description = 'Tweedie GLM'
model_type = 'GLM'
predict_syntax = 'predict'
### define macro variables for model
dm_dec_target = 'PurePremium'
dm_partitionvar = '_PartInd_'
create_new_partition = 'no' # 'yes', 'no'
dm_key = 'uniqueRecordID'
dm_partition_validate_val, dm_partition_train_val, dm_partition_test_val = [0, 1, 2]
dm_partition_validate_perc, dm_partition_train_perc, dm_partition_test_perc = [0.3, 0.6, 0.1]
dm_predictionvar = [str('P_') + dm_dec_target]
### mlflow
use_mlflow = 'no' # 'yes', 'no'
mlflow_run_to_use = 0
mlflow_class_labels =['TENSOR']
mlflow_predict_syntax = 'predict'
### var to consider in bias assessment
bias_var = 'Gender'
### create partition column, if not already in dataset
if create_new_partition == 'yes':
dm_inputdf = shuffle(dm_inputdf)
dm_inputdf.reset_index(inplace=True, drop=True)
validate_rows = round(len(dm_inputdf)*dm_partition_validate_perc)
train_rows = round(len(dm_inputdf)*dm_partition_train_perc) + validate_rows
test_rows = len(dm_inputdf)-train_rows
dm_inputdf.loc[0:validate_rows,dm_partitionvar] = dm_partition_validate_val
dm_inputdf.loc[validate_rows:train_rows,dm_partitionvar] = dm_partition_train_val
dm_inputdf.loc[train_rows:,dm_partitionvar] = dm_partition_test_val
####################
### Plot Columns ###
####################
from matplotlib import pyplot as plt
plt.hist(dm_inputdf[dm_dec_target])
plt.hist(dm_inputdf['Income'])
dm_inputdf.hist(figsize=(15,75), layout=(28,5))
##############################
### Final Modeling Columns ###
##############################
### transformations
dm_inputdf_raw = dm_inputdf
poly_cols_1 = [] # 'Age', 'Income'
poly_cols_1_out = [] # 'Age', 'Income', 'AgeSq', 'AgeIncome', 'IncomeSq'; 'bias_col' would be first if set to True
poly_1 = ('poly', PolynomialFeatures(degree=2, include_bias=False, interaction_only=False), poly_cols_1)
# poly_scale_cols_1 = []
# poly_scale_cols_1_out = []
# for i in poly_scale_cols_1:
# poly_scale_cols_1_out.append(i+'Poly_Scale')
# poly_scale_1 = ('poly_scale', make_pipeline(poly_1, StandardScaler()), poly_scale_cols_1)
impute_cols_1 = []
impute_cols_1_out = []
for i in impute_cols_1:
impute_cols_1_out.append(i+'Impute')
impute_1 = ('impute', SimpleImputer(strategy='most_frequent'), impute_cols_1) # 'median', 'mean'
encode_cols_1 = ['Rating_Category', 'Occupation', 'Marital_Status', 'Education', 'Gender', 'Car_Type', 'CarUse']
encode_cols_1_out = [] # transform output is in sorted order, so extra step is needed to align out column names
for i in encode_cols_1:
temp_list = sorted(dm_inputdf_raw[i].unique())
for j in temp_list:
encode_cols_1_out.append(i+j)
ohe_1 = ('encode', OneHotEncoder(sparse=False, handle_unknown='ignore'), encode_cols_1)
binned_cols_1 = ['Age', 'Car_Age', 'MotorVehicleRecordPoint', 'Travel_Time']
binned_cols_1_out = []
for i in binned_cols_1:
binned_cols_1_out.append(i+'Bin')
bin_1 = ('bins', KBinsDiscretizer(n_bins=5, encode='ordinal', strategy='quantile',), binned_cols_1)
scaled_cols_1 = []
scaled_cols_1_out = []
for i in scaled_cols_1:
scaled_cols_1_out.append(i+'Scale')
scale_1 = ('scales', StandardScaler(), scaled_cols_1)
log_cols_1 = []
log_cols_1_out = []
for i in log_cols_1:
log_cols_1_out.append(i+'Log')
log_1 = ('log', FunctionTransformer(func=np.log), log_cols_1)
log_scale_cols_1 = ['Bluebook', 'Income']
log_scale_cols_1_out = []
for i in log_scale_cols_1:
log_scale_cols_1_out.append(i+'Log_Scale')
log_scale_1 = ('log_scale', make_pipeline(FunctionTransformer(func=np.log), StandardScaler()), log_scale_cols_1)
### this performs multiple transforms on the same columns
keep_cols_1 = ['Exposure', 'DrivingUnderInfluence', 'Revoked', dm_key, dm_dec_target, dm_partitionvar]
keep_1 = ('keep', 'passthrough', keep_cols_1)
drop_cols_1 = ['Origination_Source']
drop_1 = ('drop', 'drop', drop_cols_1)
transforms = ColumnTransformer(transformers=[ohe_1, bin_1, log_scale_1, keep_1, drop_1],
remainder='drop',
verbose_feature_names_out=True)
# remainder='passthrough'
df_temp = transforms.fit_transform(dm_inputdf_raw)
#transforms.get_feature_names_out() this does not work with some of the transformation - why?? no idea
### work around for column names (this needs to be done in the order of the transforms)
transform_cols = poly_cols_1_out + impute_cols_1_out + encode_cols_1_out + binned_cols_1_out + scaled_cols_1_out + log_cols_1_out + log_scale_cols_1_out + keep_cols_1
dm_inputdf = pd.DataFrame(data=df_temp, columns=transform_cols)
### create list of rejected predictor columns
dm_input = list(dm_inputdf.columns.values)
dm_input = [x.replace(' ', '') for x in dm_input]
dm_input = [x.replace('(', '_') for x in dm_input]
dm_input = [x.replace(')', '_') for x in dm_input]
print(dm_input)
macro_vars = (dm_dec_target + ' ' + dm_partitionvar + ' ' + dm_key).split()
#string_cols = list(dm_inputdf.select_dtypes('object'))
#keep_predictors = [i for i in dm_input if i not in macro_vars]
#keep_predictors = [string_cols]
#rejected_predictors = [i for i in dm_input if i not in keep_predictors]
rejected_predictors = ['Rating_CategoryA', 'Occupation(missing)', 'Marital_StatusM',
'EducationBachelors', 'GenderF', 'Car_TypeHatchback', 'CarUseC', 'Exposure']
# 'Income', 'IncomeSq',
rejected_vars = rejected_predictors + macro_vars
for i in rejected_vars:
dm_input.remove(i)
##################
### Data Split ###
##################
### create train, test, validate datasets using existing partition column
dm_traindf = dm_inputdf[dm_inputdf[dm_partitionvar] == dm_partition_train_val]
dm_testdf = dm_inputdf.loc[(dm_inputdf[dm_partitionvar] == dm_partition_test_val)]
dm_validdf = dm_inputdf.loc[(dm_inputdf[dm_partitionvar] == dm_partition_validate_val)]
y_train = dm_traindf[dm_dec_target]
y_test = dm_testdf[dm_dec_target]
y_valid = dm_validdf[dm_dec_target]
fullX = dm_inputdf.loc[:, dm_input]
fully = dm_inputdf[dm_dec_target]
##########################
### Variable Selection ###
##########################
### Recursive Feature Elimination (RFE) with Crossvalidation (auto-select number of variables)
models_for_rfe = [DecisionTreeRegressor(), GradientBoostingRegressor()] #RandomForestRegressor()
rfe_cols_cv = []
for i in models_for_rfe:
rfe_cv = RFECV(estimator=i, step=1, cv=10, min_features_to_select=1)
rfe_cv.fit(fullX,fully)
rfe_cols_cv.append(list(rfe_cv.get_feature_names_out()))
#####################
### Training Code ###
#####################
models_for_training_list = [TweedieRegressor(**tweedie_params)]
model_results_list = []
model_list = []
for i in models_for_training_list:
for j in range(0, len(rfe_cols_cv)):
X_train = dm_traindf.loc[:, rfe_cols_cv[j]]
X_test = dm_testdf.loc[:, rfe_cols_cv[j]]
X_valid = dm_validdf.loc[:, rfe_cols_cv[j]]
dm_model = i
dm_model.fit(X_train, y_train, sample_weight=dm_traindf['Exposure'])
#cross_val_score(dm_model, X_train, y_train, cv=10, n_jobs=1)
score = dm_model.score(X_valid, y_valid)
model_results_list.append(score)
name = [str(i)[0:10]+str('_varlist')+str(j)]
model_list.append(name)
print('%s %.4f' % (name, score))
# models = dict('LinReg',LinearRegression(**linear_params), 'GammReg', GammaRegressor(**gamma_params), 'TweedieReg', TweedieRegressor(**tweedie_params))
# sparse_matrix = csr_matrix(dm_traindf.loc[:, rfe_cols_cv[j]])
###################################
### Score Data & Assess Model ###
###################################
def score_func(partition_df, partition_X, partition_y, partition):
    """Score one data partition with the fitted model and print a claims summary.

    Parameters
    ----------
    partition_df : DataFrame containing at least an 'Exposure' column for the
        rows being scored.
    partition_X : feature matrix for the partition.
    partition_y : observed target (pure premium) series for the partition.
    partition : label used only in the printed report ('train', 'validate', ...).

    Side effects: prints the report and assigns the scored frame to the
    module-level global ``df`` (kept for backward compatibility with the
    call sites that read ``df`` right after calling).  The frame is now
    also returned so new callers can avoid the global.
    """
    dfProba = pd.DataFrame(
        pd.concat(
            [partition_X.reset_index(drop=True),
             partition_y.reset_index(drop=True),
             partition_df['Exposure'].reset_index(drop=True),
             pd.Series(data=dm_model.predict(partition_X), name='Prediction')],
            axis=1,
        )
    )
    # Claim amounts are premium rates weighted by exposure.
    dfProba['Predicted_Claims'] = dfProba['Exposure'] * dfProba['Prediction']
    # Use the configured target name instead of hard-coding 'PurePremium'.
    observed_claims = np.sum(dfProba['Exposure'] * dfProba[dm_dec_target])
    predicted_claims = np.sum(dfProba['Predicted_Claims'])
    diff_predicted_minus_observed = predicted_claims - observed_claims
    perc_diff = diff_predicted_minus_observed / observed_claims
    print('**********')
    print(partition)
    print('**********')
    print('observed_claims:', "${:0,.2f}".format(observed_claims))
    print('predicted_claims', "${:0,.2f}".format(predicted_claims))
    # BUG FIX: label said 'observed_minus_predicted' but the value printed is
    # predicted minus observed.
    print('diff_predicted_minus_observed:', "${:0,.2f}".format(diff_predicted_minus_observed))
    print('%_diff_of_observed:', "{0:.0%}".format(perc_diff))
    print('% variance explained:', "{0:.0%}".format(dm_model.score(partition_X, partition_y)))
    print('mean observed:', "${:0,.2f}".format(np.mean(partition_y)))
    print('mean predicted:', "${:0,.2f}".format(np.mean(dfProba['Prediction'])))
    print('mean tweedie deviance:', "${:0,.2f}".format(mean_tweedie_deviance(partition_y, dfProba['Prediction'], power=tweedie_params['power'])))
    print('d2_absolute error:', "${:0,.2f}".format(d2_absolute_error_score(partition_y, dfProba['Prediction'])))
    #print('mean absolute error:', "${:0,.2f}".format(mean_absolute_error(partition_y, dfProba['Prediction'])))
    #print('mean squared error:', "${:0,.2f}".format(mean_squared_error(partition_y, dfProba['Prediction'])))
    #print('root mean squared error:', "${:0,.2f}".format(np.sqrt(mean_squared_error(partition_y, dfProba['Prediction']))))
    global df
    df = pd.DataFrame(dfProba)
    return df
score_func(dm_traindf, X_train, y_train, 'train')
trainProba = df
trainData = trainProba[[dm_dec_target, 'Prediction']]
# score_func(dm_testdf, X_test, y_test, 'test')
# testProba = df
# testData = testProba[[dm_dec_target, 'Prediction']]
score_func(dm_validdf, X_valid, y_valid, 'validate')
validProba = df
validData = validProba[[dm_dec_target, 'Prediction']]
#######################################
### Register Model in Model Manager ###
#######################################
from sasctl import Session
import sasctl.pzmm as pzmm
from sasctl.services import model_repository as modelRepo
from sasctl.tasks import register_model
import shutil
import json
### define macro vars for model manager
input_df = X_train
target_df = y_train
predictors = np.array(X_train.columns)
output_labels = ['EM_PREDICTION', 'EM_PREDICTION']
event_prob_var = output_labels[0]
target_event = None
target_level = 'INTERVAL'
num_target_categories = 1
predict_method = str('{}.')+str(predict_syntax)+str('({})')
output_vars = pd.DataFrame(columns=output_labels, data=[[0.5, 0.5]])
### create directories for metadata
output_path = Path(output_dir) / metadata_output_dir / model_name
if output_path.exists() and output_path.is_dir():
shutil.rmtree(output_path)
### create output path
os.makedirs(output_path)
### create python requirements file
requirements = [
{
"step":"import math, pickle, pandas as pd, numpy as np, settings",
"command":"pip3 install math==3.10.5 pickle==3.10.5 numpy==1.20.3 pandas==1.3.4 settings==0.2.2"
}
]
requirementsObj = json.dumps(requirements, indent = 4)
with open(str(output_path)+str('/requirements.json'), 'w') as outfile:
outfile.write(requirementsObj)
### create session in cas
sess=Session(hostname, username=username, password=password, verify_ssl=False, protocol="http")
### create metadata and import to model manager
pzmm.PickleModel.pickleTrainedModel(_, dm_model, model_name, output_path)
pzmm.JSONFiles().writeVarJSON(input_df, isInput=True, jPath=output_path)
pzmm.JSONFiles().writeVarJSON(output_vars, isInput=False, jPath=output_path)
pzmm.JSONFiles().calculateFitStat(trainData=trainData, validateData=validData, jPath=output_path) #testData=testData,
pzmm.JSONFiles().generateROCLiftStat(dm_dec_target, int(target_event), conn, trainData=trainData, validateData=validData, jPath=output_path) #testData=testData,
pzmm.JSONFiles().writeFileMetadataJSON(model_name, jPath=output_path)
pzmm.JSONFiles().writeModelPropertiesJSON(
modelName=model_name,
modelDesc=description,
targetVariable=dm_dec_target,
modelType=model_type,
modelPredictors=predictors,
targetEvent=target_event,
targetLevel=target_level,
numTargetCategories=num_target_categories,
eventProbVar=event_prob_var,
jPath=output_path,
modeler=username)
pzmm.ImportModel().pzmmImportModel(output_path, model_name, project_name, input_df, target_df, predict_method, metrics=output_labels, force=True)
# alternative model registration
pzmm.ScoreCode().writeScoreCode(input_df, target_df, model_name, predict_method, model_name + '.pickle', pyPath=output_path)
zip_file = pzmm.ZipModel.zipFiles(fileDir=output_path, modelPrefix=model_name, isViya4=True)
with sess:
try:
modelRepo.import_model_from_zip(model_name, project_name, zip_file, version='latest')
except ValueError:
modelRepo.create_project(project_name, caslib)
modelRepo.import_model_from_zip(model_name, project_name, zip_file, version='latest')
inputVarList = list(X_train.columns)
for name in inputVarList:
print(name, str(name).isidentifier())
list(X_train.columns) | christopher-parrish/sas_viya | python/tweedie_regressor_python/insurance_claims_auto/pure_premium_python_insuranceclaimsauto.py | pure_premium_python_insuranceclaimsauto.py | py | 17,181 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "urllib3.disable_warnings",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "runpy.run_path",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "keyring.get_password",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "keyring.... |
11672647873 | import numpy as np
import matplotlib
# Select the Tk backend before pyplot is imported.
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import random
# 3-D surface plot of the two-variable sphere function y = x1^2 + x2^2
# over the square [-5, 5] x [-5, 5].
fig = plt.figure(figsize=(16,12))
ax = fig.add_subplot(111,projection='3d')
x1 = np.arange(-5,5,0.5)
x2 = np.arange(-5,5,0.5)
x1,x2 = np.meshgrid(x1,x2)
ax.set_xlim(-5,5)
ax.set_ylim(-5,5)
ax.set_zlim(0,50)
y = (x1**2) + (x2**2)
ax.plot_surface(x1,x2,y,rstride=1,cstride=1,cmap='gnuplot')
# ax.plot_wireframe(x1, x2, y, rstride=1, cstride=1)
plt.figtext(0.5,0.95,"Sphere function",size="xx-large",ha='center')
plt.show()
# Commented-out experiment: animate random particle positions (PSO-style)
# on top of the surface, one frame per second.
# test = [[2.0,2.0],
# [3.5,1.5],
# [0.0,0.0]]
#
# test = [[random.uniform(-5.0,5.0),random.uniform(-5.0,5.0)] for _ in range(20)]
#
# # ax.plot_surface(x1,x2,y,rstride=1,cstride=1,cmap='BuGn')
# for p in test:
# ax.plot_surface(x1, x2, y, rstride=1, cstride=1, cmap='BuGn')
# ax.scatter(p[0],p[1])
# plt.pause(1)
# plt.cla()
{
"api_name": "matplotlib.use",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "numpy.arange",... |
16002420757 | #!/usr/bin/python3
import os
import cv2
def Smoothing(image_name):
res_dir = os.environ["PY_IMG"]
if res_dir is None:
print("[ERROR] Resources path isn't defined")
# Convertimos a escala de grises
original_image = cv2.imread(res_dir + "/" + image_name, cv2.IMREAD_GRAYSCALE)
if original_image is None:
raise Exception("[ERROR] Image not found in path.")
img_width, img_height = original_image.shape
output_image = original_image.copy()
kernel_size = 3
kernel_radio = kernel_size // 2
kernel_window =\
[
[1, 2, 1],
[2, 4, 2],
[1, 2, 1]
]
for x in range(img_width):
for y in range(img_height):
items_sum, pixel_sum = 0, 0
for i in range(-kernel_radio, kernel_radio+1):
for j in range(-kernel_radio, kernel_radio+1):
# Target coordinates for image
tg_x, tg_y = i + x, j + y
# Target coordinates for kernel
ktg_x, ktg_y = i + kernel_radio, j + kernel_radio
# if out of bounds continue
if tg_x < 0 or tg_x >= img_width or tg_y < 0 or tg_y >= img_height:
continue
pixel_sum += kernel_window[ktg_x][ktg_y] * original_image[tg_x, tg_y]
items_sum += kernel_window[ktg_x][ktg_y]
new_pixel = abs(pixel_sum // items_sum)
output_image[x, y] = new_pixel
print("Kernel window", kernel_window)
cv2.imshow("Original image", original_image)
cv2.imshow("Output image", output_image)
cv2.waitKey(30000)
cv2.destroyAllWindows()
if __name__ == "__main__":
Smoothing("noisy_1.jpeg")
Smoothing("house.jpg")
| Madophs/Image-Processsing | filters/Smoothing.py | Smoothing.py | py | 1,790 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_GRAYSCALE",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
... |
30391214782 | from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.core.exceptions import PermissionDenied
# `redirect` added: several views below call it but it was never imported.
from django.shortcuts import get_object_or_404, redirect, render
from django.views.generic import ListView, CreateView, UpdateView

from review.models import Review

from .forms import CommentForm, UserForm
from .models import *
# from cart.forms import AddProductForm
# Create your views here.
def product_in_category(request, category_slug=None):
    """Render the product list, optionally narrowed to one category.

    When ``category_slug`` is given, the matching Category is looked up
    (404 on miss) and only its displayable products are shown.
    """
    current_category = None
    categories = Category.objects.all()
    products = Product.objects.filter(available_display=True)
    if category_slug:
        current_category = get_object_or_404(Category, slug=category_slug)
        products = products.filter(category=current_category)
    context = {
        'current_category': current_category,
        'categories': categories,
        'products': products,
    }
    return render(request, 'shop/list.html', context)
def product_detail(request, id, product_slug=None):
    """Render a single product page with an empty comment form.

    The product is fetched by primary key and slug (404 on mismatch).
    """
    product = get_object_or_404(Product, id=id, slug=product_slug)
    # BUG FIX: instantiate the form — the class itself was being passed to
    # the template, relying on the template engine to call it.
    comment_form = CommentForm()
    return render(request,
                  'shop/detail.html',
                  {
                      'product': product,
                      'comment_form': comment_form,
                  })
def new_comment(request, pk, slug):
    """Attach a comment to a product from an authenticated user.

    POST: validate CommentForm and save it bound to the product and the
    current user, then redirect back to the product page.  Non-POST
    requests and invalid forms also redirect back to the product page.
    Anonymous users get a 403.
    """
    if not request.user.is_authenticated:
        raise PermissionDenied
    product = get_object_or_404(Product, pk=pk, slug=slug)
    if request.method == 'POST':
        comment_form = CommentForm(request.POST)
        if comment_form.is_valid():
            comment = comment_form.save(commit=False)
            comment.product = product
            comment.author = request.user
            comment.save()
            return redirect(product.get_absolute_url())
        # BUG FIX: an invalid form previously fell through and returned
        # None (Django crashes on a view returning None); redirect back.
        return redirect(product.get_absolute_url())
    return redirect(product.get_absolute_url())
def signup(request):
    """Register a new user, log them in, and send them to the home page.

    GET requests (and invalid POSTs) render the signup form; an invalid
    POST re-renders it bound so validation errors are shown.
    """
    form = UserForm(request.POST) if request.method == "POST" else UserForm()
    if request.method == "POST" and form.is_valid():
        form.save()
        username = form.cleaned_data.get('username')
        raw_password = form.cleaned_data.get('password1')
        user = authenticate(username=username, password=raw_password)
        login(request, user)
        return redirect('/')
    return render(request, 'shop/signup.html', {'form': form})
class ProductCreate(LoginRequiredMixin, UserPassesTestMixin, CreateView):
    # Superuser-only product creation view (class-based CreateView).
    model = Product
    fields = ['category', 'name', 'slug', 'image', 'description', 'price', 'stock', 'available_display', 'available_order', 'author']

    def test_func(self):
        # UserPassesTestMixin hook: only superusers may create products.
        return self.request.user.is_superuser

    def form_valid(self, form):
        # Stamp the current user as the author before saving.  The
        # superuser re-check here is redundant with test_func above.
        current_user = self.request.user
        if current_user.is_authenticated and (current_user.is_superuser):
            form.instance.author = current_user
            return super(ProductCreate, self).form_valid(form)
        else:
            return redirect('/shop')
class ProductUpdate(LoginRequiredMixin, UpdateView):
    # Superuser-only product edit view.
    model = Product
    fields = ['category', 'name', 'slug', 'image', 'description', 'price', 'stock', 'available_display', 'available_order', 'author']
    template_name = 'shop/product_form_update.html'

    def dispatch(self, request, *args, **kwargs):
        # Gate every HTTP method: only authenticated superusers proceed;
        # everyone else gets a 403 (this overrides LoginRequiredMixin's
        # usual redirect-to-login behaviour).
        current_user = request.user
        if current_user.is_authenticated and current_user.is_superuser:
            return super(ProductUpdate, self).dispatch(request, *args, **kwargs)
        else:
            raise PermissionDenied
| sikkzz/cloudprogramming | shop/views.py | views.py | py | 3,758 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 34,
"usage_type": "call"
... |
24556338376 | import hashlib
import json
import os
import argparse
import sys
import hmac
import re
import signal
from multiprocessing import Process
from flask import request
import requests
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.ciphers.algorithms import AES
# Sequence number fetched from the bank before each mutating request.
seqNumber = 0
# Sequence number of the most recently created virtual credit card.
vccSecNumb = 0
# Valid file names: lowercase alphanumerics plus "_-.", 1-127 chars.
filename_regex = re.compile(r'^[_\-\.0-9a-z]{1,127}$')
# Non-negative decimal integer without leading zeros.
decimal_regex = re.compile(r'^(0|[1-9][0-9]*)$')
# Monetary amount: up to 10 integer digits and exactly two decimals.
float_regex = re.compile(r'^\d{1,10}\.\d{2}$')
"""
python3 Client.py -s bank.auth -u 55555.user -a 55555 -n 1000.00
python3 Client.py -s bank.auth -u 55555.user -a 55555 -c 63.10
python3 Client.py -a 55555_0.card -m 45.10
"""
# NOTE(review): duplicate of the assignment at the top of this section —
# presumably a merge leftover; harmless but redundant.
seqNumber = 0
def parse_args():
    """Build the bank-client argument parser and parse sys.argv."""
    arg_parser = argparse.ArgumentParser(description='Client')
    add = arg_parser.add_argument
    add('-i', metavar='bk-ip', type=str, default='127.0.0.1',
        help='The IP that the client will search the bank. default is localhost')
    add('-p', metavar='bk-port', type=int, default=3000,
        help='The port that bank will listen on. Defaults to 3000.')
    add('-s', metavar='auth-file', type=str, default='bank.auth',
        help='Name of the auth file. Defaults to bank.auth')
    add('-u', metavar='user-file', type=str, default=None,
        help='The customer user file. The default value is the account name prepended to .user')
    add('-a', metavar='account', type=str,
        help='The account that you want to do operations.')
    add('-n', metavar='balance', type=str,
        help='The balance of the account that you want to create')
    add('-d', metavar='deposit', type=str,
        help='The amount you want to deposit on the account')
    add('-c', metavar='vcc', type=str,
        help='The amount of money that you want to create a virtual card with')
    add('-g', metavar='balance', type=int,
        help='Get the balance of a certain account')
    add('-m', metavar='purchase', type=str,
        help='Withdraw the amount of money specified from the virtual credit card and the bank account')
    return arg_parser.parse_args()
def validate_args(args):
    """Validate port, auth-file name and bank IP.

    Returns a (valid, exit_code) pair; exit_code is None when valid,
    135 for a bad port and 130 for a bad file name or IP.
    """
    port_ok = re.match(r'^[1-9]\d*$', str(args.p)) and 1024 <= args.p <= 65535
    if not port_ok:
        return False, 135
    if not re.match(filename_regex, args.s):
        return False, 130
    # Dotted-quad IPv4 with each octet in 0-255 and no leading zeros.
    octet = r'(25[0-5]|2[0-4]\d|1\d{2}|[1-9]\d|\d)'
    ip_pattern = re.compile(r'^(' + octet + r'\.){3}' + octet + r'$')
    if not re.match(ip_pattern, args.i):
        return False, 130
    return True, None
def signal_handler(sig, frame):
    """Terminate cleanly (exit code 0) on an incoming signal.

    Both arguments are required by the signal-handler protocol but unused.
    """
    sys.exit(0)
def get_account_balance(ip, port, account):
    """Fetch, authenticate and decrypt the balance of *account*, then print it.

    Exits 63 on network errors and 135 on a failed request or failed HMAC
    verification.

    Bug fix: in the original a failed HMAC check left ``saldo`` unbound and
    the subsequent print raised NameError; that case now exits 135.  The
    comparison also uses hmac.compare_digest to avoid a timing side channel.
    """
    saldo = None
    try:
        response = requests.get(url=f"http://{ip}:{port}/account/{account}.user", timeout=10)
        response.raise_for_status()
        with open("bank.auth", 'rb') as f:
            key = f.read()  # key[:32] is the AES/HMAC key (see usage below)
        with open(str(account) + ".user", 'rb') as f:
            iv = f.read()  # per-account file content, used as the CBC IV
        h = hmac.new(key[:32], response.text.encode("latin1"), hashlib.sha3_256).hexdigest()
        if hmac.compare_digest(h, response.headers.get("Authorization", "")):
            cipher = Cipher(algorithms.AES(key[:32]), modes.CBC(iv))
            decryptor = cipher.decryptor()
            # latin1 gives a 1:1 char<->byte mapping for the raw ciphertext.
            saldo = decryptor.update(response.text.encode("latin1")).decode("utf8")
    except requests.exceptions.Timeout:
        sys.exit(63)
    except requests.exceptions.RequestException:
        sys.exit(63)
    if response.status_code == 200 and saldo is not None:
        print(saldo)
        sys.stdout.flush()
    else:
        sys.exit(135)
def deposit(ip, port, account, deposit_amount):
    """Encrypt and send a deposit request for *account* to the bank.

    Exits 125 on malformed input, 63 on network failure and 135 when the
    bank rejects the request.
    """
    global seqNumber
    # Amount must be digits with exactly two decimal places.
    if not re.match(r'^\d+\.\d{2}$', str(deposit_amount)):
        sys.exit(125)
    if not re.match(r'^[_\-\.0-9a-z]{1,127}$', account):
        sys.exit(125)
    # Trailing spaces pad the plaintext; CBC `update` only emits whole
    # blocks, so the exact byte layout matters.
    user = (account+".user ").encode("utf8")
    deposit_amount = ("amount: "+str(deposit_amount) + " ").encode("utf8")
    with open("bank.auth", 'rb') as f:
        key = f.read()  # key[:32] AES/HMAC key, key[32:] shared IV
    with open(account+".user", 'rb') as f:
        iv = f.read()  # per-account file content, used as IV for the user field
    # User field is encrypted under the per-account IV...
    cipher = Cipher(algorithms.AES(key[:32]), modes.CBC(iv))
    encryptor = cipher.encryptor()
    user = encryptor.update(user)
    # ...amount and sequence number under the shared IV from bank.auth.
    cipher = Cipher(algorithms.AES(key[:32]), modes.CBC(key[32:]))
    encryptor = cipher.encryptor()
    amount = encryptor.update(deposit_amount)
    cipher = Cipher(algorithms.AES(key[:32]), modes.CBC(key[32:]))
    encryptor = cipher.encryptor()
    seq_number = encryptor.update(("number: " + str(seqNumber) + " ").encode("utf8"))
    # latin1 keeps a 1:1 byte<->char mapping so ciphertext survives the join.
    payload = (seq_number.decode("latin1")+"|"+user.decode("latin1") + "|" + amount.decode("latin1")).encode("latin1")
    # HMAC over the whole payload authenticates the request.
    h = hmac.new(key[:32], payload, hashlib.sha3_256).hexdigest()
    headers = {
        "Authorization": f"{h}",
        "User": f"{account}.user"
    }
    try:
        response = requests.post(url=f"http://{ip}:{port}/account/deposit", headers=headers, data=payload, timeout=10)
        response.raise_for_status()
    except requests.exceptions.Timeout:
        sys.exit(63)
    except requests.exceptions.RequestException:
        sys.exit(63)
    if response.status_code == 200:
        print(response.text)
        sys.stdout.flush()
    else:
        sys.exit(135)
def buy_product(account, amount_used):
    """Pay *amount_used* from a virtual-credit-card file named *account*.

    On success the spent card file is deleted.  Exits 63 on network
    failure and 135 when the store rejects the purchase.
    """
    global seqNumber
    user = "account: "+account
    amount_used = ("amount: "+str(amount_used)+" ").encode("utf8")
    with open("bank.auth", 'rb') as f:
        key = f.read()  # key[:32] AES/HMAC key, key[32:] shared IV
    with open(account, 'rb') as f:
        iv = f.read()  # the .card file content is used as the IV here
    cipher = Cipher(algorithms.AES(key[:32]), modes.CBC(iv))
    encryptor = cipher.encryptor()
    user = encryptor.update(user.encode("utf8"))
    cipher = Cipher(algorithms.AES(key[:32]), modes.CBC(key[32:]))
    encryptor = cipher.encryptor()
    seq_number = encryptor.update(("number: "+str(seqNumber)+" ").encode("utf8"))
    print(seq_number.decode("latin1"))
    cipher = Cipher(algorithms.AES(key[:32]), modes.CBC(key[32:]))
    encryptor = cipher.encryptor()
    amount = encryptor.update(amount_used)
    # latin1 keeps a 1:1 byte<->char mapping so ciphertext survives the join.
    payload = (seq_number.decode("latin1") +" |"+user.decode("latin1")+" |"+amount.decode("latin1")).encode("latin1")
    h = hmac.new(key[:32],payload,hashlib.sha3_256).hexdigest()
    # Card files are named "<user>_<seq>.card"; the part before '_' is the user.
    account=account+" "
    user = account.split("_")[0]
    headers = {
        "Authorization": f"{h}",
        "User": f"{user}.user"
    }
    try:
        # NOTE(review): the store endpoint is hard-coded to localhost:5000,
        # unlike the bank calls which take ip/port — confirm intentional.
        response = requests.post(url=f"http://127.0.0.1:5000/buy",headers=headers, data=payload, timeout=10)
        response.raise_for_status()
    except requests.exceptions.Timeout:
        sys.exit(63)
    except requests.exceptions.RequestException:
        sys.exit(63)
    if response.status_code == 200:
        # The card is single-use: remove its file once the purchase succeeds.
        os.remove(account.strip(" "))
        print(response.text)
    else:
        sys.exit(135)
def create_vcc(ip, port, account, vcc_amount):
    """Ask the bank to create a virtual credit card worth *vcc_amount*.

    On success writes the returned (decrypted) card pin to
    "<account>_<seq>.card" and records the card sequence number in the
    module-level vccSecNumb.  Exits 125 on bad input, 63 on network
    failure, 135 on rejection.
    """
    if not re.match(r'^[_\-\.0-9a-z]{1,122}$', account):
        sys.exit(125)
    if not re.match(r'^\d+\.\d{2}$', str(vcc_amount)):
        sys.exit(125)
    user = account+".user"
    payload = '{"account": "'+user+'","vcc": "'+str(vcc_amount)+'"} '
    with open("bank.auth", 'rb') as f:
        key = f.read()  # key[:32] AES key
    with open(account+".user", 'rb') as f:
        iv = f.read()  # per-account file content, used as the CBC IV
    cipher = Cipher(algorithms.AES(key[:32]), modes.CBC(iv))
    encryptor = cipher.encryptor()
    data = encryptor.update(payload.encode('utf8'))
    try:
        response = requests.post(url=f"http://{ip}:{port}/account/createCard/"+account, data=data, timeout=10)
        response.raise_for_status()
    except requests.exceptions.Timeout:
        sys.exit(63)
    except requests.exceptions.RequestException:
        sys.exit(63)
    if response.status_code == 200:
        # Decryptor shares the cipher (and CBC chain) used for the request.
        decryptor = cipher.decryptor()
        vcc_pin = decryptor.update(response.text.encode("latin1"))
        """
        vcc_seq_number = response.headers.get("VCC_SEQ_NUMB")
        vcc_seq_number = decryptor.update(vcc_seq_number.encode("latin1")).decode("utf8").strip(" ").strip("seq:")
        """
        # The sequence number arrives in plain text in this header.
        vcc_seq_number = response.headers.get("VCC_SEQ_NUMB")
        with open(account+"_"+str(vcc_seq_number)+".card", 'wb') as f:
            f.write(vcc_pin)
        global vccSecNumb
        vccSecNumb = vcc_seq_number
        print(payload)
    else:
        sys.exit(135)
if __name__ == "__main__":
    args = parse_args()
    # Reject oversized command lines (non-whitespace payload > 4096 chars).
    if ' '.join(sys.argv[1:]).replace(' ', '').__len__() > 4096:
        sys.exit(130)
    valid, error_code = validate_args(args)
    if not valid:
        sys.exit(error_code)
    if args.u is None and args.a is not None: # If the user file is not specified, use the account name prepended to .user
        if not re.match(r'^[_.\-a-zA-Z0-9]{1,122}$', args.a):
            sys.exit(130)
        args.u = f"{args.a}.user"
    # -n: create a new account with the given starting balance.
    if args.u is not None and args.a is not None and args.n is not None:
        if not re.match(r'^[_.\-a-zA-Z0-9]{1,122}$', args.a):
            sys.exit(130)
        if not re.match(float_regex, args.n):
            sys.exit(130)
        if not re.match(filename_regex, args.u):
            sys.exit(130)
        data = "conta: "+str(args.u)+", saldo: "+str(args.n) + " "
        key = ""
        with open("bank.auth", 'rb') as f:
            key = f.read()  # key[:32] AES key, key[32:] shared IV
        cipher = Cipher(algorithms.AES(key[:32]), modes.CBC(key[32:]))
        encryptor = cipher.encryptor()
        ct = encryptor.update(data.encode("utf8"))
        decryptor = cipher.decryptor()
        try:
            response = requests.post(url=f"http://{args.i}:{args.p}/account", data=ct, timeout=10)
            print(response.status_code)
            if response.status_code == 400:
                sys.exit(130)
            # The bank replies with the encrypted per-account pin.
            pin = decryptor.update(response.text.encode("latin1"))
            response.raise_for_status()
        except requests.exceptions.Timeout:
            sys.exit(63)
        except requests.exceptions.RequestException:
            sys.exit(63)
        if response.status_code == 200:
            with open(args.u, 'wb') as f:
                f.write(pin)
    # -g: print an account's balance.
    if args.g is not None:
        if not re.match(decimal_regex, str(args.g)):
            sys.exit(130)
        get_account_balance(args.i, args.p, args.g)
    # -d: deposit; first refresh the replay-protection sequence number.
    if args.d is not None and args.a is not None:
        response = requests.get(url=f"http://{args.i}:{args.p}/seqnumb", timeout=10)
        with open("bank.auth", 'rb') as f:
            key = f.read()
        cipher = Cipher(algorithms.AES(key[:32]), modes.CBC(key[32:]))
        decryptor = cipher.decryptor()
        seqNumber = int(decryptor.update(response.text.encode("latin1")).decode("utf8"))
        if not re.match(float_regex, args.d):
            sys.exit(130)
        deposit(args.i, args.p, args.a, args.d)
    # -c: create a virtual credit card.
    if args.c is not None and args.a is not None:
        response = requests.get(url=f"http://{args.i}:{args.p}/seqnumb", timeout=10)
        with open("bank.auth", 'rb') as f:
            key = f.read()
        cipher = Cipher(algorithms.AES(key[:32]), modes.CBC(key[32:]))
        decryptor = cipher.decryptor()
        seqNumber = int(decryptor.update(response.text.encode("latin1")).decode("utf8"))
        if not re.match(r'^[_.\-a-zA-Z0-9]{1,122}$', args.a):
            sys.exit(130)
        if not re.match(float_regex, args.c):
            sys.exit(130)
        create_vcc(args.i, args.p, args.a, args.c)
    # -m: spend from a card file (here -a names the .card file).
    if args.m is not None and args.a is not None:
        response = requests.get(url=f"http://{args.i}:{args.p}/seqnumb", timeout=10)
        with open("bank.auth", 'rb') as f:
            key = f.read()
        cipher = Cipher(algorithms.AES(key[:32]), modes.CBC(key[32:]))
        decryptor = cipher.decryptor()
        seqNumber = int(decryptor.update(response.text.encode("latin1")).decode("utf8"))
        if not re.match(float_regex, args.m):
            sys.exit(130)
        if not re.match(r'^[_.\-a-zA-Z0-9]{1,122}$', args.a):
            sys.exit(130)
        buy_product(args.a, args.m)
| tolojo/bank-SA-22-23 | Phase 1/Client/Client.py | Client.py | py | 12,283 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_... |
21518532065 | """
Script for converting json annotations in to csv format for training
Only takes into consideration tool boundary boxes
# Re-implementation from new git clone surgery tool detection
#
"""
import csv
import json
import argparse
from pathlib import Path
from PIL import Image
# Output location: <repo>/data relative to this script.
DATA_DIR = str(Path(__file__).resolve().parents[1]) + "/data"
DEFAULT_CSV = DATA_DIR + "/raw_data2.csv" #
# Per-environment input locations (SAIL cluster / local checkout / AWS box).
SAIL_IMAGES_PATH = "/pasteur/u/kkpatel/data/images/"
SAIL_JSON_PATH = "/pasteur/u/kkpatel/data/complete_data.json"
LOCAL_IMAGES_PATH = "/Users/stephenren/code/curis2020/MARVLous_surgery_annotator/src/images/"
LOCAL_JSON_PATH = DATA_DIR + "/raw_data.json"
AWS_DATA_PATH = "/home/ubuntu/tools_data/data/marvl-surgery-annotator-validate-export.json" #new_data.json"
AWS_IMAGES_PATH = "/home/ubuntu/tools_data/data/images/"
# AWS_DATA_PATH = "/home/ubuntu/stephen/data/new_data.json"
# AWS_IMAGES_PATH = "/home/ubuntu/stephen/data/images/"
def get_coordinates(position, img_width, img_height):
    """Convert a fractional bounding box into absolute pixel corners.

    *position* maps "left"/"top"/"width"/"height" to fractions of the
    image size (as numbers or numeric strings).  Returns
    [x1, y1, x2, y2] as strings, truncated to whole pixels.
    """
    left = float(position["left"])
    top = float(position["top"])
    box_w = float(position["width"])
    box_h = float(position["height"])
    x_min = int(left * img_width)
    y_min = int(top * img_height)
    x_max = int((left + box_w) * img_width)
    y_max = int((top + box_h) * img_height)
    return [str(x_min), str(y_min), str(x_max), str(y_max)]
def convert(images_path, json_path, selected_tool, ignore_negatives, acceptable, ignore_annotator, hands, ignore_chirality):
    """Convert annotator JSON into retinanet-style CSV rows in DEFAULT_CSV.

    One "path,x1,y1,x2,y2,label" row per tool (or hand) bounding box;
    images with no boxes get an empty-label negative row unless
    *ignore_negatives* is set.  *acceptable* optionally whitelists
    video ids, *ignore_annotator* skips one annotator's images.

    Bug fix: the JSON and CSV file handles were previously opened without
    ever being closed; both are now managed by a ``with`` block.
    """
    with open(json_path) as jf, open(DEFAULT_CSV, 'w') as cf:
        filewriter = csv.writer(cf, delimiter=',')
        json_data = json.load(jf)['0'] #'data']
        for data in json_data:
            if data["object_type"] == "image" and data["id"]:
                if ignore_annotator is not None and data["original_annotator_name"] == ignore_annotator:
                    continue
                objects_in_image = 0
                filename = data["name"]
                vid_id = data['video_id']
                if acceptable is not None and vid_id not in acceptable:
                    continue
                img_width, img_height = Image.open(images_path + filename).size
                if not hands and "tool_labels" in data:
                    for tool_label in data["tool_labels"]:
                        # Scalpels are deliberately excluded from training data.
                        if tool_label["category"] == "scalpel":
                            continue
                        if selected_tool is None or tool_label["category"] == selected_tool:
                            objects_in_image += 1
                            line = [images_path + filename]
                            line += get_coordinates(tool_label["bounding_box_position"], img_width, img_height)
                            line.append(tool_label["category"])
                            filewriter.writerow(line)
                if hands and "hand_labels" in data:
                    for hand_label in data["hand_labels"]:
                        objects_in_image += 1
                        line = [images_path + filename]
                        line += get_coordinates(hand_label["bounding_box_position"], img_width, img_height)
                        line.append('hand' if ignore_chirality else hand_label['chirality'])
                        filewriter.writerow(line)
                # Case were tools are not present in the image - add negative label
                if not ignore_negatives and objects_in_image == 0:
                    filewriter.writerow([images_path + filename, '', '', '', '', ''])
def build_acceptable_videos(json_path):
    """Return ids of videos whose annotation quality is 'good' or 'okay'.

    Bug fix: the JSON file is now opened with a context manager so the
    handle is closed instead of being leaked.
    """
    with open(json_path) as data_f:
        json_data = json.load(data_f)['data']
    acceptable = []
    for data in json_data:
        if data['object_type'] == 'video':
            if data['quality'] in ('good', 'okay'):
                acceptable.append(data['id'])
    return acceptable
def main():
    """CLI entry point: resolve input paths and options, then convert."""
    parser = argparse.ArgumentParser(
        description='Script to convert data into csv format for pytorch-retinanet.')
    parser.add_argument('--datapath', help='Path to json annotations')
    parser.add_argument('--imagepath', help='Path to image directory')
    parser.add_argument('--use_local',
                        help='Use pre-loaded LOCAL_IMAGES_PATH (check convert_data.py)',
                        action="store_true")
    parser.add_argument('--focus_tool', help='Only use annotations for one particular tool')
    parser.add_argument('--quality_control', action='store_true')
    parser.add_argument('--ignore_negatives', action='store_true')
    parser.add_argument('--ignore_annotator')
    parser.add_argument('--hands', action='store_true')
    parser.add_argument('--ignore_chirality', action='store_true')
    parser.add_argument('--aws', action='store_true')
    args, _leftover = parser.parse_known_args()

    # Path precedence: SAIL defaults < explicit paths < --use_local < --aws.
    images_path, json_path = SAIL_IMAGES_PATH, SAIL_JSON_PATH
    if args.imagepath is not None:
        images_path = args.imagepath
    if args.datapath is not None:
        json_path = args.datapath
    if args.use_local:
        images_path, json_path = LOCAL_IMAGES_PATH, LOCAL_JSON_PATH
    if args.aws:
        images_path, json_path = AWS_IMAGES_PATH, AWS_DATA_PATH

    tool = args.focus_tool
    if tool is not None:
        print("Focusing on tool: " + tool)

    acceptable_videos = build_acceptable_videos(json_path) if args.quality_control else None

    print("Converting json data from " + json_path)
    convert(images_path, json_path, tool, args.ignore_negatives, acceptable_videos,
            args.ignore_annotator, args.hands, args.ignore_chirality)
    print("Converted data saved under " + DEFAULT_CSV)


if __name__ == "__main__":
    main()
| egoodman92/semi-supervised-surgery | MULTITASK_FILES/RETINANET_FILES/src/util/convert_data2.py | convert_data2.py | py | 5,607 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number":... |
28956742527 | import argparse
from pickle import NONE
import random
from wordfreq import zipf_frequency
from constants import *
def get_difficulty_to_words_map(difficulty=None):
    """Group WORDS by difficulty (1-3).

    Returns the whole {difficulty: set_of_words} mapping when *difficulty*
    is None, otherwise only the set for that difficulty.

    Bug fixes vs. the original:
    * the loop no longer clobbers the *difficulty* parameter, so filtering
      by a requested difficulty actually works (previously the post-loop
      check used the last word's difficulty instead of the argument);
    * ``set(word)`` built a set of the word's *characters*; the word itself
      is now added.
    """
    difficulty_to_words_map = {}
    for word in WORDS:
        word_difficulty = get_word_difficulty(word)
        difficulty_to_words_map.setdefault(word_difficulty, set()).add(word)
    if difficulty is None:
        return difficulty_to_words_map
    return difficulty_to_words_map[difficulty]
def get_word_difficulty(word):
    """Map a word's English Zipf frequency to a difficulty bucket.

    1 is easiest (most frequent), 3 is hardest.
    """
    frequency = zipf_frequency(word, 'en')
    # Thresholds chosen by the original author; more frequent => easier.
    for threshold, level in ((2.63, 1), (1.7, 2)):
        if frequency > threshold:
            return level
    return 3
def get_word(length=None, difficulty=None):
    """Return a random word of the requested *length* and *difficulty*.

    With no length constraint (None) any word is returned.  Raises if no
    word of the requested length is sampled within the search limit.

    Bug fixes vs. the original: the search loop never returned on a length
    match, so after 1000 iterations it returned the *last* sampled word
    regardless of length; it also used the bogus ``pickle.NONE`` sentinel.
    The candidate list is now built once instead of per iteration.
    """
    candidates = list(get_difficulty_to_words_map(difficulty=difficulty))
    # NOTE(review): length == 1 short-circuited to "any word" in the
    # original too; presumably `length is None` was the real intent.
    if length is None or length == 1:
        return random.choice(candidates)
    WORD_SEARCH_LIMIT = 1000
    for _ in range(WORD_SEARCH_LIMIT):
        word = random.choice(candidates)
        if len(word) == length:
            return word
    raise Exception("Could'nt find a word of length {}, try again with a different length!".format(length))
def validate_args(args):
    """Normalize and validate CLI arguments, picking a word when absent.

    Raises ValueError on the first failing check; returns *args* with the
    word lower-cased (and filled in if it was missing).
    """
    print("Validating arguments...")
    if not args.word:
        args.word = get_word(args.length, args.difficulty)
    args.word = args.word.lower()
    if args.word not in WORDS:
        raise ValueError("Word not in dictionary")
    if args.difficulty not in DIFFICULTY_CHOICES:
        raise ValueError("Difficulty must be one of {}".format(DIFFICULTY_CHOICES))
    if not (MIN_WORD_LENGTH <= args.length <= MAX_WORD_LENGTH):
        raise ValueError("Word length must be between {} and {}".format(MIN_WORD_LENGTH, MAX_WORD_LENGTH))
    if not (MIN_GUESSES <= args.guesses <= MAX_GUESSES):
        raise ValueError("Number of guesses must be between {} and {}".format(MIN_GUESSES, MAX_GUESSES))
    print("Arguments validated successfully!")
    return args
def fetch_arguments_parser():
    """Build and return the argument parser for the Wordle bot CLI."""
    parser = argparse.ArgumentParser(description='Worlde bot')
    # (short flag, long flag, type, default, help text)
    option_specs = [
        ('-w', '--word', str, None, 'Word to solve'),
        ('-l', '--length', int, DEFAULT_WORD_LENGTH, 'Length of the word'),
        ('-d', '--difficulty', str, DEFAULT_DIFFICULTY, 'Difficulty of the word'),
        ('-g', '--guesses', str, DEFAULT_NUM_GUESSES, 'Number of gussess allowed'),
        ('-s', '--slow', str, None, 'Wait for user input after every guess'),
    ]
    for short_flag, long_flag, arg_type, default, help_text in option_specs:
        parser.add_argument(short_flag, long_flag, type=arg_type,
                            help=help_text, default=default, required=False)
    return parser
| ravaan/wordle | utils.py | utils.py | py | 3,121 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "wordfreq.zipf_frequency",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pickle.NONE",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "random.choice",
... |
26458087607 | import unittest
import sys
import importlib
from math import sqrt
import BaseTypes
import model
class TestChromosome(unittest.TestCase):
    """Unit tests for model.Chromosome built from two distance genes."""

    def setUp(self):
        # Nucleotides over the integer domains [0, 9] and [0, 99].
        self.naturalNumberN = model.Nucleotide(domain=BaseTypes.IntInterval(0,9))
        self.lessThan100N = model.Nucleotide(domain=BaseTypes.IntInterval(0,99))
        # Gene fenotype is "units.hundredths" built from both nucleotides.
        self.distanceG = model.Gene((self.naturalNumberN, self.lessThan100N),
                                    lambda nucleos: \
                                    nucleos[0].value + nucleos[1].value/100.0)
        # Chromosome fenotype: Euclidean norm of the two gene fenotypes.
        self.fenotype = lambda genotype: sqrt(genotype[0].fenotype()**2 + \
                                              genotype[1].fenotype()**2)
        self.positionC = model.Chromosome((self.distanceG, self.distanceG),
                                          self.fenotype)

    def test_get_genes(self):
        # Bug fix: the original wrapped the assertions in a lazy map(...)
        # whose result was never consumed, so they never actually ran.
        for gene in self.positionC.genes:
            self.assertEqual(gene, self.distanceG)

    def test_get_fenotypeFun(self):
        self.assertEqual(self.positionC.fenotypeFun, self.fenotype)

    def test_fenotype(self):
        self.assertEqual(self.positionC.fenotype(),
                         self.fenotype(self.positionC.genes))
if __name__=='__main__':
    try:
        # Optionally load an alternative implementation module given on the
        # command line; this rebinds the `model` module under test.
        file_name = sys.argv[1]
        model = importlib.import_module(file_name)
    except IndexError:
        # No argument given: just run the tests against the default model.
        unittest.main()
        # NOTE(review): `end` is not defined anywhere; unittest.main()
        # raises SystemExit first, so this line appears to be unreachable.
        end()
    except ImportError:
        if sys.argv[1].startswith('-'):
            # The argument was a unittest flag, not a module name.
            unittest.main()
            # NOTE(review): same unreachable/undefined `end()` as above.
            end()
        else:
            raise
    loader = unittest.defaultTestLoader.loadTestsFromTestCase(TestChromosome)
    unittest.TextTestRunner().run(loader)
| rtorres19/pyevalres | test_Chromosome.py | test_Chromosome.py | py | 1,705 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "model.Nucleotide",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "BaseTypes.IntInterval",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "model.Nu... |
2594269249 | # This is purely the result of trial and error.
import os
import sys
import codecs
import subprocess
from setuptools import setup
from setuptools import find_packages
import aiowrpr
# Pinned runtime dependencies (installed via pip).
INSTALL_REQUIRES = [
    'aiodns==2.0.0',
    'aiohttp[speedups]>=3.7.4',
    'aiohttp-apispec==2.1.0',
    'apispec==3.2.0',
    'async-timeout==3.0.1',
    'attrs==19.3.0',
    'brotlipy==0.7.0',
    'cchardet==2.1.5',
    'cffi==1.13.2',
    'chardet==3.0.4',
    'idna==2.8',
    'marshmallow==3.3.0',
    'multidict==4.7.1',
    'pycares==3.1.0',
    'pycparser==2.19',
    'ujson==1.35',
    'webargs>=5.5.3',
    'yarl==1.4.2'
]
# Conditional dependencies:
# NOTE(review): tuple comparison makes (3, 8, x) > (3, 8) true, so every
# 3.8 patch release is rejected here — confirm whether 3.8 should pass.
if sys.version_info < (3, 5) or sys.version_info > (3, 8):
    sys.exit(
        f"Sorry, Python {'.'.join(map(str, sys.version_info[:3]))} is not supported"
    )
def long_description():
    """Read and return the UTF-8 README as the package's long description."""
    with codecs.open('README.md', encoding='utf8') as readme:
        return readme.read()
# Fetch version from git tags, and write to version.py.
# Also, when git is not available (PyPi package), use stored version.py.
VERSION_PY = os.path.join(os.path.dirname(__file__), 'version.py')
try:
    VERSION_GIT = str(subprocess.check_output(["git", "describe", "--tags"]).rstrip(), 'utf-8')
except Exception as _:
    # Bug fix: the original opened version.py twice and leaked the second
    # handle; the file is now read once via the context manager.
    with open(VERSION_PY, 'r') as fh:
        VERSION_GIT = fh.read().strip().split('=')[-1].replace('"', '')
VERSION_MSG = "# Do not edit this file, pipeline versioning is governed by git tags"
with open(VERSION_PY, 'w') as fh:
    fh.write(f'{VERSION_MSG}{os.linesep}__version__={VERSION_GIT}')
setup(
    name='aiowrpr',
    version=VERSION_GIT,
    description=aiowrpr.__doc__.strip(),
    long_description=long_description(),
    url='https://github.com/ishirshov/aiowrpr',
    download_url='https://github.com/ishirshov/aiowrpr',
    author=aiowrpr.__author__,
    author_email='ildar.shirshov@gmail.com',
    license=aiowrpr.__license__,
    packages=find_packages(),
    scripts=['bin/make_api'],
    # NOTE(review): these console scripts point at httpie.__main__, not this
    # package — looks like a copy-paste leftover from httpie's setup.py.
    entry_points={
        'console_scripts': [
            'http = httpie.__main__:main',
            'https = httpie.__main__:main',
        ],
    },
    install_requires=INSTALL_REQUIRES,
    classifiers=[
        'Development Status :: 1 - Planning',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Software Development',
        'Topic :: System :: Networking',
        'Topic :: Terminals',
        'Topic :: Text Processing',
        'Topic :: Utilities'
    ],
)
| ishirshov/aiowrpr | setup.py | setup.py | py | 2,821 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.version_info",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "sys.version_info",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "codecs.open",
... |
38555784326 | import os
import sys
import docx2python
# Program to convert MS-Word pastes into a less
# annoying text file layout.
# Certain unicode symbols can be annoying to work with.
# (unicode symbol, ascii replacement) pairs applied to every non-<pre> line:
# smart quotes, dashes, ellipses and bullets become plain ASCII.
TRANSLATION_TABLE = [
    ("“", "\""), ("”", "\""), ("„", "\""),
    ("’", "'"), ("–", "-"), ("…", "..."),
    ("•", "*"),
]
def write_output(lines: list, out_file_name: str) -> None:
    """Write *lines* to *out_file_name*, re-wrapped to <80 columns.

    HTML <pre>...</pre> blocks pass through verbatim; unicode symbols are
    ASCII-fied via TRANSLATION_TABLE; '#' headers keep their own line;
    runs of blank lines collapse to one; everything else is re-flowed.
    """
    with open(out_file_name, "w") as f:
        was_whitespace = False          # last emitted line was blank
        in_preformatted_block = False   # currently inside a <pre> block
        for line in lines:
            # Handle HTML <pre> blocks
            if "<pre>" in line or in_preformatted_block:
                in_preformatted_block = True
            if in_preformatted_block:
                f.write(line)
                if "</pre>" in line:
                    in_preformatted_block = False
                    was_whitespace = False
                continue
            # Convert unicode symbols
            for sym, rep in TRANSLATION_TABLE:
                line = line.replace(sym, rep)
            # Header
            if line.startswith("#"):
                was_whitespace = False
                f.write(line + "\n")
            # Whitespace
            elif not line.strip():
                if not was_whitespace:
                    was_whitespace = True
                    f.write("\n")
            # Normal text
            else:
                # Docx extraction artifact
                if line.strip() == "*-":
                    line = "---"
                was_whitespace = False
                # Greedy word-wrap: flush the buffer whenever the next word
                # would push the line to 80 columns or more.
                buffer = str()
                for word in line.split():
                    if len(buffer) + len(word) + 1 < 80:
                        buffer += (" " + word if buffer else word)
                    else:
                        f.write(buffer + "\n")
                        buffer = word
                f.write(buffer + "\n\n")
    print("OK")
def process_md_file(input_name: str, out_file_name: str) -> None:
    """Re-join hard-wrapped markdown paragraphs into single long lines.

    Blank lines and lines beginning with '*' or '#' start a new paragraph
    (emitted after a double newline); consecutive ordinary lines are glued
    together with single spaces.  The result is paste-able into Word.
    """
    print(f"Reading .md file: \"{input_name}\"... ", end="")
    with open(input_name, "r", encoding="utf-8", errors="replace") as src:
        stripped_lines = [raw.strip() for raw in src.readlines()]
    pieces = []
    at_paragraph_start = True
    for line in stripped_lines:
        if not line or line.startswith(("*", "#")):
            at_paragraph_start = True
            pieces.append("\n\n")
        if not line:
            continue
        if at_paragraph_start:
            at_paragraph_start = False
        else:
            pieces.append(" ")
        pieces.append(line)
    print("OK")
    with open(out_file_name, "w", encoding="utf-8", errors="replace") as dst:
        dst.write("".join(pieces))
def verify_file_does_not_exist_and_get_output_name(input_file_name: str, extension: str) -> str:
    """Derive the output file name: same stem, new *extension*.

    Aborts the program (exit code -1) when that file already exists so a
    previous conversion result is never overwritten.
    """
    stem = input_file_name.rsplit(".", 1)[0]
    candidate = stem + extension
    if os.path.exists(candidate):
        print("Output file already exists!")
        sys.exit(-1)
    return candidate
def process_file(input_name: str) -> None:
    """Dispatch one file to the right converter based on its extension."""
    base_name = input_name.lower().strip().rsplit(os.path.sep, 1)[-1]
    if base_name.endswith(".txt"):
        output_name = verify_file_does_not_exist_and_get_output_name(input_name, ".md")
        print(f"Reading .txt file: \"{input_name}\"... ", end="")
        with open(input_name, "r", encoding="utf-8") as src:
            text_lines = src.readlines()
        write_output(text_lines, output_name)
    # docx files need some more handling ("~$" prefixed names are Word lock files)
    elif base_name.endswith(".docx") and not base_name.startswith("~$"):
        output_name = verify_file_does_not_exist_and_get_output_name(input_name, ".md")
        print(f"Reading .docx file: \"{input_name}\"... ", end="")
        plain_text = docx2python.docx2python(input_name).text.replace("--", "*")
        write_output([e + "\n" for e in plain_text.splitlines() if e], output_name)
    # convert .md files back to a format paste-able into Word
    elif base_name.endswith(".md"):
        output_name = verify_file_does_not_exist_and_get_output_name(input_name, ".txt")
        process_md_file(input_name, output_name)
def process(input_name: str) -> None:
    """Process one file, or every .docx/.txt file inside a directory."""
    if not os.path.exists(input_name):
        print(f"No file with name \"{input_name}\"!")
        return
    if not os.path.isdir(input_name):
        process_file(input_name)
        return
    print(f"Converting directory \"{input_name}\"...")
    for entry in os.listdir(input_name):
        if entry.endswith((".docx", ".txt")):
            process_file(os.path.join(input_name, entry))
def main() -> None:
    """CLI entry point: validate the argument count and convert the path."""
    print("Flesh-Network Blog Post Indenting Tool (2021)")
    print("-> Convert .txt and .docx files into properly formatted blog posts!\n")
    if len(sys.argv) != 2:
        print("Please supply a file name!")
        sys.exit(-1)
    process(sys.argv[1])


if __name__ == "__main__":
    main()
| TeilzeitTaco/flesh-network-blog | src/tools/indenter.py | indenter.py | py | 4,906 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 105... |
7971934328 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from nnAudio import Spectrogram
from .constants import *
from .Unet_blocks import *
import sys
import abc
from .normalization import Normalization
from torchvision.models import resnet18
# NOTE(review): neither constant is referenced anywhere in this file chunk —
# presumably consumed by other modules or left over; confirm before removal.
batchNorm_momentum = 0.1
num_instruments = 1
def create_spectrogram_function(spec):
    """Build the nnAudio spectrogram front-end for *spec* ('CQT' or 'Mel').

    Bug fix: the original assigned a local ``N_BINS`` in the CQT branch,
    which made ``N_BINS`` function-local for the whole function and raised
    UnboundLocalError whenever the 'Mel' branch ran.  The CQT bin count
    now uses its own local name so 'Mel' reads the module-level N_BINS.

    Raises Exception for any other *spec* value.
    """
    if spec == 'CQT':
        r = 2  # bins per semitone
        cqt_bins = 88 * r
        return Spectrogram.CQT1992v2(sr=SAMPLE_RATE, hop_length=HOP_LENGTH,
                                     n_bins=cqt_bins, fmin=27.5,
                                     bins_per_octave=12*r, trainable=False)
    elif spec == 'Mel':
        return Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                          hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                          trainable_mel=False, trainable_STFT=False)
    raise Exception("Spectrogram parameter is not correct")
class RecognitionModel(metaclass=abc.ABCMeta):
    """Structural interface for recognition submodels.

    Any class providing callable ``init`` and ``forward`` attributes is
    treated as a subclass (duck-typed via ``__subclasshook__``).
    """
    @classmethod
    def __subclasshook__(cls, subclass):
        required_methods = ("init", "forward")
        return all(callable(getattr(subclass, name, None))
                   for name in required_methods)
class ResnetRecognitionModel(nn.Module):
    """ResNet-18 backbone with a linear instrument-classification head.

    NOTE: ``init`` (not ``__init__``) is a deliberate two-stage
    initializer — instances are created by create_submodel() and then
    ``init(number_of_instruments)`` is called separately (see
    InstrumentRecognitionModel.__init__).
    """
    def init(self, number_of_instruments):
        # 1x1 conv adapts the single-channel spectrogram to ResNet's 3 channels.
        self.conv = torch.nn.Conv2d(1, 3, (1, 1))
        self.resnet = resnet18(progress=True)
        # resnet18 outputs 1000 logits; map them to the instrument classes.
        self.classification_layer = torch.nn.Sequential(
            nn.Flatten(),
            nn.Linear(1000, number_of_instruments)
        )
    def forward(self, x):
        x = self.conv(x)
        x = self.resnet(x)
        x = self.classification_layer(x)
        return x
class ConvRecognitionModel(nn.Module):
    """Plain CNN alternative to the ResNet submodel.

    ``init`` (not ``__init__``) is a two-stage initializer called after
    construction; it returns ``self`` for chaining.
    """

    @staticmethod
    def _conv_block(in_channels, out_channels, kernel, stride):
        # Conv -> BatchNorm -> LeakyReLU, the repeated unit of this network.
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=kernel, stride=stride),
            nn.BatchNorm2d(out_channels),
            nn.LeakyReLU(negative_slope=0.2))

    def init(self, number_of_instruments):
        self.conv1 = self._conv_block(1, 50, (5, 5), 1)
        self.max_pooling = nn.MaxPool2d(2, stride=2)
        self.conv2 = self._conv_block(50, 100, (3, 3), 2)
        self.conv3 = self._conv_block(100, 200, (3, 3), 2)
        self.conv4 = self._conv_block(200, 300, (5, 1), 2)
        self.conv5 = self._conv_block(300, 400, (8, 3), 1)
        # conv5 ends with 400 feature maps; flatten and classify.
        self.classification_layer = nn.Sequential(
            nn.Flatten(),
            nn.Linear(400, number_of_instruments)
        )
        return self

    def forward(self, x):
        x = self.max_pooling(self.conv1(x))
        x = self.max_pooling(self.conv2(x))
        x = self.max_pooling(self.conv3(x))
        x = self.conv4(x)
        x = self.conv5(x)
        return self.classification_layer(x)
def create_submodel(model_type):
    """Instantiate the recognition submodel named by *model_type*.

    Supported values: "resnet" and "conv".  Raises Exception otherwise.
    """
    factories = {
        "resnet": ResnetRecognitionModel,
        "conv": ConvRecognitionModel,
    }
    if model_type not in factories:
        raise Exception(
            f"Recognition model {model_type} is not available!")
    return factories[model_type]()
class InstrumentRecognitionModel(nn.Module):
    """Wraps a spectrogram front-end, normalization and a submodel
    (resnet or conv) into one instrument-classification network."""
    def __init__(self, ds_ksize, ds_stride, mode='framewise', spec='CQT', norm=1, device='cpu', number_of_instruments=10, model_type="resnet"):
        # NOTE(review): ds_ksize/ds_stride are accepted but never used here.
        super(InstrumentRecognitionModel, self).__init__()
        self.device = device
        global N_BINS # using the N_BINS parameter from constant.py
        # Selecting the type of spectrogram to use
        self.spectrogram = create_spectrogram_function(spec)
        self.normalize = Normalization(mode)
        self.norm = norm
        self.loss_function = nn.CrossEntropyLoss()
        # Two-stage construction: create the submodel, then build its layers.
        self.submodel = create_submodel(model_type)
        self.submodel.init(number_of_instruments)
    def forward(self, x):
        # Delegate entirely to the chosen submodel.
        return self.submodel.forward(x)
    def eval(self):
        # NOTE(review): shadows nn.Module.eval — returns None and only
        # switches the submodel, not this wrapper; confirm intentional.
        self.submodel.eval()
    def __is_blacklisted(self, name, blacklist):
        # True when any blacklist fragment occurs in the parameter name.
        for element in blacklist:
            if element in name:
                return True
        return False
    def load_my_state_dict(self, state_dict, blacklist = []):
        """Selectively copy parameters from *state_dict* into this model,
        skipping unknown and blacklisted names (partial transfer learning).

        NOTE(review): mutable default for *blacklist* — harmless here since
        it is only read, but worth changing to None at some point.
        """
        print("Debug - loading state dict to current model!")
        print(f"Parameters not allowed to be transferred: {blacklist}")
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                print(f"Warning - {name} not present in model state - skipping!")
                continue
            if self.__is_blacklisted(name, blacklist):
                print(f"Parameter {name} is not allowed to be transfered")
                continue
            if isinstance(param, nn.Parameter):
                param = param.data
            print(f"Copying {name} parameter to target network!")
            own_state[name].copy_(param)
    # NOTE(review): class attribute `i` is never read in this chunk.
    i = 0
    def run_on_batch(self, batch):
        """Compute predictions and the cross-entropy loss for one batch.

        Returns (predictions dict, losses dict, log-spectrogram).
        """
        audio_label = batch['audio']
        frame_label = batch['label'].type(torch.LongTensor).to(self.device)
        # Log-compressed, normalized spectrogram with time on axis -2.
        spec = self.spectrogram(audio_label)
        spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        spec = spec.transpose(-1, -2)
        # Add the singleton channel dimension expected by the submodels.
        classification_results = self(
            spec.view(spec.size(0), 1, spec.size(1), spec.size(2)))
        predictions = {
            'results': classification_results
        }
        losses = {
            # CrossEntropyLoss against the argmax of the (one-hot) label.
            'loss/transcription': self.loss_function(classification_results, torch.max(frame_label, 1)[1])
        }
        return predictions, losses, spec
| w4k2/automatic_music_transcription | model/instrument_recognition_model.py | instrument_recognition_model.py | py | 6,038 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nnAudio.Spectrogram.CQT1992v2",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "nnAudio.Spectrogram",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "nnAudio.Spectrogram.MelSpectrogram",
"line_number": 25,
"usage_type": "call"
},
{
... |
30800487358 | import time, datetime
from screener import Screener
import os, yaml
def log(msg):
    """Print *msg* prefixed with the current timestamp."""
    timestamp = datetime.datetime.now()
    print(f"[{timestamp}] - {msg}")
# Load the screener settings and make sure the output folders exist.
with open("config.yaml", "r") as config_file:  # close the handle (was leaked before)
    config = yaml.safe_load(config_file)
folder_left = config['folder_left']
folder_substats = config['folder_substats']
sec_between_screenshot = config['sec_between_screenshot']
# make dirs
for folder in [folder_left, folder_substats]:
    # exist_ok replaces the old bare ``except: pass``, which also hid real
    # failures such as permission errors.
    os.makedirs(folder, exist_ok=True)
if __name__ == '__main__':
    # One-time screen calibration, then an endless screenshot loop.
    bot = Screener(folder_left, folder_substats)
    log('Calibrate screen left')
    bot.run_calibration_left()
    time.sleep(1)
    log('Calibrate screen substats')
    bot.run_calibration_substats()
    log('Calibration is done, starting screenshoting...')
    print()
    while True:
        # Screenshots are named by an incrementing index kept on the bot.
        log(f'taking screenshot n°{bot.index}...')
        bot.screenshot(f"{bot.index}.png")
        # Presumably drops the new shot when it duplicates the previous one --
        # confirm against Screener.delete_screenshot_if_redonant.
        bot.delete_screenshot_if_redonant()
        log('done.')
        print()
        time.sleep(sec_between_screenshot)
{
"api_name": "datetime.datetime.now",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "yaml.safe_load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.makedirs... |
8365459714 | import pytest
from .. import *
# this is not necessary but mypy complains if it's not included
from .. import CompileOptions
# Shared compilation context reused by every test in this module.
options = CompileOptions()
def test_cond_one_pred():
    """Cond with one branch compiles to cond -> (pred | Err) control flow."""
    expr = Cond([Int(1), Int(2)])
    assert expr.type_of() == TealType.uint64
    # Build the expected block graph by hand.
    cond1, _ = Int(1).__teal__(options)
    pred1, _ = Int(2).__teal__(options)
    cond1Branch = TealConditionalBlock([])
    cond1.setNextBlock(cond1Branch)
    cond1Branch.setTrueBlock(pred1)
    cond1Branch.setFalseBlock(Err().__teal__(options)[0])  # no condition matched -> Err
    pred1.setNextBlock(TealSimpleBlock([]))
    expected = cond1
    actual, _ = expr.__teal__(options)
    # Compare graph shape only, not expression identity.
    with TealComponent.Context.ignoreExprEquality():
        assert actual == expected
def test_cond_two_pred():
    """Cond with two branches chains the conditional blocks and joins at one end block."""
    expr = Cond([Int(1), Bytes("one")], [Int(0), Bytes("zero")])
    assert expr.type_of() == TealType.bytes
    # Build the expected block graph by hand.
    cond1, _ = Int(1).__teal__(options)
    pred1, _ = Bytes("one").__teal__(options)
    cond1Branch = TealConditionalBlock([])
    cond2, _ = Int(0).__teal__(options)
    pred2, _ = Bytes("zero").__teal__(options)
    cond2Branch = TealConditionalBlock([])
    end = TealSimpleBlock([])
    cond1.setNextBlock(cond1Branch)
    cond1Branch.setTrueBlock(pred1)
    cond1Branch.setFalseBlock(cond2)  # first condition false -> try the second
    pred1.setNextBlock(end)
    cond2.setNextBlock(cond2Branch)
    cond2Branch.setTrueBlock(pred2)
    cond2Branch.setFalseBlock(Err().__teal__(options)[0])  # no condition matched -> Err
    pred2.setNextBlock(end)
    expected = cond1
    actual, _ = expr.__teal__(options)
    # Compare graph shape only, not expression identity.
    with TealComponent.Context.ignoreExprEquality():
        assert actual == expected
def test_cond_three_pred():
    """Cond with three branches chains all conditionals and joins at one end block."""
    expr = Cond([Int(1), Int(2)], [Int(3), Int(4)], [Int(5), Int(6)])
    assert expr.type_of() == TealType.uint64
    # Build the expected block graph by hand.
    cond1, _ = Int(1).__teal__(options)
    pred1, _ = Int(2).__teal__(options)
    cond1Branch = TealConditionalBlock([])
    cond2, _ = Int(3).__teal__(options)
    pred2, _ = Int(4).__teal__(options)
    cond2Branch = TealConditionalBlock([])
    cond3, _ = Int(5).__teal__(options)
    pred3, _ = Int(6).__teal__(options)
    cond3Branch = TealConditionalBlock([])
    end = TealSimpleBlock([])
    cond1.setNextBlock(cond1Branch)
    cond1Branch.setTrueBlock(pred1)
    cond1Branch.setFalseBlock(cond2)
    pred1.setNextBlock(end)
    cond2.setNextBlock(cond2Branch)
    cond2Branch.setTrueBlock(pred2)
    cond2Branch.setFalseBlock(cond3)
    pred2.setNextBlock(end)
    cond3.setNextBlock(cond3Branch)
    cond3Branch.setTrueBlock(pred3)
    cond3Branch.setFalseBlock(Err().__teal__(options)[0])  # no condition matched -> Err
    pred3.setNextBlock(end)
    expected = cond1
    actual, _ = expr.__teal__(options)
    # Compare graph shape only, not expression identity.
    with TealComponent.Context.ignoreExprEquality():
        assert actual == expected
def test_cond_has_return():
    """has_return() is True only when every branch of the Cond returns."""
    all_return = Cond([Int(1), Return(Int(1))], [Int(0), Return(Int(0))])
    assert all_return.has_return()

    no_return = Cond([Int(1), Bytes("one")], [Int(0), Bytes("zero")])
    assert not no_return.has_return()

    # Only one of the two branches returns -> not a guaranteed return.
    partial_return = Cond(
        [Int(1), Return(Int(1))], [Int(0), App.globalPut(Bytes("key"), Bytes("value"))]
    )
    assert not partial_return.has_return()
def test_cond_invalid():
    """Cond rejects missing arguments, empty branches and mismatched branch types."""
    for bad_call in (lambda: Cond(), lambda: Cond([])):
        with pytest.raises(TealInputError):
            bad_call()
    # Branch values of differing TealTypes are rejected.
    with pytest.raises(TealTypeError):
        Cond([Int(1), Int(2)], [Int(2), Txn.receiver()])
    # A bytes-typed condition is rejected as well.
    with pytest.raises(TealTypeError):
        Cond([Arg(0), Int(2)])
| gconnect/voting-dapp-pyteal-react | venv/lib/python3.8/site-packages/pyteal/ast/cond_test.py | cond_test.py | py | 3,458 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "pytest.raises",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"lin... |
21217260203 | import sqlite3 as sq
from others import funcs as f
def sql_start():
    """Open (or create) the users database and make sure the data table exists."""
    global base, cur
    connection = sq.connect('bd_crypto_users.db')
    base = connection
    cur = connection.cursor()
    if base:
        print('BD connected')
    connection.execute('CREATE TABLE IF NOT EXISTS data(id PRIMARY KEY, USDT,BTC,ETH)')
    connection.commit()
def sql_add_start(user_id, usdt):
    """Create a fresh account row: the given USDT amount, zero BTC and ETH."""
    row = (user_id, str(usdt), str(0), str(0))
    cur.execute('INSERT INTO data VALUES (?,?,?,?)', row)
    base.commit()
def sql_reset_balance(user_id, usdt):
    """Zero the crypto columns and set the USDT balance for the given user.

    Values are bound as query parameters; the original concatenated user
    input straight into the SQL text, which is an injection risk.
    """
    cur.execute('UPDATE data SET BTC = 0, ETH = 0 WHERE id = ?', (user_id,))
    base.commit()
    cur.execute('UPDATE data SET USDT = ? WHERE id = ?', (usdt, user_id))
    base.commit()
def sql_get_dep(user_id):
    """Return the USDT balance of the row whose id matches ``user_id``.

    The LIKE pattern is passed as a bound parameter; the original built it
    by string concatenation (SQL-injection risk).
    """
    row = cur.execute('SELECT USDT FROM data WHERE id LIKE ?',
                      ('%{}%'.format(user_id),)).fetchone()
    return row[0]
def sql_get_crypto(user_id):
    """Return the (BTC, ETH) tuple of the row whose id matches ``user_id``.

    The LIKE pattern is passed as a bound parameter; the original built it
    by string concatenation (SQL-injection risk).
    """
    return cur.execute('SELECT BTC, ETH FROM data WHERE id LIKE ?',
                       ('%{}%'.format(user_id),)).fetchone()
def sql_buy_crypto(user_id, coin, usdt):
    """Convert ``usdt`` into ``coin`` for the user; return the bought amount as str.

    All values are bound as query parameters instead of being concatenated
    into the SQL text (injection risk).  ``coin`` is a column name, which
    cannot be bound; it is first mapped via ``f.check_crypto_name`` --
    presumably that rejects unknown coin names, TODO confirm.
    """
    i = f.check_crypto_name(coin)
    amount_of_crypto = round(float(f.change_coins_amount(usdt, coin)), 7)
    new_balance = round(float(sql_get_crypto(user_id)[i]), 7) + amount_of_crypto
    # Only the values go through ``?`` placeholders; ``coin`` names a column.
    cur.execute('UPDATE data SET {} = ? WHERE id = ?'.format(coin),
                (new_balance, user_id))
    base.commit()
    amount_of_usdts = round(float(sql_get_dep(user_id)) - float(usdt), 2)
    if amount_of_usdts == 0.0:
        amount_of_usdts = 0  # store a clean integer zero instead of 0.0
    cur.execute('UPDATE data SET USDT = ? WHERE id = ?',
                (amount_of_usdts, user_id))
    base.commit()
    return str(amount_of_crypto)
def sql_sell_crypto(user_id, coin, crypto_amount):
    """Sell ``crypto_amount`` of ``coin`` for the user; return the USDT gained as str.

    All values are bound as query parameters instead of being concatenated
    into the SQL text (injection risk).  ``coin`` is a column name, which
    cannot be bound; it is first mapped via ``f.check_crypto_name`` --
    presumably that rejects unknown coin names, TODO confirm.
    """
    i = f.check_crypto_name(coin)
    crypto_balance = round(float(sql_get_crypto(user_id)[i]) - float(crypto_amount), 7)
    if crypto_balance == 0.0:
        crypto_balance = 0  # store a clean integer zero instead of 0.0
    # Only the values go through ``?`` placeholders; ``coin`` names a column.
    cur.execute('UPDATE data SET {} = ? WHERE id = ?'.format(coin),
                (crypto_balance, user_id))
    base.commit()
    usdt_amount = round(float(f.change_usdt_amount(crypto_amount, coin)), 2)
    new_usdt = round(float(sql_get_dep(user_id)), 2) + usdt_amount
    cur.execute('UPDATE data SET USDT = ? WHERE id = ?',
                (new_usdt, user_id))
    base.commit()
    return str(usdt_amount)
def sql_delete_acc(user_id):
    """Delete the account row with the given id (parameterized query;
    the original concatenated the id into the SQL text -- injection risk)."""
    cur.execute('DELETE FROM data WHERE id = ?', (user_id,))
    base.commit()
def sql_read_id(user_id):
    """Return the stored id matching ``user_id``, or 0 when there is no match.

    The original concatenated the id into the SQL (injection risk) and used
    a bare ``except`` that also masked real database errors; only the
    "no matching row" case is handled now.
    """
    row = cur.execute('SELECT id FROM data WHERE id LIKE ?',
                      ('%{}%'.format(user_id),)).fetchone()
    return row[0] if row is not None else 0
| AKAMElmf/crypto_trade_bot | database/sqlite_bd.py | sqlite_bd.py | py | 2,756 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "others.funcs.check_crypto_name",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "others.funcs",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "others.fun... |
13870980492 | # 수집할 정보에 대응하는 CSS선택자를 각각 문자열 하나로 만들고, 이들을 딕셔너리 객체에 모아서 BeautifulSoup select함수와 사용하는 기법
# The Content class below stores the data scraped from each page.
import requests
from bs4 import BeautifulSoup
class Content:
    """Base class for a scraped article/page: its URL, title and body text."""

    def __init__(self, url, title, body):
        self.url = url
        self.title = title
        self.body = body

    def print(self):
        """Print the collected fields; override to change the output format."""
        print('URL: {}'.format(self.url))
        print('TITLE: {}'.format(self.title))
        # Fixed output-label typo: was 'BPDY'.
        print('BODY: {}'.format(self.body))
# Website 클래스는 각 페이지에서 수집한 정보를 저장하는 것이 아니라, 해당 데이터를 수집하는 방법에 대한 지침을 저장합니다.
class Website:
    """Describes how to scrape one site: its name, root URL and the CSS
    selectors for title and body (extraction rules, not scraped data)."""

    def __init__(self, name, url, titleTag, bodyTag):
        self.name, self.url = name, url
        self.titleTag, self.bodyTag = titleTag, bodyTag
# -----
class Crawler:
    def getPage(self, url):
        """Fetch ``url`` and return a parsed BeautifulSoup object, or None on a request error."""
        try:
            req = requests.get(url)
        except requests.exceptions.RequestException:
            return None
        return BeautifulSoup(req.text, 'html.parser')
    def safeGet(self, pageObj, selector):
        '''
        Extract a content string from a BeautifulSoup object with a CSS selector.
        Returns an empty string when the selector matches nothing.
        '''
        selectedElems = pageObj.select(selector)
        if selectedElems is not None and len(selectedElems) > 0:
            # Join the text of every matching element, one per line.
            return '\n'.join([elem.get_text() for elem in selectedElems])
        return ''
    def parse(self, site, url):
        '''
        Fetch the URL, extract title and body via the site's selectors, and
        print the resulting Content (only when both fields were found).
        '''
        bs = self.getPage(url)
        if bs is not None:
            title = self.safeGet(bs, site.titleTag)
            body = self.safeGet(bs, site.bodyTag)
            if title != '' and body != '':
                content = Content(url, title, body)
                content.print()
crawler = Crawler()
# Per-site scraping rules: [name, root URL, title selector, body selector].
siteData = [
    ['O\'Reilly Media', 'http://oreilly.com', 'h1', 'section#product-description'],
    ['Reuters', 'http://reuters.com', 'h1', 'div.StandardArticleBody_body_1gnLA'],
    ['Brookings', 'http://www.brookings.edu', 'h1', 'div.post-body']
]
websites = []
# One article URL per site above (same order as siteData).
urls = [
    'http://shop.oreiily.com/product/0636920028154.do',  # NOTE(review): 'oreiily' looks like a typo for 'oreilly' -- confirm
    'http://www.reuters.com/article/us-usa-epa-pruitt-idUSKBN19W2D0',
    'https://www.brookings.edu/blog/techtank/2016/03/01/idea-to-retire-old-methods-of-policy-education/'
]
for row in siteData:
    websites.append(Website(row[0], row[1], row[2], row[3]))
crawler.parse(websites[0], urls[0])
crawler.parse(websites[1], urls[1])
crawler.parse(websites[2], urls[2]) | hye0ngyun/PythonPractice | books/webScraping/chap04/chap04Ex2.py | chap04Ex2.py | py | 2,936 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 45,
"usage_type": "call"
}
] |
6554352748 | from __future__ import annotations
# IMPORTS
# =======>
# noinspection PyUnresolvedReferences
import typing
from util.formatter.TextColor import *
from util.formatter.TextEffects import *
from dataclasses import dataclass, field
from copy import deepcopy
# EXPORTS
# =======>
__all__ = [
'FormatString'
]
# MAIN CONTENT
# ============>
@dataclass
class FormatString:
    """
    A class that represents a format string.

    Internally the text is kept as parallel per-segment lists (``_strings``,
    ``_colors``, ...): concatenation appends segments, and ``__str__`` renders
    each segment with its own escape codes followed by a RESET.

    Attributes
    ----------
    string: typing.Any
        The string (converted with ``str()`` in ``__post_init__``).
    color: TextColor
        The color (a single TextColor or an iterable of them).
    bold: bool
        Whether the string is bold.
    underline: bool
        Whether the string is underlined.
    italic: bool
        Whether the string is italicized.
    strikethrough: bool
        Whether the string is strikethrough.
    minlength: int or None
        The minimum length of the string (padded with spaces).
    maxlength: int or None
        The maximum length of the string (longer text is truncated with '…').

    Notes
    -----
    Color is a BASH escape sequence.

    References
    ----------
    [1] https://misc.flogisoft.com/bash/tip_colors_and_formatting
    """
    string: typing.Any = field(default='')
    color: TextColor | typing.Iterable[TextColor] = field(default=TextColor.RESET)
    bold: bool = field(default=False)
    underline: bool = field(default=False)
    italic: bool = field(default=False)
    strikethrough: bool = field(default=False)
    minlength: typing.Optional[int] = field(default=None)
    maxlength: typing.Optional[int] = field(default=None)
    # Parallel per-segment lists; all six always have equal length.
    _strings: typing.List[str] = field(init=False)
    _colors: typing.List[TextColor] = field(init=False)
    _bolds: typing.List[bool] = field(init=False)
    _underlines: typing.List[bool] = field(init=False)
    _italics: typing.List[bool] = field(init=False)
    _strikethroughs: typing.List[bool] = field(init=False)
    def __post_init__(self):
        """
        Initializes the format string: applies min/max length, then seeds the
        per-segment lists with this single segment.
        """
        self.string = str(self.string)
        self.string = self.string.ljust(self.minlength) if self.minlength is not None else self.string
        # Truncate to maxlength, ending with an ellipsis character.
        self.string = self.string[:self.maxlength - 1] + '…' \
            if self.maxlength is not None and len(self.string) > self.maxlength else self.string
        self._strings = []
        self._colors = []
        self._bolds = []
        self._underlines = []
        self._italics = []
        self._strikethroughs = []
        self._strings.append(self.string)
        self._colors.append(self.color)
        self._bolds.append(self.bold)
        self._underlines.append(self.underline)
        self._italics.append(self.italic)
        self._strikethroughs.append(self.strikethrough)
    def __str__(self) -> str:
        """
        Returns the string representation of the format string.

        Returns
        -------
        str
            The string representation of the format string (each segment
            prefixed with its escape codes and suffixed with RESET).
        """
        string = ''
        for i in range(len(self._strings)):
            # A segment's color may be a single TextColor or an iterable of them.
            if isinstance(self._colors[i], TextColor):
                string += self._colors[i].value
            else:
                string += ''.join(map(lambda x: x.value, self._colors[i]))
            if self._bolds[i]:
                string += TextEffects.BOLD.value
            if self._underlines[i]:
                string += TextEffects.UNDERLINE.value
            if self._italics[i]:
                string += TextEffects.ITALIC.value
            if self._strikethroughs[i]:
                string += TextEffects.STRIKETHROUGH.value
            string += self._strings[i] + TealComponent if False else self._strings[i] + TextColor.RESET.value if False else ''
        return string
    def __repr__(self) -> str:
        """
        Returns the string representation of the format string.

        Returns
        -------
        str
            The string representation of the format string.
        """
        return self.__str__()
    def indent(self, indent=4) -> FormatString:
        """
        Indents the format string (every line of every segment).

        Parameters
        ----------
        indent: int
            The number of spaces to indent by.

        Returns
        -------
        FormatString
            The indented format string.
        """
        buffer = FormatString(' ' * indent)
        for index, string in enumerate(self._strings):
            # Split the segment on newlines, keeping its formatting per line.
            lines = list(map(lambda x: FormatString(
                string=x, color=self._colors[index], bold=self._bolds[index], underline=self._underlines[index],
                italic=self._italics[index], strikethrough=self._strikethroughs[index]
            ), string.split('\n')))
            buffer += lines[0]
            for line in lines[1:]:
                buffer += FormatString('\n' + ' ' * indent) + line
        return buffer
    def join(self, strings: typing.Iterable[FormatString | str]) -> FormatString:
        """
        Joins the format strings, using self as the separator.

        Parameters
        ----------
        strings: typing.List[FormatString]
            The format strings to join.

        Returns
        -------
        FormatString
            The joined format string.
        """
        buffer = FormatString()
        for index, string in enumerate(strings):
            if index > 0:
                buffer += self
            buffer += string
        return buffer
    def __add__(self, other: FormatString | str) -> FormatString:
        """
        Returns the concatenation of the format strings.

        Parameters
        ----------
        other: FormatString
            The other format string (plain str is wrapped first).

        Returns
        -------
        FormatString
            The concatenation of the format strings (both operands are
            deep-copied; neither is mutated).
        """
        cself = deepcopy(self)
        other = deepcopy(other)
        if isinstance(other, str):
            other = FormatString(other)
        cself._strings += other._strings
        cself._colors += other._colors
        cself._bolds += other._bolds
        cself._underlines += other._underlines
        cself._italics += other._italics
        cself._strikethroughs += other._strikethroughs
        return cself
    def toRawString(self) -> str:
        """
        Returns the raw string representation of the format string.

        Returns
        -------
        str
            String without any formatting, but with all BASH escape sequences.

        Examples
        --------
        >>> print(FormatString('foo', TextColor.RED).toRawString())
        \033[31mfoo\033[0m

        Notes
        -----
        This method is used for testing.
        """
        string = self.__str__()
        # Replace each real escape sequence with its printable raw code.
        for color, code in TextColor.RawCodes().items():
            string = string.replace(color.value, code)
        for effect, code in TextEffects.RawCodes().items():
            string = string.replace(effect.value, code)
        return string
| ButterSus/KiwiPreview | util/formatter/FormatString.py | FormatString.py | py | 6,736 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Any",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "dataclasses.field",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "typing.Iterable",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "dataclasses.fi... |
38666647232 | from requests.adapters import BaseAdapter
from requests.compat import urlparse, unquote
from requests import Response, codes
import errno
import os
import stat
import locale
import io
from six import BytesIO
class FileAdapter(BaseAdapter):
def __init__(self, set_content_length=True):
super(FileAdapter, self).__init__()
self._set_content_length = set_content_length
def send(self, request, **kwargs):
""" Wraps a file, described in request, in a Response object.
:param request: The PreparedRequest` being "sent".
:returns: a Response object containing the file
"""
# Check that the method makes sense. Only support GET
if request.method not in ("GET", "HEAD"):
raise ValueError("Invalid request method %s" % request.method)
# Parse the URL
url_parts = urlparse(request.url)
# Reject URLs with a hostname component
if url_parts.netloc and url_parts.netloc != "localhost":
raise ValueError("file: URLs with hostname components are not permitted")
resp = Response()
# Open the file, translate certain errors into HTTP responses
# Use urllib's unquote to translate percent escapes into whatever
# they actually need to be
try:
# Split the path on / (the URL directory separator) and decode any
# % escapes in the parts
path_parts = [unquote(p) for p in url_parts.path.split("/")]
# Strip out the leading empty parts created from the leading /'s
while path_parts and not path_parts[0]:
path_parts.pop(0)
# If os.sep is in any of the parts, someone fed us some shenanigans.
# Treat is like a missing file.
if any(os.sep in p for p in path_parts):
raise IOError(errno.ENOENT, os.strerror(errno.ENOENT))
# Look for a drive component. If one is present, store it separately
# so that a directory separator can correctly be added to the real
# path, and remove any empty path parts between the drive and the path.
# Assume that a part ending with : or | (legacy) is a drive.
if path_parts and (
path_parts[0].endswith("|") or path_parts[0].endswith(":")
):
path_drive = path_parts.pop(0)
if path_drive.endswith("|"):
path_drive = path_drive[:-1] + ":"
while path_parts and not path_parts[0]:
path_parts.pop(0)
else:
path_drive = ""
# Try to put the path back together
# Join the drive back in, and stick os.sep in front of the path to
# make it absolute.
path = path_drive + os.sep + os.path.join(*path_parts)
# Check if the drive assumptions above were correct. If path_drive
# is set, and os.path.splitdrive does not return a drive, it wasn't
# reall a drive. Put the path together again treating path_drive
# as a normal path component.
if path_drive and not os.path.splitdrive(path):
path = os.sep + os.path.join(path_drive, *path_parts)
# Use io.open since we need to add a release_conn method, and
# methods can't be added to file objects in python 2.
resp.raw = io.open(path, "rb")
resp.raw.release_conn = resp.raw.close
except IOError as e:
if e.errno == errno.EACCES:
resp.status_code = codes.forbidden
elif e.errno == errno.ENOENT:
resp.status_code = codes.not_found
else:
resp.status_code = codes.bad_request
# Wrap the error message in a file-like object
# The error message will be localized, try to convert the string
# representation of the exception into a byte stream
resp_str = str(e).encode(locale.getpreferredencoding(False))
resp.raw = BytesIO(resp_str)
if self._set_content_length:
resp.headers["Content-Length"] = len(resp_str)
# Add release_conn to the BytesIO object
resp.raw.release_conn = resp.raw.close
else:
resp.status_code = codes.ok
resp.url = request.url
# If it's a regular file, set the Content-Length
resp_stat = os.fstat(resp.raw.fileno())
if stat.S_ISREG(resp_stat.st_mode) and self._set_content_length:
resp.headers["Content-Length"] = resp_stat.st_size
return resp
def close(self):
pass
| JimmXinu/FanFicFare | included_dependencies/requests_file.py | requests_file.py | py | 4,729 | python | en | code | 664 | github-code | 36 | [
{
"api_name": "requests.adapters.BaseAdapter",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "requests.compat.urlparse",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "requests.Response",
"line_number": 36,
"usage_type": "call"
},
{
"api_name... |
2248843568 | import os
import glob
import subprocess
import pandas as pd
from PIL import Image
import datetime
datetime.timedelta.min
def get_date_taken(path):
return Image.open(path)._getexif()[36867]
#enter in the directory of your images in the line below
os.chdir('D:/mgickdemo/images')
cwd = os.getcwd()
#unfortunately you have to use jpg for the exif data to come through. yes, this is dumb.
files = glob.glob('*.jpg')
for i in range(len(files)):
files[i] = files[i].split(".", 1)[0]
#If you're on a nix based system this needs to be changed from 'magick' to 'convert'
str1 = "magick "
str2 = ".jpg -crop 1728x1152 +repage "
str3 = "_%d.jpg"
for i in range(len(files)):
tfiles = str1 + files[i] + str2 + files[i] + str3
subprocess.run(tfiles, shell=True)
subfiles = glob.glob('*_[0-9].jpg')
subfiles = sorted(subfiles)
str4 = " -fuzz 15% -fill black +opaque \"rgb(210,210,20)\" -fuzz 15% -fill white -opaque \"rgb(210,210,20)\" -print \"%[fx:w*h*mean]\" "
#the above line probably needs to be adjusted to capture an appropriate 'green'. Ive been doing this by using the colour picker in photoshop to get RGB
#colourspace values for just one image of the set. both instances of the rgb(x,x,x) need to be changed
#" -fuzz 11% -fill black +opaque \"rgb(162,159,10)\" -fuzz 11% -fill white -opaque \"rgb(162,159,10)\" -print \"%[fx:w*h*mean]\" "
tl = ["t"] * len(subfiles)
namestamps = ["t"] * len(subfiles)
potnumber = ["t"] * len(subfiles)
col_names = ['Timestamp', 'Pixels']
my_df = pd.DataFrame(columns = col_names)
my_df
#get the first time to subtract from subsequent ones using time delta
for i in range(len(subfiles)):
tsfiles = str1 + subfiles[i] + str4 + "res" + subfiles[i]
print(tsfiles)
tl[i] = subprocess.run(tsfiles, shell=True, capture_output=True).stdout
tl[i] = tl[i].decode('utf-8')
namestamps[i] = get_date_taken(subfiles[i])
temp = get_date_taken(subfiles[i])
potnumber[i] = subfiles[i].split("_")[2].split(".")[0]
out = pd.DataFrame(list(zip(tl, namestamps, potnumber)))
out.to_csv("out.csv")
print(tl)
print(namestamps)
print(potnumber)
print(out)
| Owen-Duncan/LeafAreaQuant | Main.py | Main.py | py | 2,123 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.timedelta",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "os.chdir",
"line_... |
26222486814 | import json
from datetime import datetime
from . import calc_func
from .db_queries_source import *
from .connection_to_database import create_connection
# Module-level placeholder tested in send_calc_request's ``finally`` block;
# the empty string is falsy, so the close() branch there never runs as written.
conn = ''
def insert_to_db(city_sender, city_recipient, urgency, type_of_cargo, weight,
                 delivery_type, lenght, width, height, declared_value_rate, is_test, sending_user):
    """Send a Calc request to the CSE API and record it in the HTTP-requests table.

    When ``is_test`` is True the request is sent but nothing is written to
    the database.
    """
    request_time = datetime.now()
    # (RU) "Generate and send the Calc request to the CSE API."
    '''
    Генерация и отправка запроса Calc в API CSE.
    '''
    calc_func.create_request_cse(city_sender=city_sender,
                                 city_recipient=city_recipient,
                                 urgency=urgency,
                                 type_of_cargo=type_of_cargo,
                                 weight=weight,
                                 lenght=lenght,
                                 width=width,
                                 height=height,
                                 delivery_type=delivery_type,
                                 declared_value_rate=declared_value_rate,
                                 is_test=is_test
                                 )
    # (RU) "In test mode the result is not written to the 'HTTP requests' table."
    '''
    Если включена тестовая версия, результат а таблицу 'HTTP-запросы' не записывается.
    '''
    if is_test is True:
        return
    response_waiting_time = str((datetime.now() - request_time).total_seconds())
    connection = create_connection()
    connection.autocommit = True
    # Determine the next id: one past the last id seen, or 1 for an empty table.
    with connection.cursor() as cursor:
        cursor.execute(select_id)
        id_api = cursor.fetchall()
    if id_api == []:
        id_bd = 1
    else:
        id_bd = id_api[-1][-1]+1
    with connection.cursor() as cursor:
        if id_bd > 1:
            cursor.execute(select_request, (id_bd - 1, ))
            select_last_data = cursor.fetchone()
            print("Data select successfully")
    with connection.cursor() as cursor:
        if id_bd == 1:
            cursor.execute(insert_request,
                           (1,
                            json.dumps(calc_func.xml_data, ensure_ascii=False, indent=2),
                            calc_func.ready_response_calc,
                            request_time,
                            response_waiting_time,
                            int(sending_user)))
            print("Insert is successfully")
        elif id_bd > 1:
            cursor.execute(insert_request,
                           (id_bd,
                            json.dumps(calc_func.xml_data, ensure_ascii=False, indent=2),
                            calc_func.ready_response_calc,
                            request_time,
                            response_waiting_time,
                            int(sending_user)))
            print("Insert is successfully")
        cursor.execute(select_response, (id_bd,))
        select_next_data = cursor.fetchone()
        # NOTE(review): when id_bd == 1, ``select_last_data`` was never
        # assigned, so this comparison would raise NameError -- confirm.
        if select_next_data == select_last_data:
            print('No changes')
def send_calc_request(sending_user, city_sender, city_recipient, urgency, type_of_cargo, weight,
                      delivery_type, lenght, width, height, declared_value_rate, is_test):
    """Normalize optional arguments and delegate to ``insert_to_db``.

    ``None`` urgency/delivery/cargo values become empty strings and a missing
    declared-value rate becomes 0 before the request is built.
    """
    _urgency = str(urgency)
    _delivery_type = str(delivery_type)
    _type_of_cargo = str(type_of_cargo)
    if urgency is None:
        _urgency = ''
    if delivery_type is None:
        _delivery_type = ''
    if type_of_cargo is None:
        _type_of_cargo = ''
    if declared_value_rate is None:
        declared_value_rate = 0
    try:
        insert_to_db(sending_user=sending_user,
                     city_sender=city_sender,
                     city_recipient=city_recipient,
                     urgency=_urgency,
                     type_of_cargo=_type_of_cargo,
                     weight=weight,
                     lenght=lenght,
                     width=width,
                     height=height,
                     delivery_type=_delivery_type,
                     declared_value_rate=declared_value_rate,
                     is_test=is_test
                     )
    except Exception as _ex:
        # Best-effort: failures are only printed, never re-raised.
        print("[INFO]Error", _ex)
    finally:
        # NOTE(review): module-level ``conn`` is the empty string (falsy),
        # so this close() branch can never run as written -- confirm.
        if conn:
            conn.close()
            print("[INFO]PostgreSQL connection close.")
| DamirF/IB | database_admin/psg_cse_api/psg_cse_api_tools/cse_to_db.py | cse_to_db.py | py | 4,246 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "datetim... |
19294805910 | import discord
from discord.ext import commands
from cogs.errors import WegbotException
@commands.command(hidden=False, name="pin", brief="Pin a message.", rest_is_raw=True)
@commands.guild_only()
async def pin_that(self, ctx, message_id: int, *, addition=None):
    """ Pins the message with the given ID.
    You may also provide additional text that will appear at the front of the pinned message. """
    self.bot.logger.info(f'{ctx.author} request pin {message_id} in #{ctx.channel}')
    await ctx.trigger_typing()
    try:
        # NOTE(review): ``get_message`` was renamed ``fetch_message`` in
        # discord.py 1.0 -- confirm the pinned library version supports it.
        message = await ctx.channel.get_message(message_id)
        if message.pinned is True:
            raise WegbotException("That message is already pinned")
        # Re-post the message as an embed and pin the re-post; the original
        # message itself is left unpinned.
        embedded = discord.Embed(title=f"Message from {message.author}", description=addition)
        embedded.add_field(name="Original Message", value=message.content, inline=False)
        sent = await ctx.send(embed=embedded)
        await sent.pin()
    except WegbotException as ex:
        self.bot.logger.warning(f'wegbot exception while pinning {message_id}: {ex.message}')
        await ctx.send(f"{ex.message}, {ctx.author.mention}.")
    except discord.errors.NotFound:
        self.bot.logger.warning(f'attempted to pin message {message_id}, not found')
        await ctx.send(f"Couldn't find a message with that ID, {ctx.author.mention}.")
    except Exception as ex:
        # Catch-all boundary: log with traceback and tell the user.
        self.bot.logger.exception(f'unable to pin message {message_id}: {ex}')
        await ctx.send(f"Couldn't pin that, {ctx.author.mention}. Have @ChaoticWeg check the logs.")
| ChaoticWeg/wegbot2-discord | cogs/messaging/pin.py | pin.py | py | 1,566 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "cogs.errors.WegbotException",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "discord.Embed",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cogs.errors.WegbotException",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": ... |
324210528 | import yaml
from airflow.models import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.operators.bash_operator import BashOperator
def run(script):
    """Execute a script from the Airflow runtime directory in this process.

    Fix: the original called ``execFile``, which does not exist in any
    Python version (Python 2 had ``execfile``; Python 3 removed it), so the
    task raised NameError at runtime.
    """
    path = '/root/airflow/runtime/{}'.format(script)
    with open(path) as handle:
        exec(compile(handle.read(), path, 'exec'))
def create_python_task(task, dag):
    """Build a PythonOperator for a task spec and return it.

    Fix: the operator was assigned to ``t`` but never returned, so
    ``parse()`` stored None and its ``set_upstream`` call failed.
    NOTE(review): the ``task`` dict is passed as the operator's first
    positional argument -- confirm that matches the intended Airflow API.
    """
    if 'executor_config' in task:
        t = PythonOperator(task, python_callable=run, op_args=[task['script']],dag=dag,
        executor_config={'KubernetesExecutor': task['executor_config']})
    else:
        t = PythonOperator(task, python_callable=run, op_args=[task['script']],dag=dag)
    return t
def create_bash_task(task, dag):
    """Build a BashOperator for a task spec and return it.

    Fix: the operator was assigned to ``t`` but never returned, so
    ``parse()`` stored None and its ``set_upstream`` call failed.
    NOTE(review): the ``task`` dict is passed as the operator's first
    positional argument -- confirm that matches the intended Airflow API.
    """
    if 'executor_config' in task:
        t = BashOperator(task, bash_command=task['bash_command'],dag=dag,
        executor_config={'KubernetesExecutor': task['executor_config']})
    else:
        t = BashOperator(task, bash_command=task['bash_command'],dag=dag)
    return t
def parse(stream):
    """Build an Airflow DAG from a YAML description and register it in globals().

    NOTE(review): ``yaml.load`` without an explicit ``Loader`` is deprecated
    (and unsafe on untrusted input); PyYAML 6 requires the Loader argument.
    """
    content = yaml.load(stream)
    args = {'owner': content['owner'], 'start_date': content['start_date']}
    dag_id = content['dag_id']
    dag = DAG(dag_id, schedule_interval=content['schedule_interval'], default_args = args)
    tasks = content['tasks']
    t = {}
    # Map operator names to the factory that builds the matching task.
    operator_map = {"python": create_python_task, "bash": create_bash_task}
    for task in tasks:
        # NOTE(review): if ``task`` is a dict it is unhashable and cannot be a
        # key of ``t`` (and the factories return None as originally written),
        # so this loop looks broken -- confirm the intended task schema.
        t[task] = operator_map[task['operator']](task, dag)
        if 'upstream' in task:
            t[task].set_upstream(t[task['upstream']])
    # Registering the DAG under its id lets Airflow's DagBag discover it.
    globals()[dag_id] = dag
if __name__ == '__main__':
    # CLI entry point: parse the YAML file given as the first argument.
    from sys import argv
    input_file = argv[1]
    with open(input_file, 'r') as fin:
        parse(fin)
| Nanjo-Naoto/450 | parser.py | parser.py | py | 1,565 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "airflow.operators.python_operator.PythonOperator",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "airflow.operators.python_operator.PythonOperator",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "airflow.operators.bash_operator.BashOperator",
... |
34715529563 | from torchvision import datasets, transforms
from base import BaseDataLoader
from torch.utils.data import Dataset, ConcatDataset
from data_loader import EcalDataIO
import torch
import random
from pathlib import Path
import numpy as np
from collections import Counter
# Row count of the source CSV -- presumably matches the accompanying data file; TODO confirm.
CSV_LEN = 25410
# ------------------------------------ DATALOADERS ------------------------------------- #
class CE_Loader(BaseDataLoader):
    """
    Generates a DL from the existing files - concatenates the chunk_num of files.
    Builds one Continous_Energy_Data dataset per chunk index in
    [chunk_low_num, chunk_high_num) and concatenates them.
    """
    def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_workers=1, training=True,
                 chunk_low_num=0, chunk_high_num=1, partial_change=None, layer_change_lim=None):
        trsfm = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ]) # Not in use for now
        # data_dir is resolved relative to the project root (two levels up).
        self.data_dir = Path(__file__).parent.parent / Path(data_dir)
        self.partial_change = partial_change
        dl = []
        for i in range(chunk_low_num, chunk_high_num):
            edep_file = self.data_dir / f"signal.al.elaser.edeplist{i}.mat"
            en_file = self.data_dir / f"signal.al.elaser.energy{i}.mat"
            # xy_file = self.data_dir / f"signal.al.elaser.trueXY{i}.mat"
            # NOTE: xy_file currently points at the energy file; the trueXY
            # variant above is commented out.
            xy_file = self.data_dir / f"signal.al.elaser.energy{i}.mat"
            dataset = Continous_Energy_Data(edep_file, xy_file, status='train', energy=0, en_file=en_file,
                                            partial_change=partial_change, layer_change_lim=layer_change_lim)
            dl.append(dataset)
        self.dataset = ConcatDataset(dl)
        super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
class moment_loader(BaseDataLoader):
    """
    Generates a DL from the existing files - concatenates the chunk_num of files.

    Loads a pre-serialized dataset (train.pt / test.pt) from data_dir and
    hands it to the BaseDataLoader machinery.
    """

    def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_workers=1, training=True,
                 chunk_low_num=0, chunk_high_num=1, partial_change=None, layer_change_lim=None):
        # Fix: removed the torchvision transform pipeline that was built and
        # discarded on every construction, and replaced `training == True`
        # with a plain truthiness test. The chunk/partial arguments are kept
        # only for signature compatibility with the sibling loaders.
        if training:
            self.dataset = torch.load(Path(data_dir) / "train//train.pt")
        else:
            self.dataset = torch.load(Path(data_dir) / "test//test.pt")
        # self.dataset = torch.load(Path(data_dir) / "train//train.pt")
        print("Dataset len: ", len(self.dataset))
        super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
class rand_loader(BaseDataLoader):
    """
    Generates a DL from the existing files - concatenates the chunk_num of files.

    Loads the serialized dataset only to measure its length, then substitutes
    a Random_DS of the same size (random-input baseline experiment).
    """

    def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_workers=1, training=True,
                 chunk_low_num=0, chunk_high_num=1, partial_change=None, layer_change_lim=None):
        # Fix: removed the unused torchvision transform pipeline and the
        # `training == True` comparison; the real data is loaded into a local
        # (not self.dataset) since it is used only to size the random stand-in.
        if training:
            loaded = torch.load(Path(data_dir) / "train//train.pt")
        else:
            loaded = torch.load(Path(data_dir) / "test//test.pt")
        # self.dataset = ConcatDataset([self.dataset, self.rand_ds])
        self.dataset = Random_DS(len(loaded))
        # self.dataset = torch.load(Path(data_dir) / "train//train.pt")
        print("Dataset len: ", len(self.dataset))
        super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
# __________________________________________DATASETS_______________________________________________________________
# Fitting for the new DS for continous energies
class Continous_Energy_Data(Dataset):
    """Dataset for continuous energies.

    Each item is (volume tensor [1, 110, 11, 21], target, energy); when an
    energy file is supplied the target is sum(entry) and the energy is the
    mean of the per-shower energies for that event.
    """

    def __init__(self, en_dep_file, xy_file, transform=None, status='train', energy=0, en_file=None,
                 partial_change=None, layer_change_lim=None):
        self.en_dep = EcalDataIO.ecalmatio(en_dep_file)  # Dict with 100000 samples {(Z,X,Y):energy_stamp}
        self.entry_dict = EcalDataIO.xymatio(xy_file)
        self.initial_energy = energy
        self.num_showers = 1
        self.energies = EcalDataIO.energymatio(en_file)
        self.partial_change = partial_change
        self.layer_change_lim = layer_change_lim

        # Eliminate multiple numbers of some kind
        # del_list = []
        # for key in self.energies:
        #     if 8 > len(self.energies[key]) > 4:
        #         del_list.append(key)
        # for d in del_list:
        #     del self.energies[d]
        #     del self.en_dep[d]
        #     del self.entry_dict[d]

    def __len__(self):
        return len(self.en_dep)
        # return 10

    # Randomly change values of sample to 0 - amount of num*(1-partial_change)
    def change_sample(self, sample: dict):
        """Zero out a random partial_change-fraction of cells, skipping
        layers below layer_change_lim. Mutates and returns *sample*."""
        indices = np.random.choice(np.arange(len(sample.keys())), replace=False,
                                   size=int(len(sample.keys()) * self.partial_change))
        for idx in indices:
            k = list(sample.keys())[idx]
            z, x, y = k
            # Layers below the limit are left untouched.
            if z < self.layer_change_lim:
                continue
            sample[k] = 0
        return sample

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        d_tens = torch.zeros((110, 11, 21))  # Formatted as [x_idx, y_idx, z_idx]
        key = list(self.en_dep.keys())[idx]
        tmp = self.en_dep[key]
        if self.partial_change != 1:  # 1 means No changing the data. partial_change < 1 - change this percentage of the data by the wanted function
            tmp = self.change_sample(tmp)
        # for z, x, y in tmp.keys():
        for z, x, y in tmp:
            d_tens[x, y, z] = tmp[(z, x, y)]
        entry = torch.Tensor(self.entry_dict[key])
        # true_xy = PositionConverter.PadPosition(entry[0].item(), entry[1].item())
        d_tens = d_tens.unsqueeze(0)  # Only in conv3d
        sample = (d_tens, entry, self.initial_energy)
        if self.energies:
            # sample = (d_tens, entry, self.energies[key][0])
            # Target = summed entry; energy = mean shower energy for this event.
            sample = (d_tens, sum(entry), sum(self.energies[key]) / len(self.energies[key]))
        # if sample[1].shape[0] == 4:
        #     print("hi")
        # print(sample[0].shape, sample[1].shape, sample[2].shape)
        return sample
class moment_energy_Data(Dataset):
    """Dataset yielding (volume, tensor of energy moments, shower count).

    Filters events by shower multiplicity and computes the first
    ``moment`` statistical moments of each event's shower-energy list.
    """

    def __init__(self, en_dep_file, en_file, transform=None, status='train', moment=1, min_shower_num=0,
                 max_shower_num=10000):
        self.en_dep = EcalDataIO.ecalmatio(en_dep_file)  # Dict with 100000 samples {(Z,X,Y):energy_stamp}
        self.energies = EcalDataIO.energymatio(en_file)
        self.moment = moment

        # Eliminate multiple numbers of some kind
        # Keep only events with multiplicity in [min_shower_num, max_shower_num).
        if min_shower_num > 0:
            del_list = []
            for key in self.energies:
                if len(self.energies[key]) < min_shower_num or len(self.energies[key]) >= max_shower_num:
                    del_list.append(key)
            for d in del_list:
                del self.energies[d]
                del self.en_dep[d]

    def __len__(self):
        return len(self.en_dep)

    def calculate_moment(self, moment_num, en_list, normalize=True):
        """Return a list of the first *moment_num* moments of *en_list*.

        NOTE(review): despite the commented-out centered forms, these are
        raw moments about zero; for orders >= 3 each term is divided by
        second**order when *normalize* is True — confirm this normalization
        is the intended one.
        """
        res = []
        if not torch.is_tensor(en_list):
            en_list = torch.Tensor(en_list)
        first = torch.mean(en_list)
        res.append(torch.mean(en_list))
        if moment_num == 1:
            return res
        l = []
        for val in en_list:
            # l.append((val - first) ** 2)
            l.append(val ** 2)
        second = torch.mean(torch.Tensor(l))
        res.append(second)
        if moment_num == 2:
            return res
        for i in range(3, moment_num + 1):
            l = []
            for val in en_list:
                if normalize:
                    # t = (val - first) ** i
                    t = (val) ** i
                    s = second ** i
                    r = t / s
                    l.append(r)
                else:
                    # t = (val - first) ** i
                    t = val ** i
                    l.append(t)
            tmp = torch.mean(torch.Tensor(l))
            res.append(tmp)
        return res

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        d_tens = torch.zeros((110, 11, 21))  # Formatted as [x_idx, y_idx, z_idx]
        key = list(self.en_dep.keys())[idx]
        tmp = self.en_dep[key]
        # for z, x, y in tmp.keys():
        for z, x, y in tmp:
            d_tens[x, y, z] = tmp[(z, x, y)]
        d_tens = d_tens.unsqueeze(0)  # Only in conv3d
        en_list = torch.Tensor(self.energies[key])
        num_showers = len(en_list)
        moment = self.calculate_moment(self.moment, en_list, True)
        # moment = self.calculate_moment(2, en_list)
        # NOTE(review): moment[2] below requires self.moment >= 3; with the
        # default moment=1 this raises IndexError — confirm callers always
        # pass moment >= 3.
        mean = moment[0]
        var = moment[1]
        third = moment[2]
        fano = var / mean  # NOTE(review): fano (and third/sample) are unused.
        # en_mean =torch.mean(en_list)
        # en_sum = torch.sum(en_list)
        # sample = (d_tens, mean, var, third, num_showers)
        sample = en_list
        return d_tens, torch.Tensor(moment), num_showers
class Bin_energy_data(Dataset):
    """Dataset yielding (volume, normalized energy-bin histogram, shower count, idx).

    Each event's shower energies are histogrammed onto a fixed energy grid
    and normalized to sum to 1. A superposition test (currently ACTIVE)
    mixes one randomly chosen extra event into every sample.
    """

    def __init__(self, en_dep_file, en_file, transform=None, status='train', moment=1, min_shower_num=0,
                 max_shower_num=10000, file=0):
        self.en_dep = EcalDataIO.ecalmatio(en_dep_file)  # Dict with 100000 samples {(Z,X,Y):energy_stamp}
        self.energies = EcalDataIO.energymatio(en_file)
        self.moment = moment
        self.file = file

        # Eliminate multiple numbers of some kind
        # Keep only events with multiplicity in [min_shower_num, max_shower_num).
        if min_shower_num > 0:
            del_list = []
            for key in self.energies:
                if len(self.energies[key]) < min_shower_num or len(self.energies[key]) >= max_shower_num:
                    del_list.append(key)
            for d in del_list:
                del self.energies[d]
                del self.en_dep[d]

    def __len__(self):
        return len(self.en_dep)

    def calculate_moment(self, moment_num, en_list, normalize=True):
        """Return a list of the first *moment_num* raw moments of *en_list*
        (same implementation as moment_energy_Data.calculate_moment)."""
        res = []
        if not torch.is_tensor(en_list):
            en_list = torch.Tensor(en_list)
        first = torch.mean(en_list)
        res.append(torch.mean(en_list))
        if moment_num == 1:
            return res
        l = []
        for val in en_list:
            # l.append((val - first) ** 2)
            l.append(val ** 2)
        second = torch.mean(torch.Tensor(l))
        res.append(second)
        if moment_num == 2:
            return res
        for i in range(3, moment_num + 1):
            l = []
            for val in en_list:
                if normalize:
                    # t = (val - first) ** i
                    t = (val) ** i
                    s = second ** i
                    r = t / s
                    l.append(r)
                else:
                    # t = (val - first) ** i
                    t = val ** i
                    l.append(t)
            tmp = torch.mean(torch.Tensor(l))
            res.append(tmp)
        return res

    def random_sample_for_addition(self, data, n, num_samples):
        """Superpose *num_samples* randomly chosen events onto *data*.

        Returns (summed volume, combined shower count). Prints the original,
        added, and combined counts for inspection.
        """
        # en_dep = EcalDataIO.ecalmatio("C:\\Users\\elihu\\PycharmProjects\\LUXE\\LUXE-project-master\\data\\raw"
        #                               "\\signal.al.elaser.IP05.edeplist.mat")
        # energies = EcalDataIO.energymatio("C:\\Users\\elihu\\PycharmProjects\\LUXE\\LUXE-project-master\\data\\raw"
        #                                   "\\signal.al.elaser.IP05.energy.mat")
        samples = random.sample(list(self.en_dep.keys()), num_samples)
        # while True:
        #     if len(self.energies[samples[0]]) != 1:
        #         samples = random.sample(list(self.en_dep.keys()), num_samples)
        #     else:
        #         break
        sample = torch.zeros((110, 11, 21))  # Formatted as [x_idx, y_idx, z_idx]
        N = 0
        for key in samples:
            N += len(self.energies[key])
            tmp = self.en_dep[key]
            # sum the samples:
            for z, x, y in tmp:
                sample[x, y, z] = sample[x, y, z] + tmp[(z, x, y)]
        print(f"Orig - {n}, Add - {N}")
        data = data + sample
        n = n + N
        print(f"sum - {n}")
        return data, n

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        d_tens = torch.zeros((110, 11, 21))  # Formatted as [x_idx, y_idx, z_idx]
        key = list(self.en_dep.keys())[idx]
        tmp = self.en_dep[key]
        # for z, x, y in tmp.keys():
        for z, x, y in tmp:
            d_tens[x, y, z] = tmp[(z, x, y)]

        ## ONLY 2 LAYER ON Y AXIS TRAINING
        # d_tens = d_tens[:, 4:7, 0:10]
        # d_tens = torch.transpose(d_tens, 0, 1)
        #########################################

        ############ Layer Removal Experiment ############
        # Zerofi Z layers
        # for i in range(0, 7):
        #     d_tens[:, :, (20 - i)] = 0
        # Zerofi Y layers
        # for i in range(0, 6):
        #     d_tens[:, (10 - i), :] = 0
        #     d_tens[:, i, :] = 0
        ###################################

        ########## Alpha Experiment #########
        # alpha = 1
        #
        # d_tens = np.cos(np.deg2rad(alpha)) * d_tens + np.sin(np.deg2rad(alpha)) * torch.rand(torch.Size([110, 11, 21]))
        # d_tens = np.cos(np.deg2rad(alpha)) * d_tens + np.sin(np.deg2rad(alpha)) * torch.rand(torch.Size([110, 3, 10]))
        # d_tens = (1-alpha) * d_tens + (alpha) * torch.rand(torch.Size([110, 11, 21]))
        ####################

        ######### Normalization #############
        # if self.file == 3:
        #     d_tens = (d_tens - 0.0935) / 1.4025
        #########################################

        en_list = torch.Tensor(self.energies[key])
        num_showers = len(en_list)

        # Addition of samples for superposition test
        # NOTE(review): this line is ACTIVE (not commented out) — every
        # sample has one random extra event mixed in; confirm intentional.
        d_tens, num_showers = self.random_sample_for_addition(d_tens, num_showers, 1)

        d_tens = d_tens.unsqueeze(0)  # Only in conv3d

        # final_list = [0] * 10
        final_list = [0] * 20
        # NOTE(review): bin_list is assigned twice; only the second (10-point)
        # grid takes effect while final_list keeps 20 slots — confirm which
        # binning is intended.
        bin_list = np.linspace(0, 13, 20)
        bin_list = np.linspace(0, 13, 10)
        binplace = np.digitize(en_list, bin_list)
        bin_partition = Counter(binplace)
        for k in bin_partition.keys():
            final_list[int(k) - 1] = bin_partition[k]
        # Normalize the histogram to sum to 1.
        n = sum(final_list)
        final_list = [f / n for f in final_list]
        # final_list = [f/100000 for f in final_list]
        return d_tens, final_list, num_showers, idx
class Random_DS(Dataset):
    """Synthetic dataset of random volumes (random-input baseline).

    Every item is (random volume [1, 110, 11, 21], all-ones bin vector of
    length 20, its sum, dummy shower count 0.0).
    """

    # Generate random samples
    def __init__(self, len):
        # Number of synthetic samples this dataset reports.
        self.len = len

    def __len__(self):
        return self.len

    def __getitem__(self, idx):
        volume = torch.rand(torch.Size([1, 110, 11, 21]))
        # Constant "histogram": twenty ones, so its sum is always 20.
        bin_vector = torch.ones(20)
        return volume, bin_vector, torch.sum(bin_vector), 0.
| elihusela/LUXE-project-master | data_loader/data_loaders_backup.py | data_loaders_backup.py | py | 15,351 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "base.BaseDataLoader",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 24,
"usage_type": "name"
},
{
"api_nam... |
40631353290 | import json
from odata import ODataHandler
def test_service_endpoint(service, endpoint="", urlparams=None, sap_client='100', print_result=False):
    """Simple test function for odata service.

    Issues a GET against /<service>[/<endpoint>] with sap-client plus any
    caller-supplied query parameters, optionally pretty-prints the JSON
    response, and returns the decoded object.
    """
    # Fix: the original used a mutable default argument (urlparams={}).
    if urlparams is None:
        urlparams = {}
    odatahandler = ODataHandler()
    slash = "" if endpoint == "" else "/"
    service_endpoint = "/%s%s%s" % (service, slash, endpoint)
    # Merge caller parameters over the mandatory sap-client parameter.
    urlparams0 = {'sap-client': sap_client}
    for p in urlparams:
        urlparams0[p] = urlparams[p]
    resp = odatahandler.http_get(service_endpoint, urlparams=urlparams0)
    # print('status: %d' % resp.status_code)
    jsonobj = json.loads(resp.content)
    if print_result:
        print(json.dumps(jsonobj, indent=2))
    return jsonobj
if __name__ == '__main__':
    # Smoke-test a few OData services against the configured backend.
    # some example calls:
    # get endpoints for robcoewm service
    test_service_endpoint("zewm_robco_srv");
    # get all open warehouse tasks
    test_service_endpoint("zewm_robco_srv", "OpenWarehouseTaskSet")
    # get all storage bins
    test_service_endpoint("zewm_robco_srv", "StorageBinSet", print_result = False)
    # get endpoint for the md_product_op_srv service
    test_service_endpoint("md_product_op_srv", print_result = False)
    # get information about products
    test_service_endpoint("md_product_op_srv", "C_Product",
                          urlparams = {
                              "$top" : 10,
                          }, print_result = False)
    test_service_endpoint("billofmaterialv2_srv","I_Material",
                          urlparams = {
                              "$top": 10,
                          }, print_result = True)
| asumansuenbuel/ewm-access | src/test_ewm_connection.py | test_ewm_connection.py | py | 1,617 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "odata.ODataHandler",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 19,
"usage_type": "call"
}
] |
7814426661 | from django.core.cache import cache
from kavenegar import *
from redis.exceptions import ConnectionError as RedisServerConnectionError
KEY_KAVENEGAR = '4442494F6A77766776746B3444575466373961693741335956544F6B45683669556B6C7731493538534A413D'
SENDER = '1000596446'
def send_smd(code, phone):
    """Send the one-time login *code* to *phone* via the Kavenegar SMS API.

    NOTE(review): the name looks like a typo for send_sms; kept unchanged
    because callers elsewhere may depend on it.
    """
    api = KavenegarAPI(KEY_KAVENEGAR)
    params = {'sender': SENDER, 'receptor': phone, 'message': "Use " + str(code) + " to login your account."}
    api.sms_send(params)
def set_otp_cache(team_id, code):
    """Cache the OTP *code* under the team id for 50 seconds and return it.

    Raises:
        RedisServerConnectionError: propagated unchanged if the cache
            backend is unreachable.
    """
    try:
        cache.set(key=str(team_id), value={'code': str(code)}, timeout=50)
    except RedisServerConnectionError:
        # Fix: `raise RedisServerConnectionError` constructed a fresh, empty
        # exception, discarding the original message and traceback; a bare
        # `raise` re-raises the caught exception intact.
        raise
    return code
| atefesharifi/login-register-and-dashboard | common/utilities.py | utilities.py | py | 683 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.core.cache.cache.set",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.core.cache.cache",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "redis.exceptions.ConnectionError",
"line_number": 18,
"usage_type": "name"
},
{
... |
31454265122 | import argparse
import subprocess
import os
# Create argument parser
# Build the CLI: required project/tag/dates, optional model configuration.
# Create argument parser
parser = argparse.ArgumentParser(description='Create and build base UFS case')
parser.add_argument("--project", default=None, help='Project to charge', required=True)
parser.add_argument("--tag", default=None, help='Model tag', required=True)
parser.add_argument("--dates", default=None, nargs="+", help='List of start dates', required=True)
parser.add_argument("--compset", default="UFS_S2S", help='Model compset')
parser.add_argument("--res", default="C384_t025", help='Model resolution')
parser.add_argument("--driver", default="nuopc", help='Model driver')
parser.add_argument("--options", default='--run-unsupported', help='Other options')

# Get case variables
args = parser.parse_args()
tag = args.tag
dates = args.dates
compset = args.compset
res = args.res
driver = args.driver
project = args.project
options = args.options

# Fixed values
home=os.environ.get("HOME")
wallclock = "00:30:00"
rlen="1"

# Create list of cases (one cloned case per start date).
caselist={init:"ufs.s2s."+res+"."+init+"."+tag for init in dates}
# Parse initial dates (YYYYMMDD) to create reference dates (YYYY-MM-DD).
reflist={init:init[0:4]+"-"+init[4:6]+"-"+init[6:8] for init in dates}
print(reflist)

for init in dates:
    # Go to cime/scripts directory in UFSCOMP
    os.chdir(home+"/UFSCOMP."+tag+"/cime/scripts")
    # Clone the base build into new cases
    # NOTE(review): this overwrites the --options CLI value with '--keepexe'
    # on every iteration, making the --options argument a no-op — confirm.
    options = '--keepexe'
    rc = subprocess.run(["./create_clone",
                         "--case", caselist[init],
                         "--clone", "build_base",
                         options])
    os.chdir(home+"/UFSCOMP."+tag+"/cime/scripts/"+caselist[init])
    rc = subprocess.run(["./case.setup"])

    # Define xmlchanges applied to the cloned case.
    xmlchanges = ["RUN_REFDATE="+reflist[init],
                  "RUN_STARTDATE="+reflist[init],
                  "JOB_WALLCLOCK_TIME="+wallclock,
                  "DOUT_S=FALSE",
                  "STOP_OPTION=nhours",
                  "STOP_N="+rlen]
    rc = [subprocess.run([os.getcwd()+"/xmlchange", xmlchange]) for xmlchange in xmlchanges]

    # Set path to case ICs: point the machine file at this date's inputs.
    with open(os.getcwd()+"/env_mach_specific.xml", 'r') as f:
        filedata = f.read()
    filedata = filedata.replace("20120101",init)
    with open(os.getcwd()+"/env_mach_specific.xml", 'w') as f:
        f.write(filedata)

    # Set cice IC
    with open(os.getcwd()+"/user_nl_cice", "a") as f:
        f.write("ice_ic = \"$ENV{UGCSINPUTPATH}/cice5_model.res_"+init+"00.nc\"\n")

    # Set output options (6-hourly history, no high-frequency output).
    with open(os.getcwd()+"/user_nl_fv3gfs", "a") as f:
        f.write("nfhout = 6\n")
        f.write("nfhmax_hf = 0\n")
        f.write("nfhout_hf = 0\n")
        f.write("fhzero = 6.0\n")
        f.write("fdiag = 6.0\n")
        f.write("fhout = 6.0\n")

    # Submit run
    #rc = subprocess.run([os.getcwd()+"/case.submit"])
| benjamin-cash/ufs_utils | ufs_cold_start.py | ufs_cold_start.py | py | 2,759 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.chdir",
... |
22280407323 | from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.http import JsonResponse
# Create your views here.
from server.settings import TENCENT_KEY
from app01.models import User
def index(request):
    """Landing page: redirect logged-in sessions to /info/, else render index.html."""
    is_login = request.session.get('is_login', None)
    print(f'is_login: {is_login}')
    if is_login:
        return redirect('/info/')
    return render(request, 'index.html')
# 账户注册
def reg(request):
    """Create a User from POSTed username/pwd and redirect to the index page.

    NOTE(review): the password is persisted in plaintext — this should use
    a hashed scheme (e.g. Django's auth framework).
    """
    username = request.POST.get('username', None)
    pwd = request.POST.get('pwd', None)
    print(f'{username} - {pwd}')
    if username and pwd:
        # User.objects.create(username=username, pwd=pwd)
        new_user = User(username=username, pwd=pwd)
        new_user.save()
        return redirect('/index/')
    else:
        return HttpResponse('注册错误')
# 账户登录
def login(request):
    """Authenticate POSTed credentials against the User table and start a session.

    NOTE(review): passwords are compared in plaintext and the generic error
    string is returned for every failure path.
    """
    if request.method == 'POST':
        username = request.POST.get('username', None)  # attribute name 'POST' must be uppercase
        pwd = request.POST.get('pwd', None)
        print(f'{username} - {pwd}')
        if username and pwd:
            res = User.objects.filter(username=username, pwd=pwd).first()
            # user_list = list(User.objects.all().values())
            # user_list = User.get_all()
            # print(user_list)
            print(res)
            if res:
                request.session['is_login'] = True
                # request.session.set_expiry(7 * 24 * 3600)  # set the session to expire one week later
                return redirect('/info/')
            else:
                return HttpResponse('登录错误')
        else:
            return HttpResponse('登录错误')
    else:
        return HttpResponse('登录错误')
# 所有账户信息
def info(request):
    """Show the full user list to logged-in visitors; bounce others to /index/."""
    is_login = request.session.get('is_login', None)
    if is_login:
        print(TENCENT_KEY)
        user_list = list(User.objects.all().values())
        print(user_list)
        return render(request, 'info.html', {'user_list': user_list})
    return redirect('/index/')
# 注销登录
def signout(request):
    """Flush the entire session (logout) and report success as JSON."""
    print('signout')
    request.session.flush()
    # del request.session['is_login']
    #request.session.clear()
    return JsonResponse({'msg':'success'})
# CBV TEST
from django.views import View
class test(View):
    """Demo class-based view: GET stores a fixed user in the session, POST echoes."""

    def get(self, request, *args, **kwargs):
        # Custom logic goes here.
        request.session['user'] = 'u1'
        return HttpResponse(f"user:u1, session_id:{request.session.session_key}")

    def post(self, request, *args, **kwargs):
        # Custom logic goes here.
        return HttpResponse("post 这是自定义视图的结果")
| LincolnBurrows/my-wechat-mini-program | server/app01/views.py | views.py | py | 2,661 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.redirect",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "app01.models.User",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "d... |
28418721166 | # Implementation of Selenium WebDriver with Python using PyTest
import pytest
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
import sys
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from time import sleep
import random
from faker import Faker # for random data input
fake = Faker()
test_users = [
"SELENIUM_TEST",
"Lois_Lane",
"Clark_Kent",
"Jenny_Flex",
]
DELAY = 2
def signin(driver, username, password):
    """Log in through the UI at /home/signin/ with the given credentials.

    On any failure, prints a BDD-style (Feature/Given/When/Then) report
    to stdout instead of raising.
    """
    try:
        driver.find_element(by=By.XPATH, value='//a[@href="'+ "/home/signin/" +'"]').click()
        driver.find_element(by=By.XPATH, value='//input[@name="'+ "username" +'"]').send_keys(username)
        driver.find_element(by=By.XPATH, value='//input[@name="'+ "password" +'"]').send_keys(password)
        driver.find_element(by=By.XPATH, value='//button[@type="'+ "submit" +'"]').submit()
    except Exception as e:
        print('\n\n\
        Feature: {}\n\
        Given conditions: {}\n\
        When: {}\n\
        Then: {}'\
        .format(
        'https://rateer.pythonanywhere.com/home/signin/',
        'Attempting to input',
        'Clicking submit',
        'Test Failed! Details:'+str(e)))
def find_friend(driver, username):
    """Search for *username* on the Find Friends page and check it appears.

    Prints BDD-style reports on failure (inner: user missing from results;
    outer: form interaction failed) rather than raising.
    """
    # Find friend
    try:
        driver.find_element(by=By.XPATH, value='//a[@href="'+ "/friends/search/" +'"]').click()
        driver.find_element(by=By.XPATH, value='//input[@name="'+ "queryname" +'"]').send_keys(username)
        driver.find_element(by=By.XPATH, value='//button[@type="'+ "submit" +'"]').submit()
        try:
            ui_res = driver.find_element(by=By.XPATH, value='//b[contains(text(), \'' + username + '\')]').text
        except Exception as e:
            print('\n\n\
            Feature: {}\n\
            Given conditions: {}\n\
            When: {}'\
            .format(
            'https://rateer.pythonanywhere.com/friends/find/',
            'Attempting to Find Friends',
            'Clicking submit'))
            print('\
            Then: Test Failed -- User Not Found-- {}\n'\
            .format(
            str(e)))
    except Exception as e:
        print('\n\n\
        Feature: {}\n\
        Given conditions: {}\n\
        When: {}\n\
        Then: {}'\
        .format(
        'https://rateer.pythonanywhere.com/friends/find/',
        'Attempting to input data',
        'Clicking submit',
        'Test Failed! Details:' + str(e)))
def view_profile(driver, username):
    """Open *username*'s profile page by clicking their profile link.

    Uses a JavaScript click to avoid "element not interactable" failures.
    Prints a BDD-style report on failure instead of raising.
    """
    try:
        # Fix: removed the unused local `msg = fake.sentence()` — it only
        # consumed the Faker generator and was never referenced.
        element = driver.find_element(by=By.XPATH, value='//a[@href="'+ "/friends/" + username +'/"]')
        driver.execute_script("arguments[0].click();", element)
    except Exception as e:
        print('\n\n\
        Feature: {}\n\
        Given conditions: {}\n\
        When: {}\n\
        Then: {}'\
        .format(
        'https://rateer.pythonanywhere.com/friends/' + username + '/',
        'Attempting to find friend',
        'Clicking submit',
        'Test Failed! Details:' + str(e)))
def rate(driver, username, rating):
    """Rate *username* with the integer *rating* via the rating link.

    Prints a BDD-style pass report on success and a failure report on any
    exception; never raises.
    """
    try:
        element = driver.find_element(by=By.XPATH, value='//a[@href="'+ "/friends/rate/" + username + '/' + str(rating) + '/"]')
        # JS click avoids overlay/interactability issues.
        driver.execute_script("arguments[0].click();", element)
        print('\n\n\
        Feature: {}\n\
        Given conditions: {}\n\
        When: {}\n\
        Then: Test Passed\n'\
        .format(
        '/friends/rate/' + username + '/' + str(rating) + '/',
        'Attempting to rate friend',
        'Clicking submit',))
    except Exception as e:
        print('\n\n\
        Feature: {}\n\
        Given conditions: {}\n\
        When: {}\n\
        Then: {}'\
        .format(
        '/friends/rate/' + username + '/' + str(rating) + '/',
        'Attempting to rate friend',
        'Clicking submit',
        'Test Failed! Details:' + str(e)))
if __name__ == '__main__':
    # Launch Chrome via webdriver-manager and open the site under test;
    # abort the whole run if the browser cannot start.
    try:
        driver = webdriver.Chrome(ChromeDriverManager().install())
        driver.get('https://rateer.pythonanywhere.com/')
        driver.maximize_window()
    except Exception as e:
        print(str(e))
        exit(1)

    print("Starting test [Rate]")
    # Scenario: sign in, find a friend, open their profile, rate them 4.
    signin(driver, test_users[0], test_users[0])
    find_friend(driver, "Jenny_Flex")
    view_profile(driver, "Jenny_Flex")
    rate(driver, "Jenny_Flex", 4)
    print("Finished test [Rate]")
    sleep(DELAY)
{
"api_name": "faker.Faker",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 23,
"usage_type": "name"
},
{... |
36748147361 | import string
import requests
from fudge import __version__
from fudge.utils import FudgeException
def get_repository_name(repo_url):
    """Return the validated repository name extracted from *repo_url*.

    Strips a trailing '/' and a literal '.git' suffix, then validates that
    the remaining name contains only letters, digits, '-' and '_'.

    Raises:
        FudgeException: if the name contains any other character.
    """
    repo_url = repo_url.rstrip('/')
    repo_name = repo_url.split('/')[-1]
    # Bug fix: rstrip('.git') strips any trailing run of the characters
    # '.', 'g', 'i', 't' (e.g. 'tig.git' -> '', 'dot' -> 'do'), not the
    # literal suffix; remove the suffix explicitly instead.
    if repo_name.endswith('.git'):
        repo_name = repo_name[:-len('.git')]

    whitelist = set(string.ascii_letters + string.digits + '-_')
    if not all(char in whitelist for char in repo_name):
        raise FudgeException('invalid repository name')

    return repo_name
def discover_refs(repo_url, service):
    """Perform Git smart-HTTP ref discovery against *repo_url* for *service*.

    Returns the object id that HEAD points at.

    Raises:
        FudgeException: on HTTP errors or a malformed advertisement.
    """
    url = '{}/info/refs'.format(repo_url)
    headers = {
        'User-Agent': 'fudge/{}'.format(__version__),
    }
    params = {
        'service': service,
    }
    response = requests.get(url, headers=headers, params=params)
    if response.status_code not in (200, 304):
        raise FudgeException('repository {} does not exist'.format(repo_url))

    content_type = response.headers.get('Content-Type')
    if content_type != 'application/x-{}-advertisement'.format(service):
        raise FudgeException('invalid Content-Type: {}'.format(content_type))

    lines = iter(response.text.split('\n'))
    # First pkt-line must announce the requested service.
    service_line = parse_pkt_line(next(lines))
    if service_line != '# service={}'.format(service):
        raise FudgeException('invalid service line')

    # Second pkt-line: HEAD ref plus NUL-separated capability list.
    info = parse_pkt_line(next(lines))
    head, capabilities = info.split('\0')
    head_object_id = head.split()[0]
    capabilities = capabilities.split()

    # Remaining pkt-lines advertise refs until a flush-pkt (empty payload).
    # NOTE(review): the refs dict is built but discarded — only the HEAD
    # object id is returned.
    refs = {}
    while True:
        ref_line = parse_pkt_line(next(lines))
        if len(ref_line) == 0:
            break
        object_id, ref = ref_line.split()
        refs[ref] = object_id

    return head_object_id
def upload_pack(repo_url):
    """Fetch a pack file containing the history reachable from HEAD.

    Returns (pack_bytes, head_object_id).

    Raises:
        FudgeException: on HTTP errors or protocol violations.
    """
    repo_url = repo_url.rstrip('/')
    service = 'git-upload-pack'
    head_object_id = discover_refs(repo_url, service)

    # Request exactly the HEAD commit: "want <id>", flush-pkt, "done".
    command = 'want {}'.format(head_object_id)
    request = pkt_line(command)
    request += pkt_line()
    request += pkt_line('done')

    url = '{}/{}'.format(repo_url, service)
    headers = {
        'Content-Type': 'application/x-{}-request'.format(service),
        'User-Agent': 'fudge/{}'.format(__version__)
    }
    response = requests.post(url, headers=headers, data=request)
    if response.status_code not in (200, 304):
        raise FudgeException('repository {} does not exist'.format(repo_url))

    content_type = response.headers.get('Content-Type')
    if content_type != 'application/x-{}-result'.format(service):
        raise FudgeException('invalid response Content-Type: {}'.format(content_type))

    # Body is "<pkt-line NAK>\n<raw pack data>"; split off the status line.
    lines = iter(response.content.split(b'\n', 1))
    status = parse_pkt_line(next(lines))
    if status != b'NAK':
        raise FudgeException('could not retrieve the requested pack file')

    pack = next(lines)
    return pack, head_object_id
def pkt_line(command=None):
    """Encode *command* as a Git pkt-line; with no command, emit a flush-pkt."""
    if not command:
        return '0000'
    # 4 hex digits give the total length: prefix (4) + command + newline (1).
    payload = '{}\n'.format(command)
    return '{:04x}{}'.format(len(payload) + 4, payload)
def parse_pkt_line(line):
    """Parse a pkt-line: strip the 4-hex-digit length prefix and return the payload.

    Works on both str and bytes. A leading flush-pkt ('0000') followed by
    more data is skipped, and the next length prefix is consumed instead.
    """
    prefix = line[:4]
    payload = line[4:]
    if int(prefix, 16) == 0 and payload:
        # Skip the flush-pkt; the real pkt-line starts right after it.
        prefix = payload[:4]
        payload = payload[4:]
        _ = int(prefix, 16)  # validate the inner length prefix, as the original did
    return payload
| QuantamKawts/fudge | fudge/protocol.py | protocol.py | py | 3,209 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "string.ascii_letters",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "string.digits",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "fudge.utils.FudgeException",
"line_number": 15,
"usage_type": "call"
},
{
"api_name"... |
2920768004 | from flask_smorest import Blueprint
from flask.views import MethodView
from src.contextmanager import DatabaseContextManager
from src.models import Chama
import uuid
from src.schema import (
ChamaSchema,
ChamaDisplaySchema,
ChamaCreateSchema
)
chama_router = Blueprint('chama endpoints', __name__)
@chama_router.route('/')
class ChamaRoutes(MethodView):
    """REST endpoints for Chama resources.

    NOTE(review): `insert`, `update` and `delete` (the SQL expression
    constructors used below) are not imported in this module's visible
    imports — confirm they are meant to come from SQLAlchemy.
    NOTE(review): flask's MethodView only dispatches HTTP verbs
    (get/post/put/delete); the `update` method here is never routed.
    """

    @chama_router.response(schema=ChamaDisplaySchema, status_code=200)
    def get(self):
        """Return every Chama row as JSON."""
        with DatabaseContextManager() as context:
            chama = context.session.query(Chama).filter_by().all()
        return {
            "chama": [
                chamas.to_json() for chamas in chama
            ]
        }

    @chama_router.arguments(schema=ChamaCreateSchema)
    @chama_router.response(schema=ChamaCreateSchema, status_code=200)
    def post(self, payload):
        """Insert a Chama row, generating a random hex chama_name."""
        # Fix: the original referenced a misspelled `uudi.uuid4()`, which
        # raised NameError at runtime; the module imports `uuid`.
        payload['chama_name'] = uuid.uuid4().hex
        with DatabaseContextManager() as context:
            statement = insert(
                Chama
            ).values(
                **payload
            )
            context.session.execute(statement)
            context.session.commit()
        return payload

    def update(self, payload):
        # Update chama member
        with DatabaseContextManager() as context:
            statement = update(
                Chama
            ).values(
                **payload
            ).where(
                Chama.chama_id == payload['chama_id']
            )
            context.session.execute(statement)
            context.session.commit()
        return payload

    def delete(self, payload):
        """Delete the Chama row matching payload['chama_id']."""
        with DatabaseContextManager() as context:
            statement = delete(
                Chama
            ).where(
                Chama.chama_id == payload['chama_id']
            )
            context.session.execute(statement)
            context.session.commit()
| kenreagan/ChamaYetuBackend | src/Chama/__init__.py | __init__.py | py | 1,904 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask_smorest.Blueprint",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.views.MethodView",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "src.contextmanager.DatabaseContextManager",
"line_number": 18,
"usage_type": "call"
},
... |
586765290 | import os
import sys
import argparse
import numpy as np
import pdb
sys.path.append("../datasets")
from trainDataset import volume_loader
def parse_args():
    """Assemble the command-line parser for this script and parse sys.argv."""
    cli = argparse.ArgumentParser(description="Deep Learning Model")
    cli.add_argument("--root", required=True, type=str,
                     help="root of the dataset")
    cli.add_argument("--test-start", type=int, default=50,
                     help="starting key timestep")
    cli.add_argument("--test-end", type=int, default=66,
                     help="ending key timestep")
    cli.add_argument("--infering-step", type=int, default=9,
                     help="in the infering phase, the number of intermediate volumes")
    return cli.parse_args()
def main(args):
    """Linearly interpolate volumes between two key timesteps and save them as .raw.

    Loads the ground-truth volumes at test_start and test_end, then writes
    one interpolated float32 volume per intermediate timestep to save_lerp/.
    """
    # Volume dimensions (z, y, x) of the combustion dataset.
    zSize, ySize, xSize = 120, 720, 480

    gt_root = os.path.join(args.root, "exavisData", "combustion")
    start_idx = ("%04d" % args.test_start)
    gt_start_filepath = os.path.join(gt_root, "jet_" + start_idx, "jet_mixfrac_" + start_idx + ".dat")
    gt_start = volume_loader(gt_start_filepath, zSize, ySize, xSize)
    end_idx = ("%04d" % args.test_end)
    gt_end_filepath = os.path.join(gt_root, "jet_" + end_idx, "jet_mixfrac_" + end_idx + ".dat")
    gt_end = volume_loader(gt_end_filepath, zSize, ySize, xSize)

    for i in range(args.test_start+1, args.test_end):
        # Interpolation weight: fraction of the way from start to end.
        offset = i - args.test_start
        interval = args.test_end - args.test_start
        pred = (1 - offset / interval) * gt_start + offset / interval * gt_end
        pred = pred.astype(np.float32)
        volume_name = "jet_mixfrac_" + ("%04d" % i) + '.raw'
        pred.tofile(os.path.join(args.root, "save_lerp", volume_name))
# CLI entry point.
if __name__ == "__main__":
    main(parse_args())
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.join",
... |
14950275682 | from flask import Flask, render_template, render_template, request, redirect, session
from . import auth, queries, uploads
from .utils import Settings
# Single Flask app wired up with the auth / query / upload blueprints.
app = Flask(__name__)
app.secret_key = Settings().secret_key

API_BASE_URL = "http://app:8080"  # Replace with the actual base URL of your API

# Register blueprints
app.register_blueprint(auth.auth_bp)
app.register_blueprint(queries.query_bp)
app.register_blueprint(uploads.upload_bp)


@app.route("/")
def index():
    """Render the landing page."""
    return render_template("index.html")


if __name__ == "__main__":
    app.run()
| talhaanwarch/openai-chatbot | frontend/app.py | app.py | py | 551 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "utils.Settings",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 17,
"usage_type": "call"
}
] |
74523493544 | import matplotlib.pyplot as plt
import glob
import os
import argparse
# File select index out of range exception
# File select index out of range exception
class ValueOutOfRange(Exception):
    """Signals that the user picked a file index outside the listed range."""

    def __str__(self):
        # Fixed message; this exception carries no dynamic state.
        return 'Value out of range'
# Main script body: locate a data file (CLI arg or ./data/*.TXT), let the user
# pick one interactively, parse the tare and measurement lines, convert the raw
# counts to kgf and plot them with matplotlib.
data_path = None
parser = argparse.ArgumentParser()
parser.add_argument('--data', '-d', nargs='?', action='store', dest='data_path', metavar='data file')
parser.add_argument('--version', '-v', action='version', version='v1.0')
args = parser.parse_args()
try:
    if args.data_path is None:
        data_path = os.path.dirname(os.path.abspath(__file__)) + '/../data/*.TXT'
    else:
        sel = 0
        data_path = args.data_path
    # Find for data in data/ file
    data_files = glob.glob(data_path)
    # List found files
    if args.data_path is None:
        for i, fn in enumerate(data_files):
            print('[', i + 1, '] ', os.path.basename(fn))
        # Select file (1-based menu; internal index is 0-based)
        sel = None
        while sel not in range(len(data_files)) or type(sel) != int:
            try:
                sel = int(input('Select file > ')) - 1
                # Exit if entered value is 0
                if sel == -1:
                    exit()
                if sel not in range(len(data_files)):
                    raise ValueOutOfRange()
            except ValueOutOfRange as e:
                print(e)
            except ValueError as e:
                print(e)
    # Open data file
    try:
        f = open(data_files[sel], 'r')
    except Exception as e:
        print(e)
    # HYPERPARAMETERS: fixed line numbers inside the data file
    # (line 0 carries the tare/zero value, line 3 the ':'-separated samples)
    DATA_LINE = 3
    ZERO_LINE = 0
    # Parse data
    try:
        for i, line in enumerate(f):
            if i == DATA_LINE:
                data = line.split(':')
            if i == ZERO_LINE:
                zero_line = line.split(':')
        f.close() # Close data file
    except Exception as e:
        print(e)
        f.close() # Close data file
    # Parse tare value: keep the last token of the zero line that parses as int
    for item in zero_line:
        try:
            tare = int(item)
        except:
            pass
    print("Tare, zero value: ", tare)
    # Convert to integers and append to int_data list
    # NOTE(review): int_data aliases data (no copy), and deleting while
    # enumerating can skip elements — confirm the input never has two
    # consecutive non-numeric tokens.
    int_data = data
    for i, item in enumerate(data):
        try:
            int_data[i] = int(data[i])
        except:
            del int_data[i]
    del [int_data[len(int_data) - 1]]
    # Apply tare
    for i, d in enumerate(int_data):
        int_data[i] = abs(d) - abs(tare) # With tare
        int_data[i] /= 20000 # /20000 presumably converts raw counts to kgf (x9.8 for Newtons) — confirm calibration
    plt.plot(range(len(int_data)), int_data, 'y')
    plt.grid()
    plt.xlabel("Lecture")
    plt.ylabel("Kgf")
    plt.show()
except KeyboardInterrupt as e:
    print("\nBye!")
| EHAerospace/EHA-TestStand | graph/graph.py | graph.py | py | 2,669 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath"... |
70806707943 | import datetime
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from .models import *
from .functions import method
from django.http import JsonResponse
# Create your views here.
def index(request):
    """Render the main page with the current local time (Korean date format)."""
    now_text = datetime.datetime.now().strftime("%Y년 %m월 %d일 %H시 %M분 %S초")
    return render(request, 'mainpage.html', {'current_servertime': now_text})
def action(request):
    """Handle the main-page form POST: save the submitted URL, probe the remote
    server's clock, run a network speed test and render the results.

    Non-POST requests and empty URL submissions redirect back to 'sugang:main'.
    """
    context = {}
    if request.method == 'POST':
        if request.POST.get('saveURL') == '':
            return redirect('sugang:main')
        else:
            method.save_URL(request)
            # Remote server time for the most recently saved URL.
            temp = method.show_server_time(method.get_accessurl_by_highest_id())
            context['current_servertime'], context['user_url'] = temp[0], temp[1]
            up_speed, down_speed, ping_speed = method.checkSpeed()
            # uplink/downlink/ping speeds are stored in variables and can be pulled into the context
            # uplink/downlink/ping percentile values are stored and usable via the context as well
            # the overall percentile uses the downlink percentile, since downlink speed is reportedly the most decisive factor
            result = resultInfo.objects.create(
                upSpeed = up_speed,
                downSpeed= down_speed,
                pingSpeed= ping_speed
            )
            down_percentile = method.get_speed_percentile(down_speed)
            result.save()
            print(down_percentile)
            context['result'] = result
            context['speed_ranking'] = down_percentile
            return render(request, 'mainpage.html', context)
    else:
        return redirect('sugang:main')
def reload_serverclock(request):
    """Return the remote server's current time as JSON (AJAX clock refresh)."""
    latest_entry = method.get_accessurl_by_highest_id()
    remote_time = method.calculate_time(latest_entry.testURL)
    return JsonResponse({'current_servertime': remote_time})
def reload_defaultclock(request):
    """Return this machine's current time as JSON (fallback clock refresh)."""
    stamp = datetime.datetime.now().strftime("%Y년 %m월 %d일 %H시 %M분 %S초")
    return JsonResponse({'current_servertime': stamp})
{
"api_name": "datetime.datetime.now",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.shortcuts.render",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "... |
4867507385 | #!/usr/bin/python3
import numpy as np
import pickle
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
from mpl_toolkits.mplot3d import axes3d
# defining the class for later 3D arrow plots
class Arrow3D(FancyArrowPatch):
    """A 2D FancyArrowPatch whose endpoints live in 3D data coordinates.

    The 3D endpoints are projected into the 2D axes plane at draw time, so the
    arrow tracks the current view of a mplot3d axes.
    """
    def __init__(self, xs, ys, zs, *args, **kwargs):
        # Dummy 2D positions; the real ones are computed per-draw from _verts3d.
        FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
        self._verts3d = xs, ys, zs
    def draw(self, renderer):
        xs3d, ys3d, zs3d = self._verts3d
        # NOTE(review): renderer.M / proj_transform is an old mplot3d internal;
        # confirm it still exists on the matplotlib version in use.
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
        self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
        FancyArrowPatch.draw(self, renderer)
def plot_energies():
    """Plot kinetic, potential and total energy versus simulation step.

    Reads the pickled run parameters and energy traces from Data/ and saves
    the figure under Graphs/.
    """
    # Run parameters: [dt, vmax, grid_size, thermostat, steps].
    # Files are only read, so open read-only ('rb', was 'r+b') and let the
    # context manager close them.
    with open('Data/energy_plot_values.pkl', 'rb') as infile:
        energy_plot_values = pickle.load(infile)
    dt = energy_plot_values[0]
    vmax = energy_plot_values[1]
    grid_size = energy_plot_values[2]
    thermostat = energy_plot_values[3]
    steps = energy_plot_values[4]
    # Energy traces: one value per simulation step.
    with open('Data/E_kin.pkl', 'rb') as infile:
        E_kin = pickle.load(infile)
    with open('Data/E_pot.pkl', 'rb') as infile:
        E_pot = pickle.load(infile)
    with open('Data/E_tot.pkl', 'rb') as infile:
        E_tot = pickle.load(infile)
    t = np.arange(steps)
    fig2 = plt.figure()
    axe2 = fig2.add_subplot(111)
    axe2.set_ylabel('E')
    axe2.set_xlabel('n$_{steps}$')
    axe2.set_title('Energies for v$_{max}$ = %5.1f, n$_{steps}$= %5.0f, $\Delta$t=%5.3f' %(vmax, steps, dt))
    axe2.plot(t, E_kin, label='$E_{kin}$')
    axe2.plot(t, E_pot, label='$E_{pot}$')
    # BUG FIX: label was '$E_{tot}' (unterminated mathtext, raises at legend
    # rendering time); closing '$' added.
    axe2.plot(t, E_tot, label='$E_{tot}$')
    axe2.legend(loc=0)
    fig2.savefig('Graphs/Energies_grid'+str(grid_size)+'_vmax'+str(vmax)+'_thermo'+str(thermostat)+'.png')
def plot_RDF():
    """Plot the radial distribution function as a normalized histogram.

    Reads run parameters, raw RDF counts and bin edges from Data/ pickles and
    saves the bar chart under Graphs/.
    """
    infile = open('Data/RDF_plot_values.pkl', 'r+b')
    RDF_plot_values = pickle.load(infile)
    infile.close()
    # Run parameters: [dt, vmax, grid_size, thermostat, steps].
    dt = RDF_plot_values[0]
    vmax = RDF_plot_values[1]
    grid_size = RDF_plot_values[2]
    thermostat = RDF_plot_values[3]
    steps = RDF_plot_values[4]
    infile = open('Data/RDF.pkl', 'r+b')
    # NOTE(review): np.float_ was removed in NumPy 2.0 — confirm the pinned
    # NumPy version or switch to np.float64.
    RDF = np.array(pickle.load(infile), dtype=np.float_)
    infile.close()
    # Normalize counts to probabilities.
    RDF /= np.sum(RDF)
    infile = open('Data/bins.pkl', 'r+b')
    bins = pickle.load(infile)
    infile.close()
    # Bar width at 70% of bin spacing; bars anchored on left bin edges.
    width = 0.7*(bins[1]-bins[0])
    left = bins[:-1]
    fig3 = plt.figure()
    axe3 = fig3.add_subplot(111)
    axe3.bar(left, RDF, width=width)
    axe3.set_title('RDF for v$_{max}$ = %5.1f, n$_{steps}$= %5.0f, $\Delta$t=%5.3f, thermostat: %r' %(vmax, steps, dt, thermostat))
    # Only distances up to half the box size are meaningful under periodic
    # boundaries. NOTE(review): the `xmax` kwarg is long-deprecated in
    # matplotlib — confirm version, newer releases require set_xlim(right=...).
    axe3.set_xlim(xmax = float(grid_size)/2)
    axe3.set_xlabel('Distance r')
    axe3.set_ylabel('Propability P')
    fig3.savefig('Graphs/RDF_grid'+str(grid_size)+'_vmax'+str(vmax)+'_thermo'+str(thermostat)+'.png')
# plot_positions plots all positions in a 3D plot and saves it as a png
def plot_positions():
    """Render one 3D scatter plot of particle positions per simulation step.

    Reads per-step position pickles Data/x<n>.pkl and writes frames to
    Movie/step<n>.png (intended to be stitched into a movie afterwards).
    """
    infile = open('Data/RDF_plot_values.pkl', 'r+b')
    RDF_plot_values = pickle.load(infile)
    infile.close()
    # Run parameters: [dt, vmax, grid_size, thermostat, steps]; dt, vmax and
    # thermostat are unpacked but unused here.
    dt = RDF_plot_values[0]
    vmax = RDF_plot_values[1]
    grid_size = RDF_plot_values[2]
    thermostat = RDF_plot_values[3]
    steps = RDF_plot_values[4]
    for n in range(int(steps)):
        infile = open('Data/x'+str(n)+'.pkl', 'r+b')
        x = np.array(pickle.load(infile))
        infile.close()
        # Non-interactive mode: figures are saved, never shown.
        plt.ioff()
        fig = plt.figure()
        axe = fig.add_subplot(111, projection='3d')
        # Split the (N, 3) position array into per-axis coordinate vectors.
        xs, ys, zs = np.zeros(len(x)), np.zeros(len(x)), np.zeros(len(x))
        for i,el in enumerate(x):
            xs[i] = el[0]
            ys[i] = el[1]
            zs[i] = el[2]
        axe.scatter(xs,ys,zs, s=500)
        '''
        for i, acc in enumerate(a):
            acc /= 10
            acc += x[i]
            axe.add_artist(Arrow3D(*zip(x[i],acc), mutation_scale=20, lw=1, arrowstyle="-|>", color="k"))
        '''
        # Fix axes to the simulation box so frames are comparable.
        axe.set_xlim(0, grid_size)
        axe.set_ylim(0, grid_size)
        axe.set_zlim(0, grid_size)
        axe.set_title('Step %5.0f' %n)
        fig.savefig('Movie/step'+str(n)+'.png')
        # Free the figure to keep memory flat across many frames.
        plt.close('all')
| Schlabonski/LennardJonesGas | plotting.py | plotting.py | py | 4,026 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.patches.FancyArrowPatch",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.patches.FancyArrowPatch.__init__",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches.FancyArrowPatch",
"line_number": 14,
"u... |
33677649557 | from fastapi.routing import APIRouter
from typing import Annotated
from fastapi import Depends
from app.main.factories import (
make_db_list_videos,
make_db_list_my_videos,
make_db_list_friends_videos
)
from app.schemas.video import VideosListOut
from app.infra.auth import JwtBearer
from app.main.config import PREFIX
# All endpoints below are mounted under "<PREFIX>/video".
router = APIRouter(prefix=f"{PREFIX}/video", tags=['Video'])
@router.get(
    "/list",
    status_code=200,
    summary="List Videos",
    response_description="All Videos",
    response_model=VideosListOut,
    dependencies=[Depends(JwtBearer())]
)
async def list_all_videos():
    """Return every video in the store (valid JWT required; identity unused)."""
    db_list_videos = make_db_list_videos()
    videos = await db_list_videos.list_all()
    return VideosListOut(message="Videos found", data=videos)
@router.get(
    "/list/me",
    status_code=200,
    summary="List User Videos",
    response_description="User Videos",
    response_model=VideosListOut
)
async def list_my_videos(uuid: Annotated[str, Depends(JwtBearer())]):
    """Return the authenticated user's own videos.

    `uuid` is the caller's id extracted from the bearer token by JwtBearer.
    """
    db_list_my_videos = make_db_list_my_videos()
    videos = await db_list_my_videos.list_my_videos(uuid)
    return VideosListOut(message="My videos found", data=videos)
@router.get(
    "/list/friends",
    status_code=200,
    summary="List Friends Videos",
    response_description="Friends Videos",
    response_model=VideosListOut
)
async def list_friends_videos(uuid: Annotated[str, Depends(JwtBearer())]):
    """Return videos belonging to the authenticated user's friends.

    `uuid` is the caller's id extracted from the bearer token by JwtBearer.
    """
    db_list_friends_videos = make_db_list_friends_videos()
    videos = await db_list_friends_videos.list_friends_videos(uuid)
    return VideosListOut(message="Friends videos found", data=videos)
| Mauricio-Silva/backend-user | app/main/routes/video.py | video.py | py | 1,610 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.routing.APIRouter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "app.main.config.PREFIX",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "app.main.factories.make_db_list_videos",
"line_number": 26,
"usage_type": "call"
},
... |
19365768268 | import telebot
import re
import pymongo
from datetime import datetime
from telebot import types
from bson.objectid import ObjectId
from config import *
bot = telebot.TeleBot(TOKEN)
db = pymongo.MongoClient('mongodb://localhost:27017/').kunyn_team
# In-memory cache of registered players: telegram_id -> {'name': ...,
# plus per-user scoring state ('game_id', 'to_id') added by the handlers}.
working_obj = {}
for player in db.players.find():
    working_obj[player['telegram_id']] = {
        'name': player['name']
    }
# Prompt texts (Ukrainian). They double as dispatch keys: callback handlers
# match on the text of the message the inline keyboard was attached to.
choose_game_msg = 'Тут останні твої ігри'
choose_player_msg = 'Вибери гравця'
choose_rest_player_msg = 'Є! Тобі залишилось оцініти ще їх:'
choose_score_msg = 'Тепер вибери оцінку або введи значення'
show_game_scores = 'Вибери гру, щоб побачити рейтинг гравців за неї'
# Global two-phase state for the /add_game flow: first the name, then the date.
# NOTE(review): shared across all chats — concurrent game creation would clash.
CREATING_GAME = {
    'name': False,
    'date': False,
}
@bot.message_handler(commands=['start'])
def start_handler(message):
    """Greet the user and point them to /join."""
    bot.send_message(message.chat.id, 'Бот для статистики гравців футзальної команди *Кунин*. Тисни /join')
@bot.message_handler(commands=['help'])
def init_handler(message):
    """Send the list of available bot commands."""
    bot.send_message(message.chat.id, 'Список доступних опцій:\n'
                                      '/join - приєднатися до команди\n'
                                      '/games - список 3-х останніх ігор\n'
                                      '/all_games - список всіх ігор\n'
                                      '/rating - рейтинг гравців за всі ігри\n'
                                      '/games_rating - список ігор, щоб подивитись рейтинг')
@bot.message_handler(commands=['join'])
def join_handler(message):
    """Register the sender as a team player (Mongo + in-memory cache).

    Idempotent: a repeated /join only reports that the user already joined.
    Always attaches the persistent command keyboard to the reply.
    """
    from_user = message.from_user
    send_msg = 'Ви прийняті і можете оцінювати інших гравців'
    if_exist = db.players.find_one({'telegram_id': from_user.id}) is not None
    if if_exist:
        send_msg = 'Ви вже були додані до команди раніше'
    else:
        db.players.insert_one({
            'telegram_id': from_user.id,
            'name': from_user.first_name
        })
        # Keep the in-memory cache in sync with the players collection.
        working_obj[from_user.id] = {
            'name': from_user.first_name
        }
    keyboard = types.ReplyKeyboardMarkup()
    keyboard.row(types.KeyboardButton('/rating'), types.KeyboardButton('/help'))
    keyboard.row(types.KeyboardButton('/games'), types.KeyboardButton('/games_rating'))
    bot.send_message(message.chat.id, send_msg, reply_markup=keyboard)
@bot.message_handler(commands=['games', 'all_games', 'games_rating'])
def handler(message):
    """List past games as inline buttons (callback_data = game ObjectId).

    /games shows the last 3, /all_games all past games; /games_rating uses a
    different prompt text so its button presses route to the rating handler.
    """
    msg = show_game_scores if message.text == '/games_rating' else choose_game_msg
    # limit(0) in Mongo means "no limit".
    limit = 3 if message.text == '/games' else 0
    games = db.games.find({'date': {'$lt': datetime.now()}}).sort('date', pymongo.DESCENDING).limit(limit)
    keyboard = types.InlineKeyboardMarkup()
    for game in games:
        keyboard.row(types.InlineKeyboardButton(game['name'], callback_data=str(game['_id'])))
    bot.send_message(message.chat.id, msg, reply_markup=keyboard)
@bot.message_handler(commands=['add_game'])
def handler(message):
    """Start the game-creation flow: next free-text message is the game name."""
    CREATING_GAME['name'] = True
    bot.send_message(message.chat.id, 'Введи назву гри')
@bot.message_handler(func=lambda _: CREATING_GAME['name'])
def handler(message):
    """Game-creation step 1: validate and store the game name.

    Accepts names shaped like "TeamA - TeamB" (see the regex); on success the
    game is inserted with an empty scores list and the flow moves to step 2
    (date). Invalid or duplicate names re-prompt the user.
    """
    game_name = message.text
    msg = 'Коли вона була?'
    is_valid = re.search(r"^(\w+('\w+)?)(\s\w+|-\d|\s\(\w+\))?(\s-\s)(\w+('\w+)?)(\s\w+|-\d|\s\(\w+\))?$", game_name) is not None
    if is_valid:
        CREATING_GAME['name'] = False
        is_exist = db.games.find_one({'name': game_name}) is not None
        if is_exist:
            msg = f'Гра <b>{game_name}</b> вже існує.\nПодивитись список /all_games\nДодати іншу /add_game'
        else:
            inserted_game = db.games.insert_one({
                'name': game_name,
                'scores': []
            })
            CREATING_GAME['date'] = True
            CREATING_GAME['id'] = inserted_game.inserted_id
    else:
        msg = 'Введи валідну назву гри.\nПриклад: <i>Команда-1 - Команда-2</i>'
    bot.send_message(message.chat.id, msg, parse_mode='html')
@bot.message_handler(func=lambda _: CREATING_GAME['date'])
def handler(message):
    """Game-creation step 2: parse the date ('%Y-%m-%d %H:%M') and finish.

    On a parse failure the game row keeps no date and the user is re-prompted
    (CREATING_GAME['date'] stays True).
    """
    target_game = {'_id': ObjectId(CREATING_GAME['id'])}
    game = db.games.find_one(target_game)
    msg = f'Гру *{game["name"]}* створено. Обирай її у списку /games'
    try:
        date = datetime.strptime(message.text, '%Y-%m-%d %H:%M')
        db.games.update_one(target_game, {'$set': {
            'date': date
        }})
        CREATING_GAME['date'] = False
        CREATING_GAME['id'] = None
    except ValueError:
        msg = 'Невірний формат! Приклад валідної дати: _2020-01-12 02:20_'
    bot.send_message(message.chat.id, msg, parse_mode='markdown')
def get_rating_msg(game_id=None):
    """Build the ranking text: players sorted by average received score.

    With game_id, only that game counts; otherwise all past games. Players
    with no scores average to 0 and sort to the bottom.
    """
    target_game = {'_id': game_id} if game_id is not None else {'date': {'$lt': datetime.now()}}
    games = db.games.find(target_game)
    # telegram_id -> list of scores received.
    rating = {tg_id: [] for tg_id in working_obj}
    for game in games:
        for s in game['scores']:
            rating[s['to']].append(s['score'])
    # Collapse each score list into its mean (0 when empty).
    for tg_id, scores in rating.items():
        try:
            rating[tg_id] = sum(scores) / len(scores)
        except ZeroDivisionError:
            rating[tg_id] = 0
    sorted_players = {k: v for k, v in sorted(rating.items(), key=lambda item: item[1], reverse=True)}
    result_msg = ''
    # One "rank. name score" line per player.
    for tg_id, score, n in zip(sorted_players, sorted_players.values(), range(len(sorted_players))):
        result_msg += f'{n + 1}. {working_obj[tg_id]["name"]} {round(score, 2)}\n'
    return result_msg
def get_players_to_score(game, user_id):
    """Return the set of player ids that *user_id* has not yet scored in *game*.

    The rater themself is always excluded from the result.
    """
    already_scored = {entry['to'] for entry in game['scores'] if entry['by'] == user_id}
    already_scored.add(user_id)
    return set(working_obj.keys()) - already_scored
def get_keyboard_with_players(players):
    """Build an inline keyboard with one button per player id.

    Button labels come from the working_obj cache; callback_data is the id.
    """
    keyboard = types.InlineKeyboardMarkup()
    for id in players:
        keyboard.row(types.InlineKeyboardButton(working_obj[id]['name'], callback_data=id))
    return keyboard
@bot.message_handler(commands=['rating'])
def handler(message):
    """Send the all-games player ranking."""
    bot.send_message(message.chat.id, get_rating_msg())
@bot.callback_query_handler(func=lambda call: call.message.text == show_game_scores)
def handler(query):
    """Send the ranking for one game (button press from /games_rating)."""
    bot.send_message(query.message.chat.id, get_rating_msg(ObjectId(query.data)))
@bot.callback_query_handler(func=lambda call: call.message.text == choose_game_msg)
def handler(query):
    # When choose game: remember it for this user and offer the players
    # they still have to score (or report that all are done).
    game = db.games.find_one({'_id': ObjectId(query.data)})
    if not game:
        bot.send_message(query.message.chat.id, 'Спочатку виберу гру. Тисни /games')
        return
    working_obj[query.from_user.id]['game_id'] = ObjectId(query.data)
    players_to_score = get_players_to_score(game, query.from_user.id)
    if len(players_to_score) > 0:
        keyboard = get_keyboard_with_players(players_to_score)
        bot.send_message(query.message.chat.id, choose_player_msg, reply_markup=keyboard)
    else:
        bot.send_message(query.message.chat.id, f'За гру *{game["name"]}* ти вже поставив оцінки всім гравцям',
                         parse_mode='markdown')
@bot.callback_query_handler(func=lambda call: call.message.text in [choose_player_msg, choose_rest_player_msg])
def handler(query):
    # When choose player for the game: remember the target player and show
    # a 1..10 score keyboard (two rows of five).
    if not working_obj[query.from_user.id].get('game_id'):
        bot.send_message(query.message.chat.id, 'Ти не обрав гру! Тисни /games')
        return
    working_obj[query.from_user.id]['to_id'] = int(query.data)
    keyboard = types.InlineKeyboardMarkup()
    for r in range(2):
        keyboard.row(
            types.InlineKeyboardButton(r * 5 + 1, callback_data=r * 5 + 1),
            types.InlineKeyboardButton(r * 5 + 2, callback_data=r * 5 + 2),
            types.InlineKeyboardButton(r * 5 + 3, callback_data=r * 5 + 3),
            types.InlineKeyboardButton(r * 5 + 4, callback_data=r * 5 + 4),
            types.InlineKeyboardButton(r * 5 + 5, callback_data=r * 5 + 5),
        )
    bot.send_message(query.message.chat.id, choose_score_msg, reply_markup=keyboard)
def set_score(score, from_id, chat_id):
    """Record *score* given by *from_id* to their currently selected player.

    Validates the per-user state (selected game and player), the 1..10 range
    and duplicate scoring, persists the score, then either offers the
    remaining unscored players or confirms completion.
    """
    if not working_obj[from_id].get('game_id'):
        bot.send_message(chat_id, 'Ти не обрав гру! Тисни /games')
        return
    to_id = working_obj[from_id].get('to_id')
    if not to_id:
        bot.send_message(chat_id, 'Ти не обрав гравця, для якого хочеш поставити оцінку!')
        return
    if score < 1 or score > 10:
        bot.send_message(chat_id, 'Оцінка може бути в діапазоні *[1, 10]*', parse_mode='markdown')
        return
    target_game = {'_id': working_obj[from_id]['game_id']}
    game = db.games.find_one(target_game)
    # Reject a second score from the same rater to the same player.
    if any(map(lambda s: s['by'] == from_id and s['to'] == to_id, game['scores'])):
        bot.send_message(chat_id, f'Ти вже ставив оцінку грацю *{working_obj[to_id]["name"]}* за гру *{game["name"]}*',
                         parse_mode='markdown')
        return
    new_score = {
        'by': from_id,
        'score': score,
        'to': to_id
    }
    db.games.update_one(target_game, {'$push': {'scores': new_score}})
    # Mirror the DB update locally so the remaining-players check is current.
    game['scores'].append(new_score)
    rest_players = get_players_to_score(game, from_id)
    if len(rest_players) > 0:
        keyboard = get_keyboard_with_players(rest_players)
        bot.send_message(chat_id, choose_rest_player_msg, reply_markup=keyboard)
    else:
        bot.send_message(chat_id, f'Круто! За гру *{game["name"]}* ти вже поставив оцінки всім гравцям',
                         parse_mode='markdown')
@bot.callback_query_handler(func=lambda call: call.message.text == choose_score_msg)
def score_handler(query):
    # When choose score for player (button press on the 1..10 keyboard).
    set_score(int(query.data), query.from_user.id, query.message.chat.id)
@bot.message_handler(func=lambda _: True)
def handler(message):
    """Catch-all: interpret free text as a manually typed score.

    Non-numeric input yields a context-sensitive error (no game selected,
    no player selected, or bad number format).
    """
    try:
        score = float(message.text)
        set_score(score, message.from_user.id, message.chat.id)
    except ValueError:
        msg = "Невірний формат оцінки! Можливо спробуй через крапку.\nПриклади: '7', '8.5', 6.75"
        if not working_obj[message.from_user.id].get('game_id'):
            msg = 'Ти не обрав гру! Тисни /games'
        elif not working_obj[message.from_user.id].get('to_id'):
            msg = 'Ти не обрав гравця, для якого хочеш поставити оцінку!'
        bot.send_message(message.chat.id, msg)
if __name__ == '__main__':
    # Long-poll Telegram for updates; none_stop keeps polling across errors.
    bot.polling(none_stop=True)
| andrii-porokhnavets/telegram_bots | scoring/main.py | main.py | py | 11,279 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "telebot.TeleBot",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "telebot.types.ReplyKeyboardMarkup",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "... |
22995979758 |
from time import sleep
import pytest #找到测试用例,执行
import requests #发送请求的包
from common.send_request import SendRequest #导入公共的请求类
from common.yaml_util import write_yaml, read_yaml #yaml文件的操作
# scope="function"-为函数级别 scope="class"-类级别 scope="session"-表示回话 autouse=False-不自动执行 params-数据驱动 ids-数据驱动的别名 name-别名
# @pytest.fixture(scope="class",autouse=False,params=["xM",'小黑子'],ids=['xM','IKUN'],name="db")
# def execute_db_connection(request):
# print("连接数据库连接")
# # yield 上面是用例之前的处理,下面是用例之后的处理 爷的
# # request 返回值
# yield request.param
# print("关闭数据库连接")
class TestApi:
# accessToken="" #需要优化,其它的.py文件需要使用的话需要导入这个定义的类,然后.py文件执行也会执行下面的方法
# 优化:将token保存在yaml文件中
"""
def setup(self):
print("用例之前")
def teardown(self):
print("用例之后")
def setup_class(self):
print("类之前")
def teardown_class(self):
print("类之后")
这几个测试用例对所有的都生效,但是我想某个用例不执行,或者某个用例执行?
比如:登录用例,不需要再执行类之前,其它不需要token的不需要用例之前或之后.....
"""
# 优化 解决:用fixture 非斯扯 装饰器
# 1.登录API
# @pytest.mark.smoke
# @pytest.mark.run(order=1)
    def test_login(self):
        """Log in as admin and persist the returned token to the yaml file."""
        url = "http://ceshi13.dishait.cn/admin/login"
        userinfo = {
            "username":"admin",
            "password":"admin"
        }
        # # res = requests.post(url=url,data=userinfo)
        res=SendRequest().all_send_request(method="post", url=url, data=userinfo)
        # # TestApi.accessToken=res.json()['data']['token']
        # write_yaml({"accessToken":[res.json()['data']['token']]}) # store the token in the yaml file
        write_yaml({"accessToken":res.json()['data']['token']}) # store the token in the yaml file
# 2.获取管理员列表API
# 标记
# @pytest.mark.user_manager
# 跳过
# @pytest.mark.skip(reason="该版本不执行")
# 执行顺序
# @pytest.mark.run(order=2)
    def test_manager(self):
        """Build the admin-list request data; the actual GET call is commented out below."""
    # def test_manager(self,db): # would run multiple times because of the data-driven fixture
        page = 1
        url = f"http://ceshi13.dishait.cn/admin/manager/{page}"
        # headers = {
        #     # "token": TestApi.accessToken
        #     "token": read_yaml("accessToken") # read the token from the yaml file
        # }
        params = {
            "limit":10,
            "keyword":"admin"
        }
        # NOTE(review): `params` is built but unused while the request below stays commented out.
        # res=requests.get(url=url,headers=headers)
        # res = SendRequest().all_send_request(method="get", url=url, headers=headers, params=params)
        # print(res.json())
        # raise Exception("小黑子") # deliberately raise an exception (kept for debugging)
        # print(db)
# 问题点:
# 1.token的关联:可以设置一个全局的token然后通过类名去调用这个token,但是引发了一个新的问题:2
# 2.参数token的问题,在其他的.py文件引入时,会同时执行这类里面所有的测试用例,解决:将token的数据存放在yaml文件中,然后再读取
# 3.但是引发了一个新的问题:接口再次执行时会再次生成多个相同的属性名,导致yaml文件出现红色下划线,如何解决?
# 解决思路:在整个项目之前去清空yaml文件,需要调用clear_yaml()方法
# 4.类和用例之前和之后执行的setup、teardown、setup_class、teardown_class,有一个问题是他们是对所有的用例都触发的,如何解决?
# 5.方法如下:
# scope="function"-为函数级别 scope="class"-类级别 autouse=False-不自动执行 params-数据驱动 ids-数据驱动的别名 name-别名
# @pytest.fixture(scope="class",autouse=False,params=["xM",'小黑子'],ids=['xM','IKUN'],name="db")
# def execute_db_connection(request):
# print("连接数据库连接")
# # yield 上面是用例之前的处理,下面是用例之后的处理 爷的
# # request 返回值
# yield request.param
# print("关闭数据库连接")
# 如果设置,需要通过别名去调用,否则要通过定义的 execute_db_connection去调用(其中autouse=False),autouse=True则都不需要操作
| xmtxy/pytest | test_case/bzx_test_api.py | bzx_test_api.py | py | 4,370 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "common.send_request.SendRequest",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "common.yaml_util.write_yaml",
"line_number": 48,
"usage_type": "call"
}
] |
36539454569 | # -*- coding:utf-8 -*-
#Name:
#Descripton:
#Author: smartwy
#Date:
#Version:
import os
# print('Process (%s) start ...'%os.getpid())
#
# pid = os.fork() # windows 没有fork调用,linux unix Mac支持fork调用
#
# if pid == 0:
# print('I am child process (%s) and my parent is %s .'%(os.getpid(),os.getppid()))
# else:
# print("I (%s) just created a child process (%s)."%(os.getpid(),pid))
from multiprocessing import Process
from multiprocessing import Pool
import os
import time,random
# def run_proc(name):
# print('Run child process name:%s (id:%s) ...'%(name,os.getpid()))
# # time.sleep(20)
# for i in range(3):
# # print(i)
# time.sleep(1)
#
# if __name__ == '__main__':
# print('Parent process %s'%os.getpid())
# argsl = ['a','n', 'o', 'e']
# for i in argsl:
# p = Process(target=run_proc,args=(i,))
# print('Child process will start')
# p.start() # 启动子进程,
# p.join() # join方法可以等子进程结束后继续往下执行,通常用于进程间的同步
# # 如果不使用join()启动完全部子进程后不会等待进程结束,直接往下执行
# print('Child process end')
def long_time_task(name):
    """Simulate a slow job: sleep up to 3 seconds and report the elapsed time."""
    print('Run process %s task %s ...' % (name, os.getpid()))
    began = time.time()
    time.sleep(random.random() * 3)
    elapsed = time.time() - began
    print('Task %s runs %0.2f seconds.' % (name, elapsed))
if __name__=='__main__':
    print('Parent process {}.'.format(os.getpid()))
    p = Pool(4)
    for i in range(6): # the pool holds at most 4 workers; with 6 tasks, 4 start first and each finished one is replaced until all 6 complete
        p.apply_async(long_time_task, args=(i,))
    print('Waiting for all subprocesses done...')
    p.close() # after close() no new tasks can be submitted
    p.join() # wait for all child processes to finish before continuing; without join() execution falls through immediately
    print('All subprocesses done.')
    # p.apply_async(long_time_task,args=('wy')) placed after close() would raise an error
# from multiprocessing import Value
#
# print(money)
| smartwy/python_test | 练习文件/多进程.py | 多进程.py | py | 2,102 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.getpid",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 45,... |
2583396106 | import yaml
def get_latest_jdk(distroJdks):
    """Return the newest JDK entry in *distroJdks*, or None for an empty list.

    The literal "latest" outranks any numeric version (it is ranked 999).
    """
    best = None
    best_rank = 0
    for candidate in distroJdks:
        rank = 999 if candidate == "latest" else int(candidate)
        if candidate == "latest" or rank > best_rank:
            best = candidate
            best_rank = rank
    return best
def get_lts_jdk(jdks, distroJdks):
    """Return the highest-numbered LTS JDK in *distroJdks*, or None.

    "latest" entries are skipped; LTS status is looked up in *jdks*.
    """
    best = None
    best_num = 0
    for version in distroJdks:
        if version == "latest":
            continue
        num = int(version)
        if num > best_num and jdks[version]["lts"]:
            best = version
            best_num = num
    return best
def generate_jdk_images(path):
    """Expand the YAML definition at *path* into a list of image build specs.

    For every (distro, jdk) pair a dict is produced with the build path,
    install package, app banner command and the full ghcr.io tag list.
    Tag rules: "<jdk>-<alias>" always; "latest-<alias>" for the newest
    numeric jdk; "lts-<alias>" and the bare alias for the LTS jdk; the
    default distro additionally gets the unsuffixed "<jdk>"/"latest"/"lts"
    tags. Tag ORDER is significant for the published manifests — preserve it.
    """
    with open(path, "r") as stream:
        defs = yaml.safe_load(stream)
    jdks = defs["jdks"]
    distros = defs["distros"]
    images = []
    for distro in distros:
        name = distro["name"]
        base = distro["base"]
        default = False
        if "default" in distro:
            default = distro["default"]
        # The distro name itself is always the first alias.
        aliases = [name]
        if "aliases" in distro:
            aliases.extend(distro["aliases"])
        distroJdks = distro["jdks"]
        latest = get_latest_jdk(distroJdks)
        lts = get_lts_jdk(jdks, distroJdks)
        for jdk in distroJdks:
            isLatest = jdk == latest
            isLts = jdk == lts
            tags = []
            for alias in aliases:
                tags.append(f"{jdk}-{alias}")
                if isLatest and jdk != "latest":
                    tags.append(f"latest-{alias}")
                if isLts:
                    tags.append(f"lts-{alias}")
                    # Bare alias tag points at the LTS jdk for that distro.
                    tags.append(alias)
            if default:
                tags.append(jdk)
                if isLatest:
                    tags.append("latest")
                if isLts:
                    tags.append("lts")
            fullTags = [f"ghcr.io/basicimg/jdk:{tag}" for tag in tags]
            images.append({
                "path": f"jdk/{jdk}/{name}",
                "generate": True,
                "base": base,
                "install": [distroJdks[jdk]],
                # Shell substitution evaluated inside the image, not here.
                "app": "$(java -version 2>&1 | head -n 1)",
                "tags": fullTags,
                "description": f"JDK {jdk} installed on {name}"
            })
    return images
| basicimg/images | basicimg-actions-generator/jdk.py | jdk.py | py | 2,187 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "yaml.safe_load",
"line_number": 26,
"usage_type": "call"
}
] |
34948989285 | import torch
import os
import logging
import datetime
import argparse
import json
import pandas as pd
import numpy as np
from tqdm import tqdm
from generative_utils import load_model, load_context, process_outputs
def inference(model, tokenizer, question, context, no_ans_threshold, ans_threshold, max_length=4096, stride=128, device="cuda", max_answer_length=64):
    """Answer *question* from *context* with a generative QA model.

    The context is split into overlapping windows (``stride`` tokens overlap,
    ``max_length`` tokens each); beam search generates candidate answers per
    window; candidates are filtered by ``no_ans_threshold`` /
    ``ans_threshold`` and the highest-scoring survivor is returned.

    Returns a dict with keys "text", "logit_score" and "no_answer_score"
    ("No answer found" with zero string scores when nothing passes the
    thresholds).
    """
    inputs = tokenizer.encode(context, question, max_length=max_length, truncation="only_first", stride=stride, padding=False, return_overflowing_tokens=True)
    answers = []
    for input in tqdm(inputs):
        input_ids = torch.tensor(input, device=device).unsqueeze(0)
        # generate answer: greedy beam search, 10 returned sequences per window
        with torch.no_grad():
            outputs = model.generate(input_ids,
                                    do_sample=False,
                                    num_beams=20,
                                    max_new_tokens=max_answer_length,
                                    num_return_sequences=10,
                                    eos_token_id=tokenizer.eos_token_id,
                                    pad_token_id=tokenizer.pad_token_id,
                                    return_dict_in_generate=True,
                                    output_scores=True
                                    )
        # Process outputs: pick the best candidate for this window.
        # NOTE(review): presumably returns (token ids, sequence score,
        # no-answer logit) — confirm against process_outputs in generative_utils.
        selected_sentence, sequence_score, no_answer_logit = process_outputs(outputs, input_ids, tokenizer, model, device)
        selected_sentence = selected_sentence.squeeze(0)
        # Keep only the generated continuation (drop the prompt tokens).
        answers.append((selected_sentence[input_ids.shape[1]:], sequence_score, no_answer_logit))
        del outputs
    # Decode token ids to text, keeping the scores alongside.
    for i, (answer, score, no_answer_logit) in enumerate(answers):
        answers[i] = (tokenizer.decode(answer.squeeze(0), skip_special_tokens=True), score, no_answer_logit)
    # Keep candidates that are confidently "answerable" and well-scored.
    answers = [answer for answer in answers if (answer[2][0]) < no_ans_threshold and answer[1][0] > ans_threshold]
    # Best sequence score first.
    answers.sort(key=lambda x: x[1][0], reverse=True)
    if len(answers) == 0:
        return {"text": "No answer found", "logit_score": "0", "no_answer_score": "0"}
    return {"text": answers[0][0], "logit_score": answers[0][1][0], "no_answer_score": answers[0][2][0]}
if __name__ == "__main__":
#Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='../configs/config.json')
args = parser.parse_args()
config = json.load(open(args.config, 'r'))
#Gather all hyperparameters
experiment_name = config['experiment_name']
model_id = config['model_id']
tokenizer_id = config['tokenizer_id']
device = config['device']
no_ans_threshold = config['no_ans_threshold']
ans_threshold = config['ans_threshold']
stride = config['stride']
Qlora = config['Qlora']
max_length = config['max_length']
max_answer_length = config['max_answer_length']
seed = config['seed']
#Set seed and create output directory
torch.manual_seed(seed)
np.random.seed(seed)
output_dir = "../outputs/" + experiment_name
if not os.path.exists(output_dir):
os.makedirs(output_dir)
else:
for file in os.listdir(output_dir):
os.remove(output_dir + "/" + file)
#Set up logging
log_file = os.path.join(output_dir, "inference_log_" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ".log")
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fh = logging.FileHandler(log_file)
fh.setLevel(logging.INFO)
logger.addHandler(fh)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.info("Starting evaluation for experiment: " + experiment_name)
logger.info("Model: " + model_id)
logger.info("Tokenizer: " + tokenizer_id)
logger.info("Device: " + device)
logger.info("No answer threshold: " + str(no_ans_threshold))
logger.info("Answer threshold: " + str(ans_threshold))
logger.info("Qlora: " + str(Qlora))
logger.info("Stride: " + str(stride))
logger.info("Max length: " + str(max_length))
logger.info("Max answer length: " + str(max_answer_length))
logger.info("Seed: " + str(seed))
#Load contexts
sinch_node_red = load_context("mmd/sinch_doc_node_red.mmd")
sinch_webhook = load_context("mmd/sinch_doc_how_to_webhook.mmd")
sinch_overview = load_context("mmd/sinch_doc_overview.mmd")
nougat_context = load_context("mmd/nougat.mmd")
#Load model and hyperparameters
model, tokenizer = load_model(model_id, Qlora=Qlora, device=device)
no_ans_threshold = no_ans_threshold
ans_threshold = ans_threshold
max_length = max_length
#Inference
df = pd.DataFrame(columns=['question', 'answer', 'logit_score', 'no_answer_probability'])
question = "What is Node RED ? "
answer = inference(model, tokenizer, question, sinch_node_red, no_ans_threshold=no_ans_threshold, ans_threshold=ans_threshold, device=device, stride=stride, max_length=max_length, max_answer_length=max_answer_length)
logger.info(question + " " + str(answer))
df = pd.concat([df, pd.DataFrame([[question, answer["text"], answer["logit_score"], answer["no_answer_score"]]], columns=['question', 'answer', 'logit_score', 'no_answer_probability'])])
"""question = "In few words, What is Node RED ? "
answer = inference(model, tokenizer, question, sinch_node_red, no_ans_threshold=no_ans_threshold, ans_threshold=ans_threshold, device=device, stride=stride, max_length=max_length, max_answer_length=max_answer_length)
logger.info(question + " " + str(answer))
df = pd.concat([df, pd.DataFrame([[question, answer["text"], answer["logit_score"], answer["no_answer_score"]]], columns=['question', 'answer', 'logit_score', 'no_answer_probability'])])
question = "What are the supported channels of Node RED ? "
answer = inference(model, tokenizer, question, sinch_node_red, no_ans_threshold=no_ans_threshold, ans_threshold=ans_threshold, device=device, stride=stride, max_length=max_length, max_answer_length=max_answer_length)
logger.info(question + " " + str(answer))
df = pd.concat([df, pd.DataFrame([[question, answer["text"], answer["logit_score"], answer["no_answer_score"]]], columns=['question', 'answer', 'logit_score', 'no_answer_probability'])])
question = "In which cases can I use Node RED ? "
answer = inference(model, tokenizer, question, sinch_node_red, no_ans_threshold=no_ans_threshold, ans_threshold=ans_threshold, device=device, stride=stride, max_length=max_length, max_answer_length=max_answer_length)
logger.info(question + " " + str(answer))
df = pd.concat([df, pd.DataFrame([[question, answer["text"], answer["logit_score"], answer["no_answer_score"]]], columns=['question', 'answer', 'logit_score', 'no_answer_probability'])])
question = "What are the differents nodes of Sinch Messaging ? "
answer = inference(model, tokenizer, question, sinch_node_red, no_ans_threshold=no_ans_threshold, ans_threshold=ans_threshold, device=device, stride=stride, max_length=max_length, max_answer_length=max_answer_length)
logger.info(question + " " + str(answer))
df = pd.concat([df, pd.DataFrame([[question, answer["text"], answer["logit_score"], answer["no_answer_score"]]], columns=['question', 'answer', 'logit_score', 'no_answer_probability'])])
question = "When was Node RED released ? "
answer = inference(model, tokenizer, question, sinch_node_red, no_ans_threshold=no_ans_threshold, ans_threshold=ans_threshold, device=device, stride=stride, max_length=max_length, max_answer_length=max_answer_length)
logger.info(question + " " + str(answer))
df = pd.concat([df, pd.DataFrame([[question, answer["text"], answer["logit_score"], answer["no_answer_score"]]], columns=['question', 'answer', 'logit_score', 'no_answer_probability'])])
question = "Give me the different steps to add a webhook to my app ? "
answer = inference(model, tokenizer, question, sinch_webhook, no_ans_threshold=no_ans_threshold, ans_threshold=ans_threshold, device=device, stride=stride, max_length=max_length, max_answer_length=max_answer_length)
logger.info(question + " " + str(answer))
df = pd.concat([df, pd.DataFrame([[question, answer["text"], answer["logit_score"], answer["no_answer_score"]]], columns=['question', 'answer', 'logit_score', 'no_answer_probability'])])
question = "What is the Sinch Conversation API ?"
answer = inference(model, tokenizer, question, sinch_overview, no_ans_threshold=no_ans_threshold, ans_threshold=ans_threshold, device=device, stride=stride, max_length=max_length, max_answer_length=max_answer_length)
logger.info(question + " " + str(answer))
df = pd.concat([df, pd.DataFrame([[question, answer["text"], answer["logit_score"], answer["no_answer_score"]]], columns=['question', 'answer', 'logit_score', 'no_answer_probability'])])
question = "Can I use the Sinch Conversation API with Viber Business ? "
answer = inference(model, tokenizer, question, sinch_overview, no_ans_threshold=no_ans_threshold, ans_threshold=ans_threshold, device=device, stride=stride, max_length=max_length, max_answer_length=max_answer_length)
logger.info(question + " " + str(answer))
df = pd.concat([df, pd.DataFrame([[question, answer["text"], answer["logit_score"], answer["no_answer_score"]]], columns=['question', 'answer', 'logit_score', 'no_answer_probability'])])
question = "Can I use the Sinch Conversation API with Outlook ? "
answer = inference(model, tokenizer, question, sinch_overview, no_ans_threshold=no_ans_threshold, ans_threshold=ans_threshold, device=device, stride=stride, max_length=max_length, max_answer_length=max_answer_length)
logger.info(question + " " + str(answer))
df = pd.concat([df, pd.DataFrame([[question, answer["text"], answer["logit_score"], answer["no_answer_score"]]], columns=['question', 'answer', 'logit_score', 'no_answer_probability'])])
question = "Where are the hosting locations for the Conversation API ? "
answer = inference(model, tokenizer, question, sinch_overview, no_ans_threshold=no_ans_threshold, ans_threshold=ans_threshold, device=device, stride=stride, max_length=max_length, max_answer_length=max_answer_length)
logger.info(question + " " + str(answer))
df = pd.concat([df, pd.DataFrame([[question, answer["text"], answer["logit_score"], answer["no_answer_score"]]], columns=['question', 'answer', 'logit_score', 'no_answer_probability'])])
question = "What are the specific pricing details for using the Sinch Conversation API ? "
answer = inference(model, tokenizer, question, sinch_overview, no_ans_threshold=no_ans_threshold, ans_threshold=ans_threshold, device=device, stride=stride, max_length=max_length, max_answer_length=max_answer_length)
logger.info(question + " " + str(answer))
df = pd.concat([df, pd.DataFrame([[question, answer["text"], answer["logit_score"], answer["no_answer_score"]]], columns=['question', 'answer', 'logit_score', 'no_answer_probability'])])
question = "How does the Sinch Conversation API handle multimedia content like images and videos ? "
answer = inference(model, tokenizer, question, sinch_overview, no_ans_threshold=no_ans_threshold, ans_threshold=ans_threshold, device=device, stride=stride, max_length=max_length, max_answer_length=max_answer_length)
logger.info(question + " " + str(answer))
df = pd.concat([df, pd.DataFrame([[question, answer["text"], answer["logit_score"], answer["no_answer_score"]]], columns=['question', 'answer', 'logit_score', 'no_answer_probability'])])
"""
#Save results
df = df.drop(['logit_score', 'no_answer_probability'], axis=1)
latex_code = df.to_latex(index=False, column_format="|p{5cm}|p{10cm}|", float_format=(lambda x: "%.3f" % x))
latex_code = latex_code.replace('\\toprule', '\hline')
latex_code = latex_code.replace('\\bottomrule', '\hline')
latex_code = latex_code.replace('\\\n', '\\ \hline\n')
latex_file_name = "/generative_qa_mistral_dpo.tex"
with open(output_dir + latex_file_name, 'w') as file:
file.write(latex_code) | Kreik2809/Open-Book-Question-Answering | inference/src/generative_inference.py | generative_inference.py | py | 12,340 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tqdm.tqdm",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "generative_utils.process_outputs... |
21694229487 | import numpy as np
import re
from io import StringIO
def GetChunkFromTextFile(FileName, StartStr, StopStr, skip_header=0, skip_footer=0, LastHit=True, DataType='array'):
    """Extract the text between StartStr and StopStr from a file.

    StartStr/StopStr are used as regex patterns (escape metacharacters if a
    literal match is intended).  DataType selects the return form:
      - 'array' (default): numpy array parsed with genfromtxt
      - 'float'          : single float
      - 'raw'            : plain string, with skip_header leading and
                           skip_footer trailing lines removed
    With LastHit=True the last occurrence in the file is used, otherwise the
    first.  Returns None if the file cannot be read, the markers are not
    found, or DataType is unrecognized.
    """
    DataType = DataType.lower()
    # Read the file.
    try:
        with open(FileName, 'r') as myfile:
            data = myfile.read()
    except OSError:
        print('Failed to open ' + FileName + '. Skipping.')
        return None
    # Non-greedy match of everything between the start and stop strings
    # (re.S lets '.' cross newlines).
    reout = re.compile('%s(.*?)%s' % (StartStr, StopStr), re.S)
    try:
        if LastHit == False:
            SectionStr = reout.search(data).group(1)
        else:
            SectionStr = reout.findall(data)[-1]
    except (AttributeError, IndexError):
        # search() returned None or findall() was empty: markers not present.
        return None
    if DataType == 'raw':
        # Apply skip_header and skip_footer on the raw text.
        lines = SectionStr.splitlines(True)[skip_header:]
        if skip_footer > 0:
            lines = lines[:-skip_footer]
        return ''.join(lines)
    if DataType == 'float':
        # np.float was removed in NumPy 1.24; the builtin float is equivalent.
        return float(SectionStr)
    if DataType == 'array':
        # Convert it into a numpy array.
        return np.genfromtxt(StringIO(SectionStr), skip_header=skip_header, skip_footer=skip_footer)
    # Unrecognized DataType: previously this raised NameError; return None.
    return None
| ZGainsforth/QEScripts | IR/GetChunkFromTextFile.py | GetChunkFromTextFile.py | py | 1,560 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "re.S",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "numpy.float",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.genfromtxt",
"line_number"... |
20883522665 | from unittest import result
from django.shortcuts import render
from django.http import Http404, HttpResponseRedirect
from results.models import WeatherStation
from .apps import ResultsConfig
import pmdarima as pm
import pandas as pd
import numpy as np
from datetime import date
# Create your views here.
def results(request):
    """Render the results page.

    POST: train/predict and cache the resulting contexts in the session.
    GET after a previous POST: re-render from the cached context.
    Direct GET with no prior submission: redirect to the start page.
    """
    if request.method == 'POST':
        #call trainmodel function
        context, envcontext = trainModel(request)
        # Cache both contexts so the page (and the env-impact page) survive
        # refreshes without retraining.
        request.session['access_pages'] = True
        request.session['results_context'] = context
        request.session['env_context'] = envcontext
        return render(request, 'results/results.html', context)
    elif 'access_pages' in request.session:
        return render(request, 'results/results.html', request.session['results_context'])
    else:
        return HttpResponseRedirect('/')
def trainModel(request):
    """Run the full prediction pipeline for one form submission.

    Reads the user's station/size/azimuth/tilt from request.POST, forecasts
    irradiance and grid demand, converts to kWh and savings, and returns
    (results context, environmental-impact context) for the templates.
    """
    #Get the inputs from the request
    station = request.POST['location_input']
    user_location = request.POST['location_name']
    size = request.POST['size']
    azimuth = request.POST['azimuth']
    tilt = request.POST['tilt']
    #Efficiency Matrix - for calculating solar panel efficiency based on tilt + azimuth
    # Keys are '<tilt>_<azimuth>'; values are relative panel efficiencies.
    efficiency_matrix = {
        'Horizontal_S': 0.897, '15_S': 0.965, '30_S': 1.00, '45_S': 0.998, '60_S': 0.956, '75_S': 0.877, 'Vertical_S': 0.765,
        'Horizontal_SE/SW': 0.897, '15_SE/SW': 0.936, '30_SE/SW': 0.951, '45_SE/SW': 0.936, '60_SE/SW': 0.890, '75_SE/SW': 0.818, 'Vertical_SE/SW': 0.720,
        'Horizontal_E/W': 0.897, '15_E/W': 0.865, '30_E/W': 0.825, '45_E/W': 0.779, '60_E/W': 0.724, '75_E/W': 0.659, 'Vertical_E/W': 0.585,
        'Horizontal_NE/NW': 0.897, '15_NE/NW': 0.790, '30_NE/NW': 0.685, '45_NE/NW': 0.600, '60_NE/NW': 0.534, '75_NE/NW': 0.480, 'Vertical_NE/NW': 0.429,
        'Horizontal_N': 0.897, '15_N': 0.757, '30_N': 0.629, '45_N': 0.518, '60_N': 0.431, '75_N': 0.387, 'Vertical_N': 0.354
    }
    #Get the Irradiance
    irradiance = getIrradiance(station)
    #Get the grid demand
    demand = getDemand()
    #kwh per month generated
    generated_KwH = getKwH(irradiance, size, azimuth, tilt, efficiency_matrix)
    #amount of savings per month
    savings, prices = getPrices(list(demand.Demand.values), generated_KwH)
    #get values in correct format
    yearly_vals, monthly_vals, result_specs, envcontext = arrangeData(size, demand, generated_KwH, prices, savings)
    # Merge the user's inputs into the result specs for display.
    result_specs = dict(result_specs, **{'user_location': user_location, 'size': size, 'azimuth': azimuth, 'tilt': tilt})
    #Create context
    context = {'yearly_vals': yearly_vals, 'monthly_vals': monthly_vals, 'result_specs': result_specs, 'active': 'results'}
    #return render(request, 'results/results.html', context)
    return context, envcontext
#Forecast Global Irradiance - Jcm^2
def getIrradiance(station):
    """Forecast 20 years of monthly global irradiance for a weather station.

    Fits a seasonal ARIMA (pmdarima.auto_arima) to the station's historic
    GLORAD series, with max temperature (maxtp) as an exogenous regressor,
    then returns a list of monthly predictions starting from the current
    month and extending 20 years.
    """
    #Try catch for weather station - raise Http404 if it does not exist in DB
    #Should not raise error however as input is validated before request sent
    try:
        #'timeseries' is the weather station's values as a dataframe
        timeseries = pd.DataFrame(list(WeatherStation.objects.filter(LOCATION=station).values()))
    except WeatherStation.DoesNotExist:
        # NOTE(review): QuerySet.filter never raises DoesNotExist; an unknown
        # station yields an empty DataFrame instead -- confirm this guard works.
        raise Http404("Weather Station does not exist")
    #Convert 'DATE' to datetime object to be used by SARIMAX model and set as index
    # NOTE(review): '%Y-%d-%m' puts day before month -- confirm intended.
    timeseries['DATE'] = pd.to_datetime(timeseries['DATE'])
    timeseries['DATE'] = timeseries["DATE"].dt.strftime('%Y-%d-%m')
    timeseries = timeseries.set_index("DATE")
    #Create the model
    weather_model = pm.auto_arima(timeseries.GLORAD, exogenous=timeseries.maxtp.values.reshape(-1, 1),
                                  start_p=0, start_q=0,
                                  test='adf',
                                  max_p=3, max_q=3, m=12,
                                  start_P=0, seasonal=True,
                                  max_P= 0, max_Q= 0, start_Q= 0,
                                  d=0, D=1, trace=False,
                                  error_action='ignore',
                                  suppress_warnings=False,
                                  stepwise=True)
    #Exogenous values needed for predictions
    # Pad and repeat the historic maxtp series so it covers the forecast horizon.
    eX = timeseries['maxtp'].values.reshape(-1, 1)
    eX = np.concatenate( (eX, eX[-12:-8] ) )
    eX = np.repeat(eX, 3)
    eX = eX.reshape(-1,1)
    #Get the number of months elapsed so there is parity between the predictions and when the predictions are made
    end_date = date.today()
    start_date = date(2021, 8, 31)
    num_months = (end_date.year - start_date.year) * 12 + (end_date.month - start_date.month)
    # Forecast 20 years + the elapsed months, then drop the months already past.
    irradiance = list(weather_model.predict(n_periods = (20*12)+num_months, exogenous=eX[:(20*12)+num_months]))[num_months:]
    return irradiance
#Convert Irradiance into the costs
def getKwH(irradiance, size, azimuth, tilt, efficiency_matrix):
    """Convert monthly global irradiance (J/cm^2) into estimated kWh generated.

    Formula used: Output (kWh) = 0.8 * kWp * S * E, where kWp is the
    installed peak power (``size``), S the solar irradiance in kWh/m^2 and
    E the panel efficiency for the given roof orientation and tilt.

    irradiance       : iterable of monthly global irradiance values (J/cm^2)
    size             : installed peak power in kW (string or number)
    azimuth, tilt    : combined as '<tilt>_<azimuth>' to key efficiency_matrix
    efficiency_matrix: mapping of orientation keys to efficiency factors
    Returns a list of monthly kWh estimates (same length as irradiance).
    """
    # 1 J/cm^2 is *roughly* 0.0027777777777778 kWh/m^2.
    J_CM2_TO_KWH_M2 = 0.0027777777777778
    efficiency = efficiency_matrix[tilt + '_' + azimuth]
    peak_kw = float(size)
    # (Removed unused local `Z` from the original implementation.)
    return [0.8 * peak_kw * (monthly_glorad * J_CM2_TO_KWH_M2) * efficiency
            for monthly_glorad in irradiance]
#Forecast Grid Demand
def getDemand():
    """Forecast monthly grid demand for the next 20 years.

    Uses the pre-trained model from ResultsConfig to forecast daily demand,
    aggregates it to calendar months, and returns a DataFrame (column
    'Demand') starting after the months already elapsed since the training
    cut-off (2019-12-30).
    """
    #get number of days elapsed between now and end of training set
    end_date = date.today()
    start_date = date(2019, 12, 30)
    num_days = (end_date - start_date).days
    num_months = (end_date.year - start_date.year) * 12 + (end_date.month - start_date.month)
    #make prediction
    gridDemand_model = ResultsConfig.gridDemand_model
    demand = gridDemand_model.forecast(num_days+(20*366))
    #turn prediction into dataframe then group by months
    demand_df = pd.DataFrame(list(demand), list(demand.index), columns = ['Demand'])
    demandMonths_df = demand_df.groupby(pd.Grouper(freq="M"))
    demandMonths_df = demandMonths_df.sum()
    # NOTE(review): /10 rescales the summed demand -- confirm the units this
    # factor is meant to produce.
    demandMonths_df.Demand = demandMonths_df.Demand/10
    # Drop the months that have already elapsed since the training cut-off.
    return demandMonths_df[num_months+1:]
def getPrices(demand, generated_KwH):
    """Predict the electricity price for each month's demand and the
    resulting monthly savings from the generated kWh.

    Returns (savings, prices), both lists aligned with the input months.
    """
    model = ResultsConfig.elecPrices_model
    # One price prediction per monthly demand figure (model expects a 1x1 array).
    prices = [model.predict(np.array([month_demand]).reshape(1, 1))[0]
              for month_demand in demand]
    savings = []
    for price, kwh in zip(prices, generated_KwH):
        savings.append(price * kwh)
    return savings, prices
return savings, prices
def arrangeData(size, demand, generated_KwH, prices, savings):
    """Shape the forecasts into template-ready yearly/monthly structures.

    Computes the SEAI-style grant band from the system size, the net
    investment cost, the payback horizon (years until cumulative savings
    cover the cost), and per-year / per-month breakdowns for the charts.
    Returns (yearly_vals, monthly_vals, result_specs, envimpact).
    """
    # Align lengths: demand can be one month longer than the kWh forecast.
    if(len(demand.index) > len(generated_KwH)):
        demand = demand[1:]
    # NOTE(review): columns=[[...]] (nested list) is unusual -- confirm the
    # intended column index shape before changing it.
    data_df = pd.DataFrame(list(zip(generated_KwH, prices, savings)), columns = [['KwH', 'ElecCost', 'Savings']], index=demand.index)
    yearly_totals = data_df.groupby(pd.Grouper(freq="Y"))
    yearly_totals = yearly_totals.sum()
    #Get the number of months elapsed so there is parity between the predictions and when the predictions are made
    # NOTE(review): end_date/start_date are computed but never used here.
    end_date = date.today()
    start_date = date(2021, 8, 31)
    # Grant bands by installed size (kW).
    if float(size) < 1:
        grant = 0
    elif 1 <= float(size) < 2:
        grant = 900
    elif 2 <= float(size) < 3:
        grant = 1800
    elif 3 <= float(size) < 4:
        grant = 2100
    elif float(size) >= 4:
        grant = 2400
    # Installation cost estimated at 1900 per kW, minus the grant.
    initial_cost = float(size) * 1900
    final_cost = initial_cost - float(grant)
    total = 0.0
    end_year = 0
    # Find the first year where cumulative savings cover the net cost.
    for index, year in enumerate(yearly_totals.Savings.values.ravel().tolist()):
        total += int(year)
        if total >= final_cost:
            end_year = index
            break
    twenty_year_savings = round(sum(yearly_totals.Savings.values.ravel().tolist()), 2)
    # Keep only the years up to payback for the charts.
    yearly_totals = yearly_totals.iloc[:end_year]
    yearly_KwH = yearly_totals.KwH.values.ravel().tolist()
    yearly_savings = yearly_totals.Savings.values.ravel().tolist()
    yearly_labels = yearly_totals.index.strftime("%Y").tolist()
    yearly_vals = {'yearly_KwH': yearly_KwH, 'yearly_savings': yearly_savings, 'yearly_labels': yearly_labels}
    monthly_vals = {}
    # Per-year monthly breakdowns (labels are 3-letter month names).
    for year in yearly_labels:
        df = data_df[data_df.index.year == int(year)]
        vals = {'monthly_KwH': df.KwH.values.ravel().tolist(),
                'monthly_savings': df.Savings.values.ravel().tolist(),
                'monthly_elec': df.ElecCost.values.ravel().tolist(),
                'monthly_labels': df.index.month_name().str.slice(stop=3).tolist()}
        monthly_vals[year] = vals
    # Environmental impact is based on the average yearly kWh.
    envimpact = getEnvImpact(sum(yearly_KwH)/len(yearly_KwH))
    result_specs = {'investment_cost': int(final_cost), 'payback': len(yearly_labels), '20_year_savings': twenty_year_savings}
    return yearly_vals, monthly_vals, result_specs, envimpact
def getEnvImpact(KwH):
    """Build the environmental-impact context for an average yearly kWh figure.

    Returns a template context of stringified, 2-decimal-rounded offsets.
    """
    # 0.23314 kg of CO2 offset per kWh of solar generation.
    co2_kg = KwH * 0.23314
    # One car emits roughly 2.75 tonnes (2750 kg) of CO2 per year.
    cars = co2_kg / 2750
    # A fully grown tree absorbs ~22 kg of CO2 a year; a sapling ~5.9 kg.
    trees = co2_kg / 22
    saplings = co2_kg / 5.9
    return {
        'co2_reduction': str(round(co2_kg, 2)),
        'car_offset': str(round(cars, 2)),
        'KwH': round(KwH, 2),
        'tree_offset': str(round(trees, 2)),
        'sapling_offset': str(round(saplings, 2)),
        'active': 'envimpact',
    }
{
"api_name": "django.shortcuts.render",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 23,
"usage_type": "call"
},
{
"... |
16445576901 | """ Author: Daniel J. Sawtelle
*** Purpose: Bombard the given URL with randomized form return data
***
*** Source: https://www.youtube.com/watch?v=UtNYzv8gLbs
"""
import os
import random
import string
import json
import time
import requests
""" Function - Return a string object of a date formatted as specified
*** start: First date possible to select
*** end: Last date possible to select
*** format: Structure of the date string being returned
*** prop: Proportion of the distance to jump into the specified date range
***
*** Source: https://stackoverflow.com/questions/553303/generate-a-random-date-between-two-other-dates
"""
def str_time_prop(start, end, format, prop):
    """Return a date string `prop` (0..1) of the way between start and end.

    Both endpoints and the result use the same strptime/strftime `format`;
    the interpolation happens on local-time epoch seconds.
    """
    # Parse the endpoints into epoch seconds (local time).
    start_epoch = time.mktime(time.strptime(start, format))
    end_epoch = time.mktime(time.strptime(end, format))
    # Linear interpolation between the two epochs.
    target_epoch = start_epoch + prop * (end_epoch - start_epoch)
    return time.strftime(format, time.localtime(target_epoch))
#Seed the random instance for generating data for the bombardment
# BUG FIX: the original line was `random.seed = (os.urandom(1024))`, which
# *replaced* random.seed with a bytes object instead of calling it, leaving
# the RNG unseeded and breaking any later call to random.seed(...).
random.seed(os.urandom(1024))

#URL of the address to spam with data
url = 'AddressOfScammerGoesHere'
"""---------------- Main Function : Bombard the URL with randomized data ----------------"""
#Get an object of the list of names (first and last), streets, and companies to use for data spamming
fNames = json.loads(open('FirstNames.json').read())
lNames = json.loads(open('LastNames.json').read())
street = json.loads(open('StreetNames.json').read())
company = json.loads(open('CompanyNames.json').read())
country = json.loads(open('USVariations.json').read())
na = json.loads(open('NAVariations.json').read())
#Track the number of data bombardments done during this script call
dataCount = 1
while True:
#Generate a random city/state pairing
state = random.choice(json.loads(open('StateAbbreviations.json').read()))
city = random.choice(json.loads(open('StateCities\\'+ state + 'Cities.json').read()))
#Person Information
PName = random.choice(fNames) + ' ' + random.choice(lNames)
PAppartmentNumber = str(random.randint(1, 999))
if random.choice([True, False]):
PAppartmentNumber = random.choice(na)
PAddress = str(random.randint(1, 10000)) + ' ' + random.choice(street)
PCity = city
PState = state
PZip = ''.join(random.choice(string.digits) for i in range(5))
PCountry = random.choice(country)
PPhoneNumber = '(' + ''.join(random.choice(string.digits) for i in range(3)) + ') ' + ''.join(random.choice(string.digits) for i in range(3)) + '-' + ''.join(random.choice(string.digits) for i in range(4))
#Employer Information
EName = random.choice(company)
EEIN = ''.join(random.choice(string.digits) for i in range(2))
if random.choice([True, False]):
EEIN = ''.join(random.choice(string.ascii_letters)) + EEIN
if random.choice([True, False]):
EEIN = EEIN + '-'
EEIN = EEIN + ''.join(random.choice(string.digits) for i in range(7))
if random.choice([True, False]):
EEIN = EEIN + ''.join(random.choice(string.digits))
EAddress = ''.join(random.choice(string.digits) for i in range(4)) + ' ' + random.choice(street)
ECity = city
EState = state
EZip = PZip[:3] + ''.join(random.choice(string.digits) for i in range(2))
ECountry = random.choice(country)
EPhoneNumber = '(' + ''.join(random.choice(string.digits) for i in range(3)) + ') ' + ''.join(random.choice(string.digits) for i in range(3)) + '-' + ''.join(random.choice(string.digits) for i in range(4))
#Government/Financial Information
EDOB = str_time_prop('01/01/1970', '12/31/2011', '%m/%d/%Y', random.random())
ESSN = ''.join(random.choice(string.digits) for i in range(3)) + '-' + ''.join(random.choice(string.digits) for i in range(2)) + '-' +''.join(random.choice(string.digits) for i in range(4))
EDLNumber = 'D' + ''.join(random.choice(string.digits) for i in range(8))
EState = state
EDLIssueDate = str_time_prop('01/01/1970', '12/31/1970', '%m/%d/%Y', random.random())[:-4] + str(int(EDOB[-4:]) + random.randrange(16, 35))
EDLExpireDate = EDOB[:-4] + str(int(EDOB[-4:]) + 6)
if state == 'AZ':
EDLExpireDate = EDOB[:-4] + str(int(EDOB[-4:]) + 65)
AGI = ''.join(str(random.randint(1, 99))) + ',' + ''.join(random.choice(string.digits) for i in range(3)) + '.' + ''.join(random.choice(string.digits) for i in range(2))
if random.choice([True, False]):
AGI = '$' + AGI
if random.choice([True, False]):
AGI = str(random.randint(0, 87986)) + '.' + ''.join(random.choice(string.digits) for i in range(2))
if random.choice([True, True, False, False, False]):
notApp = random.choice(na)
EName = notApp
EEIN = notApp
EAddress = random.choice(na)
ECity = notApp
EState = notApp
EZip = notApp
ECountry = notApp
AGI = random.choice([notApp, "0"])
if random.choice([True, False, False, False]):
EName = random.choice(["Self", "Self Employed", "self empl.", "self employed"])
AGI = ''.join(str(random.randint(1, 4))) + ',' + ''.join(random.choice(string.digits) for i in range(3)) + '.' + ''.join(random.choice(string.digits) for i in range(2))
if random.choice([True, False]):
AGI = '$' + AGI
if random.choice([True, False]):
AGI = str(random.randint(0, 4999)) + '.' + ''.join(random.choice(string.digits) for i in range(2))
#Send the data bombardment to the URL
requests.post(url, allow_redirects=False, data={
'textfield' : PName,
'textfield2' : PAppartmentNumber,
'textfield3' : PAddress,
'textfield4' : PCity,
'textfield5' : PState,
'textfield6' : PZip,
'textfield7' : PCountry,
'textfield8' : PPhoneNumber,
'textfield9' : EName,
'textfield18': EEIN,
'textfield10': EAddress,
'textfield11': ECity,
'textfield12': EState,
'textfield13': EZip,
'textfield14': ECountry,
'textfield15': EPhoneNumber,
'textfield16': EDOB,
'textfield17': ESSN,
'textfield19': EDLNumber,
'textfield20': EState,
'textfield22': EDLIssueDate,
'textfield23': EDLExpireDate,
'textfield21': AGI,
'Submit': 'UAccess - CARES Fund'
})
#Display general random bombardment information sent this generation
print(str(dataCount) + ' Sending Data - ')
print(' Name : ' + PName)
print(' Apartment: ' + PAppartmentNumber)
print(' Address : ' + PAddress)
print(' City : ' + PCity)
print(' State : ' + PState)
print(' Zip Code : ' + PZip)
print(' Country : ' + PCountry)
print(' Phone : ' + PPhoneNumber)
print(' Employer : ' + EName)
print(' EIN : ' + EEIN)
print(' Address : ' + EAddress)
print(' City : ' + ECity)
print(' State : ' + EState)
print(' Zip : ' + EZip)
print(' Country : ' + ECountry)
print(' Phone : ' + EPhoneNumber)
print(' DOB : ' + EDOB)
print(' SSN : ' + ESSN)
print(' DL Number: ' + EDLNumber)
print(' DL Issued: ' + EDLIssueDate)
print(' DL Expire: ' + EDLExpireDate)
print(' AGI : ' + AGI)
#Increment the Bombardment Count
dataCount = dataCount + 1 | DSawtelle/ScammerSpammer | scammerSpammer.py | scammerSpammer.py | py | 7,010 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.mktime",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "time.strptime",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "time.mktime",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "time.strptime",
"line_numbe... |
39279700922 | from plot_model import plot_results
import glob
from astropy.io import fits
import tensorflow as tf
import numpy as np
import time
def weight_variable(shape):
    """Create a trainable weight tensor initialised from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a trainable bias tensor initialised to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv(x, W):
    """1-D convolution of x with filter W, stride 1, 'SAME' (zero) padding."""
    stride = 1
    return tf.nn.conv1d(x, W, stride, 'SAME')
def max_pool(x, width):
    """Non-overlapping 1-D max pooling with window size and stride both equal to width."""
    window = [width]
    return tf.nn.pool(x, window, 'MAX', 'SAME', strides=window)
'''
start
'''
# NOTE(review): this script uses the TensorFlow 1.x graph API
# (tf.placeholder / tf.Session); it will not run unmodified on TF 2.x.
# Load every LAMOST DR1 FITS spectrum and one-hot encode its class label.
files = glob.glob('/data2/mrs493/DR1_3/*.fits')
samples = len(files)
classes = ['STAR', 'GALAXY', 'QSO', 'Unknown']
cls = len(classes)
flux = []
CLASS = []
# Truncate every spectrum to a fixed number of wavelength bins.
wavelengths = 3800
for idx, file in enumerate(files):
    with fits.open(file) as hdulist:
        flx = hdulist[0].data[0]
        flx = flx[:wavelengths]
        CLS = hdulist[0].header['CLASS']
    flux.append(flx)
    CLASS.append([0]*cls)
    CLASS[-1][classes.index(CLS)] = 1
flux = np.array(flux)
CLASS = np.array(CLASS)
# Print the per-class sample counts.
for i in range(cls):
    print(classes[i], ': ', np.sum([x[i] for x in CLASS]))
'''
end
'''
# Hyperparameters: train split, minibatch fraction, pooling widths (pw*),
# conv filter widths (width*), channel counts (inter*), dropout keep prob.
train_frac = 0.7
batch_frac= 0.025
pw0 = 4
width1 = 50
inter1 = 32
pw1 = 10
width2 = width1
inter2 = 2*inter1
pw2 = 10
inter3 = 1000
keep = 0.5
record = 100
train_steps = 3000
# Number of wavelength bins left after the three pooling stages.
f_wavs = wavelengths
for pw in [pw0, pw1, pw2]:
    f_wavs = int(np.ceil(f_wavs/pw))
# Random train/test split.
split = np.random.random(samples)<=train_frac
x_train = np.array(flux[split])
x_test = np.array(flux[[not s for s in split]])
y_train = np.array(CLASS[split])
y_test = np.array(CLASS[[not s for s in split]])
x = tf.placeholder(tf.float32, shape = [None, wavelengths])
y_ = tf.placeholder(tf.float32, shape = [None, cls])
# Layer 1: downsample, then conv + ReLU.
i_l1 = tf.reshape(x, [-1, wavelengths, 1])
m_l1 = max_pool(i_l1, pw0)
W_l1 = weight_variable([width1, 1,inter1])
b_l1 = bias_variable([inter1])
o_l1 = tf.nn.relu(conv(m_l1, W_l1) + b_l1)
# Layer 2: pool, conv + ReLU.
i_l2 = max_pool(o_l1, pw1)
W_l2 = weight_variable([width2, inter1,inter2])
b_l2 = bias_variable([inter2])
o_l2 = tf.nn.relu(conv(i_l2, W_l2) + b_l2)
# Layer 3: pool, flatten, fully connected + ReLU.
i_l3 = max_pool(o_l2, pw2)
m_l3 = tf.reshape(i_l3, [-1, f_wavs*inter2])
W_l3 = weight_variable([f_wavs*inter2, inter3])
b_l3 = tf.Variable(tf.zeros([inter3]))
o_l3 = tf.nn.relu(tf.matmul(m_l3, W_l3) + b_l3)
# Dropout + final linear layer producing class logits.
keep_prob= tf.placeholder(tf.float32)
i_l4 = tf.nn.dropout(o_l3, keep_prob)
W_l4 = weight_variable([inter3, cls])
b_l4 = bias_variable([cls])
y = tf.matmul(i_l4, W_l4) + b_l4
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = y_, logits = y))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
accuracies = []
confusion = tf.confusion_matrix(tf.argmax(y,1), tf.argmax(y_,1))
with tf.Session() as sess:
    t = time.time()
    sess.run(tf.global_variables_initializer())
    for i in range(train_steps):
        # Draw a fresh random minibatch each step.
        batch = np.random.random(len(x_train))<=batch_frac
        batch_x = x_train[batch]
        batch_y = y_train[batch]
        if i%record == 0:
            # Periodically evaluate on the held-out test set (no dropout).
            train_accuracy = sess.run(accuracy, feed_dict={x: x_test, y_: y_test, keep_prob: 1.0})
            print('step {} training accuracy {}'.format(i, train_accuracy))
            accuracies.append([i, train_accuracy])
        train_step.run(feed_dict={x: batch_x, y_: batch_y, keep_prob: keep})
    # Final evaluation: confusion matrix and accuracy on the test set.
    conf, acc = sess.run([confusion, accuracy], feed_dict={x: x_test, y_: y_test, keep_prob: 1.0})
    print('test accuracy {}'.format(acc))
    print(conf)
    accuracies.append([i+1, acc])
    np.savetxt('Files/LAMOST_conv/classes.csv', classes, fmt = '%s', delimiter = ',')
    np.savetxt('Files/LAMOST_conv/confusion.csv', conf, fmt = '%i', delimiter = ',')
    np.savetxt('Files/LAMOST_conv/accuracies.csv', accuracies, delimiter = ',')
    print('training time: ', time.time() - t, 's')
plot_results('LAMOST_conv')
{
"api_name": "tensorflow.truncated_normal",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tensorflow.Variable",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tensorflow.constant",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "t... |
20063508538 | from django.urls import path
from api import views
from rest_framework_simplejwt.views import (
TokenRefreshView,
)
# URL namespace for reverse() lookups, e.g. reverse('api:routes').
app_name = 'api'

urlpatterns = [
    path('', views.getRoutes,name='routes'),
    # JWT endpoints: obtain an access/refresh token pair, and refresh it.
    path('token/', views.MyTokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
    path('register/', views.RegisterView.as_view(), name='auth_register'),
    path('test/', views.testEndPoint, name='test'),
    # Same view class serves both a single lockbox entry (by website) and
    # the list/create endpoint.
    path('get/<str:website>/', views.LockBoxAPIView.as_view(), name='get_lockbox'),
    path('lockbox/', views.LockBoxAPIView.as_view(), name='lockbox'),
]
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "api.views.getRoutes",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "api.views",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.urls.path"... |
2666772723 | import pandas as pd
from matplotlib import pyplot as plt
# Load the country population dataset (expects columns: country, year, population).
data = pd.read_csv("countries.csv")
print(data)
#Compare population growth in the US and China
us = data[data.country == "United States"]
china = data[data.country == "China"]
print(us)
print(china)
#Plot US and China population growth
# Populations are divided by 10**6 so the y-axis is in millions.
# NOTE(review): axis label could say "population (millions)" to match.
plt.plot(us.year, us.population / 10**6)
plt.plot(china.year, china.population / 10**6)
plt.legend(["United States", "China"])
plt.xlabel("year")
plt.ylabel("population")
plt.show()
{
"api_name": "pandas.read_csv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.py... |
10532200845 | import requests
from datetime import datetime
USERNAME = "kristijan"
TOKEN = "hafdaga134312"
pixela_endpoint = "https://pixe.la/v1/users"
user_params = {
"token": "hafdaga134312",
"username": "kristijan",
"agreeTermsOfService": "yes",
"notMinor": "yes",
}
#response = requests.post(url=pixela_endpoint, json=user_params)
#print(response.text)
graph_endpoint = f"{pixela_endpoint}/{USERNAME}/graphs"
graph_config = {
"id": "graph1",
"name": "Reading Graph",
"unit": "pages",
"type": "int",
"color": "shibafu"
}
today = datetime.now()
headers = {
"X-USER-TOKEN": TOKEN
}
pixel = {
"date": today.strftime("%Y%m%d"),
"quantity": input("How many pages have you red today? "),
}
#response = requests.post(url=graph_endpoint, json=graph_config, headers=headers)
#print(response.text)
#response = requests.delete(url="https://pixe.la/v1/users/kristijan/graphs/graph1/20220125", headers=headers)
#print(response.text)
response = requests.post(url="https://pixe.la/v1/users/kristijan/graphs/graph1", json=pixel, headers=headers)
print(response.text)
| Kvidakovic1/Python-Exercises | Habit_Tracking/main.py | main.py | py | 1,106 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "requests.post",
"line_number": 51,
"usage_type": "call"
}
] |
1674184175 | from django.db import models, connection
# Create your models here.
class News(models.Model):
news_id = models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')
news_title = models.CharField(max_length=200)
news_source = models.CharField(max_length=50)
news_content = models.TextField(default="")
publish_time = models.DateTimeField(null=True)
news_status = models.CharField(max_length=2,default="0")
create_time = models.DateTimeField(auto_now=True)
update_time = models.DateTimeField(null=True)
def __str__(self):
return self.news_title
def getNewsByNewsId(news_id):
sql = 'SELECT * FROM portal_news WHERE news_id=%s'
params = list()
params.append(news_id)
dao = DaoSupport()
return dao.selectOne(sql,params)
def getNewsList(params=None):
condition = params[0]
news_status = condition.get('news_status')
selectsql = " SELECT news_id,news_title,news_source,news_content,date_format(publish_time, '%%Y-%%m-%%d %%H:%%i:%%s') "
fromsql = " FROM portal_news WHERE 1 = 1 "
if news_status is not None:
fromsql += " AND news_status = %s "
dao = DaoSupport()
return dao.selectPagination(selectsql, fromsql, params)
class DaoSupport(object):
def selectOne(self,sql,params):
with connection.cursor() as cursor:
cursor.execute(sql,params)
row = cursor.fetchone()
return row
def selectList(self,sql,params):
with connection.cursor() as cursor:
cursor.execute(sql,params)
row = cursor.fetchall()
return row
'''
分页查询 字段类型为 数字或字符串 如有datetime等格式 需进行格式转化
'''
def selectPagination(self,selectsql,fromsql,params):
countsql = ' SELECT COUNT(1) '
pagesql = ' limit %s, %s'
condition = params[0]
conditionparams = [condition.get(k) for k in condition]
with connection.cursor() as cursor:
if len(conditionparams) == 0:
cursor.execute(countsql + fromsql)
else:
cursor.execute(countsql + fromsql,conditionparams)
row = cursor.fetchone()
total_count = row[0]
sql = selectsql + fromsql + pagesql
print('sql:%s,params:%s' % (sql,params))
page = Page(total_count,params[-2],params[-1])
params[-2] = page.current_index
conditionparams.append(params[-2])
conditionparams.append(params[-1])
cursor.execute(sql,conditionparams)
rows = cursor.fetchall()
page.setRows(rows)
return page.to_dict()
class Page(object):
# 总数量total_count
# 当前页current_page
# 每页显示个数pagenum
# 当前索引current_index
# 总页数total_page
# 列表rows
def __init__(self, total_count, current_page, pagenum):
self.total_count = total_count
self.total_page = (total_count+pagenum-1)//pagenum
self.current_page = 1 if current_page <= 0 else ( self.total_page if current_page > self.total_page else current_page)
self.pagenum = pagenum
self.current_index = (current_page-1)*pagenum
def setRows(self, rows):
self.rows = rows
def to_dict(self):
dict = {}
dict.update(self.__dict__)
return dict
| chundonghan/pysite | portal/models.py | models.py | py | 3,193 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "... |
28876927609 | # 配置信息---将用户自定义配置文件及默认配置文件合成一个
import importlib
import os
from lib.conf import global_settings
class Settings():
def __init__(self):
# 获取默认配置文件的内容写入到Settings类的名称空间
for name in dir(global_settings):
if name.isupper():
value = getattr(global_settings, name)
setattr(self, name, value)
# 获取用户自定义配置的文件内容,写入到Settings类的名称空间
user_settings = os.environ.get('USER_SETTINGS')
if not user_settings:
return
# m = importlib.import_module('config.settings')
m = importlib.import_module('config.settings')
for name in dir(m):
if name.isupper():
value = getattr(m, name)
setattr(self, name, value)
settings = Settings()
| Zhu-GF/AutoGatheringAsserts | lib/conf/config.py | config.py | py | 915 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "lib.conf.global_settings",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "lib.conf.global_settings",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "os.environ.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name... |
15295704029 | from django.urls import include, path
from rest_framework import routers
from . import views
app_name = 'articles'
router = routers.DefaultRouter()
router.register(r'articles', views.ArticleViewSet)
urlpatterns = [
# path('', views.article_list, name="list"),
path('', include(router.urls)),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
# path('create/', views.article_create, name="create"),
# path('delete/<str:id>', views.article_delete, name="delete"),
# path('upload/', views.article_upload, name="upload"),
# #str:slug should be last because of regex scanning
# path('<str:slug>/', views.article_details, name='detail'),
]
| Dban1/myDjangoTraining | cynoblog/articles/urls.py | urls.py | py | 698 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "rest_framework.routers",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_na... |
36164549179 | from blog.models import Post
from django.urls import path
from . import views
# using django view
# from .views import PostListView, PostDetailView, PostCreateView, PostUpdateView, PostDeleteView, post
# urlpatterns = [
# path("", PostListView.as_view(), name="blog-home"),
# path("post/<int:pk>", PostDetailView.as_view(), name="post-detail"),
# path("post/new", PostCreateView.as_view(), name="post-create"),
# path("post/<int:pk>/update", PostUpdateView.as_view(), name="post-update"),
# path("post/<int:pk>/delete", PostDeleteView.as_view(), name="post-delete"),
# path("about/", views.about, name="blog-about")
# ]
#using view function
urlpatterns = [
path("", views.home, name="blog-home"),
path("<int:post_id>/", views.post, name="post-detail"),
path("new/", views.post_create, name="post-create"),
path("<int:post_id>/update/", views.post_update, name="post-update"),
path("<int:post_id>/delete/", views.post_delete, name="post-delete"),
]
| nubcakee/django-basic-template | blog/urls.py | urls.py | py | 996 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.urls.path",... |
4014075302 | # 문제 출처 : https://programmers.co.kr/learn/courses/30/lessons/42839?language=python3
from itertools import permutations
import math
def solution(numbers):
answer = 0
'''
오류코드
arr = list(map(int, list(numbers)))
for per in permutations(list(numbers)):
arr.append(int(''.join(per)))
print(set(arr))
배열에 제대로 순열이 안들어감
'''
new_numbers = list(numbers)
for i in range(2, len(numbers)+1):
per = list(permutations(numbers, i))
for j in per:
if len(j) <= len(numbers):
new_numbers.append(''.join(j))
# print(new_numbers)
new_numbers = list(set([int(x) for x in new_numbers]))
# print(new_numbers)
for num in new_numbers:
if primetester(num) == True:
answer += 1
return answer
def primetester(n):
if n <= 0 or n % 1 != 0:
return False
elif n == 1:
return False
else:
num = math.sqrt(n)
for i in range(2, int(num + 1)):
if n % i == 0:
return False
return True
| ThreeFive85/Algorithm | Programmers/level2/findPrime/find_prime.py | find_prime.py | py | 1,102 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "itertools.permutations",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 38,
"usage_type": "call"
}
] |
39752425383 | import MySQLdb
class InsertsEmprestimo:
def __init__(self):
self.con = ""
def conecta(self):
host = "localhost"
user = "ProjetoFinal"
password = "123456"
db = "db_biblioteca"
port = 3306
self.con = MySQLdb.connect(host, user, password, db, port)
def insertEmprestimo(self, setEmprestimo): #Inserindo dados na tabela tbl_emprestimo
self.conecta()
cur = self.con.cursor()
query = "INSERT INTO tbl_Emprestimo (retirada, devolucao, FK_Bibliotecario," \
" FK_Cliente) VALUES("'%s'")" %(setEmprestimo)
print(query)
cur.execute(query)
self.con.commit()
self.con.close()
def insertLivroEmprestimo(self, setsLivroEmprestimo): #Inserindo dados na tabela tbl_emprestimo
self.conecta()
cur = self.con.cursor()
query = "INSERT INTO Livro_Emprestimo(fk_livro, fk_emprestimo)" \
" VALUES("'%s'");" %(setsLivroEmprestimo)
print(query)
cur.execute(query)
self.con.commit()
self.con.close()
def selectTblEmprestimo(self):#Selecionando o registro mais atual para usar nos futuros inserts
self.conecta()
cur = self.con.cursor()
query = "select max(cod_emprestimo) from tbl_emprestimo;"
print(query)
cur.execute(query)
result = cur.fetchall()
self.con.close()
return result
def selectNomeLivroEmprestado(self, setNomeLivroEmprestado):
self.conecta()
cur = self.con.cursor()
query = "select nome_livro from tbl_livros join Livro_Emprestimo on livro_emprestimo.fk_" \
"livro = tbl_livros.codigo_livro where fk_emprestimo='%s';" % (setNomeLivroEmprestado)
print(query)
cur.execute(query)
result = cur.fetchall()
self.con.close()
return result
# def selectNomeLivro(self, setNomeLivro):
# self.conecta()
# cur = self.con.cursor()
# query = "select * from tbl_livros where codigo_livro = '%s';" % (setNomeLivro)
# print(query)
# cur.execute(query)
# result = cur.fetchall()
# self.con.close()
# return result | DavidDevOps2000/ProgramaBibliotecaPython | inserts.py | inserts.py | py | 2,226 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "MySQLdb.connect",
"line_number": 14,
"usage_type": "call"
}
] |
33501403446 | import sys
from lxml import etree
from math import sqrt
from GammaPipeCommon.utility import *
from conf import get_resultsdb_conf
from conf import get_pipedb_conf
import mysql.connector as mysql
import re
import time
import os
import subprocess
class ImportResults:
def import_results(results_xml,check_alert):
print("import"+str(results_xml))
#read xml
root_dir = os.getcwd()
conf_dictionary = get_pipedb_conf()
pipedb_hostname = conf_dictionary['host']
pipedb_username = conf_dictionary['username']
pipedb_password = conf_dictionary['password']
pipedb_port = conf_dictionary['port']
pipedb_database = conf_dictionary['database']
conf_dictionary_results = get_resultsdb_conf()
resultsdb_hostname = conf_dictionary_results['host']
resultsdb_username = conf_dictionary_results['username']
resultsdb_password = conf_dictionary_results['password']
resultsdb_port = conf_dictionary_results['port']
resultsdb_database = conf_dictionary_results['database']
try:
# get events list
conn = mysql.connect(host=pipedb_hostname, user=pipedb_username, passwd=pipedb_password, db=pipedb_database,port=pipedb_port)
cursor = conn.cursor(dictionary=True)
conn_results = mysql.connect(host=resultsdb_hostname, user=resultsdb_username, passwd=resultsdb_password, db=resultsdb_database,port=resultsdb_port)
cursor_results = conn_results.cursor(dictionary=True)
tree = etree.parse(results_xml)
sources = tree.findall("//source")
for source in sources:
#for each point source
if(source.attrib["type"]=="PointSource"):
print("source")
name = source.attrib["name"]
ts = source.attrib["ts"]
sqrtts = sqrt(float(ts))
print("sqrtts="+str(sqrtts))
runid = source.attrib["runid"]
spectrum_element = source.find("spectrum")
spectrum_type = spectrum_element.attrib["type"]
print("spectrum_type="+str(spectrum_type))
parameter_prefactor = spectrum_element.find("parameter[@name='Prefactor']")
flux = parameter_prefactor.attrib['value']
flux_err = parameter_prefactor.attrib['error']
flux_scale = parameter_prefactor.attrib['scale']
parameter_index = spectrum_element.find("parameter[@name='Index']")
spectral_index = parameter_index.attrib['value']
spectral_index_error = parameter_index.attrib['error']
spatial_model_element = source.find("spatialModel")
spatial_model_type = spatial_model_element.attrib['type']
parameter_ra = spatial_model_element.find("parameter[@name='RA']")
ra = parameter_ra.attrib['value']
if 'error' in parameter_ra.attrib:
ella = parameter_ra.attrib['error']
else:
ella = -1
parameter_dec = spatial_model_element.find("parameter[@name='DEC']")
dec = parameter_dec.attrib['value']
if 'error' in parameter_dec.attrib:
ellb = parameter_dec.attrib['error']
else:
ellb = -1
ellphi = 0
#convert ra,dec to l,b
l,b = Utility.convert_fk5_to_gal(ra,dec)
#TODO
#check if already exist this detection
lpeak = -1
bpeak = -1
r = -1
query_run = "select tstart,tstop,emin,emax,l,b from run r join energybin eb ON (eb.energybinid = r.energybinid) where runid = "+runid
cursor.execute(query_run)
row= cursor.fetchone()
tstart = re.sub(r"\.0$", "", str(row['tstart']))
tstop = re.sub(r"\.0$", "", str(row['tstop']))
emin = re.sub(r"\.0$", "", str(row['emin']))
emax = re.sub(r"\.0$", "", str(row['emax']))
run_l = re.sub(r"\.0$", "", str(row['l']))
run_b = re.sub(r"\.0$", "", str(row['b']))
rootname = root_dir+"/T"+tstart+"_"+tstop+"_E"+emin+"_"+emax+"_P"+run_l+"_"+run_b
import_time = time.time()
#insert detection into DB and call alert algorithm
query_insert = ("insert into detection (rootname,label,runid,l,b,r,ella,ellb,ellphi,lpeak,bpeak,flux,fluxerr,sqrtts,spectralindex,spectralindexerr,import_time)"
" values ('"+str(rootname)+"','"+str(name)+"',"+str(runid)+","+str(l)+","+str(b)+",0,"+str(ella)+","+str(ellb)+","+str(ellphi)+","+str(lpeak)+","+str(bpeak)+","+str(flux)+""
","+str(flux_err)+","+str(sqrtts)+","+str(spectral_index)+","+str(spectral_index_error)+","+str(import_time)+")")
print(query_insert)
cursor_results.execute(query_insert)
conn_results.commit()
detectionid = cursor_results.lastrowid
print("detectionid "+str(detectionid))
if(check_alert == 1):
#from run get tstart_tt tstop_tt
query = "select r.tstart,r.tstop,analysissessiontype_notice_observationid,analysissessiontype_observationid from run r join analysissession ans ON (ans.analysissessionid = r.analysissessionid) where runid = "+str(runid)
print(query)
cursor.execute(query)
run = cursor.fetchone()
t_start_tt = run['tstart']
t_stop_tt = run['tstop']
analysissessiontype_notice_observationid = run['analysissessiontype_notice_observationid']
analysissessiontype_observationid = run['analysissessiontype_observationid']
if analysissessiontype_notice_observationid is None:
analysissessiontype_notice_observationid = 'NULL'
if analysissessiontype_observationid is None:
analysissessiontype_observationid = 'NULL'
x_alert = 8
x_association = 4
cmd = "ruby $PIPELINE/GammaPipeCommon/alert/alert_check.rb "+str(detectionid)+" "+str(l)+" "+str(b)+" "+str(r)+" "+str(ella)+" "+str(ellb)+" "+str(ellphi)+" "+str(lpeak)+" "+str(bpeak)+" "+str(sqrtts)+" "+str(t_start_tt)+" "+str(t_stop_tt)+" "+str(analysissessiontype_observationid)+" "+str(analysissessiontype_notice_observationid)+" "+str(x_alert)+" "+str(x_association)+" "+str(root_dir)
output = subprocess.Popen(cmd,shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode('utf-8')
print(output)
if(check_alert == 1):
#TODO gestione allerte doppie -> passare anche l'id della sessione per controllare solo quelle dove sono andato ad inserire
cmd = "ruby $PIPELINE/GammaPipeCommon/alert/check_for_duplicate_alert.rb "+str(analysissessiontype_observationid)+" "+str(analysissessiontype_notice_observationid)
output = subprocess.Popen(cmd,shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode('utf-8')
print(output)
cursor.close()
conn.close()
cursor_results.close()
conn_results.close()
except Exception as e :
print("error")
print(e)
if __name__ == '__main__':
# Run binned in-memory pipeline
ImportResults.import_results(sys.argv[1],sys.argv[2])
| cta-rta/ctoolsint | ImportResults.py | ImportResults.py | py | 8,133 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.getcwd",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "conf.get_pipedb_conf",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "conf.get_resultsdb_conf",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "mysql.connecto... |
34050456586 | import shadow.utils
shadow.utils.set_seed(0, cudnn_deterministic=True) # set seeds for reproducibility
#%matplotlib inline
import matplotlib.pyplot as plt
from sklearn import datasets
import numpy as np
import random
import math as m
n_samples = 1000 # number of samples to generate
noise = 0.05 # noise to add to sample locations
X, y = datasets.make_moons(n_samples=n_samples, noise=noise)
class my_kmeans:
def __init__(self, clusers=2):
self.k = clusers
def cal_dis(self, data, centeroids):
dis = []
for i in range(len(data)):
dis.append([])
for j in range(self.k):
dis[i].append(m.sqrt((data[i, 0] - centeroids[j, 0])**2 + (data[i, 1]-centeroids[j, 1])**2))
return np.asarray(dis)
def divide(self, data, dis):
clusterRes = [0] * len(data)
for i in range(len(data)):
seq = np.argsort(dis[i])
clusterRes[i] = seq[0]
return np.asarray(clusterRes)
def centeroids(self, data, clusterRes):
centeroids_new = []
for i in range(self.k):
idx = np.where(clusterRes == i)
sum = data[idx].sum(axis=0)
avg_sum = sum/len(data[idx])
centeroids_new.append(avg_sum)
centeroids_new = np.asarray(centeroids_new)
return centeroids_new[:, 0: 2]
def cluster(self, data, centeroids):
clulist = self.cal_dis(data, centeroids)
clusterRes = self.divide(data, clulist)
centeroids_new = self.centeroids(data, clusterRes)
err = centeroids_new - centeroids
return err, centeroids_new, clusterRes
def fit(self,data):
clu = random.sample(data[:, 0:2].tolist(), 2)
clu = np.asarray(clu)
err, clunew, clusterRes = self.cluster(data, clu)
while np.any(abs(err) > 0):
#print(clunew)
err, clunew, clusterRes = self.cluster(data, clunew)
clulist = self.cal_dis(data, clunew)
clusterResult = self.divide(data, clulist)
return clusterResult
def myKNN(S, k, sigma=2.0):
N = len(S)
A = np.zeros((N,N))
for i in range(N):
dist_with_index = zip(S[i], range(N))
dist_with_index = sorted(dist_with_index, key=lambda x:x[0])
neighbours_id = [dist_with_index[m][1] for m in range(k+1)] # xi's k nearest neighbours
for j in neighbours_id: # xj is xi's neighbour
A[i][j] = np.exp(-S[i][j]/2/sigma/sigma)
A[j][i] = A[i][j] # mutually
return A
def calLaplacianMatrix(adjacentMatrix):
# compute the Degree Matrix: D=sum(A)
degreeMatrix = np.sum(adjacentMatrix, axis=1)
# compute the Laplacian Matrix: L=D-A
laplacianMatrix = np.diag(degreeMatrix) - adjacentMatrix
# normailze
# D^(-1/2) L D^(-1/2)
sqrtDegreeMatrix = np.diag(1.0 / (degreeMatrix ** (0.5)))
return np.dot(np.dot(sqrtDegreeMatrix, laplacianMatrix), sqrtDegreeMatrix)
def euclidDistance(x1, x2, sqrt_flag=False):
res = np.sum((x1-x2)**2)
if sqrt_flag:
res = np.sqrt(res)
return res
def Distance(X):
X = np.array(X)
S = np.zeros((len(X), len(X)))
for i in range(len(X)):
for j in range(i+1, len(X)):
S[i][j] = 1.0 * euclidDistance(X[i], X[j])
S[j][i] = S[i][j]
return S
clusters = 2
Similarity = Distance(X)
Adjacent = myKNN(Similarity, k=5)
Laplacian = calLaplacianMatrix(Adjacent)
x, V = np.linalg.eig(Laplacian)
x = zip(x, range(len(x)))
x = sorted(x, key=lambda x:x[0])
H = np.vstack([V[:,i] for (v, i) in x[:clusters]]).T
result = my_kmeans(2).fit(H)
plt.title('spectral cluster result')
plt.scatter(X[:,0], X[:,1],marker='o',c=result)
plt.show() | alalba221/Advanced-Machine-Learning | Final23F/Q4.py | Q4.py | py | 3,742 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "shadow.utils.utils.set_seed",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "shadow.utils.utils",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "shadow.utils",
"line_number": 2,
"usage_type": "name"
},
{
"api_name": "sklearn... |
20244284451 | #!/usr/bin/env python
import os
import sys
from lib.util import rm_rf
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
def main():
os.chdir(SOURCE_ROOT)
rm_rf('node_modules')
rm_rf('dist')
rm_rf('out')
rm_rf('spec/node_modules')
rm_rf('vendor/brightray/vendor/download/libchromiumcontent')
rm_rf('vendor/brightray/vendor/libchromiumcontent/src')
rm_rf(os.path.expanduser('~/.node-gyp'))
if __name__ == '__main__':
sys.exit(main())
| brave/muon | script/clean.py | clean.py | py | 482 | python | en | code | 970 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number... |
30132799933 | from collections import defaultdict
from typing import Tuple, Union
import cv2
import distinctipy
import matplotlib.pylab as plt
import numpy as np
from PIL import Image
def preprocess_image_draw(image: Union[Image.Image, np.ndarray]):
image = np.array(image) # Convert if PIL, copy if numpy
if len(image.shape) == 2: # GRAYSCALE
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
elif len(image.shape) == 3:
pass
else:
raise ValueError("Unsupported number of dims on image.")
return image
def draw_tooth(
image: np.ndarray,
pt0: list,
pt1: list,
tooth_name: str,
color: tuple,
size_factor=1.0,
draw_axis=False,
):
x1, y1 = pt0
height, width = image.shape[:2]
text = f"{tooth_name}"
draw_scale = max(height, width) / 2000
x2, y2 = pt1
# x2, y2 = x2 * width, y2 * height
px = min(x1, x2) + np.abs(x1 - x2) // 2
py = min(y1, y2) + np.abs(y1 - y2) // 2
if draw_axis:
image = cv2.line(
image,
(int(x1), int(y1)),
(int(x2), int(y2)),
(255, 0, 0),
int(2 * draw_scale),
)
cv2.putText(
image,
text,
(int(px - 20 * draw_scale / 2000), int(py)),
cv2.FONT_HERSHEY_SIMPLEX,
size_factor * draw_scale,
color,
int(size_factor * draw_scale * 3),
)
return image
def draw_longaxis_output(
image: Union[Image.Image, np.ndarray],
keypoints: list,
color: Tuple = (0, 255, 0),
th: float = 0.14,
size_factor=1.0,
draw_axis=False,
):
image = preprocess_image_draw(image)
teeth_map = defaultdict(list)
for keypoint in keypoints:
tooth_name = keypoint["class_name"].split("_")[0]
teeth_map[tooth_name].append(keypoint)
for tooth_name, keypoints in teeth_map.items():
if np.mean([p["score"] for p in keypoints]) < th:
continue
pt0 = keypoints[0]["point"]
pt1 = keypoints[1]["point"]
image = draw_tooth(
image,
pt0,
pt1,
tooth_name,
color,
size_factor,
draw_axis=draw_axis,
)
return image
def draw_panorogram(image, contours_pairs, closed=False):
dimage = preprocess_image_draw(image)
alpha = 0.5
COLOR_MAP = {
"ContMand": (0, 255, 255),
"CanManDir": (255, 0, 0),
"CanManEsq": (255, 0, 0),
"RebAlvInf": (0, 255, 0),
"RebAlvSup": (0, 255, 0),
"SeioMaxDir": (0, 0, 255),
"SeioMaxEsq": (0, 0, 255),
"FossaNasal": (255, 255, 0),
}
for pair in contours_pairs:
if "CanMan" in pair[0]:
overlay = np.zeros(shape=dimage.shape, dtype=np.uint8)
overlay = cv2.drawContours(
overlay,
[np.array(pair[1]).astype(int)],
-1,
color=COLOR_MAP[pair[0]],
thickness=-1,
)
else:
overlay = np.zeros(shape=dimage.shape, dtype=np.uint8)
overlay = cv2.polylines(
overlay,
[np.array(pair[1]).astype(int)],
isClosed=closed,
color=COLOR_MAP[pair[0]],
thickness=int(max(dimage.shape) / 500),
)
dimage = cv2.addWeighted(overlay, alpha, dimage, 1, 0)
return dimage
def draw_bbox(image, coords, color=(225, 0, 0), text=None, text_below=False):
"""Draw bbox on image, expect an int image"""
dimage = image
height, width = dimage.shape[:2]
min_dim = min(height, width)
x1, y1, x2, y2 = coords
thickness = 1 + int(min_dim / 600)
dimage = cv2.rectangle(
dimage, (int(x1), int(y1)), (int(x2), int(y2)), color, thickness
)
if text is None:
return dimage
if not text_below:
x_text, y_text = int(x1), int(y1 - min_dim / 100)
else:
x_text, y_text = int(x1), int(y1 + min_dim / 100)
dimage = cv2.putText(
dimage,
text,
(int(x_text), int(y_text)),
cv2.FONT_HERSHEY_SIMPLEX,
min_dim / 1000,
color,
int(0.7 * thickness),
cv2.LINE_AA,
)
return dimage
def draw_bboxes(image, bboxes, th=0.5):
dimage = preprocess_image_draw(image)
for idx, bbox in enumerate(bboxes):
if bbox["score"] < th:
continue
color = plt.get_cmap("hsv")(idx / 32) # Number of teeth
color = [int(x * 255) for x in color]
text = f"{bbox['class_name']} {bbox['score']:.2f}"
dimage = draw_bbox(
dimage,
bbox["bbox"],
color=color,
text=text,
)
return dimage
def contour2mask(contours, w, h):
conv_mask = np.zeros(shape=(h, w), dtype=np.uint8)
conv_mask = cv2.fillPoly(
conv_mask,
[np.array(tcont, dtype=int).reshape((-1, 1, 2)) for tcont in contours],
color=255,
)
return conv_mask
def draw_masks(image, masks):
dimage = preprocess_image_draw(image)
alpha = 0.2
for idx, mask in enumerate(masks):
mask = mask / 255
color = plt.get_cmap("hsv")(idx / len(masks))
mask_color = (
255 * np.stack([color[0] * mask, color[1] * mask, color[2] * mask], axis=2)
).astype(np.uint8)
dimage = cv2.addWeighted(dimage, 1, mask_color, 1 - alpha, 0)
return dimage
def draw_heatmap(image, heatmap):
dimage = preprocess_image_draw(image)
alpha = 0.2
dimage = cv2.addWeighted(dimage, 1, heatmap, 1 - alpha, 0)
return dimage
def draw_contours(image, contours, color=(255, 0, 0), closed=False):
dimage = preprocess_image_draw(image)
dimage = cv2.polylines(
dimage.copy(),
[np.array(cont).astype(int) for cont in contours],
isClosed=closed,
color=color,
thickness=2,
)
return dimage
def draw_procedures_output(
img,
entities,
point_names=None,
plot_labels=False,
):
img = preprocess_image_draw(img)
height, width = img.shape[:2]
scale = max(img.shape) / 2000
shown_cls = []
max_upper_limit = height
mand_lower_limit = 0
tooth_map = defaultdict(list)
for e in entities:
tooth_map[e["tooth"]].append(e)
CLASSES = sorted(list({e["class_name"] for e in entities}))
colors = distinctipy.get_colors(len(CLASSES), rng=139)
for e in entities:
point = e["line"]
if point[0][1] < point[1][1]: # Mandibula
if point[1][1] > mand_lower_limit:
mand_lower_limit = point[1][1]
if point[0][1] > point[1][1]: # Maxila
if point[1][1] < max_upper_limit:
max_upper_limit = point[1][1]
for i, (tooth, ents) in enumerate(tooth_map.items()):
point = ents[0]["line"]
label = ents[0]["class_name"]
if point[0][1] < point[1][1]: # Mandibula
xb, yb = point[1][0], point[1][1] # botton coods
xt, yt = point[0][0], point[0][1] # top coords
ax, ay = xb - xt, yb - yt # center point
pv = np.array([0, ay]) / 6
for j, e in enumerate(ents):
color = colors[CLASSES.index(e["class_name"])]
color = [x * 255 for x in color]
offset = j
shown_cls.append(e["class_name"])
img = cv2.circle(
img,
(
int(xb + offset * pv[0]),
int(1.05 * mand_lower_limit + offset * pv[1]),
),
int(max(img.shape) / 200),
color,
-1,
)
elif point[0][1] > point[1][1]: # Maxila
xb, yb = point[0][0], point[0][1]
xt, yt = point[1][0], point[1][1]
ax, ay = xt - xb, yt - yb
pv = np.array([0, ay]) / 6
for j, e in enumerate(ents):
# color = plt.get_cmap("hsv")(
# CLASSES.index(l) / len(CLASSES)
# ) # Number of classes on COCO
color = colors[CLASSES.index(e["class_name"])]
color = [x * 255 for x in color]
# offset = j - len(label) // 2
offset = j
shown_cls.append(e["class_name"])
img = cv2.circle(
img,
(
int(xt + offset * pv[0]),
int(0.95 * max_upper_limit + offset * pv[1]),
),
int(max(img.shape) / 200),
color,
-1,
)
bimg = np.zeros((height, int(width + 500 * scale), 3), dtype=np.uint8)
bimg[:height, :width, :] = img
width = bimg.shape[1]
for i, _cls in enumerate(list(set(shown_cls))):
# color = plt.get_cmap("hsv")(
# CLASSES.index(_cls) / len(CLASSES)
# ) # Number of classes on COCO
color = colors[CLASSES.index(_cls)]
color = [x * 255 for x in color]
text = _cls
font_scale = scale * 1
img = cv2.putText(
bimg,
text,
(
int(width - 500 * scale + scale * 50),
int(height - 500 * scale + i * 40 * scale),
),
cv2.FONT_HERSHEY_SIMPLEX,
font_scale,
color,
thickness=int(3 * scale),
)
return img
def draw_points(image, entities):
dimage = preprocess_image_draw(image)
width, height = image.size
draw_scale = max(height, width) / 2000
for ent in entities:
x, y = ent["point"]
dimage = cv2.circle(dimage, (x, y), int(10 * draw_scale), (0, 255, 0), -1)
dimage = cv2.putText(
dimage,
ent["class_name"],
(x, int(y - width / 80)),
cv2.FONT_HERSHEY_SIMPLEX,
draw_scale,
(255, 0, 0),
int(4 * draw_scale),
cv2.LINE_AA,
)
return dimage
| Radio-Memory/radiomemory-ai-api-demo | vis.py | vis.py | py | 10,117 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Union",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "PIL.Image.Image",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line... |
31748626437 | import matplotlib.pyplot as plt
from random_walk import RandomWalk
"""make a random walk and plot points as long as the program is active"""
while True:
rw = RandomWalk()
rw.fill_walk()
"""set size of plot window"""
plt.figure(figsize=(10, 6))
point_numbers = list(range(rw.num_points))
plt.scatter(rw.x_values, rw.y_values, c=point_numbers, cmap=plt.cm.Blues, edgecolors='none', s=1)
"""highlight start and end points"""
plt.scatter(0, 0, c='purple', edgecolors='none', s=100)
plt.scatter(rw.x_values[-1], rw.y_values[-1], c='yellow', edgecolors='none', s=100)
"""remove axes"""
plt.axes().get_xaxis().set_visible(False)
plt.axes().get_yaxis().set_visible(False)
plt.show()
keep_running = input("Would you like to make another walk? y/n: ")
if keep_running == 'n':
break
| Javataru/data_visualizations | data_graph/data_visualizations/rw_display.py | rw_display.py | py | 876 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random_walk.RandomWalk",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "matp... |
34481978389 | from datetime import datetime
import matplotlib.pyplot as plt
"""
Please read the README file before running this code
In order to fully understand the purpose and the execution of this code
"""
def clean_line(line):
cleaned_line = line.strip()
cleaned_line = cleaned_line[1:]
if cleaned_line.startswith("value"):
return cleaned_line[8:-1]
return None
def load_set_from_file(filename):
data_set = set()
with open(filename, 'r') as file:
for line in file:
cleaned_line = clean_line(line)
if cleaned_line is not None:
data_set.add(cleaned_line)
return data_set
def days_between_dates(date1, date2):
# Convert dates to datetime objects
dt1 = datetime(date1[2], date1[0], date1[1])
dt2 = datetime(date2[2], date2[0], date2[1])
# Calculate the difference between the two dates
delta = dt2 - dt1
# Extract the number of days from the difference
days = delta.days
return days
def str_to_list(date_string):
# Remove the square brackets and split the string into individual elements
date_string = date_string.strip('[]')
date_elements = date_string.split(',')
# Convert the elements to integers and create the final list
date_list = [int(element.strip()) for element in date_elements]
return date_list
def update_followers(number_of_followers, username):
    """Append today's follower count (plus the day gap since the last
    entry) to ``{username}_followers_variation.txt`` and mirror the pair
    into the plotting file via file_for_plotting().
    """
    #trick so I don't have to import the OS library - the following 2 lines of code are needed only for the first time running this code
    file = open(f"{username}_followers_variation.txt", 'a')
    file.close()
    # check the date for the current numbs of followers
    current_datetime = datetime.now()
    curr_date = current_datetime.date()
    year, month, day = curr_date.year, curr_date.month, curr_date.day
    current_date = [month, day, year]  # [month, day, year] layout expected by days_between_dates()
    # check how much time has passed since the last followers update
    distance = None
    with open(f"{username}_followers_variation.txt", 'r') as file:
        lines = file.readlines()
        if not lines: distance = 0
        else:
            last_line = lines[-1]
            # NOTE(review): assumes the date list always renders to exactly
            # 13 characters — verify for single-digit months/days
            last_date_unclear = last_line[:13]
            last_date = str_to_list(last_date_unclear)
            distance = days_between_dates(last_date, current_date)
    # update data
    with open(f"{username}_followers_variation.txt", 'a') as f:
        f.write(f"{current_date} , {number_of_followers}, {distance}.\n")
    # update file for plotting
    file_for_plotting(distance, number_of_followers, username)
    return
def file_for_plotting(distance, number_of_followers, username):
    """Append one (distance, followers) pair — one value per line — to
    the user's plotting data file."""
    with open(f"{username}_data_for_plotting.txt", 'a') as out:
        out.write(f"{distance}\n{number_of_followers}\n")
    return
def following_but_not_followers(username):
    """Return ('not following you back:', count, set_of_accounts) for the
    accounts *username* follows that do not follow back.

    Reads ``{username} - following.json`` and ``{username} - followers.json``
    from the current directory; also records today's follower count via
    update_followers() as a side effect.
    """
    # returns a set of people that you are following but that they are not following you back
    followings = load_set_from_file(f"{username} - following.json")
    followers = load_set_from_file(f"{username} - followers.json")
    # P.S. sets are faster and usable since we don't expect duplicates
    # collects data for future applications
    update_followers(len(followers), username)
    # set difference: followed but not following back
    not_followers = followings - followers
    print(f'n. followers: {len(followers)}')
    print(f'n. following: {len(followings)}')
    return 'not following you back:', len(not_followers), not_followers
def load_plot_followers_variation():
    """Read the data file written by file_for_plotting() and display a
    line plot of follower count over time.

    NOTE(review): relies on the module-level global ``username`` (only
    assigned in the __main__ block) — consider passing it as a parameter.
    """
    xs, ys = [], []
    with open(f"{username}_data_for_plotting.txt", 'r') as f:
        lines = f.readlines()
        # the file alternates x (day distance) and y (follower count) lines
        for i in range(0, len(lines), 2):
            xs.append(int(lines[i]))
            ys.append(int(lines[i+1]))
    # Check if the lengths of xs and ys match
    if len(xs) != len(ys):
        raise ValueError("Lists xs and ys must have the same length.")
    # Create the plot
    plt.figure()
    plt.plot(xs, ys, marker='o', linestyle='-', color='b')
    plt.xlabel('Time')
    plt.ylabel('Number of followers')
    plt.title('Followers variation')
    plt.grid(True)
    plt.show()
if __name__ == '__main__':
    # interactive entry point: analyze one Instagram account
    username = input('Please input the username you want to analyze: ')
    result = following_but_not_followers(username)
    var = input('Do you also want to know the variation of your followers you had in the time? [Y] Yes [N] No -> ')
    if var not in {'Y', 'N'}:
        print('Input not valid, please rerun the program.')
    elif var == 'Y':
        # NOTE(review): load_plot_followers_variation() returns None, so
        # result_2 is never truthy and the final print never fires
        result_2 = load_plot_followers_variation()
    else:
        result_2 = None
    print(result)
    if result_2: print(result_2)
| andrea-gentilini/InstagramUnFollowers | main.py | main.py | py | 4,840 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "datetime.da... |
37428725368 | from __future__ import division
from pyglet import image, text
from tdgl.gl import *
from tdgl import part, picking
from tdgl.stylesheet import border_points
__all__ = ('Panel','LabelPanel')
class Panel(part.ScalePart):
    """Base class for things with a bordered panel behind"""
    # Style keys recognized by this class; subclasses extend this dict.
    _default_style = dict(
        bg=None, # background colour
        bd=None, # border colour
        border=0, # border width
        bg_radius = 0, # corner radius of background
        bg_round = 0, # num rounding points at corner
        bd_radius = 0, # corner radius of border
        bd_round = 0, # rounding points at border corner
        bg_margin = 0, # 0 = (0,0). Spacing from contents to panel edge
        bd_margin = 0, # spacing from contents to border
        texture = None, # texture of panel
        texture_repeat = 'scale', # == (1,1), num repeats across panel
        )
    def __init__(self,*args,**kw):
        """Allocate two consecutive GL display lists: one for the
        background fill, one for the border outline."""
        super(Panel,self).__init__(*args,**kw)
        dl = glGenLists(2)
        self.bgdl = dl # for background
        self.bddl = dl + 1 # for border
    def __del__(self):
        # glDeleteLists may already be gone during interpreter shutdown;
        # the truthiness guard avoids calling a torn-down binding.
        if glDeleteLists:
            glDeleteLists(self.bgdl,2)
    def render(self,mode="OPAQUE"):
        """Replay the compiled background/border lists, then draw the
        subclass content; wraps the draw in a picking label in PICK mode."""
        if mode == 'PICK':
            picking.label(self)
        glCallList(self.bgdl)
        glCallList(self.bddl)
        self.render_content(mode)
        if mode == 'PICK':
            picking.nolabel()
    def prepare(self):
        """Recompile both display lists from the current style settings.

        The texture style may be a filename (NOTE: ``basestring`` implies
        Python 2), an object with an ``id``, or a raw GL texture id.
        self.tex / self.tex_id are only assigned when a background colour
        is set — render_background() is only compiled in that case too.
        """
        self.prepare_content()
        getstyle = self.getstyle
        bg = getstyle("bg")
        tex = getstyle("texture")
        if bg:
            if isinstance(tex,basestring):
                self.tex = image.load(tex).get_mipmapped_texture()
                self.tex_id = self.tex.id
            elif hasattr(tex,"id"):
                self.tex = tex
                self.tex_id = tex.id
            elif isinstance(tex,int):
                self.tex = tex
                self.tex_id = tex
            else:
                self.tex = None
                self.tex_id = 0
        bd = getstyle("bd")
        border = getstyle("border")
        with gl_compile(self.bgdl):
            if bg:
                self.render_background()
        with gl_compile(self.bddl):
            if bd and border:
                self.render_border()
    def render_background(self):
        """Emit the (possibly textured) background fan; called only while
        compiling the background display list."""
        w,h = self.content_size()
        getstyle = self.getstyle
        bg = getstyle("bg")
        has_texture = bool(self.tex)
        margin = getstyle("bg_margin",0)
        # a scalar margin applies to both axes; a pair is (x, y)
        if type(margin) in (int,float):
            marginx = margin
            marginy = margin
        else:
            marginx,marginy = margin
        radii = getstyle("bg_radius",0)
        round = getstyle("bg_round",0)
        points = border_points(
            w + 2*marginx, h + 2*marginy,
            radii, round)
        if has_texture:
            rep = getstyle("texture_repeat","scale")
            if rep == "scale":
                rep = 1.0
            if type(rep) in (int,float):
                aspect = w/h
                rep = (rep*aspect,rep)
            rx,ry = rep
            # map panel coordinates (centred on 0,0) into [0,1] tex coords
            tw = rx / (w + 2*marginx)
            th = ry / (h + 2*marginy)
            tpoints = [(x*tw + 0.5, y*th + 0.5)
                       for x,y in points]
            glBindTexture(GL_TEXTURE_2D,self.tex_id)
            glEnable(GL_TEXTURE_2D)
        else:
            glDisable(GL_TEXTURE_2D)
            tpoints = []
        glColor4f(*bg)
        v = glVertex3f
        tc = glTexCoord2f
        z = -0.02  # slightly behind the border (-0.01) and the content
        with gl_begin(GL_TRIANGLE_FAN):
            tc(0.5,0.5); v(0,0,z)
            if tpoints:
                for (x,y),(s,t) in zip(points,tpoints):
                    tc(s,t)
                    v(x,y,z)
            else:
                for (x,y) in points:
                    v(x,y,z)
    def render_border(self):
        """Emit the border outline as a smoothed line loop; called only
        while compiling the border display list."""
        w,h = self.content_size()
        getstyle = self.getstyle
        bd = getstyle("bd")
        border = getstyle("border")
        margin = getstyle("bd_margin",0)
        if type(margin) in (int,float):
            marginx = margin
            marginy = margin
        else:
            marginx,marginy = margin
        radii = getstyle("bd_radius",0)
        round = getstyle("bd_round",0)
        points = border_points(
            w + 2*marginx, h + 2*marginy,
            radii, round)
        glDisable(GL_TEXTURE_2D)
        glEnable(GL_LINE_SMOOTH)
        glColor4f(*bd)
        v = glVertex3f
        z = -0.01  # between background (-0.02) and content
        glLineWidth(border)
        with gl_begin(GL_LINE_LOOP):
            for (x,y) in points:
                v(x,y,z)
    # Override in sub-classes:
    def prepare_content(self):
        pass
    def render_content(self,mode):
        pass
    def content_size(self):
        return (1,1)
class LabelPanel(Panel):
    """A Panel whose content is a single pyglet text label.

    Extends Panel's style keys with foreground colour and font options.
    """
    _default_style = dict(Panel._default_style,
                          fg=(1,1,1,1),
                          font=None,
                          font_size=16,
                          italic=False,
                          bold=False)
    def __init__(self,name="",text="",html=False,**kw):
        super(LabelPanel,self).__init__(name,**kw)
        self.text = text
        self.html = html
        self.prepare()
    def content_size(self):
        """Width and height of the rendered label, in pixels."""
        label = self.label
        return label.content_width, label.content_height
    def render_content(self,mode="OPAQUE"):
        """Draw the label itself; the panel/border are drawn by Panel."""
        self.label.draw()
    def prepare_content(self):
        """(Re)build the pyglet label from the current style settings."""
        style = self.getstyle
        foreground = style("fg",(1,1,1,1))
        wrap_width = self.getgeom("text_width")
        wrap = bool(wrap_width)
        # pyglet wants 0-255 integer channels rather than 0-1 floats
        rgba = [int(channel*255) for channel in foreground]
        if self.html:
            label = text.HTMLLabel(
                text=self.text,
                width=wrap_width,
                multiline=wrap,
                anchor_x='center',anchor_y='center')
            label.set_style('color',rgba)
        else:
            label = text.Label(
                text=self.text,
                font_name=style("font"), font_size=style("font_size"),
                color=rgba,
                italic=style("italic",False), bold=style("bold", False),
                width=wrap_width,
                multiline=wrap,
                anchor_x='center',anchor_y='center')
        self.label = label
class SelectPanel(Panel):
    """ A Panel containing stacked parts, with one selected.
    The parts have to implement content_size(), so some
    kind of Panel is likely. """
    _default_style = Panel._default_style.copy()
    _default_style["pad"] = 2  # spacing between stacked children
    def __init__(self,name="",contents=(),selected=None,vertical=True,**kw):
        super(SelectPanel,self).__init__(name,**kw)
        self.vertical = vertical
        self.selected = selected
        self.contents = list(contents)
        self._content_size = (1,1)
    def prepare_content(self):
        """Restyle and lay out the children in a vertical or horizontal
        stack centred on the panel origin, tagging the selected child
        with the "selected" style class."""
        sumw = sumh = 0
        # NOTE(review): minw/minh are computed but never used below
        minw = maxw = None
        minh = maxh = None
        pad = self.getstyle("pad",0)
        for i,p in enumerate(self.contents):
            classes = ["choice"]
            if i == self.selected:
                classes.append("selected")
            p.choice_number = i # So we can tell which one it is
            p.add_styles(*classes)
            p.prepare()
            w,h = p.content_size()
            sumw += w
            sumh += h
            minw = w if minw is None else min(minw,w)
            minh = h if minh is None else min(minh,h)
            maxw = w if maxw is None else max(maxw,w)
            maxh = h if maxh is None else max(maxh,h)
        if self.vertical:
            # width = widest child; height = sum of heights plus padding
            self._content_size = maxw,sumh + (len(self.contents)-1) * pad
            y = sumh / 2.0 # top of contents box, relative to centre
            x = 0
            z = 0.01
            for p in self.contents:
                w,h = p.content_size()
                p.pos = (x,y-h/2.0,z)
                y -= (h + pad)# top of next line
        else:
            # width = sum of widths plus padding; height = tallest child
            self._content_size = sumw + (len(self.contents)-1) * pad,maxh
            y = 0
            x = -sumw / 2.0 # left of contents box, relative to centre
            z = 0.01
            for p in self.contents:
                w,h = p.content_size()
                p.pos = (x + w/2.0,y,z)
                x += w + pad # left of next column
    def render_content(self,mode="OPAQUE"):
        """Draw each child; in PICK mode each child is labelled with its
        index so clicks can identify the choice."""
        for i,p in enumerate(self.contents):
            if mode=="PICK":
                picking.label(self,selected=i)
            p.draw(mode)
            if mode=="PICK":
                picking.nolabel()
    def content_size(self):
        return self._content_size
    def select_by_number(self,n):
        """Select the n-th choice (or None to clear) and re-lay-out."""
        if n is None or 0 <= n < len(self.contents):
            self.selected = n
            self.prepare()
    def select_object(self,obj):
        """Select the child equal to *obj*, or clear the selection when
        it is not among the contents."""
        try:
            self.selected = self.contents.index(obj)
        except ValueError:
            self.selected = None
        self.prepare()
    def restyle(self,force=False):
        """Copied from Group.restyle()"""
        super(SelectPanel,self).restyle(force)
        for p in self.contents:
            p.restyle(force)
class SelectTextPanel(SelectPanel):
    """A SelectPanel whose choices are plain text lines, each wrapped in
    a LabelPanel named "<name>[<index>]"."""
    def __init__(self,name="",lines=(),**kw):
        labels = []
        for index, line in enumerate(lines):
            labels.append(LabelPanel(name="%s[%d]" % (name, index), text=line))
        super(SelectTextPanel,self).__init__(name, contents=labels, **kw)
| scavpy/Scav-Team-Pyweek-Aug-2010 | gamelib/tdgl/panel.py | panel.py | py | 9,576 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "tdgl.part.ScalePart",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "tdgl.part",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "tdgl.picking.label",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tdgl.picking",
... |
17744150099 | import sys
import argparse
import pdb
import os
import time
import getpass
import yaml
import random
import string
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from parse_rest.connection import register
from parse_rest.datatypes import Object
from parse_rest.query import QueryResourceDoesNotExist
class CodeMonHandler(PatternMatchingEventHandler):
    """Watchdog handler that mirrors file create/modify events into the
    Parse 'FileChanges' collection, reusing a recent record when the same
    file changed again within ``args.interval_limit`` seconds.
    """
    patterns = []
    ignore_patterns = []
    ignore_directories = False
    case_sensitive = False
    fileChanges = ''  # current FileChanges record being filled in

    def __init__(self, args=None, session=None):
        self.patterns = args.filters
        self.ignore_patterns = ['*.git*']  # never track git internals
        self.session = session  # the MonSession this run belongs to
        self.args = args

    def process(self, event):
        """
        event.event_type
            'modified' | 'created' | 'moved' | 'deleted'
        event.is_directory
            True | False
        event.src_path
            path/to/observed/file
        """
        # Called for its side effect: selects (or creates) self.fileChanges
        # depending on whether the file changed within the interval.
        # (The boolean return value is deliberately ignored.)
        self.within_interval(event.src_path)
        self.fileChanges.filename = os.path.abspath(event.src_path)
        self.fileChanges.type = 'directory' if event.is_directory else 'file'
        self.fileChanges.parent = self.session.as_pointer
        self.fileChanges.event = event.event_type
        if event.event_type == 'modified' or (event.event_type == 'created' and event.is_directory == False):
            print('Saving %s file contents: %s' % (event.event_type, self.fileChanges.filename))
            with open(event.src_path, 'r') as contents:
                self.fileChanges.content = contents.read()
        else:
            self.fileChanges.content = ''
        self.fileChanges.save()

    def within_interval(self, src_path):
        """Point self.fileChanges at an existing recent record for
        *src_path*, or at a fresh record when none exists or the last one
        is older than args.interval_limit seconds.

        Returns False only in the "over interval limit" case; True
        otherwise.
        """
        fileChangesObj = Object.factory('FileChanges')
        try:
            # Fix: query by the absolute path, matching what process()
            # stores in `filename` (the old basename filter never matched,
            # so records were never reused within the interval).
            fileChanges = fileChangesObj.Query.all().filter(parent = self.session.as_pointer,
                                            filename = os.path.abspath(src_path),
                                            type = 'file').limit(1)
            fileChanges = fileChanges[0] if len(fileChanges) > 0 else False
        except QueryResourceDoesNotExist:
            fileChanges = False
        if fileChanges:
            current_time = time.gmtime()
            # seconds elapsed since the stored record was last updated
            time_diff = (time.mktime(current_time) - time.mktime(fileChanges.updatedAt.timetuple()))
            if time_diff > self.args.interval_limit:
                print('Over interval limit')
                self.fileChanges = fileChangesObj()
                return False
            else:
                print('within interval')
                self.fileChanges = fileChanges
        else:
            self.fileChanges = fileChangesObj()
        return True

    def on_modified(self, event):
        self.process(event)

    def on_created(self, event):
        self.process(event)
if __name__ == '__main__':
    # command-line options for the folder monitor
    parser = argparse.ArgumentParser(description='Monitor current folder')
    parser.add_argument('-f', '--filter', dest='filters', nargs='*',
                        default=['*.py', '*.txt'], help='Filter which filetype that will be monitored, default is all')
    # NOTE(review): no type=int here — a CLI-supplied interval arrives as a
    # str, so the numeric comparison in within_interval() would raise
    parser.add_argument('-i', '--interval', dest='interval_limit', default=120,
                        help='Interval between creating a new record for changes')
    parser.add_argument('-n', '--name', dest='monitor_name',
                        default='%s - %s' % (getpass.getuser(), os.path.basename(os.getcwd())), help='This session\'s name')
    parser.add_argument('-d','--directory', dest='monitor_path',
                        default='.', help='Which folder to monitor')
    args = parser.parse_args()
    # Parse credentials live in config.yaml next to this script
    f = open('%s/config.yaml' % os.path.abspath(os.path.dirname(sys.argv[0])))
    config = yaml.safe_load(f)
    f.close()
    register(config['parse']['app_id'], config['parse']['rest_key'], master_key=None)
    # Create session
    monObj = Object.factory('MonSession')
    monObj = monObj()
    monObj.user = getpass.getuser()
    # random 32-character machine identifier for this session
    monObj.machineID = ''.join(random.choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for _ in range(32))
    monObj.directory = os.path.abspath(args.monitor_path)
    monObj.name = args.monitor_name
    monObj.save()
    print('Session created: %s' % monObj.objectId)
    # watch the target folder recursively until Ctrl-C
    observer = Observer()
    observer.schedule(CodeMonHandler(args=args,session=monObj), path=args.monitor_path, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
{
"api_name": "watchdog.events.PatternMatchingEventHandler",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "os.path.abspath",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name... |
18264517083 | from django.contrib import admin # 장고에서 제공하는 Admin 기능 사용을 위한 임포트
from .models import Post # 직접 작성한 Post 모델 사용을 위한 임포트
@admin.register(Post) # register the Post model with the admin site
class PostAdmin(admin.ModelAdmin): # model-based admin configuration
    """Django admin configuration for the Post model."""
    list_display = ['id', 'title', 'content', 'created_at'] # columns shown on the change-list page
    list_editable = ['title', ] # columns editable directly in the list view
    # list_filter = ['is_active', ] # sidebar filters for the list view
    search_fields = ['title',] # fields searched by the admin search box
# create an admin account with: python manage.py createsuperuser
| 3chamchi/likelion-seoul-6th | week5/blog-app/posts/admin.py | admin.py | py | 721 | python | ko | code | 3 | github-code | 36 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.register",
"line_number": 5,
"usage_type": "call"
},
{
... |
7934166631 | '''
Description: Definitions of functions
Author: Nan Li
Contact: linan.lqq0@gmail.com
'''
# import packages
import re
import time
import pandas as pd
from headers import headers
from datetime import datetime
from seleniumwire import webdriver
# mapping from full street-type names to their standard abbreviations,
# used when building short-form property URLs
# (fix: 'avenue' was listed twice with the same value)
address_dict = ({'road':'rd', 'street':'st', 'place':'pl', 'avenue':'ave',
                'parade':'pde', 'highway':'hwy', 'drive':'dr', 'grove':'gr',
                'crescent':'cres', 'court':'ct', 'close':'cl', 'circuit':'cct',
                })
# function to read postcode from excel file
def readPostcode(file_name):
    """Load the first column ('Postcode') of an Excel sheet, sorted
    ascending by postcode, as an int array."""
    sheet = pd.read_excel(file_name).sort_values(by=['Postcode'])
    return sheet.values[:, 0].astype(int)
# function to create valid url according to postcode
def createUrl(postcode, page_no):
    """Build the realestate.com.au 'sold' listings URL for a postcode
    and results page; three-digit (NT) postcodes are zero-padded, and
    anything of five or more digits aborts the program."""
    url_pre = 'https://www.realestate.com.au/sold/in-'
    if postcode < 1000:
        return '{}0{}/list-{}?includeSurrounding=false'.format(url_pre, postcode, page_no)
    if postcode < 10000:
        return '{}{}/list-{}?includeSurrounding=false'.format(url_pre, postcode, page_no)
    print('Wrong postcode!!')
    exit()
# function to create valid url according to address
def createHouseUrl1(address, suburb, postcode, use_short):
    """Build a realestate.com.au property URL slug from an address.

    NOTE(review): street-type abbreviation uses plain substring regexes
    from address_dict (e.g. 'road' would also match inside 'broadway').
    NOTE(review): postcodes >= 8000 fall through every branch, leaving
    str_postcode unbound and raising at the final format call.
    """
    url_pre = 'https://www.realestate.com.au/property/'
    str_address = address.lower()
    # "12/34 ..." style addresses are unit addresses; prefix accordingly
    if re.search(r'[0-9|a-z]+/[0-9]', str_address) is not None:
        str_address = 'unit-' + str_address
    if use_short:
        # replace any full street-type word with its abbreviation
        for key in address_dict.keys():
            street_name = re.search(key, str_address)
            if street_name is not None:
                str_address = re.sub(street_name.group(), address_dict[key], str_address)
    str_suburb = suburb.lower()
    # map the postcode range to its state/territory prefix
    if postcode < 1000:
        str_postcode = 'nt-0{:d}'.format(postcode)
    elif postcode < 2600:
        str_postcode = 'nsw-{:d}'.format(postcode)
    elif postcode < 2700:
        str_postcode = 'act-{:d}'.format(postcode)
    elif postcode < 2800:
        str_postcode = 'nsw-{:d}'.format(postcode)
    elif postcode < 3000:
        str_postcode = 'act-{:d}'.format(postcode)
    elif postcode < 4000:
        str_postcode = 'vic-{:d}'.format(postcode)
    elif postcode < 5000:
        str_postcode = 'qld-{:d}'.format(postcode)
    elif postcode < 6000:
        str_postcode = 'sa-{:d}'.format(postcode)
    elif postcode < 7000:
        str_postcode = 'wa-{:d}'.format(postcode)
    elif postcode < 8000:
        str_postcode = 'tas-{:d}'.format(postcode)
    str_address = '{}-{}-{}'.format(str_address, str_suburb, str_postcode)
    # replace separators with hyphens, then collapse repeated hyphens
    str_address = re.sub('[,|\s|/]', '-', str_address)
    for _ in range(4):
        str_address = re.sub('--', '-', str_address)
    url = url_pre + str_address
    return url
# function to create url according to REA_id
def createHouseUrl2(REA_id):
    """Build the realestate.com.au property-lookup URL for a listing id."""
    return 'https://www.realestate.com.au/property/lookup?id={:d}'.format(REA_id)
def get_cookie(url, user_agent):
    """Open *url* in a (selenium-wire) Chrome session and scrape the
    KP_UIDz-ssn cookie out of the captured response headers.

    NOTE(review): if Chrome fails to start, `driver` is unbound and the
    later driver.get() raises NameError; likewise `cookie` is unbound
    when no 200 response is captured, and a later non-matching 200
    response can overwrite a previously found cookie with None.
    """
    options = webdriver.ChromeOptions()
    options.add_argument('user-agent={}'.format(user_agent))
    options.add_argument("--window-size=100x100")
    options.add_argument('ignore-certificate-errors')
    # reduce the usual automation fingerprints
    options.add_argument("--disable-blink-features=AutomationControlled")
    options.add_experimental_option("excludeSwitches", ["enable-automation", "enable-logging"])
    try:
        driver = webdriver.Chrome(options=options)
    except:
        print('Update ChromeDriver')
    # The triple-quoted block below is a disabled anti-detection patch
    # kept for reference; it is a no-op string expression at runtime.
    '''driver._orig_get = driver.get
    def _get_wrapped(*args, **kwargs):
        if driver.execute_script("return navigator.webdriver"):
            driver.execute_cdp_cmd(
                "Page.addScriptToEvaluateOnNewDocument",
                {
                    "source": """
                        Object.defineProperty(window, 'navigator', {
                            value: new Proxy(navigator, {
                                has: (target, key) => (key === 'webdriver' ? false : key in target),
                                get: (target, key) =>
                                    key === 'webdriver'
                                    ? undefined
                                    : typeof target[key] === 'function'
                                    ? target[key].bind(target)
                                    : target[key]
                            })
                        });
                """
                },
            )
        return driver._orig_get(*args, **kwargs)
    driver.get = _get_wrapped
    driver.get = _get_wrapped
    driver.get = _get_wrapped
    original_user_agent_string = driver.execute_script(
        "return navigator.userAgent"
    )
    driver.execute_cdp_cmd(
        "Network.setUserAgentOverride",
        {
            "userAgent": original_user_agent_string.replace("Headless", ""),
        },
    )
    driver.execute_cdp_cmd(
        "Page.addScriptToEvaluateOnNewDocument",
        {
            "source": """
                Object.defineProperty(navigator, 'maxTouchPoints', {
                        get: () => 1
                })"""
        },
    )'''
    def interceptor(request):
        del request.headers['user-agent']
        request.headers['user-agent'] = user_agent
        # Block PNG, JPEG and GIF images
        if request.path.endswith(('.png', '.jpg', '.gif')):
            request.abort()
    #driver.request_interceptor = interceptor
    driver.get(url)
    driver.execute_script("window.open('{}');".format(url))
    time.sleep(4) # wait for cookies loaded
    cnt = 0
    # scan the first few successful responses for the session cookie
    for request in driver.requests:
        if request.response is not None and request.response.status_code == 200:
            cookie = re.search(r"(KP_UIDz-ssn=[\S]+);", str(request.response.headers))
            if cookie is not None:
                cookie = cookie.group(1)
            cnt = cnt + 1
            if cnt > 3:
                break
    if cookie is None:
        print('Change cookie name and retry!')
    driver.quit()
    return cookie
def update_cookie(old, new):
    """Replace the stored cookie string *old* with *new* inside headers.py.

    Uses a literal string replacement: cookie values routinely contain
    regex metacharacters ('+', '=', ...), so the previous re.sub() call
    treated *old* as a pattern and could fail to match (or match the
    wrong text).
    """
    with open("headers.py", "r") as f:
        data = f.read()
    data = data.replace(old, new)
    with open("headers.py", "w") as f:
        f.write(data)
{
"api_name": "pandas.read_excel",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 50,... |
29748242745 | #server
import os
import tkinter as tk
import sqlite3
import random
from tkinter import ttk
from tkinter import messagebox as mbox
import PIL
from PIL import Image
from tkinter import *
from PIL import Image, ImageTk
from PIL import ImageGrab
import socket
#from gtts import gTTS
#import pyttsx3
from functools import partial
from datetime import datetime
from datetime import date
import time
cu = datetime.now()  # NOTE(review): assigned but never used
def start():
    """Build and run the Tk 'Chat Server' window.

    receive() listens on localhost:8500 for one client message; send()
    connects to localhost:9000 to deliver the reply (the client side is
    expected to use the mirrored ports). request() is defined but never
    wired to any widget.
    """
    def end(a):
        # destroy the given window
        a.destroy()
    def request():
        lr = Label(root1, text="Client requested for chatting", fg="maroon", bg="peachpuff", font=("Lucida console", 15))
        lr.place(x=120,y=200)
    def receive(a):
        # blockingly accept one client connection and show its message
        host='localhost'
        port=8500
        s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        s.bind((host,port))
        s.listen(1)
        c,addr=s.accept()
        mess1=c.recv(1024)
        mess1=mess1.decode()
        e0.insert(0,mess1)
        c.close()
    def send(a):
        # push the typed reply to the client, clear both fields, then
        # wait for the next incoming message
        host='localhost'
        port=9000
        s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        s.connect((host,port))
        mess1=e1.get()
        s.send(mess1.encode())
        e1.delete(0,END)
        e0.delete(0,END)
        s.close()
        receive(a)
    def undo(a):
        # clear the outgoing-message field
        e1.delete(0,END)
    # --- window layout -------------------------------------------------
    root1 = Tk()
    root1.title("Chat Server")
    root1.configure(bg="peachpuff")
    root1.geometry('800x700')
    f = Frame(root1, bg="peachpuff", borderwidth=20, relief=SUNKEN)
    f.pack()
    l = Label(f, text="Server", fg="maroon", bg="white", font=("Lucida console", 30), width=60,height=2)
    l.pack()
    pic = PIL.Image.open("chat.jpg").convert("RGB")
    pic = pic.resize((100,100))
    pic1 = ImageTk.PhotoImage(pic)
    img_label1 = Label(root1, image=pic1)
    img_label1.image=pic1
    img_label1.place(x=330,y=240)
    img_label1.pack_propagate(0)
    ls = Label(root1, text="Gray button indicates waiting for client..", fg="maroon", bg="peachpuff", font=("Lucida console", 15))
    ls.place(x=170,y=200)
    l1 = Label(root1, text="From client:", fg="maroon", bg="peachpuff", font=("Lucida console", 15))
    l1.place(x=120,y=400)
    l2 = Label(root1, text="To client:", fg="maroon", bg="peachpuff", font=("Lucida console", 15))
    l2.place(x=120,y=500)
    e0 = Entry(root1, font=("Candara", 18), width=25)
    e0.place(x=300, y=400)
    e1 = Entry(root1, font=("Candara", 18), width=25)
    e1.place(x=300, y=500)
    b1 = Button(root1, text=">>", bg="green", fg="black",width=3, overrelief=SUNKEN, font=("Calibri",15),borderwidth=2,command=partial(send,root1))
    b1.place(x=650, y=500)
    b2 = Button(root1, text="Undo", bg="maroon", fg="black",width=5, overrelief=SUNKEN, font=("Calibri",15),borderwidth=2,command=partial(undo,root1))
    b2.place(x=480, y=570)
    b3 = Button(root1, text="Accept request from client", bg="green", fg="black",width=30, overrelief=SUNKEN, font=("Calibri",15),command=partial(receive,root1))
    b3.place(x=220,y=150)
    root1.mainloop()
start()
| shetyeanuja/python-mini-projects | Client Server Chat/server.py | server.py | py | 3,275 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "socket.socket",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET"... |
#!/usr/bin/python3
'''
Select data from states table and filter it
by the names that contain an a
then print it.
'''
import sys
from model_state import Base, State
from sqlalchemy import (create_engine)
from sqlalchemy.orm import Session
if __name__ == "__main__":
    # argv: [1] mysql user, [2] password, [3] database name
    engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'.format
                           (sys.argv[1], sys.argv[2], sys.argv[3]),
                           pool_pre_ping=True)
    Base.metadata.create_all(engine)
    session = Session(engine)
    # states whose name contains a lowercase 'a', ordered by id
    for state in (session.query(State).order_by(State.id)
                  .filter(State.name.like('%a%'))):
        print("{}: {}".format(state.id, state.name))
    session.close()
| sebastiancalleu/holbertonschool-higher_level_programming | 0x0F-python-object_relational_mapping/9-model_state_filter_a.py | 9-model_state_filter_a.py | py | 706 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "model_state.Base.metadata.create_all",
"line_number": 19,
"usage_type": "call"
},
{
"api_n... |
72776117224 | import requests
import bs4
from bs4 import BeautifulSoup
import pandas as pd
import time
import csv
import simplejson as json
import threading
def extract_job_title_from_result(soup):
    """Collect the 'title' attribute of every jobTitle anchor inside the
    result rows of an Indeed search page."""
    return [anchor["title"]
            for row in soup.find_all(name="div", attrs={"class": "row"})
            for anchor in row.find_all(name="a", attrs={"data-tn-element": "jobTitle"})]
def extract_company_from_result(soup):
    """Company name per result row; falls back to the
    'result-link-source' span when no 'company' span exists."""
    companies = []
    for row in soup.find_all(name="div", attrs={"class": "row"}):
        primary = row.find_all(name="span", attrs={"class": "company"})
        if len(primary) > 0:
            spans = primary
        else:
            spans = row.find_all(name="span", attrs={"class": "result-link-source"})
        for span in spans:
            companies.append(span.text.strip())
    return companies
def extract_location_from_result(soup):
    """Text of every span carrying the 'location' class."""
    return [span.text for span in soup.findAll("span", attrs={"class": "location"})]
def extract_salary_from_result(soup):
    """Salary text per result row; tries the <nobr> tag first, then the
    'sjcl' div, and records 'Nothing_found' when both are missing."""
    salaries = []
    for row in soup.find_all(name="div", attrs={"class": "row"}):
        try:
            salaries.append(row.find('nobr').text)
            continue
        except:
            pass
        try:
            holder = row.find(name="div", attrs={"class": "sjcl"})
            salaries.append(holder.find("div").text.strip())
        except:
            salaries.append("Nothing_found")
    return salaries
def extract_summary_from_result(soup):
    """Stripped text of every span carrying the 'summary' class."""
    return [span.text.strip() for span in soup.findAll("span", attrs={"class": "summary"})]
# fetch one Indeed results page at import time and dump its markup
URL = "https://www.indeed.com/jobs?q=data+scientist+%2420%2C000&l=New+York&start=10"
page = requests.get(URL)
soup = BeautifulSoup(page.text, "html.parser")
print(soup.prettify())
if __name__ == "__main__":
    # NOTE(review): each thread runs an extractor but its return value is
    # discarded — nothing captures the extracted lists
    t1 = threading.Thread(target=extract_job_title_from_result, args=(soup,))
    t2 = threading.Thread(target=extract_company_from_result, args=(soup,))
    t3 = threading.Thread(target=extract_location_from_result,args=(soup,))
    t4 = threading.Thread(target=extract_salary_from_result,args=(soup,))
    t5 = threading.Thread(target=extract_summary_from_result,args=(soup,))
    t1.start()
    t2.start()
    t3.start()
    t4.start()
    t5.start()
    t1.join()
    t2.join()
    t3.join()
    t4.join()
    t5.join()
    # re-fetch the page and build one record per result row
    page = requests.get(URL,timeout=5) # + '&start=' + str(start))
    time.sleep(1)
    soup = BeautifulSoup(page.text, 'lxml')
    job_post = []
    for div in soup.find_all(name='div', attrs={'class':'row'}):
        # NOTE(review): company/location/summary all read the FIRST <span>
        # of the row, so they receive the same value — verify selectors
        job_post_object = {
            "job_title": div.find(name="a").text.encode('utf-8'),
            "company": div.find(name="span").text.encode('utf-8'),
            "location": div.find(name="span").text.encode('utf-8'),
            "summary": div.find(name='span').text.encode('utf-8'),
            "salary": div.find(name="div").text.encode('utf-8')
        }
        job_post.append(job_post_object)
    # NOTE(review): values are bytes — json.dump (simplejson) may refuse
    # to serialize them without a default= handler
    with open('IndeedData.json', 'w') as outfile:
        json.dump(job_post, outfile)
        outfile.write('\n')
| arthimj/Web-Scrapping-Python | web_scrapping.py | web_scrapping.py | py | 3,426 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
... |
from pathlib import Path
from my_scientific_profile.papers.papers import (
    fetch_all_paper_authors,
    fetch_all_paper_infos,
)
# fetch first so the later (deliberately mid-file, noqa'd) imports run
# against an already-populated cache
papers = fetch_all_paper_infos()
paper_authors = fetch_all_paper_authors()
print(f"fetched {len(papers)} papers and {len(paper_authors)} authors")
from my_scientific_profile.database.authors import save_all_paper_authors_to_s3  # noqa
from my_scientific_profile.database.papers import save_all_papers_to_s3, convert_papers_to_dataframe  # noqa
from my_scientific_profile.database.aws_s3 import S3_BUCKET, S3_CLIENT  # noqa
from to_quarto.utils import ROOT_DIR
# persist papers and authors to the S3 bucket
save_all_papers_to_s3(s3_client=S3_CLIENT, s3_bucket=S3_BUCKET)
save_all_paper_authors_to_s3(s3_client=S3_CLIENT, s3_bucket=S3_BUCKET)
print(f"saved to S3 {S3_CLIENT}")
# also export local JSON/CSV copies under <ROOT_DIR>/data
df = convert_papers_to_dataframe(papers)
path = Path(ROOT_DIR)
team_path = path.joinpath("data")
df.to_json(team_path.joinpath("all_papers.json"))
df.to_csv(team_path.joinpath("all_papers.csv"))
| tbereau/tbereau | scripts/fetch_papers.py | fetch_papers.py | py | 963 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "my_scientific_profile.papers.papers.fetch_all_paper_infos",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "my_scientific_profile.papers.papers.fetch_all_paper_authors",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "my_scientific_profile.databas... |
13239290421 | """Notes on the different columns of the csv files
PSNR: Peak Signal-to-Noise Ratio
SSIM: Structural Similarity
Loss_D: Discriminator Loss; Used to train Discriminator
Loss_G: Generator Loss; Used to train Discriminator; Composed of img perception and disc Loss
Score_D: Discriminator score given to the real image
Score_G: Discriminator score given to the fake image
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
# import seaborn as sns
# sns.set()
# sns.set_context("talk")
# sns.set_style('white')
STATS_DIR = 'logs/statistics'
STAT_FILENAMES = [f for f in os.listdir(STATS_DIR) if f!='.gitignore']
if not os.path.exists('plots'):
    os.makedirs('plots')
# one figure per statistics CSV: training losses on top,
# validation metrics (PSNR/SSIM) below
for f in STAT_FILENAMES:
    print(f)
    fname = f[:-4]  # strip the 4-char extension (.csv)
    # keep underscore-separated fields 2..5 as a short display name
    shorter_fname = '_'.join(fname.split('_')[2:6])
    df = pd.read_csv(os.path.join(STATS_DIR, f))
    fig = plt.figure()
    ax = fig.add_subplot(211)
    ax.set(xlabel="Epoch")
    df.plot(
        y='Loss_G',
        title='Training Losses',
        ax=ax)
    # 'adv0_' runs have no adversarial training, so no discriminator loss
    if 'adv0_' not in shorter_fname:
        df.plot(
            y='Loss_D',
            secondary_y=True,
            ax=ax)
    ax = fig.add_subplot(212)
    ax.set(xlabel="Epoch")
    df.plot(
        ax=ax,
        y='PSNR',
        title='Validation Metrics')
    df.plot(
        y='SSIM',
        secondary_y=True,
        ax=ax)
    fig.suptitle(shorter_fname, size=16)
    fig.tight_layout()
    fig.subplots_adjust(top=0.85)  # leave room for the suptitle
    fig.savefig(os.path.join('plots', shorter_fname+'.png'))
| PierreSp/DL4CV_2017_Final_Project | srgan/plots.py | plots.py | py | 1,509 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number... |
23747786249 | """
Khinshan Khan - oss.py.
This module implements a God class for OS simulation.
"""
from collections import deque
import itertools
from mcm_oss import memory
from mcm_oss import disk
class OSS:
"""An OS mimicker."""
    def __enter__(self):
        # context-manager support: no acquisition needed
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # no resources to release; exceptions propagate
        pass
    def __init__(self, ram_max, disks_max):
        """Set up RAM, disks, the two ready queues (real-time and
        common), and the PID counter."""
        self._memory = memory.Memory(ram_max)
        self._disks = disk.Disk(disks_max)
        self._rt_ready_queue = deque()
        self._common_ready_queue = deque()
        self._pid_count = 1
    def process(self, command, size):
        """
        Create real time or common process of `size' if enough contiguous memory is available.
        """
        proc = self._create_pcb(command, int(size))
        if proc is None: # short circuit if bad size
            return
        # 'AR' = real-time process, 'A' = common process
        if(command == 'AR'):
            self._rt_ready_queue.append(proc)
        elif(command == 'A'):
            self._common_ready_queue.append(proc)
    def hard_disk(self, command, number):
        """
        Moves process from ready queue to hard disk or from hard disk to ready queue depending on
        command.
        """
        if(command == 'd'):
            # 'd': move the frontmost ready process (real-time first) to disk
            proc = None
            if self._rt_ready_queue:
                proc = self._rt_ready_queue.popleft()
            elif self._common_ready_queue:
                proc = self._common_ready_queue.popleft()
            if proc:
                self._disks.add_proc(int(number), proc)
            else:
                print("No process to move to disk!")
        elif(command == 'D'):
            # 'D': return the process from disk to its matching ready queue
            proc = self._disks.remove_proc(int(number))
            if proc:
                if proc["type"] == "RT":
                    self._rt_ready_queue.append(proc)
                else:
                    self._common_ready_queue.append(proc)
            else:
                print("No process found in disk!")
    def show(self, show_type):
        """
        Show various status of the OS simulation:
        r: ready queue
        i: disks
        m: memory
        """
        if(show_type == 'r'):
            print("PID", "TYPE", "STATUS", sep='\t')
            # real-time queue drains first, so its head counts as "running"
            procs = itertools.chain(self._rt_ready_queue, self._common_ready_queue)
            running = next(procs, None)
            if running:
                print(running["pid"], running["type"], "running", sep='\t')
            for proc in procs:
                print(proc["pid"], proc["type"], "waiting", sep='\t')
        elif(show_type == 'i'):
            print("PID", "DISK", "STATUS", sep='\t')
            self._disks.io_snapshot()
        elif(show_type == 'm'):
            print("PID", "M_START", "M_END", sep='\t')
            procs = itertools.chain(self._rt_ready_queue, self._common_ready_queue)
            for proc in procs:
                print(proc["pid"], proc["start"], proc["end"], sep='\t')
            self._disks.memory_snapshot()
def time(self, command):
"""
Will terminate or rotate process since user decided it's time to do so.
"""
if(command == 'Q'):
if self._rt_ready_queue:
self._rt_ready_queue.rotate(-1)
elif self._common_ready_queue:
self._common_ready_queue.rotate(-1)
elif(command == 't'):
proc = None
if self._rt_ready_queue:
proc = self._rt_ready_queue.popleft()
elif self._common_ready_queue:
proc = self._common_ready_queue.popleft()
if proc:
self._memory.restore_memory(proc["start"], proc["end"])
def _create_pcb(self, command, size):
"""
Create the PCB for a new process if possible.
"""
if(size == 0):
print("Can't have a process of size 0!")
return None
start, end = self._memory.find_free(size)
if start is None:
print("Not enough contiguous memory available for this process!")
return None
# pcb for newly created process
proc_type = "RT" if command == "AR" else "Common"
proc = {"type": proc_type, "pid": self._pid_count, "start": start, "end": end}
self._pid_count += 1
return proc
| shan-memery/mcm-oss | mcm_oss/oss.py | oss.py | py | 4,275 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mcm_oss.memory.Memory",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "mcm_oss.memory",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "mcm_oss.disk.Disk",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "mcm_oss.disk",... |
26796000187 | import requests
from bs4 import BeautifulSoup
import pprint
def stocks_gainers():
    """Scrape Google Finance's "gainers" page and pretty-print the list of
    {name, price, gain} dicts, one per listed stock."""
    response = requests.get('https://www.google.com/finance/markets/gainers?hl=en')
    page = BeautifulSoup(response.text, 'html.parser')
    container = page.find('div', {'class': 'Sy70mc'})
    listing = container.find('ul', {'class': 'sbnBtf'})
    gainers = [
        {
            'name': row.find('div', {'class': 'COaKTb'}).text,
            'price': row.find('div', {'class': 'YMlKec'}).text,
            'gain': row.find('span', {'class': 'P2Luy Ez2Ioe'}).text,
        }
        for row in listing.find_all('li')
    ]
    pprint.pprint(gainers)
def stocks_loser():
    """Scrape Google Finance's "losers" page and pretty-print the list of
    {name, price, Lose} dicts, one per listed stock."""
    response = requests.get('https://www.google.com/finance/markets/losers?hl=en')
    page = BeautifulSoup(response.text, 'html.parser')
    container = page.find('div', {'class': 'Vd323d'})
    listing = container.find('ul', {'class': 'sbnBtf'})
    losers = [
        {
            'name': row.find('div', {'class': 'ZvmM7'}).text,
            'price': row.find('div', {'class': 'YMlKec'}).text,
            'Lose': row.find('span', {'class': 'P2Luy Ebnabc'}).text,
        }
        for row in listing.find_all('li')
    ]
    pprint.pprint(losers)
stocks_gainers()
| Prasadk1234/Project | Stock_monitoring.py | Stock_monitoring.py | py | 1,420 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_n... |
15351976065 | import os
import sys
import torchvision.models as models
import torch
import cv2
import argparse
import os
import time
import json
import sys
import dlib
import pandas as pd
import numpy as np
import imutils
from imutils.face_utils import FaceAligner
from tensorflow.keras.models import load_model, model_from_json
root_path = os.path.abspath(
os.path.join(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__)
)
),
os.path.pardir)
)
if root_path not in sys.path:
sys.path.append(root_path)
from utils import *
class FacePredictions:
    """Facial-emotion predictor configured from the 'face' modality config.

    NOTE(review): ``detect_emotions`` is referenced below but exists only as
    a commented-out stub, and ``self._labels`` is never assigned, so
    ``predict_sample`` cannot currently run — confirm against the full file.
    """
    def __init__(self, _config, verbose=False):
        # Keep only the default model-info block for the face modality.
        config = _config['modalities']['face']['model_info']['default']
        self._config = config
    def predict_sample(self, data, return_all=False):
        """Predict the emotion for one image (file path or decoded image)."""
        if isinstance(data, str):
            image = cv2.imread(data, 1)
            if image is None:
                # A missing file is only reported, not fatal.
                print("Can't find {} image".format(data))
                # sys.exit(-1)
        else:
            image = data
        success, image, emotion_predictions = self.detect_emotions(image)
        label = np.argmax(emotion_predictions)
        if return_all:
            return label, emotion_predictions, self._labels
        else:
            return label, emotion_predictions
    # def detect_emotions(self):
    def predict(self, data):
        """Predict emotions for a batch of samples.

        NOTE(review): ``source`` is bound to the whole list element (e.g.
        ``[path, label]``), not just the path — looks like it should be
        ``data_list[0]``; confirm with the commented example in __main__.
        """
        # read image
        res = []
        for i, data_list in enumerate(data):
            source, label = data_list, -1
            label, emotion_predictions = self.predict_sample(source)
            res.append([label, label_mapping['ravdess'][label], emotion_predictions])
        return res
if __name__ == '__main__':
    # config = get_config("{}/configs/basic.json".format(root_path))
    # face_predictor = FacePredictions(config)
    # res = face_predictor.predict([
    #     ['/data1/bixiao/Code/ERFramework/data/friends/face/dia2_utt5_14.jpg', 0],
    #     ['/data1/bixiao/Code/ERFramework/data/friends/face/dia2_utt5_21.jpg', 0]
    # ])
    # print(res)
    # -*- coding: utf-8 -*-
    import urllib.request
    import urllib.error
    import time
    # Face++ detect endpoint; the request body is a hand-built multipart form.
    http_url = 'https://api-cn.faceplusplus.com/facepp/v3/detect'
    # NOTE(review): live API credentials are committed in source control;
    # rotate them and load from the environment instead.
    key = "XSyHeF1ysKH4dpgiuRUvNydxB4pzJMp8"
    secret = "4-nmA0PI-xij4nRVQ2RVOrFlNzoDVpGa"
    filepath = os.path.join(root_path, "data/ravdess/face/01-01-07-01-01-01-02.png")
    # Unique multipart boundary derived from the current time (ms).
    boundary = '----------%s' % hex(int(time.time() * 1000))
    data = []
    data.append('--%s' % boundary)
    data.append('Content-Disposition: form-data; name="%s"\r\n' % 'api_key')
    data.append(key)
    data.append('--%s' % boundary)
    data.append('Content-Disposition: form-data; name="%s"\r\n' % 'api_secret')
    data.append(secret)
    data.append('--%s' % boundary)
    fr = open(filepath, 'rb')
    data.append('Content-Disposition: form-data; name="%s"; filename=" "' % 'image_file')
    data.append('Content-Type: %s\r\n' % 'application/octet-stream')
    data.append(fr.read())
    fr.close()
    data.append('--%s' % boundary)
    data.append('Content-Disposition: form-data; name="%s"\r\n' % 'return_landmark')
    data.append('1')
    data.append('--%s' % boundary)
    data.append('Content-Disposition: form-data; name="%s"\r\n' % 'return_attributes')
    data.append(
        "gender,age,smiling,headpose,facequality,blur,eyestatus,emotion,ethnicity,beauty,mouthstatus,eyegaze,skinstatus")
    data.append('--%s--\r\n' % boundary)
    # The parts list mixes str and bytes (the raw file payload); encode the
    # str parts so the whole body can be joined as bytes.
    for i, d in enumerate(data):
        if isinstance(d, str):
            data[i] = d.encode('utf-8')
    http_body = b'\r\n'.join(data)
    # build http request
    req = urllib.request.Request(url=http_url, data=http_body)
    # header
    req.add_header('Content-Type', 'multipart/form-data; boundary=%s' % boundary)
    try:
        # post data to server
        resp = urllib.request.urlopen(req, timeout=5)
        # get response
        qrcont = resp.read()
        # if you want to load as json, you should decode first,
        # for example: json.loads(qrount.decode('utf-8'))
        import pprint
        pprint.pprint(qrcont.decode('utf-8'))
    except urllib.error.HTTPError as e:
        print(e.read().decode('utf-8'))
| Freja1122/ERFramework | models/face/facePredictions_faceAPI.py | facePredictions_faceAPI.py | py | 4,195 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number"... |
15150949741 | """
Generic Kedro Nodes
"""
import logging
import json
from sklearn.ensemble import IsolationForest
from sklearn.covariance import EllipticEnvelope
from sklearn.neighbors import LocalOutlierFactor
from .plugin import hooks
import pandas as pd
from .views import ADAlgorithms, create_samples_os_view
iso_params = {
"IsolationForest.n_estimators": 100,
"IsolationForest.max_samples": "auto",
"IsolationForest.contamination": "auto",
"IsolationForest.max_features": 1.0,
"IsolationForest.bootstrap": False,
"IsolationForest.n_jobs": None,
"IsolationForest.random_state": None,
"IsolationForest.verbose": 0,
"IsolationForest.warm_start": False,
}
ell_params = {
"EllipticEnvelope.store_precision": True,
"EllipticEnvelope.assume_centered": False,
"EllipticEnvelope.support_fraction": None,
"EllipticEnvelope.contamination": 0.1,
"EllipticEnvelope.random_state": None,
}
local_params = {
"LocalOutlierFactor.n_neighbors": 20,
"LocalOutlierFactor.algorithm": "auto",
"LocalOutlierFactor.leaf_size": 30,
"LocalOutlierFactor.metric": "minkowski",
"LocalOutlierFactor.p": 2,
"LocalOutlierFactor.metric_params": None,
"LocalOutlierFactor.contamination": "auto",
"LocalOutlierFactor.novelty": False,
"LocalOutlierFactor.n_jobs": None,
}
def isolation_forest(data: pd.DataFrame, params: dict) -> pd.DataFrame:
    """Score *data* for outliers with the Isolation Forest algorithm.

    The columns named in ``params['cols']`` are used as features.

    :param data: input dataframe
    :param params: anomaly-detection parameters
    :return: dataframe with anomaly scores and predictions
    """
    algo = ADAlgorithms.IsolationForest
    create_samples_os_view(algo, params)
    return outlier_score(algo, data, params)
def elliptic_envelope(data: pd.DataFrame, params: dict) -> pd.DataFrame:
    """Score *data* for outliers with the Elliptic Envelope algorithm.

    The columns named in ``params['cols']`` are used as features.

    :param data: input dataframe
    :param params: anomaly-detection parameters
    :return: dataframe with anomaly scores and predictions
    """
    algo = ADAlgorithms.EllipticEnvelope
    create_samples_os_view(algo, params)
    return outlier_score(algo, data, params)
def local_outlier_factor(data: pd.DataFrame, params: dict) -> pd.DataFrame:
    """Score *data* for outliers with the Local Outlier Factor algorithm.

    The columns named in ``params['cols']`` are used as features.

    :param data: input dataframe
    :param params: anomaly-detection parameters
    :return: dataframe with anomaly scores and predictions
    """
    algo = ADAlgorithms.LocalOutlierFactor
    create_samples_os_view(algo, params)
    return outlier_score(algo, data, params)
def outlier_score(algo: ADAlgorithms, data: pd.DataFrame, params: dict) -> pd.DataFrame:
    """
    Calculate outlier scores using the algorithm selected by one of the
    wrapper functions (isolation forest, elliptic envelope, local outlier
    factor).

    Fix: per-algorithm overrides are merged into a *local* copy of the
    module-level default dicts instead of ``dict.update``-ing them in place,
    so one call's overrides no longer leak into every later call.

    :param algo: algorithm to be used for the outlier detection.
    :param data: input dataframe; must contain an ``id`` column plus the
        feature columns named in ``params['cols']``.
    :param params: module specific parameters.
    :return: a dataframe with os metrics (outlier score, prediction, used
        algorithm and parameters)
    :raises ValueError: for an unknown algorithm (previously this fell
        through to an UnboundLocalError).
    :raises Exception: wrapping any fitting failure.
    """
    known = (
        ADAlgorithms.IsolationForest,
        ADAlgorithms.EllipticEnvelope,
        ADAlgorithms.LocalOutlierFactor,
    )
    if algo not in known:
        raise ValueError("Unsupported anomaly-detection algorithm: %r" % (algo, ))
    cols = params["cols"]
    x = data[cols].to_numpy()
    try:
        if algo == ADAlgorithms.IsolationForest:
            cfg = dict(iso_params)  # local copy: never mutate module defaults
            try:
                cfg.update(params["IsolationForest"])
            except KeyError:
                logging.info(
                    "No parameters for Isolation Forest found, using the default ones"
                )
            algo_obj: IsolationForest = IsolationForest(
                n_estimators=cfg["IsolationForest.n_estimators"],
                max_samples=cfg["IsolationForest.max_samples"],
                contamination=cfg["IsolationForest.contamination"],
                max_features=cfg["IsolationForest.max_features"],
                bootstrap=cfg["IsolationForest.bootstrap"],
                n_jobs=cfg["IsolationForest.n_jobs"],
                random_state=cfg["IsolationForest.random_state"],
                verbose=cfg["IsolationForest.verbose"],
                warm_start=cfg["IsolationForest.warm_start"],
            ).fit(x)
            # score_samples returns "normality"; negate so higher == more anomalous.
            ols = -algo_obj.score_samples(x)
            prd = algo_obj.predict(x)
        elif algo == ADAlgorithms.EllipticEnvelope:
            cfg = dict(ell_params)  # local copy: never mutate module defaults
            try:
                cfg.update(params["EllipticEnvelope"])
            except KeyError:
                logging.info(
                    "No parameters for Elliptic Envelope found, using the default ones"
                )
            algo_obj: EllipticEnvelope = EllipticEnvelope(
                store_precision=cfg["EllipticEnvelope.store_precision"],
                assume_centered=cfg["EllipticEnvelope.assume_centered"],
                support_fraction=cfg["EllipticEnvelope.support_fraction"],
                contamination=cfg["EllipticEnvelope.contamination"],
                random_state=cfg["EllipticEnvelope.random_state"],
            ).fit(x)
            ols = -algo_obj.score_samples(x)
            prd = algo_obj.predict(x)
        else:  # ADAlgorithms.LocalOutlierFactor
            cfg = dict(local_params)  # local copy: never mutate module defaults
            try:
                cfg.update(params["LocalOutlierFactor"])
            except KeyError:
                logging.info(
                    "No parameters for Local Outlier Factor found, using the default ones"
                )
            algo_obj: LocalOutlierFactor = LocalOutlierFactor(
                n_neighbors=cfg["LocalOutlierFactor.n_neighbors"],
                algorithm=cfg["LocalOutlierFactor.algorithm"],
                leaf_size=cfg["LocalOutlierFactor.leaf_size"],
                metric=cfg["LocalOutlierFactor.metric"],
                p=cfg["LocalOutlierFactor.p"],
                metric_params=cfg["LocalOutlierFactor.metric_params"],
                contamination=cfg["LocalOutlierFactor.contamination"],
                novelty=cfg["LocalOutlierFactor.novelty"],
                n_jobs=cfg["LocalOutlierFactor.n_jobs"],
            ).fit(x)
            ols = -algo_obj.negative_outlier_factor_
            prd = algo_obj.fit_predict(x)
    except MemoryError as e:
        logging.error(e)
        raise Exception("Ran out of memory") from e
    except Exception as e:
        logging.error(e)
        raise Exception(f"Could not run {algo.value}") from e
    # 1 for inliers, -1 for outliers.
    predictions: list[bool] = [p == -1 for p in prd]
    os_df: pd.DataFrame = pd.DataFrame()
    os_df["sample_id"] = data["id"]
    os_df["run_id"] = hooks.trace_id
    os_df["score"] = ols
    os_df["algorithm"] = algo.value
    os_df["parameters"] = json.dumps(params)
    os_df["prediction"] = predictions
    return os_df
| TU-Berlin-SNET/Waldo-Kedro-Plugin | waldo_kedro_plugin/nodes.py | nodes.py | py | 7,196 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "views.create_samples_os_view",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "views.ADAlgorithms.IsolationForest",
"line_number": 58,
"usage_type": "attribute"
},... |
10080595644 | #! /usr/bin/python3
import os
import boto3
import json
import logging
import random
import traceback
from datetime import datetime
from python.opentelemetry import trace
from python.opentelemetry.semconv.trace import SpanAttributes
CUSTOM_OTEL_SPAN_EVENT_NAME = 'LambdaUpdateEvent'
SQS_MESSAGE_GROUP_ID = 'otel'
# Reset and init logger
logger = logging.getLogger()
if logger.handlers:
for handler in logger.handlers:
logger.removeHandler(handler)
logging.basicConfig(level=logging.INFO)
client_s3 = boto3.client('s3')
client_sqs = boto3.client('sqs')
random.seed(datetime.now().timestamp())
OUTPUT_S3_BUCKET_NAME = os.getenv('OUTPUT_S3_BUCKET_NAME')
SQS_QUEUE_URL = os.getenv('SQS_QUEUE_URL')
def cause_error():
    """Randomly decide (p = 1/16) whether to inject a failure this run."""
    return random.randint(0, 15) == 1
def get_custom_object_from_input_s3(
    bucket_name,
    key_name
):
    """Fetch and JSON-decode the custom object at s3://bucket_name/key_name.

    :raises Exception: wrapping any S3/JSON failure; the original error is
        preserved as ``__cause__`` via explicit chaining.
    """
    logger.info('Getting custom object from the input S3...')
    try:
        custom_object = json.loads(
            client_s3.get_object(
                Bucket=bucket_name,
                Key=key_name,
            )['Body'].read())
        logger.info('Getting custom object from the input S3 is succeeded.')
        return custom_object
    except Exception as e:
        msg = f'Getting custom object from the input S3 is failed: {str(e)}'
        logger.error(msg)
        raise Exception(msg) from e
def update_custom_object(
    custom_object,
):
    """Mark *custom_object* as updated (mutates the dict in place)."""
    custom_object.update({'isUpdated': True})
def store_custom_object_in_output_s3(
    key_name,
    custom_object,
):
    """JSON-encode *custom_object* and write it to the output bucket.

    Note: ``cause_error()`` deliberately swaps in a wrong bucket name at
    random to inject chaos for the monitoring demo.

    :raises Exception: wrapping any S3 failure; the original error is
        preserved as ``__cause__`` via explicit chaining.
    """
    try:
        logger.info('Updating custom object in the output S3...')
        bucket_name = f'{OUTPUT_S3_BUCKET_NAME}'
        if cause_error():
            bucket_name = 'wrong-bucket-name'
        client_s3.put_object(
            Bucket=bucket_name,
            Key=key_name,
            Body=json.dumps(custom_object),
        )
        logger.info('Updating custom object in output S3 is succeeded.')
    except Exception as e:
        msg = f'Updating custom object in the output S3 is failed: {str(e)}'
        logger.error(msg)
        raise Exception(msg) from e
def send_custom_object_s3_info_to_sqs(
    bucket_name,
    key_name,
):
    """Publish the {bucket, key} location of the updated object to SQS.

    :raises Exception: wrapping any SQS failure; the original error is
        preserved as ``__cause__`` via explicit chaining.
    """
    try:
        logger.info(
            'Sending S3 info of the updated custom object to SQS...')
        message = {
            'bucket': bucket_name,
            'key': key_name,
        }
        client_sqs.send_message(
            MessageGroupId=SQS_MESSAGE_GROUP_ID,
            QueueUrl=SQS_QUEUE_URL,
            MessageBody=json.dumps(message)
        )
        logger.info(
            'Sending S3 info of the updated custom object to SQS is succeeded.')
    except Exception as e:
        msg = f'Sending S3 info of the updated custom object to SQS is failed: {str(e)}'
        logger.error(msg)
        raise Exception(msg) from e
def enrich_span_with_success(
    context,
):
    """Attach a success event (with the Lambda request id) to the current span."""
    active_span = trace.get_current_span()
    event_attributes = {
        'is.successful': True,
        'aws.request.id': context.aws_request_id,
    }
    active_span.add_event(CUSTOM_OTEL_SPAN_EVENT_NAME, attributes=event_attributes)
def enrich_span_with_failure(
    context,
    e,
):
    """Mark the current span failed: error status, recorded exception and a
    failure event carrying the Lambda request id."""
    active_span = trace.get_current_span()
    for attr_key, attr_value in (
        ('otel.status_code', 'ERROR'),
        ('otel.status_description', 'Update Lambda is failed.'),
    ):
        active_span.set_attribute(attr_key, attr_value)
    active_span.record_exception(exception=e, escaped=True)
    event_attributes = {
        'is.successful': False,
        'aws.request.id': context.aws_request_id,
    }
    active_span.add_event(CUSTOM_OTEL_SPAN_EVENT_NAME, attributes=event_attributes)
def lambda_handler(event, context):
    """S3-triggered entry point: read object, update it, store it, notify SQS.

    NOTE(review): only the first S3 record of the event is processed, and a
    failure is swallowed after enriching the span — the Lambda then reports
    success, so S3 will not retry.  Confirm both are intentional.
    """
    # Parse bucket information
    bucket_name = event['Records'][0]['s3']['bucket']['name']
    key_name = event['Records'][0]['s3']['object']['key']
    try:
        # Create the custom object from input bucket
        custom_object = get_custom_object_from_input_s3(bucket_name, key_name)
        # Update custom object
        update_custom_object(custom_object)
        # Store the custom object in S3
        store_custom_object_in_output_s3(key_name, custom_object)
        # Send custom object to SQS
        send_custom_object_s3_info_to_sqs(bucket_name, key_name)
        # Enrich span with success
        enrich_span_with_success(context)
    except Exception as e:
        # Enrich span with failure
        enrich_span_with_failure(context, e)
| utr1903/monitoring-lambda-with-opentelemetry | python/apps/update/lambda_function.py | lambda_function.py | py | 4,368 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "boto3.client"... |
32925227061 | import sqlite3
import sys
import colorama
connection = sqlite3.connect("rubiksql.db")
cursor = connection.cursor()
colorama.init()
def selectTop (cantidad = 10, direction = "ASC", campos = "*"):
    """Return the *cantidad* layouts ordered by distance.

    LIMIT is now bound as a SQL parameter and *direction* is validated
    against a whitelist (ORDER BY direction cannot be bound), closing the
    string-concatenation injection path for those two arguments.

    :param campos: column list, still interpolated verbatim — pass trusted
        values only.
    :raises ValueError: if direction is not 'ASC' or 'DESC'.
    """
    global cursor, connection
    if direction not in ("ASC", "DESC"):
        raise ValueError("direction must be 'ASC' or 'DESC'")
    cursor.execute("SELECT "+campos+" FROM layouts ORDER BY distance "+
                   direction+" LIMIT ?;", (int(cantidad),))
    connection.commit()
    return cursor.fetchall()
def getCantidad ():
    """Return the number of rows in the layouts table.

    Uses COUNT(*) instead of fetching every row just to len() the result.
    """
    global cursor, connection
    cursor.execute("SELECT COUNT(*) FROM layouts;")
    connection.commit()
    return cursor.fetchone()[0]
def diferentes ():
    """Print how many layouts exist at each distance value ("d: count").

    A Counter replaces the original fixed 16-slot list, so a distance above
    15 no longer raises IndexError; output order and format are unchanged.
    """
    from collections import Counter
    global cursor, connection
    cursor.execute("SELECT distance FROM layouts;")
    connection.commit()
    conteo = Counter(fila[0] for fila in cursor.fetchall())
    for distancia in sorted(conteo):
        print(str(distancia)+": "+str(conteo[distancia]))
def acomulador():
    """Repoint the module-level connection/cursor at Acomulador.db."""
    global connection, cursor
    connection = sqlite3.connect("Acomulador.db")
    cursor = connection.cursor()
def rubiksql():
    """Repoint the module-level connection/cursor at rubiksql.db."""
    global connection, cursor
    connection = sqlite3.connect("rubiksql.db")
    cursor = connection.cursor()
def clearAll():
    """Drop the layouts table in BOTH databases (destructive!).

    Leaves the module connected to rubiksql.db afterwards.
    """
    global connection, cursor
    acomulador()
    cursor.execute("DROP TABLE layouts;")
    connection.commit()
    rubiksql()
    cursor.execute("DROP TABLE layouts;")
    connection.commit()
def help():
    """Print the console commands with a short (Spanish) description."""
    comandos = (
        ("rubiksql() ", "Cambiar base de datos a rubiksql.db"),
        ("acomulador() ", "Cambiar base de datos a Acomulador.db"),
        ("diferentes() ", "Muestra la cantidad de cada distancia"),
        ("getCantidad() ", "Muestra la cantidad total en la BD"),
        ("selectTop(cantidad = 10, direction = 'ASC', campos = '*') ", "Muestra la cantidad total en la BD"),
        ("clearAll() ", "DROP TABLES"),
    )
    for nombre, descripcion in comandos:
        print(colorama.Fore.GREEN + nombre + colorama.Fore.WHITE + descripcion)
def quit():
    """Say goodbye and exit the interpreter with status 0."""
    despedida = "Agur"
    print(despedida)
    sys.exit(0)
def main ():
    """Tiny REPL: evaluates whatever the user types as Python code.

    WARNING(review): exec(input()) runs arbitrary code — acceptable for a
    local debugging console only; never expose it beyond that.
    """
    global connection, cursor
    while True:
        exec(input("<rubikLooker:#>"))
main() | jonoreilly/python | rubik/rubikLooker.py | rubikLooker.py | py | 2,605 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "colorama.init",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"li... |
21620485761 | from __future__ import absolute_import
import sys
from typing import ByteString
from typing import Mapping
from typing import NamedTuple
from typing import Optional
from typing import Sequence
from uuid import uuid4
import numpy as np
from past.builtins import unicode
from apache_beam.portability.api import schema_pb2
from apache_beam.typehints.native_type_compatibility import _get_args
from apache_beam.typehints.native_type_compatibility import _match_is_exactly_mapping
from apache_beam.typehints.native_type_compatibility import _match_is_named_tuple
from apache_beam.typehints.native_type_compatibility import _match_is_optional
from apache_beam.typehints.native_type_compatibility import _safe_issubclass
from apache_beam.typehints.native_type_compatibility import extract_optional_type
# Registry of typings for a schema by UUID
class SchemaTypeRegistry(object):
  """Registry mapping a schema id to its (typing, proto schema) pair."""
  def __init__(self):
    self.by_id = {}
    self.by_typing = {}

  def add(self, typing, schema):
    self.by_id[schema.id] = (typing, schema)

  def _lookup(self, unique_id, index):
    # Shared helper: return one half of the stored pair, or None if absent.
    entry = self.by_id.get(unique_id)
    return None if entry is None else entry[index]

  def get_typing_by_id(self, unique_id):
    return self._lookup(unique_id, 0)

  def get_schema_by_id(self, unique_id):
    return self._lookup(unique_id, 1)
SCHEMA_REGISTRY = SchemaTypeRegistry()
# Bi-directional mappings
_PRIMITIVES = (
(np.int8, schema_pb2.BYTE),
(np.int16, schema_pb2.INT16),
(np.int32, schema_pb2.INT32),
(np.int64, schema_pb2.INT64),
(np.float32, schema_pb2.FLOAT),
(np.float64, schema_pb2.DOUBLE),
(unicode, schema_pb2.STRING),
(bool, schema_pb2.BOOLEAN),
(bytes if sys.version_info.major >= 3 else ByteString, schema_pb2.BYTES),
)
PRIMITIVE_TO_ATOMIC_TYPE = dict((typ, atomic) for typ, atomic in _PRIMITIVES)
ATOMIC_TYPE_TO_PRIMITIVE = dict((atomic, typ) for typ, atomic in _PRIMITIVES)
# One-way mappings
PRIMITIVE_TO_ATOMIC_TYPE.update({
# In python 2, this is a no-op because we define it as the bi-directional
# mapping above. This just ensures the one-way mapping is defined in python
# 3.
ByteString: schema_pb2.BYTES,
# Allow users to specify a native int, and use INT64 as the cross-language
# representation. Technically ints have unlimited precision, but RowCoder
# should throw an error if it sees one with a bit width > 64 when encoding.
int: schema_pb2.INT64,
float: schema_pb2.DOUBLE,
})
def typing_to_runner_api(type_):
  """Convert a Python typing/type to a portable ``schema_pb2.FieldType``.

  NamedTuple subclasses become row types (registered once per type id in
  SCHEMA_REGISTRY); primitives go through PRIMITIVE_TO_ATOMIC_TYPE;
  Mapping/Optional/Sequence are handled structurally and recursively.
  Raises ValueError for unsupported types.
  """
  if _match_is_named_tuple(type_):
    schema = None
    if hasattr(type_, 'id'):
      schema = SCHEMA_REGISTRY.get_schema_by_id(type_.id)
    if schema is None:
      fields = [
          schema_pb2.Field(
              name=name, type=typing_to_runner_api(type_._field_types[name]))
          for name in type_._fields
      ]
      type_id = str(uuid4())
      schema = schema_pb2.Schema(fields=fields, id=type_id)
      SCHEMA_REGISTRY.add(type_, schema)
    return schema_pb2.FieldType(row_type=schema_pb2.RowType(schema=schema))
  # All concrete types (other than NamedTuple sub-classes) should map to
  # a supported primitive type.
  elif type_ in PRIMITIVE_TO_ATOMIC_TYPE:
    return schema_pb2.FieldType(atomic_type=PRIMITIVE_TO_ATOMIC_TYPE[type_])
  elif sys.version_info.major == 2 and type_ == str:
    raise ValueError(
        "type 'str' is not supported in python 2. Please use 'unicode' or "
        "'typing.ByteString' instead to unambiguously indicate if this is a "
        "UTF-8 string or a byte array.")
  elif _match_is_exactly_mapping(type_):
    key_type, value_type = map(typing_to_runner_api, _get_args(type_))
    return schema_pb2.FieldType(
        map_type=schema_pb2.MapType(key_type=key_type, value_type=value_type))
  elif _match_is_optional(type_):
    # It's possible that a user passes us Optional[Optional[T]], but in python
    # typing this is indistinguishable from Optional[T] - both resolve to
    # Union[T, None] - so there's no need to check for that case here.
    result = typing_to_runner_api(extract_optional_type(type_))
    result.nullable = True
    return result
  elif _safe_issubclass(type_, Sequence):
    element_type = typing_to_runner_api(_get_args(type_)[0])
    return schema_pb2.FieldType(
        array_type=schema_pb2.ArrayType(element_type=element_type))
  raise ValueError("Unsupported type: %s" % type_)
def typing_from_runner_api(fieldtype_proto):
  """Inverse of typing_to_runner_api: ``FieldType`` proto -> Python typing.

  Row types are resolved through SCHEMA_REGISTRY when possible; otherwise a
  fresh NamedTuple is synthesised, registered, and given a RowCoder.

  NOTE(review): the trailing ``logical_type`` branch is an unimplemented
  TODO and falls through, returning None.
  """
  if fieldtype_proto.nullable:
    # In order to determine the inner type, create a copy of fieldtype_proto
    # with nullable=False and pass back to typing_from_runner_api
    base_type = schema_pb2.FieldType()
    base_type.CopyFrom(fieldtype_proto)
    base_type.nullable = False
    return Optional[typing_from_runner_api(base_type)]
  type_info = fieldtype_proto.WhichOneof("type_info")
  if type_info == "atomic_type":
    try:
      return ATOMIC_TYPE_TO_PRIMITIVE[fieldtype_proto.atomic_type]
    except KeyError:
      raise ValueError(
          "Unsupported atomic type: {0}".format(fieldtype_proto.atomic_type))
  elif type_info == "array_type":
    return Sequence[typing_from_runner_api(
        fieldtype_proto.array_type.element_type)]
  elif type_info == "map_type":
    return Mapping[typing_from_runner_api(fieldtype_proto.map_type.key_type),
                   typing_from_runner_api(fieldtype_proto.map_type.value_type)]
  elif type_info == "row_type":
    schema = fieldtype_proto.row_type.schema
    user_type = SCHEMA_REGISTRY.get_typing_by_id(schema.id)
    if user_type is None:
      # Imported here to avoid a circular module-level dependency.
      from apache_beam import coders
      type_name = 'BeamSchema_{}'.format(schema.id.replace('-', '_'))
      user_type = NamedTuple(
          type_name,
          [(field.name, typing_from_runner_api(field.type))
           for field in schema.fields])
      user_type.id = schema.id
      SCHEMA_REGISTRY.add(user_type, schema)
      coders.registry.register_coder(user_type, coders.RowCoder)
    return user_type
  elif type_info == "logical_type":
    pass  # TODO
def named_tuple_from_schema(schema):
  """Return the NamedTuple type corresponding to a Schema proto."""
  field_type = schema_pb2.FieldType(row_type=schema_pb2.RowType(schema=schema))
  return typing_from_runner_api(field_type)
def named_tuple_to_schema(named_tuple):
  """Return the Schema proto corresponding to a NamedTuple type."""
  field_type = typing_to_runner_api(named_tuple)
  return field_type.row_type.schema
| a0x8o/kafka | sdks/python/apache_beam/typehints/schemas.py | schemas.py | py | 6,240 | python | en | code | 59 | github-code | 36 | [
{
"api_name": "numpy.int8",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "apache_beam.portability.api.schema_pb2.BYTE",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "apache_beam.portability.api.schema_pb2",
"line_number": 45,
"usage_type"... |
34451715981 | import arff
import pandas as pd
import numpy as np
import json
import data_utils
import os
def load_data(use_data=None):
    """Load ``dataset/<use_data>.npy`` and return it as a float array.

    :param use_data: basename (without extension) of the .npy file.
    :return: the loaded array cast to float64.
    """
    data_load = np.load('dataset/' + use_data + '.npy')
    print(data_load.shape)
    # np.float was removed in NumPy 1.24; the builtin float is the
    # documented replacement and means float64 here as before.
    return np.asarray(data_load).astype(float)
def creat_dataset(directory):
    """Create *directory* (and any missing parents) if it does not exist.

    ``exist_ok=True`` removes the check-then-create race of the previous
    ``os.path.exists`` guard; behavior is otherwise identical.
    """
    os.makedirs(directory, exist_ok=True)
if __name__ == "__main__":
    # Split the dataset across `client_no` streaming clients and export one
    # ARFF file per client.
    client_no = 30
    dataset_p = "kdd_10"
    directory_p = "arff_data/"+dataset_p+"_"+str(client_no)
    creat_dataset(directory_p)
    data_load = load_data(use_data=dataset_p)
    data_client, client_name = data_utils.StreamClientData(data_load, client_no)
    # Labels are expected to be zero-based; shift down if no 0 is present.
    if (data_load[:, -1] == 0).any():
        print('Okay')
    else:
        print('Label Transformation')
        data_load[:, -1] = data_load[:, -1] - 1
    for cl_key in data_client.keys():
        print('Current Client: ', cl_key, end="\n")
        data = data_client[cl_key]
        #print(data.shape)
        attributes = data.shape[1]
        print(data.shape)
        # Last column is the label; the rest become attr_1..attr_{n-1}.
        df = pd.DataFrame(data=data, columns = ["attr_"+str(i+1) for i in range(attributes-1)]+['label'])
        dict_obj = {"attributes":[(col, u'NUMERIC' if col=="label" else u'REAL') for col in list(df.columns)],
                    "data": df.values.tolist(),
                    u'description': u'',
                    # NOTE(review): relation name is hard-coded to
                    # 'electricity_' even for other datasets — confirm.
                    "relation": 'electricity_'+cl_key
                    }
        arff_doc = arff.dumps(dict_obj)
        output_filename = directory_p+"/"+dataset_p+"_"+cl_key +'.arff'
        with open(output_filename, "w") as fp:
            fp.write(arff_doc)
| mvisionai/FedLimited | convert_to_arff.py | convert_to_arff.py | py | 1,605 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_n... |
8306618656 | from django.contrib import admin
from .models import Topic, Course, Student, Order, Review
from decimal import Decimal
def decrease_price(modeladmin, request, queryset):
    """Admin action: discount every selected course's price by 10%.

    Fix: use Decimal('0.9') (exact) instead of Decimal(0.9), which carries
    binary-float noise (0.9000000000000000222...) into every saved price.
    """
    for obj in queryset:
        obj.price = obj.price * Decimal('0.9')
        obj.save()
class CourseAdmin(admin.ModelAdmin):
    """Admin options for Course: grouped edit form, three list columns and
    the bulk 10%-discount action."""
    fields = [('title', 'topic'), ('price', 'num_reviews', 'for_everyone')]
    list_display = ('title', 'topic', 'price')
    actions = [decrease_price]
class OrderAdmin(admin.ModelAdmin):
    """Admin options for Order: grouped edit form plus a change list that
    includes the computed total_items column."""
    fields = ['courses', ('student', 'order_status', 'order_date')]
    list_display = ('id', 'student', 'order_status', 'order_date', 'total_items')
class CourseInLine(admin.TabularInline):
    """Inline editor so Courses can be edited directly on the Topic page."""
    model = Course
class TopicAdmin(admin.ModelAdmin):
    """Admin options for Topic, embedding its Courses inline."""
    list_display = ('name', 'length')
    inlines = [CourseInLine, ]
class StudentAdmin(admin.ModelAdmin):
    """Admin options for Student, with a computed registered-courses column."""
    list_display = ('first_name', 'last_name', 'level', 'list_of_registered_courses')
    def list_of_registered_courses(self, obj):
        # Titles of every course the student is registered for.
        # NOTE(review): a raw Python list is rendered with brackets/quotes
        # in the change list; ", ".join(...) may read better.
        courses = obj.registered_courses.all()
        list_courses = [c.title for c in courses]
        return list_courses
# Register your models here.
# Each model is attached to its customized ModelAdmin where one exists;
# Review uses the default admin.
admin.site.register(Topic, TopicAdmin)
# admin.site.register(Topic)
# admin.site.register(Course)
admin.site.register(Course, CourseAdmin)
admin.site.register(Student, StudentAdmin)
# admin.site.register(Order)
admin.site.register(Order, OrderAdmin)
admin.site.register(Review)
| krunal1998/Course-Registration | myapp/admin.py | admin.py | py | 1,417 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "decimal.Decimal",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 12,
"usage_type": "name"
},
{
"api_name... |
28297108867 | #!/usr/bin/env python3
from collections import namedtuple
from datetime import datetime
import os, sys, time
import xml.etree.ElementTree as ET
import logging
import yaml
import requests
import re
import twitter
import nltk
import argparse
# CLI: --offset lets a restart skip articles that were already tweeted.
parser = argparse.ArgumentParser()
parser.add_argument('--offset', type=int, help='articles offset', default=0)
args = parser.parse_args()

# Append-mode file logging with "timestamp | level | message" lines.
log = logging.getLogger()
formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s')
fhandler = logging.FileHandler(filename='twitter.log', mode='a')
fhandler.setFormatter(formatter)
log.addHandler(fhandler)
log.setLevel(logging.INFO)

# arXiv RSS feeds to mirror to twitter.
ARXIV_SRC = [
    {
        'id': 'CSCL',
        'name': 'Computation and Language',
        'url': 'http://export.arxiv.org/rss/cs.CL'
    }
]

TWEET_STACK_DURATION = 10*60*60  # seconds over which one batch of tweets is spread
REFRESH_DELAY = 5*60*60          # seconds between feed polls
# 23-character stand-in used while composing so truncation accounts for the
# link -- presumably matching twitter's t.co shortened-link length; confirm.
# NOTE(review): name is misspelled ("PLACEHOLER") but referenced throughout.
LINK_PLACEHOLER = '0'*23
AUTH_OBJ = None  # credentials dict, filled by parse_keys()
def parse_articles(xml):
    """Parse a string of arXiv.org RSS 1.0 XML.

    Returns ``[feed_date, articles]`` where each article is a
    ``[title, link, description]`` triple.
    """
    ns = {'rss': 'http://purl.org/rss/1.0/'}
    tree = ET.ElementTree(ET.fromstring(xml))
    # XPath with namespaces proved unreliable here, so the feed date is
    # pulled straight out of the raw text instead.
    date = re.search(r'<dc:date>(.*)</dc:date>', xml).group(1)
    articles = []
    for item in tree.findall('rss:item', ns):
        articles.append([item.find('rss:%s' % field, ns).text
                         for field in ('title', 'link', 'description')])
    return [date, articles]
def is_hashtag_viable(word, tag):
    """Decide whether *word* (with POS tag *tag*) should become a hashtag.

    Heuristics: skip hyphenated words and anything containing the link
    placeholder; accept all-caps acronyms, words with >= 3 capitals, and
    long nouns/adjectives.
    """
    MIN_HASHTAGIFY = 10
    if '-' in word:
        return False
    if LINK_PLACEHOLER in word:
        return False
    if len(word) >= 3 and all(x.isupper() for x in word):
        return True
    if len(word) >= 3 and sum(1 for x in word if x.isupper()) >= 3:
        return True
    if len(word) >= MIN_HASHTAGIFY and tag in ('NN', 'NNP', 'NNS', 'JJ'):
        return True
    # Bug fix: the function previously fell off the end and returned None
    # implicitly; make the negative case explicit (still falsy, so callers
    # behave the same).
    return False
def add_hashtags(abstract):
    """Prefix a '#' to every hashtag-worthy word in *abstract*.

    Words are POS-tagged with nltk and filtered through
    is_hashtag_viable(); only the first occurrence of each chosen word
    gets the hash.
    """
    tagged = nltk.pos_tag(nltk.word_tokenize(abstract))
    chosen = {word for word, tag in tagged if is_hashtag_viable(word, tag)}
    for candidate in chosen:
        head, *tail = abstract.split(candidate)
        abstract = head + '#' + candidate + candidate.join(tail)
    return abstract
def generate_tweet(article):
    """
    Construct a tweet for the given article

    *article* is a [title, link, description] triple as produced by
    parse_articles().  The link is composed as a fixed-width placeholder
    first so that length budgeting stays correct, then substituted in.
    """
    MAX_CHAR = 240
    # Take the proper title
    title = article[0].split('. (arXiv')[0]
    link = article[1]
    # Abstract
    abstract = re.sub(r'\n+', ' ', article[2])
    out = f'{title}\n{LINK_PLACEHOLER}\n{abstract}'
    out = add_hashtags(out)
    # First truncation happens before the real link is substituted, so the
    # 23-char placeholder stands in for the link's length.
    out = out[:MAX_CHAR]
    out = out.replace(LINK_PLACEHOLER, link)
    out = re.sub(r'<.*?>', '', out)          # strip HTML tags
    out = re.sub(r'\n+', r'\n', out)         # collapse blank lines
    out = re.sub(r'\ +', ' ', out)           # collapse runs of spaces
    out = re.sub(r'\#+', '#', out)           # collapse duplicated hashes
    out = re.sub(r'(\w)\#', r'\1', out)      # drop hashes glued mid-word
    # Second truncation: the substituted link may be longer than the
    # placeholder, so the budget is re-applied.
    out = out[:MAX_CHAR]
    # Go back and remove everything after the last end of word/phrase/sentence
    out = re.sub(r'(\.|\,|\?|\s)[^\.\,\?\s]*$', r'-', out)
    return out
def send_tweet(i, tweet):
    """
    Actually POST tweet to twitter API

    *i* is only used for logging. Credentials come from the module-level
    AUTH_OBJ dict loaded by parse_keys().
    """
    api = twitter.Api(
        AUTH_OBJ['consumer_key'],
        AUTH_OBJ['consumer_secret'],
        access_token_key=AUTH_OBJ['access_token_key'],
        access_token_secret=AUTH_OBJ['access_token_secret']
    )
    tweet_clean = tweet.replace('\n', ' ')  # single-line form for the log
    try:
        api.PostUpdate(tweet)
        log.info(f'Sent {i} "{tweet_clean}"')
    except twitter.TwitterError as e:
        # Log and carry on -- one failed tweet must not stop the batch.
        log.warning(f'Failed to send "{tweet_clean}" ({e.message})')
PREV_DATE = None  # NOTE(review): assigned here but never read -- confirm it can be removed.
def parse_keys():
    """Load the twitter API credentials from keys.yaml into the
    module-global AUTH_OBJ dict."""
    global AUTH_OBJ
    with open('keys.yaml', 'r') as f:
        AUTH_OBJ = yaml.safe_load(f)
if __name__ == '__main__':
    log.info('Running run.py')
    parse_keys()
    # Poll each feed forever; a new batch of articles is tweeted spread out
    # over TWEET_STACK_DURATION seconds.
    while True:
        for source in ARXIV_SRC:
            res = requests.get(source['url'])
            if not res.ok:
                log.warning(f'Failed on {source["url"]}: {res.reason}')
                continue
            # prev_sent.time persists the feed date of the last tweeted batch
            # so a restart does not re-tweet the same day.
            try:
                with open('prev_sent.time', 'r') as f:
                    pdate = f.readlines()[0].rstrip('\n')
            except IOError:
                pdate = None
            adate, articles = parse_articles(res.text)
            if len(articles) == 0:
                log.info(f'Zero articles found, skipping this loop')
                continue
            if adate != pdate:
                log.info(f'Article date {adate} is different from the previous date {pdate}')
            else:
                log.info(f'Article date {adate} is the same as the previous date')
                # A nonzero --offset forces (re)tweeting part of an
                # already-seen batch; otherwise skip it.
                if args.offset != 0:
                    pass
                else:
                    continue
            with open('prev_sent.time', 'w') as f:
                f.write(adate)
            # NOTE(review): the delay divides by the full article count, but
            # the loop below iterates only the offset slice -- confirm intent.
            tweetDelay = TWEET_STACK_DURATION / len(articles)
            log.info(f'Found {len(articles[args.offset:])} articles, delay set to {tweetDelay}s')
            for i, article in enumerate(articles[args.offset:]):
                send_tweet(i, generate_tweet(article))
                time.sleep(tweetDelay)
            # reset the offset argument
            if args.offset != 0:
                args.offset = 0
        time.sleep(REFRESH_DELAY)
| zouharvi/arxiv-twitter | run.py | run.py | py | 5,360 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "logging.F... |
22769347103 | #!/bin/python
import requests
from bs4 import BeautifulSoup
from urllib.parse import urlencode, parse_qs
# Bearer tokens for the micropub endpoint.
# NOTE(review): credentials are committed in source -- presumably throwaway
# test tokens, but they should be rotated/removed; confirm.
# Facebook
#ACCESS_TOKEN = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJkYXRlX2lzc3VlZCI6IjIwMTUtMTEtMjFUMTg6NTc6MzAuNTQ4MDk3IiwiY2xpZW50X2lkIjoiaHR0cDovL2V4YW1wbGUuY29tLyIsInNpdGUiOjQzLCJzY29wZSI6InBvc3QiLCJtZSI6Imh0dHA6Ly9mZXZlcmRyZWFtLmNjL2ZhY2Vib29rLmNvbS8xMzQzMDc3NTY5MzI5MTkiLCJub25jZSI6MTkxNTg4MDM5N30.sdjM8utyDorgf-Rt2-ia9Vpl7WO7vXNYmVlXXjQxa5E'
# Flickr
ACCESS_TOKEN = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzaXRlIjo1NSwibm9uY2UiOjkyNTEwNjc0OSwic2NvcGUiOiJwb3N0IiwiZGF0ZV9pc3N1ZWQiOiIyMDE1LTExLTIxVDIzOjEyOjU4LjkyNDg5MyIsImNsaWVudF9pZCI6Imh0dHA6Ly9leGFtcGxlLmNvbS8iLCJtZSI6Imh0dHA6Ly9mZXZlcmRyZWFtLmNjL2ZsaWNrci5jb20vMzkyMTY3NjRATjAwIn0.n20Hm5PIWqxhw3XIUN2zJHBXJmF08LL-A47pADNylj4'
MICROPUB_ENDPOINT = 'http://feverdream.cc/micropub'

if __name__ == '__main__':
    # 1) Create a post with an attached photo and several categories.
    r = requests.post(MICROPUB_ENDPOINT, headers={
        'Authorization': 'Bearer ' + ACCESS_TOKEN,
    }, data={
        'name': 'Test post with a photo',
        'category[]': ['https://flickr.com/people/kparks/', 'devils slide',
                       'outdoor', 'highway 1', 'california'],
    }, files={
        'photo': open('/home/kmahan/Pictures/2015/08/23/IMG_4373.JPG', 'rb')
    })
    photo_url = r.headers.get('Location')  # URL of the newly created post
    print('Result', r, r.text)
    print('Location', r.headers['Location'])

    # 2) Post a "like" of an existing Flickr photo.
    r = requests.post(MICROPUB_ENDPOINT, headers={
        'Authorization': 'Bearer ' + ACCESS_TOKEN
    }, data={
        'like-of': 'https://www.flickr.com/photos/kparks/10746970745/in/dateposted/'
    })
    print('Result', r, r.text)
    print('Location', r.headers['Location'])

    # 3) Reply (comment) on the photo post created in step 1.
    r = requests.post(MICROPUB_ENDPOINT, headers={
        'Authorization': 'Bearer ' + ACCESS_TOKEN
    }, data={
        'in-reply-to': photo_url,
        'content': 'Test comment on your photo!'
    })
    print('Result', r, r.text)
    print('Location', r.headers['Location'])
| kylewm/silo.pub | scripts/do_local_micropub_flickr.py | do_local_micropub_flickr.py | py | 1,961 | python | en | code | 27 | github-code | 36 | [
{
"api_name": "requests.post",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 40,
"usage_type": "call"
}
] |
33028130226 | from xml.etree import ElementTree
from blog import app
import sys
import requests
import sys
from bs4 import BeautifulStoneSoup as Soup
def analyze_site_map():
    """Fetch the site's sitemap.xml and return the list of <loc> URL strings."""
    r = requests.get('{}{}sitemap.xml'.format(app.config['WEB_PROTOCOL'], app.config['DOMAIN']))
    soup = Soup(r.content)
    locs = soup.findAll('loc')
    return [loc.string for loc in locs]
def main():
    """Request every sitemap URL and report redirects or non-200 responses.

    Returns 0 when every URL resolves to itself with HTTP 200, 1 otherwise.
    """
    failures = []
    for loc in analyze_site_map():
        resp = requests.get(loc)
        print(loc, resp.url, resp.status_code)
        # A redirect (final URL differs) or an error status counts as failure.
        if loc != resp.url or resp.status_code != 200:
            failures.append((loc, resp.url, resp.status_code))
    if failures:
        print("Failed:\n")
        for failure in failures:
            print(failure)
        return 1
    print("Success")
    return 0
if __name__ == '__main__':
    # NOTE(review): `exit` shadows the builtin of the same name -- harmless
    # here but worth renaming.
    try:
        exit = main()
    except Exception as ex:
        # Report the failure on stderr and exit nonzero so CI can detect it.
        sys.stderr.write(str(ex))
        exit = 1
    sys.exit(exit)
| mkmoisen/blog | verify_sitemap.py | verify_sitemap.py | py | 888 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "blog.app.config",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "blog.app",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulStoneSoup",
... |
11292852216 | import os
from msgraph import api, sites
authority_host_uri = 'https://login.microsoftonline.com'
tenant = 'XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX'
resource_uri = 'https://graph.microsoft.com'
client_id = 'XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX'
client_thumbprint = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
client_certificate_path = os.path.join('data', 'sites_lists.pem')
with open(client_certificate_path, 'rb') as input_file:
client_certificate = input_file.read()
api_instance = api.GraphAPI.from_certificate(authority_host_uri, tenant, resource_uri, client_id, client_certificate, client_thumbprint)
site_id = 'a258178f-da15-42dd-a85b-90dbe49ebd9e'
site = sites.Site.get(api_instance, site=site_id)
list_ids = ('8f8b90e2-9880-4eda-bcb2-ae07462f89a2', '2f9c8b8b-f269-4ed4-bda4-4ba738871df0', '9359ba09-168a-4be3-9625-1263b17a5082', 'd22ec8d5-6716-48d4-aec1-e0eeaac0d009')
for list_id in list_ids:
site_list = sites.SiteList.get(api_instance, site, list_instance=list_id)
list_items = sites.ListItem.get(api_instance, site, site_list)
for item in list_items:
print(item.fields)
new_list_item = sites.ListItem.create(api_instance, site, list_ids[0], dict(Title='johndoe@wm.edu'))
new_list_item.delete(api_instance, site, list_ids[0])
| WMInfoTech/python-msgraph | examples/sites_lists.py | sites_lists.py | py | 1,261 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "msgraph.api.GraphAPI.from_certificate",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "msgraph... |
34482110199 | import bpy
import math
# Animate a spinning cube over 180 frames: two full turns around the
# X axis and one full turn around the Z axis, keyed at the first and
# last frame.
bpy.context.scene.frame_end = 180

# Add a cube and grab the resulting object.
bpy.ops.mesh.primitive_cube_add()
cube = bpy.context.active_object

# Key the initial (zero) rotation at frame 1.
cube.keyframe_insert("rotation_euler", frame=1)

# Jump to the final frame, set the end rotation, and key it.
bpy.context.scene.frame_current = 180
cube.rotation_euler.x = math.radians(720)  # two full turns around X
cube.rotation_euler.z = math.radians(360)  # one full turn around Z
cube.keyframe_insert("rotation_euler", frame=180)
| ambivalenzia/BlenderPythonProjects | cube_rotation_animation.py | cube_rotation_animation.py | py | 655 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "bpy.context",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.mesh.primitive_cube_add",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "bpy.contex... |
30395093762 | from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from blog.models import Blog, BlogCategory
from blog.api.serializers import BlogApi, BlogCategoryApi
@api_view(['GET',])
def api_blog_view(request):
    """List all blogs whose status is not 0 (i.e. not hidden)."""
    try:
        blog = Blog.objects.all().exclude(blog_status = 0)#filter(blog_status = 1)
    except Blog.DoesNotExist:
        # NOTE(review): querysets are lazy and .exclude() never raises
        # DoesNotExist, so this branch looks unreachable -- confirm intent.
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        serializer = BlogApi(blog, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
@api_view(['GET',])
def api_blog_details_view(request, pk):
    """Return a single blog by primary key, or 404 if it does not exist."""
    try:
        blog = Blog.objects.get(pk=pk)
    except Blog.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        serializer = BlogApi(blog)
        return Response(serializer.data, status=status.HTTP_200_OK)
@api_view(['GET',])
def api_blog_by_category_view(request, pk):
    """List the non-hidden blogs belonging to category *pk*."""
    try:
        blog_by_category = Blog.objects.filter(blog_category = pk).exclude(blog_status = 0)
    except Blog.DoesNotExist:
        # NOTE(review): filter()/exclude() are lazy and never raise
        # DoesNotExist, so this branch looks unreachable -- confirm intent.
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        serializer = BlogApi(blog_by_category, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
@api_view(['GET',])
def api_blog_categories_view(request):
    """List every blog category."""
    try:
        categories = BlogCategory.objects.all()
    except BlogCategory.DoesNotExist:
        # NOTE(review): .all() is lazy and never raises DoesNotExist, so
        # this branch looks unreachable -- confirm intent.
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        serializer = BlogCategoryApi(categories, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
| siklerdaniiii/astral | blog/api/views.py | views.py | py | 1,731 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "blog.models",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "blog.models.Blog.objects.all",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "blog.models.Blog.objects",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name":... |
20060032970 |
import matplotlib.pyplot as plt
import matplotlib.collections as mcoll
from collections import defaultdict
from matplotlib import colors
from matplotlib.lines import Line2D
import warnings
import seaborn as sns
import math
import numpy as np
import pandas as pd
def deCasteljau(b, t):
    """Evaluate the Bezier curve with control points *b* at parameter *t*.

    *b* is an (N, 2) array-like of control points; returns the 2-vector
    B(t) computed by De Casteljau's repeated linear interpolation.
    """
    pts = np.copy(b)
    n = len(pts)
    for level in range(1, n):
        pts[:n - level, :] = (1 - t) * pts[:n - level, :] + t * pts[1:n - level + 1, :]
    return pts[0, :]
def BezierCv(b, nr=5):
    """Sample the Bezier curve with control points *b* as nr-1 straight
    segments; returns an (nr-1, 2, 2) array of segment endpoints."""
    ts = np.linspace(0, 1, nr)
    segments = [[deCasteljau(b, ts[i]), deCasteljau(b, ts[i + 1])]
                for i in range(nr - 1)]
    return np.array(segments)
def position_circle(x, radius=1):
    """ Return the x,y coordinate of the point at
    angle (360*x)°, in the circle of radius "radius"
    and center (0, 0)
    """
    theta = 2 * math.pi * x
    return np.array([radius * math.cos(theta), radius * math.sin(theta)])
def linear_gradient(start, end, n=10):
    """Return an (n, len(start)) array interpolating componentwise
    from *start* to *end* in n evenly spaced steps."""
    # np.linspace broadcasts over array endpoints, producing one row per
    # step -- equivalent to stacking per-component linspaces.
    return np.linspace(np.asarray(start, dtype=float),
                       np.asarray(end, dtype=float), num=n)
def linear_gradient_color(c1, c2, n=10):
    """Return n RGBA colours fading from *c1* to *c2*."""
    rgba_start = colors.to_rgba(c1)
    rgba_end = colors.to_rgba(c2)
    return linear_gradient(rgba_start, rgba_end, n=n)
def draw_chord(A, B, ax=None, color_start="b", color_end="r",
               precision=1000, **kwargs):
    """Draw a colour-graded Bezier chord from point A to point B on *ax*."""
    dist = np.linalg.norm(np.array(A) - np.array(B))
    # Control points are pulled towards the centre; the further apart the
    # endpoints, the deeper the chord bows inward.
    control = [A, A / (1 + dist), B / (1 + dist), B]
    segments = BezierCv(control, nr=precision)
    gradient = linear_gradient_color(color_start, color_end, n=precision)
    ax.add_collection(mcoll.LineCollection(segments, colors=gradient, **kwargs))
def draw_arc_circle(start, end, color="b", radius=1, ax=None,
                    thickness=0.1, precision=1000, **kwargs):
    """Fill an annular arc between turn fractions *start* and *end*,
    spanning radii [radius, radius + thickness]."""
    ts = np.linspace(start, end, precision)
    # Walk the inner edge forward, then the outer edge backward, to close
    # the polygon.
    inner = [position_circle(t, radius=radius) for t in ts]
    outer = [position_circle(t, radius=radius + thickness) for t in reversed(ts)]
    xs, ys = zip(*(inner + outer))
    ax.fill(xs, ys, color=color, **kwargs)
def add_text_circle(x, txt, radius=1, ax=None, **kwargs):
    """ Add text on the border of the circle, in the right orientation """
    # Labels on the left half of the circle are flipped so they stay readable.
    flipped = 0.25 < x < 0.75
    ax.text(*position_circle(x, radius=radius), txt,
            rotation=(360 * x - 180 if flipped else 360 * x),
            ha='right' if flipped else 'left',
            va='top' if flipped else 'bottom',
            rotation_mode='anchor', **kwargs)
class Chords:
    """Layout engine for a chord diagram built from a paired dataframe.

    Each row of *data* becomes a point on the circle, ordered by
    *order_col*; the two rows sharing a *pair_col* value are joined by a
    chord.  The constructor fills layout defaults, orders the rows,
    computes angular positions and collects chord endpoints; plot()
    renders everything on a matplotlib axis.
    """

    def __init__(self, data, order_col, pair_col, color_col=None,
                 layout_args={}, text_args={},
                 chords_args={}, palette=sns.color_palette()):
        # NOTE(review): the dict/palette defaults are mutable and are mutated
        # below, so settings leak between instances -- consider None sentinels.
        if 'spacing' not in layout_args:
            layout_args['spacing'] = 0
        if 'precision_chord' not in layout_args:
            layout_args['precision_chord'] = 100
        if 'precision_circle' not in layout_args:
            layout_args['precision_circle'] = 100
        if 'thickness_circle' not in layout_args:
            layout_args['thickness_circle'] = 0.1
        if 'subcircle' not in layout_args:
            layout_args['subcircle'] = True
        if 'radius_subcircle' not in layout_args:
            layout_args['radius_subcircle'] = 1.14
        if 'radius_circle' not in layout_args:
            layout_args['radius_circle'] = 1.02
        if 'thickness_subcircle' not in layout_args:
            layout_args['thickness_subcircle'] = 0.1
        if 'internal_chords' not in layout_args:
            layout_args['internal_chords'] = False
        if 'radius_text' not in layout_args:
            # Text sits just outside whichever ring reaches furthest.
            layout_args['radius_text'] = max(layout_args['thickness_subcircle']
                                             + layout_args['radius_subcircle'],
                                             layout_args['thickness_circle']
                                             + layout_args['radius_circle']) + 0.1
        if 'no_chords' not in layout_args:
            layout_args['no_chords'] = False
        if 'inverted_grad' not in layout_args:
            layout_args['inverted_grad'] = True
        if 'circle_args' not in layout_args:
            layout_args['circle_args'] = {}
        if 'subcircle_args' not in layout_args:
            layout_args['subcircle_args'] = {}
        if 'singleton' not in layout_args:
            layout_args['singleton'] = True
        if 'palette' not in text_args:
            if color_col is None:
                text_args['palette'] = palette
            else:
                # When colours differ from categories, label colouring is
                # ambiguous -- default every label to black.
                text_args['palette'] = defaultdict(lambda: 'k')
        if not np.all(data[pair_col].value_counts() <= 2):
            raise TypeError("Every value in the `pair` column "
                            "should appear exactly twice")
        if not layout_args['singleton']:
            # NOTE(review): `data.pair_col` looks up a literal column named
            # "pair_col" (not the column named by the pair_col argument), and
            # self.data is unconditionally overwritten two statements below,
            # so this filter appears to have no effect -- confirm.
            self.data = data[data.pair_col.map(data.pair_col.value_counts()) == 2]
        else:
            self.data = data.copy()
        self.chords = []
        self.data = data.copy()
        self.order_col = order_col
        self.pair_col = pair_col
        if color_col is None:
            color_col = order_col
        self.color_col = color_col
        self.df = None  # working dataframe, built by order_data()
        self.layout = layout_args
        self.text_args = text_args
        self.chords_args = chords_args
        self.palette = palette
        self.order_data("order", "pair")
        self.compute_positions()
        self.pair_chords()

    def order_data(self, categories, pairs):
        """
        Sort self.df into chord-plot order: by category, then by where each
        row's partner sits, keeping paired rows roughly opposite each other.

        NOTE(review): the `categories`/`pairs` parameters are never used;
        the method always operates on the renamed "order"/"pair" columns.
        """
        self.format_data()
        # Relative category distance to the partner, with a large offset for
        # same-category (internal) pairs so they sort after cross-category ones.
        self.df["associate_cat_order"] = self.df.apply(
            lambda r: (len(self.mapcat)+r["nbcat"]-r["associate_nbcat"]) % len(self.mapcat)
            + (len(self.mapcat)//2+1 if r["nbcat"]==r["associate_nbcat"] else 0.5),
            axis=1)
        self.df["sort_order"] = self.df.apply(
            lambda r: (r["idx"] if r["nbcat"] <= r["associate_nbcat"]
                       else -r["associate"]), axis=1)
        sign = lambda x: 0 if x == 0 else 1 if x > 0 else -1
        # Tie-breakers for rows whose partner is in the same category.
        self.df["singleton_sort"] = self.df.apply(lambda r: 0 if r["nbcat"] != r["associate_nbcat"]
                                                  else sign(r["idx"] - r["associate"]), axis=1)
        self.df["internal_sort"] = self.df.apply(lambda r: (0 if r["nbcat"] != r["associate_nbcat"]
                                                 else (r["idx"] if r["idx"] < r["associate"]
                                                       else -r["associate"])), axis=1)
        self.df = self.df.sort_values(by=["nbcat", "associate_cat_order", "singleton_sort",
                                          "internal_sort", "sort_order"])

    def format_data(self):
        """
        Build the working dataframe self.df with normalised column names
        ("order"/"pair"/"color"), integer codes for categories and colours,
        and for each row the index/codes of its pair partner.
        """
        if self.color_col == self.order_col:
            self.df = self.data[[self.order_col, self.pair_col]].rename({
                self.pair_col: "pair",
                self.order_col: "order"}, axis=1).copy()
            self.df["color"] = self.df["order"]
        else:
            self.df = self.data[[self.order_col, self.pair_col, self.color_col]].rename({
                self.pair_col: "pair",
                self.order_col: "order",
                self.color_col: "color"}, axis=1).copy()
        # Preserve the caller's original index under "og_idx" for add_chord().
        self.df.index.names = ['og_idx']
        self.df = self.df.reset_index()
        catunique = self.df["order"].unique()
        self.mapcat = dict(zip(catunique, range(len(catunique))))
        colorunique = self.df["color"].unique()
        self.mapcolor = dict(zip(colorunique, range(len(colorunique))))
        self.df["nbcat"] = self.df["order"].map(self.mapcat).astype(int)
        self.df["nbcolor"] = self.df["color"].map(self.mapcolor).astype(int)
        self.df["idx"] = self.df.index
        # Map each row to the other row sharing its pair value; a singleton
        # maps to itself.
        pairmap = self.df.groupby("pair").idx.apply(list).to_dict()
        self.df["associate"] = self.df.apply(
            lambda r: [a for a in pairmap[r["pair"]] if a != r["idx"]][0]
            if len(pairmap[r["pair"]]) > 1 else pairmap[r["pair"]][0],
            axis=1)
        self.df["associate_nbcat"] = self.df.associate.map(self.df.nbcat)
        self.df["associate_nbcolor"] = self.df.associate.map(self.df.nbcolor)
        # Combined (category, colour) codes, used to find arc boundaries.
        self.df["catcolor"] = self.df.apply(lambda r: (r["nbcat"], r["nbcolor"]),
                                            axis=1
                                            ).astype('category').cat.codes
        self.df["associate_catcolor"] = self.df.apply(lambda r:
                                                      (r["associate_nbcat"], r["associate_nbcolor"]),
                                                      axis=1
                                                      ).astype('category').cat.codes

    def compute_positions(self):
        """Assign each row an angular position (turn fraction) on the circle,
        inserting a `spacing` gap between consecutive categories."""
        cat_jump = list(np.where(self.df.nbcat.values[:-1]
                                 != self.df.nbcat.values[1:])[0])
        x = 0
        positions = []
        for i in range(len(self.df)):
            positions.append(x)
            if i in cat_jump:
                x += self.layout['spacing']
            x += (1 - self.layout['spacing']*(len(cat_jump)+1))/(len(self.df))
        self.df["position"] = positions
        self.df["associate_position"] = self.df.associate.map(self.df.position)

    def pair_chords(self):
        """Collect (pos1, pos2, color1, color2, cat1, cat2) tuples, one per
        chord, dropping singletons (rows paired with themselves)."""
        self.chords = list(zip(
            self.df.position,
            self.df.associate_position,
            self.df.nbcolor,
            self.df.associate_nbcolor,
            self.df.nbcat,
            self.df.associate_nbcat))
        # add chord except if singleton
        self.chords = [tpl for tpl in self.chords if tpl[0] != tpl[1]]

    def add_chord(self, idx1, idx2):
        """Manually add a chord between two rows addressed by their
        ORIGINAL dataframe index (og_idx)."""
        dd = self.df.set_index("og_idx")
        self.chords.append(
            (dd.loc[idx1].position, dd.loc[idx2].position,
             dd.loc[idx1].nbcolor, dd.loc[idx2].nbcolor,
             dd.loc[idx1].nbcat, dd.loc[idx2].nbcat))

    def plot(self, ax=None):
        """Render the diagram (arcs, labels, optional subcircle, chords)
        on *ax*, creating a square figure when no axis is given."""
        if ax is None:
            _, ax = plt.subplots(figsize=(8, 8))
            ax.axis('off')
        nb_to_name_cat = {self.mapcat[k]:k for k in self.mapcat}
        positions = self.df.position.values
        # Outer ring: one arc per contiguous (category, colour) run.
        catcolors = self.df.catcolor.values
        idxs = np.where(catcolors[:-1] != catcolors[1:])[0]
        start_categorie = [0] + list(positions[idxs+1])
        end_categorie = list(positions[idxs]) + [positions[-1]]
        cats = [self.df.nbcolor.iloc[0]] + list(self.df.nbcolor.iloc[idxs+1])
        for s, e, c in zip(start_categorie, end_categorie, cats):
            draw_arc_circle(s - 0.5/len(self.df), e + 0.5/len(self.df),
                            color=self.palette[c], ax=ax,
                            precision=self.layout['precision_circle'],
                            thickness=self.layout['thickness_circle'],
                            radius=self.layout['radius_circle'],
                            **self.layout['circle_args'])
        # the radius text should correspond to categories
        cats = self.df.nbcat.values
        idxs = np.where(cats[:-1] != cats[1:])[0]
        start_categorie = [0] + list(positions[idxs+1])
        end_categorie = list(positions[idxs]) + [positions[-1]]
        cats = [cats[0]] + list(cats[idxs+1])
        for s, e, c in zip(start_categorie, end_categorie, cats):
            add_text_circle((s + e - 1/len(self.df))/2, nb_to_name_cat[c], ax=ax,
                            color=self.text_args['palette'][c],
                            radius=self.layout['radius_text'],
                            **{k: v for k, v in self.text_args.items() if k != 'palette'})
        if self.layout['subcircle']:
            # Inner ring coloured by the PARTNER's colour code.
            catcolors = self.df.associate_catcolor.values
            idxs = np.where(catcolors[:-1] != catcolors[1:])[0]
            start_subcategorie = [0] + list(positions[idxs+1])
            end_subcategorie = list(positions[idxs]) + [positions[-1]]
            subcats = [self.df.associate_nbcolor.iloc[0]] + list(self.df.associate_nbcolor.iloc[idxs+1])
            for s, e, c in zip(start_subcategorie, end_subcategorie, subcats):
                draw_arc_circle(s - 0.5/len(self.df), e + 0.5/len(self.df),
                                color=self.palette[c], ax=ax,
                                precision=self.layout['precision_circle'],
                                thickness=self.layout['thickness_subcircle'],
                                radius=self.layout['radius_subcircle'],
                                **self.layout['subcircle_args'])
        if not self.layout['no_chords']:
            for pos_1, pos_2, color_1, color_2, cat_1, cat_2 in self.chords:
                # Same-category chords are skipped unless internal_chords is set.
                if cat_1 != cat_2 or self.layout['internal_chords']:
                    draw_chord(position_circle(pos_2),
                               position_circle(pos_1), ax=ax,
                               color_start=self.palette[color_2
                                                        if self.layout['inverted_grad']
                                                        else color_1],
                               color_end=self.palette[color_1
                                                      if self.layout['inverted_grad']
                                                      else color_2],
                               precision=self.layout['precision_chord'],
                               **self.chords_args)
        ax.set_xlim(-1, 1)
        ax.set_ylim(-1, 1)
        ax.axis('equal')
        ax.axis('off')
        return ax
def chord_diagram(categories, pairs, hues=None,
                  data=None, ax=None, palette=sns.color_palette(),
                  layout_args={}, text_args={}, chord_args={}):
    """ Draw a chord diagram.
    @ Args
        - categories: Categories of each individual.
        Decide the order of the plot.
        Either a list or a column name if `data` is not None.
        - pair: For each individual identifies the pair it's in.
        Every value should appear twice. Either a list or a column
        name if `data` is not None.
        - hues: list of categories that will determine the color of the plot.
        - data: dataset containing the columns `categories` and `pair`
        - ax: matplotlib ax object
        - palette: seaborn palette
        - layout_args: dict arguments for the layout, include:
           * 'spacing' (default 0): space between the categories
           * precision_chord: precision to plot the chord,
             higher = better but slower.
           * precision_circle: same for the circles
           * subcircle: presence or not of a subcircle (see examples)
           * thickness_circle, thickness_subcircle: width of the circle / subcircle (default 0.1)
           * radius_circle, radius_subcircle: radii of both circles
           * internal_chords: Plot or not the internal chords (default `False`)
           * radius_text: radius of the text
           * no_chords: Don't plot the chords (good for testing, default `False`)
           * inverted_grad: Inverse the gradient on the chords (default `True`)
           * circle_args / subcircle_args: dict, default `{}`, additional arguement for ax.fill
           * nuplets: allow for more than one link going from the same node. Default `False`
           * singletons: allow for nodes with no "paired" value, default `True`
           * plot: Default `True`, if `False` does not plot the figure.
    """
    if 'nuplets' not in layout_args:
        layout_args['nuplets'] = False
    if 'plot' not in layout_args:
        layout_args['plot'] = True
    if layout_args['nuplets']:
        # NOTE(review): this sets 'singletons' but Chords.__init__ reads the
        # key 'singleton' (no s) -- one of the two spellings looks wrong.
        layout_args['singletons'] = True
    if data is None:
        # List inputs: wrap them in a throwaway dataframe with fixed names.
        data = pd.DataFrame()
        data["cat"] = categories
        data["pair"] = pairs
        data["col"] = hues
        categories = "cat"
        pairs = "pair"
        hues = None if hues is None else "col"
    doublets = None  # NOTE(review): never used -- confirm it can be removed.
    if layout_args['nuplets']:
        # Split n-sized pair groups into consecutive 2-groups by suffixing
        # the pair label with the member's half-index.
        data_copy = data.copy()
        data_copy.index.names = ['idx']
        data_copy = data_copy.reset_index()
        map_pair = data_copy.groupby(pairs).idx.apply(list)
        new_pairs = [str(p) + str(map_pair[p].index(idx)//2)
                     for idx, p in zip(data_copy.idx, data_copy[pairs])]
        data_copy[pairs] = new_pairs
    else:
        data_copy = data.copy()
    ch = Chords(data=data_copy, order_col=categories,
                pair_col=pairs,
                color_col=hues,
                layout_args=layout_args, text_args=text_args,
                chords_args=chord_args, palette=palette)
    if layout_args['nuplets']:
        # Re-add the chords between all members of each original n-group.
        for idx1, p1, p2 in zip(data.index, data[pairs], data_copy[pairs]):
            # NOTE(review): this tests the number of DISTINCT pairs, not the
            # size of group p1 (len(map_pair[p1]) > 2?) -- confirm intent.
            if len(map_pair) > 2:
                for idx2 in map_pair[p1]:
                    ch.add_chord(idx1, idx2)
    if layout_args['plot']:
        ch.plot(ax=ax)
    return ch
| Thopic/chordialement | chordialement/core.py | core.py | py | 17,468 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.copy",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number"... |
12553103859 | import requests
import json
import time
url = 'https://formulae.brew.sh/api/formula.json'
response = requests.get(url)
packages_json = response.json()
results = []
t1 = time.perf_counter()
for package in packages_json:
package_name = package['name']
package_desc = package['desc']
package_url = f'https://formulae.brew.sh/api/formula/{package_name}.json'
try:
response = requests.get(package_url)
package_json = response.json()
# print(package_json)
package_string = json.dumps(package_json, indent=2)
install_30 = package_json['analytics']['install_on_request']['30d'][package_name]
install_90 = package_json['analytics']['install_on_request']['90d'][package_name]
install_365 = package_json['analytics']['install_on_request']['365d'][package_name]
except Exception:
continue
data = {
'name': package_name,
'desc': package_desc,
'analytics': {
'30d': install_30,
'90d': install_90,
'365d': install_365
}
}
results.append(data)
# time.sleep(response.elapsed.total_seconds())
print(f"Got {package_name} in {response.elapsed.total_seconds()} seconds.")
t2 = time.perf_counter()
print(f"Total Time: {t2 - t1} seconds.")
file_path = 'C:\\Users\\Paavan Gopala\\Desktop\\OS-Demo\\New Folder\\packages_info.json'
with open(file_path, 'w') as file_writer:
json.dump(results, file_writer, indent=2)
| iampaavan/Pure_Python | How to Write Python Scripts to Analyze JSON APIs and Sort Results.py | How to Write Python Scripts to Analyze JSON APIs and Sort Results.py | py | 1,406 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_num... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.