seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9294254942 | ## This module
from StreamAnimations import utils, sprite
from StreamAnimations.sprite import hitbox
from StreamAnimations.canvases import SinglePageCanvas
from StreamAnimations.engine.renderers.gif import GifRenderer
from StreamAnimations.engine import utils as engineutils
from StreamAnimations.systems import twodimensional
## Builtin
import pathlib
import random
ROOT = pathlib.Path(__file__).resolve().parent
OUTDIR = ( ROOT / "output").resolve()
OUTDIR.mkdir(exist_ok = True)
SAMPLEDIR = (ROOT / "samples").resolve()
SPRITESIZE = 32
CANVASSIZE = 384, 216
BASEHEIGHT = 10
def load_walk():
up, down, right = utils.import_spritesheet((SAMPLEDIR / "Walk Up.png").resolve()), \
utils.import_spritesheet((SAMPLEDIR / "Walk Down.png").resolve()), \
utils.import_spritesheet((SAMPLEDIR / "Walk Right.png"))
directions = dict(up = utils.split_spritesheet(up, SPRITESIZE, SPRITESIZE),
down = utils.split_spritesheet(down, SPRITESIZE, SPRITESIZE),
right = utils.split_spritesheet(right, SPRITESIZE, SPRITESIZE)
)
directions["left"] = utils.mirror_sprite(directions['right'])
return directions
def load_printer():
frames = []
for zlevel in range(1,6):
sheet = utils.import_spritesheet( (SAMPLEDIR / f"Prusa Z{zlevel}.png").resolve())
frames.extend(utils.split_spritesheet(sheet, SPRITESIZE, SPRITESIZE))
return {"idle": frames}
def load_desk():
return {"idle":utils.split_spritesheet(utils.import_spritesheet( (SAMPLEDIR / "Desk-1.png").resolve()), SPRITESIZE, 45)}
def load_desk_text():
return {"idle": utils.split_spritesheet(utils.import_spritesheet( (SAMPLEDIR / "stream.png").resolve() ), SPRITESIZE, 21)}
walk = load_walk()
me = twodimensional.Sprite2D(directionalsprites= walk, hitboxes = [], animations = {"idle":[walk['down'][0],]})
mehitbox = hitbox.MaskedHitbox(hitbox.create_rect_hitbox_image(me.get_image().width, BASEHEIGHT),anchor="bl")
me.add_hitbox(mehitbox)
printer = sprite.StationarySprite(animations=load_printer())
printerhitbox = hitbox.MaskedHitbox(hitbox.create_rect_hitbox_image(printer.get_image().width, BASEHEIGHT//2), anchor="bl")
printer.add_hitbox(printerhitbox)
desk = sprite.StationarySprite(animations= load_desk())
deskhitbox = hitbox.MaskedHitbox(hitbox.create_rect_hitbox_image(desk.get_image().width, BASEHEIGHT),anchor="bl")
desk.add_hitbox(deskhitbox)
monitortext = sprite.CosmeticSprite(animations= load_desk_text(), offset = (12, 12), parent = desk)
canvas = SinglePageCanvas(CANVASSIZE, SPRITESIZE // 4)
canvas.add_listener("movement", engineutils.collision_stop_rule)
canvas.add_sprite(me, (50, 70))
canvas.add_sprite(printer, (80,80))
canvas.add_sprite(monitortext, (0,0))
canvas.add_sprite(desk, (50, 50))
## ANIMATION
renderer = GifRenderer(canvas, sorter= twodimensional.twod_sprite_sorter)
with renderer.frame(): pass
path = ["right","right","right","right","right", "up", "up", "up", "up","left","left","left","left","left"]
for i in range(100):
with renderer.frame() as frame:
#frame.move_sprite(me, random.choice(twodimensional.TwoDimensional_4Way.directions()))
if path: frame.move_sprite(me, path.pop(0))
if(printer.animations.is_last_frame()): printer.animations.pause()
renderer.save((OUTDIR / "map2.gif"), 10, scale = 5) | AdamantLife/StreamAnimations | sample.py | sample.py | py | 3,312 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "StreamAnimations.utils.import_spritesheet",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "StreamAnimations.utils",
"line_number": 22,
"usage_type": "name"
},
{
"api... |
16574355106 | import collections
import heapq
from typing import List
class Solution:
def maxProbability(self, n: int, edges: List[List[int]], succProb: List[float], start: int, end: int) -> float:
adj = collections.defaultdict(list)
for i in range(len(edges)):
src, dst = edges[i]
adj[src].append([dst, succProb[i]])
adj[dst].append([src, succProb[i]])
pq = [(-1, start)]
vis = set()
while pq:
prob, curr = heapq.heappop(pq)
vis.add(curr)
if curr == end:
return prob * -1
for neig, edgeProb in adj[curr]:
if neig not in vis:
heapq.heappush(pq, (prob * edgeProb, neig))
return 0 | BLANK00ANONYMOUS/PythonProjects | Leetcode Daily Challenges/14_feb_2023.py | 14_feb_2023.py | py | 759 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "heapq.heappop",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "heapq.heappush",
... |
11394835592 | '''
Script to export PASCAL VOC 2012 annotation data in VIA format
Author: Abhishek Dutta <adutta@robots.ox.ac.uk>
12 Apr. 2018
'''
import xmltodict
import os
import json
base_dir = '/data/datasets/voc2012/VOCdevkit/VOC2012/'
img_dir = os.path.join(base_dir, 'JPEGImages/')
ann_dir = os.path.join(base_dir, 'Annotations')
set_dir = os.path.join(base_dir, 'ImageSets', 'Main')
def get_via_fileid(filename, filesize):
return filename + str(filesize);
def get_file_size(filename):
return os.path.getsize(filename)
def get_region_attributes(d):
ri = {}
ri['shape_attributes'] = {}
if 'bndbox' in d:
x0 = int( float(d['bndbox']['xmin']) )
y0 = int( float(d['bndbox']['ymin']) )
x1 = int( float(d['bndbox']['xmax']) )
y1 = int( float(d['bndbox']['ymax']) )
ri['shape_attributes']['name'] = 'rect'
ri['shape_attributes']['x'] = x0
ri['shape_attributes']['y'] = y0
ri['shape_attributes']['width'] = x1 - x0
ri['shape_attributes']['height'] = y1 - y0
ri['region_attributes'] = {}
if 'name' in d:
ri['region_attributes']['name'] = d['name']
if 'pose' in d:
ri['region_attributes']['pose'] = d['pose']
if 'truncated' in d:
ri['region_attributes']['truncated'] = d['truncated']
if 'difficult' in d:
ri['region_attributes']['difficult'] = d['difficult']
return ri
def voc_xml_to_json(fn):
print(fn)
with open(fn) as f:
d = xmltodict.parse(f.read())
d = d['annotation']
img_fn = d['filename']
img_path = os.path.join(img_dir, img_fn)
img_size = get_file_size(img_path)
img_id = get_via_fileid(img_fn, img_size)
js = {}
js[img_id] = {}
js[img_id]['fileref'] = img_path
js[img_id]['size'] = img_size
js[img_id]['filename'] = img_fn
js[img_id]['base64_img_data'] = ''
fa = {}
if 'source' in d:
if 'database' in d['source']:
fa['database'] = d['source']['database']
if 'annotation' in d['source']:
fa['annotation'] = d['source']['annotation']
if 'image' in d['source']:
fa['image'] = d['source']['image']
if 'size' in d:
if 'width' in d['size']:
fa['width'] = d['size']['width']
if 'height' in d['size']:
fa['height'] = d['size']['height']
if 'depth' in d['size']:
fa['depth'] = d['size']['depth']
if 'segmented' in d:
fa['segmented'] = d['segmented']
js[img_id]['file_attributes'] = fa
js[img_id]['regions'] = []
if isinstance(d['object'], list):
region_count = len(d['object'])
for i in range(0, region_count):
ri = get_region_attributes( d['object'][i] )
js[img_id]['regions'].append(ri)
else:
r = get_region_attributes( d['object'] )
js[img_id]['regions'].append(r)
return js
outjson_fn = '/data/datasets/via/import/pascal_voc/_via_project_pascal_voc2012_import.js'
outjson_f = open(outjson_fn, 'w')
outjson_f.write('var via_project_pascal_voc2012 = \'{"_via_settings":{}, "_via_attributes":{}, "_via_img_metadata":{')
first = True
for file in os.listdir(ann_dir):
if file.endswith(".xml"):
file_path = os.path.join(ann_dir, file)
js = voc_xml_to_json(file_path)
js_str = json.dumps(js)
if not first:
outjson_f.write( "," ) # remove first and last curley braces
else:
first = False
outjson_f.write( js_str[1:-1] ) # remove first and last curley braces
outjson_f.write("}}\';")
outjson_f.close()
print('\nWritten everything to {}'.format(outjson_fn))
| ox-vgg/via | via-2.x.y/scripts/import/pascal_voc/exp_annotations.py | exp_annotations.py | py | 3,496 | python | en | code | 184 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 1... |
14992228919 | from flask import Flask
from flask import request
from urllib.parse import urlencode
import requests
import json
server = Flask(__name__)
api_key = "AIzaSyAWtsz4ALYdHQJKRSeGv-invChqgL7tAFs"
@server.route('/location')
def location():
city_name = request.values.get('city-name')
data_type = "json"
endpoint = f"https://maps.googleapis.com/maps/api/place/autocomplete/{data_type}"
params = {"input": city_name, "key": api_key}
url_params = urlencode(params)
print(url_params)
url = f"{endpoint}?{url_params}"
print(url)
r = requests.get(url)
print(r.status_code)
data = {}
index = 1
for res in r.json()['predictions']:
if(res['description'].split(',')[0].lower() == city_name):
key = 'location_'+str(index)
location = {}
if(len(res['description'].split(','))<3):
location['city'] = res['description'].split(',')[0]
location['country'] = res['description'].split(',')[1]
else:
location['city'] = res['description'].split(',')[0]
location['province'] = res['description'].split(',')[1]
location['country'] = res['description'].split(',')[2]
data[key] = location
index += 1
return data
if __name__ == '__main__':
server.run(debug=True)
| KJS89/Wuduplz | Web mining/final/locationList.py | locationList.py | py | 1,190 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.request.values.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.request.values",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "flask.r... |
7037221042 | from flask import Flask,render_template,request
import tweepy
import re
import pandas as pd
from tweepy import OAuthHandler
from nltk.corpus import stopwords
from nltk.tokenize import TweetTokenizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn import naive_bayes
from sklearn.metrics import roc_auc_score
#======================================================= ======================================================
df = pd.read_csv("/home/saurabh/Sentiment_Analysis_Dataset.csv")
t = pd.DataFrame()
t['Sentiment'] = df.Sentiment
t['Text'] = df.SentimentText
#======================================================= ======================================================
stop_words = set(stopwords.words("english"))
vectorizer = TfidfVectorizer(use_idf = True, lowercase = True , strip_accents = 'ascii' , stop_words = stop_words )
X = vectorizer.fit_transform(t.Text)
y = t.Sentiment
X_train,X_test,y_train,y_test = train_test_split(X,y)
clf = naive_bayes.MultinomialNB()
clf.fit(X_train,y_train)
#======================================================= ======================================================
def classifier(queries):
#===================================================================
#
query = queries
tknzr=TweetTokenizer(strip_handles=True,reduce_len=True)
consumer_key="YOUR_KEY"
consumer_secret="YOUR SECRET_TOKEN"
access_token="YOUR TOKEN"
access_token_secret="TOKEN_SECRET"
try:
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
tweets_caught = api.search(q=query,count=5)
except:
print("Error")
#====================================================================
#===========================cleaning tweet===========================
count = 0
text = []
raw_tweet = []
for tweet in tweets_caught:
clean_text = []
words = tknzr.tokenize(tweet.text)
for w in words:
if w not in stop_words:
clean_text.append(w)
str = " "
for w in clean_text:
str = str+w+" "
URLless_str = re.sub(r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))', '', str)
if tweet.retweet_count > 0:
if URLless_str not in text:
text.append(URLless_str)
raw_tweet.append(tweet.text)
count = count+1
else:
text.append(URLless_str)
raw_tweet.append(tweet.text)
count = count + 1
#
#======================================================================
text_vec = vectorizer.transform(text)
Resultant_Sentiment = clf.predict(text_vec)
answer = pd.DataFrame()
answer["tweet"] = raw_tweet
answer["Sentiment"] = Resultant_Sentiment
return answer
#======================================================= ======================================================
app = Flask(__name__)
@app.route('/')
def dir1():
return render_template("profile.html")
@app.route('/sentiment' , methods = ['POST'])
def sentiment():
queries = request.form['query']
answer = classifier(queries)
return render_template("sentiment.html",sentiments=answer)
if __name__ == '__main__':
app.run()
#======================================================= ======================================================
| saurabhc104/sentiment_analysis | analysis.py | analysis.py | py | 3,670 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "nltk.cor... |
18583370851 | from pygame.locals import *
import figures
import pygame
import random
import sys
types = [figures.Square, figures.Line]
class Game:
def __init__(self, window=(500, 500), speed=5, block=20, fps=20):
pygame.init()
pygame.display.set_caption("Tetrissss")
self.WINDOW_SIZE = window
self.SPEED = speed
self.BLOCK = block
self.FPS = fps
# Calcul des dimensions de la grille / de l'adresse de chaque case et mise en place
self.nb_blocks = ((self.WINDOW_SIZE[0] - 1) // (self.BLOCK + 1), (self.WINDOW_SIZE[1] - 1) // (self.BLOCK + 1))
self.padd = ((self.WINDOW_SIZE[0] - 1) % (self.BLOCK + 1), (self.WINDOW_SIZE[1] - 1) % (self.BLOCK + 1))
self.grid_x = {i: (self.padd[0] // 2) + 2 + i * (self.BLOCK + 1) for i in range(self.nb_blocks[0])}
self.grid_y = {i: (self.padd[1] // 2) + 2 + i * (self.BLOCK + 1) for i in range(self.nb_blocks[1])}
self.clock = pygame.time.Clock()
self.clock.tick(self.FPS)
self.surface = pygame.display.set_mode(self.WINDOW_SIZE)
# Instanciation figure(s) et variables de suivi du mur de blocs
self.fig = random.choice(types)(self, self.BLOCK, 5, 0)
self.fig.draw()
self.limit = {i: self.nb_blocks[1] - 1 for i in range(self.nb_blocks[0])}
self.wall = {y: {} for y in range(self.nb_blocks[1])}
self.lines = {i: self.nb_blocks[0] for i in range(self.nb_blocks[1])}
self.playing = True
def draw_grid(self):
"""Mise en place de la grille"""
self.surface.fill((0, 0, 0))
curs = (self.padd[0] // 2) + 1
for _ in range(self.nb_blocks[0] + 1):
pygame.draw.line(self.surface, (20, 20, 20), (curs, self.padd[1] // 2),
(curs, self.WINDOW_SIZE[1] - (self.padd[1] // 2 + self.padd[1] % 2)))
curs += self.BLOCK + 1
curs = (self.padd[1] // 2) + 1
for _ in range(self.nb_blocks[1] + 1):
pygame.draw.line(self.surface, (20, 20, 20), (self.padd[0] // 2, curs),
(self.WINDOW_SIZE[0] - (self.padd[0] // 2 + self.padd[0] % 2), curs))
curs += self.BLOCK + 1
def block_to_wall(self):
"""Ajout d'un bloc ayant achevé sa chute au mur de blocs"""
for block in self.fig.get_top():
self.limit[block[0]] = block[1] - 1
if block[1] <= 2:
self.playing = False # TODO : perdu
full_lines = []
for block in self.fig.get_blocks():
self.wall[block[1]][block[0]] = self.fig.color
self.lines[block[1]] -= 1
if self.lines[block[1]] == 0:
full_lines.append(block[1])
if len(full_lines) > 0:
full_lines.sort(reverse=True)
for i in range(len(full_lines)):
self.del_line(full_lines[i] + i)
# TODO : bonus si plusieurs lignes complétées en même temps
del self.fig
# Instanciation aléatoire d'une figure
self.fig = random.choice(types)(self, self.BLOCK, 5, 0)
def del_line(self, y):
"""Suppression d'une ligne complète"""
for x, val in self.limit.items():
self.limit[x] += 1
toDel = []
iterate = True
while iterate:
self.lines[y] = self.lines[y - 1]
for bl, col in self.wall[y].items():
if bl in self.wall[y - 1].keys():
self.wall[y][bl] = self.wall[y - 1][bl]
else:
toDel.append((y, bl))
y -= 1
if self.lines[y] == self.nb_blocks[0]:
iterate = False
for blY, blX in toDel:
del self.wall[blY][blX]
def game_loop(self):
count = 0
while self.playing:
pygame.display.update()
self.draw_grid()
# Lorsque la figure atteint le sol, ses blocs sont intégrés au mur et la limite recalculée
if not self.fig.falling:
self.block_to_wall()
# Affichage du bloc courant et du mur de blocs
self.fig.draw()
for y, bl in self.wall.items():
for x, col in bl.items():
pygame.draw.rect(self.surface, col, (self.grid_x[x], self.grid_y[y], self.BLOCK, self.BLOCK))
# Commandes utilisateur
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_LEFT:
self.fig.move(-1)
elif event.key == K_RIGHT:
self.fig.move(1)
elif event.key == K_DOWN:
self.fig.fall()
elif event.key == K_UP:
while self.fig.falling:
self.fig.fall()
elif event.key == K_SPACE:
self.fig.turn()
# Chute du bloc courant
if count < self.SPEED:
count += 1
elif count == self.SPEED:
self.fig.fall()
count = 0
self.clock.tick(self.FPS)
if __name__ == "__main__":
game = Game(window=(230, 300))
game.game_loop()
| Clapsouille/Tetris | main.py | main.py | py | 5,526 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "figures.Square",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "figures.Line",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pygame.init",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_ca... |
418860871 | import sys
import json
import logging
import argparse
import select
import time
import logs.server_log_config
from socket import socket, AF_INET, SOCK_STREAM, SOL_SOCKET, SO_REUSEADDR
from common.variables import ACTION, PRESENCE, TIME, USER, ACCOUNT_NAME, RESPONSE, \
MESSAGE, MESSAGE_TEXT, ERROR, DEFAULT_PORT, MAX_CONNECTIONS, SENDER
from common.utils import get_message, send_message
from decorators import log
# Инициализация логирования сервера:
SERVER_LOGGER = logging.getLogger('server')
@log
def process_client_message(message, messages_list, client):
"""
Обработчик сообщений от клиентов.
Функция принимает словарь-сообщение от клиента, проверяет корректность, возвращает словарь-ответ для клиента.
:param message:
:param messages_list:
:param client:
:return:
"""
SERVER_LOGGER.debug(f'Разбор сообщения от клиента: {message}.')
# Если это сообщение присутствует, принимаем и отвечаем.
if ACTION in message and message[ACTION] == PRESENCE and TIME in message \
and USER in message and message[USER][ACCOUNT_NAME] == 'Guest':
send_message(client, {RESPONSE: 200})
return
# Если это сообщение, то добавляем его в очередь сообщений. Ответ не требуется.
elif ACTION in message and message[ACTION] == MESSAGE and TIME in message \
and MESSAGE_TEXT in message:
messages_list.append((message[ACCOUNT_NAME], message[MESSAGE_TEXT]))
return
else:
send_message(client, {
RESPONSE: 400,
ERROR: 'Bad request',
})
return
@log
def arg_parser():
"""Парсер аргументов командной строки."""
parser = argparse.ArgumentParser()
parser.add_argument('-p', default=DEFAULT_PORT, type=int, nargs='?')
parser.add_argument('-a', default='', nargs='?')
namespace = parser.parse_args(sys.argv[1:])
listen_address = namespace.a
listen_port = namespace.p
# Проверка получения корректного номера порта для работы сервера.
if not 1023 < listen_port < 65535:
SERVER_LOGGER.critical(
f'Попытка запуска сервера с неподходящим номером порта: {listen_port}.'
f' Допустимые адреса с 1024 до 65535. Клиент завершается.'
)
sys.exit(1)
return listen_address, listen_port
def main():
"""
Загрузка параметров командной строки.
Если нет параметров, то задаем значения по умолчанию.
:return:
"""
listen_address, listen_port = arg_parser()
SERVER_LOGGER.info(f'Запущен сервер. Порт для подключений: {listen_port}, '
f'адрес, с которого принимаются подключения: {listen_address}. '
f'Если адрес не указан, то принимаются соединения с любых адресов.')
# Готовим сокет.
transport = socket(AF_INET, SOCK_STREAM)
transport.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
transport.bind((listen_address, listen_port))
transport.settimeout(1)
# Список клиентов, очередь сообщений.
clients = []
messages = []
# Слушаем порт.
transport.listen(MAX_CONNECTIONS)
while True:
try:
client, client_address = transport.accept()
except OSError as err:
print(err.errno)
pass
else:
SERVER_LOGGER.info(f'Установлено соединение с ПК {client_address}.')
clients.append(client)
recv_data_list = []
send_data_list = []
err_list = []
# Проверяем на наличие ждущих клиентов.
try:
if clients:
recv_data_list, send_data_list, err_list = select.select(clients, clients, [], 0)
except OSError:
pass
# Принимаем сообщения и еcли они есть, то кладем в словарь. В случае ошибки исключаем клиента.
if recv_data_list:
for client_with_message in recv_data_list:
try:
process_client_message(get_message(client_with_message), messages, client_with_message)
except:
SERVER_LOGGER.info(f'Клиент {client_with_message.getpeername()} отключился от сервера.')
clients.remove(client_with_message)
# Если есть сообщения для отправки и ожидающие клиенты, то отправляем им сообщение.
if messages and send_data_list:
message = {
ACTION: MESSAGE,
SENDER: messages[0][0],
TIME: time.time(),
MESSAGE_TEXT: messages[0][1]
}
del messages[0]
for waiting_client in send_data_list:
try:
send_message(waiting_client, message)
except:
SERVER_LOGGER.info(f'Клиент {waiting_client.getpeername()} отключился от сервера.')
waiting_client.close()
clients.remove(waiting_client)
if __name__ == '__main__':
main()
| Shorokhov-A/repo_client-server-apps_python | practical_task_7/server.py | server.py | py | 5,881 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "common.variables.ACTION",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "common.variables.PRESENCE",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "c... |
33406629366 | """
Main Neural Network Pipeline.
"""
#-------------------------- set gpu using tf ---------------------------#
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
#------------------- start importing keras module ---------------------#
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, Conv1D, GlobalAveragePooling2D
from keras.callbacks import ModelCheckpoint
from datagenerator import DataGenerator
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import numpy as np
import itertools
import os
# Parameters
if os.path.abspath('~') == '/Users/ghunk/~':
data_root = "/Users/ghunk/Desktop/GRADUATE/CSC_464/Final_Project/Dataset/stft_binaural_0.5s/"
else:
data_root = "/scratch/ghunkins/stft_binaural_0.5s/"
elevations = [-45, -30, -15, 0, 15, 30, 45]
azimuths = [15*x for x in range(24)]
el_az = list(itertools.product(elevations, azimuths))
classes = [str(x) + '_' + str(y) for x, y in el_az]
encoder = LabelEncoder()
encoder.fit(classes)
params = {'batch_size': 32,
'Y_encoder': encoder,
'shuffle': True}
LIMIT = 2000000
RANDOM_STATE = 3
# Datasets
IDs = os.listdir(data_root)[:LIMIT]
Train_IDs, Test_IDs, _, _, = train_test_split(IDs, np.arange(len(IDs)), test_size=0.2, random_state=RANDOM_STATE)
# Generators
training_generator = DataGenerator(**params).generate(Train_IDs)
validation_generator = DataGenerator(**params).generate(Test_IDs)
# Design model
model = Sequential()
model.add(Conv2D(256, kernel_size=(804, 1), activation='relu', input_shape=(804, 47, 1)))
model.add(Conv2D(256, kernel_size=(1, 3), strides=(1, 2), activation='relu'))
model.add(Conv2D(256, kernel_size=(1, 3), strides=(1, 2), activation='relu'))
model.add(Conv2D(256, kernel_size=(1, 3), strides=(1, 2), activation='relu'))
model.add(GlobalAveragePooling2D(data_format='channels_last'))
model.add(Dense(168, activation='relu'))
model.add(Dense(168, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.summary()
# set callback: https://machinelearningmastery.com/check-point-deep-learning-models-keras/
filepath="weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
# Train model on dataset
model.fit_generator(generator = training_generator,
steps_per_epoch = len(Train_IDs)//params['batch_size'],
nb_epoch = 50,
validation_data = validation_generator,
validation_steps = len(Test_IDs)//params['batch_size'],
verbose=2,
callbacks=callbacks_list)
model.save("./model_200000_job_epoch12.h5py")
| ghunkins/Binaural-Source-Localization-CNN | Neural_Net/v3/neuralnet.py | neuralnet.py | py | 2,940 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "tensorflow.ConfigProto",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
41585106305 | import json
# # Data to be written
# dictionary = {
# "hello":"lol"
# }
# # Serializing json
# json_object = json.dumps(dictionary, indent=4)
# Writing to sample.json
# with open("sample.json", "a") as outfile:
# outfile.write(json_object)
filename="sample.json"
entry={'hello','lol1'}
with open(filename, "r") as file:
data = json.load(file)
# 2. Update json object
data.append(entry)
# 3. Write json file
with open(filename, "w") as file:
json.dump(data, file) | AugeGottes/Yet-Another-Kafka | test.py | test.py | py | 484 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 22,
"usage_type": "call"
}
] |
1685939480 | import pandas as pd
import geopandas as gpd
from sqlalchemy import create_engine
from shapely.geometry import box
from SGIS.api import SGISRequest
from credentials.database import AP
CSV = '/Users/dongookson/Code/data-project/SGIS/key_locations/ad8.csv'
QUERY = 'select * from key_locations'
GET = False
# connect to database AP
engine = create_engine(AP)
if GET:
# read dummy data
df = pd.read_csv(CSV)[0:10]
# Make API GET request
patient_locs = [SGISRequest().geocode_addr(patient) for patient in df['Address']]
df['x_5179'] = [p.get('x') for p in patient_locs]
df['y_5179'] = [p.get('y') for p in patient_locs]
# create geodataframe
gdf = gpd.GeoDataFrame(
df,
crs='epsg:5179',
geometry=gpd.points_from_xy(x=df.x_5179, y=df.y_5179)
)
# create well-known-text(wkt) column for WGS84
gdf['wkt_4326'] = gdf.to_crs(4326)['geometry']
# write as table
gdf.to_postgis(
'key_locations',
engine
)
else:
gdf = gpd.read_postgis(QUERY, engine, geom_col='geometry')
print(gdf.head(10))
# print(gdf.crs)
# print(gdf['geometry'].sindex.query(box(988969.330849867, 988969.33084999, 1818020.086700, 1818020.0860560)))
# print(type(gdf['geometry'])) | donny-son/airhealth-database | SGIS/test_geocode.py | test_geocode.py | py | 1,276 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "credentials.database.AP",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "pandas.read_csv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "... |
8379980336 | from sys import argv
from panda3d.core import Vec3
from pandac.PandaModules import loadPrcFileData
loadPrcFileData('configurate', 'window-title Loading')
from direct.directbase import DirectStart
from direct.task import Task
from direct.actor.Actor import Actor
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.showbase.InputStateGlobal import inputState
from direct.controls.GravityWalker import GravityWalker
from direct.showbase import DirectObject
from direct.interval.IntervalGlobal import *
import urllib, os, __main__, random
from pandac.PandaModules import *
from random import choice
base.disableMouse()
title = OnscreenImage(image='phase_3/maps/Game_Toontown_Logo_1.jpg', pos=(0, 0, 0.0), parent=render2d)
base.graphicsEngine.renderFrame()
base.graphicsEngine.renderFrame()
base.graphicsEngine.renderFrame()
base.graphicsEngine.renderFrame()
title.destroy()
props = WindowProperties()
props.setTitle('League Of Toons')
base.win.requestProperties(props)
from panda3d.core import *
class guitools():
def createFrame(self, filepath, resX = None, resY = None):
yresolution = 600
tex = loader.loadTexture(filepath)
tex.setBorderColor(Vec4(0, 0, 0, 0))
tex.setWrapU(Texture.WMRepeat)
tex.setWrapV(Texture.WMRepeat)
cm = CardMaker(filepath + ' card')
if resX == None:
resX = tex.getOrigFileXSize()
if resY == None:
resY = tex.getOrigFileySize()
cm.setFrame(-resX, resX, -resY, resY)
card = NodePath(cm.generate())
card.setTexture(tex)
card.flattenLight()
card.setScale(card.getScale() / yresolution)
return card
def createButton(self, cmd, position, hpr, model, buttonImgs, scale, colour = (1, 1, 1, 1), text = ''):
ButtonImage = loader.loadModel(model)
ButtonImageUp = ButtonImage.find('**/' + buttonImgs[0])
ButtonImageDown = ButtonImage.find('**/' + buttonImgs[1])
ButtonImageRollover = ButtonImage.find('**/' + buttonImgs[-1])
return DirectButton(frameSize=None, image=(ButtonImageUp, ButtonImageDown, ButtonImageRollover), relief=None, command=cmd, geom=None, pad=(0.01, 0.01), text=text, suppressKeys=0, pos=position, hpr=hpr, text_fg=(1, 1, 1, 1), color=colour, text_scale=0.059, borderWidth=(0.13, 0.01), scale=scale)
class ClassicBook():
    """Shticker-book style GUI: a corner toggle button that opens/closes a full-screen book."""

    # Shared GUI model supplying the open/closed/rollover book-icon geometry.
    Book = loader.loadModel('phase_3.5/models/gui/stickerbook_gui.bam')

    def __beingOpened__(self):
        """Hide the closed-book button and put the avatar into its open-book pose."""
        self.BookClose.hide()
        try:
            base.localAvatar.b_setAnimState('OpenBook')
        except Exception:
            # Best effort: tolerate a missing or partially built local avatar.
            pass
        return True

    def __openBook__(self):
        """Run the opening animation, then reveal the book pages shortly after."""
        seq = Sequence()
        seq.append(Func(self.__beingOpened__))
        seq.append(Wait(0.3))
        seq.append(Func(self.__addNavs__))
        seq.start()
        return True

    def __delNavs__(self):
        """Close the book: hide the page backdrops, swap buttons, restore avatar controls."""
        self.bg[0].hide()
        self.bg[1].hide()
        self.BookOpen.removeNode()
        self.__addOnButton__()
        try:
            base.localAvatar.b_setAnimState('CloseBook')
            base.localAvatar.physControls.enableAvatarControls()
        except Exception:
            pass
        return True

    def __addNavs__(self):
        """Show the opened book: two backdrop frames plus the open-book button."""
        self.BookClose.removeNode()
        # Bug fix: the old `for b in range(2)` loop appended BOTH frames on EVERY
        # iteration, leaking two orphaned frames per open.  Create each exactly once.
        self.bg = [Guitools.createFrame('phase_3.5/maps/Book.jpg', base.win.getXSize() + 160, base.win.getYSize() + 125),
                   Guitools.createFrame('phase_3.5/maps/big_book.jpg', 600, 450)]
        for frame in self.bg:
            frame.reparentTo(aspect2d)
        self.bg[0].setPos(0, 0, 0)
        self.bg[1].setPos(0, 0, 0.1)  # big book sits slightly in front of the backdrop
        self.BookOpen = DirectButton(frameSize=None, image=(self.Book.find('**/BookIcon_OPEN'), self.Book.find('**/BookIcon_CLSD'), self.Book.find('**/BookIcon_RLVR2')), relief=None, command=self.__delNavs__, text='', text_pos=(0, -0.015), geom=None, scale=0.305, pad=(0.01, 0.01), suppressKeys=0, pos=(1.16, 0, -0.83), hpr=(0, 0, 0), text_scale=0.06, borderWidth=(0.015, 0.01))
        try:
            base.localAvatar.b_setAnimState('ReadBook')
        except Exception:
            pass
        return True

    def __addOnButton__(self):
        """Create the closed-book button that opens the book when clicked."""
        self.BookClose = DirectButton(frameSize=None, image=(self.Book.find('**/BookIcon_CLSD'), self.Book.find('**/BookIcon_OPEN'), self.Book.find('**/BookIcon_RLVR')), relief=None, command=self.__openBook__, text='', text_pos=(0, -0.015), geom=None, scale=0.305, pad=(0.01, 0.01), suppressKeys=0, pos=(1.16, 0, -0.83), hpr=(0, 0, 0), text_scale=0.06, borderWidth=(0.015, 0.01))
        return True

    def __init__(self):
        # Start in the closed state: only the open-book button is visible.
        self.__addOnButton__()
# Instantiate the GUI helper and the book UI, and take manual control of the camera.
Guitools = guitools()
BookGui = ClassicBook()
base.disableMouse()
# Animation name -> .bam file for the short-legs rig ('dgs_shorts_legs').
# Keys mirror torsoAnimDict so Actor can drive both parts with one loop name.
legsAnimDict = {
    'right-hand-start': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_right-hand-start.bam',
    'firehose': 'phase_5/models/char/tt_a_chr_dgs_shorts_legs_firehose.bam',
    'rotateL-putt': 'phase_6/models/char/tt_a_chr_dgs_shorts_legs_rotateL-putt.bam',
    'slip-forward': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_slip-forward.bam',
    'catch-eatnrun': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_eatnrun.bam',
    'tickle': 'phase_5/models/char/tt_a_chr_dgs_shorts_legs_tickle.bam',
    'water-gun': 'phase_5/models/char/tt_a_chr_dgs_shorts_legs_water-gun.bam',
    'leverNeutral': 'phase_10/models/char/tt_a_chr_dgs_shorts_legs_leverNeutral.bam',
    'swim': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_swim.bam',
    'catch-run': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_gamerun.bam',
    'sad-neutral': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_sad-neutral.bam',
    'pet-loop': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_petloop.bam',
    'jump-squat': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_jump-zstart.bam',
    'wave': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_wave.bam',
    'reel-neutral': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_reelneutral.bam',
    'pole-neutral': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_poleneutral.bam',
    'bank': 'phase_5.5/models/char/tt_a_chr_dgs_shorts_legs_jellybeanJar.bam',
    'scientistGame': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_scientistGame.bam',
    'right-hand': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_right-hand.bam',
    'lookloop-putt': 'phase_6/models/char/tt_a_chr_dgs_shorts_legs_lookloop-putt.bam',
    'victory': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_victory-dance.bam',
    'lose': 'phase_5/models/char/tt_a_chr_dgs_shorts_legs_lose.bam',
    'cringe': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_cringe.bam',
    'right': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_right.bam',
    'headdown-putt': 'phase_6/models/char/tt_a_chr_dgs_shorts_legs_headdown-putt.bam',
    'conked': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_conked.bam',
    'jump': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_jump.bam',
    'into-putt': 'phase_6/models/char/tt_a_chr_dgs_shorts_legs_into-putt.bam',
    'fish-end': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_fishEND.bam',
    'running-jump-land': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_leap_zend.bam',
    'shrug': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_shrug.bam',
    'sprinkle-dust': 'phase_5/models/char/tt_a_chr_dgs_shorts_legs_sprinkle-dust.bam',
    'hold-bottle': 'phase_5/models/char/tt_a_chr_dgs_shorts_legs_hold-bottle.bam',
    'takePhone': 'phase_5.5/models/char/tt_a_chr_dgs_shorts_legs_takePhone.bam',
    'melt': 'phase_5/models/char/tt_a_chr_dgs_shorts_legs_melt.bam',
    'pet-start': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_petin.bam',
    'look-putt': 'phase_6/models/char/tt_a_chr_dgs_shorts_legs_look-putt.bam',
    'loop-putt': 'phase_6/models/char/tt_a_chr_dgs_shorts_legs_loop-putt.bam',
    'good-putt': 'phase_6/models/char/tt_a_chr_dgs_shorts_legs_good-putt.bam',
    'juggle': 'phase_5/models/char/tt_a_chr_dgs_shorts_legs_juggle.bam',
    'run': 'phase_3/models/char/tt_a_chr_dgs_shorts_legs_run.bam',
    'pushbutton': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_press-button.bam',
    'sidestep-right': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_jump-back-right.bam',
    'water': 'phase_5.5/models/char/tt_a_chr_dgs_shorts_legs_water.bam',
    'right-point-start': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_right-point-start.bam',
    'bad-putt': 'phase_6/models/char/tt_a_chr_dgs_shorts_legs_bad-putt.bam',
    'struggle': 'phase_5/models/char/tt_a_chr_dgs_shorts_legs_struggle.bam',
    'running-jump': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_running-jump.bam',
    'callPet': 'phase_5.5/models/char/tt_a_chr_dgs_shorts_legs_callPet.bam',
    'throw': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_pie-throw.bam',
    'catch-eatneutral': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_eat_neutral.bam',
    'tug-o-war': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_tug-o-war.bam',
    'bow': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_bow.bam',
    'swing': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_swing.bam',
    'climb': 'phase_5/models/char/tt_a_chr_dgs_shorts_legs_climb.bam',
    'scientistWork': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_scientistWork.bam',
    'think': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_think.bam',
    'catch-intro-throw': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_gameThrow.bam',
    'walk': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_walk.bam',
    'down': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_down.bam',
    'pole': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_pole.bam',
    'periscope': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_periscope.bam',
    'duck': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_duck.bam',
    'curtsy': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_curtsy.bam',
    'jump-land': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_jump-zend.bam',
    'loop-dig': 'phase_5.5/models/char/tt_a_chr_dgs_shorts_legs_loop_dig.bam',
    'angry': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_angry.bam',
    'bored': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_bored.bam',
    'swing-putt': 'phase_6/models/char/tt_a_chr_dgs_shorts_legs_swing-putt.bam',
    'pet-end': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_petend.bam',
    'spit': 'phase_5/models/char/tt_a_chr_dgs_shorts_legs_spit.bam',
    'right-point': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_right-point.bam',
    'start-dig': 'phase_5.5/models/char/tt_a_chr_dgs_shorts_legs_into_dig.bam',
    'castlong': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_castlong.bam',
    'confused': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_confused.bam',
    'neutral': 'phase_3/models/char/tt_a_chr_dgs_shorts_legs_neutral.bam',
    'jump-idle': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_jump-zhang.bam',
    'reel': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_reel.bam',
    'slip-backward': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_slip-backward.bam',
    'sound': 'phase_5/models/char/tt_a_chr_dgs_shorts_legs_shout.bam',
    'sidestep-left': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_sidestep-left.bam',
    'up': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_up.bam',
    'fish-again': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_fishAGAIN.bam',
    'cast': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_cast.bam',
    'phoneBack': 'phase_5.5/models/char/tt_a_chr_dgs_shorts_legs_phoneBack.bam',
    'phoneNeutral': 'phase_5.5/models/char/tt_a_chr_dgs_shorts_legs_phoneNeutral.bam',
    'scientistJealous': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_scientistJealous.bam',
    'battlecast': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_fish.bam',
    'sit-start': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_intoSit.bam',
    'toss': 'phase_5/models/char/tt_a_chr_dgs_shorts_legs_toss.bam',
    'happy-dance': 'phase_5/models/char/tt_a_chr_dgs_shorts_legs_happy-dance.bam',
    'running-jump-squat': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_leap_zstart.bam',
    'teleport': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_teleport.bam',
    'sit': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_sit.bam',
    'sad-walk': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_losewalk.bam',
    'give-props-start': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_give-props-start.bam',
    'book': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_book.bam',
    'running-jump-idle': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_leap_zhang.bam',
    'scientistEmcee': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_scientistEmcee.bam',
    'leverPull': 'phase_10/models/char/tt_a_chr_dgs_shorts_legs_leverPull.bam',
    'tutorial-neutral': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_tutorial-neutral.bam',
    'badloop-putt': 'phase_6/models/char/tt_a_chr_dgs_shorts_legs_badloop-putt.bam',
    'give-props': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_give-props.bam',
    'hold-magnet': 'phase_5/models/char/tt_a_chr_dgs_shorts_legs_hold-magnet.bam',
    'hypnotize': 'phase_5/models/char/tt_a_chr_dgs_shorts_legs_hypnotize.bam',
    'left-point': 'phase_3.5/models/char/tt_a_chr_dgs_shorts_legs_left-point.bam',
    'leverReach': 'phase_10/models/char/tt_a_chr_dgs_shorts_legs_leverReach.bam',
    'feedPet': 'phase_5.5/models/char/tt_a_chr_dgs_shorts_legs_feedPet.bam',
    'reel-H': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_reelH.bam',
    'applause': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_applause.bam',
    'smooch': 'phase_5/models/char/tt_a_chr_dgs_shorts_legs_smooch.bam',
    'rotateR-putt': 'phase_6/models/char/tt_a_chr_dgs_shorts_legs_rotateR-putt.bam',
    'fish-neutral': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_fishneutral.bam',
    'push': 'phase_9/models/char/tt_a_chr_dgs_shorts_legs_push.bam',
    'catch-neutral': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_gameneutral.bam',
    'left': 'phase_4/models/char/tt_a_chr_dgs_shorts_legs_left.bam',
}
# Animation name -> .bam file for the long-shorts torso rig ('dgl_shorts_torso').
# Keys mirror legsAnimDict so Actor can drive both parts with one loop name.
torsoAnimDict = {
    'right-hand-start': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_right-hand-start.bam',
    'firehose': 'phase_5/models/char/tt_a_chr_dgl_shorts_torso_firehose.bam',
    'rotateL-putt': 'phase_6/models/char/tt_a_chr_dgl_shorts_torso_rotateL-putt.bam',
    'slip-forward': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_slip-forward.bam',
    'catch-eatnrun': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_eatnrun.bam',
    'tickle': 'phase_5/models/char/tt_a_chr_dgl_shorts_torso_tickle.bam',
    'water-gun': 'phase_5/models/char/tt_a_chr_dgl_shorts_torso_water-gun.bam',
    'leverNeutral': 'phase_10/models/char/tt_a_chr_dgl_shorts_torso_leverNeutral.bam',
    'swim': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_swim.bam',
    'catch-run': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_gamerun.bam',
    'sad-neutral': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_sad-neutral.bam',
    'pet-loop': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_petloop.bam',
    'jump-squat': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_jump-zstart.bam',
    'wave': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_wave.bam',
    'reel-neutral': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_reelneutral.bam',
    'pole-neutral': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_poleneutral.bam',
    'bank': 'phase_5.5/models/char/tt_a_chr_dgl_shorts_torso_jellybeanJar.bam',
    'scientistGame': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_scientistGame.bam',
    'right-hand': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_right-hand.bam',
    'lookloop-putt': 'phase_6/models/char/tt_a_chr_dgl_shorts_torso_lookloop-putt.bam',
    'victory': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_victory-dance.bam',
    'lose': 'phase_5/models/char/tt_a_chr_dgl_shorts_torso_lose.bam',
    'cringe': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_cringe.bam',
    'right': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_right.bam',
    'headdown-putt': 'phase_6/models/char/tt_a_chr_dgl_shorts_torso_headdown-putt.bam',
    'conked': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_conked.bam',
    'jump': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_jump.bam',
    'into-putt': 'phase_6/models/char/tt_a_chr_dgl_shorts_torso_into-putt.bam',
    'fish-end': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_fishEND.bam',
    'running-jump-land': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_leap_zend.bam',
    'shrug': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_shrug.bam',
    'sprinkle-dust': 'phase_5/models/char/tt_a_chr_dgl_shorts_torso_sprinkle-dust.bam',
    'hold-bottle': 'phase_5/models/char/tt_a_chr_dgl_shorts_torso_hold-bottle.bam',
    'takePhone': 'phase_5.5/models/char/tt_a_chr_dgl_shorts_torso_takePhone.bam',
    'melt': 'phase_5/models/char/tt_a_chr_dgl_shorts_torso_melt.bam',
    'pet-start': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_petin.bam',
    'look-putt': 'phase_6/models/char/tt_a_chr_dgl_shorts_torso_look-putt.bam',
    'loop-putt': 'phase_6/models/char/tt_a_chr_dgl_shorts_torso_loop-putt.bam',
    'good-putt': 'phase_6/models/char/tt_a_chr_dgl_shorts_torso_good-putt.bam',
    'juggle': 'phase_5/models/char/tt_a_chr_dgl_shorts_torso_juggle.bam',
    'run': 'phase_3/models/char/tt_a_chr_dgl_shorts_torso_run.bam',
    'pushbutton': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_press-button.bam',
    'sidestep-right': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_jump-back-right.bam',
    'water': 'phase_5.5/models/char/tt_a_chr_dgl_shorts_torso_water.bam',
    'right-point-start': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_right-point-start.bam',
    'bad-putt': 'phase_6/models/char/tt_a_chr_dgl_shorts_torso_bad-putt.bam',
    'struggle': 'phase_5/models/char/tt_a_chr_dgl_shorts_torso_struggle.bam',
    'running-jump': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_running-jump.bam',
    'callPet': 'phase_5.5/models/char/tt_a_chr_dgl_shorts_torso_callPet.bam',
    'throw': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_pie-throw.bam',
    'catch-eatneutral': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_eat_neutral.bam',
    'tug-o-war': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_tug-o-war.bam',
    'bow': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_bow.bam',
    'swing': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_swing.bam',
    'climb': 'phase_5/models/char/tt_a_chr_dgl_shorts_torso_climb.bam',
    'scientistWork': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_scientistWork.bam',
    'think': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_think.bam',
    'catch-intro-throw': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_gameThrow.bam',
    'walk': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_walk.bam',
    'down': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_down.bam',
    'pole': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_pole.bam',
    'periscope': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_periscope.bam',
    'duck': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_duck.bam',
    'curtsy': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_curtsy.bam',
    'jump-land': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_jump-zend.bam',
    'loop-dig': 'phase_5.5/models/char/tt_a_chr_dgl_shorts_torso_loop_dig.bam',
    'angry': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_angry.bam',
    'bored': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_bored.bam',
    'swing-putt': 'phase_6/models/char/tt_a_chr_dgl_shorts_torso_swing-putt.bam',
    'pet-end': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_petend.bam',
    'spit': 'phase_5/models/char/tt_a_chr_dgl_shorts_torso_spit.bam',
    'right-point': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_right-point.bam',
    'start-dig': 'phase_5.5/models/char/tt_a_chr_dgl_shorts_torso_into_dig.bam',
    'castlong': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_castlong.bam',
    'confused': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_confused.bam',
    'neutral': 'phase_3/models/char/tt_a_chr_dgl_shorts_torso_neutral.bam',
    'jump-idle': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_jump-zhang.bam',
    'reel': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_reel.bam',
    'slip-backward': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_slip-backward.bam',
    'sound': 'phase_5/models/char/tt_a_chr_dgl_shorts_torso_shout.bam',
    'sidestep-left': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_sidestep-left.bam',
    'up': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_up.bam',
    'fish-again': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_fishAGAIN.bam',
    'cast': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_cast.bam',
    'phoneBack': 'phase_5.5/models/char/tt_a_chr_dgl_shorts_torso_phoneBack.bam',
    'phoneNeutral': 'phase_5.5/models/char/tt_a_chr_dgl_shorts_torso_phoneNeutral.bam',
    'scientistJealous': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_scientistJealous.bam',
    'battlecast': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_fish.bam',
    'sit-start': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_intoSit.bam',
    'toss': 'phase_5/models/char/tt_a_chr_dgl_shorts_torso_toss.bam',
    'happy-dance': 'phase_5/models/char/tt_a_chr_dgl_shorts_torso_happy-dance.bam',
    'running-jump-squat': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_leap_zstart.bam',
    'teleport': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_teleport.bam',
    'sit': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_sit.bam',
    'sad-walk': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_losewalk.bam',
    'give-props-start': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_give-props-start.bam',
    'book': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_book.bam',
    'running-jump-idle': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_leap_zhang.bam',
    'scientistEmcee': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_scientistEmcee.bam',
    'leverPull': 'phase_10/models/char/tt_a_chr_dgl_shorts_torso_leverPull.bam',
    'tutorial-neutral': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_tutorial-neutral.bam',
    'badloop-putt': 'phase_6/models/char/tt_a_chr_dgl_shorts_torso_badloop-putt.bam',
    'give-props': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_give-props.bam',
    'hold-magnet': 'phase_5/models/char/tt_a_chr_dgl_shorts_torso_hold-magnet.bam',
    'hypnotize': 'phase_5/models/char/tt_a_chr_dgl_shorts_torso_hypnotize.bam',
    'left-point': 'phase_3.5/models/char/tt_a_chr_dgl_shorts_torso_left-point.bam',
    'leverReach': 'phase_10/models/char/tt_a_chr_dgl_shorts_torso_leverReach.bam',
    'feedPet': 'phase_5.5/models/char/tt_a_chr_dgl_shorts_torso_feedPet.bam',
    'reel-H': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_reelH.bam',
    'applause': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_applause.bam',
    'smooch': 'phase_5/models/char/tt_a_chr_dgl_shorts_torso_smooch.bam',
    'rotateR-putt': 'phase_6/models/char/tt_a_chr_dgl_shorts_torso_rotateR-putt.bam',
    'fish-neutral': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_fishneutral.bam',
    'push': 'phase_9/models/char/tt_a_chr_dgl_shorts_torso_push.bam',
    'catch-neutral': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_gameneutral.bam',
    'left': 'phase_4/models/char/tt_a_chr_dgl_shorts_torso_left.bam',
}
# Load the duck head model and prune the parts this avatar doesn't use.
DuckHead = loader.loadModel('phase_3/models/char/duck-heads-1000.bam')
# Remove the long-head variants, keeping the short head geometry.
otherParts = DuckHead.findAllMatches('**/*long*')
for partNum in range(0, otherParts.getNumPaths()):
    otherParts.getPath(partNum).removeNode()
# Keep only the neutral muzzle; every other muzzle expression is removed.
ntrlMuzzle = DuckHead.find('**/*muzzle*neutral')
otherParts = DuckHead.findAllMatches('**/*muzzle*')
for partNum in range(0, otherParts.getNumPaths()):
    part = otherParts.getPath(partNum)
    if part != ntrlMuzzle:
        otherParts.getPath(partNum).removeNode()
# Long-shorts torso and short-legs rigs matching torsoAnimDict / legsAnimDict.
DuckTorso = loader.loadModel('phase_3/models/char/tt_a_chr_dgl_shorts_torso_1000.bam')
DuckLegs = loader.loadModel('phase_3/models/char/tt_a_chr_dgs_shorts_legs_1000.bam')
# Strip footwear from the legs model.
# NOTE(review): '**/shoes' has no trailing wildcard while '**/boots*' does -- confirm intended.
otherParts = DuckLegs.findAllMatches('**/boots*') + DuckLegs.findAllMatches('**/shoes')
for partNum in range(0, otherParts.getNumPaths()):
    otherParts.getPath(partNum).removeNode()
# --- Assemble the duck avatar from its three rigs and dress it. ---
DuckBody = Actor(
    {'head': DuckHead, 'torso': DuckTorso, 'legs': DuckLegs},
    {'torso': torsoAnimDict, 'legs': legsAnimDict})
DuckBody.attach('head', 'torso', 'def_head')
DuckBody.attach('torso', 'legs', 'joint_hips')
# Look up each body-part node collection once for colouring/texturing below.
gloves = DuckBody.findAllMatches('**/hands')
ears = DuckBody.findAllMatches('**/*ears*')
head = DuckBody.findAllMatches('**/head-*')
sleeves = DuckBody.findAllMatches('**/sleeves')
shirt = DuckBody.findAllMatches('**/torso-top')
shorts = DuckBody.findAllMatches('**/torso-bot')
neck = DuckBody.findAllMatches('**/neck')
arms = DuckBody.findAllMatches('**/arms')
legs = DuckBody.findAllMatches('**/legs')
feet = DuckBody.findAllMatches('**/feet')
bodyNodes = [gloves, head, ears, sleeves, shirt, shorts, neck, arms, legs, feet]
# Per-part RGBA colours, in the same order as bodyNodes.
_white = (1, 1, 1, 1)
_orange = (1, 0.5, 0, 1)
_blue = (0.264, 0.308, 0.676, 1)
_green = (0.276, 0.872, 0.36, 1)
for _part, _colour in zip(bodyNodes, (_white, _orange, _orange, _blue, _blue,
                                      _white, _orange, _orange, _green, _green)):
    _part.setColor(*_colour)
# Clothing textures (override priority 1 so they replace the model's own).
topTex = loader.loadTexture('phase_3/maps/desat_shirt_5.jpg')
botTex = loader.loadTexture('phase_4/maps/CowboyShorts1.jpg')
sleeveTex = loader.loadTexture('phase_3/maps/desat_sleeve_5.jpg')
bodyNodes[3].setTexture(sleeveTex, 1)  # sleeves
bodyNodes[4].setTexture(topTex, 1)     # shirt
bodyNodes[5].setTexture(botTex, 1)     # shorts
DuckBody.reparentTo(render)
# Slim the body slightly on X and Z via the geom root's first child.
geom = DuckBody.getGeomNode()
geom.getChild(0).setSx(0.730000019073)
geom.getChild(0).setSz(0.730000019073)
# Chase camera: behind and above the avatar by `offset`.
offset = 3.2375
base.camera.reparentTo(DuckBody)
base.camera.setPos(0, -9.0 - offset, offset)
# Collision masks and the global collision traverser.
wallBitmask = BitMask32(1)
floorBitmask = BitMask32(2)
base.cTrav = CollisionTraverser()
base.camera.hide()
def getAirborneHeight():
    """Return the height above the floor at which the avatar counts as airborne."""
    clearance = 0.025
    return offset + clearance
# Gravity-based movement controller for the avatar (Panda3D GravityWalker).
walkControls = GravityWalker(legacyLifter=True)
walkControls.setWallBitMask(wallBitmask)
walkControls.setFloorBitMask(floorBitmask)
# Movement speeds -- presumably (forward, jump, reverse, rotate) per GravityWalker; confirm against its docs.
walkControls.setWalkSpeed(16.0, 24.0, 8.0, 80.0)
walkControls.initializeCollisions(base.cTrav, DuckBody, floorOffset=0.025, reach=4.0)
walkControls.setAirborneHeightFunc(getAirborneHeight)
walkControls.enableAvatarControls()
# Expose the controller on the avatar, mirroring Toontown's localAvatar API.
DuckBody.physControls = walkControls
def setWatchKey(key, input, keyMapName):
    """Bind *key* press/release to an inputState input and a keyMap slot."""
    def watchKey(active = True):
        pressed = active == True
        inputState.set(input, pressed)
        keyMap[keyMapName] = 1 if pressed else 0
    # Press fires with True, release ('-up') with False.
    base.accept(key, watchKey, [True])
    base.accept(key + '-up', watchKey, [False])
# Pressed-state map polled by handleMovement(): 1 while a key is held, else 0.
keyMap = dict.fromkeys(('left', 'right', 'forward', 'backward', 'control'), 0)
# Arrow keys drive movement regardless of which modifier is held; control jumps.
for _baseKey, _inputName, _mapName in (
        ('arrow_up', 'forward', 'forward'),
        ('arrow_down', 'reverse', 'backward'),
        ('arrow_left', 'turnLeft', 'left'),
        ('arrow_right', 'turnRight', 'right')):
    for _modifier in ('', 'control-', 'alt-', 'shift-'):
        setWatchKey(_modifier + _baseKey, _inputName, _mapName)
setWatchKey('control', 'jump', 'control')
# Animation-state flags; setMovementAnimation() raises at most one at a time.
movingNeutral = False
movingForward = False
movingRotation = False
movingBackward = False
movingJumping = False
def setMovementAnimation(loopName, playRate = 1.0):
    """Start looping *loopName* on the avatar and update the movement-state flags.

    A backwards walk is requested with playRate=-1.0 and tracked via
    movingBackward; any loop containing 'jump' counts as jumping.
    """
    global movingRotation
    global movingBackward
    global movingForward
    global movingNeutral
    global movingJumping
    # Clear every flag first, then raise the one matching the new loop.
    movingJumping = movingForward = movingNeutral = movingRotation = movingBackward = False
    if 'jump' in loopName:
        movingJumping = True
    elif loopName == 'run':
        movingForward = True
    elif loopName == 'walk':
        if playRate == -1.0:
            movingBackward = True
        else:
            movingRotation = True
    elif loopName == 'neutral':
        movingNeutral = True
    ActorInterval(DuckBody, loopName, playRate=playRate).loop()
def handleMovement(task):
    """Per-frame task: choose the avatar animation matching the current key state.

    Reads the module-level keyMap and moving* flags; setMovementAnimation()
    is only called on state *transitions* so the loop isn't restarted every
    frame.  Always returns Task.cont to stay scheduled.
    """
    if keyMap['control'] == 1:
        # Jump key held.
        if keyMap['forward'] or keyMap['backward'] or keyMap['left'] or keyMap['right']:
            # Jumping while moving.
            if movingJumping == False:
                if DuckBody.physControls.isAirborne:
                    setMovementAnimation('running-jump-idle')
                elif keyMap['forward']:
                    if movingForward == False:
                        setMovementAnimation('run')
                elif keyMap['backward']:
                    if movingBackward == False:
                        setMovementAnimation('walk', playRate=-1.0)
                elif keyMap['left'] or keyMap['right']:
                    if movingRotation == False:
                        setMovementAnimation('walk')
            elif not DuckBody.physControls.isAirborne:
                # Landed while a direction key is still held: resume ground loop.
                if keyMap['forward']:
                    if movingForward == False:
                        setMovementAnimation('run')
                elif keyMap['backward']:
                    if movingBackward == False:
                        setMovementAnimation('walk', playRate=-1.0)
                elif keyMap['left'] or keyMap['right']:
                    if movingRotation == False:
                        setMovementAnimation('walk')
        elif movingJumping == False:
            # Jumping in place.
            if DuckBody.physControls.isAirborne:
                setMovementAnimation('jump-idle')
            elif movingNeutral == False:
                setMovementAnimation('neutral')
        elif not DuckBody.physControls.isAirborne:
            # Jump finished: return to neutral.
            if movingNeutral == False:
                setMovementAnimation('neutral')
                # NOTE(review): fsrun is the looping theme music loaded below --
                # stopping it here looks unintended; confirm.
                fsrun.stop()
    elif keyMap['forward'] == 1:
        if movingForward == False:
            if not DuckBody.physControls.isAirborne:
                setMovementAnimation('run')
    elif keyMap['backward'] == 1:
        if movingBackward == False:
            if not DuckBody.physControls.isAirborne:
                setMovementAnimation('walk', playRate=-1.0)
    elif keyMap['left'] or keyMap['right']:
        if movingRotation == False:
            if not DuckBody.physControls.isAirborne:
                setMovementAnimation('walk')
                # NOTE(review): loads the footstep sfx on each transition but
                # never plays or stores it globally -- confirm intent.
                fswalk = loader.loadSfx('phase_3.5/audio/sfx/AV_footstep_walkloop.wav')
    elif not DuckBody.physControls.isAirborne:
        if movingNeutral == False:
            setMovementAnimation('neutral')
    return Task.cont
# Run the movement-animation state machine every frame.
base.taskMgr.add(handleMovement, 'controlManager')
def collisionsOn():
    """Enable the avatar's collision handling and re-flag it as airborne."""
    controls = DuckBody.physControls
    controls.setCollisionsActive(True)
    controls.isAirborne = True
def collisionsOff():
    """Disable the avatar's collision handling and re-flag it as airborne."""
    controls = DuckBody.physControls
    controls.setCollisionsActive(False)
    controls.isAirborne = True
def toggleCollisions():
    """Flip the avatar's collision handling on/off; always re-flags it as airborne."""
    controls = DuckBody.physControls
    controls.setCollisionsActive(not controls.getCollisionsActive())
    controls.isAirborne = True
# F1 toggles collisions; also expose the helpers on the avatar object.
base.accept('f1', toggleCollisions)
DuckBody.collisionsOn = collisionsOn
DuckBody.collisionsOff = collisionsOff
DuckBody.toggleCollisions = toggleCollisions
# Looping background music (despite the 'fsrun' name, this is the theme BGM).
fsrun = loader.loadSfx('phase_3/audio/bgm/tt_theme.mid')
fsrun.setLoop(True)
fsrun.play()
# Publish the duck as the local avatar and drop it onto the floor.
localAvatar = DuckBody
base.localAvatar = localAvatar
localAvatar.physControls.placeOnFloor()
# Debug overlay starts disabled; see updateOnScreenDebug below.
onScreenDebug.enabled = False
def updateOnScreenDebug(task):
    """Per-frame task: mirror the avatar's position and heading onto the debug overlay."""
    for label, reading in (('Avatar Position', localAvatar.getPos()),
                           ('Avatar Angle', localAvatar.getHpr())):
        onScreenDebug.add(label, reading)
    return Task.cont
# Display font used for sign text (see the environment's textloader calls).
MickeyFont = loader.loadFont('phase_3/models/fonts/MickeyFont.bam')
class EnvironmentTTC():
    """Builds the Toontown Central playground scene.

    Every loaded NodePath is stored in self.modeldict keyed by the nodename
    passed to the loader helpers, so individual props can be looked up later.
    """
    def __init__(self):
        # nodename -> NodePath for everything loaded by this class.
        self.modeldict = {}
        self.LoadTTC()
    def LoadTTC(self):
        """Load and place sky, buildings, tunnels, signage and props for TTC."""
        # Sky dome with two cloud layers spinning in opposite directions.
        self.modelloader('Sky', 'phase_3.5/models/props/TT_sky.bam', render, 0, 0, 0, 0, 0, 0, 5, 5, 5)
        Clouds1 = self.modeldict['Sky'].find('**/cloud1')
        Clouds2 = self.modeldict['Sky'].find('**/cloud2')
        Clouds1.setScale(0.6, 0.6, 0.6)
        Clouds2.setScale(0.9, 0.9, 0.9)
        Clouds1Spin = Clouds1.hprInterval(360, Vec3(60, 0, 0))
        Clouds1Spin.loop()
        Clouds2Spin = Clouds2.hprInterval(360, Vec3(-60, 0, 0))
        Clouds2Spin.loop()
        # Main playground terrain/buildings.
        self.modelloader('TTC', 'phase_4/models/neighborhoods/toontown_central.bam', render, 0, 0, 0, -90, 0, 0, 1, 1, 1)
        self.modeldict['TTC'].setTransparency(TransparencyAttrib.MBinary, 1)
        # Toon HQ (door-frame hole geometry hidden since no working doors).
        self.modelloader('ToonHQ', 'phase_3.5/models/modules/hqTT.bam', render, 24.6425, 24.8587, 4.00001, 135, 0, 0, 1, 1, 1)
        self.modeldict['ToonHQ'].find('**/doorFrameHoleRight_0').hide()
        self.modeldict['ToonHQ'].find('**/doorFrameHoleLeft_0').hide()
        self.modeldict['ToonHQ'].find('**/doorFrameHoleRight_1').hide()
        self.modeldict['ToonHQ'].find('**/doorFrameHoleLeft_1').hide()
        self.modelloader('Partygate', 'phase_4/models/modules/partyGate_TT.bam', render, 77.935, -159.939, 2.70141, 195, 0, 0, 1, 1, 1)
        # Pet shop with animated exterior fish and a text sign + door prop.
        self.modelloader('Petshop', 'phase_4/models/modules/PetShopExterior_TT.bam', render, -124.375, 74.3749, 0.5, 49, 0, 0, 1, 1, 1)
        self.modelloaderanimate('PetshopFish', 'phase_4/models/props/exteriorfish-zero.bam', 'phase_4/models/props/exteriorfish-swim.bam', self.modeldict['Petshop'], 0, 0, 0, 0, 0, 0, 1, 1, 1, 'swim')
        Petsign1 = self.modeldict['Petshop'].find('**/sign_origin')
        self.textloader('Pettext2', 'Pettextnode2', 'Pettextname2', 'Pet Shop', MickeyFont, Petsign1, -5, -0.2, 0.2, 0, 0, 0, 2, 2, 2, 0.9, 0.88, 0.1)
        Petdoor = self.modeldict['Petshop'].find('**/door_origin')
        self.modelloadercopyto('Door1', 'phase_3.5/models/modules/doors_practical.bam', 'door_double_round_ur', Petdoor, 0, -0.1, 0, 0, 0, 0, 1, 1, 1)
        self.modeldict['Door1'].setColor(1, 0.87, 0.38)
        # Clothing shop.
        self.modelloader('Clothingshop', 'phase_4/models/modules/clothshopTT.bam', render, 106.265, 160.831, 3, -30, 0, 0, 1, 1, 1)
        Clothingsign1 = self.modeldict['Clothingshop'].find('**/sign_origin')
        self.textloader('Clothingtext2', 'Clothingtextnode2', 'Clothingtextname2', 'Clothing Shop', MickeyFont, Clothingsign1, -6.7, -0.2, 0.1, 0, 0, 0, 1.5, 1.5, 1.5, 0.88, 0.45, 0.38)
        Clothingdoor = self.modeldict['Clothingshop'].find('**/door_origin')
        self.modelloadercopyto('Door2', 'phase_3.5/models/modules/doors_practical.bam', 'door_double_clothshop', Clothingdoor, 0, -0.1, 0, 0, 0, 0, 1, 1, 1)
        self.modeldict['Door2'].setColor(0.88, 0.45, 0.38)
        # Toon Hall with two lines of sign text.
        self.modelloader('Toonhall', 'phase_4/models/modules/toonhall.bam', render, 116.66, 24.29, 4, -90, 0, 0, 1, 1, 1)
        Hallsign = self.modeldict['Toonhall'].find('**/sign_origin')
        self.textloader('Halltext1', 'Halltextnode1', 'Halltextname1', 'Mickey', MickeyFont, Hallsign, -5, -0.2, -0.5, 0, 0, 0, 2.5, 2.5, 2.5, 0.9, 0.88, 0.1)
        self.textloader('Halltext2', 'Halltextnode2', 'Halltextname2', 'Toon Hall', MickeyFont, Hallsign, -7, -0.2, -3, 0, 0, 0, 2.5, 2.5, 2.5, 0.9, 0.88, 0.1)
        Halldoor = self.modeldict['Toonhall'].find('**/toonhall_door_origin')
        self.modelloadercopyto('Door3', 'phase_3.5/models/modules/doors_practical.bam', 'door_double_round_ur', Halldoor, 0, -0.1, 0, 0, 0, 0, 1, 1, 1)
        self.modeldict['Door3'].setColor(0.88, 0.45, 0.38)
        # Schoolhouse.
        self.modelloader('Schoolhouse', 'phase_4/models/modules/school_house.bam', render, 129.919, -138.445, 2.4997, -140, 0, 0, 1, 1, 1)
        Schoolsign = self.modeldict['Schoolhouse'].find('**/sign_origin')
        self.modelloadercopyto('Schoolsign', 'phase_4/models/props/signs_TTC.bam', 'TTC_sign3', Schoolsign, 1, -0.05, 3.7, 0, 0, 0, 1, 1, 1)
        self.textloader('Schooltext1', 'Schooltextnode1', 'Schooltextname1', 'Toontown', MickeyFont, Schoolsign, -2.5, -0.07, 4.8, 0, 0, 0, 1, 1, 1, 0.9, 0.88, 0.4)
        self.textloader('Schooltext2', 'Schooltextnode2', 'Schooltextname2', 'School House', MickeyFont, Schoolsign, -4.8, -0.07, 3, 0, 0, 0, 1.4, 1.4, 1.4, 0.9, 0.5, 0.1)
        Schooldoor = self.modeldict['Schoolhouse'].find('**/school_door_origin')
        self.modelloadercopyto('Door4', 'phase_3.5/models/modules/doors_practical.bam', 'door_double_square_ul', Schooldoor, 0, -0.1, 0, 0, 0, 0, 1, 1, 1)
        self.modeldict['Door4'].setColor(1, 0.63, 0.38)
        # Bank.
        self.modelloader('Bank', 'phase_4/models/modules/bank.bam', render, 57.1796, 38.6656, 4, 0, 0, 0, 1, 1, 1)
        Banksign = self.modeldict['Bank'].find('**/sign_origin')
        self.textloader('Banktext1', 'Banktextnode1', 'Banktextname1', 'Bank', MickeyFont, Banksign, -3.1, -0.2, -1, 0, 0, 0, 2.5, 2.5, 2.5, 0.9, 0.6, 0.1)
        Bankdoor = self.modeldict['Bank'].find('**/bank_door_origin')
        self.modelloadercopyto('Door5', 'phase_3.5/models/modules/doors_practical.bam', 'door_double_round_ur', Bankdoor, 0, -0.1, 0, 0, 0, 0, 1, 1, 1)
        self.modeldict['Door5'].setColor(0.88, 0.45, 0.38)
        # Library.
        self.modelloader('Library', 'phase_4/models/modules/library.bam', render, 91.4475, -44.9255, 4, 180, 0, 0, 1, 1, 1)
        Librarysign = self.modeldict['Library'].find('**/sign_origin')
        self.modelloadercopyto('Librarysign', 'phase_4/models/props/signs_TTC.bam', 'TTC_sign3', Librarysign, 1.7, -0.05, 3.7, 0, 0, 0, 1, 1, 1)
        self.textloader('Librarytext1', 'Librarytextnode1', 'Librarytextname1', 'Toontown', MickeyFont, Librarysign, -1.5, -0.07, 4.8, 0, 0, 0, 1, 1, 1, 0.9, 0.88, 0.4)
        self.textloader('Librarytext2', 'Librarytextnode2', 'Librarytextname2', 'Library', MickeyFont, Librarysign, -2.8, -0.07, 3, 0, 0, 0, 1.9, 1.9, 1.9, 0.9, 0.5, 0.1)
        Librarydoor = self.modeldict['Library'].find('**/library_door_origin')
        self.modelloadercopyto('Door6', 'phase_3.5/models/modules/doors_practical.bam', 'door_double_round_ur', Librarydoor, 0, 0, 0, 0, 0, 0, 1, 1, 1)
        self.modeldict['Door6'].setColor(0.88, 0.45, 0.38)
        # Gag shop (door is parented at the building_front node).
        self.modelloader('Gagshop', 'phase_4/models/modules/gagShop_TT.bam', render, -86.6848, -90.5693, 0.500015, 0, 0, 0, 1, 1, 1)
        Gagdoor = self.modeldict['Gagshop'].find('**/building_front')
        self.modelloadercopyto('Door7', 'phase_3.5/models/modules/doors_practical.bam', 'door_double_square_ur', Gagdoor, 3, 0.1, 0, 180, 0, 0, 1, 1, 1)
        self.modeldict['Door7'].setColor(1, 0.63, 0.38)
        # Goofy Speedway tunnel entrance and safe-zone tunnels with signage.
        self.modelloader('GoofyTunnel', 'phase_4/models/modules/Speedway_Tunnel.bam', render, 20.9205, 172.683, 3.24925, -150, -0.083787, 0.0101321, 1, 1, 1)
        Goofysign = self.modeldict['GoofyTunnel'].find('**/sign_origin')
        self.textloader('Goofytext1', 'Goofytextnode1', 'Goofytextname1', 'Goofy', MickeyFont, Goofysign, -2, -0.07, 0.7, 0, 0, 0, 2.2, 2.2, 2.2, 0.1, 0.1, 0.7)
        self.textloader('Goofytext2', 'Goofytextnode2', 'Goofytextname2', 'Speed Way', MickeyFont, Goofysign, -6.1, -0.07, -2.8, 0, 0, 0, 2.6, 2.6, 2.6, 0.9, 0.5, 0.1)
        self.modelloader('FirstTunnel', 'phase_4/models/modules/safe_zone_tunnel_TT.bam', render, -239.67, 64.08, -6.18, -90, 0, 0, 1, 1, 1)
        SignOrigin1 = self.modeldict['FirstTunnel'].find('**/sign_origin')
        self.modelloader('Orangesign1', 'phase_3.5/models/props/tunnel_sign_orange.bam', SignOrigin1, 0, -0.05, 0, 0, 0, 0, 1.5, 1.5, 1.5)
        self.textloader('Tunnel1text1', 'Tunnel1textnode1', 'Tunnel1textname1', 'Loopy Lane', MickeyFont, SignOrigin1, -5.5, -0.07, -1.8, 0, 0, 0, 1.6, 1.6, 1.6, 0.0, 0.6, 0.1)
        self.textloader('Tunnel1text2', 'Tunnel1textnode2', 'Tunnel1textname2', 'Toontown Central', MickeyFont, SignOrigin1, -5.7, -0.7, -2.9, 0, 0, 0, 1, 1, 1, 0.0, 0.6, 0.0)
        self.modelloader('MickeyLogo1', 'phase_3.5/models/props/mickeySZ.bam', SignOrigin1, 0, -0.07, 2, 0, 0, 0, 4.5, 4.5, 4.5)
        self.modelloader('SecondTunnel', 'phase_4/models/modules/safe_zone_tunnel_TT.bam', render, -68.38, -202.64, -3.58, -31, 0, 0, 1, 1, 1)
        SignOrigin2 = self.modeldict['SecondTunnel'].find('**/sign_origin')
        self.textloader('Tunnel2text1', 'Tunnel2textnode1', 'Tunnel2textname1', 'Silly Street', MickeyFont, SignOrigin2, -5.9, -0.07, -1.8, 0, 0, 0, 1.6, 1.6, 1.6, 0.0, 0.6, 0.1)
        self.textloader('Tunnel2text2', 'Tunnel2textnode2', 'Tunnel2textname2', 'Toontown Central', MickeyFont, SignOrigin2, -5.7, -0.7, -2.9, 0, 0, 0, 1, 1, 1, 0.0, 0.6, 0.0)
        self.modelloader('Orangesign2', 'phase_3.5/models/props/tunnel_sign_orange.bam', SignOrigin2, 0, -0.05, 0, 0, 0, 0, 1.5, 1.5, 1.5)
        self.modelloader('MickeyLogo2', 'phase_3.5/models/props/mickeySZ.bam', SignOrigin2, 0, -0.07, 2, 0, 0, 0, 4.5, 4.5, 4.5)
        self.modelloader('ThirdTunnel', 'phase_4/models/modules/safe_zone_tunnel_TT.bam', render, 27.6402, 176.475, -6.18, 171, 0, 0, 1, 1, 1)
        SignOrigin3 = self.modeldict['ThirdTunnel'].find('**/sign_origin')
        self.textloader('Tunnel3text1', 'Tunnel3textnode1', 'Tunnel3textname1', 'Punchline Place', MickeyFont, SignOrigin3, -7.7, -0.07, -1.8, 0, 0, 0, 1.6, 1.6, 1.6, 0.0, 0.6, 0.1)
        self.textloader('Tunnel3text2', 'Tunnel3textnode2', 'Tunnel3textname2', 'Toontown Central', MickeyFont, SignOrigin3, -5.7, -0.7, -2.9, 0, 0, 0, 1, 1, 1, 0.0, 0.6, 0.0)
        self.modelloader('Orangesign3', 'phase_3.5/models/props/tunnel_sign_orange.bam', SignOrigin3, 0, -0.05, 0, 0, 0, 0, 1.5, 1.5, 1.5)
        self.modelloader('MickeyLogo3', 'phase_3.5/models/props/mickeySZ.bam', SignOrigin3, 0, -0.07, 2, 0, 0, 0, 4.5, 4.5, 4.5)
        # Fishing piers, neighborhood signs and decorative props.
        self.modelloader('Fishingdock1', 'phase_4/models/props/piers_tt.bam', render, -63.5335, 41.648, -3.36708, 120, 0, 0, 1, 1, 1)
        self.modelloader('Fishingdock2', 'phase_4/models/props/piers_tt.bam', render, -90.2253, 42.5202, -3.3105, -135, 0, 0, 1, 1, 1)
        self.modelloader('Fishingdock3', 'phase_4/models/props/piers_tt.bam', render, -94.9218, 31.4153, -3.20083, -105, 0, 0, 1, 1, 1)
        self.modelloader('Fishingdock4', 'phase_4/models/props/piers_tt.bam', render, -77.5199, 46.9817, -3.28456, -180, 0, 0, 1, 1, 1)
        self.modelloader('DDSign1', 'phase_4/models/props/neighborhood_sign_DD.bam', render, -59.1768, 92.9836, 0.499824, -9, 0, 0, 1, 1, 1)
        self.modelloader('DDSign2', 'phase_4/models/props/neighborhood_sign_DD.bam', render, -33.749, 88.9499, 0.499825, 170, 0, 0, 1, 1, 1)
        self.modelloader('MMSign1', 'phase_4/models/props/neighborhood_sign_MM.bam', render, -143.503, -8.9528, 0.499987, 90, 0, 0, 1, 1, 1)
        self.modelloader('MMSign2', 'phase_4/models/props/neighborhood_sign_MM.bam', render, -143.242, 16.9541, 0.499977, -90, 0, 0, 1, 1, 1)
        self.modelloader('DGSign1', 'phase_4/models/props/neighborhood_sign_DG.bam', render, 21.3941, -144.665, 2.99998, -30, 0, 0, 1, 1, 1)
        self.modelloader('DGSign2', 'phase_4/models/props/neighborhood_sign_DG.bam', render, 44.1038, -157.906, 2.99998, 148, 0, 0, 1, 1, 1)
        self.modelloader('Gazebo', 'phase_4/models/modules/gazebo.bam', render, -60.44, -11.4, -2, -178, 0, 0, 1, 1, 1)
        self.modelloader('Fountain', 'phase_4/models/props/toontown_central_fountain.bam', render, 93.2057, -106.482, 2.50002, 0, 0, 0, 1, 1, 1)
        self.modelloader('Mickeyhorse', 'phase_4/models/props/mickey_on_horse.bam', render, 73.6829, 121.026, 2.49996, 0, 0, 0, 1, 1, 1)
        self.modelloader('FlowerPlant1', 'phase_3.5/models/props/big_planter.bam', render, 18.9496, -48.977, 4.95856, 135, 0, 0, 1, 1, 1)
        self.modelloader('FlowerPlant2', 'phase_3.5/models/props/big_planter.bam', render, 19.2327, 52.5553, 4.95837, -135, 0, 0, 1, 1, 1)
        self.modelloader('Fence1', 'phase_3.5/models/modules/wood_fence.bam', render, -148, -23, 0.5, 90, 0, 0, 1, 1, 1)
        self.modelloader('Fence2', 'phase_3.5/models/modules/wood_fence.bam', render, -147, -32.8, 0.5, 96, 0, 0, 1, 1, 1)
        self.modelloader('Fence3', 'phase_3.5/models/modules/wood_fence.bam', render, -144.1, -41.9, 0.5, 107, 0, 0, 1, 1, 1)
        self.modelloader('Fence4', 'phase_3.5/models/modules/wood_fence.bam', render, -95, -95.5, 0.5, 160, 0, 0, 1, 1, 1)
        self.modelloader('Fence5', 'phase_3.5/models/modules/wood_fence.bam', render, -104, -92.2, 0.5, 150, 0, 0, 1, 1, 1)
        self.modelloader('Fence6', 'phase_3.5/models/modules/wood_fence.bam', render, -112.5, -87.3, 0.5, 148, 0, 0, 1, 1, 1)
        self.modelloader('Fence7', 'phase_3.5/models/modules/wood_fence.bam', render, -140.73, -53, 0.5, 107, 0, 0, 1.16, 1, 1.0)
        # Street lights (helper hides the unwanted lamp variants per model).
        # NOTE(review): Streetlight1's heading of 1500 looks like a typo
        # (headings elsewhere stay within +-360) -- confirm intended value.
        self.modelloaderstreetlight('Streetlight1', 'phase_3.5/models/props/streetlight_TT.bam', render, -125, 60, 0.5, 1500, 0, 0, 1, 1, 1)
        self.modelloaderstreetlight('Streetlight2', 'phase_3.5/models/props/streetlight_TT.bam', render, 58.8, 93.6, 3, -90, 0, 0, 1, 1, 1)
        self.modelloaderstreetlight('Streetlight3', 'phase_3.5/models/props/streetlight_TT.bam', render, 95, 93.6, 3, -90, 0, 0, 1, 1, 1)
        self.modelloaderstreetlight('Streetlight4', 'phase_3.5/models/props/streetlight_TT.bam', render, 134, -126, 3, -130, 0, 0, 1, 1, 1)
        self.modelloaderstreetlight('Streetlight5', 'phase_3.5/models/props/streetlight_TT.bam', render, 108, -28, 4, -90, 0, 0, 1, 1, 1)
        self.modelloaderstreetlight('Streetlight6', 'phase_3.5/models/props/streetlight_TT.bam', render, 108, 32, 4, -90, 0, 0, 1, 1, 1)
        self.modelloaderstreetlight('Streetlight7', 'phase_3.5/models/props/streetlight_TT.bam', render, 32, 61, 4, -90, 0, 0, 1, 1, 1)
        self.modelloaderstreetlight('Streetlight8', 'phase_3.5/models/props/streetlight_TT.bam', render, 28, -57, 4, -90, 0, 0, 1, 1, 1)
        self.modelloaderstreetlight('Streetlight9', 'phase_3.5/models/props/streetlight_TT.bam', render, -101, -70, 0.5, 80, 0, 0, 1, 1, 1)
        self.modelloaderstreetlight('Streetlight10', 'phase_3.5/models/props/streetlight_TT.bam', render, -129, -42.5, 0.5, 90, 0, 0, 1, 1, 1)
        self.modelloaderstreetlight('Streetlight11', 'phase_3.5/models/props/streetlight_TT.bam', render, 3.8, 118, 3, -110, 0, 0, 1, 1, 1)
        self.modelloaderstreetlight('Streetlight12', 'phase_3.5/models/props/streetlight_TT.bam', render, 116, 146, 3, 145, 0, 0, 1, 1, 1)
        self.modelloaderstreetlight('Streetlight13', 'phase_3.5/models/props/streetlight_TT.bam', render, 86, 164, 3, -95, 0, 0, 1, 1, 1)
        self.modelloaderstreetlight('Streetlight14', 'phase_3.5/models/props/streetlight_TT.bam', render, 45.5, -88, 3, -2, 0, 0, 1, 1, 1)
        self.modelloaderstreetlight('Streetlight15', 'phase_3.5/models/props/streetlight_TT.bam', render, 78.3, -88, 3, -2, 0, 0, 1, 1, 1)
        self.modelloaderstreetlight('Streetlight16', 'phase_3.5/models/props/streetlight_TT.bam', render, 100, -157, 3, 30, 0, 0, 1, 1, 1)
        # Trees (copied out of the shared trees.bam prop collection).
        self.modelloadercopyto('Tree1', 'phase_3.5/models/props/trees.bam', 'prop_tree_large_no_box_ul', render, -80.9143, 79.7948, 0.2, 1, 1, 1, 1, 1, 1)
        self.modelloadercopyto('Tree2', 'phase_3.5/models/props/trees.bam', 'prop_tree_large_no_box_ul', render, -26.1169, 73.7975, 0.2, 1, 1, 1, 1, 1, 1)
        self.modelloadercopyto('Tree3', 'phase_3.5/models/props/trees.bam', 'prop_tree_fat_no_box_ul', render, 7.14367, 100.346, 2.725, 1, 1, 1, 1, 1, 1)
        self.modelloadercopyto('Tree4', 'phase_3.5/models/props/trees.bam', 'prop_tree_fat_no_box_ul', render, 55.8308, 153.977, 2.725, 1, 1, 1, 1, 1, 1)
        self.modelloadercopyto('Tree5', 'phase_3.5/models/props/trees.bam', 'prop_tree_fat_no_box_ul', render, 102.359, 81.1646, 2.725, 1, 1, 1, 1, 1, 1)
        self.modelloadercopyto('Tree6', 'phase_3.5/models/props/trees.bam', 'prop_tree_fat_no_box_ul', render, 114.09, 57.3141, 2.725, 1, 1, 1, 1, 1, 1)
        self.modelloadercopyto('Tree7', 'phase_3.5/models/props/trees.bam', 'prop_tree_fat_no_box_ul', render, 143.598, 110.178, 2.725, 1, 1, 1, 1, 1, 1)
        self.modelloadercopyto('Tree8', 'phase_3.5/models/props/trees.bam', 'prop_tree_large_no_box_ul', render, -128.41, 32.9562, 0.2, 1, 1, 1, 1, 1, 1)
        self.modelloadercopyto('Tree9', 'phase_3.5/models/props/trees.bam', 'prop_tree_large_no_box_ul', render, -128.708, -23.9096, 0.2, 1, 1, 1, 1, 1, 1)
        self.modelloadercopyto('Tree10', 'phase_3.5/models/props/trees.bam', 'prop_tree_large_no_box_ul', render, -52.4323, -73.2793, 0.2, 1, 1, 1, 1, 1, 1)
        self.modelloadercopyto('Tree11', 'phase_3.5/models/props/trees.bam', 'prop_tree_fat_no_box_ul', render, 7.00708, -99.2181, 2.725, 1, 1, 1, 1, 1, 1)
        self.modelloadercopyto('Tree12', 'phase_3.5/models/props/trees.bam', 'prop_tree_small_no_box_ul', render, 96.5467, -145.522, 2.725, 1, 1, 1, 1, 1, 1)
        self.modelloadercopyto('Tree13', 'phase_3.5/models/props/trees.bam', 'prop_tree_small_no_box_ul', render, 119.57, -127.05, 2.725, 1, 1, 1, 1, 1, 1)
        self.modelloadercopyto('Tree14', 'phase_3.5/models/props/trees.bam', 'prop_tree_small_no_box_ul', render, 128.064, -60.4145, 2.725, 1, 1, 1, 1, 1, 1)
        self.modelloadercopyto('Tree15', 'phase_3.5/models/props/trees.bam', 'prop_tree_small_no_box_ul', render, 121.146, -45.0892, 2.725, 1, 1, 1, 1, 1, 1)
        self.modelloadercopyto('Tree16', 'phase_3.5/models/props/trees.bam', 'prop_tree_small_no_box_ul', render, 113.503, -57.8055, 2.725, 1, 1, 1, 1, 1, 1)
    def modelloader(self, nodename, modelpath, renderparent, x, y, z, h, p, r, scale1, scale2, scale3):
        """Load a model, parent it, place it, and register it in modeldict."""
        self.modeldict[nodename] = loader.loadModel(modelpath)
        self.modeldict[nodename].reparentTo(renderparent)
        self.modeldict[nodename].setPos(x, y, z)
        self.modeldict[nodename].setHpr(h, p, r)
        self.modeldict[nodename].setScale(scale1, scale2, scale3)
    def modelloadercopyto(self, nodename, modelpath, findmodel, renderparent, x, y, z, h, p, r, scale1, scale2, scale3):
        """Load a model, copy one named sub-node out of it to renderparent, and place the copy."""
        self.modeldict[nodename] = loader.loadModel(modelpath)
        self.modeldict[nodename] = self.modeldict[nodename].find('**/' + findmodel).copyTo(renderparent)
        self.modeldict[nodename].setPos(x, y, z)
        self.modeldict[nodename].setHpr(h, p, r)
        self.modeldict[nodename].setScale(scale1, scale2, scale3)
    def modelloaderanimate(self, nodename, modelpath, animatepath, renderparent, x, y, z, h, p, r, scale1, scale2, scale3, animation):
        """Load an Actor with one named animation, place it, and loop the animation."""
        self.modeldict[nodename] = Actor(modelpath, {animation: animatepath})
        self.modeldict[nodename].reparentTo(renderparent)
        self.modeldict[nodename].setPos(x, y, z)
        self.modeldict[nodename].setHpr(h, p, r)
        self.modeldict[nodename].setScale(scale1, scale2, scale3)
        self.modeldict[nodename].loop(animation)
    def textloader(self, nodename, Textnodename, Textname, Textdata, Fonttype, renderparent, x, y, z, h, p, r, scale1, scale2, scale3, color1, color2, color3):
        """Create a TextNode sign, attach it under renderparent, and place/color it.

        NOTE(review): the ``Textname`` parameter is immediately overwritten on
        the first line below, so the argument passed in is never used.
        """
        Textname = TextNode(Textnodename)
        Textname.setText(Textdata)
        Textname.setFont(Fonttype)
        self.modeldict[nodename] = renderparent.attachNewNode(Textname)
        self.modeldict[nodename].setPos(x, y, z)
        self.modeldict[nodename].setHpr(h, p, r)
        self.modeldict[nodename].setScale(scale1, scale2, scale3)
        self.modeldict[nodename].setColor(color1, color2, color3)
    def modelloaderstreetlight(self, nodename, modelpath, renderparent, x, y, z, h, p, r, scale1, scale2, scale3):
        """Load and place a street light, hiding the unwanted lamp-variant sub-nodes."""
        self.modeldict[nodename] = loader.loadModel(modelpath)
        self.modeldict[nodename].reparentTo(renderparent)
        self.modeldict[nodename].setPos(x, y, z)
        self.modeldict[nodename].setHpr(h, p, r)
        self.modeldict[nodename].setScale(scale1, scale2, scale3)
        self.modeldict[nodename].find('**/prop_post_light_base').hide()
        self.modeldict[nodename].find('**/p1').hide()
        self.modeldict[nodename].find('**/prop_post_one_light').hide()
        self.modeldict[nodename].find('**/p13').hide()
# On-screen text that displays whatever the player types in the chat entry.
BTFont = loader.loadFont('phase_3/models/fonts/MickeyFont.bam')
bk_text = ' '
textObject = OnscreenText(text=bk_text, pos=(0.95, -0.95), scale=0.07, fg=(1,
 0.5,
 0.5,
 1), align=TextNode.ACenter, mayChange=1)
textObject.setFont(BTFont)
def setText(textEntered):
    """Chat-entry commit callback: show the typed text, then close the entry."""
    textObject.setText(textEntered)
    if not b:
        return
    b.hide()
    ImgBtn2.show()
def clearText():
    """Focus-in callback: blank out the chat entry's placeholder text."""
    if not b:
        return
    b.enterText('')
def openChatGui():
    """Chat-button callback: reveal the text entry and hide the open-chat button."""
    if not b:
        return
    b.show()
    ImgBtn2.hide()
# Chat GUI: a hidden DirectEntry plus a button that opens it.
chatGui = loader.loadModel('phase_3.5/models/gui/chat_input_gui.bam')
b = DirectEntry(text='', scale=0.05, command=setText, initialText='Type Something', numLines=3, focus=1, focusInCommand=clearText)
b.hide()
# NOTE(review): chat_input_gui.bam is reloaded several times below and both
# ImgBtn2 and b are assigned/positioned twice; only the last assignments win,
# but the first DirectButton instance is still created and parented on screen.
chatGui = loader.loadModel('phase_3.5/models/gui/chat_input_gui.bam')
chatGui = loader.loadModel('phase_3.5/models/gui/chat_input_gui.bam')
ImgBtn2 = DirectButton(frameSize=None, text=' ', image=(chatGui.find('**/ChtBx_ChtBtn_UP'), chatGui.find('**/ChtBx_ChtBtn_DN'), chatGui.find('**/ChtBx_ChtBtn_RLVR')), relief=None, command=openChatGui, text_pos=(2, -0.325), geom=None, pad=(0.01, 0.01), suppressKeys=0, pos=(-1.21, -1, 0.9), text_scale=1, borderWidth=(0.015, 0.01))
b.setPos(-1.21, -2, 5)
chatGui = loader.loadModel('phase_3.5/models/gui/chat_input_gui.bam')
ImgBtn2 = DirectButton(frameSize=None, text=' ', image=(chatGui.find('**/ChtBx_ChtBtn_UP'), chatGui.find('**/ChtBx_ChtBtn_DN'), chatGui.find('**/ChtBx_ChtBtn_RLVR')), relief=None, command=openChatGui, text_pos=(2, -0.325), geom=None, pad=(0.01, 0.01), suppressKeys=0, pos=(-1.1, -1, 0.9), text_scale=0.06, color=(0, 1, 0), borderWidth=(0.015, 0.01))
b.setPos(-1.21, -2, 0.75)
# Floating billboarded name tag attached above the avatar's head.
Font = loader.loadFont('phase_3/models/fonts/Courier.bam')
tag = OnscreenText(scale=0.5, text='Smirky Superchomp', bg=(0.9,
 0.9,
 0.9,
 0.3), fg=(0.35,
 0.35,
 0.95,
 1), decal=True)
tag.wrtReparentTo(DuckBody)
tag.setBillboardAxis()
tag.setPos(0, 0)
tag.setDepthTest(True)
tag.setDepthWrite(True)
tag.reparentTo(DuckBody)
# Lift the tag relative to its own frame, based on the head joint's height.
tag.setZ(tag, DuckBody.find('**/__Actor_head').getZ(DuckBody) + -1)
tag.reparentTo(DuckBody.find('**/def_head'))
tag.setFont(BTFont)
# Spinning Toon Resistance fist icon worn on the head.
fist = loader.loadModel('phase_3.5/models/gui/tt_m_gui_gm_toonResistance_fist.bam')
fist.reparentTo(DuckBody.find('**/def_head'))
fist.setPos(0, 0, 0)
fist.setScale(2.3)
fist.find('**/gmPartyHat').remove()
ttHatSpin = fist.find('**/fistIcon').hprInterval(3, Vec3(360, 0, 0))
ttHatSpin.loop()
# Fedora and glasses accessories, both parented to the head joint.
fedora = loader.loadModel('phase_4/models/accessories/tt_m_chr_avt_acc_hat_fedora.bam')
fedora.reparentTo(DuckBody.find('**/def_head'))
fedora.setScale(0.35)
fedora.setZ(0.75)
fedora.setH(180)
nerdglasses = loader.loadModel('phase_4/models/accessories/tt_m_chr_avt_acc_msk_squareRims.bam')
nerdglasses.reparentTo(DuckBody.find('**/def_head'))
nerdglasses.setH(180)
nerdglasses.setScale(0.45)
nerdglasses.setZ(0.2)
nerdglasses.setY(0.05)
# Two extra street-building facades placed near the playground edge.
CS1 = loader.loadModel('phase_5/models/modules/TT_A2.bam')
CS1.reparentTo(render)
CS1.setH(138.37)
CS1.setX(-109.07)
CS1.setY(-92.27)
CS2 = loader.loadModel('phase_5/models/modules/TT_A3.bam')
CS2.reparentTo(render)
CS2.setH(104.93)
CS2.setX(-132.65)
CS2.setY(-74.96)
def TP1():
    """Button callback: teleport the avatar up to the SpeedWay area."""
    # Same destination as setting X, Y and Z individually.
    DuckBody.setPos(-1.00264, 59.6964, 500.89)
# GUI button that teleports the avatar to the elevated SpeedWay area (z ~ 500).
ButtonImage = loader.loadModel('phase_3/models/gui/quit_button.bam')
ImgBtn11 = DirectButton(frameSize=None, text='SpeedWay', image=(ButtonImage.find('**/QuitBtn_UP'), ButtonImage.find('**/QuitBtn_DN'), ButtonImage.find('**/QuitBtn_RLVR')), relief=None, command=TP1, text_pos=(0, -0.015), geom=None, pad=(0.01, 0.01), suppressKeys=0, pos=(-0.05, 0, 0.95), text_scale=0.059, borderWidth=(0.13, 0.01), scale=0.7, color=(0, 1, 0))
# SpeedWay environment, floated at z=500 so it coexists with the playground.
environ = loader.loadModel('phase_6/models/karting/GasolineAlley_TT.bam')
environ.reparentTo(render)
environ.setZ(500)
# Exit tunnel with orange sign and Goofy safe-zone logo.
tunnel = loader.loadModel('phase_4/models/modules/safe_zone_tunnel_TT.bam')
tunnel.reparentTo(render)
tunnel.setPos(60, 175, 493)
tunnel.setHpr(180, 0, 0)
tunnel.setScale(1)
tunnelsign = loader.loadModel('phase_3.5/models/props/tunnel_sign_orange.bam')
tunnelsign.reparentTo(tunnel)
tunnelsign.setPos(60, 95.01, 523.7)
tunnelsign.setHpr(180, 0, 0)
tunnelsign.setScale(1.6)
SZsign = loader.loadModel('phase_4/models/props/goofySZ.bam')
SZsign.reparentTo(tunnel)
SZsign.setPos(60, 95.025, 523.7)
SZsign.setHpr(180, 0, 0)
SZsign.setScale(4)
# Kart shop and surrounding props.
kartshop = loader.loadModel('phase_6/models/karting/kartShop.bam')
kartshop.reparentTo(render)
kartshop.setPos(0, 10, 500)
scoreboard = loader.loadModel('phase_6/models/karting/tt_m_ara_gfs_leaderBoardCrashed.bam')
scoreboard.reparentTo(render)
scoreboard.setPos(1, -111, 500)
scoreboard.setHpr(180, 0, 0)
wrench = loader.loadModel('phase_6/models/karting/KartArea_WrenchJack.bam')
wrench.reparentTo(render)
wrench.setPos(-33, 5, 500)
wrench.setHpr(180, 0, 0)
tires = loader.loadModel('phase_6/models/karting/KartArea_Tires.bam')
tires.reparentTo(render)
tires.setPos(33, 5, 500)
# Tree bases lining the stadium.
trees1 = loader.loadModel('phase_6/models/karting/GoofyStadium_TreeBase.bam')
trees1.reparentTo(render)
trees1.setPos(-13, 58, 499.7)
trees1.setScale(12)
trees2 = loader.loadModel('phase_6/models/karting/GoofyStadium_TreeBase.bam')
trees2.reparentTo(render)
trees2.setPos(13, 58, 499.7)
trees2.setScale(12)
trees3 = loader.loadModel('phase_6/models/karting/GoofyStadium_TreeBase.bam')
trees3.reparentTo(render)
trees3.setPos(-13, -35, 499.7)
trees3.setScale(12)
trees4 = loader.loadModel('phase_6/models/karting/GoofyStadium_TreeBase.bam')
trees4.reparentTo(render)
trees4.setPos(13, -35, 499.7)
trees4.setScale(12)
trees5 = loader.loadModel('phase_6/models/karting/GoofyStadium_TreeBase.bam')
trees5.reparentTo(render)
trees5.setPos(-10, -76, 499.7)
trees5.setScale(12)
trees6 = loader.loadModel('phase_6/models/karting/GoofyStadium_TreeBase.bam')
trees6.reparentTo(render)
trees6.setPos(10, -76, 499.7)
trees6.setScale(12)
# Lampposts, mailbox, flags and signage.
light1 = loader.loadModel('phase_6/models/karting/GoofyStadium_Lamppost_Base1.bam')
light1.reparentTo(render)
light1.setPos(-10, -52, 499.3)
light1.setScale(14)
light2 = loader.loadModel('phase_6/models/karting/GoofyStadium_Lamppost_Base1.bam')
light2.reparentTo(render)
light2.setPos(10, -52, 499.3)
light2.setScale(14)
box = loader.loadModel('phase_6/models/karting/GoofyStadium_Mailbox.bam')
box.reparentTo(render)
box.setPos(16, -50, 500)
box.setHpr(210, 0, 0)
box.setScale(10)
flag1 = loader.loadModel('phase_6/models/karting/flag.bam')
flag1.reparentTo(render)
flag1.setPos(-18, 6, 499.8)
flag2 = loader.loadModel('phase_6/models/karting/flag.bam')
flag2.reparentTo(render)
flag2.setPos(18, 6, 499.8)
sign = loader.loadModel('phase_6/models/karting/KartShowBlockSign.bam')
sign.reparentTo(render)
sign.setPos(-16, -50, 500)
sign.setHpr(-120, 0, 0)
sign.setScale(26)
# Announcer booths around the track.
announcer1 = loader.loadModel('phase_6/models/karting/announcer.bam')
announcer1.reparentTo(render)
announcer1.setPos(25, -150, 499.3)
announcer1.setHpr(-140, 0, 0)
announcer2 = loader.loadModel('phase_6/models/karting/announcer.bam')
announcer2.reparentTo(render)
announcer2.setPos(-26, -149, 499.3)
announcer2.setHpr(-212, 0, 0)
announcer3 = loader.loadModel('phase_6/models/karting/announcer.bam')
announcer3.reparentTo(render)
announcer3.setPos(-38, -135, 499.3)
announcer3.setHpr(-212, 0, 0)
announcer4 = loader.loadModel('phase_6/models/karting/announcer.bam')
announcer4.reparentTo(render)
announcer4.setPos(37, -137.5, 499.3)
announcer4.setHpr(-140, 0, 0)
# Traffic cones scattered as set dressing (some tipped over via Hpr).
cone1 = loader.loadModel('phase_6/models/karting/cone.bam')
cone1.reparentTo(render)
cone1.setPos(13, -4, 499.7)
cone2 = loader.loadModel('phase_6/models/karting/cone.bam')
cone2.reparentTo(render)
cone2.setPos(13, 20, 499.7)
cone3 = loader.loadModel('phase_6/models/karting/cone.bam')
cone3.reparentTo(render)
cone3.setPos(-14, 18, 499.7)
cone4 = loader.loadModel('phase_6/models/karting/cone.bam')
cone4.reparentTo(render)
cone4.setPos(-14, -3, 499.7)
cone5 = loader.loadModel('phase_6/models/karting/cone.bam')
cone5.reparentTo(render)
cone5.setPos(-23, 9, 499.7)
cone6 = loader.loadModel('phase_6/models/karting/cone.bam')
cone6.reparentTo(render)
cone6.setPos(45, -138, 499.4)
cone7 = loader.loadModel('phase_6/models/karting/cone.bam')
cone7.reparentTo(render)
cone7.setPos(25, -109, 500)
cone8 = loader.loadModel('phase_6/models/karting/cone.bam')
cone8.reparentTo(render)
cone8.setPos(24, -111, 500)
cone8.setHpr(45, 0, 0)
cone9 = loader.loadModel('phase_6/models/karting/cone.bam')
cone9.reparentTo(render)
cone9.setPos(75, -106, 500)
cone9.setHpr(0, 0, -120)
cone10 = loader.loadModel('phase_6/models/karting/cone.bam')
cone10.reparentTo(render)
cone10.setPos(76.5, -107.5, 500)
cone10.setHpr(0, 120, 0)
cone11 = loader.loadModel('phase_6/models/karting/cone.bam')
cone11.reparentTo(render)
cone11.setPos(26, -154, 499.3)
cone11.setHpr(42, 0, 0)
cone12 = loader.loadModel('phase_6/models/karting/cone.bam')
cone12.reparentTo(render)
cone12.setPos(1, -187, 501.22)
cone12.setHpr(42, 0, 0)
# Crates, one stacked on top of two others.
krate1 = loader.loadModel('phase_6/models/karting/krate.bam')
krate1.reparentTo(render)
krate1.setPos(1, -187, 499.3)
krate1.setScale(1.2)
krate2 = loader.loadModel('phase_6/models/karting/krate.bam')
krate2.reparentTo(render)
krate2.setPos(-48, -115, 499.3)
krate2.setScale(1.2)
krate3 = loader.loadModel('phase_6/models/karting/krate.bam')
krate3.reparentTo(render)
krate3.setPos(-50, -113, 499.3)
krate3.setHpr(45, 0, 0)
krate3.setScale(1.2)
krate4 = loader.loadModel('phase_6/models/karting/krate.bam')
krate4.reparentTo(render)
krate4.setPos(-49, -114, 501.22)
krate4.setHpr(60, 0, 0)
krate4.setScale(1.2)
def TP2():
    """Button callback: teleport the avatar back to Toontown Central at the origin."""
    # Same destination as setting Z, X and Y individually.
    DuckBody.setPos(0, 0, 0)
# GUI button that teleports the avatar back to the playground floor.
ButtonImage = loader.loadModel('phase_3/models/gui/quit_button.bam')
ImgBtn11 = DirectButton(frameSize=None, text='TTC', image=(ButtonImage.find('**/QuitBtn_UP'), ButtonImage.find('**/QuitBtn_DN'), ButtonImage.find('**/QuitBtn_RLVR')), relief=None, command=TP2, text_pos=(0, -0.015), geom=None, pad=(0.01, 0.01), suppressKeys=0, pos=(1, 0, 0.95), text_scale=0.059, borderWidth=(0.13, 0.01), scale=0.7, color=(0, 1, 0))
# Wandering Mickey NPC with a collision sphere and a looping patrol path.
lord = Actor('phase_3/models/char/mickey-1200.bam', {'walk': 'phase_3/models/char/mickey-walk.bam'})
lord.reparentTo(render)
lord.loop('walk')
lord.setX(106.58)
lord.setY(-1.37)
lord.setZ(4.46)
lord.setH(104.62)
cs = CollisionSphere(0, 0, 1, 3)
cnodePath = lord.attachNewNode(CollisionNode('cnode'))
cnodePath.node().addSolid(cs)
pandaPosInterval1 = lord.posInterval(3, Point3(96.3312, 0.553801, 4.025), startPos=Point3(96.3312, 0.553801, 4.025))
pandaHprInterval1 = lord.hprInterval(3, Point3(96.3312, 0.553801, 4.025), startHpr=Point3(96.3312, 0.553801, 4.025))
pandaPosInterval2 = lord.posInterval(3, Point3(54.1032, 10.1371, 4.025), startPos=Point3(96.3312, 0.553801, 4.025))
pandaHprInterval2 = lord.hprInterval(3, Point3(172.798, 0, 0), startHpr=Point3(96.3312, 0.553801, 4.025))
pandaPosInterval3 = lord.posInterval(3, Point3(62.9905, -21.4791, 6.05112), startPos=Point3(54.1032, 10.1371, 4.025))
pandaHprInterval3 = lord.hprInterval(3, Point3(438.492, 0, 0), startHpr=Point3(172.798, 0, 0))
lord.pandaPace = Sequence(pandaPosInterval1, pandaHprInterval1, pandaPosInterval2, pandaHprInterval2, pandaPosInterval3, pandaHprInterval3)
lord.pandaPace.loop()
# Build the whole TTC scene (this rebinds the module-level name `environ`,
# shadowing the speedway model loaded earlier; that NodePath stays parented).
environ = EnvironmentTTC()
base.taskMgr.add(updateOnScreenDebug, 'UpdateOSD')
# Billboarded name tag over Mickey, mirroring the avatar-tag setup above.
tag2 = OnscreenText(scale=2, text='Mickey', bg=(0.9,
 0.9,
 0.9,
 0.3), fg=(0.35,
 0.35,
 0.95,
 1), decal=True)
tag2.wrtReparentTo(lord)
tag2.setBillboardAxis()
tag2.setPos(0, 0)
tag2.setDepthTest(True)
tag2.setDepthWrite(True)
tag2.reparentTo(lord)
# BUGFIX: the Z offset must be applied relative to tag2's own coordinate
# space (the original passed the avatar's `tag` node here, a copy-paste
# error from the DuckBody tag setup).
tag2.setZ(tag2, lord.find('**/joint_pupilL').getZ(lord) + 1)
tag2.reparentTo(lord.find('**/joint_pupilL'))
tag2.setFont(BTFont)
tag2.setColor(1, 0.1, 0.1, 1.0)
# Tear down the title screen (created earlier in the file) and start the engine.
title.destroy()
base.oobe()
run()
| ronanwow1001/Toontown-1 | Landwalker Example.py | Landwalker Example.py | py | 63,531 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandac.PandaModules.loadPrcFileData",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "direct.actor.Actor.Actor",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "direct.controls.GravityWalker.GravityWalker",
"line_number": 427,
"usage_typ... |
14570657497 | from kivy.app import App
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.graphics import Color, Line, Rectangle
from image_to_text import get_text, train_model
import cv2
from string import ascii_uppercase as UC
class DrawWidget(RelativeLayout):
    """White drawing surface: each touch drag paints a black line on the canvas."""

    def __init__(self, **kwargs):
        super(DrawWidget, self).__init__(**kwargs)
        # White background rectangle, kept in sync with the widget size.
        with self.canvas:
            Color(*(1, 1, 1), mode="rgb")
            self.rect = Rectangle(size=self.size, pos=self.pos)
        self.bind(size=self.update_rect)

    def on_touch_down(self, touch):
        # BUGFIX: only start a stroke for touches inside this widget; the
        # original drew a dot even when the user pressed the buttons below.
        if not self.collide_point(touch.x, touch.y):
            return super(DrawWidget, self).on_touch_down(touch)
        with self.canvas:
            Color(*(0, 0, 0), mode="rgb")
            x, y = self.to_local(x=touch.x, y=touch.y)
            # Stash the Line on the touch so on_touch_move can extend it.
            touch.ud["line"] = Line(points=(x, y), width=2.5)

    def on_touch_move(self, touch):
        # BUGFIX: a touch that never started a stroke here (e.g. it began on a
        # button) has no 'line' entry -- the original raised KeyError.
        if "line" not in touch.ud:
            return super(DrawWidget, self).on_touch_move(touch)
        x, y = self.to_local(x=touch.x, y=touch.y)
        touch.ud["line"].points += [x, y]

    def update_rect(self, instance, value):
        """Size-change callback: keep the white background covering the widget."""
        self.rect.size = self.size
        self.rect.pos = self.pos
class DrawApp(App):
    """Handwriting demo: draw letters on the left, convert/train on the right."""

    def build(self):
        """Assemble the UI: draw area, clear/convert buttons, result label, train row."""
        self.title = 'Convert To Text'
        parent = RelativeLayout()
        self.draw = DrawWidget(size_hint=(0.5, 0.8), pos_hint={'x': 0, 'y': 0.2})
        clear_btn = Button(size_hint=(0.5, 0.1), text="Clear", pos_hint={'x': 0, 'y': 0.1})
        clear_btn.bind(on_release=self.clear_canvas)
        convert_btn = Button(size_hint=(0.5, 0.1), text="Convert to text", pos_hint={'x': 0.5, 'y': 0.1})
        convert_btn.bind(on_release=self.convert)
        # Right-hand label that displays the recognized characters.
        self.label = Label(size_hint=(0.5, 0.9), pos_hint={'x': 0.5, 'y': 0.2})
        label1 = Label(size_hint=(0.3, 0.1), pos_hint={"x": 0, "y": 0}, text="Wrong conversion? Type in correct capital letters comma separated and train")
        # Wrap the hint text to the label's width.
        label1.bind(width=lambda *x: label1.setter('text_size')(label1, (label1.width, None)), texture_size=lambda *x: label1.setter('height')(label1, label1.texture_size[1]))
        self.inp_txt = TextInput(size_hint=(0.4, 0.1), pos_hint={"x": 0.3, "y": 0})
        # Training only makes sense after a conversion has produced images.
        self.train_btn = Button(size_hint=(0.3, 0.1), pos_hint={"x": 0.7, "y": 0}, text="Train", disabled=True)
        self.train_btn.bind(on_release=self.train)
        parent.add_widget(self.draw)
        parent.add_widget(self.label)
        parent.add_widget(clear_btn)
        parent.add_widget(convert_btn)
        parent.add_widget(label1)
        parent.add_widget(self.inp_txt)
        parent.add_widget(self.train_btn)
        return parent

    def clear_canvas(self, obj):
        """Clear button: wipe all strokes and repaint the white background."""
        self.draw.canvas.clear()
        with self.draw.canvas:
            Color(*(1, 1, 1), mode="rgb")
            self.draw.rect = Rectangle(size=self.draw.size, pos=(0, 0))
        # BUGFIX: do NOT re-bind update_rect here; DrawWidget.__init__ already
        # bound it, and re-binding on every clear stacked duplicate callbacks.
        self.train_btn.disabled = True

    def convert(self, obj):
        """Convert button: export the canvas to a PNG and run recognition on it."""
        self.train_btn.disabled = False
        self.draw.export_to_png("draw.png")
        img = cv2.imread("draw.png")
        # Keep the per-character images so a later train() can use them.
        self.lets, self.imgs = get_text(img)
        self.label.text = " ".join(self.lets)

    def train(self, obj):
        """Train button: parse the corrected letters and retrain the model."""
        letters = self.inp_txt.text.replace(" ", "").split(",")
        chars = list(UC)
        try:
            labels = [chars.index(ch) for ch in letters]
        except ValueError:
            # BUGFIX: the original raised an unhandled ValueError on any
            # non-A-Z entry; report it instead.
            self.label.text = "Train input must be capital A-Z letters, comma separated"
            return
        if len(labels) == len(self.imgs):
            train_model(self.imgs, labels)
        else:
            # BUGFIX: the original silently did nothing on a count mismatch.
            self.label.text = "Expected %d labels, got %d" % (len(self.imgs), len(labels))
if __name__ == "__main__":
DrawApp().run()
| ShantanuShinde/Character-Recognition-with-CNN | Character Recognition App/paintapp.py | paintapp.py | py | 3,519 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "kivy.uix.relativelayout.RelativeLayout",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "kivy.graphics.Color",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "kivy.graphics.Rectangle",
"line_number": 23,
"usage_type": "call"
},
{
... |
13768273606 | from os import stat
from flask import Flask, Response
from flask_restx import Resource, Api, reqparse
import random
import json
import os
from werkzeug.exceptions import BadHost, BadRequest
app = Flask(__name__)
api = Api(app)
# This route returns the given value plus one.
@api.route('/plus_one/<int:number>')
# NOTE(review): the documented param is named "x" but the route param is
# "number" -- the swagger doc looks stale; confirm.
@api.doc(params={"x": "Must be an integer."})
class Add(Resource):
    def get(self, number):
        """Return the path integer incremented by one."""
        return {'value': number+1}
# This route returns the squared value; it also demonstrates passing two
# query-string parameters at once.
@api.route('/square')
@api.doc(params={"int": "Must be an integer", "email": "Must be a string"}, location="query")
class Square(Resource):
    def get(self):
        """Return the square of the 'int' query param, echoing 'email'."""
        parser = reqparse.RequestParser()
        parser.add_argument('int', type=int)
        parser.add_argument('email', type=str)
        args = parser.parse_args()
        # NOTE(review): 'int' is not required=True, so a missing param yields
        # None and this line raises TypeError (500) -- confirm intended.
        return {'value': args['int'] ** 2, 'email': args['email']}
# This route takes the user's choice as an int and returns the game outcome.
@api.route('/game/<int:choice>')
@api.doc(params={"choice": "1: Pierre \n 2: Papier \n 3: Ciseaux \n 4: Lézard \n 5: Spock"})
class Game(Resource):
    """Rock-Paper-Scissors-Lizard-Spock round against the computer.

    GET /game/<choice> plays one round (choice in 1..5), records the user's
    pick in stats.json, and returns both picks plus a win/lose message.
    """

    # _RESULT[user_index][computer_index] is True when the user wins.
    _RESULT = {0: {1: True, 2: False, 3: True, 4: False},
               1: {0: True, 2: False, 3: False, 4: True},
               2: {0: False, 1: True, 3: True, 4: False},
               3: {0: False, 1: True, 2: False, 4: True},
               4: {0: True, 1: False, 2: True, 3: False}}

    @staticmethod
    def _record_stat(user_choice):
        """Increment the play counter for user_choice in stats.json.

        The original code duplicated this read/modify/write block in both
        the win and the lose branch; it is hoisted here once.
        """
        with open('stats.json') as json_file:
            data = json.load(json_file)
        data[user_choice] = int(data[user_choice]) + 1
        with open('stats.json', 'w') as json_file:
            json.dump(data, json_file)

    def get(self, choice):
        """Play one round; 400 when choice is outside 1..5."""
        liste_choices = ["Pierre", "Papier", "Ciseaux", "Lézard", "Spock"]

        def choice_computer(index):
            # The computer never mirrors the user's pick: the user's option
            # is removed before drawing uniformly from the rest.
            possibilities = ["Pierre", "Papier", "Ciseaux", "Lézard", "Spock"]
            possibilities.remove(possibilities[index])
            return possibilities[random.randint(0, len(possibilities) - 1)]

        if choice not in [1, 2, 3, 4, 5]:
            return Response(
                "Send a number between 1 and 5 !",
                status=400,
            )
        user_choice = liste_choices[choice - 1]
        computer_choice = choice_computer(choice - 1)
        index_computer = liste_choices.index(computer_choice)
        # Stats were updated identically in both branches before; do it once.
        self._record_stat(user_choice)
        if self._RESULT[choice - 1][index_computer]:
            message = "Vous avez gagné !"
        else:
            message = "Vous avez perdu."
        return {"ordinateur": computer_choice, "user": user_choice, "message": message}
@api.route('/stats')
class Stats(Resource):
    """Expose the accumulated per-choice play counters from stats.json."""
    def get(self):
        """Return the parsed contents of stats.json next to this module."""
        SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
        json_url = os.path.join(SITE_ROOT, "stats.json")
        # Use a context manager so the handle is always closed; the original
        # json.load(open(...)) leaked the file handle.
        with open(json_url) as fh:
            data = json.load(fh)
        return data
if __name__ == '__main__':
    # Development server only; debug=True must not be used in production.
    app.run(debug=True)
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask_restx.Api",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask_restx.Resource",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "flask_restx.Resource... |
22216786293 | import bpy
import bmesh
import os
#function for changing the topology of the object by triangulating it
def triangulate_object(obj):
    """Triangulate obj's mesh in place via a temporary BMesh.

    Quads and n-gons are converted to triangles; the result is written
    back to the same mesh datablock.
    """
    me = obj.data
    # Get a BMesh representation
    bm = bmesh.new()
    bm.from_mesh(me)

    # quad_method/ngon_method 0 are Blender's default split strategies.
    bmesh.ops.triangulate(bm, faces=bm.faces[:], quad_method=0, ngon_method=0)

    # Finish up, write the bmesh back to the mesh
    bm.to_mesh(me)
    bm.free()
#retrieves the radi for the Envelope based elip
def min_max_axis(object):
    """Append the world-space bounding-box half-extents of *object* to E_radi.

    For each axis (X, Y, Z) the half of (max - min) over all transformed
    vertex coordinates is appended, in that order, to the module-level
    list E_radi -- same side effect as the original implementation.

    NOTE(review): uses Blender's pre-2.8 `matrix * vector` operator
    (2.8+ requires `@`) -- confirm against the target Blender version.
    """
    coords = [(object.matrix_world * v.co) for v in object.data.vertices]
    # The original sorted each full coordinate list just to read its first
    # and last element; min()/max() give the same values in O(n).
    for axis in range(3):
        values = [vert[axis] for vert in coords]
        E_radi.append(abs(max(values) - min(values)) / 2)
#create folder and move blend file into it
file = bpy.path.basename(bpy.context.blend_data.filepath)
# NOTE(review): str.strip('.blend') strips *characters*, not the suffix --
# a name like "blend.blend" would collapse to "" -- confirm file naming.
filename = file.strip('.blend')
mom_folder = '/Users/rileysterman/Desktop/blender objects/pre-pre-animation/'
object_folder = mom_folder + filename
os.makedirs(object_folder)
os.rename(bpy.context.blend_data.filepath, object_folder + '/' + file)
#identify object of interest,create lists of elip radi, and reset its location to the origin, and set the center to be based on mass volume
ob = bpy.data.objects[0]
E_radi = []
ob.location = (0,0,0)
#ob.origin_set(type='ORIGIN_CENTER_OF_VOLUME')
#triangulate the object
triangulate_object(ob)
#export stl file of triangulated object into the object folder
stl_path = object_folder + '/' + filename + '.stl'
bpy.ops.export_mesh.stl(filepath=stl_path)
#export fbx file
fbx_path = object_folder + '/' + filename + '.fbx'
bpy.ops.export_scene.fbx(filepath =fbx_path)
# E_radi
min_max_axis(ob)
#noramlize E_radi
# Normalize the three radii so they sum to 1 before writing them out.
radi_sum = E_radi[0] + E_radi[1] + E_radi[2]
E_radi[0] = E_radi[0] /radi_sum
E_radi[1] = E_radi[1] /radi_sum
E_radi[2] = E_radi[2] /radi_sum
# Persist the normalized radii as a '~'-separated record named E<filename>.
E = open(object_folder +'/'+ 'E' + filename, 'w+')
E.write(str(E_radi[0]) + '~' + str(E_radi[1]) + '~' + str(E_radi[2]))
E.close()
| whorticulterist/RotationSimulation | blender objects/python_scripts/Initial_Individual_Processing.py | Initial_Individual_Processing.py | py | 2,397 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "bmesh.new",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "bmesh.ops.triangulate",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "bmesh.ops",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "bpy.path.basename",
... |
41145576381 | from calendar import c
from pyexpat import model
from re import L
import numpy as np
import pandas as pd
## ---- Load the raw COMPAS datasets ----
compas_scores_raw= pd.read_csv("compas_score_raw.csv", lineterminator='\n')
compas_scores_two_year= pd.read_csv("compas_scores_two_years.csv", lineterminator='\n')
print('-----------------Compas Scores Raw-----------------')
print('type',type(compas_scores_raw))
print('-----------------Compas Scores Two Year-----------------')
print('type',type(compas_scores_two_year))
#number of rows and columns
print('-----------------Compas Scores Raw-----------------')
print('shape',compas_scores_raw.shape)
print('-----------------Compas Scores Two Year-----------------')
print('shape',compas_scores_two_year.shape)
# fitlering the data with the following conditions
# 1. if charge data was not within 30 days of arrest
# 2. c_charge_degree is not missing
# 3. score_text is not missing
# 4. is_recid is not missing -1 means missing
print('-----------------Compas Scores two year-----------------')
# NOTE(review): the last column name contains a literal carriage return
# ('two_year_recid\r'), presumably from Windows line endings in the CSV --
# confirm before renaming.
df= compas_scores_two_year[[ 'age', 'c_charge_degree','race', 'age_cat', 'score_text', 'sex', 'priors_count', 'days_b_screening_arrest', 'decile_score', 'is_recid', 'c_jail_in', 'c_jail_out', 'v_decile_score','two_year_recid\r']]
print(np.shape(df))
df = df.loc[(df['days_b_screening_arrest'] <= 30) & (df['days_b_screening_arrest'] >= -30) & (df['is_recid'] != -1) & (df['c_charge_degree'] != 'O') & (df['score_text'] != 'N/A')]
print('shape of filtered data',df.shape)
#length of stay in jail
df['length_of_stay'] = pd.to_datetime(df['c_jail_out']) - pd.to_datetime(df['c_jail_in'])
df['length_of_stay'] = df['length_of_stay'].astype('timedelta64[D]')
df['length_of_stay'] = df['length_of_stay'].astype(int)
print(df['length_of_stay'])
print('shape of filtered data',df.shape)
print('length of stay',df['length_of_stay'].describe())
#correlation between length of stay and decile score
print('correlation between length of stay and decile score',df['length_of_stay'].corr(df['decile_score']))
print('-----------------describe age-----------------')
print(df['age'].describe())
print('-----------------describe race----------------')
print(df['race'].describe())
print('-----------------race split-----------------')
## ---- Demographic breakdowns ----
race = ['African-American', 'Caucasian', 'Hispanic', 'Asian', 'Native American', 'Other']
for i in race :
    print( i,len(df[df['race']== i])/len(df['race']))
print ('-----------------describe score text----------------')
print('low ', len(df[df['score_text'] == 'Low']))
print('medium ', len(df[df['score_text'] == 'Medium']))
print('high ', len(df[df['score_text'] == 'High']))
#race and sex split
female = []
male = []
for i in race :
    temp = len(df[(df['race']== i) & (df['sex'] == 'Male')] )
    print(temp)
    male.append(temp)
    temp = len(df[(df['race']== i) & (df['sex'] == 'Female')])
    female.append(temp)
print(race)
print ('female', female)
print('male', male)
f = pd.crosstab(df['sex'], df['race'])
print('f',f)
# find decide score for african american
print('-----------------decile score for african american-----------------')
print(df[(df['race']) == 'African-American']['decile_score'].describe())
decile = [1,2,3,4,5,6,7,8,9,10]
# plot decide score for african american
import matplotlib.pyplot as plt
## ---- Histograms of decile scores by race ----
# bar plot for decide score for african american and caucasian
df_race_decile_score = df[['race', 'decile_score']]
df_african = df_race_decile_score[ df_race_decile_score['race'] == 'African-American']
df_caucasian = df_race_decile_score[ df_race_decile_score['race'] == 'Caucasian']
counts_decile_AA = []
counts_decile_C = []
temp = []
for i in decile:
    temp = len(df_african[df_african['decile_score'] == i])
    counts_decile_AA.append(temp)
    temp = len(df_caucasian[df_caucasian['decile_score'] == i])
    counts_decile_C.append(temp)
fig = plt.figure()
ax = fig.subplots(1,2)
ax[0].bar(decile, counts_decile_AA)
ax[0].set_title('African American')
ax[1].bar(decile, counts_decile_C)
ax[1].set_title('Caucasian')
ax[0].set_ylabel('Count')
ax[0].set_xlabel('Decile score')
ax[0].set_ylim(0, 650)
ax[1].set_ylabel('Count')
ax[1].set_xlabel('Decile score')
ax[1].set_ylim(0, 650)
plt.show()
# # plot volinent decide score for african american and caucasian
df_race_V_decile_score = df[['race', 'v_decile_score']]
df_african = df_race_V_decile_score[ df_race_V_decile_score['race'] == 'African-American']
df_caucasian = df_race_V_decile_score[ df_race_V_decile_score['race'] == 'Caucasian']
counts_decile_AA = []
counts_decile_C = []
temp = []
for i in decile:
    temp = len(df_african[df_african['v_decile_score'] == i])
    counts_decile_AA.append(temp)
    temp = len(df_caucasian[df_caucasian['v_decile_score'] == i])
    counts_decile_C.append(temp)
fig = plt.figure()
ax = fig.subplots(1,2)
ax[0].bar(decile, counts_decile_AA)
ax[0].set_title('African American')
ax[1].bar(decile, counts_decile_C)
ax[1].set_title('Caucasian')
ax[0].set_ylabel('Count')
ax[0].set_xlabel('Violent Decile score')
ax[0].set_ylim(0, 850)
ax[1].set_ylabel('Count')
ax[1].set_xlabel('Violent Decile score')
ax[1].set_ylim(0, 850)
plt.show()
## ---- Build factor variables for logistic regression ----
# create some factors for logistic regression
df_c_charge_degree = df[['c_charge_degree']]
df_age_cat = df[['age_cat']]
df_race = df[['race']]
df_sex = df[['sex']]
df_age_race = df[['race']]
df_score = df[['score_text']]
# df_c_charge_degree = pd.get_dummies(df_c_charge_degree)
# print('head', df_c_charge_degree.head())
#labels, uniques = pd.factorize(df_c_charge_degree)
#factorize df_c_charge_degree
crime_factor, u_charge_degree = pd.factorize(df_c_charge_degree['c_charge_degree'])
f_age_cat, u_age_cat= pd.factorize(df_age_cat['age_cat'])
#relevel age cat with reference = 1
f_age_cat = f_age_cat - 1
#factorize race
f_race_AA, u_race_AA= pd.factorize(df_age_race['race'] == 'African-American')
f_race_C, u_race = pd.factorize(df_age_race['race'] == 'Caucasian')
#relevel race with reference = 3
print('----------------race----------------')
print("Numeric Representation : \n", f_race_AA)
print("Unique Values : \n", u_race_AA)
#factorize gender with male and female labels
f_gender, uniques_gender = pd.factorize(df_sex['sex'])
print("Numeric Representation : \n", f_gender)
print("Unique Values : \n", uniques_gender)
#factorise score text
# Binary target: True when the COMPAS text score is Medium or High.
f_score_text, u_score_text = pd.factorize(df_score['score_text'] != 'Low')
print("Numeric Representation : \n", f_score_text)
print("size of f_score_text", len(f_score_text))
print("Unique Values : \n", u_score_text)
# create a new maxtrix with the factors
priors_count = df[['priors_count']]
two_year_recid = df[['two_year_recid\r']]
X = np.column_stack(( f_age_cat, crime_factor, f_race_AA, f_race_C, f_gender, priors_count, two_year_recid ))
## ---- Fit and report the logistic regression models ----
# build a binmal logistic regression model to explain the score text given the factors
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(penalty='l2', C=1)
model.fit(X, f_score_text)
ypred = model.predict(X)
#print summary
print('intercept', model.intercept_)
#print coefficients with corresponding factors
print('coefficients', model.coef_)
print('score', model.score(X, f_score_text))
#model accuracy
from sklearn.metrics import accuracy_score
print('accuracy', accuracy_score(f_score_text, ypred))
import statsmodels.api as sm
model = sm.GLM(f_score_text, X, family=sm.families.Binomial())
results = model.fit()
print(results.summary())
| dansmith5764/A-study-of-fairness-in-transfer-learning | Compas/Parse_1.py | Parse_1.py | py | 7,675 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"... |
38875746376 |
import torch
import torch.nn as nn
from torch.utils.cpp_extension import load
import os
import time
import random
import math
# Directory of this file; the C/CUDA sources live next to it.
cur_path = os.path.dirname(os.path.realpath(__file__))

# JIT-compile the CPU extension (OpenMP-enabled) on first import.
cpu_unsorted_segsum = load('cpu_unsorted_segsum',
                           [f'{cur_path}/cpu_extension.cc'],
                           extra_cflags=['-fopenmp', '-O3', '-march=native'],
                           extra_ldflags=['-lgomp', '-O3', '-march=native'],
                           verbose=False)

# Re-import by name so later references resolve to the built module.
import cpu_unsorted_segsum

if torch.cuda.is_available():
    # Only compile the CUDA variant when a GPU toolchain is present.
    cuda_unsorted_segsum = load('cuda_unsorted_segsum',
                                [f'{cur_path}/cuda_extension.cu'],
                                extra_cflags=['-fopenmp', '-O3', '-march=native'],
                                extra_ldflags=['-lgomp', '-O3', '-march=native'],
                                verbose=False)
    import cuda_unsorted_segsum
else:
    # Sentinel consulted by the dispatch code below.
    cuda_unsorted_segsum = None
    print('CUDA not available, cuda_unsorted_segsum will not be available')
def unsorted_segment_sum_ref(
    data : torch.Tensor,
    indices : torch.Tensor,
    num_segments : int
) -> torch.Tensor:
    """Pure-PyTorch reference for unsorted segment-sum.

    Row i of the result is the sum (over dim 0) of the rows of `data`
    whose index in `indices` equals i; segments with no members yield
    a zero row. Intentionally simple -- used to validate the extensions.
    """
    segment_rows = []
    for segment in range(num_segments):
        member_mask = indices == segment
        segment_rows.append(data[member_mask].sum(dim=0, keepdim=True))
    return torch.cat(segment_rows, dim=0)
class UnsortedSegmentSum(torch.autograd.Function):
    """Autograd wrapper dispatching to the compiled CPU/CUDA extensions."""
    @staticmethod
    def forward(ctx, data : torch.Tensor, indices : torch.Tensor, num_segments : int) -> torch.Tensor:
        # indices are needed again in backward to scatter the gradient.
        ctx.save_for_backward(indices)
        M = cuda_unsorted_segsum if data.device.type == 'cuda' else cpu_unsorted_segsum
        assert M is not None, f'No backend for {data.device}'

        # Only the 2D (rows x features) case is implemented in the extensions.
        if len(data.shape) == 2:
            return M.unsorted_segment_sum_fwd(data, indices, num_segments)
        else:
            raise NotImplementedError()

    @staticmethod
    def backward(ctx, grad):
        indices, = ctx.saved_tensors
        M = cuda_unsorted_segsum if grad.device.type == 'cuda' else cpu_unsorted_segsum
        assert M is not None, f'No backend for {grad.device}'

        if len(grad.shape) == 2:
            # Three forward args -> gradient plus two None placeholders.
            return M.unsorted_segment_sum_bwd(grad.contiguous(), indices), None, None
        else:
            raise NotImplementedError()
def unsorted_segment_sum(
    data : torch.Tensor,
    indices : torch.Tensor,
    num_segments : int
) -> torch.Tensor:
    """Public entry point: differentiable segment-sum via the custom op."""
    return UnsortedSegmentSum.apply(data, indices, num_segments)
def unit_test_cpu():
    """Compare the CPU extension against the pure-PyTorch reference.

    Prints forward and backward L2 deviations (both should be ~0).
    """
    print('==== Correctness Test CPU ====')
    data = torch.randn(1000, 3, requires_grad=False)
    indices = torch.randint(0, 100, (1000,), requires_grad=False)
    num_segments = 100

    # Independent leaf copies so each path accumulates its own grad.
    d1 = data.clone().requires_grad_()
    d2 = data.clone().requires_grad_()

    ref = unsorted_segment_sum_ref(d1, indices, num_segments)
    out = UnsortedSegmentSum.apply(d2, indices, num_segments)
    print('(FWD) L2 = ', (ref - out).pow(2).sum().sqrt())

    # Scalar loss to drive a backward pass through both implementations.
    ref.pow(2).sum().backward()
    out.pow(2).sum().backward()
    print('(BWD) L2 = ', (d1.grad - d2.grad).pow(2).sum().sqrt())
def unit_test_gpu():
    """Compare the CUDA extension (on GPU) against the CPU reference.

    Requires a CUDA device; results are moved back to CPU for the diff.
    """
    print('==== Correctness Test GPU ====')
    data = torch.randn(1000, 3, requires_grad=False)
    indices = torch.randint(0, 100, (1000,), requires_grad=False)
    num_segments = 100

    d1 = data.clone().requires_grad_()
    d2 = data.clone().cuda().requires_grad_()

    ref = unsorted_segment_sum_ref(d1, indices, num_segments)
    out = UnsortedSegmentSum.apply(d2, indices.clone().cuda(), num_segments)
    print('(FWD) L2 = ', (ref - out.cpu()).pow(2).sum().sqrt())

    ref.pow(2).sum().backward()
    out.pow(2).sum().backward()
    print('(BWD) L2 = ', (d1.grad - d2.grad.cpu()).pow(2).sum().sqrt())
if __name__ == '__main__':
    unit_test_cpu()
    unit_test_gpu()
    # NOTE(review): everything below exit(0) is dead code; it also references
    # data/indices/num_segments/d1/d2, which are locals of the test functions,
    # so re-enabling it as-is would raise NameError -- confirm before use.
    exit(0)

    # Benchmark
    t0 = time.perf_counter()
    for _ in range(1000):
        _ = unsorted_segment_sum_ref(data, indices, num_segments)
    t1 = time.perf_counter()
    print(f'Reference (Fwd): {(t1 - t0) * 1000:.2f} ms')

    t0 = time.perf_counter()
    for _ in range(1000):
        _ = UnsortedSegmentSum.apply(data, indices, num_segments)
    t1 = time.perf_counter()
    print(f'Extension (Fwd): {(t1 - t0) * 1000:.2f} ms')

    t0 = time.perf_counter()
    for _ in range(1000):
        out = unsorted_segment_sum_ref(d1, indices, num_segments)
        out.pow(2).sum().backward()
    t1 = time.perf_counter()
    print(f'Reference (Fwd + Bwd): {(t1 - t0) * 1000:.2f} ms')

    t0 = time.perf_counter()
    for _ in range(1000):
        out = UnsortedSegmentSum.apply(d2, indices, num_segments)
        out.pow(2).sum().backward()
    t1 = time.perf_counter()
    print(f'Extension (Fwd + Bwd): {(t1 - t0) * 1000:.2f} ms')
| medav/meshgraphnets-torch | kernels/unsorted_segsum/kernel.py | kernel.py | py | 4,483 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.utils.cpp_extensi... |
36947640459 | from __future__ import print_function
__revision__ = "src/engine/SCons/Tool/FortranCommon.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import re
import os.path
import SCons.Action
import SCons.Defaults
import SCons.Scanner.Fortran
import SCons.Tool
import SCons.Util
def isfortran(env, source):
    """Return 1 if any of code in source has fortran files in it, 0
    otherwise."""
    try:
        fortran_exts = env['FORTRANSUFFIXES']
    except KeyError:
        # No FORTRANSUFFIXES means no fortran tool, so there can be no
        # fortran sources to find.
        return 0

    if not source:
        # Source might be None for unusual cases like SConf.
        return 0

    # A node counts as fortran when its first source file's extension is
    # one of the configured fortran suffixes.
    found = any(
        node.sources
        and os.path.splitext(str(node.sources[0]))[1] in fortran_exts
        for node in source
    )
    return 1 if found else 0
def _fortranEmitter(target, source, env):
    """Scan the fortran source for MODULE statements and add the resulting
    .mod files (lowercased, in $FORTRANMODDIR) as extra targets."""
    node = source[0].rfile()
    if not node.exists() and not node.is_derived():
        print("Could not locate " + str(node.name))
        return ([], [])
    # This has to match the def_regex in the Fortran scanner
    mod_regex = r"""(?i)^\s*MODULE\s+(?!PROCEDURE|SUBROUTINE|FUNCTION|PURE|ELEMENTAL)(\w+)"""
    cre = re.compile(mod_regex,re.M)
    # Retrieve all USE'd module names
    modules = cre.findall(node.get_text_contents())
    # Remove unique items from the list
    modules = SCons.Util.unique(modules)
    # Convert module name to a .mod filename
    suffix = env.subst('$FORTRANMODSUFFIX', target=target, source=source)
    moddir = env.subst('$FORTRANMODDIR', target=target, source=source)
    modules = [x.lower() + suffix for x in modules]
    for m in modules:
        target.append(env.fs.File(m, moddir))
    return (target, source)
def FortranEmitter(target, source, env):
    """Static-object emitter: add .mod targets, then apply SCons defaults."""
    target, source = _fortranEmitter(target, source, env)
    return SCons.Defaults.StaticObjectEmitter(target, source, env)
def ShFortranEmitter(target, source, env):
    """Shared-object emitter: add .mod targets, then apply SCons defaults."""
    target, source = _fortranEmitter(target, source, env)
    return SCons.Defaults.SharedObjectEmitter(target, source, env)
def ComputeFortranSuffixes(suffixes, ppsuffixes):
    """suffixes are fortran source files, and ppsuffixes the ones to be
    pre-processed. Both should be sequences, not strings.

    Both lists are extended in place with the upper-case variants: on
    case-sensitive filesystems the upper-case forms imply preprocessing,
    otherwise they are treated as ordinary fortran sources.
    """
    assert len(suffixes) > 0
    s = suffixes[0]
    sup = s.upper()
    upper_suffixes = [_.upper() for _ in suffixes]
    if SCons.Util.case_sensitive_suffixes(s, sup):
        ppsuffixes.extend(upper_suffixes)
    else:
        suffixes.extend(upper_suffixes)
def CreateDialectActions(dialect):
    """Create dialect specific actions.

    Returns the (compile, preprocess+compile, shared-compile,
    shared-preprocess+compile) actions, all driven by the dialect's
    $<DIALECT>COM construction variables.
    """
    CompAction = SCons.Action.Action('$%sCOM ' % dialect, '$%sCOMSTR' % dialect)
    CompPPAction = SCons.Action.Action('$%sPPCOM ' % dialect, '$%sPPCOMSTR' % dialect)
    ShCompAction = SCons.Action.Action('$SH%sCOM ' % dialect, '$SH%sCOMSTR' % dialect)
    ShCompPPAction = SCons.Action.Action('$SH%sPPCOM ' % dialect, '$SH%sPPCOMSTR' % dialect)
    return CompAction, CompPPAction, ShCompAction, ShCompPPAction
def DialectAddToEnv(env, dialect, suffixes, ppsuffixes, support_module = 0):
    """Add dialect specific construction variables.

    Registers scanners, builder actions/emitters for all suffixes and
    fills in the dialect's FLAGS/COM construction variables. When
    support_module is 1 the command lines include $_FORTRANMODFLAG so
    the compiler writes .mod files to $FORTRANMODDIR.
    """
    ComputeFortranSuffixes(suffixes, ppsuffixes)

    fscan = SCons.Scanner.Fortran.FortranScan("%sPATH" % dialect)

    for suffix in suffixes + ppsuffixes:
        SCons.Tool.SourceFileScanner.add_scanner(suffix, fscan)

    env.AppendUnique(FORTRANSUFFIXES = suffixes + ppsuffixes)

    compaction, compppaction, shcompaction, shcompppaction = \
            CreateDialectActions(dialect)

    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)

    # Plain suffixes get the direct compile actions; preprocessed suffixes
    # get the CPP-aware ones. Emitters add .mod targets in both cases.
    for suffix in suffixes:
        static_obj.add_action(suffix, compaction)
        shared_obj.add_action(suffix, shcompaction)
        static_obj.add_emitter(suffix, FortranEmitter)
        shared_obj.add_emitter(suffix, ShFortranEmitter)

    for suffix in ppsuffixes:
        static_obj.add_action(suffix, compppaction)
        shared_obj.add_action(suffix, shcompppaction)
        static_obj.add_emitter(suffix, FortranEmitter)
        shared_obj.add_emitter(suffix, ShFortranEmitter)

    # Only seed defaults that a tool has not already provided.
    if '%sFLAGS' % dialect not in env:
        env['%sFLAGS' % dialect] = SCons.Util.CLVar('')

    if 'SH%sFLAGS' % dialect not in env:
        env['SH%sFLAGS' % dialect] = SCons.Util.CLVar('$%sFLAGS' % dialect)

    # If a tool does not define fortran prefix/suffix for include path, use C ones
    if 'INC%sPREFIX' % dialect not in env:
        env['INC%sPREFIX' % dialect] = '$INCPREFIX'

    if 'INC%sSUFFIX' % dialect not in env:
        env['INC%sSUFFIX' % dialect] = '$INCSUFFIX'

    env['_%sINCFLAGS' % dialect] = '$( ${_concat(INC%sPREFIX, %sPATH, INC%sSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)' % (dialect, dialect, dialect)

    if support_module == 1:
        env['%sCOM' % dialect]     = '$%s -o $TARGET -c $%sFLAGS $_%sINCFLAGS $_FORTRANMODFLAG $SOURCES' % (dialect, dialect, dialect)
        env['%sPPCOM' % dialect]   = '$%s -o $TARGET -c $%sFLAGS $CPPFLAGS $_CPPDEFFLAGS $_%sINCFLAGS $_FORTRANMODFLAG $SOURCES' % (dialect, dialect, dialect)
        env['SH%sCOM' % dialect]   = '$SH%s -o $TARGET -c $SH%sFLAGS $_%sINCFLAGS $_FORTRANMODFLAG $SOURCES' % (dialect, dialect, dialect)
        env['SH%sPPCOM' % dialect] = '$SH%s -o $TARGET -c $SH%sFLAGS $CPPFLAGS $_CPPDEFFLAGS $_%sINCFLAGS $_FORTRANMODFLAG $SOURCES' % (dialect, dialect, dialect)
    else:
        env['%sCOM' % dialect]     = '$%s -o $TARGET -c $%sFLAGS $_%sINCFLAGS $SOURCES' % (dialect, dialect, dialect)
        env['%sPPCOM' % dialect]   = '$%s -o $TARGET -c $%sFLAGS $CPPFLAGS $_CPPDEFFLAGS $_%sINCFLAGS $SOURCES' % (dialect, dialect, dialect)
        env['SH%sCOM' % dialect]   = '$SH%s -o $TARGET -c $SH%sFLAGS $_%sINCFLAGS $SOURCES' % (dialect, dialect, dialect)
        env['SH%sPPCOM' % dialect] = '$SH%s -o $TARGET -c $SH%sFLAGS $CPPFLAGS $_CPPDEFFLAGS $_%sINCFLAGS $SOURCES' % (dialect, dialect, dialect)
def add_fortran_to_env(env):
    """Add Builders and construction variables for Fortran to an Environment."""
    try:
        FortranSuffixes = env['FORTRANFILESUFFIXES']
    except KeyError:
        FortranSuffixes = ['.f', '.for', '.ftn']

    #print("Adding %s to fortran suffixes" % FortranSuffixes)
    try:
        FortranPPSuffixes = env['FORTRANPPFILESUFFIXES']
    except KeyError:
        FortranPPSuffixes = ['.fpp', '.FPP']

    DialectAddToEnv(env, "FORTRAN", FortranSuffixes,
                    FortranPPSuffixes, support_module = 1)

    # Module file naming/location defaults, consumed by $_FORTRANMODFLAG.
    env['FORTRANMODPREFIX'] = ''     # like $LIBPREFIX
    env['FORTRANMODSUFFIX'] = '.mod' # like $LIBSUFFIX

    env['FORTRANMODDIR'] = ''          # where the compiler should place .mod files
    env['FORTRANMODDIRPREFIX'] = ''    # some prefix to $FORTRANMODDIR - similar to $INCPREFIX
    env['FORTRANMODDIRSUFFIX'] = ''    # some suffix to $FORTRANMODDIR - similar to $INCSUFFIX
    env['_FORTRANMODFLAG'] = '$( ${_concat(FORTRANMODDIRPREFIX, FORTRANMODDIR, FORTRANMODDIRSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)'
def add_f77_to_env(env):
    """Add Builders and construction variables for f77 to an Environment."""
    try:
        F77Suffixes = env['F77FILESUFFIXES']
    except KeyError:
        F77Suffixes = ['.f77']

    #print("Adding %s to f77 suffixes" % F77Suffixes)
    try:
        F77PPSuffixes = env['F77PPFILESUFFIXES']
    except KeyError:
        F77PPSuffixes = []

    # F77 predates modules, so support_module is left at its default of 0.
    DialectAddToEnv(env, "F77", F77Suffixes, F77PPSuffixes)
def add_f90_to_env(env):
    """Add Builders and construction variables for f90 to an Environment."""
    try:
        F90Suffixes = env['F90FILESUFFIXES']
    except KeyError:
        F90Suffixes = ['.f90']

    #print("Adding %s to f90 suffixes" % F90Suffixes)
    try:
        F90PPSuffixes = env['F90PPFILESUFFIXES']
    except KeyError:
        F90PPSuffixes = []

    # Fortran 90 introduced modules, hence support_module = 1.
    DialectAddToEnv(env, "F90", F90Suffixes, F90PPSuffixes,
                    support_module = 1)
def add_f95_to_env(env):
    """Add Builders and construction variables for f95 to an Environment."""
    try:
        F95Suffixes = env['F95FILESUFFIXES']
    except KeyError:
        F95Suffixes = ['.f95']

    #print("Adding %s to f95 suffixes" % F95Suffixes)
    try:
        F95PPSuffixes = env['F95PPFILESUFFIXES']
    except KeyError:
        F95PPSuffixes = []

    DialectAddToEnv(env, "F95", F95Suffixes, F95PPSuffixes,
                    support_module = 1)
def add_f03_to_env(env):
    """Add Builders and construction variables for f03 to an Environment."""
    try:
        F03Suffixes = env['F03FILESUFFIXES']
    except KeyError:
        F03Suffixes = ['.f03']

    #print("Adding %s to f03 suffixes" % F03Suffixes)
    try:
        F03PPSuffixes = env['F03PPFILESUFFIXES']
    except KeyError:
        F03PPSuffixes = []

    DialectAddToEnv(env, "F03", F03Suffixes, F03PPSuffixes,
                    support_module = 1)
def add_f08_to_env(env):
    """Add Builders and construction variables for f08 to an Environment."""
    try:
        F08Suffixes = env['F08FILESUFFIXES']
    except KeyError:
        F08Suffixes = ['.f08']

    try:
        F08PPSuffixes = env['F08PPFILESUFFIXES']
    except KeyError:
        F08PPSuffixes = []

    DialectAddToEnv(env, "F08", F08Suffixes, F08PPSuffixes,
                    support_module = 1)
def add_all_to_env(env):
    """Add builders and construction variables for all supported fortran
    dialects."""
    # Generic FORTRAN first so dialect-specific settings can override it.
    add_fortran_to_env(env)
    add_f77_to_env(env)
    add_f90_to_env(env)
    add_f95_to_env(env)
    add_f03_to_env(env)
    add_f08_to_env(env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mongodb/mongo | src/third_party/scons-3.1.2/scons-local-3.1.2/SCons/Tool/FortranCommon.py | FortranCommon.py | py | 9,651 | python | en | code | 24,670 | github-code | 36 | [
{
"api_name": "os.path.path.splitext",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "re.compile",
"lin... |
30374326803 | import numpy as np
from scipy.special import expit
def piston_action(alfa, p_linea, p_servicio):
    """Smoothed pressure differential acting on the piston.

    The crank angle is wrapped to [0, 2*pi); a logistic switch centred
    at pi (steepness 20) turns the differential (p_servicio - p_linea)
    on during the second half of the revolution and off during the first.
    """
    crank_angle = alfa % (2 * np.pi)
    delta_p = p_servicio - p_linea
    return delta_p * expit((crank_angle - np.pi) * 20)
def build_indep(
    alfa: np.float32,
    beta: np.float32,
    omega: np.float32,
    mb: np.float32,
    mp: np.float32,
    R: np.float32,
    Ib: np.float32,
    Lg: np.float32,
    L: np.float32,
    D_plunger,
    p_linea,
    p_servicio,
) -> np.ndarray:
    """Build the 5-element right-hand-side vector of the force balance.

    alfa/beta are crank/rod angles, omega the crank speed, mb/mp the rod
    and plunger masses, R the crank radius, Ib the rod inertia, Lg/L the
    rod centre-of-mass position and length. The last entry subtracts the
    pressure force from piston_action over the plunger cross-section.
    NOTE(review): the individual inertial terms follow the slider-crank
    kinematics assumed by the caller; verify against the derivation notes.
    """
    p_alfa = piston_action(alfa, p_linea, p_servicio)
    # Rod angular velocity and acceleration from the kinematic constraint.
    dbdt = (R / L) * (np.cos(alfa) / np.cos(beta)) * omega
    d2bdt2 = (dbdt**2 - omega**2) * np.tan(beta)

    b0 = mb * Lg * (R / L) * omega**2 * np.sin(alfa)
    b1 = (
        -mb
        * R
        * omega**2
        * (
            np.cos(alfa)
            + (1 - Lg / L)
            * (
                (R / L) * (np.cos(alfa) ** 2 / np.cos(beta) ** 3)
                - np.sin(alfa) * np.tan(beta)
            )
        )
    )
    b2 = Ib * d2bdt2
    b3 = 0
    # Plunger inertia minus the pressure load on the plunger face area.
    b4 = mp * R * omega**2 * (
        (np.cos(alfa + beta) / np.cos(beta))
        + (R / L) * (np.cos(alfa) ** 2 / np.cos(beta) ** 3)
    ) - p_alfa * (D_plunger**2 * np.pi / 4)

    return np.array([b0, b1, b2, b3, b4])
def build_matrix(L: np.float32, Lg: np.float32, beta: np.float32) -> np.ndarray:
    """Assemble the 5x5 coefficient matrix of the force-balance system.

    Rows 0-1 couple the reaction components at the two joints, row 2 is
    the moment balance about the rod centre of mass (Lg from one end of a
    rod of length L at angle beta), rows 3-4 close the plunger equations.
    """
    sin_b = np.sin(beta)
    cos_b = np.cos(beta)
    arm = (L - Lg) * sin_b
    return np.array(
        [
            [1.0, 0.0, 1.0, 0.0, 0.0],
            [0.0, 1.0, 0.0, 1.0, 0.0],
            [-arm, arm, Lg * cos_b, -Lg * sin_b, 0.0],
            [0.0, 0.0, 1.0, 0.0, -1.0],
            [0.0, 0.0, 0.0, 1.0, 0.0],
        ]
    )
def solve_system(
    alfa,
    beta,
    omega,
    mb: np.float32,
    mp: np.float32,
    R: np.float32,
    Ib: np.float32,
    L,
    Lg,
    D_plunger,
    p_linea,
    p_servicio,
):
    """Solve the 5x5 force-balance system for one crank position.

    Returns a dict with magnitude/phase of reactions A and B, the total
    resultant, and the raw solution vector (divided by 1000, i.e. kN).
    """
    A = build_matrix(L, Lg, beta)
    b = build_indep(
        alfa, beta, omega, mb, mp, R, Ib, Lg, L, D_plunger, p_linea, p_servicio
    )
    # N -> kN.
    sol = np.linalg.solve(A, b) / 1000
    # Fix: the original reassigned the crank-radius parameter R to hold the
    # resultant magnitude; use dedicated locals instead of shadowing it.
    total_x = sol[0] + sol[2]
    total_y = sol[1] + sol[3]
    total_mod = np.sqrt(total_x**2 + total_y**2)
    # NOTE(review): np.arctan loses the quadrant, unlike the arctan2 used
    # for the per-reaction phases below -- kept as-is to preserve output;
    # confirm whether arctan2 was intended here too.
    total_phase = np.arctan(total_y / total_x)
    ret_dict = {
        "module_A": np.sqrt(sol[0] ** 2 + sol[1] ** 2),
        "phase_A": np.arctan2(sol[1], sol[0]),
        "module_B": np.sqrt(sol[2] ** 2 + sol[3] ** 2),
        "phase_B": np.arctan2(sol[3], sol[2]),
        "Total_mod": total_mod,
        "Total_phase": total_phase,
        "solution": sol,
    }
    return ret_dict
def get_max(res):
    """Print the Cartesian components of the largest A and B reactions.

    res: sequence of solve_system() result dicts sampled over a revolution.
    NOTE(review): the return value (1, 1) is a placeholder -- callers get
    no real data back; confirm whether (XAmax, YAmax, XBmax, YBmax) was
    meant to be returned.
    """
    modA = np.array([x["module_A"] for x in res])
    phiA = np.array([x["phase_A"] for x in res])
    modB = np.array([x["module_B"] for x in res])
    phiB = np.array([x["phase_B"] for x in res])

    # Polar -> Cartesian for every sampled crank position.
    XA = modA * np.cos(phiA)
    YA = modA * np.sin(phiA)

    XB = modB * np.cos(phiB)
    YB = modB * np.sin(phiB)

    # Components taken at the position of maximum magnitude.
    Amax = np.argmax(modA)
    Bmax = np.argmax(modB)

    XAmax = XA[Amax]
    YAmax = YA[Amax]
    XBmax = XB[Bmax]
    YBmax = YB[Bmax]

    print(f"XAmax = {XAmax}, YAmax = {YAmax}")
    print(f"XBmax = {XBmax}, YBmax = {YBmax}")
    return 1, 1
| hanzy1110/paper_fem_bombas | src/solve.py | solve.py | py | 3,065 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.pi",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "scipy.special.expit",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "numpy.float32",
"lin... |
25048960763 | from Crypto.Cipher import AES # AES (all modes)
from Crypto.Util import Counter # AES CTR
from os import urandom # AES CBC or CTR
#from Crypto import Random # AES CBC or CTR
#Random.get_random_bytes(16) # AES CBC or CTR
from Crypto.Cipher import PKCS1_OAEP # RSA
from Crypto.PublicKey import RSA # RSA
#module 'time' has no attribute 'clock?
def pad16(string):
    """Pad *string* with '#' up to the next 16-char boundary (no-op if aligned)."""
    block = 16
    filler = '#'
    remainder = len(string) % block
    if remainder:
        return string + filler * (block - remainder)
    return string
def unpad16(string):
    """Strip the '#' padding added by pad16 (removes '#' from both ends)."""
    return string.strip('#')
class RSA_cipher(object):
    """Thin wrapper around PyCryptodome RSA with PKCS#1 OAEP padding."""
    def __init__(self, k):
        # RSA modulus size in bits (1024 is the practical minimum).
        self.KEY_LENGTH = k
    def generate_keypair(self):
        """Return a (public, private) key pair serialized in DER format."""
        keypair = RSA.generate(self.KEY_LENGTH)
        return (keypair.publickey().exportKey("DER"), keypair.exportKey("DER"))
    def encrypt(self, pub, message):
        """OAEP-encrypt *message* under the DER-encoded public key *pub*."""
        return PKCS1_OAEP.new(RSA.importKey(pub)).encrypt(message)
    def decrypt(self, priv, ciphertext):
        """OAEP-decrypt *ciphertext* with the DER-encoded private key *priv*."""
        return PKCS1_OAEP.new(RSA.importKey(priv)).decrypt(ciphertext)
class AES_ECB(object):  # used for host_decrypt and host_encrypt
    """AES in ECB mode; key and plaintext are '#'-padded to 16-byte blocks."""
    def __init__(self, k):
        self.KEY = pad16(k)
        self.cipher = AES.new(self.KEY, AES.MODE_ECB)
    def encrypt(self, s):
        return self.cipher.encrypt(pad16(s))
    def decrypt(self, s):
        return unpad16(self.cipher.decrypt(s))
class AES_CBC(object):
    """AES-CBC with a fresh random IV per message, prepended to the ciphertext."""
    def __init__(self, k):
        self.KEY = pad16(k)
    def encrypt(self, s):
        iv = urandom(16)
        encryptor = AES.new(self.KEY, AES.MODE_CBC, iv)
        return iv + encryptor.encrypt(pad16(s))
    def decrypt(self, s):
        # The first 16 bytes are the IV written by encrypt().
        iv, body = s[:16], s[16:]
        decryptor = AES.new(self.KEY, AES.MODE_CBC, iv)
        return unpad16(decryptor.decrypt(body))
class AES_CTR (object): # KEY = 128 or 256 bit   IV = 128 bit # BLOCK SIZE = 128 bit
    """AES-CTR with a random 128-bit IV per message, prepended to the output.

    Bug fix: the counter seed used the Python 2-only ``long(...)`` and
    ``str.encode('hex')``; on Python 3 ``urandom`` returns ``bytes`` and both
    calls fail.  ``int.from_bytes(iv, 'big')`` produces the same integer.
    """
    def __init__(self, k):
        self.KEY = pad16(k)                                 # KEY 128 or 256 bit (padded)
    def encrypt(self, s):
        iv = urandom(16)                                    # fresh random IV (128 bit) per encryption
        ctr = Counter.new(128, initial_value=int.from_bytes(iv, 'big'))  # init counter
        enc_cipher = AES.new(self.KEY, AES.MODE_CTR, counter=ctr)        # init cipher
        s = pad16(s)                                        # message padding (multiple of 128 bit)
        return iv + enc_cipher.encrypt(s)                   # output: IV (128 bit) + ciphertext
    def decrypt(self, s):
        iv = s[:16]                                         # IV is the first 128 bits
        ctr = Counter.new(128, initial_value=int.from_bytes(iv, 'big'))  # init counter
        dec_cipher = AES.new(self.KEY, AES.MODE_CTR, counter=ctr)        # init cipher
        t = dec_cipher.decrypt(s[16:])                      # decrypt (IV is excluded)
        return unpad16(t)                                   # return unpadded message
# GOOD TEST OF RSA
####################################################################################
# Smoke tests: round-trip each cipher wrapper and print the result.
# NOTE(review): on Python 3 PyCryptodome requires ``bytes`` plaintext, but
# these calls pass ``str`` — confirm the intended interpreter/backend.
print ("-------------------\nRSA:\n\n")
r = RSA_cipher(4096)
pub, priv = r.generate_keypair()
#print pub
#print priv
enc = r.encrypt(pub, "test_string")
dec = r.decrypt(priv, enc)
print ("ENC: " + str(enc) ) #.encode('hex')
print ("DEC: " + str(dec))
# GOOD TEST OF AES-ECB
####################################################################################
print ("\n\n-------------------\nAES ECB:\n\n")
KEY = "AAAAABBBBBCCCCCDDDDDEEEEEFFFFFGG"
c = AES_ECB (KEY)
enc = c.encrypt("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA " * 2)
dec = c.decrypt(enc)
print ("ENC: " + str(enc)) #.encode('hex')
print ("DEC: " + str(dec))
# GOOD TEST OF AES-CBC
####################################################################################
print ("\n\n-------------------\nAES CBC:\n\n")
KEY = "AAAAABBBBBCCCCCDDDDDEEEEEFFFFFGG"
c = AES_CBC (KEY)
enc = c.encrypt("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA " * 2)
dec = c.decrypt(enc)
print ("ENC: " + str(enc)) #.encode('hex')
print ("DEC: " + str(dec))
# GOOD TEST OF AES-CTR <-- Suggested
####################################################################################
print ("\n\n-------------------\nAES CTR:\n\n")
KEY = "AAAAABBBBBCCCCCDDDDDEEEEEFFFFFGG"
c = AES_CTR (KEY)
enc = c.encrypt("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA " * 2)
dec = c.decrypt(enc)
print ("ENC: " + str(enc)) #.encode('hex')
print ("DEC: " + str(dec))
# Decrypting twice verifies CTR decryption is stateless (fresh counter each call).
print ("DEC2: " + str( c.decrypt(enc) )) #.encode('hex')
{
"api_name": "Crypto.PublicKey.RSA.generate",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "Crypto.PublicKey.RSA",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "Crypto.PublicKey.RSA.importKey",
"line_number": 52,
"usage_type": "call"
},
{
... |
34376075705 | from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.contrib.auth.views import login, logout
from django.views.generic.edit import CreateView
import views
# Register all ModelAdmin classes found in installed apps.
admin.autodiscover()
# URL routes, legacy ``patterns()`` style (removed in Django 1.10 —
# NOTE(review): confirm this project pins an older Django release).
urlpatterns = patterns(
    '',
    url(r'^admin/', include(admin.site.urls)),
    # url('^', include('django.contrib.auth.urls')),
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^login/$', login,
        {'template_name': 'login.html'}),
    url(r'^logout/$', logout,
        {'template_name': 'logout.html'}),
    url(r'^register/$', views.RegisterView.as_view(), name='register'),
    url(r'^wall/', include('wall.urls', namespace="wall")),
)
| jonathantumulak/facebookwall | facebookwall/src/facebookwall/facebookwall/urls.py | urls.py | py | 689 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.admin.autodiscover",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.patterns",
"line_number": 10,
"usage_type": "call"
},
{
"ap... |
43346932566 | import datetime
from discord import client, Message
import aiohttp
import random
import time
import typing
import json
import humanize
import discord
from discord.ext import commands
start_time = time.time()  # process start; used by !ping to report uptime
intents = discord.Intents.default()  # NOTE(review): unused — the bot below uses Intents.all()
bot = commands.Bot(command_prefix="!", intents=discord.Intents.all())
# Options handed to FFmpeg when streaming audio (reconnect on dropped streams).
ffmpeg_options = {
    'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5',
    'options': '-vn',
    'format': 'bestaudio[ext=m4a]'
}
@bot.event
async def on_guild_join(guild):
    """Greet a newly joined guild in its system channel.

    Bug fix: discord.py dispatches ``on_guild_join`` with a single ``guild``
    argument; the previous ``(ctx, guild)`` signature made the handler raise
    a TypeError on every guild join.
    """
    channel = guild.system_channel
    if channel is not None:
        await channel.send(f'Successfully joined! You can find a command list here - https://v1ss0nd.github.io/discord-help , \nYou have to create "Moderator" role to use bot moderation feature, make sure you gave it to your server moderators!')
@bot.event
async def on_message(message: Message):
    """Record each user's last message (consumed by !stalk), then run commands.

    NOTE(review): the global ``last_seen`` dict is never initialised in this
    file — confirm it is defined elsewhere, otherwise this raises NameError.
    """
    if message.author == bot.user: return
    user_id = message.author.id
    content = message.content
    time_ = datetime.datetime.now()
    last_seen[user_id] = (content, time_)
    # Persist with datetimes serialised to strings so the dict is JSON-safe.
    last_seen_str = {k: (v[0], v[1].strftime("%Y-%m-%d %H:%M:%S")) for k, v in last_seen.items()}
    with open("last_seen.json", "w") as f:
        json.dump(last_seen_str, f)
    await bot.process_commands(message)
@bot.event
async def on_ready():
    # Advertise the help command in the bot's presence once connected.
    await bot.change_presence(activity=discord.Game(name='Type "!help" to DM with me to see a list with supported commands'))
@bot.command()
async def stalk(ctx, user: discord.Member):
    """Report when *user* last spoke and what they said (from last_seen.json)."""
    with open("last_seen.json", "r") as f:
        seen = json.load(f)
    key = str(user.id)
    if key not in seen:
        await ctx.reply(f"i havent seen any messages from {user.mention}.")
        return
    content, stamp = seen[key]
    # Humanise the time elapsed since the stored timestamp.
    elapsed = datetime.datetime.now() - datetime.datetime.strptime(stamp, "%Y-%m-%d %H:%M:%S")
    delta_str = humanize.precisedelta(elapsed).replace(",", "").strip() + " ago"
    await ctx.reply(f"{user.mention} was last seen in chat {delta_str}, their last message: {content}")
@bot.command()
async def ping(ctx):
    """Reply with the bot's uptime and gateway latency."""
    elapsed = time.time() - start_time
    minutes, seconds = divmod(elapsed, 60)
    hours, minutes = divmod(minutes, 60)
    latency = bot.latency * 1000
    await ctx.reply(f'pong! Current uptime is {int(hours)} hours {int(minutes)} minutes {int(seconds)} seconds. Latency is {round(latency)} ms')
# Replace the built-in help with a minimal version that links the hosted docs.
bot.remove_command('help')
class CustomHelpCommand(commands.MinimalHelpCommand):
    """Minimal help command rendered inside an embed titled with the docs URL."""
    async def send_pages(self):
        destination = self.get_destination()
        embed = discord.Embed(title="https://v1ss0nd.github.io/discord-help", url="https://v1ss0nd.github.io/discord-help")
        for page in self.paginator.pages:
            embed.description = page
            await destination.send(embed=embed)
bot.help_command = CustomHelpCommand()
@bot.command()
@commands.has_role("Moderator")
async def spam(ctx, count: int, *message):
    """Send *message* to the channel *count* times (Moderator only).

    Fixes: the previous ``except ValueError`` around ``str.join`` was
    unreachable dead code (joining command arguments never raises ValueError),
    and an empty message now returns early instead of failing on
    ``ctx.send("")``.
    """
    text = " ".join(message)
    if not text:
        return
    for _ in range(count):
        await ctx.send(text)
@bot.command(description="info about provided user")
async def user(ctx, user: typing.Optional[commands.UserConverter] = None):
    """Show an embed with account info for *user* (defaults to the caller)."""
    if user is None:
        user = ctx.author
    guild = ctx.guild
    member = guild.get_member(user.id)  # None when the user is not in this guild
    embed = discord.Embed()
    embed.title = f"{user.name}#{user.discriminator}"
    embed.description = f"{user.mention}"
    embed.color = discord.Color.random()
    embed.add_field(name="ID", value=user.id)
    embed.add_field(name="Created at", value=user.created_at.strftime("%Y-%m-%d %H:%M:%S"))
    if member is not None:
        # Guild-specific fields; roles[0] is @everyone, hence the slice.
        embed.add_field(name="Nickname", value=member.nick or "None")
        embed.add_field(name="Joined at", value=member.joined_at.strftime("%Y-%m-%d %H:%M:%S"))
        embed.add_field(name="Roles", value=", ".join(role.name for role in member.roles[1:]) or "None")
    await ctx.reply(embed=embed)
@bot.group()
@commands.has_role("Moderator")
async def role(ctx):
    """Parent group for the role-management subcommands (Moderator only)."""
    if ctx.invoked_subcommand is None:
        await ctx.reply("Please specify a valid subcommand: list, create, delete, give, remove, color, rename")
@role.command()
async def display(ctx, role_name: str):
    """Toggle whether *role_name* is displayed separately in the member list."""
    guild = ctx.guild
    role = discord.utils.get(guild.roles, name=role_name)
    if role is None:
        await ctx.reply(f"Role {role_name} not found")
        return
    # 'hoist' controls separate display; flip the current value.
    current_hoist = role.hoist
    new_hoist = not current_hoist
    await role.edit(hoist=new_hoist)
    await ctx.reply(f"Separate displaying of {role_name} switched to {new_hoist}.")
@role.command()
async def create(ctx, name, color: discord.Color):
    """Create a new role with the given name and colour."""
    await ctx.guild.create_role(name=name, color=color)
    await ctx.reply(f"Created role {name}")
@role.command()
async def delete(ctx, *, name):
    """Delete the role called *name*, if it exists."""
    role = discord.utils.get(ctx.guild.roles, name=name)
    if role:
        await role.delete()
        await ctx.reply(f"Deleted role {name}")
    else:
        await ctx.reply(f"Role {name} not found")
@role.command()
async def give(ctx, role: discord.Role, member: discord.Member,):
    """Assign *role* to *member*."""
    await member.add_roles(role)
    await ctx.reply(f"Gave {role.name} to {member.name}")
@role.command()
async def remove(ctx, role: discord.Role, member: discord.Member, ):
    """Remove *role* from *member*."""
    await member.remove_roles(role)
    await ctx.reply(f"Removed {role.name} from {member.name}")
@role.command()
async def list(ctx):
    """Reply with a comma-separated list of every role in the guild."""
    rolelist = [role.name for role in ctx.guild.roles]
    roles = ", ".join(rolelist)
    await ctx.reply(f"{roles}")
@role.command()
async def color(ctx, role: discord.Role, color: discord.Color):
    """Recolour *role*."""
    await role.edit(color=color)
    await ctx.reply(f"Changed the color of {role.name} to {color}")
@role.command()
async def rename(ctx, role: discord.Role, *, name: str):
    """Rename *role*."""
    await role.edit(name=name)
    await ctx.reply(f"Changed the name of {role.mention} to {name}")
@role.command()
async def move(ctx, role_name: str, direction: str):
    """Move *role_name* to the 'top' (just under the bot) or 'bottom' of the list."""
    guild = ctx.guild
    role = discord.utils.get(guild.roles, name=role_name)
    if role is None:
        await ctx.reply(f"Role not found.")
        return
    if direction not in ["top", "bottom"]:
        await ctx.reply(f"Invalid direction. Please use 'top' or 'bottom'.")
        return
    bot_member = guild.get_member(bot.user.id)
    bot_top_role = bot_member.top_role
    if direction == "top":
        # The bot cannot place a role above its own highest role.
        position = bot_top_role.position - 1
    else:
        # Just above the lowest non-managed (non-integration) role.
        position = min(r.position for r in guild.roles if not r.managed) + 1
    await role.edit(position=position)
    await ctx.reply(f"{role_name} moved to {direction}.")
@bot.group()
@commands.has_role("Moderator")
async def member(ctx):
    """Parent group for the member-moderation subcommands (Moderator only)."""
    if ctx.invoked_subcommand is None:
        await ctx.reply("Please specify a valid subcommand: mute, ban, unban, kick")
@member.command()
async def ban(ctx, member: discord.Member, *, reason=None):
    """Ban *member*, optionally recording a reason."""
    await member.ban(reason=reason)
    await ctx.reply(f"{member} has been banned for {reason}.")
@member.command()
async def unban(ctx, id: int):
    """Unban the user with the given numeric *id* from this guild.

    Bug fix: the old code called ``client.fetch_user`` where ``client`` is
    the ``discord.client`` *module* (from ``from discord import client``),
    which has no ``fetch_user``; the running ``bot`` instance must be used.
    """
    user = await bot.fetch_user(id)
    await ctx.guild.unban(user)
    await ctx.reply(f"{user} has been unbanned.")
@member.command()
async def kick(ctx, member: discord.Member, *, reason=None):
    """Kick *member* from the guild, optionally with a reason."""
    await member.kick(reason=reason)
    await ctx.reply(f'User {member} has been kicked.')
@member.command()
@commands.has_permissions(manage_messages=True)
async def mute(ctx, member: discord.Member):
    """Give *member* the "Muted" role, creating the role first if needed.

    Bug fix: when "Muted" did not exist, the role returned by ``create_role``
    was discarded and ``add_roles`` was called with the stale ``None``
    reference; the created role is now captured and used.
    """
    guild = ctx.guild
    role = discord.utils.get(guild.roles, name="Muted")
    if role is None:
        perms = discord.Permissions(send_messages=False, speak=False)
        role = await guild.create_role(name="Muted", permissions=perms)
        await member.add_roles(role)
        await ctx.reply("Successfully created Muted role and assigned it to mentioned user.")
    else:
        await member.add_roles(role)
        await ctx.reply(f"Has been muted {member}")
@bot.command()
async def join(context: commands.Context) -> discord.VoiceProtocol:
    """Connect (or move) the bot to the caller's voice channel; return the client."""
    if context.author.voice is None:
        return await context.reply("You are not in a voice channel.")
    channel = context.author.voice.channel
    client = context.voice_client
    if client is None:
        client = await context.voice_client if False else await channel.connect()
    if client.is_connected() and client.channel != channel:
        # Already connected somewhere else: follow the caller.
        await client.move_to(channel)
    return client
@bot.command()
async def leave(ctx):
    """Disconnect the bot from its current voice channel, if any."""
    guild = ctx.guild
    if guild.voice_client is not None:
        await guild.voice_client.disconnect()
        await ctx.reply(f"Left from the voice channel")
    else:
        await ctx.reply("I am not in a voice channel.")
@bot.command()
async def play(ctx, path: str, repeat: bool = False):
    """Play audio from a URL (via pafy) or a local file path in voice chat.

    NOTE(review): ``pafy`` is never imported in this file — the URL branch
    raises NameError unless it is imported elsewhere; confirm.
    """
    vc = await join(ctx)
    if path.startswith("http"):
        song = pafy.new(path)
        audio = song.getbestaudio()
        source = discord.FFmpegPCMAudio(audio.url)
    else:
        source = discord.FFmpegPCMAudio(path)
    vc.loop = repeat
    vc.play(source)
    await ctx.reply(f"Playing {path}")
@bot.command()
async def stop(ctx):
    """Stop whatever is currently playing in the bot's voice channel."""
    vc = ctx.voice_client
    if vc and vc.is_connected():
        vc.stop()
        await ctx.reply("Stopped playing.")
    else:
        await ctx.reply("There is nothing playing.")
@bot.command()
async def playfile(context: commands.Context, repeat: bool = False) -> None:
    """Download the message's first attachment and play it in voice chat."""
    client = await join(context)
    attachment = context.message.attachments[0]
    filename = await download_audio(attachment.url)
    client.loop = repeat
    client.play(discord.FFmpegPCMAudio(filename))
    await context.reply(f"Playing __{attachment.filename.replace('_', ' ')}__")
async def download_audio(url: str) -> str:
    """Fetch *url* into a randomly-named local file and return the filename."""
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            content = await response.read()
            # response.url is a yarl.URL; .suffix is the file extension.
            extension = response.url.suffix
            filename = f"audio{random.randint(1000, 9999)}{extension}"
            with open(filename, "wb") as file:
                file.write(content)
            return filename
# NOTE(review): placeholder token — keep the real token out of source control.
bot.run('TOKEN')
| v1ss0nd/vsndbot-Discord | bot.py | bot.py | py | 9,975 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "discord.Intents.default",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "discord.Intents",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.co... |
32194079583 | import time
import os
import sqlite3
import mysql.connector
import json
import utils.normalize_data
import git
import importlib
def process_requests():
    """Poll the sqlite task queue forever and dispatch each pending request.

    Each unstarted row is marked started, its JSON payload's ``_C`` field
    names a ``depot_manager`` submodule whose ``process()`` handles it, and
    the answer/end/ack timestamps are written back.  Loops every 15 s until
    a database error or Ctrl-C.
    """
    maria_conn = None
    lite_conn = None
    try:
        maria_cnf_FD = open('db_conf.json')
        maria_cnf = json.load(maria_cnf_FD)
        maria_cnf_FD.close()
        if not os.path.isdir('depot'):
            print('ERROR: Depot dir not found')
            return
        lite_conn = sqlite3.connect('data/git_tasks.db')
        maria_conn = mysql.connector.connect(host=maria_cnf['host'],user=maria_cnf['user'],
            password=maria_cnf['password'],database='git_info')
        lite_cur = lite_conn.cursor()
        # maria_cur = maria_conn.cursor()
        while True:
            # Oldest, highest-priority task that has not been started yet.
            lite_cur.execute('SELECT * FROM tasks WHERE start_date IS NULL \
                ORDER BY priority,req_date')
            print(lite_cur)
            row = lite_cur.fetchone()
            if row:
                print(row)
                # time.sleep(60)
                lite_cur.execute('UPDATE tasks \
                    SET start_date = DateTime("now","localtime") \
                    WHERE task_id = ?', (row[0],))
                lite_conn.commit()
                # Extract request type
                try:
                    request = json.loads(row[3])
                    # The '_C' field selects the depot_manager handler module.
                    task_mod = importlib.import_module('depot_manager.' + request['_C'])
                    result = task_mod.process(request, maria_conn, lite_conn)
                except json.decoder.JSONDecodeError:
                    print('BAD REQUEST FORMAT: ' + row[3])
                    result = (True, json.dumps({'status': 'BAD REQUEST FORMAT'}))
                except ModuleNotFoundError:
                    print('INVALID REQUEST TYPE: ' + str(request))
                    result = (True, json.dumps({'status': 'INVALID REQUEST TYPE'}))
                except KeyError:
                    print('BAD REQUEST FORMAT: ' + str(request))
                    result = (True, json.dumps({'status': 'BAD REQUEST FORMAT'}))
                lite_cur.execute('UPDATE tasks \
                    SET answer = ?, end_date = DateTime("now","localtime") \
                    WHERE task_id = ?', (result[1],row[0]))
                # result[0] True means the task is fully acknowledged.
                if result[0]:
                    lite_cur.execute('UPDATE tasks \
                        SET ack_date = DateTime("now","localtime") \
                        WHERE task_id = ?', (row[0],))
                lite_conn.commit()
            time.sleep(15)
        # NOTE(review): unreachable — the while True above never breaks.
        lite_conn.close()
        maria_conn.close()
    except sqlite3.Error as e:
        print('Database Error, Exiting server')
        print(e)
        # NOTE(review): clearing the handle here skips the close() in finally.
        lite_conn = None
    except mysql.connector.Error as e:
        print('Database Error, Exiting server')
        print(e)
        maria_conn = None
    except json.JSONDecodeError:
        print('ERROR Reading json config file')
    except KeyboardInterrupt:
        pass
    finally:
        if lite_conn:
            lite_conn.close()
        if maria_conn:
            maria_conn.close()
print('STOP')
| mathieu-bergeron/aquiletour2021 | dockers/git/depot_manager/task_processor.py | task_processor.py | py | 3,103 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.connect",
"line_numb... |
36335146377 | import json, sys, time, os
import requests as rq
import soundcloud as sc
id = "ql3NWDyvuRgjpzwArS8lYmm2SrVGYLDz"
scurl = "https://api-v2.soundcloud.com/"
qcliid = "?client_id=ql3NWDyvuRgjpzwArS8lYmm2SrVGYLDz"
client = sc.Client(client_id=id)
class Track:
    """A single SoundCloud track resolved from a URL.

    Exposes id/name/artist metadata and, when the monetization model allows
    it, a download() method that saves the stream as an mp3.
    """
    def __init__(self, inp):
        data = json.loads(resolve(inp).text)
        resp = query("/tracks/" + str(data['id']))
        self.resp = resp
        self.content = parse(resp)
        self.id = self.content['id']
        self.name = self.content['title']
        self.artistid = self.content['user_id']
        self.artist = self.content['user']['username']
        # Only these monetization models are treated as downloadable here.
        if (self.content['monetization_model'] == 'AD_SUPPORTED') or (self.content['monetization_model'] == 'BLACKBOX') or (self.content['monetization_model'] == 'NOT_APPLICABLE'):
            self.downloadable = True
            try:
                # transcodings[1] is assumed to be the progressive stream —
                # TODO confirm the API's transcoding ordering.
                self.mpeg = self.content['media']['transcodings'][1]['url'] + qcliid
            except IndexError:
                print("WIP")
                self.downloadable = False
        else:
            self.downloadable = False
    def getMpeg(self):
        # The transcoding endpoint returns JSON whose 'url' is the media stream.
        url = parse(rq.get(self.mpeg))['url']
        return rq.get(url)
    def download(self):
        """Save the track as '<title> -- <artist>.mp3' in the current directory."""
        if self.downloadable:
            resp = self.getMpeg()
            name = self.name + " -- " + self.artist + ".mp3"
            # Slashes would be treated as path separators in the filename.
            name = name.replace('/', '|')
            name = name.replace('\\', '|')
            with open(name, "wb") as mpeg:
                for chunk in resp.iter_content(chunk_size=1024):
                    if chunk:
                        mpeg.write(chunk)
        else:
            print(self.name + " is not downloadable")
class Playlist:
    """A SoundCloud playlist resolved from a URL; wraps its tracks as Track objects."""
    def __init__(self, inp):
        data = json.loads(resolve(inp).text)
        try:
            resp = query("/playlists/" + str(data['id']))
        except KeyError:
            print("There was an error. Are you sure this is a playlist? If you are, is it public?")
            # NOTE(review): sys.exit() inside a constructor kills the whole
            # process — consider raising an exception instead.
            sys.exit()
        self.resp = resp
        self.content = parse(resp)
        self.name = self.content['title']
        self.id = self.content['id']
        self.artistid = self.content['user_id']
        self.artist = self.content['user']['username']
        tracks = self.content['tracks']
        # Each entry costs one extra API round-trip via Track().
        objTracks = []
        for track in tracks:
            temp = Track(idToUrl(track['id']))
            objTracks.append(temp)
        self.tracks = objTracks
    def download(self):
        """Download every track into a new '<title> -- <artist>' subdirectory."""
        cwd = os.getcwd()
        title = self.name + " -- " + self.artist
        path = os.path.join(cwd, title)
        os.mkdir(path)
        os.chdir(path)
        for track in self.tracks:
            track.download()
        os.chdir(cwd)
class User:
    """A SoundCloud user resolved from a URL, with their tracks/likes/social data."""
    def __init__(self, inp):
        data = json.loads(resolve(inp).text)
        resp = query("/users/" + str(data['id']))
        self.resp = resp
        self.content = parse(resp)
        self.id = self.content['id']
        self.name = self.content['full_name']
        self.tracks = parse(query("/users/" + str(data['id']) + "/tracks"))
        self.playlists = parse(query("/users/" + str(data['id']) + "/playlists"))
        self.followings = parse(query("/users/" + str(data['id']) + "/followings"))
        self.followers = parse(query("/users/" + str(data['id']) + "/followers"))
        self.comments = parse(query("/users/" + str(data['id']) + "/comments"))
        self.webProfiles = parse(query("/users/" + str(data['id']) + "/web-profiles"))
        likes = parse(query("/users/" + str(data['id']) + "/track_likes"))
        likes = likes['collection']
        # Wrap each liked track in a full Track object (one API call each).
        objLikes = []
        for like in likes:
            temp = Track(idToUrl(like['track']['id']))
            objLikes.append(temp)
        self.likes = objLikes
    def downloadLikes(self):
        """Download every liked track into a new "<name>'s likes" subdirectory."""
        cwd = os.getcwd()
        title = self.name + "'s likes"
        path = os.path.join(cwd, title)
        os.mkdir(path)
        os.chdir(path)
        for like in self.likes:
            like.download()
        os.chdir(cwd)
def resolve(inp):
    """Resolve a public SoundCloud URL via the API and fetch it on api-v2.

    The soundcloud client's HTTPError message embeds the resolved URL; that
    (or the direct result) is rewritten to api-v2 and fetched with requests.
    """
    out = ''
    try:
        out = client.get("/resolve", url=inp)
    except rq.exceptions.HTTPError as e:
        # The error text carries the redirect target URL.
        out = str(e)
    url = convertApiv2(out)
    resp = rq.get(url)
    return resp
def convertApiv2(resp):
    """Rewrite an api.soundcloud.com URL (possibly embedded in a 403 error
    message) into its api-v2 equivalent.

    Bug fix: the old code used ``str.strip(prefix)``, which removes a
    *character set* from both ends of the string — it could also eat leading
    or trailing URL characters that happen to appear in the error text.  The
    error prefix is now removed explicitly when present.
    """
    head, tail = resp.split('api', 1)
    url = head + "api-v2" + tail
    prefix = "403 Client Error: Forbidden for url: "
    if url.startswith(prefix):
        url = url[len(prefix):]
    return url
# Decode a requests Response body as JSON.
def parse(resp): return json.loads(resp.text)
def query(inp):
    """GET an API endpoint path via the soundcloud client, rewritten to api-v2.

    Mirrors resolve(): the HTTPError message carries the URL, which is then
    converted to api-v2 and fetched with requests.
    """
    out = ''
    try:
        out = client.get(inp)
    except rq.exceptions.HTTPError as e:
        out = str(e)
    url = convertApiv2(out)
    resp = rq.get(url)
    return resp
def idToUrl(inp):
    """Return the permalink URL of the track with numeric id *inp*."""
    url = scurl + "tracks/" + str(inp) + qcliid
    resp = rq.get(url)
    return parse(resp)['permalink_url']
# ADD CODE HERE
| idosyncrasi/soundcloud-dl | main.py | main.py | py | 4,200 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "soundcloud.Client",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_num... |
73593036584 | from django.urls import path
from django.contrib.auth.views import LoginView, LogoutView
from .views import home_view, account_view, meal_view, order_view, report_view, sign_up_view, add_meal_view, edit_meal_view
app_name = 'restaurant'  # URL namespace used for reverse()/{% url %} lookups
urlpatterns = [
    path('', home_view, name='home'),
    path('sign-up/', sign_up_view, name='sign-up'),
    # Auth views use app-local templates instead of registration/ defaults.
    path('sign-in/', LoginView.as_view(template_name='restaurant/sign_in.html'),
        name='sign-in'),
    path('sign-out/', LogoutView.as_view(template_name='restaurant/sign_out.html'),
        name='sign-out'),
    path('account/', account_view, name='account'),
    path('meal/', meal_view, name='meal'),
    path('meal/add/', add_meal_view, name='add-meal'),
    path('meal/<int:meal_id>/edit/', edit_meal_view, name='edit-meal'),
    path('order/', order_view, name='order'),
    path('report/', report_view, name='report'),
]
| AmarjotSingh21/food-delivery-django | restaurant/urls.py | urls.py | py | 882 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "views.home_view",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "views.sign_up_view... |
70911809705 | import os
import sys
import re
import json
import math
import argparse
import time
import subprocess
import numpy as np
import networkx as nx
import tensorflow as tf
import datetime
from operator import itemgetter
import collections
FILE_PATH = os.path.dirname(os.path.abspath(__file__))
FATHER_PATH = os.path.join(FILE_PATH, '..')
DATA_PATH = os.path.join(FATHER_PATH, 'data')
def main():
    """Split an edge-list file into an initial graph and a dynamic edge stream.

    Nodes with id below ``n = ratio * total`` seed the initial graph; every
    other edge is queued under its largest endpoint as a "dynamic" arrival.
    Optional self-loops are added for every node.  Writes two files next to
    the input: ``*_nw_init`` and ``*_nw_dynamic``.

    Fix: ``xrange`` is Python 2-only and raises NameError on Python 3;
    replaced with ``range`` (identical iteration semantics here, valid on
    both versions).
    """
    parser = argparse.ArgumentParser(formatter_class = argparse.RawTextHelpFormatter)
    parser.add_argument('--input_file', type = str, required = True)
    #parser.add_argument('--n', type = int, required = True)
    parser.add_argument('--ratio', type = float, required = True)
    parser.add_argument('--self_loop', type = str, default = "yes")
    args = parser.parse_args()
    args.input_file = os.path.join(DATA_PATH, args.input_file)
    nw_file = os.path.join(DATA_PATH, args.input_file + "_nw.dat")
    n = 0
    m = 0
    G_init = []
    G_dynamic = {}
    with open(nw_file, "r") as f:
        for line in f:
            line = line.strip()
            if len(line) == 0:
                continue
            items = line.split()
            if len(items) == 1:
                # Header line carries the total node count.
                n = int(args.ratio * float(items[0]))
            if len(items) != 2:
                continue
            m = max(int(items[1]), int(items[0]), m)
            if int(items[1]) < n and int(items[0]) < n:
                G_init.append(items)
            else:
                # A dynamic edge arrives when its larger endpoint appears.
                it = max(int(items[0]), int(items[1]))
                if it not in G_dynamic:
                    G_dynamic[it] = [items]
                else:
                    G_dynamic[it].append(items)
    if args.self_loop == "yes":
        for i in range(n):
            G_init.append((str(i), str(i)))
        for i in range(n, m + 1):
            if i not in G_dynamic:
                G_dynamic[i] = [(str(i), str(i))]
            else:
                G_dynamic[i].append((str(i), str(i)))
    init_nw_file = os.path.join(DATA_PATH, args.input_file + "_" + str(n) + "_" + str(args.ratio) + "_nw_init")
    dynamic_nw_file = os.path.join(DATA_PATH, args.input_file + "_" + str(n) + "_" + str(args.ratio) + "_nw_dynamic")
    with open(init_nw_file, "w") as f:
        f.write(str(n) + "\n")
        for u, v in G_init:
            f.write(str(u) + "\t" + str(v) + "\n")
    # Emit dynamic arrivals in increasing node order.
    tmp = [(k, G_dynamic[k]) for k in sorted(G_dynamic.keys())]
    with open(dynamic_nw_file, "w") as f:
        for u, s in tmp:
            f.write(str(u) + "\t" + str(len(s)) + "\n")
            for v, w in s:
                f.write(str(v) + "\t" + str(w) + "\n")
            f.write("\n")
if __name__ == "__main__":
    main()
| luke28/DNE | tools/get_input.py | get_input.py | py | 2,670 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line... |
from collections import deque
from sys import stdin, exit
input = stdin.readline

# Flood-fill from the top-left corner of an n x n height grid; a step between
# adjacent cells is legal when their height difference is at most max_step.
n, max_step = [int(tok) for tok in input().split()]
heights = [[int(tok) for tok in input().split()] for _ in range(n)]
seen = [[False] * n for _ in range(n)]
seen[0][0] = True
frontier = deque([[0, 0]])
offsets = [[1, 0], [0, 1], [-1, 0], [0, -1]]
while frontier:
    r, c = frontier.popleft()
    for dr, dc in offsets:
        nr, nc = r + dr, c + dc
        if 0 <= nr < n and 0 <= nc < n and not seen[nr][nc] and abs(heights[r][c] - heights[nr][nc]) <= max_step:
            seen[nr][nc] = True
            frontier.append([nr, nc])
    # Stop as soon as the bottom-right corner is reachable.
    if seen[n - 1][n - 1]:
        print("yes")
        exit()
print("no")
| AAZZAZRON/DMOJ-Solutions | dmopc13c3p3.py | dmopc13c3p3.py | py | 679 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.stdin.readline",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "collections.deque",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"lin... |
34385049617 | """ Code for computing SW distances between PDs [1]_ of point cloud summaries of activations
Notes
-----
Relevant section : Experiments with PH
Relevant library : `Persim` [2]_
References
----------
.. [1] Carrière, M.; Cuturi, M.; and Oudot, S. 2017. Sliced Wasserstein Kernel for
Persistence Diagrams. In Precup, D.; and Teh, Y. W., eds., Proceedings of the
34th International Conference on Machine Learning, volume 70 of Proceedings of
Machine Learning Research, 664–673. PMLR.
.. [2] Saul, N.; and Tralie, C. 2019. Scikit-TDA: Topological Data Analysis for Python.
"""
import pickle
import argparse
import numpy as np
from persim import sliced_wasserstein
# UPDATE THESE TO REFLECT YOUR OWN DIRECTORIES AND FILE NAMING CONVENTIONS:
# path to project directory containing all model and experiment files
projectdir = '/rcfs/projects/blaktop'
# path to experiment directory containing PH results
expdir = f'{projectdir}/resnet_cifar_experiments'
# directory prefix and filename suffix for PH results per model/batch
# e.g., path to PH results for model i on batch b is expdir/prefix_{i}/persistence_batch{b}filesuffix.p
prefix = 'resnet18_cifar_large'
filesuffix = '_1000'
# number of randomly initialized models (used in 'cross' mode, see below)
num_models = 100
def get_layers(PH):
    """ Returns layers from PH dict keys in the correct order

    Note
    ----
    Specifically designed for the module names defined in `cifar_resnet.resnet18`
    """
    # Dropping the 'conv' prefix sorts conv1 ahead of every block_seq key.
    def order_key(name):
        return name.replace('conv', '')
    return sorted(PH, key=order_key)
def SW_dist_internal(PH, layers):
    """ Computes SW distance between layers of a model """
    count = len(layers)
    dist = np.zeros((count, count))
    # Only the upper triangle is computed; the matrix is symmetric.
    for i in range(count - 1):
        for j in range(i + 1, count):
            d = sliced_wasserstein(PH[layers[i]]['dgms'][1], PH[layers[j]]['dgms'][1])
            dist[i][j] = d
            dist[j][i] = d
    return dist
def SW_dist_cross_model(PH_i, PH_j, layers):
    """ Computes SW distances between layers for two differently initialized models """
    count = len(layers)
    dist = np.zeros((count, count))
    # Not symmetric in general: rows index model i's layers, columns model j's.
    for i, layer_i in enumerate(layers):
        dist[i] = [
            sliced_wasserstein(PH_i[layer_i]['dgms'][1], PH_j[layer_j]['dgms'][1])
            for layer_j in layers
        ]
    return dist
# CLI entry point: for each batch/model pair, load the pickled PH results and
# save the SW distance matrix ('int' = within one model, 'cross' = vs others).
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='SW Distances between PDs of Point Cloud Summaries')
    parser.add_argument('-fs', type=int, help='run index of first random seed ResNet-18 model', default=0)
    parser.add_argument('-ls', type=int, help='run index of last random seed ResNet-18 model (exclusive)', default=1)
    parser.add_argument('-fb', type=int, help='index of first batch', default=0)
    parser.add_argument('-lb', type=int, help='index of last batch (exclusive)', default=1)
    parser.add_argument('-m', type=str, help="mode: either 'int' for single model internal distances, or 'cross' for distances between differently initialized models", default='int')
    args = parser.parse_args()
    for b in range(args.fb, args.lb):
        filename = f'persistence_batch{b}{filesuffix}.p'
        for i in range(args.fs, args.ls):
            savepath = f'{expdir}/{prefix}_{i}'
            # NOTE(review): '(unknown)' looks like a redacted placeholder —
            # presumably this should be the `filename` built above; confirm.
            PH = pickle.load(open(f'{savepath}/(unknown)','rb'))
            layers = get_layers(PH)
            if args.m == 'int':
                dist = SW_dist_internal(PH, layers)
            else:
                # Concatenate this model's distances against every other model
                # (self-distances in slot i) along the column axis.
                dist = []
                for j in range(num_models):
                    if j != i:
                        otherpath = f'{expdir}/{prefix}_{j}'
                        PH_other = pickle.load(open(f'{otherpath}/(unknown)','rb'))
                        dist_other = SW_dist_cross_model(PH, PH_other, layers)
                    else:
                        dist_other = SW_dist_internal(PH,layers)
                    dist.append(dist_other)
                dist = np.concatenate(dist, axis=1)
            np.save(f'{savepath}/sliced_wasserstein_batch{b}{filesuffix}_{args.m}', dist)
| pnnl/DeepDataProfiler | papers_with_code/ExperimentalObservations/AAAI-code-PH/SW_distances.py | SW_distances.py | py | 4,209 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "persim.sliced_wasserstein",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "persim.sliced_wasse... |
18760515421 | import numpy as np
import cv2
import screeninfo
import oa_ls
def init_proj(window_name, screen_id):
    """Make *window_name* fullscreen on monitor *screen_id*; return (width, height)."""
    screen = screeninfo.get_monitors()[screen_id]
    width, height = screen.width, screen.height
    # Move just inside the target monitor's origin before going fullscreen
    # (presumably so fullscreen lands on that monitor) — TODO confirm.
    cv2.moveWindow(window_name, screen.x -1, screen.y-1)
    cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                        cv2.WINDOW_FULLSCREEN)
    return width, height
def show_laser_line():
    """Project a red speckled laser-scan line fullscreen on the second monitor."""
    cv2.namedWindow("window", cv2.WND_PROP_FULLSCREEN)
    width, height = init_proj("window", 1)
    # NOTE(review): create_laser_scan_line_speckle is neither defined nor
    # imported here — likely meant to come from oa_ls; confirm, otherwise
    # this raises NameError.
    img_ls = create_laser_scan_line_speckle((0,0,255), 1, width, height, 3)
    cv2.imshow("window", img_ls)
    cv2.waitKey(0)
if __name__ == '__main__':
    show_laser_line()
| olaals/multivision-depr | multivision/oa_realapi.py | oa_realapi.py | py | 699 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "screeninfo.get_monitors",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.moveWindow",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.setWindowProperty",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.WND_P... |
31701293102 |
import os
import importlib
from collections import OrderedDict
from .utils import (SettingsLoader, ProjectSettings, ThemeSettings,
ShareData, PathResolver, SysPathContextManager)
from .protocol import PluginRegister
from .sequence_analyze import SequenceParser
class SettingsProcedure:
@classmethod
def _load_share_data(cls, loaders):
ShareData.load_data(loaders)
@classmethod
def _load_project_settings(cls, path):
project_settings_loader = SettingsLoader(path)
ProjectSettings.load_data(project_settings_loader)
cls._load_share_data(project_settings_loader)
@classmethod
def _load_theme_settings(cls, path, name):
theme_settings_loader = SettingsLoader(path, name)
ThemeSettings.load_data(theme_settings_loader)
cls._load_share_data(theme_settings_loader)
@classmethod
def _load_settings(cls):
pr = PathResolver
# set up ProjectSettings
project_settings_path = pr.project_settings()
cls._load_project_settings(project_settings_path)
# set up ThemeSettings
theme_settings_set = []
for theme_name in ProjectSettings.get_registered_theme_name():
theme_settings_path = pr.theme_settings(theme_name)
cls._load_theme_settings(theme_settings_path, theme_name)
@classmethod
def _load_themes(cls):
pr = PathResolver
theme_dir = pr.themes()
for theme_name in ProjectSettings.get_registered_theme_name():
with SysPathContextManager(theme_name, theme_dir):
importlib.import_module(theme_name)
@classmethod
def run(cls, project_path=None):
# project_path is None means the path has already been set.
if project_path:
PathResolver.set_project_path(project_path)
cls._load_settings()
cls._load_themes()
class PluginProcedure:
runtime_components = ['pre_load', 'in_load', 'post_load',
'pre_process', 'in_process', 'post_process',
'pre_write', 'in_write', 'post_write']
extended_procedure = ['cli_extend']
@classmethod
def _get_plain_text(cls, theme_name, field_name):
search_key = '{}.{}'.format(theme_name, field_name)
plain_text = ThemeSettings.get(search_key)
return plain_text
@classmethod
def _get_execution_orders(cls):
error_happend = False
exec_orders = OrderedDict()
# In this function, exec_orders contains both default and extended
# procedures.
for component in (cls.runtime_components + cls.extended_procedure):
parser = SequenceParser()
for theme_name in ProjectSettings.get_registered_theme_name():
plain_text = cls._get_plain_text(theme_name, component)
if plain_text is None:
continue
# analyze
parser.analyze(theme_name, plain_text)
if parser.error:
parser.report_error()
error_happend = True
else:
exec_orders[component] = parser.generate_sequence()
return error_happend, exec_orders
@classmethod
def _linearize_exec_orders(cls, exec_orders):
# extract cli_indices.
extract_field = cls.extended_procedure[0]
cli_indices = exec_orders[extract_field]
del exec_orders[extract_field]
# generate plugin calling sequence.
flat_orders = []
for container in exec_orders.values():
flat_orders.extend(container)
return flat_orders, cli_indices
@classmethod
def _verify_plugins(cls, flat_orders):
for plugin_index in flat_orders:
plugin = PluginRegister.get_plugin(plugin_index)
if plugin is None:
# can not find such plugin
print('Can Not Find {}'.format(plugin_index))
return True
return False
@classmethod
def run(cls):
parse_error, exec_orders = cls._get_execution_orders()
flat_order, cli_indices = cls._linearize_exec_orders(exec_orders)
match_error = cls._verify_plugins(flat_order + cli_indices)
if parse_error or match_error:
raise SyntaxError('Error happended, suspend program.')
return flat_order, cli_indices
| huntzhan/GeekCMS | geekcms/loadup.py | loadup.py | py | 4,407 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "utils.ShareData.load_data",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "utils.ShareData",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "utils.SettingsLoader",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "utils.... |
33540908783 | import logging
from typing import List
import funppy
def sum(*args):
result = 0
for arg in args:
result += arg
return result
def sum_ints(*args: List[int]) -> int:
result = 0
for arg in args:
result += arg
return result
def sum_two_int(a: int, b: int) -> int:
return a + b
def sum_two_string(a: str, b: str) -> str:
return a + b
def sum_strings(*args: List[str]) -> str:
result = ""
for arg in args:
result += arg
return result
def concatenate(*args: List[str]) -> str:
result = ""
for arg in args:
result += str(arg)
return result
def setup_hook_example(name):
logging.warning("setup_hook_example")
return f"setup_hook_example: {name}"
def teardown_hook_example(name):
logging.warning("teardown_hook_example")
return f"teardown_hook_example: {name}"
if __name__ == '__main__':
funppy.register("sum", sum)
funppy.register("sum_ints", sum_ints)
funppy.register("concatenate", concatenate)
funppy.register("sum_two_int", sum_two_int)
funppy.register("sum_two_string", sum_two_string)
funppy.register("sum_strings", sum_strings)
funppy.register("setup_hook_example", setup_hook_example)
funppy.register("teardown_hook_example", teardown_hook_example)
funppy.serve()
| httprunner/hrp | examples/debugtalk.py | debugtalk.py | py | 1,315 | python | en | code | 83 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "logging.warning",
"line_numbe... |
5600126262 | import boto3
import json
import logging
import os
from base64 import b64decode
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError
SLACK_CHANNEL = '#general'
HOOK_URL = 'https://hooks.slack.com/services/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'
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def main(event, context):
logger.info("Event: " + str(event))
message = json.loads(event['Records'][0]['Sns']['Message'])
logger.info("Message: " + str(message))
#alarm_name = message['AlarmName']
#old_state = message['OldStateValue']
#new_state = message['NewStateValue']
#reason = message['NewStateReason']
slack_message = {
'channel': SLACK_CHANNEL,
'text': "New error message: %s" % (str(message))
}
req = Request(HOOK_URL, json.dumps(slack_message).encode('utf-8'))
try:
response = urlopen(req)
response.read()
logger.info("Message posted to %s", slack_message['channel'])
except HTTPError as e:
logger.error("Request failed: %d %s", e.code, e.reason)
except URLError as e:
logger.error("Server connection failed: %s", e.reason)
| ManuelGago/VodChallenge | vodchallenge/error_to_slack.py | error_to_slack.py | py | 1,195 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "urllib.request.Request... |
17237472456 | #키와 몸무게로 성별 classification
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors, datasets
from sklearn import svm
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
file=open('gender_dataset.txt')
gender=[]
height=[]
weight=[]
print("▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆\nCROSS VALIDATION\n▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆▆")
for line in file.readlines():
line = line.replace('\n', '')
g,h,w = line.split('\t')
gender.append(str(g))
height.append(float(h))
weight.append(float(w))
# print(gender)
# print(height)
# print(weight)
X=[]
for i in range(len(gender)):
X.append([height[i],weight[i]])
y=gender
# print(X)
# print(y)
# plt.scatter(X[:,0], X[:,1], c=y, s=30, cmap=plt.cm.Paired)
k_fold=int(input("cross validation할 k_fold값: "))
new_X=[[] for i in range(k_fold)]
new_y=[[] for i in range(k_fold)]
#male
male_count=0
group=0
for i in range(len(gender)):
if(y[i]=="Male"):
male_count+=1
new_X[group].append(X[i])
new_y[group].append(y[i])
if(male_count==int(len(gender)/2/k_fold)):
male_count=0
group+=1
#female
female_count=0
fgroup=0
for i in range(len(gender)):
if(y[i]=="Female"):
female_count+=1
new_X[fgroup].append(X[i])
new_y[fgroup].append(y[i])
if(female_count==int(len(gender)/2/k_fold)):
female_count=0
fgroup+=1
# print(len(new_X[0]))
total_percentage=0
models = input("모델의 종류를 입력해주세요(linear,poly,rbf,sigmoid,precomputed,lda,knn): ")
if models=="knn":
neigh = int(input("n_neighbors 값을 입력하세요: "))
for test_group in range(k_fold):
# if(test_group!=0):continue
train_X=[]
train_y=[]
test_X=[]
test_y=[]
for target_group in range(k_fold):
if(target_group==test_group):
test_X=new_X[target_group]
test_y=new_y[target_group]
elif(target_group!=test_group):
train_X=train_X+new_X[target_group]
train_y=train_y+new_y[target_group]
# print(str("test group: ")+str(test_group))
# print(len(test_X))
# print(len(train_X)
if models=="linear":
clf = svm.SVC(kernel="linear") #svm_linear
elif models=="poly":
clf = svm.SVC(kernel="poly") #svm_poly
elif models=="rbf":
clf = svm.SVC(kernel="rbf") #svm_poly
elif models=="sigmoid":
clf = svm.SVC(kernel="sigmoid") #svm_poly
elif models=="precomputed":
clf = svm.SVC(kernel="precomputed") #svm_poly
elif models=="lda":
clf = LinearDiscriminantAnalysis(n_components=1) #lda
elif models=="knn":
#knn start
n_neighbors = neigh
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
#knn end
else:
models = input("오류 발생. 다시 시도해주세요.")
break
clf.fit(train_X, train_y)
model_answer=clf.predict(test_X)
total_count=0
correct_count=0
for i in range(len(model_answer)):
total_count+=1
if(model_answer[i]==test_y[i]):
correct_count+=1
percentage=correct_count/total_count*100
total_percentage+=percentage
print("테스트 그룹: "+str(test_group+1))
print("정확도: "+str(percentage)+"% ("+str(correct_count)+"/"+str(total_count)+")\n")
total_percentage/=k_fold
print("----------------------------------------")
print("모델 종류: "+str(models.upper()))
print("cross validation 전체 정확도: "+str(total_percentage)+"%")
print("----------------------------------------\n<키와 몸무게로 성별 예측>")
# ax = plt.gca()
# xlim = ax.get_xlim()
# ylim = ax.get_ylim()
# xx = np.linspace(xlim[0], xlim[1], 30)
# yy = np.linspace(ylim[0], ylim[1], 30)
# YY, XX = np.meshgrid(yy, xx)
# xy = np.vstack([XX.ravel(), YY.ravel()]).T
# Z = clf.decision_function(xy).reshape(XX.shape)
# ax.contour(XX, YY, Z, colors='k', levels=[-1,0,1], alpha=0.5, linestyles=['--', '-', '--'])
# ax.scatter(clf.support_vectors_[:,0], clf.support_vectors_[:,1], s=60, facecolors='r')
# plt.show()
while True:
a,b = map(float,input("키와 몸무게 값을 공백 한 칸을 두고 입력해주세요: ").split(" "))
newdata = [[a,b]]
print(clf.predict(newdata))
| hoony6134/kaist | gender.py | gender.py | py | 4,612 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.svm.SVC",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "sklearn.svm",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "sklearn.svm",
"line_n... |
23124927437 | from django.conf.urls import patterns, include, url
from rest_framework.routers import DefaultRouter
from blog import views
router = DefaultRouter()
router.register(r'post', views.PostViewSet)
router.register(r'category', views.CategoryViewSet)
urlpatterns = patterns('',
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
)
| PolarIt/myblog | myblog/urls.py | urls.py | py | 398 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "blog.views.PostViewSet",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "blog.views",
"line_number": 6,
"usage_type": "name"
},
{
"api_name... |
6418206467 | import logging
from datetime import datetime
from flask import request
from flask_mail import Mail, Message
from service.schemas import MessageSchema, EmailSchema
from service.models import tr_messages, tm_emails
from service import db, app
from service.tasks.task_email import email_users
def saveMessage(data):
try:
# Convert timestamp string to general datetime format
data['timestamp'] = convertToDateTime(data['timestamp'])
# Timestamp should be greater than now
if data['timestamp'] > datetime.now():
# Get delta time in seconds
delaySeconds = getSecondsDifference(data['timestamp'])
# Save message to database
message = tr_messages.Messages(**data)
db.session.add(message)
arEmails = list()
emails = getEmails()
for email in emails:
arEmails.append(email['email'])
print(arEmails, data["email_subject"], data["email_content"])
# Call email task asynchronously
# ARGS = Email addreses, subject, content
# Countdown = delta time in seconds
email_users.apply_async(args=[arEmails, data["email_subject"], data["email_content"]], countdown=delaySeconds)
# Commit db transaction
return db.session.commit()
else:
return 'Check your datetime'
except Exception as e:
logging.exception(e)
return 'Please check your request'
def getEmails():
emails = tm_emails.Emails.query.all()
return EmailSchema.all_email_schema.dump(emails).data
def getMessageAtTimestamp():
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M')
messages = tr_messages.Messages.query.all()
return MessageSchema.all_message_schema.dump(messages).data
def getSecondsDifference(dt):
dtDelta = (dt - datetime.now()).total_seconds()
return int(dtDelta)
# Send email function with SMTP
def sendEmail(email_addresses, subject, message):
print(email_addresses, subject, message)
try:
mail = Mail(app)
msg = Message(subject, sender=(app.config['MAIL_SENDER'], app.config['MAIL_USERNAME']), recipients=email_addresses)
msg.body = message
response = mail.send(msg)
logging.info(response)
return 'success'
except Exception as e:
logging.exception("Email error")
return 'failed'
# return request.post(
# "https://api.mailgun.net/v3/{}/messages".format(app.config['MAIL_DOMAIN']),
# auth=("api", app.config['MAIL_API_KEY']),
# data={"from": "{}} <mailgun@{}}>".format(app.config['MAIL_SENDER'], app.config['MAIL_DOMAIN']),
# "to": [email_address],
# "subject": subject,
# "text": message}
# )
def convertToDateTime(str):
return datetime.strptime(str, '%d %b %Y %H:%M')
| robinraintama/flask_email | service/controllers/MessageController.py | MessageController.py | py | 3,148 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "service.models.tr_messages.Messages",
"line_number": 22,
"usage_type": "call"
},
{
"api_n... |
7424990082 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Harmonia's Kärcher
# Fixes surnäme items in ä mässive wäy.
import json
from SPARQLWrapper import SPARQLWrapper, JSON
import requests
from bs4 import BeautifulSoup
def getWDcontent(item):
sparql.setQuery("""
SELECT DISTINCT ?lang ?label ?description WHERE {{
{{
SELECT ?lang ?label WHERE {{
wd:{0} rdfs:label ?label .
BIND(LANG(?label) AS ?lang) .
}}
}} UNION {{
SELECT ?lang ?description WHERE {{
wd:{0} schema:description ?description .
BIND(LANG(?description) AS ?lang) .
}}
}}
}} ORDER BY ?lang
""".format(item)) # Sample query: http://tinyurl.com/hj4z2hu
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
results = results["results"]["bindings"]
label_langs = {}
descriptions = []
for res in results:
for k, v in res.items():
if k == "label":
lang = v['xml:lang']
if lang not in label_langs:
label = v['value']
label_langs[lang] = label
if lang not in all_labels_languages:
all_labels_languages.append(lang)
elif k == "description":
lang = v['xml:lang']
descriptions.append(lang)
print(' - Labels found in {} language(s)'.format(len(label_langs)))
print(' - Descriptions found in {} language(s)'.format(len(descriptions)))
return label_langs, descriptions
# Global variables
all_labels_languages = []
all_items = []
# Languages and descriptions
with open("resources/surname.json") as file:
surname_descriptions = json.load(file)
file.close()
out = ""
all_langs = ['af', 'an', 'ast', 'bar', 'bm', 'br', 'ca', 'co', 'cs', 'cy',
'da', 'de', 'de-at', 'de-ch', 'en', 'en-ca', 'en-gb', 'eo', 'es',
'et', 'eu', 'fi', 'fr', 'frc', 'frp', 'fur', 'ga', 'gd', 'gl',
'gsw', 'hr', 'hu', 'ia', 'id', 'ie', 'io', 'it', 'jam', 'kab',
'kg', 'lb', 'li', 'lij', 'lt', 'lv', 'mg', 'min', 'ms', 'nap',
'nb', 'nds', 'nds-nl', 'nl', 'nn', 'nrm', 'oc', 'pap', 'pcd',
'pl', 'pms', 'prg', 'pt', 'pt-br', 'rgn', 'rm', 'ro', 'sc', 'scn',
'sco', 'sk', 'sr-el', 'sv', 'sw', 'tr', 'vec', 'vi', 'vls', 'vmf',
'vo', 'wa', 'wo', 'zu', 'fo', 'is', 'kl']
# endpoint
endpoint = "https://query.wikidata.org/bigdata/namespace/wdq/sparql"
sparql = SPARQLWrapper(endpoint)
# Main query
rest_base = "https://www.wikidata.org/api/rest_v1/"
rest_request = "page/html/User%3AHarmonia_Amanda%2FNames"
response = requests.get(rest_base + rest_request)
soup = BeautifulSoup(response.text, "lxml")
all_items = soup.p.text.split()
for item in all_items:
print('\nParsing item {}'.format(item))
labels, descriptions = getWDcontent(item)
label = labels['en']
out += "{}\tAen\t{} (surname)\n".format(item, label)
# We fix descriptions first to avoid conflicts
for lang, description in surname_descriptions.items():
out += "{}\tD{}\t{}\n".format(item, lang, description)
# Force empty descriptions for languages not in the previous list
for lang in descriptions:
if lang not in surname_descriptions.keys():
out += "{}\tD{}\t\"\"\n".format(item, lang)
print(labels, descriptions, label)
for lang in all_langs:
out += "{}\tL{}\t{}\n".format(item, lang, label)
out += "\n"
f = open('temp-qs.txt', 'w')
f.write(out)
f.close()
f = open('temp-ps.txt', 'w')
f.write(('\n').join(all_items))
f.close()
qs_url = "https://tools.wmflabs.org/wikidata-todo/quick_statements.php"
ps_url = "https://petscan.wmflabs.org/#tab_other_sources"
print("\n=============")
print("Operation complete! {} items parsed.".format(len(all_items)))
print("- Please paste the content of temp-qs.txt to {}".format(qs_url))
ps_txt = "- Please paste the content of temp-ps.txt to {} ".format(ps_url)
ps_txt += "and run the command '-P31:Q4167410'"
print(ps_txt)
print("Note: during the execution of the script,")
print(" labels were found in the following languages:")
print(', '.join(all_labels_languages))
| Ash-Crow/scrapers | harmonias-karcher.py | harmonias-karcher.py | py | 4,238 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "SPARQLWrapper.JSON",
"line_number": 29,
"usage_type": "argument"
},
{
"api_name": "json.load",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "SPARQLWrapper.SPARQLWrapper",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "requests... |
15134640368 | import codecs
import logging
import pandas as pd
from ekorpkit import eKonf
log = logging.getLogger(__name__)
class BaseInfo:
Keys = eKonf.Keys
def __init__(self, **args):
self.args = eKonf.to_config(args)
self._initialized = False
def __str__(self):
classname = self.__class__.__name__
s = f"{classname} :\n{self.INFO}"
return s
def init_info(self, data):
if self._initialized:
return data
if isinstance(data, pd.DataFrame):
log.info(
f"index: {self.INDEX}, index of data: {data.index.name}, columns: {list(data.columns)}, id: {self.IDs}"
)
if data.index.name is None:
data.index.name = self.INDEX
elif self.INDEX is None:
self.INDEX = data.index.name
elif self.INDEX != data.index.name and self.INDEX in data.columns:
data = self.set_index(data, self.INDEX)
elif self.INDEX != data.index.name and self.INDEX not in data.columns:
log.warning(f"{self.INDEX} not in dataframe")
if not self.IDs or self.IDs[0] == self.Keys.INDEX.value:
self.IDs = [self.INDEX]
self.set_dtypes(data)
self._initialized = True
return data
def set_index(self, data, name):
if isinstance(data, pd.DataFrame):
if name in data.columns:
data.set_index(name, inplace=True)
self.INDEX = name
else:
log.warning(f"{name} not in dataframe")
return data
def reset_index(
self,
data,
rename_old_index=None,
drop=False,
):
if isinstance(data, pd.DataFrame):
if self.INDEX in data.columns:
data.drop(self.INDEX, axis=1, inplace=True)
data = data.reset_index(drop=drop)
if not drop and rename_old_index is not None and self.INDEX in data.columns:
data = data.rename(columns={self.INDEX: rename_old_index})
self.INDEX = self.Keys.INDEX.value
self.set_dtypes(data)
return data
def reset_id(self, data):
if isinstance(data, pd.DataFrame):
data.rename(columns={self.ID: self._ID}, inplace=True)
data = self.reset_index(data, rename_old_index=self.ID)
return data
def combine_ids(self, data):
if self.IDs is None:
return data
if isinstance(data, pd.DataFrame):
if len(self.IDS) > 1:
data[self.ID] = data[self.IDs].apply(
lambda row: self.ID_SEPARATOR.join(row.values.astype(str)),
axis=1,
)
return data
def common_columns(self, dataframes):
"""
Find common columns between dataframes
"""
if not isinstance(dataframes, list):
dataframes = [dataframes]
common_columns = list(set.intersection(*(set(df.columns) for df in dataframes)))
df = dataframes[0][common_columns].copy()
dtypes = df.dtypes.apply(lambda x: x.name).to_dict()
self.DATATYPEs = dtypes
return common_columns
def to_datetime(self, data):
if self.DATETIME_INFO is None:
return data
_columns = eKonf.ensure_list(self.DATETIME_INFO.get(eKonf.Keys.COLUMNS))
_format = self.DATETIME_INFO.get(eKonf.Keys.FORMAT, None)
rcParams = self.DATETIME_INFO.get(eKonf.Keys.rcPARAMS) or {}
if _columns is None:
log.info("No datetime column found")
return data
if isinstance(data, pd.DataFrame):
for _col in _columns:
if _col in data.columns:
data[_col] = pd.to_datetime(data[_col], format=_format, **rcParams)
log.info(f"converted datetime column {_col}")
return data
def append_id(self, _id):
log.info(f"Adding id [{_id}] to {self.IDs}")
if self.IDs is None:
self.IDs = [_id]
else:
if isinstance(self.IDs, str):
self.IDs = [self.IDs]
self.IDs += [_id]
log.info(f"Added id [{_id}], now {self.IDs}")
def append_dataset(self, data, _dataset):
if _dataset is None:
return data
if isinstance(data, pd.DataFrame):
data[self.DATASET] = _dataset
if self.DATASET not in self.IDs:
self.append_id(self.DATASET)
if self.DATA and self.DATASET not in self.DATA:
self.DATATYPEs[self.DATASET] = "str"
log.info(f"Added a column [{self.DATASET}] with value [{_dataset}]")
return data
def append_split(self, data, _split):
if _split is None:
return data
if isinstance(data, pd.DataFrame):
data[self.SPLIT] = _split
if self.SPLIT not in self.IDs:
self.append_id(self.SPLIT)
if self.DATA and self.SPLIT not in self.DATA:
self.DATATYPEs[self.SPLIT] = "str"
log.info(f"Added a column [{self.SPLIT}] with value [{_split}]")
return data
def set_dtypes(self, data):
if isinstance(data, pd.DataFrame):
dtypes = data.dtypes.apply(lambda x: x.name).to_dict()
self.DATATYPEs = dtypes
return data
@property
def _ID(self):
return self.Keys._ID.value
@property
def ID_SEPARATOR(self):
return eKonf.Defaults.ID_SEP.value
@property
def INFO(self):
return self.args
@property
def DATETIME_INFO(self):
return self.INFO.get(self.Keys.DATETIME)
@DATETIME_INFO.setter
def DATETIME_INFO(self, value):
self.INFO[eKonf.Keys.DATETIME.value] = value
@property
def DATATYPEs(self):
return self.INFO.get(eKonf.Keys.DATA)
@DATATYPEs.setter
def DATATYPEs(self, value):
self.INFO[eKonf.Keys.DATA.value] = value
@property
def COLUMNs(self):
return self.INFO.get(eKonf.Keys.COLUMNS) or {}
@COLUMNs.setter
def COLUMNs(self, value):
self.INFO[eKonf.Keys.COLUMNS.value] = value
@property
def DATA(self):
if self.DATATYPEs is None:
return None
return list(self.DATATYPEs.keys())
@property
def DATASET(self):
return eKonf.Keys.DATASET.value
@property
def INDEX(self):
return self.COLUMNs.get(eKonf.Keys.INDEX) or eKonf.Keys.INDEX.value
@INDEX.setter
def INDEX(self, value):
self.COLUMNs[eKonf.Keys.INDEX.value] = value
@property
def ID(self):
return eKonf.Keys.ID.value
@property
def IDs(self):
return eKonf.ensure_list(self.COLUMNs.get(self.ID))
@IDs.setter
def IDs(self, value):
self.COLUMNs[self.ID] = value
@property
def SPLIT(self):
return eKonf.Keys.SPLIT.value
class CorpusInfo(BaseInfo):
def __init__(self, **args):
super().__init__(**args)
def to_timestamp(self, data, metadata=None):
if self.TIMESTAMP_INFO is None:
return data, metadata
_key = self.TIMESTAMP_INFO.get(eKonf.Keys.KEY)
_format = self.TIMESTAMP_INFO.get(eKonf.Keys.FORMAT)
rcParams = self.TIMESTAMP_INFO.get(eKonf.Keys.rcPARAMS) or {}
if _key is None:
log.info("No timestamp key found")
return data, metadata
if isinstance(data, pd.DataFrame):
if _key in data.columns:
data[self.TIMESTAMP] = pd.to_datetime(
data[_key], format=_format, **rcParams
)
log.info(f"Loaded timestamp column {self.TIMESTAMP}")
elif metadata is not None and _key in metadata.columns:
metadata[self.TIMESTAMP] = pd.to_datetime(
metadata[_key], format=_format, **rcParams
)
df_dt = metadata[self.MERGE_META_ON + [self.TIMESTAMP]].copy()
data = data.merge(df_dt, on=self.MERGE_META_ON, how="left")
# metadata.drop(self.TIMESTAMP, axis=1, inplace=True)
log.info(f"Timestamp column {self.TIMESTAMP} added to data")
return data, metadata
def combine_texts(self, data):
if self.TEXTs is None:
return data
if isinstance(data, pd.DataFrame):
data[self.TEXTs] = data[self.TEXTs].fillna("")
if len(self.TEXTs) > 1:
data[self.TEXT] = data[self.TEXTs].apply(
lambda row: self.SEGMENT_SEP.join(row.values.astype(str)),
axis=1,
)
self.DATATYPEs = {
k: v for k, v in self.DATATYPEs.items() if k not in self.TEXTs
}
self.DATATYPEs[self.TEXT] = "str"
return data
def merge_metadata(self, data, metadata):
if metadata is None:
return data
meta_cols = [col for col in metadata.columns if col not in data.columns]
meta_cols += self.MERGE_META_ON
data = data.merge(metadata[meta_cols], on=self.MERGE_META_ON, how="left")
return data
def append_split_to_meta(self, metadata, _split):
if _split is None:
return metadata
if isinstance(metadata, pd.DataFrame):
metadata[self.SPLIT] = _split
if self.METADATA and self.SPLIT not in self.METADATA:
self.METATYPEs[self.SPLIT] = "str"
log.info(f"Added a column [{self.SPLIT}] with value [{_split}]")
return metadata
def append_corpus(self, data, _corpus):
if _corpus is None:
return data
if isinstance(data, pd.DataFrame):
data[self.CORPUS] = _corpus
if self.CORPUS not in self.IDs:
self.append_id(self.CORPUS)
if self.DATA and self.CORPUS not in self.DATA:
self.DATATYPEs[self.CORPUS] = "str"
if self.METADATA and self.CORPUS not in self.METADATA:
self.METATYPEs[self.CORPUS] = "str"
log.info(f"Added a column [{self.CORPUS}] with value [{_corpus}]")
return data
@property
def MERGE_META_ON(self):
return eKonf.ensure_list(self.COLUMNs.get(eKonf.Keys.META_MERGE_ON)) or self.IDs
@MERGE_META_ON.setter
def MERGE_META_ON(self, value):
self.COLUMNs[eKonf.Keys.META_MERGE_ON.value] = value
@property
def TEXT(self):
return eKonf.Keys.TEXT.value
@property
def TEXTs(self):
return eKonf.ensure_list(self.COLUMNs.get(self.TEXT))
@TEXTs.setter
def TEXTs(self, value):
self.COLUMNs[self.TEXT] = value
@property
def METADATA(self):
if self.METATYPEs is None:
return None
return list(self.METATYPEs.keys())
@property
def TIMESTAMP(self):
return eKonf.Keys.TIMESTAMP.value
@property
def CORPUS(self):
return eKonf.Keys.CORPUS.value
@property
def METATYPEs(self):
return self.INFO.get(eKonf.Keys.META)
@METATYPEs.setter
def METATYPEs(self, value):
self.INFO[eKonf.Keys.META.value] = value
@property
def TIMESTAMP_INFO(self):
return self.INFO.get(self.TIMESTAMP)
@TIMESTAMP_INFO.setter
def TIMESTAMP_INFO(self, value):
self.INFO[self.TIMESTAMP] = value
@property
def SEGMENT_SEP(self):
return codecs.decode(
self.INFO.get("segment_separator", "\n\n"), "unicode_escape"
)
@property
def SENTENCE_SEP(self):
return codecs.decode(
self.INFO.get("sentence_separator", "\n"), "unicode_escape"
)
class DatasetInfo(BaseInfo):
def __init__(self, **args):
super().__init__(**args)
class FeatureInfo(BaseInfo):
def __init__(self, **args):
super().__init__(**args)
@property
def Y(self):
return self.COLUMNs.get(eKonf.Keys.Y)
@Y.setter
def Y(self, value):
self.COLUMNs[eKonf.Keys.Y.value] = value
@property
def X(self):
return eKonf.ensure_list(self.COLUMNs.get(eKonf.Keys.X))
@X.setter
def X(self, value):
self.COLUMNs[eKonf.Keys.X.value] = value
| entelecheia/ekorpkit | ekorpkit/info/column.py | column.py | py | 12,331 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "ekorpkit.eKonf.Keys",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "ekorpkit.eKonf",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "ekorpkit.eKo... |
19655830809 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 03/03/2021
@author: phongdk
"""
import os
from datetime import datetime
import icecream
DATA_DIR = os.getenv('DATA_DIR', '/shared_storage/bi_mlearn_training/coccoc_shopping')
DATA_FILENAME = f'{DATA_DIR}/data/shopee_sample.pkl'
DOC2VEC_FILENAME = f"{DATA_DIR}/models/top2vec_2M_learn.model"
INDEXER_FILENAME = f"{DATA_DIR}/models/indexer.pkl"
def time_format():
return f'{datetime.now()} |> '
"""
CONFIG DEBUG MODE -> other files just import config
"""
icecream.ic.configureOutput(prefix=time_format,
includeContext=True)
icecream.install()
"""
CONFIG LOG FORMAT
"""
LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(filename)s: %(message)s'
},
},
'handlers': {
'default': {
'level': 'INFO',
'formatter': 'standard',
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stdout', # Default is stderr
},
},
'loggers': {
'': { # root logger
'handlers': ['default'],
'level': 'DEBUG',
'propagate': False
},
'gensim': {
'handlers': ['default'],
'level': 'ERROR',
'propagate': False
},
'apscheduler': {
'handlers': ['default'],
'level': 'ERROR',
'propagate': False
},
'__main__': { # if __name__ == '__main__'
'handlers': ['default'],
'level': 'DEBUG',
'propagate': False
},
}
}
| phongdk92/shopping_retrieval | src/config.py | config.py | py | 1,703 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.getenv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "icecream.ic.configu... |
43303443334 | #! /usr/bin/env python
import colorsys
def hsv2ansi(h, s, v):
# h: 0..1, s/v: 0..1
if s < 0.1:
return int(v * 23) + 232
r, g, b = map(lambda x: int(x * 5), colorsys.hsv_to_rgb(h, s, v))
return 16 + (r * 36) + (g * 6) + b
def ramp_idx(i, num):
assert num > 0
i0 = float(i) / num
h = 0.57 + i0
s = 1 - pow(i0,3)
v = 1
return hsv2ansi(h, s, v)
def ansi_ramp(num):
return [ramp_idx(i, num) for i in range(num)]
ansi_ramp80 = ansi_ramp(80)
if __name__ == '__main__':
import sys
from py.io import ansi_print
colors = int(sys.argv[1]) if len(sys.argv) > 1 else 80
for col in range(colors):
ansi_print('#', "38;5;%d" % ramp_idx(col, colors), newline=False, flush=True)
| mozillazg/pypy | rpython/tool/ansiramp.py | ansiramp.py | py | 742 | python | en | code | 430 | github-code | 36 | [
{
"api_name": "colorsys.hsv_to_rgb",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "py.io.ansi_print",
"line_number": 29,
"usage_type": "call"
}
] |
3685343845 |
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import json
from collections import defaultdict
from io import StringIO
from PIL import Image
import requests
from lib.helpers import DatabaseConnector as dtb
from lib import label_map_util
from lib import visualization as vis_util
import uuid
import datetime
class ObjectDetectionRules(object):
"""docstring for ObjectDetectionRules"""
LABEL={
"shorts": ['outerwears', 'tshirt', 'tops'],
"jeans": ['outerwears', 'tshirt', 'tops'],
"tops": ["jeans", "shorts", "pants", "skirts"],
"person": [],
"skirts": ['outerwears', 'tshirt', 'tops']
}
class ObjectDetector(object):
    """High-level clothing detector.

    Wraps :class:`ObjectDetectionModel` and post-processes its raw boxes:
    splits them into "expected" labels vs complementary ("other") labels,
    keeps only the most confident box per label, and resolves vertical
    overlaps between expected and other boxes.
    """

    def __init__(self, graph_path, label_path, num_class):
        super(ObjectDetector, self).__init__()
        self.model = ObjectDetectionModel(graph_path, label_path, num_class)

    def run(self, picture, expecting):
        """Detect garments in *picture*.

        Args:
            picture: path (or file object) readable by PIL.Image.open.
            expecting: iterable of label names the caller expects to find.

        Returns:
            List of formatted detection dicts (expected objects plus at most
            one complementary object), or [] when nothing expected was found.
        """
        image = Image.open(picture)
        boxes = self.model.run(image)
        # Labels that may legitimately accompany the expected ones.
        # Hoisted out of the loop: the original rebuilt this list per box.
        complementary = {x for label in expecting for x in ObjectDetectionRules.LABEL[label]}
        expected = []
        others = []
        for box in boxes:
            box["width"] = image.size[0]
            box["height"] = image.size[1]
            name = box["label"]["name"]
            if name in expecting:
                expected.append(box)
            if name in complementary:
                others.append(box)
        expected = self.format_objects(picture, expected, is_expected=True)
        others = self.format_objects(picture, others)
        expected, others = self.correct_detection(expected, others)
        if len(others) > 1:
            # Keep only the single most confident complementary object
            # (max() keeps the first of equal-confidence candidates,
            # matching the original strict-greater scan).
            others = [max(others, key=lambda obj: obj["confidence"])]
        if len(expected) == 0:
            return []
        return expected + others

    def format_object(self, path, box, is_expected=False):
        """Flatten one raw detection box into a serializable dict."""
        return { "is_expected":is_expected, "path": path, "height": box["height"], "width": box["width"], "label": box["label"]["name"], "confidence": box["label"]["value"], "ymin": box["ymin"], "ymax": box["ymax"], "xmin": box["xmin"], "xmax":box["xmax"]}

    def format_objects(self, path, boxes, keep_main=True, is_expected=False):
        """Group *boxes* by label and flag the best detection per label.

        The most confident box of each label gets ``is_main=True``.  With
        ``keep_main=True`` only those main boxes are returned; otherwise all
        formatted boxes are returned.
        """
        objects = {}
        for box in boxes:
            obj = self.format_object(path, box, is_expected=is_expected)
            objects.setdefault(obj["label"], []).append(obj)
        for key in objects:
            group = objects[key]
            # First index of the maximum confidence, like the original scan.
            best_idx = max(range(len(group)), key=lambda idx: group[idx]["confidence"])
            group[best_idx]["is_main"] = True
        total = []
        for group in objects.values():
            for obj in group:
                if not keep_main or "is_main" in obj:
                    total.append(obj)
        return total

    def correct_detection(self, expected, others):
        """Resolve vertical overlaps between expected and complementary boxes.

        Coordinates are normalized (0..1, see use_normalized_coordinates in
        the model).  Heavily overlapping "others" are discarded (set to None,
        filtered out at the end); moderately overlapping pairs are shrunk at
        the shared edge so they no longer overlap as much.
        """
        for i in range(len(expected)):
            for j in range(len(others)):
                if others[j] is None:
                    # Already discarded while comparing against a previous
                    # expected box; the original dereferenced None here and
                    # crashed with a TypeError.
                    continue
                if others[j]["ymin"] <= expected[i]["ymin"] and others[j]["ymax"] < expected[i]["ymax"] and others[j]["ymax"] > expected[i]["ymin"] and others[j]["ymax"]-expected[i]["ymin"]>=0.4:
                    others[j] = None
                elif others[j]["ymin"] <= expected[i]["ymin"] and others[j]["ymax"] < expected[i]["ymax"] and others[j]["ymax"] > expected[i]["ymin"] and others[j]["ymax"]-expected[i]["ymin"]>=0.1:
                    # NOTE: the second line intentionally uses the already
                    # updated expected ymin, matching the original behavior.
                    expected[i]["ymin"] = expected[i]["ymin"] + (others[j]["ymax"]-expected[i]["ymin"]) / 2.0
                    others[j]["ymax"] = others[j]["ymax"] - (others[j]["ymax"]-expected[i]["ymin"]) / 2.0
                elif others[j]["ymin"] >= expected[i]["ymin"] and others[j]["ymax"] < expected[i]["ymax"]:
                    others[j] = None
                elif others[j]["ymin"] >= expected[i]["ymin"] and others[j]["ymax"] >= expected[i]["ymax"] and expected[i]["ymax"] - others[j]["ymin"] >= 0.4:
                    others[j] = None
                elif others[j]["ymin"] >= expected[i]["ymin"] and others[j]["ymax"] >= expected[i]["ymax"] and expected[i]["ymax"] - others[j]["ymin"] >= 0.1:
                    expected[i]["ymax"] = expected[i]["ymax"] - (expected[i]["ymax"]-others[j]["ymin"]) / 2.0
                    others[j]["ymin"] = others[j]["ymin"] + (expected[i]["ymax"]-others[j]["ymin"]) / 2.0
                elif others[j]["ymin"] <= expected[i]["ymin"] and others[j]["ymax"] >= expected[i]["ymax"]:
                    others[j] = None
        expected = [x for x in expected if x is not None]
        others = [x for x in others if x is not None]
        return expected, others
class ObjectDetectionModel(object):
    """Thin wrapper around a frozen TensorFlow 1.x object-detection graph.

    Loads the serialized graph and label map once and keeps a live
    tf.Session for repeated inference via run().
    """
    def __init__(self, graph_path, label_path, num_class):
        super(ObjectDetectionModel, self).__init__()
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(graph_path, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
            # NOTE(review): the session is kept open for the object's
            # lifetime and never closed explicitly.
            self.sess = tf.Session(graph=detection_graph)
        self.label_map = label_map_util.load_labelmap(label_path)
        categories = label_map_util.convert_label_map_to_categories(self.label_map, max_num_classes=num_class, use_display_name=True)
        self.category_index = label_map_util.create_category_index(categories)
        # Definite input and output Tensors for detection_graph
        self.image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        self.detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represent how level of confidence for each of the objects.
        # Score is shown on the result image, together with the class label.
        self.detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
        self.detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
        self.num_detections = detection_graph.get_tensor_by_name('num_detections:0')
    def load_image_into_numpy_array(self, image):
        # Assumes a 3-channel (RGB) PIL image -- TODO confirm callers convert
        # RGBA/grayscale inputs first, otherwise the reshape will fail.
        (im_width, im_height) = image.size
        return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)
    def run(self, image):
        """Run one inference pass on a PIL image and return the boxes
        produced by the visualization helper (score threshold 0.2,
        normalized coordinates)."""
        try:
            image_np = self.load_image_into_numpy_array(image)
            # Model expects a batch dimension: (1, height, width, 3).
            image_np_expanded = np.expand_dims(image_np, axis=0)
            (boxes, scores, classes, num) = self.sess.run(
                [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
                feed_dict={self.image_tensor: image_np_expanded})
            # Visualization of the results of a detection.
            boxes = vis_util.visualize_boxes_and_labels_on_image_array_2(
                image_np,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                self.category_index,
                min_score_thresh=0.2,
                use_normalized_coordinates=True,
                line_thickness=5)
            return boxes
        except:  # NOTE(review): a bare except that only re-raises is a no-op.
            raise
{
"api_name": "PIL.Image.open",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "tensorflow.Graph",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "tensorflow.GraphDef",
... |
30838795713 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 11 18:11:32 2021
@author: mathisagathe
"""
from pymongo import MongoClient
import matplotlib.pyplot as plt
# NOTE(review): credentials are hardcoded; move them to env vars or config.
client = MongoClient("10.35.7.4", username = "mathis", password = "MathisM21", authsource = "mathisdb")
db=client.mathisdb
collection = db["TripAdvisor"]
# Total number of restaurants in France on TripAdvisor.
r1 = {"country":"France"}
# NOTE(review): Cursor.count() was removed in pymongo 4.x; on modern drivers
# use collection.count_documents(r1) instead (applies to all counts below).
nbrestoFR = collection.find((r1)).count()
print("Le nombre total de restaurants en France sur TA est de : ",nbrestoFR)
# Number of restaurants in France serving vegetarian meals
r2 = {"$and":
      [
       {"country":"France"},
       {"vegetarian_friendly":"Y"}
      ]
      }
nbvege = collection.find((r2)).count()
# Number of restaurants in France serving gluten-free meals
r3 = {"$and":
      [
       {"country":"France"},
       {"gluten_free":"Y"}
      ]
      }
nbgf = collection.find((r3)).count()
# Chart: percentage of vegetarian and gluten-free restaurants in France
# https://www.python-graph-gallery.com/pie-plot-matplotlib-basic
# https://www.kaggle.com/stefanoleone992/tripadvisor-european-restaurants-eda
print("Le nombre total de restaurants en France servant des repas végétariens est de : ",nbvege)
print("Le nombre total de restaurants en France servant des repas sans gluten est de : ",nbgf)
# Top 5 European cities with the most restaurants.
# NOTE(review): `r3` is reused here, shadowing the gluten-free filter above;
# also the limit is 6 for a "top 5" -- presumably to skip a null/empty city.
r3 = collection.aggregate([
     {"$group":{"_id":"$city","nb":{"$sum":1}}},
     {"$sort":{"nb":-1}},
     {"$limit":6}
     ])
for i in r3:
    print(i)
| romanelollier/School_Project_BigData | requetes_mod.py | requetes_mod.py | py | 1,536 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 13,
"usage_type": "call"
}
] |
34987281436 | import torch
import torch.nn as nn
from methods.ein_seld.data_augmentation import spec_augment_
from methods.ein_seld.data_augmentation import spec_augment, channel_rotation
from methods.utils.stft import (STFT, LogmelFilterBank, intensityvector,
spectrogram_STFTInput)
import numpy as np
import librosa
import librosa.display
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import torch
import torchaudio
import torchaudio.functional as F
import torchaudio.transforms as T
import librosa
class LogmelIntensity_Extractor(nn.Module):
    """Log-mel + intensity-vector feature extractor for SELD training.

    forward() turns 4-channel FOA waveforms into concatenated log-mel
    spectrogram and intensity-vector features, optionally applying
    waveform/label inversion, channel rotation and SpecAugment for batch
    items tagged with the corresponding augmentation type.
    """
    def __init__(self, cfg , data_type):
        super().__init__()
        data = cfg['data']
        sample_rate, n_fft, hop_length, window, n_mels, fmin, fmax = \
            data['sample_rate'], data['n_fft'], data['hop_length'], data['window'], data['n_mels'], \
            data['fmin'], data['fmax']
        center = True
        pad_mode = 'reflect'
        ref = 1.0
        amin = 1e-10
        top_db = None
        # STFT extractor
        self.stft_extractor = STFT(n_fft=n_fft, hop_length=hop_length, win_length=n_fft,
            window=window, center=center, pad_mode=pad_mode,
            freeze_parameters=data['feature_freeze'])
        # Spectrogram extractor
        self.spectrogram_extractor = spectrogram_STFTInput
        # Logmel extractor
        self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=n_fft,
            n_mels=n_mels, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
            freeze_parameters=data['feature_freeze'])
        # Intensity vector extractor
        self.intensityVector_extractor = intensityvector
        self.data_type = data_type
        self.cfg = cfg
    def define_transformation(self,waveform):
        # Debug helper: builds a torchaudio mel spectrogram of the waveform.
        # NOTE(review): all parameters are hardcoded here (sample_rate=24000
        # etc.) instead of coming from self.cfg -- confirm they match.
        sample_rate = 24000
        n_fft = 1024
        win_length = None
        hop_length = 600
        n_mels = 256
        mel_spectrogram = T.MelSpectrogram(
            sample_rate=sample_rate,
            n_fft=n_fft,
            win_length=win_length,
            hop_length=hop_length,
            power=2.0,
            n_mels=n_mels,
        )
        melspec = mel_spectrogram(waveform.cpu())
        return melspec
    def forward(self, x):
        """
        input:
            (batch_size, channels=4, data_length)
        output:
            (batch_size, channels, time_steps, freq_bins)
        """
        # for infrerence
        # Inference path: x is a plain waveform tensor, no targets attached.
        if type(x)!= tuple :
            if x.ndim == 3:
                x = self.stft_extractor(x)
                logmel = self.logmel_extractor(self.spectrogram_extractor(x))
                intensity_vector = self.intensityVector_extractor(x, self.logmel_extractor.melW)
                out = torch.cat((logmel, intensity_vector), dim=1)
                return out
            else:
                raise ValueError("x shape must be (batch_size, num_channels, data_length)\n \
                    Now it is {}".format(x.shape))
        # Training path: x bundles waveforms, labels, split name and per-item
        # augmentation tags.
        input, target, ind, data_type = x
        if input.ndim != 3:
            raise ValueError("x shape must be (batch_size, num_channels, data_length)\n \
                Now it is {}".format(input.shape))
        #self.plot_waveform(input[0])
        #melspec = self.define_transformation(input[0])
        #self.plot_spectrogram(melspec)
        # get the indices of augmented data
        aug_idx_inverse = [i for i, x in enumerate(data_type) if x == "train_invert_position_aug"]
        if ind == 'train' and len(aug_idx_inverse) != 0:
            # NOTE(review): `input[i, ...]` indexes by loop position i rather
            # than the tagged batch index dt (compare with the SpecAugment
            # loop below, which indexes by dt) -- verify this is intended.
            for i, dt in enumerate(aug_idx_inverse):
                input[i, :, :] = torch.flip(input[i, :, :], dims=[1]) # invert waveform time axis
                sed_label = torch.flip(target['sed'][i], dims=[0]) # invert sed label time axis
                doa_label = torch.flip(target['doa'][i], dims=[0]) # invert doa label time axis
                doa_label = 0.0 - doa_label # also invert sound source position
                target['sed'][i] = sed_label
                target['doa'][i] = doa_label
        aug_idx_rotate = [i for i, x in enumerate(data_type) if x == "train_rotate_channel"]
        if ind == 'train' and len(aug_idx_rotate) != 0:
            # Rotate FOA channels and apply the matching label rotation.
            for i , dt in enumerate(aug_idx_rotate):
                input[i, :, :], pattern = channel_rotation.apply_data_channel_rotation('foa', input[i, :, :])
                aug_rotate = channel_rotation.apply_label_channel_rotation('foa', target['doa'][i], pattern)
                # update the target
                target['doa'][i] = aug_rotate
        input = self.stft_extractor(input)
        logmel = self.logmel_extractor(self.spectrogram_extractor(input))
        aug_idx_spc = [i for i, x in enumerate(data_type) if x == "train_spec_aug"]
        if ind == 'train' and len(aug_idx_spc) != 0:
            # get specAugment Parameters
            # NOTE(review): these assignments shadow the module aliases
            # torchaudio.functional (F) and torchaudio.transforms (T) for the
            # remainder of this method.
            F = self.cfg['data_augmentation']['F']
            T = self.cfg['data_augmentation']['T']
            num_freq_masks = self.cfg['data_augmentation']['num_freq_masks']
            num_time_masks = self.cfg['data_augmentation']['num_time_masks']
            replace_with_zero = self.cfg['data_augmentation']['replace_with_zero']
            for i , dt in enumerate(aug_idx_spc):
                logmel_aug = spec_augment.specaug(torch.squeeze(logmel[dt,:,:,:]).permute(0, 2, 1),
                                                  W=2, F=F, T=T,
                                                  num_freq_masks=num_freq_masks,
                                                  num_time_masks=num_time_masks,
                                                  replace_with_zero=replace_with_zero)
                logmel[dt, :, :, :] = logmel_aug
        intensity_vector = self.intensityVector_extractor(input, self.logmel_extractor.melW)
        out = torch.cat((logmel, intensity_vector), dim=1)
        return out, target
    def plot_spectrogram(self, spec, title=None, ylabel='freq_bin', aspect='auto', xmax=None):
        # Debug helper: saves a dB-scaled spectrogram image to Spectrogram.png.
        fig, axs = plt.subplots(1, 1)
        axs.set_title(title or 'Spectrogram (db)')
        axs.set_ylabel(ylabel)
        axs.set_xlabel('frame')
        im = axs.imshow(librosa.power_to_db(spec[0]), origin='lower', aspect=aspect)
        if xmax:
            axs.set_xlim((0, xmax))
        fig.colorbar(im, ax=axs)
        plt.show(block=False)
        plt.savefig('Spectrogram.png', format='png')
        plt.close(fig)
    def plot_waveform(self,waveform, title="Waveform", xlim=None, ylim=None):
        # Debug helper: saves a per-channel waveform plot to waveform.png.
        num_channels, num_frames = waveform.shape
        time_axis = torch.arange(0, num_frames)
        # // sample_rate
        figure, axes = plt.subplots(num_channels, 1)
        if num_channels == 1:
            axes = [axes]
        for c in range(num_channels):
            axes[c].plot(time_axis, waveform[c].cpu(), linewidth=1)
            axes[c].grid(True)
            if num_channels > 1:
                axes[c].set_ylabel(f'Channel {c + 1}')
            if xlim:
                axes[c].set_xlim(xlim)
            if ylim:
                axes[c].set_ylim(ylim)
        figure.suptitle(title)
        plt.show(block=False)
        plt.savefig('waveform.png', format='png')
        plt.close(figure)
    '''
    # For spectrogram visualization
    def plot_specgram(self,waveform, sample_rate, title="Spectrogram", xlim=None):
        #waveform = waveform[0].numpy()
        waveform = waveform[0].cpu().numpy()
        num_channels, num_frames = waveform.shape
        time_axis = torch.arange(0, num_frames) // sample_rate
        figure, axes = plt.subplots(num_channels, 1)
        if num_channels == 1:
            axes = [axes]
        for c in range(num_channels):
            axes[c].specgram(waveform[c], Fs=sample_rate)
            if num_channels > 1:
                axes[c].set_ylabel(f'Channel {c + 1}')
            if xlim:
                axes[c].set_xlim(xlim)
        figure.suptitle(title)
        plt.savefig('Spec')
        plt.show(block=False)
    '''
{
"api_name": "torch.nn.Module",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "methods.utils.stft.STFT",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "methods.utils.s... |
74169633702 | import logging
import os
import uuid
from io import BytesIO
from typing import Sequence
from zipfile import ZIP_DEFLATED, ZipFile
import anndata as ad
import dramatiq
from fastapi import APIRouter, Depends, File, Form, HTTPException, UploadFile
from fastapi.responses import ORJSONResponse
from matplotlib.colors import rgb2hex
from sqlalchemy.orm import Session
from starlette.responses import StreamingResponse
from starlette.status import HTTP_404_NOT_FOUND
from histocat.api.db import get_db
from histocat.api.security import get_active_member
from histocat.config import config
from histocat.core.dataset import service as dataset_service
from histocat.core.dataset.dto import DatasetDto, DatasetUpdateDto
from histocat.core.image import get_qualitative_colors
from histocat.core.member.models import MemberModel
from histocat.core.utils import stream_bytes
logger = logging.getLogger(__name__)
router = APIRouter()
@router.get("/groups/{group_id}/projects/{project_id}/datasets", response_model=Sequence[DatasetDto])
def get_project_datasets(
group_id: int,
project_id: int,
db: Session = Depends(get_db),
member: MemberModel = Depends(get_active_member),
):
"""Retrieve project's datasets"""
items = dataset_service.get_project_datasets(db, project_id=project_id)
return items
@router.patch("/groups/{group_id}/datasets/{dataset_id}", response_model=DatasetDto)
def update(
group_id: int,
dataset_id: int,
params: DatasetUpdateDto,
member: MemberModel = Depends(get_active_member),
db: Session = Depends(get_db),
):
"""Update dataset"""
item = dataset_service.get(db, id=dataset_id)
if not item:
raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=f"Dataset id:{dataset_id} not found")
item = dataset_service.update(db, item=item, params=params)
return item
@router.get("/groups/{group_id}/datasets/{dataset_id}/centroids")
def get_centroids(
group_id: int,
dataset_id: int,
member: MemberModel = Depends(get_active_member),
db: Session = Depends(get_db),
):
"""Get dataset cell centroids"""
dataset = dataset_service.get(db, id=dataset_id)
if not dataset:
raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=f"Dataset id:{dataset_id} not found")
adata = ad.read_h5ad(dataset.cell_file_location())
mappable = get_qualitative_colors(vmin=adata.obs["AcquisitionId"].min(), vmax=adata.obs["AcquisitionId"].max())
colors = [rgb2hex(c) for c in mappable.to_rgba(adata.obs["AcquisitionId"])]
output = {
"acquisitionIds": adata.obs["AcquisitionId"].tolist(),
"cellIds": adata.obs["CellId"].tolist(),
"objectNumbers": adata.obs["ObjectNumber"].tolist(),
"x": adata.obs["CentroidX"].round(2).tolist(),
"y": adata.obs["CentroidY"].round(2).tolist(),
"colors": colors,
}
return ORJSONResponse(output)
@router.get("/groups/{group_id}/datasets/{dataset_id}", response_model=DatasetDto)
def get_by_id(
group_id: int,
dataset_id: int,
member: MemberModel = Depends(get_active_member),
db: Session = Depends(get_db),
):
"""Get dataset by id"""
item = dataset_service.get(db, id=dataset_id)
if not item:
raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=f"Dataset id:{dataset_id} not found")
return item
@router.delete("/groups/{group_id}/datasets/{dataset_id}", response_model=DatasetDto)
def delete_by_id(
group_id: int,
dataset_id: int,
member: MemberModel = Depends(get_active_member),
db: Session = Depends(get_db),
):
"""Delete a specific dataset by id"""
item = dataset_service.remove(db, id=dataset_id)
if not item:
raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=f"Dataset id:{dataset_id} not found")
return item
@router.get("/datasets/{dataset_id}/download")
async def download_by_id(dataset_id: int, db: Session = Depends(get_db)):
"""Download dataset by id"""
item = dataset_service.get(db, id=dataset_id)
if not item:
raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=f"Dataset id:{dataset_id} not found")
file_name = f"{item.name}.zip"
abs_src = os.path.abspath(item.location)
buffer = BytesIO()
with ZipFile(buffer, "w", ZIP_DEFLATED) as zip:
for folderName, _, filenames in os.walk(item.location):
for filename in filenames:
absname = os.path.abspath(os.path.join(folderName, filename))
arcname = absname[len(abs_src) + 1 :]
zip.write(absname, arcname)
headers = {"Content-Disposition": f'attachment; filename="{file_name}"'}
return StreamingResponse(stream_bytes(buffer.getvalue()), media_type="application/zip", headers=headers)
@router.post("/groups/{group_id}/projects/{project_id}/datasets/upload")
def upload_dataset(
group_id: int,
project_id: int,
type: str = Form(None),
masks_folder: str = Form(None),
regionprops_folder: str = Form(None),
intensities_folder: str = Form(None),
file: UploadFile = File(None),
member: MemberModel = Depends(get_active_member),
db: Session = Depends(get_db),
):
path = os.path.join(config.INBOX_DIRECTORY, str(uuid.uuid4()))
if not os.path.exists(path):
os.makedirs(path)
uri = os.path.join(path, file.filename)
with open(uri, "wb") as f:
f.write(file.file.read())
broker = dramatiq.get_broker()
message = dramatiq.Message(
actor_name="import_dataset",
queue_name="import",
args=(),
kwargs={
"type": type,
"masks_folder": masks_folder,
"regionprops_folder": regionprops_folder,
"intensities_folder": intensities_folder,
"uri": uri,
"project_id": project_id,
},
options={},
)
broker.enqueue(message)
return {"uri": uri}
| BodenmillerGroup/histocat-web | backend/histocat/api/dataset/controller.py | controller.py | py | 5,914 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "fastapi.APIRouter",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "histocat.c... |
36121027603 | import logging
import sys
import torch
import yaml
from tagging_trainer import TaggingTrainer
from forte.common.configuration import Config
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
if __name__ == "__main__":
task = sys.argv[1]
assert task in ["ner", "pos"], "Not supported nlp task type: {}".format(
task
)
extractor_configs = yaml.safe_load(
open("configs/config_extractors.yml", "r")
)
# Configure output extractor based on the task, see
# BioSeqTaggingExtractor for more information.
output_configs = extractor_configs["feature_scheme"]["output_tag"][
"extractor"
]["config"]
if task == "ner":
output_configs["entry_type"] = "ft.onto.base_ontology.EntityMention"
output_configs["attribute"] = "ner_type"
output_configs["tagging_unit"] = "ft.onto.base_ontology.Token"
elif task == "pos":
output_configs["entry_type"] = "ft.onto.base_ontology.Token"
output_configs["attribute"] = "pos"
config = {
"config_data": Config(
{},
default_hparams=yaml.safe_load(
open("configs/config_data.yml", "r")
),
),
"config_model": Config(
{},
default_hparams=yaml.safe_load(
open("configs/config_model.yml", "r")
),
),
"config_extractors": extractor_configs,
"device": torch.device("cuda")
if torch.cuda.is_available()
else torch.device("cpu"),
}
trainer: TaggingTrainer = TaggingTrainer(task_type=task, **config)
trainer.run()
# Save training state to disk
trainer.save(config["config_data"].train_state_path)
torch.save(trainer.model, "model.pt")
| asyml/forte | examples/tagging/main_train_tagging.py | main_train_tagging.py | py | 1,785 | python | en | code | 230 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
... |
23297663163 | import pathlib
import pandas as pd
from model import dez
datadir = pathlib.Path(__file__).parents[0].joinpath('data')
def test_populate_solution_land_distribution():
    """Compare DEZ ocean distribution for 'Limiting bottom trawling'
    against the frozen CSV fixture."""
    expected = pd.read_csv(datadir.joinpath('lbt_ocean_dist.csv'), index_col=0)
    de = dez.DEZ('Limiting bottom trawling')
    # We freeze applicable zones as solution_dez_matrix.csv is likely to change
    de.applicable_zones = ['DEZ1: Epipelagic, EEZ', 'DEZ2: Epipelagic, ABNJ']
    de._populate_world_ocean_allocation()
    de._populate_solution_ocean_distribution()
    # check_dtype=False: the CSV round-trip can load ints where floats are computed.
    pd.testing.assert_frame_equal(de.get_ocean_distribution(), expected, check_dtype=False)
{
"api_name": "pathlib.Path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "model.dez.DEZ",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "model.dez",
"line_number... |
75009815143 | import asyncio
import logging
from random import randint
from src.streaming import Consumer, Producer, Topic
LOGGER = logging.getLogger(__name__)
GROUP_ID = "WEBSITE_NER"
async def amain():
    """Consume website changes, produce NER results."""
    # group_id enables resumable, load-balanced consumption across instances.
    consumer = await Consumer.create(Topic.CHANGE.value, group_id=GROUP_ID)
    producer = await Producer.create()
    # Runs forever: one NER message is produced per consumed change event.
    async for message in consumer:
        data = message.value
        LOGGER.info("processing event %s %s", data["id"], data["domain"])
        await producer.send_and_wait(Topic.NER.value, calculate_ner(data))
def calculate_ner(data: dict) -> dict:
    """Build a (mock) NER result for one website-change event.

    The entity counts are random placeholders standing in for a real model.
    """
    entity_counts = {
        "persons": randint(0, 10),
        "locations": randint(0, 10),
        "brands": randint(0, 10),
    }
    return {"domain": data["domain"], "id": data["id"], "ner": entity_counts}
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s [%(module)s:%(lineno)s] %(message)s')
asyncio.run(amain())
| SebastianRemander/eda-demo | src/producers/website_ner.py | website_ner.py | py | 1,060 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "src.streaming.Consumer.create",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "src.streaming.Consumer",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": ... |
42831133132 | #!/usr/bin/env python3
import struct
import json
import sys
def write_tileset(filename, spec):
    """Write a CCTILE02 tileset file assembled from the images named in *spec*.

    Layout: 8-byte magic, length-prefixed name and description, one size
    byte, then three length-prefixed blobs (base art, overlay art, and the
    optional CC2 art, written empty when absent).
    """
    def read_bytes(path):
        with open(path, 'rb') as src:
            return src.read()

    base_data = read_bytes(spec['base'])
    overlay_data = read_bytes(spec['overlay'])
    cc2_data = read_bytes(spec['cc2']) if 'cc2' in spec else b''

    with open(filename, 'wb') as tis:
        tis.write(b'CCTILE02')
        for text in (spec['name'], spec['desc']):
            # The length prefix counts characters, as the original writer did.
            tis.write(struct.pack('I', len(text)))
            tis.write(bytes(text, 'utf-8'))
        tis.write(struct.pack('B', spec['size']))
        for blob in (base_data, overlay_data, cc2_data):
            tis.write(struct.pack('I', len(blob)))
            tis.write(blob)
# Generate default tilesets if called from the command line
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('Usage: {} spec.json [...]'.format(sys.argv[0]))
        sys.exit(1)
    for arg in sys.argv[1:]:
        # Each JSON spec produces a .tis file with the same stem.
        with open(arg, 'r') as spec_file:
            spec = json.load(spec_file)
        tis_filename = arg.rsplit('.', 1)[0] + '.tis'
        write_tileset(tis_filename, spec)
| zrax/cctools | res/gen_tilesets.py | gen_tilesets.py | py | 1,323 | python | en | code | 18 | github-code | 36 | [
{
"api_name": "struct.pack",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"line_number": ... |
40943554690 | import sys
import ruamel
from ruamel.yaml import YAML
from datetime import datetime, date, time
from common import *
import fileinput
class NonAliasingRTRepresenter(ruamel.yaml.RoundTripRepresenter):
    """Round-trip YAML representer that never emits anchors/aliases."""
    def ignore_aliases(self, data):
        # Treat every node as unshared so repeated objects are written in full.
        return True
def main():
    """Generate next month's race schedule into data/races-new.yaml.

    Walks every day of the next calendar month, instantiates the weekly
    templates from data/common.yaml, alternates Simple/Complex descriptions,
    and rewrites the last matching race as a Mystery race.
    """
    out = {'races': []}
    yaml = YAML(typ='safe')
    with open('data/common.yaml', 'r') as fi:
        ydat = yaml.load(fi)
    d = date.today()
    year = d.year
    month = d.month + 1
    if month == 13:
        # skip Smarch
        year = year + 1
        month = 1
    day = 1
    try:
        while True:
            # date() raises ValueError once `day` passes the month's end,
            # which terminates this loop via the except below.
            d = date(year, month, day)
            for w in ydat['weekly']:
                if w['isoweekday'] == d.isoweekday():
                    t = time.fromisoformat(w['time'])
                    dt = datetime.combine(d, t, tzinfo=RACETZ)
                    r = {'datetime': dt.strftime('%Y-%m-%d %H:%M')}
                    # Copy template fields except scheduling metadata.
                    for k in w.keys():
                        if k not in ('isoweekday', 'time', 'msg'):
                            r[k] = w[k]
                    kw = dt.isocalendar().week
                    sunday = d.isoweekday() == 7
                    casual = r['skills_preset'] == 'casual'
                    # alternate simple and complex, and do the opposite on Sunday
                    simple = True
                    if (kw % 2 == 0 and casual) or (kw % 2 != 0 and not casual):
                        simple = False
                    if sunday:
                        simple = not simple
                    if simple:
                        r['desc'] += ' - Simple'
                    else:
                        r['desc'] += ' - Complex'
                    out['races'].append(r)
            day = day + 1
    except ValueError:
        pass
    # ugly hack
    # Turn the last Complex race of the alternating preset into a Mystery race.
    replace_last = 'Casual - Complex' if month % 2 == 0 else 'Hard - Complex'
    for i, obj in reversed(list(enumerate(out['races']))):
        if obj['desc'] == replace_last:
            out['races'][i]['desc'] = obj['desc'].replace('Complex', 'Mystery')
            break
    yout = YAML()
    yout.default_flow_style = False
    yout.version = (1, 2)
    yout.indent(mapping=2, sequence=4, offset=2)
    # No anchors/aliases in the output file (see NonAliasingRTRepresenter).
    yout.Representer = NonAliasingRTRepresenter
    with open('data/races-new.yaml', 'w') as fout:
        yout.dump(out, fout)
    # add whitespace
    # Insert a blank line before each race entry for readability.
    with fileinput.FileInput('data/races-new.yaml', inplace=True) as f:
        for line in f:
            if line.startswith(' -') and f.lineno() > 4:
                print()
            print(line, end='')
if __name__ == '__main__':
    main()
| pkdawson/workrobot | new_schedule.py | new_schedule.py | py | 2,605 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ruamel.yaml",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "ruamel.yaml.YAML",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.date",
... |
26672929841 | #!/usr/bin/env python
import argparse
import itertools
import pandas as pd
def getCmdArgs():
    """Parse command-line arguments.

    Returns the parsed namespace with:
      scn_list (str): CSV whose first column lists scene IDs.
      nlead / ntrail (int): number of rows to buffer after/before each scene.
      output (str): output CSV of path,row,start_date,end_date.
    """
    p = argparse.ArgumentParser(description="Add scenes in neighboring rows of scenes in a given scene ID list.")
    p.add_argument("-l", "--list", dest="scn_list", required=True, default=None, metavar="CSV_OF_SCENE_LIST", help="A CSV file of scene list. It must have at least the first column as the list of scene IDs (e.g. LC80010042015211LGN01)")
    # type=int: without it, CLI-supplied values arrived as strings and broke
    # the row arithmetic in main(); the defaults were already ints.
    p.add_argument("--lead", dest="nlead", type=int, required=False, default=1, metavar="NUM_OF_LEADING_ROWS_TO_BUFFER", help="Number of leading rows to buffer from a scene, e.g. 2 leading rows of path=18,row=30 will add two path/row pairs, (1) path=18,row=31, (2) path=18,row=32.")
    p.add_argument("--trail", dest="ntrail", type=int, required=False, default=1, metavar="NUM_OF_TRAILING_ROWS_TO_BUFFER", help="Number of trailing rows to buffer from a scene, e.g. 2 leading rows of path=18,row=30 will add two path/row pairs, (1) path=18,row=29, (2) path=18,row=28.")
    p.add_argument("-o", "--output", dest="output", required=True, default=None, metavar="OUTPUT_PRD_LIST", help="Name of output CSV file of the list of path,row,start_date,end_date, of the scenes after row buffering.")
    cmdargs = p.parse_args()
    return cmdargs
def scnIdToPathRowDay(scn_id):
    """Split a Landsat scene ID (e.g. LC80010042015211LGN01) into
    (path, row, year, day-of-year), all as ints."""
    fields = (scn_id[3:6], scn_id[6:9], scn_id[9:13], scn_id[13:16])
    path, row, year, doy = (int(part) for part in fields)
    return path, row, year, doy
def main(cmdargs):
    """Buffer each scene's WRS row by leading/trailing neighbours and write
    a path,row,start_date,end_date CSV.

    Duplicated path/row/date combinations across the buffered set are dropped
    entirely (keep=False), preserving the original behavior.
    """
    scn_csv = cmdargs.scn_list
    # Coerce to int: argparse delivers strings when --lead/--trail are given
    # on the command line without type=int.
    nlead = int(cmdargs.nlead)
    ntrail = int(cmdargs.ntrail)
    prd_csv = cmdargs.output

    out_header = ["path", "row", "start_date", "end_date"]
    prd_header = ["path", "row", "year", "doy"]

    with open(prd_csv, "w") as out_fobj:
        out_fobj.write(",".join(out_header))
        out_fobj.write("\n")

        scn_df = pd.read_csv(scn_csv, usecols=[0])
        scn_list = scn_df.iloc[:, 0].tolist()
        prd_list = zip(*[scnIdToPathRowDay(scn) for scn in scn_list])
        prd_dict = {nm: prd for nm, prd in zip(prd_header, prd_list)}
        prd_df = pd.DataFrame(prd_dict)
        # Add buffer rows.  Python 3 fix: range objects are immutable
        # (the original called .remove(0) on one), so build a list without 0.
        buf_row_add = [off for off in range(-1 * ntrail, nlead + 1) if off != 0]
        buf_df_list = [prd_df.copy() for bra in buf_row_add]
        # Python 3 fix: itertools.izip no longer exists; zip is lazy already.
        for bra, bd in zip(buf_row_add, buf_df_list):
            bd["row"] = bd["row"] + bra
        all_prd_df = pd.concat([prd_df] + buf_df_list, axis=0)
        all_prd_df = all_prd_df.drop_duplicates(prd_header, keep=False)
        all_prd_df = all_prd_df.sort_values(["year", "doy", "path", "row"])
        datestr = ["{0:04d}{1:03d}".format(getattr(row, "year"), getattr(row, "doy"))
                   for row in all_prd_df.itertuples()]
        all_prd_df[out_header[2]] = pd.to_datetime(datestr, format="%Y%j")
        all_prd_df[out_header[3]] = pd.to_datetime(datestr, format="%Y%j")
        all_prd_df.to_csv(out_fobj, header=False, index=False, columns=out_header,
                          mode="a", date_format="%Y-%m-%d")

if __name__ == "__main__":
    cmdargs = getCmdArgs()
    main(cmdargs)
| zhanlilz/landsat-tools | landsat-albedo-pipeline/buffer_scene_list.py | buffer_scene_list.py | py | 3,074 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "itertools.izi... |
21885184448 | """
Device discovery
"""
import enum
import re
from dataclasses import asdict, dataclass
from queue import Empty, Queue
from socket import inet_ntoa
from typing import Dict, Generator, List, Optional
import click
import requests
import usb
from zeroconf import ServiceBrowser, ServiceInfo, ServiceStateChange, Zeroconf
from brewblox_ctl import const, tabular, utils
BREWBLOX_DNS_TYPE = '_brewblox._tcp.local.'
DISCOVER_TIMEOUT_S = 5
DISCOVERY_LEN = 4 # USB / TCP / mDNS
MODEL_LEN = 7 # 'Spark 2' / 'Spark 3' / 'Spark 4'
MAX_ID_LEN = 24 # Spark 4 IDs are shorter
HOST_LEN = 4*3+3
class DiscoveryType(enum.Enum):
    """How a Spark controller can be discovered."""
    all = 1
    usb = 2
    mdns = 3
    # aliases: wifi and lan resolve to mDNS discovery
    mqtt = 4
    wifi = 3
    lan = 3
    def __str__(self):
        return self.name
    @staticmethod
    def choices():
        # __members__ includes aliases, so 'wifi' and 'lan' are listed too
        return [str(member) for member in DiscoveryType.__members__]
@dataclass
class DiscoveredDevice:
    """A Spark controller found by one of the discovery mechanisms."""
    discovery: str      # discovery channel label: 'USB', 'TCP', or 'mDNS'
    model: str          # e.g. 'Spark 2' / 'Spark 3' / 'Spark 4'
    device_id: str      # unique controller ID, normalized to lowercase
    device_host: str = ''  # network address, empty for USB-only discovery
    def __post_init__(self):
        # IDs are compared case-insensitively; normalize on creation
        self.device_id = self.device_id.lower()
@dataclass
class HandshakeMessage:
    """Fields of the comma-separated Brewblox '!BREWBLOX,...' handshake string."""
    brewblox: str
    firmware_version: str
    proto_version: str
    firmware_date: str
    proto_date: str
    system_version: str
    platform: str
    reset_reason_hex: str
    reset_data_hex: str
    device_id: str
    model: str = ''
    def __post_init__(self):
        # IDs are compared case-insensitively throughout
        self.device_id = self.device_id.lower()
        # Map the reported platform to a model name; unknown platforms
        # fall back to the raw platform string.
        names = {
            'photon': 'Spark 2',
            'p1': 'Spark 3',
            'esp32': 'Spark 4',
        }
        self.model = names.get(self.platform, self.platform)
def match_id_services(config: Optional[dict]) -> Dict[str, str]:  # [ID, service_name]
    """Gets the --device-id value for all Spark services in config.
    Because IDs are yielded during discovery,
    values are returned with the ID as key,
    and a comma-separated string of services as value.
    """
    if not config:
        return {}
    grouped: Dict[str, List[str]] = {}
    for name, service in config.get('services', {}).items():
        # Only Spark service containers carry a --device-id argument
        image = service.get('image', '')
        if not image.startswith('ghcr.io/brewblox/brewblox-devcon-spark'):
            continue
        found = re.match(
            r'.*\-\-device\-id(\w|=)(?P<id>\w+)',
            service.get('command', ''))
        if not found:
            continue
        device_id = found.group('id').lower()
        grouped.setdefault(device_id, []).append(name)
    return {
        device_id: ', '.join(names)
        for device_id, names in grouped.items()
    }
def find_device_by_host(device_host: str) -> Optional[DiscoveredDevice]:
    """Query a controller directly over HTTP and parse its handshake.

    Returns a DiscoveredDevice on success, or None (after logging a warning)
    if the host is unreachable, responds with an HTTP error, or does not
    answer with a Brewblox handshake.
    """
    utils.info(f'Querying device with address {device_host}...')
    try:
        resp = requests.get(f'http://{device_host}', timeout=5)
        resp.raise_for_status()
        content = resp.text
        # Controllers answer a plain GET with a '!BREWBLOX,...' CSV line
        if not content.startswith('!BREWBLOX'):
            raise RuntimeError('Host did not respond with a Brewblox handshake')
        handshake = HandshakeMessage(*content.split(','))
        utils.info(f'Found a {handshake.model} with ID {handshake.device_id}')
        return DiscoveredDevice(
            discovery='TCP',
            device_id=handshake.device_id,
            model=handshake.model,
            device_host=device_host,
        )
    except Exception as ex:
        # Best-effort lookup: any failure (including a malformed handshake)
        # is reported to the user, not raised
        utils.warn(f'Failed to fetch device info: {str(ex)}')
        return None
def discover_usb() -> Generator[DiscoveredDevice, None, None]:
    """Yield Spark 2 / Spark 3 controllers connected over USB.

    The USB serial number string is used as the device ID.
    """
    devices = [
        *usb.core.find(find_all=True,
                       idVendor=const.VID_PARTICLE,
                       idProduct=const.PID_PHOTON),
        *usb.core.find(find_all=True,
                       idVendor=const.VID_PARTICLE,
                       idProduct=const.PID_P1),
        # Spark 4 does not support USB control, and is not listed
    ]
    for dev in devices:
        dev: usb.core.Device
        id = usb.util.get_string(dev, dev.iSerialNumber).lower()
        # Product ID distinguishes the Photon-based Spark 2 from the P1-based Spark 3
        model = {const.PID_PHOTON: 'Spark 2', const.PID_P1: 'Spark 3'}[dev.idProduct]
        yield DiscoveredDevice(discovery='USB',
                               model=model,
                               device_id=id)
def discover_mdns() -> Generator[DiscoveredDevice, None, None]:
    """Yield Spark controllers announced over mDNS.

    Browses for the Brewblox service type and stops once no new service
    has been seen for DISCOVER_TIMEOUT_S seconds.
    """
    queue: Queue[ServiceInfo] = Queue()
    conf = Zeroconf()
    def on_service_state_change(zeroconf: Zeroconf, service_type, name, state_change):
        # Runs on the zeroconf thread; hand results to the generator via the queue
        if state_change == ServiceStateChange.Added:
            info = zeroconf.get_service_info(service_type, name)
            queue.put(info)
    try:
        ServiceBrowser(conf, BREWBLOX_DNS_TYPE, handlers=[on_service_state_change])
        while True:
            # Empty is raised after DISCOVER_TIMEOUT_S of silence -> done
            info = queue.get(timeout=DISCOVER_TIMEOUT_S)
            if not info or not info.addresses or info.addresses == [b'\x00\x00\x00\x00']:
                continue  # discard simulators
            id = info.properties[b'ID'].decode()
            model = info.properties[b'HW'].decode()
            host = inet_ntoa(info.addresses[0])
            yield DiscoveredDevice(discovery='mDNS',
                                   model=model,
                                   device_id=id,
                                   device_host=host)
    except Empty:
        pass
    finally:
        # Always shut down the zeroconf engine, even if the caller
        # abandons the generator early
        conf.close()
def discover_device(discovery_type: DiscoveryType) -> Generator[DiscoveredDevice, None, None]:
    """Yield devices from every discovery mechanism selected by `discovery_type`."""
    usb_types = (DiscoveryType.all, DiscoveryType.usb)
    network_types = (DiscoveryType.all, DiscoveryType.mdns, DiscoveryType.mqtt)
    if discovery_type in usb_types:
        yield from discover_usb()
    if discovery_type in network_types:
        yield from discover_mdns()
def list_devices(discovery_type: DiscoveryType,
                 compose_config: Optional[dict]):
    """Print a table of all discovered devices, with any matching services."""
    id_services = match_id_services(compose_config)
    columns = ['discovery', 'model', 'device_id', 'device_host', 'service']
    table = tabular.Table(
        keys=columns,
        headers={
            'discovery': 'Discovery'.ljust(DISCOVERY_LEN),
            'model': 'Model'.ljust(MODEL_LEN),
            'device_id': 'Device ID'.ljust(MAX_ID_LEN),
            'device_host': 'Device host'.ljust(HOST_LEN),
            'service': 'Service',
        }
    )
    utils.info('Discovering devices...')
    table.print_headers()
    for dev in discover_device(discovery_type):
        row = asdict(dev)
        # Show which configured services use this controller, if any
        row['service'] = id_services.get(dev.device_id, '')
        table.print_row(row)
def choose_device(discovery_type: DiscoveryType,
                  compose_config: Optional[dict],
                  ) -> Optional[DiscoveredDevice]:
    """Discover devices, print them as an indexed table, and prompt for a choice.

    Returns the chosen DiscoveredDevice, or None if nothing was found.
    """
    id_services = match_id_services(compose_config)
    table = tabular.Table(
        keys=[
            'index',
            'discovery',
            'model',
            'device_id',
            'device_host',
            'service'
        ],
        headers={
            'index': 'Index',
            'discovery': 'Discovery'.ljust(DISCOVERY_LEN),
            'model': 'Model'.ljust(MODEL_LEN),
            'device_id': 'Device ID'.ljust(MAX_ID_LEN),
            'device_host': 'Device host'.ljust(HOST_LEN),
            'service': 'Service',
        }
    )
    devs = []
    utils.info('Discovering devices...')
    table.print_headers()
    for dev in discover_device(discovery_type):
        # TODO(Bob) less hacky check
        # mqtt discovery only applies to Spark 4 controllers
        if discovery_type == DiscoveryType.mqtt and dev.model != 'Spark 4':
            continue
        devs.append(dev)
        table.print_row({
            **asdict(dev),
            'index': len(devs),
            'service': id_services.get(dev.device_id, ''),
        })
    if not devs:
        click.echo('No devices discovered')
        return None
    # The prompt is 1-based to match the printed table
    idx = click.prompt('Which device do you want to use?',
                       type=click.IntRange(1, len(devs)),
                       default=1)
    return devs[idx-1]
| BrewBlox/brewblox-ctl | brewblox_ctl/discovery.py | discovery.py | py | 7,942 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "typing.Opt... |
9817120997 | import itertools
import os
import pandas as pd
import pickle
import random
import torch
from torchvision import datasets
from tqdm import tqdm
from scipy import rand
import data_handler
from models import AlexNet, Vgg16
def generate_model_testset_results(model, testset_path):
    """
    Evaluate whole 'imagenetv2-matched-frequency-format-val' dataset
    on given model and return the results ("img_name", "max_confidence",
    "pred_label", "true_label", "pred_is_correct" for each image) as a
    DataFrame (the caller is responsible for pickling it).
    For information on the dataset see https://github.com/modestyachts/ImageNetV2
    """
    img_folder = datasets.ImageFolder(root=testset_path)
    img_names, true_labels_idx, pred_labels_idx, pred_max_confidences = [], [], [], []
    for img_path in tqdm(img_folder.imgs):
        pil_img = img_folder.loader(img_path[0])
        img_name = img_path[0].split(os.sep)[-1]
        # preprocessing and prediction
        input_tensor = data_handler.transform()(pil_img)
        input_tensor = input_tensor.unsqueeze(0)
        output = model.predict(input_tensor)
        # output has unnormalized scores. To get probabilities, run a softmax on it.
        probabilities = torch.nn.functional.softmax(output[0], dim=0)
        img_names.append(img_name)
        pred_max_confidences.append(probabilities.detach().numpy().max())
        pred_labels_idx.append(probabilities.detach().numpy().argmax())
        # the parent directory name encodes the ground-truth class index
        true_labels_idx.append(int(img_path[0].split(os.sep)[-2]))
    df = pd.DataFrame([img_names, pred_max_confidences, pred_labels_idx, true_labels_idx]).transpose()
    df.columns = ["img_name", "max_confidence", "pred_label", "true_label"]
    df["pred_is_correct"] = df["pred_label"] == df["true_label"]
    return df
def create_questionnairs(imgs_idx, xai_methods, model_names, df_vgg, df_alex, seed=None):
    """Build all questionnaire forms: a fixed-image half shared by all forms,
    followed by randomly drawn unique images until every variant is covered."""
    if seed:
        random.seed(seed)
    # First half: the same 12 images appear (in rotated variants) on every form
    forms = get_fixed_img_questionnaires(imgs_idx, xai_methods, model_names)
    # Second half: mutates `forms` in place, popping further images from imgs_idx
    add_random_unique_images(forms, imgs_idx, df_alex, df_vgg, model_names, xai_methods)
    return forms
def get_fixed_img_questionnaires(imgs_idx, xai_methods, models):
NUM_QUESTIONNAIRES = 12
NUM_IMGS = 12
questionnaires_list = []
random_imgs_idx = [imgs_idx.pop(random.randint(0, len(imgs_idx) - 1)) for i in range(NUM_IMGS)]
permutations = list(itertools.product(random_imgs_idx, models, xai_methods))
# distribute permutations on questionnaires
for q in range(NUM_QUESTIONNAIRES):
questionnaire = []
for i in range(NUM_IMGS):
if (q + i) > (NUM_IMGS - 1):
questionnaire.append(permutations[i * NUM_IMGS:i * NUM_IMGS + NUM_IMGS][(q + i) - NUM_IMGS])
else:
questionnaire.append(permutations[i * NUM_IMGS:i * NUM_IMGS + NUM_IMGS][q + i])
questionnaires_list.append(questionnaire)
return questionnaires_list
def add_random_unique_images(questionnaires_list, imgs_idx, df_alex, df_vgg, model_names, xai_methods):
FINAL_QUESTIONNAIRE_SIZE = 24
for idx_qn, questionnaire in enumerate(questionnaires_list):
df_variants_count = pd.DataFrame(list(itertools.product(xai_methods, model_names, [True, False]))).groupby(
[0, 1, 2]).count()
df_variants_count["count"] = 0
# evaluate variants for the already drawn fixed questions
for idx_q, question in enumerate(questionnaire):
if question[1] == "alex":
if df_alex["pred_is_correct"][question[0]]:
questionnaires_list[idx_qn][idx_q] += (True,)
df_variants_count.loc[question[2], "alex", True]["count"] += 1
else:
questionnaires_list[idx_qn][idx_q] += (False,)
df_variants_count.loc[question[2], "alex", False]["count"] += 1
else:
if df_vgg["pred_is_correct"][question[0]]:
questionnaires_list[idx_qn][idx_q] += (True,)
df_variants_count.loc[question[2], "vgg", True]["count"] += 1
else:
questionnaires_list[idx_qn][idx_q] += (False,)
df_variants_count.loc[question[2], "vgg", False]["count"] += 1
"""
add addtional random images to each questionnaire such that for every variant in df_variants_count the
count will be 1
"""
while df_variants_count["count"].sum() != FINAL_QUESTIONNAIRE_SIZE:
rand_img_idx = imgs_idx.pop(random.randint(0, len(imgs_idx) - 1))
alex_pred = df_alex.loc[rand_img_idx]["pred_is_correct"]
vgg_pred = df_alex.loc[rand_img_idx]["pred_is_correct"]
df_alex_options = df_variants_count.loc[:, "alex", alex_pred]
df_alex_options = df_alex_options[df_alex_options["count"] == 0]
df_vgg_options = df_variants_count.loc[:, "vgg", vgg_pred]
df_vgg_options = df_vgg_options[df_vgg_options["count"] == 0]
if not df_alex_options.empty:
rand_variant = df_alex_options.index[random.randint(0, df_alex_options.shape[0] - 1)]
question = (rand_img_idx, rand_variant[1], rand_variant[0], rand_variant[2])
questionnaire.append(question)
df_variants_count.loc[rand_variant]["count"] += 1
elif not df_vgg_options.empty:
rand_variant = df_vgg_options.index[random.randint(0, df_vgg_options.shape[0] - 1)]
question = (rand_img_idx, rand_variant[1], rand_variant[0], rand_variant[2])
questionnaire.append(question)
df_variants_count.loc[rand_variant]["count"] += 1
def save_questionnaires(questionnaires_list, path):
    """Serialize the questionnaire forms to `path` via pickle."""
    with open(path, "wb") as handle:
        pickle.dump(questionnaires_list, handle)
def shuffle_questions(questionnaire):
    """Shuffle the question order within each questionnaire form, in place.

    The argument is the full list of questionnaire forms; each inner list
    is shuffled with random.shuffle.
    """
    # FIX: use a distinct loop variable; the original wrote
    # `for questionnaire in questionnaire`, shadowing the parameter, which
    # worked only by accident and obscured intent.
    for form in questionnaire:
        random.shuffle(form)
def main():
    """
    Create questionnaires.

    The testset is only (re-)evaluated if the cached model results are missing.
    NOTE(review): results are written to df_{model.name}_2.pickle but read
    from df_vgg.pickle / df_alexnet.pickle — the cache that is written is not
    the one that is read; confirm the intended filenames.
    """
    folder_vgg = os.path.join(os.path.curdir, "data", "stats", "df_vgg.pickle")
    folder_alex = os.path.join(os.path.curdir, "data", "stats", "df_alexnet.pickle")
    if not (os.path.exists(folder_alex) and os.path.exists(folder_vgg)):
        # Evaluate both models on the ImageNetV2 testset and cache the results
        models = [Vgg16(), AlexNet()]
        for model in models:
            model.eval()
            folder = os.path.join(os.path.curdir, "data", "imagenetv2-matched-frequency-format-val")
            df = generate_model_testset_results(model, folder)
            df.to_pickle(f"data/stats/df_{model.name}_2.pickle")
    imgs_idx = list(range(10000))
    xai_methods = ['gradCAM', 'LRP', 'SHAP', 'LIME', 'ConfidenceScores', 'IntegratedGradients']
    model_names = ["alex", "vgg"]
    df_vgg = pd.read_pickle(folder_vgg)
    df_alex = pd.read_pickle(folder_alex)
    # Generate new questionnaires with a fixed seed and save them shuffled
    questionnaires_list = create_questionnairs(imgs_idx, xai_methods, model_names, df_vgg, df_alex, seed=3)
    shuffle_questions(questionnaires_list)
    folder = os.path.join(os.path.curdir, "data", "question_generation", "questionnaires_shuffled.pickle")
    save_questionnaires(questionnaires_list, folder)
    # additionally shuffle questions in questionnairs
    # NOTE(review): this reloads the previously saved questionnaires and
    # overwrites questionnaires_shuffled.pickle again — confirm this second
    # pass is intentional.
    folder = os.path.join(os.path.curdir, "data", "question_generation", "questionnaires.pickle")
    questionnaires_list = data_handler.get_questionnaires(folder)
    shuffle_questions(questionnaires_list)
    folder = os.path.join(os.path.curdir, "data", "question_generation", "questionnaires_shuffled.pickle")
    save_questionnaires(questionnaires_list, folder)
if __name__ == '__main__':
    main()
| tlabarta/helpfulnessofxai | experiment_creator.py | experiment_creator.py | py | 7,853 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torchvision.datasets.ImageFolder",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "tqdm.tqdm",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.se... |
42157253168 | """empty message
Revision ID: c4665b8d682b
Revises: 10dbb0e0a903
Create Date: 2019-12-26 14:38:11.609539
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c4665b8d682b'
down_revision = '10dbb0e0a903'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: add the nullable integer 'gage' column to 'goods'."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('goods', sa.Column('gage', sa.Integer(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the 'gage' column from 'goods'."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('goods', 'gage')
    # ### end Alembic commands ###
| FeelingsLw/flask_demo2 | migrations/versions/c4665b8d682b_.py | c4665b8d682b_.py | py | 646 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "alembic.op.add_column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer... |
74953743465 | """Unit tests for the config module."""
import os
import pytest
from wmtmetadata.config import Config, ConfigFromFile, ConfigFromHost
from wmtmetadata.host import HostInfo
from . import data_dir
tmp_dir = '/tmp'
sample_config_file = os.path.join(data_dir, 'wmt-config-siwenna.yaml')
host = 'siwenna.colorado.edu'
name = 'Hydrotrend'
fetched_config_file = 'wmt-config-{}.yaml'.format(host)
def test_config():
    """Config() can be instantiated."""
    config = Config()
    assert isinstance(config, Config)
def test_configfromfile():
    """ConfigFromFile stores the given filename."""
    config = ConfigFromFile(sample_config_file)
    assert config.filename == sample_config_file
def test_configfromfile_load():
    """Loading the sample config exposes its component and host."""
    config = ConfigFromFile(sample_config_file)
    config.load()
    # FIX: dict.keys() returns a view in Python 3, which has no pop();
    # materialize it as a list first (matches the Python 2 behavior).
    components = list(config.components.keys())
    assert components.pop() == name
    assert config.host['hostname'] == host
def test_configfromhost():
    """ConfigFromHost records the target host name in its executor."""
    config = ConfigFromHost(host)
    assert config.executor.info['name'] == host
@pytest.mark.skip(reason="Don't abuse remote test machine")
def test_configfromhost_build():
    """build() runs on the remote host (skipped by default)."""
    config = ConfigFromHost(host)
    config.build()
@pytest.mark.skip(reason="Don't abuse remote test machine")
def test_configfromhost_fetch():
    """fetch() copies the remote config file into local_dir (skipped by default)."""
    config = ConfigFromHost(host)
    config.fetch(local_dir=tmp_dir)
    assert os.path.isfile(os.path.join(tmp_dir, fetched_config_file))
@pytest.mark.skip(reason="Don't abuse remote test machine")
def test_configfromhost_load():
    """build + fetch + load exposes the expected component (skipped by default)."""
    config = ConfigFromHost(host)
    config.build()
    config.fetch(local_dir=tmp_dir)
    config.load()
    # FIX: dict.keys() returns a view in Python 3, which has no pop();
    # materialize it as a list first (matches the Python 2 behavior).
    components = list(config.components.keys())
    assert components.pop() == name
| csdms/wmt-metadata | wmtmetadata/tests/test_config.py | test_config.py | py | 1,593 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "wmtmetadata.config.Config",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "wmtmetadata.confi... |
19483517480 | '''
函数说明:
Author: hongqing
Date: 2021-08-04 14:23:54
LastEditTime: 2021-08-04 15:23:25
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
numoffinger=21
class Net(nn.Module):
    """MLP over numoffinger - 1 = 20 input features with 3 output classes.

    `type` selects the weight initialization scheme:
    0 -> all-zero weights, 1 -> normal(0, 3) (default), 2 -> random_(1, 2).
    NOTE(review): `type` shadows the builtin but is kept for caller
    compatibility; random_(1, 2) draws integers from [1, 2), i.e. fills the
    weights with ones — confirm the intended range.
    """
    def __init__(self,type=1):
        super(Net, self).__init__()
        # 20 -> 255 -> 255 -> 3 fully connected stack
        self.fc1 = nn.Linear(numoffinger-1, 255)
        self.fc2 = nn.Linear(255, 255)
        self.fc3 = nn.Linear(255, 3)
        if(type==1):
            self.fc1.weight.data.normal_(0, 3) # initialization
            self.fc2.weight.data.normal_(0, 3)
            self.fc3.weight.data.normal_(0, 3)
        if(type==0):
            self.fc1.weight.data.zero_() # initialization
            self.fc2.weight.data.zero_()
            self.fc3.weight.data.zero_()
        if(type==2):
            self.fc1.weight.data.random_(1,2) # initialization
            self.fc2.weight.data.random_(1,2)
            self.fc3.weight.data.random_(1,2)
    def forward(self, x):
        # Two ReLU-activated hidden layers; raw logits are returned
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        x = F.relu(x)
        x = self.fc3(x)
        return x | KouseiHongqing/KouseiPose | mymodel.py | mymodel.py | py | 1,095 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_nu... |
17541630757 | import glob
import os
import random
import shutil
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms.v2 as T
def set_seed(seed: int = 42):
    """Seed Python, NumPy and PyTorch RNGs for reproducible runs."""
    os.environ["PYTHONHASHSEED"] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Trade cuDNN autotuning for run-to-run determinism
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def remove_glob(pathname: str):
    """Delete every file and directory tree matched by `pathname` (recursive glob)."""
    for match in glob.glob(pathname, recursive=True):
        if os.path.isdir(match):
            shutil.rmtree(match)
        elif os.path.isfile(match):
            os.remove(match)
class ImageTransform:
    """Image transformation module.

    Builds a torchvision v2 pipeline for `phase` 'train' (random augmentation)
    or 'test' (deterministic resize + center crop); both end with ImageNet
    normalization statistics.
    """
    def __init__(self, input_size=384, phase="train"):
        if phase == "train":
            # Random crop, axis-aligned rotation, flip and color jitter
            self.data_transform = nn.Sequential(
                T.RandomResizedCrop(input_size, (0.25, 1.0), (3 / 4, 4 / 3)),
                T.RandomChoice(
                    [
                        # fixed-angle ranges -> exactly 0/90/180/270 degrees
                        T.RandomRotation((0, 0)),
                        T.RandomRotation((90, 90)),
                        T.RandomRotation((180, 180)),
                        T.RandomRotation((270, 270)),
                    ],
                ),
                T.RandomHorizontalFlip(p=0.5),
                T.ColorJitter(brightness=0.5),
                T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            )
        elif phase == "test":
            self.data_transform = nn.Sequential(
                T.Resize(input_size),
                T.CenterCrop(input_size),
                T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
            )
    def __call__(self, img):
        # NOTE(review): for any phase other than 'train'/'test',
        # data_transform is never set and this raises AttributeError.
        return self.data_transform(img)
| xkurozaru/fewshot-finetune-domain-adaptation | common/utils.py | utils.py | py | 1,761 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random.seed",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"lin... |
16879280056 | import requests
import json
import urllib3
from settings import settings as settings
from sdwan_operations import monitor as sdwanmn
import time
import sys, getopt
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def main(argv):
    """Parse CLI options, load device/field lists, and poll port stats forever.

    Usage: python __main__.py -d <devfile> -f <fieldfile> -c <optional csvfile>

    Exits with status 1 after printing usage (-h), and status 2 on bad
    arguments or missing input files. The polling loop runs until
    interrupted (Ctrl-C).
    """
    devfile = ''
    fieldfile = ''
    csvfile = ''
    usage = 'python __main__.py -d <devfile> -f <fieldfile> -c <optional csvfile>'
    try:
        # FIX: the long option must be 'csvfile=' so that --csvfile accepts a
        # value, matching the short 'c:' option which requires one.
        opts, args = getopt.getopt(argv, "hd:f:c:", ["devfile=", "fieldfile=", "csvfile="])
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print(usage)
            sys.exit(1)
        elif opt in ("-d", "--devfile"):
            devfile = arg
        elif opt in ("-f", "--fieldfile"):
            fieldfile = arg
        elif opt in ("-c", "--csvfile"):
            csvfile = arg
    # Read the device list from the supplied file (for example, dev_list.json)
    try:
        with open(devfile) as f:
            dev_list = json.load(f)
    except FileNotFoundError:
        print(usage)
        sys.exit(2)
    # Read the field list from the supplied file (for example, field_list.json)
    try:
        with open(fieldfile) as f:
            field_list = json.load(f)
    except FileNotFoundError:
        print(usage)
        sys.exit(2)
    # Poll once per minute until interrupted
    while True:
        # Fresh authenticated session per iteration
        session = requests.Session()
        session.auth = (settings.vmanage_username, settings.vmanage_password)
        sdwanmn.show_port_stats(session, dev_list, field_list)
        # An empty csvfile disables CSV export
        if csvfile != "":
            sdwanmn.dump_to_csv(session, dev_list, field_list, csvfile)
        time.sleep(60)
if __name__ == "__main__":
main(sys.argv[1:])
| stantiku/sdwan_monitor | __main__.py | __main__.py | py | 2,117 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "urllib3.disable_warnings",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "urllib3.exceptions",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "getopt.getopt",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "getopt.G... |
27414823832 |
import logging
import os
from copy import deepcopy
from harmony.util import shortened_id
from harmony.repository_state import RepositoryState
logger = logging.getLogger(__name__)
def commit(local_location_id, working_directory, location_states, repository_state):
    """
    Scan the given working directory for changes and commit them to local
    state storage.
    That is, update location_states[local_location_id] with the
    new current file states (digests, "who has what?").
    Also update repository_state info ("who made which content decision in what
    order?")
    Parameters:
    local_location_id:
        ID of the location that is considered local (i.e. the one that belongs
        to the working_directory instance)
    working_directory:
        WorkingDirectory instance representing the local working directory.
    location_states:
        LocationStates instance representing the local location state storage.
        Will (possibly) be modified.
    repository_state:
        RepositoryState instance representing the local repository state
        storage. Will (possibly) be modified.
    return:
        True iff any change was recorded.
    """
    id_ = local_location_id
    short_id = shortened_id(id_)
    # Consider files currently present in the WD plus files previously known
    # to this location (so deletions are seen too)
    paths = set(working_directory.get_filenames()) \
            | set(location_states.get_all_paths(id_))
    # 1. update location state
    #    - detect renames (add WIPE entries later for those)
    #    - when a file is *added* that is known to other locations w/
    #      different digest, let user confirm what he wants to do (see above)
    #    - increase local clock
    #
    # 2. update repository state
    #    - if file changed in step 1:
    #      clock = current clock for local + max for each other location
    #      hash = current local hash
    #      (deviate from this if user selected to do something else)
    #    - if file did not change:
    #      no change in hash or clock
    # Do all the file scanning before so we can be sure to do it at most
    # once per file in the WD
    wd_states = {
        path: working_directory.generate_file_state(path)
        for path in paths
        if working_directory.file_maybe_modified(
            location_states.get_file_state(id_, path)
        )
    }
    location_state_cache = {
        path: location_states.get_file_state(id_, path)
        for path in paths
    }
    any_change = False
    for path in paths:
        if path in wd_states:
            file_state = location_state_cache[path]
            new_file_state = wd_states[path]
            changed = location_states.update_file_state(id_, new_file_state)
            if changed:
                any_change = True
                # If the file vanished but a new one with the same digest
                # popped up, consider that a rename.
                # Rename means, the old file is WIPEd (instead of just
                # locally removed) and the new file is added as usual
                if not new_file_state.exists():
                    logger.debug('{} vanished'.format(new_file_state.path))
                    # Iterate over paths to find a possible rename target
                    for path2 in paths:
                        # Rename to itself does not make sense
                        # Rename to a file that has not changed (or better: just appeared) does not make sense
                        if path2 == path or path2 not in wd_states:
                            continue
                        path2_state = location_state_cache[path2]
                        new_path2_state = wd_states[path2]
                        logger.debug('{} rename candidate {} ex before={} ex now={} self.digest={} candidate.digest={}'.format(
                            path, path2, path2_state.exists(),
                            new_path2_state.exists(),
                            file_state.digest, new_path2_state.digest
                        ))
                        # A candidate must have just appeared with the
                        # vanished file's previous digest
                        if not path2_state.exists() \
                                and new_path2_state.exists() \
                                and new_path2_state.digest == file_state.digest:
                            logger.info('Detected rename: {} -> {}'.format(path, path2))
                            new_file_state.wipe = True
                            new_file_state.digest = file_state.digest
                            break
                # Record the decision with an incremented local clock value
                repository_state.update_file_state(
                    new_file_state,
                    id_,
                    location_states.get_clock(id_) + 1,
                )
                logger.debug('{} committed: {} clk={}'.format(short_id, new_file_state.path, location_states.get_clock(id_) + 1))
            else:
                logger.debug('{} not actually changed: {}'.format(short_id, path))
        else:
            logger.debug('{} not changed: {}'.format(short_id, path))
    return any_change
def merge(local_state, remote_state, merger_id):
    """
    Merge two repository states ('local' and 'remote') into a common state if
    possible, auto-detecting if a change only happened on one side and
    propagating those changes.
    For cases in which a file was changed on both sides, return details of the
    conflict.
    local_state:
        RepositoryState() instance that reflects the local repository state.
    remote_state:
        RepositoryState() instance that reflects the remote repository state.
    merger_id:
        ID of the repository conducting the merge (assumed to correspond
        to the 'local' repository)
    return:
        A pair (conflicts, merged).
        $conflicts is a dictionary of the form { path: (local_entry, remote_entry),
        ... } whereas $path denotes the path of a file in conflict and $local_entry
        and $remote_entry refer to the RepositoryState.Entry instances for that
        file that are in conflict.
        $merged is a newly created RepositoryState instance with selected merged
        repository states.
        If $conflicts is empty, $merged covers all files present either locally or
        remotely.
    """
    local_paths = set(local_state.get_paths())
    remote_paths = set(remote_state.get_paths())
    merged = RepositoryState(None)
    conflicts = {}
    # Paths known to only one side can be taken over verbatim
    for p in local_paths - remote_paths:
        merged[p] = local_state[p]
    for p in remote_paths - local_paths:
        merged[p] = remote_state[p]
    # conflicts can only arise in paths that are specified in both state
    # files
    paths = set(local_state.get_paths()) & set(remote_state.get_paths())
    for path in paths:
        local = local_state[path]
        remote = remote_state[path]
        # compare() returns None when neither clock dominates the other,
        # i.e. the entries were modified concurrently
        c = local.clock.compare(remote.clock)
        if c is None:
            if local.contents_different(remote):
                logger.debug('merge: {} in conflict: {} <-> {}'.format(
                    path, local.clock, remote.clock
                ))
                conflicts[path] = (local, remote)
            else:
                # Concurrent but identical content: merge the clocks and
                # credit the merge to merger_id
                logger.debug('merge: {} automerged (same content)'.format(path))
                m = deepcopy(local)
                m.clock.update(remote.clock)
                m.clock.increase(merger_id)
                merged[path] = m
        elif c < 0:
            logger.debug('merge: {} newer on remote'.format(path))
            merged[path] = remote
        else: # c >= 0:
            logger.debug('merge: {} same version or newer on local'.format(path))
            merged[path] = local
    return conflicts, merged
def auto_rename(working_directory, repository_state):
    """
    Apply automatic renaming in the given working_directory.
    That is, if working dir contains files that are WIPEd in $repository_state but
    are present under a different name, automatically rename those to obtain
    the repository file at a low cost.
    Repository.commit() should be called after calling this to commit the
    changes to the working directory.
    precondition: WD clean
    """
    # FIX: the docstring above was previously placed *after* the import and
    # assert, so it was a no-op string expression instead of the function's
    # docstring. Import locally to avoid a circular import at module load.
    from harmony.working_directory import WorkingDirectory
    assert isinstance(working_directory, WorkingDirectory)
    # Auto-renaming
    # -------------
    # 1. Find any files $A with a WIPE entry.
    # 2. Compute/get their digest (from location state)
    # 3. Find a non-wiped file $B in repo that does not exist in the WD
    # 4. Rename $A to $B
    for path, entry in repository_state.files.items():
        logger.debug('auto_rename: {}: path={} wipe={} in_wd={}'.format(path, entry.path, entry.wipe, (entry.path in working_directory)))
        if entry.wipe and (entry.path in working_directory):
            # Non-wiped entries elsewhere in the repo with the same digest
            possible_targets = {
                e.path for e in repository_state.files.values()
                if e.path != path and e.digest == entry.digest and not e.wipe
            }
            logger.info(
                '{} could be auto-renamed to any of {}'.format(
                    path, possible_targets
                )
            )
            if possible_targets:
                # NOTE(review): set.pop() picks an arbitrary candidate when
                # several targets share the digest — confirm this is acceptable.
                (working_directory.path / path).rename(working_directory.path / possible_targets.pop())
| Droggelbecher/harmony | harmony/file_state_logic.py | file_state_logic.py | py | 9,182 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "harmony.util.shortened_id",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "harmony.repository_state.RepositoryState",
"line_number": 163,
"usage_type": "call"
},
{
... |
5058919782 |
from django.db import models
# Create your models here.
class Meal(models.Model):
    """A single meal menu, stored as free-form text."""
    menu = models.TextField(blank=True)
    def __str__(self):
        return self.menu
class MealList(models.Model):
    """The breakfast/lunch/dinner meals planned for a single date."""
    date = models.DateField(blank=True)
    # Each slot references a Meal; deleting the Meal removes the reference row
    breakfast = models.ForeignKey(
        Meal,
        blank=True,
        null=True,
        on_delete=models.CASCADE,
        related_name="breakfast_id",
    )
    lunch = models.ForeignKey(
        Meal,
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="lunch"
    )
    dinner = models.ForeignKey(
        Meal,
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="dinner"
    )
    created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)
    class Meta:
        db_table = 'meal_lists'
    def update_meal(self, data, number):
        """Assign `data` to the slot selected by `number`
        (1=breakfast, 2=lunch, 3=dinner), save, and return self.
        Any other number only saves without changing a slot."""
        if number == 1:
            self.breakfast = data
        if number == 2:
            self.lunch = data
        if number == 3:
            self.dinner = data
        self.save()
        return self
| KaceTH/django_api-0.00.90 | MealTable/models.py | models.py | py | 1,139 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "... |
7595057258 | from os import path
from subprocess import check_output, check_call
from pathlib import Path
def _filename(fname):
    """Resolve `fname` to the absolute path of the matching .sh script
    inside the installed yapl package."""
    import yapl
    package_dir = Path(yapl.__file__).parent
    return str(package_dir / (fname + ".sh"))
def func(fname, *args):
    """Run the named shell script with `args` and return its stripped stdout."""
    script = _filename(fname)
    output = check_output([script, *map(str, args)], text=True)
    return output.strip()
def proc(fname, *args):
    """Run the named shell script with `args`; raises CalledProcessError on failure."""
    script = _filename(fname)
    check_call([script, *(str(a) for a in args)])
| padresmurfa/yapl | python_library/yapl/internal/shell_call.py | shell_call.py | py | 613 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "yapl.__file__",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "subprocess.check_output",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "subprocess.chec... |
1904857127 | from scipy import linalg
import numpy as np
import scipy.optimize as sopt
import matplotlib.pyplot as plt
from scipy.optimize import fsolve
from scipy.optimize import LinearConstraint
from matplotlib import cm
import tabulate
import math
START_POINT = [2,2]
def fun(x: np.ndarray) -> np.float64:
    """Objective function: squared Euclidean distance from the point (2, 1)."""
    dx = x[0] - 2
    dy = x[1] - 1
    return dx ** 2 + dy ** 2
# hfunc - represents an "= 0" fun
def hfun(x: np.ndarray) -> np.float64:
return x[0]-2*x[1]+1
# gfunc - represents an ">= 0" fun
def gfun(x: np.ndarray) -> np.float64:
return -0.25 * x[0]**2 - x[1]**2+1
def gplusfun(g, x, u, c):
    """Clipped constraint value min(g(x), u/c), as used in the augmented
    Lagrangian formulation."""
    cap = np.divide(u, c)
    return min(g(x), cap)
def modified_lagrange_method(fun, points, epsx, g_constarins, h_constrains, u=[0], a=[0], c=0.1, beta=2, counter=0, func_counter = 0, _callback=None):
    """Minimize *fun* subject to constraints via the augmented Lagrangian
    (method of multipliers), recursing once per outer iteration.

    Each call builds the augmented Lagrangian from the current multipliers,
    minimizes it unconstrained with BFGS, updates the multipliers and the
    penalty factor, and recurses until consecutive iterates differ by less
    than ``epsx`` in both coordinates.

    Arguments:
        fun {Callable} -- objective, maps ndarray -> scalar
        points {list} -- accumulated iterates; points[-1] is the start point
        epsx {float} -- per-coordinate convergence tolerance
        g_constarins {[Callable]} -- inequality constraints, g(x) >= 0
        h_constrains {[Callable]} -- equality constraints, h(x) == 0
    Keyword Arguments:
        u {list} -- Lagrange multipliers for inequalities, same length as
            g_constarins (default: {[0]})
        a {list} -- Lagrange multipliers for equalities, same length as
            h_constrains (default: {[0]})
        c {float} -- penalty factor, grown by *beta* each iteration (default: {0.1})
        beta {int} -- penalty growth rate, recommended range [2; 26] (default: {2})
        counter {int} -- running count of inner BFGS iterations (default: {0})
        func_counter {int} -- running count of objective evaluations (default: {0})
        _callback -- optional function receiving a dict of all intermediate
            values: x, u, a, c, f(x), L(x,u,a,c), iter, fiter
    Returns:
        list -- all iterates visited, final one last.
    Note:
        u and a default to mutable lists; this is benign here because both
        are rebound (never mutated) before the recursive call.
    """
    def lagrange(x):
        # Augmented Lagrangian L(x, u, a, c) for the current multipliers.
        # NOTE(review): the first condition tests len(g_constarins) twice;
        # it presumably meant `... and len(h_constrains) != 0`. The typo is
        # harmless in practice because the h-sums below vanish when
        # h_constrains is empty, but it should be confirmed and fixed.
        if(len(g_constarins) != 0 and len(g_constarins) != 0):
            array_of_constrains_g = np.array(
                [gplusfun(g_constrain, x, u_i, c) for g_constrain, u_i in zip(g_constarins, u)])
            array_of_constrains_h = np.array(
                [h_constrain(x) for h_constrain in h_constrains])
            return fun(x) - sum([u_i * g for u_i, g in zip(u, array_of_constrains_g)]) + 0.5*sum(c * array_of_constrains_g**2) - sum([a_i * g for a_i, g in zip(a, array_of_constrains_h)]) + 0.5*sum(c * array_of_constrains_h**2)
        elif(len(h_constrains) != 0 and len(g_constarins) == 0):
            # Only equality constraints present.
            array_of_constrains_h = np.array(
                [h_constrain(x) for h_constrain in h_constrains])
            return fun(x) - sum([a_i * h for a_i, h in zip(a, array_of_constrains_h)]) + 0.5*sum(c * array_of_constrains_h**2)
        elif(len(h_constrains) == 0 and len(g_constarins) != 0):
            # Only inequality constraints present.
            array_of_constrains_g = np.array(
                [gplusfun(g_constrain, x, u_i, c) for g_constrain, u_i in zip(g_constarins, u)])
            return fun(x) - sum([u_i * g for u_i, g in zip(u, array_of_constrains_g)]) + 0.5*sum(c * array_of_constrains_g**2)
        else:
            # Unconstrained problem degenerates to the plain objective.
            return fun(x)
    # Report the state *before* this iteration's minimization.
    if _callback is not None:
        _callback({"x": points[-1], "u": u, "a": a, "c": c, "f": fun(points[-1]), "L": lagrange(points[-1]), "iter": counter, "fiter": func_counter})
    # BFGS was chosen as the fastest / most efficient inner solver for
    # the smooth augmented Lagrangian in these test cases.
    res = sopt.minimize(
        lagrange, x0=points[-1], method='BFGS')
    next_val = res.x
    func_counter = func_counter+res.nfev
    counter = counter+res.nit
    points.append(next_val)
    # First-order multiplier updates: u is clipped at zero (KKT sign
    # condition for inequalities), a is a plain gradient step.
    u = np.array([max(0, (u_i - c*g_constrain(next_val)))
                  for g_constrain, u_i in zip(g_constarins, u)])
    a = np.array([a_i-c*h_constrain(next_val)
                  for h_constrain, a_i in zip(h_constrains, a)])
    # Grow the penalty to tighten constraint satisfaction next round.
    c = beta*c
    counter = counter+1
    # Converged when both coordinates moved less than epsx.
    if(abs(next_val - points[-2])[0] < epsx and abs(next_val - points[-2])[1] < epsx):
        return points
    else:
        return modified_lagrange_method(fun, points, epsx, g_constarins, h_constrains, u, a, c, beta, counter, func_counter,_callback)
def filter_zLim(X, Y, Z, zlim):
    """Clamp out-of-range Z values in place for plotting.

    Any Z[i][j] outside the closed interval [zlim[0], zlim[1]] is replaced
    with 4 (the z-axis ceiling used by the 3-D plot) so the wireframe stays
    inside the visible z window.

    Args:
        X, Y: mesh coordinate arrays, passed through unchanged.
        Z: 2-D array (list of rows) of function values, modified in place.
        zlim: (low, high) pair of allowed z values.

    Returns:
        The (X, Y, Z) triple with Z clamped.
    """
    low, high = zlim
    for i in range(len(Z)):
        # Iterate each row's own length rather than len(Z): the original
        # used len(Z) for the column count and broke on non-square grids.
        for j in range(len(Z[i])):
            if Z[i][j] > high or Z[i][j] < low:
                Z[i][j] = 4  # ceiling value matching the plot's set_zlim((0, 4))
    return X, Y, Z
def printDecorator(f, res):
    """Wrap *f* so that every argument it is called with is appended to
    the external list *res* before the call is forwarded."""
    def recording_call(x):
        res.append(x)
        return f(x)
    return recording_call
def trunc(number, digits) -> float:
    """Truncate *number* toward zero to *digits* decimal places."""
    scale = 10.0 ** digits
    return math.trunc(number * scale) / scale
def plotting():
    """Run the constrained-optimization demo and visualize the result.

    Left subplot: 3-D wireframe of the objective with the zero-level curves
    of the g (blue) and h (lime) constraints. Right subplot: 2-D contour
    plot annotated with the objective value along the iterate path. Also
    prints a table of the first eleven iterations plus the final one.
    """
    xs = []
    results_list = []
    # The callback records every intermediate state: xs keeps only the
    # iterate, results_list (filled by printDecorator) keeps the full dicts.
    f = lambda x: xs.append(x["x"])
    callback = printDecorator(f, results_list)
    #Adjust plotting scale here where x in [a1,b1] and y in [a2,b2] [a1: b1: 20j, a2: b2: 20j]
    X, Y = np.mgrid[2.4:0:20j, 2.5:-1.5:20j]
    Z = fun(np.array([X, Y]))
    fig = plt.figure()
    ax = fig.add_subplot(1, 2, 1, projection='3d')
    ax.set_xlim((0, 3))
    ax.set_ylim((-1.5, 3))
    ax.set_zlim((0, 4))
    ax.plot_wireframe(X, Y, Z)
    # Draw the constraint boundaries (zero level sets) on the 3-D axes.
    ax.contour(X, Y, gfun(np.array([X, Y])), levels=[0], colors='blue')
    ax.contour(X, Y, hfun(np.array([X, Y])), levels=[0], colors='lime')
    #Put list of constrains here, for my case its one constrain g(x) and one h(x)
    g_constarins = np.array([gfun])
    h_constrains = np.array([hfun])
    vals = modified_lagrange_method(fun, list([START_POINT, ]), 1e-6,
                                    g_constarins, h_constrains, _callback=callback)
    #Print Results Table: first 11 iterations plus the final one.
    header = results_list[0].keys()
    rows = [x.values() for x in results_list[0:11] + [results_list[-1]]]
    print(tabulate.tabulate(rows, header, tablefmt='grid'))
    # Overlay the optimization path (x, y, f(x,y)) on the wireframe.
    ax.plot(np.array(vals).T[0], np.array(vals).T[1], np.array(
        list(map(fun, np.array(vals)))).T, "x-", color='red')
    ax1 = fig.add_subplot(1, 2, 2)
    #Adjust plotting scale here where x in [a1,b1] and y in [a2,b2] [a1: b1: 20j, a2: b2: 20j]
    X, Y = np.mgrid[3: 0: 20j, 2.3: -1.5: 20j]
    Z = fun(np.array([X, Y]))
    ax1.contour(X, Y, Z, levels=40)
    t = 0
    # Annotate the path with truncated f(x) values, skipping points whose
    # objective changed by less than 1e-2 to avoid overlapping labels.
    for x in zip(np.array(vals).T[0], np.array(vals).T[1]):
        if abs(fun(x) - t) > 1e-2:
            ax1.annotate(trunc(fun(x),3), (x[0], x[1]))
            t = fun(x)
    ax1.plot(np.array(vals).T[0], np.array(vals).T[1], "x-", color='red')
    # Label each constraint's zero contour as g<i>(x) / h<i>(x).
    for idx, g_constr in enumerate(g_constarins):
        ax1.clabel(ax1.contour(X, Y, g_constr(np.array([X, Y])), levels=[0], colors='blue'), fmt=f"g{idx}(x)", fontsize=10)
    for idx, h_constr in enumerate(h_constrains):
        ax1.clabel(ax1.contour(X, Y, h_constr(np.array([X, Y])), levels=[0], colors='lime'), fmt=f"h{idx}(x)", fontsize=10)
    plt.show()
plotting()
| BON4/FuncOptimization | constarint_optimization.py | constarint_optimization.py | py | 6,464 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.ndarray",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.float64",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "numpy.float6... |
73202366184 | import os, sys
from langchain.llms import OpenAI
from langchain.chains.question_answering import load_qa_chain
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
# CLI: <index-prefix> <question>; prefix is <dir>/<name> of a saved FAISS index.
prefix, query = sys.argv[1:3]
api_key = os.environ.get('OPENAI_API_KEY')
if api_key is None:
    sys.exit("OPENAI_API_KEY is unset")
embeddings = OpenAIEmbeddings(openai_api_key=api_key)
llm = OpenAI(temperature=0.7, openai_api_key=api_key)
# "stuff" chain type: all retrieved documents are concatenated into one prompt.
qa_chain = load_qa_chain(llm, chain_type="stuff")
# Split the prefix into the index directory and the index (file) name.
fdir = os.path.dirname(prefix)
fname = os.path.basename(prefix)
faiss_index = FAISS.load_local(fdir, embeddings, fname)
# This model's maximum context length is 4097 tokens, 256 for the completion
# — k=5 keeps the stuffed prompt under that budget.
ss = faiss_index.similarity_search(query.strip(), k=5)
ans = qa_chain.run(input_documents=ss, question=query)
print(ans.strip())
| d2jvkpn/x-ai | pkg/langchain/langchain_query.py | langchain_query.py | py | 833 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_numb... |
4758110469 | import os, random
import numpy as np
import torch
import argparse
from train import train
def init_seeds(seed=0):
    """Seed every RNG in play (Python, NumPy, Torch CPU, and all CUDA
    devices) so training runs are reproducible."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
def parsing_args(c):
    """Parse msflow's command-line options and copy them onto config *c*.

    Every parsed option becomes an attribute of *c* (overwriting any
    default the config module carried); ``input_size`` is then derived
    from the chosen MVTec class name.

    Args:
        c: config module/object to mutate.

    Returns:
        The same object *c*, mutated in place.
    """
    parser = argparse.ArgumentParser(description='msflow')
    parser.add_argument('--mode', default='train', type=str,
                        help='train or test.')
    parser.add_argument('--resume', action='store_true', default=False,
                        help='resume training or not.')
    parser.add_argument('--eval_ckpt', default='', type=str,
                        help='checkpoint path for evaluation.')
    parser.add_argument('--gpu', default='0', type=str)
    parser.add_argument('--class-name', default='bottle', type=str)
    parser.add_argument('--lr', default=1e-4, type=float,
                        help='learning rate')
    parser.add_argument('--batch-size', default=8, type=int,
                        help='train batch size')
    parser.add_argument('--meta-epochs', default=25, type=int,
                        help='number of meta epochs to train')
    parser.add_argument('--sub-epochs', default=4, type=int,
                        help='number of sub epochs to train')
    parser.add_argument('--extractor', default='wide_resnet50_2', type=str,
                        help='feature extractor')
    parser.add_argument('--pool-type', default='avg', type=str,
                        help='pool type for extracted feature maps')
    parser.add_argument('--parallel-blocks', default=[2, 5, 8], type=int, metavar='L', nargs='+',
                        help='number of flow blocks used in parallel flows.')
    parser.add_argument('--pro-eval', action='store_true', default=False,
                        help='evaluate the pro score or not.')
    parser.add_argument('--pro-eval-interval', default=4, type=int,
                        help='interval for pro evaluation.')
    args = parser.parse_args()
    # Mirror every CLI option onto the config object.
    for k, v in vars(args).items():
        setattr(c, k, v)
    # NOTE(review): only 'transistor' uses the smaller 256x256 input; all
    # other classes use 512x512 — presumably matching the paper's setup.
    c.input_size = (256, 256) if c.class_name == 'transistor' else (512, 512)
    return c
def main(c):
    """Parse CLI options into config *c*, pin the GPU and RNG seeds, derive
    the run's version name and checkpoint directory, then launch training."""
    c = parsing_args(c)
    os.environ['CUDA_VISIBLE_DEVICES'] = c.gpu
    init_seeds(seed=c.seed)
    blocks_tag = "".join(str(b) for b in c.parallel_blocks)
    c.version_name = 'msflow_{}_{}pool_pl{}'.format(c.extractor, c.pool_type, blocks_tag)
    c.ckpt_dir = os.path.join(c.work_dir, c.version_name, c.class_name)
    train(c)
if __name__ == '__main__':
import default as c
main(c) | cool-xuan/msflow | main.py | main.py | py | 2,580 | python | en | code | 15 | github-code | 36 | [
{
"api_name": "random.seed",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.manual_seed",
... |
35090835676 | #!/usr/bin/python
######################################################################################
## Find disk usage under a root directory, examples:
## 1. python du_rootdir.py -d rootDir
## Find disk usage under the root directory "rootDir"
## 2. python du_rootdir.py -d rootDir -r true
## Find disk usage recursively under the root directory "rootDir"
## 3. python du_rootdir.py -d rootDir -r true -l 3
## Find disk usage recursively under the root directory "rootDir" with
## recursive level 3
## Author: Zhichang Guo, email: Zhichang.Guo@noaa.gov
######################################################################################
from pathlib import Path
import argparse
import os
import sys
def print_indented(result, level):
    """Print *result* prefixed with *level* tab characters."""
    prefix = '\t' * level
    print(f"{prefix}{result}")
def traverse_dir(dir):
    """Print `du -sh` disk usage for each immediate subdirectory of *dir*.

    Hidden directories (names starting with '.') are skipped; flipping the
    local ``iflag`` switch away from 'not' would include them. Output is
    streamed to stdout exactly as ``du`` prints it (POSIX-only).
    """
    iflag = 'not'  # 'not' => skip dot-directories
    for entry in os.listdir(dir):
        fullpath = os.path.join(dir, entry)
        # os.path.join instead of `dir + entry`: the original concatenation
        # only worked because callers appended a trailing slash to *dir*.
        if not os.path.isdir(fullpath):
            continue
        if entry.startswith('.') and iflag.upper() == 'NOT':
            continue
        cmd = "du -sh " + " \'" + str(fullpath) + "\'"
        result = os.popen(cmd).read()
        print(result, end='')
def traverse_dir_recur(dir, max_level, level=0):
    """Recursively print `du -sh` for subdirectories of *dir*.

    Depth-first: children are reported before their parent directory.
    Nested entries (level > 0) are highlighted with ANSI colors (red on
    yellow). ``max_level == 0`` means unlimited recursion depth. Relies on
    *dir* ending with '/' (callers append it).
    """
    iflag = 'not'  # 'not' => skip dot-directories
    l = os.listdir(dir)
    for d in l:
        if os.path.isdir(dir + d):
            if not d.startswith('.') or (d.startswith('.') and not iflag.upper() == 'NOT'):
                fullpath = os.path.join(dir, d)
                # Recurse first so children print above their parent.
                if max_level == 0 or level < max_level:
                    traverse_dir_recur(dir + d + "/", max_level, level+1)
                cmd = "du -sh " + " \'" + str(fullpath) + "\'"
                result = os.popen(cmd).read()
                if level > 0:
                    # Nested levels: bold red text on yellow background.
                    print('\033[1;31;43m ' + result.rstrip("\n") + ' \033[0;0m')
                else:
                    print(result.rstrip("\n"))
def find_du(rootdir, rflag, max_level):
    """Report disk usage under *rootdir*.

    Prints the directory's owner, then either a flat (rflag == 'false') or
    a recursive listing of `du -sh` output, limited to *max_level* levels
    of recursion (0 = unlimited).
    """
    # Dropped the original's unused `home = os.environ['HOME']`, which also
    # raised KeyError in environments where HOME is unset.
    path = Path(rootdir)
    # Path.owner() is POSIX-only; this tool already depends on `du`.
    owner = path.owner()
    print("Owner: ", owner)
    rootdir += '/'  # the traverse_dir* helpers expect a trailing slash
    if rflag == 'false':
        traverse_dir(rootdir)
    else:
        traverse_dir_recur(rootdir, max_level)
if __name__ == "__main__":
    # CLI: -d root directory (required), -r 'true'/'false' recursion flag,
    # -l maximum recursion depth (0 = unlimited).
    ap = argparse.ArgumentParser()
    ap.add_argument('-d', '--rootd', help="name of the root directory", required=True)
    ap.add_argument('-r', '--rflag', help="recurively or not", default="false")
    ap.add_argument('-l', '--level', help="level", type=int, default=0)
    MyArgs = ap.parse_args()
    find_du(MyArgs.rootd, MyArgs.rflag, MyArgs.level)
| zhichang-guo/Scripts | du_rootdir.py | du_rootdir.py | py | 2,785 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number... |
38869163941 | from pathlib import Path
import pandas as pd
from sklearn.metrics import make_scorer, accuracy_score, f1_score, roc_auc_score
from imblearn.metrics import geometric_mean_score, sensitivity_score, specificity_score
def get_slovak_data(business_area, year, postfix):
    """Load bankrupt and non-bankrupt Slovak company records for one
    business-area / year slice.

    Reads two CSVs from data/slovak_data/parsed_data relative to this file.

    Args:
        business_area: business-area component of the CSV file names.
        year: year component of the CSV file names.
        postfix: trailing file-name component.

    Returns:
        (features, labels): features is a DataFrame with the IS_BANKRUPT
        column dropped; labels is the IS_BANKRUPT Series. Bankrupt rows
        come first in both.
    """
    print("Loading Slovak data...")
    path_bankrupt = Path(__file__).parent / "data/slovak_data/parsed_data/bankrupt/bankrupt_{}_{}_year_{}.csv" \
        .format(business_area, year, postfix)
    path_non_bankrupt = Path(__file__).parent / "data/slovak_data/parsed_data/non_bankrupt/nonbankrupt_{}_{}_year_{}" \
                                                ".csv".format(business_area, year, postfix)
    print("Data: {}".format(path_bankrupt))
    bankrupt_data = pd.read_csv(path_bankrupt)
    non_bankrupt_data = pd.read_csv(path_non_bankrupt)
    # DataFrame.append / Series.append were deprecated in pandas 1.4 and
    # removed in pandas 2.0; pd.concat is the supported replacement.
    features = pd.concat([bankrupt_data.drop(["IS_BANKRUPT"], axis=1),
                          non_bankrupt_data.drop(["IS_BANKRUPT"], axis=1)])
    labels = pd.concat([bankrupt_data["IS_BANKRUPT"],
                        non_bankrupt_data["IS_BANKRUPT"]])
    print("Info: rows - {}, columns - {}".format(len(features), len(features.columns)))
    return features, labels
def get_scoring_dict():
    """Build the name -> scorer mapping used for cross-validated
    evaluation (accuracy, F1, ROC AUC, G-mean, sensitivity, specificity)."""
    metrics = {
        'accuracy_score': accuracy_score,
        'f1_score': f1_score,
        'roc_auc_score': roc_auc_score,
        'geometric_mean_score': geometric_mean_score,
        'sensitivity_score': sensitivity_score,
        'specificity_score': specificity_score,
    }
    return {name: make_scorer(metric) for name, metric in metrics.items()}
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_... |
74731484265 | import torch
from torch.optim.optimizer import Optimizer
class COCOB(Optimizer):
    r"""Implements COCOB algorithm.
    It has been proposed in `Training Deep Networks without Learning Rates Through Coin Betting`_.
    COCOB is a parameter-free, coin-betting optimizer: instead of a learning
    rate, each coordinate "bets" a fraction of its accumulated reward
    (wealth) on the sign of the negative-gradient sum.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        alpha (float, optional): It was proposed to increase the stability in the first iterations,
            similarly and independently to the learning rate warm-up. The number roughly denotes the
            number of rounds of warm-up (default 100)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
    .. _Training Deep Networks without Learning Rates Through Coin Betting:
        https://arxiv.org/abs/1705.07795
    """
    def __init__(self, params, alpha: float = 100, eps: float = 1e-8, weight_decay: float = 0):
        # Validate hyperparameters up front so misconfiguration fails fast.
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= alpha:
            raise ValueError("Invalid alpha value: {}".format(alpha))
        if not 0.0 <= weight_decay:
            raise ValueError(
                "Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(weight_decay=weight_decay)
        # alpha and eps are optimizer-wide (not per-group) hyperparameters.
        self._alpha = alpha
        self._eps = eps
        super(COCOB, self).__init__(params, defaults)
    @torch.no_grad()
    def step(self, closure = None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('COCOB does not support sparse gradients')
                state = self.state[p]
                # State initialization (lazy, on the first step for each parameter)
                if len(state) == 0:
                    # Sum of the negative gradients
                    state['sum_negative_gradients'] = torch.zeros_like(p).detach()
                    # Sum of the absolute values of the stochastic subgradients
                    state['grad_norm_sum'] = torch.zeros_like(p).detach()
                    # Maximum observed scale, seeded with eps for stability
                    state['L'] = self._eps*torch.ones_like(p).detach()
                    # Reward/wealth of the algorithm for each coordinate
                    state['reward'] = torch.zeros_like(p).detach()
                    # We need to save the initial point because this is a FTRL-based algorithm
                    state['x0'] = torch.clone(p.data).detach()
                sum_negative_gradients, grad_norm_sum, L, reward, x0 = (
                    state['sum_negative_gradients'],
                    state['grad_norm_sum'],
                    state['L'],
                    state['reward'],
                    state['x0'],
                )
                if group['weight_decay'] != 0:
                    # L2 penalty folded into the gradient before betting.
                    grad = grad.add(p, alpha=group['weight_decay'])
                # update maximum range of the gradients (in-place via out=)
                torch.max(L, torch.abs(grad), out=L)
                # update dual vector
                sum_negative_gradients.sub_(grad)
                # update sum of the absolute values of the gradients
                grad_norm_sum.add_(torch.abs(grad))
                # update the wealth: reward -= grad * (p - x0)
                reward.addcmul_(grad, p.data.sub(x0), value=-1)
                # reset the wealth to zero in case we lost all
                torch.maximum(reward, torch.zeros_like(reward), out=reward)
                # calculate denominator; alpha*L acts as the warm-up floor
                den = torch.maximum(grad_norm_sum.add(L), L.mul(self._alpha)).mul(L)
                # update model parameters: bet (wealth + L) * dual / den from x0
                p.data.copy_(reward.add(L).mul(sum_negative_gradients).div(den).add(x0))
        return loss
| bremen79/parameterfree | parameterfree/cocob.py | cocob.py | py | 4,274 | python | en | code | 73 | github-code | 36 | [
{
"api_name": "torch.optim.optimizer.Optimizer",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "torch.enable_grad",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torch.zeros_like",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "tor... |
11624227934 | import re
import sys
from napalm import get_network_driver
from getpass import getpass
def pretty_print(d, indent=0):
    """Recursively print a nested dict as a tab-indented tree.

    dict values recurse one level deeper; list values print each element
    on its own line; any other value prints its str() form.
    """
    pad = '\t' * indent
    child_pad = '\t' * (indent + 1)
    for key, value in d.items():
        print(pad + str(key))
        if isinstance(value, dict):
            pretty_print(value, indent + 1)
        elif isinstance(value, list):
            for item in value:
                print(child_pad + str(item))
        else:
            print(child_pad + str(value))
def build_dict(cfg):
    """ Builds nested/deep dictionary from Cisco ios
        config using recursion function 'child()', which also
        changes "the most child" dictionaries to list if possible.
        For global Cisco commands make special keys based on
        first word in the command, e.g.: '# aaa #'
    """
    def group_global_childless(dct):
        # Collapse top-level commands that have no children into grouped
        # lists keyed by their first word (second word for 'no ...' forms),
        # e.g. all 'aaa ...' lines end up under the '# aaa #' key.
        for k in list(dct):
            if not dct[k]:
                dct.pop(k,None)
                w = k.split()
                if w[0] == 'no':
                    sec_name = f"# {w[1]} #"
                else:
                    sec_name = f"# {w[0]} #"
                if sec_name in dct.keys():
                    dct[sec_name].append(k)
                else:
                    dct.update({sec_name: [k]})
    def child(base_indent):
        # Consume lines (via the shared cursor n) whose indentation is
        # strictly deeper than base_indent, recursing for each sub-block.
        nonlocal n
        result = {}
        while True:
            if n >= len(lines):
                break
            stripped = lines[n].lstrip()
            indent = len(lines[n]) - len(stripped)
            if base_indent >= indent:
                # Line belongs to an enclosing (shallower) scope: stop here.
                break
            n = n + 1
            result.update({stripped: child(indent)})
        # In case we got all values={} transform result to list
        if not [v for v in result.values() if v]:
            result = [k for k in result.keys()]
        return result
    n = 0  # shared line cursor advanced by child()
    # Banners and boot markers break indentation parsing; pull them out first.
    cfg, special_cases = cut_special_cases(cfg)
    lines = cfg.splitlines()
    # Drop blanks, comment lines ('!') and the trailing 'end' statement.
    lines = [line for line in lines if line
             and not line.startswith('!')
             and not line.startswith('end')]
    dct = child(base_indent=-1)
    dct.update(special_cases)
    group_global_childless(dct)
    return(dct)
def cut_special_cases(cfg):
    """Cut special cases (banners, boot markers) out of *cfg*.

    Multi-line banner blocks and the boot-start/boot-end marker section
    cannot be parsed by the indentation-based build_dict(), so they are
    removed from the config text and collected separately.

    Args:
        cfg: full device configuration as one string.

    Returns:
        (cfg, special_cases): the config text with the special sections
        removed, and a dict mapping section titles (e.g. "# banner motd #",
        "# boot #") to the removed lines.
    """
    special_cases = {}
    # Banner blocks: "banner <type> <delim> ... <delim>"; \3 backreferences
    # the delimiter group so the match ends at the closing delimiter.
    rgx = r"((?:(?P<type>(?:set\s+)*banner\s\w+\s+)(?P<delim>\S+))((.*\r?\n)+?.*?)(\3).*)"
    for r in re.findall(rgx, cfg):
        cfg = cfg.replace(r[0], "", 1)
        special_cases.update({f"# {r[1]}#": r[0].splitlines()})
    rgx = r"boot-start-marker\r?\n(.*\r?\n)*boot-end-marker"
    re_boot = re.search(rgx, cfg)
    # Guard against configs without boot markers: the original indexed the
    # match unconditionally and crashed with TypeError when it was absent.
    if re_boot is not None:
        cfg = cfg.replace(re_boot[0], "", 1)
        special_cases.update({"# boot #": re_boot[0].splitlines()})
    return cfg, special_cases
def main():
    """Read a config from the file given as the first CLI argument, or —
    when no argument is supplied — connect to a Cisco IOS device
    (prompting for IP, username and password), then pretty-print the
    parsed config tree.
    """
    # sys.argv always contains at least the script name, so the original
    # `len(sys.argv) >= 1` was always true: the interactive branch was
    # unreachable and a no-argument run crashed with IndexError.
    if len(sys.argv) >= 2:
        file_name = sys.argv[1]
        # `with` ensures the file handle is closed even if parsing raises.
        with open(file_name) as fp:
            content = fp.read()
        dct = build_dict(content)
    else:
        driver = get_network_driver('ios')
        ipaddress = input('IP address: ')
        username = input('Username: ')
        ios_conn = driver(ipaddress, username, getpass())
        ios_conn.open()
        cfgs = ios_conn.get_config()
        dct = build_dict(cfgs['running'])
    pretty_print(dct, 1)
if __name__ == "__main__":
main()
| pkomissarov/cisco-parsers | parsecfg.py | parsecfg.py | py | 3,480 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.findall",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 92,
... |
15744632677 | import multiprocessing
import optparse
import os
import re
from error import InvalidProjectGroupsError
from error import NoSuchProjectError
from error import RepoExitError
from event_log import EventLog
import progress
# Are we generating man-pages?
GENERATE_MANPAGES = os.environ.get("_REPO_GENERATE_MANPAGES_") == " indeed! "
# Number of projects to submit to a single worker process at a time.
# This number represents a tradeoff between the overhead of IPC and finer
# grained opportunity for parallelism. This particular value was chosen by
# iterating through powers of two until the overall performance no longer
# improved. The performance of this batch size is not a function of the
# number of cores on the system.
WORKER_BATCH_SIZE = 32
# How many jobs to run in parallel by default? This assumes the jobs are
# largely I/O bound and do not hit the network.
DEFAULT_LOCAL_JOBS = min(os.cpu_count(), 8)
class UsageError(RepoExitError):
"""Exception thrown with invalid command usage."""
class Command:
"""Base class for any command line action in repo."""
# Singleton for all commands to track overall repo command execution and
# provide event summary to callers. Only used by sync subcommand currently.
#
# NB: This is being replaced by git trace2 events. See git_trace2_event_log.
event_log = EventLog()
# Whether this command is a "common" one, i.e. whether the user would
# commonly use it or it's a more uncommon command. This is used by the help
# command to show short-vs-full summaries.
COMMON = False
# Whether this command supports running in parallel. If greater than 0,
# it is the number of parallel jobs to default to.
PARALLEL_JOBS = None
# Whether this command supports Multi-manifest. If False, then main.py will
# iterate over the manifests and invoke the command once per (sub)manifest.
# This is only checked after calling ValidateOptions, so that partially
# migrated subcommands can set it to False.
MULTI_MANIFEST_SUPPORT = True
def __init__(
self,
repodir=None,
client=None,
manifest=None,
git_event_log=None,
outer_client=None,
outer_manifest=None,
):
self.repodir = repodir
self.client = client
self.outer_client = outer_client or client
self.manifest = manifest
self.git_event_log = git_event_log
self.outer_manifest = outer_manifest
# Cache for the OptionParser property.
self._optparse = None
def WantPager(self, _opt):
return False
def ReadEnvironmentOptions(self, opts):
"""Set options from environment variables."""
env_options = self._RegisteredEnvironmentOptions()
for env_key, opt_key in env_options.items():
# Get the user-set option value if any
opt_value = getattr(opts, opt_key)
# If the value is set, it means the user has passed it as a command
# line option, and we should use that. Otherwise we can try to set
# it with the value from the corresponding environment variable.
if opt_value is not None:
continue
env_value = os.environ.get(env_key)
if env_value is not None:
setattr(opts, opt_key, env_value)
return opts
@property
def OptionParser(self):
if self._optparse is None:
try:
me = "repo %s" % self.NAME
usage = self.helpUsage.strip().replace("%prog", me)
except AttributeError:
usage = "repo %s" % self.NAME
epilog = (
"Run `repo help %s` to view the detailed manual." % self.NAME
)
self._optparse = optparse.OptionParser(usage=usage, epilog=epilog)
self._CommonOptions(self._optparse)
self._Options(self._optparse)
return self._optparse
def _CommonOptions(self, p, opt_v=True):
"""Initialize the option parser with common options.
These will show up for *all* subcommands, so use sparingly.
NB: Keep in sync with repo:InitParser().
"""
g = p.add_option_group("Logging options")
opts = ["-v"] if opt_v else []
g.add_option(
*opts,
"--verbose",
dest="output_mode",
action="store_true",
help="show all output",
)
g.add_option(
"-q",
"--quiet",
dest="output_mode",
action="store_false",
help="only show errors",
)
if self.PARALLEL_JOBS is not None:
default = "based on number of CPU cores"
if not GENERATE_MANPAGES:
# Only include active cpu count if we aren't generating man
# pages.
default = f"%default; {default}"
p.add_option(
"-j",
"--jobs",
type=int,
default=self.PARALLEL_JOBS,
help=f"number of jobs to run in parallel (default: {default})",
)
m = p.add_option_group("Multi-manifest options")
m.add_option(
"--outer-manifest",
action="store_true",
default=None,
help="operate starting at the outermost manifest",
)
m.add_option(
"--no-outer-manifest",
dest="outer_manifest",
action="store_false",
help="do not operate on outer manifests",
)
m.add_option(
"--this-manifest-only",
action="store_true",
default=None,
help="only operate on this (sub)manifest",
)
m.add_option(
"--no-this-manifest-only",
"--all-manifests",
dest="this_manifest_only",
action="store_false",
help="operate on this manifest and its submanifests",
)
def _Options(self, p):
"""Initialize the option parser with subcommand-specific options."""
def _RegisteredEnvironmentOptions(self):
"""Get options that can be set from environment variables.
Return a dictionary mapping environment variable name
to option key name that it can override.
Example: {'REPO_MY_OPTION': 'my_option'}
Will allow the option with key value 'my_option' to be set
from the value in the environment variable named 'REPO_MY_OPTION'.
Note: This does not work properly for options that are explicitly
set to None by the user, or options that are defined with a
default value other than None.
"""
return {}
def Usage(self):
"""Display usage and terminate."""
self.OptionParser.print_usage()
raise UsageError()
def CommonValidateOptions(self, opt, args):
"""Validate common options."""
opt.quiet = opt.output_mode is False
opt.verbose = opt.output_mode is True
if opt.outer_manifest is None:
# By default, treat multi-manifest instances as a single manifest
# from the user's perspective.
opt.outer_manifest = True
def ValidateOptions(self, opt, args):
"""Validate the user options & arguments before executing.
This is meant to help break the code up into logical steps. Some tips:
* Use self.OptionParser.error to display CLI related errors.
* Adjust opt member defaults as makes sense.
* Adjust the args list, but do so inplace so the caller sees updates.
* Try to avoid updating self state. Leave that to Execute.
"""
def Execute(self, opt, args):
"""Perform the action, after option parsing is complete."""
raise NotImplementedError
@staticmethod
def ExecuteInParallel(
jobs, func, inputs, callback, output=None, ordered=False
):
"""Helper for managing parallel execution boiler plate.
For subcommands that can easily split their work up.
Args:
jobs: How many parallel processes to use.
func: The function to apply to each of the |inputs|. Usually a
functools.partial for wrapping additional arguments. It will be
run in a separate process, so it must be pickalable, so nested
functions won't work. Methods on the subcommand Command class
should work.
inputs: The list of items to process. Must be a list.
callback: The function to pass the results to for processing. It
will be executed in the main thread and process the results of
|func| as they become available. Thus it may be a local nested
function. Its return value is passed back directly. It takes
three arguments:
- The processing pool (or None with one job).
- The |output| argument.
- An iterator for the results.
output: An output manager. May be progress.Progess or
color.Coloring.
ordered: Whether the jobs should be processed in order.
Returns:
The |callback| function's results are returned.
"""
try:
# NB: Multiprocessing is heavy, so don't spin it up for one job.
if len(inputs) == 1 or jobs == 1:
return callback(None, output, (func(x) for x in inputs))
else:
with multiprocessing.Pool(jobs) as pool:
submit = pool.imap if ordered else pool.imap_unordered
return callback(
pool,
output,
submit(func, inputs, chunksize=WORKER_BATCH_SIZE),
)
finally:
if isinstance(output, progress.Progress):
output.end()
    def _ResetPathToProjectMap(self, projects):
        """Rebuild the worktree-path -> project cache from *projects*."""
        self._by_path = {p.worktree: p for p in projects}
    def _UpdatePathToProjectMap(self, project):
        """Add or replace a single project in the worktree-path cache."""
        self._by_path[project.worktree] = project
    def _GetProjectByPath(self, manifest, path):
        """Look up the project containing *path* via the _by_path cache.

        For an existing filesystem path, walk up through parent directories
        (stopping at the manifest's top directory) until a cached project
        is found; the topdir itself is tried last. For a non-existent path,
        only an exact cache hit counts.

        Returns:
            The matching project, or None if no cached project covers
            *path*.
        """
        project = None
        if os.path.exists(path):
            oldpath = None
            # oldpath guards against os.path.dirname() reaching a fixed
            # point (e.g. '/'), which would otherwise loop forever.
            while path and path != oldpath and path != manifest.topdir:
                try:
                    project = self._by_path[path]
                    break
                except KeyError:
                    oldpath = path
                    path = os.path.dirname(path)
            if not project and path == manifest.topdir:
                # The top directory itself may map to a project.
                try:
                    project = self._by_path[path]
                except KeyError:
                    pass
        else:
            # Nonexistent paths can still name a project by exact match.
            try:
                project = self._by_path[path]
            except KeyError:
                pass
        return project
def GetProjects(
self,
args,
manifest=None,
groups="",
missing_ok=False,
submodules_ok=False,
all_manifests=False,
):
"""A list of projects that match the arguments.
Args:
args: a list of (case-insensitive) strings, projects to search for.
manifest: an XmlManifest, the manifest to use, or None for default.
groups: a string, the manifest groups in use.
missing_ok: a boolean, whether to allow missing projects.
submodules_ok: a boolean, whether to allow submodules.
all_manifests: a boolean, if True then all manifests and
submanifests are used. If False, then only the local
(sub)manifest is used.
Returns:
A list of matching Project instances.
"""
if all_manifests:
if not manifest:
manifest = self.manifest.outer_client
all_projects_list = manifest.all_projects
else:
if not manifest:
manifest = self.manifest
all_projects_list = manifest.projects
result = []
if not groups:
groups = manifest.GetGroupsStr()
groups = [x for x in re.split(r"[,\s]+", groups) if x]
if not args:
derived_projects = {}
for project in all_projects_list:
if submodules_ok or project.sync_s:
derived_projects.update(
(p.name, p) for p in project.GetDerivedSubprojects()
)
all_projects_list.extend(derived_projects.values())
for project in all_projects_list:
if (missing_ok or project.Exists) and project.MatchesGroups(
groups
):
result.append(project)
else:
self._ResetPathToProjectMap(all_projects_list)
for arg in args:
# We have to filter by manifest groups in case the requested
# project is checked out multiple times or differently based on
# them.
projects = [
project
for project in manifest.GetProjectsWithName(
arg, all_manifests=all_manifests
)
if project.MatchesGroups(groups)
]
if not projects:
path = os.path.abspath(arg).replace("\\", "/")
tree = manifest
if all_manifests:
# Look for the deepest matching submanifest.
for tree in reversed(list(manifest.all_manifests)):
if path.startswith(tree.topdir):
break
project = self._GetProjectByPath(tree, path)
# If it's not a derived project, update path->project
# mapping and search again, as arg might actually point to
# a derived subproject.
if (
project
and not project.Derived
and (submodules_ok or project.sync_s)
):
search_again = False
for subproject in project.GetDerivedSubprojects():
self._UpdatePathToProjectMap(subproject)
search_again = True
if search_again:
project = (
self._GetProjectByPath(manifest, path)
or project
)
if project:
projects = [project]
if not projects:
raise NoSuchProjectError(arg)
for project in projects:
if not missing_ok and not project.Exists:
raise NoSuchProjectError(
"%s (%s)"
% (arg, project.RelPath(local=not all_manifests))
)
if not project.MatchesGroups(groups):
raise InvalidProjectGroupsError(arg)
result.extend(projects)
def _getpath(x):
return x.relpath
result.sort(key=_getpath)
return result
def FindProjects(self, args, inverse=False, all_manifests=False):
"""Find projects from command line arguments.
Args:
args: a list of (case-insensitive) strings, projects to search for.
inverse: a boolean, if True, then projects not matching any |args|
are returned.
all_manifests: a boolean, if True then all manifests and
submanifests are used. If False, then only the local
(sub)manifest is used.
"""
result = []
patterns = [re.compile(r"%s" % a, re.IGNORECASE) for a in args]
for project in self.GetProjects("", all_manifests=all_manifests):
paths = [project.name, project.RelPath(local=not all_manifests)]
for pattern in patterns:
match = any(pattern.search(x) for x in paths)
if not inverse and match:
result.append(project)
break
if inverse and match:
break
else:
if inverse:
result.append(project)
result.sort(
key=lambda project: (project.manifest.path_prefix, project.relpath)
)
return result
def ManifestList(self, opt):
"""Yields all of the manifests to traverse.
Args:
opt: The command options.
"""
top = self.outer_manifest
if not opt.outer_manifest or opt.this_manifest_only:
top = self.manifest
yield top
if not opt.this_manifest_only:
yield from top.all_children
class InteractiveCommand(Command):
    """A command that talks to the user on the tty.

    It must never run inside a pager, even when the user asked for one,
    because paging would capture the tty the command needs.
    """

    def WantPager(self, _opt):
        return False
class PagedCommand(Command):
    """A command whose output is paged by default.

    Its display tends to be larger than one screen full, so routing it
    through a pager is the sensible default.
    """

    def WantPager(self, _opt):
        return True
class MirrorSafeCommand:
    """Marker base class for commands that may run inside a mirror.

    Such commands do not require a working directory.
    """
class GitcClientCommand:
    """Marker base class: the local client must be a GITC client."""
| GerritCodeReview/git-repo | command.py | command.py | py | 17,769 | python | en | code | 267 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.cpu_count",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "error.RepoExitError",
... |
22660738512 | # data_processing.py
from shapely import wkb
from shapely.geometry import shape
import binascii
import psycopg2
ewkb_data = None  # last EWKB hex string produced by process_for_view()


def get_variable_ewkb():
    """Print and return the cached EWKB value (None until one is computed)."""
    current = ewkb_data
    print("variable ewkb")
    print(current)
    return current
def process_for_view(data):
    """Convert a GeoJSON feature dict to an EWKB hex string (SRID 4326).

    Side effect: stores the result in the module-level ``ewkb_data`` so
    get_variable_ewkb() can return it later.
    """
    global ewkb_data
    print("Processing data...")
    # GeoJSON -> Shapely geometry -> WKB/EWKB encodings.
    geometry = shape(data['geometry'])
    wkb_hex = geometry.wkb.hex()
    ewkb_data = wkb.dumps(geometry, srid=4326, hex=True)
    print(wkb_hex)
    print(ewkb_data)
    return ewkb_data
| Fakhrynm/serverdatatrainingsitepython | Processdataview.py | Processdataview.py | py | 1,054 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "shapely.geometry.shape",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "shapely.wkb.dumps",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "shapely.wkb",
"line_number": 35,
"usage_type": "name"
}
] |
26246312144 | import serial
from sense_hat import SenseHat
from socket import gethostname
from xbee import XBee
from statistics import median
def clear_matrix():
    """Turn off all LEDs on the Sense HAT matrix."""
    sense.clear()
def show_hostname():
    """Scroll this device's hostname across the Sense HAT LED matrix."""
    hostname = gethostname()
    sense.show_message("Hostname: " + hostname)
def receive_data(data):
    """Callback invoked by the XBee driver for every received frame.

    - b"ping": reply with "pong" so the sender can measure RSSI.
    - b"pong": record the frame's RSSI globally and per sender, then print
      the running median.
    """
    print("received data: ", data)
    if (data["rf_data"] == b"ping"):
        # Received ping? -> Send pong back
        send_data("pong")
    elif (data["rf_data"] == b"pong"):
        # Received pong? -> Store & calc RSSI
        rssi_list.append(ord(data["rssi"]))
        print_current_rssi_median()
        # BUGFIX: the guard previously tested membership in rssi_list (a
        # list of RSSI ints), which is always False for an address, so the
        # per-sender bucket was re-created (emptied) on every pong.
        if data["source_addr"] not in rssi_dict:
            rssi_dict[data["source_addr"]] = []
        rssi_dict[data["source_addr"]].append(ord(data["rssi"]))
def send_data(data, dest_addr="\x00\x0A"):
    """Transmit *data* via the XBee radio to the 16-bit *dest_addr*."""
    xbee.send("tx", frame_id="\x00", dest_addr=dest_addr, data=data)
def init_rssi_calc(n_pings=10):
    """Reset the collected RSSI samples and send *n_pings* pings.

    The peer answers each ping with a pong, whose RSSI receive_data()
    accumulates in the module-level ``rssi_list``.
    """
    # BUGFIX: the assignment previously created a useless function-local
    # list; the intent is to reset the module-level sample buffer.
    global rssi_list
    rssi_list = []
    dest_addr = "\x00\x0A"  # 2byte hex value (TODO: Set according to adress of destination XBee module)
    for _ in range(n_pings):
        send_data("ping", dest_addr)
def print_current_rssi_median():
    """Print the median of the collected RSSI samples and the distance
    estimated from it."""
    med = median(rssi_list)
    print(f"Current RSSI median with {len(rssi_list)} received pongs: {med}")
    estimate = dist_from_rssi(med)
    print(f"Current dist from RSSI: dist = {estimate}")
def dist_from_rssi(rssi):
    """Estimate distance from an RSSI value via the log-distance model.

    RSSI = -10 * n * log_10(d) + A   =>   d = 10^((A - RSSI) / (10 * n))
    """
    n = 2.8  # path loss variable from 2 to 4
    A = 33   # TODO: Measure reference RSSI (1m distance)
    # BUGFIX: the old code ignored the rssi argument entirely and computed
    # 10**(A/10*n) == 10**((A/10)*n) due to operator precedence, which does
    # not match the formula above.
    dist = 10 ** ((A - rssi) / (10 * n))
    return dist
def three_anchor_bbox():
    """Estimate the target position as the center of the bounding box
    implied by the per-anchor distance estimates."""
    dists = {}
    for anchor in rssi_dict:
        dists[anchor] = dist_from_rssi(median(rssi_dict[anchor]))
    x = 0.5 * (min(anchor_positions[a][0] + dists[a] for a in anchor_positions) +
               max(anchor_positions[a][0] - dists[a] for a in anchor_positions))
    y = 0.5 * (min(anchor_positions[a][1] + dists[a] for a in anchor_positions) +
               max(anchor_positions[a][1] - dists[a] for a in anchor_positions))
    print(">> BBox calculation done: X: {} | Y: {}".format(x, y))
def three_anchor_multilat():
    """Locate the target via least-squares multilateration over the
    per-anchor distance estimates."""
    print("... Starting multilateration.")
    dists = {}
    for anchor in rssi_dict:
        dists[anchor] = dist_from_rssi(median(rssi_dict[anchor]))
    # https://github.com/kamalshadi/Localization
    import localization as lx
    proj = lx.Project(mode='2D', solver='LSE')
    print("... adding anchors")
    for name, pos in anchor_positions.items():
        proj.add_anchor(name, pos)
    target, label = proj.add_target()
    print("... adding measurements")
    for name, d in dists.items():
        proj.add_measure(name, d)
    print("... calculating...")
    proj.solve()
    print("> Done! Multilat result:", target.loc)
def broadcast_ping():
    """Clear the per-sender RSSI history and broadcast a ping to all nodes."""
    # BUGFIX: the assignment previously created a useless function-local
    # dict; the intent is to reset the module-level per-sender history.
    global rssi_dict
    rssi_dict = {}
    # 0xFFFF is the XBee 16-bit broadcast address.
    send_data("ping", "\xFF\xFF")
if __name__ == "__main__":
    sense = SenseHat()
    print(">> Opening serial port...")
    # NOTE(review): serial device path is hard-coded — confirm for deployment.
    ser = serial.Serial("/dev/ttyUSB1", 9600)
    # Every received frame is dispatched to receive_data().
    xbee = XBee(ser, callback=receive_data)
    # Module-level state read/written by the helper functions above.
    rssi_list = []
    rssi_dict = {}
    ## TODO: Fill anchor position dictionary!
    anchor_positions = {
        "add1": (0, 1),
        "add2": (0, 3),
        "add3": (0, 4)
    }
    print(">> Waiting for events...")
    print("Middle: clear_matrix, left: init_rssi_calc, right: three_anchor_bbox, down: broadcast_ping")
    print("Sequence: broadcast_ping -> three_anchor_bbox / three_anchor_multilat")
    # Poll the Sense HAT joystick forever and map presses to actions.
    while True:
        for event in sense.stick.get_events():
            if event.action == "pressed":
                if event.direction == "middle":
                    print("** Event: Pressed.")
                    clear_matrix()
                elif event.direction == "left":
                    print("** Event: Left")
                    init_rssi_calc()
                elif event.direction == "right":
                    print("** Event: Right.")
                    three_anchor_bbox()
                elif event.direction == "down":
                    print("** Event: Down")
                    broadcast_ping()
                elif event.direction == "up":
                    print("** Event: up")
                    three_anchor_multilat()
| tristndev/UzL_DSN | Tutorial 4/ex04_01_RSSI_to_distance.py | ex04_01_RSSI_to_distance.py | py | 4,390 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "socket.gethostname",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "xbee.send",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "statistics.median",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "statistics.median",
... |
23006702098 |
import torch
import numpy as np
from torch._C import dtype
from torch import nn
from torch.nn import functional as F
from torchvision import transforms
from torchvision.transforms import functional as TF
class ImageAugmentation():
    """Feature-level augmentations for (batch, grids, dim) tensors.

    Every method is gated by probability ``p``: with probability ``p`` the
    augmentation fires, otherwise the input is returned unchanged.  The
    spatial methods operate on the first 49 grid cells reshaped to a 7x7
    map (assumes grids >= 49 and a square grid — TODO confirm callers).

    NOTE: addPepperSaltNoise and randMask mutate the input in place.

    BUGFIXES:
    - Several gates used ``np.random.randn() < p`` (a standard-normal
      draw), which fires with probability Phi(p) — about 58% for p=0.2 —
      instead of p; they now use ``np.random.rand() < p`` like
      addPepperSaltNoise/randMask always did.
    - ``.view()`` was called on non-contiguous tensors produced by
      ``permute``/slicing, which raises RuntimeError; ``.reshape()`` is
      used instead (same values, copies only when required).
    """

    def __init__(self):
        super().__init__()

    def addPepperSaltNoise(self, detections, p=0.2, pn=0.05):
        """Overwrite ~2*pn of all grid cells with constant max ("pepper")
        or min ("salt") vectors, in place."""
        feat = detections
        if np.random.rand() < p:
            bs, grids, dim = detections.shape
            maxnum = detections.max().item()
            minnum = detections.min().item()
            pepper = torch.full((dim,), maxnum)
            salt = torch.full((dim,), minnum)
            # add bs*grids*pn pepper cells
            for _ in range(int(bs * grids * pn)):
                row = np.random.randint(bs)
                col = np.random.randint(grids)
                feat[row][col] = pepper
            # add bs*grids*pn salt cells
            for _ in range(int(bs * grids * pn)):
                row = np.random.randint(bs)
                col = np.random.randint(grids)
                feat[row][col] = salt
        return feat

    def addGaussianNoise(self, detections, p=0.2, mean=0, var=0.0001):
        """Add Gaussian noise in max-normalized space; always returns
        a float32 tensor."""
        feat = detections
        if np.random.rand() < p:  # BUGFIX: was np.random.randn()
            maxnum = detections.max().item()
            normdet = detections / maxnum
            noise = torch.from_numpy(
                np.random.normal(mean, var ** 0.5, detections.shape))
            newdet = torch.clamp(normdet + noise, 0, 1)
            feat = newdet * maxnum
        return feat.to(torch.float32)

    def resizePool(self, detections, p=0.2, poolsize=2, stride=2):
        """Max-pool the 7x7 grid then upsample back, blurring detail."""
        feat = detections
        if np.random.rand() < p:  # BUGFIX: was np.random.randn()
            pool = nn.MaxPool2d(poolsize, stride=stride)
            use_feat = detections[:, :49, :]  # first 49 cells = 7x7 map
            bs, gs, dim = use_feat.shape
            side = int(np.sqrt(gs))
            use_feat = use_feat.reshape(bs, side, side, dim)
            # pool on (bs, dim, H, W), then upsample to the original size
            output = pool(use_feat.permute(0, 3, 1, 2))
            output = F.interpolate(output, size=[side, side])
            output = output.permute(0, 2, 3, 1)
            feat = output.reshape(bs, -1, dim)
        return feat

    def randomCrop(self, detections, p=0.2, cropsize=5):
        """Crop a random cropsize x cropsize window and upsample it back."""
        feat = detections
        if np.random.rand() < p:  # BUGFIX: was np.random.randn()
            use_feat = detections[:, :49, :]
            bs, gs, dim = use_feat.shape
            side = int(np.sqrt(gs))
            use_feat = use_feat.reshape(bs, side, side, dim).permute(0, 3, 1, 2)
            start_range = side - cropsize  # integer, valid randint bound
            start_row = np.random.randint(start_range)
            start_col = np.random.randint(start_range)
            output = use_feat[:, :, start_row:start_row + cropsize,
                              start_col:start_col + cropsize]
            output = F.interpolate(output, size=[side, side])
            output = output.permute(0, 2, 3, 1)
            feat = output.reshape(bs, -1, dim)
        return feat

    def randomHorizontalFlip(self, detections, p=0.2):
        """Mirror the 7x7 grid left-right."""
        feat = detections
        if np.random.rand() < p:  # BUGFIX: was np.random.randn()
            use_feat = detections[:, :49, :]
            bs, gs, dim = use_feat.shape
            side = int(np.sqrt(gs))
            use_feat = use_feat.reshape(bs, side, side, dim).permute(0, 3, 1, 2)
            hflip = transforms.RandomHorizontalFlip(p=1)
            output = hflip(use_feat)
            output = output.permute(0, 2, 3, 1)
            feat = output.reshape(bs, -1, dim)
        return feat

    def randomVerticalFlip(self, detections, p=0.2):
        """Mirror the 7x7 grid top-bottom."""
        feat = detections
        if np.random.rand() < p:  # BUGFIX: was np.random.randn()
            use_feat = detections[:, :49, :]
            bs, gs, dim = use_feat.shape
            side = int(np.sqrt(gs))
            use_feat = use_feat.reshape(bs, side, side, dim).permute(0, 3, 1, 2)
            vflip = transforms.RandomVerticalFlip(p=1)
            output = vflip(use_feat)
            output = output.permute(0, 2, 3, 1)
            feat = output.reshape(bs, -1, dim)
        return feat

    def randRotate(self, detections, p=0.5):
        """Rotate the 7x7 grid by a random angle in [-30, 30) degrees."""
        feat = detections
        if np.random.rand() < p:  # BUGFIX: was np.random.randn()
            use_feat = detections[:, :49, :]
            bs, gs, dim = use_feat.shape
            side = int(np.sqrt(gs))
            use_feat = use_feat.reshape(bs, side, side, dim).permute(0, 3, 1, 2)
            degree = np.random.randint(60) - 30
            output = TF.rotate(use_feat, degree)
            output = output.permute(0, 2, 3, 1)
            feat = output.reshape(bs, -1, dim)
        return feat

    def channelShuffle(self, detections, p=0.2):
        """Apply one random permutation of the feature channels."""
        feat = detections
        if np.random.rand() < p:  # BUGFIX: was np.random.randn()
            use_feat = detections[:, :49, :]
            bs, gs, dim = use_feat.shape
            side = int(np.sqrt(gs))
            use_feat = use_feat.reshape(bs, side, side, dim).permute(0, 3, 1, 2)
            indexs = np.arange(dim)
            np.random.shuffle(indexs)
            output = use_feat[:, indexs, :, :]
            output = output.permute(0, 2, 3, 1)
            feat = output.reshape(bs, -1, dim)
        return feat

    def randMask(self, detections, p=0.3, pn=0.1):
        """Zero out ~pn of all grid cells, in place."""
        feat = detections
        if np.random.rand() < p:
            bs, grids, dim = detections.shape
            zero_cell = torch.full((dim,), 0.0)
            for _ in range(int(bs * grids * pn)):
                row = np.random.randint(bs)
                col = np.random.randint(grids)
                feat[row][col] = zero_cell
        return feat

    def randnChooseOne4(self, detections):
        """Apply one of four augmentations chosen uniformly at random."""
        augs = ['addPepperSaltNoise', 'resizePool', 'randomCrop', 'randRotate']
        chosen = augs[np.random.randint(len(augs))]
        return getattr(self, chosen)(detections, p=0.3)
| xmu-xiaoma666/SDATR | models/augmentation.py | augmentation.py | py | 6,969 | python | en | code | 18 | github-code | 36 | [
{
"api_name": "numpy.random.rand",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "torch.full",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.full",
"line... |
1313517117 | from __future__ import print_function
import argparse
import keras
from data_utils import load_data
from sklearn.model_selection import train_test_split
from model import vgg16
from hyperspace import hyperdrive
num_classes = 10
batch_size = 32
epochs = 5
# The data, shuffled and split between train and test sets:
(x_train, y_train), (x_test, y_test) = load_data()
# Further split to create validation set
# (sized to match the test set)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=x_test.shape[0],
                                                  shuffle=True, random_state=0)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_val = keras.utils.to_categorical(y_val, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Scale pixel values from [0, 255] to [0, 1].
x_train = x_train.astype('float32')
x_val = x_val.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_val /= 255
x_test /= 255
def objective(params):
    """
    Objective function to be minimized.

    Parameters
    ----------
    `params` [list]
        Hyperparameters to be set in optimization iteration.
        - Managed by hyperdrive.  Two VGG kernel counts.
    """
    k1, k2 = int(params[0]), int(params[1])
    model = vgg16(kernel1=k1, kernel2=k2)
    model.compile(optimizer=keras.optimizers.rmsprop(lr=0.0001, decay=1e-6),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_val, y_val),
              shuffle=True)
    # Score trained model on the validation split.
    scores = model.evaluate(x_val, y_val, verbose=1)
    print('Validation loss:', scores[0])
    print('Validation accuracy:', scores[1])
    return scores[0]
def main():
    """Parse CLI arguments and run the Bayesian hyperparameter search."""
    parser = argparse.ArgumentParser(description='Setup experiment.')
    parser.add_argument('--results_dir', type=str, help='Path to results directory.')
    args = parser.parse_args()
    # Search bounds for kernel1 and kernel2.
    hparams = [(2, 8),
               (2, 8)]
    hyperdrive(objective=objective,
               hyperparameters=hparams,
               results_path=args.results_dir,
               model="GP",
               n_iterations=11,
               verbose=True,
               random_state=0)
# Script entry point.
if __name__ == '__main__':
    main()
| yngtodd/vgg_hyper | vgg_hyper/main.py | main.py | py | 2,878 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "data_utils.load_data",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "keras.utils.to_categorical",
"line_number": 23,
"usage_type": "call"
},
... |
73368915303 | from __future__ import print_function
import numpy as np
np.random.seed(1337)
from itertools import product
from sklearn import cluster
from sklearn.externals import joblib
from keras.datasets import mnist
from sklearn.neighbors import KNeighborsClassifier
from scipy.misc import imresize
from keras.utils import np_utils
from sklearn.svm import SVC
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import StratifiedKFold
from sklearn import preprocessing
import matplotlib.pyplot as plt
import sys
EPS = 1e-9  # numerical floor to avoid division by ~zero weights


class IMG:
    """Trivial descriptor: raw pixel intensities."""

    def extract(self, img):
        """Return the image flattened to a 1-D vector."""
        return img.flatten()


def finite_derivatives(img):
    """Central finite differences of a 2-D image.

    Returns (mag, dx, dy): gradient magnitude plus row/column derivatives.
    Border rows/columns of dx/dy are zero.
    """
    dx = np.zeros(img.shape, dtype=np.double)
    dx[1:-1, :] = (img[2:, :] - img[:-2, :]) / 2.0
    dy = np.zeros(img.shape, dtype=np.double)
    dy[:, 1:-1] = (img[:, 2:] - img[:, :-2]) / 2.0
    mag = (dx ** 2 + dy ** 2) ** 0.5
    return mag, dx, dy


class HOG:
    """Histogram-of-oriented-gradients descriptor over a grid of cells."""

    def __init__(self, orientations=9, cell=(8, 8)):
        self.orientations = orientations  # histogram bins over [-pi, pi)
        self.cell = cell                  # cell size (rows, cols)

    def extract(self, img, mask=None):
        """Return the concatenated, L1-normalized per-cell histograms.

        img may be 2-D or 3-D (only the first plane is used); mask, if
        given, weights pixels and must have the same shape as img.
        """
        if len(img.shape) == 3:
            img = img[0]
        # BUGFIX: 'mask == None' compares elementwise on ndarrays and the
        # resulting array raises on truth-testing; identity is needed here.
        if mask is None:
            mask = np.ones(shape=img.shape, dtype=img.dtype)
        mag, dx, dy = finite_derivatives(img)
        phase = np.arctan2(dy, dx).astype(np.float64)
        # BUGFIX: grid size must be integral — on Python 3 '/' is float
        # division and breaks the range() calls below.
        size = (img.shape[0] // self.cell[0], img.shape[1] // self.cell[1])
        # Gradient-magnitude weights, globally normalized.
        w = mask.astype(np.float64)
        w *= mag
        if np.sum(w) > EPS:
            w /= np.sum(w)
        ans = np.array([])
        for i, j in product(range(size[0]), range(size[1])):
            tl = (i * self.cell[0], j * self.cell[1])
            br = ((i + 1) * self.cell[0], (j + 1) * self.cell[1])
            roi = phase[tl[0]:br[0], tl[1]:br[1]]
            wroi = w[tl[0]:br[0], tl[1]:br[1]]
            hist, _ = np.histogram(roi, bins=self.orientations,
                                   range=(-np.pi, np.pi), weights=wroi,
                                   density=True)
            if np.sum(wroi) < EPS:
                hist = np.zeros(hist.shape, dtype=hist.dtype)
            ans = np.hstack((ans, hist))
        ans /= (np.sum(ans) + EPS)
        return ans
class BOVW:
    """Bag-of-visual-words encoder over densely sampled image patches.

    Patches of ``size`` are described by ``extractor``, quantized against a
    learned k-means codebook, and pooled into a fixed-length descriptor.
    """

    def __init__(self, extractor, k=10, size=(8, 8), pad=(1, 1), pool='hard',
                 codebook_len=400000):
        self.k = k                        # codebook size (k-means clusters)
        self.pad = pad                    # dense sampling stride (rows, cols)
        self.size = size                  # patch size (rows, cols)
        self.pool = pool                  # 'hard' histogram or 'soft' pooling
        self.codebook_len = codebook_len  # total patches sampled for fitting
        self.extractor = extractor        # patch descriptor (e.g. IMG, HOG)
        self.clusterer = cluster.KMeans(self.k, max_iter=20, n_init=1)

    def load(self, name):
        """Restore parameters, extractor and codebook saved by save()."""
        self.k, self.pad, self.size = joblib.load('{}_pms.pkl'.format(name))
        self.extractor = joblib.load('{}_ext.pkl'.format(name))
        self.clusterer = joblib.load('{}_clu.pkl'.format(name))

    def save(self, name):
        """Persist parameters, extractor and codebook to disk.

        BUGFIX: this method previously called joblib.load, which tried to
        read the objects as filenames instead of writing them.
        """
        joblib.dump((self.k, self.pad, self.size), '{}_pms.pkl'.format(name))
        joblib.dump(self.extractor, '{}_ext.pkl'.format(name))
        joblib.dump(self.clusterer, '{}_clu.pkl'.format(name))

    def fit(self, X):
        """Learn the codebook from patches sampled uniformly at random."""
        assert len(X) > 0
        assert self.codebook_len > len(X)
        # temporal assert
        # BUGFIX: integer division — on Python 3 '/' yields a float and
        # breaks the array-shape arithmetic below.
        self.samples_per_image = (self.codebook_len + len(X) - 1) // len(X)
        print("Samples per image {}".format(self.samples_per_image))
        test = X[0]
        if len(test.shape) == 3:
            test = test[0]
        v_len = len(self.extractor.extract(test[0:self.size[0], 0:self.size[1]]))
        V = np.zeros(shape=(len(X) * self.samples_per_image, v_len),
                     dtype='float32')
        it = 0
        for img in X:
            assert len(img) == 1
            if len(img.shape) == 3:
                img = img[0]
            # np.int was removed in NumPy 1.24; the builtin is equivalent.
            coords = np.random.uniform(
                low=0, high=test.shape[0] - self.size[0],
                size=(self.samples_per_image, 2)).astype(int)
            for i, j in coords:
                V[it] = self.extractor.extract(
                    img[i:i + self.size[0], j:j + self.size[1]])
                it += 1
        assert len(V) == it
        self.clusterer.fit(V)

    def transform(self, X):
        """Encode each image in X as a pooled codeword descriptor."""
        assert len(X) > 0
        test = X[0]
        if len(test.shape) == 3:
            test = test[0]
        xr = np.arange(0, test.shape[0] - self.size[0], self.pad[0])
        yr = np.arange(0, test.shape[1] - self.size[1], self.pad[1])
        coords = list(product(xr, yr))
        yr_len = len(yr)
        print('size {}, {} len {}'.format(test.shape, yr, yr_len))
        v_len = len(self.extractor.extract(test[0:self.size[0], 0:self.size[1]]))
        ft = np.zeros(shape=(len(coords), v_len), dtype='float32')
        if self.pool == 'hard':
            V = np.zeros(shape=(len(X), self.k), dtype='float32')
        elif self.pool == 'soft':
            # soft pooling keeps one k-length bin per image quadrant
            V = np.zeros(shape=(len(X), 4 * self.k), dtype='float32')
        else:
            raise Exception("Undefined pooling mode: {}".format(self.pool))
        C = self.clusterer.cluster_centers_
        zeros = np.zeros(shape=(len(C),), dtype=C.dtype)
        for k in range(len(X)):
            img = X[k]
            if len(img.shape) == 3:
                img = img[0]
            it = 0
            for i, j in coords:
                ft[it] = self.extractor.extract(
                    img[i:i + self.size[0], j:j + self.size[1]])
                it += 1
            assert len(ft) == it
            if self.pool == 'hard':
                idx = self.clusterer.predict(ft)
                V[k], _ = np.histogram(idx, bins=self.k, range=(0, self.k))
            elif self.pool == 'soft':
                it2 = 0
                for i, j in coords:
                    # quadrant index 0..3 from (row, col) halves
                    index = 0
                    if i > ((test.shape[0] - self.size[0]) / 2):
                        index += 2
                    if j > ((test.shape[1] - self.size[1]) / 2):
                        index += 1
                    S = np.linalg.norm(C - ft[it2], axis=1)
                    S = np.mean(S) - S
                    V[k][index * self.k:(index + 1) * self.k] += np.max(
                        [S, zeros], axis=0)
                    it2 += 1
            else:
                raise Exception("Undefined pooling mode: {}".format(self.pool))
        print("V shape {}".format(V.shape))
        return V

    def fit_transform(self, X):
        """Fit the codebook on X, then encode X."""
        self.fit(X)
        return self.transform(X)
def create_model(config):
    """Return a preconfigured BOVW model for the named config (or None)."""
    if config == 'img':
        return BOVW(IMG(), k=10, size=(15, 15), pad=(4, 4), pool='soft',
                    codebook_len=60000)
    # (extractor kind, codebook size, pooling) per configuration name.
    presets = {
        'hog': ('hog', 600, 'soft'),
        'img-hard': ('img', 600, 'hard'),
        'img-soft': ('img', 600, 'soft'),
        'hog-hard': ('hog', 600, 'hard'),
        'hog-soft-512-norm': ('hog', 512, 'soft'),
    }
    for n in (16, 32, 64, 128, 256, 512, 1024):
        presets['hog-soft-%d' % n] = ('hog', n, 'soft')
    spec = presets.get(config)
    if spec is None:
        return None
    kind, k, pool = spec
    extractor = IMG() if kind == 'img' else HOG(cell=(5, 5))
    return BOVW(extractor, k=k, size=(15, 15), pad=(1, 1), pool=pool)
def load_mnist(img_cols, img_rows, nb_classes):
    """Load MNIST, resize digits to (img_rows, img_cols), scale to [0, 1].

    nb_classes is unused here (the to_categorical calls are commented out).
    Returns ((X_train, y_train), (X_test, y_test)) with X shaped
    (N, 1, img_rows, img_cols) float32.
    """
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = np.array([imresize(x, (img_rows, img_cols)) for x in X_train])
    X_test = np.array([imresize(x, (img_rows, img_cols)) for x in X_test])
    print("shapes {} {}".format(X_train.shape, X_test.shape))
    X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
    X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    X_train = X_train.astype('float32') / 255
    X_test = X_test.astype('float32') / 255
    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')
    return (X_train, y_train), (X_test, y_test)
def extract_IMG(X):
    """Stack raw-pixel feature vectors for every image in X."""
    assert len(X) != 0
    extractor = IMG()
    width = len(extractor.extract(X[0]))
    V = np.zeros(shape=(len(X), width), dtype="float32")
    for idx in range(len(X)):
        V[idx] = extractor.extract(X[idx])
    return V
def extract_HOG(X):
    """Stack default-parameter HOG feature vectors for every image in X."""
    assert len(X) != 0
    extractor = HOG()
    width = len(extractor.extract(X[0]))
    V = np.zeros(shape=(len(X), width), dtype="float32")
    for idx in range(len(X)):
        V[idx] = extractor.extract(X[idx])
    return V
if __name__ == '__main__':
    # Experiment settings (bias/batch_size/nb_epoch appear unused in the
    # visible code — presumably consumed elsewhere; verify).
    bias = True
    batch_size = 100
    nb_epoch = 1
    nb_classes = 10
    img_rows, img_cols = 36, 36
    (X_train, Y_train), (X_test, Y_test) = load_mnist(img_rows, img_cols, nb_classes)
    print("X shape {}".format(X_train.shape))
    # Optionally subsample the training set (full set used here).
    X_train_small = X_train#X_train[range(10000)]
    Y_train_small = Y_train#Y_train[range(10000)]
    print("X shape {}".format(X_train_small.shape))
    #bow = BOVW(HOG(cell=(5,5)), k=600, size=(15, 15), pad=(1,1), pool='hard')
    # Encode images with a bag-of-visual-words over raw-pixel patches.
    bow = BOVW(IMG(), k=600, size=(15, 15), pad=(1, 1), pool='soft')
    print("BOVW fit transform ...")
    V_train = bow.fit_transform(X_train_small)
    print("BOVW transform ...")
    V_test = bow.transform(X_test)
    '''
    # 32x32 feature vector, 0.9498
    V_train = extract_IMG(X_train_small)
    V_test = extract_IMG(X_test)
    '''
    '''
    # feature vector, 0.9498
    V_train = extract_HOG(X_train_small)
    V_test = extract_HOG(X_test)
    '''
    # BOVW 0.9488
    # Classify the encoded features with 5-nearest-neighbors.
    clf = KNeighborsClassifier(5)
    print("clf fit ...")
    clf.fit(V_train, Y_train_small)
    print("clf predict ...")
    Y_pred = clf.predict(V_test)
    print("Y test: {}".format(Y_test))
    print("Y pred: {}".format(Y_pred))
    acc = np.mean(Y_test == Y_pred)
    print("Accuracy: {}".format(acc))
    '''
    clf = SVC(kernel='rbf')
    parameters = {'C':10. ** np.arange(-3,3), 'gamma':2. ** np.arange(-5, 1)}
    grid = GridSearchCV(clf, parameters, cv=StratifiedKFold(Y_train_small, 5), verbose=3, n_jobs=-1)
    grid.fit(V_train, Y_train_small)
    print("predicting")
    print("score: {}".format(grid.score(X_test, y_test)))
    print(grid.best_estimator_)
    '''
    # NOTE(review): the parameters below are shadowed — the loop body uses
    # the enclosing V_train/Y_train_small/V_test instead; confirm intent.
    def test_svc_hp(X_train, Y_train, X_test, Y_test):
        # Sweep the SVM regularization parameter C over 1e-3 .. 1e2.
        for c in range(-3, 3):
            c = 10 ** c
            clf = SVC(kernel='rbf', C=c)
            print("C = {}, clf fit ...".format(c))
            clf.fit(V_train, Y_train_small)
            print("clf predict ...")
            Y_pred = clf.predict(V_test)
            print("Y test: {}".format(Y_test))
            print("Y pred: {}".format(Y_pred))
            acc = np.mean(Y_test == Y_pred)
            print("Accuracy: {}".format(acc))
    #'''
| jmendozais/lung-nodule-detection | bovw.py | bovw.py | py | 12,019 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "numpy.random.seed",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "numpy.empty",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.double",
"lin... |
28111130337 | from django.db.models import Avg
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from dinnerevent.models import Review
from .forms import UserRegisterForm, ProfileForm
def register(request):
    """Handle user sign-up with a linked profile form."""
    if request.method != "POST":
        # Initial visit: render unbound forms.
        form = UserRegisterForm()
        profile_form = ProfileForm()
        return render(request, 'users/register.html',
                      {'form': form, 'profile_form': profile_form})
    form = UserRegisterForm(request.POST)
    profile_form = ProfileForm(request.POST)
    if form.is_valid() and profile_form.is_valid():
        new_user = form.save()
        # Attach the profile to the freshly created user before saving.
        new_profile = profile_form.save(commit=False)
        new_profile.user = new_user
        new_profile.save()
        profile_form.save_m2m()
        username = form.cleaned_data.get('username')
        messages.success(request, f'Bruker er laget for {username}')
        return redirect('login')
    # Invalid submission: re-render with bound forms carrying the errors.
    return render(request, 'users/register.html',
                  {'form': form, 'profile_form': profile_form})
@login_required
def profile(request, pk):
    """Render a user's profile page with their reviews and average rating."""
    # Fetch the profile owner once (previously queried twice per request).
    owner = User.objects.get(pk=pk)
    all_user_reviews = Review.objects.filter(event__user=owner)
    avg_rating = all_user_reviews.aggregate(Avg('rating')).get('rating__avg')
    return render(request, 'users/profile.html', {
        'reviews': all_user_reviews,
        'score': rounded_rating(avg_rating),
        'user_profile': owner,
    })
def rounded_rating(number):
    """Round *number* to the nearest half-integer; pass None through."""
    if number is None:
        return None
    return round(number * 2) / 2
| taheeraahmed/Dinnersharing | middagproj/users/views.py | views.py | py | 1,640 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "forms.UserRegisterForm",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "forms.ProfileForm",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 21,
"usage_type": "call"
},
{
"api_name... |
31310132227 | """Extract KML data into DataFrame."""
from typing import Dict, Sequence, Union
import numpy as np
import pandas as pd
from pykml import parser
from pykml.factory import KML_ElementMaker as KML
NS = {"t": "http://www.opengis.net/kml/2.2"}
def read_kml(filepath: str) -> KML.kml:
    """Parse the KML file at *filepath* and return the document root."""
    with open(filepath, "rb") as handle:
        return parser.parse(handle).getroot()
def get_doc(root: KML.kml) -> KML.Document:
    """Return the first <Document> child of the KML root."""
    return root.xpath("./t:Document", namespaces=NS)[0]
def get_folders(doc: KML.Document) -> KML.Folder:
    """Yield every direct <Folder> child of *doc*."""
    yield from doc.xpath("./t:Folder", namespaces=NS)
def get_tree(doc: KML.Document) -> dict:
    """Recursively build a dict mirroring the folder structure of *doc*.

    Non-empty subfolders replace their folder objects; placemark children
    are collected under the special key "placemarks".
    """
    tree = {folder.name: folder for folder in get_folders(doc)}
    for name in list(tree):
        children = get_tree(tree[name])
        if children:
            tree[name] = children
    placemarks = list(get_placemarks(doc))
    if placemarks:
        tree["placemarks"] = placemarks
    return tree
def get_placemarks(doc: KML.Document) -> KML.Placemark:
    """Yield every direct <Placemark> child of *doc*."""
    yield from doc.xpath("./t:Placemark", namespaces=NS)
def get_SimpleData(placemark: KML.Placemark) -> Dict[str, str]:
    """Map each SimpleData element's 'name' attribute to its text value."""
    values = {}
    for node in placemark.xpath(".//t:SimpleData", namespaces=NS):
        values[node.attrib.get("name")] = node.text
    return values
def get_description(placemark: KML.Placemark) -> str:
    """Join all description texts of *placemark* with a separator line."""
    parts = placemark.xpath(".//t:description", namespaces=NS)
    return "\n---\n".join(str(part) for part in parts)
def get_coordinates(placemark: KML.Placemark) -> Dict[str, float]:
    """Return the Latitude/Longitude/Altitude of a Placemark.

    Falls back to NaN for all three when the Placemark carries no Point
    coordinates.
    """
    point = getattr(placemark, "Point", None)
    if point is not None and hasattr(point, "coordinates"):
        # KML stores "lon,lat,alt" in that order.
        lon, lat, alt = point.coordinates.text.split(",")
        return {
            "Latitude": float(lat),
            "Longitude": float(lon),
            "Altitude": float(alt),
        }
    return {
        "Latitude": np.nan,
        "Longitude": np.nan,
        "Altitude": np.nan,
    }
def get_placemarks_data(
    placemarks: Sequence[KML.Placemark]
) -> Dict[str, Union[str, float]]:
    """Yield one merged data dict per placemark.

    Each dict combines the description, the coordinates, and any
    SimpleData fields.
    """
    for pm in placemarks:
        yield dict(
            description=get_description(pm),
            **get_coordinates(pm),
            **get_SimpleData(pm),
        )
def get_data(
    tree: dict,
    folders: Sequence[str] = None
) -> Dict[str, Union[str, float]]:
    """Yield a flat record for every placemark in *tree*.

    Folder names along the path to each placemark are emitted as
    Folder0, Folder1, ... keys.
    """
    path = tuple() if folders is None else folders
    for key in tree:
        if key == "placemarks":
            for record in get_placemarks_data(tree[key]):
                yield dict(
                    **{f"Folder{i}": name for i, name in enumerate(path)},
                    **record,
                )
        else:
            yield from get_data(
                tree=tree[key],
                folders=tuple([*path, str(key)]),
            )
def get_dataframe_from_tree(tree: dict) -> pd.core.frame.DataFrame:
    """Build a pandas DataFrame from a KML tree dict.

    Parameters
    ----------
    tree : dict
        Tree of a KML document, given by get_tree() function.

    Returns
    -------
    pd.core.frame.DataFrame
        One row per placemark found in the tree.
    """
    # from_records consumes the get_data() generator lazily.
    return pd.DataFrame.from_records(get_data(tree))
def read_kml_data(filepath: str) -> pd.core.frame.DataFrame:
    """Read a KML file, returning its data as a Pandas DataFrame.

    Parameters
    ----------
    filepath : str
        Path of the KML file to read and parse.

    Returns
    -------
    pd.core.frame.DataFrame
        A DataFrame with data from the KML file.
    """
    # Pipeline: parse XML -> locate Document -> folder tree -> DataFrame.
    document = get_doc(read_kml(filepath))
    return get_dataframe_from_tree(get_tree(document))
| dankkom/kmldata | kmldata/parser.py | parser.py | py | 6,224 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pykml.parser.parse",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pykml.parser",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "pykml.factory.KML_ElementMaker.kml",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name"... |
22654441124 | from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
import numpy as np
from matplotlib import pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
# same process as svm: train an MLP on the first two iris features and plot
# the decision surface for each candidate solver.
np.random.seed(0)  # reproducible shuffling
iris = datasets.load_iris()
# Only sepal length/width so the decision boundary can be drawn in 2-D.
X = iris.data[:, 0:2]
y = iris.target
train_x, test_x, train_y, test_y = train_test_split(iris.data[:, :2], iris.target, test_size=0.3, random_state=0)
#hidden_layer_sizes=[(10,),(30,),(100,),(5,5),(10,10),(30,30)] # candidate hidden-layer shapes
#ativations=["logistic","tanh","relu"] # candidate activation functions
#learnrates=[0.1,0.01,0.001] # candidate learning rates
solvers=["lbfgs","sgd","adam"] # solvers compared in this run
for i, sol in enumerate(solvers):
    classifier = MLPClassifier(activation="tanh", max_iter=1000000,
                           hidden_layer_sizes=(10,5), solver=sol, learning_rate_init=0.01)
    classifier.fit(train_x, train_y)
    train_score = classifier.score(train_x, train_y)
    print('when solver =', sol, '\n','train_score=',train_score)
    test_score = classifier.score(test_x, test_y)
    print('test_score=',test_score,'\n')
    # Evaluate the model on a dense grid to draw the decision regions.
    x_min, x_max = train_x[:, 0].min() - 1, train_x[:, 0].max() + 2
    y_min, y_max = train_x[:, 1].min() - 1, train_x[:, 1].max() + 2
    plot_step = 0.02  # grid resolution
    xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
                         np.arange(y_min, y_max, plot_step))
    Z = classifier.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # One subplot per solver in a 1x3 grid.
    plt.subplot(1, 3, i + 1)
    plt.subplots_adjust(wspace=0.3, hspace=1)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.title(sol)
    # NOTE(review): the plt.show() on the following line runs INSIDE this
    # loop, blocking each iteration on a partially-filled figure; it should
    # probably be dedented to run once after the loop.
plt.show() | fh-Zh/Classification.old | bpnn.py | bpnn.py | py | 1,918 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.random.seed",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sklearn.datasets.load_iris",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sklearn.... |
7078437282 | import os
import base64
import argparse
from cliff.command import Command
from cliff.show import ShowOne
from cliff.lister import Lister
from meteoroid_cli.meteoroid.v1.client.function_client import FunctionClient
from meteoroid_cli.meteoroid.v1.errors import CommandError
from meteoroid_cli.meteoroid.v1.libs.decorator import fiware_arguments
# Recognised source / artifact file extensions.
NODE_JS_EXT = '.js'
PYTHON_EXT = '.py'
JAVA_EXT = '.jar'
SWIFT_EXT = '.swift'
PHP_EXT = '.php'
RUBY_EXT = '.rb'
GO_EXT = '.go'
BAL_BIN_EXT = '.balx'
ZIP_EXT = '.zip'
# Runtime language identifiers used by the backend.
NODE_JS = 'nodejs'
PYTHON = 'python'
JAVA = 'java'
SWIFT = 'swift'
PHP = 'php'
RUBY = 'ruby'
GO = 'go'
DEFAULT = 'default'
# Maps a file extension to its default runtime language.
# NOTE(review): BAL_BIN_EXT ('.balx') has no entry here, so a default
# language lookup for Ballerina binaries raises KeyError.
EXT_LANG = {
    NODE_JS_EXT: NODE_JS,
    PYTHON_EXT: PYTHON,
    JAVA_EXT: JAVA,
    SWIFT_EXT: SWIFT,
    PHP_EXT: PHP,
    RUBY_EXT: RUBY,
    GO_EXT: GO
}
class FunctionRequestDataBuilder:
    """Builds the JSON payload for function create/update requests.

    Binary artifacts (.zip/.jar/.balx) are base64-encoded with binary=True;
    plain sources are sent as UTF-8 text.  The runtime language comes from
    --language or is derived from the file extension.
    """

    def build(self, parsed_args):
        """Return the request dict built from the parsed CLI arguments.

        Raises
        ------
        CommandError
            When a Java artifact lacks --main, or no default language can be
            derived from the file extension.
        """
        data = {}
        if hasattr(parsed_args, 'name'):
            data['name'] = parsed_args.name
        _, extension = os.path.splitext(parsed_args.file.name)
        if extension in (ZIP_EXT, JAVA_EXT, BAL_BIN_EXT):
            # Binary artifacts must be base64-encoded to travel in JSON.
            data['code'] = base64.b64encode(parsed_args.file.read()).decode("ascii")
            data['binary'] = True
        else:
            data['code'] = parsed_args.file.read().decode("utf-8")
        if parsed_args.main is not None:
            data['main'] = parsed_args.main
        elif extension == JAVA_EXT:
            err_message = ('Java actions require --main (-m) to specify '
                           'the fully-qualified name of the main class')
            raise CommandError(err_message)
        if parsed_args.language is not None:
            data['language'] = parsed_args.language
        elif extension != ZIP_EXT:
            data['language'] = self.__get_default_language(extension)
        if parsed_args.param is not None:
            data['parameters'] = parsed_args.param
        return data

    def __get_default_language(self, extension):
        """Map a known file extension to 'language:default'.

        Bug fix: extensions missing from EXT_LANG (including '.balx', which
        has no entry) used to escape as a raw KeyError; raise CommandError
        with an actionable message instead, so take_action() reports it.
        """
        language = EXT_LANG.get(extension)
        if language is None:
            raise CommandError(
                f'Cannot determine a default language for extension '
                f'{extension!r}; specify one with --language (-l)')
        return f'{language}:{DEFAULT}'
class StoreKeyPairAction(argparse.Action):
    """argparse action accumulating repeated KEY VALUE pairs.

    Each occurrence appends ``{'key': KEY, 'value': VALUE}`` to the list
    already stored on the namespace (starting a fresh list the first time).
    """

    def __call__(self, parser, namespace, values, option_string=None):
        # NOTE: reads namespace.param directly (not self.dest) to pick up
        # previously accumulated pairs — kept as-is for compatibility.
        pairs = namespace.param if namespace.param is not None else []
        if len(values) == 2:
            key, value = values
            pairs.append({'key': key, 'value': value})
        setattr(namespace, self.dest, pairs)
class FunctionShow(ShowOne):
    """Show one function's details (optionally including its source code)."""

    @fiware_arguments
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        parser.add_argument('id', help='function id')
        parser.add_argument('-co',
                            '--code',
                            action='store_true',
                            help='Show the source code')
        return parser

    def take_action(self, parsed_args):
        # Fetch the function record from the Meteoroid API.
        response = FunctionClient().retrieve_function(
            id=parsed_args.id,
            fiware_service=parsed_args.fiwareservice,
            fiware_service_path=parsed_args.fiwareservicepath,
            code=parsed_args.code
        )
        # Normalise parameter entries to plain dicts for display.
        response['parameters'] = [dict(p) for p in response['parameters']]
        return response.keys(), response.values()
class FunctionList(Lister):
    """List all registered functions."""

    @fiware_arguments
    def get_parser(self, prog_name):
        return super().get_parser(prog_name)

    def take_action(self, parsed_args):
        response = FunctionClient().list_function(
            fiware_service=parsed_args.fiwareservice,
            fiware_service_path=parsed_args.fiwareservicepath
        )
        if not response:
            # Nothing registered: empty header and rows.
            return (), ()
        # Column names come from the first record's keys.
        return response[0].keys(), [item.values() for item in response]
class FunctionCreate(ShowOne):
    """Create a function from a source file or archive."""

    @fiware_arguments
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        parser.add_argument('name', help='Function name')
        parser.add_argument('file',
                            type=argparse.FileType('rb'),
                            help='Function file name')
        parser.add_argument('-l', '--language',
                            metavar='LANG:VERSION',
                            help='Program language')
        parser.add_argument('-m', '--main',
                            metavar='MAIN_FILE_NAME',
                            help='Main file name for java')
        parser.add_argument('-p', '--param',
                            nargs=2,
                            action=StoreKeyPairAction,
                            metavar=('KEY', 'VALUE'),
                            help='Inject param to Function')
        return parser

    def take_action(self, parsed_args):
        try:
            # build() validates the arguments and may raise CommandError.
            payload = FunctionRequestDataBuilder().build(parsed_args)
            response = FunctionClient().create_function(
                fiware_service=parsed_args.fiwareservice,
                fiware_service_path=parsed_args.fiwareservicepath,
                data=payload
            )
        except CommandError as err:
            self.app.stdout.write(err.args[0])
            return (), ()
        response['parameters'] = [dict(p) for p in response['parameters']]
        return response.keys(), response.values()
class FunctionUpdate(ShowOne):
    """Update an existing function."""

    @fiware_arguments
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        parser.add_argument('id', help='Function id')
        parser.add_argument('file',
                            type=argparse.FileType('rb'),
                            help='Function file name')
        parser.add_argument('-l', '--language',
                            metavar='LANG:VERSION',
                            help='Program language')
        parser.add_argument('-m', '--main',
                            metavar='MAIN_FILE_NAME',
                            help='Main file name for java')
        parser.add_argument('-p', '--param',
                            nargs=2,
                            action=StoreKeyPairAction,
                            metavar='KEY VALUE',
                            help='Inject param to Function')
        return parser

    def take_action(self, parsed_args):
        """Build the payload, send the update, and render the response."""
        try:
            # Bug fix / consistency with FunctionCreate: build() can raise
            # CommandError (e.g. a Java artifact without --main) and used to
            # be called OUTSIDE this try block, crashing the CLI instead of
            # printing the error.
            data = FunctionRequestDataBuilder().build(parsed_args)
            data['id'] = parsed_args.id
            response = FunctionClient().update_function(
                fiware_service=parsed_args.fiwareservice,
                fiware_service_path=parsed_args.fiwareservicepath,
                data=data
            )
            parameters = list(map(lambda x: dict(x), response['parameters']))
            response['parameters'] = parameters
            columns = response.keys()
            data = response.values()
            return columns, data
        except CommandError as e:
            self.app.stdout.write(e.args[0])
            return (), ()
class FunctionDelete(Command):
    "Delete a function"

    @fiware_arguments
    def get_parser(self, prog_name):
        parser = super().get_parser(prog_name)
        parser.add_argument('id', help='Function id')
        return parser

    def take_action(self, parsed_args):
        # Fire the delete request; errors propagate to the cliff framework.
        FunctionClient().delete_function(
            id=parsed_args.id,
            fiware_service=parsed_args.fiwareservice,
            fiware_service_path=parsed_args.fiwareservicepath,
        )
        self.app.stdout.write('Success delete function\n')
| OkinawaOpenLaboratory/fiware-meteoroid-cli | meteoroid_cli/meteoroid/v1/function.py | function.py | py | 7,858 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "os.path.splitext",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "base64.b64encode",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "meteoroid_cli.meteoroi... |
23420270770 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated Django migration for the 'stock' app.

    Adds the Stock multi-table-inheritance model, a creation timestamp on
    Producto, and tightens several CharField/DateTimeField definitions.
    Do not hand-edit beyond comments.
    """
    dependencies = [
        ('stock', '0088_auto_20160620_1304'),
    ]
    operations = [
        # Stock inherits from Producto via a OneToOne parent link
        # (multi-table inheritance).
        migrations.CreateModel(
            name='Stock',
            fields=[
                ('producto_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='stock.Producto')),
                ('stock_minimo', models.DecimalField(help_text=b'Cantidad minima del producto a mantener en Stock.', verbose_name=b'Stock Minimo', max_digits=10, decimal_places=3)),
                ('cantidad_existente', models.DecimalField(help_text=b'Cantidad Existente en Stock', verbose_name=b'Cantidad Existente', max_digits=10, decimal_places=3)),
            ],
            bases=('stock.producto',),
        ),
        # The datetime defaults below were frozen at generation time by
        # makemigrations; they only seed existing rows.
        migrations.AddField(
            model_name='producto',
            name='fecha_alta_producto',
            field=models.DateTimeField(default=datetime.datetime(2016, 6, 22, 18, 14, 4, 175000, tzinfo=utc), help_text=b'La Fecha de Alta se asigna al momento de guardar los datos del Producto. No se requiere el ingreso de este dato.', verbose_name=b'Fecha de Alta'),
        ),
        migrations.AlterField(
            model_name='precioproducto',
            name='fecha',
            field=models.DateTimeField(default=datetime.datetime(2016, 6, 22, 18, 14, 4, 176000, tzinfo=utc), help_text=b'Ingrese la fecha y hora en la que se define el precio de venta del producto.'),
        ),
        migrations.AlterField(
            model_name='producto',
            name='codigo_barra',
            field=models.CharField(help_text=b'', max_length=100, verbose_name=b'Codigo de Barra'),
        ),
        migrations.AlterField(
            model_name='producto',
            name='marca',
            field=models.CharField(help_text=b'', max_length=100, verbose_name=b'Marca'),
        ),
        migrations.AlterField(
            model_name='producto',
            name='producto',
            field=models.CharField(help_text=b'Ingrese el nombre o descripcion del Producto.', max_length=100, verbose_name=b'Nombre del Producto'),
        ),
    ]
| pmmrpy/SIGB | stock/migrations/0089_auto_20160622_1414.py | 0089_auto_20160622_1414.py | py | 2,301 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 16,
"usage_type": "call"
},
... |
6688406212 | from langchain.chat_models import ChatOpenAI
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import ChatPromptTemplate
from pydantic import BaseModel, Field
from dotenv import load_dotenv
from typing import List
import pandas as pd
import os
# loading api key and defining vars
load_dotenv("../.env")
deck_name = os.environ.get("DECK_NAME")  # Anki deck name, taken from .env
# NOTE(review): if DECK_NAME is unset, deck_name is None and the next line
# raises TypeError on string concatenation — confirm .env is always present.
csv_file_path = "../csv/" + deck_name + ".csv"
openai_api_key = os.environ.get("OPENAI_API_KEY")
model = "gpt-4"  # OpenAI chat model used for generation
temperature = 0.0  # deterministic sampling
class FlashCard(BaseModel):
    """A single question/answer flashcard parsed from the LLM output."""
    question: str = Field(description="The question for the flashcard")
    answer: str = Field(description="The answer for the flashcard")
class FlashCardArray(BaseModel):
    """Wrapper model so the parser expects a JSON object holding a 'flashcards' list."""
    flashcards: List[FlashCard]
def create_flashcards_from_text(input_text: str, user_prompt: str, csv_file_path: str):
    """Generate flashcards from *input_text* via the LLM and append them to a CSV.

    Parameters
    ----------
    input_text : str
        Source material the flashcards are generated from.
    user_prompt : str
        Prompt template; receives ``input_text`` and ``format_instructions``.
    csv_file_path : str
        Destination CSV (header-less, one question/answer row per card).
    """
    llm = ChatOpenAI(openai_api_key=openai_api_key, model=model, temperature=temperature)
    print("Creating flashcards...")
    # The pydantic parser supplies JSON-format instructions for the prompt
    # and validates the model's reply into FlashCardArray.
    pydantic_parser = PydanticOutputParser(pydantic_object=FlashCardArray)
    format_instructions = pydantic_parser.get_format_instructions()
    prompt = ChatPromptTemplate.from_template(template=user_prompt)
    messages = prompt.format_messages(input_text=input_text, format_instructions=format_instructions)
    output = llm(messages)
    flashcards = pydantic_parser.parse(output.content)
    df = pd.DataFrame([card.dict() for card in flashcards.flashcards])
    # Append mode creates the file when it does not exist, so the previous
    # os.path.isfile() branch (which wrote an identical header-less CSV in
    # "w" mode) was dead code and has been removed.
    df.to_csv(csv_file_path, mode="a", header=False, index=False)
def main():
    """Read input text and prompt from disk, then generate flashcards."""
    try:
        with open("input.txt", "r") as source, open("prompt.txt", "r") as template:
            input_text = source.read()
            user_prompt = template.read()
        create_flashcards_from_text(input_text, user_prompt, csv_file_path)
    except Exception as exc:
        # Best-effort CLI: report and bail out instead of crashing.
        print(f"Error occurred: {exc}")
        return
if __name__ == "__main__":
main()
| oresttokovenko/gpt-anki | src/generate_flashcards.py | generate_flashcards.py | py | 2,043 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
... |
27875338483 | import json
import requests
from openerp.tests.common import HttpCase
from openerp import api, exceptions, tools, models
HOST = '127.0.0.1'
# XML-RPC port of the Odoo instance under test, read from the loaded config.
PORT = tools.config['xmlrpc_port']
class Webhook(models.Model):
    # Extend the base 'webhook' model with a handler used only by the tests
    # below.  NOTE(review): the method name keeps the original 'wehook'
    # typo — the handler is presumably resolved by name from the event, so
    # renaming it would break dispatch; confirm before fixing.
    _inherit = 'webhook'
    @api.one
    def run_wehook_test_get_foo(self):
        """
        This method is just to test webhook.
        This needs receive a json request with
        next json values: {'foo': 'bar'}
        If value is different will raise a error.
        """
        if 'bar' != self.env.request.jsonrequest['foo']:
            raise exceptions.ValidationError(
                "Wrong value received")
class TestWebhookPost(HttpCase):
    """HTTP-level tests posting JSON events to the generic webhook controller."""
    def setUp(self):
        super(TestWebhookPost, self).setUp()
        self.webhook = self.env['webhook']
        # Base URL of the live test server started by HttpCase.
        self.url_base = "http://%s:%s" % (HOST, PORT)
        self.url = self.get_webhook_url()
    def get_webhook_url(self, url='/webhook',
                        webhook_name="wehook_test"):
        """
        :param string url: Full url of last url of webhook to use.
                           If you use a full url will return url
                           plus session_id
                           default: /webhook
        :param string webhook_name: Name of webhook to process
                                    default: webhook_test
        :return: url with
                 http://IP:PORT/webhook/webhook_name?session_id=###
        """
        webhook_name = webhook_name.replace('/', '')
        if url.startswith('/'):
            url = self.url_base + url + '/' + webhook_name
        # session_id authenticates the request against the test session.
        url += '?session_id=' + self.session_id
        return url
    def post_webhook_event(self, event, url, data, remote_ip=None,
                           headers=None, params=None):
        """
        :param string event String: Name of webhook event.
        :param string url: Full url of webhook services.
        :param dict data: Payload data of request.
        :param string remote_ip: Remote IP of webhook to set in
                                 test variable.
        :param dict headers: Request headers with main data.
        :param dict params: Extra values to send to webhook.
        """
        if headers is None:
            headers = {}
        if remote_ip is None:
            remote_ip = '127.0.0.1'
        # Test-only headers carrying the event name and spoofed origin IP.
        headers.update({
            'X-Webhook-Test-Event': event,
            'X-Webhook-Test-Address': remote_ip,
        })
        headers.setdefault('accept', 'application/json')
        headers.setdefault('content-type', 'application/json')
        payload = json.dumps(data)
        response = requests.request(
            "POST", url, data=payload,
            headers=headers, params=params)
        return response.json()
    def test_webhook_ping(self):
        """
        Test to check that 'ping' generic method work fine!
        'ping' event don't need to add it in inherit class.
        """
        json_response = self.post_webhook_event(
            'ping', self.url, {})
        has_error = json_response.get('error', False)
        self.assertEqual(
            has_error, False, 'Error in webhook ping test!')
    def test_webhook_get_foo(self):
        """
        Test to check that 'get_foo' event from 'webhook_test'
        work fine!
        This event is defined in inherit method of test.
        """
        json_response = self.post_webhook_event(
            'get_foo', self.url, {'foo': 'bar'})
        self.assertEqual(
            json_response.get('error', False), False,
            'Error in webhook get foo test!.')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Blancorama/blancorama_tools | webhook/tests/test_webhook_post.py | test_webhook_post.py | py | 3,596 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "openerp.tools.config",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "openerp.tools",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "openerp.models.Model",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "opene... |
27551610430 | import argparse
import logging
import os
import shutil
import sys
import tarfile
import tempfile
import traceback
from zipfile import ZIP_DEFLATED, ZIP_STORED, ZipFile
import rdiffweb
from rdiffweb.core.librdiff import LANG, STDOUT_ENCODING, find_rdiff_backup, popen
logger = logging.getLogger(__name__)
# Increase the chunk size to improve performance.
CHUNK_SIZE = 4096 * 10
# Prefix rdiff-backup prints (in verbose output) before each restored
# path; the restore loop keys on it to know which file to archive next.
TOKEN = b'Processing changed file '
class TarArchiver(object):
    """
    Streams files into a POSIX (PAX) tar archive, optionally compressed.
    """

    def __init__(self, dest, compression=''):
        """Open a tar stream at *dest* (path or file object).

        compression: '' (none), 'gz' or 'bz2'.
        """
        assert compression in ['', 'gz', 'bz2']
        mode = "w|" + compression
        if isinstance(dest, str):
            # Path given: tarfile owns the underlying file.
            self.fileobj = None
            self.z = tarfile.open(name=dest, mode=mode, encoding='UTF-8', format=tarfile.PAX_FORMAT)
        else:
            # File object given: remember it so close() can close it too.
            self.fileobj = dest
            self.z = tarfile.open(fileobj=dest, mode=mode, encoding='UTF-8', format=tarfile.PAX_FORMAT)

    def addfile(self, filename, arcname, encoding):
        """Add *filename* (bytes path) under *arcname* (bytes, repo encoding)."""
        assert isinstance(filename, bytes)
        assert isinstance(arcname, bytes)
        assert encoding
        # Never emit an entry for the archive root itself ("./").
        if arcname == b'.' and os.path.isdir(filename):
            return
        # tarfile's symlink handling is broken with bytes paths, so hand it
        # str with surrogateescape to survive undecodable bytes.
        fs_path = filename.decode('ascii', 'surrogateescape')
        # Member names must be str; converted back to UTF-8 by tarfile.
        member_name = arcname.decode(encoding, 'surrogateescape')
        self.z.add(fs_path, member_name, recursive=False)

    def close(self):
        """Finalize the archive and close the owned file object, if any."""
        self.z.close()
        if self.fileobj:
            self.fileobj.close()
class ZipArchiver(object):
    """
    Streams files into a zip archive, deflate-compressed or stored.
    """

    def __init__(self, dest, compress=True):
        method = ZIP_DEFLATED if compress else ZIP_STORED
        self.z = ZipFile(dest, 'w', method)

    def addfile(self, filename, arcname, encoding):
        """Add *filename* (bytes path) under *arcname* (bytes, repo encoding)."""
        assert isinstance(filename, bytes)
        assert isinstance(arcname, bytes)
        assert encoding
        # Never emit an entry for the archive root itself ("./").
        if arcname == b'.' and os.path.isdir(filename):
            return
        # ZipFile supports neither symlinks nor named pipes (bpo-26269,
        # bpo-18595): silently skip anything but plain files/directories.
        if os.path.islink(filename):
            return
        if not (os.path.isfile(filename) or os.path.isdir(filename)):
            return
        fs_path = filename.decode('ascii', 'surrogateescape')
        # Zip member names cannot carry surrogates; replace invalid chars.
        member_name = arcname.decode(encoding, 'replace')
        self.z.write(fs_path, member_name)

    def close(self):
        self.z.close()
class RawArchiver(object):
    """
    Used to stream a single file verbatim (no archive container).
    """

    def __init__(self, dest):
        """Open *dest* for writing; accepts a path or a writable file object."""
        assert dest
        self.dest = dest
        # Bug fix: __init__ used to set self.output for string paths but
        # self.outout (typo) for file objects, while addfile()/close() read
        # self.outout — so streaming to a path crashed with AttributeError.
        # Unified on a single self.output attribute.
        if isinstance(self.dest, str):
            self.output = open(self.dest, 'wb')
        else:
            self.output = dest

    def addfile(self, filename, arcname, encoding):
        """Append the contents of *filename* (bytes path); directories are skipped."""
        assert isinstance(filename, bytes)
        # Only stream files. Skip directories.
        if os.path.isdir(filename):
            return
        with open(filename, 'rb') as f:
            shutil.copyfileobj(f, self.output)

    def close(self):
        self.output.close()
# Maps the archive "kind" requested by the caller/CLI to an archiver
# factory taking the destination (path or file object).
ARCHIVERS = {
    'tar': TarArchiver,
    'tbz2': lambda dest: TarArchiver(dest, 'bz2'),
    'tar.bz2': lambda dest: TarArchiver(dest, 'bz2'),
    'tar.gz': lambda dest: TarArchiver(dest, 'gz'),
    'tgz': lambda dest: TarArchiver(dest, 'gz'),
    'zip': ZipArchiver,
    'raw': RawArchiver,
}
# Log everything to stderr.
def _print_stderr(msg, exc_info=False):
"""
Print messages to stderr.
"""
assert isinstance(msg, str)
print(msg, file=sys.stderr)
if exc_info:
traceback.print_exc(file=sys.stderr)
sys.stderr.flush()
def _lookup_filename(base, path):
    """
    Search for the given filename under *base*.

    Mitigates rdiff-backup 2 encoding issues: its verbose output replaces
    undecodable characters, so the printed path may not match the file on
    disk byte-for-byte.

    Returns a ``(fullpath, arcname)`` bytes tuple, or ``(None, None)`` when
    no matching entry exists.
    """
    assert isinstance(base, bytes)
    assert isinstance(path, bytes)
    # Easy path, if the file encoding is ok, will find the file.
    fullpath = os.path.normpath(os.path.join(base, path))
    if os.path.lexists(fullpath):
        return fullpath, path
    # Otherwise, scan the directory for an entry that maps to the same
    # replacement-decoded name as the one rdiff-backup printed.
    dirname = os.path.dirname(os.path.join(base, path))
    basename = os.path.basename(path)
    for file in os.listdir(dirname):
        if basename == file.decode(STDOUT_ENCODING, 'replace').encode(STDOUT_ENCODING, 'replace'):
            fullpath = os.path.join(dirname, file)
            arcname = os.path.relpath(fullpath, base)
            return fullpath, arcname
    return None, None
def restore(restore, restore_as_of, kind, encoding, dest, log=logger.debug):
    """
    Restore a file or a directory by driving rdiff-backup and streaming
    each restored entry into an archive as it appears.

    :param restore: relative or absolute file or folder to be restored
        (unquoted, bytes)
    :param restore_as_of: epoch (int) of the version to restore
    :param kind: type of archive to generate, or 'raw' to stream one file
    :param encoding: encoding of the repository (used to properly encode
        the filenames in the archive)
    :param dest: a filename or a file handler where to write the archive
    :param log: callable used for progress/error reporting
    """
    assert isinstance(restore, bytes)
    assert isinstance(restore_as_of, int)
    assert kind in ARCHIVERS
    # Generate a temporary location used to restore data.
    # This location will be deleted after restore.
    tmp_output = tempfile.mkdtemp(prefix=b'rdiffweb_restore_')
    log('restoring data into temporary folder: %r' % tmp_output)
    # Search full path location of rdiff-backup.
    rdiff_backup_path = find_rdiff_backup()
    # Need to explicitly export some environment variable. Do not export
    # all of them otherwise it also export some python environment variable
    # and might brake rdiff-backup process.
    env = {
        'LANG': LANG,
    }
    if os.environ.get('TMPDIR'):
        env['TMPDIR'] = os.environ['TMPDIR']
    # Verbosity 5 makes rdiff-backup print each processed path, which is
    # what the TOKEN-matching loop below depends on.
    cmd = [
        rdiff_backup_path,
        b'-v',
        b'5',
        b'--restore-as-of=' + str(restore_as_of).encode('latin'),
        restore,
        tmp_output,
    ]
    log('executing %r with env %r' % (cmd, env))
    # Open an archive.
    archive = ARCHIVERS[kind](dest)
    try:
        # Read the output of rdiff-backup
        with popen(cmd, env=env) as output:
            for line in output:
                # Since rdiff-backup 2.1.2b1 the line start with b'* '
                if line.startswith(b'* '):
                    line = line[2:]
                line = line.rstrip(b'\n')
                log('rdiff-backup: %r' % line)
                if not line.startswith(TOKEN):
                    continue
                # A new file or directory was processed. Extract the filename and
                # look for it on filesystem.
                value = line[len(TOKEN) :]
                fullpath, arcname = _lookup_filename(tmp_output, line[len(TOKEN) :])
                if not fullpath:
                    log('error: file not found %r' % value)
                    continue
                # Add the file to the archive.
                log('adding %r' % fullpath)
                try:
                    archive.addfile(fullpath, arcname, encoding)
                except Exception:
                    # Many error may happen when trying to add a file to the
                    # archive. To be more resilient, capture error and continue
                    # with the next file.
                    log('error: fail to add %r' % fullpath, exc_info=1)
                # Delete file once added to the archive.
                # (Keeps peak disk usage low while rdiff-backup keeps writing.)
                if os.path.isfile(fullpath) or os.path.islink(fullpath):
                    os.remove(fullpath)
    finally:
        # Close the pipe
        archive.close()
        # Clean-up the directory.
        if os.path.isdir(tmp_output):
            shutil.rmtree(tmp_output, ignore_errors=True)
        elif os.path.isfile(tmp_output):
            os.remove(tmp_output)
def main():
    """CLI entry point: parse arguments and run the restore to a file or stdout."""
    parser = argparse.ArgumentParser(description='Rdiffweb restore script.')
    parser.add_argument('--restore-as-of', type=int, required=True)
    parser.add_argument('--encoding', type=str, default='utf-8', help='Define the encoding of the repository.')
    parser.add_argument(
        '--kind', type=str, choices=ARCHIVERS, default='zip', help='Define the type of archive to generate.'
    )
    parser.add_argument('restore', type=str, help='Define the path of the file or directory to restore.')
    parser.add_argument('output', type=str, default='-', help='Define the location of the archive. Default to stdout.')
    parser.add_argument('--version', action='version', version='%(prog)s ' + rdiffweb.__version__)
    args = parser.parse_args()
    # handle encoding of the path: restore() requires a bytes path.
    path = args.restore
    if isinstance(path, str):
        path = os.fsencode(path)
    # handle output: '-' streams the binary archive to stdout.
    if args.output == '-':
        output = sys.stdout.buffer
    else:
        output = open(args.output, 'wb')
    # Execute the restore.
    try:
        restore(path, args.restore_as_of, args.kind, args.encoding, output, log=_print_stderr)
    except Exception:
        _print_stderr('error: failure to create the archive', exc_info=1)
        sys.exit(1)
if __name__ == "__main__":
main()
| ikus060/rdiffweb | rdiffweb/core/restore.py | restore.py | py | 9,753 | python | en | code | 114 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tarfile.open",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tarfile.PAX_FORMAT",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "tarfile.open",... |
2884786469 | # coding:utf-8
# @Time : 2020/4/21 19:18
# @Author: Xiawang
# Description:
import datetime
import time
import requests
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
'''
用于主流程监控定期执行并发送报警信息
'''
def get_fix_time():
    """Return the deadline (now + 10 minutes) formatted as 'YYYY-MM-DD HH:MM'."""
    deadline = datetime.datetime.now() + datetime.timedelta(minutes=10)
    return deadline.strftime("%Y-%m-%d %H:%M")
def run_pytest(module):
    """Ask the local pytest-runner service to execute a module's suite.

    :param module: mainprocess, open_api_lagou
    :return: decoded JSON result from the service
    """
    url = 'http://127.0.0.1:18980/data/pytest'
    print(url)
    result = requests.post(url=url, json={"module": module}, verify=False).json()
    print(result)
    return result
def send_feishu_report(module, pytest_result):
    """Format the pytest result and push it to the module's Feishu bot.

    state == 4: pass the raw data through.  state == 0: build a summary with
    per-case failures and the owners who must respond by the deadline.
    NOTE(review): state codes are inferred from usage here — confirm against
    the pytest-runner service.
    """
    if pytest_result.get('state') == 4:
        content = pytest_result.get('data')
        return send_feishu_bot(module=module, content=content)
    if pytest_result.get('state') == 0:
        summary_result = pytest_result['summary_result']
        fail_results = ''
        names = []
        for case_name, case_fail_result in pytest_result['fail_result'].items(
        ):
            fail_result = f'''用例{case_name}报错:{case_fail_result['error_type']},原因:{case_fail_result['log']},测试:{case_fail_result.get('tester_name')},开发:{case_fail_result.get('rd_name')}\n\n'''
            fail_results += fail_result
            names.extend([case_fail_result.get('tester_name'), case_fail_result.get('rd_name')])
        # NOTE(review): list.remove drops only the FIRST occurrence, and the
        # elif means only one of ''/None is ever removed — confirm intent.
        if '' in names:
            names.remove('')
        elif None in names:
            names.remove(None)
        fix_time = get_fix_time()
        name_template = f'''请{','.join(list(set(names)))}在{fix_time}之前,尽快处理并给出反馈'''
        content = "{}\n\n具体失败结果:\n{}\n请大家对线上问题保持敬畏之心!\n{}".format(summary_result, fail_results, name_template)
        print(content)
        return send_feishu_bot(module=module, content=content)
def send_mail(module):
    """Email the module's HTML test report to the QA mailing list.

    :param module: report name prefix, e.g. 'mainprocess'
    :return: True on success, False when sending failed
    """
    sender = 'autotest@lagoujobs.com'
    # NOTE(review): credentials hard-coded in source; move to env/config
    # and rotate the exposed password.
    sender_password = 'Lqq123456'
    # Bug fix: the list was missing a comma after huifang@lagou.com, so
    # implicit string concatenation silently merged it with the next
    # address into one invalid recipient
    # ('huifang@lagou.combingoonchen@lagou.com').
    receivers = ['zane@lagou.com', 'sunnyzhang@lagou.com',
                 'sunnysun@lagou.com', 'huifang@lagou.com',
                 'bingoonchen@lagou.com', 'anan@lagou.com',
                 'foxtang01@lagou.com']
    ret = True
    try:
        message = MIMEMultipart()
        message['From'] = Header(f"自动化测试报告", 'utf-8')
        message['To'] = Header("测试工程师", 'utf-8')
        subject = f'{module}测试报告'
        message['Subject'] = Header(subject, 'utf-8')
        message.attach(
            MIMEText('自动化测试报告详见附件', 'plain', 'utf-8')
        )
        report_file_path = f'/home/test/lg-apiscript-python/backend/templates/{module}_report.html'
        print(report_file_path)
        # Attach the HTML report as a binary download.
        att1 = MIMEText(open(report_file_path, 'rb').read(),
                        'base64', 'utf-8')
        att1["Content-Type"] = 'application/octet-stream'
        att1["Content-Disposition"] = f'attachment; filename={module}_report.html'
        message.attach(att1)
        server = smtplib.SMTP_SSL("smtp.exmail.qq.com", 465)
        server.login(sender, sender_password)
        server.sendmail(sender, receivers, message.as_string())
        server.quit()
    except Exception as e:
        # Best-effort: report the failure, signal it via the return value.
        print(str(e))
        ret = False
    return ret
def send_feishu_bot(module, content):
    """Push *content* to the Feishu bot webhook configured for *module*.

    Text is capped at 2000 characters. Returns the 'ok' field of the
    bot's JSON reply.
    """
    module_bot = {
        'test': 'https://open.feishu.cn/open-apis/bot/hook/882babeafa3e4f0b839d6ff41efa2b84',
        'mainprocess': 'https://open.feishu.cn/open-apis/bot/hook/03654ef57c4f4418ba8802cfa1cf06a0',
        'open_api_lagou': 'https://open.feishu.cn/open-apis/bot/hook/ad282603210042cdb3e414f36e1acbb8'
    }
    payload = {
        "title": "自动化测试结果:",
        "text": content if len(content) < 2000 else content[:2000]
    }
    result = requests.post(url=module_bot.get(module), json=payload, verify=False).json()
    return result.get('ok')
def get_number(string: str):
    """Parse the leading space-delimited token of *string* as an int.

    Returns 0 when the first token is not a plain digit string.
    """
    token = string.split(' ')[0]
    return int(token) if token.isdigit() else 0
def send_oss(pytest_result):
    """Raise an OSS alarm for each failed case in *pytest_result*.

    Bug fix: the original returned from inside the for loop, so only the
    first failed case was ever reported.  Now every failure fires an alarm;
    returns the result of the last oss_filter_event() call (None when there
    are no failures), preserving the old return type.
    """
    total_result = pytest_result['data']['result']['info']['result']
    errors = total_result['fail_result']
    name = "main_process_test"
    source = "main_process_test.py"
    description = "主流程测试"
    result = None
    for case_name, detail in errors.items():
        module_name = case_name.strip()
        error_type = detail['error_type'].strip()
        error_cause = detail['log'].strip()
        if error_cause == '具体详情,请查看测试报告':
            # Placeholder log only: downgrade to WARNING, notify QA alone.
            cause = error_type
            level = 'WARNING'
            user_ids = 'mornyue'
        else:
            cause = error_cause
            level = 'PROBLEM'
            user_ids = 'mornyue,huifang'
        result = oss_filter_event(module_name=module_name, name=name, cause=cause,
                                  level=level, user_ids=user_ids,
                                  description=description, source=source)
    return result
def oss_filter_event(module_name, name, description, level, user_ids: str, cause, source):
    """Send an alarm event to the lg-alarm-filter service.

    The service expects ["moduleName", "name", "description", "level",
    "userids", "cause"].

    :param module_name: module or host name
    :param name: event type
    :param description: alarm description
    :param level: alarm level
    :param user_ids: comma-separated recipients; both ASCII and full-width
        commas are accepted, and whitespace around names is stripped here
        because the backend does not handle it
    :param cause: what triggered the alarm
    :param source: origin of the data
    """
    # Accept either comma flavour; otherwise treat the string as one name.
    for sep in (',', ','):
        if sep in user_ids:
            recipients = user_ids.split(sep)
            break
    else:
        recipients = [user_ids]
    user_ids = ','.join(item.strip() for item in recipients)
    url = 'http://10.10.5.138:8081/filter/event'
    params = {
        'moduleName': module_name,
        'name': name,
        'description': description,
        'level': level,
        'userids': user_ids,
        'cause': cause,
        'source': source
    }
    requests.post(url, json=params)
def main(module):
    """Run the module's suite (with one retry) and fan out the reports."""
    pytest_result = run_pytest(module)
    # Retry once after a pause unless state == 1.
    # NOTE(review): state-code semantics are not visible here — confirm
    # whether 1 means "success" or "already running" against the service.
    if pytest_result.get('state', 0) != 1:
        time.sleep(10)
        print(1)
        pytest_result = run_pytest(module)
        print(pytest_result)
    if pytest_result.get('state', 0) != 1:
        send_feishu_result = send_feishu_report(module, pytest_result)
        print(send_feishu_result)
        # Email the full HTML report only when the Feishu push succeeded.
        if send_feishu_result == True:
            send_mail(module)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='获取执行的模块')
parser.add_argument('--module', help='获取执行模块')
args = parser.parse_args()
if args.module is not None:
main(module=args.module) | Ariaxie-1985/aria | task/send_auto_test_report.py | send_auto_test_report.py | py | 7,283 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "reque... |
33279344042 | import requests, json
BaseURL = 'https://paper-api.alpaca.markets'  # Alpaca paper-trading endpoint
OrdersURL = '{}/v2/orders'.format(BaseURL)
# SECURITY(review): live API credentials are hard-coded and committed to
# source; move them to environment variables and rotate the exposed keys.
Headers = {"APCA-API-KEY-ID": "PKU31JDVN0AYRLMI1MEQ", "APCA-API-SECRET-KEY": "LVgw3y2RuffyDAMsjDR2EfscgsokGNTsuSEn3LUb"}
def create_order(symbol, qty, side, type, time_in_force):
    """POST an order to the Alpaca paper-trading API and return the decoded JSON reply.

    The parameters mirror the fields of the Alpaca /v2/orders payload; *type*
    keeps its API field name even though it shadows the builtin.
    """
    payload = {
        'symbol': symbol,
        'qty': qty,
        'side': side,
        'type': type,
        'time_in_force': time_in_force
    }
    response = requests.post(OrdersURL, json=payload, headers=Headers)
    return json.loads(response.content)
| Jacob-Kenney/JagBot | 4.Buy stocks/Buystocks.py | Buystocks.py | py | 534 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "requests.post",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 17,
"usage_type": "call"
}
] |
2317503124 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from core.loss.multi_task import MultiTaskProxy
from core.loss.segment import SemanticSegmentation
from core.loss.matting import ImageMatting
from core.loss.grad import MattingGrad, ImageGradient
class JointMattingParsingLoss(nn.Module):
    """Joint loss for matting + human parsing.

    In the 'pretrain' phase only the segmentation loss is optimised; in the
    'end2end' phase segmentation and matting losses are combined through a
    learned multi-task weighting proxy.
    """

    def __init__(self, phase):
        super(JointMattingParsingLoss, self).__init__()
        assert phase in ('pretrain', 'end2end')
        print('============> Using join loss: {}'.format(phase))
        self.phase = phase
        self._multi_task_weight(phase)
        self.segment_loss = SemanticSegmentation()
        self.matting_loss = ImageMatting()
        # self.gradient_loss = MattingGrad()

    def _multi_task_weight(self, phase):
        # The task-weighting proxy is only needed for end-to-end training.
        if phase == 'end2end':
            self.proxy = MultiTaskProxy(num_task=2)

    def forward(self, target, output):
        seg_pred, mat_pred = output['segment'], output['matting']
        seg_true, mat_true = target['segment'], target['matting']
        loss_seg = self.segment_loss(segment_pr=seg_pred, segment_gt=seg_true, weight=None)
        if self.phase == 'pretrain':
            # Pretraining optimises the parsing branch alone.
            return loss_seg
        loss_mat = self.matting_loss(matting_pr=mat_pred, matting_gt=mat_true, segment_gt=seg_true)
        total = self.proxy([loss_seg, loss_mat])
        return total, loss_seg, loss_mat
| xinyunmian/matting | core/loss/joint.py | joint.py | py | 1,456 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "core.loss.segment.SemanticSegmentation",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "... |
6129429499 | import argparse
from src.gt_merger import constants
def get_parser():
    """Build the CLI argument parser for the ground-truth merger and return the parsed args.

    :return: argparse.Namespace holding the merger options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--obaFile', type=str, required=True, help='Path to CSV file exported from OBA Firebase '
                                                                   'Export App')
    parser.add_argument('--gtFile', type=str, required=True, help='Path to XLSX file including the Ground Truth data')
    parser.add_argument('--outputDir', type=str, default=constants.OUTPUT_DIR,
                        help='Path to directory where the merged data and log data will be output')
    parser.add_argument('--minActivityDuration', type=float, default=constants.MIN_ACTIVITY_DURATION,
                        help='Minimum activity time span (minutes, default value = ' +
                             str(constants.MIN_ACTIVITY_DURATION) +
                             '), shorter activities will be dropped before merging.')
    parser.add_argument('--minTripLength', type=int, default=constants.MIN_TRIP_LENGTH,
                        help='Minimum length distance (meters, default value ' + str(constants.MIN_TRIP_LENGTH) +
                             ') for a trip. Shorter trips will be dropped before merging')
    parser.add_argument('--tolerance', type=int, default=constants.TOLERANCE,
                        help='Maximum tolerated difference (milliseconds, default value ' + str(constants.TOLERANCE) +
                             ') between matched ground truth data activity and OBA data activity')
    parser.add_argument('--iterateOverTol', dest='iterateOverTol', action='store_true')
    parser.add_argument('--no-iterateOverTol', dest='iterateOverTol', action='store_false')
    parser.set_defaults(iterateOverTol=False)
    parser.add_argument('--removeStillMode', dest='removeStillMode', action='store_true')
    parser.add_argument('--no-removeStillMode', dest='removeStillMode', action='store_false')
    parser.set_defaults(removeStillMode=True)
    parser.add_argument('--mergeOneToOne', dest='mergeOneToOne', action='store_true')
    parser.add_argument('--no-mergeOneToOne', dest='mergeOneToOne', action='store_false')
    parser.set_defaults(mergeOneToOne=False)
    parser.add_argument('--repeatGtRows', dest='repeatGtRows', action='store_true')
    parser.add_argument('--no-repeatGtRows', dest='repeatGtRows', action='store_false')
    # BUG FIX: this call previously repeated `mergeOneToOne=False` (copy-paste),
    # leaving repeatGtRows with a default of None instead of False.
    parser.set_defaults(repeatGtRows=False)
    parser.add_argument('--deviceList', type=str, default="",
                        help='Path to txt file including white list of OBA devices to be used for match and merge')
    args = parser.parse_args()
    return args
| CUTR-at-USF/onebusaway-travel-behavior-analysis | src/gt_merger/args.py | args.py | py | 2,668 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "src.gt_merger.constants.OUTPUT_DIR",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "src.gt_merger.constants",
"line_number": 13,
"usage_type": "name"
},
{... |
26825630541 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import argparse, os, sys, glob, time
from tqdm import tqdm
from skimage.transform import resize
import cPickle
from keras.layers import average
from keras.models import load_model, Model
from training_utils import scale_data, compute_time_series
import PlotCand_dom
from waterfaller import filterbank, waterfall
"""After taking in a directory of .fil files and a model,
outputs probabilities that the files contain an FRB. Also
returns the files that have FRBs in them, and optionally
saves those filenames to some specified document."""
# used for reading in h5 files
os.environ['HDF5_USE_FILE_LOCKING'] = 'FALSE'
def extract_candidates(fil_file, frb_cands, frbcand_path, NCHAN, NTIME, save_png=False):
    """Extract candidate pulses from a filterbank file by delegating to PlotCand_dom.

    Reads observation metadata from the filterbank header and forwards
    everything to PlotCand_dom.extractPlotCand, which presumably writes one
    Spectra pickle per candidate next to *frbcand_path* -- TODO confirm.
    """
    # load filterbank file and candidate list
    f = PlotCand_dom.FilReader(fil_file)
    # other parameters
    noplot = 1  # NOTE(review): 1 appears to mean "do not plot" -- confirm in PlotCand_dom
    nchan = f.header['nchans']
    fch1 = f.header['fch1']
    foff = f.header['foff']
    fl = fch1 + (foff*nchan)  # band edge opposite fch1 (foff is presumably negative -- TODO confirm)
    fh = fch1
    tint = f.header['tsamp']
    Ttot = f.header['tobs']
    kill_time_range, kill_chans = [], []  # no RFI masking is applied here
    source_name = f.header['source_name']
    mask_file, smooth, zerodm, csv_file = [], [], [], [] # last arguments are missing
    PlotCand_dom.extractPlotCand(fil_file, frb_cands, noplot, fl, fh, tint, Ttot, kill_time_range,
                                 kill_chans, source_name, nchan, NCHAN, NTIME, mask_file, smooth,
                                 zerodm, csv_file, save_png, frbcand_path)
def save_prob_to_disk(frb_info, pred, fname):
    """Write candidate metadata plus the predicted FRB probability to *fname*.

    *pred* must be ordered by descending SNR (the order candidates were fed to
    the network).  NOTE: sorts *frb_info* in place (descending SNR) as a side
    effect, then writes rows re-sorted by sample index.
    """
    assert pred.size == frb_info.size, \
        "Number of predictions don't match number of candidates ({0} vs. {1})".format(len(pred), len(frb_info))
    if frb_info.size == 1:  # length-1 structured arrays are buggy
        frb_info = frb_info.reshape(1)

    # Descending SNR, to line up with the order predictions were made in.
    frb_info[::-1].sort(order='snr')

    # Extend the structured dtype with a probability column.
    field_names = ['snr', 'time', 'samp_idx', 'dm', 'filter', 'prim_beam']
    extended_dtype = np.dtype(frb_info.dtype.descr + [('frb_prob', 'f4')])
    combined = np.zeros(frb_info.shape, dtype=extended_dtype)
    for field in field_names:
        combined[field] = frb_info[field]
    combined['frb_prob'] = pred

    # Restore chronological (sample-index) order before writing.
    combined.sort(order='samp_idx')
    np.savetxt(fname, combined, fmt='%-12s')
def get_pulses(dir_spectra, num_channels, keep_spectra=False):
    """Imports *ALL SPECTRA* in given directory and appends them to one list.
    Spectra are assumed to be in .pickle files which are subsequently deleted
    after being imported.

    NOTE(review): this module is Python 2 (cPickle import); unless
    keep_spectra=True the pickles are destructively removed via shell `rm`.
    Returns (sorted pickle paths, array of Spectra objects).
    """
    # get all pickled Spectra and prepare array to hold them in memory
    pickled_spectra = np.sort(glob.glob('{}/*sec_DM*.pickle'.format(dir_spectra)))
    print('Spectra found at {}'.format(pickled_spectra))
    candidate_spectra = []
    # add each Spectra to array
    for spec_file in tqdm(pickled_spectra):
        with open(spec_file, 'rb') as f:
            spectra_obj = cPickle.load(f)
        # print("File {0} has shape {1}".format(spec_file, spectra_obj.data.shape))
        # resize image to correct size for neural network prediction
        # (freq axis -> num_channels rows, time axis -> 256 columns)
        spectra_obj.data = resize(spectra_obj.data, (num_channels, 256), mode='symmetric', anti_aliasing=False)
        candidate_spectra.append(spectra_obj)
    # remove all pickle files matching this format
    if not keep_spectra:
        os.system('rm {}/*sec_DM*.pickle'.format(dir_spectra))
    return pickled_spectra, np.array(candidate_spectra)
def create_ensemble(model_names):
    """Build a Keras model that averages the predictions of several saved models.

    Averaging reduces variance, assuming each model tests a different
    hypothesis (i.e. the models are not identical).  The returned model reuses
    the input tensors of the last model loaded.
    """
    prediction_tensors = []
    last_loaded = None
    for path in model_names:
        last_loaded = load_model(path, compile=True)
        # collect the prediction output of each constituent model
        prediction_tensors.append(last_loaded.outputs[0])
    # mean of all constituent predictions becomes the ensemble output
    averaged_output = average(prediction_tensors)
    return Model(inputs=last_loaded.inputs, outputs=averaged_output)
if __name__ == "__main__":
    """
    Parameters
    ---------------
    model_name: str
        Path to trained model used to make prediction. Should be .h5 file
    frb_cand_path: str
        Path to .txt file that contains data about pulses within filterbank file. This
        file should contain columns 'snr','time','samp_idx','dm','filter', and'prim_beam'.
    filterbank_candidate: str
        Path to candidate file to be predicted. Should be .fil file
    NCHAN: int, optional
        Number of frequency channels (default 64) to resize psrchive files to.
    no-FRBcandprob: flag, optional
        Whether or not to save edited FRBcand file containing pulse probabilities.
    FRBcandprob: str, optional
        Path to save FRBcandprob.txt (default is same path as frb_cand_path)
    save_top_candidates: str, optional
        Filename to save pre-processed candidates, just before they are thrown into CNN.
    save_predicted_FRBs: str, optional
        Filename to save every candidate predicted to contain an FRB.
    """
    # Read command line arguments
    parser = argparse.ArgumentParser()
    # main arguments needed for prediction
    parser.add_argument('frb_cand_path', type=str, help='Path to .txt file containing data about pulses.')
    parser.add_argument('model_names', nargs='+', type=str,
                        help='Path to trained models used to make prediction. If multiple are given, use all to ensemble.')
    parser.add_argument('-f', '--fil_file', dest='filterbank_candidate', type=str, required='--skip_extract' not in sys.argv,
                        help='Path to filterbank file with candidates to be predicted.')
    # can set if pickle files are already in directory to avoid having to redo extraction
    parser.add_argument('--skip_extract', action='store_true',
                        help='Whether to directly predict pickled spectra found in same dir as frb_cand_path.')
    parser.add_argument('--NCHAN', type=int, default=64, help='Number of frequency channels to use from filterbank files.')
    parser.add_argument('--NTIME', type=int, default=256, help='Number of time bins from filterbank files.')
    parser.add_argument('--thresh', type=float, default=0.5, help='Threshold probability to admit whether example is FRB or RFI.')
    parser.add_argument('--no-FRBcandprob', dest='suppress_prob_save', action='store_true',
                        help='Chooses not to save the FRBcand .txt file along with candidate probabilities.')
    parser.add_argument('--keep_spectra', dest='keep_spectra', action='store_true',
                        help='Keep spectra pickle files after creating and using them. Default is to delete.')
    parser.add_argument('--FRBcandprob', type=str, default=None,
                        help='Directory to save new FRBcand file with probabilities (default is same dir as frb_cand_path)')
    parser.add_argument('--save_predicted_FRBs', type=str, default=None, help='Filename to save all candidates.')
    parser.add_argument('--save_top_candidates', type=str, default=None, help='Filename to save plot of top 5 candidates.')
    # BUG FIX: set_defaults() must run before parse_args() to take effect; it
    # was previously called afterwards (masked only because store_true actions
    # already default to False).
    parser.set_defaults(skip_extract=False, suppress_prob_save=False, keep_spectra=False)
    args = parser.parse_args()

    # load file path
    filterbank_candidate = args.filterbank_candidate
    frb_cand_path = args.frb_cand_path
    NCHAN = args.NCHAN
    NTIME = args.NTIME
    model_names = args.model_names  # either single model or list of models to ensemble predict

    frb_cand_info = np.loadtxt(frb_cand_path, dtype={'names': ('snr','time','samp_idx','dm','filter','prim_beam'),
                                                     'formats': ('f4', 'f4', 'i4','f4','i4','i4')})
    if args.skip_extract is False:
        print("Getting data about FRB candidates from " + frb_cand_path)
        extract_candidates(filterbank_candidate, frb_cand_info, frb_cand_path, NCHAN, NTIME)
        time.sleep(10)  # give some leeway for extraction in background to finish

    print("Retrieving candidate spectra")
    spectra_paths, candidate_spectra = get_pulses(os.path.dirname(frb_cand_path), NCHAN, keep_spectra=args.keep_spectra)

    # retrieve freq-time data from each spectra
    ftdata = np.array([spec.data for spec in candidate_spectra])

    # compute time series for every spectrogram in ftdata
    print('Getting time series for each sample...'),
    time_series = compute_time_series(ftdata)
    print('All time series computed!\n')

    # scale each channel to zero median and each array to unit stddev
    print("\nScaling arrays."),
    scale_data(ftdata)
    print("Done scaling!")

    # add num_channel dimension to vectors for Keras
    ftdata = ftdata[..., None]
    time_series = time_series[..., None]

    # load model(s) and predict
    if len(model_names) == 1:
        model = load_model(model_names[0], compile=True)
    else:
        model = create_ensemble(model_names)
    predictions = model.predict([ftdata, time_series], verbose=1)[:, 0]
    print(predictions)

    # save probabilities to disk along with candidate data
    if not args.suppress_prob_save:
        if not args.FRBcandprob:
            FRBcand_prob_path = os.path.dirname(frb_cand_path) + '/FRBcand_prob.txt'
        else:
            FRBcand_prob_path = args.FRBcandprob + '/FRBcand_prob.txt'
        print("Saving probabilities to {0}".format(FRBcand_prob_path))
        save_prob_to_disk(frb_cand_info, predictions, FRBcand_prob_path)

    # threshold predictions to choose FRB/RFI
    voted_FRB_probs = predictions > args.thresh
    # get paths to predicted FRBs and their probabilities
    frb_filenames = spectra_paths[voted_FRB_probs]
    predicted_frbs = candidate_spectra[voted_FRB_probs]
    frb_probs = predictions[voted_FRB_probs]

    # save all predicted FRBs to PDF, where each page contains spectrogram and 1D signal
    if args.save_predicted_FRBs:
        from matplotlib.backends.backend_pdf import PdfPages
        print('Saving all predicted FRBs to {}.pdf'.format(args.save_predicted_FRBs))
        with PdfPages(args.save_predicted_FRBs + '.pdf') as pdf:
            for spec, prob, name in tqdm(zip(predicted_frbs, frb_probs, frb_filenames), total=len(predicted_frbs)):
                frb_name = os.path.basename(name)
                fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(8, 6))
                signal = np.sum(spec.data, axis=0)  # 1D time series of array
                # plot spectrogram on top and signal below it
                ax[0].imshow(spec.data, extent=[spec.starttime, spec.starttime + len(signal)*spec.dt,
                                                np.min(spec.freqs), np.max(spec.freqs)], origin='upper', aspect='auto')
                ax[0].set(xlabel='time (s)', ylabel='freq (MHz)', title='{0}\nConfidence: {1}'.format(frb_name, prob))
                ax[1].plot(np.linspace(spec.starttime, spec.starttime + len(signal)*spec.dt, len(signal)), signal)
                ax[1].set(xlabel='time (s)', ylabel='flux (Janksy)')
                pdf.savefig()
                plt.close(fig)

    # save the best 5 candidates to disk along with 1D signal
    if args.save_top_candidates:
        print("Saving top 5 candidates to {0}".format(args.save_top_candidates))
        # sort probabilities high --> low to get top candidates in order
        sorted_predictions = np.argsort(-predictions)
        top_pred_spectra = candidate_spectra[sorted_predictions]
        probabilities = predictions[sorted_predictions]
        fig, ax_pred = plt.subplots(nrows=5, ncols=2, figsize=(14, 12))
        for spec, prob, ax in zip(top_pred_spectra[:5], probabilities[:5], ax_pred):
            signal = np.sum(spec.data, axis=0)  # 1D time series of array
            # plot spectrogram on left and signal on right
            ax[0].imshow(spec.data, extent=[spec.starttime, spec.starttime + len(signal)*spec.dt,
                                            np.min(spec.freqs), np.max(spec.freqs)], origin='upper', aspect='auto')
            ax[0].set(xlabel='time (s)', ylabel='freq (MHz)', title='Confidence: {}'.format(prob))
            ax[1].plot(np.linspace(spec.starttime, spec.starttime + len(signal)*spec.dt, len(signal)), signal)
            ax[1].set(xlabel='time (s)', ylabel='flux (Janksy)')
        fig.suptitle('Top 5 Predicted FRBs')
        fig.tight_layout(rect=[0, 0.02, 1, 0.95])
        fig.show()
        fig.savefig(args.save_top_candidates, dpi=300)

    print('Number of FRBs: {} / {} candidates'.format(np.sum(voted_FRB_probs), len(voted_FRB_probs)))
| DominicL3/hey-aliens | simulateFRBclassification/predict.py | predict.py | py | 13,210 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "PlotCand_dom.FilReader",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "PlotCand_dom.extractPlotCand",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "n... |
20077173649 | from django.urls import resolve
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase, APIRequestFactory
from cars.models import Car, Manufacturer
from cars.serializers import CarGetSerializer
from cars.views import CarListCreateView
factory = APIRequestFactory()
class CarListViewTest(APITestCase):
    """Tests for CarListCreateView's list endpoint (GET /cars/)."""

    def setUp(self) -> None:
        # Bare view instance for direct get_queryset() calls, plus the
        # callable view used to serve factory-built requests.
        self.view_object = CarListCreateView()
        self.view = CarListCreateView.as_view()
        self.url = reverse("cars:cars")
        self.request = factory.get(self.url)

    def test_url_revers(self):
        # The named URL must resolve to this view and to the literal path.
        found = resolve(self.url)
        self.assertEqual(found.func.__name__, self.view.__name__)
        self.assertEqual(self.url, "/cars/")

    def test_empty_car_list(self):
        # With no Car rows, the endpoint should return an empty serialized list.
        cars = Car.objects.all()
        serializer = CarGetSerializer(cars, many=True)
        response = self.view(self.request)
        response.render()
        self.assertEqual(response.data, serializer.data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_not_empty_car_list(self):
        # Seed two cars and check the endpoint returns them all, serialized
        # the same way as the view's own queryset.
        manufacturer = Manufacturer.objects.create(make="Ford")
        Car.objects.create(manufacturer=manufacturer, model="Mustang")
        Car.objects.create(manufacturer=manufacturer, model="F-150")
        cars = self.view_object.get_queryset()
        serializer = CarGetSerializer(cars, many=True)
        response = self.view(self.request)
        response.render()
        self.assertEqual(response.data, serializer.data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
| tomasz-rzesikowski/cars_API | cars/tests/tests_views/tests_car_list_view.py | tests_car_list_view.py | py | 1,620 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.test.APIRequestFactory",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "rest_framework.test.APITestCase",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "cars.views.CarListCreateView",
"line_number": 15,
"usage_type": "ca... |
35217198422 | import requests
from urllib.request import urlopen
import urllib
from selenium import webdriver
from bs4 import BeautifulSoup
import http.client
from openpyxl import Workbook
from openpyxl import load_workbook
from openpyxl.writer.excel import ExcelWriter
from openpyxl.cell.cell import ILLEGAL_CHARACTERS_RE
import json
import string
import math
import time
http.client._MAXHEADERS = 1000
def urllib_download(IMAGE_URL, pName):
    """Best-effort download of IMAGE_URL to '<pName>.jpg' in the working directory.

    Path separators are stripped from *pName* so it stays a plain filename.
    Never raises: failures are logged and swallowed so a crawl can continue.
    """
    try:
        opener = urllib.request.build_opener()
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        urllib.request.install_opener(opener)
        safe_name = pName.replace("/", "").replace("\\", "")
        urllib.request.urlretrieve(IMAGE_URL, safe_name + '.jpg')
    except Exception as exc:
        # BUG FIX: was a bare `except:` printing only 'no' -- it swallowed
        # KeyboardInterrupt/SystemExit and gave no hint of what failed.
        print('download failed for {}: {}'.format(IMAGE_URL, exc))
def getNodeText(node):
    """Return the stripped text of a BeautifulSoup node, or "" when *node* is None.

    :param node: a bs4 element exposing get_text(), or None (find() miss)
    :return: stripped text content, or "" for a missing node
    """
    # idiom fix: identity check against None instead of `== None`
    if node is None:
        return ""
    return node.get_text().strip()
# Module-level retry counter shared by getHtmlFromUrl's recursive retries.
retryCount = 0
def getHtmlFromUrl(url, type="get", para={}):
    """Fetch *url* and return the raw HTML body (bytes), retrying up to 5 times.

    Returns '' when the HEAD probe is not 200 or retries are exhausted.
    NOTE(review): the success path returns bytes while failure paths return
    str '' -- callers only use len(), so it works, but it is inconsistent.
    NOTE(review): *type* and *para* are unused; *para*'s mutable default is
    harmless only because it is never touched.
    """
    global retryCount
    try:
        # percent-encode non-ASCII characters while keeping printable ASCII
        url = urllib.parse.quote(url, safe=string.printable).replace(' ','%20')
        request_obj=urllib.request.Request(url=url, headers={
            'Content-Type': 'text/html; charset=utf-8',
            "User-Agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36"
        })
        # cheap reachability probe before downloading the body
        htmlHeader = requests.head(url)
        if htmlHeader.status_code ==200:
            response_obj=urllib.request.urlopen(request_obj)
            html_code=response_obj.read()
            return html_code
        else:
            return ''
    except:  # NOTE(review): bare except also swallows KeyboardInterrupt -- consider `except Exception`
        retryCount = retryCount + 1
        if retryCount < 5:
            print("retry index"+str(retryCount)+url)
            time.sleep(60)
            return getHtmlFromUrl(url)
        else:
            # NOTE(review): the counter is only reset after exhausting retries,
            # never after a success, so failures accumulate across different
            # URLs -- TODO confirm this is intended
            retryCount = 0
            return ""
def requestJson(url):
    """POST to *url* with a fixed browser-like header set and return the parsed JSON body.

    NOTE(review): the cookie below is a captured session for a specific site
    and will expire; requests to other hosts presumably ignore it -- confirm.
    """
    r = requests.post(url, headers={
        'Content-Type': 'application/x-www-form-urlencoded',
        'cookie':'visid_incap_2255650=4oBBaRPnQfCVoYEiTmjTq/NVAWEAAAAAQUIPAAAAAAD69PQHUoB0KplKq7/j0+gH; nlbi_2255650=CJKhHYlMm17tpKyoBzOViAAAAACDEjp3gL6bj6YL8j9XE0d/; incap_ses_893_2255650=m1tJIuDRUEp3FE/5GpNkDPRVAWEAAAAAM2KkDpvtARtZral+cMXSVw==; _gcl_au=1.1.76703404.1627477493; _gid=GA1.2.730047202.1627477493; BCSessionID=83af10b8-9488-4b7b-a3b1-3640f178dca2; categoryView=grid; _ga_S46FST9X1M=GS1.1.1627477492.1.1.1627478562.0; _ga=GA1.2.31731397.1627477493; _gat_UA-139934-1=1; _uetsid=69fc2d30efa411eb8818eb045f8760e5; _uetvid=69fc3a70efa411ebba3a23c153f6e477; .Nop.Customer=d664d529-d14a-44b1-86b3-cbf5373277b4',
        "User-Agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36"
    })
    datas = json.loads(r.text)
    return datas
def getRenderdHtmlFromUrl(url):
    """Render *url* in headless Chrome and return the parsed DOM as BeautifulSoup."""
    options = webdriver.ChromeOptions()
    # headless browser configuration for server-side scraping
    for flag in ('--headless', '--disable-gpu', 'window-size=1024,768', '--no-sandbox'):
        options.add_argument(flag)
    driver = webdriver.Chrome(chrome_options=options)
    driver.get(url)
    return BeautifulSoup(driver.page_source, "html.parser", from_encoding="utf-8")
def writeExcel(workSheet, headers, rowIndex, info):
    """Write one product row into *workSheet* at *rowIndex*, one column per header.

    Missing keys produce empty cells; values are stripped of characters
    openpyxl cannot store.  Failures on a single cell are logged and skipped
    (best-effort) without shifting the remaining columns.
    """
    cellIndex = 1
    for head in headers:
        try:
            if head in info:
                # strip openpyxl-illegal control characters before writing
                content = ILLEGAL_CHARACTERS_RE.sub(r'', info[head])
                workSheet.cell(rowIndex, cellIndex).value = content.strip()
            else:
                workSheet.cell(rowIndex, cellIndex).value = ""
        except Exception as exc:
            # was a bare `except:` that printed only the row number
            print('failed to write row {} column {}: {}'.format(rowIndex, cellIndex, exc))
        # BUG FIX: advance the column even when a cell fails -- the increment
        # used to sit inside the try, so one failure shifted every later
        # value one column to the left.
        cellIndex = cellIndex + 1
def getProductInfo(url, typeStr, products):
    """Scrape one product page and append its details to *products*.

    Each entry gets the fixed category 'Fiber Optic' plus *typeStr*, the
    product name, the full description, and any Features/Application sections
    found under <h5> headings.  Pages that fail to download are skipped.
    """
    print(str(len(products)) + url)
    html_code = getHtmlFromUrl(url)
    if len(html_code)>0:
        sope= BeautifulSoup(html_code, "html.parser",from_encoding="utf-8")
        pName = sope.find("h1", attrs={"itemprop":"name"})
        specInfos = sope.find_all("h5")
        Description = sope.find("div", attrs={"class":"full-description"})
        pInfo = {
            "link": url,
            "Product Category1": 'Fiber Optic',
            "Product Category2": typeStr,
            "Product Name": getNodeText(pName),
            "Description": getNodeText(Description)
        }
        for specInfo in specInfos:
            title = getNodeText(specInfo)
            # the section body is the element following the <h5> heading
            # (next_sibling twice to skip the intervening whitespace node)
            if title == "Features":
                pInfo["Features"] = getNodeText(specInfo.next_sibling.next_sibling)
            if title == "Application":
                pInfo["Application"] = getNodeText(specInfo.next_sibling.next_sibling)
        products.append(pInfo.copy())
def getProductList(url, typestr, products):
    """Fetch a category page and (currently) just print each anchor found.

    NOTE: *typestr* and *products* are accepted for the intended pipeline
    (getProducType -> getProductList -> getProductInfo) but unused for now.
    """
    page = getHtmlFromUrl(url)
    if len(page) == 0:
        return
    soup = BeautifulSoup(page, "html.parser", from_encoding="utf-8")
    link_area = soup.find("div", attrs={"class": "page-inner clearfix"})
    for anchor in link_area.find_all("a"):
        print(anchor)
def getProducType(url, products):
    """Walk the product-type dropdown menu and crawl each sub-category list page."""
    page = getHtmlFromUrl(url)
    if len(page) == 0:
        return
    soup = BeautifulSoup(page, "html.parser", from_encoding="utf-8")
    menu = soup.find("li", attrs={"class": "active dropdown"})
    for entry in menu.find_all("li", attrs={"class": "dropdown-submenu"}):
        anchor = entry.find("a")
        getProductList(anchor["href"], getNodeText(anchor), products)
# Output workbook: header row plus one row per scraped product.
excelFileName="lcom.xlsx"
wb = Workbook()
workSheet = wb.active
products = []
# getProductInfo("http://www.tydexoptics.com/products/optics_for_detectors_and_sensors/", '', products)
getProductList('http://www.tydexoptics.com/products/spectroscopy/','', products)
# getProductInfo("http://www.tydexoptics.com/products/optics_for_detectors_and_sensors/", '', products)
# getProductInfo("http://www.tydexoptics.com/products/optics_for_meteorology_and_climatology/" '', products)
# getProductInfo("http://www.tydexoptics.com/products/libs/", '', products)
# getProductInfo("http://www.tydexoptics.com/products/atypical_components/", '', products)
# Column order for the spreadsheet; writeExcel() looks these keys up per product dict.
headers=[
    'link','Product Category1','Product Category2','Product Name','Features','Application','Description'
]
# header row first (row 1)...
for index,head in enumerate(headers):
    workSheet.cell(1, index+1).value = head.strip()
# ...then one row per product, starting at row 2
for index,p in enumerate(products):
    writeExcel(workSheet, headers, index + 2, p)
print("flish")
wb.save(excelFileName)
{
"api_name": "http.client.client",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "http.client",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "urllib.request.build_opener",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "urlli... |
31748860310 | import numpy as np
from timeit import default_timer as timer
from numba import vectorize
@vectorize(['float32(float32, float32)'], target='cuda')
def gpu_pow(a, b):
return a ** b
@vectorize(['float32(float32, float32)'], target='parallel')
def cpu_para_pow(a, b):
return a ** b
def cpu_pow(a, b, c):
for i in range(a.size):
c[i] = a[i] ** b[i]
def cpu_test():
vec_size = 100000000
a = b = np.array(np.random.sample(vec_size), dtype=np.float32)
c = np.zeros(vec_size, dtype=np.float32)
start = timer()
cpu_pow(a, b, c)
duration = timer() - start
print(duration)
def cpu_para_test():
vec_size = 100000000
a = b = np.array(np.random.sample(vec_size), dtype=np.float32)
c = np.zeros(vec_size, dtype=np.float32)
start = timer()
c = cpu_para_pow(a, b)
duration = timer() - start
print(duration)
def gpu_test():
vec_size = 100000000
a = b = np.array(np.random.sample(vec_size), dtype=np.float32)
c = np.zeros(vec_size, dtype=np.float32)
start = timer()
c = gpu_pow(a, b)
duration = timer() - start
print(duration)
def main():
cpu_para_test()
cpu_test()
gpu_test()
if __name__ == '__main__':
main()
| Purdue-Academic-Projects/AI_Final_Project | heart_rate_ai/cuda_test/cuda_tutorial.py | cuda_tutorial.py | py | 1,237 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numba.vectorize",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numba.vectorize",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.random.sample",
... |
34622506671 | import os
import zipfile
from pathlib import Path
import warnings
from shutil import rmtree
import time
import pandas as pd
import numpy as np
import SimpleITK as sitk
from tqdm import tqdm
from segmentation_metrics import compute_segmentation_scores
from survival_metrics import concordance_index
class AIcrowdEvaluator:
    def __init__(
            self,
            ground_truth_segmentation_folder="data/ground_truth/segmentation/",
            ground_truth_survival_file="data/ground_truth/survival/hecktor2021_patient_endpoint_testing.csv",
            bounding_boxes_file="data/hecktor2021_bbox_testing.csv",
            extraction_folder="data/extraction/",
            round_number=1,
    ):
        """Evaluator for the Hecktor Challenge

        Args:
            ground_truth_folder (str): the path to the folder
                                       containing the ground truth segmentation.
            ground_truth_survival_file (str): the path to the file
                                              containing the ground truth survival time.
            bounding_boxes_file (str): the path to the csv file which defines
                                       the bounding boxes for each patient.
            extraction_folder (str, optional): the path to the folder where the
                                               extraction of the .zip submission
                                               will take place. Defaults to "data/tmp/".
                                               This folder has to be created beforehand.
            round_number (int, optional): the round number. Defaults to 1.
        """
        self.groud_truth_folder = Path(ground_truth_segmentation_folder)
        self.round = round_number
        self.extraction_folder = Path(extraction_folder)
        self.bounding_boxes_file = Path(bounding_boxes_file)
        self.gt_df = pd.read_csv(ground_truth_survival_file).set_index(
            "PatientID")

    def _evaluate_segmentation(self, client_payload, _context={}):
        """Score a segmentation submission (.zip of NIfTI masks) against the ground truth.

        Returns a dict with mean dice/recall/precision, median HD95, and a
        human-readable message about missing or unresampled patients.
        """
        submission_file_path = client_payload["submission_file_path"]
        aicrowd_submission_id = client_payload["aicrowd_submission_id"]
        aicrowd_participant_uid = client_payload["aicrowd_participant_id"]

        # unpack the submitted zip into a per-submission scratch folder
        submission_extraction_folder = self.extraction_folder / (
            'submission' + str(aicrowd_submission_id) + '/')
        submission_extraction_folder.mkdir(parents=True, exist_ok=True)
        with zipfile.ZipFile(str(Path(submission_file_path).resolve()),
                             "r") as zip_ref:
            zip_ref.extractall(str(submission_extraction_folder.resolve()))

        groundtruth_paths = [
            f for f in self.groud_truth_folder.rglob("*.nii.gz")
        ]
        bb_df = pd.read_csv(str(
            self.bounding_boxes_file.resolve())).set_index("PatientID")
        # NOTE(review): DataFrame.append is deprecated in modern pandas --
        # consider collecting dicts and building the frame once with pd.concat.
        results_df = pd.DataFrame()
        missing_patients = list()
        unresampled_patients = list()
        resampler = sitk.ResampleImageFilter()
        resampler.SetInterpolator(sitk.sitkNearestNeighbor)
        for path in tqdm(groundtruth_paths):
            patient_id = path.name[:7]
            prediction_files = [
                f
                for f in self.extraction_folder.rglob(patient_id + "*.nii.gz")
            ]
            if len(prediction_files) > 1:
                raise Exception(
                    "There is too many prediction files for patient {}".format(
                        patient_id))
            elif len(prediction_files) == 0:
                # missing patient: worst possible scores
                results_df = results_df.append(
                    {
                        "dice_score": 0,
                        "hausdorff_distance_95": np.inf,
                        "recall": 0,
                        "precision": 0,
                    },
                    ignore_index=True)
                missing_patients.append(patient_id)
                continue

            bb = np.array([
                bb_df.loc[patient_id, "x1"], bb_df.loc[patient_id, "y1"],
                bb_df.loc[patient_id, "z1"], bb_df.loc[patient_id, "x2"],
                bb_df.loc[patient_id, "y2"], bb_df.loc[patient_id, "z2"]
            ])

            image_gt = sitk.ReadImage(str(path.resolve()))
            image_pred = sitk.ReadImage(str(prediction_files[0].resolve()))

            resampler.SetReferenceImage(image_gt)
            resampler.SetOutputOrigin(bb[:3])
            voxel_spacing = np.array(image_gt.GetSpacing())
            output_size = np.round(
                (bb[3:] - bb[:3]) / voxel_spacing).astype(int)
            resampler.SetSize([int(k) for k in output_size])

            # Crop to the bonding box and/or resample to the original spacing
            spacing = image_gt.GetSpacing()
            if spacing != image_pred.GetSpacing():
                unresampled_patients.append(patient_id)
            image_gt = resampler.Execute(image_gt)
            image_pred = resampler.Execute(image_pred)
            results_df = results_df.append(
                compute_segmentation_scores(
                    sitk.GetArrayFromImage(image_gt),
                    sitk.GetArrayFromImage(image_pred),
                    spacing,
                ),
                ignore_index=True,
            )

        _result_object = {
            "dice_score": results_df["dice_score"].mean(),
            "hausdorff_distance_95":
            results_df["hausdorff_distance_95"].median(),
            "recall": results_df["recall"].mean(),
            "precision": results_df["precision"].mean(),
        }

        rmtree(str(submission_extraction_folder.resolve()))
        messages = list()
        if len(unresampled_patients) > 0:
            messages.append(
                f"The following patient(s) was/were not resampled back"
                f" to the original resolution: {unresampled_patients}."
                f"\nWe applied a nearest neighbor resampling.\n")

        if len(missing_patients) > 0:
            messages.append(
                f"The following patient(s) was/were missing: {missing_patients}."
                f"\nA score of 0 and infinity were attributed to them "
                f"for the dice score and Hausdorff distance respectively.")
        _result_object["message"] = "".join(messages)
        return _result_object

    def _evaluate_survival(self, client_payload, _context={}):
        """Score a survival submission (csv with PatientID, Prediction) via the c-index.

        Missing patients count as non-concordant; patients absent from the
        ground truth are dropped.  Raises RuntimeError on a malformed csv.
        """
        submission_file_path = client_payload["submission_file_path"]
        predictions_df = pd.read_csv(submission_file_path).set_index(
            "PatientID")
        if "Prediction" not in predictions_df.columns:
            raise RuntimeError("The 'Prediction' column is missing.")

        extra_patients = [
            p for p in predictions_df.index if p not in self.gt_df.index
        ]
        # Discard extra patient
        if len(extra_patients) > 0:
            predictions_df = predictions_df.drop(labels=extra_patients, axis=0)

        # Check for redundant entries
        if len(predictions_df.index) > len(list(set(predictions_df.index))):
            raise RuntimeError("One or more patients appear twice in the csv")

        # The following function concatenate the submission csv and the
        # ground truth and fill missing entries with NaNs. The missing
        # entries are then counted as non-concordant by the concordance_index
        # function
        df = pd.concat((self.gt_df, predictions_df), axis=1)
        missing_patients = list(df.loc[pd.isna(df['Prediction']), :].index)
        # Compute the c-index for anti-concordant prediction (e.g. risk score)
        concordance_factor = -1

        _result_object = {
            "concordance_index":
            concordance_index(
                df["Progression free survival"].values,
                concordance_factor * df["Prediction"],
                event_observed=df["Progression"],
            ),
        }

        messages = list()
        if len(missing_patients) > 0:
            # BUG FIX: this branch previously rebound `messages` to a plain
            # string, which broke the append() below whenever missing AND
            # extra patients both existed.
            messages.append(f"The following patient(s) was/were missing"
                            f" : {missing_patients}\nThey were considered as "
                            f"non-concordant")
        if len(extra_patients) > 0:
            # BUG FIX: the message previously interpolated `missing_patients`
            # instead of the dropped `extra_patients`.
            messages.append(
                f"The following patient(s) was/were dropped "
                f"(since they are not present in the test): {extra_patients}."
            )
        _result_object["message"] = "".join(messages)
        return _result_object

    def _get_evaluation_function(self, task_id, client_payload, _context={}):
        """Dispatch to the right scorer: task '1' = segmentation, '2' = survival."""
        if task_id == "1":
            return self._evaluate_segmentation(client_payload,
                                               _context=_context)
        elif task_id == "2":
            return self._evaluate_survival(client_payload, _context=_context)
        else:
            raise ValueError(f"{task_id} is not recognized.")

    def _evaluate(self, client_payload, _context={}):
        """
        `client_payload` will be a dict with (atleast) the following keys :
        - submission_file_path : local file path of the submitted file
        - aicrowd_submission_id : A unique id representing the submission
        - aicrowd_participant_id : A unique id for participant/team submitting (if enabled)
        """
        # the task to score is selected through the TASK_ID environment variable
        task_id = os.environ["TASK_ID"]
        return self._get_evaluation_function(task_id,
                                             client_payload,
                                             _context=_context)
if __name__ == "__main__":
    # Smoke-test driver: point the paths below at local ground-truth data
    # and sample submissions before running.
    ground_truth_segmentation_folder = ""
    ground_truth_survival_file = ""
    bounding_boxes_file = ""
    _client_payload = {}
    _client_payload["aicrowd_submission_id"] = 1123
    _client_payload["aicrowd_participant_id"] = 1234
    # Instantiate a dummy context
    _context = {}
    # Instantiate an evaluator
    aicrowd_evaluator = AIcrowdEvaluator(
        ground_truth_segmentation_folder=ground_truth_segmentation_folder,
        ground_truth_survival_file=ground_truth_survival_file,
        bounding_boxes_file=bounding_boxes_file,
    )
    # Evaluate Survival
    _client_payload[
        "submission_file_path"] = ""
    os.environ["TASK_ID"] = "2"
    start = time.process_time()
    result = aicrowd_evaluator._evaluate(_client_payload, _context)
    print(f"Time to compute the sample for the survival"
          f" task: {time.process_time() - start} [s]")
    print(f"The c-index is {result['concordance_index']}")
    # BUG FIX: `is not ""` compares object identity, not equality (and is a
    # SyntaxWarning on Python >= 3.8); compare with != instead.
    if result["message"] != "":
        print(f"The message is:\n {result['message']}")
    # Evaluate Segmentation
    os.environ["TASK_ID"] = "1"
    _client_payload[
        "submission_file_path"] = ""
    start = time.process_time()
    result = aicrowd_evaluator._evaluate(_client_payload, _context)
    print(f"Time to compute the sample for the segmentation"
          f" task: {time.process_time() - start} [s]")
    print(f"The results are:\n"
          f" - average dice score {result['dice_score']}\n"
          f" - median hausdorff distance {result['hausdorff_distance_95']}\n"
          f" - average recall {result['recall']}\n"
          f" - average precision {result['precision']}")
    if result["message"] != "":
        print(f"The message is:\n {result['message']}")
| voreille/hecktor | src/aicrowd_evaluator/evaluator.py | evaluator.py | py | 11,240 | python | en | code | 65 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_nu... |
75273737705 | import tkinter
import pyperclip
sirka = 500
vyska = 450
data = []
size = []
c = tkinter.Canvas(width=sirka, height=vyska)
c.pack()
def copy():
    """Join every grid row into a tuple of binary literals, print it and
    place the text on the clipboard."""
    cptxt = '('
    for i in range(size[1]):
        cptxt += '0b' + ''.join(str(e) for e in data[i]) + ','
        if i == size[1] - 1:
            # Swap the trailing comma of the last row for the closing paren.
            cptxt = cptxt[:-1] + ')'
    print(cptxt)
    # BUG FIX: the clipboard used to receive the hard-coded placeholder
    # 'ahojsvet' instead of the generated tuple text.
    pyperclip.copy(cptxt)
def print_data():
    """Render each grid row as a binary literal next to the table."""
    for row in range(size[1]):
        bits = ''.join(map(str, data[row]))
        c.create_text(350, 30 * row + 100, fill='black',
                      text='0b' + bits, font='Timer 15')
def clear():
    """Reset every cell to 0 and redraw the whole canvas."""
    global data
    # Wipe the canvas before regenerating the empty table.
    c.delete('all')
    cols, rows = size
    data = [[0] * cols for _ in range(rows)]
    table(*size)
    print_data()
def clickon(event):
    """Toggle the grid cell under a left click and redraw the canvas."""
    global data
    x, y = event.x, event.y
    # Ignore clicks outside the drawn table area.
    inside = 10 < x < 260 and 10 < y < 410
    if not inside:
        return False
    col = (x - 10) // 50
    row = (y - 10) // 50
    data[row][col] = 1 - data[row][col]
    c.delete('all')
    table(*size)
    print_data()
def table(columns, rows):
    """Draw the grid: set cells are black, cleared cells dark blue."""
    for r in range(rows):
        for col in range(columns):
            fill = 'black' if data[r][col] == 1 else 'darkblue'
            c.create_rectangle(col * 50 + 10, r * 50 + 10,
                               col * 50 + 60, r * 50 + 60,
                               outline='white', tag='box', fill=fill)
def new_screen(columns, rows):
    """Initialise the grid state, then draw the table and control buttons."""
    global data, size
    size = [columns, rows]
    data = [[0] * columns for _ in range(rows)]
    # Draw the empty table.
    table(columns, rows)
    # Control buttons below the table.
    tkinter.Button(text='Clear', command=clear).place(x=300, y=420)
    tkinter.Button(text='Copy', command=copy).place(x=250, y=420)
    print_data()
# Build the initial 5x8 grid UI and route left clicks to the cell toggler.
new_screen(5,8)
c.bind('<Button-1>', clickon)
| branislavblazek/notes | Python/projekty/char_creator.py | char_creator.py | py | 1,990 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tkinter.Canvas",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pyperclip.copy",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"lin... |
18483593432 | import random
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageFilter
from torchvision import datasets, transforms
class RandomGaussianBlur(object):
    """Blur a PIL image with a random radius in [0, 1), half the time."""

    def __call__(self, image):
        if random.random() >= 0.5:
            # Skip augmentation for this sample.
            return image
        radius = random.random()
        return image.filter(ImageFilter.GaussianBlur(radius=radius))
class RandomSaltPepperBlur(object):
    """Add salt-and-pepper noise to a PIL image.

    Each pixel is independently corrupted with probability ``prob``; a
    corrupted pixel becomes pure black (0) or pure white (255) with equal
    chance.
    """

    def __call__(self, image, prob=0.05):
        pixels = np.array(image)
        # Vectorized rewrite of the original per-pixel Python loop: draw one
        # corruption mask and one salt/pepper value per pixel.
        corrupt = np.random.random(pixels.shape[:2]) < prob
        noise = np.random.randint(0, 2, pixels.shape[:2]).astype(pixels.dtype) * 255
        if pixels.ndim == 3:
            # Colour input: the original overwrote every channel of the pixel
            # with the same scalar, so broadcast the noise across channels.
            pixels[corrupt] = noise[corrupt][:, None]
        else:
            pixels[corrupt] = noise[corrupt]
        return Image.fromarray(pixels)
def Mnist(data_dir="data", input_size=(224, 224), train=True):
    """Build the MNIST dataset with train/eval transform pipelines.

    Training adds rotation, blur and salt-and-pepper augmentation;
    evaluation only resizes and converts to a tensor.
    """
    if train:
        pipeline = [
            transforms.RandomRotation(30),
            transforms.Resize(input_size),
            RandomGaussianBlur(),
            RandomSaltPepperBlur(),
            transforms.ToTensor(),
        ]
    else:
        pipeline = [
            transforms.Resize(input_size),
            transforms.ToTensor(),
        ]
    return datasets.MNIST(data_dir,
                          train=train,
                          transform=transforms.Compose(pipeline),
                          download=True)
if __name__ == '__main__':
    import cv2
    from torch.utils.data import DataLoader
    # Smoke test: load the eval split, then display and dump the first few
    # images (expects a local "demo/" directory to exist).
    dataset = Mnist(train=False)
    mm = DataLoader(dataset)
    for i, sample in enumerate(mm):
        if i > 5: break;
        img = sample[0].numpy().reshape((224, 224))
        plt.imshow(img)
        # ToTensor yields pixels in [0, 1], so rescale for 8-bit PNG output.
        cv2.imwrite('demo/img_{}.png'.format(i), img*255)
        plt.show()
| TWSFar/GhostNet-MNIST | datasets/mnist.py | mnist.py | py | 1,738 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "random.random",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFilter.GaussianBlur",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFilter",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "random.ran... |
41826934545 | # import sqlite library
import sqlite3

# Connect to the database file (created automatically on first use).
conn = sqlite3.connect("first.db")
cursor = conn.cursor()

# Promote two programs to the Master's level ('' escapes the apostrophe).
sql = """UPDATE programs
SET program_level = 'Master''s'
WHERE program_name IN ('Anthropology', 'Biology')"""
cursor.execute(sql)

# Enrol a batch of students, referencing their programs by id.
sql = """INSERT INTO students(student, id_program) VALUES
('Josefina', 3),
('Cecilia', 2),
('Nico', 2),
('Sarah', 1)
"""
cursor.execute(sql)

conn.commit()
# BUG FIX: the connection used to be left open; release it explicitly.
conn.close()
| Ngue-Um/DHRI-June2018-Courses-databases | scripts/challenge.py | challenge.py | py | 430 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 5,
"usage_type": "call"
}
] |
74273514024 | import numpy as np
import jax
from jax import lax, random, numpy as jnp
import flax
from flax.core import freeze, unfreeze
from flax import linen as nn
from flax import optim
from typing import Any, Callable, Sequence, Optional
import pickle
from tensorflow import keras
file_prefix = "struct"
activation = nn.relu # activation function
M = 300 # width parameter
L = 20 # depth
alpha = 10e-5 # learning rate
epochs = 3000
kernel_steps = [0,1,10,20,50,100,200,300,400,500,600,700,800,900,1000,1200,1500,1700,2000,2500,3000] # epochs at which the NTK is computed
var_w_s = [1.0,2.0,2.2] # variance parameter \sigma_w^2
var_b = 0. # variance parameter \sigma_b^2
# custom fully-connected network (MLP) class
class MLP(nn.Module):
    """Fully-connected network with Gaussian N(0, sigma^2/fan_in) initialization."""
    widths: Sequence[int] # We need to specify all the layer width (including input and output widths)
    v_w: float # variance parameter \sigma_w^2
    v_b: float # variance parameter \sigma_b^2
    activation: Callable # activation function (the same in all the hidden layers)
    kernel_init: Callable = jax.nn.initializers.normal # Gaussian initialization
    bias_init: Callable = jax.nn.initializers.normal
    def setup(self):
        # One Dense layer per consecutive pair of widths; weights are drawn
        # with std sqrt(v_w / fan_in) and biases with std sqrt(v_b).
        self.layers = [nn.Dense(self.widths[l+1],
                                kernel_init = self.kernel_init(jnp.sqrt(self.v_w/self.widths[l])),
                                bias_init = self.bias_init(jnp.sqrt(self.v_b))
                               ) for l in range(len(self.widths)-1)]
    def __call__(self, inputs):
        # Apply the activation after every layer except the (linear) output one.
        x = inputs
        for i, lyr in enumerate(self.layers[:-1]):
            x = lyr(x)
            x = self.activation(x)
        x = self.layers[-1](x)
        return x
# the NTK on a single pair of samples (x1,x2)
def K(model):
    """Return a jitted function computing the empirical NTK entry
    K(x1, x2, params) = <df(x1)/dtheta, df(x2)/dtheta>."""
    def K(x1,x2,params):  # inner K deliberately shadows the outer name
        f1 = jax.jacobian(lambda p: model.apply(p,x1))(params)
        f2 = jax.jacobian(lambda p: model.apply(p,x2))(params)
        # Elementwise product of the two Jacobian pytrees, then a full sum.
        # NOTE(review): jax.tree_multimap is deprecated/removed in newer JAX
        # (jax.tree_util.tree_map replaces it) — confirm the pinned version.
        leaves, struct = jax.tree_util.tree_flatten(jax.tree_multimap(jnp.multiply,f1,f2))
        return sum([jnp.sum(leaf) for leaf in leaves])
    return jax.jit(K)
# the NTK matrix (vectorization of K)
def K_matr(model):
    """Return a jitted function mapping (X, Y, params) to the full NTK Gram
    matrix by double-vmapping the pairwise kernel over both sample axes."""
    _K = K(model)
    def K_matr(X,Y,params):
        f = lambda x1,x2: _K(x1,x2,params)
        return jax.vmap(jax.vmap(f,(None,0)),(0,None))(X,Y)
    return jax.jit(K_matr)
# MSE loss function
def mse(x_batched, y_batched):
    """Return a jitted MSE-over-batch loss as a function of the parameters.

    NOTE(review): squared_error reads the module-level global `model`, not a
    closure argument — the loss silently follows whatever `model` currently
    is when first traced; confirm this is intended.
    """
    def mse(params):
        # MSE on a single pair (x,y)
        def squared_error(x, y):
            pred = model.apply(params, x)
            return jnp.inner(y-pred, y-pred)/2.0
        return jnp.mean(jax.vmap(squared_error)(x_batched,y_batched), axis=0) #Vectorized MSE
    return jax.jit(mse)
# Load and preprocess MNIST
n_class = 10
ker_size_per_class = 10
mnist_n0 = 28*28
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], mnist_n0)
x_test = x_test.reshape(x_test.shape[0], mnist_n0)
# choose subset of data with ker_size_per_class samples from each class
ind = []
for k in range(ker_size_per_class):
    ind += list(np.random.choice(np.argwhere(y_train==k).ravel(), size=ker_size_per_class, replace=False))
# Rescale pixel intensities to [0, 1] and one-hot encode the labels.
x_train, x_test = x_train/255.,x_test/255.
y_train, y_test = keras.utils.to_categorical(y_train, 10), keras.utils.to_categorical(y_test, 10)
x_ker = x_train[ind] # We compute the NTK only on a subset of samples
y_ker = y_train[ind]
# ------- Training: one run per weight-variance setting \sigma_w^2 -------
key = random.PRNGKey(0)
subkeys = jax.random.split(key, num=len(var_w_s))
widths = [mnist_n0]+[M]*L+[n_class]
optimizer_def = optim.Adam(learning_rate=alpha) # Define Adam optimizer
loss = mse(x_train, y_train) # train loss function
loss_grad_fn = jax.value_and_grad(loss) # function to get loss value and gradient
test_loss = mse(x_test, y_test) # test loss function
for var_w, subkey in zip(var_w_s, subkeys):
    model = MLP(widths = widths, v_w=var_w, v_b=var_b, activation = activation) # Define MLP model
    params = model.init(subkey, x_train) # Initialize model
    optimizer = optimizer_def.create(params) # Create optimizer with initial parameters
    K_t = []
    loss_t = []
    test_loss_t = []
    K_func = K_matr(model)
    for i in range(epochs+1):
        loss_val, grad = loss_grad_fn(optimizer.target) # Get gradient and train loss value
        test_loss_val = test_loss(optimizer.target) # Get test loss value
        test_loss_t.append(test_loss_val)
        loss_t.append(loss_val)
        # Compute the NTK for the chosen epochs
        if i in kernel_steps:
            print('Loss step {}: '.format(i), loss_val, test_loss_val)
            K_t.append(K_func(x_ker,x_ker,optimizer.target))
        optimizer = optimizer.apply_gradient(grad) # Update optimizer parameters
    # Save the results
    # NOTE(review): the open() handles below are never closed explicitly;
    # CPython's GC closes them, but a `with` block would be safer.
    pickle.dump(jnp.array(K_t), open( "ntk_dynamics/"+file_prefix+"_w"+str(int(var_w*10))+"M"+str(M)+"L"+str(L), "wb" ) )
    pickle.dump(jnp.array(loss_t), open( "ntk_dynamics/"+file_prefix+"_loss_w"+str(int(var_w*10))+"M"+str(M)+"L"+str(L), "wb" ) )
    pickle.dump(jnp.array(test_loss_t), open( "ntk_dynamics/"+file_prefix+"_test_loss_w"+str(int(var_w*10))+"M"+str(M)+"L"+str(L), "wb" ) )
| mselezniova/ntk_beyond_limit | ntk_train_dynamics.py | ntk_train_dynamics.py | py | 5,198 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flax.linen.relu",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "flax.linen",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "flax.linen.Module",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "flax.linen",
... |
73381842664 |
import matplotlib.pyplot as plt
import numpy as np
# Load and display the original image.
Imagen='33.jpg'
I=plt.imread(Imagen)
plt.title('Imagen original')
plt.imshow(I)
plt.show()
# Luminance weights (ITU-R BT.601) for the RGB -> grayscale conversion.
rgb = [0.2989, 0.5870, 0.1140]
ig = np.dot(I[...,:3], rgb)
plt.imshow(ig,cmap='gray')
plt.axis('off')
plt.savefig('b&w.png',bbox_inches='tight',pad_inches=0,dpi=1200)
# NOTE(review): this title is set after savefig, so the saved PNG has no title.
plt.title('Imagen a escala de grises')
plt.show()
#ACCEDER AL RGB
# Gather the per-channel intensities of the first third of the image rows.
# BUG FIX: the original innermost loop iterated once per channel and appended
# the same values every time, tripling each list; the names were also
# crossed — index 1 is the GREEN channel and index 2 is BLUE.
subset = np.asarray(I)[:round(len(I) / 3), :, :3]
rojo = subset[..., 0].ravel().tolist()
verde = subset[..., 1].ravel().tolist()
azul = subset[..., 2].ravel().tolist()
print(len(verde))
print(len(rojo))
print(len(azul))
# One intensity histogram per colour channel, saved at high resolution.
for valores, color_plot, canal, archivo in (
        (rojo, 'r', 'Red', 'Histogramarojo.png'),
        (verde, 'y', 'Green', 'Histogra_verde.png'),
        (azul, 'b', 'Blue', 'Histogra_azul.png')):
    # Bin edges span the observed intensity range of the channel.
    intervalos = range(round(min(valores)), round(max(valores)) + 2)
    plt.hist(x=valores, bins=intervalos, color=color_plot, rwidth=0.85)
    plt.xticks([0, 50, 100, 150, 200, 255], [0, 50, 100, 150, 200, 255])
    plt.title('Histogram of ' + canal)
    plt.xlabel('Intensity values')
    plt.ylabel('Number of pixels')
    plt.savefig(archivo, dpi=1200)
    plt.show()
| BrianCobianS/Capitulo4-Python | rgb.py | rgb.py | py | 1,806 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.imread",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matpl... |
70064817065 | import jsonschema
from API.validation import error_format
class Validate_AddChannel():
    """Validate the payload of the add-channel endpoint against a JSON schema."""

    def __init__(self, data):
        self.data = data
        # The payload must be an object with required string fields.
        self.schema = {
            "type": "object",
            "properties": {
                "name": {"type": "string"},
                "type": {"type": "string"},
            },
            "required": ["name", "type"]
        }

    def validate_data(self):
        """Return True when valid, otherwise a formatted error response."""
        try:
            jsonschema.validate(instance=self.data, schema=self.schema)
            return True
        except jsonschema.exceptions.ValidationError as error:
            # If the error is from jsonSchema there's been a validation error so we can give a good error output
            return error_format.FormatValidationError(error).schema_validation_error()
        except Exception as e:
            # Otherwise, something else has happened, and we need to figure out what...
            print(e)
            # BUG FIX: the method name was doubled ("unknown_errorunknown_error").
            return error_format.UnknownError(str(e)).unknown_error()
| OStillman/ODACShows | API/validation/add_channel_validation.py | add_channel_validation.py | py | 1,067 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "jsonschema.validate",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "jsonschema.exceptions",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "API.validation.error_format.FormatValidationError",
"line_number": 22,
"usage_type": "call... |
8451867993 | from collections import deque
from collections import namedtuple
PairedTasks = namedtuple('PairedTasks', ('task_1', 'task_2'))


def compute_task_assignment(task_durations: list):
    """Greedily pair the shortest remaining task with the longest one.

    Sorting and pairing opposite ends minimizes the longest pair duration
    (optimum for the two-tasks-per-worker assignment problem).

    Returns a list of PairedTasks. The input list is left unmodified
    (the original sorted it in place and also computed an unused
    total-time value with a deque; both removed).
    """
    ordered = sorted(task_durations)  # work on a copy; don't mutate the caller's list
    return [
        PairedTasks(ordered[i], ordered[~i]) for i in range(len(ordered) // 2)
    ]
if __name__ == '__main__':
print(compute_task_assignment([3, 8, 1, 4,])) | kashyapa/coding-problems | epi/revise-daily/6_greedy_algorithms/1_compute_optimum_task_assignment.py | 1_compute_optimum_task_assignment.py | py | 594 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.namedtuple",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 8,
"usage_type": "call"
}
] |
13549248356 | import random
import Crypto
from Crypto.PublicKey import RSA
from Crypto import Random
import ast
import pyDes
from Crypto.Cipher import DES
from config import max_input_bit_length
import time
def OT_transfer(m0,m1,bit):
    """Run one 1-of-2 oblivious transfer (receiver side) and return m_bit.

    Bob obtains m1 when ``bit == 1`` and m0 otherwise.  NOTE(review): this is
    Python 2 / PyCrypto code (``long``, raw ``encrypt``/``decrypt``) using
    textbook RSA without padding, and this process reads *both* of Alice's
    key files — acceptable as a course simulation only, not secure in practice.
    """
    m0=int(m0)
    m1=int(m1)
    # Two random blinding values standing in for Alice's published x0/x1.
    x0=random.randint(0, 2**1024-1)
    x1=random.randint(0, 2**1024-1)
    #loading public key
    publickeyfile=""
    with open('alice-public.pem', 'r') as f:
        publickeyfile=f.read()
    # print(publickeyfile)
    n=0
    with open('alice-publickey.n.pem', 'r') as f:
        n=f.read()
    # print(n)
    n=long(n)
    publickey=RSA.importKey(publickeyfile)
    with open('alice-private.pem', 'r') as f:
        privatekeyfile=f.read()
    privatekey=RSA.importKey(privatekeyfile)
    #bob choose random k
    k=random.randint(0, 2**32-1)
    # send x1, so choose 1 instead of index 0
    if(bit==1):
        v=(x1+publickey.encrypt(long(k), 32)[0])%(n)
    else:
        v=(x0+publickey.encrypt(long(k), 32)[0])%(n)
    # Unblind with both candidates; only the chosen branch recovers k exactly.
    k0=privatekey.decrypt(v-x0)%n
    k1=privatekey.decrypt(v-x1)%n
    m00=m0+k0
    m11=m1+k1
    if(bit==1):
        mf=m11-k
    else:
        mf=m00-k
    return mf
# --- Bob's driver script (Python 2) ---
start = time.clock()  # NOTE(review): time.clock() was removed in Python 3.8
n=max_input_bit_length
#bob runs this file
#using rsa to generating private keys and public keys
# random_generator = Random.new().read
# key = RSA.generate(1024, random_generator) #generate pub and priv key, that is e and d,n is the
# publickey = key.publickey() # pub key export for exchange, stands for e
# encrypted = publickey.encrypt('encrypt this message', 32)
# print 'encrypted message: ', encrypted #ciphertext
# decrypted = key.decrypt(ast.literal_eval(str(encrypted)))
# print 'decrypted message: ', decrypted
# private_pem = key.exportKey()
# with open('bob-private.pem', 'w') as f:
#     f.write(private_pem)
# public_pem = key.publickey().exportKey()
# with open('bob-public.pem', 'w') as f:
#     f.write(public_pem)
# public_n = key.n
# with open('bob-publickey.n.pem', 'w') as f:
#     f.write(public_n)
# Load the random wire keys Alice published for the comparator circuit.
f=open('alice_ran_keys.txt')
alice_ran_keys=f.read().split()
f.close()
# bob_bit=0b10
print("please input bob's data ")
bob_bit=int(input())
print("bob_input: ",bob_bit)
# Obliviously fetch the wire keys matching Bob's input bits.
# NOTE(review): the next two calls pass bob_bit>>1 rather than
# (bob_bit>>1)&0b1; any value other than exactly 1 falls through to the
# bit-0 branch inside OT_transfer — confirm this is intended.
bob_input=[]
bob_input.append(OT_transfer(alice_ran_keys[8],alice_ran_keys[9],bob_bit&0b1))
bob_input.append(OT_transfer(alice_ran_keys[2],alice_ran_keys[3],bob_bit>>1))
bob_input.append(OT_transfer(alice_ran_keys[0],alice_ran_keys[1],bob_bit>>1))
bob_bit=bob_bit>>2
#augmented
# for i in range(n-2):
# print("bob_bit ",bob_bit)
if(n>2):
    # print("n is bigger than 2")
    for i in range(n-2):
        bob_input.append(OT_transfer(alice_ran_keys[36+i*22],alice_ran_keys[37+i*22],bob_bit&0b1))
        bob_input.append(OT_transfer(alice_ran_keys[38+i*22],alice_ran_keys[39+i*22],bob_bit&0b1))
        bob_bit=bob_bit>>1
# print("in ot test")
# print(alice_ran_keys[36])
# print(bob_input[3])
# print(alice_ran_keys[38])
# print(bob_input[4])
# Persist the obtained input keys for the circuit evaluator.
with open('inputsb.txt', 'w') as f:
    for i in range(len(bob_input)):
        f.write(str(bob_input[i])+'\n')
end = time.clock()
print("Time for doing obvious transfer ", end-start)
| makeapp007/cryptography | mpc/code/nbit-comparator/ot.py | ot.py | py | 2,974 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random.randint",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "Crypto.PublicKey.RSA.importKey",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "Crypto.P... |
73290357865 | import json
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt

# Read the JSON file of flattened 28x28 grayscale images.
with open('./47_data.json') as f:
    data = json.load(f)
# Convert the nested lists to a NumPy array for easy reshaping.
images = np.array(data)
# Create a 2x4 grid of subplots to display 8 images.
fig, axs = plt.subplots(2, 4)
# Show one image per subplot.
for i, ax in enumerate(axs.flatten()):
    # Reshape the flat 784-vector back to its 28x28 form.
    # (A leftover debug print of the array type was removed here.)
    image = images[i].reshape(28, 28)
    ax.imshow(image, cmap='gray')
    ax.axis('off')
# Tighten the spacing and display the figure.
plt.tight_layout()
plt.show()
| LazySheeeeep/Trustworthy_AI-Assignments | 1/testing_images_showcase.py | testing_images_showcase.py | py | 696 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.use",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"... |
21130491503 | import psycopg2
from config import host, user, password, db_name
connection = None  # defined up front so the finally block cannot hit a NameError
try:
    # Connect to the existing database.
    connection = psycopg2.connect(
        host=host,
        user=user,
        password=password,
        database=db_name
    )
    connection.autocommit = True
    # Print the server version as a connectivity check.
    with connection.cursor() as cursor:
        cursor.execute(
            'select version();'
        )
        print(f'Server version: {cursor.fetchone()}')
    # Create the size table.
    with connection.cursor() as cursor:
        cursor.execute(
            '''CREATE TABLE IF NOT EXISTS size(
            id serial PRIMARY KEY,
            name varchar(50));
            '''
        )
        print(f'Таблица size успешно создана!')
    # Create the category table.
    # BUG FIX: this block used to appear twice verbatim; the redundant second
    # creation was a no-op thanks to IF NOT EXISTS, but pure noise.
    with connection.cursor() as cursor:
        cursor.execute(
            '''CREATE TABLE IF NOT EXISTS category(
            id serial PRIMARY KEY,
            name varchar(50));
            '''
        )
        print(f'Таблица category успешно создана!')
    # Create the discount table.
    with connection.cursor() as cursor:
        cursor.execute(
            '''CREATE TABLE IF NOT EXISTS discount(
            id serial PRIMARY KEY,
            value integer);
            '''
        )
        print(f'Таблица discount успешно создана!')
    # Create the goods table (references category and discount).
    with connection.cursor() as cursor:
        cursor.execute(
            '''CREATE TABLE IF NOT EXISTS goods(
            id serial PRIMARY KEY,
            name varchar(50),
            description text,
            calory integer,
            protein integer ,
            fat integer,
            weight integer ,
            price integer ,
            category_id integer references category(id),
            discount_id integer references discount(id));
            '''
        )
        print(f'Таблица goods успешно создана!')
    # Create the goods_size join table.
    with connection.cursor() as cursor:
        cursor.execute(
            '''CREATE TABLE IF NOT EXISTS goods_size(
            id serial PRIMARY KEY,
            goods_id integer references goods(id),
            size_id integer references size(id));
            '''
        )
        # BUG FIX: the success message used to say "discount" here.
        print(f'Таблица goods_size успешно создана!')
except Exception as e:
    print('Ошибка в процессе выполнения PostgresQL', e)
finally:
    if connection:
        # Cursors are already closed by their `with` blocks; only the
        # connection itself still needs releasing.
        connection.close()
print('[INFO] PostgreSQL соединение закрыто!') | Tosic48/pizzeria | main2.py | main2.py | py | 3,728 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "psycopg2.connect",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "config.host",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "config.user",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "config.password",
"line_num... |
71038363623 | import collections
import numpy as np
from math import pi
def log_gaussian_prob(obs, mu, sig):
    """Log-density of a Gaussian N(mu, sig^2) evaluated at obs.

    log N(obs; mu, sig) = -(obs - mu)^2 / (2 sig^2) - 0.5 * log(2 * pi * sig^2)
    """
    num = (obs - mu) ** 2
    denom = 2 * sig ** 2
    # BUG FIX: the normalization term enters with a NEGATIVE sign; the
    # original *added* 0.5*log(2*pi*sig^2), flipping the variance penalty.
    log_prob = (-num / denom) - 0.5 * (np.log(2) + np.log(pi) + 2 * np.log(sig))
    return log_prob
class GNB(object):
    """Gaussian Naive Bayes classifier over (s, d, s_dot, d_dot) features."""

    def __init__(self):
        self.possible_labels = ['left', 'keep', 'right']
        self.is_trained = False
        # log P(label), populated by train()
        self._log_prior_by_label = collections.defaultdict(float)
        # per-label Gaussian parameters; order in the list is [s, d, s_dot, d_dot]
        self._label_to_feature_means = {key: [] for key in self.possible_labels}
        self._label_to_feature_stds = {key: [] for key in self.possible_labels}

    def _get_label_counts(self, labels):
        """Return a mapping label -> number of occurrences."""
        label_to_counts = collections.defaultdict(int)
        for label in labels:
            label_to_counts[label] += 1
        return label_to_counts

    def _group_data_by_label(self, data, labels):
        """Bucket the observations by their label."""
        label_to_data = {label: [] for label in self.possible_labels}
        for label, data_point in zip(labels, data):
            label_to_data[label].append(data_point)
        return label_to_data

    def train(self, data, labels):
        """
        Trains the classifier with N data points and labels.
        INPUTS
        data - array of N observations
          - Each observation is a tuple with 4 values: s, d,
            s_dot and d_dot.
          - Example : [
                [3.5, 0.1, 5.9, -0.02],
                [8.0, -0.3, 3.0, 2.2],
                ...
            ]
        labels - array of N labels
          - Each label is one of "left", "keep", or "right".
        """
        # prior: p(label); likelihood: p(feature_1, ..., feature_n | label)
        N = len(labels)
        label_to_counts = self._get_label_counts(labels)
        for key in self.possible_labels:
            self._log_prior_by_label[key] = np.log(label_to_counts[key]) - np.log(N)
        label_to_data = self._group_data_by_label(data, labels)
        for label, observations in label_to_data.items():
            observations = np.array(observations)
            # NOTE(review): a zero per-feature std (constant feature) would
            # make predict() divide by zero — confirm training data varies.
            self._label_to_feature_means[label] = np.mean(observations, axis=0)
            self._label_to_feature_stds[label] = np.std(observations, axis=0)
        self.is_trained = True

    def predict(self, observation):
        """
        Once trained, this method is called and expected to return
        a predicted behavior for the given observation.
        INPUTS
        observation - a 4 tuple with s, d, s_dot, d_dot.
          - Example: [3.5, 0.1, 8.5, -0.2]
        OUTPUT
        A label representing the best guess of the classifier. Can
        be one of "left", "keep" or "right".
        """
        if not self.is_trained:
            print("Classifier has not been trained! ")
            print("Please train it before predicting!")
            return
        MAP_estimates = dict()
        for label in self.possible_labels:
            # Work in log space: the product of likelihoods becomes a sum.
            log_product = self._log_prior_by_label[label]
            for i, feature_val in enumerate(observation):
                log_product += log_gaussian_prob(feature_val,
                                                 self._label_to_feature_means[label][i],
                                                 self._label_to_feature_stds[label][i])
            MAP_estimates[label] = log_product
        # The estimates are unnormalized log posteriors; the shared evidence
        # term is omitted because it does not affect the argmax.
        # BUG FIX: the running maximum used to start at 0, but log
        # probabilities are typically negative, so no label ever beat it and
        # the method always returned 'None'. Take the argmax directly.
        return max(MAP_estimates, key=MAP_estimates.get)
| Xiaohong-Deng/algorithms | AIML/gaussianNaiveBayes/classifier.py | classifier.py | py | 3,774 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.log",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "collections.defaultdict",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict... |
29673546988 | import boto3
import gzip
import json
import os
from math import ceil
from pymongo import MongoClient
def load_file(filepath):
    """Parse a two-column TSV file into an {id: value} dictionary."""
    mapping = {}
    with open(filepath, 'r') as handle:
        for raw_line in handle.read().splitlines():
            fields = raw_line.split('\t')
            mapping[fields[0]] = fields[1]
    return mapping
def get_document_dict(remote_bucket, remote_filename):
    """Download a gzipped TSV from cloud storage, unpack it, and parse it."""
    with open('/tmp/source.gz', 'wb') as dest:
        gcp_client.download_fileobj(remote_bucket, remote_filename, dest)
    # Decompress to a plain TSV on disk, then reuse the standard parser.
    with gzip.open('/tmp/source.gz', 'rb') as gz:
        payload = gz.read()
    with open('/tmp/source.tsv', 'wb') as out:
        out.write(payload)
    return load_file('/tmp/source.tsv')
def check_existence(document_dict):
    """Report documents absent from the collection, grouped by source file."""
    id_list = ['PMID:' + doc_id for doc_id in document_dict.keys()]
    print(id_list[:10])
    print(len(id_list))
    found_ids = []
    # Query the collection in batches of 10000 ids.
    batches = ceil(len(id_list) / 10000)
    for batch in range(batches):
        lo = batch * 10000
        hi = min(lo + 10000, len(id_list))
        chunk = [doc['document_id']
                 for doc in collection.find({'document_id': {'$in': id_list[lo:hi]}})]
        found_ids.extend(chunk)
        print(f'{len(chunk)} | {len(found_ids)}')
    unfound_ids = set(id_list) - set(found_ids)
    print(len(unfound_ids))
    missing_dict = {}
    for unfound_id in unfound_ids:
        doc_id = unfound_id.replace('PMID:', '')
        if doc_id not in document_dict:
            print('not sure what to do with this ID: ' + doc_id)
            continue
        missing_dict.setdefault(document_dict[doc_id], []).append(doc_id)
    return missing_dict
def check_nonexistence(document_dict):
    """Report documents still present in the collection, grouped by file."""
    id_list = ['PMID:' + doc_id for doc_id in document_dict.keys()]
    print(id_list[:10])
    print(len(id_list))
    found_ids = []
    # Query the collection in batches of 10000 ids.
    batches = ceil(len(id_list) / 10000)
    for batch in range(batches):
        lo = batch * 10000
        hi = min(lo + 10000, len(id_list))
        chunk = [doc['document_id']
                 for doc in collection.find({'document_id': {'$in': id_list[lo:hi]}})]
        found_ids.extend(chunk)
        print(f'{len(chunk)} | {len(found_ids)}')
    print(len(found_ids))
    found_dict = {}
    for found_id in found_ids:
        doc_id = found_id.replace('PMID:', '')
        if doc_id not in document_dict:
            print('not sure what to do with this ID:' + doc_id)
            continue
        found_dict.setdefault(document_dict[doc_id], []).append(doc_id)
    return found_dict
def lambda_handler(event, context):
    """AWS Lambda entry point: validate the payload, connect, run the check.

    Returns either the per-file report dict or an (error message, status)
    tuple when configuration or the payload is incomplete.
    """
    body = json.loads(event['body']) if 'body' in event else event
    if os.environ and 'connection_string' in os.environ:
        client = MongoClient(os.environ['connection_string'])
    else:
        return 'Could not get database connection information', 500
    if 'source' not in body:
        return 'No source information provided', 400
    source_info = body['source']
    # Module-level handles used by the helper functions.
    global gcp_client
    global collection
    gcp_client = boto3.client(
        's3',
        region_name='auto',
        endpoint_url='https://storage.googleapis.com',
        aws_access_key_id=source_info['hmac_key_id'],
        aws_secret_access_key=source_info['hmac_secret']
    )
    collection = client['test']['documentMetadata']
    main_dict = get_document_dict(source_info['bucket'], source_info['filepath'])
    # Deleted-document dumps are checked for lingering entries instead.
    if 'deleted' in source_info['filepath']:
        return check_nonexistence(main_dict)
    return check_existence(main_dict)
| edgargaticaCU/DocumentMetadataAPI | data_checker.py | data_checker.py | py | 3,767 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "gzip.open",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 84,
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.