seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
42495755382 | #First attempt to connect to ethereum mainnet via Infura API
import json
import web3
from web3 import Web3, HTTPProvider
try:
w3 = Web3(Web3.HTTPProvider("https://mainnet.infura.io/dPotOByPqLlLN3nx14Pq"))
print('w3 HTTPProvider call success')
except: print('w3 HTTPProvider call failure')
block = w3.eth.getBlock('latest')
uncles = block["uncles"]
#for element in block: print(element, block[element])
blockNumber = block["number"]
txnCount = w3.eth.getBlockTransactionCount(blockNumber)
print("Block:", blockNumber, " Number of transactions:", txnCount, "Miner: ", block["miner"])
print("Number of Uncles:", len(uncles))
minerReward = 3.0
uncleList = list()
for uncle in uncles:
#print("uncle:", w3.toHex(uncle))
uBlock = w3.eth.getBlock(uncle)
minerReward += (uBlock["number"] + 8 - blockNumber) * 3 / 8
print("Miner Reward: ", minerReward)
txnHashes = block["transactions"]
# Extract cumulativeGasUsed from last transaction in the block
lastTxnHash = txnHashes[txnCount - 1]
cumTotal = 0.0
lastTxnR = w3.eth.getTransactionReceipt(lastTxnHash)
if lastTxnR != None:
cumTotal = lastTxnR["cumulativeGasUsed"]
gwei = w3.toWei(cumTotal, 'gwei')
cumTotal = w3.fromWei(gwei, 'ether')
print("Total Gas Consumed", cumTotal)
minerReward += float(cumTotal)
print("Miner Reward: ", minerReward)
#for txnHash in txnHashes:
# txn = w3.eth.getTransaction(txnHash)
# wei = txn["value"]
# value = w3.fromWei(wei, 'ether')
# print(txn["from"], txn["to"], value)
| KedarJo/ethScan | ethHello.py | ethHello.py | py | 1,494 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "web3.Web3",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "web3.Web3.HTTPProvider",
"line_number": 7,
"usage_type": "call"
}
] |
20528489084 | import pygame
WIDTH = 600
HEIGHT = 700
class Start:
def __init__(self):
pygame.init()
self.display = pygame.display.set_mode((WIDTH, HEIGHT))
self.background = pygame.Surface(self.display.get_size()).convert()
self.words = pygame.Surface(self.display.get_size()).convert()
self.font = pygame.font.SysFont('comicsansms',30)
self.fonty = pygame.font.SysFont('lucidaconsole',70)
self.play = self.font.render('Play',True,(0,255,0))
self.title = self.fonty.render('Memorize',True,(0,0,255))
self.emoji = self.fonty.render('Emoji',True,(255,0,0))
self.tape = pygame.image.load('tape.png').convert_alpha()
self.smart = pygame.image.load('smartemoji.png').convert_alpha()
self.tape = pygame.transform.scale(self.tape,(50,50))
self.smart = pygame.transform.scale(self.smart,(150,150))
self.mouse = pygame.mouse.get_pos()
letter = 'Memorize'
self.x = 150
for c in letter:
self.text = self.fonty.render(c,True,(0,0,255))
pygame.time.delay(50)
self.display.blit(self.text,(self.x,200))
self.words.blit(self.display,(self.x,350))
self.x += 40
pygame.display.flip()
pygame.time.delay(200)
self.display.blit(self.background,(0,0))
pygame.display.flip()
self.display.blit(self.play,(400,500))
pygame.draw.rect(self.display, (200,200,200),(110,100,230,80))
self.display.blit(self.emoji,(120,100))
self.display.blit(self.tape,(315,80))
self.display.blit(self.tape,(95,145))
self.display.blit(self.title,(150,200))
self.display.blit(self.smart,(150,400))
pygame.display.flip()
def choice(self):
done = False
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
return False
elif event.type == pygame.MOUSEMOTION:
self.mouse = pygame.mouse.get_pos()
if 400<self.mouse[0]<470 and 500<self.mouse[1]<540:
pygame.draw.rect(self.display, (255,255,255),(400,500,70,45))
self.display.blit(self.play,(400,500))
pygame.display.flip()
else:
pygame.draw.rect(self.display, (0,0,0),(400,500,70,45))
self.display.blit(self.play,(400,500))
pygame.display.flip()
elif event.type == pygame.MOUSEBUTTONDOWN:
if pygame.mouse.get_pressed()[0] and 400<self.mouse[0]<470 and 500<self.mouse[1]<550:
return True
pygame.display.flip()
pygame.quit()
| dlam15/Emoji-Memorize | Start.py | Start.py | py | 2,930 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.init",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pygame.Surface",... |
30510489975 | # coding=utf-8
import hashlib
from falcon.errors import HTTPBadRequest
from ultros_site.base_route import BaseRoute
__author__ = "Gareth Coles"
class ProfileRoute(BaseRoute):
route = "/profile"
def on_get(self, req, resp):
user = req.context["user"]
if not user:
raise HTTPBadRequest()
self.render_template(
req, resp, "users/profile.html",
user=user,
avatar="https://www.gravatar.com/avatar/{}".format(self.gravatar_hash(user.email))
)
def gravatar_hash(self, email: str):
email = email.strip()
email = email.lower()
email = email.encode("UTF-8")
return hashlib.md5(email).hexdigest()
| UltrosBot/Ultros-site | ultros_site/routes/users/profile.py | profile.py | py | 719 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "ultros_site.base_route.BaseRoute",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "falcon.errors.HTTPBadRequest",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "hashlib.md5",
"line_number": 30,
"usage_type": "call"
}
] |
31871823537 | from http.server import HTTPServer, SimpleHTTPRequestHandler, BaseHTTPRequestHandler, test
import json
import io, shutil,urllib
from raidtool import get_models
host = ('localhost', 8888)
class CORSRequestHandler(SimpleHTTPRequestHandler):
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Methods', 'GET')
self.send_header('Cache-Control', 'no-store, no-cache, must-revalidate')
return super(CORSRequestHandler, self).end_headers()
def do_GET(self):
self.queryString = urllib.parse.unquote(self.path.split('?',1)[1])
params = urllib.parse.parse_qs(self.queryString)
print(params)
PID = int(params['pid'][0])
EC = int(params['ec'][0])
IVs = list(map(lambda x: int(x), params['IVs'][0].split(",")))
usefilters = False if int(params['usefilters'][0]) == 0 else True
MaxResults = int(params['maxResults'][0])
flawlessiv = int(params['flawlessiv'][0])
HA = int(params['ha'][0])
RandomGender = int(params['randomGender'][0])
IsShinyType = False if int(params['isShinyType'][0]) == 0 else True
data = {
'result': 'this is a test',
'filter': get_models(
PID,
EC,
IVs,
usefilters,
MaxResults,
flawlessiv,
HA,
RandomGender,
IsShinyType
)
}
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(json.dumps(data).encode())
if __name__ == '__main__':
server = HTTPServer(host, CORSRequestHandler)
print("Starting server, listen at: %s:%s" % host)
server.serve_forever()
| a1992012015/find-tool | tool/api.py | api.py | py | 1,861 | python | en | code | 14 | github-code | 6 | [
{
"api_name": "http.server.SimpleHTTPRequestHandler",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "urllib.parse.unquote",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_na... |
27259885900 | """We are the captains of our ships, and we stay 'till the end. We see our stories through.
"""
"""515. Find Largest Value in Each Tree Row
"""
from collections import deque
class TreeNode:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
class Solution:
def largestValues(self, root):
if not root:
return []
largest_values = []
queue = deque()
queue.append(root)
queue.append(None)
row_max = float('-inf')
while queue:
node = queue.popleft()
if node:
row_max = max(row_max, node.val)
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
else:
largest_values.append(row_max)
row_max = float('-inf')
if queue:
queue.append(None)
return largest_values
| asperaa/back_to_grind | Trees/largestValues.py | largestValues.py | py | 1,007 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "collections.deque",
"line_number": 22,
"usage_type": "call"
}
] |
13588905096 | import numpy as np
from sklearn.decomposition import PCA
# Calculate the average of the list
def calculate_list_avg(lst):
if len(lst) == 0:
avg_list = 0.0
else:
avg_list = sum(lst) / len(lst)
return avg_list
# Extract the information for each sample
def extract_msg(mrna_exp_mat, tf_exp_mat, mirna_exp_mat, mrna_id_list, tf_id_list, mirna_id_list,
mrna_to_mrna_dict, tf_to_mrna_dict, mirna_to_mrna_dict_for_mrna,
mirna_to_mrna_dict_for_mirna, mirna_to_tf_dict, tf_to_mirna_dict):
mrna_num = len(mrna_id_list)
tf_num = len(tf_id_list)
mirna_num = len(mirna_id_list)
sample_num = mrna_exp_mat.shape[1]
mrna_feature_mat = np.zeros((mrna_num, sample_num))
mrna_to_mrna_feature_mat = np.zeros((mrna_num, sample_num))
tf_to_mrna_feature_mat = np.zeros((mrna_num, sample_num))
mirna_to_mrna_feature_mat_for_mrna = np.zeros((mrna_num, sample_num))
mirna_feature_mat = np.zeros((mirna_num, sample_num))
mirna_to_mrna_feature_mat_for_mirna = np.zeros((mirna_num, sample_num))
mirna_to_tf_feature_mat = np.zeros((mirna_num, sample_num))
tf_to_mirna_feature_mat = np.zeros((mirna_num, sample_num))
# extract the useful information for each sample
for sample_index in range(sample_num):
mrna_index = 0
mirna_index = 0
# mRNA/TF/miRNA expression data
# Format:{ID:exp}
mrna_id_exp_dict = {}
tf_id_exp_dict = {}
mirna_id_exp_dict = {}
# Read the mRNA expression data save in the dictionary
for i in range(mrna_num):
mrna_id = mrna_id_list[i]
mrna_exp = float(mrna_exp_mat[i][sample_index])
mrna_id_exp_dict[mrna_id] = mrna_exp
for i in range(tf_num):
tf_id = tf_id_list[i]
tf_exp = float(tf_exp_mat[i][sample_index])
tf_id_exp_dict[tf_id] = tf_exp
for i in range(mirna_num):
mirna_id = mirna_id_list[i]
mirna_exp = float(mirna_exp_mat[i][sample_index])
mirna_id_exp_dict[mirna_id] = mirna_exp
# mRNA feature matrix
for mrna in mrna_id_list:
mrna_exp = mrna_id_exp_dict[mrna]
mrna_to_mrna_exp_list = []
tf_to_mrna_exp_list = []
mirna_to_mrna_exp_list_for_mrna = []
for i in mrna_to_mrna_dict[mrna]:
mrna_to_mrna_exp_list.append(mrna_id_exp_dict[i])
for i in tf_to_mrna_dict[mrna]:
tf_to_mrna_exp_list.append(tf_id_exp_dict[i])
for i in mirna_to_mrna_dict_for_mrna[mrna]:
mirna_to_mrna_exp_list_for_mrna.append(mirna_id_exp_dict[i])
# calculate the average of the list
avg_mrna_to_mrna_exp = calculate_list_avg(mrna_to_mrna_exp_list)
avg_tf_to_mrna_exp = calculate_list_avg(tf_to_mrna_exp_list)
avg_mirna_to_mrna_exp_for_mrna = calculate_list_avg(mirna_to_mrna_exp_list_for_mrna)
mrna_feature_mat[mrna_index, sample_index] = mrna_exp
mrna_to_mrna_feature_mat[mrna_index, sample_index] = avg_mrna_to_mrna_exp
tf_to_mrna_feature_mat[mrna_index, sample_index] = avg_tf_to_mrna_exp
mirna_to_mrna_feature_mat_for_mrna[mrna_index, sample_index] = avg_mirna_to_mrna_exp_for_mrna
mrna_index += 1
# mRNA feature matrix
for mirna in mirna_id_list:
mirna_to_mrna_exp_list_for_mirna = []
mirna_to_tf_exp_list = []
tf_to_mirna_exp_list = []
mirna_exp = mirna_id_exp_dict[mirna]
for i in mirna_to_mrna_dict_for_mirna[mirna]:
mirna_to_mrna_exp_list_for_mirna.append(mrna_id_exp_dict[i])
for i in mirna_to_tf_dict[mirna]:
mirna_to_tf_exp_list.append(tf_id_exp_dict[i])
for i in tf_to_mirna_dict[mirna]:
tf_to_mirna_exp_list.append(tf_id_exp_dict[i])
# calculate the average of the list
avg_mirna_to_mrna_exp_for_mirna = calculate_list_avg(mirna_to_mrna_exp_list_for_mirna)
avg_mirna_to_tf_exp = calculate_list_avg(mirna_to_tf_exp_list)
avg_tf_to_mirna_exp = calculate_list_avg(tf_to_mirna_exp_list)
mirna_feature_mat[mirna_index, sample_index] = mirna_exp
mirna_to_mrna_feature_mat_for_mirna[mirna_index, sample_index] = avg_mirna_to_mrna_exp_for_mirna
mirna_to_tf_feature_mat[mirna_index, sample_index] = avg_mirna_to_tf_exp
tf_to_mirna_feature_mat[mirna_index, sample_index] = avg_tf_to_mirna_exp
mirna_index += 1
return mrna_feature_mat, mrna_to_mrna_feature_mat, tf_to_mrna_feature_mat, mirna_to_mrna_feature_mat_for_mrna, \
mirna_feature_mat, mirna_to_mrna_feature_mat_for_mirna, mirna_to_tf_feature_mat, tf_to_mirna_feature_mat
# Use PCA to reduce dimension
def get_dim(total_ratio, temp_mat):
pca = PCA(n_components=total_ratio, svd_solver='full')
pca.fit_transform(temp_mat)
main_dim = pca.n_components_
return main_dim
# Use PCA to reduce dimension
def reduce_dim(dim, temp_mat):
pca = PCA(n_components=dim)
reduce_dim_mat = pca.fit_transform(temp_mat)
return reduce_dim_mat.T
| yiangcs001/CSPRV | extract_features.py | extract_features.py | py | 5,370 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.zeros",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": ... |
38601541912 | #!/usr/bin/python3
import argparse
import sys
import json
import dballe
__version__ = '@PACKAGE_VERSION@'
def main(inputfiles, out):
importer = dballe.Importer("BUFR")
out.write('{"type":"FeatureCollection", "features":[')
for f in inputfiles:
with importer.from_file(f) as fp:
is_first = True
for msgs in fp:
for msg in msgs:
for cur in msg.query_data():
lev = cur["level"]
tr = cur["trange"]
if not is_first:
out.write(",")
else:
is_first = False
var = cur["variable"]
json.dump({
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [cur.enqd("lon"), cur.enqd("lat")],
},
"properties": {
"lon": cur.enqi("lon"),
"lat": cur.enqi("lat"),
"datetime": cur["datetime"].strftime("%Y-%m-%dT%H:%M:%SZ"),
"network": cur["report"],
"ident": cur["ident"],
"level_t1": lev.ltype1 if lev is not None else None,
"level_v1": lev.l1 if lev is not None else None,
"level_t2": lev.ltype2 if lev is not None else None,
"level_v2": lev.l2 if lev is not None else None,
"trange_pind": tr.pind if tr is not None else None,
"trange_p1": tr.p1 if tr is not None else None,
"trange_p2": tr.p2 if tr is not None else None,
"bcode": var.code,
"value": var.get(),
}
}, out)
out.write("]}")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Convert BUFR files to GeoJSON format")
parser.add_argument("inputfile", nargs="*", metavar="FILE", help="BUFR file")
parser.add_argument('-V', '--version', action='version',
version='%(prog)s ' + __version__)
args = parser.parse_args()
if not args.inputfile:
inputfiles = [sys.stdin]
else:
inputfiles = args.inputfile
main(inputfiles, sys.stdout)
| ARPA-SIMC/bufr2json | bufr2json.py | bufr2json.py | py | 2,639 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dballe.Importer",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"li... |
15983166378 | # Mjolnir
from ...infrastrcutures.dynamo.infrastructure import DynamodbInfrastructure
# Third party
from boto3.dynamodb.conditions import Key
from decouple import config
class DynamodbRepository:
infra = DynamodbInfrastructure
@classmethod
async def get_items(cls, key: str, value: str) -> list:
async with cls.infra.get_dynamodb_resource() as dynamodb_resource:
table = await dynamodb_resource.Table(config('AWS_TABLE_NAME'))
result = await table.query(
KeyConditionExpression=Key(key).eq(value)
)
return result['Items']
@classmethod
async def put_items(cls, item: dict):
async with cls.infra.get_dynamodb_resource() as dynamodb_resource:
table = await dynamodb_resource.Table(config('AWS_TABLE_NAME'))
await table.put_item(
Item=item
)
| vinireeis/Mjolnir | src/repositories/dynamodb/repository.py | repository.py | py | 895 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "infrastrcutures.dynamo.infrastructure.DynamodbInfrastructure",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "decouple.config",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "boto3.dynamodb.conditions.Key",
"line_number": 19,
"usage_ty... |
7759575517 | import serial
class SerialParameters:
def __init__(self, port=None, baudrate=9600, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE, timeout=None, xonxoff=False, rtscts=False,
write_timeout=None, dsrdtr=False, inter_byte_timeout=None, exclusive=None,
local_echo=False, appendCR=False, appendLF=False):
self.port = port
self.baudrate = baudrate
self.bytesize = bytesize
self.parity = parity
self.stopbits = stopbits
self.timeout = timeout
self.xonxoff = xonxoff
self.rtscts = rtscts
self.write_timeout = write_timeout
self.dsrdtr = dsrdtr
self.inter_byte_timeout = inter_byte_timeout
self.exclusive = exclusive
self.readTextIndex = "read_line"
self.readBytes = 1
self.readUntil = ''
self.DTR = False
self.maxSignalRate = 10 # Hz
self.Kennbin = ""
self.local_echo = local_echo
self.appendCR = appendCR
self.appendLF = appendLF
| timhenning1997/Serial-Port-Monitor | SerialParameters.py | SerialParameters.py | py | 1,086 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "serial.EIGHTBITS",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "serial.PARITY_NONE",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "serial.STOPBITS_ONE",
"line_number": 6,
"usage_type": "attribute"
}
] |
17668930312 | #!/usr/bin/env python3
# Compare event boundary timing in HMMs from cortical Yeo ROIs
# to timing in hand(RA)-labeled events
import os
import tqdm
import brainiak.eventseg.event
from scipy.fftpack import fft,ifft
from scipy.stats import zscore, norm, pearsonr
from HMM_settings import *
from event_comp import ev_conv, Pro_ev_conv, child_ev_conv
ev_conv = child_ev_conv
ev_conv_perm = ev_conv[1:]
task='DM'
nTR=750
nbins = len(bins)
nROI = len(ROIl)
xcorrx = np.concatenate([np.arange(-nTR+2,0)*TR,np.arange(nTR-1)*TR])
savefile = HMMpath+'HMM_vs_hand_child_'
dE_k = {key:{key:[] for key in bins} for key in ROIl}
dE_k_corr = np.zeros((nROI,nbins))
bin_corr = np.zeros(nROI)
#dE_k_p = np.zeros((nPerm+1,nROI,nbins))
event_bounds = {key:{key:[] for key in bins} for key in ROIl}
matchz_mat = np.zeros((nROI,nbins))
for seed in tqdm.tqdm(seeds):
for r,roi_short in tqdm.tqdm(enumerate(ROIl)):
roi=HMMsavedir+seed+'/'+roi_short+'.h5'
k = dd.io.load(roi,'/best_k')
D = [dd.io.load(roidir+seed+'/'+roi_short+'.h5','/'+task+'/bin_'+str(b)+'/D') for b in bins]
hmm = brainiak.eventseg.event.EventSegment(n_events=k)
hmm.fit([np.mean(d,axis=0).T for d in D])
for bi,b in enumerate(bins):
dE_k[roi_short][b] = np.diff(np.dot(hmm.segments_[bi], np.arange(k)+1))
dE_k_corr[r,bi],_ = pearsonr(dE_k[roi_short][b],ev_conv_perm)
bin_corr[r],_ = pearsonr(dE_k[roi_short][0],dE_k[roi_short][4])
dd.io.save(savefile+'_'+seed+'.h5',{'dE_k_corr':dE_k_corr, 'dE_k':dE_k, 'bin_corr':bin_corr})
| samsydco/HBN | HMM_vs_hand.py | HMM_vs_hand.py | py | 1,502 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "event_comp.ev_conv",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "event_comp.child_ev_conv",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "event_comp.ev_conv",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "tqdm.t... |
20066269029 | import os
import pandas as pd
import properties
from audio import audio_utils as au
from files import file_utils as fu
min_fragment_duration_ms = 400
def __build_syncmap_sentences(chapter_audio, chapter_syncmap):
sentences = []
for fragment in chapter_syncmap['fragments']:
start_time = float(fragment['begin']) * 1000
end_time = float(fragment['end']) * 1000
if (end_time - start_time) > min_fragment_duration_ms:
sentences.append({
"audio": chapter_audio[start_time:end_time],
"text": fragment['lines'][0]
})
return sentences
def __export_dataset_audio_sample(audio_sample, dataset_chapter_index, syncmap_fragment_index):
audio_sample.export(
fu.build_dataset_audio_path(dataset_chapter_index, syncmap_fragment_index),
format="wav"
)
def __append_to_metadata(metadata_df, dataset_chapter_index, fragment_index, fragment_text, fragment_audio):
return metadata_df.append(
pd.DataFrame(
[{
'filename': fu.build_dataset_audio_filename(dataset_chapter_index, fragment_index),
'text': fragment_text,
'up_votes': 0,
'down_votes': 0,
'age': 0,
'gender': 'male',
'accent': '',
'duration': fragment_audio.duration_seconds
}],
columns=properties.csv_sample_columns
)
)
def __build_chapter_dataframe(dataframe, sentences, dataset_chapter_index):
for syncmap_fragment_index, sentence in enumerate(sentences):
trimmed_audio = au.trim_silence(sentence['audio'])
__export_dataset_audio_sample(trimmed_audio, dataset_chapter_index, syncmap_fragment_index)
dataframe = __append_to_metadata(dataframe,
dataset_chapter_index,
syncmap_fragment_index,
sentence['text'],
trimmed_audio)
return dataframe
def __build_metadata_and_export_audio_samples(dataframe, book_name, book_chapter_index, dataset_chapter_index):
chapter_audio = au.load_mp3_audio(book_name, book_chapter_index)
syncmap = fu.load_syncmap(book_name, book_chapter_index)
sentences = __build_syncmap_sentences(chapter_audio, syncmap)
dataframe = __build_chapter_dataframe(dataframe, sentences, dataset_chapter_index)
return dataframe
def __export_metadata(dataframe):
dataframe.to_csv(fu.build_dataset_metadata_path(),
sep='|', encoding='utf-8', index=False
)
def run():
os.makedirs(fu.build_dataset_audio_dir(), exist_ok=True)
df = pd.DataFrame(columns=properties.csv_sample_columns)
dataset_chapter_index = 1
for book in properties.book_list:
print("Exporting book \'{:s}\'.".format(book))
for book_chapter_index in range(1, properties.chapter_count_in[book] + 1):
print("Exporting chapter {:d}...".format(book_chapter_index))
df = __build_metadata_and_export_audio_samples(df, book, book_chapter_index, dataset_chapter_index)
dataset_chapter_index += 1
__export_metadata(df)
if __name__ == "__main__":
run()
| arnasRad/speech_dataset | export_dataset.py | export_dataset.py | py | 3,322 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "files.file_utils.build_dataset_audio_path",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "files.file_utils",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 34,
"usage_type": "call"
},
{
"api_n... |
37552127134 | from utils.utils import getLinesOfFile
def getPriority(char: str):
asciiVal = ord(char[0])
if(asciiVal>=97 and asciiVal<=122):
# lettera minuscola
return asciiVal-96
else:
#lettera maiuscola
return asciiVal - 65 + 27
def findLetterInBothString(s1,s2):
for char in s1:
if char in s2:
return char
return "ERROR"
def findLetterInAllString(s1,s2,s3):
for char in s1:
if char in s2 and char in s3:
return char
return "ERROR"
class Rucksack:
def __init__(self, row:str):
self.rucksack = row
self.firstCompartment = row[:len(row)//2]
self.secondCompartment = row[len(row)//2:]
if __name__ == '__main__':
rucksacks = [Rucksack(elem) for elem in getLinesOfFile('input.txt')]
priorities = [getPriority(findLetterInBothString(elem.firstCompartment, elem.secondCompartment)) for elem in rucksacks]
print(f"sum of priorities is {sum(priorities)}")
groups = [getLinesOfFile('input.txt')[n:n+3] for n in range(0, len(rucksacks), 3)]
priorities2 = [getPriority(findLetterInAllString(*group)) for group in groups]
print(f"sum of priorities of badges is {sum(priorities2)}")
| liuker97/adventOfCode2022 | src/day3/day3.py | day3.py | py | 1,216 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "utils.utils.getLinesOfFile",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "utils.utils.getLinesOfFile",
"line_number": 34,
"usage_type": "call"
}
] |
32925752157 | import scrapy
import os
import wget
class BlogSpider(scrapy.Spider):
name = 'blogspider'
start_urls = ['https://www.va.gov/vdl/application.asp?appid=6']
def parse(self, response):
try:
link='https://www.va.gov/vdl/'
for title in response.xpath('//tr'):
sect=response.xpath('//*[@id="tier4innerContent"]/p').css('::text').get().replace("Section","")
pack=response.xpath('//*[@id="tier4innerContent"]/h2[2]').css('::text').get()
cnt=0
doc="<td></td>"
pdf="<td></td>"
for title1 in title.xpath('td'):
#print(title.xpath('td').css('::text').get())
if cnt==0:
titl=title1.css('::text').get()
if cnt==3:
for title2 in title1.css('::text'):
if title2.get()=="PDF":
pdf='<td><a href="' + link + title1.xpath('a').xpath('@href').extract()[0] + '">Link</a></td>'
elif title2.get()=="DOCX":
doc='<td><a href="' + link + title1.xpath('a').xpath('@href').extract()[1] + '">Link</a></td>'
print('<tr><td>' + sect + '</td><td>' + pack + '</td><td>' + titl + '</td>' + doc + pdf + '</tr>\n')
cnt=cnt+1
except:
print("")
try:
for next_page in response.xpath('//td/a'):
yield response.follow(next_page, self.parse)
except:
print("")
| RamSailopal/VA-Markup | scrape3.py | scrape3.py | py | 1,537 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "scrapy.Spider",
"line_number": 4,
"usage_type": "attribute"
}
] |
19243874206 | import sys
from pathlib import Path
import environ
PROJECT_DIR = Path(__file__).resolve().parent
ROOT_DIR = PROJECT_DIR.parent
# Environment
ENV_FILE = "/etc/purldb/.env"
if not Path(ENV_FILE).exists():
ENV_FILE = ROOT_DIR / ".env"
env = environ.Env()
environ.Env.read_env(str(ENV_FILE))
# Security
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("ALLOWED_HOSTS", default=[".localhost", "127.0.0.1", "[::1]"])
# SECURITY WARNING: do not run with debug turned on in production
DEBUG = env.bool("PURLDB_DEBUG", default=False)
PURLDB_REQUIRE_AUTHENTICATION = env.bool(
"PURLDB_REQUIRE_AUTHENTICATION", default=False
)
# SECURITY WARNING: do not run with debug turned on in production
DEBUG_TOOLBAR = env.bool("PURLDB_DEBUG_TOOLBAR", default=False)
PURLDB_PASSWORD_MIN_LENGTH = env.int("PURLDB_PASSWORD_MIN_LENGTH", default=14)
# SCANCODE.IO
SCANCODEIO_URL = env.str("SCANCODEIO_URL", "")
SCANCODEIO_API_KEY = env.str("SCANCODEIO_API_KEY", "")
# PurlDB
PURLDB_LOG_LEVEL = env.str("PURLDB_LOG_LEVEL", "INFO")
# Application definition
INSTALLED_APPS = (
# Local apps
# Must come before Third-party apps for proper templates override
'clearcode',
'clearindex',
'minecode',
'matchcode',
'packagedb',
# Django built-in
"django.contrib.auth",
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
"django.contrib.humanize",
# Third-party apps
'django_filters',
'rest_framework',
'rest_framework.authtoken',
)
MIDDLEWARE = (
"django.middleware.security.SecurityMiddleware",
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'purldb_project.urls'
WSGI_APPLICATION = "purldb_project.wsgi.application"
SECURE_PROXY_SSL_HEADER = env.tuple(
"SECURE_PROXY_SSL_HEADER", default=("HTTP_X_FORWARDED_PROTO", "https")
)
# API
DATA_UPLOAD_MAX_NUMBER_FIELDS = env.int(
"DATA_UPLOAD_MAX_NUMBER_FIELDS", default=2048
)
# Database
DATABASES = {
'default': {
'ENGINE': env.str('PACKAGEDB_DB_ENGINE', 'django.db.backends.postgresql'),
'HOST': env.str('PACKAGEDB_DB_HOST', 'localhost'),
'NAME': env.str('PACKAGEDB_DB_NAME', 'packagedb'),
'USER': env.str('PACKAGEDB_DB_USER', 'packagedb'),
'PASSWORD': env.str('PACKAGEDB_DB_PASSWORD', 'packagedb'),
'PORT': env.str('PACKAGEDB_DB_PORT', '5432'),
'ATOMIC_REQUESTS': True,
}
}
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
# Templates
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
"DIRS": [str(PROJECT_DIR.joinpath("templates"))],
"APP_DIRS": True,
'OPTIONS': {
"debug": DEBUG,
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
"django.template.context_processors.static",
],
},
},
]
# Login
LOGIN_REDIRECT_URL = "/"
LOGOUT_REDIRECT_URL = "/"
# Passwords
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
"OPTIONS": {
"min_length": PURLDB_PASSWORD_MIN_LENGTH,
},
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Testing
# True if running tests through `./manage test or pytest`
IS_TESTS = any(clue in arg for arg in sys.argv for clue in ("test", "pytest"))
# Cache
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
"LOCATION": "default",
}
}
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"simple": {
"format": "{levelname} {message}",
"style": "{",
},
},
"handlers": {
"null": {
"class": "logging.NullHandler",
},
"console": {
"class": "logging.StreamHandler",
"formatter": "simple",
},
},
"loggers": {
"scanpipe": {
"handlers": ["null"] if IS_TESTS else ["console"],
"level": PURLDB_LOG_LEVEL,
"propagate": False,
},
"django": {
"handlers": ["null"] if IS_TESTS else ["console"],
"propagate": False,
},
# Set PURLDB_LOG_LEVEL=DEBUG to display all SQL queries in the console.
"django.db.backends": {
"level": PURLDB_LOG_LEVEL,
},
},
}
# Internationalization
LANGUAGE_CODE = "en-us"
TIME_ZONE = env.str("TIME_ZONE", default="UTC")
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
STATIC_ROOT = '/var/purldb/static/'
STATICFILES_DIRS = [
PROJECT_DIR / 'static',
]
# Third-party apps
# Django restframework
REST_FRAMEWORK_DEFAULT_THROTTLE_RATES = {'anon': '3600/hour', 'user': '10800/hour'}
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': ('rest_framework.authentication.TokenAuthentication',),
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAuthenticated',),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
'rest_framework.renderers.AdminRenderer',
),
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
'rest_framework.filters.SearchFilter',
),
'DEFAULT_THROTTLE_CLASSES': [
'packagedb.throttling.StaffUserRateThrottle',
'rest_framework.throttling.AnonRateThrottle',
'rest_framework.throttling.UserRateThrottle',
],
'DEFAULT_THROTTLE_RATES': REST_FRAMEWORK_DEFAULT_THROTTLE_RATES,
'EXCEPTION_HANDLER': 'packagedb.throttling.throttled_exception_handler',
'DEFAULT_PAGINATION_CLASS': 'packagedb.api_custom.PageSizePagination',
# Limit the load on the Database returning a small number of records by default. https://github.com/nexB/vulnerablecode/issues/819
"PAGE_SIZE": 20,
}
if not PURLDB_REQUIRE_AUTHENTICATION:
REST_FRAMEWORK["DEFAULT_PERMISSION_CLASSES"] = (
"rest_framework.permissions.AllowAny",
)
if DEBUG_TOOLBAR:
INSTALLED_APPS += ("debug_toolbar",)
MIDDLEWARE += ("debug_toolbar.middleware.DebugToolbarMiddleware",)
DEBUG_TOOLBAR_PANELS = (
"debug_toolbar.panels.history.HistoryPanel",
"debug_toolbar.panels.versions.VersionsPanel",
"debug_toolbar.panels.timer.TimerPanel",
"debug_toolbar.panels.settings.SettingsPanel",
"debug_toolbar.panels.headers.HeadersPanel",
"debug_toolbar.panels.request.RequestPanel",
"debug_toolbar.panels.sql.SQLPanel",
"debug_toolbar.panels.staticfiles.StaticFilesPanel",
"debug_toolbar.panels.templates.TemplatesPanel",
"debug_toolbar.panels.cache.CachePanel",
"debug_toolbar.panels.signals.SignalsPanel",
"debug_toolbar.panels.logging.LoggingPanel",
"debug_toolbar.panels.redirects.RedirectsPanel",
"debug_toolbar.panels.profiling.ProfilingPanel",
)
INTERNAL_IPS = [
"127.0.0.1",
]
# Active seeders: each active seeder class need to be added explictly here
ACTIVE_SEEDERS = [
'minecode.visitors.maven.MavenSeed',
]
| nexB/purldb | purldb_project/settings.py | settings.py | py | 7,976 | python | en | code | 23 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "environ.Env",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "environ.Env.read_env",
"line... |
342809739 | from collections import OrderedDict
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
import re
df = pd.read_csv('pd_url_list_short.csv') #df 변수로 csv 파일을 읽어옵니다.
#기존에 수동으로 입력하던 크롤링 범위를 start와 end로 지정해줬습니다.(클래스 만들때 입력)
class GetText(object):
def __init__(self, ulist, start, end): #나중에 ulist 부분에는 앞에서 정의한 df를 넣어줍니다.
self.ulist = ulist
self.start = start
self.end = end
def wine_info(self): #wine_dict는 id, name, production 등등을 key로 갖는 사전.
wine_dict = OrderedDict() # 각각의 key는 리스트를 value로 갖습니다.
wine_dict['id'] = []
wine_dict['name'] = []
wine_dict['production1'] = []
wine_dict['production2'] = []
wine_dict['production3'] = []
wine_dict['production4'] = []
wine_dict['type'] = []
wine_dict['alc'] = []
wine_dict['producer'] = []
wine_dict['varieties'] = []
wine_dict['bestfor'] = []
wine_dict['sweetness'] = []
wine_dict['body'] = []
wine_dict['tastingnote'] = []
for i in range(self.start, self.end): # 크롤링할 범위 설정(wine_code가 아니라 인덱스 번호)
url = self.ulist.iloc[i]['URL'] # self.ulist가 dataframe 형식이므로 iloc 이용해서 url을 가져옵니다.
res = requests.get(url)
soup = BeautifulSoup(res.content)
idnum = re.search(r'\d{5}', url).group() #wine_code부터 크롤링 시작
wine_dict['id'].append(idnum)
try:
li0 = soup.find('li', attrs = {'class' : 'WineEndName'}) #예외처리 해줄 것
wine_name = li0.get_text()
wine_dict['name'].append(wine_name)
except:
wine_dict['name'].append('None')
try:
li1 = soup.find('li', attrs = {'class' : 'WineProduction'})
a = li1.find_all('a')
for i in range(4):
if i <= len(a) -1 :
wine_dict['production{}'.format(i+1)].append(a[i].get_text())
else :
wine_dict['production{}'.format(i+1)].append('None')
except:
wine_dict['production1'].append('None')
wine_dict['production2'].append('None')
wine_dict['production3'].append('None')
wine_dict['production4'].append('None')
try:
li1_1 = soup.find('li', attrs = {'class' : 'WineInfo'})
words = li1_1.get_text().strip()
wine_dict['type'].append(re.search(r'^\w+', words).group())
except:
wine_dict['type'].append('None')
try:
li = soup.find('li', attrs = {'class' : 'WineInfo'})
aic = re.search(r'AIC[.\d]+', li.get_text().strip())
if not aic :
wine_dict['alc'].append('None')
else :
wine_dict['alc'].append(aic.group())
except:
wine_dict['alc'].append('None')
try:
li2 = soup.find('li', attrs = {'class' : 'Winery'})
producer = li2.a.get_text()
reproducer = re.sub(r'\s', ' ', producer)
wine_dict['producer'].append(reproducer)
except:
wine_dict['producer'].append('None')
try:
li3 = soup.find('li', attrs = {'class' : 'Varieties'})
varieties = ''
for var in li3.find_all('a') :
varieties += var.get_text()
wine_dict['varieties'].append(varieties)
except:
wine_dict['varieties'].append('None')
try:
li4 = soup.find('li', attrs = {'class' : 'BestFor'})
bestfor = li4.get_text()
wine_dict['bestfor'].append(bestfor.strip())
except:
wine_dict['bestfor'].append('None')
try :
li6 = soup.find('li', attrs = {'class' : 'Sweetness'})
px = li6.find_all('img')[1]['style']
wine_dict['sweetness'].append(re.search(r'\d+', px).group())
except :
wine_dict['sweetness'].append('None')
try :
li7 = soup.find('li', attrs = {'class' : 'Body'})
px = li7.find_all('img')[1]['style']
wine_dict['body'].append(re.search(r'\d+', px).group())
except :
wine_dict['body'].append('None')
try:
ul = soup.find('ul', attrs = {'class' : 'TastingnoteList'})
note = ul.get_text().strip()
subnote = re.sub(r'\s', ' ', note) #정규표현식으로 \s(공백?)을 그냥 띄어쓰기로 바꿔줬습니다.
wine_dict['tastingnote'].append(subnote) #(\s 형식 중에 공백이 아닌 문자도 있던데 그부분이 저장시
except: #문제를 일으키는것 같아서요)
wine_dict['tastingnote'].append('None')
wine_df = pd.DataFrame(wine_dict) # 사전 형식의 wine_dict를 dataframe 형식의 wine_df로 바꿔줍니다.
return wine_df
#엑셀로 저장하는 것이 문제이므로 500개씩 저장을 시도하고 오류가 나면 다음 500개를 저장하게 코드를 짰습니다.
#0~4000번째까지 긁는 코드입니다.
i=0
while i<4000:
wine2 = GetText(df,i,i+500) # 시작과 끝이 루프를 돌 때마다 변하게 설정
result = wine2.wine_info()
try:
writer = pd.ExcelWriter('./wine{}_{}.xlsx'.format(i,i+500), engine=None) #파일 이름도 자동으로 변경하게 설정
result.to_excel(writer, sheet_name='1', encoding ='utf-8') # 결과를 엑셀로 저장
writer.save()
i += 500 #500개를 크롤링 후 저장을 끝내면 i가 500씩 증가
except:
i += 500 #오류가 나면 바로 i가 500만큼 증가해서 다음 500개에 대한 크롤링을 진행합니다.
continue
| nosangho/team_project | [02-15][junyang] wine21_save_loop.py | [02-15][junyang] wine21_save_loop.py | py | 6,458 | python | ko | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup... |
17508342693 | from typing import List
class Solution:
    def maxArea(self, height: List[int]) -> int:
        """Container With Most Water: largest area between two lines.

        Two-pointer scan from both ends, O(n) time, O(1) space: always
        move the shorter side inward, since moving the taller side can
        only shrink the area.

        The original also tracked the best pair of indices (im, jm) but
        never used them; that dead state is removed.

        :param height: non-negative line heights
        :return: maximum water area containable between any two lines
        """
        left, right = 0, len(height) - 1
        best = 0
        while left < right:
            area = (right - left) * min(height[left], height[right])
            if area > best:
                best = area
            if height[left] < height[right]:
                left += 1
            else:
                right -= 1
        return best
print(Solution().maxArea([1,8,6,2,5,4,8,3,7])) | soji-omiwade/cs | dsa/before_rubrik/container_with_most_water.py | container_with_most_water.py | py | 426 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 3,
"usage_type": "name"
}
] |
15835641511 | from config import db
class PricePerHour(db.Model):
    """One hourly record of market price and traded energy volumes,
    as scraped/parsed for a given date."""
    id = db.Column(db.Integer, primary_key=True)
    # Date string of max length 10 — presumably 'YYYY-MM-DD'; TODO confirm
    # the format used by the parser that writes these rows.
    date_of_parsing = db.Column(db.String(10), nullable=False)
    # Hour of the day this record applies to.
    hour = db.Column(db.Integer, nullable=False)
    # Price for the hour (currency/unit not stated in this module).
    price = db.Column(db.Float, nullable=False)
    # Actual and declared traded volumes, in MWh (per the column names).
    sales_volume_MWh = db.Column(db.Float, nullable=False)
    purchase_volume_MWh = db.Column(db.Float, nullable=False)
    declared_sales_volume_MWh = db.Column(db.Float, nullable=False)
    declared_purchase_volume_MWh = db.Column(db.Float, nullable=False)
    def to_dict(self):
        """Serialize the row into a plain dict (e.g. for a JSON response)."""
        return {
            "id": self.id,
            "date_of_parsing": self.date_of_parsing,
            "hour": self.hour,
            "price": self.price,
            "sales_volume_MWh": self.sales_volume_MWh,
            "purchase_volume_MWh": self.purchase_volume_MWh,
            "declared_sales_volume_MWh": self.declared_sales_volume_MWh,
            "declared_purchase_volume_MWh": self.declared_purchase_volume_MWh,
        }
| BohdanLazaryshyn/rdn_test_task | models.py | models.py | py | 986 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "config.db.Model",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "config.db",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "config.db.Column",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "config.db",
"line_nu... |
39920785113 | from flask import Flask, render_template, request, redirect, jsonify, after_this_request
from flask_cors import CORS
from app.trajectory import *
from app.ion import get_ion
from app.esp import *
esp_addr = ''
data = {}
app = Flask(__name__,
static_url_path='',
static_folder='static',
template_folder="templates")
CORS(app)
@app.route('/esp')
def esp():
return esp_simulation()
@app.route('/api/esp')
def api_esp():
global data
data = esp_parse(esp_addr)
return jsonify(data)
@app.route('/time')
def time():
global data
data = esp_parse(esp_addr)
return render_template('time.html', time=data['time'])
@app.route('/api/tracking/<int:norad>')
def api_tracking_prn(norad):
res = jsonify(get_trajectory(norad))
res.headers.add("Access-Control-Allow-Origin", "*")
return res
@app.route('/tracking/<int:norad>')
def tracking_norad(norad):
return render_template('tracking.html', norad=norad)
@app.route('/tracking')
def tracking():
global data
if (not data):
data = esp_parse(esp_addr)
prn_norad = get_norad(data)
print (prn_norad)
return render_template('tracking_menu.html', prn_norad=prn_norad)
@app.route('/api/ion/<int:norad>')
def api_ion_prn(norad):
res = jsonify(get_ion(norad))
res.headers.add("Access-Control-Allow-Origin", "*")
return res
@app.route('/ion/<int:norad>')
def ion_norad(norad):
return render_template('ion.html', norad=norad)
@app.route('/ion')
def ion():
global data
if (not data):
data = esp_parse(esp_addr)
prn_norad = get_norad(data)
print (prn_norad)
return render_template('ion_menu.html', prn_norad=prn_norad)
@app.route('/settings', methods = ['POST', 'GET'])
def settings():
global esp_addr
if request.method == 'POST':
esp_addr = request.form['ip']
return redirect('/')
else:
return render_template('settings.html')
@app.route('/')
def home():
global esp_addr
if (esp_addr == ''):
return redirect('/settings')
return render_template('index.html')
| Eugen171/gps | app/__init__.py | __init__.py | py | 1,952 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "app.trajectory",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "app.trajectory",
"line_n... |
28395932084 | import torch
from torch import optim
from torch import nn
from torch.utils import data
from data import AnimeDataset, LossWriter
from model import Generator, Discriminator
DATA_DIR = "../datasets/selfie2anime/all"
MODEL_G_PATH = "./Net_G.pth"
MODEL_D_PATH = "./Net_D.pth"
LOG_G_PATH = "./Log_G.txt"
LOG_D_PATH = "./Log_D.txt"
IMAGE_SIZE = 64
BATCH_SIZE = 128
WORKER = 1
LR = 0.0002
NZ = 100
num_epochs = 300
dataset = AnimeDataset(dataset_path=DATA_DIR, image_size=IMAGE_SIZE)
data_loader = data.DataLoader(dataset, batch_size=BATCH_SIZE,
shuffle=True, num_workers=WORKER)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
netG = Generator().to(device)
netD = Discriminator().to(device)
criterion = nn.BCELoss()
real_label = 1.
fake_label = 0.
optimizerD = optim.Adam(netD.parameters(), lr=LR, betas=(0.5, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=LR, betas=(0.5, 0.999))
g_writer = LossWriter(save_path=LOG_G_PATH)
d_writer = LossWriter(save_path=LOG_D_PATH)
img_list = []
G_losses = []
D_losses = []
iters = 0
print(dataset.__len__())
print("开始训练")
for epoch in range(num_epochs):
for data in data_loader:
#################################################
# 1. 更新判别器D: 最大化 log(D(x)) + log(1 - D(G(z)))
# 等同于最小化 - log(D(x)) - log(1 - D(G(z)))
#################################################
netD.zero_grad()
# 1.1 来自数据集的样本
real_imgs = data.to(device)
b_size = real_imgs.size(0)
label = torch.full((b_size,), real_label, dtype=torch.float, device=device)
# 使用鉴别器对数据集样本做判断
output = netD(real_imgs).view(-1)
# 计算交叉熵损失 -log(D(x))
errD_real = criterion(output, label)
# 对判别器进行梯度回传
errD_real.backward()
D_x = output.mean().item()
# 1.2 生成随机向量
noise = torch.randn(b_size, NZ, device=device)
# 来自生成器生成的样本
fake = netG(noise)
label.fill_(fake_label)
# 使用鉴别器对生成器生成样本做判断
output = netD(fake.detach()).view(-1)
# 计算交叉熵损失 -log(1 - D(G(z)))
errD_fake = criterion(output, label)
# 对判别器进行梯度回传
errD_fake.backward()
D_G_z1 = output.mean().item()
# 对判别器计算总梯度,-log(D(x))-log(1 - D(G(z)))
errD = errD_real + errD_fake
# 更新判别器
optimizerD.step()
#################################################
# 2. 更新判别器G: 最小化 log(D(x)) + log(1 - D(G(z))),
# 等同于最小化log(1 - D(G(z))),即最小化-log(D(G(z)))
# 也就等同于最小化-(log(D(G(z)))*1+log(1-D(G(z)))*0)
# 令生成器样本标签值为1,上式就满足了交叉熵的定义
#################################################
netG.zero_grad()
# 对于生成器训练,令生成器生成的样本为真,
label.fill_(real_label)
# 输入生成器的生成的假样本
output = netD(fake).view(-1)
# 对生成器计算损失
errG = criterion(output, label)
# 对生成器进行梯度回传
errG.backward()
D_G_z2 = output.mean().item()
# 更新生成器
optimizerG.step()
# 输出损失状态
if iters % 5 == 0:
print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
% (epoch, num_epochs, iters, len(data_loader),
errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
d_writer.add(loss=errD.item(), i=iters)
g_writer.add(loss=errG.item(), i=iters)
# 保存损失记录
G_losses.append(errG.item())
D_losses.append(errD.item())
iters += 1
| cwpeng-cn/DCGAN | train.py | train.py | py | 3,971 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "data.AnimeDataset",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.utils.data",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.... |
19400181919 | from typing import List
import common.arrayCommon as Array
import heapq
class Solution:
    def pondSizes(self, land: List[List[int]]) -> List[int]:
        """Return the sizes of all ponds in ascending order.

        A pond is a maximal group of 0-valued (water) cells connected
        horizontally, vertically or diagonally.  Mutates *land*: visited
        water cells are overwritten with 1.

        :param land: rectangular grid of integer heights (0 == water)
        :return: pond sizes, smallest first
        """
        h = len(land)
        w = len(land[0])
        sizes = []
        for i in range(h):
            for j in range(w):
                if land[i][j] == 0:
                    cells = []
                    self.search(land, w, h, i, j, cells)
                    sizes.append(len(cells))
        # sorted() replaces the previous heappush + nsmallest(len(...))
        # combination, which was just an O(n log n) sort written by hand.
        return sorted(sizes)

    def search(self, land, w, h, i, j, ans):
        """Depth-first flood fill from (i, j).

        Marks each reachable water cell as visited (sets it to 1) and
        appends one marker per cell to *ans*, so len(ans) is the pond
        size.  Signature kept for backward compatibility with callers.
        """
        if i < 0 or i >= h or j < 0 or j >= w:
            return
        if land[i][j] != 0:
            return  # land, or water already counted
        land[i][j] = 1
        ans.append(0)
        # Recurse into all 8 neighbours (diagonals count as connected);
        # this collapses the original's eight duplicated call lines.
        for di in (-1, 0, 1):
            for dj in (-1, 0, 1):
                if di or dj:
                    self.search(land, w, h, i + di, j + dj, ans)
land = [
[0, 2, 1, 0],
[0, 1, 0, 1],
[1, 1, 0, 1],
[0, 1, 0, 1]
]
Array.print2DArray(land)
r = Solution().pondSizes(land)
print(r)
| Yigang0622/LeetCode | pondSizes.py | pondSizes.py | py | 1,310 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "heapq.heappush",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "heapq.nsmallest",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "common.arrayCommon.print2DA... |
16536913637 | import pandas as pd
dataset = pd.read_csv('iris.csv')
data = dataset.iloc[ : 99 , :]
target = data.iloc[ : , -1: ]
y = []
for x in target.values:
if x == 'Iris-setosa':
y.append(1)
else:
y.append(0)
x = data.iloc[ : , : -1]
x = x.values.tolist()
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
import numpy as np
shuffle(x, y)
x_train = []
x_test = []
y_train = []
y_test = []
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.1)
x_train = np.array(x_train)
y_train = np.array(y_train)
x_test = np.array(x_test)
y_test = np.array(y_test)
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
clf.fit(x_train,y_train)
y_pred = clf.predict(x_test)
print(accuracy_score(y_test,y_pred)) | Nuhru1/Machine_Learning_Logistic_Regression_From_Scratch | Logistic_Regression_with_Sklearn.py | Logistic_Regression_with_Sklearn.py | py | 897 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sklearn.utils.shuffle",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 33,
"usage_type": "call"
},
{
"api... |
70065264188 |
'Program to create the Functional Requirement Classifer model and validate it'
from fileProcess import FileProcess
import numpy
from pandas import DataFrame
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import KFold
from sklearn.metrics import confusion_matrix, f1_score
from sklearn.feature_extraction.text import TfidfTransformer
def build_data_frame(path, classification):
    """Read every file under *path* into a DataFrame labelled with
    *classification*: one row per file ({'text', 'class'}), indexed by
    file name."""
    reader = FileProcess()
    records, file_names = [], []
    for name, body in reader.read_files(path):
        records.append({'text': body, 'class': classification})
        file_names.append(name)
    return DataFrame(records, index=file_names)
'Main'
data = DataFrame({'text': [], 'class': []})
for path, classification in FileProcess.SOURCES:
data = data.append(build_data_frame(path, classification))
data = data.reindex(numpy.random.permutation(data.index))
pipeline = Pipeline([
#('count_vectorizer', CountVectorizer(ngram_range=(1, 2))),
('count_vectorizer', CountVectorizer()),
# ('tfidf_transformer', TfidfTransformer()),
('classifier', MultinomialNB())
])
k_fold = KFold(n=len(data), n_folds=10)
scores = []
confusion = numpy.array([[0, 0], [0, 0]])
for train_indices, test_indices in k_fold:
train_text = data.iloc[train_indices]['text'].values
train_y = data.iloc[train_indices]['class'].values.astype(str)
test_text = data.iloc[test_indices]['text'].values
test_y = data.iloc[test_indices]['class'].values.astype(str)
pipeline.fit(train_text, train_y)
predictions = pipeline.predict(test_text)
print("******************* predictions*********")
# print(predictions)
confusion += confusion_matrix(test_y, predictions)
score = f1_score(test_y, predictions, pos_label=FileProcess.FRN)
scores.append(score)
for i in range(0, len(predictions)) :
if predictions[i] != test_y[i] :
print("********text is \n" + test_text[i])
print("The wrong clf is: " + predictions[i])
print("*******************")
print('Total files classified:', len(data))
print('Score:', sum(scores)/len(scores))
print('Confusion matrix:')
print(confusion)
print("++++++++++++ vocabulary from the documents ++++++++++=")
vector = pipeline.named_steps['count_vectorizer']
#print(vector.vocabulary_) | xiejen/rfpFunctionReqClf | classifier.py | classifier.py | py | 2,435 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fileProcess.FileProcess",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "fileProcess... |
39911784422 | import argparse
import time
import warnings
import pickle
import torch
import random
import numpy as np
import pandas as pd
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer, TrainingArguments, ElectraForSequenceClassification, AdamW
from torch import nn, optim
from torch.utils.data import DataLoader
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from data_set import *
def seed_everything(seed):
    """Seed every RNG in play (python, hash, numpy, torch CPU + CUDA)
    and configure cuDNN for reproducible runs.

    :param seed: integer seed applied to all sources of randomness
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    # BUGFIX: benchmark must be False for determinism.  True lets cuDNN
    # auto-tune and pick potentially non-deterministic fastest kernels,
    # defeating the `deterministic = True` flag set above.
    torch.backends.cudnn.benchmark = False
def get_pickle(pickle_path):
    """Load and return a pickled object (custom dataset) from *pickle_path*.

    Uses a context manager so the file handle is always closed — the
    original open()/close() pair leaked the handle if unpickling raised.

    :param pickle_path: path to a pickle file written by this project
    :return: the unpickled object
    """
    with open(pickle_path, "rb") as f:
        return pickle.load(f)
def get_data():
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large")
ai_hub = get_pickle("../../data/ai_hub_dataset.pkl")
train_token, train_label = tokenized_dataset(ai_hub["train"], tokenizer)
val_token, val_label = tokenized_dataset(ai_hub["validation"], tokenizer)
train_set = RE_Dataset(train_token, train_label)
val_set = RE_Dataset(val_token, val_label)
train_iter = DataLoader(train_set, batch_size=16, shuffle=True)
val_iter = DataLoader(val_set, batch_size=16, shuffle=True)
return train_iter, val_iter
def get_model():
network = AutoModelForSequenceClassification.from_pretrained("xlm-roberta-large", num_labels=6, hidden_dropout_prob=0.0).to("cuda:0")
optimizer = AdamW(network.parameters(), lr=5e-6)
scaler = GradScaler()
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer=optimizer, T_max=10, eta_min=1e-6)
criterion = nn.CrossEntropyLoss().to("cuda:0")
return network, optimizer, scaler, scheduler, criterion
def training_per_step(model, loss_fn, optimizer, scaler, input_ids, attention_mask, labels, device):
'''매 step마다 학습을 하는 함수'''
model.train()
with autocast():
labels = labels.to(device)
preds = model(input_ids.to(device), attention_mask = attention_mask.to(device))[0]
loss = loss_fn(preds, labels)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
return loss
def validating_per_steps(epoch, model, loss_fn, test_loader, device):
'''특정 step마다 검증을 하는 함수'''
model.eval()
loss_sum = 0
sample_num = 0
preds_all = []
targets_all = []
pbar = tqdm(test_loader, total=len(test_loader), position=0, leave=True)
for input_ids, attention_mask, labels in pbar :
labels = labels.to(device)
preds = model(input_ids.to(device), attention_mask = attention_mask.to(device))[0]
preds_all += [torch.argmax(preds, 1).detach().cpu().numpy()]
targets_all += [labels.detach().cpu().numpy()]
loss = loss_fn(preds, labels)
loss_sum += loss.item()*labels.shape[0]
sample_num += labels.shape[0]
description = f"epoch {epoch + 1} loss: {loss_sum/sample_num:.4f}"
pbar.set_description(description)
preds_all = np.concatenate(preds_all)
targets_all = np.concatenate(targets_all)
accuracy = (preds_all == targets_all).mean()
print(" test accuracy = {:.4f}".format(accuracy))
return accuracy
def train(model, loss_fn, optimizer, scaler, train_loader, test_loader, scheduler, device):
'''training과 validating을 진행하는 함수'''
prev_acc = 0
global_steps = 0
for epoch in range(1):
running_loss = 0
sample_num = 0
preds_all = []
targets_all = []
pbar = tqdm(enumerate(train_loader), total=len(train_loader), position=0, leave=True)
for step, (input_ids, attention_mask, labels) in pbar:
# training phase
loss = training_per_step(model, loss_fn, optimizer, scaler, input_ids, attention_mask, labels, device)
running_loss += loss.item()*labels.shape[0]
sample_num += labels.shape[0]
global_steps += 1
description = f"{epoch+1}epoch {global_steps: >4d}step | loss: {running_loss/sample_num: .4f} "
pbar.set_description(description)
# validating phase
if global_steps % 500 == 0 :
with torch.no_grad():
acc = validating_per_steps(epoch, model, loss_fn, test_loader, device)
if acc > prev_acc:
torch.save(model, "../../output/question_model.pt")
prev_acc = acc
if scheduler is not None :
scheduler.step()
def main():
seed_everything(2021)
train_iter, val_iter = get_data()
network, optimizer, scaler, scheduler, criterion = get_model()
train(network, criterion, optimizer, scaler, train_iter, val_iter, scheduler, "cuda:0")
if __name__ == "__main__":
main()
| TEAM-IKYO/Open-Domain-Question-Answering | code/question_labeling/train.py | train.py | py | 5,127 | python | en | code | 24 | github-code | 6 | [
{
"api_name": "random.seed",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "torch.manual_seed",
... |
73879522109 | # -*- coding: utf-8 -*-
import requests
import pandas as pd
import pytest
import urllib
import pprint
# 課題1
def get_api(url):
result = requests.get(url)
return result.json()
def main():
keyword = "鬼滅"
url = "https://app.rakuten.co.jp/services/api/IchibaItem/Search/20170706?format=json&keyword={}&applicationId=1019079537947262807".format(
keyword)
print(get_api(url))
main()
# 課題2
url = 'https://app.rakuten.co.jp/services/api/IchibaItem/Search/20170706'
payload = {
'applicationId': 1017762098426453356,
'keyword': 'Python',
'hits': 10,
'sort': '+itemPrice',
}
r = requests.get(url, params=payload)
resp = r.json()
pprint.pprint(resp)
print ("num of kensaku =",resp['count'])
print ('-'*40)
for i in resp['Items']:
item = i['Item']
print (item['itemName'])
print (item['itemPrice'], 'yen')
# 課題3
url = 'https://app.rakuten.co.jp/services/api/Product/Search/20170426'
payload = {
'applicationId': 1017762098426453356,
'keyword': 'rakuten',
'hits': 10,
'genreId': 560278,
}
r = requests.get(url, params=payload)
resp = r.json()
a=[]
b=[]
for i in resp['Products']:
item = i['Product']
a.append(item['minPrice'])
b.append(item['maxPrice'])
print (item['minPrice'], 'yen')
print(item['maxPrice'], 'yen')
print("最安値は、", min(a), "円です。")
print("最高値は、", max(b), "円です。")
#課題4
url = 'https://app.rakuten.co.jp/services/api/IchibaItem/Search/20140222'
payload = {
'applicationId': 1017762098426453356,
'keyword': 'Python',
'hits': 10,
'sort': '-itemPrice',
'rankTargetProductCount':30485
}
r = requests.get(url, params=payload)
resp = r.json()
print ("num of kensaku =",resp['count'])
print ('-'*40)
a=[]
b=[]
for i in resp['Items']:
item = i['Item']
a.append(item['itemName'])
b.append(item['itemPrice'])
print (item['itemName'])
print (item['itemPrice'], 'yen')
print(len(a), len(b))
df = pd.DataFrame({"Items":a,
"Prices":b})
df.to_csv("/Users/ishikawakanji/Desktop/kadai6/item.csv", encoding="utf-8-sig")
#課題5
def test_get_rakutenAPI():
price_list = list(item['itemName'])
for i in price_list:
print(i)
assert len(i)>=1
assert price_list[0].title | KanjiIshikawa-lab/Kadai6syuusei | kadai6_4.py | kadai6_4.py | py | 2,333 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_numb... |
25006908635 | from git import Repo
from logging import info
from pathlib import Path
from platform import system
from shutil import copyfile, rmtree
from stat import S_IWRITE
from subprocess import check_output, STDOUT, CalledProcessError
from tempfile import TemporaryDirectory
from twrpdtgen import current_path
from twrpdtgen.utils.find_package import find_package
from typing import Union
def handle_remove_readonly(func, path, _):
    """shutil.rmtree onerror hook: strip the read-only attribute from
    *path*, then retry the failed filesystem call *func*."""
    entry = Path(path)
    entry.chmod(S_IWRITE)
    func(path)
class AIKManager:
    """
    This class is responsible for dealing with AIK (Android Image Kitchen)
    tasks such as cloning the tools, extracting recovery images and
    collecting image/ramdisk information.
    """

    def __init__(self, is_debug):
        """
        Set up a working directory and clone AIK into it.

        :param is_debug: when True, extract into ``current_path / "extract"``
                         (kept after exit); otherwise use a TemporaryDirectory.
        :raises RuntimeError: on Linux when the required cpio package is missing.
        """
        self.is_debug = is_debug
        if not self.is_debug:
            self.tempdir = TemporaryDirectory()
            self.path = Path(self.tempdir.name)
        else:
            self.path = current_path / "extract"
            if self.path.is_dir():
                # Clear leftovers from a previous debug run; the onerror hook
                # strips the read-only flag so rmtree also works on Windows.
                rmtree(self.path, ignore_errors=False, onerror=handle_remove_readonly)

        self.images_path = self.path / "split_img"
        self.ramdisk_path = self.path / "ramdisk"

        # Check whether the cpio package is installed.
        # BUGFIX: the module is imported as ``from platform import system``,
        # so the previous ``platform.system()`` call raised NameError.
        if system() == "Linux" and not find_package("cpio"):
            raise RuntimeError("cpio package is not installed. Install it by sudo apt install cpio or sudo pacman -S cpio (Based on what package manager you're using)")

        info("Cloning AIK...")
        if system() == "Linux":
            Repo.clone_from("https://github.com/SebaUbuntu/AIK-Linux-mirror", self.path)
        elif system() == "Windows":
            Repo.clone_from("https://github.com/SebaUbuntu/AIK-Windows-mirror", self.path)

    def extract(self, recovery_image: Union[Path, str]) -> None:
        """
        Extract a recovery image using AIK.

        :param recovery_image: recovery image path (string or Path object)
        :raises NotImplementedError: on unsupported host operating systems
        :raises RuntimeError: when the AIK unpack script exits non-zero
        """
        new_recovery_image = self.path / "recovery.img"
        copyfile(recovery_image, new_recovery_image)

        if system() == "Linux":
            command = [self.path / "unpackimg.sh", "--nosudo", new_recovery_image]
        elif system() == "Windows":
            command = [self.path / "unpackimg.bat", new_recovery_image]
        else:
            raise NotImplementedError(f"{system()} is not supported!")

        try:
            process = check_output(command, stderr=STDOUT, universal_newlines=True)
        except CalledProcessError as e:
            returncode = e.returncode
            output = e.output
        else:
            returncode = 0
            output = process

        if returncode != 0:
            if self.is_debug:
                print(output)
            raise RuntimeError(f"AIK extraction failed, return code {returncode}")

        self.get_image_infos()

    def get_image_infos(self):
        """Populate attributes (kernel, dt/dtb/dtbo images, offsets,
        build.prop location, ...) from the files AIK just extracted."""
        self.aik_images_path_base = str(self.images_path / "recovery.img-")
        kernel = self.get_extracted_info("zImage")
        self.kernel = kernel if kernel.is_file() else None
        dt_image = self.get_extracted_info("dt")
        self.dt_image = dt_image if dt_image.is_file() else None
        dtb_image = self.get_extracted_info("dtb")
        self.dtb_image = dtb_image if dtb_image.is_file() else None
        self.dtbo_image = None
        # Either name may be present depending on the image format;
        # the last existing one wins, matching the original behavior.
        for name in ["dtbo", "recovery_dtbo"]:
            dtbo_image = self.get_extracted_info(name)
            if dtbo_image.is_file():
                self.dtbo_image = dtbo_image
        self.base_address = self.read_recovery_file(self.get_extracted_info("base"))
        self.board_name = self.read_recovery_file(self.get_extracted_info("board"))
        self.cmdline = self.read_recovery_file(self.get_extracted_info("cmdline"))
        header_version = self.get_extracted_info("header_version")
        # Older boot image formats ship no header_version file; default to "0".
        self.header_version = self.read_recovery_file(header_version) if header_version.exists() else "0"
        self.recovery_size = self.read_recovery_file(self.get_extracted_info("origsize"))
        self.pagesize = self.read_recovery_file(self.get_extracted_info("pagesize"))
        self.ramdisk_compression = self.read_recovery_file(self.get_extracted_info("ramdiskcomp"))
        self.ramdisk_offset = self.read_recovery_file(self.get_extracted_info("ramdisk_offset"))
        self.tags_offset = self.read_recovery_file(self.get_extracted_info("tags_offset"))

        # Get a usable build.prop to parse (first existing location wins).
        self.buildprop = None
        buildprop_locations = [self.ramdisk_path / "default.prop",
                               self.ramdisk_path / "vendor" / "build.prop",
                               self.ramdisk_path / "system" / "build.prop",
                               self.ramdisk_path / "system" / "etc" / "build.prop"]
        for folder in buildprop_locations:
            if folder.is_file():
                self.buildprop = folder
                break

    @staticmethod
    def read_recovery_file(file: Path) -> str:
        """
        Read file contents
        :param file: file as a Path object
        :return: string of the first line of the file contents
        """
        return file.read_text().splitlines()[0]

    def get_extracted_info(self, file: str) -> Path:
        """Return the Path of the AIK metadata file ``recovery.img-<file>``."""
        return self.images_path / ("recovery.img-" + file)

    def cleanup(self):
        """Remove the temporary working directory (no-op in debug mode,
        where the extract directory is intentionally kept)."""
        if not self.is_debug:
            self.tempdir.cleanup()
{
"api_name": "stat.S_IWRITE",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "pathlib.Path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tempfile.TemporaryDirectory",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pathlib.Pa... |
39174277833 | import numpy as np
import torch
from torch.utils.data import DataLoader
from random import seed
from dataset import MNIST
from network import FeedForward
from train_mnist import Train, TrainConfig
from plotter import Plotter
np.random.seed(1234)
seed(1234)
torch.manual_seed(1234)
if '__main__' == __name__:
data = dict()
data['train'] = MNIST('./dataset', train=True, download=True, randomize=False)
data['test'] = MNIST('./dataset', train=False)
loader = dict()
loader['train'] = torch.utils.data.DataLoader(data['train'], batch_size=60000, shuffle=False)
loader['test'] = torch.utils.data.DataLoader(data['test'], batch_size=10000, shuffle=False)
# setup
input_size = 28 * 28
output_size = 10
hidden_sizes = [784, 1024, 1024, 20, 20, 20, 10]
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f'to device: {device}')
net = FeedForward(input_size, hidden_sizes, output_size).to(device)
criterion = torch.nn.CrossEntropyLoss(reduction='sum')
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
cfg = TrainConfig(net, criterion, optimizer)
train = Train(cfg)
train.epochs = 4000
train.mi_cycle = 20
train.run(loader)
train.dump()
plot = Plotter(train)
plot.plot_losses()
plot.plot_accuracy()
plot.plot_info_plan('train')
plot.plot_info_plan('test')
| shalomma/PytorchBottleneck | ib_mnist.py | ib_mnist.py | py | 1,394 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "numpy.random.seed",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "random.seed",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.manual_seed",
... |
3536318559 | from django.db import models
class Task(models.Model):
    """A reminder task for an employee, pinged `per_day` times a day."""
    # Employee name (verbose_name is user-facing Russian text; do not translate).
    username = models.CharField(verbose_name='Имя сотрудника', max_length=30)
    # Reminder text shown to the user.
    task_name = models.CharField(verbose_name='Текст задачи', max_length=100)
    # How many reminders to send within one day (at least 1 by default).
    per_day = models.PositiveIntegerField(
        default=1,
        verbose_name='Количество напоминаний за 1 день'
    )
    def time_dates(self):
        """Return the hours of the day at which reminders fire, spread
        evenly over 24 hours starting at hour 0.

        NOTE(review): elements after the first are floats (24 / per_day is
        true division), e.g. [0, 6.0, 12.0, 18.0] — confirm callers accept
        non-integer hours when 24 is not divisible by per_day.
        """
        quantity = 24 / self.per_day  # spacing between reminders, in hours
        time_list = [0, ] # [0, 6, 12, 18]
        for num in range(self.per_day - 1):
            new_time = time_list[num] + quantity
            time_list.append(new_time)
        return time_list
| DalaevBC/ping_bot | inside/models.py | models.py | py | 658 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.models.Model",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "... |
28076023790 | from .gaussian_attention import gaussian_mask, gaussian_attention
from keras.layers import Layer
class VisualAttentionLayer(Layer):
def __init__(self, output_dim, transpose=False, **kwargs):
if len(output_dim) != 2:
raise ValueError("`output_dim` has to be a 2D tensor [Height, Width].")
self._output_dim = output_dim
super(VisualAttentionLayer, self).__init__(**kwargs)
def build(self, input_shape):
super(VisualAttentionLayer, self).build(input_shape)
def call(self, x):
if len(x) != 2:
raise ValueError("Input of the layer has to consist of 2 different inputs: the images and the parameters.")
img_tensor, transform_params = x
return gaussian_attention(img_tensor, transform_params, self._output_dim)
def compute_output_shape(self, input_shape):
if len(input_shape) == 2 and len(input_shape[0]) == 4:
return (None, *self._output_dim, input_shape[0][-1])
else:
raise ValueError("The `input_shape` is not correct.") | zimmerrol/tf_keras_attention | src/gaussian_attention_layer.py | gaussian_attention_layer.py | py | 1,066 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "keras.layers.Layer",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "gaussian_attention.gaussian_attention",
"line_number": 19,
"usage_type": "call"
}
] |
40199173264 | from django.test import TestCase
from django.urls import reverse
from . import utils
class TestView(TestCase):
    """
    Test that access to views that accept get do not raise exception.
    """

    def setUp(self) -> None:
        # Views under test, each flagged with whether anonymous access redirects.
        self.views = [
            {"name": 'index', 'requires_authentication': False},
            {"name": 'about', 'requires_authentication': False},
        ]
        self.configuration = utils.createConfiguration()
        return super().setUp()

    def test_access_to_views(self):
        """GET every view and check the status code matches its auth requirement."""
        for view in self.views:
            name = view['name']
            response = self.client.get(reverse(f'core:{name}'))
            # Authenticated-only views redirect (302); public ones render (200).
            expected_status = 302 if view['requires_authentication'] else 200
            self.assertEqual(
                response.status_code,
                expected_status,
                f"Access to core:{name} raised unexpected status code",
            )
| Koffi-Cobbin/ACES-WEB | core/tests/test_views.py | test_views.py | py | 961 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "django.test.TestCase",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.urls.reverse",
"line_number": 20,
"usage_type": "call"
}
] |
21480313260 | import bpy
import re
from ..helpers import sentence_join
# Fallback values used when a pose bone doesn't expose the lock attributes.
default_lock = False
default_lock_array = [default_lock] * 3
component_names = ('X', 'Y', 'Z', 'W')

def is_prop_locked(pb, name, component_index):
    """Tell whether the given transform channel is locked on pose bone `pb`.

    `name` is the animated property ('location', 'scale', or one of the
    rotation modes) and `component_index` selects the axis (3 = W).
    Returns None for any other property name.
    """
    if name == 'location':
        return getattr(pb, 'lock_location', default_lock_array)[component_index]
    if name in ('rotation_euler', 'rotation_quaternion', 'rotation_axis_angle'):
        # The rotation W component has its own dedicated lock flag.
        if component_index >= 3:
            return getattr(pb, 'lock_rotation_w', default_lock)
        return getattr(pb, 'lock_rotation', default_lock_array)[component_index]
    if name == 'scale':
        return getattr(pb, 'lock_scale', default_lock_array)[component_index]
class GRET_OT_channels_delete_unavailable(bpy.types.Operator):
    """Delete location/rotation/scale channels that are locked in the transform panel"""

    bl_idname = 'gret.channels_delete_unavailable'
    bl_label = "Delete Unavailable Channels"
    bl_options = {'REGISTER', 'UNDO'}

    @classmethod
    def poll(cls, context):
        # Only meaningful inside the animation editors.
        return context.space_data and context.space_data.type in {'DOPESHEET_EDITOR', 'GRAPH_EDITOR'}

    def execute(self, context):
        """Scan the active action's F-curves and remove locked (and optionally
        unresolvable) channels, then report how many were removed."""
        obj = context.active_object
        action = obj.animation_data.action if (obj and obj.animation_data) else None
        if not action:
            return {'CANCELLED'}

        remove_fcurves = []
        # Deleting curves whose data path no longer resolves is disabled for
        # now; flip this on to also remove those.
        delete_invalid = False
        num_invalid = num_locked = 0
        for fc in action.fcurves:
            try:
                obj.path_resolve(fc.data_path)
            except ValueError:
                # Curve points at a property that no longer exists.
                if delete_invalid:
                    print(f"Removing curve, can't resolve {fc.data_path}")
                    remove_fcurves.append(fc)
                    # Fix: only count curves actually queued for removal,
                    # otherwise the report below claims deletions that never
                    # happened (delete_invalid defaults to False).
                    num_invalid += 1
                continue
            pb_match = re.match(r'^pose\.bones\[\"([^\"]+)"\]\.(\w+)$', fc.data_path)
            if pb_match:
                pb = obj.pose.bones.get(pb_match[1])
                prop_name = pb_match[2]
                if pb and is_prop_locked(pb, prop_name, fc.array_index):
                    print(f"Removing curve, bone {pb.name} {component_names[fc.array_index]} "
                        f"{prop_name} is locked")
                    remove_fcurves.append(fc)
                    num_locked += 1
                    continue

        for fc in remove_fcurves:
            action.fcurves.remove(fc)

        # Human-readable summary, e.g. "2 invalid and 3 locked transform".
        num_removed_str = sentence_join([
            f"{num_invalid} invalid" if num_invalid else "",
            f"{num_locked} locked transform" if num_locked else "",
        ])
        if num_removed_str:
            self.report({'INFO'}, f"Removed {num_removed_str} curves.")
        return {'FINISHED'}
def draw_menu(self, context):
    # Menu draw callback appended to the Graph/Dope Sheet channel menus below.
    self.layout.operator(GRET_OT_channels_delete_unavailable.bl_idname)
def register(settings, prefs):
    """Register the operator and menu entries; returns False when disabled in prefs."""
    if not prefs.animation__enable_channels_delete_unavailable:
        return False
    bpy.utils.register_class(GRET_OT_channels_delete_unavailable)
    bpy.types.GRAPH_MT_channel.append(draw_menu)
    bpy.types.DOPESHEET_MT_channel.append(draw_menu)
def unregister():
    """Remove the menu entries and unregister the operator (reverse of register)."""
    bpy.types.GRAPH_MT_channel.remove(draw_menu)
    bpy.types.DOPESHEET_MT_channel.remove(draw_menu)
    bpy.utils.unregister_class(GRET_OT_channels_delete_unavailable)
| greisane/gret | anim/channels_delete_unavailable.py | channels_delete_unavailable.py | py | 3,374 | python | en | code | 298 | github-code | 6 | [
{
"api_name": "bpy.types",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "re.match",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "helpers.sentence_join",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "bpy.utils.register_clas... |
29947747353 | """
Tesla Crystal for Tornado wallet
"""
import sys
import time
from os import path
from modules.basehandlers import CrystalHandler
from modules.i18n import get_dt_language
from modules.helpers import base_path
from modules.helpers import async_get_with_http_fallback
sys.path.append('crystals/420_tesla')
from bismuthsimpleasset import BismuthSimpleAsset
from teslaapihandler import TeslaAPIHandler
DEFAULT_THEME_PATH = path.join(base_path(), "crystals/420_tesla/themes/default")
MODULES = {}
__version__ = "1.0.0"
class TeslaHandler(CrystalHandler):
    """Tornado handler for the Tesla crystal: serves its pages and JSON endpoints.

    GET/POST requests are dispatched by name to the coroutine methods below
    (see `get`/`post`), so each public async method is effectively a URL endpoint.
    """

    def initialize(self):
        """Per-request setup: template extras plus the Tesla API / asset helpers."""
        # Parent init
        super().initialize()
        data = ""
        self.bismuth_vars["extra"] = {
            "header": "<!-- TESLA HEADER -->",
            "footer": data,
        }
        # On-chain operation labels used by the Tesla asset protocol.
        reg = "tesla:register"
        unreg = "tesla:unregister"
        transfer = "tesla:transfer"
        op_data = "tesla:battery"
        self.teslahandler = TeslaAPIHandler(self.bismuth,reg,unreg,op_data)
        # Asset registry address and the minimum amount required to register.
        address = "Bis1TeSLaWhTC2ByEwZnYWtsPVK5428uqnL46"
        thresholds = {"reg": 25}
        checkfunc = {"f": self.teslahandler.checkID}
        self.assethandler = BismuthSimpleAsset(self.bismuth,address,reg,unreg,transfer,thresholds,checkfunc)

    async def message_popup(self, params=None):
        """Render a generic popup with title/msg/type taken from query arguments."""
        title = self.get_argument("title", default=None, strip=False)
        message = self.get_argument("msg", default=None, strip=False)
        # NOTE(review): `type` shadows the builtin; harmless here but worth renaming.
        type = self.get_argument("type", default=None, strip=False)
        self.render("message_pop.html", bismuth=self.bismuth_vars, title=title, message=message, type=type)

    async def about(self, params=None):
        """Default landing page of the crystal, with DataTable localization."""
        namespace = self.get_template_namespace()
        self.bismuth_vars["dtlanguage"] = get_dt_language(self.locale.translate)
        kwargs = {"bismuth": self.bismuth_vars}
        namespace.update(kwargs)
        self.render("about.html", bismuth=self.bismuth_vars)

    async def fetch_asset_id(self, params=None):
        """
        Fetch asset ID associated with email address. pwd is the vehicle anonymizer.
        """
        email = self.get_argument("email", default=None, strip=False)
        pwd = self.get_argument("pwd", default=None, strip=False) #For XOR
        data = self.teslahandler.tesla_vins(email, pwd)
        # NOTE(review): time.sleep blocks Tornado's event loop for a second —
        # presumably throttling the upstream API; confirm, consider asyncio.sleep.
        time.sleep(1)
        self.render("json.html", data=data)

    async def fetch_api_data(self, params=None):
        """
        Returns a dict with vehicle data for all VINs associated with email and anonymizer
        """
        email = self.get_argument("email", default=None, strip=False)
        pwd = self.get_argument("pwd", default=None, strip=False) #For XOR
        out = self.teslahandler.fetch_vehicle_data(email,pwd)
        self.render("json.html", data=out)

    async def check_vin_registrant(self, params=None):
        """
        Returns registrant given asset id (vin number in vin_input)
        """
        vin = self.get_argument("vin_input", default=None, strip=False)
        # First check if this is a valid VIN
        data = self.teslahandler.checkVIN(vin)
        if data != -1:
            # Second check if active wallet address is registrant
            data = -1
            registrant = self.assethandler.get_registrant(vin)
            if registrant == self.bismuth.address:
                data = 1
        # NOTE(review): if checkVIN returned -1, `registrant` was never assigned
        # and this raises UnboundLocalError; `data` is also computed but unused.
        self.render("json.html", data=registrant)

    async def check_vin_register(self, params=None):
        """
        Checks if an asset id (VIN number) is valid and registered
        """
        vin = self.get_argument("vin_input", default=None, strip=False)
        # First check if this is a valid VIN
        data = self.teslahandler.checkID(vin)
        if data != -1:
            # Second check if VIN is already registered
            registrant = self.assethandler.get_registrant(vin)
            if len(registrant) > 0:
                data = -1
        self.render("json.html", data=data)

    async def check_vin_unregister(self, params=None):
        """
        Unregisters VIN if valid and current address has previously registered it
        """
        vin = self.get_argument("vin_input", default=None, strip=False)
        # First check if this is a valid VIN
        data = self.teslahandler.checkID(vin)
        if data != -1:
            # Second check if this account has registered this VIN
            registrant = self.assethandler.get_registrant(vin)
            if registrant != self.bismuth.address:
                data = -1
        self.render("json.html", data=data)

    async def get_chain_data(self, params=None):
        """
        Returns vehicle data as specified by 'variable' between start and end dates
        Used for displaying data by DataTable and ChartJS
        """
        vin = self.get_argument("vin", default=None, strip=False)
        addresses = self.get_argument("address", default=None, strip=False)
        variable = self.get_argument("variable", default=None, strip=False)
        filter = self.get_argument("filter", default=None, strip=False)
        range_unit = self.get_argument("range", default=None, strip=False)
        temperature = self.get_argument("temperature", default=None, strip=False)
        startdate = self.get_argument("startdate", default=None, strip=False)
        enddate = self.get_argument("enddate", default=None, strip=False)
        # Battery cycle counts are derived from the battery_level series.
        if variable == "battery_cycles":
            out = self.teslahandler.get_cycle_data(vin,addresses,"battery_level",filter,range_unit,temperature,startdate,enddate)
        else:
            out = self.teslahandler.get_chain_data(vin,addresses,variable,filter,range_unit,temperature,startdate,enddate)
        self.render("json.html", data=out)

    async def get_all_asset_ids(self, params=None):
        """Return all registered asset ids matching the search string as JSON."""
        asset_search = self.get_argument("asset_search", default=None, strip=False)
        out = self.assethandler.get_all_asset_ids(asset_search)
        self.render("json.html", data=out)

    async def page1(self, params=None):
        """Render the crystal's first page with DataTable localization."""
        namespace = self.get_template_namespace()
        self.bismuth_vars["dtlanguage"] = get_dt_language(self.locale.translate)
        kwargs = {"bismuth": self.bismuth_vars}
        namespace.update(kwargs)
        self.render("page1.html", bismuth=self.bismuth_vars)

    async def page2(self, params=None):
        """Render the crystal's second page with DataTable localization."""
        namespace = self.get_template_namespace()
        self.bismuth_vars["dtlanguage"] = get_dt_language(self.locale.translate)
        kwargs = {"bismuth": self.bismuth_vars}
        namespace.update(kwargs)
        self.render("page2.html", bismuth=self.bismuth_vars)

    async def page3(self, params=None):
        """Render the crystal's third page with DataTable localization."""
        namespace = self.get_template_namespace()
        self.bismuth_vars["dtlanguage"] = get_dt_language(self.locale.translate)
        kwargs = {"bismuth": self.bismuth_vars}
        namespace.update(kwargs)
        self.render("page3.html", bismuth=self.bismuth_vars)

    async def get(self, command=""):
        """Dispatch GET /<command>/<params...> to the matching coroutine method."""
        command, *params = command.split("/")
        if not command:
            command = "about"
        # NOTE(review): getattr on a URL-supplied name lets clients invoke any
        # attribute of this handler — consider a whitelist of endpoints.
        await getattr(self, command)(params)

    async def post(self, command=""):
        """Dispatch POST requests the same way as GET."""
        command, *params = command.split("/")
        if not command:
            command = "about"
        await getattr(self, command)(params)

    def get_template_path(self):
        """Override to customize template path for each handler."""
        return DEFAULT_THEME_PATH

    def static(self):
        """Defining this method will automagically create a static handler pointing to local /static crystal dir"""
        pass
| bismuthfoundation/TornadoWallet | wallet/crystals/420_tesla/__init__.py | __init__.py | py | 7,537 | python | en | code | 14 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
8267950596 | from __future__ import annotations
import socket
import pytest
from kombu import Connection, Consumer, Exchange, Producer, Queue
class test_PyroTransport:
    """Unit tests for kombu's Pyro transport.

    Most cases need a running Pyro nameserver plus a Kombu broker and are
    therefore skipped by default.
    """

    def setup(self):
        # Fresh connection plus direct and fanout exchanges/queues per test.
        self.c = Connection(transport='pyro', virtual_host="kombu.broker")
        self.e = Exchange('test_transport_pyro')
        self.q = Queue('test_transport_pyro',
                       exchange=self.e,
                       routing_key='test_transport_pyro')
        self.q2 = Queue('test_transport_pyro2',
                        exchange=self.e,
                        routing_key='test_transport_pyro2')
        self.fanout = Exchange('test_transport_pyro_fanout', type='fanout')
        self.q3 = Queue('test_transport_pyro_fanout1',
                        exchange=self.fanout)
        self.q4 = Queue('test_transport_pyro_fanout2',
                        exchange=self.fanout)

    def test_driver_version(self):
        """The transport must report a non-empty driver version."""
        assert self.c.transport.driver_version()

    @pytest.mark.skip("requires running Pyro nameserver and Kombu Broker")
    def test_produce_consume_noack(self):
        """Publish ten messages and receive them all without acknowledgement."""
        channel = self.c.channel()
        producer = Producer(channel, self.e)
        consumer = Consumer(channel, self.q, no_ack=True)

        for i in range(10):
            producer.publish({'foo': i}, routing_key='test_transport_pyro')

        _received = []

        def callback(message_data, message):
            _received.append(message)

        consumer.register_callback(callback)
        consumer.consume()

        # Drain events until all ten messages have arrived.
        while 1:
            if len(_received) == 10:
                break
            self.c.drain_events()

        assert len(_received) == 10

    def test_drain_events(self):
        """drain_events must raise socket.timeout when nothing is pending."""
        with pytest.raises(socket.timeout):
            self.c.drain_events(timeout=0.1)

        c1 = self.c.channel()
        c2 = self.c.channel()
        with pytest.raises(socket.timeout):
            self.c.drain_events(timeout=0.1)
        del c1  # so pyflakes doesn't complain.
        del c2

    @pytest.mark.skip("requires running Pyro nameserver and Kombu Broker")
    def test_drain_events_unregistered_queue(self):
        """A message handed back by the transport cycle is still drainable."""
        c1 = self.c.channel()
        producer = self.c.Producer()
        consumer = self.c.Consumer([self.q2])

        producer.publish(
            {'hello': 'world'},
            declare=consumer.queues,
            routing_key=self.q2.routing_key,
            exchange=self.q2.exchange,
        )
        message = consumer.queues[0].get()._raw

        class Cycle:
            # Stub scheduling cycle that always hands back the prepared message.
            def get(self, callback, timeout=None):
                return (message, 'foo'), c1

        self.c.transport.cycle = Cycle()
        self.c.drain_events()

    @pytest.mark.skip("requires running Pyro nameserver and Kombu Broker")
    def test_queue_for(self):
        """_queue_for must create a queue once and return the cached one after."""
        chan = self.c.channel()
        x = chan._queue_for('foo')
        assert x
        assert chan._queue_for('foo') is x
| celery/kombu | t/unit/transport/test_pyro.py | test_pyro.py | py | 2,892 | python | en | code | 2,643 | github-code | 6 | [
{
"api_name": "kombu.Connection",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "kombu.Exchange",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "kombu.Queue",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "kombu.Queue",
"line_n... |
70096737469 | import itertools
# 소수 판별 함수
# 2보다 작으면 무조건 False
# 2나 3이면 소수다.
# 2 또는 3으로 나눠지면 소수가 아니다.
# 10 미만의 값들은 2나 3으로만 나눠지지 않으면 된다.
# 그 이상의 수들에 대해서는 5, 7, 9, 11, 13, 15... 등의 홀수로 나눠보면 된다. 하지만 이미 3의 배수에 대해서는 앞에서 검사하기 때문에 5, 7, 11, 15,... 의 패턴으로 검사할 수 있다.
# N이 소수인지를 알고 싶으면 N의 제곱근까지만 검사해보면 된다.
def is_prime(n):
    """Trial-division primality test over 6k±1 candidates up to sqrt(n).

    2 and 3 are answered directly; even numbers and multiples of 3 are
    rejected up front, so only divisors of the form 6k-1 / 6k+1 remain.
    """
    if n < 2:
        return False
    if n in (2, 3):
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    if n < 9:
        return True
    limit = n ** 0.5
    divisor = 5
    while divisor <= limit:
        if n % divisor == 0 or n % (divisor + 2) == 0:
            return False
        divisor += 6
    return True
def solution(nums):
answer = 0
nums = list(itertools.combinations(nums,3))
for i in nums:
n = sum(i)
if is_prime(n):
answer += 1
return answer | YooGunWook/coding_test | practice_coding_old/연습문제/소수 만들기.py | 소수 만들기.py | py | 1,070 | python | ko | code | 0 | github-code | 6 | [
{
"api_name": "itertools.combinations",
"line_number": 31,
"usage_type": "call"
}
] |
10368918603 | import asyncio
import re
from os import remove
from pyUltroid.dB import DEVLIST
try:
from tabulate import tabulate
except ImportError:
tabulate = None
from telethon import events
from telethon.errors import MessageNotModifiedError
from telethon.tl.functions.contacts import (
BlockRequest,
GetBlockedRequest,
UnblockRequest,
)
from telethon.tl.functions.messages import ReportSpamRequest
from telethon.utils import get_display_name, resolve_bot_file_id
from pyUltroid.dB.base import KeyManager
from . import *
# ========================= CONSTANTS =============================
COUNT_PM = {}
LASTMSG = {}
WARN_MSGS = {}
U_WARNS = {}
if isinstance(udB.get_key("PMPERMIT"), (int, str)):
value = [udB.get_key("PMPERMIT")]
udB.set_key("PMPERMIT", value)
keym = KeyManager("PMPERMIT", cast=list)
Logm = KeyManager("LOGUSERS", cast=list)
PMPIC = udB.get_key("PMPIC")
LOG_CHANNEL = udB.get_key("LOG_CHANNEL")
UND = get_string("pmperm_1")
UNS = get_string("pmperm_2")
NO_REPLY = get_string("pmperm_3")
UNAPPROVED_MSG = "**PMSecurity of {ON}!**\n\n{UND}\n\nYou have {warn}/{twarn} warnings!"
if udB.get_key("PM_TEXT"):
UNAPPROVED_MSG = (
"**PMSecurity of {ON}!**\n\n"
+ udB.get_key("PM_TEXT")
+ "\n\nYou have {warn}/{twarn} warnings!"
)
# 1
WARNS = udB.get_key("PMWARNS") or 4
PMCMDS = [
f"{HNDLR}a",
f"{HNDLR}approve",
f"{HNDLR}da",
f"{HNDLR}disapprove",
f"{HNDLR}block",
f"{HNDLR}unblock",
]
_not_approved = {}
_to_delete = {}
my_bot = asst.me.username
def update_pm(userid, message, warns_given):
    """Remember the last warning message and warn count shown to a user.

    ``dict.update`` can never raise KeyError, so the previous try/except
    guards around these calls were dead code; plain item assignment is
    equivalent and clearer.
    """
    WARN_MSGS[userid] = message
    U_WARNS[userid] = warns_given
async def delete_pm_warn_msgs(chat: int):
    """Delete the pending PMPermit warning message for `chat`, if one exists."""
    try:
        pending_msg = _to_delete[chat]
        await pending_msg.delete()
    except KeyError:
        # Nothing was tracked for this chat.
        pass
# =================================================================
if udB.get_key("PMLOG"):
@ultroid_cmd(
pattern="logpm$",
)
async def _(e):
if not e.is_private:
return await e.eor("`Use me in Private.`", time=3)
if not Logm.contains(e.chat_id):
return await e.eor("`Wasn't logging msgs from here.`", time=3)
Logm.remove(e.chat_id)
return await e.eor("`Now I Will log msgs from here.`", time=3)
@ultroid_cmd(
pattern="nologpm$",
)
async def _(e):
if not e.is_private:
return await e.eor("`Use me in Private.`", time=3)
if Logm.contains(e.chat_id):
return await e.eor("`Wasn't logging msgs from here.`", time=3)
Logm.add(e.chat_id)
return await e.eor("`Now I Won't log msgs from here.`", time=3)
@ultroid_bot.on(
events.NewMessage(
incoming=True,
func=lambda e: e.is_private,
),
)
async def permitpm(event):
user = await event.get_sender()
if user.bot or user.is_self or user.verified or Logm.contains(user.id):
return
await event.forward_to(udB.get_key("PMLOGGROUP") or LOG_CHANNEL)
if udB.get_key("PMSETTING"):
if udB.get_key("AUTOAPPROVE"):
@ultroid_bot.on(
events.NewMessage(
outgoing=True,
func=lambda e: e.is_private and e.out and not e.text.startswith(HNDLR),
),
)
async def autoappr(e):
miss = await e.get_chat()
if miss.bot or miss.is_self or miss.verified or miss.id in DEVLIST:
return
if keym.contains(miss.id):
return
keym.add(miss.id)
await delete_pm_warn_msgs(miss.id)
try:
await ultroid_bot.edit_folder(miss.id, folder=0)
except BaseException:
pass
try:
await asst.edit_message(
LOG_CHANNEL,
_not_approved[miss.id],
f"#AutoApproved : <b>OutGoing Message.\nUser : {inline_mention(miss, html=True)}</b> [<code>{miss.id}</code>]",
parse_mode="html",
)
except KeyError:
await asst.send_message(
LOG_CHANNEL,
f"#AutoApproved : <b>OutGoing Message.\nUser : {inline_mention(miss, html=True)}</b> [<code>{miss.id}</code>]",
parse_mode="html",
)
except MessageNotModifiedError:
pass
@ultroid_bot.on(
events.NewMessage(
incoming=True,
func=lambda e: e.is_private
and e.sender_id not in DEVLIST
and not e.out
and not e.sender.bot
and not e.sender.is_self
and not e.sender.verified,
)
)
async def permitpm(event):
inline_pm = Redis("INLINE_PM") or False
user = event.sender
if not keym.contains(user.id) and event.text != UND:
if Redis("MOVE_ARCHIVE"):
try:
await ultroid_bot.edit_folder(user.id, folder=1)
except BaseException as er:
LOGS.info(er)
if event.media and not udB.get_key("DISABLE_PMDEL"):
await event.delete()
name = user.first_name
fullname = get_display_name(user)
username = f"@{user.username}"
mention = inline_mention(user)
count = keym.count()
try:
wrn = COUNT_PM[user.id] + 1
await asst.edit_message(
udB.get_key("LOG_CHANNEL"),
_not_approved[user.id],
f"Incoming PM from **{mention}** [`{user.id}`] with **{wrn}/{WARNS}** warning!",
buttons=[
Button.inline("Approve PM", data=f"approve_{user.id}"),
Button.inline("Block PM", data=f"block_{user.id}"),
],
)
except KeyError:
_not_approved[user.id] = await asst.send_message(
udB.get_key("LOG_CHANNEL"),
f"Incoming PM from **{mention}** [`{user.id}`] with **1/{WARNS}** warning!",
buttons=[
Button.inline("Approve PM", data=f"approve_{user.id}"),
Button.inline("Block PM", data=f"block_{user.id}"),
],
)
wrn = 1
except MessageNotModifiedError:
wrn = 1
if user.id in LASTMSG:
prevmsg = LASTMSG[user.id]
if event.text != prevmsg:
if "PMSecurity" in event.text or "**PMSecurity" in event.text:
return
await delete_pm_warn_msgs(user.id)
message_ = UNAPPROVED_MSG.format(
ON=OWNER_NAME,
warn=wrn,
twarn=WARNS,
UND=UND,
name=name,
fullname=fullname,
username=username,
count=count,
mention=mention,
)
update_pm(user.id, message_, wrn)
if inline_pm:
results = await ultroid_bot.inline_query(
my_bot, f"ip_{user.id}"
)
try:
_to_delete[user.id] = await results[0].click(
user.id, reply_to=event.id, hide_via=True
)
except Exception as e:
LOGS.info(str(e))
elif PMPIC:
_to_delete[user.id] = await ultroid_bot.send_file(
user.id,
PMPIC,
caption=message_,
)
else:
_to_delete[user.id] = await ultroid_bot.send_message(
user.id, message_
)
else:
await delete_pm_warn_msgs(user.id)
message_ = UNAPPROVED_MSG.format(
ON=OWNER_NAME,
warn=wrn,
twarn=WARNS,
UND=UND,
name=name,
fullname=fullname,
username=username,
count=count,
mention=mention,
)
update_pm(user.id, message_, wrn)
if inline_pm:
try:
results = await ultroid_bot.inline_query(
my_bot, f"ip_{user.id}"
)
_to_delete[user.id] = await results[0].click(
user.id, reply_to=event.id, hide_via=True
)
except Exception as e:
LOGS.info(str(e))
elif PMPIC:
_to_delete[user.id] = await ultroid_bot.send_file(
user.id,
PMPIC,
caption=message_,
)
else:
_to_delete[user.id] = await ultroid_bot.send_message(
user.id, message_
)
LASTMSG.update({user.id: event.text})
else:
await delete_pm_warn_msgs(user.id)
message_ = UNAPPROVED_MSG.format(
ON=OWNER_NAME,
warn=wrn,
twarn=WARNS,
UND=UND,
name=name,
fullname=fullname,
username=username,
count=count,
mention=mention,
)
update_pm(user.id, message_, wrn)
if inline_pm:
try:
results = await ultroid_bot.inline_query(
my_bot, f"ip_{user.id}"
)
_to_delete[user.id] = await results[0].click(
user.id, reply_to=event.id, hide_via=True
)
except Exception as e:
LOGS.info(str(e))
elif PMPIC:
_to_delete[user.id] = await ultroid_bot.send_file(
user.id,
PMPIC,
caption=message_,
)
else:
_to_delete[user.id] = await ultroid_bot.send_message(
user.id, message_
)
LASTMSG.update({user.id: event.text})
if user.id not in COUNT_PM:
COUNT_PM.update({user.id: 1})
else:
COUNT_PM[user.id] = COUNT_PM[user.id] + 1
if COUNT_PM[user.id] >= WARNS:
await delete_pm_warn_msgs(user.id)
_to_delete[user.id] = await event.respond(UNS)
try:
del COUNT_PM[user.id]
del LASTMSG[user.id]
except KeyError:
await asst.send_message(
udB.get_key("LOG_CHANNEL"),
"PMPermit is messed! Pls restart the bot!!",
)
return LOGS.info("COUNT_PM is messed.")
await ultroid_bot(BlockRequest(user.id))
await ultroid_bot(ReportSpamRequest(peer=user.id))
await asst.edit_message(
udB.get_key("LOG_CHANNEL"),
_not_approved[user.id],
f"**{mention}** [`{user.id}`] was Blocked for spamming.",
)
@ultroid_cmd(pattern="(start|stop|clear)archive$", fullsudo=True)
async def _(e):
x = e.pattern_match.group(1).strip()
if x == "start":
udB.set_key("MOVE_ARCHIVE", "True")
await e.eor("Now I will move new Unapproved DM's to archive", time=5)
elif x == "stop":
udB.set_key("MOVE_ARCHIVE", "False")
await e.eor("Now I won't move new Unapproved DM's to archive", time=5)
elif x == "clear":
try:
await e.client.edit_folder(unpack=1)
await e.eor("Unarchived all chats", time=5)
except Exception as mm:
await e.eor(str(mm), time=5)
@ultroid_cmd(pattern="(a|approve)(?: |$)", fullsudo=True)
async def approvepm(apprvpm):
if apprvpm.reply_to_msg_id:
user = (await apprvpm.get_reply_message()).sender
elif apprvpm.is_private:
user = await apprvpm.get_chat()
else:
return await apprvpm.edit(NO_REPLY)
if user.id in DEVLIST:
return await eor(
apprvpm,
"Lol, He is my Developer\nHe is auto Approved",
)
if not keym.contains(user.id):
keym.add(user.id)
try:
await delete_pm_warn_msgs(user.id)
await apprvpm.client.edit_folder(user.id, folder=0)
except BaseException:
pass
await eod(
apprvpm,
f"<b>{inline_mention(user, html=True)}</b> <code>approved to PM!</code>",
parse_mode="html",
)
try:
await asst.edit_message(
udB.get_key("LOG_CHANNEL"),
_not_approved[user.id],
f"#APPROVED\n\n<b>{inline_mention(user, html=True)}</b> [<code>{user.id}</code>] <code>was approved to PM you!</code>",
buttons=[
Button.inline("Disapprove PM", data=f"disapprove_{user.id}"),
Button.inline("Block", data=f"block_{user.id}"),
],
parse_mode="html",
)
except KeyError:
_not_approved[user.id] = await asst.send_message(
udB.get_key("LOG_CHANNEL"),
f"#APPROVED\n\n<b>{inline_mention(user, html=True)}</b> [<code>{user.id}</code>] <code>was approved to PM you!</code>",
buttons=[
Button.inline("Disapprove PM", data=f"disapprove_{user.id}"),
Button.inline("Block", data=f"block_{user.id}"),
],
parse_mode="html",
)
except MessageNotModifiedError:
pass
else:
await apprvpm.eor("`User may already be approved.`", time=5)
@ultroid_cmd(pattern="(da|disapprove)(?: |$)", fullsudo=True)
async def disapprovepm(e):
if e.reply_to_msg_id:
user = (await e.get_reply_message()).sender
elif e.is_private:
user = await e.get_chat()
else:
return await e.edit(NO_REPLY)
if user.id in DEVLIST:
return await eor(
e,
"`Lol, He is my Developer\nHe Can't Be DisApproved.`",
)
if keym.contains(user.id):
keym.remove(user.id)
await eod(
e,
f"<b>{inline_mention(user, html=True)}</b> <code>Disapproved to PM!</code>",
parse_mode="html",
)
try:
await asst.edit_message(
udB.get_key("LOG_CHANNEL"),
_not_approved[user.id],
f"#DISAPPROVED\n\n<b>{inline_mention(user, html=True)}</b> [<code>{user.id}</code>] <code>was disapproved to PM you.</code>",
buttons=[
Button.inline("Approve PM", data=f"approve_{user.id}"),
Button.inline("Block", data=f"block_{user.id}"),
],
parse_mode="html",
)
except KeyError:
_not_approved[user.id] = await asst.send_message(
udB.get_key("LOG_CHANNEL"),
f"#DISAPPROVED\n\n<b>{inline_mention(user, html=True)}</b> [<code>{user.id}</code>] <code>was disapproved to PM you.</code>",
buttons=[
Button.inline("Approve PM", data=f"approve_{user.id}"),
Button.inline("Block", data=f"block_{user.id}"),
],
parse_mode="html",
)
except MessageNotModifiedError:
pass
else:
await eod(
e,
f"<b>{inline_mention(user, html=True)}</b> <code>was never approved!</code>",
parse_mode="html",
)
@ultroid_cmd(pattern="block( (.*)|$)", fullsudo=True)
async def blockpm(block):
match = block.pattern_match.group(1).strip()
if block.reply_to_msg_id:
user = (await block.get_reply_message()).sender_id
elif match:
try:
user = await block.client.parse_id(match)
except Exception as er:
return await block.eor(str(er))
elif block.is_private:
user = block.chat_id
else:
return await eor(block, NO_REPLY, time=10)
await block.client(BlockRequest(user))
aname = await block.client.get_entity(user)
await block.eor(f"{inline_mention(aname)} [`{user}`] `has been blocked!`")
try:
keym.remove(user)
except AttributeError:
pass
try:
await asst.edit_message(
udB.get_key("LOG_CHANNEL"),
_not_approved[user],
f"#BLOCKED\n\n{inline_mention(aname)} [`{user}`] has been **blocked**.",
buttons=[
Button.inline("UnBlock", data=f"unblock_{user}"),
],
)
except KeyError:
_not_approved[user] = await asst.send_message(
udB.get_key("LOG_CHANNEL"),
f"#BLOCKED\n\n{inline_mention(aname)} [`{user}`] has been **blocked**.",
buttons=[
Button.inline("UnBlock", data=f"unblock_{user}"),
],
)
except MessageNotModifiedError:
pass
@ultroid_cmd(pattern="unblock( (.*)|$)", fullsudo=True)
async def unblockpm(event):
match = event.pattern_match.group(1).strip()
reply = await event.get_reply_message()
if reply:
user = reply.sender_id
elif match:
if match == "all":
msg = await event.eor(get_string("com_1"))
u_s = await event.client(GetBlockedRequest(0, 0))
count = len(u_s.users)
if not count:
return await eor(msg, "__You have not blocked Anyone...__")
for user in u_s.users:
await asyncio.sleep(1)
await event.client(UnblockRequest(user.id))
# GetBlockedRequest return 20 users at most.
if count < 20:
return await eor(msg, f"__Unblocked {count} Users!__")
while u_s.users:
u_s = await event.client(GetBlockedRequest(0, 0))
for user in u_s.users:
await asyncio.sleep(3)
await event.client(UnblockRequest(user.id))
count += len(u_s.users)
return await eor(msg, f"__Unblocked {count} users.__")
try:
user = await event.client.parse_id(match)
except Exception as er:
return await event.eor(str(er))
elif event.is_private:
user = event.chat_id
else:
return await event.eor(NO_REPLY, time=10)
try:
await event.client(UnblockRequest(user))
aname = await event.client.get_entity(user)
await event.eor(f"{inline_mention(aname)} [`{user}`] `has been UnBlocked!`")
except Exception as et:
return await event.eor(f"ERROR - {et}")
try:
await asst.edit_message(
udB.get_key("LOG_CHANNEL"),
_not_approved[user],
f"#UNBLOCKED\n\n{inline_mention(aname)} [`{user}`] has been **unblocked**.",
buttons=[
Button.inline("Block", data=f"block_{user}"),
],
)
except KeyError:
_not_approved[user] = await asst.send_message(
udB.get_key("LOG_CHANNEL"),
f"#UNBLOCKED\n\n{inline_mention(aname)} [`{user}`] has been **unblocked**.",
buttons=[
Button.inline("Block", data=f"block_{user}"),
],
)
except MessageNotModifiedError:
pass
@ultroid_cmd(pattern="listapproved$", owner=True)
async def list_approved(event):
xx = await event.eor(get_string("com_1"))
all = keym.get()
if not all:
return await xx.eor("`You haven't approved anyone yet!`", time=5)
users = []
for i in all:
try:
name = get_display_name(await ultroid_bot.get_entity(i))
except BaseException:
name = ""
users.append([name.strip(), str(i)])
with open("approved_pms.txt", "w") as list_appr:
if tabulate:
list_appr.write(
tabulate(users, headers=["UserName", "UserID"], showindex="always")
)
else:
text = "".join(f"[{user[-1]}] - {user[0]}" for user in users)
list_appr.write(text)
await event.reply(
f"List of users approved by [{OWNER_NAME}](tg://user?id={OWNER_ID})",
file="approved_pms.txt",
)
await xx.delete()
remove("approved_pms.txt")
@callback(
re.compile(
b"approve_(.*)",
),
from_users=[ultroid_bot.uid],
)
async def apr_in(event):
uid = int(event.data_match.group(1).decode("UTF-8"))
if uid in DEVLIST:
await event.edit("It's a dev! Approved!")
if not keym.contains(uid):
keym.add(uid)
try:
await ultroid_bot.edit_folder(uid, folder=0)
except BaseException:
pass
try:
user = await ultroid_bot.get_entity(uid)
except BaseException:
return await event.delete()
await event.edit(
f"#APPROVED\n\n<b>{inline_mention(user, html=True)}</b> [<code>{user.id}</code>] <code>was approved to PM you!</code>",
buttons=[
[
Button.inline("Disapprove PM", data=f"disapprove_{uid}"),
Button.inline("Block", data=f"block_{uid}"),
],
],
parse_mode="html",
)
await delete_pm_warn_msgs(uid)
await event.answer("Approved.", alert=True)
else:
await event.edit(
"`User may already be approved.`",
buttons=[
[
Button.inline("Disapprove PM", data=f"disapprove_{uid}"),
Button.inline("Block", data=f"block_{uid}"),
],
],
)
@callback(
re.compile(
b"disapprove_(.*)",
),
from_users=[ultroid_bot.uid],
)
async def disapr_in(event):
uid = int(event.data_match.group(1).decode("UTF-8"))
if keym.contains(uid):
keym.remove(uid)
try:
user = await ultroid_bot.get_entity(uid)
except BaseException:
return await event.delete()
await event.edit(
f"#DISAPPROVED\n\n<b>{inline_mention(user, html=True)}</b> [<code>{user.id}</code>] <code>was disapproved to PM you!</code>",
buttons=[
[
Button.inline("Approve PM", data=f"approve_{uid}"),
Button.inline("Block", data=f"block_{uid}"),
],
],
parse_mode="html",
)
await event.answer("Disapproved.", alert=True)
else:
await event.edit(
"`User was never approved!`",
buttons=[
[
Button.inline("Disapprove PM", data=f"disapprove_{uid}"),
Button.inline("Block", data=f"block_{uid}"),
],
],
)
@callback(
    re.compile(
        b"block_(.*)",
    ),
    from_users=[ultroid_bot.uid],
)
async def blck_in(event):
    """Inline-button handler: block the encoded user on Telegram."""
    uid = int(event.data_match.group(1).decode("UTF-8"))
    try:
        await ultroid_bot(BlockRequest(uid))
    except BaseException:
        # Best-effort: the user may already be blocked or inaccessible.
        pass
    try:
        user = await ultroid_bot.get_entity(uid)
    except BaseException:
        return await event.delete()
    await event.edit(
        # NOTE(review): sibling handlers tag messages "#APPROVED"/"#UNBLOCKED";
        # this one says plain "BLOCKED" without the '#' — confirm if intended.
        f"BLOCKED\n\n<b>{inline_mention(user, html=True)}</b> [<code>{user.id}</code>] <code>was blocked!</code>",
        buttons=Button.inline("UnBlock", data=f"unblock_{uid}"),
        parse_mode="html",
    )
    await event.answer("Blocked.", alert=True)
@callback(
    re.compile(
        b"unblock_(.*)",
    ),
    from_users=[ultroid_bot.uid],
)
async def unblck_in(event):
    """Inline-button handler: unblock the encoded user on Telegram."""
    uid = int(event.data_match.group(1).decode("UTF-8"))
    try:
        await ultroid_bot(UnblockRequest(uid))
    except BaseException:
        # Best-effort: ignore failures (e.g. user was not blocked).
        pass
    try:
        user = await ultroid_bot.get_entity(uid)
    except BaseException:
        return await event.delete()
    await event.edit(
        f"#UNBLOCKED\n\n<b>{inline_mention(user, html=True)}</b> [<code>{user.id}</code>] <code>was unblocked!</code>",
        buttons=Button.inline("Block", data=f"block_{uid}"),
        parse_mode="html",
    )
    await event.answer("Unblocked.", alert=True)
@callback("deletedissht")
async def ytfuxist(e):
    """Inline-button handler: acknowledge and delete the pressed message."""
    try:
        await e.answer("Deleted.")
        await e.delete()
    except BaseException:
        # Fallback: delete via the client API when the event object cannot.
        await ultroid_bot.delete_messages(e.chat_id, e.id)
@in_pattern(re.compile("ip_(.*)"), owner=True)
async def in_pm_ans(event):
    """Inline-query handler: build the PMPermit card for the given user id.

    Resolves the user's warn counter and custom warn message, then answers
    the inline query with a photo/gif/document/article result (depending on
    what ``PMPIC`` points at) carrying Warns/count buttons.

    Fix vs. original: the fallback warn message was a plain string literal
    containing "{OWNER_NAME}", so users saw the raw placeholder; it is now
    an f-string, consistent with the other OWNER_NAME interpolations.
    """
    from_user = int(event.pattern_match.group(1).strip())
    try:
        warns = U_WARNS[from_user]
    except Exception as e:
        LOGS.info(e)
        warns = "?"
    try:
        msg_ = WARN_MSGS[from_user]
    except KeyError:
        # f-string so the owner's actual name is interpolated.
        msg_ = f"**PMSecurity of {OWNER_NAME}**"
    wrns = f"{warns}/{WARNS}"
    buttons = [
        [
            Button.inline("Warns", data=f"admin_only{from_user}"),
            Button.inline(wrns, data=f"don_{wrns}"),
        ]
    ]
    include_media = True
    mime_type, res = None, None
    cont = None
    try:
        ext = PMPIC.split(".")[-1].lower()
    except (AttributeError, IndexError):
        ext = None
    if ext in ["img", "jpg", "png"]:
        _type = "photo"
        mime_type = "image/jpg"
    elif ext in ["mp4", "mkv", "gif"]:
        mime_type = "video/mp4"
        _type = "gif"
    else:
        # No recognizable extension: PMPIC may be a bot file id instead.
        try:
            res = resolve_bot_file_id(PMPIC)
        except ValueError:
            pass
        if res:
            res = [
                await event.builder.document(
                    res,
                    title="Inline PmPermit",
                    description="~ @TeamUltroid",
                    text=msg_,
                    buttons=buttons,
                    link_preview=False,
                )
            ]
        else:
            _type = "article"
            include_media = False
    if not res:
        if include_media:
            cont = types.InputWebDocument(PMPIC, 0, mime_type, [])
        res = [
            event.builder.article(
                title="Inline PMPermit.",
                type=_type,
                text=msg_,
                description="@TeamUltroid",
                include_media=include_media,
                buttons=buttons,
                thumb=cont,
                content=cont,
            )
        ]
    await event.answer(res, switch_pm="• Ultroid •", switch_pm_param="start")
@callback(re.compile("admin_only(.*)"), from_users=[ultroid_bot.uid])
async def _admin_tools(event):
    """Owner-only: swap the card's buttons for Approve/Block/Back actions."""
    chat = int(event.pattern_match.group(1).strip())
    await event.edit(
        buttons=[
            [
                Button.inline("Approve PM", data=f"approve_{chat}"),
                Button.inline("Block PM", data=f"block_{chat}"),
            ],
            [Button.inline("« Back", data=f"pmbk_{chat}")],
        ],
    )
@callback(re.compile("don_(.*)"))
async def _mejik(e):
    """Show an alert popup with the current/total warn counts.

    The callback data encodes them as "current/total".
    """
    data = e.pattern_match.group(1).strip().decode("utf-8").split("/")
    text = "👮♂ Warn Count : " + data[0]
    text += "\n🤖 Total Warn Count : " + data[1]
    await e.answer(text, alert=True)
@callback(re.compile("pmbk_(.*)"))
async def edt(event):
    """'« Back' handler: restore the Warns/count buttons on the card."""
    from_user = int(event.pattern_match.group(1).strip())
    try:
        warns = U_WARNS[from_user]
    except Exception as e:
        LOGS.info(str(e))
        # No recorded warns for this user yet.
        warns = "0"
    wrns = f"{warns}/{WARNS}"
    await event.edit(
        buttons=[
            [
                Button.inline("Warns", data=f"admin_only{from_user}"),
                Button.inline(wrns, data=f"don_{wrns}"),
            ]
        ],
    )
| TeamUltroid/Ultroid | plugins/pmpermit.py | pmpermit.py | py | 29,216 | python | en | code | 2,615 | github-code | 6 | [
{
"api_name": "tabulate.tabulate",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pyUltroid.dB.base.KeyManager",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pyUltroid.dB.base.KeyManager",
"line_number": 35,
"usage_type": "call"
},
{
"api_n... |
5648159483 |
import sys
from typing import List, Optional, Tuple, cast
import unittest
def how_construct(target_string: str, strings: List[str]) -> Optional[List[str]]:
    """Return one sequence of words from *strings* whose concatenation is
    *target_string*, or ``None`` when no such sequence exists.

    Tabulated dynamic programming: ``table[k]`` holds a word list building
    the first ``k`` characters of the target, or ``None`` if unreachable.
    The empty target is trivially constructible (``[]``).
    """
    size = len(target_string) + 1
    table: List[Optional[List[str]]] = [None] * size
    table[0] = []  # the empty prefix needs no words
    for start in range(size):
        if table[start] is None:
            continue  # this prefix length is not reachable
        for word in strings:
            end = start + len(word)
            # Extend the known prefix when `word` matches at this offset.
            if end < size and target_string[start:end] == word:
                table[end] = [*cast(List[str], table[start]), word]
    return table[len(target_string)]
class SolutionTest(unittest.TestCase):
    """Fixture-driven tests for :func:`how_construct`."""

    def test_solution(self):
        """Check a solvable, an unsolvable, and the empty-target case."""
        # Large limit kept for parity with the recursive variants of this kata.
        sys.setrecursionlimit(10000)
        # Each fixture is ((target, word_list), expected_solution_or_None).
        fixtures = [
            (
                ("abcdef", ["ab", "abc", "cd", "def", "abcd"]),
                ["abc", "def"],
            ),
            (
                ("skateboard", ["bo", "rd", "ate", "t", "ska", "sk", "boar"]),
                None,
            ),
            (
                ("", ["cat", "dog", "mouse"]),
                [],
            ),
        ]
        for inputs, output in fixtures:
            solution = how_construct(*inputs)
            if solution:
                # Order-insensitive comparison for non-empty solutions.
                self.assertEqual(sorted(output), sorted(solution))
            else:
                # Covers both None and the empty list (falsy) results.
                self.assertEqual(output, solution)
| bradtreloar/freeCodeCamp_DP_problems | problems/tabulated/how_construct.py | how_construct.py | py | 1,311 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.cast",
"line_number":... |
40367677442 | import pygame
import random
from Farm import Farm
from Lab import Lab
from Armor import Armor
from PowerPlant import PowerPlant
from Battery import Battery
from Engine import Engine
from Command_module import Comand_Module
from Warehouse import Warehouse
from Laser import Laser
from Biome import Biome
from Asteroid import Asteroid
from Container import Container
class Ship():
    """Player ship: a grid of unit sprites (engines, batteries, lasers, ...)
    plus the resource bookkeeping that ties them together.

    Units register themselves into ``every_single_unit``/``storages`` via
    their constructors (each takes ``(self, grid_x, grid_y)``) — presumably;
    the registration happens inside the unit classes, verify there.
    """

    def __init__(self, screen):
        """Build the starting ship layout and prime all storages."""
        self.x = 150
        self.y = 75
        self.distance = 0
        self.aim_distance = 1000000
        self.velocity = 10
        self.under_control = True
        # 14x30 grid of cells; 'n' marks an empty cell.
        self.map = [['n' for _ in range(30)] for _ in range(14)]
        self.resourses = {'Fe': 100, 'Cu': 50, 'O2': 50, 'CO2': 50, 'Al': 50, 'Si': 50, 'U': 50, 'H2O': 50,
                          'food': 50, 'energy': 0, 'science': 0}
        # All units grouped by category; populated by the unit constructors.
        self.every_single_unit = {'energy': [], 'commands': [], 'food': [], 'storages': [], 'engines': [],
                                  'science': [], 'defense': [], 'cabins': [],
                                  'armor': []}
        self.storages = {'energy': [], 'science': [], 'storages': []}
        self.group = pygame.sprite.Group()
        self.cannons = []
        self.comand_modules = []
        self.humans = 10
        self.cell_size = 30
        self.screen = screen
        # Starting layout: local names are discarded — the constructors
        # register each unit into the ship's collections.
        eng = Engine(self, 14, 7)
        eng1 = Engine(self, 14, 9)
        plant1 = PowerPlant(self, 18, 7)
        plant2 = PowerPlant(self, 18, 9)
        self.comand_module = Comand_Module(self, 16, 11)
        bat1 = Battery(self, 20, 7)
        bat2 = Battery(self, 20, 9)
        biome1 = Biome(self, 22, 7)
        biome2 = Biome(self, 22, 9)
        lab1 = Lab(self, 17, 6)
        farm = Farm(self, 24, 7)
        ware = Warehouse(self, 20, 6)
        # The starting warehouse is pre-filled with a large stockpile.
        ware.charges = {'Fe': 10000, 'Cu': 10000, 'O2': 10000, 'CO2': 10000, 'Al': 10000, 'Si': 10000, 'U': 10000,
                        'H2O': 10000, 'food': 10000}
        arm = Armor(self, 23, 6)
        arm = Armor(self, 23, 7)
        arm = Armor(self, 23, 8)
        laser1 = Laser(self, 3, 1)
        laser2 = Laser(self, 8, 12)
        # Collect every registered unit into the drawing/collision group.
        for i in self.every_single_unit.keys():
            for a in self.every_single_unit[i]:
                self.group.add(a)
        # Prime storages once so resource counters reflect stored charges.
        for i in self.storages.keys():
            for unit in self.storages[i]:
                unit.input()
        # new_group tracks units still physically connected to the command module.
        self.new_group = pygame.sprite.Group()
        self.new_group.add(self.comand_module)
        # Unit classes whose removal must also update the storages registry.
        self.storages_types = [Battery, Lab]

    def destroy(self, unit):
        """Remove *unit* from every registry and mark it non-functional."""
        self.group.remove(unit)
        self.every_single_unit[unit.cat].remove(unit)
        if type(unit) in self.storages_types:
            self.storages[unit.cat].remove(unit)
        unit.working = False

    def dfs(self, sprite, visited):
        """Depth-first walk over touching sprites, filling ``new_group``
        with everything reachable from *sprite* (connectivity check)."""
        visited.append(sprite)
        for i in pygame.sprite.spritecollide(sprite, self.group, False):
            if i not in visited:
                self.new_group.add(i)
                self.dfs(i, visited)

    def blt(self):
        """Refresh unit images and draw the whole ship onto the screen."""
        self.surf = pygame.Surface((self.cell_size * len(self.map[0]), self.cell_size * len(self.map)), pygame.SRCALPHA)
        for i in self.every_single_unit.keys():
            for unit in self.every_single_unit[i]:
                unit.new_image()
        self.group.draw(self.screen)

    def all_systems_check(self):
        """Per-tick simulation step: cull dead/disconnected units, then
        recompute resources by cycling every unit's output/do/input."""
        # Destroy anything that ran out of health.
        for i in self.group.sprites():
            if i.health <= 0:
                self.destroy(i)
        # Units not reachable from the command module are lost.
        self.dfs(self.comand_module, [])
        for unit in self.group:
            if unit not in self.new_group.sprites():
                self.destroy(unit)
        self.new_group = pygame.sprite.Group()
        self.new_group.add(self.comand_module)
        # Reset counters; they are rebuilt from unit outputs below.
        self.resourses = {'Fe': 0, 'Cu': 0, 'O2': 0, 'CO2': 0, 'Al': 0, 'Si': 0, 'U': 0, 'H2O': 0,
                          'food': 0, 'energy': 0, 'science': 0}
        self.humans = 0
        for i in self.every_single_unit['cabins']:
            i.output()
        for i in self.storages.keys():
            for unit in self.storages[i]:
                unit.output()
        # The ship is controllable while at least one command module works.
        self.under_control = False
        for i in self.comand_modules:
            if i.working:
                self.under_control = True
        for cat in self.every_single_unit.keys():
            for unit in self.every_single_unit[cat]:
                unit.do()
        for i in self.storages.keys():
            for unit in self.storages[i]:
                unit.input()
        for i in self.every_single_unit['cabins']:
            i.input()

    def change(self, x, y):
        """Toggle the working state of the unit under screen point (x, y)."""
        for unit in self.group.sprites():
            if unit.rect.collidepoint(x, y):
                if unit.working:
                    unit.working = False
                else:
                    unit.working = True

    def move(self, nx, ox, ny, oy):
        """Shift the ship from old position (ox, oy) to new (nx, ny),
        dragging every unit sprite by the same delta."""
        self.x = nx
        self.y = ny
        for cat in self.every_single_unit.keys():
            for unit in self.every_single_unit[cat]:
                unit.rect.move_ip(nx - ox, ny - oy)

    def shoot(self, event_group):
        """Let each cannon engage the nearest object within 3.5x its radius:
        asteroids are shot, containers are grabbed."""
        for cannon in self.cannons:
            # NOTE(review): spritecollideany is evaluated twice per cannon;
            # the second call could return a different sprite — consider caching.
            if pygame.sprite.spritecollideany(cannon, event_group, pygame.sprite.collide_circle_ratio(3.5)) != None:
                for i in [pygame.sprite.spritecollideany(cannon, event_group, pygame.sprite.collide_circle_ratio(3.5))]:
                    if type(i) == Asteroid:
                        cannon.shoot(i)
                    elif type(i) == Container:
                        cannon.grab(i)
        # NOTE(review): randint(100, 100) always yields exactly 100 —
        # confirm whether a real range (e.g. randint(0, 100)) was intended.
        for i in self.resourses.keys():
            self.resourses[i] += random.randint(100, 100)
{
"api_name": "pygame.sprite.Group",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "Engine.Engine",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "Engine.Engine",
... |
44497013120 | from traceback import print_stack
from allure_commons.types import AttachmentType
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException, ElementNotVisibleException, ElementNotSelectableException
import allure
import SeleniumFrameWork.utilities.CustomLogger as cl
class BaseClass:
    """Selenium page-object base class.

    Wraps locator resolution, explicit waits and common element interactions
    with logging on success/failure and Allure screenshots on wait timeouts.
    ``locatorType`` accepted by every helper is one of:
    id, name, class, link, xpath, css, tag, plink.
    """

    # Shared logger used by every page object derived from this class.
    log = cl.customLogger()

    def __init__(self, driver):
        # driver: selenium WebDriver instance all helpers operate on.
        self.driver = driver

    def launchWebPage(self, url, title):
        """Open *url* and verify that *title* occurs in the page title."""
        try:
            self.driver.get(url)
            assert title in self.driver.title
            self.log.info("Web Page Launch with " + url)
        except:
            # NOTE(review): bare except also swallows the AssertionError,
            # so a wrong title is only logged, never raised — confirm intent.
            self.log.info("Web Page Not Launch with " + url)

    def getLocatorType(self, locatorType):
        """Map a short locator-type name to the selenium ``By`` constant.

        Returns False (not a ``By`` value) when the name is unknown.
        """
        locatorType = locatorType.lower()
        if locatorType == "id":
            return By.ID
        elif locatorType == "name":
            return By.NAME
        elif locatorType == "class":
            return By.CLASS_NAME
        elif locatorType == "link":
            return By.LINK_TEXT
        elif locatorType == "xpath":
            return By.XPATH
        elif locatorType == "css":
            return By.CSS_SELECTOR
        elif locatorType == "tag":
            return By.TAG_NAME
        elif locatorType == "plink":
            return By.PARTIAL_LINK_TEXT
        else:
            self.log.error(f"Locator Type {locatorType} entered not found")
            print_stack()
            return False

    def getWebElement(self, locatorValue, locatorType="id"):
        """Find a single element without waiting; returns None on failure."""
        webElement = None
        try:
            locatorType = locatorType.lower()
            locatorByType = self.getLocatorType(locatorType)
            webElement = self.driver.find_element(locatorByType, locatorValue)
            self.log.info(f"Web Element found with locator value {locatorValue} using locator type {locatorByType}")
        except:
            self.log.error(
                f"Web Element Not found with locator value {locatorValue} using locator type {locatorByType}")
            print_stack()
        return webElement

    def waitForElement(self, locatorValue, locatorType="id"):
        """Wait up to 25s (1s poll) for the element; screenshot + assert on failure.

        Raises:
            AssertionError: when the element never appears (``assert False``).
        """
        webElement = None
        try:
            locatorType = locatorType.lower()
            locatorByType = self.getLocatorType(locatorType)
            wait = WebDriverWait(self.driver, 25, poll_frequency=1,
                                 ignored_exceptions=[NoSuchElementException, ElementNotVisibleException,
                                                     ElementNotSelectableException])
            # webElement = self.driver.find_element(locatorByType, locatorValue)
            webElement = wait.until(lambda x: x.find_element(locatorByType, locatorValue))
            self.log.info(f"Web Element found with locator value {locatorValue} using locator type {locatorByType}")
        except:
            self.log.error(
                f"Web Element Not found with locator value {locatorValue} using locator type {locatorByType}")
            print_stack()
            # Attach a screenshot to the Allure report before failing.
            self.takeScreenshot(locatorType)
            assert False
        return webElement

    def clickOnElement(self, locatorValue, locatorType="id"):
        """Wait for the element, then click it; asserts on failure."""
        try:
            locatorType = locatorType.lower()
            webElement = self.waitForElement(locatorValue, locatorType)
            webElement.click()
            self.log.info(f"Click On Web Element with locator value {locatorValue} using locator type {locatorType}")
        except:
            self.log.error(
                f"Unable to Click On Element with locator value {locatorValue} using locator type {locatorType}")
            print_stack()
            assert False

    def sendText(self, text, locatorValue, locatorType="id"):
        """Wait for the element, then type *text* into it; asserts on failure."""
        try:
            locatorType = locatorType.lower()
            webElement = self.waitForElement(locatorValue, locatorType)
            webElement.send_keys(text)
            self.log.info(
                f"Send the text {text} in Web Element with locator value {locatorValue} using locator type {locatorType}")
        except:
            self.log.info(
                f"Unable to Send the text {text} in Web Element with locator value {locatorValue} using locator type {locatorType}")
            print_stack()
            self.takeScreenshot(locatorType)
            assert False

    def getText(self, locatorValue, locatorType="id"):
        """Return the element's visible text, or None when the lookup fails."""
        elementText = None
        try:
            locatorType = locatorType.lower()
            webElement = self.waitForElement(locatorValue, locatorType)
            elementText = webElement.text
            self.log.info(
                f"Got the text {elementText} in Web Element with locator value {locatorValue} using locator type {locatorType}")
        except:
            # NOTE(review): failure is logged at info level — error seems intended.
            self.log.info(
                f"Unable to get the text {elementText} in Web Element with locator value {locatorValue} using locator type {locatorType}")
            print_stack()
        return elementText

    def isElementDisplayed(self, locatorValue, locatorType="id"):
        """Return the element's is_displayed() state, or None on failure."""
        elementDisplayed = None
        try:
            locatorType = locatorType.lower()
            webElement = self.waitForElement(locatorValue, locatorType)
            elementDisplayed = webElement.is_displayed()
            self.log.info(
                f" Web Element is Displayed web page with locator value {locatorValue} using locator type {locatorType}")
        except:
            self.log.info(
                f" Web Element is Not Displayed web page with locator value {locatorValue} using locator type {locatorType}")
            print_stack()
        return elementDisplayed

    def scrollTo(self, locatorValue, locatorType="id"):
        """Scroll the viewport until the element is in view (ActionChains)."""
        actions = ActionChains(self.driver)
        try:
            locatorType = locatorType.lower()
            webElement = self.waitForElement(locatorValue, locatorType)
            actions.move_to_element(webElement).perform()
            self.log.info(
                f"Scrolled to WebElement with locator value {locatorValue} using locator type {locatorType}")
        except:
            self.log.info(
                f"Unable to Scroll to WebElement with locator value {locatorValue} using locator type {locatorType}")
            print_stack()

    def takeScreenshot(self, text):
        """Attach a PNG screenshot named *text* to the Allure report."""
        allure.attach(self.driver.get_screenshot_as_png(), name=text, attachment_type=AttachmentType.PNG)
| sudeepyadav5/SeleniumA2Z | SeleniumFrameWork/basepage/BasePage.py | BasePage.py | py | 6,476 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "SeleniumFrameWork.utilities.CustomLogger.customLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "SeleniumFrameWork.utilities.CustomLogger",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_num... |
36814841108 | import importlib
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_assets import Environment
from flask_socketio import SocketIO
from config import config
# Shared extension instances (app-factory pattern): created unbound here and
# attached to the application inside create_app() via init_app().
db = SQLAlchemy()
migrate = Migrate()
assets = Environment()
socketio = SocketIO()
def create_app(config_name='default'):
    """Application factory: build and fully wire a Flask app.

    Args:
        config_name: key into the ``config`` mapping selecting the
            configuration class to load ('default' when omitted).

    Returns:
        The configured Flask application instance.
    """
    app = Flask(__name__, static_url_path='')
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    __register_extensions(app, [db, assets, socketio])
    __register_blueprints(app, ['live'])
    # Imported here (not at module top) to avoid circular imports with
    # the app package.
    from app.utils.assets import bundles
    assets.register('main_css', bundles['main_css'])
    assets.register('main_js', bundles['main_js'])
    from app.cli import test, coverage, clean, lint
    app.cli.add_command(test)
    app.cli.add_command(coverage)
    app.cli.add_command(clean)
    app.cli.add_command(lint)
    return app
def __register_extensions(app, extensions):
    """Bind each Flask extension to *app*, then wire Flask-Migrate to the
    shared SQLAlchemy instance (migrate needs both app and db)."""
    for ext in extensions:
        ext.init_app(app)
    migrate.init_app(app, db)
def __register_blueprints(app, modules):
    """Import each named sub-package under ``app`` and register the
    blueprint it exposes as ``bp``."""
    for name in modules:
        package = importlib.import_module(f'app.{name}')
        app.register_blueprint(package.bp)
| reaper47/weather | app/__init__.py | __init__.py | py | 1,223 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask_migrate.Migrate",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask_assets.Environment",
"line_number": 11,
"usage_type": "call"
},
{
"api_nam... |
31106779539 | from datetime import datetime
import requests
import pandas as pd
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.operators.postgres_operator import PostgresOperator
from airflow.providers.postgres.hooks.postgres import PostgresHook
from psycopg2.extras import execute_values
import time
# Defaults applied to every task of the DAG below.
# NOTE(review): Airflow reads schedule_interval from the DAG object, not from
# default_args — as written, this schedule is likely ignored; confirm.
default_args = {
    'owner': 'JORGE',
    'start_date': datetime(2023, 5, 18),
    'schedule_interval': '0 0 * * *',
}
def obtener_datos():
    """Extract every episode from the Rick & Morty API and load it into
    the Redshift ``episodio`` table.

    Follows the API pagination until ``info.next`` is null, drops the
    columns not persisted, renames the rest to the table schema created by
    the ``crear_tabla_episodio`` task, and bulk-inserts with execute_values.

    Fixes vs. original: HTTP errors now fail fast via raise_for_status();
    the cursor and connection are closed in ``finally`` blocks so they are
    not leaked when the insert fails; a dead ``to_dict('records')`` call
    (result discarded) was removed.
    """
    url = 'https://rickandmortyapi.com/api/episode'
    datos_obtenidos = []
    while url is not None:
        response = requests.get(url)
        response.raise_for_status()  # surface HTTP failures instead of parsing an error body
        data = response.json()
        datos_obtenidos += data['results']
        url = data['info']['next']
    df_episodios = pd.DataFrame(datos_obtenidos)
    # 'characters' (list of URLs) and 'url' are not stored in the warehouse.
    df_episodios = df_episodios.drop(columns=['characters', 'url'])
    df_episodios.columns = ["id", "nombre_episodio", "fecha_aire", "episodio", "fecha_creacion"]
    hook = PostgresHook(postgres_conn_id='amazon_redshift')
    conn = hook.get_conn()
    try:
        cur = conn.cursor()
        try:
            tabla = "episodio"
            columns = ['id', 'nombre_episodio', 'fecha_aire', 'episodio', 'fecha_creacion']
            values = [tuple(x) for x in df_episodios.to_numpy()]
            insert_sql = f"INSERT INTO {tabla} ({', '.join(columns)}) VALUES %s"
            cur.execute("BEGIN")
            execute_values(cur, insert_sql, values)
            conn.commit()
        finally:
            cur.close()
    finally:
        conn.close()
# DAG definition: create the target table, then extract/load the episodes.
with DAG(
    default_args=default_args,
    dag_id='carga_de_episodios',
    description='Obtener datos de API, transformar y cargar en Redshift',
) as dag:
    # Task 1: (re)create the table so every run starts from a clean slate.
    crear_tabla = PostgresOperator(
        task_id='crear_tabla_episodio',
        postgres_conn_id='amazon_redshift',
        sql="""
        DROP TABLE IF EXISTS jorgeflores2311233_coderhouse.episodio;
        CREATE TABLE jorgeflores2311233_coderhouse.episodio(
        id INTEGER PRIMARY KEY,
        nombre_episodio VARCHAR(250),
        fecha_aire VARCHAR(250),
        episodio VARCHAR(250),
        fecha_creacion DATETIME
        );
        """
    )
    # Task 2: pull the API data and insert it into the table above.
    obtener_datos_episodios = PythonOperator(
        task_id='obtener_datos',
        python_callable=obtener_datos
    )
    # Table creation must finish before the load runs.
    crear_tabla >> obtener_datos_episodios
| jorge-flores-py/rick-morty | dags/dag_carga_automatica_episodios.py | dag_carga_automatica_episodios.py | py | 2,329 | python | es | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "airflow.providers.pos... |
25566549731 | from django.shortcuts import render
from .models import Product
from .forms import ProductForm
from django.http import HttpResponse
def list(request):
    """Render the product listing page with every stored product."""
    # The name shadows the builtin ``list`` but is kept: URL configuration
    # refers to this view by name.
    return render(request, 'product/list.html',
                  {'products': Product.objects.all()})
def save_product(request):
    """Persist a product submitted from the create form, then show the list.

    Fixes vs. original:
    * ``product.is_valid`` was referenced without calling it — a bound
      method is always truthy, so invalid forms crashed inside ``save()``.
      It is now called, and invalid submissions re-render the form.
    * the non-POST branch referenced an unbound ``product`` name and raised
      ``NameError``; it now renders an empty form instead.
    """
    if request.method == 'POST':
        form = ProductForm(request.POST)
        if form.is_valid():
            form.save()
            products = Product.objects.all()
            context = {'products': products, 'product': products}
            return render(request, 'product/list.html', context)
        # Invalid submission: show the form again with its validation errors.
        return render(request, 'layout/create.html', {'product': form})
    # Any non-POST request just shows an empty create form.
    return render(request, 'layout/create.html', {'product': ProductForm()})
def create(request):
    """Show an empty product creation form."""
    return render(request, 'product/create.html', {'form': ProductForm()})
def delete(request, id):
    """Delete the product with primary key *id*, then show the listing.

    NOTE(review): performing the deletion on GET is unconventional
    (destructive actions normally require POST); behaviour kept as-is
    because existing links depend on it.
    """
    if request.method == 'GET':
        Product.objects.get(id=id).delete()
    return render(request, 'product/list.html',
                  {'products': Product.objects.all()})
def update(request, id):
    """Edit an existing product: GET shows the pre-filled form, POST saves.

    Invalid POST data is silently ignored (no save) and the listing is
    shown regardless — same contract as before.
    """
    product = Product.objects.get(id=id)
    if request.method == 'GET':
        prefilled = ProductForm(instance=product)
        return render(request, 'product/update.html',
                      {'form': prefilled, 'id': id})
    if request.method == 'POST':
        form = ProductForm(request.POST, instance=product)
        if form.is_valid():
            form.save()
        return render(request, 'product/list.html',
                      {'products': Product.objects.all()})
{
"api_name": "models.Product.objects.all",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "models.Product.objects",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "models.Product",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "dj... |
9185132950 | import collections
from collections import abc
import getpass
import io
import itertools
import logging
import os
import socket
import struct
import sys
import threading
import time
import timeit
import traceback
import types
import warnings
from absl import flags
from absl.logging import converter
try:
from typing import NoReturn
except ImportError:
pass
FLAGS = flags.FLAGS

# Logging levels.
FATAL = converter.ABSL_FATAL
ERROR = converter.ABSL_ERROR
WARNING = converter.ABSL_WARNING
WARN = converter.ABSL_WARNING  # Deprecated name.
INFO = converter.ABSL_INFO
DEBUG = converter.ABSL_DEBUG

# Regex to match/parse log line prefixes.
ABSL_LOGGING_PREFIX_REGEX = (
    r'^(?P<severity>[IWEF])'
    r'(?P<month>\d\d)(?P<day>\d\d) '
    r'(?P<hour>\d\d):(?P<minute>\d\d):(?P<second>\d\d)'
    r'\.(?P<microsecond>\d\d\d\d\d\d) +'
    r'(?P<thread_id>-?\d+) '
    r'(?P<filename>[a-zA-Z<][\w._<>-]+):(?P<line>\d+)')

# Mask to convert integer thread ids to unsigned quantities for logging purposes
_THREAD_ID_MASK = 2 ** (struct.calcsize('L') * 8) - 1

# Extra property set on the LogRecord created by ABSLLogger when its level is
# CRITICAL/FATAL.
_ABSL_LOG_FATAL = '_absl_log_fatal'
# Extra prefix added to the log message when a non-absl logger logs a
# CRITICAL/FATAL message.
_CRITICAL_PREFIX = 'CRITICAL - '

# Used by findCaller to skip callers from */logging/__init__.py.
_LOGGING_FILE_PREFIX = os.path.join('logging', '__init__.')

# The ABSL logger instance, initialized in _initialize().
_absl_logger = None
# The ABSL handler instance, initialized in _initialize().
_absl_handler = None

# Maps Python-side level names to Abseil C++ numeric level strings.
_CPP_NAME_TO_LEVELS = {
    'debug': '0',  # Abseil C++ has no DEBUG level, mapping it to INFO here.
    'info': '0',
    'warning': '1',
    'warn': '1',
    'error': '2',
    'fatal': '3'
}

# Inverse of the above ('debug'/'warn' collapse, so only canonical names).
_CPP_LEVEL_TO_NAMES = {
    '0': 'info',
    '1': 'warning',
    '2': 'error',
    '3': 'fatal',
}
class _VerbosityFlag(flags.Flag):
  """Flag class for -v/--verbosity.

  Setting the flag value immediately propagates the new verbosity to the
  Python ``logging`` levels via ``_update_logging_levels``.
  """

  def __init__(self, *args, **kwargs):
    super(_VerbosityFlag, self).__init__(
        flags.IntegerParser(),
        flags.ArgumentSerializer(),
        *args, **kwargs)

  @property
  def value(self):
    # Current verbosity as an int.
    return self._value

  @value.setter
  def value(self, v):
    self._value = v
    # Keep the stdlib logging levels in sync with every assignment.
    self._update_logging_levels()

  def _update_logging_levels(self):
    """Updates absl logging levels to the current verbosity.

    Visibility: module-private
    """
    # No-op until _initialize() has created the absl logger.
    if not _absl_logger:
      return
    if self._value <= converter.ABSL_DEBUG:
      standard_verbosity = converter.absl_to_standard(self._value)
    else:
      # --verbosity is set to higher than 1 for vlog.
      standard_verbosity = logging.DEBUG - (self._value - 1)
    # Also update root level when absl_handler is used.
    if _absl_handler in logging.root.handlers:
      # Make absl logger inherit from the root logger. absl logger might have
      # a non-NOTSET value if logging.set_verbosity() is called at import time.
      _absl_logger.setLevel(logging.NOTSET)
      logging.root.setLevel(standard_verbosity)
    else:
      _absl_logger.setLevel(standard_verbosity)
class _LoggerLevelsFlag(flags.Flag):
  """Flag class for --logger_levels.

  The flag value is a mapping from logger name to level name; assigning it
  applies each level to the corresponding ``logging`` logger.
  """

  def __init__(self, *args, **kwargs):
    super(_LoggerLevelsFlag, self).__init__(
        _LoggerLevelsParser(),
        _LoggerLevelsSerializer(),
        *args, **kwargs)

  @property
  def value(self):
    # For lack of an immutable type, be defensive and return a copy.
    # Modifications to the dict aren't supported and won't have any affect.
    # While Py3 could use MappingProxyType, that isn't deepcopy friendly, so
    # just return a copy.
    return self._value.copy()

  @value.setter
  def value(self, v):
    # None (unset flag) normalizes to an empty mapping.
    self._value = {} if v is None else v
    self._update_logger_levels()

  def _update_logger_levels(self):
    # Visibility: module-private.
    # This is called by absl.app.run() during initialization.
    for name, level in self._value.items():
      logging.getLogger(name).setLevel(level)
class _LoggerLevelsParser(flags.ArgumentParser):
  """Parser for --logger_levels flag."""

  def parse(self, value):
    """Parses ``name:level`` CSV text; mappings are passed through as-is."""
    if isinstance(value, abc.Mapping):
      return value
    # An OrderedDict keeps entry order so re-serialization is deterministic.
    parsed = collections.OrderedDict()
    for chunk in value.split(','):
      chunk = chunk.strip()
      if not chunk:
        continue
      logger_name, level_name = chunk.split(':', 1)
      parsed[logger_name.strip()] = level_name.strip()
    return parsed
class _LoggerLevelsSerializer(object):
"""Serializer for --logger_levels flag."""
def serialize(self, value):
if isinstance(value, str):
return value
return ','.join(
'{}:{}'.format(name, level) for name, level in value.items())
class _StderrthresholdFlag(flags.Flag):
  """Flag class for --stderrthreshold.

  Accepts level names ('debug'..'fatal', 'warn') and Abseil C++ numeric
  strings ('0'..'3'); the stored value is always a canonical name.
  """

  def __init__(self, *args, **kwargs):
    super(_StderrthresholdFlag, self).__init__(
        flags.ArgumentParser(),
        flags.ArgumentSerializer(),
        *args, **kwargs)

  @property
  def value(self):
    # Canonical level name, e.g. 'fatal'.
    return self._value

  @value.setter
  def value(self, v):
    if v in _CPP_LEVEL_TO_NAMES:
      # --stderrthreshold also accepts numeric strings whose values are
      # Abseil C++ log levels.
      cpp_value = int(v)
      v = _CPP_LEVEL_TO_NAMES[v]  # Normalize to strings.
    elif v.lower() in _CPP_NAME_TO_LEVELS:
      v = v.lower()
      if v == 'warn':
        v = 'warning'  # Use 'warning' as the canonical name.
      cpp_value = int(_CPP_NAME_TO_LEVELS[v])
    else:
      raise ValueError(
          '--stderrthreshold must be one of (case-insensitive) '
          "'debug', 'info', 'warning', 'error', 'fatal', "
          "or '0', '1', '2', '3', not '%s'" % v)
    # NOTE(review): cpp_value is computed but never used in this setter —
    # presumably kept for parity with the C++ flag handling; confirm.
    self._value = v
flags.DEFINE_boolean('logtostderr',
False,
'Should only log to stderr?', allow_override_cpp=True)
flags.DEFINE_boolean('alsologtostderr',
False,
'also log to stderr?', allow_override_cpp=True)
flags.DEFINE_string('log_dir',
os.getenv('TEST_TMPDIR', ''),
'directory to write logfiles into',
allow_override_cpp=True)
flags.DEFINE_flag(_VerbosityFlag(
'verbosity', -1,
'Logging verbosity level. Messages logged at this level or lower will '
'be included. Set to 1 for debug logging. If the flag was not set or '
'supplied, the value will be changed from the default of -1 (warning) to '
'0 (info) after flags are parsed.',
short_name='v', allow_hide_cpp=True))
flags.DEFINE_flag(
_LoggerLevelsFlag(
'logger_levels', {},
'Specify log level of loggers. The format is a CSV list of '
'`name:level`. Where `name` is the logger name used with '
'`logging.getLogger()`, and `level` is a level name (INFO, DEBUG, '
'etc). e.g. `myapp.foo:INFO,other.logger:DEBUG`'))
flags.DEFINE_flag(_StderrthresholdFlag(
'stderrthreshold', 'fatal',
'log messages at this level, or more severe, to stderr in '
'addition to the logfile. Possible values are '
"'debug', 'info', 'warning', 'error', and 'fatal'. "
'Obsoletes --alsologtostderr. Using --alsologtostderr '
'cancels the effect of this flag. Please also note that '
'this flag is subject to --verbosity and requires logfile '
'not be stderr.', allow_hide_cpp=True))
flags.DEFINE_boolean('showprefixforinfo', True,
'If False, do not prepend prefix to info messages '
'when it\'s logged to stderr, '
'--verbosity is set to INFO level, '
'and python logging is used.')
def get_verbosity():
  """Returns the logging verbosity."""
  verbosity_flag = FLAGS['verbosity']
  return verbosity_flag.value
def set_verbosity(v):
  """Sets the logging verbosity.

  Causes all messages of level <= v to be logged,
  and all messages of level > v to be silently discarded.

  Args:
    v: int|str, the verbosity level as an integer or string. Legal string
        values are those that can be coerced to an integer as well as
        case-insensitive 'debug', 'info', 'warning', 'error', and 'fatal'.
  """
  try:
    level = int(v)
  except ValueError:
    # Not numeric: treat as a level name such as 'info' or 'FATAL'.
    level = converter.ABSL_NAMES[v.upper()]
  FLAGS.verbosity = level
def set_stderrthreshold(s):
  """Sets the stderr threshold to the value passed in.

  Args:
    s: str|int, valid strings values are case-insensitive 'debug',
        'info', 'warning', 'error', and 'fatal'; valid integer values are
        logging.DEBUG|INFO|WARNING|ERROR|FATAL.

  Raises:
    ValueError: Raised when s is an invalid value.
  """
  # Integer standard-logging levels are translated to their absl names.
  if s in converter.ABSL_LEVELS:
    FLAGS.stderrthreshold = converter.ABSL_LEVELS[s]
    return
  # Otherwise accept a (case-insensitive) level name as-is.
  if isinstance(s, str) and s.upper() in converter.ABSL_NAMES:
    FLAGS.stderrthreshold = s
    return
  raise ValueError(
      'set_stderrthreshold only accepts integer absl logging level '
      'from -3 to 1, or case-insensitive string values '
      "'debug', 'info', 'warning', 'error', and 'fatal'. "
      'But found "{}" ({}).'.format(s, type(s)))
def fatal(msg, *args, **kwargs):
  # type: (Any, Any, Any) -> NoReturn
  """Logs a fatal message.

  Args:
    msg: str, the message to be logged.
    *args: The args to be substituted into the msg.
    **kwargs: May contain exc_info to add exception traceback to message.
  """
  log(FATAL, msg, *args, **kwargs)


def error(msg, *args, **kwargs):
  """Logs an error message."""
  log(ERROR, msg, *args, **kwargs)


def warning(msg, *args, **kwargs):
  """Logs a warning message."""
  log(WARNING, msg, *args, **kwargs)


def warn(msg, *args, **kwargs):
  """Deprecated, use 'warning' instead."""
  # stacklevel 2 points the DeprecationWarning at the caller, not here.
  warnings.warn("The 'warn' function is deprecated, use 'warning' instead",
                DeprecationWarning, 2)
  log(WARNING, msg, *args, **kwargs)


def info(msg, *args, **kwargs):
  """Logs an info message."""
  log(INFO, msg, *args, **kwargs)


def debug(msg, *args, **kwargs):
  """Logs a debug message."""
  log(DEBUG, msg, *args, **kwargs)


def exception(msg, *args, **kwargs):
  """Logs an exception, with traceback and message."""
  # Forces exc_info so the active exception's traceback is included.
  error(msg, *args, **kwargs, exc_info=True)
# Counter to keep track of number of log entries per token.
_log_counter_per_token = {}


def _get_next_log_count_per_token(token):
  """Wrapper for _log_counter_per_token. Thread-safe.

  Args:
    token: The token for which to look up the count.

  Returns:
    The number of times this function has been called with
    *token* as an argument (starting at 0).
  """
  # dict.setdefault is atomic (unlike a defaultdict insert), so concurrent
  # first calls for the same token end up sharing a single counter.
  counter = _log_counter_per_token.setdefault(token, itertools.count())
  return next(counter)
def log_every_n(level, msg, n, *args):
  """Logs ``msg % args`` at level 'level' once per 'n' times.

  Logs the 1st call, (N+1)st call, (2N+1)st call,  etc.
  Not threadsafe.

  Args:
    level: int, the absl logging level at which to log.
    msg: str, the message to be logged.
    n: int, the number of times this should be called before it is logged.
    *args: The args to be substituted into the msg.
  """
  # The caller's (file, line) is the dedup token, so each call site has
  # its own independent counter.
  count = _get_next_log_count_per_token(get_absl_logger().findCaller())
  log_if(level, msg, not (count % n), *args)
# Keeps track of the last log time of the given token.
# Note: must be a dict since set/get is atomic in CPython.
# Note: entries are never released as their number is expected to be low.
_log_timer_per_token = {}


def _seconds_have_elapsed(token, num_seconds):
  """Tests if 'num_seconds' have passed since 'token' was requested.

  Not strictly thread-safe - may log with the wrong frequency if called
  concurrently from multiple threads. Accuracy depends on resolution of
  'timeit.default_timer()'.

  Always returns True on the first call for a given 'token'.

  Args:
    token: The token for which to look up the count.
    num_seconds: The number of seconds to test for.

  Returns:
    Whether it has been >= 'num_seconds' since 'token' was last requested.
  """
  now = timeit.default_timer()
  last = _log_timer_per_token.get(token, None)
  # Still inside the quiet window: do not refresh the timestamp.
  if last is not None and (now - last) < num_seconds:
    return False
  _log_timer_per_token[token] = now
  return True
def log_every_n_seconds(level, msg, n_seconds, *args):
  """Logs ``msg % args`` at level ``level`` iff ``n_seconds`` elapsed since last call.

  Logs the first call, logs subsequent calls if 'n' seconds have elapsed since
  the last logging call from the same call site (file + line). Not thread-safe.

  Args:
    level: int, the absl logging level at which to log.
    msg: str, the message to be logged.
    n_seconds: float or int, seconds which should elapse before logging again.
    *args: The args to be substituted into the msg.
  """
  caller = get_absl_logger().findCaller()
  log_if(level, msg, _seconds_have_elapsed(caller, n_seconds), *args)
def log_first_n(level, msg, n, *args):
  """Logs ``msg % args`` at level ``level`` only first ``n`` times.

  Not threadsafe.

  Args:
    level: int, the absl logging level at which to log.
    msg: str, the message to be logged.
    n: int, the maximal number of times the message is logged.
    *args: The args to be substituted into the msg.
  """
  caller = get_absl_logger().findCaller()
  times_called = _get_next_log_count_per_token(caller)
  log_if(level, msg, times_called < n, *args)
def log_if(level, msg, condition, *args):
  """Logs ``msg % args`` at level ``level`` only if condition is fulfilled."""
  if not condition:
    return
  log(level, msg, *args)
def log(level, msg, *args, **kwargs):
  """Logs ``msg % args`` at absl logging level ``level``.

  If no args are given just print msg, ignoring any interpolation specifiers.

  Args:
    level: int, the absl logging level at which to log the message
        (logging.DEBUG|INFO|WARNING|ERROR|FATAL). While some C++ verbose logging
        level constants are also supported, callers should prefer explicit
        logging.vlog() calls for such purpose.
    msg: str, the message to be logged.
    *args: The args to be substituted into the msg.
    **kwargs: May contain exc_info to add exception traceback to message.
  """
  if level > converter.ABSL_DEBUG:
    # Even though this function supports level that is greater than 1, users
    # should use logging.vlog instead for such cases.
    # Treat this as vlog, 1 is equivalent to DEBUG.
    standard_level = converter.STANDARD_DEBUG - (level - 1)
  else:
    # Clamp anything more severe than FATAL to FATAL before converting.
    if level < converter.ABSL_FATAL:
      level = converter.ABSL_FATAL
    standard_level = converter.absl_to_standard(level)

  # Match standard logging's behavior. Before use_absl_handler() and
  # logging is configured, there is no handler attached on _absl_logger nor
  # logging.root. So logs go nowhere.
  if not logging.root.handlers:
    logging.basicConfig()

  _absl_logger.log(standard_level, msg, *args, **kwargs)
def vlog(level, msg, *args, **kwargs):
  """Log ``msg % args`` at C++ vlog level ``level``.

  Args:
    level: int, the C++ verbose logging level at which to log the message,
        e.g. 1, 2, 3, 4... While absl level constants are also supported,
        callers should prefer logging.log|debug|info|... calls for such purpose.
    msg: str, the message to be logged.
    *args: The args to be substituted into the msg.
    **kwargs: May contain exc_info to add exception traceback to message.
  """
  # Pure delegation: log() maps vlog levels above ABSL_DEBUG into the
  # standard library's sub-DEBUG range.
  log(level, msg, *args, **kwargs)
def vlog_is_on(level):
  """Checks if vlog is enabled for the given level in caller's source file.

  Args:
    level: int, the C++ verbose logging level at which to log the message,
        e.g. 1, 2, 3, 4... While absl level constants are also supported,
        callers should prefer level_debug|level_info|... calls for
        checking those.

  Returns:
    True if logging is turned on for that level.
  """
  if level > converter.ABSL_DEBUG:
    # Vlog level N maps to the standard range below DEBUG; users should use
    # this only for genuine vlog levels and the level_* helpers otherwise.
    standard_level = converter.STANDARD_DEBUG - (level - 1)
  else:
    # Clamp anything more severe than FATAL to FATAL before converting.
    standard_level = converter.absl_to_standard(max(level, converter.ABSL_FATAL))
  return _absl_logger.isEnabledFor(standard_level)
def flush():
  """Flushes all log files."""
  get_absl_handler().flush()


def level_debug():
  """Returns True if debug logging is turned on."""
  return get_verbosity() >= DEBUG


def level_info():
  """Returns True if info logging is turned on."""
  return get_verbosity() >= INFO


def level_warning():
  """Returns True if warning logging is turned on."""
  return get_verbosity() >= WARNING


level_warn = level_warning  # Deprecated alias, kept for backward compatibility.


def level_error():
  """Returns True if error logging is turned on."""
  return get_verbosity() >= ERROR
def get_log_file_name(level=INFO):
  """Returns the name of the log file.

  For Python logging, only one file is used and level is ignored. And it returns
  empty string if it logs to stderr/stdout or the log stream has no `name`
  attribute.

  Args:
    level: int, the absl.logging level.

  Returns:
    str, the stream's file name, or '' when there is no named log file.

  Raises:
    ValueError: Raised when `level` has an invalid value.
  """
  if level not in converter.ABSL_LEVELS:
    raise ValueError('Invalid absl.logging level {}'.format(level))
  stream = get_absl_handler().python_handler.stream
  if stream in (sys.stderr, sys.stdout) or not hasattr(stream, 'name'):
    return ''
  return stream.name
def find_log_dir_and_names(program_name=None, log_dir=None):
  """Computes the directory and filename prefix for log file.

  Args:
    program_name: str|None, the filename part of the path to the program that
        is running without its extension. e.g: if your program is called
        ``usr/bin/foobar.py`` this method should probably be called with
        ``program_name='foobar`` However, this is just a convention, you can
        pass in any string you want, and it will be used as part of the
        log filename. If you don't pass in anything, the default behavior
        is as described in the example. In python standard logging mode,
        the program_name will be prepended with ``py_`` if it is the
        ``program_name`` argument is omitted.
    log_dir: str|None, the desired log directory.

  Returns:
    (log_dir, file_prefix, symlink_prefix)

  Raises:
    FileNotFoundError: raised in Python 3 when it cannot find a log directory.
    OSError: raised in Python 2 when it cannot find a log directory.
  """
  if not program_name:
    # Default to the basename of argv[0] without its extension, prefixed
    # with 'py_' so python logs never collide with the C++ logs of the
    # same program.
    base = os.path.basename(sys.argv[0])
    program_name = 'py_%s' % os.path.splitext(base)[0]

  actual_log_dir = find_log_dir(log_dir=log_dir)

  try:
    username = getpass.getuser()
  except KeyError:
    # This can happen, e.g. when running under docker w/o passwd file.
    # os.getuid does not exist on Windows, hence the fallback.
    username = str(os.getuid()) if hasattr(os, 'getuid') else 'unknown'

  hostname = socket.gethostname()
  file_prefix = '%s.%s.%s.log' % (program_name, hostname, username)
  return actual_log_dir, file_prefix, program_name
def find_log_dir(log_dir=None):
  """Returns the most suitable directory to put log files into.

  Args:
    log_dir: str|None, if specified, the logfile(s) will be created in that
        directory. Otherwise if the --log_dir command-line flag is provided,
        the logfile will be created in that directory. Otherwise the logfile
        will be created in a standard location.

  Raises:
    FileNotFoundError: raised in Python 3 when it cannot find a log directory.
    OSError: raised in Python 2 when it cannot find a log directory.
  """
  # Candidate directories, tried in order. An explicit argument wins over
  # the --log_dir flag, which wins over the standard fallbacks.
  if log_dir:
    candidates = [log_dir]
  elif FLAGS['log_dir'].value:
    candidates = [FLAGS['log_dir'].value]
  else:
    candidates = ['/tmp/', './']

  # Return the first candidate that is an existing, writable directory.
  for candidate in candidates:
    if os.path.isdir(candidate) and os.access(candidate, os.W_OK):
      return candidate
  raise FileNotFoundError(
      "Can't find a writable directory for logs, tried %s" % candidates)
def get_absl_log_prefix(record):
  """Returns the absl log prefix for the log record.

  The prefix has the fixed absl/glog layout:
  ``<severity initial><MMDD> <HH:MM:SS.ffffff> <thread id> <file>:<line>] ``.

  Args:
    record: logging.LogRecord, the record to get prefix for.
  """
  created_tuple = time.localtime(record.created)
  # Fractional part of the timestamp, in microseconds.
  created_microsecond = int(record.created % 1.0 * 1e6)

  critical_prefix = ''
  level = record.levelno
  if _is_non_absl_fatal_record(record):
    # When the level is FATAL, but not logged from absl, lower the level so
    # it's treated as ERROR.
    level = logging.ERROR
    critical_prefix = _CRITICAL_PREFIX
  severity = converter.get_initial_for_level(level)

  return '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] %s' % (
      severity,
      created_tuple.tm_mon,
      created_tuple.tm_mday,
      created_tuple.tm_hour,
      created_tuple.tm_min,
      created_tuple.tm_sec,
      created_microsecond,
      _get_thread_id(),
      record.filename,
      record.lineno,
      critical_prefix)
def skip_log_prefix(func):
  """Skips reporting the prefix of a given function or name by :class:`~absl.logging.ABSLLogger`.

  This is a convenience wrapper function / decorator for
  :meth:`~absl.logging.ABSLLogger.register_frame_to_skip`.

  If a callable function is provided, only that function will be skipped.
  If a function name is provided, all functions with the same name in the
  file that this is called in will be skipped.

  This can be used as a decorator of the intended function to be skipped.

  Args:
    func: Callable function or its name as a string.

  Returns:
    func (the input, unchanged).

  Raises:
    ValueError: The input is callable but does not have a function code object.
    TypeError: The input is neither callable nor a string.
  """
  if isinstance(func, str):
    # A bare name: skip every function with that name in the caller's file.
    file_name = get_absl_logger().findCaller()[0]
    func_name = func
    func_lineno = None
  elif callable(func):
    func_code = getattr(func, '__code__', None)
    if func_code is None:
      raise ValueError('Input callable does not have a function code object.')
    file_name = func_code.co_filename
    func_name = func_code.co_name
    func_lineno = func_code.co_firstlineno
  else:
    raise TypeError('Input is neither callable nor a string.')
  ABSLLogger.register_frame_to_skip(file_name, func_name, func_lineno)
  return func
def _is_non_absl_fatal_record(log_record):
  # FATAL-or-above record that did NOT come from ABSLLogger.log (which is
  # the only code setting the _ABSL_LOG_FATAL marker).
  return (log_record.levelno >= logging.FATAL and
          not log_record.__dict__.get(_ABSL_LOG_FATAL, False))


def _is_absl_fatal_record(log_record):
  # FATAL-or-above record created via ABSLLogger.log; PythonHandler.emit
  # aborts the process when it sees one of these.
  return (log_record.levelno >= logging.FATAL and
          log_record.__dict__.get(_ABSL_LOG_FATAL, False))


# Indicates if we still need to warn about pre-init logs going to stderr.
_warn_preinit_stderr = True
class PythonHandler(logging.StreamHandler):
  """The handler class used by Abseil Python logging implementation."""

  def __init__(self, stream=None, formatter=None):
    super(PythonHandler, self).__init__(stream)
    self.setFormatter(formatter or PythonFormatter())

  def start_logging_to_file(self, program_name=None, log_dir=None):
    """Starts logging messages to files instead of standard error.

    Opens a timestamped, pid-suffixed log file in append mode and points
    self.stream at it, then best-effort refreshes a ``<prefix>.INFO``
    symlink to the new file.
    """
    FLAGS.logtostderr = False

    actual_log_dir, file_prefix, symlink_prefix = find_log_dir_and_names(
        program_name=program_name, log_dir=log_dir)

    basename = '%s.INFO.%s.%d' % (
        file_prefix,
        time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time())),
        os.getpid())
    filename = os.path.join(actual_log_dir, basename)

    self.stream = open(filename, 'a', encoding='utf-8')

    # os.symlink is not available on Windows Python 2.
    if getattr(os, 'symlink', None):
      # Create a symlink to the log file with a canonical name.
      symlink = os.path.join(actual_log_dir, symlink_prefix + '.INFO')
      try:
        if os.path.islink(symlink):
          os.unlink(symlink)
        os.symlink(os.path.basename(filename), symlink)
      except EnvironmentError:
        # If it fails, we're sad but it's no error. Commonly, this
        # fails because the symlink was created by another user and so
        # we can't modify it
        pass

  def use_absl_log_file(self, program_name=None, log_dir=None):
    """Conditionally logs to files, based on --logtostderr."""
    if FLAGS['logtostderr'].value:
      self.stream = sys.stderr
    else:
      self.start_logging_to_file(program_name=program_name, log_dir=log_dir)

  def flush(self):
    """Flushes all log files."""
    self.acquire()
    try:
      self.stream.flush()
    except (EnvironmentError, ValueError):
      # A ValueError is thrown if we try to flush a closed file.
      pass
    finally:
      self.release()

  def _log_to_stderr(self, record):
    """Emits the record to stderr.

    This temporarily sets the handler stream to stderr, calls
    StreamHandler.emit, then reverts the stream back.

    Args:
      record: logging.LogRecord, the record to log.
    """
    # emit() is protected by a lock in logging.Handler, so we don't need to
    # protect here again.
    old_stream = self.stream
    self.stream = sys.stderr
    try:
      super(PythonHandler, self).emit(record)
    finally:
      self.stream = old_stream

  def emit(self, record):
    """Prints a record out to some streams.

    1. If ``FLAGS.logtostderr`` is set, it will print to ``sys.stderr`` ONLY.
    2. If ``FLAGS.alsologtostderr`` is set, it will print to ``sys.stderr``.
    3. If ``FLAGS.logtostderr`` is not set, it will log to the stream
       associated with the current thread.

    Args:
      record: :class:`logging.LogRecord`, the record to emit.
    """
    # People occasionally call logging functions at import time before
    # our flags may have even been defined yet, let alone even parsed, as we
    # rely on the C++ side to define some flags for us and app init to
    # deal with parsing. Match the C++ library behavior of notify and emit
    # such messages to stderr. It encourages people to clean-up and does
    # not hide the message.
    level = record.levelno
    if not FLAGS.is_parsed():  # Also implies "before flag has been defined".
      global _warn_preinit_stderr
      if _warn_preinit_stderr:
        sys.stderr.write(
            'WARNING: Logging before flag parsing goes to stderr.\n')
        _warn_preinit_stderr = False
      self._log_to_stderr(record)
    elif FLAGS['logtostderr'].value:
      self._log_to_stderr(record)
    else:
      super(PythonHandler, self).emit(record)
      # Mirror to stderr when requested or when the record is severe
      # enough, unless we are already writing to stderr.
      stderr_threshold = converter.string_to_standard(
          FLAGS['stderrthreshold'].value)
      if ((FLAGS['alsologtostderr'].value or level >= stderr_threshold) and
          self.stream != sys.stderr):
        self._log_to_stderr(record)
    # Die when the record is created from ABSLLogger and level is FATAL.
    if _is_absl_fatal_record(record):
      self.flush()  # Flush the log before dying.

      # In threaded python, sys.exit() from a non-main thread only
      # exits the thread in question.
      os.abort()

  def close(self):
    """Closes the stream to which we are writing."""
    self.acquire()
    try:
      self.flush()
      try:
        # Do not close the stream if it's sys.stderr|stdout. They may be
        # redirected or overridden to files, which should be managed by users
        # explicitly.
        user_managed = sys.stderr, sys.stdout, sys.__stderr__, sys.__stdout__
        if self.stream not in user_managed and (
            not hasattr(self.stream, 'isatty') or not self.stream.isatty()):
          self.stream.close()
      except ValueError:
        # A ValueError is thrown if we try to run isatty() on a closed file.
        pass
      super(PythonHandler, self).close()
    finally:
      self.release()
class ABSLHandler(logging.Handler):
  """Abseil Python logging module's log handler.

  Delegates all handler operations to a swappable "current" handler
  (by default the PythonHandler created in __init__), so the logging
  backend can be switched without detaching this handler from the root.
  """

  def __init__(self, python_logging_formatter):
    super(ABSLHandler, self).__init__()

    self._python_handler = PythonHandler(formatter=python_logging_formatter)
    self.activate_python_handler()

  def format(self, record):
    return self._current_handler.format(record)

  def setFormatter(self, fmt):
    self._current_handler.setFormatter(fmt)

  def emit(self, record):
    self._current_handler.emit(record)

  def flush(self):
    self._current_handler.flush()

  def close(self):
    super(ABSLHandler, self).close()
    self._current_handler.close()

  def handle(self, record):
    # Apply this handler's own filters before delegating.
    rv = self.filter(record)
    if rv:
      return self._current_handler.handle(record)
    return rv

  @property
  def python_handler(self):
    return self._python_handler

  def activate_python_handler(self):
    """Uses the Python logging handler as the current logging handler."""
    self._current_handler = self._python_handler

  def use_absl_log_file(self, program_name=None, log_dir=None):
    self._current_handler.use_absl_log_file(program_name, log_dir)

  def start_logging_to_file(self, program_name=None, log_dir=None):
    self._current_handler.start_logging_to_file(program_name, log_dir)
class PythonFormatter(logging.Formatter):
  """Formatter class used by :class:`~absl.logging.PythonHandler`."""

  def format(self, record):
    """Prepends the absl log prefix to the standard-formatted record.

    The prefix is suppressed for plain INFO records written to stderr at
    the default verbosity when --showprefixforinfo is false.

    Args:
      record: logging.LogRecord, the record to be formatted.

    Returns:
      The formatted string representing the record.
    """
    suppress_prefix = (
        not FLAGS['showprefixforinfo'].value and
        FLAGS['verbosity'].value == converter.ABSL_INFO and
        record.levelno == logging.INFO and
        _absl_handler.python_handler.stream == sys.stderr)
    prefix = '' if suppress_prefix else get_absl_log_prefix(record)
    return prefix + super(PythonFormatter, self).format(record)
class ABSLLogger(logging.getLoggerClass()):
  """A logger that will create LogRecords while skipping some stack frames.

  This class maintains an internal list of filenames and method names
  for use when determining who called the currently executing stack
  frame. Any method names from specific source files are skipped when
  walking backwards through the stack.

  Client code should use the register_frame_to_skip method to let the
  ABSLLogger know which method from which file should be
  excluded from the walk backwards through the stack.
  """
  # Shared across all instances: (file, name[, firstlineno]) tuples to skip.
  _frames_to_skip = set()

  def findCaller(self, stack_info=False, stacklevel=1):
    """Finds the frame of the calling method on the stack.

    This method skips any frames registered with the
    ABSLLogger and any methods from this file, and whatever
    method is currently being used to generate the prefix for the log
    line. Then it returns the file name, line number, and method name
    of the calling method. An optional fourth item may be returned,
    callers who only need things from the first three are advised to
    always slice or index the result rather than using direct unpacking
    assignment.

    Args:
      stack_info: bool, when True, include the stack trace as a fourth item
          returned. On Python 3 there are always four items returned - the
          fourth will be None when this is False. On Python 2 the stdlib
          base class API only returns three items. We do the same when this
          new parameter is unspecified or False for compatibility.

    Returns:
      (filename, lineno, methodname[, sinfo]) of the calling method.
    """
    f_to_skip = ABSLLogger._frames_to_skip
    # Use sys._getframe(2) instead of logging.currentframe(), it's slightly
    # faster because there is one less frame to traverse.
    frame = sys._getframe(2)  # pylint: disable=protected-access

    while frame:
      code = frame.f_code
      # Skip frames from this logging module itself and any frames
      # registered via register_frame_to_skip (with or without a line
      # number qualifier).
      if (_LOGGING_FILE_PREFIX not in code.co_filename and
          (code.co_filename, code.co_name,
           code.co_firstlineno) not in f_to_skip and
          (code.co_filename, code.co_name) not in f_to_skip):
        sinfo = None
        if stack_info:
          out = io.StringIO()
          out.write(u'Stack (most recent call last):\n')
          traceback.print_stack(frame, file=out)
          sinfo = out.getvalue().rstrip(u'\n')
        return (code.co_filename, frame.f_lineno, code.co_name, sinfo)
      frame = frame.f_back
    # NOTE(review): if every frame is skipped this falls through and
    # implicitly returns None; callers appear to assume a frame is found.

  def critical(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``CRITICAL``."""
    self.log(logging.CRITICAL, msg, *args, **kwargs)

  def fatal(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``FATAL``."""
    self.log(logging.FATAL, msg, *args, **kwargs)

  def error(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``ERROR``."""
    self.log(logging.ERROR, msg, *args, **kwargs)

  def warn(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``WARN``. Deprecated."""
    warnings.warn("The 'warn' method is deprecated, use 'warning' instead",
                  DeprecationWarning, 2)
    self.log(logging.WARN, msg, *args, **kwargs)

  def warning(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``WARNING``."""
    self.log(logging.WARNING, msg, *args, **kwargs)

  def info(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``INFO``."""
    self.log(logging.INFO, msg, *args, **kwargs)

  def debug(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``DEBUG``."""
    self.log(logging.DEBUG, msg, *args, **kwargs)

  def log(self, level, msg, *args, **kwargs):
    """Logs a message at a certain level substituting in the supplied arguments.

    This method behaves differently in python and c++ modes.

    Args:
      level: int, the standard logging level at which to log the message.
      msg: str, the text of the message to log.
      *args: The arguments to substitute in the message.
      **kwargs: The keyword arguments to substitute in the message.
    """
    if level >= logging.FATAL:
      # Add property to the LogRecord created by this logger.
      # This will be used by the ABSLHandler to determine whether it should
      # treat CRITICAL/FATAL logs as really FATAL.
      extra = kwargs.setdefault('extra', {})
      extra[_ABSL_LOG_FATAL] = True
    super(ABSLLogger, self).log(level, msg, *args, **kwargs)

  def handle(self, record):
    """Calls handlers without checking ``Logger.disabled``.

    Non-root loggers are set to disabled after setup with :func:`logging.config`
    if it's not explicitly specified. Historically, absl logging will not be
    disabled by that. To maintain this behavior, this function skips
    checking the ``Logger.disabled`` bit.

    This logger can still be disabled by adding a filter that filters out
    everything.

    Args:
      record: logging.LogRecord, the record to handle.
    """
    if self.filter(record):
      self.callHandlers(record)

  @classmethod
  def register_frame_to_skip(cls, file_name, function_name, line_number=None):
    """Registers a function name to skip when walking the stack.

    The :class:`~absl.logging.ABSLLogger` sometimes skips method calls on the
    stack to make the log messages meaningful in their appropriate context.
    This method registers a function from a particular file as one
    which should be skipped.

    Args:
      file_name: str, the name of the file that contains the function.
      function_name: str, the name of the function to skip.
      line_number: int, if provided, only the function with this starting line
          number will be skipped. Otherwise, all functions with the same name
          in the file will be skipped.
    """
    if line_number is not None:
      cls._frames_to_skip.add((file_name, function_name, line_number))
    else:
      cls._frames_to_skip.add((file_name, function_name))
def _get_thread_id():
  """Gets id of current thread, suitable for logging as an unsigned quantity.

  The quantity is made unsigned by masking with _THREAD_ID_MASK.

  Returns:
    Thread ID unique to this process (unsigned).
  """
  return threading.get_ident() & _THREAD_ID_MASK
def get_absl_logger():
  """Returns the absl logger instance."""
  # Module-level singleton created by _initialize() at import time.
  return _absl_logger


def get_absl_handler():
  """Returns the absl handler instance."""
  # Module-level singleton created by _initialize() at import time.
  return _absl_handler
def use_python_logging(quiet=False):
  """Uses the python implementation of the logging code.

  Args:
    quiet: No logging message about switching logging type.
  """
  get_absl_handler().activate_python_handler()
  if quiet:
    return
  info('Restoring pure python logging')
# One-shot guard: the stderr-handler cleanup below runs at most once.
_attempted_to_remove_stderr_stream_handlers = False


def use_absl_handler():
  """Uses the ABSL logging handler for logging.

  This method is called in :func:`app.run()<absl.app.run>` so the absl handler
  is used in absl apps.
  """
  global _attempted_to_remove_stderr_stream_handlers
  if not _attempted_to_remove_stderr_stream_handlers:
    # The absl handler logs to stderr by default. To prevent double logging to
    # stderr, the following code tries its best to remove other handlers that
    # emit to stderr. Those handlers are most commonly added when
    # logging.info/debug is called before calling use_absl_handler().
    handlers = [
        h for h in logging.root.handlers
        if isinstance(h, logging.StreamHandler) and h.stream == sys.stderr]
    for h in handlers:
      logging.root.removeHandler(h)
    _attempted_to_remove_stderr_stream_handlers = True

  absl_handler = get_absl_handler()
  if absl_handler not in logging.root.handlers:
    logging.root.addHandler(absl_handler)
    # Sync the standard logging levels with the absl verbosity flags.
    FLAGS['verbosity']._update_logging_levels()  # pylint: disable=protected-access
    FLAGS['logger_levels']._update_logger_levels()  # pylint: disable=protected-access
def _initialize():
  """Initializes loggers and handlers."""
  global _absl_logger, _absl_handler

  if _absl_logger:
    # Already initialized; idempotent so repeated calls are harmless.
    return

  # Temporarily swap in ABSLLogger so that only the 'absl' logger is built
  # from it; other libraries keep whatever logger class was installed.
  original_logger_class = logging.getLoggerClass()
  logging.setLoggerClass(ABSLLogger)
  _absl_logger = logging.getLogger('absl')
  logging.setLoggerClass(original_logger_class)

  python_logging_formatter = PythonFormatter()
  _absl_handler = ABSLHandler(python_logging_formatter)


# Module import side effect: create the absl logger/handler singletons.
_initialize()
| bazelbuild/bazel | third_party/py/abseil/absl/logging/__init__.py | __init__.py | py | 38,729 | python | en | code | 21,632 | github-code | 6 | [
{
"api_name": "absl.flags.FLAGS",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "absl.flags",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "absl.logging.converter.ABSL_FATAL",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name"... |
# Audio preprocessing walkthrough: waveform, FFT, spectrogram and MFCCs.
import librosa, librosa.display
import matplotlib.pyplot as plt
import numpy as np

file = "your-summer-day-5448.wav"

# --- Waveform ---
# signal is a 1-D numpy array with sr * duration samples.
signal, sr = librosa.load(file, sr=22050)
librosa.display.waveplot(signal, sr=sr)  # visualize the wave (amplitude vs. time)
plt.xlabel("Time")
plt.ylabel("Amplitude")
plt.show()

# --- Time domain -> frequency domain (Fourier transform) ---
fft = np.fft.fft(signal)  # complex spectrum (np array)
magnitude = np.abs(fft)  # contribution of each frequency to the sound
frequency = np.linspace(0, sr, len(magnitude))
# The FFT of a real signal is symmetric, so only the first half is kept.
left_frequency = frequency[:int(len(frequency) / 2)]
left_magnitude = magnitude[:int(len(magnitude) / 2)]
plt.plot(left_frequency, left_magnitude)
plt.xlabel("Frequency")
plt.ylabel("Magnitude")
plt.show()

# --- Spectrogram (amplitude as a function of frequency and time) ---
n_fft = 2048  # number of samples in each FFT window
hop_length = 512  # shift (in samples) between consecutive FFT windows
stft = librosa.core.stft(signal, hop_length=hop_length, n_fft=n_fft)
spectrogram = np.abs(stft)
log_spectrogram = librosa.amplitude_to_db(spectrogram)  # amplitude -> decibels
# specshow visualizes spectrogram-like data (x axis, y axis and a color label).
librosa.display.specshow(log_spectrogram, sr=sr, hop_length=hop_length)
plt.xlabel("Time")
plt.ylabel("Frequency")
plt.colorbar()  # amplitude is displayed by color
plt.show()

# --- MFCCs ---
MFCCS = librosa.feature.mfcc(signal, n_fft=n_fft, hop_length=hop_length, n_mfcc=13)
librosa.display.specshow(MFCCS, sr=sr, hop_length=hop_length)
plt.xlabel("Time")
plt.ylabel("MFCC")
plt.colorbar()
plt.show()
| yashi4001/ML_Basics | audio_preprocess.py | audio_preprocess.py | py | 1,735 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "librosa.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "librosa.display.waveplot",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "librosa.display",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.py... |
17651189647 | import telebot
from config import TOKEN, keys
from extensions import ExchangeException, Exchange
bot = telebot.TeleBot(TOKEN)
# Handle the /start command: reply with usage instructions.
@bot.message_handler(commands=['start'])
def start(message):
    # User-facing text is intentionally in Russian; do not change it.
    start = "Привет! Я бот, который может вернуть цену на определенное количество валюты.\n\n" \
            "Пример использования: <имя валюты, цену которой вы хотите узнать> " \
            "<имя валюты, в которой нужно узнать цену первой валюты> <количество первой валюты>\n\n" \
            "Команды:\n" \
            "/start - выводит инструкции по применению бота\n" \
            "/help - выводит список команд бота\n" \
            "/values - выводит информацию о всех доступных валютах\n\n" \
            "Пример запроса: Рубль доллар 100"
    bot.reply_to(message, start)
# Handle the /help command: reply with the bot's command list.
@bot.message_handler(commands=['help'])
def help(message):
    """Reply to /help with the command list (user-facing text is Russian)."""
    # Local renamed from `help` to `help_text`: the old name shadowed both
    # the builtin help() and this function's own name.
    help_text = "/start - выводит инструкции по применению бота\n" \
                "/help - выводит список команд бота\n" \
                "/values - выводит информацию о всех доступных валютах\n\n" \
                "Регистр значения не имеет.\n\n" \
                "Пример запроса: Рубль доллар 100"
    bot.reply_to(message, help_text)
# Handle the /values command: list every currency the bot supports.
@bot.message_handler(commands=['values'])
def values(message: telebot.types.Message):
    text = 'Доступные валюты:'  # "Available currencies:" header (user-facing)
    # Append each supported currency name on its own line.
    for key in keys.keys():
        text = '\n'.join((text, key,))
    bot.reply_to(message, text)
# Handle free-form text messages of the form "<from> <to> <amount>".
@bot.message_handler(content_types=['text'])
def get_price(message: telebot.types.Message):
    try:
        values = message.text.lower().split(' ')  # normalize to lower case
        if len(values) != 3:
            raise ExchangeException('Введите команду или 3 параметра')
        quote, base, amount = values
        total_base = Exchange.get_price(quote, base, amount)
    except ExchangeException as e:
        # User input error: echo the reason back.
        bot.reply_to(message, f'Ошибка пользователя.\n{e}')
    except Exception as e:
        # Anything else (API failure, parsing, ...): generic failure reply.
        bot.reply_to(message, f'Что-то пошло не так с {e}')
    else:
        text = f'Переводим {quote} в {base}\n{amount} {quote} = {total_base} {base}'
        bot.send_message(message.chat.id, text)


# Start long polling; this call blocks until the process is stopped.
bot.polling()
{
"api_name": "telebot.TeleBot",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "config.TOKEN",
"line_number": 5,
"usage_type": "argument"
},
{
"api_name": "telebot.types",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "config.keys.keys",
... |
39380955921 |
#%% Imports
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from collections import defaultdict
from helpers import pairwiseDistCorr,nn_reg,nn_arch,reconstructionError
from matplotlib import cm
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.random_projection import SparseRandomProjection, GaussianRandomProjection
from itertools import product
from helpers import get_data_from_csv
out = './RP/'
cmap = cm.get_cmap('Spectral')

np.random.seed(42)

# Load the wine-quality and handwritten-digit datasets, then standardize
# features (random projections and the downstream NN are scale-sensitive).
wineX, wineY = get_data_from_csv("./BASE/wine_trg.csv", n_features=11, sep=',', header=None)
digitX, digitY = get_data_from_csv("./BASE/digit_trg.csv", n_features=256, sep=',', header=None)

wineX = StandardScaler().fit_transform(wineX)
digitX = StandardScaler().fit_transform(digitX)

clusters = [2, 5, 10, 15, 20, 25, 30, 35, 40]
dims = [2, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60]
dims_wine = [i for i in range(2, 12)]

# Part 1: scree data. For 10 random seeds per target dimensionality, measure
# how well a sparse random projection preserves pairwise distances.
tmp = defaultdict(dict)
for i, dim in product(range(10), dims_wine):
    rp = SparseRandomProjection(random_state=i, n_components=dim)
    tmp[dim][i] = pairwiseDistCorr(rp.fit_transform(wineX), wineX)
tmp = pd.DataFrame(tmp).T
tmp.to_csv(out + 'wine scree1.csv')

tmp = defaultdict(dict)
for i, dim in product(range(10), dims):
    rp = SparseRandomProjection(random_state=i, n_components=dim)
    tmp[dim][i] = pairwiseDistCorr(rp.fit_transform(digitX), digitX)
tmp = pd.DataFrame(tmp).T
tmp.to_csv(out + 'digit scree1.csv')

# Same sweep, scored by reconstruction error instead of distance correlation.
tmp = defaultdict(dict)
for i, dim in product(range(10), dims_wine):
    rp = SparseRandomProjection(random_state=i, n_components=dim)
    rp.fit(wineX)
    tmp[dim][i] = reconstructionError(rp, wineX)
tmp = pd.DataFrame(tmp).T
tmp.to_csv(out + 'wine scree2.csv')

tmp = defaultdict(dict)
for i, dim in product(range(10), dims):
    rp = SparseRandomProjection(random_state=i, n_components=dim)
    rp.fit(digitX)
    tmp[dim][i] = reconstructionError(rp, digitX)
tmp = pd.DataFrame(tmp).T
tmp.to_csv(out + 'digit scree2.csv')

# Part 2: grid-search an RP -> MLP pipeline over projection size and NN
# hyperparameters; full CV results are dumped for later analysis.
grid = {'rp__n_components': dims_wine, 'NN__alpha': nn_reg, 'NN__hidden_layer_sizes': nn_arch}
rp = SparseRandomProjection(random_state=5)
mlp = MLPClassifier(activation='relu', max_iter=2000, early_stopping=True, random_state=5)
pipe = Pipeline([('rp', rp), ('NN', mlp)])
gs = GridSearchCV(pipe, grid, verbose=10, cv=5)

gs.fit(wineX, wineY)
tmp = pd.DataFrame(gs.cv_results_)
tmp.to_csv(out + 'wine dim red.csv')

grid = {'rp__n_components': dims, 'NN__alpha': nn_reg, 'NN__hidden_layer_sizes': nn_arch}
rp = SparseRandomProjection(random_state=5)
mlp = MLPClassifier(activation='relu', max_iter=2000, early_stopping=True, random_state=5)
pipe = Pipeline([('rp', rp), ('NN', mlp)])
gs = GridSearchCV(pipe, grid, verbose=10, cv=5)

gs.fit(digitX, digitY)
tmp = pd.DataFrame(gs.cv_results_)
tmp.to_csv(out + 'digit dim red.csv')

# Part 3: project each dataset to the dimensionality chosen from chart 2,
# re-attach labels, and dump for the clustering script to finish up.
dim = 6
rp = SparseRandomProjection(n_components=dim, random_state=5)
wineX2 = rp.fit_transform(wineX)
wine2 = pd.DataFrame(np.hstack((wineX2, np.atleast_2d(wineY))))
cols = list(range(wine2.shape[1]))
cols[-1] = 'Class'
wine2.columns = cols
wine2.to_csv(out + 'wine_datasets.csv', index=False, header=False)

dim = 60
rp = SparseRandomProjection(n_components=dim, random_state=5)
digitX2 = rp.fit_transform(digitX)
digit2 = pd.DataFrame(np.hstack((digitX2, np.atleast_2d(digitY))))
cols = list(range(digit2.shape[1]))
cols[-1] = 'Class'
digit2.columns = cols
digit2.to_csv(out + 'digit_datasets.csv', index=False, header=False)
| SenRamakri/CS-7641-Assignment-3 | RP.py | RP.py | py | 3,594 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.cm.get_cmap",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.cm",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "numpy.random.seed",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.random",... |
26248063876 | from datetime import datetime
import six
from oslo_config import cfg
from oslo_log import log
from oslo_utils import uuidutils, importutils
from delfin import db
from delfin.common.constants import TelemetryCollection, TelemetryJobStatus
from delfin.exception import TaskNotFound
from delfin.i18n import _
from delfin.task_manager import rpcapi as task_rpcapi
from delfin.task_manager.scheduler import schedule_manager
from delfin.task_manager.tasks.telemetry import PerformanceCollectionTask
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class JobHandler(object):
def __init__(self, ctx, task_id, storage_id, args, interval):
# create an object of periodic task scheduler
self.ctx = ctx
self.task_id = task_id
self.storage_id = storage_id
self.args = args
self.interval = interval
self.task_rpcapi = task_rpcapi.TaskAPI()
self.scheduler = schedule_manager.SchedulerManager().get_scheduler()
self.stopped = False
self.job_ids = set()
@staticmethod
def get_instance(ctx, task_id):
task = db.task_get(ctx, task_id)
return JobHandler(ctx, task_id, task['storage_id'],
task['args'], task['interval'])
def perform_history_collection(self, start_time, end_time, last_run_time):
# Trigger one historic collection to make sure we do not
# miss any Data points due to reschedule
LOG.debug('Triggering one historic collection for task %s',
self.task_id)
try:
telemetry = PerformanceCollectionTask()
ret = telemetry.collect(self.ctx, self.storage_id, self.args,
start_time, end_time)
LOG.debug('Historic collection performed for task %s with '
'result %s' % (self.task_id, ret))
db.task_update(self.ctx, self.task_id,
{'last_run_time': last_run_time})
except Exception as e:
msg = _("Failed to collect performance metrics during history "
"collection for storage id:{0}, reason:{1}"
.format(self.storage_id, six.text_type(e)))
LOG.error(msg)
def schedule_job(self, task_id):
if self.stopped:
# If Job is stopped return immediately
return
LOG.info("JobHandler received A job %s to schedule" % task_id)
job = db.task_get(self.ctx, task_id)
# Check delete status of the task
deleted = job['deleted']
if deleted:
return
collection_class = importutils.import_class(
job['method'])
instance = collection_class.get_instance(self.ctx, self.task_id)
current_time = int(datetime.now().timestamp())
last_run_time = current_time
next_collection_time = last_run_time + job['interval']
job_id = uuidutils.generate_uuid()
next_collection_time = datetime \
.fromtimestamp(next_collection_time) \
.strftime('%Y-%m-%d %H:%M:%S')
existing_job_id = job['job_id']
scheduler_job = self.scheduler.get_job(existing_job_id)
if not (existing_job_id and scheduler_job):
LOG.info('JobHandler scheduling a new job')
self.scheduler.add_job(
instance, 'interval', seconds=job['interval'],
next_run_time=next_collection_time, id=job_id,
misfire_grace_time=int(job['interval'] / 2))
update_task_dict = {'job_id': job_id}
db.task_update(self.ctx, self.task_id, update_task_dict)
self.job_ids.add(job_id)
LOG.info('Periodic collection tasks scheduled for for job id: '
'%s ' % self.task_id)
# Check if historic collection is needed for this task.
# If the last run time is already set, adjust start_time based on
# last run time or history_on_reschedule which is smaller
# If jod id is created but last run time is not yet set, then
# adjust start_time based on interval or history_on_reschedule
# whichever is smaller
end_time = current_time * 1000
# Maximum supported history duration on restart
history_on_reschedule = CONF.telemetry. \
performance_history_on_reschedule
if job['last_run_time']:
start_time = job['last_run_time'] * 1000 \
if current_time - job['last_run_time'] < \
history_on_reschedule \
else (end_time - history_on_reschedule * 1000)
self.perform_history_collection(start_time, end_time,
last_run_time)
elif existing_job_id:
interval_in_sec = job['interval']
start_time = (end_time - interval_in_sec * 1000) \
if interval_in_sec < history_on_reschedule \
else (end_time - history_on_reschedule * 1000)
self.perform_history_collection(start_time, end_time,
last_run_time)
else:
LOG.info('Job already exists with this scheduler')
def stop(self):
self.stopped = True
for job_id in self.job_ids.copy():
self.remove_scheduled_job(job_id)
LOG.info("Stopping telemetry jobs")
def remove_scheduled_job(self, job_id):
if job_id in self.job_ids:
self.job_ids.remove(job_id)
if job_id and self.scheduler.get_job(job_id):
self.scheduler.remove_job(job_id)
def remove_job(self, task_id):
try:
LOG.info("Received job %s to remove", task_id)
job = db.task_get(self.ctx, task_id)
job_id = job['job_id']
self.remove_scheduled_job(job_id)
except Exception as e:
LOG.error("Failed to remove periodic scheduling job , reason: %s.",
six.text_type(e))
class FailedJobHandler(object):
def __init__(self, ctx):
# create an object of periodic failed task scheduler
self.scheduler = schedule_manager.SchedulerManager().get_scheduler()
self.ctx = ctx
self.stopped = False
self.job_ids = set()
@staticmethod
def get_instance(ctx, failed_task_id):
return FailedJobHandler(ctx)
def schedule_failed_job(self, failed_task_id):
if self.stopped:
return
try:
job = db.failed_task_get(self.ctx, failed_task_id)
retry_count = job['retry_count']
result = job['result']
job_id = job['job_id']
if retry_count >= \
TelemetryCollection.MAX_FAILED_JOB_RETRY_COUNT or \
result == TelemetryJobStatus.FAILED_JOB_STATUS_SUCCESS:
LOG.info("Exiting Failure task processing for task [%d] "
"with result [%s] and retry count [%d] "
% (job['id'], result, retry_count))
self._teardown_task(self.ctx, job['id'], job_id)
return
# If job already scheduled, skip
if job_id and self.scheduler.get_job(job_id):
return
try:
db.task_get(self.ctx, job['task_id'])
except TaskNotFound as e:
LOG.info("Removing failed telemetry job as parent job "
"do not exist: %s", six.text_type(e))
# tear down if original task is not available
self._teardown_task(self.ctx, job['id'],
job_id)
return
if not (job_id and self.scheduler.get_job(job_id)):
job_id = uuidutils.generate_uuid()
db.failed_task_update(self.ctx, job['id'],
{'job_id': job_id})
collection_class = importutils.import_class(
job['method'])
instance = \
collection_class.get_instance(self.ctx, job['id'])
self.scheduler.add_job(
instance, 'interval',
seconds=job['interval'],
next_run_time=datetime.now(), id=job_id,
misfire_grace_time=int(job['interval'] / 2))
self.job_ids.add(job_id)
except Exception as e:
LOG.error("Failed to schedule retry tasks for performance "
"collection, reason: %s", six.text_type(e))
else:
LOG.info("Schedule collection completed")
def _teardown_task(self, ctx, failed_task_id, job_id):
db.failed_task_delete(ctx, failed_task_id)
self.remove_scheduled_job(job_id)
def remove_scheduled_job(self, job_id):
if job_id in self.job_ids:
self.job_ids.remove(job_id)
if job_id and self.scheduler.get_job(job_id):
self.scheduler.remove_job(job_id)
def stop(self):
self.stopped = True
for job_id in self.job_ids.copy():
self.remove_scheduled_job(job_id)
def remove_failed_job(self, failed_task_id):
try:
LOG.info("Received failed job %s to remove", failed_task_id)
job = db.failed_task_get(self.ctx, failed_task_id)
job_id = job['job_id']
self.remove_scheduled_job(job_id)
db.failed_task_delete(self.ctx, job['id'])
LOG.info("Removed failed_task entry %s ", job['id'])
except Exception as e:
LOG.error("Failed to remove periodic scheduling job , reason: %s.",
six.text_type(e))
@classmethod
def job_interval(cls):
return TelemetryCollection.FAILED_JOB_SCHEDULE_INTERVAL
| sodafoundation/delfin | delfin/task_manager/scheduler/schedulers/telemetry/job_handler.py | job_handler.py | py | 9,923 | python | en | code | 201 | github-code | 6 | [
{
"api_name": "oslo_config.cfg.CONF",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "oslo_config.cfg",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "oslo_log.log.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "oslo... |
38474237620 | from trainer import image_classifier, augmentation_pipeline,GCSHelper
import argparse
def str2bool(v):
    """Parse a human-friendly truthy/falsy string into a bool.

    Intended as an argparse ``type=`` callable: accepts yes/no, true/false,
    t/f, y/n, 1/0 in any case, and raises argparse.ArgumentTypeError for
    anything else.
    """
    normalised = v.lower()
    if normalised in {'yes', 'true', 't', 'y', '1'}:
        return True
    if normalised in {'no', 'false', 'f', 'n', '0'}:
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def run(data_directory, output_directory, project_id,augment_flag,augment_samples,nr_epochs,drop_out,val_split,model,batch_size,check_overfit):
    """Validate inputs, optionally augment the image set, then retrain the classifier.

    Runs in cloud mode (cloudML / cloud_mode=True) against the given GCP project.
    Progress is reported via prints, which the cloud job log captures.
    """
    # Fail fast on inconsistent parameters before any expensive work starts.
    image_classifier.check_input(project_id=project_id, data_dir=data_directory, output_dir=output_directory,
                                 validation_split=val_split, num_epochs=nr_epochs, dropout=drop_out,
                                 augmentation_samples=augment_samples)
    print('AUGMENTING IMAGES...')
    if augment_flag:
        # Adds `augment_samples` synthetic images per category to the dataset.
        augmentation_pipeline.augmentImages(project_id=project_id, data_dir=data_directory, sample_size=augment_samples,cloudML=True)
    print('AUGMENTING IMAGES DONE!')
    print('TRAINING MODEL...')
    image_classifier.retrain(project_id, data_directory, batch_size=batch_size, model=model, dropout=drop_out, num_epochs=nr_epochs,
                             validation_split=val_split, output_dir=output_directory, cloud_mode=True,check_overfit=check_overfit)
# Command-line interface for the Cloud ML training task.
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, help='directory of data')
parser.add_argument('--output_dir', type=str, help='directory of output model')
parser.add_argument('--project_id', type=str, default="trainer-cl", help='Google cloud projectID')
parser.add_argument('--aug_flag', type=str2bool, default=False, help='True if augmentation is done on images')
parser.add_argument('--aug_samples', type=int, default=1, help='extra augmentation samples that are added per category')
# NOTE(review): help text below looks copy-pasted from --aug_samples; this flag
# actually sets the number of training epochs.
parser.add_argument('--nr_epochs', type=int, default=1, help='extra augmentation samples that are added per category')
parser.add_argument('--drop_out', type=float, default=0.1, help='Amount of droppout to prevent overfitting')
parser.add_argument('--val_split', type=float, default=0.1, help='Percentage of data used for validation')
parser.add_argument('--model', type=str, default="MobileNet", help='Used model architecture')
parser.add_argument('--batch_size', type=int, default=16, help='Batch size used for model training')
parser.add_argument('--check_overfit', type=str2bool, default=True, help='Add early stopping check')
args = parser.parse_args()
try:
    run(args.data_dir,args.output_dir,args.project_id,args.aug_flag,args.aug_samples,args.nr_epochs,args.drop_out,args.val_split,args.model,args.batch_size,args.check_overfit)
    # Sentinel file signalling success to whoever polls the output bucket.
    GCSHelper.uploadClosingStatusFilesToGCS(args.project_id,[],'done.txt',args.output_dir)
except Exception as e:
    # On any failure, upload the error text so the caller can diagnose it.
    GCSHelper.uploadClosingStatusFilesToGCS(args.project_id,[str(e)], 'wrong.txt', args.output_dir)
| chrike-platinum/Cloud_ML_Template | trainer/task.py | task.py | py | 2,861 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentTypeError",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "trainer.image_classifier.check_input",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "trainer.image_classifier",
"line_number": 14,
"usage_type": "name"
},
... |
19407988415 | from flask import Flask, jsonify, request
from flask_cors import CORS
from flask_jwt_extended import create_access_token, JWTManager
from flask_mysqldb import MySQL
from dotenv import load_dotenv
import os
from datetime import datetime
app = Flask(__name__)
# Pull MySQL credentials from a local .env file into the process environment.
load_dotenv()
app.config['MYSQL_HOST'] = os.environ.get('MYSQL_HOST')
app.config['MYSQL_USER'] = os.environ.get('MYSQL_USER')
app.config['MYSQL_PASSWORD'] = os.environ.get('MYSQL_PASSWORD')
app.config['MYSQL_DB'] = os.environ.get('MYSQL_DB')
mysql = MySQL(app)
# Allow cross-origin requests from any origin — development setting; tighten
# the origins list before production use.
CORS(app, resources={r"/*": {"origins": "*"}}, supports_credentials=True)
@app.route('/coords', methods=["GET"])
def get_coords():
try:
cursor = mysql.connection.cursor()
cursor.execute("SELECT latitude, longitude FROM sample")
results = cursor.fetchall()
cursor.close()
coords = [{'lat': row[0], 'lng': row[1]} for row in results]
return jsonify({'coords': coords})
except Exception as e:
return jsonify({'error': str(e)}), 500 | RogelioBenavides/frida-kitchen | tracking_service/routes/tracking.py | tracking.py | py | 1,020 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_n... |
24059326549 | from PIL import Image
import os
from os import listdir, mkdir
def PrepareChars5x7(jmeno, mezX, mezY):
    """Slice a 13x4 character sheet into individual 5x7 glyph images.

    jmeno: path of the sheet image; mezX / mezY: horizontal / vertical gaps
    between glyph cells.  Writes Characters/ch<N>.png for each cell, where
    N = column + 13 * row.
    """
    sheet_img = Image.open(jmeno)
    sheet = sheet_img.load()
    cell_w = 5 + mezX
    cell_h = 7 + mezY
    for col in range(13):
        for row in range(4):
            glyph = Image.new(mode="RGB", size=(5, 7))
            out_px = glyph.load()
            print(out_px[1, 1], glyph.size)
            for gx in range(5):
                for gy in range(7):
                    out_px[gx, gy] = sheet[gx + cell_w * col, gy + cell_h * row]
            glyph.save("Characters/ch" + str(col + 13 * row) + ".png")
def Roztrid():
    """Bucket glyph images in Characters/ by their number of lit pixels.

    A pixel is "lit" when its red channel is non-zero.  Each 5x7 glyph image
    is saved into Characters/<count>/<original name>, creating the bucket
    directory on demand.
    """
    base = "Characters"
    for polozka in listdir(base):
        src = os.path.join(base, polozka)
        # Skip bucket sub-directories left over from a previous run; the
        # original crashed when Image.open() hit one of them.
        if not os.path.isfile(src):
            continue
        im = Image.open(src)
        pixels = im.load()
        hodnota = 0
        for x in range(5):
            for y in range(7):
                if pixels[x, y][0] != 0:
                    hodnota += 1
        bucket = os.path.join(base, str(hodnota))
        # exist_ok avoids the list-then-create race of the original
        # (it re-listed the directory once per image, too).
        os.makedirs(bucket, exist_ok=True)
        im.save(os.path.join(bucket, polozka))
| MedOndrej/ASCIIart | Preparation.py | Preparation.py | py | 1,019 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PIL.Image.open",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "PIL.Image.new",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 1... |
14040284357 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Picture, Product and Ticket tables."""

    dependencies = [
        # Ticket.user points at the configurable user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Picture',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('image', models.ImageField(null=True, upload_to=b'media/product_pictures', blank=True)),
                ('description', models.CharField(max_length=140, null=True, blank=True)),
                # Marks the picture shown by default for its product.
                ('default_picture', models.BooleanField(default=False)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=140)),
                ('description', models.TextField(null=True, blank=True)),
                ('total_number_of_tickets', models.IntegerField()),
                ('tickets_sold', models.IntegerField()),
                ('end_time', models.DateTimeField()),
                ('start_time', models.DateTimeField()),
                ('pricing_per_ticket', models.DecimalField(max_digits=8, decimal_places=2)),
                ('winning_ticket_number', models.IntegerField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Ticket',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ticket_number', models.IntegerField()),
                ('product', models.ForeignKey(related_name='tickets', to='ticketing.Product')),
                ('user', models.ForeignKey(related_name='tickets', to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Added after CreateModel('Picture') to break the Picture<->Product
        # creation-order dependency.
        migrations.AddField(
            model_name='picture',
            name='product',
            field=models.ForeignKey(related_name='pictures', to='ticketing.Product'),
            preserve_default=True,
        ),
    ]
| yenbryan/raffle | ticketing/migrations/0001_initial.py | 0001_initial.py | py | 2,454 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.swappable_dependency",
"line_number": 11,
"usage_type": "call... |
32936869929 | # 导入所需的库
import jieba
import docx
from docx import Document
from docx.shared import Inches
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
# Read the document content
# NOTE(review): every entry below is the empty string, so the replace loop
# further down is a no-op — confirm the intended filter words survived encoding.
filter_words = ['', '','','','','','','']
document = Document('221.docx')
text = ''
# NOTE(review): tokenising the still-empty string here does nothing; the
# jieba.cut call probably belongs after the paragraphs are collected.
text= jieba.cut(text)
text = ''.join(str(x) for x in text)
for paragraph in document.paragraphs:
    text += paragraph.text + ' '
for word in filter_words:
    text = text.replace(word, '')
# Build the stop-word collection
stopwords = set(STOPWORDS)
# NOTE(review): this list immediately overwrites the STOPWORDS set above —
# merge the two if the library's English defaults are wanted as well.
stopwords = ['同志们', '二','三','四','五','一','六','七','八','九','十','']
# Create the word-cloud object and set its parameters
# NOTE(review): the name `wordcloud` shadows the imported module of the same name.
wordcloud = WordCloud(
    font_path="simhei.ttf",
    width=1200, height=800,
    background_color='white',
    stopwords=stopwords,
    min_font_size=10).generate(text)
# Draw the word-cloud figure
plt.figure(figsize=(8, 8), facecolor=None)
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad=0)
plt.show()
# Build the list of words to filter
# Load the text to filter
# NOTE(review): this trailing demo overwrites `text` and prints it unchanged —
# likely leftover scratch code.
text = 'I hate this bad movie, it is so ugly and boring.'
# Use the string function replace() for substitution
print(text)
| lingqingjiuying/9ying1 | day1class1.py | day1class1.py | py | 1,150 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "docx.Document",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "jieba.cut",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "wordcloud.STOPWORDS",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "wordcloud.WordCloud",... |
25072840538 | #!/usr/bin/env python3
#_*_ coding: utf8 _*_
#------------------------------------------------------------
#----- GUILLOTINE -----|
# ---- FINDER HTTP SECURITY HEADERS ----|
# ---- Gohanckz ----|
# ---- Contact : igonzalez@pwnsec.cl ----|
# ---- Version : 2.0 ----|
#------------------------------------------------------------
try:
from banner import banner
from prettytable import PrettyTable
import requests
import argparse
from urllib3.exceptions import InsecureRequestWarning
except ImportError as err:
print("Some libraries are missing:")
print(err)
# Command-line interface: -t/--target selects the site to scan, -v/--verbose
# dumps every response header rather than just the security summary.
parser = argparse.ArgumentParser(description="Finder Security Headers")
parser.add_argument("-t","--target",help="Show http security headers enabled and missing")
parser.add_argument("-v","--verbose",action="store_true",help="Show full response")
# NOTE(review): `parser` is rebound to the parsed Namespace here; the rest of
# the module reads the options through this (shadowed) name.
parser = parser.parse_args()
try:
    # The scan runs at import time so the module-level tables (`table`,
    # `s_table`) and counters (`count`, `count_m`) are available to the
    # helper functions defined below.
    requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
    url = requests.get(url=parser.target, verify=False)
    # Reference list of HTTP security headers to look for in the response.
    security_headers = [
        "Strict-Transport-Security",
        "X-Frame-Options",
        "X-Content-Type-Options",
        "Content-Security-Policy",
        "X-Permitted-Cross-Domain-Policies",
        "Referrer-Policy",
        "Clear-Site-Data",
        "Cross-Origin-Embedder-Policy",
        "Cross-Origin-Opener-Policy",
        "Cross-Origin-Resource-Policy",
        "Cache-Control"
    ]
    info_headers = []            # header values, aligned with headers_site
    headers_site = []            # header names present in the response
    security_headers_site = []   # present headers that are security headers
    missing_headers = []         # security headers absent from the response
    headers = dict(url.headers)
    for i in headers:
        headers_site.append(i)
    for i in headers:
        info_headers.append(headers[i])
    for i in headers_site:
        if i in security_headers:
            security_headers_site.append(i)
    for j in security_headers:
        if not j in [h for h in headers_site]:
            missing_headers.append(j)
    # Full header dump, printed by --verbose.
    table = PrettyTable()
    table.add_column("Header",headers_site)
    table.add_column("Information",info_headers)
    table.align="l"
    # Pad the shorter list with blanks: PrettyTable requires equal-length columns.
    while len(security_headers_site) < len(missing_headers):
        security_headers_site.append(" ")
    while len(security_headers_site) > len(missing_headers):
        missing_headers.append(" ")
    # Count only real (non-padding) entries for the summary lines.
    count = 0
    for i in security_headers_site:
        if i != " ":
            count += 1
    count_m = 0
    for j in missing_headers:
        if j != " ":
            count_m +=1
    s_table = PrettyTable()
    s_table.add_column("Enabled Security Header",security_headers_site)
    s_table.add_column("Missing Security Header",missing_headers)
    s_table.align="l"
except:
    # NOTE(review): bare except hides DNS/TLS/usage errors behind a generic
    # time-out message — narrowing to requests.exceptions.RequestException
    # would preserve KeyboardInterrupt and real bugs.
    print("[!] time out, unable to connect to site.")
def main():
    """Print the banner and the summary counts for the analyzed target."""
    banner()
    try:
        print("\n[*] Analyzing target : ",parser.target)
        print("[*] Security headers enabled :", count)
        print("[*] Missing Security Headers :",count_m)
    except:
        # `count`/`count_m` only exist when the module-level request above
        # succeeded; any failure falls through to this usage hint.
        print("[!] Syntax Error.")
        print("[+] Usage: python3 guillotine.py -t http://example.site")
def target():
    """Print the enabled-vs-missing security-header summary table."""
    try:
        print(s_table)
    except NameError:
        # `s_table` only exists when the module-level scan succeeded; stay
        # silent otherwise. (The original's bare except also swallowed
        # unrelated errors such as broken pipes.)
        pass
def verbose():
    """Print the full header table (all response headers and their values)."""
    try:
        print(table)
    except NameError:
        # `table` only exists when the module-level scan succeeded; stay
        # silent otherwise. (The original's bare except also swallowed
        # unrelated errors such as broken pipes.)
        pass
if __name__ == '__main__':
    main()
    # --verbose prints the full header table; otherwise -t prints just the
    # enabled/missing summary. `parser` is the parsed Namespace (rebound above).
    if parser.verbose:
        verbose()
    elif parser.target:
        target()
| Gohanckz/guillotine | guillotine.py | guillotine.py | py | 3,431 | python | en | code | 12 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "requests.packages.urllib3.disable_warnings",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "requests.packages",
"line_number": 27,
"usage_type": "attribute"
},
... |
69928685308 | from django.db import models
class Vehicles(models.Model):
    """A vehicle record keyed by year, manufacturer, model and a unique serial number."""

    class Meta:
        # Default listing order: oldest model year first.
        ordering = ['year']

    id = models.AutoField(
        primary_key=True
    )

    # Intended bounds for `year`; NOTE(review): currently informational only —
    # they are not wired up as field validators.
    year_min = 1900
    year_max = 2100
    year = models.IntegerField(
        'Year',
    )

    man_max_len = 50
    manufacturer = models.CharField(
        'Manufacturer',
        max_length=man_max_len,
    )

    model_max_len = 100
    # NOTE(review): verbose name 'model' is lower-case unlike the other fields;
    # left unchanged to avoid a cosmetic migration.
    model = models.CharField(
        'model',
        max_length=model_max_len
    )

    sn_max_len = 15
    serial_no = models.CharField(
        'Serial Number',
        unique=True,
        max_length=sn_max_len
    )

    def __str__(self):
        """Human-readable label for the admin and shell, e.g. '2014 Ford Focus'."""
        return "{} {} {}".format(self.year, self.manufacturer, self.model)
| babarehner/carwork | carrepairs/models.py | models.py | py | 640 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.models.Model",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "django.db.models.AutoField",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "... |
11250773237 | import connexion
from openapi_server import orm
from openapi_server.db import db
from openapi_server.models.error import Error # noqa: E501
from openapi_server.models.qc_result import QcResult # noqa: E501
def samples_id_qc_result_delete(id):  # noqa: E501
    """Delete the QC result attached to the sample identified by `id`.

    Returns a 404 error when the sample or its QC result does not exist;
    otherwise deletes the row, commits, and returns an empty 204 response.

    :param id:
    :type id: str

    :rtype: None
    """
    sample = orm.Sample.query.get(id)
    if sample is None or sample.qc_result is None:
        return Error(404, 'Not found'), 404
    db.session.delete(sample.qc_result)
    db.session.commit()
    return '', 204
def samples_id_qc_result_get(id):  # noqa: E501
    """Fetch the QC result attached to the sample identified by `id`.

    Returns the QC result API model with 200, or a 404 error when the
    sample or its QC result is missing.

    :param id:
    :type id: str

    :rtype: QcResult
    """
    sample = orm.Sample.query.get(id)
    if sample is None or sample.qc_result is None:
        return Error(404, 'Not found'), 404
    return sample.qc_result.to_model(), 200
def samples_id_qc_result_put(id, qc_result=None):  # noqa: E501
    """Add or replace the QC result associated with a sample.

    :param id:
    :type id: str
    :param qc_result: QC result to be added
    :type qc_result: dict | bytes

    :rtype: QcResult
    """
    # Deserialize the request body into the generated API model when JSON.
    if connexion.request.is_json:
        qc_result = QcResult.from_dict(connexion.request.get_json())  # noqa: E501
    sample = orm.Sample.query.get(id)
    if not sample:
        return Error(404, 'Not found'), 404
    # Convert the API model into an ORM row bound to this sample.
    inst = orm.QcResult.from_model(qc_result)
    inst.sample_id = sample.id
    if sample.qc_result:
        # Replace: reassigning the one-to-one relationship detaches the old
        # row and attaches `inst` in its place.
        sample.qc_result = inst
    else:
        # First result for this sample: stage the new row directly.
        db.session.add(inst)
    db.session.commit()
    return inst.to_model(), 200, {'location': ''}
| Mykrobe-tools/mykrobe-atlas-tracking-api | openapi_server/controllers/qc_result_controller.py | qc_result_controller.py | py | 1,810 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "openapi_server.orm.Sample.query.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "openapi_server.orm.Sample",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "openapi_server.orm",
"line_number": 20,
"usage_type": "name"
},
{
... |
72530296187 | import os
import cv2
import pytesseract
import numpy as np
from tqdm import tqdm
INPUT_PATH: str = "inputs_control/"
OUTPUT_PATH: str = "text_pred_control/"
#CONFIG: str = "--psm 6 --oem 1"
CONFIG: str = "--psm 7 --oem 1"
def pipeline(file: str) -> str:
    """OCR one image from INPUT_PATH and write the text to OUTPUT_PATH.

    The output file shares the input's base name with a .txt extension.
    Returns the recognised text.
    """
    path: str = f"{INPUT_PATH}{file}"
    img: np.ndarray = cv2.imread(path)
    text: str = pytesseract.image_to_string(img, config=CONFIG)
    # splitext keeps dotted base names intact ("scan.v2.png" -> "scan.v2");
    # the original split(".")[0] truncated them to "scan".
    base: str = os.path.splitext(file)[0]
    with open(OUTPUT_PATH + f"{base}.txt", 'w') as f:
        f.write(text)
    return text
def main() -> int:
    """Run the OCR pipeline over every file in INPUT_PATH; always returns 0."""
    for entry in tqdm(os.listdir(INPUT_PATH)):
        pipeline(entry)
    return 0
# Script entry point; main()'s exit code is not propagated to the shell.
if __name__ == "__main__":
    main()
| lukeabela38/image2text-tesseract | workspace/main.py | main.py | py | 693 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.ndarray",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pytesseract.image_to_string",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.listdir"... |
30513354824 | import os
import requests
from app import Processing
import nltk
from moviepy.editor import *
from pexels_api import API
from pathlib import Path
import time
import pyttsx3
# configurations of paths, output URL, file structure
# 16:9 ratios possible for upright smartphone usage
# 1080, 1920 --> FullHD resolution
# 540, 960 --> 1/4 data size compared to FullHD
# 270, 480 --> 1/8 data size compared to FullHD
# NOTE(review): true division yields floats (270.0) — confirm downstream
# moviepy calls accept non-integer sizes.
WIDTH_OUT = 540/2
HEIGHT_OUT = 960/2
screensize = (WIDTH_OUT, HEIGHT_OUT)
FONT = "Helvetica-Bold"
FONTSIZE_MAIN = WIDTH_OUT * 0.1   # main caption size scales with output width
FONTSIZE_SUB = WIDTH_OUT * 0.03   # attribution line size
FONT_COLOUR = "white"
PADDING = WIDTH_OUT * 0.1
readingSpeed = 0.2                # seconds of screen time per character of text
audio_dir_emotional = "static/music/emotional.mp3"
audio_dir_promo = "static/music/promo.mp3"
audio_dir_neutral = "static/music/neutral.mp3"
# Soundtracks are loaded once at import time and reused across renders.
audio_emotional = AudioFileClip(audio_dir_emotional, fps=44100)
audio_neutral = AudioFileClip(audio_dir_neutral, fps=44100)
audio_promo = AudioFileClip(audio_dir_promo, fps=44100)
ABS_PATH = os.path.abspath(__file__)  # "/app.py"
BASE_DIR = os.path.dirname(ABS_PATH)  # "/"
# Ensure the download/working directory exists next to this module.
Path(os.path.join(BASE_DIR, "downloads")).mkdir(parents=True, exist_ok=True)
OUTPUT = os.path.join(BASE_DIR, "downloads")
# API setups
PEXELS_API_KEY = os.getenv("PEXELS_API_KEY")
api = API(PEXELS_API_KEY)
def dl_img(url, filename):
    """Download `url` to `filename` and return the path written.

    Raises requests.HTTPError on a non-2xx response instead of silently
    writing an error page to disk.
    """
    print(filename)
    r = requests.get(url, allow_redirects=True)
    r.raise_for_status()
    # Context manager closes the handle; the original open(...).write(...) leaked it.
    with open(filename, 'wb') as fh:
        fh.write(r.content)
    return filename
def pexels_fetch(to_download):
    """Fetch one Pexels photo per search-term list in `to_download`.

    Returns a list of [local_file_path, photographer_name] pairs, one per
    query, downloaded into OUTPUT as image_downloaded_<n>.jpg.
    """
    downloaded_files = []
    for n, terms in enumerate(to_download):
        api.search(" ".join(terms), page=1, results_per_page=1)
        entries = api.get_entries()
        print(entries)
        local_path = dl_img(
            entries[0].large,
            os.path.join(OUTPUT, "image_downloaded_" + str(n) + ".jpg"),
        )
        downloaded_files.append([local_path, entries[0].photographer])
    return downloaded_files
def zoom(file, t):
    """Build a t-second clip of image `file` with a slow (2%/s) zoom-in.

    The clip is scaled to the output height first, then cropped/padded to the
    exact output dimensions by resize_to_ouput_size.
    """
    clip = ImageClip(file)
    clip = clip.resize(height=screensize[1])
    clip = clip.resize(lambda elapsed: 1 + 0.02 * elapsed)
    clip = clip.set_position(('center', 'center'))
    clip = clip.set_duration(t)
    return resize_to_ouput_size(clip)
def resize_to_ouput_size(f):
    """Upscale `f` to at least the output dimensions, then centre-crop to exactly them.

    (Name keeps the historical 'ouput' spelling — callers depend on it.)
    """
    if f.w < WIDTH_OUT:
        f = f.resize(width=WIDTH_OUT)
    if f.h < HEIGHT_OUT:
        f = f.resize(height=HEIGHT_OUT)
    return f.crop(x_center=f.w / 2, y_center=f.h / 2, width=WIDTH_OUT, height=HEIGHT_OUT)
'''
# voiceover functionality deprecated due to non-existent espeak support on heroku
def voiceover(textSnippet, i):
engine = pyttsx3.init()
print(f"inside voiceover func, processing: {textSnippet} \nIsBusy is set to {engine.isBusy()}")
audioFileName = f"voiceover text segment no. {i}.mp3"
engine.save_to_file(textSnippet, audioFileName)
engine.runAndWait()
# engine.stop()
print(f"text to speech worked correctly? \nisBusy is set to {engine.isBusy()}")
return audioFileName
'''
def overlay_text(file, i):
    """Compose the i-th caption plus the photo attribution into one timed overlay clip."""
    caption = TextClip(
        file.text_segmented[i],
        size=(WIDTH_OUT * 0.9, HEIGHT_OUT),
        color=FONT_COLOUR,
        method="caption",
        align="East",
        fontsize=FONTSIZE_MAIN,
        font=FONT,
    )
    attribution = overlay_attribution(file.downloaded_items[i][1])
    combined = CompositeVideoClip([caption, attribution])
    # Voiceover support was removed (no espeak on Heroku); only timing remains.
    return combined.set_duration(file.text_timing[i])
def overlay_attribution(text):
    """Render the Pexels photographer credit as a small caption near the bottom edge."""
    credit = TextClip(
        f"Image from www.pexels.com by: {text}",
        size=(WIDTH_OUT, HEIGHT_OUT * 0.95),
        color=FONT_COLOUR,
        fontsize=FONTSIZE_SUB,
        align="south",
        method="caption",
        font=FONT,
    )
    return credit.set_position((0, 0.97), relative=True)
def create_kopfkino(content):
    """Render a full video from the request payload and return its bytes.

    `content` is the request JSON with keys `user_input`, `style` and
    `voiceover`. The text is segmented, one Pexels image is fetched per
    sentence, each gets a zoomed background plus caption overlay, and the
    concatenation is scored with a style-dependent soundtrack.
    """
    file = Processing(user_input=content.get("user_input"), style=content.get("style"), voiceover=content.get("voiceover"))
    print(f"voiceover from content JSON is set to: {file.voiceover}")
    nlp_testing_2(file)
    print(file.downloaded_items)
    print(file.text_searchwords)
    file.downloaded_items = pexels_fetch(file.text_searchwords)
    # One zoomed background clip per downloaded image, timed to its sentence.
    for i in range(0, len(file.downloaded_items)):
        file.footage.append(zoom(file.downloaded_items[i][0], file.text_timing[i]))
    # Layer the caption/attribution overlay on top of each background clip.
    for i in range(0, len(file.text_segmented)):
        clip = overlay_text(file, i)
        file.footage_and_text.append(CompositeVideoClip([file.footage[i], clip]))
    file.export_file = concatenate(file.footage_and_text)
    # Soundtrack by style; unknown styles fall back to neutral — same
    # behaviour as the original if/elif chain.
    soundtrack = {
        "neutral": audio_neutral,
        "emotional": audio_emotional,
        "promo": audio_promo,
    }.get(file.style, audio_neutral)
    file.export_file = file.export_file.set_audio(soundtrack.set_duration(file.export_file.duration))
    out_path = os.path.join(OUTPUT, "Kopfkino_export_in workerinstance.mp4")
    file.export_file.write_videofile(out_path, codec='libx264', audio_codec='aac', fps=24)
    # Read the rendered file back so the caller receives raw video bytes.
    with open(out_path, "rb") as trans:
        result = trans.read()
    return result
def nlp_testing_2(file):
    """Segment `file.user_input` into sentences; derive per-sentence timing and search words.

    Populates file.text_segmented, file.text_timing (seconds, floor of 5) and
    file.text_searchwords (adjectives/nouns/verbs per sentence). Sentences
    yielding no search word get the placeholder "error". Returns a debug
    summary string.
    """
    text_raw = file.user_input
    print(text_raw)
    file.text_segmented = nltk.sent_tokenize(text_raw)
    for i in range(0, len(file.text_segmented)):
        # Reading time: readingSpeed seconds per character, clamped to >= 5 s.
        # (len() replaces the original character-counting loop.)
        duration = round(len(file.text_segmented[i]) * readingSpeed, 1)
        file.text_timing.append(max(duration, 5))
        words = nltk.word_tokenize(file.text_segmented[i])
        file.text_searchwords.append([])
        print(f"POS Tags{nltk.pos_tag(words)}")
        # Keep adjectives (JJ), nouns (NN/NNS) and base-form verbs (VB) as
        # image search terms for this sentence.
        for token, tag in nltk.pos_tag(words):
            if tag in {"JJ", "NN", "NNS", "VB"}:
                print(f"found word {(token, tag)} and put it to the searchwords")
                file.text_searchwords[i].append(token)
    for x in file.text_searchwords:
        if len(x) == 0:
            x.append("error")
            print("-------> ERROR HANDLING NEEDED: No searchword left: appended full sentence OR error")
    return f"\nsegmented: {file.text_segmented}, \ntimings: {file.text_timing} \nsearchwords: {file.text_searchwords}"
| oliverkoetter/kopfkino | tasks.py | tasks.py | py | 6,989 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "os.path.abspath",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_numb... |
16254773107 | import pandas as pd
import numpy as np
import random
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
x= ""
def calc_color_indxs(centroids):
    """Assign each training example in the module-global `x` to its nearest centroid.

    Uses squared Euclidean distance and returns an (n_samples, 1) int array
    of centroid indices (ties resolve to the lowest index, as before).
    """
    global x
    # Broadcast to an (n_samples, k) matrix of squared distances and take the
    # row-wise argmin — replaces the original per-sample Python loop.
    sq_dists = np.sum((x[:, np.newaxis, :] - centroids) ** 2, axis=2)
    centroid_indx = np.argmin(sq_dists, axis=1).reshape(-1, 1)
    return centroid_indx.astype(int)
def calc_cost(centroids, sample_color_indx):
    """Average Euclidean distance between each sample in global `x` and its assigned centroid."""
    flat_idx = sample_color_indx.reshape(sample_color_indx.shape[0])
    assigned = centroids[flat_idx]
    diffs = x - assigned
    total = np.sum(np.power(np.sum(np.power(diffs, 2), axis=1), 0.5), axis=0)
    return total / assigned.shape[0]
def update_centroids(centroids, sample_color_indx, k):
    """Recompute each of the k centroids as the mean of its assigned samples.

    Clusters with no assigned samples keep their previous centroid unchanged.
    Mutates and returns `centroids`.
    """
    for cluster in range(0, k):
        member_rows = np.where(sample_color_indx == cluster)
        members = x[member_rows[0]]
        if members.shape[0] == 0:
            continue
        centroids[cluster] = np.mean(members, axis=0)
    return centroids
if __name__ == '__main__':
    # Run Lloyd's algorithm on the iris data set, plotting the cost per
    # iteration (left panel) and the final clusters (right panel).
    data = load_iris(as_frame = True)
    df = data.data
    num_of_features = df.shape[1]
    x = np.array(df.iloc[1:,0:num_of_features])
    k = int(input("Enter Number of Clusters: "))
    # Initialize centroids as k distinct random training rows.
    random_init_indx = random.sample(range(0,df.shape[0]),k)
    centroids = np.array(df.iloc[random_init_indx,0:num_of_features])
    plt.subplot(1,2,1)
    i = 0
    # One assignment/update pass up front so that cost0 and cost1 both
    # exist before entering the convergence loop.
    sample_color_indx = calc_color_indxs(centroids)
    cost0 = calc_cost(centroids,sample_color_indx)
    # BUG FIX: update_centroids mutates `centroids` in place, so a plain
    # alias (prev_centroids = centroids) would silently track the updated
    # values; an explicit copy preserves the pre-update centroids.
    prev_centroids = centroids.copy()
    centroids = update_centroids(centroids,sample_color_indx,k)
    plt.scatter(i,cost0)
    i = i + 1
    sample_color_indx = calc_color_indxs(centroids)
    cost1 = calc_cost(centroids,sample_color_indx)
    # Iterate until the cost improvement drops below 1e-9.
    while cost0-cost1>=pow(10,-9):
        i = i + 1
        plt.scatter(i,cost1)
        prev_centroids = centroids.copy()  # BUG FIX: copy, not alias (see above)
        centroids = update_centroids(centroids,sample_color_indx,k)
        cost0 = cost1
        sample_color_indx = calc_color_indxs(centroids)
        cost1 = calc_cost(centroids,sample_color_indx)
        print(cost0)
    # Right panel: scatter the training points, one colour per cluster,
    # coloured by the last centroids whose update no longer improved cost.
    plt.subplot(1,2,2)
    sample_color_indx = calc_color_indxs(prev_centroids)
    colors = plt.cm.Spectral(np.linspace(0,1,k))
    for i,col in zip(range(k),colors):
        indxs = np.where(sample_color_indx == i)
        x_centroid = x[indxs[0]]
        plt.scatter(x_centroid[:,0],x_centroid[:,1],color = col)
    plt.show()
| DhyeyDabhi/Machine-Learning | K Means Clustering/Logic Code/KMeans.py | KMeans.py | py | 3,311 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.zeros",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 1... |
35041146572 | import copy
import json
import logging
import os
from threading import Thread
import requests
import six
import yaml
from toscaparser.tosca_template import ToscaTemplate
from yaml import Loader
from configuration_tool.common.tosca_reserved_keys import IMPORTS, DEFAULT_ARTIFACTS_DIRECTORY, \
EXECUTOR, NAME, TOSCA_ELEMENTS_MAP_FILE, TOSCA_ELEMENTS_DEFINITION_FILE, TOPOLOGY_TEMPLATE, TYPE, \
TOSCA_ELEMENTS_DEFINITION_DB_CLUSTER_NAME, NODE_TEMPLATES, RELATIONSHIP_TEMPLATES
from configuration_tool.common import utils
from configuration_tool.common.configuration import Configuration
from configuration_tool.configuration_tools.ansible.instance_model.instance_model import update_instance_model
from configuration_tool.configuration_tools.combined.combine_configuration_tools import get_configuration_tool_class
from configuration_tool.providers.common.provider_configuration import ProviderConfiguration
from configuration_tool.providers.common.tosca_template import ProviderToscaTemplate
# Keys that must be present in the tool configuration's main section.
# (The tuple was assigned twice on consecutive lines; the exact duplicate
# has been removed.)
REQUIRED_CONFIGURATION_PARAMS = (TOSCA_ELEMENTS_DEFINITION_FILE, DEFAULT_ARTIFACTS_DIRECTORY, TOSCA_ELEMENTS_MAP_FILE)
def load_to_db(node_templates, relationship_templates, config, database_api_endpoint, template, cluster_name):
    """Resolve type definitions for all templates from the definitions DB,
    merge them into *template*, and upload the result to the database.

    Side effects: writes the merged template to <tmp_clouni_dir>/template.yaml,
    mutates the caller's *template* dict (its IMPORTS key is deleted), and
    performs HTTP GET/POST requests against *database_api_endpoint*.

    Raises Exception when a DB response is not JSON or reports a non-200
    application status.
    """
    definitions = {}
    # Node and relationship templates are merged into a single mapping so
    # their type definitions can be fetched in one pass.
    all_templates = node_templates
    all_templates = utils.deep_update_dict(all_templates, relationship_templates)
    def_cluster = config.get_section(config.MAIN_SECTION).get(TOSCA_ELEMENTS_DEFINITION_DB_CLUSTER_NAME)
    for key, value in all_templates.items():
        # NOTE(review): `type` shadows the builtin of the same name.
        type = value[TYPE]
        # Fetch this type's definition (plus its dependencies) from the DB.
        r = requests.get(utils.get_url_for_getting_dependencies(def_cluster, database_api_endpoint, type))
        try:
            response = r.json()
        except Exception:
            raise Exception("Failed to parse json response from db")
        if response['status'] != 200:
            raise Exception("Error in db! Status code: %s, msg: %s" % (response['status'], response['message']))
        definitions = utils.deep_update_dict(definitions, response['result'])
    # Merge the collected definitions into the template and dump it; the
    # imports section becomes redundant once definitions are inlined.
    with open(os.path.join(utils.get_tmp_clouni_dir(), 'template.yaml'), "w") as f:
        template = utils.deep_update_dict(template, definitions)
        del template[IMPORTS]
        print(yaml.dump(template, Dumper=utils.NoAliasDumper), file=f)
    # Re-open the dump and upload it as a multipart file to the target cluster.
    with open(os.path.join(utils.get_tmp_clouni_dir(), 'template.yaml'), "r") as f:
        files = {'file': f}
        res = requests.post(utils.get_url_for_loading_to_db(cluster_name, database_api_endpoint), files=files)
        try:
            response = res.json()
        except Exception:
            raise Exception("Failed to parse json response from db on loading template")
        if response['status'] != 200:
            raise Exception("Error in db! Status code: %s, msg: %s" % (response['status'], response['message']))
def translate(provider_template, validate_only, configuration_tool, cluster_name, is_delete=False,
extra=None, log_level='info', debug=False, host_ip_parameter='public_address',
database_api_endpoint=None, grpc_cotea_endpoint=None):
log_map = dict(
debug=logging.DEBUG,
info=logging.INFO,
warning=logging.WARNING,
error=logging.ERROR,
critical=logging.ERROR
)
logging_format = "%(asctime)s %(levelname)s %(message)s"
logging.basicConfig(filename='.clouni-configuration-tool.log', filemode='a', level=log_map[log_level],
format=logging_format, datefmt='%Y-%m-%d %H:%M:%S')
config = Configuration()
template = yaml.load(provider_template, Loader=Loader)
topology_template = template.get(TOPOLOGY_TEMPLATE)
# tmp version - provider gets from first node template (can't use different providers in template)
provider = None
for key in topology_template.get('node_templates').keys():
provider_template_name = key
tosca_type = topology_template.get('node_templates').get(provider_template_name).get('type')
(provider, _, _) = utils.tosca_type_parse(tosca_type)
if provider in ['openstack', 'amazon', 'kubernetes']: # TODO: make config prividers file!
break
provider_config = ProviderConfiguration(provider)
for sec in REQUIRED_CONFIGURATION_PARAMS:
if sec not in config.get_section(config.MAIN_SECTION).keys():
logging.error('Provider configuration parameter "%s" is missing in configuration file' % sec)
raise Exception('Provider configuration parameter "%s" is missing in configuration file' % sec)
def_files = config.get_section(config.MAIN_SECTION).get(TOSCA_ELEMENTS_DEFINITION_FILE)
if isinstance(def_files, six.string_types):
def_files = [def_files]
provider_def_files = provider_config.get_section(config.MAIN_SECTION).get(TOSCA_ELEMENTS_DEFINITION_FILE)
if isinstance(provider_def_files, six.string_types):
provider_def_files = [provider_def_files]
default_import_files = []
for def_file in def_files:
default_import_files.append(os.path.join(utils.get_project_root_path(), def_file))
for def_file in provider_def_files:
default_import_files.append(os.path.join(utils.get_project_root_path(), 'configuration_tool', 'providers',
provider, def_file))
logging.info("Default TOSCA template definition file to be imported \'%s\'" % json.dumps(default_import_files))
# Add default import of normative TOSCA types to the template
template[IMPORTS] = template.get(IMPORTS, [])
for i in range(len(template[IMPORTS])):
if isinstance(template[IMPORTS][i], dict):
for import_key, import_value in template[IMPORTS][i].items():
if isinstance(import_value, six.string_types):
template[IMPORTS][i] = import_value
elif isinstance(import_value, dict):
if import_value.get('file', None) is None:
logging.error("Imports %s doesn't contain \'file\' key" % import_key)
raise Exception("Imports %s doesn't contain \'file\' key" % import_key)
else:
template[IMPORTS][i] = import_value['file']
if import_value.get('repository', None) is not None:
logging.warning("Clouni doesn't support imports \'repository\'")
template[IMPORTS].extend(default_import_files)
for i in range(len(template[IMPORTS])):
template[IMPORTS][i] = os.path.abspath(template[IMPORTS][i])
if template.get(TOPOLOGY_TEMPLATE):
tmpl = template.get(TOPOLOGY_TEMPLATE)
if database_api_endpoint:
if not tmpl.get(NODE_TEMPLATES):
tmpl[NODE_TEMPLATES] = {}
if not tmpl.get(RELATIONSHIP_TEMPLATES):
tmpl[RELATIONSHIP_TEMPLATES] = {}
load_to_db(tmpl[NODE_TEMPLATES], tmpl[RELATIONSHIP_TEMPLATES], config, database_api_endpoint, template, cluster_name)
else:
if tmpl.get(NODE_TEMPLATES):
node_templates = tmpl.get(NODE_TEMPLATES)
for elem in node_templates:
update_instance_model(cluster_name, node_templates[elem], node_templates[elem][TYPE], elem, [], [], is_delete, init=True)
if tmpl.get(RELATIONSHIP_TEMPLATES):
rel_templates = tmpl.get(RELATIONSHIP_TEMPLATES)
for elem in rel_templates:
update_instance_model(cluster_name, rel_templates[elem], rel_templates[elem][TYPE], elem, [], [], is_delete, init=True)
copy_of_template = copy.deepcopy(template)
try:
tosca_parser_template_object = ToscaTemplate(yaml_dict_tpl=copy_of_template)
except Exception as e:
logging.exception("Got exception from OpenStack tosca-parser: %s" % e)
raise Exception("Got exception from OpenStack tosca-parser: %s" % e)
# After validation, all templates are imported
if validate_only:
msg = 'The input "%(template_file)s" successfully passed validation. \n' \
% {'template_file': 'TOSCA template'}
return msg
tosca = ProviderToscaTemplate(template, provider, configuration_tool, cluster_name,
host_ip_parameter, is_delete, grpc_cotea_endpoint)
tool = get_configuration_tool_class(configuration_tool)(provider)
default_artifacts_directory = config.get_section(config.MAIN_SECTION).get(DEFAULT_ARTIFACTS_DIRECTORY)
configuration_content = tool.to_dsl(provider, tosca.provider_operations, tosca.reversed_provider_operations,
tosca.cluster_name, is_delete, target_directory=default_artifacts_directory,
extra=extra, debug=debug,
grpc_cotea_endpoint=grpc_cotea_endpoint)
return configuration_content
| sadimer/clouni_configuration_tool | configuration_tool/common/translator_to_configuration_dsl.py | translator_to_configuration_dsl.py | py | 9,016 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "configuration_tool.common.tosca_reserved_keys.TOSCA_ELEMENTS_DEFINITION_FILE",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "configuration_tool.common.tosca_reserved_keys.DEFAULT_ARTIFACTS_DIRECTORY",
"line_number": 23,
"usage_type": "name"
},
{
"api_na... |
21402626105 | import numpy as np
import tensorflow as tf
from pyTasks.task import Task, Parameter
from pyTasks.task import Optional, containerHash
from pyTasks.target import CachedTarget, LocalTarget
from pyTasks.target import JsonService, FileTarget
from .gram_tasks import PrepareKernelTask
import logging
import math
from time import time
import os
from tensorflow.contrib.tensorboard.plugins import projector
from scipy.spatial.distance import cdist
from .graph_tasks import EdgeType
class WVSkipgram(object):
def __init__(self, num_words, learning_rate, embedding_size,
num_steps, neg_sampling, unigrams, log="./log/"):
self.num_words = num_words
self.learning_rate = learning_rate
self.embedding_size = embedding_size
self.num_steps = num_steps
self.neg_sampling = neg_sampling
self.unigrams = unigrams
self.log_dir = log
self.graph, self.batch_inputs, self.batch_labels,self.normalized_embeddings,\
self.loss, self.optimizer = self.trainer_initial()
def trainer_initial(self):
graph = tf.Graph()
with graph.as_default():
# logging
self.logger = tf.summary.FileWriter(self.log_dir)
with tf.name_scope("embedding"):
batch_inputs = tf.placeholder(tf.int64, shape=([None, ]))
batch_labels = tf.placeholder(tf.int64, shape=([None, 1]))
graph_embeddings = tf.Variable(
tf.random_uniform([self.num_words, self.embedding_size], -0.5 / self.embedding_size, 0.5/self.embedding_size),
name='word_embedding')
batch_graph_embeddings = tf.nn.embedding_lookup(graph_embeddings, batch_inputs) #hiddeb layer
weights = tf.Variable(tf.truncated_normal([self.num_words, self.embedding_size],
stddev=1.0 / math.sqrt(self.embedding_size))) #output layer wt
biases = tf.Variable(tf.zeros(self.num_words)) #output layer biases
#negative sampling part
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=weights,
biases=biases,
labels=batch_labels,
inputs=batch_graph_embeddings,
num_sampled=self.neg_sampling,
num_classes=self.num_words,
sampled_values=tf.nn.fixed_unigram_candidate_sampler(
true_classes=batch_labels,
num_true=1,
num_sampled=self.neg_sampling,
unique=True,
range_max=self.num_words,
distortion=0.75,
unigrams=self.unigrams)#word_id_freq_map_as_list is the
# frequency of each word in vocabulary
))
norm = tf.sqrt(tf.reduce_mean(tf.square(graph_embeddings), 1, keep_dims=True))
normalized_embeddings = graph_embeddings/norm
# summary
tf.summary.histogram("weights", weights)
tf.summary.histogram("biases", biases)
tf.summary.scalar("loss", loss)
config = projector.ProjectorConfig()
emb = config.embeddings.add()
emb.tensor_name = normalized_embeddings.name
emb.metadata_path = os.path.join(self.log_dir, 'vocab.tsv')
projector.visualize_embeddings(self.logger, config)
with tf.name_scope('descent'):
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(self.learning_rate,
global_step, 100000, 0.96, staircase=True) #linear decay over time
learning_rate = tf.maximum(learning_rate,0.001) #cannot go below 0.001 to ensure at least a minimal learning
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)
self.logger.add_graph(graph)
return graph, batch_inputs, batch_labels, normalized_embeddings, loss, optimizer
def train(self, dataset):
with tf.Session(graph=self.graph,
config=tf.ConfigProto(log_device_placement=True,allow_soft_placement=False)) as sess:
merged_summary = tf.summary.merge_all()
saver = tf.train.Saver()
init = tf.global_variables_initializer()
sess.run(init)
sess.run(tf.tables_initializer())
step = 0
for i in range(self.num_steps):
t0 = time()
feed_it = dataset.make_initializable_iterator()
next_element = feed_it.get_next()
sess.run(feed_it.initializer)
while True:
try:
feed_dict = sess.run([next_element])
feed_dict = {self.batch_inputs: feed_dict[0][0],
self.batch_labels:
sess.run(
tf.reshape(feed_dict[0][1], [-1, 1])
)
}
loss_val = 0
_, loss_val = sess.run([self.optimizer, self.loss], feed_dict=feed_dict)
if step % 10 == 0:
s = sess.run(merged_summary, feed_dict=feed_dict)
self.logger.add_summary(s, step)
if step % 1000 == 0:
saver.save(sess, os.path.join(self.log_dir, "model.ckpt"), step)
step += 1
except tf.errors.OutOfRangeError:
break
epoch_time = time() - t0
loss = 0
#done with training
final_embeddings = self.normalized_embeddings.eval()
return final_embeddings
def collect_ast(G, nodes):
    """Depth-first walk along syntax ('se') in-edges.

    Returns every node reachable from the seed *nodes* (seeds included),
    in the visit order of the original stack-based traversal.
    """
    pending = list(nodes)
    visited = []
    while pending:
        current = pending.pop()
        visited.append(current)
        pending.extend(
            src
            for src, _, _, etype in G.in_edges(current, keys=True, data='type')
            if etype is EdgeType.se
        )
    return visited
def is_ast_node(G, node):
    """True iff every edge touching *node* -- outgoing and incoming alike --
    is a syntax ('se') edge.  Vacuously True for an isolated node."""
    touching = list(G.out_edges(node, keys=True, data='type'))
    touching += list(G.in_edges(node, keys=True, data='type'))
    return all(etype is EdgeType.se for _, _, _, etype in touching)
class WVGraphSentenceTask(Task):
    """Emit one 'sentence' per graph node to a text file.

    Each output line is the node's label followed by the labels of its
    data-edge ('de') predecessors and the full AST subtrees hanging off its
    syntax-edge ('se') predecessors.  These lines feed the word2vec-style
    vocabulary/embedding tasks downstream.
    """
    # Directory where the sentence files are written.
    out_dir = Parameter('./w2v/sentences/')
    def __init__(self, name, h, D):
        self.name = name
        self.h = h
        self.D = D
    def require(self):
        """Depends on the prepared kernel graph for this (name, h, D)."""
        return PrepareKernelTask(self.name, self.h, self.D)
    def output(self):
        """Plain-text target, one sentence per line."""
        path = self.out_dir.value + self.__taskid__() + '.txt'
        return FileTarget(path)
    def __taskid__(self):
        return 'W2VGraphSentence_%s_%d_%d' % (self.name, self.h, self.D)
    def run(self):
        with self.input()[0] as i:
            G = i.query()
        # NOTE(review): L is never used below; it looks like dead code.
        L = []
        with self.output() as output:
            for node in G:
                in_nodes = []
                ast_nodes = []
                # Split predecessors by edge type: syntax edges seed an AST
                # walk, data edges contribute directly.
                for in_node, _, _, d in G.in_edges(node, keys=True, data='type'):
                    if d is EdgeType.se:
                        ast_nodes.append(in_node)
                    elif d is EdgeType.de:
                        in_nodes.append(in_node)
                # Expand the syntax predecessors into their full AST subtrees.
                in_nodes.extend(collect_ast(G, ast_nodes))
                # Nodes with no context produce no sentence.
                if len(in_nodes) == 0:
                    continue
                in_nodes = [G.node[n]['label'] for n in in_nodes]
                output.write(
                    str(G.node[node]['label']) + ' ' + ' '.join(in_nodes)+'\n'
                )
class WVVocabulary(Task):
out_dir = Parameter('./w2v/')
def __init__(self, graph_list, length, h, D):
self.graph_list = graph_list
self.h = h
self.D = D
self.length = length
def require(self):
return [
WVGraphSentenceTask(
name,
self.h,
self.D
)
for name in self.graph_list
]
def output(self):
path = self.out_dir.value + self.__taskid__() + '.json'
return CachedTarget(
LocalTarget(path, service=JsonService)
)
def __taskid__(self):
return 'W2VVocabulary_%d_%d_%d' % (self.h, self.D,
containerHash(self.graph_list))
def run(self):
vocab = {}
overall = 0
for inp in self.input():
with inp as i:
for line in i.readlines():
for w in line.split():
if w not in vocab:
vocab[w] = 0
vocab[w] += 1
overall += 1
vocab = [x for x in sorted(
list(vocab.items()), key=lambda x: x[1], reverse=True
)][:self.length]
vocab = {k[0]: (v, k[1]) for v, k in enumerate(vocab)}
print('### Parsed %s samples ###' % overall)
with self.output() as o:
o.emit(vocab)
class WVEmbeddingTask(Task):
out_dir = Parameter('./w2v/')
embedding_size = Parameter(10)
learning_rate = Parameter(0.001)
num_steps = Parameter(3)
neg_sampling = Parameter(15)
batch_size = Parameter(100)
log_dir = Parameter('./log/embedded/')
def __init__(self, graph_list, length, h, D):
self.graph_list = graph_list
self.h = h
self.D = D
self.length = length
def require(self):
out = [WVVocabulary(self.graph_list, self.length, self.h, self.D)]
out.extend([
WVGraphSentenceTask(
name,
self.h,
self.D
)
for name in self.graph_list
])
return out
def output(self):
path = self.out_dir.value + self.__taskid__() + '.json'
return CachedTarget(
LocalTarget(path, service=JsonService)
)
def __taskid__(self):
return 'W2VEmbeddingTask_%d_%d_%d' % (self.h, self.D,
containerHash(self.graph_list))
def _get_vocab(self, vocab):
vocab = [x[0] for x in
sorted(list(vocab.items()),
key=lambda v: v[1][0])]
with open(os.path.join(self.log_dir.value, 'vocab.tsv'), 'w') as o:
for v in vocab:
o.write(v+'\n')
return vocab
def run(self):
with self.input()[0] as i:
vocab = i.query()
inp = (self.input()[i] for i in range(1, len(self.input())))
filenames = [f.sandBox + f.path for f in inp]
unigrams = [x[1][1] for x in
sorted(list(vocab.items()),
key=lambda v: v[1][0])]
model_skipgram = WVSkipgram(
len(vocab),
self.learning_rate.value,
self.embedding_size.value,
self.num_steps.value,
self.neg_sampling.value,
unigrams,
self.log_dir.value
)
with tf.Session(graph=model_skipgram.graph,
config=tf.ConfigProto(log_device_placement=True,allow_soft_placement=False)) as sess:
vocab_mapping = tf.constant(self._get_vocab(vocab))
table = tf.contrib.lookup.index_table_from_tensor(
mapping=vocab_mapping, num_oov_buckets=1,
default_value=-1)
def parse_mapping(line):
line = tf.string_split([line], ' ').values
line = table.lookup(line)
label = line[0:1]
features = line[1:]
return features, tf.tile(label, [tf.shape(features)[0]])
dataset = tf.data.TextLineDataset(filenames)
dataset = dataset.map(parse_mapping)
dataset = dataset.flat_map(lambda features, labels:
tf.data.Dataset().zip((
tf.data.Dataset().from_tensor_slices(features),
tf.data.Dataset().from_tensor_slices(labels))
))
dataset = dataset.shuffle(1000).batch(self.batch_size.value)
embedding = model_skipgram.train(dataset)
with self.output() as o:
o.emit(embedding.tolist())
class WVSimilarWords(Task):
    """For every vocabulary word, emit its five nearest neighbours in the
    learned embedding space together with their cosine similarity."""
    # Directory where the nearest-neighbour JSON is written.
    out_dir = Parameter('./w2v/')
    def __init__(self, graph_list, length, h, D):
        self.graph_list = graph_list
        self.h = h
        self.D = D
        self.length = length
    def require(self):
        """Needs the vocabulary and the trained embedding matrix."""
        out = [WVVocabulary(self.graph_list, self.length, self.h, self.D),
               WVEmbeddingTask(self.graph_list, self.length,
                               self.h, self.D)]
        return out
    def output(self):
        path = self.out_dir.value + self.__taskid__() + '.json'
        return CachedTarget(
            LocalTarget(path, service=JsonService)
        )
    def __taskid__(self):
        return 'W2VSimilarWords_%d_%d_%d' % (self.h, self.D,
                                             containerHash(self.graph_list))
    def run(self):
        with self.input()[0] as i:
            vocab = i.query()
        with self.input()[1] as i:
            embedding = np.array(i.query())
        # Invert the vocabulary mapping: embedding row index -> word.
        inv_vocab = [None]*len(vocab)
        for k, v in vocab.items():
            inv_vocab[v[0]] = k
        # Pairwise cosine distances.  Column 0 of each argsorted row is the
        # word itself (distance 0), so columns 1..5 are the 5 neighbours.
        dis = cdist(embedding, embedding, 'cosine')
        arg_sort = np.argsort(dis, axis=1)[:, 1:6]
        near = {}
        for i, k in enumerate(inv_vocab):
            row = arg_sort[i]
            near[k] = []
            for j in range(row.shape[0]):
                # BUG FIX: the similarity must be looked up at the
                # neighbour's column (row[j]); the old code used the loop
                # position j, pairing each neighbour with the distance to
                # an unrelated word.
                near[k].append([inv_vocab[row[j]], 1 - dis[i, row[j]]])
        with self.output() as o:
            o.emit(near)
| cedricrupb/pySVRanker | word2vec_tasks.py | word2vec_tasks.py | py | 14,557 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "tensorflow.Graph",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary.FileWriter",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "tensorflow.summary",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name":... |
29841914634 | import threading
import traitlets
import pyrosetta
import pyrosetta.rosetta.basic.options
import pyrosetta.rosetta.protocols.rosetta_scripts as rosetta_scripts
import pyrosetta.rosetta.protocols.moves as moves
import pyrosetta.distributed
import pyrosetta.distributed.tasks.taskbase as taskbase
import pyrosetta.distributed.packed_pose as packed_pose
def validate(protocol_xml):
    """Perform schema and parse validation for the given protocol xml.

    Returns None on success; any parser error raised by the underlying
    RosettaScripts machinery propagates to the caller unchanged.
    """
    # Building the task and running setup creates the XML tag and fully
    # parses the protocol, which is the validation.  The previous
    # `except RuntimeError as error: raise error` was a no-op
    # catch-and-reraise and has been removed.
    test_task = BaseRosettaScriptsTask(protocol_xml)
    test_task.maybe_setup()
class BaseRosettaScriptsTask(taskbase.TaskBase):
    """Shared machinery for RosettaScripts-based tasks: lazily builds one
    class-wide parser, validates the protocol XML during setup, and hands
    out a freshly parsed protocol mover on each access."""
    @property
    @pyrosetta.distributed.requires_init
    @pyrosetta.distributed.with_lock
    def parser(self):
        # Lazily create a single RosettaScriptsParser shared by all
        # instances (stored on the class, not on self).
        if not getattr(self, "_parser", None):
            BaseRosettaScriptsTask._parser = \
                rosetta_scripts.RosettaScriptsParser()
        return self._parser
    # Raw protocol XML; traitlets coerces assigned values to unicode.
    protocol_xml = traitlets.CUnicode()
    def __init__(self, protocol_xml):
        super().__init__(protocol_xml=protocol_xml)
    @pyrosetta.distributed.requires_init
    @pyrosetta.distributed.with_lock
    def setup(self):
        """Snapshot Rosetta options, build the XML tag, and validate it by
        parsing.  Also creates the lock used to serialize protocol.apply()."""
        self.default_options = pyrosetta.rosetta.basic.options.process()
        self.tag = self.parser.create_tag_from_xml_string(
            self.protocol_xml, self.default_options)
        # Validate by parsing; raises on an invalid protocol.
        self.parser.parse_protocol_tag(self.tag, self.default_options)
        self.protocol_lock = threading.Lock()
    @property
    @pyrosetta.distributed.requires_init
    @pyrosetta.distributed.with_lock
    def parsed_protocol(self):
        # A fresh mover per access: re-instantiates the protocol from the
        # already-validated tag.
        return self.parser.parse_protocol_tag(self.tag, self.default_options)
    def execute(self, pack_or_pose):
        """Run apply() (defined by subclasses) and repack the result."""
        return packed_pose.to_packed(self.apply(pack_or_pose))
class MultioutputRosettaScriptsTask(BaseRosettaScriptsTask):
    """RosettaScripts task for protocols that can emit multiple poses."""
    @pyrosetta.distributed.requires_init
    def apply(self, pack_or_pose):
        """Apply the protocol, yielding each output pose in turn.

        Yields nothing when the mover reports failure.  The lock serializes
        the protocol run across threads.
        """
        protocol = self.parsed_protocol
        wpose = packed_pose.to_pose(pack_or_pose)
        with self.protocol_lock:
            protocol.apply(wpose)
            if protocol.get_last_move_status() != moves.MoverStatus.MS_SUCCESS:
                return
            # Yield the primary pose, then drain the mover's additional
            # outputs until it returns a falsy value.
            while wpose:
                yield wpose
                wpose = protocol.get_additional_output()
class SingleoutputRosettaScriptsTask(BaseRosettaScriptsTask):
    """RosettaScripts task for protocols that emit a single pose."""
    @pyrosetta.distributed.requires_init
    def apply(self, pack_or_pose):
        """Apply the protocol and return the resulting pose.

        Returns None (implicitly) when the mover reports failure.  The lock
        serializes the protocol run across threads.
        """
        protocol = self.parsed_protocol
        wpose = packed_pose.to_pose(pack_or_pose)
        with self.protocol_lock:
            protocol.apply(wpose)
            if protocol.get_last_move_status() != moves.MoverStatus.MS_SUCCESS:
                return
            else:
                return wpose
| MedicaicloudLink/Rosetta | main/source/src/python/PyRosetta/src/pyrosetta/distributed/tasks/rosetta_scripts.py | rosetta_scripts.py | py | 2,892 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pyrosetta.distributed.tasks.taskbase.TaskBase",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "pyrosetta.distributed.tasks.taskbase",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "pyrosetta.rosetta.protocols.rosetta_scripts.RosettaScript... |
37030043869 | import PySimpleGUI as sg
import numpy as np
import cv2
import matplotlib.pyplot as plt
from Baysian_Mat import Bayesian_Matte
from PIL import Image, ImageOps
import time # Execution TIme imports
import psutil
from laplac import Laplacianmatting
from compositing import compositing
from QualityTest import mse2d
from QualityTest import sad2d
from QualityTest import psnr2d
from smooth import smooth
# Import your Bayesian_Matte, Laplacianmatting, compositing, mse2d, sad2d, and psnr2d functions here
# Define the PySimpleGUI layout
layout = [
[sg.Text("Select image file")],
[sg.Input(key="-IMAGE_FILE-"), sg.FileBrowse()],
[sg.Text("Select trimap file")],
[sg.Input(key="-TRIMAP_FILE-"), sg.FileBrowse()],
[sg.Text("Select GT file")],
[sg.Input(key="-GT_FILE-"), sg.FileBrowse()],
[sg.Button("Submit")],
[sg.Output(size=(60, 2))]
]
# Create the PySimpleGUI window
window = sg.Window("Alpha Matte Calculation", layout)
# Start time for computing the execution time
st = time.time()
# Get initial memory usage
Memstart = psutil.Process().memory_info().rss / (1024 ** 2)
# Event loop
while True:
event, values = window.read()
if event == sg.WINDOW_CLOSED:
break
if event == "Submit":
# Get the file paths from the input fields
image_path = values["-IMAGE_FILE-"]
trimap_path = values["-TRIMAP_FILE-"]
gt_path = values["-GT_FILE-"]
# Read the image, trimap, and GT files
image = np.array(Image.open(image_path))
image_trimap = np.array(Image.open(trimap_path))
GT = np.array(Image.open(gt_path))
# Step 2 : Calculating Bayesian Matte for the given trimap
alpha, pixel_count = Bayesian_Matte(image, image_trimap)
# Step 3 : Making it back to range (0-255) for display purpose
alpha_disp = alpha * 255
alpha_int8 = np.array(alpha, dtype=int)
et = time.time()
elapsed_time = et - st
# Step 4 : End to End testing - 1 : Calculating the Laplacian Matting
Lalpha = Laplacianmatting(image, image_trimap)
# Step 5 : Compositing Function Display
background = np.array(Image.open(
'C:/Users/aduttagu/Desktop/Main/Bayesian-Matting-Implementation/bayesian-Matting-Python/background.png'))
comp_Bay = compositing(image, alpha_disp, background)
# Step 6 : Smoothening ALpha Methods
smooth_alpha = smooth(alpha_disp)
# Step 7 : Displaying THe Bayesian, Laplacian and GT.
fig, axes = plt.subplots(nrows=2, ncols=2)
axes[0, 0].imshow(alpha_disp, cmap='gray')
axes[0, 0].set_title('Bayesian - Alpha Matte')
axes[0, 1].imshow(Lalpha, cmap='gray')
axes[0, 1].set_title('Laplacian - Alpha Matte')
axes[1, 0].imshow(GT, cmap='gray')
axes[1, 0].set_title('Ground Truth')
axes[1, 1].imshow(smooth_alpha, cmap='gray')
axes[1, 1].set_title('Smoothed Alpha')
plt.show()
plt.imshow(comp_Bay)
plt.show()
# Close the PySimpleGUI window
window.close()
# Part of End to End testing - 1 : Performance Comparision between Laplacian and Bayesian.
Bay_MSE = mse2d(alpha_disp, GT)
Lap_MSE = mse2d(Lalpha, GT)
print("The MSE between the Ground Truth and Bayesian Alpha Matte is :", Bay_MSE)
print("The MSE between the Ground Truth and Laplacian Alpha Matte is :", Lap_MSE)
Bay_SAD = sad2d(alpha_disp, GT)
Lap_SAD = sad2d(Lalpha, GT)
print("The SAD between the Ground Truth and Bayesian Alpha Matte is :", Bay_SAD)
print("The SAD between the Ground Truth and Laplacian Alpha Matte is :", Lap_SAD)
Bay_PSNR = psnr2d(alpha_disp, GT)
Lap_PSNR = psnr2d(Lalpha, GT)
print("The PSNR between the Ground Truth and Bayesian Alpha Matte is :", Bay_PSNR)
print("The PSNR between the Ground Truth and Laplacian Alpha Matte is :", Lap_PSNR)
print('Execution time for Bayesian Matting: {:.3f} seconds'.format(
elapsed_time))
# get usage after completion of code
Memend = psutil.Process().memory_info().rss / (1024 ** 2)
Memuse = Memend - Memstart
print("Total memory consumed in execution of this program : ", Memuse, "MB's")
| ADG4050/Bayesian-Matting-Implementation | bayesian-Matting-Python/UI.py | UI.py | py | 4,145 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PySimpleGUI.Text",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.Input",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI.FileBrowse",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "PySimpleGUI... |
2279604404 | from Sentence import Sentence
import nltk
class Text:
def __init__(self, rawText, name):
self.rawText = rawText#self.formatText(rawText)
self.name = name
splitAtNewlines = [s.strip() for s in rawText.splitlines()]
rawSentences = []
for line in splitAtNewlines:
sentencesInLine = nltk.sent_tokenize(line)
rawSentences.extend(sentencesInLine)
self.sentences = []
for rawSentence in rawSentences:
sentence = Sentence(self, rawSentence)
self.sentences.append(sentence)
def formatText(self, rawText):
return rawText.replace(u"\u2018", "'").replace(u"\u2019", "'").replace(u"\xa9", "e").replace(u"\u2014","-").decode("utf8") | Lombre/LemmaLearner | Text.py | Text.py | py | 745 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "nltk.sent_tokenize",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "Sentence.Sentence",
"line_number": 18,
"usage_type": "call"
}
] |
42602830723 |
from matplotlib import pyplot as plt
font = {'family':'sans-serif', 'sans-serif':'Arial'}
plt.rc('font', **font)
plt.title('', fontsize='x-large', pad=None)
plt.xlabel('', fontsize='x-large')
plt.ylabel('', fontsize='x-large')
# plt.xscale('log')
plt.tick_params(axis="both",direction="in", labelsize='x-large')
plt.subplots_adjust(left=0.30, bottom=0.30, right=0.70, top=0.70, wspace=0.20, hspace=0.20)
plt.legend(fontsize='large').set_draggable(True)
plt.grid(alpha=0.5)
| hitergelei/tools | plt-format.py | plt-format.py | py | 475 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.rc",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "matplotli... |
39359053941 | import time
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
import selenium
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import service
#driver = webdriver.Chrome(executable_path="G:\driver\chromedriver_win32\chromedriver.exe") #my computer
driver=webdriver.Chrome()
driver.implicitly_wait(40)
driver.get("https://www.facebook.com/")
driver.find_element(By.XPATH,'//a[@role="button"and@data-testid="open-registration-form-button"]').click()
time.sleep(2)
driver.find_element(By.XPATH,'//input[@name="firstname"and@aria-label="First name"]').send_keys('pavithran')
time.sleep(2)
driver.find_element(By.XPATH,'//input[@name="lastname"and@aria-label="Surname"]').send_keys('sethu')
time.sleep(2)
driver.find_element(By.XPATH,'//input[@aria-label="Mobile number or email address"]').send_keys('9784561524')
driver.find_element(By.XPATH,'//input[@id="password_step_input"and@type="password"]').send_keys('Passcode')
time.sleep(2)
print("--------days-------")
days_elements=driver.find_element(By.ID,"day")#assign the id
days=Select(days_elements)#selecting the all elements
#giving the values manually to the dropdownlist
days.select_by_visible_text("17")#text method
time.sleep(2)
days.select_by_index(2)#index method
time.sleep(2)
days.select_by_value("6")#value method
time.sleep(2)
days_elements.send_keys("25")#send my value to days dropdown box NORMAL METHOD
print("get attribute method the value sent to the dropbox:",days_elements.get_attribute('value')) #get my value from dropbox
time.sleep(2)
totaloptions=len(days.options)#to find total options available in days
print("Total options in day dropdownlist:",totaloptions)#31 options are there
opsd=days.options#to get all options
print("total options")#just for heading
for option in opsd:#for loop
print("option text is-{}-option value is={}".format(option.text,option.get_attribute("value")))
print("--using range--")
for x in range(0,30):
print(opsd[x].text)
print("--days after 20th\n--")
for x in opsd:
y=x.get_attribute("value")
z=int(y)
if z>=20:
print(x.text)
print("--days between 10 to 25\n--")
for x in opsd:
y=x.get_attribute("value")
z=int(y)
if z>=10 and z<=25:
print(x.text)
print('-----month-----')
#month
month_element=driver.find_element(By.ID,'month')
months=Select(month_element)
months.select_by_value("2")#feb
time.sleep(2)
months.select_by_index(4)
time.sleep(2)
months.select_by_visible_text("Aug")
month_length=len(months.options)
print("total months options are available in facebook\n:",month_length)
ops=months.options
for option in ops:
print("option text is-{}-option value is={}".format(option.text, option.get_attribute("value")))
#using range printing text
print("--using range--")
for x in range(0,12):
print(ops[x].text)
print("----last 3 months---\n")
for x in ops:
y=(x.get_attribute('value'))
z=int(y)
if z>=10:
print(x.text)
print("----between months:----\n")
for x in ops:
y=(x.get_attribute('value'))
z=int(y)
if z>=2 and z<=10:
print(x.text)
print("---1st 3 months\n---")
for x in ops:
y=(x.get_attribute('value'))
z=int(y)
if z<=3:
print(x.text)
print("-------year--------")
year_elements=driver.find_element(By.ID,"year")
years=Select(year_elements)
years.select_by_visible_text("1997")
time.sleep(3)
years.select_by_value("1996")
time.sleep(3)
years.select_by_index(1)#2021
totalyears=len(years.options)
print("total no of options in year:",totalyears)#118
opsy=years.options
for x in opsy:
print("year is={} year value is={}".format(x.text,x.get_attribute("value")))
print("--using range--")
for x in range(0,30):
print(opsy[x].text)
print("--years above 1997\n--")
for x in opsy:
y=x.get_attribute("value")
z=int(y)
if z>=1997:
print(x.text)
print("--years between 2000 to 1990\n--")
for x in opsy:
y=x.get_attribute("value")
z=int(y)
if z<=2000 and z>=1990:
print(x.text)
print(type(y))
print(type(z))
#gender selection
gender_f=driver.find_element(By.XPATH,'(//input[@type="radio"and@name="sex"])[1]').click()
status=driver.find_element(By.XPATH,'(//input[@type="radio"and@name="sex"])[1]').is_selected()
print(status)
time.sleep(3)
gender_m=driver.find_element(By.XPATH,'(//input[@type="radio"and@name="sex"])[2]').click()
status=driver.find_element(By.XPATH,'(//input[@type="radio"and@name="sex"])[2]').is_selected()
print(status)
time.sleep(3)
gender_c=driver.find_element(By.XPATH,'(//input[@type="radio"and@name="sex"])[3]').click()
status=driver.find_element(By.XPATH,'(//input[@type="radio"and@name="sex"])[3]').is_selected()
print(status)
custom=driver.find_element(By.XPATH,'//select[@aria-label="Select your pronoun"]')
custom_s=Select(custom)
custom_s.select_by_value("1")
time.sleep(2)
custom_s.select_by_value("2")
time.sleep(2)
customs=custom_s.options
for x in customs:
print(x.text)
driver.find_element(By.XPATH,'//input[@name="custom_gender"]').send_keys("they")
driver.find_element(By.XPATH,'//button[text()="Sign Up"]').click()
time.sleep(5)
driver.close()
| Paviterence/Selenium-Python-BasicCodes | fb_select_method.py | fb_select_method.py | py | 5,232 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.XPATH",
"line_number": 15,
"usage_type": "attribute"
},
... |
41034447740 | from django.test import TestCase
from car import models
class ModelTest(TestCase):

    def test_create_user_with_email_successful(self):
        """A Car record can be created and every field round-trips intact."""
        attrs = {
            'category': 'CO',
            'model': "TT RS 2020",
            'name': 'Audi TT RS TURBO',
            'number_of_doors': 3,
            'description': 'This car is a beast',
        }
        car = models.Car.objects.create(**attrs)
        # Each attribute must come back from the ORM exactly as supplied.
        for field, expected in attrs.items():
            self.assertEqual(getattr(car, field), expected)
| Womencancode/technical-test-Talana | app/car/tests/test_models.py | test_models.py | py | 814 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.test.TestCase",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "car.models.Car.objects.create",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "car.models.Car",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": ... |
3528547150 | import os
import pytest
from dvclive.data.scalar import Scalar
from dvclive.keras import DvcLiveCallback
from tests.test_main import read_logs
# pylint: disable=unused-argument, no-name-in-module, redefined-outer-name
@pytest.fixture
def xor_model():
    """Yield a factory that builds a tiny Keras MLP plus the XOR dataset.

    The TensorFlow imports live inside the fixture so collecting the test
    module does not require TensorFlow to be importable.
    """
    import numpy as np
    from tensorflow.python.keras import Sequential
    from tensorflow.python.keras.layers import Activation, Dense
    def make():
        # XOR truth table: inputs and expected outputs.
        x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
        y = np.array([[0], [1], [1], [0]])
        # 2-8-1 network with a sigmoid output for binary classification.
        model = Sequential()
        model.add(Dense(8, input_dim=2))
        model.add(Activation("relu"))
        model.add(Dense(1))
        model.add(Activation("sigmoid"))
        model.compile(
            loss="binary_crossentropy", optimizer="sgd", metrics=["accuracy"]
        )
        return model, x, y
    yield make
def test_keras_callback(tmp_dir, xor_model, capture_wrap):
    """Default DvcLiveCallback writes train and eval metrics under dvclive/."""
    model, x, y = xor_model()
    model.fit(
        x,
        y,
        epochs=1,
        batch_size=1,
        validation_split=0.2,
        callbacks=[DvcLiveCallback()],
    )
    assert os.path.exists("dvclive")
    # The scalar logs must contain accuracy for both the train and eval splits.
    logs, _ = read_logs(tmp_dir / "dvclive" / Scalar.subfolder)
    assert os.path.join("train", "accuracy") in logs
    assert os.path.join("eval", "accuracy") in logs
@pytest.mark.parametrize("save_weights_only", (True, False))
def test_keras_model_file(
    tmp_dir, xor_model, mocker, save_weights_only, capture_wrap
):
    """save_weights_only selects Model.save_weights over Model.save."""
    model, x, y = xor_model()
    save = mocker.spy(model, "save")
    save_weights = mocker.spy(model, "save_weights")
    model.fit(
        x,
        y,
        epochs=1,
        batch_size=1,
        callbacks=[
            DvcLiveCallback(
                model_file="model.h5", save_weights_only=save_weights_only
            )
        ],
    )
    # Exactly one of the two save paths is taken (bool compares as 0/1).
    assert save.call_count != save_weights_only
    assert save_weights.call_count == save_weights_only
@pytest.mark.parametrize("save_weights_only", (True, False))
def test_keras_load_model_on_resume(
    tmp_dir, xor_model, mocker, save_weights_only, capture_wrap
):
    """With resume=True an existing checkpoint is reloaded before training."""
    import dvclive.keras
    model, x, y = xor_model()
    # Pre-create the checkpoint the callback is expected to pick up.
    if save_weights_only:
        model.save_weights("model.h5")
    else:
        model.save("model.h5")
    load_weights = mocker.spy(model, "load_weights")
    load_model = mocker.spy(dvclive.keras, "load_model")
    model.fit(
        x,
        y,
        epochs=1,
        batch_size=1,
        callbacks=[
            DvcLiveCallback(
                model_file="model.h5",
                save_weights_only=save_weights_only,
                resume=True,
            )
        ],
    )
    # Exactly one loading strategy is used, matching save_weights_only.
    assert load_model.call_count != save_weights_only
    assert load_weights.call_count == save_weights_only
def test_keras_no_resume_skip_load(tmp_dir, xor_model, mocker, capture_wrap):
    """resume=False must not reload an existing checkpoint."""
    model, x, y = xor_model()
    model.save_weights("model.h5")
    load_weights = mocker.spy(model, "load_weights")
    model.fit(
        x,
        y,
        epochs=1,
        batch_size=1,
        callbacks=[
            DvcLiveCallback(
                model_file="model.h5",
                save_weights_only=True,
                resume=False,
            )
        ],
    )
    assert load_weights.call_count == 0
def test_keras_no_existing_model_file_skip_load(
    tmp_dir, xor_model, mocker, capture_wrap
):
    """resume=True without an existing model file must not attempt a load."""
    model, x, y = xor_model()
    load_weights = mocker.spy(model, "load_weights")
    model.fit(
        x,
        y,
        epochs=1,
        batch_size=1,
        callbacks=[
            DvcLiveCallback(
                model_file="model.h5",
                save_weights_only=True,
                resume=True,
            )
        ],
    )
    assert load_weights.call_count == 0
def test_keras_None_model_file_skip_load(
    tmp_dir, xor_model, mocker, capture_wrap
):
    """resume=True with model_file left at None must not load anything."""
    model, x, y = xor_model()
    model.save_weights("model.h5")
    load_weights = mocker.spy(model, "load_weights")
    model.fit(
        x,
        y,
        epochs=1,
        batch_size=1,
        callbacks=[
            DvcLiveCallback(
                save_weights_only=True,
                resume=True,
            )
        ],
    )
    assert load_weights.call_count == 0
| gshanko125298/Prompt-Engineering-In-context-learning-with-GPT-3-and-LLMs | myenve/Lib/site-packages/tests/test_keras.py | test_keras.py | py | 4,219 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "tensorflow.python.keras.Sequential",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "tensorflow... |
33548045927 | from django.test import TestCase
from costcenter.forms import FundForm
class FundFormTest(TestCase):
    """Validation rules of FundForm."""

    def test_empty_form(self):
        """An unbound form exposes all expected fields and renders them."""
        form = FundForm()
        for field_name in ("fund", "name", "vote", "download"):
            self.assertIn(field_name, form.fields)
        # Spot-check the rendered markup of a single field.
        self.assertInHTML(
            '<input type="text" name="fund" maxlength="4" required id="id_fund">',
            str(form),
        )

    def test_filled_form(self):
        """A complete, well-formed payload validates."""
        payload = {"fund": "C119", "name": "National Procurement", "vote": 1, "download": True}
        form = FundForm(data=payload)
        self.assertTrue(form.is_valid())

    def test_vote_not_1_or_5(self):
        """Votes other than 1 or 5 are rejected."""
        form = FundForm(data={"fund": "C113", "name": "NP", "vote": "6", "download": 1})
        self.assertEqual(form.errors["vote"], ["Vote must be 1 or 5"])

    def test_fund_starts_with_non_letter(self):
        """A fund code must begin with a letter."""
        form = FundForm(data={"fund": "3113"})
        self.assertEqual(form.errors["fund"], ["Fund must begin with a letter"])

    def test_fund_is_not_4_characters_long(self):
        """A fund code longer than four characters is rejected."""
        fund_code = "c3456"
        form = FundForm(data={"fund": fund_code})
        expected = f"Ensure this value has at most 4 characters (it has {len(fund_code)})."
        self.assertEqual(form.errors["fund"], [expected])
| mariostg/bft | costcenter/tests/test_forms.py | test_forms.py | py | 1,394 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.test.TestCase",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "costcenter.forms.FundForm",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "costcenter.forms.FundForm",
"line_number": 22,
"usage_type": "call"
},
{
"api_name":... |
def solution(s):
    """Return *s* in JadenCase: the first letter of every space-separated
    word upper-cased, every other character lower-cased.  Runs of spaces
    (and leading/trailing spaces) are preserved exactly."""
    pieces = []
    at_word_start = True  # the very first character starts a word
    for ch in s:
        pieces.append(ch.upper() if at_word_start else ch.lower())
        # A space means the next character begins a new word.
        at_word_start = ch == ' '
    return ''.join(pieces)
{
"api_name": "collections.deque",
"line_number": 4,
"usage_type": "call"
}
] |
import json
import random

# Minimal chat REPL: look the user's input up in intents.json and reply.
while True:
    inp = input("> ")
    # Re-read the intents file each turn so edits are picked up live.
    # Bug fix: the original called json.dump(f, ints) (arguments swapped,
    # and the wrong direction) instead of loading the file.
    with open('intents.json', 'r') as f:
        ints = json.load(f)
    try:
        val = ints[inp]
        # A list of candidate replies -> pick one at random.
        # (The original used ints[inp].type(), which is not a method.)
        if isinstance(val, list):
            val = random.choice(val)
        print(val)
    except KeyError:
        # Only an unknown input should reach here; other errors surface.
        print("I don't understand.")
{
"api_name": "json.dump",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 11,
"usage_type": "call"
}
] |
26994135313 | from django.urls import path
from Zoo import views
import templates
# URL routes for the Zoo app.  Every view lives in Zoo.views; the
# <int:id> converters capture the primary key of the targeted record.
urlpatterns = [
    path('login/', views.user_login, name='login'),
    path('logout/',views.logout, name='logout'),
    path('user_create/', views.user_create, name='user_create'),
    path('index/', views.index, name='index'),
    path('detail/<int:id>', views.animal_detail, name='animal_detail'),
    path('animal_delete/<int:id>/', views.animal_delete, name='animal_delete'),
    path('check/<int:id>', views.check, name='check'),
    path('search/', views.search, name='search'),
    path('search_filter/', views.search_filter, name='search_filter'),
    path('write_log/<int:id>/', views.write_log, name='write_log'),
    path('edit_log/<int:id>/', views.edit_log, name='edit_log'),
    path('log_delete/<int:id>/', views.log_delete, name='log_delete'),
    path('zone/<int:id>', views.zone, name='zone'),
]
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "Zoo.views.user_login",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "Zoo.views",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
... |
27447292826 | import time
from selenium.webdriver.support.ui import Select
from selenium import webdriver
class InventoryPage():
    """Page object for the saucedemo.com inventory page.

    Wraps navigation, the sort drop-down, sort-order verification and the
    add-to-cart/checkout flow around a Selenium WebDriver instance.
    """

    def __init__(self, driver):
        self.driver = driver

    def navigate(self, urlLogin):
        """Open the given URL in the browser."""
        self.driver.get(urlLogin)

    def changeSorting(self, locatorClass, option):
        """Select *option* (by value) in the sort drop-down found by class name."""
        self.sel = Select(self.driver.find_element_by_class_name(locatorClass))
        self.sel.select_by_value(option)

    def _item_names(self):
        """Return the displayed product names (echoing each, as before)."""
        names = []
        for element in self.driver.find_elements_by_class_name("inventory_item_name"):
            name_text = element.text
            print(name_text)
            names.append(name_text)
        return names

    def _item_prices(self):
        """Return the displayed prices as floats (leading '$' stripped)."""
        prices = []
        for element in self.driver.find_elements_by_class_name("inventory_item_price"):
            prices.append(float(element.text.replace('$', '')))
        return prices

    def check_A_to_Z_sort(self):
        """Report whether product names are in ascending alphabetical order.

        Bug fix: the original re-created the list inside the loop, so it only
        ever held the last name and the comparison was meaningless.
        """
        names_list = self._item_names()
        if names_list == sorted(names_list):
            print("'A_to_Z' sorting working ")
        else:
            print("'A_to_Z' sorting not working")

    def check_Z_to_A_sort(self):
        """Report whether product names are in descending alphabetical order.

        Bug fixes: list built across the whole loop, and list.reverse()
        returns None (the original compared against None), so the reference
        ordering is produced with sorted(..., reverse=True).
        """
        names_list = self._item_names()
        if names_list == sorted(names_list, reverse=True):
            print("'Z_to_A' sorting working ")
        else:
            print("'Z_to_A' sorting not working")

    def check_low_to_high_sort(self):
        """Report whether prices are in ascending order (same fixes as above)."""
        prices_values = self._item_prices()
        if prices_values == sorted(prices_values):
            print("'low_to_high' sorting working ")
        else:
            print("'low_to_high' sorting not working")

    def check_high_to_low_sort(self):
        """Report whether prices are in descending order (same fixes as above)."""
        prices_values = self._item_prices()
        if prices_values == sorted(prices_values, reverse=True):
            print("'high_to_low' sorting working ")
        else:
            print("'high_to_low' sorting not working")

    def click_item_page_and_verify(self, item_full_id):
        """Open an item's detail page and assert the URL carries its id."""
        self.driver.find_element_by_id(item_full_id).click()
        # NOTE(review): assumes a single-digit item id at index 5 of the
        # element id (e.g. "item_4_title_link") -- confirm for ids >= 10.
        item_id = item_full_id[5]
        currentURL = self.driver.current_url
        assert currentURL == "https://www.saucedemo.com/inventory-item.html?id=" + str(item_id)
        print("item page " + str(item_id) + " opened")

    def click_item_to_cart_and_verify(self, item_id):
        """Add one item to the cart, then walk to the checkout step-one page."""
        self.driver.find_element_by_id(item_id).click()
        item_shopped = self.driver.find_element_by_class_name("shopping_cart_badge")
        assert int(item_shopped.text) == 1
        self.driver.find_element_by_class_name("shopping_cart_badge").click()
        time.sleep(2)
        self.driver.find_element_by_id("checkout").click()
        time.sleep(2)
        currentURL = self.driver.current_url
        assert currentURL == "https://www.saucedemo.com/checkout-step-one.html"
        print("check out page opened")
| Abanoub-waheed/python_test | inventoryPage.py | inventoryPage.py | py | 3,580 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "selenium.webdriver.support.ui.Select",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 87,
"usage_type": "call"
}
] |
from sklearn.cluster import KMeans
import numpy as np
import matplotlib.pyplot as plt

# Cluster 100 uniformly random 2-D points into two groups.
samples = np.array(np.random.random((100, 2)))

model = KMeans(n_clusters=2).fit(samples)
print('Labels')
print(model.labels_)

# Assign two fresh points to the learned clusters.
prediction = model.predict([[0, 0], [12, 3]])
print('result')
print(prediction)

print('clusters')
print(model.cluster_centers_)

# Scatter plot coloured by cluster assignment.
plt.scatter(samples[:, 0], samples[:, 1], c=model.labels_)
plt.xlabel("X")
plt.ylabel("Y")
plt.show()
| bpark/ml-demos | simple_kmeans.py | simple_kmeans.py | py | 486 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.random.random",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "sklearn.cluster.KMeans... |
74668118906 | from collections import Counter
from itertools import product
from operator import add
def solve(lines, cycles, dimensions):
    """Run Conway Cubes (AoC 2020 day 17) for *cycles* generations.

    ``lines`` is the 2-D starting slice ('#' = active); the grid lives in
    ``dimensions``-dimensional space (extra coordinates start at 0).
    Returns the number of active cells after the final cycle.
    """
    active = {
        (col, row) + (0,) * (dimensions - 2)
        for row, line in enumerate(lines)
        for col, elem in enumerate(line)
        if elem == '#'
    }
    no_shift = dimensions * (0,)
    for _ in range(cycles):
        # Count, for every cell, how many active neighbours it has.
        neighbour_counts = Counter()
        for cell in active:
            for delta in product((-1, 0, 1), repeat=dimensions):
                if delta != no_shift:
                    neighbour_counts[tuple(map(add, cell, delta))] += 1
        # Birth on exactly 3 neighbours; survival on 2 or 3.
        active = {
            cell
            for cell, count in neighbour_counts.items()
            if count == 3 or (count == 2 and cell in active)
        }
    return len(active)
# Script entry: read the puzzle grid and print the answers for the
# 3-dimensional (part 1) and 4-dimensional (part 2) variants, 6 cycles each.
with open('input.txt') as file:
    lines = file.read().splitlines()
print(solve(lines, 6, 3))
print(solve(lines, 6, 4))
| dionyziz/advent-of-code | 2020/17/17.py | 17.py | py | 1,006 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "collections.Counter",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "operator.add",
"line_number": 21,
"usage_type": "argument"
}
] |
36647480067 | import collections
from .pybeesgrid import TAG_SIZE, NUM_CONFIGS, NUM_MIDDLE_CELLS
from .pybeesgrid import GridGenerator, BadGridArtist, BlackWhiteArtist, \
MaskGridArtist, DepthMapArtist
from .pybeesgrid import drawGrids
from .pybeesgrid import INNER_BLACK_SEMICIRCLE, CELL_0_BLACK, CELL_1_BLACK, \
CELL_2_BLACK, CELL_3_BLACK, CELL_4_BLACK, CELL_5_BLACK, CELL_6_BLACK, \
CELL_7_BLACK, CELL_8_BLACK, CELL_9_BLACK, CELL_10_BLACK, CELL_11_BLACK, \
IGNORE, CELL_0_WHITE, CELL_1_WHITE, CELL_2_WHITE, CELL_3_WHITE, \
CELL_4_WHITE, CELL_5_WHITE, CELL_6_WHITE, CELL_7_WHITE, CELL_8_WHITE, \
CELL_9_WHITE, CELL_10_WHITE, CELL_11_WHITE, OUTER_WHITE_RING, \
INNER_WHITE_SEMICIRCLE
import numpy as np
import warnings
# Label groups describing a generated tag.
TAG_ID = ['bits']
# Pose and placement parameters of a tag.
TAG_CONFIG = ['z_rotation', 'y_rotation', 'x_rotation', 'center', 'radius']
# Geometric structure parameters of the tag rendering.
TAG_STRUCTURE = ['inner_ring_radius', 'middle_ring_radius', 'outer_ring_radius', 'bulge_factor',
                 'focal_length']
TAG_LABEL_NAMES = TAG_ID + TAG_CONFIG + TAG_STRUCTURE
# Flat per-column labels of the config matrix ('center' expands to x and y).
CONFIG_LABELS = ('z_rotation', 'y_rotation', 'x_rotation',
                 'center_x', 'center_y', 'radius')
# Column indices of the three rotation angles within CONFIG_LABELS.
CONFIG_ROTS = (
    CONFIG_LABELS.index('z_rotation'),
    CONFIG_LABELS.index('y_rotation'),
    CONFIG_LABELS.index('x_rotation'),
)
# Column indices of the tag center coordinates.
CONFIG_CENTER = (
    CONFIG_LABELS.index('center_x'),
    CONFIG_LABELS.index('center_y'),
)
# Column index of the tag radius.
CONFIG_RADIUS = CONFIG_LABELS.index('radius')
# Ordered mapping from mask-region name to its pybeesgrid constant.  The
# insertion order matters: CELLS_BLACK / CELLS_WHITE below are slices of it.
MASK = collections.OrderedDict([
    ("INNER_BLACK_SEMICIRCLE", INNER_BLACK_SEMICIRCLE),
    ("CELL_0_BLACK", CELL_0_BLACK),
    ("CELL_1_BLACK", CELL_1_BLACK),
    ("CELL_2_BLACK", CELL_2_BLACK),
    ("CELL_3_BLACK", CELL_3_BLACK),
    ("CELL_4_BLACK", CELL_4_BLACK),
    ("CELL_5_BLACK", CELL_5_BLACK),
    ("CELL_6_BLACK", CELL_6_BLACK),
    ("CELL_7_BLACK", CELL_7_BLACK),
    ("CELL_8_BLACK", CELL_8_BLACK),
    ("CELL_9_BLACK", CELL_9_BLACK),
    ("CELL_10_BLACK", CELL_10_BLACK),
    ("CELL_11_BLACK", CELL_11_BLACK),
    ("IGNORE", IGNORE),
    ("CELL_0_WHITE", CELL_0_WHITE),
    ("CELL_1_WHITE", CELL_1_WHITE),
    ("CELL_2_WHITE", CELL_2_WHITE),
    ("CELL_3_WHITE", CELL_3_WHITE),
    ("CELL_4_WHITE", CELL_4_WHITE),
    ("CELL_5_WHITE", CELL_5_WHITE),
    ("CELL_6_WHITE", CELL_6_WHITE),
    ("CELL_7_WHITE", CELL_7_WHITE),
    ("CELL_8_WHITE", CELL_8_WHITE),
    ("CELL_9_WHITE", CELL_9_WHITE),
    ("CELL_10_WHITE", CELL_10_WHITE),
    ("CELL_11_WHITE", CELL_11_WHITE),
    ("OUTER_WHITE_RING", OUTER_WHITE_RING),
    ("INNER_WHITE_SEMICIRCLE", INNER_WHITE_SEMICIRCLE)
])
MASK_KEYS = list(MASK.keys())
# The twelve black cell regions, in bit order.
CELLS_BLACK = MASK_KEYS[MASK_KEYS.index("CELL_0_BLACK"):MASK_KEYS.index("CELL_11_BLACK")+1]
MASK_BLACK = ["INNER_BLACK_SEMICIRCLE"] + CELLS_BLACK
# The twelve white cell regions, in bit order.
CELLS_WHITE = MASK_KEYS[
    MASK_KEYS.index("CELL_0_WHITE"):
    MASK_KEYS.index("CELL_11_WHITE")+1]
MASK_WHITE = CELLS_WHITE + ["OUTER_WHITE_RING", "INNER_WHITE_SEMICIRCLE"]
def dtype_tag_params(nb_bits=12, with_structure=False):
    """Build a numpy structured-dtype spec for tag parameters.

    Every label maps to a length-1 float32 field, except ``bits``
    (``nb_bits`` values) and ``center`` (an x/y pair).  When
    ``with_structure`` is true, the TAG_STRUCTURE labels are appended.
    """
    labels = list(TAG_ID + TAG_CONFIG)
    if with_structure:
        labels.extend(TAG_STRUCTURE)
    widths = {'bits': nb_bits, 'center': 2}
    return [
        (label, "({},)float32".format(widths.get(label, 1)))
        for label in labels
    ]
def draw_grids(params, with_structure='auto', scales=[1.], artist=None):
    """Render tag images from a structured array of tag parameters.

    ``params`` is a numpy structured array with the TAG_ID/TAG_CONFIG
    fields (see ``dtype_tag_params``).  TAG_STRUCTURE fields are included
    when present (``with_structure='auto'``) or when explicitly requested.
    Rendering is delegated to ``drawGrids`` with the given artist
    (``BlackWhiteArtist`` by default) at each scale in ``scales``.
    """
    # NOTE(review): mutable default ``scales=[1.]`` is shared across calls;
    # harmless as long as it is never mutated -- confirm.
    def get_positions(keys):
        # Map each field name to its starting column in the flat matrix.
        positions = {}
        i = 0
        for name in keys:
            positions[name] = i
            i += len(params[name][0])
        return positions, i
    def array_fill_by_keys(struct_arr, keys, positions, arr):
        # Copy each structured field into its column slice of ``arr``.
        for name in keys:
            b = positions[name]
            e = b + len(struct_arr[name][0])
            arr[:, b:e] = struct_arr[name]
    if artist is None:
        artist = BlackWhiteArtist(0, 255, 0, 1)
    batch_size = len(params['bits'])
    positions, size = get_positions(TAG_ID + TAG_CONFIG)
    bits_and_config = np.zeros((batch_size, size), dtype=np.float32)
    array_fill_by_keys(params, TAG_ID + TAG_CONFIG, positions, bits_and_config)
    if with_structure == 'auto':
        # Auto mode: include structure only if every structure field exists.
        with_structure = all([struct_key in params.dtype.names for struct_key in TAG_STRUCTURE])
    if with_structure:
        struct_positions, struct_size = get_positions(TAG_STRUCTURE)
        structure = np.zeros((batch_size, struct_size), dtype=np.float32)
        array_fill_by_keys(params, TAG_STRUCTURE, struct_positions, structure)
        # drawGrids expects C-contiguous float32 input.
        structure = np.ascontiguousarray(structure)
    else:
        structure = None
    bits_and_config = np.ascontiguousarray(bits_and_config)
    if structure is not None and (structure == 0).all():
        # An all-zero structure almost always means the caller forgot to
        # fill the structure fields in.
        warnings.warn(
            "draw_grids got a structure that is all zero. Did you use "
            "`dtype_tag_params(with_structure=True)`"
            " and forgot to set the structure?")
    assert bits_and_config.dtype == np.float32
    assert bits_and_config.flags['C_CONTIGUOUS']
    return drawGrids(bits_and_config, structure, artist, scales)
def _normalize_angle(x):
x %= 2*np.pi
x = (x + 2*np.pi) % (2*np.pi)
x[x > np.pi] -= 2*np.pi
assert ((-np.pi <= x) & (x <= np.pi)).all()
return x
| berleon/pybeesgrid | python/beesgrid/__init__.py | __init__.py | py | 5,325 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.OrderedDict",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pybeesgrid.INNER_BLACK_SEMICIRCLE",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "pybeesgrid.CELL_0_BLACK",
"line_number": 45,
"usage_type": "name"
},
{
... |
2078438087 | # -*- coding: utf-8 -*-
from django_webtest import DjangoTestApp, WebTestMixin
import pytest
from testapp.articles.factories import AuthorFactory, ArticleFactory, TeamFactory
@pytest.fixture(scope='function')
def app(request):
    """Per-test WebTest app wired up for Django.

    Patches the Django settings the way django_webtest's mixin does and
    disables CSRF checks; the finalizer restores the original settings.
    """
    wtm = WebTestMixin()
    wtm._patch_settings()
    wtm._disable_csrf_checks()
    # Undo the settings patch when the test is torn down.
    request.addfinalizer(wtm._unpatch_settings)
    return DjangoTestApp()
@pytest.fixture(scope='function')
def data(request):
    """Build a small fixture tree: 2 teams, 5 authors per team,
    10 articles per author."""
    teams = []
    for _ in range(2):
        teams.append(TeamFactory())
    authors = []
    for team in teams:
        for _ in range(5):
            authors.append(AuthorFactory(team=team))
    articles = []
    for author in authors:
        for _ in range(10):
            articles.append(ArticleFactory(author=author))
    return {
        'teams': teams,
        'authors': authors,
        'articles': articles,
    }
| odoku/django-searchview | tests/conftest.py | conftest.py | py | 846 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django_webtest.WebTestMixin",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django_webtest.DjangoTestApp",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 9,
"usage_type": "call"
},
{
"api_name":... |
33595344312 | from django.http import JsonResponse
from base.views import chekctoken
# Paths exempt from token authentication.
# Bug fix: the original was missing the trailing comma, which made
# WHITE_URLS a plain string -- the membership test below then degraded to a
# substring check, so any path that is a substring of '/apis/login/'
# bypassed authentication.
WHITE_URLS = ('/apis/login/',)


class RequestMideleware(object):
    """Require a valid token (base.views.chekctoken) on every request except
    the whitelisted paths."""

    def process_request(self, request):
        # Whitelisted endpoints (e.g. login) skip authentication entirely.
        if request.path_info in WHITE_URLS:
            return
        try:
            ret = chekctoken(request)
            if not ret:
                response = JsonResponse({'result': 'Unauthorized'})
                response.status_code = 401
                return response
        except Exception:
            # NOTE(review): errors inside chekctoken let the request through
            # (fail-open), preserving the original behaviour -- confirm
            # whether failing closed with a 401 is the real intent here.
            return
| Hchenwy/web | www/server/base/middleware.py | middleware.py | py | 514 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "base.views.chekctoken",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 15,
"usage_type": "call"
}
] |
9137033058 | import os
import pandas as pd
from scipy.io import loadmat
def load_data():
    """Load every MFCC .mat file under data\\Identification\\MFCC\\ and
    return one DataFrame (feature columns plus Subject/Session labels)
    sorted by Subject.  Returns an empty DataFrame when no files exist.

    Fix: the original re-concatenated and re-sorted the cumulative frame on
    every iteration (quadratic); all frames are now concatenated and sorted
    once at the end.
    """
    list_of_files = os.listdir("data\\Identification\\MFCC\\")
    frames = []
    for file in list_of_files:
        data_set = loadmat("data\\Identification\\MFCC\\" + file)
        features_df = pd.DataFrame(data_set['feat'])
        labels_df = pd.DataFrame(data_set['Y'], columns=["Subject", "Session"])
        frames.append(pd.concat([features_df, labels_df], axis=1))
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames).sort_values(by="Subject")
def load_file(filename):
    """Load a single MFCC .mat file and return its features joined with the
    Subject/Session label columns."""
    mat = loadmat("data\\Identification\\MFCC\\" + str(filename))
    feature_frame = pd.DataFrame(mat['feat'])
    label_frame = pd.DataFrame(mat['Y'], columns=["Subject", "Session"])
    return pd.concat([feature_frame, label_frame], axis=1)
| PGG106/ReadMat | utils.py | utils.py | py | 1,004 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.listdir",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "scipy.io.loadmat",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"li... |
72052702908 | #!/usr/bin/env python
# coding: utf-8
""" This script collects all the data in orgs and sources folders and merge them in a single json file. """
import json, pathlib, os, sys
### ENVIRONMENTAL VARIABLES
# Environmental variables can be set in order to override the default values.
# NOTE: you can use relative or absolute paths, with or without a separator
# at the end of folder names.

# the folder that contains sources json files (default: './sources')
env_sources = 'OPENDATA_SOURCES_DIR'
# the folder containing the organization details (default: './orgs')
env_organizations = 'OPENDATA_ORGANIZATIONS_DIR'
# the filename that will store all results, extension included
# (default: './dist/index.json')
env_dist_filename = 'OPENDATA_DIST_FILENAME'
# the filename that will store nested results, extension included
# (default: './dist/nested/index.json')
env_nested_filename = 'OPENDATA_NESTED_FILENAME'
# shall the script override existing output files? (default: True)
env_allow_override = 'OPENDATA_CAN_OVERRIDE'
# keep the implicit 'owner_org' key inside each source? (default: False)
env_keep_owner = 'OPENDATA_KEEP_OWNER'
# print the merged result to stdout as well? (default: False)
env_to_stdout = 'OPENDATA_USE_STDOUT'

### DEFAULT SETTINGS
falsy_strings = ('no', 'false', 'never', 'n', 'f', 'falso', 'mai')  # add other strings if necessary (?)
empty = ('', None)


def _env_or(name, default):
    """Return the value of environment variable *name*, or *default* when it
    is unset or empty.  (Replaces seven copies of the same ternary that read
    os.environ three times each.)"""
    value = os.environ.get(name)
    return value if value not in empty else default


def _env_flag(name, default):
    """Interpret environment variable *name* as a boolean: any value not in
    ``falsy_strings`` counts as True; unset/empty yields *default*."""
    value = os.environ.get(name)
    if value in empty:
        return default
    return value.lower() not in falsy_strings


sources_dir = _env_or(env_sources, pathlib.Path('.', 'sources'))
orgs_dir = _env_or(env_organizations, pathlib.Path('.', 'orgs'))
dist_filename = _env_or(env_dist_filename, pathlib.Path('.', 'dist/index.json'))
nested_filename = _env_or(env_nested_filename, pathlib.Path('.', 'dist/nested/index.json'))
override = _env_flag(env_allow_override, True)
keep_owner = _env_flag(env_keep_owner, False)
to_stdout = _env_flag(env_to_stdout, False)
# A dictionary to guide in the classification of the organizations.
# There are two main branches, "nazionale" (national) and "locale" (local).
# Every branch has a inner dictionary. The inner dictionary keys are the first word in org.title whereas
# the dictionary values are the keys to be used to identify the type of organization in json output.
# You can customize the values returned; the key "*" is used as a catch-all alternative if the first word
# in org.title is not present in the dictionary's branch.
classification = {
'nazionale': {
'ministero': 'ministero',
'*': 'altro'
},
'locale': {
'citta': 'citta metropolitana',
'comune': 'comune',
'provincia': 'provincia',
'regione': 'regione',
'universita': 'universita',
'*': 'altro'
}
}
### UTILITIES
def classify(organization):
    """Return the list of category keys used to classify *organization*.

    The first key is 'locale' (when the org carries a 'region') or
    'nazionale'; local organizations also get their region.  The final key
    is the org type looked up from the first word of the org name, falling
    back to the branch's '*' catch-all entry.
    """
    first_word = organization['name'].split('-')[0]
    if 'region' in organization:
        category = 'locale'
        keys = [category, organization['region']]
    else:
        category = 'nazionale'
        keys = [category]
    branch = classification[category]
    # Unrecognized first words fall back to the '*' entry.
    keys.append(branch.get(first_word, branch['*']))
    return keys
def populate_dict(keys_list, dictionary, organization, source):
    """
    recursive function that takes a list of keys to be added to a dict of dicts (the dictionary argument).
    If the list is empty, it returns the organization argument (the leaf) otherwise it returns a dictionary
    created from the nested keys (the branches).

    Note: the leaf container is a LIST of organization dicts, each carrying
    a 'sources' list; ``keys_list`` is consumed in place via ``pop(0)`` and
    ``source`` may lose its 'owner_org' key (module setting ``keep_owner``).

    example:
    --------
    keys_list = ['a', 'b', 'c']
    dictionary = {'other':{'nested'}, 'a':{'foo':'bar'}}
    organization = {"whatever": "value", "you":"want"}
    > populate_dict(keys_list, dictionary, organization)
    > {'other':{'nested'}, 'a':{'foo':'bar', 'b':{'c':{"whatever": "value", "you":"want"}}}}
    """
    if len(keys_list) == 0:
        # time to save the new source
        has_organization = False
        if not keep_owner:
            source.pop('owner_org', None)
        # check if organization is already present
        for org in dictionary:
            if org['name'] == organization['name']:
                # the organization already exists
                organization = org
                # if the organization is already in the dictionary the 'sources' key has been set
                # so it is not necessary to check for its existence
                organization['sources'].append(source)
                has_organization = True
                break
        if not has_organization:
            # no organization found or dictionary is empty
            organization['sources'] = [source]
            dictionary.append(organization)
        return dictionary
    key = keys_list.pop(0)
    if key not in dictionary.keys():
        # Last key -> the new branch is the leaf LIST, otherwise a sub-dict.
        if len(keys_list) == 0:
            dictionary[key] = populate_dict(keys_list, [], organization, source)
        else:
            dictionary[key] = populate_dict(keys_list, {}, organization, source)
    else:
        dictionary[key] = populate_dict(keys_list, dictionary[key], organization, source)
    return dictionary
### PARSER
def parse():
    """
    The main script: walk every source JSON file under sources_dir, attach it
    to its owner organization (read from orgs_dir), and emit two outputs — a
    flat list (dist_filename) and a nested classification tree
    (nested_filename).

    Relies on module-level configuration (sources_dir, orgs_dir, keep_owner,
    to_stdout, override, dist_filename, nested_filename) and on the sibling
    helpers classify() and populate_dict().
    """
    dist_all = {}
    dist_nested = {}
    for source in pathlib.Path(sources_dir).glob('*.json'):
        with source.open('r') as source_file:
            source_content = json.load(source_file)
            # "config" is stored as a JSON string inside the JSON document.
            if "config" in source_content:
                source_content["config"] = json.loads(source_content["config"])
            owner = source_content['owner_org']
            try:
                with pathlib.Path(orgs_dir, owner+'.json').open('r') as organization:
                    org_content = json.load(organization)
                    category = classify(org_content)
                    dist_nested = populate_dict(category, dist_nested, org_content, source_content)
                    dist_all[owner] = dist_all.get(owner, dict(org_content, sources=[]))
                    # BUGFIX: the filter must test the *key* k, not the value
                    # source_content[k]; the original never dropped 'owner_org'.
                    dist_all[owner]["sources"].append({ k:source_content[k] for k in source_content if keep_owner or k != 'owner_org' })
            except FileNotFoundError:
                print(f"ERROR: file {pathlib.Path(orgs_dir, owner+'.json')} not found or not readable.", file=sys.stderr)
                exit(2)
    if not dist_nested or not dist_all:
        print(f"WARNING: no sources found. Is {pathlib.Path(sources_dir)} the correct folder?", file=sys.stderr)
    if to_stdout:
        # BUGFIX: dict_values is not JSON-serializable; wrap in list() like the
        # json.dump(list(...)) call below.
        print(json.dumps(list(dist_all.values()), sort_keys=True, indent=4))
    if override or not os.path.exists(dist_filename):
        with open(dist_filename, 'w') as output_file:
            json.dump(list(dist_all.values()), output_file)
    else:
        print("ERROR: output file exists and I'm not allowed to overwrite it.", file=sys.stderr)
    if override or not os.path.exists(nested_filename):
        with open(nested_filename, 'w') as output_file:
            json.dump(dist_nested, output_file)
    else:
        print("ERROR: output file exists and I'm not allowed to overwrite it.", file=sys.stderr)
### THE SCRIPT
# Entry point: run the parser only when executed directly (not on import).
if __name__ == '__main__':
    parse()
| italia/public-opendata-sources | export_all.py | export_all.py | py | 8,264 | python | en | code | 17 | github-code | 6 | [
{
"api_name": "os.environ",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line... |
5454371991 | """
Problem:
1. Two Sum
Difficulty:
Easy
URL:
https://leetcode.com/problems/two-sum
Tags:
Array, Hash Table
Date:
2022-05-10T14:00:29.877163+08:00
"""
from typing import List
class Solution:
    def twoSum(self, nums: List[int], target: int) -> List[int]:
        """Return indices of the two numbers in *nums* that sum to *target*.

        Single-pass hash map (value -> earliest index): O(n) time, O(n) space,
        replacing the original O(n^2) approach that re-sliced and re-scanned
        nums[i+1:] on every iteration. The result keeps the same ordering as
        before: [earlier index, later index].
        """
        seen = {}  # value -> first index where it appeared
        for i, num in enumerate(nums):
            complement = target - num
            if complement in seen:
                return [seen[complement], i]
            seen[num] = i
# ((nums, target), expected_indices) pairs — presumably consumed by an
# external test harness; no runner is defined in this module.
tests = [
    (
        ([2, 7, 11, 15], 9,
        ),
        [0, 1],
    ),
    (
        ([3, 2, 4], 6,
        ),
        [1, 2],
    ),
    (
        ([3, 3], 6,
        ),
        [0, 1],
    ),
]
| s0u0b/leetcode | solutions/a00001_two_sum.py | a00001_two_sum.py | py | 630 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 17,
"usage_type": "name"
}
] |
43954128076 | import json
import requests # see http://python-requests.org
def url_for(endpoint):
    """Return the full URL for *endpoint* on the local dev server."""
    base = 'http://localhost:5000'
    return base + '/' + endpoint + '/'
def delete_all_people():
    """Issue a DELETE for the whole 'people' resource and print the status."""
    r = requests.delete(url_for('people'))
    print("'people' deleted, server response:", r.status_code)
def post_people():
    """POST two sample person documents to the 'people' resource."""
    data = [
        {'firstname': 'John', 'lastname': 'Doe'},
        {'firstname': 'Mike', 'lastname': 'Green'},
    ]
    # Payload is serialized by hand, so the Content-Type header must also be
    # set by hand (requests' json= kwarg would do both automatically).
    response = requests.post(
        url_for('people'),
        json.dumps(data),
        headers={'Content-Type': 'application/json'}
    )
    print("'people' posted, server response:", response.status_code)
def get_people():
    """Fetch the 'people' collection and print each person's name and id."""
    r = requests.get(url_for('people'))
    print('people downloaded, server response:', r.status_code)
    if r.status_code == 200:
        # Eve-style responses wrap the documents in an '_items' list.
        people = r.json()['_items']
        print('{} people:'.format(len(people)))
        for person in people:
            print('{}, {}'.format(person['firstname'], person['_id']))
def main():
    """Exercise the API end to end: wipe, repopulate, then list people."""
    delete_all_people()
    post_people()
    get_people()
# Run the demo only when executed directly.
if __name__ == '__main__':
    main()
{
"api_name": "requests.delete",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_num... |
21394429670 | import numpy as np
import statistics
from scipy import stats
# Sample data: 25 integer observations.
dataset= [5,6,7,5,6,5,7,4,5,5,5,5,7,5,6,6,7,6,6,7,7,7,6,5,6]
# arithmetic mean
mean= np.mean(dataset)
# median value
median = np.median(dataset)
# mode value (scipy returns a ModeResult holding the mode and its count)
mode= stats.mode(dataset)
# sample standard deviation (statistics.stdev divides by n-1, not n)
Std = statistics.stdev(dataset)
# sample variance (n-1 denominator as well)
Var = statistics.variance(dataset)
print("Mean: ", mean)
print("Median: ", median)
print("Mode: ", mode)
print("Std", Std)
print("Var", Var)
| lamyanlok/FTDS | test.py | test.py | py | 447 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.mean",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "scipy.stats.mode",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_numbe... |
16314867701 | import sqlite3
import sys
import datetime
from collections import defaultdict
from stats_ui_window import Ui_StatWindow
from PyQt5 import QtCore, QtGui, QtWidgets
class MainWindow_EXEC():
    """Qt front-end for logging device-examination records.

    Reads examiner names from 'examiners_names.txt', queues device entries in
    a preview table, and flushes them into a SQLite database whose path is
    read from 'path.txt'. Constructing the class starts the Qt event loop and
    only returns via sys.exit().
    """
    def __init__(self):
        # Build the window, populate the examiner combo box, and wire buttons.
        app = QtWidgets.QApplication(sys.argv)
        MainWindow = QtWidgets.QMainWindow()
        self.ui = Ui_StatWindow()
        self.ui.setupUi(MainWindow)
        with open('examiners_names.txt','r') as examiners:
            for line in examiners.readlines():
                self.ui.comboBox.addItem(line.strip())
        self.device_list = []  # records queued for the next DB flush
        self.ui.pushButton.clicked.connect(self.add_device)
        self.ui.pushButton_4.clicked.connect(self.remove_record)
        self.ui.pushButton_2.clicked.connect(self.add_list)
        self.ui.pushButton_3.clicked.connect(QtCore.QCoreApplication.instance().quit)
        MainWindow.show()
        sys.exit(app.exec_())
    def add_device(self):
        """Validate the form, build one device record, and append it to both
        self.device_list and the preview table. Pops an error dialog when a
        required field or radio selection is missing."""
        # defaultdict() with no default_factory behaves like a plain dict here.
        device_values = defaultdict()
        device_values['case_number'] = self.ui.lineEdit.text()
        device_values['item_number'] = self.ui.lineEdit_2.text()
        device_values['manufacture'] = self.ui.lineEdit_3.text()
        device_values['model_'] = self.ui.lineEdit_4.text()
        device_values['crime_code'] = self.ui.lineEdit_6.text()
        device_values['requesting'] = self.ui.lineEdit_5.text()
        device_values['examiner'] = str(self.ui.comboBox.currentText())
        if "" in (device_values['case_number'],device_values['item_number'],
            device_values['manufacture'],device_values['model_'],
            device_values['crime_code'],device_values['requesting']):
            self.error_box()
        else:
            all_items = True
            if self.ui.radioButton_11.isChecked():
                device_values['device'] = "Computer"
            elif self.ui.radioButton_10.isChecked():
                device_values['device'] = "Phone"
            elif self.ui.radioButton_12.isChecked():
                device_values['device'] = "Hard Drive"
            elif self.ui.radioButton_13.isChecked():
                device_values['device'] = "Thumbdrive/Media Card"
            elif self.ui.radioButton_14.isChecked():
                # NOTE(review): "Vehilce" is a typo for "Vehicle"; it is stored
                # in the DB as-is, so fixing it needs a data migration too.
                device_values['device'] = "Vehilce"
            else:
                all_items = False
                self.error_box(message = "Please Select Device Type")
            if self.ui.radioButton.isChecked():
                device_values['security'] = "Password Protected"
            elif self.ui.radioButton_9.isChecked():
                device_values['security'] = "Unlocked"
            else:
                all_items = False
                self.error_box(message = "Please Select Security")
            if self.ui.checkBox_2.isChecked():
                device_values['secure_start'] = "Enabled"
            else: device_values['secure_start'] = "No"
            if self.ui.checkBox_3.isChecked():
                device_values['logical'] = "Yes"
            else: device_values['logical'] = "No"
            if self.ui.checkBox_4.isChecked():
                device_values['file_system'] = "Yes"
            else: device_values['file_system'] = "No"
            if self.ui.checkBox_5.isChecked():
                device_values['physical'] = "Yes"
            else: device_values['physical'] = "No"
            if self.ui.checkBox_8.isChecked():
                device_values['lt_greykey'] = "Yes"
            else: device_values['lt_greykey'] = "No"
            if self.ui.checkBox_6.isChecked():
                device_values['greykey'] = "Yes"
            else: device_values['greykey'] = "No"
            if self.ui.checkBox_7.isChecked():
                device_values['no_extraction'] = "No Extraction"
            else: device_values['no_extraction'] = "Extracted"
            device_values['date'] = datetime.datetime.now().strftime('%m/%d/%Y')
            if all_items == True:
                # Newest record goes on top of the preview table.
                self.device_list.append(device_values)
                self.ui.tableWidget.insertRow(0)
                self.ui.tableWidget.setItem(0 , 0, QtWidgets.QTableWidgetItem(device_values['date']))
                self.ui.tableWidget.setItem(0 , 1, QtWidgets.QTableWidgetItem(device_values['device']))
                self.ui.tableWidget.setItem(0 , 2, QtWidgets.QTableWidgetItem(device_values['case_number']))
                self.ui.tableWidget.setItem(0 , 3, QtWidgets.QTableWidgetItem(device_values['item_number']))
                self.ui.tableWidget.setItem(0 , 4, QtWidgets.QTableWidgetItem(device_values['manufacture']))
                self.ui.tableWidget.setItem(0 , 5, QtWidgets.QTableWidgetItem(device_values['model_']))
                self.ui.lineEdit_2.setText("")
                self.ui.lineEdit_3.setText("")
                self.ui.lineEdit_4.setText("")
                self.ui.checkBox_2.setChecked(False)
                self.ui.checkBox_3.setChecked(False)
                self.ui.checkBox_4.setChecked(False)
                self.ui.checkBox_5.setChecked(False)
                self.ui.checkBox_6.setChecked(False)
                self.ui.checkBox_7.setChecked(False)
                self.ui.checkBox_8.setChecked(False)
            else: all_items = True
    def remove_record(self):
        """Drop the currently selected row from the preview table only
        (NOTE(review): self.device_list is NOT updated to match)."""
        row = self.ui.tableWidget.currentRow()
        self.ui.tableWidget.removeRow(row)
    def add_list(self):
        """Clear the form/preview table and flush all queued records into the
        SQLite database whose path is stored in 'path.txt'."""
        manufacture = self.ui.lineEdit_3.text()
        if manufacture != "":
            self.error_box(message = "Dont forget to add the phone")
        else:
            self.ui.lineEdit.setText("")
            self.ui.lineEdit_2.setText("")
            self.ui.lineEdit_3.setText("")
            self.ui.lineEdit_4.setText("")
            self.ui.lineEdit_6.setText("")
            self.ui.lineEdit_5.setText("")
            count = self.ui.tableWidget.rowCount()
            if count > 0:
                self.ui.tableWidget.setRowCount(0)
            with open('path.txt','r') as my_path:
                path = my_path.read()
            con = sqlite3.connect(path)
            cur = con.cursor()
            for item in self.device_list:
                val = (item['date'],item['case_number'],item['item_number'],item['manufacture'],
                    item['model_'],item['crime_code'],item['requesting'],
                    item['examiner'],item['device'],item['security'],
                    item['secure_start'],item['logical'],item['file_system'],
                    item['physical'],item['lt_greykey'],item['greykey'],item['no_extraction'])
                sql = "INSERT INTO entries (date,case_number,item_number,manufacture,model_,crime_code,requesting,examiner,device,security,secure_start,logical,file_system,physical,lt_greykey,greykey,no_extraction) VALUES (?,?, ?, ?, ?, ?, ?, ?,?, ?,?,?,?,?,?,?,?)"
                cur.execute(sql,val)
            con.commit()
            con.close()
    @staticmethod
    def error_box(message = 'Please fill out all fields!'):
        """Show a modal warning dialog displaying *message*."""
        error_dialog = QtWidgets.QMessageBox()
        error_dialog.setIcon(QtWidgets.QMessageBox.Warning)
        error_dialog.setWindowTitle('Error')
        error_dialog.setText(f'{message}')
        error_dialog.setStandardButtons(QtWidgets.QMessageBox.Close)
        error_dialog.exec()
# Launch the GUI when run as a script (the constructor starts the Qt loop).
if __name__ == "__main__":
    MainWindow_EXEC()
| chrisw706/examination_stats | Stats/Python/Stats.py | Stats.py | py | 7,488 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PyQt5.QtWidgets.QApplication",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWi... |
4552178157 | # Busca Local Múltiplos Inicios
# Local Search Multiple Starts
import sys
import time
sys.path.insert(1, '../stage_01')
sys.path.insert(1, '../')
from utils import corrent_solution_size, objetive_function, read_instance, viable_solution
from local_search import local_search
from semi_greedy import semi_greedy
import config
# Busca local múltiplos inícios + semi-guloso alpha + busca local primeira melhora
def multiple_starts_local_search(alpha, timeout):
    """Multi-start local search: until *timeout* seconds elapse, repeatedly
    build a semi-greedy solution and refine it with local search, keeping the
    best objective value seen.

    alpha: greediness parameter forwarded to semi_greedy.
    timeout: wall-clock budget in seconds.
    Returns (best_solution, best_value) after trimming empty slots via
    corrent_solution_size.
    """
    desks, tests, empty = config.desks, config.tests, config.empty
    desk_count = len(desks)
    test_count = len(tests)
    # NOTE(review): desk_count is passed twice here — confirm viable_solution's
    # intended signature.
    s_ = viable_solution(desk_count, desk_count, test_count)
    value_ = objetive_function(s_)
    initial_time = time.time()
    current_time = time.time()
    execution_time = current_time - initial_time
    while execution_time < timeout:
        s, _ = semi_greedy(alpha, False)
        s, value = local_search(s, 0, False) # 1 = first improvement
        if value < value_:
            s_ = s
            value_ = value
        current_time = time.time()
        execution_time = current_time - initial_time
    s_, value_ = corrent_solution_size(s_, empty)
    return s_, value_
if __name__ == '__main__':
    # CLI usage: <instance-file> <timeout-seconds> <alpha>
    file_name = sys.argv[1]
    timeout = int(sys.argv[2])
    alpha = float(sys.argv[3])
    config.set_timeout(timeout)
    read_instance(file_name)
    s_, value_ = multiple_starts_local_search(alpha, timeout)
    print(s_)
    print(value_)
| guilhermelange/Test-Assignment-Problem | stage_02/multiple_starts_local_search_02.py | multiple_starts_local_search_02.py | py | 1,452 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.insert",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.path.insert",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_numbe... |
15996890764 | from django.contrib import admin
from django.urls import path
from . import views
# REST routes for the music service: songs (list/detail/search), artists and
# albums (list/detail). All endpoints use class-based views via as_view().
urlpatterns = [
    path('api/songs', views.SongsView.as_view(), name='songs'),
    path('api/songs/<int:song_id>', views.SongInfoView.as_view(), name='song_info'),
    path('api/songs/search/', views.SongSearchView.as_view(), name='song_search'),
    path('api/artists', views.ArtistsView.as_view(), name='artists'),
    path('api/artists/<int:artist_id>', views.ArtistInfoView.as_view(), name='artist_info'),
    path('api/albums', views.AlbumsView.as_view(), name='albums'),
    path('api/albums/<int:album_id>', views.AlbumInfoView.as_view(), name='album_info'),
]
| artooff/2023-MAI-Backend-A-Artov | lab3/musicProject/musicService/urls.py | urls.py | py | 652 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
26420619240 | from datetime import timedelta, datetime
from typing import Optional
from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from sqlalchemy.orm import Session
from jose import jwt, JWTError
from app import database, models
from app.schemas import TokenData
from app.config import settings
# OAuth2 "password" flow: clients obtain tokens from the /login endpoint.
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="login")
# JWT signing configuration, loaded from application settings.
SECRET_KEY = settings.secret_key
ALGORITHM = settings.algorithm
ACCESS_TOKEN_EXPIRE_MINUTES = settings.access_token_expire_minutes
def create_access_token(data: dict, expires_delta: Optional[timedelta] = None):
    """Return a signed JWT carrying the claims in *data* plus an 'exp' claim.

    A falsy expires_delta (None or timedelta(0)) falls back to the configured
    ACCESS_TOKEN_EXPIRE_MINUTES default.
    """
    lifetime = expires_delta or timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
    claims = data.copy()
    claims["exp"] = datetime.utcnow() + lifetime
    return jwt.encode(claims, SECRET_KEY, algorithm=ALGORITHM)
def verify_access_token(token: str, credentials_exception):
    """Decode *token* and return a TokenData with the embedded user id.

    Raises *credentials_exception* when the token is invalid/expired (JWTError)
    or when the 'user_id' claim is missing.
    """
    try:
        payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
        user_id: str = payload.get("user_id")
        if user_id is None:
            raise credentials_exception
        token_data = TokenData(id=user_id)
    except JWTError:
        raise credentials_exception
    return token_data
def get_current_user(
    token: str = Depends(oauth2_scheme), db: Session = Depends(database.get_db)
):
    """FastAPI dependency: resolve the bearer token to a models.User row.

    Raises 401 for invalid tokens; returns None when a valid token's id
    matches no user (callers must handle that case).
    """
    credentials_exception = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Could not validate credentials",
        headers={"WWW-Authenticate": "Bearer"},
    )
    # 'token' is rebound here from the raw JWT string to a TokenData object.
    token = verify_access_token(token, credentials_exception)
    user = db.query(models.User).filter(models.User.id == token.id).first()
    return user
| AdityaPunetha/FastAPI-Full-Devlopment | app/oauth2.py | oauth2.py | py | 1,771 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "fastapi.security.OAuth2PasswordBearer",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "app.config.settings.secret_key",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "app.config.settings",
"line_number": 14,
"usage_type": "name"
... |
72438613309 | from fastapi import APIRouter, Body, Depends, Request, status
from fastapi.responses import JSONResponse
from jarvis.db.database import DataBase, get_database
from jarvis.core import config, utils
from jarvis.lib import TwilioHelper
from typing import Dict
from twilio.rest import Client
import jarvis.crud as crud
import jarvis.models as model
import jarvis.core.text_responses as text
# Module-level singletons: API router, Twilio message helper, and the Twilio
# REST client authenticated from application config.
router = APIRouter()
twilio_helper = TwilioHelper()
client = Client(config.TWILIO_ACCOUNT_SID, config.TWILIO_ACCOUNT_AUTH_TOKEN)
@router.post("/add")
async def add_item_to_cart(request: Request, db: DataBase = Depends(get_database)):
    """Parse a form-encoded cart item, normalize it against the DB, and reply
    with a Twilio-formatted confirmation message."""
    async with db.pool.acquire() as conn:
        body = await request.form()
        parsed_body = dict(body)
        cart_item = model.CartItem(**parsed_body)
        normalized_cart_item = await utils.normalize_cart_item_model(conn, cart_item)
        cart_item_name = normalized_cart_item.get("name")
        item_quantity = normalized_cart_item.get("quantity")
        success_message = text.add_item_success(cart_item_name, item_quantity)
        # NOTE(review): cart size is hard-coded to 1 here — confirm intent.
        shopping_cart_message = text.shopping_cart_info(1)
        msg = "".join([success_message, shopping_cart_message])
        return twilio_helper.compose_mesage(msg)
    # Make potentially a new helper class that has add item
    # because you have to then convert this to a message after etc
    # shopping_cart = model.ShoppingCart(**payload)
    # return None
@router.get("/menu/{item_type}")
async def get_menu(
    item_type: str, db: DataBase = Depends(get_database),
):
    """Return all menu items of *item_type* as one newline-joined Twilio message."""
    async with db.pool.acquire() as conn:
        try:
            items = await crud.get_all_item_by_type(conn, item_type)
            message_list = [utils.item_model_to_message(item) for item in items]
            message = "\n".join(message_list)
            twilio_message = twilio_helper.compose_mesage(message)
            return twilio_message
        except UserWarning as warning:
            # UserWarning from crud is treated as a soft failure and answered
            # with 202 — confirm this matches crud's signalling convention.
            return JSONResponse(
                status_code=status.HTTP_202_ACCEPTED, content=str(warning)
            )
@router.post("/checkout")
async def checkout_cart(
    payload: Dict = Body(...), db: DataBase = Depends(get_database)
):
    """Placeholder endpoint: cart checkout is not implemented yet."""
    pass
@router.post("/sms")
async def get_twilio_text():
    """Reply to an inbound SMS webhook with a static smiley response."""
    resp = ":)"
    return utils.create_text_response(resp)
@router.get("/test")
async def twilio_test(payload: Dict = Body(...)):
    """Send a fixed test SMS through Twilio and return the message SID."""
    message = client.messages.create(
        body="Jarvis test",
        messaging_service_sid=config.TWILIO_ACCOUNT_MESSAGING_SID,
        to=config.TO_PHONE_NUMBER,
    )
    return message.sid
| christian-miljkovic/jarvis | jarvis/api/v1/user_endpoint.py | user_endpoint.py | py | 2,604 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "jarvis.lib.TwilioHelper",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "twilio.rest.Client",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "jarvis.c... |
16704619000 | #!/usr/bin/env python
# Code property of Matteo Scanavino - matteo.svanavino@gmail.it
# Minor changes by Iris David Du Mutel
import rospy
# from std_msgs.msg import Float32MultiArray
from myrobot.msg import vect_msg
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
import cv2
# import cv2.cv
import os
import math
import numpy as np
#import pyrealsense2 as rs
import message_filters
from sensor_msgs.msg import Image, CameraInfo
from cv_bridge import CvBridge, CvBridgeError
import imutils #collection of OpenCV and Python convenience functions
from collections import deque
from scipy.spatial.transform import Rotation as R
def green_ball():
    """Set up the ROS node: publish steering vectors on 'gb_vect' and fuse the
    RealSense color stream with odometry via an approximate time synchronizer,
    then spin until shutdown."""
    rospy.init_node('realsense_behaviour', anonymous=True)
    pub = rospy.Publisher('gb_vect', vect_msg, queue_size=10)
    color_sub = message_filters.Subscriber('camera/color/image_raw',Image)
    # depth_sub = message_filters.Subscriber('camera/depth/image_raw',Image)
    x_sub = message_filters.Subscriber('/odom',Odometry)
    ts = message_filters.ApproximateTimeSynchronizer([color_sub, x_sub], queue_size=10,slop=0.1)
    ts.registerCallback(callback,pub)
    rospy.spin()
def callback(color_raw, x_sub,pub):
    """Track a green ball in the color image and publish a steering vector.

    vect = [angle, speed]: steer hard (+/-90) when the ball is off-center,
    otherwise hold the estimated heading and drive forward until the ball's
    apparent radius says it is close.
    """
    vect = [0, 0]
    msg = vect_msg()
    bridge = CvBridge()
    # HSV bounds for "green" used by the color mask below.
    greenLower = (29, 86, 6)
    greenUpper = (64, 255, 255)
    # realsense min and max distance
    try:
        color_image = bridge.imgmsg_to_cv2(color_raw, "bgr8")
    except CvBridgeError as e:
        # NOTE(review): on conversion failure color_image stays unbound and
        # imutils.resize below raises NameError — consider returning here.
        print(e)
    Xest = x_sub
    # # Variable assignation:
    [yaw, pitch, roll] = get_rotation(Xest)
    psi_est = yaw*180/math.pi
    frame = imutils.resize(color_image, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    # construct a mask for the color "green", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    center = None
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        # Steering bands in pixels: [0,280) turn left, (305,...] turn right,
        # [280,305] is the dead zone where the robot drives straight.
        if x<280:
            vect[0]=90
            vect[1]=0
        elif x>305:
            vect[0]=-90
            vect[1]=0
        else:
            if radius<100:
                vect[0]=psi_est
                vect[1]=0.8
            else:
                vect[0]= psi_est
                vect[1]=0
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        print(center)
        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius),
                (0, 255, 255), 2)
            print('radius=', radius)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
    else:
        print('out of frame')
    # show the frame to our screen
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # Send data
    msg.header.stamp = rospy.Time.now()
    msg.angle = vect[0]
    msg.value = vect[1]
    rospy.loginfo('Realsense vector data sent')
    pub.publish(msg)
def get_rotation(Xest):
    """Extract intrinsic z-y-x Euler angles (radians) from an odometry message's
    pose quaternion."""
    q = Xest.pose.pose.orientation
    quaternion = [q.x, q.y, q.z, q.w]
    rotation = R.from_quat(quaternion)
    return rotation.as_euler('zyx', degrees=False)
if __name__ == '__main__':
    try:
        green_ball()
    except rospy.ROSInterruptException:
        # Normal shutdown path when ROS interrupts the node (e.g. Ctrl-C).
        pass
{
"api_name": "rospy.init_node",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "rospy.Publisher",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "myrobot.msg.vect_msg",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "message_filt... |
10844685453 | from database import db
from flask import request
from middleware.auth import login_required, admin_only
from models.guild import Guild
from typing import Dict, Optional, Tuple
def check_request(req: request, id_only: Optional[bool] = False) -> int | Tuple[int, str, bool] | Tuple[Dict[str, str], int]:
    """Validate the JSON body of a guild request.

    Returns one of three shapes, which callers distinguish via isinstance:
    the bare guild id (id_only=True), an (id, name, manage_threads) tuple
    (id_only=False), or an (error-dict, http-status) tuple on validation
    failure.
    """
    # Check request body
    guild_name = ''
    guild_manage_threads = False
    try:
        guild_id = req.json['id']
        if not isinstance(guild_id, int):
            raise ValueError('id must be an integer')
        if not id_only:
            # Optional fields keep their defaults when absent.
            guild_name = req.json.get('name', guild_name)
            guild_manage_threads = req.json.get('manage_threads', guild_manage_threads)
            if 'name' in req.json and not isinstance(guild_name, str):
                raise ValueError('name must be a string')
            if 'name' in req.json and not 0 < len(guild_name) < 256:
                raise ValueError('name must be between 0 to 256 characters long')
            if 'manage_threads' in req.json and not isinstance(guild_manage_threads, bool):
                raise ValueError('manage_threads must be a boolean')
    except KeyError as e:
        return {
            'success': False,
            'error': f'Missing key in request body: {e}'
        }, 400
    except ValueError as e:
        return {
            'success': False,
            'error': f'Bad value: {e}'
        }, 400
    else:
        if id_only:
            return guild_id
        else:
            return guild_id, guild_name, guild_manage_threads
@admin_only
def add_guild():
    """Create a new guild from the request body (admin only).

    Returns 409 when the guild id already exists, 400 on validation errors.
    """
    # Check request body
    check_result = check_request(request)
    if isinstance(check_result[0], dict):
        return check_result
    guild_id, guild_name, guild_manage_threads = check_result
    # Check if guild is already in DB
    guild = Guild.query.get(guild_id)
    if guild is not None:
        return {
            'success': False,
            'error': 'Guild already exists'
        }, 409
    # Create guild
    guild = Guild(
        id=guild_id,
        name=guild_name,
        manage_threads=guild_manage_threads
    )
    # Add to DB
    db.session.add(guild)
    db.session.commit()
    return {
        'success': True
    }
@admin_only
def update_guild():
    """Update name and/or manage_threads of an existing guild (admin only).

    Only fields actually present in the request body are modified; returns
    404 when the guild does not exist.
    """
    # Check request body
    check_result = check_request(request)
    if isinstance(check_result[0], dict):
        return check_result
    guild_id, guild_name, guild_manage_threads = check_result
    # Check if guild is already in DB
    guild = Guild.query.get(guild_id)
    if guild is None:
        return {
            'success': False,
            'error': f'Guild {guild_id} does not exist'
        }, 404
    # Update existing guild
    if 'name' in request.json:
        guild.name = guild_name
    if 'manage_threads' in request.json:
        guild.manage_threads = guild_manage_threads
    # Commit
    db.session.commit()
    return {
        'success': True
    }, 200
@admin_only
def delete_guild():
    """Delete a guild by id (admin only).

    Idempotent: deleting a guild that does not exist still reports success.
    Fixes the original's missing return on the not-found path, where the view
    implicitly returned None and Flask raised an error instead of responding.
    """
    # Check request body
    check_result = check_request(request, id_only=True)
    if isinstance(check_result, tuple) and isinstance(check_result[0], dict):
        return check_result
    guild_id = check_result
    # Check if guild is in DB
    guild = Guild.query.get(guild_id)
    if guild is not None:
        # Delete guild
        db.session.delete(guild)
        db.session.commit()
    # Always respond, even when there was nothing to delete.
    return {
        'success': True
    }, 200
@login_required
def get_guild():
    """Fetch a guild's name and manage_threads flag by id (login required).

    Returns 404 when the guild is unknown.
    """
    # Check request body
    check_result = check_request(request, id_only=True)
    if isinstance(check_result, tuple) and isinstance(check_result[0], dict):
        return check_result
    guild_id = check_result
    # Check if guild is in DB
    guild = Guild.query.get(guild_id)
    if guild is None:
        return {
            'success': False,
            'error': 'Guild not found'
        }, 404
    # Return guild data
    return {
        'success': True,
        'guild': {
            'name': guild.name,
            'manage_threads': guild.manage_threads
        }
    }, 200
| jareddantis-bots/rico-backend | api/guilds.py | guilds.py | py | 4,053 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.request",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_numbe... |
26043118086 | from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.java.target_types import JavaSourcesGeneratorTarget
from pants.backend.java.target_types import rules as target_types_rules
from pants.core.util_rules import config_files, source_files
from pants.engine.addresses import Address, Addresses
from pants.jvm.resolve.common import Coordinate
from pants.jvm.resolve.coursier_fetch import NoCompatibleResolve
from pants.jvm.resolve.coursier_fetch import rules as coursier_fetch_rules
from pants.jvm.resolve.key import CoursierResolveKey
from pants.jvm.target_types import DeployJarTarget, JvmArtifactTarget
from pants.jvm.testutil import maybe_skip_jdk_test
from pants.jvm.util_rules import rules as util_rules
from pants.testutil.rule_runner import PYTHON_BOOTSTRAP_ENV, QueryRule, RuleRunner, engine_error
# Two named JVM resolves pointing at the same (empty) lockfile; "one" is the
# default resolve used by the fixture below.
NAMED_RESOLVE_OPTIONS = (
    '--jvm-resolves={"one": "coursier_resolve.lockfile", "two": "coursier_resolve.lockfile"}'
)
DEFAULT_RESOLVE_OPTION = "--jvm-default-resolve=one"
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Pytest fixture: a RuleRunner wired with the coursier-fetch rule set and
    the two named resolves declared above."""
    rule_runner = RuleRunner(
        rules=[
            *config_files.rules(),
            *coursier_fetch_rules(),
            *source_files.rules(),
            *util_rules(),
            *target_types_rules(),
            QueryRule(CoursierResolveKey, (Addresses,)),
        ],
        target_types=[DeployJarTarget, JavaSourcesGeneratorTarget, JvmArtifactTarget],
    )
    rule_runner.set_options(
        args=[
            NAMED_RESOLVE_OPTIONS,
            DEFAULT_RESOLVE_OPTION,
        ],
        env_inherit=PYTHON_BOOTSTRAP_ENV,
    )
    return rule_runner
def assert_resolve(
    expected_resolve: str,
    rule_runner: RuleRunner,
    root_one_resolve: str,
    root_two_resolve: str,
    leaf_resolve: str,
) -> None:
    """Materialize two deploy_jar roots sharing one jvm_artifact leaf with the
    given resolves, resolve them together, and assert which resolve is chosen."""
    rule_runner.write_files(
        {
            "BUILD": dedent(
                f"""\
                deploy_jar(name='root_one', main='Ex', dependencies=[':leaf'], resolve='{root_one_resolve}')
                deploy_jar(name='root_two', main='Ex', dependencies=[':leaf'], resolve='{root_two_resolve}')
                jvm_artifact(
                    name='leaf',
                    group='ex',
                    artifact='ex',
                    version='0.0.0',
                    resolve='{leaf_resolve}',
                )
                """
            ),
            "coursier_resolve.lockfile": "[]",
        }
    )
    resolve_key = rule_runner.request(
        CoursierResolveKey,
        # NB: Although it will not happen for `deploy_jars` in production, we resolve two of them
        # together here to validate the handling of multiple roots, which _can_ happen for things
        # like the `repl` goal, and other goals which create an adhoc merged Classpath.
        [
            Addresses(
                [
                    Address(spec_path="", target_name="root_one"),
                    Address(spec_path="", target_name="root_two"),
                ]
            )
        ],
    )
    assert resolve_key.name == expected_resolve
@maybe_skip_jdk_test
def test_all_matching(rule_runner: RuleRunner) -> None:
    """All three targets share resolve 'one', so resolution picks 'one'."""
    assert_resolve("one", rule_runner, "one", "one", "one")
@maybe_skip_jdk_test
def test_no_matching_for_root(rule_runner: RuleRunner) -> None:
    """Roots disagree on the resolve, so NoCompatibleResolve is raised."""
    with engine_error(NoCompatibleResolve):
        assert_resolve("n/a", rule_runner, "one", "two", "two")
@maybe_skip_jdk_test
def test_no_matching_for_leaf(rule_runner: RuleRunner) -> None:
    """The leaf's resolve matches neither root, so NoCompatibleResolve is raised."""
    with engine_error(NoCompatibleResolve):
        assert_resolve("n/a", rule_runner, "one", "one", "two")
@pytest.mark.parametrize(
    "coord_str,expected",
    (
        ("group:artifact:version", Coordinate("group", "artifact", "version")),
        (
            "group:artifact:packaging:version",
            Coordinate("group", "artifact", "version", "packaging"),
        ),
        (
            "group:artifact:packaging:classifier:version",
            Coordinate("group", "artifact", "version", "packaging", "classifier"),
        ),
    ),
)
def test_from_coord_str(coord_str: str, expected: Coordinate) -> None:
    """Coordinate.from_coord_str parses 3-, 4- and 5-part Maven coordinates."""
    assert Coordinate.from_coord_str(coord_str) == expected
| pantsbuild/pants | src/python/pants/jvm/resolve/coursier_fetch_test.py | coursier_fetch_test.py | py | 4,186 | python | en | code | 2,896 | github-code | 6 | [
{
"api_name": "pants.testutil.rule_runner.RuleRunner",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pants.core.util_rules.config_files.rules",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pants.core.util_rules.config_files",
"line_number": 30,
"u... |
28811405161 | import torch
import csv
import pytorch_lightning as pl
from sys import platform
if platform == "linux":
from pypesq import pesq
from pystoi import stoi
from math import isnan
from numpy import random
def check_inf_neginf_nan(tensor, error_msg):
    """Assert that *tensor* contains no +inf, -inf or NaN entries.

    torch.isinf reports both +inf and -inf (and flags complex elements whose
    real or imaginary part is infinite), so the original per-dtype isneginf
    branching was redundant; it also touched torch.complex32, which is not
    present in every torch build. Raises AssertionError(error_msg) on the
    first failing check, matching the original interface.
    """
    assert not torch.any(torch.isinf(tensor)), error_msg
    assert not torch.any(torch.isnan(tensor)), error_msg
def l2_norm(s1, s2):
    """Inner product of s1 and s2 along the last dim, kept as a size-1 dim."""
    return (s1 * s2).sum(dim=-1, keepdim=True)
# source https://arxiv.org/pdf/2008.00264.pdf
class SiSNR(object):
    """Scale-invariant SNR in dB, averaged over the batch (higher is better)."""
    def __call__(self, clean, estimate, eps=1e-8):
        # Inner products are taken along the last dim with keepdim so the
        # projection broadcasts against the signal.
        projection = torch.sum(estimate * clean, -1, keepdim=True)
        clean_energy = torch.sum(clean * clean, -1, keepdim=True)
        s_target = (projection * clean) / (clean_energy + eps)
        e_noise = estimate - s_target
        target_energy = torch.sum(s_target * s_target, -1, keepdim=True)
        noise_energy = torch.sum(e_noise * e_noise, -1, keepdim=True)
        snr_db = 10 * torch.log10((target_energy) / (noise_energy + eps) + eps)
        return torch.mean(snr_db)
# source https://github.com/chanil1218/DCUnet.pytorch/blob/2dcdd30804be47a866fde6435cbb7e2f81585213/train.py
class wSDR(object):
    """Weighted source-to-distortion ratio loss, averaged over the batch."""
    def __call__(self, mixed, clean, clean_est, eps=2e-8):
        """Blend the SDR of the speech estimate and of the implied noise
        estimate, weighted by the clean/noise energy ratio."""
        bsum = lambda x: torch.sum(x, dim=1)
        def mSDRLoss(orig, est):
            # Negative normalized cross-correlation between orig and est.
            correlation = bsum(orig * est)
            energies = torch.norm(orig, p=2, dim=1) * torch.norm(est, p=2, dim=1)
            return -(correlation / (energies + eps))
        noise = mixed - clean
        noise_est = mixed - clean_est
        # a: fraction of total energy that belongs to the clean target.
        a = bsum(clean**2) / (bsum(clean**2) + bsum(noise**2) + eps)
        target_wSDR = a * mSDRLoss(clean, clean_est)
        noise_wSDR = (1 - a) * mSDRLoss(noise, noise_est)
        wSDR = target_wSDR + noise_wSDR
        return torch.mean(wSDR)
def cRM(S, Y, eps=1e-8):
    """Complex ideal ratio mask M such that S ≈ M * Y, computed elementwise
    from the real/imaginary parts of the target S and mixture Y."""
    power = torch.square(Y.real) + torch.square(Y.imag)
    real_part = ((Y.real * S.real) + (Y.imag * S.imag)) / (power + eps)
    imag_part = ((Y.real * S.imag) - (Y.imag * S.real)) / (power + eps)
    return torch.complex(real_part, imag_part)
def bound_cRM(cRM):
    """Bound a complex ratio mask to magnitude < 1, keeping its phase.

    Returns tanh(|M|) * exp(i * angle(M)).  The original implementation
    reconstructed the phase via atan2(m*sin(p), m*cos(p)), which equals p
    for any m >= 0 (and both sides give 0 when m == 0, since the result
    is scaled by m) -- so the round-trip was a no-op and is dropped here.
    """
    mag = torch.tanh(torch.abs(cRM))
    phase = torch.angle(cRM)
    return torch.complex(mag * torch.cos(phase), mag * torch.sin(phase))
def complex_mat_mult(A, B):
    """Elementwise complex product of A and B in explicit real/imag form."""
    product_real = A.real * B.real - A.imag * B.imag
    product_imag = A.real * B.imag + A.imag * B.real
    return torch.complex(product_real, product_imag)
def complex_lrelu(input):
    """Leaky ReLU applied independently to real and imaginary parts."""
    lrelu = torch.nn.functional.leaky_relu
    return torch.complex(lrelu(input.real), lrelu(input.imag))
def apply_complex(fr, fi, input):
    """Apply two real-valued modules (fr, fi) as one complex operator.

    Computes (fr + i*fi)(a + i*b) = (fr(a) - fi(b)) + i*(fr(b) + fi(a)).
    """
    real_part = fr(input.real) - fi(input.imag)
    imag_part = fr(input.imag) + fi(input.real)
    return torch.complex(real_part, imag_part)
# source https://github.com/huyanxin/DeepComplexCRN/blob/bc6fd38b0af9e8feb716c81ff8fbacd7f71ad82f/complexnn.py
class ComplexLSTM(torch.nn.Module):
    """Complex-valued LSTM built from two real LSTMs.

    The real and imaginary parts of the input are run through both LSTMs
    and recombined as a complex product:
    out = (r2r - i2i) + i*(i2r + r2i).
    Source: https://github.com/huyanxin/DeepComplexCRN (complexnn.py).
    """

    def __init__(self, input_size, hidden_size, num_layers, bidirectional, batch_first, projection_dim=None):
        super(ComplexLSTM, self).__init__()
        self.input_dim = input_size
        self.rnn_units = hidden_size
        self.real_lstm = torch.nn.LSTM(input_size=self.input_dim, hidden_size=self.rnn_units, num_layers=num_layers,
                                       bidirectional=bidirectional, batch_first=batch_first)
        self.imag_lstm = torch.nn.LSTM(input_size=self.input_dim, hidden_size=self.rnn_units, num_layers=num_layers,
                                       bidirectional=bidirectional, batch_first=batch_first)
        # Reuse the flag as a width multiplier: 2 for bidirectional, else 1.
        if bidirectional:
            bidirectional = 2
        else:
            bidirectional = 1
        if projection_dim is not None:
            # Optional linear projection applied separately to real/imag outputs.
            self.projection_dim = projection_dim
            self.r_trans = torch.nn.Linear(self.rnn_units * bidirectional, self.projection_dim)
            self.i_trans = torch.nn.Linear(self.rnn_units * bidirectional, self.projection_dim)
        else:
            self.projection_dim = None

    def forward(self, inputs):
        # NOTE(review): a Python list has no .real/.imag; this branch looks
        # like dead or leftover code from the upstream repo -- TODO confirm.
        if isinstance(inputs, list):
            real, imag = inputs.real, inputs.imag
        elif isinstance(inputs, torch.Tensor):
            real, imag = inputs.real, inputs.imag
        # Four real LSTM passes; [0] keeps the output sequence, drops (h, c).
        r2r_out = self.real_lstm(real)[0]
        r2i_out = self.imag_lstm(real)[0]
        i2r_out = self.real_lstm(imag)[0]
        i2i_out = self.imag_lstm(imag)[0]
        real_out = r2r_out - i2i_out
        imag_out = i2r_out + r2i_out
        if self.projection_dim is not None:
            real_out = self.r_trans(real_out)
            imag_out = self.i_trans(imag_out)
        return torch.complex(real_out, imag_out)

    def flatten_parameters(self):
        # Compact LSTM weights in memory (helps after moving to GPU / DataParallel).
        self.imag_lstm.flatten_parameters()
        self.real_lstm.flatten_parameters()
def mag_phase_2_wave(mag, phase, config):
    """Invert a magnitude/phase spectrogram to a waveform via iSTFT.

    Uses config.fft_size, config.hop_length, config.window_length and
    config.normalise_stft for the inverse transform.
    """
    real = mag * torch.cos(phase)
    imag = mag * torch.sin(phase)
    comp = torch.complex(real, imag)
    # Pad one bin back onto the frequency axis; presumably the network
    # drops the last (Nyquist) bin while istft expects fft_size//2 + 1
    # bins -- TODO confirm against the STFT used at data-loading time.
    comp = torch.nn.functional.pad(comp, (0, 0, 0, 1))
    audio = torch.istft(comp, n_fft=config.fft_size, hop_length=config.hop_length, \
                        win_length=config.window_length, normalized=config.normalise_stft)
    return audio
def calc_metric(clean_audio, predict_audio, config, metric):
    """Average *metric* over a batch of audio, skipping NaN scores.

    metric is called as metric(clean_np, predict_np, config.sr) for each
    batch item.  Returns 0.0 when every score is NaN (or batch is empty).
    """
    scores = []
    for idx in range(predict_audio.shape[0]):
        score = metric(clean_audio[idx, :].cpu().numpy(),
                       predict_audio[idx, :].cpu().numpy(), config.sr)
        if not isnan(score):
            scores.append(score)
    # max(..., 1) guards against division by zero when all scores were NaN.
    return float(sum(scores)) / max(len(scores), 1)
def calc_loss(self, target_noise_mask, predict_noise_mask, \
              predict_noise_audio, predict_clean_audio,
              noise_audio, noisy_audio, clean_audio):
    """Combine the noise-branch and speech-branch losses.

    The noise loss is selected by self.hparams['noise_loss_type'] (0-5)
    and the speech loss by self.hparams['speech_loss_type'] (0-1); each
    branch is scaled by its alpha hyperparameter.

    Returns (noise_loss, speech_loss, total_loss).

    NOTE(review): an unrecognised *_loss_type value leaves
    noise_loss_orig / speech_loss_orig unbound and raises NameError.
    """
    # --- noise branch ---
    if self.hparams['noise_loss_type'] == 0:
        noise_loss_orig = self.config.L1(target_noise_mask, predict_noise_mask)
    elif self.hparams['noise_loss_type'] == 1:
        noise_loss_orig = self.config.wSDR(noisy_audio, noise_audio, predict_noise_audio)
    elif self.hparams['noise_loss_type'] == 2:
        noise_loss_orig = self.config.L1(target_noise_mask, predict_noise_mask) + \
                          self.config.L1(noise_audio, predict_noise_audio)
    elif self.hparams['noise_loss_type'] == 3:
        noise_loss_orig = self.config.wSDR(noisy_audio, noise_audio, predict_noise_audio) + \
                          self.config.L1(noise_audio, predict_noise_audio)
    elif self.hparams['noise_loss_type'] == 4:
        noise_loss_orig = self.config.wSDR(noisy_audio, noise_audio, predict_noise_audio) + \
                          self.config.L1(target_noise_mask, predict_noise_mask)
    elif self.hparams['noise_loss_type'] == 5:
        # For complex masks, MSE is applied to real and imag parts separately.
        if target_noise_mask.dtype == torch.complex32 or target_noise_mask.dtype == torch.complex64 or target_noise_mask.dtype == torch.complex128:
            noise_loss_orig = self.config.wSDR(noisy_audio, noise_audio, predict_noise_audio) + \
                              self.config.mse(target_noise_mask.real, predict_noise_mask.real) + \
                              self.config.mse(target_noise_mask.imag, predict_noise_mask.imag)
        else:
            noise_loss_orig = self.config.wSDR(noisy_audio, noise_audio, predict_noise_audio) + \
                              self.config.mse(target_noise_mask, predict_noise_mask)
    noise_loss = (self.hparams['noise_alpha'] * noise_loss_orig)
    # --- speech branch ---
    if self.hparams['speech_loss_type'] == 0:
        # SiSNR is a quality measure (higher = better), hence the negation.
        speech_loss_orig = -self.config.SiSNR(clean_audio, predict_clean_audio)
    elif self.hparams['speech_loss_type'] == 1:
        speech_loss_orig_small = torch.mean(self.config.CDPAM.forward(clean_audio, predict_clean_audio))
        # Rescale CDPAM so it is comparable in magnitude to the other losses.
        speech_loss_orig = speech_loss_orig_small * 10e5
    speech_loss = (self.hparams['speech_alpha'] * speech_loss_orig)
    total_loss = noise_loss + speech_loss
    return noise_loss, speech_loss, total_loss
def train_batch_2_loss(self, train_batch, batch_idx, dtype):
    """Turn one training batch of STFTs into (noise, speech, total) losses.

    dtype selects the pipeline: "real" predicts a sigmoid magnitude mask,
    "complex" predicts a bounded complex ratio mask.  self(...) is the
    model's forward pass (this is a method on the Lightning module).
    """
    noise_data, noisy_data, clean_data, id = train_batch
    check_inf_neginf_nan(clean_data, "Found inf, neginf or nan in clean data STFT!")
    check_inf_neginf_nan(noise_data, "Found inf, neginf or nan in noise data STFT!")
    check_inf_neginf_nan(noisy_data, "Found inf, neginf or nan in noisy data STFT!")
    # Split each STFT into magnitude and phase.
    noise_mag = torch.abs(noise_data)
    noise_phase = torch.angle(noise_data)
    noisy_mag = torch.abs(noisy_data)
    noisy_phase = torch.angle(noisy_data)
    clean_mag = torch.abs(clean_data)
    clean_phase = torch.angle(clean_data)
    # Reference waveforms for the audio-domain loss terms.
    noise_audio = mag_phase_2_wave(noise_mag, noise_phase, self.config)
    noisy_audio = mag_phase_2_wave(noisy_mag, noisy_phase, self.config)
    clean_audio = mag_phase_2_wave(clean_mag, clean_phase, self.config)
    if dtype == "real":
        target_noise_mask = torch.sigmoid(noise_mag / noisy_mag)
        # Min-max scale the network input using dataset-wide stats.
        noisy_mag_scaled = (noisy_mag - self.config.data_minR) / (self.config.data_maxR - self.config.data_minR)
        predict_noise_mask = self(noisy_mag_scaled)
        predict_noise_mag = noisy_mag * predict_noise_mask
        predict_clean_mag = noisy_mag - predict_noise_mag
        # Predicted magnitudes reuse the noisy phase.
        predict_noise_audio = mag_phase_2_wave(predict_noise_mag, noisy_phase, self.config)
        predict_clean_audio = mag_phase_2_wave(predict_clean_mag, noisy_phase, self.config)
    elif dtype == "complex":
        target_noise_mask_out = cRM(noise_data, noisy_data)
        target_noise_mask = bound_cRM(target_noise_mask_out)
        # noisy_data_standardised = (noisy_data - torch.mean(noisy_data)) / torch.std(noisy_data)
        # Scale real/imag parts to [-1, 1] using dataset-wide complex stats.
        noisy_data_scaled = torch.view_as_complex((2 * ((torch.view_as_real(noisy_data) - self.config.data_minC) /
                                                        (self.config.data_maxC - self.config.data_minC))) - 1)
        predict_noise_mask_out = self(noisy_data_scaled)
        predict_noise_mask = bound_cRM(predict_noise_mask_out)
        predict_noise_data = complex_mat_mult(noisy_data, predict_noise_mask)
        predict_clean_data = noisy_data - predict_noise_data
        predict_noise_audio = mag_phase_2_wave(torch.abs(predict_noise_data), \
                                               torch.angle(predict_noise_data), self.config)
        predict_clean_audio = mag_phase_2_wave(torch.abs(predict_clean_data), \
                                               torch.angle(predict_clean_data), self.config)
    noise_loss, speech_loss, train_loss = calc_loss(self,
                                                    target_noise_mask=target_noise_mask,
                                                    predict_noise_mask=predict_noise_mask,
                                                    predict_noise_audio=predict_noise_audio,
                                                    predict_clean_audio=predict_clean_audio,
                                                    noise_audio=noise_audio,
                                                    noisy_audio=noisy_audio,
                                                    clean_audio=clean_audio)
    return noise_loss, speech_loss, train_loss
def val_batch_2_metric_loss(self, val_batch, val_idx, dtype):
    """Validation step: losses plus PESQ/STOI metrics and audio for logging.

    Mirrors train_batch_2_loss, additionally computing average PESQ
    (Linux only; pypesq presumably unavailable elsewhere -- TODO confirm)
    and STOI on the reconstructed audio.
    """
    noise_data, noisy_data, clean_data, id = val_batch
    check_inf_neginf_nan(clean_data, "Found inf, neginf or nan in clean data STFT!")
    check_inf_neginf_nan(noise_data, "Found inf, neginf or nan in noise data STFT!")
    check_inf_neginf_nan(noisy_data, "Found inf, neginf or nan in noisy data STFT!")
    noise_mag = torch.abs(noise_data)
    noise_phase = torch.angle(noise_data)
    noisy_mag = torch.abs(noisy_data)
    noisy_phase = torch.angle(noisy_data)
    clean_mag = torch.abs(clean_data)
    clean_phase = torch.angle(clean_data)
    noise_audio = mag_phase_2_wave(noise_mag, noise_phase, self.config)
    noisy_audio = mag_phase_2_wave(noisy_mag, noisy_phase, self.config)
    clean_audio = mag_phase_2_wave(clean_mag, clean_phase, self.config)
    if dtype == "real":
        target_noise_mask = torch.sigmoid(noise_mag / noisy_mag)
        noisy_mag_scaled = (noisy_mag - self.config.data_minR) / (self.config.data_maxR - self.config.data_minR)
        predict_noise_mask = self(noisy_mag_scaled)
        predict_noise_mag = noisy_mag * predict_noise_mask
        predict_clean_mag = noisy_mag - predict_noise_mag
        predict_clean_audio = mag_phase_2_wave(predict_clean_mag, noisy_phase, self.config)
        predict_noise_audio = mag_phase_2_wave(predict_noise_mag, noisy_phase, self.config)
    elif dtype == "complex":
        target_noise_mask_out = cRM(noise_data, noisy_data)
        target_noise_mask = bound_cRM(target_noise_mask_out)
        # noisy_data_standardised = (noisy_data - torch.mean(noisy_data)) / torch.std(noisy_data)
        noisy_data_scaled = torch.view_as_complex((2 * ((torch.view_as_real(noisy_data) - self.config.data_minC) /
                                                        (self.config.data_maxC - self.config.data_minC))) - 1)
        predict_noise_mask_out = self(noisy_data_scaled)
        predict_noise_mask = bound_cRM(predict_noise_mask_out)
        predict_noise_data = complex_mat_mult(noisy_data, predict_noise_mask)
        predict_clean_data = noisy_data - predict_noise_data
        predict_clean_audio = mag_phase_2_wave(torch.abs(predict_clean_data), \
                                               torch.angle(predict_clean_data), self.config)
        predict_noise_audio = mag_phase_2_wave(torch.abs(predict_noise_data), \
                                               torch.angle(predict_noise_data), self.config)
    # `platform` is presumably sys.platform imported at file top (not
    # visible in this chunk) -- TODO confirm.
    if platform == "linux":
        pesq_av = calc_metric(clean_audio, predict_clean_audio, self.config, pesq)
    else:
        pesq_av = 1
    stoi_av = calc_metric(clean_audio, predict_clean_audio, self.config, stoi)
    noise_loss, speech_loss, val_loss = calc_loss(self,
                                                  target_noise_mask=target_noise_mask,
                                                  predict_noise_mask=predict_noise_mask,
                                                  predict_noise_audio=predict_noise_audio,
                                                  predict_clean_audio=predict_clean_audio,
                                                  noise_audio=noise_audio,
                                                  noisy_audio=noisy_audio,
                                                  clean_audio=clean_audio)
    return noise_loss, speech_loss, val_loss, pesq_av, stoi_av, \
           predict_noise_audio, predict_clean_audio, \
           noise_audio, noisy_audio, clean_audio
def test_batch_2_metric_loss(self, test_batch, test_idx, dtype):
    """Test step: like validation but the batch also carries id/start_point,
    and no inf/NaN checks are performed on the inputs."""
    noise_data, noisy_data, clean_data, id, start_point = test_batch
    noise_mag = torch.abs(noise_data)
    noise_phase = torch.angle(noise_data)
    noisy_mag = torch.abs(noisy_data)
    noisy_phase = torch.angle(noisy_data)
    clean_mag = torch.abs(clean_data)
    clean_phase = torch.angle(clean_data)
    noise_audio = mag_phase_2_wave(noise_mag, noise_phase, self.config)
    noisy_audio = mag_phase_2_wave(noisy_mag, noisy_phase, self.config)
    clean_audio = mag_phase_2_wave(clean_mag, clean_phase, self.config)
    if dtype == "real":
        target_noise_mask = torch.sigmoid(noise_mag / noisy_mag)
        noisy_mag_scaled = (noisy_mag - self.config.data_minR) / (self.config.data_maxR - self.config.data_minR)
        predict_noise_mask = self(noisy_mag_scaled)
        predict_noise_mag = noisy_mag * predict_noise_mask
        predict_clean_mag = noisy_mag - predict_noise_mag
        predict_clean_audio = mag_phase_2_wave(predict_clean_mag, noisy_phase, self.config)
        predict_noise_audio = mag_phase_2_wave(predict_noise_mag, noisy_phase, self.config)
    elif dtype == "complex":
        target_noise_mask_out = cRM(noise_data, noisy_data)
        target_noise_mask = bound_cRM(target_noise_mask_out)
        # noisy_data_standardised = (noisy_data - torch.mean(noisy_data)) / torch.std(noisy_data)
        noisy_data_scaled = torch.view_as_complex((2 * ((torch.view_as_real(noisy_data) - self.config.data_minC) /
                                                        (self.config.data_maxC - self.config.data_minC))) - 1)
        predict_noise_mask_out = self(noisy_data_scaled)
        predict_noise_mask = bound_cRM(predict_noise_mask_out)
        predict_noise_data = complex_mat_mult(noisy_data, predict_noise_mask)
        predict_clean_data = noisy_data - predict_noise_data
        predict_clean_audio = mag_phase_2_wave(torch.abs(predict_clean_data), \
                                               torch.angle(predict_clean_data), self.config)
        predict_noise_audio = mag_phase_2_wave(torch.abs(predict_noise_data), \
                                               torch.angle(predict_noise_data), self.config)
    # NOTE(review): these two lines recompute values already assigned above
    # with identical arguments -- redundant but harmless.
    noise_audio = mag_phase_2_wave(noise_mag, noise_phase, self.config)
    noisy_audio = mag_phase_2_wave(noisy_mag, noisy_phase, self.config)
    # `platform` is presumably sys.platform imported at file top -- TODO confirm.
    if platform == "linux":
        pesq_av = calc_metric(clean_audio, predict_clean_audio, self.config, pesq)
    else:
        pesq_av = 1
    stoi_av = calc_metric(clean_audio, predict_clean_audio, self.config, stoi)
    noise_loss, speech_loss, test_loss = calc_loss(self,
                                                   target_noise_mask=target_noise_mask,
                                                   predict_noise_mask=predict_noise_mask,
                                                   predict_noise_audio=predict_noise_audio,
                                                   predict_clean_audio=predict_clean_audio,
                                                   noise_audio=noise_audio,
                                                   noisy_audio=noisy_audio,
                                                   clean_audio=clean_audio)
    return noise_loss, speech_loss, test_loss, pesq_av, stoi_av, \
           predict_noise_audio, predict_clean_audio, \
           noise_audio, noisy_audio, clean_audio, id, start_point
def epoch_end(self, outputs, type):
    """Log randomly chosen audio samples from the epoch's batches.

    outputs is a list of per-batch dicts holding 'clean', 'predict_clean',
    'noise', 'predict_noise' and 'noisy' waveform tensors; *type* labels
    the TensorBoard audio tags.  `random` here is numpy.random (imported
    at file top).
    """
    no_of_batches = len(outputs)
    random_batches = random.choice(no_of_batches, size=min(self.config.val_log_sample_size, no_of_batches), replace=False)
    # The last batch may be smaller than batch_size, so bound by it.
    no_of_samples = min(self.config.data_params['batch_size'],
                        outputs[-1]['clean'].shape[0],
                        outputs[-1]['predict_clean'].shape[0],
                        outputs[-1]['noise'].shape[0],
                        outputs[-1]['predict_noise'].shape[0],
                        outputs[-1]['noisy'].shape[0])
    random_samples = random.choice(no_of_samples, size=min(self.config.val_log_sample_size, no_of_samples), replace=False)
    # NOTE(review): the loop bound uses no_of_samples while random_batches
    # was sized from no_of_batches; if no_of_batches < no_of_samples this
    # indexes past random_batches -- TODO confirm intended behaviour.
    # (i and ridx are always equal here; enumerate over range is redundant.)
    for i, ridx in enumerate(range(min(self.config.val_log_sample_size, no_of_samples))):
        clean_sample = outputs[random_batches[ridx]]['clean'][random_samples[ridx], :]
        predict_clean_sample = outputs[random_batches[ridx]]['predict_clean'][random_samples[ridx], :]
        noise_sample = outputs[random_batches[ridx]]['noise'][random_samples[ridx], :]
        predict_noise_sample = outputs[random_batches[ridx]]['predict_noise'][random_samples[ridx], :]
        noisy_sample = outputs[random_batches[ridx]]['noisy'][random_samples[ridx], :]
        self.logger.experiment.add_audio("clean({})/{}".format(type, i),
                                         clean_sample,
                                         self.global_step,
                                         sample_rate=self.config.sr)
        self.logger.experiment.add_audio("predict_clean({})/{}".format(type, i),
                                         predict_clean_sample,
                                         self.global_step,
                                         sample_rate=self.config.sr)
        self.logger.experiment.add_audio("noise({})/{}".format(type, i),
                                         noise_sample,
                                         self.global_step,
                                         sample_rate=self.config.sr)
        self.logger.experiment.add_audio("predict_noise({})/{}".format(type, i),
                                         predict_noise_sample,
                                         self.global_step,
                                         sample_rate=self.config.sr)
        self.logger.experiment.add_audio("noisy({})/{}".format(type, i),
                                         noisy_sample,
                                         self.global_step,
                                         sample_rate=self.config.sr)
class InputMonitor(pl.Callback):
    """Callback logging histograms of the raw batch tensors.

    `pl` is presumably pytorch_lightning, imported at file top (not
    visible in this chunk) -- TODO confirm.
    """

    def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
        # Only log every log_every_n_steps batches.
        if (batch_idx + 1) % trainer.log_every_n_steps == 0:
            # batch layout: (noise, noisy, clean, ...) complex STFT tensors.
            noise_real = batch[0].real
            noise_imag = batch[0].imag
            noisy_real = batch[1].real
            noisy_imag = batch[1].imag
            clean_real = batch[2].real
            clean_imag = batch[2].imag
            logger = trainer.logger
            logger.experiment.add_histogram("noise data real", noise_real, global_step=trainer.global_step)
            logger.experiment.add_histogram("noise data imag", noise_imag, global_step=trainer.global_step)
            logger.experiment.add_histogram("noisy data real", noisy_real, global_step=trainer.global_step)
            logger.experiment.add_histogram("noisy data imag", noisy_imag, global_step=trainer.global_step)
            logger.experiment.add_histogram("clean data real", clean_real, global_step=trainer.global_step)
            logger.experiment.add_histogram("clean data imag", clean_imag, global_step=trainer.global_step)
class CheckBatchGradient(pl.Callback):
    """Sanity check that gradients do not leak across the batch dimension.

    Backpropagates from batch item 0 only and asserts that all other
    items of example_input_array received zero gradient.
    """

    def on_train_start(self, trainer, model):
        n = 0  # batch index whose output we backprop from
        example_input = model.example_input_array.to(model.device)
        example_input.requires_grad = True
        model.zero_grad()
        output = model(example_input)
        output[n].abs().sum().backward()
        # All batch indices except n should have exactly zero gradient.
        zero_grad_inds = list(range(example_input.size(0)))
        zero_grad_inds.pop(n)
        if example_input.grad[zero_grad_inds].abs().sum().item() > 0:
            raise RuntimeError("Your model mixes data across the batch dimension!")
{
"api_name": "sys.platform",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.any",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.isinf",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.complex32",
"line_number"... |
8950963065 | import requests
from bs4 import BeautifulSoup
import pandas as pd
from os import listdir, remove
import datetime as dt
from time import sleep
from MainMethods import getInfo, showDays
from conf import INT, INF, URL, LOC, NINF, LOC2,\
chosenF, errorsF, doneF
"""
The information for saved days is checked
and old files deleted.
"""
oldflags = [f for f in listdir(LOC2) if f[0]== "F"]
if oldflags:
for f in oldflags:
remove(f"{LOC2}\{f}")
saved= listdir(LOC)
if saved:
saved = [f.split(".csv")[0] for f in saved]
ints = [int(f.split("y")[-1]) for f in saved]
for i, f in enumerate(saved):
if ints[i] < dt.datetime.today().day and max(ints) -ints[i] <9:
remove(f"{LOC}\{f}.csv")
saved.remove(f)
def flagIt():
    """Create an empty timestamped flag file F<epoch>.txt in LOC2.

    The flag marks this process as active; returns the file's path so
    the caller can later delete it via unflagIt().  The 2 s sleep
    presumably lets concurrent processes observe the flag -- TODO confirm.
    """
    now = dt.datetime.timestamp(dt.datetime.now())
    name = fr"{LOC2}\F{str(int(now))}.txt"
    with open(name, "w") as F:
        pass
    sleep(2)
    return name
def unflagIt(name):
    """Delete the flag file previously created by flagIt()."""
    remove(name)
def checkWait():
    """Block while another process holds a "T" flag file in LOC2.

    Polls every 5 s until the observed flag disappears, then recurses to
    check for flags that appeared in the meantime.
    """
    flag = [f for f in listdir(LOC2) if f[0] == "T"]
    if flag:
        while flag[0] in listdir(LOC2):
            print("Wait, Sign Up in process....")
            sleep(5)
        # Re-check in case a new flag appeared while we were waiting
        # (indentation reconstructed: assumed inside `if flag:` -- an
        # unconditional call would recurse forever -- TODO confirm).
        checkWait()
# Flag this run, then reconcile the pending / done / failed signups.
flag = flagIt()
Chosen = pd.read_csv(chosenF)
Done = pd.read_csv(doneF)[NINF[-2]].to_list()
if Done:
    # Report completed signups and drop them from the pending list.
    print(f"These SignUps are done and should be confirmed by email:\n"
          f"{Chosen[Chosen[NINF[-2]].isin(Done)][[NINF[0], NINF[1], NINF[2]]].to_string(index=False)}\n\n"
          f"-------------------------------------------")
    Chosen.drop(Chosen[Chosen[NINF[-2]].isin(Done)].index, inplace=True)
    pd.DataFrame(columns= NINF).to_csv(doneF, index= False)
Errors = pd.read_csv(errorsF)[NINF[-2]].to_list()
if Errors:
    # NOTE(review): Errors is a plain list here, so Errors.iloc[:,:3]
    # raises AttributeError -- this branch looks broken as written.
    print(f"The sign up for these classes failed:\n"
          f"{Errors.iloc[:,:3]}\n"
          f"Please check manually if you are still interested and "
          f"allow them to be deleted from the program.")
    conf = "n"
    while conf.lower() != "y":
        conf = input("Allow? (y/n):")
        if conf.lower() == "y":
            Errors = pd.DataFrame(columns= NINF)
            Errors.to_csv(errorsF, index = False)
            Chosen.drop(Chosen[Chosen[NINF[-2]].isin(Errors)].index, inplace=True)
        else:
            conf = input("There is no benefit in keeping them !\n"
                         "Are you sure "
                         "you don't want to let them go?\n"
                         "(y/n):")
# Persist the reconciled pending list.
Chosen.to_csv(chosenF, index= False)
"""
This uses requests and beautiful soup to setup
the iterators.
"""
r = requests.get(URL)
soup = BeautifulSoup(r.text, "lxml")
classes = soup.find(id= "classes")
days = classes.find_all(class_= "scheduleDay")[:8]
"""
The following loop gets the basic info from the websites
and keeps it in the dictionary DFs as DataFrames
"""
DFs = {}
for day in days:
date= day["id"]
if date in saved:
continue
DFs[date] = pd.DataFrame(columns= INF)
# iterate over each class in the day
dayclss = day.find_all("div")
for clss in dayclss:
#then within each class I select the link in "schedSignup"
if any(x in clss["class"] for x in INT):
link = clss.find(class_= "schedSignup").a["href"]
inf = getInfo(link)
DFs[date] = DFs[date].append(pd.Series(inf, index= INF), ignore_index=True)
"""
This condition runs the showDays loop to check
each new day's classes for availability and presents the options
"""
num = 0
NewDF = pd.DataFrame(columns= NINF)
if DFs:
result = showDays(DFs, num, NewDF)
NewDF, num = result[0], result[1]
#############
"""
Here, the requests waiting in the 'chosen' csv file
are presented and offered for cancellation
"""
# this just sets up the sig and UId variables for
sigs= [f for f in listdir(LOC2) if f[:3]== "Sig"]
if sigs:
with open(f"{LOC2}\{sigs[0]}", "r") as s:
UId = int(s.read())
##### Cancel
if Chosen.shape[0]:
print(f"\n============== OPTIONS TO CANCEL ======================\n"
f"These are signups that are waiting to be executed:\n\n"
f"{Chosen.iloc[:,:3]}\n\n"
f"Type in the row number on the left if you want to cancel it, seperate with commas\n"
f"Otherwise, just hit enter and confirm\n")
confirm = "n"
while confirm.lower() != "y":
inp = input("CANCEL:")
if inp:
try:
inp = list(map(int, inp.split(",")))
print(f"cancel these:\n"
f"{Chosen.loc[inp, [NINF[0], NINF[1], NINF[2]]]}")
confirm = input("Confirm (y/n):")
if confirm.lower() == "y":
Chosen.drop(inp, inplace=True)
except:
print(f"There seems to be a mistake in your input,\n"
f"please don't type any unnecessary commas, spaces or words.")
else:
confirm = input("Keep all (y/n):")
"""
If there are newly available classes:
the following while loop will get requests and
add the newly chosen ones to the 'chosen' csv file
It will also give Unique IDs to each class based on
the UId variable retrieved from the Signal File (SigA or SigB)
"""
##### Choose
if num:
print(f"=====================================\n"
f"The column on the RIGHT of each list contains the code to choose the class\n"
f"please type in your choice(s)"
f"(seperate codes with commas if you want multiple, hit enter if you want none.)\n")
confirm = "n"
while confirm.lower() != "y":
choice = input("Choice:")
if choice:
try:
choice = list(map(int,choice.split(",")))
chosen = NewDF[NewDF[NINF[-1]].isin(choice)].copy()
if max(choice) <= NewDF[NINF[-1]].max():
print(f"These are your new choices:\n"
f"{chosen.iloc[:,:3].to_string(index= False)}\n")
if Chosen.shape[0]:
print(f"These are still waiting to be executed:\n"
f"{Chosen.iloc[:, :3].to_string(index=False)}\n")
else:
print(f"There are no signups waiting.")
confirm = input("Confirm (y/n):")
else:
print(f"You may have forgotten a comma or got the wrong number,\n"
f"please try again")
except:
print(f"There seems to be a mistake in your input,\n"
f"please don't type any unnecessary commas, spaces or words.")
else:
print(f"You chose none.")
chosen = pd.DataFrame()
if Chosen.shape[0]:
print(f"These are still waiting to be executed:\n"
f"{Chosen.iloc[:, :3].to_string(index= False)}\n")
else:
print(f"There are no signups waiting.")
confirm = input("Confirm (y/n):")
if chosen.shape[0]:
chosen[NINF[-2]] = [UId +i for i in range(1, chosen.shape[0]+1)]
UId = chosen[NINF[-2]].max()
Chosen = Chosen.append(chosen, ignore_index=True)
# The days and requestes are saved
Chosen.to_csv(chosenF, index= False)
unflagIt(flag)
for d in DFs:
    DFs[d].to_csv(fr"{LOC}\{d}.csv", index = False)
# The SigFile is updated: bump the Sig<N> counter file and store the
# latest UId so the next run continues the ID sequence.
if sigs:
    nxtSig = int(sigs[0].split(".")[0][3:])+1
    remove(fr"{LOC2}\{sigs[0]}")
    with open(fr"{LOC2}\Sig{nxtSig}.txt", "w") as s:
        s.write(str(UId))
| Stryder-Git/Movati_Signup | Get_Reqs.py | Get_Reqs.py | py | 7,822 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.listdir",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "conf.LOC2",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "os.remove",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "conf.LOC2",
"line_number": 19,... |
22176331977 | import networkx as nx
from networkx.algorithms import community
from nltk.corpus import stopwords
import re
def build_graph(text):
    """Build an undirected word co-occurrence graph from lines of text.

    Consecutive words on the same line form an edge; repeated
    co-occurrences increment that edge's 'weight' attribute.

    The original kept a `word_list` purely to test membership before
    appending -- it was never read anywhere else, and nx.Graph.add_edge
    already creates missing nodes, so that dead O(n) bookkeeping is
    removed here.

    Parameters
    ----------
    text : iterable of str
        Lines of (already cleaned) text.

    Returns
    -------
    networkx.Graph
    """
    G = nx.Graph()
    for line in text:
        words = line.strip().split()
        # Pair each word with its immediate successor on the line.
        for word_a, word_b in zip(words, words[1:]):
            if G.has_edge(word_a, word_b):
                G[word_a][word_b]['weight'] += 1
            else:
                G.add_edge(word_a, word_b, weight=1)
    return G
def calculate_central_nodes(text_network):
    """Filter the graph to the ~20 most central nodes.

    Computes weighted betweenness centrality, stores it as a node
    attribute, and keeps nodes strictly above the 21st-highest score.

    NOTE: sorted(...)[20] requires the graph to have at least 21 nodes.
    """
    bc = nx.betweenness_centrality(text_network, weight='weight')
    nx.set_node_attributes(text_network, bc, 'betweenness')
    threshold = sorted(bc.values(), reverse=True)[20]
    keep = [node for node, score in bc.items() if score > threshold]
    return text_network.subgraph(keep)
def create_and_assign_communities(text_network):
    """Return the second level of the Girvan-Newman community hierarchy."""
    hierarchy = community.girvan_newman(text_network)
    next(hierarchy)  # discard the top-level split
    return next(hierarchy)
def find_topics(text):
    """Full pipeline: co-occurrence graph -> central-node filter -> communities.

    Returns the detected communities, or None (after printing a hint)
    when the input is too small or malformed for the analysis.

    Fix: the original used a bare `except:`, which also swallows
    KeyboardInterrupt and SystemExit; narrowed to Exception.
    """
    try:
        text_network = build_graph(text)
        text_network = calculate_central_nodes(text_network)
        topics = create_and_assign_communities(text_network)
        return topics
    except Exception:
        print("Error: Something went wrong. Check your input. You need at least 20 unique words in your text to start the analysis.")
def clean(text):
    """Lowercase lines, strip punctuation, and drop English stopwords.

    Returns a new list of cleaned lines (words joined by single spaces).
    """
    stop_words = set(stopwords.words('english'))
    cleaned = []
    for line in text:
        stripped = re.sub(r'[^\w\s]', '', line)
        kept = [tok.lower() for tok in stripped.split() if tok.lower() not in stop_words]
        cleaned.append(' '.join(kept))
    return cleaned
| michal-pikusa/topic-network | topicnetwork/__init__.py | __init__.py | py | 2,066 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "networkx.Graph",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "networkx.betweenness_centrality",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "networkx.set_node_attributes",
"line_number": 27,
"usage_type": "call"
},
{
"api_na... |
73499996027 | import numpy as np
from numpy import ma
import xarray as xr
from netCDF4 import Dataset
import struct
import sys
import os
import datetime as dt
import glob
"""
This module contains functions for reading external data
to use with LPT.
The data_read_function is called at various points in other LPT functions.
To add a new data set, do the following:
1) Write a read function similar to read_generic_netcdf below.
2) Add an "elif" option that calls that function in readdata
"""
################################################################################
def readdata(datetime_to_read, dataset_options_dict, verbose=None):
    """
    Main data read function. Get data at datetime datetime_to_read.
    Based on the options in dataset_options_dict, it will look in the data directory
    and use the read function specified below.

    To add a dataset type, add an elif block to this function.
    The function is expected to return a dictionary with keys 'lon', 'lat', and 'data'
    (or None when 'raw_data_format' is unrecognised).

    Verbose option (new 05/2023):
    - If set to None (default), it will use the verbose option from dataset_options_dict.
    - Otherwise, the value will be used *instead of* dataset_options_dict.
      This allows a function call to override the setting in dataset_options_dict.
    """

    ## Manage verbose
    if verbose is None:
        verbose_actual = dataset_options_dict['verbose']
    else:
        verbose_actual = verbose

    if dataset_options_dict['raw_data_format'] == 'generic_netcdf':
        variable_names = (dataset_options_dict['longitude_variable_name']
            , dataset_options_dict['latitude_variable_name']
            , dataset_options_dict['field_variable_name'])

        DATA = read_generic_netcdf_at_datetime(datetime_to_read
            , variable_names = variable_names
            , data_dir = dataset_options_dict['raw_data_parent_dir']
            , fmt = dataset_options_dict['file_name_format']
            , verbose = verbose_actual)

    elif dataset_options_dict['raw_data_format'] == 'generic_netcdf_with_multiple_times':
        # Same as above but the file holds several times; pass the time
        # variable name and the datetime to select.
        variable_names = (dataset_options_dict['longitude_variable_name']
            , dataset_options_dict['latitude_variable_name']
            , dataset_options_dict['time_variable_name']
            , dataset_options_dict['field_variable_name'])

        DATA = read_generic_netcdf_at_datetime(datetime_to_read
            , variable_names = variable_names
            , dt_to_use = datetime_to_read
            , data_dir = dataset_options_dict['raw_data_parent_dir']
            , fmt = dataset_options_dict['file_name_format']
            , verbose = verbose_actual)

    elif dataset_options_dict['raw_data_format'] == 'cmorph':
        DATA = read_cmorph_at_datetime(datetime_to_read
            , area = dataset_options_dict['area']
            , data_dir = dataset_options_dict['raw_data_parent_dir']
            , fmt = dataset_options_dict['file_name_format']
            , verbose = verbose_actual)

    elif dataset_options_dict['raw_data_format'] == 'imerg_hdf5':
        DATA = read_imerg_hdf5_at_datetime(datetime_to_read
            , area = dataset_options_dict['area']
            , data_dir = dataset_options_dict['raw_data_parent_dir']
            , fmt = dataset_options_dict['file_name_format']
            , verbose = verbose_actual)

    elif dataset_options_dict['raw_data_format'] == 'cfs_forecast':
        # Translate the valid time into a forecast record index within
        # the CFS file initialised at 'datetime_init'.
        fcst_hour = int((datetime_to_read - dataset_options_dict['datetime_init']).total_seconds()/3600)
        fcst_resolution_hours = dataset_options_dict['data_time_interval']
        if fcst_hour < 1: # There is no data in the file for fcst = 0. Use 6h fcst values.
            records = [1,]
        else:
            records = [int(fcst_hour/fcst_resolution_hours),]
        # read_cfs_rt_at_datetime is presumably defined later in this
        # module (not visible in this chunk) -- TODO confirm.
        DATA = read_cfs_rt_at_datetime(dataset_options_dict['datetime_init'] # datetime_to_read
            , data_dir = dataset_options_dict['raw_data_parent_dir']
            , fmt = dataset_options_dict['file_name_format']
            , records = records
            , verbose = verbose_actual)
        DATA['data'] = ma.masked_array(DATA['precip'][0])

    ## -- Add an elif block here for new datasets. --

    else:
        print(('ERROR! '+dataset_options_dict['raw_data_format'] + ' is not a valid raw_data_format!'), flush=True)
        DATA = None

    return DATA
################################################################################
## Read functions for generic NetCDF data.
################################################################################
def read_generic_netcdf(fn, variable_names=('lon','lat','rain'), dt_to_use=None):
    """
    DATA = read_generic_netcdf(fn)

    Read lon, lat, and a 2-D field from a NetCDF file into a dict with
    keys 'lon', 'lat', 'data'.  When dt_to_use is given, variable_names
    is (lon, lat, time, field) and the time nearest dt_to_use is
    selected; otherwise variable_names is (lon, lat, field).

    NaNs in the field are masked, and longitudes are rotated from
    (-180, 180) to (0, 360) convention (data columns reordered to match).

    output is like this:
    list(DATA)
    Out[12]: ['lon', 'lat', 'precip']
    In [21]: DATA['lon'].shape
    Out[21]: (1440,)
    In [22]: DATA['lat'].shape
    Out[22]: (400,)
    In [23]: DATA['precip'].shape
    Out[23]: (400, 1440)
    """
    DATA = {}
    with xr.open_dataset(fn) as DS:
        DATA['lon'] = DS[variable_names[0]].values
        DATA['lat'] = DS[variable_names[1]].values
        ## If no time variable, just retrieve the 2-D data as it is.
        if not dt_to_use is None: #'time' in list(DS.variables):
            DATA['data'] = DS.sel({variable_names[2]:str(dt_to_use)},method='nearest')[variable_names[3]].values
        else:
            DATA['data'] = DS[variable_names[2]].values
    DATA['data'] = np.ma.masked_array(DATA['data'], mask=np.isnan(DATA['data']))

    ## Need to get from (-180, 180) to (0, 360) longitude.
    # Split at 0 with a small tolerance, shift the negative part, and
    # re-concatenate so longitude is monotonically increasing in (0, 360).
    lon_lt_0, = np.where(DATA['lon'] < -0.0001)
    lon_ge_0, = np.where(DATA['lon'] > -0.0001)
    if len(lon_lt_0) > 0:
        DATA['lon'][lon_lt_0] += 360.0
        DATA['lon'] = np.concatenate((DATA['lon'][lon_ge_0], DATA['lon'][lon_lt_0]))
        DATA['data'] = np.concatenate((DATA['data'][:,lon_ge_0], DATA['data'][:,lon_lt_0]), axis=1)

    return DATA
def read_generic_netcdf_at_datetime(dt, data_dir='.'
    , variable_names=('lon','lat','rain'), dt_to_use=None, fmt='gridded_rain_rates_%Y%m%d%H.nc'
    , verbose=False):
    """Locate and read the generic NetCDF file for datetime *dt*.

    The file name is data_dir + '/' + dt.strftime(fmt).  Returns the
    dict from read_generic_netcdf, or None (with a message) when the
    file does not exist.  (The parameter name *dt* shadows the module
    alias `datetime as dt` used elsewhere in this file.)
    """
    fn = data_dir + '/' + dt.strftime(fmt)
    if not os.path.exists(fn):
        print('File not found: ', fn)
        return None
    if verbose:
        print(fn)
    return read_generic_netcdf(fn,
        variable_names = variable_names,
        dt_to_use = dt_to_use)
################################################################################
## Read functions for specific datasets.
################################################################################
"""
CMORPH reading functions.
"""
def read_cmorph_rt_bin(fn, area=[0,360,-90,90]):
    """
    DATA = read_cmorph_rt_bin(fn)
    DATA is a dict with keys lon, lat, and data (mm/hr, averaged over the
    file's two half-hour records), cut to area = [lon0, lon1, lat0, lat1].

    CMORPH RT files are raw little-endian binary; the grid below comes
    from the GrADS control file for the product:

      XDEF 4948 LINEAR 0.0363783345 0.072756669
      YDEF 1649 LINEAR -59.963614312 0.072771376
      VARS: cmorph  CMORPH Rain Rate [mm/hr], 2 half-hour records.

    Fixes vs. the original: removed an unused structured `dtype` local,
    and the file is now opened with a context manager so the handle is
    closed even if np.fromfile raises.
    """
    DATA = {}
    DATA['lon'] = np.arange(0.0363783345, 360.0, 0.072756669)
    DATA['lat'] = np.arange(-59.963614312, 60.0, 0.072771376)
    ## GrADS uses FORTRAN REAL values, which is np.float32 for Python.
    with open(fn, 'rb') as fid:
        DATA['data'] = np.fromfile(fid, dtype=np.float32, count=2*4948*1649)
    if sys.byteorder == 'big': # Data is little endian.
        DATA['data'] = DATA['data'].byteswap()

    ## Shape and scale the data.
    DATA['data'] = np.reshape(np.double(DATA['data']), [2, 1649, 4948])
    DATA['data'][DATA['data'] < -0.001] = 0.0 # Usually, missing high latitude data.

    ## Cut out area.
    keep_lon, = np.where(np.logical_and(DATA['lon'] > area[0], DATA['lon'] < area[1]))
    keep_lat, = np.where(np.logical_and(DATA['lat'] > area[2], DATA['lat'] < area[3]))
    DATA['lon'] = DATA['lon'][keep_lon[0]:keep_lon[-1]+1]
    DATA['lat'] = DATA['lat'][keep_lat[0]:keep_lat[-1]+1]
    DATA['data'] = DATA['data'][:, keep_lat[0]:keep_lat[-1]+1, keep_lon[0]:keep_lon[-1]+1]
    ## Average the two half-hour records into one hourly rate.
    DATA['data'] = 0.5*(DATA['data'][0,:,:] + DATA['data'][1,:,:])
    return DATA
def read_cmorph_at_datetime(dt_this, force_rt=False, data_dir='.'
                            , fmt='CMORPH_V0.x_RT_8km-30min_%Y%m%d%H'
                            , verbose=False, area=[0,360,-90,90]):
    """
    DATA = read_cmorph_at_datetime(dt, force_rt=False, verbose=False)

    DATA is a dict with keys lon, lat, and data.
    Based on the provided datetime dt_this, read in the CMORPH realtime
    binary data whose file name is built from ``fmt``.

    NOTE(review): an earlier docstring claimed a research-product /
    realtime fallback, but only the realtime binary product is read here
    and the ``force_rt`` argument is currently unused -- confirm intended
    behavior against the callers.
    """
    ## First try research product
    fn = (data_dir + '/' + dt_this.strftime(fmt))
    if verbose:
        print(fn)
    DATA = read_cmorph_rt_bin(fn, area=area)
    ## Wrap in a masked array for consistency with the other read functions.
    DATA['data'] = ma.masked_array(DATA['data'])
    return DATA
def read_imerg_hdf5_at_datetime(dt_this, force_rt=False, data_dir='.'
                                , fmt='%Y/%m/%d/3B-HHR.MS.MRG.3IMERG.%Y%m%d-S%H*.HDF5'
                                , verbose=False, area=[0,360,-90,90]):
    """
    DATA = read_imerg_hdf5_at_datetime(dt_this, force_rt=False, data_dir='.'
                                       , fmt='%Y/%m/%d/3B-HHR.MS.MRG.3IMERG.%Y%m%d-S%H*.HDF5'
                                       , verbose=False, area=[0,360,-90,90])

    DATA is a dict with keys lon, lat, and data.
    Based on the provided datetime dt_this, read in the IMERG HDF data.

    By default, it will first check for the final product,
    and use the "late" realtime product if the final product was not found.
    However, if force_rt = True, it just uses the "late" realtime product.
    (It will search for a filename with modified fmt to check for "late" product
     - append 'late/' to the front of the directory path.
     - replace '3B-HHR' with '3B-HHR-L').

    :raises FileNotFoundError: if no matching file is found at all.
    """
    fn_list = sorted(glob.glob(data_dir + '/' + dt_this.strftime(fmt)))
    if len(fn_list) < 1 and not force_rt:
        ## Try "late" realtime data.
        print('Final data version not found. Trying to use late realtime data instead.')
        fmt_rt = 'late/' + fmt.replace('3B-HHR','3B-HHR-L')
        fn_list = sorted(glob.glob(data_dir + '/' + dt_this.strftime(fmt_rt)))
    if len(fn_list) < 1:
        ## Fail with a clear error instead of the opaque IndexError that
        ## fn_list[0] would otherwise raise below.
        raise FileNotFoundError('No IMERG input data found for '
                                + dt_this.strftime(fmt) + ' under ' + data_dir)
    fn = fn_list[0]
    if verbose:
        print(fn)
    with Dataset(fn) as DS:
        lon_rain = DS['Grid']['lon'][:]
        lat_rain = DS['Grid']['lat'][:]
        rain = DS['Grid']['precipitationCal'][:][0].T
    ## If a second half-hourly file matched, average the two fields.
    if len(fn_list) > 1:
        fn = fn_list[1]
        if verbose:
            print(fn)
        with Dataset(fn) as DS:
            rain30 = DS['Grid']['precipitationCal'][:][0].T
        rain = 0.5 * (rain + rain30)
    ## lon -180:0 --> 180:360
    idx_neg_lon = [x for x in range(len(lon_rain)) if lon_rain[x] < -0.0001]
    idx_pos_lon = [x for x in range(len(lon_rain)) if lon_rain[x] > -0.0001]
    lon_rain = np.append(lon_rain[idx_pos_lon[0]:idx_pos_lon[-1]+1], 360.0 + lon_rain[idx_neg_lon[0]:idx_neg_lon[-1]+1], axis=0)
    rain = np.append(rain[:,idx_pos_lon[0]:idx_pos_lon[-1]+1], rain[:,idx_neg_lon[0]:idx_neg_lon[-1]+1], axis=1)
    DATA={}
    DATA['lon'] = lon_rain
    DATA['lat'] = lat_rain
    DATA['data'] = ma.masked_array(rain)
    ## Cut out area.
    keep_lon, = np.where(np.logical_and(DATA['lon'] > area[0], DATA['lon'] < area[1]))
    keep_lat, = np.where(np.logical_and(DATA['lat'] > area[2], DATA['lat'] < area[3]))
    DATA['lon'] = DATA['lon'][keep_lon[0]:keep_lon[-1]+1]
    DATA['lat'] = DATA['lat'][keep_lat[0]:keep_lat[-1]+1]
    DATA['data'] = DATA['data'][keep_lat[0]:keep_lat[-1]+1, keep_lon[0]:keep_lon[-1]+1]
    return DATA
################################################################################
################################################################################
################################################################################
"""
CFS Grib2 reading function
"""
def read_cfs_rt_at_datetime(dt_this, data_dir='./'
                            , fmt='cfs.%Y%m%d/%H/time_grib_01/prate.01.%Y%m%d%H.daily.grb2'
                            , records=range(1,45*4+1), verbose=False):
    """Resolve the CFS realtime grib2 path for ``dt_this`` and read it.

    Thin wrapper around read_cfs_rt_grib2(): builds the file name from
    ``data_dir`` and the strftime template ``fmt``, then delegates.
    """
    grib_path = '/'.join([data_dir, dt_this.strftime(fmt)])
    if verbose:
        print(grib_path, flush=True)
    return read_cfs_rt_grib2(grib_path, records=records, verbose=verbose)
def read_cfs_rt_grib2(fn, records=range(1,45*4+1), verbose=False):
    """
    RT = read_cfs_rt_grib2(fn, records=N)

    Read CFS realtime precipitation-rate records from a grib2 file.

    :param fn: Path to the grib2 file.
    :param records: List of 1-based record numbers to read. By default,
        get the first 45 days at 6 hourly intervals.
    :param verbose: If True, print each record number as it is read.
    :return: dict with keys 'lon', 'lat', and 'precip' [mm/h].

    example output:
    In [23]: RT['lon'].shape
    Out[23]: (384,)
    In [24]: RT['lat'].shape
    Out[24]: (190,)
    In [25]: RT['precip'].shape
    Out[25]: (180, 190, 384)
    """
    import gdal  # Import gdal lazily so the module loads without GDAL installed.
    DS = gdal.Open(fn, gdal.GA_ReadOnly)
    lon = np.arange(0.0,359.062 + 0.5,0.938)
    ## grid file with Gaussian latitude was obtained from wgrib2 like this:
    ## wgrib2 -d 1 -gridout grid.txt /home/orca/data/model_fcst_grib/cfs/cfs.20190508/00/time_grib_01/prate.01.2019050800.daily.grb2
    ## awk -F, '{print $3}' grid.txt | uniq | tr "\n" ", "
    lat = np.flip(np.array([-89.277, -88.340, -87.397, -86.454, -85.509
        , -84.565, -83.620, -82.676, -81.731, -80.786
        , -79.841, -78.897, -77.952, -77.007, -76.062
        , -75.117, -74.173, -73.228, -72.283, -71.338
        , -70.393, -69.448, -68.503, -67.559, -66.614
        , -65.669, -64.724, -63.779, -62.834, -61.889
        , -60.945, -60.000, -59.055, -58.110, -57.165
        , -56.220, -55.275, -54.330, -53.386, -52.441
        , -51.496, -50.551, -49.606, -48.661, -47.716
        , -46.771, -45.827, -44.882, -43.937, -42.992
        , -42.047, -41.102, -40.157, -39.212, -38.268
        , -37.323, -36.378, -35.433, -34.488, -33.543
        , -32.598, -31.653, -30.709, -29.764, -28.819
        , -27.874, -26.929, -25.984, -25.039, -24.094
        , -23.150, -22.205, -21.260, -20.315, -19.370
        , -18.425, -17.480, -16.535, -15.590, -14.646
        , -13.701, -12.756, -11.811, -10.866, -9.921
        , -8.976, -8.031, -7.087, -6.142, -5.197
        , -4.252, -3.307, -2.362, -1.417, -0.472
        , 0.472, 1.417, 2.362, 3.307, 4.252
        , 5.197, 6.142, 7.087, 8.031, 8.976
        , 9.921, 10.866, 11.811, 12.756, 13.701
        , 14.646, 15.590, 16.535, 17.480, 18.425
        , 19.370, 20.315, 21.260, 22.205, 23.150
        , 24.094, 25.039, 25.984, 26.929, 27.874
        , 28.819, 29.764, 30.709, 31.653, 32.598
        , 33.543, 34.488, 35.433, 36.378, 37.323
        , 38.268, 39.212, 40.157, 41.102, 42.047
        , 42.992, 43.937, 44.882, 45.827, 46.771
        , 47.716, 48.661, 49.606, 50.551, 51.496
        , 52.441, 53.386, 54.330, 55.275, 56.220
        , 57.165, 58.110, 59.055, 60.000, 60.945
        , 61.889, 62.834, 63.779, 64.724, 65.669
        , 66.614, 67.559, 68.503, 69.448, 70.393
        , 71.338, 72.283, 73.228, 74.173, 75.117
        , 76.062, 77.007, 77.952, 78.897, 79.841
        , 80.786, 81.731, 82.676, 83.620, 84.565
        , 85.509, 86.454, 87.397, 88.340, 89.277]), axis=0)
    ## Read each requested record as a 2-D array and convert units in one
    ## vectorized step (kg/m2/sec --> mm/h). This replaces a per-value
    ## Python loop over ~13M values.
    bands = []
    for band in records:
        if verbose:
            print('Record #' + str(band), flush=True)
        bands.append(DS.GetRasterBand(band).ReadAsArray() * 3600.0)
    DS = None  # Close the file.
    precip = np.array(bands).reshape([len(records), len(lat), len(lon)])
    DATA={}
    DATA['lon'] = lon
    DATA['lat'] = lat
    DATA['precip'] = precip
    return DATA
def read_cfsr_grib2(fn, band_list=None, verbose=False):
    """
    RT = read_cfsr_grib2(fn)

    Read CFSR precipitation-accumulation records from a monthly grib2 file.

    :param fn: Path to the grib2 file.
    :param band_list: 1-based record numbers to read; must be a multiple of 6
        records (6 hourly accumulations per initialization). Defaults to all
        records in the file.
    :param verbose: If True, print progress per record.
    :return: dict with keys 'lon', 'lat', and 'precip' with shape
        (n_inits, 6, n_lat, n_lon).

    example output:
    In [23]: RT['lon'].shape
    Out[23]: (384,)
    In [24]: RT['lat'].shape
    Out[24]: (190,)
    In [25]: RT['precip'].shape
    Out[25]: (180, 190, 384)
    """
    ## Import gdal locally, consistent with read_cfs_rt_grib2, so the name is
    ## guaranteed to be in scope here.
    import gdal
    DS = gdal.Open(fn, gdal.GA_ReadOnly)
    lon = np.arange(0.0,359.51,0.5)
    lat = np.arange(90.0,-90.01,-0.5)
    n_records = DS.RasterCount
    if band_list is None:
        band_list = range(1, n_records+1)
    ## Read each record as a whole 2-D array (vectorized; replaces a
    ## per-value Python loop).
    bands = []
    for band in band_list:
        if verbose:
            print((str(band) + ' of ' + str(n_records)))
        bands.append(DS.GetRasterBand(band).ReadAsArray())
    DS = None  # Close the file.
    ## Group the hourly accumulations into sets of 6 per initialization.
    ## kg/m2 in 1h is equivalent to mm/h.
    precip = np.array(bands).reshape([int(len(band_list)/6), 6, len(lat), len(lon)])
    DATA={}
    DATA['lon'] = lon
    DATA['lat'] = lat
    DATA['precip'] = precip
    return DATA
def get_cfsr_6h_rain(dt_ending, verbose=False):
    """
    Read in the rainfall using read_cfsr_grib2(fn), then calculate the
    6 hourly rain rate (mm/h) ending at ``dt_ending`` and return it.

    CFSR rain is stored in monthly files. It is initialized every 6 h,
    and the data provide hourly accumulations (in kg/m^2, equivalent to mm):

    1:0:d=2011120100:APCP:surface:0-1 hour acc fcst:
    2:94325:d=2011120100:APCP:surface:1-2 hour acc fcst:
    ...
    6:537704:d=2011120100:APCP:surface:5-6 hour acc fcst:

    To get the 6 hourly accumulation, all 6 of these need to be added.
    Then take the mean (e.g., divide by 6h) to get mm/h.
    """
    dt_beginning = dt_ending - dt.timedelta(hours=6)
    ## File naming switched from "gdas" to "cdas1" at the end of March 2011.
    is_gdas_era = dt_beginning < dt.datetime(2011, 3, 31, 23, 59, 0)
    stem = 'gdas' if is_gdas_era else 'cdas1'
    fn_beginning = ('/home/orca/data/model_anal/cfsr/rain_accum/'
                    + dt_beginning.strftime('%Y')
                    + '/apcp.' + stem + '.'
                    + dt_beginning.strftime('%Y%m') + '.grb2')
    if verbose:
        print(fn_beginning, flush=True)
    ## Record number = 1 + hours elapsed since the start of the month.
    month_start = dt.datetime(dt_beginning.year, dt_beginning.month, 1, 0, 0, 0)
    rec_num = 1 + int((dt_beginning - month_start).total_seconds() / 3600.0)
    F = read_cfsr_grib2(fn_beginning, band_list=range(rec_num, rec_num + 6, 1),
                        verbose=verbose)
    ## Mean over the 6 hourly accumulations -> mm/h.
    precip6hr = np.nanmean(F['precip'], axis=1)[0]
    DATA = {}
    DATA['lon'] = F['lon']
    DATA['lat'] = F['lat']
    DATA['precip'] = precip6hr
    return DATA
| brandonwkerns/lpt-python-public | lpt/readdata.py | readdata.py | py | 19,320 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "numpy.ma.masked_array",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.ma",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "xarray.open_dataset",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "numpy.ma.masked_a... |
75051529788 | import logging
import random
from typing import Set, Generator, Optional
from .location import Location
from .move import Move
from .piece import Color, Piece, Rank
from .board import Board
class MoveSet:
    r""" Moves available to a player """

    # Board geometry shared by all MoveSet instances; set once via set_board().
    _brd = None  # type: Board

    @staticmethod
    def set_board(brd: Board) -> None:
        r""" Sets the board for the entire class """
        MoveSet._brd = brd

    def __init__(self, color: Color):
        r"""
        :param color: Color that is making the moves
        """
        # Keyed by (origin Location, destination Location) -> Move.
        self._avail = dict()  # Available moves
        self._color = color

    @property
    def avail(self) -> dict:
        r""" Accessor for the available moves """
        return self._avail

    @staticmethod
    def build(pieces: Set[Piece], locs: dict, other_locs: dict) -> 'MoveSet':
        r"""
        Factory method used to construct an initial move set.
        :param pieces: All of the players pieces
        :param locs: Location of the player pieces
        :param other_locs: Location of pieces of other player
        :return: Constructed move set
        """
        assert MoveSet._brd is not None, "Board information be present"
        assert pieces, "Piece set can never be empty"
        color = next(iter(pieces)).color
        ms = MoveSet(color)
        for p in pieces:
            ms.add_piece(p, locs, other_locs)
        return ms

    def add_piece(self, piece: Piece, locs: dict, other_locs: dict):
        r"""
        Add a piece's moves to the MoveSet
        :param piece: Piece whose moves (if any) will be added
        :param locs: Location of the player pieces
        :param other_locs: Location of pieces of other player
        """
        self._process_piece(piece, locs, other_locs, add=True)

    def del_piece(self, piece: Piece, locs: dict, other_locs: dict):
        r"""
        Remove a piece's moves from the MoveSet
        :param piece: Piece whose moves (if any) will be removed
        :param locs: Location of the player pieces
        :param other_locs: Location of pieces of other player
        """
        self._process_piece(piece, locs, other_locs, add=False)

    def _process_piece(self, piece: Piece, locs: dict, other_locs: dict, add: bool):
        r"""
        Standardizes adding/removing a piece since same algorithm with minor change.
        :param piece: Piece to process
        :param locs: Location for pieces of same color as \p Piece
        :param other_locs: Location for other player's pieces
        :param add: If True, add the piece, otherwise remove the piece
        """
        # Verify color is same for all pieces
        assert piece.color == self._color, "Piece set has pieces of different colors"

        # Standard function for either adding or deleting a move
        def _process_func(_p: Piece, _loc: Location):
            if add:
                # Record the enemy piece (if any) at the destination as attacked.
                try: self._add_move(_p, _loc, other_locs[_loc])
                except KeyError: self._add_move(_p, _loc)
            else: self._del_move(_p, _loc)

        # Bombs and flags can be ignored
        if piece.is_immobile(): return
        # Check ordinary pieces
        if piece.rank != Rank.scout():
            for loc in piece.loc.neighbors():
                # Ignore pieces not allowed by board or where piece of same color
                if not self._brd.is_inside(loc) or loc in locs: continue
                _process_func(piece, loc)
        # Check scout pieces specially
        else:
            # Scouts slide any distance; walk each ray outward until blocked.
            for direction_list in self._brd.to_edge_lists(piece.loc):
                for loc in direction_list:
                    # If scout blocked by board location or same color, immediately stop
                    if not self._brd.is_inside(loc) or loc in locs: break
                    _process_func(piece, loc)
                    # An enemy piece can be attacked but blocks further travel.
                    if loc in other_locs: break

    def _add_move(self, p: Piece, other: Location, attacked: Optional[Piece] = None) -> None:
        r""" Add \p piece's move to \p other to the \p MoveSet """
        assert p.is_scout() or p.loc.is_adjacent(other)
        key = self._make_move_key(p.loc, other)
        # assert key not in self._avail
        self._avail[key] = Move(p, p.loc, other, attacked)

    def _del_move(self, p: Piece, other: Location) -> None:
        r"""
        Delete the corresponding move from the \p MoveSet
        :param p: Piece whose move will be deleted
        :param other: Location where \p p will be moved
        """
        assert p.is_scout() or p.loc.is_adjacent(other)
        key = self._make_move_key(p.loc, other)
        del self._avail[key]

    def has_move(self, p: Piece, new_loc: Location) -> bool:
        r""" Returns True if the \p Piece has an available move to the specified \p Location """
        key = self._make_move_key(p.loc, new_loc)
        return key in self._avail

    def get_move(self, p: Piece, new_loc: Location) -> Optional[Move]:
        r"""
        Gets the move corresponding to the \p Piece and \p Location. If the corresponding \p Move
        is not found, \p None is returned.
        """
        key = self._make_move_key(p.loc, new_loc)
        try: return self._avail[key]
        except KeyError: return None

    def __len__(self) -> int:
        r""" Return number of moves in the \p MoveSet """
        return len(self._avail)

    def __contains__(self, item: Move) -> bool:
        r""" Adds support for the "in" operator """
        if item.piece is None: return False
        return self.has_move(item.piece, item.new)

    def remove_moves_after_add(self, loc: Location, plyr_locs: dict, other_locs: dict) -> None:
        r"""
        Process the adding of a piece at Location \p loc
        :param loc: Location of added piece
        :param plyr_locs: Location of pieces for same color as \p MoveSet
        :param other_locs: Location of pieces of other \p Player
        """
        self._handle_loc_change(loc, plyr_locs, other_locs, False)

    def add_moves_after_delete(self, loc: Location, plyr_locs: dict, other_locs: dict) -> None:
        r"""
        Process the deletion of a piece that was at Location \p loc
        :param loc: Location of deleted piece
        :param plyr_locs: Location of pieces for same color as \p MoveSet
        :param other_locs: Location of pieces of other \p Player
        """
        self._handle_loc_change(loc, plyr_locs, other_locs, True)

    def _handle_loc_change(self, loc: Location, plyr_locs: dict, other_locs: dict, add: bool):
        r"""
        Process a \p Location's state change by either removing or add moves to the MoveSet.
        :param loc: Location whose state is being changed
        :param plyr_locs: Locations of the implicit player's pieces
        :param other_locs: Location dictionary for the other player
        :param add: If True, add moves to the MoveSet. Otherwise, remove those locations.
        """
        el = self._brd.to_edge_lists(loc)
        # Each pair is (ray to scan for an affected piece, opposite ray a
        # scout found on the first ray could slide along through \p loc).
        el_groups = [(el.right, el.left), (el.left, el.right), (el.up, el.down), (el.down, el.up)]

        def _add_func(_p: Piece, _loc: Location):
            try: self._add_move(_p, _loc, other_locs[_loc])
            except KeyError: self._add_move(_p, _loc)

        for search, opp in el_groups:
            # Find first piece in search direction (if any)
            p = None
            for srch in search:
                if srch in plyr_locs: p = plyr_locs[srch]
                elif srch in other_locs: p = other_locs[srch]
                if p is not None: break
            # If no piece in search direction
            if p is None or p.is_immobile(): continue
            # Ignore pieces of other color since will be handled in separate function call
            if p.color != self._color: continue
            # If found p is not a scout and not adjacent, move on
            if not p.is_scout() and not p.loc.is_adjacent(loc): continue
            # Delete first since may need to add in next step
            if not add: self._del_move(p, loc)
            # In an add, always add the move. In a delete, may need to add back if the moved
            # piece is of the other player's color
            if add or loc in other_locs:
                _add_func(p, loc)
            if p.is_scout():
                # The scout's ray beyond \p loc opens up (on delete) or is
                # newly blocked (on add); update each square along it.
                for srch in opp:
                    if srch in plyr_locs: break
                    if add: _add_func(p, srch)
                    else: self._del_move(p, srch)
                    # Perform second since could still attack
                    if srch in other_locs: break

    @staticmethod
    def _make_move_key(orig: Location, new: Location):
        # Moves are uniquely identified by their (origin, destination) pair.
        return orig, new

    def is_empty(self, cyclic_moves: Set[Move] = None) -> bool:
        r""" Returns \p True if the \p MoveSet is empty """
        if cyclic_moves is not None and cyclic_moves:
            avail = set(self.avail.values())
            # If available larger than cyclic, definitely not empty move set
            if len(avail) > len(cyclic_moves): return False
            # Check if each available moves in cyclic. If any not in there, not empty move set
            for a_m in avail:
                for c_m in cyclic_moves:
                    if Move.is_identical(a_m, c_m):
                        break
                else:
                    return False
            return True
        return not bool(self.avail)

    def __iter__(self):
        return iter(self.avail.values())
class Player:
    r""" Represents one of the two players """

    def __init__(self, color: Color):
        r"""
        :param color: Color of the player
        """
        self._color = color
        # noinspection PyTypeChecker
        self._move_set = None  # type: MoveSet
        # Maps Location -> Piece for constant-time lookup of this player's pieces.
        self._locs = dict()
        self._pieces = set()

    @property
    def color(self) -> Color:
        r""" Accessor for the \p Player's \p Color. """
        return self._color

    @property
    def num_pieces(self) -> int:
        r""" Accessor for number of pieces the player has """
        return len(self._pieces)

    @property
    def move_set(self) -> MoveSet:
        r""" Accessor for the \p Player's \p MoveSet"""
        return self._move_set

    def add_piece(self, piece: Piece, other: 'Player' = None) -> None:
        r""" Add \p piece to \p Player's set of pieces """
        assert piece not in self._pieces, "Duplicate piece"
        assert piece.loc not in self._locs, "Two pieces in same location"
        self._pieces.add(piece)
        self._locs[piece.loc] = piece
        # The move set is only updated when the opposing player is supplied.
        if other is not None:
            assert self._color != other.color
            self.move_set.add_piece(piece, self._locs, other._locs)

    def delete_piece_info(self, piece: Piece, other: 'Player') -> None:
        r""" Remove \p piece from the \p Player's set of pieces """
        self._pieces.remove(piece)
        del self._locs[piece.loc]
        self.move_set.del_piece(piece, self._locs, other._locs)

    def delete_moveset_info(self, loc: Location, other: 'Player') -> None:
        r""" Update the MoveSet information after deleting a piece at Location \p loc """
        assert self._color != other.color
        self.move_set.add_moves_after_delete(loc, self._locs, other._locs)

    def update_moveset_after_add(self, loc: Location, other: 'Player') -> None:
        r"""
        When adding a piece (i.e., moving it and placing it back down), some previously valid moves
        become blocked. This method updates \p MoveSet to accomodate that.
        :param loc: \p Location where piece was placed
        :param other: Other player
        """
        assert self._color != other.color
        # pylint: disable=protected-access
        self.move_set.remove_moves_after_add(loc, self._locs, other._locs)

    def has_flag(self) -> bool:
        r""" Returns True if the player has a flag """
        flag = Rank.flag()
        return any(p.rank == flag for p in self._pieces)

    def get_piece_at_loc(self, loc: Location) -> Optional[Piece]:
        r""" Returns the piece at the specified location. If no piece is there, returns None """
        try: return self._locs[loc]
        except KeyError: return None

    def has_move(self, piece: Piece, new_loc: Location) -> bool:
        r""" Returns \p True if the player has a move for the piece to the specified \p Location """
        assert piece is not None
        return self.move_set.has_move(piece, new_loc)

    def is_valid_next(self, m: Move) -> bool:
        r"""
        Checks whether move \p m is in the player's \p MoveSet
        :param m: \p Move to check
        :return: True if \p m is a valid next move.
        """
        return m in self.move_set

    def get_move(self, piece: Piece, new_loc: Location) -> Optional[Move]:
        r""" Returns the \p Move (or None) for the piece to the specified \p Location """
        assert piece is not None
        return self.move_set.get_move(piece, new_loc)

    def piece_locations(self) -> Set[Location]:
        r""" Location of all of the \p Player's pieces """
        set_locs = set(self._locs.keys())
        assert len(set_locs) == len(self._pieces)
        return set_locs

    def pieces(self) -> Generator[Piece, None, None]:
        r""" Generator that yields the Player's pieces """
        for p in self._pieces:
            yield p

    def build_move_set(self, other: 'Player'):
        r""" Construct the move set of the player """
        assert self._color != other.color
        self._move_set = MoveSet.build(self._pieces, self._locs, other._locs)

    def verify_piece_set(self, piece_set: Board.PieceSet) -> bool:
        r"""
        Verify that the player piece information is in compliance with the \p Board \p PieceSet
        :param piece_set: Piece set maximum counts
        :return: True if the player's piece set information is in compliance
        """
        pieces_by_rank = dict()
        # Count the number of pieces for each rank
        for p in self._pieces:
            try: pieces_by_rank[p.rank] += 1
            except KeyError: pieces_by_rank[p.rank] = 1
        res = True
        for r in Rank.get_all():
            if r in pieces_by_rank and pieces_by_rank[r] > piece_set.get_rank_count(r):
                logging.warning("Color %s has too many pieces of rank: \"%s\"", self._color.name, r)
                res = False
        return res

    def get_random_move(self) -> Move:
        r"""
        Selects a piece to move uniformly at random. Then select the move from that piece's
        available moves uniformly at random.
        :return: Randomly selected move
        """
        # Group the available moves by piece so each piece is equally likely
        # to be chosen regardless of how many moves it has.
        move_dict = dict()
        keys = []
        for m in self.move_set.avail.values():
            try:
                move_dict[m.piece].append(m)
            except KeyError:
                keys.append(m.piece)
                move_dict[m.piece] = [m]
        key = random.choice(keys)
        return random.choice(move_dict[key])
| ZaydH/stratego | src/stratego/player.py | player.py | py | 14,932 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "board.Board",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "piece.Color",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "typing.Set",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "piece.Piece",
"line_number": 3... |
42162211409 | """tilltheend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from forever import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    # Authentication.
    path('login', views.login_request, name='login'),
    path('logout', views.logout_request, name='logout'),
    path('register', views.register, name='register'),
    # Landing page.
    path('', views.index, name='home'),
    # Utility tools.
    path('todo', views.todoadd, name='todo'),
    path('translate', views.translate, name='translate'),
    path('texttospech', views.texttospech, name='texttospech'),
    path('qrcode', views.qrcode, name='qrcode'),
    path('weather', views.weather, name='weather'),
    path('download', views.download_video, name='download'),
    # Per-item actions keyed by primary key.
    path('delete/<int:id>', views.delete, name='delete'),
    path('doing/<int:id>', views.doing, name='doing'),
    path('finish/<int:id>', views.finish, name='finish'),
    path('history/<int:id>', views.history, name='history'),
    path('news', views.news, name='news'),
]

# Serve uploaded media files through Django in development only.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
{
"api_name": "django.urls.path",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "forever.views.login_request",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "forever.views",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "djang... |
27551842247 | #K-Nearesst Neighbour
#importing the Librares
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Load the dataset; use the CSV's first column as the row index.
data=pd.read_csv('./Dataset/Classified Data',index_col=0)

# Standardize the feature values (KNN is distance-based, so scaling matters).
from sklearn.preprocessing import StandardScaler
# Create a StandardScaler object.
scaler=StandardScaler()
# Fit on all columns except the dependent 'TARGET CLASS' column, then
# transform them to standardized (zero-mean, unit-variance) values.
scaler.fit(data.drop('TARGET CLASS',axis=1))
scaled_features=scaler.transform(data.drop('TARGET CLASS',axis=1))
# The scaled array has no index or column names, so rebuild a DataFrame using
# the original feature column names (all columns except the target).
df_feat=pd.DataFrame(scaled_features,columns=data.columns[:-1])
#sns.pairplot(data,hue='TARGET CLASS')

# Split into train and test sets.
from sklearn.model_selection import train_test_split
# First argument: independent features (input); second: dependent target (output).
# NOTE(review): no random_state is set, so the split (and all results below)
# differ between runs -- confirm whether reproducibility is wanted.
x_train,x_test,y_train,y_test=train_test_split(scaled_features,data['TARGET CLASS'])

# K-nearest-neighbours classifier.
from sklearn.neighbors import KNeighborsClassifier
# Start with K=1.
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(x_train,y_train)
pred=knn.predict(x_test)

# Use the confusion matrix to see which values were correctly and
# incorrectly predicted.
from sklearn.metrics import classification_report,confusion_matrix
print(confusion_matrix(pred,y_test))
# Report accuracy, precision, recall, and f1 scores.
print(classification_report(pred,y_test))

# Compute the error rate for K = 1..39 to pick the best K (elbow method).
error_rate=[]
for i in range(1,40):
    knn=KNeighborsClassifier(n_neighbors=i)
    knn.fit(x_train,y_train)
    pred=knn.predict(x_test)
    error_rate.append(np.mean(pred!=y_test))

# Plot the error rate vs the K values calculated above.
plt.figure(figsize=(10,6))
plt.plot(range(1,40),error_rate,linestyle="dashed",marker="o",markersize=10,
    markerfacecolor="red")
plt.title("Error Rate Graph")
plt.xlabel("K-value")
plt.ylabel("Error_Rate")

# Refit with the K chosen from the plot (here K=24, after which the error
# rate settles).
knn=KNeighborsClassifier(n_neighbors=24)
knn.fit(x_train,y_train)
pred=knn.predict(x_test)
# Confusion matrix for the chosen K.
print(confusion_matrix(y_test,pred))
# Classification report for the chosen K -- accuracy should improve vs K=1.
print(classification_report(y_test,pred))
| kamarajanis/Machine-Learning | K_Nearest_Neighbor/k-nearest.py | k-nearest.py | py | 2,535 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": ... |
7241271696 | ####################
# Joint distribution of Ask/Bid Qty
####################
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import axes3d
data_directory = 'data/xFGBL'
img_directory = 'images/'
# NOTE(review): data_file is unused -- the loop below processes every file in
# data_directory. Confirm whether a single-file mode was intended.
data_file = 'xFGBL20130702.pkl'

# Plot a 3-D wireframe of the Ask/Bid quantity joint distribution per file.
for fn in os.listdir(data_directory):
    with open(os.path.join(data_directory, fn), 'rb') as fh:
        r = pickle.load(fh)
    X = r['AskQty']
    Y = r['BidQty']
    bins = np.arange(0, 600, 20)
    # NOTE(review): 'normed' was removed in NumPy 1.24; on modern NumPy use
    # density=True instead.
    hist, xedges, yedges = np.histogram2d(Y, X, bins=bins, normed=True)
    fig = plt.figure()
    fig.suptitle(fn, fontsize=20)
    ax = fig.add_subplot(111, projection='3d')
    # Bin centers (edges shifted by a quarter bin) for the wireframe grid.
    X, Y = np.meshgrid(xedges[:-1]+0.25, yedges[:-1]+0.25)
    ax.plot_wireframe(X, Y, hist)
    plt.savefig(os.path.join(img_directory, fn + '.png'))
    plt.close(fig)  # Free the figure; otherwise memory grows with each file.
| maroxe/SchoolProjects | EA/joint_distribution.py | joint_distribution.py | py | 1,286 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.listdir",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number":... |
74992294908 | from django.core.exceptions import ValidationError
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.contrib.postgres.fields import ArrayField
from udemy.apps.core.models import TimeStampedBase, OrderedModel, CreatorBase
from udemy.apps.course.models import Course
from udemy.apps.module.models import Module
from udemy.apps.quiz.annotations import QuizAnnotations
class Quiz(TimeStampedBase, OrderedModel):
    # Display title and long-form description shown to students.
    title = models.CharField(_('Title'), max_length=200)
    description = models.TextField(_('Description'))
    # Workflow flags: a quiz starts as an unpublished draft.
    is_published = models.BooleanField(default=False)
    is_draft = models.BooleanField(default=True)
    is_timed = models.BooleanField(default=False)
    # Minimum score, as a percentage (0-100), required to pass the quiz.
    pass_percent = models.PositiveIntegerField(validators=[MaxValueValidator(100)])
    module = models.ForeignKey(
        Module,
        related_name='quizzes',
        on_delete=models.CASCADE,
    )
    course = models.ForeignKey(
        Course,
        related_name='quizzes',
        on_delete=models.CASCADE,
    )

    # Ordering scope -- presumably consumed by OrderedModel; confirm there.
    order_in_respect = ('course', 'module')
    annotation_class = QuizAnnotations()
class Question(TimeStampedBase, OrderedModel):
    # The question prompt shown to the student.
    question = models.TextField()
    # Feedback displayed after the question is answered.
    feedback = models.TextField()
    # Candidate answers; correct_response is a 1-based index into this list.
    answers = ArrayField(models.TextField())
    # NOTE(review): unit/semantics of max_time are not evident here -- confirm.
    max_time = models.PositiveIntegerField(default=0)
    quiz = models.ForeignKey(
        Quiz,
        related_name='questions',
        on_delete=models.CASCADE
    )
    course = models.ForeignKey(
        Course,
        related_name='questions_quiz',
        on_delete=models.CASCADE,
    )
    correct_response = models.IntegerField(validators=[MinValueValidator(1)])

    order_in_respect = ('quiz',)

    def save(self, *args, **kwargs):
        """Validate ``correct_response`` bounds before saving.

        Field validators only run through full_clean(), so enforce the
        1-based index bounds here as well: previously only the upper bound
        was checked, letting values below 1 slip into the database despite
        the declared MinValueValidator(1).
        """
        if not 1 <= self.correct_response <= len(self.answers):
            raise ValidationError({'correct_response': 'invalid response'})
        super().save(*args, **kwargs)
class QuizRelation(CreatorBase, TimeStampedBase):
    # Tracks one user's (creator's) progress through one quiz.
    quiz = models.ForeignKey(Quiz, on_delete=models.CASCADE)
    done = models.BooleanField(default=False)

    class Meta:
        # Each user may have at most one relation row per quiz.
        constraints = [
            models.UniqueConstraint(fields=('creator', 'quiz'), name='unique quiz relation')]
| gabrielustosa/udemy-old | udemy/apps/quiz/models.py | models.py | py | 2,272 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "udemy.apps.core.models.TimeStampedBase",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "udemy.apps.core.models.OrderedModel",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 14,
"usage_type": ... |
17334218878 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import re,os,sys
import random
import argparse
# Command-line interface: --per sets the test-set percentage, --file the input file.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--per', dest='per', type=int, default=10, help='ratio of test set (%)')
parser.add_argument('--file', dest='file', type=str, default='data_from_USPTO_utf8_converted_clear', help='input file')
args = parser.parse_args()
params = vars(args)
print(params)

file = params['file']
test_percent = params['per']

# select n% as test set
def select_file(fin,test_p):
lines = open(fin,'r+').readlines()[1:] #remove the first title line and the last blank line
writer1= open(fin+'_train', 'w')
writer2= open(fin+'_test', 'w')
all_num = len(lines)
test_num = int(all_num*(test_p*0.01))
print('all num: %d' %all_num)
print('test num: %d' %test_num)
print('train num: %d' %(all_num-test_num))
print('slecting...')
test_set = random.sample(lines, test_num)
for item in test_set:
lines.remove(item)
print('selected')
writer1.writelines(lines)
writer2.writelines(test_set)
select_file(file, test_percent) | jshmjs45/data_for_chem | codes/select_file.py | select_file.py | py | 1,111 | python | en | code | 13 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentDefaultsHelpFormatter",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "random.sample",
"line_number": 28,
"usage_type": "call"
}
] |
11706943391 | from string import ascii_lowercase, ascii_uppercase
from utils.data import read_data_as_list
characters = list(ascii_lowercase) + list(ascii_uppercase)
priority_lookup = dict(zip(characters, range(1, len(characters) + 1)))
rucksacks = read_data_as_list(day=3)
# Part 1
total = 0
for rucksack in rucksacks:
midpoint = len(rucksack) // 2
compartment_1, compartment_2 = rucksack[:midpoint], rucksack[midpoint:]
common_item = set(compartment_1).intersection(compartment_2).pop()
priority = priority_lookup[common_item]
total += priority
print(f'Part 1 Solution: {total}')
# Part 2
total = 0
for i in range(0, len(rucksacks), 3):
rucksack_1, rucksack_2, rucksack_3 = rucksacks[i: i+3]
common_item = set(rucksack_1).intersection(rucksack_2).intersection(rucksack_3).pop()
priority = priority_lookup[common_item]
total += priority
print(f'Part 2 Solution: {total}')
| stuartjwright/advent_of_code_2022 | day_03.py | day_03.py | py | 902 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "string.ascii_lowercase",
"line_number": 5,
"usage_type": "argument"
},
{
"api_name": "string.ascii_uppercase",
"line_number": 5,
"usage_type": "argument"
},
{
"api_name": "utils.data.read_data_as_list",
"line_number": 8,
"usage_type": "call"
}
] |
70675296187 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from ..pki.migrate_data import migrate_pki_data
class Migration(migrations.Migration):
dependencies = [
('ssl_pki', '0002_default_config'),
]
operations = [
migrations.RunPython(migrate_pki_data, migrations.RunPython.noop),
]
| ngageoint/exchange | exchange/sslpki/migrations/0001_migrate_pki_data.py | 0001_migrate_pki_data.py | py | 361 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.RunPython",
"line_number": 16,
"usage_type": "call"
},
{
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.