seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
14351239029 | import requests, uuid, json, argparse, sys
import langcodes
# This is for the safety of the key
from config import AZURE_KEY

# Add your key and endpoint
key = AZURE_KEY
endpoint = "https://api.cognitive.microsofttranslator.com"

# Creating an argument parser (fixed "Tranlate" typo in the help text)
parser = argparse.ArgumentParser(description="Translate text from one language to another")
parser.add_argument('input_file', help='Path to the input file')
parser.add_argument("-t", "--to", help="translation language")
args = parser.parse_args()

# getting translation language and input file from arguments
input_file = args.input_file
translation_language = args.to
# Resolve a human language name (e.g. "french") to its ISO code (e.g. "fr").
translation_language_code = langcodes.find(translation_language.lower()).language

with open(input_file, 'r') as file:
    text = file.read()

# location, also known as region.
# required if you're using a multi-service or regional (not global) resource. It can be found in the Azure portal on the Keys and Endpoint page.
location = "centralindia"

path = '/translate'
constructed_url = endpoint + path

params = {
    'api-version': '3.0',
    'to': [translation_language_code]
}

headers = {
    'Ocp-Apim-Subscription-Key': key,
    # location required if you're using a multi-service or regional (not global) resource.
    'Ocp-Apim-Subscription-Region': location,
    'Content-type': 'application/json',
    'X-ClientTraceId': str(uuid.uuid4())
}

# You can pass more than one object in body.
body = [{
    'text': text
}]

request = requests.post(constructed_url, params=params, headers=headers, json=body)
# Fail fast with a readable HTTP error instead of an opaque IndexError/KeyError
# below when the service rejects the request (bad key, region, or language).
request.raise_for_status()
response = request.json()

translations = response[0]['translations']
translated_text = [translation['text'] for translation in translations]

# Create the output file name
output_file = f"translated_{translation_language}.txt"

# Write the translated content to the output file
with open(output_file, 'w') as file:
    file.write(translated_text[0])
print(f"Translation completed. Translated content saved to: {output_file}") | SonuLohani-1/MyTranslator | project.py | project.py | py | 1,972 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "config.AZURE_KEY",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "langcodes.find",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
... |
1498857319 | #!/usr/bin/env python3
# dpw@Darryls-iMac.localdomain
# 2023-03-25 22:51:18
#
def dms2dd(d, m, s):
    """Convert an angle given as degrees, minutes, seconds to decimal degrees."""
    minutes_part = m / 60
    seconds_part = s / 3600
    return d + minutes_part + seconds_part
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(
        description="Enter an angle in degrees, minutes and seconds"
    )
    parser.add_argument(
        "numbers",
        metavar="degrees, minutes, seconds",
        type=float,
        nargs="+",
        help="the degrees, minutes and seconds of an angle",
    )
    args = parser.parse_args()

    # Guard against too few values: the original indexed numbers[0..2]
    # unconditionally and crashed with IndexError when fewer than three
    # values were supplied (nargs="+" accepts any count >= 1).
    if len(args.numbers) < 3:
        parser.error("three values are required: degrees minutes seconds")

    d = args.numbers[0]
    m = args.numbers[1]
    s = args.numbers[2]
    dd = dms2dd(d, m, s)
    print(f"{dd:.3f}")
| darrylwest/python-play | maths/dms2dd.py | dms2dd.py | py | 645 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
}
] |
14204687745 | from django.shortcuts import render,redirect
from django.http import HttpResponse
from .models import Income, Expense
from .forms import Update_E,Update_I
def Index(request):
    """Render the dashboard with every income and expense record."""
    context = {
        'income': Income.objects.all(),
        'expense': Expense.objects.all(),
    }
    return render(request, "index.html", context)
# add view
def add_E(request):
    """Create an Expense from POSTed form fields, then return to the index."""
    if request.method != 'POST':
        # Non-POST access is treated as an error, same as before.
        return redirect('error_page')
    Expense(
        expense=request.POST.get('expense'),
        amount=request.POST.get('amount'),
        date=request.POST.get('date'),
    ).save()
    return redirect('/Index')
# add view
def add_I(request):
    """Create an Income from POSTed form fields, then return to the index."""
    if request.method != 'POST':
        # Non-POST access is treated as an error, same as before.
        return redirect('error_page')
    Income(
        income=request.POST.get('income'),
        amount=request.POST.get('amount'),
    ).save()
    return redirect('/Index')
# update view
def update_I(request, pk):
    """Edit the Income row identified by *pk* via the Update_I form."""
    record = Income.objects.get(id=pk)
    form = Update_I(request.POST or None, instance=record)
    if not form.is_valid():
        # First visit (GET) or invalid submission: show the form again.
        return render(request, 'Pages/update_I.html', {'form': form})
    form.save()
    return redirect('/Index')
# update view
def update_E(request, pk):
    """Edit the Expense row identified by *pk* via the Update_E form."""
    record = Expense.objects.get(id=pk)
    form = Update_E(request.POST or None, instance=record)
    if not form.is_valid():
        # First visit (GET) or invalid submission: show the form again.
        return render(request, 'Pages/update_E.html', {'form': form})
    form.save()
    return redirect('/Index')
# delete view
def delete_E(request, pk):
    """Remove the Expense row with primary key *pk* and return to the index."""
    Expense.objects.get(id=pk).delete()
    return redirect('/Index')
# delete view
def delete_I(request, pk):
    # Look up the Income row by primary key; raises Income.DoesNotExist if absent.
    delete = Income.objects.get(id=pk)
    # Permanently remove the record from the database.
    delete.delete()
return redirect('/Index') | Gomolemo-M-Motlhamme/Finance-IO | Finance_IO/Index/views.py | views.py | py | 1,900 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "models.Income.objects.all",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "models.Income.objects",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "models.Income",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "model... |
11036914150 | from PIL import Image
import glob
import os
# Collect every front.png / back.png sprite under the current directory tree.
frontPicPaths = glob.glob(os.getcwd() + '/**/front.png', recursive=True)
backPicPaths = glob.glob(os.getcwd() + '/**/back.png', recursive=True)
files = frontPicPaths + backPicPaths
print(files)

for f in files:
    try:
        im = Image.open(f)

        # Swap palette entries 1 and 2 (each entry is an RGB triple, so
        # entry 1 occupies indices 3..5 and entry 2 indices 6..8).
        paletteArray = im.getpalette()
        paletteArray[3:6], paletteArray[6:9] = paletteArray[6:9], paletteArray[3:6]
        im.putpalette(paletteArray)

        # Change image data to reflect the new palette: pixels that pointed
        # at entry 1 now point at entry 2 and vice versa, so the picture
        # looks unchanged while the palette order is normalized.
        pixels = im.load()  # create the pixel map
        for i in range(im.size[0]):  # for every pixel:
            for j in range(im.size[1]):
                if pixels[i, j] == 1:
                    pixels[i, j] = 2
                elif pixels[i, j] == 2:
                    pixels[i, j] = 1
        im.save(f)
    except Exception as exc:
        # Was a bare `except:` that also swallowed KeyboardInterrupt/SystemExit
        # and hid the cause; report the actual error instead.
        print("Unable to process " + f + ": " + str(exc))
| tshadowknight/proto_crystal | gfx/pokemon/swapPal.py | swapPal.py | py | 1,183 | python | en | code | 17 | github-code | 1 | [
{
"api_name": "glob.glob",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 6,
"usa... |
14839102625 | """
Section 1
Multithreading - Thread(1) - Basic
Keyword - Threading basic
"""
import logging
import threading
import time
# Worker function executed by the sub thread.
def thread_func(name):
    """Log a start message, idle for 3 seconds, then log a finish message."""
    logging.info(f"Sub-Thread {name}: starting")
    time.sleep(3)
    logging.info(f"Sub-Thread {name}: finishing")
# Main entry point
if __name__ == "__main__":
    # Logging format setup
    format = "%(asctime)s: %(message)s"
    logging.basicConfig(format=format, level=logging.INFO, datefmt="%H:%M:%S")

    logging.info("Main-Thread: before creating thread")

    # Build the worker thread; args must be a tuple, hence the trailing comma.
    x = threading.Thread(target=thread_func, args=('First',))

    logging.info("Main-Thread: before running thread")

    # Start the sub thread.
    x.start()

    # Log *before* blocking: the original printed the "wait" line only after
    # join() had already returned (and misspelled the tag as "Main-Tread"),
    # which made the message meaningless. The original Korean comment here
    # was also broken across two lines, producing a syntax error.
    logging.info("Main-Thread: wait for the thread to finish")

    # join() blocks until the sub thread's work is done.
    x.join()

    logging.info("Main-Thread: all done")
| taxijjang/develop_study | ๊ณ ์๊ฐ ๋๋ ํ์ด์ฌ/py_ad_1_3.py | py_ad_1_3.py | py | 929 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "logging.info",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_... |
40926512907 | #!/usr/bin/env python
import argparse
import os
import subprocess
def main():
    """Build and run a ROAST multiple-alignment command for one reference chromosome."""
    # command-line arguments
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-top_dir', help='top level directory of chromo grouped mafs', required=True)
    arg_parser.add_argument('-ref', help='Reference species name', required=True)
    arg_parser.add_argument('-chr_tag', help='ID for ref chromosome', type=int, required=True)
    arg_parser.add_argument('-n_thou', default=0, type=int)
    args = arg_parser.parse_args()

    # Locate the per-chromosome maf directory from the tag / thousands offset.
    chromo_dirs = [args.top_dir + d + '/' for d in os.listdir(args.top_dir) if d.startswith('BrownTrout')]
    chromo_index = (args.n_thou * 1000) + (args.chr_tag - 1)
    maf_dir = chromo_dirs[chromo_index]
    mafs = maf_dir + '*.maf'

    # Output locations (created on demand).
    out_dir = maf_dir + 'aligned/'
    out_maf = out_dir + maf_dir.split('/')[-2] + '.multiple.maf'
    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)
    temp_dir = out_dir + 'roast_temp'
    if not os.path.isdir(temp_dir):
        os.mkdir(temp_dir)

    # Species tree passed through to roast (quotes are part of the argument).
    tree = ('"(((((BrownTrout AtlanticSalmon) (ArcticChar (RainbowTrout (CohoSalmon SockeyeSalmon)))) '
            'DanubeSalmon) EuropeanGrayling) NorthernPike)"')

    ref_name = maf_dir.split('/')[-2].split('.')[0]
    os.chdir(maf_dir)

    # Assemble and submit the roast command line.
    roast_cmd = 'roast + T=' + temp_dir + ' E=' + ref_name + ' ' + tree + ' ' + mafs + ' ' + out_maf
    subprocess.call(roast_cmd, shell=True)
if __name__ == '__main__':
    # Script entry point: run the alignment pipeline when invoked directly.
    main()
| henryjuho/sal_enhancers | genome_alignment/roast_fish.py | roast_fish.py | py | 1,545 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_... |
35577719590 | # Program for Connection Setup
# Depreciated
# Hard stop: importing this module raises immediately, so everything below
# (imports included) is dead code kept only for reference.
raise DeprecationWarning
import os
from Crypto.PublicKey import RSA
from Crypto import Random
import hashlib
from Crypto.Cipher import PKCS1_OAEP, AES
import select
from TSE import tse
from Flags import flags
from Chromos import Chromos

# Console color/logging helper used for status messages below.
o = Chromos()
class Connection():
    """DEPRECATED RSA/AES handshake helper for one side of a socket link.

    NOTE(review): the module raises DeprecationWarning at import time, so no
    code in this class is reachable in practice; documented for reference.

    Wire format: messages are byte strings whose fields are joined with the
    separator b':0:'.

    Security notes (review): 1024-bit RSA keys and MD5 digests are both
    considered broken for new designs; this is legacy code.
    """

    def __init__(self, conn):
        # Directories holding the server-side / client-side key files.
        self.server_keys_path = "./SKeys/"
        self.client_keys_path = "./CKeys/"
        self.conn = conn  # connected socket-like object (recv/sendall)
        self.timeout = 2  # seconds each select() waits for peer data
        return

    def load_keys(self, path, prefix='') -> tuple:
        """Load (public, private) key bytes from *path*, generating a fresh
        RSA pair first if either file is missing.

        NOTE(review): the original annotated the return type as ``None``
        although a (bytes, bytes) tuple is returned; annotation corrected.
        """
        path = ''.join([path, '/', prefix])
        if(os.path.isfile(path +"public.key") is False or os.path.isfile(path + "private.key") is False): self.generate_key(path, prefix)
        with open(''.join([path, "public.key",]), 'r') as f:
            public = f.read().encode()
        with open(''.join([path, "private.key"]), 'r') as f:
            private = f.read().encode()
        return public, private

    def load_key(self, keypath):
        """Return the raw bytes of a single key file."""
        f = open(keypath, "r")
        key = f.read().encode()
        f.close()
        return key

    def generate_key(self, path, prefix):
        """Create a 1024-bit RSA key pair and write it to
        ``<path><prefix>public.key`` / ``<path><prefix>private.key``."""
        random = Random.new().read
        RSAKey = RSA.generate(1024, random)
        public = RSAKey.publickey().exportKey()
        private = RSAKey.exportKey()
        with open(path + prefix + "public.key", "w") as f:
            f.write(public.decode())
            f.flush()
        with open(path + prefix + "private.key", "w") as f:
            f.write(private.decode())
            f.flush()
        o.info_y("RSA Keys Generated! ")
        return True

    def rand_gen(self, n):
        """Decimal-string rendering of *n* random bytes (used as a nonce)."""
        return str(int.from_bytes(os.urandom(n), byteorder='big'))

    def ret_hash(self, __data):
        """MD5 hex digest of *__data*, returned as bytes."""
        return hashlib.md5(__data).hexdigest().encode()

    def ret_enc(self, __key):
        """PKCS1-OAEP cipher object built from an exported RSA key."""
        __RSAKey = RSA.importKey(__key)
        return PKCS1_OAEP.new(__RSAKey)

    def connection_setting_server(self, username):
        """Server side of the handshake.

        Expects the client hello ``CPub:0:hash(SPub)``, replies with an
        RSA-encrypted session nonce + AES key plus a TSE re-auth blob, then
        verifies the client's echoed MD5s and exchanges usernames.

        Returns (True, tse_key, CPub, peer_username) on success, otherwise
        (False, 0, []).
        """
        SPub, SPri = self.load_keys(self.server_keys_path)
        hashSPub = self.ret_hash(SPub)
        # Wait (up to self.timeout seconds) for the client hello.
        rs, _, _ = select.select([self.conn], [], [], self.timeout)
        if(rs):
            data = self.conn.recv(4096)
            __data_splt_temp = data.split(b':0:')
            # Malformed hello: no separator or too few fields.
            if(__data_splt_temp == [data] or len(__data_splt_temp) < 2): return False, 0, []
            CPub = __data_splt_temp[0]
            tmp_hashSPub = __data_splt_temp[1]
            # The client must prove it holds our current public key.
            if(tmp_hashSPub==hashSPub):
                session = self.rand_gen(8).encode()
                __aes = os.urandom(16) # AES Key
                __enc = tse.Key(aeskey=__aes) # Transport Security Encryption
                re_auth = __enc.exportKey().encode() # TSE
                hashCPub = self.ret_hash(CPub)
                encryptor = self.ret_enc(CPub)
                # Reply: RSA_CPub(hash(CPub):0:session:0:aes) :0: re_auth
                self.conn.sendall(b''.join([encryptor.encrypt(b''.join([hashCPub, b':0:', session, b':0:', __aes])), b':0:', re_auth]))
                # Both sides now compare MD5s of session / re_auth.
                session = self.ret_hash(session)
                re_auth = self.ret_hash(re_auth)
                rs, _, _ = select.select([self.conn], [], [], self.timeout)
                if(rs):
                    Get = self.conn.recv(4096).split(b':0:')
                    tmp_session = Get[0]
                    tmp_re_auth = Get[1]
                    if(tmp_session == session):
                        # print("session matched")
                        if(tmp_re_auth == re_auth):
                            # print("re_auth matched")
                            # Exchange Usernames
                            encryptor = self.ret_enc(CPub)
                            self.conn.sendall(encryptor.encrypt(''.join([flags.USERNAME_PREFIX_POSTFIX, str(username), flags.USERNAME_PREFIX_POSTFIX]).encode()))
                            decryptor = self.ret_enc(SPri)
                            data = self.conn.recv(4096)
                            data = decryptor.decrypt(data)
                            __susername = data.split(flags.USERNAME_PREFIX_POSTFIX.encode())[1].decode()
                            print(__susername)
                            return True, __enc, CPub, __susername
        # Any timeout / mismatch above falls through to failure.
        print('Failed!')
        return False, 0, []

    def connection_setting_client(self, username):
        """Client side of the handshake (mirror of the server method).

        Sends ``CPub:0:hash(SPub)``, decrypts the server's reply with our
        private key, echoes the MD5s back, then exchanges usernames.

        Returns (True, tse_key, peer_username) on success, otherwise
        (False, None, '').
        """
        CPub, CPri = self.load_keys(self.client_keys_path, "client_")
        # The server's public key must have been provisioned out of band.
        if(os.path.isfile(self.client_keys_path + "public.key")==False):
            o.error_info("Server Public Key Not Found! ")
            raise FileNotFoundError
        SPub = self.load_key(self.client_keys_path + "public.key")
        hashSPub = self.ret_hash(SPub)
        hashCPub = self.ret_hash(CPub)
        # Hello: our public key plus the hash of the server key we expect.
        self.conn.sendall(CPub + b':0:' + hashSPub)
        rs, _, _ = select.select([self.conn], [], [], self.timeout)
        if(rs):
            Get = self.conn.recv(4096)
            __cget_array = Get.split(b':0:')
            Get = __cget_array[0]
            re_auth = __cget_array[1]
            decryptor = self.ret_enc(CPri)
            Get = decryptor.decrypt(Get)
            __cget_array = Get.split(b':0:')
            tmp_hashCPub = __cget_array[0]
            session = __cget_array[1]
            __aes = __cget_array[2]
            # The server must prove it saw our public key unaltered.
            if(tmp_hashCPub == hashCPub):
                __enckey = re_auth.decode()
                session = self.ret_hash(session)
                re_auth = self.ret_hash(re_auth)
                self.conn.sendall(b''.join([session, b':0:', re_auth]))
                # Exchange Usernames and UIDs
                encryptor = self.ret_enc(SPub)
                self.conn.sendall(encryptor.encrypt(''.join([flags.USERNAME_PREFIX_POSTFIX, str(username), flags.USERNAME_PREFIX_POSTFIX]).encode()))
                decryptor = self.ret_enc(CPri)
                data = self.conn.recv(4096)
                data = decryptor.decrypt(data)
                __susername = data.split(flags.USERNAME_PREFIX_POSTFIX.encode())[1].decode()
                print(__susername)
                return True, tse.Key(aeskey=__aes, key=__enckey), __susername
        print('Failed!')
        return False, None, ''
    pass
| devanshshukla99/Juno-ReDesign-Sockets-Communication | Connection/connection.py | connection.py | py | 6,157 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "Chromos.Chromos",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "Crypto.Random.new",
"... |
15518284763 | import requests
from common import URL, GRAPHQL_URL, graph_query
def test_check_introspect_fields():
fields = ['pastes', 'paste', 'systemUpdate', 'systemDiagnostics', 'systemDebug', 'systemHealth', 'users', 'readAndBurn', 'search', 'audits', 'deleteAllPastes', 'me']
r = requests.get(URL + '/difficulty/easy')
assert r.status_code == 200
query = """
query {
__schema {
queryType {
fields {
name
}
}
}
}
"""
r = graph_query(GRAPHQL_URL, query)
for field in r.json()['data']['__schema']['queryType']['fields']:
field_name = field['name']
assert field_name in fields
assert not field_name not in fields
fields.remove(field_name)
assert len(fields) == 0
def test_check_introspect_when_expert_mode():
    """Expert mode must reject introspection with an explicit error message."""
    query = """
    query {
        __schema {
            __typename
        }
    }
    """
    r = graph_query(GRAPHQL_URL, query, headers={"X-DVGA-MODE": 'Expert'})
    assert r.status_code == 200
    error_message = r.json()['errors'][0]['message']
    assert error_message == '400 Bad Request: Introspection is Disabled'
def test_check_introspect_mutations():
    """Easy mode: the schema's mutation type must expose exactly the expected fields."""
    fields = ['createUser', 'createPaste', 'editPaste', 'login', 'uploadPaste', 'importPaste', 'deletePaste']
    # Switch DVGA to easy mode so introspection is allowed.
    r = requests.get(URL + '/difficulty/easy')
    assert r.status_code == 200

    query = """
    query {
        __schema {
            mutationType {
                fields {
                    name
                }
            }
        }
    }
    """
    r = graph_query(GRAPHQL_URL, query)
    for field in r.json()['data']['__schema']['mutationType']['fields']:
        field_name = field['name']
        # Removing each name as we see it also catches duplicates. The old
        # `assert not field_name not in fields` line was a tautological
        # repeat of this assert and has been dropped.
        assert field_name in fields
        fields.remove(field_name)

    # Every expected mutation must have been advertised.
    assert len(fields) == 0
{
"api_name": "requests.get",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "common.URL",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "common.graph_query",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "common.GRAPHQL_URL",
"li... |
70737356834 | import paho.mqtt.client as mqtt
import json
from influxdb import InfluxDBClient
# Host running both the MQTT broker (port 1883) and InfluxDB (port 8086).
server = "54.87.52.22"

def on_connect(client, userdata, flags, rc):
    """paho-mqtt connect callback: log the result code and subscribe.

    Subscribing here (rather than once at startup) re-establishes the
    subscription automatically after a reconnect.
    """
    print("Connected with RC : " + str(rc))
    client.subscribe("cpu/2016146022/evt/light")
def on_message(client, userdata, msg):
    """paho-mqtt message callback: store the light reading in InfluxDB and
    drive the relay from it (ON above 150.0, OFF otherwise)."""
    payload = msg.payload
    print(msg.topic + " " + payload.decode('utf-8'))

    # One InfluxDB point per message, measurement named after the topic.
    point = {
        "measurement": msg.topic,
        "tags": {
            "host": "server01",
            "region": "us-west"
        },
        "fields": {
            "value": payload
        }
    }
    dbclient.write_points([point])

    # Threshold decision: bright readings switch the relay on.
    relay_command = "ON" if float(payload) > 150.0 else "OFF"
    client.publish("cpu/2016146022/cmd/relay", relay_command)
# InfluxDB client: host, port 8086, no credentials, database 'weatherDB2'.
dbclient = InfluxDBClient(server, 8086, None, None, 'weatherDB2')

# MQTT client wiring: register callbacks before connecting.
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message

# Connect to the broker (port 1883, 60 s keepalive) and block forever.
client.connect(server, 1883, 60)
client.loop_forever()
{
"api_name": "influxdb.InfluxDBClient",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "paho.mqtt.client.Client",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "paho.mqtt.client",
"line_number": 36,
"usage_type": "name"
}
] |
73944562912 | import enum
import geomdl
import math
import numpy as np
import pybullet as p
import scipy.optimize
from sklearn.decomposition import PCA
import torch
import pytorch_kinematics as pk
from scipy.spatial.transform import Rotation as R
def fibonacci_sphere(samples=1):
    """Distribute ``samples`` points quasi-uniformly on the unit sphere
    using the golden-angle (Fibonacci) spiral.

    :param samples: number of points to generate (>= 1)
    :return: (samples, 3) ndarray of unit-norm (x, y, z) points
    """
    points = []
    phi = math.pi * (3. - math.sqrt(5.))  # golden angle in radians

    # Denominator for the y spacing; max(..., 1) fixes the 0/0
    # ZeroDivisionError the original raised for samples == 1 (the default!),
    # placing the single point at the pole (0, 1, 0).
    span = max(samples - 1, 1)
    for i in range(samples):
        y = 1 - (i / float(span)) * 2  # y goes from 1 to -1
        radius = math.sqrt(1 - y * y)  # radius at y

        theta = phi * i  # golden angle increment

        x = math.cos(theta) * radius
        z = math.sin(theta) * radius

        points.append((x, y, z))
    return np.asarray(points)
def construct_T_from_position_quaterion(translation, quaternion):
    """
    Build the homogeneous transform T = [R(q), t; 0, 1] from a translation t
    and a quaternion q (pybullet convention).
    :param translation: length-3 position t
    :param quaternion: quaternion q, as accepted by pybullet
    :return: 4x4 numpy array
    """
    rotation = np.array(p.getMatrixFromQuaternion(quaternion)).reshape([3, 3])
    T = np.zeros([4, 4])
    T[:3, :3] = rotation
    T[:3, -1] = translation
    T[-1, -1] = 1
    return T
def construct_T_from_position_matrix(translation, matrix):
    """
    Build the homogeneous transform T = [R, t; 0, 1] from a translation t
    and a 3x3 rotation matrix R. Torch inputs yield a torch tensor (cloned),
    anything else yields a numpy array.
    """
    if type(translation) == torch.Tensor:
        T = torch.zeros([4, 4]).type_as(translation)
        T[:3, :3] = matrix
        T[:3, -1] = translation
        T[-1, -1] = torch.tensor(1).type_as(translation)
        # Clone so the caller gets an independent tensor.
        return torch.clone(T)
    T = np.zeros([4, 4])
    T[:3, :3] = matrix
    T[:3, -1] = translation
    T[-1, -1] = 1
    return T
def compute_joint_T(joint_angle, joint_axis):
    """Homogeneous 4x4 transform for a revolute joint rotated by
    ``joint_angle`` about ``joint_axis``.

    Works for both torch tensors (preserving the autograd graph) and
    numpy/float inputs. ``joint_axis`` is a length-3 vector with at most one
    non-zero entry; an all-zero axis denotes a fixed joint (identity).
    A negative axis entry flips the sign of the angle.

    Returns:
        transform: 4x4 matrix (torch.Tensor if ``joint_angle`` is one,
        otherwise a numpy ndarray).
    """
    assert(len(joint_axis)==3)
    nonzero_axis = np.nonzero(joint_axis)[0]
    if len(nonzero_axis) == 0:
        # Fixed joint: no rotation at all.
        if type(joint_angle) == torch.Tensor:
            return torch.eye(4)
        else:
            return np.eye(4)
    assert len(nonzero_axis) == 1
    # Flip the joint angle if joint axis is negative.
    if type(joint_angle) == torch.Tensor:
        # Torch branch: build the rotation from stacked scalars so gradients
        # flow through cos/sin back to joint_angle.
        joint_angle = torch.clone(joint_angle) * joint_axis[nonzero_axis[0]]
        cth, sth = torch.cos(joint_angle), torch.sin(joint_angle)
        tensor_0 = torch.zeros_like(joint_angle)
        tensor_1 = torch.ones_like(joint_angle)
        if nonzero_axis[0] == 0:
            # Rotation about x.
            R = torch.stack([
                torch.stack([tensor_1, tensor_0, tensor_0]),
                torch.stack([tensor_0, cth, -sth]),
                torch.stack([tensor_0, sth, cth])]).reshape(3, 3)
        elif nonzero_axis[0] == 1:
            # Rotation about y.
            R = torch.stack([
                torch.stack([cth, tensor_0, sth]),
                torch.stack([tensor_0, tensor_1, tensor_0]),
                torch.stack([-sth, tensor_0, cth])]).reshape(3, 3)
        elif nonzero_axis[0] == 2:
            # Rotation about z.
            R = torch.stack([
                torch.stack([cth, -sth, tensor_0]),
                torch.stack([sth, cth, tensor_0]),
                torch.stack([tensor_0, tensor_0, tensor_1])]).reshape(3, 3)
        else:
            raise AssertionError
        # block_diag appends the homogeneous 1 in the bottom-right corner.
        return torch.block_diag(R, tensor_1).type_as(joint_angle)
    else:
        # Numpy branch: fill the 4x4 in place.
        joint_angle = np.copy(joint_angle) * joint_axis[nonzero_axis[0]]
        cth, sth = np.cos(joint_angle), np.sin(joint_angle)
        H = np.zeros((4, 4))
        H[3, 3] = 1.
        # H[:3, :3] = R.from_euler(self.axis, self._theta).as_matrix()
        if nonzero_axis[0] == 0:
            # axis is x
            H[1, 1] = cth
            H[2, 2] = cth
            H[1, 2] = -sth
            H[2, 1] = sth
            H[0, 0] = 1.
        elif nonzero_axis[0] == 1:
            # axis is y
            H[0, 0] = cth
            H[2, 2] = cth
            H[2, 0] = -sth
            H[0, 2] = sth
            H[1, 1] = 1.
        elif nonzero_axis[0] == 2:
            # axis is z
            H[0, 0] = cth
            H[1, 1] = cth
            H[0, 1] = -sth
            H[1, 0] = sth
            H[2, 2] = 1.
        else:
            raise AssertionError
        return H
def convert_6d_rotation_to_flattened_matrix_torch(orn_6d_vec):
    """
    Gram-Schmidt a batch of 6D rotation encodings into rotation matrices.
    :param orn_6d_vec: 1D or 2D torch tensor whose last dimension is 6
    :return: (N, 9) tensor of row-major flattened 3x3 rotation matrices
    """
    # Normalize the input to shape (N, 6) and split into the two raw axes.
    vecs = torch.atleast_2d(orn_6d_vec).reshape(-1, 6)
    a1 = vecs[..., :3]
    a2 = vecs[..., 3:]
    # First basis vector: a1 normalized (nan_to_num guards zero norms).
    inv_n1 = torch.nan_to_num(1. / torch.norm(a1, dim=-1)).unsqueeze(1)
    b1 = a1 * inv_n1
    # Second: remove the b1 component from a2, then normalize.
    proj = torch.sum(b1 * a2, dim=1).unsqueeze(1)
    residual = a2 - b1 * proj
    inv_n2 = torch.nan_to_num(1. / torch.norm(residual, dim=-1)).unsqueeze(1)
    b2 = residual * inv_n2
    # Third completes the right-handed frame.
    b3 = torch.cross(b1, b2, dim=1)
    # Stack as columns: transpose turns the stacked rows into column vectors.
    rotation = torch.transpose(
        torch.stack([b1, b2, b3], b1.dim() - 1), dim0=-2, dim1=-1)
    return rotation.reshape(-1, 9)
def get_basis_from_3_normals(normals):
    '''
    Given three 6D normals vectors with positions of shape 3x(3+3), return a
    matrix with shape 3x4x3. The 4 dim1s are p, n, t1, t2, where p is the position
    component in the input, [n,t1,t2] is an orthonormal basis in 3D space.
    n is the normals component in the input, t1 is the basis that is parallel to
    the plane defined by the 3 p's, n is the basis perpendicular to the 3 p's
    :param normals: (3, 6) tensor; columns 0-2 are positions, 3-5 are normals
    :return: (3, 4, 3) tensor [p, n, t1, t2] per input row
    '''
    assert normals.shape == (3, 6)
    if type(normals) == torch.Tensor:
        device = normals.device
        # t1 = (n_y, -n_x, 0): perpendicular to each normal's xy-projection.
        t1 = torch.vstack([normals[:, 4],
                           -normals[:, 3],
                           torch.zeros(3).to(device)]).transpose(0, 1).type_as(normals)
        for i in range(3):
            # Degenerate case: a normal parallel to z makes t1 vanish;
            # nudge its x component so normalization below is well-defined.
            if torch.allclose(t1[i, :], torch.zeros(3).type_as(t1)):
                t1[i, 0] += 1
        # t2 completes the frame; then both tangents are unit-normalized.
        t2 = torch.cross(normals[:, 3:], t1, dim=1)
        t1 = (t1.transpose(0, 1)/torch.linalg.norm(t1, dim=1)).transpose(0, 1)
        t2 = (t2.transpose(0, 1)/torch.linalg.norm(t2, dim=1)).transpose(0, 1)
        # Assemble per-row [position, normal, t1, t2] stacks.
        ans = torch.stack([normals[:, :3].reshape(3, 1, 3),
                           normals[:, 3:].reshape(3, 1, 3),
                           t1.reshape(3, 1, 3),
                           t2.reshape(3, 1, 3)
                           ], dim=1).reshape([3, 4, 3])
        return ans
    else:
        # Numpy inputs are not supported by this implementation.
        raise NotImplementedError
def get_cross_matrix(p):
    """Return the 3x3 skew-symmetric ("hat") matrix of *p*, i.e. the matrix M
    with M @ v == cross(p, v).

    For torch inputs the matrix is now assembled with stack ops so gradients
    flow from the result back to *p*: the previous
    ``torch.tensor([[0., -p[2], ...]], requires_grad=...)`` call copied the
    element values into a fresh leaf tensor, silently severing the autograd
    graph.

    :param p: length-3 torch tensor or array-like
    :return: 3x3 torch.Tensor (torch input) or numpy ndarray (otherwise)
    """
    if isinstance(p, torch.Tensor):
        zero = torch.zeros((), dtype=p.dtype, device=p.device)
        return torch.stack([
            torch.stack([zero, -p[2], p[1]]),
            torch.stack([p[2], zero, -p[0]]),
            torch.stack([-p[1], p[0], zero]),
        ])
    return np.array([[0., -p[2], p[1]],
                     [p[2], 0., -p[0]],
                     [-p[1], p[0], 0.]])
def reformulate_to_soft_QP(Q_0, p_0, G_0, h_0, A_0, b_0,
                           inequality_constraint_weight,
                           equality_constraint_weight
                           ):
    '''
    DEPRECATED: the body raises DeprecationWarning immediately; everything
    after the raise is unreachable and kept only for reference.

    Transforms a QP of the form
    min zᵀQ₀z + p₀ᵀz s.t. A₀z=b₀, G₀z≤h₀
    into a "soft" QP where Az=b is penalized with equality_constraint_weight
    and Gz≤h is penalized with inequality_constraint_weight:
    min z̃ᵀQ̃z̃ + p̃ᵀz̃ s.t. z̃ ≥ 0
    Dropping the constant terms,
    the constraint A₀z=b₀ is transformed to quadratic penalty
    min (A₀z−b₀)ᵀ(A₀z-b₀) → min zᵀA₀ᵀA₀z−2b₀ᵀA₀z
    and the constraint Gz≤h is transformed to quadratic penalty with slack
    variables s
    min (G₀z-h₀+s)ᵀ(G₀z-h₀+s) → min zᵀG₀ᵀG₀z+2sᵀG₀z+sᵀs−2h₀ᵀG₀z−2h₀ᵀs
    Rewriting the optimization by defining
    z̃ = [z; s], len(s) = len(h₀)
    The original costs become
    Q̃₀ = block_diag([Q₀, 0ₛ])
    p̃₀ᵀ = [p₀ᵀ, 0]
    The equality constraint cost weights are
    Q̃ₑ = block_diag([A₀ᵀA₀, 0ₛ])*equality_constraint_weight
    p̃ₑᵀ = [−2b₀ᵀA₀, 0]*equality_constraint_weight
    The inequality constraint cost weights are
    Q̃ᵢₑ = [G₀ᵀG₀, G₀; G₀ᵀ, Iₛ]*inequality_constraint_weight
    p̃ᵢₑᵀ = [-2h₀ᵀG₀, -2h₀ᵀ]*inequality_constraint_weight
    The total optimization becomes
    Q̃=Q̃₀+Q̃ₑ+Q̃ᵢₑ
    p̃ᵀ=p̃₀ᵀ+p̃ₑᵀ+p̃ᵢₑᵀ
    Ã, b̃ = Variable(torch.Tensor())
    G̃ = -I
    h̃ = 0
    :return: The matrices defining the soft QP Q̃, p̃, G̃, h̃, Ã, b̃
    '''
    raise DeprecationWarning
    # ---- everything below is unreachable; retained for reference only ----
    slack_count = len(h_0)
    zeros_vec_slack_count = torch.zeros(slack_count).type_as(Q_0)
    zeros_mat_slack_count = torch.zeros(
        [slack_count, slack_count]).type_as(Q_0)
    eye_slack_count = torch.eye(slack_count).type_as(Q_0)
    # Original objective lifted to the [z; s] variable.
    Q_0_tilde = torch.block_diag(Q_0, zeros_mat_slack_count)
    p_0_tilde = torch.hstack([p_0, zeros_vec_slack_count])
    # Equality-constraint penalty terms.
    Q_e_tilde = torch.block_diag(A_0.transpose(0, 1)@A_0,
                                 zeros_mat_slack_count) *\
        equality_constraint_weight
    p_e_tilde = torch.hstack([-2.*b_0@A_0,
                              zeros_vec_slack_count])*equality_constraint_weight
    # Inequality-constraint penalty terms (couple z with the slacks s).
    Q_ie_tilde = torch.vstack(
        [torch.hstack([G_0.transpose(0, 1)@G_0, G_0.transpose(0, 1)]),
         torch.hstack([G_0, eye_slack_count])]) *\
        inequality_constraint_weight
    p_ie_tilde = torch.hstack([-2.*h_0@G_0, -2.*h_0]) * \
        inequality_constraint_weight
    Q_tilde = (Q_0_tilde+Q_e_tilde+Q_ie_tilde).type_as(Q_0)
    p_tilde = (p_0_tilde+p_e_tilde+p_ie_tilde).type_as(Q_0)
    # Only the nonnegativity constraint z̃ ≥ 0 remains explicit.
    G_tilde = -torch.eye(Q_tilde.shape[0]).type_as(Q_0)
    h_tilde = torch.zeros(Q_tilde.shape[0]).type_as(Q_0)
    A_tilde = torch.autograd.Variable(torch.Tensor()).type_as(Q_0)
    b_tilde = torch.autograd.Variable(torch.Tensor()).type_as(Q_0)
    return Q_tilde, p_tilde, G_tilde, h_tilde, A_tilde, b_tilde
def reformulate_eq_to_soft_QP(Q_0, p_0, G_0, h_0, A_0, b_0,
                              equality_constraint_weight
                              ):
    '''
    Transforms a QP of the form
    min zᵀQ₀z + p₀ᵀz s.t. A₀z=b₀, G₀z≤h₀
    into a "soft" QP where A₀z=b₀ is replaced by a quadratic penalty with
    weight equality_constraint_weight while G₀z≤h₀ (plus z ≥ 0) stays an
    explicit constraint:
    min zᵀQ̃z + p̃ᵀz s.t. z ≥ 0, G₀z≤h₀
    Dropping constant terms, (A₀z−b₀)ᵀ(A₀z−b₀) contributes
    Q̃ₑ = A₀ᵀA₀*w and p̃ₑᵀ = −2b₀ᵀA₀*w, so
    Q̃ = Q₀+Q̃ₑ, p̃ᵀ = p₀ᵀ+p̃ₑᵀ,
    G̃ = [G₀; -I], h̃ = [h₀; 0], and Ã, b̃ are empty Variables.
    :return: Q̃, p̃, G̃, h̃, Ã, b̃
    '''
    equality_constraint_weight = equality_constraint_weight.to(A_0.device)
    # Move every operand onto Q_0's device before mixing them.
    dev = Q_0.device
    p_0 = p_0.to(dev)
    G_0 = G_0.to(dev)
    h_0 = h_0.to(dev)
    A_0 = A_0.to(dev)
    b_0 = b_0.to(dev)
    weight = equality_constraint_weight.to(dev)

    # Quadratic and linear penalty contributions from the equality constraint.
    penalty_Q = A_0.transpose(0, 1) @ A_0 * weight
    penalty_p = -torch.tensor(2., device=dev) * b_0 @ A_0 * weight
    Q_tilde = (Q_0 + penalty_Q).type_as(Q_0)
    p_tilde = (p_0 + penalty_p).type_as(Q_0)

    # Keep G₀z ≤ h₀ and append z ≥ 0 rows.
    n = Q_0.shape[0]
    G_tilde = torch.cat([G_0, -torch.eye(n).type_as(Q_0)], dim=0).type_as(Q_0)
    h_tilde = torch.cat([h_0, torch.zeros(n).type_as(Q_0)], dim=0).type_as(Q_0)

    # Empty equality constraints, as expected by downstream QP solvers.
    A_tilde = torch.autograd.Variable(torch.Tensor()).type_as(Q_0)
    b_tilde = torch.autograd.Variable(torch.Tensor()).type_as(Q_0)
    return Q_tilde, p_tilde, G_tilde, h_tilde, A_tilde, b_tilde
def polynomial_function_deg3(data, a, b, c, d, e, f, g, h, i, j):
    """
    Full bivariate cubic:
    z = f(x,y) = ax³+by³+cx²y+dxy²+ex²+fy²+gxy+hx+iy+j
    :param data: (n, 2) array of (x, y) samples
    :return: length-n array of z values
    """
    x, y = data[:, 0], data[:, 1]
    cubic = a * x ** 3 + b * y ** 3 + c * (x ** 2) * y + d * x * (y ** 2)
    quadratic = e * x ** 2 + f * y ** 2 + g * x * y
    linear = h * x + i * y + j
    return cubic + quadratic + linear
def polynomial_function_deg3_grad(data, a, b, c, d, e, f, g, h, i, j):
    """
    Analytic gradient of polynomial_function_deg3:
    ∂f/∂x = 3ax²+2cxy+dy²+2ex+gy+h
    ∂f/∂y = 3by²+cx²+2dxy+2fy+gx+i
    :param data: (n, 2) array of (x, y) samples
    :return: numpy array stacked as [[∂f/∂x], [∂f/∂y]]
    """
    x, y = data[:, 0], data[:, 1]
    dfdx = 3 * a * x ** 2 + 2 * c * x * y + d * y ** 2 + 2 * e * x + g * y + h
    dfdy = 3 * b * y ** 2 + c * x ** 2 + 2 * d * x * y + 2 * f * y + g * x + i
    return np.array([[dfdx],
                     [dfdy]])
def polynomial_function_deg1(data, a, b, c):
    """
    Plane model: z = ax + by + c
    :param data: (n, 2) array of (x, y) samples
    :return: length-n array of z values
    """
    return data[:, 0] * a + data[:, 1] * b + c
def polynomial_function_deg1_grad(data, a, b, c):
    """Constant gradient of the plane model: [[∂f/∂x], [∂f/∂y]] = [[a], [b]]."""
    return np.array([a, b]).reshape(2, 1)
class SurfaceFitFunction(enum.Enum):
    """Candidate surface models for fitting: each member's value is a
    (function, gradient) pair sharing the signature f(data, *coeffs)."""
    # Cubic bivariate polynomial and its analytic gradient.
    POLYNOMIAL_DEG3 = (polynomial_function_deg3, polynomial_function_deg3_grad)
    # Plane (degree-1) model and its constant gradient.
    POLYNOMIAL_DEG1 = (polynomial_function_deg1, polynomial_function_deg1_grad)
def get_fitted_fn(fn_type, popt):
    """Bind fitted coefficients *popt* into the (function, gradient) pair of
    *fn_type*, returning callables that only take the data argument."""
    fn, fn_grad = fn_type.value

    def bound_fn(x):
        return fn(x, *popt)

    def bound_grad(x):
        return fn_grad(x, *popt)

    return bound_fn, bound_grad
def compute_principal_components(points):
    """
    Fit a 3-component PCA to the points.
    :param points: (n, 3) array
    :return: 3-tuple of
        ndarray of shape (n_components, n_features) — principal axes,
        ndarray of shape (n_features,) — per-feature mean,
        (n, 3) points expressed in the PCA basis
    """
    pca = PCA(n_components=3)
    projected = pca.fit_transform(points)
    return pca.components_, pca.mean_, projected
def fit_surface_to_points(points,
                          fit_function=SurfaceFitFunction.POLYNOMIAL_DEG1,
                          **kwargs):
    """
    Least-squares fit z = f(x, y) over the given points.
    :param points: (n, 3) array; columns are x, y, z
    :param fit_function: SurfaceFitFunction member supplying the model
    :return: (popt, pcov) as returned by scipy.optimize.curve_fit
    """
    assert points.shape[1] == 3
    xy = points[:, :2]
    z = points[:, 2]
    return scipy.optimize.curve_fit(fit_function.value[0], xy, z)
def transform_and_fit_surface_to_points(
        p_W,
        fit_function=SurfaceFitFunction.POLYNOMIAL_DEG3,
        **kwargs):
    """
    PCA-align the world points p_W, then fit ``fit_function`` in the PCA
    (surface) frame S.
    :param p_W: (n, 3) points in world frame
    :param fit_function: SurfaceFitFunction member to fit
    :return: (R_WS, t_WS, p_S, fitted_fn_S, fitted_fn_grad_S) where R_WS and
        t_WS map surface-frame coordinates into the world frame and p_S are
        the input points expressed in the surface frame.
    """
    # Principal axes define the surface frame; points come back transformed.
    axes, centroid, p_S = compute_principal_components(p_W)
    # Fit the model to the aligned points and rebuild callable fn + gradient.
    coeffs, _cov = fit_surface_to_points(p_S, fit_function, **kwargs)
    fitted_fn_S, fitted_fn_grad_S = get_fitted_fn(fit_function, coeffs)
    return axes.T, centroid, p_S, fitted_fn_S, fitted_fn_grad_S
def transform_and_fit_consistent_normal_surface_to_points(
        all_p_W,
        fit_function=SurfaceFitFunction.POLYNOMIAL_DEG3,
        **kwargs):
    """
    PCA-align and fit a surface to each of 3 point patches, flipping each
    patch's PCA frame so that the third axis (the surface normal) points away
    from the patches' common centroid — i.e. the three normals are
    consistently oriented "outwards".
    :param all_p_W: sequence of 3 arrays, each of shape (n_i, 3), world frame
    :param fit_function: SurfaceFitFunction member fitted per patch
    :return: list of 3 tuples (R_WS, t_WS, p_S, fitted_fn, fitted_fn_grad)
    """
    assert len(all_p_W) == 3
    for i in range(len(all_p_W)):
        assert all_p_W[i].shape[1] == 3
    all_components = np.zeros([3, 3, 3])
    all_means = np.zeros([3, 3])
    for idx, points in enumerate(all_p_W):
        # First do PCA
        components, mean, _ = compute_principal_components(points)
        all_components[idx, :, :] = components
        all_means[idx,:] = mean
    # Centroid of the three patch means: the "inside" reference point.
    all_pavg_W = np.average(all_means, axis=0)
    ans = []
    for idx, points in enumerate(all_p_W):
        # Flip the signs of the basis if necessary:
        # the third PCA axis should point from the centroid toward the patch.
        sign = int(np.dot(all_means[idx,:]-all_pavg_W, all_components[idx, 2, :]) < 0)
        all_components[idx] *= (-1)**sign
        # Make sure the normals are consistent
        # Fit to the transformed points
        # Reconstruct the function
        R_WS = all_components[idx].T
        t_WS = all_means[idx,:]
        # Express this patch in its (possibly flipped) surface frame.
        p_S = (R_WS.T @ (all_p_W[idx]-t_WS).T).T
        popt, pcov = fit_surface_to_points(p_S,
                                           fit_function, **kwargs)
        fitted_fn, fitted_fn_grad = get_fitted_fn(fit_function, popt)
        ans.append((R_WS, t_WS, p_S, fitted_fn, fitted_fn_grad))
    return ans
def compute_nF_S(p_S, fitted_fn_grad):
    """
    Fitted-surface unit normal at a point p_S in the PCA basis space.
    For z = f(x, y) the (unnormalized) normal is (-∂f/∂x, -∂f/∂y, 1); see
    https://en.wikipedia.org/wiki/Normal_(geometry)#Calculating_a_surface_normal
    :param p_S: (3, 1) point in the surface frame
    :param fitted_fn_grad: gradient callable from get_fitted_fn
    :return: unit-norm length-3 numpy array nF_S
    """
    gx, gy = np.squeeze(fitted_fn_grad(p_S[:2, :].reshape(1, -1)))
    normal = np.array([-gx, -gy, 1.])
    return normal / np.linalg.norm(normal)
def compute_n_W(p_W, R_WS, t_WS, fitted_fn_grad):
    """Map a world-frame point into the surface frame, evaluate the fitted
    surface normal there, and rotate that normal back into the world frame."""
    offset = p_W.reshape(3, 1) - t_WS.reshape(3, 1)
    p_S = R_WS.T @ offset
    return R_WS @ compute_nF_S(p_S, fitted_fn_grad)
def rotation_matrix_from_vectors(vec1, vec2):
    """
    Compute a proper rotation matrix R such that R @ vec1 is parallel to vec2
    (for unit inputs, R @ vec1 == vec2).

    Bug fix: the original returned the fixed matrix diag(-1, 1, -1) (a 180
    degree rotation about the y-axis) for ALL antiparallel inputs, which only
    maps vec1 to -vec1 when vec1 lies in the x-z plane (e.g. vec1 = [0,1,0]
    was mapped to itself). We now rotate by pi about an axis perpendicular
    to vec1, which is correct for every direction.

    :param vec1: source vector (any nonzero 3-vector)
    :param vec2: target vector (any nonzero 3-vector)
    :return: 3x3 rotation matrix (det = +1)
    """
    a = np.asarray(vec1, dtype=float).reshape(3)
    a = a / np.linalg.norm(a)
    b = np.asarray(vec2, dtype=float).reshape(3)
    b = b / np.linalg.norm(b)
    v = np.cross(a, b)
    c = np.dot(a, b)
    s = np.linalg.norm(v)
    if s == 0:
        if c > 0:
            # Parallel: nothing to do.
            return np.eye(3)
        # Antiparallel: rotate pi about any axis u perpendicular to a.
        # Build u from the coordinate axis least aligned with a.
        helper = np.zeros(3)
        helper[np.argmin(np.abs(a))] = 1.0
        u = np.cross(a, helper)
        u /= np.linalg.norm(u)
        # Rotation by pi about unit axis u: R = 2*u*u^T - I (det = +1).
        return 2.0 * np.outer(u, u) - np.eye(3)
    # General case: Rodrigues' formula via the skew-symmetric cross matrix.
    kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
    return np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))
def minimumDist2dBox(p_client, rigid_box, shape, point):
    """
    Minimum Euclidean distance from a world-frame point to an oriented box.

    :param p_client: pybullet client used to query the box pose
    :param rigid_box: pybullet body id of the box
    :param shape: per-axis extents used as the box bounds in its own frame
        (NOTE(review): presumably half-extents — confirm against callers)
    :param point: world-frame query point (numpy array of 3)
    :return: 0.0 if the point is inside the box, else the distance to it
    """
    pos, orn = p_client.getBasePositionAndOrientation(rigid_box)
    # Work with the point expressed relative to the box origin.
    rel_pos = torch.from_numpy(point - np.asarray(pos))
    # Reorder pybullet's (x, y, z, w) quaternion into (w, x, y, z) —
    # NOTE(review): assumes pk.quaternion_* expects wxyz ordering; confirm.
    orn = torch.tensor([orn[3], orn[0], orn[1], orn[2]])
    inv_q = pk.quaternion_invert(orn)
    # Rotate the relative position into the box frame.
    x,y,z = pk.quaternion_apply(inv_q, rel_pos).tolist()
    # Calculate minimum distance: per-axis overshoot beyond the box bounds
    # (clamped at 0 inside), then the norm of the overshoot vector.
    dx = max(abs(x)-shape[0], 0)
    dy = max(abs(y)-shape[1], 0)
    dz = max(abs(z)-shape[2], 0)
    return np.linalg.norm([dx, dy, dz])
| Ericcsr/synthesize_pregrasp | utils/math_utils.py | math_utils.py | py | 18,341 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "math.pi",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "math.sqrt",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number": 24,
... |
38267722193 | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 1 00:29:59 2023
@author: 16692
"""
from sqlalchemy import Column, Integer, String, DateTime, Text
from sqlalchemy.orm import declarative_base
from datetime import datetime
Base = declarative_base()
class User(Base):
    """SQLAlchemy declarative model for one row of the ``users`` table."""

    __tablename__ = "users"

    user_id = Column(Integer(), primary_key=True)  # primary key, supplied by callers
    name = Column(String(100), nullable=False)
    email = Column(String(100), nullable=False)  # NOTE(review): not unique — confirm duplicates are intended
    signup_date = Column(DateTime(), default=datetime.now)  # callable default, evaluated per insert

    def __init__(self, user_id, name, email, signup_date):
        # Explicit positional constructor; replaces the keyword-only
        # __init__ the declarative base would otherwise generate.
        self.user_id = user_id
        self.name = name
        self.email = email
        self.signup_date = signup_date
# Bootstrap: create every mapped table on import, reporting success/failure.
from connection import get_connection, get_engine

engine = get_engine()
try:
    Base.metadata.create_all(bind=engine)
except Exception as exc:
    # Broad catch is deliberate in this setup script: report and move on.
    print(exc)
else:
    print("database creation done")
| saurabhsathe/eikontx-backend-swe2 | database_ops/users.py | users.py | py | 903 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlalchemy.orm.declarative_base",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "... |
39587395835 | import time
import torch
import random
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from tqdm import tqdm, trange
from torchtext import data
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix
from transformers import XLMRobertaModel
from models.MultiHead_BiRU import MultiheadBIRUAttention
from models.bert_cnn_model import BERTCNNSentiment
from transformers import XLMRobertaTokenizer
# --- Reproducibility: seed every RNG source used below ---
SEED = 6548
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
# NOTE(review): plain Python variable — CUDA_LAUNCH_BLOCKING only takes
# effect when exported as an environment variable before CUDA initializes.
CUDA_LAUNCH_BLOCKING = 1

# --- Data/model hyper-parameters and file locations ---
max_input_length = 64  # token budget per example (includes the 2 special tokens)
BATCH_SIZE = 128
train_name = "training.csv"
test_name = "test.csv"
val_name = "validation.csv"
# Checkpoint paths, one per model (index-aligned with `models` below).
model_save_names = ['./checkpoint/modelA.txt', "./checkpoint/model2.txt" ]
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# --- Tokenizer and its special-token ids (used by the torchtext TEXT field) ---
tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
print('XLM Roberta Tokenizer Loaded...')
init_token_idx = tokenizer.cls_token_id
eos_token_idx = tokenizer.sep_token_id
pad_token_idx = tokenizer.pad_token_id
unk_token_idx = tokenizer.unk_token_id
print("Max input length: %d" %(max_input_length))
def tokenize_and_cut(sentence):
    """Tokenize with the module-level XLM-R tokenizer, truncating so the
    sequence plus the two special tokens fits in max_input_length."""
    # Reserve two slots for the init/eos tokens added by the TEXT field.
    return tokenizer.tokenize(sentence)[:max_input_length - 2]
# --- torchtext fields: id column (passed through), text (pre-tokenized ids), label ---
UID=data.Field(
    sequential=False,
    use_vocab=False,
    pad_token=None
)
# TEXT bypasses torchtext's vocab: tokens are converted to ids by the
# HuggingFace tokenizer, with its special/pad/unk ids wired in directly.
TEXT=data.Field(batch_first=True,
    use_vocab=False,
    tokenize=tokenize_and_cut,
    preprocessing=tokenizer.convert_tokens_to_ids,
    init_token=init_token_idx,
    eos_token=eos_token_idx,
    pad_token=pad_token_idx,
    unk_token=unk_token_idx
)
LABEL=data.LabelField()
# Column order must match the CSV layout: id, text, category.
fields = [('id',UID),('text', TEXT),('category', LABEL)]
train_data, valid_data, test_data = data.TabularDataset.splits(
    path='./',
    train=train_name,
    test=test_name,
    validation=val_name,
    format='csv',
    fields=fields,
    skip_header=True)
print('Data loading complete')
print(f"Number of training examples: {len(train_data)}")
print(f"Number of validation examples: {len(valid_data)}")
print(f"Number of test examples: {len(test_data)}")
# Label vocab built from train+validation so all classes are indexed.
LABEL.build_vocab(train_data, valid_data)
# Bucketing by text length minimizes padding inside a batch.
train_iterator, valid_iterator, test_iterator=data.BucketIterator.splits(
    (train_data, valid_data, test_data),
    sort_key=lambda x: len(x.text),
    batch_size = BATCH_SIZE,
    device = device,
)
# --- Shared XLM-R encoder and the two competing classification heads ---
bert = XLMRobertaModel.from_pretrained('xlm-roberta-base')
OUTPUT_DIM = 5  # number of sentiment classes
DROPOUT = 0.3
N_FILTERS = 100
FILTER_SIZES = [2, 3, 5, 7, 9]  # CNN kernel widths over the token dimension
model_names = ["A", "B"]
models = [
    BERTCNNSentiment(bert, OUTPUT_DIM, DROPOUT, N_FILTERS, FILTER_SIZES),
    MultiheadBIRUAttention(bert, 128, 100, 100, 4, 768, 5)
]
def clip_gradient(model, clip_value):
    """
    Clamp every parameter gradient of ``model`` into [-clip_value, clip_value]
    in place.

    Args:
        model: module whose parameter gradients are clipped.
        clip_value (float): magnitude bound for each gradient element.
    """
    # torch's built-in helper performs the same element-wise in-place clamp
    # and skips parameters with grad=None, replacing the discouraged
    # manual `.grad.data.clamp_` access of the original.
    torch.nn.utils.clip_grad_value_(model.parameters(), clip_value)
# One Adam optimizer per model (default lr), index-aligned with `models`.
optimizers = [optim.Adam(models[0].parameters()), optim.Adam(models[1].parameters())]
criterion = nn.CrossEntropyLoss()
nll_loss = nn.NLLLoss()
# NOTE(review): LogSoftmax without `dim` relies on the deprecated implicit
# dimension and emits a warning; `dim=1` is presumably intended — confirm.
log_softmax = nn.LogSoftmax()
# Move both models and all loss modules to the selected device.
for i in range(2):
    models[i] = models[i].to(device)
criterion=criterion.to(device)
nll_loss=nll_loss.to(device)
log_softmax=log_softmax.to(device)
def categorical_accuracy(preds, y):
    """
    Batch metrics for the 5-class classifier.

    Args:
        preds: (batch, 5) tensor of class scores/logits.
        y: (batch,) tensor of integer labels in [0, 4].

    Returns:
        (overall_acc, metric, conf) where
          overall_acc: shape-(1) tensor on y's device — fraction correct;
          metric: cpu FloatTensor of 6 values — per-class accuracy for the
            five classes (0 where the class is absent from the batch),
            followed by the weighted F1 score;
          conf: 5x5 confusion matrix (numpy array, labels 0..4).

    Fixes over the original: the per-class totals were read from an
    uninitialized ``torch.FloatTensor(1)``, and the ``i == 4`` branch
    updated its total only in an unreachable ``else`` clause. Both were
    dead code (the totals never reached the returned values) and have
    been removed; the returned values are unchanged.
    """
    device = y.device
    max_preds = preds.argmax(dim=1, keepdim=True)
    predictions = max_preds.squeeze(1)
    correct = predictions.eq(y)
    y_np = y.cpu().numpy()

    # Per-class correct counts and per-class support (number of true samples).
    counts = torch.zeros(5, device=device)
    support = [0, 0, 0, 0, 0]
    for j, label in enumerate(y_np):
        support[label] += 1
        counts[label] += correct[j]

    # 0/0 yields NaN for classes absent from the batch; zeroed below,
    # matching the original's metric[metric != metric] = 0 behavior.
    per_class = counts.cpu() / torch.FloatTensor(support)
    f1 = f1_score(y_np, predictions.cpu().numpy(), average='weighted')
    metric = torch.cat([per_class, torch.FloatTensor([f1])])
    metric[metric != metric] = 0

    overall_acc = correct.sum() / torch.FloatTensor([y.shape[0]]).to(device)
    conf = confusion_matrix(y_np, predictions.cpu().numpy(), labels=[0, 1, 2, 3, 4])
    return overall_acc, metric, conf
def train(model, iterator, optimizer, criterion, i):
    """Run one training epoch; returns (mean loss, mean accuracy).

    ``i`` selects the calling convention: model 0 takes only the text batch,
    model 1 additionally needs the batch size.
    """
    total_loss = 0.0
    total_acc = 0.0
    model.train()
    for batch in iterator:
        optimizer.zero_grad()
        if i == 0:
            predictions = model(batch.text).squeeze(1)
        else:
            predictions = model(batch.text, batch_size=len(batch)).squeeze(1)
        loss = criterion(predictions, batch.category)
        acc, _, _ = categorical_accuracy(predictions, batch.category)
        loss.backward()
        clip_gradient(model, 1e-1)
        optimizer.step()
        total_loss += loss.item()
        total_acc += acc.item()
    n_batches = len(iterator)
    return total_loss / n_batches, total_acc / n_batches
def evaluate(model, iterator, criterion, i):
    """Evaluate without gradient tracking.

    Returns (mean loss, mean accuracy, mean per-class metrics vector,
    summed confusion matrix). ``i`` selects the model calling convention,
    as in train().
    """
    total_loss = 0.0
    total_acc = 0.0
    metrics_sum = torch.FloatTensor([0, 0, 0, 0, 0, 0])
    conf_total = torch.zeros((5, 5))
    model.eval()
    with torch.no_grad():
        for batch in iterator:
            if i == 0:
                predictions = model(batch.text).squeeze(1)
            else:
                predictions = model(batch.text, batch_size=len(batch)).squeeze(1)
            loss = criterion(predictions, batch.category)
            acc, batch_metrics, batch_conf = categorical_accuracy(predictions, batch.category)
            total_loss += loss.item()
            total_acc += acc.item()
            metrics_sum += batch_metrics
            conf_total += batch_conf
    n_batches = len(iterator)
    return total_loss / n_batches, total_acc / n_batches, metrics_sum / n_batches, conf_total
def epoch_time(start_time, end_time):
    """Split an elapsed wall-clock interval into whole minutes and seconds."""
    elapsed = end_time - start_time
    minutes = int(elapsed / 60)
    seconds = int(elapsed - minutes * 60)
    return minutes, seconds
# --- Main loop: train both models each epoch, checkpoint on best val F1 ---
N_EPOCHS = 60
best_f1 = [-1, -1]  # best validation F1 seen so far, per model
t = trange(N_EPOCHS)
for epoch in t:
    t.set_description('EPOCH %i' % epoch)
    for i in range(2):
        print(model_names[i])
        start_time = time.time()
        train_loss, train_acc = train(models[i], train_iterator, optimizers[i], criterion, i)
        valid_loss, valid_acc,tot,conf = evaluate(models[i], valid_iterator, criterion, i)
        # tot[5] is the weighted F1 appended by categorical_accuracy.
        f1 = tot[5]
        end_time = time.time()
        epoch_mins, epoch_secs = epoch_time(start_time, end_time)
        # Checkpoint whenever validation F1 improves for this model.
        if f1 > best_f1[i]:
            best_f1[i] = f1
            path = model_save_names[i]
            print(path)
            torch.save(models[i].state_dict(), path)
        print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
        print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
        print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')
        print("Validation F1 : ", f1)
| PrasannaKumaran/ECMAG---An-Ensemble-Framework-for-Sentiment-Analysis-in-Code-Mixed-Data | main.py | main.py | py | 7,944 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "random.seed",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "torch.manual_seed",
... |
41004428752 | """
This module defines input sets for CP2K and is a work in progress. The structure/philosophy
of this module is based on the Vasp input sets in Pymatgen. These sets are meant to contain
tested parameters that will result in successful, reproducible, consistent calculations without
need for intervention 99% of the time. 99% of the time, you only need to provide a pymatgen
structure object and let the defaults take over from there.
The sets are intended to be very general, e.g. a set for geometry relaxation, and so most of the
time, if you have specific needs, you can simply specify them via the keyword argument
override_default_params (see Section.update() method). If you have the need to create a new input
set (say for a standardized high throughput calculation) then you can create a new child of the
Cp2kInputSet class.
In order to implement a new Set within the current code structure, follow this 3 step flow:
(1) Inherit from Cp2kInputSet or one of its children and call the super() constructor
(2) Create the new sections and insert them into self and its subsections as needed
(3) Call self.update(override_default_params) in order to allow user settings.
"""
import warnings
from pathlib import Path
from typing import Dict, Union
from pymatgen.core.periodic_table import Element
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Molecule, Structure
from pymatgen.io.cp2k.inputs import (
LDOS,
PBE,
PDOS,
QS,
XC_FUNCTIONAL,
Cell,
Coord,
Cp2kInput,
Dft,
E_Density_Cube,
ForceEval,
Global,
Keyword,
KeywordList,
Kind,
Kpoints,
Mgrid,
MO_Cubes,
OrbitalTransformation,
Scf,
Section,
Smear,
Subsys,
V_Hartree_Cube,
)
from pymatgen.io.cp2k.utils import (
get_aux_basis,
get_basis_and_potential,
get_unique_site_indices,
)
__author__ = "Nicholas Winner"
__version__ = "0.2"
__email__ = "nwinner@berkeley.edu"
__date__ = "January 2019"
MODULE_DIR = Path(__file__).resolve().parent
class Cp2kInputSet(Cp2kInput):
    """
    The basic representation of a CP2K input set as a collection of "sections" defining the simulation
    connected to a structure object. At the most basic level, CP2K requires a &GLOBAL section and
    &FORCE_EVAL section. Global sets parameters like "RUN_TYPE" or the overall verbosity. FORCE_EVAL is
    the largest section usually, containing the cell and coordinates of atoms, the DFT settings, and more.
    This top level input set is meant to initialize GLOBAL and FORCE_EVAL based on a structure object and
    and sections that the user provides.

    Like everything that goes into a cp2k input file, this base input set is essentially a section object.
    These sets are distinguished by saving default settings for easy implementation of calculations such
    as relaxation and static calculations. This base set is here to transfer a pymatgen structure object
    into the input format for cp2k and associate the basis set and pseudopotential to use with each
    element in the structure.

    Generally, this class will not be used directly, and instead one of
    its child-classes will be used, which contain more predefined initializations of various sections, and,
    if modifications are required, the user can specify override_default_settings.
    """

    def __init__(
        self,
        structure: Union[Structure, Molecule],
        potential_and_basis: Union[Dict, None] = None,
        multiplicity: int = 0,
        project_name: str = "CP2K",
        override_default_params: Union[Dict, None] = None,
        **kwargs,
    ):
        """
        Args:
            structure: (Structure or Molecule) pymatgen structure or molecule object used to define
                the lattice, coordinates, and elements. This structure object cannot contain "special"
                species like the Dummy species, e.g. X, or fractional occupations, e.g. Fe0.2, etc.
            potential_and_basis: (dict) Specifies what basis set and potential to use. Specify these
                as a dict of the form:
                    { element: {'cardinality': __, 'sr': __, 'q': __},
                      'cardinality': __, 'functional': __}
                Where cardinality and functional are overall specifications (for all elements), while
                <key='element'> specifies the overrides for a specific element. Currently the following
                conventions must be followed:
                    (a) All species of a particular element must have the same potential/basis
                None (the default) is equivalent to an empty dict.
            multiplicity: (int) Specify the system's multiplicity if appropriate
            project_name: (str) Specify the project name. This will be used to name the output files
                from a CP2K calculation
            override_default_params: (dict) Specifies user-defined settings to override the settings of any
                input set (See Section.update()). None (the default) means no overrides.
        """
        super().__init__(name="CP2K_INPUT", subsections={})

        # Normalize the dict arguments. Using None defaults (instead of the
        # original shared mutable {} literals) avoids the mutable-default
        # pitfall while remaining backward compatible for all callers.
        potential_and_basis = potential_and_basis or {}
        override_default_params = override_default_params or {}

        # Important CP2K set parameters
        self.structure = structure
        self.charge = structure.charge
        self.potential_and_basis = potential_and_basis
        self.multiplicity = multiplicity  # spin multiplicity = 2s+1
        self.override_default_params = override_default_params
        self.project_name = project_name
        self.kwargs = kwargs

        # Reject "special" species (dummies, fractional occupations) up front.
        for s in self.structure.species:
            assert s in Element

        self.insert(ForceEval())  # always present in cp2k

        self.basis_set_file_names = None  # need for dft
        self.potential_file_name = None  # need for dft
        self.create_subsys(self.structure)  # assemble structure with atom types and pseudopotentials assigned

        if self.kwargs.get("print_forces", True):
            self.print_forces()
        if self.kwargs.get("print_motion", True):
            self.print_motion()

        self.update(override_default_params)

    def create_subsys(self, structure: Union[Structure, Molecule]):
        """
        Create the structure for the input: cell (for periodic structures),
        one KIND per unique site type, and the coordinates.
        """
        subsys = Subsys()
        if isinstance(structure, Structure):
            subsys.insert(Cell(structure.lattice))

        # Decide what basis sets/pseudopotentials to use
        basis_and_potential = get_basis_and_potential([str(s) for s in structure.species], self.potential_and_basis)

        # Insert atom kinds by identifying the unique sites (unique element and site properties)
        unique_kinds = get_unique_site_indices(structure)
        for k, v in unique_kinds.items():
            kind = k.split("_")[0]
            kwargs = {}
            # Site properties, when present, override the computed defaults.
            if "magmom" in self.structure.site_properties:
                kwargs["magnetization"] = self.structure.site_properties["magmom"][v[0]]
            if "ghost" in self.structure.site_properties:
                kwargs["ghost"] = self.structure.site_properties["ghost"][v[0]]
            if "basis_set" in self.structure.site_properties:
                basis_set = self.structure.site_properties["basis_set"][v[0]]
            else:
                basis_set = basis_and_potential[kind]["basis"]
            if "potential" in self.structure.site_properties:
                potential = self.structure.site_properties["potential"][v[0]]
            else:
                potential = basis_and_potential[kind]["potential"]
            if "aux_basis" in self.structure.site_properties:
                kwargs["aux_basis"] = self.structure.site_properties["aux_basis"][v[0]]
            subsys.insert(Kind(kind, alias=k, basis_set=basis_set, potential=potential, **kwargs))
        coord = Coord(structure, aliases=unique_kinds)
        subsys.insert(coord)
        self["FORCE_EVAL"].insert(subsys)
        self.basis_set_file_names = basis_and_potential["basis_filenames"]
        self.potential_file_name = basis_and_potential["potential_filename"]

    def print_forces(self):
        """
        Print out the forces and stress during calculation
        """
        self["FORCE_EVAL"].insert(Section("PRINT", subsections={}))
        self["FORCE_EVAL"]["PRINT"].insert(Section("FORCES", subsections={}))
        self["FORCE_EVAL"]["PRINT"].insert(Section("STRESS_TENSOR", subsections={}))

    def print_motion(self):
        """
        Print the motion info (trajectory, cell, forces, stress)
        """
        if not self.check("MOTION"):
            self.insert(Section("MOTION", subsections={}))
        self["MOTION"].insert(Section("PRINT", subsections={}))
        self["MOTION"]["PRINT"].insert(Section("TRAJECTORY", section_parameters=["ON"], subsections={}))
        self["MOTION"]["PRINT"].insert(Section("CELL", subsections={}))
        self["MOTION"]["PRINT"].insert(Section("FORCES", subsections={}))
        self["MOTION"]["PRINT"].insert(Section("STRESS", subsections={}))
class DftSet(Cp2kInputSet):
"""
Base for an input set using the Quickstep module (i.e. a DFT calculation). The DFT section is pretty vast
in CP2K, so this set hopes to make the DFT setup fairly simple. The provided parameters are pretty conservative,
and so they should not need to be changed very often.
"""
def __init__(
    self,
    structure: Union[Structure, Molecule],
    ot: bool = True,
    band_gap: float = 0.01,
    eps_default: float = 1e-12,
    eps_scf: float = 1e-7,
    max_scf: Union[int, None] = None,
    minimizer: str = "DIIS",
    preconditioner: str = "FULL_ALL",
    algorithm: str = "STRICT",
    linesearch: str = "2PNT",
    cutoff: int = 1200,
    rel_cutoff: int = 80,
    ngrids: int = 5,
    progression_factor: int = 3,
    override_default_params: Dict = {},  # NOTE(review): mutable default — shared across calls; safe only while never mutated
    wfn_restart_file_name: str = None,
    kpoints: Union[Kpoints, None] = None,
    smearing: bool = False,
    **kwargs,
):
    """
    Args:
        structure: Pymatgen structure or molecule object
        ot (bool): Whether or not to use orbital transformation method for matrix diagonalization. OT is the
            flagship scf solver of CP2K, and will provide huge speed-ups for this part of the calculation,
            but the system must have a band gap for OT to be used (higher band-gap --> faster convergence).
            Band gap is also used by the preconditioner for OT, and should be set as a value SMALLER than the true
            band gap to get good efficiency. Generally, this parameter does not need to be changed from
            default of 0.01
        band_gap (float): The band gap can also be specified in order to determine if ot should be turned on.
        eps_default (float): Replaces all EPS_XX Keywords in the DFT section (NOT its subsections!) to have this
            value, ensuring an overall accuracy of at least this much.
        eps_scf (float): The convergence criteria for leaving the SCF loop in Hartrees. Default is 1e-7. Should
            ensure reasonable results for all properties. Smaller than 1e-7 is generally not needed unless
            you need very high precision. 1e-6 may be used for difficult systems, and should still give
            reasonable results for most properties.
        max_scf (int): The max number of SCF cycles before terminating the solver. NOTE: With the OT solver, this
            corresponds to the max number of INNER scf loops, and then the outer loops are set with outer_max_scf,
            while with diagnolization it corresponds to the overall (INNER*OUTER) number of SCF steps, with the
            inner loop limit set by
        minimizer (str): The minimization scheme. DIIS can be as much as 50% faster than the more robust conjugate
            gradient method, and so it is chosen as default. Switch to CG if dealing with a difficult system.
        preconditioner (str): Preconditioner for the OT method. FULL_ALL is the most reliable, and is the
            default. Though FULL_SINGLE_INVERSE has faster convergence according to our internal tests. Should
            only change from theses two when simulation cell gets to be VERY large,
            in which case FULL_KINETIC might be preferred.
        algorithm (str): OT algorithm (e.g. STRICT, IRAC).
        linesearch (str): OT line-search scheme (e.g. 2PNT, 3PNT, GOLD).
        cutoff (int): Cutoff energy (in Ry) for the finest level of the multigrid. A high cutoff will allow you to
            have very accurate calculations PROVIDED that REL_CUTOFF is appropriate.
        rel_cutoff (int): This cutoff decides how the Guassians are mapped onto the different levels of the
            multigrid. From CP2K: A Gaussian is mapped onto the coarsest level of the multi-grid, on which the
            function will cover number of grid points greater than or equal to the number of grid points
            will cover on a reference grid defined by REL_CUTOFF.
        ngrids (int): Number of multigrid levels.
        progression_factor (int): Divisor of CUTOFF to get the cutoff for the next level of the multigrid.
        override_default_params (dict): user overrides applied last via self.update().
        wfn_restart_file_name (str): path to a wavefunction restart file, if any.
        kpoints (Kpoints): optional k-point mesh to insert into the DFT section.
        smearing (bool): enable Fermi smearing; also auto-enabled when band_gap <= 0.

    Takeaway for the cutoffs: https://www.cp2k.org/howto:converging_cutoff
    If CUTOFF is too low, then all grids will be coarse and the calculation may become inaccurate; and if
    REL_CUTOFF is too low, then even if you have a high CUTOFF, all Gaussians will be mapped onto the coarsest
    level of the multi-grid, and thus the effective integration grid for the calculation may still be too
    coarse.
    """
    super().__init__(structure, **kwargs)

    # Record every knob so subclasses and callers can introspect the set.
    self.structure = structure
    self.ot = ot
    self.band_gap = band_gap
    self.eps_default = eps_default
    self.eps_scf = eps_scf
    self.max_scf = max_scf
    self.minimizer = minimizer
    self.preconditioner = preconditioner
    self.algorithm = algorithm
    self.linesearch = linesearch
    self.cutoff = cutoff
    self.rel_cutoff = rel_cutoff
    self.ngrids = ngrids
    self.progression_factor = progression_factor
    self.override_default_params = override_default_params
    self.wfn_restart_file_name = wfn_restart_file_name
    self.kpoints = kpoints
    self.smearing = smearing
    self.kwargs = kwargs

    # Build the QS Section
    qs = QS(eps_default=eps_default)
    max_scf = max_scf if max_scf else 20 if ot else 400  # If ot, max_scf is for inner loop
    scf = Scf(eps_scf=eps_scf, max_scf=max_scf, subsections={})

    # If there's a band gap, use OT, else use Davidson diagonalization
    if ot:
        if band_gap <= 0:
            warnings.warn(
                "Orbital Transformation method is being used for"
                "a system without a bandgap. OT can have very poor"
                "convergence for metallic systems, proceed with caution.",
                UserWarning,
            )
        scf.insert(
            OrbitalTransformation(
                minimizer=minimizer,
                preconditioner=preconditioner,
                energy_gap=band_gap,
                algorithm=algorithm,
                linesearch=linesearch,
            )
        )
        # OT uses an inner/outer SCF split; outer limits come from kwargs.
        scf.insert(
            Section(
                "OUTER_SCF",
                subsections={},
                keywords={
                    "MAX_SCF": Keyword("MAX_SCF", kwargs.get("outer_max_scf", 20)),
                    "EPS_SCF": Keyword("EPS_SCF", kwargs.get("outer_eps_scf", eps_scf)),
                },
            )
        )
    else:
        scf.insert(Section("DIAGONALIZATION", subsections={}))
        # Broyden density mixing stabilizes diagonalization-based SCF.
        mixing_kwds = {
            "METHOD": Keyword("METHOD", "BROYDEN_MIXING"),
            "ALPHA": Keyword("ALPHA", 0.2),
            "NBUFFER": Keyword("NBUFFER", 5),
        }
        mixing = Section("MIXING", keywords=mixing_kwds, subsections=None)
        scf.insert(mixing)
        davidson_kwds = {"PRECONDITIONER": Keyword("PRECONDITIONER", "FULL_ALL")}
        davidson = Section("DAVIDSON", keywords=davidson_kwds, subsections=None)
        scf["DIAGONALIZATION"].insert(davidson)

    # Create the multigrid for FFTs
    mgrid = Mgrid(
        cutoff=cutoff,
        rel_cutoff=rel_cutoff,
        ngrids=ngrids,
        progression_factor=progression_factor,
    )

    # Set the DFT calculation with global parameters
    dft = Dft(
        MULTIPLICITY=self.multiplicity,
        CHARGE=self.charge,
        basis_set_filenames=self.basis_set_file_names,
        potential_filename=self.potential_file_name,
        subsections={"QS": qs, "SCF": scf, "MGRID": mgrid},
        wfn_restart_file_name=wfn_restart_file_name,
    )

    if kpoints:
        dft.insert(Kpoints.from_kpoints(kpoints))
    # Metallic or explicitly-smeared systems need extra MOs plus smearing.
    if smearing or (band_gap <= 0.0):
        scf.kwargs["ADDED_MOS"] = 100
        scf["ADDED_MOS"] = 100  # TODO: how to grab the appropriate number?
        scf.insert(Smear())

    # Create subsections and insert into them
    self["FORCE_EVAL"].insert(dft)
    xc_functional = XC_FUNCTIONAL(functional=kwargs.get("functional", "PBE"))
    xc = Section("XC", subsections={"XC_FUNCTIONAL": xc_functional})
    self["FORCE_EVAL"]["DFT"].insert(xc)
    self["FORCE_EVAL"]["DFT"].insert(Section("PRINT", subsections={}))

    # Molecules get a non-periodic setup; optional print flags come last.
    if isinstance(structure, Molecule):
        self.activate_nonperiodic()
    if kwargs.get("print_pdos", True):
        self.print_pdos()
    if kwargs.get("print_ldos", False):
        self.print_ldos()
    if kwargs.get("print_mo_cubes", True):
        self.print_mo_cubes()
    if kwargs.get("print_hartree_potential", False):
        self.print_hartree_potential()
    if kwargs.get("print_e_density", False):
        self.print_e_density()

    self.update(self.override_default_params)
def print_pdos(self, nlumo=-1):
    """
    Activate creation of the PDOS file (idempotent).

    Args:
        nlumo (int): Number of virtual orbitals to be added to the MO set (-1=all).
            CAUTION: Setting this value to be higher than the number of states present may cause a Cholesky error.
    """
    if self.check("FORCE_EVAL/DFT/PRINT/PDOS"):
        return  # already enabled
    print_section = self["FORCE_EVAL"]["DFT"]["PRINT"]
    print_section.insert(PDOS(nlumo=nlumo))
def print_ldos(self, nlumo=-1):
    """
    Activate printing of LDOS files, one per site of the structure.

    Args:
        nlumo (int): Number of virtual orbitals to be added to the MO set (-1=all).
            CAUTION: Setting this value to be higher than the number of states present may cause a Cholesky error.
    """
    # LDOS lives under PDOS, so make sure that section exists first.
    if not self.check("FORCE_EVAL/DFT/PRINT/PDOS"):
        self["FORCE_EVAL"]["DFT"]["PRINT"].insert(PDOS(nlumo=nlumo))
    pdos_section = self["FORCE_EVAL"]["DFT"]["PRINT"]["PDOS"]
    for site_idx in range(1, self.structure.num_sites + 1):
        pdos_section.insert(LDOS(site_idx, alias="LDOS {}".format(site_idx), verbose=False))
def print_mo_cubes(self, write_cube=False, nlumo=-1, nhomo=-1):
    """
    Activate printing of molecular orbitals (idempotent).

    Args:
        write_cube (bool): whether to write cube file for the MOs (setting false will just print levels in out file)
        nlumo (int): Controls the number of lumos that are printed and dumped as a cube (-1=all)
        nhomo (int): Controls the number of homos that are printed and dumped as a cube (-1=all)
    """
    if self.check("FORCE_EVAL/DFT/PRINT/MO_CUBES"):
        return  # already enabled
    mo_cubes = MO_Cubes(write_cube=write_cube, nlumo=nlumo, nhomo=nhomo)
    self["FORCE_EVAL"]["DFT"]["PRINT"].insert(mo_cubes)
def print_mo(self):
    """
    Print molecular orbitals when running non-OT diagonalization.

    Raises:
        NotImplementedError: always — placeholder for future support.
    """
    raise NotImplementedError
def print_hartree_potential(self, stride=(1, 1, 1)):
    """
    Controls the printing of a cube file with electrostatic potential generated by the total density
    (electrons+ions). It is valid only for QS with GPW formalism.
    Note that by convention the potential has opposite sign than the expected physical one.

    Args:
        stride: (x, y, z) stride for the cube file grid. The default is now an
            immutable tuple (same values as before) to avoid the
            mutable-default-argument pitfall.
    """
    if not self.check("FORCE_EVAL/DFT/PRINT/V_HARTREE_CUBE"):
        self["FORCE_EVAL"]["DFT"]["PRINT"].insert(V_Hartree_Cube(keywords={"STRIDE": Keyword("STRIDE", *stride)}))
def print_e_density(self):
    """
    Controls the printing of cube files with the electronic density and,
    for LSD calculations, the spin density (idempotent).
    """
    if self.check("FORCE_EVAL/DFT/PRINT/E_DENSITY_CUBE"):
        return  # already enabled
    self["FORCE_EVAL"]["DFT"]["PRINT"].insert(E_Density_Cube())
def set_charge(self, charge):
    """
    Set the overall charge of the simulation cell.
    """
    dft_section = self["FORCE_EVAL"]["DFT"]
    dft_section["CHARGE"] = Keyword("CHARGE", charge)
def activate_hybrid(
    self,
    hybrid_functional: str = "PBE0",
    hf_fraction: float = 0.25,
    gga_x_fraction: float = 0.75,
    gga_c_fraction: float = 1,
    max_memory: int = 2000,
    cutoff_radius: float = 8.0,
    potential_type: str = None,
    omega: float = 0.2,
    aux_basis: Union[Dict, None] = None,
    admm: bool = True,
    eps_schwarz: float = 1e-6,
    eps_schwarz_forces: float = 1e-6,
    screen_on_initial_p: bool = True,
    screen_p_forces: bool = True,
):
    """
    Basic set for activating hybrid DFT calculation using Auxiliary Density Matrix Method.

    Note 1: When running ADMM with cp2k, memory is very important. If the memory requirements exceed
    what is available (see max_memory), then CP2K will have to calculate the 4-electron integrals
    for HFX during each step of the SCF cycle. ADMM provides a huge speed up by making the memory
    requirements *feasible* to fit into RAM, which means you only need to calculate the integrals
    once each SCF cycle. But, this only works if it fits into memory. When setting up ADMM
    calculations, we recommend doing whatever is possible to fit all the 4EI into memory.

    Note 2: This set is designed for reliable high-throughput calculations, NOT for extreme
    accuracy. Please review the in-line comments in this method if you want more control.

    Args:
        hybrid_functional (str): Type of hybrid functional. This set supports HSE (screened) and PBE0
            (truncated). Default is PBE0, which converges easier in the GPW basis used by
            cp2k.
        hf_fraction (float): fraction of exact HF exchange energy to mix. Default: 0.25
        gga_x_fraction (float): fraction of gga exchange energy to retain. Default: 0.75
        gga_c_fraction (float): fraction of gga correlation energy to retain. Default: 1.0
        max_memory (int): Maximum memory available to each MPI process (in Mb) in the calculation.
            Most modern computing nodes will have ~2Gb per core, or 2048 Mb, but check for
            your specific system. This value should be as large as possible while still leaving
            some memory for the other parts of cp2k. Important: If this value is set larger
            than the memory limits, CP2K will likely seg-fault.
            Default: 2000
        cutoff_radius (float): for truncated hybrid functional (i.e. PBE0), this is the cutoff
            radius. The default is selected as that which generally gives convergence, but
            maybe too low (if you want very high accuracy) or too high (if you want a quick
            screening). Default: 8 angstroms
        potential_type (str): what interaction potential to use for HFX. Available in CP2K are
            COULOMB, GAUSSIAN, IDENTITY, LOGRANGE, MIX_CL, MIX_CL_TRUNC, MIX_LG, SHORTRANGE,
            and TRUNCATED. Default is None, and it will be set automatically depending on the
            named hybrid_functional that you use, but setting it to one of the acceptable
            values will constitute a user-override.
        omega (float): For HSE, this specifies the screening parameter. HSE06 sets this as
            0.2, which is the default.
        aux_basis (dict): If you want to specify the aux basis to use, specify it as a dict of
            the form {'specie_1': 'AUX_BASIS_1', 'specie_2': 'AUX_BASIS_2'}
        admm (bool): Whether or not to use the auxiliary density matrix method for the exact
            HF exchange contribution. Highly recommended. Speed ups between 10x and 1000x are
            possible when compared to non ADMM hybrid calculations. Default: True
        eps_schwarz (float): Screening threshold for HFX, in Ha. Contributions smaller than this
            will be screened. The smaller the value, the more accurate, but also the more
            costly. Default value is 1e-6, which is quite aggressive. Aggressive screening
            can also lead to convergence issues. 1e-7 should be a safe value if 1e-6 is too
            aggressive.
        eps_schwarz_forces (float): Same as for eps_schwarz, but for screening contributions to
            forces. Convergence is not as sensitive with respect to eps_schwarz forces as
            compared to eps_schwarz, and so 1e-6 should be good default.
        screen_on_initial_p (bool): If an initial density matrix is provided, in the form of a
            CP2K wfn restart file, then this initial density will be used for screening. This
            is generally very computationally efficient, but, as with eps_schwarz, can lead to
            instabilities if the initial density matrix is poor.
        screen_p_forces (bool): Same as screen_on_initial_p, but for screening of forces.
    """
    if admm:
        # Resolve an aux basis for every element in the structure (None =>
        # let get_aux_basis pick a default for that element).
        aux_basis = aux_basis if aux_basis else {}
        aux_basis = {s: aux_basis[s] if s in aux_basis else None for s in self.structure.symbol_set}
        basis = get_aux_basis(basis_type=aux_basis)
        # Register the ADMM basis files alongside the existing ones.
        if isinstance(self["FORCE_EVAL"]["DFT"]["BASIS_SET_FILE_NAME"], KeywordList):
            self["FORCE_EVAL"]["DFT"]["BASIS_SET_FILE_NAME"].extend(
                [Keyword("BASIS_SET_FILE_NAME", k) for k in ["BASIS_ADMM", "BASIS_ADMM_MOLOPT"]],
            )
        # Attach an AUX_FIT basis to every KIND in the subsystem.
        for k, v in self["FORCE_EVAL"]["SUBSYS"].subsections.items():
            if v.name.upper() == "KIND":
                kind = v["ELEMENT"].values[0]
                v.keywords["BASIS_SET"] += Keyword("BASIS_SET", "AUX_FIT", basis[kind])

        # Don't change unless you know what you're doing
        # Use NONE for accurate eigenvalues (static calcs)
        aux_matrix_params = {
            "ADMM_PURIFICATION_METHOD": Keyword("ADMM_PURIFICATION_METHOD", "NONE"),
            "METHOD": Keyword("METHOD", "BASIS_PROJECTION"),
        }
        aux_matrix = Section(
            "AUXILIARY_DENSITY_MATRIX_METHOD",
            keywords=aux_matrix_params,
            subsections={},
        )
        self.subsections["FORCE_EVAL"]["DFT"].insert(aux_matrix)

    # Define the GGA functional as PBE, rescaled to make room for exact exchange.
    pbe = PBE("ORIG", scale_c=gga_c_fraction, scale_x=gga_x_fraction)
    xc_functional = XC_FUNCTIONAL("PBE", subsections={"PBE": pbe})

    screening = Section(
        "SCREENING",
        subsections={},
        keywords={
            "EPS_SCHWARZ": Keyword("EPS_SCHWARZ", eps_schwarz),
            "EPS_SCHWARZ_FORCES": Keyword("EPS_SCHWARZ_FORCES", eps_schwarz_forces),
            "SCREEN_ON_INITIAL_P": Keyword("SCREEN_ON_INITIAL_P", screen_on_initial_p),
            "SCREEN_P_FORCES": Keyword("SCREEN_P_FORCES", screen_p_forces),
        },
    )
    # Interaction-potential keywords depend on the named hybrid functional.
    ip_keywords = {}
    if hybrid_functional == "HSE06":
        potential_type = potential_type if potential_type else "SHORTRANGE"
        xc_functional.insert(
            Section(
                "XWPBE",
                subsections={},
                keywords={
                    "SCALE_X0": Keyword("SCALE_X0", 1),
                    "SCALE_X": Keyword("SCALE_X", -hf_fraction),
                    "OMEGA": Keyword("OMEGA", omega),
                },
            )
        )
        ip_keywords.update(
            {
                "POTENTIAL_TYPE": Keyword("POTENTIAL_TYPE", potential_type),
                "OMEGA": Keyword("OMEGA", omega),
            }
        )
    elif hybrid_functional == "PBE0":
        potential_type = potential_type if potential_type else "TRUNCATED"
        ip_keywords.update(
            {
                "POTENTIAL_TYPE": Keyword("POTENTIAL_TYPE", potential_type),
                "CUTOFF_RADIUS": Keyword("CUTOFF_RADIUS", cutoff_radius),
                "T_C_G_DATA": Keyword("T_C_G_DATA", "t_c_g.dat"),
            }
        )
    interaction_potential = Section("INTERACTION_POTENTIAL", subsections={}, keywords=ip_keywords)

    # Unlikely for users to override
    load_balance = Section(
        "LOAD_BALANCE",
        keywords={"RANDOMIZE": Keyword("RANDOMIZE", True)},
        subsections={},
    )
    # EPS_STORAGE_SCALING squashes the integrals for efficient storage
    # Unlikely for users to override.
    memory = Section(
        "MEMORY",
        subsections={},
        keywords={
            "EPS_STORAGE_SCALING": Keyword("EPS_STORAGE_SCALING", 0.1),
            "MAX_MEMORY": Keyword("MAX_MEMORY", max_memory),
        },
    )
    hf = Section(
        "HF",
        keywords={"FRACTION": Keyword("FRACTION", hf_fraction)},
        subsections={
            "SCREENING": screening,
            "INTERACTION_POTENTIAL": interaction_potential,
            "LOAD_BALANCE": load_balance,
            "MEMORY": memory,
        },
    )
    xc = Section("XC", subsections={"XC_FUNCTIONAL": xc_functional, "HF": hf})

    self.subsections["FORCE_EVAL"]["DFT"].insert(xc)
def activate_fast_minimization(self, on):
"""
Method to modify the set to use fast SCF minimization.
"""
if on:
ot = OrbitalTransformation(
minimizer="DIIS",
preconditioner="FULL_ALL",
algorithm="IRAC",
energy_gap=0.01,
linesearch="2PNT",
)
self.update({"FORCE_EVAL": {"DFT": {"SCF": {"OT": ot}}}})
def activate_robust_minimization(self):
"""
Method to modify the set to use more robust SCF minimization technique
"""
ot = OrbitalTransformation(
minimizer="CG",
preconditioner="FULL_ALL",
algorithm="STRICT",
energy_gap=0.05,
linesearch="3PNT",
)
self.update({"FORCE_EVAL": {"DFT": {"SCF": {"OT": ot}}}})
def activate_very_strict_minimization(self):
"""
Method to modify the set to use very strict SCF minimization scheme
:return:
"""
ot = OrbitalTransformation(
minimizer="CG",
preconditioner="FULL_ALL",
algorithm="STRICT",
energy_gap=0.05,
linesearch="GOLD",
)
self.update({"FORCE_EVAL": {"DFT": {"SCF": {"OT": ot}}}})
def activate_nonperiodic(self):
"""
Activates a calculation with non-periodic calculations by turning of PBC and
changing the poisson solver. Still requires a CELL to put the atoms
"""
kwds = {
"POISSON_SOLVER": Keyword("POISSON_SOLVER", "MT"),
"PERIODIC": Keyword("PERIODIC", "NONE"),
}
self["FORCE_EVAL"]["DFT"].insert(Section("POISSON", subsections={}, keywords=kwds))
if not self.check("FORCE_EVAL/SUBSYS/CELL"):
x = max([s.coords[0] for s in self.structure.sites])
y = max([s.coords[1] for s in self.structure.sites])
z = max([s.coords[2] for s in self.structure.sites])
self["FORCE_EVAL"]["SUBSYS"].insert(Cell(lattice=Lattice([[x, 0, 0], [0, y, 0], [0, 0, z]])))
self["FORCE_EVAL"]["SUBSYS"]["CELL"].add(Keyword("PERIODIC", "NONE"))
def modify_dft_print_iters(self, iters, add_last="no"):
"""
Modify all DFT print iterations at once. Common use is to set iters to the max
number of iterations + 1 and then set add_last to numeric. This would have the
effect of printing only the first and last iteration, which might be useful for
speeding up/saving space on GEO_OPT or MD runs where you don't need the intermediate
values.
Args
iters (int): print each "iters" iterations.
add_last (str): Whether to explicitly include the last iteration, and how to mark it.
numeric: mark last iteration with the iteration number
symbolic: mark last iteration with the letter "l"
no: do not explicitly include the last iteration
"""
assert add_last.lower() in ["no", "numeric", "symbolic"]
if self.check("FORCE_EVAL/DFT/PRINT"):
run_type = self["global"].get("run_type", Keyword("run_type", "energy")).values[0]
for k, v in self["force_eval"]["dft"]["print"].subsections.items():
if v.name.upper() in [
"ACTIVE_SPACE",
"BAND_STRUCTURE",
"GAPW",
"IMPLICIT_PSOLVER",
"SCCS",
"WFN_MIX",
]:
continue
v.insert(
Section(
"EACH",
subsections=None,
keywords={run_type: Keyword(run_type, iters)},
)
)
v.keywords["ADD_LAST"] = Keyword("ADD_LAST", add_last)
class StaticSet(DftSet):
    """
    Basic static energy calculation. Turns on Quickstep module, sets the run_type in global,
    and uses structure object to build the subsystem.
    """

    def __init__(
        self,
        structure: Union[Structure, Molecule],
        project_name: str = "Static",
        run_type: str = "ENERGY_FORCE",
        override_default_params: Union[Dict, None] = None,
        **kwargs,
    ):
        """
        Args:
            structure: Pymatgen structure object
            project_name (str): What to name this cp2k project (controls naming of files printed out)
            run_type (str): Run type. As a static set it should be one of the static aliases, like 'ENERGY_FORCE'
            override_default_params (Dict): settings applied over the defaults after the set is built.
                Defaults to None (no overrides).
        """
        super().__init__(structure, **kwargs)
        # Default to None instead of {} to avoid sharing one mutable dict
        # across every instance (mutable-default-argument pitfall).
        override_default_params = override_default_params or {}
        global_section = Global(project_name=project_name, run_type=run_type)
        self.structure = structure
        self.project_name = project_name
        self.run_type = run_type
        self.override_default_params = override_default_params
        self.insert(global_section)
        self.update(override_default_params)
        self.kwargs = kwargs
class RelaxSet(DftSet):
    """
    CP2K input set containing the basic settings for performing geometry optimization. Values are all cp2k
    defaults, and should be good for most systems of interest.
    """

    def __init__(
        self,
        structure: Union[Structure, Molecule],
        max_drift: float = 3e-3,
        max_force: float = 4.5e-3,
        max_iter: int = 200,
        project_name: str = "Relax",
        optimizer: str = "BFGS",
        override_default_params: Union[Dict, None] = None,
        **kwargs,
    ):
        """
        Args:
            structure: Pymatgen structure object
            max_drift: Convergence criterion for the maximum geometry change between the current and the
                last optimizer iteration. This keyword cannot be repeated and it expects precisely one real.
                Default value: 3.00000000E-003
                Default unit: [bohr]
            max_force (float): Convergence criterion for the maximum force component of the current configuration.
                This keyword cannot be repeated and it expects precisely one real.
                Default value: 4.50000000E-004
                Default unit: [bohr^-1*hartree]
            max_iter (int): Specifies the maximum number of geometry optimization steps.
                One step might imply several force evaluations for the CG and LBFGS optimizers.
                This keyword cannot be repeated and it expects precisely one integer.
                Default value: 200
            project_name (str): What to name this cp2k project (controls naming of files printed out)
            optimizer (str): Specify which method to use to perform a geometry optimization.
                This keyword cannot be repeated and it expects precisely one keyword. BFGS is a
                quasi-newtonian method, and will be best for "small" systems near the minimum. LBFGS
                is a limited memory version that can be used for "large" (>1000 atom) systems when
                efficiency outweighs robustness. CG is more robust, especially when you are far from
                the minimum, but it is slower.
                Default value: BFGS
            override_default_params (Dict): settings applied over the defaults after the set is built.
                Defaults to None (no overrides).
        """
        super().__init__(structure, **kwargs)
        # Avoid the shared-mutable-default pitfall.
        override_default_params = override_default_params or {}
        self.structure = structure
        self.max_drift = max_drift
        self.max_force = max_force
        self.max_iter = max_iter
        self.project_name = project_name
        self.optimizer = optimizer
        self.override_default_params = override_default_params
        self.kwargs = kwargs
        global_section = Global(project_name=project_name, run_type="GEO_OPT")
        geo_opt_params = {
            "TYPE": Keyword("TYPE", "MINIMIZATION"),
            "MAX_DR": Keyword("MAX_DR", max_drift),
            "MAX_FORCE": Keyword("MAX_FORCE", max_force),
            "RMS_DR": Keyword("RMS_DR", 1.5e-3),
            "MAX_ITER": Keyword("MAX_ITER", max_iter),
            "OPTIMIZER": Keyword("OPTIMIZER", optimizer),
        }
        geo_opt = Section("GEO_OPT", subsections={}, keywords=geo_opt_params)
        if not self.check("MOTION"):
            self.insert(Section("MOTION", subsections={}))
        self["MOTION"].insert(geo_opt)
        self.insert(global_section)
        # Print only the first and (explicitly marked) last optimization step.
        self.modify_dft_print_iters(max_iter + 1, add_last="numeric")
        self.update(override_default_params)
class CellOptSet(DftSet):
    """
    CP2K input set containing the basic settings for performing a cell (lattice)
    optimization. Values are all cp2k defaults, and should be good for most
    systems of interest.
    """

    def __init__(
        self,
        structure: Union[Structure, Molecule],
        project_name: str = "CellOpt",
        override_default_params: Union[Dict, None] = None,
        **kwargs,
    ):
        """
        Args:
            structure: Pymatgen structure object
            project_name (str): What to name this cp2k project (controls naming of files printed out)
            override_default_params (Dict): settings applied over the defaults after the set is built.
                Defaults to None (no overrides).
        """
        super().__init__(structure, **kwargs)
        # Avoid the shared-mutable-default pitfall.
        override_default_params = override_default_params or {}
        self.structure = structure
        self.project_name = project_name
        self.override_default_params = override_default_params
        self.kwargs = kwargs
        global_section = Global(project_name=project_name, run_type="CELL_OPT")
        self.insert(global_section)
        # NOTE(review): self.get("max_iter", 200) looks up an input keyword/section,
        # not an __init__ argument; since no max_iter is ever inserted it appears to
        # always fall back to 200 -- confirm this is the intended behavior.
        self.modify_dft_print_iters(self.get("max_iter", 200) + 1, add_last="numeric")
        self.update(override_default_params)
class HybridStaticSet(StaticSet):
    """
    Static calculation using hybrid DFT with the ADMM formalism in Cp2k.
    """

    def __init__(
        self,
        structure: Union[Structure, Molecule],
        hybrid_functional: str = "PBE0",
        hf_fraction: float = 0.25,
        project_name: str = "Hybrid-Static",
        gga_x_fraction: float = 0.75,
        gga_c_fraction: float = 1,
        override_default_params: Union[Dict, None] = None,
        max_memory: int = 2000,
        cutoff_radius: float = 8.0,
        omega: float = 0.2,
        aux_basis: Union[Dict, None] = None,
        admm: bool = True,
        eps_schwarz: float = 1e-6,
        eps_schwarz_forces: float = 1e-6,
        screen_on_initial_p: bool = True,
        screen_p_forces: bool = True,
        **kwargs,
    ):
        """
        Args:
            structure: pymatgen structure object
            hybrid_functional (str): hybrid dft method to use (currently select between HSE06 and PBE0)
            hf_fraction (float): percentage of exact HF to mix-in
            project_name (str): what to call this project
            gga_x_fraction (float): percentage of gga exchange to use
            gga_c_fraction (float): percentage of gga correlation to use
            override_default_params (Dict): override settings applied after the set is built.
                Defaults to None (no overrides).
            max_memory, cutoff_radius, omega, aux_basis, admm, eps_schwarz,
            eps_schwarz_forces, screen_on_initial_p, screen_p_forces:
                forwarded unchanged to DftSet.activate_hybrid(); see that method for details.
        """
        super().__init__(structure, project_name=project_name, **kwargs)
        # Avoid the shared-mutable-default pitfall.
        override_default_params = override_default_params or {}
        self.structure = structure
        self.hybrid_functional = hybrid_functional
        self.hf_fraction = hf_fraction
        self.project_name = project_name
        self.gga_x_fraction = gga_x_fraction
        self.gga_c_fraction = gga_c_fraction
        self.override_default_params = override_default_params
        self.max_memory = max_memory
        self.cutoff_radius = cutoff_radius
        self.omega = omega
        self.aux_basis = aux_basis
        self.admm = admm
        self.eps_schwarz = eps_schwarz
        self.eps_schwarz_forces = eps_schwarz_forces
        self.screen_on_initial_p = screen_on_initial_p
        self.screen_p_forces = screen_p_forces
        self.kwargs = kwargs
        self.activate_hybrid(
            hybrid_functional=hybrid_functional,
            hf_fraction=hf_fraction,
            gga_x_fraction=gga_x_fraction,
            gga_c_fraction=gga_c_fraction,
            max_memory=max_memory,
            cutoff_radius=cutoff_radius,
            omega=omega,
            aux_basis=aux_basis,
            admm=admm,
            eps_schwarz=eps_schwarz,
            eps_schwarz_forces=eps_schwarz_forces,
            screen_on_initial_p=screen_on_initial_p,
            screen_p_forces=screen_p_forces,
        )
        self.update(override_default_params)
class HybridRelaxSet(RelaxSet):
    """
    Geometry optimization (relaxation) using hybrid DFT with the ADMM formalism in Cp2k.
    """

    def __init__(
        self,
        structure: Union[Structure, Molecule],
        hybrid_functional: str = "PBE0",
        hf_fraction: float = 0.25,
        project_name: str = "Hybrid-Relax",
        gga_x_fraction: float = 0.75,
        gga_c_fraction: float = 1,
        override_default_params: Union[Dict, None] = None,
        max_memory: int = 2000,
        cutoff_radius: float = 8.0,
        omega: float = 0.2,
        aux_basis: Union[Dict, None] = None,
        admm: bool = True,
        eps_schwarz: float = 1e-6,
        eps_schwarz_forces: float = 1e-6,
        screen_on_initial_p: bool = True,
        screen_p_forces: bool = True,
        **kwargs,
    ):
        """
        Args:
            structure: pymatgen structure object
            hybrid_functional (str): hybrid dft method to use (currently select between HSE06 and PBE0)
            hf_fraction (float): percentage of exact HF to mix-in
            project_name (str): what to call this project
            gga_x_fraction (float): percentage of gga exchange to use
            gga_c_fraction (float): percentage of gga correlation to use
            override_default_params (Dict): override settings applied after the set is built.
                Defaults to None (no overrides).
            max_memory, cutoff_radius, omega, aux_basis, admm, eps_schwarz,
            eps_schwarz_forces, screen_on_initial_p, screen_p_forces:
                forwarded unchanged to DftSet.activate_hybrid(); see that method for details.
        """
        super().__init__(structure, project_name=project_name, **kwargs)
        # Avoid the shared-mutable-default pitfall.
        override_default_params = override_default_params or {}
        self.structure = structure
        self.hybrid_functional = hybrid_functional
        self.hf_fraction = hf_fraction
        self.project_name = project_name
        self.gga_x_fraction = gga_x_fraction
        self.gga_c_fraction = gga_c_fraction
        self.override_default_params = override_default_params
        self.max_memory = max_memory
        self.cutoff_radius = cutoff_radius
        self.omega = omega
        self.aux_basis = aux_basis
        self.admm = admm
        self.eps_schwarz = eps_schwarz
        self.eps_schwarz_forces = eps_schwarz_forces
        self.screen_on_initial_p = screen_on_initial_p
        self.screen_p_forces = screen_p_forces
        self.kwargs = kwargs
        self.activate_hybrid(
            hybrid_functional=hybrid_functional,
            hf_fraction=hf_fraction,
            gga_x_fraction=gga_x_fraction,
            gga_c_fraction=gga_c_fraction,
            max_memory=max_memory,
            cutoff_radius=cutoff_radius,
            omega=omega,
            aux_basis=aux_basis,
            admm=admm,
            eps_schwarz=eps_schwarz,
            eps_schwarz_forces=eps_schwarz_forces,
            screen_on_initial_p=screen_on_initial_p,
            screen_p_forces=screen_p_forces,
        )
        self.update(override_default_params)
class HybridCellOptSet(CellOptSet):
    """
    Cell (lattice) optimization using hybrid DFT with the ADMM formalism in Cp2k.
    """

    def __init__(
        self,
        structure: Union[Structure, Molecule],
        hybrid_functional: str = "PBE0",
        hf_fraction: float = 0.25,
        project_name: str = "Hybrid-CellOpt",
        gga_x_fraction: float = 0.75,
        gga_c_fraction: float = 1,
        override_default_params: Union[Dict, None] = None,
        max_memory: int = 2000,
        cutoff_radius: float = 8.0,
        omega: float = 0.2,
        aux_basis: Union[Dict, None] = None,
        admm: bool = True,
        eps_schwarz: float = 1e-6,
        eps_schwarz_forces: float = 1e-6,
        screen_on_initial_p: bool = True,
        screen_p_forces: bool = True,
        **kwargs,
    ):
        """
        Args:
            structure: pymatgen structure object
            hybrid_functional (str): hybrid dft method to use (currently select between HSE06 and PBE0)
            hf_fraction (float): percentage of exact HF to mix-in
            project_name (str): what to call this project
            gga_x_fraction (float): percentage of gga exchange to use
            gga_c_fraction (float): percentage of gga correlation to use
            override_default_params (Dict): override settings applied after the set is built.
                Defaults to None (no overrides).
            max_memory, cutoff_radius, omega, aux_basis, admm, eps_schwarz,
            eps_schwarz_forces, screen_on_initial_p, screen_p_forces:
                forwarded unchanged to DftSet.activate_hybrid(); see that method for details.
        """
        super().__init__(structure, project_name=project_name, **kwargs)
        # Avoid the shared-mutable-default pitfall.
        override_default_params = override_default_params or {}
        self.structure = structure
        self.hybrid_functional = hybrid_functional
        self.hf_fraction = hf_fraction
        self.project_name = project_name
        self.gga_x_fraction = gga_x_fraction
        self.gga_c_fraction = gga_c_fraction
        self.override_default_params = override_default_params
        self.max_memory = max_memory
        self.cutoff_radius = cutoff_radius
        self.omega = omega
        self.aux_basis = aux_basis
        self.admm = admm
        self.eps_schwarz = eps_schwarz
        self.eps_schwarz_forces = eps_schwarz_forces
        self.screen_on_initial_p = screen_on_initial_p
        self.screen_p_forces = screen_p_forces
        self.kwargs = kwargs
        self.activate_hybrid(
            hybrid_functional=hybrid_functional,
            hf_fraction=hf_fraction,
            gga_x_fraction=gga_x_fraction,
            gga_c_fraction=gga_c_fraction,
            max_memory=max_memory,
            cutoff_radius=cutoff_radius,
            omega=omega,
            aux_basis=aux_basis,
            admm=admm,
            eps_schwarz=eps_schwarz,
            eps_schwarz_forces=eps_schwarz_forces,
            screen_on_initial_p=screen_on_initial_p,
            screen_p_forces=screen_p_forces,
        )
        self.update(override_default_params)
| jsyony37/pymatgen | pymatgen/io/cp2k/sets.py | sets.py | py | 48,957 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pymatgen.io.cp2k.inputs.Cp2kInput",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "pymatgen.... |
2481003217 | import numpy as np
import pandas as pd
import networkx as nx
import sys
def main(argv):
if len(argv) != 1:
raise ValueError("Must pass in graph representation input")
df = pd.read_csv(argv[0], sep="\t")
assert len(df.index) > 0
DEBUG = False
G = nx.from_pandas_edgelist(
df,
"FromNodeId",
"ToNodeId")
if DEBUG:
print("G number of nodes: ", G.number_of_nodes())
print("G max node: ", max(list(G.nodes)))
print("G number of edges: ", G.number_of_edges())
print("Number of edges for node 9721: ", G.degree[9721])
print("9721 neighbors: ", G.edges(9721))
assert G.number_of_edges() > 0
d = nx.shortest_path_length(G, source=0)
d_sorted = dict(sorted(d.items()))
p = nx.shortest_path(G, source=0)
f = open("python_shortest_path_outs.txt", "w")
f.write("source\tdistance\n")
for (key, value) in d_sorted.items():
f.write(f"{key}\t{value}\n")
print(p[1969993][-1])
if __name__ == "__main__":
main(sys.argv[1:])
| FelixHohne/cuda_shortest_paths | src/correct.py | correct.py | py | 1,032 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "networkx.from_pandas_edgelist",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "networkx.shortest_path_length",
"line_number": 28,
"usage_type": "call"
},
{
"api_n... |
2229652486 | from sys import argv
from stenograpi import Stenograpi
from argparse import ArgumentParser
def parse_args(args):
    """Parse Stenograpi's command-line arguments.

    Args:
        args: Argument list (e.g. ``sys.argv[1:]``).

    Returns:
        argparse.Namespace with hostname, port, app-hostname and app-port.
    """
    parser = ArgumentParser(
        prog='stenograpi.py',
        description='Document your HTTP API automatically through tests.',
    )
    # (name, type, help) for each required positional argument, in order.
    positionals = (
        ('hostname', str, 'hostname of Stenograpi'),
        ('port', int, 'port Stenograpi should listen on'),
        ('app-hostname', str, 'hostname of your app'),
        ('app-port', int, 'port your app is listening on'),
    )
    for name, arg_type, help_text in positionals:
        parser.add_argument(name, type=arg_type, help=help_text)
    return parser.parse_args(args)
if __name__ == '__main__':
args = parse_args(argv[1:])
stenograpi = Stenograpi(args.hostname, args.port, getattr(args, 'app-port'))
stenograpi.listen()
request = stenograpi.get_latest_request()
print(request)
| michaelmcmillan/Stenograpi | src/main.py | main.py | py | 849 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "stenograpi.Stenograpi",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "stenograpi.list... |
27800773574 | import numpy as np
import matplotlib.pyplot as plt
import math
def quad2(x_o=[-9.0,9.0],a=2.0,eta=0.1,threshold=0.01,maxiter=1000,alpha=0,anim = 1):
    """Minimize the quadratic f(x) = (a*x1^2 + x2^2)/2 by gradient descent with momentum.

    Args:
        x_o: Starting point [x1, x2]. (Only rebound, never mutated, so the
            mutable default is harmless here.)
        a: Curvature along the first axis.
        eta: Learning rate (step size).
        threshold: Stop when the objective drops below this value.
        maxiter: Maximum number of iterations before giving up.
        alpha: Momentum coefficient (0 = plain gradient descent).
        anim: If truthy, animate each step on the current matplotlib contour plot.

    Returns:
        (converged, iterations): converged is False if maxiter was reached,
        True otherwise; iterations counts the steps taken.
    """
    it = 0
    # Build a 21x21 grid over [-10, 10]^2 and draw contours of the objective.
    x1 = np.linspace(-10,10,21)
    x2 = np.linspace(-10,10,21)
    [X,Y] = np.meshgrid(x1,x2)
    Y = (a*X**2+Y**2)/2
    plt.clf()
    plt.contour(Y,10)
    # Relabel tick positions (grid indices 0..20) with the data range -10..10.
    plt.xticks([0,5,10,15,20],[-10, -5, 0, 5, 10])
    plt.yticks([0,5,10,15,20],[-10, -5, 0, 5, 10])
    ax = plt.gca()
    ax.set_aspect('equal','box')
    plt.tight_layout()
    f = (a*x_o[0]**2+x_o[1]**2)/2
    # Momentum accumulator (exponential moving average of gradients).
    varx = np.array([0,0])
    ###Gradient Method####
    while it != maxiter:
        fold = f
        # Analytic gradient of the quadratic: (a*x1, x2).
        grad = np.array([a*x_o[0], x_o[1]])
        varx = alpha*varx+(1-alpha)*grad
        x_old = np.asarray(x_o)
        x_o = np.asarray(x_o-eta*varx)
        try:
            f = (a*x_o[0]**2+x_o[1]**2)/2
            if (f < threshold or fold < threshold):
                break
            else:
                if anim:
                    # Offset by +10 to map data coords [-10,10] onto grid indices [0,20].
                    plt.plot([x_old[0]+10, x_o[0]+10],[x_old[1]+10,x_o[1]+10],'r.-')
                    plt.pause(0.2)
                it += 1
        except:
            # NOTE(review): numpy overflow typically yields inf with a warning rather
            # than raising, so this divergence branch may never trigger -- confirm.
            print('Diverged!')
            #plt.show()
            break
    if it == maxiter:
        return False, it
    else:
        return True, it+1
if __name__ == "__main__":
eta = (0.003, 0.01, 0.03, 0.1, 0.3, 1, 3, 10)
alpha = (0, 0.5, 0.7, 0.9, 0.95)
for e in eta:
for a in alpha:
conv, iters = quad2(a=20.0,eta=e,anim = 0,alpha=a)
if conv:
string = f"{iters} iterations"
else:
string = "Diverged!"
print(f"eta: {e}, alpha: {a} ->" + string)
| Alexandre425/aa | lab2/part3.py | part3.py | py | 1,763 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.linspace",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.clf",
... |
11297389625 | """
Author: Emanuele Albini
Adapter from a counterfactual method to background generator.
"""
from typing import Union
import numpy as np
from ..base import (BaseBackgroundGenerator, BackgroundGenerator, CounterfactualMethod, MultipleCounterfactualMethod,
ListOf2DArrays)
from ..utils import (
get_top_counterfactuals,
expand_dims_counterfactuals,
)
__all__ = ['CounterfactualMethodBackgroundGeneratorAdapter']
class CounterfactualMethodBackgroundGeneratorAdapter(BaseBackgroundGenerator, BackgroundGenerator):
    """Adapter to make a counterfactual method into a background generator"""
    def __init__(
        self,
        counterfactual_method: CounterfactualMethod,
        n_top: Union[None, int] = None,  # By default: All
    ):
        """
        Args:
            counterfactual_method (CounterfactualMethod): The counterfactual method
            n_top (Union[None, int], optional): Number of top-counterfactuals to select as background. Defaults to None (all).
        """
        self.counterfactual_method = counterfactual_method
        self.n_top = n_top

    def get_backgrounds(self, X: np.ndarray) -> ListOf2DArrays:
        """Generate the background datasets for each query instance

        Args:
            X (np.ndarray): The query instances

        Returns:
            ListOf2DArrays: A list/array of background datasets
                nb_query_intances x nb_background_points x nb_features
        """
        X = self.preprocess(X)

        # If we do not have any background then we compute it
        # NOTE(review): the MultipleCounterfactualMethod branch must be checked
        # first -- presumably it subclasses CounterfactualMethod, so swapping
        # these isinstance checks would change the dispatch; confirm.
        if isinstance(self.counterfactual_method, MultipleCounterfactualMethod):
            if self.n_top is None:
                # All counterfactuals become background points.
                return self.counterfactual_method.get_multiple_counterfactuals(X)
            elif self.n_top == 1:
                # Single counterfactual per instance, expanded to shape (n, 1, d).
                return expand_dims_counterfactuals(self.counterfactual_method.get_counterfactuals(X))
            else:
                # Keep only the n_top counterfactuals closest to each query instance.
                return get_top_counterfactuals(
                    self.counterfactual_method.get_multiple_counterfactuals(X),
                    X,
                    n_top=self.n_top,
                    nan=False,
                )
        elif isinstance(self.counterfactual_method, CounterfactualMethod):
            if self.n_top is not None and self.n_top != 1:
                # NOTE(review): message has a typo ("supportthe"); left as-is since
                # it is a runtime string.
                raise ValueError('Counterfactual methodology do not supportthe generation of multiple counterfactuals.')
            # One counterfactual per instance -> add the middle axis: (n, 1, d).
            return np.expand_dims(self.counterfactual_method.get_counterfactuals(X), axis=1)
        else:
            raise NotImplementedError('Unsupported CounterfactualMethod.')
{
"api_name": "base.BaseBackgroundGenerator",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "base.BackgroundGenerator",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "base.CounterfactualMethod",
"line_number": 24,
"usage_type": "name"
},
{
"a... |
38247937244 |
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
import gif
import matplotlib.animation as animation
train = np.loadtxt('data/click.csv', delimiter=',', dtype='int', skiprows=1)
train_x = train[:, 0]
train_y = train[:, 1]
mu = train_x.mean()
sigma = train_x.std()
# zscore ๆ ๅๅ๏ผmeanๅไธบ0๏ผsigamaๅไธบ1
def standardize(x):
    """Z-score standardize x using the module-level training mean (mu) and std (sigma)."""
    return (x - mu) / sigma
train_z = standardize(train_x)
# Prediction function (first-order linear model): f(x) = theta0 + theta1 * x
def f(x):
    """Linear prediction theta0 + theta1 * x (reads the module-level parameters)."""
    return theta0 + theta1 * x
# ็ฎๆ ๅฝๆฐ
def E(x, y):
    """Objective: half the sum of squared errors between y and the prediction f(x)."""
    return 0.5 * np.sum((y - f(x)) ** 2)
#ไปฃไปทๅฝๆฐ
theta1 = np.arange(-10, 10, 0.1)
theta0 = np.arange(-10, 10, 0.1)
theta1, theta0 = np.meshgrid(theta1, theta0)
print(theta1)  # print the theta1 meshgrid
print(theta0)
es = 0
#ๅ ไธบtheta1ๅtheta0ๅทฒ็ปๅๆไบ็ฝๆ ผ็ฉ้ตไบ
# We cannot evaluate the loss surface for all samples in one vectorized step here,
# so accumulate the squared error over the grid one sample at a time.
n = len(train_y)
for i in range(n):
y_pre = theta1*train_x[i]+theta0 # ๅๅบไธไธชๆ ทๆฌๅจ็ฝๆ ผ็ฉ้ตไธ่ฎก็ฎ๏ผๅพๅฐไธไธช้ขๆต็ฉ้ต
e = (train_y[i]-y_pre)**2 # ๆ ๅๅผๅๅป้ขๆต๏ผ็ฉ้ต๏ผๅพๅฐๆนๅทฎ็ฉ้ต
es += e # ๆๅๆ ทๆฌไธ็ๆนๅทฎ็ฉ้ตไธๆญ็ดฏๅ ๅฐesไธ
es = es/n # ๆฑๅนณๅๅผ๏ผ่ฟๆ ทesๆนๅทฎ็ฉ้ตๆฏไธช็น็ไฝ็ฝฎๅฐฑๆฏๅฏนๅบ็theta1ๅtheta0็ฉ้ตๆฏไธช็นไฝ็ฝฎ้ขๆตๅพๅฐ็ๆนๅทฎ
fig = plt.figure()
ax = Axes3D(fig)
ax.set_zlim(0, 500000)
#plot_surfaceๅฝๆฐ็ปๅถๆฒ้ข
#cmap='rainbow่กจ็คบๅฝฉ่นๅพ๏ผ็จไธๅ็้ข่ฒ่กจ็คบไธๅๅผ๏ผ
ax.plot_surface(theta1, theta0, es, cmap='rainbow')
#ๆพ็คบๅพๅ
plt.show()
# ๅญฆไน ็
ETA = 1e-3
# ่ฏฏๅทฎ็ๅทฎๅผ
diff = 1
# ๆดๆฐๆฌกๆฐ
epoch = 0
# ๅๆฐๅๅงๅ
theta0 = np.random.rand()
theta1 = np.random.rand()
# ็ดๅฐ่ฏฏๅทฎ็ๅทฎๅผๅฐไบ 0.01 ไธบๆญข๏ผ้ๅคๅๆฐๆดๆฐ
error = E(train_z, train_y)
errors = []
errors.append(error)
# ๏ผ่ฟ้็จdiffๆฅๅคๆญๆฏๅฆๅๆญขๅพช็ฏ๏ผไนๅฐฑๆฏๅฝๆถๆๆถๅๆญขๅพช็ฏ๏ผๆ็ๆไบไบบ็ไปฃ็ ๆฏ็จepochๆฅ่งๅฎ่ฟญไปฃๆฌกๆฐ๏ผๆไปฅๆบๅจๅญฆไน ้ๆฏ็จๅชไธไธช๏ผ
fig = plt.figure()
ims = []
plt.xlim(-3, 3)
plt.ylim(0, 700)
x = np.linspace(-3, 3, 100)
while diff > 1e-2:
    # Stage the updates in temporaries so theta0 and theta1 are updated
    # simultaneously (updating one directly would affect the other's update).
tmp_theta0 = theta0 - ETA * np.sum((f(train_z) - train_y))
tmp_theta1 = theta1 - ETA * np.sum((f(train_z) - train_y) * train_z)
# ๆดๆฐๅๆฐ
theta0 = tmp_theta0
theta1 = tmp_theta1
# ่ฎก็ฎไธไธไธๆฌก่ฏฏๅทฎ็ๅทฎๅผ
error = E(train_z, train_y)
errors.append(error)
diff = abs(errors[-2]-errors[-1])
# ่พๅบๆฅๅฟ
epoch += 1
log = 'Epoch {} : theta0 = {:.3f}, theta1 = {:.3f}, diff = {:.4f}'
print(log.format(epoch, theta0, theta1, diff))
# ๅจๆๆพ็คบf(x)ๅๅๅพ
# plt.text(0.8, 1, f'Epoch:{epoch:},Loss:{error:.2f}')
im = plt.plot(train_z, train_y, 'o', color="blue") + plt.plot(x, f(x))
ims.append(im)
ani = animation.ArtistAnimation(fig, ims, interval=20, repeat_delay=1000)
ani.save("regression1_linear.gif", writer='pillow')
# # ็ปๅพ็กฎ่ฎค
plt.clf()
plt.plot(train_z, train_y, 'o')
plt.plot(x, f(x))
plt.show()
# # error ไธๆญๅจไธ้
x = np.arange(len(errors))
plt.plot(x, errors)
plt.show()
| Achuan-2/machine_learning_record | 01_regression/regression1_linear.py | regression1_linear.py | py | 3,310 | python | zh | code | 2 | github-code | 1 | [
{
"api_name": "numpy.loadtxt",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number":... |
30344410584 | # This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
# import os
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
from tensorflow.python.keras.layers import Dense, Dropout, Input, Conv2D, Flatten
from tensorflow.python.keras.models import Sequential, Model
from tensorflow.python.keras.layers.advanced_activations import LeakyReLU
from tensorflow.python.keras.optimizers import adam
from tensorflow.python.keras.layers import Reshape, Conv2DTranspose, BatchNormalization, Activation
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
import glob
from PIL import Image
# --- Hyperparameters and dataset paths -------------------------------------
num_epochs = 50
batch_size = 32
input_height = 32
input_width = 32
output_height = 256
output_width = 256
train_dir = '/kaggle/input/flower-enhance/train/'
val_dir = '/kaggle/input/flower-enhance/test/'
# Pair every 32x32 "*-in.jpg" with its 256x256 "*-out.jpg" counterpart.
train_size = len(
    glob.glob(train_dir + "/*-in.jpg"))
small_input_filenames = glob.glob(train_dir + "/*-in.jpg")
large_input_filenames = glob.glob(train_dir + "/*-out.jpg")
# small_images = np.zeros(
#     (train_size, input_width, input_height, 3))
# NOTE(review): both arrays are uint8 but are assigned values normalized to
# [-1, 1] below; the assignment truncates/wraps them -- likely the dtype
# should be float32. Confirm before relying on these arrays.
Small_images = np.empty((train_size, 32*32*3), dtype=np.uint8)
Large_images = np.empty((train_size, 256, 256, 3), dtype=np.uint8)
for i in range(train_size):
    small_img = small_input_filenames[i]
    large_img = large_input_filenames[i]
    # Normalize pixel values from [0, 255] to [-1, 1] and flatten the small image.
    small_images = np.array(Image.open(small_img))
    small_images = (small_images.astype(np.float32) - 127.5)/127.5
    small_images = small_images.reshape(1, input_width*input_height*3)
    Small_images[i] = small_images
    large_images = np.array(Image.open(large_img))
    large_images = (large_images.astype(np.float32) - 127.5) / 127.5
    # large_images = large_images.reshape(1, input_width * input_height * 3)
    Large_images[i] = large_images
def adam_optimizer():
    """Return an Adam optimizer (lr=2e-4, beta_1=0.5) used to compile both GAN sub-models."""
    return adam(lr=0.0002, beta_1=0.5)
z_dim = input_height*input_width*3
def create_generator():
    """Build and compile the generator.

    Maps a flattened 32x32x3 input vector (length z_dim) through a dense
    projection and three stride-2 transposed convolutions up to a 256x256x3
    image with tanh-scaled outputs in [-1, 1].
    """
    model = Sequential()

    # Project and reshape the input into a 32x32x64 tensor via a fully connected layer
    model.add(Dense(64 * 32 * 32, input_dim=z_dim))
    model.add(Reshape((32, 32, 64)))

    model.add(Conv2DTranspose(  # Transposed convolution, 32x32x64 -> 64x64x128 tensor
        128, kernel_size=3, strides=2, padding='same'))

    model.add(BatchNormalization())  # Batch normalization

    model.add(LeakyReLU(alpha=0.01))  # Leaky ReLU

    model.add(Conv2DTranspose(  # Transposed convolution, 64x64x128 -> 128x128x64 tensor
        64, kernel_size=3, strides=2, padding='same'))

    model.add(BatchNormalization())  # Batch normalization

    model.add(LeakyReLU(alpha=0.01))  # Leaky ReLU

    model.add(Conv2DTranspose(  # Transposed convolution, 128x128x64 -> 256x256x3 tensor
        3, kernel_size=3, strides=2, padding='same'))

    model.add(Activation('tanh'))  # Tanh activation
    model.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
    model.summary()
    return model
def create_discriminator():
    """Build and compile the discriminator.

    A stack of three stride-2 convolutions (32/64/128 filters) with LeakyReLU,
    dropout and batch norm, flattened into a single sigmoid real/fake score
    for a 256x256x3 input image.
    """
    model = Sequential()

    model.add(Conv2D(32, kernel_size=3, strides=2, padding="same", input_shape=(256, 256, 3)))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.25))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer=adam_optimizer())
    model.summary()
    return model
def image_generator(batch_size, img_dir):
    """A generator that yields (small_images, large_images) batches forever.

    small_images is (batch_size, 32*32*3) with values scaled to [0, 1];
    large_images is (batch_size, 256, 256, 3), also in [0, 1].
    DO NOT ALTER the validation set.
    """
    input_filenames = glob.glob(img_dir + "/*-in.jpg")
    counter = 0
    # Shuffle once; subsequent epochs replay the same order.
    random.shuffle(input_filenames)
    while True:
        small_images = np.zeros(
            (batch_size, 32, 32, 3))
        large_images = np.zeros(
            (batch_size, 256, 256, 3))
        # Wrap around when a full batch no longer fits in the remaining files.
        if counter+batch_size >= len(input_filenames):
            counter = 0
        for i in range(batch_size):
            img = input_filenames[counter + i]
            small_images[i] = np.array(Image.open(img)) / 255.0
            # The paired target has the same stem with "-out.jpg".
            large_images[i] = np.array(
                Image.open(img.replace("-in.jpg", "-out.jpg"))) / 255.0
        # Flatten the small images for the generator's dense input layer.
        small_images = small_images.reshape(batch_size, 32*32*3)
        yield (small_images, large_images)
        counter += batch_size
def create_gan(discriminator, generator):
    """Chain generator -> discriminator into the combined GAN model.

    The discriminator is frozen inside this combined model so that training
    it only updates the generator's weights.
    """
    discriminator.trainable = False
    # NOTE(review): Input(shape=...) conventionally takes a tuple; confirm the
    # bare int 32*32*3 is accepted by this keras version.
    gan_input = Input(shape=32*32*3)
    x = generator(gan_input)
    gan_output = discriminator(x)
    gan = Model(inputs=gan_input, outputs=gan_output)
    gan.compile(loss='binary_crossentropy', optimizer='adam')
    return gan
def plot_generated_images(train_dir, epoch, generator, examples=100, dim=(10,10), figsize=(10,10)):
    """Sample a batch of inputs, run the generator, and save a grid of outputs.

    Saves the figure as 'gan_generated_image <epoch>.png'.
    NOTE(review): the batch size and reshape are hard-coded to 100, so values
    of `examples` other than 100 would break the reshape -- confirm.
    """
    img_gen_object = image_generator(100, train_dir)
    small_images, large_images = next(img_gen_object)
    # Pick `examples` random small images (with replacement) as generator input.
    noise = small_images[np.random.randint(low=0, high=small_images.shape[0], size=examples)]
    # Rescale generator output (tanh range) back toward pixel values.
    generated_images = generator.predict(noise)*255
    generated_images = generated_images.reshape(100, 256, 256, 3)
    plt.figure(figsize=figsize)
    for i in range(generated_images.shape[0]):
        plt.subplot(dim[0], dim[1], i+1)
        plt.imshow(generated_images[i], interpolation='nearest')
        plt.axis('off')
    plt.tight_layout()
    plt.savefig('gan_generated_image %d.png' %epoch)
def training(train_dir, epochs=1, batch_size=32):
    """Run the adversarial training loop for the super-resolution GAN.

    Per epoch: draw one batch from `image_generator`, then alternate
    `batch_size` rounds of (a) discriminator training on real+fake images
    and (b) generator training through the frozen-discriminator GAN.
    Snapshots a sample grid at epoch 1 and every 20th epoch.
    """
    generator = create_generator()
    discriminator = create_discriminator()
    gan = create_gan(discriminator, generator)
    for e in range(1, epochs + 1):
        img_gen_object = image_generator(batch_size, train_dir)
        small_images, large_images = next(img_gen_object)
        print("Epoch %d" % e)
        for _ in tqdm(range(batch_size)):
            # Flattened small images act as the generator's "noise" input.
            noise = small_images[np.random.randint(low=0, high=small_images.shape[0], size=batch_size)]
            generated_images = generator.predict(noise)
            # Random set of real high-resolution images.
            image_batch = large_images[np.random.randint(low=0, high=large_images.shape[0], size=batch_size)]
            X = np.concatenate([image_batch, generated_images])
            # One-sided label smoothing: real -> 0.9, fake -> 0.0.
            y_dis = np.zeros(2 * batch_size)
            y_dis[:batch_size] = 0.9
            # Train the discriminator on the mixed real/fake batch.
            discriminator.trainable = True
            discriminator.train_on_batch(X, y_dis)
            # Train the generator: label its outputs as "real" while the
            # discriminator's weights stay frozen inside the chained GAN.
            noise = small_images[np.random.randint(low=0, high=small_images.shape[0], size=batch_size)]
            y_gen = np.ones(batch_size)
            discriminator.trainable = False
            gan.train_on_batch(noise, y_gen)
        if e == 1 or e % 20 == 0:
            plot_generated_images(train_dir, e, generator)
# Entry point: train the GAN for 50 epochs with batch size 32
# (train_dir is expected to be defined earlier in the script).
training(train_dir, 50, 32)
| sathwikgs/Superres_GAN | first_gan.py | first_gan.py | py | 8,621 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "glob.glob",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 46,
... |
71526899233 | # -*- coding: utf-8 -*-
"""{{ cookiecutter.repo_name }} URL Configuration
https://docs.djangoproject.com/en/1.8/topics/http/urls/
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.storage import staticfiles_storage
from django.http import HttpResponse
from django.views.defaults import (
server_error
)
from django.views.generic.base import RedirectView
from wagtail.contrib.wagtailapi import urls as wagtailapi_urls
from wagtail.wagtailcore import urls as wagtail_urls
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailsearch import urls as wagtailsearch_urls
urlpatterns = [
    # www.example.com/500.html -- exercise the server-error view directly.
    url(
        regex=r'^500/$',
        view=server_error),
    # Serve the favicon via a redirect into the static storage.
    # Fixed: the dot must be escaped, otherwise e.g. "faviconXico" matches too.
    url(
        regex=r'^favicon\.ico$',
        view=RedirectView.as_view(
            url=staticfiles_storage.url('favicon.ico'),
            permanent=False),
        name="favicon"
    ),
    # www.example.com/django-admin
    url(
        regex=r'^django-admin/',
        view=include(admin.site.urls)),
    # www.example.com/admin
    url(
        regex=r'^admin/',
        view=include(wagtailadmin_urls)),
    # www.example.com/search
    url(
        regex=r'^search/',
        view=include(wagtailsearch_urls)),
    # www.example.com/documents
    url(
        regex=r'^documents/',
        view=include(wagtaildocs_urls)),
    # www.example.com/api/v1/{pages, images, documents}
    url(
        regex=r'^api/',
        view=include(wagtailapi_urls)),
    # www.example.com/robots.txt -- must precede the wagtail catch-all below;
    # dot escaped so only the literal path matches.
    url(
        regex=r'^robots\.txt$',
        view=lambda r: HttpResponse("User-agent: *\nDisallow: /",
                                    content_type="text/plain")),
    # www.example.com -- wagtail page serving; keep last, the empty
    # regex matches every remaining path.
    url(
        regex=r'',
        view=include(wagtail_urls)),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# URL pattern for django-debug-toolbar, mounted only when DEBUG is on.
if settings.DEBUG:
    import debug_toolbar
    urlpatterns += [
        url(r'^__debug__/', include(debug_toolbar.urls)),
    ]
| athomasoriginal/starterkit-wagtail | {{cookiecutter.repo_name}}/src/config/urls.py | urls.py | py | 2,192 | python | en | code | 52 | github-code | 1 | [
{
"api_name": "django.conf.urls.url",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.views.defaults.server_error",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.url",
"line_number": 32,
"usage_type": "call"
},
{
"api_... |
22918921227 | import zipfile
import os
import torchvision.transforms as transforms
# once the images are loaded, how do we pre-process them before being passed into the network
# by default, we resize the images to 64 x 64 in size
# and normalize them to mean = 0 and standard-deviation = 1 based on statistics collected from
# the training set
# ImageNet channel statistics, used to normalise inputs for pretrained backbones.
mean_nums = [0.485, 0.456, 0.406]
std_nums = [0.229, 0.224, 0.225]
# Training pipeline for ResNet (224x224 input) with light augmentation
# (small rotations and horizontal flips).
resnet_train_transforms = transforms.Compose([
transforms.Resize(size=224),
transforms.CenterCrop(size=224),
transforms.RandomRotation(degrees=10),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean_nums, std_nums)
])
# Evaluation pipeline for ResNet: deterministic resize/crop only, no augmentation.
resnet_test_transforms = transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean_nums, std_nums)
])
# Training pipeline for Inception v3, which expects 299x299 inputs.
inceptionv3_train_transforms = transforms.Compose([
transforms.RandomRotation(degrees=10),
transforms.RandomHorizontalFlip(),
transforms.Resize(299),
transforms.CenterCrop(299),
transforms.ToTensor(),
transforms.Normalize(mean_nums, std_nums)
])
# Evaluation pipeline for Inception v3: deterministic resize/crop only.
inceptionv3_test_transforms = transforms.Compose([
transforms.Resize(299),
transforms.CenterCrop(299),
transforms.ToTensor(),
transforms.Normalize(mean_nums, std_nums)
])
# Reference hyper-parameters for Inception v3; 'misc' records optimiser
# details that are not encoded as individual keys.
inceptionv3_params = {
'batch_size': 32,
'momentum': 0.9,
'lr': 0.045,
'misc': 'RMSProp(0.9, eps=1.0), exp lr decay 0.94 every 2 epochs, grad clipping for stability',
{
"api_name": "torchvision.transforms.Compose",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 15,
"usage_type": "call"
},
{
... |
38420721323 | from django.contrib.auth.models import User
from django.db import models
class Location(models.Model):
    """A place in one of the show's worlds, optionally credited to a user."""
    name = models.CharField(max_length=255)
    world = models.CharField(max_length=255)
    description = models.TextField()
    image = models.ImageField(upload_to='photos/')
    # Nullable so locations can exist without an authoring user.
    author = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True)

    def __str__(self):
        return self.name
class Character(models.Model):
    """A character, linked to both a birthplace and a current location."""
    name = models.CharField(max_length=255)
    birth_location = models.ForeignKey(Location, on_delete=models.CASCADE, related_name='birth_location')
    location = models.ForeignKey(Location, on_delete=models.CASCADE, related_name='current_location')
    description = models.TextField()
    image = models.ImageField(upload_to='photos/')
    # (stored value, human-readable label) pairs.
    GENDER = (
        ('male', 'ะัะถัะบะพะน'),
        ('female', 'ะะตะฝัะบะธะน'),
        ('unknown', 'ะะตะธะทะฒะตััะฝะพ')
    )
    gender = models.CharField(max_length=10, choices=GENDER)
    race = models.CharField(max_length=55)
    DEAD = (
        ('dead', 'ะะตััะฒัะน'),
        ('alive', 'ะะธะฒะพะน')
    )
    dead = models.CharField(max_length=10, choices=DEAD)

    def __str__(self):
        return self.name
class Episode(models.Model):
    """A single episode; `character` lists everyone appearing in it."""
    name = models.CharField(max_length=255)
    number = models.IntegerField()
    number_season = models.IntegerField()
    image = models.ImageField(upload_to='photos/')
    description = models.TextField()
    premiere = models.DateField()
    character = models.ManyToManyField(Character, related_name='char_to_ep')

    def __str__(self):
        # e.g. "1 + 3" for season 1, episode 3.
        return f"{self.number_season} + {self.number}"
| asylburkitbayev/rickmorty | main/models.py | models.py | py | 1,656 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.models.Model",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "... |
644470377 | import aiohttp
import asyncio
import json
from . import errors
async def json_or_text(response):
    """Decode an HTTP response as JSON when its Content-Type says so,
    otherwise return the raw text body."""
    text = await response.text(encoding="utf-8")
    try:
        if "application/json" in response.headers["Content-Type"]:
            return json.loads(text)
    except KeyError:
        # Thanks Cloudflare: some error pages omit the Content-Type header.
        pass
    return text
class Route:
    """A fully-qualified Tenor v1 API endpoint."""

    BASE = "https://g.tenor.com/v1"

    def __init__(self, path):
        # `path` must start with "/" (e.g. "/search").
        self.url = f"{self.BASE}{path}"
class HTTPClient:
    """Thin async wrapper around the Tenor v1 REST API."""

    def __init__(self, api_key):
        self.api_key = api_key
        self.session = None  # created lazily on the first request

    async def request(self, route, parameters=None):
        """GET `route` with `parameters`, retrying up to 5 times on 429.

        Raises TenorException for in-band errors, NotFound on 404, and
        TenorServerError when retries are exhausted on a 5xx status.
        """
        if parameters is None:
            parameters = {}
        parameters["key"] = self.api_key
        if self.session is None:
            self.session = await self._create_session()
        # Drop None-valued parameters so they are not sent as "None".
        params = {}
        for k, v in parameters.items():
            if v is not None:
                params[k] = v
        for tries in range(5):
            async with self.session.get(route.url, params=params) as resp:
                data = await json_or_text(resp)
                # Tenor reports some errors in-band with a 200 status.
                if "error" in data and resp.status == 200:
                    raise errors.TenorException(data["error"])
                if 200 <= resp.status < 300:
                    return data
                if resp.status == 404:
                    raise errors.NotFound(data)
                if resp.status == 429:
                    # we are rate limited, wait for 30s and retry
                    await asyncio.sleep(30)
                    continue
                return data
        # Reached only after exhausting the retries above.
        # NOTE(review): after five rate-limited attempts with a non-5xx
        # status this falls through and implicitly returns None.
        if resp.status >= 500:
            raise errors.TenorServerError(data)

    async def _create_session(self):
        return aiohttp.ClientSession()

    async def close(self):
        if self.session:
            await self.session.close()

    # One thin wrapper per Tenor v1 endpoint; each forwards its keyword
    # arguments straight through as query parameters.
    async def search(self, **parameters):
        return await self.request(Route("/search"), parameters=parameters)

    async def trending(self, **parameters):
        return await self.request(Route("/trending"), parameters=parameters)

    async def categories(self, **parameters):
        return await self.request(Route("/categories"), parameters=parameters)

    async def search_suggestions(self, **parameters):
        return await self.request(Route("/search_suggestions"), parameters=parameters)

    async def autocomplete(self, **parameters):
        return await self.request(Route("/autocomplete"), parameters=parameters)

    async def trending_terms(self, **parameters):
        return await self.request(Route("/trending_terms"), parameters=parameters)

    async def registershare(self, **parameters):
        return await self.request(Route("/registershare"), parameters=parameters)

    async def gifs(self, **parameters):
        return await self.request(Route("/gifs"), parameters=parameters)

    async def random(self, **parameters):
        return await self.request(Route("/random"), parameters=parameters)

    async def anonid(self, **parameters):
        return await self.request(Route("/anonid"), parameters=parameters)
| Snaptraks/aiotenor | aiotenor/http.py | http.py | py | 3,424 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.loads",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "aiohttp.ClientSession",
"line_number": 69,
"usage_type": "call"
}
] |
21103763023 | from typing import Dict, List, Union, Tuple
from urllib.parse import urljoin
import requests
from PIL import Image
from bs4 import BeautifulSoup
from email_validator import validate_email, EmailNotValidError
from plan_ilan.data_mining.staff.lookup_parameters import StaffLookup, StaffLookupAnswer
from plan_ilan.apps.web_site.models import DepartmentEnum
class StaffCrawler:
    """An object that crawls over different staff members web pages according to their attributes."""

    def __init__(self, url: Union[str, List[str]], department: DepartmentEnum, staff_lookup: StaffLookup,
                 **kwargs) -> None:
        # Accept either a single URL or a list of URLs.
        self._urls = url if isinstance(url, list) else [url]
        self._department = department
        self.staff_lookup = staff_lookup
        self._has_photos = kwargs.get('has_photos', True)
        self._email_suffix = kwargs.get('email_suffix', '')
        self.__teachers_data = None  # lazily populated by crawl()

    @property
    def urls(self) -> List[str]:
        """Return the urls of the crawled web pages."""
        return self._urls

    @property
    def department(self):
        """Returns the department of the crawled web page."""
        return self._department

    def get_teachers_data(self) -> List[StaffLookupAnswer]:
        """Crawl on first access, then serve the cached results."""
        if self.__teachers_data is None:
            self.crawl()
        return self.__teachers_data

    def crawl(self) -> int:
        """The main method for this interface, crawls over the staff members web page.

        Returns the amount of valid entries found.
        """
        self.__teachers_data = []
        for url in self.urls:
            response = requests.get(url)
            if not response:
                continue
            soup = BeautifulSoup(response.text, 'html.parser')
            persons = self.staff_lookup.persons.get_tags(soup)
            for person in persons:
                teacher_data = StaffLookupAnswer()
                details_url = self.staff_lookup.details_url.get_single_tag(person)
                if details_url is None:
                    continue
                details_url = details_url['href'].strip()
                teacher_data.title = self.staff_lookup.params.title.get_values_from_tag(person)
                teacher_data.name = self.staff_lookup.params.name.get_values_from_tag(person)
                if not teacher_data.is_valid:
                    continue
                teacher_response = requests.get(details_url)
                if not teacher_response:
                    continue
                teacher_web_page = BeautifulSoup(teacher_response.text, 'html.parser')
                teacher_data.email = self.staff_lookup.params.email.get_values_from_tag(teacher_web_page)
                if not teacher_data.email:
                    teacher_data.email = ''
                mail_valid, value = self._validate_email(teacher_data.email)
                if mail_valid:
                    teacher_data.email = value
                else:
                    # todo: add logging
                    print(teacher_data.email, value)
                teacher_data.phone = self.staff_lookup.params.phone.get_values_from_tag(teacher_web_page)
                teacher_data.website = self.staff_lookup.params.website.get_values_from_tag(teacher_web_page)
                teacher_data.office = self.staff_lookup.params.office.get_values_from_tag(teacher_web_page)
                # Photo URLs may be relative; resolve against the page URL.
                image_tag = self.staff_lookup.params.photo.get_single_tag(teacher_web_page)
                image_url = urljoin(url, image_tag['src'])
                teacher_data.photo = Image.open(requests.get(image_url, stream=True).raw)
                self.__teachers_data.append(teacher_data)
        return len(self.__teachers_data)

    def _validate_email(self, email: str) -> Tuple[bool, Union[Dict, str]]:
        """Validate the given email address.

        Args:
            email:str
                The email to validate
        Returns:
            tuple -
                A tuple of size 2:
                1) outcome - indicates whether the email address is valid
                2) value - if outcome is true returns the email address in normalized form, else
                   returns a human readable error message.
        """
        try:
            # Local parts without a domain get the configured suffix appended.
            if '@' not in email:
                email = f'{email}{self._email_suffix}'
            valid = validate_email(email, timeout=10)
            outcome = True
            value = valid.email
        except EmailNotValidError as e:
            outcome = False
            value = str(e)
        return outcome, value
| matanm28/PlanIlan | plan_ilan/data_mining/staff/staff_crawler.py | staff_crawler.py | py | 4,615 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.Union",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "plan_ilan.apps.web_site.models.DepartmentEnum",
"line_number": 16,
"usage_type": "name"
},
{
"api_name":... |
24418281496 | import qiskit as qk
from qiskit import QuantumProgram
# NOTE: QuantumProgram is the legacy (pre-Terra 0.5) qiskit API.
qp = QuantumProgram()
qr = qp.create_quantum_register('qr', 2)
cr = qp.create_classical_register('cr', 2)
qc = qp.create_circuit('qc', [qr], [cr])
circuit = qp.get_circuit('qc')
quantum_r = qp.get_quantum_register('qr')
classical_r = qp.get_classical_register('cr')
# Flip qubit 0, rotate qubit 1, entangle them with a CNOT, then measure both.
circuit.x(quantum_r[0])
circuit.y(quantum_r[1])
circuit.cx(quantum_r[0], quantum_r[1])
circuit.measure(quantum_r[0], classical_r[0])
circuit.measure(quantum_r[1], classical_r[1])
# Fixed typo: the legacy local simulator backend is 'local_qasm_simulator',
# not 'local_qsam_simulator'.
backend = 'local_qasm_simulator'
circuits = ['qc']
result = qp.execute(circuit, backend, wait=2, timeout=240)
print(result)
result.get_counts('qc')
out = result.get_ran_qasm('qc')
print(out)
| zillerium/shoro | test1.py | test1.py | py | 697 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "qiskit.QuantumProgram",
"line_number": 4,
"usage_type": "call"
}
] |
39549222223 | import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
from bs4 import BeautifulSoup
import csv
import random
# Autoplius listing URL to scrape; asked interactively at startup.
linkas = input("Autopliuso masinu linkas: ")
def randomlaikas():
    """Sleep for a random 3-10 seconds to mimic a human browsing pace."""
    time.sleep(random.randint(3, 10))
def scrape_phone_numbers():
    """Scrape seller phone numbers from autoplius listings into phones.csv.

    Opens the listing URL (`linkas`), walks listing pages until the requested
    number of unique Lithuanian (+370) numbers is collected or pagination
    ends, then writes them to phones.csv.
    """
    driver = webdriver.Chrome(executable_path=ChromeDriverManager().install())
    driver.get(linkas)
    time.sleep(4)
    # Dismiss the cookie-consent dialog ("Sutinku" = "I agree").
    accept_cookies = driver.find_element(By.XPATH, '//button[text()="Sutinku"]')
    accept_cookies.click()
    time.sleep(3)
    results = []
    num_cars = int(input("Kiek nuskreipinti automobiliu: "))
    current_window_handle = driver.current_window_handle
    while len(results) < num_cars:
        cars = driver.find_elements(By.XPATH, '//div[@class="announcement-title"]')
        for i, car in enumerate(cars):
            if i == num_cars:
                break
            current_window_handle = driver.current_window_handle
            car.click()
            time.sleep(3)
            # The listing opens in a new tab; process it, then return.
            for handle in driver.window_handles:
                if handle != current_window_handle:
                    driver.switch_to.window(handle)
                    html = driver.page_source
                    soup = BeautifulSoup(html, "html.parser")
                    phone = soup.find("div", class_="button seller-phone-number js-phone-number")
                    if phone is not None:
                        phone_number = phone.text.replace(" ", "")
                        # Keep only Lithuanian numbers, deduplicated.
                        if "+370" in phone_number and phone_number not in results:
                            results.append(phone_number)
                    driver.close()
                    driver.switch_to.window(current_window_handle)
        try:
            next_page = driver.find_element(By.XPATH, '//a[@rel="next" and @class="next"]')
            next_page.click()
            time.sleep(3)
        except Exception:
            # No next page -> stop collecting.
            break
    with open("phones.csv", "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["Phone Number"])
        for phone_number in results:
            phone_number = phone_number.strip('"\n')
            if "+370" not in phone_number:
                continue
            writer.writerow([phone_number])
    print(results)
# Run the scraper when the script is executed.
scrape_phone_numbers()
| erikonasz/APliusTel | main.py | main.py | py | 2,286 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "random.randint",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "selenium.webdrive... |
29309465531 | from io import BytesIO
import re
import sys
import requests
import img2pdf
import PyPDF4
from rich.console import Console
from rich.progress import track
# Rich console used for all user-facing output.
console = Console()
# The Scuolabook reader session cookie, asked interactively at startup.
INPUT_SESSION = console.input("๐ช Your '_reader_session' key: ")
ENDPOINT_SCUOLABOOK = "https://webapp.scuolabook.it/books"
# Headers mimicking the web reader's XHR calls, authenticated via cookie.
HEADER = {'X-Requested-With': 'XMLHttpRequest',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': f'_reader_session={INPUT_SESSION}'}
class Book:
    """
    A Book interface: metadata scraped from the Scuolabook API.
    """

    def __init__(self):
        self.book_id: int = 0        # numeric id used in API paths
        # Fixed: defaults now match the declared bytes annotations
        # (downstream code assigns .encode('utf-8') results and calls .decode()).
        self.title: bytes = b''
        self.author: bytes = b''
        self.publisher: bytes = b''
        self.isbn: bytes = b''
        self.npages: int = 0         # number of pages in the book
def get_books_list_data():
    """
    Return a dict mapping book id -> title for every book in your library.
    """
    data = requests.get(ENDPOINT_SCUOLABOOK,
                        headers=HEADER).text
    # Hoisted: the original re-ran both findall() calls on every loop
    # iteration, which was quadratic in the number of books.
    ids = re.findall('"id":(.*?),', data)
    titles = re.findall('"ws_title":"(.*?)"', data)
    books = {}
    for book_id, title in zip(ids, titles):
        # Unescape the JSON-encoded ampersand in titles.
        books[book_id] = title.replace("\u0026", "&")
    return books
def get_books_list(books_list_data: dict):
    """
    Print the library contents from `books_list_data`.

    Exits the program when the library is empty, which also covers an
    expired '_reader_session' key.
    """
    if books_list_data == {}:
        console.print(
            "โ Your library is empty or your '_reader_session' key is expired")
        exit(255)
    console.print("\n๐ [b][Your library][/b]\n")
    for bookid in books_list_data:
        console.print(f"๐ ID:({bookid}) [b]{books_list_data[bookid]}[/b]")
def get_selected_book(books_list_data: dict):
    """
    Ask the user for a book ID, validate it against the library, and return
    a populated Book object; exits the program on an invalid ID.
    """
    try:
        input_book_id = int(console.input(
            "\n๐ข Insert ID of the book to download them: "))
        # Fixed: the original loop compared the int input against string
        # keys with an inverted condition, so validation never triggered.
        if str(input_book_id) not in books_list_data:
            raise ValueError
    except ValueError:
        console.print("โ Invalid Book ID")
        exit(255)
    book = Book()
    data = requests.get(
        f"{ENDPOINT_SCUOLABOOK}/{input_book_id}", headers=HEADER).text
    book.book_id = input_book_id
    book.title = re.search('"ws_title":"(.*?)"', data).group(1).encode('utf-8')
    book.author = re.search('"ws_author":"(.*?)"',
                            data).group(1).encode('utf-8')
    book.publisher = re.search(
        '"ws_publisher":"(.*?)"', data).group(1).encode('utf-8')
    # NOTE(review): if the ISBN field is missing, book.isbn stays a falsy
    # non-bytes value and the .decode() below would fail -- TODO confirm.
    book.isbn = re.search('"ws_isbn":"(.*?)"', data)
    if book.isbn:
        book.isbn = book.isbn.group(1).encode('utf-8')
    book.npages = int(re.search('"ws_num_pages":"(.*?)"',
                                data).group(1).encode('utf-8'))
    console.print("\n[Selected book]\n")
    console.print(f"๐ Title: {book.title.decode()}")
    console.print(f"๐ค Author: {book.author.decode()}")
    console.print(f"๐๏ธ Publisher: {book.publisher.decode()}")
    console.print(f"๐ ISBN: {book.isbn.decode()}")
    console.print(f"๐ Pages: {book.npages}")
    return book
def get_all_pages(book: Book):
    """
    Take a `Book` object and return the list of page-image links to download.
    """
    data = ""
    payload = ""
    # Request page metadata in batches of 100 to keep URLs short; the extra
    # iteration (npages + 1) flushes the final partial batch.
    for i in track(range(1, book.npages + 2)):
        if i % 100 == 0 or i == book.npages + 1:
            data += requests.get(
                f"{ENDPOINT_SCUOLABOOK}/{book.book_id}/pages?{payload[1:]}", headers=HEADER).text
            payload = ""
        payload += f"&pages[]={i}"
    matches = re.findall('":"(.*?)"', data)
    return matches
def dowload_convert_all_pages(book: Book, matches: list):
    """
    Take a `Book` object and a list of page links; download every page image
    and return a `list` of the converted per-page PDFs in bytes format.
    """
    pdfs_bytes = []
    # Fixed: enumerate replaces matches.index(match), which was O(n^2) and
    # reported wrong page numbers when a link appeared twice.
    for page_no, match in enumerate(matches, start=1):
        console.print(
            f"\r๐ฅ Downloading & Converting page {page_no}/{book.npages}...")
        sys.stdout.flush()
        link = match.replace("\\u0026", "&").encode('utf-8')
        pdf_bytes = img2pdf.convert(requests.get(link).content)
        pdfs_bytes.append(pdf_bytes)
    return pdfs_bytes
def merging_pdf(pdfs_bytes: list, book: Book):
    """
    Merge the per-page PDFs (bytes) into one file named after the book title.

    Each element of `pdfs_bytes` is appended in order and the merged PDF is
    written next to the script.
    """
    merger = PyPDF4.PdfFileMerger(strict=False)
    # Strip "|" from the title so the name is a valid filename.
    output_file = book.title.decode().replace('\u007C', '') + ".pdf"
    console.print("\nโฏ๏ธ Merging everything...")
    for pdf_bytes in pdfs_bytes:
        with BytesIO(pdf_bytes) as pdf_file:
            pdf_file_stream = PyPDF4.PdfFileReader(
                stream=pdf_file, strict=False)
            merger.append(pdf_file_stream)
    console.print(f"โ Creating merged book ''{output_file}''...")
    merger.write(output_file)
    console.print(
        f":thumbs_up: The book: ''{output_file}'' was created succesfully!")
def main():
    """
    Run the whole pipeline: list the library, let the user pick a book,
    download and convert its pages, and merge them into a single PDF.
    """
    books_list_data = get_books_list_data()
    get_books_list(books_list_data=books_list_data)
    book = get_selected_book(books_list_data=books_list_data)
    matches = get_all_pages(book=book)
    pdfs_bytes = dowload_convert_all_pages(book=book, matches=matches)
    merging_pdf(pdfs_bytes=pdfs_bytes, book=book)
if __name__ == '__main__':
    main()
| alessionossa/Scuolabook-Downloader-2 | download.py | download.py | py | 5,967 | python | en | code | 13 | github-code | 1 | [
{
"api_name": "rich.console.Console",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_n... |
28201890311 | import json
import logging
import os
import random
import re
import time
from datetime import datetime
import boto3
import requests
from dateutil import tz
from dateutil.parser import parse
from selectolax.parser import HTMLParser
from utils import headers
# Root logger reused by the Lambda runtime.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# DynamoDB table that receives the scraped match records.
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(os.environ['VLR_MATCHES_TABLE'])
# vlr match events cache, keyed by event URL path (shared across scrapes)
vlr_events_cache = {}
def insert(table, matches):
    '''
    put items into specified DynamoDB table.
    '''
    # batch_writer buffers puts and flushes them automatically on exit.
    with table.batch_writer() as batch:
        for match in matches:
            logger.info('put match info into the table: {}'.format(match))
            batch.put_item({k: v for k, v in match.items()})
def sleep():
    '''
    sleep for 1~10 secs (randomly), to throttle scraping requests.
    '''
    time.sleep(random.randint(1, 10))
def get_event_from_cache(event_url_path):
    """Return cached event info for `event_url_path`, or '' when absent."""
    return vlr_events_cache.get(event_url_path, '')
def scrape_event(event_url_path):
    '''
    scrape event page of url_path; returns a dict with id, name and flag,
    and stores it in the module-level cache.
    '''
    global vlr_events_cache
    url = 'https://www.vlr.gg{}'.format(event_url_path)
    logger.info('get event info: {}'.format(url))
    resp = requests.get(url, headers=headers)
    html = HTMLParser(resp.text)
    # URL paths look like /event/<id>/<slug>; the id is the 3rd segment.
    event_id = int(event_url_path.split('/')[2])
    event_name = html.css_first('.wf-title').text().strip()
    event_name = event_name.replace('\t', '').replace('\n', '')
    # CSS classes look like "flag mod-<cc>"; reduce to "flag_<cc>".
    country_flag = html.css_first(
        '.event-desc-item-value .flag').attributes['class']
    country_flag = country_flag.replace(' mod-', '_').replace('flag_', '')
    data = {
        'event_id': event_id,
        'event_name': event_name,
        'country_flag': country_flag
    }
    # caching
    vlr_events_cache[event_url_path] = data
    return data
def scrape_match(match_url_path):
    '''
    scrape match page of url_path; returns a dict describing the match.
    '''
    global vlr_events_cache
    url = 'https://www.vlr.gg{}'.format(match_url_path)
    logger.info('get match info: {}'.format(url))
    resp = requests.get(url, headers=headers)
    html = HTMLParser(resp.text)
    match_id = int(match_url_path.split('/')[1])
    match_name = html.css_first('.match-header-event-series').text()
    match_name = match_name.replace('\t', '').replace('\n', '')
    # vlr.gg publishes the start time as US Eastern; normalise to UTC.
    start_time = html.css_first('.moment-tz-convert').attributes['data-utc-ts']
    with_timezone = ' '.join([start_time, 'EST'])
    tzinfo = {'EST': tz.gettz('America/New_York'),
              'CST': tz.gettz('America/Chicago')}
    start_time_est = parse(with_timezone, tzinfos=tzinfo)
    start_time_utc = start_time_est.astimezone(tz.gettz('Etc/GMT'))
    start_time_utc = datetime.strftime(start_time_utc, '%Y-%m-%dT%H:%M:%S%z')
    teams = html.css('.wf-title-med')
    teams = [t.text().replace('\t', '').replace('\n', '') for t in teams]
    # The last "vs-note" holds the format, e.g. "Bo3" or "Bo5 Maps".
    best_of = html.css('.match-header-vs-note')[-1].text()
    best_of = best_of.replace('Bo', '').replace(' Maps', '')
    best_of = best_of.replace('\t', '').replace('\n', '')
    best_of = int(best_of)
    event_url_path = html.css_first('a.match-header-event').attributes['href']
    if event_url_path in vlr_events_cache:
        logger.info('get event info from cache: {}'.format(event_url_path))
        event_info = vlr_events_cache[event_url_path]
    else:
        logger.info('get event info from website: {}'.format(event_url_path))
        event_info = scrape_event(event_url_path)
    data = {
        'match_id': match_id,
        'event_name': event_info['event_name'],
        'event_country_flag': event_info['country_flag'],
        'start_time': start_time_utc,
        'best_of': best_of,
        'match_name': match_name,
        'teams': teams
    }
    return data
def scrape_matches(page: str = 1):
    '''
    scrape /matches page and return a list of match records.
    '''
    url = 'https://www.vlr.gg/matches?page={}'.format(page)
    logger.info('fetch matches list from: {}'.format(url))
    resp = requests.get(url, headers=headers)
    html = HTMLParser(resp.text)
    matches = []
    for item in html.css('a.wf-module-item'):
        match_url_path = item.attributes['href']
        sleep()  # throttle between detail-page requests
        match_detail = scrape_match(match_url_path)
        # Renamed from `item` to avoid shadowing the loop variable above.
        record = {
            'id': match_detail['match_id'],
            'eventName': match_detail['event_name'],
            'eventCountryFlag': match_detail['event_country_flag'],
            'startTime': match_detail['start_time'],
            'bestOf': match_detail['best_of'],
            'matchName': match_detail['match_name'],
            'teams': [{'title': team} for team in match_detail['teams']],
            'pagePath': match_url_path
        }
        logger.info('add match to the list: {}'.format(record))
        matches.append(record)
    return matches
def lambda_handler(event, context):
    """SQS-triggered entry point: each record's body carries a page number
    to scrape; all scraped matches are batch-written to DynamoDB."""
    records = event['Records']
    match_list = []
    for record in records:
        body = json.loads(record['body'])
        page = str(body['page'])
        matches = scrape_matches(page)
        match_list.extend(matches)
    insert(table, match_list)
    return {
        'matches_count': len(match_list)
    }
| miztch/sasha | functions/sasha/index.py | index.py | py | 5,239 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "boto3.resource",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.environ",
"... |
37573481705 | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 25 19:52:38 2021
@author: Eliu
"""
#pip install keyboard
import keyboard
from time import sleep
def free_fire(cx, cy, w):
    """Steer the game left/right so the detected face stays centred.

    `cx`, `cy` are the face-centre pixel coordinates (camera is 640x480),
    `w` the face width. A dead zone of w/3 around the horizontal centre
    suppresses jitter; a smaller divisor would increase sensitivity.
    """
    res_x = 640
    sleep(0.001)
    no_move = w / 3  # dead-zone half-width
    if cx > (res_x / 2) + no_move and cx != 0:
        print("IZQUIERDA")
        keyboard.press('a')
        keyboard.press_and_release('d')
    elif cx < (res_x / 2) - no_move:
        print("DERECHA")
        keyboard.press('d')
        keyboard.press_and_release('a')
    else:
        print('CENTER')
        # Release both directions so the player stops turning.
        keyboard.press_and_release('d')
        keyboard.press_and_release('a')
import cv2
# Haar-cascade face detector; the XML file must sit next to this script.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)  # my camera, 640x480
while True:
    _, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # Face centre drives the keyboard steering.
        cx = x + (w / 2)
        cy = y + (h / 2)
        free_fire(cx, cy, w)
        print(str(cx) + ' ' + str(cy))
    cv2.imshow('img', img)
    k = cv2.waitKey(30)
    if k == 27:  # ESC quits
        break
cap.release()
| EliuPineda/FreeFire_Sensor | FreeSensor.py | FreeSensor.py | py | 1,438 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "time.sleep",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "keyboard.press",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "keyboard.press_and_release",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "keyboard.press",... |
13720433941 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 18 08:53:40 2020
@author: Mathew
"""
# These are the packages we are using.
from skimage.io import imread
import matplotlib.pyplot as plt
from skimage import filters,measure
image=imread("/Users/Mathew/Desktop/Axioscan/apt.tif")
# This is just to show a small region of the image, since it's difficult to see what's happening with the full image.
region=image[500:1000,500:1000]
plt.imshow(region,cmap="Blues")
plt.show()
# Determine a threshold using the Otsu method - Note, if I was comparing cases etc., I'd keep the threhsold constant, and not use Otsu for all of them.
threshold=filters.threshold_otsu(image)
print(threshold) # This prints the threshold that Otsu has determined.
filtered=image>threshold # Apply the threshold to the image. If going for constant threshold for all images, could just replace this with a number.
region_filtered=filtered[:,:] # Can set a region to look at.
plt.imshow(region_filtered,cmap="Blues")
plt.show()
# Now find the different features in the thresholded image.
label_image=measure.label(region_filtered)
plt.imshow(label_image)
plt.show()
# Measure parameters of labelled regions.
table=measure.regionprops_table(label_image,properties=('area','centroid','orientation','major_axis_length','minor_axis_length'))
# Get the area and length data.
areas=table['area']
lengths=table['major_axis_length']
number=len(areas) # Count the number of features detected.
print(number)
# Plot some histograms.
plt.hist(areas, bins = 50,range=[0,100], rwidth=0.9,color='#607c8e')
plt.xlabel('Area (pixels)')
plt.ylabel('Number of Features')
plt.title('Area of features')
plt.show()
plt.hist(lengths, bins = 50,range=[0,200], rwidth=0.9,color='#607c8e')
plt.xlabel('Length (pixels)')
plt.ylabel('Number of Features')
plt.title('Length')
plt.show()
# For control:
imagecont=imread("/Users/Mathew/Desktop/Axioscan/noapt.tif")
filteredcont=imagecont>threshold # Apply the threshold to the image. If going for constant threshold for all images, could just replace this with a number.
label_image_cont=measure.label(filteredcont)
# Measure parameters of labelled regions.
tablecont=measure.regionprops_table(label_image_cont,properties=('area','centroid','orientation','major_axis_length','minor_axis_length'))
# Get the area and length data.
areascont=tablecont['area']
lengthscont=tablecont['major_axis_length']
numbercont=len(areascont) # Count the number of features detected.
print(numbercont)
# Plot some histograms.
plt.hist(areascont, bins = 50,range=[0,100], rwidth=0.9,color='#607c8e')
plt.xlabel('Area (pixels)')
plt.ylabel('Number of Features')
plt.title('Area of features (control)')
plt.show()
plt.hist(lengthscont, bins = 50,range=[0,200], rwidth=0.9,color='#607c8e')
plt.xlabel('Length (pixels)')
plt.ylabel('Number of Features (control)')
plt.title('Length')
plt.show() | orie1876/axioscan | Quick_Anal.py | Quick_Anal.py | py | 2,970 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "skimage.io.imread",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "matplotl... |
27076133308 | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, BLOB, DateTime
base = declarative_base()
class ConfigNmap(base):
__tablename__ = 'config_nmap'
id = Column(Integer, primary_key=True)
property = Column(String(255), nullable=False)
value = Column(String(255), nullable=False)
class NmapReportsDHCPDiscover(base):
__tablename__ = 'nmap_reports_dhcp_discover'
id = Column(Integer, primary_key=True)
time = Column(DateTime)
report = Column(BLOB)
class NmapReportsSnifferDetect(base):
__tablename__ = 'nmap_reports_sniffer_detect'
id = Column(Integer, primary_key=True)
time = Column(DateTime)
report = Column(BLOB)
class NmapDiff(base):
__tablename__ = 'nmap_diff'
id = Column(Integer, primary_key=True)
result = Column(BLOB) | Annihilat0r/JP-python | nmaper_jp/tables_config.py | tables_config.py | py | 842 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlalchemy.ext.declarative.declarative_base",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 9,
"usage_type": "argument"
},
{
... |
71491666593 | import os
import tensorflow as tf
import time
import numpy as np
import data_loader
import matplotlib.pyplot as plt
from tqdm import trange
class DNN:
"""DNNๆจกๅ่ฎญ็ป"""
def __init__(self, path=None, sheet_name="Sheet2",
save_model_path="./model/model-1/", batch_size=32,
learning_rate=0.001, num_steps=10000, model_dim=(101, 64, 40),
steps=10, threshold_value=0.9, dropout=0, save_model_threshold_value=0.8):
"""
ๅๅปบๆจกๅ่ฎญ็ปๅฎไพ
:param path: ่พๅ
ฅxlsxๆไปถ่ทฏๅพ
:param save_model_path: ๆจกๅไฟๅญ่ทฏๅพ
:param batch_size: ่ฎญ็ปbatchๅคงๅฐ
:param num_steps: ่ฟญไปฃๆญฅๆฐ
:param steps: ๆๅฐ่ฏไผฐ้ด้ๆญฅๆฐ
:param threshold_value: ่พๅบ้ๅผ
:param dropout: ไธขๅผ็
:param save_model_threshold_value: ๆจกๅไฟๅญ้ๅผ
"""
self.path = path
self.save_model_path = save_model_path
self.batch_size = batch_size
self.num_steps = num_steps
self.steps = steps
self.all_counts = int(num_steps / steps) - 1
self.count = 0 # ่ฏไผฐ่ฎกๆฐ
self.fig_loss = np.zeros([self.all_counts])
self.fig_acc = np.zeros([self.all_counts])
self.save_model_threshold_value = save_model_threshold_value # ไฟๅญๆจกๅ้ๅผ
self.threshold_value = threshold_value # ้ๅผ
self.sheet_name = sheet_name
self.keep_rate = 1 - dropout # dropoutไฝฟ็ฅ็ป็ฝ็ปๅฑไธญ้ๆบไธขๅผ็ฅ็ปๅๅ
็ๅผ็ไฟ็็
self.learning_rate = learning_rate # ๅญฆไน ็
self.hidden_dim = model_dim[1:-1] # ้่ๅฑ็ปดๅบฆ
self.input_dim = model_dim[0] #่พๅ
ฅๅฑ็ปดๅบฆ
self.output_dim = model_dim[-1] #่พๅบๅฑ็ปดๅบฆ
self.graph = tf.Graph() # ๅปบ็ซๅพ
with self.graph.as_default(): # ่ฎพไธบ้ป่ฎค็graph
self.keep_prob = tf.placeholder(tf.float32, None, name="keep_prob")
self.train_inputs = tf.placeholder(tf.float32, shape=[None, None], name="train_inputs") # ๅ ไฝ็ฌฆ๏ผๆ ทๆฌ่พๅ
ฅ
self.train_labels = tf.placeholder(tf.float32, shape=[None, None], name="train_labels") # ๅ ไฝ็ฌฆ๏ผๆ ทๆฌๆ ็ญพ
# ไฝฟ็จGPU่ฟ่กๅผ ้่ฎก็ฎ
with tf.device('/cpu:0'):
# ้่ๅฑ1
self.hidden_layer = self.layer(output_dim=self.hidden_dim[0], input_dim=self.input_dim,
inputs=self.train_inputs,
keep_prob=self.keep_prob,
activation=None)
# ้่ๅฑ(>1)
for num in range(len(self.hidden_dim) - 1):
self.hidden_layer = self.layer(output_dim=self.hidden_dim[num + 1], input_dim=self.hidden_dim[num],
inputs=self.hidden_layer,
keep_prob=self.keep_prob,
activation=None)
# ่พๅบๅฑ
self.output_layer = self.layer(output_dim=self.output_dim, input_dim=self.hidden_dim[-1],
inputs=self.hidden_layer,
keep_prob=None,
activation=None)
# ๆ็ปไฝฟ็จsigmodๅฝๆฐ่พๅบ้ขๆต็ปๆ
self.prediction = tf.nn.sigmoid(self.output_layer)
tf.identity(self.prediction, "output")
# ไฝฟ็จsigmoid_cross_entropyไฝไธบๆๅคฑๅฝๆฐ
self.loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=self.output_layer, labels=self.train_labels))
# ๅฎไนไผๅๆๅคฑๅฝๆฐ๏ผไฝฟ็จAdamไปฅๅ้ข่ฎพๅญฆไน ็่ฎญ็ปๅๆฐ
self.optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
self.init = tf.global_variables_initializer()
def layer(self, output_dim, input_dim, inputs, keep_prob=None, activation=None):
"""
็ฅ็ป็ฝ็ปๅฑ
:param output_dim: ่พๅบ็ปดๅบฆ
:param input_dim: ่พๅ
ฅ็ปดๅบฆ
:param inputs: ่พๅ
ฅๆฐๆฎ
:param activation: ้็จๆฟๆดปๅฝๆฐ
:param keep_prob: drop_outไฟๆ็
:return: ไธไธๅฑ่พๅ
ฅ
"""
# ๆ้
W = tf.Variable(tf.random_normal([input_dim, output_dim]))
# ๅ็ฝฎ
b = tf.Variable(tf.random_normal([1, output_dim]))
XWb = tf.matmul(inputs, W) + b
if keep_prob is not None:
XWb = tf.nn.dropout(XWb, keep_prob=keep_prob)
if activation is None:
outputs = XWb
else:
outputs = activation(XWb)
return outputs
def train(self, data_inputs, data_labels, keep_prob):
"""
ๆจกๅ่ฎญ็ป
:param data_inputs: ๆ ทๆฌ่พๅ
ฅ
:param data_labels: ๆ ทๆฌๆ ็ญพ
:param keep_prob: dropoutไฟ็็ฅ็ปๅ
ๆฆ็
"""
with tf.Session(graph=self.graph) as session:
self.init.run()
if not os.path.exists(self.save_model_path):
os.mkdir(self.save_model_path)
# ๆฃๆฅๆฏๅฆๅทฒๅญๅจๆจกๅ๏ผๆๅๆขๅค่ฎญ็ป๏ผ้ๆฐ่ฎญ็ปๆฐๆจกๅ้ๅ
ๅ ้คๆจกๅ
if os.path.exists(self.save_model_path + "model.ckpt.meta"):
ckpt = tf.train.latest_checkpoint(self.save_model_path)
tf.train.Saver().restore(session, ckpt)
total_loss = 0
total_acc = 0
max_acc = 0
# ๆจกๅไฟๅญ็ฎก็
saver = tf.train.Saver(max_to_keep=2)
start_time = time.time()
# ไฝฟ็จtqdm.trange็ฎก็่ฟๅบฆๆก่พๅบ
with trange(self.num_steps) as t:
for step in t:
t.set_description("Training")
batch_inputs, batch_labels, one_hot_label = next(
data_loader.generate_batch(data_inputs, data_labels, batch_size=self.batch_size))
feed_dict = {self.train_inputs: batch_inputs, self.train_labels: batch_labels,
self.keep_prob: keep_prob}
_, loss, output = session.run([self.optimizer, self.loss, self.output_layer], feed_dict)
# ้ๅผๅคๆญ่พๅบ
output = [[1 if i > self.threshold_value else 0 for i in j] for j in output]
batch_labels = list(batch_labels)
for num, value in enumerate(output):
if value == list(batch_labels[num]):
total_acc = total_acc + (1. / self.batch_size)
# ็ป่ฎกๆๅคฑๅๅ็กฎ็
total_loss = total_loss + loss
# ๆฏstepsๆฌก่ฟ่กไธๆฌก่ฏไผฐ่พๅบ
if step % self.steps == 0 and step != 0:
# ่ฎก็ฎๅนณๅๆๅคฑ
average_loss = total_loss / self.steps
average_acc = total_acc / self.steps
spend_time = time.time() - start_time
t.set_postfix(Loss="{:.9f}".format(average_loss), Accuracy="{:.9f}".format(average_acc))
self.fig_loss[self.count] = average_loss
self.fig_acc[self.count] = average_acc
if average_acc > self.save_model_threshold_value:
# ๅฆๆ่ฏไผฐๅ็กฎ็ๅๅฒๆไผ๏ผไฟๅญๆจกๅ
self.save_model_threshold_value = average_acc
saver.save(sess=session, save_path=self.save_model_path + "model.ckpt")
# ๆฏไธๆฌก่ฏไผฐๅๆๅคฑไธๅ็กฎ็็ฝฎ้ถ
total_loss = 0
total_acc = 0
# ่ฎกๆฐ+1
self.count += 1
# ้็ฝฎๆถ้ด
start_time = time.time()
def plot(self):
# ็ปๅถๆๅคฑไธๅ็กฎ็ๅพ
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
lns1 = ax1.plot(np.arange(self.all_counts), self.fig_loss, label="Loss")
lns2 = ax2.plot(np.arange(self.all_counts), self.fig_acc, 'r', label="Accuracy")
ax1.set_xlabel('iteration')
ax1.set_ylabel('training loss')
ax2.set_ylabel('training accuracy')
# ๅๅนถๅพไพ
lns = lns1 + lns2
labels = ["Loss", "Accuracy"]
plt.legend(lns, labels, loc=7)
plt.show()
def main(self):
"""ๆจกๅ่ฎญ็ป"""
# ่ฏปๅๆ ทๆฌ
data_inputs, data_labels = data_loader.read_data(path=self.path, sheet_name=self.sheet_name, input_num=self.input_dim, label_num=self.output_dim)
# ่ฎญ็ปๆ ทๆฌ
self.train(data_inputs, data_labels, self.keep_rate)
# ็ปๅถlossไธaccๅพ
self.plot()
if __name__ == '__main__':
# ๅๅปบ็ฑป็ๅฎไพ
###########################################ๅๆฐ่ฏดๆ############################################
# path: ๆฐๆฎ่พๅ
ฅ่ทฏๅพ๏ผsheet_name๏ผๆฐๆฎๆๅจ่กจๅๅ๏ผ save_model_path: ๆจกๅไฟๅญ่ทฏๅพ
# batch_size: ่ฎญ็ปๆนๆฌกๅคงๅฐ๏ผlearning_rate: ๅญฆไน ็, num_steps: ๆป่ฟญไปฃๆญฅๆฐ
# model_dimไธบไธไธชๅ
็ฅ๏ผๅญๆพๆดไธชๆจกๅๅๅฑ็็ปดๅบฆ[tuple:๏ผ่พๅ
ฅๅฑ็ปดๅบฆ, ้่ๅฑ1็ปดๅบฆ, ้่ๅฑ2็ปดๅบฆ..., ่พๅบๅฑ็ปดๅบฆ)]
# steps: ๆฏๅคๅฐๆญฅ่ฟ่ก่ฏไผฐ๏ผthreshold_value: ่พๅบ็ปๆ้ๅผ๏ผdropout: ้ฒ่ฟๆๅ๏ผ้ๆบ็ฅ็ปๅๅ
ไธขๅผ็
# save_model_threshold_value: ๆจกๅไฟๅญ้ๅผ๏ผ่ฏไผฐๅ็กฎ็ๅคงไบ้ๅผไฟๅญๆจกๅ๏ผ
dnn = DNN(path="./data/ๆ ทๆฌ.xlsx", sheet_name="Sheet2", save_model_path="./models/model-1/",
batch_size=64, learning_rate=0.001, num_steps=20000, model_dim=(101, 64, 51, 40), steps=100,
threshold_value=0.9, dropout=0.1, save_model_threshold_value=0.8)
# ็จๅบๅ
ฅๅฃ๏ผๅผๅง่ฟ่ก่ฎญ็ป
dnn.main()
| xucong053/Fault-Classification | main.py | main.py | py | 10,108 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "numpy.zeros",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "tensorflow.Graph",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "tensorflow.placeholder",
... |
25885622776 | from pymongo import MongoClient
import requests
from watson_service import WatsonService
from data_service import DataService
def main():
data_service = DataService()
news_journals = data_service.get_known_online_news_journals()
news_journal_articles = []
for news_journal in news_journals:
try:
articles_of_journal_over_timeframe = data_service.get_articles_by_new_journal_over_timeframe(news_journal['url'])
except Exception as query_failure:
print('source name, source id, resulted in xmany articles or none at all within the timeframe')
source_articles_by_source_id = {}
for news_source in news_sources:
articles = data_service.get_articles_from_online_news_journal_by_url(news_source['url'])
source_articles_by_source_id[news_source['_id']] = articles
if 'docs' in source_articles_by_source_id.keys():
for article in source_articles_by_source_id['docs']['source']['enriched']['url']:
pass
# find the best possible news across one or more sources the most both singularly about a topic and with an
# interesting or strong perspective
# want news that is unique due to its paring of strange characters doing odd things, expressing a strong
# and unique opinion towards common actions performed by a character in novel way
# Hierarchy of boxes model, where the color of the lies of each rectange can convey different information, same as the
# spacing between the rectangles
stop_here = ""
stop_here = ""
query_url = 'https://topics.mediacloud.org/api/collections/9139458/stories'
parameters = {
'snapshotId': '1477',
'timespandId': '39849',
'sort': 'inlink',
'limit': '5000'
}
# response = requests.get(query_url, params=parameters)
# stop_here = ""
client = MongoClient('localhost', 27017)
database = client['AkashicRecords']
articles = database['historic_political_article_data']
cbt = database['cleaned_breitbart_test']
articles_found = []
for article in articles.find():
articles_found.append(article)
cleaned_articles = []
for article in articles_found:
if len(article['entities']) > 5:
article['entities'] = article['entities'][:5]
stop_here = ""
for article in articles_found:
relation_by_sent_id = {}
for relation in article['relations']:
try:
sent_id = hash(relation['sentence'])
if 'subject' in relation.keys():
if 'object' in relation.keys():
pass
if 'object' in relation.keys():
pass
except Exception as e:
print(e.args)
# watson_service = WatsonService()
# articles_found = data_service.pull_from_source('http://www.breitbart.com')
cleaned_articles = data_service.clean_source_articles('breitbart.com', articles_found)
cbt.insert_many(cleaned_articles)
article_data_list = []
for article in articles.find():
article_element_types = []
relevance_threshold = 0.80
for entity in article['entities']:
if entity['relevance'] > relevance_threshold:
if 'knowledgeGraph' in entity.keys():
if 'typeHierarchy' in entity['knowledgeGraph'].keys():
article_element_types.append(entity['knowledgeGraph']['typeHierarchy'].split('/')[1:])
for keyword in article['keywords']:
if keyword['relevance'] > relevance_threshold:
if 'knowledgeGraph' in keyword.keys():
if 'typeHierarchy' in keyword['knowledgeGraph'].keys():
article_element_types.append(keyword['knowledgeGraph']['typeHierarchy'].split('/')[1:])
article_data_list.append(article_element_types)
stop_here = ""
if __name__ == '__main__':
main()
| Thaumat/AkashicRecords | gather-data.py | gather-data.py | py | 4,034 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "data_service.DataService",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "data_service.get_known_online_news_journals",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "data_service.get_articles_by_new_journal_over_timeframe",
"line_number":... |
42426607543 | from collections import deque
n=int(input())
sea=[list(map(int,input().split())) for _ in range(n)]
d=[(1,0),(-1,0),(0,1),(0,-1)]
def bfs(q,shark):
global cnt
checklist=[]
checkpoint=n**2
while q:
x,y,t=q.popleft()
if t>checkpoint:
sea[shark[0]][shark[1]]=0
checklist.sort()
sea[checklist[0][0]][checklist[0][1]]=9
cnt+=1
return t
for dx,dy in d:
nx=x+dx
ny=y+dy
if 0<=nx<n and 0<=ny<n and not v[nx][ny]:
if sea[nx][ny]==0 or sea[nx][ny]==size:
v[nx][ny]=True
q.append((nx,ny,t+1))
elif sea[nx][ny]<size:
checkpoint=t
checklist.append((nx,ny))
if checklist:
sea[shark[0]][shark[1]]=0
checklist.sort()
sea[checklist[0][0]][checklist[0][1]]=9
cnt+=1
return t+1
return False
cnt=0
size=2
ans=0
while True:
v=[[False]*n for _ in range(n)]
for i in range(n):
for j in range(n):
if sea[i][j]==9:
q=deque([(i,j,0)])
v[i][j]=True
shark=(i,j)
tmp=bfs(q,shark)
if not tmp:
break
ans+=tmp
if cnt==size:
size+=1
cnt=0
print(ans) | jhchoy00/baekjoon | 16236.py | 16236.py | py | 1,349 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.deque",
"line_number": 46,
"usage_type": "call"
}
] |
14282248517 | from flask import Flask
from app.extensions import db, migrate, login_manager
from app.user import user
def create_app():
app = Flask(__name__)
app.config.from_pyfile('config.py')
db.init_app(app)
migrate.init_app(app, db)
login_manager.init_app(app)
app.register_blueprint(user, url_prefix='/user')
return app
| saviogodinho2002/Programacao-Web-Flask | app/__init__.py | __init__.py | py | 344 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "app.extensions",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "app.extensions.config.from_pyfile",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "app.extensi... |
20841732899 | from flask import Flask, jsonify, request, render_template
from datetime import datetime
from db import despesas, tipos_de_pagamento, categorias, Pagamento, Categoria
app = Flask(__name__)
# criar os tipos de pagamento // importada do banco de dados
# criar as categorias // importada do banco de dados
# pagina inicial
@app.route('/', methods=['GET'])
def pagina_inicial():
return render_template('index.html')
# listar todas as despesas do mes vigente
@app.route('/api/despesas', methods=['GET'])
def get_despesas():
lista = []
total = 0
for despesa in despesas:
if despesa.get('data').month == datetime.today().month:
lista.append(despesa)
total += despesa.get('valor')
return jsonify({'data': lista, 'total': total, 'success': True})
# listar todas as depesas totais
@app.route('/api/despesasTotais', methods=['GET'])
def get_todas_despesas():
return jsonify({'data':despesas, 'success': True})
# listas despesas por categoria
@app.route('/api/despesas/<categoria>', methods=['GET'])
def get_por_categoria(categoria):
lista = []
for despesa in despesas:
if despesa.get('categoria') == categoria:
lista.append(despesa)
return jsonify({'data': lista, 'success': True})
# adicionar despesa
@app.route('/api/add/despesa', methods=['POST'])
def cadastrar_despesa():
despesa = request.get_json()
id = despesa['id'] # get id
valor = despesa['valor'] # get valor
descricao = despesa['descricao'] # get descricao
for index, pagamento in enumerate(tipos_de_pagamento): # converte o pagamento
if despesa['pagamento'] == pagamento.nome:
pagamento = tipos_de_pagamento[index]
for index, categoria in enumerate(categorias): # converte a categoria
if despesa['categoria'] == categoria.nome:
categoria = categorias[index]
data = datetime.strptime(despesa['data'], '%d/%m/%y').date() # converte em data
nova_despesa = {
'id': id,
'valor': valor,
'descricao': descricao,
'pagamento': pagamento.nome,
'categoria': categoria.nome,
'data': data
}
despesas.append(nova_despesa)
return jsonify({'data': despesas, 'success': True})
# adicionar tipo de pagamento
@app.route('/api/add/pagamento', methods=['POST'])
def cadastrar_pagamento():
pagamento = request.get_json()
id = pagamento['id']
nome = pagamento['nome']
novo_pagamento = Pagamento(id, nome)
tipos_de_pagamento.append(novo_pagamento)
return jsonify({'data': f'Pagamento adicionado: {novo_pagamento.id} - {novo_pagamento.nome}',
'success': True})
# adicionar nova categoria de compra
@app.route('/api/add/categoria', methods=['POST'])
def cadastrar_categoria():
categoria = request.get_json()
id = categoria['id']
nome = categoria['nome']
descricao = categoria['descricao']
nova_categoria = Categoria(id, nome, descricao)
categorias.append(nova_categoria)
return jsonify({'data': f'Categoria adicionada: {nova_categoria.id} - {nova_categoria.nome} - {nova_categoria.descricao}',
'success': True})
# listar tipos de pagamentos
@app.route('/api/pagamentos', methods=['GET'])
def mostrar_pagamentos():
lista = []
for value in tipos_de_pagamento:
lista.append({'id': value.id,
'nome': value.nome})
return jsonify({'data': lista, 'success': True})
# listar categorias e suas descricoes
@app.route('/api/categorias', methods=['GET'])
def mostrar_categorias():
lista = []
for value in categorias:
lista.append({'id': value.id,
'nome': value.nome,
'descricao': value.descricao})
return jsonify({'data': lista, 'success': True})
app.run(port=5000, host='localhost', debug=True)
| pachla/desafio-muralis | Muralis/app.py | app.py | py | 3,943 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "db.despesas",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today"... |
7443437305 | import pandas as pd
from tqdm import tqdm
import utilities as ut
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score, recall_score, plot_confusion_matrix, roc_auc_score, classification_report,\
accuracy_score, precision_score
if __name__ == '__main__':
# =================================================================================
# ==========================load prognostics data==================================
data_path = r"/ML2/project/extra_cell_lymphoma_dev/prognostics_dev.xlsx" # enter path
df = ut.load_data_and_transform(data_path, "prognostics")
# =================================================================================
# ===========================data exploration&label handling=======================
# we saw that their are patient w/o label.
# in addition we saw that there is corr between missing labels and na in more than 4 features.
# thus we decided to delete rows w/o labels.
print(df.isnull().sum(axis=0))
print(df.describe())
for i in range(2,27,9): # features distribution
ut.draw_multiple_histograms(df[df.columns[i:i+9]], 2, 2)
ut.heatmap(df)
ut.split_pos_and_neg(df)
# we decided to delete number of features according to their distributions:
# 1. Diagnosis - obviously all the patients has the same diagnosis so this is redundant
# 2. Treatment - half of the data is missing and the other half has the same treatment.
# 3. Biopsy Type - the major of the feature is pre-treatment
# these features from the future thus can't be part of the model
# 1. Included in survival analysis
# 2. all progressions
# 3. follow ups
curr_df = ut.preprocessing_dataframe(df)
print(curr_df.describe())
# ==================================================================================================================
# ============================= baseline model imputed missing values by traditional way ===========================
baseline_df_trad = curr_df.copy()
baseline_df_trad = ut.fill_missing_values_traditional_way(baseline_df_trad)
baseline_df_trad = baseline_df_trad.round(
{'Age': 0, 'IPI Group': 0, 'Ann Arbor Stage': 0, 'ECOG Performance Status': 0})
X_train_trad, X_val_trad, y_train_trad, y_val_trad = ut.train_test_split_and_rename(baseline_df_trad,
baseline_df_trad[
'Number of Extranodal Sites'
])
rf0 = RandomForestClassifier()
log_r0 = LogisticRegression()
knn0 = KNeighborsClassifier()
mlp0 = MLPClassifier()
models = [rf0, log_r0, knn0, mlp0]
for clf in models:
mean_auc = ut.cross_validation(10,X_train_trad,y_train_trad,clf)
if clf == rf0:
baseline_auc = mean_auc
# ==================================================================================================================
# =============================== baseline model with missing values imputed by KNN ================================
curr_df = ut.fill_missing_values_by_KNN(curr_df)
curr_df = curr_df.round({'Age': 0, 'IPI Group': 0, 'Ann Arbor Stage': 0, 'ECOG Performance Status': 0})
# split train labels and validation
X_train, X_val, y_train, y_val = ut.train_test_split_and_rename(curr_df, curr_df['Number of Extranodal Sites'])
rf = RandomForestClassifier()
log_r = LogisticRegression()
knn = KNeighborsClassifier()
mlp = MLPClassifier()
models = [rf, log_r, knn, mlp]
for clf in models:
ut.cross_validation(10,X_train,y_train,clf)
# =====================================================================================================
# ============================================grid_search==============================================
# rf = RandomForestClassifier()
# rf_param_grid = {
# 'max_depth': [10,20,50,80,110],
# 'max_features': ['auto', 'sqrt'],
# 'min_samples_leaf': [1,2,6,10],
# 'min_samples_split': [2,6,10],
# 'n_estimators': [10,100,600,1000]}
# train_size = 0.8
# # In the first step we will split the data in training and remaining dataset
# X_train_grid, X_rem, y_train_grid, y_rem = X_train_trad, X_val_trad, y_train_trad, y_val_trad
#
# # Now since we want the valid and test size to be equal (10% each of overall data).
# # we have to define valid_size=0.5 (that is 50% of remaining data)
# test_size = 0.5
# X_val_grid, X_test_grid, y_val_grid, y_test_grid = ut.train_test_split(X_rem, y_rem, test_size=0.5)
#
# best_param = ut.run_grid_search(rf, rf_param_grid, X_train_grid, y_train_grid, X_val_grid, y_val_grid)
#
# # rf_grid = RandomForestClassifier(**best_param)
# # rf_grid.fit(X_train_grid,y_train_grid)
# # plot_roc_curve(rf_grid, X_test_grid,y_test_grid)
# # plt.show()
# ==========================================================================================================
# the two Imputing methods yield the same results, thus we will continue with the traditional baseline
# In addition, after gridsearch we found the best parameters are the default
X_train, X_val, y_train, y_val = X_train_trad, X_val_trad, y_train_trad, y_val_trad
# ==========================================================================================================
# ===================================load genetic data======================================================
data_path = r"/ML2/project/extra_cell_lymphoma_dev/genomics_dev.xlsx" # enter path
genomic_df = ut.load_data_and_transform(data_path, "genomics")
X_train, y_train, X_val, y_val = ut.join_genomics(genomic_df, X_train, y_train, X_val, y_val)
# ==========================================================================================================
# ===================================drop features by domain knowledge =====================================
X_train = X_train.drop(['ECOG Performance Status', 'IPI Range', 'dbGaP_accession_phs000178',
'dbGaP_accession_phs001444', 'Gene_Expression_Subgroup_ABC', 'Gene_Expression_Subgroup_GCB',
'Gene_Expression_Subgroup_Unclass', 'Genetic_Subtype_BN2', 'Genetic_Subtype_EZB',
'Genetic_Subtype_MCD', 'Genetic_Subtype_N1', 'Genetic_Subtype_Other'], axis=1)
X_val = X_val.drop(['ECOG Performance Status', 'IPI Range', 'dbGaP_accession_phs000178',
'dbGaP_accession_phs001444', 'Gene_Expression_Subgroup_ABC', 'Gene_Expression_Subgroup_GCB',
'Gene_Expression_Subgroup_Unclass', 'Genetic_Subtype_BN2', 'Genetic_Subtype_EZB',
'Genetic_Subtype_MCD', 'Genetic_Subtype_N1', 'Genetic_Subtype_Other'], axis=1)
# =========================================================================================================
# ===================================run basic model with genomic data=====================================
print("=================run basic model with all genomic data=================")
rf2 = RandomForestClassifier()
auc = ut.run_model_and_draw_ROC(rf2, X_train, y_train, X_val, y_val)
print(f"for the baseline model with all the genomic data the AUC is: {auc}")
# =========================================================================================================
# ===================================we will try reducing dimension by using pca===========================
print("=================run model with reduced dimensions by PCA=================")
X_train_pca, X_val_pca = ut.get_reduced_dimensions_df(X_train, X_val)
y_train_pca = y_train.reindex(X_train_pca.index)
y_val_pca = y_val.reindex(X_val_pca.index)
print(f"Reduced dimensions to: {X_train_pca.shape}")
rf3 = RandomForestClassifier()
auc = ut.run_model_and_draw_ROC(rf3, X_train_pca, y_train_pca, X_val_pca, y_val_pca)
print(f"for the model with reduced dimensions by PCA the AUC is: {auc}")
# =========================================================================================================
# ===================================We will try feature selection by Domain knowledge====================
print("=================run model with features selected by Domain knowledge=================")
relevant_features = ['Gender', 'Age', 'Ann Arbor Stage', 'LDH Ratio',
'IPI Group', 'JAM2', 'JAM3', 'MYC', 'POU2AF1', 'EZH2',
'BCL2', 'AQP9',
'LMBR1L', 'FGF20', 'TANK', 'CRP', 'ORM1', 'JAK1', 'BACH1', 'MTCP1', 'IFITM1', 'TNFSF10',
'FGF12', 'RFX5', 'LAP3']
print(f"The selected features are:{relevant_features}")
X_train_fs = X_train[relevant_features]
X_val_fs = X_val[relevant_features]
rf4 = RandomForestClassifier()
auc = ut.run_model_and_draw_ROC(rf4, X_train_fs, y_train, X_val_fs, y_val)
print(f"for the model with features selected by Domain knowledge the AUC is: {auc}")
# =========================================================================================================
# =================================== lets try feature selection using forward selection ==================
def forward_selection(data, target, X_val, y_val,auc, significance_level=0.01):
initial_features = data.columns.tolist()
best_features = ['Gender', 'Age', 'Ann Arbor Stage', 'LDH Ratio',
'IPI Group']
scores = [auc,auc,auc]
best_features_and_scores = {}
while (len(initial_features) > 0):
print(best_features_and_scores)
remaining_features = list(set(initial_features) - set(best_features))
new_pval = pd.Series(index=remaining_features)
for new_column in tqdm(remaining_features):
new_pval[new_column] = 0
for i in range(3):
model = RandomForestClassifier().fit(data[best_features + [new_column]], target)
new_pval[new_column] += roc_auc_score(y_val,
model.predict_proba(X_val[best_features + [new_column]])[:,
1])
new_pval[new_column] = new_pval[new_column] / 3
print(f"{new_column}:", new_pval[new_column])
max_score = new_pval.max()
if (max_score - scores[-1] > significance_level):
best_features.append(new_pval.idxmax())
scores.append(max_score)
best_features_and_scores[new_pval.idxmax()] = max_score
else:
best_features.append(new_pval.idxmax())
scores.append(scores[-1])
if scores[-1] == scores[-2] and scores[-1] == scores[-3]:
best_features = best_features[:len(best_features) - 2]
print(f"scores: {scores}")
break
return best_features, best_features_and_scores
# =========================================================================================================
# ===================3 experiments for the feature selection===============================================
for experiment in range(1,4):
best_features, best_features_and_scores = forward_selection(X_train, y_train, X_val, y_val, baseline_auc)
print(f"best features : {best_features}")
print(f"best features for exp. {experiment}: {best_features_and_scores}")
ut.open_file_and_save(f"exp.{experiment}", best_features_and_scores)
# after finding the best features plot roc curves that compares the baseline model to the one
# with the best genetics features
print("=================run model with features selected by Forward Selection=================")
print(f"best features: {best_features}")
rf_basic = RandomForestClassifier()
rf_genetic = RandomForestClassifier()
models = [rf_basic, rf_genetic]
relevant_features = {rf_basic: ['Gender', 'Age', 'Ann Arbor Stage', 'LDH Ratio', 'IPI Group'],
rf_genetic: best_features}
ut.plot_multiple_ROC_curves(X_train, y_train, X_val, y_val, models, relevant_features)
X_train = X_train[best_features]
X_val = X_val[best_features]
# =========================================================================================================
# =====================================CM and scores=============================================
rf = RandomForestClassifier()
rf.fit(X_train, y_train)
plot_confusion_matrix(rf, X_val, y_val)
plt.show()
y_pred_test = rf.predict(X_val)
# accuracy: (tp + tn) / (p + n)
accuracy = accuracy_score(y_val, y_pred_test)
print(f"Accuracy: {accuracy}")
# precision tp / (tp + fp)
precision = precision_score(y_val, y_pred_test)
print(f"Precision: {precision}")
# recall: tp / (tp + fn)
recall = recall_score(y_val, y_pred_test)
print(f"Recall: {recall}")
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_val, y_pred_test)
print(f"F1 score: {f1}")
# View the classification report for test data and predictions
print(classification_report(y_val, y_pred_test))
ut.plot_train_test_ROC_curves(X_train, y_train, X_val, y_val, rf)
| hilakatz/Extranodal-lymphoma-project | Extranodal_lymphoma_project.py | Extranodal_lymphoma_project.py | py | 13,919 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "utilities.load_data_and_transform",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "utilities.draw_multiple_histograms",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "utilities.heatmap",
"line_number": 29,
"usage_type": "call"
},
{... |
24349070486 | from mxnet.gluon.data import dataset, DataLoader
import numpy as np
import mxnet.ndarray as ndarray
from utils.augmentations import *
class MyTransform:
    """Apply a randomized chain of augmentation transforms to one image volume.

    The chain always starts with slice sampling (index 0) and ends with
    crop/resize (14) followed by coordinate-map concatenation (1). During
    training the middle transforms come from ``opts.transforms_list_idx`` in a
    random order; during validation the fixed pipeline [0, 14, 1] is used.

    The transform functions themselves come from ``utils.augmentations`` and
    each takes this object, reading ``self.im`` plus the option attributes
    copied below and returning the updated image.
    """
    def __init__(self, im, opts):
        if opts.is_val:
            transforms_list_idx = [0, 14, 1]
        else:
            # Shuffle the configured middle transforms (choice without
            # replacement over all indices == a random permutation), keeping
            # the fixed start/end transforms in place.
            n_transforms = len(opts.transforms_list_idx)
            shuffled = [opts.transforms_list_idx[j]
                        for j in np.random.choice(n_transforms, n_transforms, replace=False)]
            transforms_list_idx = [0] + shuffled + [14, 1]
        # Copy the options the augmentation functions read.
        self.to_norm = not opts.already_normed
        self.norm_thr = opts.norm_thr
        self.nsee = opts.num_slices_each_end
        self.label_in_last_channel = opts.label_in_last_channel
        self.max_angle = opts.max_angle
        self.coor_maps = opts.coor_maps
        self.is_val = opts.is_val
        self.zdim = opts.zdim
        self.mid_len_ratio = opts.mid_len_ratio
        self.translate_ratio = opts.translate_ratio
        self.mean_scaling = opts.mean_scaling
        self.crop_size_list = opts.crop_size_list
        self.crop_size = opts.crop_size
        self.use_ADC = opts.use_ADC
        self.cov = opts.center_translation_cov
        # Index -> transform lookup; the numeric comments are the indices used
        # by opts.transforms_list_idx.
        self.transforms_dict = {
            'sample_slices': sample_slices,  # 0
            'concat_coor_maps': concat_coor_maps,  # concat_coor_maps index must be 0  # 1
            'rand_scale': rand_scale,  # 2
            'drop_out': drop_out,  # 3
            'contrast_norm': contrast_norm,  # 4
            'deform': deform,  # 5
            'modify_values': modify_values,  # 6
            'Gauss_blur': Gauss_blur,  # 7
            'add_Gauss_noise': add_noise,  # 8
            'rand_translate': rand_translate,  # 9
            'rand_rot': rand_rot,  # 10
            'rand_fliplr': rand_fliplr,  # 11
            'center_crop_and_scale': None,  # 12 (not implemented)
            'swap_channels': swap_channels,  # 13
            'crop_and_resize': crop_and_resize,  # 14
            'rand_flipud': rand_flipud,  # 15
        }
        self.transforms_list = list(self.transforms_dict.keys())
        self.transforms_list = [self.transforms_list[i] for i in transforms_list_idx]
        self.im = im

    def random_transform(self):
        """Run the selected transforms in order and return the final image."""
        for fn in self.transforms_list:
            self.im = self.transforms_dict[fn](self)
        return self.im
class MyDataset(dataset.Dataset):
    """A dataset that combines multiple dataset-like objects, e.g.
    Datasets, lists, arrays, etc.

    The i-th sample is defined as `(x1[i], x2[i], ...)`; every sample is run
    through ``MyTransform`` (configured by ``opts``) before being returned.

    Parameters
    ----------
    opts : options object
        Forwarded to ``MyTransform`` for every sample.
    *args : one or more dataset-like objects
        The data arrays; all must have the same length.
    """
    def __init__(self, opts, *args):
        assert len(args) > 0, "Needs at least 1 arrays"
        self._length = len(args[0])
        self._data = []
        self.opts = opts
        for i, data in enumerate(args):
            # All arrays must be index-aligned so sample i is consistent.
            # (Fixed: the message previously reported index i+1 for array i.)
            assert len(data) == self._length, \
                "All arrays must have the same length; array[0] has length %d " \
                "while array[%d] has %d." % (self._length, i, len(data))
            # 1-D NDArrays (e.g. label vectors) are converted to numpy once here.
            if isinstance(data, ndarray.NDArray) and len(data.shape) == 1:
                data = data.asnumpy()
            self._data.append(data)

    def __getitem__(self, idx):
        """Return the transformed sample (or tuple of samples) at ``idx``."""
        if len(self._data) == 1:
            return MyTransform(self._data[0][idx], self.opts).random_transform()
        else:
            return tuple(MyTransform(data[idx], self.opts).random_transform() for data in self._data)

    def __len__(self):
        return self._length
if __name__ == "__main__":
    # Smoke test for the data pipeline.
    # NOTE(review): MyDataset.__init__'s signature is (opts, *args); this call
    # passes only the array, so `opts` swallows `x`, *args is empty and the
    # "Needs at least 1 arrays" assertion fires. An opts object should be
    # passed as the first argument -- confirm intended usage.
    x = np.zeros(shape=(5, 2, 10, 30, 30))
    d = MyDataset(x)
    data_loader = DataLoader(d, batch_size=2)
    for b in data_loader:
        print(b[0].shape)
| minhto2802/T2_ADC | ProstateSegmentation/utils/custom_dataset.py | custom_dataset.py | py | 3,802 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.random.choice",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "mxnet.gluon.data.dataset.Dataset",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_n... |
17646806271 |
import cv2

# Load the Haar cascade classifiers bundled with OpenCV.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')

# Load the input image and convert it to grayscale for detection.
original_num = 2
img = cv2.imread(str(original_num)+'.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Detect faces; abort when more than one face is found
# (a retry loop returning a result is planned later).
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
if (len(faces)==1):
    for (x, y, w, h) in faces:
        # Expand the detected box: h/4 above, h below, w/4 on each side.
        # NOTE(review): negative start indices (face near the top/left edge)
        # would wrap around -- consider clamping to 0.
        cropped = img[y - int(h / 4):y + h + int(h / 1), x - int(w / 4):x + w + int(w / 4)]  # top:bottom, left:right
else:
    print("์ผ๊ตด ๋ค์ค์ธ์")
    exit()

# Resize the cropped face to the target size 400x500.
# Fixed: the original resized `img` (not the crop) and never used the result.
if cropped.shape[1]*cropped.shape[0]>2000:
    # Shrinking: INTER_AREA avoids moire artifacts.
    resize_img = cv2.resize(cropped, dsize=(400, 500), interpolation=cv2.INTER_AREA)
else:
    # Enlarging: INTER_CUBIC gives smoother upscaling.
    resize_img = cv2.resize(cropped, dsize=(400, 500), interpolation=cv2.INTER_CUBIC)

# Save the resized crop.
change_num = 99
cv2.imwrite(str(change_num)+".jpg", resize_img)

# Show the original and the cropped images.
cv2.imshow('image', img)
cv2.imshow("cropped", cropped)
key = cv2.waitKey(0)
cv2.destroyAllWindows()
| baecci/ALPHAGO_Project22 | test1.py | test1.py | py | 1,278 | python | ko | code | 0 | github-code | 1 | [
{
"api_name": "cv2.CascadeClassifier",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.data",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.data",
... |
1527930933 | import logging
from metadrive.constants import Semantics
import math
from typing import List, Dict
from panda3d.bullet import BulletBoxShape, BulletGhostNode
from panda3d.core import Vec3, LQuaternionf, Vec4, TextureStage, RigidBodyCombiner, \
SamplerState, NodePath, Texture, Material
from metadrive.base_class.base_object import BaseObject
from metadrive.component.lane.abs_lane import AbstractLane
from metadrive.component.lane.point_lane import PointLane
from metadrive.component.road_network.node_road_network import NodeRoadNetwork
from metadrive.component.road_network.road import Road
from metadrive.constants import MetaDriveType, CamMask, PGLineType, PGLineColor, DrivableAreaProperty
from metadrive.engine.asset_loader import AssetLoader
from metadrive.engine.core.physics_world import PhysicsWorld
from metadrive.utils.coordinates_shift import panda_vector, panda_heading
from metadrive.utils.math import norm
logger = logging.getLogger(__name__)
class BaseBlock(BaseObject, DrivableAreaProperty):
    """
    Block is a driving area consisting of several roads
    Note: overriding the _sample() function to fill block_network/respawn_roads in subclass
    Call Block.construct_block() to add it to world
    """
    # Single-character prefix identifying the block type; must be unique per subclass.
    ID = "B"

    def __init__(
        self, block_index: int, global_network: NodeRoadNetwork, random_seed, ignore_intersection_checking=False
    ):
        super(BaseBlock, self).__init__(str(block_index) + self.ID, random_seed, escape_random_seed_assertion=True)
        # block information
        assert self.ID is not None, "Each Block must has its unique ID When define Block"
        assert len(self.ID) == 1, "Block ID must be a character "
        self.block_index = block_index
        self.ignore_intersection_checking = ignore_intersection_checking
        # each block contains its own road network and a global network
        self._global_network = global_network
        self.block_network = self.block_network_type()
        # a bounding box used to improve efficiency x_min, x_max, y_min, y_max
        self._bounding_box = None
        # used to spawn npc
        self._respawn_roads = []
        self._block_objects = None
        if self.render and not self.use_render_pipeline:
            # Shared texture stages/assets for the legacy (non-render-pipeline) renderer.
            self.ts_color = TextureStage("color")
            self.ts_normal = TextureStage("normal")
            self.ts_normal.setMode(TextureStage.M_normal)
            # Only maintain one copy of asset
            self.road_texture = self.loader.loadTexture(AssetLoader.file_path("textures", "sci", "new_color.png"))
            self.road_normal = self.loader.loadTexture(AssetLoader.file_path("textures", "sci", "normal.jpg"))
            self.road_texture.set_format(Texture.F_srgb)
            self.road_normal.set_format(Texture.F_srgb)
            self.road_texture.setMinfilter(SamplerState.FT_linear_mipmap_linear)
            self.road_texture.setAnisotropicDegree(8)
            # # continuous line
            # self.lane_line_model = self.loader.loadModel(AssetLoader.file_path("models", "box.bam"))
            # self.lane_line_model.setPos(0, 0, -DrivableAreaProperty.LANE_LINE_GHOST_HEIGHT / 2)
            self.lane_line_texture = self.loader.loadTexture(AssetLoader.file_path("textures", "sci", "floor.jpg"))
            # self.lane_line_model.setScale(DrivableAreaProperty.STRIPE_LENGTH*4,
            #                               DrivableAreaProperty.LANE_LINE_WIDTH,
            #                               DrivableAreaProperty.LANE_LINE_THICKNESS)
            # # self.lane_line_normal = self.loader.loadTexture(
            # #     AssetLoader.file_path("textures", "sci", "floor_normal.jpg"))
            # # self.lane_line_texture.set_format(Texture.F_srgb)
            # # self.lane_line_normal.set_format(Texture.F_srgb)
            # self.lane_line_model.setTexture(self.ts_color, self.lane_line_texture)
            # # self.lane_line_model.setTexture(self.ts_normal, self.lane_line_normal)
            #
            # # # broken line
            # self.broken_lane_line_model = self.loader.loadModel(AssetLoader.file_path("models", "box.bam"))
            # self.broken_lane_line_model.setScale(DrivableAreaProperty.STRIPE_LENGTH,
            #                                      DrivableAreaProperty.LANE_LINE_WIDTH,
            #                                      DrivableAreaProperty.LANE_LINE_THICKNESS)
            # self.broken_lane_line_model.setPos(0, 0, -DrivableAreaProperty.LANE_LINE_GHOST_HEIGHT / 2)
            # self.broken_lane_line_model.setTexture(self.ts_color, self.lane_line_texture)
            # side
            self.side_texture = self.loader.loadTexture(AssetLoader.file_path("textures", "sidewalk", "color.png"))
            self.side_texture.set_format(Texture.F_srgb)
            self.side_texture.setMinfilter(SamplerState.FT_linear_mipmap_linear)
            self.side_texture.setAnisotropicDegree(8)
            self.side_normal = self.loader.loadTexture(AssetLoader.file_path("textures", "sidewalk", "normal.png"))
            self.side_normal.set_format(Texture.F_srgb)
            self.sidewalk = self.loader.loadModel(AssetLoader.file_path("models", "box.bam"))
            self.sidewalk.setTwoSided(False)
            self.sidewalk.setTexture(self.ts_color, self.side_texture)
            # self.sidewalk = self.loader.loadModel(AssetLoader.file_path("models", "output.egg"))
            # self.sidewalk.setTexture(self.ts_normal, self.side_normal)

    def _sample_topology(self) -> bool:
        """
        Sample a new topology to fill self.block_network
        """
        raise NotImplementedError

    def construct_block(
        self,
        root_render_np: NodePath,
        physics_world: PhysicsWorld,
        extra_config: Dict = None,
        no_same_node=True,
        attach_to_world=True
    ) -> bool:
        """
        Randomly Construct a block, if overlap return False
        """
        self.sample_parameters()
        if not isinstance(self.origin, NodePath):
            self.origin = NodePath(self.name)
        # else:
        #     print("Origin already exists: ", self.origin)
        self._block_objects = []
        if extra_config:
            # Override sampled parameters with caller-supplied values.
            assert set(extra_config.keys()).issubset(self.PARAMETER_SPACE.parameters), \
                "Make sure the parameters' name are as same as what defined in pg_space.py"
            raw_config = self.get_config(copy=True)
            raw_config.update(extra_config)
            self.update_config(raw_config)
        self._clear_topology()
        success = self._sample_topology()
        # Merge this block's roads into the map-wide network, then build render/physics nodes.
        self._global_network.add(self.block_network, no_same_node)
        self._create_in_world()
        self.attach_to_world(root_render_np, physics_world)
        if not attach_to_world:
            self.detach_from_world(physics_world)
        return success

    def destruct_block(self, physics_world: PhysicsWorld):
        """Detach this block from render/physics and release all its nodes and objects."""
        self._clear_topology()
        self.detach_from_world(physics_world)
        self.origin.removeNode()
        self.origin = None
        self.dynamic_nodes.clear()
        self.static_nodes.clear()
        for obj in self._block_objects:
            obj.destroy()
        self._block_objects = None

    def construct_from_config(self, config: Dict, root_render_np: NodePath, physics_world: PhysicsWorld):
        """Construct the block with an explicit parameter config instead of sampling."""
        success = self.construct_block(root_render_np, physics_world, config)
        return success

    def get_respawn_roads(self):
        """Return the roads where NPC vehicles may be (re)spawned."""
        return self._respawn_roads

    def get_respawn_lanes(self):
        """
        return a 2-dim array [[]] to keep the lane index
        """
        ret = []
        for road in self._respawn_roads:
            lanes = road.get_lanes(self.block_network)
            ret.append(lanes)
        return ret

    def get_intermediate_spawn_lanes(self):
        """Return all lanes that can be used to generate spawn intermediate vehicles."""
        raise NotImplementedError()

    def _add_one_respawn_road(self, respawn_road: Road):
        assert isinstance(respawn_road, Road), "Spawn roads list only accept Road Type"
        self._respawn_roads.append(respawn_road)

    def _clear_topology(self):
        # Remove this block's sub-network from the global network before clearing it.
        if len(self._global_network.graph.keys()) > 0:
            self._global_network -= self.block_network
        self.block_network.graph.clear()
        self.PART_IDX = 0
        self.ROAD_IDX = 0
        self._respawn_roads.clear()

    """------------------------------------- For Render and Physics Calculation ---------------------------------- """

    def _create_in_world(self, skip=False):
        """
        Create NodePath and Geom node to perform both collision detection and render

        Note: Override the create_in_world() function instead of this one, since this method severing as a wrapper to
        help improve efficiency
        """
        # RigidBodyCombiner nodes batch the many static geoms for rendering efficiency.
        self.lane_line_node_path = NodePath(RigidBodyCombiner(self.name + "_lane_line"))
        self.sidewalk_node_path = NodePath(RigidBodyCombiner(self.name + "_sidewalk"))
        self.lane_node_path = NodePath(RigidBodyCombiner(self.name + "_lane"))
        self.lane_vis_node_path = NodePath(RigidBodyCombiner(self.name + "_lane_vis"))
        # Semantic labels consumed by the semantic camera.
        self.sidewalk_node_path.setTag("type", Semantics.SIDEWALK.label)
        self.lane_vis_node_path.setTag("type", Semantics.ROAD.label)
        self.lane_line_node_path.setTag("type", Semantics.LANE_LINE.label)
        if skip:  # for debug
            pass
        else:
            self.create_in_world()
        self.lane_line_node_path.flattenStrong()
        self.lane_line_node_path.node().collect()
        self.sidewalk_node_path.flattenStrong()
        self.sidewalk_node_path.node().collect()
        self.sidewalk_node_path.hide(CamMask.ScreenshotCam)
        # only bodies reparent to this node
        self.lane_node_path.flattenStrong()
        self.lane_node_path.node().collect()
        self.lane_vis_node_path.flattenStrong()
        self.lane_vis_node_path.node().collect()
        self.lane_vis_node_path.hide(CamMask.DepthCam | CamMask.ScreenshotCam | CamMask.SemanticCam)
        self.origin.hide(CamMask.Shadow)
        self.sidewalk_node_path.reparentTo(self.origin)
        self.lane_line_node_path.reparentTo(self.origin)
        self.lane_node_path.reparentTo(self.origin)
        self.lane_vis_node_path.reparentTo(self.origin)
        try:
            self._bounding_box = self.block_network.get_bounding_box()
        except:
            # Some networks (e.g. empty ones) cannot provide a bounding box.
            if len(self.block_network.graph) > 0:
                logging.warning("Can not find bounding box for it")
            self._bounding_box = None, None, None, None
        self._node_path_list.append(self.sidewalk_node_path)
        self._node_path_list.append(self.lane_line_node_path)
        self._node_path_list.append(self.lane_node_path)
        self._node_path_list.append(self.lane_vis_node_path)

    def create_in_world(self):
        """
        Create lane in the panda3D world
        """
        raise NotImplementedError

    def add_body(self, physics_body):
        raise DeprecationWarning(
            "Different from common objects like vehicle/traffic sign, Block has several bodies!"
            "Therefore, you should create BulletBody and then add them to self.dynamics_nodes "
            "manually. See in construct() method"
        )

    def get_state(self) -> Dict:
        """
        The record of Block type is not same as other objects
        """
        return {}

    def set_state(self, state: Dict):
        """
        Block type can not set state currently
        """
        pass

    def _add_lane_line(self, lane: AbstractLane, colors: List[Vec4], contruct_two_side=True):
        # NOTE: the raise below makes the rest of this method unreachable; kept for argoverse.
        raise DeprecationWarning("Leave for argoverse using")
        if isinstance(lane, PointLane):
            parent_np = self.lane_line_node_path
            lane_width = lane.width_at(0)
            for c, i in enumerate([-1, 1]):
                line_color = colors[c]
                acc_length = 0
                if lane.line_types[c] == PGLineType.CONTINUOUS:
                    for segment in lane.segment_property:
                        lane_start = lane.position(acc_length, i * lane_width / 2)
                        acc_length += segment["length"]
                        lane_end = lane.position(acc_length, i * lane_width / 2)
                        middle = (lane_start + lane_end) / 2
                        self._add_lane_line2bullet(
                            lane_start, lane_end, middle, parent_np, line_color, lane.line_types[c]
                        )

    def _add_box_body(self, lane_start, lane_end, middle, parent_np: NodePath, line_type, line_color):
        # NOTE: the raise below makes the rest of this method unreachable.
        raise DeprecationWarning("Useless, currently")
        length = norm(lane_end[0] - lane_start[0], lane_end[1] - lane_start[1])
        if PGLineType.prohibit(line_type):
            node_name = MetaDriveType.LINE_SOLID_SINGLE_WHITE if line_color == PGLineColor.GREY else MetaDriveType.LINE_SOLID_SINGLE_YELLOW
        else:
            node_name = MetaDriveType.BROKEN_LINE
        body_node = BulletGhostNode(node_name)
        body_node.set_active(False)
        body_node.setKinematic(False)
        body_node.setStatic(True)
        body_np = parent_np.attachNewNode(body_node)
        self._node_path_list.append(body_np)
        shape = BulletBoxShape(
            Vec3(length / 2, DrivableAreaProperty.LANE_LINE_WIDTH / 2, DrivableAreaProperty.LANE_LINE_GHOST_HEIGHT)
        )
        body_np.node().addShape(shape)
        mask = DrivableAreaProperty.CONTINUOUS_COLLISION_MASK if line_type != PGLineType.BROKEN else DrivableAreaProperty.BROKEN_COLLISION_MASK
        body_np.node().setIntoCollideMask(mask)
        self.static_nodes.append(body_np.node())
        body_np.setPos(panda_vector(middle, DrivableAreaProperty.LANE_LINE_GHOST_HEIGHT / 2))
        direction_v = lane_end - lane_start
        # theta = -numpy.arctan2(direction_v[1], direction_v[0])
        theta = panda_heading(math.atan2(direction_v[1], direction_v[0]))
        body_np.setQuat(LQuaternionf(math.cos(theta / 2), 0, 0, math.sin(theta / 2)))

    @property
    def block_network_type(self):
        """
        There are two type of road network to describe the relation of all lanes, override this func to assign one when
        you are building your own block.
        return: roadnetwork
        """
        raise NotImplementedError

    def destroy(self):
        """Release the road network and reset counters before the base-class teardown."""
        if self.block_network is not None:
            self.block_network.destroy()
            if self.block_network.graph is not None:
                self.block_network.graph.clear()
            self.block_network = None
        self.PART_IDX = 0
        self.ROAD_IDX = 0
        self._respawn_roads.clear()
        self._global_network = None
        super(BaseBlock, self).destroy()

    def __del__(self):
        self.destroy()
        logger.debug("{} is being deleted.".format(type(self)))

    @property
    def bounding_box(self):
        # (x_min, x_max, y_min, y_max) computed in _create_in_world(); may be all None.
        return self._bounding_box
| metadriverse/metadrive | metadrive/component/block/base_block.py | base_block.py | py | 15,032 | python | en | code | 471 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "metadrive.base_class.base_object.BaseObject",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "metadrive.constants.DrivableAreaProperty",
"line_number": 24,
"usage_type":... |
29480630063 | """
Digita endpoint.
You must declare environment variable DIGITA_URL to activate this plugin.
"""
import os
import json
import logging
import binascii
import dateutil
import pytz
from django.conf import settings
from django.conf.urls import url
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.utils import timezone
from influxdb.exceptions import InfluxDBClientError
from endpoints.utils import BasePlugin
from endpoints.utils import get_influxdb_client, create_influxdb_obj
from endpoints.utils import get_setting, get_datalogger
from endpoints.views import dump_request
# Name of the environment variable that activates this plugin.
ENV_NAME = 'DIGITA_URL'
# Endpoint URL path; the plugin stays inactive when unset.
URL = get_setting(ENV_NAME)
# Default InfluxDB database name (overridable via the DIGITA_DB setting).
DIGITA_DB = get_setting('DIGITA_DB', 'digita')

logger = logging.getLogger(__name__)
def hex2int(hex_str):
    """
    Convert a hexadecimal string (e.g. "23") to its integer value (35).

    :param hex_str: hex character string
    :return: int integer
    """
    value = int(hex_str, 16)
    return value
def calc_temp(hex_str):
    """
    Convert 4 hex characters (e.g. "040b") to float temp (25.824175824175825).

    :param hex_str: hex character string
    :return: float temperature
    """
    high_byte = int(hex_str[0:2], 16)
    low_byte = int(hex_str[2:4], 16)
    adc = high_byte * 256 + low_byte
    # 12-bit ADC reading mapped onto the -50..250 degree range.
    return (300 * adc / 4095) - 50
def calc_volts(hex_str):
    """
    Convert 2 hex characters (e.g. "fe") to float volts (3.5043478260869567).

    :param hex_str: hex character string
    :return: float volts
    """
    raw = int(hex_str, 16)
    millivolts = (raw / 0.23) + 2400
    return millivolts / 1000
def handle_clickey_tempsens(hex_str):
    """
    Parse a Clickey Tempsens payload: two temperature channels followed by
    the battery voltage byte.

    :param hex_str: sensor hex payload (type byte + 2x temp + volt)
    :return: dict with keys temp1, temp2, volt
    """
    return {
        'temp1': calc_temp(hex_str[2:6]),
        'temp2': calc_temp(hex_str[6:10]),
        'volt': calc_volts(hex_str[10:12]),
    }
def hex2value10(hex_str):
    """Decode a hex string as an integer scaled down by a factor of ten."""
    value = hex2int(hex_str)
    return value / 10.0
def handle_aqburk(hex_str):
    """
    Parse payload like "2a2a0021002c002800300056003b0000" float values

    :param hex_str: AQLoRaBurk hex payload
    :return: dict containing float values
    """
    # Eight consecutive 4-hex-digit particulate fields start at offset 4.
    pm_fields = ('pm25min', 'pm25max', 'pm25avg', 'pm25med',
                 'pm10min', 'pm10max', 'pm10avg', 'pm10med')
    data = {}
    for index, name in enumerate(pm_fields):
        start = 4 + index * 4
        data[name] = hex2value10(hex_str[start:start + 4])
    # Extended payloads carry a BME680-style climate block at the end.
    if len(hex_str) == 52:
        data['temp'] = round(hex2value10(hex_str[36:40]) - 100, 1)
        data['humi'] = hex2value10(hex_str[40:44])
        data['pres'] = hex2value10(hex_str[44:48])
        data['gas'] = hex2value10(hex_str[48:52])
    return data
def handle_keyval(hex_str):
    """
    Parse a key-value hex payload such as hex("temp=24.61,hum=28.69") into
    a dict of floats: {'temp': 24.61, 'hum': 28.69}.

    :param hex_str: key-value hex payload
    :return: dict containing parsed balues
    :raises UnicodeDecodeError: if hex_str contains illegal bytes for utf8
    """
    decoded = binascii.unhexlify(hex_str).decode()  # e.g. 'temp=24.61,hum=28.69'
    data = {}
    for pair in decoded.split(','):
        parts = pair.split('=')
        data[parts[0]] = float(parts[1])
    return data
class Plugin(BasePlugin):
    """
    Digita plugin. Checks if endpoint's URL has been set in env.
    """
    name = 'digita'
    viewname = 'digitahandler'

    def __init__(self):
        """Check that `ENV_NAME` is in env variables."""
        super().__init__()
        if URL is not None:
            self.in_use = True

    def register(self):
        print('Registering plugin "{}"'.format(self.name))

    def get_urlpatterns(self):
        # Expose the endpoint only when DIGITA_URL is configured.
        if self.in_use is False:
            print('{} environment variable is not set. {} endpoint is not in use.'.format(ENV_NAME, self.name))
            urlpatterns = []
        else:
            url_pattern = r'^{}$'.format(URL)
            urlpatterns = [
                url(url_pattern, self.view_func, name=self.viewname),
            ]
        return urlpatterns

    @csrf_exempt
    def view_func(self, request):
        """
        Endpoint requires valid Digita formatted JSON payload.

        Dispatches on the payload shape: paxcounter (8 hex chars), Clickey
        Tempsens ('13' prefix), AQ burk ('2a' prefix) or generic key=value
        text; parsed values are written to InfluxDB.
        """
        err_msg = ''
        status = 200
        try:
            body_data = request.body
            data = json.loads(body_data.decode('utf-8'))
        except (json.decoder.JSONDecodeError, UnicodeDecodeError) as err:
            log_msg = '[DIGITA] Invalid data: "{}". Hint: should be UTF-8 json.'.format(body_data[:50])
            err_msg = 'Invalid data: "{}"... Hint: should be UTF-8 json.'.format(body_data[:50])
            logger.error(log_msg)
            return HttpResponse(err_msg, status=400)
        # meta and type keys should be always in request json
        try:
            d = data['DevEUI_uplink']
            device = d['DevEUI']
            times = str(d['Time'])
            rssi = d['LrrRSSI']
            payload_hex = d['payload_hex']
        except KeyError as err:
            log_msg = '[DIGITA] Invalid json structure: "{}". Missing key: {}.'.format(body_data, err)
            err_msg = 'Invalid json structure: "{}". Hint: missing key {}.'.format(body_data, err)
            logger.error(log_msg)
            return HttpResponse(err_msg, status=400)
        # Archive the raw request JSON under MEDIA_ROOT/digita/<date>/<device>/.
        now = timezone.now().astimezone(pytz.utc)
        path = os.path.join(settings.MEDIA_ROOT, 'digita', now.strftime('%Y-%m-%d'), device)
        os.makedirs(path, exist_ok=True)
        fpath = os.path.join(path, now.strftime('%Y%m%dT%H%M%S.%fZ.json'))
        with open(fpath, 'wt') as destination:
            destination.write(json.dumps(data, indent=1))
        response = HttpResponse("ok")
        # TODO: move this to a function
        if len(payload_hex) == 8:
            # Paxcounter payload: two 16-bit big-endian counters (wifi, ble).
            idata = {
                'wifi': int(payload_hex[0:4], 16),
                'ble': int(payload_hex[4:8], 16)
            }
            idata['rssi'] = rssi
            keys_str = 'wifi-ble'
            dl_descr = 'paxcounter'
            datalogger, created = get_datalogger(device, description=dl_descr, update_activity=True)
            ts = dateutil.parser.parse(times)
            measurement = create_influxdb_obj(device, keys_str, idata, ts)
            measurements = [measurement]
            # dbname = request.GET.get('db', DIGITA_DB)
            dbname = 'paxcounter'
            iclient = get_influxdb_client(database=dbname)
            iclient.create_database(dbname)
            try:
                iclient.write_points(measurements)
            except InfluxDBClientError as err:
                err_msg = '[DIGITA] InfluxDB error: {}'.format(err)
                status = 500
        elif payload_hex[:2] == '13':
            # Clickey Tempsens payload (type byte 0x13).
            idata = handle_clickey_tempsens(payload_hex)
            idata['rssi'] = rssi
            keys_str = 'tempsens'
            datalogger, created = get_datalogger(device, description='Clickey Tempsens PRO', update_activity=True)
            ts = dateutil.parser.parse(times)
            measurement = create_influxdb_obj(device, keys_str, idata, ts)
            measurements = [measurement]
            # dbname = request.GET.get('db', DIGITA_DB)
            dbname = 'digita'
            iclient = get_influxdb_client(database=dbname)
            iclient.create_database(dbname)
            try:
                iclient.write_points(measurements)
            except InfluxDBClientError as err:
                err_msg = '[DIGITA] InfluxDB error: {}'.format(err)
                status = 500
        elif payload_hex[:2].lower() == '2a':  # payload_hex[:4].lower() == '2a2a':
            # AQ burk particulate payload (starts with '*' == 0x2a).
            idata = handle_aqburk(payload_hex)
            idata['rssi'] = rssi
            keys_str = 'aqburk'
            datalogger, created = get_datalogger(device, description='FVH AQ burk', update_activity=True)
            ts = dateutil.parser.parse(times)
            measurement = create_influxdb_obj(device, keys_str, idata, ts)
            measurements = [measurement]
            # NOTE: this assignment makes DIGITA_DB local to view_func, shadowing
            # the module-level constant for this branch only.
            DIGITA_DB = 'aqburk'
            dbname = request.GET.get('db', DIGITA_DB)
            iclient = get_influxdb_client(database=dbname)
            iclient.create_database(dbname)
            try:
                iclient.write_points(measurements)
            except InfluxDBClientError as err:
                err_msg = '[DIGITA] InfluxDB error: {}'.format(err)
                status = 500
        elif len(payload_hex) >= 2:  # Assume we have key-val data
            try:
                idata = handle_keyval(payload_hex)
            except (UnicodeDecodeError, IndexError) as err:
                err_msg = '[DIGITA] Payload error: {}'.format(err)
                status = 400
                logger.error(err_msg)
                dump_request(request, postfix='digita')
                response = HttpResponse(err_msg, status=status)
                return response
            idata['rssi'] = rssi
            # Measurement name is built from the sorted data keys.
            ikeys = list(idata.keys())
            ikeys.sort()
            keys_str = '_'.join(ikeys)
            datalogger, created = get_datalogger(device, description='LoRaWAN device', update_activity=True)
            ts = dateutil.parser.parse(times)
            measurement = create_influxdb_obj(device, keys_str, idata, ts)
            measurements = [measurement]
            # dbname = request.GET.get('db', DIGITA_DB)
            dbname = 'digita'
            iclient = get_influxdb_client(database=dbname)
            iclient.create_database(dbname)
            try:
                iclient.write_points(measurements)
            except InfluxDBClientError as err:
                err_msg = '[DIGITA] InfluxDB error: {}'.format(err)
                status = 500
        else:
            err_msg = '[DIGITA] Not handled'
        if err_msg != '':
            # Any branch that set err_msg turns into an error response and a request dump.
            logger.error(err_msg)
            dump_request(request, postfix='digita')
            response = HttpResponse(err_msg, status=status)
        return response
| aapris/IoT-Web-Experiments | iotendpoints/endpoints/plugins/digita.py | digita.py | py | 9,963 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "endpoints.utils.get_setting",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "endpoints.utils.get_setting",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 28,
"usage_type": "call"
},
{
"api_nam... |
16957265295 | # -*- encoding: UTF-8 -*-
##############################################################################
from openerp import fields, api
from openerp.addons.field_secure import models # @UnresolvedImport
import logging
_logger = logging.getLogger(__name__)
# Appreciation scale used by the applicant `priority` selection field.
AVAILABLE_PRIORITIES = [
    ('0', 'Bad'),
    ('1', 'Below Average'),
    ('2', 'Average'),
    ('3', 'Good'),
    ('4', 'Excellent')
]
class hr_applicant(models.SecureModel):
_inherit = "hr.applicant"
salary_expected_secure = fields.Secure(
string='Expected Salary',
security="_password_security",
help="Salary Expected by Applicant")
salary_proposed_secure = fields.Secure(
string='Proposed Salary',
security="_password_security",
help="Salary Proposed by the Organization")
partner_name = fields.Char("Applicant's Name", track_visibility="onchange")
email_from = fields.Char('Email', size=128,
help="These people will receive email.",
track_visibility="onchange")
partner_mobile = fields.Char('Mobile', size=32,
track_visibility="onchange")
type_id = fields.Many2one('hr.recruitment.degree', 'Degree',
track_visibility="onchange")
date_action = fields.Date('Next Action Date', track_visibility="onchange")
title_action = fields.Char('Next Action', size=64,
track_visibility="onchange")
priority = fields.Selection(AVAILABLE_PRIORITIES, 'Appreciation',
track_visibility="onchange")
source_id = fields.Many2one('hr.recruitment.source', 'Source',
track_visibility="onchange")
reference = fields.Char('Referred By', track_visibility="onchange")
job_id = fields.Many2one('hr.job', 'Applied Job',
track_visibility="onchange")
availability = fields.Integer(
'Availability', help="The number of days in which the applicant will "
"be available to start working", track_visibility="onchange")
categ_ids = fields.Many2many('hr.applicant_category', string='Tags',
track_visibility="onchange")
description = fields.Text('Description', track_visibility="onchange")
@api.model
def create(self, vals):
"""
Override function
Calculate Subject = {Applied Job} - {Applicants name}
"""
job_obj = self.env['hr.job']
job_name = ''
if vals.get('job_id'):
job_name = job_obj.browse(vals['job_id']).name
vals['name'] = job_name and job_name + ' - ' + vals['partner_name'] \
or vals['partner_name']
return super(hr_applicant, self).create(vals)
@api.multi
def write(self, vals):
"""
Override function
Calculate Subject = {Applied Job} - {Applicants name}
"""
if 'job_id' not in vals and 'partner_name' not in vals:
# Nothing change
return super(hr_applicant, self).write(vals)
job_obj = self.env['hr.job']
if 'job_id' in vals and 'partner_name' in vals:
# change job and applicant name
# update all applicant at the same time
job_name = ''
if vals.get('job_id'):
job_name = job_obj.browse(vals['job_id']).name
vals['name'] = job_name and job_name + ' - ' \
+ vals['partner_name'] \
or vals['partner_name']
return super(hr_applicant, self).write(vals)
else:
# change job or partner_name
if 'job_id' in vals:
# Only change job name
job_name = ''
if vals['job_id']:
job_name = job_obj.browse(vals['job_id']).name
for app in self:
pos = app.name.find('-')
vals['name'] = job_name and job_name + ' - ' + \
(pos > -1 and app.name[pos + 2:] or app.name) \
or app.name[pos + 2:]
super(hr_applicant, app).write(vals)
elif 'partner_name' in vals:
# On change the application name
for app in self:
pos = app.name.find('-')
vals['name'] = (pos > -1 and app.name[: pos + 2] or '') + \
vals['partner_name']
super(hr_applicant, app).write(vals)
return True
@api.multi
def _password_security(self):
"""
Only the followers of the application can read/update/delete the
Propose Salary/Suggested Salary.
"""
is_allow = False
for rec in self:
if self.env.user.partner_id.id in rec.message_follower_ids.ids:
is_allow = True
else:
is_allow = False
break
return is_allow
@api.model
def _get_applicants_of_followers(self, user_id):
filter_ids = []
current_user = self.env["res.partner"].search(
[('user_id', '=', user_id)])
self._cr.execute(""" SELECT id FROM hr_applicant """)
datas = [data[0] for data in self._cr.fetchall()]
for rec in self.browse(datas):
if current_user in rec.message_follower_ids:
filter_ids.append(rec.id)
if filter_ids:
return [('id', 'in', filter_ids)]
else:
return [('id', 'in', [])]
@api.model
def search(self, args, offset=0, limit=None, order=None, count=False):
uid = self._context.get('uid', self._uid)
if self.message_follower_ids and \
uid in self.message_follower_ids.ids:
args.extend(self._get_applicants_of_followers(uid))
return super(hr_applicant, self).search(
args, offset=offset, limit=limit, order=order, count=count)
    @api.cr_uid_ids_context
    def message_track(self, cr, uid, ids, tracked_fields,
                      initial_values, context=None):
        """Old-API tracking override: compute changed tracked fields per
        record, format an HTML diff message and post it with the matching
        subtype (or as a plain log message when no subtype applies).

        ``tracked_fields`` maps column names to field metadata;
        ``initial_values`` maps record id to the pre-write values.
        """
        def convert_for_display(value, col_info):
            # Render a field value as a display string according to its type.
            if not value and col_info['type'] == 'boolean':
                return 'False'
            if not value:
                return ''
            if col_info['type'] == 'many2one':
                return value.name_get()[0][1]
            if col_info['type'] == 'selection':
                return dict(col_info['selection'])[value]
            if col_info['type'] == 'many2many':
                str1 = ', '.join([v.name_get()[0][1] for v in value])
                return str1
            return value
        def format_message(message_description, tracked_values):
            # Build the HTML body: optional header plus, for every changed
            # field, "Removed"/"Added" item lists computed from the
            # comma-separated display strings.
            message = ''
            if message_description:
                message = '<span>%s</span>' % message_description
            for name, change in tracked_values.items():
                old_values = change.get('old_value')
                if isinstance(old_values, int):
                    old_values = str(old_values)
                # Split "[a, b, c]"-style display strings into items
                # (encoded to bytes here, decoded again below -- py2 legacy).
                list_old_values = \
                    [item.strip().encode('utf-8')
                     for item in old_values.strip('[]').split(',')]
                new_values = change.get('new_value')
                if isinstance(new_values, int):
                    new_values = str(new_values)
                list_new_values = \
                    [item.strip().encode('utf-8')
                     for item in new_values.strip('[]').split(',')]
                vals = []
                for x in list_old_values:
                    if x not in list_new_values:
                        vals.append(x.decode('utf-8'))
                if vals:
                    message +=\
                        '<div> • <b>Removed %s</b>: ' %\
                        change.get('col_info')
                    message += '%s</div>' % ', '.join(vals)
                vals = []
                for x in list_new_values:
                    if x not in list_old_values:
                        vals.append(x.decode('utf-8'))
                if vals:
                    message +=\
                        '<div> • <b>Added %s</b>: ' %\
                        change.get('col_info')
                    message += '%s</div>' % ', '.join(vals)
            return message
        if not tracked_fields:
            return True
        for browse_record in self.browse(cr, uid, ids, context=context):
            initial = initial_values[browse_record.id]
            changes = set()
            tracked_values = {}
            # generate tracked_values data structure: {'col_name': {col_info,
            # new_value, old_value}}
            for col_name, col_info in tracked_fields.items():
                field = self._fields[col_name]
                initial_value = initial[col_name]
                record_value = getattr(browse_record, col_name)
                if record_value == initial_value and\
                        getattr(field, 'track_visibility', None) == 'always':
                    tracked_values[col_name] = dict(
                        col_info=col_info['string'],
                        new_value=convert_for_display(record_value, col_info),
                    )
                # because browse null != False
                elif record_value != initial_value and\
                        (record_value or initial_value):
                    if getattr(field, 'track_visibility', None) in\
                            ['always', 'onchange']:
                        tracked_values[col_name] = dict(
                            col_info=col_info['string'],
                            old_value=convert_for_display(
                                initial_value, col_info),
                            new_value=convert_for_display(
                                record_value, col_info),
                        )
                    if col_name in tracked_fields:
                        changes.add(col_name)
            if not changes:
                continue
            # find subtypes and post messages or log if no subtype found
            subtypes = []
            # By passing this key, that allows to let the subtype empty and so
            # don't sent email because partners_to_notify from
            # mail_message._notify will be empty
            # NOTE(review): ``context`` may be None here, in which case
            # ``context.get`` raises -- callers seem to always pass one.
            if not context.get('mail_track_log_only'):
                for field, track_info in self._track.items():
                    if field not in changes:
                        continue
                    for subtype, method in track_info.items():
                        if method(self, cr, uid, browse_record, context):
                            subtypes.append(subtype)
            posted = False
            for subtype in subtypes:
                subtype_rec = self.pool.get('ir.model.data').xmlid_to_object(
                    cr, uid, subtype, context=context)
                if not (subtype_rec and subtype_rec.exists()):
                    _logger.debug('subtype %s not found' % subtype)
                    continue
                message = format_message(
                    subtype_rec.description if subtype_rec.description else
                    subtype_rec.name, tracked_values)
                self.message_post(
                    cr, uid, browse_record.id, body=message,
                    subtype=subtype, context=context)
                posted = True
            if not posted:
                # No subtype matched: log the diff without notifying anyone.
                message = format_message('', tracked_values)
                self.message_post(
                    cr, uid, browse_record.id,
                    body=message, context=context)
        return True
| TinPlusIT05/tms | project/tms_modules/model/hr/hr_applicant.py | hr_applicant.py | py | 11,774 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "openerp.addons.field_secure.models.SecureModel",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "openerp.addons.field_secure.models",
"line_number": 19,
"usage_type"... |
17341574832 | from pathlib import Path
from experimaestro.compat import cached_property
import importlib
import os
import hashlib
import logging
import inspect
import json
from experimaestro.mkdocs.metaloader import Module
import pkg_resources
from typing import Iterable, Iterator, List, Dict
from .utils import CachedFile, downloadURL
from .settings import UserSettings, Settings
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from datamaestro.definitions import AbstractDataset
class Compression:
    """Helpers mapping a compression definition to a file extension."""

    _EXTENSIONS = {"gzip": ".gz"}

    @staticmethod
    def extension(definition):
        """Return the filename suffix implied by *definition*.

        A falsy definition means "no compression" and yields an empty
        string; any unknown definition raises.
        """
        if not definition:
            return ""
        ext = Compression._EXTENSIONS.get(definition)
        if ext is None:
            raise Exception("Not handled compression definition: %s" % definition)
        return ext
class Context:
    """
    Represents the application context

    Holds the main paths (data, cache), the global and user settings, and a
    process-wide singleton instance.
    """
    # Root directory, overridable via the DATAMAESTRO_DIR environment variable.
    MAINDIR = Path(os.environ.get("DATAMAESTRO_DIR", "~/datamaestro")).expanduser()
    # Process-wide singleton (set by __init__, read by instance()).
    INSTANCE = None
    """Main settings"""
    def __init__(self, path: Path = None):
        # Only one context may ever be constructed per process.
        assert not Context.INSTANCE
        Context.INSTANCE = self
        self._path = path or Context.MAINDIR
        self._dpath = Path(__file__).parents[1]
        self._repository = None
        # self.registry = Registry(self.datapath / "registry.yaml")
        self.keep_downloads = False
        self.traceback = False
        # Read global preferences
        self.settings = Settings.load(self._path / "settings.json")
        # Read user preferences
        path = Path("~").expanduser() / ".config" / "datamaestro" / "user.json"
        self.user_settings = UserSettings.load(path)
    @staticmethod
    def instance():
        """Return the singleton context, constructing it on first use."""
        if Context.INSTANCE is None:
            Context.INSTANCE = Context()
        return Context.INSTANCE
    @staticmethod
    def remote(host, pythonpath, datapath=None):
        """Create a remote context by connecting to a given host"""
        from experimaestro.rpyc import client
        client = client(hostname=host, pythonpath=pythonpath).__enter__()
        context = client.connection.modules.datamaestro.context.Context(datapath)
        return context
    @staticmethod
    def frompath(path: Path):
        """Return a context manager yielding a context.

        NOTE(review): ``path`` is unused (``Context.instance()`` ignores it)
        and ``__enter__`` never installs ``context`` as ``Context.INSTANCE``
        even though ``__exit__`` restores the previous one -- confirm intent.
        """
        context = Context.instance()
        class ContextManager:
            def __enter__(self):
                self.previous = Context.INSTANCE
                return context
            def __exit__(self, exc_type, exc_val, exc_tb):
                Context.INSTANCE = self.previous
        return ContextManager()
    @property
    def datapath(self):
        # Where downloaded/prepared dataset data lives.
        return self._path.joinpath("data")
    @property
    def cachepath(self) -> Path:
        # Where temporary download artifacts are cached.
        return self._path.joinpath("cache")
    @cached_property
    def repositorymap(self) -> Dict[str, "Repository"]:
        """Map each repository's base module name to its instance."""
        return {
            repository.basemodule(): repository for repository in self.repositories()
        }
    def repositories(self) -> Iterable["Repository"]:
        """Returns an iterator over repositories"""
        for entry_point in pkg_resources.iter_entry_points("datamaestro.repositories"):
            yield entry_point.load().instance()
    def repository(self, repositoryid):
        """Return the repository registered under ``repositoryid``."""
        if repositoryid is None:
            return None
        l = [
            x
            for x in pkg_resources.iter_entry_points(
                "datamaestro.repositories", repositoryid
            )
        ]
        if not l:
            # NOTE(review): "%s" is never interpolated here (comma instead of
            # the % operator), so the id becomes a second Exception argument.
            raise Exception("No datasets repository named %s", repositoryid)
        if len(l) > 1:
            raise Exception(
                "Too many datasets repository named %s (%d)" % (repositoryid, len(l))
            )
        return l[0].load()(self)
    @property
    def running_test(self):
        # True when running under pytest.
        return "PYTEST_CURRENT_TEST" in os.environ
    def datasets(self):
        """Returns an iterator over all files"""
        for repository in self.repositories():
            for dataset in repository:
                yield dataset
    def dataset(self, datasetid) -> "AbstractDataset":
        """Get a dataset by ID"""
        for repository in self.repositories():
            dataset = repository.search(datasetid)
            if dataset is not None:
                return dataset
        raise Exception("Dataset {} not found".format(datasetid))
    def downloadURL(self, url, size: int = None):
        """Downloads an URL
        Args:
            url (str): The URL to download
            size (str): The size if known (in bytes)
        """
        self.cachepath.mkdir(exist_ok=True)
        def getPaths(hasher):
            """Returns a cache file path"""
            path = self.cachepath.joinpath(hasher.hexdigest())
            urlpath = path.with_suffix(".url")
            dlpath = path.with_suffix(".dl")
            if urlpath.is_file():
                if urlpath.read_text() != url:
                    # TODO: do something better
                    raise Exception(
                        "Cached URL hash does not match. Clear cache to resolve"
                    )
            return urlpath, dlpath
        # Cache key: SHA-256 of the JSON-encoded URL.
        hasher = hashlib.sha256(json.dumps(url).encode("utf-8"))
        urlpath, dlpath = getPaths(hasher)
        urlpath.write_text(url)
        if dlpath.is_file():
            logging.debug("Using cached file %s for %s", dlpath, url)
        else:
            logging.info("Downloading %s", url)
            # Download to a .tmp path first so partial files are resumable.
            tmppath = dlpath.with_suffix(".tmp")
            downloadURL(url, tmppath, tmppath.is_file(), size=size)
            # Now, rename to original
            tmppath.rename(dlpath)
        return CachedFile(dlpath, keep=self.keep_downloads, others=[urlpath])
    def ask(self, question: str, options: Dict[str, str]):
        """Ask a question to the user"""
        print(question)
        answer = None
        # Loop until one of the accepted options is typed.
        while answer not in options:
            answer = input().strip().lower()
        return options[answer]
class ResolvablePath:
    """An object that can be resolved into a :class:`Path`.

    Subclasses implement :meth:`__call__` to produce the concrete path.
    (Fix: the original had a second, misplaced docstring string after
    ``resolve``; its content is merged here, and the "than" typo is fixed.)
    """

    @staticmethod
    def resolve(context, path):
        """Resolve *path*: call it with *context* when it is a
        ResolvablePath, otherwise coerce it to a plain :class:`Path`."""
        if isinstance(path, ResolvablePath):
            return path(context)
        return Path(path)

    # String annotation: avoids a hard dependency on Context at
    # class-creation time.
    def __call__(self, context: "Context") -> Path:
        """Return the resolved path; must be overridden by subclasses."""
        raise NotImplementedError()
class DatafolderPath(ResolvablePath):
    """A path expressed relative to a named data folder from the settings."""
    def __init__(self, folderid, path):
        # Identifier of the data folder in ``context.settings.datafolders``.
        self.folderid = folderid
        # Path relative to that data folder.
        self.path = path
    def __str__(self):
        return "datafolder-path({folderid}):{path}".format(**self.__dict__)
    def __call__(self, context: "Context") -> Path:
        # Resolve against the configured data folder root.
        return Path(context.settings.datafolders[self.folderid]) / self.path
class Datasets(Iterable["AbstractDataset"]):
    """A set of datasets contained within a Python module

    The module docstring provides the title (first paragraph) and the
    description (remaining paragraphs), both parsed lazily.
    """
    def __init__(self, module: Module):
        """Initialize with a module"""
        self.module = module
        self._title = None
        self._description = None
    @property
    def id(self):
        # Identifier: the module path minus its first two components
        # (the repository package and "config").
        return ".".join(self.module.__name__.split(".", 2)[2:])
    @property
    def title(self):
        self._getdoc()
        return self._title
    @property
    def description(self):
        self._getdoc()
        return self._description
    def _getdoc(self):
        # Lazily parse the module docstring; a non-None title marks "done".
        if self._title is not None:
            return
        if not self.module.__doc__:
            self._title = ""
            self._description = ""
            return
        intitle = True
        title = []
        description = []
        for line in self.module.__doc__.split("\n"):
            # The first blank line ends the title block; everything after
            # (including later blank lines) belongs to the description.
            if line.strip() == "" and intitle:
                intitle = False
            else:
                (title if intitle else description).append(line)
        self._title = " ".join(title)
        self._description = "\n".join(description)
    def __iter__(self) -> Iterable["AbstractDataset"]:
        from .definitions import DatasetWrapper
        # Iterates over defined symbols
        for key, value in self.module.__dict__.items():
            # Ensures it is annotated
            if isinstance(value, DatasetWrapper):
                # Ensure it comes from the module
                if self.module.__name__ == value.t.__module__:
                    yield value
class Repository:
    """A repository regroup a set of datasets and their corresponding specific handlers (downloading, filtering, etc.)"""
    def __init__(self, context: Context):
        """Initialize a new repository
        :param context: The dataset main context
        :param basedir: The base directory of the repository
            (by default, the same as the repository class)
        """
        self.context = context
        # The repository root is the directory of the concrete subclass file.
        p = inspect.getabsfile(self.__class__)
        self.basedir = Path(p).parent
        self.configdir = self.basedir.joinpath("config")
        # Subclasses must define NAMESPACE; it doubles as id and name.
        self.id = self.__class__.NAMESPACE
        self.name = self.id
        self.module = self.__class__.__module__
        # Register this instance as the per-class singleton.
        self.__class__.INSTANCE = self
    @classmethod
    def basemodule(cls):
        # Python module that defines this repository subclass.
        return cls.__module__
    @classmethod
    def instance(cls, context=None):
        # Per-class singleton accessor: construct on first use.
        try:
            return cls.__getattribute__(cls, "INSTANCE")
        except AttributeError:
            return cls(context if context else Context.instance())
    @classmethod
    def version(cls):
        """Return the installed distribution version, or None."""
        from pkg_resources import get_distribution, DistributionNotFound
        try:
            return get_distribution(cls.__module__).version
        except DistributionNotFound:
            # NOTE(review): dead assignment -- this sets a local
            # ``__version__`` and falls through, returning None implicitly.
            __version__ = None
    def __repr__(self):
        return "Repository(%s)" % self.basedir
    def __hash__(self):
        # Identity is the base directory (see __eq__).
        return self.basedir.__hash__()
    def __eq__(self, other):
        assert isinstance(other, Repository)
        return self.basedir == other.basedir
    def search(self, name: str):
        """Search for a dataset in the definitions"""
        logging.debug("Searching for %s in %s", name, self.configdir)
        # Collect every dotted prefix of ``name`` that maps to an existing
        # config package or module, from shortest to longest.
        candidates: List[str] = []
        components = name.split(".")
        path = self.configdir
        for i, c in enumerate(components):
            path = path / c
            if (path / "__init__.py").is_file():
                candidates.append(".".join(components[: i + 1]))
            if path.with_suffix(".py").is_file():
                candidates.append(".".join(components[: i + 1]))
            if not path.is_dir():
                break
        # Get the dataset: try the most specific candidate module first.
        for candidate in candidates[::-1]:
            logging.debug("Searching in module %s.config.%s", self.module, candidate)
            module = importlib.import_module("%s.config.%s" % (self.module, candidate))
            for value in Datasets(module):
                if name in value.aliases:
                    return value
        return None
    def datasets(self, candidate):
        """Load a single config module as a Datasets set, or None."""
        try:
            module = importlib.import_module("%s.config.%s" % (self.module, candidate))
        except ModuleNotFoundError:
            return None
        return Datasets(module)
    def modules(self) -> Iterator["Module"]:
        """Iterates over all modules in this repository"""
        for _, fid, package in self._modules():
            try:
                module = importlib.import_module(package)
                yield Datasets(module)
            except Exception as e:
                import traceback
                traceback.print_exc()
                logging.error("Error while loading module %s: %s", package, e)
    def _modules(self):
        """Iterate over modules (without parsing them)"""
        for path in self.configdir.rglob("*.py"):
            try:
                relpath = path.relative_to(self.configdir)
                # Build the dotted module components from the relative path,
                # dropping a trailing __init__.py.
                c = [p.name for p in relpath.parents][:-1][::-1]
                if path.name != "__init__.py":
                    c.append(path.stem)
                fid = ".".join(c)
                package = ".".join([self.module, "config", *c])
                yield self, fid, package
            except Exception as e:
                import traceback
                traceback.print_exc()
                logging.error("Error while reading definitions file %s: %s", path, e)
    def __iter__(self) -> Iterator["AbstractDataset"]:
        """Iterates over all datasets in this repository"""
        for datasets in self.modules():
            for dataset in datasets:
                yield dataset
    @property
    def generatedpath(self):
        # Directory for generated artifacts.
        return self.basedir.joinpath("generated")
    @property
    def datapath(self):
        # Per-repository data directory under the context's data path.
        return self.context.datapath.joinpath(self.id)
    @property
    def extrapath(self):
        """Path to the directory containing extra configuration files"""
        return self.basedir.joinpath("data")
def find_dataset(dataset_id: str):
    """Return the dataset definition matching ``dataset_id`` (no download)."""
    from .definitions import AbstractDataset
    return AbstractDataset.find(dataset_id)
def prepare_dataset(dataset_id: str):
    """Find a dataset given its id and download the resources"""
    from .definitions import AbstractDataset
    ds = AbstractDataset.find(dataset_id)
    return ds.prepare(download=True)
def get_dataset(dataset_id: str):
    """Find a dataset given its id and prepare it WITHOUT downloading
    the resources (contrast with :func:`prepare_dataset`)."""
    from .definitions import AbstractDataset
    ds = AbstractDataset.find(dataset_id)
    return ds.prepare(download=False)
| experimaestro/datamaestro | src/datamaestro/context.py | context.py | py | 13,210 | python | en | code | 12 | github-code | 1 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.environ",
"li... |
24613096759 | from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
    """Custom user model extending Django's AbstractUser with a ``sex`` field."""
    MAN = "man"
    WOMEN = "women"
    # Choices as (stored value, human-readable label) pairs.
    SEX = [(MAN, MAN), (WOMEN, WOMEN)]
    sex = models.CharField(max_length=6, choices=SEX, default=MAN)
    class Meta:
        # Admin display names. NOTE(review): these strings appear
        # mojibake-encoded (likely Cyrillic "User"/"Users" mis-decoded) --
        # verify the file encoding; kept byte-identical here.
        verbose_name = "ะะพะปัะทะพะฒะฐัะตะปั"
        verbose_name_plural = "ะะพะปัะทะพะฒะฐัะตะปะธ"
        ordering = ["username"]
| bazoy789/todolist | core/models.py | models.py | py | 408 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.contrib.auth.models.AbstractUser",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 10,
"usage_type": "name"
}
] |
1355144247 | #!/usr/bin/env python
"""
Measure phase ghosts by taking the ratio of the phase background to the frequency background.
"""
import sys
import nibabel
import argparse
import labels
import _utilities as util
import numpy
def nii_4d(in_nii):
    """Unimplemented stub.

    NOTE(review): intent unclear from this file -- presumably meant to
    coerce an image to 4D; confirm before implementing.
    """
    pass
def cummean_nii(in_nii, scale=1, verbose=False):
    """Return a NIFTI image holding the cumulative mean over the 4th axis.

    Output volume ``i`` is the mean of input volumes ``0..i`` (after
    multiplying the data by ``scale``).

    Args:
        in_nii: nibabel NIFTI object with 4D data.
        scale (float): multiplicative factor applied before averaging.
        verbose (bool): unused; kept for interface compatibility.

    Returns:
        nibabel.Nifti1Image built with the input image's header.
    """
    # Read in NIBABEL NIFTI object
    in_array = in_nii.get_data()
    # Cumulative sum along the time (4th) axis.
    out_array = scale * numpy.cumsum(in_array, axis=3)
    # Promote integer data to float: the original in-place `/=` raised a
    # casting TypeError for integer dtypes (e.g. int16 NIfTI data, scale=1).
    if not numpy.issubdtype(out_array.dtype, numpy.floating):
        out_array = out_array.astype(numpy.float64)
    # Divide each cumulative volume by the number of volumes summed so far
    # (broadcast over the time axis; replaces the per-volume Python loop).
    out_array /= numpy.arange(1, out_array.shape[3] + 1)
    # Create nibabel NIFTI object reusing the input header.
    out_nii = nibabel.Nifti1Image(out_array, None, in_nii.get_header())
    return out_nii
#
# Main Function
#
if __name__ == '__main__':
    ## Parsing Arguments
    # NOTE(review): `usage` is an unused optparse-style leftover.
    usage = 'usage: %prog [options] arg1 arg2'
    parser = argparse.ArgumentParser(prog='sort_nii')
    parser.add_argument('in_nii', help='Background labels')
    parser.add_argument('-o','--out_nii', help='Filename of NIFTI output label. (default = cumsum_nii.<in> ) ', default=None)
    parser.add_argument('-s','--scale', help='Multiply NIFTI output array by scale factor ', type=float, default=1.0)
    parser.add_argument('-v','--verbose', help='Verbose flag', action='store_true', default=False )
    inArgs = parser.parse_args()
    # Read NIFTI File
    in_nii = nibabel.load(inArgs.in_nii)
    # Compute the cumulative mean image.
    out_nii = cummean_nii(in_nii, inArgs.scale)
    # Output name: prefix the input filename unless -o was given explicitly.
    if inArgs.out_nii is None:
        out_nii_filename = util.add_prefix_to_filename( inArgs.in_nii, 'cumsum_nii.')
    else:
        out_nii_filename = inArgs.out_nii
    nibabel.save( out_nii, out_nii_filename)
| kchawla-pi/tic_modules | ~archive/tools/cummean_nii.py | cummean_nii.py | py | 1,762 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.cumsum",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "nibabel.Nifti1Image",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "nibabel.load... |
69795468514 |
debug_flag=True
import base64
import hmac
import secrets
import time
import traceback
import flask
from Application.Api.UserContext import UserContext
from Application.Util.ParamsBinder import bind_optional_params, bind_params
from Application.Api.ApiError import ApiError
from Application.App import App
from Crypto.Cipher import AES
import hashlib
#def make_audio_urls(audios:list):# optimize for many audios to avoid instantiation of AES multiple times
# pass
AUDIO_URL_LIFETIME=1200
def check_audio_url(ip:str,url:str):
    """Validate a token produced by :func:`make_audio_url`.

    Token layout (after base64, with '~' standing in for '='):
    16-byte nonce || 32-byte HMAC-SHA256(nonce + ciphertext) ||
    AES-EAX ciphertext of (packed IPv4 || 4-byte big-endian expiry ||
    raw data-url bytes).

    Returns ``(True, data_url)`` on success or ``(False, reason)``.
    """
    key=bytes.fromhex(App.key)
    raw=b''
    # Undo the URL-safe padding substitution done by make_audio_url.
    url_i=url.replace('~','=')
    # NOTE(review): bare except -- narrows poorly; binascii.Error is the
    # realistic failure here.
    try:
        raw=base64.urlsafe_b64decode(url_i)
        if raw==b'':return False,'Invalid URL'
    except:
        return False,'Invalid URL'
    # Minimum size: nonce (16) + mac (32) + ip+expiry payload (8).
    if len(raw)<(16+32+8):
        return False,'Invalid URL'
    nonce=raw[:16]
    mac=raw[16:16+32]
    b=raw[16+32:]
    cipher=AES.new(key,mode=AES.MODE_EAX,nonce=nonce)
    raw=cipher.decrypt(b)
    # Encrypt-then-MAC: HMAC over nonce + ciphertext (the EAX tag is unused).
    _mac=hmac.new(key,msg=nonce+b,digestmod='SHA256')
    # NOTE(review): ``pack_id`` is actually the packed IPv4 address.
    pack_id=raw[:4]
    expires=raw[4:8]
    data=raw[8:]
    # Constant-time signature check before trusting any decrypted field.
    if not hmac.compare_digest(mac,_mac.digest()):
        return False,'Signature invalid'
    try:
        _ip,exp,_url='.'.join([str(x) for x in pack_id]),int.from_bytes(expires,'big'),base64.urlsafe_b64encode(data).decode().replace('=','')
        # Token is bound to the requesting client's IP.
        if _ip!=ip:
            return False,"Invalid ip"
        if exp<int(time.time()):
            return False,'URL Expired'
        return True,_url
    except:
        print(traceback.format_exc())
        return False,'Invalid URL'
def make_audio_url(ip:str,s:bytes,owner_id:int,audio_id:int,data_url:str):
    """Build a signed, time-limited, IP-bound audio URL token.

    Returns ``(token, expiry_timestamp)``; the token layout is the one
    verified by :func:`check_audio_url`. '=' padding is swapped for '~' so
    the token is URL-safe.

    NOTE(review): ``s``, ``owner_id`` and ``audio_id`` are currently unused
    (see the commented-out action-hash code below).
    """
    key=bytes.fromhex(App.key)
    #action_hashes=[hmac.new(key,msg="{}_{}_{}_{}".format(act,owner_id,audio_id,s.hex()).encode('utf-8'),digestmod='sha256').hexdigest() for act in actions]
    nonce=secrets.token_bytes(16)
    cipher=AES.new(key=key,mode=AES.MODE_EAX,nonce=nonce)
    data_raw=base64.urlsafe_b64decode(data_url)
    # Pack the dotted-quad client IP into 4 bytes (IPv4 only).
    pack_ip=bytes([int(x) for x in ip.split('.')])
    ts=int(time.time()+AUDIO_URL_LIFETIME)
    expires=ts.to_bytes(4,'big')
    b=cipher.encrypt(pack_ip+expires+data_raw)
    # Encrypt-then-MAC over nonce + ciphertext; the EAX tag itself is unused.
    mac=hmac.new(key,msg=nonce+b,digestmod="SHA256").digest()
    raw=nonce+mac+b
    return base64.urlsafe_b64encode(raw).decode().replace('=','~'),ts
def check_audio_hash(h:str,s:bytes,owner_id:int,audio_id:int):
    """Verify a hash produced by :func:`make_audio_hash`.

    ``h`` has the form ``"<expires>_<hex hmac>"``. Returns True only when
    the signature matches and the embedded timestamp has not expired.
    """
    key=bytes.fromhex(App.key)
    try:
        exp,_h=h.split('_')
        # Enforce the expiry that make_audio_hash embeds in the hash.
        if int(exp)<int(time.time()):
            return False
    except ValueError:
        # Malformed hash: wrong number of '_' parts or non-numeric timestamp.
        return False
    # Recompute the signature over the same payload make_audio_hash signs.
    # Fix: the original referenced an undefined name ``expires`` here, so
    # every call raised NameError; the parsed ``exp`` is the signed value.
    sig=hmac.new(key,msg="ts={};id={}_{},s={};".format(exp,owner_id,audio_id,s.hex()).encode('utf-8'),digestmod='sha256').hexdigest()
    return hmac.compare_digest(_h,sig)
def make_audio_hash(s:bytes,owner_id:int,audio_id:int):
    """Create a client-facing audio hash ``"<expires>_<hex hmac>"``.

    The HMAC covers the expiry timestamp, the owner/audio ids and the
    session bytes ``s``, keyed with the application key.
    """
    key=bytes.fromhex(App.key)
    expires=int(time.time()+AUDIO_URL_LIFETIME)
    h="{}_{}".format(expires,hmac.new(key,msg="ts={};id={}_{},s={};".format(expires,owner_id,audio_id,s.hex()).encode('utf-8'),digestmod='sha256').hexdigest())
    return h
class Audios():
    """API endpoints operating on the ``audios`` table.

    NOTE(review): these methods are defined without ``self`` and appear to
    be dispatched as plain functions (``Audios.<name>(args, user_context)``);
    confirm the router calls them unbound.
    """
    def start_playback(args,user_context:UserContext):
        """Resolve a signed, IP-bound CDN URL for one audio track.

        NOTE(review): a 'hash' parameter is required by bind_params but is
        never verified (check_audio_hash is not called) -- confirm.
        """
        bind_params(args,{
            'id':str,
            'hash':str
        })
        owner_id,audio_id=args['id'].split('_')
        data=None
        with App.db.make_connection(False,20) as cur:
            cur.execute("""
            SELECT data_url
            FROM audios
            WHERE owner_id=? AND audio_id=?
            """,[int(owner_id),int(audio_id)])
            data=cur.fetchone()
        # Unknown track: empty payload rather than an error.
        if not data:return {}
        data_url=data[0]
        s=user_context.session_id.binary
        url,expires=make_audio_url(str(flask.request.remote_addr),s,int(owner_id),int(audio_id),data_url)
        return {
            'url':'http://cdn.notvk.com/audios/'+url+'/128.mp3',
            'expires':expires
        }
    def search(args,user_context:UserContext):
        """Substring search over audio name/artist, ranked by match position."""
        bind_params(args,{
            'q':str,
            'count':int,
            'offset':int,
        })
        bind_optional_params(args,{
            'name':str,
            'artist':str,
            'owner_id':int
        })
        data=[]
        audios=[]
        s=user_context.session_id.binary
        q=str(args['q']).strip()
        if len(q.strip())==0:
            return []
        count=args['count']
        offset=args['offset']
        where=''
        #_args=[q,'%{}%'.format(q),'%{}%'.format(q)]
        _args=[q]
        if 'owner_id' in args:
            owner_id=args['owner_id']
            where="owner_id=? AND"
            _args.append(owner_id)
        _args.append(count)
        _args.append(offset)
        #
        """
        WITH tempvar AS (SELECT ? AS q)
        SELECT audios.id,audios.owner_id,audios.audio_id,audios.data_url,audios.duration,audios.artist,audios.name,
        instr(lower(audios.name),tempvar.q) AS npos,
        instr(lower(audios.artist),tempvar.q) AS apos
        FROM audios,tempvar
        WHERE {} ( npos OR apos)
        ORDER BY MIN(npos,apos))
        LIMIT ?
        OFFSET ?
        """
        #
        # NOTE(review): the SQL below contains no "{}" placeholder, so
        # ``.format(where)`` is a no-op -- when owner_id is given the filter
        # is never applied AND the extra bind parameter in ``_args`` makes
        # the parameter count mismatch the statement.
        with App.db.make_connection(False,20) as cur:
            cur.execute("""
            WITH tempvar AS (SELECT lower(?) AS q)
            SELECT audios.id,audios.owner_id,audios.audio_id,audios.data_url,audios.duration,audios.artist,audios.name,audios.is_official,audios.thumbnail,audios.text,audios.explicit,audios.genre,
            instr(lower(audios.name),tempvar.q) AS npos,
            instr(lower(audios.artist),tempvar.q) AS apos
            FROM audios,tempvar
            WHERE ( npos+apos)!=0
            ORDER BY ( SELECT CASE WHEN npos==0 THEN apos WHEN apos==0 THEN npos ELSE MIN(apos,npos) END u)
            LIMIT ?
            OFFSET ?
            """.format(where),_args)
            data=cur.fetchall()
        if not data:
            return []
        for x in data:
            # npos/apos ranking columns come back last and are unused here.
            i,owner_id,audio_id,data_url,duration,artist,name,is_official,thumbnail,text,explicit,genre,unused1,unused2=x
            url,expires=make_audio_url(str(flask.request.remote_addr),s,int(owner_id),int(audio_id),data_url)
            audios.append({
                'duration':duration,
                'name':name,
                'artist':artist,
                'hash':make_audio_hash(s,owner_id,audio_id),
                'id':"{}_{}".format(owner_id,audio_id),
                'url':'http://cdn.notvk.com/audios/'+url+'/128.mp3',
                'expires':expires,
                'is_official':bool(is_official),
                'thumbnail':thumbnail,
                'text':text,
                'explicit':bool(explicit),
                'genre':genre
            })
        return audios
    def get(args,user_context:UserContext):
        """List one owner's audios, newest (highest audio_id) first."""
        bind_params(args,{
            'owner_id':int,
            'count':int,
            'offset':int
        })
        data=[]
        audios=[]
        s=user_context.session_id.binary
        owner_id=args['owner_id']
        count=args['count']
        offset=args['offset']
        with App.db.make_connection(False,20) as cur:
            cur.execute("""
            SELECT id,audio_id,data_url,duration,name,artist,is_official,thumbnail,text,explicit,genre
            FROM audios
            WHERE owner_id = ?
            ORDER BY audio_id DESC
            LIMIT ?
            OFFSET ?
            """,[owner_id,count,offset])
            data=cur.fetchall()
        if not data:
            return []
        for x in data:
            _id,audio_id,data_url,duration,name,artist,is_official,thumbnail,text,explicit,genre=x
            url,expires=make_audio_url(str(flask.request.remote_addr),s,int(owner_id),int(audio_id),data_url)
            audios.append({
                'duration':duration,
                'name':name,
                'artist':artist,
                'hash':make_audio_hash(s,owner_id,audio_id),
                # NOTE(review): the public id uses the primary key ``_id``
                # while the hash and the search endpoint use ``audio_id`` --
                # confirm these are always equal, otherwise start_playback
                # will look up the wrong row.
                'id':"{}_{}".format(owner_id,_id),
                'url':'http://cdn.notvk.com/audios/'+url+'/128.mp3',
                'expires':expires,
                'is_official':bool(is_official),
                'thumbnail':thumbnail,
                'text':text,
                'explicit':bool(explicit),
                'genre':genre
            })
        return audios
| byldocoder/VK-Clone | Application/Api/ApiImp/Audios.py | Audios.py | py | 8,336 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "Application.App.App.key",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "Application.App.App",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "base64.urlsafe_b64decode",
"line_number": 28,
"usage_type": "call"
},
{
"api_nam... |
33385668182 | import os
import random
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.datasets import load_iris, load_breast_cancer, load_wine, make_blobs
from sklearn.model_selection import train_test_split
project_dir = os.path.dirname(os.getcwd())
# This file describes utility functions for loading and creating (dummy) datasets.
def load(subsample_train_frac=None, prop_train=None, prop_test=None, is_iid=True, verbose=None):
    """Load the iris dataset as a pandas frame and split it train/test.

    NOTE(review): ``subsample_train_frac``, ``prop_train`` and ``is_iid``
    are currently unused; only ``prop_test`` and ``verbose`` have an effect.
    """
    # This is where we locate the .csv files and read them as pandas dataframes.
    iris = load_iris()
    df = pd.DataFrame(iris.data, columns=iris.feature_names)
    # prop_test=None lets sklearn fall back to its default test size.
    df_train, df_test = train_test_split(df, test_size=prop_test)
    # print(df.columns)
    if verbose:
        print(df.head())
    return df_train, df_test
def load_data(dataset=None, is_iid=True, num_devices=3, split_train_test=False, prop_test=None, dims=2, samples=100,
              clusters=3, verbose=False):
    """Load (or generate) a named dataset and partition it across devices.

    Args:
        dataset: one of 'iris', 'breast cancer', 'wine', 'heart disease',
            'forest types', 'blobs', 'dummy'; anything else falls back to
            generated blobs.
        is_iid: when False, devices receive label-skewed (non-IID) shards.
        num_devices: number of shards to produce.
        split_train_test: when True, split into train/test before sharding.
        prop_test: test fraction used when split_train_test is True.
        dims, samples, clusters: parameters for the 'blobs'/'dummy' generators.
        verbose: verbosity flag (forwarded to the generators).

    Returns:
        Device-sharded data; the exact structure depends on the branch
        (see split_data / create_blobs / create_dummy_data).
    """
    eligible_datasets = ['iris', 'breast cancer', 'wine', 'heart disease', 'forest types', 'blobs', 'dummy']
    if dataset not in eligible_datasets:
        print("No dataset was requested or the requested dataset could not be retrieved.")
        print("Defaulting to generating blobs...")
        return create_blobs(dims=dims, samples=samples, clusters=clusters, split_train_test=split_train_test,
                            prop_test=prop_test, is_iid=is_iid, num_devices=num_devices, verbose=verbose)
    else:
        if dataset == 'iris':
            iris = load_iris()
            df = pd.DataFrame(iris.data, columns=iris.feature_names)
            labels = iris.target
            return split_data(df.to_numpy(), labels, num_devices, split_train_test, prop_test, is_iid)
        if dataset == 'breast cancer':
            breast_cancer = load_breast_cancer()
            df = pd.DataFrame(breast_cancer.data, columns=breast_cancer.feature_names)
            labels = breast_cancer.target
            return split_data(df.to_numpy(), labels, num_devices, split_train_test, prop_test, is_iid)
        if dataset == 'wine':
            wine = load_wine()
            df = pd.DataFrame(wine.data, columns=wine.feature_names)
            labels = wine.target
            return split_data(df.to_numpy(), labels, num_devices, split_train_test, prop_test, is_iid)
        if dataset == 'heart disease':
            df = pd.read_csv("data/heart_disease_cleveland.csv")  # not sure how to retrieve just labels here...
            # this implementation does not use split_data for now, because it would require us to extract labels.
            if split_train_test and prop_test is not None:
                df_train, df_test = train_test_split(df, test_size=prop_test)
                if not is_iid:
                    # FIX: previously *returned* the NotImplementedError
                    # class object instead of raising it.
                    raise NotImplementedError
                else:
                    return np.array_split(df_train, num_devices), np.array_split(df_test, num_devices)
            elif split_train_test:
                df_train, df_test = train_test_split(df, test_size=.2)
                if not is_iid:
                    raise NotImplementedError
                else:
                    return np.array_split(df_train, num_devices), np.array_split(df_test, num_devices)
            else:
                return np.array_split(df, num_devices)
        if dataset == 'forest types':
            cur_file = os.path.abspath(os.path.dirname(__file__))
            covtype_file = os.path.join(cur_file, 'data/forest_covertypes.csv')
            df = pd.read_csv(covtype_file)
            labels = df['Cover_Type']
            df = df.drop('Cover_Type', axis=1)
            print(df.shape)
            # this implementation does not use split_data for now, because it would require us to extract labels.
            if split_train_test and prop_test is not None:
                df_train, df_test = train_test_split(df, test_size=prop_test)
                if not is_iid:
                    raise NotImplementedError
                else:
                    return np.array_split(df_train, num_devices), np.array_split(df_test, num_devices)
            elif split_train_test:
                df_train, df_test = train_test_split(df, test_size=.2)
                if not is_iid:
                    raise NotImplementedError
                else:
                    return np.array_split(df_train, num_devices), np.array_split(df_test, num_devices)
            else:
                return np.array_split(df.to_numpy(), num_devices), np.array_split(labels.to_numpy(), num_devices)
        if dataset == 'dummy':
            clients_per_cluster = num_devices//clusters
            data, _ = create_dummy_data(dims=dims, clients_per_cluster=clients_per_cluster,
                                        samples_each=samples//num_devices, clusters=clusters, verbose=True)
            print(len(data))
            return data, _
        else:  # default case: dataset == 'blobs'
            print("Generating blobs...")
            # FIX: the generated blobs were previously computed but not
            # returned, so requesting 'blobs' always yielded None.
            return create_blobs(dims=dims, samples=samples, clusters=clusters, split_train_test=split_train_test,
                                prop_test=prop_test, is_iid=is_iid, num_devices=num_devices, verbose=verbose)
def split_data(df, labels, num_devices, split_train_test=False, prop_test=None, is_iid=True):
    """Partition ``df`` and ``labels`` into ``num_devices`` device shards.

    Shard sizes are randomized around ``len(df) / num_devices``. With
    ``is_iid=False`` records are sorted by label first, so each device sees
    a narrow label range (non-IID). With ``split_train_test=True`` a test
    fraction (``prop_test``, default .2) is carved off the front of ``df``
    and both splits are sharded, returning
    ``(test_shards, train_shards, y_test_shards, y_train_shards)``;
    otherwise ``(X_shards, y_shards)`` is returned.

    Fixes vs. the previous version:
    - non-IID train/test shards indexed the *full* array with orderings
      computed on subsets, yielding the wrong rows;
    - label shards sometimes got ``num_devices + 1`` chunks while the data
      got ``num_devices``; every branch now yields exactly ``num_devices``
      shards for data and labels alike.
    """
    if not is_iid:
        if split_train_test:
            prop = prop_test if prop_test is not None else .2
            num_test = int(prop * len(df))
            order_train = np.argsort(labels[num_test:])
            order_test = np.argsort(labels[:num_test])
            # Order the corresponding *subset* of rows by label.
            df_train = df[num_test:][order_train]
            df_test = df[:num_test][order_test]
            bounds_train = _random_cumulative_sizes(len(df_train), num_devices)
            bounds_test = _random_cumulative_sizes(num_test, num_devices)
            return np.split(df_test, bounds_test), np.split(df_train, bounds_train), \
                np.split(labels[:num_test][order_test], bounds_test), \
                np.split(labels[num_test:][order_train], bounds_train)  # test, train, y_test, y_train
        # Non-IID without a train/test split: sort everything by label.
        order = np.argsort(labels)
        bounds = _random_cumulative_sizes(len(df), num_devices)
        return np.split(df[order], bounds), np.split(labels[order], bounds)  # X, y
    if split_train_test:
        prop = prop_test if prop_test is not None else .2
        num_test = int(prop * len(df))
        bounds_train = _random_cumulative_sizes(len(df) - num_test, num_devices)
        bounds_test = _random_cumulative_sizes(num_test, num_devices)
        return np.split(df[:num_test], bounds_test), np.split(df[num_test:], bounds_train), \
            np.split(labels[:num_test], bounds_test), np.split(labels[num_test:], bounds_train)
        # test, train, y_test, y_train
    bounds = _random_cumulative_sizes(len(df), num_devices)
    return np.split(df, bounds), np.split(labels, bounds)  # X, y


def _random_cumulative_sizes(total, num_devices):
    """Return ``num_devices - 1`` cumulative split indices producing
    ``num_devices`` randomly-sized chunks of roughly ``total / num_devices``
    records each (the last chunk absorbs any remainder)."""
    expected = round(total / num_devices)
    spread = expected // 4
    if spread == 0:
        # Too few records per device to randomize sizes: split evenly.
        return [expected * (i + 1) for i in range(num_devices - 1)]
    size_range = range(expected - spread, expected + spread)
    bounds = []
    for _ in range(num_devices - 1):
        previous = bounds[-1] if bounds else 0
        bounds.append(previous + random.choice(size_range))
    return bounds
# Per-dimension bounds of a dataset. Expects an n-records-by-i-features
# matrix (numpy array or pandas frame).
def obtain_bounds(df):
    """Return ``(min_vals, max_vals)`` per feature as flat numpy arrays."""
    def _as_flat_array(values):
        # pandas reductions yield a Series; normalize to a flat ndarray.
        if isinstance(values, pd.Series):
            values = values.to_numpy()
        return values.flatten()

    return _as_flat_array(df.min(axis=0)), _as_flat_array(df.max(axis=0))
def obtain_bounds_multiple(dfs):
    """Return the element-wise (min, max) bounds across a list of datasets.

    Each entry of `dfs` is a 2-D array-like of shape (n_records, n_features);
    entries may differ in record count but must share the feature count.

    Returns a pair of flat numpy arrays (min_vals, max_vals).
    """
    # Convert every dataset up front (the original only converted the first),
    # then fold the running bounds with vectorized element-wise min/max
    # instead of a per-dimension Python loop. The trailing pd.Series checks
    # of the original were dead code: the values are already numpy here.
    arrays = [np.asarray(df) for df in dfs]
    min_vals = arrays[0].min(axis=0).flatten()
    max_vals = arrays[0].max(axis=0).flatten()
    for arr in arrays[1:]:
        min_vals = np.minimum(min_vals, arr.min(axis=0).flatten())
        max_vals = np.maximum(max_vals, arr.max(axis=0).flatten())
    return min_vals, max_vals
def create_dummy_data(dims=1, clients_per_cluster=10, samples_each=10, clusters=10, scale=.5, verbose=False):
    """Generate a synthetic Gaussian dataset with one mean per client cluster.

    Returns (data, means): `data` is a list of clients_per_cluster * clusters
    arrays of shape (samples_each, dims), `means` is the per-client cluster
    mean used to center each client's samples.
    """
    np.random.seed(42)  # 42, 420, 4200, 42000, 420000 = 5 runs.
    num_clients = clients_per_cluster * clusters
    print(f"Number of clients set to: {num_clients}.")
    # One Gaussian center per cluster, tiled so each client gets its
    # cluster's mean; additive noise gives each client its own samples.
    means = np.tile(A=np.arange(1, clusters + 1), reps=clients_per_cluster)
    noise = np.random.normal(loc=0.0, scale=scale, size=(num_clients, samples_each, dims))
    data = means[:, None, None] + noise
    if verbose:
        print("dummy data shape: ", data.shape)
    return [data[client] for client in range(num_clients)], means
# Function to create blobs that are very clearly structured as clusters.
# Can already obtain train and test sets from this within this function.
def create_blobs(dims=2, samples=100, clusters=3, split_train_test=False, prop_test=None, is_iid=True, num_devices=3,
                 verbose=False):
    """Create clearly clustered blob data and split it across devices.

    Thin wrapper around sklearn's make_blobs (fixed random_state=42) that
    hands the features/labels straight to split_data, which may also produce
    train/test partitions depending on the flags.
    """
    features, labels = make_blobs(n_samples=samples, centers=clusters, n_features=dims, random_state=42)
    if verbose:
        print(features.shape)
        print(len(labels), labels)
    return split_data(features, labels, num_devices, split_train_test, prop_test, is_iid)
| WVLeeuw/BC_Unsupervised_FL | utils/data_utils.py | data_utils.py | py | 12,027 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets.load_iris",
... |
import folium, pandas, ast
import json
# -*- coding: utf-8 -*-
# NOTE(review): the coding declaration above is only honored on line 1 or 2
# of a file, so here it is inert. `pandas` and `ast` are imported but unused
# in this script (the literal_eval block below is commented out).

# Collect the geo coordinates of geo-tagged tweets for each team hashtag.
locations_p = []
geos_p = []
files = 'tweets_#gopatriots.txt'
# get geo data only from rows with non-empty values
with open(files,'r') as ifile:
    for line in ifile.readlines():
        tweet = json.loads(line)
        if tweet['tweet']['geo'] is not None:
            locations_p.append(tweet['tweet']['geo']['coordinates'])
ifile.close()  # redundant: the `with` block already closed the file

locations_h = []
files = 'tweets_#gohawks.txt'
with open(files,'r') as ifile:
    for line in ifile.readlines():
        tweet = json.loads(line)
        if tweet['tweet']['geo'] is not None:
            locations_h.append(tweet['tweet']['geo']['coordinates'])
ifile.close()  # redundant: the `with` block already closed the file

# for location in locations:
#     # add to geos array an evaluated python literal syntax of the data
#     geos.append(ast.literal_eval(location))

# initialize and create map
# NOTE(review): `circle_marker` / `create_map` are the pre-0.2 folium API
# (modern folium uses CircleMarker(...).add_to(map) and map.save(path));
# confirm the pinned folium version before running.
tweet_map= folium.Map(location=[52.8, -2], tiles='Mapbox Bright', zoom_start=7)
# add markers: red circles for #gopatriots, blue for #gohawks
for geo in locations_p:
    tweet_map.circle_marker(location=geo, radius=250, line_color='#E91616')
for geo in locations_h:
    tweet_map.circle_marker(location=geo, radius=250, line_color='#3186cc')
tweet_map.create_map(path='map.html')
| qinyiyan/EE239AS | proj4/part6/map/map_patriot&hawks.py | map_patriot&hawks.py | py | 1,213 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.loads",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "folium.Map",
"line_number": 30,
"usage_type": "call"
}
] |
35840992661 |
# coding: utf-8
# In[1]:
import requests
import json
from pprint import pprint
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# In[2]:
def view_imdb_data_votes(season_number, episode_number):
    """Return the IMDb vote count string for one Game of Thrones episode.

    Queries the OMDb API; raises KeyError when the response has no
    'imdbVotes' field (e.g. the episode does not exist).
    """
    url = "http://www.omdbapi.com/"
    # Let requests build and percent-encode the query string instead of
    # concatenating it by hand (the title contains spaces, which the
    # original left unencoded in the URL).
    params = {
        "t": "Game of Thrones",
        "Season": season_number,
        "Episode": episode_number,
        "apikey": "7329004f",
    }
    # Timeout keeps a stalled API call from hanging the whole script.
    response = requests.get(url, params=params, timeout=10)
    data = response.json()
    return data["imdbVotes"]
# In[3]:
def view_imdb_data_ratings(season_number, episode_number):
    """Return the IMDb rating string for one Game of Thrones episode.

    Queries the OMDb API; raises KeyError when the response has no
    'imdbRating' field (e.g. the episode does not exist).
    """
    url = "http://www.omdbapi.com/"
    # Let requests build and percent-encode the query string instead of
    # concatenating it by hand (the title contains spaces).
    params = {
        "t": "Game of Thrones",
        "Season": season_number,
        "Episode": episode_number,
        "apikey": "7329004f",
    }
    # Timeout keeps a stalled API call from hanging the whole script.
    response = requests.get(url, params=params, timeout=10)
    data = response.json()
    return data["imdbRating"]
# In[4]:
# ---------------------------------------------------------------------------
# Collect IMDb votes and ratings for every episode of seasons 1-7.
# The fourteen near-identical per-season copy/paste blocks of the original
# notebook export are collapsed into one helper plus loops. The module-level
# names used further down (votes_df, votes_df_season_seven, ratings_df,
# ratings_season_seven_df) are kept so downstream cells keep working.
# ---------------------------------------------------------------------------

# Probe ranges as in the original: seasons 1-6 probed with range(11)
# (10 real episodes, episode 0 fails and is skipped), season 7 with range(8).
_SEASON_PROBE_RANGE = {1: 11, 2: 11, 3: 11, 4: 11, 5: 11, 6: 11, 7: 8}


def _collect_metric(season, probe_range, fetch, cast):
    """Fetch one metric for each episode of `season`, skipping failed lookups.

    `fetch(season, episode)` is one of the OMDb helpers above; `cast` turns
    the returned string into a number. Episodes the API does not know about
    raise and are skipped, mirroring the original try/except-continue flow.
    """
    values = []
    for episode in range(probe_range):
        try:
            raw = fetch(season, episode)
        except Exception:
            continue
        # IMDb vote counts are formatted like "30,712"; strip the thousands
        # separators before converting (the original bare int() call would
        # raise ValueError on them).
        values.append(cast(str(raw).replace(",", "")))
    return values


votes_by_season = {s: _collect_metric(s, n, view_imdb_data_votes, int)
                   for s, n in _SEASON_PROBE_RANGE.items()}
ratings_by_season = {s: _collect_metric(s, n, view_imdb_data_ratings, float)
                     for s, n in _SEASON_PROBE_RANGE.items()}

# Same DataFrame layout as before: seasons 1-6 share one frame (equal
# episode counts); the shorter season 7 gets its own frame.
votes_df = pd.DataFrame({f"Season {s}": votes_by_season[s] for s in range(1, 7)})
votes_df_season_seven = pd.DataFrame({"Season 7": votes_by_season[7]})
ratings_df = pd.DataFrame({f"Season {s}": ratings_by_season[s] for s in range(1, 7)})
ratings_season_seven_df = pd.DataFrame({"Season 7": ratings_by_season[7]})

# Per-season line colors, identical to the original plots.
_RATING_COLORS = {1: "black", 2: "lightcoral", 3: "lightskyblue", 4: "red",
                  5: "darkblue", 6: "green", 7: "orange"}
_VOTE_COLORS = {1: "black", 2: "lightcoral", 3: "lightskyblue", 4: "red",
                5: "purple", 6: "green", 7: "darkblue"}

_TEN_EPISODES = [f"Ep.{e}" for e in range(1, 11)]
_SEVEN_EPISODES = _TEN_EPISODES[:7]

# --- Ratings per episode, one line per season ------------------------------
for s in range(1, 7):
    plt.plot(_TEN_EPISODES, ratings_df[f"Season {s}"], label=f"Season {s}",
             color=_RATING_COLORS[s])
plt.plot(_SEVEN_EPISODES, ratings_season_seven_df, label="Season 7",
         color=_RATING_COLORS[7])
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.title("Ratings Per Episode in Each Season")
plt.xlabel("Episode")
plt.ylabel("Rating Score")
plt.savefig("RatingsPerEpisodeinEachSeason")
plt.show()

# --- Votes per episode, one line per season --------------------------------
for s in range(1, 7):
    plt.plot(_TEN_EPISODES, votes_df[f"Season {s}"], label=f"Season {s}",
             color=_VOTE_COLORS[s])
plt.plot(_SEVEN_EPISODES, votes_df_season_seven, label="Season 7",
         color=_VOTE_COLORS[7])
plt.title("IMdB votes per Episode by Season")
plt.xlabel("Episode Number")
plt.ylabel("Number of Votes Per Episode ")
plt.legend()
plt.savefig("IMdBvotesperEpisodebySeason")
plt.show()

# --- Average rating per season (bar chart) ---------------------------------
_SEASON_LABELS = [f"Season {s}" for s in range(1, 8)]
avg_ratings = ([ratings_df[f"Season {s}"].mean() for s in range(1, 7)]
               + [ratings_season_seven_df["Season 7"].mean()])
# One plt.bar call per season keeps matplotlib's per-call color cycling,
# matching the multi-colored bars of the original figure.
for label, value in zip(_SEASON_LABELS, avg_ratings):
    plt.bar(label, value)
plt.ylim(6.0, 10.0)
plt.title("Average Ratings Per Season")
plt.xlabel("Season")
plt.ylabel("Average Rating")
plt.xticks(rotation=45)
plt.savefig("AverageRatingsPerSeason")
plt.show()

# --- Average vote count per season (bar chart) -----------------------------
avg_votes = ([votes_df[f"Season {s}"].mean() for s in range(1, 7)]
             + [votes_df_season_seven["Season 7"].mean()])
for label, value in zip(_SEASON_LABELS, avg_votes):
    plt.bar(label, value)
plt.xticks(rotation=45)
plt.title("Average Votes per Season")
plt.xlabel("Season")
plt.ylabel("Average Number of Votes")
plt.savefig("AverageVotesperSeason")
plt.show()
| totopi/Retro-Fireballs | Nikki/IMdb+Requests+for+project (2).py | IMdb+Requests+for+project (2).py | py | 9,697 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"l... |
10976806014 | import datetime
import calendar
import configparser
import os
class Utils:
    """Small static helpers shared across the project."""

    @staticmethod
    def get_business_date():
        """Return the most recent business day (Mon-Fri) as 'YYYY-MM-DD'.

        On weekdays this is today; on Saturday/Sunday it rolls back to the
        preceding Friday.
        """
        day = datetime.datetime.now()
        # isoweekday(): Mon=1 .. Sun=7; step back one day while on Sat/Sun.
        while day.isoweekday() > 5:
            day = day - datetime.timedelta(days=1)
        return day.strftime("%Y-%m-%d")

    @staticmethod
    def read_properties():
        """Load data/indicator.properties (relative to cwd) and return the parser."""
        parser = configparser.RawConfigParser()
        parser.read(os.getcwd() + "/data/indicator.properties")
        return parser
| ZehanLi/Roboadvisor | src/Utils.py | Utils.py | py | 634 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "confi... |
from selenium.webdriver.common.by import By
from behave import given, when, then
from selenium.webdriver.support import expected_conditions as EC
from time import sleep

# Locators for the product-detail page. NOTE(review): the steps below mostly
# delegate to page objects on `context.app`, so these tuples appear to be
# fallbacks for the commented-out direct-driver code -- confirm before removing.
ADD_TO_CART = (By.CSS_SELECTOR, "button[type='submit'].product-form__submit.button.button--secondary")
ADD_TO_CART_CONFIRM = (By.CSS_SELECTOR, "h3.label")
VIEW_CART = (By.CSS_SELECTOR, "div.button-container a[href='/cart']")
@when('Click to add product to cart')
def add_to_cart(context):
    """Behave step: click the add-to-cart button via the product page object."""
    context.app.product_detail_page.add_to_cart()
    # NOTE(review): a hard 5 s sleep is a flakiness/latency smell -- prefer an
    # explicit wait on the confirmation element; confirm and remove.
    sleep(5)
    #context.driver.find_element(*ADD_TO_CART).click()
@when("Click View my cart")
def click_view_cart(context):
context.app.product_detail_page.click_view_cart()
# context.driver.wait.until(EC.element_to_be_clickable(VIEW_CART)).click()
# sleep(3)
@then('Verify that the Subtotal is shown for confirmation')
def verify_add_to_cart_confirm(context):
    """Behave step: assert the cart page shows the 'Subtotal' confirmation label."""
    context.app.product_detail_page.verify_add_to_cart_confirm()
    # expected_text = 'Subtotal'
    # actual_text = context.driver.find_element(*ADD_TO_CART_CONFIRM).text
    # assert actual_text == expected_text, f'Expected {expected_text} but got {actual_text}'
| brightihegworo/internship--project | features/steps/product_page.py | product_page.py | py | 1,159 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver.common.by.By",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR",
"line_number":... |
34634869215 | from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.utils.functional import cached_property
from database.utils import translate_doc
from database.models.data_collections.data_collections import Collection
@translate_doc
class User(AbstractUser):
    # `help_text` is consumed by the project's `translate_doc` decorator to
    # build the (translated) model documentation shown in the admin.
    help_text = _('''
    User Model

    People using irekua must be previously registered and should provide
    minimal personal information. This is to track contributions and to control
    access to data.

    Users can belong to collections (:model:`database.Collection`), possess
    devices (:model:`database.PhysicalDevice`), sign licences
    (:model:`database.Licence`), upload items (:model:`database.Item`),
    annotate data (:model:`database.Annotation`), and more.
    ''')

    # Optional home institution; PROTECT prevents deleting an institution
    # that still has users attached.
    institution = models.ForeignKey(
        'Institution',
        on_delete=models.PROTECT,
        db_column='institution_id',
        verbose_name=_('institution'),
        help_text=_('Institution to which user belongs'),
        blank=True,
        null=True)

    # Role flags; a user may hold several at once (see `is_special`).
    is_developer = models.BooleanField(
        db_column='is_developer',
        verbose_name=_('is developer'),
        help_text=_('Flag to indicate if user is a model developer'),
        blank=False,
        null=False,
        default=False)
    is_curator = models.BooleanField(
        db_column='is_curator',
        verbose_name=_('is curator'),
        help_text=_('Flag to indicate if user is a curator'),
        blank=False,
        null=False,
        default=False)
    # AI-model pseudo-users are flagged so their contributions can be told
    # apart from human ones.
    is_model = models.BooleanField(
        db_column='is_model',
        verbose_name=_('is model'),
        help_text=_('Flag to indicate if user is an AI model'),
        blank=False,
        null=False,
        default=False)

    class Meta:
        verbose_name = _('User')
        verbose_name_plural = _('Users')
        # Enforce unique email addresses at the database level.
        unique_together = [
            ['email',],
        ]

    @property
    def is_special(self):
        # True when the user holds any elevated role. Bitwise `|` on bools
        # gives the same result as `or` here but always evaluates every
        # operand (no short-circuit).
        return self.is_developer | self.is_curator | self.is_model | self.is_superuser

    @cached_property
    def is_collection_type_admin(self):
        # Cached per instance: does this user administer any collection type?
        return self.collectiontype_set.exists()

    @cached_property
    def managed_collections(self):
        # Cached per instance: all collections whose type this user administers.
        queryset = Collection.objects.filter(
            collection_type__in=self.collectiontype_set.all())
        return queryset
| CONABIO-audio/irekua | irekua/database/models/users/users.py | users.py | py | 2,397 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.contrib.auth.models.AbstractUser",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 25,
"usage_typ... |
19323504885 | """
Python3.6+ only
"""
import re
from pathlib import Path
from pie import *
class env(env):
    """pie `env` extended with parsing of dotenv-style files."""

    @classmethod
    def _parse_lines(cls, ls):
        """Parse env-file lines into a dict, skipping blanks and comments.

        Accepts optional `set ` / `export ` prefixes before KEY=VALUE.
        Raises on any non-empty, non-comment line that does not match.
        """
        parsed = {}
        for i, raw in enumerate(ls, 1):
            l = raw.strip()
            if not l or l.startswith('#'):
                continue
            mo = re.match(r'^(set\s+|export\s+)?(?P<key>[^=\s]+)\s*=(?P<value>.*)', l)
            if mo is None:
                raise Exception(f'Failed to parse line {i}: "{l}"')
            parsed[mo.group('key')] = mo.group('value')
        return parsed

    @classmethod
    def from_files(cls, *files):
        """Build an env from the given files; later files override earlier ones.

        Missing files are reported (INFO print) and skipped.
        """
        merged = {}
        candidates = [Path(f) for f in files]
        existing = []
        for ef in candidates:
            if ef.exists():
                existing.append(ef)
            else:
                print(f'INFO: {ef} not found')
        for path in existing:
            with path.open('r', encoding='utf-8') as fin:
                merged.update(cls._parse_lines(fin.readlines()))
        return cls(merged)

    @classmethod
    def dump_env(cls):
        """Print every OS environment variable as KEY=VALUE, sorted by key."""
        import os
        for k in sorted(os.environ.keys()):
            print(f'{k}={os.environ[k]}')
| bizcubed/intergov | pie_env_ext.py | pie_env_ext.py | py | 1,497 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.match",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.environ.keys",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number":... |
42320457538 | import h5py
import os
import shutil
import argparse
from genome import *
# Filter resquiggled fast5 reads down to those whose alignment overlaps a
# given reference interval, copying the matching files to output_dir.
parser = argparse.ArgumentParser()
parser.add_argument("read_basedir", help="base directory of resquiggled fast5 files")
parser.add_argument("output_dir", help="directory to copy files to (must exist)")
parser.add_argument("reference", help="fasta file with reference genome")
parser.add_argument("begin", help="begining of interval (inclusive)")
parser.add_argument("end", help="end of interval (exclusive)")
args = parser.parse_args()

# Guard against clobbering the input directory with copies of itself.
if args.read_basedir == args.output_dir:
    print("read_basedir and output_dir must be different")
    exit(1)

begin = int(args.begin)
end = int(args.end)
if end - begin <= 0:
    print("interval must have positive length")
    exit(1)

# Only plain .fast5 files directly under read_basedir (no recursion).
read_files = [file for file in os.listdir(args.read_basedir) if not os.path.isdir(os.path.join(args.read_basedir, file))]
read_files = filter(lambda x : x[-6:] == ".fast5", read_files)
# load_fasta comes from the project's `genome` module (star-imported above);
# the first record's bases are used as the reference sequence.
reference = load_fasta(args.reference)[0].bases

for read_file in read_files:
    filename = os.path.join(args.read_basedir, read_file)
    good = False
    print(filename)
    try:
        with h5py.File(filename, "r") as f:
            read_id = list(f['Raw/Reads'].keys())[0]
            # Alignment metadata written by tombo's resquiggle step --
            # TODO confirm the exact producer of RawGenomeCorrected_000.
            alignment_meta = f['Analyses/RawGenomeCorrected_000/BaseCalled_template/Alignment'].attrs
            start_in_reference = alignment_meta['mapped_start']
            end_in_reference = alignment_meta['mapped_end']
            strand = alignment_meta['mapped_strand'].decode('ascii')
            # Reverse-strand coordinates are mirrored into forward-strand space
            # so the overlap test below works for both strands.
            if strand == '-':
                start_in_reference, end_in_reference = len(reference) - end_in_reference, len(reference) - start_in_reference
            # Keep the read if its mapped span overlaps [begin, end).
            if min(end_in_reference, end) - max(start_in_reference, begin) > 0:
                good = True
    except KeyError:
        # Read lacks the resquiggle alignment group; skip it.
        continue
    if good:
        copy_name = os.path.join(args.output_dir, read_file)
        print("copying {} to {}".format(filename, copy_name))
        shutil.copy2(filename, copy_name)
| baklazan/thesis | filter_reads.py | filter_reads.py | py | 1,897 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_n... |
21104650721 | from flask import Flask
from flask_restful import Api
import logging as log
from api.config.apiconf import config
from api.views import VRStats,VRTopPlaces,\
VRClassifPlaces,VRClassifPlacesAccuracy,VRTop10Places
app = Flask(__name__)
ap = Api(app)

# REST endpoints: statistics, top-N places, default top-10, classification
# and classification accuracy.
ap.add_resource(VRStats, '/api/stats')
ap.add_resource(VRTopPlaces, '/api/top_places/<string:inp_number>')
ap.add_resource(VRTop10Places, '/api/top_places')
ap.add_resource(VRClassifPlaces, '/api/classify')
ap.add_resource(VRClassifPlacesAccuracy, '/api/classif_accuracy')

if __name__ == '__main__':
    # Configure logging and announce startup BEFORE app.run(): app.run()
    # blocks until the server stops, so the original post-run log line was
    # only ever written at shutdown.
    log.basicConfig(filename=config['log']['file'], level=log.DEBUG)
    log.info("Flask Restful server is started ...")
    # NOTE(review): bool() of a non-empty string is always True, so a config
    # value of "False" would still enable debug -- confirm the config value
    # type and parse it explicitly if it is a string.
    app.run(debug=bool(config['flask']['debug']),
            host=config['flask']['host'],
            port=int(config['flask']['port'])
            )
log.info("Flask Restful server is started ...") | asiaat/mxresto | apirestful/main.py | main.py | py | 912 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask_restful.Api",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "api.views.VRStats",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "api.views.VRTopPl... |
1474225083 | # Install Python Packages
#!pip install openai tiktoken
# Import Python Packages
import platform
import os
import openai
import tiktoken
import time
print('Python: ', platform.python_version())
# Count the Number of Tokens
def count_tokens(filename):
    """Return the number of GPT-2 tokens in the text file `filename`."""
    encoder = tiktoken.get_encoding("gpt2")
    with open(filename, 'r') as fh:
        contents = fh.read()
    return len(encoder.encode(contents))
# Count the Number of Tokens in a String
def count_tokens_in_string(text):
    """Return the number of GPT-2 tokens in the string `text`."""
    encoder = tiktoken.get_encoding("gpt2")
    return len(encoder.encode(text))
filename = "inputFile.txt"
num_tokens = count_tokens(filename=filename)
print("Number of tokens: ", num_tokens)
# Break up text into chunks of 2000 tokens with an overlap of 100 tokens
def break_up_file_to_chunks(filename, chunk_size=2000, overlap=100):
    """Split a file into token chunks of `chunk_size` overlapping by `overlap`.

    Returns a list of GPT-2 token-id lists; consecutive chunks share
    `overlap` tokens so context is not lost at the boundaries.
    """
    encoder = tiktoken.get_encoding("gpt2")
    with open(filename, 'r') as fh:
        tokens = encoder.encode(fh.read())
    step = chunk_size - overlap
    return [tokens[start:start + chunk_size] for start in range(0, len(tokens), step)]
# Modified the function break_up_file_to_chunks to work with a list of strings
def break_up_text_to_chunks(text_list, chunk_size=2000, overlap=100):
encoding = tiktoken.get_encoding("gpt2")
tokens = [encoding.encode(text) for text in text_list]
tokens = [token for sublist in tokens for token in sublist] # Flatten the list
num_tokens = len(tokens)
chunks = []
for i in range(0, num_tokens, chunk_size - overlap):
chunk = tokens[i:i + chunk_size]
chunks.append(chunk)
return chunks
chunks = break_up_file_to_chunks(filename)
for i, chunk in enumerate(chunks):
print(f"Chunk {i}: {len(chunk)} tokens")
# Set OpenAI API Key
# SECURITY NOTE(review): never hard-code a real API key in source control --
# load it from the environment or a secrets manager. The placeholder below
# must be replaced (or exported externally) before running.
os.environ["OPENAI_API_KEY"] = 'OPENAI_API_KEY goes HERE'
openai.api_key = os.getenv("OPENAI_API_KEY")
# Recursive function to handle API rate limits and large texts
def summarize_text(text_list, max_tokens):
    """Recursively summarize text with the OpenAI completion API.

    If the stringified `text_list` exceeds `max_tokens` GPT-2 tokens it is
    re-chunked, each chunk is summarized, and the function recurses on the
    list of chunk summaries; otherwise a single consolidation prompt is sent.

    NOTE(review): reads the module-level `encoding`, which is only assigned
    further down the file -- calling this before that assignment raises
    NameError; confirm and hoist the assignment.
    NOTE(review): `openai.api_resources.completion.CompletionError` does not
    appear to be a real exception class in the openai package (rate limits
    raise openai.error.RateLimitError), so these handlers presumably never
    fire -- verify against the installed openai version.
    """
    if count_tokens_in_string(str(text_list)) > max_tokens:
        # Too large for a single request: chunk, summarize each chunk, recurse.
        text_list = break_up_text_to_chunks(text_list)
        summary_list = []
        for text_chunk in text_list:
            prompt_request = "Summarize this long summary section: " + encoding.decode(text_chunk)
            try:
                response = openai.Completion.create(
                    model="text-davinci-003",
                    prompt=prompt_request,
                    temperature=.5,
                    max_tokens=max_tokens,
                    top_p=1,
                    frequency_penalty=0,
                    presence_penalty=0
                )
                summary_list.append(response["choices"][0]["text"].strip())
                time.sleep(1)  # Add a delay between each API request to avoid hitting rate limits
            except openai.api_resources.completion.CompletionError as e:
                if e.args[0]['error']['message'] == 'Rate limit exceeded':
                    print('Rate limit exceeded. Waiting for 60 seconds.')
                    time.sleep(60)
                    return summarize_text(text_list, max_tokens)
                elif e.args[0]['error']['message'] == 'Token limit exceeded':
                    print('Token limit exceeded. Breaking the text into smaller chunks.')
                    # NOTE(review): wrapping the already-chunked list in
                    # [text_list] nests lists before re-chunking -- looks
                    # unintended; confirm.
                    text_chunks = break_up_text_to_chunks([text_list])
                    summarized_text = []
                    for chunk in text_chunks:
                        summarized_text.append(summarize_text(encoding.decode(chunk), max_tokens))
                    return ' '.join(summarized_text)
        return summarize_text(summary_list, max_tokens)  # Recursive call to handle the case where the summarized text still exceeds the maximum token limit
    else:
        # Small enough: one consolidation request over the whole input.
        prompt_request = "Consolidate these paper summaries: " + str(text_list)
        try:
            response = openai.Completion.create(
                model="text-davinci-003",
                prompt=prompt_request,
                temperature=.5,
                max_tokens=max_tokens,
                top_p=1,
                frequency_penalty=0,
                presence_penalty=0
            )
            return response["choices"][0]["text"].strip()
        except openai.api_resources.completion.CompletionError as e:
            if e.args[0]['error']['message'] == 'Rate limit exceeded':
                print('Rate limit exceeded. Waiting for 60 seconds.')
                time.sleep(60)
                return summarize_text(text_list, max_tokens)
            elif e.args[0]['error']['message'] == 'Token limit exceeded':
                print('Token limit exceeded. Breaking the text into smaller chunks.')
                text_chunks = break_up_text_to_chunks([text_list])
                summarized_text = []
                for chunk in text_chunks:
                    summarized_text.append(summarize_text(encoding.decode(chunk), max_tokens))
                return ' '.join(summarized_text)
# Summarize the text one chunk at a time
prompt_response = []
# Module-level GPT-2 encoder; also read by summarize_text above, which
# therefore must not be called before this line executes.
encoding = tiktoken.get_encoding("gpt2")

chunks = break_up_file_to_chunks(filename)
for i, chunk in enumerate(chunks):
    prompt_request = "Summarize this partial section of a paper: " + encoding.decode(chunks[i])
    summary = summarize_text(prompt_request, 2000)
    prompt_response.append(summary)
    #Print to the user the partial section summaries
    print("Summary of each part ",i,": ",summary)

#print("These are the aggregated chunk summaries:\n")
#print(prompt_response)

# Consolidate the summaries: one final pass over all per-chunk summaries.
prompt_request = "Consolidate these paper summaries: " + ' '.join(prompt_response)
paper_summary = summarize_text(prompt_request, 2000)

# Summary of Summaries
print("This is the final overall summary: \n")
print(paper_summary)
| rayborg/textSummarize_GPT | recursiveSummaryLongFiles.py | recursiveSummaryLongFiles.py | py | 5,974 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "platform.python_version",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tiktoken.get_encoding",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "tiktoken.get_encoding",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "t... |
1149116969 | from unittest.mock import MagicMock, patch
import torch.nn as nn
from mmcv.device.mlu import MLUDataParallel, MLUDistributedDataParallel
from mmcv.parallel import is_module_wrapper
from mmcv.utils import IS_MLU_AVAILABLE
def mock(*args, **kwargs):
    """No-op stand-in used to patch away torch.distributed internals."""
    return None
@patch('torch.distributed._broadcast_coalesced', mock)
@patch('torch.distributed.broadcast', mock)
@patch('torch.nn.parallel.DistributedDataParallel._ddp_init_helper', mock)
def test_is_module_wrapper():
    """is_module_wrapper() is False for plain modules, True for MLU wrappers.

    The three patches above stub out torch.distributed internals so a
    DistributedDataParallel wrapper can be constructed without an
    initialized process group.
    """

    class Model(nn.Module):
        # Minimal single-conv module used as the wrapped model.

        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(2, 2, 1)

        def forward(self, x):
            return self.conv(x)

    model = Model()
    # A bare nn.Module must not be detected as a wrapper.
    assert not is_module_wrapper(model)

    if IS_MLU_AVAILABLE:
        # Only exercised on hosts with Cambricon MLU support.
        mludp = MLUDataParallel(model)
        assert is_module_wrapper(mludp)

        mluddp = MLUDistributedDataParallel(model, process_group=MagicMock())
        assert is_module_wrapper(mluddp)
| rawalkhirodkar/egohumans | egohumans/external/mmcv/tests/test_device/test_mlu/test_mlu_parallel.py | test_mlu_parallel.py | py | 948 | python | en | code | 16 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_nu... |
29522459651 | # Activate
# |- Linear
# |- ReLU
# |- Sigmoid
# |- Softmax
# |- Tanh
# Regularize
# |- L2
# |- Dropout
# Initialize (variance)
# |- Constant
# |- He
# |- Xavier
# Optimize
# |- EMA
# |- RMSprop
# |- Adam
# Normalization/ standardization
# Grad checking
import numpy as np
import gc
import matplotlib.pyplot as plt
class Activate:
    """
    Activation functions:
    (1) Linear
    (2) ReLU (Rectified Linear Unit)
    (3) Sigmoid
    (4) Softmax
    (5) Tanh (Hyperbolic tangent)

    All methods are stateless; inputs are numpy arrays (or scalars) and the
    result has the same shape as the input.
    """

    def linear(self, x):
        """
        Linear activation function (pass-through).
        Returns nothing but the the input vector itself.
        """
        return x

    def relu(self, x, max_value=None, threshold=0):
        """
        Rectified linear unit.

        With default values, it returns element-wise `max(x, 0)`.
        Otherwise:
        `relu(x) = max_value` for `x >= max_value`,
        `relu(x) = x` for `threshold <= x < max_value`,

        Parameters
        ----------
        x : ndarray
        max_value : float, default = None (no upper clipping)
        threshold : float, default = 0
        """
        output = np.maximum(x, threshold)
        # Only clip from above when a ceiling was requested:
        # np.minimum(x, None) raises TypeError, so the original default
        # max_value=None was broken.
        if max_value is not None:
            output = np.minimum(output, max_value)
        return output

    def sigmoid(self, x):
        """
        Sigmoid activation function,
        `sigmoid(x) = 1 / (1 + exp(-x))`.

        The sigmoid function always returns a value between 0 and 1.
        For example:
        x = [-20, -1.0, 0.0, 1.0, 20]
        output = [2.06e-09, 2.689e-01, 5.00e-01, 7.31e-01, 1.0]
        """
        return 1 / (1 + np.exp(-x))

    def softmax(self, x, axis=-1):
        """
        Softmax converts a vector of values to a probability distribution.

        Parameters
        ----------
        x : ndarray
        axis : int, default=-1
            axis along which the softmax normalization is applied.

        Returns
        -------
        output : ndarray
            Non-negative values summing to 1 along `axis`.
        """
        # Subtract the per-axis max for numerical stability (mathematically a
        # no-op), and normalize along `axis` -- the original ignored `axis`
        # and summed over the whole array, which is wrong for batched input.
        shifted = np.exp(x - np.max(x, axis=axis, keepdims=True))
        output = shifted / np.sum(shifted, axis=axis, keepdims=True)
        # Sanity checks with floating-point tolerance (the original exact
        # `== 1.0` comparison could fail on rounding).
        assert np.all(output >= 0)
        assert np.allclose(np.sum(output, axis=axis), 1.0)
        return output

    def tanh(self, x):
        """
        Hyperbolic tangent activation function.
        Returns:
        ndarray of same shape and dtype as those of input `x`.
        """
        return np.tanh(x)
# ----- Methods useful for Convolutional Neural Networks (CNN) ---- #
def conv_dims(dim: int, f: int, pad: int, stride: int):
    """Output spatial size of a convolution.

    Arguments:
    dim -- int; spatial size of the input image
    f -- int; size of the (square) filter
    pad -- int; padding amount
    stride -- int; stride

    Returns:
    int -- output spatial size, 1 + floor((dim - f + 2*pad) / stride)
    """
    return 1 + int((dim - f + 2 * pad) / stride)
def zero_pad(image, pad: int, demo=False):
    """
    Zero-pad a batch of images along their height and width axes.

    Parameters:
        image -- ndarray; shape (m, nH, nW, nc), a batch of m images
        pad -- int; number of zero rows/columns added on each side of the
               height and width dimensions
        demo -- bool; if True, display the first channel of the first
                image before and after padding
    Returns:
        im_pad -- padded batch of shape (m, nH + 2*pad, nW + 2*pad, nc)
    Raises:
        ValueError -- if the input is not 4-dimensional (m, nH, nW, nc)
    """
    if len(image.shape) < 4:
        raise ValueError("Input dimensions not of the form (m, nh, nw, nc).")
    # Pad only the two spatial axes; batch and channel axes are untouched.
    widths = ((0, 0), (pad, pad), (pad, pad), (0, 0))
    im_pad = np.pad(image, widths, mode='constant', constant_values=(0, 0))
    if demo:
        fig, ax = plt.subplots(1, 2)
        ax[0].set_title('Original Image')
        ax[0].imshow(image[0, :, :, 0])
        ax[1].set_title('Padded Image')
        ax[1].imshow(im_pad[0, :, :, 0])
        plt.show()
    return im_pad
def conv_block(im_block, kernel, bias):
    """
    Apply one filter to a single image patch of matching size.

    Parameters:
        im_block -- ndarray, slice of the input image, shape (f, f, nC_prev)
        kernel -- ndarray, filter weights, shape (f, f, nC_prev)
        bias -- float, bias parameter - matrix of shape (1, 1, 1)
    Returns:
        conv_im -- float, scalar result of convolving the patch with
                   (kernel, bias)
    """
    # Element-wise product, summed over the whole volume, plus the bias.
    weighted = im_block * kernel
    conv_im = weighted.sum() + float(bias)
    return conv_im
def conv_full(image, kernel, bias, stride, pad, demo=False):
    """
    Convolve a full batch of images with a bank of filters by sliding
    'conv_block' over every spatial position and output channel.

    Arguments:
        image -- output activations of the previous layer,
                 ndarray of shape (m, nH_prev, nW_prev, nC_prev)
        kernel -- Weights, ndarray of shape (f, f, nC_prev, nC)
        bias -- Biases, ndarray of shape (1, 1, 1, nC)
        stride -- int; stride parameter
        pad -- int; padding parameter
        demo -- bool; show the image pixels before and after convolution
    Returns:
        im_out -- convolved output, numpy array of shape (m, nH, nW, nC)
    """
    m, nh_prev, nw_prev, nc_prev = image.shape
    # Number of output channels equals the number of filters.
    f, _, _, nc = kernel.shape
    # Spatial size of the output.
    nh = conv_dims(nh_prev, f, pad, stride)
    nw = conv_dims(nw_prev, f, pad, stride)
    im_out = np.zeros(shape=(m, nh, nw, nc))
    # Pad once up-front so every patch lookup below stays in-bounds.
    image = zero_pad(image, pad, demo=False)
    for sample in range(m):              # training examples
        padded = image[sample, :, :, :]
        for row in range(nh):            # vertical axis
            top = row * stride
            for col in range(nw):        # horizontal axis
                left = col * stride
                # The patch is the same for every output channel, so it
                # is extracted once outside the channel loop.
                patch = padded[top:top + f, left:left + f, :]
                for chan in range(nc):   # output channels
                    im_out[sample, row, col, chan] = conv_block(
                        patch, kernel[:, :, :, chan], bias[:, :, :, chan])
    # Making sure the output shape is correct
    assert (im_out.shape == (m, nh, nw, nc))
    if demo:
        fig, ax = plt.subplots(1, 2)
        ax[0].set_title('Original Image (%ix%ix%i)'
                        % (nh_prev + 2 * pad, nw_prev + 2 * pad, nc_prev))
        ax[0].imshow(image[0, :, :, 0])
        ax[1].set_title('Convolved Image (%ix%ix%i)' % (nh, nw, nc))
        ax[1].imshow(im_out[0, :, :, 0])
        plt.show()
    # Free the (possibly large) padded copy before returning.
    del image
    gc.collect()
    return im_out
def pooling(image, f: int, stride: int, mode='max', demo=False):
    """
    Apply max- or average-pooling over the spatial axes of a batch of
    images, one (f x f) window at a time.

    Arguments:
        image -- output activations of the previous layer,
                 ndarray of shape (m, nH_prev, nW_prev, nC_prev)
        f -- int; side length of the (square) pooling window
        stride -- int; stride parameter
        mode -- 'max' for max pooling; 'avg' for average pooling
        demo -- bool; show the image pixels before and after pooling
    Returns:
        im_out -- pooled output, ndarray of shape (m, nH, nW, nC)
    Raises:
        KeyError -- raised if 'mode' key is incorrectly specified.
    """
    # Validate `mode` up-front instead of deep inside the loops, so a bad
    # argument fails immediately (previously it only failed once the
    # innermost loop body was reached).
    if mode == 'max':
        reduce_fn = np.max
    elif mode == "avg":
        reduce_fn = np.mean
    else:
        raise KeyError("'mode' key unrecognized, use 'avg' or 'max'.")
    # Retrieve the shapes of the input
    (m, nh_prev, nw_prev, nc_prev) = image.shape
    # Pooling uses no padding, so the output size is 1 + (in - f)/stride.
    nh = 1 + int((nh_prev - f) / stride)
    nw = 1 + int((nw_prev - f) / stride)
    nc = nc_prev
    im_out = np.zeros(shape=(m, nh, nw, nc))
    for i in range(m):          # loop over the training examples
        im_i = image[i, :, :, :]
        for h in range(nh):     # loop over the vertical axis
            top = h * stride
            bottom = top + f
            for w in range(nw):  # loop over the horizontal axis
                left = w * stride
                right = left + f
                for c in range(nc):  # loop over the channels
                    im_out[i, h, w, c] = reduce_fn(
                        im_i[top:bottom, left:right, c])
    # Making sure the output shape is correct
    assert (im_out.shape == (m, nh, nw, nc))
    if demo:
        fig, ax = plt.subplots(1, 2)
        ax[0].set_title('Original Image')
        ax[0].imshow(image[0, :, :, 0])
        ax[1].set_title('%s-pooled Image' % mode)
        ax[1].imshow(im_out[0, :, :, 0])
        plt.show()
    # Drop the local reference to the (possibly large) input.
    del image
    gc.collect()
    return im_out
# def forward_prop(self, X):
# CONV2D -> RELU -> MAX_POOL -> CONV2D -> RELU -> MAX_POOL -> FLATTEN -> FULLY_CONNECTED
# CONV2D: stride of 1, padding 'SAME'
# Z1 = None
# # RELU
# A1 = None
# # MAX_POOL: window 8x8, stride 8, padding 'SAME'
# P1 = None
# # CONV2D: filters W2, stride 1, padding 'SAME'
# Z2 = None
# # RELU
# A2 = None
# # MAX_POOL: window 4x4, stride 4, padding 'SAME'
# P2 = None
# # FLATTEN
# F = None
# # FULLY-CONNECTED without non-linear activation function (not not call softmax).
# # 6 neurons in output layer. Hint: one of the arguments should be "activation_fn=None"
# Z3 = None --> return Z3 (a linear unit) as a 'self' object.
# ----- Speeding methods for Artificial Neural Networks (ANN) ---- #
#
# def init_var_scale(num_hidden_units, key, const=0.01):
# # Good for ReLU
# if key == 'He':
# # pass the number of hidden units in the previous layer
# return np.sqrt(2. / num_hidden_units)
#
# # Good for Sigmoid or Tanh
# elif key == 'Xavier':
# # pass the number of hidden units in the previous layer
# return np.sqrt(1. / num_hidden_units)
#
# # If not sure, just use a constant variance
# else:
# return const
#
#
# def accuracy(y_orig, y_pred):
# return round(np.mean(y_orig == y_pred) * 100, 4)
#
#
# def activate(x, key):
#
# if key == 'sigmoid':
# a_func = 1 / (1 + np.exp(-x))
# a_grad = np.multiply(a_func, 1-a_func)
#
# elif key == 'tanh':
# a_func = np.tanh(x)
# a_grad = 1 - np.power(a_func, 2)
#
# elif key == 'ReLU':
# a_func = np.maximum(0, x)
# a_grad = np.heaviside(x, 0) # x==0 returns 0
#
# else:
# raise KeyError("Invalid activation key. "
# "Choose from 'tanh', 'sigmoid', 'ReLU'")
#
# return a_func, a_grad
#
#
# def train_test(A, B, test_size=0.2):
#
# nums = A.shape[1]
# frac = round(nums * (1 - test_size))
#
# # Shuffle the index array and then map that to X, Y
# idx = np.arange(nums)
# np.random.shuffle(idx)
# A = A[:, idx]
# B = B[:, idx]
#
# return A[:, :frac], A[:, frac:], B[:, :frac], B[:, frac:]
#
#
# def set_bias_as_weight(shape):
# return shape[0] + 1, shape[1]
#
#
# def add_bias(vector):
# return np.hstack([vector, np.ones((vector.shape[0], 1))])
#
#
# def sigmoid(z):
# return 1 / (1 + np.exp(-z))
#
#
# def shuffle_vectors(x, y):
# rd = np.arange(len(x))
# np.random.shuffle(rd)
# x = x[rd]
# y = y[rd]
# return x, y
#
#
# def _stable_clip(x):
# """Used to avoid numerical inestability when"""
# return np.clip(x, 1e-7, 1 - 1e-7)
#
#
# def mean_squared_error(ypred, ytrue):
# return (ypred - ytrue) * ypred * (1 - ypred)
#
#
# def cross_entropy(ypred, ytrue, binary=True):
# # return -ytrue * np.log(_stable_clip(ypred)) -\
# # (1 - ytrue) * np.log(1 - _stable_clip(ypred))
# return _stable_clip(ypred) - ytrue
| Duckchoy/AI-algos | ANN/utils.py | utils.py | py | 13,448 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.maximum",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.minimum",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": ... |
14351233649 | import requests, uuid, json, argparse, sys, os
import langcodes
def main():
    """Entry point: translate a .txt file or run an interactive loop.

    Reads Azure credentials from config.py, parses -f/-t command-line
    options, and either translates the given file once or repeatedly
    translates lines typed on stdin (until EOF/Ctrl-D).
    """
    # Load the Azure key/endpoint/region from the local config module.
    credentials = get_Credentials()
    key = credentials["key"]
    endpoint = credentials["endpoint"]
    location = credentials["location"]
    # If program is run without any command-line arguments, bail out with
    # a usage message.
    if len(sys.argv) == 1:
        sys.exit("Usage: python project.py [-f FILE] [-t TO] \n MISSING FILE AND LANGUAGE!")
    # Parse the arguments to get file and language information. The try
    # block deliberately wraps everything below so that KeyError /
    # LookupError / FileNotFoundError raised anywhere inside is mapped to
    # a user-facing exit message.
    try:
        parsed_data = parse_arguments()
        File = parsed_data["File"]
        # NOTE(review): `File != None` should idiomatically be
        # `File is not None` (behavior is the same here).
        if File != None and os.path.splitext(File)[1] != ".txt":
            sys.exit("The only supported file format is \".txt\"")
        translation_language = parsed_data["translation_language"]
        if File is None:
            # Interactive mode: translate one stdin line per iteration
            # until the user sends EOF.
            while True:
                try:
                    print(live_translator(key, endpoint, location, translation_language))
                except EOFError:
                    print("\nBye, Have a nice day!")
                    return
        else:
            print(file_translator(key, endpoint, location, File, translation_language))
    except KeyError:
        sys.exit("Usage: python project.py [-f FILE] [-t TO]")
    # When the language name couldn't be resolved by langcodes
    except LookupError:
        sys.exit("UNRECOGNIZED/INVALID LANGUAGE")
    # When the file provided doesn't exist
    except FileNotFoundError:
        sys.exit("FILE DOESN'T EXIST")
def get_Credentials():
    """Load the Azure Translator key, endpoint and region from config.py.

    Returns a dict with keys "key", "endpoint" and "location".
    """
    from config import AZURE_KEY, endpoint, location
    creds = dict(key=AZURE_KEY, endpoint=endpoint, location=location)
    return creds
def parse_arguments():
    """Parse the -f/--file and -t/--to command-line options.

    Returns a dict: {"File": path-or-None,
                     "translation_language": ISO language code}.
    Falls back to "en" when no target language is supplied.
    """
    parser = argparse.ArgumentParser(description="Tranlate text from one language to another")
    parser.add_argument("-f", "--file", help='Path to the input file')
    parser.add_argument("-t", "--to", help="translation language")
    args = parser.parse_args()
    # Resolve the human-readable language name to an ISO code; default
    # to English when the option is absent.
    if args.to is None:
        language_code = "en"
    else:
        language_code = langcodes.find(args.to.lower()).language
    return {"File": args.file, "translation_language": language_code}
def file_translator(key, endpoint, location, File, translation_language):
    """Translate the contents of a text file via the Azure Translator API.

    The translated text is written to translated_<lang>.txt in the
    current directory; a human-readable completion message is returned.
    """
    with open(File, 'r') as source:
        original_text = source.read()
    request_url = endpoint + '/translate'
    query = {
        'api-version': '3.0',
        'to': [translation_language]
    }
    request_headers = {
        'Ocp-Apim-Subscription-Key': key,
        # location required if you're using a multi-service or regional
        # (not global) resource.
        'Ocp-Apim-Subscription-Region': location,
        'Content-type': 'application/json',
        'X-ClientTraceId': str(uuid.uuid4())
    }
    # The API accepts a list of objects; a single one is sent here.
    payload = [{'text': original_text}]
    response = requests.post(request_url, params=query,
                             headers=request_headers, json=payload).json()
    translated = [item['text'] for item in response[0]['translations']]
    output_file = f"translated_{translation_language}.txt"
    # Write the translated content to the output file.
    with open(output_file, 'w') as target:
        target.write(translated[0])
    return f"Translation completed. Translated content saved to: {output_file}"
def live_translator(key, endpoint, location, translation_language="en"):
    """Read one line from stdin and return its translation(s) as a list.

    Raises EOFError (from input()) when stdin is closed; the caller uses
    that to terminate the interactive loop.
    """
    text = input("Text: ")
    request_url = endpoint + '/translate'
    query = {
        'api-version': '3.0',
        'to': [translation_language]
    }
    request_headers = {
        'Ocp-Apim-Subscription-Key': key,
        # location required if you're using a multi-service or regional
        # (not global) resource.
        'Ocp-Apim-Subscription-Region': location,
        'Content-type': 'application/json',
        'X-ClientTraceId': str(uuid.uuid4())
    }
    # The API accepts a list of objects; a single one is sent here.
    payload = [{'text': text}]
    response = requests.post(request_url, params=query,
                             headers=request_headers, json=payload).json()
    return [item['text'] for item in response[0]['translations']]
if __name__ == "__main__":
main() | SonuLohani-1/MyTranslator | functions.py | functions.py | py | 4,586 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
import numpy as np
import pandas as pd
from tabulate import tabulate
# Load the master price table and the pickup-point fee table from Excel.
# NOTE(review): these run at import time and require both .xlsx files to
# be present in the working directory.
df = pd.read_excel('Database.xlsx')
df2 = pd.read_excel('Daftar Pickup.xlsx')
# Drop incomplete rows, then strip the identifier columns so only the
# per-courier price columns remain.
df_baru = df.dropna()
Harga = df_baru.drop('kode', axis =1)
Harga2 = df2.drop('No', axis =1)
#untuk fungsi cekKota dan ceKotaAsal, dilakukan pencarian apakah input ada di dalam database, loop akan diulang sampai
#data yang diinput valid
#Yunita
def jenis_paket():
    """Prompt until the user enters a valid delivery-type number.

    Returns:
        tuple(int, str): the chosen number (1-3) and its label
        ("Reguler", "Cepat" or "Kargo").
    """
    labels = {1: "Reguler", 2: "Cepat", 3: "Kargo"}
    while True:
        try:
            paket = int(input("Masukkan nomor jenis pengiriman (Contoh : 1)\t: "))
            if paket in labels:
                return paket, labels[paket]
            # Out-of-range numbers are handled the same way as non-numeric
            # input: re-prompt.
            raise ValueError
        except ValueError:
            print('Masukkan pilihan yang valid')
def cekKota():
    """Prompt repeatedly for a destination city until it exists in the
    `df.kota` column of the price database.

    Returns:
        str: the validated destination city name, lower-cased.
    """
    while True:
        tujuan = input("Masukkan kode kota tujuan (Contoh : Jakarta)\t: ")
        tujuan = tujuan.lower()
        if tujuan in df.kota.unique():
            # `return` already leaves the loop; the original's trailing
            # `break` after it was unreachable dead code and is removed.
            return tujuan
        print("Kota tujuan tidak terdaftar, silahkan masukkan kembali")
#Kiki
def cekKotaAsal():
    """Prompt repeatedly for a pickup city until it exists in the
    `df2.tempat` column of the pickup-fee table.

    Returns:
        str: the validated pickup city name, capitalized.
    """
    while True:
        pickup = input("Masukkan kota Pickup (Contoh : Surakarta): ")
        pickup = pickup.capitalize()
        if pickup in df2.tempat.unique():
            # `return` exits the loop directly; the original's `break`
            # after it could never execute and is removed.
            return pickup
        print("Kota pickup tidak terdaftar, silahkan masukkan kembali")
#pengolahan data menggunakan pandas, yakni dengan mencari lokasi harga ekspedisi, dengan data yang diinput
#berupa kota asal/kota tujuan. Untuk biaya ongkir kargo/truk yang kurang dari 25kg akan dianggap 25kg dengan biaya
#sepersepuluh dari yang ditentukan, jika lebih maka kemudian baru diberikan kelipatan sebesar sepersepuluh dari
#biaya ongkir yang ditentukan oleh database. Untuk paket cepat, biaya akan dikenakan 5x lipat dari yang tertera di
#database, untuk fee akan dikenakan biaya untuk kelipatan 10kg pertama, dan akan ditambah sepersepuluh dari database per
#kilogramnya
#Yasmin
def hitungongkir(kota, berat, ekspedisi, fee, jenis_paket):
    """Compute total shipping cost = base rate * billable weight + pickup fee.

    Arguments:
        kota -- destination city (row key into the price table `Harga`)
        berat -- weight in kg
        ekspedisi -- courier column name
        fee -- pickup city (row key into the fee table `Harga2`)
        jenis_paket -- 1 (regular), 2 (express, 5x rate) or 3 (cargo,
                       billed per 25 kg unit with a 25 kg minimum)
    Returns:
        total cost (base + pickup fee; fee grows per started 10 kg).
    """
    if jenis_paket == 1:
        billable = berat
    elif jenis_paket == 2:
        # Express is charged five times the regular weight.
        billable = berat * 5
    elif jenis_paket == 3:
        # Cargo: one unit per full 25 kg plus the fractional remainder;
        # anything under 25 kg is billed as one full unit.
        whole_units = berat // 25
        if whole_units >= 1:
            billable = whole_units + (berat % 25) / 25
        else:
            billable = 1
    base = (Harga.loc[df['kota'] == kota, ekspedisi].iloc[0]) * billable
    surcharge = (Harga2.loc[df2['tempat'] == fee, ekspedisi].iloc[0]) * ((berat // 10) + 1)
    return base + surcharge
#melakukan olah data dengan melakukan pengurutan dari yang termurah hingga termahal, numpy digunakan untuk memberikan
#index dari angka 1-5
#Yasmin
def rekomendasi(ekspedisi, harga2):
    """Build a ranked (cheapest-first) plain-text table of couriers.

    Parameters:
        ekspedisi -- sequence of 5 courier names
        harga2 -- sequence of 5 prices, parallel to `ekspedisi`

    Returns:
        str: a `tabulate`-rendered table sorted by price ascending, with
        an explicit 1..5 rank column.
    """
    tampilan = {'Ekpedisi' : ekspedisi,
                'Harga': harga2}
    # Index rows 1..5, then sort by price (ascending).
    df_tampil = pd.DataFrame(tampilan, index = np.arange(1, 6))
    dft = df_tampil.sort_values(by = 'Harga')
    # Prepend an explicit rank column and renumber the index after sorting.
    dft.insert(0,'No',[1,2,3,4,5])
    dft.index = np.arange(1,6)
    dft = tabulate(dft, headers=dft.columns, showindex=False)
return(dft) | yasminzulfa/22-TeamProject-Prokom | Modul.py | Modul.py | py | 3,063 | python | id | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_excel",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
... |
16626978852 | # -----------------------------------------------------------
#Cafedev.vn - Kรชnh thรดng tin IT hร ng ฤแบงu Viแปt Nam
#@author cafedevn
#Contact: cafedevn@gmail.com
#Fanpage: https://www.facebook.com/cafedevn
#Group: https://www.facebook.com/groups/cafedev.vn/
#Instagram: https://instagram.com/cafedevn
#Twitter: https://twitter.com/CafedeVn
#Linkedin: https://www.linkedin.com/in/cafe-dev-407054199/
#Pinterest: https://www.pinterest.com/cafedevvn/
#YouTube: https://www.youtube.com/channel/UCE7zpY_SlHGEgo67pHxqIoA/
# -----------------------------------------------------------
import json
from pprint import*
def doc_noi_dung_json(filename):
    """Read a JSON file (UTF-8) and return its parsed content.

    Args:
        filename: path to the JSON file.
    Returns:
        The deserialized Python object (dict/list/...).
    """
    # 'with' guarantees the handle is closed even if json.load raises;
    # the original leaked the file descriptor on a parse error.
    with open(filename, encoding="utf-8") as data_file:
        return json.load(data_file)
if __name__ == '__main__':
    # Load the company-management JSON database and print a summary of
    # the first company.
    url_data = 'du_lieu/QLCT_1.json'
    noi_dung = doc_noi_dung_json(url_data)
    #pprint(noi_dung) # Show content json
    cong_ty = noi_dung['CONG_TY'][0] # take the first company record
    don_vi = noi_dung['DON_VI']
    # NOTE(review): `don_vi` and `tong_so_nv` are initialized but never
    # used below -- the employee-counting logic appears unfinished.
    tong_so_nv = 0
    print('Ten cong ty:', cong_ty['Ten'])
    print('Dia chi cong ty: ', cong_ty['Dia_chi'])
    print(' Tong so nhanz vien: ')
| Vantoancodegym/python_json_ex | 2 bt voi json/baitap doc json/read_json_file_thong_ke.py | read_json_file_thong_ke.py | py | 1,169 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 19,
"usage_type": "call"
}
] |
2666099432 | '''Functions used in river_tracker1.py
Author: guangzhi XU (xugzhi1987@gmail.com; guangzhi.xu@outlook.com)
Update time: 2019-05-10 11:03:36.
'''
from __future__ import print_function
import numpy as np
import pandas as pd
import networkx as nx
from skimage import measure
from skimage import morphology
from scipy import ndimage
import matplotlib.pyplot as plt
import cdms2 as cdms
import MV2 as MV
from genutil import statistics as stats
import cdutil
from utils import rdp
from utils import funcs
from utils import peak_prominence2d as pp2d
NX_VERSION=nx.__version__[0]
def plotGraph(graph, ax=None, show=True):
    '''Helper func to plot the graph of an AR coordinates

    Args:
        graph (networkx graph): nodes keyed by (y, x) pixel indices.
        ax (matplotlib axis or None): axis to draw on; a new figure is
            created when None.
        show (bool): whether to display the figure (non-blocking).
    '''
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    # Node keys are (y, x); matplotlib positions want (x, y).
    xy = [(node[1], node[0]) for node in graph.nodes()]
    positions = dict(zip(graph.nodes(), xy))
    nx.draw(graph, ax=ax, pos=positions, node_size=15,
            node_color='darkgray', edge_color='dimgray')
    if show:
        plt.show(block=False)
    return
def areaFilt(mask, area, min_area=None, max_area=None):
    '''Filter AR binary masks by region areas

    Args:
        mask (ndarray): 2D binary mask with detected objects shown as 1s.
        area (ndarray): 2D map showing grid cell areas in km^2.
        min_area (float or None): if not None, minimum area to keep an
            object in <mask>.
        max_area (float or None): if not None, maximum area to keep an
            object in <mask>.
    Returns:
        result (ndarray): 2D binary mask with objects area-filtered.
    '''
    # Nothing to filter: return the input untouched.
    if min_area is None and max_area is None:
        return mask
    # Label 4-connected regions; label 0 is the background.
    labels = measure.label(mask, connectivity=1)
    nlabels = labels.max() + 1
    # Total physical area of each labelled region.
    region_areas = ndimage.sum(area, labels, np.arange(nlabels))
    keep = np.ones(nlabels, bool)
    if min_area is not None:
        keep = keep & (region_areas >= min_area)
    if max_area is not None:
        keep = keep & (region_areas <= max_area)
    # Always drop the background region.
    keep[0] = False
    # Map the per-label decision back onto the 2D grid (int 0/1 mask, as
    # the original np.where chain produced).
    return keep.astype(int)[labels]
def spherical2Cart(lat, lon):
    '''Convert spherical (lat, lon) in degrees to a 3D Cartesian unit
    vector [x, y, z], with z pointing to the north pole and x toward
    (lat=0, lon=0).
    '''
    colat = (90 - lat) * np.pi / 180.
    azim = lon * np.pi / 180.
    sin_colat = np.sin(colat)
    return np.array([np.cos(azim) * sin_colat,
                     np.sin(azim) * sin_colat,
                     np.cos(colat)])
def cart2Spherical(x, y, z, shift_lon):
    '''Convert Cartesian coordinates back to (lat, lon) in degrees.

    Longitudes are wrapped into [0, 360), then any longitude below
    <shift_lon> is shifted up by 360 so the output range becomes
    [shift_lon, shift_lon + 360). The 3rd component of the returned
    array is a placeholder of ones with the same shape as lat.
    '''
    radius = np.sqrt(x**2 + y**2 + z**2)
    lat = 90. - np.arccos(z / radius) / np.pi * 180
    lon = np.arctan2(y, x) / np.pi * 180
    lon = (lon + 360) % 360
    lon = np.where((lon >= 0) & (lon < shift_lon), lon + 360, lon)
    return np.array([lat, lon, np.ones(lat.shape)])
def computeTheta(p1, p2):
    '''Unit tangent vector (in Cartesian space) at <p1> of the arc |p1-p2|.

    <p1>, <p2>: (lat, lon) coordinates in degrees.
    Returns the un-normalized (zero) vector when the points coincide.
    '''
    v1 = spherical2Cart(p1[0], p1[1])
    v2 = spherical2Cart(p2[0], p2[1])
    # Remove from v2 its component along v1; the remainder is tangent to
    # the sphere at p1.
    tangent = v2 - np.dot(v1, v2) * v1
    size = np.linalg.norm(tangent)
    if size > 0:
        tangent = tangent / size
    return tangent
def wind2Cart(u, v, lats, lons):
    '''Rotate local (u, v) winds into the global Cartesian frame,
    consistent with spherical2Cart.
    '''
    lat_rad = lats * np.pi / 180
    lon_rad = lons * np.pi / 180
    # Meridional component projected onto the local horizontal plane.
    vh = v * np.sin(lat_rad)
    ux = -u * np.sin(lon_rad) - vh * np.cos(lon_rad)
    uy = u * np.cos(lon_rad) - vh * np.sin(lon_rad)
    uz = v * np.cos(lat_rad)
    return np.array([ux, uy, uz])
def cart2Wind(vs, lats, lons):
    '''Rotate a Cartesian wind vector back into local (u, v) components;
    exact inverse of wind2Cart.
    '''
    lat_rad = lats * np.pi / 180
    lon_rad = lons * np.pi / 180
    # Only the y- and z-components are needed to recover (u, v).
    uy = vs[1]
    uz = vs[2]
    u = uy / np.cos(lon_rad) + uz * np.tan(lat_rad) * np.tan(lon_rad)
    v = uz / np.cos(lat_rad)
    return u, v
def maskToGraph(mask, quslab, qvslab, costhetas, sinthetas, edge_eps,
        connectivity=2):
    '''Create graph from AR mask

    Each masked grid cell becomes a node keyed by its (y, x) index.
    A directed edge is added from a cell to a neighbor when the flux
    component pointing toward that neighbor is at least <edge_eps> of the
    total flux magnitude there; edge weight decays exponentially with the
    flux so that Dijkstra's *shortest* path follows the *strongest* flux.

    Args:
        mask (ndarray): 2D binary map showing the location of an AR with 1s.
        quslab (cdms.TransientVariable): 2D map of u-flux.
        qvslab (cdms.TransientVariable): 2D map of v-flux.
        costhetas (cdms.TransientVariable): (n * m) 2D slab of grid cell shape:
            cos=dx/sqrt(dx^2+dy^2).
        sinthetas (cdms.TransientVariable): (n * m) 2D slab of grid cell shape:
            sin=dy/sqrt(dx^2+dy^2).
        edge_eps (float): float in (0,1), minimal proportion of flux component
            in a direction to total flux to allow edge building
            in that direction. Defined in Global preamble.
        connectivity (int): 1 or 2. 4- or 8- connectivity in defining neighbor-
            hood relationship in a 2D square grid.
    Returns:
        g (networkx.DiGraph): directed planar graph constructed from AR mask
            and flows.
    '''
    quslab=np.array(quslab)
    qvslab=np.array(qvslab)
    # Total flux magnitude; used to normalize directional components.
    wsslab=np.sqrt(quslab**2+qvslab**2)
    g=nx.DiGraph()
    # 1 connectivity edges
    # the list approach
    '''
    y,x=np.where(mask)
    zipcor=zip(y,x)
    right=[(yi,xi) for yi,xi in zipcor if (yi,xi+1) in zipcor]
    left=[(yi,xi) for yi,xi in zipcor if (yi,xi-1) in zipcor]
    up=[(yi,xi) for yi,xi in zipcor if (yi+1,xi) in zipcor]
    down=[(yi,xi) for yi,xi in zipcor if (yi-1,xi) in zipcor]
    # nodes to the right/left/up/down
    right0=[(yi,xi+1) for yi,xi in right]
    left0=[(yi,xi-1) for yi,xi in left]
    up0=[(yi+1,xi) for yi,xi in up]
    down0=[(yi-1,xi) for yi,xi in down]
    '''
    # the shifting approach: a cell is 1 in e.g. `right` iff both it and
    # its right neighbor are masked.
    # NOTE(review): np.roll wraps around the array boundary -- presumably
    # AR masks never touch the map edge after preprocessing; confirm.
    right=np.roll(mask, -1, axis=1)*mask
    left=np.roll(mask, 1, axis=1)*mask
    up=np.roll(mask, -1, axis=0)*mask
    down=np.roll(mask, 1, axis=0)*mask
    def addWeightedEdges2(nodes1,speedslab,d):
        '''Add directed edges to graph. For shifting approach

        nodes1: binary map of candidate start cells for direction <d>;
        speedslab: flux component along <d>; d: one of
        'r','l','u','d','tr','br','tl','bl'.
        '''
        # Fraction of the total flux pointing along this direction; zero
        # where the total flux itself is zero (avoid division by zero).
        ratio=np.where(wsslab==0., 0, speedslab/wsslab)
        idx=np.where(ratio>=edge_eps, 1, 0)*nodes1
        idx=zip(*np.where(idx>0))
        for ii, (yii,xii) in enumerate(idx):
            # nii: start, nii2: end
            yii=int(yii)
            xii=int(xii)
            nii=(yii,xii)
            if d=='r':
                nii2=(yii,xii+1)
            elif d=='l':
                nii2=(yii,xii-1)
            elif d=='u':
                nii2=(yii+1,xii)
            elif d=='d':
                nii2=(yii-1,xii)
            elif d=='tr':
                nii2=(yii+1,xii+1)
            elif d=='br':
                nii2=(yii-1,xii+1)
            elif d=='tl':
                nii2=(yii+1,xii-1)
            elif d=='bl':
                nii2=(yii-1,xii-1)
            meanivt=speedslab[yii,xii]
            # Stronger flux -> smaller weight -> preferred by Dijkstra.
            g.add_edge(nii,nii2,
                    weight=np.exp(-meanivt/1e2),
                    ivt=meanivt)
    def addWeightedEdges(nodes1,nodes2,speedslab):
        '''Add directed edges to graph. For the list approach
        (kept for reference; currently unused -- see commented calls below).
        '''
        # nii: start, nii2: end
        for nii,nii2 in zip(nodes1,nodes2):
            if speedslab[nii]/wsslab[nii]>=edge_eps:
                meanivt=speedslab[nii]
                g.add_edge(nii,nii2,
                        weight=np.exp(-meanivt/1e2),
                        ivt=meanivt)
    # add 1 connectivity edges: axis-aligned neighbors use the raw u/v
    # flux components (sign-flipped for left/down).
    #addWeightedEdges(right,right0,quslab)
    #addWeightedEdges(left,left0,-quslab)
    #addWeightedEdges(up,up0,qvslab)
    #addWeightedEdges(down,down0,-qvslab)
    addWeightedEdges2(right,quslab,'r')
    addWeightedEdges2(left,-quslab,'l')
    addWeightedEdges2(up,qvslab,'u')
    addWeightedEdges2(down,-qvslab,'d')
    # 2 connectivity edges
    if connectivity==2:
        # the list approach
        '''
        tr=[(yi,xi) for yi,xi in zipcor if (yi+1,xi+1) in zipcor]
        br=[(yi,xi) for yi,xi in zipcor if (yi-1,xi+1) in zipcor]
        tl=[(yi,xi) for yi,xi in zipcor if (yi+1,xi-1) in zipcor]
        bl=[(yi,xi) for yi,xi in zipcor if (yi-1,xi-1) in zipcor]
        tr0=[(yi+1,xi+1) for yi,xi in tr]
        br0=[(yi-1,xi+1) for yi,xi in br]
        tl0=[(yi+1,xi-1) for yi,xi in tl]
        bl0=[(yi-1,xi-1) for yi,xi in bl]
        '''
        # the shifting approach
        tr=np.roll(np.roll(mask, -1, axis=0), -1, axis=1)*mask
        br=np.roll(np.roll(mask, 1, axis=0), -1, axis=1)*mask
        tl=np.roll(np.roll(mask, -1, axis=0), 1, axis=1)*mask
        bl=np.roll(np.roll(mask, 1, axis=0), 1, axis=1)*mask
        # add 2 connectivity edges: diagonal neighbors project the flux
        # onto the diagonal using the grid-cell aspect (costhetas/sinthetas).
        #addWeightedEdges(tr,tr0,quslab*costhetas+qvslab*sinthetas)
        #addWeightedEdges(br,br0,quslab*costhetas-qvslab*sinthetas)
        #addWeightedEdges(tl,tl0,-quslab*costhetas+qvslab*sinthetas)
        #addWeightedEdges(bl,bl0,-quslab*costhetas-qvslab*sinthetas)
        addWeightedEdges2(tr,quslab*costhetas+qvslab*sinthetas,'tr')
        addWeightedEdges2(br,quslab*costhetas-qvslab*sinthetas,'br')
        addWeightedEdges2(tl,-quslab*costhetas+qvslab*sinthetas,'tl')
        addWeightedEdges2(bl,-quslab*costhetas-qvslab*sinthetas,'bl')
    return g
def getARAxis(g, quslab, qvslab, mask):
    '''Find AR axis from AR region mask

    Boundary cells where flux enters the region are used as path sources
    and cells where flux exits as path targets; among all Dijkstra paths
    between them, the one with the largest accumulated IVT is taken as
    the AR axis.

    Args:
        g (networkx.DiGraph): directed planar graph constructed from AR mask
            and flows. See maskToGraph().
        quslab (cdms.TransientVariable): 2D map of u-flux.
        qvslab (cdms.TransientVariable): 2D map of v-flux.
        mask (ndarray): 2D binary map showing the location of an AR with 1s.
    Returns:
        path (ndarray): Nx2 array storing the AR axis coordinate indices in
            (y, x) format.
        axismask (ndarray): 2D binary map with same shape as <mask>, with
            grid cells corresponding to coordinates in <path>
            set to 1s.
    '''
    nodes=list(g.nodes())
    #---------------Find boundary nodes---------------
    # Boundary = mask minus its erosion; the sign of (grad(mask) . flux)
    # distinguishes inflow (>0) from outflow (<0) boundary cells.
    edge=mask-morphology.binary_erosion(mask)
    gy,gx=np.gradient(np.array(mask))
    inedge=(gx*quslab+gy*qvslab)*edge
    inedgecoor=np.where(inedge>0)
    # NOTE: zip() is lazy under Python 3; it is consumed once by set().
    inedgecoor=zip(inedgecoor[0],inedgecoor[1])
    # Keep only boundary cells that are actually nodes of the graph.
    inedgecoor=list(set(inedgecoor).intersection(nodes))
    outedgecoor=np.where(inedge<0)
    outedgecoor=zip(outedgecoor[0],outedgecoor[1])
    outedgecoor=list(set(outedgecoor).intersection(nodes))
    n1=len(inedgecoor)
    n2=len(outedgecoor)
    # when mask is at edge of the map. Rarely happens.
    if n1==0:
        inedgecoor=nodes
        n1=len(inedgecoor)
    if n2==0:
        outedgecoor=nodes
        n2=len(outedgecoor)
    # dists[i, j]: accumulated IVT of the best path from inflow node i to
    # outflow node j (0 when no path exists).
    dists=np.zeros((n1,n2))
    def sumDists(path,attr,g):
        '''Sum edge distances along a path'''
        s=0
        for ii in range(len(path)-1):
            # Edge-attribute access API differs between networkx 1.x and 2.x.
            if NX_VERSION=='2':
                sii=g[path[ii]][path[ii+1]][attr]
            else:
                sii=g.edge[path[ii]][path[ii+1]][attr]
            # penalize sharp turns. Doesn't make big difference but notably
            # slower
            '''
            if ii+2<len(path):
                pii1=(lats[path[ii][0]], lons[path[ii][1]])
                pii2=(lats[path[ii+1][0]], lons[path[ii+1][1]])
                pii3=(lats[path[ii+2][0]], lons[path[ii+2][1]])
                theta1=computeTheta(pii1,pii2)
                theta2=computeTheta(pii2,pii3)
                dtheta=theta1.dot(theta2)
                dtheta=abs(dtheta)**1
                #if ii==0:
                #dtheta_old=1.
                #dtheta=np.mean([dtheta,dtheta_old])
                #sii=sii*dtheta
                #dtheta_old=dtheta
            '''
            s+=sii
        return s
    #---------------Find "longest" path---------------
    # For each inflow node, find its weighted-shortest paths to all
    # reachable outflow nodes and record the one with the largest
    # accumulated IVT.
    for ii in range(n1):
        eii=inedgecoor[ii]
        pathsii=nx.single_source_dijkstra_path(g,eii,weight='weight')
        pathsii=dict([(kk,vv) for kk,vv in pathsii.items() if kk in outedgecoor])
        if len(pathsii)>0:
            distdict=dict([(kk, sumDists(vv,'ivt',g)) for kk,vv in pathsii.items()])
            nodeii=sorted(distdict,key=distdict.get)[-1]
            distii=distdict[nodeii]
            dists[ii,outedgecoor.index(nodeii)]=distii
    if np.max(dists)==0:
        # this may happen when a mask is touching the map edges, and inedgecoor
        # outedgecoor can't be linked by a path. Very rarely happen, but damn
        # annoying. A fallback solution is to use an undirected graph linking
        # the most inward and most outward pixels.
        mostin=np.unravel_index(np.argmax(inedge), mask.shape)
        mostout=np.unravel_index(np.argmin(inedge), mask.shape)
        g_und=g.to_undirected()
        try:
            path=nx.dijkstra_path(g_und,mostin,mostout,weight='weight')
        except:
            # if it still can't find a path, make a fully connected network
            # (edge_eps=-inf admits every direction).
            g_full=maskToGraph(mask, quslab, qvslab, np.ones(mask.shape),
                    np.ones(mask.shape), -np.inf)
            path=nx.dijkstra_path(g_full,mostin,mostout,weight='weight')
    else:
        # Pick the (inflow, outflow) pair with the overall largest IVT sum.
        maxidx=np.argmax(dists)
        yidx,xidx=np.unravel_index(maxidx,(n1,n2))
        path=nx.dijkstra_path(g,inedgecoor[yidx],outedgecoor[xidx],weight='weight')
    # get a binary mask marking the axis cells
    axismask=np.zeros(mask.shape)
    for (y,x) in path:
        axismask[y,x]=1
    path=np.array(path)
    return path, axismask
def cropMask(mask, edge=4):
    '''Cut out a bounding box around mask==1 areas

    Args:
        mask (ndarray): 2D binary map showing the location of an AR with 1s.
        edge (int): number of pixels as margin on all 4 sides (clipped at
            the array boundary).
    Returns:
        sub (ndarray): sub region of <mask> surrounding all cells with
            value 1, including the margin.
        (yy, xx): y-, x- index arrays of the cut box; can later be used
            in applyCropIdx(new_slab, (yy, xx)) to crop the same region
            from another array.
    Raises:
        Exception: when <mask> contains no 1s at all.
    '''
    rows, cols = np.where(mask == 1)
    if len(rows) == 0:
        raise Exception("mask empty")
    # Bounding box of the 1s, expanded by <edge> and clipped to the grid.
    y1 = max(0, np.min(rows) - edge)
    y2 = min(mask.shape[0], np.max(rows) + edge)
    x1 = max(0, np.min(cols) - edge)
    x2 = min(mask.shape[1], np.max(cols) + edge)
    yy = np.arange(y1, y2)
    xx = np.arange(x1, x2)
    return mask[y1:y2, x1:x2], (yy, xx)
def applyCropIdx(slab, cropidx):
    '''Cut out a bounding box from given 2d slab given corner indices

    Args:
        slab (ndarray): 2D array to cut a box from.
        cropidx (tuple): (y, x) coordinate indices, output from cropMask().
    Returns:
        cropped (ndarray): 2D sub array cut from <slab> using <cropidx> as
            boundary indices. When <slab> is a cdms variable, the lat/lon
            axes are cropped and re-attached; plain ndarrays are returned
            as-is.
    '''
    cropped = np.array(slab)[np.ix_(*cropidx)]
    # Try to rebuild cdms latitude/longitude axes for the cropped region;
    # plain ndarrays fail the attribute lookups and simply fall through.
    try:
        lat_vals = slab.getLatitude()[:][cropidx[0]]
        lon_vals = slab.getLongitude()[:][cropidx[1]]
        lat_ax = cdms.createAxis(lat_vals)
        lat_ax.designateLatitude()
        lat_ax.id = 'y'
        lat_ax.units = 'degree'
        lat_ax.name = 'latitude'
        lon_ax = cdms.createAxis(lon_vals)
        lon_ax.designateLongitude()
        lon_ax.id = 'x'
        lon_ax.units = 'degree'
        lon_ax.name = 'longitude'
        cropped = MV.array(cropped)
        cropped.setAxis(0, lat_ax)
        cropped.setAxis(1, lon_ax)
    except:
        pass
    return cropped
def insertCropSlab(shape, cropslab, cropidx, axislist=None):
    '''Insert the cropped sub-array back to a larger empty slab

    Args:
        shape (tuple): (n, m) size of the larger slab.
        cropslab (ndarray): 2D array to insert.
        cropidx (tuple): (y, x) coordinate indices, output from cropMask(),
            defines where <cropslab> will be inserted into.
    Kwargs:
        axislist (list or None): if list, a list of cdms.TransientAxis
            objs to attach to the result (making it a TransientVariable).
    Returns:
        2D slab of <shape>: zeros everywhere except the box at <cropidx>,
        which is filled with <cropslab>.
    '''
    canvas = np.zeros(shape)
    canvas[np.ix_(*cropidx)] = cropslab
    if axislist is None:
        return canvas
    # Attach the provided cdms axes, turning the array into a
    # TransientVariable.
    canvas = MV.array(canvas)
    canvas.setAxisList(axislist)
    return canvas
def getMaskEdge(mask):
    '''Get the ordered boundary cell indices around non-zeros values in a
    binary mask

    Args:
        mask (ndarray): 2D binary mask.
    Returns:
        edge (ndarray): Nx2 array storing (x, y) coordinate indices of the
            grid cells in <mask> that form the boundary of
            objects defined as non-zero values, ordered along
            the boundary by funcs.getLineFromPoints().
    '''
    # Boundary = mask minus its erosion.
    edge = mask - morphology.binary_erosion(mask)
    yidx, xidx = np.where(edge > 0)
    # Build the Nx2 (x, y) index array explicitly. The original
    # np.array(zip(...)) only worked under Python 2; on Python 3 it
    # produces a useless 0-d object array wrapping the zip iterator.
    edge = np.column_stack([xidx, yidx])
    edge = funcs.getLineFromPoints(edge)
    return edge
def partPeaks(cropmask, cropidx, orislab, max_ph_ratio):
    '''Separate local maxima by topographical prominence

    Args:
        cropmask (ndarray): 2D binary array, defines regions of local maxima.
        cropidx (tuple): (y, x) coordinate indices, output from cropMask().
        orislab (ndarray): 2D array, giving magnitude/height/intensity values
                           defining the topography.
        max_ph_ratio (float): maximum peak/height ratio. Local peaks with
                              a peak/height ratio larger than this value is
                              treated as an independent peak.
    Returns:
        result (ndarray): 2D binary array, similar as the input <cropmask>
                          but with connected peaks (if any) separated so that
                          each connected region (with 1s) denotes an
                          independent local maximum.
    '''
    cropslab=applyCropIdx(orislab,cropidx)
    # include the domain edge in the prominence computation when the crop
    # touches the boundary of the original slab
    if 0 in cropidx[0] or 0 in cropidx[1] or orislab.shape[0]-1 in\
            cropidx[0] or orislab.shape[1]-1 in cropidx[1]:
        include_edge=True
    else:
        include_edge=False
    # compute prominences
    peaks,peakid,peakpro,peakparents=pp2d.getProminence(cropslab*cropmask,
            10.,include_edge=include_edge,centroid_num_to_center=1,verbose=False)
    peakheights=(peakpro>0)*cropslab*cropmask
    ratios=cropmask*peakpro/peakheights
    # take maxima whose prominence/height ratio> max_ph_ratio
    localpeaks=np.where(ratios>max_ph_ratio)
    # NOTE: materialize as a list -- zip() is a one-shot iterator on
    # Python 3 while <localpeaks> is len()-ed and iterated repeatedly below
    localpeaks=list(zip(localpeaks[0],localpeaks[1]))
    mask1=np.zeros(cropmask.shape) # modified mask
    # residual mask, the union of complimentary masks. A complimentary mask
    # is the sea level mask that separates a peak from its parent. Note that
    # peaks' sea levels are not necessarily at the same height.
    resmask=np.zeros(cropmask.shape)

    def breakPeaks(yidx,xidx,localpeaks,col):
        '''Separate the contour of a peak from its parent by iteratively
        rising the sea level
        '''
        labels=morphology.label(cropslab*cropmask>col)
        dropthis=False
        while True:
            plabels=[labels[yjj,xjj] for yjj,xjj in localpeaks if labels[yjj, xjj]==labels[yidx, xidx]]
            # stop once this peak is alone in its connected component
            if len(plabels)==1:
                break
            col+=5.
            if col>cropslab[yidx,xidx]:
                # sea level rose above the peak itself: give up on splitting
                dropthis=True
                col-=5.
                break
            labels=morphology.label(cropslab*cropmask>col)
        tmpmask=np.zeros(cropmask.shape)
        if not dropthis:
            # grow the peak region back down to the separating sea level
            tmpmask[yidx,xidx]=1
            tmpmask=morphology.reconstruction(tmpmask,cropslab>col,'dilation')
        return tmpmask,col

    if len(localpeaks)==1:
        mask1=cropmask
    else:
        # sort by prominence/height ratios
        ratios=[ratios[int(yjj), int(xjj)] for yjj,xjj in localpeaks]
        heights=[peakheights[int(yjj), int(xjj)] for yjj, xjj in localpeaks]
        sortidx=np.argsort(ratios)
        ratios.sort()
        localpeaks=[localpeaks[idjj] for idjj in sortidx]
        for yjj,xjj in localpeaks:
            yjj=int(yjj)
            xjj=int(xjj)
            idjj=peakid[yjj,xjj]
            coljj=peaks[idjj]['col_level']
            if peaks[idjj]['parent']==0 and peakheights[yjj,xjj]==np.max(heights):
                # the heighest peak: claim everything not already assigned
                tmpmask=np.zeros(cropslab.shape)
                tmpmask[yjj,xjj]=1
                tmpmask=morphology.reconstruction(tmpmask,
                        (cropmask-resmask)>0,'dilation')
                mask1=mask1+tmpmask
            else:
                # separate local peaks
                tmpmask,coljj2=breakPeaks(yjj,xjj,localpeaks,coljj)
                # if childrens overlap, may not need this anymore
                if (tmpmask+mask1).max()>1:
                    tmpmask2=np.zeros(cropmask.shape)
                    tmpmask2[yjj,xjj]=1
                    tmpmask=morphology.reconstruction(tmpmask2,tmpmask-tmpmask*resmask,'dilation')
                mask1=mask1+tmpmask
                resmask=np.where((resmask==1) | (cropslab<coljj2),1,0)
    result=insertCropSlab(orislab.shape,mask1,cropidx)
    return result
def getARData(slab, quslab, qvslab, anoslab, quano, qvano, areas,
        mask_list, axis_list, timestr, param_dict, shift_lon, isplot,
        outputdir):
    '''Fetch AR related data

    Args:
        slab (cdms.TransientVariable): (n * m) 2D array of IVT, in kg/m/s.
        quslab (cdms.TransientVariable): (n * m) 2D array of u-flux, in kg/m/s.
        qvslab (cdms.TransientVariable): (n * m) 2D array of v-flux, in kg/m/s.
        anoslab (cdms.TransientVariable): (n * m) 2D array of IVT anomalies,
                                          in kg/m/s.
        quano (cdms.TransientVariable): (n * m) 2D array of u-flux anomalies,
                                        in kg/m/s.
        qvano (cdms.TransientVariable): (n * m) 2D array of v-flux anomalies,
                                        in kg/m/s.
        areas (cdms.TransientVariable): (n * m) 2D grid cell area slab, in km^2.
        mask_list (list): list of 2D binary masks, each with the same shape as
                          <anoslab> etc., and with 1s denoting the location of a
                          found AR.
        axis_list (list): list of AR axis coordinates. Each coordinate is defined
                          as a Nx2 ndarray storing (y, x) indices of the axis
                          (indices defined in the matrix of corresponding mask
                          in <masks>.)
        timestr (str): string of time snap.
        param_dict (dict): parameter dict defined in Global preamble.
        shift_lon (float): starting longitude of data domain, defined in Global
                           preamble.
        isplot (bool): if True, create plot of AR axis, flux orientation and
                       cross-sectional flux for each AR.
        outputdir (str): folder to save plots. If None, don't save. If <isplot>
                         is False, not relevant.
    Returns:
        labels (cdms.TransientVariable): (n * m) 2D int map showing all ARs
                                         at current time. Each AR is labeled by
                                         an int label, starting from 1. Background
                                         is filled with 0s.
        angles (cdms.TransientVariable): (n * m) 2D map showing orientation
                                         differences between AR axes and fluxes,
                                         for all ARs. In degrees.
        crossfluxes (cdms.TransientVariable): (n * m) 2D map showing cross-
                                              sectional fluxes in all ARs.
                                              In kg/m/s.
        anocrossflux (cdms.TransientVariable): similar as <crossfluxes> but for
                                               anomalous fluxes (corresponding
                                               to <anoslab>).
        df (pandas.DataFrame): AR record table. Each row is an AR, see code
                               below for columns.

    NOTE(review): <quano>, <qvano>, <shift_lon> and <outputdir> are accepted
    but not used in this body (the plotting call that used them is commented
    out) -- presumably kept for interface compatibility; confirm.
    '''
    # detection thresholds, defined in the Global preamble
    max_isoq=param_dict['max_isoq']
    min_length=param_dict['min_length']
    min_length_hard=param_dict['min_length_hard']
    rdp_thres=param_dict['rdp_thres']
    min_area=param_dict['min_area']
    lonax=slab.getLongitude() # NOTE: max > 360
    latax=slab.getLatitude()
    # prepare outputs
    labels=MV.zeros(slab.shape)
    angles=MV.zeros(slab.shape)
    crossfluxes=MV.zeros(slab.shape)
    results={}
    #-----------------Loop through ARs-----------------
    for ii in range(len(mask_list)):
        maskii=mask_list[ii]
        # region properties, in pixel units
        rpii=measure.regionprops(maskii, intensity_image=np.array(slab))[0]
        # get centroid (IVT-weighted), converted from pixel to lat/lon
        centroidy,centroidx=rpii.weighted_centroid
        centroidy=latax[int(centroidy)]
        centroidx=lonax[int(centroidx)]
        # get axis coordinate array
        skelii=axis_list[ii]
        latsii=latax[skelii[:,0]]
        lonsii=lonax[skelii[:,1]]
        axisii=np.c_[latsii,lonsii]
        # segment axis using rdp
        axis_rdpii=np.array(rdp.rdpGC(axisii.tolist(),rdp_thres)) # lat,lon
        # area
        areaii=(maskii*areas).sum() # km^2
        # compute length as the sum of great-circle segment lengths
        lens=funcs.greatCircle(axis_rdpii[:-1,0], axis_rdpii[:-1,1],
                axis_rdpii[1:,0], axis_rdpii[1:,1])/1e3
        lenii=lens.sum() #km
        # skip if too small and too short
        if areaii<min_area or lenii<min_length_hard:
            continue
        # mean width
        widthii=areaii/lenii # km
        # mask contour
        contii=funcs.getBinContour(maskii,lonax,latax)
        # isoperimetric quotient (1 for a circle, smaller for elongated shapes)
        isoquoii=4*np.pi*rpii.area/rpii.perimeter**2
        # length/width ratio
        ratioii=lenii/widthii
        # mean strength (area-weighted IVT average inside the AR mask)
        slabii=MV.masked_where(maskii==0,slab)
        strengthii=cdutil.averager(slabii,axis='xy',
                weights=['generate','generate'])
        # strength std
        strengthstdii=float(stats.std(slabii,axis='xy'))
        # anomaly strength
        anoslabii=MV.masked_where(maskii==0,anoslab)
        anostrengthii=cdutil.averager(anoslabii,axis='xy',
                weights=['generate','generate'])
        # max strength
        max_strengthii=float(MV.max(slabii))
        # compute angles and cross-section flux of total flux
        cropmask,cropidx=cropMask(maskii)
        cropskelii=skelii-np.array([cropidx[0].min(), cropidx[1].min()])
        cropu=applyCropIdx(quslab,cropidx)
        cropv=applyCropIdx(qvslab,cropidx)
        anglesii,anglesmeanii,crossfluxii,seg_thetasii=crossSectionFlux(
                cropmask, cropu, cropv, axis_rdpii)
        # create plots
        if isplot:
            pass
            #plotARCrosssectionFlux(cropmask, cropu, cropv, cropskelii, axis_rdpii,
            #'%s AR-%d' %(timestr, ii+1), shift_lon, anglesii, anglesmeanii,
            #crossfluxii, seg_thetasii, outputdir)
        # insert crop back to the big map
        anglesii=insertCropSlab(maskii.shape,anglesii,cropidx,
                slab.getAxisList())
        anglesii=MV.where(maskii==1,anglesii,0)
        crossfluxii=insertCropSlab(maskii.shape,crossfluxii,cropidx,
                slab.getAxisList())
        crossfluxii=MV.where(maskii==1,crossfluxii,0)
        # mean meridional flux
        cropv=applyCropIdx(qvslab,cropidx)
        cropv=MV.masked_where(cropmask==0,cropv)
        qvmeanii=cdutil.averager(cropv,axis='xy',weights=['generate',
                'generate'])
        # is candidate a strict AR: relaxed if too round, too short, or with
        # non-poleward (<=0) mean meridional flux
        is_relaxedii=False
        if isoquoii>max_isoq or ratioii<2:
            is_relaxedii=True
        if lenii<min_length:
            is_relaxedii=True
        if qvmeanii<=0:
            is_relaxedii=True
        # accumulate this AR into the global output maps
        labels=labels+maskii*(ii+1)
        angles=angles+anglesii
        crossfluxes=crossfluxes+crossfluxii
        results[ii+1]={
                'id': ii+1,
                'time':timestr,
                'contour_y': contii.vertices[:,1],
                'contour_x': contii.vertices[:,0],
                'centroid_y': centroidy,
                'centroid_x': centroidx,
                'axis_y':axisii[:,0],
                'axis_x':axisii[:,1],
                'axis_rdp_y':axis_rdpii[:,0],
                'axis_rdp_x':axis_rdpii[:,1],
                'area': areaii,
                'length': lenii,
                'width': widthii,
                'iso_quotient':isoquoii,
                'LW_ratio':ratioii,
                'strength':strengthii,
                'strength_ano':anostrengthii,
                'strength_std':strengthstdii,
                'max_strength':max_strengthii,
                'mean_angle': float(anglesmeanii),
                'is_relaxed':is_relaxedii,
                'qv_mean':qvmeanii
                }
    # attach geo axes and CF-style metadata to the output maps
    labels.setAxisList(slab.getAxisList())
    angles.setAxisList(slab.getAxisList())
    crossfluxes.setAxisList(slab.getAxisList())
    labels.id='labels'
    labels.long_name='AR labels'
    labels.standard_name=labels.long_name
    labels.title=labels.long_name
    labels.units=''
    angles.id='angles'
    angles.long_name='AR moisture flux orientation difference'
    angles.standard_name=angles.long_name
    angles.title=angles.long_name
    angles.units='degree'
    crossfluxes.id='ivt_cross'
    crossfluxes.long_name='AR total cross sectional moisture flux'
    crossfluxes.standard_name=crossfluxes.long_name
    crossfluxes.title=crossfluxes.long_name
    crossfluxes.units=getattr(slab, 'units', '')
    keys=['id', 'time', 'contour_y', 'contour_x', 'centroid_y', 'centroid_x',
            'axis_y', 'axis_x', 'axis_rdp_y', 'axis_rdp_x',
            'area', 'length', 'width', 'iso_quotient', 'LW_ratio',
            'strength', 'strength_ano', 'strength_std', 'max_strength',
            'mean_angle', 'is_relaxed', 'qv_mean']
    df=pd.DataFrame(results).T
    if len(df)>0:
        # enforce a stable column order
        df=df[keys]
    return labels,angles,crossfluxes,df
def uvDecomp(u0, v0, i1, i2):
    '''Split total u-/v-fluxes into background and transient components

    The total IVT is i0 = i1 + i2. Each flux component is partitioned in
    proportion to the IVT components: the background part scales with
    i1/i0 and the transient part with i2/i0.

    Args:
        u0 (cdms.TransientVariable): nd array of total u-flux.
        v0 (cdms.TransientVariable): nd array of total v-flux.
        i1 (cdms.TransientVariable): nd array of the reconstruction
                                     (background) component of IVT.
        i2 (cdms.TransientVariable): nd array of the anomalous component
                                     of IVT (i2 = IVT - i1).
    Returns:
        u1, u2 (cdms.TransientVariable): background and transient u-flux.
        v1, v2 (cdms.TransientVariable): background and transient v-flux.
    '''
    # total IVT used as the partitioning denominator
    i0 = i1 + i2
    # u-flux split
    u1 = u0 * i1 / i0
    u2 = u0 * i2 / i0
    # v-flux split
    v1 = v0 * i1 / i0
    v2 = v0 * i2 / i0
    return u1, u2, v1, v2
def save2DF(result_dict):
    '''Concatenate per-time-step AR tables into one chronologically
    sorted DataFrame

    Args:
        result_dict (dict): key: time str in 'yyyy-mm-dd hh:00'
                            value: pandas dataframe. See getARData().
    Returns:
        result_df (pandas.DataFrame): AR record table containing records
                                      from multiple time steps sorted by
                                      time.
    '''
    result_df = None
    for frame in result_dict.values():
        if result_df is None:
            # first time step seeds the table
            result_df = frame
        else:
            result_df = pd.concat([result_df, frame], axis=0,
                    ignore_index=True)
    # parse the time strings so records can be ordered chronologically
    result_df['time'] = pd.to_datetime(result_df.time)
    return result_df.sort_values(by='time')
def plotAR(ardf, ax, bmap):
    '''Helper function to plot the regions and axes of ARs

    Args:
        ardf (pandas.DataFrame): table containing AR records.
        ax (matplotlib axis): axis to plot onto.
        bmap (Basemap obj): defining the geo map.
    '''
    for ii in range(len(ardf)):
        vv=ardf.iloc[ii]
        isrelaxkk=vv['is_relaxed']
        # plot contour
        px=vv['contour_x']
        py=vv['contour_y']
        # project lon/lat to map coordinates
        px,py=bmap(px,py)
        # NOTE(review): both branches yield 1, so linewidth never varies;
        # a different width for relaxed ARs was presumably intended -- confirm.
        linewidth=1 if isrelaxkk else 1
        # relaxed ARs are drawn dotted, strict ARs solid
        linestyle=':' if isrelaxkk else '-'
        ax.plot(px,py,color='k',linestyle=linestyle,linewidth=linewidth)
        # plot axis
        px=vv['axis_x']
        py=vv['axis_y']
        px,py=bmap(px,py)
        ax.plot(px,py,'g:',linewidth=1.5)
        # plot cross flux text
        '''
        lenkk=vv['length']
        areakk=vv['area']
        widthkk=vv['width']
        cx=float(vv['centroid_x'])%360
        cy=float(vv['centroid_y'])
        cx,cy=bmap(cx,cy)
        strkk=r'ID=%d, $R=%.0f$' %(ii+1,np.sqrt(areakk/3.14)) + '\n'+\
                r'$L = %d km$' %lenkk +'\n'+\
                r'$W = %d km$' %widthkk
        ax.annotate(strkk,xy=(cx,cy),
                horizontalalignment='center',
                verticalalignment='center',
                fontsize=8,
                bbox=dict(facecolor='white',alpha=0.5))
        '''
    return
def getNormalVectors(point_list, idx):
    '''Get the unit normal and tangent vectors at a point on the AR axis

    At point P = point_list[idx], Theta is the tangent direction that evenly
    bisects the angle formed by the directions toward the two neighbouring
    points P1 = point_list[idx-1] and P2 = point_list[idx+1]. The normal is
    perpendicular to the plane spanned by P's position vector and Theta,
    i.e. the plane dividing the two axis sections meeting at P.

    Args:
        point_list (list): list of (lat, lon) coordinates.
        idx (int): index of the point in <point_list> in question; must have
                   a neighbour on each side.
    Returns:
        normi (tuple): (x, y, z) Cartesian unit normal vector at the point.
        thetai (tuple): (x, y, z) Cartesian unit tangent vector Theta.
    '''
    point = point_list[idx]
    point_cart = spherical2Cart(*point)
    # tangent directions toward the two neighbouring axis points
    theta_prev = computeTheta(point, point_list[idx - 1])
    theta_next = computeTheta(point, point_list[idx + 1])
    # bisecting tangent direction
    theta = (theta_prev + theta_next) / 2.
    # normal to the plane spanned by the position vector and theta
    norm = np.cross(point_cart, theta)
    return norm / np.linalg.norm(norm), theta / np.linalg.norm(theta)
def crossSectionFlux(mask, quslab, qvslab, axis_rdp):
    '''Compute setion-wise orientation differences and cross-section fluxes
    in an AR

    Args:
        mask (ndarray): CROPPED (see cropMask and applyCropIdx) 2D binary map
                        showing the location of an AR with 1s.
        quslab (cdms.TransientVariable): CROPPED (n * m) 2D array of u-flux,
                                         in kg/m/s.
        qvslab (cdms.TransientVariable): CROPPED (n * m) 2D array of v-flux,
                                         in kg/m/s.
        axis_rdp (ndarray): Nx2 array storing the (lat, lon) coordinates of
                            rdp-simplified AR axis.
    Returns:
        angles (TransientVariable): 2D map with the same shape as <mask>,
                                    showing section-wise orientation
                                    differences between horizontal flux (as
                                    in <quslab>, <qvslab>) and the AR axis of
                                    that section. In degrees. Regions outside
                                    of AR (0s in <mask>) are masked.
        anglesmean (float): area-weighted averaged of <angles> inside <mask>.
        crossflux (TransientVariable): 2D map with the same shape as <mask>,
                                       the section-wise cross-section fluxes
                                       in the AR, defined as the projection
                                       of fluxes onto the AR axis, i.e. flux
                                       multiplied by the cos of <angles>.
        seg_thetas (list): list of (x, y, z) Cartesian coordinates of the
                           tangent vectors along section boundaries.
    '''
    # get coordinates
    axislist=quslab.getAxisList()
    lats=quslab.getLatitude()[:]
    lons=quslab.getLongitude()[:]
    lonss,latss=np.meshgrid(lons,lats)
    # convert to cartesian coordinates
    carts=spherical2Cart(latss,lonss)
    vs=wind2Cart(quslab,qvslab,latss,lonss)
    # unit flux vectors at every grid cell
    vsnorm=np.linalg.norm(vs,axis=0)
    vsnorm=vs/vsnorm[None,:,:]
    # loop through segments to get orientation differences
    nsegs=len(axis_rdp)-1
    seg_thetas=[]
    angles=np.zeros(mask.shape)
    for ii in range(nsegs):
        pic=spherical2Cart(*axis_rdp[ii])
        pi1c=spherical2Cart(*axis_rdp[ii+1])
        # setL/setR select the grid cells on the segment side of the two
        # dividing planes at the segment's end points
        if ii==0:
            setL=1.
            thetai=0 # dummy place holder
        else:
            # get evenly dividing angle theta and normal vector to theta
            normi,thetai=getNormalVectors(axis_rdp, ii)
            # dot products between normal vector and grid coordinates
            dotsi=(normi[:,None,None]*carts).sum(axis=0)
            setL=np.where(dotsi*(normi.dot(pi1c))>=0,1,0)
        if ii==nsegs-1:
            setR=1.
            thetai=0 # dummy place holder
        else:
            normi1,thetai=getNormalVectors(axis_rdp, ii+1)
            dotsi1=(normi1[:,None,None]*carts).sum(axis=0)
            setR=np.where(dotsi1*(normi1.dot(pic))>0,1,0)
        segii=setL*setR*mask
        seg_thetas.append(thetai)
        # sel the correct region if shape too curvy
        segregii=measure.label(segii)
        if segregii.max()>1:
            # keep only the connected piece containing the segment start point
            piidx=[funcs.findIndex(axis_rdp[ii][0],lats),\
                    funcs.findIndex(axis_rdp[ii][1],lons)]
            for jj in range(segregii.max()):
                segjj=np.where(segregii==jj+1,1,0)
                if segjj[piidx[0],piidx[1]]==1:
                    segii=segjj
                    break
        # mean orientation of AR axis segment
        meanori=np.cross(pic,pi1c)
        meanori=meanori/np.linalg.norm(meanori)
        # orientation of flux vectors
        '''
        fluxori=np.cross(carts,vs,axisa=0,axisb=0) # output: ny,nx,3
        norms=np.linalg.norm(fluxori,axis=2)
        fluxori=fluxori/(1e-6+norms[:,:,None])
        # get angles as arccos of the dot product of meanori and fluxori
        anglesii=(meanori[None,None,:]*fluxori).sum(axis=-1)
        anglesii=anglesii*segii
        '''
        # get sin(angle of flux vector and axis segment plane)
        # sign of sin() is: >0: flux vector aligns with meanori, and it
        # is pointing towards the cold side (according to thermal wind).
        # sin() <0: flux vector aligns against meanori, and it is pointing
        # towards the warm side.
        anglesii=(meanori[:,None,None]*vsnorm).sum(axis=0)
        #anglesii=np.sqrt(1-cos_alphaii**2)*np.where(cos_alphaii<0,-1,1)
        anglesii=anglesii*segii
        angles=angles+anglesii
    # compute cross section flux: project fluxes onto the axis using
    # cos(angle) = sqrt(1 - sin(angle)^2)
    angles=np.array(angles)
    cos_angles=np.sqrt(1-angles**2)
    crossflux_c=cos_angles[None,:,:]*vs
    # convert to local tangent winds
    crossflux_u,crossflux_v=cart2Wind(crossflux_c,latss,lonss)
    crossflux=np.sqrt(crossflux_u**2+crossflux_v**2)
    crossflux=MV.masked_where(mask==0,crossflux)
    crossflux.setAxisList(axislist)
    # convert cos to angle in degrees
    angles=np.arcsin(angles)/np.pi*180
    angles=MV.masked_where(mask==0,angles)
    angles.setAxisList(axislist)
    anglesmean=cdutil.averager(angles,axis='xy',weights=['generate','generate'])
    return angles,anglesmean,crossflux,seg_thetas
| Clynie/AR_tracker | river_tracker1_funcs.py | river_tracker1_funcs.py | py | 40,512 | python | en | code | null | github-code | 1 | [
{
"api_name": "networkx.__version__",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "... |
41489295874 | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2021-01-17 19:53
import os
import tempfile
from typing import Iterable
import torch
from elit.utils.io_util import merge_files
from elit.utils.time_util import CountdownTimer
class FileCache(object):
    """Base class for caches backed by a single on-disk file.

    Args:
        filename (str): path of the backing file. If falsy, a unique
            temporary path is generated.
        delete (bool): if True, ``close()`` (and garbage collection, via
            ``__del__``) removes the backing file.
    """

    def __init__(self, filename=None, delete=True) -> None:
        self.delete = delete
        if not filename:
            # Reserve a unique temp path, then close the handle right away.
            # The original kept the NamedTemporaryFile object open (a file
            # handle leak) and, with delete=True, tied the file's lifetime
            # to garbage collection of that object. Creating it with
            # delete=False and letting our own close() do the removal makes
            # the lifetime deterministic.
            tmp = tempfile.NamedTemporaryFile(
                prefix='elit-cache-', suffix='.pkl', delete=False)
            tmp.close()
            filename = tmp.name
        self._filename = filename

    def close(self):
        """Remove the backing file when this cache owns it (delete=True)."""
        if self.delete:
            if os.path.isfile(self._filename):
                os.remove(self._filename)

    def __del__(self):
        # Best-effort cleanup; close() is idempotent.
        self.close()
class RandomAccessFileCache(FileCache):
    """Disk-backed cache with random access by key.

    Values are appended to the backing file with ``torch.save`` and located
    again through an in-memory key -> byte-offset index.
    """

    def __init__(self, filename=None) -> None:
        super().__init__(filename)
        # BUG FIX: open the resolved path self._filename; the original
        # opened the raw `filename` argument, which raised TypeError when
        # no filename was given and the base class had generated a
        # temporary one.
        self.fp = open(self._filename, 'wb+')
        # key -> start offset of the serialized value in the file
        self.offsets = dict()

    def __setitem__(self, key, value):
        # Always write to the end of file
        self.fp.seek(0, 2)
        start = self.fp.tell()
        torch.save(value, self.fp, _use_new_zipfile_serialization=False)
        self.offsets[key] = start

    def __getitem__(self, key):
        offset = self.offsets.get(key, None)
        assert offset is not None, f'{key} does not exist in the cache'
        self.fp.seek(offset)
        return torch.load(self.fp)

    def __contains__(self, key):
        return key in self.offsets

    def close(self):
        """Close the file handle, then let the base class delete the file."""
        if self.fp:
            self.fp.close()
            self.fp = None
        super().close()

    def __len__(self):
        # number of stored keys
        return len(self.offsets)
class SequentialFileCache(FileCache):
    """Disk cache that stores an iterator's batches for sequential replay.

    File layout: the batch count (saved first), followed by each batch, all
    serialized with torch.save. When the count is not known up front the
    batches are written to a '.content' file first and the count is prepended
    afterwards via merge_files().
    """

    def __init__(self, iterator: Iterable = None, size=None, filename=None, delete=True, device=None) -> None:
        super().__init__(filename, delete)
        if isinstance(device, int):
            # int -> torch.device; negative ints select CPU
            device = torch.device(f'cuda:{device}' if device >= 0 else torch.device('cpu'))
        self.device = device
        self.size = size
        # If the cache is already there then load the size and return
        if not delete and filename and os.path.isfile(filename):
            if not size:
                # the stored batch count is the first object in the file
                with open(self._filename, "rb") as f:
                    self.size = torch.load(f)
            return
        os.makedirs(os.path.dirname(self._filename), exist_ok=True)
        # Otherwise generate the cache
        timer = CountdownTimer(size) if size else None
        with open(self._filename, "wb") as f:
            if size:
                # known size: write the count header up front
                torch.save(size, f, _use_new_zipfile_serialization=False)
            for i, batch in enumerate(iterator):
                torch.save(batch, f, _use_new_zipfile_serialization=False)
                if timer:
                    timer.log(f'Caching {self._filename} [blink][yellow]...[/yellow][/blink]', erase=True)
            # NOTE(review): raises NameError if <iterator> is empty --
            # presumably callers always supply a non-empty iterator; confirm.
            self.size = i + 1
        if not size:
            # unknown size: prepend the now-known count as an index header
            _content = self._filename + '.content'
            os.rename(self._filename, _content)
            _index = self._filename + '.index'
            with open(_index, "wb") as f:
                torch.save(self.size, f, _use_new_zipfile_serialization=False)
            merge_files([_index, _content], self._filename)
            os.remove(_content)
            os.remove(_index)

    def __iter__(self):
        # Re-read the count header, then stream the batches in order,
        # mapping each onto the configured device.
        with open(self._filename, "rb") as f:
            self.size = torch.load(f)
            for i in range(self.size):
                batch = torch.load(f, map_location=self.device)
                yield batch

    def __len__(self):
        # number of cached batches (may be None until written/iterated)
        return self.size
| emorynlp/seq2seq-corenlp | elit/common/cache.py | cache.py | py | 3,538 | python | en | code | 13 | github-code | 1 | [
{
"api_name": "tempfile.NamedTemporaryFile",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
... |
18151751727 | import torch
from src.test_statistics import *
from src.utils import get_W_matrix,KMM_weights_for_W_matrix
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.base import clone
import numpy as np
import random
import math
def get_binned_weights(weights, n_bins):
    """Discretize 1-D weights into ``n_bins`` ordinal bins.

    Returns an (N, 1) array of bin labels (0 .. n_bins-1), one per weight.
    """
    column = weights.reshape(-1, 1)
    binner = KBinsDiscretizer(n_bins=n_bins, encode="ordinal")
    binner.fit(column)
    return binner.transform(column)
def binned_permutation(binned_weights):
    """Return a random permutation of range(len(binned_weights)) that only
    exchanges positions *within* each bin label.

    Args:
        binned_weights (ndarray): (N, 1) array of ordinal bin labels, as
            produced by get_binned_weights().
    Returns:
        ndarray: length-N index permutation; indices are shuffled only among
            positions sharing the same bin label.
    """
    perm = np.array(range(len(binned_weights)))
    # BUG FIX: iterate over every bin label 0..max inclusive; the original
    # range(max) silently left the highest bin unshuffled.
    for label in range(int(binned_weights.max().item()) + 1):
        in_bin = (binned_weights == label).squeeze(1)
        perm[in_bin] = np.random.choice(perm[in_bin], len(perm[in_bin]), replace=False)
    return perm
def invert_permutation(permutation):
    """Invert a permutation array: result[permutation[i]] == i."""
    size = len(permutation)
    inverse = np.empty_like(permutation)
    # scatter the positions 0..N-1 to where each index was sent
    inverse[permutation] = np.arange(size, dtype=inverse.dtype)
    return inverse
def weights_tol(weights, weights_minmax):
    """Clamp propensity weights into [weights_minmax, 1 - weights_minmax].

    Operates in place on the given tensor and returns it, matching the
    original two-step masked assignment.
    """
    return weights.clamp_(min=weights_minmax, max=1 - weights_minmax)
def kernel_permutation_test(data_train,data_test,X_ker,Y_ker,weights_model,test_stat="DATE",n_bins=10,n_permutations=200,num_train_permutations=1,reg=[1,1],permute_weights=False,func="cme", KMM_weights = False,weights_minmax=10**(-5)):
    """Weighted-permutation kernel test for a distributional treatment effect.

    Fits cross-fitted propensity models, computes the chosen test statistic
    ("DATE", "DETT" or "Diff"), then builds a null distribution by permuting
    units within propensity-score bins and re-computing the statistic.
    Returns {'p_val', 'stat', 'permuted_stats'}.

    NOTE(review): `reg=[1,1]` is a mutable default argument (harmless here
    since it is never mutated). `permute_weights` is accepted but unused.
    For test_stat == "Diff", `permuted_model_list` is never initialized, so
    the permutation loop raises NameError at random.randrange() -- confirm
    whether the "Diff" path is still supported.
    """
    # cross-fitting: each split's weights come from the model trained on
    # the other split
    weights_model_train = clone(weights_model)
    weights_model_test = clone(weights_model)
    weights_model_train.fit(data_train.X,data_train.T)
    weights_model_test.fit(data_test.X,data_test.T)
    weights_train = weights_tol(torch.tensor(weights_model_test.predict_proba(data_train.X)[:,1]).float(),weights_minmax)
    weights_test = weights_tol(torch.tensor(weights_model_train.predict_proba(data_test.X)[:,1]).float(),weights_minmax)
    # re-shuffle the pooled data into fresh train/test halves
    data_full = data_test.join(data_train)
    weights = torch.concat([weights_train,weights_test])
    shuffle = torch.randperm(len(data_full.T))
    weights = weights[shuffle]
    data_full = data_full.return_shuffled_data(shuffle)
    data_train,data_test = data_full.split()
    weights_train_perm,weights_test_perm = weights[:math.floor(len(weights)/2)],weights[math.floor(len(weights)/2):]
    weights_model_train.fit(data_train.X,data_train.T)
    weights_test = weights_tol(torch.tensor(weights_model_train.predict_proba(data_test.X)[:,1]).float(),weights_minmax)
    # base statistic on the observed assignment
    if test_stat == "DATE":
        W0_weights = 1/KMM_weights_for_W_matrix(X_ker,data_train.X0,data_train.X,KMM_weights)
        W1_weights = 1/KMM_weights_for_W_matrix(X_ker,data_train.X1,data_train.X,KMM_weights)
        W0 = get_W_matrix(X_ker(data_train.X0).evaluate(),reg[0],func,weights=W0_weights)
        W1 = get_W_matrix(X_ker(data_train.X1).evaluate(),reg[1],func,weights=W1_weights)
        base_stat = DATE_test_stat(data_train,data_test,X_ker,Y_ker,weights_test,W0,W1)
    elif test_stat == "DETT":
        W1_weights = 1/KMM_weights_for_W_matrix(X_ker,data_train.X1,data_train.X0,KMM_weights)
        W1 = get_W_matrix(X_ker(data_train.X1).evaluate(),reg[1],func,weights=W1_weights)
        base_stat = DETT_test_stat(data_train,data_test,X_ker,Y_ker,weights_test,W1)
    elif test_stat == "Diff":
        base_stat = diff_test_stat(data_train,data_test,X_ker,Y_ker)
    # permutations are constrained to swap units with similar propensity
    binned_weights_train = get_binned_weights(weights_train_perm, n_bins)
    binned_weights_test = get_binned_weights(weights_test_perm, n_bins)
    # pre-compute models/W-matrices for a few permuted training splits
    if test_stat == "DATE":
        permuted_model_list = [(data_train,weights_model_train,W0,W1)]
    elif test_stat == "DETT":
        permuted_model_list = [(data_train,weights_model_train,W1)]
    for i in range(num_train_permutations):
        train_permutation = binned_permutation(binned_weights_train)
        permuted_train_data = data_train.return_permuted_data(train_permutation)
        if test_stat == "DATE":
            W0_weights_perm = 1/KMM_weights_for_W_matrix(X_ker,permuted_train_data.X0,permuted_train_data.X,KMM_weights)
            W1_weights_perm = 1/KMM_weights_for_W_matrix(X_ker,permuted_train_data.X1,permuted_train_data.X,KMM_weights)
            W0_permuted = get_W_matrix(X_ker(permuted_train_data.X0).evaluate(),reg[0],func,W0_weights_perm)
            W1_permuted = get_W_matrix(X_ker(permuted_train_data.X1).evaluate(),reg[1],func,W1_weights_perm)
            permuted_weights_model = clone(weights_model)
            permuted_weights_model.fit(permuted_train_data.X,permuted_train_data.T)
            permuted_model_list.append((permuted_train_data,permuted_weights_model,W0_permuted,W1_permuted))
        elif test_stat == "DETT":
            W1_weights_perm = 1/KMM_weights_for_W_matrix(X_ker,permuted_train_data.X1,permuted_train_data.X,KMM_weights)
            W1_permuted = get_W_matrix(X_ker(permuted_train_data.X1).evaluate(),reg[1],func,W1_weights_perm)
            permuted_weights_model = clone(weights_model)
            permuted_weights_model.fit(permuted_train_data.X,permuted_train_data.T)
            permuted_model_list.append((permuted_train_data,permuted_weights_model,W1_permuted))
    # null distribution (includes the observed statistic itself)
    permuted_stats = [base_stat]
    for i in range(n_permutations):
        test_permutation = binned_permutation(binned_weights_test)
        permuted_test_data = data_test.return_permuted_data(test_permutation)
        # NOTE(review): randrange(len - 1) never samples the last entry of
        # permuted_model_list; presumably randrange(len) was intended -- confirm.
        sample_index = random.randrange(len(permuted_model_list)-1)
        permuted_models = permuted_model_list[sample_index]
        if test_stat == "DATE":
            test_stat_weight_permuted = weights_tol(torch.tensor(permuted_models[1].predict_proba(permuted_test_data.X)[:,1]).float(),weights_minmax)
            permuted_stats.append(DATE_test_stat(permuted_models[0],permuted_test_data,X_ker,Y_ker,test_stat_weight_permuted,permuted_models[2],permuted_models[3]))
        elif test_stat == "DETT":
            test_stat_weight_permuted = weights_tol(torch.tensor(permuted_models[1].predict_proba(permuted_test_data.X)[:,1]).float(),weights_minmax)
            permuted_stats.append(DETT_test_stat(permuted_models[0],permuted_test_data,X_ker,Y_ker,test_stat_weight_permuted,permuted_models[2]))
        elif test_stat == "Diff":
            train_permutation = binned_permutation(binned_weights_train)
            permuted_train_data_diff = data_test.return_permuted_data(train_permutation)
            permuted_stats.append(diff_test_stat(permuted_train_data_diff,permuted_test_data,X_ker,Y_ker))
    # one-sided p-value: fraction of (null + observed) stats >= observed
    p_val = np.mean(np.array(permuted_stats) >= base_stat)
    return {"p_val": p_val,"stat": base_stat ,"permuted_stats": permuted_stats}
def goodness_of_fit_test(fit_samples,data_train,data_test,X_ker,Y_ker,weights_model,t=1,test_stat="DATE",reg=1,func="cme", KMM_weights = False):
    """Goodness-of-fit statistic for samples drawn from a fitted model.

    Computes the DATE or DETT goodness-of-fit statistic for treatment arm
    <t> (1 uses weights, 0 uses 1-weights), comparing <fit_samples> against
    the test data under the W-matrix built from the training data.

    NOTE(review): <reg> defaults to the scalar 1 but is indexed as reg[0] /
    reg[1] below, so the default crashes; callers presumably pass a 2-list
    as in kernel_permutation_test -- confirm.
    """
    # propensity weights on the test split (positive-class probability)
    weights = torch.tensor(weights_model.predict_proba(data_test.X)[:,1]).float()
    if test_stat == "DATE":
        if t==1:
            W1_weights = 1/KMM_weights_for_W_matrix(X_ker,data_train.X1,data_train.X,KMM_weights)
            W1 = get_W_matrix(X_ker(data_train.X1).evaluate(),reg[1],func,weights=W1_weights)
            fit_stat = DATE_goodness_of_fit(fit_samples,data_train,data_test,X_ker,Y_ker,weights,W1,t=1)
        else:
            W0_weights = 1/KMM_weights_for_W_matrix(X_ker,data_train.X0,data_train.X,KMM_weights)
            W0 = get_W_matrix(X_ker(data_train.X0).evaluate(),reg[0],func,weights=W0_weights)
            fit_stat = DATE_goodness_of_fit(fit_samples,data_train,data_test,X_ker,Y_ker,1-weights,W0,t=0)
    elif test_stat == "DETT":
        if t==1:
            W1_weights = 1/KMM_weights_for_W_matrix(X_ker,data_train.X1,data_train.X,KMM_weights)
            W1 = get_W_matrix(X_ker(data_train.X1).evaluate(),reg[1],func,weights=W1_weights)
            fit_stat = DETT_goodness_of_fit(fit_samples,data_train,data_test,X_ker,Y_ker,weights,W1,t=1)
        else:
            W0_weights = 1/KMM_weights_for_W_matrix(X_ker,data_train.X0,data_train.X,KMM_weights)
            W0 = get_W_matrix(X_ker(data_train.X0).evaluate(),reg[0],func,weights=W0_weights)
            fit_stat = DETT_goodness_of_fit(fit_samples,data_train,data_test,X_ker,Y_ker,1-weights,W0,t=0)
    return fit_stat
| Jakefawkes/DR_distributional_test | src/test.py | test.py | py | 8,221 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sklearn.preprocessing.KBinsDiscretizer",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 20,
"usage_type": "call"
},
{
"api_name":... |
38354447792 | '''module for building state machines to search through TokenSets'''
import re
from functools import reduce
from typing import List
import networkx as nx
from networkx.drawing.nx_agraph import to_agraph
import auto_types as T
import random
class Spec:
    """A named specification: a set of requirement predicates over tokens."""

    def __init__(self, name: str):
        self.name = name
        self.requirements = set()

    def add_req(self, req):
        """Register a requirement predicate ``req(token, token_set) -> bool``."""
        self.requirements.add(req)

    def match(self, tokens: T.TokenSetFast) -> T.TokenSetFast:
        """Intersect the token subsets satisfying every requirement and
        retag the surviving tokens with this spec's name."""
        filtered = [
            tokens.filter(lambda tok: pred(tok, tokens))
            for pred in self.requirements
        ]
        matched = reduce(T.TokenSetFast.intersect, filtered)
        return matched.map(lambda tok: tok.update_type(self.name))
class Ruleset:
    """A directed-graph grammar over token types.

    Nodes are rule instances (each tagged with a token type); edges define
    which instance may follow which. match() recursively consumes a TokenSet
    along a path through the graph; find() scans a TokenSet for spans that
    satisfy the ruleset.
    """

    def __init__(self, name):
        self.graph = nx.MultiDiGraph()
        self.name = name

    def add_instance(self, _id, _type) -> int:
        """Add rule node <_id> tagged with token type <_type>; return _id."""
        # _id = len(self.graph.nodes)
        self.graph.add_node(_id, type=_type)
        return _id

    def instance_by_id(self, id: str):
        """Return the attribute dict of node <id>."""
        return self.graph.nodes[id]

    def add_relation(self, tail: int, head: int):
        """Allow instance <head> to follow instance <tail>."""
        self.graph.add_edge(tail, head)

    def draw(self, filename='rules.png', label_key='type') -> None:
        """Render the rule graph to <filename> using the graphviz 'dot' layout."""
        G = self.graph.copy()
        labels = {}
        for node in G.nodes:
            labels[node] = {'label': G.nodes[node][label_key]}
        nx.set_node_attributes(G, labels)
        A = to_agraph(G)
        A.layout('dot')
        A.draw(filename)

    def start_types(self) -> List[str]:
        """Return ids of nodes with no incoming edges (possible start rules)."""
        results = []
        for n in self.graph.nodes:
            if self.graph.in_degree(n) == 0:
                results.append(n)
        return results

    def match(self, tokens: T.TokenSetFast, instance_id: int = None) -> T.TokenSetFast:
        '''Returns the first subset of a Tokenset satisifies the ruleset.'''
        if instance_id is None:
            instance_id = self.start_types()[0]
        start_index = tokens.min_index
        first_tokens = tokens.starts_at(start_index)
        target_type = self.instance_by_id(instance_id)['type']
        accepted_tokens = first_tokens.filter(lambda x: x.type == target_type)
        if len(accepted_tokens) == 0:
            # leading token does not carry this rule's type: no match
            return T.TokenSetFast([])
        next_node_ids = list(self.graph.neighbors(instance_id))
        if len(next_node_ids) == 0:
            # terminal rule: the accepted span completes the match
            return tokens.subset(T.Range(start_index, accepted_tokens.max_index))
        # recurse into each successor rule after each accepted token and
        # return the first successful continuation
        # results: List[T.TokenSet] = []
        for node_id in next_node_ids:
            for accepted_tkn in accepted_tokens.content:
                next_range = T.Range(accepted_tkn.match.end, tokens.max_index)
                match = self.match(tokens.subset(next_range), node_id)
                if len(match) > 0:
                    return tokens.subset(T.Range(start_index, match.max_index))
        # if len(results) > 0:
        # min_result_index = min([x.max_index for x in results])
        # return tokens.subset(T.Range(start_index, min_result_index))
        return T.TokenSetFast([])

    def find(self, tokens: T.TokenSetFast, count: int = 100, start: str='cw') -> List[T.TokenSetFast]:
        """Scan <tokens> for up to <count> matches of this ruleset, attempting
        each window from rule node <start>."""
        start_types = self.start_types()  # NOTE(review): computed but unused
        window_size = 300 # in number of characters
        # NOTE(review): hard-coded scan origin (looks like leftover debug
        # state); presumably this should start at tokens.min_index -- confirm.
        i = 2768
        num_found = 0
        results = set()
        while num_found < count and i < tokens.max_index:
            x = tokens.subset(T.Range(i, i + window_size))
            m = self.match(x, start)
            if len(m) == 0:
                # no match here: advance to the next token position
                i = tokens.next_at(i + 1).min_index
            else:
                # record the matched span as a single token of this ruleset's
                # name and continue scanning after it
                results.add(T.Token(self.name, m.compile_to_str(),
                        T.Range(m.min_index, m.max_index)))
                num_found += 1
                i = tokens.next_at(m.max_index).min_index
        return T.TokenSetFast(results)
| maxsun/SemanticScribe | AutoLang/automata.py | automata.py | py | 3,723 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "auto_types.TokenSetFast",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "functools.reduce",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "auto_types.TokenSetFast",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_na... |
14437464114 | from django.shortcuts import render,redirect
from .forms import UserCreateForm,SignUpForm
from django.contrib.auth.forms import UserCreationForm,AuthenticationForm
from django.contrib.auth import login,logout
# Create your views here.
def signup_view(request):
    """Show the signup form on GET; validate and create the account on POST.

    Redirects to 'index' on success, or back to 'accounts:signup' when the
    submitted form is invalid.
    """
    # GET request: respond with the signup HTML page
    if request.method=='GET':
        # NOTE(review): the form class (not an instance) is put in the
        # context; the template engine instantiates it when rendering.
        form=SignUpForm
        context={'form':form}
        return render(request,'accounts/signup.html',context)
    else:
        # POST request: validate the submitted data, then create the member
        form=SignUpForm(request.POST)
        if form.is_valid():
            # create the user from the validated form data
            instance=form.save()
            return redirect('index')
        else:
            return redirect('accounts:signup')
def login_view(request):
    """Show the login form on GET; authenticate and start a session on POST.

    Redirects to 'index' on success; re-renders the login page with the
    form's validation errors on failure.
    """
    if request.method=='GET':
        return render(request,'accounts/login.html',{'form':AuthenticationForm()})
    else:
        # validate the submitted credentials
        form=AuthenticationForm(request,data=request.POST)
        if form.is_valid():
            # attach the authenticated user to the session
            login(request,form.get_user())
            return redirect('index')
        else:
            # BUG FIX: the original called redirect() with render()'s
            # arguments (request, template, context), which is invalid;
            # re-render the form so its errors are shown.
            return render(request,'accounts/login.html',{'form':form})
def logout_view(request):
    """End the session for an authenticated user; anonymous users fall through."""
    if request.user.is_authenticated:
        logout(request)
return redirect('index') | Heeville/likelion_backend_lecture | liongram/accounts/views.py | views.py | py | 1,642 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "forms.SignUpForm",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "forms.SignUpForm",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.shor... |
74913549472 | """"
Wrapper class for the FPDF.
Enables use of HTML.
@author Chase Fleming
4/23/17
"""
from fpdf import FPDF
from operator import itemgetter
title = "Participant Schedule"
def converter(interval):
    """Translate a scheduling interval code into a human-readable string.

    ``interval`` is a sequence whose first element is a pair of numeric
    time codes.  The hundreds digit encodes the day (1 = Friday,
    2 = Saturday) and the remainder encodes the hour of day, where a
    trailing .5 means half past (e.g. 110.5 -> Friday 10:30 am).

    Returns e.g. "Friday 10:00 am to 11:30 am".

    Improvement: the original used a 50-entry lookup dict covering only
    9:00-22:00; the time string is now computed, so any hour code works.
    """
    def clock(code):
        # Strip the day prefix, leaving e.g. 10.5 for 10:30.
        hour_of_day = code % 100
        hour = int(hour_of_day)
        minute = 30 if hour_of_day - hour else 0
        suffix = "am" if hour < 12 else "pm"
        display_hour = hour if hour <= 12 else hour - 12
        return "%d:%02d %s" % (display_hour, minute, suffix)

    start_code, end_code = interval[0]
    day = "Friday" if start_code < 200 else "Saturday"
    return day + " " + clock(start_code) + " to " + clock(end_code)
class PDF(FPDF):
    """FPDF subclass that renders one participant-schedule page per person."""

    def header(self):
        """Draw the two corner logos and the centered page title."""
        self.image("/Users/chasefleming/PycharmProjects/NHFSchedule/docs/images/ACE_logo_Bee.jpg", 10, 10, 50, 50)
        self.image("/Users/chasefleming/PycharmProjects/NHFSchedule/docs/images/generic.jpg", 160, 10, 50, 50)
        self.set_font('Helvetica', '', 28)
        # Center the title on an A4 page (210 mm wide).
        title_width = self.get_string_width(title) + 10
        self.set_x((210 - title_width) / 2)
        self.cell(title_width, 39, title, 0, 1, 'C', 0)

    def footer(self):
        """Print the room legend 1.5 cm above the bottom edge."""
        self.set_y(-15)
        self.set_font('Helvetica', '', 10)
        self.cell(0, 10, "Hilton: Rooms 1-10, Marriott: Rooms 22-30, Hyatt: Rooms 33-42 and Exam Room", 0, 0, 'C')

    def schedule_name(self, name, uid, seed):
        """Render the participant's name and ID code inside bordered, centered boxes."""
        self.set_font('Helvetica', 'B', 22)
        id_code = "ID: " + seed.upper() + "-" + str(uid).upper()
        for line in (name, id_code):
            box_width = self.get_string_width(line) + 10
            self.set_x((210 - box_width) / 2)
            self.set_line_width(1.25)
            self.cell(box_width, 15, line, 1, 1, 'C', 0)
        # Line break before the schedule body.
        self.ln(5)

    def schedule_body(self, schedule):
        """Lay out the schedule rows (event, time, room), sorted by time interval."""
        self.set_margins(10, 10)
        self.set_line_width(.75)
        self.set_font('Helvetica', '', 12)
        for spot in sorted(schedule, key=itemgetter(1)):
            self.cell(80, 15, spot[0], align="C", border=1)
            self.cell(80, 15, converter(spot[1]), align="C", border=1)
            self.cell(30, 15, spot[2], align="C", border=1)
            self.ln(15)

    def print_schedule(self, name, schedule, uid, seed):
        """Emit one complete schedule page for a participant."""
        self.add_page()
        self.schedule_name(name, uid, seed)
        self.schedule_body(schedule)
self.schedule_body(schedule)
| cflemi12/NHFSchedule | sample/FPDFClass.py | FPDFClass.py | py | 3,708 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fpdf.FPDF",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "operator.itemgetter",
"line_number": 83,
"usage_type": "call"
}
] |
12656420663 | from textblob import TextBlob
import nltk
# nltk.download("stopwords")  # downloads the common stop-word list; only needed once
from nltk.corpus import stopwords
from pathlib import Path
import pandas as pd
from operator import itemgetter
import matplotlib.pyplot as plt
from wordcloud import WordCloud
import imageio

stops = stopwords.words("english")

# Demo: a list comprehension keeps only the words that are not stop words.
blob = TextBlob("Today is a beautiful day")
# print([word for word in blob.words if word not in stops])

# Build a word-frequency table for the full text of Romeo and Juliet.
blob = TextBlob(Path('RomeoAndJuliet.txt').read_text())
items = blob.word_counts.items()  # (word, count) pairs for every word

# Drop every pair whose word is a stop word.
items_no_stops = [item for item in items if item[0] not in stops]

# Sort by count, descending, and keep the 20 most frequent words.
sorted_items = sorted(items_no_stops, key=itemgetter(1), reverse=True)
top20 = sorted_items[:20]  # fixed off-by-one: the original sliced 21 entries
print(top20)

df = pd.DataFrame(top20, columns=['word', 'count'])
print(df)

# Bar chart of the top words.
df.plot.bar(x='word', y='count', rot=0, legend=False,
            color=["y", "c", "m", "b", "g", "r"])
plt.gcf().tight_layout()
plt.show()  # fixed: the original referenced plt.show without calling it

# Word cloud shaped by a heart mask; imageio loads the mask image.
text = Path("RomeoAndJuliet.txt").read_text()
mask_image = imageio.imread('mask_heart.png')
wordcloud = WordCloud(colormap='prism', mask=mask_image, background_color='w')
wordcloud = wordcloud.generate(text)
# Save to disk without rebinding: the original reassigned the name to
# to_file()'s return value, which would break plt.imshow if it were None.
wordcloud.to_file("RomeoandJulietHeart.png")
plt.imshow(wordcloud)
print("done") | ericakaze/NLP | nlp_3.py | nlp_3.py | py | 2,610 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "textblob.TextBlob",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "t... |
21889904761 | import os
import glob
import cv2 as cv

# Resize every image in the target folder to 224x224, overwriting in place.
# INTER_AREA is the interpolation of choice for downscaling.
for image_path in glob.glob(os.path.join('DiretorioEscolhido/NomePasta', '*')):
    try:
        resized = cv.resize(cv.imread(image_path), (224, 224), interpolation=cv.INTER_AREA)
        cv.imwrite(image_path, resized)
    except Exception:
        # cv.imread returns None for unreadable files, which makes resize
        # raise; report the offending path instead of aborting the batch.
        # (Fix: the original bare `except:` also swallowed KeyboardInterrupt.)
        print(image_path)
print(f)
| GuilhermeNakahata/ResizeImage | main.py | main.py | py | 264 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "glob.glob",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 11,... |
25160202408 | import numpy as np
import plotly.offline as pyo
import plotly.graph_objs as go

# Seed the RNG so the chart is reproducible across runs.
np.random.seed(56)

# Sample data: 100 evenly spaced x positions with standard-normal y noise.
x_values = np.linspace(0, 1, 100)
y_values = np.random.randn(100)

# First trace: markers joined by lines, shifted up by 5 units so the two
# traces do not overlap.
shifted_y = y_values + 5
trace0 = go.Scatter(x=x_values, y=shifted_y,
                    mode='markers+lines',
                    name='markers')
# Second trace: plain lines on the raw noise.
trace1 = go.Scatter(x=x_values, y=y_values,
                    mode='lines',
                    name='lines')

# Plotly expects the traces gathered in a list.
data = [trace0, trace1]

# Layout is optional; it customises the title, axes, etc.
layout = go.Layout(title='Line Chart')
fig = go.Figure(data=data, layout=layout)
pyo.plot(fig)
| eugeniosp3/udemy_plotly_course | linecharts_plotly.py | linecharts_plotly.py | py | 672 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.random.seed",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
... |
8092338015 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""BMP ๅฎคๆธฉๆฐๅงใปใณใตใผ."""
import logging
from datetime import datetime
import Adafruit_BMP.BMP085 as BMP085
from db import MongoDB
class Bmp(MongoDB):
    """BMP180 room temperature / barometric pressure sensor IO."""

    def __init__(self):
        """Set up the sensor driver and select the 'bmp' Mongo database."""
        super().__init__()
        # Sensor driver handle
        self.sensor = BMP085.BMP085()
        # MongoDB database handle
        self.db = self.client['bmp']

    def __str__(self):
        return "bmp"

    def get_sensor(self):
        """Read every sensor channel once.

        Returns
        -------
        dict or None
            Mapping with temperature, pressure (hPa), altitude,
            sea-level pressure and a timestamp, or None on read failure.
        """
        try:
            return {
                'btemp': float(self.sensor.read_temperature()),
                'press': round(float(self.sensor.read_pressure()) / 100, 1),
                'alti': round(float(self.sensor.read_altitude()) / 100, 1),
                'sealev': float(self.sensor.read_sealevel_pressure()),
                'timestamp': datetime.now()
            }
        except Exception as e:
            # Best-effort read: log and let the caller handle the None.
            logging.error('SensorError: {}'.format(e))
            return None
| akiraseto/airwatch | sensors/models/bmp.py | bmp.py | py | 1,152 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "db.MongoDB",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "Adafruit_BMP.BMP085.BMP085",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "Adafruit_BMP.BMP085",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "datetime.da... |
2818687083 | from flask import current_app
from marshmallow import Schema, ValidationError, fields, validates_schema
from marshmallow.validate import Length
class CreateTaskSchema(Schema):
    """Validation schema for task-creation requests."""

    title = fields.Str(
        required=True,
        validate=Length(min=1),
    )
    description = fields.Str(required=True, validate=Length(max=128))
    user_id = fields.Str(required=True, validate=Length(equal=14))

    @validates_schema
    def validate_user_id(self, data, **kwargs):
        """Check that the title names an existing board and that the user
        belongs to the team the board is assigned to.

        Raises marshmallow.ValidationError on either failure.
        """
        db = current_app.config["DB_CONN"]["factwise"]
        board = db["board"].find_one({"name": data.get("title", "")})
        # Fix: check the board exists *before* dereferencing it — the
        # original read board.get("team_id") first, raising AttributeError
        # for an unknown title instead of a validation error.
        if board is None:
            raise ValidationError("Board not available for the requested title")
        team_user_ids = db["team"].distinct("users.user_id", {"team_id": board.get("team_id")})
        if data.get("user_id") not in team_user_ids:
            raise ValidationError("user_id not belong to the team to whom the board is assigned")
| sidddhesh100/factwise-assingment | schema/CreateTaskSchema.py | CreateTaskSchema.py | py | 1,036 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "marshmallow.Schema",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "marshmallow.fields.Str",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "marshmallow.fields",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "marshmallow... |
72856173474 | import unittest
import itertools
from bisect import bisect
import operator
import sys
if sys .version_info >=(3 ,):
xrange =range
class LebesgueSet(object):
    """A subset of the real line made of finitely many intervals.

    The set is encoded by the sorted list of its interval endpoints
    (``self.points``) plus ``self.left_infinite``, which is True when the
    first interval extends to -infinity.  Consecutive endpoint pairs
    delimit the intervals of the set.
    """

    _inf = float('infinity')  # can be tested with math.isinf()
    _minf = -_inf

    UNION, INTER, XOR = range(3)

    def __init__(self, points, left_infinite=False, fast=False):
        """Create a new LebesgueSet object.

        @ points: a sequence of real numbers
        @ left_infinite : a boolean, True if the first interval is
            semi-infinite ]-infinity, .[
        @ fast : can be set to True if the sequence of points is
            already sorted and has no duplicates.
        """
        if not fast:
            points = sorted(set(points))
        self.points = list(points)
        self.left_infinite = bool(left_infinite)

    def intervals(self):
        """Yield the (lower, upper) bound pairs of the set's intervals."""
        pts = self.points
        n = len(pts)
        start = 0
        if self.left_infinite:
            if not n:
                # No endpoints at all: the set is the whole real line.
                yield (self._minf, self._inf)
                return
            yield (self._minf, pts[0])
            start = 1
        while start + 1 < n:
            yield (pts[start], pts[start + 1])
            start += 2
        if start < n:
            # A dangling last endpoint opens a right-infinite interval.
            yield (pts[start], self._inf)

    def __str__(self):
        return str(list(self.intervals()))

    def __nonzero__(self):
        return self.left_infinite or self.points

    def __invert__(self):
        """Compute the set's complement (same endpoints, flipped parity)."""
        return LebesgueSet(self.points, not self.left_infinite, fast=True)

    def is_bounded(self):
        """True when neither end of the set reaches infinity."""
        return not (self.left_infinite or (len(self.points) & 1))

    def lower_bound(self):
        if self.left_infinite:
            return self._minf
        if not self.points:
            return self._inf
        return self.points[0]

    def upper_bound(self):
        if self.left_infinite ^ (len(self.points) & 1):
            return self._inf
        if not self.points:
            return self._minf
        return self.points[-1]

    def zoom(self, center=0.0, factor=1.0):
        """Return the image of the set under x -> center + factor*(x - center).

        Bug fix: with a negative factor the original called reversed() on a
        generator expression, which raises TypeError; the mapped points are
        now materialized in a list before reversing.
        """
        if factor == 0.0:
            return LebesgueSet((), False, fast=True)
        scaled = [center + factor * (x - center) for x in self.points]
        if factor > 0.0:
            left_infinite = self.left_infinite
        else:
            # A reflection maps an infinite right end to an infinite left
            # end, and reverses the order of the endpoints.
            left_infinite = (self.upper_bound() == self._inf)
            scaled.reverse()
        return LebesgueSet(scaled, left_infinite, fast=True)

    def measure(self):
        """self.measure() -> a finite or infinite float.

        Compute the Lebesgue measure of the set self. If this
        measure is infinite, return float('infinity')."""
        if not self.is_bounded():
            return self._inf
        pts = self.points
        # Endpoints come in (open, close) pairs; sum the pair widths.
        return sum(float(pts[i + 1]) - pts[i] for i in range(0, len(pts), 2))

    def status(self, x_real):
        """self.status(x) returns -1, 0, 1 depending on x being outside,
        on the boundary, or inside the set self (x is a real number)"""
        i = bisect(self.points, x_real)
        if i > 0 and x_real == self.points[i - 1]:
            return 0
        # The parity of the insertion index tells inside from outside.
        return 1 if self.left_infinite ^ (i & 1) else -1

    def is_interior(self, x):
        """True if x is in the topological interior of the set self."""
        return self.status(x) == 1

    def is_exterior(self, x):
        """True if x is in the topological interior of the complement."""
        return self.status(x) == -1

    def is_boundary(self, x):
        """True if x is one end of one of the intervals"""
        return self.status(x) == 0

    def is_adherent(self, x):
        """True if x is in one of the closed intervals defining self."""
        return self.status(x) >= 0

    def deltas(self, negated=False):
        """Yield (endpoint, +/-1) sweep events: +1 opens an interval,
        -1 closes one.  With negated=True the events describe the
        complement of the set instead."""
        infinite = self.left_infinite ^ (1 if negated else 0)
        assert infinite in (0, 1)
        for i, x in enumerate(self.points):
            yield (x, -1 if (infinite ^ (i & 1)) else 1)

    @classmethod
    def operate(cls, op, family):
        """Compute the union, intersection or xor of a family of sets.

        @ op : one of LebesgueSet.UNION or .INTER or .XOR
        @ family : a non empty sequence of LebesgueSet's.

        A real number x belongs to the xor of the family if it belongs
        to an odd number of members of the family.
        """
        family = tuple(family)
        value = sum(member.left_infinite for member in family)
        endpoints = []
        if op == cls.XOR:
            left_infinite = bool(value & 1)
            events = sorted(itertools.chain(*(member.points for member in family)))
            # A point survives the xor iff it occurs an odd number of times.
            for point, group in itertools.groupby(events):
                if len(tuple(group)) & 1:
                    endpoints.append(point)
        else:
            inter = (op == cls.INTER)
            if inter:
                # De Morgan: intersect by sweeping the complements.
                value = len(family) - value
            left_infinite = not value if inter else bool(value)
            events = sorted(itertools.chain(*(member.deltas(inter) for member in family)))
            # Sweep the events; a boundary appears exactly when the running
            # count of open intervals leaves or reaches zero.
            for point, group in itertools.groupby(events, key=operator.itemgetter(0)):
                new_value = value + sum(delta[1] for delta in group)
                if not (value and new_value):
                    endpoints.append(point)
                value = new_value
        return LebesgueSet(endpoints, left_infinite, fast=True)

    def __and__(self, other):
        """self & other -> intersection of self and other."""
        return self.operate(self.INTER, (self, other))

    def __or__(self, other):
        """self | other, self + other -> union of self and other."""
        return self.operate(self.UNION, (self, other))

    __add__ = __or__

    def __xor__(self, other):
        """self ^ other -> symetric difference of self and other."""
        return self.operate(self.XOR, (self, other))

    def __sub__(self, other):
        """self - other -> set difference."""
        return self & ~other

    @classmethod
    def union(cls, family):
        """union of a family of Lebesgue sets."""
        return cls.operate(cls.UNION, family)

    @classmethod
    def inter(cls, family):
        """intersection of a family of Lebesgue sets"""
        return cls.operate(cls.INTER, family)

    @classmethod
    def xor(cls, family):
        """xor of a family of Lebesgue sets."""
        return cls.operate(cls.XOR, family)
class LebesgueTestCase(unittest.TestCase):
    """Randomized consistency tests for LebesgueSet's boolean operations."""

    def setUp(self):
        import random
        self.gauss = random.gauss
        self.randint = random.randint
        self.nbr_families = 100
        self.nbr_test_points = 1000

    def random_point(self):
        return self.gauss(0, 10)

    def random_union(self):
        endpoints = (self.random_point() for _ in range(20))
        return LebesgueSet(endpoints, self.randint(0, 1))

    def random_family(self):
        return [self.random_union() for _ in range(3)]

    def runTest(self):
        for _ in range(self.nbr_families):
            family = self.random_family()
            endpoints = set(itertools.chain(*(u.points for u in family)))
            union_res = LebesgueSet.union(family)
            inter_res = LebesgueSet.inter(family)
            xor_res = LebesgueSet.xor(family)
            for _ in range(self.nbr_test_points):
                # Sample a point that is not an endpoint of any member, so
                # its status is unambiguously inside (1) or outside (-1).
                while True:
                    x = self.random_point()
                    if x not in endpoints:
                        break
                statuses = [u.status(x) for u in family]
                us_, is_, xs_ = (res.status(x)
                                 for res in (union_res, inter_res, xor_res))
                u_expected = 1 if any(s == 1 for s in statuses) else -1
                i_expected = 1 if all(s == 1 for s in statuses) else -1
                x_expected = 1 if sum(s == 1 for s in statuses) & 1 else -1
                self.assertEqual(us_, u_expected, "Union failed")
                self.assertEqual(is_, i_expected, "Intersection failed")
                self.assertEqual(xs_, x_expected, "Xor failed")
if __name__ =="__main__":
unittest .main () | heathkh/iwct | snap/deluge/lebesgueset.py | lebesgueset.py | py | 7,673 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "sys.version_info",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "bisect.bisect",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "itertools.groupby... |
30406506542 | import json
import torch
from torch.utils.data import Dataset
from transformers import GPT2LMHeadModel, GPT2Tokenizer, DataCollatorForLanguageModeling, Trainer, TrainingArguments
from data_preprocess import *
# Load the JSON file with prompt-completion pairs
with open("prompt_completion_pairs.json", "r") as json_file:
    prompt_completion_pairs = json.load(json_file)

# Load the GPT-2 tokenizer
model_name = "gpt2"
tokenizer = GPT2Tokenizer.from_pretrained(model_name)

# Add a pad token to the tokenizer's vocabulary
pad_token = "<PAD>"
tokenizer.add_special_tokens({"pad_token": pad_token})

# Load the GPT-2 model
# NOTE(review): a token was added to the tokenizer above but
# model.resize_token_embeddings() is never called — confirm this is intended.
model = GPT2LMHeadModel.from_pretrained(model_name)
# Define a custom dataset class
class CustomDataset(torch.utils.data.Dataset):
    """Dataset of prompt/completion pairs for causal-LM fine-tuning.

    Each item is the token-id tensor of the string "<prompt>\n<completion>".
    """

    def __init__(self, tokenizer, pairs, block_size):
        # Keep raw (prompt, completion) tuples; tokenization happens
        # lazily in __getitem__.
        self.examples = [(pair["prompt"], pair["completion"]) for pair in pairs]
        self.tokenizer = tokenizer
        self.block_size = block_size

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, index):
        prompt, completion = self.examples[index]
        joined = f"{prompt}\n{completion}"
        token_ids = self.tokenizer.encode(joined, add_special_tokens=True)
        return torch.tensor(token_ids)
# Prepare data for fine-tuning
train_dataset = CustomDataset(tokenizer=tokenizer, pairs=prompt_completion_pairs, block_size=32)
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)  # mlm=False disables masking

# Set up training arguments
training_args = TrainingArguments(
    output_dir="./output",
    overwrite_output_dir=True,
    num_train_epochs=5,
    per_device_train_batch_size=4,
    save_steps=500,  # checkpoint every 500 optimizer steps
    save_total_limit=2,  # keep only the two most recent checkpoints
    logging_dir="./logs",
    logging_steps=100,
    do_train=True,
)

# Initialize Trainer and start fine-tuning
trainer = Trainer(
    model=model,
    args=training_args,
    data_collator=data_collator,
    train_dataset=train_dataset,
)
trainer.train()
| namandua7/Training | GPT model/csv_GPT/fine_tuning.py | fine_tuning.py | py | 2,114 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "transformers.GPT2Tokenizer.from_pretrained",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "transformers.GPT2Tokenizer",
"line_number": 13,
"usage_type": "name"
},
{
"ap... |
43762004245 | import cv2
import numpy as np
import os
import uuid
import copy
def rotateImage(image, angle):
    """Rotate *image* by *angle* degrees about its center, preserving rank.

    cv2.warpAffine drops a trailing singleton channel axis; it is restored
    so the output has the same number of dimensions as the input.
    """
    rank = len(image.shape)
    center = tuple(np.array(image.shape[:2]) / 2)
    rotation = cv2.getRotationMatrix2D(center, angle, 1.0)
    rotated = cv2.warpAffine(image, rotation, image.shape[:2], flags=cv2.INTER_LINEAR)
    if len(rotated.shape) < rank:
        height, width = rotated.shape
        rotated = rotated.reshape((height, width, 1))
    return rotated
def zoom(image, zoom_scale):
    """Zoom *image* by *zoom_scale* while keeping the original frame size.

    zoom_scale > 1 crops the enlarged image back to the original size;
    zoom_scale < 1 pastes the shrunken image centered on a black canvas.
    """
    size = image.shape
    l = len(size)
    image = cv2.resize(image, None, fx=zoom_scale, fy=zoom_scale)
    # cv2.resize drops a trailing singleton channel axis; restore it.
    if len(image.shape) < l:
        y, x = image.shape
        image = image.reshape((y, x, 1))
    new_size = image.shape
    if len(size) == 3:
        if zoom_scale > 1:
            # Crop the central, original-sized window out of the enlarged image.
            return image[int((new_size[0] - size[0]) / 2): int((new_size[0] - size[0]) / 2 + size[0]),
                         int((new_size[1] - size[1]) / 2): int((new_size[1] - size[1]) / 2 + size[1]), :]
        elif zoom_scale == 1:
            return image
        else:
            # Center the shrunken image on a zero-filled canvas of the old size.
            new_image = np.zeros(size).astype('uint8')
            new_image[int((size[0] - new_size[0]) / 2): int((size[0] - new_size[0]) / 2 + new_size[0]),
                      int((size[1] - new_size[1]) / 2): int((size[1] - new_size[1]) / 2 + new_size[1]), :] = image
            return new_image
    # NOTE(review): non-3D inputs fall through and return None implicitly —
    # confirm callers always pass (H, W, C) arrays.
def sample(image, rotation_min, rotation_max, fliplr, flipud, zoom_min, zoom_max):
    """Apply one random augmentation draw: rotation, optional flips, then zoom.

    The RNG call order (rotation angle, lr-flip coin, ud-flip coin, zoom
    factor) matches the original exactly, so seeded runs are unchanged.
    """
    image = rotateImage(image, np.random.uniform(rotation_min, rotation_max))
    if fliplr and np.random.random() < 0.5:
        image = np.fliplr(image)
    if flipud and np.random.random() < 0.5:
        image = np.flipud(image)
    return zoom(image, np.random.uniform(zoom_min, zoom_max))
def augmentor(npy_path, save_path, batch_number=1, rotation_min=0, rotation_max=0, fliplr=False, flipud=False, zoom_min=1, zoom_max=1, input_c=6):
    """Randomly augment every .npy array in npy_path, batch_number times each.

    Each array stacks the image channels (first input_c) with one binary
    mask per object (remaining channels).  After augmenting, masks that
    became empty or degenerate are dropped, and the result is saved into
    save_path under a fresh uuid-suffixed name.
    """
    c = 0  # number of completed passes over the input files
    npy_files = [os.path.join(npy_path, f) for f in os.listdir(npy_path) if f.endswith('.npy')]
    while c < batch_number:
        for npy_file in npy_files:
            data = np.load(npy_file)
            data = sample(data, rotation_min, rotation_max, fliplr, flipud, zoom_min, zoom_max)
            unid = uuid.uuid4().hex
            image = data[:, :, :input_c]
            masks = data[:, :, input_c:]
            num_objs = masks.shape[2]
            print(masks.shape[2])
            # Walk the masks in reverse so np.delete does not shift the
            # indices of masks still to be inspected.
            for i in reversed(range(num_objs)):
                mask = masks[:, :, i]
                # Interpolation fades masks; drop any with no pixel near 255.
                if mask.max() < 250:
                    masks = np.delete(masks, i, axis=2)
                    continue
                mask = mask >= 250
                pos = np.where(mask)
                xmin = np.min(pos[1])
                xmax = np.max(pos[1])
                ymin = np.min(pos[0])
                ymax = np.max(pos[0])
                # Drop masks whose bounding box collapsed to a line/point.
                if xmin >= xmax:
                    masks = np.delete(masks, i, axis=2)
                    continue
                if ymin >= ymax:
                    masks = np.delete(masks, i, axis=2)
                    continue
            print(masks.shape[2])
            print('\n')
            data = np.append(image, masks, axis=2)
            # NOTE(review): splitting on '/' assumes POSIX paths — confirm.
            save_file = npy_file.split('.npy')[0].split('/')[-1] + "_" + unid + ".npy"
            save_file = os.path.join(save_path, save_file)
            np.save(save_file, data)
        c += 1
def balanced_augmentor(image_path, label_path, aug_path, augmentation_batch=1, augmentation_ratio=[],
                       rotation_min=0, rotation_max=0, fliplr=False, flipud=False, zoom_min=1, zoom_max=1):
    """Class-balanced augmentation: each image is augmented a number of
    times looked up from augmentation_ratio by its highest class id, so
    rarer classes receive more synthetic copies.

    Reads <name>.jpg from image_path with <name>_nd.npy masks and
    <name>_cls.npy class ids from label_path; writes uuid-suffixed
    outputs into aug_path.

    NOTE(review): augmentation_ratio=[] is a mutable default argument —
    it is only read here, but confirm callers always pass their own list.
    """
    image_files = [os.path.join(image_path, f) for f in os.listdir(image_path) if f.endswith('.jpg')]
    while augmentation_batch:
        augmentation_batch -= 1
        for image_file in image_files:
            f_name = image_file.split('/')[-1][:-4]
            mask_file = os.path.join(label_path, f_name + "_nd.npy")
            cls_file = os.path.join(label_path, f_name + "_cls.npy")
            # Skip images whose label files are missing.
            if not os.path.isfile(mask_file):
                continue
            if not os.path.isfile(cls_file):
                continue
            image = cv2.imread(image_file)
            masks = np.load(mask_file)
            clses = np.load(cls_file)  # cls should start with 0, e.g. [0, 1, 2, 3, ...]
            # Stack image + masks so one augmentation draw warps both.
            image_mask = np.concatenate((image, masks), axis=2)
            n = augmentation_ratio[int(clses.max())]
            for _ in range(n):
                data = copy.deepcopy(image_mask)
                data = sample(data, rotation_min, rotation_max, fliplr, flipud, zoom_min, zoom_max)
                # NOTE(review): this rebinds `image`/`masks`, clobbering the
                # originals for later draws of the same file — confirm intended.
                image = data[:, :, :3]
                masks = data[:, :, 3:]
                cls = copy.deepcopy(clses)
                if masks.max() < 0.1:
                    continue
                print(masks.shape[2])
                num_objs = masks.shape[2]
                # Reverse walk so np.delete does not shift pending indices;
                # class ids are deleted in lockstep with their masks.
                for i in reversed(range(num_objs)):
                    mask = masks[:, :, i]
                    if mask.max() < 250:
                        masks = np.delete(masks, i, axis=2)
                        cls = np.delete(cls, i)
                        continue
                    mask = mask >= 250
                    pos = np.where(mask)
                    xmin = np.min(pos[1])
                    xmax = np.max(pos[1])
                    ymin = np.min(pos[0])
                    ymax = np.max(pos[0])
                    if (xmin >= xmax) | (ymin >= ymax):
                        masks = np.delete(masks, i, axis=2)
                        cls = np.delete(cls, i)
                        continue
                # Skip draws that lost every mask.
                if masks.shape[2] == 0:
                    continue
                if masks.max() == 0:
                    continue
                print(masks.shape[2])
                print('\n')
                unid = uuid.uuid4().hex
                new_mask_file = os.path.join(aug_path, f_name + "_" + unid + "_nd.npy")
                new_cls_file = os.path.join(aug_path, f_name + "_" + unid + "_cls.npy")
                new_image_file = os.path.join(aug_path, f_name + "_" + unid + ".jpg")
                np.save(new_mask_file, masks)
                np.save(new_cls_file, cls)
                cv2.imwrite(new_image_file, image)
if __name__ == '__main__':
    # Settings for the plain (npy stack) augmentor; commented values show
    # the previously used rotation/zoom ranges.
    config = dict(
        batch_number=6,
        rotation_min=0,  # -90,
        rotation_max=0,  # 90,
        fliplr=True,
        flipud=True,
        zoom_min=1,  # 0.8,
        zoom_max=1,  # 1.2,
        input_c=6)
    # Settings for the class-balanced augmentor (rarer classes get more copies).
    config_ = dict(
        augmentation_batch=5,
        augmentation_ratio=[1, 3, 15, 50, 100],
        rotation_min=-90,
        rotation_max=90,
        fliplr=True,
        flipud=True,
        zoom_min=0.8,
        zoom_max=1.2)

    # image_path = './datasets/Eureka/images/'
    # label_path = './datasets/Eureka/labels/'
    # aug_path = './datasets/Eureka/aug/'
    # balanced_augmentor(image_path, label_path, aug_path, **config_)

    npy_path = './datasets/hypolith/rgb_masks/'
    aug_path = './datasets/hypolith/aug/'
    augmentor(npy_path, aug_path, **config)
| DREAMS-lab/mask_rcnn_pytorch | ndarray_augmentor.py | ndarray_augmentor.py | py | 7,082 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.getRotationMatrix2D",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.warpAffine",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_LINEAR",... |
28415632337 | import copy
from collections import defaultdict
from game_board import GameBoard
########################################################################################################################
# PUBLIC INTERFACE
########################################################################################################################
def solve(input_board, max_iterations):
    """Entry point to the solvers; calling this should yield "good" results.

    Runs a pipeline of solvers, feeding each solver's (always valid)
    intermediate board into the next, and stops early once solved.
    """
    pipeline = (
        _deductive,
        _backtrack,
    )
    board = input_board
    for stage in pipeline:
        board = stage(board, max_iterations)
        if board.is_solved():
            break
    return board
########################################################################################################################
# INTERNALS (here be dragons)
########################################################################################################################
def report_start(solver_name, occupancy):
    """Print a one-line banner when a solver begins, with the board's occupancy."""
    print(f"[{solver_name}] starting. Starting board occupancy at {occupancy:.2f}.")
def report_progress(solver_name, iterations, max_iterations):
    """Print in-place progress (percent of max_iterations) every 100 iterations."""
    if iterations % 100 == 0:
        # end="\r" keeps the progress line overwriting itself on one row.
        print(f"[{solver_name}]: {(iterations/max_iterations)*100:.2f}% of max_iterations.", end="\r")
def report_result(solver_name, solved, occupancy, reached_max_iterations):
    """Print a summary line when a solver finishes."""
    print(f"[{solver_name}] finished. Solved? {solved}. Final occupancy: {occupancy:.2f}. Reached maximum iterations? {reached_max_iterations}")
def _backtrack(input_board, max_iterations):
    """ Use the backtrack method to solve a board.
    The backtrack method is usually described/implemented as a recursive call, but I'm allergic to recursive programs.
    Since this was always going to be a lengthy function, I decided to break up the main loop into sub-functions that
    operate on the function's shared state and describe the atomic functions of the algorithm. This allowed me to define
    the main loop in almost 1-to-1 correspondence with the algorithm. This allows me to both understand the algorithm as
    a composition of operations, and perhaps move out functionality in the future.
    I'm not sure about this structure, but I'll leave it as an example. Perhaps a class would've been a better
    representation.
    Reference: https://en.wikipedia.org/wiki/Sudoku_solving_algorithms#Backtracking
    """
    # Duplicate the input board so we don't change it in place:
    board = copy.deepcopy(input_board)
    # Initialise state
    report_start("Backtrack", board.occupancy())
    # Flattened list of (i, j) cell coordinates, visited in solve order.
    cells = list(board.board_iterator())
    current_cell_idx = 0
    iterations = 0
    # Basic functions on state (closures over board/cells/current_cell_idx)
    def get_current_value():
        """Value of the cell the cursor currently points at (None if empty)."""
        i, j = cells[current_cell_idx]
        val = board.value(i, j)
        return val
    def initialise():
        """Seed the current empty cell with the lowest candidate, 1."""
        i, j = cells[current_cell_idx]
        board.value(i, j, 1)
    def increment():
        """Bump the current cell's value to the next candidate."""
        i, j = cells[current_cell_idx]
        board.increment(i, j)
    def backtrack():
        """Erase the current cell and move the cursor back, skipping fixed cells."""
        nonlocal current_cell_idx
        # Clear cell and backtrack
        i, j = cells[current_cell_idx]
        board.erase(i, j)
        current_cell_idx -= 1
        # Skip over fixed cells
        while current_cell_idx > 0 and board.is_fixed(*cells[current_cell_idx]):
            current_cell_idx -= 1
    def advance():
        """Move the cursor forward, skipping fixed cells."""
        nonlocal current_cell_idx
        current_cell_idx += 1
        # Skip over fixed cells
        while current_cell_idx < len(cells) and board.is_fixed(*cells[current_cell_idx]):
            current_cell_idx += 1
    # The main loop is split into three essential clauses:
    # * If the value is empty, it's initialised as 1.
    # * If the value is out of bounds, we backtrack to the previous element we were looking at.
    # * If the board is not valid, we increment the current value.
    # If none of these clauses get triggered, we simply move on to the next cell.
    # NOTE: 81 is the total cell count — assumes a 9x9 board (TODO confirm
    # for other board sizes).
    while not board.is_solved() and iterations < max_iterations and current_cell_idx < 81:
        iterations += 1
        curr_value = get_current_value()
        report_progress("Backtrack", iterations, max_iterations)
        # If the cell is empty, we set it to 1 and move on
        if curr_value == None:
            initialise()
            continue
        # If the cell or board is not a valid value anymore, we erase it and move back
        if curr_value not in board._valid_values:
            backtrack()
            increment()
            continue
        # If the current value is a valid number, we increment it
        if not board.is_valid():
            increment()
            continue
        # And if the board remains valid, we move on
        advance()
    report_result("Backtrack", board.is_solved(), board.occupancy(), iterations == max_iterations)
    return board
def _deductive(input_board, max_iterations):
    """ Implements a deductive method for solving the given board.
    This is based on the common heuristic everyone uses (I assume) where we pencil in the possible values for each cell
    and then unravel the puzzle from there.
    """
    # Duplicate the input board so we don't change it in place:
    board = copy.deepcopy(input_board)
    report_start("Deductive", board.occupancy())
    # We'll keep the pencilled-in numbers here, keyed by (i, j) cell coords.
    # NOTE(review): values start as sets but exhausted entries are reset to
    # [] (a list) below; emptiness/membership checks still work — confirm
    # the mixed types are intended.
    pencil_board = defaultdict(set)
    valid_values = board._valid_values
    # Sub-functions
    def generate_pencil_board():
        """Pencil in, for every empty cell, each value the board accepts there."""
        print("[Deductive] solver generating pencil board.")
        for i, j in board.board_iterator():
            # Only work on empty cells
            if board.value(i, j) == None:
                # Iterate over all valid values
                for val in valid_values:
                    # Try adding, if it works it's a possible value
                    if try_adding_value(i, j, val):
                        key = (i, j)
                        pencil_board[key].add(val)
                        board.erase(i, j)
    def try_adding_value(i, j, value):
        """ Adds a value to the board only if the resulting board is valid.
        Returns whether the value stuck.
        """
        board.value(i, j, value)
        if not board.is_valid():
            board.erase(i, j)
            return False
        return True
    # Start by pencilling in all possibilities
    generate_pencil_board()
    iterations = 0
    # NOTE(review): len(pencil_board) never shrinks (entries are emptied,
    # not removed), so this loop exits only on solved/max_iterations when
    # any cell was pencilled — confirm intended.
    while iterations < max_iterations and not board.is_solved() and len(pencil_board) > 0:
        iterations += 1
        report_progress("Deductive", iterations, max_iterations)
        # Iterate over the pencilled board
        for (i, j), values in pencil_board.items():
            # If there's only one possible value for a given cell, we write it in
            if len(values) == 1:
                # TODO: why would this not be valid?
                # TODO: surely we can get stuff from the set without making it into a list
                try_adding_value(i, j, list(values)[0])
                pencil_board[(i, j)] = []
                continue
            # If the current value is the only possible position for this value in this sub-board, we write it in
            sub_board_positions = board.sub_board_positions(*board.sub_board_from_indices(i, j))
            for value in values:
                # Evaluate whether the value is the only possibility for this value in this sub-board
                only_possibility_in_sub_board = True
                for sub_i, sub_j in sub_board_positions:
                    # Only check the pencil board if the key exists, since accessing the defaultdict on a non-existing
                    # key will add it to the dict and, thus, crash us as we're changing the dict mid-iteration.
                    if (sub_i, sub_j) in pencil_board.keys():
                        only_possibility_in_sub_board = not (value in pencil_board[(sub_i, sub_j)])
                # Write the value in and break early since we won't have another value for the current cell
                # (breaking right after .remove also avoids mutating the set
                # while it is still being iterated).
                if only_possibility_in_sub_board:
                    board.value(i, j, value)
                    pencil_board[(i, j)].remove(value)
                    break
    report_result("Deductive", board.is_solved(), board.occupancy(), iterations == max_iterations)
    return board
{
"api_name": "copy.deepcopy",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 148,
"usage_type": "call"
}
] |
32054311947 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Order Region by country then name, and add Contact.tax_code."""

    dependencies = [
        ('contacts', '0004_auto_20151210_1426'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='region',
            options={'ordering': ['country', 'name']},
        ),
        migrations.AddField(
            model_name='contact',
            name='tax_code',
            field=models.CharField(
                help_text='Tax Code for Argentina, Brazil, Paraguay, Peru (CUIT/CPF/RUC)',
                max_length=20,
                verbose_name='Tax Code',
                blank=True,
            ),
        ),
    ]
| djangoplicity/djangoplicity-contacts | djangoplicity/contacts/migrations/0005_auto_20151214_1357.py | 0005_auto_20151214_1357.py | py | 647 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterModelOptions",
"line_number": 14,
"usage_type": "call"
... |
24993118256 | import sys
import numpy as np
from fractions import Fraction
if __name__ == "__main__":
    # Usage: challenge4.py <input_file> <first_case> <last_case>
    input_path = sys.argv[1]
    caso_minimo = int(sys.argv[2])
    caso_maximo = int(sys.argv[3])
    # Context managers guarantee both handles are flushed and closed
    # (the original never closed either file, risking lost buffered output).
    with open(input_path, "r") as read_file, open("out.txt", "w") as write_file:
        numero_casos = int(read_file.readline())  # header: total number of cases
        # Each case occupies two lines; skip the cases before the requested window.
        for _ in range(caso_minimo - 1):
            read_file.readline()
            read_file.readline()
        for case_number in range(caso_minimo, caso_maximo + 1):
            numero_entradas = int(read_file.readline())  # consumed; count not otherwise used
            valores = list(map(int, read_file.readline().split()))
            # Scale every value by the LCM so the mean stays an exact integer ratio.
            lcm_val = np.lcm.reduce(valores)
            new_valores = [v * lcm_val for v in valores]
            # Fraction reduces automatically; build it once (original computed it twice).
            frac = Fraction(sum(new_valores), len(new_valores))
            line = "Case #%i: %i/%i\n" % (case_number, frac.numerator, frac.denominator)
            print(line)
            write_file.write(line)
{
"api_name": "sys.argv",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "numpy.lcm.reduce",
"line... |
35938876208 | import numpy as np
import scipy.spatial
import itertools
from sklearn import metrics
def rand_score(X, labels_true, labels_pred):
    """Compute the (unadjusted) Rand index between two labelings.

    A pair of samples counts as correct when the two labelings agree on it:
    both place the pair in the same group, or both place it in different groups.

    :param X: the data; only ``len(X)`` (the sample count) is used
    :param labels_true: ground-truth label per sample
    :param labels_pred: predicted cluster per sample
    :return: fraction of agreeing pairs in [0, 1]
    :raises ZeroDivisionError: if there are fewer than two samples (as the original did)
    """
    correct = 0
    total = 0
    # range() replaces the original's hand-built index list.
    for index1, index2 in itertools.combinations(range(len(X)), 2):
        same_class = labels_true[index1] == labels_true[index2]
        same_cluster = labels_pred[index1] == labels_pred[index2]
        # Agreement in either direction (both same, or both different) is correct;
        # this folds the original's two identical branches into one test.
        if same_class == same_cluster:
            correct += 1
        total += 1
    return float(correct) / total
def cluster_centroids(data, clusters, k):
    """Return a (k, n_features) array of per-cluster column-wise means.

    ``clusters`` is the cluster index assigned to each row of ``data``;
    rows belonging to cluster i are averaged to form centroid i.
    """
    means = [data[clusters == i].mean(axis=0) for i in range(k)]
    return np.array(means)
def kmeans(X, initial_centroids, max_iters):
    """Run Lloyd's k-means on X and score the result against ground-truth labels.

    The last column of ``X`` is treated as the ground-truth class label; the
    remaining columns are the features that get clustered.

    :param X: 2-D array of features with a trailing label column
    :param initial_centroids: array-like of k starting centroids
    :param max_iters: maximum number of assignment/update iterations
    :return: (final_centroids, [NMI, AMI, RI, ARI]) evaluation scores
    """
    eval_mat = []
    clusters = []
    k = len(initial_centroids)
    label_col = X.shape[1] - 1
    label_true = np.copy(X[:, label_col])
    Y = np.copy(X[:, range(0, label_col)])
    try:
        # Numeric labels: make them zero-based.
        # FIX: np.int / np.float were removed in NumPy 1.24 -- use the builtins.
        label_true = label_true.astype(int)
        label_true = label_true - 1
    except ValueError:
        # Non-numeric labels: map each distinct label to a small integer.
        distinct = set(label_true)
        for i in range(len(distinct)):
            ele = distinct.pop()
            for j in range(len(X)):
                if label_true[j] == ele:
                    label_true[j] = str(i)
        label_true = label_true.astype(int)
    Y = Y.astype(float)
    centroids = initial_centroids
    for _ in range(max_iters):
        # n*k matrix of squared distances from every point to every centroid.
        sqdists = scipy.spatial.distance.cdist(Y, centroids, 'sqeuclidean')
        # Index of the closest centroid for each data point.
        clusters = np.argmin(sqdists, axis=1)
        new_centroids = cluster_centroids(Y, clusters, k)
        # Converged: the assignments can no longer change.
        if np.array_equal(new_centroids, centroids):
            break
        centroids = new_centroids
    # NOTE(review): if max_iters < 1, new_centroids is never bound and the
    # return below raises NameError -- same as the original; confirm callers
    # always pass max_iters >= 1.
    # External cluster-quality scores against the ground truth.
    # Normalized Mutual Information (NMI)
    nmi = metrics.normalized_mutual_info_score(label_true, clusters)
    # Adjusted Mutual Information (AMI)
    ami = metrics.adjusted_mutual_info_score(label_true, clusters)
    # Rand index (RI)
    ri = rand_score(Y, label_true, clusters)
    # Adjusted Rand index (ARI)
    ari = metrics.adjusted_rand_score(label_true, clusters)
    eval_mat.append(nmi)
    eval_mat.append(ami)
    eval_mat.append(ri)
    eval_mat.append(ari)
    return new_centroids, eval_mat
{
"api_name": "itertools.combinations",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.copy",
"line_... |
73709085154 | import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegressionCV, LogisticRegression
from sklearn.model_selection import StratifiedKFold, RandomizedSearchCV
from sklearn.metrics import accuracy_score
from xgboost import XGBClassifier
import warnings
# Silence all warnings globally for the batch run (this also hides deprecation
# notices from pandas/sklearn -- remove when debugging).
warnings.filterwarnings('ignore')
## Function for reporting best parameters grid search
def report(results, n_top=3):
    """Print the n_top best-ranked parameter settings from a CV search's cv_results_."""
    for rank in range(1, n_top + 1):
        for candidate in np.flatnonzero(results['rank_test_score'] == rank):
            mean = results['mean_test_score'][candidate]
            std = results['std_test_score'][candidate]
            print("Model with rank: {0}".format(rank))
            print("Mean validation score: {0:.3f} (std: {1:.3f})".format(mean, std))
            print("Parameters: {0}".format(results['params'][candidate]))
            print("")
# Load the input CSV tables (one dataframe per file); encoding handles non-UTF8 text.
studentInfo = pd.read_csv('studentInfo.csv', header=0, sep=",", encoding="ISO-8859-1")
assessments = pd.read_csv('assessments.csv', header=0, sep=",", encoding="ISO-8859-1")
courses = pd.read_csv('courses.csv', header=0, sep=",", encoding="ISO-8859-1")
studentAssessment = pd.read_csv('studentAssessment.csv', header=0, sep=",", encoding="ISO-8859-1")
studentRegistration = pd.read_csv('studentRegistration.csv', header=0, sep=",", encoding="ISO-8859-1")
studentVle = pd.read_csv('studentVle.csv', header=0, sep=",", encoding="ISO-8859-1")
vle = pd.read_csv('vle.csv', header=0, sep=",", encoding="ISO-8859-1")
# --- Assessments --------------------------------------------------------------
# Missing assessment dates default to the end of the module presentation.
# FIX: positional axis in DataFrame.drop was removed in pandas 2.0 -- use axis=1.
assessments.date = assessments.date.fillna(0)
assessments = assessments.merge(courses, on=["code_module", "code_presentation"])
assessments.date = [assessments.date.iloc[i] if assessments.date.iloc[i] != 0 else assessments.module_presentation_length.iloc[i] for i in range(assessments.shape[0])]
assessments = assessments.drop("module_presentation_length", axis=1)
# --- VLE ----------------------------------------------------------------------
vle = vle.merge(courses, on=["code_module", "code_presentation"])
vle.week_from = vle.week_from.fillna(0)
vle.week_to = vle.week_to.fillna(0)
# week_to becomes a day count; missing values default to the presentation length.
vle.week_to = [vle.week_to[i]*7 if vle.week_to[i] != 0 else vle.module_presentation_length[i] for i in range(vle.shape[0])]
vle.week_from = [int(x) for x in vle.week_from]
vle.week_to = [int(x) for x in vle.week_to]
vle = vle.drop("module_presentation_length", axis=1)
# vle = vle.drop("week_from", 1)
# vle = vle.drop("week_to", 1)
# --- Student info -------------------------------------------------------------
studentInfo.disability = [1 if x == 'Y' else 0 for x in studentInfo.disability]
# Ordinal-encode education level, deprivation band (midpoint of range) and age band.
studentInfo.highest_education = studentInfo.highest_education.replace('No Formal quals', 0).replace('Lower Than A Level', 1)
studentInfo.highest_education = studentInfo.highest_education.replace('A Level or Equivalent', 2).replace('HE Qualification', 3)
studentInfo.highest_education = studentInfo.highest_education.replace('Post Graduate Qualification', 5)
studentInfo.imd_band = studentInfo.imd_band.replace('0-10%', 5).replace('10-20', 15).replace('20-30%', 25)
studentInfo.imd_band = studentInfo.imd_band.replace('30-40%', 35).replace('40-50%', 45).replace('50-60%', 55)
studentInfo.imd_band = studentInfo.imd_band.replace('60-70%', 65).replace('70-80%', 75).replace('80-90%', 85)
studentInfo.imd_band = studentInfo.imd_band.replace('90-100%', 95)
studentInfo.imd_band = studentInfo.imd_band.fillna(studentInfo.imd_band.mean())
studentInfo.imd_band = [int(x) for x in studentInfo.imd_band]
studentInfo.age_band = studentInfo.age_band.replace('55<=', 60).replace('35-55', 45).replace('0-35', 20)
# --- Student registration -----------------------------------------------------
studentRegistration = studentRegistration.merge(courses, on=["code_module", "code_presentation"])
# Never-unregistered students get presentation_length + 1 as their unregistration date.
studentRegistration.date_unregistration = [int(studentRegistration.date_unregistration[i]) if not(np.isnan(studentRegistration.date_unregistration[i])) else studentRegistration.module_presentation_length[i] + 1 for i in range(studentRegistration.shape[0])]
studentRegistration = studentRegistration.drop("module_presentation_length", axis=1)
# --- Student assessment -------------------------------------------------------
studentAssessment = studentAssessment.merge(assessments, on="id_assessment").merge(courses, on=["code_module", "code_presentation"])
studentAssessment["is_late"] = [1 if studentAssessment.date_submitted[i] > studentAssessment.date[i] else 0 for i in range(studentAssessment.shape[0])]
# is_banked recoding judged unnecessary (values are already 0/1):
# studentAssessment["is_banked"] = [1 if x == 1 else 0 for x in studentAssessment["is_banked"]]
studentAssessment = studentAssessment.drop("module_presentation_length", axis=1).drop("date", axis=1)
# Fill missing scores: 0 for Withdrawn/Fail students, the global mean otherwise.
studentAssessment = studentAssessment.merge(studentInfo, on=["code_module", "code_presentation", "id_student"])
new_score = []
for i in range(studentAssessment.shape[0]):
    if not np.isnan(studentAssessment.score[i]):
        new_score.append(studentAssessment.score[i])
    elif studentAssessment.final_result[i] in ("Withdrawn", "Fail"):
        new_score.append(0)
    else:
        new_score.append(studentAssessment.score.mean())
studentAssessment["score"] = new_score
# Drop the columns duplicated by the merges above.
studentAssessment = studentAssessment.drop("code_module", axis=1).drop("code_presentation", axis=1).drop("assessment_type", axis=1).drop("weight", axis=1)
studentAssessment = studentAssessment.drop('gender', axis=1).drop('region', axis=1).drop('highest_education', axis=1).drop('imd_band', axis=1).drop('age_band', axis=1)
studentAssessment = studentAssessment.drop('num_of_prev_attempts', axis=1).drop('studied_credits', axis=1).drop('disability', axis=1).drop('final_result', axis=1)
# Student Vle
# Aggregate click events per student/site/day: total clicks and event count.
nStudentVle = studentVle.groupby(["code_module", "code_presentation", "id_student", "id_site", "date"]).agg(
{
'sum_click' : ["sum", "count"]
}
)
# Flatten the two-level column index produced by the named aggregations.
nStudentVle.columns = ['nb_click_total', 'count']
nStudentVle = nStudentVle.reset_index()
###########################
########### M L ###########
###########################
# First model: Predictions on day 1
# Base table: one row per student registration, without the leakage-prone
# unregistration date.
student = studentInfo.merge(studentRegistration, on=["code_module", "code_presentation", "id_student"])
student = student.drop("date_unregistration", axis=1)
# Keep only VLE activity that happened before day 1 of the presentation.
nStudentVleBefore = nStudentVle[nStudentVle.date < 1]
# First aggregation
# Per student/day: click stats and number of distinct resources touched.
nStudentVleBefore1 = nStudentVleBefore.groupby(["code_module", "code_presentation", "id_student", "date"]).agg(
{
'nb_click_total' : ["sum", "mean", "std"],
'id_site' : ["count"]
}
)
nStudentVleBefore1.columns = ['nb_click_total', 'mean_click', "std_click", "nb_ressources"]
nStudentVleBefore1 = nStudentVleBefore1.reset_index()
# Other data with 1st aggregation
# Per student: activity-day counts and resource-usage statistics.
nStudentVleBefore2 = nStudentVleBefore1.groupby(["code_module", "code_presentation", "id_student"]).agg(
{
'date' : ["count", "min", "max"],
'nb_ressources' : ["mean", "max", "min", "std"]
}
)
nStudentVleBefore2.columns = ['nb_active_days', "1st_day", "last_day", 'mean_nb_ressources', "max_nb_ressources", "min_nb_ressources", "std_nb_ressources"]
nStudentVleBefore2 = nStudentVleBefore2.reset_index()
# 2nd aggregation
nStudentVleBefore3 = nStudentVleBefore.groupby(["code_module", "code_presentation", "id_student"]).agg(
{
'nb_click_total' : ["sum", "mean", "std"],
'id_site' : ["count"]
}
)
nStudentVleBefore3.columns = ['nb_click_total', 'mean_click', "std_click", "nb_ressources"]
nStudentVleBefore3 = nStudentVleBefore3.reset_index()
# Results of these aggregations
nStudentVleBefore = nStudentVleBefore2.merge(nStudentVleBefore3, on=["code_module", "code_presentation", "id_student"])
# Third aggregation
# Number of resources that were open before day 1, per presentation.
activeVLEBefore = vle[vle.week_from < 1]
nVLE = activeVLEBefore.groupby(["code_module", "code_presentation"]).agg(
{
"id_site" : ["count"]
}
)
nVLE.columns = ["nb_total_ressources"]
nVLE = nVLE.reset_index()
nStudentVleBefore = nStudentVleBefore.merge(nVLE, on=["code_module", "code_presentation"])
# New features
# Fraction of the available resources this student actually touched.
nStudentVleBefore["per_ressources"] = nStudentVleBefore["nb_ressources"] / nStudentVleBefore["nb_total_ressources"]
nStudentVleBefore = nStudentVleBefore.drop("nb_total_ressources", axis=1)
# Final datatable for 1st model
studentTotal = student.merge(nStudentVleBefore, on=["code_module", "code_presentation", "id_student"], how='outer')
studentTotal = studentTotal.fillna(0)
# Replace infinities (from the per_ressources division) then impute with column means.
studentTotal = studentTotal.replace([np.inf, -np.inf], np.nan)
studentTotal.fillna(studentTotal.mean(), inplace=True)
# Label encoder for target
studentTotal["Success"] = [1 if x=="Pass" or x=="Distinction" else 0 for x in studentTotal.final_result]
studentTotal["final_result"] = studentTotal["final_result"].replace("Fail", 0).replace("Withdrawn", 1).replace("Pass", 2).replace("Distinction", 3)
# One hot encoder for all the variables
# >>> studentTotal.shape
# (32593, 25)
studentTotal = pd.get_dummies(studentTotal)
# >>> studentTotal.shape
# (32593, 47)
# Define the X features and the binary y target.
uselessCol = ['gender_F', "id_student", "Success"]
uselessCol.append("final_result")
X = studentTotal.drop(uselessCol, axis=1).values
y = studentTotal[["Success"]].values
# Randomized search for the best RandomForest hyper-parameters.
params = {
'n_estimators':[50, 100, 200, 500],
"bootstrap": [True, False],
'max_depth': [None, 1, 5, 10, 20]
}
n_iter_search = 20
rf = RandomForestClassifier()
# NOTE(review): the iid= argument was deprecated in scikit-learn 0.22 and
# removed in 0.24 -- confirm the pinned sklearn version before running.
random_search_rf = RandomizedSearchCV(rf, param_distributions=params,
n_iter=n_iter_search, cv=5, iid=False)
random_search_rf.fit(X, y)
report(random_search_rf.cv_results_)
# Model with rank: 1
# Mean validation score: 0.459 (std: 0.020)
# Parameters: {'n_estimators': 500, 'max_depth': 1, 'bootstrap': False}
# With the binary Success target:
# Model with rank: 1
# Mean validation score: 0.642 (std: 0.022)
# Parameters: {'n_estimators': 50, 'max_depth': 5, 'bootstrap': False}
# Randomized search for the best XGBoost hyper-parameters.
params = {
'n_estimators':[50, 100, 200, 500],
'min_child_weight': [1, 3, 5],
'gamma': [0.2, 0.5, 1, 2],
'subsample': [0.4, 0.6, 0.8],
'colsample_bytree': [0.4, 0.6, 0.8],
'max_depth': [1, 5, 10, 20]
}
num_class = len(studentTotal["final_result"].unique())
xgb = XGBClassifier(learning_rate=0.02, objective='multi:softmax',
num_class=num_class, silent=True, nthread=1)
folds = 3
param_comb = 30
skf = StratifiedKFold(n_splits=folds, shuffle = True, random_state = 2019)
random_search_xgb = RandomizedSearchCV(xgb,
param_distributions=params,
n_iter=param_comb,
n_jobs=4,
cv=skf.split(X, y),
verbose=3,
random_state=2019)
random_search_xgb.fit(X, y)
report(random_search_xgb.cv_results_)
# Model with rank: 1
# Mean validation score: 0.506 (std: 0.007)
# Parameters: {'subsample': 0.8, 'n_estimators': 500, 'min_child_weight': 5, 'max_depth': 5, 'gamma': 0.5, 'colsample_bytree': 0.6}
# Cross-validation
# Out-of-fold loop: collect each fold's held-out rows and the LR/RF predicted
# probabilities on them, to build the input of the second-stage model below.
nSplits = 10
kf = StratifiedKFold(n_splits=nSplits, random_state=2019)
cv_accuracy_rf = 0.0
cv_accuracy_lr = 0.0
cv_accuracy_xgb = 0.0
col_new_data = studentTotal.drop(uselessCol, axis=1).columns
# col_results_x = ["lr_cls1", "lr_cls2", "lr_cls3", "lr_cls4"]
# col_results_y = ["rf_cls1", "rf_cls2", "rf_cls3", "rf_cls4"]
col_results_x = ["lr_cls1", "lr_cls2"]
col_results_y = ["rf_cls1", "rf_cls2"]
# col_results_z = ["xgb_cls1", "xgb_cls2", "xgb_cls3", "xgb_cls4"]
new_data = pd.DataFrame(columns=col_new_data)
results = pd.DataFrame(columns=col_results_x+col_results_y) # +col_results_z
for train_index, cv_index in kf.split(X, y):
# Split data for this fold.
X_train, X_cv = X[train_index], X[cv_index]
y_train, y_cv = y[train_index], y[cv_index]
# Accumulate the held-out rows.
new_data_x = pd.DataFrame(X_cv, columns=col_new_data)
new_data = pd.concat((new_data, new_data_x), axis=0, ignore_index=True)
# Models
lr = LogisticRegressionCV()
# NOTE(review): the tuned RF below is immediately overwritten by a hard-coded
# configuration on the next line -- confirm which one is intended.
rf = RandomForestClassifier(n_estimators=random_search_rf.best_estimator_.n_estimators, bootstrap=random_search_rf.best_estimator_.bootstrap, max_depth=random_search_rf.best_estimator_.max_depth)
rf = RandomForestClassifier(n_estimators=30, bootstrap=False, max_depth=1)
# xgb = XGBClassifier(learning_rate=0.02,
# n_estimators=random_search_xgb.best_estimator_.n_estimators,
# objective='multi:softmax',
# num_class=num_class,
# silent=True,
# nthread=1,
# min_child_weight=random_search_xgb.best_estimator_.min_child_weight,
# gamma=random_search_xgb.best_estimator_.gamma,
# subsample=random_search_xgb.best_estimator_.subsample,
# colsample_bytree=random_search_xgb.best_estimator_.colsample_bytree,
# max_depth=random_search_xgb.best_estimator_.max_depth)
# Training
rf.fit(X_train, y_train)
lr.fit(X_train, y_train)
# xgb.fit(X_train, y_train)
# Predictions
y_pred_lr = lr.predict(X_cv)
y_pred_rf = rf.predict(X_cv)
# y_pred_xgb = xgb.predict(X_cv)
cv_accuracy_lr += accuracy_score(y_cv, y_pred_lr)
cv_accuracy_rf += accuracy_score(y_cv, y_pred_rf)
# cv_accuracy_xgb += accuracy_score(y_cv, y_pred_xgb)
# Class probabilities on the held-out rows (second-stage features).
y_pred_lr = lr.predict_proba(X_cv)
y_pred_rf = rf.predict_proba(X_cv)
# y_pred_xgb = xgb.predict_proba(X_cv)
# Accumulate the per-fold probabilities.
results_x = pd.DataFrame(y_pred_lr, columns=col_results_x)
results_y = pd.DataFrame(y_pred_rf, columns=col_results_y)
# results_z = pd.DataFrame(y_pred_xgb, columns=col_results_z)
results = pd.concat((results, pd.concat([results_x, results_y], axis=1)), axis=0, ignore_index=True) # , results_z
print('CV accuracy LR: ' + str(cv_accuracy_lr /nSplits))
print('CV accuracy RF: ' + str(cv_accuracy_rf /nSplits))
# print('CV accuracy XGB: ' + str(cv_accuracy_xgb /nSplits))
# With the 4-class target:
# >>> print('CV accuracy LR: ' + str(cv_accuracy_lr /nSplits))
# CV accuracy LR: 0.4534381470128688
# >>> print('CV accuracy RF: ' + str(cv_accuracy_rf /nSplits))
# CV accuracy RF: 0.4580684781744283
# Dรฉfinition variables X et y
new_complete_data = pd.concat([new_data, results], axis=1)
colMerge = []
for col in studentTotal.columns:
if col in new_complete_data.columns:
colMerge.append(col)
data_lastM = new_complete_data.merge(studentTotal, on=colMerge, how="inner")
data_lastM = data_lastM[col_results_x+col_results_y]
X = data_lastM.values
nSplits = 10
kf = StratifiedKFold(n_splits=nSplits, random_state=2019)
cv_accuracy = 0.0
for train_index, cv_index in kf.split(X, y):
# Division data
X_train, X_cv = X[train_index], X[cv_index]
y_train, y_cv = y[train_index], y[cv_index]
# Apprentissage
lr = LogisticRegressionCV()
lr.fit(X_train, y_train)
# Predictions
y_pred = lr.predict(X_cv)
cv_accuracy += accuracy_score(y_cv, y_pred)
print('CV accuracy LR: ' + str(cv_accuracy /nSplits))
| JuliCou/ML_3A_p2 | initial_predict.py | initial_predict.py | py | 15,151 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.flatnonzero",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.read... |
43200991376 | from jinja2 import Environment, FileSystemLoader
# Render the homework template from the templates_dz directory and print it.
loader = FileSystemLoader('templates_dz')
environment = Environment(loader=loader)
template = environment.get_template('main_dz.html')
rendered = template.render(title='Домашнее задание')
print(rendered)
| Mil6734/git_class | Python2/dz40/dz40.py | dz40.py | py | 246 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "jinja2.FileSystemLoader",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "jinja2.Environment",
"line_number": 4,
"usage_type": "call"
}
] |
74702175393 | from postgres import Postgres
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import json
import pandas
import os
import sys
from math import ceil
import argparse
parser = argparse.ArgumentParser()
# Run control arguments
parser.add_argument("--schema", type=str, help="The name of the schema", default='socialnet7')
parser.add_argument("--metrics", type=str,nargs='*', help="List of metrics to run (default to all)")
# NOTE(review): this help text mentions plotting cohorts, but --nowarn is used
# below to skip the interactive delete/truncate confirmation -- looks copy-pasted.
parser.add_argument("--nowarn", action="store_true", default=False,help="Plot cohorts using scored metrics for all (not just skewed)")
class MetricCalculator:
    """Batch-calculates churn metrics for one database schema.

    Metric definitions and the calculation date range come from
    ../conf/<schema>_metrics.json; results are written to the <schema>.metric
    and <schema>.metric_name tables in Postgres (connection parameters taken
    from the CHURN_DB* environment variables).
    """

    def __init__(self, schema):
        '''
        Initialize metric calculator from schema name. Loads parameter json from the
        adjacent conf directory, reads the date range from the configuration, and
        makes the Postgres connection with environment variables.
        :param schema: name of the database schema to operate on
        '''
        with open('../conf/%s_metrics.json' % schema, 'r') as myfile:
            self.metric_dict = json.loads(myfile.read())
        self.schema = schema
        self.from_date = self.metric_dict['date_range']['from_date']
        self.to_date = self.metric_dict['date_range']['to_date']
        # Config keys that are parameters rather than metric definitions.
        self.non_metrics = ('date_range', 'event_properties')
        self.URI = f"postgresql://localhost/{os.environ['CHURN_DB']}?user={os.environ['CHURN_DB_USER']}&password={os.environ['CHURN_DB_PASS']}"
        self.db = Postgres(self.URI)
        with open('../sql/qa_metric.sql', 'r') as myfile:
            self.qa_sql = myfile.read().replace('\n', ' ')

    def remove_old_metrics_from_db(self, run_mets=None, no_warn=False):
        '''
        Delete values of existing metrics. If no metrics are specified, it truncates
        the metric tables. Otherwise it just deletes the specified metrics within the
        configured date range.
        :param run_mets: list of strings, metric names; or else None meaning truncate all metrics
        :param no_warn: skip the interactive confirmation prompt
        :return:
        '''
        if run_mets is None:
            print('TRUNCATING *Metrics* in schema -> %s <- ...' % self.schema)
            # BUG FIX: the original tested `== schema` and therefore exited
            # precisely when the user *confirmed*; abort only on a mismatch
            # (mirrors the check in the delete branch below).
            if not no_warn and input("are you sure? (enter %s to proceed) " % self.schema) != self.schema:
                exit(0)
            self.db.run('truncate table %s.metric' % self.schema)
            self.db.run('truncate table %s.metric_name' % self.schema)
        else:
            if isinstance(run_mets, str):
                run_mets = [run_mets]
            if len(run_mets) > 1 and not no_warn:
                print('DELETING * %d * Metrics in schema -> %s <- ...' % (len(run_mets), self.schema))
                if input("are you sure? (enter %s to proceed) " % self.schema) != self.schema:
                    exit(0)
            for m in run_mets:
                id = self.get_metric_id(m)
                if id is not None:
                    deletSql = "delete from %s.metric where metric_name_id=%d and metric_time between '%s'::timestamp and '%s'::timestamp" \
                               % (self.schema, id, self.from_date, self.to_date)
                    print('Clearing old values: ' + deletSql)
                    self.db.run(deletSql)

    def get_metric_id(self, metric_name):
        '''
        Get the id of one metric from the database by name.
        :param metric_name: string name of the metric
        :return: id number of the metric, assuming one was found; or else SQL returns NULL as None in Python
        '''
        sql = "select metric_name_id from %s.metric_name where metric_name='%s'" % (self.schema, metric_name)
        return self.db.one(sql)

    def add_metric_id(self, metric):
        '''
        Add an id for a metric if it doesn't already exist.
        :param metric: string name of the metric
        :return: the (possibly newly inserted) metric id
        '''
        id = self.get_metric_id(metric)
        if id is None:
            # Next free id, or 0 for an empty table.
            id = self.db.one('select max(metric_name_id)+1 from %s.metric_name' % self.schema)
            if id is None:
                id = 0
            insertNameSql = "insert into %s.metric_name (metric_name_id,metric_name) values (%d,'%s')" % (
                self.schema, id, metric)
            self.db.run(insertNameSql)
            print('Inserted metric %s.%s as id %d' % (self.schema, metric, id))
        return id

    def metric_qa_plot(self, metric, args):
        '''
        Plot max/avg/min metric values and calculation counts over time and save
        the figure under ../../../fight-churn-output/<schema>/.
        NOTE(review): relies on args.hideax and args.format, which this module's
        parser does not define -- presumably supplied by a caller's parser; confirm.
        :param metric: string name of the metric
        :param args: parsed command-line arguments
        '''
        save_path = '../../../fight-churn-output/' + self.schema + '/'
        os.makedirs(save_path, exist_ok=True)
        print('Checking metric %s.%s' % (self.schema, metric))
        id = self.get_metric_id(metric)
        if id is None:
            # BUG FIX: the original had a bare string expression here (dead code),
            # so the message was never shown.
            print("No ID found for metric %s" % metric)
            return
        # Substitute the query parameters into the QA SQL template.
        aSql = self.qa_sql.replace('%metric_name_id', str(id))
        aSql = aSql.replace('%schema', self.schema)
        aSql = aSql.replace('%from_date', self.from_date)
        aSql = aSql.replace('%to_date', self.to_date)
        print(aSql)
        res = pandas.read_sql_query(aSql, self.URI)
        if res.shape[0] == 0 or res['avg_val'].isnull().values.all():
            print('\t*** No result for %s' % metric)
            return
        # Strip non-alphanumerics so the metric name is a safe file name.
        cleanedName = ''.join(e for e in metric if e.isalnum())
        # res.to_csv(save_path+cleanedName+'_metric_qa.csv',index=False) # uncomment to save details
        plt.figure(figsize=(8, 10))
        plt.subplot(4, 1, 1)
        plt.plot('calc_date', 'max_val', data=res, marker='', color='black', linewidth=2, label="max")
        if args.hideax:
            plt.gca().get_xaxis().set_visible(False)  # hide the x axis on the upper panels
        plt.ylim(0, ceil(1.1 * res['max_val'].dropna().max()))
        plt.legend()
        plt.title(metric)
        plt.subplot(4, 1, 2)
        plt.plot('calc_date', 'avg_val', data=res, marker='', color='black', linewidth=2, label='avg')
        if args.hideax:
            plt.gca().get_xaxis().set_visible(False)
        plt.ylim(0, ceil(1.1 * res['avg_val'].dropna().max()))
        plt.legend()
        plt.subplot(4, 1, 3)
        plt.plot('calc_date', 'min_val', data=res, marker='', color='black', linewidth=2, label='min')
        if args.hideax:
            plt.gca().get_xaxis().set_visible(False)
        # plt.ylim(0, ceil(2*res['min_val'].dropna().max()))
        plt.legend()
        plt.subplot(4, 1, 4)
        plt.plot('calc_date', 'n_calc', data=res, marker='', color='black', linewidth=2, label="n_calc")
        plt.ylim(0, ceil(1.1 * res['n_calc'].dropna().max()))
        plt.legend()
        plt.gca().figure.autofmt_xdate()
        if args.hideax:
            plt.gca().get_yaxis().set_visible(False)  # hide the y axis labels on the count panel
            monthFormat = mdates.DateFormatter('%b')
            plt.gca().get_xaxis().set_major_formatter(monthFormat)
        else:
            plt.gcf().autofmt_xdate()
        plt.savefig(save_path + 'metric_valqa_' + cleanedName + '.' + args.format)
        plt.close()

    def qa_metrics(self, args):
        '''
        Loops over the configured metrics and makes the QA plot of each. If a list was
        provided on the command line, it only plots the ones in the list.
        :param args: parsed command-line arguments (args.metrics selects the metrics)
        :return:
        '''
        if args.metrics is None:
            for metric in self.metric_dict.keys():
                if metric in self.non_metrics:
                    continue
                self.metric_qa_plot(metric, args)
        else:
            for metric in args.metrics:
                self.metric_qa_plot(metric, args)

    def run_one_metric_calculation(self, metric):
        '''
        Calculate one metric, by name. First adds the id, then loads the raw SQL from
        the file named by the metric's configuration. The bind variables start out as
        the metric's own parameter dictionary, extended with the values common to all
        metric calculations (dates, id, schema, value name); they are substituted into
        the SQL template with a simple replace (the Postgres bind system was not
        flexible enough). Finally, it runs the SQL.
        :param metric: string name of the metric
        :return:
        '''
        assert metric in self.metric_dict, "No metric %s in metric dictionary!" % metric
        id = self.add_metric_id(metric)
        with open('../sql/%s.sql' % self.metric_dict[metric]['sql'], 'r') as myfile:
            sql = myfile.read().replace('\n', ' ')
        params = self.metric_dict[metric]
        params['metric_name_val'] = metric
        params['schema'] = self.schema
        params['from_date'] = self.from_date
        params['to_date'] = self.to_date
        params['metric_name_id'] = id
        bind_char = '%'
        for p in params.keys():
            sql = sql.replace(bind_char + p, str(params[p]))
        print(sql)
        self.db.run(sql)

    def calculate_metrics(self, run_mets=None):
        '''
        Loops over the configured metrics and runs them. If a list was provided, it
        only runs the ones in the list.
        :param run_mets: list of strings, metric names; or else None meaning calculate all configured metrics
        :return:
        '''
        for metric in self.metric_dict.keys():
            if (run_mets is not None and metric not in run_mets) or metric in self.non_metrics:
                continue
            self.run_one_metric_calculation(metric)
'''
####################################################################################################
The main script for calculating Fight Churn With Data metrics in batch: If there are command line arguments,
use them. Otherwise defaults are hard coded
'''
if __name__ == "__main__":
# parse_known_args tolerates extra flags intended for other scripts that share the parser.
args, _ = parser.parse_known_args()
schema=args.schema
run_mets = args.metrics
no_warn = args.nowarn
met_calc = MetricCalculator(schema)
# Clear any previously calculated values for the run, then recalculate them.
met_calc.remove_old_metrics_from_db(run_mets,no_warn)
met_calc.calculate_metrics(run_mets)
| carl24k/fight-churn | extras/metric-framework/py/metric_calc.py | metric_calc.py | py | 8,677 | python | en | code | 227 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "postgres.Postgres"... |
40872798888 | from flask import Flask, render_template , request
import numpy as np
from utils import CarPrice
# Flask application instance; templates are resolved from the default ./templates folder.
app = Flask(__name__)
@app.route('/')
def home():
"""Serve the landing page with the valuation input form."""
return render_template('index.html')
@app.route('/valuation', methods=['POST', 'GET'])
def valuation():
    """Render the valuation page; on POST, predict a price from the form fields.

    FIX: the original returned None on GET, which made Flask raise a 500 even
    though the route declares GET -- now GET just shows the empty form.
    """
    if request.method != 'POST':
        return render_template('index.html')
    data = request.form.get
    Name = data('Name')
    Location = data('Location')
    # SECURITY: eval() executes arbitrary code supplied in the form -- an
    # attacker-controlled string runs on the server. Kept only to preserve the
    # existing numeric-parsing behaviour (eval('25') -> int); replace with
    # float()/int() after confirming CarPrice's input expectations.
    Age = eval(data('Age'))
    Kilometers_Driven = eval(data('Kilometers_Driven'))
    Fuel_Type = data('Fuel_Type')
    Transmission = data('Transmission')
    Owner_Type = data('Owner_Type')
    Mileage = eval(data('Mileage'))
    Engine = eval(data('Engine'))
    Power = eval(data('Power'))
    Seats = int(data('Seats'))
    model = CarPrice(Name, Location, Age, Kilometers_Driven, Fuel_Type,
                     Transmission, Owner_Type, Mileage, Engine, Power, Seats)
    price = model.prediction()
    # Round to 2 decimals for display.
    return render_template('index.html', Predicted_Price=np.around(price, decimals=2))
if __name__ == "__main__":
# Listen on all interfaces, port 8080; debug disabled for production-style runs.
app.run(host='0.0.0.0', port=8080, debug=False)
| supriya-vedpathak/carpro | interface.py | interface.py | py | 1,115 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "flask.reque... |
40467668840 | import uuid
from teine import models, personality_operations
DEFAULT_SHOW_TITLE = 'My first show'
def get_by_id(show_id):
    """Load the Show with the given id from the model layer."""
    return models.Show.load(show_id)
def update(show_id, title='', author='', tagline='', description='',
           show_hosts=None, image_id='', language='en-us'):
    """Update an existing show's metadata and persist it.

    :param show_id: id of the show to update
    :param show_hosts: iterable of host descriptors converted to ids via
        personality_operations; defaults to an empty list
    :raises ValueError: if no show exists with the given id
    :return: result of the model's save()
    """
    # Fix: the original used a mutable default argument (show_hosts=[]),
    # which is shared across calls; use None as the sentinel instead.
    show_hosts = [] if show_hosts is None else show_hosts
    show = models.Show.load(show_id)
    if not show:
        raise ValueError('no show found with id {}'.format(show_id))
    show.title = title
    show.author = author
    show.tagline = tagline
    show.description = description
    show.show_host_ids = personality_operations.people_to_ids(
        show_id, show_hosts)
    show.image_id = image_id
    show.language = language
    return show.save()
def create(user, title='', author='', tagline='', description='',
           show_hosts=None, image_id='', language='en-us'):
    """Create a new show owned by *user*, register it on the user, persist both.

    :param user: owner; its show_ids list is extended and the user is saved
    :param show_hosts: iterable of host descriptors converted to ids;
        defaults to an empty list
    :return: the saved Show model
    """
    # Fix: avoid the mutable default argument (show_hosts=[]) shared
    # across calls.
    show_hosts = [] if show_hosts is None else show_hosts
    show_id = str(uuid.uuid4())
    user.show_ids.append(show_id)
    user.save()
    show_host_ids = personality_operations.people_to_ids(show_id, show_hosts)
    return models.Show.create(show_id=show_id, owner_user_id=user.user_id,
                              title=title, author=author, tagline=tagline,
                              description=description,
                              show_host_ids=show_host_ids,
                              image_id=image_id, language=language).save()
def create_default(user):
    # Bootstrap a first show for a new user, titled with the module default
    # and authored by the user's own id.
    return create(user, title=DEFAULT_SHOW_TITLE, author=user.user_id)
| hirogwa/teine | teine/show_operations.py | show_operations.py | py | 1,419 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "teine.models.Show.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "teine.models.Show",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "teine.models",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "teine.models.... |
28437711680 | from azureml.core import Run
import pickle
import argparse
from sklearn import svm
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
from dataloader import DataLoader
# model params
C = 0.025
kernel = "linear"
# cross validation params
cv = 5
def get_args():
    """Parse command-line options for the dataset and model output folders."""
    parser = argparse.ArgumentParser(description='Process arguments')
    parser.add_argument('--data-folder', dest='data_folder', type=str,
                        default='./dataset', help='Output folder')
    parser.add_argument('--output-folder', dest='output_folder', type=str,
                        default='./output_model', help='Output folder')
    return parser.parse_args()
def main():
    """Train an SVM classifier, report CV and test accuracy, and pickle it.

    Data comes either from local folders (offline run) or from AzureML
    registered input datasets (submitted run).
    """
    args = get_args()

    run = Run.get_context()
    is_offline_run = run.id.startswith('OfflineRun')
    print(f"Is Offline Run: {is_offline_run}")

    if is_offline_run:
        # Local execution: read the train/test splits from disk.
        dataloader = DataLoader(args.data_folder)
        X_train, y_train = dataloader.load_training_data()
        X_test, y_test = dataloader.load_test_data()
    else:
        # AzureML execution: materialize the registered datasets.
        training_data = run.input_datasets['training_dataset']
        test_dataset = run.input_datasets['test_dataset']
        training_df = training_data.to_pandas_dataframe()
        test_df = test_dataset.to_pandas_dataframe()
        # 'Path' is bookkeeping metadata, not a feature column.
        training_df = training_df.drop(['Path'], axis=1)
        test_df = test_df.drop(['Path'], axis=1)
        # Last column is the label; everything before it is a feature.
        X_train, y_train = training_df.iloc[:, :-1].values, training_df.iloc[:, -1].values
        X_test, y_test = test_df.iloc[:, :-1].values, test_df.iloc[:, -1].values

    clf = svm.SVC(kernel=kernel, C=C)
    scores = cross_val_score(clf, X_train, y_train, cv=cv)
    print(f"Cross validation scores: {scores};")

    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    accuracy = accuracy_score(y_pred, y_test)
    print(f"Accuracy: {accuracy}")

    model_output = f"{args.output_folder}/classifier.pkl"
    # Fix: the original `pickle.dump(clf, open(...))` never closed the file
    # handle; a context manager guarantees it is closed even if dump fails.
    with open(model_output, 'wb') as model_file:
        pickle.dump(clf, model_file)
# Script entry point.
if __name__ == "__main__":
    main()
| liupeirong/MLOpsManufacturing | samples/edge-inferencing-and-mlops/model/main.py | main.py | py | 2,029 | python | en | code | 21 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "azureml.core.Run.get_context",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "azureml.core.Run",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": ... |
7413718188 | from functools import partial
from typing import Callable, Dict, Tuple
import jax
import jax.numpy as jnp
from chex import dataclass, Array, Scalar, PRNGKey
from tensorflow_probability.substrates import jax as tfp
from ml4wifi.utils.wifi_specs import *
from ml4wifi.utils.measurement_manager import measurement_manager, MeasurementState
tfd = tfp.distributions
tfb = tfp.bijectors
# mean value based on ns-3 equal distance simulation scenario, 1 STA, const MCS
FRAMES_PER_SECOND = 188
# If first measurement is in time t=0s, Kalman filter refuses to work
FIRST_MEASUREMENT_SHIFT = 0.001
N_SAMPLES = 1000
@dataclass
class FTMEstimates:
    """Point estimates, uncertainties and confidence intervals of one frame."""
    # Distance point estimate with its spread and two-sided CI bounds.
    distance_estimated: Scalar
    distance_uncertainty: Scalar
    distance_ci_low: Scalar
    distance_ci_high: Scalar
    # SNR point estimate with its spread and two-sided CI bounds.
    snr_estimated: Scalar
    snr_uncertainty: Scalar
    snr_ci_low: Scalar
    snr_ci_high: Scalar
    # Expected rate per MCS (vector) with its spread; mcs_estimated is the
    # selected mode index.
    rate_estimated: Scalar
    rate_uncertainty: Scalar
    mcs_estimated: jnp.int32
@dataclass
class SimulationParameters:
    """Static configuration of a single simulation run (see run_simulation)."""
    confidence_level: Scalar        # confidence level for the estimated intervals
    measurement_interval: Scalar    # spacing between distance measurements
    seed: jnp.int32                 # PRNG seed
    simulation_time: Scalar         # total simulated duration
    start_position: Scalar          # initial distance offset of the station
    velocity: Scalar                # constant speed of the station
    total_frames: jnp.int32         # NOTE(review): not referenced in this file — confirm usage
@dataclass
class SimulationResults:
    """Per-frame output traces produced by run_simulation."""
    time: Array     # time axis of the simulation
    distance: Dict  # true/measured/estimated distance traces
    snr: Dict       # true/estimated SNR traces
    mcs: Dict       # true/estimated MCS indices
    rate: Dict      # true/estimated rates per MCS
@jax.jit
def ftmrate_log_distance(
        distance_dist: tfd.Distribution,
        confidence_level: jnp.float32,
        key: PRNGKey
) -> FTMEstimates:
    """
    Estimates distance, SNR and MCS from distance samples.

    Parameters
    ----------
    distance_dist : tfd.Distribution
        Distribution of distance estimation
    confidence_level : jnp.float32
        Confidence level of the estimations
    key : PRNGKey
        Seed (unused in this analytical variant; kept for interface parity
        with ftmrate_log_distance_monte_carlo)

    Returns
    -------
    estimates : FTMEstimates
        Dataclass with all the estimations, uncertainties and the selected mcs
    """

    # Tail mass that lies outside the two-sided confidence interval.
    alpha = 1 - confidence_level

    # Push the distance distribution through a Softplus bijector to constrain
    # it to positive values before mapping to SNR and expected rates.
    snr_dist = distance_to_snr(tfb.Softplus()(distance_dist))
    rate_dist = expected_rates_log_distance(DEFAULT_TX_POWER)(tfb.Softplus()(distance_dist))
    rate_estimated = rate_dist.quantile(0.5)
    # Select the MCS whose median expected rate is highest.
    mcs_estimated = jnp.argmax(rate_estimated)

    return FTMEstimates(
        distance_estimated=distance_dist.quantile(0.5),     # median as point estimate
        distance_uncertainty=0.0,                           # no sampling spread in this variant
        distance_ci_low=distance_dist.quantile(alpha / 2),
        distance_ci_high=distance_dist.quantile(1 - alpha / 2),
        snr_estimated=snr_dist.quantile(0.5),
        snr_uncertainty=0.0,
        snr_ci_low=snr_dist.quantile(alpha / 2),
        snr_ci_high=snr_dist.quantile(1 - alpha / 2),
        rate_estimated=rate_estimated,
        rate_uncertainty=0.0,
        mcs_estimated=mcs_estimated,
    )
@jax.jit
def ftmrate_log_distance_monte_carlo(
        distance_dist: tfd.Distribution,
        confidence_level: jnp.float32,
        key: PRNGKey
) -> FTMEstimates:
    """
    Estimates distance, SNR and MCS from distance samples.

    Parameters
    ----------
    distance_dist : tfd.Distribution
        Distribution of distance estimation
    confidence_level : jnp.float32
        Confidence level of the estimations
    key : PRNGKey
        Seed

    Returns
    -------
    estimates : FTMEstimates
        Dataclass with all the estimations, uncertainties and the selected mcs
    """

    # Tail mass that lies outside the two-sided confidence interval.
    alpha = 1 - confidence_level

    # Empirical moments from N_SAMPLES Monte Carlo draws.
    distance_samples = distance_dist.sample(N_SAMPLES, key)
    distance_estimated = distance_samples.mean()
    distance_uncertainty = distance_samples.std()

    # abs() keeps sampled distances non-negative before the SNR mapping.
    snr_samples = distance_to_snr(jnp.abs(distance_samples))
    snr_estimated = jnp.mean(snr_samples)
    snr_uncertainty = jnp.std(snr_samples)

    # Per-sample success probability scales the nominal rate of each mode.
    p_s_samples = jax.vmap(success_probability_log_distance)(snr_samples)
    rate_samples = p_s_samples * wifi_modes_rates
    rate_estimated = jnp.mean(rate_samples, axis=0)
    rate_uncertainty = jnp.std(rate_samples, axis=0)

    # MCS is selected from the mean distance, not from the rate samples.
    mcs_estimated = ideal_mcs_log_distance(DEFAULT_TX_POWER)(distance_estimated)

    return FTMEstimates(
        distance_estimated=distance_estimated,
        distance_uncertainty=distance_uncertainty,
        distance_ci_low=jnp.quantile(distance_samples, alpha / 2),
        distance_ci_high=jnp.quantile(distance_samples, 1 - alpha / 2),
        snr_estimated=snr_estimated,
        snr_uncertainty=snr_uncertainty,
        snr_ci_low=jnp.quantile(snr_samples, alpha / 2),
        snr_ci_high=jnp.quantile(snr_samples, 1 - alpha / 2),
        rate_estimated=rate_estimated,
        rate_uncertainty=rate_uncertainty,
        mcs_estimated=mcs_estimated,
    )
@partial(jax.jit, static_argnames=['agent_fn', 'frames_total'])
def run_simulation(
        agent_fn: Callable,
        params: SimulationParameters,
        frames_total: jnp.int32
) -> SimulationResults:
    """
    Run one simple simulation of SNR estimation based on noisy distance measurements. The station moves away from
    the AP at constant velocity from some start position and receives noisy measurements at some time intervals.

    Parameters
    ----------
    agent_fn : callable
        Function that initializes the agent.
    params : SimulationParameters
        Parameters of the simulation.
    frames_total : int
        Total number of samples in the simulation.

    Returns
    -------
    results : SimulationResults
        Results of the simulation.
    """

    key = jax.random.PRNGKey(params.seed)
    key, init_key = jax.random.split(key)

    measurements_manager = measurement_manager(params.measurement_interval)
    agent = agent_fn()

    # Shift the time axis so the first sample is not exactly at t=0
    # (see FIRST_MEASUREMENT_SHIFT at module level: t=0 breaks the Kalman filter).
    time = jnp.linspace(0.0, params.simulation_time, frames_total) + FIRST_MEASUREMENT_SHIFT
    true_distance = jnp.linspace(0.0, params.velocity * params.simulation_time, frames_total) + params.start_position

    # Pre-allocated per-frame result buffers; filled (functionally) by save_estimates.
    distance = {
        'true': jnp.abs(true_distance),
        'measurement': jnp.empty(frames_total),
        'estimated': jnp.empty(frames_total),
        'uncertainty': jnp.zeros(frames_total),
        'ci_low': jnp.zeros(frames_total),
        'ci_high': jnp.zeros(frames_total),
    }
    snr = {
        'true': distance_to_snr(distance['true']),
        'estimated': jnp.empty(frames_total),
        'uncertainty': jnp.zeros(frames_total),
        'ci_low': jnp.zeros(frames_total),
        'ci_high': jnp.zeros(frames_total),
    }
    mcs = {
        'true': jax.vmap(partial(ideal_mcs_log_distance, tx_power=DEFAULT_TX_POWER))(distance['true']),
        'estimated': jnp.empty(frames_total),
    }
    rate = {
        'true': wifi_modes_rates[mcs['true']],
        'estimated': jnp.empty((frames_total, len(wifi_modes_rates))),
        'uncertainty': jnp.zeros((frames_total, len(wifi_modes_rates)))
    }

    def fori_fn(i: jnp.int32, carry: Tuple) -> Tuple:
        # One frame: possibly ingest a new noisy measurement, update the agent
        # only when a measurement arrived (lax.cond), then estimate from the
        # agent's distance distribution and record the results.
        results, state, m_state, key = carry
        key, noise_key, update_key, sample_key, results_key = jax.random.split(key, 5)

        m_state, measured = measurements_manager.update(m_state, distance['true'][i], time[i], noise_key)
        state = jax.lax.cond(measured, lambda: agent.update(state, update_key, m_state.distance, time[i]), lambda: state)

        distance_distribution = agent.sample(state, sample_key, time[i])
        ftm_estimates = ftmrate_log_distance(distance_distribution, params.confidence_level, results_key)

        return save_estimates(ftm_estimates, m_state, i, *results), state, m_state, key

    init = ((distance, snr, rate, mcs), agent.init(init_key), measurements_manager.init(), key)
    (distance, snr, rate, mcs), *_ = jax.lax.fori_loop(0, frames_total, fori_fn, init)

    return SimulationResults(
        time=time,
        distance=distance,
        snr=snr,
        mcs=mcs,
        rate=rate
    )
@jax.jit
def save_estimates(
        ftm_estimates: FTMEstimates,
        m_state: MeasurementState,
        i: jnp.int32,
        distance: Array,
        snr: Array,
        rate: Array,
        mcs: Array
) -> Tuple:
    """
    Record frame *i*'s measurement and estimates into the result carriers
    using JAX functional updates (.at[i].set) and return the updated carriers.

    NOTE(review): distance/snr/rate/mcs are dicts of Arrays despite the Array
    annotations — confirm before tightening the type hints.
    """
    distance['measurement'] = distance['measurement'].at[i].set(m_state.distance)
    distance['estimated'] = distance['estimated'].at[i].set(ftm_estimates.distance_estimated)
    distance['uncertainty'] = distance['uncertainty'].at[i].set(ftm_estimates.distance_uncertainty)
    distance['ci_low'] = distance['ci_low'].at[i].set(ftm_estimates.distance_ci_low)
    distance['ci_high'] = distance['ci_high'].at[i].set(ftm_estimates.distance_ci_high)

    snr['estimated'] = snr['estimated'].at[i].set(ftm_estimates.snr_estimated)
    snr['uncertainty'] = snr['uncertainty'].at[i].set(ftm_estimates.snr_uncertainty)
    snr['ci_low'] = snr['ci_low'].at[i].set(ftm_estimates.snr_ci_low)
    snr['ci_high'] = snr['ci_high'].at[i].set(ftm_estimates.snr_ci_high)

    rate['estimated'] = rate['estimated'].at[i].set(ftm_estimates.rate_estimated)
    rate['uncertainty'] = rate['uncertainty'].at[i].set(ftm_estimates.rate_uncertainty)

    mcs['estimated'] = mcs['estimated'].at[i].set(ftm_estimates.mcs_estimated)

    return distance, snr, rate, mcs
| ml4wifi-devs/ftmrate | ml4wifi/envs/simple_wifi/ftmrate_sim.py | ftmrate_sim.py | py | 8,913 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "tensorflow_probability.substrates.jax.distributions",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "tensorflow_probability.substrates.jax",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "tensorflow_probability.substrates.jax.bijectors",
... |
35349481168 | '''
asyncio task loop to validate ledger close time and UNL status
using keys stored in an exiting database.
'''
import asyncio
import logging
import supplemental_data.get_data
def sup_data_loop(settings):
    '''
    Run the asyncio event loop that validates ledger close time and UNL
    status, repeating until interrupted with Ctrl-C.

    :param settings: Configuration file
    '''
    loop = asyncio.get_event_loop()
    if settings.ASYNCIO_DEBUG is True:
        loop.set_debug(True)
        logging.info("asyncio debugging enabled.")

    while True:
        try:
            loop.run_until_complete(
                supplemental_data.get_data.DomainVerification().run_verification(settings)
            )
            # NOTE(review): run_forever() blocks until loop.stop() is called,
            # so the while-loop only re-enters after the loop is stopped or an
            # exception escapes — confirm this restart model is intended.
            loop.run_forever()
        except KeyboardInterrupt:
            logging.critical("Keyboard interrupt detected. Exiting supplemental data logging.")
            break
| jscottbranson/xrpl-validation-tracker | xrpl_validation_tracker/supplemental_data/sd_loop.py | sd_loop.py | py | 805 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "asyncio.get_event_loop",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "supplemental_data.get_data.get_data.DomainVerification",
"line_number": 25,
"usage_type": "call"
},... |
7486582215 | from maya import cmds,mel
from PySide import QtGui
from .lib import qt
def cpy():
    """Copy the keys at the current time on Maya's time slider."""
    mel.eval("timeSliderCopyKey;")
def pst():
    """Paste previously copied keys at the current time (no merge)."""
    mel.eval("timeSliderPasteKey false;")
def dlt():
    """Delete the keys at the current time on the time slider."""
    mel.eval("timeSliderClearKey;")
def cut():
    """Cut the keys at the current time on the time slider."""
    mel.eval("timeSliderCutKey;")
class MainWindow(QtGui.QMainWindow):
    """Top-level dialog hosting the time-slider key buttons."""
    def __init__(self,parent=None):
        super(MainWindow, self).__init__(parent)
        self.setWindowTitle('Set Key')
        self.resize(250,100)
        # The button panel is the sole content of the window.
        widget = KeyButton()
        self.setCentralWidget(widget)
class KeyButton(QtGui.QWidget):
    """Vertical panel of Copy / Paste / Delete / Cut key buttons."""

    def __init__(self, *args, **kwargs):
        super(KeyButton, self).__init__(*args, **kwargs)
        vbox = QtGui.QVBoxLayout()
        self.setLayout(vbox)
        # Wire each button label to its time-slider action, in display order.
        for caption, action in (('Copy', cpy), ('Paste', pst),
                                ('Delete', dlt), ('Cut', cut)):
            btn = QtGui.QPushButton(caption)
            btn.clicked.connect(qt.Callback(action))
            vbox.addWidget(btn)
def main():
    # Parent the dialog to the main Maya window so it stays on top of it.
    app = MainWindow(qt.getMayaWindow())
    app.show()
| Mocson/mocTools | pyTest/keyFrameBox.py | keyFrameBox.py | py | 1,299 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "maya.mel.eval",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "maya.mel",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "maya.mel.eval",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "maya.mel",
"line_number": 9,
... |
4346085126 | # Author: Xinshuo Weng
# email: xinshuo.weng@gmail.com
import numpy as np, os, matplotlib.pyplot as plt, colorsys, random, matplotlib.patches as patches
import matplotlib.collections as plycollections
from matplotlib.patches import Ellipse
from skimage.measure import find_contours
# from scipy.stats import norm, chi2
# import matplotlib as mpl; mpl.use('Agg')
# import warnings
# warnings.filterwarnings("ignore", message="numpy.dtype size changed")
# warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
# from warnings import catch_warnings, simplefilter
# with catch_warnings(record=True):
# simplefilter('ignore', FutureWarning)
from .private import save_vis_close_helper, get_fig_ax_helper
from xinshuo_math.private import safe_2dptsarray, safe_bbox
from xinshuo_math import pts_euclidean, bbox_TLBR2TLWH, bboxcheck_TLBR
from xinshuo_miscellaneous import islogical, islist, isstring, is2dptsarray_confidence, is2dptsarray_occlusion, is2dptsarray, isdict, list_reorder, list2tuple, islistofstring, ifconfscalar, isscalar, isnparray
from xinshuo_io import mkdir_if_missing, save_image
# Shared drawing palettes and style cycles used by the visualize_* helpers.
color_set = ['r', 'b', 'g', 'c', 'm', 'y', 'k', 'w', 'lime', 'cyan', 'aqua']
color_set_big = ['aqua', 'azure', 'red', 'black', 'blue', 'brown', 'cyan', 'darkblue', 'fuchsia', 'gold', 'green', 'grey', 'indigo', 'magenta', 'lime', 'yellow', 'white', 'tomato', 'salmon']
marker_set = ['o', 'v', '^', '<', '>', '1', '2', '3', '4', '8', 's', 'p', '*', 'h', 'H', '+', 'x', 'D', 'd']
hatch_set = [None, 'o', '/', '\\', '|', '-', '+', '*', 'x', 'O', '.']
linestyle_set = ['-', '--', '-.', ':', None, ' ', 'solid', 'dashed']
dpi = 80        # default figure resolution
def visualize_bbox(input_bbox, linewidth=0.5, edge_color_index=15, scores=None, threshold=0.0, textsize=8,
    fig=None, ax=None, save_path=None, vis=False, warning=True, debug=True, closefig=True):
    '''
    visualize a set of bounding boxes

    parameters:
        input_bbox:     a list of 4 elements, a listoflist of 4 elements: e.g., [[1,2,3,4], [5,6,7,8]],
                        a numpy array with shape or (N, 4) or (4, ), TLBR format
        scores:         a list of floating numbers representing the confidences
        threshold:      boxes whose score is below this value are skipped
    '''
    if islist(input_bbox) and len(input_bbox) == 0:
        return save_vis_close_helper(fig=fig, ax=ax, vis=vis, save_path=save_path, warning=warning, debug=debug, closefig=closefig)
    elif isnparray(input_bbox) and input_bbox.size == 0:
        return save_vis_close_helper(fig=fig, ax=ax, vis=vis, save_path=save_path, warning=warning, debug=debug, closefig=closefig)
    np_bboxes = safe_bbox(input_bbox, warning=warning, debug=debug)
    if debug: assert bboxcheck_TLBR(np_bboxes, warning=warning, debug=debug), 'input bounding boxes are not correct'
    # Fix: create a figure/axes when none was passed in. The original
    # dereferenced `ax` directly and crashed when called without an axes,
    # unlike its sibling helpers which all call get_fig_ax_helper first.
    fig, ax = get_fig_ax_helper(fig=fig, ax=ax)
    edge_color = color_set_big[edge_color_index % len(color_set_big)]

    np_bboxes = bbox_TLBR2TLWH(np_bboxes, warning=warning, debug=debug)     # convert TLBR format to TLWH format
    for bbox_index in range(np_bboxes.shape[0]):
        bbox_tmp = np_bboxes[bbox_index, :]
        if scores is not None:
            score = float(scores[bbox_index])
            if score < threshold: continue          # skip low-confidence boxes
            caption = '{:.2f}'.format(score)
            ax.text(bbox_tmp[0], bbox_tmp[1] + textsize, caption, color='r', size=textsize, backgroundcolor='none')
        ax.add_patch(plt.Rectangle((bbox_tmp[0], bbox_tmp[1]), bbox_tmp[2], bbox_tmp[3], fill=False, edgecolor=edge_color, linewidth=linewidth))
    return save_vis_close_helper(fig=fig, ax=ax, vis=vis, save_path=save_path, warning=warning, debug=debug, closefig=closefig)
def visualize_pts_array(input_pts, color_index=0, pts_size=20, label=False, label_list=None, label_size=20, vis_threshold=0.3,
    covariance=False, plot_occl=False, xlim=None, ylim=None,
    fig=None, ax=None, save_path=None, vis=False, warning=True, debug=True, closefig=True):
    '''
    plot keypoints with covariance ellipse

    parameters:
        pts_array:      2(3) x num_pts numpy array, the third channel could be confidence or occlusion
    '''
    # obtain the points
    try: pts_array = safe_2dptsarray(input_pts, homogeneous=True, warning=warning, debug=debug)
    except AssertionError: pts_array = safe_2dptsarray(input_pts, homogeneous=False, warning=warning, debug=debug)
    if debug: assert is2dptsarray(pts_array) or is2dptsarray_occlusion(pts_array) or is2dptsarray_confidence(pts_array), 'input points are not correct'
    num_pts = pts_array.shape[1]

    # obtain a label list if required but not provided
    if debug: assert islogical(label), 'label flag is not correct'
    # Fix: xrange is Python-2-only and raises NameError on Python 3; range
    # behaves identically here on both versions.
    if label and (label_list is None): label_list = [str(i) for i in range(num_pts)]
    if label_list is not None and debug: assert islistofstring(label_list), 'labels are not correct'

    # obtain the color index
    if islist(color_index):
        if debug: assert not (plot_occl or covariance) , 'the occlusion or covariance are not compatible with plotting different colors during scattering'
        color_tmp = [color_set_big[index_tmp] for index_tmp in color_index]
    else: color_tmp = color_set_big[color_index % len(color_set_big)]

    fig, ax = get_fig_ax_helper(fig=fig, ax=ax)
    std, conf = None, 0.95
    if is2dptsarray(pts_array):         # only 2d points without third rows
        if debug and islist(color_tmp): assert len(color_tmp) == num_pts, 'number of points to plot is not equal to number of colors provided'
        ax.scatter(pts_array[0, :], pts_array[1, :], color=color_tmp, s=pts_size)
        pts_visible_index = range(pts_array.shape[1])
        pts_ignore_index = []
        pts_invisible_index = []
    else:
        # automatically justify if the third row is confidence or occlusion flag
        num_float_elements = np.where(np.logical_and(pts_array[2, :] != -1, np.logical_and(pts_array[2, :] != 0, pts_array[2, :] != 1)))[0].tolist()
        if len(num_float_elements) > 0: type_3row = 'conf'
        else: type_3row = 'occu'

        if type_3row == 'occu':
            pts_visible_index = np.where(pts_array[2, :] == 1)[0].tolist()          # plot visible points in red color
            pts_ignore_index = np.where(pts_array[2, :] == -1)[0].tolist()          # do not plot points with annotation, usually visible, but not annotated
            pts_invisible_index = np.where(pts_array[2, :] == 0)[0].tolist()        # plot invisible points in blue color
        else:
            pts_visible_index = np.where(pts_array[2, :] > vis_threshold)[0].tolist()
            pts_invisible_index = np.where(pts_array[2, :] <= vis_threshold)[0].tolist()
            pts_ignore_index = []

        if debug and islist(color_tmp): assert len(color_tmp) == len(pts_visible_index), 'number of points to plot is not equal to number of colors provided'
        ax.scatter(pts_array[0, pts_visible_index], pts_array[1, pts_visible_index], color=color_tmp, s=pts_size)
        if plot_occl: ax.scatter(pts_array[0, pts_invisible_index], pts_array[1, pts_invisible_index], color=color_set_big[(color_index+1) % len(color_set_big)], s=pts_size)
        if covariance: visualize_pts_covariance(pts_array[0:2, :], std=std, conf=conf, fig=fig, ax=ax, debug=debug, color=color_tmp)

    if plot_occl: not_plot_index = pts_ignore_index
    else: not_plot_index = pts_ignore_index + pts_invisible_index
    if label_list is not None:
        for pts_index in range(num_pts):        # Fix: was xrange (Python 2 only)
            label_tmp = label_list[pts_index]
            if pts_index in not_plot_index: continue
            else:
                # note that the annotation is based on the coordinate instead of the order of plotting the points, so the order in pts_index does not matter
                if islist(color_index): plt.annotate(label_tmp, xy=(pts_array[0, pts_index], pts_array[1, pts_index]), xytext=(-1, 1), color=color_set_big[(color_index[pts_index]+5) % len(color_set_big)], textcoords='offset points', ha='right', va='bottom', fontsize=label_size)
                else: plt.annotate(label_tmp, xy=(pts_array[0, pts_index], pts_array[1, pts_index]), xytext=(-1, 1), color=color_set_big[(color_index+5) % len(color_set_big)], textcoords='offset points', ha='right', va='bottom', fontsize=label_size)

    # set axis
    if xlim is not None:
        if debug: assert islist(xlim) and len(xlim) == 2, 'the x lim is not correct'
        plt.xlim(xlim[0], xlim[1])
    if ylim is not None:
        if debug: assert islist(ylim) and len(ylim) == 2, 'the y lim is not correct'
        plt.ylim(ylim[0], ylim[1])

    return save_vis_close_helper(fig=fig, ax=ax, vis=vis, save_path=save_path, warning=warning, debug=debug, closefig=closefig, transparent=False)
def visualize_lines(lines_array, color_index=0, line_width=3, fig=None, ax=None, vis=True, save_path=None, debug=True, closefig=True, warning=True):
    '''
    plot lines

    parameters:
        lines_array:        4 x num_lines, each column denotes (x1, y1, x2, y2)
        warning:            forwarded to the save/close helper. Appended as a
                            trailing keyword so existing positional callers
                            keep working; the original body referenced an
                            undefined name `warning` (NameError).
    '''
    # NOTE(review): islinesarray is not among the visible imports — confirm it
    # is provided elsewhere before relying on this debug path.
    if debug: assert islinesarray(lines_array), 'input array of lines are not correct'
    fig, ax = get_fig_ax_helper(fig=fig, ax=ax)

    # Collect all segments and draw them in one LineCollection.
    num_lines = lines_array.shape[1]
    lines_all = []
    for line_index in range(num_lines):
        line_tmp = lines_array[:, line_index]
        lines_all.append([tuple([line_tmp[0], line_tmp[1]]), tuple([line_tmp[2], line_tmp[3]])])

    line_col = plycollections.LineCollection(lines_all, linewidths=line_width, colors=color_set[color_index])
    ax.add_collection(line_col)
    return save_vis_close_helper(fig=fig, ax=ax, vis=vis, save_path=save_path, warning=warning, debug=debug, closefig=closefig)
def visualize_pts_line(pts_array, line_index_list, method=2, seed=0, alpha=0.5,
    vis_threshold=0.3, pts_size=20, line_size=10, line_color_index=0,
    fig=None, ax=None, save_path=None, vis=False, warning=True, debug=True, closefig=True):
    '''
    given a list of index, and a point array, to plot a set of points with line on it

    parameters:
        pts_array:          2(3) x num_pts
        line_index_list:    a list of index
        method:             1: all points are connected, if some points are missing in the middle, just ignore that point and connect the two nearby points
                            2: if some points are missing in the middle of a line, the line is decomposed to sub-lines
        vis_threshold:      confidence to draw the points
    '''
    if debug:
        assert is2dptsarray(pts_array) or is2dptsarray_occlusion(pts_array) or is2dptsarray_confidence(pts_array), 'input points are not correct'
        assert islist(line_index_list), 'the list of index is not correct'
        assert method in [1, 2], 'the plot method is not correct'
    num_pts = pts_array.shape[1]
    # expand the pts_array to 3 rows if the confidence row is not provided
    if pts_array.shape[0] == 2: pts_array = np.vstack((pts_array, np.ones((1, num_pts))))
    fig, ax = get_fig_ax_helper(fig=fig, ax=ax)
    # Seeded RNG so per-point colors are reproducible across calls.
    np.random.seed(seed)
    color_option = 'hsv'

    if color_option == 'rgb': color_set_random = np.random.rand(3, num_pts)
    elif color_option == 'hsv':
        # Random hue, full saturation/value: distinct but bright colors.
        h_random = np.random.rand(num_pts, )
        color_set_random = np.zeros((3, num_pts), dtype='float32')
        for pts_index in range(num_pts): color_set_random[:, pts_index] = colorsys.hsv_to_rgb(h_random[pts_index], 1, 1)

    line_color = color_set[line_color_index]
    pts_line = pts_array[:, line_index_list]

    if method == 1:
        # method 1: one polyline through every sufficiently-confident point.
        valid_pts_list = np.where(pts_line[2, :] > vis_threshold)[0].tolist()
        pts_line_tmp = pts_line[:, valid_pts_list]
        ax.plot(pts_line_tmp[0, :], pts_line_tmp[1, :], lw=line_size, color=line_color, alpha=alpha)        # plot all lines

        # plot all points
        for pts_index in valid_pts_list:
            pts_index_original = line_index_list[pts_index]
            # ax.plot(pts_array[0, pts_index_original], pts_array[1, pts_index_original], 'o', color=color_set_big[pts_index_original % len(color_set_big)], alpha=alpha)
            ax.plot(pts_array[0, pts_index_original], pts_array[1, pts_index_original], marker='o', ms=pts_size, lw=line_size, color=color_set_random[:, pts_index], alpha=alpha)
    else:
        # method 2: break the polyline into sub-lines at every low-confidence point.
        not_valid_pts_list = np.where(pts_line[2, :] < vis_threshold)[0].tolist()
        if len(not_valid_pts_list) == 0:            # all valid
            ax.plot(pts_line[0, :], pts_line[1, :], lw=line_size, color=line_color, alpha=alpha)

            # plot points
            for pts_index in line_index_list:
                # ax.plot(pts_array[0, pts_index], pts_array[1, pts_index], 'o', color=color_set_big[pts_index % len(color_set_big)], alpha=alpha)
                ax.plot(pts_array[0, pts_index], pts_array[1, pts_index], marker='o', ms=pts_size, lw=line_size, color=color_set_random[:, pts_index], alpha=alpha)
        else:
            prev_index = 0
            for not_valid_index in not_valid_pts_list:
                plot_list = range(prev_index, not_valid_index)
                pts_line_tmp = pts_line[:, plot_list]
                ax.plot(pts_line_tmp[0, :], pts_line_tmp[1, :], lw=line_size, color=line_color, alpha=alpha)

                # plot points
                for pts_index in plot_list:
                    pts_index_original = line_index_list[pts_index]
                    ax.plot(pts_array[0, pts_index_original], pts_array[1, pts_index_original], marker='o', ms=pts_size, lw=line_size, color=color_set_random[:, pts_index_original], alpha=alpha)
                    # ax.plot(pts_array[0, pts_index_original], pts_array[1, pts_index_original], 'o', color=color_set_big[pts_index_original % len(color_set_big)], alpha=alpha)

                prev_index = not_valid_index + 1

            pts_line_tmp = pts_line[:, prev_index:]
            ax.plot(pts_line_tmp[0, :], pts_line_tmp[1, :], lw=line_size, color=line_color, alpha=alpha)            # plot last line

            # plot last points
            for pts_index in range(prev_index, pts_line.shape[1]):
                pts_index_original = line_index_list[pts_index]
                # ax.plot(pts_array[0, pts_index_original], pts_array[1, pts_index_original], 'o', color=color_set_big[pts_index_original % len(color_set_big)], alpha=alpha)
                ax.plot(pts_array[0, pts_index_original], pts_array[1, pts_index_original], marker='o', ms=pts_size, lw=line_size, color=color_set_random[:, pts_index_original], alpha=alpha)

    return save_vis_close_helper(fig=fig, ax=ax, vis=vis, save_path=save_path, warning=warning, debug=debug, closefig=closefig)
def visualize_pts_covariance(pts_array, conf=None, std=None, fig=None, ax=None, debug=True, **kwargs):
    """
    Plots an `nstd` sigma ellipse based on the mean and covariance of a point
    "cloud" (points, an Nx2 array).

    Parameters
    ----------
        pts_array       : 2 x N numpy array of the data points.
        std             : The radius of the ellipse in numbers of standard deviations.
            Defaults to 2 standard deviations.
        ax              : The axis that the ellipse will be plotted on. Defaults to the
            current axis.
        Additional keyword arguments are pass on to the ellipse patch.

    Returns
    -------
        A tuple of (matplotlib ellipse artist, scalar spread derived from the
        diagonal of the covariance matrix)
    """
    if debug:
        # NOTE(review): print_np_shape and ispositiveinteger are not among the
        # visible imports — confirm they exist at runtime.
        assert is2dptsarray(pts_array), 'input points are not correct: (2, num_pts) vs %s' % print_np_shape(pts_array)
        if conf is not None: assert ifconfscalar(conf), 'the confidence is not in a good range'
        if std is not None: assert ispositiveinteger(std), 'the number of standard deviation should be a positive integer'

    # Transpose to N x 2 so mean/cov operate over the point axis.
    pts_array = np.transpose(pts_array)
    center = pts_array.mean(axis=0)
    covariance = np.cov(pts_array, rowvar=False)
    return visualize_covariance_ellipse(covariance=covariance, center=center, conf=conf, std=std, fig=fig, ax=ax, debug=debug, **kwargs), np.sqrt(covariance[0, 0]**2 + covariance[1, 1]**2)
def visualize_covariance_ellipse(covariance, center, conf=None, std=None, fig=None, ax=None, debug=True, **kwargs):
    """
    Plots an `nstd` sigma error ellipse based on the specified covariance
    matrix (`cov`). Additional keyword arguments are passed on to the
    ellipse patch artist.

    Parameters
        covariance      : The 2x2 covariance matrix to base the ellipse on
        center          : The location of the center of the ellipse. Expects a 2-element sequence of [x0, y0].
        conf            : a floating number between [0, 1]
        std             : The radius of the ellipse in numbers of standard deviations. Defaults to 2 standard deviations.
        ax              : The axis that the ellipse will be plotted on. Defaults to the current axis.

    Returns
        A covariance ellipse
    """
    # Fix: the module-level `from scipy.stats import norm, chi2` is commented
    # out, so `norm.cdf` / `chi2.ppf` below raised NameError; import locally.
    from scipy.stats import norm, chi2

    if debug:
        if conf is not None: assert isscalar(conf) and conf >= 0 and conf <= 1, 'the confidence is not in a good range'
        if std is not None: assert ispositiveinteger(std), 'the number of standard deviation should be a positive integer'
    fig, ax = get_fig_ax_helper(fig=fig, ax=ax)

    def eigsorted(covariance):
        # Eigen-decomposition of the symmetric covariance matrix.
        vals, vecs = np.linalg.eigh(covariance)
        return vals, vecs

    # Convert std (sigmas) to a two-sided confidence level when needed.
    if conf is not None: conf = np.asarray(conf)
    elif std is not None: conf = 2 * norm.cdf(std) - 1
    else: raise ValueError('One of `conf` and `std` should be specified.')
    r2 = chi2.ppf(conf, 2)      # chi-squared quantile with 2 dof

    vals, vecs = eigsorted(covariance)
    theta = np.degrees(np.arctan2(*vecs[:,0][::-1]))

    # Width and height are "full" widths, not radius
    width, height = 2 * np.sqrt(np.sqrt(vals) * r2)
    ellipse = Ellipse(xy=center, width=width, height=height, angle=theta, **kwargs)
    ellipse.set_facecolor('none')
    ax.add_artist(ellipse)
    return ellipse
def visualize_pts(pts, title=None, fig=None, ax=None, display_range=False, xlim=[-100, 100], ylim=[-100, 100], display_list=None, covariance=False, mse=False, mse_value=None, vis=True, save_path=None, debug=True, closefig=True):
'''
visualize point scatter plot
parameter:
pts: 2 x num_pts numpy array or a dictionary containing 2 x num_pts numpy array
'''
if debug:
if isdict(pts):
for pts_tmp in pts.values(): assert is2dptsarray(pts_tmp) , 'input points within dictionary are not correct: (2, num_pts) vs %s' % print_np_shape(pts_tmp)
if display_list is not None:
assert islist(display_list) and len(display_list) == len(pts), 'the input display list is not correct'
assert CHECK_EQ_LIST_UNORDERED(display_list, pts.keys(), debug=debug), 'the input display list does not match the points key list'
else: display_list = pts.keys()
else: assert is2dptsarray(pts), 'input points are not correct: (2, num_pts) vs %s' % print_np_shape(pts)
if title is not None: assert isstring(title), 'title is not correct'
else: title = 'Point Error Vector Distribution Map'
assert islogical(display_range), 'the flag determine if to display in a specific range should be logical value'
if display_range:
assert islist(xlim) and islist(ylim) and len(xlim) == 2 and len(ylim) == 2, 'the input range for x and y is not correct'
assert xlim[1] > xlim[0] and ylim[1] > ylim[0], 'the input range for x and y is not correct'
# figure setting
width, height = 1024, 1024
fig, _ = get_fig_ax_helper(fig=fig, ax=ax, width=width, height=height)
if ax is None:
plt.title(title, fontsize=20)
if isdict(pts):
num_pts_all = pts.values()[0].shape[1]
if all(pts_tmp.shape[1] == num_pts_all for pts_tmp in pts.values()):
plt.xlabel('x coordinate (%d points)' % pts.values()[0].shape[1], fontsize=16)
plt.ylabel('y coordinate (%d points)' % pts.values()[0].shape[1], fontsize=16)
else:
print('number of points is different across different methods')
plt.xlabel('x coordinate', fontsize=16)
plt.ylabel('y coordinate', fontsize=16)
else:
plt.xlabel('x coordinate (%d points)' % pts.shape[1], fontsize=16)
plt.ylabel('y coordinate (%d points)' % pts.shape[1], fontsize=16)
plt.axis('equal')
ax = plt.gca()
ax.grid()
# internal parameters
pts_size = 5
std = None
conf = 0.98
color_index = 0
marker_index = 0
hatch_index = 0
alpha = 0.2
legend_fontsize = 10
scale_distance = 48.8
linewidth = 2
# plot points
handle_dict = dict() # for legend
if isdict(pts):
num_methods = len(pts)
assert len(color_set) * len(marker_set) >= num_methods and len(color_set) * len(hatch_set) >= num_methods, 'color in color set is not enough to use, please use different markers'
mse_return = dict()
for method_name, pts_tmp in pts.items():
color_tmp = color_set[color_index]
marker_tmp = marker_set[marker_index]
hatch_tmp = hatch_set[hatch_index]
# plot covariance ellipse
if covariance: _, covariance_number = visualize_pts_covariance(pts_tmp[0:2, :], std=std, conf=conf, ax=ax, debug=debug, color=color_tmp, hatch=hatch_tmp, linewidth=linewidth)
handle_tmp = ax.scatter(pts_tmp[0, :], pts_tmp[1, :], color=color_tmp, marker=marker_tmp, s=pts_size, alpha=alpha)
if mse:
if mse_value is None:
num_pts = pts_tmp.shape[1]
mse_tmp, _ = pts_euclidean(pts_tmp[0:2, :], np.zeros((2, num_pts), dtype='float32'), debug=debug)
else:
mse_tmp = mse_value[method_name]
display_string = '%s, MSE: %.1f (%.1f um), Covariance: %.1f' % (method_name, mse_tmp, mse_tmp * scale_distance, covariance_number)
mse_return[method_name] = mse_tmp
else: display_string = method_name
handle_dict[display_string] = handle_tmp
color_index += 1
if color_index / len(color_set) == 1:
marker_index += 1
hatch_index += 1
color_index = color_index % len(color_set)
# reorder the handle before plot
handle_key_list = handle_dict.keys()
handle_value_list = handle_dict.values()
order_index_list = [display_list.index(method_name_tmp.split(', ')[0]) for method_name_tmp in handle_dict.keys()]
ordered_handle_key_list = list_reorder(handle_key_list, order_index_list, debug=debug)
ordered_handle_value_list = list_reorder(handle_value_list, order_index_list, debug=debug)
plt.legend(list2tuple(ordered_handle_value_list), list2tuple(ordered_handle_key_list), scatterpoints=1, markerscale=4, loc='lower left', fontsize=legend_fontsize)
else:
color_tmp = color_set[color_index]
marker_tmp = marker_set[marker_index]
hatch_tmp = hatch_set[hatch_index]
handle_tmp = ax.scatter(pts[0, :], pts[1, :], color=color_tmp, marker=marker_tmp, s=pts_size, alpha=alpha)
# plot covariance ellipse
if covariance: _, covariance_number = visualize_pts_covariance(pts[0:2, :], std=std, conf=conf, ax=ax, debug=debug, color=color_tmp, hatch=hatch_tmp, linewidth=linewidth)
if mse:
if mse_value is None:
num_pts = pts.shape[1]
mse_tmp, _ = pts_euclidean(pts[0:2, :], np.zeros((2, num_pts), dtype='float32'), debug=debug)
display_string = 'MSE: %.1f (%.1f um), Covariance: %.1f' % (mse_tmp, mse_tmp * scale_distance, covariance_number)
mse_return = mse_tmp
else:
display_string = 'MSE: %.1f (%.1f um), Covariance: %.1f' % (mse_value, mse_value * scale_distance, covariance_number)
mse_return = mse_value
handle_dict[display_string] = handle_tmp
plt.legend(list2tuple(handle_dict.values()), list2tuple(handle_dict.keys()), scatterpoints=1, markerscale=4, loc='lower left', fontsize=legend_fontsize)
# display only specific range
if display_range:
axis_bin = 10 * 2
interval_x = (xlim[1] - xlim[0]) / axis_bin
interval_y = (ylim[1] - ylim[0]) / axis_bin
plt.xlim(xlim[0], xlim[1])
plt.ylim(ylim[0], ylim[1])
plt.xticks(np.arange(xlim[0], xlim[1] + interval_x, interval_x))
plt.yticks(np.arange(ylim[0], ylim[1] + interval_y, interval_y))
plt.grid()
save_vis_close_helper(fig=fig, ax=ax, vis=vis, save_path=save_path, warning=warning, debug=debug, closefig=closefig, transparent=False)
return mse_return
def random_colors(N, bright=True):
"""
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / float(N), 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
# random.shuffle(colors)
return colors
def apply_mask(image, mask, color, alpha=0.5):
"""Apply the given mask to the image.
"""
for c in range(3):
image[:, :, c] = np.where(mask == 1, image[:, :, c] * (1 - alpha) + alpha * color[c] * 255, image[:, :, c])
return image
def visualize_image_with_bbox_mask(image, boxes, masks, class_ids, class_names, class_to_plot=None, scores=None, alpha=0.7, fig=None, ax=None, color_list=None, title='Mask & Bounding Box Visualization'):
"""
visualize the image with bbox and mask (and text and score)
parameters:
boxes: [num_instance, (x1, y1, x2, y2, class_id)] in image coordinates.
masks: [height, width, num_instances], numpy images, range in [0, 1]
class_ids: [num_instances]
class_names: list of class names of the dataset
scores: (optional) confidence scores for each box
class_to_plot: list of class index in the class_names to plot
title:
"""
max_numinstances = 20
if class_to_plot is None: class_to_plot = range(len(class_names))
num_instances = boxes.shape[0] # Number of instances
if not num_instances: print("\n*** No instances to display *** \n")
else: assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
colors = random_colors(max_numinstances) # Generate random colors
if color_list is None: color_list = range(num_instances)
height, width = image.shape[:2]
# print(height)
# print(width)
# zxc
fig, _ = get_fig_ax_helper(fig=fig, ax=ax, width=width, height=height)
ax = fig.add_axes([0, 0, 1, 1])
ax.axis('off')
# if not ax: fig = plt.figure(figsize=(16, 16))
# ax = fig.add_axes([0, 0, 1, 0.5])
# ax.axis('off')
ax.set_title(title)
masked_image = image.astype(np.uint8).copy()
# print(masked_image.shape)
# save_image(masked_image, '/home/xinshuo/test.jpg')
# tmp_dir = '/home/xinshuo/Workspace/junk/vis_individual'
for instance_index in range(num_instances):
color = colors[color_list[instance_index] % max_numinstances]
# print(color)
# zxc
# skip to visualize the class we do not care
class_id = class_ids[instance_index]
if not (class_id in class_to_plot): continue
# zxc
# visualize the bbox
if not np.any(boxes[instance_index]): continue # Skip this instance. Has no bbox. Likely lost in image cropping.
x1, y1, x2, y2 = boxes[instance_index]
# print(x1)
# print(y1)
# print(x2)
# print(y2)
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, alpha=alpha, edgecolor=color, facecolor='none')
ax.add_patch(p)
# add the text and score
score = scores[instance_index] if scores is not None else None
label = class_names[class_id]
x = random.randint(x1, (x1 + x2) // 2)
caption = "{} {:.2f}".format(label, score) if score else label
ax.text(x1, y1 + 8, caption, color='w', size=8, backgroundcolor='none')
# add the mask
mask = masks[:, :, instance_index]
# print(np.max(mask))
# print(np.min(mask))
# save_image(mask, '/home/xinshuo/test.jpg')
# zxc
masked_image = apply_mask(masked_image, mask, color)
# save_image(masked_image, '/home/xinshuo/test%d.jpg' % instance_index)
# zxc
# save the individual mask one by one
# save_tmp_dir = os.path.join(tmp_dir, 'instance_%04d.jpg' % instance_index); mkdir_if_missing(save_tmp_dir)
# save_image(masked_image.astype('uint8'), save_path=save_tmp_dir)
# add the contour of the mask
padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
verts = np.fliplr(verts) - 1 # Subtract the padding and flip (y, x) to (x, y)
p = patches.Polygon(verts, facecolor="none", edgecolor=color, alpha=alpha)
ax.add_patch(p)
# zxc
# print(masked_image.shape)
# zxc
ax.imshow(masked_image.astype(np.uint8))
ax.set(xlim=[0, width], ylim=[height, 0], aspect=1)
return fig, ax | xinshuoweng/Xinshuo_PyToolbox | xinshuo_visualization/geometry_vis.py | geometry_vis.py | py | 29,886 | python | en | code | 61 | github-code | 1 | [
{
"api_name": "xinshuo_miscellaneous.islist",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "private.save_vis_close_helper",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "xinshuo_miscellaneous.isnparray",
"line_number": 42,
"usage_type": "call"
},... |
19752234298 | import json
import torch
from SAC.Agent import AgentV2
if __name__ == "__main__":
jsonFilePath = "./cfg/RealTrain.json"
with open(jsonFilePath) as file:
json_dict = json.load(file)
agentDict = json_dict['Agent']
x = AgentV2(agentDict)
rstate = torch.zeros((32, 8))
lidarpt = torch.zeros((32, 1, 360))
image = torch.zeros((32, 1, 96, 96))
y = x.forward((rstate, lidarpt, image))
print("here")
| seungju-mmc/SAC | agenttest.py | agenttest.py | py | 450 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "SAC.Agent.AgentV2",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_numbe... |
29343515477 | import common, sql
import os, json
import subprocess, shlex
from dateutil import parser as dt
import dateutil
import requests, json
import logging, coloredlogs
coloredlogs.install()
ecosystem = 'Composer'
from packaging import version as PythonVersion
def isValidVersion(v):
try:
v = PythonVersion.Version(v)
return True
except:
logging.info(v)
return False
def version_sorting(vers):
temp=[]
for v in vers:
if isValidVersion(v):
temp.append(v)
vers=temp
#https://stackoverflow.com/a/58035970/1445015
return sorted(vers, key=lambda x: PythonVersion.Version(x))
def get_repository_url(package):
repository = None
assert package.count('/') == 1
try:
url = 'https://repo.packagist.org/p2/{}.json'.format(package)
page = requests.get(url)
data = json.loads(page.content)
data = data['packages'][package][0]
data = data['source']['url']
assert data.endswith('.git')
repository = data[:-len('.git')]
# if 'source_code_uri' in data:
# repository = data['source_code_uri']
if not repository:
repository = common.search_for_github_repo(package, data)
except Exception as e:
logging.info(e)
print(package, repository)
if repository:
return repository
else:
return common.norepo
def get_release_info(package, version):
#composer api sends release as properly sorted: https://packagist.org/packages/drupal/core
release_date = prior_release = None
url = 'https://repo.packagist.org/p2/{}.json'.format(package)
print(url)
page = requests.get(url)
if page.status_code == 200:
data = json.loads(page.content)
assert 'packages' in data and package in data['packages']
releases = data['packages'][package]
versions = []
for item in releases:
cur = item['version']
if cur.startswith('v'):
cur = cur[1:]
versions.append(cur)
if version == cur:
if 'time' in item:
release_date = dt.parse(item['time']).astimezone(dateutil.tz.tzutc())
if version in versions:
idx = versions.index(version)
if idx < len(versions) - 1: #oldest
prior_release = versions[idx+1] #sorted from recent to oldest
else:
logging.info(version)
return release_date, prior_release
if __name__=='__main__':
#get repository remote url of packages
packages = common.getPackagesToSearchRepository(ecosystem)
for item in packages:
id, repo = item['id'], get_repository_url(item['name'])
sql.execute('update package set repository_url=%s where id = %s',(repo,id))
#get release info (publish date and prior release) for each fixing release
packages = common.getPackagesToProcessRelease(ecosystem)
for item in packages:
package_id, package, version = item['package_id'], item['package'], item['version']
publish_date, prior_release = get_release_info(package,version)
print(package, package_id, version, publish_date, prior_release)
sql.execute('insert into release_info values(null,%s,%s,%s,%s)',(package_id, version, publish_date, prior_release)) | nasifimtiazohi/secrel | data_explore/composer.py | composer.py | py | 3,367 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "coloredlogs.install",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "packaging.version.Version",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "packaging.version",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "loggin... |
8691161609 | from typing import List
class Solution:
def firstMissingPositive(self, nums: List[int]) -> int:
n = len(nums)
if n == 0 or 1 not in nums:
return 1
i = 0
while i < n:
idx = nums[i] - 1
if 0 <= idx < n and nums[idx] != nums[i]:
nums[i], nums[idx] = nums[idx], nums[i]
else:
i += 1
for i in range(n):
if nums[i] != i + 1:
return i + 1
return n + 1
| songkuixi/LeetCode | Python/First Missing Positive.py | First Missing Positive.py | py | 507 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
}
] |
15994920309 | import re
import collections
def mostCommon(paragraph, banned):
words = [word for word in re.sub(r'[^\w]', ' ', paragraph)
.lower().split()
if word not in banned]
counts = collections.Counter(words)
print(counts)
return counts.most_common(1)[0][0]
if __name__ == "__main__":
paragraph = "Bob hit a ball, the hit BALL flew far after it was hit."
banned = ["hit"]
print(mostCommon(paragraph, banned))
| Kynel/algorithm | python/๋ฌธ์์ด ์กฐ์/code/most_common.py | most_common.py | py | 458 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.sub",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 10,
"usage_type": "call"
}
] |
23519662780 | import os
import logbook
import gossip
from .project import get_project
# from .celery_utils import celery_app
_logger = logbook.Logger(__name__)
_cached_app = None
_building = False
def build_app(*, use_cached=False, config_overrides=None):
from flask import Flask
global _cached_app # pylint: disable=global-statement
global _building # pylint: disable=global-statement
if use_cached and _cached_app is not None:
return _cached_app
if config_overrides is None:
config_overrides = {}
if os.environ.get('COB_TESTING'):
config_overrides.update({'TESTING': True})
if _building:
raise RuntimeError('Attempted to create an app while an app was already being initialized!')
_building = True
try:
proj = get_project()
_logger.debug('Starting app {.name}...', proj)
_cached_app = Flask(get_project().name, static_folder=None, template_folder=None)
_cached_app.config.update(config_overrides)
proj.configure_app(_cached_app)
gossip.trigger_with_tags('cob.after_configure_app', {'app': _cached_app})
_logger.trace('URL map: {}', _cached_app.url_map)
return _cached_app
finally:
_building = False
| getweber/cob | cob/app.py | app.py | py | 1,246 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "logbook.Logger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "project.get_project",
... |
73547403554 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Upsampler(nn.Module):
def __init__(self, in_channels=3, ngf=128):
super(Upsampler, self).__init__()
self.up = nn.Sequential(
nn.ConvTranspose2d(in_channels, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True),
# state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d( ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
# state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d( ngf * 2, 3, 4, 2, 1, bias=False),
nn.Tanh()
# state size. 3 x 32 x 32
)
def forward(self, x):
return self.up(x)
## From: https://github.com/LukeDitria/CNN-VAE
class ResDown(nn.Module):
"""
Residual down sampling block for the encoder
"""
def __init__(self, channel_in, channel_out, kernel_size=3):
super(ResDown, self).__init__()
self.conv1 = nn.Conv2d(channel_in, channel_out // 2, kernel_size, 2, kernel_size // 2)
self.bn1 = nn.GroupNorm(channel_out//2, channel_out//2, affine=True)#nn.BatchNorm2d(channel_out // 2, eps=1e-4)
self.conv2 = nn.Conv2d(channel_out // 2, channel_out, kernel_size, 1, kernel_size // 2)
self.bn2 = nn.GroupNorm(channel_out, channel_out, affine=True)#nn.BatchNorm2d(channel_out, eps=1e-4)
self.conv3 = nn.Conv2d(channel_in, channel_out, kernel_size, 2, kernel_size // 2)
self.act_fnc = nn.ELU()
def forward(self, x):
skip = self.conv3(x)
x = self.act_fnc(self.bn1(self.conv1(x)))
x = self.conv2(x)
return self.act_fnc(self.bn2(x + skip))
class ResUp(nn.Module):
"""
Residual up sampling block for the decoder
"""
def __init__(self, channel_in, channel_out, kernel_size=3, scale_factor=2):
super(ResUp, self).__init__()
self.conv1 = nn.Conv2d(channel_in, channel_out // 2, kernel_size, 1, kernel_size // 2)
self.bn1 = nn.GroupNorm(channel_out//2, channel_out//2, affine=True)#nn.BatchNorm2d(channel_out // 2, eps=1e-4)
self.conv2 = nn.Conv2d(channel_out // 2, channel_out, kernel_size, 1, kernel_size // 2)
self.bn2 = nn.GroupNorm(channel_out, channel_out, affine=True)#nn.BatchNorm2d(channel_out, eps=1e-4)
self.conv3 = nn.Conv2d(channel_in, channel_out, kernel_size, 1, kernel_size // 2)
self.up_nn = nn.Upsample(scale_factor=scale_factor, mode="nearest")
self.act_fnc = nn.ELU()
def forward(self, x):
x = self.up_nn(x)
skip = self.conv3(x)
x = self.act_fnc(self.bn1(self.conv1(x)))
x = self.conv2(x)
return self.act_fnc(self.bn2(x + skip))
class Encoder(nn.Module):
"""
Encoder block
Built for a 3x32x32 image and will result in a latent vector of size z x 1 x 1
As the network is fully convolutional it will work for images LARGER than 64
For images sized 64 * n where n is a power of 2, (1, 2, 4, 8 etc) the latent feature map size will be z x n x n
When in .eval() the Encoder will not sample from the distribution and will instead output mu as the encoding vector
and log_var will be None
"""
def __init__(self, channels, ch=128, latent_channels=512, ae=False):
super(Encoder, self).__init__()
self.ae = ae
self.conv_in = nn.Conv2d(channels, ch, 7, 1, 3)
self.conv_label = nn.Conv2d(10, ch, 7, 1, 3)
self.res_down_block1 = ResDown(ch*2, 4 * ch)
self.res_down_block2 = ResDown(4 * ch, 8 * ch)
self.res_down_block3 = ResDown(8 * ch, 16 * ch)
if self.ae:
self.conv_latent = nn.Conv2d(16 * ch, latent_channels, 4, 1)
else:
self.conv_mu = nn.Conv2d(16 * ch, latent_channels, 4, 1)
self.conv_log_var = nn.Conv2d(16 * ch, latent_channels, 4, 1)
self.act_fnc = nn.ELU()
self.fill = torch.zeros([10, 10, 32, 32]).to('cuda')
for i in range(10):
self.fill[i, i, :, :] = 1
def sample(self, mu, log_var):
std = torch.exp(0.5*log_var)
eps = torch.randn_like(std)
return mu + eps*std
def forward(self, x, y):
y = self.fill[y]
x = self.act_fnc(self.conv_in(x))
y = self.act_fnc(self.conv_label(y))
x = torch.cat([x, y], 1)
x = self.res_down_block1(x) # 16
x = self.res_down_block2(x) # 8
x = self.res_down_block3(x) # 4
if self.ae:
z = self.conv_latent(x)
return z
else:
mu = self.conv_mu(x) # 1
log_var = self.conv_log_var(x) # 1
z = self.sample(mu, log_var)
return mu, log_var, z
class Decoder(nn.Module):
def __init__(self, channels, ch=128, latent_channels=512):
super(Decoder, self).__init__()
self.conv_t_up = nn.ConvTranspose2d(latent_channels, ch * 8, 4, 1)
self.res_up_block1 = ResUp(ch * 8, ch * 4)
self.res_up_block2 = ResUp(ch * 4, ch * 2)
self.res_up_block3 = ResUp(ch * 2, ch)
self.conv_out = nn.Conv2d(ch, channels, 3, 1, 1)
self.act_fnc = nn.ELU()
def forward(self, x):
x = self.act_fnc(self.conv_t_up(x)) # 4
x = self.res_up_block1(x) # 8
x = self.res_up_block2(x) # 16
x = self.res_up_block3(x) # 32
x = torch.tanh(self.conv_out(x))
return x
class ResVAE(nn.Module):
def __init__(self, channel_in=3, ch=64, latent_size=32, ae=False):
super(ResVAE, self).__init__()
self.ae = ae
self.encoder = Encoder(channel_in, ch=ch, latent_channels=latent_size, ae=self.ae)
self.decoder = Decoder(channel_in, ch=ch, latent_channels=latent_size)
def forward(self, x, y):
if self.ae:
z = self.encoder(x, y)
else:
mu, log_var, z = self.encoder(x, y)
recon_img = self.decoder(z)
if self.ae:
return recon_img, z
return recon_img, mu, log_var, z | julschoen/DC-VAE | vae.py | vae.py | py | 6,085 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
4444262235 | import numpy as np
from tqdm import tqdm
import networkx as nx
import scipy.sparse as sp
import dgl
import random
from time import time
from collections import defaultdict
import warnings
warnings.filterwarnings('ignore')
n_users = 0
n_items = 0
n_entities = 0
n_relations = 0
n_nodes = 0
train_user_set = defaultdict(list)
test_user_set = defaultdict(list)
def read_cf(file_name):
inter_mat = list()
lines = open(file_name, "r").readlines()
for l in lines:
tmps = l.strip()
inters = [int(i) for i in tmps.split(" ")]
u_id, pos_ids = inters[0], inters[1:]
pos_ids = list(set(pos_ids))
for i_id in pos_ids:
inter_mat.append([u_id, i_id])
return np.array(inter_mat)
def remap_item(train_data, test_data):
global n_users, n_items
n_users = max(max(train_data[:, 0]), max(test_data[:, 0])) + 1
n_items = max(max(train_data[:, 1]), max(test_data[:, 1])) + 1
for u_id, i_id in train_data:
train_user_set[int(u_id)].append(int(i_id))
for u_id, i_id in test_data:
test_user_set[int(u_id)].append(int(i_id))
#KGIN์ด tiple ๊ฐ์ ๋๋ฐฐ์ธ๊ฑฐ ๊ฐ์๋ฐใ
def read_triplets(file_name):
global n_entities, n_relations, n_nodes
can_triplets_np = np.loadtxt(file_name, dtype=np.int32)
can_triplets_np = np.unique(can_triplets_np, axis=0)
if args.inverse_r:
# get triplets with inverse direction like <entity, is-aspect-of, item>
inv_triplets_np = can_triplets_np.copy()
inv_triplets_np[:, 0] = can_triplets_np[:, 2]
inv_triplets_np[:, 2] = can_triplets_np[:, 0]
inv_triplets_np[:, 1] = can_triplets_np[:, 1] + max(can_triplets_np[:, 1]) + 1
# consider two additional relations --- 'interact' and 'be interacted'
can_triplets_np[:, 1] = can_triplets_np[:, 1] + 1
inv_triplets_np[:, 1] = inv_triplets_np[:, 1] + 1
# get full version of knowledge graph
triplets = np.concatenate((can_triplets_np, inv_triplets_np), axis=0)
else:
# consider two additional relations --- 'interact'.
can_triplets_np[:, 1] = can_triplets_np[:, 1] + 1
triplets = can_triplets_np.copy()
n_entities = max(max(triplets[:, 0]), max(triplets[:, 2])) + 1 # including items + users
n_nodes = n_entities + n_users
n_relations = max(triplets[:, 1]) + 1
train_kg_dict = defaultdict(list)
for row in triplets:
h, r, t = row
train_kg_dict[h].append((t, r))
cs = []
for ii in range(n_relations):
idx = np.where(triplets[:, 1] == ii)[0]
cs.append(len(list(set(triplets[idx, 0].tolist()))))
cs = np.array(cs)
# cs=cs/np.sum(cs)
# dim=256
# cs=cs*dim
# cs0=np.around(cs)
return triplets, train_kg_dict, cs
def build_graph(train_data, triplets):
# ckg_graph = nx.MultiDiGraph()
rd = defaultdict(list)
print("Begin to load interaction triples ...")
for u_id, i_id in tqdm(train_data, ascii=True):
rd[0].append([u_id, i_id])
print("\nBegin to load knowledge graph triples ...")
for h_id, r_id, t_id in tqdm(triplets, ascii=True):
# ckg_graph.add_edge(h_id, t_id, key=r_id)
rd[r_id].append([h_id, t_id])
return rd
def build_graph_AK(train_data, triplets):
"""build konwledge graph"""
relation_dict = {}
relation_num_dict = {}
for i in range(n_relations):
idx = np.where(triplets[:, 1] == i)[0]
node_pair = triplets[:, [0, 2]][idx]
name = ('item', i, 'item')
relation_dict[name] = (node_pair[:, 0].tolist(), node_pair[:, 1].tolist())
relation_num_dict[name] = len(idx)
graph_AK = dgl.heterograph(relation_dict)
"""build user-item and item-item graph"""
relation_dict_ui = {}
name = ('item', 1, 'user')
relation_dict_ui[name] = (train_data[:, 1], train_data[:, 0])
name_graph_UIS = {'user': n_users, 'item': n_items}
graph_UIS = dgl.heterograph(relation_dict_ui, name_graph_UIS)
return graph_AK, graph_UIS, relation_num_dict
def build_sparse_relational_graph(relation_dict):
def _bi_norm_lap(adj):
# D^{-1/2}AD^{-1/2}
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
# bi_lap = adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt)
bi_lap = d_mat_inv_sqrt.dot(adj).dot(d_mat_inv_sqrt)
return bi_lap.tocoo()
def _si_norm_lap(adj):
# D^{-1}A
rowsum = np.array(adj.sum(1))
d_inv = np.power(rowsum, -1).flatten()
d_inv[np.isinf(d_inv)] = 0.
d_mat_inv = sp.diags(d_inv)
norm_adj = d_mat_inv.dot(adj)
return norm_adj.tocoo()
adj_mat_list = []
print("Begin to build sparse relation matrix ...")
for r_id in tqdm(relation_dict.keys()):
np_mat = np.array(relation_dict[r_id])
if r_id == 0:
cf = np_mat.copy()
cf[:, 1] = cf[:, 1] + n_users # [0, n_items) -> [n_users, n_users+n_items)
vals = [1.] * len(cf)
adj = sp.coo_matrix((vals, (cf[:, 0], cf[:, 1])), shape=(n_nodes, n_nodes))
else:
vals = [1.] * len(np_mat)
adj = sp.coo_matrix((vals, (np_mat[:, 0], np_mat[:, 1])), shape=(n_nodes, n_nodes))
adj_mat_list.append(adj)
norm_mat_list = [_bi_norm_lap(mat) for mat in adj_mat_list]
mean_mat_list = [_si_norm_lap(mat) for mat in adj_mat_list]
# interaction: user->item, [n_users, n_entities]
norm_mat_list[0] = norm_mat_list[0].tocsr()[:n_users, n_users:].tocoo()
mean_mat_list[0] = mean_mat_list[0].tocsr()[:n_users, n_users:].tocoo()
return adj_mat_list, norm_mat_list, mean_mat_list
def load_data(model_args, device):
global args
args = model_args
device = device
directory = args.data_path + args.dataset + '/'
print('reading train and test user-item set ...')
train_cf = read_cf(directory + 'train.txt')
test_cf = read_cf(directory + 'test.txt')
remap_item(train_cf, test_cf)
print('combinating train_cf and kg data ...')
triplets, train_kg_dict, r_num = read_triplets(directory + 'kg_final.txt')
print('building the graph ...')
relation_dict = build_graph(train_cf, triplets)
graph_AK, graph_UIS,relation_num_dict = build_graph_AK(train_cf, triplets)
graph_AK = graph_AK.to(device)
print('building the adj mat ...')
adj_mat_list, norm_mat_list, mean_mat_list = build_sparse_relational_graph(relation_dict)
n_params = {
'n_users': int(n_users),
'n_items': int(n_items),
'n_entities': int(n_entities),
'n_nodes': int(n_nodes),
'n_relations': int(n_relations),
'n_kg_train': len(triplets),
'num_r': r_num,
'num_r0':relation_num_dict
}
user_dict = {
'train_user_set': train_user_set,
'test_user_set': test_user_set
}
return train_cf, test_cf, user_dict, train_kg_dict, n_params, graph_AK, graph_UIS, \
[adj_mat_list, norm_mat_list, mean_mat_list]
| gayeon603/kgin | utils/data_loader.py | data_loader.py | py | 7,149 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 19,
"usage_type": "call"
},
{
"api_name"... |
4607138870 | import tkinter as tk
import tkinter.ttk as ttk
from tkinter import filedialog
from PIL import Image, ImageTk, ImageFile
import subprocess
import os
import json
from constants import *
from logic import get_page_maps
ImageFile.LOAD_TRUNCATED_IMAGES = True
class ToolBar(tk.Frame):
def __init__(self, master, vars, **kwargs):
super().__init__(master, **kwargs)
self.vars = vars
self.search = ""
left_frame = tk.Frame(self)
left_frame.pack(side= tk.LEFT)
center_frame = tk.Frame(self)
center_frame.pack(side= tk.RIGHT)
search_entry = tk.Entry(left_frame, textvariable=self.vars["search_word"])
search_button = ttk.Button(left_frame, text="ๆ็ดข", width=6)
kind_selector = ttk.Combobox(left_frame, width=15, values=SUGGEST,
textvariable=self.vars["kind_selector"])
search_reset = ttk.Button(left_frame, text="้็ฝฎ", width=6)
left_button = ttk.Button(center_frame, text="ไธไธ้กต")
page_label = tk.Label(center_frame, text="ๅฝๅ้กต้ข: ")
page_i_label = tk.Label(center_frame, textvariable=self.vars["page_index"])
page_label2 = tk.Label(center_frame, text="ๆปๅฐๅพๆฐ้: ")
nums_label = tk.Label(center_frame, textvariable=self.vars["nums_count"])
right_button = ttk.Button(center_frame, text="ไธไธ้กต")
search_entry.grid(row=0, column=0)
search_button.grid(row=0, column=1, padx=5)
kind_selector.grid(row=0, column=2)
search_reset.grid(row=0, column=3, padx=5)
left_button.grid(row=0, column=0)
page_label.grid(row=0, column=1)
page_i_label.grid(row=0, column=2, padx=10)
page_label2.grid(row=0, column=3)
nums_label.grid(row=0, column=4, padx=10)
right_button.grid(row=0, column=5)
self.widgets = {
"left": left_button,
"right": right_button,
"search": search_button,
"reset": search_reset,
"return_search": search_entry,
"select_search": kind_selector
}
def bind_funcs(self, **funcs):
for k in funcs:
if k.startswith("return") and k in self.widgets:
self.widgets[k].bind("<Return>", funcs[k])
elif k.startswith("select") and k in self.widgets:
self.widgets[k].bind("<<ComboboxSelected>>", funcs[k])
elif k in self.widgets:
self.widgets[k].config(command=funcs[k])
class MapBoard(tk.Frame):
def __init__(self, master, map_path, size, cr):
super().__init__(master)
self.map_path = map_path
self.size = size
self.c = cr[0]
self.r = cr[1]
self.img_list = [None for _ in range(self.r * self.c)]
self.label_list = [None for _ in range(self.r * self.c)]
self.text_list = [None for _ in range(self.r * self.c)]
self.page_info = get_page_maps(self.map_path, page=0, nums=self.r * self.c)
self.vars = {}
def set_vars_map(self, vars):
self.vars = vars
def refresh_with_setting(self, **kwargs):
"""
:param kwargs: dict, keys:map_path, size, cr,
"""
if "map_path" in kwargs:
self.map_path = kwargs["map_path"]
if "size" in kwargs:
self.size = kwargs["size"]
if "cr" in kwargs:
self.c, self.r = kwargs["cr"]
lst = self.grid_slaves()
for l in lst:
l.destroy()
self.img_list = [None for _ in range(self.r * self.c)]
self.label_list = [None for _ in range(self.r * self.c)]
self.text_list = [None for _ in range(self.r * self.c)]
self.page_info = get_page_maps(self.map_path, page=0, nums=self.r * self.c)
self.show_page_by_index(0)
def refresh_vars(self):
if "page_index" in self.vars:
pi_str = "%s / %s" % (self.page_info["page"] + 1, self.page_info["pages"])
self.vars["page_index"].set(pi_str)
if "nums_count" in self.vars:
self.vars["nums_count"].set(self.page_info["total"])
if "left_button" in self.vars:
if self.page_info.get("prev"):
self.vars["left_button"].config(state=tk.NORMAL)
else:
self.vars["left_button"].config(state=tk.DISABLED)
if "right_button" in self.vars:
if self.page_info.get("next"):
self.vars["right_button"].config(state=tk.NORMAL)
else:
self.vars["right_button"].config(state=tk.DISABLED)
if "show_info" in self.vars:
if os.path.exists(self.map_path):
self.vars["show_info"].set("")
else:
self.vars["show_info"].set("ไฝ ็ๅฐๅพๆไปถๅคนไธๅญๅจๆๅทฒไธขๅคฑ")
def show_page_by_index(self, index):
if os.path.exists(self.map_path):
key = self.vars["search_val"]
self.page_info = get_page_maps(self.map_path, page=index, nums=self.r * self.c, filter=key)
else:
self.page_info = {}
for i in range(self.r * self.c):
ri = i // self.c
ci = i % self.c
if i < len(self.page_info["maps"]):
map_info = self.page_info["maps"][i]
if "art_tga" in map_info:
self.img_list[i] = Image.open(map_info["art_tga"])
elif "tga" in map_info:
self.img_list[i] = Image.open(map_info["tga"])
else:
self.img_list[i] = Image.new("RGB", IMG_SIZE[self.size], (0, 0, 0))
# print(map_info["dir"])
# if map_info["dir"] == "D:/Documents/Red Alert 3/other\(as)kuzhanmaqinuo1.6":
# print("debug")
self.img_list[i] = self.img_list[i].resize(IMG_SIZE[self.size])
self.img_list[i] = ImageTk.PhotoImage(self.img_list[i])
if self.label_list[i] is None:
self.label_list[i] = tk.Label(self, image=self.img_list[i])
self.label_list[i].grid(row=ri * 2, column=ci)
self.label_list[i].bind("<Double-Button-1>", lambda e, i=i: self.click_map(i))
else:
self.label_list[i].config(image=self.img_list[i])
if self.text_list[i] is None:
tr = LINES[self.size]
# self.text_list[i] = tk.Label(self, text=map_info["map"][:self.max_width], width=self.max_width)
self.text_list[i] = tk.Text(self, width=MAX_WIDTH[self.size], height=tr, background=TEXT_BG)
self.text_list[i].insert(tk.END, map_info["map"])
self.text_list[i].grid(row=ri * 2 + 1, column=ci)
self.text_list[i].configure(state='disabled')
else:
self.text_list[i].configure(state='normal')
self.text_list[i].delete("1.0", "end")
self.text_list[i].insert(tk.END, map_info["map"])
self.text_list[i].configure(state='disabled')
else:
# ๆๅไธ้กต็็ฉบ็ฝไฝ็ฝฎ
self.img_list[i] = Image.new("RGB", IMG_SIZE[self.size], BLANK)
self.img_list[i] = ImageTk.PhotoImage(self.img_list[i])
if self.label_list[i] is None:
self.label_list[i] = tk.Label(self, image=self.img_list[i])
self.label_list[i].grid(row=ri * 2, column=ci)
self.label_list[i].bind("<Double-Button-1>", lambda e, i=i: self.click_map(i))
else:
self.label_list[i].config(image=self.img_list[i])
if self.text_list[i] is None:
tr = LINES[self.size]
self.text_list[i] = tk.Text(self, width=MAX_WIDTH[self.size], height=tr, background=TEXT_BG)
self.text_list[i].grid(row=ri * 2 + 1, column=ci)
self.text_list[i].configure(state='disabled')
else:
self.text_list[i].configure(state='normal')
self.text_list[i].delete("1.0", "end")
self.text_list[i].configure(state='disabled')
self.refresh_vars()
def click_map(self, index):
if "maps" in self.page_info and index <len(self.page_info["maps"]):
map_info = self.page_info["maps"][index]
dir_path = map_info["dir"]
dir_path= dir_path.replace("/", "\\")
subprocess.Popen(r'explorer /select,"%s"' % dir_path)
def search(self, event=None):
if self.vars["search_val"] != self.vars["search_word"].get():
self.vars["search_val"] = self.vars["search_word"].get()
self.show_page_by_index(0)
def reset_search(self):
self.vars["search_word"].set("")
self.vars["search_val"] = ""
self.show_page_by_index(0)
def select_search(self, event=None):
val = self.vars["kind_selector"].get()
if val in SUGGEST and val != SUGGEST[0]:
self.vars["search_val"] = val
self.show_page_by_index(0)
def prev_page(self):
if self.page_info.get("prev"):
prev_index = self.page_info["page"] - 1
self.show_page_by_index(prev_index)
def next_page(self):
if self.page_info.get("next"):
next_index = self.page_info["page"] + 1
self.show_page_by_index(next_index)
class MainBoard(tk.Frame):
def __init__(self, master, **kwargs):
super().__init__(master, **kwargs)
self.setting = {}
self.vars = {}
def set_setting(self, **setting):
"""
:param setting: dict, keys: map_path, size, cr, font
"""
self.setting.update(setting)
def set_vars_main(self, vars):
self.vars = vars
def refresh(self):
self.map_board.refresh_with_setting(
map_path=self.setting["map_path"], size=self.setting["size"], cr=self.setting["cr"])
def init(self):
frame_top = ToolBar(self, self.vars)
frame_top.pack(fill="x")
self.map_board = MapBoard(self, self.setting["map_path"], self.setting["size"], self.setting["cr"])
self.map_board.set_vars_map(self.vars)
self.map_board.show_page_by_index(0)
self.map_board.pack()
frame_bottom = tk.Frame(self)
frame_bottom.pack()
tk.Label(frame_bottom, text="ๅๅปๅฐๅพๅพ็๏ผๅณๅฏๅจๆไปถๆต่งๅจไธญๆๅผๅฐๅพๆไปถๅคน", font=self.setting["font_1"]).grid()
tk.Label(frame_bottom, textvariable=self.vars["show_info"], font=self.setting["font_1"], fg="#FF69B4").grid()
self.map_board.refresh_vars()
frame_top.bind_funcs(left=self.map_board.prev_page, right=self.map_board.next_page,
search=self.map_board.search, reset=self.map_board.reset_search,
return_search=self.map_board.search, select_search=self.map_board.select_search)
self.master.bind('<Left>', lambda e: self.map_board.prev_page())
self.master.bind('<Right>', lambda e: self.map_board.next_page())
class SettingBoard(tk.Frame):
def __init__(self, master):
super().__init__(master)
self.init_done = False
self.local_vars = {}
self.local_vars["r"] = tk.IntVar(self, 0)
self.local_vars["c"] = tk.IntVar(self, 0)
self.local_vars["size"] = tk.StringVar(self, "S")
def read_setting(self, main_board):
self.local_vars["mb"] = main_board
self.local_vars["size"].set(main_board.setting["size"])
self.local_vars["c"].set(main_board.setting["cr"][0])
self.local_vars["r"].set(main_board.setting["cr"][1])
def set_vars(self, vars):
"""
:param vars: dict, keys: map_path(StringVar), setting_info(StringVar)
"""
self.vars = vars
def init(self):
ROW1 = tk.Frame(self)
ROW2 = tk.Frame(self)
ROW3 = tk.Frame(self)
ROW1.pack()
ROW2.pack()
ROW3.pack()
label0 = ttk.Label(ROW1, textvariable=self.vars["show_info"], foreground="red")
label0.grid(row=0, columnspan=2, pady=10)
b0 = ttk.Button(ROW1, text="ไฟฎๆนๅฐๅพๆไปถๅคน", command=self.select_dir)
b0.grid(row=1, column=0)
label_1 = ttk.Entry(ROW1, textvariable=self.vars["map_path"], state="disabled", width=80)
label_1.grid(row=1, column=1, columnspan=2)
ttk.Label(ROW2, text="่กๆฐ๏ผ").grid(row=0, column=0)
ttk.Label(ROW2, text="ๅๆฐ๏ผ").grid(row=1, column=0)
tk.Scale(ROW2, from_=ROW_RANGE[0], to=ROW_RANGE[1], variable=self.local_vars["r"], orient='horizontal',
length=RANGE_WIDTH * (ROW_RANGE[1] - ROW_RANGE[0])).grid(row=0, column=1, columnspan=3)
tk.Scale(ROW2, from_=COLUMN_RANGE[0], to=COLUMN_RANGE[1], variable=self.local_vars["c"], orient='horizontal',
length=RANGE_WIDTH * (COLUMN_RANGE[1] - COLUMN_RANGE[0])).grid(row=1, column=1, columnspan=3, padx=10)
ttk.Label(ROW2, textvariable=self.local_vars["r"]).grid(row=0, column=4)
ttk.Label(ROW2, textvariable=self.local_vars["c"]).grid(row=1, column=4)
ttk.Label(ROW2, text="ๅพ็ๅคงๅฐ๏ผ").grid(row=2, column=0, padx=10, pady=10)
tk.Radiobutton(ROW2, text="ๅฐ", variable=self.local_vars["size"], value="S", width=7,
indicatoron=False).grid(row=2, column=1)
tk.Radiobutton(ROW2, text="ไธญ", variable=self.local_vars["size"], value="M", width=7,
indicatoron=False).grid(row=2, column=2)
tk.Radiobutton(ROW2, text="ๅคง", variable=self.local_vars["size"], value="L", width=7,
indicatoron=False).grid(row=2, column=3)
tk.Button(self, text="ไฟๅญๅนถ้ๅบ", command=self.save).pack(pady=10)
tk.Button(self, text="่ฟๅ", command=self.back).pack(pady=10)
self.init_done = True
def save(self):
map_path = self.vars["map_path"].get()
c = self.local_vars["c"].get()
r = self.local_vars["r"].get()
size = self.local_vars["size"].get()
self.pack_forget()
data = {
"size": size,
"c": c,
"r": r
}
with open(JSON_NAME, "w") as f:
json.dump(data, f)
self.local_vars["mb"].set_setting(map_path=map_path, size=size, cr=(c, r))
self.local_vars["mb"].refresh()
self.local_vars["mb"].pack()
def back(self):
self.pack_forget()
self.local_vars["mb"].pack()
def select_dir(self):
try:
dir = filedialog.askdirectory()
if dir:
self.vars["map_path"].set(dir)
print(dir)
self.vars["show_info"].set("")
except Exception as e:
self.vars["show_info"].set(str(e))
def open_setting(gvars):
gvars["mb"].pack_forget()
gvars["sb"].read_setting(gvars["mb"])
if not gvars["sb"].init_done:
gvars["sb"].init()
gvars["sb"].pack()
| BigShuang/ra3-map-browser | board.py | board.py | py | 15,216 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PIL.ImageFile.LOAD_TRUNCATED_IMAGES",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "PIL.ImageFile",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "tkinter.Frame",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_nam... |
71458557795 | #!usr/bin/env python3
#Jacob Foppes Project 6 Game
#game like pokenmon where you go down differnent paths and can run into pokemon on the way.
# Uses random to randomly slect a pokemon from a list for you to fight
# you fight the poken nad it cna wither be cought or run away attacks are effective or ineffective
# differnt color texts for diffenrt poeple or actions
# ie prof oak one color
# you heard a rumble in the busehd another
# for random path lengths: print ... in differnt ammouns based on list of []"......","..",".","..................."]
#use turtle to visualize map
#use tkinter to pupup new window when a fight is happeneing.
import time
import random
import os
from pathlib import Path
import sys
'''Intro: Welocme to the game baisic tutorial.
Choose your name, color '''
# GLOBAL VARIABLES
savedGames = [] #list of all saved games
auth_usr = ""
save = "" # users current location in the game game
owd = os.getcwd()
wokeDex = {} #authenitcated users wokedex
currentLevel = 0
with open("accounts.txt","r+") as users:
games = users.read()
savedGames = games.split("\n")
print(savedGames)
wokemon = {"Wikachu":10,"Wetapod":5,"Wonix":10,"Wortle":5,"Wewtwo":20,"Wurtterfree":3,"Wattata":4}# dictionary of pokemon with health points
waterWokemon = {"Wyaradose":21, "Wlastoise":8, "Wampert": 10, "Wagicarp":4}
fireWokemon = {"Warmander":6,"Wapidash": 8,"Wagmar": 7, "Wimchar": 4, "Winetails":12}
earthWokemon = {"Whydor":3, "Wolem":5,"Wonix": 18,"Weodude":8, "Womastart": 3}
def print_slow(str):# Credit : Sebastian - Stack overflow https://stackoverflow.com/questions/4099422/printing-slowly-simulate-typing
for letter in str:
sys.stdout.write(letter)
sys.stdout.flush()
time.sleep(0.05)
def input_slow(str): # Credit: https://www.101computing.net/python-typing-text-effect/
for character in str:
sys.stdout.write(character)
sys.stdout.flush()
time.sleep(0.05)
value = input()
return value
def battle(randWokemon):# take
rwok = (randWokemon[0],randWokemon[1]) # extract name and healktgh
while True:
wok = input_slow("\nChoose your Wokemon:\n"+str(wokeDex)+"\n")
if wok in wokeDex:
global cwokhealth
global pwokhealth
pwokhealth = int(wokeDex[wok])
print_slow("\nYour HP: " + str(pwokhealth) + "\n") #print player helath
cwokhealth = int(rwok[1])
print_slow("Oponent HP: " + str(cwokhealth) + "\n")# print oponent helath
print_slow("\nLooks like its " + wok + " vs " + rwok[0] + "\n")
time.sleep(1)
def attack():
global cwokhealth
global pwokhealth
phit1 = random.randrange(0,5,1)#randomly genreated player dammage to compouter
cwokhealth -= phit1 #subtract hit points form health
print_slow(wok + " Strikes!\nIt does " + str(phit1) + " dammage!\n\n")
chit1 = random.randrange(0,5,1)#randomly genereated compouter dammage to play
pwokhealth -= chit1#subtract hit points form health
print_slow(rwok[0] + " Strikes!\nIt does " + str(chit1) + " dammage!\n\n")
print_slow("Oponent HP: " + str(cwokhealth) + "\n")
print_slow("Your HP: " + str(pwokhealth) + "\n\n")
attack()
else:
print("Please choose a Wokemon from your Wokedex")
continue
while pwokhealth > 0 and cwokhealth > 0: # while both players health greate than 0 comntinue the attack function
attack()
if pwokhealth <= 0 and pwokhealth <= cwokhealth: #if comoputer wins
print_slow("Dang..., Thats tough boss. \nLooks like you lost this one.\nTime to head home and heal your Wokemon\n ")
loby()
elif cwokhealth <= 0 and pwokhealth >= cwokhealth: # if player wins
global currentLevel
currentLevel += 1
print_slow("You won!!\nYou now have " + rwok[0] + " added to your wokedex!!\nYou will now move on to "+ str(currentLevel)+"!\n\n")
wokeDex[rwok[0]] = rwok[1]
savel()
loby()
def l1():#game level 1
global wokemon
print_slow("Welcome to level 1!\n")
while True:
path1 = input_slow("You are wlaking down the street and you encounter a set a of 2 trail heads:\n 'Elk Road', and 'Spoon Drive' which do you take \n Enter 'Elk' or 'Spoon'\n").lower()
if path1 == "elk":
print_slow("Your walking down the elk path when you spot something in the bushes...\n")
time.sleep(.75)
print_slow(".")
time.sleep(.75)
print_slow(".")
time.sleep(.75)
print_slow(".")
randWokemon = random.choice(list(wokemon.items())) #chose random Wokemon from dict of wokemon
rwok = randWokemon[0] # extract just the name
while True:
fight1 = input_slow("\nA wild " + rwok + " appears!!\n Do you battle? or Run away?\n Enter 'Battle', or 'Run\n").lower()
if fight1 == "run":
print_slow("You got away just in time! Better head back.\n\n")
time.sleep(2)
break
elif fight1 == "battle":
battle(randWokemon)
else:
print_slow("Please choose run or battle\n")
continue
elif path1 == "spoon":
print_slow("\nYour walking down the Spoon Path and you come to a fork in the path\n The left looks like it leads to a river, and the right looks to be more forrest.\n ")
while True:
lefRig = input_slow("Do you Turn left or right\n").lower()
if lefRig == "left":
print_slow("\nWhile Crossing the shallow end of the river, you see some splashing...")
time.sleep(.75)
print_slow(".")
time.sleep(.75)
print_slow(".")
randWokemon = random.choice(list(waterWokemon.items())) #chose random Wokemon from dict of wokemon
rwok = randWokemon[0] # extract just the name
fight2 = input_slow("\n\nA wild " + rwok + " appears!!\n Do you battle? or Run away?\n Enter 'Battle', or 'Run'\n").lower()
if fight2 == "battle":
battle(randWokemon)
elif fight2 == "run":
print_slow("You got away just in time, better head back to that fork in the path...\n\n")
continue
else:
print_slow("Choose battle, or Run.\n")
elif lefRig == "right":
print_slow("Your take a right on Spoon Path when you spot something in the bushes...\n")
time.sleep(.75)
print_slow(".")
time.sleep(.75)
print_slow(".")
time.sleep(.75)
randWokemon = random.choice(list(wokemon.items())) #chose random Wokemon from dict of wokemon
rwok = randWokemon[0] # extract just the name
fight1 = input_slow("\nA wild " + rwok + " appears!!\n Do you battle? or Run away?\n Enter 'Battle', or n'Run\n").lower()
if fight1 == "run":
print_slow("You got away just in time! Better head back to that fork in the path.\n\n")
time.sleep(2)
continue
elif fight1 == "battle":
battle(randWokemon)
else:
print_slow("Choose battle, or Run.\n")
else:
print_slow("Please choose left or right\n")
continue
def l2():#game level 2
print_slow("Welcome to Level 2!\n\n")
print_slow("You have entered a new area now...\n")
time.sleep(.25)
print_slow("You see new kinds of terrain ready to explore!\n")
time.sleep(.25)
while True:
print_slow("To your left you see a massive volcano, and to your right you see a vast rocky desert.\n")
time.sleep(.15)
path3 = input_slow("Do you visit the desert or the volcano?\nsay 'Desert', or 'Volcano'\n").lower()
if path3 == "desert":
print_slow("You begin to wander the desert.\nThe Sun is beating down on you\n")
time.sleep(.5)
print_slow("You see something in the distance....\n")
time.sleep(.5)
print_slow("You walk closer......\n")
time.sleep(.75)
randWokemon = random.choice(list(earthWokemon.items())) #chose random Wokemon from dict of wokemon
rwok = randWokemon[0] # extract just the name
while True:
fight1 = input_slow("\nA wild " + rwok + " appears!!\n Do you battle? or Run away?\n Enter 'Battle', or n'Run\n").lower()
if fight1 == "run":
print_slow("You got away just in time! Better head back.\n\n")
time.sleep(2)
continue
elif fight1 == "battle":
battle(randWokemon)
else:
print_slow("Choose battle, or Run.\n")
elif path3 == "volcano":
print_slow("You start walking towards the volcano.\n")
time.sleep(.75)
print_slow("Suddenly a creature rushes twords you!")
randWokemon = random.choice(list(fireWokemon.items())) #chose random Wokemon from dict of wokemon
rwok = randWokemon[0] # extract just the name
while True:
fight1 = input_slow("\nA wild " + rwok + " appears!!\n Do you battle? or Run away?\n Enter 'Battle', or n'Run\n").lower()
if fight1 == "run":
print_slow("You got away just in time! Better head back.\n\n")
time.sleep(2)
continue
elif fight1 == "battle":
battle(randWokemon)
else:
print_slow("Choose battle, or Run.\n")
else:
print_slow("Please choose desert, or volcano.\n")
continue
def l3(): #Game level 3
print("Level 3 Comming Soon!")
time.sleep(3)
pass
levels = {1:l1,2:l2,3:l3}
def loby():# lobby is where the player once logged in, can either view thier Wokedex, or continue playing at the start of thier current level
global wokeDex
global currentLevel
lvl = open("saveG.txt","r")
currentLevel = int(lvl.read())
while True:
lchoice = input("Hello "+auth_usr+" Welcome to the lobby!\n Your Currently at Level: " + str(currentLevel) + "\nSay 'start' to resume your game, 'view' to view your wokedex, 'Prev' to redo a previos level, or 'Exit' to retun to the main screen."+"\n").lower()
if lchoice == "start":
level = open("saveG.txt","r").read()# save game file
level = int(level)
levels[level]()# read savegame file and call level finciton based on the text in the file. this text is used as a key in a dictionary of all levels where the values are the fucntions that start the levels
elif lchoice == "prev":
if currentLevel == 1:
print_slow("\nYou have not completed any levels yet!. Come back here after you have progressed.\n")
time.sleep(1)
continue
elif currentLevel == 2:
try:
lev = int(input("You can Visit the Following Levels:\nLevel 1, Level 2\nType the number of the level you want to visit\n"))
if lev > currentLevel:
print("You can not access this yet.")
else:
levels[lev]()
except ValueError:
print("Level does not exist.\n")
continue
elif currentLevel == 3:
try:
lev = int(input("You can Visit the Following Levels:\nLevel 1, Level 2, Level 3\nType the number of the level you want to visit\n"))
if lev > currentLevel:
print("You can not access this yet.\n\n")
else:
levels[lev]()
except ValueError:
print("Level does not exist.\n")
continue
elif lchoice == "view":
print("\n"+str(wokeDex)+"\n")
time.sleep(1)
elif lchoice == "exit":#exit loby and return to welocme/ main directory
os.chdir(owd)
wokeDex = {}
currentLevel = 0
welcome()
else:
print("Select a valid choice\n")
time.sleep(.5)
def mkuser(): # if the user does not have an account they can make one
breaker = True
while breaker ==True:
print_slow("Prof. Woke: Wecome to WokeyWorld!")
pname = input("What shall I call you?\n")
if pname in savedGames or os.path.exists(pname+"/"):
print("User already exists. Try again ")
continue
else:
global auth_usr
global wokeDex
auth_usr = pname
savedGames.append(pname) # ad new name to the saved games list
auth = open("accounts.txt","r+")
auth.write("\n".join(str(line) for line in savedGames))# write easch line of the saved gmaes list to the accouts file
auth.close()
p = Path(pname)
os.chdir("savedGames") # changes dir to the users folder so that a new game can be saved
p.mkdir() # make play direcotry
os.chdir(pname) # enter player direcotry
global save
global wokeDex
sav = open("saveG.txt", "x") # create save file
wd = open("wokedex.txt", "x") # create save file
os.chdir(owd)
print("Account creation sucessfull. Logged in as:", pname,"\n")
breaker == False
newGame()
break
def saveg():## this fuction can be called to save the game durring play by typing save
os.chdir("savedGames/" + auth_usr)
dex = open("wokedex.txt","w")
for key, value in wokeDex.items():
dex.write('%s %s\n' % (key, value))
lvl = open("saveG.txt","w")
lvl.write(str(currentLevel))
def savel():## this fuction can be called to save the game durring play by typing save
dex = open("wokedex.txt","w")
for key, value in wokeDex.items():
dex.write('%s %s\n' % (key, value))
dex.close()
lvl = open("saveG.txt","w")
lvl.write(str(currentLevel))
lvl.close()
print("Game Saved. Your currnet level: "+str(currentLevel))
def newGame():# First Sequence in game after user amkes account
time.sleep(.25)
print_slow("\nProf Woke: Hello "+auth_usr+" My Name is Professor Woke! Ill show you arround\n")
time.sleep(.1)
print_slow("Prof Woke: Im giving you a wokedex.\n")
time.sleep(.1)
print_slow("Prof Woke: This is where you will store the wokemon you catch along the way.\n")
time.sleep(.1)
print_slow("Prof Woke: I going to start you off with this Wikachu.\n")
wokeDex["Wikachu"] = 11
print("\n",auth_usr,"'s Wokedex:",wokeDex,"\n")
time.sleep(1)
global currentLevel
currentLevel = 1
saveg()
loby()
pass
def welcome():# where user logs in to contine or creates new game
while True:
print("\n\nWokemon Gotta Snatch em' all!")
game = input("To start a new game, say 'New', to continue a game, enter 'cont'\n").lower()
if game == "new":
mkuser()
elif game == "cont":
while True:
print("Available saved Games:",savedGames,"\n")
un = input("Enter your username or type 'Exit to return to main menue\n")
if un == "exit":
welcome()
break
elif un not in savedGames:
print("\n User not found. Try agian. OR Type Exit to return to the main screen \n")
time.sleep(1)
elif un in savedGames: #checks username agains list of saved games
global auth_usr
auth_usr = un
global wokeDex
os.chdir("savedGames/")
os.chdir(auth_usr)
with open("wokedex.txt", "r+") as wd:
for line in wd:
(wok,pow) = line.split()# Create tuple of wokemon/powerlevels
wokeDex[(wok)] = pow #break the tuple in to doctiuonary key,value
print("\n Found Your Game!\n")
print(" Lets Get to it ", auth_usr,"\n")
loby()
break
else: print("Please select a valid choice")
welcome()
| jfoppes/week_6 | project_6.py | project_6.py | py | 17,314 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.getcwd",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
"li... |
19121216961 | # hcm/util/file_utils.py
"""File utilities. """
import os
import sys
import logging
import pandas as pd
logger = logging.getLogger(__name__)
pd.set_option('display.width', 1000)
def days_suffix(obs_period):
return obs_period.replace('-', '_').replace(' ', '_')
def progress(count, total, status=''):
bar_len = 60
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('[{}] {}{} ...{}\r'.format(bar, percents, '%', status))
sys.stdout.flush() # As suggested by Rom Ruben (see: http://stackoverflow.com/questions/3173320/text-progress
# # -bar-in-the-console/27871113#comment50529068_27871113)
def dataroot():
return "/data/HCM/"
def datadir(exp_name): # day=None):
""" TODO: Move this to the data field. """
daydir = ""
other_exp = ["2cD1A2aCRE", "2cD1A2aCRE2", "2CFast", "1ASTRESS", "Stress_HCMe1r1", "CORTTREAT", "HiFat2", "HiFat1"]
if exp_name in other_exp:
datadir_parent = ""
data_dir = exp_name
# if day is not None: daydir = "D%d" % (day + 1)
# elif exp_name.startswith("SS_Data_051905_FV"):
# datadir, expround = self.name.split(":")
# datadir_parent = "EventFiles/EventFiles_SSe1r%s" % expround
# date = (self.start + datetime.timedelta(days=day)).strftime("%m%d%Y")
# daydir = "%se1r%sd%s" % (date, expround, day + 1)
elif exp_name == "StrainSurvey":
datadir_parent = ""
data_dir = "SS_Data_051905_FV"
elif exp_name.startswith("WR"):
datadir_parent = "WR"
data_dir = exp_name
else:
raise ValueError("Unknown experiment: {}".format(exp_name))
return os.path.join(dataroot(), "Experiments", datadir_parent, data_dir, daydir)
def hcm_dir():
dir_parent, sub_dir = None, None
if os.uname()[1] == "giorgios-MacBook-Pro.local":
dir_parent = "/Users/go"
sub_dir = "Projects/HomeCageMonitoring/"
return os.path.join(dir_parent, sub_dir)
def repo_dir():
return os.path.join(hcm_dir(), "hcm2")
def find_files(path, ext='npy'):
""" returns a generator over filenames in path """
return (os.path.join(dirpath, f) for dirpath, _, files in os.walk(path) for f in sorted(files) if
f.endswith('.{}'.format(ext)))
def mouseday_label_from_filename(fname, ext="npy"):
stripped = fname.strip('.{}'.format(ext)).split('/')
exp_name, akind, qty, md_label = [stripped[x] for x in [6, 7, -2, -1]]
one, group, two, mouse, day = md_label.split('_')
return group, mouse, int(day[1:])
# # kinda old
# # # EVENTS
# def create_df_from_series_data(experiment, dfs, days, ignore=False):
# df = pd.DataFrame(dfs).T
# df = df.reset_index().rename(index=str, columns={'level_0': 'group', 'level_1': 'mouse', 'level_2': 'day'})
# df = df[df['day'].isin(days)]
# if ignore:
# # ignored = list() or experiment.ignored
# df = remove_ignored_from_dataframe(experiment, df)
# df = set_df_indices(df, experiment, index=['group', 'mouse', 'day'])
# return df
#
# def load_ingestion_events_dataframe(experiment, days=(), ev_type='F', ignore=False):
# path_to_npy1 = path_to_binary(experiment, subdir=os.path.join('preprocessing', '{}_timeset'.format(ev_type)))
# print "loading {} event data from npy:\n{}".format(ev_type, path_to_npy1)
# tot = len(list(find_files(path_to_npy1, ext='npy')))
# dfs1, dfs2 = dict(), dict()
# # durations
# for cnt, fname in enumerate(find_files(path_to_npy1, ext='npy')):
# vals = np.load(fname)
# if len(vals):
# dur, = np.diff(vals).T
# interdur = vals[1:, 0] - vals[:-1, 1]
# else:
# dur, interdur = list(), list()
#
# index = mouseday_label_from_filename(fname)
# dfs1[index] = pd.Series(dur)
# dfs2[index] = pd.Series(interdur)
# progress(cnt, tot)
#
# df1 = create_df_from_series_data(experiment, dfs1, days, ignore)
# df2 = create_df_from_series_data(experiment, dfs2, days, ignore)
#
# # Feeding and Licking Coeff
# coeff_name = 'FC' if ev_type == 'F' else "LC"
# path_to_npy2 = path_to_binary(experiment, subdir=os.path.join('preprocessing', coeff_name))
# print "loading {} data from npy:\n{}".format(coeff_name, path_to_npy2)
# tot = len(list(find_files(path_to_npy2, ext='npy')))
# dfs3 = dict()
# for cnt, fname in enumerate(find_files(path_to_npy2, ext='npy')):
# vals = np.load(fname).tolist()
# index = mouseday_label_from_filename(fname)
# dfs3[index] = pd.Series(vals)
# progress(cnt, tot)
#
# df3 = create_df_from_series_data(experiment, dfs3, days, ignore) * 1000 # to mg/s
# return df1, df2, df3
#
#
# @utils.timing
# def load_locomotion_events_dataframe(experiment, days=(), ignore=False):
# keys = ['delta_t', 'idx_timestamps_out_hb', 'idx_timestamps_at_hb', 'velocity', 'distance']
# print "loading timestamp data from npy:\n{}".format(keys)
# dfs_list = [dict() for _ in range(9)]
# tot = len(list(experiment.mousedays))
# for cnt, md in enumerate(experiment.mousedays):
# if md.day in days:
# delta_t, idx_out, idx_at, vel, dist = [load_preprocessing_data(md, keys=[key])['preprocessing'][key]
# for key in keys]
# dfs_list[0][md.label] = pd.Series(delta_t)
# dfs_list[1][md.label] = pd.Series(vel)
# dfs_list[2][md.label] = pd.Series(dist)
# dfs_list[3][md.label] = pd.Series(delta_t[idx_out])
# dfs_list[4][md.label] = pd.Series(vel[idx_out])
# dfs_list[5][md.label] = pd.Series(dist[idx_out]) # todo: REWRITE
# dfs_list[6][md.label] = pd.Series(delta_t[idx_at])
# dfs_list[7][md.label] = pd.Series(vel[idx_at])
# dfs_list[8][md.label] = pd.Series(dist[idx_at])
# progress(cnt, tot)
# cnt += 1
#
# return [create_df_from_series_data(experiment, dfs, days, ignore) for dfs in dfs_list]
| giorgio-o/hcm2 | util/file_utils.py | file_utils.py | py | 6,148 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"... |
13395702605 | """Remove order in shop product and replace with the unwieldy row/column again :/
Revision ID: 4f786f3c132b
Revises: 2d4ea6b57d6e
Create Date: 2016-03-20 07:11:09.422825
"""
# revision identifiers, used by Alembic.
revision = '4f786f3c132b'
down_revision = '2d4ea6b57d6e'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('Shop_product', sa.Column('column', sa.Integer(), nullable=True))
op.add_column('Shop_product', sa.Column('row', sa.Integer(), nullable=True))
op.drop_column('Shop_product', 'order')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('permission', 'user_id',
existing_type=mysql.INTEGER(display_width=11),
nullable=False)
op.add_column('Shop_product', sa.Column('order', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
op.drop_column('Shop_product', 'row')
op.drop_column('Shop_product', 'column')
op.create_table('backup_Class_description',
sa.Column('description_id', mysql.INTEGER(display_width=11), server_default=sa.text(u"'0'"), autoincrement=False, nullable=False),
sa.Column('order', mysql.SMALLINT(display_width=5, unsigned=True), autoincrement=False, nullable=False),
sa.Column('title', mysql.VARCHAR(length=255), nullable=False),
sa.Column('abbr', mysql.VARCHAR(length=255), nullable=False),
sa.Column('description', mysql.LONGTEXT(), nullable=False),
sa.Column('menu', mysql.LONGTEXT(), nullable=False),
sa.Column('knife_level', mysql.VARCHAR(length=1), nullable=False),
sa.Column('veggie_level', mysql.VARCHAR(length=1), nullable=False),
sa.Column('dairy_level', mysql.VARCHAR(length=1), nullable=False),
sa.Column('wheat_level', mysql.VARCHAR(length=1), nullable=False),
sa.Column('cost_override', mysql.SMALLINT(display_width=5, unsigned=True), autoincrement=False, nullable=True),
sa.Column('wine', mysql.VARCHAR(length=255), nullable=False),
mysql_default_charset=u'latin1',
mysql_engine=u'InnoDB'
)
op.create_table('gift_certificate',
sa.Column('certificate_id', mysql.INTEGER(display_width=11), nullable=False),
sa.Column('campus_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('created', mysql.DATETIME(), nullable=True),
sa.Column('sender_name', mysql.VARCHAR(length=100), nullable=True),
sa.Column('sender_email', mysql.VARCHAR(length=75), nullable=True),
sa.Column('sender_phone', mysql.VARCHAR(length=20), nullable=True),
sa.Column('amount_to_give', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('recipient_name', mysql.VARCHAR(length=100), nullable=True),
sa.Column('recipient_email', mysql.VARCHAR(length=75), nullable=True),
sa.Column('message', mysql.TEXT(), nullable=True),
sa.Column('giftcard', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True),
sa.Column('date_sent', sa.DATE(), nullable=True),
sa.Column('name_on_envelope', mysql.VARCHAR(length=255), nullable=True),
sa.Column('street_address', mysql.VARCHAR(length=255), nullable=True),
sa.Column('city', mysql.VARCHAR(length=100), nullable=True),
sa.Column('state', mysql.VARCHAR(length=2), nullable=True),
sa.Column('zip_code', mysql.VARCHAR(length=10), nullable=True),
sa.Column('code', mysql.VARCHAR(length=10), nullable=True),
sa.Column('creditcard_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
sa.Column('paid_with', mysql.VARCHAR(length=10), nullable=True),
sa.Column('expiration_date', sa.DATE(), nullable=True),
sa.ForeignKeyConstraint(['campus_id'], [u'Class_campus.campus_id'], name=u'gift_certificate_ibfk_1'),
sa.PrimaryKeyConstraint('certificate_id'),
mysql_default_charset=u'latin1',
mysql_engine=u'InnoDB'
)
op.drop_table('forgot_password_links')
### end Alembic commands ###
| fenriz07/flask-hippooks | migrations/versions/4f786f3c132b_.py | 4f786f3c132b_.py | py | 4,089 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "alembic.op.add_column",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.