seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
24681181962 | import streamlit as st
from transformers import T5Tokenizer, T5ForConditionalGeneration
from transformers import pipeline
import torch
#model and tokenizer loading
checkpoint = "LaMini-Flan-T5-248M"
tokenizer = T5Tokenizer.from_pretrained(checkpoint)
base_model = T5ForConditionalGeneration.from_pretrained(checkpoint, device_map='auto', torch_dtype=torch.float32)
def paragraph_summarization(input_text):
paragraphs = input_text.split('\n\n') # Split text into paragraphs
summary_pipeline = pipeline(
'summarization',
model=base_model,
tokenizer=tokenizer,
max_length=300, # Adjust max_length as needed for paragraph summaries
min_length=30) # Adjust min_length as needed
summaries = []
for paragraph in paragraphs:
if len(paragraph.strip()) > 0:
summary = summary_pipeline(paragraph)[0]['summary_text']
summaries.append(summary)
return summaries
#streamlit code
st.set_page_config(layout="wide")
def main():
st.title("Paragraph Summarization App")
# user input text
input_text = st.text_area("Enter your paragraphs here:", "", )
if st.button("Summarize"):
col1, col2 = st.columns(2)
with col1:
st.info("Written paragraphs")
st.write(input_text)
#pdf_viewer = displayPDF(filePath)
with col2:
st.info("Summarized paragraphs")
summaries = paragraph_summarization(input_text)
for i, summary in enumerate(summaries):
st.success(f"Summary for Paragraph {i+1}: {summary}")
if __name__ == "__main__":
main() | Shoaib-Alauudin/Text-Summarization-Using-LLM | app.py | app.py | py | 1,650 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "transformers.T5Tokenizer.from_pretrained",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "transformers.T5Tokenizer",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "transformers.T5ForConditionalGeneration.from_pretrained",
"line_number": 9,
... |
25352417620 | # coding: utf-8
__author__ = "humkyung <humkyung@atools.co.kr>"
# Imports
import os, sys
import vtk
from enum import IntEnum
class NetworksJsonImporter:
KEY = vtk.vtkInformationStringVectorKey.MakeKey('Attribute', 'vtkActor')
def __init__(self):
self._file_path = None
self._nodes = {}
self._edges = []
def SetFileName(self, file_path: str) -> None:
self._file_path = file_path
def Read(self) -> None:
"""
@brief: read given file
"""
import json
if os.path.isfile(self._file_path):
_dict = None
with open(self._file_path, encoding="utf-8") as f:
all = f.read()
_dict = json.loads(all)
if _dict:
self.Parse(_dict)
def Parse(self, _dict: dict):
"""
@brief: parse given lines
"""
for node in _dict['nodes']:
self._nodes[node['name']] = [float(x) for x in node['pos'].split(',')]
for edge in _dict['edges']:
self._edges.append([edge['start'], edge['end'], float(edge['length'])])
def GetOutput(self, renderer):
"""
@brief: add actors to renderer
"""
from Primitives.Sphere import Sphere
from Primitives.Cylinder import Cylinder
try:
for name, pos in self._nodes.items():
actor = Sphere(renderer, pos).actor
info = actor.GetProperty().GetInformation()
info.Append(NetworksJsonImporter.KEY, f'{{\"name\":\"{name}\"}}')
# Generate the polyline for the spline.
points = vtk.vtkPoints()
edge_data = vtk.vtkPolyData()
# Edges
for edge in self._edges:
u, v = edge[0], edge[1]
(sx, sy, sz) = self._nodes[u]
(ex, ey, ez) = self._nodes[v]
actor = Cylinder(renderer, pt1=(sx, sy, sz), pt2=(ex, ey, ez), radius=0.1).actor
info = actor.GetProperty().GetInformation()
info_str = f'{{\"start\":\"{u}\",\"end\":\"{v}\",\"length\":\"{edge[2]}\"}}'
info.Append(NetworksJsonImporter.KEY, info_str)
except Exception as ex:
message = f"error occurred({repr(ex)}) in {sys.exc_info()[-1].tb_frame.f_code.co_filename}:" \
f"{sys.exc_info()[-1].tb_lineno}"
print(message)
if __name__ == '__main__':
importer = NetworksJsonImporter()
importer.SetFileName('sample.json')
importer.Read()
| humkyung/AViewer | NetworkxJson/NetworkxJsonImporter.py | NetworkxJsonImporter.py | py | 2,564 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "vtk.vtkInformationStringVectorKey.MakeKey",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "vtk.vtkInformationStringVectorKey",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_number": 27,
"usage_type": "call"... |
35063056732 | from json import JSONDecoder, dumps
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.views import APIView
from django.db.models import ObjectDoesNotExist
from api.models import Record, KeysToken, ChildRecord, User, UnregisteredRecord
from api.serializer import RecordSerializer, UserSerializer, ChildRecordSerializer
from serv.MRDApi import Cryptographer, NotValid
class Editor(APIView):
cryp = Cryptographer()
def put(self, req):
token = get_object_or_404(
KeysToken.objects.all(),
token=req.data['token'])
user = token.user
if token.is_valid():
token.update_activity()
user_data = req.data['usr']
errors = [None, None]
try:
errors[0] = self._update_user_data(user_data, user)
except NotValid:
return Response(status=400, data="lel")
if errors[0] != None:
return Response(status=400, data=errors[0])
record_data = req.data['rec']
try:
errors[1] = self._update_record_data(record_data, user)
except NotValid:
return Response(status=400)
if errors[1] != None:
return Response(status=400, data=errors[1])
return Response(status=200)
else:
return Response(status=401)
def _update_user_data(self, user_data, user):
unique_status, errors = self._unique_user_creds(user_data, user)
if unique_status:
usr_ser = UserSerializer(instance=user, data=user_data, partial=True)
if usr_ser.is_valid():
usr_ser.save()
else:
raise NotValid
if errors == None:
return None
return errors
def _update_record_data(self, record_data, user):
record = get_object_or_404(Record.objects.all(), owner=user)
unique_status, errors = self._unique_record_creds(record_data, record)
if unique_status:
rec_ser = RecordSerializer(
instance=record,
data=record_data,
partial=True)
if rec_ser.is_valid():
rec_ser.save()
else:
raise NotValid
if errors == None:
return None
return errors
def _unique_record_creds(self, record_data, record_instance):
record = None
errors = []
if 'serial_number' in record_data.keys():
try:
record = Record.objects.get(serial_number=record_data['serial_number'])
except ObjectDoesNotExist:
record = None
if record:
errors.append('serialNumberError')
try:
snum = record_data['serial_number']
unreg_rec = UnregisteredRecord.objects.get(serial_number=snum)
except ObjectDoesNotExist:
errors.append('noSuchRecord')
unreg_rec = None
if unreg_rec and unreg_rec.code == record_data['code']:
if not record or record == record_instance:
record_instance.serial_number = record_data['serial_number']
record_instance.save()
unreg_rec.delete()
return True, None
return False, errors
return True, None
def _unique_user_creds(self, user_data, user_instance):
user = None
errors = []
if 'username' in user_data.keys():
try:
user = User.objects.get(username=user_data['username'])
except ObjectDoesNotExist:
user = None
if user:
errors.append('usernameError')
if 'email' in user_data.keys():
try:
user = User.objects.get(email=user_data['email'])
except ObjectDoesNotExist:
user = None
if user:
errors.append('emailError')
if 'telephone' in user_data.keys():
try:
user = User.objects.get(telephone=user_data['telephone'])
except ObjectDoesNotExist:
user = None
if user:
errors.append('phoneError')
if 'email' in user_data.keys() or 'username' in user_data.keys() or 'telephone' in user_data.keys():
if user != user_instance and errors:
return False, errors
elif not user or user == user_instance:
return True, None
return True, None
class ChildEditor(APIView):
def put(self, req):
token = get_object_or_404(
KeysToken.objects.all(),
token=req.data['token'])
user = token.user
child_rec = get_object_or_404(
ChildRecord.objects.all(),
serial_number=req.data['id'])
if token.is_valid() and child_rec.parent == user:
data = req.data['data']
rec = ChildRecordSerializer(
instance=child_rec, data=data, partial=True)
if rec.is_valid():
rec.save()
parent_record = Record.objects.get(owner=user)
json_decoder = JSONDecoder()
children = json_decoder.decode(parent_record.children)
if rec.is_valid() and (
data['first_name'] or data['last_name'] or data['second_name']):
for each in children:
if each['serial_number'] == req.data['id']:
tmp = f'{rec.data["last_name"]} '
tmp += f'{rec.data["first_name"]} '
tmp += f'{rec.data["second_name"]}'
each['name'] = tmp
parent_record.children = dumps(
children, separators=(
',', ':'), ensure_ascii=False)
parent_record.save()
token.update_activity()
return Response(status=200)
else:
return Response(status=403)
| cann1neof/mrecord | backend/api/views/view_edit.py | view_edit.py | py | 6,173 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "rest_framework.views.APIView",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "serv.MRDApi.Cryptographer",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "rest_framework.generics.get_object_or_404",
"line_number": 18,
"usage_type": "call... |
6486476570 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
from django.shortcuts import render_to_response
from django.template import RequestContext
from lavidaorganic.apps.talleres.models import Taller
from paypal.standard.forms import PayPalPaymentsForm
from django.shortcuts import get_object_or_404
import datetime
def talleres(request):
lista_talleres = Taller.objects.filter(fecha__gte=datetime.date.today()).order_by('fecha')
#ALGORITMO PARA FILTRAR
ctx = {'talleres': lista_talleres}
return render_to_response('talleres/talleres.html', ctx, context_instance=RequestContext(request))
def taller(request, titulo):
titulo = titulo.replace('_', ' ')
taller = get_object_or_404(Taller, titulo=titulo)
if taller.inscritos < taller.capacidad:
cupo = True
else:
cupo = False
#Asesorio personalizada
paypal_dict_taller = {
"business": "lavidaorganic@lavidaorganic.com",
"amount": taller.precio,
"item_name": taller.titulo,
"notify_url": "http://lavidaorganic.com/paypalito-manager/",
"return_url": "http://lavidaorganic.com/historia-de-salud/",
"cancel_return": "http://lavidaorganic.com/",
}
# Create the instance.
form_taller = PayPalPaymentsForm(initial=paypal_dict_taller)
ctx = {'taller': taller, 'form_taller':form_taller, 'cupo': cupo}
return render_to_response('talleres/taller_detalle.html', ctx, context_instance=RequestContext(request))
| Reston/lavidaorganic | lavidaorganic/lavidaorganic/apps/talleres/views.py | views.py | py | 1,372 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "lavidaorganic.apps.talleres.models.Taller.objects.filter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "lavidaorganic.apps.talleres.models.Taller.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "lavidaorganic.apps.talleres.model... |
21402553475 | from pyTasks.tasks import Task, Parameter
from pyTasks.utils import containerHash
from .graph_tasks import GraphPruningTask
from .mongo_tasks import MongoResourceTarget
from sklearn.model_selection import KFold
import numpy as np
from bson.code import Code
def non_filter(label):
return False
def identity(obj):
return obj
class MongoGraphNodesTask(Task):
collection = Parameter("graph_nodes")
def __init__(self, graph, D):
self.graph = graph
self.D = D
def require(self):
return GraphPruningTask(self.graph, self.D)
def __taskid__(self):
return "GraphNodesTask_%s_%d_%d" % (self.graph, self.D)
def output(self):
return MongoResourceTarget(
self.collection.value, '_id', self.graph
)
def run(self):
with self.input()[0] as i:
G = i.query()
nodes = set([])
for node in G:
label = G.node[node]['label']
nodes.add(label)
with self.output() as o:
coll = o.collection
coll.insert_many([
{'graph_id': self.graph,
'node': n}
for n in nodes
])
class MongoFrequencyTask(Task):
collection = Parameter("node_frequency")
def __init__(self, graphs, it, D):
self.graphs = graphs
self.it = it
self.D = D
def require(self):
return [
MongoGraphNodesTask(g, self.D)
for g in self.graphs
]
def output(self):
return MongoResourceTarget(
self.collection.value, '_id', 'frequency_%d' % self.it
)
def run(self):
with self.input()[0] as i:
coll = i.collection
map = Code("""
function(){
emit(this.node, 1);
}
""")
reduce = Code("""
function(key, values){
var total = 0;
for(var i = 0; i < values.length; i++){
total += values[i];
}
return total;
}
""")
reduce = coll.map_reduce(map, reduce, self.collection.value)
all = len(self.graphs)
reduce.update({}, {'$mul': {'value': 1/all}})
| cedricrupb/pySVRanker | frequent_pattern_tasks.py | frequent_pattern_tasks.py | py | 2,344 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "pyTasks.tasks.Task",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pyTasks.tasks.Parameter",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "graph_tasks.GraphPruningTask",
"line_number": 26,
"usage_type": "call"
},
{
"api_name"... |
38775154404 | import random
import os
import cv2
import numpy as np
import pickle
from matplotlib import style
from AI_KNearestAlogrithm import Classifier
np.set_printoptions(threshold=np.inf, suppress=True)
style.use('fivethirtyeight')
class FacialClassifier:
def __init__(self):
self.frame_array = []
self.face = []
self.face_cascade = cv2.CascadeClassifier('C:\\Users\\DELL\\Python\\#DM\\haarcascade_frontalface_default.xml')
def pick(self, pickle_file):
if os.path.isfile(pickle_file):
with open(pickle_file, 'rb') as f:
self.frame_array = pickle.load(f)
else:
raise FileNotFoundError("Pickle file not found. ")
def crop_face(self, img):
array = []
i = 0
while array == []:
i += 1
faces = self.face_cascade.detectMultiScale(img, 1.3, 5)
for (x, y, a, b) in faces:
array = img[y:y+b, x:x+b]
if i == 5:
return img
return array
def process_img(self, frame):
face = self.crop_face(frame)
grey_face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
# frame = clahe.apply(grey_face)
# f = cv2.equalizeHist(frame)
frame = cv2.resize(face, (200, 200))
# print(f)
# cv2.imshow('window', frame)
# cv2.waitKey(0)
# frame_eigenvalue, frame_eigenvector = np.linalg.eig(frame)
return frame
def fit(self, directory=''):
pics = os.listdir(directory)
pics.remove('desktop.ini')
random.shuffle(pics)
# pics.remove('desktop.ini')
groups = []
for pic in pics:
if pic[0] not in groups:
groups.append(pic[0])
for g in groups:
similar = []
i = 0
for pic in pics:
group = pic[0]
print('detecting face ' + str(i + 1) + ' of ' + str(len(pics)))
if group == g:
try:
frame = cv2.imread(directory + '\\' + pic)
frame_value = self.process_img(frame)
similar.append(frame_value.astype('int64'))
except:
pass
i += 1
self.frame_array.append([g, similar])
return self.frame_array
def return_face(self):
return self.face
def cache(self, pickle_file):
with open(pickle_file, 'wb') as f:
pickle.dump(self.frame_array, f)
def recognize(self, image_dir=''):
frame = []
if image_dir == '':
img = cv2.VideoCapture(0)
for i in range(40):
check, frame = img.read()
else:
frame = cv2.imread(image_dir)
self.face = frame
cv2.imshow('window', frame)
cv2.waitKey(0)
frame_eigenvalue = self.process_img(frame)
CLR = Classifier(self.frame_array, opt='list')
result = CLR.predict(frame_eigenvalue, 3)
return result
class FacialNN:
def __init__(self, X, Y, w1, b1):
self.x = np.array(X)
self.y = np.array(Y)
self.w1 = np.array(w1)
# self.w2 = np.array(w2)
self.b1 = np.array(b1)
# self.b2 = np.array(b2)
self.L1 = np.array([])
self.L2 = np.array([])
def sigmoid(self, x):
return 1 / (1 + np.e ** -x)
def sigmoid_der(self, x):
return self.sigmoid(x) * (1 - self.sigmoid(x))
def preprocess(self, directory='', remove="desktop.ini", parts=6):
X = [[], [], [], [], [], []]
Y = []
if directory != '':
pics = os.listdir()
random.shuffle(pics)
pics.remove(remove)
for pic in pics:
# print(pic)
frame = cv2.imread(directory + '\\' + pic)
grey_frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(grey_frame, (234, 234))
part_size = int(frame.shape[0] / parts)
j = i = 0
for _ in range(6):
print(i + part_size)
frame_part = frame[i:i + part_size, j:j + part_size]
X[_].append(frame_part)
i += part_size
j += part_size
self.x = X
def cache(self, pickle_file):
with open(pickle_file, 'wb') as f:
pickle.dump(self.x, f)
def feed_forward(self):
# Layer 1:
self.WX11 = WX11 = np.dot(self.w1[0], self.x[0]) + self.b1[0]
self.WX12 = WX12 = np.dot(self.w1[1], self.x[1]) + self.b1[1]
# self.WX13 = WX13 = np.dot(self.w1[2], self.x[2]) + self.b1[2]
L1 = self.sigmoid(WX11 + WX12 + self.b1[3])
self.L2 = L1
# Layer 2:
# WX21 = np.dot(self.w2[0], L1)
# WX22 = np.dot(self.w2[1], L1)
# WX23 = np.dot(self.w2[2], L1)
# self.L2 = self.sigmoid(WX21 + WX22 + WX23 + self.b2)
def back_propagation(self):
error = ((self.L2 - self.y)**2)/2
loss = error.sum()
print(loss)
# WX11
d1 = self.sigmoid_der(self.WX11)# self.sigmoid_der(self.WX11)
d2 = d1 * error
d3 = np.dot(d2, self.x[0].T)
# WX12
d4 = self.sigmoid_der(self.WX12)# self.sigmoid_der(self.WX11)
d5 = d4 * error
d6 = np.dot(d5, self.x[1].T)
# Updates:
self.w1[0] += d3
# self.w2[1] -= d6
#
# def return_weights(self):
# def predict(self):
X = [[[1,0,1], [0,0,0]], [[1,1,1], [0,1,1]]]
Y = [1,0,1]
w1 = np.random.rand(2,2)
b1 = [0.3, 0.2, 0.1, 0.5]
def main():
FNN = FacialNN(X, Y, w1, b1)
for i in range(60000):
FNN.preprocess('C:\\Users\\DELL\\Pictures\\Camera Roll')
FNN.feed_forward()
FNN.back_propagation()
if __name__ == '__main__':
main()
| olusegvn/Defence-and-Privacy-mechanisms | AI_FacialRecognition.py | AI_FacialRecognition.py | py | 6,190 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.set_printoptions",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.style.use",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.st... |
37528229182 | import bs4 as bs
from urllib import request
def get_urls(file):
f = open(file,"r")
urls = []
for line in f.readlines():
urls.append(line)
return urls
def enter_urls(file,urls):
f = open(file,'w')
for url in urls:
f.write(url+'\n')
f.close()
def make_unique(arr):
arr2 = []
for i in arr:
if i not in arr2:
arr2.append(i)
return arr2
fname = 'urls.csv'
urls = get_urls(fname)
i = 0
while True:
try:
html = request.urlopen(urls[i]).read()
soup = bs.BeautifulSoup(html,'html.parser')
links = soup.find_all('a')
urls_new = []
for link in links:
href = link['href']
if href.find('http') == -1:
urls_new.append(urls[i].replace('\n','')+href[1:len(href)])
for url in urls_new:
urls.append(url)
i += 1
except:
break
urls = make_unique(urls)
enter_urls('result.csv',urls)
| stefanivus/Web-scraping | Web Cralwer.py | Web Cralwer.py | py | 1,017 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "urllib.request.urlopen",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 31,
"usage_type": "call"
}
] |
20606152072 | import psutil
from influxdb import InfluxDBClient
import time
client = InfluxDBClient(host='localhost', port=8086)
client.create_database('system')
measurement_name = 'system_data'
data_end_time = int(time.time() * 1000)
data = []
cpu_p, mem_p, disk_read, disk_write, net_sent_now, net_recv_now, temp, \
boot_time, net_sent_prev, net_recv_prev = \
0, 0, 0, 0, 0, 0, 0, 0, \
psutil.net_io_counters().bytes_sent, psutil.net_io_counters().bytes_recv
def get_system_data():
global cpu_p, mem_p, disk_write, disk_read, net_recv_now, net_sent_now,\
temp, boot_time, data_end_time
data_end_time = int(time.time() * 1000)
cpu_p = psutil.cpu_percent()
mem_p = psutil.virtual_memory().percent
disk_read = psutil.disk_io_counters().read_count
disk_write = psutil.disk_io_counters().write_count
net_sent_now = psutil.net_io_counters().bytes_sent
net_recv_now = psutil.net_io_counters().bytes_recv
temp = psutil.sensors_temperatures()['acpitz'][0].current
boot_time = psutil.boot_time()
data.append(
{
"measurement": "system_data",
"tags": {
"boot_time": boot_time
},
"fields": {
"cpu_percent": cpu_p,
"memory_percent": mem_p,
"disk_read": disk_read,
"disk_write": disk_write,
"net_sent": net_sent_now-net_sent_prev,
"net_received": net_recv_now-net_recv_prev,
"temperature": temp,
},
"time": data_end_time
}
)
client.write_points(data, database='system', time_precision='ms',
protocol='json')
def run(interval=1): # interval in seconds
global net_recv_prev, net_sent_prev
print("Script is running, press Ctrl+C to stop!")
while 1:
try:
get_system_data()
net_sent_prev = psutil.net_io_counters().bytes_sent
net_recv_prev = psutil.net_io_counters().bytes_recv
time.sleep(interval)
except KeyboardInterrupt:
quit()
except:
pass
run()
| rishabh-22/influxdb-scripts | system_data_insert.py | system_data_insert.py | py | 2,150 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "influxdb.InfluxDBClient",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "psutil.net_io_counters",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "time.time",
... |
38454000752 | from collections import defaultdict
def factorize(n):
if n == 1:
return 0
factor = defaultdict(int)
while True:
isPrime = True
for i in range(2, int(n**0.5)+1):
if n % i == 0:
factor[i] += 1
n = n // i
isPrime = False
break
if isPrime:
factor[n] += 1
break
return factor
while True:
L = list(map(int,input().split()))
if L == [0]:
break
x = 1
for i in range(0,len(L),2):
x *= L[i]**L[i+1]
x -= 1
D = factorize(x)
Factor = []
for i in D:
Factor.append((i,D[i]))
Factor.sort(reverse=True)
for i in Factor:
print(*i, end=" ")
print('')
| LightPotato99/baekjoon | math/prime/primeland.py | primeland.py | py | 766 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.defaultdict",
"line_number": 7,
"usage_type": "call"
}
] |
36164982911 | from PyQt6 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1500,200)
MainWindow.setStyleSheet("background-color: #282828")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.search = QtWidgets.QLineEdit(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(32)
self.search.setFont(font)
self.search.setToolTipDuration(-3)
self.search.setStyleSheet("background-color: #161a1e; color: white; border: 3px solid #161a1e")
self.search.setObjectName("search")
self.gridLayout.addWidget(self.search, 0, 0, 1, 4)
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(32)
self.pushButton.setFont(font)
self.pushButton.setStyleSheet("background-color: #161a1e; color: white;")
self.pushButton.setObjectName("pushButton")
self.gridLayout.addWidget(self.pushButton, 0, 4, 1, 1)
self.line_1 = QtWidgets.QFrame(self.centralwidget)
self.line_1.setStyleSheet("")
self.line_1.setObjectName("line_1")
self.line_1.setFrameShape(QtWidgets.QFrame.Shape.HLine)
self.line_1.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken)
self.gridLayout.addWidget(self.line_1, 1, 0, 1, 5)
self.marketLabel_1 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(24)
self.marketLabel_1.setFont(font)
self.marketLabel_1.setStyleSheet("color: white")
self.marketLabel_1.setAlignment(QtCore.Qt.AlignmentFlag.AlignLeft|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.marketLabel_1.setFixedHeight(40)
self.marketLabel_1.setObjectName("marketLabel_1")
self.gridLayout.addWidget(self.marketLabel_1, 2, 0, 1, 1)
self.marketLabel_2 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(24)
self.marketLabel_2.setFont(font)
self.marketLabel_2.setStyleSheet("color: white")
self.marketLabel_2.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.marketLabel_2.setFixedHeight(40)
self.marketLabel_2.setObjectName("marketLabel_2")
self.gridLayout.addWidget(self.marketLabel_2, 2, 1, 1, 1)
self.marketLabel_3 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(24)
self.marketLabel_3.setFont(font)
self.marketLabel_3.setStyleSheet("color: white")
self.marketLabel_3.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.marketLabel_3.setFixedHeight(40)
self.marketLabel_3.setObjectName("marketLabel_3")
self.gridLayout.addWidget(self.marketLabel_3, 2, 2, 1, 1)
self.marketLabel_4 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(24)
self.marketLabel_4.setFont(font)
self.marketLabel_4.setStyleSheet("color: white")
self.marketLabel_4.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.marketLabel_4.setFixedHeight(40)
self.marketLabel_4.setObjectName("marketLabel_4")
self.gridLayout.addWidget(self.marketLabel_4, 2, 3, 1, 1)
self.line_2 = QtWidgets.QFrame(self.centralwidget)
self.line_2.setStyleSheet("")
self.line_2.setObjectName("line_2")
self.line_2.setFrameShape(QtWidgets.QFrame.Shape.HLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken)
self.gridLayout.addWidget(self.line_2, 3, 0, 1, 5)
self.tradingPairs = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(26)
self.tradingPairs.setFont(font)
self.tradingPairs.setStyleSheet("color: white;")
self.tradingPairs.setAlignment(QtCore.Qt.AlignmentFlag.AlignLeft|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.tradingPairs.setFixedHeight(40)
self.tradingPairs.setObjectName("tradingPairs")
self.gridLayout.addWidget(self.tradingPairs, 4, 0, 1, 1)
self.lastTradedPrice = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(26)
self.lastTradedPrice.setFont(font)
self.lastTradedPrice.setStyleSheet("color: white;")
self.lastTradedPrice.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.lastTradedPrice.setObjectName("lastTradedPrice")
self.gridLayout.addWidget(self.lastTradedPrice, 4, 1, 1, 1)
self.percentageChange = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(26)
self.percentageChange.setFont(font)
self.percentageChange.setStyleSheet("color: white;")
self.percentageChange.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.percentageChange.setObjectName("percentageChange")
self.gridLayout.addWidget(self.percentageChange, 4, 2, 1, 1)
self.turnover = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(26)
self.turnover.setFont(font)
self.turnover.setStyleSheet("color: white;")
self.turnover.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.turnover.setObjectName("label_5")
self.gridLayout.addWidget(self.turnover, 4, 3, 1, 1)
self.line_3 = QtWidgets.QFrame(self.centralwidget)
self.line_3.setStyleSheet("")
self.line_3.setObjectName("line_3")
self.line_3.setFrameShape(QtWidgets.QFrame.Shape.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken)
self.gridLayout.addWidget(self.line_3, 5, 0, 1, 5)
self.notificationsLabel = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(24)
self.notificationsLabel.setFont(font)
self.notificationsLabel.setToolTipDuration(-3)
self.notificationsLabel.setStyleSheet("color: white")
self.notificationsLabel.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.notificationsLabel.setObjectName("notificationsLabel")
self.gridLayout.addWidget(self.notificationsLabel, 6, 0, 1, 5)
self.line_4 = QtWidgets.QFrame(self.centralwidget)
self.line_4.setStyleSheet("")
self.line_4.setObjectName("line_5")
self.line_4.setFrameShape(QtWidgets.QFrame.Shape.HLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken)
self.gridLayout.addWidget(self.line_4, 7, 0, 1, 5)
self.notificationsLabel_1 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(24)
self.notificationsLabel_1.setFont(font)
self.notificationsLabel_1.setToolTipDuration(-3)
self.notificationsLabel_1.setStyleSheet("color: white")
self.notificationsLabel_1.setAlignment(QtCore.Qt.AlignmentFlag.AlignLeft|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.notificationsLabel_1.setObjectName("notificationsLabel")
self.gridLayout.addWidget(self.notificationsLabel_1, 8, 0, 1, 1)
self.notificationsLabel_2 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(24)
self.notificationsLabel_2.setFont(font)
self.notificationsLabel_2.setToolTipDuration(-3)
self.notificationsLabel_2.setStyleSheet("color: white")
self.notificationsLabel_2.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.notificationsLabel_2.setObjectName("notificationsLabel")
self.gridLayout.addWidget(self.notificationsLabel_2, 8, 1, 1, 1)
self.notificationsLabel_3 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(24)
self.notificationsLabel_3.setFont(font)
self.notificationsLabel_3.setToolTipDuration(-3)
self.notificationsLabel_3.setStyleSheet("color: white")
self.notificationsLabel_3.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.notificationsLabel_3.setObjectName("notificationsLabel")
self.gridLayout.addWidget(self.notificationsLabel_3, 8, 2, 1, 1)
self.notificationsLabel_4 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(24)
self.notificationsLabel_4.setFont(font)
self.notificationsLabel_4.setToolTipDuration(-3)
self.notificationsLabel_4.setStyleSheet("color: white")
self.notificationsLabel_4.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.notificationsLabel_4.setObjectName("notificationsLabel")
self.gridLayout.addWidget(self.notificationsLabel_4, 8, 3, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.pushButton.setText(_translate("MainWindow", "найти"))
self.marketLabel_1.setText(_translate("MainWindow", "Торговая пара"))
self.marketLabel_2.setText(_translate("MainWindow", "Цена"))
self.marketLabel_3.setText(_translate("MainWindow", "Изменения, %"))
self.marketLabel_4.setText(_translate("MainWindow", "Объемы"))
self.notificationsLabel.setText(_translate("MainWindow", "Уведомления"))
self.notificationsLabel_1.setText(_translate("MainWindow", "Торговая пара"))
self.notificationsLabel_2.setText(_translate("MainWindow", "Цена"))
self.notificationsLabel_3.setText(_translate("MainWindow", "Изменения, %"))
self.notificationsLabel_4.setText(_translate("MainWindow", "Дата и время"))
if __name__ == "__main__":
    # Standard Qt bootstrap: build the app, attach the generated UI to a
    # main window, and hand control to the Qt event loop.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec())
{
"api_name": "PyQt6.QtWidgets.QWidget",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PyQt6.QtWidgets",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "PyQt6.QtWidgets.QGridLayout",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "P... |
6399955298 | # welcome to tweet-yeet – lets yeet those tweets!
from datetime import datetime, timedelta
import tweepy
if __name__ == "__main__":
    # options
    delete_tweets = True
    # Fixed: this flag was defined as "deletes_favs" but read below as
    # "delete_favs", which raised a NameError at runtime.
    delete_favs = False
    censor = True
    days_to_keep = 7
    censor_word = "word"

    # api info
    consumer_key = 'XXXXXXXX'
    consumer_secret = 'XXXXXXXX'
    access_token = 'XXXXXXXX'
    access_token_secret = 'XXXXXXXX'

    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)

    # Anything created before this date gets removed.
    cutoff_date = datetime.utcnow() - timedelta(days=days_to_keep)

    # Delete own tweets older than the cutoff.
    if delete_tweets:
        timeline = tweepy.Cursor(api.user_timeline).items()
        deletion_count = 0
        for tweet in timeline:
            if tweet.created_at < cutoff_date:
                api.destroy_status(tweet.id)
                deletion_count += 1
        print(deletion_count, "tweets have been deleted.")

    # Remove favorites older than the cutoff.
    if delete_favs:
        favorites = tweepy.Cursor(api.favorites).items()
        deletion_count = 0
        for tweet in favorites:
            if tweet.created_at < cutoff_date:
                # Fixed: favorited tweets belong to other users and cannot be
                # destroyed; un-favoriting is the supported operation.
                api.destroy_favorite(tweet.id)
                deletion_count += 1
        print(deletion_count, "favorites have been deleted.")

    # Remove tweets / favorites whose text contains the censored word.
    if censor:
        favorites = tweepy.Cursor(api.favorites).items()
        timeline = tweepy.Cursor(api.user_timeline).items()
        for favorite in favorites:
            # Fixed: Status objects do not support "in"; check the tweet text.
            if censor_word in favorite.text:
                api.destroy_favorite(favorite.id)
        for tweet in timeline:
            if censor_word in tweet.text:
                api.destroy_status(tweet.id)
        print(censor_word, "is a bad word")
{
"api_name": "tweepy.OAuthHandler",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tweepy.API",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetime.date... |
33917530992 | from dateutil.parser import parse as parse_date
from flask import current_app
from inspire_dojson import record2marcxml
from inspire_utils.record import get_value
from lxml import etree
def dumps_etree(pid, record, **kwargs):
    """Dump MARC21 compatible record.

    :param pid: The :class:`invenio_pidstore.models.PersistentIdentifier`
        instance.
    :param record: The :class:`invenio_records.api.Record` instance.
    :returns: A LXML Element instance.
    """
    src = record['_source']

    # Legacy version (controlfield 005), derived from the acquisition date.
    acquired = parse_date(src['acquisition_source']['date'])
    src['legacy_version'] = acquired.strftime("%Y%m%d%H%M%S.0")

    # Number of pages (datafield 300), when present.
    pages = get_value(src, 'page_nr[0]')
    if pages:
        src['number_of_pages'] = pages

    # Build download URLs from attached files when none are present yet.
    if 'urls' not in src and '_files' in src:
        server = current_app.config.get('SERVER_NAME')
        src['urls'] = [
            {
                'value': 'http://%s/api/files/%s/%s' % (server, f['bucket'], f['key']),
                'description': f.get('filetype', ''),
            }
            for f in src['_files']
        ]

    return etree.fromstring(record2marcxml(src))
| SCOAP3/scoap3-next | scoap3/modules/records/oai_serializer.py | oai_serializer.py | py | 1,264 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "dateutil.parser.parse",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "inspire_utils.record.get_value",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "flask.current_app.config.get",
"line_number": 31,
"usage_type": "call"
},
{
... |
1848378529 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.action_chains import ActionChains
import pyperclip
import time
def copy_input(driver, xpath, input):
    """Paste *input* into the element at *xpath* via the clipboard (Ctrl+V)."""
    pyperclip.copy(input)
    target = driver.find_element_by_xpath(xpath)
    target.click()
    chain = ActionChains(driver)
    chain.key_down(Keys.CONTROL)
    chain.send_keys('v')
    chain.key_up(Keys.CONTROL)
    chain.perform()
    time.sleep(1)
def goSuntable():
    """Google-search for the Sun Table site, open it, then follow a menu link."""
    driver = webdriver.Chrome()
    driver.get('https://google.com')
    # Type the (Korean) search term, then submit with Enter.
    search_box = driver.find_element_by_css_selector('.gLFyf.gsfi')
    search_box.send_keys('썬 테이블')
    driver.find_element_by_css_selector('.gLFyf.gsfi').send_keys(Keys.ENTER)
    # Open the first organic result, then a specific sub-menu entry.
    driver.find_element_by_css_selector('.LC20lb.DKV0Md').click()
    menu_link = driver.find_element_by_xpath("//div[@class='nav sub-menu sub_menu_hide v-menu-type1 menu-vertical row-cnt-3 row-cnt-mobile-3']/ul/li[@data-code='m202004152c81c6f5f4a24']/a")
    menu_link.send_keys(Keys.ENTER)
    driver.quit()
def goKkamdung():
    """Naver-search for '63빌딩 돌상' and open a specific blog result."""
    driver = webdriver.Chrome()
    driver.get('https://www.naver.com')
    time.sleep(1)
    # Paste the query via the clipboard (avoids IME issues with Korean text).
    copy_input(driver, "//input[@id='query']", "63빌딩 돌상")
    time.sleep(1)
    query_field = driver.find_element_by_xpath("//input[@id='query']")
    query_field.send_keys(Keys.ENTER)
    time.sleep(1)
    driver.find_element_by_xpath("//a[@href='https://blog.naver.com/kongminsook66']").click()
    time.sleep(1)
    driver.quit()
# Repeat the Naver navigation ten times.
for i in range(10) :
    goKkamdung()
{
"api_name": "pyperclip.copy",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.common.action_chains.ActionChains",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.common.keys.Keys.CONTROL",
"line_number": 11,
"usag... |
12050806024 | """Setup the tilemap and the camera"""
import pygame as pg
from settings import TILESIZE, MAPSIZE, VIEWSIZE
from tile import Tile
class Camera():
    """A tile-based camera panned with the i/j/k/l keys, clamped to the map."""

    def __init__(self, width, height):
        self.camera = pg.Rect(0, 0, width, height)
        self.width = width
        self.height = height
        self.x = 0
        self.y = 0

    def apply(self, rect):
        """Shift a rect by the current camera offset.

        Args:
            rect (Rect): the rect to move

        Returns:
            Rect
        """
        return rect.move(self.camera.topleft)

    def update(self):
        """Pan the camera according to the pressed keys."""
        keys = pg.key.get_pressed()
        dx = 0
        dy = 0
        if keys[pg.K_i]:
            dy += TILESIZE
        if keys[pg.K_k]:
            dy -= TILESIZE
        if keys[pg.K_j]:
            dx += TILESIZE
        if keys[pg.K_l]:
            dx -= TILESIZE
        # Clamp the offset so the view never leaves the map bounds.
        floor = (VIEWSIZE - MAPSIZE) * TILESIZE
        self.x = max(min(self.x + dx, 0), floor)
        self.y = max(min(self.y + dy, 0), floor)
        self.camera = pg.Rect(self.x, self.y, self.width, self.height)

    def get_x(self):
        """Return the number of tiles the camera has moved in x.

        Returns:
            int
        """
        return self.x // TILESIZE

    def get_y(self):
        """Return the number of tiles the camera has moved in y.

        Returns:
            int
        """
        return self.y // TILESIZE
| Barbapapazes/dungeons-dragons | map_editor/tilemap.py | tilemap.py | py | 1,448 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pygame.Rect",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pygame.key.get_pressed",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pygame.key",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_i",
"... |
20362021082 | from rest_framework.decorators import api_view
from rest_framework.response import Response
from account.models import User
from store.models import Product
from django.contrib.auth.decorators import login_required
from account.authentication import create_access_token, create_refresh_token, decode_access_token, decode_refresh_token, JWTAuthentication
from rest_framework.authentication import get_authorization_header
from rest_framework.views import APIView
from rest_framework.exceptions import AuthenticationFailed
from rest_framework import status
from .models import Order, OrderItem, DiscountCode
from account.serializers import UserSerializer
from .serializers import OrderSerializers, OrderItemSerializers, DiscountCodeSerializers
import base64
class DiscountCodeCreateAPIView(APIView):
    """Look up (GET) and create (POST) discount codes."""

    def get(self, request):
        """Return the discount code matching the ``sale_code`` query param."""
        sale_code = request.GET.get("sale_code", "")
        if not sale_code:
            return Response(
                {"error": "Sale code is required."},
                status=status.HTTP_400_BAD_REQUEST,
            )
        try:
            code = DiscountCode.objects.get(name_code=sale_code)
        except DiscountCode.DoesNotExist:
            return Response(
                {"error": "Discount code not found."},
                status=status.HTTP_404_NOT_FOUND,
            )
        return Response(DiscountCodeSerializers(code).data)

    def post(self, request):
        """Validate and persist a new discount code."""
        serializer = DiscountCodeSerializers(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
def create_order_item(pro, order, quantity, price):
    # Create and persist one order line item.
    # NOTE(review): positional args to a Django model map to fields in
    # declaration order (the first is usually the auto "id" pk), so this call
    # almost certainly assigns the wrong fields. Use keyword arguments that
    # match the actual field names — confirm against Orders.models.OrderItem.
    order_Item = OrderItem(pro, order, quantity, price)
    order_Item.save()
    return order_Item
class OrderCodeCreateAPIView(APIView):
    """Create an order (with its line items) from the posted payload."""

    def post(self, request):
        """Validate the order payload, persist it and its line items.

        Returns 201 with the serialized order on success, 400 with the
        validation errors otherwise. (Fixed: the original returned nothing
        in either case, producing an empty 500 response.)
        """
        data = request.data
        # Products to turn into OrderItem rows, one per entry.
        pro_list = data.getlist('ProList')
        # Field-level validation (address, discount, totals, ...) is the
        # serializer's job; the old unused per-field reads were dropped.
        serializer = OrderSerializers(data=data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        order = serializer.save()
        for pro in pro_list:
            create_order_item(pro, order, pro.quantity, pro.price)
        return Response(serializer.data, status=status.HTTP_201_CREATED)
# class OrderAPIView(APIView):
# def get(self, request):
# auth = get_authorization_header(request).split()
# if auth and len(auth) == 2:
# token = auth[1].decode('utf-8')
# id = decode_access_token(token)
# user = User.objects.filter(pk=id).first()
# try:
# cart = Cart.objects.get(user=user)
# serializer = CartSerializers(instance=cart)
# # get cart item in cart:
# except Cart.DoesNotExist:
# cart = Cart.objects.create(user=user)
# serializer = CartSerializers(instance=cart)
# print(cart.quantity)
# cartItems = CartItem.objects.filter(cart=cart)
# itemSerializer = CartItemSerializers(instance=cartItems, many=True)
# listP = []
# for PItem in cartItems:
# p_name = PItem.product.product_name
# p_slug = PItem.product.slug
# p_price = PItem.product.price
# p_image = PItem.product.image
# print(str(p_image))
# p_invetory = PItem.product.inventory
# product = {
# 'product_name': p_name,
# 'slug': p_slug,
# 'price': p_price,
# 'image': str(p_image),
# 'inventory': p_invetory,
# 'quantity': PItem.quantity
# }
# listP.append(product)
# cart = {
# 'cart': serializer.data,
# 'item_cart': itemSerializer.data,
# 'list_product': listP,
# }
# return Response({"cart_detail": cart})
# raise AuthenticationFailed('Unauthenticated!')
| MTrungNghia/Gundam_web | BackEnd/GundamMTN/Orders/views.py | views.py | py | 4,330 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.views.APIView",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "rest_framework.response.Response",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST",
"line_number": 22,
"usage_type... |
9766345042 | # Run this app with `python app.py` and
# visit http://127.0.0.1:8050/ in your web browser.
"""
Dashboard that shows user groups with percentages
and recommended books
"""
from dash import dcc, html
import plotly.express as px
import pandas as pd
from dash.exceptions import PreventUpdate
from dash.dependencies import Input, Output
import requests
import dash_bootstrap_components as dbc
from app import app
"""
Importacion de datos
"""
# Casteamos el pd.read_json a un DataFrame
"""
Creacion de labels para los dropdowns
"""
# Label: valor que se muestra en el dropdowns
# value: valor que tendra el dropdown despues de seleccionar
"""
HTML
"""
layout = html.Div(
children=[
html.H1(children="UAQUE: Pertenencia de los usuarios"),
html.Div(
dcc.Dropdown(
id="users_id_pertenencia_dropdown",
value="50052490d474975ef40a67220d0491571630eca6",
clearable=False,
)
),
dbc.Spinner(children=[
html.Div(
id="user_name",
children="""
""",
),
dcc.Graph(
id="dewey_graph",
),
html.Div(
children=[
html.Ul(id="book_list", children=[html.Li(i) for i in []]),
],
),
]),
]
)
"""
Callbacks
Input: lo que cambiar. El primer valor es el id del item que cambia en el HTML. El segundo valor es child del item que cambia.
Output: item que cambia en reaccion. Lo mismo que arriba
Funcion debajo del callback es lo que controla el cambio. Las entradas de la funcion es el segundo valor
del input y el retorno es valor que va a tener el segundo argumento del output.
"""
# Cuando cambia el valor de busqueda, cambian las opciones que preesnta el dropdown.
@app.callback(
    Output("users_id_pertenencia_dropdown", "options"),
    Input("users_id_pertenencia_dropdown", "search_value"),
)
def update_options(search_value):
    """Fetch dropdown options matching the typed search value."""
    if not search_value:
        # Nothing typed yet: leave the dropdown untouched.
        raise PreventUpdate
    # The backend limits the response size (cannot return all ~40,000 users).
    endpoint = "localhost:8000/api"
    module = "suj-e-004"
    resource = "DashboardPertenenciaUtilsUpdateOptions"
    url = "http://" + "/".join((endpoint, module, resource))
    response = requests.get(url=url, params={"search_value": search_value})
    return response.json()
# Cuando cambia el valor del dropdown cambia el nombre de usuario del titulo
@app.callback(
    Output("user_name", "children"), [Input("users_id_pertenencia_dropdown", "value")]
)
def update_table_title(user_name):
    """Show the selected user id above the results.

    Fixed: the bare commas in ``result = "\\n", str(user_name), "\\n"`` built
    a 3-tuple rather than a string; return a single string instead.
    """
    return "\n" + str(user_name) + "\n"
# Cuando cambia el valor del dropdown, cambia la lista de libros
@app.callback(
    Output("book_list", "children"), [Input("users_id_pertenencia_dropdown", "value")]
)
def update_book_list(dropdown_value):
    """Build the recommended-book list for the selected user."""
    endpoint = "localhost:8000/api"
    module = "suj-e-004"
    resource = "DashboardPertenenciaUtilsUpdateBookList"
    url = "http://" + "/".join((endpoint, module, resource))
    response = requests.get(url=url, params={"dropdown_value": dropdown_value})
    book_table = pd.DataFrame.from_dict(response.json())

    # One <li> per title, each followed by a nested list of its key and Dewey.
    book_list = []
    for idx in range(len(book_table)):
        book_list.append(html.Li(book_table["Titulo"].values[idx]))
        book_list.append(
            html.Ul(
                children=[
                    html.Li("Llave: " + str(book_table["Llave"].values[idx])),
                    html.Li("Dewey: " + str(book_table["DeweyUnidad"].values[idx])),
                ]
            )
        )
    return book_list
# Cuando cambia el valor del dropdown, cambia la grafica
@app.callback(
    Output("dewey_graph", "figure"), [Input("users_id_pertenencia_dropdown", "value")]
)
def update_graph(dropdown_value):
    """Draw the Dewey-membership bar chart for the selected user."""
    endpoint = "localhost:8000/api"
    module = "suj-e-004"
    resource = "DashboardPertenencia"
    url = "http://" + "/".join((endpoint, module, resource))
    response = requests.get(url=url, params={"dropdown_value": dropdown_value})
    selected_row = response.json()

    fig = px.bar(selected_row, hover_data=["value"])
    # Hover shows the membership percentage with two decimals.
    fig.update_traces(hovertemplate="<b>Pertenencia: %{y:.2f}%</b><extra></extra>")
    fig.update_xaxes(type="category", title_text="Deweys")
    fig.update_yaxes(title_text="Pertenencia (%)")
    fig.update_layout(showlegend=False)
    return fig
| Johan-Ortegon-1/uaque | Dashboard/apps/dashboard_pertenencia.py | dashboard_pertenencia.py | py | 5,360 | python | es | code | 0 | github-code | 6 | [
{
"api_name": "dash.html.Div",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "dash.html",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "dash.html.H1",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "dash.html",
"line_number": 3... |
12978124343 | import sys
from pymongo import MongoClient,DESCENDING,ASCENDING
def get_rank(user_id):
    """Return (rank, total_score, total_time) for *user_id*.

    Rank rule: user A ranks above user B when A has a higher total score,
    or an equal score achieved in less total time. The rank is 1 plus the
    number of users ranking strictly above the given user. Returns
    (0, 0, 0) when the user has no contest records.
    """
    client = MongoClient()
    db = client.shiyanlou
    contests = db.contests

    # Aggregate this user's own totals (score and submit time).
    pl_match = {'$match': {'user_id': user_id}}
    pl_group = {'$group': {
        '_id': '$user_id',
        'total_score': {'$sum': '$score'},
        'total_time': {'$sum': '$submit_time'}
    }}
    l = list(contests.aggregate([pl_match, pl_group]))
    # Fixed: the unknown-user check must happen BEFORE indexing l[0];
    # the original indexed first and raised IndexError on unknown users.
    if len(l) == 0:
        return 0, 0, 0
    user_score = l[0]['total_score']
    user_time = l[0]['total_time']

    # Count the users that rank strictly above this one:
    # higher score, or equal score in less time.
    p_group1 = {'$group': {'_id': '$user_id',
                           'total_score': {'$sum': '$score'},
                           'total_time': {'$sum': '$submit_time'}}}
    p_match = {'$match': {'$or': [
        {'total_score': {'$gt': user_score}},
        {'total_time': {'$lt': user_time}, 'total_score': user_score}
    ]}}
    p_group2 = {'$group': {'_id': None, 'count': {'$sum': 1}}}
    result = list(contests.aggregate([p_group1, p_match, p_group2]))

    if len(result) > 0:
        rank = result[0]['count'] + 1
    else:
        rank = 1
    # Keep this ordering: rank, score, time (callers depend on it).
    return rank, user_score, user_time
if __name__ == '__main__':
    """
    1.判断参数格式是否符合要求
    2.获取user_id 参数
    """
    # Expect exactly one argument: the user id.
    if len(sys.argv) != 2:
        print("Parameter error.")
        sys.exit(1)
    user_id=sys.argv[1]
    # Fetch the user's rank, score and time by id, then print the tuple.
    userdata = get_rank(int(user_id))
    print(userdata)
| JockerMa/syl_challenge3 | getrank.py | getrank.py | py | 2,282 | python | zh | code | 0 | github-code | 6 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number... |
36849894613 | import tensorflow.compat.v1 as tf
print(tf.__version__)
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
from graphnnSiamese import graphnn
from utils import *
import os
import argparse
import json
# Command-line options: GPU selection, model shape, training
# hyper-parameters, and checkpoint / log paths.
parser = argparse.ArgumentParser()
parser.add_argument('--device', type=str, default='0,1,2,3',
                    help='visible gpu device')
parser.add_argument('--use_device', type=str, default='/gpu:1',
                    help='used gpu device')
parser.add_argument('--fea_dim', type=int, default=76,
                    help='feature dimension')
parser.add_argument('--embed_dim', type=int, default=64,
                    help='embedding dimension')
parser.add_argument('--embed_depth', type=int, default=5,
                    help='embedding network depth')
parser.add_argument('--output_dim', type=int, default=64,
                    help='output layer dimension')
parser.add_argument('--iter_level', type=int, default=5,
                    help='iteration times')
parser.add_argument('--lr', type=float, default=1e-4,
                    help='learning rate')
parser.add_argument('--epoch', type=int, default=100,
                    help='epoch number')
parser.add_argument('--batch_size', type=int, default=128,
                    help='batch size')
# parser.add_argument('--load_path', type=str,
#         default='../data/saved_model/graphnn_model_ghidra/saved_ghidra_model_best',
#         help='path for model loading, "#LATEST#" for the latest checkpoint')
parser.add_argument('--load_path', type=str,
                    default='../data/saved_model/graphnn_model_ghidra_depth5/graphnn_model_ghidra_best',
                    help='path for model loading, "#LATEST#" for the latest checkpoint')
parser.add_argument('--log_path', type=str, default=None,
                    help='path for training log')
if __name__ == '__main__':
    # Evaluate a pre-trained graph-embedding model on train/test/valid
    # splits, report the AUC plus the best-threshold operating point of each
    # ROC curve, and save the curves as a PNG.
    args = parser.parse_args()
    args.dtype = tf.float32
    print("=================================")
    print(args)
    print("=================================")
    os.environ["CUDA_VISIBLE_DEVICES"]=args.device
    Dtype = args.dtype
    NODE_FEATURE_DIM = args.fea_dim
    EMBED_DIM = args.embed_dim
    EMBED_DEPTH = args.embed_depth
    OUTPUT_DIM = args.output_dim
    ITERATION_LEVEL = args.iter_level
    LEARNING_RATE = args.lr
    MAX_EPOCH = args.epoch
    BATCH_SIZE = args.batch_size
    LOAD_PATH = args.load_path
    LOG_PATH = args.log_path
    DEVICE = args.use_device
    SHOW_FREQ = 1
    TEST_FREQ = 1
    SAVE_FREQ = 5

    # DATA_FILE_NAME_VALID = '../data/validation_arm2non_arm_gemini_data/'
    DATA_FILE_NAME_TRAIN_TEST = '../data/vector_deduplicate_ghidra_format_less_compilation_cases/train_test'
    F_PATH_TRAIN_TEST = get_f_name(DATA_FILE_NAME_TRAIN_TEST)
    FUNC_NAME_DICT_TRAIN_TEST = get_f_dict(F_PATH_TRAIN_TEST)

    print("start reading data")
    Gs_train_test, classes_train_test = read_graph(F_PATH_TRAIN_TEST, FUNC_NAME_DICT_TRAIN_TEST, NODE_FEATURE_DIM)
    print("train and test ---- 8:2")
    print("{} graphs, {} functions".format(len(Gs_train_test), len(classes_train_test)))
    # Random 80/20 split of the train_test pool.
    perm = np.random.permutation(len(classes_train_test))
    Gs_train, classes_train, Gs_test, classes_test =\
        partition_data(Gs_train_test, classes_train_test, [0.8, 0.2], perm)
    print("Train: {} graphs, {} functions".format(
        len(Gs_train), len(classes_train)))
    print("Test: {} graphs, {} functions".format(
        len(Gs_test), len(classes_test)))

    print("valid")
    DATA_FILE_NAME_VALID = '../data/vector_deduplicate_ghidra_format_less_compilation_cases/valid'
    F_PATH_VALID = get_f_name(DATA_FILE_NAME_VALID)
    FUNC_NAME_DICT_VALID = get_f_dict(F_PATH_VALID)
    Gs_valid, classes_valid = read_graph(F_PATH_VALID, FUNC_NAME_DICT_VALID, NODE_FEATURE_DIM)
    print("{} graphs, {} functions".format(len(Gs_valid), len(classes_valid)))
    Gs_valid, classes_valid = partition_data(Gs_valid, classes_valid, [1], list(range(len(classes_valid))))

    # Model
    gnn = graphnn(
        N_x = NODE_FEATURE_DIM,
        Dtype = Dtype,
        N_embed = EMBED_DIM,
        depth_embed = EMBED_DEPTH,
        N_o = OUTPUT_DIM,
        ITER_LEVEL = ITERATION_LEVEL,
        lr = LEARNING_RATE,
        device = DEVICE
    )
    gnn.init(LOAD_PATH, LOG_PATH)

    # For each split: AUC, then the threshold maximizing (1 - fpr + tpr) / 2
    # (balanced accuracy on the ROC curve).
    auc0, fpr0, tpr0, thres0 = get_auc_epoch_batch(gnn, Gs_train, classes_train,
                                                   BATCH_SIZE)
    gnn.say("Initial training auc = {0} @ {1}".format(auc0, datetime.now()))
    print(auc0)
    print(max((1-fpr0+tpr0)/2))
    index = np.argmax((1-fpr0+tpr0)/2)
    print("index:", index)
    print("fpr", fpr0[index])
    print("tpr", tpr0[index])
    print(thres0[index])

    auc1, fpr1, tpr1, thres1 = get_auc_epoch_batch(gnn, Gs_test, classes_test,
                                                   BATCH_SIZE)
    gnn.say("Initial testing auc = {0} @ {1}".format(auc1, datetime.now()))
    print(auc1)
    print(max((1-fpr1+tpr1)/2))
    # NOTE(review): unlike the other two splits this argmax omits the /2;
    # the chosen index is identical (monotonic scaling), but it is
    # inconsistent — confirm intent.
    index = np.argmax(1-fpr1+tpr1)
    print("index:", index)
    print("fpr", fpr1[index])
    print("tpr", tpr1[index])
    print(thres1[index])

    auc2, fpr2, tpr2, thres2 = get_auc_epoch_batch(gnn, Gs_valid, classes_valid,
                                                   BATCH_SIZE)
    gnn.say("Initial validation auc = {0} @ {1}".format(auc2, datetime.now()))
    print(auc2)
    print(max((1-fpr2+tpr2)/2))
    index = np.argmax((1-fpr2+tpr2)/2)
    print("index:", index)
    print("fpr", fpr2[index])
    print("tpr", tpr2[index])
    print(thres2[index])

    # Plot the test (blue) and validation (red) ROC curves plus their
    # balanced-accuracy curves, and save to disk.
    plt.figure()
    plt.title('ROC CURVE')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.plot(fpr1,tpr1,color='b')
    plt.plot(fpr1, 1-fpr1+tpr1, color='b')
    plt.plot(fpr2, tpr2,color='r')
    plt.plot(fpr2, 1-fpr2+tpr2, color='r')
    # plt.plot([0, 1], [0, 1], color='m', linestyle='--')
    plt.savefig('auc_depth5.png')
{
"api_name": "tensorflow.compat.v1.__version__",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1",
"line_number": 2,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"... |
74222296187 | #!/usr/bin/env python
# coding: utf-8
import pandas as pd
import matplotlib.pyplot as plt
import geopandas as gpd
import requests
# For storing png files in memory
import io
# For generating GIF
import imageio
###########################################################
########## Globals....
###########################################################
# Top value to use in scale, 0 = mean + 2 std devs
max_value = 0
# Bottom value to use in scale, should be zero
min_value = 0
# SANDAG API Link
sd_api = "https://opendata.arcgis.com/datasets/854d7e48e3dc451aa93b9daf82789089_0.geojson"
# Zipcode shape file link
zip_shape_full = "https://github.com/mulroony/State-zip-code-GeoJSON/raw/master/ca_california_zip_codes_geo.min.json"
# File to write gif to. Leave blank to just render inline
# Probably won't work in this script...
gif_path = "/tmp/SD_Covid_cases.gif"
#gif_path = ""
# Select ColorMap: https://matplotlib.org/3.2.1/tutorials/colors/colormaps.html
color_map = 'YlOrRd'
###########################################################
###########################################################
# Fetch per-zipcode case counts from the SANDAG API.
r = requests.get(sd_api)
rd = [ _r['properties'] for _r in r.json()['features'] ]
case_df = pd.DataFrame(rd)
# Cleanup, reduce mem
del [r, rd]

known_zips = list(case_df['ziptext'].unique())
print("Zipcodes in data: %s"%(len(known_zips)))

# Got API link from: https://sdgis-sandag.opendata.arcgis.com/datasets/covid-19-statistics-by-zip-code
# Fetch the statewide zipcode shapes and keep only zipcodes present in the
# case data.
r = requests.get(zip_shape_full)
rd = r.json()
zip_shape_known = {}
zip_shape_known['type'] = rd['type']
zip_shape_known['features'] = []
for i in rd['features']:
    if i['properties']['ZCTA5CE10'] in known_zips:
        zip_shape_known['features'].append(i)
print("Found %s matching zip codes in shape file"%(len(zip_shape_known['features'])))
del [r, rd]

gdf = gpd.GeoDataFrame.from_features(zip_shape_known)
gdf.rename(columns={'ZCTA5CE10': 'zipcode'}, inplace=True)
gdf.set_index('zipcode',inplace=True)

# Drop time from date, not useful
case_df['date'] = case_df['updatedate'].apply(lambda x: x.split(" ")[0])
# Drop unused fields
case_df.drop(inplace=True, columns=['FID',
                                    'zipcode_zip',
                                    'created_date',
                                    'updatedate',
                                    'created_date',
                                    'created_user',
                                    'last_edited_date',
                                    'last_edited_user',
                                    'globalid'])
# Missing data becomes zeros
case_df.fillna(0, inplace=True)
# Rename column
case_df.rename(columns={'ziptext': 'zipcode'}, inplace=True)
# Drop duplicates, have seen some
case_df.drop_duplicates(subset=['zipcode','date'], inplace=True)
# Ugly, but creates table I want: one row per zipcode, one column per date.
case_df = case_df.groupby(['zipcode', 'date']).sum().unstack().fillna(0)
# End up with nested column name, remove it
case_df.columns = case_df.columns.droplevel()

# Create super list of all case values so we can get stats if we are going to use it
if max_value == 0:
    _tmp_case_list = []

# Not necessary, but can't hurt
dates = sorted(case_df.columns.values)

# subtract tomorrow from today, and set that as the value for today. repeat, skipping last day...
# This converts cumulative totals into daily new-case counts.
for i in range(len(dates)-1):
    today = dates[i]
    tomorrow = dates[i+1]
    case_df[today] = case_df[tomorrow] - case_df[today]
#     #Uncomment to find all negative values. Happens due to adjusting the numbers, we handle it
#     #Good to do though
#     _tmp_df = case_df[today].apply(lambda x: x if x < -1 else None).dropna()
#     if _tmp_df.values.size > 0:
#         print("%s"%(today))
#         print(_tmp_df)
    if max_value == 0:
        _tmp_case_list += list(case_df[today].values)

# Scale cap: mean + 2 standard deviations of all daily counts.
if max_value == 0:
    _tmp_case_df = pd.DataFrame(_tmp_case_list)
    max_value = int(_tmp_case_df.mean()[0] + (2 * _tmp_case_df.std()[0]))
    print("max_value = %s"%(max_value))

# Limit values based on max / min
for i in dates[:-1]:
    case_df[i] = case_df[i].apply(lambda x: min_value if x < min_value else x)
    case_df[i] = case_df[i].apply(lambda x: max_value if x > max_value else x)

# Remove last day
case_df.drop(inplace=True, columns=[case_df.columns[-1]])

# ## Merge shape file with zipcodes gdf and case_df, create case_gdf
case_gdf = gdf.merge(case_df, left_on='zipcode', right_on='zipcode')

# Render one choropleth frame per day into in-memory PNG files.
output_files = []
for idx in dates[:-1]:
    # Create inmemory file
    output_files.append(io.BytesIO())
    fig = case_gdf.plot(cmap=color_map,
                        column=idx,
                        linewidth=0.8,
                        edgecolor='0.8',
                        vmax=int(max_value),
                        vmin=min_value,
                        legend=True,
                        figsize=(14,8))
    fig.axis('off')
    fig.annotate("Daily Cases COVID-19 : %s - %s"%(dates[0],dates[-2]),
                 xy=(.1, .9), xycoords='figure fraction',
                 horizontalalignment='left', verticalalignment='top',
                 fontsize=20)
    fig.annotate("""P. Mulrooney <mulroony@gmail.com>
* Upper case count limited to mean + 2 std devs
* Missing replaced with zeros
* Decreases between days set to zero
* https://sdgis-sandag.opendata.arcgis.com/datasets/covid-19-statistics-by-zip-code""",
                 xy=(.5, .1), xycoords='figure fraction',
                 horizontalalignment='left', verticalalignment='top',
                 fontsize=8)
    fig.annotate(idx,
                 xy=(0.1, .1), xycoords='figure fraction',
                 horizontalalignment='left', verticalalignment='top',
                 fontsize=20)
    # NOTE(review): this annotate call is an exact duplicate of the one
    # above (the date is drawn twice at the same position) — likely
    # unintentional; confirm before removing.
    fig.annotate(idx,
                 xy=(0.1, .1), xycoords='figure fraction',
                 horizontalalignment='left', verticalalignment='top',
                 fontsize=20)
    chart = fig.get_figure()
    chart.savefig(output_files[-1], dpi=150)
    plt.close('all')
    output_files[-1].seek(0)

print("Generated %s in memory PNG files\n"%(len(output_files)))

# Stitch the frames into a GIF (to disk, or in memory if gif_path is blank).
images = []
for output_file in output_files:
    images.append(imageio.imread(output_file))
if not gif_path:
    gif_path = io.BytesIO()
imageio.mimsave(gif_path, images, format="gif", duration=0.75, loop=1)
{
"api_name": "requests.get",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "geopandas.GeoDataFrame.fro... |
26625562386 | """A Mailman newsletter subscription interface.
To use this plugin, enable the newsletter module and set the newsletter module and name settings
in the admin settings page.
"""
from django.utils.translation import ugettext as _
from Mailman import MailList, Errors
from models import Subscription
from livesettings import config_value
import logging
import sys
log = logging.getLogger('newsletter.mailman')
class UserDesc: pass
def is_subscribed(contact):
    # True when the contact's email has an active local Subscription record.
    return Subscription.email_is_subscribed(contact.email)
def update_contact(contact, subscribe, attributes={}):
    """Subscribe or unsubscribe *contact* locally and on the Mailman list.

    Returns a translated, human-readable status message.
    """
    # NOTE(review): mutable default argument; harmless here because
    # `attributes` is only read, but `attributes=None` would be safer.
    email = contact.email
    current = Subscription.email_is_subscribed(email)
    attributesChanged = False
    sub = None
    if attributes:
        sub, created = Subscription.objects.get_or_create(email=email)
        if created:
            # A brand-new record always counts as changed.
            attributesChanged = True
        else:
            # Snapshot the attributes before updating, for change detection.
            oldAttr = [(a.name,a.value) for a in sub.attributes.all()]
            oldAttr.sort()
        sub.update_attributes(attributes)
        newAttr = [(a.name,a.value) for a in sub.attributes.all()]
        newAttr.sort()
        if not created:
            attributesChanged = oldAttr != newAttr
    if current == subscribe:
        # Subscription state unchanged; report what (if anything) happened.
        if subscribe:
            if attributesChanged:
                result = _("Updated subscription for %(email)s.")
            else:
                result = _("Already subscribed %(email)s.")
        else:
            result = _("Already removed %(email)s.")
    else:
        # State flips: persist it locally, then mirror it on the Mailman list.
        if not sub:
            sub, created = Subscription.objects.get_or_create(email=email)
        sub.subscribed = subscribe
        sub.save()
        if subscribe:
            mailman_add(contact)
            result = _("Subscribed: %(email)s")
        else:
            mailman_remove(contact)
            result = _("Unsubscribed: %(email)s")
    return result % { 'email' : email }
def mailman_add(contact, listname=None, send_welcome_msg=None, admin_notify=None):
    """Add a Satchmo contact to a mailman mailing list.

    Parameters:
    - `Contact`: A Satchmo Contact
    - `listname`: the Mailman listname, defaulting to whatever you have set in settings.NEWSLETTER_NAME
    - `send_welcome_msg`: True or False, defaulting to the list default
    - `admin_notify`: True of False, defaulting to the list default
    """
    # Python 2 code (print >> stream syntax); do not port piecemeal.
    mm, listname = _get_maillist(listname)
    print >> sys.stderr, 'mailman adding %s to %s' % (contact.email, listname)

    if send_welcome_msg is None:
        send_welcome_msg = mm.send_welcome_msg

    # Mailman expects an object with fullname/address/digest attributes.
    userdesc = UserDesc()
    userdesc.fullname = contact.full_name
    userdesc.address = contact.email
    userdesc.digest = False

    if mm.isMember(contact.email):
        print >> sys.stderr, _('Already Subscribed: %s' % contact.email)
    else:
        try:
            try:
                # Lock the list while mutating membership; always unlock.
                mm.Lock()
                mm.ApprovedAddMember(userdesc, send_welcome_msg, admin_notify)
                mm.Save()
                print >> sys.stderr, _('Subscribed: %(email)s') % { 'email' : contact.email }
            except Errors.MMAlreadyAMember:
                print >> sys.stderr, _('Already a member: %(email)s') % { 'email' : contact.email }
            except Errors.MMBadEmailError:
                if userdesc.address == '':
                    print >> sys.stderr, _('Bad/Invalid email address: blank line')
                else:
                    print >> sys.stderr, _('Bad/Invalid email address: %(email)s') % { 'email' : contact.email }
            except Errors.MMHostileAddress:
                print >> sys.stderr, _('Hostile address (illegal characters): %(email)s') % { 'email' : contact.email }
        finally:
            mm.Unlock()
def mailman_remove(contact, listname=None, userack=None, admin_notify=None):
"""Remove a Satchmo contact from a Mailman mailing list
Parameters:
- `contact`: A Satchmo contact
- `listname`: the Mailman listname, defaulting to whatever you have set in settings.NEWSLETTER_NAME
- `userack`: True or False, whether to notify the user, defaulting to the list default
- `admin_notify`: True or False, defaulting to the list default
"""
mm, listname = _get_maillist(listname)
print >> sys.stderr, 'mailman removing %s from %s' % (contact.email, listname)
if mm.isMember(contact.email):
try:
mm.Lock()
mm.ApprovedDeleteMember(contact.email, 'satchmo_ext.newsletter', admin_notify, userack)
mm.Save()
finally:
mm.Unlock()
def _get_maillist(listname):
try:
if not listname:
listname = config_value('NEWSLETTER', 'NEWSLETTER_NAME')
if listname == "":
log.warn("NEWSLETTER_NAME not set in store settings")
raise NameError('No NEWSLETTER_NAME in settings')
return MailList.MailList(listname, lock=0), listname
except Errors.MMUnknownListError:
print >> sys.stderr, "Can't find the MailMan newsletter: %s" % listname
raise NameError('No such newsletter, "%s"' % listname)
| dokterbob/satchmo | satchmo/apps/satchmo_ext/newsletter/mailman.py | mailman.py | py | 5,119 | python | en | code | 30 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "models.Subscription.email_is_subscribed",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "models.Subscription",
"line_number": 19,
"usage_type": "name"
},
{
"api... |
724197866 |
from time import time
from flask import render_template, redirect, url_for, flash, make_response, current_app, request, abort
from flask.json import jsonify
from flask.ext.login import login_required, current_user
from etherpad_lite import EtherpadLiteClient
from . import main
from .forms import UserDefaultRoom
from .. import db, avatars, generator
from ..models import User, Room, RoomAuthorization, RoomPermissions, Widget, EventTypes, FileStorage
def get_available_rooms():
pass
@main.route('/')
def index():
return redirect(url_for('.home'))
@main.route('/home')
def home():
if not current_user.is_authenticated():
return redirect(url_for('auth.login'))
if current_user.guest:
return redirect(url_for('auth.logout'))
rs = Room.query.order_by('componentId').all()
form = UserDefaultRoom()
rc = [('0', 'None')]
rooms = []
my_rooms = []
for room in rs:
if room.ownerId == current_user.id:
my_rooms += [room]
rc += [(str(room.id), room.name)]
elif room.get_permissions(current_user):
rooms += [room]
rc += [(str(room.id), room.name)]
form.selectRoom.choices = rc
form.selectRoom.data = str(current_user.defaultRoomId)
if current_app.config['IA_ENABLED']:
return render_template('home_ia.html', my_rooms=my_rooms, rooms=rooms, User=User, form=form, avatar_url=avatar_url(current_user), current_user=current_user, current_app=current_app)
else:
return render_template('home.html', my_rooms=my_rooms, rooms=rooms, User=User, form=form, avatar_url=avatar_url(current_user), current_user=current_user, current_app=current_app)
@main.route("/set/room", methods=['POST'])
@login_required
def set_room():
form = UserDefaultRoom()
rs = Room.query.order_by('componentId').all()
rc = [('0', 'None')]
for room in rs:
if room.ownerId == current_user.id:
rc += [(str(room.id), room.name)]
elif room.get_permissions(current_user):
rc += [(str(room.id), room.name)]
form.selectRoom.choices = rc
if form.validate_on_submit():
if form.selectRoom.data == '0':
current_user.defaultRoomId = None
else:
current_user.defaultRoomId = form.selectRoom.data
db.session.commit()
return redirect(url_for('.home'))
else:
for form_error in form.errors:
for field_error in form[form_error].errors:
flash(form[form_error].label.text+" - "+field_error, 'error')
return redirect(url_for('.home'))
@main.route('/lobby')
def lobby():
if not current_user.is_authenticated():
return redirect(url_for('auth.login'))
if current_user.guest:
return redirect(url_for('auth.logout'))
rs = Room.query.all()
rooms = []
for room in rs:
if get_room_permissions(room, current_user):
rooms += [room]
return render_template('lobby.html', rooms=rooms, User=User)
@main.route('/room/<roomId>')
def room(roomId):
room = Room.query.filter_by(id=roomId).first()
if not current_user.is_authenticated():
if room.guest_permission():
return redirect(url_for('auth.guest_user', roomId=roomId))
return redirect(url_for('auth.login', next=url_for('.room', roomId=1)))
cl = room.events.filter_by(type=EventTypes.ROOM_CHAT).order_by('datetime').limit(20)
return render_template(room.component.template, room=room, chat=cl, current_user=current_user, current_app=current_app)
@login_required
@main.route('/upload_avatar', methods=['POST'])
def upload_avatar():
if 'avatar' in request.files:
filename = avatars.save(request.files['avatar'])
url = avatars.url(filename)
file = FileStorage(type='avatar', filename=current_user.email, url=url)
db.session.add(file)
db.session.commit()
return redirect(url_for('.home'))
def avatar_url(user):
file = FileStorage.query.filter_by(type='avatar', filename=current_user.email).first()
if file:
return file.url
else:
return url_for('.identicon', user_id=user.id)
@login_required
@main.route('/avatar/<user_id>')
def identicon(user_id):
user = User.query.filter_by(id=user_id).first()
identicon = generator.generate(user.email, 100, 100, output_format='png')
response = make_response(identicon)
response.headers["Content-Type"] = "image/png"
response.headers["Cache-Control"] = "public, max-age=43200"
return response
| compeit-open-source/dashboard | app/main/views.py | views.py | py | 4,544 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "flask.redirect",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "flask.ext.login.current_user.is_authenticated",
"line_number": 23,
"usage_type": "call"
},
{
"api_na... |
7092756844 | #!/usr/bin/env python3
import ast
import astor
expr_l = """
for e in g.Edges():
e.dst["sum1"] += e.data123
for e in g.Edges():
e["data1"] = e.data2 / e.dst.sum1
"""
expr_l_ast = ast.parse(expr_l)
print(ast.dump(expr_l_ast))
def is_call_edges(call):
if isinstance(call, ast.Call):
f_func = call.func
if isinstance(f_func, ast.Attribute) and isinstance(
f_func.value, ast.Name
):
f_object = f_func.value.id
f_method = f_func.attr
if f_object == "g" and f_method == "Edges":
return True
return False
def dst_node_var(n, e):
if (
isinstance(n, ast.Subscript)
and isinstance(n.value, ast.Attribute)
and n.value.value.id == e
and n.value.attr == "dst"
):
return n.slice.value
if (
isinstance(n, ast.Attribute)
and isinstance(n.value, ast.Attribute)
and n.value.value.id == e
and n.value.attr == "dst"
):
return n.attr
else:
return None
global_vars = {}
for node in ast.walk(expr_l_ast):
if (
isinstance(node, ast.For)
and is_call_edges(node.iter)
and isinstance(node.target, ast.Name)
):
edge_var = node.target.id
if len(node.body) == 1:
body = node.body[0]
if isinstance(body, ast.AugAssign) and isinstance(
body.target, ast.Subscript
):
var_name = dst_node_var(body.target, edge_var)
if var_name is not None:
if (
isinstance(body.value, ast.Attribute)
and body.value.value.id == edge_var
and body.value.attr != var_name
):
global_vars[var_name] = [node]
body.target = ast.Name(id="n_var")
elif (
isinstance(body, ast.Assign)
and len(body.targets) == 1
and isinstance(body.targets[0], ast.Subscript)
):
assign_var = dst_node_var(body.targets[0], edge_var)
if assign_var not in global_vars:
if (
isinstance(body.value, ast.BinOp)
and isinstance(body.value.left, ast.Attribute)
and isinstance(body.value.right, ast.Attribute)
):
var_left = dst_node_var(body.value.left, edge_var)
var_right = dst_node_var(body.value.right, edge_var)
if var_left in global_vars:
global_vars[var_left].append(node)
body.value.left = ast.Name(id="n_var")
elif var_right in global_vars:
global_vars[var_right].append(node)
body.value.right = ast.Name(id="n_var")
for var, nodes in global_vars.items():
if len(nodes) == 2:
re_ast = ast.For(
target=ast.Name(id="n"),
iter=ast.Call(
func=ast.Attribute(value=ast.Name(id="g"), attr="Nodes"),
args=[],
keywords=[],
),
body=[
ast.Assign(
targets=[ast.Name(id="n_var")],
value=ast.Constant(value=0.0),
),
ast.For(
target=ast.Name(id="e"),
iter=ast.Call(
func=ast.Attribute(
value=ast.Name(id="n"), attr="incoming_edges"
),
args=[],
keywords=[],
),
body=nodes[0].body,
orelse=[],
),
ast.For(
target=ast.Name(id="e"),
iter=ast.Call(
func=ast.Attribute(
value=ast.Name(id="n"), attr="incoming_edges"
),
args=[],
keywords=[],
),
body=nodes[1].body,
orelse=[],
),
],
orelse=[],
)
expr_l_ast.body.append(re_ast)
expr_l_ast.body.remove(nodes[0])
expr_l_ast.body.remove(nodes[1])
print(astor.to_source(expr_l_ast))
| K-Wu/python_and_bash_playground | pyctor/astor_edge_loop_to_node_loop.py | astor_edge_loop_to_node_loop.py | py | 4,511 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "ast.parse",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "ast.dump",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "ast.Call",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "ast.Attribute",
"line_number": 1... |
41767569580 | import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
class LinearDynamicSystem:
def __init__(self, gamma, sigma):
self.gamma = gamma
self.sigma = sigma
self.log_p_x_history = []
self.pred_mu_history = []
self.mu_history = []
self.pred_mu_history = []
self.p_history = []
self.pred_p_history = []
def fit(self, observations, mu, p):
self._filter(observations, mu, p)
smoothed_mu_list = self._smoothing()
plt.plot(observations, label="Observation")
plt.plot(self.pred_mu_history, label="Predicted Latent Variable")
plt.plot(self.mu_history, label="Updated Latent Variable")
plt.plot(smoothed_mu_list, label="Smoothed Latent Variable")
plt.legend()
plt.show()
def _filter(self, observations, mu, p):
_mu, _p = mu, p
for i, obs in enumerate(observations):
pred_mu, pred_p, log_p_x = self._predict(_mu, _p, obs)
self.log_p_x_history.append(log_p_x)
_mu, _p = self._update(obs, pred_mu, pred_p)
self.pred_mu_history.append(pred_mu)
self.pred_p_history.append(pred_p)
self.mu_history.append(_mu)
self.p_history.append(_p)
print("Answer {}: ".format(i+1))
if i > 0:
print("predicted p.d.f. of latent variable: mu = {:.3f}, sigma = {:.3f}, joint_prob. = {:.3f}"
.format(pred_mu, pred_p, np.sum(self.log_p_x_history)))
print("log-scaled likelihood = {:.3f}, "
"updated p.d.f. of latent value: mu = {:.3f}, sigma = {:.3f}"
.format(log_p_x, _mu, _p))
def _predict(self, mu, p, obs):
pred_mu = mu
pred_p = p + self.gamma
log_p_x = norm.logpdf(x=obs, loc=pred_mu, scale=(pred_p + self.sigma))
return pred_mu, pred_p, log_p_x
def _kalman_gain(self, p, s):
return p / (p + s)
def _update(self, obs, mu, p):
k = self._kalman_gain(p, self.sigma)
new_mu = mu + k * (obs - mu)
new_p = (1 - k) * p
return new_mu, new_p
def _smoothing(self):
smoothed_mu_list = []
smoothed_p_list = []
last_smoothed_mu = self.mu_history[-1]
last_smoothed_p = self.p_history[-1]
smoothed_mu_list.append(last_smoothed_mu)
smoothed_p_list.append(last_smoothed_p)
for i in reversed(range(len(self.mu_history)-1)):
current_mu = self.mu_history[i]
pred_mu = self.pred_mu_history[i+1]
current_p = self.p_history[i]
pred_p = self.pred_p_history[i+1]
j = current_p / pred_p
last_smoothed_mu = current_mu + j * (last_smoothed_mu - pred_mu)
last_smoothed_p = current_p + j ** 2 * (last_smoothed_p - pred_p)
smoothed_mu_list.insert(0, last_smoothed_mu)
smoothed_p_list.insert(0, last_smoothed_p)
for mu, p in zip(smoothed_mu_list, smoothed_p_list):
print("smoothed μ = {:.3f}, smoothed P = {:.3f}".format(mu, p))
return smoothed_mu_list
def generate_observation(b):
return 10 * b + 20
STUDENT_ID = "2011039"
print("Your ID is {}".format(STUDENT_ID))
b_1, b_2, b_3, b_4 = [int(sid) for sid in STUDENT_ID[-4:]]
print("b_1 = {}, b_2 = {}, b_3 = {}, b_4 = {}".format(b_1, b_2, b_3, b_4))
INPUT_X = np.array([generate_observation(b) for b in [b_1, b_2, b_3, b_4]])
INIT_P = 50
INIT_MU = 200
INIT_GAMMA = 20
INIT_SIGMA = 10
print("initialized value...: X = {}, P_0 = {}, μ_0 = {}, Γ = {}, Σ = {}"
.format(INPUT_X, INIT_P, INIT_MU, INIT_GAMMA, INIT_SIGMA))
lds_model = LinearDynamicSystem(INIT_GAMMA, INIT_SIGMA)
lds_model.fit(INPUT_X, INIT_MU, INIT_P) | faabl/class_seq | seq4-2.py | seq4-2.py | py | 3,796 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matpl... |
15619190553 | import numpy as np
import os
from PIL import Image
from PIL import ImageFont, ImageDraw
import time
import core.utils as utils
import core.v_detection as detect
import cv2
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-i', type=str, default="./1.mp4", help="input video")
parser.add_argument('-o', type=str, default="./output.avi", help="input video")
args = parser.parse_args()
#videos
if __name__ == "__main__":
input = args.i
output = args.o
cap = cv2.VideoCapture(input)
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(args.o, cv2.VideoWriter_fourcc(
'M', 'J', 'P', 'G'), 25, (frame_width, frame_height))
while(True):
ret, image = cap.read()
if ret == True:
start = time.time()
bboxes = detect.vehicle_detection(image)
# print("bboxes: ", bboxes)
image = utils.draw_bbox(image, bboxes)
print("processing time: ", time.time() - start)
out.write(image)
cap.release()
out.release() | byq-luo/vehicle_detection-1 | video_clients.py | video_clients.py | py | 1,122 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.VideoWriter_fourcc",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.V... |
17944469658 | import tensorflow as tf
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.core import Dense, Activation, Flatten
# import tensorflow as tf
# tf.python.control_flow_ops = tf # some hack to get tf running with Dropout
# 224x224
def alex_net_keras(x, num_classes=2, keep_prob=0.5):
x = Conv2D(92, kernel_size=(11, 11), strides=(4, 4), padding='same')(x) # conv 1
# x = BatchNormalization()(x)
x = Activation('relu')(x)
# LRN is missing here - Caffe.
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x) # pool 1
x = Conv2D(256, kernel_size=(5, 5), padding='same')(x) # miss group and pad param # conv 2
# x = BatchNormalization()(x)
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x) # pool 2
x = Conv2D(384, kernel_size=(3, 3), padding='same')(x) # conv 3
# x = BatchNormalization()(x)
x = Activation('relu')(x)
# x = MaxPooling2D(pool_size=(3, 3))(x)
x = Conv2D(384, kernel_size=(3, 3), padding='same')(x) # conv 4
# x = BatchNormalization()(x)
x = Activation('relu')(x)
# x = MaxPooling2D(pool_size=(3, 3))(x)
x = Conv2D(256, kernel_size=(3, 3), padding='same')(x) # conv 5
# x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Flatten()(x)
x = Dense(4096, kernel_initializer='normal')(x) # fc6
# dropout 0.5
# x = tf.nn.dropout(x, keep_prob=keep_prob)
# x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dense(4096, kernel_initializer='normal')(x) # fc7
# dropout 0.5
#暂时没有过拟合,暂时不用
# x = tf.nn.dropout(x, keep_prob=keep_prob)
# x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dense(num_classes)(x)
# x = BatchNormalization()(x)
# x = Activation('softmax')(x)
return x
| CharlesLoo/stockPrediction_CNN | alexnet_keras.py | alexnet_keras.py | py | 1,831 | python | en | code | 13 | github-code | 6 | [
{
"api_name": "keras.layers.convolutional.Conv2D",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "keras.layers.core.Activation",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "keras.layers.convolutional.MaxPooling2D",
"line_number": 15,
"usage_type":... |
26310660224 | from alarmageddon.publishing.hipchat import HipChatPublisher
from alarmageddon.result import Failure
from alarmageddon.result import Success
from alarmageddon.publishing.exceptions import PublishFailure
from alarmageddon.validations.validation import Validation, Priority
import pytest
#Successes aren't sent, so monkeypatch out post and then
#only failures should notice
@pytest.fixture
def no_post(monkeypatch):
monkeypatch.delattr("requests.post")
def new_publisher():
return HipChatPublisher(
api_end_point="fakeurl",
api_token="faketoken",
environment="UnitTest",
room_name="Alarmageddon")
def test_requires_api_end_point():
with pytest.raises(ValueError):
HipChatPublisher(api_end_point="",
api_token="faketoken",
environment="UnitTest",
room_name="Alarmageddon")
def test_requires_api_token():
with pytest.raises(ValueError):
HipChatPublisher(api_end_point="fakeurl",
api_token="",
environment="UnitTest",
room_name="Alarmageddon")
def test_requires_environment():
with pytest.raises(ValueError):
HipChatPublisher(api_end_point="fakeurl",
api_token="token",
environment="",
room_name="Alarmageddon")
def test_requires_room():
with pytest.raises(ValueError):
HipChatPublisher(api_end_point="fakeurl",
api_token="token",
environment="UnitTest",
room_name="")
def test_repr():
hipchat = new_publisher()
hipchat.__repr__()
def test_str():
hipchat = new_publisher()
str(hipchat)
def testSendSuccess(no_post, monkeypatch):
hipchat = new_publisher()
v = Validation("low", priority=Priority.LOW)
success = Success("bar", v)
hipchat.send(success)
def testSendFailure(no_post, monkeypatch):
hipchat = new_publisher()
v = Validation("low", priority=Priority.LOW)
failure = Failure("foo", v, "unable to frobnicate")
with pytest.raises(AttributeError):
hipchat.send(failure)
def test_publish_failure(httpserver):
httpserver.serve_content(code=300, headers={"content-type": "text/plain"},
content='{"mode":"NORMAL"}')
pub = HipChatPublisher(httpserver.url, "token", "env", "room")
v = Validation("low", priority=Priority.CRITICAL)
failure = Failure("bar", v, "message")
with pytest.raises(PublishFailure):
pub.send(failure)
| PearsonEducation/Alarmageddon | tests/publishing/test_hipchat.py | test_hipchat.py | py | 2,640 | python | en | code | 15 | github-code | 6 | [
{
"api_name": "pytest.fixture",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "alarmageddon.publishing.hipchat.HipChatPublisher",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 25,
"usage_type": "call"
},
{
... |
27089945038 | #=============================================================================================================#
# HOW TO RUN? #
# python3 chop_segments.py #
# Please provide LABEL, SEGMENT_LENGTH, OUTPUT_PATH, INPUT_FILE_TO_CHOP. #
#=============================================================================================================#
import csv
import sys
import nltk
from nltk import tokenize
#=============================================================================================================#
LABEL = 1
SEGMENT_LENGTH = 100
OUTPUT_PATH = "APPROPRIATE.CSV"
INPUT_FILE_TO_CHOP = "SAMPLE_INPUT_FILE.txt"
#=============================================================================================================#
#When we run the script for appropriate channels, we set the LABEL as 1 else we set it as 0 for inappropriate #
#=============================================================================================================#
visited = set()
## To ensure there are no duplicate video IDs
s = ""
cur = ""
prev = []
flag = 0
identifier = 0
#=============================================================================================================#
f = open(INPUT_FILE_TO_CHOP)
with open(OUTPUT_PATH, 'w') as csvfile:
fieldnames = ['','Unnamed: 0','Unnamed: 0.1','Transcript','ID','Label']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow({'': '', 'Unnamed: 0': 'Unnamed: 0', 'Unnamed: 0.1': 'Unnamed: 0.1', 'Transcript' : 'Transcript', 'ID' : 'VIDEO ID', 'Label' : 'Label'})
for line in f:
# Channel IDs are present in lines containing ||||
if("||||" in line):
continue
# Video IDs are present in lines containing ####
if("####" in line):
cur = line.strip()
if(flag == 1):
identifier +=1
text = s.strip()
groups = nltk.word_tokenize(text)
n_split_groups = []
while len(groups):
n_split_groups.append(' '.join(groups[:SEGMENT_LENGTH]))
groups = groups[SEGMENT_LENGTH:]
for part in n_split_groups:
writer.writerow({'': identifier, 'Unnamed: 0': identifier, 'Unnamed: 0.1': identifier, 'Transcript' : part, 'ID' : prev[1], 'Label' : LABEL })
print("PROCESSED VIDEO ID",str(prev[1]))
flag = 0
if("####" in line and (line.strip() not in visited)):
s = ""
flag = 1
visited.add(line.strip())
prev = cur.split("####")
if("####" not in line and flag == 1):
s += line.strip() + " "
if(flag == 1):
identifier +=1
text = s.strip()
groups = nltk.word_tokenize(text)
n_split_groups = []
while len(groups):
n_split_groups.append(' '.join(groups[:SEGMENT_LENGTH]))
groups = groups[SEGMENT_LENGTH:]
for part in n_split_groups:
writer.writerow({'': identifier, 'Unnamed: 0': identifier, 'Unnamed: 0.1': identifier, 'Transcript' : part, 'ID' : prev[1], 'Label' : LABEL })
#=============================================================================================================#
| STEELISI/Youtube-PG | chop_segments.py | chop_segments.py | py | 3,488 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "csv.DictWriter",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "nltk.word_tokenize",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "nltk.word_tokenize",
"line_number": 62,
"usage_type": "call"
}
] |
34660089311 | import requests
from bs4 import BeautifulSoup
import csv
import os
URL = 'https://citaty.info/book/quotes'
HEADERS = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0',
'accept': '*/*'}
FILE_CSV = 'quotations_csv.csv'
def get_html(url, params=None):
r = requests.get(url, headers=HEADERS, params=params)
return r
def get_content(html):
soup = BeautifulSoup(html, 'html.parser')
items = soup.find_all('div', class_='node__content')
quotations = []
for item in items:
topic_tags = item.find('div', class_='node__topics')
if topic_tags:
topic_tags = topic_tags.get_text(', ')
else:
topic_tags = 'No tags'
quotations.append({
'autor': item.find('a', title='Автор цитаты').get_text(),
'book': item.find('a', title='Цитата из книги').get_text(),
'text_quotation': item.find('div', class_='field-item even last').get_text().replace('\xa0', ' ').replace('\n', ' '),
'topic_tags': topic_tags
})
print(*quotations, sep='\n')
save_file_csv(quotations, FILE_CSV)
os.startfile(FILE_CSV)
def save_file_csv (items, path):
with open(path, 'w', newline='') as file:
writer=csv.writer(file, delimiter=';')
writer.writerow(['Autor', 'Book', 'Quotation', 'Tags'])
for item in items:
writer.writerow([item['autor'], item['book'], item['text_quotation'], item['topic_tags']])
def parse():
html = get_html(URL)
if html.status_code == 200:
get_content(html.text)
else:
print('Error!')
parse() | isorokina012/course_work | course_paper.py | course_paper.py | py | 1,737 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.startfile",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_nu... |
32584237829 | # #
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
card = dbc.Card(
[dbc.CardHeader("Header"), dbc.CardBody("Body", style={"height": 250})],
className="h-100",
)
graph_card = dbc.Card([dbc.CardHeader("Here's a graph"), dbc.CardBody([dcc.Graph()])])
app.layout = dbc.Container(
dbc.Row(
[
dbc.Col(
[
dbc.CardDeck([card] * 4),
html.H2("Here is some important information to highlight..."),
dbc.CardDeck([graph_card] * 2),
],
width=10,
),
dbc.Col(card, width=2),
]
),
fluid=True,
className="m-3",
)
if __name__ == "__main__":
app.run_server(debug=True)
| thigbee/dashBootstrapThemeExplorer | gallery/layout_template_1.py | layout_template_1.py | py | 890 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dash.Dash",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.themes",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "dash_bootstrap_components.Card",
"line_number": 9,
"usage_type": "call"
},
{
"api_n... |
11036108434 | import wx
import PropToolPanel
import CollisionToolPanel
import POIToolPanel
class ToolBrowser(wx.Panel):
def __init__(self, parent, id):
wx.Panel.__init__(self, parent, id)
self.notebook = wx.Notebook(self, -1)
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.onPageChanged, self.notebook)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(self.notebook, 1, wx.EXPAND)
self.SetSizer(sizer)
self.observers = []
self.selectedTool = None
self.selectedEditor = None
self.collisionToolPanel = CollisionToolPanel.CollisionToolPanel(self.notebook, -1)
#self.notebook.AddPage(self.collisionToolPanel, "Collision")
self.notebook.AddPage(self.collisionToolPanel, "Col")
self.poiToolPanel = POIToolPanel.POIToolPanel(self.notebook, -1)
self.notebook.AddPage(self.poiToolPanel, "POI")
self.propToolPanel = PropToolPanel.PropToolPanel(self.notebook, -1)
self.notebook.AddPage(self.propToolPanel, "Props")
def addObserver(self, observer): self.observers.append(observer)
def removeObserver(self, observer): self.observers.remove(observer)
def onPageChanged(self, event):
if self.selectedEditor and self.selectedTool:
self.selectedTool.detachFromEditor(self.selectedEditor)
self.selectedTool = self.notebook.GetPage(self.notebook.GetSelection())
if self.selectedEditor:
self.selectedTool.attachToEditor(self.selectedEditor)
self.selectedEditor.Refresh()
def onEditorChanged(self, editor):
if self.selectedEditor:
self.selectedTool.detachFromEditor(self.selectedEditor)
self.selectedEditor = editor
if editor:
self.selectedTool.attachToEditor(editor)
def _notify(self):
for observer in self.observers: observer(self)
| sdetwiler/pammo | editor/source/ToolBrowser.py | ToolBrowser.py | py | 1,872 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "wx.Panel",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "wx.Panel.__init__",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "wx.Panel",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "wx.Notebook",
"line_nu... |
4403084906 | import unittest
from pyunitreport import HTMLTestRunner
import parse_api_test_cases
import os
import time
from api_test_case import ApiTestCase
from parse_api_test_cases import TestCaseParser
import argparse
import logging
if __name__ == '__main__':
# Argument Parse
parser = argparse.ArgumentParser(
description='API Test Assistant, Help you to do api test.')
parser.add_argument(
'test_suite_name', help="Please add your test suite's path"
)
args = parser.parse_args()
test_suite_name = args.test_suite_name
# create log file
log_file__folder_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests/log/')
if not os.path.exists(log_file__folder_path):
os.makedirs(log_file__folder_path)
time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
test_log_file_path = os.path.join(log_file__folder_path, str(test_suite_name)+time_str+'.log')
logging.basicConfig(filename=test_log_file_path, level=logging.INFO,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
filemode='w')
logging.info("=========Start test execution!========")
suite = unittest.TestSuite()
tcp = TestCaseParser(test_suite_name)
test_cases_list = tcp.get_test_case_list()
logging.info("Got est cases list of {test_suite_name}".format(test_suite_name=test_suite_name))
for test_case in test_cases_list:
ApiTestCase.runTest.__doc__ = test_case['name']
test = ApiTestCase(test_case)
suite.addTest(test)
logging.info("Added {tc_name} to test suite".format(tc_name=test_case['name']))
test_report_folder_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests/reports/')
if not os.path.exists(test_report_folder_path):
os.makedirs(test_report_folder_path)
logging.info("Report Folder Created at {path}".format(path=test_report_folder_path))
else:
logging.info("Report Folder already exists at {path}".format(path=test_report_folder_path))
time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
test_report_file_path = str(test_suite_name) + time_str
kwargs = {
"output": test_report_folder_path,
"report_name": test_report_file_path
}
runner = HTMLTestRunner(**kwargs)
runner.run(suite)
logging.info("Test suite execution done!")
| PengDongCd/ApiTestAssitant | api_test_assistant_main.py | api_test_assistant_main.py | py | 2,481 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
... |
43248987614 | import os
import itertools
import scipy.io
import scipy.stats as stt
import numpy as np
import matplotlib.pyplot as plt
from mou_model import MOU
_RES_DIR = 'model_parameter/'
_I_REST_RUN = 0
_I_NBACK_RUN = 1
_I_NO_TIMESHIFT = 0
_I_ONE_TIMESHIFT = 1
_SUBJECT_AXIS = 0
plt.close('all')
## Create a local folder to store results.
if not os.path.exists(_RES_DIR):
print('created directory:', _RES_DIR)
os.makedirs(_RES_DIR)
## Read in data and define constants.
fMRI_data_and_labels = scipy.io.loadmat('data/DATA_TASK_3DMOV_HP_CSF_WD.mat')
regionalized_preprocessed_fMRI_data = fMRI_data_and_labels['TASKEC'][0][0]
roi_labels = fMRI_data_and_labels['ROIlbls'][0]
rest_run_data = regionalized_preprocessed_fMRI_data['Rest']
n_back_run_data = regionalized_preprocessed_fMRI_data['nBack']
flanker_run_data = regionalized_preprocessed_fMRI_data['Flanker']
m_rotation_run_data = regionalized_preprocessed_fMRI_data['mRotation']
odd_man_out_run_data = regionalized_preprocessed_fMRI_data['OddManOut']
n_subjects = rest_run_data.shape[2]
n_runs = len(fMRI_data_and_labels['TASKEC'][0][0])
n_rois = rest_run_data.shape[1]
n_ts_samples = rest_run_data.shape[0]
# Structure data to match the format used at
# https://github.com/mb-BCA/notebooks_review2019/blob/master/1_MOUEC_Estimation.ipynb
# to enhance comparability.
filtered_ts_emp = np.zeros([n_subjects, n_runs, n_rois, n_ts_samples])
run = list(regionalized_preprocessed_fMRI_data.dtype.fields.keys())
for k in range(len(run)):
filtered_ts_emp[:, k, :, :] = np.transpose(
regionalized_preprocessed_fMRI_data[run[k]], (2, 1, 0))
## Calculate functional connectivity (BOLD covariances) [Q0 und Q1].
# time_shift = np.arange(4, dtype=float) # for autocovariance plots
time_shift = np.arange(2, dtype=float)
n_shifts = len(time_shift)
FC_emp = np.zeros([n_subjects, n_runs, n_shifts, n_rois, n_rois])
n_ts_span = n_ts_samples - n_shifts + 1
for i_subject in range(n_subjects):
for i_run in range(n_runs):
# Center the time series (around zero).
filtered_ts_emp[i_subject, i_run, :, :] -= \
np.outer(filtered_ts_emp[i_subject, i_run, :, :].mean(1),
np.ones([n_ts_samples]))
# Calculate covariances with various time shifts.
for i_shift in range(n_shifts):
FC_emp[i_subject, i_run, i_shift, :, :] = \
np.tensordot(filtered_ts_emp[i_subject, i_run, :,
0:n_ts_span],
filtered_ts_emp[i_subject, i_run, :,
i_shift:n_ts_span+i_shift],
axes=(1, 1)) / float(n_ts_span - 1)
rescale_FC_factor = (0.5 / FC_emp[:, _I_REST_RUN, _I_NO_TIMESHIFT, :,
:].diagonal(axis1=1, axis2=2).mean())
FC_emp *= rescale_FC_factor
# filtered_ts_emp /= np.sqrt(0.135) # Rescale to get the same order of magnitude for locale variability as in paper.
print('most of the FC values should be between 0 and 1')
print('mean FC0 value:', FC_emp[:, :, _I_NO_TIMESHIFT, :, :].mean(),
FC_emp.mean())
print('max FC0 value:', FC_emp[:, :, _I_NO_TIMESHIFT, :, :].max())
print('mean BOLD variance (diagonal of each FC0 matrix):',
FC_emp[:, :, _I_NO_TIMESHIFT, :, :].diagonal(axis1=2, axis2=3).mean())
print('rescaleFactor: ' + str(rescale_FC_factor))
# Show distibution of FC0 values.
plt.figure()
plt.hist(FC_emp[:, :, _I_NO_TIMESHIFT, :, :].flatten(),
bins=np.linspace(-1, 5, 30))
plt.xlabel('FC0 value', fontsize=14)
plt.ylabel('matrix element count', fontsize=14)
plt.title('distribution of FC0 values')
# Show FC0 averaged over subjects first run (rest).
plt.figure()
FC_avg_over_subj = FC_emp[:, _I_REST_RUN,
_I_NO_TIMESHIFT, :, :].mean(axis=_SUBJECT_AXIS)
plt.imshow(FC_avg_over_subj, origin='lower', cmap='Blues', vmax=1, vmin=0)
plt.colorbar()
plt.xlabel('target ROI', fontsize=14)
plt.ylabel('source ROI', fontsize=14)
plt.title('FC0 (functional connectivity with no time lag)')
# Show FC1 averaged over subjects first run (rest).
plt.figure()
FC_avg_over_subj = FC_emp[:, _I_REST_RUN,
_I_ONE_TIMESHIFT, :, :].mean(axis=_SUBJECT_AXIS)
plt.imshow(FC_avg_over_subj, origin='lower', cmap='Blues', vmax=1, vmin=0)
plt.colorbar()
plt.xlabel('target ROI', fontsize=14)
plt.ylabel('source ROI', fontsize=14)
plt.title('FC1 (functional connectivity with time lag 1TR)')
# Show the autocovariance for the first run (rest).
ac = FC_emp.diagonal(axis1=3, axis2=4)
plt.figure()
ac_avg_over_subj = np.log(np.maximum(ac[:, _I_REST_RUN, :, :].
mean(axis=_SUBJECT_AXIS), np.exp(-4.0)))
plt.plot(range(n_shifts), ac_avg_over_subj)
plt.xlabel('time lag', fontsize=14)
plt.ylabel('log autocovariance', fontsize=14)
plt.title('rest', fontsize=16)
plt.ylim((-3, 0))
plt.xlim((0, 3))
# Show the autocovariance for the 2nd run (nBack).
plt.figure()
ac_avg_over_subj = np.log(np.maximum(ac[:, _I_NBACK_RUN, :, :].
mean(axis=_SUBJECT_AXIS), np.exp(-3.1)))
plt.plot(range(n_shifts), ac_avg_over_subj)
plt.xlabel('time lag', fontsize=14)
plt.ylabel('log autocovariance', fontsize=14)
plt.title('nBack', fontsize=16)
plt.ylim((-3, 0))
plt.xlim((0, 3))
## Include structural connectivity.
# Load the binary structural connectivity matrix.
mask_EC = np.array(scipy.io.loadmat('data/BINARY_EC_MASK.mat')
['grouped_umcu50_60percent'], dtype=bool)
# Enforce hermispheric connections.
for i in range(int(n_rois/2)):
mask_EC[i, int(n_rois/2)+i] = True
mask_EC[int(n_rois/2)+i, i] = True
# Visualise the structural connectivity mask.
plt.figure()
plt.imshow(mask_EC, origin='lower')
plt.xlabel('target ROI', fontsize=14)
plt.ylabel('source ROI', fontsize=14)
plt.title('Mask for existing connections', fontsize=12)
## Calculate EC-matrix.
# Construct diagonal mask for input noise matrix
# (here, no input cross-correlation).
mask_Sigma = np.eye(n_rois, dtype=bool)
# Run the model optimization.
# Initialize the source arrays.
# Jacobian (off-diagonal elements = EC)
J_mod = np.zeros([n_subjects, n_runs, n_rois, n_rois])
# Local variance (input covariance matrix, chosen to be diagonal)
Sigma_mod = np.zeros([n_subjects, n_runs, n_rois, n_rois])
# Model error
dist_mod = np.zeros([n_subjects, n_runs])
# Approximation of variance about the fitted data (FC covariance matrices)
R2_mod = np.zeros([n_subjects, n_runs])
# Between-region EC matrix
C_mod = np.zeros([n_subjects, n_runs, n_rois, n_rois])
mou_model = MOU()
for i_subject in range(n_subjects):
for i_run in range(n_runs):
# Run the estimation of model parameters, for all sessions.
# All parameters/restrictions not explicitly passed, have the
# correct defaults in fit_LO@MOU.
mou_model.fit(filtered_ts_emp[i_subject, i_run, :, :].T,
mask_Sigma=mask_Sigma, mask_C=mask_EC)
# Organize the optimization results into arrays.
# Extract Jacobian of the model.
J_mod[i_subject, i_run, :, :] = mou_model.J
# Extract noise (auto-)covariance matrix.
Sigma_mod[i_subject, i_run, :, :] = mou_model.Sigma
# Extract the matrix distance between the empirical objective
# covariances and their model counterparts
# (normalized for each objective matrix).
dist_mod[i_subject, i_run] = mou_model.d_fit['distance']
# The squared Pearson correlation is taken as an approximation
# of the variance.
R2_mod[i_subject, i_run] = mou_model.d_fit['correlation']**2
# The between-region EC matrix of the model
C_mod[i_subject, i_run, :, :] = mou_model.get_C()
print('sub / run:', i_subject, i_run, ';\t model error, R2:',
dist_mod[i_subject, i_run], R2_mod[i_subject, i_run])
# Store the results in files.
np.save(_RES_DIR + 'FC_emp.npy',
FC_emp) # Empirical spatiotemporal FC
np.save(_RES_DIR + 'mask_EC.npy',
mask_EC) # Mask of optimized connections
np.save(_RES_DIR + 'mask_Sigma.npy',
mask_Sigma) # Mask of optimized Sigma elements
np.save(_RES_DIR + 'Sigma_mod.npy',
Sigma_mod) # Estimated Sigma matrices
np.save(_RES_DIR + 'dist_mod.npy',
dist_mod) # Model error
np.save(_RES_DIR + 'J_mod.npy',
J_mod) # Estimated Jacobian, EC + inverse time const. on diag.
print('\nFinished.')
# Plot C-matrix for resting state data.
plt.figure()
plt.imshow(C_mod[:, _I_REST_RUN, :, :].mean(axis=_SUBJECT_AXIS),
origin='lower', cmap='Reds')
plt.colorbar()
plt.xlabel('target ROI', fontsize=14)
plt.ylabel('source ROI', fontsize=14)
plt.title('effective connectivity C_{ij}')
plt.show()
## Calculate local variability for rich club and periphery.
mean_rc_var = np.zeros([n_runs])
mean_periph_var = np.zeros([n_runs])
conf_int_rc = np.zeros([n_runs, 2])
conf_int_periph = np.zeros([n_runs, 2])
# Create a 1D-mask for rich club regions.
mask_rc = np.zeros(n_rois, dtype=bool)
indexes_rich_club = [23, 26, 27, 57, 60, 61]
mask_rc[indexes_rich_club] = True
print('rich club regions: '
+ str(np.concatenate(roi_labels[indexes_rich_club]).tolist()))
for i_run in range(n_runs):
local_var = Sigma_mod[:, i_run, :, :].diagonal(axis1=1, axis2=2)
rc_var = local_var[:, mask_rc].mean(axis=1)
periph_var = local_var[:, ~mask_rc].mean(axis=1)
mean_rc_var[i_run] = rc_var.mean()
mean_periph_var[i_run] = periph_var.mean()
sigma_rc_var = rc_var.std(ddof=1)
sigma_periph_var = periph_var.std(ddof=1)
conf_int_rc[i_run, :] = stt.norm.interval(0.95,
loc=mean_rc_var[i_run],
scale=sigma_rc_var)
conf_int_periph[i_run, :] = stt.norm.interval(0.95,
loc=mean_periph_var[i_run],
scale=sigma_periph_var)
print('Mittel der lokalen Variabilität (rich club): ' + str(mean_rc_var))
print('Mittel der lokalen Variabilität (periphery): ' + str(mean_periph_var))
print('95% Konfidenz Interval (rich cluc): ' + str(conf_int_rc))
print('95% Konfidenz Interval (periphery): ' + str(conf_int_periph))
## Calculate the input-output ratio.
# Create a 2D-mask for rich club regions.
mask_inter_rc = np.zeros([n_rois, n_rois], dtype=bool)
# The entries on the diagonal of C are 0 anyway, so that they can be
# ignored when it comes to the mask:
# mask_inter_rc[indexes_rich_club, indexes_rich_club] = True
rc_ind_combin = np.array(list(itertools.permutations(indexes_rich_club, 2))).T
mask_inter_rc[rc_ind_combin[0], rc_ind_combin[1]] = True
mean_rc_io = np.zeros([n_runs])
mean_periph_io = np.zeros([n_runs])
for i_run in range(n_runs):
# Examine input-output ratio ignoring inter-rich-club connections.
no_rc_connections_C = C_mod[:, i_run, :, :]
no_rc_connections_C[:, mask_inter_rc] = 0
roi_input = no_rc_connections_C[:, :, :].sum(axis=1)
roi_output = no_rc_connections_C[:, :, :].sum(axis=2)
io_rc = (roi_input[:, mask_rc].sum(axis=1) /
roi_output[:, mask_rc].sum(axis=1))
io_periph = (roi_input[:, ~mask_rc].sum(axis=1) /
roi_output[:, ~mask_rc].sum(axis=1))
mean_rc_io[i_run] = io_rc.mean()
mean_periph_io[i_run] = io_periph.mean()
sigma_io_rc = io_rc.std(ddof=1)
sigma_io_periph = io_periph.std(ddof=1)
conf_int_rc[i_run, :] = stt.norm.interval(0.95,
loc=mean_rc_io[i_run],
scale=sigma_io_rc)
conf_int_periph[i_run, :] = stt.norm.interval(0.95,
loc=mean_periph_io[i_run],
scale=sigma_io_periph)
print('input-output ratio rich club: ', str(mean_rc_io))
print('input-output ratio periphery: ', str(mean_periph_io))
print('95% Konfidenz Interval (rich cluc): ' + str(conf_int_rc))
print('95% Konfidenz Interval (periphery): ' + str(conf_int_periph))
| bjoernkoehn21/Reproduce-results-from-Senden-paper | mou_ec_estimation.py | mou_ec_estimation.py | py | 12,115 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.close",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
19541081496 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
# Load the franchise dataset (expects Franchise_Dataset.csv in the working dir).
df = pd.read_csv('Franchise_Dataset.csv')
# NOTE(review): the result of head() is discarded -- only useful in a notebook.
df.head()
# Feature: column 1, kept 2-D (iloc[:, 1:2]) as sklearn expects; target: column 2.
X =df.iloc[:, 1:2].values
y =df.iloc[:, 2].values
# Small random forest regressor: 10 trees, fixed seed for reproducibility.
model = RandomForestRegressor(n_estimators = 10, random_state = 0)
model.fit(X, y)
# Predict the target for a single feature value of 6.
y_pred =model.predict([[6]])
print(y_pred)
# Dense grid over the observed feature range to draw a smooth prediction curve.
X_grid_data = np.arange(min(X), max(X), 0.01)
X_grid_data = X_grid_data.reshape((len(X_grid_data), 1))
# Scatter the raw data (red) and overlay the forest's step-like predictions (blue).
plt.scatter(X, y, color = 'red')
plt.plot(X_grid_data,model.predict(X_grid_data), color = 'blue')
plt.title('Random Forest Regression')
plt.xlabel('Counter Sales')
plt.ylabel('Net Profit')
plt.show()
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.RandomForestRegressor",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "ma... |
36689632480 | from __future__ import division
import re
import sys
import threading
import pyautogui
import pyperclip
import os
import speech_recognition as sr
from pywinauto import Application
import time
import openai
import win32com.client as wincl
#발급 받은 API 키 설정
OPENAI_API_KEY = "..."
# openai API 키 인증
openai.api_key = OPENAI_API_KEY
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r"..."
from google.cloud import speech
import pyaudio
from six.moves import queue
# Audio recording parameters
RATE = 20000 ## 음성인식 성능향상
CHUNK = int(RATE / 30) # 100ms ## 음성인식 성능향상
class MicrophoneStream(object):
    """Opens a recording stream as a generator yielding the audio chunks.

    Designed for ``with MicrophoneStream(rate, chunk) as stream:`` usage;
    PyAudio pushes raw frames into a thread-safe queue via a callback, and
    generator() drains that queue for the consumer.
    """
    def __init__(self, rate, chunk):
        # Sample rate (Hz) and frames-per-buffer requested from PyAudio.
        self._rate = rate
        self._chunk = chunk
        # Create a thread-safe buffer of audio data
        self._buff = queue.Queue()
        # Stream is closed until __enter__ opens it.
        self.closed = True
    def __enter__(self):
        """Open the PyAudio input stream and start buffering audio."""
        self._audio_interface = pyaudio.PyAudio()
        self._audio_stream = self._audio_interface.open(
            format=pyaudio.paInt16,
            # The API currently only supports 1-channel (mono) audio
            # https://goo.gl/z757pE
            channels=1, rate=self._rate,
            input=True, frames_per_buffer=self._chunk,
            # Run the audio stream asynchronously to fill the buffer object.
            # This is necessary so that the input device's buffer doesn't
            # overflow while the calling thread makes network requests, etc.
            stream_callback=self._fill_buffer,
        )
        self.closed = False
        return self
    def __exit__(self, type, value, traceback):
        """Stop the stream and release the PyAudio interface."""
        self._audio_stream.stop_stream()
        self._audio_stream.close()
        self.closed = True
        # Signal the generator to terminate so that the client's
        # streaming_recognize method will not block the process termination.
        self._buff.put(None)
        self._audio_interface.terminate()
    def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
        """Continuously collect data from the audio stream, into the buffer."""
        self._buff.put(in_data)
        return None, pyaudio.paContinue
    def generator(self):
        """Yield concatenated audio chunks until the stream is closed."""
        while not self.closed:
            # Use a blocking get() to ensure there's at least one chunk of
            # data, and stop iteration if the chunk is None, indicating the
            # end of the audio stream.
            chunk = self._buff.get()
            if chunk is None:
                return
            data = [chunk]
            # Now consume whatever other data's still buffered.
            while True:
                try:
                    chunk = self._buff.get(block=False)
                    if chunk is None:
                        return
                    data.append(chunk)
                except queue.Empty:
                    break
            yield b''.join(data)
def type_text(text):
    """Insert *text* at the cursor by pasting from the clipboard.

    Pasting (ctrl+v) instead of simulated keystrokes preserves non-ASCII
    characters such as Korean text.
    """
    pyperclip.copy(text)
    pyautogui.hotkey('ctrl', 'v')
# 기존 임포트 및 설정 코드 생략 ...
def speak(text, rate=1.0):
    """Read *text* aloud through the Windows SAPI text-to-speech voice.

    Args:
        text: string to vocalise.
        rate: SAPI speaking rate (default 1.0).
    """
    # Renamed the local (originally also called ``speak``) so it no longer
    # shadows this function's name.
    voice = wincl.Dispatch("SAPI.SpVoice")
    voice.Rate = rate
    voice.Speak(text)
waiting = False
def waiting_message():
    """Announce a please-wait message once per second while the module-level
    ``waiting`` flag is set; intended to run on a background thread."""
    notice = "잠시만 기다려 주세요..."
    while waiting:
        print(notice)
        speak(notice)
        time.sleep(1)
def is_windows_spoken():
    """Listen for a wake/mode phrase and route the interaction accordingly.

    Returns:
        True when "윈도우" is heard (switch to hotkey/command mode),
        False when "입력" is heard (switch to dictation mode).
        "검색" runs a GPT question/answer round trip and types the answer;
        "종료" exits the process.  Loops until a returning phrase is heard.
    """
    global waiting
    # Create the speech recognizer and default-microphone objects.
    r = sr.Recognizer()
    mic = sr.Microphone()
    with mic as source:
        r.adjust_for_ambient_noise(source)
    # Running GPT chat history ({"role": ..., "content": ...} dicts).
    messages = []
    while True:
        print("Listening...")
        speak("음성인식 실행")
        with mic as source:
            # NOTE(review): ``non_blocking`` is not a documented Recognizer
            # attribute -- presumably a no-op; confirm against the
            # speech_recognition version in use.
            r.non_blocking = True
            audio = r.listen(source)
        try:
            text = r.recognize_google_cloud(audio, language='ko-KR')
            if "윈도우" in text:
                speak("네 말씀하세요.")
                print("명령어를 말씀해주세요.")
                return True
            elif "입력" in text:
                speak("잠시만 기다려주세요")
                print("잠시만 기다려주세요")
                return False
            # Uses the GPT API for open-ended questions.
            elif "검색" in text:
                print("검색 질문을 말씀해주세요.")
                speak("검색 질문을 말씀해주세요.")
                with mic as source:
                    audio = r.listen(source)
                # Announce "please wait" from a side thread while GPT responds.
                waiting = True
                waiting_thread = threading.Thread(target=waiting_message)
                waiting_thread.start()
                question = r.recognize_google_cloud(audio, language='ko-KR')
                messages.append({"role": "user", "content": question})
                answer = speech_to_gpt(messages)
                waiting = False
                waiting_thread.join()
                print(f'GPT: {answer}')
                messages.append({"role": "assistant", "content": answer})
                # Type the answer at the cursor instead of speaking it.
                type_text(answer)
                #speak(answer)
            elif "종료" in text:
                speak("종료하겠습니다.")
                sys.exit("Exiting")
            else:
                speak("다시 말씀해주세요.")
                print("다시 말씀해주세요.")
        except sr.UnknownValueError:
            speak("다시 말씀해주세요.")
            print("다시 말씀해주세요.")
        except sr.WaitTimeoutError:
            # On timeout, simply start collecting audio again.
            continue
def listen_print_loop(responses):
    """Consume streaming recognition responses and dispatch voice commands.

    Interim hypotheses are redrawn in place on stdout.  Each final transcript
    is matched, in priority order, against the command table below; the first
    match triggers its action (application launch or key press), pauses
    briefly, and stops consuming responses so the caller can restart a
    listening cycle.  Saying "음성인식 꺼 줘", "음성인식 종료" or "quit"
    terminates the program.

    Args:
        responses: iterable of streaming recognition responses from the
            Google Cloud Speech API.

    Returns:
        The last accumulated final transcript (may be the empty string).
    """
    def _launch(path):
        # Start an external application; the returned handle is not needed.
        return lambda: Application().start(path)

    def _press(*keys):
        # One key -> single press; several keys -> chord.  hotkey() is
        # behaviorally equivalent to the original keyDown/press/keyUp runs.
        if len(keys) == 1:
            return lambda: pyautogui.press(keys[0])
        return lambda: pyautogui.hotkey(*keys)

    # (phrase, action) pairs checked in order on each final transcript;
    # the first phrase contained in the transcript wins.
    commands = [
        ("메모장", _launch("notepad.exe")),
        ("크롬", _launch("C:\Program Files\Google\Chrome\Application\chrome.exe")),
        ("한글", _launch("C:\Program Files (x86)\HNC\Office 2022\HOffice120\Bin\Hwp.exe")),
        ("검색", _press("ctrl", "f")),
        ("복사", _press("ctrl", "c")),
        ("붙여 넣기", _press("ctrl", "v")),
        ("뒤로 가기", _press("ctrl", "z")),
        ("닫기", _press("alt", "f4")),
        ("다음", _press("space")),
        ("나가기", _press("esc")),
        ("엔터", _press("enter")),
        # NOTE: "한글" is already claimed by the Hwp launcher above, so this
        # layout-toggle entry is unreachable -- kept to mirror the original.
        ("한글", _press("hangul")),
        ("영어", _press("hangul")),
        ("저장", _press("ctrl", "s")),
        ("전체 선택", _press("ctrl", "a")),
        ("캡처", _press("shift", "win", "s")),
        ("캡스락", _press("capslock")),  # fixed: was the invalid key name "capslocck"
        ("자르기", _press("ctrl", "x")),
        ("새로고침", _press("f5")),
        ("시작", _press("f5")),
        ("바탕화면", _press("win", "d")),
        ("최대화", _press("win", "up")),
        ("삭제", _press("delete")),
        ("실행", _press("ctrl", "alt", "f10")),
        ("작업 관리자", _press("ctrl", "shift", "esc")),
    ]
    transcript = ""
    final_transcript = ""
    for response in responses:
        if not response.results:
            continue
        result = response.results[0]
        if not result.alternatives:
            continue
        transcript = result.alternatives[0].transcript
        # Erase the previously printed console line before re-printing.
        sys.stdout.write("\033[F\033[K")
        if not result.is_final:
            # Interim hypothesis: overwrite the current line in place.
            sys.stdout.write(transcript + '\r')
            sys.stdout.flush()
            continue
        final_transcript += transcript + " "
        print(final_transcript)
        # Quit phrases terminate the whole program.
        for quit_pattern in (r'\b(음성인식 꺼 줘|quit)\b', r'\b(음성인식 종료|quit)\b'):
            if re.search(quit_pattern, final_transcript, re.I):
                speak("종료하겠습니다.")
                sys.exit("Exiting")
        # Since final_transcript is reset after every non-matching final
        # result, matching against it is equivalent to the original's mixed
        # transcript/final_transcript checks.
        matched = False
        for phrase, action in commands:
            if phrase in final_transcript:
                action()
                time.sleep(0.5)
                matched = True
                break
        if matched:
            # A command was executed; hand control back to the caller.
            break
        final_transcript = ""
    return final_transcript
def speech_to_text():
    """Capture one utterance from the microphone and return its Korean text.

    Returns:
        The recognized text, or None on listen timeout / service error.
        NOTE(review): sr.UnknownValueError is not caught here and will
        propagate to the caller -- confirm this is intended.
    """
    # create a recognizer instance
    r = sr.Recognizer()
    # use the system default microphone as the audio source
    with sr.Microphone() as source:
        print("Speak:")
        # adjust for ambient noise
        r.adjust_for_ambient_noise(source)
        # listen for audio
        audio = r.listen(source)
    try:
        # recognize speech using Google Speech Recognition
        text = r.recognize_google(audio, language='ko-KR')
        speak("입력하겠습니다.")
        print("You said: " + text)
        return text
    except sr.WaitTimeoutError:
        print("Listening timed out. Please try again.")
        return None
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))
        return None
def speech_to_gpt(messages):
    """Send a chat history to the OpenAI chat API and return the reply text.

    Args:
        messages: list of {"role": ..., "content": ...} chat messages.

    Returns:
        The assistant's reply as a plain string.
    """
    response = openai.ChatCompletion.create(model="gpt-3.5-turbo",
                                            messages=messages)
    return response.choices[0].message.content
def main():
    """Top-level loop: wake-word detection, then command or dictation mode."""
    # See http://g.co/cloud/speech/docs/languages
    # for a list of supported languages.
    language_code = 'ko-KR' # a BCP-47 language tag
    client = speech.SpeechClient()
    config = speech.RecognitionConfig(
        encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=RATE,
        language_code=language_code)
    streaming_config = speech.StreamingRecognitionConfig(
        config=config,
        interim_results=True)
    while True:
        # True -> command mode, False -> dictation mode.
        windows_spoken = is_windows_spoken()
        if windows_spoken:
            end_time = time.time() + 5 # 5 seconds
            # NOTE(review): the inner while breaks after a single pass, so the
            # 5-second window amounts to exactly one streaming session.
            while time.time() < end_time:
                with MicrophoneStream(RATE, CHUNK) as stream:
                    audio_generator = stream.generator()
                    requests = (speech.StreamingRecognizeRequest(audio_content=content)
                                for content in audio_generator)
                    responses = client.streaming_recognize(streaming_config, requests)
                    # Act on recognized voice commands until one fires.
                    listen_print_loop(responses)
                break
        elif not windows_spoken:
            # Dictation mode: transcribe one utterance and type it.
            command = speech_to_text()
            if command:
                type_text(command)
            else:
                time.sleep(0.05)
if __name__ == '__main__':
main()
# [END speech_transcribe_streaming_mic]
| quswjdgns399/air_command | voicecommand_final.py | voicecommand_final.py | py | 13,739 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "openai.api_key",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "six.moves.queue.Queue",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "six.moves.q... |
45017283256 | try:
import sys
import os.path
sys.path.append(os.path.join(os.path.dirname(__file__), '../lib'))
import os, errno
import logging # http://www.onlamp.com/pub/a/python/2005/06/02/logging.html
from logging import handlers
import argparse
#sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'pysunspec'))
from datetime import datetime, date, time, timedelta
# import jsonpickle # pip install jsonpickle
import json
from sit_logger import SitLogger
from sit_constants import SitConstants
#from sit_date_time import SitDateTime
except ImportError as l_err:
print("ImportError: {0}".format(l_err))
raise l_err
class SitJsonConf(object):
    """Read a JSON configuration file from the SIT configuration directory.

    The directory comes from ``SitConstants.DEFAULT_CONF_DIR`` and must
    already exist; the parsed JSON content is held in ``self._config_data``.
    """
    # Class-level defaults; instances overwrite these in __init__.
    _logger = None
    _config_dir = None   # Configuration directory (SitConstants.DEFAULT_CONF_DIR)
    _config_file = None  # File name only (no directory component)
    _config_data = None  # Parsed JSON content, filled by read_config()
    # FUNCTIONS DEFINITION
    def __init__(self, a_config_file_name=SitConstants.SIT_JSON_DEFAULT_CONFIG_FILE_NAME):
        """Set up logging, optionally parse CLI args, and load the config.

        Args:
            a_config_file_name: configuration file name (not a path) located
                inside the default configuration directory.

        Raises:
            Exception: if the configuration directory does not exist.
            OSError: re-raised unless it is a missing-file (ENOENT) error.
        """
        self._config_dir = SitConstants.DEFAULT_CONF_DIR
        self._config_file = a_config_file_name
        if not os.path.isdir(self._config_dir):
            raise Exception('init->Config dir {} does not exist, sudo mkdir -m 777 {}'.format(self._config_dir, self._config_dir))
        try:
            self._logger = SitLogger().new_logger(__name__)
            # Only parse CLI arguments when executed as a script.
            if __name__ == '__main__':
                self.init_arg_parse()
            self.read_config(self.configuration_file_path())
        except OSError as l_e:
            self._logger.warning("init-> OSError, probably rollingfileAppender:{}".format(l_e))
            if l_e.errno != errno.ENOENT:
                raise l_e
        except Exception as l_e:
            self._logger.error('Error in init: {}'.format(l_e))
            raise l_e
    def configuration_file_path(self):
        """Return the full path of the configuration file."""
        l_res = os.path.join(self._config_dir, self._config_file)
        return l_res
    def read_config(self, a_config_file_path=None):
        """Load JSON data from the given path into ``self._config_data``.

        Args:
            a_config_file_path: explicit file path; defaults to
                ``configuration_file_path()`` when None.
        """
        if a_config_file_path is None:
            l_config_file_path = self.configuration_file_path()
        else:
            l_config_file_path = a_config_file_path
        with open(l_config_file_path, "r") as l_file:
            self._config_data = json.load(l_file)
    def item(self, a_key):
        """Return the configuration value stored under *a_key*.

        Raises:
            KeyError: if the key is not present in the configuration.
            TypeError: if the configuration has not been loaded yet.
        """
        # Fixed: this was an unimplemented stub that always returned None,
        # contradicting its documented "value or exception" contract.
        return self._config_data[a_key]
    # SCRIPT ARGUMENTS
    def init_arg_parse(self):
        """Build the argument parser and parse sys.argv into self._args."""
        self._logger.debug('init_arg_parse-> begin')
        self._parser = argparse.ArgumentParser(description='Actions with sunspec through TCP')
        self._add_arguments()
        self._args = self._parser.parse_args()
    def _add_arguments(self):
        """Declare the CLI arguments (called by init_arg_parse())."""
        self._parser.add_argument('-v', '--verbose', help='increase output verbosity', action="store_true")
        self._parser.add_argument('-u', '--update_leds_status', help='Updates led status according to spec', action="store_true")
        self._parser.add_argument('-t', '--test', help='Runs test method', action="store_true")
        l_required_named = self._parser.add_argument_group('required named arguments')
        l_required_named.add_argument('-m', '--host_mac', help='Host MAC', nargs='?', required=True)
    def execute_corresponding_args(self):
        """Run the actions requested on the command line."""
        if self._args.verbose:
            self._logger.setLevel(logging.DEBUG)
        else:
            self._logger.setLevel(logging.INFO)
        if self._args.test:
            self.test()
    # TEST
    def test(self):
        """Smoke-test helper that logs device models.

        NOTE(review): ``l_d`` is undefined in this scope (leftover from a
        pysunspec example), so this always lands in the except branch and
        terminates the process via sys.exit(1).  TODO: wire up a real device
        object or remove this method.
        """
        try:
            self._logger.info("################# BEGIN #################")
            self._logger.info("--> ************* device models *************: {}".format(l_d.models)) #Lists properties to be loaded with l_d.<property>.read() and then access them
        except Exception as l_e:
            self._logger.exception("Exception occured: {}".format(l_e))
            print('Error: {}'.format(l_e))
            self._logger.error('Error: {}'.format(l_e))
            sys.exit(1)
#################### END CLASS ######################
def main():
    """Entry point: build a SitJsonConf and execute the requested actions."""
    l_logger = logging.getLogger(__name__)
    try:
        SitJsonConf().execute_corresponding_args()
    except KeyboardInterrupt:
        l_logger.exception("Keyboard interruption")
    except Exception as l_e:
        l_logger.exception("Exception occured:{}".format(l_e))
        raise l_e
#Call main if this script is executed
if __name__ == '__main__':
main()
| phgachoud/sty-pub-raspi-modbus-drivers | lib/sit_json_conf.py | sit_json_conf.py | py | 5,275 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "os.path.path.join",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line... |
810754112 | from scipy import sparse
import time
import gradient
import vector_fields
def enforce_boundaries(phi, img_shape, dim):
    """Clip evaluation points so they stay inside the image domain.

    Each coordinate column ``d`` of *phi* is clamped (in place) to the
    interval ``[0, img_shape[d]]``.  Generalized from the original
    hard-coded 2D/3D branches to any dimension >= 1 (identical results
    for dim 2 and 3).

    Args:
        phi: (n_points, dim) array of point coordinates; modified in place.
        img_shape: per-dimension upper bounds (indexable, length >= dim).
        dim: number of spatial dimensions to clip.

    Returns:
        The clipped *phi* (the same array object).
    """
    for d in range(dim):
        phi[:, d] = phi[:, d].clip(0, img_shape[d])
    return phi
def integrate(x_0, kernels, alpha, c_sup, dim, steps = 10, compute_gradient = True):
    """Transport the points x_0 along the kernel velocity field with forward Euler.

    Args:
        x_0: (n_points, dim) array of evaluation points.
        kernels: kernel centers forwarded to vector_fields -- assumed
            (n_kernels, dim); TODO confirm against vector_fields.
        alpha: coefficient vector of the kernel expansion of the velocity field.
        c_sup: kernel support radius (compact support).
        dim: spatial dimension (2 or 3).
        steps: number of Euler steps over the integration interval.
        compute_gradient: when True, also propagate dphi/dalpha through the steps.

    Returns:
        x_1 (transported points clipped to the image domain), or the pair
        (x_1, dphi_dalpha) when compute_gradient is True.
    """
    start = time.time()
    # Evaluation matrix of the kernels at the current points (the "blowup"
    # variant is the one needed for gradient bookkeeping).
    if (compute_gradient):
        S_i = vector_fields.evaluation_matrix_blowup(kernels, x_0, c_sup, dim)
    else:
        S_i = vector_fields.evaluation_matrix(kernels, x_0, c_sup, dim)
    #print("FE -- initial S_i ", (time.time() - start) / 60)
    start = time.time()
    # Velocity of every point under the current field.
    V_i = vector_fields.make_V(S_i, alpha, dim)
    #print("FE -- initial V_i ", (time.time() - start) / 60)
    x_i = x_0
    # dphi/dalpha starts as an all-zero sparse matrix of matching shape.
    dphi_dalpha_i = sparse.csc_matrix((S_i.shape[0], S_i.shape[1]))
    for i in range(steps):
        if compute_gradient:
            start = time.time()
            dv_dphit_i = gradient.dv_dphit(x_i, kernels, alpha, c_sup, dim)
            print("FE -- dv_dphi_i ", (time.time() - start) / 60)
            start = time.time()
            # Accumulate the gradient through this Euler step.
            dphi_dalpha_i = gradient.next_dphi_dalpha(S_i, dv_dphit_i, dphi_dalpha_i, steps, dim)
            print("FE -- dphi_dalpha_i ", (time.time() - start) / 60)
        start = time.time()
        # Make a step
        x_i = x_i + V_i / steps
        # Compute evaluatation matrix based on updated evaluation points
        if (i < steps - 1):
            if (compute_gradient):
                S_i = vector_fields.evaluation_matrix_blowup(kernels, x_i, c_sup, dim)
            else:
                S_i = vector_fields.evaluation_matrix(kernels, x_i, c_sup, dim)
            V_i = vector_fields.make_V(S_i, alpha, dim)
        print("FE -- Euler step ", (time.time() - start) / 60)
    # Enforce boundary conditions
    # NOTE(review): the domain bound per axis is max(x_0[:, d]) -- this
    # assumes the initial points span the whole image; confirm.
    img_shape = []
    for d in range(dim):
        img_shape.append(max(x_0[:, d]))
    x_1 = enforce_boundaries(x_i, img_shape, dim)
    if compute_gradient:
        return x_1, dphi_dalpha_i
    else:
        return x_1
| polaschwoebel/NonLinearDataAugmentation | forward_euler.py | forward_euler.py | py | 2,149 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "time.time",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "vector_fields.evaluation_matrix_blowup",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "vector_fields.evaluation_matrix",
"line_number": 21,
"usage_type": "call"
},
{
"... |
2015306414 | import re
from pyspark import SparkConf, SparkContext
def normalizeWords(text):
    """Lower-case *text* and split it on runs of non-word characters.

    Leading/trailing punctuation yields empty strings in the result, exactly
    as the original compiled-pattern version did.
    """
    return re.split(r'\W+', text.lower(), flags=re.UNICODE)
# Run Spark locally under the application name "WordCount".
conf = SparkConf().setMaster("local").setAppName("WordCount")
sc = SparkContext(conf = conf)
# NOTE(review): ``input`` shadows the Python builtin of the same name.
input = sc.textFile("file:///sparkcourse/book.txt")
# One normalized word per element across all lines.
words = input.flatMap(normalizeWords)
# Note: Count by value creates a python object instead of a RDD hence .collect() not required to fetch the data
wordCounts = words.countByValue()
for word, count in wordCounts.items():
    # Drop non-ASCII characters; skip words that become empty.
    cleanWord = word.encode('ascii', 'ignore')
    if (cleanWord):
        print(cleanWord.decode() + " " + str(count))
| gdhruv80/Spark | word-count-better.py | word-count-better.py | py | 654 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "re.compile",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "re.UNICODE",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pyspark.SparkConf",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pyspark.SparkContext",
... |
22185534420 | import numpy as np
import pandas as pd
from collections import defaultdict
import matplotlib.pyplot as plt
from datetime import datetime
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
import os, sys
class SDAE(nn.Module):
    """Stacked denoising autoencoder with 3-layer ReLU encoder and decoder.

    The encoder maps input_dim -> hidden_dim -> hidden_dim -> embed_dim and
    the decoder mirrors it back; the final reconstruction layer is linear
    (no activation).  Submodule attribute names (enc1..enc3, dec1..dec3) are
    kept identical to the original so saved state dicts remain loadable.
    """
    def __init__(self, input_dim, hidden_dim, embed_dim):
        super(SDAE, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.embed_dim = embed_dim
        # Encoder stack.
        self.enc1 = nn.Linear(input_dim, hidden_dim)
        self.enc2 = nn.Linear(hidden_dim, hidden_dim)
        self.enc3 = nn.Linear(hidden_dim, embed_dim)
        # Decoder stack (mirror of the encoder).
        self.dec1 = nn.Linear(embed_dim, hidden_dim)
        self.dec2 = nn.Linear(hidden_dim, hidden_dim)
        self.dec3 = nn.Linear(hidden_dim, input_dim)
    def forward(self, x):
        """Return (latent_code, reconstruction) for a batch *x*."""
        hidden = F.relu(self.enc2(F.relu(self.enc1(x))))
        latent = F.relu(self.enc3(hidden))
        decoded = F.relu(self.dec2(F.relu(self.dec1(latent))))
        return latent, self.dec3(decoded)
class CDL:
    """Collaborative Deep Learning recommender.

    Couples the SDAE over item content with weighted matrix factorization:
    user factors X and item factors Y are refit by closed-form ALS updates
    each epoch, while the autoencoder supplies item latent features.
    """

    def __init__(self, train_imp, test_imp, input_dim, hidden_dim, dim_f, dataloader, seed, device, config):
        self.dim_f = dim_f
        self.user_num = train_imp.shape[0]
        self.item_num = train_imp.shape[1]
        self.input_dim = input_dim
        # Implicit-feedback interaction matrices (users x items).
        self.R_tr = train_imp
        self.R_tst = test_imp
        # Binary confidence indicator: 1 where an interaction was observed.
        self.C = np.where(self.R_tr > 0, 1, 0)
        # Scratch diagonal matrices reused by the ALS updates below.
        self.C_u = np.zeros((self.item_num, self.item_num))
        self.C_i = np.zeros((self.user_num, self.user_num))
        np.random.seed(seed)
        # Latent factor matrices, randomly initialised.
        self.X = np.random.standard_normal((self.user_num, dim_f))
        self.Y = np.random.standard_normal((self.item_num, dim_f))
        # Per-epoch loss histories.
        self.loss_tr = defaultdict(float)
        self.loss_ae = defaultdict(float)
        self.loss_tst = defaultdict(float)
        self.ae = SDAE(input_dim=input_dim, hidden_dim=hidden_dim, embed_dim=dim_f).to(device)
        # lambda_w acts as L2 weight decay on the autoencoder parameters.
        self.optimizer = optim.Adam(self.ae.parameters(), lr=config.learning_rate, weight_decay=config.lambda_w)
        self.dataloader = dataloader
        self.lambda_u = config.lambda_u
        self.lambda_w = config.lambda_w
        self.lambda_v = config.lambda_v
        self.lambda_n = config.lambda_n
        self.device = device
        self.config = config

    def ae_train(self):
        """Train the SDAE for one pass; return (item latent features, mean loss).

        NOTE(review): there is no optimizer.zero_grad() before loss.backward(),
        so gradients accumulate across batches -- confirm this is intended.
        """
        latent_np = np.zeros((self.item_num, self.dim_f))
        loss_ae = []
        for batch in self.dataloader:
            y = batch['clean'].to(self.device)
            x = batch['corrupt'].to(self.device)
            idx = batch['idx']
            latent, pred = self.ae(x)
            latent_ = latent.detach().cpu().numpy()
            # Cache this batch's latent codes at their item positions.
            latent_np[idx.numpy()] = latent_
            loss = self.loss_fn(pred, y, idx.to(self.device), latent_)
            loss.backward()
            self.optimizer.step()
            loss_ae.append(loss.item())
        return latent_np, np.mean(loss_ae)

    def fit(self):
        """Alternate SDAE training and ALS factor updates for config.epochs."""
        start = datetime.now()
        for epoch in range(self.config.epochs):
            start_epoch = datetime.now()
            self.ae.train()
            self.latent_feat, self.loss_ae[epoch] = self.ae_train()
            n = 0  # NOTE(review): unused variable
            # NOTE(review): yty/xtx only depend on Y/X, which do not change
            # inside their respective loops -- they could be hoisted out.
            for u in range(self.user_num):
                yty = np.dot(self.Y.T, self.Y)
                self.X[u, :] = self.update_user_vector(u, yty)
            for i in range(self.item_num):
                xtx = np.dot(self.X.T, self.X)
                self.Y[i, :] = self.update_item_vector(i, xtx)
            phat = self.scoring()
            train_loss = self.evaluate(train_eval=True)
            test_loss = self.evaluate(train_eval=False)
            self.loss_tr[epoch] = train_loss
            self.loss_tst[epoch] = test_loss
            print(f'EPOCH {epoch+1} : TRAINING RANK {self.loss_tr[epoch]:.5f}, VALID RANK {self.loss_tst[epoch]:.5f}')
            print(f'Time per one epoch {datetime.now() - start_epoch}')
        end = datetime.now()
        print(f'Training takes time {end-start}')

    def scoring(self):
        """Predicted preference matrix: X @ Y.T."""
        return np.dot(self.X, self.Y.T)

    def update_user_vector(self, u, yty):
        """Closed-form ALS update for user u's factor vector."""
        # Diagonal holds (confidence - 1) for user u over all items.
        np.fill_diagonal(self.C_u, (self.C[u, :] - 1))
        comp1 = yty
        comp2 = np.dot(self.Y.T, self.C_u).dot(self.Y)
        comp3 = np.identity(self.config.dim_f) * self.config.lambda_u
        comp = np.linalg.inv(comp1 + comp2 + comp3)
        # Restore the full confidence diagonal (C, not C - 1).
        self.C_u = self.C_u + np.identity(self.C_u.shape[0])
        comp = np.dot(comp, self.Y.T).dot(self.C_u)
        return np.dot(comp, self.R_tr[u, :])

    def update_item_vector(self, i, xtx):
        """Closed-form ALS update for item i, pulled toward its SDAE latent code."""
        np.fill_diagonal(self.C_i, (self.C[:, i] - 1))
        comp1 = xtx
        comp2 = np.dot(self.X.T, self.C_i).dot(self.X)
        comp3 = np.identity(self.config.dim_f) * self.config.lambda_v
        comp = np.linalg.inv(comp1 + comp2 + comp3)
        self.C_i = self.C_i + np.identity(self.C_i.shape[0])
        comp4 = self.X.T.dot(self.C_i).dot(self.R_tr[:, i])
        # lambda_v couples the factor vector to the autoencoder's latent feature.
        comp5 = self.lambda_v * self.latent_feat[i, :]
        return np.dot(comp, comp4+comp5)

    def loss_fn(self, pred, xc, idx, latent_feat):
        """Joint CDL loss: factor regularisation + latent coupling +
        reconstruction + weighted rating error (for the batch's items)."""
        X = torch.tensor(self.X).to(self.device)
        Y = torch.tensor(self.Y).to(self.device)[idx, :]
        R = torch.tensor(self.R_tr).float().to(self.device)[:, idx]
        C = torch.tensor(self.C).float().to(self.device)[:, idx]
        latent = torch.tensor(latent_feat).to(self.device)
        comp1 = (X**2).sum(axis=1).sum() * self.lambda_u/2
        comp2 = ((Y - latent)**2).sum(axis=1).sum() * self.lambda_v/2
        comp3 = ((pred - xc)**2).sum(axis=1).sum() * self.lambda_n/2
        comp4 = torch.sum((torch.mm(X, Y.T) - R)**2 * C/2)
        return comp1+comp2+comp3+comp4

    def evaluate(self, train_eval):
        """Expected percentile-rank metric over train or test interactions
        (lower is better; computed from the final np.sum ratio)."""
        if train_eval:
            R = self.R_tr
        else:
            R = self.R_tst
        phat = self.scoring()
        rank_mat = np.zeros(phat.shape)
        for u in range(self.user_num):
            # Negate scores so argsort ranks highest predictions first.
            pred_u = phat[u, :] * -1
            rank = pred_u.argsort().argsort()
            rank = rank / self.item_num
            rank_mat[u, :] = rank
return np.sum(R * rank_mat) / np.sum(R) | yeonjun-in/GNN_Recsys_paper | rec/CDL/model.py | model.py | py | 6,259 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_nu... |
71994778748 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Bernoulli
from torch.autograd import Variable
import torch.optim as optim
import numpy as np
import random
import math
import sys
import os
torch.manual_seed(0)
class armax_model(nn.Module):
    """Linear ARMAX-style glucose predictor.

    The next glucose value is a weighted sum of recent glucose, insulin and
    meal inputs plus a bias. Insulin weights are constrained to have a
    negative effect via -abs(w).
    """

    def __init__(self, no_of_glucose_inputs = 10, no_of_insulin_inputs = 10, no_of_meal_inputs = 10):
        output_dim = 1
        super(armax_model, self).__init__()
        self.glucose_weights = nn.Parameter(torch.randn((no_of_glucose_inputs, output_dim)))
        self.insulin_weights = nn.Parameter(torch.randn((no_of_insulin_inputs, output_dim)))
        self.meal_weights = nn.Parameter(torch.randn((no_of_meal_inputs, output_dim)))
        self.bias = nn.Parameter(torch.randn((output_dim,)))

    def forward(self, glucose, insulin, meal):
        """Predict the next glucose value from the three input histories."""
        glucose_term = torch.matmul(glucose, self.glucose_weights)
        # Enforcing insulin having negative effects
        insulin_term = torch.matmul(insulin, -torch.abs(self.insulin_weights))
        meal_term = torch.matmul(meal, self.meal_weights)
        # Bug fix: self.bias was declared but never used, leaving the model
        # with no trainable intercept; include it in the prediction.
        y = glucose_term + insulin_term + meal_term + self.bias
        return y
def fit_model(all_states, all_controls, all_next_states, glucose_normalizer, insulin_normalizer, meal_normalizer, filename):
    """Train an armax_model on the concatenated 'main' and 'omega' train splits.

    States are laid out as [glucose(0:T), insulin(T:2T), meal(2T:3T)] per row;
    each segment is rescaled by its normalizer. The trained model is saved to
    `filename` and returned.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    episode_count = 100
    batch_size = 64
    model = armax_model().to(device)
    learning_rate = 1e-3
    # criterion = nn.SmoothL1Loss()
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr = learning_rate)
    all_train_states = np.concatenate((all_states['main']['train'], all_states['omega']['train']), axis=0)
    all_train_controls = np.concatenate((all_controls['main']['train'], all_controls['omega']['train']), axis=0)
    all_train_next_states = np.concatenate((all_next_states['main']['train'], all_next_states['omega']['train']), axis=0)
    # T is the history length of each input segment within a state row.
    T = 10
    num_datapoints = len(all_train_states)
    for episode in range(episode_count):
        number_of_batches = math.ceil(float(num_datapoints) / float(batch_size))
        total_loss = 0.0
        for batch_index in range(number_of_batches):
            start = batch_index * batch_size
            end = min(start + batch_size, num_datapoints)
            loss_list = []  # NOTE(review): unused leftover of the per-sample loop below
            optimizer.zero_grad()
            # for entry in range(start, end):
            #     glucose_input = all_train_states[entry, :T] * glucose_normalizer
            #     glucose_input = torch.from_numpy(glucose_input).float().to(device)
            #     insulin_input = all_train_states[entry, T:2*T] * insulin_normalizer
            #     insulin_input = torch.from_numpy(insulin_input).float().to(device)
            #     meal_input = all_train_states[entry, 2*T:3*T] * meal_normalizer
            #     meal_input = torch.from_numpy(meal_input).float().to(device)
            #     target = all_train_next_states[entry, :] * glucose_normalizer
            #     target = torch.from_numpy(target).float().to(device)
            #     prediction = model.forward(glucose_input, insulin_input, meal_input)
            #     loss = criterion(prediction, target).to(device)
            #     loss_list.append(loss)
            #     # Computing loss for tracking
            #     total_loss += loss.item()
            # Vectorised batch version of the commented-out per-sample loop.
            glucose_input = all_train_states[start:end, :T] * glucose_normalizer
            glucose_input = torch.from_numpy(glucose_input).float().to(device)
            insulin_input = all_train_states[start:end, T:2*T] * insulin_normalizer
            insulin_input = torch.from_numpy(insulin_input).float().to(device)
            meal_input = all_train_states[start:end, 2*T:3*T] * meal_normalizer
            meal_input = torch.from_numpy(meal_input).float().to(device)
            target = all_train_next_states[start:end, :] * glucose_normalizer
            target = torch.from_numpy(target).float().to(device)
            prediction = model.forward(glucose_input, insulin_input, meal_input)
            loss = criterion(prediction, target).to(device)
            # Computing loss for tracking
            total_loss += loss.item() * (end-start)
            loss.backward()
            optimizer.step()
        print("At episode - ", episode, " avg loss computed - ", total_loss / float(len(all_train_states)))
        # Evaluate on the test split every 10 episodes (and on the first one).
        if (episode+1)%10 == 0 or episode == 0:
            test_loss = test_model(model, all_states, all_controls, all_next_states, glucose_normalizer, insulin_normalizer, meal_normalizer, T)
            print("At episode - ", episode, " avg test loss computed - ", test_loss )
    torch.save(model.state_dict(), filename)
    return model
def test_model(model, all_states, all_controls, all_next_states, glucose_normalizer, insulin_normalizer, meal_normalizer, T):
    """Compute the mean squared prediction error on the concatenated test splits."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    all_test_states = np.concatenate((all_states['main']['test'], all_states['omega']['test']), axis=0)
    all_test_controls = np.concatenate((all_controls['main']['test'], all_controls['omega']['test']), axis=0)
    all_test_next_states = np.concatenate((all_next_states['main']['test'], all_next_states['omega']['test']), axis=0)
    total_loss = 0.0
    # One sample at a time: slice out the three T-length input segments.
    for index in range(len(all_test_states)):
        glucose_input = all_test_states[index, :T] * glucose_normalizer
        glucose_input = torch.from_numpy(glucose_input).float().to(device)
        insulin_input = all_test_states[index, T:2*T] * insulin_normalizer
        insulin_input = torch.from_numpy(insulin_input).float().to(device)
        meal_input = all_test_states[index, 2*T:3*T] * meal_normalizer
        meal_input = torch.from_numpy(meal_input).float().to(device)
        target = all_test_next_states[index, :] * glucose_normalizer
        target = torch.from_numpy(target).float().to(device)
        prediction = model.forward(glucose_input, insulin_input, meal_input)
        prediction = prediction.cpu().detach().numpy()[0]
        # prediction is now a NumPy scalar while target is still a tensor,
        # so `loss` stays a tensor and the .item() below works.
        loss = abs( prediction - target)**2
        total_loss += loss
    avg_error = total_loss / float(len(all_test_states))
    return avg_error.item()
def extract_state_dict():
    """Load the pickled full model and re-save only its state_dict."""
    source_path = os.path.join("./networks/", "diabetes_armax_model.nt")
    full_model = torch.load(source_path)
    torch.save(full_model.state_dict(), "./networks/diabetes_armax_model_state_dict.nt")
| kaustubhsridhar/Constrained_Models | AP/armax_model_on_reformatted_data.py | armax_model_on_reformatted_data.py | py | 6,454 | python | en | code | 15 | github-code | 6 | [
{
"api_name": "torch.manual_seed",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.nn.Parameter",
... |
16326125244 | from typing import Dict
import json as _json
import datetime as _dt
def get_all_events() -> Dict:
    """Load and return the full events mapping from events.json."""
    with open("events.json", encoding="utf-8") as events_fh:
        return _json.load(events_fh)
def get_all_month_events(month: str) -> Dict:
    """Return all events for the given month name, or an error string."""
    month_key = month.lower()
    all_events = get_all_events()
    if month_key in all_events:
        return all_events[month_key]
    return "This is not a month name"
def get_all_day_events(month: str, day: int) -> Dict:
    """Return the events for one day of a month, or an error string."""
    month_key = month.lower()
    all_events = get_all_events()
    try:
        # Days are stored as string keys in the JSON document.
        return all_events[month_key][str(day)]
    except KeyError:
        return "This is not a valid input"
def get_all_today_events():
    """Return the events scheduled for the current system date."""
    now = _dt.date.today()
    return get_all_day_events(now.strftime("%B"), now.day)
| mysterious-shailendr/Web-Scraping-and-Fast-API | services.py | services.py | py | 948 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 20,
... |
3706334563 | from cgitb import reset
from flask import Flask, jsonify, request, render_template
import Crypto
import Crypto.Random
from Crypto.PublicKey import RSA
import binascii
from collections import OrderedDict
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA
import webbrowser
class Transaction:
    """A transfer of `amount` from sender to recipient, signable with the
    sender's RSA private key."""

    def __init__(self, sender_public_key, sender_private_key, recipient_public_key, amount):
        self.sender_public_key = sender_public_key
        self.sender_private_key = sender_private_key
        self.recipient_public_key = recipient_public_key
        self.amount = amount

    def to_dict(self):
        """Serialise the public transaction fields, preserving insertion order."""
        fields = [
            ('sender_public_key', self.sender_public_key),
            ('recipient_public_key', self.recipient_public_key),
            ('amount', self.amount),
        ]
        return OrderedDict(fields)

    def sign_transaction(self):
        """Sign the stringified transaction dict and return the hex signature."""
        key = RSA.importKey(binascii.unhexlify(self.sender_private_key))
        signer = PKCS1_v1_5.new(key)
        digest = SHA.new(str(self.to_dict()).encode('utf8'))
        return binascii.hexlify(signer.sign(digest)).decode('ascii')
app = Flask(__name__)


# Landing page.
@app.route('/')
def index():
    return render_template('index.html')


# Form page for creating a new transaction.
@app.route('/make/transactions')
def make_transactions():
    return render_template('make_transactions.html')


# Page for viewing existing transactions.
@app.route('/view/transactions')
def view_transactions():
    return render_template('view_transactions.html')
#Public and private key pair generation for wallet
@app.route('/wallet/new')
def new_wallet():
    """Generate a fresh 1024-bit RSA keypair, hex-encoded in DER form."""
    random_gen = Crypto.Random.new().read
    private_key = RSA.generate(1024, random_gen)
    public_key = private_key.publickey()
    # NOTE(review): format('DER') calls the builtin format(), which just
    # returns the string 'DER' passed positionally -- this only works because
    # export_key takes the format as its first argument; format='DER' was
    # likely intended.
    response={
        'private_key' : binascii.hexlify(private_key.export_key(format('DER'))).decode('ascii'),
        'public_key' :binascii.hexlify(public_key.export_key(format('DER'))).decode('ascii')
    }
    return response
#Code to generate the transactions
@app.route('/generate/transactions', methods=['POST'])
def generate_transactions():
    """Build and sign a transaction from the posted form fields."""
    #Extract transaction details from form
    sender_public_key = request.form['sender_public_key']
    sender_private_key = request.form['sender_private_key']
    recipient_public_key = request.form['recipient_public_key']
    # NOTE(review): amount arrives as a form string and is signed as-is,
    # without numeric conversion -- confirm this is intended.
    amount = request.form['amount']
    #Make a Transaction object
    transaction = Transaction(sender_public_key, sender_private_key, recipient_public_key, amount)
    #Convert Transaction object into dictionary and sign it
    response ={'transaction' : transaction.to_dict(),
               'signature' : transaction.sign_transaction()}
    return jsonify(response), 200
if __name__ == '__main__':
    from argparse import ArgumentParser
    # Allow overriding the listening port from the command line.
    parser = ArgumentParser()
    parser.add_argument('-p', '--port', default = 8081, type = int, help ="Port to listen")
    args = parser.parse_args()
    port = args.port
    #Open url in browser
    webbrowser.open('http://127.0.0.1:'+str(port))
    #Run flask app
app.run(host="0.0.0.0",port=port, debug=True) | LoneCannibal/Netherite2 | blockchain_client/client.py | client.py | py | 3,109 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.OrderedDict",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "Crypto.PublicKey.RSA.importKey",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "Crypto.PublicKey.RSA",
"line_number": 32,
"usage_type": "name"
},
{
"api_n... |
74031521788 | from matplotlib import pyplot as plt
import numpy as np
import random
import utils
features = np.array([1,2,3,5,6,7])
labels = np.array([155, 197, 244, 356,407,448])
print(features)
print(labels)
utils.plot_points(features, labels)
# Feature cross / synthetic feature
def feature_cross(num_rooms, population):
    """Synthetic feature: number of rooms available per person."""
    return num_rooms / population
def simple_trick(base_price, price_per_room, num_rooms, price):
    """Randomly nudge the line y = base_price + price_per_room*x toward the
    point (num_rooms, price).

    Slope and intercept each move by a small random amount in the direction
    that brings the prediction closer to the observed price.
    Returns the updated (price_per_room, base_price).
    """
    # Small random step sizes play the role of a learning rate.
    small_random_1 = random.random()*0.1
    small_random_2 = random.random()*0.1
    # calculate the prediction.
    predicted_price = base_price + price_per_room*num_rooms
    # check where the point is with respect to the line.
    if price > predicted_price and num_rooms > 0:
        # Prediction too low at positive x: raise slope and intercept.
        price_per_room += small_random_1
        base_price += small_random_2
    if price > predicted_price and num_rooms < 0:
        # Prediction too low at negative x: LOWERING the slope raises the
        # prediction there.
        price_per_room -= small_random_1
        base_price += small_random_2
    if price < predicted_price and num_rooms > 0:
        price_per_room -= small_random_1
        base_price -= small_random_2
    if price < predicted_price and num_rooms < 0:
        # Bug fix: prediction too high at negative x, so the slope must
        # INCREASE (slope * negative x shrinks the prediction) and the
        # intercept must DECREASE. The original decremented the slope and
        # incremented the intercept, pushing the line away from the point
        # (compare absolute_trick's sign-aware update).
        price_per_room += small_random_1
        base_price -= small_random_2
    return price_per_room, base_price
def absolute_trick(base_price, price_per_room, num_rooms, price, learning_rate):
    """Nudge the line toward (num_rooms, price) by a fixed learning-rate step.

    The step direction depends only on whether the prediction over- or
    under-shoots; its slope component is scaled by num_rooms.
    Returns the updated (price_per_room, base_price).
    """
    predicted = base_price + price_per_room*num_rooms
    step = learning_rate if price > predicted else -learning_rate
    return price_per_room + step*num_rooms, base_price + step
def square_trick(base_price, price_per_room, num_rooms, price, learning_rate):
    """Gradient-style update: move slope and intercept proportionally to the
    prediction error. Returns the updated (price_per_room, base_price)."""
    error = price - (base_price + price_per_room*num_rooms)
    new_slope = price_per_room + learning_rate*num_rooms*error
    new_intercept = base_price + learning_rate*error
    return new_slope, new_intercept
# We set the random seed in order to always get the same results.
random.seed(0)
def linear_regression(features, labels, learning_rate=0.01, epochs = 1000):
    """Fit a line to (features, labels) by stochastic point-wise updates,
    plotting the line as it evolves. Returns (price_per_room, base_price)."""
    price_per_room = random.random()
    base_price = random.random()
    for epoch in range(epochs):
        # Uncomment any of the following lines to plot different epochs
        # NOTE(review): every branch below draws the same line, so all epochs
        # except epoch 0 are plotted -- prune branches to plot selectively.
        if epoch == 1:
            utils.draw_line(price_per_room, base_price, starting=0, ending=8)
        elif epoch <= 10:
            utils.draw_line(price_per_room, base_price, starting=0, ending=8)
        elif epoch <= 50:
            utils.draw_line(price_per_room, base_price, starting=0, ending=8)
        elif epoch > 50:
            utils.draw_line(price_per_room, base_price, starting=0, ending=8)
        # Pick one random training point per epoch (stochastic update).
        i = random.randint(0, len(features)-1)
        num_rooms = features[i]
        price = labels[i]
        # Uncomment any of the 2 following lines to use a different trick
        #price_per_room, base_price = absolute_trick(base_price,
        price_per_room, base_price = square_trick(base_price,
                                                  price_per_room,
                                                  num_rooms,
                                                  price,
                                                  learning_rate=learning_rate)
    # Final fitted line drawn in black over the data points.
    utils.draw_line(price_per_room, base_price, 'black', starting=0, ending=8)
    utils.plot_points(features, labels)
    print('Price per room:', price_per_room)
    print('Base price:', base_price)
    return price_per_room, base_price
# This line is for the x-axis to appear in the figure
plt.ylim(0,500)
linear_regression(features, labels, learning_rate = 0.01, epochs = 1000)
# The root mean square error function
def rmse(labels, predictions):
    """Root-mean-square error between the label and prediction vectors."""
    n = len(labels)
    differences = np.subtract(labels, predictions)
return np.sqrt(1.0/n * (np.dot(differences, differences))) | sithu/cmpe255-spring21 | lecture/regression/home-price.py | home-price.py | py | 3,939 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "utils.plot_points",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "random.random",
"line_num... |
5832109821 | """
Implementation based on:
https://www.kaggle.com/c/quora-question-pairs/discussion/33371
"""
import networkx as nx
import pandas as pd
def magic_feature_3_with_load():
    """Load the raw train/test CSVs and delegate to magic_feature_3."""
    train_df = pd.read_csv('../data/train.csv', encoding='utf-8')
    test_df = pd.read_csv('../data/test.csv', encoding='utf-8')
    return magic_feature_3(train_df, test_df)
def magic_feature_3(train_orig, test_orig):
    """Attach the max k-core of each question (qid1/qid2) as features.

    Mutates both input frames in place and returns their new feature columns
    as dict(train=..., test=...).
    """
    # Table maps hash(question text) -> max k-core, produced by run_kcore_max().
    cores_dict = pd.read_csv("../data/question_max_kcores.csv", index_col="qid").to_dict()["max_kcore"]

    # The original defined two byte-identical helpers (gen_qid1_max_kcore /
    # gen_qid2_max_kcore); a single lookup function suffices.
    def gen_max_kcore(question):
        # The k-core table is keyed by the Python hash of the question text
        # (run_kcore hashes question1/question2 into the qid columns).
        return cores_dict[hash(question)]

    train_orig.loc[:, "m3_qid1_max_kcore"] = train_orig.loc[:, 'question1'].apply(gen_max_kcore)
    test_orig.loc[:, "m3_qid1_max_kcore"] = test_orig.loc[:, 'question1'].apply(gen_max_kcore)
    train_orig.loc[:, "m3_qid2_max_kcore"] = train_orig.loc[:, 'question2'].apply(gen_max_kcore)
    test_orig.loc[:, "m3_qid2_max_kcore"] = test_orig.loc[:, 'question2'].apply(gen_max_kcore)
    return dict(train=train_orig.loc[:, ['m3_qid1_max_kcore', 'm3_qid2_max_kcore']],
                test=test_orig.loc[:, ['m3_qid1_max_kcore', 'm3_qid2_max_kcore']])
def create_qid_dict(train_orig):
    """Build a question-text -> qid lookup from both question columns."""
    left = train_orig.loc[:, ["qid1", "question1"]].drop_duplicates(keep="first").copy().reset_index(drop=True)
    right = train_orig.loc[:, ["qid2", "question2"]].drop_duplicates(keep="first").copy().reset_index(drop=True)
    # Align column names so the two halves can be stacked.
    left.columns = ["qid", "question"]
    right.columns = ["qid", "question"]
    print(left.shape, right.shape)
    combined = pd.concat([left, right]).drop_duplicates(keep="first").reset_index(drop=True)
    print(left.shape, right.shape, combined.shape)
    lookup = combined.set_index('question').to_dict()
    return lookup["qid"]
def get_id(question, dict_questions):
    """Return the id for `question`, assigning the next sequential id to
    previously unseen questions (mutating `dict_questions`)."""
    if question in dict_questions:
        return dict_questions[question]
    # Bug fix: the original computed len(dict_questions[question]) + 1, which
    # raises KeyError for any unseen question, and then returned an
    # inconsistent (new_id, new_id) tuple. The next id is one past the
    # current table size, returned as a single value like the branch above.
    new_id = len(dict_questions) + 1
    dict_questions[question] = new_id
    return new_id
def run_kcore():
    """Build the question-pair graph and write per-question k-core membership
    columns (kcore2..kcore20) to ../data/question_kcores.csv."""
    df_train = pd.read_csv("../data/train.csv", encoding='utf-8')
    df_test = pd.read_csv("../data/test.csv", encoding='utf-8')
    # Questions are identified by the hash of their text so ids are shared
    # across train and test.
    df_train.loc[:, 'qid1'] = df_train.loc[:, 'question1'].apply(hash)
    df_train.loc[:, 'qid2'] = df_train.loc[:, 'question2'].apply(hash)
    df_test.loc[:, 'qid1'] = df_test.loc[:, 'question1'].apply(hash)
    df_test.loc[:, 'qid2'] = df_test.loc[:, 'question2'].apply(hash)
    df_all = pd.concat([df_train.loc[:, ["qid1", "qid2"]],
                        df_test.loc[:, ["qid1", "qid2"]]], axis=0).reset_index(drop='index')
    print("df_all.shape:", df_all.shape)  # df_all.shape: (2750086, 2)
    df = df_all
    # Undirected graph: nodes are question ids, edges link paired questions.
    g = nx.Graph()
    g.add_nodes_from(df.qid1)
    edges = list(df.loc[:, ['qid1', 'qid2']].to_records(index=False))
    g.add_edges_from(edges)
    # NOTE(review): Graph.selfloop_edges() was removed in networkx 2.4; newer
    # versions need nx.selfloop_edges(g) -- confirm the pinned version.
    g.remove_edges_from(g.selfloop_edges())
    print(len(set(df.qid1)), g.number_of_nodes())  # 4789604
    print(len(df), g.number_of_edges())  # 2743365 (after self-edges)
    df_output = pd.DataFrame(data=g.nodes(), columns=["qid"])
    print("df_output.shape:", df_output.shape)
    NB_CORES = 20
    for k in range(2, NB_CORES + 1):
        fieldname = "kcore{}".format(k)
        print("fieldname = ", fieldname)
        ck = nx.k_core(g, k=k).nodes()
        print("len(ck) = ", len(ck))
        # Questions belonging to the k-core get the value k, others 0.
        df_output[fieldname] = 0
        # NOTE(review): DataFrame.ix was removed in pandas 1.0; use .loc.
        df_output.ix[df_output.qid.isin(ck), fieldname] = k
    df_output.to_csv("../data/question_kcores.csv", index=None)
def run_kcore_max():
    """Collapse the per-k core columns into one max_kcore value per question."""
    cores = pd.read_csv("../data/question_kcores.csv", index_col="qid")
    cores.index.names = ["qid"]
    cores.loc[:, 'max_kcore'] = cores.apply(max, axis=1)
    cores.loc[:, ['max_kcore']].to_csv("../data/question_max_kcores.csv")  # with index
if __name__ == '__main__':
    # Build the full k-core table first, then derive the per-question maximum.
    run_kcore()
    run_kcore_max()
| ahara/kaggle_quora_question_pairs | magic_feature_3.py | magic_feature_3.py | py | 4,136 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"... |
14696724987 | import math
import sys
from typing import Iterable, Optional
import torch
from timm.data import Mixup
from timm.utils import accuracy
import util.misc as misc
import util.lr_sched as lr_sched
import numpy as np
from scipy.stats import spearmanr, pearsonr
def train_koniq_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, loss_scaler=None, log_writer=None,
                    args=None):
    """Train one epoch on KonIQ IQA data with AMP + gradient accumulation;
    prints epoch SROCC/PLCC and returns the averaged meter stats."""
    model.train(True)
    metric_logger = misc.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 20
    accum_iter = args.accum_iter
    optimizer.zero_grad()
    if log_writer is not None:
        print('log_dir: {}'.format(log_writer.log_dir))
    pred_epoch = []
    labels_epoch = []
    for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        # we use a per iteration (instead of per epoch) lr scheduler
        if data_iter_step % accum_iter == 0:
            lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)
        samples = samples.to(device, non_blocking=True)
        targets = torch.squeeze(targets.type(torch.FloatTensor))
        targets = targets.to(device, non_blocking=True)
        # Forward under mixed precision.
        with torch.cuda.amp.autocast():
            outputs = model(samples)
            loss = criterion(torch.squeeze(outputs), targets)
        loss_value = loss.item()
        # Abort on NaN/inf loss rather than training on garbage.
        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            sys.exit(1)
        # Scale down so accumulated gradients average over accum_iter steps.
        loss /= accum_iter
        loss_scaler(loss, optimizer, clip_grad=None,
                    parameters=model.parameters(), create_graph=False,
                    update_grad=(data_iter_step + 1) % accum_iter == 0)
        if (data_iter_step + 1) % accum_iter == 0:
            optimizer.zero_grad()
        torch.cuda.synchronize()
        metric_logger.update(loss=loss_value)
        min_lr = 10.
        max_lr = 0.
        for group in optimizer.param_groups:
            min_lr = min(min_lr, group["lr"])
            max_lr = max(max_lr, group["lr"])
        metric_logger.update(lr=max_lr)
        # Gather predictions/labels across all distributed workers for the
        # epoch-level correlation metrics below.
        pred =concat_all_gather(outputs)
        labels =concat_all_gather(targets)
        pred_batch_numpy = pred.data.cpu().numpy()
        labels_batch_numpy = labels.data.cpu().numpy()
        pred_epoch = np.append(pred_epoch, pred_batch_numpy)
        labels_epoch = np.append(labels_epoch, labels_batch_numpy)
        loss_value_reduce = misc.all_reduce_mean(loss_value)
        if log_writer is not None and (data_iter_step + 1) % accum_iter == 0:
            """ We use epoch_1000x as the x-axis in tensorboard.
            This calibrates different curves when batch size changes.
            """
            epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)
            log_writer.add_scalar('loss', loss_value_reduce, epoch_1000x)
            log_writer.add_scalar('lr', max_lr, epoch_1000x)
    rho_s, _ = spearmanr(np.squeeze(pred_epoch), np.squeeze(labels_epoch))
    rho_p, _ = pearsonr(np.squeeze(pred_epoch), np.squeeze(labels_epoch))
    print('*[train] epoch:%d / SROCC:%4f / PLCC:%4f' % (epoch+1, rho_s, rho_p))
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate_koniq(data_loader, model, device):
    """Evaluate IQA predictions on KonIQ; prints SROCC/PLCC and returns the
    averaged meter stats."""
    criterion = torch.nn.L1Loss()
    metric_logger = misc.MetricLogger(delimiter="  ")
    header = 'Test:'
    # switch to evaluation mode
    model.eval()
    pred_epoch = []
    labels_epoch = []
    for batch in metric_logger.log_every(data_loader, 10, header):
        images = batch[0]
        target = batch[-1]
        images = images.to(device, non_blocking=True)
        targets = torch.squeeze(target.type(torch.FloatTensor))
        targets = targets.to(device, non_blocking=True)
        # compute output
        with torch.cuda.amp.autocast():
            output = model(images)
            loss = criterion(torch.squeeze(output), targets)
        # Gather predictions/labels across distributed workers for the
        # correlation metrics computed after the loop.
        pred =concat_all_gather(output)
        labels =concat_all_gather(targets)
        pred_batch_numpy = pred.data.cpu().numpy()
        labels_batch_numpy = labels.data.cpu().numpy()
        pred_epoch = np.append(pred_epoch, pred_batch_numpy)
        labels_epoch = np.append(labels_epoch, labels_batch_numpy)
        batch_size = images.shape[0]  # NOTE(review): computed but unused
        metric_logger.update(loss=loss.item())
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    rho_s, _ = spearmanr(np.squeeze(pred_epoch), np.squeeze(labels_epoch))
    rho_p, _ = pearsonr(np.squeeze(pred_epoch), np.squeeze(labels_epoch))
    print('*[test] / SROCC:%4f / PLCC:%4f' % ( rho_s, rho_p))
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
# utils
@torch.no_grad()
def concat_all_gather(tensor):
    """Gather `tensor` from every distributed worker and concatenate along
    dim 0. Warning: torch.distributed.all_gather carries no gradient."""
    world_size = torch.distributed.get_world_size()
    gathered = [torch.ones_like(tensor) for _ in range(world_size)]
    torch.distributed.all_gather(gathered, tensor, async_op=False)
    return torch.cat(gathered, dim=0)
| wang3702/retina_mae | koniq/train_koniq_epoch.py | train_koniq_epoch.py | py | 5,602 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "torch.nn",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "typing.Iterable",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.optim",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"li... |
43943174399 | # M0_C9 - Sudoku Validator
import sys
import os
from typing import List
def validate(grid: List[List[int]]) -> bool:
    """Validates a given 2D list representing a completed Sudoku puzzle.

    Returns True iff every row, every column and every 3x3 sub-grid contains
    the digits 1-9 exactly once.
    """
    digits = set(range(1, 10))
    # Every row must contain the digits 1-9 exactly once.
    for row in grid:
        if set(row) != digits:
            return False
    # Every column must contain the digits 1-9 exactly once.
    for col in range(9):
        if {grid[row][col] for row in range(9)} != digits:
            return False
    # Every 3x3 sub-grid must contain the digits 1-9 exactly once.
    for box_row in range(0, 9, 3):
        for box_col in range(0, 9, 3):
            box = {grid[r][c]
                   for r in range(box_row, box_row + 3)
                   for c in range(box_col, box_col + 3)}
            if box != digits:
                return False
    return True
##########################################
### DO NOT MODIFY CODE BELOW THIS LINE ###
##########################################
def load_puzzle(filename: str) -> List[List[int]]:
    """Reads a file containing a Sudoku puzzle into a 2D list"""
    invalid_msg = 'error: invalid Sudoku puzzle supplied'
    grid = []
    try:
        with open(filename) as fp:
            for line in fp:
                # Each line must hold exactly nine whitespace-separated ints.
                row = [int(num) for num in line.split()]
                if len(row) != 9:
                    sys.exit(invalid_msg)
                grid.append(row)
    except IOError as e:
        sys.exit(e)
    # The puzzle must have exactly nine rows.
    if len(grid) != 9:
        sys.exit(invalid_msg)
    return grid
if __name__ == '__main__':
    # Expect exactly one argument: the path to a puzzle file.
    if len(sys.argv) != 2:
        print('Usage: python3 sudoku_validator.py path/to/testfile.txt')
        sys.exit()
    grid = load_puzzle(sys.argv[1])
    print(validate(grid))
| Static-Void-Academy/M0_C9 | sudoku_validator.py | sudoku_validator.py | py | 1,112 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 30,
"u... |
25225726686 | from read_the_maxfilename_for_sort import max_number
import requests
i = 1
temp_number = max_number()
def download(url):
    """Download `url` and save it as '<temp_number>.jpg', then advance the
    module-level progress counter `i` and filename counter `temp_number`."""
    global i
    global temp_number
    print('Processing {0} url:{1}'.format(i, url))
    # Fetch the bytes before touching the filesystem so a failed request
    # does not leave behind an empty file or a leaked handle.
    payload = requests.get(url, stream=True).content
    # Context manager guarantees the file is closed even if the write fails.
    with open('{}.jpg'.format(temp_number), 'wb') as img:
        img.write(payload)
    i += 1
    temp_number += 1
img.close() | HawkingLaugh/FC-Photo-Download | image_batch_download.py | image_batch_download.py | py | 382 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "read_the_maxfilename_for_sort.max_number",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
}
] |
43364784094 | from pathlib import Path
from argparse import ArgumentParser
# The path the project resides in
BASE_PATH = Path(__file__).parent.parent
# Alarm net dimensions
ALARM_HUGE = (5000, 2000, 500, 200)
ALARM_BIG = (1000, 500, 200, 75)
ALARM_SMALL = (100, 50, 25, 10)
# Standard arguments
def add_standard_arguments(parser: ArgumentParser) -> ArgumentParser:
    """
    Add basic arguments useful to all experiments
    :param parser: argument parser object
    :return: argument parser with standard arguments added
    """
    # Positional arguments: seed and evaluation split are always required.
    parser.add_argument(
        "random_seed", type=int, help="Seed to fix randomness"
    )
    parser.add_argument(
        "data_split", type=str, help="Data split on which to evaluate the performance (i.e. val or test)"
    )
    parser.add_argument(
        "--p_contamination", default=0.0, type=float,
        help="Fraction of contamination, i.e. anomalies in the training data"
    )
    parser.add_argument(
        "--n_train_anomalies", default=0, type=int,
        help="Number of known anomalies in the training set"
    )
    # plot_freq == 0 means plotting is disabled.
    parser.add_argument(
        "--plot_freq", default=0, type=int,
        help="Plotting frequency to visualise the latent and image space"
    )
    parser.add_argument(
        "--n_epochs", default=500, type=int,
        help="Number of epochs"
    )
    parser.add_argument(
        "--learning_rate", default=.0001, type=float,
        help="Learning rate for adam"
    )
    parser.add_argument(
        "--sample_stddev", default=None, type=float,
        help="Standard deviation of the generated anomalies (in the code layer)"
    )
    parser.add_argument(
        "--model_path", default=BASE_PATH / "models", type=Path, help="Base output path for the models"
    )
    # result_path defaults to None so callers can fall back to model_path.
    parser.add_argument(
        "--result_path", default=None, type=Path,
        help="Base output path for the results, if None use the model path"
        # default = BASE_PATH / "results"
    )
    parser.add_argument(
        "--is_override", default=False, type=bool,
        help="Override existing models"
    )
    return parser
| Fraunhofer-AISEC/DA3D | libs/constants.py | constants.py | py | 2,063 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"... |
30536133746 | import importlib.util
import shutil
from pathlib import Path
from typing import List
import pandas as pd
import plotly.express as px
from bot_game import Bot
EXAMPLES_FOLDER = Path("examples")
DOWNLOAD_FOLDER = Path("downloads")
DOWNLOAD_FOLDER.mkdir(exist_ok=True)


def save_code_to_file(code: str, filename: str):
    """Persist submitted bot source code as downloads/<filename>.py and
    return the written path."""
    # Bug fix: the target name must come from the `filename` parameter;
    # it was previously hard-coded, so every submission overwrote one file.
    fpath = DOWNLOAD_FOLDER / f"{filename}.py"
    with open(fpath, "w") as fh:
        fh.write(code)
    return fpath
def save_file(filename: str, filebytes: bytes):
    """Write uploaded bytes into the downloads folder and return the path."""
    destination = DOWNLOAD_FOLDER / filename
    destination.write_bytes(filebytes)
    return destination
def import_file(fpath: Path):
    """Dynamically import the Python module stored at `fpath` and return it."""
    module_name = fpath.parts[-1]
    spec = importlib.util.spec_from_file_location(module_name, str(fpath))
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
def validate_file(fpath: Path):
    """Check that an uploaded bot file exposes a callable `strategy`.

    Raises AssertionError with a user-facing message otherwise.
    NOTE(review): assert-based validation disappears under `python -O`.
    """
    m = import_file(fpath)
    assert hasattr(m, "strategy"), "The file does not have a function called `strategy`"
    assert callable(
        m.strategy
    ), "The variable `strategy` is not callable! Is it a function?"
def build_all_bots() -> List[Bot]:
    """Instantiate a Bot for every .py file previously saved to DOWNLOAD_FOLDER."""
    bots = []
    for fp in DOWNLOAD_FOLDER.glob("*.py"):
        # Bot name = file name without extension; its strategy is the
        # module-level `strategy` function found inside the file.
        bot = Bot(name=fp.stem)
        m = import_file(fp)
        bot.strategy_func = m.strategy
        bots.append(bot)
    return bots
def add_example_bots():
    """Copy the bundled example bots into DOWNLOAD_FOLDER so they join the game."""
    for fp in EXAMPLES_FOLDER.glob("*.py"):
        shutil.copy(fp, DOWNLOAD_FOLDER / fp.parts[-1])
def plot_grand_prix_results(winnings, x_col="Bot", y_col="Races Won"):
    """Bar chart of total races won per bot, best performer first."""
    standings = pd.Series(winnings, name=y_col).sort_values(ascending=False)
    podium_df = standings.rename_axis(x_col).reset_index()
    return px.bar(podium_df, x=x_col, y=y_col, color=y_col)
def create_animation(df):
    """Animated scatter of each bot's position per round (plotly express).

    Expects *df* with columns: position, name, round, direction, last_action,
    action_order (one row per bot per round).
    """
    fig = px.scatter(
        df.assign(
            size=10,  # constant marker size; px needs a column for `size`
        ),
        x="position",
        y="name",
        animation_frame="round",
        animation_group="name",
        size="size",
        color="name",
        hover_data=["direction", "last_action", "action_order"],
        hover_name="name",
        range_x=[0, 10],
    )
    return fig
def delete_all_bots():
    """Remove every saved bot file from DOWNLOAD_FOLDER."""
    for fp in DOWNLOAD_FOLDER.glob("*.py"):
        fp.unlink()
| gabrielecalvo/bot_game | util.py | util.py | py | 2,208 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "importlib.util.util.spec_from_... |
73017760828 | from rb_api.dto.two_mode_graph.article_keyword_dto import ArticleKeywordDTO
from rb_api.json_serialize import JsonSerialize
import json
class TopicEvolutionDTO(JsonSerialize):
    """DTO pairing a list of keywords with one relevance score per year."""

    def __init__(self):
        # One ArticleKeywordDTO per distinct keyword; each carries a score
        # per entry of yearList (same index).
        self.wordList = []
        self.yearList = []

    def add_year(self, year: int) -> None:
        """Register the next year column; keyword scores align by index."""
        self.yearList.append(year)

    def add_keyword(self, value: str, score: float) -> None:
        """Append *score* for keyword *value*, creating it on first use."""
        # Bug fix: the original mixed `word.score_list` and `keyword.scoreList`;
        # only one attribute can exist on ArticleKeywordDTO. Standardised on
        # `scoreList` (matches the write side right after construction and the
        # camelCase style of this DTO). NOTE(review): confirm against
        # ArticleKeywordDTO's definition.
        existing = [word for word in self.wordList if word.value == value]
        if existing:
            existing[0].scoreList.append(score)  # at most one entry per value
            return
        keyword = ArticleKeywordDTO(value, "Keyword")
        keyword.scoreList.append(score)
        self.wordList.append(keyword)

    def normalize(self):
        """Scale each year's scores so that year's best keyword scores 1.0."""
        for year_index, _ in enumerate(self.yearList):
            # default=0 keeps an empty word list from raising ValueError
            maxim = max((word.scoreList[year_index] for word in self.wordList),
                        default=0)
            if maxim > 0:
                for word in self.wordList:
                    word.scoreList[year_index] = word.scoreList[year_index] / maxim

    def serialize(self):
        # NOTE(review): wordList holds ArticleKeywordDTO instances, which
        # json.dumps cannot serialise directly -- verify JsonSerialize or the
        # callers handle this.
        return json.dumps(self.__dict__)
| rwth-acis/readerbenchpyapi | rb_api/dto/two_mode_graph/topic_evolution_dto.py | topic_evolution_dto.py | py | 1,146 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "rb_api.json_serialize.JsonSerialize",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "rb_api.dto.two_mode_graph.article_keyword_dto.ArticleKeywordDTO",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 38,
"usage... |
13575734742 | import os
import numpy as np
import matplotlib
matplotlib.use('agg')
import xrt.runner as xrtrun
import xrt.plotter as xrtplot
import xrt.backends.raycing as raycing
from SKIF_NSTU_SCW import SKIFNSTU
from utilits.xrt_tools import crystal_focus
resol='mat'
E0 = 30000
subdir=rf"C:\Users\synchrotron\PycharmProjects\SKIF\SKIF_NSTU_SCW\results\{resol}\{E0}\R-R"
def define_plots(bl):
    """Build the sample-plane XZ plot for the current bending radius of the
    second Laue crystal; outputs go into a per-radius subdirectory."""
    plots = []
    scan_name = 'change-screen-%s' % (bl.bentLaueCylinder02.Rx)
    # One output directory per scanned bending radius.
    if not os.path.exists(os.path.join(subdir, scan_name)):
        os.mkdir(os.path.join(subdir, scan_name))
    plots.append(xrtplot.XYCPlot(beam='screen03beamLocal01', title='Sample-XZ',
                                 xaxis=xrtplot.XYCAxis(label='x', unit='mm', data=raycing.get_x),
                                 yaxis=xrtplot.XYCAxis(label='z', unit='mm', data=raycing.get_z),
                                 aspect='auto', saveName='Sample-XZ.png'
                                 ))
    for plot in plots:
        plot.saveName = os.path.join(subdir, scan_name,
                                     plot.title + '-%sm' % bl.bentLaueCylinder02.Rx + '.png'
                                     )
        # Raw plot data persisted next to the image (extension = `resol`).
        plot.persistentName = plot.saveName.replace('.png', f'.{resol}')
    return plots
def define_plots_diver(bl):
    """Build the divergence plot (x vs x') at screen02 for the current
    bending radius of the first Laue crystal."""
    plots = []
    scan_name = 'diver-screen-'
    if not os.path.exists(os.path.join(subdir, scan_name)):
        os.mkdir(os.path.join(subdir, scan_name))
    plots.append(xrtplot.XYCPlot(beam='screen02beamLocal01', title=f'{scan_name}',
                                 xaxis=xrtplot.XYCAxis(label='x', unit='mm', data=raycing.get_x),
                                 yaxis=xrtplot.XYCAxis(label=r'$x^{\prime}$', unit='', data=raycing.get_xprime),
                                 aspect='auto', saveName=f'{scan_name}_Sample-XX.png'
                                 ))
    for plot in plots:
        plot.saveName = os.path.join(subdir, scan_name,
                                     plot.title + '-%sm' % bl.bentLaueCylinder01.Rx + '.png'
                                     )
        # Divergence data is always pickled (consumed later by crystal_focus).
        plot.persistentName = plot.saveName.replace('.png', f'.pickle')
    return plots
def change_screen(plts, bl):
    """Generator for xrt's run_ray_tracing: steps screen03 through +/-500 mm
    around its nominal position, re-arming plot limits and file names each step."""
    scan_name = 'change-screen-%s' % (bl.bentLaueCylinder02.Rx)
    d0 = bl.screen03.center[1]
    for dist in np.linspace(-500., 500., 50):
        bl.screen03.center[1] = d0 + dist
        for plot in plts:
            # Reset auto-scaling so each screen position is plotted afresh.
            plot.xaxis.limits = None
            plot.yaxis.limits = None
            plot.caxis.limits = None
            plot.saveName = os.path.join(subdir, scan_name,
                                         plot.title + '_%s' % bl.screen03.center[1] + '.png'
                                         )
            plot.persistentName = plot.saveName.replace('png', f'{resol}')
        yield
def main():
    """Align the SKIF NSTU beamline at E0 and open the xrtGlow viewer.

    The commented-out section below is the bending-radius / screen-position
    scan used to generate the focus and divergence plots; kept for reference.
    """
    beamLine = SKIFNSTU()
    diver = False  # True would select the divergence scan in the dead code below
    dist0 = beamLine.bentLaueCylinder02.center[1]
    beamLine.align_energy(E0, 1000)
    beamLine.alignE = E0
    # for R in np.linspace(-2000., -500., 5):
    # beamLine.bentLaueCylinder01.Rx = R
    # beamLine.bentLaueCylinder02.Rx = R
    # beamLine.bentLaueCylinder01.Ry = R/1.e6
    # beamLine.bentLaueCylinder02.Ry = R/1.e6
    # plots = define_plots(beamLine)
    # scan = change_screen
    # if (diver==False):
    # beamLine.screen03.center[1] = dist0+crystal_focus(subdir +
    # '\diver-screen-\diver-screen-' + '-%sm' % beamLine.bentLaueCylinder01.Rx + '.pickle')
    # if diver:
    # scan = None
    # plots = define_plots_diver(beamLine)
    #
    #
    # xrtrun.run_ray_tracing(
    # plots=plots,
    # backend=r"raycing",
    # repeats=5,
    # beamLine=beamLine,
    # generator=scan,
    # generatorArgs=[plots, beamLine]
    # )
    # beamLine.screen03.center[1]=dist0+10000
    beamLine.glow()
if __name__ == '__main__':
main() | Kutkin-Oleg/SKIF | SKIF_NSTU_SCW/scans.py | scans.py | py | 3,966 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.use",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_nu... |
2416697564 | import pygame
import sys
from random import randint
display = True
class snakeGame:
    """Classic snake on a 32x32 grid rendered with pygame (20 px per cell)."""

    def __init__(self):
        # Bug fix: the original kept snake/apple/flags as mutable CLASS
        # attributes, so every instance shared the same board state.
        self.snake = [(16, 16), (16, 15)]  # head first, then body segments
        self.apple = (18, 18)
        self.is_dead = False
        self.is_left = False
        self.is_right = False

    def move_left(self):
        """Turn left relative to the current heading and advance one cell."""
        self.is_left = True
        self.move_forward()
        self.is_left = False
        self.is_right = False

    def move_right(self):
        """Turn right relative to the current heading and advance one cell."""
        self.is_right = True
        self.move_forward()
        self.is_left = False
        self.is_right = False

    def _respawn_apple(self):
        """Place the apple on a random free cell (never on the snake)."""
        # Bug fix: randint's upper bound is inclusive, so randint(0, 32)
        # could put the apple outside the playable 0..31 board.
        self.apple = (randint(0, 31), randint(0, 31))
        while self.apple in self.snake:
            self.apple = (randint(0, 31), randint(0, 31))

    def move_forward(self):
        """Advance the head one cell (applying any pending turn); handle
        wall/self collisions, apple eating, and body shifting."""
        # Current heading: vector from neck to head.
        vect = (self.snake[0][0] - self.snake[1][0], self.snake[0][1] - self.snake[1][1])
        if self.is_right:
            vect = (-vect[1], vect[0])
        elif self.is_left:
            vect = (vect[1], -vect[0])
        dest_pos = (self.snake[0][0] + vect[0], self.snake[0][1] + vect[1])
        if not (0 <= dest_pos[0] < 32 and 0 <= dest_pos[1] < 32):
            self.is_dead = True  # hit a wall
        elif dest_pos in self.snake:
            self.is_dead = True  # bit itself
        elif dest_pos == self.apple:
            # Grow: shift body forward, add new head, re-append the old tail.
            prev_tail = self.snake[-1]
            for i in range(len(self.snake) - 1, 0, -1):
                self.snake[i] = self.snake[i - 1]
            self.snake[0] = dest_pos
            self.snake.append(prev_tail)
            self._respawn_apple()
        else:
            for i in range(len(self.snake) - 1, 0, -1):
                self.snake[i] = self.snake[i - 1]
            self.snake[0] = dest_pos

    def game(self):
        """Main loop: poll arrow keys, advance the snake at ~16 FPS, draw."""
        if display:
            pygame.init()
            DisplaySurface = pygame.display.set_mode((640, 640))
            pygame.display.set_caption("Pong")  # NOTE(review): leftover caption
        self.is_dead = False
        while not self.is_dead:
            if display:
                pygame.display.update()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    sys.exit()
            keys = pygame.key.get_pressed()
            if keys[pygame.K_RIGHT]:
                self.move_right()
            elif keys[pygame.K_LEFT]:
                self.move_left()
            self.move_forward()
            if display:
                DisplaySurface.fill((0, 0, 0))
                self.update(DisplaySurface)
            (pygame.time.Clock()).tick(16)
        pygame.quit()
        sys.exit()

    def update(self, surface):
        """Draw the snake (white) and the apple (red); one cell = 20x20 px."""
        for elem in self.snake:
            pygame.draw.rect(surface, (255, 255, 255), (elem[0] * 20, elem[1] * 20, 20, 20))
        pygame.draw.rect(surface, (255, 0, 0), (self.apple[0] * 20, self.apple[1] * 20, 20, 20))
snakeGame().game()
| dogancanalgul/Pong | scratch.py | scratch.py | py | 3,004 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "random.randint",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
... |
34511399562 | from django import forms
from .models import Producto
from django.forms import ModelForm
class ProductosForm(forms.ModelForm):
    """ModelForm for creating and editing Producto records."""

    class Meta:
        model = Producto
        fields = ('nombre', 'material', 'cantidad', 'categoria')
        labels = {
            'nombre': 'Nombre',
            'cantidad': 'n. cantidad',
        }

    def __init__(self, *args, **kwargs):
        # Python 3 zero-argument super() (the old explicit form was redundant).
        super().__init__(*args, **kwargs)
        self.fields['categoria'].empty_label = "Select"
        # Allow the quantity field to be left blank in the form.
        self.fields['cantidad'].required = False
{
"api_name": "django.forms.ModelForm",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "models.Producto",
"line_number": 11,
"usage_type": "name"
}
] |
23553579650 | import numpy as np
import pandas as pd
from scipy.io import loadmat
def glf(
    mz: np.ndarray,
    intens: np.ndarray,
    delta_cts: float = 0.1,
    delta_mz: float = 2000.0,
    k: float = 7.0
) -> np.ndarray:
    """Generalised logistic baseline over the m/z axis.

    The floor is 2% of the maximum intensity; the remaining amplitude is
    scaled by *delta_cts* and rolled off around *delta_mz* with steepness *k*.
    """
    floor = np.max(intens) * 0.02
    amplitude = (np.max(intens) - floor) * delta_cts
    # np.exp instead of `np.e ** x`: the standard, clearer exponential call.
    return floor + amplitude / (1 + np.exp(amplitude * k / (mz - delta_mz)))
def parse_database(database: np.ndarray, strain_precision = "full") -> pd.DataFrame:
    """Flatten a loadmat database cell array into a DataFrame with one row
    per observation: peak intensities, m/z values and a taxonomy label.

    strain_precision selects how many words of the taxonomy string are kept:
    'genus' (1), 'species' (2) or 'full' (3).
    """
    precision_words = {"genus": 1, "species": 2, "full": 3}
    mz_values, peak_values, labels = [], [], []
    for entity_idx in range(len(database)):
        entity = database[entity_idx][0]
        for obs_idx in range(len(entity["tax"])):
            observation = entity[obs_idx]
            mz_values.append(observation["pik"][0])    # first row: m/z axis
            peak_values.append(observation["pik"][1])  # second row: intensities
            taxonomy = observation["tax"][0].split(" ")
            labels.append(" ".join(taxonomy[:precision_words[strain_precision]]))
    frame = pd.DataFrame(columns=["Intens.", "m/z", "strain"], dtype="object")
    frame["Intens."] = peak_values
    frame["m/z"] = mz_values
    frame["strain"] = labels
    return frame
def read_pkf(pkf_path: str, strain_precision: str = "full"):
    """Load a MATLAB peak file and return its database as a DataFrame.

    The file is expected to contain a struct `C` with a cell array `dbs`
    holding the spectra database (see parse_database).
    """
    data = loadmat(pkf_path)
    database = data["C"]["dbs"][0]
    df = parse_database(database, strain_precision)
    return df
| Wimplex/AIJourney_AI4Bio_4th | source/utils/pkf.py | pkf.py | py | 1,351 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.ndarray",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.max",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_n... |
29585592655 | from fastapi import FastAPI, Depends
from sqlalchemy import create_engine
from sqlalchemy.dialects.sqlite import *
from sqlalchemy.orm import sessionmaker, Session
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
from typing import List
from pydantic import BaseModel, constr
app = FastAPI()

# SQLite DB next to the app; check_same_thread=False lets FastAPI worker
# threads share connections from this session factory.
SQLALCHEMY_DATABASE_URL = "sqlite:///./test.db"
engine = create_engine(SQLALCHEMY_DATABASE_URL, connect_args = {"check_same_thread": False})
session = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
def get_db():
    """FastAPI dependency: yield a DB session and always close it afterwards."""
    db = session()
    try:
        yield db
    finally:
        db.close()
class Books(Base):
    """SQLAlchemy ORM model for rows of the `book` table."""
    __tablename__ = 'book'
    id = Column(Integer, primary_key=True, nullable=False)
    title = Column(String(50))
    author = Column(String(50))
    publisher = Column(String(50))
# Create the `book` table at import time if it does not exist yet.
Base.metadata.create_all(bind=engine)
class Book(BaseModel):
    """Pydantic schema used for request bodies and responses."""
    id: int
    title: str
    author: str
    publisher: str

    class Config:
        # NOTE(review): `orm_mode` is the pydantic-v1 name; this file also
        # calls `model_dump()` (a v2-only API), where this setting is called
        # `from_attributes`. Under v2 this works only via a deprecation shim.
        orm_mode = True
@app.post('/add_new', response_model=Book)
def add_book(b1: Book, db: Session = Depends(get_db)):
    """Insert a new book and return the persisted row.

    Bug fix: the original returned a brand-new, unsaved `Books` object built
    from the request body instead of the refreshed instance from the DB, so
    any database-populated fields were lost.
    """
    bk = Books(id=b1.id, title=b1.title, author=b1.author,
               publisher=b1.publisher)
    db.add(bk)
    db.commit()
    db.refresh(bk)
    return bk
@app.get('/list', response_model=List[Book])
def get_books(db: Session = Depends(get_db)):
    """Return every book in the table."""
    recs = db.query(Books).all()
    return recs
@app.get("/book/{id}")
async def get_book(id: int, db: Session = Depends(get_db)):
    """Fetch a single book by primary key; responds with null when absent."""
    return db.query(Books).filter(Books.id == id).first()
@app.put('/update/{id}', response_model=Book)
def update_book(id: int, book: Book, db: Session = Depends(get_db)):
    """Overwrite every field of book *id* with the request body.

    NOTE(review): if *id* does not exist this raises AttributeError (HTTP
    500), same as the original; a 404 via HTTPException would be nicer but
    needs an extra import.
    """
    b1 = db.query(Books).filter(Books.id == id).first()
    b1.id = book.id
    b1.title = book.title
    b1.author = book.author
    b1.publisher = book.publisher
    db.commit()
    # Return the tracked instance directly instead of issuing the second
    # SELECT the original did (which also missed rows whose id changed).
    db.refresh(b1)
    return b1
@app.delete("/delete{id}")  # NOTE(review): path has no slash before {id}; kept for compatibility
async def delete_book(id: int, db: Session = Depends(get_db)):
    """Delete book *id*; a missing id is a silent no-op (as before)."""
    try:
        db.query(Books).filter(Books.id == id).delete()
        db.commit()
    except Exception:
        # Bug fix: `raise Exception(e)` discarded the original exception type
        # and traceback and left the transaction open; roll back and re-raise.
        db.rollback()
        raise
    return {"delete status": "success"}
| vdtheone/crud_fastapi | main.py | main.py | py | 2,199 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "... |
27679932090 | """This module allows to interact with the user via the command line and processes
the input information.
"""
import os
import re
from inspect import getsourcefile
import numpy as np
import yaml
class CommandLineParser():
"""See documentation of the init method.
"""
    def __init__(self) -> None:
        """This class allows to parse user input from the command line and validate
        its syntax. Furthermore it allows to create the axes for the goniometer
        from a specific user input format.
        """
        self.parsed = {}
        # resources/ sits next to command_line_interfaces/ inside the package;
        # derive its path from this module's own file location.
        self._resources_path = os.path.abspath(getsourcefile(lambda: 0)).replace(
            'command_line_interfaces/parser.py', 'resources/')

        with open(f"{self._resources_path}user_input.yaml", 'r') as stream:
            try:
                self.input_options = yaml.safe_load(stream)
            except yaml.YAMLError as error:
                print(error)

        # TODO sometimes e.g. for chi a trailing space is enough to fail the regex
        # Maps each input label to the regex its answer must satisfy
        # (None means any free-form answer is accepted).
        self.validation_regex = {
            'doi': None,
            'layout': self._create_options_regex('layout'),
            'facility': self._create_options_regex('facility'),
            'beamline': None,
            'rad_type': None,
            "model" : None,
            "location" : None,
            "manufacturer" : self._create_options_regex('manufacturer'),
            'principal_angle': r'\d{1,3}\Z',
            'goniometer_axes' : r'((?:[^,]*,\s*(a|c),\s*)*[^,]*,\s*(a|c))\Z',
            'change_goniometer_axes' : r'((?:[^,]*,\s*(a|c),\s*)*[^,]*,\s*(a|c))\Z',
            'change_det_rot_axes' : r'((?:[^,]*,\s*(a|c),\s*)*[^,]*,\s*(a|c))\Z',
            "two_theta_sense" : self._create_options_regex('two_theta_sense'),
            'detector_axes' : r'((?:[^, ]*,\s*)*[^, ]+)\Z',
            "chi_axis" : r'.*(?:\s+\d{1,3})\Z',
            'kappa_axis': r'.*(((\s|,)(\s)*\d{1,3}){1,2})\Z', # capture something like kappa, 50, 50
            'image_orientation': self._create_options_regex('image_orientation'),
            'fast_direction': self._create_options_regex('fast_direction'),
            'pixel_size': r'(\d+\.?\d*\Z)|(\d+\.?\d*,\s*\d+\.?\d*\Z)',
            'array_dimension': r'(\d+(,\s*)\d+\Z)',
            'filename': r'.*((\.h5)\Z|(\.cbf)\Z|(\.smv)\Z)',
            'goniometer_rot_direction' : \
                self._create_options_regex('goniometer_rot_direction'),
            'frame_numbers': r'^\d+(,\s*\d+)*$',
            'external_url': None,
            'temperature': r'\d+\Z',
            'keep_axes': self._create_options_regex('keep_axes'),
            'url_not_reachable': self._create_options_regex('url_not_reachable'),
        }
    def request_input(self, label):
        """Request input from the user for the given label and the associated
        information in the resources file.

        Args:
            label (str): the label that identifies the request

        Returns:
            str: the input from the user, validated
        """
        # Keep prompting until a valid (non-None) answer has been stored.
        while self.parsed.get(label) is None:
            required = 'required' if self.input_options[label].get('required') \
                else 'not required'
            print(f"\n{self.input_options[label]['label']} ({required}):")
            choices = ''
            if self.input_options[label].get('options') is not None:
                options = self.input_options[label]['options']
                if self.input_options[label].get('abbreviate'):
                    # Show one-letter shortcuts, e.g. "beamline (b)".
                    options = [option + f' ({option[0]})' for option in options]
                choices = '\n Choices: ' + ', '.join(options)
            print(f"{self.input_options[label]['description']}".strip('\n') + choices)
            self.parsed[label] = self._validated_user_input(label)

        return self.parsed[label]
def parse_axis_string(self, axis_string):
"""Parse a string of form axis, sense, axis, sense...
Args:
axis_string (str): the axis string from the user input
Raises:
Exception: axis string is incorrect
Returns:
axes (list): a list containing the axis names
senses (list): a list containing the axis senses
"""
ax_str = [sub.strip() for sub in axis_string.split(',')]
if len(ax_str) % 2 != 0:
raise Exception("Axis string is incorrect: %axis_string")
axes = []
senses = []
for i in range(0, len(ax_str), 2):
axes.append(ax_str[i])
sense = ax_str[i+1].lower()
senses.append(sense)
return axes, senses
    def make_goniometer_axes(self, goniometer_axes, kappa_info, chi_info):
        """Create the goniometer axes from the user input. The list of gonio axes
        goes in order from top to bottom, meaning that the first "depends on"
        the second and so forth. We assume a two theta axis.
        The items we have to fill in are:
        1. type -> rotation
        2. depends_on -> next in list
        3. equipment -> goniometer
        4. vector -> almost always 1 0 0 (rotation about principal axis)
        5. offset -> always [0 0 0] but needed for loop integrity

        Note that our questions assume looking from above whereas imgCIF is
        looking from below, so the sense of rotation is reversed.

        Args:
            goniometer_axes (tuple): a tuple of lists consisting of the axis names
                and the senses of rotation
            kappa_info (str): the parsed user input string for the kappa axis
            chi_info (str): the parsed user input string for the chi axis

        Returns:
            dict: a dictionary containing the information about the goniometer axes
        """
        axes, senses = goniometer_axes
        axis_type = ["rotation" for _ in axes]
        equip = ["goniometer" for _ in axes]

        # Split "name angle[, poise]" strings into token lists; a single ''
        # entry marks the axis as absent.
        if kappa_info != '':
            kappa_info = re.split(r',| ', kappa_info)
            kappa_info = [info for info in kappa_info if info != '']
        else:
            kappa_info = ['']

        if chi_info != '':
            chi_info = re.split(r',| ', chi_info)
            chi_info = [info for info in chi_info if info != '']
        else:
            chi_info = ['']

        # Construct axis dependency chain
        depends_on = []
        depends_on += axes[1:]
        depends_on.append('.')

        # Remember the principal axis direction
        principal = senses[-1]

        # Create direction vectors
        vector = []
        offset = []
        for (axis, sense) in zip(axes, senses):
            # Axes rotating opposite to the principal axis get a flipped vector.
            rotfac = 1 if sense == principal else -1
            if axis.lower() == kappa_info[0].lower():
                kappa_vec = self._make_kappa_vector(kappa_info)
                kappa_vec[0] *= rotfac
                vector.append(kappa_vec)
            elif axis.lower() == chi_info[0].lower():
                vector.append(self._make_chi_vector(goniometer_axes, chi_info))
            else:
                vector.append([i * rotfac for i in [1, 0, 0]])

            # TODO offset is always 0?
            offset.append([0, 0, 0])

        axes_dict = {
            'axes' : goniometer_axes[0],
            'axis_type' : axis_type,
            'equip' : equip,
            'depends_on' : depends_on,
            'vector' : vector,
            'offset' : offset,
        }

        return axes_dict
    def make_detector_axes(self, det_trans_axes, det_rot_axes, principal_sense,
                           principal_angle, image_orientation, array_info,
                           scan_settings_info):
        """Add information concerning the detector axes. We define our own axis names,
        with the detector distance being inserted when the data file is read. We
        choose det_x to be in the horizontal direction, and det_y to be vertical.
        We need to add:
        1. type -> translation
        2. depends_on -> x,y depend on translation
        3. equipment -> detector
        4. vector -> worked out from user-provided info
        (5. offset -> beam centre, not added here)

        Args:
            det_trans_axes (list): names of the detector translation axes
            det_rot_axes (tuple): detector rotation axis names and their senses
            principal_sense (str): the sense of the principal axis (a or c)
            principal_angle (int): the orientation of the principal axis in
                degree
            image_orientation (str): the image orientation string, e.g. 'top left',...
            array_info (dict): information about the array
            scan_settings_info (dict): information about the scan settings

        Returns:
            dict: a dictionary containing the information about the detector axes
        """
        axis_id = ['dety', 'detx']
        axis_type = ['translation', 'translation']
        equip = ['detector', 'detector']

        # Work out det_x and det_y
        beam_x, beam_y = self._calculate_beam_centre(
            array_info['array_dimension'], array_info['pixel_size'])
        x_d, y_d, x_c, y_c = self._determine_detx_dety(
            principal_angle, principal_sense, image_orientation, beam_x, beam_y)
        vector = [y_d, x_d]
        offset = [[0, 0, 0], [x_c, y_c, 0]]

        # translational axes
        axis_id += det_trans_axes
        axis_type += ['translation' for _ in det_trans_axes]
        equip += ['detector' for _ in det_trans_axes]
        # TODO also for multiple axes correct?
        # Detector translation always opposite to beam direction
        vector += [[0, 0, -1] for _ in det_trans_axes]
        # first_scan = sorted(scan_settings_info.keys())[0]
        # first_scan_info = scan_settings_info[first_scan][0]
        # z_offsets = [first_scan_info.get(axis) for axis in det_trans_axes]
        # for z in z_offsets:
        #TODO this sets unknown offsets to zero...
        # z = z if z is not None else 0
        # offset is zero and non zero in scans
        offset.append([0, 0, 0])

        # rotational axes
        rot_axes, rot_senses = det_rot_axes
        axis_id += rot_axes
        axis_type += ['rotation' for _ in rot_axes]
        equip += ['detector' for _ in rot_axes]
        for idx, axis in enumerate(rot_axes):
            # Rotations agreeing with the principal sense keep +1, others -1.
            rotsense = 1 if rot_senses[idx] == principal_sense else -1
            vector.append([rotsense, 0, 0])
            offset.append([0, 0, 0])

        axis_id += ['gravity', 'source']
        axis_type += ['.', '.']
        equip += ['gravity', 'source']
        gravity = self._determine_gravity(principal_angle, principal_sense)
        vector += [gravity, [0, 0, 1]]
        offset += [[0, 0, 0], [0, 0, 0]]

        # the above ordering must reflect the stacking!
        depends_on = axis_id[1:-(len(rot_axes)+1)] + ['.' for _ in range(len(rot_axes)+2)]

        axes_dict = {
            'axes' : axis_id,
            'axis_type' : axis_type,
            'equip' : equip,
            'depends_on' : depends_on,
            'vector' : vector,
            'offset' : offset,
        }

        return axes_dict
    def _validated_user_input(self, label):
        """Request an user input and validate the input according to an apppropriate
        regular expression.

        Args:
            label (str): the label that identifies the request

        Returns:
            str or None: the accepted input, '' for a skipped optional field,
            or None when the answer must be requested again
        """
        user_input = input(' >> ')
        if self.validation_regex.get(label) is not None:
            pattern = re.compile(
                self.validation_regex[label])
            parsed_input = pattern.match(user_input)
            if parsed_input:
                parsed_input = parsed_input.group(0)
        else:
            parsed_input = user_input

        # required parameter, but either regex failed or no input was made if no regex
        # is defined
        if self.input_options[label].get('required') and parsed_input in [None, '']:
            print(' ==> Could not interpret your input correctly! This input is required, \
please try again.')
            parsed_input = None
        # not required with defined regex, but no user input
        elif not self.input_options[label].get('required') and user_input == '':
            parsed_input = ''
        # not required with defined regex, but user input
        elif not self.input_options[label].get('required') and parsed_input is None:
            print(' ==> Could not interpret your input correctly! Please try again.')

        if parsed_input is not None:
            print(f" ==> Your input was: {parsed_input}")

        return parsed_input
def _make_kappa_vector(self, kappa_info):
"""Costruct the kappa vector out of the parsed information on kappa.
Args:
kappa_info (list): a list with name and rotation angle
Returns:
list: the components of the kappa vector
"""
if len(kappa_info) == 2:
kappa_info.append("0")
kapang = float(kappa_info[1])
kappoise = float(kappa_info[2])
# Now calculate direction of axis in X-Z plane assuming
# rotation of kappa is same as principal axis. There is no
# Y component as home position is always under beam path.
up_comp = np.cos(kapang)
across_comp = np.sin(kapang)
if kappoise == 0:
# is under incident beam collimator in X-Z plane
z_comp = -1 * across_comp
elif kappoise == 180:
z_comp = across_comp
return [up_comp, 0.0, z_comp]
    def _make_chi_vector(self, goniometer_axes, chi_info):
        """Construct the chi vector out of the parsed information on chi.

        Args:
            goniometer_axes (tuple): a tuple of lists consisting of the axis names
                and the senses of rotation
            chi_info (list): a list with name and rotation angle

        Returns:
            list: the components of the chi vector
        """
        axes, senses = goniometer_axes
        axname = chi_info[0].lower()
        # Angle converted to radians; sign flipped to undo the rotation.
        rot = np.radians(-1 * float(chi_info[1]))

        # Now turn this into an axis direction
        # At the provided rotation, the chi axis is parallel to z. It is rotated
        # by -chi_rot about omega to bring it to the start position. The sense
        # of omega rotation is always about 1 0 0 by definition
        axes_lowered = [axis.lower() for axis in axes]
        ax_index = axes_lowered.index(axname)
        chi_sense = senses[ax_index]

        chi_beam_dir = np.array([0, 0, 1]) if chi_sense == "a" \
            else np.array([0, 0, -1])
        # Rotation matrix about the x axis (the omega direction).
        chi_rot = np.array([
            [1.0, 0.0, 0.0],
            [0.0, np.cos(rot), -np.sin(rot)],
            [0.0, np.sin(rot), np.cos(rot)]
        ])

        return list(np.dot(chi_rot, chi_beam_dir))
def _determine_gravity(self, principal_angle, principal_sense):
"""Determine the gravity vector.
Args:
principal_angle (str): the angle of the principal axis in degree
principal_sense (str): the sense of rotation of the principal axis
Returns:
list: the gavity vector
"""
angle = int(principal_angle) if principal_sense == "a" \
else int(principal_angle) + 180
if angle >= 360:
angle = angle - 360
if angle == 0:
gravity = [0, 1, 0] # spindle at 3 o'clock, rotating anticlockwise
elif angle == 90:
gravity = [1, 0, 0]
elif angle == 180:
gravity = [0, -1, 0]
else:
gravity = [-1, 0, 0]
return gravity
    def _determine_detx_dety(self, principal_angle, principal_sense, corner,
                             beam_x, beam_y):
        """Determine direction of detx (horizontal) and dety (vertical) in
        imgCIF coordinates.

        Args:
            principal_angle (str): the principal angle
            principal_sense (str): the principal sense of the goniometer axes
            corner (str): the orientation of the first pixel (e.g. 'top right')
            beam_x (str): the beam center in x direction in mm
            beam_y (str): the beam center in y direction in mm

        Returns:
            x_direction (list): the vector for the detector x direction
            y_direction (list): the vector for the detector y direction
            x_centre (float): the beamcentre in x direction
            y_centre (float): the beamcentre in y direction
        """
        # Start with basic value and then flip as necessary
        x_direction = [-1, 0, 0]   # spindle rotates anticlockwise at 0, top_left origin
        y_direction = [0, 1, 0]
        x_centre = beam_x
        y_centre = -1 * beam_y
        # Flip according to which corner holds the first pixel.
        if corner == "top right":
            x_direction = [i * -1 for i in x_direction]
            x_centre = -1 * beam_x
        elif corner == "bottom right":
            x_direction = [i * -1 for i in x_direction]
            y_direction = [i * -1 for i in y_direction]
            x_centre *= -1
            y_centre *= -1
        elif corner == "bottom left":
            y_direction = [i * -1 for i in y_direction]
            y_centre *= -1

        # The direction of the principal axis flips by 180 if the sense changes
        angle = int(principal_angle) if principal_sense == "a" \
            else int(principal_angle) + 180
        if angle >= 360:
            angle = angle - 360

        # Rotate detx/dety (and the centre offsets) with the principal axis.
        if angle == 90:
            temp = x_direction
            temp_centre = x_centre
            x_direction = y_direction
            x_centre = y_centre
            y_direction = [i * -1 for i in temp]
            y_centre = temp_centre
        elif angle == 180:
            x_direction = [i * -1 for i in x_direction]
            y_direction = [i * -1 for i in y_direction]
            x_centre *= -1
            y_centre *= -1
        elif angle == 270:
            temp = x_direction
            temp_centre = x_centre
            x_direction = [i * -1 for i in y_direction]
            x_centre = -1 * y_centre
            y_direction = temp
            y_centre = temp_centre

        return x_direction, y_direction, x_centre, y_centre
def _create_options_regex(self, label, case_insensitive=True):
"""Create a regular expression that matches only the options for the given
label.
Args:
label (str): the label that identifies the request
case_insensitive (bool, optional): Whether the regex should be case
insensitive. Defaults to True.
Returns:
regexp: regular expression formed by the options for the input label
"""
options = self.input_options[label].get('options')
if options is not None:
options_regex = r'|'.join(options)
if self.input_options[label].get('abbreviate'):
first_letters = [option[0] for option in options]
letters_regex = r'|'.join(first_letters)
options_regex += r'|' + letters_regex
options_regex = r'(' + options_regex + r')\Z'
if case_insensitive:
options_regex = r'(?i)' + options_regex
else:
options_regex = None
return options_regex
def _calculate_beam_centre(self, array_dimension, pixel_size):
"""The default beam centre is at the centre of the detector. We must
indicate this position in mm with the correct signs.
Args:
array_dimension (tuple): a tuple with the x and y dimension of the pixels
pixel_size (tuple): a tuple with the pixel sizes in x an y direction
Returns:
tuple: the x and y beam centre in mm
"""
dim_x, dim_y = array_dimension
pix_x, pix_y = pixel_size
return float(pix_x) * float(dim_x)/2, float(pix_y) * float(dim_y)/2
| COMCIFS/instrument-geometry-info | Tools/imgCIF_Creator/imgCIF_Creator/command_line_interfaces/parser.py | parser.py | py | 19,777 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.abspath",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "inspect.getsourcefile",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "yaml.safe_load",
... |
39674044871 | from kucoin.client import Trade
from kucoin.client import Market
import pandas as pd
from time import sleep
api_key = '60491b8da682810006e2f600'
api_secret = 'a79226df-55ce-43d0-b771-20746e338b67'
api_passphrase = 'algotrading101'
m_client = Market(url='https://api.kucoin.com')
client = Trade(api_key, api_secret, api_passphrase, is_sandbox=True)
# NOTE(review): the API credentials above are hard-coded in the source --
# move them to environment variables before sharing or deploying this script.
# Strategy: sample BTC twice, 5 minutes apart; buy ETH on a >= 5% move.
while True:
    try:
        btc_old = m_client.get_ticker('BTC-USDT')
        print('The price of BTC at {} is:'.format(pd.Timestamp.now()), btc_old['price'])
    except Exception as e:
        print(f'Error obtaining BTC data: {e}')
        continue  # bug fix: without fresh data, btc_old below was unbound/stale
    sleep(300)
    try:
        btc_new = m_client.get_ticker('BTC-USDT')
        print('The price of BTC at {} is:'.format(pd.Timestamp.now()), btc_new['price'])
    except Exception as e:
        print(f'Error obtaining BTC data: {e}')
        continue  # bug fix: btc_new would be unbound in the percent calculation
    percent = (((float(btc_new['bestAsk']) - float(btc_old['bestAsk'])) * 100) / float(btc_old['bestAsk']))
    if percent < 5:
        print('The trade requirement was not satisfied. The percentage move is at ', percent)
        continue
    elif percent >= 5:
        try:
            order = client.create_market_order('ETH-USDT', 'buy', size='5')
            print()
        except Exception as e:
            print(f'Error placing order: {e}')
            continue  # bug fix: `order` would be unbound in the status check
        sleep(2)
        try:
            check = client.get_order_details(orderId=order['orderId'])
            print(check)
        except Exception as e:
            print(f'Error while checking order status: {e}')
            continue  # bug fix: `check` would be unbound below
        if check['isActive'] == True:
            print('Order placed at {}'.format(pd.Timestamp.now()))
            break
        else:
            print('Order was canceled {}'.format(pd.Timestamp.now()))
            break
| briansegs/Kucoin-api-example- | example-2.py | example-2.py | py | 1,725 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "kucoin.client.Market",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "kucoin.client.Trade",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.Timestamp.now",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.... |
22702565799 | import logging
import random
import asyncio
import websockets
import pandas as pd
import numpy as np
import plotly.express as px
import traits
from alleles import *
pd.options.plotting.backend = "plotly"
#Fix population crash issue
#Work on save function, produce population/world snapshots
#Improve data visualization methods
#Improve color genetics complexity
#Balancing
logging.basicConfig(filename='debug1.txt',level=logging.DEBUG, filemode='w')
def shift_list(items):
    """Pop and return the first element of *items*, or None when empty.

    The list is modified in place. (Parameter renamed: the original
    shadowed the builtin `list`.)
    """
    if items:
        return items.pop(0)
    return None
class World:
    """Shared environment: a replenishing, capped pool of resources."""

    def __init__(self):
        self.resource_increment = 50
        self.resource_cap = 500
        self.reset()

    def get_resources(self):
        """Current amount of available resources."""
        return self.resources

    def increment(self):
        """Replenish resources by one increment, clamped to the cap."""
        logging.debug(f'Incrementing world. Resources: {self.resources} + {self.resource_increment} = {self.resources + self.resource_increment}.')
        new_total = self.resources + self.resource_increment
        if new_total > self.resource_cap:
            self.resources = self.resource_cap
            logging.debug(f'Resource count set to cap: {self.resource_cap}')
        else:
            self.resources = new_total

    def reset(self):
        """Restore time and resources to their starting values."""
        self.current_time = 0
        self.resources = 500
class Population:
    """Registry of living organisms, keyed by their integer id."""

    def __init__(self):
        self.reset()

    def find_by_id(self, id):
        """Look up an organism by id (KeyError if absent)."""
        return self.items[id]

    def filter_mature(self, minAge):
        """Return every organism whose age is at least *minAge*."""
        return [org for org in self.get_all() if org.age >= minAge]

    def addOrganism(self, organism):
        """Register *organism* under its own id."""
        self.items[organism.id] = organism

    def nextId(self):
        """Hand out the next unused id (monotonically increasing from 1)."""
        self.current_id += 1
        return self.current_id

    def get_all(self):
        """Return the current organisms as a list."""
        return list(self.items.values())

    def reset(self):
        """Drop all organisms, stats, and restart id numbering."""
        self.items = {}
        self.current_id = 0
        self.info = {}
class Organism:
    """A single creature: its genes (alleles), expressed traits and life state."""

    def __init__(self, alleles, traits, id):
        self.id = id
        self.alleles = alleles
        self.traits = traits
        self.age = 0
        self.has_fed = True
        self.mature_age = 2   # minimum age before breeding is possible
        self.max_age = 5      # culled once this age is reached
        self.gender = random.randint(0, 1)  # 0 = female, 1 = male (see selectPairs)
        # Let each trait wire itself onto this organism (sets derived attributes).
        for trait in traits:
            trait.attach(self)

    def __getattr__(self, name):
        # Missing attributes read as None instead of raising AttributeError;
        # downstream scoring code relies on this for trait-derived fields.
        return None

    def could_breed(self):
        """True once the organism has reached breeding age."""
        return self.age >= self.mature_age
class SystemManager:
    """Stateless driver for one simulation timestep: feeding, ageing,
    scoring, pairing, breeding and culling the population."""

    def time_advance(self, world):
        """Advance the world clock by one tick."""
        world.current_time += 1
        logging.debug(f'Time advanced to {world.current_time}')

    def resource_distribute(self, pop, world):
        """Feed organisms in descending fitness order until resources run out."""
        fitness_list = sorted(pop.get_all(), key=lambda item: item.fitness, reverse=True)
        for org in fitness_list:
            if world.resources > 0:
                org.has_fed = True
                world.resources -= 1
            else:
                logging.debug(f'Organism {org.id} unfed.')
                org.has_fed = False

    def cull(self, pop):
        """Remove organisms that starved or reached their maximum age."""
        for org in pop.get_all():
            if org.age >= org.max_age or org.has_fed == False:
                logging.debug(f'Culling Organism: {org.id}. Fed: {org.has_fed}. Age: {org.age}')
                del pop.items[org.id]

    def logPopulation(self, pop, report, world):
        """Append one CSV row per organism to *report* and refresh the
        population-average colour channels stored in pop.info."""
        total_red = 0
        total_green = 0
        total_blue = 0
        for organism in pop.items.values():
            # organism.r/g/b -- presumably set by the coloration trait's
            # attach(); not visible in this module, confirm in traits.
            report.append(f'{world.current_time},{organism.id},{organism.age},{organism.r},{organism.g},{organism.b}')
            total_red += organism.r
            total_green += organism.g
            total_blue += organism.b
        pop_count = len(pop.get_all())
        pop.info["average_red"] = 0
        pop.info["average_green"] = 0
        pop.info["average_blue"] = 0
        if pop_count > 0:
            pop.info["average_red"] = total_red / pop_count
            pop.info["average_green"] = total_green / pop_count
            pop.info["average_blue"] = total_blue / pop_count

    def _color_score(self, organism, red_weight, blue_weight, label):
        """Common colour-based scoring rule shared by calcFitness and
        calcBreedScore (the originals were duplicates differing only in
        channel weights).

        Starts at 100; each channel adds value * weight * 50, and a fully
        saturated channel (255) zeroes the running score at that point
        (later terms may still move it again -- preserved from the
        original).  A random jitter of up to +/-25 is then added or
        subtracted with probability 3/11 each.
        """
        score = 100
        score += organism.redness * red_weight * 50
        if organism.redness == 255:
            score = 0
            logging.debug(f"Pure red punished ({label})")
        score += organism.blueness * blue_weight * 50
        if organism.blueness == 255:
            score = 0
            logging.debug(f"Pure blue punished ({label})")
        score += organism.greenness * 0 * 50
        if organism.greenness == 255:
            score = 0
            logging.debug(f"Pure green punished ({label})")
        random_num = random.randint(0, 10)
        if random_num > 7:
            score += random.randint(0, 25)
        elif random_num < 3:
            score -= random.randint(0, 25)
        return score

    def calcBreedScore(self, pop):
        """Score mating attractiveness: redness penalised, blueness rewarded."""
        logging.debug("calcBreedScore called")
        for organism in pop.items.values():
            organism.breed_score = self._color_score(organism, -0.5, 0.5, "breedScore")

    def calcFitness(self, pop):
        """Score survival fitness: redness rewarded, blueness penalised
        (the mirror image of the breed score)."""
        for organism in pop.items.values():
            organism.fitness = self._color_score(organism, 0.5, -0.5, "fitness")

    def selectPairs(self, pop):
        """Pair mature organisms for breeding: up to three females per
        male, both sides taken in descending breed-score order."""
        logging.debug("selectPairs called")
        males = []
        females = []
        for organism in pop.items.values():
            logging.debug(f"selectPairs: organism could breed? {organism.could_breed()}")
            if organism.could_breed():
                if organism.gender == 0:
                    females.append(organism)
                elif organism.gender == 1:
                    males.append(organism)
                else:
                    logging.debug(f'UNEXPECTED GENDER VALUE: {organism.gender}')
        logging.debug(f"{len(males)} males, {len(females)} females")
        pairs = []
        if len(males) >= 1:
            males = sorted(males, key=lambda item: item.breed_score, reverse=True)
            females = sorted(females, key=lambda item: item.breed_score, reverse=True)
            for male in males:
                for _ in range(3):
                    female = shift_list(females)
                    if female is None:
                        # Out of females: stop pairing entirely (remaining
                        # males stay unpaired, as in the original).
                        return pairs
                    pairs.append([male, female])
        return pairs

    def mutate(self, organism):
        """Replace one random allele with a different allele of the same type."""
        global mutation_count  # module-level tally, reported via the websocket API
        mutation_target = organism.alleles[random.randint(0, len(organism.alleles) - 1)]
        organism.alleles.remove(mutation_target)
        # Candidate replacements: same allele type, excluding the original.
        possible_alleles = list(filter(lambda allele: allele.type == mutation_target.type, all_alleles))
        possible_alleles.remove(mutation_target)
        mutant_allele = possible_alleles[random.randint(0, len(possible_alleles) - 1)]
        organism.alleles.append(mutant_allele)
        mutation_count += 1
        logging.debug(f"Organism {organism.id} mutated. {mutation_target.name} -> {mutant_allele.name}.")

    def breedPair(self, pair, pop):
        """Create one child from a (male, female) pair, inheriting two
        randomly drawn alleles per shared trait; ~1% of children mutate."""
        logging.debug("breedPair called")
        a = pair[0]
        b = pair[1]
        child_alelles = []
        child_traits = []
        # Both parents must carry exactly the same traits; otherwise the
        # pairing is rejected and no child is produced.
        for trait in a.traits:
            if not trait in b.traits:
                logging.debug(f"Pairing rejected: Org {b.id} doesnt have trait {trait}")
                return
        for trait in b.traits:
            if not trait in a.traits:
                logging.debug(f"Pairing rejected: Org {a.id} doesnt have trait {trait}")
                return
        for trait in a.traits:
            # Pool both parents' alleles for this trait and draw two at random.
            trait_alleles_a = list(filter(lambda allele: allele.type == trait.allele_type, a.alleles))
            trait_alleles_b = list(filter(lambda allele: allele.type == trait.allele_type, b.alleles))
            both_alleles = trait_alleles_a + trait_alleles_b
            random.shuffle(both_alleles)
            for allele in both_alleles[0:2]:
                child_alelles.append(allele)
            child_traits.append(trait)
        child = Organism(child_alelles, child_traits, pop.nextId())
        if random.randint(0, 100) == 100:
            self.mutate(child)
        pop.addOrganism(child)
        logging.debug(f"Org {child.id} created. Redness: {child.redness}, R: {child.r}. Greeness: {child.greenness}, G: {child.g}. Blueness: {child.blueness}, B: {child.b}.")

    def incrementAge(self, pop):
        """Age every organism by one timestep."""
        for organism in pop.items.values():
            organism.age += 1

    def Update(self, pop, world):
        """Run one full timestep: feed, age, score, pair, breed, cull,
        regenerate resources and advance the clock."""
        logging.debug(f"Population at start of timestep {world.current_time}: {len(pop.get_all())}")
        self.calcFitness(pop)
        self.resource_distribute(pop, world)
        self.incrementAge(pop)
        self.calcBreedScore(pop)
        pairs = self.selectPairs(pop)
        for pair in pairs:
            self.breedPair(pair, pop)
        self.cull(pop)
        logging.debug(f"Population at end of timestep {world.current_time}: {len(pop.get_all())}")
        world.increment()
        self.time_advance(world)
# Per-timestep CSV rows for output.csv (one row per organism per timestep).
report = []
# Per-timestep CSV rows for population.csv, starting with the header row.
population_report = ["time,population,average_red,average_green,average_blue"]
# Module-level singletons shared by runSim/initialize and the websocket handlers.
pop = Population()
manager = SystemManager()
world = World()
# Running tally of allele mutations (incremented by SystemManager.mutate).
mutation_count = 0
def runSim(count):
    """Advance the simulation by *count* timesteps, then rewrite the CSV
    report files, and return the module-level population_report list."""
    while count > 0:
        logging.debug(f"runSim, calling manager.Update")
        manager.Update(pop, world)
        manager.logPopulation(pop, report, world)
        pop_count = len(pop.get_all())
        population_report.append(f"{world.current_time},{pop_count}, {pop.info['average_red']},{pop.info['average_green']},{pop.info['average_blue']}")
        count -= 1
    # PERF FIX: the original rewrote both files from scratch inside the
    # loop, making total file I/O quadratic in the number of timesteps.
    # Writing once afterwards produces the same final files.  (Unlike the
    # original, the files are also refreshed when count <= 0.)
    with open("output.csv", "wt") as output:
        for item in report:
            output.write(f"{item}\n")
    with open("population.csv", "wt") as output:
        for item in population_report:
            output.write(f"{item}\n")
    return population_report
def resetSim():
    """Rebuild the initial population and world (websocket "reset" command)."""
    initialize()
def initialize():
    """Reset all simulation state and seed a hand-built founder generation.

    Fixes over the original:
    - ``trait.ColorationOne`` was a NameError (the module is imported as
      ``traits``); SeventhOrg's ``traits.Coloration`` is also normalised to
      ``traits.ColorationOne`` so every founder shares the trait and can
      interbreed (breedPair rejects pairs whose trait lists differ).
    - pop.reset() now runs *before* ids are handed out: the original reset
      the id counter after creating founders with ids 1-7, so the first
      children reused those ids and silently overwrote the founders.
    """
    pop.reset()
    world.reset()
    report.clear()
    population_report.clear()
    population_report.append("time,population,average_red,average_green,average_blue")
    report.append(f'Time,ID,Age,Red,Green,Blue')
    FirstOrg = Organism([Coloration_Green, Coloration_Blue], [traits.ColorationOne], pop.nextId())
    SecondOrg = Organism([Coloration_Red, Coloration_Blue], [traits.ColorationOne], pop.nextId())
    ThirdOrg = Organism([Coloration_Blue, Coloration_Blue], [traits.ColorationOne], pop.nextId())
    FourthOrg = Organism([Coloration_Red, Coloration_Red], [traits.ColorationOne], pop.nextId())
    FifthOrg = Organism([Coloration_Green, Coloration_Blue], [traits.ColorationOne], pop.nextId())
    SixthOrg = Organism([Coloration_Red, Coloration_Green], [traits.ColorationOne], pop.nextId())
    SeventhOrg = Organism([Coloration_Green, Coloration_Green], [traits.ColorationOne], pop.nextId())
    # Guarantee at least one breeding pair regardless of random genders.
    FirstOrg.gender = 1
    SecondOrg.gender = 0
    initial_generation = [FirstOrg, SecondOrg, ThirdOrg, FourthOrg, FifthOrg, SixthOrg, SeventhOrg]
    for org in initial_generation:
        pop.addOrganism(org)
def showColors():
    """Plot the average colour channels over time from population.csv
    (opens in the browser via the plotly pandas backend)."""
    frame = pd.read_csv('population.csv', usecols=['time', 'average_red', 'average_green', 'average_blue'])
    frame.plot(x='time', y=['average_blue', 'average_red', 'average_green']).show()
def showPop():
    """Plot the population size over time from population.csv
    (opens in the browser via the plotly pandas backend)."""
    frame = pd.read_csv('population.csv', usecols=['time', 'population'])
    frame.plot(x='time', y=['population']).show()
async def main():
# Seed the simulation, then serve websocket commands on localhost:8765 forever.
initialize()
# Start the websocket server and run forever waiting for requests
async with websockets.serve(handleRequest, "localhost", 8765):
await asyncio.Future() # run forever
async def handleRequest(websocket, path):
# Dispatches one comma-separated command per websocket message: the first
# field is the command name, the remainder (used only by runSim) are args.
#reset command is causing an error, probably getting stuck somewhere in resetSim()
async for message in websocket:
parts = message.split(",")
command_name = parts[0]
logging.debug(f"Got websocket request")
# Report the current population size.
if command_name == "getPop":
print("Population count request recieved")
await websocket.send(f"Population count: {len(pop.get_all())}")
print("Population count sent")
# Advance the simulation by parts[1] timesteps.
elif command_name == "runSim":
print(f"Incrementing simulation by t={parts[1]}")
runSim(int(parts[1]))
await websocket.send(f"Simulation incremented by t={parts[1]}")
# Rebuild the initial population/world.
elif command_name == "reset":
print("Reset command recieved")
resetSim()
print("Simulation reset")
await websocket.send("Simulation reset")
# Open the plotly charts in the browser.
elif command_name == "showColors":
showColors()
await websocket.send("Ok")
elif command_name == "showPop":
showPop()
await websocket.send("Ok")
# Both charts plus a summary line including the mutation tally.
elif command_name == "showAll":
showPop()
showColors()
await websocket.send(f"Final population: {len(pop.get_all())}. Final time: {world.current_time}. Number of mutations: {mutation_count}.")
else:
await websocket.send("Unknown Command")
print(f"{message}")
if __name__ == "__main__":
# Entry point: start the websocket control server (blocks forever).
asyncio.run(main())
| Adrian-Grey/EvoProject | evoBackend.py | evoBackend.py | py | 15,553 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pandas.options",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "logging.de... |
650674857 | #! /g/kreshuk/pape/Work/software/conda/miniconda3/envs/cluster_env37/bin/python
import os
import json
import luigi
from cluster_tools import MulticutSegmentationWorkflow
def initial_mc(max_jobs, max_threads, tmp_folder, target='slurm'):
    """Configure and run the cluster_tools multicut segmentation workflow
    on the FIB25 cutout volume.

    Parameters:
        max_jobs: maximal number of parallel (cluster) jobs
        max_threads: threads for the multi-threaded merge/solver tasks
        tmp_folder: scratch folder for the workflow's intermediate state
        target: execution target understood by cluster_tools ('slurm'/'local')

    Raises AssertionError if the luigi pipeline does not finish successfully.
    """
    n_scales = 1

    # Input/output locations inside the n5 containers.
    input_path = '/g/kreshuk/data/FIB25/cutout.n5'
    exp_path = './exp_data/exp_data.n5'
    input_key = 'volumes/affinities'
    ws_key = 'volumes/segmentation/watershed'
    out_key = 'volumes/segmentation/multicut'
    node_labels_key = 'node_labels/multicut'

    configs = MulticutSegmentationWorkflow.get_config()
    config_folder = './config'
    if not os.path.exists(config_folder):
        os.mkdir(config_folder)

    def _write_config(name, conf):
        # Each task reads its settings from <config_folder>/<task>.config;
        # factored out of the original's repeated open/json.dump blocks.
        with open(os.path.join(config_folder, '%s.config' % name), 'w') as f:
            json.dump(conf, f)

    shebang = "#! /g/kreshuk/pape/Work/software/conda/miniconda3/envs/cluster_env37/bin/python"
    global_config = configs['global']
    global_config.update({'shebang': shebang})
    _write_config('global', global_config)

    ws_config = configs['watershed']
    ws_config.update({'threshold': .3, 'apply_presmooth_2d': False,
                      'sigma_weights': 2., 'apply_dt_2d': False,
                      'sigma_seeds': 2., 'apply_ws_2d': False,
                      'two_pass': False, 'alpha': .85,
                      'halo': [25, 25, 25], 'time_limit': 90,
                      'mem_limit': 8, 'size_filter': 100,
                      'channel_begin': 0, 'channel_end': 3})
    _write_config('watershed', ws_config)

    subprob_config = configs['solve_subproblems']
    subprob_config.update({'weight_edges': True,
                           'threads_per_job': max_threads,
                           'time_limit': 180,
                           'mem_limit': 16})
    _write_config('solve_subproblems', subprob_config)

    feat_config = configs['block_edge_features']
    feat_config.update({'offsets': [[-1, 0, 0], [0, -1, 0], [0, 0, -1],
                                    [-4, 0, 0], [0, -4, 0], [0, 0, -4]]})
    _write_config('block_edge_features', feat_config)

    # Shared settings for the multi-threaded merge/reduce tasks.
    beta = .5
    tasks = ['merge_sub_graphs', 'merge_edge_features', 'probs_to_costs',
             'reduce_problem']
    for tt in tasks:
        config = configs[tt]
        config.update({'threads_per_job': max_threads,
                       'mem_limit': 64, 'time_limit': 260,
                       'weight_edges': True, 'beta': beta})
        _write_config(tt, config)

    # Global solver: one day of solver time, plus scheduling slack.
    time_limit_solve = 24 * 60 * 60
    config = configs['solve_global']
    config.update({'threads_per_job': max_threads,
                   'mem_limit': 64, 'time_limit': time_limit_solve / 60 + 240,
                   'time_limit_solver': time_limit_solve})
    _write_config('solve_global', config)

    task = MulticutSegmentationWorkflow(input_path=input_path, input_key=input_key,
                                        ws_path=input_path, ws_key=ws_key,
                                        problem_path=exp_path,
                                        node_labels_key=node_labels_key,
                                        output_path=input_path,
                                        output_key=out_key,
                                        n_scales=n_scales,
                                        config_dir=config_folder,
                                        tmp_folder=tmp_folder,
                                        target=target,
                                        max_jobs=max_jobs,
                                        max_jobs_multicut=1,
                                        skip_ws=False)
    ret = luigi.build([task], local_scheduler=True)
    assert ret, "Multicut segmentation failed"
if __name__ == '__main__':
    # Switch to 'slurm' to submit to the cluster instead of running locally.
    target = 'local'
    # (max_jobs, max_threads) per execution target.
    if target == 'slurm':
        max_jobs, max_threads = 300, 8
    else:
        max_jobs, max_threads = 64, 16
    tmp_folder = './tmp_mc'
    initial_mc(max_jobs, max_threads, tmp_folder, target=target)
| constantinpape/cluster_tools | publications/leveraging_domain_knowledge/5_lifted_solver/initial_multicut.py | initial_multicut.py | py | 4,330 | python | en | code | 32 | github-code | 6 | [
{
"api_name": "cluster_tools.MulticutSegmentationWorkflow.get_config",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cluster_tools.MulticutSegmentationWorkflow",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 24,
"usa... |
71452823867 | import os
import pandas as pd
import numpy as np
from keras.models import load_model
# Kaggle submission template; its Class column is overwritten at the bottom
# of this script with the per-patient predictions.
sample_submission = pd.read_csv('submissions/sample_submission.csv')
print(sample_submission['Class'])
# Prefix for the FFT test-data folders ('' = default set) -- presumably a
# frequency-band tag; confirm against the data layout.
band = ''
def preproc(X_all):
    """Clean and flatten a 4-D feature array for the model.

    -inf entries become -10, values are capped at 1000, axes 1 and 2 are
    swapped, and the trailing two axes are merged, giving shape
    (samples, X_all.shape[2], X_all.shape[1] * X_all.shape[3]).
    Note: the -inf/cap replacement mutates the caller's array in place.
    """
    X_all[X_all == -np.inf] = -10
    X_all[X_all > 1000] = 1000
    swapped = np.swapaxes(X_all, 1, 2)
    n, a, b, c = swapped.shape
    return swapped.reshape(n, a, b * c)
# One LSTMSpectro checkpoint per patient; the commented CNNSpectro paths in
# the original are the alternative submission this script could produce.
model_checkpoints = [
    'models/LSTMSpectro/P1/test8-31-0.821.h5',
    'models/LSTMSpectro/P2/test8-22-0.798.h5',
    'models/LSTMSpectro/P3/test2.h5',
]

# The original repeated the load/predict/del block three times verbatim;
# a single loop keeps the behaviour (including the memory-freeing `del`s
# between patients) without the triplication.
patient_preds = []
for patient, checkpoint in enumerate(model_checkpoints, start=1):
    model = load_model(checkpoint)
    X_s = np.load('data/ffts/' + band + 'test_%d_new/X_new_s.npy' % patient)
    X_s = preproc(X_s)
    # Positive-class (seizure) probability for every test segment.
    preds = model.predict_proba(X_s)[:, 1]
    print(preds)
    print(preds.shape)
    del X_s
    del model
    patient_preds.append(preds)

preds_submission = np.concatenate(patient_preds)
print(preds_submission.shape)
sample_submission['Class'] = preds_submission
sample_submission.to_csv('submissions/LSTMSpectro882.csv', index=False)
#sample_submission.to_csv('submissions/CNNSpectro111.csv', index=False)
| Anmol6/kaggle-seizure-competition | make_sub.py | make_sub.py | py | 1,618 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.inf",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "numpy.swapaxes",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "keras.models.load_model"... |
74907691708 | # 2-D plot function for SODA TEMPERATURE
# YUE WANG
# Nov. 12st 2013
import numpy as np
import netCDF4
from mpl_toolkits.basemap import Basemap,cm
import matplotlib.pyplot as plt
def soda_plot(url, variable, llat, ulat, llon, rlon):
    """Draw a filled-contour map of a SODA variable's first time step and
    surface level.

    *url* is an OPeNDAP dataset URL, *variable* the netCDF variable name,
    and llat/ulat/llon/rlon the lower/upper latitude and left/right
    longitude corners of the map window.
    """
    dataset = netCDF4.Dataset(url)
    field = dataset.variables[variable][0, 0, :, :]   # first time step, top level
    lon = dataset.variables['LON'][:]
    lat = dataset.variables['LAT'][:]
    # setting up data into basemap with given projection
    lons, lats = np.meshgrid(lon, lat)
    fig = plt.figure(figsize=(16, 8))
    ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
    m = Basemap(llcrnrlat=llat, urcrnrlat=ulat,
                llcrnrlon=llon, urcrnrlon=rlon,
                projection='mill', resolution='h', ax=ax)
    x, y = m(lons, lats)
    # drawing the map
    m.fillcontinents(color='gray', lake_color='gray')
    m.drawcoastlines(linewidth=0.4)
    m.drawparallels(np.arange(-90., 90., 15.), labels=[1, 0, 0, 1], fontsize=10)
    m.drawmeridians(np.arange(-180., 181., 40.), labels=[0, 1, 0, 1], fontsize=10)
    m.drawmapboundary()
    # plotting data on the map
    plt.contourf(x, y, field, cmap=cm.sstanom)
    cb = plt.colorbar(orientation='horizontal')
    cb.set_label(r'Sea Surface Temperature (deg C) Jan 1998', fontsize=14, style='italic')
    plt.show()
    #plt.savefig('SST_globeplot_Hw3.png')
'''url = 'http://sodaserver.tamu.edu:80/opendap/TEMP/SODA_2.3.1_01-01_python.cdf'
variable = 'TEMP'
#nino 3.4 region
llat = -5.
####Q: range of latitude: 0-360, do we need a loop? transfore the latidue
ulat = 5.
llon = -170.
rlon = -120.''' | yueewang/Python_Digitizer | soda_plot_function_2.py | soda_plot_function_2.py | py | 1,539 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "netCDF4.Dataset",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.py... |
8939054368 | from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
admin.autodiscover()
# Legacy Django URLconf (pre-1.8 patterns() style with string view paths).
urlpatterns = patterns('',
# Map-related
url(r'^/?$', direct_to_template, {'template': 'pom/index.html'}),
url(r'^refresh/?$', 'pom.views.refresh_cache'),
url(r'^filtered/bldgs/?$', 'pom.views.get_filtered_bldgs'),
url(r'^filtered/data/bldg/(?P<bldg_code>\S+)/?$', 'pom.views.get_filtered_data_bldg'),
url(r'^filtered/data/all/?$', 'pom.views.get_filtered_data_all'),
url(r'^widget/search/resp/?$', 'pom.views.widget_search_resp'),
url(r'^widget/locations/setup/?$', 'pom.views.widget_locations_setup'),
# CAS single sign-on endpoints.
(r'^login/?$', 'django_cas.views.login'),
(r'^logout/?$', 'django_cas.views.logout'),
#Admin
# /admin goes through CAS login first, then redirects into the Django admin.
url(r'^admin/?$', 'django_cas.views.login', kwargs={'next_page': '/djadmin/'}),
(r'^djadmin/', include(admin.site.urls)),
)
# Serve static files (development helper).
urlpatterns += staticfiles_urlpatterns()
| epkugelmass/USG-srv-dev | tigerapps/pom/urls.py | urls.py | py | 1,045 | python | en | code | null | github-code | 6 | [
{
"api_name": "django.contrib.admin.autodiscover",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.views.generic.simple.direct_to_template",
"line_number": 10,
"usage_type": "a... |
16116517818 | import tools as t
import json
import requests
def get_wikipedia(title):
    """Fetch the intro extract of a German Wikipedia article, HTML-cleaned.

    Returns None when the article is missing (page id "-1") or the API
    response carries no query result.
    """
    base_url = "https://de.wikipedia.org/w/api.php"
    params = {
        "action": "query",
        "format": "json",
        "prop": "extracts",
        "exintro": True,
        "titles": title
    }
    data = requests.get(base_url, params=params).json()
    if "query" not in data:
        return None
    for page_id, page_info in data["query"]["pages"].items():
        if page_id != "-1":
            article_text = page_info["extract"]
            t.his(title=title)  # presumably records the lookup in history -- confirm in tools
            return t.clean_html(article_text)
        else:
            return None
    return None
def get_moviedb(choice, keyword):
    """Search The Movie Database (TMDB) and return the decoded JSON result.

    *choice* selects the search endpoint ('movie', 'tv', 'person', or the
    UI alias 'actor', mapped to 'person'); *keyword* is the query text.
    """
    lan = t.config_var("moviedb", "lan")
    print(lan)
    choice = choice.lower()
    # BUGFIX: the original compared *keyword* against "actor", but the
    # endpoint alias lives in *choice* -- it is what gets interpolated
    # into /search/{choice} below.
    if choice == "actor":
        choice = "person"
    print(choice)
    keyword = keyword.replace(" ", "%20")
    print(keyword)
    url = f"https://api.themoviedb.org/3/search/{choice}?query={keyword}&include_adult=false&language={lan}&page=1"
    print(url)
    # BUGFIX: the token must be passed as a plain string -- wrapping it in
    # frozenset() produced an unusable Authorization header value.
    auth = t.auth("AUTH")
    print(auth)
    headers = {
        "accept": "application/json",
        "Authorization": auth
    }
    print(headers)
    response = requests.get(url, headers=headers)
    data = json.loads(response.text)
    print(data)
    # Return the parsed payload (the original only printed it, returning
    # None; returning is backward-compatible for callers that ignored it).
    return data
get_moviedb("movie", "snowden")
| Paul-Tru/PyAssistant2 | api.py | api.py | py | 1,369 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tools.his",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tools.clean_html",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tools.config_var",
"line_... |
4691442147 | from attrdict import AttrDict
from flask import Flask, request, jsonify, make_response
from semantic_selector.model.one_to_one import NNFullyConnectedModel
from semantic_selector.adapter.one_to_one import JSONInferenceAdapter
app = Flask(__name__)
model = None
@app.before_first_request
def startup():
# Build and load the trained model lazily, on the first request, so that
# importing this module stays cheap.
global model
print("initializing model...")
model = NNFullyConnectedModel()
model.load()
@app.route("/api/inference", methods=['POST'])
def inference():
    """POST /api/inference: infer the semantic topic of an HTML snippet.

    Expects a JSON body with an "html" field; responds {"topic": ...},
    or 400 on a missing JSON content type / missing "html" key.
    """
    global model
    if request.headers['Content-Type'] != 'application/json':
        return make_response("Content-Type must be application/json", 400)
    if "html" not in request.json.keys():
        err_message = 'request body json must contain "html" attributes'
        return make_response(err_message, 400)
    html_record = AttrDict({'html': request.json["html"]})
    adapter = JSONInferenceAdapter({'record': html_record, 'dictionary': model.dictionary})
    return jsonify({"topic": model.inference_html(adapter)})
| cuhavp/semantic_selector | projects/bin/api.py | api.py | py | 1,079 | python | en | code | null | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "semantic_selector.model.one_to_one.NNFullyConnectedModel",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.request.headers",
"line_number": 22,
"usage_type": "attribute"
... |
32019692905 | from subprocess import run
from pathlib import Path
import os
from rich.console import Console
from rich.markup import escape
from builtins import print as builtin_print
import shutil
if __name__ == "__main__":
console = Console(emoji=False)
# Shadow the builtin print so all output goes through rich (markup on).
def print(msg):
console.print(msg)
here = Path(__file__).parent.absolute()
# Every non-file sibling with a '-' in its name is treated as a project group.
for subitem in here.iterdir():
if not subitem.is_file() and "-" in subitem.name:
print(f"Building {subitem.name}")
# One sub-project per directory that contains a Makefile.
sub_projects = [Path(i).parent for i in subitem.rglob("Makefile")]
for project in sub_projects:
print(f"\t- Building {project.name}")
result = run("make clean && make -r -j --output-sync=target --no-print-directory", cwd=project, shell=True, capture_output=True)
if result.returncode != 0:
# Abort the whole build on the first failing sub-project.
print("[red]Failed![/red]\n")
print(escape(result.stderr.decode()))
exit(result.returncode)
# NOTE(review): indentation was lost in extraction -- the packaging steps
# below appear to run per project group, reusing sub_projects from the
# build phase; confirm the nesting against the original file.
print("Packaging...")
build_folder = Path(here) / "build"
for project in sub_projects:
dest = build_folder / Path(f"{subitem.name}/{project.name}/build")
dest.mkdir(exist_ok=True, parents=True)
# Copy firmware images, skipping the *_app.bin intermediates.
for i in (project / "build").iterdir():
if i.is_file() and i.suffix == ".bin" and "_app" not in i.name:
print(f"Copying {i} to {dest}")
shutil.copy(i, dest)
# Ship the project's run script alongside the build output, if present.
if (project / "run.sh").exists():
dest = build_folder / Path(f"{subitem.name}/{project.name}")
shutil.copy(project / "run.sh", dest)
{
"api_name": "rich.console.Console",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"... |
19203182463 | import os
from dotenv import load_dotenv
from minio import Minio
from io import BytesIO
from data_pipeline.classes.data_loader.DataLoader import DataLoader
class MinIOLoader(DataLoader):
    """DataLoader backed by a MinIO object store; all uploads target one bucket."""

    def __init__(self, endpoint, bucket_name):
        super().__init__(endpoint)
        self._bucket_name = bucket_name
        self._create_bucket()

    def _open_connection(self):
        # Credentials come from the environment (.env via python-dotenv).
        load_dotenv()
        access_key = os.getenv("MINIO_USER")
        secret_key = os.getenv("MINIO_PASSWORD")
        return Minio(self._endpoint, access_key=access_key, secret_key=secret_key, secure=False)

    def _create_bucket(self):
        # NOTE(review): assumes self._connection was opened by the DataLoader
        # base class -- confirm against DataLoader.__init__.
        if self._connection.bucket_exists(self._bucket_name):
            print(f"Bucket {self._bucket_name} already exists")
        else:
            return self._connection.make_bucket(f"{self._bucket_name}")

    def load_from_csv(self, upload_filepath, file_name, file_type='csv'):
        """Upload a CSV file from disk."""
        self._load_file(upload_filepath, file_name, file_type)

    def load_from_dataframe(self, dataframe, file_name, file_type='csv'):
        """Serialise a pandas DataFrame to CSV and upload it from memory."""
        csv_file = dataframe.to_csv().encode('utf-8')
        self._connection.put_object(
            self._bucket_name,
            file_name,
            BytesIO(csv_file),
            length=len(csv_file),
            content_type=f'application/{file_type}'
        )

    def load_from_json(self, upload_filepath, file_name, file_type='json'):
        """Upload a JSON file from disk."""
        self._load_file(upload_filepath, file_name, file_type)

    def _load_file(self, upload_filepath, file_name, file_type):
        # fput_object streams the file from disk into the bucket.
        self._connection.fput_object(
            self._bucket_name,
            file_name,
            upload_filepath,
            content_type=f'application/{file_type}'
        )
| robbailiff/data_pipeline | src/data_pipeline/classes/data_loader/MinIOLoader.py | MinIOLoader.py | py | 1,836 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "data_pipeline.classes.data_loader.DataLoader.DataLoader",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 18,
"usage_type": "call"
},
{
... |
71754427069 | import paramiko
import paramiko.client
# Connect to the local SSH server, run `ls`, and print its stdout on
# success or stderr on failure.
client = paramiko.client.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(
    paramiko.AutoAddPolicy()
)
client.connect(
    "localhost",
    username="developer",
    password="4linux"
)
try:
    (stdin, stdout, stderr) = client.exec_command("ls")
    # The exit status only becomes available once the remote command ends.
    status = stdout.channel.recv_exit_status()
    if status == 0:
        stdout.flush()
        print(stdout.read().decode())
    else:
        stderr.flush()
        print(stderr.read().decode())
finally:
    # BUGFIX: close the SSH session even when exec_command or the stream
    # reads raise; the original leaked the connection on any exception.
    client.close()
{
"api_name": "paramiko.client.SSHClient",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "paramiko.client",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "paramiko.AutoAddPolicy",
"line_number": 7,
"usage_type": "call"
}
] |
20678034139 | from omegaconf import DictConfig
import hydra
from VisualClassificationRunner import VisualClassificationRunner
class CueConflictRunner(VisualClassificationRunner):
# Runner for cue-conflict evaluation: the test set is scored once against
# its shape labels and once against its texture labels; the shape bias is
# shape-correct / (shape-correct + texture-correct).
def test(self) -> dict:
"""Evaluate the model against the shape labels and then the texture
labels of the cue-conflict test set.
:return: Dictionary with the shape-bias percentage
:rtype: dict
"""
self.module.testset.set_shape_or_texture('shape')
shape_test_log = super().test()
self.module.testset.set_shape_or_texture('texture')
texture_test_log = super().test()
# NOTE(review): raises ZeroDivisionError when neither pass scores any
# correct predictions -- confirm whether that can occur in practice.
return {'shape_bias': 100.0 * shape_test_log['correct'] / (shape_test_log['correct'] + texture_test_log['correct'])}
def extract_test_log(self, log: dict) -> dict:
# Keep only the raw correct-count from the full test log.
out_log = {
'correct': log['correct']
}
return out_log
@hydra.main(config_path="../conf", config_name="visual_classification.yaml")
def main(conf: DictConfig):
# Hydra composes the config from ../conf/visual_classification.yaml and
# injects it here as `conf`.
runner = CueConflictRunner(conf)
runner.main()
if __name__ == '__main__':
main()
{
"api_name": "VisualClassificationRunner.VisualClassificationRunner",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "omegaconf.DictConfig",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "hydra.main",
"line_number": 30,
"usage_type": "call"
}
] |
38589291607 | import os
from django.conf import settings
from django.core.mail import EmailMessage
from .models import STATUS_TYPES
from smtplib import SMTPException
from django.core.mail import BadHeaderError
from python_http_client import exceptions
import logging
logger = logging.getLogger("django")
# admin emails (add more here)
NO_REPLY_EMAIL = settings.SENDGRID_NO_REPLY_EMAIL
ADMIN_EMAIL = settings.SENDGRID_ADMIN_EMAIL
# sendgrid templates for users
SENDGRID_TICKET_CREATED_TEMPLATE_ID_USER = settings.SENDGRID_TICKET_CREATED_TEMPLATE_ID_USER
SENDGRID_TICKET_DELETED_TEMPLATE_ID_USER = settings.SENDGRID_TICKET_DELETED_TEMPLATE_ID_USER
SENDGRID_TICKET_REJECTED_TEMPLATE_ID_USER = settings.SENDGRID_TICKET_REJECTED_TEMPLATE_ID_USER
SENDGRID_TICKET_UPDATED_TEMPLATE_ID_USER = settings.SENDGRID_TICKET_UPDATED_TEMPLATE_ID_USER
SENDGRID_BUCKET_CREATED_TEMPLATE_ID_USER = settings.SENDGRID_BUCKET_CREATED_TEMPLATE_ID_USER
# sendgrid templates for admins
SENDGRID_TICKET_CREATED_TEMPLATE_ID_ADMIN = settings.SENDGRID_TICKET_CREATED_TEMPLATE_ID_ADMIN
SENDGRID_TICKET_DELETED_TEMPLATE_ID_ADMIN = settings.SENDGRID_TICKET_DELETED_TEMPLATE_ID_ADMIN
SENDGRID_TICKET_UPDATED_TEMPLATE_ID_ADMIN = settings.SENDGRID_TICKET_UPDATED_TEMPLATE_ID_ADMIN
class Mail(EmailMessage):
def __init__(self, ticket, status):
ticket_url = os.environ.get("AZURE_SITES_URL")
logger.info("status = " + status)
# general dynamic data for all emails
dynamic_template_data = {
"name": ticket.name,
"study_name": ticket.study_name,
"ticket_url": f"{ticket_url}/{ticket.id}/update",
"ticket_status": status
}
# create email message object and fill in the details
# in tandem with the user's email
self.admin_email = EmailMessage(from_email=NO_REPLY_EMAIL)
self.admin_email.to = [ADMIN_EMAIL]
# create message
if status == "Created":
self.template_id = SENDGRID_TICKET_CREATED_TEMPLATE_ID_USER
self.admin_email.template_id = SENDGRID_TICKET_CREATED_TEMPLATE_ID_ADMIN
# attach other data here
# delete message
elif status == "Deleted":
self.template_id = SENDGRID_TICKET_DELETED_TEMPLATE_ID_USER
self.admin_email.template_id = SENDGRID_TICKET_DELETED_TEMPLATE_ID_ADMIN
# attach other data here
# rejected message: Data Intake Form Rejected
elif status == STATUS_TYPES[0]:
self.template_id = SENDGRID_TICKET_REJECTED_TEMPLATE_ID_USER
self.admin_email.template_id = SENDGRID_TICKET_UPDATED_TEMPLATE_ID_ADMIN
# attach rejected_reason
dynamic_template_data["rejected_reason"] = ticket.ticket_review_comment
# bucket created message: Awaiting Data Custodian Upload Start
elif status == STATUS_TYPES[3]:
self.template_id = SENDGRID_BUCKET_CREATED_TEMPLATE_ID_USER
self.admin_email.template_id = SENDGRID_TICKET_UPDATED_TEMPLATE_ID_ADMIN
# attach other data here
# data upload completed message: Awaiting Gen3 Acceptance
elif status == STATUS_TYPES[5]:
self.template_id = SENDGRID_TICKET_UPDATED_TEMPLATE_ID_USER
self.admin_email.template_id = SENDGRID_TICKET_UPDATED_TEMPLATE_ID_ADMIN
# attach other data here
# update message
else:
self.template_id = SENDGRID_TICKET_UPDATED_TEMPLATE_ID_USER
self.admin_email.template_id = SENDGRID_TICKET_UPDATED_TEMPLATE_ID_ADMIN
# attach ticket_status
self.admin_email.dynamic_template_data = dynamic_template_data
self.dynamic_template_data = dynamic_template_data
super().__init__(
from_email=NO_REPLY_EMAIL,
to=[ticket.email],
)
def send(self, fail_silently=False):
    """Send the admin notification email, then the data-custodian email.

    :param fail_silently: passed through to the underlying ``EmailMessage.send``
    :return: number of custodian messages successfully sent (0 on any failure)
    """
    result = 0
    # send emails out to the admin
    try:
        logger.info("Attempting to send admin email.")
        self.admin_email.send(fail_silently)
        logger.info("Attempting to send email data custodian email.")
        result = super().send(fail_silently)
    # SendGrid email error handling: https://github.com/sendgrid/sendgrid-python/blob/main/use_cases/error_handling.md
    except BadHeaderError:
        logger.error('Invalid header found for email.')
    except SMTPException as e:
        # BUG FIX: was ``'...' + e`` which raises TypeError when
        # concatenating str with an exception; let logging interpolate.
        logger.error('SMTP exception when sending email: %s', e)
    except exceptions.BadRequestsError as e:
        logger.error('BadRequestsHeader for email.')
        logger.error(e.body)
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed SystemExit /
        # KeyboardInterrupt and dropped the traceback; log both, plus the
        # recipient the original (truncated) message meant to include.
        logger.exception("Mail Sending Failed for email to %s", self.to)
    return result
| NimbusInformatics/bdcat-data-tracker | api/tracker/mail.py | mail.py | py | 4,796 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.SENDGRID_NO_REPLY_EMAIL",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 13,
"usage_type": "name"
},
... |
29626198089 | import pygame
import pathlib
import random
img_path = pathlib.Path(__file__).parent / 'img'
class Locators(object):
    """Shared game-wide constants and mutable global state."""
    # Basic RGB colors for drawing.
    BLACK = (0, 0, 0)
    WHITE = (255, 255, 255)
    GREEN = (0, 255, 0)
    RED = (255, 0, 0)
    # Window dimensions in pixels.
    screen_width = 800
    screen_height = 600
    # Candidate per-axis velocities; each new sprite picks one at random.
    random_speed = [1, -1]
    # Sprite footprint in pixels.
    rect_width = 40
    rect_height = 40
    # NOTE(review): shadows the module-level ``img_path`` defined above and
    # points at a different directory ('source/img') — confirm which is intended.
    img_path = pathlib.Path(__file__).parent / 'source' / 'img'
    music_path = pathlib.Path(__file__).parent / 'source' / 'music'
    # 2-D occupancy grid indexed [y][x] by Object.check_valid_point; 0 means
    # free, otherwise the occupying object's id. Starts empty here —
    # presumably sized elsewhere before any Object is created (TODO confirm).
    all_coordinates = []
    # Sprite group intended to hold every active game object.
    all_objects = pygame.sprite.Group()
    all_data_dict = {}
class Object(pygame.sprite.Sprite):
    """A drifting 40x40 sprite that claims a 40x40 region of the shared map.

    On construction the sprite draws random positions until it finds a free
    spot on ``Locators.all_coordinates``, stamps its id over that footprint,
    and starts moving with a random +/-1 velocity on each axis.
    """

    def __init__(self, img, id, type_obj):
        """Load and scale the image, place the sprite, and pick a velocity."""
        super().__init__()
        raw_image = pygame.image.load(img)
        self.id = id
        self.type = type_obj
        self.image = pygame.transform.scale(raw_image, (40, 40))
        self.rect = self.image.get_rect()
        self.rect.x, self.rect.y = self.get_initial_points()
        self.add_object_to_map(self.rect.x, self.rect.y)
        self.speed_x = random.choice(Locators.random_speed)
        self.speed_y = random.choice(Locators.random_speed)

    def get_initial_points(self):
        """Draw random on-screen positions until one does not overlap."""
        while True:
            candidate_x = random.randint(20, Locators.screen_width - 20)
            candidate_y = random.randint(20, Locators.screen_height - 20)
            if self.check_valid_point(candidate_x, candidate_y):
                return candidate_x, candidate_y

    def check_valid_point(self, x, y):
        """Return True when every cell in the 40x40 block around (x, y) is free."""
        grid = Locators.all_coordinates
        return all(
            grid[row][column] == 0
            for row in range(y - 20, y + 20)
            for column in range(x - 20, x + 20)
        )

    def add_object_to_map(self, x, y):
        """Stamp this sprite's id over its 40x40 footprint on the shared map."""
        for row in range(y - 20, y + 20):
            for column in range(x - 20, x + 20):
                Locators.all_coordinates[row][column] = self.id

    def update(self):
        """Advance one step and bounce off the screen edges."""
        print(f"{self.id} {self.type} {self.image}")
        self.rect.x += self.speed_x
        self.rect.y += self.speed_y
        # Reflect whichever velocity component points out of the screen.
        if self.rect.left < 0:
            self.speed_x = abs(self.speed_x)
        elif self.rect.right > Locators.screen_width:
            self.speed_x = -abs(self.speed_x)
        elif self.rect.top < 0:
            self.speed_y = abs(self.speed_y)
        elif self.rect.bottom > Locators.screen_height:
            self.speed_y = -abs(self.speed_y)
| aksaule-bagytzhanova/game | Objects.py | Objects.py | py | 2,501 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pygame.sprite.Group",
"line... |
72515571067 | from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.chains import RetrievalQAWithSourcesChain, RetrievalQA
from langchain import OpenAI
from langchain.vectorstores import Chroma
import gradio as gr
from langchain.chains.question_answering import load_qa_chain
# QA chain that answers from each candidate document and re-ranks the answers.
qa_chain = load_qa_chain(OpenAI(temperature=0.7), chain_type="map_rerank")
import json
# create embedding from jsonloader
path = "./source/blog.json"
# load json and get lines and slug
# Module-level scratch lists for page texts and their source slugs.
pages = []
slugs = []
def load_json(path=path):
    """Load blog pages from a JSON dump and split them into Documents.

    The dump is expected to be ``{"pages": [...]}`` where each entry is a
    JSON-encoded string with ``lines`` (list of strings) and ``slug`` keys.
    Pages whose joined text is empty are skipped.

    :param path: path to the JSON dump (defaults to the module-level path)
    :return: list of langchain ``Document`` chunks, each carrying a
        ``{"source": slug}`` metadata dict
    """
    # BUG FIX: use local accumulators — the original appended to the
    # module-level ``pages``/``slugs`` lists, so calling load_json() more
    # than once duplicated every document.
    texts = []
    metadatas = []
    with open(path, "r") as f:
        raw = json.load(f)
    # get lines and slug
    for page_json in raw["pages"]:
        page = json.loads(page_json)
        # flatten the page to a single line of text
        text = " ".join(page["lines"])
        if text == "":
            continue
        texts.append(text)
        metadatas.append({"source": page["slug"]})
    splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=0)
    return splitter.create_documents(texts=texts, metadatas=metadatas)
def split_documents(docs):
    """Split langchain documents into roughly 1000-character chunks.

    Args:
        docs: iterable of langchain ``Document`` objects.

    Returns:
        list of ``Document`` chunks produced by ``CharacterTextSplitter``.
    """
    # split chunks
    splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    return splitter.split_documents(docs)
def get_embeddings_for_search(texts):
    """Embed the given documents with OpenAI and index them in Chroma.

    :param texts: list of langchain ``Document`` objects to index
    :return: a ``Chroma`` vector store ready to be used as a retriever
    """
    embeddings = OpenAIEmbeddings()
    # BUG FIX: Chroma.from_documents takes the embedding function through the
    # ``embedding`` keyword; the original ``embeddings=`` raises a TypeError.
    docsearch = Chroma.from_documents(texts, embedding=embeddings,
                                      #metadatas=[{"source": meta[i]} for i in range(len(meta))]
                                      )
    return docsearch
class Chain():
    """Thin wrapper tying a vector store retriever to the module QA chain."""

    def __init__(self, doc):
        """Build a RetrievalQA chain over *doc* (a vector store)."""
        self.doc = doc
        self.chain = RetrievalQA(
            combine_documents_chain=qa_chain,
            retriever=self.doc.as_retriever(),
            return_source_documents=True,
        )

    def get(self, query):
        """Run the chain and return (answer, newline-joined source URLs)."""
        response = self.chain({"query": query})
        links = [
            f'https://uni-3.app/{document.metadata["source"]}'
            for document in response["source_documents"]
        ]
        return response["result"], "\n".join(links)
def main():
    """Wire up the Gradio UI: a query box, an answer box, and a source list."""
    # get docsearch
    docs = load_json()
    # get embeddings
    #print("docs", docs)
    docsearch = get_embeddings_for_search(docs)
    # init chain
    c = Chain(docsearch)
    # launch gradio app
    with gr.Blocks() as demo:
        # input query then get_query
        input_query = gr.Textbox(label="Query")
        # show query result
        result = gr.Textbox(label="Result")
        source = gr.Markdown(label="source")
        b = gr.Button("run")
        # Clicking "run" pushes the query through the chain and fills both
        # the answer box and the source-link markdown.
        b.click(c.get, inputs=[input_query], outputs=[result, source])
    demo.launch()

if __name__ == '__main__':
    main()
| uni-3/gradio-apps | qa_retrieval/app.py | app.py | py | 2,753 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "langchain.chains.question_answering.load_qa_chain",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "langchain.OpenAI",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 22,
"usage_type": "call"
},
{
"api_... |
74977717307 | import csv
import os
from datetime import datetime
import logging
import re
from dipper.sources.PostgreSQLSource import PostgreSQLSource
from dipper.models.assoc.Association import Assoc
from dipper.models.assoc.G2PAssoc import G2PAssoc
from dipper.models.Genotype import Genotype
from dipper.models.Reference import Reference
from dipper.models.Model import Model
from dipper import config
from dipper.models.GenomicFeature import Feature, makeChromID
LOG = logging.getLogger(__name__)
class MGI(PostgreSQLSource):
"""
This is the
[Mouse Genome Informatics](http://www.informatics.jax.org/) resource,
from which we process genotype and phenotype data about laboratory mice.
Genotypes leverage the GENO genotype model.
Here, we connect to their public database, and download a subset of
tables/views to get specifically at the geno-pheno data,
then iterate over the tables. We end up effectively performing joins
when adding nodes to the graph.
In order to use this parser, you will need to have user/password connection
details in your conf.yaml file, like:
dbauth : {'mgi' : {'user' : '<username>', 'password' : '<password>'}}
You can request access by contacting mgi-help@jax.org
"""
# CONSIDER IF WE NEED:
# mgi_organism_acc_view:
# Consider using this for the taxon mapping instead of
# the hashmap encoded below
# mgi_reference_allele_view:
# Don't believe this view is used in either
# the genotype of phenotype view
# all_allele_cellline_view: When we want to start dealing with cell lines
# mgi_note_strain_view: prose descriptions of strains.
# prb_strain_summary_view:
# Don't believe this view is used in
# either the genotype of phenotype view
# prb_strain_marker_view:
# eventually i think we want this because
# it has other relevant markers that are affected
resources = {
'query_map': [
{
'query': '../../resources/sql/mgi/mgi_dbinfo.sql',
'outfile': 'mgi_dbinfo',
'Force': True
},
{
'query': '../../resources/sql/mgi/gxd_genotype_view.sql',
'outfile': 'gxd_genotype_view'
},
{
'query': '../../resources/sql/mgi/gxd_genotype_summary_view.sql',
'outfile': 'gxd_genotype_summary_view'
},
{
'query': '../../resources/sql/mgi/gxd_allelepair_view.sql',
'outfile': 'gxd_allelepair_view'
},
{
'query': '../../resources/sql/mgi/all_summary_view.sql',
'outfile': 'all_summary_view'
},
{
'query': '../../resources/sql/mgi/all_allele_view.sql',
'outfile': 'all_allele_view'
},
{
'query': '../../resources/sql/mgi/all_allele_mutation_view.sql',
'outfile': 'all_allele_mutation_view'
},
{
'query': '../../resources/sql/mgi/mrk_marker_view.sql',
'outfile': 'mrk_marker_view'
},
{
'query': '../../resources/sql/mgi/voc_annot_view.sql',
'outfile': 'voc_annot_view'
},
{
'query': '../../resources/sql/mgi/evidence.sql',
'outfile': 'evidence_view'
},
{
'query': '../../resources/sql/mgi/bib_acc_view.sql',
'outfile': 'bib_acc_view'
},
{
'query': '../../resources/sql/mgi/prb_strain_view.sql',
'outfile': 'prb_strain_view'
},
{
'query': '../../resources/sql/mgi/mrk_summary_view.sql',
'outfile': 'mrk_summary_view'
},
{
'query': '../../resources/sql/mgi/mrk_acc_view.sql',
'outfile': 'mrk_acc_view'
},
{
'query': '../../resources/sql/mgi/prb_strain_acc_view.sql',
'outfile': 'prb_strain_acc_view'
},
{
'query': '../../resources/sql/mgi/prb_strain_genotype_view.sql',
'outfile': 'prb_strain_genotype_view'
},
{
'query': '../../resources/sql/mgi/mgi_note_vocevidence_view.sql',
'outfile': 'mgi_note_vocevidence_view'
},
{
'query': '../../resources/sql/mgi/mgi_note_allele_view.sql',
'outfile': 'mgi_note_allele_view'
},
{
'query': '../../resources/sql/mgi/mrk_location_cache.sql',
'outfile': 'mrk_location_cache' # gene locations
}
],
'test_keys': '../../resources/mgi_test_keys.yaml'
}
# with an existing set of (fresh) files in the shell; we can get a head start with:
# for v in raw/mgi/*;do echo -e "\t\t'${v##*/}': \
# {\n\t\t\t'columns': [";head -1 $v|tr '\t' '\n'|sed "s/\(.*\)/\t\t\t\t'\1',/";done
tables = {
'all_allele_mutation_view': {
'columns': [
'_allele_key',
'mutation']},
'all_allele_view': {
'columns': [
'_allele_key',
'_marker_key',
'_strain_key',
'symbol',
'name',
'iswildtype']},
'all_summary_view': {
'columns': [
'_object_key',
'preferred',
'mgiid',
'description',
'short_description']},
'bib_acc_view': {
'columns': [
'accid',
'prefixpart',
'numericpart',
'_object_key',
'logicaldb',
'_logicaldb_key']},
'evidence_view': {
'columns': [
'_annotevidence_key',
'_annot_key',
'evidencecode',
'jnumid',
'term',
'value',
'annottype']},
'gxd_allelepair_view': {
'columns': [
'_allelepair_key',
'_genotype_key',
'_allele_key_1',
'_allele_key_2',
'allele1',
'allele2',
'allelestate']},
'gxd_genotype_summary_view': {
'columns': [
'_object_key',
'preferred',
'mgiid',
'subtype',
'short_description']},
'gxd_genotype_view': {
'columns': [
'_genotype_key',
'_strain_key',
'strain',
'mgiid']},
'mgi_note_allele_view': {
'columns': [
'_object_key',
'notetype',
'note',
'sequencenum']},
'mgi_note_vocevidence_view': {
'columns': [
'_object_key',
'note']},
'mgi_relationship_transgene_genes': {
'columns': [
'rel_key',
'object_1',
'allele_id',
'allele_label',
'category_key',
'category_name',
'property_key',
'property_name',
'property_value']},
'mrk_acc_view': {
'columns': [
'accid',
'prefixpart',
'_logicaldb_key',
'_object_key',
'preferred',
'_organism_key']},
'mrk_location_cache': {
'columns': [
'_marker_key',
'_organism_key',
'chromosome',
'startcoordinate',
'endcoordinate',
'strand',
'version']},
'mrk_marker_view': {
'columns': [
'_marker_key',
'_organism_key',
'_marker_status_key',
'symbol',
'name',
'latinname',
'markertype']},
'mrk_summary_view': {
'columns': [
'accid',
'_logicaldb_key',
'_object_key',
'preferred',
'mgiid',
'subtype',
'short_description']},
'prb_strain_acc_view': {
'columns': [
'accid',
'prefixpart',
'_logicaldb_key',
'_object_key',
'preferred']},
'prb_strain_genotype_view': {
'columns': [
'_strain_key',
'_genotype_key']},
'prb_strain_view': {
'columns': [
'_strain_key',
'strain',
'species']},
'voc_annot_view': {
'columns': [
'_annot_key',
'annottype',
'_object_key',
'_term_key',
'_qualifier_key',
'qualifier',
'term',
'accid']},
}
# For ambiguous/undefined taxa terms that will
# conflict with seq alt_type portion of local tt
unknown_taxa = [
'Not Applicable',
'Not Specified',
]
# for testing purposes, this is a list of internal db keys
# to match and select only portions of the source
def __init__(
        self,
        graph_type,
        are_bnodes_skolemized,
        data_release_version=None
):
    """Initialize the MGI source and its internal-key lookup caches.

    :param graph_type: which graph implementation to build into
    :param are_bnodes_skolemized: whether blank nodes should be skolemized
    :param data_release_version: optional explicit data release version
    """
    super().__init__(
        graph_type=graph_type,
        are_bnodes_skolemized=are_bnodes_skolemized,
        data_release_version=data_release_version,
        name='mgi',
        ingest_title='Mouse Genome Informatics',
        ingest_url='http://www.informatics.jax.org/',
        ingest_logo="source-mgi.png",
        license_url=None,
        data_rights='http://www.informatics.jax.org/mgihome/other/copyright.shtml',
        file_handle=None)
    # so that we don't have to deal with BNodes,
    # we will create hash lookups
    # for the internal identifiers the hash will hold
    # the type-specific-object-keys to MGI public identifiers.
    # then, subsequent views of the table will lookup the identifiers
    # in the hash. this allows us to do the 'joining' on the fly
    self.idhash = {
        'allele': {}, 'marker': {}, 'publication': {}, 'strain': {},
        'genotype': {}, 'annot': {}, 'notes': {}, 'seqalt': {}}
    # to store if a marker is a class or indiv
    self.markers = {
        'classes': [], 'indiv': []}
    # use this to store internally generated labels for various features
    self.label_hash = {}
    # use this to store the genotype strain ids
    # for building genotype labels
    self.geno_bkgd = {}
    self.strain_to_genotype_map = {}
    self.wildtype_alleles = set()
    # also add the gene ids from the test_ids
    # in order to capture transgenes of the test set
    if 'gene' in self.all_test_ids:
        self.test_ids = self.all_test_ids['gene']
    else:
        LOG.warning("not configured with gene test ids.")
        self.test_ids = []
    self.test_keys = self.open_and_parse_yaml(self.resources['test_keys'])
def fetch(self, is_dl_forced=False):
    """
    For the MGI resource, we connect to the remote database,
    and pull the tables into local files.
    We'll check the local table versions against the remote version

    :param is_dl_forced: accepted for interface compatibility; fetching
        always re-runs the canned queries
    :return:
    """
    # check if config exists; if it doesn't, error out and let user know
    # BUG FIX: the original used ``and``, which never warned when only the
    # 'mgi' entry was missing and raised KeyError when 'dbauth' itself was
    # absent; ``or`` with short-circuit evaluation does the intended check.
    if 'dbauth' not in config.get_config() or \
            'mgi' not in config.get_config()['dbauth']:
        LOG.error("not configured with PG user/password.")
    # create the connection details for MGI
    cxn = config.get_config()['dbauth']['mgi']
    pg_iri = ''.join((
        'jdbc:postgresql://', cxn['host'], ':', str(cxn['port']), '/',
        cxn['database']))
    self.dataset.set_ingest_source(pg_iri)
    self.dataset.set_ingest_source_file_version_retrieved_on(
        pg_iri,
        datetime.today().strftime('%Y-%m-%d'))
    # process the tables
    # self.fetch_from_pgdb(self.tables, cxn, 100)  # for testing only
    # self.fetch_from_pgdb(self.tables, cxn, None, is_dl_forced)
    for query_map in self.resources['query_map']:
        # BUG FIX: close the query file deterministically (it was leaked)
        with open(os.path.join(
                os.path.dirname(__file__), query_map['query']), 'r') as query_fh:
            query = query_fh.read()
        self.fetch_query_from_pgdb(
            query_map['outfile'], query, None, cxn)
    # always get this - it has the version info
    self.fetch_transgene_genes_from_db(cxn)
    datestamp = ver = None
    # get the resource version information from
    # table mgi_dbinfo, already fetched above
    outfile = '/'.join((self.rawdir, 'mgi_dbinfo'))
    if os.path.exists(outfile):
        with open(outfile, 'r') as reader:
            reader.readline()  # read the header row; skip
            info = reader.readline()
        cols = info.split('\t')
        ver = cols[0]  # col 0 is public_version
        ver = ver.replace('MGI ', '')  # MGI 5.20 --> 5.20
        # MGI has a datestamp for the data within the database;
        # use it instead of the download date
        # datestamp in the table: 2014-12-23 00:14:20[.12345]
        # modification date without micro seconds
        dat = cols[1].strip().split('.')[0]
        datestamp = datetime.strptime(
            dat, "%Y-%m-%d %H:%M:%S").strftime("%Y-%m-%d")
    self.dataset.set_ingest_source_file_version_num(pg_iri, ver)
    self.dataset.set_ingest_source_file_version_date(pg_iri, datestamp)
def parse(self, limit=None):
    """
    We process each of the postgres tables in turn.
    The order of processing is important here, as we build
    up a hashmap of internal vs external identifiers
    (unique keys by type to MGI id). These include allele, marker (gene),
    publication, strain, genotype, annotation (association),
    and descriptive notes.

    :param limit: Only parse this many rows in each table
    :return:
    """
    if limit is not None:
        LOG.info("Only parsing first %d rows of each file", limit)
    LOG.info("Parsing files...")
    if self.test_only:
        self.test_mode = True
    # the following will provide us the hash-lookups
    # These must be processed in a specific order
    self._process_prb_strain_acc_view(limit)
    self._process_mrk_acc_view()
    self._process_all_summary_view(limit)
    self._process_bib_acc_view(limit)
    self._process_gxd_genotype_summary_view(limit)
    # The following will use the hash populated above
    # to lookup the ids when filling in the graph
    self._process_prb_strain_view(limit)
    # self._process_prb_strain_genotype_view(limit)
    self._process_gxd_genotype_view(limit)
    self._process_mrk_marker_view(limit)
    self._process_mrk_acc_view_for_equiv(limit)
    self._process_mrk_summary_view(limit)
    self._process_all_allele_view(limit)
    self._process_all_allele_mutation_view(limit)
    self._process_gxd_allele_pair_view(limit)
    self._process_voc_annot_view(limit)
    self._process_evidence_view(limit)
    self._process_mgi_note_vocevidence_view(limit)
    self._process_mrk_location_cache(limit)
    self.process_mgi_relationship_transgene_genes(limit)
    self.process_mgi_note_allele_view(limit)
    LOG.info("Finished parsing.")
    LOG.info("Loaded %d nodes", len(self.graph))
def fetch_transgene_genes_from_db(self, cxn):
    """
    This is a custom query to fetch the non-mouse genes that
    are part of transgene alleles.

    :param cxn: postgres connection details (dict with host/port/user/...)
    :return: None; dumps the result to the 'mgi_relationship_transgene_genes'
        raw file via fetch_query_from_pgdb
    """
    # NOTE(review): 12948292 ('Non-mouse_Gene') and 1004 (transgene category)
    # are hard-coded MGI database keys — verify against the current schema.
    query = '''
    SELECT r._relationship_key as rel_key,
        r._object_key_1 as object_1,
        a.accid as allele_id,
        alabel.label as allele_label,
        rc._category_key as category_key,
        rc.name as category_name,
        t._term_key as property_key,
        t.term as property_name,
        rp.value as property_value
    FROM mgi_relationship r
    JOIN mgi_relationship_category rc ON r._category_key = rc._category_key
    JOIN acc_accession a ON r._object_key_1 = a._object_key
        AND rc._mgitype_key_1 = a._mgitype_key
        AND a._logicaldb_key = 1
    JOIN all_label alabel ON a._object_key = alabel._allele_key
        AND alabel._label_status_key = 1
        AND alabel.priority = 1
    JOIN mgi_relationship_property rp ON r._relationship_key = rp._relationship_key
        AND rp._propertyname_key = 12948292
    JOIN voc_term t ON rp._propertyname_key = t._term_key
    WHERE r._category_key = 1004
    '''
    self.fetch_query_from_pgdb(
        'mgi_relationship_transgene_genes', query, None, cxn)
def _process_gxd_genotype_view(self, limit=None):
    """
    This table indicates the relationship between a genotype
    and it's background strain. It leverages the Genotype class methods
    to do this.

    Makes these triples:
    <MGI:genotypeid> GENO:has_reference_part <MGI:strainid>
    <MGI:strainid> a GENO:genomic_background

    If the genotype id isn't in the hashmap, it adds it here
    (but this shouldn't happen):
    <MGI:genotypeid> a GENO:genotype

    If the strain isn't in the hashmap, it also adds it here with a
    monarchized identifier using the unique key of the strain,
    formatted like: :_mgistrainkey12345

    :param limit: maximum number of data rows to process (None = all)
    :return:
    """
    src_key = 'gxd_genotype_view'
    line_num = 0
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    geno = Genotype(graph)
    model = Model(graph)
    col = self.tables[src_key]['columns']
    raw = '/'.join((self.rawdir, src_key))
    LOG.info("getting genotypes and their backgrounds")
    with open(raw, 'r') as reader:
        row = reader.readline().rstrip("\n").split('\t')
        # header sanity check; result is intentionally ignored
        if not self.check_fileheader(col, row, src_key):
            pass
        for line in reader:
            line = line.rstrip("\n")
            row = line.split('\t')
            line_num += 1
            genotype_key = row[col.index('_genotype_key')].strip()
            strain_key = row[col.index('_strain_key')].strip()
            strain = row[col.index('strain',)].strip()
            mgiid = row[col.index('mgiid')].strip()
            if self.test_mode is True:
                if int(genotype_key) not in self.test_keys.get('genotype'):
                    continue
            if self.idhash['genotype'].get(genotype_key) is None:
                # just in case we haven't seen it before,
                # catch and add the id mapping here
                self.idhash['genotype'][genotype_key] = mgiid
                geno.addGenotype(mgiid, None)
                # the label is elsewhere...
                # need to add the MGI label as a synonym
            # if it's in the hash,
            # assume that the individual was created elsewhere
            strain_id = self.idhash['strain'].get(strain_key)
            background_type = self.globaltt['genomic_background']
            if strain_id is None or int(strain_key) < 0:
                if strain_id is None:
                    # some of the strains don't have public identifiers!
                    # so we make one up, and add it to the hash
                    strain_id = self._make_internal_identifier('strain', strain_key)
                    self.idhash['strain'].update({strain_key: strain_id})
                    model.addComment(strain_id, "strain_key:" + strain_key)
                elif int(strain_key) < 0:
                    # these are ones that are unidentified/unknown.
                    # so add instances of each.
                    strain_id = self._make_internal_identifier(
                        'strain', re.sub(r':', '', str(strain_id)))
                    strain_id += re.sub(r':', '', str(mgiid))
                    strain_id = re.sub(r'^_', '_:', strain_id)
                    strain_id = re.sub(r'::', ':', strain_id)
                    model.addDescription(
                        strain_id,
                        "This genomic background is unknown. " +
                        "This is a placeholder background for " +
                        mgiid + "."
                    )
                    background_type = self.globaltt[
                        'unspecified_genomic_background']
                # add it back to the idhash
                LOG.info(
                    "adding background as internal id: %s %s: %s",
                    strain_key, strain, strain_id)
            geno.addGenomicBackgroundToGenotype(
                strain_id, mgiid, background_type)
            self.label_hash[strain_id] = strain
            # add BG to a hash so we can build the genotype label later
            self.geno_bkgd[mgiid] = strain_id
            if not self.test_mode and limit is not None and line_num > limit:
                break
def _process_gxd_genotype_summary_view(self, limit=None):
    """
    Add the genotype internal id to mgiid mapping to the idhashmap.
    Also, add them as individuals to the graph.
    We re-format the label to put the background strain in brackets
    after the gvc.

    We must pass through the file once to get the ids and
    aggregate the vslcs into a hashmap into the genotype

    Triples created:
    <genotype id> a GENO:intrinsic_genotype
    <genotype id> rdfs:label "<gvc> [bkgd]"

    :param limit: maximum number of data rows to process (None = all)
    :return:
    """
    src_key = 'gxd_genotype_summary_view'
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    line_num = 0
    geno_hash = {}  # mgiid -> {'vslcs': [...], 'subtype': ..., 'key': ...}
    raw = '/'.join((self.rawdir, src_key))
    LOG.info("building labels for genotypes")
    col = self.tables[src_key]['columns']
    with open(raw, 'r') as reader:
        row = reader.readline().rstrip("\n").split('\t')
        # header sanity check; result is intentionally ignored
        if not self.check_fileheader(col, row, src_key):
            pass
        for line in reader:
            line = line.rstrip("\n")
            line_num += 1
            row = line.split('\t')
            object_key = row[col.index('_object_key')].strip()
            preferred = row[col.index('preferred')].strip()
            mgiid = row[col.index('mgiid')].strip()
            subtype = row[col.index('subtype')].strip()
            short_description = row[col.index('short_description')].strip()
            if self.test_mode is True:
                if int(object_key) not in self.test_keys.get('genotype'):
                    continue
            # add the internal genotype to mgi mapping
            self.idhash['genotype'][object_key] = mgiid
            if preferred == '1':
                # normalize allele-pair separators: commas become slashes
                d = re.sub(r'\,', '/', short_description.strip())
                if mgiid not in geno_hash:
                    geno_hash[mgiid] = {'vslcs': [d], 'subtype': subtype,
                                        'key': object_key}
                else:
                    vslcs = geno_hash[mgiid].get('vslcs')
                    vslcs.append(d)
            else:
                pass
                # TODO what to do with != preferred
            if not self.test_mode and limit is not None and line_num > limit:
                break
    # now, loop through the hash and add the genotypes as individuals
    # we add the mgi genotype as a label
    # (we generate our own label later and add as a synonym)
    geno = Genotype(graph)
    for gt in geno_hash:
        genotype = geno_hash.get(gt)
        gvc = sorted(genotype.get('vslcs'))
        label = '; '.join(gvc) + ' [' + genotype.get('subtype') + ']'
        model.addComment(
            gt, self._make_internal_identifier(
                'genotype', genotype.get('key')
            )
        )
        geno.addGenotype(gt, label.strip())
def _process_all_summary_view(self, limit):
    """
    Here, we get the allele definitions: id, label, description, type
    We also add the id to this source's global idhash for lookup later

    <alleleid> a OWL:NamedIndividual
        rdfs:label "allele symbol"
        dc:description "long allele name"

    :param limit: maximum number of data rows to process (None = all)
    :return:
    """
    src_key = 'all_summary_view'
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    line_num = 0
    raw = '/'.join((self.rawdir, src_key))
    col = self.tables[src_key]['columns']
    col_len = len(col)
    LOG.info(
        "alleles with labels and descriptions from all_summary_view")
    with open(raw, 'r') as reader:
        row = reader.readline().rstrip("\n").split('\t')
        # header sanity check; result is intentionally ignored
        if not self.check_fileheader(col, row, src_key):
            pass
        # head -1 workspace/build-mgi-ttl/dipper/raw/mgi/all_summary_view|\
        # tr '\t' '\n' | grep -n . | \
        # awk -F':' '{col=$1;$1="";print $0,",\t #" col}'
        for line in reader:
            line = line.rstrip("\n")
            line_num += 1
            row = line.split('\t')
            # bail if the row is malformed
            if col_len != len(row):
                LOG.warning('Expected %i columns.', col_len)
                LOG.warning('Received %i columns.', len(row))
                LOG.warning(line.format())
                continue
            # no stray tab in the description column
            object_key = row[col.index('_object_key')].strip()
            preferred = row[col.index('preferred')].strip()
            mgiid = row[col.index('mgiid')].strip()
            description = row[col.index('description')].strip()
            short_description = row[col.index('short_description')].strip()
            # NOTE: May want to filter alleles based on the preferred field
            # (preferred = 1) or will get duplicates
            # (24288, to be exact...
            # Reduced to 480 if filtered on preferred = 1)
            if self.test_mode is True:
                if int(object_key) not in self.test_keys.get('allele'):
                    continue
            # we are setting the allele type to None,
            # so that we can add the type later
            # since we don't actually know
            # if it's a reference or altered allele
            # altype = None  # temporary; we'll assign the type later
            # set type to a parent term incase a more specific term is not found
            altype = self.globaltt['allele']
            # If we want to filter on preferred:
            if preferred == '1':
                # add the allele key to the hash for later lookup
                self.idhash['allele'][object_key] = mgiid
                # TODO consider not adding the individuals in this one
                model.addIndividualToGraph(
                    mgiid, short_description.strip(), altype, description.strip()
                )
                self.label_hash[mgiid] = short_description.strip()
            # TODO deal with non-preferreds, are these deprecated?
            if not self.test_mode and limit is not None and line_num > limit:
                break
def _process_all_allele_view(self, limit):
    """
    Add the allele as a variant locus (or reference locus if wild-type).
    If the marker is specified, we add the link to the marker.
    We assume that the MGI ids are available in the idhash,
    added in all_summary_view.
    We add the sequence alteration as a BNode here, if there is a marker.
    Otherwise, the allele itself is a sequence alteration.

    Triples:
    <MGI:allele_id> a GENO:variant_locus
        OR GENO:reference_locus
        OR GENO:sequence_alteration IF no marker_id specified.

        [GENO:has_variant_part OR GENO:has_reference_part] <MGI:marker_id>
        GENO:derived_from <MGI:strain_id>
        GENO:has_variant_part <_seq_alt_id>
    <_seq_alt_id> a GENO:sequence_alteration
        derives_from <strain_id>

    :param limit: maximum number of data rows to process (None = all)
    :return:
    """
    src_key = 'all_allele_view'
    # transmission_key -> inheritance? Need to locate related table.
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    geno = Genotype(graph)
    line_num = 0
    LOG.info(
        "adding alleles, mapping to markers, extracting their "
        "sequence alterations from all_allele_view")
    raw = '/'.join((self.rawdir, src_key))
    col = self.tables[src_key]['columns']
    col_len = len(col)
    with open(raw, 'r') as reader:
        row = reader.readline().rstrip("\n").split('\t')
        # header sanity check; result is intentionally ignored
        if not self.check_fileheader(col, row, src_key):
            pass
        for line in reader:
            line = line.rstrip("\n")
            line_num += 1
            row = line.split('\t')
            # bail if the row is malformed
            if col_len != len(row):
                LOG.warning('Expected %i columns.', col_len)
                LOG.warning('Received %i columns.', len(row))
                LOG.warning(line.format())
                continue
            allele_key = row[col.index('_allele_key')].strip()
            marker_key = row[col.index('_marker_key')].strip()
            strain_key = row[col.index('_strain_key')].strip()
            symbol = row[col.index('symbol')].strip()
            name = row[col.index('name')].strip()
            iswildtype = row[col.index('iswildtype')].strip()
            # TODO update processing to use this view better
            # including jnums!
            if self.test_mode is True and \
                    int(allele_key) not in self.test_keys.get('allele'):
                continue
            # so are allele_key ints or not? -TEC
            allele_id = self.idhash['allele'].get(allele_key)
            if allele_id is None:
                LOG.error(
                    "what to do! can't find allele_id. skipping %s %s",
                    allele_key, symbol)
                continue
            marker_id = None
            if marker_key is not None and marker_key != '':
                # we make the assumption here that the markers
                # have already been added to the table
                marker_id = self.idhash['marker'].get(marker_key)
                if marker_id is None:
                    LOG.error(
                        "what to do! can't find marker_id. skipping %s %s",
                        marker_key, symbol)
                    continue
            iseqalt_id = self._make_internal_identifier('seqalt', allele_key)
            # for non-wild type alleles:
            if iswildtype == '0':
                locus_type = self.globaltt['variant_locus']
                locus_rel = self.globaltt['is_allele_of']
            # for wild type alleles:
            elif iswildtype == '1':
                locus_type = self.globaltt['reference_locus']
                locus_rel = self.globaltt['is_reference_allele_of']
                # add the allele to the wildtype set for lookup later
                self.wildtype_alleles.add(allele_id)
            else:
                locus_rel = None
                locus_type = None
            model.addIndividualToGraph(allele_id, symbol, locus_type)
            model.makeLeader(allele_id)
            self.label_hash[allele_id] = symbol
            self.idhash['seqalt'][allele_key] = iseqalt_id
            # HACK - if the label of the allele == marker,
            # then make the thing a seq alt
            allele_label = self.label_hash.get(allele_id)
            marker_label = self.label_hash.get(marker_id)
            if allele_label is not None and allele_label == marker_label:
                # model.addSameIndividual(allele_id, marker_id)
                # this causes disjoint category violations, see
                # https://github.com/monarch-initiative/dipper/issues/519
                self.idhash['seqalt'][allele_key] = allele_id
            model.addComment(
                allele_id,
                self._make_internal_identifier('allele', allele_key)
            )
            if marker_id is not None:
                # marker_id will be none if the allele
                # is not linked to a marker
                # (as in, it's not mapped to a locus)
                geno.addAlleleOfGene(allele_id, marker_id, locus_rel)
            # sequence alteration in strain
            if iswildtype == '0':
                sa_label = symbol
                sa_id = iseqalt_id
                if marker_key is not None \
                        and allele_label != marker_label and marker_key != '':
                    # sequence alteration has label reformatted(symbol)
                    if re.match(r".*<.*>.*", symbol):
                        sa_label = re.sub(r".*<", "<", symbol)
                    elif re.match(r"\+", symbol):
                        # TODO: Check to see if this is the proper handling
                        # as while symbol is just +,
                        # marker symbol has entries without any <+>.
                        sa_label = '<+>'
                    geno.addSequenceAlterationToVariantLocus(iseqalt_id,
                                                             allele_id)
                else:
                    # make the sequence alteration == allele
                    sa_id = allele_id
                # else this will end up adding the non-located transgenes
                # as sequence alterations also removing the < and > from sa
                sa_label = re.sub(r'[\<\>]', '', sa_label)
                geno.addSequenceAlteration(sa_id, sa_label, None, name)
                self.label_hash[sa_id] = sa_label
            strain_id = self.idhash['strain'].get(strain_key)
            # scrub out if the strain is "not specified"
            if strain_id is not None and \
                    strain_id not in ['MGI:4867032', 'MGI:5649511']:
                geno.addSequenceDerivesFrom(allele_id, strain_id)
            if not self.test_mode and limit is not None and line_num > limit:
                break
def _process_gxd_allele_pair_view(self, limit):
    """
    Build variant single locus complements (VSLCs) per genotype, then
    assemble genomic variation complements (GVCs) and genotype labels.

    This assumes that the genotype and alleles
    have already been added to the id hashmap.
    We use the Genotype methods to add all the parts we need.
    Triples added:
    <genotype_id> has_part <vslc>
    <vslc> has_part <allele1>
    <vslc> has_part <allele2>
    <vslc> has_zygosity <zygosity>

    :param limit: optional cap on the number of data rows processed
    :return:

    """
    src_key = 'gxd_allelepair_view'
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    geno = Genotype(graph)
    line_num = 0
    raw = '/'.join((self.rawdir, src_key))
    col = self.tables[src_key]['columns']
    LOG.info("processing allele pairs (VSLCs) for genotypes")
    # genotype_id -> set of VSLC ids; consumed by the labeling loop below
    geno_hash = {}
    with open(raw, 'r') as reader:
        row = reader.readline().rstrip("\n").split('\t')
        if not self.check_fileheader(col, row, src_key):
            pass
        for line in reader:
            line = line.rstrip("\n")
            line_num += 1
            row = line.split('\t')
            allelepair_key = row[col.index('_allelepair_key')].strip()
            genotype_key = row[col.index('_genotype_key')].strip()
            allele_key_1 = row[col.index('_allele_key_1')].strip()
            allele_key_2 = row[col.index('_allele_key_2')].strip()
            allele1 = row[col.index('allele1')].strip()
            allele2 = row[col.index('allele2')].strip()
            allelestate = row[col.index('allelestate')].strip()
            # NOTE: symbol = gene/marker,
            # allele1 + allele2 = VSLC,
            # allele1/allele2 = variant locus,
            # allelestate = zygosity
            # FIXME Need to handle alleles not in the *<*> format,
            # incl gene traps, induced mut, & transgenics
            if self.test_mode is True:
                if int(genotype_key) not in self.test_keys.get('genotype'):
                    continue
            genotype_id = self.idhash['genotype'].get(genotype_key)
            # NOTE: a None key may be inserted here before the None check
            # below; the labeling loop skips it explicitly
            if genotype_id not in geno_hash:
                geno_hash[genotype_id] = set()
            if genotype_id is None:
                LOG.error(
                    "genotype_id not found for key %s; skipping", genotype_key)
                continue
            allele1_id = self.idhash['allele'].get(allele_key_1)
            allele2_id = self.idhash['allele'].get(allele_key_2)
            # Need to map the allelestate to a zygosity term
            zygosity_id = self.resolve(allelestate.strip())
            ivslc_id = self._make_internal_identifier('vslc', allelepair_key)
            geno_hash[genotype_id].add(ivslc_id)
            # TODO: VSLC label likely needs processing similar to
            # the processing in the all_allele_view
            # FIXME: handle null alleles
            vslc_label = allele1 + '/'
            if allele2_id is None:
                # second allele missing: decide the label suffix by zygosity
                if zygosity_id in [
                        self.globaltt['hemizygous insertion-linked'],
                        self.globaltt['hemizygous-x'],
                        self.globaltt['hemizygous-y'],
                        self.globaltt['hemizygous'],
                ]:
                    vslc_label += '0'
                elif zygosity_id == self.globaltt['heterozygous']:
                    vslc_label += '+'
                elif zygosity_id == self.globaltt['indeterminate']:
                    vslc_label += '?'
                elif zygosity_id == self.globaltt['heteroplasmic']:
                    vslc_label += '?'  # todo is there anything else to add here?
                elif zygosity_id == self.globaltt['homoplasmic']:
                    vslc_label += '?'  # todo is there anything else to add here?
                elif zygosity_id == self.globaltt['homozygous']:
                    # we shouldn't get here, but for testing this is handy
                    vslc_label += allele1
                else:
                    LOG.info(
                        "A different kind of zygosity found is: %s",
                        self.globaltcid[zygosity_id])
                    vslc_label += '?'
            else:
                vslc_label += allele2
            model.addIndividualToGraph(
                ivslc_id,
                vslc_label,
                self.globaltt['variant single locus complement']
            )
            self.label_hash[ivslc_id] = vslc_label
            # wildtype alleles attach as reference parts, variants as variant parts
            rel1 = rel2 = self.globaltt['has_variant_part']
            if allele1_id in self.wildtype_alleles:
                rel1 = self.globaltt['has_reference_part']
            if allele2_id in self.wildtype_alleles:
                rel2 = self.globaltt['has_reference_part']
            geno.addPartsToVSLC(
                ivslc_id, allele1_id, allele2_id, zygosity_id, rel1, rel2
            )
            # if genotype_id not in geno_hash:
            #     geno_hash[genotype_id] = [vslc_label]
            # else:
            #     geno_hash[genotype_id] += [vslc_label]
            if not self.test_mode and limit is not None and line_num > limit:
                break
    # build the gvc and the genotype label
    for gt in geno_hash.keys():
        if gt is None:  # not sure why, but sometimes this is the case
            continue
        vslcs = sorted(list(geno_hash[gt]))
        gvc_label = None
        if len(vslcs) > 1:
            # multi-VSLC genotype: mint a deterministic GVC id from the parts
            gvc_id = re.sub(r'_', '', ('-'.join(vslcs)))
            gvc_id = re.sub(r':', '', gvc_id)
            gvc_id = self.make_id(gvc_id, '_')
            vslc_labels = []
            for v in vslcs:
                vslc_labels.append(self.label_hash[v])
            gvc_label = '; '.join(vslc_labels)
            model.addIndividualToGraph(
                gvc_id, gvc_label, self.globaltt['genomic_variation_complement'])
            self.label_hash[gvc_id] = gvc_label
            for v in vslcs:
                geno.addParts(v, gvc_id, self.globaltt['has_variant_part'])
                geno.addVSLCtoParent(v, gvc_id)
            geno.addParts(gvc_id, gt, self.globaltt['has_variant_part'])
        elif len(vslcs) == 1:
            gvc_id = vslcs[0]
            gvc_label = self.label_hash[gvc_id]
            # type the VSLC as also a GVC
            model.addIndividualToGraph(
                gvc_id, gvc_label, self.globaltt['genomic_variation_complement']
            )
            geno.addVSLCtoParent(gvc_id, gt)
        else:
            LOG.info("No VSLCs for %s", gt)
        # make the genotype label = gvc + background
        bkgd_id = self.geno_bkgd.get(gt)
        if bkgd_id is not None:
            bkgd_label = self.label_hash.get(bkgd_id)
            if bkgd_label is None:
                bkgd_label = bkgd_id  # just in case
        else:
            bkgd_label = 'unspecified background'
        if gvc_label is not None:
            genotype_label = gvc_label + ' [' + bkgd_label + ']'
        else:
            genotype_label = '[' + bkgd_label + ']'
        self.label_hash[gt] = genotype_label
def _process_all_allele_mutation_view(self, limit):
    """
    This fetches the mutation type for the alleles,
    and maps them to the sequence alteration.
    Note that we create a BNode for the sequence alteration because
    it isn't publicly identified.
    <sequence alteration id> a <SO:mutation_type>

    :param limit: optional cap on the number of data rows processed
    :return:

    """
    src_key = 'all_allele_mutation_view'
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    line_num = 0
    raw = '/'.join((self.rawdir, src_key))
    col = self.tables[src_key]['columns']
    LOG.info("getting mutation types for sequence alterations")
    with open(raw, 'r') as reader:
        row = reader.readline().rstrip("\n").split('\t')
        if not self.check_fileheader(col, row, src_key):
            pass
        for line in reader:
            line = line.rstrip("\n")
            line_num += 1
            row = line.split('\t')
            allele_key = row[col.index('_allele_key')].strip()
            mutation = row[col.index('mutation')].strip()
            iseqalt_id = self.idhash['seqalt'].get(allele_key)
            if iseqalt_id is None:
                continue
            # nothing will ever connect w/these 350k bnode "individuals"
            # iseqalt_id = self._make_internal_identifier('seqalt', allele_key)
            if self.test_mode and int(allele_key) \
                    not in self.test_keys.get('allele'):
                continue
            # TODO we might need to map the seq alteration to the MGI id
            # for unlocated things; need to use hashmap
            # map the sequence_alteration_type
            seq_alt_type_id = self.resolve(mutation, mandatory=False)
            if seq_alt_type_id == mutation:
                # resolve() echoes the input when no mapping exists
                LOG.error("No mapping found for seq alt '%s'", mutation)
                LOG.info("Defaulting to 'sequence_alteration'")
                seq_alt_type_id = self.globaltt['sequence_alteration']
            # HACK - if the seq alteration is a transgene,
            # then make sure it is a transgenic insertion
            allele_id = self.idhash['allele'].get(allele_key)
            if allele_id is not None:
                allele_label = self.label_hash.get(allele_id)
                if allele_label is not None and re.search(r'Tg\(', allele_label):
                    LOG.info(
                        "Found a transgenic insertion for %s", allele_label)
                    # transgenic_insertion, instead of plain old insertion
                    seq_alt_type_id = self.globaltt["transgenic_insertion"]
            model.addIndividualToGraph(iseqalt_id, None, seq_alt_type_id)
            if not self.test_mode and limit is not None and line_num > limit:
                break
def _process_voc_annot_view(self, limit):
    """
    This MGI table represents associations between things.
    We add the internal annotation id to the idhashmap.
    It is expected that the genotypes have already been added to the idhash

    :param limit: optional cap on the number of G2P rows processed
    :return:

    """
    # TODO also get Strain/Attributes (annottypekey = 1000)
    # TODO what is Phenotype (Derived) vs
    # non-derived? (annottypekey = 1015)
    # TODO is evidence in this table? what is the evidence vocab key?
    src_key = 'voc_annot_view'
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    line_num = 0
    LOG.info("getting G2P associations")
    raw = '/'.join((self.rawdir, src_key))
    col = self.tables[src_key]['columns']
    with open(raw, 'r') as reader:
        row = reader.readline().rstrip('\n').split('\t')
        if not self.check_fileheader(col, row, src_key):
            pass
        for line in reader:
            row = line.rstrip('\n').split('\t')
            annot_key = row[col.index('_annot_key')]
            annot_type = row[col.index('annottype')]
            object_key = row[col.index('_object_key')]
            term_key = row[col.index('_term_key')]
            qualifier_key = row[col.index('_qualifier_key')]
            qualifier = row[col.index('qualifier')]
            # term,
            accid = row[col.index('accid')]
            if self.test_mode is True:
                if int(annot_key) not in self.test_keys.get('annot'):
                    continue
            # qualifier of "norm" means the phenotype was measured but
            # was normal, since we don't have negation or normal phenotypes
            # modelled just yet, skip the row
            if qualifier == 'norm':
                continue
            # iassoc_id = self._make_internal_identifier('annot', annot_key)
            # assoc_id = self.make_id(iassoc_id)
            assoc_id = None
            # Mammalian Phenotype/Genotype are curated G2P assoc
            if annot_type == 'Mammalian Phenotype/Genotype':
                line_num += 1
                # We expect the label for the phenotype
                # to be taken care of elsewhere
                model.addClassToGraph(accid, None)
                genotype_id = self.idhash['genotype'].get(object_key)
                if genotype_id is None:
                    LOG.error(
                        "can't find genotype id for %s", object_key)
                else:
                    # add the association
                    assoc = G2PAssoc(graph, self.name, genotype_id, accid)
                    assoc.add_association_to_graph()
                    assoc_id = assoc.get_association_id()
            # OMIM/Genotype are disease-models
            elif annot_type == 'DO/Genotype':
                # skip NOT annotations for now FIXME
                if qualifier_key == '1614157':
                    continue
                genotype_id = self.idhash['genotype'].get(object_key)
                if genotype_id is None:
                    LOG.error("can't find genotype id for %s", object_key)
                else:
                    # add the association
                    assoc = Assoc(graph, self.name)
                    # TODO PYLINT
                    # Redefinition of assoc type from
                    # dipper.models.assoc.G2PAssoc.G2PAssoc to
                    # dipper.models.assoc.Association.Assoc
                    assoc.set_subject(genotype_id)
                    assoc.set_object(accid)
                    assoc.set_relationship(self.globaltt['is model of'])
                    assoc.add_association_to_graph()
                    assoc_id = assoc.get_association_id()
            elif annot_type == 'MCV/Marker':
                # marker category == type
                marker_id = self.idhash['marker'].get(object_key)
                if str(term_key).strip() in self.localtt:
                    # check "Not Applicable": "reference_locus"
                    term_id = self.resolve(str(term_key).strip())
                else:
                    term_id = None
                    # use the module-level LOG like the rest of the file,
                    # not the bare ``logging`` root logger
                    LOG.warning('No type mapping for: %s', term_key)
                # note that the accid here is an internal mouse cv term,
                # and we don't use it.
                if term_id is not None and marker_id is not None:
                    # do something special for transgenics -
                    # make sure these are transgenic insertions
                    model.addType(marker_id, term_id)
            elif annot_type == 'DO/Allele':  # allele/Disease
                allele_id = self.idhash['allele'].get(object_key)
                if allele_id is None:
                    LOG.error("can't find genotype id for %s", object_key)
                else:
                    # add the association
                    assoc = Assoc(graph, self.name)
                    assoc.set_subject(allele_id)
                    assoc.set_object(accid)
                    assoc.set_relationship(self.globaltt['is model of'])
                    assoc.add_association_to_graph()
                    assoc_id = assoc.get_association_id()
            if assoc_id is not None:
                # add the assoc to the hashmap (using the monarch id)
                self.idhash['annot'][annot_key] = assoc_id
                model.addComment(assoc_id, "annot_key:" + annot_key)
            if not self.test_mode and limit is not None and line_num > limit:
                break
def _process_evidence_view(self, limit):
    """
    Here we fetch the evidence (code and publication) for the associations.
    The evidence codes are mapped from the standard GO codes to ECO.
    J numbers are added for publications.
    We will only add the evidence if the annotation is in our idhash.

    We also pull in evidence qualifiers, as of June 2018 they are
    Data Interpretation Center (eg IMPC)
    external ref (eg UniProtKB:Q9JHI2-3 for Proteoform/Marker assoc)
    Phenotyping Center (eg WTSI)
    Resource Name (eg MGP)
    MP-Sex-Specificity (eg NA, M, F)

    Triples:
    <annot_id> dc:evidence <evidence_id>
    <pub_id> a owl:NamedIndividual
    <annot_id> dc:source <pub_id>

    :param limit: optional cap on the number of data rows processed
    :return:

    """
    src_key = 'evidence_view'
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    line_num = 0
    LOG.info("getting evidence and pubs for annotations")
    raw = '/'.join((self.rawdir, src_key))
    col = self.tables[src_key]['columns']
    with open(raw, 'r') as reader:
        line = reader.readline()
        line = line.rstrip("\n")
        row = line.split('\t')
        if not self.check_fileheader(col, row, src_key):
            pass
        for line in reader:
            line = line.rstrip("\n")
            row = line.split('\t')
            line_num += 1
            annot_evidence_key = row[col.index('_annotevidence_key')]
            annot_key = row[col.index('_annot_key')]
            evidence_code = row[col.index('evidencecode')]
            jnumid = row[col.index('jnumid')]
            qualifier = row[col.index('term')]
            qualifier_value = row[col.index('value')]
            # annotation_type = row[col.index('annottype')]
            # NOTE(review): other loaders cast the key with int() before
            # this membership test; annot_key here is a str — confirm
            # test_keys['annot'] contents match
            if self.test_mode and annot_key not in self.test_keys.get('annot'):
                continue
            # add the association id to map to the evidence key
            # (to attach the right note to the right assn)
            self.idhash['notes'][annot_evidence_key] = annot_key
            assoc_id = self.idhash['annot'].get(annot_key)
            if assoc_id is None:
                # assume that we only want to add the evidence/source
                # for annots that we have in our db
                continue
            evidence_id = self.resolve(evidence_code)
            reference = Reference(graph, jnumid)
            reference.addRefToGraph()
            # add the ECO and citation information to the annot
            model.addTriple(assoc_id, self.globaltt['has evidence'], evidence_id)
            model.addTriple(assoc_id, self.globaltt['Source'], jnumid)
            # For Mammalian Phenotype/Genotype annotation types
            # MGI adds sex specificity qualifiers here
            if qualifier == 'MP-Sex-Specificity' and qualifier_value in ('M', 'F'):
                # NOTE(review): calls a private Model method — confirm
                # a public equivalent does not exist
                model._addSexSpecificity(assoc_id, self.resolve(qualifier_value))
            if not self.test_mode and limit is not None and line_num > limit:
                break
def _process_bib_acc_view(self, limit):
    """
    This traverses the table twice:
    once to look up the internal key to J number mapping
    for the id hashmap then again to make the equivalences.
    All internal keys have both a J and MGI identifier.
    This will make equivalences between the different pub ids
    Triples:
    <pub_id> a owl:NamedIndividual
    <other_pub_id> a owl:NamedIndividual
    <pub_id> owl:sameAs <other_pub_id>

    :param limit: optional cap on the number of data rows processed per pass
    :return:

    """
    src_key = 'bib_acc_view'
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    # firstpass, get the J number mapping, and add to the global hash
    LOG.info('populating pub id hash')
    raw = '/'.join((self.rawdir, src_key))
    col = self.tables[src_key]['columns']
    with open(raw, 'r', encoding="utf8") as csvfile:
        filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        row = next(filereader)
        if not self.check_fileheader(col, row, src_key):
            pass
        for row in filereader:
            accid = row[col.index('accid')]
            prefixpart = row[col.index('prefixpart')]
            # 'numericpart'
            object_key = int(row[col.index('_object_key')])  # likely unstable
            # logicaldb = row[col.index('logicaldb')]
            # logicaldb_key = row[col.index('_logicaldb_key')]
            if self.test_mode and object_key not in self.test_keys.get('pub'):
                continue
            # we use the J number here because
            # it is the externally-accessible identifier
            if prefixpart != 'J:':
                continue
            self.idhash['publication'][object_key] = accid
            reference = Reference(graph, accid)
            reference.addRefToGraph()
            if not self.test_mode and limit is not None and \
                    filereader.line_num > limit:
                break
    # 2nd pass, look up the MGI identifier in the hash
    LOG.info("getting pub equivalent ids")
    with open(raw, 'r', encoding="utf8") as csvfile:
        filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        row = next(filereader)  # header already checked
        for row in filereader:
            accid = row[col.index('accid')]
            prefixpart = row[col.index('prefixpart')]
            # 'numericpart'
            object_key = int(row[col.index('_object_key')])
            logicaldb = row[col.index('logicaldb')].strip()
            logicaldb_key = row[col.index('_logicaldb_key')]
            if self.test_mode is True:
                if int(object_key) not in self.test_keys.get('pub'):
                    continue
            jid = self.idhash['publication'].get(object_key)
            pub_id = None
            if logicaldb_key == '29':  # pubmed
                pub_id = 'PMID:' + accid
            elif logicaldb_key == '1' and prefixpart[:4] == 'MGI:':
                # don't get the J numbers,
                # because we dont' need to make the equiv to itself.
                pub_id = accid
            elif logicaldb == 'Journal Link':
                # some DOIs seem to have spaces
                # FIXME MGI needs to FIX THESE UPSTREAM!!!!
                # we'll scrub them here for the time being
                accid = re.sub(r'\s+', '', accid)
                # some DOIs have un-urlencoded brackets <>
                accid = re.sub(r'<', '%3C', accid)
                accid = re.sub(r'>', '%3E', accid)
                pub_id = 'DOI:' + accid
            elif logicaldb_key == '1' and re.match(r'J:', prefixpart):
                # we can skip the J numbers
                continue
            if pub_id is not None:
                # only add these to the graph if
                # it's mapped to something we understand
                reference = Reference(graph, pub_id)
                # make the assumption that if it is a PMID, it is a journal
                if re.match(r'PMID', pub_id):
                    reference.setType(self.globaltt['journal article'])
                    model.makeLeader(pub_id)
                reference.addRefToGraph()
                model.addSameIndividual(jid, pub_id)
            else:
                LOG.warning(
                    "Publication from (%s) not mapped for %s",
                    logicaldb, object_key)
            if not self.test_mode and limit is not None and \
                    filereader.line_num > limit:
                break
def _process_prb_strain_view(self, limit):
    """
    Process a table to get strains (with internal ids), and their labels.
    These strains are created as instances of the species that they are.
    Triples:
    <strain id> a GENO:intrinsic_genotype
    rdfs:label "strain label"
    RO:in_taxon <NCBI taxon id>

    :param limit: optional cap on the number of data rows processed
    :return:

    """
    src_key = 'prb_strain_view'
    # Only 9 strain types if we want to map them
    # recombinant congenci, inbred strain, NA,
    # congenic, consomic, coisogenic,
    # recombinant inbred, NS, conplastic
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    geno = Genotype(graph)
    raw = '/'.join((self.rawdir, src_key))
    col = self.tables[src_key]['columns']
    LOG.info("getting strains and adding their taxa")
    with open(raw, 'r', encoding="utf8") as csvfile:
        reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        row = next(reader)
        if not self.check_fileheader(col, row, src_key):
            pass
        for row in reader:
            strain_key = row[col.index('_strain_key')].strip()
            strain = row[col.index('strain')].strip()
            species = row[col.index('species')].strip()
            if self.test_mode is True:
                if int(strain_key) not in self.test_keys.get('strain'):
                    continue
            strain_id = self.idhash['strain'].get(strain_key)
            if strain_id is not None:
                self.label_hash[strain_id] = strain
                # add the species to the graph as a class
                species = species.strip()
                # resolve() echoes the input when no taxon mapping exists
                sp = self.resolve(species, False)
                if sp == species:
                    LOG.error("No taxon mapping for " + species)
                    # they may tag a geo name on house mouse
                    if species[:17] == 'M. m. domesticus ':
                        LOG.warning("defaulting to Mus musculus")
                        sp = self.globaltt['Mus musculus']
                    else:
                        LOG.warning("defaulting to genus 'Mus'")
                        sp = self.globaltt['Mus']
                elif species in MGI.unknown_taxa:
                    LOG.warning("defaulting to genus 'Mus'")
                    sp = self.globaltt['Mus']
                model.addClassToGraph(sp, None)
                geno.addTaxon(sp, strain_id)
                model.addIndividualToGraph(strain_id, strain, sp)
            if not self.test_mode and limit is not None and reader.line_num > limit:
                break
def _process_mrk_marker_view(self, limit):
    """
    This is the definition of markers
    (as in genes, but other genomic loci types as well).
    It looks up the identifiers in the hashmap
    This includes their labels, specific class, and identifiers
    TODO should we use the mrk_mouse_view instead?

    Triples:
    <marker_id> a owl:Class OR owl:NamedIndividual
        GENO:marker_type
        rdfs:label <symbol>
        RO:in_taxon <NCBITaxon_id>

    :param limit: optional cap on the number of data rows processed
    :return:

    """
    src_key = 'mrk_marker_view'
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    geno = Genotype(graph)
    line_num = 0
    raw = '/'.join((self.rawdir, src_key))
    col = self.tables[src_key]['columns']
    LOG.info("getting markers and assigning types")
    with open(raw, 'r') as reader:
        row = reader.readline().rstrip("\n").split('\t')
        if not self.check_fileheader(col, row, src_key):
            pass
        for line in reader:
            line = line.rstrip("\n")
            line_num += 1
            (marker_key,
             organism_key,
             marker_status_key,
             symbol,
             name,
             latin_name,
             marker_type) = line.split('\t')
            if self.test_mode is True:
                if int(marker_key) not in self.test_keys.get('marker'):
                    continue
            # use only non-withdrawn markers
            if marker_status_key != '2':
                marker_id = self.idhash['marker'].get(marker_key)
                # only pull info for mouse genes for now
                # other species should come from other dbs
                if organism_key != '1':
                    continue
                if marker_id is None:
                    LOG.error(
                        "can't find %s %s in the id hash", marker_key, symbol)
                    # FIX: previously fell through and passed None into
                    # addClassToGraph/addSynonym; skip the row instead
                    continue
                # check "Not Applicable" -> "reference_locus"
                mapped_marker_type = self.resolve(marker_type.strip())
                # if it's unlocated, or is not a gene,
                # then don't add it as a class because
                # it's not added as a gene.
                # everything except for genes are modeled as individuals
                if mapped_marker_type in [
                        self.globaltt['gene'],
                        self.globaltt['pseudogene']]:
                    model.addClassToGraph(
                        marker_id, symbol, mapped_marker_type, name
                    )
                    model.addSynonym(
                        marker_id, name, self.globaltt['has_exact_synonym']
                    )
                    self.markers['classes'].append(marker_id)
                else:
                    model.addIndividualToGraph(
                        marker_id, symbol, mapped_marker_type, name
                    )
                    model.addSynonym(
                        marker_id, name, self.globaltt['has_exact_synonym']
                    )
                    self.markers['indiv'].append(marker_id)
                self.label_hash[marker_id] = symbol
                # add the taxon (default to Mus m.)
                # latin_name is not always a proper binomial
                if latin_name in MGI.unknown_taxa:  # localtt conflict
                    latin_name = 'Mus'
                taxon_id = self.resolve(
                    latin_name, default=self.globaltt['Mus musculus'])
                geno.addTaxon(taxon_id, marker_id)
                # make MGI the leader for mouse genes.
                if taxon_id == self.globaltt['Mus musculus']:
                    model.makeLeader(marker_id)
            if not self.test_mode and limit is not None \
                    and line_num > limit:
                break
def _process_mrk_summary_view(self, limit):
    """
    Here we pull the mgiid of the features, and make equivalent (or sameAs)
    associations to referenced ids.
    Only adding the ENSEMBL genes and NCBI gene ids.
    Will wait on other ids later.

    :param limit: optional cap on the number of data rows processed
    :return:

    """
    src_key = 'mrk_summary_view'
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    LOG.info("getting markers and equivalent ids from mrk_summary_view")
    line_num = 0
    raw = '/'.join((self.rawdir, src_key))
    col = self.tables[src_key]['columns']
    with open(raw, 'r') as reader:
        row = reader.readline().rstrip("\n").split('\t')
        if not self.check_fileheader(col, row, src_key):
            pass
        for line in reader:
            line = line.rstrip("\n")
            line_num += 1
            row = line.split('\t')
            accid = row[col.index('accid')].strip()
            logicaldb_key = row[col.index('_logicaldb_key')].strip()
            object_key = row[col.index('_object_key')].strip()
            preferred = row[col.index('preferred')].strip()
            mgiid = row[col.index('mgiid')].strip()
            subtype = row[col.index('subtype')].strip()
            short_description = row[col.index('short_description')].strip()
            if self.test_mode is True and \
                    int(object_key) not in self.test_keys.get('marker'):
                continue
            if preferred == '1':
                if self.idhash['marker'].get(object_key) is None:
                    # can't find the marker in the hash; add it here:
                    self.idhash['marker'][object_key] = mgiid
                    LOG.error(
                        "this marker hasn't been seen before %s %s",
                        mgiid, short_description)
                if accid == mgiid:
                    # don't need to make equivalences to itself
                    continue
                mapped_id = None
                if logicaldb_key == '60':
                    mapped_id = 'ENSEMBL:' + accid
                elif logicaldb_key == '1':
                    # don't need to add the equivalence to itself.
                    continue
                elif logicaldb_key == '55':
                    mapped_id = 'NCBIGene:' + accid
                if mapped_id is not None:
                    # genes get class-level equivalence, others sameAs
                    if mgiid in self.markers['classes'] \
                            or subtype in ['Gene', 'Pseudogene']:
                        model.addClassToGraph(mapped_id, None)
                        model.addEquivalentClass(mgiid, mapped_id)
                    elif mgiid in self.markers['indiv']:
                        model.addIndividualToGraph(mapped_id, None)
                        model.addSameIndividual(mgiid, mapped_id)
            # could parse the "subtype" string
            # to get the kind of thing the marker is
            if not self.test_mode and limit is not None and line_num > limit:
                break
def _process_mrk_acc_view(self):
    """
    Populate the map from MGI's internal marker key to the public mgiid.
    No triples are produced here; a second pass over the same file
    (see _process_mrk_acc_view_for_equiv) emits the equivalences.

    :return:

    """
    src_key = 'mrk_acc_view'
    # single pass: record the external id for every preferred MGI accession
    LOG.info("mapping markers to internal identifiers")
    file_path = '/'.join((self.rawdir, src_key))
    columns = self.tables[src_key]['columns']
    with open(file_path, 'r') as tsv:
        header = tsv.readline().rstrip("\n").split('\t')
        if not self.check_fileheader(columns, header, src_key):
            pass
        for raw_line in tsv:
            fields = raw_line.rstrip('\n').split('\t')
            accid = fields[columns.index('accid')]
            prefix_part = fields[columns.index('prefixpart')]
            logicaldb_key = fields[columns.index('_logicaldb_key')]
            object_key = fields[columns.index('_object_key')]
            preferred = fields[columns.index('preferred')]
            # = fields[columns.index('_organism_key')]
            if self.test_mode is True:
                if int(object_key) not in self.test_keys.get('marker'):
                    continue
            # keep only preferred MGI-prefixed accessions from logicaldb 1
            if (logicaldb_key, prefix_part, preferred) == ('1', 'MGI:', '1'):
                self.idhash['marker'][object_key] = accid
def _process_mrk_acc_view_for_equiv(self, limit):
    """
    Add the equivalences, either sameAs or equivalentClass,
    depending on the nature of the marker.
    We only process the ENSEMBL genes and NCBI gene ids.

    :param limit: optional cap on the number of data rows processed
    :return:

    """
    src_key = 'mrk_acc_view'
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    # pass through the file again,
    # and make the equivalence statements to a subset of the idspaces.
    # TODO verify the difference between what the
    # mrk_acc_view vs mrk_summary_view buys us here.
    # if nothing, then we should remove one or the other.
    LOG.info("mapping marker equivalent identifiers in mrk_acc_view")
    line_num = 0
    col = self.tables[src_key]['columns']
    with open('/'.join((self.rawdir, src_key)), 'r') as reader:
        row = reader.readline().rstrip("\n").split('\t')
        if not self.check_fileheader(col, row, src_key):
            pass
        for line in reader:
            line = line.rstrip("\n")
            line_num += 1
            row = line.split('\t')
            accid = row[col.index('accid')]
            prefix_part = row[col.index('prefixpart')]
            logicaldb_key = row[col.index('_logicaldb_key')]
            object_key = row[col.index('_object_key')]
            preferred = row[col.index('preferred')]
            organism_key = row[col.index('_organism_key')]
            if self.test_mode is True:
                if int(object_key) not in self.test_keys.get('marker'):
                    continue
            # right now not caring about other organisms
            # BUG FIX: fields from split('\t') are strings; the previous
            # comparison to the int 1 was always unequal, so every row
            # was skipped and no equivalences were ever emitted
            if organism_key != '1':
                continue
            mgiid = self.idhash['marker'].get(object_key)
            if mgiid is None:
                # presumably we've already added the relevant MGI ids,
                # so skip those that we can't find
                LOG.debug("can't find mgiid for %s", object_key)
                continue
            marker_id = None
            if preferred == '1':  # TODO what does it mean if it's 0?
                if logicaldb_key == '55':  # entrez/ncbi
                    marker_id = 'NCBIGene:' + accid
                elif logicaldb_key == '1' and prefix_part != 'MGI:':
                    marker_id = accid
                elif logicaldb_key == '60':
                    marker_id = 'ENSEMBL:' + accid
                # TODO get non-preferred ids==deprecated?
            if marker_id is not None:
                # genes get class-level equivalence, others sameAs
                if mgiid in self.markers['classes']:
                    model.addClassToGraph(marker_id, None)
                    model.addEquivalentClass(mgiid, marker_id)
                elif mgiid in self.markers['indiv']:
                    model.addIndividualToGraph(marker_id, None)
                    model.addSameIndividual(mgiid, marker_id)
                else:
                    LOG.error("mgiid not in class or indiv hash %s", mgiid)
            if not self.test_mode and limit is not None and line_num > limit:
                break
def _process_prb_strain_acc_view(self, limit):
    """
    Use this table to create the idmap between
    the internal marker id and the public mgiid.
    Also, add the equivalence statements between strains for MGI and JAX
    Triples:
    <strain_id> a GENO:intrinsic genotype
    <other_strain_id> a GENO:intrinsic_genotype
    <strain_id> owl:sameAs <other_strain_id>

    :param limit: optional cap on the number of data rows processed per pass
    :return:

    """
    src_key = 'prb_strain_acc_view'
    # make a pass through the table first,
    # to create the mapping between the external and internal identifiers
    line_num = 0
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    LOG.info("mapping strains to internal identifiers")
    raw = '/'.join((self.rawdir, src_key))
    col = self.tables[src_key]['columns']
    tax_id = self.globaltt["Mus musculus"]
    with open(raw, 'r') as reader:
        row = reader.readline().rstrip("\n").split('\t')
        if not self.check_fileheader(col, row, src_key):
            pass
        for line in reader:
            line = line.rstrip("\n")
            line_num += 1
            row = line.split('\t')
            accid = row[col.index('accid')].strip()
            prefixpart = row[col.index('prefixpart')].strip()
            logicaldb_key = row[col.index('_logicaldb_key')].strip()
            object_key = row[col.index('_object_key')].strip()
            preferred = row[col.index('preferred')].strip()
            # scrub out the backticks from accids
            # TODO notify the source upstream
            accid = re.sub(r'`', '', accid)
            if self.test_mode is True:
                if int(object_key) not in self.test_keys.get('strain'):
                    continue
            # get the hashmap of the identifiers
            if logicaldb_key == '1' and prefixpart == 'MGI:' and preferred == '1':
                self.idhash['strain'][object_key] = accid
                # NOTE(review): the call pattern elsewhere is
                # addIndividualToGraph(id, label, type); here the
                # 'intrinsic genotype' class occupies the label slot and
                # the taxon the type slot — confirm this is intended
                model.addIndividualToGraph(
                    accid, self.globaltt['intrinsic genotype'], tax_id)
    # The following are the stock centers for the strains
    # (asterisk indicates complete)
    # *1 MGI Mouse Genome Informatics
    # *22 JAX Registry (null)
    # *37 EMMA European Mutant Mouse Archive
    # *38 MMRRC Mutant Mouse Regional Resource Center
    # 39 Harwell Mammalian Genome Unit Stock List
    # *40 ORNL Oak Ridge National Lab mutant resource
    # *54 NCIMR NCI Mouse Repository
    # *56 NMICE Neuromice.org, a consortium of three NIH-sponsored
    # mutagenesis projects designed to search for
    # neurological mutations
    # 57 CARD Center for Animal Resources and Development @ Kumamoto U
    # *70 RIKEN BRC RIKEN BioResource Center
    # *71 CMMR Canadian Mouse Mutant Resource
    # 84 JPGA The Center for New Mouse Models of
    # Heart, Lung, BLood and Sleep Disorders,
    # JAX-PGA at The Jackson Laboratory
    # *87 MUGEN Network of Excellence in Integrated Functional Genomics
    # in Mutant Mouse Models as Tools to Investigate the
    # Complexity of Human Immunological Disease
    # *90 APB Australian Phenomics Bank
    # ? 91 EMS Elizabeth M. Simpson
    # ? 93 NIG National Institute of Genetics,
    # Mammalian Genetics Laboratory, Japan
    # 94 TAC Taconic
    # 154 OBS Oriental BioService , Inc.
    # 161 RMRC-NLAC National Applied Research Laboratories,Taiwan, R.O.C.
    # pass through the file again,
    # and make the equivalence statements to a subset of the idspaces
    LOG.info("mapping strain equivalent identifiers")
    line_num = 0
    with open(raw, 'r') as reader:
        reader.readline()  # read the header row; skip
        for line in reader:
            line = line.rstrip("\n")
            line_num += 1
            row = line.split('\t')
            accid = row[col.index('accid')].strip()
            prefixpart = row[col.index('prefixpart')].strip()
            logicaldb_key = row[col.index('_logicaldb_key')].strip()
            object_key = row[col.index('_object_key')].strip()
            preferred = row[col.index('preferred')].strip()
            # scrub out the backticks from accids
            # TODO notify the source upstream
            accid = re.sub(r'`', '', accid)
            if self.test_mode is True:
                if int(object_key) not in self.test_keys.get('strain'):
                    continue
            mgiid = self.idhash['strain'].get(object_key)
            if mgiid is None:
                # presumably we've already added the relevant MGI ids,
                # so skip those that we can't find
                # LOG.info("can't find mgiid for %s",object_key)
                continue
            strain_id = None
            deprecated = False
            comment = None
            if preferred == '1':  # what does it mean if it's 0?
                if logicaldb_key == '22':  # JAX
                    # scrub out the backticks from accids
                    # TODO notify the source upstream
                    accid = re.sub(r'`', '', accid).strip()
                    strain_id = 'JAX:' + accid
                elif logicaldb_key == '38':  # MMRRC
                    strain_id = accid
                    if not re.match(r'MMRRC:', strain_id):
                        strain_id = 'MMRRC:' + strain_id
                elif logicaldb_key == '37':  # EMMA
                    # replace EM: prefix with EMMA:, or for accid's
                    # with bare digits (e.g. 06335) prepend 'EMMA:'
                    strain_id = re.sub(r'^(EM:)*', 'EMMA:', accid)
                elif logicaldb_key == '90':  # APB
                    strain_id = 'APB:' + accid  # Check
                elif logicaldb_key == '40':  # ORNL
                    # ORNL is not in existence any more.
                    # these are deprecated, and we will prefix with JAX
                    strain_id = 'JAX:' + accid
                    comment = "Originally from ORNL."
                    deprecated = True
                    # add these as synonyms of the MGI mouse
                    model.addSynonym(mgiid, accid)
                elif logicaldb_key == '54':  # NCIMR
                    strain_id = 'NCIMR:' + accid
                # CMMR not great - doesn't resolve well
                # elif logicaldb_key == '71':
                #     strain_id = 'CMMR:'+accid
                elif logicaldb_key == '56':  # neuromice
                    # neuromice.org doesn't exist any more.
                    # but all these are actually MGI ids
                    strain_id = accid
                elif logicaldb_key == '70':  # RIKEN
                    # like
                    # http://www2.brc.riken.jp/lab/animal/detail.php?brc_no=RBRC00160
                    strain_id = 'RBRC:RBRC' + accid
                elif logicaldb_key == '87':
                    strain_id = 'MUGEN:' + accid
                    # I can't figure out how to get to some of the strains
            # TODO get non-preferred ids==deprecated?
            # TODO make these strains, rather than instance of taxon?
            if strain_id is not None:
                model.addIndividualToGraph(strain_id, None, tax_id)
                if deprecated:
                    model.addDeprecatedIndividual(strain_id, [mgiid])
                    model.addSynonym(mgiid, accid)
                else:
                    model.addSameIndividual(mgiid, strain_id)
                if re.match(r'MMRRC', strain_id):
                    model.makeLeader(strain_id)
                if comment is not None:
                    model.addComment(strain_id, comment)
            if not self.test_mode and limit is not None and line_num > limit:
                break
def _process_mgi_note_vocevidence_view(self, limit):
    """
    Here we fetch the free text descriptions of the phenotype associations.
    Triples:
    <annot_id> dc:description "description text"
    :param limit: optional row limit; ignored in test mode
    :return: None (adds description triples to the active graph)
    """
    src_key = 'mgi_note_vocevidence_view'
    # test mode writes into the separate, smaller test graph
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    LOG.info("getting free text descriptions for annotations")
    raw = '/'.join((self.rawdir, src_key))
    col = self.tables[src_key]['columns']
    with open(raw, 'r', encoding="utf8") as csvfile:
        reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        row = next(reader)  # header row
        if not self.check_fileheader(col, row, src_key):
            pass  # NOTE(review): a header mismatch is silently ignored here
        for row in reader:
            object_key = row[col.index('_object_key')].strip()
            note = row[col.index('note')].strip()
            # in test mode only keep rows for whitelisted note keys
            if self.test_mode is True:
                if int(object_key) not in self.test_keys.get('notes'):
                    continue
            # object_key == evidence._annotevidence_key
            annotkey = self.idhash['notes'].get(object_key)
            annot_id = self.idhash['annot'].get(annotkey)
            # only add the description for the annotations
            # we have captured through processing
            if annot_id is not None:
                model.addDescription(annot_id, note.strip())
            if not self.test_mode and limit is not None and reader.line_num > limit:
                break
def _process_mrk_location_cache(self, limit):
    """
    Add genomic locations (chromosome, start/end coordinate, strand,
    and assembly build when available) for mouse markers, from MGI's
    marker-location cache table.
    :param limit: optional row limit; ignored in test mode
    :return: None (adds Feature/chromosome triples to the active graph)
    """
    src_key = 'mrk_location_cache'
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    LOG.info("getting marker locations")
    raw = '/'.join((self.rawdir, src_key))
    geno = Genotype(graph)
    col = self.tables[src_key]['columns']
    with open(raw, 'r', encoding="utf8") as csvfile:
        reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        row = next(reader)  # header row
        if not self.check_fileheader(col, row, src_key):
            pass  # NOTE(review): a header mismatch is silently ignored here
        for row in reader:
            marker_key = row[col.index('_marker_key')].strip()
            organism_key = row[col.index('_organism_key')].strip()
            chromosome = row[col.index('chromosome')].strip()
            startcoordinate = row[col.index('startcoordinate')].strip()
            endcoordinate = row[col.index('endcoordinate')].strip()
            strand = row[col.index('strand')].strip()
            version = row[col.index('version')].strip()
            # only get the location information for mouse
            # (organism_key '1', skipping the 'UN'-known chromosome)
            if str(organism_key) != '1' or str(chromosome) == 'UN':
                continue
            if self.test_mode is True:
                if int(marker_key) not in self.test_keys.get('marker'):
                    continue
            # make the chromosome, and the build-instance
            chrom_id = makeChromID(chromosome, 'NCBITaxon:10090', 'CHR')
            if version is not None and version != '' and version != '(null)':
                # switch on maptype or mapkey
                assembly = version
                build_id = 'NCBIGenome:' + assembly
                geno.addChromosomeInstance(
                    chromosome, build_id, assembly, chrom_id)
                # prefer the build-specific chromosome id once known
                chrom_id = makeChromID(chromosome, build_id, 'MONARCH')
            if marker_key in self.idhash['marker']:
                gene_id = self.idhash['marker'][marker_key]
                feature = Feature(graph, gene_id, None, None)
                # '(null)' and the empty string both mean "unknown"
                if strand == '(null)' or strand == '':
                    strand = None
                if startcoordinate == '(null)' or startcoordinate == '':
                    startcoordinate = None
                if endcoordinate == '(null)' or endcoordinate == '':
                    endcoordinate = None
                if startcoordinate is not None:
                    feature.addFeatureStartLocation(
                        int(float(startcoordinate)), chrom_id, strand)
                else:
                    # unknown start is recorded as a fuzzy position
                    feature.addFeatureStartLocation(
                        startcoordinate, chrom_id, strand,
                        [self.globaltt['FuzzyPosition']])
                if endcoordinate is not None:
                    feature.addFeatureEndLocation(
                        int(float(endcoordinate)), chrom_id, strand)
                # note we don't add the uncertain end coordinate,
                # because we don't know what it is.
                add_as_class = False
                if gene_id in self.markers['classes']:
                    add_as_class = True
                feature.addFeatureToGraph(True, None, add_as_class)
            else:
                LOG.warning('marker key %s not in idhash', str(marker_key))
            if not self.test_mode and limit is not None and reader.line_num > limit:
                break
def process_mgi_relationship_transgene_genes(self, limit=None):
    """
    Here, we have the relationship between MGI transgene alleles,
    and the non-mouse gene ids that are part of them.
    We augment the allele with the transgene parts.
    :param limit: optional row limit; ignored in test mode
    :return: None (adds sequence-derives-from triples to the graph)
    """
    src_key = 'mgi_relationship_transgene_genes'
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    LOG.info("getting transgene genes")
    raw = '/'.join((self.rawdir, src_key))
    geno = Genotype(graph)
    col = self.tables[src_key]['columns']
    with open(raw, 'r', encoding="utf8") as csvfile:
        reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        row = next(reader)  # header row
        if not self.check_fileheader(col, row, src_key):
            pass  # NOTE(review): a header mismatch is silently ignored here
        for row in reader:
            # rel_key = row[col.index('rel_key')].strip()
            allele_key = int(row[col.index('object_1')])
            allele_id = row[col.index('allele_id')]
            # allele_label = row[col.index('allele_label')].strip()
            # category_key = row[col.index('category_key')].strip()
            # category_name = row[col.index('category_name')].strip()
            # property_key = row[col.index('property_key')].strip()
            # property_name = row[col.index('property_name')].strip()
            gene_num = int(row[col.index('property_value')])
            # in test mode only keep rows for whitelisted alleles/genes
            if self.test_mode and allele_key not in self.test_keys.get('allele') \
                    and gene_num not in self.test_ids:
                continue
            gene_id = 'NCBIGene:' + str(gene_num)
            # geno.addParts(gene_id, allele_id, self.globaltt['has_variant_part'])
            # prefer the sequence-alteration id mapped in earlier passes;
            # fall back to the allele id itself
            seqalt_id = self.idhash['seqalt'].get(allele_key)
            if seqalt_id is None:
                seqalt_id = allele_id
            geno.addSequenceDerivesFrom(seqalt_id, gene_id)
            if not self.test_mode and limit is not None and \
                    reader.line_num > limit:
                break
def process_mgi_note_allele_view(self, limit=None):
    """
    These are the descriptive notes about the alleles.
    Note that these notes have embedded HTML -
    should we do anything about that?

    Notes arrive split over several rows ordered by ``sequencenum``;
    they are first accumulated per (allele, notetype), then joined and
    attached to the allele as a description suffixed with "[notetype]".

    BUG FIX: removed a dead ``line_num = 0`` before the file-reading
    loop — the variable was unconditionally re-initialized before its
    first use in the allele loop below.
    :param limit: optional allele-count limit; ignored in test mode
    :return: None (adds description triples to the active graph)
    """
    src_key = 'mgi_note_allele_view'
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    LOG.info("Assembling notes on alleles")
    raw = '/'.join((self.rawdir, src_key))
    col = self.tables[src_key]['columns']
    notehash = {}
    with open(raw, 'r', encoding="utf8") as csvfile:
        reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        row = next(reader)  # header row
        if not self.check_fileheader(col, row, src_key):
            pass  # NOTE(review): a header mismatch is silently ignored here
        for row in reader:
            object_key = row[col.index('_object_key')].strip()
            notetype = row[col.index('notetype')].strip()
            note = row[col.index('note')].strip()
            sequencenum = row[col.index('sequencenum')].strip()
            # read all the notes into a hash to concatenate
            if object_key not in notehash:
                notehash[object_key] = {}
            if notetype not in notehash[object_key]:
                notehash[object_key][notetype] = []
            # pad the list so the 1-based sequencenum slot exists
            if len(notehash[object_key][notetype]) < int(sequencenum):
                for i in range(
                        len(notehash[object_key][notetype]),
                        int(sequencenum)):
                    notehash[object_key][notetype].append('')
            notehash[object_key][notetype][int(sequencenum) - 1] = note.strip()
    # finish iteration over notes
    line_num = 0
    for allele_key in notehash:
        line_num += 1
        if self.test_mode is True:
            if int(allele_key) not in self.test_keys.get('allele'):
                continue
        allele_id = self.idhash['allele'].get(allele_key)
        if allele_id is None:
            continue
        for n in notehash[allele_key]:
            # join the ordered fragments and tag with the note type
            notes = ''.join(notehash[allele_key][n])
            notes += ' [' + n + ']'
            model.addDescription(allele_id, notes)
        if not self.test_mode and limit is not None and line_num > limit:
            break
def _process_prb_strain_genotype_view(self, limit=None):
    """
    Link mouse strains to the genotypes they carry
    (strain --has_genotype--> genotype), also recording the mapping in
    self.strain_to_genotype_map for later passes.
    (NOTE(review): the previous docstring here was copied from the
    note-evidence parser and described the wrong table.)
    :param limit: optional row limit; ignored in test mode
    :return: None
    """
    src_key = 'prb_strain_genotype_view'
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    LOG.info("Getting genotypes for strains")
    raw = '/'.join((self.rawdir, src_key))
    col = self.tables[src_key]['columns']
    with open(raw, 'r', encoding="utf8") as csvfile:
        reader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        row = next(reader)  # header row
        if not self.check_fileheader(col, row, src_key):
            pass  # NOTE(review): a header mismatch is silently ignored here
        for row in reader:
            strain_key = row[col.index('_strain_key')].strip()
            genotype_key = row[col.index('_genotype_key')].strip()
            if self.test_mode is True and \
                    int(genotype_key) not in self.test_keys.get('genotype') \
                    and int(strain_key) not in self.test_keys.get('strain'):
                continue
            # fall back to internal (blank-node style) ids for strains
            # or genotypes not seen in earlier processing
            strain_id = self.idhash['strain'].get(strain_key)
            if strain_id is None:
                strain_id = self._make_internal_identifier(
                    'strain', strain_key)
            genotype_id = self.idhash['genotype'].get(genotype_key)
            if genotype_id is None:
                genotype_id = self._make_internal_identifier(
                    'genotype', genotype_key)
            # both ids are non-None at this point due to the fallbacks
            if strain_id is not None and genotype_id is not None:
                self.strain_to_genotype_map[strain_id] = genotype_id
                graph.addTriple(strain_id, self.globaltt['has_genotype'], genotype_id)
            # TODO
            # verify if this should be contingent on the exactness or not
            # if qualifier == 'Exact':
            #     gu.addTriple(
            #         graph, strain_id,
            #         self.globaltt['has_genotype'],
            #         genotype_id)
            # else:
            #     gu.addXref(graph, strain_id, genotype_id)
            if not self.test_mode and limit is not None and reader.line_num > limit:
                break
def _make_internal_identifier(self, prefix, key):
    """
    This is a special MGI-to-MONARCH-ism.
    MGI tables have unique keys that we use here, but don't want to
    necessarily re-distribute those internal identifiers.
    Therefore, we make them into keys in a consistent way here.
    :param prefix: the object type to prefix the key with,
    since the numbers themselves are not unique across tables
    :param key: the number
    :return: a blank-node style identifier for the (prefix, key) pair
    """
    # these are just more blank node identifiers
    return self.make_id('mgi' + prefix + 'key' + key, '_')
# def _querysparql(self):
#
# #load the graph
# vg = Graph()
# vg.parse(self.outfile, format="turtle")
#
# qres = g.query(
# """SELECT DISTINCT ?aname ?bname
# WHERE {
# ?a foaf:knows ?b .
# ?a foaf:name ?aname .
# ?b foaf:name ?bname .
# }""")
#
# for row in qres:
# print("%s knows %s" % row)
| monarch-initiative/dipper | dipper/sources/MGI.py | MGI.py | py | 99,120 | python | en | code | 53 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "dipper.sources.PostgreSQLSource.PostgreSQLSource",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "dipper.config.get_config",
"line_number": 348,
"usage_type": "call"
... |
20516636897 | from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient
import sys
import time
import json
import getopt
# Import SPI library (for hardware SPI) and MCP3008 library.
import Adafruit_GPIO.SPI as SPI
import Adafruit_MCP3008
# Hardware SPI configuration:
SPI_PORT = 0
SPI_DEVICE = 0
mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))
# Configure AWS IOT
thing_name = "medidor"
topic_consumo = "medidor/consumo"
topic_sms = "medidor/sms"
# Configure parameters
already_notificated = False
delay_s = 1
#Function measure current
def calculaConsumo():
    """Sample the current sensor 1000 times (~1 s total, 1 ms apart)
    and return the averaged current reading.

    NOTE(review): the code subtracts a 5 V offset from the ADC voltage;
    mid-supply hall sensors (e.g. ACS712) normally idle at 2.5 V —
    confirm the intended offset with the hardware.
    """
    sensibilidade = 0.185  # sensor sensitivity — presumably V per A, confirm
    total = 0
    for _ in range(1000):
        voltage = mcp.read_adc(0) * (5 / 1023.0)  # ADC counts -> volts
        total += (voltage - 5) / sensibilidade
        time.sleep(0.001)
    return total / 1000
# Custom Shadow callback
def customShadowCallback_Update(payload, responseStatus, token):
    """Callback for shadowUpdate: log the outcome of a shadow update.

    payload is a JSON string ready to be parsed using json.loads(...)
    in both Py2.x and Py3.x. For accepted updates the "reported" state
    is printed, falling back to the "desired" state when no reported
    state is present.
    """
    if responseStatus == "timeout":
        print("Update request " + token + " time out!")
    if responseStatus == "accepted":
        payloadDict = json.loads(payload)
        print("~~~~~~~~~~~~~~~~~~~~~~~")
        print("Update request with token: " + token + " accepted!")
        try:
            print("property: " + str(payloadDict["state"]["reported"]))
        except KeyError:
            # BUG FIX: the original caught Exception, which could hide
            # unrelated errors; only a missing "reported" key is expected.
            print("property: " + str(payloadDict["state"]["desired"]))
        print("~~~~~~~~~~~~~~~~~~~~~~~\n\n")
    if responseStatus == "rejected":
        print("Update request " + token + " rejected!")
# Usage
usageInfo = """Usage:
Use certificate based mutual authentication:
python medidor-iot-aws.py -e <endpoint> -r <rootCAFilePath> -c <certFilePath> -k <privateKeyFilePath>
Type "python medidor-iot-aws.py -h" for available options.
"""
# Help info
helpInfo = """-e, --endpoint
Your AWS IoT custom endpoint
-r, --rootCA
Root CA file path
-c, --cert
Certificate file path
-k, --key
Private key file path
-h, --help
Help information
"""
# Read in command-line parameters
host = ""
rootCAPath = ""
certificatePath = ""
privateKeyPath = ""
try:
opts, args = getopt.getopt(sys.argv[1:], "hwe:k:c:r:", ["help", "endpoint=", "key=","cert=","rootCA="])
if len(opts) == 0:
raise getopt.GetoptError("No input parameters!")
for opt, arg in opts:
if opt in ("-h", "--help"):
print(helpInfo)
exit(0)
if opt in ("-e", "--endpoint"):
host = arg
if opt in ("-r", "--rootCA"):
rootCAPath = arg
if opt in ("-c", "--cert"):
certificatePath = arg
if opt in ("-k", "--key"):
privateKeyPath = arg
except getopt.GetoptError:
print(usageInfo)
exit(1)
# Missing configuration notification
missingConfiguration = False
if not host:
print("Missing '-e' or '--endpoint'")
missingConfiguration = True
if not rootCAPath:
print("Missing '-r' or '--rootCA'")
missingConfiguration = True
if not certificatePath:
print("Missing '-c' or '--cert'")
missingConfiguration = True
if not privateKeyPath:
print("Missing '-k' or '--key'")
missingConfiguration = True
if missingConfiguration:
exit(2)
# Configure logging
"""
logger = logging.getLogger("AWSIoTPythonSDK.core")
logger.setLevel(logging.DEBUG)
streamHandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
"""
# Custom MQTT message callback
def callbackLed(client, userdata, message):
    """MQTT message callback: switch the LED state based on the
    "Consumo" value in the incoming JSON payload and remember whether a
    notification was already sent.

    NOTE(review): updateLed() is not defined anywhere in this file —
    confirm it is provided elsewhere, otherwise this raises NameError.
    """
    global led_status
    global already_notificated
    result = json.loads(message.payload)
    if (result["Consumo"] <= 0.1):
        # print "Led on"
        led_status = updateLed(True)
        already_notificated = True
    else:
        # print "Led off"
        led_status = updateLed(False)
        already_notificated = False
# Init AWSIoTMQTTShadowClient
myShadowClient = None
myShadowClient = AWSIoTMQTTShadowClient("medidor")
myShadowClient.configureEndpoint(host, 8883)  # MQTT over TLS port
myShadowClient.configureCredentials(rootCAPath, privateKeyPath, certificatePath)
# Init AWSIoTMQTTClient
myAWSIoTMQTTClient = None
myAWSIoTMQTTClient = myShadowClient.getMQTTConnection()
# AWSIoTMQTTShadowClient connection configuration
myShadowClient.configureAutoReconnectBackoffTime(1, 32, 20)
myShadowClient.configureConnectDisconnectTimeout(10)  # 10 sec
myShadowClient.configureMQTTOperationTimeout(5)  # 5 sec
# AWSIoTMQTTClient connection configuration
myAWSIoTMQTTClient.configureAutoReconnectBackoffTime(1, 32, 20)
myAWSIoTMQTTClient.configureOfflinePublishQueueing(-1)  # Infinite offline Publish queueing
myAWSIoTMQTTClient.configureDrainingFrequency(2)  # Draining: 2 Hz
myAWSIoTMQTTClient.configureConnectDisconnectTimeout(10)  # 10 sec
myAWSIoTMQTTClient.configureMQTTOperationTimeout(5)  # 5 sec
# Connect and subscribe to AWS IoT
# Connect to AWS IoT
myShadowClient.connect()
# Create a deviceShadow with persistent subscription
myDeviceShadow = myShadowClient.createShadowHandlerWithName(thing_name, True)
# NOTE(review): topic_led is never defined in this file (only
# topic_consumo and topic_sms are) — this line raises NameError at
# runtime; confirm which topic the LED callback should subscribe to.
myAWSIoTMQTTClient.subscribe(topic_led, 1, callbackLed)
# Start Shadow: push an initial desired state of 0 consumption
ConsumoShadow = 0
payloadShadow = ('"Consumo": "{0:0.1f}", "Notificated": "{1}"'.format(ConsumoShadow, already_notificated))
payloadShadow = '{"state":{ "desired": {'+payloadShadow+'}}}'
myDeviceShadow.shadowUpdate(payloadShadow, customShadowCallback_Update, 5)
# Publish to a topic in a loop forever
try:
    while True:
        # Read Current
        ConsumoDC = calculaConsumo()
        # Publish the measured consumption to the MQTT topic
        msg = ('"Consumo DC": "{0:0.01f}", "Notificated": "{1}"'.format(ConsumoDC, already_notificated))
        msg = '{'+msg+'}'
        myAWSIoTMQTTClient.publish(topic_consumo, msg, 1)
        # Update Shadow with the latest reported state
        payloadShadow = ('"Consumo": "{0:0.1f}", "Notificated": "{1}"'.format(ConsumoDC, already_notificated))
        payloadShadow = '{"state":{ "reported": {'+payloadShadow+'}}}'
        myDeviceShadow.shadowUpdate(payloadShadow, customShadowCallback_Update, 5)
        time.sleep(delay_s)
except KeyboardInterrupt:
    pass
| chls84/TCC | Código Python/medidor-aws-iot.py | medidor-aws-iot.py | py | 6,262 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "Adafruit_MCP3008.MCP3008",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "Adafruit_GPIO.SPI.SpiDev",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "Adafruit_GPIO.SPI",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "t... |
19933423422 | import requests, zipfile, os, json, sqlite3
def get_manifest():
    """Download Bungie's Destiny 2 world-content manifest archive and
    unpack it into 'Manifest.content' in the working directory."""
    index_url = 'http://www.bungie.net/Platform/Destiny2/Manifest/'
    print("Downloading Manifest from http://www.bungie.net/Platform/Destiny2/Manifest/...")
    index = requests.get(index_url).json()
    content_url = 'http://www.bungie.net' + index['Response']['mobileWorldContentPaths']['en']
    # Download the zipped sqlite database to a temporary file.
    payload = requests.get(content_url)
    with open("MANZIP", "wb") as archive_file:
        archive_file.write(payload.content)
    print("Download Complete!")
    print("Unzipping contents...")
    # Extract the archive and give its (single) member a stable name.
    with zipfile.ZipFile('MANZIP') as archive:
        members = archive.namelist()
        archive.extractall()
        os.rename(members[0], 'Manifest.content')
    os.remove('MANZIP')
    print('Unzipped!')
def build_dict():
    """Read DestinyInventoryItemDefinition from the downloaded manifest
    database and return a dict of exotic (non-intrinsic) items keyed by
    display name.

    Each value is the item's displayProperties dict augmented with
    'type', 'tier', 'image', 'active' and 'class' keys.

    BUG FIX: the original connected to 'manifest.content' while
    get_manifest() extracts the file as 'Manifest.content'; on
    case-sensitive filesystems sqlite3.connect silently created a new
    empty database and the SELECT failed.
    """
    con = sqlite3.connect('Manifest.content')
    print('Connected to sqlite db')
    cur = con.cursor()
    cur.execute('SELECT json from DestinyInventoryItemDefinition')
    print('Generating DestinyInventoryItemDefinition dictionary....')
    items = cur.fetchall()
    con.close()  # rows are fully fetched; release the db handle
    item_jsons = [json.loads(item[0]) for item in items]
    item_dict = {}
    for item in item_jsons:
        props = item['displayProperties']
        name = props['name']
        tier = item['itemTypeAndTierDisplayName']
        # Keep only named, non-classified Exotic items, excluding the
        # "Intrinsic" perk entries that share the Exotic tier label.
        if (name and name != "Classified"
                and item['itemTypeDisplayName'] is not None
                and tier and "Exotic" in tier and "Intrinsic" not in tier):
            item_dict[name] = props
            item_dict[name]['type'] = item['itemTypeDisplayName']
            item_dict[name]['tier'] = tier
            item_dict[name]['image'] = "https://www.bungie.net" + props['icon']
            item_dict[name]['active'] = "false"
            try:
                item_dict[name]['class'] = item['classType']
            except KeyError:
                # items without a classType are not class-restricted
                item_dict[name]['class'] = "null"
    print('Dictionary Generated!')
    return item_dict
def saveToJs(data):
    """Split the exotic-item dict into per-category lists and write them
    as JavaScript array literals to ExampleData.js.

    Items are grouped by their 'type'; armor is further split by class
    (0 = Titan, 1 = Hunter, 2 = Warlock). 'Trait' and 'Engram' entries
    are dropped; 'Weapon Ornament' entries are collected but (as in the
    original) not written out.

    BUG FIX: arrays are now serialized with json.dumps so the output is
    always valid JavaScript — the previous repr() output emitted Python
    literals and broke on strings containing apostrophes.
    """
    weapons = []
    warlock = []
    hunter = []
    titan = []
    weapon_ornaments = []
    ships = []
    emotes = []
    vehicles = []
    ghosts = []
    ornaments = []
    armor = []
    armor_types = {"Helmet", "Chest Armor", "Leg Armor", "Gauntlets"}
    for name in list(data):
        entry = data[name]
        # raw icon fields are superseded by the absolute 'image' URL
        entry.pop('icon', None)
        entry.pop('hasIcon', None)
        kind = entry['type']
        if kind == "Weapon Ornament":
            weapon_ornaments.append(entry)
        elif kind == "Ship":
            ships.append(entry)
        elif kind == "Emote":
            emotes.append(entry)
        elif kind == "Vehicle":
            vehicles.append(entry)
        elif kind in ("Trait", "Engram"):
            continue  # not part of the exotic lists
        elif kind in armor_types:
            armor.append(entry)
        elif kind == "Ghost Shell":
            ghosts.append(entry)
        elif "Ornament" in kind:
            ornaments.append(entry)
        else:
            weapons.append(entry)
    for piece in armor:
        if piece['class'] == 2:
            warlock.append(piece)
        elif piece['class'] == 0:
            titan.append(piece)
        elif piece['class'] == 1:
            hunter.append(piece)
        else:
            print("This armor piece has an issue", end="")
            print()
            print(piece)
    with open("ExampleData.js", "w") as text_file:
        for var_name, bucket in (
                ("exoticHunterArmorList", hunter),
                ("exoticTitanArmorList", titan),
                ("exoticWarlockArmorList", warlock),
                ("exoticWeaponList", weapons),
                ("exoticVehicleList", vehicles),
                ("exoticShipList", ships),
                ("exoticEmoteList", emotes),
                ("exoticGhostList", ghosts),
                ("exoticOrnamentList", ornaments)):
            print("\nvar " + var_name + " = ", file=text_file, end="")
            print(json.dumps(bucket), file=text_file)
#print(len(data))
#print(data["Rat King"])
#from collections import Counter
# -- script entry -----------------------------------------------------------
print("Starting...")
# Remove a stale output file so every run starts clean.
if (os.path.isfile('ExampleData.js')):
    print("Found existing data, overwriting...")
    os.remove('ExampleData.js')
get_manifest()
all_data = build_dict()
# The sqlite database is only needed while building the dict.
os.remove('Manifest.content')
print("Formatting and saving to JavaScript file...")
saveToJs(all_data)
print("Done")
#print(len(all_data))
#print(all_data['Coldheart']['tier'])
#print("Exotic" in all_data['Coldheart']['tier'])
#print(not all_data['Raven Shard']['tier'])
#print(not all_data['Raven Shard']['tier'])
| RyanGrant/RyanGrant.github.io | Python/Manifest.py | Manifest.py | py | 5,969 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "zipfile.ZipFile",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.rename",
"line_number... |
24556414335 | import json
from flask import Flask, jsonify, request
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func
from flask_bcrypt import Bcrypt
from flask_redis import FlaskRedis
app = Flask(__name__)
app.config['SECRET_KEY'] = 'ghjrhhrohirorthrtohi'
app.config['SQLALCHEMY_DATABASE_URI'] = "mysql://root:root@localhost/quizapp"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
redis_cli = FlaskRedis(app)
bcrypt = Bcrypt(app)
@app.route('/', methods=['GET', 'POST'])
def home():
    """Home/health endpoint.

    BUG FIX: the original only returned a body for POST, so a GET
    request (which the route explicitly allows) returned None and made
    Flask raise a 500. Both methods now receive the same payload.
    """
    return {"status": "active", "message": "You are now in home page"}
@app.route('/register', methods=['POST'])
def register():
    """Create a new User from the posted JSON (username, email,
    password, contact_no); the password is stored bcrypt-hashed."""
    response = {"status": "False", "message": "Error occurred"}
    try:
        from models import User
        data = request.get_json()
        print(data)
        hashed_password = bcrypt.generate_password_hash(data.get("password")).decode('utf-8')
        register_data = User(
            username=data.get('username'),
            email=data.get('email'),
            password=hashed_password,
            contact_no=data.get('contact_no')
        )
        print(register_data)
        db.session.add(register_data)
        db.session.commit()
        response = {"status": "True", "message": "data stored successfully"}
        return response
    except Exception as e1:
        # BUG FIX: the original assigned a tuple ("Exception occurred",
        # str(e1)) as the message; format it as a single string.
        response["message"] = "Exception occurred: " + str(e1)
        return response
@app.route('/login', methods=['POST'])
def login():
    """Validate email/password; on success cache the user's id and
    username in Redis and return the username to the client."""
    from models import User
    return_response = {"status": False, "message": "Error occurred"}
    try:
        if request.method == "POST":
            data = request.get_json()
            print(data)
            email = data.get('email')
            user = User.query.filter_by(email=email).first()
            if user and bcrypt.check_password_hash(user.password, data.get('password')):
                redis_cli.set('id', user.id)
                redis_cli.set('username', user.username)
                # redis returns bytes; decode back to str for the payload
                redis_un = redis_cli.get('username').decode('UTF-8')
                return_response = {"status": "True", "message": "Logged in successfully", "flag": "1",
                                   "username": redis_un}
                return return_response
            else:
                return_response = {"status": "False", "message": "Please enter valid input"}
                return return_response
    except Exception as e1:
        # BUG FIX: the original assigned a tuple as the message.
        return_response["message"] = "Exception occurred: " + str(e1)
        return return_response
@app.route('/quiz', methods=['GET', 'POST'])
def quiz():
    """Return 5 quiz questions: filtered by the posted subject list when
    'data' is present, otherwise 5 random questions."""
    from models import QA
    import ast  # stdlib; used only for safe literal parsing below
    try:
        data = request.get_json()
        print(data)
        if data.get("data") is not None:
            # SECURITY FIX: the subject list arrives as a string and was
            # previously eval()'d — arbitrary code execution on user
            # input. literal_eval accepts the same literal syntax safely.
            subjects = ast.literal_eval(data.get('data'))
            qes = db.session.query(QA).filter(QA.sub_name.in_(subjects)).limit(5).all()
            qa_list = []
            for qa in qes:
                qa_data = {'id': qa.id, 'question': qa.question,
                           'options': json.loads(qa.options)}
                qa_list.append(qa_data)
            response = {"status": "True", "message": "data stored successfully"}
            return jsonify({'response': response, "data": qa_list})
        else:
            questions = QA.query.order_by(func.random()).limit(5).all()
            qa_list = []
            for qa in questions:
                qa_data = {'id': qa.id, 'sub_name': qa.sub_name, 'question': qa.question,
                           'options': json.loads(qa.options)}
                qa_list.append(qa_data)
            response = {"status": "True", "message": "data stored successfully"}
            return {'response': response, 'data': qa_list}
    except Exception as e:
        # BUG FIX: the bare except returned a literal string that was not
        # valid JSON; return a serializable error payload instead.
        return {"error": "invalid data: " + str(e)}
@app.route('/view_que', methods=['GET', 'POST'])
def view_que():
    """Return every question (with options and the correct option) and
    cache the full correct-answer list in Redis for later scoring."""
    from models import QA
    try:
        questions = QA.query.all()
        correct_options = json.dumps([x.correct_opt for x in questions])
        print("correct_option", correct_options)
        redis_cli.set('correct_opt', correct_options)
        print("redis get", redis_cli.get('correct_opt'))
        qa_list = []
        for qa in questions:
            qa_data = {'id': qa.id, 'sub_name': qa.sub_name, 'question': qa.question,
                       'options': json.loads(qa.options),
                       'correct_opt': qa.correct_opt}
            qa_list.append(qa_data)
        return jsonify({'status': True, 'data': qa_list})
    except Exception as e:
        # BUG FIX: returning the exception object itself is not JSON
        # serializable; return its message.
        return {"error": str(e)}
@app.route("/delete", methods=['POST'])
def delete():
try:
from models import QA
data = request.get_json()
qa = QA.query.filter_by(id=data.get('id')).first()
local_object = db.session.merge(qa)
db.session.delete(local_object)
db.session.commit()
return jsonify({"Status": True, "data": "Data deleted successfully "})
except Exception as e:
return {"error": e}
@app.route("/edit", methods=['GET', 'POST'])
def edit():
try:
from models import QA
data = request.get_json()
qa = QA.query.filter_by(id=data.get('id')).first()
qa_list = []
qa_data = {'id': qa.id, 'sub_name': qa.sub_name, 'question': qa.question, 'options': json.loads(qa.options),
'correct_opt': qa.correct_opt}
qa_list.append(qa_data)
return jsonify({'status': True, 'data': qa_list})
except Exception as e:
return {"error": e}
@app.route("/add", methods=['POST'])
def add():
try:
response = {"status": "True", "message": "data added successfully"}
if request.method == 'POST':
from models import QA
data = request.get_json()
ques = data.get('question')
sub = data.get('sub_name')
data1 = {
"option1": data.get('option1'),
"option2": data.get('option2'),
"option3": data.get('option3'),
"option4": data.get('option4')
}
options = data1
correct_opt = data.get('correct_option')
qa = QA(question=ques, options=json.dumps(options), sub_name=sub, correct_opt=correct_opt)
db.session.add(qa)
db.session.commit()
return response
except Exception as e:
return {"error": e}
@app.route("/update", methods=['GET', 'POST'])
def update():
try:
from models import QA
data = request.get_json()
response = {"status": "True", "message": "data updated successfully"}
if request.method == 'POST':
ques = data.get('question')
sub = data.get('subject')
data1 = {
"option1": data.get('option1'),
"option2": data.get('option2'),
"option3": data.get('option3'),
"option4": data.get('option4')
}
options = data1
correct_opt = data.get('correct_opt')
qa = QA(question=ques, options=json.dumps(options), id=data.get('id'), correct_opt=correct_opt,
sub_name=sub)
local_object = db.session.merge(qa)
db.session.add(local_object)
db.session.commit()
return response
except Exception as e:
return {"error": e}
@app.route('/taken_quiz', methods=['POST'])
def taken_quiz():
    """Score a submitted quiz against the correct answers cached in
    Redis (by /view_que) and append the attempt to the logged-in
    user's stored history."""
    try:
        if request.method == "POST":
            from models import User, QA
            # id of the user that logged in last (set by /login)
            redis_id = int(redis_cli.get('id'))
            print("redis stored id", redis_id)
            # SECURITY FIX: the cached answer list was stored with
            # json.dumps but read back with eval(); parse it as JSON.
            redis_corr = json.loads(redis_cli.get('correct_opt').decode('UTF-8'))
            data = request.get_json()
            question1 = db.session.query(QA).filter(QA.id.in_(data.get('questions'))).all()
            main_dict = {x.id: {'question': x.question, 'correct_opt': x.correct_opt} for x in question1}
            user_result = {
                "question": data.get('questions'),
                "select_option": data.get('selected_option')
            }
            # naive scoring: count every correct answer that appears
            # anywhere in the selected options
            # NOTE(review): this ignores question positions — confirm
            # this is the intended scoring rule
            count = 0
            for i in redis_corr:
                if i in user_result["select_option"]:
                    count = count + 1
            questions = data['questions']
            sel_opt = data['selected_option']
            for q in questions:
                main_dict[int(q)].update({
                    'selected_option': sel_opt[questions.index(q)]
                })
            main_dict["score"] = count
            user1 = User.query.filter_by(id=redis_id).first()
            if user1.user_result in [None, ""]:
                # first attempt: start the history list
                user1.user_result = json.dumps([main_dict])
                user1.score = count
                local_object = db.session.merge(user1)
                db.session.add(local_object)
                db.session.commit()
                return {"status": True, "data": json.dumps(main_dict)}
            else:
                # append this attempt to the existing history
                old_data = json.loads(user1.user_result)
                old_data.append(main_dict)
                user1.user_result = json.dumps(old_data)
                local_object = db.session.merge(user1)
                db.session.add(local_object)
                db.session.commit()
                return {"data": json.dumps(main_dict), "score": count}
    except Exception as e:
        # BUG FIX: exception objects are not JSON serializable.
        return {"error": str(e)}
@app.route('/result', methods=['POST', 'GET'])
def result():
    """Return the logged-in user's stored quiz history, or a notice
    when no quiz has been taken yet."""
    try:
        from models import User
        redis_id = int(redis_cli.get('id'))
        user1 = User.query.filter_by(id=redis_id).first()
        if user1.user_result in [None, ""]:
            return jsonify({"response": "No Quiz Taken Yet"})
        user_result = json.loads(user1.user_result)
        return {"response": user_result}
    except Exception as e:
        # BUG FIX: exception objects are not JSON serializable.
        return {"error": str(e)}
| Ankita2802/Quiz_backend | routes.py | routes.py | py | 9,890 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask_sqlalchemy.SQLAlchemy",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask_redis.FlaskRedis",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask_b... |
4520925854 | from django.urls import path
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView, TokenVerifyView,
TokenObtainSlidingView, TokenRefreshSlidingView
)
from .views import SingUpView, BlacklistRefreshView
# URL routes: user signup plus the simple-jwt token endpoints.
# NOTE(review): TokenObtainSlidingView, TokenRefreshSlidingView and
# BlacklistRefreshView are imported above but unused here — confirm
# whether the sliding-token / logout routes were meant to be wired up.
urlpatterns = [
    path('signup/', SingUpView.as_view(), name='sign_up'),
    # path('api/logout/', BlacklistRefreshView.as_view(), name="logout"),
    path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
    path('api/token/verify/', TokenVerifyView.as_view(), name='token_verify'),
]
| rustamovjavohir/EcommerceSHOP | auth_user/urls.py | urls.py | py | 638 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "views.SingUpView.as_view",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "views.SingUpView",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.url... |
10167784496 | # (c) Nelen & Schuurmans. MIT licensed, see LICENSE.rst.
from __future__ import unicode_literals
from django.http.multipartparser import parse_header
from rest_framework.renderers import BaseRenderer
COLNAME_FORMAT = '%Y-%m-%d %H:%M:%S.%f'
class CSVRenderer(BaseRenderer):
    """
    Renderer which serializes a timestamp-indexed frame to
    semicolon-separated, double-quoted CSV.
    """
    media_type = 'text/csv'
    format = 'csv'

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """
        Render `data` into csv: a quoted header line, then one line per
        row with the timestamp (COLNAME_FORMAT) followed by the values.
        """
        if data is None:
            return ''
        header = '"datetime (utc)";' + ';'.join(
            '"%s"' % column for column in data.columns) + '\n'
        lines = []
        for timestamp, row in data.iterrows():
            cells = ';'.join(
                '"%s"' % row[i] for i, _ in enumerate(data.columns))
            lines.append(
                '"%s";%s\n' % (timestamp.strftime(COLNAME_FORMAT), cells))
        return header + ''.join(lines)
| ddsc/dikedata-api | dikedata_api/renderers.py | renderers.py | py | 957 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.renderers.BaseRenderer",
"line_number": 10,
"usage_type": "name"
}
] |
6846678287 | from tkinter import Frame, Label, Menu, ttk
from tkinter.messagebox import showinfo
import tkinter as tk
import requests
class AttackInfo(Frame):
    """Tkinter frame showing a scrollable list of attack types fetched
    from the local backend, with the selected attack's description
    displayed alongside."""

    def __init__(self, master=None):
        # BUG FIX: removed the dead no-op `master = master` assignment.
        super(AttackInfo, self).__init__(master)
        self.type_label = Label(self, text=" ", font=('Helvetica', 18, "bold"), pady=10)
        self.type_label.grid(row=0, column=2)
        self.info_label = Label(self, text=" ", font=('Helvetica', 8), wraplength=600)
        self.info_label.grid(row=1, column=2, sticky='w')
        # Fetch the attack catalogue from the local API.
        reply = requests.get('http://127.0.0.1:23432/attack/')
        attacks = reply.json()
        print(attacks)
        attack_titles = [attack['type'] for attack in attacks]
        attack_info = [attack['info'] for attack in attacks]
        var = tk.Variable(value=attack_titles)
        listbox = tk.Listbox(
            self,
            listvariable=var,
            height=len(attack_titles))
        listbox.grid(row=0, column=0, rowspan=2, sticky="ns")
        self.rowconfigure(0, weight=1)
        self.rowconfigure(1, weight=1)
        scrollbar = ttk.Scrollbar(
            self,
            orient=tk.VERTICAL,
            command=listbox.yview
        )
        listbox['yscrollcommand'] = scrollbar.set
        scrollbar.grid(row=0, rowspan=2, column=1, sticky="ns")

        def items_selected(event):
            # Rebuild the labels with the newly selected attack's details.
            self.info_label.destroy()
            self.type_label.destroy()
            selected_index = listbox.curselection()[0]
            print(selected_index)
            self.type_label = Label(self, text=attack_titles[selected_index],
                                    font=('Helvetica', 18, "bold"), pady=10)
            self.type_label.grid(row=0, column=2, sticky="n")
            self.info_label = Label(self, text=attack_info[selected_index],
                                    font=('Helvetica', 8), wraplength=600)
            self.info_label.grid(row=1, column=2, sticky='n')

        listbox.bind('<<ListboxSelect>>', items_selected)
| iamcrysun/eqw | desktop/views/attackinfo.py | attackinfo.py | py | 2,001 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "tkinter.Frame",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "tkinter.Label",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_num... |
20701388833 | import sqlite3
# Open the local SQLite database and insert one student record.
samira = sqlite3.connect('shallownowschool.db')
try:
    cursor = samira.cursor()
    cursor.execute("""
INSERT INTO td_estudante(nome, endereco, nascimento, matricula)
VALUES ('Maria da Conceição', 'Rua da Paz', '1902-12-12', 20161382596);
""")
    samira.commit()
    print("Inserido com sucesso.")
finally:
    # Always release the connection, even if the INSERT fails.
    samira.close()
| kemelynfigueiredo/TopicosEspeciais | MeuPrimeiroSQLite/temp.py | temp.py | py | 324 | python | pt | code | 0 | github-code | 6 | [
{
"api_name": "sqlite3.connect",
"line_number": 3,
"usage_type": "call"
}
] |
6105704383 | import requests
import sys
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
proxies = {'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080'}
def promote_to_admin(s, url):
    """Log in as wiener:peter and abuse the unprotected /admin-roles
    endpoint to upgrade that account to administrator.

    Exits the process with -1 on either the login or the promotion failing.
    """
    # Authenticate first so the session carries a valid cookie.
    resp = s.post(url + "/login",
                  data={"username": "wiener", "password": "peter"},
                  verify=False, proxies=proxies)
    if "Log out" not in resp.text:
        print("(-) Could not login as the wiener user.")
        sys.exit(-1)
    print("(+) Successfully logged in as the wiener user.")
    # The role-change endpoint performs no authorization check (the lab's
    # access-control vulnerability).
    resp = s.get(url + "/admin-roles?username=wiener&action=upgrade",
                 verify=False, proxies=proxies)
    if "Admin panel" not in resp.text:
        print("(-) Could not promote the user to administrator.")
        sys.exit(-1)
    print("(+) Successfully promoted the user to administrator.")
def main():
    """Validate CLI arguments and run the exploit against the target URL."""
    if len(sys.argv) != 2:
        print("(+) Usage: %s <url>" % sys.argv[0])
        print("(+) Example: %s www.example.com" % sys.argv[0])
        sys.exit(-1)
    target = sys.argv[1]
    session = requests.Session()
    promote_to_admin(session, target)
# Script entry point.
if __name__ == "__main__":
    main()
{
"api_name": "urllib3.disable_warnings",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "urllib3.exceptions",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sys.exit",
... |
4083668934 | from urllib.parse import urlencode
import httpx
from common.kan_params import crd_params
from common.voe_ipc import KanAgent
#host = 'localhost'
#host = 'kan-agent'
#port = '8088'
class KanAgentClient:
    """HTTP client for the Kan Agent 'references' API.

    Provides lookups for the Kan CRD objects (Target, Device, Solution,
    Instance, Skill, Model) and posts instance status/FPS updates back to
    the agent.
    """

    def __init__(self, scope='default', version='v1', ref='v1alpha2.ReferenceK8sCRD'):
        self.url = f'{KanAgent.Url}/v1alpha2/agent/references'
        self.scope = scope
        self.version = version
        self.ref = ref

    def _gen_params(self, crd_name):
        """Return the base query parameters shared by every request.

        NOTE(review): `crd_name` is currently unused here (the kind/group
        fields are filled in by the callers); kept for signature stability.
        """
        return {
            'scope': self.scope,
            'version': self.version,
            'ref': self.ref
        }

    def _get(self, crd_name, object_name):
        """Fetch a single `crd_name` object called `object_name` and return
        the decoded JSON payload."""
        params = self._gen_params(crd_name)
        params['kind'] = crd_params[crd_name]['plural']
        params['group'] = crd_params[crd_name]['group']
        params['id'] = object_name
        #FIXME error handling
        try:
            r = httpx.get(self.url, params=params)
        except Exception as exc:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; the original error is chained for debugging.
            raise Exception('Cannot access Kan Agent') from exc
        return r.json()

    def get_target(self, name):
        """Look up a Target CRD object by name."""
        return self._get('Target', name)

    def get_device(self, name):
        """Look up a Device CRD object by name."""
        return self._get('Device', name)

    def get_solution(self, name):
        """Look up a Solution CRD object by name."""
        return self._get('Solution', name)

    def get_instance(self, name):
        """Look up an Instance CRD object by name."""
        return self._get('Instance', name)

    def get_skill(self, name):
        """Look up a Skill CRD object by name."""
        return self._get('Skill', name)

    def get_model(self, name):
        """Look up a Model CRD object by name."""
        return self._get('Model', name)

    def get_skill_with_instance_name_and_alias(self, name, instance_name, alias):
        """Look up a Skill resolved against a specific instance and alias."""
        params = self._gen_params('Skill')
        params['kind'] = crd_params['Skill']['plural']
        params['group'] = crd_params['Skill']['group']
        params['id'] = name
        params['instance'] = instance_name
        params['alias'] = alias
        try:
            r = httpx.get(self.url, params=params)
        except Exception as exc:
            raise Exception('Cannot access Kan Agent') from exc
        return r.json()

    def export_model(self, name):
        """Ask the agent for a downloadable export of the latest ONNX
        iteration of the named Model."""
        params = self._gen_params('Model')
        params['kind'] = crd_params['Model']['plural']
        params['group'] = crd_params['Model']['group']
        params['id'] = name
        params['lookup'] = 'download'
        params['iteration'] = 'latest'
        params['platform'] = 'ONNX'
        try:
            r = httpx.get(self.url, params=params)
        except Exception as exc:
            raise Exception('Cannot access Kan Agent') from exc
        return r.json()

    def _post(self, crd_name, object_name, data):
        """POST `data` against the named object.

        NOTE(review): the HTTP response is ignored, so server-side failures
        are not reported to the caller (unchanged from the original code).
        """
        params = self._gen_params(crd_name)
        params['kind'] = crd_params[crd_name]['plural']
        params['group'] = crd_params[crd_name]['group']
        params['id'] = object_name
        try:
            httpx.post(self.url, params=params, json=data)
        except Exception as exc:
            raise Exception('Cannot access Kan Agent') from exc

    def post_instance_status(self, name, status_code, status_description):
        """Report an instance's status code and description to the agent."""
        self._post('Instance', name, data={"status_code": status_code, "status_description": status_description})

    def post_instance_fps(self, name, skill_name, fps):
        """Report the measured FPS for one skill of an instance."""
        self._post('Instance', name, data={f'fps_{skill_name}': str(fps)})
# Manual smoke test: requires a reachable Kan Agent instance.
if __name__ == '__main__':
    sac = KanAgentClient()
    print(sac.get_target('sdsdsd'))
| Azure/KAN | src/edge/EdgeSolution/modules/common/common/kan_agent_client.py | kan_agent_client.py | py | 3,527 | python | en | code | 61 | github-code | 6 | [
{
"api_name": "common.voe_ipc.KanAgent.Url",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "common.voe_ipc.KanAgent",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "common.kan_params.crd_params",
"line_number": 35,
"usage_type": "name"
},
{
... |
17625798812 | import sys
import os
import pandas as pd
from util import load_column_transformers, preprocess_data
from alphagan_class import AlphaGAN
from keras.losses import MeanAbsoluteError
from bigan import BIGAN
import keras.backend as K
import tensorflow as tf
import numpy as np
if __name__ == '__main__':
    # Initialise TF1-style global variables in the current Keras session.
    session = K.get_session()
    init = tf.global_variables_initializer()
    session.run(init)
    # Load the AlphaGAN encoder/generator weights from the snapshot directory.
    ag = AlphaGAN()
    ag.load_pretrained_models('./snapshots/3900_')
    # Load and normalise both test sets (presumably scaling against
    # ./data/ranges.csv -- confirm in preprocess_data).
    test_normal_df = pd.read_csv('./data/test_set_normal.csv')
    preprocess_data(test_normal_df, './data/ranges.csv')
    test_abnomal_df = pd.read_csv('./data/test_set_abnomal.csv')
    preprocess_data(test_abnomal_df, './data/ranges.csv')
    X_1 = test_normal_df.to_numpy()
    X_2 = test_abnomal_df.to_numpy()
    # Round-trip every sample through encoder + generator ...
    Z_hat_1 = ag.encoder.predict(X_1)
    X_hat_1 = ag.generator.predict(Z_hat_1)
    Z_hat_2 = ag.encoder.predict(X_2)
    X_hat_2 = ag.generator.predict(Z_hat_2)
    # ... and use the per-row L2 reconstruction error as the anomaly score.
    rec_losses_normal = np.linalg.norm(np.subtract(X_1, X_hat_1), axis=1)
    rec_losses_fraud = np.linalg.norm(np.subtract(X_2, X_hat_2), axis=1)
    num = len(rec_losses_normal) + len(rec_losses_fraud)
    print('Number of test samples: %d' % num)
    # Hand-tuned decision threshold on the reconstruction error.
    THRESH = 9.25
    rec_losses_normal_correct = [loss for loss in rec_losses_normal if loss < THRESH]
    print('Precision of normal transactions: %1.2f%%(%d/%d)' % (len(rec_losses_normal_correct) * 100 / len(rec_losses_normal),
                                                                len(rec_losses_normal_correct), len(rec_losses_normal)))
    rec_losses_fraud_correct = [loss for loss in rec_losses_fraud if loss > THRESH]
    print('Precision of fraud transactions: %1.2f%%(%d/%d)' % \
            (len(rec_losses_fraud_correct) * 100 / len(rec_losses_fraud), len(rec_losses_fraud_correct), len(rec_losses_fraud)))
| royalsalute/fraud-creditcard-detection | eval.py | eval.py | py | 1,784 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "keras.backend.get_session",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "tensorflow.global_variables_initializer",
"line_number": 16,
"usage_type": "call"
},
{
"a... |
26774631311 | import pygame
from sys import exit
import numpy as np
import copy as cp
import math
import random as rd
import time
class GameState:
    """One node of the Ataxx game tree.

    `board` is a list of rows, indexed board[y][x], with 0 = empty and
    1 / 2 = player pieces.  The Monte-Carlo/minimax fields are filled in
    lazily by the search routines.
    """
    def __init__(self, board):
        self.board = board
        # -1 while the game is running; otherwise the result code.
        self.end=-1
        # Child states produced by createChildren().
        self.children = []
        self.parent = None
        self.parentPlay = None # (play, movtype)
        self.parentCell = None
        # Monte-Carlo bookkeeping: simulation count, value, budget, UCT score.
        self.numsimulation=0
        self.mctsv=0
        self.mctss=0
        self.uct=0
    def createChildren(self, player_id):
        """Append to self.children one GameState per distinct legal move of
        `player_id`.  Move type 1 clones the piece; type 2 jumps it."""
        differentPlayBoards = []
        for i in range(len(self.board)):
            for j in range(len(self.board)):
                if self.board[j][i] == player_id:
                    moves = get_moves(self, (i,j))
                    for mov in moves:
                        if moves[mov][0]:
                            newboard = cp.deepcopy(self.board)
                            play = (i+mov[0], j+mov[1])
                            if moves[mov][1] == 1: # movtype 1: clone onto the target
                                newboard[play[1]][play[0]] = player_id
                            elif moves[mov][1] == 2:
                                # movtype 2: jump -- vacate the source square.
                                newboard[play[1]][play[0]] = player_id
                                newboard[j][i] = 0
                            # NOTE(review): duplicates are detected BEFORE the
                            # adjacent captures are applied, so moves that only
                            # differ after capture are deduplicated too.
                            if newboard not in differentPlayBoards:
                                differentPlayBoards.append(newboard)
                                newboard = get_and_apply_adjacent(play, newboard, player_id)
                                newState = GameState(newboard)
                                newState.parentCell = (i,j)
                                newState.parentPlay = (play, moves[mov][1])
                                newState.parent = self
                                self.children.append(newState)
def evaluatePlay_mcts(game,board,play,cell,player):
    """Heuristic score of a play for the greedy/MCTS players.

    `play` is (target_xy, movtype); `cell` is the source square.  Rewards
    adjacent enemy pieces (s1), adjacent friendly pieces (s2) and cloning
    moves (s3); penalises friendly neighbours left behind on a jump (s4).
    """
    s1=1
    s2=0.4
    s3=0.7
    s4=0.4
    soma=0
    # The eight neighbouring offsets of a square.
    vec=[(1,0),(-1,0),(0,1),(0,-1),(1,1),(1,-1),(-1,1),(-1,-1)]
    if play[1] == 1:
        # Cloning moves get a flat bonus.
        soma+=s3
        # NOTE(review): this branch indexes board[x+dx][y+dy] while the
        # movtype-2 branch uses board[y+dy][x+dx]; one of the two index
        # orders looks transposed -- confirm before relying on the scores.
        for mov in vec:
            # Only evaluate targets away from the border.
            if not (play[0][0]<1 or play[0][0]>len(game.board)-1-1 or play[0][1]<1 or play[0][1]>len(game.board)-1-1):
                if board[play[0][0]+mov[0]][play[0][1]+mov[1]]==3-player:
                    soma+=s1
                if board[play[0][0]+mov[0]][play[0][1]+mov[1]]==player:
                    soma+=s2
    elif play[1] == 2:
        for mov in vec:
            if not (play[0][0]<1 or play[0][0]>len(game.board)-1-1 or play[0][1]<1 or play[0][1]>len(game.board)-1-1):
                if board[play[0][1]+mov[1]][play[0][0]+mov[0]]==3-player:
                    soma+=s1
                if board[play[0][1]+mov[1]][play[0][0]+mov[0]]==player:
                    soma+=s2
                # Penalise friendly pieces left next to the vacated square.
                if not (cell[0]<1 or cell[0]>len(game.board)-1-1 or cell[1]<1 or cell[1]>len(game.board)-1-1):
                    if board[cell[1]+mov[1]][cell[0]+mov[0]]==player:
                        soma-=s4
    return soma
def final_move(game, board, play, player):
    """Decide whether `board` is terminal from `player`'s point of view.

    Returns (True, winner) when the game is over (winner 0 means a draw)
    and (False, -1) while play can continue.  `game` and `play` are kept
    for interface compatibility with the other evaluators.
    """
    cells = np.array(board)
    opponent = 3 - player
    mine = np.count_nonzero(cells == player)
    theirs = np.count_nonzero(cells == opponent)
    empties = np.count_nonzero(cells == 0)
    # One side wiped out: immediate win for the survivor.
    if theirs == 0:
        return (True, player)
    if mine == 0:
        return (True, opponent)
    # Empty squares remain, so the game goes on.
    if empties != 0:
        return (False, -1)
    # Full board: majority of pieces wins; equal counts draw.
    if mine > theirs:
        return (True, player)
    if theirs > mine:
        return (True, opponent)
    return (True, 0)
def evaluatePlay_minmax(game,board,play,cell,player,values): # minimax heuristic; weights come in via `values`
    """Like evaluatePlay_mcts but with configurable weights and terminal
    detection: wins/losses map to +/-inf, a draw to 0."""
    s1=values[0]
    s2=values[1]
    s3=values[2]
    s4=values[3]
    soma=0
    vec=[(1,0),(-1,0),(0,1),(0,-1),(1,1),(1,-1),(-1,1),(-1,-1)]
    final=final_move(game,board,play,player)
    if final[0]: # terminal position: return the corresponding extreme score
        if final[1]==player:
            return (math.inf)
        if final[1]==3-player:
            return (-math.inf)
        if final[1]==0:
            return 0
    if play[1] == 1:
        # Cloning moves get a flat bonus.
        soma+=s3
        # NOTE(review): same transposed-index suspicion as evaluatePlay_mcts;
        # this branch uses board[x+dx][y+dy], the jump branch board[y+dy][x+dx].
        for mov in vec:
            if not (play[0][0]<1 or play[0][0]>len(game.board)-1-1 or play[0][1]<1 or play[0][1]>len(game.board)-1-1):
                if board[play[0][0]+mov[0]][play[0][1]+mov[1]]==3-player:
                    soma+=s1
                if board[play[0][0]+mov[0]][play[0][1]+mov[1]]==player:
                    soma+=s2
    elif play[1] == 2:
        for mov in vec:
            if not (play[0][0]<1 or play[0][0]>len(game.board)-1-1 or play[0][1]<1 or play[0][1]>len(game.board)-1-1):
                if board[play[0][1]+mov[1]][play[0][0]+mov[0]]==3-player:
                    soma+=s1
                if board[play[0][1]+mov[1]][play[0][0]+mov[0]]==player:
                    soma+=s2
                # Penalise friendly pieces left next to the vacated square.
                if not (cell[0]<1 or cell[0]>len(game.board)-1-1 or cell[1]<1 or cell[1]>len(game.board)-1-1):
                    if board[cell[1]+mov[1]][cell[0]+mov[0]]==player:
                        soma-=s4
    return soma
def randomplay(game, player):
    """Expand `game` for `player` and return one child state chosen
    uniformly at random.

    Raises IndexError if the position has no legal move (callers are
    expected to check with skip() first).
    """
    game.createChildren(player)
    # random.choice is the idiomatic equivalent of randint-indexing.
    return rd.choice(game.children)
def implementar_montecarlos(game,player):
    """Monte-Carlo move selection: simulate every child, give extra budget
    to promising children (first by value, then by a UCT-style score) and
    return the child with the highest accumulated value."""
    C=1.5
    game.createChildren(player)
    bestchildren=[]
    Sbasic=100
    # First pass: every child gets a budget proportional to its piece count.
    for child in game.children:
        childnp=np.array(child.board)
        child.mctss=Sbasic*(1+0.1*(np.count_nonzero(childnp==player)))
        child.mctsv=montecarlots(int(child.mctss),child,player)
        child.numsimulation+=child.mctss
        if len(bestchildren)<=5:
            bestchildren.append(child)
        else:
            # NOTE(review): mutating `bestchildren` while iterating it can
            # skip entries, and a better child replaces EVERY weaker one,
            # not just the weakest -- verify this is intended.
            for bchild in bestchildren:
                if child.mctsv>bchild.mctsv:
                    bestchildren.remove(bchild)
                    bestchildren.append(child)
    # Second pass: re-simulate the short-listed children.
    for child in bestchildren:
        child.mctsv+=montecarlots(int(child.mctss),child,player)
        child.numsimulation+=child.mctss
    bestchildren=[]
    # Third pass: rank by a UCT-style exploration score.
    for child in game.children:
        child.uct=child.mctsv+C*(math.sqrt(1/(child.numsimulation)))
        if len(bestchildren)<=3:
            bestchildren.append(child)
        else:
            for bchild in bestchildren:
                if child.uct>bchild.uct:
                    bestchildren.remove(bchild)
                    bestchildren.append(child)
    for child in bestchildren:
        child.mctsv+=montecarlots(int((child.mctss)/2),child,player)
    bestchildren=[]
    # Finally keep the single child with the best accumulated value.
    for child in game.children:
        if len(bestchildren)==0:
            bestchildren.append(child)
        else:
            if child.mctsv>bestchildren[0].mctsv:
                bestchildren.pop(0)
                bestchildren.append(child)
    return bestchildren[0]
def montecarlots(numSimulations, game,player):
    """Run `numSimulations` random playouts from `game` and return an
    accumulated score for `player` (static piece difference plus large
    win/loss bonuses per playout)."""
    gamenp=np.array(game.board)
    # Start from the static piece difference.
    E=np.count_nonzero(gamenp==player)-np.count_nonzero(gamenp==(3-player))
    for i in range(numSimulations):
        player_hid=player
        board=cp.deepcopy(game.board)
        # NOTE(review): `game.end<10` is redundant while end == -1, and
        # randomplay() returns a GameState which is then wrapped in ANOTHER
        # GameState below (so `.board` becomes a GameState, not a list) --
        # this playout loop looks broken; verify before trusting results.
        while game.end == -1 and game.end<10:
            if player_hid == 1:
                game.createChildren(player_hid)
                board = randomplay(game,player_hid) # to test other AI algorithms just swap this function for the desired one
                game = GameState(board)
                player_hid = switchPlayer(player_hid)
            elif player_hid == 2:
                game.createChildren(player_hid)
                board = randomplay(game,player_hid) # same as the comment above
                game = GameState(board)
                player_hid = switchPlayer(player_hid)
            game.end = objective_testmcts(game, player_hid)
        # objective_testmcts encodes "wiped out / blocked" results as
        # winner_id + 10 and full-board results as the plain winner id.
        if game.end-10==player_hid:
            E+=500
        elif game.end-10==3-player_hid:
            E-=500
        elif game.end==player_hid:
            E+=50
        elif game.end==3-player_hid:
            E-=50
    return E
def greedy(game,player):
    """One-ply greedy player: score every child with evaluatePlay_mcts and
    return a GameState holding the best-scoring resulting board."""
    bestPlay = ([], -math.inf) # (board, score) of the best play found so far
    game.createChildren(player)
    for state in game.children: # state.parentPlay = ((x, y), movType)
        board = cp.deepcopy(state.board)
        value=evaluatePlay_mcts(state,board,state.parentPlay,state.parentCell,player)
        if value > bestPlay[1]:
            # Re-apply the move on the copied board (clone or jump) ...
            if state.parentPlay[1] == 1:
                board[state.parentPlay[0][1]][state.parentPlay[0][0]] = player
            elif state.parentPlay[1] == 2:
                board[state.parentCell[1]][state.parentCell[0]] = 0
                board[state.parentPlay[0][1]][state.parentPlay[0][0]] = player
            # ... and capture the adjacent enemy pieces.
            board = get_and_apply_adjacent((state.parentPlay[0][0], state.parentPlay[0][1]), board, player)
            bestPlay = (board, value)
    return GameState(bestPlay[0])
def implement_minimax(game,player,playerAI):
    """Entry point for the minimax player: search 5 plies with alpha-beta
    pruning, then walk the chosen leaf back up to the immediate child of
    `game` (the move to play now)."""
    max_depth = 5
    absodepth=max_depth
    result=minimaxabc(game,max_depth,absodepth,player,playerAI,-math.inf,math.inf)
    newresult = result[0]
    depth = result[2]
    # result[2] is the remaining depth at the evaluated leaf; climb the
    # parent chain until we reach the direct child of `game`.
    for _ in range(max_depth-depth-1):
        newresult = newresult.parent
    return newresult
def minimaxabc(game, max_depth,absodepth, player, playerAI, alpha, beta):
    """Alpha-beta minimax.  Returns (leaf_state, score, depth_at_leaf);
    `playerAI` is the maximising side."""
    game.createChildren(player)
    if max_depth==0 or game.children == []:
        # Leaf: score the position with the fixed heuristic weights.
        values = (1.0, 0.4, 0.7, 0.4)
        board = cp.deepcopy(game.board)
        value=(game,evaluatePlay_minmax(game,board,game.parentPlay,game.parentCell,player,values), max_depth)
        return value
    if player == 3-playerAI:
        # Minimising player (the AI's opponent).
        value =(GameState([]), math.inf,absodepth)
        for state in game.children:
            evaluation = minimaxabc(state, max_depth - 1,absodepth, 3-player, playerAI, alpha, beta)
            if evaluation[1]<value[1]:
                value=evaluation
            beta = min(beta, evaluation[1])
            if beta <= alpha:
                break
        return value
    # Maximising player (the AI itself).
    value =(GameState([]), -math.inf,absodepth)
    for state in game.children:
        evaluation = minimaxabc(state, max_depth - 1,absodepth, 3-player, playerAI, alpha, beta)
        if evaluation[1]>value[1]:
            value=evaluation
        alpha = max(alpha, evaluation[1])
        if beta <= alpha:
            break
    return value
#Board is indexed board[row][col] = board[j][i]; cell/play tuples are (i, j) = (x, y)
def get_moves(game, cell):
    """Map each candidate offset from `cell` to its legality.

    Each dict value is [False] for an illegal destination, or
    [True, movtype] where movtype 1 is a one-step clone and 2 is a jump.
    """
    offsets = [(1,0),(2,0),(1,1),(2,2),(1,-1),(2,-2),(-1,0),(-2,0),(-1,1),(-2,-2),(0,1),(0,2),(0,-1),(0,-2),(-1,-1),(-2,2)]
    size = len(game.board)
    moves = {}
    for off in offsets:
        x, y = cell[0] + off[0], cell[1] + off[1]
        # Legal only when the destination is on the board and empty.
        if not (0 <= x < size and 0 <= y < size) or game.board[y][x] != 0:
            moves[off] = [False]
            continue
        # A unit component means a step (clone); otherwise it is a jump.
        if 1 in off or -1 in off:
            moves[off] = [True, 1]
        else:
            moves[off] = [True, 2]
    return moves
#draws the board on screen
def drawBoard(game, screen):
    """Clear the window and draw the empty n x n grid."""
    n = len(game.board)
    screen.fill((255,255,255)) # white background
    # Draw the outer frame of the board.
    pygame.draw.line(screen, (0,0,0), (0,0), (800,0), 2)
    pygame.draw.line(screen, (0,0,0), (0,0), (0,800), 2)
    pygame.draw.line(screen, (0,0,0), (0,798), (800,798), 2)
    pygame.draw.line(screen, (0,0,0), (798, 0), (798,800), 2)
    # Draw the interior grid lines.
    for i in range(1,n):
        # vertical lines
        pygame.draw.line(screen, (0,0,0), (800*i/n,0), (800*i/n,800), 2)
        # horizontal lines
        pygame.draw.line(screen, (0,0,0), (0,800*i/n), (800,800*i/n), 2)
def drawPieces(game, screen):
    """Draw every piece (and blocked square) of `game.board` on `screen`."""
    n = len(game.board)
    for i in range(n):
        for j in range(n):
            # Player 1 pieces (blue).
            if game.board[j][i] == 1:
                pygame.draw.circle(screen, (0,0,255), ((800*i/n)+800/(2*n), (800*j/n)+800/(2*n)), 800/(3*n))
            # Player 2 pieces (green).
            if game.board[j][i] == 2:
                pygame.draw.circle(screen, (0,150,0), ((800*i/n)+800/(2*n), (800*j/n)+800/(2*n)), 800/(3*n))
            # Squares that cannot be played on (cell value 8) are filled black.
            if game.board[j][i] == 8:
                pygame.draw.rect(screen, (0,0,0), (800*i/n, 800*j/n, 800/n + 1, 800/n + 1))
# Show the game result graphically.
def drawResult(game, screen):
    """Draw the end-of-game banner; does nothing while the game is running."""
    if game.end == -1:
        return None
    font = pygame.font.Font('freesansbold.ttf', 32)
    # Black frame with a white inner panel for the message.
    pygame.draw.rect(screen, (0,0,0), (120, 240, 560, 320))
    pygame.draw.rect(screen, (255,255,255), (140, 260, 520, 280))
    # game.end: 0 = draw, 1 / 2 = winning player.
    if game.end == 0:
        text = font.render("Empate!", True, (0,0,0))
    elif game.end == 1:
        text = font.render("Jogador 1 vence!", True, (0,0,255))
    elif game.end == 2:
        text = font.render("Jogador 2 vence!", True, (0,150,0))
    text_rect = text.get_rect(center=(400, 400))
    screen.blit(text, text_rect)
def mousePos(game):
    """Translate the current mouse position into (column, row) board
    coordinates, assuming an 800x800 pixel window."""
    px, py = pygame.mouse.get_pos()
    cells = len(game.board)
    # Scale pixel coordinates down to cell indices.
    return (int(px * cells / 800), int(py * cells / 800))
#shows the selected cell on screen
def showSelected(game, screen, coord, player_id):
    """Highlight `coord` and its legal destinations; return a dict mapping
    each destination (x, y) to its move type (1 = clone, 2 = jump)."""
    n = len(game.board)
    i=coord[0]
    j=coord[1]
    # Maps each playable destination square to its move type.
    selectedType = {}
    if game.board[j][i] == player_id:
        # Highlight colour depends on whose piece was clicked.
        if player_id == 1:
            selectedCellRGB = (173,216,230) # light blue
        elif player_id == 2:
            selectedCellRGB = (144,238,144) # light green
        pygame.draw.rect(screen, selectedCellRGB, (800*i/n + 2, 800*j/n + 2, 800/n - 2 , 800/n - 2))
        moves=get_moves(game,coord)
        for mov in moves:
            if moves[mov][0]:
                play=(coord[0]+mov[0],coord[1]+mov[1])
                selectedType[play] = moves[mov][1]
                pygame.draw.rect(screen, selectedCellRGB, (800*play[0]/n + 2, 800*play[1]/n + 2, 800/n - 2 , 800/n - 2))
    return selectedType
def get_and_apply_adjacent(targetCell, newBoard, player_id):
    """Convert every opponent piece orthogonally or diagonally adjacent to
    `targetCell` into a `player_id` piece (the Ataxx capture rule).

    Mutates `newBoard` in place and returns it.
    """
    opponent = 3 - player_id  # inlined switchPlayer()
    offsets = [(-1,-1), (-1,0), (-1,1), (0,-1), (0,1), (1,-1), (1,0), (1,1)]
    size = len(newBoard)
    for dx, dy in offsets:
        x, y = targetCell[0] + dx, targetCell[1] + dy
        # Only in-bounds squares currently held by the opponent flip.
        if 0 <= x < size and 0 <= y < size and newBoard[y][x] == opponent:
            newBoard[y][x] = player_id
    return newBoard
def skip(game, player):
    """Return True when `player` has no legal move and must pass the turn."""
    game.createChildren(player)
    return len(game.children) == 0
def objective_testmcts(game, player):
    """Terminal test used by the Monte-Carlo playouts.

    Returns -1 while `player` still has a legal move, the winner's id when
    the board is full (0 for a draw), and winner_id + 10 when one side is
    eliminated or completely blocked.
    """
    gamenp = np.array(game.board)
    if np.count_nonzero(gamenp == 0) == 0:
        # Full board: majority wins; equal counts draw.
        if np.count_nonzero(gamenp == player) > np.count_nonzero(gamenp == (3 - player)):
            return player
        if np.count_nonzero(gamenp == player) < np.count_nonzero(gamenp == (3 - player)):
            return 3 - player
        return 0
    if np.count_nonzero(gamenp == player) == 0:
        # Player wiped out entirely.
        return (3 - player + 10)
    # Bug fix: the original tested `True in get_moves(...)`, which checks the
    # dict KEYS (offset tuples) and is therefore always False, and it only
    # ever inspected the first piece found.  Scan every piece of `player`
    # for at least one legal move instead.
    for j in range(len(gamenp)):
        for i in range(len(gamenp)):
            if gamenp[j][i] == player:
                moves = get_moves(game, (i, j))
                if any(entry[0] for entry in moves.values()):
                    return -1
    # No piece can move: the player is blocked in and loses.
    return (3 - player + 10)
def objective_test(game, player):
    """Game-over test for the interactive loop.

    Returns `player` when the opponent has no pieces left, -1 while empty
    squares remain, and otherwise the id of the side holding more pieces
    (0 for a draw on a full board).
    """
    cells = np.array(game.board)
    opponent = 3 - player
    if np.count_nonzero(cells == opponent) == 0:
        return player
    if np.count_nonzero(cells == 0) != 0:
        return -1
    mine = np.count_nonzero(cells == player)
    theirs = np.count_nonzero(cells == opponent)
    if mine > theirs:
        return player
    if theirs > mine:
        return opponent
    return 0
def executeMov(game, initialCell, targetCell, selectedType, player_id):
    """Apply the human player's move and return the resulting GameState.

    If `targetCell` is not in `selectedType` (i.e. not a legal destination)
    the returned state wraps an unchanged copy of the board.
    """
    newBoard = cp.deepcopy(game.board)
    if targetCell in selectedType:
        movType = selectedType[targetCell]
        # Move type 1: clone onto the target square.
        if movType == 1:
            newBoard[targetCell[1]][targetCell[0]] = player_id
            newBoard = get_and_apply_adjacent(targetCell, newBoard, player_id)
        # Move type 2: jump -- vacate the source square.
        elif movType == 2:
            newBoard[targetCell[1]][targetCell[0]] = player_id
            newBoard[initialCell[1]][initialCell[0]] = 0
            newBoard = get_and_apply_adjacent(targetCell, newBoard, player_id)
    newGame = GameState(newBoard)
    return newGame
def switchPlayer(player_id):
    """Return the id of the other player (1 <-> 2)."""
    other = 3 - player_id
    return other
#game mode Human vs Human
def jogo_Humano_Humano(game, screen):
    """Main loop for the Human-vs-Human mode: handle clicks, apply moves
    and redraw until objective_test reports a result, then show it."""
    player_id = 1
    clickState = False
    while game.end==-1:
        drawPieces(game, screen)
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                pygame.quit()
                exit()
            # If the current player is boxed in / has no legal move, the turn passes.
            if not skip(game,player_id):
                # First click: pick the piece to play and show its legal plays.
                if event.type == pygame.MOUSEBUTTONDOWN and clickState == False:
                    drawBoard(game, screen)
                    coord = mousePos(game)
                    selected = showSelected(game, screen, coord, player_id)
                    clickState = True
                    drawPieces(game, screen)
                # Second click: execute the chosen move.
                elif event.type == pygame.MOUSEBUTTONDOWN and clickState == True:
                    targetCell = mousePos(game)
                    prevBoard = cp.deepcopy(game.board)
                    game = executeMov(game, coord, targetCell, selected, player_id)
                    # Only switch turns if the board actually changed.
                    if not (np.array_equal(prevBoard,game.board)):
                        player_id = switchPlayer(player_id)
                    clickState=False
                    drawBoard(game, screen)
                    drawPieces(game, screen)
            else:
                player_id = switchPlayer(player_id)
            game.end = objective_test(game,player_id)
    #to display the winner
    while game.end != -1:
        drawResult(game,screen)
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                pygame.quit()
                exit()
            pygame.display.update()
        pygame.display.update()
def jogo_Humano_AI(game, screen, algorithm):
    """Main loop for the Human-vs-AI mode: player 1 is the human, player 2
    is the AI selected by `algorithm` (1 minimax, 2 MCTS, 3 greedy,
    4 random)."""
    player_id = 1
    clickState = False
    while game.end==-1:
        drawPieces(game, screen)
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                pygame.quit()
                exit()
            # If the current player is boxed in / has no legal move, the turn passes.
            if not skip(game,player_id):
                if player_id == 1:
                    # First click: pick the piece to play and show its legal plays.
                    if event.type == pygame.MOUSEBUTTONDOWN and clickState == False:
                        drawBoard(game, screen)
                        coord = mousePos(game)
                        selected = showSelected(game, screen, coord, player_id)
                        clickState = True
                        drawPieces(game, screen)
                    # Second click: execute the chosen move.
                    elif event.type == pygame.MOUSEBUTTONDOWN and clickState == True:
                        targetCell = mousePos(game)
                        prevBoard = cp.deepcopy(game.board)
                        game = executeMov(game, coord, targetCell, selected, player_id)
                        # Only switch turns if the board actually changed.
                        if not (np.array_equal(prevBoard,game.board)):
                            player_id = switchPlayer(player_id)
                        clickState=False
                        drawBoard(game, screen)
                        drawPieces(game, screen)
                else:
                    # AI's turn: dispatch to the selected algorithm.
                    if algorithm == 1:
                        game = implement_minimax(game,player_id, player_id)
                    elif algorithm == 2:
                        game = implementar_montecarlos(game,player_id)
                    elif algorithm == 3:
                        game = greedy(game,player_id)
                    elif algorithm == 4:
                        game = randomplay(game, player_id)
                    drawBoard(game, screen)
                    drawPieces(game, screen)
                    player_id = 1
            else:
                player_id = switchPlayer(player_id)
            game.end = objective_test(game,player_id)
    #to display the winner
    while game.end != -1:
        drawResult(game,screen)
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                pygame.quit()
                exit()
            pygame.display.update()
        pygame.display.update()
#sets the game window
def setScreen():
    """Create and return the 800x800 pygame window titled 'Ataxx'."""
    width = 800
    height = 800
    screen = pygame.display.set_mode((width, height))
    pygame.display.set_caption("Ataxx")
    return screen
def readBoard(ficheiro):
    """Load a board file and return the initial GameState.

    File format: first line is the board size n, followed by n lines of
    whitespace-separated integers (one board row each).
    """
    # `with` guarantees the file handle is closed even if parsing fails
    # (the original leaked the handle on an int() / split() error).
    with open(ficheiro, "r") as f:
        n = int(f.readline())
        board = [list(map(int, f.readline().split())) for _ in range(n)]
    return GameState(board)
# Ask the user which board they want to use.
def chooseBoard():
    """Prompt for a board number and return its file name ("tabX.txt")."""
    # Every board file must be named "tabX.txt".
    tableNum = input("Escolha o número do tabuleiro que quer usar para o jogo!\n1) 10x10\n2) 8x8\n3) 6x6\n4) 5x5\n5) 12x12\nTabuleiro: ")
    table = "tab"+tableNum+".txt"
    return table
def chooseMode():
    """Prompt for the game mode: 1 = Human vs Human, 2 = Human vs AI."""
    mode = int(input("Escolha o modo de jogo!\n1) Humano vs Humano\n2) Humano vs AI\nModo: "))
    return mode
def chooseAI():
    """Prompt for the AI opponent: 1 minimax, 2 MCTS, 3 greedy, 4 random."""
    algorithm=int(input("Escolha o seu adversário!\n1) Minimax\n2) MonteCarloTreeSearch **VT**\n3) Greedy\n4) Random Play\nModo: "))
    return algorithm
def playMode(game, screen, mode,algorithm):
    """Dispatch to the selected game-mode loop (`algorithm` is only used
    by the Human-vs-AI mode)."""
    if mode == 1:
        jogo_Humano_Humano(game, screen)
    elif mode == 2:
        jogo_Humano_AI(game,screen,algorithm)
def simulacao(numSimulations):
    """Headless benchmark: play `numSimulations` AI-vs-AI games (player 1 =
    greedy, player 2 = minimax), append the tallies to data.txt and print
    a summary."""
    playerTurns = [1,2,1,1]
    empate = 0
    w1 = 0
    comeutodas1 = 0
    w2 = 0
    comeutodas2 = 0
    for i in range(numSimulations):
        # Each game uses its own pre-generated board file tabSimN.txt.
        table = "tabSim" + str(i+1) + ".txt"
        #table = "tabSim18.txt"
        game = readBoard(table)
        #player_id = playerTurns[i%4]
        player_id = 1
        while game.end == -1:
            if not skip(game,player_id):
                if player_id == 1:
                    game = greedy(game,player_id) # to test other AI algorithms just swap this function for the desired one
                    player_id = switchPlayer(player_id)
                elif player_id == 2:
                    game = implement_minimax(game, player_id, player_id) # same as the comment above
                    player_id = switchPlayer(player_id)
            else:
                player_id = switchPlayer(player_id)
            game.end = objective_test(game, player_id)
        print(i+1)
        # Tally the result; also count total wipe-outs per side.
        if game.end == 0:
            empate += 1
        elif game.end == 1:
            w1 += 1
            gamenp = np.array(game.board)
            if np.count_nonzero(gamenp == 2) == 0:
                comeutodas1 += 1
        elif game.end == 2:
            w2 += 1
            gamenp = np.array(game.board)
            if np.count_nonzero(gamenp == 1) == 0:
                comeutodas2 += 1
    # Persist the aggregate numbers for later analysis.
    with open("data.txt", "a") as data:
        numbers = "%d %d %d %d\n" % (numSimulations, w1, w2, empate)
        data.write(numbers)
    print("Jogos: %d\nAI 1: %d (Jogos com todas as peças do oponente eliminadas: %d)\nAI 2: %d (Jogos com todas as peças do oponente eliminadas: %d)\nEmpate: %d\n" % (numSimulations, w1, comeutodas1, w2, comeutodas2, empate))
def main():
    """Interactive entry point: prompt for mode/AI/board, set up pygame
    and start the chosen game loop."""
    mode = chooseMode()
    if mode==2:
        algorithm=chooseAI()
    else:
        # Human-vs-Human mode needs no AI algorithm.
        algorithm=0
    table = chooseBoard()
    pygame.init()
    screen = setScreen()
    game = readBoard(table)
    drawBoard(game, screen)
    playMode(game, screen, mode,algorithm)
# Run the interactive game and report the total wall-clock time.
start_time = time.time()
main()
#simulacao(100)
print("--- %.5f seconds ---" % (time.time() - start_time))
{
"api_name": "copy.deepcopy",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "numpy.count_nonzero",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.count_nonzero",
... |
13659068319 | from django.http import HttpResponse
from django.shortcuts import redirect, render
from .forms import ContactForm
from django.core.mail import send_mail, BadHeaderError
from config.settings import RECIPIENTS_EMAIL, DEFAULT_FROM_EMAIL
# Create your views here.
def contact_view(request):
    """Display the contact form and e-mail its contents on a valid POST."""
    if request.method == "POST":
        form = ContactForm(request.POST)
        if form.is_valid():
            full_name = form.cleaned_data['full_name']
            from_email = form.cleaned_data['from_email']
            message = form.cleaned_data['message']
            try:
                # Bug fix: RECIPIENTS_EMAIL was previously passed as the 5th
                # positional argument of send_mail, i.e. as the boolean
                # `fail_silently` flag -- it is the recipient list.
                send_mail(f'{full_name} от {from_email}',
                          message,
                          DEFAULT_FROM_EMAIL,
                          RECIPIENTS_EMAIL)
            except BadHeaderError:
                return HttpResponse('Ошибка в теме письма.')
            return redirect('success')
        return HttpResponse('Неверный запрос.')
    # GET (and any other method): render an empty form.  The original only
    # bound `form` for GET/POST, crashing with UnboundLocalError otherwise.
    form = ContactForm()
    return render(request, "email/contact.html", {'form': form})
def success_view(request):
    """Confirmation page shown after a successful contact-form submission."""
    return render(request, "email/consuc.html")
| Dauka03/food_project_back | sendemail/views.py | views.py | py | 1,235 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "forms.ContactForm",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "forms.ContactForm",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.core.mail.send_mail",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "config... |
14959290509 | import json
from json import JSONEncoder
from yoti_python_sdk.crypto import Crypto
from yoti_python_sdk.http import SignedRequestBuilder
import yoti_python_sandbox
from .anchor import SandboxAnchor
from .attribute import SandboxAttribute
from .endpoint import SandboxEndpoint
from .sandbox_exception import SandboxException
from .token import YotiTokenRequest
from .token import YotiTokenResponse
class SandboxEncoder(JSONEncoder):
    """JSON encoder aware of the sandbox value objects, which expose their
    serializable state through a callable __dict__() method."""

    def default(self, o):
        # The three original isinstance branches were identical; collapse
        # them into a single tuple check.
        if isinstance(o, (YotiTokenRequest, SandboxAttribute, SandboxAnchor)):
            return o.__dict__()
        # Fall back to the standard encoder (raises TypeError for unknowns).
        return json.JSONEncoder.default(self, o)
class SandboxClient(object):
    """Client for the Yoti sandbox service.

    Signs requests with the application's PEM key and exchanges a
    YotiTokenRequest profile definition for a sandbox access token.
    """
    def __init__(self, sdk_id, pem_file, sandbox_url=None):
        if sandbox_url is None:
            sandbox_url = yoti_python_sandbox.DEFAULT_SANDBOX_URL
        self.sdk_id = sdk_id
        self.__endpoint = SandboxEndpoint(sdk_id)
        self.__sandbox_url = sandbox_url
        # Accept either a ready Crypto instance or a path to a PEM file.
        if isinstance(pem_file, Crypto):
            self.__crypto = pem_file
        else:
            self.__crypto = Crypto.read_pem_file(pem_file, "SandboxClient.__init__")

    def setup_sharing_profile(self, request_token):
        """
        Using the supplied YotiTokenRequest, this function will make a request
        to the defined sandbox environment to create a profile with the supplied values.
        The returned token can be used against the sandbox environment to retrieve the profile
        using the standard YotiClient.

        :param YotiTokenRequest request_token:
        :raises SandboxException: on any non-2xx response from the sandbox
        :return: the token for accessing a profile
        """
        request_path = self.__endpoint.get_sandbox_path()
        # SandboxEncoder knows how to serialize the sandbox value objects.
        payload = json.dumps(request_token, cls=SandboxEncoder).encode("utf-8")
        signed_request = (
            SignedRequestBuilder()
            .with_pem_file(self.__crypto)
            .with_base_url(self.__sandbox_url)
            .with_endpoint(request_path)
            .with_payload(payload)
            .with_post()
            .build()
        )

        response_payload = signed_request.execute()
        if response_payload.status_code < 200 or response_payload.status_code >= 300:
            raise SandboxException(
                "Error making request to sandbox service: "
                + str(response_payload.status_code),
                response_payload,
            )

        parsed = json.loads(response_payload.text)
        return YotiTokenResponse(parsed["token"])

    @staticmethod
    def builder():
        """
        Creates an instance of the sandbox client builder

        :return: instance of SandboxClientBuilder
        """
        return SandboxClientBuilder()
class SandboxClientBuilder(object):
def __init__(self):
self.__sdk_id = None
self.__pem_file = None
self.__sandbox_url = None
def for_application(self, sdk_id):
"""
Sets the application ID on the builder
:param str sdk_id: the SDK ID supplied from Yoti Hub
:return: the updated builder
"""
self.__sdk_id = sdk_id
return self
def with_pem_file(self, pem_file):
"""
Sets the pem file to be used on the builder
:param str pem_file: path to the PEM file
:return: the updated builder
"""
self.__pem_file = pem_file
return self
def with_sandbox_url(self, sandbox_url):
"""
Sets the URL of the sandbox environment on the builder
:param str sandbox_url: the sandbox environment URL
:return: the updated builder
"""
self.__sandbox_url = sandbox_url
return self
def build(self):
"""
Using all supplied values, create an instance of the SandboxClient.
:raises ValueError: one or more of the values is None
:return: instance of SandboxClient
"""
if self.__sdk_id is None or self.__pem_file is None:
raise ValueError("SDK ID/PEM file must not be None")
return SandboxClient(self.__sdk_id, self.__pem_file, self.__sandbox_url)
| getyoti/yoti-python-sdk-sandbox | yoti_python_sandbox/client.py | client.py | py | 4,176 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.JSONEncoder",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "token.YotiTokenRequest",
"line_number": 18,
"usage_type": "argument"
},
{
"api_name": "attribute.SandboxAttribute",
"line_number": 20,
"usage_type": "argument"
},
{
"api_na... |
1414123757 | import pytest
import os
import shutil
import tngsdk.project.workspace as workspace
from tngsdk.project.workspace import Workspace
from tngsdk.project.project import Project
class TestProjectUnit:
# create and return a temporary workspace 'test-ws'
@pytest.fixture(scope='module')
def workspace(self):
# start clean without workspace
if os.path.isdir('test-ws'):
shutil.rmtree('test-ws')
args = workspace.parse_args_workspace([
'-w', 'test-ws',
'--debug'
])
workspace.init_workspace(args)
assert os.path.isdir('test-ws')
yield 'test-ws'
shutil.rmtree('test-ws')
# test descriptors of project 'example-project'
def test_example_project_descriptors(self, workspace):
ws = Workspace.load_workspace(workspace)
example_project = Project.load_project('example-project', workspace=ws)
example_project.status()
vnfds = example_project.get_vnfds()
assert vnfds == ['tango_vnfd0.yml']
nsds = example_project.get_nsds()
assert nsds == ['tango_nsd.yml']
| sonata-nfv/tng-sdk-project | tests/test_project_unit.py | test_project_unit.py | py | 1,124 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "os.path.isdir",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "shutil.rmtree",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tngsdk.project.workspace.par... |
24519278933 | import argparse
import os
import cv2
from wand.image import Image
import numpy as np
#ArgumentParser객체:명령행을 파이썬 데이터형으로 파싱하는데 필요한 모든 정보가 들어있음
#ArgumentParser객체 생성
ap=argparse.ArgumentParser()
ap.add_argument("-i","--images",required=True, help="absolute path to the input image")
ap.add_argument("-c","--cascade",default="C:/Users/cat7892/Documents/GitHub/catholic/catface_detection/haarcascade_frontalcatface.xml",help="absolute path to detector haar cascade")
#parse_args():문자열을 객체로 변환 후 namespace의 attribute로 설정
args=vars(ap.parse_args())
count=0
#imagefolder->only heic, jpg type images
imagefolder=args['images'].replace('\\','/')
for image in os.listdir(args["images"]):
print(image)
#If the image file format is 'heic', it is converted to 'jpg'.
if image[-3:]!='jpg':
img=Image(filename=imagefolder+'/'+image)
img.format='jpg'
image=image.replace('heic', 'jpg')
img.save(filename=imagefolder+'/'+image)
img.close()
os.remove(imagefolder+'/'+image.replace('jpg','heic'))
'''
# load image+convert grayscale
color=cv2.imread(imagefolder+'/'+image)
gray=cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)
#load detector, then detect
detector=cv2.CascadeClassifier(args["cascade"])
rects=detector.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=10)
#loop over detect and save file
for (i, (x,y,w,h)) in enumerate(rects):
roi=color[y:y+h,x:x+w].copy()
print(x,y,w,h)
cv2.imwrite('C:/Users/cat7892/Documents/GitHub/catholic/catface_detection/test_detect/'+str(count)+'.jpg',roi)
cv2.waitKey()
count+=1
''' | Zzang-yeah/catholic | cat_detection/feic_to_jpg.py | feic_to_jpg.py | py | 1,733 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "wand.image.Image",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.remove",
"l... |
11711962924 | import os
import numpy as np
import bpy
import mathutils
import math
from satvis.utils.blender_utils import make_sat, make_sun, \
make_camera, make_torch, setup_depth, rotate_sat, \
rotate_earth, save_render, get_data, CAMERA_VIEW_DIRECTION, \
get_sat_path, move_sun, move_chaser
from satvis.orbital_dynamics.sun_position import julian_date, get_sun_dir
from satvis.orbital_dynamics.satellite import ReferenceSatellite
from satvis.orbital_dynamics.clohessy_wiltshire import compute_cw
def setup_scene(configs):
bpy.context.scene.render.resolution_x = configs["height"]
bpy.context.scene.render.resolution_y = configs["height"]
bpy.context.scene.render.resolution_percentage = 100
bpy.context.scene.world.horizon_color = (0, 0, 0)
bpy.context.scene.world.light_settings.use_environment_light = True
bpy.context.scene.world.light_settings.environment_energy = 0.02
def setup_params_partial(configs, params, depth_path):
# Setup parameters
initial_pos = np.array(configs["initial_pos"], dtype=np.float32) * configs["distance_scale"]
configs["cam_pos"] = np.array(configs["cam_pos"], dtype=np.float32) * configs["distance_scale"]
configs["max_depth"] = configs["max_depth"] * configs["distance_scale"]
configs["attitude"] = [math.radians(float(i)) for i in configs["attitude"]]
# If ang_vel is not a list of euler angles, get the rotation from axis angle
if not isinstance(configs["ang_vel"], (list, np.ndarray)):
axis = configs["axis"]
axis /= np.linalg.norm(axis)
# Rotate axis to line up with the camera view direction
rotation_diff = mathutils.Vector(CAMERA_VIEW_DIRECTION).rotation_difference(mathutils.Vector(configs["cam_dir"]))
axis = mathutils.Vector(axis)
axis.rotate(rotation_diff)
# Work out the rotation per step as a quaternion
angle = math.radians(configs["ang_vel"])
rotation_step = mathutils.Quaternion(axis, angle)
else:
ang_vel = [math.radians(float(i)) for i in configs["ang_vel"]]
rotation_step = mathutils.Euler(ang_vel, 'XYZ').to_quaternion()
# Convert angular velocity into quaternion rotation per step
axis, angle = rotation_step.to_axis_angle()
rotation_step = mathutils.Quaternion(axis, angle*configs["frame_skip"]/configs["fps"])
if params.sun_behind:
configs["sun_dir"] = 0
# If sun_dir is an angle, rotate this much from the view direction, around the world z axis
if not isinstance(configs["sun_dir"], (list, np.ndarray)):
angle = configs["sun_dir"] * np.pi / 180.
Rz = np.array([[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]]) # rotation matrix about z
sun_dir = Rz @ configs["cam_dir"]
configs["sun_dir"] = sun_dir
# Make objects
earth = bpy.data.objects['Earth']
earth.location = initial_pos
earth.scale = np.array([1,1,1]) * 637100 * configs['distance_scale']
#earth.transform_apply(scale=True)
sat_initial_attitude = mathutils.Euler(configs["attitude"], 'XYZ').to_quaternion()
sat_path = get_sat_path(configs["satObjDir"], configs["target"])
sat = make_sat(sat_path, sat_initial_attitude, configs["distance_scale"])
sun = make_sun(configs["sun_dir"], -10000000*configs["distance_scale"]*np.array(configs["sun_dir"]))
cam = make_camera(configs["cam_pos"], configs["cam_dir"], configs["distance_scale"], configs["fov"])
torch = make_torch(configs["cam_pos"], configs["cam_dir"], configs["distance_scale"], use_torch=not params.no_torch)
setup_depth(depth_path)
# Set clip distance
cam.data.clip_end = 1.5 * np.linalg.norm(initial_pos)
for a in bpy.context.screen.areas:
if a.type == 'VIEW_3D':
for s in a.spaces:
if s.type == 'VIEW_3D':
s.clip_end = 1.5 * np.linalg.norm(initial_pos)
return rotation_step, earth, sat, sun, cam, torch
def setup_params_full(configs, params, depth_path):
# Setup random orbit
inclination, ascending_node, periapsis = np.random.rand(3) * np.pi
mu = float(configs["mu"])
angular_momentum = np.sqrt(mu * configs["radius"])
ref_sat = ReferenceSatellite(0, angular_momentum, mu, inclination, ascending_node, periapsis)
print(ref_sat)
# Start at a random point in orbit
num_steps_in_orbit = ref_sat.period * configs["fps"]
start_iter = np.random.randint(num_steps_in_orbit)
times = np.arange(configs["duration"]*configs["fps"]+1) / configs["fps"] + start_iter
anomalies = times / num_steps_in_orbit * 2*np.pi
anomalies = anomalies % (2*np.pi)
ref_sat.set_states(anomalies)
# Convert date and time to Julian date
date = configs["date"].split('/')
assert(len(date) == 3 and len(date[2]) == 4), 'Date should be in the form dd/mm/yyyy'
year, month, day = [float(i) for i in date]
day += float(configs["time"]) / 24.
jd = julian_date(year, month, day)
# Get initial position of Earth, Sun and chaser
initial_pos = np.array(configs["radius"] * configs["distance_scale"]) * np.array([-1, 0, 0])
configs["max_depth"] = configs["max_depth"] * configs["distance_scale"]
configs["attitude"] = [math.radians(float(i)) for i in configs["attitude"]]
sun_dir = get_sun_dir(jd, 0, ref_sat.get_state()[:3])
# Get states of target and chaser
# If ang_vel is not a list of euler angles, get the rotation from axis angle
if not isinstance(configs["ang_vel"], (list, np.ndarray)):
axis = mathutils.Vector(configs["axis"])
axis.normalize()
# Work out the rotation per step as a quaternion
angle = math.radians(configs["ang_vel"])
rotation_step = mathutils.Quaternion(axis, angle)
else:
ang_vel = [math.radians(float(i)) for i in configs["ang_vel"]]
rotation_step = mathutils.Euler(ang_vel, 'XYZ').to_quaternion()
# Convert angular velocity into quaternion rotation per step
axis, angle = rotation_step.to_axis_angle()
rotation_step = mathutils.Quaternion(axis, angle*configs["frame_skip"]/configs["fps"])
# Get chaser states
chaser_initial_state = np.array([configs["distance"], 0, 0, 0, -2*ref_sat.omega*configs["distance"], 0]) # Initial state for a circular relative orbit
chaser_pos = chaser_initial_state[:3] * configs["distance_scale"]
chaser_dir = -chaser_initial_state[:3] / np.linalg.norm(chaser_initial_state[:3])
chaser_states = compute_cw(chaser_initial_state, ref_sat, anomalies)
# Get sun direction at each step
sun_dirs = []
for i, t in enumerate(times):
ref_sat.set_iter(i)
sun_dirs.append(get_sun_dir(jd, times[i], ref_sat.get_state()))
# Make objects
earth = bpy.data.objects['Earth']
earth.location = initial_pos
earth.scale = np.array([1,1,1]) * 637100 * configs['distance_scale']
#earth.transform_apply(scale=True)
sat_initial_attitude = mathutils.Euler(configs["attitude"], 'XYZ').to_quaternion()
sat_path = get_sat_path(configs["satObjDir"], configs["target"])
sat = make_sat(sat_path, sat_initial_attitude, configs["distance_scale"])
sun = make_sun(sun_dir, -10000000*configs["distance_scale"]*np.array(sun_dir))
cam = make_camera(chaser_pos, chaser_dir, configs["distance_scale"], configs["fov"])
torch = make_torch(chaser_pos, chaser_dir, configs["distance_scale"], use_torch=not params.no_torch)
setup_depth(depth_path)
# Set clip distance
cam.data.clip_end = 1.5 * np.linalg.norm(initial_pos)
for a in bpy.context.screen.areas:
if a.type == 'VIEW_3D':
for s in a.spaces:
if s.type == 'VIEW_3D':
s.clip_end = 1.5 * np.linalg.norm(initial_pos)
return rotation_step, earth, sat, sun, cam, torch, ref_sat.period, chaser_states, sun_dirs
def animate(configs, params, save_path, data_path, rotation_step, earth, sat, sun, cam, torch, period, chaser_states=None, sun_dirs=None):
frame_end = configs["duration"] * configs["fps"]
bpy.context.scene.frame_end = configs["duration"] * configs["fps"]
earth_rot_angle = configs["frame_skip"] / configs["fps"] / period * 2 * np.pi
# Rotate satellite by constant angular velocity
for i in range(frame_end//configs["frame_skip"]+1):
rotate_sat(sat, i*configs["frame_skip"], rotation_step)
rotate_earth(earth, i*configs["frame_skip"], earth_rot_angle)
# If doing a full simulation, move chaser and sun accordingly to the dynamics
if chaser_states is not None:
iter = i * configs["frame_skip"]
move_sun(sun, i, sun_dirs[iter], -1E8*configs["distance_scale"]*sun_dirs[iter])
move_chaser(cam, torch, i, chaser_states[iter,:3]*configs["distance_scale"], -chaser_states[iter,:3]/np.linalg.norm(chaser_states[iter,:3]))
# Save animation
data = []
for i in range(frame_end//configs["frame_skip"]+1):
bpy.context.scene.frame_set(i*configs["frame_skip"])
save_render(i, save_path)
data.append(get_data(sat, cam, i/configs["fps"]*configs["frame_skip"], configs["distance_scale"]))
# Save data to file
np.savetxt(os.path.join(data_path, "data.txt"), data, header="pos[3], target q[4], chaser q[4], time")
| Ben-Guthrie/satvis | utils/vis_utils.py | vis_utils.py | py | 9,405 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "bpy.context",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "bpy.context",
... |
27516283256 | import random
from discord.ext import commands
class Hive(commands.Cog):
def __init__(self, bot):
self.bot = bot
self._last_member = None
@commands.command(name='roll_dice',
help='<min> <max>')
async def roll_dice(self, ctx, min: int, max: int):
await ctx.send(random.randint(min, max))
def setup(bot):
bot.add_cog(Hive(bot))
| tintin10q/hive-discord-bot | commands/roll_dice.py | roll_dice.py | py | 394 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "disco... |
5095318867 | import random
import torchvision
from torchvision import datasets, transforms
from data.RPSLS.types import *
transformations = transforms.Compose([
transforms.ToTensor(),
])
dataset = datasets.ImageFolder(
root='../../../data/RPSLS/rock-paper-scissors-lizard-spock',
transform = transformations
)
def generate_data(dataset):
examples = list()
i = list(range(len(dataset)))
random.shuffle(i)
i = iter(i)
while True:
try:
examples.append(next_example(i,dataset))
except StopIteration:
break
def next_example(i,dataset):
x1, x2 = next(i), next(i)
y = winner(x1, x2,dataset)
return x1, x2, y
generate_data(dataset) | AytugAltin/ProblogRPSLS | examples/RPSLS/Rock-Paper-Scissors/generate_data.py | generate_data.py | py | 698 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "torchvision.transforms.Compose",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 7,
"usage_type": "call"
},
{
... |
30789951381 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.impute import KNNImputer
from sklearn.metrics import f1_score
# # Exploratory Data Analysis (EDA):
#
# Step 1: Loaded the dataset and examined its structure and dimensions.
#
# Step 2: Checked for missing values and handled them appropriately (e.g., imputation or removal).
#
# Step 3: Explored the distribution and summary statistics of each feature.
#
# Step 4: Visualized the relationships between variables using heat map.
# In[2]:
# Load the dataset
data = pd.read_csv(r"C:\Users\Nimisha\OneDrive\Desktop\Assessment\starcraft_player_data.csv")
# Display the first few rows of the dataset
data.head()
# In[3]:
data.info()
# In[4]:
# Check the shape of the dataset
print("Shape of the dataset:", data.shape)
# Check for missing values
print("Missing values:\n", data.isna().sum())
# Summary statistics
print("Summary statistics:\n", data.describe())
# In[5]:
# Check the distribution of the target variable
print("Distribution of the target variable:\n", data['LeagueIndex'].value_counts())
# In[6]:
class_counts = data['LeagueIndex'].value_counts()
plt.bar(class_counts.index, class_counts.values)
plt.xlabel('Class')
plt.ylabel('Count')
plt.title('Class Distribution')
plt.show()
# After looking at the dimension and structure of the dataset , I noticed a few important characteristics about the dataset:
# 1. There are 3 columns described as objects and those are Age, TotalHours and HoursPerWeek. I tried to find the null values in these columns but there are no null values. Instead, they have '?' so it needs to be either removed or imputed. First, we will simply remove all the '?' from the dataset.
#
# 2. This is a class imbalance problem which we will address later on. As we can see there are very few data points with LeagueIndex 7 and 8.
# Conducted feature selection using correlation analysis and identified relevant features.
# In[7]:
data['Age'].unique()
# In[8]:
data['HoursPerWeek'].unique()
# In[9]:
data['TotalHours'].unique()
# In[10]:
data[data['TotalHours']=='?']
# I checked all the three columns with '?' and figured out that TotalHours has the maximum '?' and if we drop its rows then our
# issue will be resolved because it combines the '?' rows of the other 2 columns as well.
# In[11]:
data2 = data.drop(data[data['TotalHours'] == '?'].dropna().index)
# In[12]:
data2.head()
# In[13]:
data2.info()
# In[14]:
data2[data2['Age']=='?']
# In[15]:
data2[data2['HoursPerWeek']=='?']
# Then I converted all the 3 columns to integer type to find the correlation between the features.
# In[16]:
#converting them into integer
data2['Age'] = data2['Age'].astype('int64')
data2['HoursPerWeek'] = data2['HoursPerWeek'].astype('int64')
data2['TotalHours'] = data2['TotalHours'].astype('int64')
# In[17]:
data2.isna().sum()
# In[18]:
#Then I checked correlation between columns to understand what impact does the other features have on target variable.
correl = data2.corr()
trace = go.Heatmap(z=correl.values,
x=correl.index.values,
y=correl.columns.values)
data=[trace]
layout = go.Layout(width=1000, height=900)
fig = go.Figure(data=data, layout=layout)
fig.show()
# In[19]:
sorted_corr = correl['LeagueIndex'].sort_values(ascending=False)
sorted_corr
#found the two least correlated columns to LeagueIndex i.e. GameID and TotalHours
# # Data Preprocessing and Feature Engineering:
#
# Step 1: Split the data into features (X) and the target variable (y) for rank prediction.
#
# Step 2: Scaled the continuous variables using standardization or normalization.
# In[20]:
# Split the dataset into features and target variable
X = data2.drop('LeagueIndex', axis=1)
y = data2['LeagueIndex']
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Feature scaling using StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# # Model Selection, Training and Evaluation:
#
# 1. Selected appropriate models for rank prediction, such as logistic regression, decision trees, random forests, gradient boosting, SVM, or Neural Network.
#
#
# 2. Split the data into training and testing sets for model evaluation.
#
#
# 3. Trained the chosen models on the training set.
#
#
# 4. Evaluated the trained models on the testing set using suitable metrics like F1 score. I used F1 score to evaluate the performance instead of accuracy because this is a class imbalance problem.
# In[21]:
# Create and train different models
models = [
LogisticRegression(),
DecisionTreeClassifier(),
RandomForestClassifier(),
GradientBoostingClassifier(),
SVC(),
MLPClassifier()
]
model_names = [
'Logistic Regression',
'Decision Tree',
'Random Forest',
'Gradient Boosting',
'SVM',
'Neural Network'
]
scores = []
# Evaluate models and print accuracy
for model, name in zip(models, model_names):
model.fit(X_train_scaled, y_train)
y_pred = model.predict(X_test_scaled)
f1score = f1_score(y_test, y_pred, average='weighted')
print(f"{name} f1 Score: {f1score}")
scores.append(f1score)
# In[22]:
# Plotting the F1 scores
plt.figure(figsize=(8, 6))
plt.bar(model_names, scores)
plt.xlabel('Models')
plt.ylabel('F1 Score')
plt.title('Comparison of F1 Scores for Different Models')
plt.xticks(rotation=45)
plt.ylim(0, 1) # Set the y-axis limit
plt.show()
# # Class Imbalance Problem
# Now we will address the class imbalance problem by class weighting. Assign higher weights to the minority class samples or
# lower weights to the majority class samples during model training. This gives more importance to the minority class during the
# learning process. I added weights and re-evaluated the decision tree classifier.
# In[23]:
# Calculate class weights
class_weights = dict(zip(np.unique(y_train), np.bincount(y_train)))
# Create and train the decision tree classifier with class weights
dt_classifier = DecisionTreeClassifier(class_weight = class_weights)
dt_classifier.fit(X_train_scaled, y_train)
# Make predictions on the testing data
y_pred = dt_classifier.predict(X_test_scaled)
# Compute the weighted F1 score
f1score = f1_score(y_test, y_pred, average='weighted')
print("f1 Score:",f1score)
# # Removed least correlated columns
# In[24]:
#Next, we remove the two least correlated columns to LeagueIndex.
data3 = data2.drop(columns=['GameID','TotalHours'])
# In[25]:
# Split the dataset into features and target variable
X = data3.drop('LeagueIndex', axis=1)
y = data3['LeagueIndex']
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Feature scaling using StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# Create and train different models
models = [
LogisticRegression(),
DecisionTreeClassifier(),
RandomForestClassifier(),
GradientBoostingClassifier(),
SVC(),
MLPClassifier()
]
model_names = [
'Logistic Regression',
'Decision Tree',
'Random Forest',
'Gradient Boosting',
'SVM',
'Neural Network'
]
# Evaluate models and print accuracy
for model, name in zip(models, model_names):
model.fit(X_train_scaled, y_train)
y_pred = model.predict(X_test_scaled)
f1score = f1_score(y_test, y_pred, average="weighted")
print(f"{name} F1 Score: {f1score}")
# # K-Nearest Neighbors Classifier
# In[26]:
knn_model = KNeighborsClassifier(n_neighbors=14)
knn_model.fit(X_train_scaled, y_train)
knn_pred = knn_model.predict(X_test_scaled)
f1score = f1_score(y_test, knn_pred, average="weighted")
print("KNN F1 Score:", f1score)
# # Imputation using KNN
# Now we will perform imputation. Instead of dropping all the rows with '?', we will fill the missing values through imputation.
# In[27]:
sampledata = pd.read_csv(r"C:\Users\Nimisha\OneDrive\Desktop\Assessment\starcraft_player_data.csv")
# In[28]:
sampledata[['Age','TotalHours','HoursPerWeek']] = sampledata[['Age','TotalHours','HoursPerWeek']].replace('?', None)
# In[29]:
sampledata.info()
# In[30]:
sampledata.isna().sum()
# In[31]:
#imputing the values using knn
missingdata = sampledata[['Age','TotalHours','HoursPerWeek']]
# In[32]:
k = 5
knn_imputer = KNNImputer(n_neighbors=k)
imputed_data = knn_imputer.fit_transform(missingdata)
# In[33]:
df_imputed = pd.DataFrame(imputed_data, columns=missingdata.columns)
# In[34]:
df_imputed.info()
# In[35]:
sampledata[['Age','TotalHours','HoursPerWeek']] = df_imputed[['Age','TotalHours','HoursPerWeek']]
# In[36]:
sampledata.info()
# In[37]:
sampledata.isna().sum()
# In[38]:
# Split the dataset into features and target variable
X = sampledata.drop('LeagueIndex', axis=1)
y = sampledata['LeagueIndex']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=50)
X_train
X_test
y_train
y_test
# In[39]:
rf_model = RandomForestClassifier()
rf_model.fit(X_train, y_train)
rf_pred = rf_model.predict(X_test)
f1score = f1_score(y_test, rf_pred,average= "weighted")
print("Random Forest F1 Score:", f1score)
# Finally, let's address the hypothetical scenario where stakeholders want to collect more data and seek guidance. Based on the EDA and model results, I would suggest the following:
#
# 1. Collect more samples for the minority classes: Since the dataset is imbalanced, collecting more data for the underrepresented rank levels can improve the model's performance.
#
# 2. Gather additional features: If there are relevant features that are not present in the current dataset, collecting additional data with those features can enhance the model's predictive power.
#
# 3. Monitor data quality: Ensure that the new data collection process maintains data quality standards, such as avoiding missing values, outliers, or inconsistencies.
#
# 4. Perform iterative model updates: As more data becomes available, it's beneficial to periodically update and retrain the model using the augmented dataset to capture any evolving patterns or changes in player performance.
#
# These recommendations aim to enhance the predictive capabilities of the model and provide more accurate rank predictions.
# In[ ]:
| nimishakhaitan/Evil-Geniuses-assessment | Assessment_Data_Science.py | Assessment_Data_Science.py | py | 11,117 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.bar",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "matplotlib.py... |
30896806208 | import cv2
# loading the images
img1 = cv2.imread("png//yy.jpg")
img2 = cv2.imread("png//ra.jpg")
# resizing the both images in same resolution
scale_percent = 60 # percent of original size
width = int(img1.shape[1] * scale_percent / 90)
height = int(img2.shape[0] * scale_percent / 90)
dim = (width, height)
# resize image
reimg1 = cv2.resize(img1,dsize= dim, interpolation = cv2.INTER_AREA)
reimg2 = cv2.resize(img2, dsize=dim, interpolation = cv2.INTER_AREA)
#including face detection
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
gray1 = cv2.cvtColor(reimg1,cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(reimg2,cv2.COLOR_BGR2GRAY)
face1 = face_cascade.detectMultiScale(gray1,scaleFactor = 1.05, minNeighbors=5)
face2 = face_cascade.detectMultiScale(gray2,scaleFactor = 1.05 ,minNeighbors=5)
# putting the rectangle on the faces
for x,y ,w,h in face1:
reimg1 = cv2.rectangle(reimg1, (x, y), (x+w, y+h),(255, 0, 0), 3)
for x,y ,w,h in face2:
reimg2 = cv2.rectangle(reimg2, (x, y), (x+w, y+h), (0, 0, 250), 3)
# difference img of the images
diff = cv2.subtract(reimg1, reimg2)
cv2.imshow("diff",diff)
if cv2.waitKey(0) == ord('q'):
cv2.destroyAllWindows()
print("Difference : ",diff)
# comparing the two images for exactly same of not
b, g, r = cv2.split(diff)
if cv2.countNonZero(b)==0 and cv2.countNonZero(g) == 0 and cv2.countNonZero(r) == 0:
print("Both Images Are Identical !")
else:
print("Images Are Not Identical")
# finding the similarities of two images
sift = cv2.xfeatures2d.SIFT_create()
kp1, desc1 = sift.detectAndCompute(reimg2, None) # key-points corresponds to the position
kp2, desc2 = sift.detectAndCompute(reimg1, None)
index_params = dict(algorithm=0, trees=5)
search_params = dict()
# Fast Library for Approximate Nearest Neighbors
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(desc1, desc2, k=2) # to get the best matches
good_points = [] # correct matches
ratio = 0.6
for m, n in matches:
if m.distance < ratio*n.distance:
good_points.append(m)
# showing the both images (compare mode)
result = cv2.drawMatches(reimg1, kp1, reimg2, kp2, good_points, None)
cv2.imshow("result", result)
cv2.imshow("img1", reimg1)
cv2.imshow("img2", reimg2)
cv2.waitKey(0)
cv2.destroyAllWindows()
acc = len(good_points)*100/len(matches)
print("Good Points : {}".format(len(good_points)))
print("Total Matches : {}".format(len(matches)))
print("Accuracy : {}".format(acc))
if acc > 0:
print("Both Are The same person")
else:
print("Different Persons") | singhsaurabh1998/OpenCv | SimilarImg.py | SimilarImg.py | py | 2,656 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_AREA",
"line_number": 13... |
38979909130 | from datetime import datetime, timedelta, timezone
import pytz
tokyo_tz = pytz.timezone('Asia/Tokyo')
# def delete_feed_with_too_many_entries(reader, db, url):
# entries = list(reader.get_entries())
# if len(entries) > 300:
# print("deleting feeds: ", url)
# reader.delete_feed(url)
# return setup_feed(db, url)
# return reader
def delete_old_entries(reader):
entries = list(reader.get_entries())
for entry in entries:
if entry.published.replace(tzinfo=None) < datetime.now() - timedelta(days=1):
print(entry)
reader.delete_entry(entry)
| kei49/rss-to-slack | src/feed.py | feed.py | py | 637 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pytz.timezone",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "datetime.timedel... |
37693718022 | from enum import Enum, auto
from typing import Any, Tuple
from mesa import Model
from mesa.datacollection import DataCollector
from mesa.space import ContinuousSpace
from mesa.time import SimultaneousActivation
from autonomous_intersection.agents.direction import Direction
from autonomous_intersection.agents.visualcell import VisualCell
from autonomous_intersection.constants import PIXEL_PER_METER, STEPS_PER_SECOND
from autonomous_intersection.managers.advanced_reservation_manager import AdvancedReservationBasedManager
from autonomous_intersection.managers.prediction_manager import PredictionBasedManager
from autonomous_intersection.managers.reservation_manager import ReservationBasedManager
from autonomous_intersection.managers.traffic_light_manager import TrafficLightManager
from autonomous_intersection.rect import Rect
class Manager(Enum):
TrafficLight = auto()
BasicReservation = auto()
AdvancedReservation = auto()
Prediction = auto()
class Intersection(Model):
def __init__(self, height=1000, width=1000, spawn_rate=10, manager: str = Manager.TrafficLight.name, *args: Any,
**parameters: Any):
super().__init__(*args, **parameters)
self.schedule = SimultaneousActivation(self)
self.space = ContinuousSpace(height, width, False)
self.width = width
self.height = height
self.road_width = 7 * PIXEL_PER_METER
self.manager = self.get_manager(manager)(self.width, self.height, self.road_width, parameters, self)
self.build_background()
self.agent_id = 0
self.running = True
self.spawn_rate = spawn_rate / 100
self.car_height = int(1.5 * PIXEL_PER_METER)
self.data_collector = DataCollector(model_reporters={"Throughput [cars / min]": self.get_agent_rate})
@staticmethod
def get_manager(manager):
if manager == Manager.TrafficLight.name: return TrafficLightManager
if manager == Manager.BasicReservation.name: return ReservationBasedManager
if manager == Manager.AdvancedReservation.name: return AdvancedReservationBasedManager
return PredictionBasedManager
def get_agent_id(self):
self.agent_id += 1
return self.agent_id
def build_background(self):
for cell in self.manager.build_background():
self.space.place_agent(cell, (cell.x, cell.y))
self.schedule.add(cell)
def spawn_car(self, entry, width, height):
cell = self.manager.create_new_car(entry, (width, height), self.get_agent_id())
self.space.place_agent(cell, (cell.x, cell.y))
self.schedule.add(cell)
def add_new_agents(self):
for entry in Direction:
if not self.manager.is_entry_occupied(entry) and self.random.random() < self.spawn_rate:
self.spawn_car(entry, *self.random_car_size(self.car_height))
def random_car_size(self, height) -> Tuple[int, int]:
return self.random.randint(round(height * 1.3), height * 2), height
def step(self):
self.add_new_agents()
self.manager.remove_cars(self.space, self.schedule)
self.manager.control_cars()
self.schedule.step()
self.data_collector.collect(self)
def draw_debug_object(self, rect: Rect, color: str) -> VisualCell:
cell = VisualCell((rect.left, rect.top), (rect.width, rect.height), self, color, 2)
self.space.place_agent(cell, (cell.x, cell.y))
self.schedule.add(cell)
return cell
def delete_debug_object(self, cell: VisualCell) -> None:
self.space.remove_agent(cell)
self.schedule.remove(cell)
def get_agent_rate(self):
if self.manager.first_step is None: return 0
steps = self.manager.steps - self.manager.first_step + 1
if steps < 50: return 0
return ((STEPS_PER_SECOND * 60) * self.manager.agent_count) // steps
| GrzegorzNieuzyla/Autonomous-Intersection | autonomous_intersection/model.py | model.py | py | 3,906 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "enum.Enum",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "enum.auto",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "enum.auto",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "enum.auto",
"line_number": 22,
... |
39363863586 | import networkx as nx
from node import Node
class Graph(nx.Graph):
def __init__(self):
super().__init__()
self.arcs=[]
self.nao_servido = 0
def increment_nao_servido(self):
self.nao_servido += 1
def decrement_nao_serveido(self):
self.nao_servido-=1
def create_node(self, id: str) -> Node:
node = Node(id)
return node
def create_nodes(self, matrix):
listaN = []
dict_ap = {}
for i in range(1, len(matrix[0])):
node_name = matrix[0][i]
node = self.create_node(node_name)
listaN.append(node)
dict_ap[node_name] = node
self.add_nodes_from(listaN)
multiple = set()
i = 0
for i in range(1, len(matrix)):
for j in range(1, len(matrix[i])):
if matrix[i][j] == '1':
if(not (matrix[0][j], matrix[i][0]) in multiple):
dict_ap[matrix[0][j]].add_neighbor(
dict_ap[matrix[i][0]])
dict_ap[matrix[i][0]].add_neighbor(
dict_ap[matrix[0][j]])
self.add_edge(dict_ap[matrix[0][j]],
dict_ap[matrix[i][0]])
multiple.add((matrix[0][j], matrix[i][0]))
multiple.add((matrix[i][0], matrix[0][j]))
def create_arcs(self):
for i in self.nodes:
for j in i.vizinhos:
if ((i.id, j.id) not in self.arcs) and ((j.id, i.id) not in self.arcs):
self.arcs.append((i, j))
| marcoscezar1/Simulador | Simulação I/graph.py | graph.py | py | 1,642 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "networkx.Graph",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "node.Node",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "node.Node",
"line_number": 17,
"usage_type": "name"
}
] |
21033229877 | """
All together
Demonstrate how you can build a quick and dirty framework with a bottle like
API.
Chick is the main application frame.
CapitalizeResponse is a middleware which you can use to wrap your application.
It stores session information, and it capitalizes all responses.
"""
def request_factory(env):
"""
Dynamically create a Request class with slots and read
only attributes
"""
class Request:
__slots__ = [k.replace(".", "_").lower() for k in env.keys()]
def __setattr__(self, name, value):
try:
getattr(self, name)
raise ValueError("Can't modify {}".format(name))
except AttributeError:
super().__setattr__(name, value)
request = Request()
for k, v in env.items():
setattr(request, k.replace(".", "_").lower(), v)
return request
class Chick:
"""
A WSGI Application framework with API inspired by Bottle and Flask.
There is No HTTPRequest Object and No HTTPResponse object.
Just barebone routing ...
"""
routes = {}
def __call__(self, environ, start_response):
try:
callback, method = self.routes.get(environ.get('PATH_INFO'))
except TypeError:
start_response('404 Not Found', [('Content-Type', 'text/plain')])
return [b'404 Not Found']
if method != environ.get('REQUEST_METHOD'):
start_response('405 Method Not Allowed',
[('Content-Type', 'text/plain')])
return [b'404 Method Not Allowed']
request = request_factory(environ)
response = callback(request)
start_response(response.status, response.content_type)
return (response.body.encode(),)
def add_route(self, path, wrapped, method):
self.routes[path] = (wrapped, method)
def get(self, path):
def decorator(wrapped):
self.add_route(path, wrapped, 'GET')
return wrapped
return decorator
def post(self, path):
def decorator(wrapped):
self.add_route(path, wrapped, 'POST')
return wrapped
return decorator
class Response:
__slots__ = ('body', 'status', 'content_type')
def __init__(self, body, status='200 OK',
content_type="text/plain"):
self.body = body
self.status = status
self.content_type = [('Content-Type', content_type)]
def __iter__(self):
return [self.body]
class CapitalizeResponseMiddleware:
"""
A middlerware that manipulates the response
"""
def __init__(self, app):
self.app = app
self.visits = 0 # state keeping made easier
def __call__(self, environ, start_response, *args, **kw):
self.visits += 1
response = self.app(environ, start_response)
response = [line.upper() for line in response]
response.append("\nYou visited {} times".format(self.visits).encode())
return response
chick = Chick()
@chick.get("/")
def index(request):
return Response("hello world!")
@chick.post("/input/")
def test_post(request):
return Response("")
@chick.get("/response/")
def response(request):
res = "".join("{}: {}\n".format(s, getattr(request, s))
for s in request.__slots__)
return Response("hello world!\n" + res)
capital_chick = CapitalizeResponseMiddleware(chick)
if __name__ == "__main__":
from wsgiref.simple_server import make_server
httpd = make_server('', 8000, capital_chick)
print("Serving on port 8000...")
# Serve until process is killed
httpd.serve_forever()
| oz123/advanced-python | examples/all_together/chick_w_request_response.py | chick_w_request_response.py | py | 3,678 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "wsgiref.simple_server.make_server",
"line_number": 147,
"usage_type": "call"
}
] |
34607069404 | import os
import json
import pandas as pd
import numpy as np
import networkx as nx
#所有节点和边构成网络图
def form_graph(parent_path):
every_pro_path = os.path.join(parent_path, r'dataset\secondExperiments\COVID_Node2Vec.csv')
simGO_path = os.path.join(parent_path, r'dataset\secondExperiments\train_COVID_AllHuman_GoSim.xls')
# 网络图的构建
G = nx.Graph()
pro_file = pd.read_csv(every_pro_path)
pro = np.array(pro_file["name"])
proList = [] # 每个蛋白质的索引
for i in range(len(pro)):
proList.append(pro[i])
G.add_node(i)
edges_df = pd.read_excel(simGO_path)
source = np.array(edges_df["protein"])
target = np.array(edges_df["neghbor_protein"])
sim = np.array(edges_df["sim"])
edgesList = []
for j in range(len(source)):
edgesList.append((source[j].split("_")[1], target[j].split("_")[1]))
G.add_edge(int(source[j].split("_")[0]), int(target[j].split("_")[0]))
G[int(source[j].split("_")[0])][int(target[j].split("_")[0])]['weight'] = sim[j]
print("图形已构成")
return G
#通过所有核数据
def readresultjson(parent_path, graph):
result_corefile = os.path.join(parent_path, r'result\secondExperiments\train_COVID_result.json')
with open(result_corefile, 'r', encoding='utf8')as fp:
json_data = json.load(fp)
seed = []
for core_k, core_v in json_data.items():
core = core_v[0]["cover"]
seed.extend(core)
seed = list(set(seed))
return json_data,seed
#通过核获得一阶邻点
def getfirstneighbor(json_data,seed,graph):
adj = graph._adj
neighbordict = {}
for k,v in json_data.items():
allneighborNode = []
neighborNode = []
core = v[0]["cover"]
for i in core:
everyneigh = adj[int(i)].keys()
neighborNode.extend(everyneigh)
neighborNode = list(set(neighborNode))
for i in neighborNode:
if i not in seed:
allneighborNode.append(i)
neighbordict[k] = neighborNode
return adj,neighbordict
#适应度函数
def fitness_function(adj,complex, core,graph):
sum_degree_in = 0 # 所有顶点的入度之和
sum_degree_out = 0 # 所有顶点的出度之和
E = 0
for i in range(len(complex)):
degree_in = 0 # 每个节点的入度
degree_out = 0 # 每个节点的出度
i_adj = adj[complex[i]].keys()
for z in i_adj:
if z in complex:
if (complex[i], int(z)) in graph.edges():
degree_in += graph[complex[i]][int(z)]['weight']
else:
degree_in += graph[int(z)][complex[i]]['weight']
else:
if (complex[i], int(z)) in graph.edges():
degree_out += graph[complex[i]][int(z)]['weight']
else:
degree_out += graph[int(z)][complex[i]]['weight']
for j in range(len(complex)):
if i < j:
if (complex[i], complex[j]) in graph.edges() or (
complex[j], complex[i]) in graph.edges():
E += 1
sum_degree_in += degree_in
sum_degree_out += degree_out
a = 0.8
modularity = (sum_degree_in - sum_degree_out) / (sum_degree_out + sum_degree_in)
density = (2 * E) / (len(complex) * (len(complex) - 1))
score = a*density+(1-a)*modularity
return score
#通过分数形成核心-附件的形式
def core_accessories(json_data,neighbordict,adj,graph):
resultDict = {}
for k, v in json_data.items():
complexjson = {}
resultList = []
core = [int(i) for i in v[0]["cover"]]
complex = core+neighbordict[k]
#求每个一阶邻点与core总的功能相似性
score_neighbordict = {}
for j in neighbordict[k]:
score = 0
for z in core:
if (int(z), int(j)) in graph.edges():
score += graph[int(z)][int(j)]['weight']
elif (int(j), int(z)) in graph.edges():
score += graph[int(j)][int(z)]['weight']
else:
score += 0
score_neighbordict[j] = score
score_neighbordict = sorted(score_neighbordict.items(), key=lambda item: item[1])
if len(complex) > 3:
core_score = fitness_function(adj, complex, core, graph)
for i in score_neighbordict:
# for i in neighbordict[k]:
if len(complex) > 3:
complex.remove(i[0])
complex_score = fitness_function(adj, complex, core, graph)
if complex_score >= core_score:
core_score = complex_score
else:
complex.append(i[0])
else:
break
elif len(complex) == 3:
core_score = fitness_function(adj, complex, core, graph)
else:
# continue
core_score = 0
# if len(core) > 1:
# core_score = fitness_function(adj,core)
# else:
# core_score = 0
# complex = core
# for i in neighbordict[k]:
# complex.append(i)
# complex_score = fitness_function(adj, complex)
# if complex_score >= core_score:
# core_score = complex_score
# else:
# complex.remove(i)
complexjson["cover"] = complex
complexjson["score"] = core_score
resultList.append(complexjson)
resultDict[k] = resultList
return resultDict
def savecomplex(parent_path,complexjson):
result_path = parent_path + r'\result\secondExperiments\train_COVID_resultComplex.json'
with open(result_path, 'w') as fw:
json.dump(complexjson, fw)
if __name__ == '__main__':
ROOT_DIR = os.path.dirname(os.path.abspath('__file__'))
parent_path = os.path.dirname(ROOT_DIR)
graph = form_graph(parent_path)
json_data,seed = readresultjson(parent_path, graph)
adj,neighbordict = getfirstneighbor(json_data,seed,graph)
complexjson = core_accessories(json_data,neighbordict,adj,graph)
print(complexjson.__len__())
savecomplex(parent_path,complexjson) | LittleBird120/DiseaseGenePredicition | DiseaseGenePredicition/20210315covering-clustering-algorithm - COVID/algorithm/formComplex.py | formComplex.py | py | 6,315 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,... |
41283122101 | # -*- coding: utf-8 -*-
import math
import numpy as np
from scipy.interpolate import interp1d
class SpatialParameters:
def __init__(self, signal, fs, window_size, running_step):
self.time_steps, self.iacc, self.tiacc, self.wiacc = self.__get_xcorr_descriptors(
signal, fs, window_size, running_step)
def __get_xcorr_descriptors(self, signal, fs, window_size, running_step):
w_samples = round(window_size * fs)
rs_samples = round(running_step * fs)
time_steps = np.array([])
iacc = np.array([])
tiacc = np.array([])
wiacc = np.array([])
wiacc_d = np.array([])
wiacc_u = np.array([])
max_delay = 1 # (ms)
dT = math.floor(max_delay * 0.001 * fs)
t_axis_length = (2*dT) + 1
T_AXIS = np.linspace(-max_delay, max_delay, t_axis_length)
delta = t_axis_length//2
iD = 0
iU = w_samples
lim = math.ceil(t_axis_length/2)-1
while iU < len(signal):
c = 0
idx = 0
max_value = 0
ts = np.round(iD / fs, 2)
time_steps = np.append(time_steps,ts)
cll0 = np.correlate(signal[iD:iU,0], signal[iD:iU,0])
crr0 = np.correlate(signal[iD:iU,1], signal[iD:iU,1])
if((cll0 == 0) or (crr0 == 0)):
iacc = np.append(iacc, 0)
wiacc = np.append(wiacc, 0)
wiacc_d = np.append(wiacc_d, 0)
wiacc_u = np.append(wiacc_u, 0)
if((cll0 == 0) and (crr0 != 0)):
tiacc = np.append(tiacc, 1)
if((cll0 != 0) and (crr0 == 0)):
tiacc = np.append(tiacc, -1)
else:
tiacc = np.append(tiacc, 0)
scale = math.sqrt(cll0*crr0)
iacf = np.zeros([(2*delta)+1])
for tau in range(2*delta, -1, -1):
L = [0]
R = [0]
if(c < lim):
L = signal[iD+delta-c:iU, 0]
R = signal[iD:iU-delta+c, 1]
elif(c == lim):
L = signal[iD:iU, 0]
R = signal[iD:iU, 1]
else:
L = signal[iD:iU-c+delta, 0]
R = signal[iD+c-delta:iU, 1]
xcorr = np.correlate(L, R) / scale
#iacf[tau, step] = xcorr
c += 1
if(xcorr > max_value):
max_value = xcorr
idx = tau
iacc = np.append(iacc, max_value)
tiacc_window = T_AXIS[idx]
tiacc = np.append(tiacc, tiacc_window)
alpha = 0.9*max_value
idx_minus = np.linspace(idx, 0, idx+1)
idx_plus = np.linspace(idx, t_axis_length-1, t_axis_length-idx)
t_minus = -1;
t_plus = 1;
i_m = 0
i_p = 0
m_found = False
if(len(idx_minus) > 1):
for i in idx_minus:
if(iacf[int(i)] < alpha):
i_m = int(i)
if(idx-i > 0):
m_found = True
x_m = iacf[i_m:idx+1]
y_m = T_AXIS[i_m:idx+1]
t_f = interp1d(x_m, y_m, kind='linear')
t_minus = t_f(alpha)
break
p_found = False
if(len(idx_plus) > 1):
for i in idx_plus:
if(iacf[int(i)] < alpha):
i_p = int(i)
if(i-idx > 0):
p_found = True
x_p = iacf[idx:i_p+1]
y_p = T_AXIS[idx:i_p+1]
t_f = interp1d(x_p, y_p, kind='linear')
t_plus = t_f(alpha)
break
wiacc_d = np.append(wiacc_d, t_minus)
wiacc_u = np.append(wiacc_u, t_plus)
if(not(m_found)):
wiacc = np.append(wiacc, (t_plus - tiacc_window) * 2)
elif(not(p_found)):
wiacc = np.append(wiacc, (tiacc_window - t_minus) * 2)
else:
wiacc = np.append(wiacc, (t_plus - t_minus))
iD += rs_samples
iU += rs_samples
return time_steps, iacc, tiacc, wiacc
| kgordillo-hub/SoundMonitor-MetricsCalculator | SpatialParameters.py | SpatialParameters.py | py | 4,821 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": ... |
24416364915 | import grp
import json
import logging
import os
import pkgutil
import tempfile
from cloudify.decorators import operation
from cloudify.exceptions import NonRecoverableError
from managed_nagios_plugin._compat import text_type
from managed_nagios_plugin.constants import (
BASE_OBJECTS_DIR,
OBJECT_DIR_PERMISSIONS,
OBJECT_OWNERSHIP,
RATE_BASE_PATH,
)
from managed_nagios_plugin.rest_utils import (
get_entities,
run_workflow,
StartWorkflowFailed,
)
from managed_nagios_plugin.utils import (
deploy_configuration_file,
deploy_file,
disable_service,
download_and_deploy_file_from_blueprint,
enable_service,
generate_certs,
reload_systemd_configuration,
run,
start_service,
stop_service,
yum_install,
yum_remove,
_decode_if_bytes
)
SSL_KEY_PATH = '/etc/nagios/ssl.key'
SSL_CERT_PATH = '/etc/nagios/ssl.crt'
BLUEPRINT_SSL_KEY_PATH = 'ssl/{key_file}'
BLUEPRINT_SSL_CERT_PATH = 'ssl/{cert_file}'
NAGIOSREST_SERVICES = ['nagiosrest-gunicorn', 'httpd']
@operation
def create(ctx):
props = ctx.node.properties
ctx.logger.info('Validating SSL properties')
if bool(props['ssl_certificate']) != bool(props['ssl_key']):
raise NonRecoverableError(
'Either ssl_certificate and ssl_key must both be provided, '
'or neither of them must be provided. '
'ssl_certificate was: {ssl_certificate}; '
'ssl_key was: {ssl_key}'.format(
ssl_certificate=props['ssl_certificate'],
ssl_key=props['ssl_key'],
)
)
ctx.logger.info('Enabling EPEL (if required)')
yum_install(text_type('epel-release'))
ctx.logger.info('Installing required packages')
yum_install([
'mod_ssl',
'nagios',
'nagios-plugins-disk',
'nagios-plugins-load',
'nagios-plugins-ping',
'nagios-plugins-snmp',
'nagios-selinux',
'net-snmp',
'net-snmp-utils',
'python-flask',
'python-gunicorn',
'python-jinja2',
'python-requests',
'selinux-policy-devel',
'incron',
])
ctx.logger.info('Deploying SELinux configuration')
# Prepare SELinux context for trap handler
tmp_path = tempfile.mkdtemp()
with open(
os.path.join(tmp_path, 'cloudify-nagios-snmp-trap-handler.te'), 'w',
) as policy_handle:
policy_handle.write(_decode_if_bytes(pkgutil.get_data(
'managed_nagios_plugin',
'resources/selinux/cloudify_nagios_snmp_trap_handler.te',
)))
run(['make', '-f', '/usr/share/selinux/devel/Makefile', '-C', tmp_path],
sudo=True)
run(['semodule',
'-i',
os.path.join(tmp_path, 'cloudify-nagios-snmp-trap-handler.pp')],
sudo=True)
run(['rm', '-rf', tmp_path], sudo=True)
ctx.logger.info('Deploying nagios plugins and SNMP trap handler')
for supporting_lib in ('_compat.py',
'constants.py',
'utils.py',
'snmp_utils.py',
'nagios_utils.py',
'rest_utils.py',
'resources/scripts/nagios_plugin_utils.py',
'resources/scripts/logging_utils.py'):
if supporting_lib.startswith('resources/scripts/'):
destination_filename = supporting_lib[len('resources/scripts/'):]
else:
destination_filename = supporting_lib
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
supporting_lib,
),
destination='/usr/lib64/nagios/plugins/' + destination_filename,
ownership='root.nagios',
permissions='440',
sudo=True,
)
for script in ('check_snmp_numeric',
'check_snmp_aggregate',
'check_group_aggregate',
'check_group_meta_aggregate',
'cloudify_nagios_snmp_trap_handler',
'notify_cloudify',
'check_nagios_command_file',
'check_snmptrap_checks'):
source = os.path.join('resources/scripts/', script)
script_content = pkgutil.get_data('managed_nagios_plugin', source)
destination = os.path.join('/usr/lib64/nagios/plugins', script)
deploy_file(
data=script_content,
destination=destination,
permissions='550',
sudo=True,
)
ctx.logger.info('Deploying nagiosrest')
run(['mkdir', '-p', '/usr/local/www/nagiosrest'], sudo=True)
for nagiosrest_file in ('nagiosrest.py',
'nagiosrest_group.py',
'nagiosrest_target.py',
'nagiosrest_tenant.py',
'logging_utils.py'):
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/scripts/' + nagiosrest_file,
),
destination='/usr/local/www/nagiosrest/' + nagiosrest_file,
ownership='root.nagios',
permissions='440',
sudo=True,
)
for supporting_lib in ('_compat.py',
'nagios_utils.py',
'utils.py',
'constants.py'):
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
supporting_lib,
),
destination='/usr/local/www/nagiosrest/' + supporting_lib,
ownership='root.nagios',
permissions='440',
sudo=True,
)
for template in ('hostgroup.template', 'target.template', 'node.template',
'group.template', 'group_check.template',
'meta_group_check.template'):
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
os.path.join('resources', template),
),
destination='/usr/local/www/nagiosrest/' + template,
ownership='root.nagios',
permissions='440',
sudo=True,
)
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/systemd_nagiosrest.conf',
),
destination='/usr/lib/systemd/system/nagiosrest-gunicorn.service',
ownership='root.root',
permissions='440',
sudo=True,
)
ctx.logger.info('Deploying notification configuration script')
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/scripts/update_notify_cloudify_configuration',
),
destination='/usr/local/bin/update_notify_cloudify_configuration',
ownership='root.root',
permissions='500',
sudo=True,
# Must have the group of the agent user for reconcile operation to
# work correctly
template_params={'group': grp.getgrgid(os.getgid()).gr_name},
)
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'_compat.py',
),
destination='/usr/local/bin/_compat.py',
ownership='root.root',
permissions='400',
sudo=True,
)
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'utils.py',
),
destination='/usr/local/bin/utils.py',
ownership='root.root',
permissions='400',
sudo=True,
)
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'constants.py',
),
destination='/usr/local/bin/constants.py',
ownership='root.root',
permissions='400',
sudo=True,
)
ctx.logger.info(
'Creating directory structure for storing temporary rate data'
)
for rate_dir in ('nodes', 'instances'):
rate_storage_path = os.path.join(RATE_BASE_PATH, rate_dir)
run(['mkdir', '-p', rate_storage_path], sudo=True)
run(['chown', 'nagios.', rate_storage_path], sudo=True)
run(['restorecon', rate_storage_path], sudo=True)
if props['ssl_certificate']:
if props['ssl_certificate'].startswith("-----BEGIN CERTIFICATE-----"):
deploy_file(
data=props['ssl_key'],
destination=SSL_KEY_PATH,
ownership='root.root',
permissions='440',
sudo=True,
)
deploy_file(
data=props['ssl_certificate'],
destination=SSL_CERT_PATH,
ownership='root.root',
permissions='444',
sudo=True,
)
else:
download_and_deploy_file_from_blueprint(
source=BLUEPRINT_SSL_KEY_PATH.format(
key_file=props['ssl_key'],
),
destination=SSL_KEY_PATH,
ownership='root.root',
permissions='440',
ctx=ctx,
)
download_and_deploy_file_from_blueprint(
source=BLUEPRINT_SSL_CERT_PATH.format(
cert_file=props['ssl_certificate'],
),
destination=SSL_CERT_PATH,
ownership='root.root',
permissions='444',
ctx=ctx,
)
else:
ctx.logger.info('Generating SSL certificate')
generate_certs(SSL_KEY_PATH, SSL_CERT_PATH, ctx.logger)
with open(SSL_CERT_PATH) as crt_handle:
ctx.instance.runtime_properties['ssl_certificate'] = crt_handle.read()
ctx.logger.info('Reloading systemd configuration')
reload_systemd_configuration()
@operation
def configure(ctx):
props = ctx.node.properties
ctx.logger.info('Configuring nagios web user')
username = props['nagios_web_username']
password = props['nagios_web_password']
tmpdir = tempfile.mkdtemp()
tmp_htpass = os.path.join(tmpdir, 'passwd')
run(['htpasswd', '-bc', tmp_htpass, username, password])
run(['mv', tmp_htpass, '/etc/nagios/passwd'], sudo=True)
run(['rm', '-rf', tmpdir])
run(['chown', 'root.apache', '/etc/nagios/passwd'], sudo=True)
run(['chmod', '640', '/etc/nagios/passwd'], sudo=True)
run(['usermod', '-G', 'nagios', 'apache'], sudo=True)
ctx.logger.info('Deploying automated reaction configuration')
# We're using username+password because current token implementation is
# unsuitable for this.
reaction_configuration = {
'username': props['cloudify_manager_username'],
'password': props['cloudify_manager_password'],
}
deploy_file(
data=json.dumps(reaction_configuration),
destination='/etc/nagios/cloudify_manager.json',
ownership='nagios.{group}'.format(
# Must have the group of the agent user for reconcile operation to
# work correctly
group=grp.getgrgid(os.getgid()).gr_name,
),
permissions='440',
sudo=True,
)
notification_plugin_storage_dir = '/var/spool/nagios/cloudifyreaction'
run(['mkdir', '-p', notification_plugin_storage_dir], sudo=True)
run(['restorecon', notification_plugin_storage_dir], sudo=True)
run(['chown', 'nagios.nagios', notification_plugin_storage_dir],
sudo=True)
run(['chmod', '750', notification_plugin_storage_dir], sudo=True)
ctx.logger.info('Preparing object paths')
run(['rm', '-rf', BASE_OBJECTS_DIR], sudo=True)
object_subdirs = [
'checks',
'commands',
'contacts',
'groups/group_instances',
'groups/tenants',
'groups/types',
'templates',
'timeperiods',
'deployments',
'snmp_traps',
'targets',
'target_types',
'tenants',
]
for subdir in object_subdirs:
subdir = os.path.join(BASE_OBJECTS_DIR, subdir)
run(['mkdir', '-p', subdir], sudo=True)
run(['chown', '-R', OBJECT_OWNERSHIP, BASE_OBJECTS_DIR], sudo=True)
run(['chmod', '-R', OBJECT_DIR_PERMISSIONS, BASE_OBJECTS_DIR], sudo=True)
ctx.logger.info('Deploying nagios object configuration')
config_source_dest_params = (
# Fully qualified paths because these two go outside the objects dir
('cgi.cfg', '/etc/nagios/cgi.cfg', {'user': username}),
('nagios.cfg', '/etc/nagios/nagios.cfg', {}),
# The rest are 'normal' configuration files
('base_system.cfg', 'base_system.cfg', {}),
('command_host_icmp.cfg', 'commands/check_host_icmp.cfg', {}),
('command_no_check.cfg', 'commands/no_check.cfg', {}),
('command_local_load.cfg', 'commands/check_local_load.cfg', {}),
('command_local_disk.cfg', 'commands/check_local_disk.cfg', {}),
('command_snmp_value.cfg', 'commands/check_snmp_value.cfg', {}),
('command_check_nagios_command_file.cfg',
'commands/check_nagios_command_file.cfg', {}),
('command_snmp_aggregate.cfg',
'commands/check_snmp_aggregate.cfg', {}),
('command_group_aggregate.cfg',
'commands/check_group_aggregate.cfg', {}),
('command_group_meta_aggregate.cfg',
'commands/check_group_meta_aggregate.cfg', {}),
('command_snmptrap_checks.cfg',
'commands/check_snmptrap_checks.cfg', {}),
('notification.cfg', 'commands/notify_automation.cfg', {}),
('contact.cfg', 'contacts/automation.cfg', {}),
('template_generic_service.cfg', 'templates/generic_service.cfg', {}),
('template_generic_host.cfg', 'templates/generic_host.cfg', {}),
('template_pseudo_host.cfg', 'templates/pseudo_host.cfg', {}),
('timeperiod_24x7.cfg', 'timeperiods/24x7.cfg', {}),
)
for source, dest, params in config_source_dest_params:
deploy_configuration_file(
ctx.logger,
source=os.path.join('resources/base_configuration', source),
destination=dest,
template_params=params,
# We can't validate before we've put all of the configuration in
# place as it will be invalid until it's finished
validate=False,
# We can't reload, it's not running yet
reload_service=False,
sudo=True,
)
ctx.logger.info('Configuring httpd for ssl')
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/httpd.conf',
),
destination='/etc/httpd/conf/httpd.conf',
ownership='root.apache',
permissions='440',
sudo=True,
)
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/ssl.conf',
),
destination='/etc/httpd/conf.d/ssl.conf',
ownership='root.apache',
permissions='440',
sudo=True,
)
ctx.logger.info('Configuring httpd for nagiosrest')
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/httpd_nagiosrest.conf',
),
destination='/etc/httpd/conf.d/nagiosrest.conf',
ownership='root.apache',
permissions='440',
sudo=True,
)
ctx.logger.info('Allowing nagiosrest to restart nagios')
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/sudoers-nagiosrest',
),
destination='/etc/sudoers.d/nagios-service-restart',
ownership='root.root',
permissions='440',
sudo=True,
)
ctx.logger.info('Deploying base SNMP configuration')
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/snmp',
),
destination='/etc/snmp/snmp.conf',
ownership='root.root',
permissions='440',
sudo=True,
)
trap_community = ctx.node.properties['trap_community']
if trap_community:
ctx.logger.info('Configuring SNMP traps to use handler')
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/snmptrapd',
),
destination='/etc/snmp/snmptrapd.conf',
ownership='root.root',
permissions='440',
sudo=True,
template_params={
'trap_community': trap_community,
},
)
ctx.logger.info('Configuring notification script')
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/incron.allow',
),
destination='/etc/incron.allow',
ownership='root.root',
permissions='440',
sudo=True,
)
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/incron_root_spool',
),
destination='/var/spool/incron/root',
ownership='root.root',
permissions='400',
template_params={
'homedir': os.path.expanduser('~'),
},
sudo=True,
)
agent_config_dir = os.path.join(
os.path.expanduser('~'),
'.cfy-agent',
)
agent_configs = [
os.path.join(agent_config_dir, filename)
for filename in os.listdir(agent_config_dir)
]
# We'll use the most recently updated agent config
current_agent_config = max(agent_configs, key=os.path.getmtime)
run(
[
'/usr/local/bin/update_notify_cloudify_configuration',
current_agent_config,
],
sudo=True,
)
ctx.logger.info('Deploying logging configuration')
level = props['component_log_level'].upper()
validate_level = logging.getLevelName(level)
if not isinstance(validate_level, int):
raise NonRecoverableError(
'{level} is not a valid logging level. '
'It is recommended that component_log_level be set to one of '
'DEBUG, INFO, WARNING, ERROR'.format(level=level)
)
component_logging_config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'format': '%(name)s(%(process)s) [%(levelname)s]: %(message)s',
},
},
'handlers': {
'syslog': {
'formatter': 'default',
'level': level,
'class': 'logging.handlers.SysLogHandler',
'address': '/dev/log',
},
},
'loggers': {
'': {
'handlers': ['syslog'],
'level': level,
'propagate': True,
},
},
}
deploy_file(
data=json.dumps(component_logging_config),
destination='/etc/nagios/cloudify_components_logging.cfg',
ownership='root.nagios',
permissions='440',
sudo=True,
)
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/logrotate_config',
),
destination='/etc/logrotate.d/managed_nagios',
ownership='root.root',
permissions='444',
sudo=True,
)
deploy_file(
data=pkgutil.get_data(
'managed_nagios_plugin',
'resources/base_configuration/rsyslog_config',
),
destination='/etc/rsyslog.d/managed_nagios_logging.conf',
ownership='root.root',
permissions='444',
sudo=True,
)
stop_service('rsyslog')
start_service('rsyslog')
@operation
def start(ctx):
ctx.logger.info('Enabling and starting nagios and httpd services')
services = ['nagios', 'incrond']
if ctx.node.properties['start_nagiosrest']:
services.extend(NAGIOSREST_SERVICES)
if ctx.node.properties['trap_community']:
services.append('snmptrapd')
for service in services:
enable_service(service)
start_service(service)
@operation
def delete(ctx):
ctx.logger.info('Uninstalling nagios and web server packages')
yum_remove([
'nagios',
'httpd', # Installed by nagios, remove it as it is outward facing
'nagios-selinux',
'nagios-plugins-load',
'nagios-plugins-disk',
'nagios-plugins-ping',
'nagios-plugins-snmp',
'net-snmp',
])
ctx.logger.info('Removing nagiosrest')
stop_service('nagiosrest-gunicorn')
disable_service('nagiosrest-gunicorn')
run(['rm', '/usr/lib/systemd/system/nagiosrest-gunicorn.service'],
sudo=True)
reload_systemd_configuration()
ctx.logger.info('Removing leftover data, configuration, and scripts')
for path in (
'/etc/nagios',
'/etc/httpd',
'/usr/lib64/nagios',
'/usr/local/www/nagiosrest',
'/var/spool/nagios',
'/var/log/nagios',
'/etc/snmp',
'/var/spool/incron/root',
):
run(['rm', '-rf', path], sudo=True)
def _node_has_nagiosrest_properties(node):
return 'nagiosrest_monitoring' in node.get('properties', {})
@operation
def reconcile_monitoring(ctx, only_deployments=None, only_tenants=None):
    """Restart monitoring for nagiosrest-monitored deployments.

    For each tenant (optionally filtered by ``only_tenants``), find all
    nodes carrying ``nagiosrest_monitoring`` properties and execute the
    ``cloudify.interfaces.monitoring.start`` operation on their
    deployments (optionally filtered by ``only_deployments``).

    :param ctx: Cloudify operation context (used for its logger).
    :param only_deployments: optional list of deployment IDs to process;
        other deployments are skipped.
    :param only_tenants: optional list of tenant names to process; other
        tenants are skipped.
    """
    if not only_deployments:
        only_deployments = []
    if not only_tenants:
        only_tenants = []

    ctx.logger.info('Getting tenant list')
    tenants = [
        tenant['name']
        for tenant in get_entities(
            entity_type='tenants',
            tenant='default_tenant',
            properties=['name'],
            logger=ctx.logger,
        )
    ]

    # Deployments whose workflow failed to start, keyed by tenant name.
    problem_deployments = {}
    # Bug fix: the post-loop summary previously tested `targets`, which
    # only reflected the LAST tenant processed (and could be stale when
    # the last tenant was skipped), so multi-tenant runs could be
    # misreported. Track whether ANY tenant produced targets instead.
    found_targets = False
    for tenant in tenants:
        if only_tenants and tenant not in only_tenants:
            ctx.logger.info('Skipping tenant {tenant}'.format(
                tenant=tenant,
            ))
            continue
        ctx.logger.info('Checking deployments for tenant {tenant}'.format(
            tenant=tenant,
        ))
        interesting_nodes = get_entities(
            entity_type='nodes',
            tenant=tenant,
            properties=['deployment_id', 'id'],
            logger=ctx.logger,
            include=_node_has_nagiosrest_properties,
        )
        ctx.logger.info(
            'Found {num} nodes with monitoring configuration'.format(
                num=len(interesting_nodes),
            )
        )

        # Group monitored node IDs by their owning deployment.
        targets = {}
        notified_skipped_deployments = []
        for node in interesting_nodes:
            dep_id = node['deployment_id']
            if only_deployments and dep_id not in only_deployments:
                # Log each skipped deployment only once.
                if dep_id not in notified_skipped_deployments:
                    ctx.logger.info('Skipping deployment {dep}'.format(
                        dep=dep_id,
                    ))
                    notified_skipped_deployments.append(dep_id)
                continue
            targets.setdefault(dep_id, []).append(node['id'])

        if targets:
            found_targets = True
            for deployment, nodes in targets.items():
                ctx.logger.info(
                    'Starting monitoring for deployment {deployment}'.format(
                        deployment=deployment,
                    )
                )
                try:
                    run_workflow(
                        tenant=tenant,
                        deployment=deployment,
                        workflow_id='execute_operation',
                        parameters={
                            "node_ids": nodes,
                            "operation": (
                                "cloudify.interfaces.monitoring.start"
                            ),
                        },
                        allow_custom_parameters=False,
                        force=False,
                        logger=ctx.logger,
                    )
                except StartWorkflowFailed as err:
                    ctx.logger.error(
                        '{deployment} failed to start workflow: {err}'.format(
                            deployment=deployment,
                            err=text_type(err),
                        )
                    )
                    problem_deployments.setdefault(
                        tenant, []).append(deployment)

    if found_targets:
        ctx.logger.info('All monitored instances not listed as problems '
                        'should be re-added to '
                        'nagios within a short time. See individual '
                        'deployments for execution states. '
                        'Problem messages state: '
                        'Tenant <name> had problems starting workflows, '
                        'and list which deployments had these problems. '
                        'If any of these appear you can re-run just those '
                        'deployments by using the only_deployments '
                        'argument.')
        if problem_deployments:
            for tenant in problem_deployments:
                ctx.logger.warn(
                    'Tenant {tenant} had problems starting workflows for '
                    'deployments: {deps}'.format(
                        tenant=tenant,
                        deps=','.join(problem_deployments[tenant]),
                    )
                )
        else:
            ctx.logger.info('No problems were reported starting workflows.')
    else:
        ctx.logger.warn('Nothing needed to be done. Either the combination '
                        'of tenant and deployment filtering left no targets '
                        'or there are no monitored deployments using the '
                        'nagiosrest plugin on the cloudify manager.')
@operation
def start_nagiosrest(ctx):
    """Enable and start the services backing the nagiosrest HTTP API."""
    ctx.logger.info('Enabling and starting nagios and httpd services')
    for svc in ('httpd', 'nagiosrest-gunicorn'):
        enable_service(svc)
        start_service(svc)
| cloudify-cosmo/cloudify-managed-nagios-plugin | managed_nagios_plugin/nagios/tasks.py | tasks.py | py | 26,382 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cloudify.exceptions.NonRecoverableError",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "managed_nagios_plugin.utils.yum_install",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "managed_nagios_plugin._compat.text_type",
"line_number": 62,
... |
# FastAPI application entry point: builds the app, configures CORS, and
# mounts the feature routers (auth, users, blogs).
from fastapi import Depends, FastAPI, Header, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from app.auth import main as auths
from app.users import main as users
from app.blogs import main as blogs, sub as blogs_sub

# NOTE(review): Depends, Header and HTTPException are imported but never
# used in this module -- possibly kept as re-exports; confirm before
# removing.

app = FastAPI()

# Allow cross-origin requests from any origin, with credentials and all
# methods/headers permitted.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Each sub-package exposes its router as `app`; mount them all here.
app.include_router(auths.app)
app.include_router(users.app)
app.include_router(blogs.app)
app.include_router(blogs_sub.app)
| tokusumi/fastapi-nuxt-blog | backend/app/app/main.py | main.py | py | 524 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "app.auth",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "fastapi.FastAPI",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "app.auth.add_middleware",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "fastapi.middleware.cors... |
28051281167 | """
Tests for the petl.fluent module.
"""
from __future__ import absolute_import, print_function, division
from tempfile import NamedTemporaryFile
import csv
from nose.tools import eq_
import petl
import petl.interactive as etl
from petl.testutils import ieq
def test_basics():
    """Wrapped tables expose header/data/cut like the functional API."""
    source = (('foo', 'bar'),
              ('A', 1),
              ('B', 2))
    wrapped = etl.wrap(source)

    # Header and data accessors agree with the plain petl functions.
    eq_(('foo', 'bar'), wrapped.header())
    eq_(petl.header(wrapped), wrapped.header())
    ieq((('A', 1), ('B', 2)), wrapped.data())
    ieq(petl.data(wrapped), wrapped.data())

    # cut() reorders columns, matching petl.cut.
    swapped = wrapped.cut('bar', 'foo')
    ieq((('bar', 'foo'),
         (1, 'A'),
         (2, 'B')), swapped)
    ieq(petl.cut(wrapped, 'bar', 'foo'), swapped)

    # Cutting twice restores the original column order.
    ieq(source, wrapped.cut('bar', 'foo').cut('foo', 'bar'))
def test_staticmethods():
    """fromcsv reads a tab-delimited file and can be iterated twice."""
    import os

    # delete=False so the (closed) file can be re-opened by name, which
    # petl's lazy fromcsv does on every iteration.
    f = NamedTemporaryFile(delete=False)
    try:
        writer = csv.writer(f, delimiter='\t')
        table = (('foo', 'bar'),
                 ('a', 1),
                 ('b', 2),
                 ('c', 2))
        for row in table:
            writer.writerow(row)
        f.close()

        actual = etl.fromcsv(f.name, delimiter='\t')
        # CSV values come back as strings.
        expect = (('foo', 'bar'),
                  ('a', '1'),
                  ('b', '2'),
                  ('c', '2'))
        ieq(expect, actual)
        ieq(expect, actual)  # verify can iterate twice
    finally:
        # Fix: the delete=False temp file was previously leaked on every
        # test run; remove it once we are done with it.
        os.remove(f.name)
def test_container():
    """Wrapped tables support indexing and len() like a container."""
    table = (('foo', 'bar'),
             ('a', 1),
             ('b', 2),
             ('c', 2))
    wrapped = etl.wrap(table)

    # Row 0 is the header row.
    eq_(('foo', 'bar'), wrapped[0])

    # len() counts the header plus the three data rows.
    eq_(4, len(wrapped))
def test_repr_html():
    # _repr_html_ lets IPython/Jupyter render wrapped tables as HTML.
    table = (('foo', 'bar'),
             ('a', 1),
             ('b', 2),
             ('c', 2))
    # Expected markup: one <tr> per row, numeric cells right-aligned.
    expect = u"""<table class='petl'>
<thead>
<tr>
<th>foo</th>
<th>bar</th>
</tr>
</thead>
<tbody>
<tr>
<td>a</td>
<td style='text-align: right'>1</td>
</tr>
<tr>
<td>b</td>
<td style='text-align: right'>2</td>
</tr>
<tr>
<td>c</td>
<td style='text-align: right'>2</td>
</tr>
</tbody>
</table>
"""
    actual = etl.wrap(table)._repr_html_()
    # NOTE(review): actual output is split on CRLF while the expected
    # literal uses LF, and zip() stops at the shorter sequence, so any
    # extra trailing lines in `actual` would go unchecked -- confirm
    # this asymmetry is intentional.
    for l1, l2 in zip(expect.split('\n'), actual.split('\r\n')):
        eq_(l1, l2)
def test_repr_html_limit():
    """Rows beyond repr_html_limit are elided with a '...' marker."""
    table = (('foo', 'bar'),
             ('a', 1),
             ('b', 2),
             ('c', 2))

    # lower repr limit
    # Fix: the module-level limit was previously mutated and never
    # restored, leaking into any test that runs afterwards. Save and
    # restore it so this test is isolated.
    saved_limit = etl.repr_html_limit
    etl.repr_html_limit = 2
    try:
        # Only the first two data rows appear, followed by an ellipsis.
        expect = u"""<table class='petl'>
<thead>
<tr>
<th>foo</th>
<th>bar</th>
</tr>
</thead>
<tbody>
<tr>
<td>a</td>
<td style='text-align: right'>1</td>
</tr>
<tr>
<td>b</td>
<td style='text-align: right'>2</td>
</tr>
</tbody>
</table>
<p><strong>...</strong></p>
"""
        actual = etl.wrap(table)._repr_html_()
        for l1, l2 in zip(expect.split('\n'), actual.split('\r\n')):
            eq_(l1, l2)
    finally:
        etl.repr_html_limit = saved_limit
| podpearson/petl | src/petl/test/test_interactive.py | test_interactive.py | py | 2,716 | python | en | code | null | github-code | 6 | [
{
"api_name": "petl.interactive.wrap",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "petl.interactive",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "nose.tools.eq_",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "nose.tools.eq_"... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.