seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
24849126049 | import streamlit as st
import pickle
import numpy as np
import pandas as pd
model = pickle.load(open("model.pkl", "rb"))
dt = pickle.load(open("dictionary_map.pkl", "rb"))
st.header("Laptop Price Predictor!!!")
val1 = st.selectbox(
'Company Name',
(dt["company_name"].keys()))
val2 = st.selectbox(
'Processor Name',
(dt["processor_name"].keys()))
val3 = st.selectbox(
'Processor Generation',
(dt["processor_generation"].keys()))
val4 = st.selectbox(
'Operating System',
("DOS ","64 bit Windows 11 ", "64 bit Windows 10 ", "Windows 11 ", "Mac OS ", "Windows 10 ", "Chrome "))
val5 = st.selectbox(
'Storage (SSD)',
(128, 256, 512))
val6 = st.selectbox(
'Screen Size',
(14, 15.6, 16, 17))
val7 = st.selectbox(
'Ram Capacity (GB)',
(4, 8, 16, 32))
val8 = st.selectbox(
'Ram Type',
(dt["ram_type"].keys()))
value = [dt["company_name"][val1], dt["processor_name"][val2], dt["processor_generation"][val3],
dt["operating_system"][val4], int(val5), int(val6), int(val7), dt["ram_type"][val8]]
price = model.predict([value])
print(price)
st.write('\n\tPrice of the laptop approxmately ₹', np.round(price[0], decimals=2)) | itsguptaaman/Flipkart_Laptop_Price_Prediction | app.py | app.py | py | 1,188 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pickle.load",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "streamlit.header",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "streamlit.selectbox",
"line... |
15891304933 | from cassandra.cqlengine.query import LWTException
from sanic.views import HTTPMethodView
from app.http import error_response, json_response
from app.utils.request import check_uuid
class ModelBaseView(HTTPMethodView):
model = None
@staticmethod
async def _make_request(data, many=False):
if not data:
return await json_response({})
if many:
return await json_response([item.to_dict() for item in data])
if isinstance(data, list):
return await json_response(data[0].to_dict())
return await json_response(data.to_dict())
@check_uuid
async def get(self, request):
param_id = request.raw_args.get('id')
if not param_id:
instances = self.model.objects().all()
return await self._make_request(instances, many=True)
instance = self.model.objects(id=param_id)
if not instance:
model_name = self.model.__name__.replace('Model', '')
return await error_response(msg=f'{model_name} not found',
status=404)
return await self._make_request(instance)
async def post(self, request):
try:
data = self.prepare_data(request)
instance = self.model.if_not_exists().create(**data)
return await self._make_request(instance)
except LWTException:
return await error_response(msg=f'Instance already exist.',
status=400)
@staticmethod
def prepare_data(request):
return request.json
@check_uuid
async def delete(self, request):
param_id = request.raw_args.get('id')
instance = self.model.objects(id=param_id)
if not instance:
model_name = self.model.__name__.replace('Model', '')
return await error_response(msg=f'{model_name} not found',
status=404)
return await self._make_request(instance)
| Arthur264/music-new.chat | app/utils/view.py | view.py | py | 2,024 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sanic.views.HTTPMethodView",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "app.http.json_response",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "app.http.json_response",
"line_number": 17,
"usage_type": "call"
},
{
"api_name"... |
23411321140 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('clientes', '0006_auto_20150918_2041'),
]
operations = [
migrations.AlterField(
model_name='cliente',
name='fecha_nacimiento',
field=models.DateField(default=datetime.datetime(2015, 9, 19, 0, 45, 5, 151000, tzinfo=utc)),
),
migrations.AlterField(
model_name='documento',
name='documento',
field=models.CharField(max_length=3, choices=[(b'CI', b'Cedula de Identidad'), (b'RUC', b'Registro Unico del Contribuyente'), (b'P', b'Pasaporte'), (b'RC', b'Registro de Conducir')]),
),
migrations.AlterField(
model_name='documento',
name='id',
field=models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True),
),
]
| pmmrpy/SIGB | clientes/migrations_1/0007_auto_20150918_2045.py | 0007_auto_20150918_2045.py | py | 1,037 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterField",
"line_number": 16,
"usage_type": "call"
},
{... |
6793509891 | from django.db import models
from model_utils.models import TimeStampedModel
from ...conf import settings
from ...managers.consultant_industry import ConsultantIndustryManager
class ConsultantIndustry(
TimeStampedModel
):
consultant = models.ForeignKey(
'consultant.Consultant',
related_name='industries',
on_delete=models.CASCADE,
)
industry = models.ForeignKey(
'industry.Industry',
related_name='consultants',
on_delete=models.CASCADE,
)
level = models.IntegerField(
choices=settings.RELATION_INDUSTRIES_CHOICES,
)
objects = ConsultantIndustryManager()
def __str__(self):
return '{} - {}: {}'.format(self.consultant, self.industry, self.get_level_display())
| tomasgarzon/exo-services | service-exo-core/relation/models/consultant/industry.py | industry.py | py | 771 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "model_utils.models.TimeStampedModel",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 12,
"usage_type": "name"
},
{
"... |
72339753704 | import cv
import cv2
import numpy as np
import math
import time
from os import listdir
ballTargetImgIndex = 0
def quick_scan_cv(configs, autonomyToCV, GCS_TIMESTAMP, CONNECTION_TIMESTAMP):
print("Starting Quickscan CV")
#Set output image folder based on simulation attribute in configs
out_imagef_path = configs["quick_scan_specific"]["quick_scan_images"]
cam = None
if not configs['cv_simulated']['toggled_on']:
cam = cv.init_camera(configs)
if cam is None:
print("Camera not found")
rad_threshold = configs["quick_scan_specific"]["rad_threshold"] # between 2 and 3 degrees
all_kpts = [] #list of list of keypoints/descriptors (each list is an image)
all_desc = []
image_ctr = 0 #counter for saving images
#Poll to ensure start is set to True
initial_start = get_autonomy_start_and_stop(autonomyToCV)[0]
while (initial_start == False):
if get_autonomy_start_and_stop(autonomyToCV)[0]:
break
#Retreive images and store onto disk
print("\nBeginning to take and store images")
while (get_autonomy_start_and_stop(autonomyToCV) == (True, False)):
#Get image via simulation; TODO: get image via vehicle
if configs['cv_simulated']['toggled_on']:
#simulation image
img = cv.cv_simulation(configs)
else:
#image taken from camera
if cam is not None:
img = take_picture(cam)
else:
break
pitch, roll = get_autonomytoCV_vehicle_angle(autonomyToCV)
isBall, isTarget = isBallorTarget(img, configs, True)
autonomyToCV.xbeeMutex.acquire()
if autonomyToCV.xbee:
if (isBall or isTarget):
lat, lon = get_autonomyToCV_location(autonomyToCV)
print("POI @ " + str(lat) + ", " + str(lon))
poi_message = {
"type": "poi",
"id": 0,
"sid": configs["vehicle_id"],
"tid": 0,
"time": round(time.clock() - CONNECTION_TIMESTAMP) + GCS_TIMESTAMP,
"lat": lat, # Latitude of point of interest
"lng": lon, # Longitude of point of interest
}
# Instantiate a remote XBee device object to send data.
address = configs["mission_control_MAC"]
xbee = autonomyToCV.xbee
send_xbee = RemoteXBeeDevice(xbee, address)
xbee.send_data(send_xbee, json.dumps(poi_message))
autonomyToCV.xbeeMutex.release()
#determines whether to save image to output folder based on angle of vehicle or simulation
'''
if (abs(pitch) < rad_threshold and
abs(roll) < rad_threshold and
configs['cv_simulated']['toggled_on'] == False):
'''
if not configs['cv_simulated']['toggled_on']:
image_out = out_imagef_path + str(image_ctr) + ".jpg"
cv2.imwrite(image_out, img)
image_ctr += 1
#Get keypoints and discriptors of all images
print("Getting images for processing")
files = listdir(out_imagef_path)
for fname in files:
path = out_imagef_path + fname
img = cv2.imread(path, 0) #color for now
kpt, desc = feature_keypts(img)
all_kpts.append(kpt)
all_desc.append(desc)
#TODO: return list of keypoints (with new coordinates) and descriptors
#for now: None
return None #stitch_keypoints(kpts_list=all_kpts, descs_list=all_desc)
def take_picture(camera):
try:
count = camera.get(cv2.CAP_PROP_FRAME_COUNT)
camera.set(cv2.CAP_PROP_POS_FRAMES, count - 1)
camera.grab()
_, img = camera.retrieve()
crop_img = img[78:630, 270:1071]
return crop_img
except KeyboardInterrupt:
raise
except:
# Try taking the picture again
time.sleep(1)
take_picture(camera)
def get_autonomy_start_and_stop(autonomyToCV):
autonomyToCV.startMutex.acquire()
start = autonomyToCV.start
autonomyToCV.startMutex.release()
autonomyToCV.stopMutex.acquire()
stop = autonomyToCV.stop
autonomyToCV.stopMutex.release()
return start, stop
def get_autonomytoCV_vehicle_angle(autonomyToCV):
autonomyToCV.vehicleMutex.acquire()
vehicle = autonomyToCV.vehicle
pitch = vehicle.attitude.pitch
roll = vehicle.attitude.roll
autonomyToCV.vehicleMutex.release()
return pitch, roll
def get_autonomytoCV_location(autonomyToCV):
autonomyToCV.vehicleMutex.acquire()
location = autonomyToCV.vehicle.location.global_frame
lat = location.lat
lon = location.lon
autonomyToCV.vehicleMutex.release()
return lat, lon
def feature_keypts(img):
orb = cv2.ORB_create(nfeatures=1000)
kp, desc = orb.detectAndCompute(img, None)
return kp, desc
def feature_match(kp1, desc1, kp2, desc2, testing=False):
#FLANN parameters
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks=100) # or pass empty dictionary
#Matching with FLANN feature match
flann = cv2.FlannBasedMatcher(index_params, search_params)
if desc1.dtype != 'float32':
desc1 = np.float32(desc1)
if desc2.dtype != 'float32':
desc2 = np.float32(desc2)
matches = flann.knnMatch(desc1, desc2, k=2)
matchesMask = [[0, 0] for i in xrange(len(matches))]
no_good_matches = 0
good_matches = []
#Using 2 matches and "ratio test as per Lowe's paper"
for i,(m,n) in enumerate(matches):
if m.distance < 0.75*n.distance:
no_good_matches += 1
good_matches.append(m)
matchesMask[i]=[1,0]
print("Number of good matches: " + str(no_good_matches))
if testing:
#All matches and masks to produce image
return matches, matchesMask
return good_matches
def stitch_keypoints(kpts_list, descs_list):
#Use initial image as base case
if len(kpts_list) > 0:
final_kp = kpts_list[0]
final_desc = descs_list[0]
else:
return kpts_list, descs_list
#Iterate through each image
for img_no in range(1, len(kpts_list)):
img_kp = kpts_list[img_no]
img_desc = descs_list[img_no]
matches = feature_match(final_kp, final_desc, img_kp, img_desc)
points1 = np.zeros((len(matches), 2), dtype=np.float32)
points2 = points1.copy()
for i, match in enumerate(matches):
points1[i,:] = final_kp[match.queryIdx].pt
points2[i,:] = img_kp[match.trainIdx].pt
h, mast = cv2.findHomography(points1, points2, cv2.RANSAC)
points2 = np.array([points2])
#result_coordinates = cv2.perspectiveTransform(points2, h)
#print("original ")
#print(points2)
#print("transformed " )
#print(result_coordinates)
#TODO: Match transformed points to original and stitch
return None
def stitch_image(img_list):
stitcher = cv2.Stitcher_create()
status, stitched = stitcher.stitch(img_list)
print("Stitch status: " + str(status))
if status == 0:
cv2.imwrite("cv_stitched_map.jpg", stitched)
def isBallorTarget(img_rgb, configs, see_results=False):
global ballTargetImgIndex
ballTargetImgIndex += 1
img_rgb = cv2.GaussianBlur(img_rgb, (3, 3), 2)
hsv = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2HSV)
lower_red = np.array([0,100,100])
upper_red = np.array([15,255,255])
mask0 = cv2.inRange(hsv, lower_red, upper_red)
lower_red = np.array([160,100,100])
upper_red = np.array([180,255,255])
mask1 = cv2.inRange(hsv, lower_red, upper_red)
mask = mask0 + mask1
output_img = img_rgb.copy()
output_img[np.where(mask==0)] = 0
output_img[np.where(mask!=0)] = 255
output_gray = cv2.cvtColor(output_img, cv2.COLOR_BGR2GRAY)
target_circles = cv2.HoughCircles(output_gray,cv2.HOUGH_GRADIENT,1.2,.01,
param1=100,param2=50,minRadius=0,maxRadius=0)
ball_circles = cv2.HoughCircles(output_gray,cv2.HOUGH_GRADIENT,1.3,10,
param1=30,param2=43,minRadius=0,maxRadius=0)
#for smaller ball images, let restrictions be smaller
if ball_circles is None:
ball_circles = cv2.HoughCircles(output_gray,cv2.HOUGH_GRADIENT,1.3,10,
param1=30,param2=25,minRadius=0,maxRadius=0)
isBall = False
isTarget = False
center_buffer = 8
rbuffer = 1
target_pair = None
ball_pair = None
if target_circles is not None:
circles = np.uint16(np.around(target_circles))
#Looks for a target
for i, c1 in enumerate(circles[0,:]):
for c2 in circles[0, i:]:
if ( (( c1[0] - center_buffer) <= c2[0] <= (c1[0]) + center_buffer) and
(( c1[1] - center_buffer) <= c2[1] <= (c1[1]) + center_buffer) and
(((1.5 - rbuffer) * c1[2]) <= c2[2] <= ((1.5 + rbuffer) * c1[2]))
):
isTarget = True
target_pair = (c1, c2)
if isTarget:
break
if isTarget:
break
if ball_circles is not None:
if len(ball_circles[0]) == 1 or not isTarget:
isBall = True
ball_pair = (ball_circles[0][0],)
elif len(ball_circles[0]) != 1:
circles = np.uint16(np.around(ball_circles))
for i, c1 in enumerate(circles[0,:]):
for c2 in circles[0, i:]:
if (( 0 < xy_distance(c1[0], c2[0], c1[1], c2[1]) > (c1[2] + c2[2]) and
not isBall) or not isTarget
):
isBall = True
ball_pair = (c1, c2)
if isBall:
break
if isBall:
break
if see_results:
#print(ball_pair)
#print(target_pair)
#print(ball_circles)
#print(target_circles)
if target_pair is not None:
# draw circles for Target Pair
c1 = target_pair[0]
cv2.circle(img_rgb,(c1[0],c1[1]),c1[2],(255,0,0),10)
# draw the center of the circle
cv2.circle(img_rgb,(c1[0],c1[1]),2,(255,0, 0),10)
# draw the outer circle
c2 = target_pair[1]
cv2.circle(img_rgb,(c2[0],c2[1]),c2[2],(255,0,0),10)
# draw the center of the circle
cv2.circle(img_rgb,(c2[0],c2[1]),2,(255,0,0),10)
if target_circles is not None:
circles = np.uint16(np.around(target_circles))
#Looks for a target
for i, c1 in enumerate(circles[0,:]):
cv2.circle(img_rgb,(c1[0],c1[1]),c1[2],(255,0,0),10)
# draw the center of the circle
cv2.circle(img_rgb,(c1[0],c1[1]),2,(255,0,0),3)
if ball_pair is not None:
# draw circles for Ball Pair
c1 = ball_pair[0]
cv2.circle(img_rgb,(c1[0],c1[1]),c1[2],(255,0,0),10)
# draw the center of the circle
cv2.circle(img_rgb,(c1[0],c1[1]),2,(255,0,0),6)
if len(ball_pair) > 1:
# draw the outer circle
c2 = ball_pair[1]
cv2.circle(img_rgb,(c2[0],c2[1]),c2[2],(255,0,0),10)
# draw the center of the circle
cv2.circle(img_rgb,(c2[0],c2[1]),2,(255,0,0),6)
if ball_circles is not None:
circles = np.uint16(np.around(ball_circles))
#Looks for a target
for i, c1 in enumerate(circles[0,:]):
cv2.circle(img_rgb,(c1[0],c1[1]),c1[2],(255,0,0),10)
# draw the center of the circle
cv2.circle(img_rgb,(c1[0],c1[1]),2,(255,0,0),3)
#if isBall or isTarget:
display_img = cv2.resize(img_rgb, None, fx=0.9, fy=0.9)
print("Ball: " + str(isBall))
print("Target: " + str(isTarget))
if configs['quick_scan_specific']['demo']:
cv2.imshow("output", display_img)
cv2.waitKey(500)
return isBall, isTarget
def xy_distance(x1, x2, y1, y2):
np.seterr('ignore')
return (((x1 - x2) ** 2) + ((y1 - y2) ** 2)) ** 0.5
if __name__ == '__main__':
directory = "./quick_scan_images/"
files = listdir(directory)
configs = {}
d = {'demo': True}
configs['quick_scan_specific'] = d
for f in files:
img = cv2.imread(directory + f)
isBall, isTarget = isBallorTarget(img, configs, True)
if isBall or isTarget:
cv2.imwrite("./pomona_results/" + f, img)
#print(f + ": " + str(isBall) + ", " + str(isTarget))
| NGCP/VTOL | archives/quick_scan_cv.py | quick_scan_cv.py | py | 13,002 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "cv.init_camera",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv.cv_simulation",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "time.clock",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_nu... |
40103583521 | import flask
from flask import render_template, request
from definitions import annotation_new
import json
import services.correspondence_service as cs
import services.query_service as qs
import services.equivalence_class_service as em
import services.rotation_service as rs
import services.center_service as ccs
import services.pairwise_int_service as ps
import services.chain_info_service as ci
import services.quality as qy
import infrastructure.process_input as pi
import infrastructure.utility as ui
blueprint = flask.Blueprint('correspondence', __name__, template_folder='templates')
bound = [('4V9D', 'AA'), ('4V9D', 'BA'), ('4V9O', 'BA'), ('4V9O', 'DA'), ('4V9O', 'FA'), ('4V9O', 'HA'), ('4V9P', 'BA'),
('4V9P', 'DA'), ('4V9P', 'FA'), ('4V9P', 'HA'), ('6GWT', 'a'), ('6GXM', 'a'), ('6GXN', 'a'), ('6GXO', 'a'),
('4V54', 'AA'), ('4V54', 'CA'), ('4V55', 'AA'), ('4V55', 'CA'), ('4V9C', 'CA'), ('5KCS', '1a'), ('3J9Y', 'a'),
('3JCE', 'a'), ('5AFI', 'a'), ('5UYK', 'A'), ('5UYL', 'A'), ('5UYM', 'A'), ('5UYN', 'A'), ('5UYP', 'A'),
('5UYQ', 'A'), ('5WDT', 'a'), ('5WE4', 'a'), ('5WE6', 'a'), ('5WF0', 'a'), ('5WFK', 'a'), ('5WFS', 'a'),
('3J9Z', 'SA'), ('3JA1', 'SA'), ('3JCJ', 'g'), ('6DNC', 'A'), ('5NP6', 'D'), ('6H4N', 'a'), ('5H5U', 'h'),
('5MDV', '2'), ('5MDW', '2'), ('5MDY', '2'), ('5MGP', 'a'), ('5U4I', 'a'), ('5U9F', 'A'), ('5U9G', 'A'),
('6ENF', 'a'), ('6ENJ', 'a'), ('6ENU', 'a'), ('6C4I', 'a'), ('3JBU', 'A'), ('3JBV', 'A'), ('5JTE', 'AA'),
('5JU8', 'AA'), ('5NWY', '0'), ('5O2R', 'a'), ('5LZD', 'a'), ('5IQR', '2'), ('5KPS', '27'), ('5KPW', '26'),
('5KPX', '26'), ('5L3P', 'a'), ('4V85', 'AA'), ('4V89', 'AA'), ('3JCD', 'a'), ('6O9J', 'a'), ('6O9K', 'a'),
('6BY1', 'BA'), ('6BY1', 'AA'), ('6ORE', '2'), ('6OSQ', '2'), ('6ORL', '2'), ('6OUO', '2'), ('6OT3', '2'),
('6OSK', '2'), ('6Q97', '2'), ('6Q9A', '2'), ('6NQB', 'A')]
bound_new = [('4V9D', 'AA'),
('4V9O', 'BA'),
('4V9O', 'DA'),
('4V9O', 'FA'),
('4V9O', 'HA'),
('4V9P', 'BA'),
('4V9P', 'DA'),
('4V9P', 'FA'),
('4V9P', 'HA'),
('6GWT', 'a'),
('6GXM', 'a'),
('6GXN', 'a'),
('6GXO', 'a'),
('4V54', 'AA'),
('4V54', 'CA'),
('4V55', 'AA'),
('4V55', 'CA'),
('4V9C', 'CA'),
('5KCS', '1a'),
('3J9Y', 'a'),
('3JCE', 'a'),
('5AFI', 'a'),
('5UYK', 'A'),
('5UYL', 'A'),
('5UYM', 'A'),
('5UYN', 'A'),
('5UYP', 'A'),
('5UYQ', 'A'),
('5WDT', 'a'),
('5WE4', 'a'),
('5WE6', 'a'),
('5WF0', 'a'),
# ('5WFK', 'a'),
('5WFS', 'a'),
('3J9Z', 'SA'),
('3JA1', 'SA'),
('3JCJ', 'g'),
('6DNC', 'A'),
('6H4N', 'a'),
('5H5U', 'h'),
('5MDV', '2'),
('5MDW', '2'),
('5MDY', '2'),
('5MGP', 'a'),
('5U4I', 'a'),
# ('5U4J', 'a'),
('5U9F', 'A'),
('5U9G', 'A'),
('6ENF', 'a'),
('6ENJ', 'a'),
('6ENU', 'a'),
('6C4I', 'a'),
('3JBU', 'A'),
('3JBV', 'A'),
('5JTE', 'AA'),
('5JU8', 'AA'),
('5NWY', '0'),
('5O2R', 'a'),
('5LZD', 'a'),
('5IQR', '2'),
('5KPS', '27'),
('5KPW', '26'),
('5KPX', '26'),
('5L3P', 'a'),
('4V85', 'AA'),
('4V89', 'AA'),
('3JCD', 'a'),
('6O9K', 'a'),
('6OGF', '3'),
('6OG7', '3'),
('6OSQ', '2'),
('6ORL', '2'),
('6OUO', '2'),
('6OT3', '2'),
('6OSK', '2'),
('6Q9A', '2'),
# ('6NQB', 'A'),
('6SZS', 'a')]
empty = [('4V4Q', 'AA'), ('4V4Q', 'CA'), ('4V50', 'AA'), ('4V50', 'CA'), ('4V5B', 'BA'), ('4V5B', 'DA'), ('4YBB', 'AA'),
('4YBB', 'BA'), ('5IT8', 'AA'), ('5IT8', 'BA'), ('5J5B', 'AA'), ('5J5B', 'BA'), ('5J7L', 'AA'), ('5J7L', 'BA'),
('5J88', 'AA'), ('5J88', 'BA'), ('5J8A', 'AA'), ('5J8A', 'BA'), ('5J91', 'AA'), ('5J91', 'BA'), ('5JC9', 'AA'),
('5JC9', 'BA'), ('5MDZ', '2'), ('6BU8', 'A'), ('4U1U', 'AA'), ('4U1U', 'CA'), ('4U1V', 'AA'), ('4U1V', 'CA'),
('4U20', 'AA'), ('4U20', 'CA'), ('4U24', 'AA'), ('4U24', 'CA'), ('4U25', 'AA'), ('4U25', 'CA'), ('4U26', 'AA'),
('4U26', 'CA'), ('4U27', 'AA'), ('4U27', 'CA'), ('4V4H', 'AA'), ('4V4H', 'CA'), ('4V52', 'AA'), ('4V52', 'CA'),
('4V53', 'AA'), ('4V53', 'CA'), ('4V56', 'AA'), ('4V56', 'CA'), ('4V57', 'AA'), ('4V57', 'CA'), ('4V64', 'AA'),
('4V64', 'CA'), ('4V7S', 'AA'), ('4V7S', 'CA'), ('4V7T', 'AA'), ('4V7T', 'CA'), ('4V7U', 'AA'), ('4V7U', 'CA'),
('4V7V', 'AA'), ('4V7V', 'CA'), ('4V9C', 'AA'), ('4WF1', 'AA'), ('4WF1', 'CA'), ('4WOI', 'AA'), ('4WOI', 'DA'),
('4WWW', 'QA'), ('4WWW', 'XA'), ('5KCR', '1a'), ('5LZA', 'a'), ('4V6C', 'AA'), ('4V6C', 'CA'), ('4V6D', 'AA'),
('4V6D', 'CA'), ('4V6E', 'AA'), ('4V6E', 'CA')]
default_ordering = [('0', '5AFI|1|a'), ('1', '5UYM|1|A'), ('2', '5LZD|1|a'), ('3', '5WDT|1|a'), ('4', '5WE4|1|a'),
('5', '5WE6|1|a'), ('6', '3JCE|1|a'), ('7', '5WFS|1|a'), ('8', '6ENJ|1|a'), ('9', '6BU8|1|A'),
('10', '4V6E|1|AA'), ('11', '5UYQ|1|A'), ('12', '4WOI|1|DA'), ('13', '5KPX|1|26'),
('14', '4V9C|1|AA'), ('15', '5O2R|1|a'), ('16', '6DNC|1|A'), ('17', '4V6D|1|AA'),
('18', '4V9D|1|BA'), ('19', '6H4N|1|a'), ('20', '5JTE|1|AA'), ('21', '5WFK|1|a'),
('22', '6ENU|1|a'), ('23', '6GWT|1|a'), ('24', '5KCR|1|1a'), ('25', '5WF0|1|a'),
('26', '5JU8|1|AA'), ('27', '5LZA|1|a'), ('28', '3JCD|1|a'), ('29', '5U9F|1|A'),
('30', '6ENF|1|a'), ('31', '5MDZ|1|2'), ('32', '5NWY|1|0'), ('33', '5MDV|1|2'),
('34', '3JBU|1|A'), ('35', '5UYK|1|A'), ('36', '5U4I|1|a'), ('37', '5UYL|1|A'), ('38', '5MGP|1|a'),
('39', '5U9G|1|A'), ('40', '5MDW|1|2'), ('41', '5UYN|1|A'), ('42', '5KPS|1|27'),
('43', '5IQR|1|2'), ('44', '6C4I|1|a'), ('45', '5KPW|1|26'), ('46', '3J9Z|1|SA'),
('47', '5NP6|1|D'), ('48', '5H5U|1|h'), ('49', '5UYP|1|A'), ('50', '6GXM|1|a'), ('51', '4WOI|1|AA'),
('52', '5MDY|1|2'), ('53', '5L3P|1|a'), ('54', '4V9C|1|CA'), ('55', '5KCS|1|1a'),
('56', '3J9Y|1|a'), ('57', '6GXN|1|a'), ('58', '4V6E|1|CA'), ('59', '4V50|1|CA'),
('60', '4V50|1|AA'), ('61', '4V6D|1|CA'), ('62', '3JCJ|1|g'), ('63', '4V9D|1|AA'),
('64', '6GXO|1|a'), ('65', '3JA1|1|SA'), ('66', '4V9O|1|BA'), ('67', '3JBV|1|A'),
('68', '4V5B|1|BA'), ('69', '4V9O|1|FA'), ('70', '6I7V|1|BA'), ('71', '4WF1|1|CA'),
('72', '4V56|1|AA'), ('73', '4V55|1|AA'), ('74', '4U1U|1|CA'), ('75', '4V64|1|AA'),
('76', '4V57|1|AA'), ('77', '4U20|1|CA'), ('78', '4U27|1|CA'), ('79', '4U25|1|CA'),
('80', '4V4H|1|AA'), ('81', '4V53|1|AA'), ('82', '4V4Q|1|AA'), ('83', '4U1V|1|CA'),
('84', '4V54|1|AA'), ('85', '4V52|1|AA'), ('86', '4U26|1|CA'), ('87', '4U24|1|CA'),
('88', '5JC9|1|BA'), ('89', '4V9O|1|DA'), ('90', '4YBB|1|BA'), ('91', '4V6C|1|CA'),
('92', '5J8A|1|BA'), ('93', '4V7U|1|CA'), ('94', '5J5B|1|BA'), ('95', '5J91|1|BA'),
('96', '5J7L|1|BA'), ('97', '5IT8|1|BA'), ('98', '4V7S|1|CA'), ('99', '4V7V|1|CA'),
('100', '4V7T|1|CA'), ('101', '5J88|1|BA'), ('102', '4WWW|1|XA'), ('103', '4V9P|1|BA'),
('104', '4V9P|1|DA'), ('105', '4U1V|1|AA'), ('106', '4WF1|1|AA'), ('107', '4U20|1|AA'),
('108', '4U27|1|AA'), ('109', '4V7V|1|AA'), ('110', '4U1U|1|AA'), ('111', '4V7U|1|AA'),
('112', '4V7S|1|AA'), ('113', '4U25|1|AA'), ('114', '4V7T|1|AA'), ('115', '4U24|1|AA'),
('116', '5J7L|1|AA'), ('117', '5J91|1|AA'), ('118', '5J5B|1|AA'), ('119', '4V6C|1|AA'),
('120', '4YBB|1|AA'), ('121', '5J8A|1|AA'), ('122', '4WWW|1|QA'), ('123', '5J88|1|AA'),
('124', '5IT8|1|AA'), ('125', '6I7V|1|AA'), ('126', '4U26|1|AA'), ('127', '4V9P|1|HA'),
('128', '5JC9|1|AA'), ('129', '4V9P|1|FA'), ('130', '4V9O|1|HA'), ('131', '4V56|1|CA'),
('132', '4V57|1|CA'), ('133', '4V89|1|AA'), ('134', '4V5B|1|DA'), ('135', '4V55|1|CA'),
('136', '4V64|1|CA'), ('137', '4V53|1|CA'), ('138', '4V54|1|CA'), ('139', '4V52|1|CA'),
('140', '4V85|1|AA'), ('141', '4V4H|1|CA'), ('142', '4V4Q|1|CA')]
new_ordering = [('0', '4V4Q|1|CA'), ('1', '4V4H|1|CA'), ('2', '4V53|1|CA'), ('3', '4V52|1|CA'), ('4', '4V54|1|CA'),
('5', '4V64|1|CA'), ('6', '4V55|1|CA'), ('7', '4V57|1|CA'), ('8', '4V5B|1|DA'), ('9', '4V56|1|CA'),
('10', '4V9P|1|HA'), ('11', '4V9P|1|FA'), ('12', '4V85|1|AA'), ('13', '4V9O|1|HA'),
('14', '4V89|1|AA'), ('15', '4V55|1|AA'), ('16', '4V57|1|AA'), ('17', '4V53|1|AA'),
('18', '4V54|1|AA'), ('19', '4V52|1|AA'), ('20', '4V4Q|1|AA'), ('21', '4V4H|1|AA'),
('22', '4V64|1|AA'), ('23', '4V56|1|AA'), ('24', '4V9P|1|BA'), ('25', '4V7V|1|AA'),
('26', '4V7S|1|AA'), ('27', '4V7U|1|AA'), ('28', '4V6C|1|AA'), ('29', '4V7T|1|AA'),
('30', '4WWW|1|QA'), ('31', '5IT8|1|AA'), ('32', '5JC9|1|AA'), ('33', '4U26|1|AA'),
('34', '4U24|1|AA'), ('35', '5J7L|1|AA'), ('36', '5J5B|1|AA'), ('37', '5J91|1|AA'),
('38', '5J8A|1|AA'), ('39', '5J88|1|AA'), ('40', '4YBB|1|AA'), ('41', '4V9P|1|DA'),
('42', '4U25|1|AA'), ('43', '4U27|1|AA'), ('44', '4V7U|1|CA'), ('45', '4V7T|1|CA'),
('46', '4V6C|1|CA'), ('47', '4V7S|1|CA'), ('48', '4WWW|1|XA'), ('49', '4V7V|1|CA'),
('50', '4V5B|1|BA'), ('51', '3JBV|1|A'), ('52', '4V9O|1|DA'), ('53', '4WF1|1|AA'),
('54', '4U1U|1|AA'), ('55', '4U1V|1|AA'), ('56', '4U20|1|AA'), ('57', '3JA1|1|SA'),
('58', '4V9O|1|FA'), ('59', '4V50|1|AA'), ('60', '4V50|1|CA'), ('61', '5J7L|1|BA'),
('62', '5J5B|1|BA'), ('63', '5J91|1|BA'), ('64', '5IT8|1|BA'), ('65', '5J88|1|BA'),
('66', '5J8A|1|BA'), ('67', '5JC9|1|BA'), ('68', '4YBB|1|BA'), ('69', '4U26|1|CA'),
('70', '4U24|1|CA'), ('71', '4V6D|1|CA'), ('72', '5H5U|1|h'), ('73', '4V9O|1|BA'),
('74', '3JBU|1|A'), ('75', '5NP6|1|D'), ('76', '3JCD|1|a'), ('77', '6C4I|1|a'),
('78', '3J9Z|1|SA'), ('79', '5MGP|1|a'), ('80', '5MDZ|1|2'), ('81', '5MDV|1|2'),
('82', '5MDW|1|2'), ('83', '5MDY|1|2'), ('84', '6ENF|1|a'), ('85', '5U9F|1|A'),
('86', '4V9D|1|AA'), ('87', '5UYL|1|A'), ('88', '4U1U|1|CA'), ('89', '4U20|1|CA'),
('90', '4U1V|1|CA'), ('91', '4WF1|1|CA'), ('92', '5UYN|1|A'), ('93', '5UYP|1|A'),
('94', '5UYK|1|A'), ('95', '4U25|1|CA'), ('96', '4U27|1|CA'), ('97', '5U9G|1|A'),
('98', '6ENU|1|a'), ('99', '5KPS|1|27'), ('100', '5U4I|1|a'), ('101', '5KPW|1|26'),
('102', '5L3P|1|a'), ('103', '5IQR|1|2'), ('104', '4V6E|1|CA'), ('105', '5NWY|1|0'),
('106', '5O2R|1|a'), ('107', '6DNC|1|A'), ('108', '3J9Y|1|a'), ('109', '3JCJ|1|g'),
('110', '5KCS|1|1a'), ('111', '5WF0|1|a'), ('112', '5KCR|1|1a'), ('113', '5UYQ|1|A'),
('114', '4V6E|1|AA'), ('115', '5JU8|1|AA'), ('116', '4V9D|1|BA'), ('117', '4WOI|1|AA'),
('118', '4V9C|1|CA'), ('119', '4V6D|1|AA'), ('120', '5WFS|1|a'), ('121', '5WE4|1|a'),
('122', '5WDT|1|a'), ('123', '5UYM|1|A'), ('124', '6BU8|1|A'), ('125', '6ENJ|1|a'),
('126', '5LZD|1|a'), ('127', '5KPX|1|26'), ('128', '5JTE|1|AA'), ('129', '4WOI|1|DA'),
('130', '5WE6|1|a'), ('131', '3JCE|1|a'), ('132', '4V9C|1|AA')]
asite_trna = [ # ('6BU8', 'A'),
('3JCE', 'a'),
('5AFI', 'a'),
('5UYK', 'A'),
('5UYL', 'A'),
('5UYM', 'A'),
('5UYN', 'A'),
('5UYP', 'A'),
('5UYQ', 'A'),
('5WDT', 'a'),
('5WE4', 'a'),
('5WE6', 'a'),
('5WF0', 'a'),
('5WFK', 'a'),
('5WFS', 'a'),
('6ENJ', 'a'),
('3JBV', 'A'),
('5JTE', 'AA'),
('5LZD', 'a'),
('5IQR', '2'),
('5KPW', '26'),
('5KPX', '26'),
('5L3P', 'a')]
# ('4V6E', 'AA'),
# ('4V6E', 'CA'),
# ('6BY1', 'BA'),
# ('6BY1', 'AA')]
psite_trna = [ # ('4V50', 'AA'),
# ('4V50', 'CA'),
('4V9D', 'AA'),
# ('4V9D', 'BA'),
# ('5MDZ', '2'),
('6GWT', 'a'),
('6GXM', 'a'),
('6GXN', 'a'),
('6GXO', 'a'),
# ('4V9C', 'AA'),
('4V9C', 'CA'),
# ('4WOI', 'AA'),
# ('4WOI', 'DA'),
# ('5KCR', '1a'),
# ('5KCS', '1a'),
# ('3J9Y', 'a'),
('3JCD', 'a'),
('3J9Z', 'SA'),
('3JA1', 'SA'),
('3JCJ', 'g'),
('6DNC', 'A'),
# ('5NP6', 'D'),
('5H5U', 'h'),
('5MDV', '2'),
('5MDW', '2'),
('5MDY', '2'),
('5MGP', 'a'),
('5U4I', 'a'),
# ('5U4J', 'a'),
('5U9F', 'A'),
('5U9G', 'A'),
('6ENF', 'a'),
('6ENU', 'a'),
('6C4I', 'a'),
('3JBU', 'A'),
# ('5JU8', 'AA'),
('5NWY', '0'),
('5O2R', 'a'),
('5LZA', 'a'),
('5KPS', '27'),
# ('4V6D', 'AA'),
# ('4V6D', 'CA'),
# ('6O9J', 'a'),
('6O9K', 'a'),
# ('6OFX', '3'),
# ('6OGI', '3'),
('6OGF', '3'),
('6OG7', '3'),
# ('6ORE', '2'),
('6OSQ', '2'),
('6ORL', '2'),
('6OUO', '2'),
('6OT3', '2'),
('6OSK', '2'),
# ('6Q97', '2'),
# ('6Q9A', '2'),
('6SZS', 'a')]
th_ref = [('5F8K', '2a'), ('4V6F', 'BA'), ('4V67', 'AA'), ('3J9Z', 'SA'), ('4V8U', 'AA'), ('4W29', 'AA'),
('4V9J', 'AA'), ('4V9L', 'AA'), ('4V9M', 'AA'), ('4V9K', 'AA'), ('4V9H', 'AA'), ('4V90', 'AA')]
@blueprint.route('/correspondence/<method>/<ife>/<selection>/<exp>/<core>')
# @blueprint.route('/correspondence/<method>/<ife>/<selection>/<core>')
# @response(template_file='packages/details.html')
def correspondence_geometric(method, ife, selection, exp, core):
    """Render the correspondence-comparison page for a nucleotide selection.

    :param method: 'geometric' (discrepancy from rotation + center data) or
        'relative' (discrepancy of the query relative to a set of core nts).
    :param ife: the query IFE (integrated functional element) identifier.
    :param selection: nucleotide selection string; parsed by ``pi.input_type``.
    :param exp: experimental-method filter passed to ``em.get_ec_members``.
    :param core: comma-separated core selection (used by 'relative' only).
    :returns: the rendered ``correspondence_display.html`` page, or ``None``
        implicitly when *method* matches neither branch.
    """
    if method == 'geometric':
        query_ife = ife
        exp_method = exp
        query_list = pi.input_type(selection)
        query_type = pi.check_query(query_list)
        query_units = qs.get_query_units(query_type, query_list, query_ife)
        # Equivalence-class members sharing the query IFE.
        # NOTE(review): ``rejected_members`` is unpacked but never used below.
        rejected_members, ec_members, ec_id, nr_release = em.get_ec_members(query_ife, exp_method)
        corr_complete, corr_std = cs.get_correspondence(query_units, ec_members)
        ife_list, coord_data = ui.build_coord(corr_complete)
        # Get the pairwise annotation for the instances in the EC
        # pw_info, pw_sorted, unique_pw = ps.get_pairwise_annotation(corr_complete, query_units, ife_list)
        bp_annotation, bp_num, bsk_annotation, bsk_num, \
        br_annotation, br_num, bph_annotation, bph_num = ps.get_pairwise_test(corr_complete, query_units, ife_list)
        # # Get the tertiary pairwise annotation
        pw_lr, rna_chain = ps.get_pairwise_tertiary(corr_complete, ife_list)
        # chain_info_rna = ci.get_chain_info(rna_chain)
        # rp_contacts, protein_chain = ps.get_pairwise_rnap(corr_complete, ife_list)
        # chain_info_protein = ci.get_chain_info(protein_chain)
        # chain_info = ui.merge_chain_info(chain_info_rna, chain_info_protein)
        # Get the rotation data for calculating discrepancy
        rotation_data = rs.get_rotation(corr_std)
        # Get the center data for calculating discrepancy
        # NOTE(review): ``a``, ``b`` and ``center_len`` are computed but unused.
        center_data, center_numbering, a, b = ccs.get_center(corr_std)
        center_len = ui.get_center_len(center_data)
        # return json.dumps(center_numbering)
        # Calculate discrepancy using the geometric method
        discrepancy_data = ui.calculate_geometric_disc(ife_list, rotation_data, center_data)
        # Order the instances by similarity
        ifes_ordered, coord_ordered = ui.get_ordering(ife_list, discrepancy_data, coord_data)
        # ifes_ordered, coord_ordered = ui.get_ordering_manual(ife_list, coord_data, new_ordering)
        # Get discrepancy statistics and build the heatmap data for display
        max_disc, percentile, mean, median, heatmap_data, dist_data = ui.build_heatmap_data(discrepancy_data,
                                                                                           ifes_ordered)
        # NOTE(review): ``b`` is reused/overwritten here from the earlier unpack.
        quality_data, b = qy.get_rsr(coord_ordered)
        quality_heatmap_data = ui.build_quality_heatmap_data(ifes_ordered, quality_data, query_units)
        # dist_csv = ui.build_dist(dist_data, query_units)
        # Get all the annotation from the definition file
        calculated_head, calculated_intersubunit, description, structure_method, structure_resolution, \
        principal_investigator, publication_year, trna_occupancy, functional_state, factors_bound, \
        antibiotic_bound, codon_pairing = ui.get_annotation_new(ifes_ordered)
        # Reorder the pairwise annotation based on the new ordering
        bp_ordered = ui.reorder_pw(ifes_ordered, bp_annotation)
        bp_list = ui.process_pw(bp_ordered)
        bsk_ordered = ui.reorder_pw(ifes_ordered, bsk_annotation)
        bsk_list = ui.process_pw(bsk_ordered)
        # br_ordered = ui.reorder_pw(ifes_ordered, br_annotation)
        # br_list = ui.process_pw(br_ordered)
        # bph_ordered = ui.reorder_pw(ifes_ordered, bph_annotation)
        # bph_list = ui.process_pw(bph_ordered)
        # Combine base-pair and base-stacking annotation per instance.
        complete_pw = [a + b for a, b in zip(bp_list, bsk_list)]
        pw_data = ui.calculate_pw_score(ifes_ordered, complete_pw)
        # NOTE(review): ``pw_heatmap_data`` is built but not passed to the
        # template (the data2= kwarg below is commented out).
        pw_heatmap_data = ui.build_pairwise_heatmap(pw_data, ifes_ordered)
        pw_lr_ordered = ui.reorder_pw(ifes_ordered, pw_lr)
        # rp_contacts_ordered = ui.reorder_pw(ifes_ordered, rp_contacts)
        # chain_info_ordered = ui.reorder_chain(ifes_ordered, chain_info)
        return render_template("correspondence_display.html", query_nts=query_units,
                               coord=coord_ordered, coord_core=None, ifes=ifes_ordered, maxDisc=max_disc, p2=percentile,
                               data=heatmap_data, trna_occupancy=trna_occupancy, functional_state=functional_state,
                               factors_bound=factors_bound,  # data2=pw_heatmap_data,
                               calculated_rotation=calculated_intersubunit,
                               calculated_head=calculated_head, antibiotic_bound=antibiotic_bound,
                               description=description, structure_method=structure_method,
                               structure_resolution=structure_resolution, principal_investigator=principal_investigator,
                               publication_year=publication_year, bp_num=bp_num, bsk_num=bsk_num,
                               bp_list=bp_ordered, bsk_list=bsk_ordered, pw_tertiary=pw_lr_ordered,
                               release_id=nr_release, ec_id=ec_id, mean=mean, mdn=median,
                               quality_data=quality_heatmap_data)
        # return render_template("correspondence_display_test.html", query_nts=query_units, coord=coord_ordered,
        # coord_core=None, ifes=ifes_ordered, maxDisc=max_disc, p2=percentile, data=heatmap_data,
        # release_id=nr_release, ec_id=ec_id, mean=mean, mdn=median)
    elif method == 'relative':
        query_ife = ife
        query_list = pi.input_type(selection)
        query_type = pi.check_query(query_list)
        exp_method = exp
        core_selection = core.split(",")
        query_units = qs.get_query_units(query_type, query_list, query_ife)
        core_units = qs.get_complete_units(core_selection, query_ife)
        rejected_members, ec_members, ec_id, nr_release = em.get_ec_members(query_ife, exp_method)
        # Get correspondence for the query nts
        # NOTE(review): ``bound_new`` is not defined in this view or its
        # visible imports — presumably a module-level list elsewhere; the
        # geometric branch uses ``ec_members`` here instead.  TODO confirm.
        corr_complete, corr_std = cs.get_correspondence(query_units, bound_new)
        # Get correspondence for the core nts
        core_complete = cs.get_correspondence_core(core_units, bound_new)
        ife_list, coord_data = ui.build_coord_relative(core_complete, corr_complete)
        # Merge the correspondence between core nts and query nts
        corr_complete = ui.merge_list(core_complete, corr_std)
        # Get the pairwise annotation for the instances in the EC
        pw_info, pw_sorted = ps.get_pairwise_annotation(corr_std, query_units, ife_list)
        # Get the tertiary pairwise annotation
        pw_lr, rna_chain = ps.get_pairwise_tertiary(corr_complete, ife_list)
        chain_info_rna = ci.get_chain_info(rna_chain)
        rp_contacts, protein_chain = ps.get_pairwise_rnap(corr_complete, ife_list)
        chain_info_protein = ci.get_chain_info(protein_chain)
        chain_info = ui.merge_chain_info(chain_info_rna, chain_info_protein)
        # Get the center data for calculating discrepancy
        center_data, center_numbering, a, b = ccs.get_center(corr_complete)
        # Calculate discrepancy using the geometric method
        discrepancy_data = ui.calculate_relative_disc(ife_list, center_data, len(core_units), len(query_units))
        # Order the instances by similarity
        ifes_ordered, coord_ordered = ui.get_ordering(ife_list, discrepancy_data, coord_data)
        # Get discrepancy statistics and build the heatmap data for display
        max_disc, percentile, mean, median, heatmap_data, dist_data = ui.build_heatmap_data(discrepancy_data,
                                                                                           ifes_ordered)
        # NOTE(review): ``dist_csv`` is built but never used or returned.
        dist_csv = ui.build_dist(dist_data, query_units)
        # Get all the annotation from the definition file
        calculated_head, calculated_intersubunit, description, structure_method, structure_resolution, \
        principal_investigator, publication_year, trna_occupancy, functional_state, factors_bound, \
        antibiotic_bound, codon_pairing = ui.get_annotation_new(ifes_ordered)
        # Reorder the pairwise annotation based on the new ordering
        pw_info_ordered = ui.reorder_pw(ifes_ordered, pw_info)
        pw_lr_ordered = ui.reorder_pw(ifes_ordered, pw_lr)
        rp_contacts_ordered = ui.reorder_pw(ifes_ordered, rp_contacts)
        chain_info_ordered = ui.reorder_chain(ifes_ordered, chain_info)
        return render_template("correspondence_display.html", query_nts=query_units,
                               coord=coord_ordered, coord_core=None, ifes=ifes_ordered, maxDisc=max_disc, p2=percentile,
                               data=heatmap_data, trna_occupancy=trna_occupancy, functional_state=functional_state,
                               factors_bound=factors_bound,
                               calculated_rotation=calculated_intersubunit,
                               calculated_head=calculated_head, antibiotic_bound=antibiotic_bound,
                               description=description, structure_method=structure_method,
                               structure_resolution=structure_resolution, principal_investigator=principal_investigator,
                               publication_year=publication_year, pw_info=pw_info_ordered, pw_list=pw_sorted,
                               pw_tertiary=pw_lr_ordered, rp_contacts=rp_contacts_ordered, release_id=nr_release,
                               ec_id=ec_id, chain_info=chain_info_ordered, mean=mean, mdn=median)
| sridevan/correspondence_server | corr_server/views/correspondence_views.py | correspondence_views.py | py | 23,793 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "infrastructure.process_input.input_type",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "infrastructure.process_input",
"line_number": 289,
"usage_type": "name"
},
{... |
32635863663 | # -*- coding:utf-8 -*-
import os
import cv2
import numpy as np
import torch.utils.data as data
# 3x3 structuring element for morphological operations.
# NOTE(review): ``kernel``, ``infinite`` and ``INF`` are not referenced in
# this module — presumably kept in sync with a sibling dataset module.
kernel = np.ones((3, 3), np.uint8)
infinite = 1e-10
INF = 1e-3
def make_dataset(root):
    """Pair each image under *root* with its ground-truth mask path.

    The mask directory is derived by substituting ``"texture"`` with
    ``"pure"`` in *root*; each mask shares the image's base name with a
    ``.png`` extension.

    Args:
        root: Directory containing the input (texture) images.

    Returns:
        List of ``(image_path, mask_path)`` tuples; empty when the derived
        mask directory does not exist (matching the original behavior).
    """
    # Hoisted out of the loop: the mask directory is invariant per call
    # (the original re-derived and re-checked it for every file).
    mask_root = root.replace("texture", "pure")
    if not os.path.exists(mask_root):
        return []
    imgs = []
    for name in os.listdir(root):
        filename, _extension = os.path.splitext(name)
        imgs.append((os.path.join(root, name),
                     os.path.join(mask_root, filename + '.png')))
    return imgs
class Dataset(data.Dataset):
    """Evaluation dataset: yields each image scaled to [0, 1] (twice)."""

    def __init__(self, root):
        # Collect (image, mask) pairs and shuffle them once up front.
        self.imgs = np.random.permutation(make_dataset(root))

    def __getitem__(self, index):
        # x: source image path, m: corresponding "pure" mask path (unused here).
        x_path, m_path = self.imgs[index]
        image = cv2.imread(x_path)
        image = image.transpose(2, 0, 1)  # HWC -> CHW
        return image / 255., image / 255., x_path

    def __len__(self):
        return len(self.imgs)
| 1528219849/DFETS-Net | data_handle/dataset_eval.py | dataset_eval.py | py | 1,048 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.ones",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number"... |
18915731113 | import pytest
from src.unique_morse_code_words import Solution
@pytest.mark.parametrize(
    "word,expected",
    [
        ("gin", "--...-."),
        ("msg", "--...--."),
    ],
)
def test_to_morse(word, expected):
    # Each letter maps to its Morse code; codes are concatenated unseparated.
    assert Solution().to_morse(word) == expected
@pytest.mark.parametrize(
    "words,expected",
    [
        (["gin", "zen", "gig", "msg"], 2),
        (["a"], 1),
    ],
)
def test_solution(words, expected):
    # Expected value is the count of distinct Morse transcriptions.
    assert Solution().uniqueMorseRepresentations(words) == expected
| lancelote/leetcode | tests/test_unique_morse_code_words.py | test_unique_morse_code_words.py | py | 501 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "src.unique_morse_code_words.Solution",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_n... |
3650309290 | import psycopg2
import os
from os.path import join, dirname
from dotenv import load_dotenv
dotenv_path = join(dirname(__file__), '../.env')
load_dotenv(dotenv_path)
def connect():
    """Open a new PostgreSQL connection from the ENV_* environment vars."""
    settings = {
        "host": os.environ.get('ENV_host'),
        "database": os.environ.get('ENV_database'),
        "user": os.environ.get('ENV_user'),
        "password": os.environ.get('ENV_password'),
    }
    return psycopg2.connect(**settings)
def runCommand(cmd, *args):
    """Execute *cmd* against the database and return the mapped rows.

    Args:
        cmd: SQL statement expected to yield (id, title, description) rows.
        *args: Optional parameter tuples; the statement is executed once per
            tuple (results of the last execution are fetched).

    Returns:
        List of dicts as produced by :func:`convertResult`.
    """
    conn = connect()
    try:
        cur = conn.cursor()
        try:
            if not args:
                cur.execute(cmd)
            else:
                for arg in args:
                    cur.execute(cmd, arg)
            conn.commit()
            result = cur.fetchall()
        finally:
            # fix: close the cursor even when execution raises
            cur.close()
    finally:
        # fix: the original leaked the connection on any error before close()
        conn.close()
    return convertResult(result)
def convertResult(data):
    """Map raw (id, title, description) rows to a list of dicts."""
    return [
        {"id": row[0], "title": row[1], "description": row[2]}
        for row in data
    ]
| dapt4/taskapp-flask-angular-postgres | backend/db/connect_db.py | connect_db.py | py | 876 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "psycopg2.connect",
"... |
41893805327 | # pylint: disable=F0401
from dataclasses import dataclass
from functools import reduce
from itertools import chain, combinations, islice, tee
from typing import Iterator, Optional, Union
from utils import read_input
@dataclass
class SnailNode:
    """A snailfish number (AoC 2021 day 18): a binary tree whose leaves
    hold ints.  A leaf carries ``val``; an internal node carries
    ``left``/``right``.  ``depth`` is the node's nesting level."""
    val: Optional[int] = None
    left: Optional["SnailNode"] = None
    right: Optional["SnailNode"] = None
    depth: int = 0
    def __repr__(self) -> str:
        # Emits a valid Python list literal, e.g. "[[1, 2], 3]" — relied
        # upon by __add__ below.
        return (
            f"{self.val}"
            if self.val is not None
            else f"[{self.left!r}, {self.right!r}]"
        )
    @property
    def is_num(self) -> bool:
        """True for a leaf (regular number)."""
        return self.val is not None
    @property
    def is_num_pair(self) -> bool:
        """True for an internal node whose children are both leaves."""
        if not self.is_num:
            return self.left.is_num and self.right.is_num
        return False
    def in_order_nums(self) -> Iterator["SnailNode"]:
        """Yield the leaves of this subtree, left to right."""
        if self.left is not None:
            yield from self.left.in_order_nums()
        if self.is_num:
            yield self
        if self.right is not None:
            yield from self.right.in_order_nums()
    def in_order_pairs(self) -> Iterator["SnailNode"]:
        """Yield the leaf-pairs (explode candidates), left to right."""
        if self.left is not None:
            yield from self.left.in_order_pairs()
        if self.is_num_pair:
            yield self
        if self.right is not None:
            yield from self.right.in_order_pairs()
    def predecessor(self, num: "SnailNode") -> Optional["SnailNode"]:
        """Return the leaf immediately left of *num*, or None if first."""
        if not num.is_num:
            raise ValueError
        # Slide a (pred, node) window over the in-order leaves.
        it1, it2 = tee(self.in_order_nums(), 2)
        it2 = islice(it2, 1, None)
        for (pred, node) in zip(it1, it2):
            if node is num:
                return pred
        return None
    def successor(self, num: "SnailNode") -> Optional["SnailNode"]:
        """Return the leaf immediately right of *num*, or None if last."""
        if not num.is_num:
            raise ValueError
        # Slide a (node, succ) window over the in-order leaves.
        it1, it2 = tee(self.in_order_nums(), 2)
        it2 = islice(it2, 1, None)
        for (node, succ) in zip(it1, it2):
            if node is num:
                return succ
        return None
    def explode(self, pair: "SnailNode"):
        """Explode *pair*: add its values to the neighboring leaves and
        replace it with a 0 leaf.  Raises ValueError if *pair* is not a
        pair of regular numbers."""
        L, R = pair.left, pair.right
        if L and L.is_num and R and R.is_num:
            pred = self.predecessor(L)
            succ = self.successor(R)
        else:
            raise ValueError(f"{pair} is not a numeric pair.")
        if pred is not None:
            if pred.is_num:
                pred.val += L.val
            else:
                raise ValueError(f"{pred} is not a numeric node.")
        if succ is not None:
            if succ.is_num:
                succ.val += R.val
            else:
                raise ValueError(f"{succ} is not a numeric node.")
        # Collapse the pair into a single 0 leaf.
        pair.val = 0
        pair.left = None
        pair.right = None
        return
    def split(self, num: "SnailNode"):
        """Split leaf *num* into a pair [floor(n/2), ceil(n/2)]."""
        if num.is_num:
            n = num.val
        else:
            raise ValueError(f"{num} is not a numeric node.")
        num.val = None
        num.left = SnailNode(val=(n // 2), depth=num.depth + 1)
        num.right = SnailNode(val=(n + 1) // 2, depth=num.depth + 1)
        return
    def reduce(self):
        """Apply one reduction action (explode any depth>=4 pair, else
        split any leaf >= 10), then recurse until neither rule applies.
        NOTE(review): recursion depth grows with the number of actions —
        presumably fine for puzzle-sized inputs."""
        explode_pairs = filter(lambda p: p.depth >= 4, self.in_order_pairs())
        split_nums = filter(lambda n: n.val >= 10, self.in_order_nums())
        # Explodes take priority over splits (chain order matters here).
        node_to_process = next(chain(explode_pairs, split_nums), None)
        if node_to_process is None:
            return
        if node_to_process.is_num_pair:
            self.explode(node_to_process)
        else:
            self.split(node_to_process)
        self.reduce()
    def magnitude(self) -> int:
        """3 * left magnitude + 2 * right magnitude; leaf value for leaves."""
        if self.is_num:
            return self.val
        else:
            return (3 * self.left.magnitude()) + (2 * self.right.magnitude())
    def __add__(self, other):
        # Serialize both operands and re-parse to get fresh nodes with
        # correct depths; eval only ever sees our own __repr__ output.
        expr_str = f"[{self!r}, {other!r}]"
        sum_ = SnailNode.parse(eval(expr_str))
        sum_.reduce()
        return sum_
    def __eq__(self, other):
        # Structural equality via the canonical repr.
        # NOTE(review): defining __eq__ implicitly sets __hash__ to None,
        # making instances unhashable — acceptable for this script.
        return repr(self) == repr(other)
    @staticmethod
    def parse(
        expr: Union[list, int], parent: Optional["SnailNode"] = None
    ) -> "SnailNode":
        """Build a SnailNode tree from a nested-list/int literal."""
        if isinstance(expr, list):
            expr_left, expr_right = expr
            node = SnailNode(depth=parent.depth + 1 if parent else 0)
            left, right = (
                SnailNode.parse(expr_left, parent=node),
                SnailNode.parse(expr_right, parent=node),
            )
            node.left = left
            node.right = right
            return node
        elif isinstance(expr, int):
            return SnailNode(val=expr, depth=parent.depth + 1 if parent else 0)
        else:
            raise ValueError
def part1(snail_nums: list[SnailNode]):
    """Return the magnitude of the left-to-right sum of all numbers."""
    grand_total = reduce(lambda left, right: left + right, snail_nums)
    return grand_total.magnitude()
def part2(snail_nums: list[SnailNode]):
    """Largest magnitude obtainable by adding any two distinct numbers.

    Snailfish addition is not commutative, so both orders of every pair
    are evaluated.
    """
    magnitudes = []
    for first, second in combinations(snail_nums, 2):
        magnitudes.append((first + second).magnitude())
        magnitudes.append((second + first).magnitude())
    return max(magnitudes)
def prepare_input():
    """Parse the day-18 puzzle input into a list of ``SnailNode`` trees.

    Each input line is a nested-list literal such as ``[[1,2],3]``.
    """
    import ast  # local import; only this function needs it

    input_ = read_input("18").strip().split("\n")
    # fix: ast.literal_eval accepts only literals, unlike eval which would
    # execute arbitrary expressions read from the input file.
    return [SnailNode.parse(ast.literal_eval(line)) for line in input_]
def main():
    """Solve both parts of day 18 and print the answers."""
    snail_nums = prepare_input()
    print(part1(snail_nums))
    print(part2(snail_nums))
if __name__ == "__main__":
    main()
| hsherkat/AOC2021 | day18.py | day18.py | py | 5,244 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Optional",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Iterator",
... |
27473837054 | """A modern skeleton for Sphinx themes."""
__version__ = "1.0.0.dev2"
from pathlib import Path
from typing import Any, Dict
from sphinx.application import Sphinx
# Absolute path to the bundled theme resources (theme/basic-ng/).
_THEME_PATH = (Path(__file__).parent / "theme" / "basic-ng").resolve()
def setup(app: Sphinx) -> Dict[str, Any]:
    """Entry point for sphinx theming."""
    app.require_sphinx("4.0")
    app.add_html_theme("basic-ng", str(_THEME_PATH))
    metadata: Dict[str, Any] = {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
    return metadata
| pradyunsg/sphinx-basic-ng | src/sphinx_basic_ng/__init__.py | __init__.py | py | 532 | python | en | code | 26 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sphinx.application.Sphinx",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"... |
1701452870 | from pathlib import Path
class contextmanager:
    """Context manager that opens a text file, prompting interactively for
    replacements when the mode or path is invalid.

    Supported modes are "r", "a" and "w"; the target file must already
    exist.  ``__enter__`` returns the open file object.
    """

    def __init__(self, filepath="", mode="r"):
        self.filepath = filepath
        self.mode = mode
        self.rd = None  # file object; assigned in __enter__

    def _resolve_mode(self):
        # fix: the original prompted once and accepted the second answer
        # unvalidated; re-prompt until a supported mode is given.
        while self.mode not in ["w", "a", "r"]:
            self.mode = input("Wrong mode! Try once again:")

    def _resolve_path(self):
        # Prompt when no path was supplied, then allow exactly one retry
        # if the file does not exist (mirrors the original's retry logic).
        if self.filepath == "":
            self.filepath = input("Enter file path:")
        if not Path(self.filepath).is_file():
            self.filepath = input("File not found! Try another one:")
            if not Path(self.filepath).is_file():
                raise Exception("File not found!")

    def __enter__(self):
        self._resolve_mode()
        self._resolve_path()
        # fix: the original swallowed ValueError from open() (invalid mode)
        # and then crashed on ``return self.rd`` with AttributeError.
        self.rd = open(self.filepath, self.mode, newline="\n")
        return self.rd

    def __exit__(self, *args):
        # fix: guard against __enter__ having failed before open().
        if self.rd is not None:
            self.rd.close()
# Demo: append a line to ``newfile.txt`` via the custom context manager.
with contextmanager("newfile.txt", "a") as cm:
    cm.write('New line!')
    pass
{
"api_name": "pathlib.Path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 25,
"usage_type": "call"
}
] |
2894204249 | from typing import List
from src.dialog.common.Dialog import Dialog
from src.dialog.common.DialogContainer import DialogContainer
from src.dialog.common.DialogFactory import DialogFactory
from src.dialog.common.form_doc.FormDocFuncs import FormDocFuncs
from src.docs_publisher.common.DocsPublisher import DocsPublisher
from src.property.Property import Property
from src.session.common.Session import Session
from src.storage.common.entity.EntityStorage import EntityStorage
from src.template.doc.DocTemplate import DocTemplate
from src.template.doc.DocsTemplates import DocsTemplates
from src.template.property.PropertyTemplate import PropertyTemplate
class FormDocContainer(DialogContainer, FormDocFuncs):
    """Dialog container backing the "form document" dialog.

    Bridges the dialog with the session (current entity key), the entity
    storage (stored entity properties) and the docs publisher.
    """

    def __init__(
            self,
            dialog_factory: DialogFactory,
            session: Session,
            docs_publisher: DocsPublisher,
            storage: EntityStorage,
            docs_templates: DocsTemplates
    ):
        super().__init__(dialog_factory)
        self.__session = session
        self.__docs_publisher = docs_publisher
        self.__storage = storage
        self.__docs_templates = docs_templates

    def form_doc(self, form_doc_filled_props: List[Property]):
        """Publish documents for the current entity, merging its stored
        properties with the user-filled form properties keyed by id."""
        self.__docs_publisher.publish_docs(
            self.__storage.get_entity(
                self.__session.get_form_doc_entity_key()
            ).props,
            # idiom fix: iterate the properties directly instead of
            # indexing via range(0, len(...)).
            {prop.get_id(): prop for prop in form_doc_filled_props}
        )

    def get_docs_templates(self) -> List[DocTemplate]:
        """Return the available document templates."""
        return self.__docs_templates.templates

    def get_form_doc_entity_key(self) -> str:
        """Return the key of the entity currently selected for publishing."""
        return self.__session.get_form_doc_entity_key()

    def create_dialog(self) -> Dialog:
        """Create the dialog widget for this container."""
        return self.dialog_factory.create_form_doc_dialog(self)

    def closed_on_x(self):
        """Handle the dialog being closed via the window's X button."""
        self.close_dialog()
| andreyzaytsev21/MasterDAPv2 | src/dialog/common/form_doc/FormDocContainer.py | FormDocContainer.py | py | 1,917 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "src.dialog.common.DialogContainer.DialogContainer",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "src.dialog.common.form_doc.FormDocFuncs.FormDocFuncs",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "src.dialog.common.DialogFactory.DialogFact... |
35659485468 | """The services validators test module."""
import pytest
from django.core.exceptions import ValidationError
from django.db.models import QuerySet
from moneyed import EUR, USD, Money
from services.models import Service
from services.validators import (validate_service_location,
validate_service_price)
# Every test in this module requires database access.
pytestmark = pytest.mark.django_db
def test_validate_service_price(services: QuerySet):
    """Should validate a service price.

    Mutates one price object through a sequence of valid/invalid states;
    the statement order is significant.
    """
    price = services.first().price
    # A fixed price must carry a concrete value.
    price.is_price_fixed = True
    price.price = None
    with pytest.raises(ValidationError):
        validate_service_price(price)
    price.price = Money(0.34, EUR)
    validate_service_price(price)
    # A variable price requires a start price...
    price.is_price_fixed = False
    price.start_price = None
    with pytest.raises(ValidationError):
        validate_service_price(price)
    # ...with matching start/end currencies...
    price.start_price = Money(0.34, EUR)
    price.end_price = Money(1.5, USD)
    with pytest.raises(ValidationError):
        validate_service_price(price)
    # ...and a start price below the end price.
    price.start_price = Money(2.5, USD)
    with pytest.raises(ValidationError):
        validate_service_price(price)
    price.start_price = Money(0.5, USD)
    validate_service_price(price)
def test_validate_service_location(
    services: QuerySet,
    professionals: QuerySet,
):
    """Should validate a service location.

    The location must belong to the same professional as its service, and
    the location's service must be set.
    """
    location = services.filter(
        service_type=Service.TYPE_CLIENT_LOCATION).first().locations.first()
    validate_service_location(location)
    # Reassign the service to a different professional -> invalid.
    location.service.professional = professionals.exclude(
        pk=location.location.professional.pk).first()
    with pytest.raises(ValidationError):
        validate_service_location(location)
    # Missing service -> invalid with an explicit message.
    location.service = None
    with pytest.raises(ValidationError) as error:
        validate_service_location(location)
    assert "is not set" in str(error)
| webmalc/d8base-backend | services/tests/validators_tests.py | validators_tests.py | py | 1,855 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pytest.mark",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.db.models.QuerySet",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "services.models.first",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "serv... |
70116366823 |
# coding: utf-8
# # Assignment 2
#
# Before working on this assignment please read these instructions fully. In the submission area, you will notice that
# you can click the link to **Preview the Grading** for each step of the assignment. This is the criteria that will be used
# for peer grading. Please familiarize yourself with the criteria before beginning the assignment.
#
# An NOAA dataset has been stored in the file `data/C2A2_data/BinnedCsvs_d100/2f0bb04162655f0cba429b865292f31482e817e0b3ee9da0f40185d7.csv`.
# The data for this assignment comes from a subset of The National Centers for Environmental Information (NCEI)
# [Daily Global Historical Climatology Network](https://www1.ncdc.noaa.gov/pub/data/ghcn/daily/readme.txt)
# (GHCN-Daily). The GHCN-Daily is comprised of daily climate records from thousands of land surface stations across the globe.
#
# Each row in the assignment datafile corresponds to a single observation.
#
# The following variables are provided to you:
#
# * **id** : station identification code
# * **date** : date in YYYY-MM-DD format (e.g. 2012-01-24 = January 24, 2012)
# * **element** : indicator of element type
# * TMAX : Maximum temperature (tenths of degrees C)
# * TMIN : Minimum temperature (tenths of degrees C)
# * **value** : data value for element (tenths of degrees C)
#
# For this assignment, you must:
#
# 1. Read the documentation and familiarize yourself with the dataset, then write some python code which returns a line
# graph of the record high and record low temperatures by day of the year over the period 2005-2014. The area between
# the record high and record low temperatures for each day should be shaded.
# 2. Overlay a scatter of the 2015 data for any points (highs and lows) for which the ten year record (2005-2014) record
# high or record low was broken in 2015.
# 3. Watch out for leap days (i.e. February 29th), it is reasonable to remove these points from the dataset for
# the purpose of this visualization.
# 4. Make the visual nice! Leverage principles from the first module in this course when developing your solution.
# Consider issues such as legends, labels, and chart junk.
#
# The data you have been given is near **None, None, United Kingdom**, and the stations the data comes from are shown on the map below.
# In[33]:
import matplotlib.pyplot as plt
import mplleaflet
import pandas as pd
import numpy as np
import pylab
# --- Data preparation: load NOAA readings, derive per-day records ---------
#df_temp - Opens file and make it easy to read
df_temp = pd.read_csv('Dataset.csv') #open the data file
df_temp['Temp']=df_temp['Data_Value']*0.1 #Creates a column with temperatures in °C
df_temp1 = df_temp.loc[df_temp['ID'] == 'UKM00003740'] #Select only one location
df_temp1 = df_temp1.sort_values(['Date','Element']) #Sorts the data by date
#df_temp - Creates columns for Day and Month of the year
df_temp1['Date'] = pd.to_datetime(df_temp1['Date']) #Converts argument to datetime
df_temp1['Month'] = df_temp1['Date'].dt.strftime('%b')#Creates a column "Month" with the month taken by "Date"
df_temp1['Day'] = df_temp1['Date'].dt.day #Creates a column "Day" with the day taken by "Date"
df_temp1['Year'] = df_temp1['Date'].dt.year #Creates a column "Year" with the year taken by "Date"
# NOTE(review): the result of the next expression is discarded (no
# assignment, not inplace) — it has no effect on df_temp1.
df_temp1.set_index(['Month','Day']).sum(level=[0,1]).reset_index() #Creates 2 indexes "Month" and "Day"
#df graph_2015 - Creates df filtered for data in 2015
# NOTE(review): the Element mask below uses ``df_temp`` while the frame
# being filtered is ``df_temp1`` — presumably ``df_temp1['Element']`` was
# intended; this only works because both share the same index.  TODO confirm.
graph_2015max = df_temp1[(df_temp1['Year']==2015) & (df_temp['Element']=='TMAX')] #Creates a df where there are only the temperature with Element = TMAX
graph_2015min = df_temp1[(df_temp1['Year']==2015) & (df_temp['Element']=='TMIN')] #Creates a df where there are only the temperature with Element = TMIN
graph_2015 = pd.merge(graph_2015max, graph_2015min, how='outer', on=['Date']) #Merges the 2 creates dfs "graph_2015max" and "graph:2015min" on the column "Date"
graph_2015 = graph_2015[['Date', 'Temp_x','Temp_y']] #Selects only 3 columns from the df
graph_2015 = graph_2015.sort_values(['Date']) #Sorts the df by Date
graph_2015 = graph_2015.rename(index=str, columns={"Temp_x": "2015_Tmax", "Temp_y": "2015_Tmin"})
graph_2015 = graph_2015.reset_index() #Resets index
graph_2015.drop("index", 1, inplace = True) #Drops the old index
#df graph_2015 - Creates columns for Day and Month of the year
graph_2015['Date'] = pd.to_datetime(graph_2015['Date']) #Converts argument to datetime
graph_2015['Month'] = graph_2015['Date'].dt.strftime('%b')#Creates a column "Month" with the month taken by "Date"
graph_2015['Day'] = graph_2015['Date'].dt.day #Creates a column "Day" with the day taken by "Date"
#df graph - Creates df filtered for data from 2005 until 2014 (Exclude the only other different year "2015")
graph = df_temp1.where(df_temp1['Year']!=2015).groupby(['Month','Day']).Temp.agg(['min','max']) #Groups by Month and by Day
#                                           (removing the year 2014) and select the Max and Min Value from the column "Temp"
graph = graph.reset_index() #Resets the index
sorter = ["Jan", "Feb", "Mar", "Apr", "May", "Jun","Jul","Aug","Sep","Oct","Nov","Dec"] #Creates a list of months to be used to sort the months in the "graph" df
sorterIndex = dict(zip(sorter,range(len(sorter)))) #Creates a dictionary with months and number
# {'Aug': 7, 'Apr': 3, 'Oct': 9, 'Jun': 5, 'Dec': 11, 'Mar': 2, 'Nov': 10, 'Jul': 6, 'Feb': 1, 'Sep': 8, 'May': 4, 'Jan': 0}
graph['Month_Rank'] = graph['Month'].map(sorterIndex) #Creates a new column 'Month_Rank' with a number associated with the month
graph.sort_values(['Month_Rank', 'Day'], ascending = [True, True], inplace = True) #Sorts 'graph' df according to 'Month_Rank' and Day'
graph.drop('Month_Rank', 1, inplace = True) #Drops 'Month_Rank' column
graph = graph.reset_index() #Resets index
graph.drop("index", 1, inplace = True) #Drops the old index
#df2 - Creates a new df which included the merged dfs graph and graph_2015
df2 = pd.merge(graph, graph_2015, how='outer', on=['Month','Day']) #Merges the 2 creates dfs "graph_2015max" and "graph:2015min" on the column "Date"
df2 = df2.drop(59) #Drops the 29th of February using the index value of 59
df2 = df2.reset_index() #Resets index
df2.drop("index", 1, inplace = True) #Drops the old index
df2 = df2[['Date','Month','Day','min','max','2015_Tmin','2015_Tmax']] #Reorders the columns
df2['outmax']=df2['2015_Tmax'].where(df2['2015_Tmax']>df2['max']) #Creates a new column that shows only temperature
#                                                where the Tmax in 2015 was higher than the other years
df2['outmin']=df2['2015_Tmin'].where(df2['2015_Tmin']<df2['min']) #Creates a new column that shows only temperature
#                                                where the Tmin in 2015 was lower than the other years
# --- Plotting: record bands, 2015 outliers, axis/tick formatting ----------
#df2 - Plots line Tmin 2005-2014 and Tmax 2005-2014
plt.plot(df2.index.values,df2['min'], label = 'Tmin 2005-2014') #Plots the min values using a line
plt.plot(df2.index.values,df2['max'], label = 'Tmax 2005-2014') #Plots the max values using a line
plt.gca().fill_between(range(len(df2)), df2['min'], df2['max'], facecolor='blue', alpha=0.25) #Fills the area between the linear data and exponential data
#df2 - Scatter of the 2015 data for any points (highs and lows) for which the ten year record (2005-2014) record high or record low was broken in 2015
plt.scatter(df2.index.values,df2['outmin'], c='blue', label = 'Record Tmin 2015') #Plots the min values using a line
plt.scatter(df2.index.values,df2['outmax'], c='red', label = 'Record Tmax 2015') #Plots the max values using a line
#Legend format
plt.legend(loc=2, frameon=False, fontsize =12)
#Tick format
fig = plt.figure(1) # Prepare the figure
plot = fig.add_subplot(111) # Defines a fake subplot that is in fact only the plot
plot.tick_params(axis='both', which='major', labelsize=12) #Changes the fontsize of minor ticks label
#Title format
plt.title('Daily climate records in United Kingdom', fontsize = 16, fontweight='bold')
#y and x-axis label format
plt.ylabel('Temperature /°C', fontsize = 14)
plt.xlabel('Month', fontsize = 14)
plt.gca().xaxis.set_label_coords(0.5,-0.1)
#Figure size
fig = plt.gcf()
fig.set_size_inches(12.5, 7)
pylab.xlim([0,365])
# Build cumulative day counts per month to place the month tick positions.
x_axis = df2.groupby(['Month']).size()
x_axis = x_axis.reset_index()
x_axis['Month_Rank'] = x_axis['Month'].map(sorterIndex) #Creates a new column 'Month_Rank' with a number associated with the month
x_axis.sort_values(['Month_Rank'], ascending = True, inplace = True) #Sorts 'graph' df according to 'Month_Rank' and Day'
x_axis.drop('Month_Rank', 1, inplace = True) #Drops 'Month_Rank' column
x_axis = x_axis.reset_index() #Resets index
x_axis.drop("index", 1, inplace = True) #Drops the old index
x_axis.rename(index=str, columns={0: "Days"}, inplace = True)
x_values = []
x=0
for i in x_axis['Days']:
    x = x+i
    x_values.append(x)
plt.xticks(x_values, sorter)
# Hide the default tick labels; month names are drawn manually below so
# they can be offset relative to the tick positions.
plt.gca().axes.get_xaxis().set_ticklabels([])
x=0
for i in x_values:
    plt.gca().text(i - 20, -13.7, sorter[x],fontsize=12)
    x = x+1
plt.show()
| dimaggiofrancesco/DATA_VISUALISATION-UK-climate-record | UK daily climate record.py | UK daily climate record.py | py | 9,092 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pandas.merge",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
... |
26723585229 | import allure
import pytest
from models.Enpoints import ReqresInEndpoint
from models.Register import RegisterRequest, RegisterResponseSuccess, RegisterResponseError
from models.Resource import Resource
from models.User import User
@pytest.mark.reqres_in
@allure.suite(
    suite_name="Test-suite №1"
)
@allure.severity(allure.severity_level.NORMAL)
class TestReqresInFirst:
    """Checks on the user-list payload returned by the reqres.in API."""
    @allure.description(test_description=
                        "Тест проверяет вхождение id пользователя в название его изображения"
                        )
    def test_user_id_matches_the_name_of_the_avatar_file(self, api_client):
        # Each user's numeric id must appear in their avatar URL/file name.
        response = api_client.get_request(ReqresInEndpoint.list_of_users).json()["data"]
        users = [User(**user) for user in response]
        for user in users:
            assert str(user.id) in user.avatar
    @allure.description(test_description=
                        "Тест проверяет вхождение определенного почтового домена в атрибуте 'email'"
                        )
    def test_user_email_contains_specific_domain(self, api_client):
        # Every user email must belong to the reqres.in domain.
        response = api_client.get_request(ReqresInEndpoint.list_of_users).json()["data"]
        users = [User(**user) for user in response]
        for user in users:
            assert user.email.endswith("@reqres.in")
@pytest.mark.reqres_in
@allure.description(
test_description="Тест-кейс 2"
)
@allure.severity(allure.severity_level.NORMAL)
class TestReqresInSecond:
@pytest.mark.positive
def test_user_can_successfully_register(self, api_client):
expected_id = 4
expected_token = "QpwL5tke4Pnpja7X4"
user = RegisterRequest(email="eve.holt@reqres.in", password="pistol")
response = api_client.post_request(
endpoint=ReqresInEndpoint.create_user,
body=user.dict()
)
success_register = RegisterResponseSuccess(**response.json())
assert response.status_code == 200
assert success_register.id == expected_id
assert success_register.token, expected_token
@pytest.mark.negative
def test_user_cant_successfully_register(self, api_client):
user = RegisterRequest(email="sydney@fife", password="")
response = api_client.post_request(
endpoint=ReqresInEndpoint.create_user,
body=user.dict()
)
error_register = RegisterResponseError(**response.json())
assert response.status_code == 400
assert error_register.error == "Missing password"
@pytest.mark.reqres_in
@allure.description(
test_description="Тест-кейс 3"
)
@allure.severity(allure.severity_level.NORMAL)
class TestReqresInThird:
def test_list_of_resource_is_sorted(self, api_client):
response = api_client.get_request(
ReqresInEndpoint.list_of_resources
).json()["data"]
resources: list[int] = [
Resource(**resource).year for resource in response
]
sorted_resources = sorted(resources)
assert resources == sorted_resources
@pytest.mark.reqres_in
@allure.description(
test_description="Тест-кейс 4"
)
@allure.severity(allure.severity_level.CRITICAL)
class TestReqresInFourth:
def test_can_delete_specific_user(self, api_client):
response = api_client.delete_request(
ReqresInEndpoint.delete_user
)
assert response.status_code == 204
| eugenereydel99/pyrequests_api_automation_testing | test_reqres_in.py | test_reqres_in.py | py | 3,511 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "models.Enpoints.ReqresInEndpoint.list_of_users",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "models.Enpoints.ReqresInEndpoint",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "models.User.User",
"line_number": 21,
"usage_type": ... |
73444085223 | from django import template
register = template.Library()
@register.simple_tag
def get_months():
return [
(1, 'Enero'),
(2, 'Febrero'),
(3, 'Marzo'),
(4, 'Abril'),
(5, 'Mayo'),
(6, 'Junio'),
(7, 'Julio'),
(8, 'Agosto'),
(9, 'Septiembre'),
(10, 'Octubre'),
(11, 'Noviembre'),
(12, 'Diciembre'),
]
| feldmatias/stockAmbosMG | Base/templatetags/get_months.py | get_months.py | py | 403 | python | es | code | 1 | github-code | 36 | [
{
"api_name": "django.template.Library",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 3,
"usage_type": "name"
}
] |
12435354493 | """
104. Maximum Depth of Binary Tree
Given the root of a binary tree, return its maximum depth.
A binary tree's maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.
Example 1:
Input: root = [3,9,20,null,null,15,7]
Output: 3
Example 2:
Input: root = [1,null,2]
Output: 2
Example 3:
Input: root = []
Output: 0
Example 4:
Input: root = [0]
Output: 1
Constraints:
The number of nodes in the tree is in the range [0, 104].
-100 <= Node.val <= 100
"""
from typing import Optional
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class MaxDepth:
def maxDepth(self, root: Optional[TreeNode]) -> int:
if root == None:
return 0
leftDepth = self.maxDepth(root.left)
rightDepth = self.maxDepth(root.right)
return max(leftDepth, rightDepth) + 1
def printTree(root):
if root:
print(root.val, end=" ")
printTree(root.left)
printTree(root.right)
def makeTreeFromArray(arr, root, i):
if i < len(arr) and arr[i] != None:
temp = TreeNode(arr[i])
root = temp
root.left = makeTreeFromArray(arr, root.left, 2 * i + 1)
root.right = makeTreeFromArray(arr, root.right, 2 * i + 2)
return root
if __name__ == "__main__":
arr = [1, None, 2]
tree = makeTreeFromArray(arr, None, 0)
print(MaxDepth().maxDepth(tree))
| ashishkssingh/Leetcode-Python | Algorithms/Easy/maximum_depth_of_binary_tree.py | maximum_depth_of_binary_tree.py | py | 1,499 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Optional",
"line_number": 46,
"usage_type": "name"
}
] |
36837926119 | from __future__ import annotations
import dataclasses
import bson.json_util as json
import seaborn as sns
import pandas as pd
from sklearn.metrics import r2_score
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
from database_instance import DatabaseInstance
import execution_tree as sbe
import physical_tree as abt
from parameters_extractor import extract_execution_stats
async def load_calibration_data(database: DatabaseInstance, collection_name: str) -> pd.DataFrame:
"""Load workflow data containing explain output from database and parse it. Retuned calibration DataFrame with parsed SBE and ABT."""
data = await database.get_all_documents(collection_name)
df = pd.DataFrame(data)
df['sbe'] = df.explain.apply(lambda e: sbe.build_execution_tree(
json.loads(e)['executionStats']))
df['abt'] = df.explain.apply(lambda e: abt.build(
json.loads(e)['queryPlanner']['winningPlan']['queryPlan']))
df['total_execution_time'] = df.sbe.apply(lambda t: t.total_execution_time)
return df
def remove_outliers(df: pd.DataFrame, lower_percentile: float = 0.1,
upper_percentile: float = 0.9) -> pd.DataFrame:
"""Remove the outliers from the parsed calibration DataFrame."""
def is_not_outlier(df_seq):
low = df_seq.quantile(lower_percentile)
high = df_seq.quantile(upper_percentile)
return (df_seq >= low) & (df_seq <= high)
return df[df.groupby(['run_id', 'collection',
'pipeline']).total_execution_time.transform(is_not_outlier).eq(1)]
def extract_sbe_stages(df: pd.DataFrame) -> pd.DataFrame:
"""Extract SBE stages from calibration DataFrame."""
def flatten_sbe_stages(explain):
def traverse(node, stages):
execution_time = node['executionTimeNanos']
children_fields = ['innerStage', 'outerStage', 'inputStage']
for field in children_fields:
if field in node and node[field]:
child = node[field]
execution_time -= child['executionTimeNanos']
traverse(child, stages)
del node[field]
node['executionTime'] = execution_time
stages.append(node)
sbe_tree = json.loads(explain)['executionStats']['executionStages']
result = []
traverse(sbe_tree, result)
return result
return pd.DataFrame(list(df.explain.apply(flatten_sbe_stages).explode()))
def get_sbe_stage(stages_df: pd.DataFrame, stage_name: str) -> pd.DataFrame:
"""Filter the SBE stages DataFrame by the given SBE stage name."""
return stages_df[stages_df.stage == stage_name].copy()
def extract_abt_nodes(df: pd.DataFrame) -> pd.DataFrame:
"""Extract ABT Nodes and execution statistics from calibration DataFrame."""
def extract(df_seq):
es_dict = extract_execution_stats(df_seq['sbe'], df_seq['abt'], [])
rows = []
for abt_type, es in es_dict.items():
for stat in es:
row = {
'abt_type': abt_type, **dataclasses.asdict(stat),
**json.loads(df_seq['query_parameters']), 'run_id': df_seq.run_id,
'pipeline': df_seq.pipeline, 'source': df_seq.name
}
rows.append(row)
return rows
return pd.DataFrame(list(df.apply(extract, axis=1).explode()))
def print_trees(calibration_df: pd.DataFrame, abt_df: pd.DataFrame, row_index: int = 0):
"""Print SBE and ABT Trees."""
row = calibration_df.loc[abt_df.iloc[row_index].source]
print('SBE')
row.sbe.print()
print('\nABT')
row.abt.print()
def print_explain(calibration_df: pd.DataFrame, abt_df: pd.DataFrame, row_index: int = 0):
"""Print explain."""
row = calibration_df.loc[abt_df.iloc[row_index].source]
explain = json.loads(row.explain)
explain_str = json.dumps(explain, indent=4)
print(explain_str)
def calibrate(abt_node_df: pd.DataFrame, variables: list[str] = None):
"""Calibrate the ABT node given in abd_node_df with the given model input variables."""
# pylint: disable=invalid-name
if variables is None:
variables = ['n_processed']
y = abt_node_df['execution_time']
X = abt_node_df[variables]
X = sm.add_constant(X)
nnls = LinearRegression(positive=True, fit_intercept=False)
model = nnls.fit(X, y)
y_pred = model.predict(X)
print(f'R2: {r2_score(y, y_pred)}')
print(f'Coefficients: {model.coef_}')
sns.scatterplot(x=abt_node_df['n_processed'], y=abt_node_df['execution_time'])
sns.lineplot(x=abt_node_df['n_processed'], y=y_pred, color='red')
if __name__ == '__main__':
import asyncio
from config import DatabaseConfig
async def test():
"""Smoke tests."""
database_config = DatabaseConfig(connection_string='mongodb://localhost',
database_name='abt_calibration', dump_path='',
restore_from_dump=False, dump_on_exit=False)
database = DatabaseInstance(database_config)
raw_df = await load_calibration_data(database, 'calibrationData')
print(raw_df.head())
cleaned_df = remove_outliers(raw_df, 0.0, 0.9)
print(cleaned_df.head())
sbe_stages_df = extract_sbe_stages(cleaned_df)
print(sbe_stages_df.head())
seek_df = get_sbe_stage(sbe_stages_df, 'seek')
print(seek_df.head())
abt_nodes_df = extract_abt_nodes(cleaned_df)
print(abt_nodes_df.head())
loop = asyncio.get_event_loop()
loop.run_until_complete(test())
| mongodb/mongo | buildscripts/cost_model/experiment.py | experiment.py | py | 5,682 | python | en | code | 24,670 | github-code | 36 | [
{
"api_name": "database_instance.DatabaseInstance",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "execution_tree.build_execution_tree",
"line_number": 20,
"usage_type": "call"
},
... |
4686279397 | import time
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import gdal
from LRSMSingleVersion.CONST.CONST import *
from LRSMSingleVersion.UILayer.Workbench.BorderItem import BorderItem
class GraphicsView(QGraphicsView):
CLICK_INVERT_TIME = 1.2
def __init__(self, parent=None):
super(GraphicsView, self).__init__(parent)
# 设置拖拽描述 橡皮筋?
self.setDragMode(QGraphicsView.RubberBandDrag)
# 渲染提示 hint提示 Antialiasing:消除混叠现象,消除走样,图形保真;
self.setRenderHint(QPainter.Antialiasing)
self.setRenderHint(QPainter.TextAntialiasing)
self.setViewportUpdateMode(QGraphicsView.FullViewportUpdate)
self.setStyleSheet("""
border: 0;
""")
self.clicked_time = 0.
# 要画的图形的形状
self.gadget = 2
# 缩放因子
self.scale_factor = 20
# 上一次鼠标触发的位置
self.is_mouse_pressed = False
self.last_cursor_pos = QPoint()
# 图像元数据模型
self.img_meta_model = QStandardItemModel()
# 文件列表数据模型
self.file_list_model = QStandardItemModel()
# 图像数据集
self.img_dataset = None
# 缩放显示RGB彩色图
self.show_color = True
self.border = None
self.ellipse = None
def read_image(self, img_path):
# gdal 注册
gdal.AllRegister()
gdal.SetConfigOption("GDAL_FILENAME_IS_UTF8", "NO")
self.img_dataset = gdal.Open(img_path, gdal.GA_ReadOnly)
if not self.img_dataset:
return
self.show_file_list(img_path)
self.show_image_info(img_path)
# 根据波段 显示图片
if self.img_dataset.RasterCount() != 3:
self.show_color = False
band = (self.img_dataset.GetRasterBand(1)) * 3,
self.show_image(band)
else:
self.img_dataset = True
band_list = (self.img_dataset.GetRasterBand(1),
self.img_dataset.GetRasterBand(2),
self.img_dataset.GetRasterBand(3))
self.show_image(band_list)
def show_file_list(self, img_path):
pass
def show_image_info(self, img_path):
pass
def show_image(self, band_list):
pass
def shape(self):
return self.gadget
def set_shape(self, shape: int):
self.gadget = shape
def zoom_by_mouse_point(self, mouse_point: QPoint):
dx = mouse_point.x() - self.last_cursor_pos.x()
dy = mouse_point.y() - self.last_cursor_pos.y()
vertical_scrollbar = self.verticalScrollBar()
horizontal_scrollbar = self.horizontalScrollBar()
if vertical_scrollbar.isVisible():
vertical_scrollbar.setValue(vertical_scrollbar.value() - dy)
if horizontal_scrollbar.isVisible():
horizontal_scrollbar.setValue(horizontal_scrollbar.value() - dx)
self.last_cursor_pos = mouse_point
def clicked_event(self, event):
pass
def moving_event(self, event):
pass
def creating_item(self, event: QMouseEvent):
mouse_point = event.pos()
if self.border is None:
self.border = BorderItem(self.mapToScene(self.last_cursor_pos),
self.scene(), shape=self.gadget,
rect=QRectF(0., 0., 0., 0.))
width = mouse_point.x() - self.last_cursor_pos.x()
height = mouse_point.y() - self.last_cursor_pos.y()
x, y = self.last_cursor_pos.x(), self.last_cursor_pos.y()
if width < 0:
x = mouse_point.x()
if height < 0:
y = mouse_point.y()
self.border.setPos(self.mapToScene(QPoint(x, y)))
self.border.set_rect(abs(width), abs(height))
def wheelEvent(self, event):
if event.modifiers() & Qt.ControlModifier:
factor = event.angleDelta().y() / 120.0
factor = 2. if factor > 0 else 0.5
self.scale(1, 1)
def mouseDoubleClickEvent(self, event):
if event.button() == Qt.LeftButton:
global_point = event.pos()
BorderItem(self.mapToScene(global_point), self.scene())
def mousePressEvent(self, event: QMouseEvent) -> None:
if event.button() == Qt.LeftButton:
self.is_mouse_pressed = True
self.last_cursor_pos = event.pos()
event.ignore()
QGraphicsView.mousePressEvent(self, event)
self.clicked_time = time.time()
def mouseMoveEvent(self, event: QMouseEvent) -> None:
mouse_point = event.pos()
if self.is_mouse_pressed and \
self.gadget != MOVE_TOOL and \
(event.buttons() and Qt.LeftButton):
if self.gadget == ELLIPSE_QUICK_SELECT_TOOL or \
self.gadget == RECT_QUICK_SELECT_TOOL:
self.creating_item(event)
elif self.gadget == ZOOM_TOOL:
dx = mouse_point.x() - self.last_cursor_pos.x()
factor = 1.03 if dx > 0 else 0.97
self.scale(factor, factor)
self.last_cursor_pos = mouse_point
elif self.gadget == GRIP_TONGS:
self.zoom_by_mouse_point(mouse_point)
event.accept()
event.ignore()
QGraphicsView.mouseMoveEvent(self, event)
def mouseReleaseEvent(self, event: QMouseEvent) -> None:
if self.is_mouse_pressed and (event.button() == Qt.LeftButton):
self.is_mouse_pressed = False
self.border = None
self.ellipse = None
# else:
event.ignore()
QGraphicsView.mouseReleaseEvent(self, event)
| yiyexingyu/LRSMSingleVersion | UILayer/Workbench/GraphicsView.py | GraphicsView.py | py | 5,810 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "gdal.AllRegister",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "gdal.SetConfigOption",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "gdal.Open",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "gdal.GA_ReadOnly",
... |
15760657167 | from datetime import datetime
#Server File
import time
import socket
import threading
import sys
import json
from connection_obj import *
from organisation import *
from session import *
# from Encryption import encrypt_message
import re
import random
import string
# from serverTest import *
import base64
from casEncrypt import *
from bank_account import *
# The bank is a client which can receive and send messages
# It keeps track of all its bank accounts, and can alter their balances
class Bank:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def __init__(self, jsonfile):
f = open (jsonfile, encoding='utf-8')
self.data = json.loads(f.read())
print(self.data)
self.myEncrypt = MyEncrypt()
self.sock.connect((self.data['server']['ip'],int(self.data['server']['port'])))
self.id = self.data['id']
self.name = self.data['name']
self.bankAccounts = []
self.initBankAccounts(self.data['users'])
# Register at central server
bdict = {"bank":{
"id": self.data['id'],
"name": self.data['name'],
"key":self.myEncrypt.getPubKeyB64()
}}
self.sendOverSocket(bdict)
iThread = threading.Thread(target = self.handleInput)
iThread.deamon = True
iThread.start()
while True:
data = self.sock.recv(6000)
if not data:
break
datadict = json.loads(data)
if 'userMessage' in datadict:
print('userMessage Detected')
self.handleUserMessage(datadict)
else:
print(datadict)
print(' ')
# Sends to server
def sendOverSocket(self, dicttosend):
json_object = json.dumps(dicttosend, indent = 4)
self.sock.send(bytes(json_object, encoding = 'utf-8'))
# Tries to send money from one bank account to the other, sends error if it's impossible
def tryAdd(self,fromID, toID, amt):
fromacc = None
toacc = None
for bacc in self.bankAccounts:
if bacc.id == fromID:
fromacc = bacc
elif bacc.id == toID:
toacc = bacc
print('-------BEFORE--------')
print('fromacc:')
print(fromacc)
print('toacc')
print(toacc)
if(fromacc is None or toacc is None):
#TODO return error message
self.sendErrorToUser(fromID,"That account wasn't found")
return
if(fromacc.balance<amt):
#TODO return error message
self.sendErrorToUser(fromID,"You don't have enough funds")
return
if(amt<0):
self.sendErrorToUser(fromID,"You can't transfer a negative amount")
return
fromacc.balance = fromacc.balance - amt
toacc.balance = toacc.balance + amt
f = open("logfile.txt", "a")
f.write("{0} -- {1}\n".format(datetime.now().strftime("%Y-%m-%d %H:%M"), "Sent "+str(amt)+" from "+str(fromacc.id)+ " to "+str(toacc.id)))
f.close()
print('-------AFTER--------')
print('fromacc:')
print(fromacc)
print('toacc')
print(toacc)
#Tries to sub from a given account, sends error if not right
def trySub(self,fromID, amt):
fromacc = None
for bacc in self.bankAccounts:
if bacc.id == fromID:
fromacc = bacc
print('-------BEFORE--------')
print('fromacc:')
print(fromacc)
if(fromacc is None):
#TODO return error message
self.sendErrorToUser(fromID,"That account wasn't found")
return
if(fromacc.balance<amt):
#TODO return error message
self.sendErrorToUser(fromID,"You don't have enough funds")
return
if(amt<0):
self.sendErrorToUser(fromID,"You can't sub a negative amount")
return
befbal = fromacc.balance
fromacc.balance = fromacc.balance - amt
f = open("logfile.txt", "a")
f.write("{0} -- {1}\n".format(datetime.now().strftime("%Y-%m-%d %H:%M"), "Subtracted "+str(amt)+" from "+str(fromID)+ ", balance after: "+str(fromacc.balance)))
f.close()
print('-------AFTER--------')
print('fromacc:')
print(fromacc)
# Decrypts message from a user and handles it
def handleUserMessage(self, datadict):
d = datadict['userMessage']
userID = d['userID']
enmes = d['message']
mes = self.myEncrypt.decryptB64(enmes)
# print(mes)
mesdict = json.loads(mes)
print(mesdict)
if(userID is not mesdict['from']):
print('user not authorised')
print(userID + ' vs '+mesdict['from'])
self.sendErrorToUser(userID,"You are not authorised to take money from that account")
return
if mesdict['type']=='ADD':
self.tryAdd(userID,mesdict['to'],mesdict['amt'])
elif mesdict['type']=='SUB':
self.trySub(userID, mesdict['amt'])
def sendErrorToUser(self, userID, message):
dicttosend = {"userError":{
"userID": userID,
"mes":message
}}
self.sendOverSocket(dicttosend)
def handleInput(self):
while True:
# self.sock.send(bytes(input(""),'utf-8'))
self.typed = input("")
self.typedsplit = self.typed.split()
temp = [self.typedsplit[0]]
reststring = ' '.join(self.typedsplit[1:])
temp2 = re.findall("\[(.*?)\]", reststring)
temp = temp + temp2
self.typedsplit = temp
# print('input: ')
# for e in self.typedsplit:
# print(e)
# if(self.typedsplit[0] == "SEND"):
# print('Send message detected')
# self.sendOverSocket({'bank': "or not"})
# if(self.typedsplit[0]== "GETONLINE"):
# print("asking who's online")
# self.sendOverSocket({"cool" : "story m8"})
# inits bank account from bank_config json
def initBankAccounts(self, d):
print("initializing bank accouts:")
for u in d:
self.bankAccounts.append(BankAccount(u))
print(self.bankAccounts)
| casgaindustries/Project | bank.py | bank.py | py | 6,574 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "socket.socket",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "json.loads"... |
39062464656 | import sqlite3
conn = sqlite3.connect('D:\pythonPRJ\kgitbankPython\sqliteTest\example.db')
c = conn.cursor()
data = [
('2020-03-07','buy','rhat',100,35.14),
('2020-02-09','buy','net',80,24.95),
('2020-03-05','buy','com',54,220.55),
('2020-01-18','buy','rhat',210,35.14)
]
sql = """insert into stocks values(?,?,?,?,?)"""
c.executemany(sql, data)
conn.commit()
conn.close() | Yun4e/mm | sqliteTest/sqliteTest4.py | sqliteTest4.py | py | 410 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 3,
"usage_type": "call"
}
] |
19739680799 | from __future__ import absolute_import
import socket
try:
import netifaces
except ImportError:
netifaces = None
from pkg_resources import working_set
from vigilo.common.logging import get_logger
LOGGER = get_logger(__name__)
from vigilo.common.gettext import translate
_ = translate(__name__)
from vigilo.vigiconf import conf
from vigilo.vigiconf.lib import EditionError
from vigilo.vigiconf.lib.server.local import ServerLocal, ServerManagerLocal
class ServerFactory(object):
"""
I{Factory} pour L{Server<base.Server>}: retourne une instance de la bonne
sous-classe.
@cvar localnames: Ensemble des noms/adresses faisant référence
à la machine locale.
@type localnames: C{set}
"""
localnames = set()
def __init__(self):
self.remote_class = self.find_remote_class()
if not self.localnames:
# L'union permet ici de modifier le set de la classe "en place",
# ce qui a le même effet qu'un cache.
self.localnames |= self._find_local_names()
def _find_local_names(self):
"""
Retourne l'ensemble des noms et des adresses
faisant référence à la machine locale.
@return: Ensemble des noms et adresses de la machine locale.
@rtype: C{set}
"""
localnames = set([self._get_hostname(), 'localhost'])
addresses = set(['127.0.0.1', '::1'])
# Permet de garder un fonctionnement minimum lorsque le paquet
# netifaces n'est pas disponible (cas de certaines anciennes
# distributions Linux).
if netifaces:
# Récupération de la liste des addresses (IPv4/IPv6)
# des différentes interfaces réseau de la machine locale.
for iface in netifaces.interfaces():
families = netifaces.ifaddresses(iface)
for family in families:
# On ne garde que les addresses IPv4/IPv6.
if family not in (netifaces.AF_INET, netifaces.AF_INET6):
continue
for entry in families[family]:
if 'addr' in entry:
addresses.add(entry['addr'])
# Les adresses IPs de la machine sont reconnues
# comme "aliases" de la machine.
localnames |= addresses
# Résolution inverse des adresses IP
# pour obtenir le reste des aliases.
for address in addresses:
try:
aliases = socket.gethostbyaddr(address)
# Nom principal correspondant à l'adresse.
localnames.add(aliases[0])
# Autres aliases pour cette adresse.
localnames |= set(aliases[1])
except socket.error:
continue
localnames = set(value.lower() for value in localnames)
return localnames
def _get_hostname(self):
"""
Retourne le résultat de gethostname(). Utile pour les tests unitaires
@return: Résultat de socket.gethostname()
@rtype: C{str}
"""
return socket.gethostname()
def find_remote_class(self): # pylint: disable-msg=R0201
for entry in working_set.iter_entry_points(
"vigilo.vigiconf.extensions", "server_remote"):
sr_class = entry.load()
return sr_class
return None
def makeServer(self, name):
"""
Returns the right server object, depending on the hostname
@param name: the hostname of the Server object to create
@type name: C{str}
@returns: Server object with the provided hostname
@rtype: L{Server}
"""
if name in self.localnames:
return ServerLocal(name)
else:
if self.remote_class is None:
raise EditionError(_("On the Community Edition, you can "
"only use localhost"))
return self.remote_class(name)
def get_server_manager_class():
"""
Retourne la bonne sous-classe de L{ServerManager<manager.ServerManager>}
en fonction de l'édition de Vigilo.
"""
if getattr(conf, "appsGroupsByServer", None):
for entry in working_set.iter_entry_points(
"vigilo.vigiconf.extensions", "server_manager_remote"):
return entry.load()
message = _("Remote server management is not possible with the "
"Community edition. This feature is only available "
"in the Enterprise edition. Aborting.")
raise EditionError(message)
else:
return ServerManagerLocal
def get_server_manager():
"""
I{Factory} pour L{ServerManager<manager.ServerManager>}: retourne une
instance de la meilleure sous-classe en fonction de l'édition de Vigilo.
"""
server_factory = ServerFactory()
sm_class = get_server_manager_class()
return sm_class(server_factory)
# vim:set expandtab tabstop=4 shiftwidth=4:
| vigilo/vigiconf | src/vigilo/vigiconf/lib/server/factory.py | factory.py | py | 5,047 | python | fr | code | 3 | github-code | 36 | [
{
"api_name": "vigilo.common.logging.get_logger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "vigilo.common.gettext.translate",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "netifaces.interfaces",
"line_number": 58,
"usage_type": "call"
},
{
... |
37309146962 | import pyaudio
import wave
import sys
import time
'''
init初始化的时候,time:要录制的时间,path:保存音频的路径及名称
record_audio:运行一次录制一段时长为time的音频
play_time:运行一次播放一段音频, 这里注意也要传入一个路径
'''
class Audio:
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
@staticmethod
def record_audio(time, path):
wave_output_filename = path
p = pyaudio.PyAudio()
stream = p.open(format=Audio.FORMAT, channels=Audio.CHANNELS,
rate=Audio.RATE, input=True,
frames_per_buffer=Audio.CHUNK)
print("* recording")
frames = []
for i in range(0, int(Audio.RATE / Audio.CHUNK * time)):
data = stream.read(Audio.CHUNK)
frames.append(data)
print("* done recording")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(wave_output_filename, 'wb')
wf.setnchannels(Audio.CHANNELS)
wf.setsampwidth(p.get_sample_size(Audio.FORMAT))
wf.setframerate(Audio.RATE)
wf.writeframes(b''.join(frames))
wf.close()
@ staticmethod
def play_audio(path):
wf = wave.open(path, 'rb')
# instantiate PyAudio (1)
p = pyaudio.PyAudio()
# define callback (2)
def callback(in_data, frame_count, time_info, status):
data = wf.readframes(frame_count)
return data, pyaudio.paContinue
# open stream using callback (3)
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True,
stream_callback=callback)
# start the stream (4)
stream.start_stream()
# wait for stream to finish (5)
while stream.is_active():
time.sleep(0.01)
# stop stream (6)
stream.stop_stream()
stream.close()
wf.close()
# close PyAudio (7)
p.terminate()
# test的时候可以用
# n = 5
# path1 = r'C:\Users\Administrator\Desktop\recording\%s.wav' % n
# Audio.record_audio(10, path1)
# # path2 = r'C:\Users\Administrator\Desktop\recording\output.wav'
# # Audio.play_audio(path2)
# n = n + 1
| imppppp7/time | Training/Audio.py | Audio.py | py | 2,408 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyaudio.paInt16",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pyaudio.PyAudio",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "wave.open",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "wave.open",
"line_... |
74408545383 | from birdnetlib.watcher import DirectoryWatcher
from birdnetlib.analyzer_lite import LiteAnalyzer
from birdnetlib.analyzer import Analyzer
import os
from collections import namedtuple
from mock import patch, Mock
def test_watcher_complete():
analyzer = Analyzer()
analyzer_lite = LiteAnalyzer()
directory = "."
watcher = DirectoryWatcher(directory, analyzers=[analyzer, analyzer_lite])
input_path = os.path.join(os.path.dirname(__file__), "test_files/soundscape.wav")
# Add a mocked call for on_analyze_complete
watcher.on_analyze_complete = Mock()
watcher.on_analyze_file_complete = Mock()
# Create a "file-created" event in the watcher.
# Test calling private method directly (this would be called by watchdog)
event = namedtuple("Event", "src_path")
event.src_path = input_path
watcher._on_closed(event)
# Check complete call count and results.
assert watcher.on_analyze_complete.call_count == 2
analyzer_recording = watcher.on_analyze_complete.call_args_list[0][0][0]
lite_recording = watcher.on_analyze_complete.call_args_list[1][0][0]
assert len(analyzer_recording.detections) == 36
assert analyzer_recording.analyzer.name == "Analyzer"
assert len(lite_recording.detections) == 2
assert lite_recording.analyzer.name == "LiteAnalyzer"
assert watcher.on_analyze_file_complete.call_count == 1
assert len(watcher.on_analyze_file_complete.call_args.args[0]) == 2
def test_watcher_error():
analyzer = LiteAnalyzer()
directory = "."
watcher = DirectoryWatcher(directory, analyzers=[analyzer])
# Not an mp3 file, should throw error.
input_path = os.path.join(os.path.dirname(__file__), "test_files/species_list.txt")
# Add a mocked call for on_analyze_complete
watcher.on_error = Mock()
# Create a "file-created" event in the watcher.
# Normally a txt would never make it this far,
# but we're just testing to see if error is thrown.
event = namedtuple("Event", "src_path")
event.src_path = input_path
watcher._on_closed(event)
# Check complete call count and results.
assert watcher.on_error.call_count == 1
assert type(watcher.on_error.call_args.args[0]).__name__ == "Recording"
assert type(watcher.on_error.call_args.args[1]).__name__ == "AudioFormatError"
def test_default_analyzer():
# Test that if an analyzer isn't provided, that the LiteAnalyzer is used.
directory = "."
watcher = DirectoryWatcher(directory)
assert type(watcher.analyzers[0]).__name__ == "Analyzer"
| joeweiss/birdnetlib | tests/test_watcher_both_analyzers.py | test_watcher_both_analyzers.py | py | 2,567 | python | en | code | 19 | github-code | 36 | [
{
"api_name": "birdnetlib.analyzer.Analyzer",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "birdnetlib.analyzer_lite.LiteAnalyzer",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "birdnetlib.watcher.DirectoryWatcher",
"line_number": 14,
"usage_type":... |
23410231690 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('bar', '0322_auto_20161010_1636'),
]
operations = [
migrations.AlterField(
model_name='caja',
name='sector',
field=models.ForeignKey(to='bar.Sector'),
),
migrations.AlterField(
model_name='categoriaproducto',
name='categoria',
field=models.CharField(help_text=b'Ingrese el identificador de la Categoria de los productos. (Hasta 2 caracteres)', unique=True, max_length=2, verbose_name=b'Categoria', choices=[(b'BE', b'Bebidas'), (b'CO', b'Comidas'), (b'CI', b'Cigarrillos'), (b'GO', b'Golosinas'), (b'AL', b'Articulos de Limpieza')]),
),
migrations.AlterField(
model_name='timbrado',
name='fecha_limite_vigencia_timbrado',
field=models.DateField(default=datetime.datetime(2017, 10, 13, 19, 59, 26, 68000), help_text=b'Ingrese la Fecha Limite de Vigencia del Timbrado', verbose_name=b'Fecha Limite de Vigencia del Timbrado'),
),
]
| pmmrpy/SIGB | bar/migrations/0323_auto_20161013_1959.py | 0323_auto_20161013_1959.py | py | 1,192 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterField",
"line_number": 15,
"usage_type": "call"
},
{... |
4423364183 | import tempfile
from os import path, sep
from django import forms
from django.forms.util import ErrorList
from django.conf import settings
from django.template.loader import render_to_string
from transhette import polib, poutil
PO_PROJECT_BASE = 'po_project_base'
class FormAdminDjango(forms.Form):
def as_django_admin(self):
return render_to_string('transhette/form_admin_django.html', {'form': self, })
class UpdateConfirmationPoForm(FormAdminDjango):
pass
class UpdatePoForm(FormAdminDjango):
    """Form for uploading a .po file and merging it into a destination catalog.

    When no ``pofile`` is bound at construction time, extra ``application`` and
    ``language`` choice fields are added so the user can pick the merge target.
    """
    priority = forms.BooleanField(required=False)
    file = forms.FileField()
    def __init__(self, pofile, *args, **kwargs):
        super(UpdatePoForm, self).__init__(*args, **kwargs)
        self.fields['priority'].is_checkbox = True
        self.data_file = None
        self.pofile = pofile
        # No destination catalog given: let the user choose app + language.
        if not pofile:
            application_choices = self._get_application_choices()
            self.fields['application'] = forms.ChoiceField(choices=application_choices, required=False)
            language_choices = [('', '-----')]
            if hasattr(settings, 'LANGUAGES'):
                language_choices.extend([(key, "%s (%s)" % (value, key)) \
                                            for key, value in dict(settings.LANGUAGES).items()])
            self.fields['language'] = forms.ChoiceField(choices=language_choices, required=False)
            self.fields.keyOrder = ['application', 'language', 'priority', 'file']
    def clean(self):
        """Validate that a destination catalog can actually be located."""
        cleaned_data = super(UpdatePoForm, self).clean()
        if not self.errors and not self.pofile:
            try:
                tmp_file, po_tmp, po_dest_file = self._get_files_to_merge()
                tmp_file.close()
            except IOError:
                # Destination .po could not be resolved — attach a field error.
                file_error = self._errors.get('file', ErrorList([]))
                file_error_new = ErrorList([u'Information incompatible for find the destination file'])
                file_error.extend(file_error_new)
                self._errors['file'] = ErrorList(file_error)
        return cleaned_data
    def save_temporal_file(self):
        """Persist the upload to a temp file and return merge inputs.

        Returns a tuple ``(uploaded_pofile, destination_pofile, priority_flag)``.
        """
        tmp_file, po_tmp, po_dest_file = self._get_files_to_merge()
        tmp_file.flush()
        return po_tmp, po_dest_file, self.cleaned_data['priority']
    def _get_files_to_merge(self):
        """Return (temp file handle, uploaded pofile, destination pofile)."""
        # Write the file uploaded by the user to a temporary file.
        temporal_filepath = tempfile.NamedTemporaryFile().name
        tmp_file = open(temporal_filepath, "w")
        if self.data_file is None:
            self.data_file = self.cleaned_data['file'].read()
        tmp_file.write(self.data_file)
        tmp_file.flush()
        po_tmp = polib.pofile(temporal_filepath)
        if not self.pofile:
            # Resolve the path of the catalog to merge into and parse it.
            path_file = _get_path_file(po_tmp, self.cleaned_data['file'].name,
                                       self.cleaned_data.get('language', None),
                                       self.cleaned_data.get('application', None))
            po_dest_file = polib.pofile(path_file)
        else:
            po_dest_file = self.pofile
        return (tmp_file, po_tmp, po_dest_file)
    def _get_application_choices(self):
        """Build the (value, label) choices of apps that own locale dirs."""
        l = []
        choices = [('', '-----')]
        for language in settings.LANGUAGES:
            l_extend = poutil.find_pos(language[0], include_djangos=False, include_transhette=False)
            # Keep only catalogs under BASEDIR, expressed relative to it.
            l_extend = [item.split(settings.BASEDIR)[1] for item in l_extend if item.startswith(settings.BASEDIR)]
            l.extend(l_extend)
        for item in l:
            item_split = item.split(sep)
            if item_split[1] == 'locale':
                # Top-level locale dir → the whole project is a target.
                if not (PO_PROJECT_BASE, 'entire project') in choices:
                    choices.append((PO_PROJECT_BASE, 'entire project'))
            else:
                item_split2 = item.split(sep+"locale"+sep)
                item_tuple = (item_split2[0][1:], item_split2[0].split('/')[-1])
                if not item_tuple in choices:
                    choices.append(item_tuple)
        return choices
def _get_lang(lang, lang_cleaned_data=None):
if lang_cleaned_data:
return lang_cleaned_data
if not lang:
return ''
lang = lang.replace('\n', '')
lang = lang.strip()
lang_words = lang.split(' ')
if len(lang_words):
lang = lang_words[0]
return lang.lower()
def _get_lang_by_file(file_path):
cut_start = "locale/"
cut_end = "/LC_MESSAGES"
index_start = file_path.index(cut_start) + len(cut_start)
index_end = file_path.index(cut_end)
return file_path[index_start:index_end]
def _get_application(application, application_cleaned_data=None):
if application_cleaned_data:
app = application_cleaned_data
if app != PO_PROJECT_BASE:
return app
else:
return ''
if not application:
return ''
application = application.replace('\n', '')
application = application.strip()
return application.lower()
def _get_path_file(po_tmp, filename, lang=None, application=None):
    """Build the destination path for *filename* inside the project tree.

    Language and application are taken from the form values when given,
    falling back to the uploaded catalog's PO metadata headers.
    """
    lang = _get_lang(po_tmp.metadata.get('Language-Team'), lang)
    directory = _get_application(po_tmp.metadata.get('Project-Id-Version'), application)
    return path.join(settings.BASEDIR, directory, 'locale', lang, 'LC_MESSAGES', filename)
| buriy/django-transhette | transhette/forms.py | forms.py | py | 5,327 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "django.forms.Form",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.template.loader.render_to_string",
"line_number": 18,
"usage_type": "call"
},
{
"api_n... |
4449615890 | import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
from tensorflow import keras
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
# Sentiment-classification walkthrough: train a small embedding+dense model on
# the IMDB reviews dataset and export the learned embeddings as TSV files.
imdb,info = tfds.load("imdb_reviews",with_info=True, as_supervised=True)
train_data,test_data = imdb['train'],imdb['test']
training_sentences=[]
training_labels=[]
test_sentences=[]
test_labels=[]
# Materialise the tf.data pipelines into Python lists.
# NOTE(review): str(s.numpy()) stringifies a *bytes* object, so sentences get a
# leading "b'" prefix; presumably s.numpy().decode('utf8') was intended — confirm.
for s,l in train_data:
    training_sentences.append(str(s.numpy()))
    training_labels.append(l.numpy())
for s,l in test_data:
    test_sentences.append(str(s.numpy()))
    test_labels.append(l.numpy())
training_labels_final = np.array(training_labels)
test_labels_final = np.array(test_labels)
# Tokenisation hyper-parameters.
vocab_size = 10000
embedding_dim = 16
max_length = 120
trunc_type = 'post'
oov_tok = "<OOV>"
# Fit the tokenizer on the training text only, then convert both splits to
# fixed-length padded integer sequences.
tokenizer = Tokenizer(num_words=vocab_size,oov_token=oov_tok)
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
sequences = tokenizer.texts_to_sequences(training_sentences)
padded = pad_sequences(sequences,maxlen=max_length,truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(test_sentences)
testing_padded = pad_sequences(testing_sequences,maxlen=max_length,truncating=trunc_type)
# Index -> word mapping, used for debugging and for the metadata export below.
reverse_word_index = dict([(value,key) for (key,value) in word_index.items()])
def decode_review(text):
    """Map a sequence of token ids back to words ('?' for unknown ids)."""
    return ' '.join([reverse_word_index.get(i, '?') for i in text])
# print(decode_review(padded[1]))
# print(training_sentences[1])
# Embedding -> Flatten -> small dense head for binary sentiment.
model = tf.keras.Sequential([
    keras.layers.Embedding(vocab_size,embedding_dim,input_length=max_length),
    keras.layers.Flatten(),
    keras.layers.Dense(6, activation='relu'),
    keras.layers.Dense(1,activation='sigmoid')
])
model.compile(loss='binary_crossentropy', optimizer='adam',metrics=['accuracy'])
model.summary()
num_epochs = 10
model.fit(padded,training_labels_final,epochs=num_epochs,validation_data=(testing_padded,test_labels_final))
# Export the embedding matrix (vectors + word metadata) for the TF projector.
e = model.layers[0]
weights = e.get_weights()[0]
import io
out_v = io.open('vecs.tsv' , 'w' , encoding='utf-8')
out_m = io.open('meta.tsv' , 'w' , encoding='utf-8')
for word_num in range(1,vocab_size):
    word=reverse_word_index[word_num]
    embeddings=weights[word_num]
    out_m.write(word+'\n')
    out_v.write('\t'.join([str(x) for x in embeddings]) + '\n')
out_v.close()
out_m.close()
| Atharva500/imdbReviews | imdb_reviews.py | imdb_reviews.py | py | 2,348 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow_datasets.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.t... |
25358805817 | '''
Created on 2018年11月21日
@author: Jacky
'''
from random import randint
from collections import Counter
from pip._vendor.msgpack.fallback import xrange
import re
# Frequency-counting exercise: count 30 random grades by hand, then with
# collections.Counter, then count word frequencies from a text file.
# NOTE(review): xrange here comes from pip._vendor.msgpack.fallback (see the
# import block) — a private vendored module; plain range() would be safer.
data = [randint(60,100) for _ in xrange(30)]
dt1 = dict.fromkeys(data, 0)
print(dt1)
# Manual histogram over the sampled values.
for x in data:
    dt1[x]+=1
print(dt1)
# Sort (value, count) pairs by count, descending.
lst = sorted(dt1.items(),key = lambda x:x[1],reverse=True)
print(lst)
# Same result using Counter.
cnt1 = Counter(data)
print(cnt1)
print (cnt1.most_common(3))
# Word-frequency count over an external text file (must exist in the cwd).
txt = open('new22.txt').read()
lst2 = re.split('\W+', txt)
cnt2 = Counter(lst2)
print(lst2)
print(cnt2.most_common(20))
{
"api_name": "random.randint",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pip._vendor.msgpack.fallback.xrange",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 22,
"usage_type": "call"
},
{
"api_name":... |
31070431709 | import xlsxwriter
from tkinter import *
from openpyxl import load_workbook
myFileName = 'decmo.xlsx'
# load the workbook, and put the sheet into a variable
wb = load_workbook(filename=myFileName)
newRowLocation = 1  # NOTE(review): never used below
# NOTE(review): `global` at module level is a no-op statement; `index` itself is
# declared global inside functionate() but never read or written there.
global index
index = 1
def functionate():
    """Append an (income, cost) entry for the selected date to the workbook.

    Reads the tkinter entry widgets (``e`` income, ``f`` cost, ``date``,
    ``month``), finds the row tagged ``<date>##`` in the month's sheet, writes
    the new values after the existing ones, and updates the running totals.
    Assumes the sheet layout puts an entry count at (row+1, col+1) and a cost
    total at (row+1, col+2) — TODO confirm against the spreadsheet template.
    """
    v = e.get()
    vv = f.get()
    d = date.get()
    m = month.get()
    # Rows are tagged with the date followed by a '##' marker.
    find = d + '##'
    print(find)
    x = 0
    y = 0
    c = 0
    # Default to January, then override from the month entry ('1'..'12').
    ws = wb['january']
    if m == '1':
        ws = wb['january']
    if m == '2':
        ws = wb['february']
    if m == '3':
        ws = wb['march']
    if m == '4':
        ws = wb['april']
    if m == '5':
        ws = wb['may']
    if m == '6':
        ws = wb['june']
    if m == '7':
        ws = wb['july']
    if m == '8':
        ws = wb['august']
    if m == '9':
        ws = wb['september']
    if m == '10':
        ws = wb['october']
    if m == '11':
        ws = wb['november']
    if m == '12':
        ws = wb['december']
    global index
    # Scan the first two columns of the first 30 rows for the date marker.
    for row in ws.iter_rows(min_row=1, min_col=1, max_row=30, max_col=2):
        for cell in row:
            if cell.value == find:
                print('found')
                x = cell.column
                y = cell.row
                z = cell.coordinate
    # NOTE(review): if the marker is not found, x == y == 0 and the cell
    # arithmetic below addresses bogus coordinates — no guard exists here.
    income_obj = ws.cell(row=y + 1, column=x + 1)
    cost_obj = ws.cell(row=y + 1, column=x + 2)
    c = income_obj.value
    cost=cost_obj.value
    z = ws.max_column
    # Append the new (value, cost) pair after the c existing entries.
    ws.cell(column=c + x + 3, row=y + 1, value=v)
    ws.cell(column=c + x + 4, row=y + 1, value=vv)
    total=float(cost)+float(vv)
    # Bump the entry count by 2 (one pair of cells) and update the cost total.
    ws.cell(column=x + 1, row=y + 1, value=c + 2)
    ws.cell(column=x + 2, row=y+1, value=total)
    print(c)
    wb.save(filename=myFileName)
    wb.close()
# --- tkinter UI wiring: two value entries, date/month entries, one button. ---
root = Tk()
root.geometry('500x500')
e = Entry(root)
b = Button(root, text='Click', command=functionate)
e.pack()
b.pack()
e.place(x=0, y=0)
b.place(x=50, y=50)
f = Entry(root)
f.pack()
date = Entry(root)
date.pack()
date.place(x=100, y=100)
month = Entry(root)
month.pack()
month.place(x=100, y=150)
print(4)
root.mainloop()
| utsabbuet17/My-Wallet | xl (1).py | xl (1).py | py | 2,168 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "openpyxl.load_workbook",
"line_number": 8,
"usage_type": "call"
}
] |
34589390018 | from django.shortcuts import render, get_object_or_404
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.views import generic
from django.contrib.auth.models import User
import nfldb
import json
from .models import Team, Player, UserTeam, UserPlayer, NflDbHelper
from .forms import CreateTeamForm
# Create one instance of the db per process
# Create your views here.
def index(request):
    """Render the site's home page."""
    context = {'page': 'home'}
    return render(request, 'index.html', context)
def page_not_found(request):
    """Render the custom 404 error page."""
    template_name = 'page_not_found.html'
    return render(request, template_name)
def build_query(year, phase, week):
    """Build an nfldb query filtered by season year, phase and week.

    Returns a tuple ``(query, weeks)`` where *weeks* is the list of selectable
    week labels for the given phase (empty when phase is 'All'). Raises
    Http404 for an unrecognised phase.
    """
    db = nfldb.connect()
    query = nfldb.Query(db)
    query.game(season_year=year)
    weeks = []
    if phase != 'All':
        if phase == 'Preseason':
            weeks += ["All", "1", "2", "3", "4"]
        elif phase == 'Regular':
            weeks += ["All", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17"]
        elif phase == 'Postseason':
            weeks += ["All", "1", "2", "3"]
        else:
            raise Http404("Page not found!")
        query.game(season_type=phase)
    if week != 'All':
        query.game(week=week)
    return (query, weeks)
def players_paginate(query, page):
    """Paginate *query* at 20 items per page, clamping bad page numbers."""
    paginator = Paginator(query, 20)
    try:
        stats = paginator.page(page)
    except PageNotAnInteger:
        # if page is not an integer, return the first page
        stats = paginator.page(1)
    except EmptyPage:
        # if page is out of range, return the last page
        stats = paginator.page(paginator.num_pages)
    return stats
def team_create(request):
    """Create a fantasy team from a POSTed form; show the form on GET.

    On a valid POST the team is saved for the user given in ``user_id`` and
    the client is redirected to the home page; an invalid POST re-renders the
    bound form with its errors.
    """
    if request.method == 'POST':
        form = CreateTeamForm(request.POST)
        # check if the form is valid
        if form.is_valid():
            ut = UserTeam()
            ut.team_name = form.cleaned_data['team_name']
            ut.user = User.objects.get(id=request.POST['user_id'])
            ut.save()
            return HttpResponseRedirect('/')
    else:
        # If this is a GET (or any other) request, create the form
        form = CreateTeamForm()
    return render(request, 'football/team_create.html', {'form': form })
def team_detail(request, pk):
    """Show a fantasy team's roster grouped by position for the 2016 season."""
    # Get the team object
    team = get_object_or_404(UserTeam, pk=pk)
    # Get the players
    year = 2016
    (qbs, rbs, wrs, tes, oth) = NflDbHelper.players(year, team.userplayer_set.all())
    return render(request,'football/team_detail.html', {
        'team': team,
        'qbs': qbs,
        'rbs': rbs,
        'wrs': wrs,
        'tes': tes,
        'oth': oth,
    })
def reload_team_detail(request, pk):
    """AJAX variant of team_detail: same data, partial-refresh template."""
    # Get the team object
    team = get_object_or_404(UserTeam, pk=pk)
    # Get the players
    year = 2016
    (qbs, rbs, wrs, tes, oth) = NflDbHelper.players(year, team.userplayer_set.all())
    return render(request,'football/reload_team_detail.html', {
        'team': team,
        'qbs': qbs,
        'rbs': rbs,
        'wrs': wrs,
        'tes': tes,
        'oth': oth,
    })
def add_player_to_team(request, player_id):
    """Add a player to one or more fantasy teams (AJAX endpoint).

    Expects a POST with ``team_ids[]``; responds with JSON mapping each team
    id to a human-readable result message. Duplicate adds are reported, not
    repeated.
    """
    if request.method == 'POST':
        # add players to selected teams
        team_ids = request.POST.getlist('team_ids[]')
        response_data = {}
        if team_ids:
            for team_id in team_ids:
                team = UserTeam.objects.get(id=team_id)
                player = Player.objects.get(player_id=player_id)
                # Check if this player has been added
                if UserPlayer.objects.filter(fantasy_team__exact=team, player__exact=player).count() != 0:
                    response_data[team.id] = '%s already added to %s' % (player, team)
                else:
                    up = UserPlayer()
                    up.fantasy_team = UserTeam.objects.get(id=team_id)
                    up.player = Player.objects.get(player_id=player_id)
                    up.save()
                    response_data[team.id] = '%s added to %s' % (player, team)
        else:
            response_data['result'] = 'No teams selected!'
        return HttpResponse(json.dumps(response_data), content_type="application/json")
    else:
        # Non-POST requests get a placeholder JSON body.
        return HttpResponse(
            json.dumps({"nothing to see": "this isn't happening"}),
            content_type="application/json"
        )
def del_player_from_team(request, player_id):
    """Remove a player from a fantasy team (AJAX endpoint).

    Expects a POST with ``team_id``; responds with JSON mapping the team id
    to a human-readable result message.

    Bug fix: the original returned ``None`` for non-POST requests, which makes
    Django raise "view didn't return an HttpResponse". Non-POST now gets the
    same placeholder JSON body as add_player_to_team.
    """
    if request.method == 'POST':
        # remove player from team
        team_id = request.POST.get('team_id')
        response_data = {}
        if team_id:
            team = UserTeam.objects.get(id=team_id)
            player = Player.objects.get(player_id=player_id)
            up = UserPlayer.objects.get(fantasy_team_id=team_id, player_id=player_id)
            up.delete()
            response_data[team.id] = '%s removed from %s' % (player, team)
        else:
            response_data['result'] = 'No team found'
        return HttpResponse(
            json.dumps(response_data),
            content_type="application/json",
        )
    else:
        # Mirror add_player_to_team: always return an HttpResponse.
        return HttpResponse(
            json.dumps({"nothing to see": "this isn't happening"}),
            content_type="application/json",
        )
def _position_listing(request, template, position, url_id, pos_code, year, phase, week):
    """Shared implementation for the per-position player listing views."""
    sort = request.GET.get('sort', 'ppr')
    weeks = NflDbHelper.weeks(phase)
    fantasy = NflDbHelper.query(year, phase, week, pos_code, sort)
    # Build the pager
    page = request.GET.get('page')
    stats = players_paginate(fantasy, page)
    return render(request, template,
                  {'position': position,
                   'urlId': url_id,
                   'page': 'positions',
                   'year': year,
                   'phase': phase,
                   'weeks': weeks,
                   'week': week,
                   'stats': stats})

def players_quarterbacks(request, year="2016", phase="Regular", week="All"):
    """ Listing of quarterbacks """
    return _position_listing(request, 'positions/passing.html',
                             'Quarterbacks', 'pos-qb', 'QB', year, phase, week)

def players_runningbacks(request, year="2016", phase="Regular", week="All"):
    """ Listing of running backs """
    return _position_listing(request, 'positions/receiving.html',
                             'Running Backs', 'pos-rb', 'RB', year, phase, week)

def players_widereceivers(request, year="2016", phase="Regular", week="All"):
    """ Listing of wide receivers """
    return _position_listing(request, 'positions/receiving.html',
                             'Wide Receivers', 'pos-wr', 'WR', year, phase, week)

def players_tightends(request, year="2016", phase="Regular", week="All"):
    """ Listing of tight-ends """
    return _position_listing(request, 'positions/receiving.html',
                             'Tight-Ends', 'pos-te', 'TE', year, phase, week)
class NflListView(generic.ListView):
    """List all NFL teams."""
    model = Team
    template_name = 'football/nfl_team_list.html'
class NflDetailView(generic.DetailView):
    """Show a single NFL team."""
    model = Team
    template_name = 'football/nfl_team_detail.html'
class PlayerListView(generic.ListView):
    """List all NFL players (uses the default ListView template)."""
    model = Player
| seniark/ppr_oracle | football/views.py | views.py | py | 8,192 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "nfldb.connect",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "nfldb.Q... |
73261011945 | from math import *
import numpy as np
import matplotlib.pyplot as plt
import scipy
class DataGenerator:
    """Generate and modulate QAM symbol data for an optical-fiber channel model.

    Construction derives normalised channel quantities (presumably the usual
    NLSE normalisation with characteristic time T0, power P0 and noise sigma2 —
    verify against the project guide) and builds an M-QAM constellation scaled
    to unit average power.
    """
    def __init__(self, Length=1000*1e3, Bandwith=10*1e9, power_loss_db=0.2, dispersion=17e-6, Gamma=1.27*1e-6,
                 nsp=1, h=6.626*1e-34, lambda0=1.55*1e-6, T=65, N=2**11, number_symbols=3, p=0.5,M=16):
        # Raw physical fibre parameters.
        self.Length = Length
        self.Bandwith = Bandwith # GHz to Hz
        self.power_loss_db = power_loss_db
        self.dispersion = dispersion
        self.Gamma = Gamma
        self.nsp = nsp
        self.h = h
        self.lambda0 = lambda0
        self.c = 3e8
        self.f0 = self.c / self.lambda0
        self.alpha = 0.46*1e-4
        # Group-velocity dispersion coefficient derived from the dispersion
        # parameter at the carrier wavelength.
        self.beta2 = - (self.lambda0**2 / (2 * pi * self.c)) * self.dispersion
        # Normalisation scales: length L0, time T0, power P0.
        self.L0 = self.Length
        self.T0 = sqrt(abs(self.beta2)*self.L0 / 2)
        self.P0 = 2 / (self.Gamma * self.L0)
        self.Bandwith_n = self.Bandwith * self.T0
        # Noise spectral density (physical) and its normalised counterpart.
        self.sigma02 = self.nsp * self.h * self.alpha * self.f0
        self.sigma2 = (self.sigma02 * self.L0) / (self.P0 * self.T0)
        self.M = M
        self.T = T
        self.N = N
        self.number_symbols = number_symbols # n in project guide
        self.p = p # proba of having 0 in bit
        # Discrete time/frequency grids for the N-sample window of duration T.
        self.dt = self.T / self.N
        self.t = np.arange(start=-self.T/2,stop=self.T/2,step=self.dt)
        self.F = 1 / self.dt
        self.df = 1 / self.T
        self.f = np.arange(start=-self.F/2,stop=self.F/2,step=self.df)
        self.fft_frequencies = np.fft.fftfreq(n=self.N,d=self.dt)
        self.number_bits = self.number_symbols * int(log2(self.M)) # nb in guide
        self.P_q0t = 1
        # Constellation
        if M == 16:
            # Hand-built 16-QAM grid, row by row (points_2 reverses direction
            # on alternate rows so adjacent points differ consistently).
            self.Constellation = np.zeros(shape=(16, 2))
            points_1 = [-3, -1, 1, 3]
            points_2 = [3, 1, -1, -3]
            const1 = np.asarray([[i, 3] for i in points_1])
            const2 = np.asarray([[i, 1] for i in points_2])
            const3 = np.asarray([[i, -1] for i in points_1])
            const4 = np.asarray([[i, -3] for i in points_2])
            self.Constellation = np.concatenate((np.concatenate((const1, const2), axis=0),
                                                 np.concatenate((const3, const4), axis=0)), axis=0)
        else:
            self.generate_constellation(M=self.M)
        # Scale so the constellation has unit average power.
        self.average_constellation_power = np.mean(self.Constellation[:,0]**2 + self.Constellation[:,1]**2)
        self.Constellation = np.divide(self.Constellation,self.average_constellation_power)
    def setter_noise(self,sigma2):
        # Override the normalised noise variance directly.
        self.sigma2 = sigma2
    def setter_noise0(self,sigma02):
        # Override the physical noise density and re-derive the normalised one.
        self.sigma02 = sigma02
        self.sigma2 = (self.sigma02 * self.L0) / (self.P0 * self.T0)
    def generate_constellation(self,M):
        """Build a small constellation for M in {2, 4, 8} (no-op otherwise)."""
        a = 1
        if M == 2:
            self.Constellation = np.array([[-a,0],[a,0]])
        elif M == 4:
            self.Constellation = np.array([[-a,-a],[a,a],[-a,a],[a,-a]])
        elif M == 8:
            self.Constellation = np.array([[-a,-a],[a,a],[-a,a],[a,-a],
                                           [(a+sqrt(3))*a,0],[-(a+sqrt(3))*a,0],
                                           [0,(a+sqrt(3))*a],[0,-(a+sqrt(3))*a]])
    def source(self):
        """Draw the random bit sequence (Bernoulli(p)) into self.bernoulli."""
        self.bernoulli = np.random.binomial(n=1, p=self.p, size=self.number_bits)
        # return self.bernoulli
    def bit_to_symb(self):
        """Map the bit sequence to constellation symbols via Gray coding."""
        # Build the Gray-code labels for all M constellation points.
        self.gray_code = []
        for i in range(0, 1 << int(log2(self.M))):
            gray = i ^ (i >> 1)
            self.gray_code.append("{0:0{1}b}".format(gray, int(log2(self.M))))
        self.gray_code = np.asarray(self.gray_code)
        self.gray_to_symb = dict(zip(self.gray_code,self.Constellation))
        self.s = []
        b0=b1=b2=b3=""
        # Consume log2(M) bits per symbol (supports up to 4 bits / 16-QAM).
        for i in range(0, self.number_bits, int(log2(self.M))):
            b0 = str(self.bernoulli[i])
            if int(log2(self.M)) > 1:
                b1 = str(self.bernoulli[i+1])
            if int(log2(self.M)) > 2:
                b2 = str(self.bernoulli[i+2])
            if int(log2(self.M)) > 3:
                b3 = str(self.bernoulli[i+3])
            b_i = b0+b1+b2+b3
            self.s.append(self.gray_to_symb[b_i])
        self.s = np.asarray(self.s)
    def testbs(self):
        """Debug helper: modulate the first two constellation points."""
        self.s = np.asarray([list(self.Constellation[0]),list(self.Constellation[1])])
        print(self.s)
        self.mod()
    def mod(self):
        """Sinc-pulse modulate self.s into the time signal q(0,t) and its FFT."""
        Ns = len(self.s)
        # Symbol index range centred around zero.
        self.l1 = -floor(Ns / 2)
        self.l2 = ceil(Ns/2) - 1
        self.q0t_list = []
        for i in range(self.l1,self.l2+1,1):
            self.q0t_list.append(sqrt(self.P_q0t * self.Bandwith_n) * complex(self.s[i - self.l1,0],self.s[i - self.l1,1]) *\
                                 np.sinc(self.Bandwith_n * self.t - i))
        self.q0t_list = np.asarray(self.q0t_list)
        # Superpose all shifted sinc pulses.
        self.q0t = np.sum(self.q0t_list,axis=0)
        self.q0t_FFT = np.fft.fft(self.q0t)
    def plot_q0t(self):
        """Save |q(0,t)| vs time and its FFT magnitude to ./plots/."""
        self.q0t_norm = [sqrt(self.q0t[i].real**2 + self.q0t[i].imag**2) for i in range(self.N)]
        plt.plot(self.t,self.q0t_norm,color='black')
        plt.ylabel(r'|q(0,t)|')
        plt.xlabel(r'time')
        plt.title('Norm of '+r'q(0,t)'+' vs time with Bandwith = '+str(self.Bandwith))
        plt.savefig('./plots/norm_q0t.png')
        plt.clf()
        self.fft_frequencies = np.fft.fftfreq(n=self.N,d=self.dt)
        plt.plot(self.fft_frequencies,np.abs(self.q0t_FFT),color='red')
        plt.xlabel(r'frequency')
        plt.ylabel(r'FFT(q(0,t))')
        plt.title(r'q(0,t)'+'in frequency domain with Bandwith = '+str(self.Bandwith))
        plt.savefig('./plots/fft_q0t.png')
    def draw_Constellation(self):
        """Save an annotated scatter plot of the constellation to plots/."""
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        # Centre the axes on the origin, hide the outer frame.
        ax.spines['left'].set_position('center')
        ax.spines['bottom'].set_position('center')
        ax.spines['right'].set_color('none')
        ax.spines['top'].set_color('none')
        plt.title(str(self.M)+'-QAM constellation', pad=20)
        for i in range(self.M):
            plt.scatter(self.Constellation[i, 0],
                        self.Constellation[i, 1], color='red')
            if self.Constellation[i, 1] > 0:
                plt.annotate(text=str(self.Constellation[i, 0])+' + j'+str(self.Constellation[i, 1]),
                             xy=(self.Constellation[i, 0],
                                 self.Constellation[i, 1]),
                             xytext=(self.Constellation[i, 0], self.Constellation[i, 1]+0.2))
            else:
                plt.annotate(text=str(self.Constellation[i, 0])+' - j'+str(abs(self.Constellation[i, 1])),
                             xy=(self.Constellation[i, 0], self.Constellation[i, 1]), ha='center', va='center',
                             xytext=(self.Constellation[i, 0], self.Constellation[i, 1]+0.2))
        plt.savefig("plots/Constellation.png")
| hadifawaz1999/dnn-4-of | data.py | data.py | py | 7,383 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.arange",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.fft.fftfreq",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "numpy.fft",
"line_num... |
16172958687 | """ This script is an example of benchmarking the continuous mlp baseline."""
import datetime
import os
import os.path as osp
import random
from baselines.bench import benchmarks
import dowel
from dowel import logger as dowel_logger
import gym
import pytest
import tensorflow as tf
from metarl.envs import normalize
from metarl.experiment import deterministic
from metarl.tf.algos import PPO
from metarl.tf.baselines import ContinuousMLPBaseline
from metarl.tf.envs import TfEnv
from metarl.tf.experiment import LocalTFRunner
from metarl.tf.policies import GaussianLSTMPolicy
from tests.fixtures import snapshot_config
# Hyper-parameters for the Gaussian LSTM policy.
policy_params = {
    'policy_lr': 1e-3,
    'policy_hidden_sizes': 32,
    'hidden_nonlinearity': tf.nn.tanh
}
# Regressor configuration for the continuous MLP baseline.
baseline_params = {'regressor_args': dict(hidden_sizes=(64, 64))}
# PPO training configuration shared across all benchmark trials.
algo_params = {
    'n_envs':
    8,
    'n_epochs':
    20,
    'n_rollout_steps':
    2048,
    'discount':
    0.99,
    'max_path_length':
    100,
    'gae_lambda':
    0.95,
    'lr_clip_range':
    0.2,
    'policy_ent_coeff':
    0.02,
    'entropy_method':
    'max',
    'optimizer_args':
    dict(
        batch_size=32,
        max_epochs=10,
        tf_optimizer_args=dict(learning_rate=policy_params['policy_lr']),
    ),
    'center_adv':
    False
}
# number of processing elements to use for tensorflow
num_proc = 4 * 2
# number of trials to run per environment
num_trials = 3
@pytest.mark.huge
def test_benchmark_ppo_continuous_mlp_baseline():
    """ Compare benchmarks between CMB and potentially other baselines."""
    mujoco1m = benchmarks.get_benchmark('Mujoco1M')
    # Timestamped output directory so repeated runs never collide.
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S-%f')
    benchmark_dir = osp.join(os.getcwd(), 'data', 'local', 'benchmarks',
                             'ppo_cmb', timestamp)
    for task in mujoco1m['tasks']:
        env_id = task['env_id']
        env = gym.make(env_id)
        # One random seed per trial, sampled without replacement.
        seeds = random.sample(range(100), num_trials)
        task_dir = osp.join(benchmark_dir, env_id)
        cmb_csvs = []
        for trial in range(num_trials):
            seed = seeds[trial]
            trial_dir = task_dir + '/trial_%d_seed_%d' % (trial + 1, seed)
            cmb_dir = trial_dir + '/continuous_mlp_baseline'
            # Fresh TF graph per trial so runs are fully isolated.
            with tf.Graph().as_default():
                env.reset()
                cmb_csv = ppo_cmb(env, seed, cmb_dir)
            cmb_csvs.append(cmb_csv)
        env.close()
def ppo_cmb(env, seed, log_dir):
    """Create test continuous mlp baseline on ppo.

    Args:
        env (gym_env): Environment of the task.
        seed (int): Random seed for the trial.
        log_dir (str): Log dir path.

    Returns:
        str: training results in csv format.

    """
    deterministic.set_seed(seed)
    # Bound TF's thread usage to the benchmark-wide processor budget.
    config = tf.ConfigProto(allow_soft_placement=True,
                            intra_op_parallelism_threads=num_proc,
                            inter_op_parallelism_threads=num_proc)
    sess = tf.Session(config=config)
    with LocalTFRunner(snapshot_config, sess=sess,
                       max_cpus=num_proc) as runner:
        env = TfEnv(normalize(env))
        policy = GaussianLSTMPolicy(
            env_spec=env.spec,
            hidden_dim=policy_params['policy_hidden_sizes'],
            hidden_nonlinearity=policy_params['hidden_nonlinearity'],
        )
        # The baseline under benchmark: a continuous MLP value function.
        baseline = ContinuousMLPBaseline(
            env_spec=env.spec,
            regressor_args=baseline_params['regressor_args'],
        )
        algo = PPO(env_spec=env.spec,
                   policy=policy,
                   baseline=baseline,
                   max_path_length=algo_params['max_path_length'],
                   discount=algo_params['discount'],
                   gae_lambda=algo_params['gae_lambda'],
                   lr_clip_range=algo_params['lr_clip_range'],
                   entropy_method=algo_params['entropy_method'],
                   policy_ent_coeff=algo_params['policy_ent_coeff'],
                   optimizer_args=algo_params['optimizer_args'],
                   center_adv=algo_params['center_adv'],
                   stop_entropy_gradient=True)
        # Set up logger since we are not using run_experiment
        tabular_log_file = osp.join(log_dir, 'progress.csv')
        dowel_logger.add_output(dowel.StdOutput())
        dowel_logger.add_output(dowel.CsvOutput(tabular_log_file))
        dowel_logger.add_output(dowel.TensorBoardOutput(log_dir))
        runner.setup(algo,
                     env,
                     sampler_args=dict(n_envs=algo_params['n_envs']))
        runner.train(n_epochs=algo_params['n_epochs'],
                     batch_size=algo_params['n_rollout_steps'])
        # Detach log outputs so the next trial starts with a clean logger.
        dowel_logger.remove_all()
    return tabular_log_file
| icml2020submission6857/metarl | tests/benchmarks/metarl/tf/baselines/test_benchmark_continuous_mlp_baseline.py | test_benchmark_continuous_mlp_baseline.py | py | 4,736 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "tensorflow.nn",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "baselines.bench.benchmarks.get_benchmark",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "baselines.bench.benchmarks",
"line_number": 69,
"usage_type": "name"
},
{... |
30013425796 | #!/usr/bin/env python3
import os
import os.path as op
import json
import logging
from gear_toolkit import gear_toolkit_context
from utils import args
log = logging.getLogger(__name__)
def main(context):
    """Build and run the pydeface command for this gear invocation.

    Returns 0 on success, 1 on any failure (logged via the context logger).
    """
    # Build and Execute Parameters
    try:
        # build the command string
        params = args.build(context)
        # Execute on those parameters.
        args.execute(context, params)
    except Exception as e:
        context.log.exception(e)
        context.log.fatal('Error executing pydeface-gear.')
        return 1
    context.log.info("pydeface-gear completed Successfully!")
    return 0
if __name__ == '__main__':
    # Run inside the Flywheel gear context so logging/config are initialised,
    # then propagate main()'s status as the process exit code.
    with gear_toolkit_context.GearToolkitContext() as gear_context:
        gear_context.init_logging()
        gear_context.log_config()
        exit_status = main(gear_context)
    log.info('exit_status is %s', exit_status)
    os.sys.exit(exit_status)
| flywheel-apps/pydeface-gear | run.py | run.py | py | 901 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "utils.args.build",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "utils.args",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "utils.args.execute",
... |
28808155062 | import interface as bbox
import theano
import numpy as np
import theano.tensor as T
import lasagne
import time
def prepare_agent(in_state=None):
    """Build the 2x300 tanh MLP policy network and load weights from model.npz.

    ``in_state`` is the Theano input variable for a single (1, n_features)
    state; the output layer is a softmax over the n_actions actions.
    """
    net = lasagne.layers.InputLayer(shape=(1,n_features),input_var=in_state)
    net = lasagne.layers.DenseLayer(net,num_units=300,nonlinearity=lasagne.nonlinearities.tanh)
    net = lasagne.layers.DenseLayer(net,num_units=300,nonlinearity=lasagne.nonlinearities.tanh)
    net = lasagne.layers.DenseLayer(net,num_units=n_actions,nonlinearity=lasagne.nonlinearities.softmax)
    # model.npz stores parameters as arr_0, arr_1, ... in layer order.
    with np.load('model.npz') as f:
        param_values = [f['arr_%d' % i] for i in range(len(f.files))]
    lasagne.layers.set_all_param_values(net, param_values)
    return net
# def get_all_score_diffs(state=None,verbose=0):
# initial = bbox.get_score()
# checkpoint_id = bbox.create_checkpoint()
# all_scores = np.zeros(shape=n_actions)
# for a in range(n_actions):
# for _ in range(100):
# bbox.do_action(a)
# all_scores[a]=bbox.get_score()-initial
# bbox.load_from_checkpoint(checkpoint_id)
# return all_scores
# def get_action_by_state(state=None):
# scores = get_all_score_diffs(state)
# action_to_do = np.argmax(scores)
# # print (scores,action_to_do)
# # raw_input("Press Enter to continue...")
# return action_to_do
# Level dimensions, filled in by prepare_bbox() on first load.
n_features = n_actions = max_time = -1
def prepare_bbox():
    """Load (or reset) the Black Box level and cache its dimensions globally."""
    global n_features, n_actions, max_time
    if bbox.is_level_loaded():
        bbox.reset_level()
    else:
        bbox.load_level("../levels/train_level.data", verbose=1)
    n_features = bbox.get_num_of_features()
    n_actions = bbox.get_num_of_actions()
    max_time = bbox.get_max_time()
def run_bbox(verbose=False):
    """Run one full episode, acting greedily from the loaded network.

    At every step the current state is fed through the net and the argmax
    action is taken; progress is printed every 10000 steps and the network
    parameters are re-saved to model.npz when the episode ends.
    """
    has_next = 1
    prepare_bbox()
    #vector of the current state features
    input_var= T.dvector('in_state')
    input_var= T.reshape(input_var,(1,n_features))
    #Load net into the agent object
    agent=prepare_agent(input_var)
    attempt = lasagne.layers.get_output(agent)
    #function to do all of the stuff above
    eval_fn = theano.function([input_var], attempt,on_unused_input='ignore')
    #time to check how long it takes to run
    start = time.time()
    error=0  # NOTE(review): never updated, so the printed "training loss" is always 0
    steps=0
    while has_next:
        state = bbox.get_state()
        r_state= np.reshape(state,(1,n_features))
        attempt = eval_fn(r_state)
        action = np.argmax(attempt)
        steps+=1
        if steps%10000==0:
            score = bbox.get_score()
            print ("Steps: {}".format(steps))
            print ("  training loss: {}".format(error/steps))
            print ("  current score: {}".format(score))
        has_next = bbox.do_action(action)
    print ("Time to run: {} seconds".format(time.time()-start))
    print ("{} steps total".format(steps))
    np.savez('model.npz', *lasagne.layers.get_all_param_values(agent))
    bbox.finish(verbose=1)
if __name__ == "__main__":
    run_bbox(verbose=0)
| EladMichael/BlackBoxChallenge | testnet.py | testnet.py | py | 2,956 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "lasagne.layers.InputLayer",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "lasagne.layers",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "lasagne.layers.DenseLayer",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": ... |
27649475161 | from PySide2.QtGui import QColor, QLinearGradient, QPen, QBrush, QFont
# Scale factor applied to a control point while the cursor hovers it.
HOVERED_SCALE = 1.2
# Tolerance used for float comparisons.
EPSILON = 0.0001
# Number of line segments used to draw the ramp curve.
SHAPE_STEPS = 100
# Radius (px) and fill colour of the "add point" marker.
ADD_MARKER_RADIUS = 7.0
ADD_MARKER_COLOR = (166, 210, 121)
# Cursor distance (px) within which points snap.
SNAPPING_DISTANCE = 8.0
# Fill gradient for the curve area (reflected light-grey ramp).
SHAPE_GRADIENT = QLinearGradient()
SHAPE_GRADIENT.setColorAt(0, QColor(165, 165, 165, 15))
SHAPE_GRADIENT.setColorAt(1, QColor(241, 241, 241, 100))
SHAPE_GRADIENT.setSpread(QLinearGradient.Spread.ReflectSpread)
# Outline pen for the curve.
SHAPE_PEN = QPen(QBrush(QColor(239, 239, 239)), 3)
# Parameter increment per curve segment.
SHAPE_STEP = 1.0 / SHAPE_STEPS
# Font for grid labels.
GRID_FONT = QFont("Source Code Pro", 8.0)
{
"api_name": "PySide2.QtGui.QLinearGradient",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PySide2.QtGui.QColor",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "PySide2.QtGui.QColor",
"line_number": 13,
"usage_type": "call"
},
{
"api_name"... |
10713087368 | import matplotlib.pyplot as plt
import numpy as np
from learn_seq.controller.base import TaskController
from learn_seq.utils.general import saturate_vector
from learn_seq.utils.mujoco import quat2vec, quat_error
class HybridController(TaskController):
"""Simulated parallel hybrid force/position controller, where the force
control part is a feedforward controller, and the position control part
is a PD controller.
:param type kp_init: Initial stiffness of the position control part.
:param type dtau_max: Upper limit of torque change between 2 consecutive
timesteps.
"""
def __init__(self, robot_state, kp_init=None, dtau_max=2.):
super().__init__(robot_state)
self.prev_tau_cmd = np.zeros(7)
self.dtau_max = dtau_max
if kp_init is None:
kp = np.array([1000.] * 3 + [60] * 3)
else:
kp = kp_init
kd = 2 * np.sqrt(kp)
self.set_gain(kp, kd)
# store the command pose for external computation
self.reset_pose_cmd()
self.u_pos = np.zeros(3)
self.S = np.eye(6)
p, q = self.robot_state.get_pose()
# controller_state
self.controller_state = {
"t": self.robot_state.get_sim_time(),
"err": np.zeros(6),
"p": p,
"pd": p,
"q": q,
"qd": q,
"f": np.zeros(6),
"fd": np.zeros(6),
}
def forward_ctrl(self, pd, qd, vd, fd, S):
"""See TaskController. All inputs are in base frame
:param type pd: desired position.
:param type qd: desired orientation.
:param type vd: desired velocity.
:param type fd: desired force.
:param type S: desired selection matrix.
"""
# get current position and orientation
p, q = self.robot_state.get_pose()
# compute position and orientation error
ep = np.zeros(6)
ep[:3] = pd - p
ep[3:] = quat_error(q, qd)
# jacobian
jac = self.robot_state.get_jacobian()
# joint velocity
dq = self.robot_state.get_joint_velocity()
# compute velocity error
ep_dot = vd - jac.dot(dq[:7])
# position control law
f_pos = self.kp * ep + self.kd * ep_dot
# force control law
f_force = fd
# gravity and coriolis torque
tau_comp = self.robot_state.get_bias_torque()
# null space torque
tau_null = np.zeros(7)
# general control law
iS = np.identity(6) - S
f_cmd = S.dot(f_pos) + iS.dot(f_force)
tau_cmd = jac.T.dot(f_cmd) + tau_null
# saturate torque rate
tau_sat = saturate_vector(self.prev_tau_cmd, tau_cmd, self.dtau_max)
self.prev_tau_cmd = tau_sat.copy()
# update pose cmd
self.p_cmd = iS[:3, :3].dot(p) + S[:3, :3].dot(pd)
self.q_cmd = qd
self.S = S
# update controller state
self.controller_state["t"] = self.robot_state.get_sim_time()
self.controller_state["err"] = ep
self.controller_state["p"] = p
self.controller_state["q"] = q
self.controller_state["pd"] = pd
self.controller_state["qd"] = qd
self.controller_state["fd"] = fd
self.controller_state["f"] = self.robot_state.get_ee_force()
return tau_sat + tau_comp
def reset_pose_cmd(self):
p, q = self.robot_state.get_pose()
self.p_cmd = p
self.q_cmd = q
def reset_tau_cmd(self):
self.prev_tau_cmd = np.zeros(7)
def set_gain(self, kp, kd):
self.kp = kp
self.kd = kd
def get_controller_state(self):
return self.controller_state
def get_pose_cmd(self):
return self.p_cmd.copy(), self.q_cmd.copy()
def get_pose_control_cmd(self):
return self.S[:3, :3].dot(self.p_cmd), self.q_cmd.copy()
class StateRecordHybridController(HybridController):
"""Record state, useful to visualize response, trajectory."""
def __init__(self, robot_state, kp_init=None, dtau_max=2.):
super().__init__(robot_state, kp_init, dtau_max)
self.record = False
self._reset_state()
def _reset_state(self):
self.state_dict = {}
for key in self.controller_state.keys():
self.state_dict[key] = []
def start_record(self):
self._reset_state()
self.record = True
def stop_record(self, save_path=None):
self.record = False
# process quaterion to rotation vector
for i in range(len(self.state_dict["q"])):
self.state_dict["q"][i] = quat2vec(self.state_dict["q"][i])
self.state_dict["qd"][i] = quat2vec(self.state_dict["qd"][i])
if save_path is not None:
np.save(save_path, self.state_dict)
def forward_ctrl(self, *argv, **kwargs):
res = super().forward_ctrl(*argv, **kwargs)
if self.record:
for key in self.state_dict.keys():
self.state_dict[key].append(self.controller_state[key])
return res
def plot_key(self, key):
"""Plot data defined by a key list in a same ax.
:param list key: list of strings.
:return: the figure and the axes.
:rtype: list
"""
data = np.array(self.state_dict[key[0]])
N, dim = data.shape
t_record = N * self.dt
if dim == 6:
fig, ax = plt.subplots(3, 2, sharex=True)
elif dim == 3:
fig, ax = plt.subplots(3, 1, sharex=True)
for k in key:
data = np.array(self.state_dict[k])
for i in range(3):
if dim == 6:
ax[i, 0].plot(np.linspace(0, t_record, N), data[:, i])
ax[i, 1].plot(np.linspace(0, t_record, N), data[:, i + 3])
elif dim == 3:
ax[i].plot(np.linspace(0, t_record, N), data[:, i])
for i in range(3):
if dim == 6:
ax[i, 0].legend(key)
ax[i, 0].set_xlabel("Simulation time (s)")
ax[i, 1].legend(key)
ax[i, 1].set_xlabel("Simulation time (s)")
elif dim == 3:
ax[i].legend(key)
ax[i].set_xlabel("Simulation time (s)")
return fig, ax
def plot_error(self):
return self.plot_key(["err", ])
def plot_pos(self):
return self.plot_key(["p", "pd"])
def plot_orient(self):
return self.plot_key(["q", "qd"])
| deanpham98/learn-seq | learn_seq/controller/hybrid.py | hybrid.py | py | 6,567 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "learn_seq.controller.base.TaskController",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy... |
5673773090 | from abc import ABC, abstractmethod
from typing import List, Optional
from rlf.forecasting.data_fetching_utilities.coordinate import Coordinate
from rlf.forecasting.data_fetching_utilities.weather_provider.api.models import Response
class BaseAPIAdapter(ABC):
"""Abstract base class for APIAdapter objects"""
@abstractmethod
def get_historical(self,
coordinate: Coordinate,
start_date: str,
end_date: str,
columns: Optional[List[str]] = None) -> Response:
"""Make a GET request to the API for historical/archived data.
Args:
coordinate (Coordinate): The location to fetch data for.
start_date (str): The starting date for the requested data. In the format "YYYY-MM-DD".
end_date (str): The ending date for the requested data. In the format "YYYY-MM-DD".
columns (list[str], optional): The subset of columns to fetch. If set to None, all columns will be fetched. Defaults to None.
Returns:
Response: The response payload for the request
"""
raise NotImplementedError
@abstractmethod
def get_current(self,
coordinate: Coordinate,
past_days: int = 92,
forecast_days: int = 16,
columns: Optional[List[str]] = None) -> Response:
"""Make a GET request to the API for current/forecasted data.
Args:
coordinate (Coordinate): The location to fetch data for.
past_days (int, optional): How many days into the past to fetch data for. Defaults to 92 (OpenMeteo max value).
forecast_days (int, optional): How many days into the future to fetch data for. Defaults to 16 (OpenMeteo max value).
columns (list[str], optional): The subset of columns to fetch. If set to None, all columns will be fetched. Defaults to None.
Returns:
Response: The response payload for the request
"""
raise NotImplementedError
@abstractmethod
def get_index_parameter(self) -> str:
"""Get the index parameter which is a field in the hourly section of the response that can be used as an index in a DataFrame (must be in ISO date format).
Returns:
str: Index parameter field to use.
"""
raise NotImplementedError
| orion-junkins/river-level-forecasting | src/rlf/forecasting/data_fetching_utilities/weather_provider/api/base_api_adapter.py | base_api_adapter.py | py | 2,430 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "abc.ABC",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "rlf.forecasting.data_fetching_utilities.coordinate.Coordinate",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 16,
"usage_type": "name"
},
{
... |
40751308459 | from pyrogram import Client, filters, enums
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from config import ADMIN
@Client.on_message(filters.command("start") & filters.private)
async def start_cmd(bot, msg):
txt="ᴛʜɪs ɪs ᴘᴇʀsᴏɴᴀʟ ᴜsᴇ ʙᴏᴛ 🙏. ᴅᴏ ʏᴏᴜ ᴡᴀɴᴛ ʏᴏᴜʀ ᴏᴡɴ ʙᴏᴛ? 👇 ᴄʟɪᴄᴋ ᴛʜᴇ sᴏᴜʀᴄᴇ ᴄᴏᴅᴇ ᴛᴏ ᴅᴇᴘʟᴏʏ"
btn = InlineKeyboardMarkup([[
InlineKeyboardButton("🤖 SOURCE CODE", url="https://github.com/dor3Monbotz/CrownSimpleRenamerBot")
],[
InlineKeyboardButton("🖥️ How To Deploy (ask to owner)", url="https://t.me/little_little_hackur")
]])
if msg.from_user.id != ADMIN:
await msg.reply_text(text=txt, reply_markup=btn, disable_web_page_preview = True)
return
await start(bot, msg, cb=False)
@Client.on_callback_query(filters.regex("start"))
async def start(bot, msg, cb=True):
txt=f"🙏ɴᴀᴍᴀsᴛᴇ🙏 {msg.from_user.mention} ɪ ᴀᴍ sɪᴍᴘʟᴇ ʀᴇɴᴀᴍᴇ ʙᴏᴛ ᴡɪᴛʜ ᴘᴇʀsᴏɴᴀʟ ᴜsᴇs.\nᴛʜɪs ʙᴏᴛ ɪs ᴍᴀᴅᴇ ʙʏ <b><a href=https://github.com/dor3Monbotz</a></b>"
button= [[
InlineKeyboardButton("🤖 Bot Updates", url="https://t.me/projectcrown")
],[
InlineKeyboardButton("ℹ️ Help", callback_data="help"),
InlineKeyboardButton("📡 About", callback_data="about")
]]
if cb:
await msg.message.edit(text=txt, reply_markup=InlineKeyboardMarkup(button), disable_web_page_preview = True, parse_mode=enums.ParseMode.HTML)
else:
await msg.reply_text(text=txt, reply_markup=InlineKeyboardMarkup(button), disable_web_page_preview = True, parse_mode=enums.ParseMode.HTML)
@Client.on_callback_query(filters.regex("help"))
async def help(bot, msg):
txt=f"ᴊᴜsᴛ sᴇɴᴅ ᴀ ғɪʟᴇ ᴀɴᴅ /rename <new name> ᴡɪᴛʜ ʀᴇᴘʟᴀʏᴇᴅ ʏᴏᴜʀ ғɪʟᴇ\n\nʀᴇᴘʟʏ ᴀ ᴘʜᴏᴛᴏ ᴀɴᴅ sᴇɴᴅ /set ᴛᴏ sᴇᴛ ᴛᴇᴍᴘᴏʀᴀʀʏ ᴛʜᴜᴍʙɴᴀɪʟ\n/view ᴛᴏ sᴇᴇ ʏᴏᴜʀ ᴛʜᴜᴍʙɴᴀɪʟ"
button= [[
InlineKeyboardButton("🚫 Close", callback_data="del"),
InlineKeyboardButton("⬅️ Back", callback_data="start")
]]
await msg.message.edit(text=txt, reply_markup=InlineKeyboardMarkup(button), disable_web_page_preview = True)
@Client.on_callback_query(filters.regex("about"))
async def about(bot, msg):
me=await bot.get_me()
Master=f"<a href=https://t.me/little_little_hackur>ᴋᴜɴᴀʟ ɢᴀɪᴋᴡᴀᴅ</a> & <a href=https://t.me/projectcrown>ᴘʀᴏᴊᴇᴄᴛ ᴄʀᴏᴡɴ</a>"
Source="<a href=https://github.com/dor3Monbotz/CrownSimpleRenamerBot>Click Here</a>"
txt=f"<b>ʙᴏᴛ ɴᴀᴍᴇ: {me.mention}\n ᴅᴇᴠᴇʟᴏᴘᴇʀ: <a href=https://t.me/little_little_hackur</a>\nʙᴏᴛ ᴜᴘᴅᴀᴛᴇs: <a href=https://t.me/projectcrown>ᴄʀᴏᴡɴ ʙᴏᴛᴢ</a>\nMy ᴍᴀsᴛᴇʀ's: {Master}\nsᴏᴜʀᴄᴇ ᴄᴏᴅᴇ: {Source}</b>"
button= [[
InlineKeyboardButton("🚫 Close", callback_data="del"),
InlineKeyboardButton("⬅️ Back", callback_data="start")
]]
await msg.message.edit(text=txt, reply_markup=InlineKeyboardMarkup(button), disable_web_page_preview = True, parse_mode=enums.ParseMode.HTML)
@Client.on_callback_query(filters.regex("del"))
async def closed(bot, msg):
try:
await msg.message.delete()
except:
return
| dor3Monbotz/CrownSimpleRenamerBot | main/start_text.py | start_text.py | py | 3,663 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyrogram.types.InlineKeyboardMarkup",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardButton",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardButton",
"line_number": 12,
"usage_ty... |
28630612376 | """volume_desc_length
Revision ID: 3719cf217eb9
Create Date: 2021-08-09 01:48:21.855229
"""
from alembic import op # noqa
import sqlalchemy as sa # noqa
import datetime # noqa
# revision identifiers, used by Alembic.
revision = '3719cf217eb9'
down_revision = '538bac81c28a'
branch_labels = None
depends_on = None
def upgrade():
op.alter_column('volume', column_name='description', type_=sa.String(250), nullable=True) # noqa
def downgrade():
op.alter_column('volume', column_name='description', type_=sa.String(250), nullable=True) # noqa
| hashipod/icebox | core/icebox/dba/versions/0030_volume_desc_length.py | 0030_volume_desc_length.py | py | 560 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "alembic.op.alter_column",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.String",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "alembic.op.alter... |
37753053291 | # %%
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Activation, BatchNormalization
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import layers
import tensorflow as tf
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
from os import scandir
import pyedflib as pyedf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from PyAstronomy import pyaC
import warnings
import scipy as sp
import scipy.fftpack
warnings.filterwarnings('ignore')
# %%
ruta_archivos = "../../data/sleep-edf-database-expanded-1.0.0/sleep-cassette"
lista_archivos = [arch.name for arch in scandir(ruta_archivos) if arch.is_file()]
# %%
num_canales = 7
excedente_despierto = 0
excedente_ligero = 0
excedente_rem = 0
datos_1 = []
datos_2 = []
etiquetas = []
descriptores_1 = [[] for _ in range(27)]
descriptores_2 = [[] for _ in range(24)]
#%%
lista_nueva = lista_archivos[145:len(lista_archivos)]
#%%
for ruta_actual in lista_nueva:
print(ruta_actual)
if ruta_actual[9:len(ruta_actual)] == "Hypnogram.edf":
identificador = ruta_actual[2:7]
st_FileHypEdf = pyedf.EdfReader(ruta_archivos + "/" + ruta_actual)
v_HypTime, v_HypDur, v_Hyp = st_FileHypEdf.readAnnotations()# Lectura de las señales s_SigNum señales con nombres v_Signal_Labels
st_FileEdf = pyedf.EdfReader(ruta_archivos + "/SC" + identificador + "0-PSG.edf")
s_SigNum = st_FileEdf.signals_in_file
v_Signal_Labels = st_FileEdf.getSignalLabels()
s_FirstInd = 0
datos_actual = [[] for _ in range(num_canales)]
etiquetas_actual = []
for s_SigRef in range(num_canales):
s_NSamples = st_FileEdf.getNSamples()[0]
s_FsHz = st_FileEdf.getSampleFrequency(s_SigRef)
v_Sig = st_FileEdf.readSignal(s_SigRef)
v_Time = np.arange(0, s_NSamples) / s_FsHz
s_WinSizeSec = 30
s_WinSizeSam = np.round(s_FsHz * s_WinSizeSec)
s_FirstInd = 0
while 1:
s_LastInd = s_FirstInd + s_WinSizeSam
if s_LastInd > s_NSamples:
break
for i in range(len(v_Hyp)-1):
if ((v_HypTime[i] <= s_FirstInd/s_FsHz) and (s_FirstInd/s_FsHz < v_HypTime[i+1])):
if v_Hyp[i] == "Sleep stage W" or v_Hyp[i] == "Sleep stage 1" or v_Hyp[i] == "Sleep stage 2" or v_Hyp[i] == "Sleep stage 3" or v_Hyp[i] == "Sleep stage 4" or v_Hyp[i] == "Sleep stage R":
if s_SigRef == 0:
etiquetas_actual.append(v_Hyp[i])
datos_actual[s_SigRef].append(np.array(v_Sig[s_FirstInd:s_LastInd]))
break
s_FirstInd = s_LastInd
despierto = 0
ligero = 0
profundo = 0
rem = 0
for etiqueta in etiquetas_actual:
if etiqueta == "Sleep stage W":
despierto = despierto + 1
elif etiqueta == "Sleep stage 1" or etiqueta == "Sleep stage 2":
ligero = ligero + 1
elif etiqueta == "Sleep stage 3" or etiqueta == "Sleep stage 4":
profundo = profundo + 1
elif etiqueta == "Sleep stage R":
rem = rem + 1
if despierto < profundo:
excedente_despierto = excedente_despierto + (profundo - despierto)
if ligero < profundo:
excedente_ligero = excedente_ligero + (profundo - ligero)
if rem < profundo:
excedente_rem = excedente_rem + (profundo - rem)
datos_actual_1 = np.array(datos_actual[0:3])
datos_actual_2 = np.array(datos_actual[3:7])
datos_actual_canales_1 = []
datos_actual_canales_2 = []
for i in range(datos_actual_1.shape[1]):
datos_actual_canales_1.append(datos_actual_1[:,i,:].transpose())
datos_actual_canales_2.append(datos_actual_2[:,i,:].transpose())
X_datos_1, X_datos_2, y_datos_11, y_datos_21 = train_test_split(datos_actual_canales_1, etiquetas_actual, test_size=0.5, random_state=4)
datos_shuffle_1 = np.concatenate((X_datos_1, X_datos_2))
X_datos_1, X_datos_2, y_datos_12, y_datos_22 = train_test_split(datos_actual_canales_2, etiquetas_actual, test_size=0.5, random_state=4)
datos_shuffle_2 = np.concatenate((X_datos_1, X_datos_2))
etiquetas_shuffle = np.concatenate((y_datos_11, y_datos_22))
despierto = 0
ligero = 0
rem = 0
for i in range(etiquetas_shuffle.shape[0]):
agregar_dato = False
if etiquetas_shuffle[i] == "Sleep stage W":
if despierto < (profundo+excedente_despierto):
etiquetas.append(0)
agregar_dato = True
if despierto == profundo:
excedente_despierto = excedente_despierto - 1
else:
despierto = despierto + 1
elif etiquetas_shuffle[i] == "Sleep stage 1" or etiquetas_shuffle[i] == "Sleep stage 2":
if ligero < (profundo+excedente_ligero):
etiquetas.append(1)
agregar_dato = True
if ligero == profundo:
excedente_ligero = excedente_ligero - 1
else:
ligero = ligero + 1
elif etiquetas_shuffle[i] == "Sleep stage 3" or etiquetas_shuffle[i] == "Sleep stage 4":
etiquetas.append(2)
agregar_dato = True
elif etiquetas_shuffle[i] == "Sleep stage R":
if rem < (profundo+excedente_rem):
etiquetas.append(3)
agregar_dato = True
if rem == profundo:
excedente_rem = excedente_rem - 1
else:
rem = rem + 1
if agregar_dato:
datos_1.append(datos_shuffle_1[i,:,:])
datos_2.append(datos_shuffle_2[i,:,:])
for s_SigRef in range(num_canales):
if s_SigRef < 3:
arreglo_x = np.array(list(range(3000)))
dato_df = pd.DataFrame(datos_shuffle_1[i,:,s_SigRef])
descriptores_1[9*s_SigRef].append(dato_df.mean()[0])
descriptores_1[9*s_SigRef + 1].append(dato_df.var()[0])
descriptores_1[9*s_SigRef + 2].append(dato_df.kurt()[0])
descriptores_1[9*s_SigRef + 3].append(dato_df.skew()[0])
descriptores_1[9*s_SigRef + 4].append(len(pyaC.zerocross1d(x=arreglo_x, y=dato_df.values[:,0])))
# Frecuencia
dato_fft = sp.fftpack.fft(dato_df)
fftfreq = sp.fftpack.fftfreq(len(dato_fft), 1/100)
potencia_02 = 0
potencia_28 = 0
potencia_814 = 0
potencia_14inf = 0
for j in range(len(fftfreq)):
if abs(fftfreq[j]) <= 2:
potencia_02 = potencia_02 + abs(dato_fft[j][0])**2
elif abs(fftfreq[j]) < 8:
potencia_28 = potencia_28 + abs(dato_fft[j][0])**2
elif abs(fftfreq[j]) < 14:
potencia_814 = potencia_814 + abs(dato_fft[j][0])**2
else:
potencia_14inf = potencia_14inf + abs(dato_fft[j][0])**2
descriptores_1[9*s_SigRef + 5].append(potencia_02/2)
descriptores_1[9*s_SigRef + 6].append(potencia_28/6)
descriptores_1[9*s_SigRef + 7].append(potencia_814/6)
descriptores_1[9*s_SigRef + 8].append(potencia_14inf/(max(fftfreq)-14))
else:
arreglo_x = np.array(list(range(30)))
dato_df = pd.DataFrame(datos_shuffle_2[i,:,(s_SigRef-3)])
descriptores_2[6*(s_SigRef-3)].append(dato_df.mean()[0])
descriptores_2[6*(s_SigRef-3) + 1].append(dato_df.var()[0])
descriptores_2[6*(s_SigRef-3) + 2].append(dato_df.kurt()[0])
descriptores_2[6*(s_SigRef-3) + 3].append(dato_df.skew()[0])
descriptores_2[6*(s_SigRef-3) + 4].append(len(pyaC.zerocross1d(x=arreglo_x, y=dato_df.values[:,0])))
# Frecuencia
dato_fft = sp.fftpack.fft(dato_df)
fftfreq = sp.fftpack.fftfreq(len(dato_fft), 1)
potencia = 0
for j in range(len(fftfreq)):
potencia = potencia + abs(dato_fft[j][0])**2
descriptores_2[6*(s_SigRef-3) + 5].append(potencia/max(fftfreq))
#%%
despierto = 0
ligero = 0
profundo = 0
rem = 0
no_se_sabe = 0
for etiqueta in etiquetas:
if etiqueta == 0:
# Depierto = 0
despierto = despierto + 1
elif etiqueta == 1:
# Ligero = 1
ligero = ligero + 1
elif etiqueta == 2:
# Profundo = 2
profundo = profundo + 1
elif etiqueta == 3:
# REM = 3
rem = rem + 1
else:
no_se_sabe = no_se_sabe + 1
#%%
datos_1_np = np.array(datos_1)
datos_2_np = np.array(datos_2)
descriptores_1_np = np.array(descriptores_1).transpose()
descriptores_2_np = np.array(descriptores_2).transpose()
etiquetas_np = np.array(etiquetas)
#%%
np.save('./datos_1.npy', datos_1_np)
np.save('./datos_2.npy', datos_2_np)
np.save('./descriptores_1.npy', descriptores_1_np)
np.save('./descriptores_2.npy', descriptores_2_np)
np.save('./etiquetas.npy', etiquetas_np)
| sofgutierrez6/Parcial-2-Machine | Proyecto2_Gutiérrez_Guatibonza.py | Proyecto2_Gutiérrez_Guatibonza.py | py | 10,228 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.scandir",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pyedflib.EdfReader",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pyedflib.EdfRea... |
34564591070 | import logging
import requests
from requests_cache import CachedSession
# Cached requests
rc = CachedSession(
"intrusion_monitor_http_cache", backend="sqlite", use_temp=True, expire_after=604800
)
def url_builder(ip, fields_id=66846719, base_url=False):
"""
The parameter `fields_id` encodes the following json generated fields:
- status
- message
- continent
- continentCode
- country
- countryCode
- region
- regionName
- city
- district
- zip
- lat
- lon
- timezone
- offset
- currency
- isp
- org
- as
- asname
- reverse
- mobile
- proxy
- hosting
- query"""
if base_url:
return f"http://ip-api.com/json"
else:
return f"http://ip-api.com/json/{ip}?fields={fields_id}"
def api_request(ip):
"""Returns the request data provided from http://ip-api.com."""
req_str = url_builder(ip)
try:
logging.debug(f"Trying API connection on {req_str}")
req = rc.get(req_str)
logging.debug(
f"\t> Got HTTP code {req.status_code}. Request from cache: {req.from_cache}"
)
except:
err = "Something occurred while getting API connection..."
logging.error(err, exc_info=True)
raise ConnectionError(err)
return req
def process_request(req):
"""Processes the data from a request object."""
if req.status_code == 200:
try:
data = req.json()
logging.debug(f"\t> Data parsed to json")
except:
err = f"\t> An error occurred parsing API data to json. Data starts with: {req.text[0:20]}"
logging.error(err)
raise ValueError(err[3:])
else:
err = f"\t> Request status code {req.status_code} != 200. Reason: {req.reason}"
logging.error(err)
raise requests.HTTPError(err[3:])
return data
def api_request_and_process(ip):
"""Returns the json data provided from http://ip-api.com."""
req = api_request(ip)
data = process_request(req)
return data
| afonsoc12/intrusion-monitor | intrusion_monitor/api.py | api.py | py | 2,088 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "requests_cache.CachedSession",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "logging.error... |
7529076841 | import sys
from time import sleep
import serial
def touchForCDCReset(port="/dev/ttyACM0", *args, **kwargs):
"""Toggle 1200 bps on selected serial port to force board reset.
See Arduino IDE implementation:
https://github.com/arduino/Arduino/blob/master/arduino-core/src/processing/app/Serial.java
https://github.com/arduino/Arduino/blob/master/arduino-core/src/cc/arduino/packages/uploaders/SerialUploader.java
"""
serial_handler = serial.Serial(
port=port,
baudrate=1200,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
dsrdtr=True,
)
serial_handler.open()
assert serial_handler.is_open
serial_handler.dtr=False
serial_handler.close()
# Scanning for available ports seems to open the port or
# otherwise assert DTR, which would cancel the WDT reset if
# it happened within 250 ms. So we wait until the reset should
# have already occurred before we start scanning.
sleep(250)
if __name__ == "__main__":
if len(sys.argv) < 2:
touchForCDCReset()
touchForCDCReset(sys.argv[1])
| ysard/libre-printer | firmware/restart_interface.py | restart_interface.py | py | 1,145 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "serial.Serial",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "serial.EIGHTBITS",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "serial.PARITY_NONE",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "serial.ST... |
73850400103 | import requests
from pprint import pprint
url = 'http://api.giphy.com/v1/gifs/search?api_key=4i1aScgdTDrEGFZuIltFlaHACRS0QWA6&q=경찰&limit=1'
url2 = ''
data = requests.get(url).json()
new_url = data['data'][0]['images']['downsized']['url']
pprint(data['data'][0]['images']['downsized']['url']) | blueboy1593/Django | Django_crud/연습.py | 연습.py | py | 299 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 11,
"usage_type": "call"
}
] |
42005693188 | from pyspark.sql import SparkSession
from pyspark import SparkContext, SparkConf
from pyspark.sql.functions import col, udf
from pyspark.sql.types import IntegerType, BooleanType, NullType, StringType
import os
from credentials import *
# Adding the packages required to get data from S3
os.environ["PYSPARK_SUBMIT_ARGS"] = "--packages com.amazonaws:aws-java-sdk-s3:1.12.196,org.apache.hadoop:hadoop-aws:3.3.1 pyspark-shell"
# Creating our Spark configuration
conf = SparkConf() \
.setAppName('S3toSpark') \
.setMaster('local[*]')
sc=SparkContext(conf=conf)
# Configure the setting to read from the S3 bucket
accessKeyId= key_id
secretAccessKey= secret_key
hadoopConf = sc._jsc.hadoopConfiguration()
hadoopConf.set('fs.s3a.access.key', accessKeyId)
hadoopConf.set('fs.s3a.secret.key', secretAccessKey)
hadoopConf.set('spark.hadoop.fs.s3a.aws.credentials.provider', 'org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider') # Allows the package to authenticate with AWS
# Create our Spark session
spark=SparkSession(sc)
# Read from the S3 bucket
df = spark.read.option("multiline", "true").json("s3a://pinterestdata/consumer*.json")
@udf(returnType = IntegerType())
def clean_follower_count(value):
val = value.isnumeric()
if val == True:
value = int(value)
elif val == False:
value = list(value)
save_key = value.pop(-1)
value = "".join(value)
#if (save_key.isalpha() == True) and (save_key != "k") and (save_key != "M"):
#value = None
if save_key == "k":
value = int(value) * 1000
elif save_key == "M":
value = int(value) * 1000000
return value
@udf(returnType = BooleanType())
def clean_message_downloaded(value):
if value == 1:
value = True
elif value == 0:
value = False
return value
@udf(returnType = StringType())
def clean_tag_list(value):
value.strip()
tag_list_string = "N,o, ,T,a,g,s, ,A,v,a,i,l,a,b,l,e"
if value == tag_list_string:
value = None
else:
value = value
return value
@udf(returnType = StringType())
def clean_title(value):
value.strip()
title = "No Title Data Available"
if value == title:
value = None
else:
value = value
return value
@udf(returnType = StringType())
def clean_img_src(value):
value.strip()
image_src_string = "Image src error."
if value == image_src_string:
value = None
else:
value = value
return value
@udf(returnType = StringType())
def clean_save_location(value):
string_to_strip = "Local save in "
value = value.replace(string_to_strip, "")
return value
@udf(returnType = StringType())
def clean_description(value):
value.strip()
if value == "No description available Story format":
value = None
else:
value = value
return value
# You may want to change this to read csv depending on the files your reading from the bucket
df = df.withColumn("follower_count", clean_follower_count(col("follower_count"))) \
.withColumn("downloaded", clean_message_downloaded(col("downloaded"))) \
.withColumn("tag_list", clean_tag_list(col("tag_list"))) \
.withColumn("title", clean_title(col("title"))) \
.withColumn("image_src", clean_img_src(col("image_src"))) \
.withColumn("save_location", clean_save_location(col("save_location"))) \
.withColumn("description", clean_description(col("description")))
clean_df = df.select(col("index"), col("category"), col("title"), col("description"),
col("tag_list"), col("follower_count"), col("downloaded"), col("is_image_or_video"),
col("image_src"), col("save_location"))
clean_df.show() | meedaycodes/pinterest-data-processing-pipeline | batch_processing.py | batch_processing.py | py | 3,678 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pyspark.SparkConf",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pyspark.SparkContext",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.Sp... |
14963556209 | #!/usr/bin/env python3
import serial
import sys
import argparse
import logging
from logging.config import fileConfig
fileConfig('log.ini', defaults={'logfilename': 'bee.log'})
logger = logging.getLogger('openscale')
if sys.version_info<(3,4,2):
sys.stderr.write("You need python 3.4.2 or later to run this script\n")
exit(1)
class OpenScale:
def InitSerialPort(self):
try:
self.port = serial.Serial(
port= self.server_settings['port'],
baudrate = self.server_settings['baud'],
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=2
)
logging.debug("Serial port initialised")
return True
except:
logging.error ("Could not open " + self.server_settings['port'])
logging.error ("Does this port exist???")
return False
def read_serial(self):
try:
serialLine = self.port.readline().decode()
serialLine = serialLine.replace("\r\n", '')
except UnicodeDecodeError:
#self.errmsg("invalid Serial string:%s" % serialLine)
return None
except serial.serialutil.SerialException as e:
#self.errmsg("Serial connection failed, pausing for 5 seconds then will try again")
config_serial(False)
time.sleep(5)
return None
def extract_weight_data(self, data):
weight = data.split(',')[0]
units = data.split(',')[1]
return weight, units
def get_measurement(self):
# wait for \r\n to get stream "in sych"
while True:
raw = self.port.readline().decode("utf-8")
if "Readings:" in raw:
break
while True:
raw = self.port.readline().decode("utf-8")
if "\r\n" in raw:
break
try:
raw = self.port.readline().decode("utf-8")
weight, units = self.extract_weight_data(raw)
except serial.SerialException as e:
logging.error ("Failed to read from serial port")
pass
if raw != None:
if len(raw) > 0:
return weight, units
#---------------------------------------------------------------------------
# Main function called when script executed
#---------------------------------------------------------------------------
def __init__(self, server_settings):
self.server_settings = server_settings
if not self.InitSerialPort():
sys.exit()
def __del__(self):
try:
self.port.close()
except:
pass
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-V", "--version", help="Show program version", action="store_true")
parser.add_argument("-p", "--port", help="define the serial/USB port to connect to")
parser.add_argument("-b", "--baud", help="define the baud rate (default 9600)")
parser.add_argument("-d", "--nodb", help="dont write to the database", action="store_true")
args = parser.parse_args()
# Check for --version or -V
if args.version:
logger.info("Weight measurement via OpenScale PCB - version 0.1")
if not args.port:
logger.info("A port must be defined. eg: '/dev/ttyUSB0'")
parser.print_help()
sys.exit()
writeToDatabase = False if args.nodb else True
baud = args.baud if args.baud else 9600
server_settings = {
"port": args.port,
"baud": baud
}
scale = OpenScale(server_settings)
weight, units = scale.get_measurement()
logging.info ("Weight is " + weight + units)
if writeToDatabase:
import database
database.send_data("weight", weight)
if database.send_data("weight", weight, table=database.get_host_name()):
logging.info ("Saved to database ok")
if __name__ == '__main__':
main() | jenkinsbe/hivekeepers | get_weight.py | get_weight.py | py | 4,093 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.config.fileConfig",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.version_info",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sys.... |
74114075623 | import frappe,json
@frappe.whitelist()
def get_receipt_shipments(doc):
doc=json.loads(doc)
shipments=[]
for receipt in doc.get('purchase_receipts'):
for receipt_item in frappe.get_all("Purchase Receipt Item",filters={"parent":receipt.get("receipt_document")},fields=["purchase_order"],group_by="purchase_order"):
for so in frappe.get_all("Sales Order",{"po_no":receipt_item.purchase_order},["name"]):
for dn in frappe.get_all("Delivery Note Item",{"against_sales_order":so.name},['parent'],group_by="parent"):
for sp in frappe.get_all("Shipment Delivery Note",{"delivery_note":dn.parent},['parent'],group_by="parent"):
shipments.append(sp.parent)
return shipments
@frappe.whitelist()
def calculate_cost(doc):
    """Allocate landed-cost charges across the voucher's items.

    Three passes over the document:
      1. totals: sum item amounts (FOB base) and parcel weights (weight base);
      2. per-item: distribute each Manual charge pro-rata by FOB or weight into
         ``applicable_charges`` and remember each item's insurance share;
      3. per-tax-row: derive the customs / freight / levy component amounts.

    NOTE(review): ``tax_row["amount"]`` is overwritten inside the item loop, so
    it ends up holding the running total after the last item — presumably the
    grand total is intended; confirm with the doctype's semantics.
    """
    doc=json.loads(doc)
    # Pass 1: totals used as denominators for pro-rata allocation.
    items_total=0
    for item_row in doc.get("items"):
        items_total+=item_row.get("amount")
    weight_total=0
    for parcel in doc.get("shipment_parcel"):
        weight_total+=parcel.get("weight")
    item_insurance={}    # item_code -> this item's share of the insurance amount
    component_amount={}  # component name -> computed amount (filled in pass 3)
    component_exists={}  # set-like map of component names present on the voucher
    # Pass 2: distribute Manual charges onto each item.
    for item_row in doc.get("items"):
        charges=0
        for tax_row in doc.get("taxes"):
            component_exists[tax_row.get("landed_cost_component")]=tax_row.get("landed_cost_component")
            for lc in frappe.get_all("Landed Cost Component",{"name":tax_row.get("landed_cost_component")},["type","calculation_based"]):
                if (tax_row.get("landed_cost_component")=="InsuranceAmount"):
                    if (not tax_row.get("amount")):
                        frappe.throw("<b>Insurance</b> Amount Missing")
                    # Insurance is split by the item's share of the FOB total.
                    item_insurance[item_row.get("item_code")]=((item_row.get("amount")/items_total)*tax_row.get("amount"))
                if (lc.type=="Manual" and lc.calculation_based=="FOB"):
                    # Pro-rata by item amount.
                    charges+=((item_row.get("amount")/items_total)*tax_row.get("amount"))
                elif (lc.type=="Manual" and lc.calculation_based=="Weight"):
                    # Pro-rata by item weight (assumes weight_total != 0 — TODO confirm).
                    charges+=((frappe.db.get_value("Item", item_row.get("item_code"),"item_weight")/weight_total)*tax_row.get("amount"))
        item_row["applicable_charges"]=charges
    # Pass 3: compute the customs/freight component amounts from the items.
    for tax_row in doc.get("taxes"):
        amount=0
        for item_row in doc.get("items"):
            if tax_row.get("landed_cost_component")=="Customs value air":
                if not item_insurance:
                    frappe.throw("<b>Insurance</b> Not Exist For Calculate <b>{0}</b>".format(tax_row.get("landed_cost_component")))
                # CIF-style base: item amount + its insurance share.
                amount+=(item_row.get("amount")+item_insurance[item_row.get("item_code")])
                tax_row["amount"]=amount
            elif tax_row.get("landed_cost_component")=="FreightLocal":
                # Freight converted to local currency, split by weight share.
                amount+=((frappe.db.get_value("Item", item_row.get("item_code"),"item_weight")/weight_total)*(doc.get("freight_charges")*doc.get("currency_exchange_rate")))
                tax_row["amount"]=amount
            elif tax_row.get("landed_cost_component")=="Customs value sea":
                if not item_insurance:
                    frappe.throw("<b>Insurance</b> Not Exist For Calculate <b>{0}</b>".format(tax_row.get("landed_cost_component")))
                # Sea customs base also folds in the weight-apportioned freight.
                amount+=((item_row.get("amount")+item_insurance[item_row.get("item_code")])+((frappe.db.get_value("Item", item_row.get("item_code"),"item_weight")/weight_total)*(doc.get("freight_charges")*doc.get("currency_exchange_rate"))))
                tax_row["amount"]=amount
            elif (tax_row.get("landed_cost_component")=="RailwayLevy" or tax_row.get("landed_cost_component")=="GOK" or tax_row.get("landed_cost_component")=="Duty"):
                # These levies are derived from a customs value, which must exist.
                if not (component_exists.get("Customs value sea") or component_exists.get("Customs value air")):
                    frappe.throw("<b>Customs value air OR Customs value sea</b> Not Exist For Calculate <b>{0}</b>".format(tax_row.get("landed_cost_component")))
        component_amount[tax_row.get("landed_cost_component")]=amount
    # Pass 4: levies as fixed/item-specific percentages of the customs value.
    for tax_row in doc.get("taxes"):
        amount=0
        for item_row in doc.get("items"):
            if tax_row.get("landed_cost_component")=="RailwayLevy":
                # 2% railway development levy on the customs value.
                amount+=(0.02*(component_amount.get("Customs value sea") or component_amount.get("Customs value air")))
                tax_row["amount"]=amount
            elif tax_row.get("landed_cost_component")=="GOK":
                # 3.5% GOK levy on the customs value.
                amount+=(0.035*(component_amount.get("Customs value sea") or component_amount.get("Customs value air")))
                tax_row["amount"]=amount
            elif tax_row.get("landed_cost_component")=="Duty":
                # Per-item duty percentage applied to the customs value.
                amount+=(frappe.db.get_value("Item", item_row.get("item_code"),"duty_percentage")*(component_amount.get("Customs value sea") or component_amount.get("Customs value air")))
                tax_row["amount"]=amount
    return doc
| Bizmap-Technologies-Pvt-Ltd/mfi_customization- | mfi_customization/mfi/doctype/landed_cost_voucher.py | landed_cost_voucher.py | py | 4,299 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "frappe.get_all",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "frappe.get_all",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "frappe.get_all",
"line_numb... |
5775145740 | from flask import abort, request
from flask.ext.login import login_required
from webargs import fields
from webargs.flaskparser import use_args
from bauble.controllers.api import api
import bauble.db as db
from bauble.models import Accession, Location, Plant
from bauble.middleware import use_model
import bauble.utils as utils
@api.route("/plant")
@login_required
def index_plant():
    """Return every plant serialized as a JSON list."""
    all_plants = Plant.query.all()
    return utils.json_response(Plant.jsonify(all_plants, many=True))
@api.route("/plant/<int:id>")
@login_required
@use_model(Plant)
def get_plant(plant, id):
    """Return one plant; ``use_model`` resolves ``id`` to the instance."""
    payload = plant.jsonify()
    return utils.json_response(payload)
@api.route("/plant/<int:id>", methods=['PATCH'])
@login_required
@use_model(Plant)
def patch_plant(plant, id):
    """Update a plant in place and return it as JSON.

    ``use_model`` loads the plant and applies the request values; this view
    validates the foreign keys before committing.

    Aborts with 422 when ``location_id`` or ``accession_id`` is supplied but
    does not reference an existing row.

    Fix: every other mutating view in this controller requires login; this
    one was missing ``@login_required``, leaving the endpoint unauthenticated.
    """
    location_id = request.values.get('location_id', None)
    if location_id is not None:
        location = Location.query.filter_by(id=location_id).first()
        if not location:
            abort(422, "Invalid location id")
    accession_id = request.values.get('accession_id', None)
    if accession_id is not None:
        accession = Accession.query.filter_by(id=accession_id).first()
        if not accession:
            abort(422, "Invalid accession id")
    # TODO(review): PlantChange audit-record scaffolding used to live here
    # (commented out, never finished); re-introduce change tracking once the
    # quantity/location transfer semantics are settled.
    db.session.commit()
    return utils.json_response(plant.jsonify())
@api.route("/plant", methods=['POST'])
@login_required
@use_model(Plant)
def post_plant(plant):
    """Create a plant from the request body and answer 201 with the new row.

    Aborts with 422 when a supplied ``location_id`` or ``accession_id`` does
    not reference an existing row.
    """
    location_id = request.values.get('location_id', None)
    if location_id is not None:
        if not Location.query.filter_by(id=location_id).first():
            abort(422, "Invalid location id")
    accession_id = request.values.get('accession_id', None)
    if accession_id is not None:
        if not Accession.query.filter_by(id=accession_id).first():
            abort(422, "Invalid accession id")
    db.session.add(plant)
    # NOTE: PlantChange audit-record creation was stubbed out here and has
    # never been implemented.
    db.session.commit()
    return utils.json_response(plant.jsonify(), 201)
@api.route("/plant/<int:id>", methods=['DELETE'])
@login_required
@use_model(Plant)
def delete_plant(plant, id):
    """Delete the plant and answer with an empty 204 response."""
    session = db.session
    session.delete(plant)
    session.commit()
    return '', 204
@api.route("/plant/<int:id>/count")
@login_required
@use_args({
    'relation': fields.DelimitedList(fields.String(), required=True)
})
def plant_count(args, id):
    """Count rows of each requested relation path for one plant.

    The response maps the last path segment of each relation to its count.
    """
    plant = Plant.query.get_or_404(id)
    counts = {}
    for relation in args['relation']:
        _prefix, leaf = relation.rsplit('/', 1)
        counts[leaf] = utils.count_relation(plant, relation)
    return utils.json_response(counts)
| Bauble/bauble.web | bauble/controllers/api.OLD/plant.py | plant.py | py | 4,481 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "bauble.models.Plant.query.all",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "bauble.models.Plant.query",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "bauble.models.Plant",
"line_number": 16,
"usage_type": "name"
},
{
"... |
39845448432 | """
Iguana (c) by Marc Ammon, Moritz Fickenscher, Lukas Fridolin,
Michael Gunselmann, Katrin Raab, Christian Strate
Iguana is licensed under a
Creative Commons Attribution-ShareAlike 4.0 International License.
You should have received a copy of the license along with this
work. If not, see <http://creativecommons.org/licenses/by-sa/4.0/>.
"""
from django.forms import ModelForm, ValidationError
from tag.models import Tag
from django.utils.translation import ugettext_lazy as _
from common.widgets import CustomAutoCompleteWidgetSingle
class TagForm(ModelForm):
    """Create/edit form for project Tags; enforces per-project uniqueness
    of ``tag_text``."""

    # Owning project, injected by the view via the ``project`` kwarg.
    project = ""

    class Meta:
        model = Tag
        fields = ['tag_text', 'color']
        # Fix: the ModelForm option is ``widgets`` (plural); the previous
        # ``widget`` key was silently ignored by Django, so the custom
        # autocomplete widget was never applied.
        widgets = {
            'color': CustomAutoCompleteWidgetSingle(),
        }

    def __init__(self, *args, **kwargs):
        # Pop ``project`` before delegating so ModelForm does not see it.
        self.project = kwargs.pop('project')
        super(TagForm, self).__init__(*args, **kwargs)

    def clean_tag_text(self):
        """Reject a tag_text that already exists for this project."""
        tag_to_be_stored = self.cleaned_data['tag_text']
        # Let the database check uniqueness instead of iterating every tag.
        if Tag.objects.filter(project=self.project,
                              tag_text=tag_to_be_stored).exists():
            raise ValidationError(_('There is already a Tag "{}" for this project'.format(tag_to_be_stored) +
                                    ' and you are only allowed to have it once per project.'))
        return tag_to_be_stored
| midas66/iguana | src/tag/forms.py | forms.py | py | 1,352 | python | en | code | null | github-code | 36 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "tag.models.Tag",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "common.widgets.CustomAutoCompleteWidgetSingle",
"line_number": 26,
"usage_type": "call"
},
{
... |
29403672645 | # -*- coding: utf-8 -*-
"""
Django settings for Rovercode Web project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import datetime
import environ
from urllib.parse import urlparse
ROOT_DIR = environ.Path(__file__) - 3 # (rovercode_web/config/settings/common.py - 3 = rovercode_web/)
APPS_DIR = ROOT_DIR
env = environ.Env()
env.read_env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'dj_rest_auth',
'allauth', # registration
'allauth.account', # registration
'dj_rest_auth.registration',
'allauth.socialaccount', # registration
'allauth.socialaccount.providers.github',
'allauth.socialaccount.providers.google',
'rest_framework',
'django_filters',
)
# Apps specific for this project go here.
LOCAL_APPS = (
# custom users app
'rovercode_web.users.apps.UsersConfig',
# Your stuff: custom apps go here
'mission_control.apps.MissionControlConfig',
'api.apps.ApiConfig',
'authorize.apps.AuthorizeConfig',
'curriculum.apps.CurriculumConfig',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = (
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'rovercode_web.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('rovercode_web/fixtures')),
str(APPS_DIR.path('mission-control/fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""rovercode.com""", 'admin@rovercode.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
SUPPORT_CONTACT = env('SUPPORT_CONTACT', default='rovercode@example.com')
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': env.db('DATABASE_URL', default='postgres:///rovercode_web'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
if env.bool('POSTGRES_USE_AWS_SSL', False):
DATABASES['default']['OPTIONS'] = {'sslrootcert': 'rds-ca-2015-root.crt', 'sslmode': 'require'}
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('rovercode_web/templates')),
str(APPS_DIR.path('rovercode_web/blog/templates')),
str(APPS_DIR.path('realtime/templates')),
str(APPS_DIR.path('mission_control/templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
'django.template.context_processors.request',
],
},
},
]
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('rovercode_web/media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
ASGI_APPLICATION = 'config.asgi.application'
redis_url = urlparse(env('REDIS_URL', default='redis://redis:6379'))
REDIS_HOST = redis_url.hostname
REDIS_PORT = redis_url.port
# PASSWORD VALIDATION
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
# ------------------------------------------------------------------------------
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = 'none'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'rovercode_web.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'rovercode_web.users.adapters.SocialAccountAdapter'
ACCOUNT_FORMS = {
'reset_password': 'rovercode_web.users.forms.SilentResetPasswordForm',
}
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'api-docs:docs-index'
LOGIN_URL = 'account_login'
LOGOUT_URL = 'account_logout'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
# REST FRAMEWORK CONFIGURATION
# ------------------------------------------------------------------------------
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_simplejwt.authentication.JWTAuthentication',
),
'DEFAULT_FILTER_BACKENDS': (
'django_filters.rest_framework.DjangoFilterBackend',
'rest_framework.filters.OrderingFilter',
'rest_framework.filters.SearchFilter',
),
'DEFAULT_PAGINATION_CLASS': 'mission_control.pagination.CustomPagination',
'PAGE_SIZE': 15,
}
# SOCIAL ACCOUNT CONFIGURATION
# ------------------------------------------------------------------------------
SOCIALACCOUNT_QUERY_EMAIL = True
SOCIALACCOUNT_PROVIDERS = {
'google': {
'SCOPE': [
'profile',
'email',
],
'AUTH_PARAMS': {
'access_type': 'online',
}
}
}
# OTHER SERVICES
# ------------------------------------------------------------------------------
PROFANITY_CHECK_SERVICE_HOST = env('PROFANITY_CHECK_SERVICE_HOST', default='http://profanity-check:8000')
SUBSCRIPTION_SERVICE_HOST = env('SUBSCRIPTION_SERVICE_HOST', default='http://localhost:3000')
ZENDESK_EMAIL = env('ZENDESK_EMAIL', default='support@example.com')
ZENDESK_TOKEN = env('ZENDESK_TOKEN', default='abcdefg1234567')
ZENDESK_SUBDOMAIN = env('ZENDESK_SUBDOMAIN', default='domain.zendesk.com')
# JWT CONFIGURATION
# ------------------------------------------------------------------------------
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': datetime.timedelta(hours=4),
'REFRESH_TOKEN_LIFETIME': datetime.timedelta(days=7),
'ROTATE_REFRESH_TOKENS': True,
'AUTH_HEADER_TYPES': ('JWT',),
'SIGNING_KEY': env(
'JWT_SECRET_KEY',
default='ac9fb5054311bfeeefe79fbe31740850'
),
}
REST_AUTH_SERIALIZERS = {
'JWT_TOKEN_CLAIMS_SERIALIZER': 'rovercode_web.users.utils.JwtObtainPairSerializer',
}
# Enables django-rest-auth to use JWT tokens instead of regular tokens.
REST_USE_JWT = True
SOCIAL_CALLBACK_URL = env(
'SOCIAL_CALLBACK_URL',
default='http://localhost:8080/accounts/login/callback/{service}'
)
# Loads the default rover config
DEFAULT_ROVER_CONFIG = env.json('DEFAULT_ROVER_CONFIG', {'no_default_specified': True})
SILENCED_SYSTEM_CHECKS = [
# Not using Django admin
'admin.E408',
]
FREE_TIER_PROGRAM_LIMIT = env('FREE_TIER_PROGRAM_LIMIT', default=5)
DEFAULT_BLOG_QUESTION_ID = env('DEFAULT_BLOG_QUESTION_ID', default=1)
| rovercode/rovercode-web | config/settings/common.py | common.py | py | 12,431 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "environ.Path",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "environ.Env",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "urllib.parse.urlparse",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
... |
137629276 | from silence_tensorflow import silence_tensorflow
import tensorflow as tf
from tensorflow.keras import layers, Model, models, Input, regularizers, initializers
from tensorflow.keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
import glob
import imageio
import os
import time
import datetime
import pathlib
from PIL import Image
import cv2
import csv
import Template
silence_tensorflow()
tf.config.run_functions_eagerly(True)
# gpu = tf.config.list_physical_devices('GPU')
# tf.config.experimental.set_memory_growth(gpu[0], True)
class MoGLayer(layers.Layer):
    """Learned element-wise affine layer: output = input * std + mean.

    ``std`` and ``mean`` are trainable vectors with one entry per input
    feature; the kernel regularizer (if any) applies to ``std`` only.
    """

    def __init__(self,
                 kernel_regularizer=None,
                 kernel_initializer=None,  # 'glorot_uniform',
                 bias_initializer=None,  # 'zeros',
                 **kwargs):
        # Keras convention: accept ``input_dim`` as shorthand for a 1-D
        # ``input_shape``.
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)
        super(MoGLayer, self).__init__(**kwargs)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)

    def build(self, input_shape):
        """Create the trainable ``std`` and ``mean`` vectors (one value per
        feature of the last input axis)."""
        assert len(input_shape) >= 2
        input_dim = input_shape[-1]
        self.std = self.add_weight(shape=(input_dim,),
                                   name='std',
                                   initializer=self.kernel_initializer,
                                   regularizer=self.kernel_regularizer)
        self.mean = self.add_weight(shape=(input_dim,),
                                    initializer=self.bias_initializer,
                                    name='mean')
        self.built = True

    def call(self, inputs, *args, **kwargs):
        # Element-wise scale, then shift via the backend bias-add.
        output = inputs * self.std
        output = K.bias_add(output, self.mean)
        return output

    def compute_output_shape(self, input_shape):
        # Purely element-wise, so the shape is preserved.
        assert input_shape and len(input_shape) >= 2
        assert input_shape[-1]
        output_shape = list(input_shape)
        output_shape[-1] = input_shape[-1]
        return tuple(output_shape)
def time_string(sec_elapsed):
    """Format an elapsed time in seconds as ``H:MM:SS.ss``.

    Fix: the original format spec space-padded the minutes field
    (``{:>2}``), producing malformed stamps such as ``1: 5:03.00``;
    minutes are now zero-padded to match the seconds field.
    """
    h = int(sec_elapsed / 3600)
    m = int((sec_elapsed % 3600) / 60)
    s = sec_elapsed % 60
    return '{}:{:02d}:{:05.2f}'.format(h, m, s)
# Parameter
# Training hyper-parameters: 64x64 RGB images, 2048-d EEG feature / noise
# vectors, 40 image categories.
image_row = 64
image_column = 64
image_color = 3
batch_size = 16
feature_size = 2048
category_size = 40
noise_size = 2048
Epoch = 1000
# Optimizer
# Adam with the usual GAN settings (lr 1e-4, beta_1 0.5) for both networks.
generator_optimizer = tf.keras.optimizers.Adam(1e-4, beta_1=0.5)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4, beta_1=0.5)
current_folder = pathlib.Path(__file__).parent
# Models
# Pre-trained EEG classifier; its 'features2048' layer output is used as the
# conditioning feature vector for the generator.
extractor_file = 'Results/_Feature_Extractor_Model' # Trained Classifier
extractor = models.load_model(extractor_file, compile=False)
Feature_Layer_Model = Model(inputs=extractor.input, outputs=extractor.get_layer('features2048').output)
# Model for Classifying Generated Images
# Frozen 40-class ResNet used only to score generated images during training.
resnet40_file = 'Data/ResNet_40Class/ResNet_40class_Model'
resnet40 = models.load_model(resnet40_file, compile=False)
resnet40.trainable = False
def make_generator():
    """Build the conditional generator.

    Noise is passed through a learned MoG affine layer, modulated by the EEG
    feature vector, then upsampled by transposed convolutions to a 64x64x3
    image squashed with tanh.
    """
    noise_in = Input(shape=(noise_size,))
    feature_in = Input(shape=(feature_size,))
    mog = MoGLayer(kernel_initializer=initializers.RandomUniform(minval=-0.2, maxval=0.2),
                   bias_initializer=initializers.RandomUniform(minval=-1.0, maxval=1.0),
                   kernel_regularizer=regularizers.l2(0.01))(noise_in)
    net = layers.multiply([mog, feature_in])
    net = layers.Reshape((1, 1, -1))(net)
    # One upsampling stage per tuple: (filters, strides, padding).
    for n_filters, stride, pad in [(512, 1, 'valid'),
                                   (256, 2, 'same'),
                                   (128, 2, 'same'),
                                   (64, 2, 'same')]:
        net = layers.Conv2DTranspose(filters=n_filters, kernel_size=4, strides=stride, padding=pad)(net)
        net = layers.BatchNormalization()(net)
        net = layers.LeakyReLU(0.2)(net)
    net = layers.Conv2DTranspose(filters=3, kernel_size=4, strides=2, padding='same')(net)
    image = layers.Activation("tanh")(net)
    return Model(inputs=[noise_in, feature_in], outputs=image)
def make_discriminator():
    """Build the ACGAN-style discriminator.

    A VGG-like convolutional trunk followed by two heads: a real/fake
    sigmoid and a ``category_size``-way softmax classifier.
    """
    image_in = Input(shape=(image_row, image_column, image_color))

    def conv_unit(tensor, n_filters, kernel):
        # Conv -> BatchNorm -> LeakyReLU; stride 1, 'same' padding throughout.
        tensor = layers.Conv2D(n_filters, kernel, strides=(1, 1), padding='same')(tensor)
        tensor = layers.BatchNormalization()(tensor)
        return layers.LeakyReLU(0.2)(tensor)

    net = image_in
    # Per stage: the filter count and the kernel of each conv in the stage
    # (deeper stages finish with a 1x1 conv), then 2x2 max-pool + dropout.
    for n_filters, kernels in [(64, [(3, 3), (3, 3)]),
                               (128, [(3, 3), (3, 3)]),
                               (256, [(3, 3), (3, 3), (1, 1)]),
                               (512, [(3, 3), (3, 3), (1, 1)])]:
        for kernel in kernels:
            net = conv_unit(net, n_filters, kernel)
        net = layers.MaxPooling2D((2, 2))(net)
        net = layers.Dropout(0.5)(net)

    net = layers.Flatten()(net)

    # Head 1: real/fake discrimination.
    real_fake = layers.Dense(1024, activation='relu')(net)
    real_fake = layers.Dropout(0.5)(real_fake)
    real_fake = layers.Dense(1, activation='sigmoid')(real_fake)

    # Head 2: auxiliary category classification.
    category = layers.Dense(1024, activation='relu')(net)
    category = layers.Dropout(0.5)(category)
    category = layers.Dense(1024, activation='relu')(category)
    category = layers.Dropout(0.5)(category)
    category = layers.Dense(category_size, activation='softmax')(category)

    return Model(inputs=image_in, outputs=[real_fake, category])
generator = make_generator()
discriminator = make_discriminator()
# Create Image Dictionary
# Maps "<class folder>/<file name>" -> float32 image normalized to [-1, 1];
# data_img_name_label pairs each key with its integer class label.
# Fix: the ``np.object`` alias was removed in NumPy 1.24 (AttributeError on
# current NumPy); the builtin ``object`` dtype is the supported spelling.
data_img_name_label = np.empty((2000, 2), dtype=object)
image_dictionary = {}
folder_path = 'Data/Class_Images_Selected'
images_file = 'Data/Class_Images_Selected/Labels.csv'
with open(images_file) as f:
    images_list = list(csv.reader(f, delimiter='/'))
for i, row in enumerate(images_list):
    # row = [label, class folder, file name] (the CSV is '/'-delimited).
    image_key = row[1] + '/' + row[2]
    image_name = os.path.join(folder_path, row[1], row[2])
    one_image = Image.open(image_name)
    one_image = np.array(one_image.resize((image_row, image_column), Image.LANCZOS))
    # Grayscale files resize to row*column pixels; reload and expand them to
    # RGB so every entry has shape (image_row, image_column, 3).
    if one_image.reshape(-1).shape[0] == image_row * image_column:
        one_image = np.array(Image.open(image_name))
        one_image = cv2.cvtColor(one_image, cv2.COLOR_GRAY2RGB)
        one_image = Image.fromarray(one_image)
        one_image = np.array(one_image.resize((image_row, image_column), Image.LANCZOS))
    # Scale [0, 255] -> [-1, 1] to match the generator's tanh output range.
    one_image = ((one_image - 127.5) / 127.5).astype(np.float32)
    image_dictionary[image_key] = one_image
    data_img_name_label[i][0] = image_key
    data_img_name_label[i][1] = int(row[0])
# Load Data
# Template loader returns a 7-tuple: train/test EEG blocks, their labels,
# the image keys for each trial, and a per-class counter.
loader = Template.data_block_loader_train_test_image_separate2(use_all=False, eye_remove=True, do_norm=True, do_zscore=True)
train_eeg = loader[0]
train_labels = loader[1].reshape((-1, 1))
train_images = loader[2].reshape((-1, 1))
test_eeg = loader[3]
test_labels = loader[4]
test_images = loader[5].reshape((-1, 1))
counter = loader[6]
print(train_eeg.shape)
print(test_eeg.shape)
print(counter)
# Project the EEG trials into the 2048-d conditioning feature space.
train_features = Feature_Layer_Model.predict(train_eeg)
test_features = Feature_Layer_Model.predict(test_eeg)
base_data = np.concatenate([train_features, train_images, train_labels], axis=1)
# Loss
# Both losses consume probabilities (the networks end in sigmoid/softmax),
# hence from_logits=False.
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=False)
category_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
def generator_loss(fake_true_false, fake_pred, label):
    """Adversarial loss (fakes pushed toward 'real') plus auxiliary
    classification loss on the fake images."""
    generated_loss = cross_entropy(tf.ones_like(fake_true_false), fake_true_false) + category_loss(label, fake_pred)
    return generated_loss
def discriminator_loss_real(real_true_false, real_pred, label):
    """Real-sample loss: real/fake BCE plus category cross-entropy."""
    real_loss = cross_entropy(tf.ones_like(real_true_false), real_true_false) + category_loss(label, real_pred)
    return real_loss
def discriminator_loss_fake(fake_true_false):
    """Fake-sample loss: only the real/fake BCE (fakes labelled 0)."""
    fake_loss = cross_entropy(tf.zeros_like(fake_true_false), fake_true_false)
    return fake_loss
# Running-mean loss trackers and accuracy metrics reported during training.
loss_tracker_gen = tf.keras.metrics.Mean(name='gen_loss')
loss_tracker_disc = tf.keras.metrics.Mean(name='disc_loss')
loss_tracker_gen_total = tf.keras.metrics.Mean(name='gen_total_loss')
loss_tracker_disc_total = tf.keras.metrics.Mean(name='disc_total_loss')
loss_tracker_test = tf.keras.metrics.Mean(name='test_gen_loss')
acc_gen = tf.keras.metrics.SparseCategoricalAccuracy(name='gen_acc')
acc_disc = tf.keras.metrics.SparseCategoricalAccuracy(name='disc_acc')
acc_test = tf.keras.metrics.SparseCategoricalAccuracy(name='test_acc')
# Binary accuracies of the discriminator's real/fake head.
rf_d = tf.keras.metrics.BinaryAccuracy(threshold=0.5, name='real')
rf_g = tf.keras.metrics.BinaryAccuracy(threshold=0.5, name='fake')
rf_test = tf.keras.metrics.BinaryAccuracy(threshold=0.5, name='rf')
class GAN(Model):
    def __init__(self, gen_model, disc_model):
        """Wrap a generator and discriminator for custom-train-step training."""
        super(GAN, self).__init__()
        self.gen_model = gen_model
        self.disc_model = disc_model
    def compile(self, g_optimizer, d_optimizer):
        """Store per-network optimizers.

        NOTE(review): deliberately overrides ``keras.Model.compile`` with a
        different signature; the base compile is invoked with no arguments.
        """
        super(GAN, self).compile()
        self.g_optimizer = g_optimizer
        self.d_optimizer = d_optimizer
def train_step(self, data):
x, label = data
feature = x[0]
real_image_name = x[1].numpy()
data_size = tf.shape(feature)[0]
noise = tf.random.normal(shape=(data_size, noise_size))
real_images = np.empty((data_size, image_row, image_column, image_color), dtype=np.float32)
for k0 in range(data_size):
real_images[k0] = image_dictionary[real_image_name[k0][0].decode()]
# Train Discriminator
generated_images = self.gen_model([noise, feature], training=True)
with tf.GradientTape() as tape:
fake_true_false, _ = self.disc_model(generated_images, training=True)
real_true_false, real_category = self.disc_model(real_images, training=True)
disc_loss_real = discriminator_loss_real(real_true_false, real_category, label)
disc_loss_fake = discriminator_loss_fake(fake_true_false)
disc_total_loss = disc_loss_real + disc_loss_fake
gradient = tape.gradient(disc_total_loss, self.disc_model.trainable_variables)
self.d_optimizer.apply_gradients(zip(gradient, self.disc_model.trainable_variables))
acc_disc.update_state(label, real_category)
disc_loss = category_loss(label, real_category)
rf_d.update_state(tf.ones_like(real_true_false), real_true_false)
# Train Generator
with tf.GradientTape() as tape:
generated_images = self.gen_model([noise, feature], training=True)
fake_true_false, fake_category = self.disc_model(generated_images, training=True)
gen_total_loss = generator_loss(fake_true_false, fake_category, label)
gradient = tape.gradient(gen_total_loss, self.gen_model.trainable_variables)
self.g_optimizer.apply_gradients(zip(gradient, self.gen_model.trainable_variables))
rf_g.update_state(tf.ones_like(fake_true_false), fake_true_false)
resnet_image = tf.image.resize(generated_images, (224, 224), method=tf.image.ResizeMethod.LANCZOS3)
resnet_image = (resnet_image + 1) / 2 * 255
resnet_image = tf.keras.applications.resnet.preprocess_input(resnet_image)
resnet_category = resnet40(resnet_image)
acc_gen.update_state(label, resnet_category)
gen_loss = category_loss(label, resnet_category)
loss_tracker_gen.update_state(gen_loss)
loss_tracker_disc.update_state(disc_loss)
loss_tracker_gen_total.update_state(gen_total_loss)
loss_tracker_disc_total.update_state(disc_total_loss)
return {'gen_acc': acc_gen.result(), 'gen_loss': loss_tracker_gen.result(),
'gen_total_loss': loss_tracker_gen_total.result(),
'disc_acc': acc_disc.result(), 'disc_loss': loss_tracker_disc.result(),
'disc_total_loss': loss_tracker_disc_total.result(),
'real': rf_d.result(), 'fake': rf_g.result()}
def test_step(self, data):
feature, label = data
data_size = tf.shape(feature)[0]
noise = tf.random.normal(shape=(data_size, noise_size))
generated_image = self.gen_model([noise, feature], training=False)
fake_real_fake, fake_cat = self.disc_model(generated_image, training=False)
rf_test.update_state(tf.ones_like(fake_real_fake), fake_real_fake)
resnet_image = tf.image.resize(generated_image, (224, 224), method=tf.image.ResizeMethod.LANCZOS3)
resnet_image = (resnet_image + 1) / 2 * 255
resnet_image = tf.keras.applications.resnet.preprocess_input(resnet_image)
resnet_cat = resnet40(resnet_image)
acc_test.update_state(label, resnet_cat)
gen_loss = category_loss(label, resnet_cat)
loss_tracker_test.update_state(gen_loss)
return {'test_acc': acc_test.result(), 'test_gen_loss': loss_tracker_test.result(),
'rf': rf_test.result()}
@property
def metrics(self):
return [acc_gen, acc_disc, acc_test, loss_tracker_gen, loss_tracker_gen_total,
loss_tracker_disc, loss_tracker_disc_total, loss_tracker_test, rf_d, rf_g, rf_test]
# Create Generated Images After Each Epoch and Save Model After Each 100 Epoch
class MakeImage(tf.keras.callbacks.Callback):
    """Keras callback: dump sample/test image grids after every epoch and
    checkpoint the module-level generator/discriminator every 100 epochs
    (the final epoch is checkpointed after training instead)."""
    def on_epoch_end(self, epoch, logs=None):
        generate_and_save_images(self.model.gen_model, epoch+1, seed, sample_feature)
        test_and_save_images(self.model.gen_model, epoch+1)
        if (epoch+1) % 100 == 0 and epoch+1 != Epoch:
            now0 = datetime.datetime.now()
            # Bug fix: the format string was '%y%m%d%_H%M%S' — the stray '%'
            # turned '%H' into the undefined directive '%_H'. The underscore
            # belongs between the date and time parts.
            now_time0 = now0.strftime('%y%m%d_%H%M%S')
            generator_save_name0 = now_time0 + '_Generator_Model'
            discriminator_save_name0 = now_time0 + '_Discriminator_Model'
            generator.save(current_folder / 'Results' / generator_save_name0)
            discriminator.save(current_folder / 'Results' / discriminator_save_name0)
def generate_and_save_images(model, epoch, test_seed, feature):
    """Render one generated image per category and save the grid to disk.

    The model is called without ``training=True``, so layers such as batch
    norm run with their inference-time behavior.
    """
    images = model([test_seed, feature])
    images = (images + 1) / 2  # map tanh output from [-1, 1] into [0, 1]
    n_cols = int(np.ceil(category_size / 8))
    plt.figure(figsize=(8, n_cols))
    for idx in range(category_size):
        plt.subplot(8, n_cols, idx + 1)
        plt.imshow(images[idx])
        plt.axis('off')
    plt.savefig('Results/Generated_Images/image_at_epoch_{:04d}.png'.format(epoch))
    plt.close()
def test_and_save_images(model, epoch):
    """For each category, generate an image from the first matching test
    sample (with a fixed per-category noise seed) and save the grid.

    The model is called without ``training=True``, so layers run with their
    inference-time defaults.
    """
    for i3 in range(category_size):
        for ii2 in range(test_labels.shape[0]):
            if test_labels[ii2] == i3:
                # noise = np.random.normal(size=noise_size)
                # Fixed per-category noise keeps images comparable across epochs.
                noise = seed_for_test[i3]
                generated_image = model([np.array([noise]), np.array([test_features[ii2]])])
                generated_image = (generated_image + 1) / 2
                plt.subplot(8, int(np.ceil(category_size/8)), i3+1)
                plt.imshow(generated_image[0])
                plt.axis('off')
                break
    plt.savefig('Results/Tested_Images/image_at_epoch_{:04d}.png'.format(epoch))
    plt.close()
    # plt.show()
# Train
start = time.time()
seed = tf.random.normal([category_size, noise_size])
seed_for_test = np.random.normal(size=(category_size, noise_size))
sample_feature = np.empty((category_size, feature_size)) # For making GIF
sample_image = np.empty((category_size, image_row, image_column, image_color))
# Save Real Images in Train Dataset
for k in range(category_size):
for m in range(len(base_data)):
if int(base_data[m][-1]) == k:
sample_feature[k] = base_data[m][:feature_size].astype(np.float32)
sample_image_name = base_data[m][feature_size:-1]
sample_img = Image.open('Data/Class_Images_Selected/' + sample_image_name[0])
sample_img_array = np.asarray(sample_img)
sample_img_array = Image.fromarray(sample_img_array)
sample_image[k] = np.array(sample_img_array.resize((image_row, image_column), Image.LANCZOS))
break
fig0 = plt.figure(figsize=(8, int(np.ceil(category_size / 8))))
for i0 in range(category_size):
plt.subplot(8, int(np.ceil(category_size / 8)), i0 + 1)
plt.imshow(sample_image[i0].astype(np.uint8))
plt.axis('off')
plt.savefig('Results/GAN_real_image.png')
plt.close()
# Save Real Images in Test Dataset
test_sample_image = np.empty((category_size, image_row, image_column, image_color))
for k in range(category_size):
for m in range(test_labels.shape[0]):
if test_labels[m] == k:
test_sample_img = Image.open('Data/Class_Images_Selected/' + test_images[m][0])
test_sample_img_array = np.asarray(test_sample_img)
test_sample_img_array = Image.fromarray(test_sample_img_array)
test_sample_image[k] = np.array(test_sample_img_array.resize((image_row, image_column), Image.LANCZOS))
break
fig0 = plt.figure(figsize=(8, int(np.ceil(category_size / 8))))
for i0 in range(category_size):
plt.subplot(8, int(np.ceil(category_size / 8)), i0 + 1)
plt.imshow(test_sample_image[i0].astype(np.uint8))
plt.axis('off')
plt.savefig('Results/GAN_real_test_image.png')
plt.close()
gan = GAN(gen_model=generator, disc_model=discriminator)
gan.compile(g_optimizer=generator_optimizer, d_optimizer=discriminator_optimizer)
history = gan.fit(x=[train_features, train_images], y=train_labels, epochs=Epoch, batch_size=batch_size,
validation_data=(test_features, test_labels), verbose=2,
callbacks=[MakeImage()])
total_time = time.time() - start
print(f'Total Time: {time_string(total_time)}')
# Save Generator and Discriminator
now = datetime.datetime.now()
# Bug fix: the format string was '%y%m%d%_H%M%S' — the stray '%' produced the
# undefined strftime directive '%_H'; the underscore belongs before '%H'.
now_time = now.strftime('%y%m%d_%H%M%S')
generator_save_name = now_time + '_Generator_Model'
discriminator_save_name = now_time + '_Discriminator_Model'
generator.save(current_folder/'Results'/generator_save_name)
discriminator.save(current_folder/'Results'/discriminator_save_name)
# Persist all training curves so runs can be compared offline.
save_name = 'Results/' + now_time + '_GAN_Results'
np.savez(save_name,
         gen_accs=history.history['gen_acc'], gen_losses=history.history['gen_loss'],
         gen_total_loss=history.history['gen_total_loss'], disc_total_loss=history.history['disc_total_loss'],
         test_gen_accs=history.history['val_test_acc'], test_gen_losses=history.history['val_test_gen_loss'],
         fake_rf=history.history['fake'], real_rf=history.history['real'], test_rf=history.history['val_rf'])
# Create a GIF
anim_file = now_time + '_GAN_generated_images.gif'
with imageio.get_writer('Results/Generated_Images/' + anim_file, mode='I') as writer:
filenames = glob.glob('Results/Generated_Images/image*.png')
filenames = sorted(filenames)
last = -1
for ii, filename in enumerate(filenames):
frame = 2*(ii**0.5)
if round(frame) > round(last):
last = frame
else:
continue
image = imageio.imread(filename)
writer.append_data(image)
image = imageio.imread(filename)
writer.append_data(image)
# Create a GIF
anim_file2 = now_time + '_GAN_tested_images.gif'
with imageio.get_writer('Results/Tested_Images/' + anim_file2, mode='I') as writer:
filenames2 = glob.glob('Results/Tested_Images/image*.png')
filenames2 = sorted(filenames2)
last = -1
for ii, filename2 in enumerate(filenames2):
frame = 2*(ii**0.5)
if round(frame) > round(last):
last = frame
else:
continue
image2 = imageio.imread(filename2)
writer.append_data(image2)
image2 = imageio.imread(filename2)
writer.append_data(image2)
# Plot
fig1 = plt.figure()
plt.subplot(2, 1, 1)
plt.plot(history.history['gen_acc'], label='Generator Accuracy')
plt.plot([0, Epoch], [0.025, 0.025], color='gray', linestyle='dashed')
plt.xlabel('Epoch')
plt.ylabel('Train Accuracy')
plt.legend(loc='lower right')
plt.subplot(2, 1, 2)
plt.plot(history.history['gen_loss'], label='Generator Loss')
plt.xlabel('Epoch')
plt.ylabel('Train Loss')
plt.legend(loc='upper right')
fig2 = plt.figure()
plt.subplot(2, 1, 1)
plt.plot(history.history['disc_acc'], label='Discriminator Accuracy')
plt.plot([0, Epoch], [0.025, 0.025], color='gray', linestyle='dashed')
plt.xlabel('Epoch')
plt.ylabel('Train Accuracy')
plt.legend(loc='lower right')
plt.subplot(2, 1, 2)
plt.plot(history.history['disc_loss'], label='Discriminator Loss')
plt.xlabel('Epoch')
plt.ylabel('Train Loss')
plt.legend(loc='upper right')
fig3 = plt.figure()
plt.subplot(2, 1, 1)
plt.plot(history.history['gen_total_loss'], label='Generator Total Loss')
plt.ylabel('Total Loss')
plt.legend()
plt.subplot(2, 1, 2)
plt.plot(history.history['disc_total_loss'], label='Discriminator Total Loss')
plt.xlabel('Epoch')
plt.ylabel('Total Loss')
plt.legend()
fig4 = plt.figure()
plt.subplot(2, 1, 1)
plt.plot(history.history['val_test_acc'], label='Generator Test Accuracy')
plt.plot([0, Epoch], [0.025, 0.025], color='gray', linestyle='dashed')
plt.xlabel('Epoch')
plt.ylabel('Test Accuracy')
plt.legend(loc='lower right')
plt.subplot(2, 1, 2)
plt.plot(history.history['val_test_gen_loss'], label='Generator Loss')
plt.xlabel('Epoch')
plt.ylabel('Test Loss')
plt.legend(loc='upper left')
fig5 = plt.figure()
plt.plot(history.history['fake'], label='Fake True False')
plt.plot(history.history['real'], label='Real True False')
plt.xlabel('Epoch')
plt.legend()
fig6 = plt.figure()
plt.plot(history.history['val_rf'], label='Test True False')
plt.xlabel('Epoch')
plt.legend()
plt.show()
| shimihirouci/Improve_Imagination | Training_GAN.py | Training_GAN.py | py | 23,218 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "silence_tensorflow.silence_tensorflow",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tensorflow.config.run_functions_eagerly",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tensorflow.config",
"line_number": 19,
"usage_type": "attri... |
28518466371 | from flask import Blueprint, render_template, request, redirect
from database import mysql
# blueprint setup
update = Blueprint('update', __name__)
@update.route('/update')
def default():
    """Render the update form pre-filled from the query-string parameters."""
    book = {
        'id': request.args.get('id'),
        'name': request.args.get('name'),
    }
    return render_template('update.html', book=book)
@update.route('/updateBook', methods=['POST'])
def updateBook():
    """Handle the POSTed update form: persist the new title, then redirect.

    Expects form fields ``id`` (BookID) and ``bookTitle`` (the new title).
    """
    form = request.form
    book_id = form['id']      # renamed: avoid shadowing the builtin ``id``
    title = form['bookTitle']
    db = mysql.get_db()       # fetch the connection once for execute+commit
    cur = db.cursor()
    try:
        # Parameterized query — safe against SQL injection.
        cur.execute("UPDATE books SET BookTitle=%s WHERE BookID=%s", (title, book_id))
        db.commit()
    finally:
        cur.close()           # release the cursor even if execute fails
    return redirect('/read')
{
"api_name": "flask.Blueprint",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "flask.r... |
9470051726 | import os
import numpy as np
from os.path import isfile
import torch
import torch.nn.functional as F
EPS = 1e-6
def assert_same_shape(t1, t2):
    """Assert that two array-like objects have exactly the same shape.

    Compares the full shape tuples. The previous ``zip`` loop silently
    truncated to the shorter shape, so a rank mismatch such as (2, 3) vs
    (2, 3, 4) passed; it now fails.
    """
    assert tuple(t1.shape) == tuple(t2.shape)
def print_stats_py(name, tensor):
    """Print name, dtype, and min/mean/max summary statistics of an array."""
    stats = (name, tensor.dtype, np.min(tensor), np.mean(tensor), np.max(tensor))
    print('%s (%s) min = %.2f, mean = %.2f, max = %.2f' % stats)
def tensor2summ(tensor, permute_dim=False):
    """Split a B x S(=2) x ... tensor along the sequence dim into a 2-tuple.

    With permute_dim=True the channel dim is moved behind the spatial dims
    first: 5-D (B, S, C, H, W) -> (B, S, H, W, C) and 6-D
    (B, S, C, H, W, D) -> (B, S, H, W, D, C).
    """
    assert tensor.ndim in (5, 6)
    assert tensor.size()[1] == 2  # sequence length must be exactly 2
    if permute_dim:
        order = (0, 1, 3, 4, 5, 2) if tensor.ndim == 6 else (0, 1, 3, 4, 2)
        tensor = tensor.permute(*order)
    return torch.unbind(tensor, dim=1)
def normalize_single(d):
    """Min-max normalize a tensor into [0, 1]; EPS guards constant input."""
    lo, hi = torch.min(d), torch.max(d)
    return (d - lo) / (EPS + (hi - lo))
def normalize(d):
    """Min-max normalize each batch element of `d` (B x ...) into [0, 1].

    Keeps the original semantics — the output is a fresh tensor of torch's
    default float dtype — but allocates it directly on `d`'s device instead
    of CPU-allocate-then-`.cuda()`, which also covers non-CUDA accelerators.
    """
    out = torch.zeros(d.size(), device=d.device)
    B = d.size()[0]
    for b in range(B):
        out[b] = normalize_single(d[b])
    return out
def reduce_masked_mean(x, mask, dim=None, keepdim=False):
    """Mean of `x` over the entries where `mask` is nonzero.

    `x` and `mask` must have identical shapes; `dim` may be an int, a list
    of dims, or None (reduce everything). EPS in the denominator avoids
    division by zero when the mask is all zeros.
    """
    assert x.size() == mask.size()
    weighted = x * mask
    if dim is None:
        total = torch.sum(weighted)
        count = EPS + torch.sum(mask)
    else:
        total = torch.sum(weighted, dim=dim, keepdim=keepdim)
        count = EPS + torch.sum(mask, dim=dim, keepdim=keepdim)
    return total / count
def pack_seqdim(tensor, B):
    """Fold the sequence dim into the batch dim: (B, S, ...) -> (B*S, ...)."""
    shape = list(tensor.shape)
    assert shape[0] == B
    return torch.reshape(tensor, [shape[0] * shape[1]] + shape[2:])
def unpack_seqdim(tensor, B):
    """Split a packed batch back out: (B*S, ...) -> (B, S, ...)."""
    shape = list(tensor.shape)
    assert shape[0] % B == 0
    S = shape[0] // B
    return torch.reshape(tensor, [B, S] + shape[1:])
def gridcloud3D(B, Z, Y, X, norm=False):
    """Flatten a batched 3-D meshgrid into xyz coordinates, B x N x 3."""
    grid_z, grid_y, grid_x = meshgrid3D(B, Z, Y, X, norm=norm)
    coords = [torch.reshape(g, [B, -1]) for g in (grid_x, grid_y, grid_z)]
    # stacked as (x, y, z) per point
    return torch.stack(coords, dim=2)
def gridcloud2D(B, Y, X, norm=False):
    """Flatten a batched 2-D meshgrid into xy coordinates, B x N x 2."""
    grid_y, grid_x = meshgrid2D(B, Y, X, norm=norm)
    coords = [torch.reshape(g, [B, -1]) for g in (grid_x, grid_y)]
    # stacked as (x, y) per point
    return torch.stack(coords, dim=2)
def gridcloud3D_py(Z, Y, X):
    """Flatten a 3-D numpy meshgrid into an N x 3 array of xyz coordinates."""
    grid_z, grid_y, grid_x = meshgrid3D_py(Z, Y, X)
    flat = [g.reshape(-1) for g in (grid_x, grid_y, grid_z)]
    # stacked as (x, y, z) per point
    return np.stack(flat, axis=1)
def meshgrid2D_py(Y, X):
    """Return two (Y, X) float arrays holding each cell's row and col index."""
    ys = np.reshape(np.linspace(0.0, Y - 1, Y), [Y, 1])
    xs = np.reshape(np.linspace(0.0, X - 1, X), [1, X])
    grid_y = np.tile(ys, [1, X])
    grid_x = np.tile(xs, [Y, 1])
    return grid_y, grid_x
def gridcloud2D_py(Y, X):
    """Flatten a 2-D numpy meshgrid into an N x 2 array of xy coordinates."""
    grid_y, grid_x = meshgrid2D_py(Y, X)
    flat = [g.reshape(-1) for g in (grid_x, grid_y)]
    # stacked as (x, y) per point
    return np.stack(flat, axis=1)
def normalize_grid3D(grid_z, grid_y, grid_x, Z, Y, X, clamp_extreme=True):
    """Rescale voxel coordinates from [0, dim-1] into [-1, 1], per axis.

    With clamp_extreme=True, out-of-bounds coordinates are clipped to [-2, 2].
    """
    def _scale(g, n):
        g = 2.0*(g / float(n-1)) - 1.0
        if clamp_extreme:
            g = torch.clamp(g, min=-2.0, max=2.0)
        return g
    return _scale(grid_z, Z), _scale(grid_y, Y), _scale(grid_x, X)
def normalize_grid2D(grid_y, grid_x, Y, X, clamp_extreme=True):
    """Rescale pixel coordinates from [0, dim-1] into [-1, 1], per axis.

    With clamp_extreme=True, out-of-bounds coordinates are clipped to [-2, 2].
    """
    def _scale(g, n):
        g = 2.0*(g / float(n-1)) - 1.0
        if clamp_extreme:
            g = torch.clamp(g, min=-2.0, max=2.0)
        return g
    return _scale(grid_y, Y), _scale(grid_x, X)
def normalize_gridcloud3D(xyz, Z, Y, X, clamp_extreme=True):
    """Rescale an (..., 3) xyz pointcloud from voxel coords into [-1, 1]."""
    x = 2.0*(xyz[..., 0] / float(X-1)) - 1.0
    y = 2.0*(xyz[..., 1] / float(Y-1)) - 1.0
    z = 2.0*(xyz[..., 2] / float(Z-1)) - 1.0
    out = torch.stack([x, y, z], dim=-1)
    if clamp_extreme:
        out = torch.clamp(out, min=-2.0, max=2.0)
    return out
def normalize_gridcloud2D(xy, Y, X, clamp_extreme=True):
    """Rescale an (..., 2) xy pointcloud from pixel coords into [-1, 1]."""
    x = 2.0*(xy[..., 0] / float(X-1)) - 1.0
    y = 2.0*(xy[..., 1] / float(Y-1)) - 1.0
    out = torch.stack([x, y], dim=-1)
    if clamp_extreme:
        out = torch.clamp(out, min=-2.0, max=2.0)
    return out
def meshgrid3D_yxz(B, Y, X, Z):
    """Return three B x Y x X x Z coordinate grids, in (y, x, z) order.

    This dim ordering matches the usual Y=height, X=width, Z=depth layout.
    """
    def _axis_grid(n, shape, reps):
        g = torch.reshape(torch.linspace(0.0, n-1, n), shape)
        return g.repeat(*reps)
    grid_y = _axis_grid(Y, [1, Y, 1, 1], (B, 1, X, Z))
    grid_x = _axis_grid(X, [1, 1, X, 1], (B, Y, 1, Z))
    grid_z = _axis_grid(Z, [1, 1, 1, Z], (B, Y, X, 1))
    return grid_y, grid_x, grid_z
def meshgrid2D(B, Y, X, stack=False, norm=False, device='cuda'):
    """Return a batched 2-D meshgrid of pixel coordinates, each B x Y x X.

    Args:
        B, Y, X: batch size, height, width.
        stack: if True, return a single B x Y x X x 2 tensor stacked in
            (x, y) order (matching torch.nn.functional.grid_sample's
            convention); otherwise return (grid_y, grid_x).
        norm: if True, rescale coordinates into [-1, 1] via normalize_grid2D.
        device: device for the grids. Defaults to 'cuda', which was the
            hard-coded behavior before this parameter was added.
    """
    grid_y = torch.linspace(0.0, Y-1, Y, device=torch.device(device))
    grid_y = torch.reshape(grid_y, [1, Y, 1])
    grid_y = grid_y.repeat(B, 1, X)
    grid_x = torch.linspace(0.0, X-1, X, device=torch.device(device))
    grid_x = torch.reshape(grid_x, [1, 1, X])
    grid_x = grid_x.repeat(B, Y, 1)
    if norm:
        grid_y, grid_x = normalize_grid2D(
            grid_y, grid_x, Y, X)
    if stack:
        # note we stack in xy order
        # (see https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.grid_sample)
        grid = torch.stack([grid_x, grid_y], dim=-1)
        return grid
    else:
        return grid_y, grid_x
def meshgrid3D(B, Z, Y, X, stack=False, norm=False, device='cuda'):
    """Return a batched 3-D meshgrid of voxel coordinates, each B x Z x Y x X.

    Args:
        B, Z, Y, X: batch size, depth, height, width.
        stack: if True, return a single B x Z x Y x X x 3 tensor stacked in
            (x, y, z) order (matching torch.nn.functional.grid_sample's
            convention); otherwise return (grid_z, grid_y, grid_x).
        norm: if True, rescale coordinates into [-1, 1] via normalize_grid3D.
        device: device for the grids. Defaults to 'cuda', which was the
            hard-coded behavior before this parameter was added.
    """
    grid_z = torch.linspace(0.0, Z-1, Z, device=torch.device(device))
    grid_z = torch.reshape(grid_z, [1, Z, 1, 1])
    grid_z = grid_z.repeat(B, 1, Y, X)
    grid_y = torch.linspace(0.0, Y-1, Y, device=torch.device(device))
    grid_y = torch.reshape(grid_y, [1, 1, Y, 1])
    grid_y = grid_y.repeat(B, Z, 1, X)
    grid_x = torch.linspace(0.0, X-1, X, device=torch.device(device))
    grid_x = torch.reshape(grid_x, [1, 1, 1, X])
    grid_x = grid_x.repeat(B, Z, Y, 1)
    if norm:
        grid_z, grid_y, grid_x = normalize_grid3D(
            grid_z, grid_y, grid_x, Z, Y, X)
    if stack:
        # note we stack in xyz order
        # (see https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.grid_sample)
        grid = torch.stack([grid_x, grid_y, grid_z], dim=-1)
        return grid
    else:
        return grid_z, grid_y, grid_x
def meshgrid3D_py(Z, Y, X, stack=False, norm=False):
    """Numpy counterpart of meshgrid3D: three Z x Y x X index grids.

    Args:
        stack: if True, return one Z x Y x X x 3 array stacked in (x, y, z)
            order; otherwise return (grid_z, grid_y, grid_x).
        norm: if True, rescale into [-1, 1] via normalize_grid3D.
            NOTE(review): normalize_grid3D clamps with torch.clamp, which
            expects tensors — confirm norm=True is ever used with numpy here.
    """
    grid_z = np.linspace(0.0, Z-1, Z)
    grid_z = np.reshape(grid_z, [Z, 1, 1])
    grid_z = np.tile(grid_z, [1, Y, X])
    grid_y = np.linspace(0.0, Y-1, Y)
    grid_y = np.reshape(grid_y, [1, Y, 1])
    grid_y = np.tile(grid_y, [Z, 1, X])
    grid_x = np.linspace(0.0, X-1, X)
    grid_x = np.reshape(grid_x, [1, 1, X])
    grid_x = np.tile(grid_x, [Z, Y, 1])
    if norm:
        grid_z, grid_y, grid_x = normalize_grid3D(
            grid_z, grid_y, grid_x, Z, Y, X)
    if stack:
        # note we stack in xyz order
        # Bug fix: np.stack takes `axis`, not `dim` (the old call raised TypeError).
        grid = np.stack([grid_x, grid_y, grid_z], axis=-1)
        return grid
    else:
        return grid_z, grid_y, grid_x
def sub2ind(height, width, y, x):
    """Convert (y, x) subscripts to flat row-major indices of a height x width grid."""
    return x + y * width
def sql2_on_axis(x, axis, keepdim=True):
    """Sum of squares of x along `axis` (squared L2 norm)."""
    return torch.sum(x * x, axis, keepdim=keepdim)
def l2_on_axis(x, axis, keepdim=True):
    """L2 norm of x along `axis`, with EPS inside the sqrt for stability."""
    sq = sql2_on_axis(x, axis, keepdim=keepdim)
    return torch.sqrt(EPS + sq)
def l1_on_axis(x, axis, keepdim=True):
    """Sum of absolute values of x along `axis` (L1 norm)."""
    return torch.sum(x.abs(), axis, keepdim=keepdim)
def sub2ind3D(depth, height, width, d, h, w):
    """Convert (d, h, w) subscripts to flat indices of a Z x Y x X volume.

    When gathering/scattering with these indices the tensor should be laid
    out as Z x Y x X.
    """
    return w + width * (h + height * d)
def gradient3D(x, absolute=False, square=False):
    """Forward-difference gradients of a B x C x D x H x W tensor along z, y, x.

    The last slice along each spatial axis is zero-padded so each output
    keeps the input shape. Optionally take absolute values and/or squares
    (in that order) of the differences.
    """
    zeros = torch.zeros_like(x)
    dz = torch.cat([x[:, :, 1:] - x[:, :, :-1], zeros[:, :, 0:1]], dim=2)
    dy = torch.cat([x[:, :, :, 1:] - x[:, :, :, :-1], zeros[:, :, :, 0:1]], dim=3)
    dx = torch.cat([x[:, :, :, :, 1:] - x[:, :, :, :, :-1], zeros[:, :, :, :, 0:1]], dim=4)
    if absolute:
        dz, dy, dx = torch.abs(dz), torch.abs(dy), torch.abs(dx)
    if square:
        dz, dy, dx = dz ** 2, dy ** 2, dx ** 2
    return dz, dy, dx
def gradient2D(x, absolute=False, square=False):
    """Forward-difference gradients of a B x C x H x W tensor along h and w.

    The last row/column is zero-padded so each output keeps the input
    shape. Optionally take absolute values and/or squares (in that order).
    """
    zeros = torch.zeros_like(x)
    dh = torch.cat([x[:, :, 1:, :] - x[:, :, :-1, :], zeros[:, :, 0:1, :]], dim=2)
    dw = torch.cat([x[:, :, :, 1:] - x[:, :, :, :-1], zeros[:, :, :, 0:1]], dim=3)
    if absolute:
        dh, dw = torch.abs(dh), torch.abs(dw)
    if square:
        dh, dw = dh ** 2, dw ** 2
    return dh, dw
def matmul2(mat1, mat2):
    """Matrix product of two tensors."""
    return mat1 @ mat2
def matmul3(mat1, mat2, mat3):
    """Chained matrix product mat1 @ (mat2 @ mat3), right-associated."""
    return mat1 @ (mat2 @ mat3)
def matmul4(mat1, mat2, mat3, mat4):
    """Chained matrix product (mat1 @ (mat2 @ mat3)) @ mat4."""
    return (mat1 @ (mat2 @ mat3)) @ mat4
def downsample(img, factor):
    """Average-pool a B x C x H x W image down by `factor` per spatial dim."""
    pool = torch.nn.AvgPool2d(factor)
    return pool(img)
def downsample3D(vox, factor):
    """Average-pool a B x C x D x H x W volume down by `factor` per spatial dim."""
    pool = torch.nn.AvgPool3d(factor)
    return pool(vox)
def downsample3Dflow(flow, factor):
    """Average-pool a 3-D flow field and rescale its magnitudes by 1/factor,
    so displacement values stay consistent with the coarser resolution."""
    pooled = torch.nn.AvgPool3d(factor)(flow)
    return pooled * (1. / factor)
def l2_normalize(x, dim=1):
    """Scale x to unit L2 norm along `dim` (dim 1, the channel dim, by default)."""
    unit = F.normalize(x, p=2, dim=dim)
    return unit
def hard_argmax3D(tensor):
    """Per-batch integer (z, y, x) coordinates of the max of a B x Z x Y x X volume."""
    B, Z, Y, X = tensor.shape
    flat_idx = torch.argmax(tensor.reshape(B, -1), dim=1)
    plane = Y * X
    # Unravel the flat index back into 3-D coordinates.
    loc_z = (flat_idx // plane).reshape(B)
    rem = flat_idx % plane
    loc_y = (rem // X).reshape(B)
    loc_x = (rem % X).reshape(B)
    return loc_z, loc_y, loc_x
def argmax3D(heat, hard=False):
    """Per-batch (z, y, x) peak location of a B x Z x Y x X heatmap.

    hard=True: integer argmax coordinates, cast to float.
    hard=False: soft-argmax — softmax over the flattened volume, then the
    probability-weighted mean coordinate (differentiable).
    NOTE(review): the soft path builds its grids via meshgrid3D, which
    allocates on CUDA — so `heat` is presumably expected to be a CUDA
    tensor there; confirm against callers.
    """
    B, Z, Y, X = list(heat.shape)
    if hard:
        # hard argmax
        loc_z, loc_y, loc_x = hard_argmax3D(heat)
        loc_z = loc_z.float()
        loc_y = loc_y.float()
        loc_x = loc_x.float()
    else:
        heat = heat.reshape(B, Z*Y*X)
        prob = torch.nn.functional.softmax(heat, dim=1)
        grid_z, grid_y, grid_x = meshgrid3D(B, Z, Y, X)
        grid_z = grid_z.reshape(B, -1)
        grid_y = grid_y.reshape(B, -1)
        grid_x = grid_x.reshape(B, -1)
        loc_z = torch.sum(grid_z*prob, dim=1)
        loc_y = torch.sum(grid_y*prob, dim=1)
        loc_x = torch.sum(grid_x*prob, dim=1)
        # these are B
    return loc_z, loc_y, loc_x
| ayushjain1144/SeeingByMoving | frustum_pointnet/kitti/utils_basic.py | utils_basic.py | py | 12,301 | python | en | code | 22 | github-code | 36 | [
{
"api_name": "numpy.min",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.unbind",
"line_number": 28,
... |
16731252114 | '''
Table of Contents
Functions and Interdependencies:
proj
orthogonalize
- proj
OLS
EV
pairwise_similarity
best_permutation
- pairwise_similarity
self_similarity_pairwise
- best_permutation
'''
import numpy as np
import scipy.optimize
# import sklearn.decomposition
from numba import njit, prange, jit
import torch
from tqdm import tqdm
from . import indexing
import copy
import time
from functools import partial
def proj(v1, v2):
    '''
    Projects one or more vectors (columns of v1) onto one or more vectors (v2).
    RH 2021

    Args:
        v1 (ndarray or Tensor):
            vector set 1. A single vector or a 2-D array whose columns are
            the vectors.
        v2 (ndarray or Tensor):
            vector set 2, same layout. If None, v2 is set to v1.

    Returns:
        proj_vec:
            v1 projected onto v2. shape: (v1.shape[0], v1.shape[1], v2.shape[1])
        proj_score:
            projection scores. shape: (v1.shape[1], v2.shape[1])
    '''
    # Pick the numpy or torch backend based on the input type.
    if isinstance(v1, np.ndarray):
        from opt_einsum import contract
        einsum = contract
        norm = partial(np.linalg.norm, axis=0, keepdims=True)
    elif isinstance(v1, torch.Tensor):
        from torch import einsum
        norm = partial(torch.linalg.norm, dim=0, keepdim=True)

    if v2 is None:
        v2 = v1
    v1 = v1[:, None] if v1.ndim < 2 else v1
    v2 = v2[:, None] if v2.ndim < 2 else v2
    assert v1.shape[0] == v2.shape[0], f"v1.shape[0] must equal v2.shape[0]. Got v1.shape[0]={v1.shape[0]} and v2.shape[0]={v2.shape[0]}"

    unit = v2 / norm(v2)          # unit-length columns of v2
    scores = v1.T @ unit          # projection lengths onto each unit vector
    vecs = einsum('ik,jk->ijk', unit, scores)
    return vecs, scores
def vector_angle(v1, v2=None, mode='cosine_similarity'):
    '''
    Calculates the angle between two vector sets.
    RH 2023

    Args:
        v1 (ndarray or Tensor):
            vector set 1. A single vector or a 2-D array whose columns are
            the vectors.
        v2 (ndarray or Tensor):
            vector set 2, same layout. If None, v2 is set to v1.
        mode (str):
            'deg': angle in degrees
            'rad': angle in radians
            'cosine_similarity': cosine similarity (default)

    Returns:
        angle:
            pairwise result, shape (v1.shape[1], v2.shape[1])
    '''
    # Pick the numpy or torch backend based on the input type.
    if isinstance(v1, np.ndarray):
        norm = partial(np.linalg.norm, axis=0, keepdims=True)
        arccos = np.arccos
        rad2deg = np.rad2deg
    elif isinstance(v1, torch.Tensor):
        norm = partial(torch.linalg.norm, dim=0, keepdim=True)
        arccos = torch.acos
        rad2deg = torch.rad2deg

    if v2 is None:
        v2 = v1
    v1 = v1[:, None] if v1.ndim < 2 else v1
    v2 = v2[:, None] if v2.ndim < 2 else v2
    assert v1.shape[0] == v2.shape[0], f"v1.shape[0] must equal v2.shape[0]. Got v1.shape[0]={v1.shape[0]} and v2.shape[0]={v2.shape[0]}"

    u1 = v1 / norm(v1)
    u2 = v2 / norm(v2)
    cos_sim = u1.T @ u2
    if mode == 'cosine_similarity':
        return cos_sim
    if mode == 'rad':
        return arccos(cos_sim)
    if mode == 'deg':
        return rad2deg(arccos(cos_sim))
    raise ValueError(f"mode must be 'cosine_similarity', 'deg', or 'rad'. Got {mode}")
def orthogonalize(v1, v2, method='OLS', device='cpu', thresh_EVR_PCA=1e-15):
'''
Orthogonalizes one or more vectors (columns of v1) relative to another set
of vectors (v2). Subtracts the projection of v1 onto v2 off of v1.
RH 2021-2023
Args:
v1 (ndarray):
vector set 1. (y_true)
Either a single vector or a 2-D array where the columns
are the vectors. shape: (n_samples, n_vectors)
v2 (ndarray):
vector set 2. (y_pred)
Either a single vector or a 2-D array where the columns
are the vectors. shape: (n_samples, n_vectors)
method (str):
'serial': uses a gram-schmidt like iterative process to orthogonalize
v1 off of v2. This method may have some minor numerical instability
issues when v2 contains many (>100) vectors.
'OLS': uses OLS regression to orthogonalize v1 off of v2. This method
is numerically stable when v2 has many columns, and usually faster.
However, OLS can have issues when v2 is not full rank or singular.
device (str):
Device to use for torch tensors.
Default is 'cpu'.
thresh_EVR_PCA (scalar):
Threshold for the Explained Variance Ratio (EVR) of the PCA components.
Set so that it might act as a cutoff for redundant components.
If the EVR of a component is below this threshold, it is not used to
orthogonalize v1. This is useful when v2 is singular or nearly singular.
Returns:
v1_orth (ndarray):
vector set 1 with the projections onto vector set 2 subtracted off.
Same size as v1.
EVR (ndarray):
Explained Variance Ratio for each column of v1.
Amount of variance that all the vectors in v2 can explain for each
vector in v1.
When v1 is z-scored, EVR is equivalent to pearsons R^2;
as in pairwise_similarity(OLS(v2, v1)[1] , v1)**2
EVR_total (scalar):
total amount of variance explained in v1 by v2
pca_dict (dict):
dictionary containing the PCA outputs:
'comps': PCA components
'scores': PCA scores
'singVals': PCA singular values
'EVR': PCA Explained Variance Ratio
'PCs_above_thresh': boolean array indicating which PCs are above
the threshold for EVR
'''
if isinstance(v1, np.ndarray):
v1 = torch.as_tensor(v1)
return_numpy = True
else:
return_numpy = False
if isinstance(v2, np.ndarray):
v2 = torch.as_tensor(v2)
v1 = v1.to(device)
v2 = v2.to(device)
if v1.ndim < 2:
v1 = v1[:,None]
if v2.ndim < 2:
v2 = v2[:,None]
# I'm pretty sure using PCA is fine for this.
if v2.shape[1] > 1:
from .decomposition import torch_pca
comps, pc_scores, singVals, pc_EVR = torch_pca(
X_in=v2,
rank=v2.shape[1],
mean_sub=True,
device=device,
return_cpu=False,
return_numpy=isinstance(v1, np.ndarray),
cuda_empty_cache=False
)
pca_dict = {
'comps': comps,
'scores': pc_scores,
'singVals': singVals,
'EVR': pc_EVR,
'PCs_above_thresh': pc_EVR > thresh_EVR_PCA,
}
pc_scores_aboveThreshold = pcsat = pc_scores[:, pca_dict['PCs_above_thresh']]
else:
pcsat = v2
pca_dict = None
# decomp = sklearn.decomposition.PCA(n_components=v2.shape[1])
# pc_scores = decomp.fit_transform(v2)
v1_means = torch.mean(v1, dim=0)
if method == 'serial':
# Serial orthogonalization.
v1_orth = copy.deepcopy(v1 - v1_means)
for ii in range(pcsat.shape[1]):
proj_vec = proj(v1_orth , pcsat[:,ii])[0]
v1_orth = v1_orth.squeeze() - proj_vec.squeeze()
elif method == 'OLS':
# Ordinary Least Squares.
X = torch.cat((pcsat, torch.ones((pcsat.shape[0], 1), dtype=pcsat.dtype, device=device)), dim=1)
theta = torch.linalg.inv(X.T @ X) @ X.T @ (v1 - v1_means)
y_rec = X @ theta
v1_orth = v1 - y_rec + X[:,-1][:,None] * theta[-1] ## this adds back the bias term
v1_orth = v1_orth + v1_means
EVR = 1 - (torch.var(v1_orth, dim=0) / torch.var(v1, dim=0))
EVR_total = 1 - ( torch.sum(torch.var(v1_orth, dim=0), dim=0) / torch.sum(torch.var(v1, dim=0), dim=0) )
if return_numpy:
v1_orth = v1_orth.cpu().numpy()
EVR = EVR.cpu().numpy()
EVR_total = EVR_total.cpu().numpy()
if pca_dict is not None:
for key in pca_dict.keys():
if isinstance(pca_dict[key], torch.Tensor):
pca_dict[key] = pca_dict[key].cpu().numpy()
return v1_orth, EVR, EVR_total, pca_dict
@njit
def pair_orth_helper(v1, v2):
"""
Helper function for main pairwise_orthogonalization
function. Performs the pairwise orthogonalization
by subtracting off v2 from v1. Uses numba to speed
up the computation.
v1 = v1 - proj(v1 onto v2)
RH 2021
Args:
v1 (ndarray):
Vector set 1. Columns are vectors.
v2 (ndarray):
Vector set 2. Columns are vectors.
Returns:
v1_orth (ndarray):
v1 - proj(v1 onto v2)
"""
return v1 - (np.diag(np.dot(v1.T, v2)) / np.diag(np.dot(v2.T, v2))) * v2
def pairwise_orthogonalization(v1, v2, center:bool=True):
"""
Orthogonalizes columns of v2 off of the columns of v1
and returns the orthogonalized v1 and the explained
variance ratio of v2 off of v1.
v1: y_true, v2: y_pred
Since it's just pairwise, there should not be any
numerical instability issues.
RH 2021
Args:
v1 (ndarray):
y_true
Vector set 1. Either a single vector or a 2-D
array where the columns are the vectors.
v2 (ndarray):
y_pred
Vector set 2. Either a single vector or a 2-D
array where the columns are the vectors.
center (bool):
Whether to center the vectors.
Centering prevents negative EVR values.
Returns:
v1_orth (ndarray):
Vector set 1 with the projections onto vector
set 2 subtracted off.
Same size as v1.
EVR (ndarray):
Explained Variance Ratio for each column of v1.
Amount of variance that all the vectors in v2
can explain for each vector in v1.
EVR_total_weighted (scalar):
Average amount of variance explained in v1 by v2
weighted by the variance of each column of v1.
EVR_total_unweighted (scalar):
Average amount of variance explained in v1 by v2
"""
assert v1.ndim == v2.ndim
if v1.ndim==1:
v1 = v1[:,None]
v2 = v2[:,None]
assert v1.shape[1] == v2.shape[1]
assert v1.shape[0] == v2.shape[0]
if center:
v1 = v1 - np.mean(v1, axis=0)
v2 = v2 - np.mean(v2, axis=0)
v1_orth = pair_orth_helper(v1, v2)
v1_var = np.var(v1, axis=0)
EVR = 1 - (np.var(v1_orth, axis=0) / v1_var)
EVR_total_weighted = np.sum(v1_var * EVR) / np.sum(v1_var)
EVR_total_unweighted = np.mean(EVR)
return v1_orth, EVR, EVR_total_weighted, EVR_total_unweighted
@torch.jit.script
def pairwise_orthogonalization_torch_helper(v1, v2, center:bool=True):
    """TorchScript core for ``pairwise_orthogonalization_torch``.

    Column-wise, subtracts the projection of v1 onto v2 and computes
    explained-variance ratios. Uses NaN-tolerant reductions (nansum/nanmean)
    for the projection coefficients and summary statistics.
    """
    # assert v1.ndim == v2.ndim
    if v1.ndim==1:
        v1 = v1[:,None]
    if v2.ndim==1:
        v2 = v2[:,None]
    # assert v1.shape[1] == v2.shape[1]
    assert v1.shape[0] == v2.shape[0]
    if center:
        # Mean-subtract each column; guarantees zero correlation between
        # v1_orth and v2 and prevents negative EVR.
        v1 = v1 - torch.nanmean(v1, dim=0)
        v2 = v2 - torch.nanmean(v2, dim=0)
    # v1_orth = v1 - (torch.diag(torch.matmul(v1.T, v2)) / torch.diag(torch.matmul(v2.T, v2)))*v2
    # Per-column projection coefficient: <v1_i, v2_i> / <v2_i, v2_i>.
    v1_orth = v1 - (torch.nansum(v1 * v2, dim=0) / torch.nansum(v2 * v2, dim=0) )*v2
    v1_var = torch.var(v1, dim=0)
    # Fraction of each v1 column's variance removed by the projection.
    EVR = 1 - (torch.var(v1_orth, dim=0) / v1_var)
    # Weighted mean uses each column's variance as its weight.
    EVR_total_weighted = torch.nansum(v1_var * EVR) / torch.sum(v1_var)
    EVR_total_unweighted = torch.nanmean(EVR)
    return v1_orth, EVR, EVR_total_weighted, EVR_total_unweighted
def pairwise_orthogonalization_torch(v1, v2, center:bool=True, device='cpu'):
    """
    Torch front-end for pairwise orthogonalization: removes, column by column,
    the projection of v1 onto v2 and reports explained-variance ratios.
    Use ``center=True`` to guarantee zero correlation between the columns of
    v1_orth and v2.
    v1: y_true, v2: y_pred.
    Same contract as ``pairwise_orthogonalization`` but runs through a
    TorchScript helper instead of numba.
    RH 2021

    Args:
        v1 (ndarray or Tensor):
            y_true. Single vector or 2-D array of column vectors.
        v2 (ndarray or Tensor):
            y_pred. Single vector or 2-D array of column vectors.
        center (bool):
            Mean-subtract each column first (prevents negative EVR).
        device (str):
            Torch device the computation runs on.

    Returns:
        v1_orth, EVR, EVR_total_weighted, EVR_total_unweighted — see
        ``pairwise_orthogonalization``. Returned as numpy arrays iff v1 was
        a numpy array, otherwise as tensors.
    """
    numpy_in = isinstance(v1, np.ndarray)
    if numpy_in:
        v1 = torch.from_numpy(v1)
    if isinstance(v2, np.ndarray):
        v2 = torch.from_numpy(v2)

    outs = pairwise_orthogonalization_torch_helper(
        v1.to(device),
        v2.to(device),
        center=center,
    )

    if numpy_in:
        # Mirror the input type: move every output back to CPU numpy.
        outs = tuple(o.cpu().numpy() for o in outs)
    return outs
def EV(y_true, y_pred):
    '''
    Explained Variance, computed as 1 - (residual SS / total SS).
    Exactly equivalent to sklearn.metrics.explained_variance_score but
    slightly faster and returning all three aggregation modes at once.
    RH 2021

    Args:
        y_true (ndarray):
            Ground-truth values; columns are individual y vectors.
        y_pred (ndarray):
            Predictions; same shape as y_true.

    Returns:
        EV (1-D array):
            Per-column explained variance
            (== explained_variance_score(..., multioutput='raw_values')).
        EV_total_weighted (scalar):
            Variance-weighted average
            (== multioutput='variance_weighted').
        EV_total_unweighted (scalar):
            Plain average (== multioutput='uniform_average').
    '''
    residual_ss = np.sum((y_true - y_pred) ** 2, axis=0)
    total_ss = np.sum((y_true - np.mean(y_true, axis=0)) ** 2, axis=0)
    EV = 1 - residual_ss / total_ss

    col_var = np.var(y_true, axis=0)
    EV_total_weighted = np.sum(col_var * EV) / np.sum(col_var)
    EV_total_unweighted = np.mean(EV)
    return EV, EV_total_weighted, EV_total_unweighted
def pairwise_similarity(v1, v2=None, method='pearson', ddof=1):
    '''
    Similarity matrix between two sets of column vectors using covariance,
    Pearson correlation, or cosine similarity. A more general version of
    np.cov / np.corrcoef.
    RH 2021

    Args:
        v1 (ndarray):
            2-D array of column vectors (1-D inputs are promoted).
        v2 (ndarray):
            2-D array of column vectors to compare against v1. If None, the
            result is an auto-similarity matrix of v1.
        method (str):
            'cov' - covariance
            'pearson' or 'R' - Pearson correlation
            'cosine_similarity' - cosine similarity
        ddof (scalar/int):
            Degrees-of-freedom correction for method=='cov' (1 = unbiased,
            0 = biased).

    Returns:
        output (ndarray):
            (n_cols_v1, n_cols_v2) similarity matrix.
    '''
    methods = ['cov', 'pearson', 'R', 'cosine_similarity']
    assert np.isin(method, methods), f'RH Error: method must be one of: {methods}'

    v2 = v1 if v2 is None else v2
    v1 = v1[:, None] if v1.ndim == 1 else v1
    v2 = v2[:, None] if v2.ndim == 1 else v2

    if method == 'cov':
        c1 = v1 - v1.mean(axis=0)
        c2 = v2 - v2.mean(axis=0)
        output = (c1.T @ c2) / (v1.shape[0] - ddof)
    elif method in ('pearson', 'R'):
        # As fast as np.corrcoef but without its double-wide output matrix.
        # Note: Pearson's R can differ from sqrt(EV) when residuals are not
        # orthogonal to the prediction; prefer EV for R^2 if unsure.
        c1 = v1 - v1.mean(axis=0)
        c2 = v2 - v2.mean(axis=0)
        denom = np.sqrt(np.sum(c1**2, axis=0, keepdims=True).T * np.sum(c2**2, axis=0, keepdims=True))
        output = (c1.T @ c2) / denom
    else:  # 'cosine_similarity'
        u1 = v1 / np.linalg.norm(v1, axis=0, keepdims=True)
        u2 = v2 / np.linalg.norm(v2, axis=0, keepdims=True)
        output = u1.T @ u2
    return output
def batched_covariance(X, batch_size=1000, device='cpu'):
    """
    Batched covariance matrix calculation.
    Allows for large datasets to be processed in batches on GPU.
    RH 2022

    Args:
        X (np.ndarray or torch.Tensor):
            2D array of shape (n_samples, n_features)
        batch_size (int):
            Number of samples to process at a time.
        device (str):
            Device to use for computation.

    Returns:
        cov (np.ndarray or torch.Tensor):
            (n_features, n_features) block-wise X.T @ X.
            NOTE(review): no mean-subtraction happens here, so this is a Gram
            matrix, not a true covariance — center X beforehand if needed.
    """
    # Both generators are materialized so the inner one can be re-iterated
    # once per outer batch.
    X_dl1 = list(indexing.make_batches(np.arange(X.shape[1]), batch_size=batch_size, return_idx=True))
    X_dl2 = list(indexing.make_batches(np.arange(X.shape[1]), batch_size=batch_size, return_idx=True))

    if torch.is_tensor(X):
        X_cov = torch.zeros(X.shape[1], X.shape[1], device=device)
    else:
        X_cov = np.zeros((X.shape[1], X.shape[1]))

    n_batches = X.shape[1] // batch_size
    for ii, (X_batch_i, idx_batch_i) in enumerate(tqdm(X_dl1, total=n_batches, leave=False, desc='outer loop')):
        for jj, (X_batch_j, idx_batch_j) in enumerate(tqdm(X_dl2, total=n_batches, leave=False, desc='inner loop')):
            # assumes idx_batch is (start, stop) with exclusive stop —
            # TODO confirm against indexing.make_batches(return_idx=True)
            x_t = X[:,idx_batch_i[0]:idx_batch_i[-1]].T
            x = X[:,idx_batch_j[0]:idx_batch_j[-1]]
            if device != 'cpu':
                # Move only the current blocks to the accelerator.
                x_t = x_t.to(device)
                x = x.to(device)
            X_cov[idx_batch_i[0]:idx_batch_i[-1], idx_batch_j[0]:idx_batch_j[-1]] = x_t @ x
    return X_cov
def batched_matrix_multiply(X1, X2, batch_size1=1000, batch_size2=1000, device='cpu'):
    """
    Batched matrix multiplication: computes X1.T @ X2 block by block.
    Allows for multiplying huge matrices together on a GPU.
    RH 2022

    Args:
        X1 (np.ndarray or torch.Tensor):
            first matrix. shape (n_samples1, n_features1).
        X2 (np.ndarray or torch.Tensor):
            second matrix. shape (n_samples2, n_features2).
        batch_size1 (int):
            number of X1 columns per block.
        batch_size2 (int):
            number of X2 columns per block.
        device (str):
            device to use for computation and output.

    Returns:
        Y (np.ndarray or torch.Tensor):
            result of shape (n_features1, n_features2).
    """
    # Bug fix: make_batches yields a generator, and the inner loop exhausted
    # X2_dl during the first outer iteration — every later outer iteration saw
    # an empty inner loop, so only the first row-block of Y was filled.
    # Materializing both as lists (as batched_covariance already does) makes
    # the inner iterable reusable.
    X1_dl = list(indexing.make_batches(np.arange(X1.shape[1]), batch_size=batch_size1, return_idx=True))
    X2_dl = list(indexing.make_batches(np.arange(X2.shape[1]), batch_size=batch_size2, return_idx=True))

    if torch.is_tensor(X1):
        Y = torch.zeros(X1.shape[1], X2.shape[1], device=device)
    else:
        Y = np.zeros((X1.shape[1], X2.shape[1]))

    n_batches1 = X1.shape[1] // batch_size1
    n_batches2 = X2.shape[1] // batch_size2
    for ii, (X_batch_i, idx_batch_i) in enumerate(tqdm(X1_dl, total=n_batches1, leave=False, desc='outer loop')):
        for jj, (X_batch_j, idx_batch_j) in enumerate(X2_dl):
            # assumes idx_batch is (start, stop) with exclusive stop —
            # TODO confirm against indexing.make_batches(return_idx=True)
            x1_t = X1[:,idx_batch_i[0]:idx_batch_i[-1]].T
            x2 = X2[:,idx_batch_j[0]:idx_batch_j[-1]]
            if device != 'cpu':
                x1_t = x1_t.to(device)
                x2 = x2.to(device)
            Y[idx_batch_i[0]:idx_batch_i[-1], idx_batch_j[0]:idx_batch_j[-1]] = x1_t @ x2
    return Y
def similarity_to_distance(x, fn_toUse=1, a=1, b=0, eps=0):
    """
    Convert a similarity metric to a distance metric.
    RH 2022

    Args:
        x (value or array):
            similarity metric.
        fn_toUse (int from 1 to 6):
            Which conversion function to use:
            1: ((b+1) / x**a) - 1          (reciprocal)
            2: exp((-x + b)**a)            (exponential decay)
            3: arctan(a * (-x + b))
            4: b - x**a                    (linear-ish complement)
            5: sqrt(1 - (x + b))**a
            6: -log(x*a + b)
        a (float):
            Scaling parameter.
        b (float):
            Shifting parameter.
        eps (float):
            Small value added to the output.

    Returns:
        d (value or array):
            Distance, same shape as x.

    Raises:
        ValueError: if fn_toUse is not an int from 1 to 6.
            (Previously an unsupported value fell through and raised
            UnboundLocalError on `d`.)
    """
    if fn_toUse == 1:
        d = ((b+1) / (x**a)) - 1        # fn 1: 1/x
    elif fn_toUse == 2:
        d = np.exp(((-x+b)**a))         # fn 2: exp(-x)
    elif fn_toUse == 3:
        d = np.arctan(a*(-x+b))         # fn 3: arctan(-x)
    elif fn_toUse == 4:
        d = b - x**a                    # fn 4: 1-x
    elif fn_toUse == 5:
        d = np.sqrt(1-(x+b))**a         # fn 5: sqrt(1-x)
    elif fn_toUse == 6:
        d = -np.log((x*a)+b)            # fn 6: -log(x)
    else:
        raise ValueError(f'fn_toUse must be an int from 1 to 6, got {fn_toUse}')
    return d + eps
def cp_reconstruction_EV(tensor_dense, tensor_CP):
    """
    Explained variance of a dense tensor's reconstruction by a CP tensor
    (similar to a kruskal tensor).
    RH 2023

    Args:
        tensor_dense (np.ndarray or torch.Tensor):
            Dense tensor to be reconstructed.
        tensor_CP (tensorly CPTensor or list of np.ndarray/torch.Tensor):
            CP tensor. If a list of factors, each factor is a 2D array of
            shape (n_samples, rank). A tensorly CPTensor also works (requires
            tensorly to be installed).

    Returns:
        ev (scalar):
            1 - var(residual) / var(tensor_dense).
    """
    # Bug fix: the original raised ImportError whenever tensorly was missing,
    # even when tensor_CP was already a list of factors — making the
    # documented tensorly-free fallback unreachable. Handle the list case
    # first, and only require tensorly for CPTensor inputs.
    if isinstance(tensor_CP, list):
        assert all([isinstance(f, (np.ndarray, torch.Tensor)) for f in tensor_CP]), 'tensor_CP must be a list of factors'
        tensor_rec = indexing.cp_to_dense(tensor_CP)
    else:
        try:
            import tensorly as tl
        except ImportError:
            raise ImportError('tensorly not installed. Please install tensorly or provide a list of factors as the tensor_CP argument.')
        assert isinstance(tensor_CP, tl.cp_tensor.CPTensor), 'tensor_CP must be a list of factors'
        tensor_rec = tl.cp_to_tensor(tensor_CP)

    # Pick the variance implementation matching the dense tensor's type.
    # (Previously an unsupported type left `var` unbound.)
    if isinstance(tensor_dense, torch.Tensor):
        var = torch.var
    elif isinstance(tensor_dense, np.ndarray):
        var = np.var
    else:
        raise TypeError('tensor_dense must be a np.ndarray or torch.Tensor')

    ev = 1 - (var(tensor_dense - tensor_rec) / var(tensor_dense))
    return ev
##########################################
########### Linear Assignment ############
##########################################
def best_permutation(mat1, mat2, method='pearson'):
    '''
    Match the columns of mat1 to the columns of mat2, assuming the two sets
    of vectors are similar up to a permutation. The matching maximizes the
    total (dot-product) similarity via the Hungarian algorithm.
    RH 2021

    Args:
        mat1 (np.ndarray):
            2D array; columns are vectors to match against mat2.
        mat2 (np.ndarray):
            2D array; columns are vectors to match against mat1.
        method (string):
            Similarity reported for matched pairs:
            'pearson' or 'cosine_similarity'.

    Returns:
        sim_avg (double):
            Mean similarity across matched pairs (units depend on method).
        sim_matched (double):
            Similarity of each matched pair.
        ind1 (int):
            Indices into mat1 (usually sequential).
        ind2 (int):
            Indices into mat2 matched to ind1.
    '''
    # Dot-product grid drives the assignment; per-pair similarity is
    # re-computed below with the requested metric.
    similarity_grid = mat1.T @ mat2
    ind1, ind2 = scipy.optimize.linear_sum_assignment(similarity_grid, maximize=True)

    sim_matched = np.zeros(len(ind1))
    for ii, (i1, i2) in enumerate(zip(ind1, ind2)):
        if method == 'pearson':
            sim_matched[ii] = np.corrcoef(mat1[:, i1], mat2[:, i2])[0][1]
        if method == 'cosine_similarity':
            sim_matched[ii] = pairwise_similarity(mat1[:, i1], mat2[:, i2], 'cosine_similarity')

    sim_avg = sim_matched.mean()
    return sim_avg, sim_matched, ind1.astype('int64'), ind2.astype('int64')
def self_similarity_pairwise(mat_set, method='pearson'):
    '''
    Compare every pair of 2-D slices within a 3-D array using
    ``best_permutation``. The vectors within each slice are assumed to be
    similar up to a permutation; for each pair of slices the permutation
    maximizing similarity is found.
    RH 2021

    Args:
        mat_set (np.ndarray):
            3D array. Columns within the first two dims are vectors, matched
            across slices of the third dimension.
        method (string):
            Pairwise similarity metric: 'pearson' or 'cosine_similarity'.

    Returns:
        Same as ``best_permutation`` but stacked over every pair, plus
        combos: the (n_combos, 2) array of compared slice indices.
    '''
    import itertools

    n_repeats = mat_set.shape[2]
    n_components = mat_set.shape[1]

    combos = np.array(list(itertools.combinations(np.arange(n_repeats), 2)))
    n_combos = len(combos)

    # Pre-allocate one column per slice pair.
    corr_avg = np.zeros(n_combos)
    corr_matched = np.zeros((n_components, n_combos))
    ind1 = np.zeros((n_components, n_combos), dtype='int64')
    ind2 = np.zeros((n_components, n_combos), dtype='int64')

    for i_pair, (a, b) in enumerate(combos):
        out = best_permutation(mat_set[:, :, a], mat_set[:, :, b], method)
        corr_avg[i_pair] = out[0]
        corr_matched[:, i_pair] = out[1]
        ind1[:, i_pair] = out[2]
        ind2[:, i_pair] = out[3]

    return corr_avg, corr_matched, ind1, ind2, combos
def enumerate_paths(edges):
    """
    From Caleb Weinreb 2022

    Create a list of all directed paths in a directed graph.

    Parameters
    ----------
    edges: list of tuples
        Edges in the graph as tuples (i,j) for each edge i->j. The
        edges are assumed to be in topological order, meaning
        edge (i,j) is listed prior to edge (k,l) whenever j==k.

    Returns
    -------
    paths: list of tuples
        All directed paths as tuples of node indexes, each listed once.

    Notes
    -----
    Bug fix: the original expanded a parent once per *outgoing edge*, so any
    node with multiple outgoing edges produced duplicate paths (which in turn
    double-counted variables in downstream ILP constraints). Each parent is
    now expanded exactly once, at its last-listed edge (the first seen in
    reverse order), by which point all of its children are complete.
    """
    child_map = {parent: [] for parent, child in edges}
    for parent, child in edges:
        child_map[parent].append(child)
    # Leaves are children that never appear as a parent.
    leaf_nodes = [child for _, child in edges if child not in child_map]

    # sub_paths[n] = all paths starting at n (as lists of nodes).
    sub_paths = {leaf: [[leaf]] for leaf in leaf_nodes}
    for parent, _ in edges[::-1]:
        if parent in sub_paths:
            continue  # already fully expanded; skipping prevents duplicates
        sub_paths[parent] = [[parent]]
        for child in child_map[parent]:
            for path in sub_paths[child]:
                sub_paths[parent].append([parent] + path)
    return [tuple(p) for p in sum(sub_paths.values(), [])]
def maximum_directed_matching(weights, partition):
    r"""
    From Caleb Weinreb 2022

    Find the "maximum directed matching" in a weighted n-partite graph.

    Let $G$ be a directed graph with nodes $\{1,...,N\}$ and weighted
    edges $E_{ij}$. Assume that the nodes of $G$ are partitioned into
    groups, where $P_i$ denotes the group-label for node $i$, and that
    all the edges $E_{ij}$ satisfy $P_i < P_j$. In other words, all
    edges go from lower to higher partition index.

    We define a collection of node-sets as a "directed matching" if
    each set forms a connected component in $G$ and no node belongs to
    more than one set. The maximum directed matching is defined as the
    directed matching that maximizes the sum edge weights across all
    edges that originate and terminate in the same node-set.

    Parameters
    ----------
    weights : sparse matrix, shape=(N,N)
        The set of edge weights in the graph where N is the total
        number of nodes. ``weights[i,j]`` is the weight of the edge
        (i->j). A weight of 0 implies no edge.

    partition : ndarray, shape=(N,)
        The partition label for each node in the graph. We require
        ``partition[i] < partition[j]`` whenever ``weights[i,j] != 0``

    Returns
    -------
    matching: list of tuples
        Maximum directed matching as a list of node-index tuples.
    """
    import pulp

    # check that the edges are compatible with the partition
    sources,targets = weights.nonzero()
    assert np.all(partition[sources] < partition[targets]), 'Invalid weights/partition combination'

    # enumerate all possible node groupings and their score
    # (edges sorted by source partition so enumerate_paths sees topological order)
    edge_order = np.argsort(partition[sources])
    sorted_edges = list(zip(sources[edge_order], targets[edge_order]))
    paths = enumerate_paths(sorted_edges)
    # score of a grouping = sum of all intra-group edge weights
    path_weights = [sum([weights[i,j] for i in p for j in p]) for p in paths]

    # configure an ILP solver with pulp
    problem = pulp.LpProblem('matching', pulp.LpMaximize)

    # initialize variables: one binary indicator per candidate path,
    # objective = total weight of the selected paths
    path_vars = pulp.LpVariable.dicts('paths', paths, cat=pulp.LpBinary)
    problem += pulp.lpSum([w*path_vars[p] for p,w in zip(paths, path_weights)])

    # set constraints: each node may belong to at most one selected path
    path_membership = {n:[] for n in range(weights.shape[0])}
    for path in paths:
        for n in path: path_membership[n].append(path)
    for n in path_membership:
        problem += pulp.lpSum(path_vars[path] for path in path_membership[n]) <= 1

    # solve and extract results
    problem.solve()
    matching = [p for p,v in path_vars.items() if v.value()==1]
    return matching
| RichieHakim/basic_neural_processing_modules | bnpm/similarity.py | similarity.py | py | 31,408 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "numpy.ndarray",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "opt_einsum.contract",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "functools.partial",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.linalg... |
6674479915 | import logging
from typing import Type, Tuple, Dict, List
import itertools
from collections import OrderedDict
import torch
import dace
from dace import data as dt
from daceml.autodiff.backward_pass_generator import BackwardPassGenerator
from daceml.autodiff.base_abc import AutoDiffException, BackwardResult
from daceml.onnx.converters import clean_onnx_name
from daceml.onnx.onnx_importer import create_output_array, ONNXModel
from daceml import transformation
log = logging.getLogger(__name__)
def make_backward_function(
        model: ONNXModel,
        required_grads: List[str],
) -> Tuple[dace.SDFG, dace.SDFG, BackwardResult, Dict[str, dt.Data]]:
    """ Convert an ONNXModel to a PyTorch differentiable function. This method should not be used on its own.
        Instead use the ``backward=True`` parameter of :class:`daceml.torch.DaceModule`.

        :param model: the model to convert.
        :param required_grads: the list of inputs names of the module that we must compute gradients for.
        :return: A 4-tuple of forward SDFG, backward SDFG, backward result, and input arrays for
                 backward pass (as mapping of names to DaCe data descriptors).
    """
    # The generator below assumes a single forward state.
    if len(model.sdfg.nodes()) != 1:
        raise AutoDiffException(
            "Expected to find exactly one SDFGState, found {}".format(
                len(model.sdfg.nodes())))

    forward_sdfg = model.sdfg
    forward_state = model.sdfg.nodes()[0]

    # Backward pass is built as a separate SDFG with one (initial) state.
    backward_sdfg = dace.SDFG(forward_sdfg.name + "_backward")
    backward_state = backward_sdfg.add_state()

    gen = BackwardPassGenerator(
        sdfg=forward_sdfg,
        state=forward_state,
        given_gradients=[clean_onnx_name(name) for name in model.outputs],
        required_gradients=required_grads,
        backward_sdfg=backward_sdfg,
        backward_state=backward_state,
        zero_non_transients=False)
    backward_result, backward_grad_arrays, backward_input_arrays = gen.backward(
    )

    # Forward-pass intermediates that the backward pass needs are turned into
    # SDFG outputs below. Scalars are special-cased: they are copied to/from
    # size-1 arrays because scalars cannot be returned from SDFGs.
    replaced_scalars = {}
    for name, desc in backward_input_arrays.items():
        if name not in forward_sdfg.arrays:
            raise AutoDiffException(
                "Expected to find array with name '{}' in SDFG".format(name))

        forward_desc = forward_sdfg.arrays[name]
        # we will save this output and pass it to the backward pass

        # Views should not be forwarded. Instead the backward pass generator should forward the source of the view,
        # and rebuild the sequence of required views in the backward pass.
        assert type(forward_desc) is not dt.View
        if isinstance(forward_desc, dt.Scalar):
            # we can't return scalars from SDFGs, so we add a copy to an array of size 1
            fwd_arr_name, _ = forward_sdfg.add_array(
                name + "_array", [1],
                forward_desc.dtype,
                transient=False,
                storage=forward_desc.storage,
                find_new_name=True)
            bwd_arr_name, bwd_desc = backward_sdfg.add_array(
                name + "_array", [1],
                forward_desc.dtype,
                transient=False,
                storage=forward_desc.storage,
                find_new_name=True)
            # the original scalar becomes transient in the backward SDFG;
            # its value is copied in from the size-1 array before the pass runs
            backward_sdfg.arrays[name].transient = True
            fwd_copy_state = forward_sdfg.add_state_after(forward_state,
                                                          label="copy_out_" +
                                                          fwd_arr_name)
            bwd_copy_state = backward_sdfg.add_state_before(backward_state,
                                                            label="copy_in_" +
                                                            bwd_arr_name)
            fwd_copy_state.add_edge(fwd_copy_state.add_read(name), None,
                                    fwd_copy_state.add_write(fwd_arr_name),
                                    None, dace.Memlet(name + "[0]"))

            bwd_copy_state.add_edge(bwd_copy_state.add_read(bwd_arr_name),
                                    None, bwd_copy_state.add_write(name), None,
                                    dace.Memlet(name + "[0]"))
            replaced_scalars[name] = (bwd_arr_name, bwd_desc)
        else:
            # arrays can be forwarded directly — just make them non-transient
            forward_sdfg.arrays[name].transient = False

    # Swap the scalar entries for their size-1 array replacements.
    for orig_name, (replaced_name, replaced_desc) in replaced_scalars.items():
        del backward_input_arrays[orig_name]
        backward_input_arrays[replaced_name] = replaced_desc

    # Same scalar→array treatment for the computed gradients themselves,
    # copied out *after* the backward state runs.
    for fwd_name, bwd_name in backward_result.required_grad_names.items():
        desc = backward_sdfg.arrays[bwd_name]
        if isinstance(desc, dt.Scalar):
            arr_name, arr_desc = backward_sdfg.add_array(bwd_name + "_array",
                                                         [1],
                                                         desc.dtype,
                                                         transient=False,
                                                         storage=desc.storage,
                                                         find_new_name=True)
            desc.transient = True
            bwd_copy_state = backward_sdfg.add_state_after(backward_state,
                                                           label="copy_out_" +
                                                           bwd_name)
            bwd_copy_state.add_edge(bwd_copy_state.add_read(bwd_name), None,
                                    bwd_copy_state.add_write(arr_name), None,
                                    dace.Memlet(bwd_name + "[0]"))
            backward_result.required_grad_names[fwd_name] = arr_name

    backward_sdfg.validate()
    return forward_sdfg, backward_sdfg, backward_result, backward_input_arrays
| spcl/daceml | daceml/autodiff/torch.py | torch.py | py | 5,716 | python | en | code | 69 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "daceml.onnx.onnx_importer.ONNXModel",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "dac... |
9875076956 |
from typing import List
def add_border(picture: List[str]) -> List[str]:
    """Frame the lines of `picture` in a '*' border with a one-space margin.

    Shorter lines are right-padded with spaces so every framed row has the
    same width (longest line + 4).
    """
    width = max(len(line) for line in picture)
    body = [f"* {line.ljust(width)} *" for line in picture]
    top_bottom = '*' * (width + 4)
    spacer = f"*{' ' * (width + 2)}*"
    return [top_bottom, spacer] + body + [spacer, top_bottom]
# Demo: print three framed examples, separated by blank lines.
print(
    '\n'.join(add_border(['abc', 'ded'])),
    end='\n\n')
print('\n'.join(add_border([
    "Let's try a",
    "big sentence",
    "instead of these",
    "dumb letters."])),
    end='\n\n')
print('\n'.join(
    add_border(['Eat', 'mor', 'chikin'])),
    end='\n\n')
| barkdoll/100-algo | addBorder/add_border.py | add_border.py | py | 687 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
}
] |
35300873562 | import os
import sys
import argparse
import logging
from pyspark.sql.functions import col, when
from pyspark.sql import SparkSession
from table_mappings import get_schema_struct, get_column_name_diff, get_primary_key
def _get_config(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--host', dest='host', type=str)
parser.add_argument('--port', dest='port', type=int)
parser.add_argument('--username', dest='username', type=str)
parser.add_argument('--password', dest='password', type=str)
parser.add_argument('--database', dest='database', type=str)
parser.add_argument('--table', dest='table', type=str)
parser.add_argument('--source', dest='source', type=str)
parser.add_argument('--delimiter', dest='delimiter', type=str, default=',')
config, _ = parser.parse_known_args(argv)
return config
def _get_source_path(source):
"""Build absolute source file path
:param source Source path
"""
current_path = os.path.dirname(os.path.abspath(__file__))
return os.path.join(current_path, source)
def _write_to_db(data, config):
    """Append `data` to the configured PostgreSQL table over JDBC."""
    jdbc_url = f"jdbc:postgresql://{config.host}:{config.port}/{config.database}"
    writer = (data.write
              .format("jdbc")
              .option("url", jdbc_url)
              .option("dbtable", config.table)
              .option("user", config.username)
              .option("password", config.password)
              .option("driver", "org.postgresql.Driver")
              .mode('append'))
    return writer.save()
def _map_columns(data, schema):
    """Rename columns per `schema` (old name -> new name); columns missing
    from `schema` keep their original names."""
    renamed = [col(name).alias(schema.get(name, name)) for name in data.columns]
    return data.select(renamed)
def _read_csv(spark, source_path, schema, delimiter=','):
    """Load a headered CSV with an explicit schema; 'NA' strings become nulls."""
    reader = (spark.read
              .format("csv")
              .option("header", True)
              .option("delimiter", delimiter)
              .schema(schema))
    return reader.load(source_path).na.replace('NA', None)
def main(argv, spark):
    """Load the configured CSV, map its columns, and append de-duplicated
    rows to the warehouse table.

    On any failure the exception is logged (top-level boundary) and the
    function returns None.
    """
    try:
        config = _get_config(argv)
        source_path = _get_source_path(config.source)
        # Bug fix: honor --delimiter (it was parsed by _get_config but never
        # passed on, so _read_csv always used its ',' default).
        raw_data = _read_csv(spark, source_path, get_schema_struct(config.table),
                             delimiter=config.delimiter)
        mapped_data = _map_columns(raw_data, get_column_name_diff(config.table))
        if config.table == "flights":
            mapped_data = mapped_data.drop("index")
        return _write_to_db(mapped_data.dropDuplicates(get_primary_key(config.table)), config)
    except Exception as e:
        return logging.exception(e)
if __name__ == '__main__':
    # Raw string for the Windows jar path: '\s' and '\j' are invalid escape
    # sequences (DeprecationWarning today, SyntaxError in future Python).
    # The runtime value is unchanged.
    spark = SparkSession \
        .builder \
        .master("local") \
        .config("spark.jars", r"C:\spark\spark-3.4.0-bin-hadoop3\jars\postgresql-42.6.0.jar") \
        .appName("load_dw") \
        .getOrCreate()
    logging.getLogger().setLevel(logging.INFO)
    main(sys.argv, spark)
| carlosborgesreis/CarlosBorges-data-coding-interview | challenge1/pyspark/load_dw.py | load_dw.py | py | 2,969 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath"... |
32854853808 | from ProjectUtils import *
from gym import spaces
from gym.utils import seeding
import numpy as np
from math import floor
import gym
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Input, Concatenate, LSTM
from keras.optimizers import Adam
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from rl.agents import DDPGAgent
from rl.memory import SequentialMemory
from rl.random import OrnsteinUhlenbeckProcess
ENV_NAME = "TEST"
""" LOAD DATA """
# df = pd.read_pickle("data/crypto_dat_v4.pickle", )
df = pd.read_csv('crypto_dat_v6.csv')
# Bug fix: DataFrame.fillna returns a new frame; the original bare
# `df.fillna(0)` discarded the result, leaving NaNs in place.
df = df.fillna(0)
# Keep only rows with a finite weighted price.
df = df[np.isfinite(df['Weighted_Price'])]
neuron_multiplier = 10
# TODO reorg max and min length to be in a number of days and not a fraction
days_length_of_training_episode = 30
training_range_percent = 0.775
print(len(df))
# First `training_range_percent` of rows is the training range; the rest
# bounds the maximum session length.
training_stop_index = int(len(df) * training_range_percent)
max_training_session_length = len(df) - training_stop_index
# df = df[500:]
class Stocks(gym.Env):
    def __init__(self, pandas_df):
        # Constructor: normalizes the input frame, defines gym spaces, and
        # seeds the environment. `pandas_df` is expected to contain the
        # price/volume columns listed below — TODO confirm schema upstream.
        plt.style.use('dark_background')
        """ PROCESS DATA """
        other_data_keys = ['corrected_data','High','Low','Volume_(BTC)','Volume_(Currency)','Open','Close']
        self.price_keys = ['Weighted_Price', ]
        self.internal_tracking_keys = ['Working Balance', 'Bitcoin Balance', 'Bank Balance', 'Total Worth', 'Transferable Money', 'previous_Weighted_Price', 'previous_prediction_results', 'Baseline']
        # if isinstance(type(df['Timestamp'][0]),type('')):
        #     self.times_df = pd.to_datetime(df['Timestamp'])
        # else:
        # NOTE(review): reads the module-level global `df` here rather than
        # the `pandas_df` parameter — works only when both are the same
        # frame; confirm intent.
        self.times_df = pd.to_datetime(df['Timestamp'])
        input_columns = other_data_keys + self.price_keys + self.internal_tracking_keys
        self.df_inputs = pandas_df.reindex(columns=input_columns)
        # self.df_inputs = self.df_inputs.drop('Timestamp', axis=1)
        # print(self.df_inputs.columns)
        # Seed a zero row (label -1) so internal-tracking columns exist.
        self.df_inputs.loc[-1] = np.zeros((len(self.df_inputs.columns.to_list())))
        # self.inputs, self.norms = norm_ops(self.df_inputs)
        # take_zscore comes from ProjectUtils; presumably returns the
        # normalized frame plus its normalization stats — verify there.
        self.inputs, self.norms = take_zscore(self.df_inputs)
        self.inputs = self.inputs.to_numpy() # convert to numpy array
        # print(self.inputs)
        """ DEFINE ACTION AND OBSERVATION SPACE """
        # Set number of actions (buy/sell for coins as well as hold/transfer)
        self.range = 1
        self.num_actions = len(self.price_keys) * 2 + 2
        self.action_space = spaces.Box(low=np.zeros((self.num_actions)), high=np.ones((self.num_actions))) # , shape=(self.num_actions,), dtype=np.float32)
        self.observation_space = spaces.Box(low=0, high=self.range, shape=[self.df_inputs.shape[1]], dtype=np.float32)
        self.action_space.n = self.num_actions
        """ DEFINE A DICTIONARY TO FIND CORRECT INPUTS BY NAME LATER ON """
        # column name -> positional index into each input row
        self.key_df_column_dict = {}
        for i in range(len(input_columns)):
            self.key_df_column_dict.update({input_columns[i]: self.df_inputs.columns.get_loc(input_columns[i])})
        """ DEFINE UNCHANGING ENV VARS """
        self.trans_fee = 1
        # self.norm is defined elsewhere on the class — presumably applies
        # the stored z-score normalization; verify.
        self.transaction_fee = self.norm(self.trans_fee)
        self.total_step_counter = 0
        """ SET VARS FOR RENDER FUNCTION """
        self.render_keys = self.price_keys + ['Working Balance', 'Bitcoin Balance', 'Bank Balance', 'Total Worth', 'Transferable Money', 'Baseline'] # self.internal_tracking_keys #[:len(self.internal_tracking_keys)-2]
        # find what key corresponds to which index for render fnc
        self.render_df = self.create_empty_render_df()
        # one plot colour per render key
        self.colours = ['xkcd:ultramarine',
                        'xkcd:vivid purple',
                        'xkcd:gold',
                        'xkcd:irish green',
                        'xkcd:dark pink',
                        'xkcd:lighter green',
                        'xkcd:sea blue']
        self.done = False
        """ START ENV """
        self.seed()
        # print(input_columns)
        # print(self.inputs)
""" CORE FUNCTIONS """
    def seed(self, seed=None):
        # Standard gym seeding: stores a seeded RNG on the env and returns
        # the seed actually used.
        self.np_random, seed = seeding.np_random(seed)
        return [seed]
    def step(self, action):
        """ MOST IMPORTANT FUNCTION FOR TIMING"""
        # Advance the env one time step: pick the argmax action, apply it,
        # update balances, and return (state, reward, done, info).
        # print(action)
        """ PARSE ACTION """
        assert type(action) == np.ndarray
        action = list(action)
        # for i in range(len(action)):
        #     if action[i] < 0:
        #         action[i]= 0
        #     else:
        #         pass
        # action = [abs(x) for x in action]
        # print(action)
        # Discretize: highest-valued entry selects the action, its value is
        # the action magnitude.
        self.chosen_action = int(np.argmax(action))
        chosen_action_amt = action[self.chosen_action]
        """ REWARD """
        # reward has 4 terms:
        # 1. the total worth compared to initial worth
        # 2. the amount in the bank compared to initial worth
        # 3. a punishment factor for not picking a new action
        # 4. a constant to shift the rewards into a range near 0 at the start
        # 5. a small bonus for making a new choice
        # NOTE(review): the composite reward is currently disabled — reward
        # is hard-coded to 0 here (time_rewards() below may add to it; its
        # definition is outside this view — verify).
        self.reward = 0# self.denorm(self.total_worth) / self.denorm(self.baseline_worth) + (self.denorm(self.bank) / self.denorm(self.random_balance)) - 1 # - np.exp(self.same_actions_counter * 0.01) + 1
        # if self.same_actions_counter ==0:
        #     self.reward += np.exp(0.1)
        # print(' same action = ', self.same_actions_counter )
        """ UPDATE STATE """
        # Current (normalized) price of each tracked coin at this step.
        coin_values = []
        for i in range(len(self.coin_amounts)):
            coin_values.append(self.inputs[self.step_index][self.df_inputs.columns.get_loc(self.price_keys[i])])
        self.update_internal_data()
        # process_action / time_rewards are defined elsewhere on the class.
        self.process_action(self.chosen_action, chosen_action_amt, coin_values)
        self.time_rewards()
        self.step_index += 1
        """ DEFINE HOW ACTIONS WORK TO AGENT """
        # Re-price holdings and the buy-and-hold baseline at the new step.
        coin_values = []
        for i in range(len(self.coin_amounts)): coin_values.append(self.inputs[self.step_index][self.df_inputs.columns.get_loc(self.price_keys[i])])
        for i in range(len(self.price_keys)): self.coin_values[i] = self.norm(self.coin_amounts[i] * self.denorm(coin_values[i]))
        self.baseline_worth = self.norm(self.baseline_amount * self.denorm(coin_values[0]))
        """ ADD INTERNAL DATA TO NEXT INPUT """
        self.state = self.inputs[self.step_index]
        # self.assess_if_done()
        # self.save()
        return self.state, self.reward, self.done, {}
    def reset(self):
        # Gym API: re-initialize the episode and return the first observation.
        self.new_start()
        return self.state
    def render(self, mode='live', title=None, **kwargs):
        # Live matplotlib plot of the tracked series up to the current step,
        # windowed to the trailing 30 days.
        plt.ion()
        plt.clf()
        self.lists_to_plot = []
        # Gather one y-series per render key from the render dataframe.
        for i in range(len(self.render_keys)):
            self.lists_to_plot.append([])
            self.lists_to_plot[i] = self.render_df[self.render_keys[i]].iloc[:self.step_index].to_list()
        self.ylists = self.lists_to_plot
        self.xlist = self.times_df.iloc[:self.step_index].to_list()
        for i in range(len(self.render_keys)):  # Graph stuff
            graph = plt.plot(self.xlist, self.ylists[i], label=self.render_keys[i], color=self.colours[i])[0]
            graph.set_ydata(self.ylists[i])
            graph.set_xdata(self.xlist)
        # Show a rolling 30-day window ending at the current render index.
        plt.xlim(self.xlist[self.render_index] - pd.Timedelta('30 days'), self.xlist[self.render_index])
        plt.legend(loc="best")
        plt.draw()
        plt.pause(.00001)
        self.render_index += 1
""" CLASS UTILITY FUNCTIONS """
def assess_if_done(self):
if undo_single_zscore(self.bank, self.norms, self.key_df_column_dict[self.price_keys[0]]) < 0:
self.done = True
# if self.same_actions_counter >= 7: # must make a new action every 1 week(s)
# self.done = True
    def new_start(self):
        """Reset all episode state for a fresh run.

        Starts at step 0, draws a random starting bankroll, splits it into a
        random cash/coin fraction, spreads the coin fraction evenly over all
        tracked coins, and seeds the buy-and-hold baseline used for reward
        comparison.  All monetary state is stored z-score normalized via
        ``self.norm``.
        """
        self.step_index = 0  # fixed start; a random start was: np.random.randint(0, training_stop_index)
        self.start_index = self.render_index = self.step_index
        self.random_balance = np.random.randint(1000, 20000)  # total starting worth, currency units
        money_frac = np.random.random(1)[0]  # fraction held as cash
        coin_frac = 1 - money_frac  # remainder held in coins
        print('A new Start')
        self.initial_balance = self.random_balance * money_frac
        self.balance = self.norm(self.initial_balance)
        self.start_balance = self.balance  # this is to track reward
        self.actions_taken_list = []
        self.same_actions_counter = 0
        self.action_index = 0
        self.bank = self.norm(0)  # realized profit moved out to the "bank"
        frac_coin_splits = (self.random_balance * coin_frac) / len(self.price_keys)  # coin value per asset
        self.coin_amounts = np.zeros((len(self.price_keys)))  # units held of each coin
        self.coin_values = np.zeros((len(self.price_keys)))   # normalized value of each holding
        for i in range(len(self.coin_values)):
            self.coin_values[i] = self.norm(frac_coin_splits)
            # units = value / current price of coin i at the starting step
            self.coin_amounts[i] = frac_coin_splits / self.denorm(self.inputs[self.step_index][self.key_df_column_dict[self.price_keys[i]]])
        # Buy-and-hold baseline: the whole bankroll put into the first coin.
        self.baseline_worth = self.norm(self.random_balance)
        self.baseline_amount = self.random_balance / self.denorm(self.inputs[self.step_index][self.key_df_column_dict[self.price_keys[0]]])
        self.done = False
        self.render_df = self.create_empty_render_df()
        self.transferable = self.norm(0)
        self.chosen_action = 0
        self.state = self.inputs[self.step_index]
        self.update_internal_data()
    def update_internal_data(self):
        """Write the agent's portfolio state into the current observation row.

        Mirrors balance / first coin value / bank / total worth / transferable
        into the input matrix (so the network can observe them), carries the
        previous step's values for tracking keys 5 and 6 forward, and records
        the de-normalized render values for plotting.
        """
        self.total_worth = self.norm(self.denorm(self.balance) + self.denorm(self.coin_values.sum()))
        self.inputs[self.step_index][self.key_df_column_dict[self.internal_tracking_keys[0]]] = self.balance
        self.inputs[self.step_index][self.key_df_column_dict[self.internal_tracking_keys[1]]] = self.coin_values[0]
        self.inputs[self.step_index][self.key_df_column_dict[self.internal_tracking_keys[2]]] = self.bank
        self.inputs[self.step_index][self.key_df_column_dict[self.internal_tracking_keys[3]]] = self.total_worth
        self.inputs[self.step_index][self.key_df_column_dict[self.internal_tracking_keys[4]]] = self.transferable
        try:
            # Carry forward the previous step's values.
            # NOTE(review): at step_index == 0 the index -1 wraps to the LAST
            # row instead of raising IndexError, so the except branch below
            # likely never fires — confirm intended.
            # NOTE(review): key 6 is also read from key 5's column — confirm
            # this is intentional and not a copy/paste slip.
            self.inputs[self.step_index][self.key_df_column_dict[self.internal_tracking_keys[5]]] = self.inputs[self.step_index - 1][self.key_df_column_dict[self.internal_tracking_keys[5]]]
            self.inputs[self.step_index][self.key_df_column_dict[self.internal_tracking_keys[6]]] = self.inputs[self.step_index - 1][self.key_df_column_dict[self.internal_tracking_keys[5]]]
        except IndexError:
            # Fallback: no previous row, reuse the current row's values.
            self.inputs[self.step_index][self.key_df_column_dict[self.internal_tracking_keys[5]]] = self.inputs[self.step_index][self.key_df_column_dict[self.internal_tracking_keys[5]]]
            self.inputs[self.step_index][self.key_df_column_dict[self.internal_tracking_keys[6]]] = self.inputs[self.step_index][self.key_df_column_dict[self.internal_tracking_keys[5]]]
        self.inputs[self.step_index][self.key_df_column_dict[self.internal_tracking_keys[7]]] = self.baseline_worth
        """ ADD DATA TO RENDER FUNCTION """
        # Record de-normalized render-key values for the plotting frame.
        dat = []
        for i in range(len(self.render_keys)):
            dat.append(self.denorm(self.state[self.key_df_column_dict[self.render_keys[i]]]))
        self.render_df.iloc[self.step_index] = dat
def time_rewards(self):
if self.action_index > 2:
a = self.actions_taken_list[self.action_index - 1]
b = self.actions_taken_list[self.action_index]
if a == b:
self.same_actions_counter += 1
else:
self.same_actions_counter = 0
self.action_index += 1
    def denorm(self, x):
        """Undo the z-score normalization using the primary price column's statistics."""
        return undo_single_zscore(x, self.norms, self.key_df_column_dict[self.price_keys[0]])
    def norm(self, x):
        """Z-score a raw value using the primary price column's statistics."""
        return take_single_zscore(x, self.norms, self.key_df_column_dict[self.price_keys[0]])
    def process_action(self, chosen_act, chosen_amt, coin_vals):
        """Execute one trading action: buy, sell, transfer-to-bank, or hold.

        ``chosen_act`` selects the action (even = buy a coin, odd = sell a
        coin, ``2*n_coins + 1`` = transfer profit to the bank, anything else
        holds); ``chosen_amt`` is clamped to [0, 1] and used as the fraction
        of balance/holding to move; ``coin_vals`` are the normalized market
        prices for this step.
        """
        chosen_action_amt = chosen_amt
        chosen_action = chosen_act
        chosen_coin = 0
        has_coins = False
        has_money = False
        self.buy_action = False
        self.sell_action = False
        self.transfer_action = False
        self.money_transfered = False
        """ FIND TOTAL WORTH AND SEE IF ANY IS ALLOWED TO BE TRANSFERED """
        # Only worth above the starting bankroll may be moved to the bank.
        if (self.denorm(self.total_worth) / self.random_balance) > 1:
            self.transferable = self.norm(self.denorm(self.total_worth) - self.random_balance)
        if (self.denorm(self.total_worth) / self.random_balance) <= 1:
            self.transferable = self.norm(0)
        if chosen_action_amt < 0: # amounts are magnitudes; flip negatives
            chosen_action_amt = -chosen_action_amt
        if chosen_action_amt > 1:
            chosen_action_amt = 1
        if chosen_action <= len(self.coin_values) * 2:
            # NOTE(review): the loop variable i is unused — every iteration
            # recomputes the same values, so the loop is effectively run once.
            for i in range(len(self.coin_values) * 2): # defining actions for buy/sell coins
                # which coin we're buying/selling
                if chosen_action % 2 == 0: # if even, we're buying
                    chosen_coin = chosen_action / 2 # 0/2 = 0, 2/2 = 1
                    self.buy_action = True
                else: # if odd, we're selling
                    chosen_coin = floor(chosen_action / 2) # 1/2 = 0.5 floor-> 0, 3/2 = 1.5 floor -> 1
                    self.sell_action = True
                chosen_coin -= 1
                chosen_coin = int(chosen_coin)
                """ DENORMALIZED VALUES """
                coin_market_worth_denorm = self.denorm(coin_vals[chosen_coin])
                balance_denorm = self.denorm(self.balance)
                coin_val_denorm = self.coin_amounts[chosen_coin] * coin_market_worth_denorm
                buy_amt = (balance_denorm * chosen_action_amt) - self.trans_fee
                sell_amt = (coin_val_denorm * chosen_action_amt)
                if coin_val_denorm > 0 and sell_amt <= (coin_val_denorm + self.trans_fee):
                    has_coins = True
                if balance_denorm > self.trans_fee and buy_amt <= (balance_denorm + self.trans_fee) and buy_amt > 0:
                    has_money = True
        if chosen_action == len(self.coin_values) * 2 + 1 and self.transferable > 0:
            self.transfer_action = True
        # NOTE(review): in the transfer branch below, buy_amt/balance_denorm
        # are only bound inside the buy/sell block above — confirm a transfer
        # action cannot reach it with those names undefined.
        if self.buy_action and has_money: # Buy (0 mod 2 = 0, 1 mod 2 = 1)
            self.balance = self.norm(balance_denorm - buy_amt)
            self.coin_amounts[chosen_coin] += buy_amt / coin_market_worth_denorm
        elif self.sell_action and has_coins: # Sell
            self.balance = self.norm(self.denorm(self.balance) + sell_amt)
            self.coin_amounts[chosen_coin] = self.coin_amounts[chosen_coin] - (sell_amt / coin_market_worth_denorm)
        elif self.transfer_action and has_money: # transfer to bank account
            if buy_amt > self.denorm(self.transferable):
                buy_amt = self.denorm(self.transferable)
            # NOTE(review): self.bank is stored normalized but is summed here
            # with de-normalized amounts — confirm the intended scale.
            self.bank = self.norm(self.bank + (buy_amt + self.trans_fee)) # add money to bank account
            self.balance = self.norm(balance_denorm - buy_amt)
            self.transferable = self.norm(self.denorm(self.transferable) - buy_amt)
            self.money_transfered = True
        else: # hold
            pass
        self.actions_taken_list.append(chosen_action)
def create_empty_render_df(self):
render_df = pd.DataFrame({'dummy_dat': np.zeros((len(self.inputs)))})
empty_df = render_df.reindex(columns=self.render_keys)
# for i in range(len(self.render_keys)):
#
# empty_df = render_df.reindex(columns=self.render_keys)#pd.DataFrame({self.render_keys[i]: np.empty((len(self.df_inputs))).tolist()})
#
# if render_df.shape[0] == 0: # if first time adding a column, use append
# render_df = render_df.append(empty_df)
#
# else: # if not, use concat for correct behaviour
# render_df = pd.concat((render_df, empty_df), sort=False, axis=1)
return empty_df
def save(self, save_on_step=100):
self.total_step_counter += 1
# print('totl steps = ',self.total_step_counter)
if self.total_step_counter % save_on_step == 0 and self.total_step_counter > 0:
agent.save_weights('{}_weights.h5'.format(ENV_NAME), overwrite=True)
# def make_model_and_stuff():
# Get the environment and extract the number of actions.
env = Stocks(df)
np.random.seed(123)
env.seed(123)
assert len(env.action_space.shape) == 1
nb_actions = env.action_space.shape[0]
actor_neurons = 4 * neuron_multiplier
actor_activation = 'relu'
# Actor network: maps a (1, obs_dim) observation window to an action vector.
actor = Sequential()
actor.add(Flatten(input_shape=(1,) + env.observation_space.shape))
actor.add(Dense(actor_neurons))
actor.add(Activation(actor_activation))
actor.add(Dense(actor_neurons))
actor.add(Activation(actor_activation))
actor.add(Dense(nb_actions))
actor.add(Activation('linear'))
print(actor.summary())
critic_neurons = 8 * neuron_multiplier
critic_activation = 'relu'
# Critic network: scores an (action, observation) pair with a single Q-value.
action_input = Input(shape=(nb_actions,), name='action_input')
observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
flattened_observation = Flatten()(observation_input)
x = Concatenate()([action_input, flattened_observation])
x = Dense(critic_neurons)(x)
x = Activation(critic_activation)(x)
x = Dense(critic_neurons)(x)
x = Activation(critic_activation)(x)
x = Dense(1)(x)
x = Activation('linear')(x)
critic = Model(inputs=[action_input, observation_input], outputs=x)
print(critic.summary())
# Configure and compile the DDPG agent.
memory = SequentialMemory(limit=500000, window_length=1)
# Ornstein-Uhlenbeck noise drives exploration of the continuous action space.
random_process = OrnsteinUhlenbeckProcess(size=nb_actions, theta=.15, mu=0.1, sigma=.3)
agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,
                  memory=memory, # nb_steps_warmup_critic=10, nb_steps_warmup_actor=10,
                  random_process=random_process, gamma=.99, target_model_update=1)
agent.compile(Adam(lr=0.001, clipnorm=1.), metrics=['mae'])
# Train the agent.  Visualizing slows training considerably; training can be
# aborted safely with Ctrl + C.
# agent.load_weights('models/ddpg/{}_weights.h5f'.format(ENV_NAME))
print('Fitting agent.')
agent.fit(env, nb_steps=5000, visualize=True, verbose=1, nb_max_episode_steps=max_training_session_length, )
# agent.fit(env, nb_steps=500000, visualize=False, verbose=1, nb_max_episode_steps=max_training_session_length, )
# agent.fit(env, nb_steps=10000, visualize=True, verbose=1, nb_max_episode_steps=max_training_session_length, )
# After training is done, save the final weights.
agent.save_weights('models/ddpg/{}_weights.h5f'.format(ENV_NAME), overwrite=True)
# Finally, evaluate the algorithm:
# agent.test(env, nb_episodes=5, visualize=True, nb_max_episode_steps=200)
| GoldwinXS/TradingBot | TradingAgent.py | TradingAgent.py | py | 20,141 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.plotting.register_matplotlib_converters",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.isfinite",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "gym.Env",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_na... |
69955469226 | from setuptools import setup
from pybind11.setup_helpers import Pybind11Extension, build_ext
# C++ extension module: pybind11 bindings for the orbit and light-curve
# kernels, built against the vendored Eigen and pybind11 headers.
ext_modules = [
    Pybind11Extension(
        "harmonica/bindings",
        [
            'harmonica/orbit/kepler.cpp',
            'harmonica/orbit/trajectories.cpp',
            'harmonica/orbit/gradients.cpp',
            'harmonica/light_curve/fluxes.cpp',
            'harmonica/light_curve/gradients.cpp',
            'harmonica/bindings.cpp'
        ],
        include_dirs=["vendor/eigen", "vendor/pybind11"],
        language="c++",
        # -ffast-math trades strict IEEE float semantics for speed.
        extra_compile_args=["-O2", "-ffast-math"]
    ),
]
setup(
    name="planet-harmonica",
    version="0.1.0",
    author="David Grant",
    author_email="david.grant@bristol.ac.uk",
    url="https://github.com/DavoGrant/harmonica",
    license="MIT",
    packages=["harmonica", "harmonica.jax"],
    description="Light curves for exoplanet transmission mapping.",
    long_description="Light curves for exoplanet transmission mapping.",
    python_requires=">=3.6",
    install_requires=["numpy", "jax", "jaxlib"],
    # pybind11's build_ext picks the right C++ standard/flags per compiler.
    cmdclass={"build_ext": build_ext},
    ext_modules=ext_modules,
    classifiers=[
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Topic :: Scientific/Engineering :: Astronomy",
        "Topic :: Software Development :: Libraries :: Python Modules"
    ],
)
| DavoGrant/harmonica | setup.py | setup.py | py | 1,523 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "pybind11.setup_helpers.Pybind11Extension",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pybind11.setup_helpers.build_ext",
"line_number": 34,
"usage_type": "name"
}
] |
# Problem source: https://www.acmicpc.net/problem/1021 (rotating queue)
from sys import stdin
from collections import deque


def min_rotations(size, targets):
    """Return the minimum number of rotate operations needed to pop each
    target (in order) from the front of a deque initially holding 1..size.

    For each target we either rotate left by its index or rotate right by
    the complement, whichever is cheaper, then pop it from the front.
    """
    # Note: the original bound this deque to a name that shadowed the
    # imported `deque` class; renamed to avoid the shadowing.
    dq = deque(range(1, size + 1))
    ops = 0
    for num in targets:
        left_move = dq.index(num)            # cost of rotating left
        right_move = len(dq) - left_move     # cost of rotating right
        if left_move <= right_move:
            dq.rotate(-left_move)
            ops += left_move
        else:
            dq.rotate(right_move)
            ops += right_move
        dq.popleft()
    return ops


if __name__ == "__main__":
    # Guarding the I/O keeps the solver importable and testable.
    n, p = map(int, stdin.readline().split())
    arr = list(map(int, stdin.readline().split()))
    print(min_rotations(n, arr))
| ThreeFive85/Algorithm | Algorithm_type/Queue0rDeque/rotatingQueue/rotating_queue.py | rotating_queue.py | py | 633 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.stdin.readline",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "sys.stdin.readline",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_nu... |
10309070734 | from configparser import ConfigParser
from multiprocessing import Pool
import os
import sys
if len(sys.argv) != 2:
print('Arguments: config')
sys.exit(-1)
cp = ConfigParser()
with open(sys.argv[1]) as fh:
cp.read_file(fh)
cp = cp['config']
nb_classes = int(cp['nb_classes'])
dataset = cp['dataset']
random_seed = int(cp["random_seed"])
first_batch_size = int(cp["first_batch_size"])
il_states = int(cp["il_states"])
feat_root = cp["feat_root"]
incr_batch_size = (nb_classes-first_batch_size)//il_states
for state_id in range(il_states+1):
print("Preparing state",state_id, "of", il_states)
root_path = os.path.join(feat_root,"fetril",dataset,"seed"+str(random_seed),"b"+str(first_batch_size),"t"+str(il_states),"train","batch"+str(state_id))
nb_classes = first_batch_size + (state_id) * incr_batch_size
def decompose_class(n):
file_path = os.path.join(root_path, str(n))
if os.path.exists(file_path):
try:
os.makedirs(file_path+'_decomposed', exist_ok=True)
compteur = 0
with open(file_path, 'r') as f:
for line in f:
with open(os.path.join(file_path+'_decomposed', str(compteur)), 'w') as f2:
f2.write(line)
compteur += 1
except:
pass
with Pool() as p:
p.map(decompose_class, range(nb_classes)) | GregoirePetit/FeTrIL | codes/prepare_train.py | prepare_train.py | py | 1,440 | python | en | code | 35 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "configparser.ConfigParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_n... |
37288735237 | import os
import cv2
import numpy as np
from keras.models import load_model
from keras.utils import to_categorical
# Define the path to the FER-2013 test dataset directory
test_data_path = "/content/Dissertation-Project/dataset/test"
# Define the list of emotions and their corresponding labels
emotion_labels = {
'angry': 0,
'disgust': 1,
'fear': 2,
'happy': 3,
'neutral': 4,
'sad': 5,
'surprise': 6
}
# Load the test images and labels
test_images = []
test_labels = []
for emotion_folder in os.listdir(test_data_path):
emotion = emotion_folder
emotion_folder_path = os.path.join(test_data_path, emotion_folder)
# Skip non-directory files
if not os.path.isdir(emotion_folder_path):
continue
for image_filename in os.listdir(emotion_folder_path):
# Skip non-image files
if not image_filename.endswith(('.jpg', '.jpeg', '.png')):
continue
image_path = os.path.join(emotion_folder_path, image_filename)
image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
image = cv2.resize(image, (48, 48)) # Resize images to the desired shape
test_images.append(image)
test_labels.append(emotion_labels[emotion])
# Convert test images and labels to numpy arrays
test_images = np.array(test_images)
test_labels = np.array(test_labels)
# Normalize pixel values to range between 0 and 1
test_images = test_images / 255.0
# Perform one-hot encoding on the test labels
test_labels = to_categorical(test_labels)
# Load the trained model
model_path = "/content/Dissertation-Project/my_model_with_early_stopping.keras"
model = load_model(model_path)
# Evaluate the model on the test set
loss, accuracy = model.evaluate(test_images, test_labels)
print(f"Test Loss: {loss:.4f}")
print(f"Test Accuracy: {accuracy:.4f}")
| alex-nazemi/Dissertation-Project | test_model.py | test_model.py | py | 1,831 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number... |
15450209554 | import requests
from bs4 import BeautifulSoup
from time import sleep
movie_links_id = []
for z in range(10):
print(z)
url_1 = f"https://www.kinoafisha.info/rating/movies/?page={z}"
sleep(5)
r_1 = requests.get(url_1, timeout=5)
soup_1 = BeautifulSoup(r_1.text, 'lxml')
films = soup_1.find_all('div', class_='movieItem')
for film in films:
link = film.find('a', 'movieItem_title').get('href')
link = ''.join([i for i in link if i.isdigit()])
movie_links_id.append(link)
| VitOsGG/parser_movie | pars_movie_id.py | pars_movie_id.py | py | 525 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.sleep",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 13,
"usage_type": "call"
}
] |
32983223303 |
# basics
import argparse
import os
import pickle
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# sklearn imports
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import normalize
# our code
import linear_model
import utils
url_amazon = "https://www.amazon.com/dp/%s"
def load_dataset(filename):
    """Unpickle and return a dataset stored under ../data relative to the cwd."""
    dataset_path = os.path.join('..', 'data', filename)
    with open(dataset_path, 'rb') as fh:
        return pickle.load(fh)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-q','--question', required=True)
io_args = parser.parse_args()
question = io_args.question
if question == "1":
filename = "ratings_Patio_Lawn_and_Garden.csv"
with open(os.path.join("..", "data", filename), "rb") as f:
ratings = pd.read_csv(f,names=("user","item","rating","timestamp"))
print("Number of ratings:", len(ratings))
print("The average rating:", np.mean(ratings["rating"]))
n = len(set(ratings["user"]))
d = len(set(ratings["item"]))
print("Number of users:", n)
print("Number of items:", d)
print("Fraction nonzero:", len(ratings)/(n*d))
X, user_mapper, item_mapper, user_inverse_mapper, item_inverse_mapper, user_ind, item_ind = utils.create_user_item_matrix(ratings)
print(type(X))
print("Dimensions of X:", X.shape)
elif question == "1.1":
filename = "ratings_Patio_Lawn_and_Garden.csv"
with open(os.path.join("..", "data", filename), "rb") as f:
ratings = pd.read_csv(f,names=("user","item","rating","timestamp"))
X, user_mapper, item_mapper, user_inverse_mapper, item_inverse_mapper, user_ind, item_ind = utils.create_user_item_matrix(ratings)
X_binary = X != 0
# YOUR CODE HERE FOR Q1.1.1
items_groups = ratings.groupby(['item']).sum()
items_groups['rating'].idxmax()
items_groups.sort_values(by='rating',ascending=False)[0:5]
# YOUR CODE HERE FOR Q1.1.2
user_groups = ratings.groupby(['user']).size().reset_index(name='count')
user_groups.sort_values(by='count',ascending=False)[0:5]
# YOUR CODE HERE FOR Q1.1.3
plt.hist(user_groups['count'], bins=45)
plt.yscale('log', nonposy='clip')
plt.xlabel('Number of ratings per user')
plt.ylabel('log(Frequency)')
plt.savefig('user_hist.png')
items_groups = ratings.groupby(['item']).sum()
plt.hist(items_groups['rating'],bins=40)
plt.yscale('log', nonposy='clip')
plt.xlabel('Number of ratings per item')
plt.ylabel('log(Frequency)')
plt.savefig('item_hist.png')
plt.hist(ratings['rating'], bins=5)
plt.xlabel('Rating out of 5 stars')
plt.ylabel('rating_hist.png')
elif question == "1.2":
filename = "ratings_Patio_Lawn_and_Garden.csv"
with open(os.path.join("..", "data", filename), "rb") as f:
ratings = pd.read_csv(f,names=("user","item","rating","timestamp"))
X, user_mapper, item_mapper, user_inverse_mapper, item_inverse_mapper, user_ind, item_ind = utils.create_user_item_matrix(ratings)
X_binary = X != 0
grill_brush = "B00CFM0P7Y"
grill_brush_ind = item_mapper[grill_brush]
grill_brush_vec = X[:,grill_brush_ind]
print(url_amazon % grill_brush)
# YOUR CODE HERE FOR Q1.2
def nearest_ids(nbrs_list):
nearest_id = np.empty(len(nbrs_list), dtype='S15')
for key,value in item_inverse_mapper.items():
for i in range(0,len(nbrs_list)):
if key == nbrs_list[i]:
nearest_id[i] = value
return nearest_id
#Transpose so KNN iterates over columns not rows
X_train = np.transpose(X)
print(X_train.shape);print(X.shape)
#Fit model
model = NearestNeighbors(n_neighbors=6)
model.fit(X_train)
#Apply knn to get the index of the nearest items
nbrs = model.kneighbors(np.transpose(grill_brush_vec), n_neighbors=6, return_distance=False)
#get the item id's of the nearest items
nbrs_idx = nbrs[0]
nearest_id = nearest_ids(nbrs_idx)
item_list = nearest_id[1:]
print(item_list)
#Normalize data and train model
X_train_norm = normalize(X_train)
model_norm = NearestNeighbors(n_neighbors=6)
model_norm.fit(X_train_norm)
#Knn to get nearest items from normned data
nbrs_normed = model_norm.kneighbors(np.transpose(grill_brush_vec), n_neighbors=6, return_distance=False)
#Get item id's of nearest items
nbrs_idx_norm = nbrs_normed[0]
nearest_norm_id = nearest_ids(nbrs_idx_norm)
norm_item_list = nearest_norm_id[1:]
print(norm_item_list)
#Fit model based on cosine similarity
model_cosine = NearestNeighbors(n_neighbors=6,metric='cosine')
model_cosine.fit(X_train)
#Knn to get nearest items
nbrs_cosine = model_cosine.kneighbors(np.transpose(grill_brush_vec), n_neighbors=6, return_distance=False)
#Get item id's of nearest items
nbrs_idx_cosine = nbrs_cosine[0]
nearest_cosine_id = nearest_ids(nbrs_idx_cosine)
cosine_item_list = nearest_cosine_id[1:]
print(cosine_item_list)
#Are both lists actually the same
print(cosine_item_list == norm_item_list)
# YOUR CODE HERE FOR Q1.3
list1 = [xi.decode() for xi in item_list]
list2 = [xi.decode() for xi in cosine_item_list]
list3 = [xi.decode() for xi in norm_item_list]
print(list1);print(list2);print(list3)
items_groups = ratings.groupby(['item']).size().reset_index(name='count')
items_groups.sort_values(by='count',ascending=False)[0:5]
count_L1 = [items_groups.loc[items_groups['item'] == xi] for xi in list1]
count_L2 = [items_groups.loc[items_groups['item'] == xi] for xi in list2]
print(count_L1);print(count_L2)
elif question == "3":
data = load_dataset("outliersData.pkl")
X = data['X']
y = data['y']
# Fit least-squares estimator
model = linear_model.LeastSquares()
model.fit(X,y)
print(model.w)
utils.test_and_plot(model,X,y,title="Least Squares",filename="least_squares_outliers.pdf")
#model2 =linear_model.WeightedLeastSquares()
#z = np.ones(500)
#model2.fit(X,y,z)
#print(model2.w)
elif question == "3.1":
data = load_dataset("outliersData.pkl")
X = data['X']
y = data['y']
z1 = np.ones(400)
z2 = np.full((100),0.1)
z = np.r_[z1,z2]
# Fit weighted least-squares estimator
model2 = linear_model.WeightedLeastSquares()
model2.fit(X,y,z)
print(model2.w)
utils.test_and_plot(model2,X,y,title="Weighted Least Squares",filename="weighted_least_squares_outliers.png")
elif question == "3.3":
# loads the data in the form of dictionary
data = load_dataset("outliersData.pkl")
X = data['X']
y = data['y']
# Fit least-squares estimator
model = linear_model.LinearModelGradient()
model.fit(X,y)
print(model.w)
utils.test_and_plot(model,X,y,title="Robust (L1) Linear Regression",filename="least_squares_robust.pdf")
elif question == "4":
data = load_dataset("basisData.pkl")
X = data['X']
y = data['y']
Xtest = data['Xtest']
ytest = data['ytest']
# Fit least-squares model
model = linear_model.LeastSquares()
model.fit(X,y)
utils.test_and_plot(model,X,y,Xtest,ytest,title="Least Squares, no bias",filename="least_squares_no_bias.pdf")
elif question == "4.1":
data = load_dataset("basisData.pkl")
X = data['X']
y = data['y']
Xtest = data['Xtest']
ytest = data['ytest']
#Linear regression with bias
model = linear_model.LeastSquaresBias()
model.fit(X,y)
utils.test_and_plot(model,X,y,Xtest,ytest,title="Least Squares, bias",filename="least_squares_bias.pdf")
elif question == "4.2":
data = load_dataset("basisData.pkl")
X = data['X']
y = data['y']
Xtest = data['Xtest']
ytest = data['ytest']
for p in range(11):
print("p=%d" % p)
model = linear_model.LeastSquaresPoly(p)
model.fit(X,y)
utils.test_and_plot(model,X,y,Xtest,ytest)
#model = linear_model.LeastSquaresPoly(5)
#model.fit(X,y)
#utils.test_and_plot(model,X,y,Xtest,ytest,title="Poly fit",filename="poly_fit.pdf")
else:
print("Unknown question: %s" % question)
| akshi96/Coursework | CPSC340 - Machine Learning/a3/code/main.py | main.py | py | 8,956 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "pickle.load",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"... |
26236122257 | # python -m pip install -r .\requirements.txt
# python .\merge.py .\annotatedPdfs\ final.pdf
import PyPDF2
import os
import sys
def merge_pdfs(pdf_dir, output_filename):
    """Concatenate every PDF found in *pdf_dir* into *output_filename*.

    Files are merged in sorted filename order: os.listdir() returns entries
    in arbitrary order, so without sorting the page order of the merged
    document would be nondeterministic.
    """
    pdf_writer = PyPDF2.PdfWriter()
    # Collect all PDF files (case-insensitive extension) in a stable order.
    files = sorted(
        os.path.join(pdf_dir, file)
        for file in os.listdir(pdf_dir)
        if file.lower().endswith('.pdf')
    )
    for file in files:
        with open(file, 'rb') as pdf_file:
            pdf_reader = PyPDF2.PdfReader(pdf_file)
            for page in pdf_reader.pages:
                pdf_writer.add_page(page)
    with open(output_filename, 'wb') as merged_pdf:
        pdf_writer.write(merged_pdf)
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Usage: python merge.py <PDF_DIRECTORY> <OUTPUT_FILENAME>")
sys.exit(1)
pdf_dir = sys.argv[1]
output_filename = sys.argv[2]
# Merge the PDFs into a single file
merge_pdfs(pdf_dir, output_filename)
| lalibi/annotate-merge | merge.py | merge.py | py | 970 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyPDF2.PdfWriter",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_numb... |
3581458900 | # Import necessary libraries
import openai
import sys
import json
import html
import re
import ssl
import os
import pprint
import nltk
import requests
import time
# Make sure the NLTK "punkt" tokenizer data is available.
# nltk.data.find() raises LookupError when the resource is missing — it does
# not return a falsy value — so the lookup must be wrapped in try/except.
# The original `if not nltk.data.find(...)` would crash instead of downloading.
try:
    nltk.data.find('tokenizers/punkt')
except LookupError:
    nltk.download('punkt', quiet=True)
# Command line arguments: <location> <sku>
if len(sys.argv) < 3:
    sys.exit("Usage: python script.py <location> <sku>")
location = sys.argv[1]
sku = sys.argv[2]
# Initialize an empty dictionary for credentials
credentials = {}
# Define the path to the credentials file, relative to this script's directory.
creds_file_path = os.path.join(
    os.path.dirname(os.path.abspath(__file__)),  # Get the directory of the current file
    "../creds2.txt"  # Append the relative path to the credentials file
)
# Remove any stale product dump left over from a previous run.
if os.path.exists('product.json'):
    os.remove('product.json')
# Define a class to represent a location
class Location:
    """One storefront parsed from the credentials file.

    Attributes:
        website: store domain, e.g. "example.com".
        user: account user name from the credentials file.
        city: city name (injected into AI-generated product titles).
        phone: phone number from the credentials file.
        consumer_key / consumer_secret: WooCommerce REST API credentials.
        api_key: OpenAI API key associated with this location.
    """
    def __init__(self, website, user, city, phone, consumer_key, consumer_secret, api_key):
        self.website = website
        self.user = user
        self.city = city
        self.phone = phone
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.api_key = api_key # Here's the new attribute
# Parsed Location objects, one per "[website]" section in the credentials file.
locations = []
# Open the credentials file
with open(creds_file_path) as f:
    # Parser state for the current section.
    website = None
    user = None
    city = None
    phone = None
    consumer_key = None
    consumer_secret = None
    openai.api_key = None
    # Loop over each line in the file
    for line in f:
        line = line.strip() # Remove trailing and leading whitespace
        # A "[website]" header starts a new section: flush the previous one
        # (if complete) and reset the per-section state.
        if line.startswith("[") and line.endswith("]"):
            if website and user and city and phone and consumer_key and consumer_secret and openai.api_key:
                locations.append(Location(website, user, city, phone, consumer_key, consumer_secret, openai.api_key))
            website = line[1:-1].lstrip() # Remove the brackets and any leading whitespace
            user = None
            city = None
            phone = None
            consumer_key = None
            consumer_secret = None
            openai.api_key = None
        # If the line starts with a bracket but doesn't end with one, it's a multiline website; just store the first part
        elif line.startswith("["):
            website = line[1:]
        # If the line ends with a bracket but doesn't start with one, it's the end of a multiline website; append this part
        elif line.endswith("]"):
            website += line[:-1]
        # A "key = value" pair inside the current section.
        elif website and " = " in line:
            key, value = line.split(" = ")
            if key == "user":
                user = value
            elif key == "city":
                city = value
            elif key == "phone":
                phone = value
            elif key.lower().endswith("_consumer_key"):
                consumer_key = value
            elif key.lower().endswith("_consumer_secret"):
                consumer_secret = value
            elif key == "openai.api_key":
                # NOTE(review): stored on the openai module itself, so the
                # last section's key wins globally — confirm that's intended.
                openai.api_key = value
# Once we've parsed the entire file, flush any leftover complete section.
if website and user and city and phone and consumer_key and consumer_secret and openai.api_key:
    locations.append(Location(website, user, city, phone, consumer_key, consumer_secret, openai.api_key))
# Look up the product on the FIRST configured location only (note the break).
for location in locations:
    print("Using " + location.website + " as source product." + sku)
    base_url = "https://" + location.website + "/wp-json/wc/v3/products"
    city = location.city
    phone = location.phone
    # NOTE(review): these two formatted strings are built but never used;
    # the (key, secret) tuple below is what authenticates the request.
    consumer_key = location.website + "_consumer_key:" + location.consumer_key
    consumer_secret = location.website + "_consumer_secret:" + location.consumer_secret
    auth = (
        location.consumer_key,
        location.consumer_secret,
    )
    # Look the product up by SKU on the source store's WooCommerce REST API.
    response = requests.get(f'{base_url}', auth=auth, params={'sku': sku})
    response.raise_for_status()
    if not response.json():
        print(f"No product found with SKU: {sku}")
        exit()
    product = response.json()[0]
    print("Source site: ", location.website)
    print("Product to clone: ", product['sku'], "Name: ", product['name'])
    print()
    break
time.sleep(1)
print("Turn AI loose...")
for location in locations[1:]:
print("Using " + location.website + " as source product." + sku)
base_url = "https://" + location.website + "/wp-json/wc/v3/products"
consumer_key = location.website + "_consumer_key:" + location.consumer_key
consumer_secret = location.website + "_consumer_secret:" + location.consumer_secret
auth = (
location.consumer_key,
location.consumer_secret,
)
response = requests.get(f'{base_url}', auth=auth, params={'sku': sku})
response.raise_for_status()
if not response.json():
print(f"No product found with SKU: {sku}")
exit()
product = response.json()[0]
sku = product['sku']
city = location.city
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages = [
{
"role": "system",
"content": "You are a helpful budtender who knows all about the cannabis industry in Northern California. ",
},
{
"role": "user",
"content": f"I have a product with SKU '{sku}' named '{product['name']}' with a short description of '{product['short_description']}'."
f"I need a new but similar name for this product that will both help with SEO and improve the product visibility in search engines."
f"Use the city name '{city}' in the title. Examples of really good usage would be variations of things like Best in Burlingame or San Ramons best. "
f"Don't stray too far from the core idea of the original name. Add the city name to the product title somehow. "
f"Limit the new product name to about 70 characters. Do not use any punctuation or apostrophes or any single or double quotes. "
f"Use proper capitalization. Optimize all for SEO. Never use prices in the new titles."
},
]
)
new_product_name = response['choices'][0]['message']['content'].strip()
new_product_name = html.unescape(re.sub('<.*?>', '', new_product_name))
print("Suggested new product name: ", new_product_name)
product['name'] = new_product_name
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages = [
{
"role": "system",
"content": "You are a helpful budtender who knows all about the cannabis industry in and around the city of '{city}'. ",
},
{
"role": "user",
"content": f"I am a budtender looking to deliver cannabis products to my customers. "
f"Create a list of 5 possible public meetup spots in the area of '{city}' that can be used as meetup spots if the customer doesnt want to meet at home or work. Prioritize discreet and easy access."
f"The locations should be convenient, well known, and not near a school or police station. Present them as a bullet list with short descriptions as to why they are good meetup spots."
},
]
)
meetup_spots = response['choices'][0]['message']['content'].strip()
meetup_spots = html.unescape(re.sub('<.*?>', '', meetup_spots))
product['description'] = product['description'] + " \n\n Have your " + new_product_name + " delivered to your home or work or choose one of these great meetup spots in " + city + "\n" + meetup_spots
#print("Suggested meetups spots for ",city,": ", meetup_spots)
#print("New product description", product['description'])
update_url = f'{base_url}/{product["id"]}'
update_response = requests.put(update_url, json=product, auth=auth)
update_response.raise_for_status()
break
| menached/ai_product_updater | fetch-first-push-to-rest.py | fetch-first-push-to-rest.py | py | 8,235 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.data.find",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "nltk.data",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "nltk.download",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_numb... |
13368558436 | # In[1]:
import os
import sys
import random
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
import pdb
from PIL import Image
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Root directory of the dataset
DATA_DIR = '../images/trash_test/'
DATA_NUM = 40
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from mrcnn.model import log
# get_ipython().run_line_magic('matplotlib', 'inline')
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
# ## Configurations
# In[2]:
class TrashConfig(Config):
"""Configuration for training on the toy shapes dataset.
Derives from the base Config class and overrides values specific
to the toy shapes dataset.
"""
# Give the configuration a recognizable name
NAME = "objects"
# Train on 1 GPU and 8 images per GPU. We can put multiple images on each
# GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
GPU_COUNT = 1
IMAGES_PER_GPU = 32
# Number of classes (including background)
NUM_CLASSES = 1 + 2 # background + 3 shapes
# Use small images for faster training. Set the limits of the small side
# the large side, and that determines the image shape.
IMAGE_MIN_DIM = 256
IMAGE_MAX_DIM = 256
# Use smaller anchors because our image and objects are small
RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128) # anchor side in pixels
# Reduce training ROIs per image because the images are small and have
# few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
TRAIN_ROIS_PER_IMAGE = 32
# Use a small epoch since the data is simple
STEPS_PER_EPOCH = 100
# use small validation steps since the epoch is small
VALIDATION_STEPS = 5
config = TrashConfig()
config.display()
# ## Notebook Preferences
# In[3]:
def get_ax(rows=1, cols=1, size=8):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Change the default size attribute to control the size
of rendered images
"""
_, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
return ax
# In[4]:
# ## preprocess the ground truth label
'''
file_clothes = open(DATA_DIR+'clothes.txt','r')
data_clothes = []
for i in range(DATA_NUM):
data_clothes.append([int(j) for j in file_clothes.readline().split()])
file_paperball = open(DATA_DIR+'paperball.txt','r')
data_paperball = []
for i in range(DATA_NUM):
data_paperball.append([int(j) for j in file_paperball.readline().split()])
'''
data_clothes = []
for i in range(DATA_NUM):
data_clothes.append([0, 0, 0, 0])
data_paperball = []
for i in range(DATA_NUM):
data_paperball.append([0, 0, 0, 0])
# ## Dataset
#
# Create a synthetic dataset
#
# Extend the Dataset class and add a method to load the shapes dataset, `load_objects()`, and override the following methods:
#
# * load_image()
# * load_mask()
# * image_reference()
# In[4]:
class TrashDataset(utils.Dataset):
"""Generates the shapes synthetic dataset. The dataset consists of simple
shapes (triangles, squares, circles) placed randomly on a blank surface.
The images are generated on the fly. No file access required.
"""
def load_objects(self, start, end, height, width):
"""Generate the requested number of synthetic images.
count: number of images to generate.
height, width: the size of the generated images.
"""
# Add classes
self.add_class("objects", 1, "clothes")
self.add_class("objects", 2, "paperball")
# Add images
# Generate random specifications of images (i.e. color and
# list of shapes sizes and locations). This is more compact than
# actual images. Images are generated on the fly in load_image().
for i in range(start,end):
objects = []
data_clothes_line = data_clothes[i]
data_paperball_line = data_paperball[i]
for k in range(len(data_clothes_line)//4):
objects.append(('clothes', tuple(data_clothes_line[4*k:4*(k+1)])))
for k in range(len(data_paperball_line)//4):
objects.append(('paperball', tuple(data_paperball_line[4*k:4*(k+1)])))
# bg_color, shapes = self.random_image(height, width)
self.add_image("objects", image_id=i, path=DATA_DIR + str(i) + '.png',
width=width, height=height, objects=objects)
def load_image(self, image_id):
assert image_id < len(self.image_info)
image_path = self.image_info[image_id]['path']
image = Image.open(image_path)
return np.array(image)
def image_reference(self, image_id):
"""Return the shapes data of the image."""
info = self.image_info[image_id]
if info["source"] == "shapes":
return info["shapes"]
else:
super(self.__class__).image_reference(self, image_id)
def load_mask(self, image_id):
"""Generate instance masks for shapes of the given image ID.
"""
info = self.image_info[image_id]
objects = info['objects']
count = len(objects)
mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)
for i, (shape, pos) in enumerate(info['objects']):
x1,y1,width,height = pos
mask[y1:y1+height, x1:x1+width, i] = 1
# Handle occlusions
# occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)
# for i in range(count-2, -1, -1):
# mask[:, :, i] = mask[:, :, i] * occlusion
# occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))
# Map class names to class IDs.
class_ids = np.array([self.class_names.index(s[0]) for s in objects])
return mask.astype(np.bool), class_ids.astype(np.int32)
def draw_shape(self, image, shape, dims, color):
"""Draws a shape from the given specs."""
# Get the center x, y and the size s
x, y, s = dims
if shape == 'square':
cv2.rectangle(image, (x-s, y-s), (x+s, y+s), color, -1)
elif shape == "circle":
cv2.circle(image, (x, y), s, color, -1)
elif shape == "triangle":
points = np.array([[(x, y-s),
(x-s/math.sin(math.radians(60)), y+s),
(x+s/math.sin(math.radians(60)), y+s),
]], dtype=np.int32)
cv2.fillPoly(image, points, color)
return image
def random_shape(self, height, width):
"""Generates specifications of a random shape that lies within
the given height and width boundaries.
Returns a tuple of three valus:
* The shape name (square, circle, ...)
* Shape color: a tuple of 3 values, RGB.
* Shape dimensions: A tuple of values that define the shape size
and location. Differs per shape type.
"""
# Shape
shape = random.choice(["square", "circle", "triangle"])
# Color
color = tuple([random.randint(0, 255) for _ in range(3)])
# Center x, y
buffer = 20
y = random.randint(buffer, height - buffer - 1)
x = random.randint(buffer, width - buffer - 1)
# Size
s = random.randint(buffer, height//4)
return shape, color, (x, y, s)
def random_image(self, height, width):
"""Creates random specifications of an image with multiple shapes.
Returns the background color of the image and a list of shape
specifications that can be used to draw the image.
"""
# Pick random background color
bg_color = np.array([random.randint(0, 255) for _ in range(3)])
# Generate a few random shapes and record their
# bounding boxes
shapes = []
boxes = []
N = random.randint(1, 4)
for _ in range(N):
shape, color, dims = self.random_shape(height, width)
shapes.append((shape, color, dims))
x, y, s = dims
boxes.append([y-s, x-s, y+s, x+s])
# Apply non-max suppression wit 0.3 threshold to avoid
# shapes covering each other
keep_ixs = utils.non_max_suppression(np.array(boxes), np.arange(N), 0.3)
shapes = [s for i, s in enumerate(shapes) if i in keep_ixs]
return bg_color, shapes
# In[5]:
# Test dataset
dataset_test = TrashDataset()
dataset_test.load_objects(0, DATA_NUM, config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1])
dataset_test.prepare()
# ## Detection
# In[11]:
class InferenceConfig(TrashConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 32
inference_config = InferenceConfig()
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
config=inference_config,
model_dir=MODEL_DIR)
model_path = model.find_last()
# Load trained weights
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
# In[12]:
for pos_ids in range(5):
# Test on random images
original_images = []
image_ids = range(8)
for image_id in image_ids:
original_image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(dataset_test, inference_config,
image_id+pos_ids*8, use_mini_mask=False)
for j in range(4):
original_images.append(original_image)
'''
log("original_image", original_image)
log("image_meta", image_meta)
log("gt_class_id", gt_class_id)
log("gt_bbox", gt_bbox)
log("gt_mask", gt_mask)
visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id,
dataset_train.class_names, figsize=(8, 8))
'''
results = model.detect(original_images, verbose=1)
for i in range(8):
image_id = i * 4
r = results[image_id]
img_save_path = DATA_DIR+'pos_'+str(pos_ids)+'_'+str(i)+'_out.png'
visualize.display_instances(original_images[image_id], r['rois'], r['masks'], r['class_ids'],
dataset_test.class_names, r['scores'], ax=get_ax(), img_save_path=img_save_path)
| BoSmallEar/Roberto | roberto_mask_rcnn/src/test_roberto.py | test_roberto.py | py | 10,913 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_num... |
18562474998 | from django.shortcuts import render, redirect, render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from django.template import Context
from todo.models import ToDo
from django.contrib.auth.models import User
from todo.forms import ToDoForm, AddUserForm, AddRecurringToDoForm, TransferToDoDateForm
from django.contrib.auth.forms import UserCreationForm
from django.core.context_processors import csrf
from django.utils.timezone import utc
import datetime
def transfer_todo_date_form(request, todoID):
if 'id' not in request.session:
return HttpResponseRedirect('/accounts/unauthorized')
import time
current_date = time.strftime('%Y-%m-%d')
fullname = request.session['first_name'] + ' ' + request.session['last_name']
args = {}
args.update( csrf(request) )
args['form'] = TransferToDoDateForm()
args['curr_date'] = current_date
args['full_name'] = fullname
args['item'] = ToDo.objects.get(id=todoID).item
args['item_id'] = todoID
args['item_current_date'] = ToDo.objects.get(id=todoID).date_todo
args['is_administrator'] = request.session['is_superuser']
return render_to_response('transfer_todo_date.html', args)
def transfer_todo_date(request):
if 'id' not in request.session:
return HttpResponseRedirect('/accounts/unauthorized')
if request.method == "POST":
form = TransferToDoDateForm(request.POST)
if form.is_valid():
i = request.POST["item_id"]
new_date = form.cleaned_data['new_date']
ToDo.objects.filter(id=i).update(date_todo=new_date)
return HttpResponseRedirect('/todo/home')
def add_recurring_todo(request):
if 'id' not in request.session:
return HttpResponseRedirect('/accounts/unauthorized')
import time
current_date = time.strftime('%Y-%m-%d')
fullname = request.session['first_name'] + ' ' + request.session['last_name']
if request.method == "POST":
form = AddRecurringToDoForm(request.POST)
if form.is_valid():
i = form.cleaned_data['item']
start_date = form.cleaned_data['start_date']
end_date = form.cleaned_data['end_date']
total_days = (end_date - start_date).days + 1
d = 0
for day_number in range(total_days):
deltaday = datetime.timedelta(days=d)
current_date = start_date + deltaday
ToDo.objects.create(added_by=request.session['id'], date_todo=current_date, archive=0, item=i)
d = d + 1
return HttpResponseRedirect('/todo/home')
args = {}
args.update( csrf(request) )
args['form'] = AddRecurringToDoForm()
args['curr_date'] = current_date
args['full_name'] = fullname
args['is_administrator'] = request.session['is_superuser']
return render_to_response('addrecurringtodo.html', args)
def delete_user(request, userID):
if 'id' not in request.session:
return HttpResponseRedirect('/accounts/unauthorized')
if request.session['is_superuser'] != 1:
return HttpResponseRedirect('/todo/unauthorized')
User.objects.filter(id=userID).delete()
return redirect('/todo/view_users')
def view_users(request):
if 'id' not in request.session:
return HttpResponseRedirect('/accounts/unauthorized')
if request.session['is_superuser'] != 1:
return HttpResponseRedirect('/todo/unauthorized')
import time
current_date = time.strftime('%Y-%m-%d')
fullname = request.session['first_name'] + ' ' + request.session['last_name']
user_list = User.objects.filter(is_active=1)
return render(request, 'userlist.html',
{'curr_date': current_date,
'userList': user_list,
'full_name':fullname,
'is_administrator':request.session['is_superuser']
}
)
def add_user(request):
if 'id' not in request.session:
return HttpResponseRedirect('/accounts/unauthorized')
if request.session['is_superuser'] != 1:
return HttpResponseRedirect('/todo/unauthorized')
import time
current_date = time.strftime('%Y-%m-%d')
fullname = request.session['first_name'] + ' ' + request.session['last_name']
if request.method == "POST":
form = AddUserForm(request.POST)
if form.is_valid():
instance = form.save(commit=False)
instance.is_superuser = 0
instance.is_staff = 1
instance.is_active = 1
instance.save()
return HttpResponseRedirect('/todo/view_users')
args = {}
args.update( csrf(request) )
args['form'] = AddUserForm()
args['curr_date'] = current_date
args['full_name'] = fullname
args['is_administrator'] = request.session['is_superuser']
return render_to_response('adduser.html', args)
def addtodo(request):
if 'id' not in request.session:
return HttpResponseRedirect('/accounts/unauthorized')
if request.POST:
form = ToDoForm(request.POST)
if form.is_valid():
instance = form.save(commit=False)
instance.added_by = request.session['id']
instance.archive = 0
instance.save()
return HttpResponseRedirect('/todo/home')
else:
form = ToDoForm()
args = {}
args.update( csrf(request) )
args['form'] = form
fullname = request.session['first_name'] + ' ' + request.session['last_name']
import time
current_date = time.strftime('%Y-%m-%d')
args['curr_date'] = current_date
args['full_name'] = fullname
args['is_administrator'] = request.session['is_superuser']
return render_to_response('add_todo.html', args)
def backoperations(request):
if 'id' not in request.session:
return HttpResponseRedirect('/accounts/unauthorized')
import time
current_date = time.strftime('%Y-%m-%d')
#pending todo items will be transferred today
ToDo.objects.filter(date_todo__lt=current_date).filter(archive='0').update(date_todo=current_date)
#purge done and cancelled to do items < 7 days
import datetime
today = datetime.date.today()
seven_days = datetime.timedelta(days=7)
seven_days_ago = today - seven_days
ToDo.objects.filter(date_todo__lt=seven_days_ago).delete()
return HttpResponseRedirect('/todo/home')
def view_monthly(request, today = datetime.date.today()):
if 'id' not in request.session:
return HttpResponseRedirect('/accounts/unauthorized')
import time
current_date = time.strftime('%Y-%m-%d')
first_day_current = datetime.datetime(today.year, today.month, 1)
if(today.month == 12):
first_day_next_month = datetime.datetime(today.year+1, 1, 1)
else:
first_day_next_month = datetime.datetime(today.year, today.month+1, 1)
one_day = datetime.timedelta(days=1)
last_day_current = first_day_next_month - one_day
#last_day_previous = first_day_current - datetime. timedelta(days=1)
#first_day_previous = datetime.datetime(last_day_previous.year, last_day_previous.month, 1)
monthly_todo_list = ToDo.objects.filter(date_todo__lte=last_day_current).filter(date_todo__gte=first_day_current).filter(added_by=request.session['id'])
fullname = request.session['first_name'] + ' ' + request.session['last_name']
return render(request, 'view_monthly.html',
{'curr_date': current_date,
'end_month_day':last_day_current,
'start_month_day':first_day_current,
'todoList': monthly_todo_list,
'full_name':fullname,
'is_administrator':request.session['is_superuser']
}
)
def view_weekly(request, date_today = datetime.date.today()):
if 'id' not in request.session:
return HttpResponseRedirect('/accounts/unauthorized')
import time
current_date = time.strftime('%Y-%m-%d')
dow_today = date_today.weekday()
if dow_today == 6:
days_ago_saturday = 1
else:
days_ago_saturday = dow_today + 2
delta_saturday = datetime.timedelta(days=days_ago_saturday)
saturday = date_today - delta_saturday
delta_prevsunday = datetime.timedelta(days=6)
prev_sunday = saturday - delta_prevsunday
eight_days = datetime.timedelta(days=8)
week_end = saturday + eight_days
week_start = prev_sunday + eight_days
week_end = week_end.strftime('%Y-%m-%d')
week_start = week_start.strftime('%Y-%m-%d')
weekly_todo_list = ToDo.objects.filter(date_todo__lte=week_end).filter(date_todo__gte=week_start).filter(added_by=request.session['id'])
fullname = request.session['first_name'] + ' ' + request.session['last_name']
return render(request, 'view_weekly.html',
{'curr_date': current_date,
'end_week_day':week_end,
'start_week_day':week_start,
'todoList': weekly_todo_list,
'full_name':fullname,
'is_administrator':request.session['is_superuser']
}
)
def home_page(request):
if 'id' not in request.session:
return HttpResponseRedirect('/accounts/unauthorized')
import time
current_date = time.strftime('%Y-%m-%d')
current_todo_list = ToDo.objects.filter(date_todo=current_date).filter(added_by=request.session['id'])
fullname = request.session['first_name'] + ' ' + request.session['last_name']
return render(request, 'home.html',
{'curr_date': current_date,
'todoList': current_todo_list,
'full_name':fullname,
'is_administrator':request.session['is_superuser']
}
)
def tick_done(request, todoID):
if 'id' not in request.session:
return HttpResponseRedirect('/accounts/unauthorized')
ToDo.objects.filter(id=todoID).update(archive=1)
return redirect('home')
def tick_cancel(request, todoID):
if 'id' not in request.session:
return HttpResponseRedirect('/accounts/unauthorized')
ToDo.objects.filter(id=todoID).update(archive=2)
return redirect('home')
def unauthorized(request):
import time
current_date = time.strftime('%Y-%m-%d')
fullname = request.session['first_name'] + ' ' + request.session['last_name']
return render_to_response('unauthorized_access2.html', {'curr_date':current_date, 'full_name':fullname})
| mscs-jpslaanan/project | todo/views.py | views.py | py | 11,185 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.core.context_processors.csrf",
"line_number": 25,
"usage_type": "call"
},
{
... |
37918714311 | from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from models import Meuble, CommandeParticulier, CommandeProfessionnel
from forms import MeubleQuantiteListForm, ParticulierContactForm, MeubleQuantiteFormSet, ProfessionnelContactForm
from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from django.utils import simplejson
from decimal import Decimal
from django.core.urlresolvers import reverse
def particulier_devis(request):
if request.method == 'POST':
contact_form = ParticulierContactForm(request.POST)
meublequantite_formset = MeubleQuantiteFormSet(request.POST)
if contact_form.is_valid() and meublequantite_formset.is_valid():
meubles = meublequantite_formset.save()
commande = contact_form.save()
for meuble in meubles:
commande.meubles.add(meuble)
commande.save()
return render_to_response('moving/merci.html', {
'commande': commande,
}, context_instance=RequestContext(request))
else:
contact_form = ParticulierContactForm()
meublequantite_formset = MeubleQuantiteListForm
return render_to_response('moving/devis.html', {
'contact_form': contact_form,
'meublequantite_formset': meublequantite_formset
}, context_instance=RequestContext(request))
def professionnel_devis(request):
if request.method == 'POST':
contact_form = ProfessionnelContactForm(request.POST)
# meublequantite_formset = MeubleQuantiteFormSet(request.POST)
if contact_form.is_valid():# and meublequantite_formset.is_valid():
# meubles = meublequantite_formset.save()
commande = contact_form.save()
# for meuble in meubles:
# commande.meubles.add(meuble)
commande.save()
return render_to_response('moving/merci.html', {
'commande': commande,
}, context_instance=RequestContext(request))
else:
contact_form = ProfessionnelContactForm()
# meublequantite_formset = MeubleQuantiteListForm
return render_to_response('moving/devis_professionnel.html', {
'contact_form': contact_form,
# 'meublequantite_formset': meublequantite_formset
}, context_instance=RequestContext(request))
def index(request):
return render_to_response('moving/index.html', {
}, context_instance=RequestContext(request))
@csrf_exempt
def calculate_volume(request):
to_return = {
'msg': u'No POST data sent.',
'volume': None,
}
if request.method == "POST": # and is_ajax
volume = Decimal(0)
meublequantite_formset = MeubleQuantiteFormSet(request.POST)
if meublequantite_formset.is_valid():
dummy_commande = CommandeParticulier()
for form in meublequantite_formset:
volume = volume + Decimal(form.cleaned_data['quantite']) * form.cleaned_data['meuble'].volume
to_return['volume'] = str(volume)
to_return['msg'] = 'Success'
data = simplejson.dumps(to_return)
return HttpResponse(data, mimetype='application/json') | austing/Hakim | hakim/moving/views.py | views.py | py | 3,298 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "forms.ParticulierContactForm",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "forms.MeubleQuantiteFormSet",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 23,
"usage_type": "call"
... |
5475708388 | import PIL.ImageGrab
import PIL.ImageOps
import PIL.ImageStat
from PIL import Image
import pytesseract
import numpy
import os
import time
import win32api, win32con
import skimage
import msvcrt
## IF SOMETHING GOES WRONG IT PROBABLY BECAUSE YOU LEVELED AND EMPTY TILE NO LONGER READ 'ae'
pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files\\Tesseract-OCR\\tesseract'
EMPTY_POS = (970,50)
SHOP_BUTTON_POS = (1750, 980)
#camDistance is 874
#camPitch is 66
#outline on
#maximumGraphic
KNIGHT_COMP = ['OMNIKNIGHT','LUNA','ABADDON','DRAGONKNIGHT','CHAOSKNIGH','BATRIDER','NECROPHOS','VIPER']
SHOP_BOUND = {
## width is 213, height is 68
## y is 192
1 : 175,
2 : 495,
3 : 816,
4 : 1135,
5 : 1455
}
UI_BOUND = {
"shopButtonBound" : (1717,954,1717+64,954+60),
"goldBound" : (1640,953,1640+50,953+53),
"levelBound" : (1807,592,1807+46,592+51),
"roundBound" : (1490,20,1490+240,20+45),
"healthBound" : (340,230,340+70,230+33),
"roundStateBound" : (1495,68,1495+233,68+42),
"unitStarBound" : (371,35,372,36),
"unitNameBound" : (260,53,260+230,53+37)
}
REFERENCE_DICT = {
"oneStar" : (45000,46000),
"twoStar" : 74407,
"threeStar" : (59000,61000)
}
BOARD_POS = { 1 : {1:(629,443), 2:(723,443), 3:(815,443), 4:(908,443), 5:(1007,443), 6:(1102,443), 7:(1197,443), 8:(1292,443)},
2 : {1:(612,545), 2:(706,545), 3:(811,545), 4:(910,545), 5:(1005,545), 6:(1113,545), 7:(1211,545), 8:(1307,545)},
3 : {1:(591,642), 2:(696,645), 3:(803,646), 4:(904,645), 5:(1009,646), 6:(1113,645), 7:(1226,645), 8:(1324,645)},
4 : {1:(568,745), 2:(678,752), 3:(794,755), 4:(905,754), 5:(1012,751), 6:(1129,757), 7:(1244,750), 8:(1353,747)}}
BENCH_POS = { 1: (438,998),
2: (587,998),
3: (738,998),
4: (886,998),
5: (1026,998),
6: (1185,998),
7: (1325,998),
8: (1478,998)}
SHOP_POS = { 1: (323,460),
2: (650,460),
3: (950,460),
4: (1287,460),
5: (1600,460),
6: (1828,614)}
currentBoardHeroes = { 1 : {1:['',1], 2:['',1], 3:['',1], 4:['',1], 5:['',1], 6:['',1], 7:['',1], 8:['',1]},
2 : {1:['',1], 2:['',1], 3:['',1], 4:['',1], 5:['',1], 6:['',1], 7:['',1], 8:['',1]},
3 : {1:['',1], 2:['',1], 3:['',1], 4:['',1], 5:['',1], 6:['',1], 7:['',1], 8:['',1]},
4 : {1:['',1], 2:['',1], 3:['',1], 4:['',1], 5:['',1], 6:['',1], 7:['',1], 8:['',1]}}
currentBenchHeroes = {
1 : ['',1],
2 : ['',1],
3 : ['',1],
4 : ['',1],
5 : ['',1],
6 : ['',1],
7 : ['',1],
8 : ['',1]
}
currentShopHeroes = {
1 : '',
2 : '',
3 : '',
4 : '',
5 : ''
}
gameState = {
'fourItemChoices' : 'False',
'isShopOpen' : True,
'goldBound' : 1,
'levelBound' : 1,
'roundBound' : 1,
'healthBound' : 100,
'roundStateBound' : ''
}
def buyUnit(slot):
openShop()
time.sleep(0.2)
mousePos(SHOP_POS[slot])
print('BUYING A UNIT IN SLOT ' + str(slot))
leftClick()
def buyCompUnit(comp):
grabWholeShop()
for i in currentShopHeroes.keys():
if currentShopHeroes[i] in comp:
print(currentShopHeroes[i] + ' in slot ' + str(i) + ' is in comp')
buyUnit(i)
else:
print(currentShopHeroes[i] + ' in slot ' + str(i) + ' is NOT in comp')
def buyExp():
openShop()
time.sleep(0.01)
mousePos(SHOP_POS[6])
leftClick()
def isLootUIOpen():
grabUI('roundStateBound')
if(gameState['roundStateBound'] == 'LOOTING'):
return True
return False
def imgToText(filename):
text = pytesseract.image_to_string(PIL.Image.open(filename))
return text
def imgToTextUniform(filename):
text = pytesseract.image_to_string(PIL.Image.open(filename), config='--psm 10 -c tessedit_char_whitelist=abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
return text
def imgToNumber(filename):
text = pytesseract.image_to_string(PIL.Image.open(filename), config='--psm 10 -c tessedit_char_whitelist=0123456789')
return text
def imgToNumberUniform(filename):
text = pytesseract.image_to_string(PIL.Image.open(filename), config='--psm 10')
return text
def openShop():
gameState['isShopOpen'] = True
mousePos(EMPTY_POS)
leftClick()
mousePos(SHOP_BUTTON_POS)
leftClick()
mousePos(EMPTY_POS)
time.sleep(0.01)
def closeShop():
gameState['isShopOpen'] = False
mousePos(EMPTY_POS)
leftClick()
time.sleep(0.01)
def compareImages(i1, i2):
#i1 = PIL.Image.open(image1 + ".jpg")
#i2 = PIL.Image.open(image2 + ".jpg")
assert i1.mode == i2.mode, "Different kinds of images."
assert i1.size == i2.size, "Different sizes."
pairs = zip(i1.getdata(), i2.getdata())
if len(i1.getbands()) == 1:
# for gray-scale jpegs
dif = sum(abs(p1-p2) for p1,p2 in pairs)
else:
dif = sum(abs(c1-c2) for p1,p2 in pairs for c1,c2 in zip(p1,p2))
ncomponents = i1.size[0] * i1.size[1] * 3
return (dif / 255.0 * 100) / ncomponents
def grabBench(slot):
print('in bench slot ' + str(slot))
closeShop()
bench_select(slot)
info = grabSelectedUnitInfo()
currentBenchHeroes[slot][0] = info[0]
currentBenchHeroes[slot][1] = info[1]
def board_select(row, col):
closeShop()
mousePos(BOARD_POS[row][col])
#time.sleep(0.01)
leftClick()
def grabBoard(row, col):
print('in board tile [' + str(row) + '][' + str(col) + ']')
closeShop()
# time.sleep(0.05)
board_select(row, col)
info = grabSelectedUnitInfo()
currentBoardHeroes[row][col][0] = info[0]
currentBoardHeroes[row][col][1] = info[1]
def grabWholeBoard():
grabUI('roundStateBound')
if gameState['roundStateBound'] == 'LOOTING':
print("ERROR: TRIED TO GRAB BOARD WHILE ITEM SCREEN IS UP")
exit()
for r in range(1,5):
for c in range(1,9):
grabBoard(r,c)
def grabSelectedUnitInfo():
    """Return (name, star) for the unit currently selected on screen.

    Star level is decided by pixel-diffing the star region against reference
    screenshots ('oneStar.jpg' / 'threeStar.jpg'); anything that matches
    neither is assumed to be 2 stars. The name is read by thresholding the
    name region and running OCR, with a few hard-coded corrections for
    frequent misreads.
    """
    # While the shop overlay is open the unit panel is hidden, so report empty.
    if(gameState['isShopOpen']):
        return ('EMPTY',1)
    #bench_select(slot)
    closeShop()
    #time.sleep(0.1)
    box = UI_BOUND["unitStarBound"]
    im = PIL.ImageGrab.grab(box)
    im = im.resize([(box[2] - box[0]) * 2, (box[3]-box[1]) * 2],PIL.Image.ANTIALIAS)
    im.save(os.getcwd() + '\\selectedUnitStar.jpg', 'JPEG', quality=95)
    threeStar = PIL.Image.open('threeStar.jpg')
    oneStar = PIL.Image.open('oneStar.jpg')
    # compareImages returns a percentage difference; < 1% counts as a match.
    if(compareImages(im, oneStar) < 1):
        #print(compareImages(im, oneStar))
        print("there is a 1 stars")
        star = 1
    elif(compareImages(im, threeStar) < 1):
        #print(compareImages(im, threeStar))
        print("there a 3 stars")
        star = 3
    else:
        print('there is a 2 star')
        star = 2
    box = UI_BOUND["unitNameBound"]
    im = PIL.ImageOps.grayscale(PIL.ImageGrab.grab(box))
    im = im.resize([(box[2] - box[0]) * 5, (box[3]-box[1]) * 5],PIL.Image.ANTIALIAS)
    # Binarize (white text on black) to improve OCR accuracy.
    thresh = 150
    fn = lambda x : 255 if x > thresh else 0
    im = im.convert('L').point(fn, mode='1')
    im.save(os.getcwd() + '\\selectedUnitName.jpg', 'JPEG', quality=95)
    name = imgToTextUniform("selectedUnitName.jpg")
    # Fix common OCR misreads: 'Axe' and an empty panel.
    if(name == 'pye' or name == 'pys' or name == 'Dye'):
        name = 'Axe'
    elif(name == 'ae'):
        name = 'EMPTY'
    print(name)
    return (name, star)
def grabWholeBench():
    """Refresh the cached contents of all eight bench slots (1..8)."""
    for slot in range(1, 9):
        grabBench(slot)
def grabUI(element):
    """OCR one UI element's region into gameState[element].

    Only the four whitelisted elements are handled; anything else is ignored.
    """
    if(element != 'roundStateBound' and element != 'goldBound' and element != 'roundBound' and element != 'healthBound'):
        return
    closeShop()
    #time.sleep(0.01)
    box = UI_BOUND[element]
    im = PIL.ImageOps.grayscale(PIL.ImageGrab.grab(box))
    im = im.resize([(box[2] - box[0]) * 2, (box[3]-box[1]) * 2],PIL.Image.ANTIALIAS)
    # NOTE(review): this color-sum is computed but never used — leftover debug?
    a = numpy.array(im.getcolors())
    a = a.sum()
    #print(a)
    # thresh = 120
    # fn = lambda x : 255 if x > thresh else 0
    # im = im.convert('L').point(fn, mode='1')
    im.save(os.getcwd() + '\\ui_' + element + '.jpg', 'JPEG', quality=95)
    filename = 'ui_' + element + '.jpg'
    result = imgToText(filename)
    gameState[element] = result
    print(element + ' : ' + result)
def grabLevel():
    """OCR the player's current level (visible in the shop) into gameState['levelBound'].

    Opens the shop, grabs and upscales the level region, binarizes it for the
    OCR pass, and stores the recognized value as a string.
    """
    openShop()
    #time.sleep(0.03)
    box = UI_BOUND['levelBound']
    im = PIL.ImageOps.grayscale(PIL.ImageGrab.grab(box))
    im = im.resize([(box[2] - box[0]) * 2, (box[3]-box[1]) * 2],PIL.Image.ANTIALIAS)
    # Binarize to improve OCR accuracy.
    thresh = 120
    fn = lambda x : 255 if x > thresh else 0
    im = im.convert('L').point(fn, mode='1')
    im.save(os.getcwd() + '\\ui_levelBound.jpg', 'JPEG', quality=95)
    filename = 'ui_levelBound.jpg'
    result = imgToNumberUniform(filename)
    # OCR frequently misreads level "1" as the letter 'i'. Keep the corrected
    # value a *string* so the concatenation below and downstream int(...) calls
    # both work (the original assigned int 1 here, crashing the print with
    # "can only concatenate str to str").
    if(result == 'i'):
        result = '1'
    gameState['levelBound'] = result
    print('Level is : ' + str(result))
def grabWholeUI():
    """Refresh every tracked UI element, then re-read the player level."""
    for ui_element in gameState:
        grabUI(ui_element)
    grabLevel()
def grabShop(slot):
    """OCR the hero name offered in one shop slot into currentShopHeroes[slot]."""
    openShop()
    #time.sleep(0.03)
    # Shop cards share a fixed vertical band; only the x origin varies by slot.
    box = (SHOP_BOUND[slot],195,SHOP_BOUND[slot]+184,195+60)
    im = PIL.ImageOps.grayscale(PIL.ImageGrab.grab(box))
    im = im.resize([213*4,68*4],PIL.Image.ANTIALIAS)
    # Binarize to improve OCR accuracy.
    thresh = 150
    fn = lambda x : 255 if x > thresh else 0
    im = im.convert('L').point(fn, mode='1')
    im.save(os.getcwd() + '\\shop_' + str(slot) + '.jpg', 'JPEG', quality=95)
    filename = 'shop_' + str(slot) + '.jpg'
    # Run the (slow) OCR once and reuse the result — the original called
    # imgToTextUniform twice per slot.
    hero_name = imgToTextUniform(filename)
    print('in shop slot ' + str(slot) + ' there is ' + hero_name)
    currentShopHeroes[slot] = hero_name
def grabWholeShop():
    """OCR all five shop slots (1..5) into the shop cache."""
    for slot in range(1, 6):
        grabShop(slot)
def bench_select(slot):
    # Click the given bench slot so its unit becomes the selected unit.
    closeShop()
    mousePos(BENCH_POS[slot])
    #time.sleep(0.01)
    leftClick()
def leftClick():
    # Press and release the left mouse button at the current cursor position.
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)
    time.sleep(0.01)  # brief hold so the game reliably registers the click
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)
    #print('left clicked')
def leftDown():
    # Press (and hold) the left mouse button; pair with leftUp() to drag.
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)
    time.sleep(0.01)
    #print('left Down')
def leftUp():
    # Release the left mouse button (ends a drag started with leftDown()).
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)
    time.sleep(0.01)
    #print('left release')
def mousePos(cord):
    # Move the cursor to the (x, y) screen coordinates given in cord.
    win32api.SetCursorPos((cord[0], cord[1]))
def get_cords():
    """Print the current cursor position (debug helper for calibrating UI coords)."""
    # The original re-assigned x = x and y = y — dead code, removed.
    x, y = win32api.GetCursorPos()
    print(x, y)
def startGame():
    """Click through the three menu screens that launch a match.

    Coordinates are hard-coded for the expected window size/position.
    """
    menu_positions = ((355, 917), (963, 562), (1469, 417))
    for position in menu_positions:
        mousePos(position)
        leftClick()
        time.sleep(.1)
def countBoard():
    """Return the number of board tiles currently holding a hero."""
    count = sum(
        1
        for r in range(1, 5)
        for c in range(1, 9)
        if currentBoardHeroes[r][c][0] != 'EMPTY'
    )
    print('board has ' + str(count) + ' heroes')
    return count
def countBench():
    """Return the number of bench slots (1..8) currently holding a hero."""
    # Replaced the manual counter (which also carried a stray semicolon)
    # with an idiomatic generator-sum.
    count = sum(1 for i in range(1, 9) if currentBenchHeroes[i][0] != 'EMPTY')
    print('bench has ' + str(count) + ' heroes')
    return count
def getFirstEmptyBoardTile():
    """Return (row, col) of the first empty board tile in row-major order.

    Returns None when the board is full — callers must guard for that.
    """
    return next(
        ((r, c)
         for r in range(1, 5)
         for c in range(1, 9)
         if currentBoardHeroes[r][c][0] == 'EMPTY'),
        None,
    )
def fillBoard():
    """Drag heroes from the bench onto the board until it holds `level` units.

    Returns True if at least the loop body ran to completion (board was under
    capacity and a hero was available each iteration), False if the board was
    already full or the bench ran out of heroes mid-way.
    """
    grabWholeUI()
    grabWholeBoard()
    if countBoard() < int(gameState['levelBound']):
        closeShop()
        #time.sleep(0.01)
        for z in range(int(gameState['levelBound']) - countBoard()):
            bestBenchUnitIndex = 0
            grabWholeBench()
            # First pass: find any occupied slot to seed the "best" choice.
            for i in range(1,9):
                if(currentBenchHeroes[i][0] != 'EMPTY'):
                    bestBenchUnitIndex = i
                    break
            if bestBenchUnitIndex == 0:
                return False  # bench is empty — nothing left to place
            # Second pass: prefer the highest star level on the bench.
            for i in range(1,9):
                if(currentBenchHeroes[i][0] != 'EMPTY'):
                    if(currentBenchHeroes[i][1] > currentBenchHeroes[bestBenchUnitIndex][1]):
                        bestBenchUnitIndex = i
            grabWholeBoard()
            # NOTE(review): tile is None if the board is full here — the
            # indexing below would raise; countBoard() above should prevent it.
            tile = getFirstEmptyBoardTile()
            # Drag: press on the bench slot, release over the empty tile.
            mousePos(BENCH_POS[bestBenchUnitIndex])
            print('moving mouse to bench slot ' + str(bestBenchUnitIndex))
            leftDown()
            print('clicking down on bench slot ' + str(bestBenchUnitIndex))
            mousePos(BOARD_POS[(tile[0])][(tile[1])])
            print('moving mouse to board tile ' + str(tile))
            leftUp()
            print('releasing mouse on board tile ' + str(tile))
            print('MOVED HERO FROM BENCH SLOT ' + str(bestBenchUnitIndex) + ' TO BOARD TILE ' + str(tile))
        return True
    else:
        return False
def logicKnights():
    """Main bot loop for the Knights composition.

    Every two seconds: if the loot screen is closed, keep moving heroes from
    the bench onto the board, then buy Knight-comp units while bench space
    remains. Runs forever.
    """
    while True:
        print('SLEEPING NOW NOW NOW NOW')
        time.sleep(2)
        if(isLootUIOpen() is False):
            while fillBoard() is True:
                # The original body was the bare expression `next` (a no-op
                # reference to the builtin); `pass` states the intent.
                pass
            grabWholeBench()
            if(countBench() < 8):
                buyCompUnit(KNIGHT_COMP)
            closeShop()
def main():
    # Placeholder entry point; the bot loop lives in logicKnights().
    pass
if __name__ == '__main__':
main()
| FridayNguyen/UnderlordsAutomaton | automaton.py | automaton.py | py | 11,884 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pytesseract.pytesseract",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "pytesseract.imag... |
40063496981 | from django.urls import include, path
from rest_framework import routers
from api.views import BasketModelAPIViewSet, ProductModelAPIViewSet
# URL namespace used by reverse()/{% url %} lookups, e.g. 'api:product-list'.
app_name = 'api'
# DefaultRouter auto-generates the standard list/detail routes (plus the
# browsable API root) for each registered ViewSet.
router = routers.DefaultRouter()
router.register(r'products', ProductModelAPIViewSet)
router.register(r'baskets', BasketModelAPIViewSet)
urlpatterns = [
    path('', include(router.urls)),
]
| Bereznikov/Online-shop | store/api/urls.py | urls.py | py | 353 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "rest_framework.routers",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "api.views.ProductModelAPIViewSet",
"line_number": 9,
"usage_type": "argument"
... |
import io
import sys
_INPUT = """\
4 4
1 2
1 3
1 4
3 4
1 3
1 4
2 3
3 4
"""
# Redirect stdin to the sample input above so the script can be run locally.
sys.stdin = io.StringIO(_INPUT)
# Write the submission code below this comment (imports and defs included).
n,m=map(int, input().split()) # read two space-separated integers, e.g. "A B"
# Build Takahashi's graph as a 0-indexed adjacency list.
graph_taka = [[] for _ in range(n)]
for _ in range(m):
    a, b = map(int, input().split())
    graph_taka[a-1].append(b-1)
    graph_taka[b-1].append(a-1) # remove this line for a directed graph
# print(graph_taka) # [[2, 3, 5], ..., [1, 3, 4]]
# Build Aoki's graph the same way.
graph_ao = [[] for _ in range(n)]
for _ in range(m):
    a, b = map(int, input().split())
    graph_ao[a-1].append(b-1)
    graph_ao[b-1].append(a-1) # remove this line for a directed graph
# print(graph_ao) # [[2, 3, 5], ..., [1, 3, 4]]
# if graph_taka == graph_ao :
# NOTE(review): hard-coded swap of rows 0 and 2 — looks like scratch work
# experimenting with a vertex relabeling; only valid for n == 4.
graph_ao = [ graph_ao[2] , graph_ao[1] , graph_ao[0] ,graph_ao[3]]
print(graph_ao)
print(graph_ao) | MasayaKondo999/rent | ABC232/1219_3.py | 1219_3.py | py | 876 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.stdin",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "io.StringIO",
"line_number": 15,
"usage_type": "call"
}
] |
21892466278 | from pathlib import Path
from git import Repo
# Atlas name -> list of resolutions (micrometres) to generate.
GENERATION_DICT = dict(example_mouse=[100])
# Working directory that holds the atlas repository checkout.
cwd = Path.home() / "bg_auto"
cwd.mkdir(exist_ok=True)
if __name__ == "__main__":
    repo_path = cwd / "atlas_repo"
    atlas_gen_path = Path(__file__).parent  # NOTE(review): currently unused
    # NOTE(review): assumes a git clone already exists at repo_path.
    repo = Repo(repo_path)
    # repo.git.add(".")
    # repo.git.commit('-m', 'test commit', author='luigi.petrucco@gmail.com')
    repo.git.pull()
    repo.git.push()
| brainglobe/bg-atlasgen | bg_atlasgen/test_git.py | test_git.py | py | 427 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "pathlib.Path.home",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "git.Repo",
"line_number... |
27017325161 | from typing import Dict
import logging
from flask_jwt_extended import jwt_required, get_jwt_identity
from mongoengine.errors import ValidationError
from .barbecues_blp import barbecues_blp
from .abstract_barbecue_view import AbstractBarbecuesView
from ...schemas.communs_schemas import PagingError
from ...schemas.barbecue_schemas import (
InputUpdateBarbecueSchema,
BarbecueResponseSchema
)
from ...models.user import User
from ...models.barbecue import Barbecue
from ...helpers.errors_msg_handler import BadRequest, ReasonError, NotFound
logger = logging.getLogger('console')
@barbecues_blp.route('/<int:barbecue_id>')
class OneBarbecueView(AbstractBarbecuesView):
    """REST resource for a single barbecue: update (PUT) and delete (DELETE)."""
    @barbecues_blp.doc(operationId='UpdateBarbecue')
    @barbecues_blp.arguments(InputUpdateBarbecueSchema)
    @barbecues_blp.response(400, schema=PagingError, description="BadRequest")
    @barbecues_blp.response(404, schema=PagingError, description="NotFound")
    @barbecues_blp.response(200, schema=BarbecueResponseSchema, description="Update one barbecue")
    @jwt_required()
    def put(self, input_dict: Dict, barbecue_id: int):
        """Update an existing barbecue"""
        auth_user = User.get_by_id(get_jwt_identity())
        # 404 (not 403) for unauthorized users, so the resource's existence
        # is not leaked.
        if not self.can_read_the_barbecue(auth_user.scopes):
            raise NotFound(f"Barbecue #{barbecue_id} not found !")
        barbecue = self.get_barbecue(barbecue_id)
        barbecue.update(input_dict)
        try:
            barbecue.save()
        except ValidationError:
            # Model-level validation failed — surface as a 400 with a reason.
            raise BadRequest(ReasonError.UPDATE_BARBECUE_ERROR.value)
        return {
            "action": "updated",
            "barbecue": barbecue
        }
    @barbecues_blp.doc(operationId='DeleteBarbecue')
    @barbecues_blp.response(400, schema=PagingError, description="BadRequest")
    @barbecues_blp.response(404, schema=PagingError, description="NotFound")
    @barbecues_blp.response(200, schema=BarbecueResponseSchema, description="Delete one barbecue")
    @jwt_required()
    def delete(self, barbecue_id: int):
        """Delete an existing barbecue"""
        auth_user = User.get_by_id(get_jwt_identity())
        # Same existence-hiding policy as put().
        if not self.can_read_the_barbecue(auth_user.scopes):
            raise NotFound(f"Barbecue #{barbecue_id} not found !")
        barbecue = self.get_barbecue(barbecue_id)
        barbecue.delete()
        return {
            "action": "deleted",
            "barbecue": barbecue
        }
| VictorCyprien/barbuc-api | barbuc_api/views/barbecues/one_barbecue_view.py | one_barbecue_view.py | py | 2,465 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "abstract_barbecue_view.AbstractBarbecuesView",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 33,
"usage_type": "name"
},
{
"api_na... |
70585509224 | import numpy as np
import torch
from collections import deque
from motion_pred.utils.config import Config
from models.motion_pred import *
import pickle
from datetime import datetime
import os
import time
index = 0
class HumanLatent:
    """Encodes recent human-pose + ball-trajectory history into a VAE latent.

    Ball states and human poses arrive asynchronously (with timestamps); the
    encoder resamples both streams to the human frame rate before feeding the
    pretrained kick-VAE encoder.
    """
    def __init__(self, date_time=None):
        self.device = torch.device('cuda:0')
        self.date_time = date_time
        self.history_len = 13  # number of resampled history frames fed to the encoder
        self.human_fps = 16.5  # resampling rate for the history window
        self.human_joints = np.array([4, 5, 7, 8, 10, 11, 27, 28]) # corresponds to [right knee, ankle, foot, big toe]
        # Unbounded-in-practice histories of observations and their timestamps.
        self.previous_ball = deque(maxlen=100000000)
        self.previous_ball_raw = deque(maxlen=100000000)
        self.previous_human = deque(maxlen=100000000)
        self.previous_ball_time = deque(maxlen=100000000)
        self.previous_human_time = deque(maxlen=100000000)
        # Load the pretrained kick-VAE encoder in eval mode.
        cfg = Config('human_kick', test=True)
        self.model = get_kick_vae_model(cfg)
        self.model_weight = 'vae_0500.p'
        model_cp = pickle.load(open(self.model_weight, "rb"))
        self.model.load_state_dict(model_cp['model_dict'])
        self.model.eval()
        self.model.to(self.device)
        self.human_latent = None  # latest encoded latent (list of floats) or None
        self.index = 0
    def get_human_latent(self):
        # Latest encoded latent, or None if not (yet) computed.
        return self.human_latent
    def set_ball_states(self, ball_pos, ball_time):
        """Append one timestamped ball position to the history.

        Positions with x > 6 are treated as tracking glitches and replaced by
        the previous sample (the timestamp is still recorded).
        """
        if len(self.previous_ball) > 0 and ball_pos[0] > 6:
            print("if", ball_pos[0], ball_time)
            self.previous_ball.append(np.copy(self.previous_ball[-1]))
            self.previous_ball_time.append(ball_time)
        else:
            print("else", ball_pos[0], ball_time)
            self.previous_ball.append(np.copy(ball_pos))
            self.previous_ball_time.append(ball_time)
        # self.previous_ball_raw.append(np.copy(ball_pos))
        # self.previous_ball.append(np.copy(ball_pos))
        # self.previous_ball_time.append(ball_time)
    def set_human_poses(self, human_pose, human_time):
        """Append one timestamped human pose (29 joints x 3) to the history."""
        self.previous_human.append(np.copy(human_pose))
        self.previous_human_time.append(human_time)
    def run_encoding_once(self):
        """Resample the recent history and encode it into self.human_latent.

        No-ops until both streams have data; resets the latent while the ball
        is close (x < 1.5) and keeps an already-computed latent otherwise.
        """
        print(self.human_latent)
        if len(self.previous_human_time) == 0 or len(self.previous_ball_time) == 0:
            return
        if self.previous_ball[-1][0] < 1.5:
            self.human_latent = None
            return
        if self.human_latent is not None:
            return
        # Walk backwards from the newest human timestamp, building
        # history_len frames spaced 1/human_fps apart. Both streams are
        # linearly interpolated at each frame time.
        human_time = self.previous_human_time[-1]
        human_idx = 1
        ball_idx = 1
        ball_pos = np.zeros((self.history_len, 3))
        human_pose = np.zeros((self.history_len, 29, 3))
        for i in range(1, self.history_len+1):
            # Advance to the ball samples bracketing this frame time.
            while ball_idx + 1 < len(self.previous_ball_time) and human_time < self.previous_ball_time[-ball_idx-1]:
                ball_idx += 1
            if self.previous_ball_time[-ball_idx] - self.previous_ball_time[-ball_idx-1] == 0:
                ball_pos[-i] = self.previous_ball[-ball_idx]
            else:
                curr_ball_pos = self.previous_ball[-ball_idx] * (human_time - self.previous_ball_time[-ball_idx-1]) + self.previous_ball[-ball_idx-1] * (self.previous_ball_time[-ball_idx] - human_time)
                ball_pos[-i] = np.copy(curr_ball_pos/(self.previous_ball_time[-ball_idx]-self.previous_ball_time[-ball_idx-1]))
            # Same bracketing/interpolation for the pose stream.
            while human_idx + 1 < len(self.previous_human_time) and human_time < self.previous_human_time[-human_idx-1]:
                human_idx += 1
            if self.previous_human_time[-human_idx] - self.previous_human_time[-human_idx-1] == 0:
                human_pose[-i] = self.previous_human[-human_idx]
            else:
                curr_human_pos = self.previous_human[-human_idx] * (human_time - self.previous_human_time[-human_idx-1]) + self.previous_human[-human_idx-1] * (self.previous_human_time[-human_idx] - human_time)
                human_pose[-i] = np.copy(curr_human_pos/(self.previous_human_time[-human_idx]-self.previous_human_time[-human_idx-1]))
            # Stop (zero-filled) once the window reaches before recorded history.
            if human_time < self.previous_ball_time[0] or human_time < self.previous_human_time[0]:
                ball_pos[-i] = 0
                human_pose[-i] = 0
                break
            human_time = human_time - 1 / self.human_fps
        # Select tracked joints, append the ball as an extra "joint", and
        # convert positions to finite-difference velocities (scaled by 1/0.1).
        ball_pos = ball_pos[:, None, :]
        human_pose = human_pose[:, self.human_joints, :]
        human_ball = np.concatenate([human_pose, ball_pos], axis=1)
        human_ball = (human_ball[1:, :] - human_ball[:-1, :]) / 0.1
        human_ball[:2] = 0 # same with training, the first two frames are zeros
        human_ball = human_ball.reshape(self.history_len-1, 1, -1)
        human_ball = torch.from_numpy(human_ball).to(self.device).float()
        h_x = self.model.encode_x(human_ball)
        self.human_latent = h_x[0].detach().cpu().numpy().tolist()
        print(self.human_latent)
    def run_encoding_forever(self):
        # Busy loop that re-runs the encoder indefinitely.
        while True:
            print('encoding')
            self.run_encoding_once()
if __name__ == '__main__':
    # Smoke test: feed synthetic ball/pose streams, then run one encoding pass.
    hl = HumanLatent()
    for i in range(100):
        hl.set_ball_states(np.array([i, i, i]), 0.01*i)
        hl.set_human_poses(np.ones((29, 3))*i, 1*i)
    hl.run_encoding_once()
hl.run_encoding_once() | yichen928/RSR_Goalkeeper | src/human_latent/src/human_latent.py | human_latent.py | py | 5,285 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.device",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"l... |
8446735468 | import os
import platform
import tempfile
import urllib
from cupy import testing
from cupyx.tools import install_library
import pytest
_libraries = ['cudnn', 'nccl', 'cutensor']
def _get_supported_cuda_versions(lib):
    """Return the sorted, de-duplicated CUDA versions recorded for *lib*."""
    versions = {rec['cuda'] for rec in install_library.library_records[lib]}
    return sorted(versions)
class TestInstallLibrary:
    """End-to-end tests for the cupyx.tools.install_library downloader."""

    @pytest.mark.parametrize('cuda', _get_supported_cuda_versions('cudnn'))
    @testing.slow
    def test_install_cudnn(self, cuda):
        """cuDNN installs cleanly for every supported CUDA version."""
        self._test_install('cudnn', cuda)

    @pytest.mark.skipif(
        platform.system() == 'Windows',
        reason='NCCL is only available for Linux')
    @pytest.mark.parametrize('cuda', _get_supported_cuda_versions('nccl'))
    @testing.slow
    def test_install_nccl(self, cuda):
        """NCCL installs cleanly for every supported CUDA version (Linux only)."""
        self._test_install('nccl', cuda)

    @pytest.mark.parametrize('cuda', _get_supported_cuda_versions('cutensor'))
    @testing.slow
    def test_install_cutensor(self, cuda):
        """cuTENSOR installs cleanly for every supported CUDA version."""
        self._test_install('cutensor', cuda)

    def _test_install(self, library, cuda):
        """Install *library* for *cuda* into a temp dir and verify its files."""
        system = platform.system()
        for rec in install_library.library_records[library]:
            if rec['cuda'] != cuda:
                continue
            version = rec[library]
            filenames = rec['assets'][system]['filenames']
            with tempfile.TemporaryDirectory() as d:
                install_library.install_lib(cuda, d, library)
                self._check_installed(
                    d, cuda, library, version, filenames)
                break
        else:
            # No record matched the requested CUDA version.
            pytest.fail(f'unexpected CUDA version {cuda} for {library}')

    def _check_installed(self, prefix, cuda, lib, version, filenames):
        """Assert the install root exists and contains an expected file."""
        install_root = os.path.join(prefix, cuda, lib, version)
        assert os.path.isdir(install_root)
        for _x, _y, files in os.walk(install_root):
            for filename in filenames:
                if filename in files:
                    return
        # BUGFIX: corrected typo in the failure message ("cound" -> "could").
        pytest.fail('expected file could not be found')

    @pytest.mark.parametrize('library', _libraries)
    def test_urls(self, library):
        """Every recorded asset URL answers HTTP 200 to a HEAD request."""
        assets = [r['assets']
                  for r in install_library.library_records[library]]
        for asset in assets:
            for system in asset.keys():
                url = asset[system]['url']
                with urllib.request.urlopen(
                        urllib.request.Request(url, method='HEAD')) as resp:
                    assert resp.getcode() == 200

    @pytest.mark.parametrize('library', _libraries)
    def test_main(self, library):
        """The CLI 'dump' action runs without raising."""
        install_library.main(
            ['--library', library, '--action', 'dump', '--cuda', 'null'])
| cupy/cupy | tests/cupyx_tests/tools_tests/test_install_library.py | test_install_library.py | py | 2,663 | python | en | code | 7,341 | github-code | 36 | [
{
"api_name": "cupyx.tools.install_library.library_records",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "cupyx.tools.install_library",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 22,
"usage_type": "... |
8844317309 | import signal
import time
import sys
from typing import *
from multiprocessing import Process, Pipe, Value
from multiprocessing.connection import Connection
from threading import Thread
import zmq
import pyaudio
import numpy as np
from config import sr, input_dim, dtw_k, dtw_cost, channels
from transformer import mfcc_transform, audio_length
from dtw import DTW
from predictor import Predictor
DATA_ENDPOINT = 'ipc://twitrecog_data'
COMMAND_ENDPOINT = 'ipc://twitrecog_command'
result_queue = []
found_sleep = False
class AudioHandler(object):
    """Streams microphone audio through keyword predictors.

    A child *process* captures audio with PyAudio and pipes raw chunks to a
    *thread* in this process, which buffers them, runs each predictor over
    the buffer head, and invokes ``callback(found: bool)`` on transitions.
    """
    def __init__(self, callback, predictors: List[Predictor]):
        # One-way pipe: capture process writes, analysis thread reads.
        self.reader, self.writer = Pipe(False)
        self.exiter = Value('b', False)  # shared shutdown flag
        self.p: Process = None
        self.t: Thread = None
        self.buf = np.empty((0,), dtype=np.float32)  # rolling sample buffer
        self.callback = callback
        self.predictors = predictors
        self.CHUNK = 1536  # samples per capture chunk / analysis hop
        # Analysis-thread poll interval (half a chunk duration, in seconds).
        self.sleepy = (self.CHUNK / sr / 2)
        self.skippy = 0  # samples to discard after a positive detection
    def start(self):
        """Launch the capture process and the analysis thread."""
        self.p = Process(
            target=AudioHandler._start_process,
            args=(self.CHUNK, self.writer, self.exiter)
        )
        self.p.start()
        self.t = Thread(
            target=self._start_thread
        )
        self.t.start()
    def _predict(self) -> Tuple[bool, int]:
        """Run each predictor on the buffer head.

        Returns (True, sample_len) for the first predictor that fires, else
        (False, 0). Predictors whose window exceeds the buffer are skipped.
        """
        for predictor in self.predictors:
            sample_len = predictor.get_sample_len()
            if sample_len > self.buf.shape[0]:
                continue
            x = self.buf[:sample_len]
            x_transformed, _ = mfcc_transform(x, sr, input_dim)
            if predictor.predict(x_transformed):
                return True, predictor.get_sample_len()
        return False, 0
    def _start_thread(self):
        """Analysis loop: drain the pipe, skip flagged audio, slide-and-predict."""
        min_sample_len = np.min([x.get_sample_len() for x in self.predictors])
        skipping = False
        while not self.exiter.value:
            recv = np.frombuffer(self.reader.recv_bytes(), np.float32)
            self.buf = np.append(self.buf, recv)
            # After a detection, discard skippy samples before predicting again.
            if self.skippy > 0:
                skipped = self.skippy
                if self.buf.shape[0] < skipped:
                    skipped = self.buf.shape[0]
                self.skippy -= skipped
                self.buf = self.buf[skipped:]
            # Slide the window one CHUNK at a time while enough audio remains.
            while self.skippy == 0 and self.buf.shape[0] > min_sample_len:
                if skipping:
                    self.callback(False)  # signal end of previous detection
                    skipping = False
                skipping, self.skippy = self._predict()
                if skipping:
                    self.callback(True)  # signal start of a detection
                self.buf = self.buf[self.CHUNK:]
            time.sleep(self.sleepy)
    @staticmethod
    def _start_process(chunk, conn: Connection, should_exit):
        """Capture-process body: stream microphone chunks into the pipe."""
        def cb(in_data, frame_count, time_info, flag):
            # PyAudio callback: forward the raw bytes to the analysis thread.
            conn.send_bytes(in_data)
            return None, pyaudio.paContinue
        def signal_cb(_, __):
            should_exit.value = True
        # Let Ctrl-C in the child flip the shared shutdown flag.
        signal.signal(signal.SIGINT, signal_cb)
        p = pyaudio.PyAudio()
        print('process started')
        stream = p.open(format=pyaudio.paFloat32,
                        channels=channels,
                        rate=sr,
                        input=True,
                        output=False,
                        stream_callback=cb,
                        frames_per_buffer=chunk)
        # Idle until asked to shut down; capture happens in the callback.
        while not should_exit.value:
            time.sleep(0.1)
        stream.close()
        p.terminate()
    def stop(self):
        """Signal shutdown and join both the capture process and the thread."""
        self.exiter.value = True
        self.p.join()
        self.t.join()
def get_models(root: str) -> Tuple[List[List[Tuple[np.ndarray, int]]], List[Tuple[np.ndarray, int]]]:
    """Load trained pattern models (and the negative 'false' set) from disk.

    Scans ``root/models_rel_path`` for pattern directories; inactive patterns
    (active file contains 'false') are skipped. Returns
    (per-pattern lists of (model_array, audio_len_samples), false_models).
    """
    import os
    import glob
    from config import models_glob, models_rel_path, active_path, preprocessed_rel_path, model_extension
    from config import recording_extension
    ret = []
    false_ret = []
    path = os.path.join(root, models_rel_path)
    for pattern_name in os.listdir(path):
        print('found pattern', pattern_name)
        pattern_path = os.path.join(path, pattern_name)
        # Skip patterns explicitly marked inactive.
        with open(os.path.join(pattern_path, active_path), 'r') as active_file:
            if active_file.read() == 'false':
                continue
        # The special 'false' pattern collects negative examples.
        if pattern_name == 'false':
            target = false_ret
        else:
            ret.append([])
            target = ret[-1]
        pattern_models_paths = [os.path.join(pattern_path, pathname)
                                for pathname in glob.glob(os.path.join(pattern_path, models_glob))]
        for pattern_model_path in pattern_models_paths:
            # Derive the recording id from the model filename, then read the
            # matching preprocessed recording's duration (in samples).
            pattern_model_idx = pattern_model_path[pattern_model_path.rindex("/") + 1:].replace(model_extension, '')
            audio_len = int(np.ceil(audio_length(os.path.join(
                root, preprocessed_rel_path,
                pattern_name,
                pattern_model_idx + recording_extension
            )) * sr))
            target.append((np.load(pattern_model_path), audio_len))
    return ret, false_ret
def main():
    """Wire up ZMQ sockets, train DTW predictors, and stream detections.

    Publishes 'true'/'false' detection events on DATA_ENDPOINT and stops on
    SIGINT or when 'done' arrives on COMMAND_ENDPOINT.
    """
    context = zmq.Context()
    socket: zmq.Socket = context.socket(zmq.PUB)
    done: zmq.Socket = context.socket(zmq.SUB)
    done.subscribe('')
    done.setsockopt(zmq.RCVTIMEO, 2000)  # 2 s receive timeout on the command socket
    socket.connect(DATA_ENDPOINT)
    done.connect(COMMAND_ENDPOINT)
    predictors = []
    # sys.argv[1] is the models root directory; build one DTW predictor per
    # pattern, windowed to that pattern's average recording length.
    models, false_model = get_models(sys.argv[1])
    for model in models:
        x = [recording for recording, _ in model]
        sample_len = int(np.ceil(np.average([recording_len for _, recording_len in model])))
        predictor = DTW(dtw_cost, sample_len)
        predictor.compile(dtw_k)
        predictor.train(x, [[0.0, 1.0] for _ in range(len(model))])
        predictors.append(predictor)
    def cb(found):
        # Publish each detection-state transition.
        socket.send_string('true' if found else 'false')
        print('found!', found)
    audio_handler = AudioHandler(cb, predictors)
    audio_handler.start()
    signal_received = False
    def signal_cb(_, __):
        nonlocal signal_received
        signal_received = True
    signal.signal(signal.SIGINT, signal_cb)
    print('started')
    socket.send_string('started')
    # Run until SIGINT or an explicit 'done' command; RCVTIMEO makes
    # recv_string raise ZMQError periodically so the flag is re-checked.
    while not signal_received:
        try:
            string = done.recv_string()
            if string == 'done':
                signal_received = True
                break
        except zmq.ZMQError as err:
            print('Error while receiving: ', err)
        time.sleep(1)
    audio_handler.stop()
    socket.close()
    done.close()
| d32f123/master-thesis | python/twitrecog.py | twitrecog.py | py | 6,471 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "predictor.Predictor",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "multiprocessing.Pipe",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Value",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "multip... |
15673441989 | import requests
from web.models import ApiKeys
def __search(params):
    """Issue one Yelp business-search request and persist the rate limit.

    Returns the 'businesses' list from the JSON response.
    """
    # Take an API key from the ApiKeys table (the loop keeps the last row).
    api_key = ''
    for keys in ApiKeys.select():
        api_key = keys.key
    s = requests.session()
    s.headers = {
        "Authorization": "Bearer " + api_key
    }
    r = s.get("https://api.yelp.com/v3/businesses/search", params=params)
    # Record the remaining-request count Yelp reports in its response headers.
    ratelimits = ApiKeys.select()
    if ratelimits:
        ratelimits = ApiKeys.select().get()
        ratelimits.ratelimit = r.headers['ratelimit-remaining']
    else:
        # NOTE(review): the rate limit is passed as the first positional arg
        # to ApiKeys() here — verify that matches the model's field order.
        ratelimits = ApiKeys(r.headers['ratelimit-remaining'])
    ratelimits.save()
    return r.json()["businesses"]
def search(latitude, longitude, radius, category='[]'):
    """Collect all Yelp restaurant results around a point, paging by 50.

    ``category`` is the string form of a Python list of Yelp category
    aliases, e.g. "['bars', 'pizza']".
    """
    results = []
    # SECURITY: eval() executes arbitrary code if ``category`` ever comes
    # from an untrusted source — ast.literal_eval would be the safe choice.
    # (The original additionally wrapped the result in an identity
    # map(lambda c: c, ...), which did nothing and was removed.)
    search_params = ({
        "term": "restaurants",
        "limit": 50,
        "latitude": latitude,
        "longitude": longitude,
        "radius": radius,
        "categories": ",".join(eval(category)),
    })
    while True:
        page = __search(search_params)
        results.extend(page)
        if len(page) < 50:
            # A short page means Yelp has no further results.
            # print(search_params)
            break
        search_params.update({
            "offset": search_params.get("offset", 0) + 50
        })
    return results
def search_all_circles(circles, start_index=0, category='[]'):
    """Search every circle from ``start_index`` on, de-duplicating by business id.

    Each circle is ((lon, lat), radius). Returns (completed, businesses,
    last_index): ``completed`` is False if a search raised, and ``last_index``
    is the absolute index of the last circle processed (resume point).
    """
    print('start_index', start_index)
    data = []
    uniq_set = set()
    # BUGFIX: initialize i so the final return is well-defined even when the
    # slice is empty (the original raised NameError on an empty input).
    i = -1
    for i, circle in enumerate(circles[start_index:]):
        try:
            results = search(circle[0][1], circle[0][0], circle[1], category)
        except Exception:
            # BUGFIX: narrowed from a bare ``except:`` so KeyboardInterrupt
            # and SystemExit still propagate.
            return False, data, start_index + i
        for business in results:
            if business['id'] not in uniq_set:
                data.append(business)
                uniq_set.add(business['id'])
    return True, data, start_index + i
# if __name__ == "__main__":
# print(len(search("Los Angeles, CA")))
| 360cloudhub/Let-s-kick-it | yelp.py | yelp.py | py | 1,828 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "web.models.ApiKeys.select",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "web.models.ApiKeys",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "requests.session",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "web.models... |
17849210832 | import torch
import torch.nn as nn
from prodict import Prodict
import utils
class Reducer(nn.Module):
    """Two-layer SELU MLP that maps INPUT_DIM features down to OUTPUT_DIM.

    The forward pass also swaps the first two axes of the result.
    """
    def __init__(self, dims: Prodict, device=torch.device("cpu")):
        # dims must provide INPUT_DIM and OUTPUT_DIM attributes.
        super(Reducer, self).__init__()
        self.input_dim = dims.INPUT_DIM
        self.output_dim = dims.OUTPUT_DIM
        self.net = nn.Sequential(
            nn.Linear(self.input_dim, self.input_dim),
            nn.SELU(),
            nn.Linear(self.input_dim, self.output_dim),
            nn.SELU(),
        )
        self.device = device
        # Project helper: presumably a small-std weight initialization.
        utils.init_network_weights(self.net, std=0.001)
    def forward(self, data):
        # Assumes data is 3-D, e.g. (A, B, input_dim) -> (B, A, output_dim).
        # TODO(review): confirm the intended axis order with callers.
        net_out = self.net(data)
        return net_out.permute(1, 0, 2)
| jameslu01/TDNODE | src/model/SLD/reducer.py | reducer.py | py | 682 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "prodict.Prodict",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.device",
"line_n... |
73087531943 | # -*- coding: utf-8 -*-
# @Author: Ahmed kammorah
# @Date: 2019-04-14 22:19:20
# @Last Modified by: Ahmed kammorah
# @Last Modified time: 2019-04-14 23:11:39
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import re
import socket
from threading import Thread
import requests
MOCK_SERVER_PORT = 10001
class AKMockHTTPRequestHandler(BaseHTTPRequestHandler):
    """Mock HTTP handler that answers matching GETs with an empty JSON list."""

    # Matches every path (any path contains '/').
    SPARKPOST_SEND_PATTERN = re.compile(r'/')

    def do_GET(self):
        # BUGFIX: the pattern is a class attribute; the original referenced
        # the bare name SPARKPOST_SEND_PATTERN, raising NameError per request.
        if re.search(self.SPARKPOST_SEND_PATTERN, self.path):
            self.send_response(requests.codes.ok)
            # Add response headers.
            self.send_header('Content-Type', 'application/json; charset=utf-8')
            self.end_headers()
            # Add response content.
            response_content = json.dumps([])
            self.wfile.write(response_content.encode('utf-8'))
        return
def start_mock_server(server_port):
    """Start the mock HTTP server on localhost in a background daemon thread."""
    server_base = 'localhost'
    mock_server = HTTPServer((server_base, server_port), AKMockHTTPRequestHandler)
    ser_thread = Thread(target=mock_server.serve_forever)
    # Thread.setDaemon() is deprecated (Python 3.10+); set the attribute instead.
    ser_thread.daemon = True
    ser_thread.start()
{
"api_name": "http.server.BaseHTTPRequestHandler",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "requests.code... |
29273051628 | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import os
from collections import deque
from src.game_env import *
from src.ppo import *
import src.utils as utils
is_cuda = torch.cuda.is_available()
# device = 'cuda' if is_cuda else 'cpu'
device = 'cpu'
class Net(nn.Module):
    """Actor-critic CNN (Atari-style): a shared convolutional trunk feeding
    separate policy-logit and state-value heads.
    """

    def __init__(self, input_shape, n_actions):
        super(Net, self).__init__()
        # Shared feature extractor over stacked uint8 frames.
        self.conv = nn.Sequential(
            nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1),
            nn.ReLU()
        )
        flat_features = self._get_conv_out(input_shape)
        # Policy head: per-action logits.
        self.policy = nn.Sequential(
            nn.Linear(flat_features, 512),
            nn.ReLU(),
            nn.Linear(512, n_actions)
        )
        # Value head: scalar state-value estimate.
        self.value = nn.Sequential(
            nn.Linear(flat_features, 512),
            nn.ReLU(),
            nn.Linear(512, 1)
        )

    def _get_conv_out(self, shape):
        """Measure the flattened conv-trunk output size with a dummy pass."""
        dummy_out = self.conv(torch.zeros(1, *shape))
        return int(np.prod(dummy_out.size()))

    def forward(self, x):
        """Return (policy_logits, state_value) for a batch of uint8 frames."""
        scaled = x.float() / 255  # rescale pixels into [0, 1]
        features = self.conv(scaled).view(scaled.size()[0], -1)
        return self.policy(features), self.value(features)
class Agent:
    """PPO agent: owns the actor-critic network and the training/play loops."""
    def __init__(self, obs_shape, action_space):
        self.policy = Net(obs_shape, action_space).to(device)
    def choose_action(self, logits, noise_action=True):
        # take an logits ---> action
        # With noise: Gumbel-max sampling (argmax of logits + Gumbel noise),
        # i.e. a draw from the softmax distribution. Without: greedy argmax.
        u = torch.rand(logits.size()).to(device)
        if noise_action:
            _, action = torch.max( logits.detach() - (-u.log()).log(), 1)
        else:
            logits = F.softmax(logits, dim=-1)
            action = np.argmax(logits.cpu().detach().numpy(), axis=-1)[0]
        return action
    def train(self, game_id,
              obs_size=84,
              mini_batch=4,
              num_steps=128,
              num_workers=16,
              mini_epochs=3,
              gamma_=0.99,
              tau_=0.95,
              initial_lr=1e-4,
              constant_lr=False,
              max_update_times=10000,
              early_stop=False,
              save_model_steps=500):
        '''
        Agent training process: roll out num_steps across num_workers
        parallel environments, compute GAE returns, then run mini_epochs of
        clipped PPO updates per batch (clip shrinks linearly to 0).
        :param game_id: environment id passed to each Worker
        :param obs_size: square observation side length (pixels)
        :param mini_batch: number of minibatches per PPO update
        :param num_steps: rollout length per worker per update
        :param num_workers: number of parallel environment processes
        :param mini_epochs: PPO epochs per collected batch
        :param gamma_: discount factor for GAE
        :param tau_: GAE lambda
        :param initial_lr: starting Adam learning rate
        :param constant_lr: if False, decay the LR each update
        :param max_update_times: total number of PPO updates to run
        :param early_stop: if True, skip training entirely
        :param save_model_steps: checkpoint every this many updates
        :return: None
        '''
        batch_size = num_workers * num_steps
        mini_batch_size = batch_size // mini_batch
        assert (batch_size % mini_batch == 0)
        workers = [Worker(game_id, obs_size) for i in range(num_workers)]
        state = np.zeros((num_workers, obs_size, obs_size, 4), dtype=np.uint8)
        # Rolling windows of the last 100 finished-episode stats for logging.
        reward_queue = deque(maxlen=100)
        length_queue = deque(maxlen=100)
        for worker in workers:
            worker.child.send(('reset', None))
        for i, worker in enumerate(workers):
            state[i] = worker.child.recv()
        state = state.transpose(0, 3, 1, 2) # channel first for pytorch
        optimizer = optim.Adam(self.policy.parameters(), lr=initial_lr)
        current_update_times = 0
        while current_update_times < max_update_times and not early_stop:
            # ------------------------------------
            # interact with env and generate data
            log_probs = []
            values = []
            rewards = []
            actions = []
            states = []
            masks = []
            for _ in range(num_steps):
                state = torch.FloatTensor(state).to(device)
                logits, value = self.policy(state)
                # +++++
                values.append(value)
                action_this_step = self.choose_action(logits)
                action_this_step = action_this_step.cpu().detach().numpy()
                # +++++
                actions.append(torch.from_numpy(np.asarray(action_this_step)).unsqueeze(1).to(device))
                prob = F.softmax(logits, dim=-1)
                log_prob = F.log_softmax(logits, dim=-1)
                # +++++
                log_probs.append(log_prob)
                # Interact with environments
                for i, worker in enumerate(workers):
                    worker.child.send(('step', action_this_step[i]))
                next_state = []
                reward = []
                done = []
                for w, worker in enumerate(workers):
                    next_state_, reward_, done_, info = worker.child.recv()
                    next_state_ = next_state_.transpose(2, 0, 1)
                    next_state.append(next_state_[np.newaxis, ...])
                    reward.append(reward_)
                    done.append(done_)
                    # A non-empty info marks an episode end with its stats.
                    if info:
                        reward_queue.append(info['reward'])
                        length_queue.append(info['length'])
                next_state = np.concatenate(next_state, axis=0)
                reward = np.asarray(reward)
                done = np.asarray(done)
                # +++++
                rewards.append(torch.FloatTensor(reward).unsqueeze(1).to(device)) # 2D list
                # masks zero out bootstrapping across episode boundaries
                # +++++
                masks.append(torch.FloatTensor(1 - done).unsqueeze(1).to(device)) # 2D list
                # +++++
                states.append(state)
                state = next_state
            current_update_times += 1
            # ------------------------------------
            # Update parameters
            # Change numpy to pytorch tensor and reshape
            next_state = torch.FloatTensor(next_state).to(device)
            _, next_value = self.policy(next_state)
            returns = compute_gae(next_value, rewards, masks, values, gamma=gamma_, tau=tau_)
            returns = torch.cat(returns).detach()
            log_probs = torch.cat(log_probs).detach()
            values = torch.cat(values).detach()
            states = torch.cat(states)
            actions = torch.cat(actions)
            advantages = returns - values # target_reward - predict_reward
            # Linearly anneal the PPO clip range from 0.2 down to 0.
            clip_p = 0.2 * (1 - current_update_times / max_update_times)
            ppo_update(self.policy, optimizer, mini_epochs, mini_batch_size,
                       states, actions, log_probs, returns, advantages, clip_param=clip_p)
            # ------------------------------------
            # save model and print information
            if current_update_times % save_model_steps == 0:
                self.save_model(current_update_times)
                if len(length_queue) != 0:
                    print("Update step: [{}/{}] \t mean reward: {:3f} \t length: {}".
                          format(current_update_times, max_update_times,
                                 sum(reward_queue)/len(reward_queue),
                                 sum(length_queue) / len(length_queue)))
            # ------------------------------------
            if not constant_lr:
                utils.adjust_learning_rate(optimizer, initial_lr, max_update_times, current_update_times)
        # Close each worker when training end.
        for w in workers:
            w.child.send(("close", None))
    # Play game with model
    def play(self, model_path, game, visual=True, save_video=True):
        """Load a checkpoint and play one episode greedily.

        Returns (total_reward, episode_length).
        """
        self.policy.load_state_dict(torch.load(model_path, map_location='cpu'))
        state = game.reset()
        done = False
        total_reward = 0.0
        while not done:
            if visual:
                game.env.render()
            state = torch.FloatTensor(state.transpose(2, 0, 1)).unsqueeze(0).to(device)
            logits, _ = self.policy(state)
            action = self.choose_action(logits, noise_action=False)
            next_state, reward, done, info = game.step(action, save_video)
            state = next_state
            total_reward += reward
            if done:
                game.env.reset()
                game.env.close()
        return total_reward, info['length']
    def save_model(self, update_step):
        # Checkpoint the policy weights under checkpoint/model_<step>.dat.
        filename = 'model_{}.dat'.format(update_step)
        torch.save(self.policy.state_dict(), os.path.join('checkpoint', filename))
# Unit test
if __name__ == '__main__':
input_shape = (4, 84, 84)
action_shape = 12
model = Net(input_shape, action_shape)
| ray075hl/PPO_super_mario | src/agent.py | agent.py | py | 8,651 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "torch.cuda.is_available",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "torch.nn... |
71170295463 | import datetime
import re
from django import forms
from django.contrib.auth.forms import (
UserCreationForm,
AuthenticationForm,
PasswordChangeForm,
PasswordResetForm,
SetPasswordForm,
)
from app.models import Person, User, Information, Competition
from django.conf import settings
from django.core.mail import BadHeaderError, send_mail
from django.http import HttpResponse
from django.utils import timezone
from app.defines.prefecture import PrefectureAndOversea
from django.utils.translation import gettext_lazy as _
from django.contrib.auth.backends import AllowAllUsersModelBackend
from django.core.mail import EmailMessage
class UserCreateForm(UserCreationForm):
class Meta:
model = User
fields = ("email", "password1", "password2")
help_texts = {
"email": _("使用可能なメールアドレスを入力してください。"),
}
def clean_email(self):
email = self.cleaned_data["email"]
User.objects.filter(email=email, is_active=False).delete()
return email
class PersonCreateForm(forms.ModelForm):
class Meta:
model = Person
fields = (
"last_name",
"last_name_kana",
"last_name_roma",
"first_name",
"first_name_kana",
"first_name_roma",
"gender",
"birth_at",
"prefecture_id",
)
error_messages = {
"last_name": {"max_length": _("姓が不正です。")},
"first_name": {"max_length": _("名が不正です。")},
"last_name_kana": {"max_length": _("セイが不正です。")},
"first_name_kana": {"max_length": _("メイが不正です。")},
"last_name_roma": {"max_length": _("姓(ローマ字)が不正です。")},
"first_name_roma": {"max_length": _("名(ローマ字)が不正です。")},
}
help_texts = {
"last_name": _("漢字でお願いします。(例:荒木) 海外籍の方はLast nameを入力してください。"),
"last_name_kana": _("全角カタカナでお願いします。(例:アラキ) 海外籍の方もカタカナで入力してください。"),
"last_name_roma": _("ローマ字でお願いします。(例:Araki) 海外籍の方も入力してください。"),
"first_name": _("漢字でお願いします。(例:慎平) 海外籍の方はFirst nameを入力してください。"),
"first_name_kana": _("全角カタカナでお願いします。(例:シンペイ) 海外籍の方もカタカナで入力してください。"),
"first_name_roma": _("ローマ字でお願いします。(例:Shimpei) 海外籍の方も入力してください。"),
"prefecture_id": _("現在の居住都道府県を選択してください。海外在住の方は海外を選択してください。"),
}
widgets = {
"birth_at": forms.SelectDateWidget(
years=range(datetime.date.today().year + 1, 1900, -1)
)
}
def clean_first_name_kana(self):
first_name_kana = self.cleaned_data["first_name_kana"]
re_katakana = re.compile(r"[\u30A1-\u30F4]+")
if not re_katakana.fullmatch(first_name_kana):
raise forms.ValidationError(_("全角カタカナでない文字が含まれています。"))
return first_name_kana
def clean_last_name_kana(self):
last_name_kana = self.cleaned_data["last_name_kana"]
re_katakana = re.compile(r"[\u30A1-\u30F4]+")
if not re_katakana.fullmatch(last_name_kana):
raise forms.ValidationError(_("全角カタカナでない文字が含まれています。"))
return last_name_kana
def clean_first_name_roma(self):
first_name_roma = self.cleaned_data["first_name_roma"]
if re.fullmatch("[a-zA-Z]+", first_name_roma) is None:
raise forms.ValidationError(_("半角アルファベットでない文字が含まれています。"))
if first_name_roma != first_name_roma.capitalize():
raise forms.ValidationError(_("先頭文字が大文字、それ以降の文字は小文字でお願いします。"))
return first_name_roma
def clean_last_name_roma(self):
last_name_roma = self.cleaned_data["last_name_roma"]
if re.fullmatch("[a-zA-Z]+", last_name_roma) is None:
raise forms.ValidationError(_("アルファベットでない文字が含まれています。"))
if last_name_roma != last_name_roma.capitalize():
raise forms.ValidationError(_("先頭文字が大文字、それ以降の文字は小文字でお願いします。"))
return last_name_roma
def clean_birth_at(self):
birth_at = self.cleaned_data["birth_at"]
if birth_at >= datetime.date.today():
raise forms.ValidationError(_("誕生日が不正です。正しく入力してください。"))
return birth_at
class LoginForm(AuthenticationForm):
class Meta:
model = User
def clean(self):
email = self.cleaned_data.get("username")
password = self.cleaned_data.get("password")
if email is not None and password:
backend = AllowAllUsersModelBackend()
self.user_cache = backend.authenticate(
self.request, username=email, password=password
)
if self.user_cache is None:
raise self.get_invalid_login_error()
else:
self.confirm_login_allowed(self.user_cache)
return self.cleaned_data
def confirm_login_allowed(self, user):
if not user.is_active:
raise forms.ValidationError(
_("まだユーザー登録が認証されておりません。ユーザー登録時に送信されたメールから認証してください。"),
code="inactive",
)
class PasswordChangeForm(PasswordChangeForm):
class Meta:
model = User
class PasswordResetForm(PasswordResetForm):
class Meta:
model = User
def clean_email(self):
email = self.cleaned_data["email"]
query = User.objects.filter(email=email)
if not query.exists():
raise forms.ValidationError("メールアドレスが存在しません。")
return email
class SetPasswordForm(SetPasswordForm):
class Meta:
model = User
class MailChangeForm(forms.ModelForm):
class Meta:
model = User
fields = ("email",)
def clean_email(self):
email = self.cleaned_data["email"]
User.objects.filter(email=email, is_active=False).delete()
return email
class ContactForm(forms.Form):
COMPETITION_CLOSE_AFTER_DAYS = 7
name = forms.CharField(
label="お名前",
max_length=100,
)
email = forms.EmailField(label="メールアドレス")
related = forms.fields.ChoiceField(
required=True,
initial=(settings.EMAIL_INFO, "SCJへのお問い合わせ"),
label="問い合わせ種別",
widget=forms.widgets.RadioSelect,
)
message = forms.CharField(label="お問い合わせ内容", widget=forms.Textarea)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
competitions = Competition.get_by_not_closed_before_days(
self.COMPETITION_CLOSE_AFTER_DAYS
)
related_info = ((settings.EMAIL_INFO, "SCJへのお問い合わせ"),)
for competition in competitions:
related_info += (
(competition.organizer_email, competition.name + "に関するお問い合わせ"),
)
self.fields["related"].choices = related_info
def send_email(self):
subject = "お問い合わせ"
related = self.cleaned_data["related"]
message = self.cleaned_data["message"]
name = self.cleaned_data["name"]
email = self.cleaned_data["email"]
reply_to = [email]
cc = []
if related != settings.EMAIL_INFO:
cc = [settings.EMAIL_INFO]
message = "名前: " + name + "\r\n" + "メールアドレス: " + email + "\r\n\r\n" + message
from_email = settings.EMAIL_HOST_USER
recipient_list = [related]
try:
email_message = EmailMessage(
subject, message, from_email, recipient_list, cc=cc, reply_to=reply_to
)
email_message.send()
except BadHeaderError:
return HttpResponse("無効なヘッダが検出されました。")
class ProfileForm(forms.Form):
prefecture_id = forms.fields.ChoiceField(
label="都道府県",
label_suffix="",
widget=forms.widgets.Select,
choices=PrefectureAndOversea.choices(),
)
class CompetitionForm(forms.Form):
type = forms.fields.ChoiceField(
label="大会種別", label_suffix="", widget=forms.widgets.Select
)
event_id = forms.fields.ChoiceField(
label="開催種目", label_suffix="", widget=forms.widgets.Select
)
year = forms.fields.ChoiceField(
label="開催年", label_suffix="", widget=forms.widgets.Select
)
prefecture_id = forms.fields.ChoiceField(
label="開催都道府県", label_suffix="", widget=forms.widgets.Select
)
class CompetitionRegistrationForm(forms.Form):
event_ids = forms.MultipleChoiceField(
label=_("参加種目"),
required=True,
widget=forms.CheckboxSelectMultiple(attrs={"id": "event_ids"}),
)
guest_count = forms.fields.ChoiceField(
label=_("同伴者数"),
required=True,
widget=forms.widgets.Select,
)
comment = forms.fields.CharField(
label=_("コメント"),
required=False,
widget=forms.Textarea(attrs={"cols": "80", "rows": "5"}),
)
class PostForm(forms.Form):
title = forms.CharField(
label="タイトル",
required=True,
max_length=24,
)
open_at = forms.DateTimeField(
label="開始日時",
required=True,
widget=forms.DateTimeInput(
attrs={
"type": "datetime-local",
"value": timezone.datetime.now().strftime("%Y-%m-%dT%H:%M"),
}
),
input_formats=["%Y-%m-%dT%H:%M"],
)
close_at = forms.DateTimeField(
label="終了日時",
required=True,
widget=forms.DateTimeInput(
attrs={
"type": "datetime-local",
"value": timezone.datetime.now().strftime("%Y-%m-%dT%H:%M"),
}
),
input_formats=["%Y-%m-%dT%H:%M"],
)
registration_open_at = forms.DateTimeField(
label="申し込み開始日時",
required=True,
widget=forms.DateTimeInput(
attrs={
"type": "datetime-local",
"value": timezone.datetime.now().strftime("%Y-%m-%dT%H:%M"),
}
),
input_formats=["%Y-%m-%dT%H:%M"],
)
registration_close_at = forms.DateTimeField(
label="申し込み終了日時",
required=True,
widget=forms.DateTimeInput(
attrs={
"type": "datetime-local",
"value": timezone.datetime.now().strftime("%Y-%m-%dT%H:%M"),
}
),
input_formats=["%Y-%m-%dT%H:%M"],
)
venue_name = forms.CharField(
label="開催地名",
required=True,
max_length=256,
)
venue_address = forms.CharField(
label="開催地住所",
required=True,
max_length=256,
)
latitude = forms.CharField(
label="開催地緯度",
required=True,
max_length=256,
)
longitude = forms.CharField(
label="開催地経度",
required=True,
max_length=256,
)
limit = forms.IntegerField(
label="制限人数",
required=True,
)
fee = forms.IntegerField(
label="参加費",
required=True,
)
url = text = forms.CharField(
label="URL",
required=True,
max_length=256,
)
text = forms.CharField(
label="自由入力",
required=False,
widget=forms.Textarea(attrs={"cols": "80", "rows": "5"}),
max_length=512,
)
class PostEditForm(forms.Form):
title = forms.CharField(
label="タイトル",
required=True,
max_length=24,
)
text = forms.CharField(
label="本文",
required=True,
widget=forms.Textarea(attrs={"cols": "80", "rows": "10"}),
)
class InformationForm(forms.ModelForm):
class Meta:
model = Information
fields = ("type", "title", "text", "is_public")
widgets = {
"type": forms.Select(attrs={"class": "information-type"}),
"title": forms.TextInput(attrs={"class": "information-title"}),
"text": forms.Textarea(attrs={"class": "information-text"}),
}
class RankingForm(forms.Form):
event_id = forms.fields.ChoiceField(
label="種目", label_suffix="", widget=forms.widgets.Select
)
gender_id = forms.fields.ChoiceField(
label="性別", label_suffix="", widget=forms.widgets.Select
)
generation_id = forms.fields.ChoiceField(
label="世代別", label_suffix="", widget=forms.widgets.Select
)
prefecture_id = forms.fields.ChoiceField(
label="都道府県", label_suffix="", widget=forms.widgets.Select
)
# def wrap_boolean_check(v):
# return not (v is False or v is None or v == "" or v == 0)
class PersonEditForm(forms.ModelForm):
is_active = forms.BooleanField(label="承認", required=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class Meta:
model = Person
fields = (
"last_name",
"last_name_kana",
"last_name_roma",
"first_name",
"first_name_kana",
"first_name_roma",
"gender",
"birth_at",
"prefecture_id",
"is_community_posting_offer",
)
error_messages = {
"last_name": {"max_length": _("姓が不正です。")},
"first_name": {"max_length": _("名が不正です。")},
"last_name_kana": {"max_length": _("セイが不正です。")},
"first_name_kana": {"max_length": _("メイが不正です。")},
"last_name_roma": {"max_length": _("姓(ローマ字)が不正です。")},
"first_name_roma": {"max_length": _("名(ローマ字)が不正です。")},
}
help_texts = {
"last_name": _("漢字でお願いします。(例:荒木) 海外籍の方はLast nameを入力してください。"),
"last_name_kana": _("全角カタカナでお願いします。(例:アラキ) 海外籍の方もカタカナで入力してください。"),
"last_name_roma": _("ローマ字でお願いします。(例:Araki) 海外籍の方も入力してください。"),
"first_name": _("漢字でお願いします。(例:慎平) 海外籍の方はFirst nameを入力してください。"),
"first_name_kana": _("全角カタカナでお願いします。(例:シンペイ) 海外籍の方もカタカナで入力してください。"),
"first_name_roma": _("ローマ字でお願いします。(例:Shimpei) 海外籍の方も入力してください。"),
"prefecture_id": _("現在の居住都道府県を選択してください。海外在住の方は海外を選択してください。"),
}
widgets = {
"birth_at": forms.SelectDateWidget(
years=range(datetime.date.today().year + 1, 1900, -1)
)
}
def clean_first_name_kana(self):
first_name_kana = self.cleaned_data["first_name_kana"]
re_katakana = re.compile(r"[\u30A1-\u30F4]+")
if not re_katakana.fullmatch(first_name_kana):
raise forms.ValidationError(_("全角カタカナでない文字が含まれています。"))
return first_name_kana
def clean_last_name_kana(self):
last_name_kana = self.cleaned_data["last_name_kana"]
re_katakana = re.compile(r"[\u30A1-\u30F4]+")
if not re_katakana.fullmatch(last_name_kana):
raise forms.ValidationError(_("全角カタカナでない文字が含まれています。"))
return last_name_kana
def clean_first_name_roma(self):
first_name_roma = self.cleaned_data["first_name_roma"]
if re.fullmatch("[a-zA-Z]+", first_name_roma) is None:
raise forms.ValidationError(_("半角アルファベットでない文字が含まれています。"))
if first_name_roma != first_name_roma.capitalize():
raise forms.ValidationError(_("先頭文字が大文字、それ以降の文字は小文字でお願いします。"))
return first_name_roma
def clean_last_name_roma(self):
last_name_roma = self.cleaned_data["last_name_roma"]
if re.fullmatch("[a-zA-Z]+", last_name_roma) is None:
raise forms.ValidationError(_("アルファベットでない文字が含まれています。"))
if last_name_roma != last_name_roma.capitalize():
raise forms.ValidationError(_("先頭文字が大文字、それ以降の文字は小文字でお願いします。"))
return last_name_roma
def clean_birth_at(self):
birth_at = self.cleaned_data["birth_at"]
if birth_at >= datetime.date.today():
raise forms.ValidationError(_("誕生日が不正です。正しく入力してください。"))
return birth_at
| speedcubing-japan/scj | python/src/app/forms.py | forms.py | py | 17,948 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "django.contrib.auth.forms.UserCreationForm",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "app.models.User",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.utils.translation.gettext_lazy",
"line_number": 27,
"usage_type": "call... |
36773748033 | from bs4 import BeautifulSoup
import requests
import pymongo
from splinter import Browser
from splinter.exceptions import ElementDoesNotExist
import tweepy
# from config import (consumer_key,
# consumer_secret,
# access_token,
# access_token_secret)
# auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
# auth.set_access_token(access_token, access_token_secret)
# api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())
import pandas as pd
#import pymongo
from pprint import pprint
#from flask import Flask, render_template
conn = 'mongodb://localhost:27017'
client = pymongo.MongoClient(conn)
#db = client.
def scrape():
#!which chromedriver
executable_path = {'executable_path': '/usr/local/bin/chromedriver'}
browser = Browser('chrome', **executable_path, headless=True)
# URL of page to be scraped
url = 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'
browser.visit(url)
html = browser.html
# Retrieve page with the requests module
#response = requests.get(url)
# Create BeautifulSoup object; parse with 'lxml'
soup = BeautifulSoup(html, 'html.parser')
news_p_list = []
news_title_list = []
results = soup.find_all('li', class_ ='slide')
for result in results:
newstitle = result.find('div',class_='content_title')
news_title= newstitle.a.text
news_p = result.find('div', class_ = 'article_teaser_body').text
news_p_list.append(news_p)
news_title_list.append(news_title)
#print(news_title)
#print("______")
#print(news_p)
#print("______")
print(news_title_list[0])
print(news_p_list[0])
url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
results = soup.find('div',class_='carousel_items').a
print(soup.find('div',class_='carousel_items').article['style'].split("/",1)[1].rstrip("');"))
featured_image_url = "https://www.jpl.nasa.gov/" + soup.find('div',class_='carousel_items').article['style'].split("/",1)[1].rstrip("');")
#featured_image_url = "https://www.jpl.nasa.gov" + results['data-fancybox-href']
print(featured_image_url)
# tweets = api.user_timeline('MarsWxReport',count=1)
# mars_weather = []
# for tweet in tweets:
# pprint(tweet['text'])
# mars_weather.append(tweet['text'])
# print(mars_weather)
url = 'https://twitter.com/marswxreport?lang=en'
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
tweets = soup.find("div",class_="js-tweet-text-container").text
print(tweets)
mars_weather = tweets
target_url = 'https://space-facts.com/mars/'
tables = pd.read_html(target_url)
tables =tables[0]
tables = tables.rename(columns={0: "Info", 1:"Value"})
tables.set_index('Info', inplace=True)
weather_stuff = tables.to_dict()
weather_stuff
hemisphere_image_urls = [
{"title": "Cerberus Hemisphere Enhanced", "img_url": "https://astrogeology.usgs.gov/cache/images/cfa62af2557222a02478f1fcd781d445_cerberus_enhanced.tif_full.jpg"},
{"title": "Schiaparelli Hemisphere Enhanced", "img_url": "https://astrogeology.usgs.gov/cache/images/3cdd1cbf5e0813bba925c9030d13b62e_schiaparelli_enhanced.tif_full.jpg"},
{"title": "Syrtis Major Hemisphere Enhanced", "img_url": "https://astrogeology.usgs.gov/cache/images/ae209b4e408bb6c3e67b6af38168cf28_syrtis_major_enhanced.tif_full.jpg"},
{"title": "Valles Marineris Hemisphere Enhanced", "img_url": "https://astrogeology.usgs.gov/cache/images/7cf2da4bf549ed01c17f206327be4db7_valles_marineris_enhanced.tif_full.jpg"},
]
scraped_dict = {"Mars_news_title":news_title_list[0],"Mars_news_p":news_p_list[0],"Mars_images":featured_image_url,"Mars_tweets":mars_weather,"Mars_stats": weather_stuff,"Mars_hemi": hemisphere_image_urls}
return scraped_dict
| Jagogbua13/Web-scraping | Web-Scrape HW/scrape_mars.py | scrape_mars.py | py | 4,126 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "splinter.Browser",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulS... |
19250829785 | from __future__ import absolute_import, division, print_function
import os
import io
import sys
import glob
import base64
import json
import argparse
from tqdm import tqdm
import numpy as np
from PIL import Image
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utils.equi_to_cube import e2c
def ndarr2b64utf8(img):
img_t = Image.fromarray(img)
with io.BytesIO() as output:
img_t.save(output, format="PNG")
content = output.getvalue()
b64_barr = base64.b64encode(content)
b_string = b64_barr.decode('utf-8')
return b_string
def b64utf82ndarr(b_string):
b64_barr = b_string.encode('utf-8')
content = base64.b64decode(b64_barr)
img = Image.open(io.BytesIO(content))
inp_np = np.asarray(img)
return inp_np
def create_name_pair(inp_paths):
pair_dict = dict()
for inp_path in inp_paths:
base_name, _ = os.path.splitext(os.path.basename(inp_path))
pair_dict[base_name] = inp_path
return pair_dict
def save_image(inp_path, output_dir, mask_path=None, face_w=128):
pano_img = Image.open(inp_path, "r")
inp_np = np.asarray(pano_img)
base_name, _ = os.path.splitext(os.path.basename(inp_path))
out_dict = dict()
out_dict['f_name'] = inp_path
out_dict['mask_flag'] = False
face_list = ['f', 'r', 'b', 'l', 't', 'd']
if mask_path is not None:
img_mask = Image.open(mask_path, "r")
inp_np_mask = np.asarray(img_mask)
cm_mask, cl_mask = e2c(inp_np_mask, face_w=face_w)
out_dict['mask_flag'] = True
cm, cl = e2c(inp_np, face_w=face_w)
cube_imgs = dict()
for idx, face in enumerate(cl):
b_string = ndarr2b64utf8(face)
if mask_path is None:
cube_imgs[face_list[idx]] = [b_string]
else:
b_string_mask = ndarr2b64utf8(cl_mask[idx])
cube_imgs[face_list[idx]] = [b_string]
cube_imgs[str(face_list[idx])+'_mask']=[b_string_mask]
out_dict['cube'] = cube_imgs
with open(os.path.join(output_dir, base_name + ".json"), "w") as f:
json.dump(out_dict, f)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", required=True, type=str, help="\
path to panorama image input directory. (<input dir>/*.png)")
parser.add_argument("-m", default=None, type=str, help="\
path to panorama mask directory. \
The file name of mask have to be matched with the input image. \
(<input dir>/*.png)")
parser.add_argument("-face_w", default=256)
parser.add_argument("-o", default="output_dir")
args = parser.parse_args()
img_paths = glob.glob(os.path.join(args.i, "*.png"))
out_dir = args.o
mask_dir = args.m
face_w = args.face_w
is_mask_pair = True if mask_dir is not None else False
if not os.path.isdir(out_dir):
os.makedirs(out_dir)
if is_mask_pair:
mask_paths = glob.glob(os.path.join(mask_dir, "*.png"))
# crate pair
name_path_pair_pano = create_name_pair(img_paths)
name_path_pair_cube = create_name_pair(img_paths)
name_path_pair_mask = create_name_pair(mask_paths)
pair = dict()
for k, v in name_path_pair_pano.items():
if k in name_path_pair_mask:
pair[k] = {'pano': v, 'cube': name_path_pair_cube[k], 'mask': name_path_pair_mask[k]}
for k, v in tqdm(pair.items(), desc="cube mask pair"):
save_image(v['pano'], out_dir, mask_path=v['mask'], face_w=face_w)
| swhan0329/panorama_image_inpainting | pre_proc/create_data.py | create_data.py | py | 3,581 | python | en | code | 17 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_num... |
73325034344 | from selenium import webdriver
from selenium.webdriver import Chrome
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
chrome_options = Options()
chrome_options.add_argument("--headless")
wait = WebDriverWait(Chrome, 5)
driver = webdriver.Chrome('/Users/Lucas/anaconda3/chromedriver')
url = 'https://www.google.com/'
browser = Chrome()
browser.get(url)
#buscando as informações na pagina
browser.find_element_by_name('q').send_keys("Eduzz")
browser.find_element_by_name('q').send_keys(Keys.RETURN)
texto = browser.find_element_by_xpath('//*[@id="rso"]/div[1]/div/div/div/div/div/div/div[1]/a/h3')
frase = 'Vem crescer com a gente.'
validação = frase in texto.text
if(validação == True):
print('Possui no texto a frase (Vem crescer com a gente.)')
else:
print('Não possui no texto a frase (Vem crescer com a gente.)')
| LucasPeresFaria/qa_eduzz | selenium_qa/qa_teste.py | qa_teste.py | py | 1,045 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 10,
"usag... |
34377423467 | import os
import pytest
import numpy as np
from pynpoint.core.pypeline import Pypeline
from pynpoint.readwrite.fitsreading import FitsReadingModule
from pynpoint.processing.frameselection import RemoveFramesModule, FrameSelectionModule, \
RemoveLastFrameModule, RemoveStartFramesModule, \
ImageStatisticsModule, FrameSimilarityModule, \
SelectByAttributeModule, ResidualSelectionModule
from pynpoint.util.tests import create_config, remove_test_data, create_star_data
class TestFrameSelection:
    def setup_class(self) -> None:
        """Create the synthetic star data, the configuration file, and the
        Pypeline instance that are shared by all tests in this class."""
        # Relative tolerance used with pytest.approx in every assertion.
        self.limit = 1e-10
        # Working directory: all pipeline input/output lives next to this module.
        self.test_dir = os.path.dirname(__file__) + '/'
        create_star_data(self.test_dir+'images')
        create_config(self.test_dir+'PynPoint_config.ini')
        # Same folder is used as working, input, and output place of the pipeline.
        self.pipeline = Pypeline(self.test_dir, self.test_dir, self.test_dir)
    def teardown_class(self) -> None:
        """Remove the test data (HDF5 database, config file, and the 'images' folder)."""
        remove_test_data(self.test_dir, folders=['images'])
def test_read_data(self) -> None:
module = FitsReadingModule(name_in='read',
image_tag='read',
input_dir=self.test_dir+'images',
overwrite=True,
check=True)
self.pipeline.add_module(module)
self.pipeline.run_module('read')
data = self.pipeline.get_data('read')
assert np.sum(data) == pytest.approx(105.54278879805277, rel=self.limit, abs=0.)
assert data.shape == (10, 11, 11)
attr = self.pipeline.get_attribute('read', 'NDIT', static=False)
assert np.sum(attr) == pytest.approx(10, rel=self.limit, abs=0.)
assert attr.shape == (2, )
attr = self.pipeline.get_attribute('read', 'NFRAMES', static=False)
assert np.sum(attr) == pytest.approx(10, rel=self.limit, abs=0.)
assert attr.shape == (2, )
self.pipeline.set_attribute('read', 'NDIT', [4, 4], static=False)
def test_remove_last_frame(self) -> None:
module = RemoveLastFrameModule(name_in='last',
image_in_tag='read',
image_out_tag='last')
self.pipeline.add_module(module)
self.pipeline.run_module('last')
data = self.pipeline.get_data('last')
assert np.sum(data) == pytest.approx(84.68885503527224, rel=self.limit, abs=0.)
assert data.shape == (8, 11, 11)
self.pipeline.set_attribute('last', 'PARANG', np.arange(8.), static=False)
self.pipeline.set_attribute('last', 'STAR_POSITION', np.full((8, 2), 5.), static=False)
attr = self.pipeline.get_attribute('last', 'PARANG', static=False)
assert np.sum(attr) == pytest.approx(28., rel=self.limit, abs=0.)
assert attr.shape == (8, )
attr = self.pipeline.get_attribute('last', 'STAR_POSITION', static=False)
assert np.sum(attr) == pytest.approx(80., rel=self.limit, abs=0.)
assert attr.shape == (8, 2)
def test_remove_start_frame(self) -> None:
module = RemoveStartFramesModule(frames=1,
name_in='start',
image_in_tag='last',
image_out_tag='start')
self.pipeline.add_module(module)
self.pipeline.run_module('start')
data = self.pipeline.get_data('start')
assert np.sum(data) == pytest.approx(64.44307047549808, rel=self.limit, abs=0.)
assert data.shape == (6, 11, 11)
attr = self.pipeline.get_attribute('start', 'PARANG', static=False)
assert np.sum(attr) == pytest.approx(24., rel=self.limit, abs=0.)
assert attr.shape == (6, )
attr = self.pipeline.get_attribute('start', 'STAR_POSITION', static=False)
assert np.sum(attr) == pytest.approx(60., rel=self.limit, abs=0.)
assert attr.shape == (6, 2)
def test_remove_frames(self) -> None:
module = RemoveFramesModule(name_in='remove',
image_in_tag='start',
selected_out_tag='selected',
removed_out_tag='removed',
frames=[2, 5])
self.pipeline.add_module(module)
self.pipeline.run_module('remove')
data = self.pipeline.get_data('selected')
assert np.sum(data) == pytest.approx(43.68337741822863, rel=self.limit, abs=0.)
assert data.shape == (4, 11, 11)
data = self.pipeline.get_data('removed')
assert np.sum(data) == pytest.approx(20.759693057269445, rel=self.limit, abs=0.)
assert data.shape == (2, 11, 11)
attr = self.pipeline.get_attribute('selected', 'PARANG', static=False)
assert np.sum(attr) == pytest.approx(14., rel=self.limit, abs=0.)
assert attr.shape == (4, )
attr = self.pipeline.get_attribute('selected', 'STAR_POSITION', static=False)
assert np.sum(attr) == pytest.approx(40., rel=self.limit, abs=0.)
assert attr.shape == (4, 2)
attr = self.pipeline.get_attribute('removed', 'PARANG', static=False)
assert np.sum(attr) == pytest.approx(10., rel=self.limit, abs=0.)
assert attr.shape == (2, )
attr = self.pipeline.get_attribute('removed', 'STAR_POSITION', static=False)
assert np.sum(attr) == pytest.approx(20., rel=self.limit, abs=0.)
assert attr.shape == (2, 2)
    def test_frame_selection(self) -> None:
        """Run FrameSelectionModule with the 'median', 'max', and 'range'
        selection methods and check the selected/removed/index output tags."""
        # Method 'median': reject frames whose aperture photometry deviates
        # more than `threshold` sigma from the median of all frames.
        module = FrameSelectionModule(name_in='select1',
                                      image_in_tag='start',
                                      selected_out_tag='selected1',
                                      removed_out_tag='removed1',
                                      index_out_tag='index1',
                                      method='median',
                                      threshold=2.,
                                      fwhm=0.1,
                                      aperture=('circular', 0.1),
                                      position=(None, None, 0.2))
        self.pipeline.add_module(module)
        self.pipeline.run_module('select1')
        # One of the six frames is rejected (index 5).
        data = self.pipeline.get_data('selected1')
        assert np.sum(data) == pytest.approx(54.58514780071149, rel=self.limit, abs=0.)
        assert data.shape == (5, 11, 11)
        data = self.pipeline.get_data('removed1')
        assert np.sum(data) == pytest.approx(9.857922674786586, rel=self.limit, abs=0.)
        assert data.shape == (1, 11, 11)
        data = self.pipeline.get_data('index1')
        assert np.sum(data) == pytest.approx(5, rel=self.limit, abs=0.)
        assert data.shape == (1, )
        # The PARANG and STAR_POSITION attributes are split accordingly.
        attr = self.pipeline.get_attribute('selected1', 'PARANG', static=False)
        assert np.sum(attr) == pytest.approx(17., rel=self.limit, abs=0.)
        assert attr.shape == (5, )
        attr = self.pipeline.get_attribute('selected1', 'STAR_POSITION', static=False)
        assert np.sum(attr) == pytest.approx(50, rel=self.limit, abs=0.)
        assert attr.shape == (5, 2)
        attr = self.pipeline.get_attribute('removed1', 'PARANG', static=False)
        assert np.sum(attr) == pytest.approx(7., rel=self.limit, abs=0.)
        assert attr.shape == (1, )
        attr = self.pipeline.get_attribute('removed1', 'STAR_POSITION', static=False)
        assert np.sum(attr) == pytest.approx(10, rel=self.limit, abs=0.)
        assert attr.shape == (1, 2)
        # Method 'max': compare against the maximum, with an annulus aperture
        # and the star position determined automatically (position=None).
        module = FrameSelectionModule(name_in='select2',
                                      image_in_tag='start',
                                      selected_out_tag='selected2',
                                      removed_out_tag='removed2',
                                      index_out_tag='index2',
                                      method='max',
                                      threshold=1.,
                                      fwhm=0.1,
                                      aperture=('annulus', 0.05, 0.1),
                                      position=None)
        self.pipeline.add_module(module)
        self.pipeline.run_module('select2')
        # Only two frames survive with this tighter threshold.
        data = self.pipeline.get_data('selected2')
        assert np.sum(data) == pytest.approx(21.42652724866543, rel=self.limit, abs=0.)
        assert data.shape == (2, 11, 11)
        data = self.pipeline.get_data('removed2')
        assert np.sum(data) == pytest.approx(43.016543226832646, rel=self.limit, abs=0.)
        assert data.shape == (4, 11, 11)
        data = self.pipeline.get_data('index2')
        assert np.sum(data) == pytest.approx(10, rel=self.limit, abs=0.)
        assert data.shape == (4, )
        attr = self.pipeline.get_attribute('selected2', 'PARANG', static=False)
        assert np.sum(attr) == pytest.approx(8., rel=self.limit, abs=0.)
        assert attr.shape == (2, )
        attr = self.pipeline.get_attribute('selected2', 'STAR_POSITION', static=False)
        assert np.sum(attr) == pytest.approx(20, rel=self.limit, abs=0.)
        assert attr.shape == (2, 2)
        attr = self.pipeline.get_attribute('removed2', 'PARANG', static=False)
        assert np.sum(attr) == pytest.approx(16., rel=self.limit, abs=0.)
        assert attr.shape == (4, )
        attr = self.pipeline.get_attribute('removed2', 'STAR_POSITION', static=False)
        assert np.sum(attr) == pytest.approx(40, rel=self.limit, abs=0.)
        assert attr.shape == (4, 2)
        # Method 'range': keep frames whose photometry falls inside the
        # (lower, upper) interval given by `threshold`.
        module = FrameSelectionModule(name_in='select3',
                                      image_in_tag='start',
                                      selected_out_tag='selected3',
                                      removed_out_tag='removed3',
                                      index_out_tag='index3',
                                      method='range',
                                      threshold=(10., 10.7),
                                      fwhm=0.1,
                                      aperture=('circular', 0.1),
                                      position=None)
        self.pipeline.add_module(module)
        self.pipeline.run_module('select3')
        data = self.pipeline.get_data('selected3')
        assert np.sum(data) == pytest.approx(22.2568501695632, rel=self.limit, abs=0.)
        assert data.shape == (2, 11, 11)
        data = self.pipeline.get_data('removed3')
        assert np.sum(data) == pytest.approx(42.18622030593487, rel=self.limit, abs=0.)
        assert data.shape == (4, 11, 11)
        data = self.pipeline.get_data('index3')
        assert np.sum(data) == pytest.approx(12, rel=self.limit, abs=0.)
        assert data.shape == (4, )
def test_image_statistics_full(self) -> None:
module = ImageStatisticsModule(name_in='stat1',
image_in_tag='read',
stat_out_tag='stat1',
position=None)
self.pipeline.add_module(module)
self.pipeline.run_module('stat1')
data = self.pipeline.get_data('stat1')
assert np.sum(data) == pytest.approx(115.68591492205017, rel=self.limit, abs=0.)
assert data.shape == (10, 6)
def test_image_statistics_position(self) -> None:
module = ImageStatisticsModule(name_in='stat2',
image_in_tag='read',
stat_out_tag='stat2',
position=(5, 5, 0.1))
self.pipeline.add_module(module)
self.pipeline.run_module('stat2')
data = self.pipeline.get_data('stat2')
assert np.sum(data) == pytest.approx(118.7138708968444, rel=self.limit, abs=0.)
assert data.shape == (10, 6)
def test_frame_similarity_mse(self) -> None:
    # MSE similarity values are attached to 'read' as the 'MSE' attribute.
    simi_module = FrameSimilarityModule(name_in='simi1',
                                        image_tag='read',
                                        method='MSE',
                                        mask_radius=(0., 0.1))

    self.pipeline.add_module(simi_module)
    self.pipeline.run_module('simi1')

    values = self.pipeline.get_attribute('read', 'MSE', static=False)
    assert values.shape == (10, )
    assert np.min(values) > 0.
    assert np.sum(values) == pytest.approx(0.11739141370277852, rel=self.limit, abs=0.)
def test_frame_similarity_pcc(self) -> None:
    # PCC similarity values are attached to 'read' as the 'PCC' attribute.
    simi_module = FrameSimilarityModule(name_in='simi2',
                                        image_tag='read',
                                        method='PCC',
                                        mask_radius=(0., 0.1))

    self.pipeline.add_module(simi_module)
    self.pipeline.run_module('simi2')

    values = self.pipeline.get_attribute('read', 'PCC', static=False)
    assert values.shape == (10, )
    assert np.min(values) > 0.
    assert np.sum(values) == pytest.approx(9.134820985662829, rel=self.limit, abs=0.)
def test_frame_similarity_ssim(self) -> None:
    # SSIM similarity with a constant temporal median reference.
    simi_module = FrameSimilarityModule(name_in='simi3',
                                        image_tag='read',
                                        method='SSIM',
                                        mask_radius=(0., 0.1),
                                        temporal_median='constant')

    self.pipeline.add_module(simi_module)
    self.pipeline.run_module('simi3')

    values = self.pipeline.get_attribute('read', 'SSIM', static=False)
    assert values.shape == (10, )
    assert np.min(values) > 0.
    assert np.sum(values) == pytest.approx(9.074290801266256, rel=self.limit, abs=0.)
def test_select_by_attribute(self) -> None:
    # Tag all frames with an index, then keep the number_frames entries with
    # the largest SSIM values (order='descending').
    self.pipeline.set_attribute('read', 'INDEX', np.arange(44), static=False)

    select_module = SelectByAttributeModule(name_in='frame_removal_1',
                                            image_in_tag='read',
                                            attribute_tag='SSIM',
                                            number_frames=6,
                                            order='descending',
                                            selected_out_tag='select_sim',
                                            removed_out_tag='remove_sim')

    self.pipeline.add_module(select_module)
    self.pipeline.run_module('frame_removal_1')

    index_values = self.pipeline.get_attribute('select_sim', 'INDEX', static=False)
    assert index_values.shape == (44, )
    assert np.sum(index_values) == pytest.approx(946, rel=self.limit, abs=0.)

    ssim_selected = self.pipeline.get_attribute('select_sim', 'SSIM', static=False)
    assert ssim_selected.shape == (6, )
    assert np.sum(ssim_selected) == pytest.approx(5.545578246610884, rel=self.limit, abs=0.)

    ssim_removed = self.pipeline.get_attribute('remove_sim', 'SSIM', static=False)
    assert ssim_removed.shape == (4, )
    assert np.sum(ssim_removed) == pytest.approx(3.528712554655373, rel=self.limit, abs=0.)
def test_residual_selection(self) -> None:
    # Frame selection based on residuals inside an annulus (percentage=80).
    select_module = ResidualSelectionModule(name_in='residual_select',
                                            image_in_tag='start',
                                            selected_out_tag='res_selected',
                                            removed_out_tag='res_removed',
                                            percentage=80.,
                                            annulus_radii=(0.1, 0.2))

    self.pipeline.add_module(select_module)
    self.pipeline.run_module('residual_select')

    selected = self.pipeline.get_data('res_selected')
    assert selected.shape == (4, 11, 11)
    assert np.sum(selected) == pytest.approx(41.77295229983322, rel=self.limit, abs=0.)

    removed = self.pipeline.get_data('res_removed')
    assert removed.shape == (2, 11, 11)
    assert np.sum(removed) == pytest.approx(22.670118175664847, rel=self.limit, abs=0.)
| PynPoint/PynPoint | tests/test_processing/test_frameselection.py | test_frameselection.py | py | 15,896 | python | en | code | 17 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pynpoint.util.tests.create_star_data",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pyn... |
43729251600 | from torch import nn
class Convolution(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU block that preserves spatial size.

    Args:
        in_channels: number of input channels (cast to int).
        out_channels: number of output channels (cast to int).
        k_size: square kernel size (default 3).
        rate: dilation rate applied on both axes (default 1).
    """

    def __init__(self, in_channels, out_channels, k_size=3, rate=1):
        super().__init__()
        channels_in = int(in_channels)
        channels_out = int(out_channels)
        conv = nn.Conv2d(in_channels=channels_in,
                         out_channels=channels_out,
                         kernel_size=(k_size, k_size),
                         dilation=(rate, rate),
                         # 'same' padding keeps H and W unchanged (stride is 1).
                         padding='same')
        # Attribute name kept as `convlayer` so state_dict keys stay stable.
        self.convlayer = nn.Sequential(conv,
                                       nn.BatchNorm2d(channels_out),
                                       nn.ReLU(inplace=True))

    def forward(self, x):
        """Apply conv -> batch-norm -> ReLU to a (N, C, H, W) tensor."""
        return self.convlayer(x)
| mrFahrenhiet/CrackSegmentationDeepLearning | model/convolutionBase.py | convolutionBase.py | py | 573 | python | en | code | 24 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
15296957499 | #!/usr/bin/env python3
import os
import pickle
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import norm
import ROOT
import uproot
import yaml
# ---------------------------------------------------------------------------
# Global setup for the hypertriton ct-spectrum / lifetime / ratio analysis.
# ---------------------------------------------------------------------------
SPEED_OF_LIGHT = 2.99792458  # used below to turn the fitted ct slope into a lifetime in ps
SPLIT = True  # analyse matter and antimatter separately
centrality_colors = [ROOT.kOrange+7, ROOT.kAzure+4, ROOT.kTeal+4, ROOT.kBlack, ROOT.kBlack, ROOT.kBlack]

# avoid pandas warning
warnings.simplefilter(action='ignore', category=FutureWarning)
ROOT.gROOT.SetBatch()  # draw canvases off-screen (no GUI)

##################################################################
# read configuration file
##################################################################
config = 'config.yaml'
with open(os.path.expandvars(config), 'r') as stream:
    try:
        params = yaml.full_load(stream)
    except yaml.YAMLError as exc:
        # NOTE(review): a YAML error is only printed; the lookups below would
        # then fail on an undefined `params` -- confirm this is intended.
        print(exc)

ANALYSIS_RESULTS_PATH = params['ANALYSIS_RESULTS_PATH']
CT_BINS_CENT = params['CT_BINS_CENT']
CENTRALITY_LIST = params['CENTRALITY_LIST']
RANDOM_STATE = params['RANDOM_STATE']
##################################################################

# split matter/antimatter
SPLIT_LIST = ['']
if SPLIT:
    SPLIT_LIST = ['antimatter', 'matter']

# Output directory for the lifetime plots; create parent dirs as needed
# (plain os.mkdir would fail when 'plots' itself does not exist yet).
os.makedirs("plots/cpt_and_lifetime", exist_ok=True)

ROOT.gStyle.SetOptStat(0)
ROOT.gStyle.SetOptFit(0)

# BDT-efficiency working points per analysis bin; close the handle
# deterministically (the original leaked the open() file object).
with open("file_eff_cut_dict", "rb") as eff_cut_file:
    eff_cut_dict = pickle.load(eff_cut_file)

presel_eff_file = uproot.open('PreselEff.root')
signal_extraction_file = ROOT.TFile.Open('SignalExtraction.root')
signal_extraction_keys = uproot.open('SignalExtraction.root').keys()
analysis_results_file = uproot.open(os.path.expandvars(ANALYSIS_RESULTS_PATH))

# get centrality selected histogram
cent_counts, cent_edges = analysis_results_file['Centrality_selected;1'].to_numpy()
cent_bin_centers = (cent_edges[:-1]+cent_edges[1:])/2

abs_correction_file = ROOT.TFile.Open('He3_abs.root')
eff_correction_file = ROOT.TFile.Open('EffAbsCorrection_1.root')
ratio_file = ROOT.TFile.Open('Ratio.root', 'recreate')
# Main analysis loop: for each centrality class build the efficiency- and
# absorption-corrected ct spectra for antimatter and matter, fit an
# exponential to extract the lifetime and yield, then form and fit the
# antimatter/matter ratio.
for i_cent_bins in range(len(CENTRALITY_LIST)):
cent_bins = CENTRALITY_LIST[i_cent_bins]
# get number of events
cent_range_map = np.logical_and(cent_bin_centers > cent_bins[0], cent_bin_centers < cent_bins[1])
counts_cent_range = cent_counts[cent_range_map]
evts = np.sum(counts_cent_range)
print(f'Number of events: {evts}')
# One histogram per split: index 0 = antimatter, 1 = matter (SPLIT_LIST order).
h_corrected_yields = [ROOT.TH1D(), ROOT.TH1D()]
for i_split, split in enumerate(SPLIT_LIST):
print(f'{i_split} -> {split}')
# get preselection efficiency and abs correction histograms
presel_eff_counts, presel_eff_edges = presel_eff_file[
f'fPreselEff_vs_ct_{split}_{cent_bins[0]}_{cent_bins[1]};1'].to_numpy()
presel_eff_bin_centers = (presel_eff_edges[1:]+presel_eff_edges[:-1])/2
# Spectral-shape model used for the absorption correction lookup.
func_name = 'BGBW'
if (cent_bins[1] == 90) or (cent_bins[0] == 0 and cent_bins[1] == 10):
func_name = 'BlastWave'
g_abs_correction = ROOT.TGraphAsymmErrors()
g_abs_correction = abs_correction_file.Get(f"{cent_bins[0]}_{cent_bins[1]}/{func_name}/fEffCt_{split}_{cent_bins[0]}_{cent_bins[1]}_{func_name}")
eff_abs_correction = ROOT.TH1D()
eff_abs_correction = eff_correction_file.Get(f"{cent_bins[0]}_{cent_bins[1]}/{func_name}/fCorrection_{split}_{cent_bins[0]}_{cent_bins[1]}_{func_name}")
# list of corrected yields
# Centrality-dependent ct binning (hard-coded overrides for 10-30/30-50/50-90).
ct_bins_tmp = [0]
ct_bins_tmp += CT_BINS_CENT[i_cent_bins]
if cent_bins[0]==30 or cent_bins[0]==50:
ct_bins_tmp = [0, 2, 4, 7, 14, 35]
if cent_bins[0]==10:
ct_bins_tmp = [0, 2, 7, 14, 35]
#if cent_bins[1] == 90:
# ct_bin_tmp = CT_BINS_CENT[i_cent_bins]s
bins = np.array(ct_bins_tmp, dtype=float)
# print(bins)
h_corrected_yields[i_split] = ROOT.TH1D(
f'fYields_{split}_{cent_bins[0]}_{cent_bins[1]}', f'{split}, {cent_bins[0]}-{cent_bins[1]}%', len(bins)-1, bins)
for ct_bins in zip(CT_BINS_CENT[i_cent_bins][:-1], CT_BINS_CENT[i_cent_bins][1:]):
# NOTE(review): `bin` shadows the builtin of the same name (doc-only pass,
# so it is kept as-is).
bin = f'{split}_{cent_bins[0]}_{cent_bins[1]}_{ct_bins[0]}_{ct_bins[1]}'
formatted_eff_cut = "{:.2f}".format(eff_cut_dict[bin])
# look for plot with eff = eff_cut (or the nearest one)
bkg_shape = 'pol1'
eff_cut_increment = 0
eff_cut_sign = -1
# Widen the search around the nominal working point (+/- 0.01 steps)
# until a matching inv-mass histogram exists in the file.
while signal_extraction_keys.count(f'{bin}_{bkg_shape}/fInvMass_{formatted_eff_cut};1') == 0:
if eff_cut_sign == -1:
eff_cut_increment += 0.01
eff_cut_sign *= -1
formatted_eff_cut = "{:.2f}".format(eff_cut_dict[bin]+eff_cut_increment*eff_cut_sign)
# get signal
h_raw_yield = signal_extraction_file.Get(f'{bin}_{bkg_shape}/fRawYields;1')
eff_index = h_raw_yield.FindBin(float(formatted_eff_cut))
raw_yield = h_raw_yield.GetBinContent(eff_index)
raw_yield_error = h_raw_yield.GetBinError(eff_index)
print(f'eff_cut = {formatted_eff_cut}, raw_yield = {raw_yield}+{raw_yield_error}')
# apply corrections
# 1. efficiency (presel x BDT)
presel_eff_map = np.logical_and(
presel_eff_bin_centers > ct_bins[0],
presel_eff_bin_centers < ct_bins[1])
presel_eff = presel_eff_counts[presel_eff_map]
bdt_eff = float(formatted_eff_cut)
print(f'bin: {presel_eff_map}, presel_eff: {presel_eff}')
eff = presel_eff * eff_cut_dict[bin]
# 2. absorption correction
# NOTE(review): `abs` shadows the builtin; kept unchanged in this pass.
abs = g_abs_correction.GetPointY(CT_BINS_CENT[i_cent_bins].index(ct_bins[0]))
print(f"absorption correction for point {CT_BINS_CENT[i_cent_bins].index(ct_bins[0])}: {abs}")
# 3. efficiency correction
eff_correct = eff_abs_correction.GetBinContent(eff_abs_correction.FindBin(ct_bins[0]))
ct_bin_index = h_corrected_yields[i_split].FindBin(ct_bins[0]+0.5)
h_corrected_yields[i_split].SetBinContent(ct_bin_index, raw_yield/eff[0]/abs/eff_correct)
h_corrected_yields[i_split].SetBinError(ct_bin_index, raw_yield_error/eff[0]/abs/eff_correct)
# set labels
h_corrected_yields[i_split].GetXaxis().SetTitle("#it{c}t (cm)")
h_corrected_yields[i_split].GetYaxis().SetTitle("d#it{N}/d(#it{c}t) (cm^{-1})")
#h_corrected_yields[i_split].Scale(1, "width")
# Normalise bin contents by bin width (bins 0-1 skipped by the [2:] slice).
for i_bin in range(len(bins))[2:]:
bin_width = h_corrected_yields[i_split].GetBinWidth(i_bin)
print(f"bin: {h_corrected_yields[i_split].GetBinLowEdge(i_bin)}; bin width: {bin_width}")
bin_content = h_corrected_yields[i_split].GetBinContent(i_bin)
bin_error = h_corrected_yields[i_split].GetBinError(i_bin)
h_corrected_yields[i_split].SetBinContent(i_bin, bin_content/bin_width)
h_corrected_yields[i_split].SetBinError(i_bin, bin_error/bin_width)
h_corrected_yields[i_split].GetYaxis().SetRangeUser(1., 450.)
h_corrected_yields[i_split].SetMarkerStyle(20)
h_corrected_yields[i_split].SetMarkerSize(0.8)
h_corrected_yields[i_split].Write()
# fit with exponential pdf
# Fit range depends on centrality (fewer populated ct bins in peripheral classes).
fit_function_expo = ROOT.TF1("expo", "expo", 2, 35)
if cent_bins[0] == 30 or cent_bins[0]==50:
fit_function_expo = ROOT.TF1("expo", "expo", 2, 14)
elif cent_bins[1] == 90:
fit_function_expo = ROOT.TF1("expo", "expo", 0, 35)
res_expo = h_corrected_yields[i_split].Fit(fit_function_expo, "RMIS+")
# compute and display lifetime
tau = -1/fit_function_expo.GetParameter(1)*100/SPEED_OF_LIGHT # ps
tau_error = fit_function_expo.GetParError(1)*100/SPEED_OF_LIGHT/fit_function_expo.GetParameter(1)/fit_function_expo.GetParameter(1) # ps
tau_text = ROOT.TLatex(20, 100, '#tau = ' + "{:.0f}".format(tau) + '#pm' + "{:.0f}".format(tau_error) + ' ps')
tau_text.SetTextSize(0.05)
# display chi2
formatted_chi2_lifetime = "{:.2f}".format(fit_function_expo.GetChisquare())
chi2_lifetime_text = ROOT.TLatex(20, 70, "#chi^{2}/NDF = "+formatted_chi2_lifetime+"/"+str(fit_function_expo.GetNDF()))
chi2_lifetime_text.SetTextSize(0.05)
# compute yield
# Integrate the fitted exponential over (0, inf) to get the total yield,
# divided by 2 (rapidity window) and by the number of events.
integral = float()
integral_error = float()
integral = fit_function_expo.Integral(0, 1.e9)
integral_error = fit_function_expo.IntegralError(0, 1.e9, res_expo.GetParams(), res_expo.GetCovarianceMatrix().GetMatrixArray())
formatted_integral = "{:.10f}".format(integral/2./evts)
formatted_integral_error = "{:.10f}".format(integral_error/evts/2.)
print(f"N_ev = {evts}")
integral_yield = ROOT.TLatex(20, 40, "1/#it{N_{ev}} #times BR #times d#it{N}/d#it{y} = "+formatted_integral+" #pm "+formatted_integral_error)
integral_yield.SetTextSize(0.05)
# draw on canvas
canv = ROOT.TCanvas()
ROOT.gStyle.SetOptStat(0)
canv.SetTicks(1, 1)
canv.SetName(h_corrected_yields[i_split].GetName())
canv.SetTitle(h_corrected_yields[i_split].GetTitle())
canv.cd()
h_corrected_yields[i_split].Draw("")
tau_text.Draw("same")
chi2_lifetime_text.Draw("same")
integral_yield.Draw("same")
canv.SetLogy()
canv.Write() # write to file
canv.Print(f'plots/cpt_and_lifetime/{h_corrected_yields[i_split].GetName()}.pdf')
# ratios
# Antimatter / matter ratio for this centrality class, fitted with a constant.
ROOT.gStyle.SetOptStat(0)
h_ratio = ROOT.TH1D(h_corrected_yields[0])
h_ratio.SetName(f'fRatio_{cent_bins[0]}_{cent_bins[1]}')
h_ratio.SetTitle(f'{cent_bins[0]}-{cent_bins[1]}%')
h_ratio.Divide(h_corrected_yields[0], h_corrected_yields[1], 1, 1)
h_ratio.SetLineColor(centrality_colors[i_cent_bins])
h_ratio.SetMarkerColor(centrality_colors[i_cent_bins])
h_ratio.GetYaxis().SetTitle("Ratio ^{3}_{#bar{#Lambda}}#bar{H} / ^{3}_{#Lambda}H")
h_ratio.GetYaxis().SetRangeUser(0., 1.8)
h_ratio.GetXaxis().SetRangeUser(0., 35.)
h_ratio.SetMarkerStyle(20)
h_ratio.SetMarkerSize(0.8)
h_ratio.Fit("pol0","R","",2,35)
h_ratio.Write()
# plot ratios
c = ROOT.TCanvas("c", "c")
c.SetTicks(1, 1)
c.cd()
h_ratio.Draw()
formatted_ratio = "{:.2f}".format(h_ratio.GetFunction("pol0").GetParameter(0))
formatted_ratio_error = "{:.2f}".format(h_ratio.GetFunction("pol0").GetParError(0))
text_x_position = 20
ratio_text = ROOT.TLatex(text_x_position, 1.6, f"R = {formatted_ratio} #pm {formatted_ratio_error}")
ratio_text.SetTextFont(44)
ratio_text.SetTextSize(28)
ratio_text.Draw("same")
formatted_chi2 = "{:.2f}".format(h_ratio.GetFunction("pol0").GetChisquare())
chi2_text = ROOT.TLatex(text_x_position, 1.45, "#chi^{2}/NDF = "+formatted_chi2+"/"+str(h_ratio.GetFunction("pol0").GetNDF()))
chi2_text.SetTextFont(44)
chi2_text.SetTextSize(28)
chi2_text.Draw("same")
c.Print(f"plots/{h_ratio.GetName()}.pdf")
# Drop Python references to the ROOT objects before the next iteration.
del h_corrected_yields
del h_ratio
ratio_file.Close()
| maciacco/MuBFromRatios_PbPb | Hypertriton_PbPb/ratio.py | ratio.py | py | 11,027 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ROOT.kOrange",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "ROOT.kAzure",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "ROOT.kTeal",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "ROOT.kBlack",
... |
37645603997 | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import time
# Reuse a persistent Chrome profile so the WhatsApp Web session (QR login)
# survives between runs; the automation switches are hidden from the page.
options = webdriver.ChromeOptions()
options.add_argument(r"user-data-dir=C:\Github\python-whatsapp-messages\whatsapp-web\data")
options.add_experimental_option("excludeSwitches", ["enable-automation"])
options.add_experimental_option('useAutomationExtension', False)
driver = webdriver.Chrome(options=options)
driver.maximize_window()
driver.get('https://web.whatsapp.com')
# NOTE(review): fixed sleeps are brittle -- consider WebDriverWait with
# expected_conditions instead of hard-coded delays.
time.sleep(20)
# Open the chat whose title is 'Eu' (the user's own chat).
driver.find_element(By.XPATH, "//*[@title='Eu']").click()
time.sleep(5)
# Message input box, located via WhatsApp's generated CSS class names --
# these change with WhatsApp Web updates and will need maintenance.
message_field = driver.find_element(By.XPATH, "//div[@class='to2l77zo gfz4du6o ag5g9lrv bze30y65 kao4egtt']//p[@class='selectable-text copyable-text iq0m558w g0rxnol2']")
message_field.send_keys("Message to be sent!")
time.sleep(5)
# ENTER submits the typed message.
message_field.send_keys(Keys.ENTER)
time.sleep(5)
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 10,
"usage_type": "call"
},
{
"api_n... |
25305939157 | __author__ = 'diegopinheiro'
from genetic_algorithms.genetic_algorithm import GeneticAlgorithm
from stop_criterions.iteration import IterationStopCriterion
from common.set_reader import SetReader
from selection_strategies.fitness_proportionate import FitnessProportionate
from selection_strategies.tournament_selection import TournamentSelection
from selection_strategies.rank_selection import RankSelection
from crossover_strategies.single_point import SinglePoint
from crossover_strategies.two_point import TwoPoint
from fitness_fuctions.accuracy_set import AccuracySet
from initialization_strategies.random_initialization import RandomInitialization
from initialization_strategies.data_initialization import DataInitialization
from mutation_strategies.one_gene_mutation import OneGeneMutation
import random
from common.writer import Writer
from common.attribute import Attribute
from common.attribute_converter import AttributeConverter
from common.float_converter_2 import FloatConverter2
import sys
def main(output_path):
# Run a grid of GA experiments on the iris dataset: for each
# (rule count, mutation rate) pair, perform 30 independent runs and log
# per-run results to files under `output_path`.
print("\r\n\r\n\r\n")
print("######## Experiment: testIris ########")
data_set_name = "iris"
data_set_reader = SetReader(data_set_name)
training_set = data_set_reader.read_train_set()
test_set = data_set_reader.read_test_set()
data_set_attributes = data_set_reader.attributes
# Fixed GA hyper-parameters shared by all runs.
population_size = 100
replacement_rate = 0.3
max_number_iteration = 3000
use_elitism = True
use_default = True
initial_number_rules = 4
min_number_rules = 4
max_number_rules = 4
# NOTE(review): these strategy objects are constructed once and shared by
# every run -- confirm they are stateless across GeneticAlgorithm runs.
stop_criterion = IterationStopCriterion(max_number_iteration=max_number_iteration)
selection_strategy = FitnessProportionate()
# selection_strategy = TournamentSelection(probability=0.7)
# selection_strategy = RankSelection()
# crossover_strategy = TwoPoint()
crossover_strategy = SinglePoint()
# NOTE(review): fitness uses min/max_number_rules = 4 while `rule` below
# varies over 2..4 -- verify this mismatch is intentional.
fitness_function = AccuracySet(min_number_rules=min_number_rules, max_number_rules=max_number_rules, dataset=training_set)
initialization_strategy = DataInitialization(data_set=training_set)
mutation_strategy = OneGeneMutation()
# TODO remove fixed seed
# random.seed(0)
for rule in [2,3,4]:
for mutation_rate in [.01, .05, .1, .2, .5]:
print("rule:%d mutation:%.2f" % (rule, mutation_rate) )
for run in range(0,30):
# Overrides the initial value of 4 set above.
initial_number_rules = rule
genetic_algorithm = GeneticAlgorithm(population_size=population_size,
replacement_rate=replacement_rate,
mutation_rate=mutation_rate,
use_elitism=use_elitism,
use_default=use_default,
initial_number_rules=initial_number_rules,
stop_criterion=stop_criterion,
selection_strategy=selection_strategy,
crossover_strategy=crossover_strategy,
fitness_function=fitness_function,
initialization_strategy=initialization_strategy,
mutation_strategy=mutation_strategy,
attributes=data_set_attributes,
test_set=test_set)
dimensions = genetic_algorithm.calculate_rule_size() * initial_number_rules
# Output file name encodes the full experiment configuration.
writer = Writer(output_path+'ga_iris_%d_%d_%d_%s_%s_%.2f_%.2f_%d.with_positions' % (population_size, dimensions, initial_number_rules,
str.lower(selection_strategy.__class__.__name__),
str.lower(crossover_strategy.__class__.__name__),
mutation_rate,
replacement_rate,
run))
genetic_algorithm.writer = writer
genetic_algorithm.training_set = training_set
genetic_algorithm.learn()
accuracy_training_set = genetic_algorithm.calculate_accuracy(training_set)
accuracy_test_set = genetic_algorithm.calculate_accuracy(test_set)
print(genetic_algorithm.get_best_individual().print_individual())
print("Accuracy Training Set: " + str(accuracy_training_set))
print("Accuracy Test Set: " + str(accuracy_test_set))
if __name__ == "__main__":
output_path = sys.argv[1]
main(output_path)
| diegompin/genetic_algorithm | experiments/test_iris.py | test_iris.py | py | 5,058 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "common.set_reader.SetReader",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "stop_criterions.iteration.IterationStopCriterion",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "selection_strategies.fitness_proportionate.FitnessProportionate",
... |
27838730172 | import struct
from newsroom import jsonl
import os
from utils import preprocess_text
import parameters as p
import pandas as pd
import pickle as pkl
from tensorflow.core.example import example_pb2
import argparse
def trim_and_transform(example_generator, new_filename, transformation, constraint):
    """Transform every example and write those passing `constraint` to a gzipped jsonl file.

    Prints a progress line every 1000 input examples and a final count summary.
    """
    n_seen = 0
    n_kept = 0
    # Start from a clean output file.
    if os.path.isfile(new_filename):
        os.remove(new_filename)
    with jsonl.open(new_filename, gzip=True) as out_file:
        for example in example_generator:
            n_seen += 1
            example = transformation(example)
            if constraint(example):
                n_kept += 1
                out_file.appendline(example)
            if n_seen % 1000 == 0:
                print(n_seen)
    print('# of old lines: %i, # of new lines: %i' % (n_seen, n_kept))
def newsroom_constraint(line):
    """Keep only examples where both text and summary are present (not None)."""
    return all(line[key] is not None for key in ('text', 'summary'))
def newsroom_preprocess(line):
    """Run `preprocess_text` over text and summary, passing None through unchanged."""
    def _clean(value):
        return None if value is None else preprocess_text(value)
    return {'text': _clean(line['text']), 'summary': _clean(line['summary'])}
# # TODO: clean this up
# def cnn_preprocess(example_str):
# abstract = []
# article = []
# in_abstract = False
# in_article = False
# example_str = example_str.decode('utf-8', 'replace')
# example_str = example_str.replace('<s>', ' ')
# example_str = example_str.replace('</s>', ' . ')
# prev_c = None
# for c in example_str.split():
# if c == '.' and prev_c == c:
# continue
# if 'abstract' in c and c != 'abstract':
# in_abstract = True
# in_article = False
# continue
# if 'article' in c and c != 'article':
# in_abstract = False
# in_article = True
# continue
# c = c.replace('</s>', '.')
# if '<s>' in c: continue
# if '�' in c: continue
# if in_abstract:
# abstract.append(c)
# if in_article:
# article.append(c)
# prev_c = c
# pdb.set_trace()
# return dict(text=article, summary=abstract)
def cnn_preprocess(example_str):
    """Parse one serialized tf.Example into dict(text=tokens, summary=tokens).

    The article/abstract were stored under the 'article'/'abstract' feature
    keys when the data files were written.  Both fields are set to None when
    either one is missing or unreadable.
    """
    # convert to tensorflow example e
    e = example_pb2.Example.FromString(example_str)
    # extract text and summary
    try:
        article_text = e.features.feature['article'].bytes_list.value[0].decode().split(' ')
        abstract_text = e.features.feature['abstract'].bytes_list.value[0].decode().split(' ')
    except (ValueError, IndexError):
        # A missing feature yields an empty repeated field, so value[0] raises
        # IndexError; the original `except ValueError` let that escape.
        article_text = abstract_text = None
    return dict(text=article_text, summary=abstract_text)
def cnn_constraint(line):
    """Keep only examples where both text and summary were parsed (not None)."""
    return all(line[key] is not None for key in ('text', 'summary'))
def pico_preprocess(line):
    """Map a dataframe row to {text, P, I, O}; clean the strings when all fields are non-NaN."""
    mapped = {'text': line.abstract, 'P': line.population,
              'I': line.intervention, 'O': line.outcome}
    if not pico_constraint(mapped):
        # At least one field is NaN: return unmodified so the constraint
        # filter downstream can drop it.
        return mapped
    return {key: preprocess_text(value) for key, value in mapped.items()}
def pico_constraint(line):
    """Keep only examples where no field is NaN (NaN is the only value != itself)."""
    return all(line[key] == line[key] for key in ('text', 'P', 'I', 'O'))
def preprocess_newsroom_datafile(filename, new_filename):
    """Clean one gzipped-jsonl Newsroom file into `new_filename`."""
    with jsonl.open(filename, gzip=True) as source:
        trim_and_transform(source, new_filename, newsroom_preprocess, newsroom_constraint)
def preprocess_cnn_datafile(filename, new_filename):
    """Stream length-prefixed tf.Example records from a .bin file into the cleaning pipeline."""
    def record_stream():
        with open(filename, "rb") as handle:
            # Each record is an 8-byte length ('q' = native int64) followed by
            # the payload; read(8) returns b'' at EOF, the iter() sentinel.
            for len_bytes in iter(lambda: handle.read(8), b''):
                str_len = struct.unpack('q', len_bytes)[0]
                yield struct.unpack('%ds' % str_len, handle.read(str_len))[0]
    trim_and_transform(record_stream(), new_filename, cnn_preprocess, cnn_constraint)
def preprocess_pico_dataset(filename, new_filename_train, new_filename_dev, new_filename_test, aspect_file):
    """Split the PICO csv into train/dev/test slices, clean each split, and record the aspect names."""
    df = pd.read_csv(filename)

    def rows_of(frame):
        # Yield one pandas Series per example, as the pipeline expects.
        for _, row in frame.iterrows():
            yield row

    # Fixed row-count split: first 30k train, next 10k dev, remainder test.
    trim_and_transform(rows_of(df[:30000]), new_filename_train, pico_preprocess, pico_constraint)
    trim_and_transform(rows_of(df[30000:40000]), new_filename_dev, pico_preprocess, pico_constraint)
    trim_and_transform(rows_of(df[40000:]), new_filename_test, pico_preprocess, pico_constraint)

    with open(aspect_file, 'w') as aspect_handle:
        aspect_handle.write(str(['P', 'I', 'O']))
def preprocess_all_newsroom_dataset_files(folder):
    """Clean the train/val/test Newsroom files found in `folder`."""
    for split in ('train', 'val', 'test'):
        preprocess_newsroom_datafile(
            os.path.join(folder, split + '.data'),
            os.path.join(folder, split + '_processed.data'))
def preprocess_all_cnn_dataset_files(folder):
    """Clean the train/val/test CNN/DailyMail binary files found in `folder`."""
    for split in ('train', 'val', 'test'):
        preprocess_cnn_datafile(
            os.path.join(folder, split + '.bin'),
            os.path.join(folder, split + '_processed.data'))
if __name__ == '__main__':
    # CLI entry point: `python preprocess.py {newsroom|cnn} <data_folder>`.
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset_type', help="which dataset to preprocess: 'newsroom' or 'cnn'")
    parser.add_argument('data_folder', help='directory holding the raw split files')
    args = parser.parse_args()
    if args.dataset_type == 'newsroom':
        # for newsroom dataset
        preprocess_all_newsroom_dataset_files(args.data_folder)
    elif args.dataset_type == 'cnn':
        # for cnn dataset
        preprocess_all_cnn_dataset_files(args.data_folder)
    else:
        # The original raised a bare `Exception` with no message.
        raise ValueError('unknown dataset_type: %r (expected "newsroom" or "cnn")' % args.dataset_type)
# IGNORE FOR NOW
# for pico dataset
# aspect_file = '/Volumes/JEREDUSB/aspects.txt'
# filename = '/Volumes/JEREDUSB/pico_cdsr.csv'
# new_filename_train = '/Volumes/JEREDUSB/train_processed.data'
# new_filename_dev = '/Volumes/JEREDUSB/dev_processed.data'
# new_filename_test = '/Volumes/JEREDUSB/test_processed.data'
# preprocess_pico_dataset(filename, new_filename_train, new_filename_dev, new_filename_test, aspect_file)
# with open(aspect_file, 'w') as aspectfile:
# aspectfile.write(str(['P','I','O']))
| dmcinerney/Summarization | preprocess.py | preprocess.py | py | 6,740 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.isfile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.remove",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "newsroom.jsonl.open",
"line... |
24827880363 | from django import forms
from myapp1.models import OrderItem
class OrderItemForm(forms.ModelForm):
    """Order-line form: the item, the ordering client and the quantity.

    The client field is rendered as radio buttons over a fixed client list.
    """

    class Meta:
        model = OrderItem
        fields = ['item', 'client', 'items_ordered']
        # Radio buttons instead of the default select widget for the client.
        widgets = {
            'client': forms.RadioSelect(choices=[
                ('Client A', 'Client A'),
                ('Client B', 'Client B'),
                ('Client C', 'Client C'),
            ]),
        }
        # Friendlier labels than the raw model field names.
        labels = {
            'items_ordered': 'Quantity',
            'client': 'Client Name',
        }
class InterestForm(forms.Form):
    """Interest poll: a yes/no radio choice, a quantity and optional comments."""

    interested = forms.TypedChoiceField(
        choices=((1, 'Yes'), (0, 'No')),
        # Submitted values arrive as strings ('1'/'0'); coerce to bool via int.
        coerce=lambda x: bool(int(x)),
        widget=forms.RadioSelect,
        label='Are you interested?'
    )
    quantity = forms.IntegerField(min_value=1, initial=1, label='Quantity')
    comments = forms.CharField(widget=forms.Textarea, required=False,
                               label='Additional Comments')
{
"api_name": "django.forms.ModelForm",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "myapp1.models.OrderItem",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.... |
39068909895 | import requests
from bs4 import BeautifulSoup
# Create a session (so the login cookie is reused across requests)
session = requests.session()
# Log in
url = "http://somewhere/?controller=login&action=login"
data = { # from Form Data
"login.x": "39", # <someValue>
"login.y": "12", # <someValue>
"accountName": "<ID>", # <ID>
"password": "<PW>" # <PW>
}
response = session.post(url, data=data) # POST request
response.raise_for_status()
# print(response.text)
# Parse the post-login home page and read the profile name.
soupHome = BeautifulSoup(response.text, "html.parser")
name = soupHome.select_one("#profile-nav > div.menuBody > div.data > div.item.name > a")
print("name: ", name.string)
# Fetch the registration time
url = "http://somewhere/?controller=blog&action=top"
response = session.get(url)
response.raise_for_status()
# print(response.text)
soup = BeautifulSoup(response.text, "html.parser")
regTime = soup.select_one("div.regTime")
print("regTime: ", regTime.string)
# Once logged in, more information becomes accessible.
# The login procedure, in short:
# 1. Use the browser developer tools to find where the login is processed
# 2. Note where (the endpoint, e.g. a PHP page) and how (the HTTP method)
#    the request is sent
# 3. Implement the above (1. and 2.) in Python:
# 3-1. Create a session with the requests module
# 3-2. Send the request with the chosen method on that session
# 3-3. Check that the login request succeeded with raise_for_status
| 8aqtba9y/study-ml-thing | 06_Using_Request/01_Mocking_Login_Request.py | 01_Mocking_Login_Request.py | py | 1,478 | python | ja | code | 0 | github-code | 36 | [
{
"api_name": "requests.session",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 30,
"usage_type": "call"
}
] |
9105855311 | # coding=utf-8
import webapp2
import logging
from controllers.restore import ExportHandler, ExportHttpWorker, ExportWorker
from controllers.event import SaveEventsHandler, LastEventsHandler, UserEventsHandler, EventsCountHandler
# Minimal handler for '/': answers GET with an empty 200 body
# (health-check / placeholder page).
class BlankPageHandler(webapp2.RequestHandler):
def get(self):
self.response.write('')
# URL routing table: event ingestion, counters/queries, and export endpoints.
app = webapp2.WSGIApplication([(r'/', BlankPageHandler),
(r'/api/user/(\w+)', SaveEventsHandler),
(r'/count', EventsCountHandler),
(r'/get', LastEventsHandler),
(r'/get/(\w+)', UserEventsHandler),
(r'/export/worker/http', ExportHttpWorker),
(r'/export/worker', ExportWorker),
(r'/export', ExportHandler)], debug=True)
def handle_404(request, response, exception):
    """Custom 404 page: log the failed lookup and return a friendly message."""
    logging.exception(exception)
    response.set_status(404)
    response.write('Oops! I could swear this page was here!')
def handle_500(request, response, exception):
    """Swallow server errors: log them but answer 200 with body 'success'.

    NOTE(review): this deliberately masks failures so the sender treats the
    request as delivered (fallback endpoint) -- confirm this is still the
    desired contract before changing it.
    """
    logging.exception(exception)
    response.set_status(200)
    response.write('success')
# Register the custom error pages on the WSGI application.
app.error_handlers[404] = handle_404
app.error_handlers[500] = handle_500
| nicklasos/gae-data-fallback | main.py | main.py | py | 1,258 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "webapp2.RequestHandler",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "webapp2.WSGIApplication",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "controllers.event.SaveEventsHandler",
"line_number": 15,
"usage_type": "name"
},
{... |
11249061798 | #!/usr/bin/env python
'''
A helper script to extract regions from LRW with dlib.
Mouth ROIs are fixed based on median mouth region across 29 frames.
@author Peratham Wiriyathammabhum
@date Jan 10, 2020
'''
import argparse
import os, os.path
import sys
import glob
import errno
import pickle
import math
import time
import copy
from multiprocessing import Pool
from time import time as timer
import numpy as np
import cv2
import yaml
cwd = os.getcwd()
from os.path import expanduser
hp = expanduser("~")
sys.path.insert(0, '/cfarhomes/peratham/lrcodebase')
from lr_config import *
from collections import ChainMap
import re
def get_stats(filename):
    """Collect duration/fps/frame-count stats for the clip paired with `filename`.

    `filename` is a .txt annotation whose last line looks like
    'Duration: 0.53 seconds'; the matching video is the sibling .mp4.
    Returns {filename: stat_dict} so results from a worker pool can be
    merged into a single dict.
    """
    stat_dict = {}
    vidname = filename.replace('.txt', '.mp4')
    # Duration: parse the trailing 'Duration: <float> seconds' line.
    with open(filename, 'r') as fp:
        lastline = list(fp)[-1]
    x = re.match(r'\w+: (\d+\.\d+) \w+', lastline)
    duration = float(x.group(1))
    stat_dict['duration'] = duration
    # FPS: ask OpenCV, and always release the capture handle
    # (the original leaked the VideoCapture).
    cap = cv2.VideoCapture(vidname)
    try:
        fps = cap.get(cv2.CAP_PROP_FPS)
    finally:
        cap.release()
    stat_dict['fps'] = fps
    # Frame count estimated from fps * duration, rounded to the nearest int.
    stat_dict['num_frames'] = int(round(fps * duration))
    return {filename: stat_dict}
def process_boundary_stats(sample_paths, pool):
    """Map `get_stats` over `sample_paths` on `pool` and merge the per-file dicts.

    Returns a single {filename: stats} dict covering the whole batch.
    """
    try:
        batch_stats = pool.map(get_stats, sample_paths)
    except Exception as exc:
        # The original handler referenced the undefined names `file_paths`/`i`
        # (raising NameError) and then fell through to an unbound
        # `batch_stats`.  Log which batch failed and propagate the error.
        print('[Error] batch starting at {} failed: {}'.format(
            sample_paths[0] if sample_paths else '<empty batch>', exc))
        raise
    return dict(ChainMap(*batch_stats))
def main(args):
image_dir = args.dataset
nthreads = int(args.nthreads)
split = args.split
filenames = glob.glob(os.path.join(image_dir, '*', '{}'.format(split), '*.txt'))
filenames = sorted(filenames)
total_size = len(filenames)
pickle.dump( filenames, open( os.path.join(args.outdir, "lrw.{}.filenames.p".format(split)), "wb" ) )
# ....
res_dict = {} # result dict {filename:{duration:float.sec, fps:int1, num_frames:int2}}
current_iter = 0
chunk = 4*nthreads
while current_iter < total_size:
curr_batch_size = chunk if current_iter + chunk <= total_size else total_size - current_iter
with Pool(nthreads) as pool:
sample_paths = filenames[current_iter:current_iter+curr_batch_size]
bdict = process_boundary_stats(sample_paths, pool)
res_dict = {**res_dict, **bdict}
current_iter += curr_batch_size
if current_iter // chunk % 20 == 0:
print('[Info] Operating...{}'.format(current_iter))
# ....
with open(args.outpickle,'wb') as fp:
pickle.dump(res_dict, fp)
with open(args.outfile,'w') as fp:
yaml.dump(res_dict, fp)
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Pytorch Video-only BBC-LRW Example')
parser.add_argument('--dataset', default='/cfarhomes/peratham/datapath/lrw/lipread_mp4',
help='path to dataset')
parser.add_argument('--split', default='train',
help='train, val, test')
parser.add_argument('--outdir', default='/cfarhomes/peratham/datapath/lrw/boundary_stats/',
help='path to output files')
parser.add_argument('--outfile', default='/cfarhomes/peratham/datapath/lrw/boundary_stats/boundary_stats.yaml',
help='path to output yaml')
parser.add_argument('--outpickle', default='/cfarhomes/peratham/datapath/lrw/boundary_stats/boundary_stats.p',
help='path to output pickle')
parser.add_argument('--nthreads', required=False, type=int,
default=64, help='num threads')
args = parser.parse_args()
main(args)
| perathambkk/lipreading | lrcodebase/LRW_boundary_stats.py | LRW_boundary_stats.py | py | 3,349 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "os.getcwd",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.expanduser",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sys.path.insert",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_num... |
2911796694 | from tqdm import tqdm
import torch
import torchvision
import torchvision.transforms as transforms
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader, Subset
from model import dCNN
from dataset import NumpySBDDataset
import torch.optim as optim
import os
import yaml
import argparse
device = "cuda" if torch.cuda.is_available() else "cpu"
def getdatasetstate(args={}):
return {k: k for k in range(args["train_size"])}
def pre_process_fold(root, fold_name, class_to_label, meta, viz=False):
path = os.path.join(root, fold_name)
files = os.listdir(path)
print("Processing fold:", fold_name)
fold_data = []
fold_labels = []
for file in tqdm(files):
try:
if file.split(".")[1] != "wav":
continue
class_label = meta[meta["slice_file_name"] == file]["classID"].values[0]
label_name = class_to_label[class_label]
data, sr = librosa.load(os.path.join(path, file))
S = librosa.feature.melspectrogram(data, sr, n_mels=128, fmax=8000)
S_dB = librosa.power_to_db(S, ref=np.max)
# pad S_dB with zeros to be (128, 173), pad the 1st dimension
length = S_dB.shape[
1
] # the 4 second clip has length of 173 (trim anything with length greater than 173)
if length > 173:
# trim the length of the spectrogram to 173
S_dB = S_dB[:173, :]
else:
# print(length)
padding_to_add = 173 - length
S_dB = np.pad(S_dB, [(0, 0), (0, padding_to_add)], mode="constant")
# print(S_dB.shape)
if S_dB.shape == (128, 173):
fold_data.append(S_dB)
fold_labels.append(class_label)
else:
print(f"Size mismatch! {S_dB.shape}")
except IndexError:
print("Index error while processing file", file)
return fold_data, fold_labels
def create_10_fold_data(ROOT="./data/"):
train_file = "train_x.npy"
full_path = os.path.join(ROOT, train_file)
if os.path.isfile(full_path) and os.access(full_path, os.R_OK):
train_x = np.load(ROOT + "train_x.npy")
train_y = np.load(ROOT + "train_y.npy")
test_x = np.load(ROOT + "test_x.npy")
test_y = np.load(ROOT + "test_y.npy")
# print(f"{train_x.shape} {train_y.shape} {test_x.shape} {test_y.shape}")
return train_x, train_y, test_x, test_y
train_x = []
train_y = []
test_x = []
test_y = []
print("Generating 10-fold data...")
for fold in tqdm(folds):
train = True
if str(TEST_FOLD) in fold:
train = False
fold_data, fold_labels = pre_process_fold(
"./data/audio/", fold, class_to_label, meta, viz=False
)
if train:
train_x.extend(fold_data)
train_y.extend(fold_labels)
else:
test_x.extend(fold_data)
test_y.extend(fold_labels)
ROOT_SAVE = "./data/"
train_x = np.array(train_x)
train_y = np.array(train_y)
test_x = np.array(test_x)
test_y = np.array(test_y)
np.save(ROOT_SAVE + "train_x.npy", train_x)
np.save(ROOT_SAVE + "train_y.npy", train_y)
np.save(ROOT_SAVE + "test_x.npy", test_x)
np.save(ROOT_SAVE + "test_y.npy", test_y)
return train_x, train_y, test_x, test_y
def processData(args, stageFor="train", indices=None):
class_to_label = {
0: "air_conditioner",
1: "car_horn",
2: "children_playing",
3: "dog_bark",
4: "drilling",
5: "engine_idling",
6: "gun_shot",
7: "jackhammer",
8: "siren",
9: "street_music",
}
meta = pd.read_csv("./data/UrbanSound8K.csv")
folds = list(map(lambda x: "fold" + str(x), list(range(1, 11))))
TEST_FOLD = 10 # TODO: make this an arg
ROOT_DATA_DIR = "./data/audio/" # TODO: make this an arg
train_x, train_y, test_x, test_y = create_10_fold_data()
if stageFor == "train":
dataset = NumpySBDDataset(train_x, train_y)
dataset = Subset(dataset, indices)
elif stageFor == "infer":
dataset = NumpySBDDataset(train_x, train_y)
dataset = Subset(dataset, indices)
else:
dataset = NumpySBDDataset(train_x, train_y)
loader = DataLoader(
dataset, batch_size=64, shuffle=True
) # TODO: add arg for batch size
return loader
def train(args, labeled, resume_from, ckpt_file):
print("========== In the train step ==========")
batch_size = args["batch_size"]
lr = args["learning_rate"]
momentum = args["momentum"]
epochs = args["train_epochs"]
train_split = args["split_train"]
loader = processData(args, stageFor="train", indices=labeled)
net = dCNN()
net = net.to(device=device)
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters())
if resume_from is not None:
ckpt = torch.load(os.path.join(args["EXPT_DIR"], resume_from + ".pth"))
net.load_state_dict(ckpt["model"])
optimizer.load_state_dict(ckpt["optimizer"])
else:
getdatasetstate(args)
net.train()
for epoch in tqdm(range(args["train_epochs"]), desc="Training"):
running_loss = 0
for i, batch in enumerate(loader, start=0):
data, labels = batch
data = data.to(device)
labels = labels.to(device)
optimizer.zero_grad()
output = net(data)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if i % 1000:
print(
"epoch: {} batch: {} running-loss: {}".format(
epoch + 1, i + 1, running_loss / 1000
),
end="\r",
)
running_loss = 0
print("Finished Training. Saving the model as {}".format(ckpt_file))
ckpt = {"model": net.state_dict(), "optimizer": optimizer.state_dict()}
torch.save(ckpt, os.path.join(args["EXPT_DIR"], ckpt_file + ".pth"))
return
def test(args, ckpt_file):
print("========== In the test step ==========")
batch_size = args["batch_size"]
lr = args["learning_rate"]
momentum = args["momentum"]
epochs = args["train_epochs"]
train_split = args["split_train"]
loader = processData(args, stageFor="test")
net = dCNN()
net = net.to(device=device)
net.load_state_dict(
torch.load(os.path.join(args["EXPT_DIR"], ckpt_file + ".pth"))["model"]
)
net.eval()
predix = 0
predictions = {}
truelabels = {}
n_val = args["test_size"]
with tqdm(total=n_val, desc="Testing round", unit="batch", leave=False) as pbar:
for step, (batch_x, batch_y) in enumerate(loader):
with torch.no_grad():
batch_x = batch_x.to(device)
batch_y = batch_y.to(device)
prediction = net(batch_x)
for logit, label in zip(prediction, batch_y):
# predictions[predix] = logit.cpu().numpy().tolist()
truelabels[predix] = label.cpu().numpy().tolist()
class_probabilities = logit.cpu().numpy().tolist()
index_max = np.argmax(class_probabilities)
predictions[predix] = index_max
predix += 1
pbar.update()
# unpack predictions
predictions = [val for key, val in predictions.items()]
truelabels = [val for key, val in truelabels.items()]
return {"predictions": predictions, "labels": truelabels}
def infer(args, unlabeled, ckpt_file):
print("========== In the inference step ==========")
batch_size = args["batch_size"]
lr = args["learning_rate"]
momentum = args["momentum"]
epochs = args["train_epochs"]
train_split = args["split_train"]
loader = processData(args, stageFor="infer", indices=unlabeled)
net = dCNN()
net = net.to(device=device)
net.load_state_dict(
torch.load(os.path.join(args["EXPT_DIR"], ckpt_file + ".pth"))["model"]
)
net.eval()
n_val = len(unlabeled)
predictions = {}
predix = 0
with tqdm(total=n_val, desc="Inference round", unit="batch", leave=False) as pbar:
for step, (batch_x, batch_y) in enumerate(loader):
with torch.no_grad():
batch_x = batch_x.to(device)
batch_y = batch_y.to(device)
prediction = net(batch_x)
for logit in prediction:
predictions[unlabeled[predix]] = {}
class_probabilities = logit.cpu().numpy().tolist()
predictions[unlabeled[predix]]["pre_softmax"] = class_probabilities
index_max = np.argmax(class_probabilities)
predictions[unlabeled[predix]]["prediction"] = index_max
predix += 1
pbar.update()
return {"outputs": predictions}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--config",
default=os.path.join(os.getcwd(), "config.yaml"),
type=str,
help="Path to config.yaml",
)
args = parser.parse_args()
with open(args.config, "r") as stream:
args = yaml.safe_load(stream)
labeled = list(range(5000))
resume_from = None
ckpt_file = "ckpt_0"
print("Testing getdatasetstate")
getdatasetstate(args=args)
train(args=args, labeled=labeled, resume_from=resume_from, ckpt_file=ckpt_file)
test(args=args, ckpt_file=ckpt_file)
infer(args=args, unlabeled=[10, 20, 30], ckpt_file=ckpt_file)
| arunsanknar/AlectioExamples | urban-sound-classification/process.py | process.py | py | 9,819 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.cuda.is_available",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"l... |
8882862385 | import numpy as np
from collections import defaultdict
import rl.env
import rl.player
import rl.display
class Env(rl.env.DiscreteEnv):
def __init__(self, n = 10, h = 1, p = 2, k = 3) -> None:
super().__init__()
self.n = n
self.h = h
self.p = p
self.k = k
# List of players
self.players = list()
self.state_size = 0
# Return the next state and reward
def step(self, state : tuple, actions : list[tuple], d = None) -> tuple[list[tuple],list[int]]:
# Random uniformly distributed demand between 0 and n
if d is None:
demand = np.random.randint(0, self.n+1)
else:
demand = d
# Holding cost
cost = max(0,self.h*(state + actions - demand))
# Lost sales penalty
cost += max(0,self.p*(demand - state - actions[0]))
# Fixed ordering cost
cost += self.k * (actions[0] > 0)
next_state = state + actions[0] - min(demand, state + actions[0])
reward = -cost
return int(next_state), int(reward)
# Function that returns the available actions
def A(self, state):
return tuple([tuple([i]) for i in range(self.n - state + 1)])
def goal_test(self, state) -> bool:
raise NotImplementedError("ContinuousEnv.goal_test() is not implemented")
def reset(self) -> None:
return 0
def init_render(self, grid_scale = 50):
self.grid_scale = grid_scale
self.width = (self.n+1)*self.grid_scale + self.grid_scale
self.height = (self.n+1)*self.grid_scale
self.display = rl.display.displayHandler(self.width, self.height)
def render(self, agent):
gs = self.grid_scale
min_q = 1e10
max_q = 0
color_r = [(i, 0, 0) for i in np.linspace(255, 0, int(self.n/2 + 1))]
color_g = [(0, i, 0) for i in np.linspace(0, 255, int(self.n/2 + 1))]
color = color_r[:-1] + color_g
min_q = min(agent.Q.values())
max_q = max(agent.Q.values())
for a in range(self.n+1):
for s in range(self.n+1):
if(a <= self.n-s):
q = agent.Q[s,(a,)]
color_index = int((q-min_q) * len(color)/(max_q - min_q + 1e-05))
color_index = min(color_index, self.n)
self.display.draw_square((a*gs + gs/2, s*gs + gs/2), (gs, gs), color[color_index])
self.display.draw_text(str(round(q, 2)), (a*gs + gs/2, s*gs + gs/2), (255,255,255), align="center")
else:
self.display.draw_square((a*gs + gs/2, s*gs + gs/2), (gs, gs), (0,0,0))
q_range = np.linspace(min_q, max_q, self.n+1)
if self.n%2 == 0:
n_ = self.n + 1
else: n_ = self.n
for s in range(n_):
self.display.draw_square(((self.n+1)*gs + gs/2, s*gs + gs/2), (gs, gs), color[s], width = 10)
self.display.draw_text(str(round(q_range[s], 2)), ((self.n+1)*gs + gs/2, s*gs + gs/2), (255,255,255), align="center")
self.display.update()
return self.display.eventHandler()
def is_terminal(self, state) -> bool:
return False
def get_transistion_probabilities(self, state, action):
p = defaultdict(lambda : 0)
num_states = len(self.get_states())
for d in range(num_states):
next_state, reward = self.step(state, action, d)
p[(int(next_state), int(reward))] += 1/num_states
return p
def get_states(self) -> list:
return tuple(np.arange(self.n + 1))
class EnvNonUniform(Env):
def __init__(self, n = 10, h = 1, p = 2, k = 3, b = 1, m = 0) -> None:
super().__init__(n, h, p, k)
self.b = b
self.m = m
self.p = p
def step(self, state : tuple, actions : list[tuple], d = None) -> tuple[list[tuple],list[int]]:
# Random uniformly distributed demand between 0 and n
if d is None:
demand_ratio = np.array([(self.b+1)/(self.n+1) \
if x == self.m or x == self.m + 1 \
else (self.n - 1 - 2*self.b)/(self.n**2 - 1) \
for x in range(self.n+1)])
demand = int(np.random.choice(np.arange(self.n+1),
size = 1,
p = demand_ratio)[0])
else:
demand = d
# Holding cost
cost = max(0,(self.h*(state + actions[0] - demand)))
# Lost sales penalty
cost += max(0,(self.p*(demand - state - actions[0])))
# Fixed ordering cost
if(actions[0] > 0): cost += self.k
next_state = state + actions[0] - min(demand, state + actions[0])
reward = cost
return next_state, reward
def get_transistion_probabilities(self, state, action):
p = defaultdict(lambda : 0)
num_states = len(self.get_states())
for d in range(num_states):
demand_ratio = np.array([(self.b+1)/(self.n+1) \
if x == self.m or x == self.m + 1 \
else (self.n - 1 - 2*self.b)/(self.n**2 - 1) \
for x in range(self.n+1)])
next_state, reward = self.step(state, action, d)
p[(int(next_state), int(reward))] += demand_ratio[d]
return p
| TheGoldenChicken/robust-rl | ware_house/ware_house.py | ware_house.py | py | 5,829 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rl.env.env",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "rl.env",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "numpy.random.randint",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_nu... |
39624965175 | import requests
from bs4 import BeautifulSoup
from gql import gql, Client
from gql.transport.requests import RequestsHTTPTransport
url = 'https://events.uchicago.edu/cal/main/showMain.rdo'
base_url = 'https://events.uchicago.edu'
gql_server = 'https://ucevents.herokuapp.com/'
client = Client(
transport=RequestsHTTPTransport(url=gql_server, use_json=True),
fetch_schema_from_transport=True,
)
mutation = gql('''
mutation {
createEvent(
name: "jeff test",
description:"test",
timeobj: {start: 1521930600, end: 1521940600 },
tags: ["concert", "billie", "eilish"],
event_photo_url: "test") {
name
description
time{start, end}
tags
}
}
''')
query = gql("""
{
events {
name
}
}
""")
#######################
# DAY EVENTS
#######################
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
events = soup.find_all('table')[1].find_all('tbody')[0].find_all('tr')
parsed_events = []
for event in events:
parsed = {}
data = event.find_all('td')
if len(data) > 1:
parsed['time'] = " ".join(data[0].get_text().split())
info = data[1].get_text('|')
info = " ".join(info.split())
parsed['name'] = info.split('|')[1]
parsed['location'] = info.split('|')[3]
parsed['link'] = data[1].a['href']
parsed_events.append(parsed)
##############################################
# GET INFO FROM SINGLE EVENT
##############################################
# Returns a JSON of form { time, location, description, tags, name, link}
def get_event_info(event_link):
event_page = requests.get(event_link)
soup = BeautifulSoup(event_page.content, 'html.parser')
data = soup.find('table').get_text('|')
parsed = ' '.join(data.split())
all_items = [i for i in parsed.split('|') if i.strip()]
meta = ['When:','Where:','Description:','Cost:','Contact:','Tag:','Notes:']
our_meta = ['time','location','description','','','tags','']
parsed_event_info = {}
for i, m in enumerate(meta):
if not i == len(meta)-1:
if meta[i] in all_items and meta[i+1] in all_items:
start = all_items.index(meta[i])
end = all_items.index(meta[i+1])
repieced = ' '.join(all_items[start+1:end])
if our_meta[i]:
parsed_event_info[our_meta[i]] = repieced
title_part = soup.find(id='maincontent')
parsed_event_info['name'] = title_part.parent.find('h1').get_text()
if 'tags' in parsed_event_info:
parsed_event_info['tags'] = str(parsed_event_info['tags'].split(',')).replace("'", '"')
parsed_event_info['link'] = event_link
for key in parsed_event_info:
if not key == 'tags':
parsed_event_info[key] = parsed_event_info[key].replace('"', '')
return parsed_event_info
for event in parsed_events[1:]:
info = get_event_info( base_url + event['link'] )
if not 'All Events' in info['name']:
mutation = gql('''
mutation {
createEvent(
name: "%s",
description: "%s",
location: "%s",
timeobj: {start: 1521930600, end: 1521940600 },
tags: %s,
event_photo_url: "test") {
name
description
time{start, end}
tags
}
}
''' % ( info.get('name', ''), info.get('description', ''), info.get('location', ''), info.get('tags', '[\"test\"]') ) )
print(client.execute(mutation)) | jeffhc/campusoutlook-scraper | scripts/ucevents.py | ucevents.py | py | 3,371 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "gql.Client",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "gql.transport.requests.RequestsHTTPTransport",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "gql.gql",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "gql.g... |
36768731245 | """OpenAI Functions Service"""
import json
import openai
from .schema import *
async def parse_openai_function(
response: dict,
functions: List[Type[F]] = OpenAIFunction.Metadata.subclasses,
**kwargs: Any,
) -> FunctionCall:
"""Parses the response from OpenAI and returns a FunctionCall object."""
choice = response["choices"][0]["message"]
if "function_call" in choice:
function_call_ = choice["function_call"]
name = function_call_["name"]
arguments = function_call_["arguments"]
for i in functions:
if i.__name__ == name:
result = await i(**json.loads(arguments))(**kwargs)
break
else:
raise ValueError(f"Function {name} not found")
return result
return FunctionCall(name="chat", data=choice["content"])
async def function_call(
text: str,
model: str = "gpt-3.5-turbo-16k-0613",
functions: List[Type[F]] = OpenAIFunction.Metadata.subclasses,
**kwargs,
) -> FunctionCall:
"""Calls the function orchestrator."""
messages = [
{"role": "user", "content": text},
{"role": "system", "content": "You are a function Orchestrator"},
]
response = await openai.ChatCompletion.acreate(
model=model,
messages=messages,
functions=[i.openaischema for i in functions],
)
return await parse_openai_function(response, functions=functions, **kwargs) | obahamonde/openai-pubsub-functions | src/service.py | service.py | py | 1,451 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "openai.ChatCompletion.acreate",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "openai.ChatCompletion",
"line_number": 41,
"usage_type": "attribute"
}
] |
6196959192 | from rest_framework import serializers
from .models import Profile, User
from articles.serializers import ArticleSerializer
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'first_name', 'last_name')
class ProfileSerializer(serializers.ModelSerializer):
user = UserSerializer()
class Meta:
model = Profile
fields = '__all__'
def update(self, instance, validated_data):
user_data = validated_data.pop('user')
instance.user.first_name = user_data.get('first_name')
instance.user.last_name = user_data.get('last_name')
instance.user.save()
instance.city = validated_data.get('city')
instance.bday = validated_data.get('bday')
instance.gender = validated_data.get('gender')
instance.save()
return instance
| equator40075km/backend | equator/profiles/serializers.py | serializers.py | py | 872 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "models.User",
"line_number": 8,
"usage_type": "name"
},
{
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.