seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
import csv
import sys
import os
import tkinter as tk

# user input - input the order file and cartons file (with correct filepaths) into the quotations below:
csv_items = csv.reader(open('C:\\Users\\Desktop\\order.csv'))
csv_cartons = csv.reader(open('C:\\Users\\Desktop\\carton.csv'))

# region import data
# define preliminary data structures for orders
# (parallel lists: one entry per item row in the orders file)
orderNumber = []
SKULength = []
SKUWidth = []
SKUHeight = []
count1 = 0  # used only to skip the header row
# add to data structures from orders txt file
for row in csv_items:
    if (count1 == 0):
        count1 += 1  # first row is the header; skip it
    else:
        orderNumber.append(int(row[0]))
        SKULength.append(float(row[2]))
        SKUWidth.append(float(row[3]))
        SKUHeight.append(float(row[4]))
# define preliminary data structures for cartons
# (parallel lists: one entry per carton row)
cartonNumber = []
cartonID = []
cartonLength = []
cartonWidth = []
cartonHeight = []
count2 = 0  # 0 = header row; afterwards it is the 1-based carton number
# add to data structures from cartons txt file
for row in csv_cartons:
    if (count2 == 0):
        count2 += 1
    else:
        cartonNumber.append(count2)
        cartonID.append(row[0])
        cartonLength.append(float(row[1]))
        cartonWidth.append(float(row[2]))
        cartonHeight.append(float(row[3]))
        # NOTE(review): source indentation was lost in extraction; this
        # increment is assumed to sit inside the else-branch so carton
        # numbers start at 1 and advance once per data row - confirm.
        count2 += 1
# endregion
# region creating required data structures
# creating dict of orders: order number -> list of item row indices
orders = {}
for x in range(orderNumber[-1]):
    orders[x + 1] = [0]  # placeholder 0 marks "no items seen yet"
for x in range(len(orderNumber)):
    y = orderNumber[x]
    tempList = orders[y]
    if (tempList[0] == 0):
        # drop the placeholder once the first item for this order arrives
        # NOTE(review): a real item row index of 0 is indistinguishable from
        # the placeholder, so the very first item row can be removed when a
        # second item for the same order arrives - verify.
        tempList.remove(tempList[0])
    tempList.append(x)
    orders[y] = tempList
# obtaining carton volumes: carton number (1-based) -> volume
cartonVolumes = {}
for x in range(len(cartonID)):
    cLength = cartonLength[x]
    cWidth = cartonWidth[x]
    cHeight = cartonHeight[x]
    cVolume = cLength * cWidth * cHeight
    cartonVolumes[x + 1] = cVolume
# obtaining total order volumes (sum of all item volumes)
orderVolumes = {}
for x in range(1, orderNumber[-1] + 1):
    orderVolumes[x] = 0
for x in range(len(orderNumber)):
    oLength = SKULength[x]
    oWidth = SKUWidth[x]
    oHeight = SKUHeight[x]
    oVolume = oLength * oWidth * oHeight
    orderVolumes[orderNumber[x]] += oVolume
# making dict of cartons that can fit the volume of each order
# (volume check only; geometric fit is tested later in the packing loop)
availableCartons = {}
for x in range(orderNumber[-1]):
    availableCartons[x + 1] = []
for order in orderVolumes:
    for carton in cartonVolumes:
        if (orderVolumes[order] < cartonVolumes[carton]):
            tempList = availableCartons[order]
            tempList.append(carton)
            availableCartons[order] = tempList
# endregion
# region pick best cartons
# creating dicts that will be outputted, containing best cartons for each order
bestCartonsForOrder = {}
bestCartonsForOrderCUR = {}
for x in range(len(orders) + 1):
    bestCartonsForOrder[x + 1] = []
    # NOTE(review): this immediately overwrites the list set on the line
    # above with 0; one of the two lines was likely meant to initialize
    # bestCartonsForOrderCUR instead - confirm.
    bestCartonsForOrder[x + 1] = 0
# going through every order
for order in orders:
    availableCartonsForOrder = availableCartons[order]
    # dict for empty space per carton per order
    emptySpaceperCarton = {}
    # going through every carton and checking if they are available for the order
    for carton in cartonNumber:
        for x in availableCartonsForOrder:
            if (carton == x):
                # get carton dimensions
                cLength = cartonLength[carton - 1]
                cWidth = cartonWidth[carton - 1]
                cHeight = cartonHeight[carton - 1]
                # vars to help with fitting
                lengthLeft = cLength
                widthLeft = cWidth
                heightLeft = cHeight
                maxHeightPerLayer = 0.0
                emptySpace = 0.0
                # packing algorithm: greedy, layer-based placement
                for item in orders[order]:
                    # get item dimensions
                    # NOTE(review): orders[] stores 0-based row indices, so
                    # "item - 1" looks like an off-by-one - confirm intent.
                    length = SKULength[item - 1]
                    width = SKUWidth[item - 1]
                    height = SKUHeight[item - 1]
                    # vars to help with fitting
                    tryToFit = True
                    didFit = True
                    # priority system changing dimension values for better fit:
                    # rotate the item so its smallest dimension becomes height
                    if (height > length):
                        tempDim = height
                        height = length
                        length = tempDim
                    if (height > width):
                        tempDim = height
                        height = width
                        width = tempDim
                    hcount = 0  # number of new layers opened for this item (max 3)
                    while (tryToFit):
                        # if item fits in current layer
                        if (length < lengthLeft and width < widthLeft and height < heightLeft):
                            # item is placed at this point
                            # setting max height per layer
                            if (height > maxHeightPerLayer):
                                maxHeightPerLayer = height
                            # recreating empty space left in layer: shrink the
                            # layer along whichever side leaves less waste
                            if ((lengthLeft - length) > (widthLeft - width)):
                                emptySpace += maxHeightPerLayer * (widthLeft - width) * length
                                lengthLeft -= length
                            elif ((lengthLeft - length) < (widthLeft - width)):
                                emptySpace += maxHeightPerLayer * (lengthLeft - length) * width
                                widthLeft -= width
                            tryToFit = False
                        elif (height < (heightLeft - maxHeightPerLayer) and hcount < 3):
                            # creating new layer for item
                            heightLeft -= maxHeightPerLayer
                            maxHeightPerLayer = 0
                            lengthLeft = cLength
                            widthLeft = cWidth
                            hcount += 1
                        else:
                            # item cannot be placed in this carton at all
                            didFit = False
                            tryToFit = False
                            emptySpace = -1  # -1 flags "order does not fit"
                # final dictionary of empty space for each carton in current order
                emptySpaceperCarton[carton] = emptySpace
    # setting vars to help with picking best cartons (sentinel values)
    firstValue = 999999
    firstKey = 999
    secondValue = 999999
    secondKey = 999
    for x in emptySpaceperCarton:
        # if empty space is not -1 (meaning order fits in carton)
        if (emptySpaceperCarton[x] >= 0):
            # set vars for current carton
            spaceLeftValue = emptySpaceperCarton[x]
            spaceLeftKey = x
            # place carton accordingly (track the two smallest empty spaces)
            if (spaceLeftValue < secondValue):
                if (spaceLeftValue < firstValue):
                    secondKey = firstKey
                    secondValue = firstValue
                    firstKey = spaceLeftKey
                    firstValue = spaceLeftValue
                else:
                    secondKey = spaceLeftKey
                    secondValue = spaceLeftValue
    # storing results into dicts
    bestCartonsForOrderList = []
    bestCartonKeyCUR = 1
    if (firstKey > 0 and firstKey < len(cartonNumber)):
        bestCartonsForOrderList.append(str(cartonID[firstKey - 1]))
        bestCartonKeyCUR = firstKey
    if (secondKey > 0 and secondKey < len(cartonNumber)):
        bestCartonsForOrderList.append(str(cartonID[secondKey - 1]))
    bestCartonsForOrder[order] = bestCartonsForOrderList
    bestCartonsForOrderCUR[order] = bestCartonKeyCUR
# endregion
# region calculating carton utilization rate
totalCURNum = 0
count3 = 0
# calculate carton utilization rate for every order
for x in range(len(orderVolumes)):
    # CUR = order volume / volume of the best-fitting carton for that order
    CUROfOrder = orderVolumes[x + 1] / cartonVolumes[bestCartonsForOrderCUR[x + 1]]
    # only orders that genuinely fit (0 < CUR <= 1) count toward the average
    if (CUROfOrder > 0 and CUROfOrder <= 1):
        totalCURNum += CUROfOrder
        count3 += 1
# creating the average carton utilization rate (uncomment print statement for result)
avgCartonUtilizationRate = totalCURNum / count3
# print(avgCartonUtilizationRate)
# endregion
# region User Interface
# creating user interface
# NOTE(review): this __init__ looks like a leftover from a class-based
# tkinter template; it is never attached to a class and never called.
def __init__(self, parent, controller):
    tk.Frame.__init__(self, parent)
    self.controller = controller


window = tk.Tk()
window.title("DHL Box Packer")
window.geometry("500x400")
window.configure(bg='#FFCC00')
titlefont = ('Helvetica', 35, 'bold')
title = tk.Label(text = "DHL Box Packer", fg='#D40511', bg='#FFCC00', pady=20)
title.config(font=titlefont)
title.pack()
# Entry box with label
lfont = ('Helvetica', 12, 'bold')
l = tk.Label(text="Enter Order Number", bg='#FFCC00')
l.config(font=lfont)
l.pack()
e = tk.Entry()
e.pack()


# calls required data structures to create output
def selection():
    selectionHelp()


# Enter button
b = tk.Button(text="Enter", command=selection, padx=68, pady=0.1)
b.pack()
# resultant information display
bord = tk.LabelFrame(text="Order Information", padx=10, pady=10, bg='#FFCC00', fg='#D40511', bd=5)
bord.pack()
bord2 = tk.LabelFrame(text="Carton Selections", padx=10, pady=10, bg='#FFCC00', fg='#D40511', bd=5)
bord2.pack()


# helper method for selection: looks up the entered order number and renders
# its item count plus the two best carton options into the label frames
def selectionHelp():
    orderNum = int(e.get())  ##takes order inputted in entry box
    if (orderNum > 0):
        bestCartonList = bestCartonsForOrder[orderNum]
        tk.Label(bord, text="Order Number: " + str(orderNum), bg='#FFCC00').pack()
        tk.Label(bord, text="Items in Order: " + str(len(orders[orderNum])), bg='#FFCC00').pack()
        tk.Label(bord2, text="Option 1: " + str(bestCartonList[0]), bg='#FFCC00').pack()
        tk.Label(bord2, text="Option 2: " + str(bestCartonList[1]), bg='#FFCC00').pack()
    else:
        print("error: order number is not inputted correctly")


# restarts the whole script (re-exec) so a fresh entry can be made
def reset():
    python = sys.executable
    os.execl(python, os.path.abspath('C:\\Users\\karth\\Desktop\\IISE_Submission\\Script-Final.py'), * sys.argv)


# New Entry button
b2 = tk.Button(text="New Entry", command=reset, padx=55, pady=0.1)
b2.pack()
window.mainloop()  ##UI content must be between line 7 and this line
# endregion
| geersenthil/Packaging-Carton-Optimization | Script-Final.py | Script-Final.py | py | 10,239 | python | en | code | 0 | github-code | 13 |
16132250333 | import base64
import io
import qrcode
def lambda_handler(event, context):
    """AWS Lambda entry point: render ``event["url"]`` as a QR code PNG.

    Returns an API Gateway-style response whose body is the base64-encoded
    PNG bytes, flagged with ``isBase64Encoded`` so the gateway decodes it.
    """
    target_url = event["url"]
    buffer = io.BytesIO()
    # qrcode.make builds a PIL image; serialize it into the in-memory buffer.
    qrcode.make(target_url).save(buffer, format="PNG")
    encoded_png = base64.b64encode(buffer.getvalue()).decode("utf-8")
    return {
        "statusCode": 200,
        "body": encoded_png,
        "isBase64Encoded": True,
        "headers": {"Content-Type": "image/png"},
    }
"""
eventData:
{
"url": "https://www.example.com"
}
"""
| udhayprakash/PythonMaterial | python3/18_aws_cloud/a_AWS_Lambdas/d_practical_utility_functions/d_generate_QR_code.py | d_generate_QR_code.py | py | 457 | python | en | code | 7 | github-code | 13 |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 29 10:06:29 2018

@author: newness
"""
# LINKEDIN PROJECT SAMPLE
# WE WANT TO ANSWER QUESTION: WHICH GEOGRAPHY IS MOST ENGR JOB LISTING\
# HOW BANKS ARE SHIFTING STRATEGIES
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import glob

# A. DETERMINE RATE OF JOB UPDATE PER QUARTER (how often it changes)
# Job frequency
# SPLIT jobs DATA to smaller CSV
# Import into a DB
# Query DB column on 'engineer' per year
path =r'C:\Users\newness\Downloads\temp_datalab_records_job_listings\split\temp' # use your path
allFiles = glob.glob(path + "/*.csv")
joblist = pd.DataFrame()
list_ = []
# load every CSV chunk and concatenate into one frame
for file_ in allFiles:
    df = pd.read_csv(file_, engine='python',index_col=None, header=0)
    list_.append(df)
joblist = pd.concat(list_, sort=False)
joblist.dtypes
# subset the listings whose title mentions engineering / sales
jobEngr = joblist.loc[joblist.title.str.contains("engineer", na=False)]
jobSales = joblist.loc[joblist.title.str.contains("Sales", na=False)]
jobEngr["posted_date"] = jobEngr["posted_date"].apply(pd.to_datetime,format="%Y-%m/%d",errors='ignore')
jobSales["posted_date"] = jobSales["posted_date"].apply(pd.to_datetime,format="%Y-%m/%d",errors='ignore')
jobEngrCA = jobEngr.loc[jobEngr.region.str.contains("CA", na=False)]
# Visualising the Training set results
jobEngrStC=jobEngr.groupby('region').count()
jobEngrCA.plot(kind='line',x='posted_date',y='region')
plt.xticks( jobEngrStC['title'], jobEngrStC.index.values ) # location, labels
plt.plot( jobEngrStC['title'] )
plt.show()
# same view for sales listings, grouped by region
jobSalesC=jobSales.groupby('region').count()
jobSalesC.plot(kind='line',x='posted_date',y='title')
plt.xticks( jobSalesC['title'], jobSalesC.index.values ) # location, labels
plt.plot( jobSalesC['title'] )
frame1 = plt.gca()
frame1.axes.xaxis.set_ticklabels([])
frame1.axes.yaxis.set_ticklabels([])
plt.show()
plt.show() | insighty/old_projects | proposal_n.py | proposal_n.py | py | 1,885 | python | en | code | 0 | github-code | 13 |
40208226275 | import arcpy
import string
from arcpy import env
from arcpy.sa import *
def zstat(flderName):
    """Run zonal statistics (MEAN) for every feature class / raster pair in
    the given sub-folder, writing one z-prefixed .dbf table per raster."""
    workspace = "C:/Workspace/spk/" + flderName + "/"
    arcpy.env.workspace = workspace
    output_dir = workspace
    feature_classes = arcpy.ListFeatureClasses()
    raster_names = arcpy.ListRasters()
    for feature_class in feature_classes:
        for raster_name in raster_names:
            table_path = output_dir + "z" + raster_name + ".dbf"
            # zone field "zcode"; NODATA cells are ignored in the MEAN
            ZonalStatisticsAsTable(feature_class, "zcode", raster_name,
                                   table_path, "NODATA", "MEAN")
            print(raster_name)
def deltable(flderName):
    """Strip the ZONE_CODE, COUNT and AREA columns from every table in the
    given sub-folder's workspace, printing each table as it is processed."""
    workspace = "C:/Workspace/spk/" + flderName + "/"
    arcpy.env.workspace = workspace
    maskDir = workspace
    fields_to_drop = ["ZONE_CODE", "COUNT", "AREA"]
    for table in arcpy.ListTables():
        arcpy.DeleteField_management(table, fields_to_drop)
        print(table)
# batch driver: run zonal statistics + field cleanup for every sub-basin folder
flders = ["_BanDong", "_HuaiHere", "_MuangJung", "_NaYang", "_PhoPrasart", "_WangNokAnt"]
#flders = ["_MuangJung"]
for flder in flders:
    print(flder)
    zstat(flder)
    deltable(flder)
| sakdahomhuan/da-ArcPy | _zstatisic.py | _zstatisic.py | py | 988 | python | en | code | 1 | github-code | 13 |
class Solution(object):
    def combine(self, n, k):
        """Return all k-element combinations of the integers 1..n,
        in lexicographic order.

        :type n: int
        :type k: int
        :rtype: List[List[int]]
        """
        ans = []

        def dfs(lst, dep):
            # dep == len(lst): number of values chosen so far.
            if dep == k:
                # A full combination is built; record a copy and stop.
                # (The original kept recursing here, doing wasted work
                # below depth k that could never append anything.)
                ans.append(lst[:])
                return
            # Next candidate starts just above the last chosen value, so
            # each combination is strictly increasing (no duplicates).
            start = lst[dep - 1] + 1 if dep else 1
            # range() instead of py2-only xrange(): same iteration order,
            # and the code now runs on Python 3 as well.
            for i in range(start, n + 1):
                lst.append(i)
                dfs(lst, dep + 1)
                lst.pop()

        dfs([], 0)
        return ans
| superwhd/LeetCode | 77 Combinations.py | 77 Combinations.py | py | 499 | python | en | code | 1 | github-code | 13 |
# Proje 1: flatten an arbitrarily nested list.
l = [[1, 'a', ['cat'], 2], [[[3]], 'dog'], 4, 5]


def flatten(n, out=None):
    """Return a flat list of all non-list items in *n*, depth-first.

    The original relied on a module-global accumulator, which made the
    function single-use; taking (and returning) the accumulator makes it
    reusable while keeping the printed output identical.
    """
    if out is None:
        out = []
    for item in n:
        if isinstance(item, list):
            flatten(item, out)
        else:
            out.append(item)
    return out


l1 = flatten(l)
print(l1)

# Proje 2: reverse a list of lists, reversing each inner list as well.
lst = [[1, 2], [3, 4], [5, 6, 7]]


def Reverse(items):
    """Return a new list: *items* in reverse order, with each inner list
    reversed in place (matching the original's mutation of the inner lists).
    """
    reversed_inner = []
    for inner in items:
        if isinstance(inner, list):
            inner.reverse()
            reversed_inner.append(inner)
    reversed_inner.reverse()
    return reversed_inner


l2 = Reverse(lst)
print(l2)
#!/usr/bin/env python
from __future__ import division, print_function
import os
import sys
from io import BytesIO, IOBase
# Imports
import numpy

if sys.version_info[0] < 3:
    # Python 2 compatibility: alias xrange to range and pull in the
    # Python-3-style lazy builtins.
    from __builtin__ import xrange as range
    from future_builtins import ascii, filter, hex, map, oct, zip
def showmat(mat):
    """Print a leading blank line, then the matrix one row per line with a
    trailing space after each value (row width taken from the first row)."""
    print()
    width = len(mat[0])
    for row in mat:
        for col in range(width):
            print("{}".format(row[col]), end = " ")
        print()
def count(mat, R, C):
    """Count L-shaped segments of 1s in one orientation of an R x C 0/1 grid
    (the caller flips the grid to cover the other three orientations)."""
    # hrun[i][j] / vrun[i][j]: length of the run of consecutive 1s ending at
    # (i, j) going left / up respectively (0 wherever mat[i][j] == 0).
    hrun = [[0] * C for _ in range(R)]
    vrun = [[0] * C for _ in range(R)]
    for i in range(R):
        for j in range(C):
            if mat[i][j] == 1:
                hrun[i][j] = mat[i][j] + (hrun[i][j - 1] if j > 0 else 0)
                vrun[i][j] = mat[i][j] + (vrun[i - 1][j] if i > 0 else 0)
    total = 0
    for i in range(R):
        for j in range(C):
            h = hrun[i][j]
            v = vrun[i][j]
            # Long vertical arm (>= 4) with a short horizontal foot (>= 2):
            # the long side must be at least twice the short side.
            if h >= 2 and v >= 4:
                total += min(h - 1, (v // 2) - 1)
            # Mirror case: long horizontal arm with a short vertical foot.
            if h >= 4 and v >= 2:
                total += min(v - 1, (h // 2) - 1)
    return total
def solve(_):
    """Read one test case (an R x C grid of 0/1 from stdin) and print the
    total number of L-shapes found across all four grid orientations."""
    R, C = input().split()
    R = int(R)
    C = int(C)
    mat = list()
    for i in range(R):
        row = list(map(int, input().split()))
        mat.append(row)
    answ = count(mat, R, C)
    # Flip the grid so count() (which only scans one orientation) covers
    # all four L orientations: up-down, then both axes, then up-down again.
    mat = numpy.flipud(mat)
    answ += count(mat, R, C)
    mat = numpy.flip(mat)
    answ += count(mat, R, C)
    mat = numpy.flipud(mat)
    answ += count(mat, R, C)
    print("Case #{}: {}".format(_, answ))
def main():
    """Read the number of test cases from stdin and solve each (1-indexed,
    as required by the Code Jam 'Case #x:' output format)."""
    t = int(input())
    for _ in range(1, t + 1):
        solve(_)
#-----------------------------------------------------------------------
# region fastio
BUFSIZE = 8192


class FastIO(IOBase):
    """Buffered reader/writer over a raw file descriptor, used to replace
    the (slow) default stdin/stdout in competitive-programming solutions."""

    newlines = 0  # number of complete lines currently buffered

    def __init__(self, file):
        self._fd = file.fileno()
        self.buffer = BytesIO()
        # Writable unless the file was opened read-only.
        self.writable = "x" in file.mode or "r" not in file.mode
        self.write = self.buffer.write if self.writable else None

    def read(self):
        # Slurp the entire fd into the buffer, then return everything
        # from the current position.
        while True:
            b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
            if not b:
                break
            ptr = self.buffer.tell()
            # Append at the end without disturbing the read position.
            self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
        self.newlines = 0
        return self.buffer.read()

    def readline(self):
        # Keep reading chunks until at least one full line is buffered
        # (EOF also terminates via the "(not b)" term).
        while self.newlines == 0:
            b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
            self.newlines = b.count(b"\n") + (not b)
            ptr = self.buffer.tell()
            self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
        self.newlines -= 1
        return self.buffer.readline()

    def flush(self):
        # Push buffered output to the real fd and reset the buffer.
        if self.writable:
            os.write(self._fd, self.buffer.getvalue())
            self.buffer.truncate(0), self.buffer.seek(0)
class IOWrapper(IOBase):
    """Text-mode facade over FastIO: encodes/decodes ascii at the boundary
    so Python 3 string I/O works on top of the byte buffer."""

    def __init__(self, file):
        self.buffer = FastIO(file)
        self.flush = self.buffer.flush
        self.writable = self.buffer.writable
        self.write = lambda s: self.buffer.write(s.encode("ascii"))
        self.read = lambda: self.buffer.read().decode("ascii")
        self.readline = lambda: self.buffer.readline().decode("ascii")
# Re-implementation of Python 3's print() on top of the FastIO wrappers;
# this intentionally shadows the builtin for the rest of the module.
def print(*args, **kwargs):
    """Prints the values to a stream, or to sys.stdout by default."""
    sep, file = kwargs.pop("sep", " "), kwargs.pop("file", sys.stdout)
    at_start = True
    for x in args:
        if not at_start:
            file.write(sep)
        file.write(str(x))
        at_start = False
    file.write(kwargs.pop("end", "\n"))
    if kwargs.pop("flush", False):
        file.flush()
# Install the fast IO wrappers over stdin/stdout and a line-based input().
if sys.version_info[0] < 3:
    sys.stdin, sys.stdout = FastIO(sys.stdin), FastIO(sys.stdout)
else:
    sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)
input = lambda: sys.stdin.readline().rstrip("\r\n")
# endregion

if __name__ == "__main__":
    main()
| LorenFiorini/Competitive-Programming | kickStart (Google)/2021/Round A 2021/2021AB.py | 2021AB.py | py | 3,896 | python | en | code | 2 | github-code | 13 |
# exercise 135: The Sieve of Eratosthenes
limit = int(input('enter a limit: '))
nums = []
# start with every integer 0..limit; composites are zeroed out below
for i in range(0, limit + 1):
    nums.append(i)
nums[1] = 0  # 1 is not prime
#print(nums)
p = 2
while p < limit:
    # making all multiple of p except p equal to zero, because I already know they are not prime numbers
    # using p itself as third parameter of the range, so that I find all multiples until the limit
    for i in range(p*2, limit + 1, p):
        nums[i] = 0
    # incrementing p by 1 and keeping incrementing it until I find a number which is not already zeroed
    p = p + 1
    while p < limit and nums[p] == 0:
        p = p + 1
if __name__ == '__main__':
    # everything still non-zero survived the sieve, i.e. is prime (plus 0)
    for n in nums:
        if n != 0:
            print(n)
| sara-kassani/1000_Python_example | books/Python Workbook/lists/ex135.py | ex135.py | py | 717 | python | en | code | 1 | github-code | 13 |
7500470737 | """User View tests"""
import os
from unittest import TestCase
from models import db, connect_db, Message, User, Likes, Follows
os.environ['DATABASE_URL'] = "postgresql:///warbler-test"
from app import app, CURR_USER_KEY
app.config['TESTING'] = True
app.config['DEBUG_TB_HOSTS'] = ['dont-show-debug-toolbar']
app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
db.drop_all()
db.create_all()
app.config['WTF_CSRF_ENABLED'] = False
class UserViewsTestCase(TestCase):
    """Test views for route /users"""

    def setUp(self):
        """Create test client, add sample data."""
        User.query.delete()
        Message.query.delete()

        self.client = app.test_client()

        self.testuser1 = User.signup(username="testuser_one",
                                     email="test@test.com",
                                     password="testuser1",
                                     image_url=None)
        self.testuser2 = User.signup(username="testuser_two",
                                     email="testtwo@test.com",
                                     password="testuser2",
                                     image_url=None)
        self.testuser3 = User.signup(username="testuser_three",
                                     email="testthree@test.com",
                                     password="testuser3",
                                     image_url=None)
        db.session.commit()

        # testuser_one is following testuser_two
        f1 = Follows(
            user_being_followed_id=self.testuser2.id,
            user_following_id=self.testuser1.id
        )
        # testuser_three is following testuser_one
        f2 = Follows(
            user_being_followed_id=self.testuser1.id,
            user_following_id=self.testuser3.id
        )
        db.session.add_all([f1, f2])
        db.session.commit()

    def test_users_index(self):
        """Display list of users
        /users GET

        Renamed from test_users_show: the class previously defined two
        methods with that name, so this one was shadowed and never ran.
        """
        with self.client as c:
            resp = c.get("/users")
            html = resp.get_data(as_text=True)

            self.assertEqual(resp.status_code, 200)
            self.assertIn("testuser_one", html, msg="The test user username should be in html")

    def test_users_show(self):
        """Display single user profile
        /users/<int:user_id> GET"""
        m = Message(
            text="Hello, I am test message",
            user_id=self.testuser1.id
        )
        db.session.add(m)
        db.session.commit()

        with self.client as c:
            resp = c.get(f"/users/{self.testuser1.id}")
            html = resp.get_data(as_text=True)

            self.assertEqual(resp.status_code, 200)
            self.assertIn("testuser_one", html, msg="username should be in html")
            self.assertIn("Hello, I am test message", html, msg="Test message should get queried and displayed")

    def test_show_following_not_logged_in(self):
        """Show list of people user_id is following while NOT logged in
        /users/<int:user_id/following GET>"""
        with self.client as c:
            resp = c.get(f"/users/{self.testuser1.id}/following")
            # anonymous users are redirected away
            self.assertEqual(resp.status_code, 302)

    def test_show_following_logged_in(self):
        """Show list of people user_id is following while logged in
        /users/<int:user_id/following GET>"""
        with self.client as c:
            with c.session_transaction() as sess:
                sess[CURR_USER_KEY] = self.testuser1.id
            resp = c.get(f"/users/{self.testuser1.id}/following")
            html = resp.get_data(as_text=True)

            self.assertEqual(resp.status_code, 200)
            self.assertIn("testuser_two", html, msg="testuser2 should appear in list")
            self.assertNotIn("testuser_three", html, msg="testuser3 is not followed by testuser1; should not appear")

    def test_users_followers_not_logged_in(self):
        """Show list of followers while NOT logged in
        /users/<int:user_id>/followers GET"""
        with self.client as c:
            resp = c.get(f"/users/{self.testuser1.id}/followers")
            self.assertEqual(resp.status_code, 302)

    def test_users_followers_logged_in(self):
        """Show list of followers while logged in
        /users/<int:user_id>/followers GET"""
        with self.client as c:
            with c.session_transaction() as sess:
                sess[CURR_USER_KEY] = self.testuser1.id
            resp = c.get(f"/users/{self.testuser1.id}/followers")
            html = resp.get_data(as_text=True)

            self.assertEqual(resp.status_code, 200)
            self.assertIn("testuser_three", html, msg="following user did not show")
            self.assertNotIn("testuser_two", html, msg="a non-following user appeared")

    def test_add_follow_not_logged_in(self):
        """Add follow while NOT logged in
        /users/follow/<int:follow_id> POST"""
        with self.client as c:
            resp = c.post(f"/users/follow/{self.testuser2.id}")
            self.assertEqual(resp.status_code, 302)
            follow_list = Follows.query.all()
            # only the two follows created in setUp should exist
            self.assertEqual(len(follow_list), 2, msg="No new follows should be added")

    # def test_add_follow_logged_in(self):
    #     """Add follow while logged in
    #     /users/follow/<int:follow_id> POST"""
    #     with self.client as c:
    #         with c.session_transaction() as sess:
    #             sess[CURR_USER_KEY] = self.testuser1.id
    #         # testuser1 is trying to follow testuser3
    #         resp = c.post(f"/users/follow/{self.testuser3.id}")
    #         html = resp.get_data(as_text=True)
    #         self.assertEqual(resp.status_code, 200)
    #         self.assertIn("testuser_two", html, msg="existing followed user did not show")
    #         self.assertIn("testuser_three", html, msg="added followed user did not show")
    #         follow_list = Follows.query.all()
    #         self.assertEqual(len(follow_list), 3, msg="new Follow should be added to db")

    def test_user_profile_not_logged_in(self):
        """/users/profile GET"""
        with self.client as c:
            resp = c.get("/users/profile")
            self.assertEqual(resp.status_code, 302)

    def test_user_profile_logged_in(self):
        """Edit user profile
        /users/profile GET"""
        with self.client as c:
            with c.session_transaction() as sess:
                sess[CURR_USER_KEY] = self.testuser1.id
            resp = c.get("/users/profile")
            html = resp.get_data(as_text=True)

            self.assertEqual(resp.status_code, 200)
            self.assertIn("testuser_one", html)
            self.assertIn("Edit Your Profile", html)

    def test_user_profile_edit_not_logged_in(self):
        """Edit user profile NOT logged in
        /users/profile POST"""
        with self.client as c:
            resp = c.post("/users/profile")
            self.assertEqual(resp.status_code, 302)

    def test_user_profile_edit_logged_in(self):
        """Edit user profile while logged in
        /users/profile POST"""
        with self.client as c:
            with c.session_transaction() as sess:
                sess[CURR_USER_KEY] = self.testuser1.id
            resp = c.post(
                "/users/profile",
                data={
                    "username": "testuser_one",
                    "password": "testuser1",
                    "email": "newemail@newmail.com",
                    "image_url": "www.whatever.com",
                    "header_image_url": "www.idontcare.com",
                    "bio": "I am test user."
                }
            )
            html = resp.get_data(as_text=True)

            self.assertEqual(resp.status_code, 302)
            user1 = User.query.filter_by(username="testuser_one").first()
            self.assertEqual("newemail@newmail.com", user1.email, msg="email not updated")
            self.assertEqual("I am test user.", user1.bio, msg="bio not updated")

    def test_delete_user_not_logged_in(self):
        """Delete user while NOT logged in
        /users/delete POST"""
        with self.client as c:
            resp = c.post("/users/delete")
            self.assertEqual(resp.status_code, 302)
            users = User.query.all()
            self.assertEqual(len(users), 3, msg="A user got deleted that should not have")

    def test_delete_user_logged_in(self):
        """Delete user while logged in
        /users/delete POST"""
        with self.client as c:
            with c.session_transaction() as sess:
                sess[CURR_USER_KEY] = self.testuser1.id
            resp = c.post("/users/delete")
            html = resp.get_data(as_text=True)

            self.assertEqual(resp.status_code, 302)
            self.assertIn("/signup", html)
            users = User.query.all()
            self.assertEqual(len(users), 2, msg="User did not get deleted")
import socket

# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to the port
server_address = ('localhost', 10000)
sock.bind(server_address)
print('Servidor iniciado, host {} puerto {}'.format(*server_address))
# Start listening for incoming connections (backlog of 1)
sock.listen(1)
while True:
    # Wait for a new connection (blocking)
    print('Esperando una nueva conexión')
    connection, client_address = sock.accept()
    try:
        print('Conexión desde', client_address)
        # Receive data and echo it back with a server prefix
        while True:
            data = connection.recv(512)
            if data != b'':
                print(client_address, 'Recibido {!r}'.format(data))
            if data:
                print('Reenviando información al cliente', client_address)
                resp = 'Respuesta del servidor: ' + data.decode('utf-8')
                resp = resp.encode('utf-8')
                connection.sendall(resp)
            else:
                # empty read means the client closed its side
                print('Sin datos desde', client_address)
                break
    finally:
        # Always close the client connection, even on errors
        print('Cerrando la conexión con ', client_address)
        connection.close()
| leomm20/CursoPython | src/cp09_extras/cp70_cliente_servidor_servidor.py | cp70_cliente_servidor_servidor.py | py | 1,150 | python | es | code | 0 | github-code | 13 |
from gensim.models import Doc2Vec
import sys, os, time, subprocess
from sklearn.cluster import KMeans, AgglomerativeClustering#, SpectralClustering, DBSCAN
from sklearn.metrics import classification_report,confusion_matrix,roc_curve,auc, silhouette_score
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import svm
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.gridspec as gridspec
import pandas as pd
import numpy as np
import csv

# logging row layout: model feature clustering_key num_clusters function time
def main():
    """Cluster Doc2Vec document vectors over a parameter sweep, plot
    silhouette scores, train SVC/GBC classifiers on the resulting labels,
    and copy every artifact to S3."""

    def move_file_to_s3(file_path):
        # Copy a result file to the S3 bucket, then delete the local copy.
        cmd = ' '.join(['aws s3 cp ', file_path, 's3://agu-thinkful-capstone/'+file_path])
        subprocess.call(cmd, shell = True)
        print('copied '+file_path+' to s3', file=sys.stderr)
        cmd = 'rm '+file_path
        print(cmd, file= sys.stderr)
        subprocess.call(cmd, shell = True)
        print('removed '+file_path+' from ec2', file=sys.stderr)

    def make_clusters(clustering_key, param_range, doc_vecs_df, meta_data, csv_file_name, **kwargs):
        # Fit one clustering model per parameter value, collecting metrics
        # and appending a timing row to the log CSV for each fit.
        nclusts = []
        param = []
        sscores = []
        avg_obs = []
        obs_std = []
        models = []
        labels = []
        for no in param_range:
            t1 = time.time()
            if clustering_key =='kmeans':
                model = KMeans(n_clusters=no, random_state=43).fit(doc_vecs_df)
            # elif clustering_key == 'DBSCAN':
            #     model = DBSCAN(eps=no, algorithm='auto').fit(doc_vecs_df)
            # elif clustering_key == 'DBSCAN_kd':
            #     model = DBSCAN(eps=no, algorithm='kd_tree').fit(doc_vecs_df)
            elif clustering_key == 'agglomerative':
                model = AgglomerativeClustering(linkage = 'ward', n_clusters= no).fit(doc_vecs_df)
                #AffinityPropagation(affinity='euclidean', convergence_iter=15, copy=True, damping=no, max_iter=200, preference=None, verbose=False).fit(doc_vecs_df)
            fdv_clusters = model.labels_#predict(doc_vecs_df)
            # print('label set', set(fdv_clusters))
            nclusts.append(len(list(set(fdv_clusters))))
            param.append(no)
            sscores.append(silhouette_score(doc_vecs_df, fdv_clusters, metric='cosine'))
            avg_obs.append(pd.value_counts(fdv_clusters).mean())
            obs_std.append(pd.value_counts(fdv_clusters).std())
            models.append(model)
            labels.append(fdv_clusters)
            # if no == param_range[0]:
            #     with open(csv_file_name, 'w') as csv_file:
            #         writer = csv.writer(csv_file, delimiter=',')
            #         writer.writerow(meta_data+ [no, 'make_clusters', time.time()-t1])
            # else:
            with open(csv_file_name, 'a') as csv_file:
                writer = csv.writer(csv_file, delimiter=',')
                writer.writerow(meta_data+ [no, 'make_clusters', time.time()-t1])
            print(meta_data+[no, 'make_clusters', time.time()-t1], file=sys.stderr)
        return models, pd.DataFrame({'nclusts':nclusts, 'param':param, 'sscores':sscores, 'avg_obs':avg_obs, 'obs_std':obs_std, 'labels':labels})

    def make_sil_plot(df, **kwargs):
        # Silhouette score vs. parameter, with average observations per
        # cluster (+/- one std) on a secondary axis; optionally saved as SVG.
        obsstd1 = [ i+j for i,j in zip(df['avg_obs'],df['obs_std'])]
        stdneg = [ i-j for i,j in zip(df['avg_obs'],df['obs_std'])]
        fig, ax = fig, ax = plt.subplots(figsize=(6,4))
        ax2 = ax.twinx()
        ax = sns.scatterplot(x = 'param', y = 'sscores', data = df,hue='nclusts', label='Sil. Score', ax=ax)
        ax2 =sns.lineplot(df['param'], df['avg_obs'],color='purple',label='Avg obs per cluster', linewidth=2)
        sns.lineplot(df['param'],obsstd1,color='r',label='+/- Std Avg Obs', linewidth=.6)
        sns.lineplot(df['param'],stdneg,color='r', linewidth=.6)
        ax.set_ylabel('Sil. Score')
        ax.set_xlabel('Number of Clusters')
        # mark the parameter with the best silhouette score
        ax.axvline(x=df['param'].iloc[df['sscores'].idxmax()],color='r',linestyle='dotted')
        ax.legend(loc='lower left')
        ax2.legend(loc='upper right')
        plt.ylabel('Average Observation per cluster')
        plt.xlabel('Number of Clusters')
        plt.title('Silhouette Scores by Number of Clusters',fontsize=20)
        if 'save_name' in kwargs:
            plt.savefig(kwargs['save_name'], format='svg', bbox_inches='tight')

    def make_train_test_inds(df_for_inds):
        # 80/20 split, stratified by cluster label.
        balanced_inds_train = []
        for grp in df_for_inds.groupby('labels'):
            balanced_inds_train += grp[1].sample(frac = .8).index.tolist()
        balanced_inds_test = df_for_inds[~df_for_inds.index.isin(balanced_inds_train)].index.tolist()
        return balanced_inds_train, balanced_inds_test

    def make_d2v_df(model, df):
        # One row per document: its trained Doc2Vec vector.
        return pd.DataFrame([model.docvecs[ik] for ik in range(len(df))])

    # Which row of the parameter grid to run is given on the command line.
    params_df = pd.read_csv('feature_modelnum_clustermodel2.csv')
    comb_num = int(sys.argv[1])
    clustering_key = params_df.iloc[comb_num]['clustering_key']
    model_num = str(params_df.iloc[comb_num]['model_num'])
    feature = params_df.iloc[comb_num]['feature']
    csv_file_name = 'clustering/ml_logging_d2v_'+clustering_key+'_'+model_num+ '_'+feature+'.csv'
    cmd = 'touch '+ csv_file_name
    subprocess.call(cmd, shell = True)
    meta_data = [model_num, feature, clustering_key]
    # Returns doc2vec modelled vectors
    df = pd.read_csv('agu_df.csv')
    model = Doc2Vec.load('models/d2v_'+feature+'_params_'+model_num+'.model')
    doc_vecs_df = make_d2v_df(model, df)
    param_d = {'kmeans': list(range(2, 27, 1)), 'agglomerative': list(range(2, 27, 1))}#, 'DBSCAN': np.arange(10, 10.78, .03).tolist(), 'DBSCAN_kd': np.arange(10, 10.78, .03).tolist()}
    filter_cols = ['labels', 'section', 'session', 'labels_sub', 'identifier']
    # kmeans_mdls, df_clusters = make_clusters(doc_vecs_df.iloc[:, ~doc_vecs_df.columns.isin(filter_cols)])
    kmeans_mdls, df_clusters = make_clusters(clustering_key, param_d[clustering_key], doc_vecs_df.iloc[:, ~doc_vecs_df.columns.isin(filter_cols)],meta_data, csv_file_name)
    clustering_csv_file = 'clustering/clustering_d2v_'+clustering_key+'_'+model_num+ '_'+feature+'.csv'
    df_clusters.to_csv(clustering_csv_file)
    move_file_to_s3(clustering_csv_file)
    t1 = time.time()
    silplot_file_name = 'clustering/silplot_d2v_'+clustering_key+'_'+model_num + '_'+feature+'.svg'
    make_sil_plot(df_clusters, save_name = silplot_file_name)
    move_file_to_s3(silplot_file_name)
    with open(csv_file_name, "a") as csv_file:
        writer = csv.writer(csv_file, delimiter=',')
        writer.writerow(meta_data+ [0, 'make_sil_plot', time.time()-t1])
    # Accumulators for per-parameter classifier results
    # (cl = cluster labels, sl = section labels).
    d = {'clust_param':[], 'X_train_inds':[], 'X_test_inds':[], 'y_test_cl':[], 'y_test_sl':[],
         'svc_y_pred_cl':[], 'svc_test_score_cl': [], 'svc_train_score_cl': [],'gbc_y_pred_cl':[], 'gbc_test_score_cl':[], 'gbc_train_score_cl':[],
         'svc_y_pred_sl':[], 'svc_test_score_sl': [], 'svc_train_score_sl': [],'gbc_y_pred_sl':[], 'gbc_test_score_sl':[], 'gbc_train_score_sl':[] }
    doc_vecs_df['section'] = df['section']
    for ik in range(len(df_clusters)):
        # train/test both classifiers against this parameter's cluster labels
        doc_vecs_df['labels'] = df_clusters.iloc[ik]['labels']
        X_train_inds, X_test_inds = make_train_test_inds(doc_vecs_df)
        # supclass_X_train, supclass_X_test = doc_vecs_df[inds_d['X_train_inds']], doc_vecs_df[inds_d['X_test_inds']]
        supclass_X_train, supclass_X_test = doc_vecs_df.iloc[X_train_inds], doc_vecs_df.iloc[X_test_inds]
        supclass_y_train_cl, supclass_y_test_cl = supclass_X_train['labels'], supclass_X_test['labels']
        supclass_y_train_sl, supclass_y_test_sl = supclass_X_train['section'], supclass_X_test['section']
        supclass_X_train = supclass_X_train.iloc[:, ~supclass_X_train.columns.isin(filter_cols)]
        supclass_X_test = supclass_X_test.iloc[:, ~supclass_X_test.columns.isin(filter_cols)]
        SC_model_d ={'svc': svm.SVC(), 'gbc': GradientBoostingClassifier()}
        for key in SC_model_d.keys():
            for label_set in [['cl' , supclass_y_train_cl, supclass_y_test_cl], ['sl' , supclass_y_train_sl, supclass_y_test_sl]]:
                t1 = time.time()
                SC_model_d[key].fit(supclass_X_train, label_set[1])
                y_pred = SC_model_d[key].predict(supclass_X_test)
                # print(df_clusters.iloc[ik]['nclusts'], key, label_set[0],'Doc2Vec', 'Training set score:',SC_model_d[key].score(supclass_X_train, label_set[1]), 'Test set score:', SC_model_d[key].score(supclass_X_test, label_set[2]) , file=sys.stderr)
                d[key+'_train_score_'+label_set[0]].append(SC_model_d[key].score(supclass_X_train, label_set[1]))
                d[key+'_test_score_'+label_set[0]].append(SC_model_d[key].score(supclass_X_test, label_set[2]))
                d[key+'_y_pred_'+label_set[0]].append([y_pred])
                with open(csv_file_name, "a") as csv_file:
                    writer = csv.writer(csv_file, delimiter=',')
                    writer.writerow(meta_data+ [df_clusters.iloc[ik]['nclusts'], key+'_'+label_set[0], time.time()-t1])
        d['X_train_inds'].append(X_train_inds)
        d['X_test_inds'].append(X_test_inds)
        d['clust_param'].append(df_clusters.iloc[ik]['nclusts'])
        d['y_test_sl'].append(supclass_y_test_sl)
        d['y_test_cl'].append(supclass_y_test_cl)
    classifier_csv_file = 'clustering/classifiers_d2v_'+clustering_key+'_'+model_num + '_'+feature+'.csv'
    pd.DataFrame(d).to_csv(classifier_csv_file)
    move_file_to_s3(classifier_csv_file)
    move_file_to_s3(csv_file_name)

if __name__ == '__main__':
    main()
| jordanplanders/Thinkful | Bootcamp/Capstone/cluster_analysis2.py | cluster_analysis2.py | py | 8,762 | python | en | code | 1 | github-code | 13 |
17050417274 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ContractSignRsp(object):
    """Contract-sign response: the signer's ids, display name and sign URL."""

    # Attribute names that take part in (de)serialization, in payload order.
    _FIELD_NAMES = ('open_id', 'sign_url', 'user_id', 'user_name')

    def __init__(self):
        # Every serializable field starts out unset.
        for name in self._FIELD_NAMES:
            setattr(self, '_' + name, None)

    @property
    def open_id(self):
        return self._open_id

    @open_id.setter
    def open_id(self, value):
        self._open_id = value

    @property
    def sign_url(self):
        return self._sign_url

    @sign_url.setter
    def sign_url(self, value):
        self._sign_url = value

    @property
    def user_id(self):
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    @property
    def user_name(self):
        return self._user_name

    @user_name.setter
    def user_name(self, value):
        self._user_name = value

    def to_alipay_dict(self):
        """Serialize every truthy field, recursing into nested alipay objects."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue  # falsy/unset fields are omitted from the payload
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a ContractSignRsp from a plain dict; returns None for empty input."""
        if not d:
            return None
        o = ContractSignRsp()
        for name in ContractSignRsp._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/ContractSignRsp.py | ContractSignRsp.py | py | 2,249 | python | en | code | 241 | github-code | 13 |
28974632946 | """Synchronous API for external server-side processes
The *messaging server* should never use these interfaces,
they are intended for Django and similar processes which
want to write a message to a channel and/or add/remove
sessions to/from a channel.
"""
import os, sys
import argparse
from . import base
class Channel(base.Channel):
    """Synchronous channel; all behaviour is inherited from base.Channel."""
    pass
class Session(base.Session):
    """Flag-file backed session permissions.

    A channel is readable/writable for this session iff a file named after
    the channel id exists in the session's readable/writable directory
    (paths provided by base.Session).
    """

    def _set_flag(self, directory, channel_id):
        # Create (or overwrite) the flag file that grants the permission.
        filename = os.path.join(directory, channel_id)
        with open(filename, 'w') as fh:
            fh.write('flag-file')

    def _clear_flag(self, directory, channel_id):
        # Remove the flag file; True on success, False if it did not exist
        # or could not be removed.  Narrowed from `except Exception` to
        # OSError, which covers every os.remove failure mode.
        filename = os.path.join(directory, channel_id)
        try:
            os.remove(filename)
            return True
        except OSError:
            return False

    def add_readable(self, channel_id):
        """Grant this session read access to channel_id."""
        self._set_flag(self.readable_path, channel_id)

    def remove_readable(self, channel_id):
        """Revoke read access; returns True if a flag was actually removed."""
        return self._clear_flag(self.readable_path, channel_id)

    def add_writable(self, channel_id):
        """Grant this session write access to channel_id."""
        self._set_flag(self.writable_path, channel_id)

    def remove_writable(self, channel_id):
        """Revoke write access; returns True if a flag was actually removed."""
        return self._clear_flag(self.writable_path, channel_id)

    def can_write(self, channel_id):
        """True iff the write-permission flag file exists."""
        return os.path.exists(os.path.join(self.writable_path, channel_id))

    def can_read(self, channel_id):
        """True iff the read-permission flag file exists."""
        return os.path.exists(os.path.join(self.readable_path, channel_id))
class Server(base.Server):
    """Synchronous API for the server"""
    # Wire the synchronous Session/Channel implementations into the base server.
    SESSION_CLASS = Session
    CHANNEL_CLASS = Channel
def alnum_string(value):
    """argparse type: accept `value` only if base.simple_id approves it.

    Renamed the parameter from `input`, which shadowed the builtin; argparse
    and the in-file caller both pass the argument positionally.
    """
    if not base.simple_id(value):
        raise argparse.ArgumentTypeError("Need a value with only chars in [a-zA-Z0-9-]", value)
    return value
def alnum_string_or_empty(value):
    """argparse type: like alnum_string, but the empty string passes through.

    Renamed the parameter from `input`, which shadowed the builtin.
    """
    if not value:
        return value
    return alnum_string(value)
# Session-management CLI. Fix: the --readable/--no-readable help strings were
# copy-pasted from the writable options and wrongly said "write".
parser = argparse.ArgumentParser(description='Do server-side ssws configuration from scripts/command lines')
parser.add_argument('session', metavar='SESSION', type=alnum_string,
                    help='Session being manipulated')
parser.add_argument('--channel', metavar='CHANNEL', action='append', type=alnum_string_or_empty,
                    dest='channels',
                    help='Channel to be manipulated (argument can be repeated)')
parser.add_argument('--writable', dest='writable', action='store_const',
                    const=True, default=False,
                    help='Allow the session to write to the given channel')
parser.add_argument('--readable', dest='readable', action='store_const',
                    const=True, default=False,
                    help='Allow the session to read from the given channel')
parser.add_argument('--no-writable', dest='writable', action='store_const',
                    const=False, default=False,
                    help='Do not allow the session to write to the given channel')
parser.add_argument('--no-readable', dest='readable', action='store_const',
                    const=False, default=False,
                    help='Do not allow the session to read from the given channel')
parser.add_argument('--remove', dest='remove', action='store_const',
                    const=True, default=False,
                    help='Cleanup/de-register this session')
def session_main():
    """CLI entry point: register, update, or remove a session's permissions."""
    arguments = parser.parse_args()
    server = Server()
    session = server.session(arguments.session)
    if arguments.remove:
        session.cleanup()
        return
    if not arguments.channels:
        # No channels given: the session is merely registered/connected.
        return
    # Pick the add/remove operation once, then apply it to every channel.
    writable_op = session.add_writable if arguments.writable else session.remove_writable
    readable_op = session.add_readable if arguments.readable else session.remove_readable
    for channel in arguments.channels:
        writable_op(channel)
    for channel in arguments.channels:
        readable_op(channel)
# Message-sending CLI: write one message to a named channel.
mparser = argparse.ArgumentParser(description='Do server-side ssws configuration from scripts/command lines')
mparser.add_argument('channel', metavar='CHANNEL', type=alnum_string,
                     help='Channel to which to send message')
mparser.add_argument('--message', dest='message', default=None,
                     help='Pass the message in as an argument, otherwise use stdin')
def message_main():
    """CLI entry point: send a message (from --message or stdin) to a channel."""
    arguments = mparser.parse_args()
    message = arguments.message if arguments.message else sys.stdin.read()
    server = Server()
    channel = server.channel(arguments.channel)
    channel.write(message)
| mcfletch/ssws | ssws/sync.py | sync.py | py | 4,647 | python | en | code | 0 | github-code | 13 |
35723646672 | # -*- coding: utf-8 -*-
import requests
import pygame
from CameraControl import CameraControl
from CarControl import CarControl
class Client:
    """HTTP client that reports STEVE car state to a remote server and
    supports manual driving with an Xbox controller."""

    target_host = ''
    target_port = ''

    def __init__(self, target_host, target_port):
        self.target_host = target_host
        self.target_port = target_port

    def send_status_update(self, speed, turn_val, image=None):
        """POST the current state to <host>:<port>/receiveStatusUpdate.

        speed: current speed of the STEVE car [0, 1].
        turn_val: turn value the tires are set to [-1, 1].
        image: camera frame object (sent as-is).
        Returns the `requests` response object.
        """
        request_body = {"speed": str(speed), "turn_val": str(turn_val), "image": image}
        url = str(self.target_host) + ":" + str(self.target_port) + "/receiveStatusUpdate"
        print(url)
        response = requests.post(url, data=request_body, headers={'Content-Type': 'application/octet-stream'})
        print(response)
        return response

    def send_training_data(self, route_name, speed, turn_val, image=None):
        """POST one labelled training sample to /receiveTrainingData."""
        request_body = {"route_name": str(route_name), "speed": str(speed), "turn_val": str(turn_val), "image": image}
        url = str(self.target_host) + ":" + str(self.target_port) + "/receiveTrainingData"
        print(url)
        response = requests.post(url, data=request_body, headers={'Content-Type': 'application/octet-stream'})
        print(response)
        return response

    def start_xbox_manual_control(self):
        """Drive manually with an Xbox controller while recording samples.

        Records (speed, turn, image) each frame; pressing button 7 ends the
        session, after which the samples are uploaded under a route name
        entered on stdin.
        """
        carControl = CarControl()
        camera = CameraControl()
        finished_flag = False
        state_data = []
        pygame.init()
        pygame.joystick.init()
        clock = pygame.time.Clock()
        # Fix: the original had a bare `print` statement followed by a
        # discarded `pygame.joystick.get_count()` expression on its own line.
        print(pygame.joystick.get_count())
        _joystick = pygame.joystick.Joystick(0)
        _joystick.init()
        while 1:
            for event in pygame.event.get():
                # print(event)
                if event.type == pygame.JOYBUTTONDOWN:
                    print("Joystick button pressed.")
                    print(event)
                    if event.button == 7:  # button 7 ends the session
                        finished_flag = True
                if event.type == pygame.JOYAXISMOTION:
                    if event.axis == 0:  # this is the x axis
                        print(event.value)
                    if event.axis == 5:  # right trigger
                        print(event.value)
            # NOTE(review): indentation was lost in the source; per-frame
            # sampling below is assumed to sit outside the event loop —
            # confirm against the original repository.
            xdir = _joystick.get_axis(0)
            rtrigger = _joystick.get_axis(5)
            # deadzone
            if abs(xdir) < 0.2:
                xdir = 0.0
            if rtrigger < -0.9:
                rtrigger = -1.0
            throttle = ((rtrigger + 1) / 2) * .15
            turn = ((xdir - 1) / -2) * 180
            print([turn, throttle])
            carControl.set_speed(float(throttle))
            carControl.set_turn_val(float(turn))
            state = [carControl.get_speed(), carControl.get_turn_val(), camera.get_image()]
            state_data.append(state)
            # self.send_status_update(carControl.get_speed(), carControl.get_turn_val(), camera.get_image())
            if finished_flag:
                break
            clock.tick(30)  # cap the loop at 30 frames per second
        print("exited manual control")
        route_name = input("Enter the name of the route: ")
        for i in range(0, len(state_data)):
            print(state_data[i])
            self.send_training_data(route_name, state_data[i][0], state_data[i][1], state_data[i][2])
        pygame.quit()
if __name__ == '__main__':
    # Manual-control smoke test against the lab server.
    # camera = CameraControl()
    client = Client("http://10.226.107.151", 9999)
    # image = camera.get_image()
    # print(image)
    # client.send_status_update(5, 6, image)
    client.start_xbox_manual_control()
| Capstone-Projects-2021-Fall/project-steve | RPi/client.py | client.py | py | 4,127 | python | en | code | 0 | github-code | 13 |
24547148496 | from bs4 import BeautifulSoup
import requests
import re
from urllib.parse import urlparse
from html.parser import HTMLParser
class UrlDAO:
    """Collect absolute www.lastampa.it links from a web page."""

    # def __init__(self):
    #     self.s = set()

    def getUrls(self, url: str):
        """Fetch `url` and return the set of in-domain hrefs found on it."""
        r = requests.get(url, timeout=1)
        soup = BeautifulSoup(r.content)
        s = set()
        for tag in soup.findAll('a', href=True):
            if self.checkUrl(tag['href']):
                s.add(tag['href'])
        return s

    def checkUrl(self, url: str) -> bool:
        """True iff `url` has an http/https/ftp scheme and a www.lastampa.it host.

        Fixes: the dots in the host regex were unescaped (matching any
        character, e.g. "wwwXlastampaYit"), and the explicit if/else around
        an already-boolean condition is collapsed to bool(...).
        """
        host = urlparse(url).netloc
        in_domain = re.search(r"www\.lastampa\.it", f"{host}://{host}")
        has_scheme = re.match(r'(?:http|ftp|https)://', url)
        return bool(in_domain and has_scheme)
if __name__ == '__main__':
    prova = UrlDAO()
    # Fetch once instead of twice: the original issued two identical HTTP
    # requests (one for len(), one for iteration).
    urls = prova.getUrls('http://lastampa.it')
    print(len(urls))
    for n in urls:
        print(n)
| saramnt/prova_venv | innolva_spider/dao/UrlDAO.py | UrlDAO.py | py | 1,072 | python | en | code | 0 | github-code | 13 |
24049969436 | from __future__ import print_function
import os.path
from flask import render_template, session, redirect
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from . import app
from app import client_app_file, app_secret
app.secret_key = app_secret  # required for Flask session support
service = None  # module-level placeholder; /login builds its own local service
# OAuth scopes requested during login: profile/email identity plus full Drive
# and per-file Drive access.
SCOPES = [
    "https://www.googleapis.com/auth/userinfo.profile",
    "https://www.googleapis.com/auth/userinfo.email",
    "https://www.googleapis.com/auth/drive",
    "https://www.googleapis.com/auth/drive.file",
    "openid"
]
# index location (landing page)
@app.route("/")
def index():
    """Render the landing page."""
    return render_template("index.html")
# login location, redirect to google auth
@app.route("/login")
def login():
    """Run the Google OAuth installed-app flow and list the user's Drive files."""
    # Force a fresh consent flow by discarding any cached token.
    if os.path.exists('client_app/token.json'):
        os.remove('client_app/token.json')
    creds = None
    # The file token.json stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    # NOTE(review): the token file was just deleted above, so this check can
    # never succeed — cached credentials are effectively disabled.
    if os.path.exists('client_app/token.json'):
        creds = Credentials.from_authorized_user_file('client_app/token.json', SCOPES)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                client_app_file, SCOPES)
            creds = flow.run_local_server(port=8040)
        # Save the credentials for the next run
        with open('client_app/token.json', 'w') as token:
            token.write(creds.to_json())
    service = build('drive', 'v3', credentials=creds)
    print(u'{0}: {1}'.format("Refresh Token", creds.refresh_token))
    # List the first 10 files with the metadata fields the template uses.
    results = service.files().list(
        pageSize=10, fields="nextPageToken, files(id, name, mimeType, createdTime,webViewLink)").execute()
    items = results.get('files', [])
    if not items:
        print('No files found.')
    else:
        print('Files:')
        for item in items:
            print(u'{0} ({1}) {2}'.format(item['name'], item['id'], item['webViewLink']))
    return render_template("index.html", files=items)
# logout, clear session
@app.route("/logout")
def logout():
    """Drop all server-side session state and return to the landing page."""
    session.clear()
    return redirect("/")
# oncomplete: post-auth callback target, simply goes back home
@app.route("/oncomplete")
def oncomplete():
    """Redirect to the landing page."""
    return redirect("/")
# Fix: the guard compared against "__main.py__", a value __name__ can never
# take, so app.run() was unreachable when executing this file directly.
if __name__ == "__main__":
    app.run(debug=True)
| Joshgonzalez246/SmartDoorbell-FUN09a | routes.py | routes.py | py | 2,618 | python | en | code | 1 | github-code | 13 |
34788078020 | from ROOT import *
from utils import *
# Open the ROOT template file and draw (1) the pT-response splines and
# (2) the generator hard-MET prior, saving each canvas as a PDF.
ftemplates = TFile('usefulthings/llhd-prior-coarse-1p2width.root')
ftemplates.cd('splines')
ftemplates.ls()
c1 = mkcanvas('c1')
histnames = ['hRTemplate(gPt20.0-25.0, gEta0.0-0.4)', 'hRTemplate(gPt20.0-25.0, gEta2.5-6.0)', 'hRTemplate(gPt200.0-300.0, gEta0.0-0.4)']
histnames.reverse()
arg = ''
graphs = []
leg = mklegend(x1=.44, y1=.66, x2=.86, y2=.82, color=kWhite)
for ih, hname in enumerate(histnames):
    h = ftemplates.Get('splines/'+hname+'_graph').Clone()
    h.SetTitle('')
    h.GetXaxis().SetTitle('p_{T}^{reco}/p_{T}^{gen}')
    h.GetYaxis().SetTitle('normalized')
    graphs.append(h)
    graphs[-1].SetLineColor(ih+1)
    graphs[-1].SetLineWidth(3)
    graphs[-1].Draw(arg)
    # Build a human-readable legend label out of the histogram name.
    label = hname.split('(')[-1].split(')')[0].replace('gPt', 'p_{T}=').replace(', gEta', ' GeV, |#eta|=')
    leg.AddEntry(graphs[-1], label)
    arg = 'same'  # overlay subsequent graphs on the same canvas
leg.Draw()
c1.Update()
c1.Print('pdfs/PDFs/responses.pdf')
pause()
c1 = mkcanvas('c1')
# c1.SetLogy()
histnames = ['hGenHardMetPtPtB0(ght0.0-10000.0)']
# histnames = ['hGenHardMetDPhiB0(ght0.0-10000.0)']
histnames.reverse()
arg = ''
graphs = []
leg = mklegend(x1=.44, y1=.66, x2=.86, y2=.82, color=kWhite)
for ih, hname in enumerate(histnames):
    h = ftemplates.Get('splines/'+hname+'_graph').Clone()
    g = h.GetHistogram()
    h.SetTitle('')
    h.GetXaxis().SetTitle('generator hard #slash{E}_{T}')
    h.GetYaxis().SetTitle('normalized')
    graphs.append(h)
    graphs[-1].SetLineColor(ih+1)
    graphs[-1].SetLineWidth(3)
    if ih == 0:
        # Draw the frame histogram first so the axis ranges take effect.
        g.GetYaxis().SetRangeUser(0, 1.2*g.GetMaximum())
        g.GetXaxis().SetRangeUser(0, 100)
        g.Draw()
    arg = 'same'
    graphs[-1].Draw(arg)
leg.Draw()
c1.Update()
c1.Print('pdfs/PDFs/prior.pdf')
# c1.Print('pdfs/PDFs/prior-dphi.pdf')
pause()
| sbein/BayesQcd | tools/DrawResponsesAndPrior.py | DrawResponsesAndPrior.py | py | 1,804 | python | en | code | 0 | github-code | 13 |
11658694251 | # RealSeriesEvaluationRun.build_corpora()
# RealSeriesEvaluationRun.train_vecs()
import json
import os
from collections import defaultdict
import pandas as pd
from joblib import Parallel, delayed
from tqdm import tqdm
import numpy as np
from lib2vec.corpus_structure import Corpus, DataHandler, ConfigLoader, Preprocesser, CommonWords
# from corpus_processing import Preprocesser, CommonWords
import matplotlib.pyplot as plt
import seaborn as sns
class CommonWordsExperiment:
    """Sweep document-frequency thresholds over corpora and record how the
    global/per-document vocabulary sizes and document lengths shrink.

    Results are written as one JSON file per threshold plus an `all.json`
    aggregate under the directory passed to `filter_thresholds`.
    """

    # Corpora to run on (alternatives kept for reference).
    data_sets = [
        # "summaries",
        "german_books",
        # "german_series",
    ]
    config = ConfigLoader.get_config()
    # filters = ["common_words_doc_freq"]
    # Relative document-frequency thresholds (used when `absolute` is False).
    thresholds = [
        0.00,
        0.005,
        0.01,
        0.015,
        0.0175,
        0.02,
        0.03,
        0.04,
        0.05,
        0.06, 0.07, 0.08,
        0.10,
        0.15,
        0.20,
        0.25,
        0.30, 0.35, 0.40, 0.45,
        0.50,
        0.55,
        0.60, 0.65, 0.70, 0.75, 0.80, 0.85,
        0.90, 0.95,
        1.00
    ]
    # When True, absolute document counts are used as thresholds instead.
    absolute = True
    num_cores = 4  # worker count for the optional joblib parallel run

    @classmethod
    def filter_thresholds(cls, dir_path: str, parallel: bool = False):
        """Run the threshold sweep for every configured data set.

        dir_path: output directory for the per-threshold JSON results.
        parallel: if True fan out over `num_cores` joblib workers.
        NOTE(review): the parallel branch does not collect results into
        `all.json`, only the sequential branch does.
        """
        data_set_bar = tqdm(cls.data_sets, total=len(cls.data_sets), desc="2 Operate on dataset!!")
        for data_set in data_set_bar:
            data_set_bar.set_description(f'2 Operate on dataset >{data_set}<')
            data_set_bar.refresh()
            annotated_corpus_path = os.path.join(cls.config["system_storage"]["corpora"], data_set)
            try:
                # Reuse a previously annotated corpus if one exists on disk.
                corpus = Corpus.fast_load(path=annotated_corpus_path, load_entities=False)
            except FileNotFoundError:
                # Otherwise load the raw corpus, annotate, persist, reload.
                corpus = DataHandler.load_corpus(data_set)
                print('corpus loaded')
                # corpus = Preprocesser.annotate_corpus(corpus, without_spacy=False)
                # corpus.save_corpus_adv(annotated_corpus_path)
                Preprocesser.annotate_and_save(corpus, corpus_dir=annotated_corpus_path, without_spacy=False)
                print('annotated corpus')
                del corpus
                corpus = Corpus.fast_load(path=annotated_corpus_path, load_entities=False)
                # print('saved corpus')
            if cls.absolute:
                # Absolute mode: thresholds are document counts up to corpus size.
                thresholds = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
                              11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
                              25, 50, 100,  # 1000, 2000, 3000,
                              len(corpus)
                              ]
            else:
                thresholds = cls.thresholds
            threshold_bar = tqdm(thresholds, total=len(thresholds), desc="3 Calculate filter_mode results")
            if parallel:
                Parallel(n_jobs=cls.num_cores)(
                    delayed(CommonWordsExperiment.calculate_vocab_sizes)(corpus, t, data_set=data_set,
                                                                         dir_path=dir_path)
                    for t in threshold_bar)
            else:
                res = {t: CommonWordsExperiment.calculate_vocab_sizes(corpus, t, data_set=data_set,
                                                                      dir_path=dir_path)
                       for t in threshold_bar}
                with open(os.path.join(dir_path, 'all.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(res, outfile, indent=1)

    @classmethod
    def calculate_vocab_sizes(cls, corpus: Corpus, threshold, data_set: str, dir_path: str):
        """Filter `corpus` at `threshold`, compute size statistics, persist them.

        Returns a dict with the global vocabulary size, mean/std of the
        per-document vocabulary sizes and lengths, and the raw per-document
        sizes; the same dict is cached as `<dir_path>/<threshold>.json`.
        """
        filtered_corpus_dir = Corpus.build_corpus_dir("",
                                                      "",
                                                      data_set,
                                                      f'specific_words_{threshold}',
                                                      "None").replace('__', '_')
        # Cached result: return the previously stored JSON instead of refiltering.
        if os.path.isfile(os.path.join(dir_path.replace('.txt', ''), f'{threshold}.json')):
            with open(os.path.join(dir_path.replace('.txt', ''), f'{threshold}.json'), 'r', encoding='utf-8') as file:
                data = json.load(file)
            print(threshold, data['global_vocab_size'])
            return data
        print('>|0', threshold)
        if not os.path.isdir(filtered_corpus_dir):
            if cls.absolute:
                to_specfic_words = CommonWords.global_too_specific_words_doc_frequency(
                    corpus, percentage_share=threshold, absolute_share=threshold)
            else:
                to_specfic_words = CommonWords.global_too_specific_words_doc_frequency(
                    corpus,
                    percentage_share=threshold)
            print('>|1 with len', len(to_specfic_words))
            # filtered_corpus = corpus.common_words_corpus_copy(to_specfic_words, masking=False)
            filtered_corpus = corpus.common_words_corpus_copy_mem_eff(to_specfic_words, masking=False,
                                                                      corpus_dir=filtered_corpus_dir,
                                                                      through_no_sentences_error=False)
        else:
            # A filtered copy already exists on disk; load it instead.
            filtered_corpus = Corpus.load_corpus_from_dir_format(filtered_corpus_dir)
        print('>|2')
        author_dict = defaultdict(list)
        for doc_id, document in filtered_corpus.documents.items():
            author_dict[document.authors].append(doc_id)
        # NOTE(review): author_median/series_median are computed but never used.
        author_median = np.median([len(doc_ids) for author, doc_ids in author_dict.items()])
        series_median = np.median([len(doc_ids) for series_id, doc_ids in filtered_corpus.series_dict.items()])
        corpus_vocab_size = len(filtered_corpus.get_corpus_vocab())
        print('>|3 vocab size', corpus_vocab_size)
        document_sizes = {document_id: {'vocab_size': document.vocab_size,
                                        'document_length': document.length}
                          for document_id, document in tqdm(filtered_corpus.documents.items(),
                                                            total=len(filtered_corpus),
                                                            desc="Calculate Corpus Sizes")}
        # Debugging helpers kept from the original:
        # for document_id, document in filtered_corpus.documents.items():
        #     print([token for token in document.get_flat_document_tokens() if token != 'del'][:100])
        # for document_id, words in common_words.items():
        #     print(document_id, len(words), document_sizes[document_id]['vocab_size'])
        vocab_sizes = []
        document_lengths = []
        for doc_id, document_size in document_sizes.items():
            vocab_sizes.append(document_size['vocab_size'])
            document_lengths.append(document_size['document_length'])
        print(threshold, corpus_vocab_size, np.mean(vocab_sizes), np.mean(document_lengths))
        result_dict = {'global_vocab_size': corpus_vocab_size,
                       'avg_vocab_size': np.mean(vocab_sizes),
                       'std_vocab_size': np.std(vocab_sizes),
                       'avg_document_length': np.mean(document_lengths),
                       'std_document_length': np.std(document_lengths),
                       'document_sizes': document_sizes}
        with open(os.path.join(dir_path, f'{threshold}.json'), 'w', encoding='utf-8') as outfile:
            json.dump(result_dict, outfile, indent=1)
        # print(filtered_corpus.get_corpus_vocab())
        # print(filtered_corpus.get_flat_document_tokens())
        return result_dict
def heatmap(matrix, title, years, cmap="YlGnBu", norm=True, diagonal_set=None):
    """Draw a square heatmap of `matrix` with `years` labelling both axes.

    diagonal_set: if given, the value written onto the diagonal first
    (e.g. to mask self-comparisons); norm: divide by the matrix maximum.
    """
    # normed_matrix = (matrix-matrix.mean())/matrix.std()
    if diagonal_set is not None:
        np.fill_diagonal(matrix, diagonal_set)
    if norm:
        matrix = matrix / matrix.max()
    sns.heatmap(matrix, cmap=cmap, xticklabels=years, yticklabels=years, square=True)
    plt.title(title, fontsize=20)
    plt.show()
def plot_single(x, y, title):
    """Plot y over x in a single figure, optionally titled.

    Fix: the original called plt.show() before setting the title and tick
    rotation, so those were applied to a second, empty figure.
    """
    plt.plot(x, y)
    if title:
        plt.title(title)
    plt.xticks(rotation=90)
    plt.show()
def plot_many(x, y_list, title=None, skip=0, labels=None, plot_range=None, loc="best", n=None):
    """Plot several series over the same x values and save the figure.

    skip: number of leading points to drop from x and every series.
    labels: optional legend entries (one per series).
    plot_range: optional (ymin, ymax) pair; n: suffix for the saved PNG name.
    """
    fig = plt.figure(1)
    ax = fig.add_subplot(111)
    plots = [ax.plot(x[skip:], y[skip:])[0] for y in y_list]
    # lgd = ax.legend(['Lag ' + str(lag) for lag in x], loc='center right', bbox_to_anchor=(1.3, 0.5))
    if labels:
        # Legend placed outside the axes on the right.
        lgd = ax.legend(plots, labels, labelspacing=0., bbox_to_anchor=(1.04, 0.5), borderaxespad=0, loc=loc)
    else:
        lgd = None  # savefig below still receives (None,) as bbox_extra_artists
    if plot_range:
        plt.ylim(plot_range[0], plot_range[1])
    if title:
        plt.title(title)
    # plt.tight_layout(rect=[0, 0, 0.75, 1])
    plt.xticks(rotation=90)
    plt.show()
    fig.savefig(f'type_ratio_{n}.png', dpi=600, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
    # plt.savefig("output.png", bbox_inches="tight")
def violion_plot(dataframe):
    """Violin plot of relative corpus sizes per threshold, with overlaid
    global-vocab and corpus-length line plots.

    NOTE(review): function name typo ("violion") kept — plot_results calls
    it by this name.
    """
    # todo different scales for absolute values
    sns.set_theme(style="whitegrid")
    font_size = 20
    ax = sns.violinplot(x="Document Threshold", y="Relative Corpus Size", hue="Value Type",
                        data=dataframe, palette="muted", split=False)
    # ax2 = ax1.twinx()
    ax = sns.lineplot(data=dataframe, x="Document Threshold", y="Global Vocab Size")
    ax = sns.lineplot(data=dataframe, x="Document Threshold", y="Corpus Length")
    ax.set_xlabel("Document Threshold", fontsize=font_size)
    ax.set_ylabel("Relative Corpus Size", fontsize=font_size)
    ax.tick_params(labelsize=font_size)
    plt.setp(ax.get_legend().get_texts(), fontsize=str(font_size))
    plt.show()
def lin_scale(x):
    """Identity scaling; a drop-in stand-in where np.log could be swapped in."""
    return x
def plot_results(path: str):
    """Aggregate the per-threshold JSON results under `path` and plot how
    vocabulary sizes and document lengths shrink as the threshold grows."""
    all_path = os.path.join(path, 'all.json')
    if os.path.isfile(all_path):
        with open(all_path, 'r', encoding='utf-8') as file:
            result = json.load(file)
    else:
        # No aggregate yet: merge the individual <threshold>.json files and cache.
        files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
        result = {}
        for file_path in files:
            with open(os.path.join(path, file_path), 'r', encoding='utf-8') as file:
                result[file_path.replace('.json', '')] = json.load(file)
        with open(os.path.join(path, 'all.json'), 'w', encoding='utf-8') as outfile:
            json.dump(result, outfile, indent=1)
    threshold_vals = []
    global_vocab_vals = []
    avg_vocab_size_vals = []
    avg_length_vals = []
    # Each record in `result` looks like:
    # "global_vocab_size": 319556,
    # "avg_vocab_size": 12851.589041095891,
    # "std_vocab_size": 5099.042552715211,
    # "avg_document_length": 107681.47945205479,
    # "std_document_length": 62174.930541816575,
    # "document_sizes": {...}
    orig_doc_vocab_len = None
    orig_doc_len = None
    orig_global_vocab_len = None
    original_corpus_length = None
    diff_vocab_vals = []
    diff_doc_len_vals = []
    diff_global_vocab_vals = []
    scale = lin_scale  # np.log could be substituted here
    documents_vocab = defaultdict(list)
    documents_len = defaultdict(list)
    origin_doc_vocab = {}
    origin_doc_len = {}
    df_tuples = []
    for threshold, data in result.items():
        threshold_vals.append(float(threshold))
        if not orig_doc_len:
            # First record: remember the unfiltered baseline sizes.
            orig_doc_vocab_len = data['avg_vocab_size']
            orig_doc_len = data['avg_document_length']
            orig_global_vocab_len = data['global_vocab_size']
            original_corpus_length = sum([vals['document_length'] for doc_id, vals in data['document_sizes'].items()])
            relative_vocab = 1
            relative_doc_len = 1
            relative_global_vocab = 1
            relative_corpus_length = 1
        else:
            # Sizes relative to the baseline (1 == unchanged).
            relative_vocab = 1 - scale(orig_doc_vocab_len - data['avg_vocab_size']) / orig_doc_vocab_len
            relative_doc_len = 1 - scale(orig_doc_len - data['avg_document_length']) / orig_doc_len
            relative_global_vocab = 1 - scale(orig_global_vocab_len - data['global_vocab_size']) / orig_global_vocab_len
            relative_corpus_length = sum([vals['document_length']
                                          for doc_id, vals in data['document_sizes'].items()]) / original_corpus_length
        diff_vocab_vals.append(relative_vocab)
        diff_doc_len_vals.append(relative_doc_len)
        diff_global_vocab_vals.append(relative_global_vocab)
        global_vocab_vals.append(scale(data['global_vocab_size']))
        avg_vocab_size_vals.append(scale(data['avg_vocab_size']))
        avg_length_vals.append(scale(data['avg_document_length']))
        for doc_id, vals in data['document_sizes'].items():
            documents_vocab[doc_id].append(vals['vocab_size'])
            documents_len[doc_id].append(vals['document_length'])
            # print(origin_doc_vocab)
            if doc_id not in origin_doc_vocab:
                # First sighting of this document: record its baseline sizes.
                origin_doc_vocab[doc_id] = vals['vocab_size']
                origin_doc_len[doc_id] = vals['document_length']
                relative_vocab = 1
                relative_doc_len = 1
            else:
                relative_vocab = 1 - (origin_doc_vocab[doc_id] - vals['vocab_size']) / origin_doc_vocab[doc_id]
                relative_doc_len = 1 - (origin_doc_len[doc_id] - vals['document_length']) / origin_doc_len[doc_id]
            df_tuples.append((doc_id, threshold, relative_vocab, 'Vocabulary Size', relative_global_vocab,
                              relative_corpus_length))
            df_tuples.append((doc_id, threshold, relative_doc_len, 'Document Length', relative_global_vocab,
                              relative_corpus_length))
    df = pd.DataFrame(df_tuples, columns=['Document ID', 'Document Threshold', 'Relative Corpus Size', 'Value Type',
                                          'Global Vocab Size', 'Corpus Length'])
    print(df["Global Vocab Size"])
    violion_plot(df)
    documents_vocab_ls = [val for _, val in documents_vocab.items()]
    documents_len_ls = [val for _, val in documents_len.items()]
    b = []
    b.extend(documents_vocab_ls)
    b.extend(documents_len_ls)
    plot_many(threshold_vals,
              [diff_global_vocab_vals,
               # avg_vocab_size_vals,
               # avg_length_vals,
               diff_vocab_vals,
               diff_doc_len_vals
               ],
              labels=['Global Vocab Size',
                      # 'Avg Vocab Size',
                      # 'Avg Document Length',
                      'Diff Vocab',
                      'Diff Doc Len'
                      ], title=None)
    # Alternative plots kept commented out in the original:
    # plot_many(threshold_vals, [diff_vocab_vals, diff_doc_len_vals],
    #           labels=['Diff Vocab', 'Diff Doc Len'], title=None)
    # plot_many(threshold_vals, b, title=None)
    # print(threshold_vals)
    # plt.plot(threshold_vals, global_vocab_vals)
    # plt.show()
if __name__ == '__main__':
    # Run the full sweep, then plot the aggregated results.
    path_name = '../results/common_words_experiment/threshold_values'
    CommonWordsExperiment.filter_thresholds(path_name)
    plot_results(path_name)
| LasLitz/ma-doc-embeddings | experiments/common_words_experiment.py | common_words_experiment.py | py | 15,669 | python | en | code | 3 | github-code | 13 |
5309021695 | t = int(input())
for _ in range(t):
    a, b, c = map(int, input().split())
    # Idiom: min() takes any number of arguments (was two chained min calls).
    smallest = min(a, b, c)
    # Tie-breaking order preserved: a is checked first, then b, then c.
    if smallest == a:
        print("Draw")
    elif smallest == b:
        print("Bob")
    else:
        print("Alice")
5400078192 | import argparse
import json
import catalogue.bin
from catalogue.bibtex import decode
def main(args):
    """Concatenate the JSON entry lists from every given path and copy them
    to the clipboard as BibTeX."""
    entries = []
    for path in args.path:
        with open(path) as handle:
            entries += json.load(handle)
    catalogue.bin.pbcopy(decode(entries))
if __name__ == "__main__":
    # CLI: one or more JSON files whose entries are copied as BibTeX.
    parser = argparse.ArgumentParser(prog="copy_bibtex.py")
    parser.add_argument("path", help="JSON", nargs="+")
    main(parser.parse_args())
| wesselb/catalogue | copy_bibtex.py | copy_bibtex.py | py | 438 | python | en | code | 0 | github-code | 13 |
9746661084 | import pandas as pd
class HqCsv:
    """Wrap an OHLCV csv file and derive previous-day / relative-change columns.

    The resulting frame is date-reversed (newest row first) and carries a
    sequential `No` column.
    """

    def __init__(self, _ticker, csvFile):
        self._ticker = _ticker
        frame = pd.read_csv(csvFile, index_col=[0], parse_dates=False)
        # Previous-row values, then changes relative to the previous close.
        frame['PrevClose'] = frame.Close.shift(1)
        frame['PrevVolume'] = frame.Volume.shift(1)
        frame['VolChange'] = (frame.Volume - frame.PrevVolume) / frame.PrevVolume
        frame['HL'] = (frame.High - frame.Low) / frame.PrevClose
        frame['CP'] = (frame.Close - frame.PrevClose) / frame.PrevClose
        frame['OP'] = (frame.Open - frame.PrevClose) / frame.PrevClose
        frame['LP'] = (frame.Low - frame.PrevClose) / frame.PrevClose
        reversed_frame = frame.reindex(index=frame.index[::-1])  # reverse date order
        reversed_frame['No'] = range(len(reversed_frame.index))
        self.dataFrame = reversed_frame

    @property
    def ticker(self):
        """Ticker symbol this data belongs to."""
        return self._ticker

    @property
    def df(self):
        """The derived, date-reversed DataFrame."""
        return self.dataFrame

    @property
    def rows(self):
        """Positional row accessor (DataFrame.iloc)."""
        return self.dataFrame.iloc
if __name__ == "__main__":
    # Smoke test: load a ticker's csv resolved from hqrobot.json config
    # (two days back) and inspect the four most recent closes.
    import json
    from datetime import datetime, timedelta
    from hqrobot import CsvFolder, CsvFileName
    ticker = 'ATHX'
    day = (datetime.now() + timedelta(days=-2)).strftime("%Y%m%d")
    hqConf = json.load(open('hqrobot.json'))
    csvFolder = CsvFolder.format(hqConf['repo'], day)
    csvFile = CsvFileName.format(csvFolder, ticker)
    hqCsv = HqCsv(ticker, csvFile)
    idx0 = hqCsv.df.index[0]  # most recent date (frame is newest-first)
    print(idx0)
    df4 = hqCsv.df[0:4].Close.sort_values(ascending=False)
    # df4 = hqCsv.df[0:4].sort_values(by='Close', ascending=True)
    df4['No'] = range(len(df4.index))
    print(df4)
    print(df4.loc[idx0])
    # for row in hqCsv.rows:
    #     print(row.index)
    #     break
| jbtwitt/pipy | hq/HqCsv.py | HqCsv.py | py | 1,650 | python | en | code | 0 | github-code | 13 |
43860256545 | from sys import stdin
par = []  # parent pointers of the disjoint-set forest (rebuilt per test case)
set_size = 0  # number of disjoint sets remaining (decremented by union)
def initialize(size):
    """Create a forest of `size` singleton sets and reset the global counter.

    Idiom: list(range(size)) replaces the redundant [i for i in range(size)].
    """
    global set_size
    set_size = size
    return list(range(size))
def find(i):
    """Return the root representative of i (iterative, no path compression)."""
    while par[i] != i:
        i = par[i]
    return i
def union(a, b):
    """Merge the sets containing a and b; shrink the global set counter."""
    global set_size
    root_a = find(a)
    root_b = find(b)
    if root_a != root_b:
        par[root_a] = root_b
        set_size -= 1
# Read test cases from stdin and report the number of disjoint sets left.
t = int(input())
input()  # skip the blank line after the case count
for i in range(t):
    # First line of a case names the highest node letter, e.g. 'E' -> 5 nodes.
    size = ord(input()) - ord('A') +1
    par = initialize(size)
    for line in stdin:
        if line == '\n':
            # A blank line ends the current case's edge list.
            break
        else:
            # Each line holds two letters naming an edge's endpoints.
            union(ord(line[0])-65, ord(line[1])-65)
    print(set_size)
print('') | rezakrimi/ACM | UVaProblems/UVa459.py | UVa459.py | py | 605 | python | en | code | 0 | github-code | 13 |
16027410084 | # 길찾기
# SWEA 난이도 D4
# 0에서 99로 길 존재하는지
# stack dfs 로 구현해보자
def dfs(start, end, graph, visited):
    """Iterative depth-first search: 1 if `end` is reachable from `start`, else 0.

    graph: adjacency lists; visited: mutable per-node flag list.
    """
    stack = [start]
    while stack:
        node = stack.pop()
        if node == end:
            return 1
        if not visited[node]:
            visited[node] = True
            stack.extend(neighbor for neighbor in graph[node] if not visited[neighbor])
    return 0
# t = int(input())
t = 10  # fixed number of test cases in this problem's input format
for tc in range(1, t+1):
    n, m = map(int, input().split())  # n: unused here, m: number of edges
    s = list(map(int, input().split()))  # flat list of m (from, to) pairs
    graph = [[]for _ in range(100)]  # adjacency lists for nodes 0..99
    for i in range(m):
        a, b = s[i*2], s[i*2+1]
        graph[a].append(b)
    visited = [False for _ in range(100)]
    print(f'#{tc} {dfs(0, 99, graph, visited)}')
print(f'#{tc} {dfs(0, 99, graph, visited)}') | joonann/ProblemSolving | python/202308/11/길찾기.py | 길찾기.py | py | 800 | python | en | code | 0 | github-code | 13 |
8536454102 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
'''
Created on Oct 30, 2017
@author: hadoop
'''
from utils.emails import send_email
from drawing.drawing_utils import draw_stock_with_multi_periods
import os
if __name__ == '__main__':
    # Render one multi-period chart, email it, then delete the temp file.
    file_lst = []
    code_id = "399300"
    fname = "/home/hadoop/" + code_id + "-month-week" + ".png"
    draw_stock_with_multi_periods(code_id, ("M", "W"), fname)
    file_lst.append(fname)
    #
    # fname = "/home/hadoop/" + code_id + "-60F-30F" + ".png"
    # draw_stock_with_multi_periods(code_id, ("60", "30"), fname)
    # file_lst.append(fname)
    # fname = "/home/hadoop/" + code_id + "-15F-5F" + ".png"
    # draw_stock_with_multi_periods(code_id, ("15", "5"), fname)
    # file_lst.append(fname)
    send_email("jliu@infinera.com", "TEST", "This is a test email, just ignore it", file_lst)
    # Clean up the generated chart files after sending.
    for f in file_lst:
        os.remove(f)
# Build the grade book, then apply one insert, one update and one delete.
score = {'001': 96, '002': 98, '003': 92, '004': 93, '005': 94}
score['006'] = 100
score['002'] = 99
del score['001']
print(score['004'])
# Use the builtins over score.values() instead of a hand-rolled loop; the
# original also shadowed the builtins `max` and `min` with its accumulators.
highest = max(score.values())
lowest = min(score.values())
total = sum(score.values())
print("最大值:", highest, "最小值:", lowest, "平均值:", total/len(score))
30599696915 | """This is a exercise from https://exercism.io/my/tracks/python"""
from enum import Enum
from textwrap import indent
class Vector(list):
    """Adds a 2D-orientation (row or column) to a list. This is useful when
    dealing with matrices."""

    def __init__(self, *args, axis=None):
        # axis must be one of Matrix2D.Axes (ROW or COLUMN).
        if axis not in Matrix2D.Axes:
            raise ValueError("{} is not valid `axis`, accepted values are:".format(Matrix2D.Axes))
        list.__init__(self, *args)
        self.axis = axis
        # Shape convention: (0, n) for a row-vector, (n, 0) for a column-vector.
        self.shape = (0, len(self)) if self.is_row else (len(self), 0)

    @property
    def is_row(self):
        """True if this is a row-vector"""
        return self.axis == Matrix2D.Axes.ROW

    @property
    def is_column(self):
        """True if this is a column-vector"""
        return self.axis == Matrix2D.Axes.COLUMN

    def __repr__(self):
        # Rendering is delegated to Matrix2D.repr — presumably defined further
        # down the file (not visible in this chunk); TODO confirm.
        if self.is_row:
            return Matrix2D.repr([self])
        return Matrix2D.repr(zip(self))
class Matrix2D:
    """A 2D-matrix, built from a list of lists.

    Supports transposition (`T`), reductions (`min`/`max`/`reduce`),
    element-wise mapping (`map`), `match`-based comparison against
    matrices, vectors and scalars, and element-wise boolean combination
    via `&` and `|`.
    """
    class Axes(Enum):
        """Give the user a way to name a 2D-matrix Axes"""
        ROW = 1
        COLUMN = 2
    def __init__(self, data):
        """Wrap `data`, a rectangular list of lists.

        Raises ValueError when the rows do not all share the same length.
        """
        self.shape = Matrix2D._shape_raise_for_error(data)
        self._data = data
        self._transpose = None  # computed lazily by the `T` property
    @property
    def data(self):
        """Returns a copy of the matrix raw content.

        Bug fix: this used to `return self[:]`, which routes through
        `__getitem__` and always raised ValueError because that method
        only accepts a (row, column) pair. Return a row-wise copy instead.
        """
        return [row[:] for row in self._data]
    @property
    def T(self):
        """Returns the transposed matrix (cached after the first call)."""
        if self._transpose is None:
            self._transpose = Matrix2D(list(map(list, zip(*self))))
        return self._transpose
    def reduce(self, function=None, axis=None):
        """Reduce the matrix using `function`. The optional parameter `axis`
        allows to reduce only along the given axis.
        If no `axis` is given, then this method returns a scalar.
        Reminder: reducing a matrix with shape (R,C) on the row axis, result in returning
        a column vector of size (0, C). Reciprocally a row vector of size (R, 0)
        is returned when reducing columns.
        """
        if axis is not None and axis not in Matrix2D.Axes:
            raise ValueError("{} is not valid `axis`, accepted values are:".format(Matrix2D.Axes))
        if function is None:
            raise ValueError("You must provide a reduction function array => number|boolean|string|... .")
        if axis:
            # Row reduction consumes rows of `self`; column reduction
            # consumes rows of the transpose. The result vector is
            # oriented along the *other* axis.
            if axis is Matrix2D.Axes.ROW:
                matrix = self
                vector_axis = Matrix2D.Axes.COLUMN
            else:
                matrix = self.T
                vector_axis = Matrix2D.Axes.ROW
            return Vector([function(v) for v in matrix], axis=vector_axis)
        # No axis: reduce the flattened matrix to a scalar.
        return function([elem for row in self for elem in row])
    def min(self, axis=None):
        """Simply a shorthand for matrix.reduce(function=min, axis=axis)."""
        return self.reduce(function=min, axis=axis)
    def max(self, axis=None):
        """Simply a shorthand for matrix.reduce(function=max, axis=axis)."""
        return self.reduce(function=max, axis=axis)
    def map(self, function):
        """Returns a matrix in which `function` has been applied to all element.
        `function` will be called with three arguments (on each cell):
        - the `cell` value,
        - the row index and the
        - the column index.
        """
        return Matrix2D([[function(cell, i, j) for j, cell in enumerate(row)] for i, row in enumerate(self)])
    def where(self, condition=lambda x: x):
        """Returns a generator that goes over (i, j) indexes of cells that
        satisfy `condition` (default: truthiness)."""
        for i, row in enumerate(self):
            for j, cell in enumerate(row):
                if condition(cell):
                    yield (i, j)
    def all(self, condition=lambda x: x):
        """Return `True` if all cells verify `condition`, notice that the default value for `condition`
        is the identity function."""
        for row in self:
            for cell in row:
                if not condition(cell):
                    return False
        return True
    def match(self, other):
        """`Matching` is some sort of equality with three distinct behaviours:

        - matrix vs matrix: returns a single True/False;
        - matrix vs row/column Vector: returns a boolean matrix marking
          cells equal to the vector element of their column/row;
        - matrix vs scalar (int/float/bool/str): returns a boolean matrix
          marking cells equal to the scalar.

        Plain lists are rejected with a hint to use `Vector`; any other
        type simply doesn't match (returns False).
        """
        if isinstance(other, Matrix2D):
            if self.shape != other.shape:
                raise ValueError("Can't match matrices that have different shapes.")
            return self.map(lambda cell, i, j: cell == other[i, j]).all()
        if isinstance(other, Vector):
            is_shape_ok = (other.is_column and self.shape[0] == other.shape[0])
            is_shape_ok = is_shape_ok or (other.is_row and self.shape[1] == other.shape[1])
            if not is_shape_ok:
                raise ValueError("Matrix and vector have incompatible shapes.")
            return self.map(lambda cell, i, j: cell == other[j if other.is_row else i])
        if isinstance(other, (int, float, bool, str)):
            return self.map(lambda cell, i, j: cell == other)
        if isinstance(other, list):
            raise ValueError("You can't use a list with '==', but you can use a `Vector` instead.")
        return False
    def __eq__(self, other):
        """Equality delegates to `match` (see its docstring).

        Bug fix: the original called `self.match(other)` without
        returning it, so every `==` comparison evaluated to None.
        """
        return self.match(other)
    def _binary_operation(self, other, operation=None):
        """Apply a two-argument `operation` cell-by-cell to self and other."""
        if not operation:
            raise ValueError("Please provide a function.")
        return Matrix2D([[operation(a, b) for (a, b) in zip(*rows)] for rows in zip(self, other)])
    def __and__(self, other):
        """Element-wise `&` of two same-shaped matrices."""
        return self._binary_operation(other, lambda a, b: a & b)
    def __or__(self, other):
        """Element-wise `|` of two same-shaped matrices (mirrors `__and__`;
        previously missing, which made `matrix_a | matrix_b` raise TypeError)."""
        return self._binary_operation(other, lambda a, b: a | b)
    def __iter__(self):
        # Iterating a matrix yields its rows.
        return self._data.__iter__()
    def __next__(self):
        # NOTE(review): list objects have no __next__, so calling this
        # raises AttributeError; it appears to be unused. Kept as-is.
        return self._data.__next__()
    def __getitem__(self, index):
        """Pair indexing only: matrix[i, j] returns one cell."""
        if not isinstance(index, tuple) or len(index) != 2:
            raise ValueError("2D-matrix index should be a pair.")
        return self._data[index[0]][index[1]]
    @staticmethod
    def _cell_repr(cell):
        # Booleans render as O/. so boolean "match" matrices are readable.
        if cell is True:
            return 'O'
        if cell is False:
            return '.'
        return str(cell)
    @staticmethod
    def _shape_raise_for_error(data):
        """Return (rows, cols) of `data`, raising when rows are ragged."""
        shape = (len(data), len(data[0]) if data else 0)
        for row in data:
            if len(row) != shape[1]:
                raise ValueError("Matrix shape doesn't appear to be correct.")
        return shape
    def __repr__(self):
        return Matrix2D.repr(self)
    @staticmethod
    def repr(matrix_like):
        """String representation of any 2D-matrix-shaped object."""
        cell_sep = " "
        row_sep = " ] \n"
        rows_text = map(lambda x: cell_sep.join(map(Matrix2D._cell_repr, x)), matrix_like)
        matrix_text = "{} ] \n".format(row_sep.join(rows_text))
        return indent(matrix_text, " [ ")
def saddle_points(data):
    """Locate every "saddle point" of `data` and return their indexes.

    A saddle point is greater than or equal to every element in its row
    and less than or equal to every element in its column. Prints a
    step-by-step visual explanation along the way.
    """
    matrix = Matrix2D(data)
    # Per-row maxima and per-column minima as oriented vectors.
    row_maxes = matrix.max(axis=Matrix2D.Axes.ROW)
    col_mins = matrix.min(axis=Matrix2D.Axes.COLUMN)
    # Boolean matrices flagging cells that hit their row max / column min.
    hits_row_max = matrix.match(row_maxes)
    hits_col_min = matrix.match(col_mins)
    # A saddle point is a cell flagged in both matrices.
    is_saddle = hits_row_max & hits_col_min
    saddle_indexes = set(is_saddle.where())
    print("Input matrix is: \n{}".format(matrix))
    print("Row's max locations:")
    print(same_line(str(matrix), " match ", str(row_maxes), ' = ', str(hits_row_max)))
    print("Columns's min locations:")
    print(same_line(str(matrix), " match ", str(col_mins), ' = ', str(hits_col_min)))
    print("Saddle points locations:")
    print(same_line(str(hits_row_max), " & ", str(hits_col_min), ' = ', str(is_saddle)))
    print("Saddle points are at indexes: \n{}".format(saddle_indexes))
    return saddle_indexes
def same_line(*strings, ghost=None):
    """Concatenate multi-line strings side by side on shared lines.

    Each input is split into lines and laid out as a fixed-width column
    (padded to its widest line). Indexes listed in `ghost` are blanked
    out — replaced by spaces of identical width — but still occupy their
    column, which lets callers align successive printouts.
    """
    blocks = [text.split('\n') for text in strings]
    # Blank out the "ghost" blocks while preserving their footprint.
    if ghost:
        for idx in ghost:
            blocks[idx] = [" " * len(row) for row in blocks[idx]]
    widths = [max(len(row) for row in rows) for rows in blocks]
    height = max(len(rows) for rows in blocks)
    out_lines = []
    for row_idx in range(height):
        pieces = []
        for col_idx, rows in enumerate(blocks):
            # Blocks shorter than the tallest contribute a lone space,
            # then get padded to the column width like any other row.
            cell = rows[row_idx] if row_idx < len(rows) else " "
            pieces.append(cell + " " * (widths[col_idx] - len(cell)))
        out_lines.append("".join(pieces))
    return "\n".join(out_lines) + "\n"
def test_reduce_and_eq():
    """Visual demo of axis reductions and the `match` comparison."""
    matrix = Matrix2D([
        [9, 8, 7],
        [5, 3, 2]
    ])
    # The idea is to test wether a matrix "match" a given matrix/vector/scalar
    # to be the most general as possible. This way the user can even create
    # its own matrix/vector/scalar, and then try to "match" it with a matrix.
    #
    # "Matching" is some sort of equality such that:
    #
    # [1 2 3] match [2 1 3] = False
    # [4 5 6] [4 5 7]
    #
    # [1 2 3] match [1 2 3] = True
    # [4 5 6] [4 5 6]
    #
    # [1 2 3] match [1 5 3] = [O . O]
    # [4 5 6] [. O .]
    #
    # [1 2 3] match [1] = [O . .]
    # [4 5 6] [5] [. O .]
    #
    # [1 2 3] match 3 = [. . O]
    # [4 5 6] [. . .]
    #
    # O means true, . means false (either it matched the vector/scalar or not)
    # Match against the column vector of per-row minima.
    row_mins = matrix.min(axis=Matrix2D.Axes.ROW)
    print(same_line(str(matrix), " match ", str(row_mins), ' = ', str(matrix.match(row_mins))))
    # Match against the row vector of per-column maxima; ghost=[0] blanks
    # the (repeated) matrix column so printouts stay visually aligned.
    col_maxs = matrix.max(axis=Matrix2D.Axes.COLUMN)
    print(same_line(str(matrix), " match ", str(col_maxs), ' = ', str(matrix.match(col_maxs)), ghost=[0]))
    # Match against a scalar (the overall minimum).
    overall_min = matrix.min()
    print(same_line(str(matrix), " match ", str(overall_min), ' = ', str(matrix.match(overall_min)), ghost=[0]))
    # Double transpose should match the original matrix exactly.
    print(same_line(str(matrix), " match ", str(matrix.T.T), ' = ', str(matrix.match(matrix.T.T)), ghost=[0]))
def test_and_operator():
    """Visual demo of element-wise boolean combination of matrices.

    NOTE(review): this relies on Matrix2D supporting both `&` and `|`
    — verify that `__or__` is defined before enabling this demo.
    """
    matrix_A = Matrix2D([
        [True, False, True],
        [False, True, True]
    ])
    matrix_B = Matrix2D([
        [False, True, False],
        [False, True, True]
    ])
    print(same_line(str(matrix_A), " AND ", str(matrix_B), ' = ', str(matrix_A & matrix_B)))
    print(same_line(str(matrix_A), " OR ", str(matrix_B), ' = ', str(matrix_A | matrix_B)))
if __name__ == "__main__":
    # Run the saddle-point demo on a small 3x3 matrix; the other demos
    # below can be enabled by uncommenting them.
    saddle_points([
        [9, 8, 7],
        [5, 3, 2],
        [6, 6, 7]
    ])
    # test_reduce_and_eq()
    #test_and_operator()
| cglacet/exercism-python | saddle-points/complete_saddle_points.py | complete_saddle_points.py | py | 11,518 | python | en | code | 5 | github-code | 13 |
34059061313 | import cv2
import numpy as np
from PIL import Image
from io import BytesIO
import base64
# Read the image and normalise it to a fixed working size.
img = cv2.imread('image.jpeg')
img = cv2.resize(img, (640, 480))
# Define the polyline vertices (x, y).
# NOTE(review): (150, 200) appears twice (once with a stray trailing
# comma) producing zero-length segments, and (850, 350) lies outside
# the 640x480 image — confirm these points are intentional.
points = [(150, 50),(150,200,),(190,200),(150, 200), (200, 150), (850, 350)]
# Draw a line through all the points, segment by segment.
color = (0, 0, 255) # BGR color format (red)
thickness = 5
for i in range(len(points)-1):
    pt1 = points[i]
    pt2 = points[i+1]
    cv2.line(img, pt1, pt2, color, thickness)
# Convert the image to JPEG format and encode it as base64.
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # Convert to RGB format for PIL
pil_img = Image.fromarray(img)
buffer = BytesIO()
pil_img.save(buffer, format='JPEG')
img_str = base64.b64encode(buffer.getvalue()).decode('utf-8')
# Embed the base64-encoded image in an HTML page via a data URI.
html = f"<img src='data:image/jpeg;base64,{img_str}'>"
with open('output.html', 'w') as f:
    f.write(html)
10976074278 | from output.base import Output
from output.console import ConsoleOutput, TableConsoleOutput
from output.file import CSVFileOutput, JSONFileOutput, YAMLFileOutput
# Registry mapping CLI option names to their Output implementations.
_outputs = {
    "console": ConsoleOutput,
    "tableconsole": TableConsoleOutput,
    "jsonfile": JSONFileOutput,
    "yamlfile": YAMLFileOutput,
    "csvfile": CSVFileOutput,
}
def get_output(option: str = "tableconsole") -> Output:
    """Instantiate and return the Output registered under `option`.

    Raises KeyError for unknown option names.
    """
    return _outputs[option]()
| pedrolp85/python_basics | nba_cli_project/output/defaults.py | defaults.py | py | 430 | python | en | code | 0 | github-code | 13 |
5748094252 | #!/usr/bin/python3
''' base.py module file. '''
class Base:
    """Base model class: hands out an id to every instance.

    When no explicit id is given, a private class-level counter is
    incremented and used, so auto-assigned ids are 1, 2, 3, ...
    """
    __nb_objects = 0
    def __init__(self, id=None):
        """Store `id`, or auto-assign the next counter value when None."""
        if id is None:
            Base.__nb_objects += 1
            self.id = Base.__nb_objects
        else:
            self.id = id
| Dragonkuro2/alx-higher_level_programming | 0x0C-python-almost_a_circle/models/base.py | base.py | py | 342 | python | en | code | 1 | github-code | 13 |
31013973053 | import pandas as pd
import numpy as np
import string
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import time
import xgboost as xgb
from sklearn.metrics import mean_squared_error #RMSE
from math import sqrt
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.feature_selection import mutual_info_regression
from sklearn.feature_selection import SelectKBest
# Load the raw daily sales records and derive calendar features.
Traindata=pd.DataFrame(pd.read_csv(r'C:\Users\朝花夕拾\Desktop\机器学习\kaggle\Predict Future Sales\sales_train.csv'))
#print(Traindata.info())
Traindata['date']=pd.to_datetime(Traindata['date'])
Traindata['year']=Traindata.date.dt.year
Traindata['month'] = Traindata.date.dt.month
Traindata['day'] = Traindata.date.dt.day
# Exploratory plots (disabled): total sales per year, monthly sales per
# year, and monthly sales per shop for 2013-2015.
'''sale_and_year=Traindata[['item_cnt_day','year']].groupby(['year']).count().plot(kind='bar')
plt.title('sale in years')
plt.show()
# 不同年份的月销售额
# 2013
Traindata_2013=Traindata[Traindata['year']==2013]
sale_and_month=Traindata_2013[['item_cnt_day','month']].groupby(['month']).count().plot(kind='bar')
plt.title('12months’ sale in 2013')
plt.show()
# 2014
Traindata_2014=Traindata[Traindata['year']==2014]
sale_and_month=Traindata_2014[['item_cnt_day','month']].groupby(['month']).count().plot(kind='bar')
plt.title('12months’ sale in 2014')
plt.show()
# 2015
Traindata_2015=Traindata[Traindata['year']==2015]
sale_and_month=Traindata_2015[['item_cnt_day','month']].groupby(['month']).count().plot(kind='bar')
plt.title('12months’ sale in 2015')
plt.show()
# 2013年不同商店的月销售额
Traindata_2013=Traindata[Traindata['year']==2013]
shop_and_sale=Traindata_2013[['shop_id','month','item_cnt_day']].groupby(['shop_id','month']).count().reset_index()
#print(shop_and_sale)
shop_and_sale_pivot=shop_and_sale.pivot(index='shop_id',columns='month',values='item_cnt_day')
#print(shop_and_sale_pivot)
shop_and_sale_pivot.plot(kind='bar')
plt.show()
# 2014
Traindata_2014=Traindata[Traindata['year']==2014]
shop_and_sale=Traindata_2014[['shop_id','month','item_cnt_day']].groupby(['shop_id','month']).count().reset_index()
#print(shop_and_sale)
shop_and_sale_pivot=shop_and_sale.pivot(index='shop_id',columns='month',values='item_cnt_day')
#print(shop_and_sale_pivot)
shop_and_sale_pivot.plot(kind='bar')
plt.show()
# 2015
Traindata_2015=Traindata[Traindata['year']==2015]
shop_and_sale=Traindata_2015[['shop_id','month','item_cnt_day']].groupby(['shop_id','month']).count().reset_index()
#print(shop_and_sale)
shop_and_sale_pivot=shop_and_sale.pivot(index='shop_id',columns='month',values='item_cnt_day')
#print(shop_and_sale_pivot)
shop_and_sale_pivot.plot(kind='bar')
plt.show()'''
# Map each item to its category id (items.csv -> item_id: category_id).
items=pd.DataFrame(pd.read_csv(r'C:\Users\朝花夕拾\Desktop\机器学习\kaggle\Predict Future Sales\items.csv'))
items=items.drop(['item_name'],axis=1,inplace=False)
items=items.set_index('item_id').to_dict(orient='dict')
Traindata['category']=Traindata['item_id'].map(items['item_category_id'])
'''sale_and_category=Traindata[['item_cnt_day','category']].groupby(['category']).count().plot(kind='bar')
plt.title('sale in categories')
plt.show()'''
# Categories with more than 100k units sold (disabled plot).
'''sale_and_category=Traindata[['item_cnt_day','category']].groupby(['category']).count()
top=sale_and_category[sale_and_category['item_cnt_day']>=100000].plot(kind='bar')
plt.title('sale over 10w in categories')
plt.show()'''
#print(top)
# Mean price per item category (disabled plot).
'''prices_and_category=Traindata[['category','item_price']].groupby(['category']).mean().plot(kind='bar')
plt.title('mean prices in categories')
plt.show()'''
'''fig,axes = plt.subplots()
prices_and_category.boxplot(by='category',ax=axes)
temp=Traindata.item_price[Traindata['category']==12]
print(temp.describe())'''
# Data cleaning.
#Traindata=Traindata.drop(Traindata[Traindata['item_cnt_day']])
'''pd.DataFrame(Traindata['item_price']).boxplot()
plt.show()'''
# Clean prices: drop non-positive and extreme (>=30000) prices.
#print(pd.DataFrame(Traindata[Traindata.item_price>=30000]).info())
Traindata=Traindata.drop(Traindata[Traindata.item_price<=0].index | Traindata[Traindata.item_price>=30000].index)
'''pd.DataFrame(Traindata['item_price']).boxplot()
plt.show()'''
# Clean daily counts: drop heavy returns (<-10) and outlier sales (>=500).
#print(pd.DataFrame(Traindata[Traindata['item_cnt_day']<-10]).info())
Traindata=Traindata.drop(Traindata[Traindata.item_cnt_day<-10].index | Traindata[Traindata.item_cnt_day>=500].index)
'''pd.DataFrame(Traindata['item_cnt_day']).boxplot()
plt.show()'''
# Mean price per (shop, category).
mean_price=pd.pivot_table(Traindata,index=['shop_id','category'],values=['item_price'],aggfunc=[np.mean],fill_value=0).reset_index()
mean_price.columns=['shop_id','category','mean']
## Sales aggregates used as model features.
# Mean monthly sales per shop.
month_sale=pd.pivot_table(Traindata,index=['shop_id','date_block_num'],values=['item_cnt_day'],aggfunc=[np.mean],fill_value=0) .reset_index()
month_sale.columns=['shop_id','month_id','saleOn_month']
# Mean monthly sales per category.
month_category=pd.pivot_table(Traindata,index=['category','date_block_num'],values=['item_cnt_day'],aggfunc=[np.mean],fill_value=0) .reset_index()
month_category.columns=['category','month_id','month_category']
# Mean yearly sales per shop.
year_saleonShop=pd.pivot_table(Traindata,index=['shop_id','year'],values=['item_cnt_day'],aggfunc=[np.mean],fill_value=0) .reset_index()
year_saleonShop.columns=['shop_id','year','year_saleonShop']
# Mean yearly sales per category.
year_saleonCategory=pd.pivot_table(Traindata,index=['category','year'],values=['item_cnt_day'],aggfunc=[np.mean],fill_value=0) .reset_index()
year_saleonCategory.columns=['category','year','year_saleonCategory']
# Mean monthly sales per item.
month_item=pd.pivot_table(Traindata,index=['item_id','date_block_num'],values=['item_cnt_day'],aggfunc=[np.mean],fill_value=0) .reset_index()
month_item.columns=['item_id','month_id','month_item']
# Total monthly sales per (item, shop).
month_itemandshop=pd.pivot_table(Traindata,index=['item_id','shop_id','date_block_num'],values=['item_cnt_day'],aggfunc=[np.sum],fill_value=0) .reset_index()
month_itemandshop.columns=['item_id','shop_id','month_id','month_itemandshop']
#
# Target table: monthly sales per (shop, item), then join all feature frames.
train_data1= pd.pivot_table(Traindata,index=['shop_id','item_id','date_block_num','year','category'], values=['item_cnt_day'], aggfunc=[np.sum],fill_value=0).reset_index()
train_data1.columns=['shop_id','item_id','month_id','year','category','item_cnt_month']
combined_data=pd.merge(train_data1,mean_price,on=['shop_id','category'])
combined_data=pd.merge(combined_data,month_sale,on=['shop_id','month_id'])
combined_data=pd.merge(combined_data,month_category,on=['category','month_id'])
combined_data=pd.merge(combined_data,year_saleonShop,on=['shop_id','year'])
combined_data=pd.merge(combined_data,year_saleonCategory,on=['category','year'])
combined_data=pd.merge(combined_data,month_item,on=['item_id','month_id'])
combined_data=pd.merge(combined_data,month_itemandshop,on=['item_id','shop_id','month_id'])
#print(combined_data.info())
#print(train_data1.info())
'''from scipy.stats import pearsonr
print(pearsonr(combined_data['item_cnt_month'],combined_data['mean']))'''
# Baseline model: XGBoost regression on the engineered features.
X=combined_data[['shop_id','item_id','month_id','year','category','mean','saleOn_month','month_category','year_saleonShop','year_saleonCategory','month_item','month_itemandshop']]
y=combined_data['item_cnt_month']
#print(mutual_info_regression(X,y))
'''selector=SelectKBest(score_func=mutual_info_regression,k=5)
selector.fit(X,y)
print(selector.scores_)
print(selector.pvalues_)'''
X_train,X_test,y_train,y_test=train_test_split(X,y,random_state=0,train_size=0.8)
print(",训练数据特征:",X_train.shape,
",测试数据特征:",X_test.shape)
print(",训练数据标签:",y_train.shape,
',测试数据标签:',y_test.shape )
model = xgb.XGBRegressor(max_depth=4, colsample_btree=0.1, learning_rate=0.1, n_estimators=32, min_child_weight=2);
xgb_starttime=time.time()
model.fit(X_train, y_train)
costtime=time.time()-xgb_starttime
pred=model.predict(X_test)
#print(pred)
# Evaluate with RMSE and report wall-clock training time.
rms=sqrt(mean_squared_error(y_test,pred))
print(rms)
print('花费时间:',costtime)
'''fpr,tpr=roc_curve(y_test,pred)
print('AUC:',auc(fpr,tpr))
print('花费时间:',costtime)'''
# Build the Kaggle test frame for month 34 (November 2015).
Testdata=pd.DataFrame(pd.read_csv(r'C:\Users\朝花夕拾\Desktop\机器学习\kaggle\Predict Future Sales\test.csv'))
Testdata['year']=2015
Testdata['month']=34
Testdata=pd.merge(Testdata,items,on=['item_id'])
#print(Testdata.info())
| shanggangli/kaggle-Predict-Future-Sales | Predict-Future-Sales.py | Predict-Future-Sales.py | py | 8,565 | python | en | code | 1 | github-code | 13 |
40210658220 | from src.graphics import run_example
import pygame, sys
from pygame.locals import *
from math import copysign
from typing import List
import random
# Tolerance used to skip intersections that (numerically) already happened.
epsilon = .00000000001
class Entity:
    """A point mass moving at constant velocity along one axis."""
    def __init__(self, xi, mass=1, v=0, id=None):
        """
        :param xi: The initial x position. This is the position at t=0
            and is only rebased when a collision re-anchors the
            trajectory — never updated tick by tick.
        :param mass: Mass used by the elastic-collision formulas.
        :param v: Constant velocity along the axis.
        :param id: Optional identifier; a random one is drawn when None.
        """
        self.xi = xi
        self.mass = mass
        self.v = v
        # Random display colour so each entity is distinguishable on screen.
        self.color = [random.randrange(256) for _ in range(3)]
        self.id = random.randrange(1000) if id is None else id
    def position(self, t):
        """Position at time t under constant-velocity motion."""
        return self.xi + self.v * t
    def intersect(self, entity):
        """Delegate to the module-level intersect()."""
        return intersect(self, entity)
    def collide(self, entity):
        """Delegate to the module-level collide()."""
        return collide(self, entity)
def intersect(a: Entity, b: Entity):
    """Return (t, x) where the trajectories of `a` and `b` cross, or None.

    None means the entities never meet: either they travel at the same
    speed (parallel trajectories) or they are moving apart.
    """
    gap = b.xi - a.xi
    closing = a.v - b.v
    # Equal speeds: parallel trajectories, no intersection.
    if closing == 0:
        return None
    # Moving apart: the entity in front is also the faster one.
    if gap > 0 and b.v > a.v:
        return None
    if gap <= 0 and a.v > b.v:
        return None
    t = gap / closing
    return t, a.position(t)
def collide(a: Entity, b: Entity):
    """Resolve a 1-D elastic collision between `a` and `b`.

    Updates both entities' velocities with the standard elastic-collision
    formulas and rebases their initial positions so each new trajectory
    passes through the collision point. Returns a Collision record.
    """
    t, x_curr = intersect(a, b)
    total = a.mass + b.mass
    # Post-collision velocities for a perfectly elastic collision.
    vaf = (a.mass - b.mass) / total * a.v + (b.mass + b.mass) / total * b.v
    vbf = (b.mass - a.mass) / total * b.v + (a.mass + a.mass) / total * a.v
    for ent, vf in ((a, vaf), (b, vbf)):
        ent.v = vf
        # Re-anchor xi so position(t) still yields x_curr at the collision time.
        ent.xi = x_curr - vf * t
    return Collision(t, x_curr, a, b)
def intersects(entities: List[Entity]):
    """Generator yielding the next batch of earliest intersections.

    Each yielded item is a (time, a, b) tuple. On every pass, all pairs
    are scanned and only the intersections sharing the earliest time are
    yielded; `last_time` + epsilon filters out events already handled.
    The generator ends when no future intersection remains.
    """
    last_time = -1
    while True:
        results = []
        for a in entities:
            # Inner loop breaks at `a`, so each unordered pair is
            # visited exactly once (b always precedes a in the list).
            for b in entities:
                if a == b:
                    break
                sect = a.intersect(b)
                # TODO dont return any sects which have already happened
                if sect is not None:
                    # TODO:
                    # this time detection is not perfectly accurate, it's more like a heuristic to eliminate most
                    # redundant collisions
                    if sect[0] <= last_time + epsilon:
                        continue
                    # Keep only the earliest-time intersections found so far.
                    if len(results) == 0:
                        results.append((sect[0], a, b))
                    elif results[0][0] == sect[0]:
                        results.append((sect[0], a, b))
                    elif results[0][0] > sect[0]:
                        results = [(sect[0], a, b)]
        if len(results) == 0:
            return
        for result in results:
            print(f'yielding {result}')
            yield result
        last_time = results[0][0]
class Scene:
    """A 1-D physics scene rendered with pygame.

    Entities move along the x axis; collisions are pre-computed lazily
    by the intersects() generator and applied as simulated time passes.
    """
    def __init__(self):
        # Entities participating in the simulation, in insertion order.
        self.entities: List[Entity] = []
    def add(self, entity: Entity):
        """Register an entity with the scene before run() is called."""
        self.entities.append(entity)
    def run(self):
        """Open a pygame window and run the simulation until quit/Escape."""
        screen_size = (256, 256)
        title = "physics sim"
        bg_color = (0, 0, 0)
        pygame.init()
        display_surf = pygame.display.set_mode(screen_size)
        pygame.display.set_caption(title)
        clock = pygame.time.Clock()
        collision_count = 0
        t = 0  # simulated time in seconds
        # Lazily pull upcoming intersections; next_sect is the next one due.
        intersect_iterator = intersects(self.entities)
        try:
            next_sect = next(intersect_iterator)
        except StopIteration:
            next_sect = None
        while True:
            # Advance simulated time by the real frame duration (30 FPS cap).
            t += clock.tick(30) / 1000
            for event in pygame.event.get():
                if event.type == QUIT:
                    pygame.quit()
                    sys.exit()
                if event.type == KEYDOWN:
                    if event.key == K_ESCAPE:
                        pygame.quit()
                        sys.exit()
            # calculate next intersects
            # handle possible collisions whose time has now passed
            while next_sect is not None and next_sect[0] <= t:
                collision = next_sect[1].collide(next_sect[2])
                collision_count += 1
                print(f"collision_count={collision_count}")
                print(collision)
                print(next_sect[1], next_sect[2])
                try:
                    next_sect = next(intersect_iterator)
                except StopIteration:
                    next_sect = None
            # for a in self.entities:
            #     for b in self.entities:
            #         if a == b:
            #             break
            #         sect = a.intersect(b)
            #         if sect is not None and t > sect[0]:
            #             print(f'colliding {a.id} and {b.id}')
            #             a.collide(b)
            # clear screen
            display_surf.fill(bg_color)
            # draw objects to screen (5x5 squares at fixed height y=50)
            for entity in self.entities:
                pygame.draw.rect(display_surf, entity.color, pygame.Rect(entity.position(t), 50, 5, 5))
            # update screen
            pygame.display.flip()
class Collision:
    """Record of a collision event: its time, position and participants.

    TODO: implement this for better abstraction.
    """
    def __init__(self, t, x, a: Entity, b: Entity):
        """Store collision time `t`, position `x` and the entity pair."""
        self.t = t
        self.x = x
        self.entities = (a, b)
    def __str__(self):
        ids = f"{self.entities[0].id}/{self.entities[1].id}"
        return f"t={self.t}, x={self.x}, ids=({ids})"
def example_1():
    """Two equal-mass entities: one moving right into one at rest."""
    scene = Scene()
    a = Entity(50, 1, 30)
    b = Entity(150, 1, 0)
    scene.add(a)
    scene.add(b)
    scene.run()
def example_2():
    """Ten entities with random mass, velocity and starting position."""
    scene = Scene()
    for _ in range(10):
        mass = random.randint(50, 100)
        v = random.randrange(-50, 50)
        xi = random.randrange(256)
        scene.add(Entity(xi, mass, v))
    scene.run()
def example_3():
    """A near-immovable wall, a light particle, and a heavy incoming mass."""
    scene = Scene()
    scene.add(Entity(5, 100000000000000, 0, 1))
    scene.add(Entity(100, 1, 0, 2))
    scene.add(Entity(200, 1000000, -60, 3))
    scene.run()
def main():
    """Entry point: run the wall/light-particle/heavy-mass demo."""
    example_3()
if __name__ == "__main__":
    main()
| samsonjj/1d-physics-python | src/main.py | main.py | py | 6,074 | python | en | code | 0 | github-code | 13 |
21473527589 | visitors = int(input())
# Tally gym-visitor activities read from stdin: four workout types and
# two purchases (`visitors` above gives the number of input lines).
counter_back = 0
counter_chest = 0
counter_legs = 0
counter_abs = 0
counter_protein_shake = 0
counter_protein_bar = 0
counter_train = 0  # total workout activities
counter_buy = 0    # total purchases
for i in range(0, visitors):
    acts = input()
    if acts == 'Back':
        counter_back += 1
        counter_train += 1
    elif acts == 'Chest':
        counter_chest += 1
        counter_train += 1
    elif acts == 'Legs':
        counter_legs += 1
        counter_train += 1
    elif acts == 'Abs':
        counter_abs += 1
        counter_train += 1
    elif acts == 'Protein shake':
        counter_protein_shake += 1
        counter_buy += 1
    elif acts == 'Protein bar':
        counter_protein_bar += 1
        counter_buy += 1
# Per-activity counts, then workout/purchase shares as percentages.
print(f'{counter_back} - back')
print(f'{counter_chest} - chest')
print(f'{counter_legs} - legs')
print(f'{counter_abs} - abs')
print(f'{counter_protein_shake} - protein shake')
print(f'{counter_protein_bar} - protein bar')
print(f'{counter_train * 100 / visitors:.2f}% - work out')
print(f'{counter_buy * 100 / visitors:.2f}% - protein')
10024223163 | from __future__ import print_function
# Import comet_ml in the top
from comet_ml import Experiment
import argparse
import os
import sys
import torch
from torch.optim import Adam
from given_code.classify_svhn import get_data_loader
from q3.vae.models.conv_vae import ConvVAE
from q3.vae.vae_trainer import VAETrainer
from q3.vae.vae_utils import load_config, fix_seed
# Make this script's directory importable regardless of the CWD.
dir_path = (os.path.abspath(os.path.join(os.path.realpath(__file__), './.')))
sys.path.append(dir_path)
def parse_args():
    '''Parse the command-line arguments of the VAE training script.

    Returns
    ----------
    args : argparse.Namespace
        Parsed arguments: cfg, train_dataset_path, model, results_dir.
    '''
    parser = argparse.ArgumentParser(description='Train a CNN network')
    # Optional YAML configuration overriding the defaults.
    parser.add_argument('--cfg', type=str,
                        default=None,
                        help='''optional config file,
                        e.g. config/base_config.yml''')
    parser.add_argument("--train_dataset_path", type=str,
                        help='''Train dataset''')
    # Checkpoint to resume training from (None starts from scratch).
    parser.add_argument("--model", default=None, type=str,
                        help='''If we want to continue training''')
    parser.add_argument("--results_dir", type=str,
                        default='results/',
                        help='''results_dir will be the absolute
                        path to a directory where the output of
                        your training will be saved.''')
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    args = parse_args()
    print("pytorch version {}".format(torch.__version__))
    # Load the config file
    cfg = load_config(args)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("Device used: {}".format(device))
    # Make the results reproducible: reuse the checkpoint's seed when
    # resuming, otherwise the seed from the config.
    seed = cfg.SEED
    model_dict = None
    if args.model is not None:
        model_dict = torch.load(args.model, map_location=device)
        seed = model_dict["seed"]
    fix_seed(seed)
    hyper_params = cfg.HYPER_PARAMS.INITIAL_VALUES
    train_loader, valid_loader, test_loader = get_data_loader(
        dataset_location=args.train_dataset_path,
        batch_size=cfg.TRAIN.BATCH_SIZE)
    # model = VAE(label=cfg.CONFIG_NAME, image_size=cfg.IMAGE_SIZE,
    #             channel_num=cfg.MODEL.CHANNEL_NUM,
    #             kernel_num=cfg.MODEL.KERNEL_NUM,
    #             z_size=cfg.MODEL.LATENT_SIZE)
    # Convolutional VAE sized from the config.
    model = ConvVAE(
        width=cfg.IMAGE_SIZE, height=cfg.IMAGE_SIZE,
        num_channels=cfg.MODEL.CHANNEL_NUM,
        hidden_size=500,
        z_dim=cfg.MODEL.LATENT_SIZE, num_filters=cfg.MODEL.KERNEL_NUM
    )
    optimizer = Adam(model.parameters(), lr=hyper_params["LR"])
    # Trainer handles the train/validate loop with early stopping.
    trainer = VAETrainer(model=model, optimizer=optimizer, cfg=cfg,
                         train_loader=train_loader, valid_loader=valid_loader,
                         device=device, output_dir=cfg.OUTPUT_DIR,
                         hyper_params=hyper_params,
                         max_patience=cfg.TRAIN.MAX_PATIENCE)
    trainer.fit(hyper_params)
| Lap1n/ift6135 | tp3/src/q3/vae/run_train_vae.py | run_train_vae.py | py | 3,022 | python | en | code | 0 | github-code | 13 |
12322642741 | # http://demo.spiderpy.cn/get/ 代理接口
import requests
"""
代理形式
proxies = {
"http": "http://10.10.1.10:3128",
"https": "http://10.10.1.10:1080",
}
"""
def get_proxy():
    """Fetch one proxy address from the pool service and return it as a
    requests-style proxies mapping ({"http": ..., "https": ...})."""
    api_response = requests.get(url='http://demo.spiderpy.cn/get/').json()
    address = api_response['proxy']
    # The same plain-HTTP proxy endpoint serves both schemes.
    prefixed = "http://" + address
    return {
        "http": prefixed,
        "https": prefixed,
    }
# NOTE(review): this call's result is discarded, so it only burns one
# proxy fetch — confirm it isn't meant to be removed or assigned.
get_proxy()
url = 'https://www.baidu.com/'
# `proxies` keyword --> send the request through the proxy.
# If the proxy is unavailable this raises requests.exceptions.ProxyError.
response = requests.get(url=url, proxies=get_proxy())
print(response.text)
'''
每一次的request请求都属于一个连接(有些时候会有超出最大连接数的提示,浏览器有些时候会针对一个ip限制连接次数)
'''
20224937382 | #import sqlanydb
import os
import pyodbc
from clases.cls_Bdd import BaseDD
os.environ["SQLANY_API_DLL"]='/opt/sqlanywhere17/lib64/libdbcapi_r.so'
servidor='193.168.1.175:5000'
usuario='sa'
clave='Emilita01'
db='master'
puerto=5000
drver='SYBASE'
print("Hola ")
Conn = BaseDD(servidor,usuario,clave,db,puerto,drver)
print(Conn.ServidorDB)
a=pyodbc.drivers()
print(a)
print(Conn)
conn = pyodbc.connect(driver=drver, server=servidor , database=db ,uid=usuario , pwd=clave)
print("Aqui")
b=conn.conectar()
print(b)
print("Termino") | wbarrazaj/Monitor-Sybase-Python | Prueba.py | Prueba.py | py | 541 | python | es | code | 0 | github-code | 13 |
28594164910 | # -*- coding: utf-8 -*-
"""
Updated 16 Dec 2017
10 sheep eat away at their environments
Greedy sheep are sick after 100 units
@author: amandaf
"""
import matplotlib.pyplot
import matplotlib.animation
import csv
import agentframework
import random
#setup variables: 10 agents wander a grid environment for 100 iterations
num_of_agents = 10
num_of_iterations = 100
agents = []
#Set to 7 x 6 in order to prevent Qwindowswindows::unable to set geometry
fig = matplotlib.pyplot.figure(figsize=(7, 6))
ax = fig.add_axes([0, 0, 1, 1])
#Empty environmental list (2D grid of numeric values the agents eat)
environment = []
#Read the file of comma-separated numeric rows
f = open('in.txt', newline='')
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
#Loop through file, building one list per row
for row in reader: # A list of rows
    #create new rowlist
    rowlist = []
    for value in row: # A list of value
        rowlist.append(value)
    #Add rowlist to environment
    environment.append(rowlist)
f.close()
#Calculate size of environment (number of rows; assumed square grid)
maxEnv = len(environment)
# Make the agents, each sharing the environment and the agent list.
for i in range(num_of_agents):
    agents.append(agentframework.Agent(environment, agents, maxEnv))
# Move the agents.
def update(frame_number):
    """Animation callback: advance every agent, then redraw the frame.

    NOTE(review): each frame runs `num_of_iterations` full passes over
    all agents, so the whole simulation advances 100 steps per frame —
    confirm that is intended rather than one step per frame.
    """
    fig.clear()
    #setup figure limits so it stops resizing
    matplotlib.pyplot.xlim(0, maxEnv-1)
    matplotlib.pyplot.ylim(0, maxEnv-1)
    matplotlib.pyplot.imshow(environment)
    matplotlib.pyplot.title("Iteration:" + str(frame_number) + "/" + str(num_of_iterations))
    #randomise order so no agent consistently moves/eats first
    random.shuffle(agents)
    for j in range(num_of_iterations):
        #print(agents[0].x,agents[0].y)
        for i in range(num_of_agents):
            agents[i].move()
            #Agent eats values from the environment at its location
            agents[i].eat()
            #print("Eating")
            if agents[i].store > 100:
                #Greedy agents are sick if they eat more than 100 units
                agents[i].sick()
                #print ("Being sick")
    # Plot every agent's current position on top of the environment.
    for i in range(num_of_agents):
        matplotlib.pyplot.scatter(agents[i].x,agents[i].y)
#Display animation (blocks until the plot window is closed)
animation = matplotlib.animation.FuncAnimation(fig, update, interval=1, repeat = False, frames=num_of_iterations)
matplotlib.pyplot.show()
#Write out the (now partially eaten) environment to file
f2 = open('environment.txt','w', newline='')
writer = csv.writer(f2)
for row in environment:
    writer.writerow(row)
f2.close()
#Write each agent's store count to file (append mode keeps prior runs)
f2 = open('store.txt','a')
for i in range(num_of_agents):
    print (agents[i].store)
    f2.write(str(agents[i].store)+"\n")
f2.close()
32307981415 | def longest_streak(head):
if head is None:
return 0
current = head
current_val = head.val
count = 0
lst = []
while current:
if current.val == current_val:
count += 1
current = current.next
else:
lst.append(count)
current_val = current.val
count = 1
current = current.next
lst.append(count)
return max(lst)
#runtime O(n)
#space O(n) worst case
#Better space complexity using kadanes algo
def longest_streak(head):
    """Return the length of the longest run of equal consecutive values
    in the linked list starting at `head` (0 for an empty list).

    Kadane-style single pass: O(n) time, O(1) space.
    """
    max_streak = 0
    current_streak = 0
    # Sentinel that compares unequal to every list value, so the first
    # node always starts a fresh streak (even if its value is None).
    prev_val = object()
    current_node = head
    while current_node:
        # Bug fix: the original compared the node's value against the
        # previous *node* object (`current_node.val == prev_node`), so
        # the streak never extended past 1. Compare values instead.
        if current_node.val == prev_val:
            current_streak += 1
        else:
            current_streak = 1
        prev_val = current_node.val
        # Track the best streak seen so far.
        if current_streak > max_streak:
            max_streak = current_streak
        current_node = current_node.next
    return max_streak
| kabszac/dsandalgo | linkedlist/longstreak.py | longstreak.py | py | 1,041 | python | en | code | 0 | github-code | 13 |
74937180176 | import numpy as np
import random
import re
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn import datasets
import random
import time
def time_me(fn):
    """Decorator that prints how long each call to *fn* takes.

    Returns a wrapper that forwards all arguments to *fn*, prints the elapsed
    wall-clock time, and returns *fn*'s result.
    """
    def _wrapper(*args, **kwargs):
        # BUG FIX: time.clock() was removed in Python 3.8; use perf_counter().
        start = time.perf_counter()
        # BUG FIX: the original discarded fn's return value.
        result = fn(*args, **kwargs)
        print("%s cost %s second" % (fn.__name__, time.perf_counter() - start))
        return result
    return _wrapper
def get_data():
    """Write data.csv: sepal length (column 0) and petal length (column 2)
    of the iris data set, one sample per row."""
    iris = datasets.load_iris()
    # sepal length on the x axis, petal length on the y axis
    sepal_length = iris.data[:, 0]
    petal_length = iris.data[:, 2]
    np.savetxt(r'data.csv', np.column_stack((sepal_length, petal_length)))
def calcuDistance(vec1, vec2):
    """Return the Euclidean distance between the two vectors."""
    diff = vec1 - vec2
    return np.sqrt(np.sum(diff * diff))
def loadDataSet():
    """Load the sample matrix written by get_data() from data.csv."""
    return np.loadtxt("data.csv")
def initCentroids(dataSet, k):
    """Pick k distinct samples from dataSet at random as initial centroids."""
    samples = list(dataSet)
    return random.sample(samples, k)
def minDistance(dataSet, centroidList):
    """Assign every sample in dataSet to its nearest centroid.

    Returns a dict mapping centroid index -> list of member samples.
    """
    clusterDict = dict()
    for sample in dataSet:
        best_index = -1
        best_distance = float("inf")
        for index, centroid in enumerate(centroidList):
            distance = calcuDistance(sample, centroid)
            if distance < best_distance:
                best_distance = distance
                best_index = index
        # create the member list on first use, then add the sample
        clusterDict.setdefault(best_index, []).append(sample)
    return clusterDict
def getCentroids(clusterDict):
    """Recompute each cluster's centroid as the mean of its members."""
    return [np.mean(members, axis=0) for members in clusterDict.values()]
def getVar(centroidList, clusterDict):
    """Total within-cluster distortion: the sum, over all clusters, of the
    distances from each member to its centroid."""
    # FIX: renamed the accumulator from `sum`, which shadowed the builtin.
    total = 0.0
    for key in clusterDict.keys():
        centroid = centroidList[key]
        cluster_distance = 0.0
        for item in clusterDict[key]:
            cluster_distance += calcuDistance(centroid, item)
        total += cluster_distance
    return total
def showCluster(centroidList, clusterDict):
    """Plot the clustering result and save it to K-means.jpg.

    Supports at most 6 clusters (one marker style per cluster).
    """
    # per-cluster point styles: 'o' = circle marker, second letter = colour
    colorMark = ['or', 'ob', 'og', 'ok', 'oy', 'ow']
    # centroid styles: 'd' = diamond marker
    centroidMark = ['dr', 'db', 'dg', 'dk', 'dy', 'dw']
    for key in clusterDict.keys():
        plt.plot(centroidList[key][0], centroidList[key][1], centroidMark[key], markersize=12)  # centroid point
        for item in clusterDict[key]:
            plt.plot(item[0], item[1], colorMark[key])
    plt.savefig('K-means.jpg')
    plt.show()
def test_k_means():
    """Run k-means (k=3) on data.csv until the total distortion converges,
    then plot the final clustering."""
    dataSet = loadDataSet()
    centroidList = initCentroids(dataSet, 3)  # pick 3 random starting centroids
    #print(centroidList)
    clusterDict = minDistance(dataSet, centroidList)
    # # getCentroids(clusterDict)
    # showCluster(centroidList, clusterDict)
    newVar = getVar(centroidList, clusterDict)
    oldVar = 1  # when two consecutive distortions are (nearly) equal the centroids have settled
    times = 2
    while abs(newVar - oldVar) >= 0.00001:
        # alternate the two k-means steps: recompute centroids, then reassign samples
        centroidList = getCentroids(clusterDict)
        clusterDict = minDistance(dataSet, centroidList)
        oldVar = newVar
        newVar = getVar(centroidList, clusterDict)
        times += 1
    showCluster(centroidList, clusterDict)
if __name__ == '__main__':
    #get_data()
    test_k_means()
    #print(get_data())
    # scatter plot of the raw data set
    lris_df = datasets.load_iris()
    # use the first and third iris features as x and y; other dimensions would work too
    x_axis = lris_df.data[:,0]
    y_axis = lris_df.data[:,2]
    # c= colours each point by its class label
    plt.scatter(x_axis, y_axis, c=lris_df.target)
    plt.savefig('数据集.jpg')
plt.show() | YOUNGBChen/MachineLearningCourse | Secomd/K-means.py | K-means.py | py | 4,409 | python | en | code | 3 | github-code | 13 |
24899288514 | from .redditManager import get_instance
from flask import (Blueprint, render_template, request)
import markdown
import urllib
import json
from html_diff import diff as df
from urllib.parse import quote
diff = Blueprint('diff', __name__)
class Submission:
def __init__(self, author, id, selftext, title, url, subreddit, created_utc):
self.author = author
self.id = id
self.selftext = selftext
self.html = markdown.markdown(selftext)
self.title = title
self.subreddit = subreddit
self.created_utc = created_utc
self.url = url
@diff.route('/postdiff/<post_id>', methods=('GET', 'POST'))
def displaySettings(post_id):
r = get_instance()
with urllib.request.urlopen(f"https://api.pushshift.io/reddit/search/submission/?ids={post_id}") as url:
data = json.loads(url.read().decode())
pushshift = data['data'][0]
pushshift = Submission(
author=pushshift['author'],
id=pushshift['id'],
selftext=pushshift['selftext'],
title=pushshift['title'],
url=pushshift['url'],
subreddit=pushshift['subreddit'],
created_utc=pushshift['created_utc']
)
reddit = r.submission(id=post_id)
live_html = markdown.markdown(reddit.selftext)
diff_html = df(pushshift.html, live_html)
setattr(reddit, "diff", diff_html)
setattr(reddit, "pushshift", pushshift.html)
setattr(reddit, "current", live_html)
base = request.base_url
base = base.rsplit("/",1)[0] + "/"
base = quote(base, safe='')
return render_template('postdiff.jinja2', post=reddit, url=base)
| adhesivecheese/modpanel | project/diff.py | diff.py | py | 1,487 | python | en | code | 1 | github-code | 13 |
69846570258 | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 22 11:20:38 2020
@author: christopher_sampah
"""
import pandas as pd
import numpy as np
import seaborn as sbn
import matplotlib.pyplot as plt
from scipy.stats import norm
from scipy import stats
import sklearn
from sklearn import linear_model as lm
from sklearn.model_selection import cross_validate as cv
def trim(df,std_dev=1, scale = False, return_df = False):
'''
From intro stats, check the 68-95-99.7 rule with respect to 1-sd, 2-sd,
3-sd.
'''
mu = df.SalePrice.mean()
sigma = df.SalePrice.std()
if scale:
scale = 100000
else:
scale = 1
trimmed_df = df.loc[(abs(df.SalePrice) <= (mu + std_dev*sigma))]
data = trimmed_df.SalePrice
if return_df:
return trimmed_df
else:
label = str(std_dev) + "-Sigma, " + str(round(100*data.shape[0]/df.SalePrice.shape[0],2)) + "% of dataset"
sbn.distplot(data/scale, kde_kws = {"label": label})
def abs_corr(df, drop_cols = [], min_val = .6, max_val = 1, plot_me = True, plot_x = 12, plot_y = 9):
if len(drop_cols) > 0:
drop_cols = list(set(drop_cols))
df = df[[i for i in df.columns if i not in drop_cols]]
abs_mtx = df.drop(columns = 'SalePrice').corr().abs()
mtx = abs_mtx[(abs_mtx < max_val) & (abs_mtx >= min_val)].dropna(1, 'all').dropna(0, 'all')
if plot_me:
plt.subplots(figsize=(plot_x, plot_y))
sbn.heatmap(mtx, vmax=.8, square=True)
plt.show()
else:
return mtx
def check_skew_kurtosis(df, feature = 'SalePrice', pics_only = False,):
'''
Overlay a normal pdf onto the plot of the feature of interest to visually
observe its deviation from normality
'''
y = df[feature]
sbn.distplot(y, fit=norm)
plt.figure()
stats.probplot(y, plot=plt)
plt.show()
print('The kurtosis: ' + str(stats.kurtosis(y)))
print('The skew: ' + str(stats.skew(y)))
def score_options():
'''
Return a list of possible scorers for a regression model
'''
from sklearn.metrics import SCORERS
score_types = sorted(SCORERS.keys())
print('Possible scores to choose from: ')
for s in score_types:
print(s)
def build_and_eval(X,y, extra = None, scorer = 'r2',get_max = True,
return_models = False, return_optimal = False,
score_options = False, omni_seed = 8):
'''
Taking a (normalized) X and its corresponding y, the function builds a
multiple-regression model before attempting to regularize with ridge and
lasso. The function returns a dictionary of the models, specified by regu-
larizer (i.e. 'lasso', 'ridge', or 'normal'[no regularization performed]),
with the option to return only the best-performing model of each regulari-
zation type
'''
if score_options: score_options()
model_holder = {'Normal':[] ,'Ridge':[], 'Lasso':[]}
baseline = cv(lm.LinearRegression(fit_intercept = True), X, y, cv = 20,
scoring = scorer, return_estimator = True)
model_holder['Normal'] = baseline['estimator']
if get_max:
precurser = 'Largest ' + scorer + ': '
else:
precurser = 'Smallest ' + scorer + ': '
if extra is None:
print('Multiple Regression:')
else:
print('Multiple Regression ' + extra + ':')
print(precurser + str(baseline['test_score'].max()) + '\n')
# regularize
reg_vals = {'penalty':list(range(1,21)), 'Ridge':list(), 'Lasso':list() }
for penalty in reg_vals['penalty']:
ridger = cv(lm.Ridge(alpha = penalty, random_state = omni_seed), X, y, scoring = scorer,
cv = 10, return_estimator = True)
lasso = cv(lm.Lasso(alpha = penalty, max_iter = 50000, random_state = omni_seed), X, y, scoring = scorer,
cv = 10, return_estimator = True)
#obtain the min/max score and the corresponding model
s,c = get_score_and_model(ridger['test_score'],ridger['estimator'], get_max = get_max)
reg_vals['Ridge'].append(round(s,3))
model_holder['Ridge'].append(c)
s,c = get_score_and_model(lasso['test_score'], lasso['estimator'], get_max = get_max)
reg_vals['Lasso'].append(round(s,3))
model_holder['Lasso'].append(c)
best_alpha = {'Ridge':0, 'Lasso':0} # use to obtain the best models based on scoring
for val in ['Ridge', 'Lasso']:
v = min(reg_vals[val])
print(val + ' Regression:')
best_alpha[val] = reg_vals['penalty'][reg_vals[val].index(v)]
print(precurser + str(v) + ' for corresponding alpha = ' +
str(best_alpha[val]) + '\n')
if return_optimal:
return_models = True
for val in ['Ridge', 'Lasso']:
model_holder[val] = [m for m in model_holder[val] if m.alpha == best_alpha[val]]
if return_models:
return model_holder
def get_score_and_model(list_of_scores, list_of_models, get_max = True):
'''
Given a list of test scores and the corresponding list of models,
obtain the min/max score and its corresponding model
'''
if get_max:
score_val = max(list_of_scores)
else:
score_val = min(list_of_scores)
index_of_score = np.where(list_of_scores == score_val)[0][0] #[0][0] to get the value from the tuple
corresponding_model = list_of_models[index_of_score]
return score_val, corresponding_model
def absolute_diff(model,X,df, preamble = None):
'''
Predicts SalePrice feature and plots residuals and absolute residuals
against true SalePrice values
'''
if type(model) is list:
model = model[0]
y_eval = pd.Series(model.predict(X)).reindex(X.index)
y_eval.name = 'pred_SalePrice'
x2 = df.merge(y_eval, left_index = True, right_index = True)
x2['resid'] = x2.SalePrice - x2.pred_SalePrice
x2['abs_resid'] = abs(x2.resid)
addendum = ''
if preamble is not None:
addendum = ': ' + preamble
plt.subplots(figsize = (12,6))
plt.subplot(1,2,1)
sbn.regplot('SalePrice','resid', data = x2).set_title('Residual Plot' + addendum)
plt.ylabel('')
plt.subplot(1,2,2)
sbn.regplot('SalePrice','abs_resid', data = x2).set_title('Absolute Residuals'+ addendum)
plt.ylabel('')
plt.show()
if __name__ == "__main__":
'Initial outlier detection:'
df = pd.read_csv('data/train.csv').set_index('Id')
df_test = pd.concat([pd.read_csv('data/test.csv').set_index('Id'),
pd.read_csv('data/sample_submission.csv').set_index('Id')],1)
df = pd.concat([df, df_test])
print(df.SalePrice.describe())
# check the 68-95-99.7 rule, alter the df accordingly
sbn.distplot(df.SalePrice/100000, kde_kws={"label": "Original data"})
trim(df,1,True)
trim(df,2,True)
trim(df,3,True)
plt.title('Scaled Standard Deviations')
plt.show()
'Notice the large peak. Check skew and kurtosis'
check_skew_kurtosis(df)
df['log_SalePrice'] = np.log(df['SalePrice'])
check_skew_kurtosis(df,'log_SalePrice')
# from the article (https://www.spcforexcel.com/knowledge/basic-statistics/are-skewness-and-kurtosis-useful-statistics)
# skewness and kurtosis arent that useful, so may try regression with both logged and unlogged SalePrice
# trim the data based on the graphics, come back and attempt regression without
# removing outlier but log SalePrice
df = trim(df,2,return_df = True)
'Feature selection for modelling: continuous vars, date-vars, and counts only'
print(df.select_dtypes(exclude = ['object']).head())
for i in df.select_dtypes(exclude = ['object']).columns:
print('The feature: ' + i)
print(df[i].value_counts(dropna = False))
print('\n')
drop_cols = list(df.select_dtypes(exclude = ['number']).columns)
[drop_cols.append(col) for col in ['MSSubClass','OverallQual','OverallCond', 'MiscVal']]
'quick cleaning:'
df['LotFrontage'].replace(np.nan,0, inplace = True)
df['MasVnrArea'].replace(np.nan,0, inplace = True)
abs_corr(df.drop(columns = 'log_SalePrice'), drop_cols)
'''
1st floor sq feet and basement sq feet highly correlated. Sometimes the 1st floor can be the square feet,
and not all houses have a basement t.f. drop total bsmt sq feet.
Also, garage cars highly correlates with garage area, and is less informative.
Year built correlates strongly with garage's year built, and is more informative.
Total rooms above ground highly correlates with general living area, and is less informative, and seems
to be captured by living area as well as bedrooms above ground
'''
[drop_cols.append(i) for i in ['GarageYrBlt', 'GarageCars','TotalBsmtSF', 'TotRmsAbvGrd']]
abs_corr(df.drop(columns = 'log_SalePrice'), drop_cols)
"""
The remaining correlations arent as strong, but we note 2nd floor square feet and general living
area. I'll leave for now since I find them both to be informative for different reasons, but
may drop one of them depending on model performance
"""
df.drop(columns = drop_cols, inplace = True)
'Feature engineering and final data prep:'
# consolidate date vars
from operator import attrgetter
df.rename(columns = {'YrSold': 'year','MoSold':'month'}, inplace = True)
df['day'] = 1
df['date_sold'] = pd.to_datetime(df[['year', 'month','day']])
df['min_sell_date'] = df.date_sold.min()
min_sell_date = df['min_sell_date'].iloc[0]
df['months_since_sold'] = (df.date_sold.dt.to_period('M') - df.min_sell_date.dt.to_period('M')).apply(attrgetter('n'))
[df.drop(columns = date_col, inplace = True) for date_col in ['year', 'month',
'day', 'date_sold', 'min_sell_date']]
df['months_since_sold'].hist() # a quick histogram
plt.show()
df.corrwith(df.SalePrice).sort_values()
# fill missing vals
df.isnull().sum()
for feature in ['BsmtFullBath','BsmtHalfBath','BsmtFinSF1','BsmtFinSF2','BsmtUnfSF','GarageArea']:
print('Row of missing values for feature ' + feature+ ':')
print(df.loc[df[feature].isna()])
print('\n')
'''
Looking at the data, the record at indices 2121 and 2189 are missing values for all basement-related features,
so I presume this means there doesnt exist a basement. Impute accordingly
'''
df.loc[df.index.isin([2121, 2189]),['BsmtFullBath', 'BsmtHalfBath']] = 0
df.loc[df.index == 2121, ['BsmtFinSF1','BsmtFinSF2','BsmtUnfSF']] = 0
df.loc[df['GarageArea'].isna(),['GarageArea']] = 0
df.isnull().sum()
'normalize'
df = df.sample(frac = 1) #shuffle in case the data came in an ordered manner
X = df.drop(columns = ['SalePrice','log_SalePrice'])
#drop time columns that dont need normalization, attach them after normalizing
X.drop(inplace = True, columns = ['YearBuilt','YearRemodAdd','months_since_sold'])
X = (X - X.mean())/X.std()
X = X.merge(df[['YearBuilt','YearRemodAdd','months_since_sold']], left_index = True, right_index = True)
y = df['SalePrice']
'Modelling:'
model_dict = build_and_eval(X,y, scorer = 'neg_mean_squared_error',
return_optimal = True)
#plot the result of one of those models against the dependent variable
absolute_diff(model_dict['Lasso'],X,df,'Lasso')
absolute_diff(model_dict['Ridge'],X,df,'Ridge')
absolute_diff(model_dict['Normal'][0],X,df,'Normal')
| ChrisMattSam/cuanto_cuesta_casa | data_analysis.py | data_analysis.py | py | 11,634 | python | en | code | 0 | github-code | 13 |
6917501917 | from django.shortcuts import render, redirect
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from .models import *
import random
from json import dumps
from django.views.decorators.cache import cache_control
from django.db import connection
def test(request):
    """Debug view: execute the first line of test.sql and dump the column
    names followed by every row value as a flat HTTP response.

    NOTE(review): only lines[0] is executed — assumes test.sql holds a
    single one-line statement; confirm.
    """
    with connection.cursor() as cursor:
        with open('test.sql') as f:
            lines = f.readlines()
        cursor.execute(lines[0])
        result = cursor.fetchall()
        field_names = [col[0] for col in cursor.description]
        # BUG FIX: `output = field_names` aliased the header list, so the
        # loop below silently mutated field_names too; copy instead.
        output = list(field_names)
        for row in result:
            output += row
    return HttpResponse(output)
def home(response):
    """Render the landing page."""
    return render(response, 'main/home.html', {})
def instruction(response):
    """Render the instructions page."""
    return render(response, 'main/instruction.html', {})
def assessment(request):
    """Build the colour-palette assessment page for a movie.

    NOTE(review): a random movie id in [1, 55] is chosen, but every palette
    and frame query below filters on id/movieid = 1, not on the random id —
    confirm whether that is intentional.
    """
    movies = random.randint(1,55)
    movie = Movie.objects.filter(id=movies)
    # the movie-level palettes (three extraction methods), serialised as JSON for the template
    movie_palette = list(Movie.objects.filter(id=1).values_list('top10_palette','kmeans_palette','random_palette'))
    palette =dumps({'top10_palette':movie_palette[0][0], 'kmeans_palette': movie_palette[0][1], 'random_palette':movie_palette[0][2] })
    # ten random frames of the movie, re-keyed 1..10
    frames_l = list(Frame.objects.filter(movieid=1).values_list('image').order_by('?')[:10])
    frames={}
    i=1
    for f in frames_l:
        frames[i]=f[0]
        i+=1
    frames = dumps(frames)
    # one random frame together with its three candidate palettes
    frame_l = list(Frame.objects.filter(movieid=1).values_list('id','image','top10_palette','kmeans_palette','random_palette').order_by('?')[:1])
    frame=dumps({'id':frame_l[0][0],'image':frame_l[0][1], 'top10_palette':frame_l[0][2], 'kmeans_palette': frame_l[0][3], 'random_palette':frame_l[0][4] })
    context={
        'movie_id':movies,
        'movie': movie,
        'frame' : frame,
        'frames' : frames,
        'palette': palette
    }
    if request.method == 'GET':
        return render(request, 'main/assessment.html', context)
    # elif request.method == 'POST':
    #     single = request.POST.get("single")
    #     multiple = request.POST.get("multiple")
    #     if 'top10' in single:
    #         single = 'top10'
    #     Participate.objects.filter(id=1).update(movieid=movies, single_frameid = frame, single_choise = single, multi_choice=multiple)
    #     return redirect('thankYou')
def thankYou(response):
    """Render the post-submission thank-you page."""
    return render(response, 'main/thankYou.html', {})
def assessment_api(request):
    """Persist a participant's single- and multi-frame palette choices.

    Expects POST fields: single, multiple, frame_id, movie_id. The raw
    choice strings are normalised to 'top10' / 'random' / 'kmeans' before
    saving. NOTE(review): non-POST requests fall through and return None
    (as in the original) — confirm whether a redirect is wanted there too.
    """
    def _normalise(choice):
        # map the raw radio-button value onto the palette method name
        if 'top10' in choice:
            return 'top10'
        if 'random' in choice:
            return 'random'
        return 'kmeans'

    if request.method == 'POST':
        single = _normalise(request.POST.get("single"))
        multiple = _normalise(request.POST.get("multiple"))
        frame_id = request.POST.get("frame_id")
        movie_id = request.POST.get("movie_id")
        # BUG FIX: Participant.objects.update(...) updates EVERY row in the
        # table, and the fetched latest participant (bound to the shadowed
        # name `id`) was never used. Update only the newest participant.
        latest = Participant.objects.latest('id')
        Participant.objects.filter(pk=latest.pk).update(
            movieid=movie_id, single_frameid=frame_id,
            single_choise=single, multi_choice=multiple)
        return redirect('thankYou')
| syuan2000/colorWebTool | main/views.py | views.py | py | 3,305 | python | en | code | 0 | github-code | 13 |
30238715243 | ''' Reverso do número. Faça uma função que retorne o reverso de um número inteiro
informado.
'''
numeroDigitado = (input('Digite um número: '))
def reverso(numero):
    """Print and return the reverse of the given number, as a string.

    The exercise text asks for the reverse to be *returned*; the original
    only printed it. Printing is kept so the script's output is unchanged.
    Note: for a negative number the sign ends up last (e.g. -12 -> '21-').
    """
    invertido = str(numero)[::-1]
    print(invertido)
    return invertido
reverso(numeroDigitado)
| nataliakdiniz/estrutura_dados_uniesp | primeira_unidade/reversoNumero.py | reversoNumero.py | py | 251 | python | pt | code | 0 | github-code | 13 |
6609558679 | from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession, HiveContext, Row
from pyspark.sql.types import *
from pyspark.sql.functions import col
import pyspark.sql.functions as F
import time
from pyspark.sql.functions import lit
# Hive-enabled Spark session for the matching job.
spark = (SparkSession
.builder
.appName('matching')
.enableHiveSupport()
.getOrCreate())
# NOTE(review): hive.metastore.uris normally uses the thrift:// scheme
# (e.g. thrift://host:9083); an http:// URI here looks wrong — confirm.
SparkContext.setSystemProperty("hive.metastore.uris", "http://192.168.58.24:8888")
# the matching queries rely on cartesian-style joins
spark.conf.set("spark.sql.crossJoin.enabled", "true")
# master = full reference table, delta = new records to match against it
master = spark.sql('SELECT * FROM dwhdb.master_matching')
#master.limit(10).toPandas()
delta = spark.sql('SELECT * FROM dwhdb.delta_matching')
#delta.limit(10).toPandas()
master = (master
.withColumn("clean_id",
F.regexp_replace(F.trim(F.lower(F.regexp_replace('nomor_identitas', "[^a-zA-Z0-9\\s]", ""))), " +", " "))
.withColumn("clean_nama",
F.regexp_replace(F.trim(F.lower(F.regexp_replace('nama_sesuai_identitas', "[^a-zA-Z0-9\\s]", ""))), " +", " "))
.withColumn("clean_tgl_lahir",
F.regexp_replace(F.trim(F.lower(F.regexp_replace('tanggal_lahir', "[^a-zA-Z0-9\\s]", ""))), " +", " "))
.withColumn("clean_nama_ibu",
F.regexp_replace(F.trim(F.lower(F.regexp_replace('nama_gadis_ibu_kandung', "[^a-zA-Z0-9\\s]", ""))), " +", " "))
.withColumn("clean_jenis_kelamin",
F.regexp_replace(F.trim(F.lower(F.regexp_replace('jenis_kelamin', "[^a-zA-Z0-9\\s]", ""))), " +", " "))
.withColumn("clean_tempat_lahir",
F.regexp_replace(F.trim(F.lower(F.regexp_replace('tempat_lahir', "[^a-zA-Z0-9\\s]", ""))), " +", " "))
)
#master.limit(10).toPandas()
master.registerTempTable("master")
prep_master = spark.sql("""
SELECT
master.matching_id
, master.clean_id
, master.clean_nama
, master.clean_tgl_lahir
, master.clean_nama_ibu
, master.clean_jenis_kelamin
, master.clean_tempat_lahir
, master.kode_pos
FROM master
""")
prep_master.repartition("clean_tempat_lahir").write.format("parquet").partitionBy("clean_tempat_lahir").mode("overwrite").saveAsTable("dwhdb.fm_prep_master")
delta = (delta
.withColumn("clean_id",
F.regexp_replace(F.trim(F.lower(F.regexp_replace('nomor_identitas', "[^a-zA-Z0-9\\s]", ""))), " +", " "))
.withColumn("clean_nama",
F.regexp_replace(F.trim(F.lower(F.regexp_replace('nama_sesuai_identitas', "[^a-zA-Z0-9\\s]", ""))), " +", " "))
.withColumn("clean_tgl_lahir",
F.regexp_replace(F.trim(F.lower(F.regexp_replace('tanggal_lahir', "[^a-zA-Z0-9\\s]", ""))), " +", " "))
.withColumn("clean_nama_ibu",
F.regexp_replace(F.trim(F.lower(F.regexp_replace('nama_gadis_ibu_kandung', "[^a-zA-Z0-9\\s]", ""))), " +", " "))
.withColumn("clean_jenis_kelamin",
F.regexp_replace(F.trim(F.lower(F.regexp_replace('jenis_kelamin', "[^a-zA-Z0-9\\s]", ""))), " +", " "))
.withColumn("clean_tempat_lahir",
F.regexp_replace(F.trim(F.lower(F.regexp_replace('tempat_lahir', "[^a-zA-Z0-9\\s]", ""))), " +", " "))
)
#delta.limit(10).toPandas()
delta.registerTempTable("delta")
prep_delta = spark.sql("""
SELECT
delta.clean_id
, delta.clean_nama
, delta.clean_tgl_lahir
, delta.clean_nama_ibu
, delta.clean_jenis_kelamin
, delta.clean_tempat_lahir
, delta.kode_pos
FROM delta
""")
prep_delta.repartition("clean_tempat_lahir").write.format("parquet").partitionBy("clean_tempat_lahir").mode("overwrite").saveAsTable("dwhdb.fm_prep_delta")
| muharandy/Identity-Matching | matching_prep.py | matching_prep.py | py | 3,751 | python | en | code | 0 | github-code | 13 |
3199765789 | from unicodedata import name
from django.urls import path
from Kitab import views
from django.contrib.auth import views as auth_views
from django.contrib import admin
from django.contrib import admin
from django.contrib.auth.views import LoginView
urlpatterns = [
    # public pages
    path('', views.home, name='home'),
    path('aboutus', views.aboutus, name='aboutus'),
    path("login/",views.login,name="login"),
    path("register/", views.register,name="register"),
    path('contact', views.contact,name='contact'),
    # authentication / dashboards
    path("adminlogin/",views.adminlogin,name="adminlogin"),
    path('dashboard', views.dashboard,name='dashboard'),
    path('logout', views.logout, name="logout"),
    # post-login dispatch — NOTE(review): presumably routes users by role; confirm in views.afterlogin_view
    path('afterlogin', views.afterlogin_view, name="afterlogin"),
    # admin area: catalogue and order management
    path('admindashboard',views.admindashboard_view, name="admindashboard"),
    path('addData', views.Kitab,name='addData'),
    path('buyNow/<int:p_id>', views.buyNow,name='buyNow'),
    path('book_details/<int:p_id>', views.book_details,name='book_details'),
    path('admin-products', views.admin_products_view,name='admin-products'),
    path('addproduct', views.admin_add_product_view, name='addproduct'),
    path('viewbooking', views.viewbooking_view,name='viewbooking'),
    path('viewcustomer', views.viewcustomer, name='viewcustomer'),
    # blog detail pages
    path('blog1', views.blog_detail, name='blog1'),
    path('blog2', views.blog_detail2, name='blog2'),
    path('blog3', views.blog_detail3, name='blog3'),
    path('blog4', views.blog_detail4, name='blog4'),
    # user profile management
    path('profile/', views.profile, name='profile'),
    path('edit-profile/', views.edit_profile_view,name='edit-profile'),
    # product maintenance
    path('delete-product/<slug:p_id>',views.deleteproduct,name='delete-product'),
    path('Bdelete-product/<slug:p_id>',views.Bdeleteproduct,name='Bdelete-product'),
    path('update_product/<slug:id>',views.updateproduct),
    path('editproduct/<int:id>',views.editproduct),
    path("password_reset/", views.password_reset_request, name="password_reset"),
]
| Samitalimbu/OnlineMusicalInstruments | Kitab/urls.py | urls.py | py | 2,090 | python | en | code | 0 | github-code | 13 |
1189690192 |
from __future__ import unicode_literals
from wxpy import *
from wechat_sender import listen
bot = Bot()
my = bot.friends()
'''
my1 = bot.friends().search('吴震')[0]
my2 = bot.friends().search('吴明')[0]
my3 = bot.friends().search('朱依心')[0]
'''
@bot.register(Friend)
def reply_test(msg):
    """Auto-reply to every friend message with a pointer to the public account."""
    msg.reply('欢迎关注,更多内容请关注公众号--SQuant')
@bot.register(msg_types=FRIENDS)
def auto_accept_friends(msg):
    """Accept friend requests whose verification text contains 'sigma'
    (case-insensitively) and greet the new friend."""
    if 'sigma' in msg.text.lower():
        new_friend = bot.accept_friend(msg.card)
        new_friend.send('sigma小助手为您服务')
listen(bot,receivers=my,port=10111)
| WUZHEN1991/sigma_dati | wechat.py | wechat.py | py | 611 | python | en | code | 0 | github-code | 13 |
34573015212 | population = [int(x) for x in input().split(", ")]
min_wealth = int(input())
while True:
    count = len(population)
    # first check whether distributing the wealth is possible at all
    if sum(population) < min_wealth * count:
        print("No equal distribution possible")
        break
    # everyone already has at least the minimum — done
    if all(i >= min_wealth for i in population):
        print(population)
        break
    # redistribute: move wealth from the richest person to the poorest one
    max_num = max(population)
    min_num = min(population)
    index_of_max = population.index(max_num)
    index_of_min = population.index(min_num)
    if min_num < min_wealth:
        if max_num > min_wealth:
            needed_wealth_to_give = min_wealth - min_num
            population.remove(max_num)
            population.remove(min_num)
            max_num -= needed_wealth_to_give
            min_num += needed_wealth_to_give
            # NOTE(review): remove() shifts later elements, so the saved
            # indices may no longer point at the original positions — confirm
            # the resulting ordering is acceptable.
            population.insert(index_of_max, max_num)
            population.insert(index_of_min, min_num)
16156277413 | import re
# doc='''Dave Martin
# 615-555-7164
# 173 Main St., Springfield RI 55924
# davemartin@bogusemail.com
# Charles Harris
# 800-555-5669
# 969 High St., Atlantis VA 34075
# charlesharris@bogusemail.com
# Eric Williams
# 560-555-5153
# 806 1st St., Faketown AK 86847
# laurawilliams@bogusemail.com
# Corey Jefferson
# 900-555-9340
# 826 Elm St., Epicburg NE 10671
# coreyjefferson@bogusemail.com'''
# Read the sample data; 'with' guarantees the handle is closed
# (the original opened the file and never closed it).
with open("data.txt") as f:
    text = f.read()

# Print every email-like token found in the text.
email_pattern = re.compile(r'[\w\.]+@[\w\.]+')
for match in email_pattern.finditer(text):
    print(match)

# Swap the fake domain for gmail and echo the rewritten text
# (same bytes as the original character-by-character loop).
rewritten = re.sub("bogusemail", "gmail", text)
print(rewritten, end="")
74772215378 | import os
import pickle
import time
import gym
import gym_grid
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions.categorical import Categorical
from torch.utils.tensorboard import SummaryWriter
from sympy.utilities.iterables import multiset_permutations
import sys
# select the device to train on
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# class to save the agent's experience buffer
class Memory_buffer():
    """Rollout storage for one batch of PPO experience.

    Holds parallel lists (one entry per environment step) that are consumed
    by the PPO update and then discarded.
    """

    def __init__(self):
        # delegate to clear() so the field list lives in exactly one place
        # (the original duplicated the five assignments in both methods)
        self.clear()

    def clear(self):
        """Reset all trajectory lists, dropping any stored experience."""
        self.actions = []
        self.states = []
        self.logprobs = []
        self.rewards = []
        self.is_terminals = []
# create the NN architecture for the PPO agent
class Agent(nn.Module):
    """Actor-critic network: a scalar value head and a softmax policy head,
    each a 2-hidden-layer (64 units, tanh) MLP over the raw state vector."""

    def __init__(self, state_dim, action_dim):
        super(Agent, self).__init__()
        # critic: state -> scalar value estimate
        self.critic = nn.Sequential(
            nn.Linear(state_dim, 64),
            nn.Tanh(),
            nn.Linear(64, 64),
            nn.Tanh(),
            nn.Linear(64, 1)
        )
        # actor: state -> probability distribution over the discrete actions
        self.actor = nn.Sequential(
            nn.Linear(state_dim, 64),
            nn.Tanh(),
            nn.Linear(64, 64),
            nn.Tanh(),
            nn.Linear(64, action_dim),
            nn.Softmax(dim=-1)
        )

    def get_action(self, state):
        """Sample an action from the current policy.

        Returns (action, log_prob, action_probs), all detached and moved to
        the CPU — suitable for rollout collection, not for backprop.
        """
        action_probs = self.actor(state)
        dist = Categorical(action_probs)
        action = dist.sample()
        action_logprob = dist.log_prob(action)
        return action.detach().cpu(), action_logprob.detach().cpu(), action_probs.detach().cpu()

    def evaluate(self, state, action):
        """Score stored state/action batches under the current policy.

        Returns (log_probs, state_values, dist_entropy) with gradients
        attached, for use in the PPO loss.
        """
        action_probs = self.actor(state)
        dist = Categorical(action_probs)
        action_logprobs = dist.log_prob(action)
        dist_entropy = dist.entropy()
        value = self.critic(state)
        return action_logprobs, value, dist_entropy
# Class that trains the policy
class PPO(object):
    """Clipped-surrogate PPO trainer holding the learned policy and a frozen
    rollout ('old') copy that collects experience between updates."""

    def __init__(self, state_dim, action_dim, lr_actor, lr_critic, gamma, num_epochs, eps_clip, entropy, env_size):
        self.gamma = gamma            # discount factor for returns
        self.eps_clip = eps_clip      # PPO clipping range epsilon
        self.num_epochs = num_epochs  # optimisation epochs per update()
        self.entropy = entropy        # entropy bonus coefficient
        self.env_size = env_size      # divisor that scales raw states before the network
        self.mem_buffer = Memory_buffer()
        self.policy = Agent(state_dim, action_dim).to(device)
        # separate learning rates for the actor and critic heads
        self.optim = optim.Adam([
            {'params': self.policy.actor.parameters(), 'lr': lr_actor},
            {'params': self.policy.critic.parameters(), 'lr': lr_critic}
        ])
        # frozen copy of the policy used while collecting rollouts
        self.policy_old = Agent(state_dim, action_dim).to(device)
        self.policy_old.load_state_dict(self.policy.state_dict())
        self.loss = nn.MSELoss()

    def select_action(self, state):
        """Sample an action from the rollout policy and record the transition
        (scaled state, action, log-prob) into the memory buffer.

        Returns (action index, action probability vector as a numpy array).
        """
        with torch.no_grad():
            state = torch.FloatTensor(state).to(device)
            state /= self.env_size  # normalise coordinates by the environment size
            action, action_logprob, action_prob = self.policy_old.get_action(state)
        self.mem_buffer.states.append(state)
        self.mem_buffer.actions.append(action)
        self.mem_buffer.logprobs.append(action_logprob)
        return action.item(), action_prob.cpu().numpy()

    def update(self):
        """Run one PPO update from the buffered rollout, refresh policy_old,
        and clear the buffer."""
        # Monte Carlo estimate of returns, walking the rollout backwards and
        # restarting the discounted sum at each episode boundary
        rewards = []
        discounted_reward = 0
        for reward, is_terminal in zip(reversed(self.mem_buffer.rewards), reversed(self.mem_buffer.is_terminals)):
            if is_terminal:
                discounted_reward = 0
            discounted_reward = reward + (self.gamma * discounted_reward)
            rewards.insert(0, discounted_reward)
        # Normalizing the rewards (epsilon guards a zero std)
        rewards = torch.tensor(rewards, dtype=torch.float32).to(device)
        rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-7)
        # convert list to tensor
        old_states = torch.squeeze(torch.stack(self.mem_buffer.states, dim=0)).detach().to(device)
        old_actions = torch.squeeze(torch.stack(self.mem_buffer.actions, dim=0)).detach().to(device)
        old_logprobs = torch.squeeze(torch.stack(self.mem_buffer.logprobs, dim=0)).detach().to(device)
        # Optimize policy for K epochs
        for _ in range(self.num_epochs):
            # Evaluating old actions and values
            logprobs, state_values, dist_entropy = self.policy.evaluate(old_states, old_actions)
            # match state_values tensor dimensions with rewards tensor
            state_values = torch.squeeze(state_values)
            # Finding the ratio (pi_theta / pi_theta__old)
            ratios = torch.exp(logprobs - old_logprobs.detach())
            # Finding Surrogate Loss
            advantages = rewards - state_values.detach()
            surr1 = ratios * advantages
            surr2 = torch.clamp(ratios, 1-self.eps_clip, 1+self.eps_clip) * advantages
            # final loss of clipped objective PPO: policy term + value term - entropy bonus
            loss = -torch.min(surr1, surr2) + 0.5*self.loss(state_values, rewards) - self.entropy*dist_entropy
            # take gradient step
            self.optim.zero_grad()
            loss.mean().backward()
            self.optim.step()
        # Copy new weights into old policy
        self.policy_old.load_state_dict(self.policy.state_dict())
        # clear buffer
        self.mem_buffer.clear()

    def save(self, checkpoint_path):
        """Serialise the rollout policy's weights to checkpoint_path."""
        torch.save(self.policy_old.state_dict(), checkpoint_path)

    def load(self, checkpoint_path):
        """Load weights from checkpoint_path into both policies (CPU-mapped)."""
        self.policy_old.load_state_dict(torch.load(checkpoint_path, map_location=lambda storage, loc: storage))
        self.policy.load_state_dict(torch.load(checkpoint_path, map_location=lambda storage, loc: storage))
def balance(b1, b2):
    """Balance between two belief masses: 1.0 when equal, near 0 when disjoint."""
    # The small epsilon keeps the ratio defined when both masses are zero.
    return 1 - abs(b1 - b2) / (b1 + b2 + 1e-7)
if __name__ == "__main__":
    env_name = 'gym-grid-v0'  # name of the environment you want to use
    save_name = 'RL'  # save the file
    # CLI: argv[1] = uncertainty-aware flag, argv[2] = exploration flag,
    # argv[3] = uncertainty measure ('H' entropy / 'D' dissonance / 'V' vacuity).
    uncertainty_aware = sys.argv[1]  # use entropy-aware design
    exp_exp = sys.argv[2]
    if exp_exp == 'true':
        save_name += 'G'
    elif exp_exp == 'false':
        save_name += 'R'
    if uncertainty_aware == 'true':
        uncertainty_aware = True
        save_name += '_{}'.format(sys.argv[3])
    # NOTE(review): when argv[1] is not 'true', uncertainty_aware remains a
    # non-empty string, which is truthy in the `if uncertainty_aware:` check
    # inside the training loop below — confirm this is intended.
    save_name += '40'
    # log data
    run_name = f"{env_name}_{save_name}"
    writer = SummaryWriter(f"runs/{run_name}")
    max_ep_len = 400  # time horizon
    max_training_timesteps = max_ep_len * 2000  # training episodes
    ################ PPO hyperparameters ################
    update_timestep = max_ep_len * 4  # update policy every n timesteps
    num_epochs = 40  # update policy for K epochs in one PPO update
    entropy_coeff = 0.05  # entropy coefficient to encourage exploration
    epsilon_0 = 0.05  # base entropy coefficient for the annealed variants
    eps_clip = 0.2  # clip parameter for PPO
    gamma = 0.99  # discount factor
    env = gym.make(env_name)
    W = env.action_space.n  # parameter W for uncertainty
    a = 1 / 4  # base rate, assumed uniform
    lr_actor = 0.0003  # learning rate for actor network
    lr_critic = 0.001  # learning rate for critic network
    state_dim = env.observation_space.shape[0]  # dimension of observation space
    action_dim = env.action_space.n  # dimension of action space
    # save trained models
    dir = f'models/{save_name}/'
    if not os.path.exists(dir):
        os.makedirs(dir)
    checkpoint_path = dir + 'PPO_model'
    # save each episode render
    render_dir = 'episode_renders/{}/'.format(save_name)
    if not os.path.exists(render_dir):
        os.makedirs(render_dir)
    # PPO agent
    agent = PPO(state_dim, action_dim, lr_actor, lr_critic, gamma, num_epochs, eps_clip, entropy_coeff, max(env.observation_space.high))
    start_time = time.time()
    time_step = 0
    i_episode = 0
    # Per-step / per-episode logs kept for post-hoc uncertainty analysis.
    ACTION_PDF = []
    STATES = []
    ENTROPY = []
    VACUITY = []
    DISSONANCE = []
    REWARDS = []
    REACHED_GOAL = []
    entropy = 0.
    dissonance = 0.
    u_dd = 0.
    # the agent interacts with the environment for the given number of episodes
    while time_step <= max_training_timesteps:
        states = []
        state = env.reset()
        states.append(state.copy())
        ep_reward = 0
        ep_terminal = 0
        for t in range(1, max_ep_len+1):
            action, action_pdf = agent.select_action(state)
            ACTION_PDF.append(action_pdf.copy())
            STATES.append(state.copy())
            state, reward, done, _ = env.step(action)
            states.append(state.copy())
            if uncertainty_aware:
                if sys.argv[2] == 'true':
                    # exploration variant: anneal the entropy bonus as the
                    # chosen uncertainty measure shrinks
                    if sys.argv[3] == 'H':
                        entropy_coeff = epsilon_0 * (1 - entropy)
                    elif sys.argv[3] == 'D':
                        entropy_coeff = epsilon_0 * (1 - dissonance)
                    elif sys.argv[3] == 'V':
                        entropy_coeff = epsilon_0 * (1 - u_dd)
                else:
                    # reward-shaping variant: add a certainty bonus to the reward
                    if sys.argv[3] == 'H':
                        reward += 1 - entropy
                    elif sys.argv[3] == 'D':
                        reward += 1 - dissonance
                    elif sys.argv[3] == 'V':
                        reward += 1 - u_dd
            agent.mem_buffer.rewards.append(reward)
            agent.mem_buffer.is_terminals.append(done)
            time_step += 1
            ep_reward += reward
            if time_step % update_timestep == 0:
                agent.update()
            if time_step % 100000 == 0:
                print()
                print('-------------------')
                print('Saving model at: ', checkpoint_path)
                agent.save(checkpoint_path)
            if done:
                # check if the agent reached the goal
                # NOTE(review): (agent - goal).all() is True when every
                # coordinate DIFFERS from the goal — verify this success test
                # is not inverted.
                if (env.agent_position - env.goal_position).all():
                    ep_terminal = 1
                else:
                    ep_terminal = 0
                break
        REACHED_GOAL.append(ep_terminal)
        # calculate the uncertainty using saved experience
        dirichlet = np.asarray(ACTION_PDF)
        evidence = np.argmax(dirichlet, axis=1)
        unique, counts = np.unique(evidence, return_counts=True)
        r = dict(zip(unique, counts))
        belief = {0: 0., 1: 0., 2: 0., 3: 0.}
        proj_prob = {0: 0., 1: 0., 2: 0., 3: 0.}
        # vacuity: uncertainty mass left after distributing evidence
        u = W / (W + len(evidence))
        VACUITY.append(u)
        entropy = 0.
        # calculate the belief, projected probability, and entropy
        for key in r:
            belief[key] = r[key] / (W + len(evidence))
            proj_prob[key] = belief[key] + a * u
            # base-4 entropy (log base = number of actions)
            entropy -= proj_prob[key] * (np.log(proj_prob[key]) / np.log(4))
        ENTROPY.append(entropy)
        u_dd = np.inf
        for k in proj_prob:
            u_dd = min(u_dd, proj_prob[k]/a)
        # calculate dissonance
        bal = {}
        permutations = multiset_permutations(range(4), 2)
        for p in permutations:
            bal[str(p)] = balance(belief[p[0]], belief[p[1]])
        dissonance = 0.
        set_without_xi = [0, 1, 2, 3]
        for xi in range(4):
            set_without_xi.remove(xi)
            num = 0.
            den = 1e-7
            for xj in set_without_xi:
                num += belief[xj] * bal[f'[{int(xi)}, {int(xj)}]']
                den += belief[xj]
            dissonance += belief[xi] * num / den
            # restore the full action set before the next xi
            set_without_xi = [0, 1, 2, 3]
        DISSONANCE.append(dissonance)
        REWARDS.append(ep_reward)
        # render the episode
        states = np.asarray(states, dtype=np.int16)
        if i_episode > max_training_timesteps - 100:
            env.save_episode(states, render_dir, i_episode)
        writer.add_scalar('performance/reward', ep_reward, i_episode)
        print('Episode: {}, Reward: {}'.format(i_episode, ep_reward))
        i_episode += 1
    # persist all logs for offline analysis
    ACTION_PDF = np.asarray(ACTION_PDF)
    STATES = np.asarray(STATES)
    ENTROPY = np.asarray(ENTROPY)
    VACUITY = np.asarray(VACUITY)
    DISSONANCE = np.asarray(DISSONANCE)
    REWARDS = np.asarray(REWARDS)
    REACHED_GOAL = np.asarray(REACHED_GOAL)
    pickle.dump(STATES, open(checkpoint_path + '_states.pkl', 'wb'))
    pickle.dump(ACTION_PDF, open(checkpoint_path + '_action_pdf.pkl', 'wb'))
    pickle.dump(DISSONANCE, open(checkpoint_path + '_dissonance.pkl', 'wb'))
    pickle.dump(ENTROPY, open(checkpoint_path + '_entropy.pkl', 'wb'))
    pickle.dump(VACUITY, open(checkpoint_path + '_vacuity.pkl', 'wb'))
    pickle.dump(REWARDS, open(checkpoint_path + '_rewards.pkl', 'wb'))
    pickle.dump(REACHED_GOAL, open(checkpoint_path + '_reached_goal.pkl', 'wb'))
    env.close()
    writer.close()
    print('Total time for training: ', time.time() - start_time, ' seconds')
| SagarParekh97/Decision-Making-under-Uncertainty | P2/PPO.py | PPO.py | py | 12,813 | python | en | code | 0 | github-code | 13 |
1385325825 | import math
import torch
def adv_perturbation(x, opB, opR, c_B, eta, lr, num_iters, device):
    """
    Computes an adversarial perturbation e = [e1;e2] (assuming e1 = e2) for
    a (subdifferentiable) reconstruction method for recovering a vector x given
    measurements y1 = B @ x + e1, y2 = B @ x + e2. Expressed in stacking
    form, with y = [y1;y2] and A = [B;B], then y = A @ x + e.
    Here B is assumed to be a linear operator where B @ B^* = c*I for some
    constant c > 0. This means the block matrix B has full row rank and its
    Moore-Penrose pseudoinverse is B^*/c.
    Inspired by [1] and [2], this function implements projected gradient
    ascent to solve the nonconvex optimization problem
    max ||R(y + e) - R(y)||_2 s.t. ||e1||_2 <= eta, e1 = e2.
    Due to the nonconvexity, gradient ascent should be run several times with
    different randomly initialized perturbations e. The e that yielding
    largest objective value is selected and is returned.
    NOTE: To avoid confusion, if the solver R uses a noise level of NOISE,
    so that it expects ||e||_2 <= NOISE, then if we want the perturbation
    noise level to be X times NOISE, then eta = X * NOISE / sqrt(2).
    Args:
        x (torch.Tensor) : ground truth vector
        opB (function) : measurement operator B
        opR (function) : reconstruction map
        c_B (float) : constant c for which B @ B^* = c*I
        eta (float) : constraint for e1 (not e!)
        lr (float) : learning rate for gradient ascent
        num_iters (int) : number of iterations of gradient ascent
        device : CPU or GPU as returned by torch.device
    Returns:
        best_e (torch.Tensor) : worst-case noise perturbation
    References:
        [1] Ch. 19.4. "Compressive Imaging: Structure, Sampling, Learning"
        Adcock, et al. ISBN:9781108421614.
        [2] Sec. 3.4. "Solving Inverse Problems With Deep Neural Networks --
        Robustness Included?" Genzel, et al. arXiv:2011.04268.
    """
    y = opB(x,1)
    x_rec = opR(y)  # clean reconstruction R(y), the baseline to perturb against
    N = x.shape[0]
    m = y.shape[0]
    # squared-norm function for complex tensors
    sq_norm = lambda z : torch.vdot(z,z)
    # Negated objective so that SGD *minimization* performs gradient ascent
    # on the reconstruction error.
    obj_fn = lambda e : -0.5*sq_norm(opR(y+e)-x_rec)
    best_obj_val = -float('Inf')
    obj_val = None
    best_e = None
    # NOTE(review): only a single random initialization is performed here,
    # although the docstring recommends several restarts — callers are
    # expected to loop externally.
    noise = torch.randn(2*m, dtype=torch.float64, device=device)
    noise = (eta/math.sqrt(m))*noise/torch.linalg.norm(noise,ord=2)
    # interpret the real 2m-vector as an m-vector of complex perturbations
    e = noise[:m] + 1j*noise[m:]
    e.requires_grad_()
    optimizer = torch.optim.SGD([e], lr=lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', min_lr=1e-5)
    for i in range(num_iters):
        # descent step
        optimizer.zero_grad()
        obj = obj_fn(e)
        obj.backward()
        optimizer.step()
        with torch.no_grad():
            # projection back onto the eta-ball
            e_len = torch.linalg.norm(e,ord=2)
            if e_len > eta:
                e.multiply_(eta/e_len)
            # evaluate the (positive) objective after projection
            obj_val = -torch.real(obj_fn(e))
            scheduler.step(obj_val)
            obj_val = obj_val.cpu()
            print(
                'Step %-3d -- norm(e): %.3e -- obj val: %.5e -- lr: %.2e' %
                (
                    i+1,
                    min(eta, float(e_len)),
                    float(obj_val),
                    float(optimizer.param_groups[0]['lr']),
                )
            )
            # keep the best perturbation found so far
            if obj_val > best_obj_val:
                best_obj_val = obj_val
                best_e = e.detach().clone()
    return best_e
| mneyrane/MSc-thesis-NESTAnets | nestanet/stability.py | stability.py | py | 3,568 | python | en | code | 0 | github-code | 13 |
32871493562 | from typing import List, Optional
from pydantic import BaseModel
from enum import Enum
class Field(BaseModel):
    """Schema describing one column of a database table.

    NOTE: this class shadows ``pydantic.Field``; import pydantic's helper
    under an alias if it is ever needed in this module.
    """

    name_field: str  # column name
    type: str  # SQL type, e.g. VARCHAR, INT
    # Implicit-optional annotations like ``length: int = None`` are rejected
    # by pydantic v2, so the optional fields are declared explicitly.
    length: Optional[int] = None  # type length, e.g. VARCHAR(length)
    value: Optional[str] = None
    primary_key: bool = False
    not_null: bool = False
    unique: bool = False
    default: Optional[str] = None  # SQL DEFAULT clause value
    auto_increment: bool = False
    collation: Optional[str] = 'latin1_swedish_ci'
    comment: Optional[str] = None
class Engine(str, Enum):
    """MySQL storage engines supported by the table builder.

    Subclassing ``str`` lets members compare equal to their literal values
    and serialize directly into SQL/JSON.
    """
    MYISAM = 'MyISAM'
    MEMORY = 'Memory'
    INNODB = 'InnoDB'
    ARCHIVE = 'Archive'
    NDB = 'NDB'
class Table(BaseModel):
    """Schema for a database table: name, columns, collation and engine."""
    name: str
    fields: List[Field]  # column definitions
    collate: str = 'latin1_swedish_ci'
    # pydantic coerces the plain string to Engine.INNODB via the enum value.
    engine: Engine = 'InnoDB'
| vuminhhieucareer172/SparkPushNotification | backend/schemas/table.py | table.py | py | 683 | python | en | code | 0 | github-code | 13 |
23172846319 | from django.urls import path
from . import views
# URL routes for the treasure-hunt app.  Each puzzle route carries the
# submitted answer as an integer path parameter ``ans``.
urlpatterns = [
    path('', views.home, name = 'home'),
    path('login', views.login, name = 'login'),
    path('logout', views.logout_view, name = 'logout'),
    # NOTE(review): the view is named "sigunup" (likely a typo for "signup")
    # in views.py; renaming requires a matching change there.
    path('signup', views.sigunup, name = 'signup'),
    path('puzzle1/<int:ans>', views.puzzle1, name = 'puzzle1'),
    path('puzzle2/<int:ans>', views.puzzle2, name = 'puzzle2'),
    path('puzzle3/<int:ans>', views.puzzle3, name = 'puzzle3')
]
956814392 | # https://projecteuler.net/problem=10
import math
import unittest
from multiprocessing import Pool, Process
def sieve_of_eratosthenes(n):
    """Print every prime <= n (like the original) and return them as a list.

    Uses a boolean composite array instead of the original list-membership
    test, which was quadratic and appended duplicate multiples.

    :param n: upper bound (inclusive)
    :return: list of primes <= n (new, backward-compatible return value)
    """
    composite = [False] * (n + 1)
    primes = []
    for i in range(2, n + 1):
        if not composite[i]:
            print(i)  # preserve the original's printing behavior
            primes.append(i)
            for j in range(i * i, n + 1, i):
                composite[j] = True
    return primes
def sieve_of_atkin(limit):
    """Return the list of primes below ``limit`` using the sieve of Atkin.

    Fixes versus the original:
    - removed a leftover debug ``print(x, y)`` from the inner loop;
    - the square-elimination pass now runs through int(sqrt(limit))
      *inclusive* (``range(5, root + 1)``); the original off-by-one left
      squares such as 25 marked prime for small limits (e.g. limit=30).

    :param limit: exclusive upper bound for the primes collected
    :return: list of primes p with 5 <= p < limit, preceded by 2 and 3
    """
    P = [2, 3]
    sieve = [False] * (limit + 1)
    root = int(math.sqrt(limit))
    # Toggle candidates via the three quadratic forms of the Atkin sieve.
    for x in range(1, root + 1):
        for y in range(1, root + 1):
            n = 4 * x ** 2 + y ** 2
            if n <= limit and (n % 12 == 1 or n % 12 == 5):
                sieve[n] = not sieve[n]
            n = 3 * x ** 2 + y ** 2
            if n <= limit and n % 12 == 7:
                sieve[n] = not sieve[n]
            n = 3 * x ** 2 - y ** 2
            if x > y and n <= limit and n % 12 == 11:
                sieve[n] = not sieve[n]
    # Eliminate squares of primes and their square multiples.
    for x in range(5, root + 1):
        if sieve[x]:
            for y in range(x ** 2, limit + 1, x ** 2):
                sieve[y] = False
    for p in range(5, limit):
        if sieve[p]:
            P.append(p)
    return P
def summation_of_primes(nth):
    """Return the sum of all primes below ``nth`` (Project Euler #10)."""
    return sum(sieve_of_atkin(nth))
class TestSummationOfPrimes(unittest.TestCase):
    """Regression test against the published Project Euler #10 answer."""

    def test_1_1(self):
        # Sum of all primes below two million.
        self.assertEqual(summation_of_primes(2000000), 142913828922)
if __name__ == '__main__':
    # Run the unittest suite when executed as a script.
    unittest.main()
| aj07mm/project_euler | problem_010.py | problem_010.py | py | 1,286 | python | en | code | 0 | github-code | 13 |
2061677900 | """
Cleans raw ELEXON data that was scraped using scrape_data.py
Each report requires a slightly different approach for cleaning
"""
import numpy as np
import pandas as pd
from forecast import check_dataframe
def print_duplicates(df):
    """Report and return the number of duplicated index labels in ``df``.

    Only occurrences beyond the first count as duplicates.
    """
    duplicated_rows = df[df.index.duplicated()]
    count = duplicated_rows.shape[0]
    print('{} duplicates'.format(count))
    if count:
        print('duplicates are:')
        print(duplicated_rows.head())
    return count
def remove_duplicates(df):
    """Drop rows whose index label already appeared, keeping the first one."""
    print('removing duplicates for {}'.format(df.columns))
    print_duplicates(df)
    keep_mask = ~df.index.duplicated(keep='first')
    return df[keep_mask]
def print_nans(df):
    """Report and return how many rows of ``df`` contain at least one NaN."""
    nan_rows = df[df.isnull().any(axis=1)]
    count = nan_rows.shape[0]
    print('{} nans'.format(count))
    if count:
        print('nan values are:')
        print(nan_rows.head())
    return count
def fill_nans(df):
    """Backfill NaNs in ``df`` after reporting how many rows contain them."""
    print('filling nans in {}'.format(df.columns))
    print_nans(df)
    return df.fillna(method='backfill')
def clean_price_data():
    """Load raw ELEXON B1770 imbalance prices and pivot to one column per
    price category ('Excess balance' / 'Insufficient balance').

    Returns a time-stamp-indexed frame with duplicate index entries removed.
    """
    price = pd.read_csv('./data/raw/B1770.csv', parse_dates=True)
    price = price.pivot_table(values='imbalancePriceAmountGBP',
                              index='time_stamp',
                              columns='priceCategory')
    price.index = pd.to_datetime(price.index)
    return remove_duplicates(price)
def clean_vol_data():
    """Load raw ELEXON B1780 imbalance volumes, indexed and sorted by
    time stamp, with duplicate index entries removed."""
    vol = pd.read_csv('./data/raw/B1780.csv', index_col=0, parse_dates=True)
    vol = vol.set_index('time_stamp', drop=True).sort_index()
    vol.index = pd.to_datetime(vol.index)
    return remove_duplicates(vol)
if __name__ == '__main__':
    # Merge price and volume data onto a regular half-hourly index,
    # derive the single imbalance price, fill gaps, validate and save.
    price = clean_price_data()
    vol = clean_vol_data()
    merged = pd.concat([price, vol], axis=1)
    # BUGFIX: the pd.DatetimeIndex(freq=..., start=..., end=...) constructor
    # was deprecated and removed in pandas 1.0; date_range is the supported
    # way to build a regular half-hourly index.
    idx = pd.date_range(start=merged.index[0], end=merged.index[-1], freq='30min')
    out = pd.DataFrame(index=idx)
    out.loc[price.index, 'ImbalancePrice_excess_balance_[£/MWh]'] = price.loc[:, 'Excess balance']
    out.loc[price.index, 'ImbalancePrice_insufficient_balance_[£/MWh]'] = price.loc[:, 'Insufficient balance']
    out.loc[vol.index, 'ImbalanceVol_[MW]'] = vol.loc[:, 'imbalanceQuantityMAW']
    # Positive net imbalance volume -> system long -> excess-balance price,
    # otherwise insufficient-balance price.
    out['Imbalance_price [£/MWh]'] = np.where(
        out['ImbalanceVol_[MW]'] > 0,
        out['ImbalancePrice_excess_balance_[£/MWh]'],
        out['ImbalancePrice_insufficient_balance_[£/MWh]']
    )
    out = fill_nans(out)
    check_dataframe(out, freq='30min')
    out.to_csv('data/clean.csv')
| ADGEfficiency/forecast | projects/elexon/cleaning_data.py | cleaning_data.py | py | 2,397 | python | en | code | 19 | github-code | 13 |
29956660589 | from functools import wraps
from typing import Any, Callable, TypeVar
T = TypeVar('T', bound=Any)
def gameover(game: Callable[..., T]) -> Callable[..., T | None]:
@wraps(game)
def wrapper(*args, **kwargs) -> T | None:
try:
return game(*args, **kwargs)
except KeyboardInterrupt:
return print(
'\n'
'\n'
'Game Over: process terminated by keyboard interruption.'
)
return wrapper
| Lingxuan-Ye/games | lib/decorators/lifecycle.py | lifecycle.py | py | 494 | python | en | code | 0 | github-code | 13 |
31740533985 | # -*- coding: utf-8 -*-
# @Time : 2023/10/15 下午3:38
# @Author : nanji
# @Site :
# @File : adaboost_c.py
# @Software: PyCharm
# @Comment :
import numpy as np
from machinelearn.decision_tree_04.decision_tree_C \
import DecisionTreeClassifier
import copy
class SAMMERClassifier:
    """SAMME.R: the real-valued (probability-based) multi-class AdaBoost.

    Extends SAMME to use class-probability estimates from each base learner
    (Hastie et al., "Multi-class AdaBoost").
    """

    def __init__(self, base_estimator=None, n_estimators=10):
        """
        :param base_estimator: base learner, or a list of heterogeneous learners
        :param n_estimators: number of base learners T (ignored when a list
            of learners is supplied)
        """
        self.base_estimator = base_estimator
        self.n_estimators = n_estimators
        # Default base learner: a decision tree of depth 2.
        if self.base_estimator is None:
            self.base_estimator = DecisionTreeClassifier(max_depth=2)
        if type(base_estimator) != list:
            # homogeneous ensemble: T independent copies of one learner
            self.base_estimator = [copy.deepcopy(self.base_estimator)
                                   for _ in range(self.n_estimators)]
        else:
            # heterogeneous ensemble: one learner per list entry
            self.n_estimators = len(self.base_estimator)
        self.estimator_weights = []  # per-learner weights (unused by SAMME.R)
        self.n_sample, self.n_class = None, None

    def _target_encoding(self, y_train):
        """Encode labels into the SAMME.R coding matrix.

        Row i is -1/(K-1) everywhere except 1 in the column of sample i's class.

        :param y_train: integer class labels
        :return: (n_sample, n_class) coding matrix
        """
        self.n_sample, self.n_class = len(y_train), len(set(y_train))
        target = -1.0 / (self.n_class - 1) * \
                 np.ones((self.n_sample, self.n_class))
        for i in range(self.n_sample):
            target[i, y_train[i]] = 1  # mark the sample's own class column
        return target

    def fit(self, x_train, y_train):
        """Train each base learner on the reweighted training set.

        :param x_train: 2-D training array, m*k
        :param y_train: integer class labels
        """
        x_train, y_train = np.asarray(x_train), np.asarray(y_train)
        target = self._target_encoding(y_train)
        # Uniform initial weights of 1.0 (sum = n), matching what the
        # homegrown base learners expect.
        sample_weights = np.ones(self.n_sample)
        c = (self.n_class - 1) / self.n_class
        for idx in range(self.n_estimators):
            # 1. fit on the current weight distribution and get probabilities
            self.base_estimator[idx].fit(x_train, y_train, sample_weights)
            y_pred = self.base_estimator[idx].predict_proba(x_train)
            # guard against log(0), mirroring predict_proba below
            np.clip(y_pred, np.finfo(y_pred.dtype).eps, None, out=y_pred)
            # SAMME.R weight update: w_i *= exp(-c * y_i^T log p(x_i)).
            # BUGFIX: the inner product must be reduced over the class axis;
            # the original multiplied the (n,) weights by an (n, K) matrix.
            sample_weights *= np.exp(-c * np.sum(target * np.log(y_pred), axis=1))
            # BUGFIX: renormalize so the weights again sum to n.  The original
            # `w /= sum(w) * n` divided by n instead of multiplying by it.
            sample_weights = sample_weights / np.sum(sample_weights) * self.n_sample

    def predict_proba(self, x_test):
        """Soft-vote class probabilities for the samples in ``x_test``.

        :param x_test: 2-D test array
        :return: (n_test, n_class) probability matrix
        """
        x_test = np.asarray(x_test)
        C_x = np.zeros((x_test.shape[0], self.n_class))
        for i in range(self.n_estimators):
            y_prob = self.base_estimator[i].predict_proba(x_test)
            np.clip(y_prob, np.finfo(y_prob.dtype).eps, None, out=y_prob)
            y_ln = np.log(y_prob)
            # SAMME.R additive score: (K-1) * (log p - mean_k log p)
            C_x += (self.n_class - 1) * (y_ln - 1.0 / self.n_class * np.sum(y_ln, axis=1, keepdims=True))
        return self.softmax_func(C_x)

    @staticmethod
    def softmax_func(x):
        """Row-wise softmax, shifted by the global max to avoid overflow.

        :param x: batch_size * n_classes score matrix
        :return: probability matrix of the same shape
        """
        exps = np.exp(x - np.max(x))
        exp_sum = np.sum(exps, axis=1, keepdims=True)
        return exps / exp_sum

    def predict(self, x_test):
        """Predict the class index for each sample in ``x_test``."""
        return np.argmax(self.predict_proba(x_test), axis=1)
| lixixi89055465/py_stu | machinelearn/ensemble_learning_08/adaboost/adaboost_discrete_c.py | adaboost_discrete_c.py | py | 4,204 | python | en | code | 1 | github-code | 13 |
2265902891 | import tkinter as tk
# Minimal Tk demo: a dropdown of file-size units whose current selection is
# mirrored into the module-level ``fileSize`` variable via a variable trace.

# Unit choices shown in the dropdown.
fileSizeList = [
    "Bytes",
    "Kilobytes",
    "Megabytes",
    "Gigabytes"
]

app = tk.Tk()
fileSize = ''  # last selection; updated by the trace callback below
app.geometry('100x200')

sizeMenuVal = tk.StringVar(app)
sizeMenuVal.set(fileSizeList[0])  # default to "Bytes"

sizeMenu = tk.OptionMenu(app, sizeMenuVal, *fileSizeList)
sizeMenu.config(width=90, font=('Helvetica', 12))
sizeMenu.pack(side="top")

labelTest = tk.Label(text="", font=('Helvetica', 12), fg='red')
labelTest.pack(side="top")


def callback(*args):
    # Tk variable traces pass (name, index, mode); only the value is needed.
    global fileSize
    fileSize = sizeMenuVal.get()


# Fire the callback on every write ("w") to the StringVar.
sizeMenuVal.trace("w", callback)
app.mainloop() | Mobenator/FileShift | dropdowntest.py | dropdowntest.py | py | 562 | python | en | code | 0 | github-code | 13 |
43003558709 | import pandas as pd
import numpy as np
'''inputs to func look something like this:
filename = 'test_dataset.csv'
list_nans = [np.nan, 'na', 88, 999]
new_df = missing_coder(filename=filename, list_nans=list_nans'''
def missing_coder(filename, list_nans):
    """Read ``filename`` as CSV (treating ``list_nans`` values as missing)
    and append a ``<col>_miss_flag`` column (1.0 = missing, 0.0 = present)
    for every column containing at least one missing value.

    :param filename: path to the CSV file
    :param list_nans: values to interpret as NaN, e.g. [np.nan, 'na', 88, 999]
    :return: DataFrame with the flag columns appended
    """
    df = pd.read_csv(filename, na_values=list_nans)
    for col in df.columns:
        null_mask = df[col].isnull()
        if null_mask.any():
            # Vectorized flag column.  BUGFIX: the original indexed a
            # positional buffer with index *labels*, which breaks for any
            # DataFrame without a default RangeIndex.
            df[col + '_miss_flag'] = null_mask.to_numpy().astype('float32')
    return df
13478514140 | from decimal import Decimal
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect, render
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView,
)
from accounting.models import Invoice
from accounting.views import generate_invoice
from stock.models import StockItem
from procedures.models import Procedure
from .forms import (
OwnerForm,
AnimalForm,
AnimalCaseForm,
VisitForm,
VisitFormSet,
VisitProcedureFormSet,
)
from .models import Owner, Animal, AnimalCase, Visit
from django.urls import reverse
from django.shortcuts import get_object_or_404
from django.core import serializers
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
class OwnerMixin:
    """Resolve the Owner referenced by the ``owner_id`` URL kwarg (404 if absent)."""

    def get_owner(self):
        owner_id = self.kwargs["owner_id"]
        return get_object_or_404(Owner, pk=owner_id)
class AnimalMixin(OwnerMixin):
    """Resolve the Animal from ``animal_id``, scoped to its owner."""

    def get_animal(self):
        owner = self.get_owner()
        animal_id = self.kwargs["animal_id"]
        return get_object_or_404(Animal, owner=owner, pk=animal_id)
class AnimalCaseMixin(AnimalMixin):
    """Resolve the AnimalCase from ``animalcase_id``, scoped to its animal."""

    def get_animalcase(self):
        animal = self.get_animal()
        animalcase_id = self.kwargs["animalcase_id"]
        return get_object_or_404(AnimalCase, animal=animal, pk=animalcase_id)
class VisitMixin(AnimalCaseMixin):
    """Resolve the Visit from ``visit_id``, scoped to its animal case."""

    def get_visit(self):
        animal_case = self.get_animalcase()
        visit_id = self.kwargs["visit_id"]
        return get_object_or_404(Visit, animal_case=animal_case, pk=visit_id)
def get_animalcase(owner_id, animal_id, animalcase_id):
    """Fetch an AnimalCase by id, verifying it belongs to the given animal/owner.

    NOTE(review): unlike the mixins above, this raises AnimalCase.DoesNotExist
    (an HTTP 500) instead of returning a 404 — consider get_object_or_404.
    """
    return AnimalCase.objects.get(
        animal__owner__id=owner_id, animal__id=animal_id, id=animalcase_id
    )
class OwnerListView(LoginRequiredMixin, ListView):
    """Paginated list of all owners."""
    model = Owner
    paginate_by = 10
class OwnerDetailView(LoginRequiredMixin, OwnerMixin, DetailView):
    """Owner detail page, listing the owner's animals."""
    model = Owner

    def get_object(self):
        return self.get_owner()

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["animal_list"] = Animal.objects.filter(owner=self.object)
        return context
class OwnerCreateView(LoginRequiredMixin, CreateView):
    """Create a new owner, then redirect to their detail page."""
    model = Owner
    form_class = OwnerForm
    template_name = "records/owner_form.html"

    def get_success_url(self):
        return reverse("records:owner_detail", kwargs={"owner_id": self.object.pk})
class OwnerUpdateView(LoginRequiredMixin, OwnerMixin, UpdateView):
    """Edit an owner, then redirect to their detail page."""
    model = Owner
    form_class = OwnerForm
    template_name = "records/owner_update_form.html"

    def get_object(self):
        return self.get_owner()

    def get_success_url(self):
        return reverse("records:owner_detail", kwargs={"owner_id": self.object.pk})
class OwnerDeleteView(LoginRequiredMixin, OwnerMixin, DeleteView):
    """Delete an owner, then return to the owner list."""
    model = Owner

    def get_object(self):
        return self.get_owner()

    def get_success_url(self):
        return reverse("records:owner_list")
class AnimalCreateView(LoginRequiredMixin, OwnerMixin, CreateView):
    """Create an animal under the owner taken from the URL."""
    model = Animal
    form_class = AnimalForm
    template_name = "records/animal_form.html"

    def form_valid(self, form):
        # Attach the owner from the URL rather than trusting form data.
        owner = self.get_owner()
        form.instance.owner = owner
        return super().form_valid(form)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["owner"] = self.get_owner()
        return context

    def get_success_url(self):
        return reverse(
            "records:owner_detail", kwargs={"owner_id": self.kwargs["owner_id"]}
        )
class AnimalDetailView(LoginRequiredMixin, AnimalMixin, DetailView):
    """Animal detail page, listing the animal's cases."""
    model = Animal

    def get_object(self):
        return self.get_animal()

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["animalcase_list"] = AnimalCase.objects.filter(animal=self.object)
        context["owner"] = self.get_owner()
        return context
class AnimalUpdateView(LoginRequiredMixin, AnimalMixin, UpdateView):
    """Edit an animal, then return to the owner's detail page."""
    model = Animal
    form_class = AnimalForm
    template_name = "records/animal_update_form.html"

    def get_object(self):
        return self.get_animal()

    def get_success_url(self):
        return reverse(
            "records:owner_detail", kwargs={"owner_id": self.kwargs["owner_id"]}
        )
class AnimalDeleteView(LoginRequiredMixin, AnimalMixin, DeleteView):
    """Delete an animal, then return to the owner's detail page."""
    model = Animal

    def get_object(self):
        return self.get_animal()

    def get_success_url(self):
        return reverse(
            "records:owner_detail", kwargs={"owner_id": self.kwargs["owner_id"]}
        )
class AnimalCaseCreateView(LoginRequiredMixin, AnimalMixin, CreateView):
    """Create a case under the animal taken from the URL."""
    model = AnimalCase
    form_class = AnimalCaseForm
    template_name = "records/animalcase_form.html"

    def form_valid(self, form):
        # Attach the animal from the URL rather than trusting form data.
        animal = self.get_animal()
        form.instance.animal = animal
        return super().form_valid(form)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["animal"] = self.get_animal()
        return context

    def get_success_url(self):
        return reverse(
            "records:animal_detail",
            kwargs={
                "owner_id": self.kwargs["owner_id"],
                "animal_id": self.kwargs["animal_id"],
            },
        )
class AnimalCaseDetailView(LoginRequiredMixin, AnimalCaseMixin, DetailView):
    """Case detail page, listing the case's visits."""
    model = AnimalCase

    def get_object(self):
        return self.get_animalcase()

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["visit_list"] = Visit.objects.filter(animal_case=self.object)
        context["animal"] = self.get_animal()
        context["owner"] = self.get_owner()
        return context
class AnimalCaseUpdateView(LoginRequiredMixin, AnimalCaseMixin, UpdateView):
    """Edit a case, then return to the animal's detail page."""
    model = AnimalCase
    form_class = AnimalCaseForm
    template_name = "records/animalcase_update_form.html"

    def get_object(self):
        return self.get_animalcase()

    def get_success_url(self):
        return reverse(
            "records:animal_detail",
            kwargs={
                "owner_id": self.kwargs["owner_id"],
                "animal_id": self.kwargs["animal_id"],
            },
        )
class AnimalCaseDeleteView(LoginRequiredMixin, AnimalCaseMixin, DeleteView):
    """Delete a case, then return to the animal's detail page."""
    model = AnimalCase

    def get_object(self):
        return self.get_animalcase()

    def get_success_url(self):
        return reverse(
            "records:animal_detail",
            kwargs={
                "owner_id": self.kwargs["owner_id"],
                "animal_id": self.kwargs["animal_id"],
            },
        )
@login_required
def visit_create_view(request, owner_id, animal_id, animalcase_id):
    """Create a visit (with stock-item and procedure formsets) under a case.

    On POST, saves the visit, decrements stock for the used items, and either
    streams a generated invoice PDF ("generate_pdf" in POST) or redirects to
    the visit's update page.  On GET (or invalid POST) renders the form.
    """
    animal_case = get_animalcase(owner_id, animal_id, animalcase_id)
    # Serialized price data consumed by the client-side total calculator.
    stock_items = StockItem.objects.all()
    stock_items_json = serializers.serialize(
        "json", stock_items, fields=("pk", "selling_price")
    )
    procedures = Procedure.objects.all()
    procedures_json = serializers.serialize("json", procedures, fields=("pk", "price"))
    if request.method == "POST":
        visit_form = VisitForm(request.POST, prefix="visit")
        formset = VisitFormSet(request.POST, prefix="visit_stock_item")
        procedure_formset = VisitProcedureFormSet(
            request.POST, prefix="visit_procedure"
        )
        if (
            visit_form.is_valid()
            and formset.is_valid()
            and procedure_formset.is_valid()
        ):
            # Save the visit first so the formsets can reference it.
            visit = visit_form.save(commit=False)
            visit.animal_case = animal_case
            visit.save()
            formset.instance = visit
            formset.save()
            procedure_formset.instance = visit
            procedure_formset.save()
            # Deduct the used quantities from stock.
            for form in formset.forms:
                stock_item = form.cleaned_data.get("stock_item")
                quantity = form.cleaned_data.get("quantity")
                stock_item.units_in_stock -= quantity
                stock_item.save()
            if "generate_pdf" in request.POST:
                buffer = generate_invoice(visit.pk)
                # Persist the PDF in the database
                content = buffer.read()
                invoice = Invoice(visit=visit, content=content)
                invoice.save()
                response = HttpResponse(content_type="application/pdf")
                response.write(content)
                response[
                    "Content-Disposition"
                ] = f"attachment; filename=faktura-{visit.pk}.pdf"
                response["X-Visit-Id"] = visit.pk
                return response
            else:
                return redirect(
                    "records:visit_update",
                    owner_id=owner_id,
                    animal_id=animal_id,
                    animalcase_id=animalcase_id,
                    visit_id=visit.pk,
                )
    else:
        # GET: empty forms bound to an unsaved Visit.
        visit_form = VisitForm(prefix="visit")
        formset = VisitFormSet(instance=Visit(), prefix="visit_stock_item")
        procedure_formset = VisitProcedureFormSet(
            instance=Visit(), prefix="visit_procedure"
        )
    # Placeholder update URL (visit_id=0) rewritten client-side after save.
    default_update_view_url = reverse(
        "records:visit_update",
        kwargs={
            "owner_id": owner_id,
            "animal_id": animal_id,
            "animalcase_id": animalcase_id,
            "visit_id": 0,
        },
    )
    context = {
        "visit_form": visit_form,
        "formset": formset,
        "animal_case": animal_case,
        "stock_items_json": stock_items_json,
        "procedures_json": procedures_json,
        "procedure_formset": procedure_formset,
        "default_update_view_url": default_update_view_url,
    }
    return render(request, "records/visit_form.html", context)
@login_required
def visit_update_view(request, owner_id, animal_id, animalcase_id, visit_id):
    """Edit an existing visit and its stock/procedure formsets.

    On POST, saves the changes, adjusts stock levels by the difference
    between the initial and new quantities, optionally regenerates the
    invoice PDF, and redirects to the case detail page.
    """
    animal_case = get_animalcase(owner_id, animal_id, animalcase_id)
    owner = get_object_or_404(Owner, pk=owner_id)
    animal = get_object_or_404(Animal, pk=animal_id)
    visit = get_object_or_404(Visit, pk=visit_id)
    # Serialized price data consumed by the client-side total calculator.
    stock_items = StockItem.objects.all()
    stock_items_json = serializers.serialize(
        "json", stock_items, fields=("pk", "selling_price")
    )
    procedures = Procedure.objects.all()
    procedures_json = serializers.serialize("json", procedures, fields=("pk", "price"))
    if request.method == "POST":
        visit_form = VisitForm(request.POST, instance=visit, prefix="visit")
        formset = VisitFormSet(request.POST, instance=visit, prefix="visit_stock_item")
        procedure_formset = VisitProcedureFormSet(
            request.POST, instance=visit, prefix="visit_procedure"
        )
        if (
            visit_form.is_valid()
            and formset.is_valid()
            and procedure_formset.is_valid()
        ):
            visit_form.save()
            formset.save()
            procedure_formset.save()
            # Adjust stock by the delta between the initial and new quantity:
            # positive delta (less used) restores stock, negative deducts.
            for form in formset:
                if form.has_changed():
                    stock_item = form.cleaned_data.get("stock_item")
                    new_quantity = form.cleaned_data.get("quantity")
                    initial_quantity = form.initial.get("quantity")
                    if stock_item and new_quantity is not None and initial_quantity is not None:
                        change_in_quantity = Decimal(initial_quantity) - Decimal(new_quantity)
                        if change_in_quantity != 0:
                            stock_item.units_in_stock += change_in_quantity
                            stock_item.save()
            if "generate_pdf" in request.POST:
                # Delete the old invoice, if one exists
                Invoice.objects.filter(visit=visit).delete()
                buffer = generate_invoice(visit.pk)
                # Persist the PDF in the database
                content = buffer.read()
                invoice = Invoice(visit=visit, content=content)
                invoice.save()
                response = HttpResponse(content_type="application/pdf")
                response.write(content)
                response[
                    "Content-Disposition"
                ] = f"attachment; filename=faktura-{visit.pk}.pdf"
                response["X-Visit-Id"] = visit.pk
                return response
            return redirect(
                "records:animal_case_detail",
                owner_id=owner_id,
                animal_id=animal_id,
                animalcase_id=animalcase_id,
            )
    else:
        # GET: forms pre-populated from the existing visit.
        visit_form = VisitForm(instance=visit, prefix="visit")
        formset = VisitFormSet(instance=visit, prefix="visit_stock_item")
        procedure_formset = VisitProcedureFormSet(
            instance=visit, prefix="visit_procedure"
        )
    context = {
        "visit_form": visit_form,
        "formset": formset,
        "animal_case": animal_case,
        "stock_items_json": stock_items_json,
        "procedures_json": procedures_json,
        "procedure_formset": procedure_formset,
        "owner": owner,
        "animal": animal,
    }
    return render(request, "records/visit_update_form.html", context)
@login_required
def visit_delete_view(request, owner_id, animal_id, animalcase_id, visit_id):
    """Confirm and delete a visit, returning its used items to stock.

    GET renders the confirmation page; POST restores stock quantities,
    deletes the visit, and redirects to the case detail page.
    """
    animal_case = get_animalcase(owner_id, animal_id, animalcase_id)
    visit = get_object_or_404(Visit, pk=visit_id, animal_case=animal_case)
    if request.method == "POST":
        # Return stock items to the stock
        for visit_stock_item in visit.stock_items.all():
            stock_item = visit_stock_item.stock_item
            stock_item.units_in_stock += visit_stock_item.quantity
            stock_item.save()
        # Delete the visit
        visit.delete()
        return redirect(
            "records:animal_case_detail",
            owner_id=owner_id,
            animal_id=animal_id,
            animalcase_id=animalcase_id,
        )
    context = {
        "visit": visit,
        "animal_case": animal_case,
    }
    return render(request, "records/visit_confirm_delete.html", context)
| jfoltan/VetSimplify | records/views.py | views.py | py | 14,316 | python | en | code | 0 | github-code | 13 |
42482686021 | # -*- coding: utf-8 -*-
from pathlib import Path
from typing import List, Optional, Dict, Any
import re
import json
from tqdm import tqdm
import copy
RAWDATA_PATH = Path("./data/train.txt")
TARGET_DIR = Path("./data/cgec")
class DataProcessor:
    """Turns raw tab-separated CGEC lines into {source, correct} samples."""

    # Matches a leading em-dash pair ("——") and captures the remainder.
    leading_dash_pattern = re.compile(r"^——(.*)")

    def get_gec_samples_from_str(
        self,
        text: str,
    ) -> Optional[List[Dict[str, Any]]]:
        """Parse one raw line ``id<TAB>n<TAB>source<TAB>correct1...``.

        Returns one sample dict per non-empty correction.  A line without
        corrections yields a single identity sample (correct == source).
        Returns None when the source — or every correction — is empty after
        dash stripping.
        """
        _, _, source, *corrects = text.strip().split('\t')
        samples = []
        source = DataProcessor._remove_leading_dash(source)
        if len(source) > 0:
            common = dict()
            common["source"] = source
            if len(corrects) > 0:
                for cor in corrects:
                    cor = DataProcessor._remove_leading_dash(cor)
                    if len(cor) > 0:
                        sample = copy.deepcopy(common)
                        sample["correct"] = cor
                        samples.append(sample)
                if len(samples) == 0:
                    return None
            else:
                # no corrections: treat the source as already correct
                common["correct"] = source
                samples.append(common)
            return samples
        return None

    @classmethod
    def _remove_leading_dash(cls, text: str) -> str:
        """Strip a leading '——' from ``text`` if present."""
        matches = cls.leading_dash_pattern.findall(text)
        return matches[0] if matches else text

    def get_trainset_valset_from_file(self, source_file: Path,
                                      save_dir: Path) -> None:
        """Split ``source_file`` into train/val JSONL files under ``save_dir``.

        The first 5000 single-correction samples go to the validation set;
        everything else goes to the training set.
        """
        trainset_path = save_dir / "train.txt"
        valset_path = save_dir / "val.txt"
        trainset = []
        valset = []
        val_num, i = 5000, 0
        # BUGFIX: read through a context manager (with explicit utf-8) so the
        # file handle is closed; the original leaked an open handle.
        with open(source_file, "r", encoding="utf-8") as fin:
            source_data = fin.readlines()
        for line in tqdm(source_data):
            samples = self.get_gec_samples_from_str(line)
            if samples is not None:
                if i < val_num and len(samples) == 1:
                    valset.extend(samples)
                    i += 1
                else:
                    trainset.extend(samples)
        save_dir.mkdir(exist_ok=True)
        if len(trainset) > 0:
            save_dicts_to_file(trainset, trainset_path)
        save_dicts_to_file(valset, valset_path)
def save_dicts_to_file(dicts: List[Dict[str, Any]],
                       save_path: str,
                       encoding='utf-8') -> None:
    """Write each dict as one JSON line (JSONL) to ``save_path``.

    Non-ASCII characters are written verbatim (ensure_ascii=False).
    """
    with open(save_path, "w+", encoding=encoding) as fout:
        for record in dicts:
            fout.write(json.dumps(record, ensure_ascii=False))
            fout.write('\n')
def main():
    """Build the CGEC train/val split from the raw training file."""
    data_processor = DataProcessor()
    data_processor.get_trainset_valset_from_file(RAWDATA_PATH, TARGET_DIR)


if __name__ == '__main__':
    main()
| Peter-Chou/cgec-initialized-with-plm | preprocess_data.py | preprocess_data.py | py | 2,478 | python | en | code | 3 | github-code | 13 |
23943165479 | import numpy as np
import pandas as pd
import re
def check_last_day(curr_year: int, curr_month: int, curr_day: int) -> bool:
    """Return True when ``curr_day`` is the final day of ``curr_month``.

    February is resolved through check_leap_year (29 days in leap years,
    28 otherwise); every other month uses a fixed length table.

    :param curr_year: year being checked
    :param curr_month: month being checked (1 - 12)
    :param curr_day: day being checked
    :return: True if last day of month, otherwise False
    """
    # Month lengths indexed 1-12 (index 0 unused; February handled below).
    month_lengths = (0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    if curr_month == 2:
        february_days = 29 if check_leap_year(curr_year) else 28
        return curr_day == february_days
    return curr_day == month_lengths[curr_month]
def check_leap_year(year: int) -> bool:
    """Return True when ``year`` is a Gregorian leap year.

    :param year: year to be checked
    :return: True if leap year, otherwise False
    """
    if year % 4 != 0:
        return False
    # Century years are leap only when also divisible by 400.
    return year % 400 == 0 or year % 100 != 0
def aggregate_table(table, new_name: str, valid_percentage: int):
    """
    Takes the daily values and finds the average for each month.
    The averages are only created if the ratio of days per month with
    non-null values is greater or equal to ``valid_percentage``,
    otherwise the month is NaN.
    The returned table also keeps the information columns (ones that do
    not contain a date).

    NOTE(review): ``new_name`` is currently unused — the CSV write this
    docstring originally described was never implemented.

    :param table: pandas DataFrame to get values from (date columns must be
        consecutive daily columns named with year/month/day digits)
    :param new_name: intended name of the output file (unused)
    :param valid_percentage: (0 - 1.0) borderline ratio of non-null days per month
    :return: DataFrame of monthly averages, or None on invalid input
    """
    # Fix: `if not table:` raises ValueError for any DataFrame (its truth
    # value is ambiguous) — test explicitly for a missing or empty table.
    if table is None or table.empty:
        return
    index_names = list(table.index)
    # Separate the date columns (names containing digits) from info columns
    date_columns = []
    date_names = []
    info_columns = []
    for name in list(table.columns):
        item = re.findall(r'\d+', name)  # raw string: avoids invalid-escape warning
        if item:
            date_columns.append(name)
            date_names.append(item)
        else:
            info_columns.append(name)
    new_table = []
    # Combine data from days to months
    for index, row in table.iterrows():
        count = 0
        valid_count = 0
        data_sum = 0
        avg_row = []
        # Check average for each month in the row
        for num, column in enumerate(date_columns):
            year = int(date_names[num][0])
            month = int(date_names[num][1])
            count += 1
            value = row[column]
            if not pd.isnull(value):
                valid_count += 1
                data_sum += value
            # Calculate averages on the last day if enough values
            if check_last_day(year, month, count):
                if valid_count / count >= valid_percentage:
                    avg_row.append(data_sum / valid_count)
                else:
                    avg_row.append(np.NaN)
                # Reset values for next month's data
                count = 0
                valid_count = 0
                data_sum = 0
        new_table.append(avg_row)
    column_names = []
    # Create new names for the columns (one "year-month" per month)
    for date in date_names:
        name = str(date[0]) + '-' + str(date[1])
        if name not in column_names:
            column_names.append(name)
    # Fix: bail out when EITHER dimension mismatches (the original `and`
    # let a single mismatch fall through to DataFrame construction).
    if len(new_table[0]) != len(column_names) or len(index_names) != len(new_table):
        return
    # Add info columns from old table to new averages table
    new_frame = pd.DataFrame(np.array(new_table), columns=column_names, index=index_names)
    info_table = table.filter(items=info_columns)
    average_table = pd.concat([info_table, new_frame], axis=1)
    return average_table
| changsteph/CITRUS-June2022 | aggregate_data.py | aggregate_data.py | py | 4,378 | python | en | code | 0 | github-code | 13 |
31428537020 | import sqlite3
def create_database(db_connection: sqlite3.Connection):
    """Create the photo/face/face_photo/similar_face tables if they are missing."""
    ddl_statements = (
        """
        CREATE TABLE IF NOT EXISTS photo (
            id STRING PRIMARY KEY,
            file_path STRING
        );
        """,
        """
        CREATE TABLE IF NOT EXISTS face (
            id STRING PRIMARY KEY
        );
        """,
        """
        CREATE TABLE IF NOT EXISTS face_photo (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            face_id INTEGER,
            photo_id STRING
        )
        """,
        """
        CREATE TABLE IF NOT EXISTS similar_face (
            face_id STRING,
            similar_face_id STRING
        )
        """,
    )
    cursor = db_connection.cursor()
    for statement in ddl_statements:
        cursor.execute(statement)
def drop_database(db_connection: sqlite3.Connection):
    """Drop every application table, ignoring ones that do not exist."""
    cursor = db_connection.cursor()
    for table_name in ("face", "face_photo", "photo", "similar_face"):
        cursor.execute("DROP TABLE IF EXISTS {}".format(table_name))
| Doka-NT/python-face-gallery | src/infrastructure/database.py | database.py | py | 1,177 | python | en | code | 0 | github-code | 13 |
41910816039 | import bpy
from mathutils import Vector, Euler, Matrix
from math import radians, degrees, sin, cos, atan2, sqrt, pi
def delete_meshes():
    """Delete every mesh object in the Blender scene and free its mesh data.

    NOTE(review): uses the pre-2.80 Blender API (`object.select = True`);
    Blender 2.80+ would require `select_set(True)` — confirm target version.
    """
    candidate_list = [item.name for item in bpy.data.objects if item.type == "MESH"]
    # select mesh objects and remove them
    for object_name in candidate_list:
        bpy.data.objects[object_name].select = True
    bpy.ops.object.delete()
    # remove the meshes, they have no users anymore
    for item in bpy.data.meshes:
        bpy.data.meshes.remove(item)
def create_joint(m, alpha, a1, a2, theta, d):
    """Apply one Denavit-Hartenberg transform to matrix ``m`` and render it.

    The joint is drawn as a cone at the new frame; if ``a2`` is non-zero a
    cylinder is drawn for the outgoing link, and if ``d`` is non-zero a
    stretched cube visualizes the offset along the joint axis.

    :param m: current 4x4 world transform (mathutils.Matrix)
    :param alpha: DH twist angle (degrees)
    :param a1: DH link length of the incoming link
    :param a2: length of the outgoing link to render (0 = none)
    :param theta: DH joint angle (degrees)
    :param d: DH offset along the joint axis
    :return: the updated world transform for the next joint
    """
    # Classic DH chain: Rot(x, alpha) * Trans(x, a1) * Rot(z, theta) * Trans(z, d)
    rotx = Matrix.Rotation(radians(alpha), 4, 'X')
    transx = Matrix.Translation(Vector((a1, 0, 0)))
    rotz = Matrix.Rotation(radians(theta), 4, 'Z')
    transz = Matrix.Translation(Vector((0, 0, d)))
    m = m * rotx * transx * rotz * transz
    bpy.ops.mesh.primitive_cone_add(radius1=9, depth=15)
    j = bpy.context.scene.objects.active
    j.matrix_world = m
    if a2 != 0:
        # render link
        bpy.ops.mesh.primitive_cylinder_add(radius=2, depth=a2)
        link = bpy.context.scene.objects.active
        link_roty = Matrix.Rotation(radians(90), 4, 'Y')
        link_transx = Matrix.Translation(Vector((0, 0, a2/2)))
        link.matrix_world = m * link_roty * link_transx
    if d != 0:
        # render offset
        bpy.ops.mesh.primitive_cube_add()
        offset = bpy.context.scene.objects.active
        offset_transx = Matrix.Translation(Vector((0, 0, -d/2)))
        offset_s = Matrix.Scale(abs(d/2), 4, Vector((0, 0, 1)))
        offset.matrix_world = m * offset_transx * offset_s
    return m
def fk(a1, a2, a3, a4, theta1, theta2, theta3, d1, d2, d3):
    """Closed-form forward kinematics for the 3-DOF manipulator.

    Angles are given in degrees; returns the (x, y, z) end-effector
    position. NOTE: ``a1`` is accepted for signature symmetry with the
    other DH parameters but does not appear in the position equations.
    """
    t1, t2, t3 = radians(theta1), radians(theta2), radians(theta3)
    c1, s1 = cos(t1), sin(t1)
    c2, s2 = cos(t2), sin(t2)
    c23, s23 = cos(t2 + t3), sin(t2 + t3)
    x = a4 * c1 * c23 + a3 * c1 * c2 - d3 * s1 + a2 * c1 - d2 * s1
    y = a4 * s1 * c23 + a3 * s1 * c2 + d3 * c1 + a2 * s1 + d2 * c1
    z = d1 - a4 * s23 - a3 * s2
    return x, y, z
class NoSolutionsError(Exception):
    """Raised when the inverse kinematics has no real solution."""
class IKError(Exception):
    """Raised when an IK solution disagrees with the commanded joint angles."""
def convert_angle(a):
    """
    Wrap the radian angle ``a`` into (-180, 180] degrees, rounded to 0.1.
    """
    if a >= pi:
        wrapped = a % pi - pi
    elif a < -pi:
        wrapped = pi - abs(a) % pi
    else:
        wrapped = a
    return round(degrees(wrapped), 1)
def ik_theta1(x, y, d2, d3):
    """Return the two candidate theta1 angles (degrees) for a target (x, y),
    given the combined lateral wrist offset d2 + d3."""
    offset = d2 + d3
    base = atan2(-x, y)
    spread = atan2(sqrt(x * x + y * y - offset * offset), offset)
    return convert_angle(base + spread), convert_angle(base - spread)
def ik_theta3(x, y, z, theta1, a2, a3, a4, d1):
    """Return the two candidate theta3 angles (degrees) for the target and a
    chosen theta1, or an empty tuple when the target is unreachable."""
    t1 = radians(theta1)
    # Squared distance term from the law of cosines, expanded in t1.
    m = -4*a2*x*cos(t1) - 4*a2*y*sin(t1) + x*x*cos(2*t1) + 2*x*y*sin(2*t1) - y*y*cos(2*t1) + 2*(z - d1)**2 + 2*a2*a2 + x*x + y*y
    cos_t3 = (m/2 - a3*a3 - a4*a4) / (2*a3*a4)
    cos_sq = round(cos_t3 * cos_t3, 4)
    if cos_sq > 1:
        # No real solution for this theta1.
        return ()
    sin_t3 = sqrt(1 - cos_sq)
    return convert_angle(atan2(sin_t3, cos_t3)), convert_angle(atan2(-sin_t3, cos_t3))
def ik_theta2(x, y, z, theta1, theta3, a2, a4, d1):
    """Return the two candidate theta2 angles (degrees) for the target,
    given already-chosen theta1 and theta3."""
    t1 = radians(theta1)
    t3 = radians(theta3)
    m = a2 - (x * cos(t1) + y * sin(t1))
    n = d1 - z
    o = a4 * sin(t3)
    p = m * m + n * n - o * o
    base = atan2(m, n)
    spread = atan2(sqrt(p), o)
    return convert_angle(base + spread), convert_angle(base - spread)
def ik(x, y, z, a1, a2, a3, a4, d1, d2, d3):
    """Inverse kinematics: enumerate all candidate (theta1, theta2, theta3)
    triples and keep those whose forward kinematics reproduces (x, y, z)
    within tolerance. Returns the first valid triple, or (0, 0, 0)."""
    candidates = [
        (t1, t2, t3)
        for t1 in ik_theta1(x, y, d2, d3)
        for t3 in ik_theta3(x, y, z, t1, a2, a3, a4, d1)
        for t2 in ik_theta2(x, y, z, t1, t3, a2, a4, d1)
    ]
    valid = []
    for t1, t2, t3 in candidates:
        px, py, pz = fk(a1, a2, a3, a4, t1, t2, t3, d1, d2, d3)
        px, py, pz = round(px, 4), round(py, 4), round(pz, 4)
        # Accept the candidate when FK round-trips to the target.
        if abs(px - x) < 0.001 and abs(py - y) < 0.001 and abs(pz - z) < 0.001:
            valid.append((t1, t2, t3))
    print ('valid solutions', valid)
    if valid:
        return valid[0]
    return (0, 0, 0)
def render_manipulator(theta1, theta2, theta3):
    """Render the 4-joint manipulator at the given joint angles (degrees)
    and verify that ik() recovers the commanded pose from the resulting
    end-effector location.

    Raises IKError when the recovered angles differ from the commanded ones.
    """
    # Hard-coded DH parameters of this manipulator (alpha, a, d per joint).
    alpha1 = 0;
    a1 = 0;
    d1 = -30;
    alpha2 = -90;
    a2 = 27.5;
    d2 = 43;
    alpha3 = 0;
    a3 = 57.3;
    d3 = -18;
    alpha4 = 90;
    a4 = 106;
    d4 = 0;
    #print ("Deleting all meshes...")
    delete_meshes()
    # Chain the DH transforms, rendering each joint as we go.
    m = Matrix()
    m = create_joint(m, alpha1, a1, a2, theta1, d1)
    m = create_joint(m, alpha2, a2, a3, theta2, d2)
    m = create_joint(m, alpha3, a3, a4, theta3, d3)
    m = create_joint(m, alpha4, a4, 0, 0, d4)
    # The last created object is the end effector; its location is the FK result.
    j = bpy.context.scene.objects.active
    print ()
    print ('location', j.location)
    print ('checking ik for', (theta1, theta2, theta3))
    x, y, z = j.location
    x, y, z = round(x, 4), round(y, 4), round(z, 4)
    theta1ik, theta2ik, theta3ik = ik(x, y, z, a1, a2, a3, a4, d1, d2, d3)
    if theta1 != theta1ik or theta2 != theta2ik or theta3 != theta3ik:
        print ('ik failure, expected', (theta1, theta2, theta3), 'got', (theta1ik, theta2ik, theta3ik))
        raise IKError('ik failure, expected', (theta1, theta2, theta3), 'got', (theta1ik, theta2ik, theta3ik))
# Script driver: sweep theta1 and theta2 over their full joint ranges
# (theta3 held fixed), re-rendering and IK-checking every pose.
# render_manipulator raises IKError on the first mismatch.
theta1 = 0 # from 0 to -180
theta2 = 53 # from -90 to 90
theta3 = 60 # from 0 to 180
for theta1 in range(0, -181, -1):
    for theta2 in range(-90, 91):
        render_manipulator(theta1, theta2, theta3)
print ('done')
| dkobozev/quadropod | ik/ik.py | ik.py | py | 5,552 | python | en | code | 1 | github-code | 13 |
6998887758 | # encoding: utf-8
import http
import requests
from ...config import CxConfig
from ...auth import AuthenticationAPI
from ...exceptions.CxError import BadRequestError, NotFoundError, CxError
from .dto.customTasks import CxCustomTask
from .dto import CxLink
class CustomTasksAPI(object):
    """
    REST API: custom tasks
    """
    max_try = CxConfig.CxConfig.config.max_try
    base_url = CxConfig.CxConfig.config.url
    verify = CxConfig.CxConfig.config.verify
    # Class-level cache of the most recently fetched task list.
    custom_tasks = []

    def __init__(self):
        # Per-instance counter of authentication retries in flight.
        self.retry = 0

    def get_all_custom_tasks(self):
        """
        REST API: get all custom tasks

        Returns:
            :obj:`list` of :obj:`CxCustomTask`

        Raises:
            BadRequestError
            NotFoundError
            CxError
        """
        custom_tasks = []
        custom_tasks_url = self.base_url + "/customTasks"
        r = requests.get(
            url=custom_tasks_url,
            headers=AuthenticationAPI.AuthenticationAPI.auth_headers,
            verify=CustomTasksAPI.verify
        )
        if r.status_code == http.HTTPStatus.OK:
            a_list = r.json()
            custom_tasks = [
                CxCustomTask.CxCustomTask(
                    custom_task_id=item.get("id"),
                    name=item.get("name"),
                    custom_task_type=item.get("type"),
                    data=item.get("data"),
                    link=CxLink.CxLink(
                        (item.get("link", {}) or {}).get("rel"),
                        (item.get("link", {}) or {}).get("uri")
                    )
                ) for item in a_list
            ]
            CustomTasksAPI.custom_tasks = custom_tasks
        elif r.status_code == http.HTTPStatus.BAD_REQUEST:
            raise BadRequestError(r.text)
        elif r.status_code == http.HTTPStatus.NOT_FOUND:
            raise NotFoundError()
        elif (r.status_code == http.HTTPStatus.UNAUTHORIZED) and (self.retry < self.max_try):
            # Fix: use the configured max_try (was hard-coded 3) and RETURN
            # the retried call's result — the original discarded it and fell
            # through to return the empty local list.
            AuthenticationAPI.AuthenticationAPI.reset_auth_headers()
            self.retry += 1
            return self.get_all_custom_tasks()
        else:
            raise CxError(r.text, r.status_code)
        self.retry = 0
        return custom_tasks

    def get_custom_task_id_by_name(self, task_name):
        """
        Args:
            task_name (str):

        Returns:
            int: custom task id, or None when no task has that name
        """
        custom_tasks = self.get_all_custom_tasks()
        # NOTE(review): assumes CxCustomTask exposes `.name` and `.id`
        # attributes — confirm against the DTO definition.
        a_dict = {
            item.name: item.id for item in custom_tasks
        }
        return a_dict.get(task_name)

    def get_custom_task_by_id(self, task_id):
        """
        Args:
            task_id (int): Unique Id of the custom task

        Returns:
            :obj:`CxCustomTask`

        Raises:
            BadRequestError
            NotFoundError
            CxError
        """
        custom_task = None
        custom_task_url = self.base_url + "/customTasks/{id}".format(id=task_id)
        r = requests.get(
            url=custom_task_url,
            headers=AuthenticationAPI.AuthenticationAPI.auth_headers,
            verify=CustomTasksAPI.verify
        )
        if r.status_code == http.HTTPStatus.OK:
            a_dict = r.json()
            custom_task = CxCustomTask.CxCustomTask(
                custom_task_id=a_dict.get("id"),
                name=a_dict.get("name"),
                custom_task_type=a_dict.get("type"),
                data=a_dict.get("data"),
                link=CxLink.CxLink(
                    (a_dict.get("link", {}) or {}).get("rel"),
                    (a_dict.get("link", {}) or {}).get("uri")
                )
            )
        elif r.status_code == http.HTTPStatus.BAD_REQUEST:
            raise BadRequestError(r.text)
        elif r.status_code == http.HTTPStatus.NOT_FOUND:
            raise NotFoundError()
        elif (r.status_code == http.HTTPStatus.UNAUTHORIZED) and (self.retry < self.max_try):
            # Fix: return the retried call's result instead of discarding it
            # and returning the local None.
            AuthenticationAPI.AuthenticationAPI.reset_auth_headers()
            self.retry += 1
            return self.get_custom_task_by_id(task_id)
        else:
            raise CxError(r.text, r.status_code)
        self.retry = 0
        return custom_task
| lxj616/checkmarx-python-sdk | CheckmarxPythonSDK/CxRestAPISDK/sast/projects/CustomTasksAPI.py | CustomTasksAPI.py | py | 4,203 | python | en | code | null | github-code | 13 |
15527134748 | from .models import *
def get_classroom_list_summary(user_id, date, class_id):
    """Fetch the classroom list for a given year and iterate its students.

    NOTE(review): this is an unfinished stub — the per-student loop body is
    `pass`, nothing is returned, and `user_profile` / `class_name` are
    unused. Confirm intended behavior before relying on it.
    """
    user_profile = User.objects.get(id=user_id)
    classroom_profile = classroom.objects.get(id=class_id)
    single_classroom = classroomLists.objects.filter(lesson_classroom=classroom_profile, year=date).first()
    class_name = classroom_profile.classroom_title
    if single_classroom:
        student_list = single_classroom.students.all()
        for student in student_list:
            pass
def get_student_list(user_id, class_id):
    """Build a summary dict per student in the classroom.

    Each entry carries name, grade level, the linked User (or None while the
    invitation is still pending), the pending invitation (or None once
    accepted), email, and the student's user id. The list is sorted by last
    name.

    Returns:
        tuple: (student_list, no_students) where no_students is True when
        the classroom has no resolvable students.

    NOTE(review): `user_profile` and `single_classroom` are fetched but
    unused here — possibly leftovers; confirm before removing.
    """
    user_profile = User.objects.get(id=user_id)
    classroom_profile = classroom.objects.get(id=class_id)
    single_classroom = classroomLists.objects.filter(lesson_classroom=classroom_profile).first()
    all_students = classroom_profile.student.all()
    student_profile_matches = studentProfiles.objects.filter(id__in=all_students)
    student_list = []
    for student in student_profile_matches:
        if student:
            # Invitation row matched by name, used for the email address.
            student_invite = studentInvitation.objects.filter(first_name= student.first_name, last_name= student.last_name, for_classroom= classroom_profile).first()
            if student_invite:
                email = student_invite.email
            else:
                email = None
            #if student doesn't have a username, they are still pending
            if student.student_username:
                student_user = User.objects.get(id=student.student_username_id)
                student_ref = None
            else:
                student_user = None
                student_ref = student_invite
            result = {'s_first': student.first_name, 's_last': student.last_name, 'g_level': student.current_grade_level, 'username': student_user,\
                'student_invite': student_ref, 'email': email, 'student_id': student.student_username_id}
            student_list.append(result)
    if student_list != []:
        student_list.sort(key=lambda x: x['s_last'])
        no_students = False
    else:
        no_students = True
    return(student_list, no_students)
def get_teacher_list(user_id, class_id):
    """Return summary dicts for the classroom's teachers, sorted by last name.

    Active support teachers come first from the classroom's M2M relation
    (with teacher_invite=None); pending invitations are appended with their
    invitation object attached so the caller can distinguish them.

    NOTE(review): `user_profile` and `single_classroom` are fetched but
    unused here — confirm before removing.
    """
    user_profile = User.objects.get(id=user_id)
    classroom_profile = classroom.objects.get(id=class_id)
    single_classroom = classroomLists.objects.filter(lesson_classroom=classroom_profile).first()
    teacher_list = []
    #contains all the teachers currently in the classroom
    all_teachers = classroom_profile.support_teachers.all()
    teacher_profile_matches = User.objects.filter(id__in=all_teachers)
    for teacher in teacher_profile_matches:
        teacher_invite = None
        result = {'t_first': teacher.first_name, 't_last': teacher.last_name, 'email': teacher.email, 'teacher_invite': teacher_invite}
        teacher_list.append(result)
    #contains all the teachers pending for the classroom
    all_invites = teacherInvitations.objects.filter(for_classroom= classroom_profile, is_pending= True)
    for invite in all_invites:
        result = {'t_first': invite.first_name, 't_last': invite.last_name, 'email': invite.email, 'teacher_invite': invite}
        teacher_list.append(result)
    teacher_list.sort(key=lambda x: x['t_last'])
    return(teacher_list)
def get_student_info(student_list, user_id):
    """Augment each student summary with sticker and worksheet statistics.

    For registered students, looks up the most recent praise sticker and the
    worksheets assigned by this teacher, counting completions and averaging
    the scores of submitted worksheets. Pending students get placeholder
    values.

    :param student_list: entries from get_student_list (already sorted);
        each has keys 's_first', 's_last', 'g_level', 'username',
        'student_invite', 'email', 'student_id'
    :param user_id: the requesting teacher's User id (filters assigned_by)
    :return: list of dicts with keys 'student_id', 'name', 'stickers',
        'completed_ws', 'assigned_ws', 'average'
    """
    student_info = []
    user_profile = User.objects.get(id=user_id)
    for student in student_list:
        # (first, last) tuple used as the display name downstream.
        name = student['s_first'], student['s_last']
        if student['username'] != None:
            student_user = User.objects.get(id=student['student_id'])
            student_id = student_user.id
            praises = studentPraise.objects.filter(student=student_user).order_by('-week_of')
            if praises:
                recent_sticker = praises[0]
                stickers = recent_sticker.sent_date
            else:
                stickers = "no stickers"
            #studentWorksheetAnswerFull is created whenever a student starts an assigned worksheet.
            #Once they complete the worksheet the is_submitted is True and they is a completion_date generated.
            #filter by assigned_by
            teacher_ws = studentWorksheetAnswerFull.objects.filter(student=student_user, assigned_by=user_profile)
            completed_ws = 0
            total_ws = 0
            total_score = 0
            if teacher_ws:
                for worksheet in teacher_ws:
                    if worksheet.is_submitted == True:
                        completed_ws += 1
                        ws_score = float(worksheet.score)
                        total_score += ws_score
                    total_ws += 1
            # Average over submitted worksheets only; 0 when none submitted.
            if completed_ws != 0:
                average = total_score / completed_ws
            else:
                average = total_score
            #ensure that only XX.X% for average
            average = round(average, 1)
            each_student = {'student_id': student_id, 'name': name, 'stickers': stickers, 'completed_ws': completed_ws, 'assigned_ws': total_ws, 'average': average}
            student_info.append(each_student)
        else:
            # Student has not accepted the invitation yet: placeholders only.
            student_id = "pending"
            stickers = "no stickers"
            completed_ws = 0
            total_ws = "n/a"
            average = 'student still pending'
            each_student = {'student_id': student_id, 'name': name, 'stickers': stickers, 'completed_ws': completed_ws, 'assigned_ws': total_ws, 'average': average}
            student_info.append(each_student)
    return(student_info)
33001926091 | # editor.py: shader editor
#
# author: Antony Ducommun dit Boudry (nitro.tm@gmail.com)
# license: GPL
#
import io, re
from pathlib import Path
from PySide2.QtCore import Qt, Signal, Slot, QPoint, QSize
from PySide2.QtGui import QColor, QSyntaxHighlighter, QTextOption, QWindow
from PySide2.QtWidgets import (
QAction, QColorDialog, QHBoxLayout, QInputDialog, QLabel, QLineEdit, QPlainTextEdit,
QPushButton, QSlider, QTextEdit, QVBoxLayout, QWidget
)
class Editor(QWidget):
    """Side panel for editing a project's render settings (display ratio,
    mask point size/distance, clear color) and its GLSL shader pair.

    Shader files are loaded from/saved to the sibling ``shaders`` directory;
    ``changed`` is emitted whenever a shader file is rewritten on disk.
    """

    changed = Signal()

    # Compiled once at class load: parses "#rrggbbaa" (leading '#' optional).
    _hex_color_re = re.compile('#?([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})')

    def __init__(self, parent):
        super(Editor, self).__init__(parent)
        self.path = Path(__file__).parent / 'shaders'
        self.project = None
        self.programName = None
        self.vertexShader = ''
        self.fragmentShader = ''

        self.colorEditor = QLineEdit()
        self.colorEditor.setEnabled(False)
        self.colorEditor.textChanged.connect(self.updateColorHex)
        self.updateColor((0.5, 0.5, 0.5, 1.0))

        self.pickButton = QPushButton('Pick')
        self.pickButton.setEnabled(False)
        self.pickButton.clicked.connect(self.pickColor)

        colorContainer = QWidget()
        colorContainer.setLayout(QHBoxLayout())
        colorContainer.layout().setMargin(0)
        colorContainer.layout().addWidget(self.colorEditor)
        colorContainer.layout().addWidget(self.pickButton)

        # The three parameter sliders share their configuration.
        self.displayRatioSlider = self._makeSlider(0, 1000, self.updateDisplayRatio)
        self.pointSizeSlider = self._makeSlider(0, 500, self.updatePointSize)
        self.distanceSlider = self._makeSlider(0, 1000, self.updateDistanceRange)

        # Vertex and fragment shader editors share their configuration.
        self.vertexEditor = self._makeShaderEditor(self.saveVertex)
        self.fragmentEditor = self._makeShaderEditor(self.saveFragment)

        self.setLayout(QVBoxLayout())
        self.layout().addWidget(QLabel("Display (%):"))
        self.layout().addWidget(self.displayRatioSlider)
        self.layout().addWidget(QLabel("Mask Point size (px):"))
        self.layout().addWidget(self.pointSizeSlider)
        self.layout().addWidget(QLabel("Mask Distance range:"))
        self.layout().addWidget(self.distanceSlider)
        self.layout().addWidget(QLabel("Clear color:"))
        self.layout().addWidget(colorContainer)
        self.layout().addWidget(QLabel("Vertex shader:"))
        self.layout().addWidget(self.vertexEditor)
        self.layout().addWidget(QLabel("Fragment shader:"))
        self.layout().addWidget(self.fragmentEditor)

    def _makeSlider(self, minimum, maximum, slot):
        """Build a disabled horizontal slider with standard steps, wired to slot."""
        slider = QSlider(Qt.Horizontal)
        slider.setRange(minimum, maximum)
        slider.setEnabled(False)
        slider.setSingleStep(1)
        slider.setPageStep(10)
        slider.valueChanged.connect(slot)
        return slider

    def _makeShaderEditor(self, slot):
        """Build a disabled monospace code editor whose edits trigger slot."""
        editor = QPlainTextEdit()
        editor.setStyleSheet("QPlainTextEdit { background: #393939; color: #b6dede; font: 1rem 'monospace'; }")
        editor.setLineWrapMode(QPlainTextEdit.NoWrap)
        editor.setWordWrapMode(QTextOption.NoWrap)
        editor.setTabStopWidth(2)
        editor.setTabChangesFocus(False)
        editor.setCenterOnScroll(True)
        editor.setEnabled(False)
        editor.textChanged.connect(slot)
        return editor

    def setProject(self, project):
        """Bind the panel to a project (or None to reset and disable it)."""
        self.project = project
        if project:
            self.displayRatioSlider.setValue(project.displayRatio * 1000)
            self.displayRatioSlider.setEnabled(True)
            self.pointSizeSlider.setValue(project.maskPointSize * 10)
            self.pointSizeSlider.setEnabled(True)
            self.distanceSlider.setValue(project.maskDistanceRange[1] * 10)
            self.distanceSlider.setEnabled(True)
            self.updateColor(project.clearColor)
            self.loadShader(project.cloudShaderName)
            self.colorEditor.setEnabled(True)
            self.pickButton.setEnabled(True)
            self.vertexEditor.setEnabled(True)
            self.fragmentEditor.setEnabled(True)
        else:
            self.displayRatioSlider.setValue(1000)
            self.displayRatioSlider.setEnabled(False)
            self.pointSizeSlider.setValue(10)
            self.pointSizeSlider.setEnabled(False)
            self.distanceSlider.setValue(10)
            self.distanceSlider.setEnabled(False)
            self.updateColor((0.5, 0.5, 0.5, 1.0))
            self.clearShader()
            self.colorEditor.setEnabled(False)
            self.pickButton.setEnabled(False)
            self.vertexEditor.setEnabled(False)
            self.fragmentEditor.setEnabled(False)

    def parseColorHex(self, value):
        """Parse "#rrggbbaa" into an (r, g, b, a) tuple of floats in [0, 1],
        or return None when the text is not a valid hex color."""
        m = Editor._hex_color_re.match(value)
        if not m:
            return None
        return (
            min(max(int(m.group(1), base=16), 0), 255) / 255.0,
            min(max(int(m.group(2), base=16), 0), 255) / 255.0,
            min(max(int(m.group(3), base=16), 0), 255) / 255.0,
            min(max(int(m.group(4), base=16), 0), 255) / 255.0
        )

    def pickColor(self):
        """Open a color dialog seeded with the current color and apply the pick."""
        color = QColor()
        # The editor text is always canonical here because updateColor wrote it.
        color.setRgbF(*self.parseColorHex(self.colorEditor.text()))
        color = QColorDialog.getColor(color, parent=self, title="Background Color", options=QColorDialog.ShowAlphaChannel)
        if not color.isValid():
            return
        self.updateColor(
            (
                color.redF(),
                color.greenF(),
                color.blueF(),
                color.alphaF()
            )
        )

    def updateColorHex(self, value):
        """Slot for colorEditor.textChanged: apply the color once it parses."""
        # Fix: parseColorHex returns None for partial/invalid input (every
        # intermediate keystroke) — the original passed None to updateColor,
        # which raised TypeError on color[0].
        color = self.parseColorHex(value)
        if color is not None:
            self.updateColor(color)

    def updateColor(self, color):
        """Apply an (r, g, b, a) clear color to the editor widget and project."""
        def formatChannel(value):
            return "%02x" % int(min(max(value * 255.0, 0.0), 255.0))

        bg = "#%s%s%s" % (formatChannel(color[0]), formatChannel(color[1]), formatChannel(color[2]))
        # Pick a readable foreground for the swatch-style line edit.
        fg = "#ffffff" if sum(color) / 3 < 0.5 else "#000000"
        self.colorEditor.setStyleSheet("QLineEdit { color: %s; background: %s; }" % (fg, bg))
        text = bg + formatChannel(color[3])
        if self.colorEditor.text() != text:
            self.colorEditor.setText(text)
        if self.project:
            self.project.setClearColor(color)
            # self.changed.emit()

    def updateDisplayRatio(self, value):
        """Slot: slider value (0-1000) -> display ratio (0.0-1.0)."""
        if self.project:
            self.project.setDisplayRatio(value / 1000.0)
            # self.changed.emit()

    def updatePointSize(self, value):
        """Slot: slider value -> mask point size in pixels (tenths)."""
        if self.project:
            self.project.setMaskPointSize(value / 10.0)
            # self.changed.emit()

    def updateDistanceRange(self, value):
        """Slot: slider value -> mask distance range upper bound (tenths)."""
        if self.project:
            self.project.setMaskDistanceRange(value / 10.0)
            # self.changed.emit()

    def clearShader(self):
        """Forget the current shader program and empty both editors."""
        self.programName = None
        self.vertexShader = ''
        self.fragmentShader = ''
        self.vertexEditor.setPlainText('')
        self.fragmentEditor.setPlainText('')

    def loadShader(self, name):
        """Load ``name``.vs / ``name``.fs from the shaders directory into the
        editors; a missing file leaves that editor empty."""
        self.programName = name
        self.vertexShader = ''
        self.fragmentShader = ''
        try:
            with io.open(self.path / (self.programName + '.vs'), 'r') as f:
                self.vertexShader = f.read()
        except Exception as e:
            print(e)
        try:
            with io.open(self.path / (self.programName + '.fs'), 'r') as f:
                self.fragmentShader = f.read()
        except Exception as e:
            print(e)
        self.vertexEditor.setPlainText(self.vertexShader)
        self.fragmentEditor.setPlainText(self.fragmentShader)

    def saveVertex(self):
        """Slot: persist the vertex editor to disk when its text changed."""
        value = self.vertexEditor.toPlainText()
        if not self.programName or self.vertexShader == value:
            return
        with io.open(self.path / (self.programName + '.vs'), 'w') as f:
            f.write(value)
        self.vertexShader = value
        self.changed.emit()

    def saveFragment(self):
        """Slot: persist the fragment editor to disk when its text changed."""
        value = self.fragmentEditor.toPlainText()
        if not self.programName or self.fragmentShader == value:
            return
        with io.open(self.path / (self.programName + '.fs'), 'w') as f:
            f.write(value)
        self.fragmentShader = value
        self.changed.emit()
| nitrotm/3dtagger | editor.py | editor.py | py | 8,240 | python | en | code | 1 | github-code | 13 |
19388764242 | """
We fit GPs to the full dataset, testing different models and kernels
"""
# Idea:
# Query new points twice + fit heteroskedastic noise to empirical data
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
from load_matlab import *
import numpy as np
import GPy
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from numpy import linalg as LA
import argparse
import os
from os import path
import itertools
import pickle
import operator
import matplotlib.cm as cm
# Command-line options: job id and which EMG channel (0-6) to model.
parser = argparse.ArgumentParser()
parser.add_argument('--uid', type=int, default=2, help='uid for job number')
parser.add_argument('--emg', type=int, default=2, choices=range(7), help='emg. between 0-6')
# DEFAULT
# there is no dt in 1d. But we need this to access the trains dct
dt=0
def make_dataset_1d(trainsC, emg=2, mean=False, n=20):
    """Build a (X, Y) regression dataset of per-channel max responses.

    Samples ``n`` max responses per channel (or the per-channel means when
    ``mean`` is True) and pairs each with the channel's (x, y) coordinates.

    NOTE(review): ``Yvars`` is accumulated from the per-channel std but is
    never returned — presumably leftover from a heteroskedastic variant.

    :param trainsC: container exposing get_emgdct(emg) response dicts
    :param emg: which EMG channel's responses to use
    :param mean: return one mean response per channel instead of samples
    :param n: number of sampled data points per channel
    :return: (X, Y) arrays; Y has shape (len(X), 1)
    """
    # n means number of datapt per channel
    # Used to test/debug certain models
    trains = trainsC.get_emgdct(emg=emg)
    X = []
    Y = []
    Xmean = []
    Ymean = []
    Yvars = []
    for ch in CHS:
        ys = random.sample(trains[ch][ch][0]['data'].max(axis=1).tolist(),n)
        Y.extend(ys)
        var = trains[ch][ch][0]['stdmax'] ** 2
        Yvars.extend([var] * len(ys))
        xy = ch2xy[ch]
        X.extend([xy]*len(ys))
        Ymean.append(trains[ch][ch][0]['meanmax'])
        Xmean.extend([xy])
    if mean:
        Xmean = np.array(Xmean)
        Ymean = np.array(Ymean).reshape((-1,1))
        return Xmean, Ymean
    X = np.array(X)
    Y = np.array(Y).reshape((-1,1))
    return X,Y
def train_model_1d(X, Y, num_restarts=3, ARD=True, constrain=[0.3, 3.0], verbose=True):
    """Fit a Matern-5/2 GP regression to the 2-D electrode coordinates.

    :param X: (N, 2) input coordinates
    :param Y: (N, 1) responses
    :param num_restarts: optimizer restarts
    :param ARD: separate lengthscale per input dimension
    :param constrain: (low, high) lengthscale bounds, or falsy to skip
    :param verbose: forwarded to GPy's constraint warning and optimizer
    :return: the optimized GPy regression model
    """
    kernel = GPy.kern.Matern52(input_dim=2, ARD=ARD)
    if constrain:
        kernel.lengthscale.constrain_bounded(*constrain, warning=verbose)
    model = GPy.models.GPRegression(X, Y, kernel)
    model.optimize_restarts(num_restarts=num_restarts, verbose=verbose)
    return model
### We try the co-kriging (multi output GP) approach to learn all EMGS
### at the same time
def make_mo_dataset(trainsC, emgs=[0,1,2,4,5,6]):
    """Stack (x, y, emg) -> max-response samples across channels and EMGs
    for the multi-output (co-kriging) model."""
    X = []
    Y = []
    for emg in emgs:
        per_channel = trainsC.get_emgdct(emg=emg)
        for ch in CHS:
            responses = per_channel[ch][ch][0]['maxs']
            coords = ch2xy[ch] + [emg]
            Y.extend(responses)
            X.extend([coords] * len(responses))
    return np.array(X), np.array(Y).reshape((-1, 1))
def train_mo_model(X, Y, ARD=True, num_restarts=1):
    """Fit one ARD Matern-5/2 GP jointly over (x, y, emg) inputs.

    NOTE(review): the ``ARD`` parameter is accepted but the kernel is
    hard-coded to ARD=True, matching the original behavior.
    """
    kernel = GPy.kern.Matern52(input_dim=3, ARD=True)
    model = GPy.models.GPRegression(X, Y, kernel)
    model.optimize_restarts(num_restarts=num_restarts)
    return model
def plot_mo_model(m, plot_data=True):
    """Plot the co-kriging model: one column of GP slices per EMG, one row
    per x-coordinate, optionally overlaying the raw data points.

    NOTE(review): ``t`` and ``norm`` in the data-plotting branch are
    computed but unused — likely leftovers from a colormap-by-order scheme.
    """
    emgs = np.unique(m.X[:,2]).tolist()
    fig, axes = plt.subplots(2,len(emgs),sharex=True,sharey=True)
    fig.suptitle("Co-kriging model")
    for j,emg in enumerate(emgs):
        for i in [0,1]:
            ax=axes[i][j]
            # Slice the GP at fixed x-row i and fixed emg.
            m.plot(ax=ax, fixed_inputs=[(0,i), (2,emg)],
                   plot_data=False, lower=17, upper=83,
                   legend=False)
        axes[0][j].set_title("emg {}".format(emg))
    if plot_data:
        t=1
        norm = colors.Normalize(vmin=-50, vmax=len(m.X))
        for (i,j,emg),y in zip(m.X, m.Y):
            i,j = int(i), int(j)
            k = emgs.index(emg)
            axes[i][k].plot(j, y, 'x', color='C{}'.format(j))
            t+=1
##### end of co-kriging section ######
def train_model_seq(trainsC, emg=0, n_random_pts=10, n_total_pts=25, ARD=True, num_restarts=3, continue_opt=False, k=2, constrain=[0.3,3.0], verbose=True):
    """Sequential (Bayesian-optimization style) GP fitting.

    Starts from ``n_random_pts`` randomly sampled channels, fits a Matern-5/2
    GP, then repeatedly queries the UCB-selected next channel until
    ``n_total_pts`` points are collected, refitting (or re-using the first
    fit's hyperparameters when ``continue_opt`` is False) after each query.

    :param k: UCB exploration weight passed to get_next_x
    :return: list of GPy models, one per sequential step (first entry is the
        model after the random initialization)
    """
    X = []
    Y = []
    # Random initialization: sample channels uniformly with replacement.
    for _ in range(n_random_pts):
        ch = random.choice(CHS)
        X.append(ch2xy[ch])
        resp = random.choice(trainsC.emgdct[emg][ch][ch][dt]['data'].max(axis=1))
        Y.append(resp)
    matk = GPy.kern.Matern52(input_dim=2, ARD=ARD)
    if constrain:
        matk.lengthscale.constrain_bounded(*constrain, warning=verbose)
    #Make model
    models = []
    m = GPy.models.GPRegression(np.array(X),np.array(Y)[:,None],matk)
    m.optimize_restarts(num_restarts=num_restarts, verbose=verbose)
    # We optimize this kernel once and then use it for all future models
    optim_params = m[:]
    models.append(m)
    for _ in range(n_total_pts-n_random_pts):
        # Acquisition step: pick the UCB-maximizing channel and sample it.
        nextx = get_next_x(m, k=k)
        X.append(nextx)
        ch = xy2ch[nextx[0]][nextx[1]]
        resp = random.choice(trainsC.emgdct[emg][ch][ch][dt]['data'].max(axis=1))
        Y.append(resp)
        m = GPy.models.GPRegression(np.array(X), np.array(Y)[:,None],matk.copy())
        m[:] = optim_params
        ## TODO: also set gp's noise variance to be same as previous!
        if continue_opt:
            m.optimize_restarts(num_restarts=num_restarts, verbose=verbose)
        models.append(m)
    return models
def plot_seq_values(m, n_random_pts, trainsC=None, ax=None, legend=False):
    """Plot the sequence of sampled response values: blue for the random
    initialization, red for the sequentially acquired points. When
    ``trainsC`` is given, samples taken at its best channel are marked 'x'.
    """
    if ax is None:
        # Fix: the original used `ax = plt.figure()`, but a Figure has no
        # .plot() — grab current axes (creating a figure if needed) instead.
        ax = plt.gca()
    ax.plot(m.Y[:n_random_pts+1], c='b', label="{} random init pts".format(n_random_pts))
    ax.plot(range(n_random_pts, len(m.Y)), m.Y[n_random_pts:,:], c='r', label="Sequential pts")
    ax.set_title("Value of selected channel")
    if trainsC:
        maxch = trainsC.max_ch_1d()
        for i,resp in enumerate(m.Y):
            x,y = m.X[i]
            ch = xy2ch[int(x)][int(y)]
            if ch == maxch:
                ax.plot(i, resp, 'x', c='k')
    if legend:
        ax.legend()
def plot_conseq_dists(m, n_random_pts, trainsC=None, ax=None, legend=False):
    """Plot the Euclidean distance between consecutively queried channels:
    blue during the random initialization, red during sequential queries.
    When ``trainsC`` is given, steps landing on its best channel are marked.
    """
    if ax is None:
        # Fix: the original used `ax = plt.figure()`, but a Figure has no
        # .plot() — grab current axes (creating a figure if needed) instead.
        ax = plt.gca()
    dists = [LA.norm(m.X[i]-m.X[i+1]) for i in range(len(m.X)-1)]
    ax.plot(dists[:n_random_pts+1], c='b', label="{} random init pts".format(n_random_pts))
    ax.plot(range(n_random_pts, len(dists)), dists[n_random_pts:], c='r', label="Sequential pts")
    ax.set_title("Distance between consecutive channels")
    if trainsC:
        maxch = trainsC.max_ch_1d()
        label="max channel ({})".format(maxch)
        for i,dist in enumerate(dists):
            x,y = m.X[i]
            ch = xy2ch[int(x)][int(y)]
            if ch == maxch:
                ax.plot(i, dist, 'x', c='k', label=label)
                label=None
    if legend:
        ax.legend()
def plot_seq_stats(m, n_random_pts, trainsC=None, title=None, plot_acq=False, plot_gp=True):
    """Composite diagnostic figure for a sequentially trained model:
    right column shows sampled values and consecutive-channel distances,
    left column shows the GP sliced at each x-row with the data overlaid
    (random points '+', sequential points 'x' colored by acquisition order).

    NOTE(review): ``plot_gp`` is accepted but never used.
    """
    fig = plt.figure()
    ax1 = fig.add_subplot(2,2,2)
    ax2 = fig.add_subplot(2,2,4)
    # We first plot sequential values
    plot_seq_values(m, n_random_pts, trainsC=trainsC, ax=ax1)
    # Then distance between consecutive x's
    plot_conseq_dists(m, n_random_pts, trainsC=trainsC, ax=ax2, legend=True)
    # Finally we plot the GP with the data points
    ax3 = fig.add_subplot(2,2,1)
    ax4 = fig.add_subplot(2,2,3,sharex=ax3, sharey=ax3)
    axes = [ax3,ax4]
    lengthscales = [m.Mat52.lengthscale[i] for i in range(len(m.Mat52.lengthscale))]
    fig.suptitle(("{}: ls="+" {:.2} "*len(lengthscales)).format(title,*lengthscales))
    for i,ax in zip([0,1],axes):
        m.plot(ax=ax, fixed_inputs=[(0,i)],
               plot_data=False, title='Channels {}'.format(xy2ch[i]),
               lower=17, upper=83, legend=False)
    # Plot data (m.plot plots all of the data in every slice, which is
    # wrong)
    axes[int(m.X[0][0])].plot(int(m.X[0][1]), m.Y[0][0], '+', label="{} random init pts".format(n_random_pts))
    for (i,j),y in zip(m.X[1:n_random_pts,:], m.Y[1:n_random_pts,:]):
        i,j = int(i), int(j)
        axes[i].plot(j, y, '+', c='b')
    t=1
    norm = colors.Normalize(vmin=0, vmax=len(m.X)-n_random_pts)
    for (i,j),y in zip(m.X[n_random_pts:,:], m.Y[n_random_pts:,:]):
        i,j = int(i), int(j)
        axes[i].plot(j, y, 'x', color=plt.cm.Reds(norm(t)))
        t+=1
    if plot_acq:
        acqmap = get_acq_map(m)
        axes[1].plot(acqmap[:5], c='y', label='acq fct')
        axes[0].plot(acqmap[5:], c='y', label='acq fct')
        axes[0].legend()
def get_acq_map(m, k=2):
    """Return the UCB acquisition value (mean + k*std) for every channel.

    `k` is the exploration parameter: larger values favour uncertain
    channels over channels with a high predicted mean.
    """
    query_points = np.array(list(ch2xy.values()))
    posterior_mean, posterior_var = m.predict(query_points)
    return posterior_mean + k * np.sqrt(posterior_var)
def get_next_x(m, k=2):
    """Return the (x, y) coordinates of the channel maximizing the UCB map."""
    ucb = get_acq_map(m, k)
    best_channel = CHS[ucb.argmax()]
    return ch2xy[best_channel]
def plot_model_1d(m, title=None, plot_acq=False, plot_data=True, verbose=True):
    """Plot the GP posterior as two 1-D slices (one per first-input value).

    Optionally overlays the training data ('x' markers coloured by column)
    and the UCB acquisition map.  With `verbose`, prints the model summary
    and kernel lengthscales first.
    """
    if verbose:
        print(m)
        print(m.kern)
        print(m.kern.lengthscale)
    fig, axes = plt.subplots(2,1,
                             sharex=False,
                             sharey=True)
    #lengthscales = [m.Mat52.lengthscale[i] for i in range(len(m.Mat52.lengthscale))]
    fig.suptitle(title)
    #"*len(lengthscales)).format(title,*lengthscales))
    for i,ax in zip([0,1],axes):
        # Slice the posterior at the i-th value of the first input dim.
        m.plot(ax=ax, fixed_inputs=[(0,i)],
               plot_data=False, lower=17, upper=83, legend=False)
        ax.set_xticks([0,1,2,3,4])
        ax.set_xticklabels([[i,j] for j in range(5)])
    if plot_data:
        # NOTE(review): `t` is incremented but never used for colouring --
        # markers are coloured by grid column `j` instead.
        t=1
        norm = colors.Normalize(vmin=-50, vmax=len(m.X))
        for (i,j),y in zip(m.X, m.Y):
            i,j = int(i), int(j)
            axes[i].plot(j, y, 'x', color='C{}'.format(j))
            t+=1
    if plot_acq:
        # First 5 acquisition values belong to the bottom axis, last 5 to
        # the top one (matches the channel layout).
        acqmap = get_acq_map(m)
        axes[1].plot(acqmap[:5], c='y', label='acq fct')
        axes[0].plot(acqmap[5:], c='y', label='acq fct')
        axes[0].legend()
def plot_model_surface(m, ax=None, plot_data=True, zlim=None, extra_xlim=1, plot_colorbar=True):
    """Plot the GP posterior mean as a 3-D surface coloured by posterior std.

    Parameters
    ----------
    m : GPy-style model with .predict/.X/.Y
    ax : existing 3-D axis to draw on; a new figure is created when None.
    plot_data : scatter the training points in green.
    zlim : optional (low, high) pair for the z axis.
    extra_xlim : padding added around the [0,1]x[0,4] prediction grid.
    plot_colorbar : add a std colorbar on the left of the figure.
    """
    # 50x50 prediction grid over the (padded) input domain.
    x = np.linspace(0 - extra_xlim, 1 + extra_xlim, 50)
    y = np.linspace(0 - extra_xlim, 4 + extra_xlim, 50)
    x, y = np.meshgrid(x, y)
    x_, y_ = x.ravel()[:, None], y.ravel()[:, None]
    z = np.hstack((x_, y_))
    mean, var = m.predict(z)
    std = np.sqrt(var)
    mean, std = mean.reshape(50, 50), std.reshape(50, 50)
    if ax is None:
        fig = plt.figure()
        ax = fig.gca(projection='3d')
    else:
        # Bug fix: `fig` was undefined when a pre-existing axis was passed
        # in but plot_colorbar was requested (NameError below).
        fig = ax.figure
    norm = plt.Normalize()
    surf = ax.plot_surface(x, y, mean, linewidth=0, antialiased=False,
                           facecolors=cm.jet(norm(std)))
    if zlim:
        # Bug fix: the zlim argument was ignored and [0, 0.014] hard-coded.
        ax.set_zlim(zlim)
    if plot_data:
        ax.scatter(m.X[:, 0], m.X[:, 1], m.Y, c='g')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('V')
    ax.set_xticks([0, 1])
    ax.set_yticks([0, 1, 2, 3, 4])
    # Renamed from `m`, which shadowed the model parameter.
    mappable = cm.ScalarMappable(cmap=plt.cm.jet, norm=norm)
    mappable.set_array([])
    if plot_colorbar:
        cbaxes = fig.add_axes([0.1, 0.1, 0.03, 0.8])  # colorbar position
        clb = plt.colorbar(mappable, cax=cbaxes)
        clb.ax.yaxis.set_ticks_position('left')
        clb.ax.set_title('std')
def l2dist(m1, m2):
    """Euclidean distance between two models' posterior means on the 2x5 grid."""
    grid = np.array(list(itertools.product(range(2), range(5))))
    return LA.norm(m1.predict(grid)[0] - m2.predict(grid)[0])
def linfdist(m1, m2):
    """Relative difference of the posterior-mean maxima: |max1 - max2| / max2."""
    grid = np.array(list(itertools.product(range(2), range(5))))
    peak_a = m1.predict(grid)[0].max()
    peak_b = m2.predict(grid)[0].max()
    return abs((peak_a - peak_b) / peak_b)
def run_dist_exps(args):
    """Sweep (n random pts, n sequential pts) and measure distance to the
    full-data GP; saves the raw arrays and two heatmap PDFs under
    exps/1d/emg<emg>/exp<uid>/.
    """
    exppath = path.join('exps', '1d', 'emg{}'.format(args.emg), 'exp{}'.format(args.uid))
    if not path.isdir(exppath):
        os.makedirs(exppath)
    trainsC = Trains(emg=args.emg)
    trains = trainsC.get_emgdct(emg=args.emg)
    # We train all models with n rnd start pts and m sequential pts
    # And compare them to the model trained with all datapts
    # Then compute statistics and plot them
    X,Y = make_dataset_1d(trainsC, emg=args.emg)
    mfull = train_model_1d(X,Y, ARD=False)
    nrnd = range(5,50,5)
    nseq = range(0,50,5)
    N = 50  # number of repeats per (n1, n2) cell
    l2s = np.zeros((N, len(nrnd),len(nseq)))
    linfs = np.zeros((N, len(nrnd),len(nseq)))
    for k in range(N):
        print("Starting loop", k)
        for i,n1 in enumerate(nrnd):
            for j,n2 in enumerate(nseq):
                print(n1,n2)
                models = train_model_seq(trainsC,emg=args.emg, n_random_pts=n1, n_total_pts=n1+n2, ARD=False)
                # Only the final model of the sequence is compared.
                m = models[-1]
                l2 = l2dist(m, mfull)
                linf = linfdist(m, mfull)
                l2s[k][i][j] = l2
                linfs[k][i][j] = linf
    np.save(os.path.join(exppath,"l2s"), l2s)
    np.save(os.path.join(exppath, "linfs"), linfs)
    # Heatmaps of the mean distance over repeats.
    plt.imshow(l2s.mean(axis=0), extent=[0,50,50,5])
    plt.title("1d l2 dist to true gp mean")
    plt.ylabel("N random pts")
    plt.xlabel("N sequential")
    plt.colorbar()
    plt.savefig(os.path.join(exppath, "1d_l2.pdf"))
    plt.close()
    plt.figure()
    plt.imshow(linfs.mean(axis=0), extent=[0,50,50,5])
    plt.title("1d linf dist to true gp mean")
    plt.ylabel("N random pts")
    plt.xlabel("N sequential")
    plt.colorbar()
    plt.savefig(os.path.join(exppath, "1d_linf.pdf"))
    plt.close()
def get_ch(xy):
    """Map (x, y) grid coordinates (coerced to int) to a channel via xy2ch."""
    x_idx, y_idx = xy
    return xy2ch[int(x_idx)][int(y_idx)]
def get_maxch(m):
    """Return the channel whose posterior mean is highest on the 2x5 grid."""
    grid = np.array(list(itertools.product(range(2), range(5))))
    posterior_means, _ = m.predict(grid)
    best_flat_idx = posterior_means.argmax()
    return get_ch(np.unravel_index(best_flat_idx, (2, 5)))
def get_nmaxch(m, n=3):
    """Return the n grid coordinates with the highest posterior mean, best first."""
    if n == 0:
        return []
    grid = np.array(list(itertools.product(range(2), range(5))))
    posterior_means, _ = m.predict(grid)
    ranked = sorted(zip(grid.tolist(), posterior_means.flatten()),
                    key=operator.itemgetter(1))
    # Last n of the ascending sort are the best; reverse for best-first order.
    return [coords for coords, _ in reversed(ranked[-n:])]
def run_ch_stats_exps(trainsC, emgs=[0,2,4], repeat=25, uid=None, jobid=None, continue_opt=True, k=2, ntotal=100, nrnd=[5,75,10], ARD=True):
    """Run repeated sequential-GP experiments and pickle per-emg statistics.

    For every emg and every number of random init points n1 in range(*nrnd),
    trains a sequential model `repeat` times and records: the channels
    queried, the argmax channel after each sequential step, and the full
    10-channel posterior means.  Results are pickled to
    exps/1d/exp<uid>/k<k>/ARD<ARD>/chruns_dct.pkl and returned as a dict.
    """
    # here we run a bunch of runs, gather all statistics and save as
    # npy array, to later plot in jupyter notebook
    if uid is None:
        uid = random.randrange(99999)
    exppath = path.join('exps', '1d', 'exp{}'.format(uid), 'k{}'.format(k), 'ARD{}'.format(ARD))
    print("Will save in path", exppath)
    if not path.isdir(exppath):
        os.makedirs(exppath)
    if jobid:
        # We save the name of the jobid in a file, so that if the
        # experiment fails, we can ask sacct for info
        filename = os.path.join(exppath, 'sbatchjobid={}'.format(jobid))
        with open(filename, 'w') as f:
            # Bug fix: the written string said 'sbatcjobid' (typo),
            # inconsistent with the filename above.
            f.write('sbatchjobid = {}'.format(jobid))
    nrnd = range(*nrnd)
    dct = {}
    for emg in emgs:
        print("Starting emg {}".format(emg))
        queriedchs = np.zeros((repeat, len(nrnd), ntotal))
        maxchs = np.zeros((repeat, len(nrnd), ntotal))
        # we save all values the model predicted (for 10 chs)
        # from this we can compute l2 and linf distances later
        vals = np.zeros((repeat, len(nrnd), ntotal, 10))
        for r in range(repeat):
            print("Repeat", r)
            for i,n1 in enumerate(nrnd):
                print("nrnd: {}".format(n1))
                models = train_model_seq(trainsC, emg=emg, n_random_pts=n1,
                                         n_total_pts=ntotal, ARD=ARD,
                                         continue_opt=continue_opt, num_restarts=1, k=k)
                queriedchs[r][i] = [get_ch(xy) for xy in models[-1].X]
                # The first model already contains the n1 random points, so
                # its statistics are stored at index n1-1.
                for midx,m in enumerate(models,n1-1):
                    maxchs[r][i][midx] = get_maxch(m)
                X = np.array(list(itertools.product(range(2),range(5))))
                vals[r,i,n1-1:] = np.hstack([m.predict(X)[0] for m in models]).T
        dct[emg] = {
            'queriedchs': queriedchs,
            'maxchs': maxchs,
            'vals': vals,
            'nrnd': nrnd,
            'ntotal': ntotal,
            'true_ch': trainsC.max_ch_1d(emg=emg),
            'k': k
        }
    with open(os.path.join(exppath, 'chruns_dct.pkl'), 'wb') as f:
        print("Saving in path", exppath)
        pickle.dump(dct, f)
    return dct
if __name__ == '__main__':
    args = parser.parse_args()
    trainsC = Trains(emg=args.emg)
    # Surface plots for sequential models with decreasing random-point budgets.
    for n in [25,20,15,10,5,1]:
        # train_model_seq returns a list of models; with n_total_pts == n
        # it contains exactly one, hence the single-element unpack.
        m, = train_model_seq(trainsC, emg=0, n_random_pts=n, n_total_pts=n,k=6)
        plot_model_surface(m, plot_data=False, zlim=[0,0.014], plot_colorbar=False)
        plt.show()
    # Reference: model trained on the full 1-D dataset.
    emg=0
    X,Y = make_dataset_1d(trainsC, emg=emg)
    m = train_model_1d(X,Y)
    print(m.kern.lengthscale)
    plot_model_surface(m, plot_data=False, plot_colorbar=False)
    plt.show()
| samlaf/GP-BCI | data_10ch/gp_full_1d.py | gp_full_1d.py | py | 16,245 | python | en | code | 1 | github-code | 13 |
6230866052 | import csv
from numpy import *
import scipy.cluster.vq as kmean
from nexus import Cluster, Geopoint
class Clump:
    '''Cluster geocoded points with k-means and plot them on static Google Maps.'''

    def __init__(self):
        self.clusters = []   # Cluster objects, one per k-means cluster
        self.header = []     # header row captured from the input csv

    def plot_centers(self, centroid_list):
        '''Print each multi-point cluster's center and a static-map URL of all centers.'''
        print('\nThe centers of the clusters are located at: \n')
        URL_prefix = 'http://maps.googleapis.com/maps/api/staticmap?size=640x640&maptype=roadmap\\'
        number = 1
        markers = ''
        for centroid in centroid_list:
            if centroid.size > 1:
                form = (number, centroid.center, centroid.size)
                print('{} --> {} has {} points around it.'.format(*form))
                # Static-map URLs are capped around 2000 chars; flush and restart.
                if len(URL_prefix + markers) + 15 > 2000:
                    print(''.join([URL_prefix, markers, '&sensor=false']))
                    markers = ''
                form = (number, centroid.center.lat, centroid.center.lon)
                markers += '&markers=%7Ccolor:blue%7Clabel:{}%7C{},{}'.format(*form)
                number += 1
        final_URL = '{}{}%7C&sensor=false'.format(URL_prefix, markers)
        print(
            '\nThe map of the cluster centers\n{}\n{}\n'.format(final_URL, '_' * 37))

    def plotter(self, parsed_file, center):
        '''Print a static-map URL of one cluster's points around `center`.

        Returns 0 (and plots nothing) when the center is the (0, 0) sentinel.
        '''
        if center.lat == 0 and center.lon == 0:
            return 0
        colors = ['black', 'purple', 'green',
                  'red', 'orange', 'yellow', 'white']
        URL_prefix = 'http://maps.googleapis.com/maps/api/staticmap?size=640x640&maptype=roadmap\\'
        color = colors[5]
        markers = '&markers=%7Ccolor:blue%7C{},{}'.format(
            center.lat, center.lon)
        # Points are labelled A, B, C, ... (NOTE(review): labels run past 'Z'
        # after 26 points -- confirm inputs stay small).
        letter = ord('A')
        for point in parsed_file:
            if color != colors[int(point[4]) % (len(colors) - 1)]:
                color = colors[int(point[4]) - 1]
            if (len(URL_prefix + markers) + 15 > 2000):
                print(URL_prefix + markers + '&sensor=false')
                markers = ''
            form = (color, chr(letter), point[2], point[3])
            markers += '&markers=%7Ccolor:{}%7Clabel:{}%7C{},{}'.format(*form)
            letter += 1
        final_URL = '{}{}%7C&sensor=false'.format(URL_prefix, markers)
        print(final_URL, end='\n\n')

    def scan_clustering(self, placement):
        '''Check that there is at least one point in each cluster.

        NOTE(review): this tests each label for truthiness, so any point
        assigned to cluster id 0 (kmeans2 labels start at 0) makes the whole
        solution rejected -- confirm this is the intended acceptance rule.
        '''
        for i in range(len(placement)):
            if not placement[i]:
                return False
        return True

    def read_existing_data(self, file_name='cluster.csv'):
        '''Pull in a headered, pipe-delimited csv; stash the header and return the rows.'''
        parsed_file = []
        with open(file_name) as csvfile:
            info_reader = csv.reader(
                csvfile, delimiter='|', quoting=csv.QUOTE_NONE)
            for row in info_reader:
                column = []
                for item in row:
                    column.append(item.strip())
                parsed_file.append(column)
        self.header = parsed_file.pop(0)
        return parsed_file

    def write_data(self, data, file_name='cluster.csv'):
        '''Write rows (sorted by cluster id, column 4) back out under the header.'''
        with open(file_name, 'w') as updated_file:
            data.sort(key=lambda row: row[4])
            writer = csv.writer(updated_file, delimiter='|',
                                quoting=csv.QUOTE_NONE)
            # Bug fix: csv.writer has no write_row() method, and `header`
            # was an undefined bare name -- use writerow / self.header.
            writer.writerow(self.header)
            for row in data:
                writer.writerow(row)

    def create_observation_vector(self, parsed_file):
        '''Geocode rows missing lat/lon and return [[lon, lat], ...] for kmeans.'''
        obs_vector = []
        for i in range(len(parsed_file)):
            item = parsed_file[i]
            name = item[0]
            address = item[1]
            lat = item[2]
            lon = item[3]
            if not lat or not lon:
                # Missing coordinates: let Geopoint geocode from the address,
                # then cache the result back into the row.
                temp = Geopoint(name, address)
                item[2] = temp.lat
                item[3] = temp.lon
            else:
                temp = Geopoint(name, address, lat, lon)
            obs_vector.append([temp.lon, temp.lat])
        return obs_vector

    def perform_clustering(self, observation):
        '''Run kmeans2 for increasing k and return the last accepted solution.'''
        print(observation)
        matrix = array(observation)
        last_solution = None
        for i in range(2, len(observation) // 2):
            cluster = kmean.kmeans2(matrix, i, iter=1000, minit='random')
            if self.scan_clustering(cluster[1]):
                last_solution = cluster
                print('Clusters =', i, cluster[1])
        if not last_solution:
            print('Rerun the program, this sort of thing takes time.')
            exit(1)  # change this
        print(last_solution)
        return last_solution

    def initialize_clusters(self, last_solution, parsed_file):
        '''Build Cluster objects from a kmeans2 (centroids, labels) solution.'''
        # Bug fix: labels run 0..max inclusive, so max+1 Cluster objects are
        # needed (the original allocated one too few, risking IndexError).
        for i in range(0, max(last_solution[1]) + 1):
            self.clusters.append(Cluster())
        for i in range(len(parsed_file)):
            cluster = last_solution[1][i]
            parsed_file[i][4] = cluster
            clusterNode = self.clusters[cluster]
            theCenter = self.clusters[cluster].center
            if (theCenter.lat == 0 and theCenter.lon == 0):
                # kmeans2 centroids are stored (lon, lat); swap into place.
                theCenter.lat = last_solution[0][cluster][1]
                theCenter.lon = last_solution[0][cluster][0]
            clusterNode.points.append(parsed_file[i])

    def __str__(self):
        '''Print a per-cluster report (maps + Google-Maps route string).

        Reporting is side-effecting via print(); an empty string is returned
        so that str(obj) remains valid.
        '''
        cluster_number = 1
        for centroid in self.clusters:
            form = (cluster_number, centroid.center, centroid.size)
            print('Cluster {} centered at {} size: {}'.format(*form))
            print('A static map of the cluster')
            # Bug fix: was Clump.plotter(points, center), which passed the
            # points list as `self` and raised TypeError.
            self.plotter(centroid.points, centroid.center)
            i = ord('A')
            pointsLined = 'from: Capitol Boise ID'
            for points in centroid.points:
                print('{} is {}, {}'.format(chr(i), points[0], points[1]))
                pointsLined += (' to: ' + points[1])
                i += 1
            pointsLined += ' to: Capitol Boise ID'
            print('\nPaste this into Google Maps for distance info:\n{}\n{}'.format(
                pointsLined, '_' * 43))
            cluster_number += 1
        # Bug fix: __str__ must return a str; returning None made str(obj) raise.
        return ''
if __name__ == '__main__':
test = Clump()
data = test.read_existing_data()
obs_vector = test.create_observation_vector(data)
clustered = test.perform_clustering(obs_vector) | wegry/geocluster | clump.py | clump.py | py | 6,292 | python | en | code | 0 | github-code | 13 |
30102256735 | #!/usr/bin/python
from subprocess import Popen, PIPE, check_output
import argparse
import calendar
import os
import re
import time
import xml.etree.ElementTree as ET
# Collect every cached OCSP response, keyed by certificate serial number.
parser = argparse.ArgumentParser(
    description='Get OCSP production time for X-Road certificates.',
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog='Status returns number of seconds since production of oldest OCSP response.'
)
parser.add_argument('-s', help='Output status only', action="store_true")
args = parser.parse_args()

cache = {}
for fileName in os.listdir('/var/cache/xroad'):
    if re.match(r'^.*\.ocsp$', fileName):
        # Decode the DER response to text so we can grep serial/status fields.
        out = check_output(
            ['openssl', 'ocsp', '-noverify', '-text', '-respin',
             '/var/cache/xroad/{}'.format(fileName)]).decode('utf-8')
        r = re.search('^ {6}Serial Number: (.+)$', out, re.MULTILINE)
        if r and r.group(1):
            cache[r.group(1)] = out

ocsp_time = 0
with open('/etc/xroad/signer/keyconf.xml', 'r') as keyconf:
    root = ET.fromstring(keyconf.read())
for key in root.findall('./device/key'):
    type = 'SIGN' if key.attrib['usage'] == 'SIGNING' else 'AUTH'
    keyId = key.find('./keyId').text
    friendlyName = key.find('./friendlyName').text if key.find(
        './friendlyName') is not None and key.find('./friendlyName').text is not None else ''
    for cert in key.findall('./cert'):
        # Only active, registered certificates are reported.
        if not (cert.attrib['active'] == 'true' and cert.find(
                './status').text == 'registered'):
            continue
        contents = cert.find('./contents').text
        # Re-wrap the base64 body to 76-char lines so openssl accepts the PEM.
        contents = '\n'.join([contents[i:i + 76] for i in range(0, len(contents), 76)])
        pem = '-----BEGIN CERTIFICATE-----\n{}\n-----END CERTIFICATE-----\n'.format(contents)
        p = Popen(['openssl', 'x509', '-noout', '-serial'], stdin=PIPE, stdout=PIPE,
                  stderr=PIPE)
        stdout, stderr = p.communicate(pem.encode('utf-8'))
        r = re.match('^serial=(.+)$', stdout.decode('utf-8'))
        if r and r.group(1):
            serial = r.group(1)
            # Bug fix: the original indexed cache[serial] before checking
            # membership, raising KeyError for a certificate whose serial
            # has no cached OCSP response.
            if serial in cache:
                r_produced = re.search('^ {4}Produced At: (.+)$', cache[serial], re.MULTILINE)
            else:
                r_produced = None
            if r_produced and re.search(
                    '^ {4}Cert Status: good$', cache[serial], re.MULTILINE):
                t = time.strptime(r_produced.group(1), '%b %d %H:%M:%S %Y %Z')
                produced = time.strftime('%Y-%m-%d %H:%M:%S', t)
                if not args.s:
                    print('{}\t{}\t{}\t{}'.format(produced, type, keyId, friendlyName))
                elif not ocsp_time or calendar.timegm(t) > ocsp_time:
                    # NOTE(review): '>' keeps the *newest* production time,
                    # while the epilog promises the oldest -- confirm intent.
                    ocsp_time = calendar.timegm(t)
            elif not args.s:
                print('ERROR\t{}\t{}\t{}'.format(type, keyId, friendlyName))
            else:
                # One of certificates does not have OCSP response
                print(1000000000)
                exit(0)

if args.s and ocsp_time:
    print(int(time.time()) - ocsp_time)
| 1AndyCh/ria-eek | misc/ocsp_produced.py | ocsp_produced.py | py | 3,112 | python | en | code | 0 | github-code | 13 |
def age_assignment(*args, **kwargs):
    """Pair every name in *args with the age keyword keyed by its first letter.

    Returns one newline-terminated "<name> is <age> years old." line per
    person, sorted alphabetically by name.
    """
    ages_by_name = {name: kwargs[name[0]] for name in args}
    report = ''
    for person, age in sorted(ages_by_name.items()):
        report += f"{person} is {age} years old.\n"
    return report
# Demo: ages are supplied as keyword arguments keyed by each name's initial.
print(age_assignment("Peter", "George", G=26, P=19))
print(age_assignment("Amy", "Bill", "Willy", W=36, A=22, B=61))
| lefcho/SoftUni | Python/SoftUni - Python Advanced/Functions Advanced/age_assignment.py | age_assignment.py | py | 441 | python | en | code | 0 | github-code | 13 |
37972287616 | import sys
from datetime import datetime
from PyQt6.QtWidgets import (
QApplication,QWidget,
QFormLayout,QPushButton,
QLineEdit,QMenuBar,
QMainWindow,QLabel,
QComboBox
)
from PyQt6.QtGui import QAction
from PyQt6.QtCore import Qt
# class for window
class MainWindow(QWidget):
    """Survey form window: name / roll no / gender / city / remarks.

    Save appends the current entry to ./data.csv; Reset clears the text
    fields.  A menu bar exposes New / View / Exit actions.
    """
    def __init__(self):
        super().__init__()
        # setting form layout
        self.mainLayout = QFormLayout()
        # intializing the variables
        self.city_list = ['Ghakhar Mandi','Gujranwala','Gujrat','Sialkot','Wazirabad','Kharian','Islamabad']
        self.gender_list = ['Male','Female','Other']
        self.name = QLineEdit()
        self.roll_no = QLineEdit()
        self.gender = QComboBox()
        self.city = QComboBox()
        self.remarks = QLineEdit()
        self.label = QLabel()
        # populate the combo boxes
        self.city.addItems(self.city_list)
        self.gender.addItems(self.gender_list)
        # adding menubar
        self.menu_bar = QMenuBar()
        self.action_new = QAction("New",self)
        self.action_view = QAction("View",self)
        self.action_exit = QAction("Exit",self)
        # NOTE(review): the View action is never connected to self.view --
        # the commented-out line below suggests it was intended. Confirm.
        # self.action_view.triggered.connect(self.view)
        self.menu_bar.addAction(self.action_new)
        self.menu_bar.addAction(self.action_view)
        self.menu_bar.addAction(self.action_exit)
        # adding menubar to layout
        self.mainLayout.addWidget(self.menu_bar)
        # setting the form rows
        self.mainLayout.addRow("Name",self.name)
        self.mainLayout.addRow("Roll No.",self.roll_no)
        self.mainLayout.addRow("Gender",self.gender)
        self.mainLayout.addRow("City",self.city)
        self.mainLayout.addRow("Remarks",self.remarks)
        # making buttons
        self.save_btn = QPushButton("Save")
        self.reset_btn = QPushButton("Reset")
        self.save_btn.setFixedSize(50,50)
        self.reset_btn.setFixedSize(50,50)
        # connect button handlers
        self.save_btn.clicked.connect(self.save_action)
        self.reset_btn.clicked.connect(self.reset_action)
        # adding buttons and status label
        self.mainLayout.addWidget(self.save_btn)
        self.mainLayout.addWidget(self.reset_btn)
        self.mainLayout.addWidget(self.label)
        # adding form layout to the window
        self.setLayout(self.mainLayout)

    # handler functions
    def view(self):
        """Menu handler: load saved records into the label (currently unwired)."""
        print('clicked')
        self.read_from_file()

    def read_from_file(self):
        """Show saved records in the label.

        NOTE(review): setText is called once per line, so only the last
        record remains visible -- likely meant to accumulate the text.
        """
        with open('./data.csv','r') as file:
            data = file.readlines()
            for d in data:
                self.label.setText(d)

    def save_in_file(self,data):
        """Append one record to ./data.csv.

        NOTE(review): str(tuple) is written verbatim (with parentheses and
        quotes), not RFC-4180 CSV -- confirm downstream readers expect this.
        """
        with open('./data.csv','a') as file:
            file.write(str(data)+'\n')

    def save_action(self):
        """Collect the current form values and append them to the data file."""
        name = self.name.text()
        roll = self.roll_no.text()
        gender = str(self.gender.currentText())
        city = str(self.city.currentText())
        remark = self.remarks.text()
        print(f"{name},{roll},{gender},{city},{remark}")
        data = name,roll,gender,city,remark
        self.save_in_file(data)

    def reset_action(self):
        """Clear the three free-text fields (combo boxes keep their selection)."""
        self.name.setText('')
        self.roll_no.setText('')
        self.remarks.setText('')
# start of main: build the Qt application, show the survey window, and run
# the event loop until the window is closed.
if __name__ == '__main__':
    application = QApplication([])
    # object of MainWindow class
    window = MainWindow()
    window.setWindowTitle("Survey")
    window.setGeometry(250,150,700,400)
    window.show()
    # exec() blocks until the GUI exits; forward its status code to the OS
    sys.exit(application.exec())
| AbdulRehmanjr/python | APL/mid/q2.py | q2.py | py | 3,496 | python | en | code | 1 | github-code | 13 |
42117749772 |
import json
from types import new_class
from datetime import datetime
import re
import csv
from numpy import empty, number
import operator
import itertools
import reverse_geocoder as rg
import os
import pandas as pd
from geopy.geocoders import Nominatim
# Prompt for the exported JSON file and parse it up front; the functions
# below work off the in-memory list of event dicts.
file_path = str(input("Please provide the path to the JSON file: "))
# Idiom fix: context manager guarantees the handle is closed even if
# json.loads raises (the original used open/read/close).
with open(file_path, 'r', encoding="utf-8") as json_file:
    deserialised_json = json.loads(json_file.read())

# Intermediate CSV produced by createcsvwithcoordinates(), consumed by
# getcitiescount(), and deleted at the end of the script.
draftCSV = 'webapp/src/assets/js/Draft.csv'
"""
This function creates a csv file with city,lat,lon,continent,state,no of events
"""
def createcsvwithcoordinates():
    """Write one Draft.csv row (city, lat, lon, country, state, event id)
    per geocodable event in the global `deserialised_json`.

    Events without a usable latitude/longitude (missing, empty, or holding
    a URL instead of a coordinate) are skipped, as are entries the reverse
    geocoder cannot resolve.  Failures are best-effort: any exception for a
    single entry is swallowed and processing continues.
    """
    # Nominatim reverse-geocodes a (lat, lon) pair into a street address.
    geolocator = Nominatim(user_agent="Nominatim")
    with open(draftCSV, 'w', encoding='UTF8', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['city', 'homelat', 'homelon', 'homecontinent', 'state', 'n'])
        for entry in deserialised_json:
            try:
                location_info = entry.get("Location")
                if not location_info or "address" not in location_info[0]:
                    continue
                lat = location_info[0].get("latitude", "")
                lon = location_info[0].get("longitude", "")
                # Some records store a URL where the coordinate should be.
                if not lat or not lon or "https://" in lon:
                    continue
                location = geolocator.reverse(lat + "," + lon)
                if not location or not location.raw:
                    continue
                address = location.raw['address']
                # Prefer city, then town, then region (original precedence).
                city = address.get('city', '') or address.get('town', '') or address.get('region', '')
                # Bug fix: `city` could previously be unbound (NameError
                # silently swallowed) when all three fields were empty.
                if not city:
                    continue
                # Normalize historical borough names to the present-day city.
                if "Old Toronto" in city:
                    city = "Toronto"
                elif "(Old) Ottawa" in city:
                    city = "Ottawa"
                state = address.get('state', '')
                country = address.get('country', '')
                # Bug fix: the original compared against numpy's `empty`
                # *function* with `is not`, which is always True; the
                # intended non-empty check is applied here.
                if city and state and country:
                    writer.writerow((city, lat, lon, country, state, entry['swallow_id']))
            except Exception:
                # Best-effort: skip malformed entries / geocoder failures
                # (was a bare `except:`, which also swallowed KeyboardInterrupt).
                pass
# This function is used to get the cities count from draft.csv, replace then merge the count to a new csv with lat,long,state, and n
#
def getcitiescount():
    """Collapse Draft.csv to one row per city in final.csv, appending the
    number of events counted for that city.

    The first Draft.csv occurrence of each city supplies its lat/lon,
    country and state; the count comes from a pandas pivot over all rows.
    """
    df = pd.read_csv(draftCSV)
    df = pd.DataFrame(df, columns=['city', 'homelat', 'homelon', 'homecontinent', 'state'])
    # city -> number of occurrences
    counts = df.pivot_table(columns=['city'], aggfunc='size').to_dict()
    seen = set()  # perf: O(1) membership (the original used a list: O(n) per row)
    header = ['city', 'homelat', 'homelon', 'homecontinent', 'state', 'n']
    with open(draftCSV, 'r', newline='') as src, \
            open('webapp/src/assets/js/final.csv', 'w', encoding='UTF8', newline='') as dst:
        writer = csv.writer(dst)
        writer.writerow(header)
        reader = csv.reader(src, delimiter=',')
        next(reader, None)  # skip the Draft.csv header row
        for row in reader:
            # Robustness: also guard against blank rows before indexing.
            if row and row[0] and row[0] not in seen:
                seen.add(row[0])
                writer.writerow((row[0], row[1], row[2], row[3], row[4], counts[row[0]]))
# Build the intermediate Draft.csv, then collapse it into final.csv.
createcsvwithcoordinates()
getcitiescount()
# remove unnecessary draft csv file once final.csv exists.
os.remove(draftCSV)
| spokenwebsites/ADP_Front | webapp/src/assets/js/viz3.geolocations.py | viz3.geolocations.py | py | 4,339 | python | en | code | 3 | github-code | 13 |
30176990310 | import numpy as np
import numpy.linalg as LA
import scipy
def logsig(x):
    """Numerically stable elementwise log-sigmoid: log(1 / (1 + exp(-x))).

    Four regimes avoid overflow/underflow; see
    http://fa.bianp.net/blog/2019/evaluate_logistic/ for the derivation.
    """
    result = np.zeros_like(x)
    # Very negative: log-sigmoid is x to machine precision.
    region_low = x < -33
    result[region_low] = x[region_low]
    # Moderately negative: first-order correction x - exp(x).
    region_mid_low = (x >= -33) & (x < -18)
    result[region_mid_low] = x[region_mid_low] - np.exp(x[region_mid_low])
    # Central range: exact stable form -log1p(exp(-x)).
    region_mid_high = (x >= -18) & (x < 37)
    result[region_mid_high] = -np.log1p(np.exp(-x[region_mid_high]))
    # Very positive: log-sigmoid is -exp(-x) to machine precision.
    region_high = x >= 37
    result[region_high] = -np.exp(-x[region_high])
    return result
def expit_b(x, b):
    """Numerically stable elementwise sigmoid(x) - b.

    Splits on the sign of x so that only exp of a non-positive argument is
    ever evaluated (no overflow).
    """
    out = np.zeros_like(x)
    neg = x < 0
    pos = ~neg
    # x < 0: multiply through by exp(x) <= 1.
    exp_x = np.exp(x[neg])
    b_neg = b[neg]
    out[neg] = ((1 - b_neg) * exp_x - b_neg) / (1 + exp_x)
    # x >= 0: multiply through by exp(-x) <= 1.
    exp_mx = np.exp(-x[pos])
    b_pos = b[pos]
    out[pos] = ((1 - b_pos) - b_pos * exp_mx) / (1 + exp_mx)
    return out
def safe_sparse_add(a, b):
    """Add two arrays/matrices, handling scipy sparse operands.

    If both operands are sparse the result stays sparse; otherwise any
    sparse operand is densified first.  A densified operand is raveled to
    1-D when the other operand is 1-D, so the sum broadcasts to a 1-D
    result instead of a (1, n) matrix.
    """
    if scipy.sparse.issparse(a) and scipy.sparse.issparse(b):
        # both are sparse, keep the result sparse
        return a + b
    # At least one dense operand: convert everything to dense.
    if scipy.sparse.issparse(a):
        a = a.toarray()
        if a.ndim == 2 and b.ndim == 1:
            # Bug fix: was `b.ravel()` with the result discarded (a no-op);
            # it is the densified `a` that must drop to 1-D, mirroring the
            # symmetric branch below.
            a = a.ravel()
    elif scipy.sparse.issparse(b):
        b = b.toarray()
        if b.ndim == 2 and a.ndim == 1:
            b = b.ravel()
    return a + b
def logistic_loss(A, y, w):
    """Logistic loss, numerically stable implementation.

    Equals the mean binary cross-entropy: (1-y)*z - logsig(z) is
    -[y*log(sigmoid(z)) + (1-y)*log(1-sigmoid(z))] with z = A @ w.

    Parameters
    ----------
    A: array-like, shape (n_samples, n_features)
        Data matrix
    y: array-like, shape (n_samples,)
        Labels in {0, 1}
    w: array-like, shape (n_features,)
        Coefficients

    Returns
    -------
    loss: float
        Mean logistic loss over the samples.
    """
    z = np.dot(A, w)
    y = np.asarray(y)
    return np.mean((1-y)*z - logsig(z))
def logistic_grad(x, A, b):
    """Gradient of the mean logistic loss.

    Parameters
    ----------
    x: array-like, shape (n_features,)
        Coefficients
    A: array-like, shape (n_samples, n_features)
        Data matrix
    b: array-like, shape (n_samples,)
        Labels

    Returns
    -------
    grad: array-like, shape (n_features,)
    """
    margins = A.dot(x)
    # Per-sample residual sigmoid(margin) - label, computed stably.
    residuals = expit_b(margins, b)
    n_samples = A.shape[0]
    return A.T.dot(residuals) / n_samples
| ABMOPT/ABM | src/workers/utils.py | utils.py | py | 2,437 | python | en | code | 0 | github-code | 13 |
17041413844 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayFundWalletOperationQueryModel(object):
    """Request model for alipay.fund.wallet.operation.query (generated SDK code).

    A plain property bag; to_alipay_dict()/from_alipay_dict() convert
    between instances and the wire-format dict.
    """
    def __init__(self):
        self._biz_scene = None
        self._biz_types = None
        self._current_page = None
        self._end_biz_dt = None
        self._page_size = None
        self._product_code = None
        self._start_biz_dt = None
        self._user_wallet_id = None

    @property
    def biz_scene(self):
        return self._biz_scene

    @biz_scene.setter
    def biz_scene(self, value):
        self._biz_scene = value

    @property
    def biz_types(self):
        return self._biz_types

    @biz_types.setter
    def biz_types(self, value):
        # Only list values are accepted; elements are copied one by one.
        if isinstance(value, list):
            self._biz_types = list()
            for i in value:
                self._biz_types.append(i)

    @property
    def current_page(self):
        return self._current_page

    @current_page.setter
    def current_page(self, value):
        self._current_page = value

    @property
    def end_biz_dt(self):
        return self._end_biz_dt

    @end_biz_dt.setter
    def end_biz_dt(self, value):
        self._end_biz_dt = value

    @property
    def page_size(self):
        return self._page_size

    @page_size.setter
    def page_size(self, value):
        self._page_size = value

    @property
    def product_code(self):
        return self._product_code

    @product_code.setter
    def product_code(self, value):
        self._product_code = value

    @property
    def start_biz_dt(self):
        return self._start_biz_dt

    @start_biz_dt.setter
    def start_biz_dt(self, value):
        self._start_biz_dt = value

    @property
    def user_wallet_id(self):
        return self._user_wallet_id

    @user_wallet_id.setter
    def user_wallet_id(self, value):
        self._user_wallet_id = value

    def to_alipay_dict(self):
        """Serialize set fields to a dict; nested objects exposing
        to_alipay_dict() are serialized recursively."""
        params = dict()
        if self.biz_scene:
            if hasattr(self.biz_scene, 'to_alipay_dict'):
                params['biz_scene'] = self.biz_scene.to_alipay_dict()
            else:
                params['biz_scene'] = self.biz_scene
        if self.biz_types:
            # List elements are serialized in place before the list itself.
            if isinstance(self.biz_types, list):
                for i in range(0, len(self.biz_types)):
                    element = self.biz_types[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.biz_types[i] = element.to_alipay_dict()
            if hasattr(self.biz_types, 'to_alipay_dict'):
                params['biz_types'] = self.biz_types.to_alipay_dict()
            else:
                params['biz_types'] = self.biz_types
        if self.current_page:
            if hasattr(self.current_page, 'to_alipay_dict'):
                params['current_page'] = self.current_page.to_alipay_dict()
            else:
                params['current_page'] = self.current_page
        if self.end_biz_dt:
            if hasattr(self.end_biz_dt, 'to_alipay_dict'):
                params['end_biz_dt'] = self.end_biz_dt.to_alipay_dict()
            else:
                params['end_biz_dt'] = self.end_biz_dt
        if self.page_size:
            if hasattr(self.page_size, 'to_alipay_dict'):
                params['page_size'] = self.page_size.to_alipay_dict()
            else:
                params['page_size'] = self.page_size
        if self.product_code:
            if hasattr(self.product_code, 'to_alipay_dict'):
                params['product_code'] = self.product_code.to_alipay_dict()
            else:
                params['product_code'] = self.product_code
        if self.start_biz_dt:
            if hasattr(self.start_biz_dt, 'to_alipay_dict'):
                params['start_biz_dt'] = self.start_biz_dt.to_alipay_dict()
            else:
                params['start_biz_dt'] = self.start_biz_dt
        if self.user_wallet_id:
            if hasattr(self.user_wallet_id, 'to_alipay_dict'):
                params['user_wallet_id'] = self.user_wallet_id.to_alipay_dict()
            else:
                params['user_wallet_id'] = self.user_wallet_id
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model instance from a wire-format dict; None for falsy input."""
        if not d:
            return None
        o = AlipayFundWalletOperationQueryModel()
        if 'biz_scene' in d:
            o.biz_scene = d['biz_scene']
        if 'biz_types' in d:
            o.biz_types = d['biz_types']
        if 'current_page' in d:
            o.current_page = d['current_page']
        if 'end_biz_dt' in d:
            o.end_biz_dt = d['end_biz_dt']
        if 'page_size' in d:
            o.page_size = d['page_size']
        if 'product_code' in d:
            o.product_code = d['product_code']
        if 'start_biz_dt' in d:
            o.start_biz_dt = d['start_biz_dt']
        if 'user_wallet_id' in d:
            o.user_wallet_id = d['user_wallet_id']
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayFundWalletOperationQueryModel.py | AlipayFundWalletOperationQueryModel.py | py | 4,917 | python | en | code | 241 | github-code | 13 |
13514135206 | from ebcli.core.abstractcontroller import AbstractBaseController
from ebcli.resources.strings import strings
from ebcli.core import io
from ebcli.lib import elasticbeanstalk, aws
from botocore.compat import six
urllib = six.moves.urllib
class QuicklinkController(AbstractBaseController):
    """`eb labs quicklink` command: print a pre-filled console launch URL."""
    class Meta:
        label = 'quicklink'
        stacked_on = 'labs'
        stacked_type = 'nested'
        description = strings['quicklink.info']
        usage = AbstractBaseController.Meta.usage.replace('{cmd}', label)
        epilog = strings['quicklink.epilog']

    def do_command(self):
        # Resolve the app/env from CLI args + local config, then emit the link.
        app_name = self.get_app_name()
        env_name = self.get_env_name()
        get_quick_link(app_name, env_name)
def get_quick_link(app_name, env_name):
    """Echo an AWS console "new application" URL pre-filled from `env_name`.

    The link reproduces the environment's platform, tier, environment type,
    instance type, source bundle (if versioned), RDS options and VPC flag.
    """
    env = elasticbeanstalk.get_environment(app_name=app_name, env_name=env_name)
    settings = elasticbeanstalk.describe_configuration_settings(
        app_name, env_name)
    option_settings = settings['OptionSettings']
    environment_type = elasticbeanstalk.get_option_setting(
        option_settings, 'aws:elasticbeanstalk:environment', 'EnvironmentType')
    instance_type = elasticbeanstalk.get_option_setting(
        option_settings, 'aws:autoscaling:launchconfiguration', 'InstanceType')
    link = 'https://console.aws.amazon.com/elasticbeanstalk/home?'
    region = aws.get_region_name()
    link += 'region=' + urllib.parse.quote(region)
    link += '#/newApplication'
    link += '?applicationName=' + urllib.parse.quote(app_name)
    link += '&solutionStackName=' + urllib.parse.quote(env.platform.platform_shorthand)
    link += '&tierName=' + env.tier.name
    if environment_type:
        link += '&environmentType=' + environment_type
    if env.version_label:
        # Point the console at the deployed source bundle in S3.
        app_version = elasticbeanstalk.get_application_versions(
            app_name, version_labels=[env.version_label])['ApplicationVersions'][0]
        source_bundle = app_version['SourceBundle']
        source_url = 'https://s3.amazonaws.com/' + source_bundle['S3Bucket'] + \
            '/' + source_bundle['S3Key']
        link += '&sourceBundleUrl=' + source_url
    if instance_type:
        link += '&instanceType=' + instance_type
    link = _add_database_options(link, option_settings)
    link = _add_vpc_options(link, option_settings)
    io.echo(link)
def _add_database_options(link, option_settings):
    """Append RDS-related query parameters (when configured) to the link."""
    namespace = 'aws:rds:dbinstance'
    # (query parameter, option name) pairs, in output order.
    option_map = [
        ('rdsDBAllocatedStorage', 'DBAllocatedStorage'),
        ('rdsDBDeletionPolicy', 'DBDeletionPolicy'),
        ('rdsDBEngine', 'DBEngine'),
        ('rdsDBInstanceClass', 'DBInstanceClass'),
        ('rdsMultiAZDatabase', 'MultiAZDatabase'),
    ]
    for query_param, option_name in option_map:
        value = elasticbeanstalk.get_option_setting(
            option_settings, namespace, option_name)
        if value:
            link += '&{}={}'.format(query_param, value)
    return link
def _add_vpc_options(link, option_settings):
    """Append the VPC flag to the link when the environment has a VPC."""
    if elasticbeanstalk.get_option_setting(
            option_settings, 'aws:ec2:vpc', 'VPCId'):
        link += '&withVpc=true'
    return link
| aws/aws-elastic-beanstalk-cli | ebcli/labs/quicklink.py | quicklink.py | py | 3,555 | python | en | code | 150 | github-code | 13 |
32014771409 | # -*- coding: utf-8 -*-
# @Author : dhawal1939
# @File : utils.py
import cv2
import math
import torch
import numpy as np
from numpy.random import uniform
def map_range(x, low=0, high=1):
    """Linearly rescale `x` so its min maps to `low` and its max to `high`,
    preserving the input dtype."""
    lo, hi = x.min(), x.max()
    return np.interp(x, [lo, hi], [low, high]).astype(x.dtype)
def get_antilog_01_vals(val):
    """Invert a log-space encoding and tonemap the result into [0, 1].

    `val` appears to be a torch tensor in [-1, 1] (it is .detach().cpu()
    below) -- TODO confirm with callers.  Returns a numpy array in [0, 1].
    """
    # Color space conversion: map [-1, 1] -> [0, 1], then undo the natural log.
    val = (val + 1) / 2
    val = math.e ** val
    # cv2 tonemap with gamma 2.2 (variable names say Durand, but
    # createTonemap is the plain gamma tonemapper -- NOTE(review): confirm).
    tonemapDurand = cv2.createTonemap(2.2)
    ldrDurand = tonemapDurand.process(val[:].detach().cpu().numpy())
    val = np.clip(ldrDurand, 0, 1)
    return val
class PercentileExposure(object):
    """Gamma-corrected exposure normalization via percentile clipping.

    Pixels outside the [low_perc, high_perc] percentile range are clipped,
    the result is rescaled to [0, 1], and a 1/gamma power curve is applied.
    With `randomize=True` the three parameters are drawn uniformly at random.
    """

    def __init__(self, gamma=2.4, low_perc=2, high_perc=98, randomize=False):
        if randomize:
            gamma = uniform(1.8, 2.2)
            low_perc = uniform(0, 15)
            high_perc = uniform(85, 100)
        self.gamma = gamma
        self.low_perc = low_perc
        self.high_perc = high_perc

    def __call__(self, x):
        lower_bound, upper_bound = np.percentile(x, (self.low_perc, self.high_perc))
        clipped = np.clip(x, lower_bound, upper_bound)
        return map_range(clipped) ** (1 / self.gamma)
11159279527 | from board.board import Board
def solve(board):
    """Solve the Sudoku `board` in place via backtracking; True on success."""
    print("Solving.....")
    return backtrack(board, 0, 0)
# procedure backtrack(c) is
# if reject(P, c) then return
# if accept(P, c) then output(P, c)
# s ← first(P, c)
# while s ≠ NULL do
# backtrack(s)
# s ← next(P, s)
def backtrack(board, curr_row, curr_col):
if curr_col == (board.NUM_COLS):
curr_row += 1
curr_col = 0
# base case
# the logic here is a little goofy, since +=1 the row before checking,
# this is the only time curr_row will equal the row size (last row is row size -1)
if (curr_row == board.NUM_ROWS):
return True
# check if the cell is already filled, move on if so
if board.get_cell(curr_row, curr_col) > 0:
return backtrack(board, curr_row, curr_col + 1)
# try each number in the current cell, and if one one works add it to the board, and move to next cell
for num in range(1, 10, 1):
if (board.validate_placement(curr_row, curr_col, num)):
# set it on the board
board.set_cell(curr_row, curr_col, num)
if backtrack(board, curr_row, curr_col + 1):
return True
# if no values work in this cell, set it back to empty
board.set_cell(curr_row, curr_col, 0)
# if the cell has no valid number, return false
return False | brendanbeck62/sudoku_solver | server/logic/solve.py | solve.py | py | 1,302 | python | en | code | 0 | github-code | 13 |
1152783397 | __version__ = '0.1.0'
import sys
import json
import time
# gridappsd-python module
from gridappsd import GridAPPSD, topics, DifferenceBuilder
from gridappsd.topics import simulation_input_topic
# global variables
gapps = None
sim_id = None
def _main():
global sim_id, gapps
if len(sys.argv)<3 or '-help' in sys.argv:
usestr = '\nUsage: ' + sys.argv[0] + ' simulation_id tap|reg|cap|switch\n'
usestr += '''
Optional command line arguments:
-help: show this usage message
'''
print(usestr, file=sys.stderr, flush=True)
exit()
gapps = GridAPPSD()
sim_id = sys.argv[1]
diff = DifferenceBuilder(sim_id)
# hardwired for 13assets
if sys.argv[2] == 'tap' or sys.argv[2] == 'reg':
reg_id = '_A480E0A9-AD2B-4D8E-9478-71C29F738221' # node RG60.2
diff.add_difference(reg_id, 'TapChanger.step', 5, 8)
elif sys.argv[2] == 'cap':
cap_id = '_28456F60-7196-47E4-9BE6-54F7EAABC04A' # bus 611
diff.add_difference(cap_id, 'ShuntCompensator.sections', 0, 1)
else:
switch_id = '_4E1B3F09-CB88-4A5E-8198-24490EE7FC58' # between bus 671-692
diff.add_difference(switch_id, 'Switch.open', 1, 0)
msg = diff.get_message()
print(json.dumps(msg))
publish_to_topic = simulation_input_topic(sim_id)
gapps.send(publish_to_topic, json.dumps(msg))
time.sleep(2)
gapps.disconnect()
if __name__ == '__main__':
_main()
| GRIDAPPSD/gridappsd-state-estimator | state-estimator/sim_starter/sim_updater.py | sim_updater.py | py | 1,448 | python | en | code | 1 | github-code | 13 |
22732498701 | # 足立くんによるお馬さん予測シミュレーション
import math
import datetime
import re
import time
import locale
import tkinter as tk # for making Desktop Application
import numpy as np # for array calculation
import sympy as sp # for mathematical operation
import openpyxl # to use Excel from python
import django # for making Web system
import requests # to download HTML data
from bs4 import BeautifulSoup # to extract HTML data
import PySimpleGUI as sg # to choose date in calendarwith under one
# net競馬ホームページからwebスクレイピングを実行する
# 32行目までtyuou, chihou 以外必要ないかも
url = 'https://race.netkeiba.com/top/?rf=footer'
res = requests.get(url)
res.encoding = res.apparent_encoding
soup = BeautifulSoup(res.content, "html.parser")
elems = soup.find_all("th")
print(elems)
place = []
tyuou = ['札幌','函館','福島','新潟','東京','中山','中京','京都','阪神','小倉']
chihou = {"帯広":65, "門別":30, "盛岡":35, "水沢":36,"浦和":42,"船橋":43, "大井":44, "川崎":45,
"金沢":46, "笠松":47,"名古屋":48, "姫路":51, "園田":25, "高知":54, "佐賀":55}
for elem in elems:
if elem.contents[0] in ['札幌','函館','福島','新潟','東京','中山','中京','京都','阪神','小倉']:
place.append(elem.contents[0])
print(place)
# カレンダーから日付選択
layout = [[sg.InputText(key='-date-'),
sg.CalendarButton('日付選択', key='-button_calendar-',
close_when_date_chosen=False,
target='-date-', format="%Y%m%d")],
[sg.Button('終了')]]
window = sg.Window('レースの日付',layout)
while True:
event, values = window.read()
if event in (sg.WIN_CLOSED, '終了'):
break
window.close()
#########################################
dt_str = values['-date-']
dt = datetime.datetime.strptime(dt_str, '%Y%m%d')
print(dt)
if (dt.strftime('%a') == 'Sun') or (dt.strftime('%a') == 'Sat') : #土日の中央競馬
url2 = 'https://race.netkeiba.com/top/race_list_sub.html?kaisai_date=' + dt_str + '¤t_group=10' + dt_str + '#racelist_top_a'
print(url2)
res2 = requests.get(url2)
res2.encoding = res2.apparent_encoding
soup2 = BeautifulSoup(res2.content, "html.parser")
#elems2 = soup2.find_all("span")
elems2 = soup2.find_all("span", class_="ItemTitle")
print(elems2)
Race = []
for elem2 in elems2:
Race.append(elem2.contents[0])
raceNum = (len(Race))
Race = np.array(Race).reshape(int(raceNum/12),12)
print(Race)
print(Race[0,10])
else : #平日の地方競馬
url2 = 'https://nar.netkeiba.com/top/?kaisai_date=' + dt_str
res2 = requests.get(url2)
res2.encoding = res2.apparent_encoding
soup2 = BeautifulSoup(res2.content, "html.parser")
#elems2 = soup2.find_all("span")
elems2 = soup2.find_all("span", class_="ItemTitle")
print(elems2)
Race = []
for elem2 in elems2:
Race.append(elem2.contents[0])
print(Race)
#「出馬表」から中央競馬の情報
#「地方競馬」から地方競馬の情報
# coded by K Himeji
| KotaroHimeji/keiba | horsePrediction.py | horsePrediction.py | py | 3,198 | python | en | code | 0 | github-code | 13 |
73332219217 | """empty message
Revision ID: f8b90c51e27b
Revises: b499bd9608a5
Create Date: 2023-04-14 14:47:13.412965
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f8b90c51e27b'
down_revision = 'b499bd9608a5'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: geometry column on data_point, a primary-parcel
    flag on layer, and city/county columns on query."""
    with op.batch_alter_table('data_point', schema=None) as batch_op:
        batch_op.add_column(sa.Column('geometry', sa.JSON(), nullable=True))
    with op.batch_alter_table('layer', schema=None) as batch_op:
        batch_op.add_column(
            sa.Column('primary_parcel_layer', sa.Boolean(), nullable=True))
    with op.batch_alter_table('query', schema=None) as batch_op:
        batch_op.add_column(sa.Column('city', sa.String(length=100), nullable=True))
        batch_op.add_column(sa.Column('county', sa.String(length=100), nullable=True))
def downgrade():
    """Revert the migration in reverse order of upgrade()."""
    with op.batch_alter_table('query', schema=None) as batch_op:
        batch_op.drop_column('county')
        batch_op.drop_column('city')
    with op.batch_alter_table('layer', schema=None) as batch_op:
        batch_op.drop_column('primary_parcel_layer')
    with op.batch_alter_table('data_point', schema=None) as batch_op:
        batch_op.drop_column('geometry')
| apurv101/parcelini_backend | migrations/versions/f8b90c51e27b_.py | f8b90c51e27b_.py | py | 1,452 | python | en | code | 0 | github-code | 13 |
16747856638 |
from _setup import results_path
from tabulate import tabulate
import gridsearch
def format_tabel(mean, stddev, row_names, row_header, col_names, col_header, caption):
    """Render a LaTeX table of ``(mean, stddev)`` cells.

    The column group label uses \\multicolumn, the row group label uses
    \\multirow placed on the first body row.  Returns the complete
    ``table`` environment as a newline-terminated string.
    """
    header_rows = [
        ['', '', '\\multicolumn{%d}{c}{\\texttt{%s}}' % (len(col_names), col_header)],
        ['', ''] + [str(c) for c in col_names],
    ]
    body_rows = []
    for name, mean_row, stddev_row in zip(row_names, mean, stddev):
        cells = ['(%.2f, %.2f)' % (m, s) for m, s in zip(mean_row, stddev_row)]
        body_rows.append(['', str(name)] + cells)
    # The first body row carries the multirow row-group label.
    body_rows[0][0] = '\\multirow{%d}{*}{\\texttt{%s}}' % (len(row_names), row_header)
    lines = []
    lines.append('\\begin{table}[H]')
    lines.append('\\centering')
    lines.append('\\centerline{\\begin{tabular}{rr|%s}' % ('c' * len(col_names)))
    for row in header_rows:
        lines.append(' & '.join(row) + '\\\\ ')
    lines.append('\\hline')
    for row in body_rows:
        lines.append(' & '.join(row) + '\\\\ ')
    lines.append('\\end{tabular}}')
    lines.append('\\caption{%s}' % caption)
    lines.append('\\end{table}')
    return '\n'.join(lines) + '\n'
def format_subset(select, algorithm):
    """Format the 2-D slice of the grid search spanned by the two *select*
    parameter axes, holding the remaining two axes at their best index.

    Fix: the original indexed the numpy arrays with a Python *list*
    containing slices (``mean[subset]``); NumPy deprecated and now rejects
    sequence multidimensional indices, so the index is converted to a tuple.
    """
    mean = algorithm.mean()
    stddev = algorithm.stddev()
    best = algorithm.best_index()
    parameters = algorithm.parameters
    keys = list(parameters.keys())
    # Assume there are 4 parameters
    if len(best) != 4:
        raise ValueError('assuming 4 parameters')
    # Free the two selected axes, pin the rest at the best index.
    index = list(best)
    index[select[0]] = slice(None)
    index[select[1]] = slice(None)
    rest = sorted(set(range(0, 4)) - set(select))
    # Fixing last two
    return format_tabel(
        mean[tuple(index)],
        stddev[tuple(index)],
        row_names=parameters[keys[select[0]]],
        row_header=keys[select[0]].replace('_', '\\_'),
        col_names=parameters[keys[select[1]]],
        col_header=keys[select[1]].replace('_', '\\_'),
        caption='Shows $(\\mu, \\sigma)$ with \\texttt{%s=%s} and \\texttt{%s=%s} fixed' % (
            keys[rest[0]].replace('_', '\\_'), parameters[keys[rest[0]]][best[rest[0]]],
            keys[rest[1]].replace('_', '\\_'), parameters[keys[rest[1]]][best[rest[1]]]
        )
    )
alns, tabu = gridsearch.Analyse.load(
results_path('alns.npy'),
results_path('tabu.npy')
)
# Fixing last two
print(format_subset([0, 1], tabu))
print(format_subset([2, 3], tabu))
print(format_subset([0, 3], alns))
print(format_subset([1, 2], alns))
| AndreasMadsen/course-42137 | code/plot/best_tables.py | best_tables.py | py | 2,474 | python | en | code | 7 | github-code | 13 |
25579140282 | # coding: utf-8
"""
For testing theBoolean-level reasoning engines in the parallel CDCL(T) SMT solving engine
"""
from arlib.tests import TestCase, main
# from ..theory import SMTLibTheorySolver, SMTLibPortfolioTheorySolver
from arlib.tests.grammar_gene import gen_cnf_numeric_clauses
from arlib.bool.pysat_solver import PySATSolver
class TestBoolEngines(TestCase):
def test_models_sampling_and_reducing(self):
for _ in range(10):
clauses = gen_cnf_numeric_clauses()
if len(clauses) == 0:
continue
# solver_name = random.choice(sat_solvers)
s = PySATSolver()
s.add_clauses(clauses)
if s.check_sat():
print("SAT")
models = s.sample_models(10)
reduced_models = s.reduce_models(models)
assert (len(models) <= len(reduced_models))
break
if __name__ == '__main__':
main()
| ZJU-Automated-Reasoning-Group/arlib | arlib/tests/test_bool_engines.py | test_bool_engines.py | py | 957 | python | en | code | 6 | github-code | 13 |
1960092744 | from django.urls import path
from . import views
from django.contrib.auth import views as auth_views
urlpatterns = [
path('registration/', views.register, name='registration'),
path('logout/', views.logout_user, name='logout'),
path('login/', auth_views.LoginView.as_view(template_name='accounts/login.html'), name='login'),
path('profile/<str:username>/', views.profile_user, name='profile_user'),
path('profile/update/<str:username>/', views.edit_profile, name='edit_profile'),
] | kubix283/Blog_Public | blog/accounts/urls.py | urls.py | py | 508 | python | en | code | 0 | github-code | 13 |
8902603640 | import pandas as pd
data = pd.read_csv(r"E:\YD\git\learn\pandas_01\nato_phonetic_alphabet.csv")
NATO_ALPHABET = {row.letter : row.code for (index, row) in data.iterrows()}
# while True:
# word = input("Word for translate: ")
# # result = []
# # for i in word:
# # if i.upper() in NATO_ALPHABET:
# # result.append(NATO_ALPHABET[i.upper()])
# try:
# result = [NATO_ALPHABET[letter.upper()] for letter in word]
# print(result, "\n")
# except KeyError:
# print("Something wrong!")
def nato_translator():
    """Repeatedly prompt for a word and print its NATO phonetic spelling.

    Bug fix: the original recursed from both the ``except`` handler AND the
    unconditional ``finally`` clause, so every failed lookup spawned two new
    prompts and the call stack grew until RecursionError.  An explicit loop
    keeps the intended behaviour (prompt forever) without unbounded
    recursion.
    """
    while True:
        word = input("Word for translate: ")
        try:
            result = [NATO_ALPHABET[letter.upper()] for letter in word]
        except KeyError:
            # Non-alphabetic character: report and prompt again.
            print("Something wrong!", "\n")
        else:
            print(result, "\n")
nato_translator() | foxtailer/learn | 100day_of_code/pandas_01/NATO_alphabet.py | NATO_alphabet.py | py | 870 | python | en | code | 0 | github-code | 13 |
3318575026 | from mode import Mode
class Operation:
    """One operation of a job: its modes plus every resource any mode needs."""

    def __init__(self, number):
        self.number = number
        self.modes = []
        # resource -> list of mode numbers that require it
        self.all_resources = {}

    def add_mode(self, mode_number, resource, start, dur):
        """Record that mode *mode_number* uses *resource* from *start* for *dur*.

        Creates the Mode on first sight of its number; either way the
        resource is indexed under all_resources.
        """
        if resource not in self.all_resources:
            self.all_resources[resource] = [mode_number]
        else:
            self.all_resources[resource].append(mode_number)
        existing = self.get_mode_by_name(mode_number)
        if existing is not None:
            existing.add_resource(resource, start, dur)
            return
        # New mode number: create it and register the resource on it.
        new_mode = Mode(mode_number, self.number)
        new_mode.add_resource(resource, start, dur)
        self.modes.append(new_mode)

    def get_mode_by_name(self, name):
        """Return the mode whose number equals *name*, or None."""
        for mode in self.modes:
            if mode.mode_number == name:
                return mode
        return None

    def get_min_tim(self):
        """Shortest mode duration (tim) across this operation's modes."""
        return self.get_shortest_mode().tim

    def get_shortest_mode(self):
        """The mode with the smallest tim."""
        return min(self.modes, key=lambda m: m.tim)
| danielifshitz/RSSP | code/job_operation.py | job_operation.py | py | 2,100 | python | en | code | 1 | github-code | 13 |
def merge_list(list1, list2):
    """Interleave list1 front-to-back with list2 back-to-front.

    Falsy entries (None, '') contribute nothing to their pair; pairs are
    separated by single spaces and the result has no trailing space.
    """
    pieces = []
    back = len(list1) - 1
    for front in range(len(list1)):
        left = list1[front] if list1[front] else ''
        right = list2[back] if list2[back] else ''
        back -= 1
        pieces.append(left + right)
    return ' '.join(pieces)
#Provide different values for the variables and test your program
list1=['A', 'app','a', 'd', 'ke', 'th', 'doc', 'awa']
list2=['y','tor','e','eps','ay',None,'le','n']
merged_data=merge_list(list1,list2)
print(merged_data)
def mergelist(list1, list2):
    """Print list1 merged with reversed list2, one space after each full pair.

    A None on either side drops that side's text; a None in list1 also
    suppresses the trailing space for that pair (matching the original).
    """
    tail = len(list2) - 1
    merged = ''
    for i in range(len(list1)):
        if list1[i] is None:
            merged += list2[tail]
        elif list2[tail] is None:
            merged += list1[i] + " "
        else:
            merged += list1[i] + list2[tail] + " "
        tail -= 1
    print(merged)
list1 = list1=['A', 'app','a', 'd', 'ke', 'th', 'doc', 'awa']
list2= ['y','tor','e','eps','ay',None,'le','n']
mergelist(list1,list2)
class Car:
    """A car record: model, manufacture year and registration number."""

    def __init__(self, model, year, registration_number):
        self.__model = model
        self.__year = year
        self.__registration_number = registration_number

    def get_model(self):
        return self.__model

    def get_year(self):
        return self.__year

    def get_registration_number(self):
        return self.__registration_number

    def __str__(self):
        # "<model> <registration> <year>" -- the year is rendered last.
        return "{} {} {}".format(
            self.__model, self.__registration_number, self.__year)
#Implement Service class here
class Service:
    """Operations over a mutable list of Car objects."""

    def __init__(self, car_list):
        self.__car_list = car_list

    def get_car_list(self):
        return self.__car_list

    def find_cars_by_year(self, year):
        """Return the models registered in *year*, or None when there are none."""
        matches = [car.get_model() for car in self.__car_list
                   if int(car.get_year()) == year]
        return matches if matches else None

    def add_cars(self, new_car_list):
        """Append the new cars and keep the whole list ordered by year."""
        self.__car_list.extend(new_car_list)
        self.__car_list.sort(key=lambda car: car.get_year())

    def remove_cars_from_karnataka(self):
        """Drop every car whose registration starts with the 'KA' state code."""
        # Slice-assign so the original list object is mutated in place.
        self.__car_list[:] = [car for car in self.__car_list
                              if not car.get_registration_number().startswith("KA")]
'''
def fun (x):
x.get_year()
list1 = [1 , 2 , 3 , 4,[4,5,6],4 , 5 , 6]
list1.append ([4,5,6])
list1.extend([4,5,6])
'''
# [car1 , car2 ]
car1=Car("WagonR",2010,"KA09 3056")
car2=Car("Beat", 2011, "MH10 6776")
car3=Car("Ritz", 2013,"KA12 9098")
car4=Car("Polo",2013,"GJ01 7854")
car5=Car("Amaze",2014,"KL07 4332")
#Add different values to the list and test the program
class Player:
    """A player record: display name and years of experience."""

    def __init__(self, name, experience):
        self.__name = name
        self.__experience = experience

    def get_name(self):
        return self.__name

    def get_experience(self):
        return self.__experience

    def __str__(self):
        return "{} {}".format(self.__name, self.__experience)
#Implement Game class here
class Game:
    """Wraps the roster list and its ordering operations."""

    def __init__(self, players_list):
        self.__players_list = players_list

    def get_players_list(self):
        return self.__players_list

    def sort_players_based_on_experience(self):
        """Order the roster by experience, most experienced first."""
        self.__players_list.sort(
            key=lambda p: Player.get_experience(p), reverse=True)
        # Original behaviour: dump the raw list (object reprs) after sorting.
        print(self.__players_list)

    def shift_player_to_new_position(self, old_index_position, new_index_position):
        """Swap the players at two 1-based positions; returns the list."""
        old = old_index_position - 1
        new = new_index_position - 1
        players = self.__players_list
        players[old], players[new] = players[new], players[old]
        return players

    def display_player_details(self):
        """Print each player's name and experience on separate lines."""
        for player in self.__players_list:
            print(Player.get_name(player))
            print(Player.get_experience(player))
#lambda x : Player.get_experience(x)
#
# def fun( x):
# Player.get_experience(x)
# return x
player1=Player("Dhoni",15)
player2=Player("Virat",10)
player3=Player("Rohit",12)
player4=Player("Raina",11)
player5=Player("Jadeja",13)
player6=Player("Ishant",9)
player7=Player("Shikhar",8)
player8=Player("Axar",7.5)
player9=Player("Ashwin",6)
player10=Player("Stuart",7)
player11=Player("Bhuvneshwar",5)
#Add different values to the list and test the program
players_list=[player1,player2,player3,player4,player5,player6,player7,player8,player9,player10,player11]
#Create object of Game class, invoke the methods and test your program
g = Game(players_list)
g.display_player_details()
print("**************************************************************")
g.sort_players_based_on_experience()
g.display_player_details()
print("**************************************************************")
g.shift_player_to_new_position(1 , 11)
g.display_player_details()
'''
temp = a
a = b
b = temp
'''
#lambda x : Player.get_experience(x)
# def fun (x):
# return Player.get_experience(x)
#lambda inp_value : expression
'''
x = lambda a , b: a * b
print(x(5, 6))
def fun (a , b):
return a * b
print(fun(5 , 6))
'''
class Node:
    """Singly linked list node: arbitrary payload plus a next pointer."""

    def __init__(self, data):
        self.__data = data
        self.__next = None

    def get_data(self):
        return self.__data

    def set_data(self, data):
        self.__data = data

    def get_next(self):
        return self.__next

    def set_next(self, next_node):
        self.__next = next_node
class LinkedList:
    """Singly linked list with head/tail pointers built from Node objects."""

    def __init__(self):
        self.__head = None
        self.__tail = None

    def get_head(self):
        return self.__head

    def get_tail(self):
        return self.__tail

    def add(self, data):
        """Append *data* at the tail."""
        node = Node(data)
        if self.__head is None:
            self.__head = self.__tail = node
        else:
            self.__tail.set_next(node)
            self.__tail = node

    def insert(self, data, data_before):
        """Insert *data* after the node holding *data_before* (None -> new head)."""
        node = Node(data)
        if data_before is None:
            node.set_next(self.__head)
            self.__head = node
            if node.get_next() is None:
                # List was empty: the new head is also the tail.
                self.__tail = node
        else:
            anchor = self.find_node(data_before)
            if anchor is not None:
                node.set_next(anchor.get_next())
                anchor.set_next(node)
                if node.get_next() is None:
                    self.__tail = node
            else:
                print(data_before, "is not present in the Linked list")

    def display(self):
        """Print each element, head to tail, one per line."""
        cursor = self.__head
        while cursor is not None:
            print(cursor.get_data())
            cursor = cursor.get_next()

    def find_node(self, data):
        """Return the first node holding *data*, or None."""
        cursor = self.__head
        while cursor is not None:
            if cursor.get_data() == data:
                return cursor
            cursor = cursor.get_next()
        return None

    def delete(self, data):
        """Unlink the first node holding *data*; prints a message when absent."""
        node = self.find_node(data)
        if node is None:
            print(data, "is not present in Linked list")
            return
        if node == self.__head:
            if self.__head == self.__tail:
                self.__tail = None
            self.__head = node.get_next()
            return
        cursor = self.__head
        while cursor is not None:
            if cursor.get_next() == node:
                cursor.set_next(node.get_next())
                if node == self.__tail:
                    self.__tail = cursor
                node.set_next(None)
                break
            cursor = cursor.get_next()

    def __str__(self):
        parts = []
        cursor = self.__head
        while cursor is not None:
            parts.append(str(cursor.get_data()))
            cursor = cursor.get_next()
        return "Linkedlist data(Head to Tail): " + " ".join(parts)
def create_new_sentence(word_list):
    """Rebuild a sentence from a linked list of single characters.

    '/' and '*' act as separators and become a single space; a doubled
    separator ("//", "**", "/*" or "*/") additionally upper-cases the first
    character of the following word.

    Fix: the original dereferenced ``temp.get_next()`` without a None
    check, raising AttributeError whenever a separator was the final node.
    """
    new_sentence = ""
    capitalize_next = False
    temp = word_list.get_head()
    while temp:
        ch = temp.get_data()
        if ch == '/' or ch == '*':
            new_sentence += ' '
            nxt = temp.get_next()
            # Doubled separator: consume the second delimiter and flag the
            # next letter for upper-casing.  Guard against a trailing
            # separator (nxt is None).
            if nxt is not None and nxt.get_data() in ('/', '*'):
                capitalize_next = True
                temp = nxt
            temp = temp.get_next()
            continue
        if capitalize_next:
            ch = ch.upper()
            capitalize_next = False
        new_sentence += ch
        temp = temp.get_next()
    return new_sentence
word_list=LinkedList()
word_list.add("T")
word_list.add("h")
word_list.add("e")
word_list.add("/")
word_list.add("*")
word_list.add("s")
word_list.add("k")
word_list.add("y")
word_list.add("*")
word_list.add("i")
word_list.add("s")
word_list.add("/")
word_list.add("/")
word_list.add("b")
word_list.add("l")
word_list.add("u")
word_list.add("e")
result=create_new_sentence(word_list)
print(result)
class Node:
def __init__(self,data):
self.__data=data
self.__next=None
def get_data(self):
return self.__data
def set_data(self,data):
self.__data=data
def get_next(self):
return self.__next
def set_next(self,next_node):
self.__next=next_node
class LinkedList:
def __init__(self):
self.__head=None
self.__tail=None
def get_head(self):
return self.__head
def get_tail(self):
return self.__tail
def add(self,data):
new_node=Node(data)
if(self.__head is None):
self.__head=self.__tail=new_node
else:
self.__tail.set_next(new_node)
self.__tail=new_node
def insert(self,data,data_before):
new_node=Node(data)
if(data_before==None):
new_node.set_next(self.__head)
self.__head=new_node
if(new_node.get_next()==None):
self.__tail=new_node
else:
node_before=self.find_node(data_before)
if(node_before is not None):
new_node.set_next(node_before.get_next())
node_before.set_next(new_node)
if(new_node.get_next() is None):
self.__tail=new_node
else:
print(data_before,"is not present in the Linked list")
def display(self):
temp=self.__head
while(temp is not None):
print(temp.get_data())
temp=temp.get_next()
def find_node(self,data):
temp=self.__head
while(temp is not None):
if(temp.get_data()==data):
return temp
temp=temp.get_next()
return None
def delete(self,data):
node=self.find_node(data)
if(node is not None):
if(node==self.__head):
if(self.__head==self.__tail):
self.__tail=None
self.__head=node.get_next()
else:
temp=self.__head
while(temp is not None):
if(temp.get_next()==node):
temp.set_next(node.get_next())
if(node==self.__tail):
self.__tail=temp
node.set_next(None)
break
temp=temp.get_next()
else:
print(data,"is not present in Linked list")
#You can use the below __str__() to print the elements of the DS object while debugging
def __str__(self):
temp=self.__head
msg=[]
while(temp is not None):
msg.append(str(temp.get_data()))
temp=temp.get_next()
msg=" ".join(msg)
msg="Linkedlist data(Head to Tail): "+ msg
return msg
class Child:
    """A performer: name plus the item they will perform."""

    def __init__(self, name, item_to_perform):
        self.__name = name
        self.__item_to_perform = item_to_perform

    def __str__(self):
        return self.__name + " " + self.__item_to_perform

    def get_name(self):
        return self.__name

    def get_item_to_perform(self):
        return self.__item_to_perform
class Performance:
    """Manages the ordered LinkedList of Child performers."""

    def __init__(self, children_list):
        self.__children_list = children_list

    def get_children_list(self):
        return self.__children_list

    def change_position(self, child):
        """Move *child* to just after the middle node of the list.

        Uses a fast/slow pointer walk to locate the middle.  Fix: in the
        original the fast pointer could stall without advancing (even-length
        lists), leaving the loop condition true forever and eventually
        dereferencing None on the slow pointer.  The loop condition now
        also requires a double step to be possible, which terminates in all
        cases and yields the same middle node for odd-length lists.
        """
        fast = self.__children_list.get_head()
        slow = self.__children_list.get_head()
        while fast.get_next() and fast.get_next().get_next():
            slow = slow.get_next()
            fast = fast.get_next().get_next()
        self.__children_list.delete(child)
        self.__children_list.insert(child, slow.get_data())

    def add_new_child(self, child):
        """Append *child* after the current last node."""
        cursor = self.__children_list.get_head()
        while cursor.get_next():
            cursor = cursor.get_next()
        self.__children_list.insert(child, cursor.get_data())
#Implement Performance class here
child1=Child("Rahul","solo song")
child2=Child("Sheema","Dance")
child3=Child("Gitu","Plays Flute")
child4=Child("Tarun","Gymnastics")
child5=Child("Tom","MIME")
#Add different values to the list and test the program
children_list=LinkedList()
children_list.add(child1)
children_list.add(child2)
children_list.add(child3)
children_list.add(child4)
children_list.add(child5)
performance=Performance(children_list)
print("The order in which the children would perform:")
performance.get_children_list().display()
print()
print("After Rahul's performance, the schedule would change to:")
performance.change_position(child1)
performance.get_children_list().display()
print()
child6=Child("Swetha","Vote of Thanks")
print("After Swetha has joined, the schedule is:")
performance.add_new_child(child6)
performance.get_children_list().display()
| SnehaShet22/TCET-B2 | DAY-7.py | DAY-7.py | py | 14,644 | python | en | code | 1 | github-code | 13 |
33168618590 | """Setuid backdoor handler
SYNOPSIS:
suidroot --create <SUIDROOT_BACKDOOR>
suidroot "<COMMAND>"
DESCRIPTION:
Provide a simple way to install persistent setuid(2)
backdoor from previously obtained root access.
SUIDROOT_BACKDOOR file should be carefully chosen to not
look suspicious. Our goal is to make it as undetectable
as we can. I recommend searching for legitimate setuid()
files already installed on the system, and using a
similar file path as SUIDROOT_BACKDOOR.
# sources: http://lmgtfy.com/?q=find+suid+files+linux
LIMITATIONS:
- Only works on Linux/UNIX.
- RCE must be available (`run` plugin must work).
- Current (unprivileged) user must have execution
rights on SUIDROOT_BACKDOOR file.
WARNING:
Considering phpsploit's input parser, commands which
contain quotes, semicolons, and other chars that could be
interpreted by the framework MUST be quoted to be
interpreted as a single argument.
* Bad command:
# Here, phpsploit parser detects multiple commands:
> suidroot echo 'foo bar' > /tmp/foobar; cat /etc/passwd
* Good command:
# Here, the whole string is correctly passed to plugin
> suidroot "echo 'foo bar' > /tmp/foobar; cat /etc/passwd"
EXAMPLES:
> suidroot --create /tmp/backdoor
- Generates the payload to be run as root in order
to enable persistance through phpsploit
> suidroot cat /tmp/shadow
- Print the /etc/shadow data as root
> suidroot "whoami; id"
- Show your current user and id (enjoy!)
ENVIRONMENT:
* SUIDROOT_BACKDOOR
The setuid(2) backdoor file
* SUIDROOT_PWD
Current working directory for privileged user
AUTHOR:
nil0x42 <http://goo.gl/kb2wf>
"""
import sys
import os
import base64
from core import encoding
import ui.color
import ui.input
from api import plugin
from api import server
from api import environ
SUIDROOT_ENV_VARS = {"SUIDROOT_BACKDOOR", "SUIDROOT_PWD"}
if environ["PLATFORM"].lower().startswith("win"):
sys.exit("Plugin available on unix-based platforms only")
if len(plugin.argv) < 2:
sys.exit(plugin.help)
if plugin.argv[1] == '--create':
if len(plugin.argv) != 3:
sys.exit(plugin.help)
backdoor_file = server.path.abspath(plugin.argv[2])
# create the payload that must be run as privileged used.
# The suidroot backdoor is then created with suid byte
# enabled, making tunnel available.
file = open(os.path.join(plugin.path, "backdoor.c"), 'rb')
source_code = encoding.decode(base64.b64encode(file.read()))
payload = ("echo %b | python -m base64 -d | gcc -o %f -x c -;"
"chown root %f;"
"chmod 4755 %f;"
).replace('%f', backdoor_file).replace('%b', source_code)
# prevent previous configuration override
if SUIDROOT_ENV_VARS.issubset(set(environ)):
msg = "suidroot environment variables already set. override them ?"
if ui.input.Expect(False, skip_interrupt=False)(msg):
sys.exit("Operation canceled")
print("[*] In order to use suidroot privileged command execution, "
"run the following shell payload AS ROOT on the remote system:")
print(ui.color.colorize("\n", "%Blue", payload, "\n"))
environ['SUIDROOT_BACKDOOR'] = backdoor_file
environ['SUIDROOT_PWD'] = environ['PWD']
sys.exit()
# On classic command pass, make sure the exploit is activated
for var in SUIDROOT_ENV_VARS:
msg = "Missing environment variable: %s: Use 'suidroot --create'"
if var not in environ:
sys.exit(msg % var)
# build the command to send from given arguments
command = ' '.join(plugin.argv[1:]).strip()
# chdir to SUIDROOT_PWD before
if not command.startswith(";"):
command = " ; " + command
command = 'cd ' + environ['SUIDROOT_PWD'] + command
# token to make sure new pwd is known
if not command.endswith(";"):
command += " ; "
command += "echo ; echo suid `pwd` suid"
# build the payload to send the command to run on system
payload = server.payload.Payload("payload.php")
# prepend slashes, so backdoor can spoof it's name with fake '[kthread]' str
payload['BACKDOOR'] = "/////////" + environ['SUIDROOT_BACKDOOR']
payload['COMMAND'] = repr(command)
print("[#] raw command: %r" % command)
output = payload.send()
lines = output.splitlines()
if not lines:
sys.exit("No output received")
new_pwd = lines.pop()
try:
assert new_pwd.startswith("suid ")
assert new_pwd.endswith(" suid")
new_pwd = new_pwd[5:-5]
assert server.path.isabs(new_pwd)
environ['SUIDROOT_PWD'] = new_pwd
if lines and not lines[-1]:
lines.pop(-1)
for line in lines:
print(line)
except AssertionError:
print("[-] Couldn't retrieve new $PWD.")
print("[-] Raw output:")
print(output)
sys.exit(1)
| nil0x42/phpsploit | plugins/system/suidroot/plugin.py | plugin.py | py | 4,838 | python | en | code | 2,044 | github-code | 50 |
39273401386 |
class Person:
    """Minimal person record: just a display name."""

    def __init__(self, name):
        self.name = name
class Bike:
    """A bike with a speed, an owner (Person) and a painted color.

    ``_layers`` counts coats of paint: it starts at 1 (factory "grey")
    and is incremented on every repaint via set_color().
    """

    def __init__(self, speeds, owner):
        self.speed = speeds
        self.owner = owner
        self.color = "grey"
        self._layers = 1

    def set_color(self, new_color):
        """Repaint the bike, adding one more paint layer."""
        self._layers += 1
        self.color = new_color

    def get_layers(self):
        # Bug fix: the attribute is ``_layers``; the original returned
        # ``self.layers``, raising AttributeError on every call.
        return self._layers
tyler = Person("tyler")
joel = Person("joel")
bike = Bike(100, joel)
tylers_bike = Bike(18, tyler)
print("OWNERS===========")
print(bike.owner.name)
print(tylers_bike.owner.name)
print("color before we change it")
print(bike.color)
print(bike.speed)
print(bike.get_layers)
print("color after we change it")
bike.set_color("midnight blue")
print(bike.color)
print(bike.get_layers)
print("color of tylers bike")
print(tylers_bike.color)
| Rick-and-morty/tic-tac-toe | day4_oop.py | day4_oop.py | py | 821 | python | en | code | 0 | github-code | 50 |
25125456678 | import pika
import json
import ssl
import os
import requests
from log.logger import Logger
from abc import ABC, abstractmethod
from db.db_operation import DBOperation
from common.password import get_password, get_password_by_auth_token
class BasePublisher(ABC):
def __init__(self, meta={}, logger=None):
self._connection = None
self._channel = None
self._password = get_password(username=meta["mq_pmpname"], meta=meta)
self.meta = meta
self._exchange = meta['mq_exchange_name']
self._exchange_type = meta['mq_exchange_type']
self._routing_key = meta['mq_routing_key']
if not os.path.exists(meta['mq_ca_certs']):
raise RuntimeError("%s doesn't exist."%meta['mq_ca_certs'])
if not os.path.exists(meta['mq_key_file']):
raise RuntimeError("%s doesn't exist."%meta['mq_key_file'])
if not os.path.exists(meta['mq_cert_file']):
raise RuntimeError("%s doesn't exist."%meta['mq_cert_file'])
self._app_id = None
self._body = None
self._logger = logger if logger else Logger(log_level="info", vendor_key=-1, retailer_key=-1)
def get_connection_parameters(self, meta):
credentials = pika.PlainCredentials(meta['mq_username'], self._password)
ssl_options = dict(
ssl_version=ssl.PROTOCOL_TLSv1,
ca_certs=meta['mq_ca_certs'],
keyfile=meta['mq_key_file'],
certfile=meta['mq_cert_file'],
cert_reqs=ssl.CERT_REQUIRED
)
return pika.ConnectionParameters(ssl=True,
host=meta['mq_host'],
port=meta['mq_port'],
credentials=credentials,
connection_attempts=int(meta['mq_connection_attempts']),
heartbeat=int(meta['mq_heartbeat_interval']),
ssl_options=ssl_options
)
def connect(self):
try:
self._connection = pika.BlockingConnection(self.get_connection_parameters(self.meta))
except:
self._logger.warning('Connection refused, try to sync the password of %s from PMP and connect again.'%self.meta["mq_pmpname"])
self._password = get_password_by_auth_token(username=self.meta["mq_pmpname"], meta=self.meta)
self._connection = pika.BlockingConnection(self.get_connection_parameters(self.meta))
def open_channel(self):
if not self._connection:
self.connect()
self._channel = self._connection.channel()
def declare_exchange(self):
if not self._channel:
self.open_channel()
self._logger.info("Declaring exchange [%s], type [%s], routing_key [%s]" % (self._exchange, self._exchange_type, self._routing_key))
self._channel.exchange_declare(exchange=self._exchange,
exchange_type=self._exchange_type,
durable=True)
@abstractmethod
def on_action(self, body):
"""Subclass hook: must populate self._app_id and self._body before run() is called."""
pass
# The commented lines below show what a concrete implementation assigns.
#self._app_id = 'APPID'
#self._body = 'BODY'
def run(self):
"""Publish self._body to the exchange as a persistent JSON message.

Raises ValueError if on_action() has not populated _app_id and _body.
"""
if not (self._app_id and self._body):
raise ValueError('app_id or body is not specified in the method on_action')
self.declare_exchange()
# delivery_mode=2 marks the message persistent on the broker.
properties = pika.BasicProperties(app_id=self._app_id,
content_type='application/json',
delivery_mode=2
)
message = self._body
# The body is published as-is; callers pass pre-serialized JSON strings.
self._channel.basic_publish(self._exchange,
self._routing_key,
#json.dumps(message, ensure_ascii=False),
message,
properties)
self._logger.info("Published successfully [%s]" % message)
def stop(self):
"""Close the MQ connection, logging before and after."""
self._logger.info("Closing connection")
self._connection.close()
self._logger.info("Connection closed")
# Demo / manual-test entry point.
# NOTE(review): main() and fanout() are near-duplicates (they differ only in
# the exchange name/type/routing-key overrides) and only fanout() is actually
# invoked below -- main() is dead code kept for manual testing, presumably.
if __name__ == '__main__':
def main():
# Publish 10 feedback messages to the IRIS exchange configured in the file.
import configparser
CONFIG_FILE = '../../config/config.properties'
config = configparser.ConfigParser()
config.read(CONFIG_FILE)
meta = config['DEFAULT']
meta = {k:v for k, v in meta.items()}
meta["mq_exchange_name"] = meta["mq_iris_exchange_name"]
meta["mq_exchange_type"] = meta["mq_iris_exchange_type"]
meta["mq_routing_key"] = meta["mq_iris_feedback_routing_key"]
# Minimal concrete publisher: on_action just records the app id and body.
class AFMPublisher(BasePublisher):
def on_action(self, body):
self._app_id = 'AFM'
self._body = body
afm = AFMPublisher(meta)
for i in range(0,10):
afm.on_action('{"jobId":%s,"stepId":2,"batchId":0,"retry":0,"status":0}'%i)
afm.run()
afm.stop()
def fanout():
# Same as main() but broadcasts on the agent fanout exchange (empty routing key).
import configparser
CONFIG_FILE = '../../config/config.properties'
config = configparser.ConfigParser()
config.read(CONFIG_FILE)
meta = config['DEFAULT']
meta = {k:v for k, v in meta.items()}
meta["mq_exchange_name"] = meta["mq_agent_exchange_name"]
meta["mq_exchange_type"] = 'fanout'
meta["mq_routing_key"] = ''
class AFMPublisher(BasePublisher):
def on_action(self, body):
self._app_id = 'AFM'
self._body = body
afm = AFMPublisher(meta)
for i in range(0,10):
afm.on_action('{"jobId":%s,"stepId":2,"batchId":0,"retry":0,"status":0}'%i)
afm.run()
afm.stop()
fanout()
| kenshinsee/common | lib/mq/publisher.py | publisher.py | py | 5,695 | python | en | code | 0 | github-code | 50 |
41512481994 | # -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description:
This basic example loads a pre-trained model from the web and uses it get entities.
"""
import sys
sys.path.append('..')
from nerpy import NERModel
if __name__ == '__main__':
# BertSoftmax Chinese NER model: NERModel("bert", "shibing624/bert4ner-base-chinese")
# BertSpan Chinese NER model: NERModel("bertspan", "shibing624/bertspan4ner-base-chinese")
# Loads the pretrained checkpoint from the HuggingFace hub on first use.
model = NERModel("bert", "shibing624/bert4ner-base-chinese")
sentences = [
"常建良,男,1963年出生,工科学士,高级工程师,北京物资学院客座副教授",
"1985年8月-1993年在国家物资局、物资部、国内贸易部金属材料流通司从事国家统配钢材中特种钢材品种的调拨分配工作,先后任科员、主任科员。"
]
# set split_on_space=False if you use Chinese text
predictions, raw_outputs, entities = model.predict(sentences, split_on_space=False)
print(predictions, entities)
# More detailed predictions
for n, (preds, outs) in enumerate(zip(predictions, raw_outputs)):
print("\n___________________________")
print("Sentence: ", sentences[n])
print("Entity: ", entities[n])
| shibing624/nerpy | examples/base_zh_demo.py | base_zh_demo.py | py | 1,259 | python | en | code | 84 | github-code | 50 |
27906932859 | """
Programme d'alignement de sequence par paires a partir d'embedding obtenus par des methodes basees sur des transformers
"""
#import-------------------------------------------------------------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import argparse
#Arguments ligne de commande---------------------------------------------------------------------------------------------------------------
# Command-line interface: two FASTA files, two embedding files, gap penalty.
parser = argparse.ArgumentParser(description = "Alignement par paires de 2 sequences a l'aide de 2 embedding obtenu a l'aide de Transformers.")
parser.add_argument('-s1', '--sequence1', type=str, metavar='', help='Fichier fasta de la sequence 1')
parser.add_argument('-s2', '--sequence2', type=str, metavar='', help='Fichier fasta de la sequence 2')
parser.add_argument('-e1', '--embedding1', type=str, metavar='', help='Embedding de la sequence 1')
parser.add_argument('-e2', '--embedding2', type=str, metavar='', help='Embedding de la sequence 2')
parser.add_argument('-g', '--gap_penality', type=int, metavar='', help='penalite de gap')
args = parser.parse_args()
# Parameters --------------------------------------------------------------
# Global gap penalty used by matrix_dyna() below.
gap = args.gap_penality
#emb_1 = np.loadtxt("emb/1bina.t5emb")
#emb_2 = np.loadtxt("emb/1lh1.t5emb")
# Embeddings are loaded at import time, one row per residue.
emb_1 = np.loadtxt("emb/" + args.embedding1)
emb_2 = np.loadtxt("emb/" + args.embedding2)
#Fonctions----------------------------------------------------------------------------------------------------------------------------------
#Chargement des fasta----------------------------------
def fasta(fasta_file):
    """Return the sequence line of a FASTA file.

    Only the second line of the file (the first sequence line after the
    header) is returned, with its trailing newline preserved, matching the
    rest of the pipeline's expectations.
    """
    with open(fasta_file, "r") as handle:
        lines = handle.readlines()
    return lines[1]
#matrice de score (DP)---------------------------------
def dp_matrix(emb1, emb2):
    """Pairwise dot-product score matrix between two embeddings.

    Parameters
    ----------
    emb1 : array-like, one embedding vector per residue of sequence 1
    emb2 : array-like, one embedding vector per residue of sequence 2

    Returns
    -------
    numpy.ndarray of shape (len(emb1), len(emb2)) where entry (i, j) is
    the dot product of emb1[i] and emb2[j].
    """
    scores = np.zeros((len(emb1), len(emb2)))
    for i, vec1 in enumerate(emb1):
        for j, vec2 in enumerate(emb2):
            scores[i][j] = np.dot(vec1, vec2)
    return scores
#matrice de programmation dynamique--------------------
def matrix_dyna(seq_1, seq_2, matrix_dp): # dynamic-programming matrix
"""
Build the Needleman-Wunsch-style dynamic-programming matrix.

Depends on the module-level `gap` penalty (set from the command line).

Parameters:
seq_1 : sequence string of sequence 1 (from its FASTA file)
seq_2 : sequence string of sequence 2 (from its FASTA file)
matrix_dp : score matrix (pairwise dot products)

Returns (first-row gap scores, first-column gap scores, filled DP matrix).
"""
# Cumulative gap penalties for the first row/column of the DP matrix.
matrix_prog_1 = [0]
matrix_prog_2 = [0]
i = 1
while i != len(seq_1):
#print(matrix_score[-i])
matrix_prog_1.append(matrix_prog_1[i-1] + gap)
i += 1
i = 1
while i != len(seq_2):
matrix_prog_2.append(matrix_prog_2[i-1] + gap)
i += 1
#matrix_score = []
matrix_dyna = np.zeros((len(seq_1),len(seq_2)))
for i in range(len(seq_1)):
matrix_dyna[i][0] = matrix_prog_1[i]
for i in range(len(seq_2)):
matrix_dyna[0][i] = matrix_prog_2[i]
# DP recurrence: each interior cell takes the best of gap-down, gap-right,
# or diagonal match (+ dot-product score of the previous residue pair).
for i in range(len(seq_1)):
for j in range(len(seq_2)):
if i > 0 and j > 0:
matrix_dyna[i][j] = max(matrix_dyna[i - 1][j] + gap, matrix_dyna[i][j - 1] + gap, matrix_dyna[i - 1][j - 1] + matrix_dp[i-1][j-1])
return matrix_prog_1, matrix_prog_2, matrix_dyna
#Matrice de chemin-------------------------------------------------------------
def chemin_matrix(matrix_dynamique, matrix_dp):
"""
Recover the best path through the DP matrix using flags (0 = match / 1 and 2 = gap).

Walks greedily from the top-left corner, at each step moving to the
highest-scoring neighbour (down-right, down, or right).

Parameters:
matrix_dynamique : dynamic-programming matrix between the 2 sequences
matrix_dp : dot-product matrix ==> SCORE

Returns (list of per-step scores, their sum, list of move flags).
"""
chemin = []
chemin_flag = []
i = 0
j = 0
print("- Taille Sequence 1 (ligne) : " + str(len(matrix_dynamique)-1) + "\n- Taille Sequence 2 (colonne): " + str(len(matrix_dynamique[0])-1))
#print("\n")
#print("- Derniere case dela matrice de DP: ", matrix_dp[i-1][j-1])
#print("\n")
#max_value = max(matrix[i-1][j] + gap, matrix[i][j-1] + gap, matrix[i-1][j-1] + dp_matrix)
#print(max_value)
while i < len(matrix_dynamique)-1 and j < len(matrix_dynamique[0])-1:
max_value = max(matrix_dynamique[i+1][j], matrix_dynamique[i][j+1], matrix_dynamique[i+1][j+1])
if max_value == matrix_dynamique[i+1][j+1]:
# Diagonal move: residues i and j are aligned (flag 0).
#print("Correspondance" , i)
chemin_flag.append(0)
chemin.append(matrix_dp[i][j])
i = i + 1
j = j + 1
elif max_value == matrix_dynamique[i+1][j]:
# Downward move: gap in sequence 2 (flag 1).
# NOTE(review): matrix_dp[i][j-1] wraps to the last column when j == 0
# (and matrix_dp[i-1][j] below wraps when i == 0) -- looks unintended, confirm.
chemin_flag.append(1)
chemin.append(matrix_dp[i][j-1])
#print("GAP_BAS")
i = i + 1
elif max_value == matrix_dynamique[i][j+1]:
# Rightward move: gap in sequence 1 (flag 2).
chemin_flag.append(2)
chemin.append(matrix_dp[i-1][j])
#print("GAP_DROIT")
j = j + 1
else:
print("ERREUR")
break
#print(chemin_flag)
#print(chemin)
# Alignment score ----------------------------------------------------------------
somme_score = sum(chemin)
#print("- Score de l'alignement : " + str(somme_score))
print("\n")
return chemin, somme_score, chemin_flag
#stockage et affichage de l'alignement-------------------------------------------------------------------------------------------------------
def alignement(seq_1, seq_2, chemin_flag):
"""
Render the two aligned sequences from the move flags (match = letter, gap = '-').

Parameters:
seq_1 : sequence string of sequence 1 (from its FASTA file)
seq_2 : sequence string of sequence 2 (from its FASTA file)
chemin_flag : list of move flags (0 = match, 1 and 2 = gap)

Returns the two aligned strings (also printed to stdout).
"""
print("Sequence 1 = " + seq_1)
print("Sequence 2 = " + seq_2)
#print(chemin)
#print(len(chemin))
max_seq = max(len(seq_1), len(seq_2))
#print(max_seq)
sequence_1 = []
sequence_2 = []
for i in range(len(chemin_flag)):
if i < len(seq_1):
if chemin_flag[i] == 0: #match
sequence_1.append(seq_1[i])
elif chemin_flag[i] == 1: # gap downward, so i = i+1
sequence_1.append("-")
elif chemin_flag[i] == 2: # gap to the right, so j = j+1
sequence_1.append(seq_1[i])
else:
sequence_1.append("-")
for i in range(len(chemin_flag)):
if i < len(seq_2):
if chemin_flag[i] == 0:
sequence_2.append(seq_2[i])
elif chemin_flag[i] == 1: # gap downward, so i = i+1
sequence_2.append(seq_2[i])
elif chemin_flag[i] == 2:
sequence_2.append("-")
else:
sequence_2.append("-")
# NOTE(review): this compares the sequences lexicographically to decide which
# one carries the stray '\n' from the FASTA reader; a length comparison was
# probably intended -- confirm.
if seq_1 > seq_2:
sequence_1.remove('\n')
else:
sequence_2.remove('\n')
sequence_1 = "".join(sequence_1)
sequence_2 = "".join(sequence_2)
print("ALIGNEMENT PAR PAIRE DES EMBEDDING T5 OBTENUS PAR TRANSFORMERS : \n")
print("Sequence 1 = " + sequence_1)
print("Sequence 2 = " + sequence_2)
return sequence_1, sequence_2
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
main-------------------------------------------------------------------------------------------------------------------------------------
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if __name__ == '__main__':
# 1) Load the FASTA sequences -------------------------------------------------------------------
seq_1 = fasta("fasta/" + args.sequence1)
seq_2 = fasta("fasta/" + args.sequence2)
# 2) Build the score matrix (pairwise dot products) ---------------------------------------------
matrix_dp = dp_matrix(emb_1, emb_2)
#np.savetxt("dossier/dp_matrix.txt", matrix_dp)
# 3) Build the dynamic-programming matrix -------------------------------------------------------
matrix_dynamique_gap_1, matrix_dynamique_gap_2, matrix_dynamique = matrix_dyna(seq_1, seq_2, matrix_dp)
#np.savetxt("dossier/matrice_dynamique.txt", matrix_dynamique)
# 4) Recover the optimal path -------------------------------------------------------------------
chemin, score, chemin_flag = chemin_matrix(matrix_dynamique, matrix_dp)
#print(chemin_flag)
#print(chemin)
# 5) Render and display the alignment -----------------------------------------------------------
seq1_align, seq2_align = alignement(seq_1, seq_2, chemin_flag)
print("\n- Score de l'alignement : " + str(score))
#np.savetxt("chemin_1bina_1lh1.txt", chemin_score, fmt="%1.17f")
# 6) Histogram (disabled plotting code below) ---------------------------------------------------
"""
#affiche l'histogramme permettant de determiner la penalite de gap------------------------------------------------------------------
print(np.mean(matrix_dp))
plt.hist(matrix_dp)
plt.xlabel('dot product')
plt.ylabel('y')
plt.title('Histogramme de la repartition du DP : 1bina avec 1a7s')
plt.xlim(-13, 13)
plt.ylim(0, 0.07)
plt.grid(True)
plt.show()
#affiche un representation de la matrice de DP afin d'avoir une vision d'ensmble des matchs et des gap
plt.matshow(matrix_dp)
plt.show()
"""
73396392475 | import pytorch_lightning as pl
#import wandb #import if tracking is desired
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import TQDMProgressBar
# Import custom modules
from data.cifar100 import CIFAR100DataModule
from vision_transformer.models.vit_classifier import ViTClassifier
from vision_transformer.models.vit_pl_train_module import ViTTrainModule
'''Simple script for training ViT
All hyperparameters listed below can be modified. Script can be run as-is with
"python train.py" and hyperparameters are set by default to highest-performing
values for CIFAR-100.
Args:
None
Returns:
None
'''
# Set this to your local CIFAR-100 directory.
CIFAR = "/media/curttigges/project-files/datasets/cifar-100/"
pl.seed_everything(42)
hyperparameter_defaults = {
"embed_dim":256,
"hidden_dim":512,
"class_head_dim":512,
"num_encoders":24,
"num_heads":8,
"patch_size":4,
"num_patches":64,
"dropout":0.1,
"learning_rate":0.001,
"batch_size":256,
"learning_rate":0.001,
"weight_decay":0.03
}
# Enable the below if you have WandB and wish to run a sweep
'''
wandb.init(config=hyperparameter_defaults)
config = wandb.config
model_kwargs = {
"embed_size":256,
"hidden_size":512,
"hidden_class_size":512,
"num_encoders":config.num_encoders,
"num_heads":8,
"patch_size":config.patch_size,
"num_patches":(32**2//(config.patch_size**2)),
"dropout":config.dropout,
"batch_size":config.batch_size,
"learning_rate":config.learning_rate,
"weight_decay":config.weight_decay
}
'''
# Disable dictionary below if you want to run a hyperparameter sweep with WandB
model_kwargs = hyperparameter_defaults
# CIFAR-100 data module; batch size comes from the hyperparameter dict.
data_module = CIFAR100DataModule(
batch_size=model_kwargs["batch_size"],
num_workers=12,
data_dir=CIFAR)
model = ViTTrainModule(**model_kwargs)
# Enable these lines if you want to log with WandB
#wandb_logger = WandbLogger(project="vit-cifar100")
#wandb_logger.watch(model, log="all")
# Single-GPU trainer; progress bar refreshed every 10 batches.
trainer = Trainer(
max_epochs=180,
accelerator='gpu',
devices=1,
#logger=wandb_logger,
callbacks=[TQDMProgressBar(refresh_rate=10)])
trainer.fit(model, datamodule=data_module)
70301397595 | import datetime
import logging
import numbers
import time
from creme import (compose, feature_extraction, linear_model, metrics, optim,
preprocessing, stats, time_series)
from src.features.build_features import build_train_predict_features
def get_hour(x):
    """Add an 'hour' feature extracted from the 'date' field.

    Mutates `x` in place and returns it (creme transformer convention).
    """
    hour_of_day = x['date'].hour
    x['hour'] = hour_of_day
    return x
def define_pipeline(list_cat_features, list_num_features, learning_rate):
"""Creates the modeling pipeline for online learning

Parameters
----------
list_cat_features : list
categorical feature selectors for one-hot encoding
(NOTE(review): passed to compose.SelectType, which selects by *type*,
not by name -- confirm the callers pass types here)
list_num_features : list
numerical feature names, standard-scaled
learning_rate : float
SGD learning rate for the linear regression

Returns
-------
model: creme.compose.Pipeline
modeling pipeline to fit (wrapped in a 60-step Detrender)
metric: creme.metric.
a metric to monitor during online learning (rolling MAE over 60 steps)
"""
init = optim.initializers.Normal(mu=0, sigma=1, seed=42)
num = compose.Select(*list_num_features) | preprocessing.StandardScaler()
cat = compose.SelectType(
*list_cat_features) | preprocessing.OneHotEncoder()
# Bayesian mean of the target per hour of day, used as an extra feature.
mean_target = get_hour | feature_extraction.TargetAgg(
by=['hour'], how=stats.BayesianMean(
prior=3,
prior_weight=1)) | preprocessing.StandardScaler()
model = compose.Pipeline(
num + cat + mean_target,
linear_model.LinearRegression(
optimizer=optim.SGD(learning_rate), intercept_lr=0.001, initializer=init)
)
model = time_series.Detrender(regressor=model, window_size=60)
metric = metrics.Rolling(metrics.MAE(), 60)
return model, metric
def online_learn(contract, station, list_cat_features, list_num_features, target, timestep=120, learning_rate=0.1):
    """Run endless online regression for `target` at a given station.

    Every `timestep` seconds it fetches fresh features, updates the model
    online and logs the one-hour-ahead prediction; once more than an hour of
    steps has elapsed it also logs the observation recorded one hour earlier.

    Parameters
    ----------
    contract : str
        city we are looking at
    station : int
        station identifier
    list_cat_features : list
        categorical feature selectors for the pipeline
    list_num_features : list
        numerical feature names for the pipeline
    target : str
        name of the target variable
    timestep : int
        seconds between two online steps
    learning_rate : float
        SGD learning rate for the linear model
    """
    model, metric = define_pipeline(
        list_cat_features, list_num_features, learning_rate=learning_rate)
    real_y = {}
    predicted_y = {}
    t = 0
    while True:
        t += 1
        X, y, X_pred = build_train_predict_features(contract, station, target)
        # Bug fix: observations used to be stored under ad-hoc string keys
        # (str(hour) + str(minute)) but looked up with a raw datetime, so the
        # lookup below always raised KeyError. Use one "HH:MM" key format for
        # both storing and retrieving.
        now_key = X['date'].strftime('%H:%M')
        real_y[now_key] = y
        y_pred_one, y_pred_1h, metric, model = train_pred_step(
            X, y, X_pred, model, metric)
        predicted_y[now_key] = y_pred_1h
        logging.info(f'Metric = {metric}, y pred = {y_pred_one}, y true = {y}')
        logging.info(f'Predicted available bikes in 1 hour : {y_pred_1h}')
        if t > 3600 / timestep:
            # Key of the observation made one hour before the current step.
            key_1h = (X['date'] - datetime.timedelta(minutes=60)).strftime('%H:%M')
            try:
                logging.info(
                    f"Real number of bikes 1 hour ago = {real_y[key_1h]}")
            except KeyError:
                logging.error('Pb with the date')
        time.sleep(timestep)
def train_pred_step(X, y, X_pred, model, metric):
    """Run one online-learning step.

    Predicts on the current features, updates the rolling metric with the
    true target, fits the model on (X, y), then predicts on the future
    feature row.

    Parameters
    ----------
    X : dict
        feature row for the current timestep
    y : number
        observed target for X
    X_pred : dict
        feature row one hour ahead
    model : creme estimator
        online model exposing predict_one / fit_one
    metric : creme metric
        rolling metric exposing update

    Returns
    -------
    tuple
        (prediction for X, prediction for X_pred, updated metric, updated model)
    """
    current_prediction = model.predict_one(X)
    metric = metric.update(y, current_prediction)
    model = model.fit_one(X, y)
    future_prediction = model.predict_one(X_pred)
    return current_prediction, future_prediction, metric, model
| LuisBlanche/StreamBikes | src/models/online_model.py | online_model.py | py | 3,466 | python | en | code | 2 | github-code | 50 |
11948645009 | import datetime
import enum
from dataclasses import dataclass
from dataclasses_json import dataclass_json
from option import Option, Underlying
class SwaptionType(enum.Enum):
"""Swaption flavour -- named after the fixed leg the holder would pay or receive."""
Payer = 'Payer'
Receiver = 'Receiver'
class SettlementType(enum.Enum):
"""How the swaption settles on exercise: cash payment or physical entry into the swap."""
Cash = 'Cash'
Physical = 'Physical'
@dataclass
class IRSwap(Underlying):
    """Vanilla interest-rate swap description used as a swaption underlying.

    Note: the date fields were previously annotated with the `datetime`
    *module* rather than a type; they now name `datetime.datetime` explicitly
    (annotation-only change, field behavior is unchanged).
    """
    fixed_rate: float
    floating_rate_reference: str
    floating_rate_spread: float
    payment_frequency_months: int
    currency: str
    day_count_convention: str
    start_date: datetime.datetime
    end_date: datetime.datetime
    notional: float
    valuation_date: datetime.datetime
# JSON-serializable swaption: option type/settlement plus the underlying swap.
@dataclass_json
@dataclass
class Swaption(Option):
"""An option on an interest-rate swap, extending the generic Option fields."""
option_type: SwaptionType
settlement_type: SettlementType
underlying: IRSwap
| metahris/pypws | pypws/swaption.py | swaption.py | py | 751 | python | en | code | 2 | github-code | 50 |
11863735160 | """Словарь очень похож на список, но порядок элементов в нем не имеет значения и они
не выбираются смещением, таким как 0 и 1. Вместо этого для каждого значения вы
указываете связанный с ним уникальный ключ"""
d1 = { "Iphone": '13', 'Samsung': 'A15', 'Nokia': '3310'}
d2 = dict(name="Jack", lname="London", course = 3)
for key in d1.keys():
print(key)
for value in d1.values():
print(value)
for item in d1.items():
print(item)
d2['course'] = 4
print(d2)
d4 = d1['Nokia']
d5 = d1.get('Samsung')
print(d4, d5)
| abdumalikyaqub/python-practice | intro-py/dict.py | dict.py | py | 692 | python | ru | code | 0 | github-code | 50 |
38770201916 | from torchvision import datasets, transforms
from torch.utils.data import DataLoader
#
# class Cifar10_DataLoader(DataLoader):
# """
# MNIST data loading demo using BaseDataLoader
# """
# def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_workers=1, training=True):
#
# transform = transforms.Compose([
# transforms.ToTensor()
# ])
# self.data_dir = data_dir
# self.dataset = datasets.MNIST(self.data_dir, train=training, download=True, transform=transform)
# super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
def Cifar10_DataLoader(BATCH_SIZE, data_dir='./data'):
    """Build train/test DataLoaders for CIFAR-10.

    Downloads the dataset into `data_dir` on first use. Images are only
    converted to tensors (no augmentation/normalization).

    Parameters
    ----------
    BATCH_SIZE : int
        batch size for both loaders
    data_dir : str
        dataset root directory (new parameter; defaults to the previously
        hard-coded './data', so existing callers are unaffected)

    Returns
    -------
    tuple
        (train_loader, test_loader); training data is shuffled, test is not
    """
    transform = transforms.Compose([
        transforms.ToTensor()
    ])
    train_dataset = datasets.CIFAR10(data_dir,
                                     train=True,
                                     download=True,
                                     transform=transform)
    test_dataset = datasets.CIFAR10(data_dir,
                                    train=False,
                                    transform=transform)
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=BATCH_SIZE,
                              shuffle=True)
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=BATCH_SIZE,
                             shuffle=False)
    return train_loader, test_loader
class BreadthFirstPaths:
    """Breadth-first search from source `s`; answers shortest-path queries.

    `G` must expose v() (vertex count) and `adj`, mapping each vertex to a
    singly linked chain of neighbor nodes with .key (neighbor index) and
    .next attributes.
    """
    def __init__(self, G, s):
        # marked[v]: is v reachable from s; edgeTo[v]: previous vertex on the
        # BFS path from s to v (-1 when unset).
        self.marked = [False] * G.v()
        self.edgeTo = [-1] * G.v()
        self.s = s
        self.bfs(G, s)

    def bfs(self, G, s):
        """Standard BFS. Uses a deque for O(1) enqueue/dequeue instead of the
        original list.insert(0, x)/pop(-1), which are O(n) per operation."""
        from collections import deque
        queue = deque()
        self.marked[s] = True
        queue.append(s)
        while queue:
            v = queue.popleft()
            node = G.adj[v]
            while node is not None:
                if not self.marked[node.key]:
                    self.edgeTo[node.key] = v
                    self.marked[node.key] = True
                    queue.append(node.key)
                node = node.next

    def hasPathTo(self, v):
        """Return True if v is reachable from the source."""
        return self.marked[v]

    def pathTo(self, v):
        """Return the vertex list from s to v, or None if v is unreachable."""
        if not self.hasPathTo(v):
            return None
        path = []
        x = v
        while x != self.s:
            path.insert(0, x)
            x = self.edgeTo[x]
        path.insert(0, self.s)
        return path
23654731462 | import numpy as np
import unittest
from load import *
from process_test_corpus import *
from utils import *
from viterbi import Viterbi
from hmm import HMM
import pickle
class TestViterbi(unittest.TestCase):
def setUp(self):
"""Bind the module-level HMM fixtures to the test instance and preprocess the test corpus."""
# NOTE(review): vocab/tag_counts/transition_matrix/... are created at the
# bottom of the file inside `if __name__ == "__main__"`, so these names only
# exist when the file is run directly (not under a plain test runner) -- confirm.
self.vocab = vocab
self.tag_counts = tag_counts
self.transition_matrix = transition_matrix
self.emission_matrix = emission_matrix
self.test_corpus = test_corpus
self.states = states
self.test_words, self.y = load_test_corpus(self.test_corpus)
_, self.test_words = preprocess_list(vocab=self.vocab, test_words_list=self.test_words)
def test_initialize(self):
test_cases = [
{
"name": "default_check",
"input": {
"states": self.states,
"tag_counts": self.tag_counts,
"A": self.transition_matrix,
"B": self.emission_matrix,
"corpus": self.test_words,
"vocab": self.vocab,
},
"expected": {
"best_probs_shape": (46, 34199),
"best_paths_shape": (46, 34199),
"best_probs_col0": np.array(
[
-22.60982633,
-23.07660654,
-23.57298822,
-19.76726066,
-24.74325104,
-35.20241402,
-35.00096024,
-34.99203854,
-21.35069072,
-19.85767814,
-21.92098414,
-4.01623741,
-19.16380593,
-21.1062242,
-20.47163973,
-21.10157273,
-21.49584851,
-20.4811853,
-18.25856307,
-23.39717471,
-21.92146798,
-9.41377777,
-21.03053445,
-21.08029591,
-20.10863677,
-33.48185979,
-19.47301382,
-20.77150242,
-20.11727696,
-20.56031676,
-20.57193964,
-32.30366295,
-18.07551522,
-22.58887909,
-19.1585905,
-16.02994331,
-24.30968545,
-20.92932218,
-21.96797222,
-24.29571895,
-23.45968569,
-22.43665883,
-20.46568904,
-22.75551606,
-19.6637215,
-18.36288463,
]
),
},
}
]
for test in test_cases:
viterbi = Viterbi(vocab=self.vocab, tag_counts= test["input"]["tag_counts"],
transition_matrix=test["input"]["A"], emission_matrix=test["input"]["B"],
test_words=test["input"]["corpus"],y = self.y)
viterbi._initialize()
self.assertEqual(viterbi.best_probs.shape, test["expected"]["best_probs_shape"],
msg="Wrong shape of best_probs matrix, expected {}".format((test["expected"]["best_probs_shape"])))
self.assertEqual(viterbi.best_paths.shape, test["expected"]["best_paths_shape"],
msg="Wrong shape of best_paths matrix, expected {}".format((test["expected"]["best_paths_shape"])))
np.testing.assert_almost_equal(viterbi.best_probs[:, 0], test["expected"]["best_probs_col0"],
err_msg= "Wrong value of column 0 of best_probs matrix, expected {}".format(test["expected"]["best_probs_col0"]), decimal=8)
def test_forward(self):
test_cases = [
{
"name": "default_check",
"input": {
"A": self.transition_matrix,
"B": self.emission_matrix,
"test_corpus": self.test_words,
"best_probs": np.load("./npy/best_probs.npy"),
"best_paths":
np.load("./npy/best_paths.npy"),
"vocab": self.vocab,
"verbose": False,
},
"expected": {
"best_probs0:5": np.array(
[
[
-22.60982633,
-24.78215633,
-34.08246498,
-34.34107105,
-49.56012613,
],
[
-23.07660654,
-24.51583896,
-35.04774303,
-35.28281026,
-50.52540418,
],
[
-23.57298822,
-29.98305064,
-31.98004656,
-38.99187549,
-47.45770771,
],
[
-19.76726066,
-25.7122143,
-31.54577612,
-37.38331695,
-47.02343727,
],
[
-24.74325104,
-28.78696025,
-31.458494,
-36.00456711,
-46.93615515,
],
]
),
"best_probs30:35": np.array(
[[-203.15859723, -208.79079415, -210.87179298,-210.56183994, -224.19464568],
[-202.98538493, -218.12507661, -207.63966568, -215.93214396, -224.54198098],
[-202.41118988, -214.63334729, -217.81262519, -221.14009604, -222.43579649],
[-200.84257013,-209.87178653, -209.47192559, -216.62538661, -221.49910549],
[-209.14430395, -215.02329713, -210.19587419, -213.92864354, -229.10658422]]
),
"best_paths0:5": np.array(
[
[0, 11, 20, 25, 20],
[0, 11, 20, 25, 20],
[0, 11, 20, 25, 20],
[0, 11, 20, 25, 20],
[0, 11, 20, 25, 20],
]
),
"best_paths30:35": np.array(
[
[20, 19, 35, 11, 21],
[20, 19, 35, 11, 21],
[20, 19, 35, 11, 21],
[20, 19, 35, 11, 21],
[35, 19, 35, 11, 34],
]
),
},
}
]
for test in test_cases:
viterbi = Viterbi(vocab=self.vocab, tag_counts= self.tag_counts,
transition_matrix=test["input"]["A"], emission_matrix=test["input"]["B"],
test_words=test["input"]["test_corpus"], y = self.y)
viterbi._initialize()
viterbi._forward()
for range_ in test["expected"]:
get_index = list(range_[10:].split(":"))
index0 = int(get_index[0])
index1 = int(get_index[1])
if (range_[:10] == "best_probs"):
sub_best_probs = viterbi.best_probs[index0 : index1, index0: index1]
np.testing.assert_almost_equal(sub_best_probs, test["expected"][range_],decimal=8
,err_msg= "Wrong value of {}, expected {}, got {}".format(range_, test["expected"][range_], sub_best_probs))
else:
sub_best_paths = viterbi.best_paths[index0 : index1, index0: index1]
np.testing.assert_almost_equal(sub_best_paths, test["expected"][range_],
err_msg= "Wrong value of {}, expected {}, got {}".format(range_, test["expected"][range_], sub_best_paths))
def test_backward(self):
test_cases = [
{
"name": "default_check",
"input": {
"corpus": self.test_corpus,
"best_probs": np.load("./npy/best_probs.npy")
,
"best_paths": np.load("./npy/best_paths.npy"),
"states": states,
},
"expected": {
"pred_len": 34199,
"pred_head": [
"DT",
"NN",
"POS",
"NN",
"MD",
"VB",
"VBN",
"IN",
"JJ",
"NN",
],
"pred_tail": [
"PRP",
"MD",
"RB",
"VB",
"PRP",
"RB",
"IN",
"PRP",
".",
"--s--",
],
},
}
]
for test in test_cases:
viterbi = Viterbi(vocab=self.vocab, tag_counts= self.tag_counts,
transition_matrix=self.transition_matrix, emission_matrix=self.emission_matrix,
test_words=self.test_words, y=self.y)
viterbi._initialize()
viterbi._forward()
viterbi._backward()
self.assertEqual(len(viterbi.pred) - 1, test["expected"]["pred_len"], msg="Wrong length of test_corpus prediction, expected {}, got {}".format(test["expected"]["pred_len"], len(viterbi.pred)))
np.testing.assert_equal(viterbi.pred[:10], test["expected"]["pred_head"],
err_msg= "Wrong prediction of first 10 tags, expected: {}, got: {}".format(test["expected"]["pred_head"], viterbi.pred[:10]))
np.testing.assert_equal(viterbi.pred[-11:-1], test["expected"]["pred_tail"],
err_msg= "Wrong prediction of last 10 tags, expected: {}, got: {}".format(test["expected"]["pred_tail"], viterbi.pred[-11:-1]))
if __name__ == "__main__":
print("-------Runing unittest for Viterbi class-------")
training_corpus = './data/WSJ_02-21.pos'
vocab_txt = "./data/hmm_vocab.txt"
vocab = get_index_vocab(vocab_txt=vocab_txt)
training_corpus = get_training_corpus(training_corpus)
test_corpus = './data/WSJ_24.pos'
hmm = HMM(vocab=vocab, training_corpus=training_corpus, alpha=0.001)
hmm._create_counts()
hmm._create_transition_matrix()
hmm._create_emission_matrix()
tag_counts = hmm.tag_counts
transition_matrix = hmm.transition_matrix
emission_matrix = hmm.emission_matrix
states = hmm.states
unittest.main(verbosity=2)
| LTPhat/HMM-Viterbi-POS-Tagger | test_viterbi.py | test_viterbi.py | py | 11,346 | python | en | code | 0 | github-code | 50 |
73051478555 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def pathSum(self, root: Optional[TreeNode], targetSum: int) -> List[List[int]]:
"""Return all root-to-leaf paths whose node values sum to targetSum.

Iterative preorder DFS. `cur_path` holds (depth, value) pairs for the
current root-to-node path and `cur_sum` their running total; when the
DFS backtracks (next node's depth <= last recorded depth) the stale
tail of the path is popped before the new node is appended.
"""
from collections import deque
pathes = []
if root:
stack = deque()
cur_path = deque()
cur_sum = 0
stack.append((0, root))
while stack:
level, node = stack.pop()
# Backtrack: drop path entries at or below this depth. The root
# (depth 0) is never popped, since every later node has level >= 1.
if len(cur_path)>0 and cur_path[-1][0] >= level:
while cur_path[-1][0] >= level:
_, old_val = cur_path.pop()
cur_sum -= old_val
cur_path.append((level, node.val))
cur_sum += node.val
# Leaf reached: record the path if the running sum matches.
if node.left is None and node.right is None:
if cur_sum == targetSum:
pathes.append([p[1] for p in cur_path])
# Push right before left so the left subtree is visited first.
if node.right is not None:
stack.append((level+1, node.right))
if node.left is not None:
stack.append((level+1, node.left))
return pathes
29256597281 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import os
from lab.environments import LocalEnvironment, MaiaEnvironment
from downward.reports.compare import ComparativeReport
import common_setup
from common_setup import IssueConfig, IssueExperiment, RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
# Two revisions x two builds of blind A* are compared pairwise below.
REVISIONS = ["issue693-v5", "issue693-v6"]
BUILDS = ["release32", "release64"]
SEARCHES = [
("blind", "astar(blind())"),
]
# One IssueConfig per (search, build) combination, capped at 1 minute of search.
CONFIGS = [
IssueConfig(
"{nick}-{build}".format(**locals()),
["--search", search],
build_options=[build],
driver_options=["--build", build, "--search-time-limit", "1m"])
for nick, search in SEARCHES
for build in BUILDS
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = MaiaEnvironment(
priority=0, email="jendrik.seipp@unibas.ch")
# Shrink the suite and run locally when invoked as a test run.
if common_setup.is_test_run():
SUITE = IssueExperiment.DEFAULT_TEST_SUITE
ENVIRONMENT = LocalEnvironment(processes=1)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
# Compare revisions.
for build in BUILDS:
for rev1, rev2 in itertools.combinations(REVISIONS, 2):
# Comparative table report for each revision pair within a build.
algorithm_pairs = [
("{rev1}-{config_nick}-{build}".format(**locals()),
"{rev2}-{config_nick}-{build}".format(**locals()),
"Diff ({config_nick}-{build})".format(**locals()))
for config_nick, search in SEARCHES]
exp.add_report(
ComparativeReport(
algorithm_pairs,
attributes=IssueExperiment.DEFAULT_TABLE_ATTRIBUTES),
name="issue693-opt-{rev1}-vs-{rev2}-{build}".format(**locals()))
# Relative scatter plots of runtime and memory per config, grouped by domain.
for config_nick, search in SEARCHES:
algorithms = [
"{rev1}-{config_nick}-{build}".format(**locals()),
"{rev2}-{config_nick}-{build}".format(**locals())]
for attribute in ["total_time", "memory"]:
exp.add_report(
RelativeScatterPlotReport(
attributes=[attribute],
filter_algorithm=algorithms,
get_category=lambda run1, run2: run1["domain"]),
name="issue693-relative-scatter-{config_nick}-{build}-{rev1}-vs-{rev2}-{attribute}".format(**locals()))
exp.run_steps()
| aig-upf/automated-programming-framework | PLANNERS/fast-downward/experiments/issue693/v6-blind.py | v6-blind.py | py | 2,443 | python | en | code | 13 | github-code | 50 |
# AQUA-generated exact-inference script for the "radar" probabilistic
# program: it discretizes the joint density over two Bernoulli variables
# (b[0], b[1]), a uniform variable x1 and an observed quantity o[0] on a
# regular grid, then normalizes the exponentiated log-density cube to
# obtain the marginal posteriors.
#
# Usage: python radar_query_dice.py <splits>
#   <splits> -- number of grid subdivisions per continuous variable.
import time
starta = time.time()
import torch
import sys
import torch.distributions as tdist  # emitted by the code generator; unused here
print(time.time() - starta)  # report how long importing torch took
#start = torch.cuda.Event(enable_timing=True)
#end = torch.cuda.Event(enable_timing=True)
#start.record()
#end.record()
MS_TO_S = 1/1000    # only used by the disabled CUDA-event timing above
ma_eps = 1.0E-9     # tiny offset keeping grid points off the support edges
aqua_device = torch.device("cpu")

# Marginal-posterior slots for the two boolean variables ...
posterior_b = [None] * 2
all_gt_b = [None] * 2
lowProb_b = [None] * 2
adaptiveLower_b = [None] * 2
adaptiveUpper_b = [None] * 2
# ... and for the single continuous/observed variable.
posterior_o = [None] * 1
all_gt_o = [None] * 1
lowProb_o = [None] * 1
adaptiveLower_o = [None] * 1
adaptiveUpper_o = [None] * 1

repeat = False
while True:
    # BUGFIX: the generated source read
    #     if repeat: splits = int(sys.argv[1])
    #     else:      splits = int(sys.argv[1])
    # Both branches were byte-identical, so the conditional was dead code;
    # a single unconditional assignment is behaviorally equivalent.
    splits = int(sys.argv[1])

    # Log-density accumulator.  Each variable lives on its own tensor axis
    # (dim 0 is a singleton batch axis, dim 1 = b[0], dim 2 = b[1],
    # dim 3 = o[0], dim 4 = x1), so broadcasting builds the full joint cube.
    densityCube_p = torch.zeros(1, device=aqua_device)
    b = [None] * 2
    b[0] = torch.tensor([0,1], device=aqua_device)
    b[0] = torch.reshape(b[0], [1, -1, 1, 1, 1])
    b[1] = torch.tensor([0,1], device=aqua_device)
    b[1] = torch.reshape(b[1], [1, 1, -1, 1, 1])
    o = [None] * 1
    o[0] = torch.arange(ma_eps, 200 + ma_eps, step=(200 - ma_eps)/splits, device=aqua_device)
    o[0] = torch.reshape(o[0], [1, 1, 1, -1, 1])
    x1 = torch.arange(50, 150 + ma_eps, step=(150 - 50)/splits, device=aqua_device)
    x1 = torch.reshape(x1, [1, 1, 1, 1, -1])
    # Priors: b[0] ~ Bernoulli(0.2), b[1] ~ Bernoulli(0.5).
    densityCube_p = densityCube_p + torch.log((0.2) * (b[0]) + (1 - (0.2)) * (1 - (b[0])))
    densityCube_p = densityCube_p + torch.log((0.5) * (b[1]) + (1 - (0.5)) * (1 - (b[1])))
    # Branch on b[0]: if true, observe b[1] == 1 (log of the 0/1 mask is 0
    # where it holds and -inf elsewhere -- torch.log promotes the boolean
    # tensor to float); if false, b[1] ~ Bernoulli(0.2).
    densityCube_p_true = torch.tensor(0)
    densityCube_p_false = torch.tensor(0)
    densityCube_p_true = densityCube_p_true + torch.log(b[1] == 1)
    densityCube_p_false = densityCube_p_false + torch.log((0.2) * (b[1]) + (1 - (0.2)) * (1 - (b[1])))
    densityCube_p = densityCube_p + torch.log((b[0]).int() * torch.exp(densityCube_p_true) + (1 - (b[0]).int()) * torch.exp(densityCube_p_false))
    # Uniform densities: o[0] ~ U(0, 200), x1 ~ U(50, 150).
    densityCube_p = densityCube_p + torch.log((1/ (200 - 0)) * torch.logical_and(o[0] >= 0, o[0] <= 200))
    densityCube_p = densityCube_p + torch.log((1/ (150 - 50)) * torch.logical_and(x1 >= 50, x1 <= 150))
    # Branch on b[0] again: o[0] follows a triangular-shaped density around
    # x1 (left width 50), with right-hand width 10 in the true branch and
    # 50 in the false branch.
    densityCube_p_true = torch.tensor(0)
    densityCube_p_false = torch.tensor(0)
    densityCube_p_true = densityCube_p_true + torch.log(torch.logical_and(o[0]>=x1-(50),o[0]<=x1)*2*(o[0]-(x1-(50)))/(50*(50+10)) + torch.logical_and(o[0]>x1,o[0]<=x1+10)*2*((x1+10)-(o[0]))/(10*(50+10)))
    densityCube_p_false = densityCube_p_false + torch.log(torch.logical_and(o[0]>=x1-(50),o[0]<=x1)*2*(o[0]-(x1-(50)))/(50*(50+50)) + torch.logical_and(o[0]>x1,o[0]<=x1+50)*2*((x1+50)-(o[0]))/(50*(50+50)))
    densityCube_p = densityCube_p + torch.log((b[0]).int() * torch.exp(densityCube_p_true) + (1 - (b[0]).int()) * torch.exp(densityCube_p_false))
    # Final observation: condition on b[0] == 1.
    densityCube_p = densityCube_p + torch.log(b[0] == 1)
    # Exponentiate with max-subtraction for numerical stability, then
    # normalize and marginalize each variable by summing out the other axes.
    expDensityCube_p = torch.exp(densityCube_p - torch.max(densityCube_p))
    z_expDensityCube = torch.sum(expDensityCube_p)
    posterior_b[1] = expDensityCube_p.sum([1, 0, 3, 4]) / z_expDensityCube
    posterior_x1 = expDensityCube_p.sum([1, 2, 3, 0]) / z_expDensityCube
    posterior_o[0] = expDensityCube_p.sum([1, 2, 0, 4]) / z_expDensityCube
    posterior_b[0] = expDensityCube_p.sum([0, 2, 3, 4]) / z_expDensityCube
    # Flatten supports and posteriors to 1-D for downstream consumption.
    b[1] = b[1].flatten()
    posterior_b[1] = posterior_b[1].flatten()
    x1 = x1.flatten()
    posterior_x1 = posterior_x1.flatten()
    o[0] = o[0].flatten()
    posterior_o[0] = posterior_o[0].flatten()
    b[0] = b[0].flatten()
    posterior_b[0] = posterior_b[0].flatten()
    # `repeat` starts False, so the body runs exactly once; the flag is
    # presumably a hook for adaptive-grid refinement runs -- TODO confirm
    # against the AQUA generator.
    if not repeat:
        break
    repeat = False
#torch.set_printoptions(precision=8, sci_mode=False)
#print((o[0].flatten()))
#print(posterior_o[0])
#print(start.elapsed_time(end) * MS_TO_S)
ends = time.time()
print(ends - starta)  # total wall-clock time including imports
| uiuc-arc/aquasense | python_models/radar_query_dice.py | radar_query_dice.py | py | 3,686 | python | en | code | 0 | github-code | 50 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.