seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
'''
This project is an extention of the previous deepLearning-1.py project
here openCV is used to display the image but processed using Jetson inference and utilities
-------------------------------------------------------------------------------------------
'''
import jetson.inference
import jetson.utils
import cv2
import numpy as np
import time

# Capture geometry for the camera stream.
width = 640
height = 480

cam = jetson.utils.gstCamera(width, height, '/dev/video0')
# cam = jetson.utils.gstCamera(width, height, '0')  # PiCam
# display = jetson.utils.glDisplay()

net = jetson.inference.imageNet('googlenet')

# BUG FIX: the original created a jetson.utils.cudaFont() and immediately
# rebound `font` to the OpenCV constant, so the cudaFont object was dead
# weight.  Only the OpenCV font is actually used for the overlay.
font = cv2.FONT_HERSHEY_SIMPLEX

timeMark = time.time()
fpsFilter = 0  # exponentially smoothed frames-per-second estimate

while True:
    # Capture a camera frame as float4 RGBA in CUDA memory (zeroCopy=1 makes
    # the buffer CPU-accessible as well).
    frame, width, height = cam.CaptureRGBA(zeroCopy=1)
    # Classify the RGBA image; returns the object's class index and confidence.
    classID, confidence = net.Classify(frame, width, height)
    item = net.GetClassDesc(classID)  # human-readable class description

    # Low-pass filter the instantaneous frame rate for a steadier readout.
    dt = time.time() - timeMark
    fps = 1 / dt
    fpsFilter = 0.95 * fpsFilter + 0.05 * fps
    timeMark = time.time()

    # Wrap the CUDA memory in a numpy ndarray (no copy), then convert
    # RGBA -> BGR so OpenCV displays the colors correctly.
    frame = jetson.utils.cudaToNumpy(frame, width, height, 4)
    frame = cv2.cvtColor(frame, cv2.COLOR_RGBA2BGR).astype(np.uint8)

    cv2.putText(frame, str(round(fpsFilter, 1)) + ' fps ' + item, (0, 30), font, 1, (0, 0, 255), 2)
    cv2.imshow('RecoCam', frame)
    cv2.moveWindow('RecoCam', 0, 0)
    if cv2.waitKey(1) == ord('q'):
        break

# NOTE(review): jetson.utils.gstCamera exposes Close(), not a cv2-style
# release() -- confirm this call does not raise AttributeError on exit.
cam.release()
cv2.destroyAllWindows()
| Vishvambar-Panth/Jetson-Nano-Exercise | NVIDIA/deepLearning-1a.py | deepLearning-1a.py | py | 1,575 | python | en | code | 0 | github-code | 13 |
14629223597 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class NearestLeaf(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, leaf_id=None, distance=None):  # noqa: E501
        """NearestLeaf - a model defined in OpenAPI

        :param leaf_id: The leaf_id of this NearestLeaf.  # noqa: E501
        :type leaf_id: str
        :param distance: The distance of this NearestLeaf.  # noqa: E501
        :type distance: int
        """
        # Maps attribute names to their OpenAPI types (used during
        # (de)serialization by the base Model class).
        self.openapi_types = {
            'leaf_id': str,
            'distance': int
        }

        # Maps attribute names to their JSON keys.
        self.attribute_map = {
            'leaf_id': 'leaf_id',
            'distance': 'distance'
        }

        # NOTE: assigning the private fields directly bypasses the None
        # validation in the property setters below -- generator convention.
        self._leaf_id = leaf_id
        self._distance = distance

    @classmethod
    def from_dict(cls, dikt) -> 'NearestLeaf':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The NearestLeaf of this NearestLeaf.  # noqa: E501
        :rtype: NearestLeaf
        """
        return util.deserialize_model(dikt, cls)

    @property
    def leaf_id(self):
        """Gets the leaf_id of this NearestLeaf.

        :return: The leaf_id of this NearestLeaf.
        :rtype: str
        """
        return self._leaf_id

    @leaf_id.setter
    def leaf_id(self, leaf_id):
        """Sets the leaf_id of this NearestLeaf.

        :param leaf_id: The leaf_id of this NearestLeaf.
        :type leaf_id: str
        :raises ValueError: if leaf_id is None (required field).
        """
        if leaf_id is None:
            raise ValueError("Invalid value for `leaf_id`, must not be `None`")  # noqa: E501

        self._leaf_id = leaf_id

    @property
    def distance(self):
        """Gets the distance of this NearestLeaf.

        :return: The distance of this NearestLeaf.
        :rtype: int
        """
        return self._distance

    @distance.setter
    def distance(self, distance):
        """Sets the distance of this NearestLeaf.

        :param distance: The distance of this NearestLeaf.
        :type distance: int
        :raises ValueError: if distance is None (required field).
        """
        if distance is None:
            raise ValueError("Invalid value for `distance`, must not be `None`")  # noqa: E501

        self._distance = distance
| Mykrobe-tools/mykrobe-atlas-distance-api | swagger_server/models/nearest_leaf.py | nearest_leaf.py | py | 2,449 | python | en | code | 0 | github-code | 13 |
41048361799 | from io import BytesIO
from PIL import Image
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from sfs_server.files.models import File
class FilesHttpTest(APITestCase):
    """HTTP-level tests for the files endpoint: upload and listing."""

    def test_can_create_file_on_server(self):
        """POSTing a generated picture creates a File row with created_at set."""
        picture_file = _create_random_picture_file()
        response = self.client.post(reverse('files:files_list'), data={
            'file': picture_file
        })
        self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        # The most recently created File should carry a creation timestamp.
        file = File.objects.last()
        self.assertIsNotNone(file.created_at)

    def test_can_list_files_on_server(self):
        """After uploading several files, GET on the list endpoint succeeds."""
        picture_files = [_create_random_picture_file() for _ in range(0, 5)]
        # given some random files saved in the media
        for picture in picture_files:
            response = self.client.post(reverse('files:files_list'), data={
                'file': picture
            })
            self.assertEqual(status.HTTP_201_CREATED, response.status_code)
        # then it is possible to retrieve a list of all the files
        response = self.client.get(reverse('files:files_list'))
        self.assertEqual(status.HTTP_200_OK, response.status_code)
def _create_random_picture_file():
    """Return a SimpleUploadedFile wrapping a freshly generated 250x250 JPEG."""
    buffer = BytesIO()
    blank_image = Image.new('RGB', (250, 250))
    blank_image.save(buffer, 'JPEG')
    buffer.seek(0)
    return SimpleUploadedFile('random_picture.jpg', buffer.getvalue())
| SteelTurtle/sfs_project | sfs_server/files/tests/test_files_http.py | test_files_http.py | py | 1,498 | python | en | code | 0 | github-code | 13 |
36070401212 | from django import forms
from .models import People, Taluk
class PersonCreationForm(forms.ModelForm):
    """ModelForm for People whose taluk choices depend on the chosen district
    (the dependent dropdown is populated via AJAX on the client side)."""

    class Meta:
        model = People
        fields = '__all__'
        widgets = {
            'district': forms.Select(attrs={'class': 'form-control'}),
            'taluk': forms.Select(attrs={'class': 'form-control'}),
            'vaccine': forms.Select(attrs={'class': 'form-control'}),
            'date': forms.DateTimeInput(attrs={'class': 'form-control', 'type': 'date'}),
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Start with no taluks; they are filled in only once a district is known.
        self.fields['taluk'].queryset = Taluk.objects.none()

        if 'district' in self.data:
            # Bound form: restrict taluks to the submitted district.
            try:
                district_id = int(self.data.get('district'))
                self.fields['taluk'].queryset = Taluk.objects.filter(district_id=district_id).order_by('name')
            except (ValueError, TypeError):
                pass  # invalid input from the client; ignore and fallback to empty City queryset
        elif self.instance.pk:
            # Editing an existing record: show the taluks of its district.
            self.fields['taluk'].queryset = self.instance.district.taluk_set.order_by('name')
| msgokul/vaccineapp | vaccineproject/vaccineapp/forms.py | forms.py | py | 1,137 | python | en | code | 0 | github-code | 13 |
# Demo data: three parallel lists that become the table's columns.
tableData = [['apples', 'oranges', 'cherries', 'bannana'],
             ['Alice', 'Bob', 'Carol', 'David'],
             ['dogs', 'cats', 'moose', 'goose']]


def table_printer(args):
    """Print the given lists as right-aligned columns, 10 characters wide.

    `args` is a list of equal-length lists; zip(*args) transposes them so
    each printed row holds one element from every input list.
    """
    cell = '{:>10}'
    for row in zip(*args):
        row_template = cell * len(row)
        print(row_template.format(*row))


table_printer(tableData)
38832767504 | #bunch of lists:
# Sample rosters used to demonstrate set operations.
employees = ['Corey', 'Jim', 'Steven', 'April', 'Judy', 'Jenn', 'John', 'Jane']
gym_members = ['April', 'John', 'Corey']
developers = ['Judy', 'Corey', 'Steven', 'Jane', 'April']

# Which members are developers AND go to the gym?  gym_members is wrapped in
# set() because intersection() is a set method, not a list method.
result = set(gym_members).intersection(developers)
print(result)

# Which employees are NOT developers OR gym members?
result = set(employees).difference(gym_members, developers)
print(result)

# Membership test against a list (linear scan):
if 'Corey' in developers:
    print('Corey is a developer!')

# The same style of test against a set is an O(1) average-time lookup.
# (Note: the condition below still checks the list, mirroring the original.)
s = set(employees)
if 'Steven' in developers:
    print('Steven is a developer!')
| hwsanchez/Python_ex | example.py | example.py | py | 843 | python | en | code | 0 | github-code | 13 |
71395266578 |
# @Author Benedict Quartey
import matplotlib.pyplot as plt
import numpy as np #matrix math
#simplified interface for building models
import keras
from keras.callbacks import ModelCheckpoint
import model as NN_model
import data_processing
#for reading files
import os
# Training hyper-parameters.
batch_size = 128   # samples per gradient update
num_classes = 6    # number of target categories
epochs = 10        # full passes over the training set

# inputshape for images
imageHeight, imageWidth, imageChannels = 245, 240, 3

# seeding to enable exact reproduction of learning results
np.random.seed(0)
def preprocess():
    """Load the dataset via data_processing and return float32 numpy arrays
    scaled to [0, 1] as (x_train, x_test, y_train, y_test).
    """
    (X_train, X_test, Y_train, Y_test) = data_processing.prepareData()
    x_train = np.array(X_train)
    y_train = np.array(Y_train)
    x_test = np.array(X_test)
    y_test = np.array(Y_test)
    # Cast to float32 and scale pixel values from [0, 255] to [0, 1].
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    return (x_train, x_test, y_train, y_test)
def plot_training(model_history, Num_epochs, plotPath):
    """Plot train/validation loss and accuracy curves and save them to plotPath.

    `model_history` is the History object returned by model.fit(); its
    .history dict must contain the keys "loss", "val_loss", "acc", "val_acc".
    """
    epochs_axis = np.arange(0, Num_epochs)
    curves = {
        "loss": "train_loss",
        "val_loss": "val_loss",
        "acc": "train_acc",
        "val_acc": "val_acc",
    }
    plt.style.use("ggplot")
    plt.figure()
    for history_key, curve_label in curves.items():
        plt.plot(epochs_axis, model_history.history[history_key], label=curve_label)
    plt.title("Training Loss and Accuracy")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="lower left")
    plt.savefig(plotPath)
def train():
    """Train the CNN on the preprocessed data, checkpoint the best model per
    validation loss, report test loss/accuracy, and save training curves."""
    (x_train, x_test, y_train, y_test) = preprocess()

    # convert class vectors to binary class matrices (one-hot)
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    # get model
    model = NN_model.nn_model()
    model.summary()

    # Callback executed after every training epoch: saves the model only
    # if its validation loss improved over the previous best epoch.
    interimModelPoint = ModelCheckpoint('model-{epoch:03d}.h5',
                                        monitor='val_loss',
                                        verbose=0,
                                        save_best_only=True,
                                        mode='auto')

    # Adadelta: adaptive-learning-rate variant of gradient descent.
    # Categorical cross entropy since we have multiple (6) classes.
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])

    # BUG FIX: Keras expects `callbacks` to be a *list* of Callback objects;
    # the original passed the ModelCheckpoint instance bare, which fails when
    # fit() iterates the callbacks.
    history = model.fit(x_train, y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=1,
                        validation_data=(x_test, y_test),
                        callbacks=[interimModelPoint])

    # performance evaluation on the held-out test set
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    # NOTE(review): plot_training reads history keys "acc"/"val_acc"; newer
    # Keras records "accuracy"/"val_accuracy" -- confirm the installed version.
    plot_training(history, epochs, "model_performance")
# only run if the script is invoked directly
if __name__ == "__main__":
    train()
| benedictquartey/Chiromancer | train.py | train.py | py | 3,097 | python | en | code | 6 | github-code | 13 |
74030503059 | from concurrent.futures import ProcessPoolExecutor # , ThreadPoolExecutor
import logging
import time
# Log format: timestamp, thread name, thread id, message.
# NOTE(review): this demo uses *processes*; %(threadName)s will read
# MainThread in every child -- confirm that is intended.
FORMAT = "%(asctime)s %(threadName)s %(thread)8d %(message)s"
logging.basicConfig(format=FORMAT, level=logging.INFO)
def worker(n):
    """Simulate 5 seconds of work, logging entry and exit, then return 1000 + n."""
    logging.info('enter thread~~~~~~~{}'.format(n))
    time.sleep(5)
    logging.info('finished ----------{}'.format(n))
    return 1000 + n
if __name__ == "__main__":
    executor = ProcessPoolExecutor(max_workers=3)
    fs = []
    # Because the futures are submitted inside the `with` block, the block
    # does not exit until every task has finished (Executor.__exit__ calls
    # shutdown(wait=True)).  The polling loop below therefore always sees
    # the futures already done on its first pass.
    with executor:
        for i in range(6):
            future = executor.submit(worker, i + 2)
            logging.info(future)
            fs.append(future)
    # Busy-wait until every future reports done, printing each status.
    while True:
        flag = True
        for f in fs:
            print(f.done())
            flag = flag and f.done()
            # if not flag:
            #     break
            time.sleep(1)
        print()
        if flag:
            break
    print('~' * 30)
    # All futures completed; collect and print their results (1000 + n).
    for f in fs:
        print(f.result())
| sqsxwj520/python | 并发编程/进程/进程池.py | 进程池.py | py | 1,033 | python | en | code | 1 | github-code | 13 |
def help(self, input):
    """Displays information, usage and examples for a given command."""
    cmd = input.args
    if not cmd:
        raise self.BadInputError()
    # Resolve aliases to the canonical command name before lookup.
    if cmd in self.plugin_aliases:
        cmd = self.plugin_aliases[cmd]
    # NOTE(review): indentation was lost in this source; the if/else
    # attachment below is a best-effort reconstruction -- confirm against
    # the upstream plugin.
    for e in self.doc[cmd]:
        if e:
            self.say(e)
        else:
            self.say("Sorry, can't help you with %s." % cmd)


# Plugin metadata consumed by the bot's command dispatcher.
help.rule = ["help"]
help.usage = [("Get help for a command", "$pcmd <command>")]
help.example = [("Get help for the help command", "$pcmd help")]
def commands(self, input):
    """Displays a list of all available commands."""
    self.say('\x02Available commands:')
    # NOTE(review): dict.keys() returns a view in Python 3, which has no
    # .sort(); this module appears to target Python 2 -- confirm.
    cmds = self.doc.keys()
    cmds.sort()
    # self.split() chunks the joined list to fit the IRC line-length limit.
    for cmd in self.split(', '.join(cmds)):
        self.say(' ' + cmd)
    self.say("Use %shelp <command> to get more information about a command." % self.config.get('prefix',''))


# Plugin metadata consumed by the bot's command dispatcher.
commands.rule = ['commands']
commands.usage = [("List all available commands", "$pcmd")]
| liato/spiffy | plugins/help.py | help.py | py | 977 | python | en | code | 4 | github-code | 13 |
import os
import csv

break_line = "------------------------------"

csvpath = os.path.join(".", "Resources", "election_data.csv")

# Tally the total ballot count and per-candidate counts from the CSV
# (expected columns: voter id, county, candidate).
votes = {}
total_vote = 0
with open(csvpath) as csvfile:
    csv_reader = csv.reader(csvfile, delimiter=",")
    next(csv_reader)  # skip the header row
    for row in csv_reader:
        total_vote += 1
        name = row[2]
        votes[name] = votes.get(name, 0) + 1

# BUG FIX: the winner is chosen from the raw vote counts only.  The original
# stored "<name> Percent" entries in the SAME dict as the counts and took
# max() over the mixed values, which can crown a percentage entry.  The
# candidate names are also no longer hard-coded, so any ballot file works.
cand_winner = max(votes, key=votes.get)

# Build the report once and reuse it for both the console and the text file.
# NOTE(review): candidates are listed in CSV encounter order, which matched
# the original's hard-coded Khan/Correy/Li/O'Tooley order for this dataset.
lines = [
    "Election Results",
    break_line,
    "Total Vote: " + str(total_vote),
    break_line,
]
for name, count in votes.items():
    percent = round((count / total_vote) * 100, 2)
    lines.append(name + ": " + str(percent) + "% " + str(count))
lines.extend([break_line, "Winner: " + str(cand_winner), break_line])

for line in lines:
    print(line)

output_result = os.path.join(".", "analysis", "result.txt")
with open(output_result, "w") as txt_file:
    for line in lines:
        txt_file.write(line + "\n")
#!/usr/bin/env python3
# dump.py -- dump DAPHNE INPUT spy buffers
# Jamieson Olsen <jamieson@fnal.gov> Python3

from oei import *

# Poll a fixed set of DAPHNE endpoints (addresses 10.73.137.104/5/7/9) and
# print one register from each, closing the connection after every read.
for i in [4,5,7,9]:
    thing = OEI(f"10.73.137.10{i}")
    # Read 8 words starting at address 0x3001 and keep word [2].
    # NOTE(review): which field word [2] represents is not shown here --
    # confirm against the DAPHNE register map.
    reg=hex(thing.read(0x3001,8)[2])
    print(f"reg= {reg} in ip address ending in {i}!" )
    thing.close()
| matheos/daphne_slow_control_scripts | read_link_control.py | read_link_control.py | py | 309 | python | en | code | 0 | github-code | 13 |
24693823534 | from selenium.webdriver.common.keys import Keys
from selenium import webdriver
from selenium.webdriver.common.by import By
import os
import time
import math
class Linkedin:
    """Selenium bot that logs into LinkedIn and auto-applies to Easy Apply jobs.

    NOTE(review): indentation was lost in this source; the nesting below is a
    best-effort reconstruction of the loop/try structure -- verify before use.
    """

    # Credentials come from the environment, never hard-coded.
    email = os.getenv('email')
    password = os.getenv('password')

    def __init__(self):
        """Start a Chrome session and open the LinkedIn login page."""
        # linkprofile = webdriver.ChromeOptions('')
        self.driver = webdriver.Chrome(
            "./chromedriver")
        self.driver.get("https://www.linkedin.com/login")
        # introduce email and password and hit enter
        time.sleep(5)

    def Link_job_apply(self):
        """Log in, search Easy Apply jobs for each keyword, and submit
        applications whose form is short enough (judged by the progress bar).
        """
        # Fill in the login form and submit.
        login_email = self.driver.find_element_by_name('session_key')
        login_email.clear()
        login_email.send_keys(self.email)
        login_pass = self.driver.find_element_by_name('session_password')
        login_pass.clear()
        login_pass.send_keys(self.password)
        login_pass.send_keys(Keys.RETURN)

        count_application = 0  # successfully submitted applications
        count_job = 0          # job pages visited
        jobs_per_page = 25
        easy_apply = "?f_AL=true"  # LinkedIn filter: Easy Apply only
        location = "Japan"  # "Worldwide"
        keywords = ["Python Developer"]

        for indexpag in range(len(keywords)):
            self.driver.get(
                'https://www.linkedin.com/jobs/search/' + easy_apply + '&keywords=' + keywords[indexpag] + "&" + location)
            numofjobs = self.driver.find_element(by=By.XPATH, value='//small').text  # get number of results
            space_ind = numofjobs.index(' ')
            total_jobs = (numofjobs[0:space_ind])
            total_jobs_int = int(total_jobs.replace(',', ''))
            number_of_pages = math.ceil(total_jobs_int / jobs_per_page)
            print(number_of_pages)

            for i in range(number_of_pages):
                cons_page_mult = 25 * i  # pagination offset
                url = 'https://www.linkedin.com/jobs/search/' + easy_apply + '&keywords=' + \
                    keywords[indexpag] + "&" + location + \
                    "&start=" + str(cons_page_mult)
                self.driver.get(url)
                time.sleep(10)

                # Collect the job ids visible on this results page.
                links = self.driver.find_elements(by=By.XPATH, value='//div[@data-job-id]')  # needs to be scrolled down
                IDs = []
                for link in links:
                    temp = link.get_attribute("data-job-id")
                    jobID = temp.split(":")[-1]
                    IDs.append(int(jobID))
                IDs = set(IDs)  # de-duplicate
                jobIDs = [x for x in IDs]

                for jobID in jobIDs:
                    job_page = 'https://www.linkedin.com/jobs/view/' + \
                        str(jobID)
                    self.driver.get(job_page)
                    count_job += 1
                    time.sleep(5)
                    # Locate the Easy Apply button, if the job offers one.
                    try:
                        button = self.driver.find_elements(by=By.XPATH, value='//button[contains(@class, "jobs-apply")]/span[1]')
                        # if button[0].text in "Easy Apply" :
                        EasyApplyButton = button[0]
                    except:
                        EasyApplyButton = False
                    button = EasyApplyButton
                    if button is not False:
                        string_easy = "* has Easy Apply Button"
                        button.click()
                        time.sleep(2)
                        # First try: single-page form -> submit immediately.
                        try:
                            self.driver.find_element_by_css_selector(
                                "button[aria-label='Submit application']").click()
                            time.sleep(3)
                            count_application += 1
                            print("* Just Applied to this job!")
                        except:
                            # Multi-page form: read the progress percentage to
                            # estimate how many pages remain, then walk through
                            # Continue/Review/Submit for short forms only.
                            try:
                                button = self.driver.find_element_by_css_selector(
                                    "button[aria-label='Continue to next step']").click()
                                time.sleep(3)
                                percen = self.driver.find_element_by_xpath(
                                    "/html/body/div[3]/div/div/div[2]/div/div/span").text
                                percen_numer = int(percen[0:percen.index("%")])
                                if int(percen_numer) < 25:
                                    print(
                                        "*More than 5 pages,wont apply to this job! Link: " + job_page)
                                elif int(percen_numer) < 30:
                                    # ~4 remaining pages: Continue x2, Review, Submit.
                                    try:
                                        self.driver.find_element_by_css_selector(
                                            "button[aria-label='Continue to next step']").click()
                                        time.sleep(3)
                                        self.driver.find_element_by_css_selector(
                                            "button[aria-label='Continue to next step']").click()
                                        time.sleep(3)
                                        self.driver.find_element_by_css_selector(
                                            "button[aria-label='Review your application']").click()
                                        time.sleep(3)
                                        self.driver.find_element_by_css_selector(
                                            "button[aria-label='Submit application']").click()
                                        count_application += 1
                                        print("* Just Applied to this job!")
                                    except:
                                        print(
                                            "*4 Pages,wont apply to this job! Extra info needed. Link: " + job_page)
                                elif int(percen_numer) < 40:
                                    # ~3 remaining pages: Continue, Review, Submit.
                                    try:
                                        self.driver.find_element_by_css_selector(
                                            "button[aria-label='Continue to next step']").click()
                                        time.sleep(3)
                                        self.driver.find_element_by_css_selector(
                                            "button[aria-label='Review your application']").click()
                                        time.sleep(3)
                                        self.driver.find_element_by_css_selector(
                                            "button[aria-label='Submit application']").click()
                                        count_application += 1
                                        print("* Just Applied to this job!")
                                    except:
                                        print(
                                            "*3 Pages,wont apply to this job! Extra info needed. Link: " + job_page)
                                elif int(percen_numer) < 60:
                                    # ~2 remaining pages: Review, Submit.
                                    try:
                                        self.driver.find_element_by_css_selector(
                                            "button[aria-label='Review your application']").click()
                                        time.sleep(3)
                                        self.driver.find_element_by_css_selector(
                                            "button[aria-label='Submit application']").click()
                                        count_application += 1
                                        print("* Just Applied to this job!")
                                    except:
                                        print(
                                            "* 2 Pages,wont apply to this job! Unknown. Link: " + job_page)
                            except:
                                print("* Cannot apply to this job!!")
                    else:
                        print("* Already applied!")
                    time.sleep(2)

        print("Category: " + str(keywords) + " ,applied: " + str(count_application) +
              " jobs out of " + str(count_job) + ".")
# Run the bot and report the elapsed wall-clock time.
start_time = time.time()
ed = Linkedin()
ed.Link_job_apply()
end = time.time()
# BUG FIX: use the captured `end` timestamp instead of calling time.time()
# again inside the message (the original also left `end` unused).
print("---Took: " + str(round((end - start_time)/60)) + " minute(s).")
| sterrado/linkedin_bot | job_apply.py | job_apply.py | py | 8,537 | python | en | code | 1 | github-code | 13 |
70333243538 | from cmath import inf
# One past the largest supported key; sizes the Fenwick (BIT) array below.
MAX_VAL = 1000001
def update(i, add, BIT):
    """Add `add` at 1-based index `i` of Fenwick tree `BIT`, propagating up."""
    while 0 < i < len(BIT):
        BIT[i] += add
        i += i & (-i)  # step to the next node responsible for index i
def sum(i, BIT):
    """Return the Fenwick-tree prefix sum over indices 1..i.

    NOTE: this intentionally keeps the original name, which shadows the
    builtin sum() in this module.
    """
    total = 0
    while i > 0:
        total += BIT[i]
        i -= i & (-i)  # drop the lowest set bit to move to the parent range
    return total
def insertElement(x, BIT):
    """Record one occurrence of key `x` in the Fenwick tree (O(log MAX_VAL))."""
    update(x, 1, BIT)
def deleteElement(x, BIT):
    """Remove one occurrence of key `x` from the Fenwick tree (O(log MAX_VAL))."""
    update(x, -1, BIT)
def findRank(x, BIT):
    """Return how many stored keys are <= x (the rank of x)."""
    return sum(x, BIT)
BIT = [0] * MAX_VAL
num = int(input())
output = []
for _ in range(num):
    user_input = input()
    # BUG FIX: parse the operands with split() so multi-digit keys work.
    # The original indexed fixed character positions (user_input[2] and [4])
    # and silently mis-parsed any key >= 10.
    parts = user_input.split()
    op = parts[0]
    if op == '?':
        key1 = int(parts[1])
        key2 = int(parts[2])
        # Count stored keys in the inclusive range [key1, key2].
        # findRank(key1 - 1) subsumes the original's special case for
        # key1 == minimum key, and also counts key1 itself when key1 is
        # present but not the minimum, which the original wrongly excluded.
        output.append(findRank(key2, BIT) - findRank(key1 - 1, BIT))
    elif op == '+':
        insertElement(int(parts[1]), BIT)
    else:
        # '-' operation: remove one occurrence of the key.
        deleteElement(int(parts[1]), BIT)
for i in output:
    print(i)
| Emad-Salehi/Data-Structures-and-Algorithms-Course | HW#3/Q2.py | Q2.py | py | 1,127 | python | en | code | 0 | github-code | 13 |
1766108876 | import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
# Gamma-correction lookup table: maps v in 0..255 to clip((v/255)^0.6 * 255).
lookUpTable = np.empty((1, 256), np.uint8)
for pixel_value in range(256):
    corrected = pow(pixel_value / 255.0, 0.6) * 255.0
    lookUpTable[0, pixel_value] = np.clip(corrected, 0, 255)
# Load a day/night image pair of the same place.
daytime_img = cv.imread('./nighttime place recognition dataset/test/00021510/20151102_160120.jpg')
night_img = cv.imread('./nighttime place recognition dataset/test/00021510/20151102_060125.jpg')
# daytime_img = cv.imread('./nighttime place recognition dataset/test/00023966/20151120_114613.jpg')
# night_img = cv.imread('./nighttime place recognition dataset/test/00023966/20151120_191559.jpg')

# Denoise both images before feature extraction.
night_img = cv.fastNlMeansDenoising(night_img)
daytime_img = cv.fastNlMeansDenoising(daytime_img)

# Gamma correction via the lookup table built above (brightens dark regions).
night_img = cv.LUT(night_img, lookUpTable)
daytime_img = cv.LUT(daytime_img, lookUpTable)

# Harris corner (disabled experiment, kept for reference)
# night_gray = np.float32(cv.cvtColor(night_img, cv.COLOR_BGR2GRAY))
# daytime_gray = np.float32(cv.cvtColor(daytime_img, cv.COLOR_BGR2GRAY))
# night_dst = cv.cornerHarris(night_gray,2,3,0.04)
# night_dst = cv.dilate(night_dst,None)
# night_img[night_dst>0.01*night_dst.max()]=[0,0,255]
# daytime_dst = cv.cornerHarris(daytime_gray,2,3,0.04)
# daytime_dst = cv.dilate(daytime_dst,None)
# daytime_img[daytime_dst>0.01*daytime_dst.max()]=[0,0,255]
# cv.imwrite('./output/daytime_corner.jpg', daytime_img)
# cv.imwrite('./output/night_corner.jpg', night_img)

# Sharpen (Laplacian filter) -- disabled experiment, kept for reference
# kernel = np.array([[1, 1, 1], [1, -8, 1], [1, 1, 1]], dtype=np.float32)
# daytime_imgLaplacian = cv.filter2D(daytime_img, cv.CV_32F, kernel)
# daytime_sharp = np.float32(daytime_img)
# daytime_img = daytime_sharp - daytime_imgLaplacian
# daytime_img = np.clip(daytime_img, 0, 255)
# daytime_img = daytime_img.astype('uint8')
# night_imgLaplacian = cv.filter2D(night_img, cv.CV_32F, kernel)
# night_sharp = np.float32(night_img)
# night_img = night_sharp - night_imgLaplacian
# night_img = np.clip(night_img, 0, 255)
# night_img = night_img.astype('uint8')

night_gray = cv.cvtColor(night_img, cv.COLOR_BGR2GRAY)
daytime_gray = cv.cvtColor(daytime_img, cv.COLOR_BGR2GRAY)

# Canny edge (disabled experiment)
# daytime_gray = cv.Canny(daytime_img,100,200)
# night_gray = cv.Canny(night_img,100,200)

# Detect up to 5000 Shi-Tomasi corners (quality 0.01, min distance 10 px)
# to serve as keypoint candidates for SIFT description.
kp_night = cv.goodFeaturesToTrack(night_gray, 5000, 0.01, 10)
kp_daytime = cv.goodFeaturesToTrack(daytime_gray, 5000, 0.01, 10)

# goodFeaturesToTrack returns an (N,1,2) array; flatten each entry to (x, y)
# and convert the coordinates into cv.KeyPoint objects.
l_night = []
for item in kp_night:
    l_night.append(item[0])
kp_night = np.array(l_night)
kp_night = cv.KeyPoint_convert(kp_night)

l_daytime = []
for item in kp_daytime:
    l_daytime.append(item[0])
kp_daytime = np.array(l_daytime)
kp_daytime = cv.KeyPoint_convert(kp_daytime)

# Compute SIFT descriptors at the corner keypoints (detection disabled below).
sift = cv.SIFT_create()
# kp_night, des_night = sift.detectAndCompute(night_gray, None)
# kp_daytime, des_daytime = sift.detectAndCompute(daytime_gray, None)
# kp_night = sift.detect(night_gray)
# kp_daytime = sift.detect(daytime_gray)
kp_night, des_night = sift.compute(night_gray, kp_night)
kp_daytime, des_daytime = sift.compute(daytime_gray, kp_daytime)

# Visualize and save the keypoints for inspection.
night_sift_img = cv.drawKeypoints(night_gray, kp_night, night_img)
daytime_sift_img = cv.drawKeypoints(daytime_gray, kp_daytime, daytime_img)
cv.imwrite('./output/night_sift.jpg', night_sift_img)
cv.imwrite('./output/daytime_sift.jpg', daytime_sift_img)

# Brute-force KNN matching with Lowe's ratio test (keep matches whose best
# distance is under 0.75x the second-best).
bf = cv.BFMatcher()
matches = bf.knnMatch(des_night,des_daytime, k=2)
good = []
for m, n in matches:
    if m.distance < 0.75*n.distance:
        good.append([m])
print(len(good))

# Draw and save the surviving matches side by side.
match_img = cv.drawMatchesKnn(night_img, kp_night, daytime_img, kp_daytime,
                              good, None, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
plt.imshow(match_img)
plt.savefig('output/match_test12.jpg')
18468474985 | import os
import sys
import numpy as np
import cv2
import csv
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from maskrcnn_benchmark.config import cfg
from predictor import COCODemo
def getVideoFile():
    """Return the video file path passed as the first command-line argument.

    Exits with a usage message instead of raising an IndexError when no
    argument was supplied (the original crashed in that case).
    """
    if len(sys.argv) < 2:
        sys.exit("usage: python video_mask_rcnn.py <video_file>")
    return sys.argv[1]
if __name__ == '__main__':
    """
    test everything
    """
    # ROOT_DIR = os.path.abspath("../")
    # sys.path.append(ROOT_DIR) # To find local version of the library
    # MODEL_DIR = os.path.join(ROOT_DIR, "logs")
    # video_file = "/media/binbin/data/dataset/dynamic/rot_chair/rgb.mp4"
    video_file = getVideoFile()
    # Import Mask RCNN
    # cnn_path = '/home/binbin/code/Tool/tensorflow/tensorpackZoo/COCO-ResNet50-MaskRCNN.npz'
    config_file = "../configs/caffe2/e2e_mask_rcnn_X_101_32x8d_FPN_1x_caffe2.yaml"
    cfg.merge_from_file(config_file)
    coco_demo = COCODemo(
        cfg,
        min_image_size=100,
        confidence_threshold=0.5,
    )
    video_path = os.path.dirname(video_file)
    capture = cv2.VideoCapture(video_file)
    frame_count = 0

    # Build the output folder tree next to the input video:
    # mask_RCNN/{visualization, class_id, mask, box, prob, all_prob}.
    mask_rcnn_path = os.path.join(video_path, "mask_RCNN")
    if not os.path.exists(mask_rcnn_path):
        os.makedirs(mask_rcnn_path)
    visual_path = os.path.join(mask_rcnn_path, "visualization")
    if not os.path.exists(visual_path):
        os.makedirs(visual_path)
    class_path = os.path.join(mask_rcnn_path, "class_id")
    if not os.path.exists(class_path):
        os.makedirs(class_path)
    mask_path = os.path.join(mask_rcnn_path, "mask")
    if not os.path.exists(mask_path):
        os.makedirs(mask_path)
    box_path = os.path.join(mask_rcnn_path, "box")
    if not os.path.exists(box_path):
        os.makedirs(box_path)
    prob_path = os.path.join(mask_rcnn_path, "prob")
    if not os.path.exists(prob_path):
        os.makedirs(prob_path)
    all_prob_path = os.path.join(mask_rcnn_path, "all_prob")
    if not os.path.exists(all_prob_path):
        os.makedirs(all_prob_path)

    # CSV index that associates all per-frame output files.
    # NOTE(review): this file handle is never closed; output may be truncated
    # if the process dies -- consider a `with` block.
    csv_path = os.path.join(mask_rcnn_path, "maskrcnn.csv")
    out = csv.writer(open(csv_path, "w", newline=''), delimiter=',', quoting=csv.QUOTE_ALL)
    data = ["#visualisation", "mask", "class-id", "box", "prob", "all_prob"]
    out.writerow(data)

    while (capture.isOpened()):
        ret, frame = capture.read()
        # Bail out when the video file ends
        if not ret:
            break
        print('frame_count :{0}'.format(frame_count))
        all_probs, box, class_id, mask, prob, visualization = coco_demo.output_predictions(frame)
        # plt.imshow(visualization[:, :, [2, 1, 0]])
        # plt.show()
        # print('Predicted')
        # do not output bg probablity: legacy issue in mid-fusion
        all_probs = all_probs[:, 1:]
        # save visualization
        name = '{0:04d}.png'.format(frame_count)
        visual_name = os.path.join(visual_path, name)
        cv2.imwrite(visual_name, visualization)
        print('writing to file:{0}'.format(visual_name))
        # save mask
        name = '{0:04d}'.format(frame_count)
        mask_name = os.path.join(mask_path, name)
        np.save(mask_name, mask)
        print('writing to file:{0}'.format(mask_name))
        # save class id
        class_name = os.path.join(class_path, name)
        np.save(class_name, class_id)
        print('writing to file:{0}'.format(class_name))
        # save box
        box_name = os.path.join(box_path, name)
        np.save(box_name, box)
        print('writing to file:{0}'.format(box_name))
        # probability #N
        prob_name = os.path.join(prob_path, name)
        np.save(prob_name, prob)
        print('writing to file:{0}'.format(prob_name))
        # all probability #N * #class
        all_prob_name = os.path.join(all_prob_path, name)
        np.save(all_prob_name, all_probs)
        print('writing to file:{0}'.format(all_prob_name))
        # save associsations
        data = [visual_name, mask_name, class_name, box_name, prob_name, all_prob_name]
        out.writerow(data)
        frame_count += 1
    capture.release()
34887156104 | import pickle
import time
from abc import ABC, abstractmethod
from typing import Optional
import numpy as np
import matplotlib.pyplot as plt
from prettytable import PrettyTable
from fitness.fitness_functions import RealValueFitnessFunction, FitnessFunction
class GeneticAlgorithm(ABC):
"""The base class used to implement different kinds of genetic algorithms (e.g. SGA and crowding)."""
    def __init__(self,
                 population_size: int = 32,
                 n_bits: int = 8,
                 fitness_function: FitnessFunction = None,
                 p_cross_over: float = 0.6,
                 p_mutation: float = 0.05,
                 offspring_multiplier: int = 1):
        """
        :param population_size: Number of individuals in population.
        :param n_bits: Number of bits used to represent each individual.
        :param fitness_function: Fitness function to use during evolution (required).
        :param p_cross_over: Probability of crossover of two parents.
        :param p_mutation: Probability of mutating offspring.
        :param offspring_multiplier: Decides how many offspring are created in each generation:
            population_size is selected from population_size * offspring_multiplier offsprings
        :raises TypeError: if fitness_function is not provided.
        """
        self.population_size: int = population_size
        self.n_bits: int = n_bits
        if fitness_function is None:
            raise TypeError('fitness_function must be specified')
        self.fitness_function = fitness_function
        self.p_cross_over: float = p_cross_over
        self.p_mutation: float = p_mutation
        self.offspring_multiplier = offspring_multiplier
        # Random initial population of shape (population_size x n_bits).
        self.population: np.ndarray = self.init_population(population_size, n_bits)

        # Used to store histories during fit
        self.population_history: list[np.ndarray] = []
        self.fitness_history: list[np.ndarray] = []
        self.entropy_history: list[float] = []
@staticmethod
def init_population(population_size: int, n_bits: int) -> np.ndarray:
"""Initializes population of size (n_individuals x n_bits).
Assignment task a).
:param population_size: Number of individuals in population.
:param n_bits: Number of bits used to represent each individual.
:return: Numpy array of the whole population. Shape: (population_size x n_bits).
"""
return np.random.randint(0, 2, (population_size, n_bits))
@staticmethod
def calculate_entropy(population: np.ndarray, epsilon: float = 1e-18) -> float:
"""Calculates entropy of a population.
:param population: A (Nxb) numpy array of a population.
:param epsilon: Minimum probability to avoid taking log of 0.
:return: Entropy of population
"""
probabilities = population.mean(axis=0).clip(min=epsilon) # Clip to avoid taking log of 0.
return -np.dot(probabilities, np.log2(probabilities))
def _get_fitness_stats(self) -> np.ndarray:
"""Calculates fitness of whole population and returns sum, max/min, and mean of these.
:return: A (3x1) numpy array of the sum, max, and mean of the fitness of the population.
"""
fitness: np.ndarray = self.fitness_function(population=self.population)
if self.fitness_function.maximizing:
return np.array([fitness.sum(), fitness.max(), fitness.mean()])
else:
return np.array([fitness.sum(), fitness.min(), fitness.mean()])
def _get_selection_probabilities(self, fitness: np.ndarray) -> np.ndarray:
"""Calculates selection probabilities from a fitness vector.
The probabilities are calculated using the roulette wheel method.
:param fitness: A (Nx1) numpy array specifying the fitness of each individual in the population.
:return: A (Nx1) numpy array of the probabilities that an individual will be chosen as a parent.
"""
if not self.fitness_function.maximizing:
fitness *= -1
return np.exp(fitness) / np.exp(fitness).sum()
    def _parent_selection(self, population: np.ndarray) -> np.ndarray:
        """Selects parents for the next generation of the population.

        population_size individuals are drawn with replacement, weighted by
        roulette-wheel selection probabilities, and the resulting mating pool
        is shuffled.  The exact outcome depends on the order of the RNG calls
        (choice, then shuffle).

        :return: A multiset chosen from the current population.
        """
        parent_population = population.copy()
        fitness = self.fitness_function(population=parent_population)
        probabilities = self._get_selection_probabilities(fitness)
        # Draw row indices with replacement, weighted by fitness.
        indeces = np.random.choice(len(fitness),
                                   size=self.population_size,
                                   replace=True,
                                   p=probabilities)
        parent_population = parent_population[indeces]
        np.random.shuffle(parent_population)  # Shuffles the mating pool
        return parent_population
    def _cross_over(self, popultation: np.ndarray) -> np.ndarray:
        """Performs cross over for a whole population.

        Two and two individuals (1st/2nd, 3rd/4th, ...) are crossed with
        probability p_cross_over at a single random point.  If the population
        contains an odd number of individuals, the last one is not crossed
        and just passes through.  This should be done after selection.
        Assignment task c)

        :param popultation: A (Nxb) numpy array of a population.
        :return: A (Nxb) numpy array of the crossed over population.
        """
        crossed_population = popultation.copy()
        # The even/odd slices are views into crossed_population, so the slice
        # assignments below mutate the copy in place.
        for p1, p2 in zip(crossed_population[::2], crossed_population[1::2]):
            if np.random.random() < self.p_cross_over:
                c = np.random.randint(1, self.n_bits)  # Random cross over point
                temp1 = p1[c:].copy()
                temp2 = p2[c:].copy()
                p1[c:], p2[c:] = temp2, temp1
        return crossed_population
def _mutate(self, population: np.ndarray) -> np.ndarray:
"""Mutates a population by randomly flipping bits.
This should be done after cross over.
Assignment task c)
:param population: A (Nxb) numpy array of a population.
:return: A (Nxb) numpy array of the mutated population.
"""
mutated_population = population.copy()
mask = np.random.choice([0, 1], size=population.shape, p=[1-self.p_mutation, self.p_mutation])
idx = np.where(mask == 1)
mutated_population[idx] = 1 - mutated_population[idx]
return mutated_population
    @abstractmethod
    def _survivor_selection(self, parents: np.ndarray, offspring: np.ndarray) -> np.ndarray:
        """Selects and returns survivors for the next generation.

        Concrete subclasses implement the survivor-selection strategy
        (e.g. replace-all, elitism, crowding).

        :param parents: A numpy array of the parents of the current generation.
        :param offspring: A numpy array of the offsprings of the current generation.
        :return: A numpy array of the survivors for the next generation.
        """
        raise NotImplementedError('Subclass must implement _survivor_selection() method.')
    def fit(self,
            generations: int = 100,
            termination_fitness: Optional[float] = None,
            verbose: bool = False,
            visualize: bool = False,
            vis_sleep: float = 0.1,
            ) -> None:
        """Fits the population through a generational loop.

        For each generation the following is done:
            1. Selection
            2. Cross over
            3. Mutation
            4. Survivor selection

        Population, fitness and entropy histories are recorded per generation
        and converted to numpy arrays when fitting finishes.

        :param generations: Number of generations the algorithm should run.
        :param termination_fitness: Fitting stops if termination_fitness has been reached
                                    (judged on the population *mean* fitness).
                                    If None, all generations are performed.
        :param verbose: Whether or not additional data should be printed during fitting.
        :param visualize: Whether or not to visualize population during fitting.
        :param vis_sleep: Sleep timer between each generation. Controls speed of visualization.
        """
        # Only visualize if the fitness function is a real value fitness function.
        # Not visualizing for regression tasks.
        visualize = visualize and issubclass(type(self.fitness_function), RealValueFitnessFunction)
        # Reset histories so repeated fit() calls do not accumulate old runs.
        self.population_history = []
        self.fitness_history = []
        self.entropy_history = []
        if visualize:
            # Interactive matplotlib figure: the fitness curve plus a scatter
            # of the current population that is updated every generation.
            plt.ion()
            fig, ax = plt.subplots(figsize=(12, 12))
            fig.suptitle(f'{self.__class__.__name__} on {self.fitness_function.__class__.__name__}')
            interval = self.fitness_function.interval
            x_func = np.linspace(interval[0], interval[1], 10*(interval[1] - interval[0]))
            y_func = self.fitness_function.fitness(x_func)
            ax.plot(x_func, y_func)
            ax.set_xlabel('Value')
            ax.set_ylabel('Fitness')
            points, = ax.plot(x_func, y_func, 'ro', label='Population')
            ax.legend()
        for g in range(generations):
            print(f'Generation {g} - {self.__class__.__name__}')
            fitness_stats = self._get_fitness_stats()
            self.fitness_history.append(fitness_stats)
            entropy = self.calculate_entropy(self.population)
            self.entropy_history.append(entropy)
            # fitness_stats[-1] is the mean fitness (see _get_fitness_stats).
            if termination_fitness is not None:
                if (self.fitness_function.maximizing and fitness_stats[-1] >= termination_fitness) or \
                        (not self.fitness_function.maximizing and fitness_stats[-1] <= termination_fitness):
                    break
            if verbose:
                print(f'Entropy: {round(entropy, 2)}')
                max_or_min = 'Max' if self.fitness_function.maximizing else 'Min'
                fitness_table = PrettyTable(['Sum', max_or_min, 'Mean'], title='Fitness')
                fitness_table.add_row([round(s, 4) for s in fitness_stats])
                print(fitness_table)
            if visualize:
                ax.set_title(f'Generation {g}')
                x = self.fitness_function.bits_to_scaled_nums(self.population)
                y = self.fitness_function.fitness(x)
                points.set_xdata(x)
                points.set_ydata(y)
                fig.canvas.draw()
                fig.canvas.flush_events()
                time.sleep(vis_sleep)
            # Record the generation, then apply the four evolutionary steps.
            self.population_history.append(self.population.copy())
            parents = self._parent_selection(self.population)
            crossed = self._cross_over(parents)
            mutated = self._mutate(crossed)
            self.population = self._survivor_selection(parents, mutated)
        self.population_history = np.asarray(self.population_history)
        self.fitness_history = np.asarray(self.fitness_history)
        self.entropy_history = np.asarray(self.entropy_history)
def fittest_individual(self) -> np.ndarray:
"""Returns the currently fittest individual
:return: A (1xb) numpy array of the fittest individual.
"""
fitness = self.fitness_function(population=self.population)
return self.population[np.argmax(fitness)]
def save(self, file_name: str) -> None:
"""Saves the GeneticAlgorithm object to a file.
Useful to save population and histories after fitting.
:param file_name: File name of where to save. Expects that folder where file should be created exists.
"""
with open(file_name, 'wb') as file:
pickle.dump(self, file, pickle.HIGHEST_PROTOCOL)
@staticmethod
def load(file_name: str) -> 'Network':
"""Loads a GeneticAlgorithm object from a file.
Useful to load population and histories from previously run fit.
:param file_name: File name of saved object.
:return: A GeneticAlgorithm object as specified by the file.
"""
with open(file_name, 'rb') as file:
network = pickle.load(file)
return network
class SimpleGeneticAlgorithm(GeneticAlgorithm):
    """GeneticAlgorithm subclass with the simplest survivor-selection scheme."""

    def _survivor_selection(self, parents: np.ndarray, offspring: np.ndarray) -> np.ndarray:
        """The offspring wholesale replaces the parent generation.

        :param parents: A numpy array of the parents of the current generation (unused).
        :param offspring: A numpy array of the offsprings of the current generation.
        :return: The offspring, unchanged, as the next generation.
        """
        return offspring
class FittestGeneticAlgorithm(GeneticAlgorithm):
    """GeneticAlgorithm subclass that keeps only the overall fittest individuals."""

    def _survivor_selection(self, parents: np.ndarray, offspring: np.ndarray) -> np.ndarray:
        """Pool parents and offspring and keep the population_size fittest.

        :param parents: A numpy array of the parents of the current generation.
        :param offspring: A numpy array of the offsprings of the current generation.
        :return: A numpy array of the survivors for the next generation.
        """
        pool = np.concatenate((parents, offspring), axis=0)
        scores = self.fitness_function(pool)
        if self.fitness_function.maximizing:
            scores *= -1  # negate so ascending argsort puts the best first
        keep = np.argsort(scores)[:self.population_size]
        return pool[keep]
class GeneralizedCrowding(GeneticAlgorithm):
    """This class inherits from GeneticAlgorithm, and implements a generalized
    local tournament to select the survivors for the next generation.
    """

    def __init__(self, scaling_factor: float = 0.5, *args, **kwargs):
        """
        :param scaling_factor: Parameter used for getting winner probabilities in the competition.
                               0 yields deterministic crowding, 1 probabilistic crowding.
        """
        super().__init__(*args, **kwargs)
        self._scaling_factor = scaling_factor

    @staticmethod
    def hamming_distance(a1: np.ndarray, a2: np.ndarray) -> int:
        """Calculates the hamming distance between two bit arrays.

        :param a1: First numpy array.
        :param a2: Second numpy array.
        :return: The hamming distance between a1 and a2.
        """
        return np.count_nonzero(a1 != a2)

    def _competition(self, parent: np.ndarray, offspring: np.ndarray, epsilon: float = 1e-12) -> np.ndarray:
        """Parent competes against offspring in a fitness competition. The winner is returned.

        :param parent: Numpy array of the parent.
        :param offspring: Numpy array of the offspring.
        :param epsilon: Parameter used to ensure we don't divide by zero.
        :return: The winner of the competition.
        """
        # NOTE(review): fitness_function appears to return array-likes here;
        # the comparisons below rely on them being scalar/size-1 — confirm.
        offspring_fitness = self.fitness_function(offspring)
        parent_fitness = self.fitness_function(parent)
        if not self.fitness_function.maximizing:
            # Map a minimising fitness onto a maximising scale so the winner
            # probability formulas below still apply; clip keeps denominators
            # strictly positive.
            max_fitness = max(offspring_fitness.max(), parent_fitness.max())
            offspring_fitness = (max_fitness - offspring_fitness).clip(min=epsilon)
            parent_fitness = (max_fitness - parent_fitness).clip(min=epsilon)
            # offspring_fitness *= -1
            # parent_fitness *= -1
        if offspring_fitness > parent_fitness:
            offspring_probability = offspring_fitness / (offspring_fitness + self._scaling_factor * parent_fitness)
        elif offspring_fitness < parent_fitness:
            scaled_offspring_fitness = self._scaling_factor * offspring_fitness
            offspring_probability = scaled_offspring_fitness / (scaled_offspring_fitness + parent_fitness)
        else:
            offspring_probability = 0.5
        return offspring if np.random.random() < offspring_probability else parent

    def _survivor_selection(self, parents: np.ndarray, offspring: np.ndarray) -> np.ndarray:
        """Parents and offspring competes in a local tournament.
        The winners are selected as survivors for the next generation.

        :param parents: A numpy array of the parents of the current generation.
        :param offspring: A numpy array of the offsprings of the current generation.
        :return: A numpy array of the survivors for the next generation.
        """
        survivor_population = offspring.copy()
        for i, (p1, p2, o1, o2) in enumerate(zip(parents[::2], parents[1::2], offspring[::2], offspring[1::2])):
            # Pair each parent with its most similar offspring (smallest
            # total Hamming distance) before running the local tournaments.
            h1 = self.hamming_distance(p1, o1) + self.hamming_distance(p2, o2)
            h2 = self.hamming_distance(p1, o2) + self.hamming_distance(p2, o1)
            if h1 < h2:  # Competitions: [(p1, o1), (p2, o2)]
                survivor_population[i*2] = self._competition(p1, o1)
                survivor_population[i*2 + 1] = self._competition(p2, o2)
            else:  # Competitions: [(p1, o2), (p2, o1)]
                survivor_population[i*2] = self._competition(p1, o2)
                survivor_population[i*2 + 1] = self._competition(p2, o1)
        return survivor_population
class DeterministicCrowding(GeneralizedCrowding):
    """GeneralizedCrowding with the competition scaling factor fixed to 0,
    i.e. the fitter contestant always wins its local tournament."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, scaling_factor=0, **kwargs)
class ProbabilisticCrowding(GeneralizedCrowding):
    """GeneralizedCrowding with the competition scaling factor fixed to 1,
    i.e. winners are drawn proportionally to fitness."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, scaling_factor=1, **kwargs)
| fredrvaa/IT3708 | project1/evolution/genetic_algorithm.py | genetic_algorithm.py | py | 17,311 | python | en | code | 0 | github-code | 13 |
26964802324 | import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
import unittest
import HtmlTestRunner
class GoogleSearchTest(unittest.TestCase):
    """Smoke tests that run two Google searches in a real Chrome browser."""

    @classmethod
    def setUpClass(cls) -> None:
        """Start Chrome once for the whole class and open the Google home page."""
        baseUrl = "https://google.com"
        s = Service("../driver/chromedriver.exe")
        cls.driver = webdriver.Chrome(service=s)
        cls.driver.implicitly_wait(10)  # wait up to 10 s for elements to appear
        cls.driver.maximize_window()
        cls.driver.get(baseUrl)

    def test_search_keyword_01(self):
        """Type 'Automation step by step' and submit via the search button."""
        self.driver.find_element(By.NAME, "q").send_keys("Automation step by step")
        self.driver.find_element(By.NAME, "btnK").click()
        time.sleep(2)

    def test_search_keyword_02(self):
        """Clear the box, type 'Selenium' and submit via the submit button."""
        self.driver.find_element(By.NAME, "q").clear()
        time.sleep(2)
        self.driver.find_element(By.NAME, "q").send_keys("Selenium")
        # Fixed selector: the closing ']' was missing, making the CSS invalid.
        self.driver.find_element(By.CSS_SELECTOR, "button[type=submit]").click()

    @classmethod
    def tearDownClass(cls) -> None:
        """Quit the shared browser after all tests have run."""
        cls.driver.quit()
        print("Test Completed")
if __name__ == "__main__":
    # Run the test class when this file is executed directly.
    unittest.main()
| jongsungbae/SeleniumWithPython | small_project/small_project_01/GoogleSearchTest.py | GoogleSearchTest.py | py | 1,192 | python | en | code | 0 | github-code | 13 |
23249619116 | #!/usr/bin/env python3
# encoding: utf-8
import bisect
import dataclasses
from typing import List, Optional
@dataclasses.dataclass
class TreeNode:
    """A segment-tree node covering the half-open index range [start, end)."""
    start: int  # inclusive lower bound of the covered range
    end: int  # exclusive upper bound of the covered range
    sum: int = 0  # total of all counters within [start, end)
    left: Optional['TreeNode'] = None  # child covering [start, mid)
    right: Optional['TreeNode'] = None  # child covering [mid, end)
class SegTree:
    """Segment tree over a fixed-length array of counters, supporting
    point increments and half-open range-sum queries."""

    def __init__(self, length: int):
        self._counters = [0] * length
        self._root = self._build_tree(0, length)

    def _build_tree(self, start: int, end: int):
        """Recursively build the tree for the half-open range [start, end)."""
        if end - start == 1:
            return TreeNode(start, end)
        mid = (start + end) // 2
        return TreeNode(start, end,
                        left=self._build_tree(start, mid),
                        right=self._build_tree(mid, end))

    def _range_sum(self, node: TreeNode, start: int, end: int):
        """Sum of counters in [start, end), assumed to lie within node's range."""
        if (start, end) == (node.start, node.end):
            return node.sum
        mid = (node.start + node.end) // 2
        total = 0
        if start < mid:
            total += self._range_sum(node.left, start, min(mid, end))
        if end > mid:
            total += self._range_sum(node.right, max(mid, start), end)
        return total

    def range_sum(self, start: int, end: int):
        """Sum of counters in the half-open range [start, end)."""
        if start == end:
            return 0
        if end - start == 1:
            return self._counters[start]  # single-element fast path
        return self._range_sum(self._root, start, end)

    def inc(self, idx: int):
        """Increment the counter at *idx* by one."""
        self._counters[idx] += 1
        # Walk from the root down to the leaf for idx, bumping every sum.
        node = self._root
        while node is not None:
            node.sum += 1
            mid = (node.start + node.end) // 2
            node = node.left if idx < mid else node.right
class Solution:
    def countSmaller(self, nums: List[int]) -> List[int]:
        """For every element, count how many later elements are strictly smaller.

        Walks the array right-to-left, keeping per-value counters in a
        segment tree indexed by rank within the sorted set of distinct values.
        """
        if not nums:
            return []
        ranks = sorted(set(nums))
        counters = SegTree(len(ranks))
        answer = [0] * len(nums)
        for i in reversed(range(len(nums))):
            rank = bisect.bisect_left(ranks, nums[i])
            # Everything already inserted with a strictly smaller rank lies
            # to the right of i and is smaller than nums[i].
            answer[i] = counters.range_sum(0, rank)
            counters.inc(rank)
        return answer
| misaka-10032/leetcode | coding/00315-count-smaller-after-self/solution.py | solution.py | py | 2,513 | python | en | code | 1 | github-code | 13 |
def spec_sum(n):
    """Return the sum of every third integer from 65 (inclusive) up to n (exclusive)."""
    return sum(range(65, n, 3))
def add(a, b):
    """Return the sum (or concatenation) of a and b."""
    result = a + b
    return result
# Sort the incoming list in place (ascending), then print it and its length.
def bubble_sort(list):
    """Sort *list* in place in ascending order, then print it and its length.

    Classic bubble sort with an early exit once a pass makes no swaps
    (the previous version compared every pair of indices on every pass).
    NOTE(review): the parameter shadows the builtin ``list``; the name is
    kept for backward compatibility with existing callers.
    """
    n = len(list)
    for i in range(n):
        swapped = False
        # After i passes the last i elements are already in their final place.
        for j in range(n - 1 - i):
            if list[j] > list[j + 1]:
                list[j], list[j + 1] = list[j + 1], list[j]
                swapped = True
        if not swapped:
            break
    print(list, len(list))
def matrix_op(m, n):
    """Return the sum of column *n* over all rows of matrix *m*."""
    return sum(row[n] for row in m)
| turo62/exercise | sandbox/justtry2.py | justtry2.py | py | 516 | python | en | code | 0 | github-code | 13 |
23676863660 | import logging
import rasa_core
from rasa_core.agent import Agent
from rasa_core.domain import Domain
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
from rasa_core.featurizers import (MaxHistoryTrackerFeaturizer,
BinarySingleStateFeaturizer)
if __name__ == '__main__':
    logging.basicConfig(level='INFO')

    # Training stories and output location for the dialogue model.
    dialog_training_data_file = './config/stories.md'
    path_to_model = './models/dialogue'

    # Featurize the last 5 dialogue turns as binary state vectors.
    featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(),
                                             max_history=5)
    agent = Agent('config/chat_domain.yml',
                  policies=[MemoizationPolicy(max_history=5),
                            KerasPolicy(featurizer)])

    agent.train(
        dialog_training_data_file,
        augmentation_factor=50,  # number of augmented training stories
        epochs=500,
        batch_size=10,
        validation_split=0.2)

    agent.persist(path_to_model)
| mayflower/err-rasa | dialogue_model.py | dialogue_model.py | py | 1,112 | python | en | code | 1 | github-code | 13 |
23885696318 | from django.contrib import admin
from .models import *
from django.contrib.auth.models import User
@admin.register(Billinginfo)
class BillinginfoAdmin(admin.ModelAdmin):
    """Django admin configuration for Billinginfo records."""
    list_display= ['user','country', 'postcode', 'phone', 'id_list']
    list_editable= ['country', 'phone']  # editable directly in the change list
    list_filter= ['country', 'phone']

    @admin.display(ordering='id')
    def id_list(self,billinginfo):
        # NOTE(review): displays the record id shifted by 2 — presumably to
        # match some external numbering scheme; confirm the intended offset.
        return billinginfo.id + 2
# Register the remaining model with the default ModelAdmin.
admin.site.register(Profile)
admin.site.register(Appointment) | Huzzy619/it_next | users/admin.py | admin.py | py | 493 | python | en | code | 0 | github-code | 13 |
39399916243 | #!/usr/bin/env python
import os
import sys
import subprocess
import time
import signal
def file_filter(name):
    """Accept Python source files, but never this autoreload script itself."""
    return name.endswith(".py") and "autoreload.py" not in name

def file_times(path):
    """Yield the modification time of every watched Python file under *path*.

    Top-level files matching file_filter are yielded directly; directory
    entries are walked recursively.  Entries are joined with *path* before
    stat'ing, so the function also works when *path* is not the current
    directory (previously the bare entry names were stat'ed, which only
    worked for path == '.').
    """
    for entry in os.listdir(path):
        full_entry = os.path.join(path, entry)
        if not os.path.isdir(full_entry) and file_filter(entry):
            yield os.stat(full_entry).st_mtime
        # os.walk on a plain file yields nothing, so this is safe for files.
        for root, dirs, files in os.walk(full_entry):
            for file in filter(file_filter, files):
                yield os.stat(os.path.join(root, file)).st_mtime
def print_stdout(process):
    """Print the child process's stdout stream if one was captured.

    NOTE(review): this prints the stream *object*, not its contents, which
    matches the original behaviour; reading the pipe here could block while
    the child is still running.
    """
    stdout = process.stdout
    if stdout is not None:  # identity check, not `!= None`
        print(stdout)
# We concatenate all of the arguments together, and treat that as the command to run
command = ' '.join(sys.argv[1:])
# The path to watch
path = '.'
# How often we check the filesystem for changes (in seconds)
wait = 1
# The process to autoreload
process = subprocess.Popen(command.split(" "))
# The current maximum file modified time under the watched directory
last_mtime = max(file_times(path))
# Poll forever: whenever any watched .py file is newer than what we last saw,
# interrupt the child process and start a fresh one.
while True:
    max_mtime = max(file_times(path))
    print_stdout(process)
    if max_mtime > last_mtime:
        last_mtime = max_mtime
        print ('## Restarting process on file change ! ##')
        # SIGINT gives the child a chance to shut down cleanly.
        os.kill(process.pid, signal.SIGINT)
        process = subprocess.Popen(command.split(" "))
    time.sleep(wait)
class Array:
    """A fixed-capacity array that doubles its backing storage on demand."""

    def __init__(self, cape):
        # Backing list and the number of slots currently in use.
        self.arrty = [None] * cape
        self.size = 0

    def insert(self, index, element):
        """Insert *element* at *index*, shifting later elements one slot right.

        Raises if *index* is outside [0, size].  Doubles the backing list
        when it is full.
        """
        if index < 0 or index > self.size:
            raise Exception('数组越界')
        if self.size >= len(self.arrty):
            # Fixed: previously called the module-level global `arrty`
            # instead of this instance, so resizing only worked by accident
            # when a global with that exact name happened to exist.
            self.addkuorong()
        for i in range(self.size - 1, index - 1, -1):
            self.arrty[i + 1] = self.arrty[i]
        self.arrty[index] = element
        self.size += 1

    def output(self):
        """Print the used portion of the array, separated by '->'."""
        for i in range(self.size):
            print(self.arrty[i], end='->')

    def addkuorong(self):
        """Double the capacity, copying the existing elements over."""
        new_arrty = [None] * len(self.arrty) * 2
        for i in range(self.size):
            new_arrty[i] = self.arrty[i]
        self.arrty = new_arrty
if __name__ == '__main__':
    # Demo: build a 4-slot array and insert five values, forcing one resize.
    arrty = Array(4)
    arrty.insert(0,0)
    arrty.insert(1,1)
    arrty.insert(2,2)
    arrty.insert(3,3)
    arrty.insert(4,4)
arrty.output() | Mrliuyuchao/ds | 6月18/lianxi1.py | lianxi1.py | py | 907 | python | en | code | 0 | github-code | 13 |
39810039211 | from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from gensim.models import KeyedVectors
import re
import numpy as np
from joblib import dump, load
from utils import power_iteration, track_trans
import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext
def disco():
    """Run the full DISCO pipeline on the article in the GUI text box.

    Steps: tokenize/lemmatize the article, build a word co-occurrence graph
    over words found in the pre-trained word2vec model, compute a graph
    embedding via personalized PageRank, classify it with a saved MLP, and
    rank the per-word "misleading degree" by leave-one-word-out probing.
    Reads/writes the module-level tkinter widgets and pretrained_model.
    """
    # print("Building the word graph for the input article ......")
    process_area.insert(tk.END, " --- Building the Word Graph for the Input Article ......\n")
    process_area.see(tk.END)
    process_area.update_idletasks()
    # Keep letters only, lowercase, split, drop stopwords, lemmatize.
    review = re.sub("[^a-zA-Z]", " ", text_area.get('1.0', 'end-1c'))
    review = review.lower()
    review = review.split()
    review = [WordNetLemmatizer().lemmatize(word) for word in review if word not in stopwords.words("english")]
    # - Get valid tokens - #
    num_nodes = 0
    id_2_word = dict()
    word_2_id = dict()
    id_2_vec = dict()
    appeared_nodes = set()
    for word in review:
        if word not in word_2_id.keys() and word not in appeared_nodes:  # - find a new word - #
            try:
                vec = pretrained_model[word]  # - 300 dimension - #
                word_2_id[word] = num_nodes
                id_2_word[num_nodes] = word
                id_2_vec[num_nodes] = vec
                num_nodes += 1
                appeared_nodes.add(word)
            except:
                # Word absent from the pre-trained model: remember it so we
                # don't retry, but give it no graph node.
                # print('============> ' + word + ' could not be found in the Google pre-trained model.')
                appeared_nodes.add(word)
    # print("============> number of words: " + str(num_nodes))
    # - Construct graph adjacency matrix (symmetric, sliding window of 3) - #
    adj_matrix = np.zeros((num_nodes, num_nodes))
    for j in range(len(review) - 2):  # - size window is 3 - #
        word_x = review[j]
        word_y = review[j + 1]
        word_z = review[j + 2]
        if word_x in word_2_id.keys() and word_y in word_2_id.keys():
            adj_matrix[word_2_id[word_x]][word_2_id[word_y]] = 1
            adj_matrix[word_2_id[word_y]][word_2_id[word_x]] = 1
        if word_x in word_2_id.keys() and word_z in word_2_id.keys():
            adj_matrix[word_2_id[word_x]][word_2_id[word_z]] = 1
            adj_matrix[word_2_id[word_z]][word_2_id[word_x]] = 1
        if word_y in word_2_id.keys() and word_z in word_2_id.keys():
            adj_matrix[word_2_id[word_y]][word_2_id[word_z]] = 1
            adj_matrix[word_2_id[word_z]][word_2_id[word_y]] = 1
    # print("Extracting Geometric Features ......")
    process_area.insert(tk.END, " --- Extracting Geometric Features ......\n")
    process_area.see(tk.END)
    process_area.update_idletasks()
    # - Get graph embedding: one personalized-PageRank row per node - #
    h_matrix = np.zeros((num_nodes, 300))
    p_matrix = np.zeros((num_nodes, num_nodes))
    for j in range(num_nodes):
        ppr = np.zeros((num_nodes,))
        ppr[j] = 1
        ppr = power_iteration(ppr, adj_matrix)
        # NOTE(review): `[j:]` assigns all rows from j onward; later
        # iterations overwrite them, so the net effect equals row
        # assignment `p_matrix[j] = ppr` — consider the explicit form.
        p_matrix[j:] = ppr
        h_matrix[j:] = id_2_vec[j]
    z_matrix = np.dot(p_matrix, h_matrix)
    z_vec = np.sum(z_matrix, axis=0)  # pooling: sum to row
    # print("Making predictions .....")
    process_area.insert(tk.END, " --- Neural Detection .....\n")
    process_area.see(tk.END)
    process_area.update_idletasks()
    clf = load('trained-classifier/saved_mlp.joblib')
    prob = clf.predict_proba([z_vec])
    fake_prob = f'{prob[0][0]:.14f}'
    real_prob = f'{prob[0][1]:.14f}'
    label = clf.predict([z_vec])
    # print(label, prob)
    decision = ""
    if label == 0:
        decision = "Detection Result: Fake\n" + "Fake Probability: " + str(fake_prob) + "\n" + "Real Probability: " + str(real_prob) + "\n"
    else:
        decision = "Detection Result: Real\n" + "Fake Probability: " + str(fake_prob) + "\n" + "Real Probability: " + str(real_prob) + "\n"
    result_area.insert(tk.END, decision)
    result_area.see(tk.END)
    result_area.update_idletasks()
    # print("Analyzing each word misleading degree .....")
    process_area.insert(tk.END, " --- Analyzing Each Word Misleading Degree .....\n")
    process_area.see(tk.END)
    process_area.update_idletasks()
    # Leave-one-word-out: disconnect each node and measure how much the
    # predicted probability of the detected label changes.
    word_2_misleading_degree = dict()
    for rm_idx in range(num_nodes):
        new_adj_matrix = adj_matrix.copy()
        new_adj_matrix[:, rm_idx] = 0
        new_adj_matrix[rm_idx, :] = 0
        new_p_matrix = track_trans(adj_matrix, new_adj_matrix, p_matrix)
        z_matrix = np.dot(new_p_matrix, h_matrix)
        z_vec = np.sum(z_matrix, axis=0)
        new_prob = clf.predict_proba([z_vec])
        misleading_degree = 0
        if label == 0:
            misleading_degree = new_prob[0][0] - prob[0][0]
        else:
            misleading_degree = new_prob[0][1] - prob[0][1]
        rm_word = id_2_word[rm_idx]
        word_2_misleading_degree[rm_word] = misleading_degree
    # Top-10 words whose removal most increases the predicted probability.
    ranking = sorted(word_2_misleading_degree.items(), key=lambda item: item[1], reverse=True)[:10]
    # print(ranking)
    ranks = "[Word]:\t[Misleading Degree]\n"
    for item in ranking:
        ranks += str(item[0]) + ":\t" + str(f'{item[1]:.14f}') + "\n"
    result_area.insert(tk.END, ranks + "\n")
    # result_area.see(tk.END)
    # result_area.update_idletasks()
    process_area.insert(tk.END, " --- DISCO Finished .....\n\n")
    process_area.see(tk.END)
    process_area.update_idletasks()
if __name__ == '__main__':
    # Load the (large) Google word2vec model once, up front.
    print("\nLoading Google pre-trained Word2Vec ......\n")
    pretrained_model = KeyedVectors.load_word2vec_format('pretrained-word2vec/GoogleNews-vectors-negative300.bin', binary=True)
    # Build the three-pane tkinter window: article input, process log, results.
    print("\nStarting GUI of DISCO ......\n")
    root = tk.Tk()
    root.geometry('1520x680')
    root.title("DISCO")
    root.columnconfigure(0, weight=1)
    root.rowconfigure(0, weight=0)
    ttk.Label(root, text="DISCO: Comprehensive and Explainable Disinformation Detection", font=("Times New Roman", 18), justify="center").grid(column=0, row=0, columnspan=2)
    ttk.Label(root, text="Enter your articles :", font=("Bold", 12)).grid(column=0, row=1)
    text_area = scrolledtext.ScrolledText(root, wrap=tk.WORD, width=90, height=25, font=("Times New Roman", 15))
    text_area.grid(column=0, row=2, pady=10, padx=10, rowspan=3)
    # placing cursor in text area
    text_area.focus()
    ttk.Label(root, text="Process :", font=("Bold", 12)).grid(column=1, row=1)
    process_area = scrolledtext.ScrolledText(root, wrap=tk.WORD, width=50, height=8, font=("Times New Roman", 15))
    process_area.grid(column=1, row=2, pady=10, padx=10)
    # placing cursor in text area
    process_area.focus()
    ttk.Label(root, text="Results :", font=("Bold", 12)).grid(column=1, row=3)
    result_area = scrolledtext.ScrolledText(root, wrap=tk.WORD, width=50, height=13, font=("Times New Roman", 15))
    result_area.grid(column=1, row=4, pady=10, padx=10)
    # placing cursor in text area
    result_area.focus()
    # The Send button triggers the whole detection pipeline.
    ttk.Button(root, text='Send', width=10, command=disco).grid(column=0, row=5, pady=10, padx=10)
    root.mainloop()
25941402395 | # Shortest Path in Binary Matrix
'''
n x n binary matrix인 grid가 주어졌을 때, 출발지에서 목적지까지 도착하는
가장 빠른 경로의 길이를 반환하시오. 만약 경로가 없다면 -1을 반환하시오.
출발지 : top-left cell
목적지 : bottom-right cell
- 값이 0인 cell만 지나갈 수 있다.
- cell끼리는 8가지 방향으로 연결되어 있다. (edge와 corner 방향으로 총 8가지)
- 연결된 cell을 통해서만 지나갈 수 있다.
ex)
Input: grid = [
[0, 1],
[1, 0]
]
Output: 2
Input: grid = [
[0, 0, 0],
[1, 1, 0],
[1, 1, 0]
]
Output: 4
Input: grid = [
[1, 0, 0],
[1, 1, 0],
[1, 1, 0]
]
Output: -1
Input: grid = [
[0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 0, 1, 0],
[0, 1, 1, 1, 0, 1, 0],
[0, 1, 1, 1, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 0, 0],
]
Output: 11
제약조건
n == grid.length
n == grid[i].length
1 <= n <= 100
grid[i][j] is 0 or 1
'''
# ------------------------------------------------------------
'''
(1) 문제 이해
정점 개수 V -> 행길이 * 열길이 -> 최대 10000
(2) 접근 방법
bfs 이용
dfs로 탐색한다면 모든 경우의 수를 다 비교해봐야 함
bfs로 탐색한다면 가까운 곳부터 순차적으로 접근하다 목적지에 도착하면 그것이 최단경로 -> 보다 적합
bfs/dfs의 시간복잡도
-> O(V)
-> O(10000) -> O(10^4)
(3) 코드 설계
(4) 코드 구현
'''
# ------------------------------------------------------------
from collections import deque
def shortestPathBinaryMatrix(grid):
    """Length of the shortest clear path in a binary matrix.

    A clear path runs from the top-left to the bottom-right cell, visits
    only 0-cells, and may move in any of the 8 directions.  Returns -1 if
    no such path exists.  BFS guarantees the first arrival is shortest.
    """
    n_rows, n_cols = len(grid), len(grid[0])

    # Unreachable immediately if either endpoint is blocked.
    if grid[0][0] != 0 or grid[n_rows - 1][n_cols - 1] != 0:
        return -1

    seen = [[False] * n_cols for _ in range(n_rows)]
    seen[0][0] = True
    frontier = deque([(0, 0, 1)])  # (row, col, path length so far)
    steps = ((-1, 0), (1, 0), (0, -1), (0, 1),
             (-1, -1), (-1, 1), (1, -1), (1, 1))

    while frontier:
        row, col, length = frontier.popleft()
        if row == n_rows - 1 and col == n_cols - 1:
            return length
        for d_row, d_col in steps:
            nr, nc = row + d_row, col + d_col
            if (0 <= nr < n_rows and 0 <= nc < n_cols
                    and grid[nr][nc] == 0 and not seen[nr][nc]):
                seen[nr][nc] = True
                frontier.append((nr, nc, length + 1))
    return -1
# Quick sanity checks against the examples above: expected outputs 4, -1, 11.
print(
    shortestPathBinaryMatrix(
        grid = [
            [0, 0, 0],
            [1, 1, 0],
            [1, 1, 0]
        ]
    )
)
print(
    shortestPathBinaryMatrix(
        grid = [
            [1, 0, 0],
            [1, 1, 0],
            [1, 1, 0]
        ]
    )
)
print(
    shortestPathBinaryMatrix(
        grid = [
            [0, 0, 0, 1, 0, 0, 0],
            [0, 1, 1, 1, 0, 1, 0],
            [0, 1, 1, 1, 0, 1, 0],
            [0, 1, 1, 1, 0, 1, 0],
            [0, 1, 0, 0, 0, 1, 0],
            [0, 0, 0, 1, 1, 1, 0],
            [0, 1, 0, 0, 0, 0, 0],
        ]
    )
)
| cjkywe07/codingTestStudy | inflearn/ch06/shortestPath_05.py | shortestPath_05.py | py | 3,595 | python | ko | code | 0 | github-code | 13 |
10719050269 | from gensim.models import Doc2Vec
def init_model(tagged_articles, dimension_size, iterations):
    """Build and train a Doc2Vec model over the given tagged articles.

    :param tagged_articles: Iterable of gensim tagged documents.
    :param dimension_size: Dimensionality of the learned document vectors.
    :param iterations: Number of training epochs.
    :return: The trained Doc2Vec model.
    """
    doc_model = Doc2Vec(min_count=1, size=dimension_size, iter=iterations,
                        workers=1, window=4, seed=1)
    doc_model.build_vocab(tagged_articles)
    doc_model.train(tagged_articles)
    return doc_model
| vineetjohn/semeval2017-task5 | utils/doc2vec_helper.py | doc2vec_helper.py | py | 289 | python | en | code | 10 | github-code | 13 |
927337183 | import tkinter as tk # this is for Window creation
from tkinter import Tk, Label, Button, Menu, Entry, messagebox # that is required in my application
# from tkinter import *
import os # Python package os and it is used to know pwd
# Done by Yuling, Mohammed, Tejal and Shahzeb: Below are all classes are implemented as part of Object Oriented Programming.
class candidates:
    """One candidate standing for a position, with per-preference vote tallies."""

    def __init__(self, candi_name, Pin):
        self.candi_name = candi_name
        self.Pin = Pin
        # preference label -> vote count, e.g. {"1": 20, "2": 2}
        self.votePrf = {}

    def updateVoteForCandidate(self, Prf):
        """Add one vote for preference *Prf* (e.g. c1.updateVoteForCandidate("1"))."""
        self.votePrf[Prf] = self.votePrf.get(Prf, 0) + 1

    def reSetCount(self):
        """Zero every preference counter, keeping the keys."""
        for key in self.votePrf:
            self.votePrf[key] = 0
# not in use
class voters:
    """A voter identified by name.  Currently unused by the application."""

    def __init__(self, candi_name):
        # NOTE(review): the attribute is named candi_name although it holds
        # a voter's name — kept for compatibility.
        self.candi_name = candi_name
class votes:
    """A single cast vote: who voted, for which candidate, with what preference."""

    def __init__(self, VotersName, CandidateName, Preference):
        self.VotersName = VotersName
        self.CandidateName = CandidateName
        self.Preference = Preference
class winnerPin:
    """Holds a position (pin) together with its winning candidate and score."""

    def __init__(self, PinName, WinnerCandidateName, voteReceived):
        self.PinName = PinName
        self.WinnerCandidateName = WinnerCandidateName
        self.voteReceived = voteReceived
# Done by Yulin and Mohammed This class is for UI / window
class VotingGUI:
    def __init__(self, master):  # constructor
        """Build the main window: title, menu bar, and all mutable app state.

        :param master: The root tkinter window created at program start.
        """
        # master is the window that is created at the start of the program
        self.master = master  # master stores the window object in this class
        master.title("University of Greenwich Voting System")
        master.geometry('800x600')  # dimension
        # Start : Menu related Configuration
        self.menubar = Menu(master)  # menu
        master.config(menu=self.menubar, bg='white', borderwidth=1)
        master.configure(background='white')
        # --menu created --
        self.menubar.add_command(label="Login with UID and password", command=self.UserLoginForm)  # A1 & A2
        self.menubar.add_command(label="Cast votes", command=self.CastVotes)
        self.menubar.add_command(label="Results", command=self.Results)
        self.menubar.add_command(label="Logout", command=self.Logout)
        # End : Menu related Configuration
        self.Authe = 'false'  # if user is authenticated then its value would be TRUE
        self.WelcomeLabel = None
        self.UIDLabel = None
        self.PWDLabel = None
        self.UIDInput = None
        self.PWDInput = None
        self.Login = None  # UserName
        self.LoginFormDisplayed = 'false'  # if it is true then Login form is displayed on UI
        self.candidateList = []  # python list All candidates
        self.VotingPins = {}  # dict of all Positions and number of candidates
        self.CandidatePinDict = {}  # stores Position and list of candidates contesting for that position
        self.LoadCandidates()  # A3 : Load candidate from text file
        self.voterList = []  # List of Voters
        self.listofLabels_Prf = {}  # List of candidate and given preference by voter
        self.voteObjectList = []  # list of vote Class Object
        self.WinnerCandidate = None
        self.winnerPinList = []
        self.DisplayedItem = []
        self.VoteDoneList = []
def Display(self): # Done by Shahzeb and Tejal A6
self.removeVoteCastUI() # Clean UI before adding element on Window
xx = 20
yy = 40
hh = 20
cc = 0
aa = 0
for key in self.winnerPinList:
if key in self.DisplayedItem:
continue
else:
self.DisplayedItem.append(key)
totalVote = 0
label = Label(self.master,
text="************************************************************************",
bg='white')
label.place(x=xx + 40, y=yy, width=230, height=hh)
self.listofLabels_Prf[label] = ""
yy = yy + hh
label = Label(self.master, text="Position : " + key.PinName, bg='white')
self.listofLabels_Prf[label] = ""
label.place(x=xx + 40, y=yy, width=130, height=hh)
yy = yy + hh
label = Label(self.master,
text="************************************************************************",
bg='white')
self.listofLabels_Prf[label] = ""
label.place(x=xx + 40, y=yy, width=230, height=hh)
yy = yy + hh
label = Label(self.master, text="Candidate ", bg='white')
self.listofLabels_Prf[label] = ""
label.place(x=xx + 40, y=yy, width=130, height=hh)
cc = yy
yy = yy + hh
aa = xx
for x in range(self.VotingPins[key.PinName]):
xx = xx + 130
label = Label(self.master, text="Preference " + str(x + 1), bg='white')
self.listofLabels_Prf[label] = ""
label.place(x=xx + 40, y=cc, width=130, height=hh)
for c in self.candidateList:
if c.Pin == key.PinName:
xx = aa + 40
cc = cc + hh
label = Label(self.master, text=c.candi_name, bg='white')
self.listofLabels_Prf[label] = ""
label.place(x=xx, y=cc, width=130, height=hh)
for x in range(self.VotingPins[key.PinName]):
xx = xx + 130
print(c.votePrf)
if str(x + 1) in c.votePrf:
votecount = c.votePrf[str(x + 1)]
totalVote = totalVote + votecount
else:
votecount = 0
label = Label(self.master, text=votecount, bg='white')
self.listofLabels_Prf[label] = ""
label.place(x=xx + 40, y=cc, width=130, height=hh)
label = Label(self.master,
text="**********************************************************************", bg='white')
self.listofLabels_Prf[label] = ""
cc = cc + hh
label.place(x=60, y=cc, width=230, height=hh)
label = Label(self.master, text="Winner :" + key.WinnerCandidateName, bg='white')
self.listofLabels_Prf[label] = ""
cc = cc + hh
label.place(x=60, y=cc, width=130, height=hh)
label = Label(self.master, text="Vote Received :" + str(key.voteReceived), bg='white')
self.listofLabels_Prf[label] = ""
cc = cc + hh
label.place(x=60, y=cc, width=130, height=hh)
label = Label(self.master, text="Total Vote cast :" + str(len(self.voterList)), bg='white')
self.listofLabels_Prf[label] = ""
cc = cc + hh
label.place(x=60, y=cc, width=130, height=hh)
self.validate = Button(self.master, text="Next", command=self.Display, bg='green', fg='white')
self.validate.place(x=80, y=cc + 20, width=100, height=25)
break
def Results(self):
self.clearWindow() # clear window
self.CountVotes() # A5 count vote and do manipulation
self.Display() # A6
def Logout(self):
self.removeVoteCastUI()
self.CandidatePinDict.clear()
self.listofLabels_Prf.clear()
self.clearWindow()
self.Authe = 'false'
self.Login = None
self.LoginFormDisplayed = 'false'
print(self.voterList)
# Done by Mohammed A3 -Load All candidates from GSUCandidates.txt
def LoadCandidates(self): # A3 -Load All candidates from GSUCandidates.txt
self.clearWindow()
self.msg = Label(self.master, text="Welcome to UoG Voting System", font=('courier', 20, 'bold'), bg='white')
self.msg.place(x=25, y=40, width=800, height=50)
StudentVoters = open('GSUCandidates.txt', 'r')
Lines = StudentVoters.readlines()
for line in Lines:
candi_name = " ".join(line.strip().split(" ")[1:])
postn = line.strip().split(" ")[0].strip()
recordfound = 'false'
if len(self.candidateList) > 0:
for c in self.candidateList: # validate One candidate for one position
if c.candi_name == candi_name:
recordfound = 'true'
if recordfound != 'true':
self.candidateList.append(candidates(candi_name, postn))
else:
recordfound = 'false'
else:
self.candidateList.append(candidates(candi_name, postn))
pass
pass
# returns candidate position and takes input as candidate name
def findPinofCandidate(self, candi_name):
for key in self.CandidatePinDict:
if candi_name in self.CandidatePinDict[key]:
return key
# Done by Yulin, Tejal, Shahzeb and Mohammed A4 Cast votes
def CastVotes(self): # A.4
if self.Authe != 'false':
self.removeVoteCastUI() # Clean UI before adding element on Window
self.CandidatePinDict.clear() # clean before initi
self.clearWindow() # Clear Window
# CandidatePositionDict stores position and List Of employee contesting for that position
for c in self.candidateList:
if c.Pin in self.CandidatePinDict:
self.CandidatePinDict[c.Pin].append(c.candi_name)
pass
else:
l = []
l.append(c.candi_name)
self.CandidatePinDict[c.Pin] = l
# Display list of candidate
xx = 20
yy = 40
hh = 20
byy = 0
counter = 0
for key in self.CandidatePinDict:
label = Label(self.master, text="For " + key, bg="sea green", fg='white', font=('courier', 10, 'bold'))
label.place(x=xx, y=yy, width=170, height=hh)
yy = yy + hh
self.listofLabels_Prf[label] = ""
for c in self.CandidatePinDict[key]:
label = Label(self.master, text=c, bg="pale green")
label.place(x=xx, y=yy, width=120, height=hh)
Input = Entry(self.master, validate="key")
Input['validatecommand'] = (Input.register(self.testVal), '%P', '%d')
Input.place(x=xx + 121, y=yy, width=50, height=hh)
self.listofLabels_Prf[label] = Input
yy = yy + hh
counter = counter + 1
self.VotingPins[key] = counter
counter = 0
xx = xx + 180
if byy < yy:
byy = yy
yy = 40
self.validate = Button(self.master, text="Validate & Submit", command=self.ValidateVote, bg="green",
fg='white')
self.validate.place(x=xx, y=byy + 20, width=120, height=25)
else:
messagebox.showinfo("Error", "Please first Login with UID and password.")
self.removeVoteCastUI() # Clean UI before adding element on Window
self.UserLoginForm()
pass
# Done by Shahzeb, Tejal, Yulin and Mohammed: part of A4 - After voter user click Validate & Submit
# Done by Shahzeb, Tejal, Yulin and Mohammed: part of A4 - It will validate preferences and validate duplicate, uniquness and other criteria
# Done by Shahzeb, Tejal, Yulin and Mohammed: part of A4 - after validation it will store vote in votes.txt
def ValidateVote(self):
l = []
for key in self.listofLabels_Prf:
if not isinstance(self.listofLabels_Prf[key], str):
maxCount = self.VotingPins[self.findPinofCandidate(key.cget("text"))]
if self.listofLabels_Prf[key].get() == "":
messagebox.showinfo("Error", key.cget("text") + " No preference?")
return ""
if int(self.listofLabels_Prf[key].get()) > maxCount:
messagebox.showinfo("Error", key.cget(
"text") + " Preference is not correct. it should be less then or equal to " + str(maxCount))
return ""
if self.listofLabels_Prf[key].get() in l:
messagebox.showinfo("Error", key.cget("text") + " duplicate Preference issue.")
return ""
else:
l.append(self.listofLabels_Prf[key].get())
else:
l = []
self.CreateVotes()
self.removeVoteCastUI()
self.Logout()
# Done by Tejal and Shahzeb A 5 : Function to count votes as per logic
def CountVotes(self): # A.5
for candidate in self.candidateList:
candidate.reSetCount() # Reset Before doing reCount
for votes in self.voteObjectList:
for candidate in self.candidateList:
if candidate.candi_name == votes.CandidateName:
candidate.updateVoteForCandidate(
votes.Preference) # Update Preference and Count in each Candidate Object
print(candidate.votePrf)
self.Winner() # A.6
# function to get candidate name based on position and preference
def getCandidatename(self, winnerScore, Prf, post):
for cand in self.candidateList:
if str(Prf) in cand.votePrf:
if cand.votePrf[str(Prf)] == winnerScore and cand.Pin == post:
return cand.candi_name
# Done by Shahzeb and Tejal: part of A 5 : Calculate winner for each position
def Winner(self):
winnerCandidateScoreList = []
self.winnerPinList.clear()
winnerScore = 0
preference = 0
for post in self.VotingPins: # All position and count of candidates on position
print("Preference for post-", post, " is ", self.VotingPins[post])
winnerCandidateScoreList.clear()
for Prf in range(self.VotingPins[post]): # loop through number of preference
for cand in self.candidateList:
if cand.Pin == post:
if str(Prf + 1) in cand.votePrf:
winnerCandidateScoreList.append(cand.votePrf[str(Prf + 1)])
winnerCandidateScoreList.sort(reverse=True) # Sort List in Descending Order
print("winnerCandidateScoreList :", winnerCandidateScoreList, Prf + 1)
if len(winnerCandidateScoreList) > 1:
a, b, *_ = winnerCandidateScoreList
if a > b and a != b:
winnerScore = a
preference = Prf + 1
break
else:
winnerScore, *_ = winnerCandidateScoreList
preference = Prf + 1
break
self.winnerPinList.append(
winnerPin(post, self.getCandidatename(winnerScore, preference, post), winnerScore))
# Done by Shahzeb, Tejal, Yulin and Mohammed: part of A4 : Create vote object for after submit button
def CreateVotes(self):
for key in self.listofLabels_Prf:
if not isinstance(self.listofLabels_Prf[key], str):
self.voteObjectList.append(votes(self.Login, key.cget("text"), self.listofLabels_Prf[key].get()))
self.writeVotetofile() # store in file
messagebox.showinfo("Done", "Your vote has been written in file " + os.getcwd() + "\Votes.txt")
pass
# Done by Yulin: part of A2 : method to show welcome message
def welcomeUser(self, u): # Invoked to show welcome Caption after login
self.clearWindow() # UI is clean
# now it is adding New labals
self.WelcomeLabel = Label(self.master, text=u + "! You are Welcome.", font=('courier', 15, 'bold'), bg='white')
self.WelcomeLabel.place(x=30, y=40, width=920, height=25)
pass
# Done by Yulin: part of A2 : method to remove UI component
def removeVoteCastUI(self):
if len(self.listofLabels_Prf) > 0:
self.validate.destroy()
for key in self.listofLabels_Prf:
if key:
key.destroy()
if not isinstance(self.listofLabels_Prf[key], str):
self.listofLabels_Prf[key].destroy()
# Done by Yulin: part of A2 : remove Login/password/login button from UI and show welcome message
def clearWindow(self):
print("clearWindow")
if self.UIDLabel:
self.UIDLabel.destroy()
if self.PWDLabel:
self.PWDLabel.destroy()
if self.UIDInput:
self.UIDInput.destroy()
if self.PWDInput:
self.PWDInput.destroy()
if self.Login:
self.Login.destroy()
if self.WelcomeLabel:
self.WelcomeLabel.destroy()
# Done by Shahzeb, Tejal, Yulin and Mohammed: part of A4 - Write vote in votes.txt. It stores votes.txt in directory
def writeVotetofile(self):
f = open('Votes.txt', 'w')
for v in self.voteObjectList:
s = self.findPinofCandidate(v.CandidateName) + " " + v.CandidateName + " " + v.Preference
f.write(s + '\n')
f.close()
# part of A2 this ValidateUser method will internally call from UserLoginForm method to validate if user is valid for voting
# part of A2 - Validates Login user after click login button and it will be invoked from UserLoginForm method
# part of A2 it will cross check login user/ password from 'StudentVoters.txt' file
# part of A2 - if user login / password is correct then it will show welcome message by calling welcomeUser method
def ValidateUser(self):
if self.UIDInput.get() not in self.VoteDoneList:
self.VoteDoneList.append(self.UIDInput.get())
if self.UIDInput.get() != "" and self.PWDInput.get() != "":
# Read StudentVoters.txt readlines()
StudentVoters = open('StudentVoters.txt', 'r')
Lines = StudentVoters.readlines()
for line in Lines:
u = line.strip().split(" ")[0].strip()
p = line.strip().split(" ")[1].strip()
if u == self.UIDInput.get():
if p == self.PWDInput.get():
self.Authe = 'true'
break
if self.Authe == 'false':
messagebox.showinfo("Error", "Person is not eligible for vote.")
else:
self.welcomeUser(u) # A2 method to show welcome message for logged in user
self.voterList.append(voters(u))
else:
messagebox.showinfo("Error", "Please Enter UID / Password")
else:
messagebox.showinfo("Error", "You have already voted.")
self.LoginFormDisplayed = 'false'
self.clearWindow() # Clears UI element of user Id/Password/Login button
# this testVal method is to validate if user has input preference of candidate must be digit
def testVal(self, inStr, acttyp):
if acttyp == '1': # insert
if not inStr.isdigit():
return False
return True
# Done by Shahzeb, Tejal, Yulin and Mohammed: this is first method to show login password and part of A1 & A2
# It will be invoked when user will click Login/Password menu
# this method will internally call Function ValidateUser to validate if user is valid for voting
def UserLoginForm(self): # A1 & A2
if self.LoginFormDisplayed == 'false':
self.LoginFormDisplayed = 'true'
if self.Authe == 'false':
self.msg.destroy()
self.UIDLabel = Label(self.master, text="UID :")
self.UIDLabel.place(x=20, y=40, width=120, height=25)
self.UIDInput = Entry(self.master)
self.UIDInput.place(x=141, y=40, width=120, height=25)
self.PWDLabel = Label(self.master, text="Password :")
self.PWDLabel.place(x=20, y=67, width=120, height=25)
self.PWDInput = Entry(self.master)
self.PWDInput.place(x=141, y=67, width=120, height=25)
self.Login = Button(self.master, text="Login", command=self.ValidateUser, bg="green",
fg='white') # Login Button click will call ValidateUser user method
self.Login.place(x=200, y=100, width=60, height=25)
else:
messagebox.showinfo("Warning", "You are already authenticated.")
value = 'default text' # NOTE(review): appears unused in this module -- confirm before removing
# program will start from here.
root = Tk() # this will create a window root
my_gui = VotingGUI(root) # this class is taking input of window root
root.mainloop()
| syed66/Student-voting-system | Main.py | Main.py | py | 21,945 | python | en | code | 0 | github-code | 13 |
72275818257 | # TODO: check format of file? guess format maybe; use BioPython to parse variety of formats?
#: set: valid IUPAC nucleotide characters for checking FASTA format
# Both cases of the 15 IUPAC nucleotide codes, plus X/x for masked nucleotides.
VALID_NUCLEOTIDES = set('AaCcGgTtRrYySsWwKkMmBbDdHhVvNnXx')
def parse_fasta(filepath):
    '''
    Parse a fasta file returning a generator yielding tuples of fasta headers to sequences.

    Matches Bio.SeqIO's description/sequence pairs for well-formed files:
    the header is everything after the leading '>', sequence lines are
    concatenated, and a file with no records yields nothing (the old code
    stripped *every* '>' from the header line and yielded a bogus
    ('', '') record for an empty file).

    Args:
        filepath (str): Fasta file path

    Returns:
        generator: yields tuples of (<fasta header>, <fasta sequence>)
    '''
    with open(filepath, 'r') as f:
        seqs = []
        header = None
        for line in f:
            line = line.strip()
            if line == '':
                continue
            if line[0] == '>':
                # Flush the previous record before starting a new one.
                if header is not None:
                    yield header, ''.join(seqs)
                    seqs = []
                # Slice off only the leading '>' so any '>' inside the
                # description is preserved (SeqIO behaviour).
                header = line[1:]
            else:
                seqs.append(line)
        if header is not None:
            yield header, ''.join(seqs)
def fasta_format_check(fasta_path, logger):
    """
    Check that a file is valid FASTA format.

    - First non-blank line needs to begin with a '>' header character.
    - Sequence can only contain valid IUPAC nucleotide characters

    Args:
        fasta_path (str): Path of the FASTA file to check
        logger: logger used to report validation errors and success

    Raises:
        Exception: If invalid FASTA format
    """
    header_count = 0
    nt_count = 0
    with open(fasta_path) as f:
        # enumerate gives the true 1-based line number; the old hand-rolled
        # counter skipped blank and header lines, so every reported line
        # number drifted further off as the file went on.
        for line_count, l in enumerate(f, start=1):
            l = l.strip()
            if l == '':
                continue
            if l[0] == '>':
                header_count += 1
                continue
            if header_count == 0:
                error_msg = 'First non-blank line (L:{line_count}) does not contain FASTA header. Line beginning with ">" expected.' \
                    .format(line_count=line_count)
                logger.error(error_msg)
                raise Exception(error_msg)
            non_nucleotide_chars_in_line = set(l) - VALID_NUCLEOTIDES
            if len(non_nucleotide_chars_in_line) > 0:
                error_msg = 'Line {line} contains the following non-nucleotide characters: {non_nt_chars}' \
                    .format(line=line_count,
                            non_nt_chars=', '.join([x for x in non_nucleotide_chars_in_line]))
                logger.error(error_msg)
                raise Exception(error_msg)
            nt_count += len(l)
    if nt_count == 0:
        error_msg = 'File "{}" does not contain any nucleotide sequence.'.format(fasta_path)
        logger.error(error_msg)
        raise Exception(error_msg)
    logger.info('Valid FASTA format "{}" ({} bp)'.format(fasta_path, nt_count))
| phac-nml/sistr_cmd | sistr/src/parsers.py | parsers.py | py | 3,758 | python | en | code | 21 | github-code | 13 |
10963963907 | from . import icons, panel, preferences, localdb, ops, test, key, pie
# Add-on metadata dictionary read by Blender's add-on manager
# (must be a module-level name spelled exactly 'bl_info').
bl_info = {
    "name": "POPOTI Align Helper",
    "description": "More friendly alignment based on observation perspective",
    "author": "AIGODLIKE社区,小萌新",
    "version": (1, 2, 0),
    "blender": (3, 0, 0),
    "location": "Tool Panel",
    "support": "COMMUNITY",
    "category": "辣椒出品",
}
# Sub-modules to (un)register; register() walks this tuple in order.
mod_tuple = (
    pie,
    key,
    ops,
    test,
    icons,
    panel,
    localdb,
    preferences,
)
def register():
    """Register every sub-module with Blender, in declaration order."""
    for module in mod_tuple:
        module.register()
def unregister():
    """Unregister every sub-module, in the same order they were registered."""
    for module in mod_tuple:
        module.unregister()
| AIGODLIKE/popoti_align_helper | __init__.py | __init__.py | py | 628 | python | en | code | 5 | github-code | 13 |
7676896982 | from collections import Counter
# Function to check if a number is a permutation of other
# it takes two strings as parameters, both being the respective numbers
def is_perm(n, x):
    """Return True if digit strings *n* and *x* are permutations of each other.

    Two strings are permutations exactly when their sorted characters match;
    the comparison also covers the length check the old code did by hand.
    """
    return sorted(n) == sorted(x)
# used counter dict to keep count of the number of cubic permutation of each number
# NOTE(review): every base 1..9999 starts with a count of 1, so a count of 5
# means four *other* cubes were found to be digit permutations of its cube
# (five permuted cubes in total, counting the base's own cube).
cubes = Counter([a for a in range(1, 10000)])
i = 3
# Brute-force search (Project Euler 62): compare each new cube i**3 against
# every smaller cube and credit the smaller base j of each permutation pair.
# This is O(i) work per step, so it runs for a long time before terminating.
while True:
    for j in range(2, i):
        if is_perm(str(i ** 3), str(j ** 3)):
            cubes[j] += 1
            break
    i += 1
    # Stop as soon as some permutation family reaches five cubes and print
    # the cube of its smallest base.
    if cubes.most_common()[0][1] == 5:
        print(cubes.most_common()[0][0]**3)
        break
| notBlurryFace/project-euler | PE062.py | PE062.py | py | 800 | python | en | code | 1 | github-code | 13 |
39241066556 | import os
import shutil
def remove(path: str):
    """
    Delete *path* if it exists: directory trees recursively, files directly.
    Missing paths are silently ignored.
    """
    if not os.path.exists(path):
        return
    if os.path.isdir(path):
        shutil.rmtree(path)
    else:
        os.remove(path)
def mkfile(path: str):
    """
    Create (or truncate) an empty UTF-8 file at *path*, creating any missing
    parent directories first.

    Bug fix: for a bare file name ('plain.txt') os.path.split() yields an
    empty directory component, and the old code then called os.makedirs('')
    which raises FileNotFoundError. The parent is now only created when it
    is non-empty.
    """
    parent = os.path.split(path)[0]
    if parent and not os.path.exists(parent):
        os.makedirs(parent)
    with open(path, "w", encoding="utf-8") as file:
        file.write("")
def mkdir(path: str):
    """
    Create directory *path* (including parents); do nothing when something
    already exists at that path.
    """
    if os.path.exists(path):
        return
    os.makedirs(path)
| n00-name/12345 | ide/utils/files.py | files.py | py | 574 | python | en | code | 0 | github-code | 13 |
25935309643 | from argparse import ArgumentParser
import numpy as np
from src.best_pairs_finder_non_brute_force import BestPairsFinderNonBruteForce
from src.best_pairs_finder import BestPairsFinder
class ParseArgs:
    """Parses, sanity-checks and exposes the command-line arguments of the
    optimal-pair finder."""

    def convert_to_dictionary(self, arg):
        """
        Convert the argparse namespace *arg* read from the terminal into a
        plain dictionary keyed by 'input', 'output', 'method', 'steps', 'energy'.
        """
        arguments = {
            'input': arg.i[0],
            'output': arg.o[0],
            'method': arg.m,
            'steps': arg.s,
            'energy': arg.e
        }
        return arguments

    def parse_arguments(self):
        """
        Parse the arguments from the terminal, store them in
        self.arguments_as_dictionary and run the sanity checks.
        """
        parser = ArgumentParser()
        parser.add_argument('-i', metavar='--input', type=str, nargs=1,
                            required=True, action='store',
                            help='input file')
        parser.add_argument('-o', metavar='--output', type=str, nargs=1,
                            required=True, action='store',
                            help='output file')
        parser.add_argument('-m', metavar='--method', type=str,
                            required=False, action='store',
                            choices=['MC', 'bf', 'Monte_Carlo', 'brute_force'],
                            default='MC',
                            help='method. Possible choices: MC, bf, Monte_Carlo or brute_force')
        parser.add_argument('-s', metavar='--steps', type=int,
                            default=20, help='number of Monte-Carlo steps')
        parser.add_argument('-e', metavar='--energy', type=float,
                            default=-1, help='Activation energy for Monte-Carlo')
        args = parser.parse_args()
        # store a dictionary containing the arguments
        self.arguments_as_dictionary = self.convert_to_dictionary(args)
        # sanity check
        self.arguments_sanity_check()

    def arguments_sanity_check(self):
        """Raise RuntimeError when steps or the activation energy is
        negative; -1 is the 'energy not set' sentinel and is accepted."""
        # check if the number of steps is positive
        if self.arguments_as_dictionary['steps'] < 0:
            raise RuntimeError('Number of Monte-Carlo steps must be positive.')
        # check if the activation energy is positive
        if (self.arguments_as_dictionary['energy'] < 0) and\
                (self.arguments_as_dictionary['energy'] != -1):
            # Bug fix: this branch used to repeat the steps message, hiding
            # which argument was actually invalid.
            raise RuntimeError('Activation energy must be positive.')

    def name_of_input(self):
        return self.arguments_as_dictionary['input']

    def name_of_output(self):
        return self.arguments_as_dictionary['output']

    def name_of_method(self):
        return self.arguments_as_dictionary['method']

    def number_of_steps(self):
        return self.arguments_as_dictionary['steps']

    def energy(self):
        return self.arguments_as_dictionary['energy']
def check_if_data_contain_only_pairs(list_of_pairs):
    """Raise RuntimeError unless every entry has exactly two elements;
    return True otherwise (also for an empty list)."""
    if any(len(entry) != 2 for entry in list_of_pairs):
        raise RuntimeError('data must contains a list with pairs')
    return True
def read_input(fname, dtype=float):
    """Load whitespace-separated numeric rows from *fname* via numpy and
    return them converted to nested Python lists."""
    return np.loadtxt(fname=fname, dtype=dtype).tolist()
def write_output(fname, data):
    """Validate that *data* holds only pairs, then write one 'a, b' line
    per pair to *fname*."""
    check_if_data_contain_only_pairs(data)
    with open(fname, 'w') as out:
        for first, second in data:
            out.write(str(first) + ', ' + str(second) + '\n')
if __name__ == '__main__':
    # Parse arguments
    arguments = ParseArgs()
    arguments.parse_arguments()
    # Name of files and method
    # NOTE(review): 'input' shadows the builtin of the same name (harmless
    # in this short script, but worth renaming eventually).
    input = arguments.name_of_input()
    output = arguments.name_of_output()
    method = arguments.name_of_method()
    max_steps = arguments.number_of_steps()
    energy = arguments.energy()
    # Read input
    positions = read_input(fname=input,dtype=float)
    # Find best pairs following the specified method
    if( method == 'Monte_Carlo' or method == 'MC' ):
        pairs_finder = BestPairsFinderNonBruteForce()
        pairs = pairs_finder.find_best_pairs( \
            particle_positions=positions, MAX_ITERATIONS=max_steps,
            activation_energy=energy )
    elif ( method == 'brute_force' or method == 'bf' ):
        pairs_finder = BestPairsFinder()
        pairs = pairs_finder.find_best_pairs( \
            particle_positions=positions )
    else:
        # Unreachable via argparse (choices= restricts -m), kept as a guard.
        raise RuntimeError('Method to find best pairs has not been defined')
    # Write to output
    write_output(fname=output,data=pairs)
| Team2Munchkin/particle_project | src/find_optimal_pairs.py | find_optimal_pairs.py | py | 4,250 | python | en | code | 0 | github-code | 13 |
28187724655 |
import sys
from PyQt4.QtCore import Qt
from PyQt4.QtGui import *
app = QApplication([])
tableWidget = QTableWidget()
tableWidget.setContextMenuPolicy(Qt.ActionsContextMenu)
quitAction = QAction("Quit", None)
quitAction.triggered.connect(app.quit)
tableWidget.addAction(quitAction)
tableWidget.show()
sys.exit(app.exec_()) | brownharryb/webtydesk | example_can_delete.py | example_can_delete.py | py | 326 | python | en | code | 0 | github-code | 13 |
6616324034 | agent = [(0, 0), (1, 3)]
prob = {(0, 0): 0.4, (1, 3): 0.6}
target = [(0, 1), (0, 2), (1, 1), (1, 2)]
probs = [0, 0, 0, 0, 0, 0]
# Map the (row, col) offset from agent to target onto an observation index;
# any other offset falls through to the catch-all bucket 5.
# NOTE(review): the original elif chain tested offset (1, 0) twice (the
# branches for probs[2] and probs[4]), so index 4 could never be
# incremented; this table reproduces that behaviour exactly.
offset_to_index = {(0, 0): 0, (0, 1): 1, (1, 0): 2, (0, -1): 3}
for agent_row, agent_col in agent:
    for target_row, target_col in target:
        weight = prob[(agent_row, agent_col)] * 0.25
        offset = (target_row - agent_row, target_col - agent_col)
        probs[offset_to_index.get(offset, 5)] += weight
print(probs) | Aa-Aanegola/ML-Assignments | Assignment_3/Part_2/calcObs.py | calcObs.py | py | 588 | python | en | code | 0 | github-code | 13 |
37412156879 | import re
import csv
def file_to_array(file_name, validation, ignore_line_1=True):
    """
    Read a comma-separated file into a list of rows (each row a list of
    string fields; the trailing newline is kept on a row's last field,
    matching the historical behaviour of this parser).

    validation == True  -> every 4th row (held-out quarter),
    validation == False -> the remaining three quarters of the rows,
    any other value (e.g. "all") -> all rows.
    """
    # Context manager so the handle is always closed; the old code used the
    # Python-2-only file() builtin and never closed it.
    with open(file_name) as handle:
        lines = handle.readlines()
    if ignore_line_1 == True:
        lines.pop(0)
    output = [line.split(',') for line in lines]
    if validation == True:
        output = output[3::4]
    elif validation == False:
        output = output[0::4] + output[1::4] + output[2::4]
    return output
# Take a file path as input.
def file_to_hash(file_name, key, value, validation="all"):
    """Map column *key* to a space-prefixed concatenation of column *value*
    across all rows selected by *validation* (see file_to_array)."""
    output = {}
    for row in file_to_array(file_name, validation):
        row_key = str(row[key])
        output[row_key] = output.get(row_key, '') + ' ' + row[value]
    return output
# Accepts a hash an input.
# THe has should be keys to strings.
# It returns the hash with the strings formatted.
def format_words(data):
    """Return a new dict with every value of *data* run through format_string."""
    return {key: format_string(text) for key, text in data.items()}
def format_string(string):
    """
    Normalise a string: strip quote characters, lowercase, collapse runs of
    whitespace to a single space and drop a leading blank / blanks before
    newlines.
    """
    v = re.sub(r"[\"']", '', string)  # remove quotes
    v = v.lower()  # lowercase
    # Collapse whitespace runs to one space. The old replacement string was
    # "\s", which inserted a literal backslash-s in Python 2 and raises
    # re.error ("bad escape") on modern Python -- clearly a bug.
    v = re.sub(r"\s{2,}", " ", v)
    v = re.sub(r"(\s\n|\A\s)", '', v)  # remove end/beginning of line blank space
    return v
# Accepts a hash as input. It takes a hash of keys to strings and counts the words in each string.
# It returns the hash of key to the count
def word_count_hash(data, ngram=1):
    """
    Convert {key: text} into {key: {ngram: count}}.

    NOTE(review): this mutates *data* in place -- ``output`` aliases the
    argument, so the caller's dict comes back with its string values
    replaced by count dicts. Confirm callers rely on (or at least tolerate)
    that before changing it.
    """
    output = data
    for sku,v in data.items():
        # v still refers to the original text even after output[sku] is
        # replaced with the counts dict on the next line.
        output[sku] = {}
        for word in tokenize(v, ngram):
            if word in output[sku]:
                count = output[sku][word] + 1
            else:
                count = 1
            output[sku][word] = count
    return output
def slice(data, index):
    """Return column *index* from every row of *data*.

    NOTE: the name shadows the builtin ``slice`` within this module; kept
    for backward compatibility with existing callers.
    """
    return [row[index] for row in data]
def string_to_hash(string):
    """Count single-word occurrences in *string* after normalising it with
    format_string."""
    word_count = {}
    for token in tokenize(format_string(string)):
        word_count[token] = word_count.get(token, 0) + 1
    return word_count
def tokenize(sentence, ngram=1):
    """Split *sentence* on single spaces and return its n-grams of size
    *ngram* as space-joined strings (empty list when ngram exceeds the
    token count).

    The old implementation used a nested xrange loop (Python-2-only) whose
    inner range could only ever produce one value per i -- this is the
    equivalent straightforward form.
    """
    tokens = sentence.split(' ')
    return [' '.join(tokens[i:i + ngram]) for i in range(len(tokens) - ngram + 1)]
def write_predictions(predictions, csv_file):
    """Write *predictions* to *csv_file*: a 'sku' header row, then one row
    per prediction with its items joined by spaces."""
    with open(csv_file, "w") as outfile:
        writer = csv.writer(outfile, delimiter=",")
        writer.writerow(["sku"])
        writer.writerows([" ".join(pred)] for pred in predictions)
| pmiller10/best_buy | kaggle.py | kaggle.py | py | 2,358 | python | en | code | 0 | github-code | 13 |
8442554274 | # get all files
import glob
directory = 'ScaledWiki/Tests/Task07/1_24_23/Figs'
pngs = glob.glob(directory + '/*')
import markdown
output = ""
for figure in pngs:
output += "\n" % figure
with open("Figures.md", "w") as f:
f.write(markdown.markdown(output))
| greyliedtke/PyExplore | SubProjects/DocGeneration/SPI/fig_to_md.py | fig_to_md.py | py | 280 | python | en | code | 0 | github-code | 13 |
35596778209 | from .i2cDevice import *
from ..device import pyLabDataLoggerIOError
import datetime, time
import numpy as np
from termcolor import cprint
try:
import Adafruit_ADS1x15
except ImportError:
cprint( "Error, could not load Adafruit_ADS1x15 library", 'red', attrs=['bold'])
########################################################################################################################
class ads1x15Device(i2cDevice):
""" Class providing support for Adafruit's ADS1x15 breakout boards (ADS1015, ADS1115).
Specify I2C bus, address and driver (ADS1015/ADS1115) on initialisation.
Channel gains can be specified with the gain config parameter (a list).
Setting 'differential' parameter True gives 2 outputs instead of 4. """
# Establish connection to device
def activate(self):
assert self.params['address']
assert self.params['bus']
if not 'driver' in self.params.keys(): self.params['driver']='ads1115'
if 'name' in self.params: self.name = self.params['name']+' %i:%s' % (self.params['bus'],hex(self.params['address']))
if self.params['driver']=='ADS1115':
self.ADC = Adafruit_ADS1x15.ADS1115(address=int(self.params['address'],16), busnum=self.params['bus'])
elif self.params['driver']=='ADS1015':
self.ADC = Adafruit_ADS1x15.ADS1015(address=int(self.params['address'],16), busnum=self.params['bus'])
else:
cprint( "Error: unknown driver. Choices are ADS1015 or ADS1115" ,'red',attrs=['bold'] )
return
if not 'differential' in self.params.keys():
self.diffDefault=True
self.diff=True
else:
self.diffDefault=False
self.diff=self.params['differential']
if self.diff:
self.params['n_channels']=2
if not 'channel_names' in self.config:
self.config['channel_names']=['ChA','ChB']
else:
self.params['n_channels']=4
if not 'channel_names' in self.config:
self.config['channel_names']=['Ch1','Ch2','Ch3','Ch4']
self.params['raw_units']=['V']*self.params['n_channels']
self.config['eng_units']=['V']*self.params['n_channels']
self.config['scale']=np.ones(self.params['n_channels'],)
self.config['offset']=np.zeros(self.params['n_channels'],)
if 'gain' in self.params: self.config['gain']=self.params['gain']
cprint( "Activating %s on i2c bus at %i:%s with %i channels" % (self.params['driver'],self.params['bus'],hex(self.params['address']),self.params['n_channels']) , 'green' )
if self.diffDefault: print("\tDifferential mode (default)")
elif self.diff: print("\tDifferential mode specified")
else: print("\tSingle-ended mode")
self.apply_config()
self.driverConnected=True
return
# Apply configuration (i.e. gain parameter)
def apply_config(self,default_gain=2/3):
valid_gain_values=[2/3, 1,2,4,8,16]
if not 'gain' in self.config.keys(): self.config['gain']=[default_gain]*self.params['n_channels']
for chg in self.config['gain']:
if not chg in valid_gain_values:
cprint( "Error, gain values are invalid. Resetting", 'yellow' )
self.config['gain']=[default_gain]*self.params['n_channels']
return
# Update device with new value, update lastValue and lastValueTimestamp
def query(self):
assert self.ADC
# Read all the ADC channel values in a list.
values = [0]*4
for i in range(4):
if self.diff: j=i/2
else: j=i
values[i] = self.ADC.read_adc(i, gain=self.config['gain'][j])*4.096/32768.
if self.config['gain'][j]==0: values[i] /= 2/3.
else: values[i] /= self.config['gain'][j]
self.updateTimestamp()
if self.diff:
self.lastValue=[values[0]-values[1],values[2]-values[3]]
else:
self.lastValue=values
self.lastScaled = np.array(self.lastValue) * self.config['scale'] + self.config['offset']
return
# End connection to device.
def deactivate(self):
del self.ADC
| djorlando24/pyLabDataLogger | src/device/i2c/ads1x15Device.py | ads1x15Device.py | py | 4,282 | python | en | code | 11 | github-code | 13 |
73937024019 | import unittest
from modules.card_slot_mod import CardSlot
from modules.game_controller_mod import GameController
from unittest.mock import MagicMock
from unittest.mock import Mock
from modules.game_model_mod import GameModel
test_winning_value_cases = [
[54, None],
[0, None],
[105, 10],
[99, None],
[104, 8],
[100, 1],
[103, 1],
]
class GameControllerModule(unittest.TestCase):
@staticmethod
def create_game_controller():
model = Mock()
card_slot_3 = CardSlot(3)
card_slot_3.shown_value = 15
card_slot_3.n_cards = 3
card_slot_3.real_value = 15
card_slot_3.flashing = True
model.get_slots.return_value = (
CardSlot(0),
CardSlot(1),
CardSlot(2),
card_slot_3,
CardSlot(4),
)
return GameController(model)
@staticmethod
def set_shown_values_in_model(model, values):
for index, value in enumerate(values):
model._slots[index].shown_value = value
def set_up_game_controller(self):
model = GameModel()
# model.get_total = MagicMock(return )
return GameController(model)
def test_get_winning_value(self):
for values, expected_winning in test_winning_value_cases:
with self.subTest():
controller = self.set_up_game_controller()
controller.model.get_total = MagicMock(return_value=values)
self.assertEqual(controller.get_winning_value(), expected_winning)
def test_get_slot_values(self):
controller = self.create_game_controller()
slot = controller.get_slot_values(3)
self.assertEqual(slot[0], 15)
self.assertEqual(slot[1], 15)
self.assertEqual(slot[2], 3)
self.assertEqual(slot[3], True)
self.assertEqual(slot[4], False)
if __name__ == "__main__":
unittest.main()
| willygroup/105-game | tests/game_controller_test.py | game_controller_test.py | py | 1,932 | python | en | code | 1 | github-code | 13 |
37085208903 | from __future__ import absolute_import
from __future__ import unicode_literals
import itertools
import os
from ..variants import revcomp
try:
from pyfaidx import Genome as SequenceFileDB
# Allow pyflakes to ignore redefinition in except clause.
SequenceFileDB
except ImportError:
SequenceFileDB = None
class MockGenomeError(Exception):
pass
class MockSequence(object):
def __init__(self, sequence):
self.sequence = sequence
def __neg__(self):
"""Return reverse complement sequence."""
return MockSequence(revcomp(self.sequence))
def __str__(self):
return self.sequence
def __repr__(self):
return 'MockSequence("%s")' % self.sequence
class MockChromosome(object):
def __init__(self, name, genome=None):
self.name = name
self.genome = genome
def __getitem__(self, n):
"""Return sequence from region [start, end)
Coordinates are 0-based, end-exclusive."""
if isinstance(n, slice):
return self.genome.get_seq(self.name, n.start, n.stop)
else:
return self.genome.get_seq(self.name, n, n+1)
def __repr__(self):
return 'MockChromosome("%s")' % (self.name)
class MockGenome(object):
def __init__(self, lookup=None, filename=None, db_filename=None,
default_seq=None):
"""
A mock genome object that provides a pygr compatible interface.
lookup: a list of ((chrom, start, end), seq) values that define
a lookup table for genome sequence requests.
filename: a stream or filename containing a lookup table.
db_filename: a fasta file to use for genome sequence requests. All
requests are recorded and can be writen to a lookup table file
using the `write` method.
default_seq: if given, this base will always be returned if
region is unavailable.
"""
self._chroms = {}
self._lookup = lookup if lookup is not None else {}
self._genome = None
self._default_seq = default_seq
if db_filename:
# Use a real genome database.
if SequenceFileDB is None:
raise ValueError('pygr is not available.')
self._genome = SequenceFileDB(db_filename)
elif filename:
# Read genome sequence from lookup table.
self.read(filename)
def __contains__(self, chrom):
"""Return True if genome contains chromosome."""
return chrom in (self._genome or self._chroms)
def __getitem__(self, chrom):
"""Return a chromosome by its name."""
if chrom not in self._chroms:
self._chroms[chrom] = MockChromosome(chrom, self)
return self._chroms[chrom]
def get_seq(self, chrom, start, end):
"""Return a sequence by chromosome name and region [start, end).
Coordinates are 0-based, end-exclusive.
"""
if self._genome:
# Get sequence from real genome object and save result.
seq = self._genome[chrom][start:end]
self._lookup[(chrom, start, end)] = str(seq)
return seq
else:
# Use lookup table to fetch genome sequence.
try:
return MockSequence(self._lookup[(chrom, start, end)])
except KeyError:
if self._default_seq:
# Generate default sequence.
return ''.join(itertools.islice(
itertools.cycle(self._default_seq),
None, end - start))
else:
raise MockGenomeError(
'Sequence not in test data: %s:%d-%d' %
(chrom, start, end))
def read(self, filename):
"""Read a sequence lookup table from a file.
filename: a filename string or file stream.
"""
if hasattr(filename, 'read'):
infile = filename
else:
with open(filename) as infile:
return self.read(infile)
for line in infile:
tokens = line.rstrip().split('\t')
chrom, start, end, seq = tokens
self._lookup[(chrom, int(start), int(end))] = seq
if chrom not in self._lookup:
self._chroms[chrom] = MockChromosome(chrom, self)
def write(self, filename):
"""Write a sequence lookup table to file."""
if hasattr(filename, 'write'):
out = filename
else:
with open(filename, 'w') as out:
return self.write(out)
for (chrom, start, end), seq in self._lookup.items():
out.write('\t'.join(map(str, [chrom, start, end, seq])) + '\n')
class MockGenomeTestFile(MockGenome):
def __init__(self, lookup=None, filename=None, db_filename=None,
default_seq=None, create_data=False):
if not create_data:
db_filename = None
super(MockGenomeTestFile, self).__init__(
lookup=lookup, db_filename=db_filename,
filename=filename,
default_seq=default_seq)
self._filename = filename
self._create_data = (db_filename is not None)
if self._create_data and os.path.exists(filename):
# Clear output file when creating data.
os.remove(filename)
def get_seq(self, chrom, start, end):
seq = super(MockGenomeTestFile, self).get_seq(chrom, start, end)
# Save each query in append mode.
if self._create_data:
with open(self._filename, 'a') as out:
out.write('\t'.join(map(str, [chrom, start, end, seq])) + '\n')
return seq
| counsyl/hgvs | pyhgvs/tests/genome.py | genome.py | py | 5,761 | python | en | code | 167 | github-code | 13 |
18194963659 | # Coordinate systems transformations
#
# @author: Anna Eivazi
import numpy as np
from src.rotation_matrix import calculate_rotation_matrix_extrinsic
def transform_2D_to_3D(x, y,
focal_length,
pixel_size_x, pixel_size_y,
principal_point_x, principal_point_y):
"""
Transforms 2D point on image coordinate system (ICS) to 3D point in camera coordinate system (CCS).
Camera coordinate system is in units of length (e.g. cm). Image coordinate system is in pixel coordinate system.
Camera coordinate system has zero in center of the lens, x axis left, y - up, z from lens towards the object.
Image coordinate system has zero in the left-up corner of the image, x - right, y - down.
As the image is flipped, the center is located in down-right corner of the camera matrix, x - left, y - up.
x_ccs = (x_ics - principal_point_x)*pixel_size_x
y_ccs = (y_ics - principal_point_y)*pixel_size_y
z_css = -focal_length
:param
x_ics, y_ics: 2D coordinate in ICS
focal_length: focal length in units of length (e.g. cm)
pixel_size_x: the sixe of one pixel in x dimention in the same units of length (e.g. cm)
pixel_size_y: the sixe of one pixel in y dimention in the same units of length (e.g. cm)
principal_point_x, principal_point_y: the location of the zero point of CCS on the ICS (usually it is center of the camera matrix)
:return: x_ccs, y_ccs, z_ccs: 3D point in CCS
"""
x_ccs = (x - principal_point_x) * pixel_size_x
y_ccs = (y - principal_point_y) * pixel_size_y
z_ccs = -focal_length
return x_ccs, y_ccs, z_ccs
def transform_3D_to_3D(input_coordinates,
alpha_rad, beta_rad, gamma_rad,
shift_of_zero):
"""
Projects 3D point in the input right handed coordinate system (CS_in) to
the 3D point in the output right handed coordinate system (CS_out).
(x_out) (x_in) (x_shift)
(y_out) = R (y_in) + (y_shift)
(z_out) (z_in) (z_shift)
R - is extrinsic rotational matrix defined by Euler angles alpha, beta, gamma
(x_shift, y_shift, z_shift) defines the shift of CS_out's center relatively to CS_in's center.
:param
input_coordinates: 3D point in CS_in
gamma, beta, alpha: rotation angles of axis (radians)
shift_of_zero: zero shift of coordinate systems
:return:
output_coordinates: 3D point in CS_out
"""
R = calculate_rotation_matrix_extrinsic(alpha_rad, beta_rad, gamma_rad)
output_coordinates = np.dot(R, input_coordinates) + shift_of_zero
return output_coordinates
| aeivazi/gaze-estimation | src/coordinate_system_transformations.py | coordinate_system_transformations.py | py | 2,657 | python | en | code | 4 | github-code | 13 |
24913491520 | # 베르트랑 공준
def isPrime(n):
for i in range(2, int(n**(1/2)) + 1):
if n % i == 0:
return False
return True
prime = list()
for i in range(2, (123456 * 2) + 1):
if isPrime(i):
prime.append(i)
while True:
n = int(input())
if n == 0:
break
count = 0
for p in prime:
if n < p and p <= 2*n:
count += 1
print(count)
| yeon7485/cote-study | 단계별로 풀어보기/기본 수학2/bj_4948.py | bj_4948.py | py | 428 | python | en | code | 0 | github-code | 13 |
5911374664 | class Solution:
def deleteGreatestValue(self, grid):
answer = 0
# first modify the rows in grid so that all rows are in assending order
for row in range(len(grid)):
grid[row] = sorted(grid[row])
# while grid[0] is not empty continue the loop
while grid[0]:
largest = [] # set a largest list to keep track of largest numbers in each row that is removed.
# start a second loop to loop though each row in grid
for i in range(len(grid)):
largest.append(grid[i].pop(-1)) # for each row pop the largest number from that row and add it to the largest list
answer += max(largest) # add the biggest number in largest to the answer
return answer | collinsakuma/LeetCode | Problems/2500. Delete Greatest Value in Each Row/delete_greatest_value.py | delete_greatest_value.py | py | 766 | python | en | code | 0 | github-code | 13 |
71899433618 | import random
import time
import threading
from collections import defaultdict
from datetime import timedelta
from dateutil import tz
from dateutil.tz import tzutc
import string
import traceback
from copy import copy
from cement.utils.misc import minimal_logger
from botocore.compat import six
from datetime import datetime
from ..lib import elasticbeanstalk, utils, elb, ec2
from ..lib.aws import InvalidParameterValueError
from ..resources.strings import responses
Queue = six.moves.queue.Queue
LOG = minimal_logger(__name__)
class DataPoller(object):
def __init__(self, app_name, env_name):
self.app_name = app_name
self.env_name = env_name
self.data_queue = Queue()
self.data = None
self.t = None
self.running = False
self.no_instances_time = None
self.instance_info = defaultdict(dict)
def get_fresh_data(self):
new_data = self.data
while not self.data_queue.empty() or new_data is None:
# Block on the first call.
block = new_data is None
new_data = self.data_queue.get(block=block)
self.data = new_data
return new_data
def start_background_polling(self):
self.running = True
self.t = threading.Thread(
target=self._poll_for_health_data
)
self.t.daemon = True
self.t.start()
def _poll_for_health_data(self):
LOG.debug('Starting data poller child thread')
try:
LOG.debug('Polling for data')
while True:
# Grab data
try:
data = self._get_health_data()
# Put it in queue
self.data_queue.put(data)
except Exception as e:
if e.message == responses['health.nodescribehealth']:
# Environment probably switching between health monitoring types
LOG.debug('Swallowing \'DescribeEnvironmentHealth is not supported\' exception')
LOG.debug('Nothing to do as environment should be transitioning')
else:
# Not a recoverable error, raise it
raise e
# Now sleep while we wait for more data
refresh_time = data['environment'].get('RefreshedAt', None)
time.sleep(self._get_sleep_time(refresh_time))
except (SystemError, SystemExit, KeyboardInterrupt) as e:
LOG.debug('Exiting due to: {}'.format(e))
except InvalidParameterValueError as e:
# Environment no longer exists, exit
LOG.debug(e)
except Exception as e:
traceback.print_exc()
finally:
self.data_queue.put({})
def _get_sleep_time(self, refresh_time):
if refresh_time is None:
LOG.debug('No refresh time. (11 seconds until next refresh)')
return 2
delta = utils.get_delta_from_now_and_datetime(refresh_time)
countdown = 11 - delta.seconds
LOG.debug('health time={}. '
'current={}. ({} seconds until next refresh)'
.format(utils.get_local_time_as_string(refresh_time),
utils.get_local_time_as_string(
datetime.now()), countdown))
return max(0.5, min(countdown, 11)) # x in range [0.5, 11]
def _account_for_clock_drift(self, environment_health):
time_str = environment_health['ResponseMetadata']['date']
time = datetime.strptime(time_str, '%a, %d %b %Y %H:%M:%S %Z')
delta = utils.get_delta_from_now_and_datetime(time)
LOG.debug(u'Clock offset={0}'.format(delta))
LOG.debug(delta)
try:
environment_health['RefreshedAt'] += delta
except KeyError:
environment_health['RefreshedAt'] = None
def _get_health_data(self):
environment_health = elasticbeanstalk.\
get_environment_health(self.env_name)
instance_health = elasticbeanstalk.get_instance_health(self.env_name)
LOG.debug('EnvironmentHealth-data:{}'.format(environment_health))
LOG.debug('InstanceHealth-data:{}'.format(instance_health))
self._account_for_clock_drift(environment_health)
token = instance_health.get('NextToken', None)
# Collapse data into flatter tables/dicts
environment_health = collapse_environment_health_data(
environment_health)
instance_health = collapse_instance_health_data(instance_health)
while token is not None:
paged_health = elasticbeanstalk.get_instance_health(
self.env_name, next_token=token)
token = paged_health.get('NextToken', None)
instance_health += collapse_instance_health_data(paged_health)
# Timeout if 0 instances for more than 15 minutes
if environment_health['Total'] == 0:
if self.no_instances_time is None:
self.no_instances_time = datetime.now()
else:
timediff = timedelta(seconds=15 * 60)
if (datetime.now() - self.no_instances_time) > timediff:
return {}
else:
self.no_instances_time = None
# Get AZ info for each instance
self._get_instance_azs(instance_health)
# Return all the data as a single object
data = {'environment': environment_health,
'instances': instance_health}
LOG.debug('collapsed-data:{}'.format(data))
return data
def _get_instance_azs(self, data_dict):
instance_ids = [i.get('InstanceId') for i in data_dict]
ids_with_no_az_info = []
for id in instance_ids:
if 'az' not in self.instance_info[id]:
ids_with_no_az_info.append(id)
instances = ec2.describe_instances(ids_with_no_az_info)
for i in instances:
id = i.get('InstanceId', None)
az = i.get('Placement', {}).get('AvailabilityZone', None)
if az:
self.instance_info[id]['az'] = az
for instance_data in data_dict:
id = instance_data.get('InstanceId')
instance_data['az'] = self.instance_info.get(id, {}).get('az', 'no-data')
def collapse_environment_health_data(environment_health):
result = dict()
request_count = environment_health.get('ApplicationMetrics', {}) \
.get('RequestCount', 0)
latency_dict = environment_health.get('ApplicationMetrics', {})\
.pop('Latency', {})
result.update(_format_latency_dict(latency_dict, request_count))
result['requests'] = request_count/10.0
statuses = environment_health.get('ApplicationMetrics', {})\
.pop('StatusCodes', {})
for k, v in six.iteritems(statuses):
convert_data_to_percentage(statuses, k, request_count)
result.update(statuses)
result.update(environment_health.pop('ApplicationMetrics', {}))
total = 0
for k, v in six.iteritems(environment_health.get('InstancesHealth', {})):
total += v
result['Total'] = total
result.update(environment_health.pop('InstancesHealth', {}))
result.update(environment_health)
causes = result.get('Causes', [])
cause = causes[0] if causes else ''
result['Cause'] = cause
return result
def collapse_instance_health_data(instances_health):
instance_list = instances_health.get('InstanceHealthList', [])
result = list()
for i in instance_list:
instance = dict()
request_count = i.get('ApplicationMetrics', {}) \
.get('RequestCount', 0)
latency = i.get('ApplicationMetrics', {}).pop('Latency', {})
instance.update(_format_latency_dict(latency, request_count))
instance.update(i.get('ApplicationMetrics', {}).pop('StatusCodes', {}))
instance.update(i.pop('ApplicationMetrics', {}))
instance.update(i.get('System', {}).pop('CPUUtilization', {}))
instance.update(i.pop('System', {}))
instance.update(i)
causes = instance.get('Causes', [])
cause = causes[0] if causes else ''
instance['Cause'] = cause
instance['load1'] = instance['LoadAverage'][0] \
if 'LoadAverage' in instance else '-'
instance['load5'] = instance['LoadAverage'][1] \
if 'LoadAverage' in instance else '-'
try:
delta = datetime.now(tz.tzlocal()) - utils.get_local_time(instance['LaunchedAt'])
instance['launched'] = utils.get_local_time_as_string(instance['LaunchedAt'])
days = delta.days
minutes = delta.seconds // 60
hours = minutes // 60
if days > 0:
instance['running'] = '{0} day{s}'\
.format(days, s=_get_s(days))
elif hours > 0:
instance['running'] = '{0} hour{s}'\
.format(hours, s=_get_s(hours))
elif minutes > 0:
instance['running'] = '{0} min{s}'\
.format(minutes, s=_get_s(minutes))
else:
instance['running'] = '{0} secs'.format(delta.seconds)
except KeyError as e:
instance['running'] = '-'
# Calculate requests per second
duration = instance.get('Duration', 10)
instance['requests'] = request_count / (duration * 1.0)
# Convert counts to percentages
for key in {'Status_2xx', 'Status_3xx', 'Status_4xx', 'Status_5xx'}:
convert_data_to_percentage(instance, key, request_count,
add_sort_column=True)
# Add status sort index
instance['status_sort'] = _get_health_sort_order(instance['HealthStatus'])
result.append(instance)
return result
def _format_latency_dict(latency_dict, request_count):
new_dict = copy(latency_dict)
for k, v in six.iteritems(latency_dict):
new_dict[k + '_sort'] = v
representation = format_float(v, 3)
if (k == 'P99' and request_count < 100) or \
(k == 'P90' and request_count < 10):
representation += '*'
elif k in ['P99', 'P90']:
representation += ' '
new_dict[k] = representation
return new_dict
def _get_s(number):
return 's' if number > 1 else ''
def convert_data_to_percentage(data, index, total, add_sort_column=False):
if total > 0:
percent = (data.get(index, 0) / (total * 1.0)) * 100.0
# Now convert to string
representation = format_float(percent, 1)
data[index] = representation
# Convert back to float for sorting
if add_sort_column:
data[index + '_sort'] = float(representation)
def format_float(flt, number_of_places):
format_string = '{0:.' + str(number_of_places) + 'f}'
return format_string.format(flt)
def _get_health_sort_order(health):
health_order = dict(
(v, k) for k, v in enumerate([
'Severe',
'Degraded',
'Unknown',
'Warning',
'NoData',
'No Data',
'Info',
'Pending',
'Ok',
])
)
return health_order[health]
class TraditionalHealthDataPoller(DataPoller):
""" Assumes we are using a LoadBalanced Environment """
def _get_health_data(self):
timestamp = datetime.now(tz.tzutc())
env = elasticbeanstalk.get_environment(self.app_name, self.env_name)
env_dict = elasticbeanstalk.get_environment_resources(self.env_name)
env_dict = env_dict['EnvironmentResources']
load_balancers = env_dict.get('LoadBalancers', None)
if load_balancers and len(load_balancers) > 0:
load_balancer_name = env_dict.get('LoadBalancers')[0].get('Name')
instance_states = elb.get_health_of_instances(load_balancer_name)
else:
instance_states = []
instance_ids = [i['Id'] for i in
env_dict.get('Instances', [])]
total_instances = len(instance_ids)
total_in_service = len([i for i in instance_states
if i['State'] == 'InService'])
env_data = {'EnvironmentName': env.name,
'Color': env.health,
'Status': env.status,
'Total': total_instances,
'InService': total_in_service,
'Other': total_instances - total_in_service}
data = {'environment': env_data, 'instances': []}
# Get Instance Health
for i in instance_states:
instance = {'id': i['InstanceId'], 'state': i['State'],
'description': i['Description']}
ec2_health = ec2.describe_instance(instance['id'])
instance['health'] = ec2_health['State']['Name']
data['instances'].append(instance)
# Get Health for instances not in Load Balancer yet
for i in instance_ids:
instance = {'id': i}
if i not in [x['InstanceId'] for x in instance_states]:
instance['description'] = 'N/A (Not registered ' \
'with Load Balancer)'
instance['state'] = 'n/a'
ec2_health = ec2.describe_instance(i)
instance['health'] = ec2_health['State']['Name']
data['instances'].append(instance)
data['environment']['RefreshedAt'] = timestamp
return data | ianblenke/awsebcli | ebcli/health/data_poller.py | data_poller.py | py | 13,638 | python | en | code | 3 | github-code | 13 |
6492842462 | from flask import Flask, render_template, session, request, make_response, json, jsonify, url_for
from flask_socketio import SocketIO, emit, join_room, leave_room,close_room, rooms, disconnect
import glob
# import json
import math
import numpy as np
import os
import pyaudio
from random import randint
from threading import Thread, Lock
import time
import serial
import sys
import struct
import logging
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
async_mode = None
if async_mode is None:
try:
import eventlet
async_mode = 'eventlet'
except ImportError:
pass
if async_mode is None:
try:
from gevent import monkey
async_mode = 'gevent'
except ImportError:
pass
if async_mode is None:
async_mode = 'threading'
print('async_mode is ' + async_mode)
# monkey patching is necessary because this application uses a background
# thread
if async_mode == 'eventlet':
import eventlet
eventlet.monkey_patch()
elif async_mode == 'gevent':
from gevent import monkey
monkey.patch_all()
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# #Version 2.7 or Above?
# if sys.version_info[0] >2:
# version3 = True
# kwargs = {'newline':''}
# else:
# version3 = False
# kwargs = {}
# serialConnected = False #global flag for whether or not the serial port should be connected
# serialPort = 0 # (init value is 3...junk) contains serial port object when in use...touching protected by serialLock below
# serialLock = Lock() #serial permission lock (protects shared resource of serial port)
# print (serialLock)
# #Taken from here on StackExchange: http://stackoverflow.com/questions/12090503/listing-available-com-ports-with-python
# #Want to give credit where credit is due!
# def serial_ports():
# if sys.platform.startswith('win'):
# ports = ['COM%s' % (i + 1) for i in list(range(256))]
# elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# # this excludes your current terminal "/dev/tty"
# ports = glob.glob('/dev/tty[A-Za-z]*')
# elif sys.platform.startswith('darwin'):
# ports = glob.glob('/dev/tty.*')
# else:
# raise EnvironmentError('Unsupported platform')
# result = []
# for port in ports:
# try:
# #print("checking port "+port)
# s = serial.Serial(port)
# #print("closing port "+port)
# s.close()
# result.append(port)
# except (OSError, serial.SerialException):
# pass
# return result
# #serial variables:
# serialselection = ''
# baudselection = 115200
# mcuMessage = []
# '''system_parameters (dictionary where keys are user-variable parameters and entry is list consisting of current value (index 0 and single-character comm term for conveying value back to micro...for example you could have system_parameters['K_d']=[1.4,'D']
# '''
# system_parameters = {}
# #params_and_values an ordered list of the names of paramters, headroom, and values to be plotted
# #Used in generating CSV header list in order
# params_and_values = []
# #A list pointing to parameter values for quick plotting (rather than list comprehend this every time
# param_vals = []
# command_terms = ['HIHI']
# #expected_length...how long each full message from Micro should be
# expected_length = 0
# #function that will be stored for chopping up message into appropriate signed/unsignedness/float, etc... makes this processing arbitrarily expandable as needed...must obviously agree with encoding scheme on micro
# parseFunction = lambda x: [0]
# '''Kp = 0.0
# Kd = 0.0
# Ki = 0.0
# direct = 0.0
# desired = 0.0
# '''
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##
#####################################
## ##
## Stuff from original files ##
## ##
#####################################
# keepRunning = True #set to True for default
# #global setup variables:
# #used during initialization of comms/building GUI
# isSetup = False
# setupString = ""
# allGoodFromGUI = False
# #Function run in parallel on infinite loop with
# #serves as serial listener outside of separate loop
# def serialThread():
# print ("Starting serial background thread.")
# global desired
# global serialLock
# global csvLock
# global serialPort
# global system_parameters
# global params_and_values
# global expected_length
# global parseFunction
# global param_vals
# global csv_default
# global csv_recent
# global alt_data
# global alternate
# global isSetup
# global setupString
# global command_terms
# while True:
# #print (serialConnected)
# if serialConnected:
# print("serial connected!")
# print("not setup")
# writeUpdates('~',0)
# time.sleep(2.0)
# serialLock.acquire()
# try:
# new_setupString = serialPort.readline()
# serialPort.flushInput()
# except:
# print ("initi string reading issue")
# serialLock.release()
# print("before")
# print(new_setupString)
# # what the heck is happening here?
# new_setupString = strip_until_marker(new_setupString)
# print("after")
# print(new_setupString)
# temp_commands = new_setupString.split('&')
# temp_commands = temp_commands[1:-1]
# print(temp_commands)
# print(command_terms)
# if temp_commands != command_terms: #only reload the gui if the configuration setup string has changed!
# command_terms = temp_commands
# print("DETECTED DIFFERENT STARTUP STRING!")
# setupString = new_setupString
# print(setupString)
# temp = setupString.split('&',1)[1]
# temp = temp.rsplit('&',1)[0]
# setupString = temp
# print(setupString)
# try:#send up to javascript to sort its part out
# socketio.emit('startup',setupString,broadcast =True)
# except:
# print ("failed socket")
# #build structures based on setupString's contents and orderj
# plot_count =0 #used for tallying plots
# spaces = [] #used for determining how to chop data string (bytes per var)
# s=[] #list of sliders
# t=[] #list of temporal plots
# h = [] #contains headroom value if that is being plotted
# for x in command_terms:
# if len(x)>0 and x[0] =='S': #is a slider
# print("slider")
# slider_vals = x.split('~') #chop string
# print(slider_vals)
# #next: add key to system_parameters dict of slider name
# #entry is starting val (0) and one char value used for comms
# system_parameters[slider_vals[1]]=[0,slider_vals[2]]
# s.append(slider_vals[1]) #add name of param to s list
# #next is to fill in the param_vals list with the current value
# param_vals.append(system_parameters[slider_vals[1]][0])
# if len(x)>0 and x[0] == 'A': #we are alternating
# vals = x.split('~') #split substring
# alt_data['period'] = float(vals[2]) #period unpacked
# alt_data['param'] = vals[1] #link alternate to selected parameter
# if len(x)>0 and x[0]=='T': #we have a temporal plot
# print("Plot")
# plot_vals = x.split('~') #split substring
# t.append(plot_vals[1]) #add name to t list
# #next line: append list: [num_bytes,signed/unsigned/float,etc..]
# print(plot_vals)
# spaces.append([int(plot_vals[2][1]),plot_vals[2][0]])
# plot_count +=1 #increment plot count
# if len(x)>0 and x[0]=='H':
# head_vals = x.split('~')
# h.append("Headroom")
# plot_count +=1 #headroom isn't a "plot" but treated same
# if head_vals[1] =='2':
# spaces.append([2,'S']) #needed since 16bit int on Arduino
# elif head_vals[1] =='4':
# spaces.append([4,'F']) #needed since ARM32 Teensy
# params_and_values = t+h+s #in order plots, headroom, sliders
# print("Identified values: %r" %(params_and_values))
# expected_length = sum(x[0] for x in spaces)+2 #2 from open/closing byte
# #parse_prototype is function that will chop up incoming bytes for sending up to the GUI
# def parse_prototype(listo):
# new_out = []
# current_index=1 #start 1 up because of start byte
# #print(listo)
# for x in range(plot_count):
# val = 0
# if spaces[x][0] == 1:
# if spaces[x][1] == 'S':
# val = struct.unpack('b',listo[current_index:current_index+1])[0]
# elif spaces[x][1] =='U':
# val = struct.unpack('B',listo[current_index:current_index+1])[0]
# elif spaces[x][0] == 2:
# if spaces[x][1] == 'S':
# val = struct.unpack('<h',listo[current_index:current_index+2])[0]
# elif spaces[x][1] == 'U':
# val = struct.unpack('H',listo[current_index:current_index+2])[0]
# elif spaces[x][0] == 4:
# if spaces[x][1] == 'F':
# val = struct.unpack('f',listo[current_index:current_index+4])[0]
# elif spaces[x][1] == 'S':
# val = struct.unpack('i',listo[current_index:current_index+4])[0]
# new_out.append(val)
# current_index += spaces[x][0]
# return new_out
# parseFunction = parse_prototype
# while not allGoodFromGUI:
# print("Waiting for GUI Setup...")
# time.sleep(1.0)
# isSetup = True
# else:
# print("SAME AS BEFORE!")
# inform_dev() #just tell device that we are good
# serialLock.acquire()
# try:
# serialPort.flushInput()
# except:
# print ("initi string reading issue")
# serialLock.release()
# print("updating Parameters:")
# for x in s: #reload gui and device
# socketio.emit('setup slider',{0:x,1:str(system_parameters[x][0])}, broadcast=True)
# print("Writing %s to be %0.4f" %(system_parameters[x][1],system_parameters[x][0]))
# writeUpdates(system_parameters[x][1],system_parameters[x][0])
# time.sleep(0.1)
# writeUpdates(system_parameters[x][1],system_parameters[x][0])
# time.sleep(0.1)
# time.sleep(1)
# print(system_parameters)
# print ("Starting to read serial subthread")
# print ('Alternating state')
# print (alternate)
# print("expected length:")
# print (expected_length)
# print (serialConnected)
# while serialConnected:
# serialLock.acquire()
# b = serialPort.read(expected_length)
# if len(b) != expected_length:
# print("expected=%d, actual=%d\n",len(b),expected_length)
# new_data = None
# if len(b) > 0 and messageRead(b,expected_length):
# new_data = parseFunction(b)
# if new_data != None:
# try:
# socketio.emit('note',new_data,broadcast =True)
# except:
# print ("failed socket")
# if csv_yn:
# temp_time = [time.time()-csv_st] #time since recording started
# csvLock.acquire()
# newb_list = temp_time+new_data+[system_parameters[x][0] for x in s]
# csv_default.writerow(newb_list)
# csv_recent.writerow(newb_list)
# csvLock.release()
# serialLock.release()
# time.sleep(0.01)
# if alternate == 1:
# if time.time()-alt_data['timer'] > alt_data['period']:
# print ('Switch to :')
# alt_data['timer'] = time.time() #reset timer
# poi = alt_data['param'] #param of interest
# print(type(system_parameters[poi][0]))
# print(system_parameters[poi][0])
# system_parameters[poi][0] = system_parameters[poi][0]*-1.0
# alt_data['state'] = alt_data.get('state')*-1
# writeUpdates(system_parameters[poi][1],system_parameters[poi][0])
# try:
# socketio.emit('state toggle', system_parameters[poi][0], broadcast=True) #tell the GUI that the desired has changed
# except:
# print('failed toggle socket')
# print ("Stopping serial read. Returning to idle state")
# time.sleep(0.01)
# def strip_until_marker(input_string):
# #return only text after last non-ascii character has been found
# #should *always* work...closing byte of plot package is \xff which is non-ascii and
# #should get caught in this scheme...there are of course ways to break this but they
# #require breaking the communication contract we have setup.
# new_string = ''
# for x in range(len(input_string)):
# poss = input_string[x:x+1]
# try:
# if version3:
# if type(poss)==type("hi"):
# poss = str.encode(poss,'ascii') #fail here possibly
# char = poss.decode('ascii')
# new_string+=char
# except:
# new_string=""
# return new_string
# #runtime variables...
# def messageRead(buff,exp):
# first = struct.unpack('b',buff[0:1])[0]
# last = struct.unpack('b',buff[exp-1:exp])[0]
# if first == 0 and last == -1:
# return True
# else:
# return False
# # User has connected
# @socketio.on('connect')
# def test_connect():
# print ('hey someone connected!' )
# ports = serial_ports() #generate list of currently connected serial ports
# print (ports)
# newb=[]
# for p in ports:
# newb.append({"comName": p})
# print (json.dumps(newb))
# #emit('serial list display', {'data': ports}) #emit socket with serial ports in it
# emit('serial list display', newb) #emit socket with serial ports in it
# #emit('my response', {'data': 'Connected'})
# # User has disconnected
# @socketio.on('disconnect')
# def test_disconnect():
# global csv_yn
# global csvLock
# emit('serial disconnect request',broadcast=True)
# csv_yn = 0
# #if current is not None and archive is not None:
# csvLock.acquire()
# try:
# current.close()
# archive.close()
# except NameError:
# pass #if didn't exist yet, don't try...
# csvLock.release()
# print('Client disconnected. Hopefully that was for the best.')
# writeUpdates('~',0)#for non-autoreset devices must tell it to enter child state again
# # Something
# def writeUpdates(tag,val):
# global serialPort
# global serialLock
# string_to_write = tag+' %0.2f\n' %(float(val))
# print(string_to_write)
# if serialConnected:
# serialLock.acquire() #claim serial resource
# if version3:
# b = bytes(string_to_write,'UTF-8')
# print(b)
# serialPort.write(bytes(string_to_write,'UTF-8'))
# else:
# serialPort.write(string_to_write.encode('utf-8'))
# #serialPort.write(string_to_write)
# serialLock.release() #release serial resource back out into big scary world
# else:
# print ("Change in %s to value %s not written since no live serial comm exists yet" %(tag,val))
# # Specs
# @socketio.on('serial select')
# def action(port):
# global serialselection
# print ('serial port changed to %s' %(port))
# serialselection = port
# @socketio.on('baud select')
# def action(baud):
# global baudselection
# print ('baud changed to %s' %(baud))
# baudselection = baud
# @socketio.on('serial connect request')
# def connection(already_built):
# global serialConnected
# global serialPort
# global serialLock
# global alternate
# global isSetup
# already_built = eval(str(already_built))
# print("state of gui")
# print(already_built)
# isSetup = already_built['state'] #user this
# print(isSetup)
# alternate = 0
# print ('Trying to connect to: ' + serialselection + ' ' + str(baudselection))
# print (serialLock)
# print (serialConnected)
# try:
# serialLock.acquire()
# print ("Lock acquired")
# serialPort = serial.Serial(serialselection, int(baudselection),timeout=4)
# print ('SerialPort')
# print ('Connected to ' + str(serialselection) + ' at ' + str(baudselection) + ' BAUD.')
# emit('serial connected', broadcast=True) #tells page to indicate connection (in button)
# serialPort.flushInput()
# serialLock.release()
# serialConnected = True #set global flag
# except:
# print ("Failed to connect with "+str(serialselection) + ' at ' + str(baudselection) + ' BAUD.')
# @socketio.on('serial disconnect request')
# def discon():
# global serialConnected
# global serialLock
# global serialPort
# print ('Trying to disconnect...')
# serialLock.acquire()
# serialPort.close()
# serialLock.release()
# serialConnected = False
# emit('serial disconnected',broadcast=True)
# print ('Disconnected...good riddance' )
# @socketio.on('disconnected')
# def ending_it():
# print ("We're done, we're through, we're over.")
# @socketio.on('change')
# def action(data):
# global system_parameters
# data = eval(str(data))
# system_parameters[data['id']][0]=float(data['val'])
# writeUpdates(system_parameters[data['id']][1],system_parameters[data['id']][0])
# @socketio.on('all set from gui')
# def action():
# global allGoodFromGUI
# allGoodFromGUI = True
# print("we are done from GUI Side")
# inform_dev()
# def inform_dev():
# global serialPort
# global serialLock
# string_to_write = "SET\n"
# if serialConnected:
# serialLock.acquire() #claim serial resource
# if version3:
# serialPort.write(bytes(string_to_write,'UTF-8'))
# else:
# print(string_to_write)
# serialPort.write(string_to_write)
# serialPort.flushInput()
# serialLock.release() #release serial resource back out into big scary world
# else:
# print ("can't inform device since it isn't connected...what does this even mean") # same joe...
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
########################
##                    ##
##    SERVER STUFF    ##
##                    ##
########################
#Start up Flask server:
# Templates are served from the project root; static assets from /static.
app = Flask(__name__, template_folder = './',static_url_path='/static')
# NOTE(review): hard-coded secret key committed to source -- fine for a demo,
# but should come from the environment in any real deployment.
app.config['SECRET_KEY'] = 'secret!' #shhh don't tell anyone. Is a secret
socketio = SocketIO(app, async_mode = async_mode)
# Background thread handle; created lazily on the first page hit (see index()).
thread = None
# NOTE(review): 'global' at module level is a no-op statement; the name is
# actually created later by checkJson().
global identifiers
def dataThread():
    """Background worker launched from index(); currently just a stub."""
    message = "yes)"  # NOTE(review): looks like a typo for "yes" -- kept for identical output
    print(message)
# Startup has occurred
@app.route('/')
def index():
    """Serve the main page; start the background data thread on first hit."""
    global thread
    global fft
    global data
    print ("A user connected")
    # Only one daemon thread is ever created, no matter how many clients connect.
    if thread is None:
        thread = Thread(target=dataThread)
        thread.daemon = True
        thread.start()
    # NOTE: Leaving this oen
    # fft = Thread(target=micThread)
    # fft.daemon = True
    # fft.start()
    return render_template('pages/main.html')
# Return the configuration
@app.route('/config', methods=['GET', 'POST'])
def config():
    """GET: refresh unique ids in static/json/config.json and return it as JSON.

    NOTE(review): the POST and fallback branches only print and implicitly
    return None, which Flask treats as an error -- confirm POST is meant to
    be unimplemented. The open() below is also never explicitly closed.
    """
    if request.method == 'GET':
        SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
        json_url = os.path.join(SITE_ROOT, "static/json/", "config.json")
        checkJson(json_url)
        config = json.load(open(json_url))
        return jsonify(config)
    elif request.method == 'POST':
        print("can't really post anything yet, sorry...")
    else:
        print("Check your request method.")
    # print(identifiers['ToneGenerator'])
# Check and update identifiers in the json, and rebuild the global name->id map.
def checkJson(json_url):
    """Ensure every module item in config.json carries a unique 3-digit id.

    Reads the config at json_url, assigns a fresh identifier to any item that
    lacks one, records every item's id in the module-level `identifiers`
    dict (keyed by item name), and writes the config back out (pretty-printed).

    Fixes vs. the original:
    - a collision used to silently leave the item without a 'unique' key and
      could leave duplicate ids (newly generated ids were never tracked);
      we now retry until unused and track every id we hand out.
    - items that already had a 'unique' were never added to `identifiers`;
      they are now recorded too (matching the stated intent of the map).
    """
    # Make global dictionary of ID's
    global identifiers
    identifiers = {}
    # Open Json
    with open(json_url, "r") as jsonFile:
        config = json.load(jsonFile)
    # Function to generate new unique identifier (exactly n digits)
    def newUnique(n):
        range_start = 10**(n-1)
        range_end = (10**n)-1
        return randint(range_start, range_end)
    # List to store every identifier in use (existing and newly generated)
    uniques = []
    # Open up modules portion of config.json
    modules = config[1]['modules']
    for module in modules:
        for instance in module:
            for item in module[instance]:
                # Check if module already has unique identifer
                if 'unique' in item:
                    # Track the existing identifier so we never reuse it
                    uniques.append(item['unique'])
                else:
                    # Retry until the identifier is genuinely unused.
                    # (Only 900 three-digit ids exist; with far fewer
                    # modules than that, this terminates quickly.)
                    unique = newUnique(3)
                    while unique in uniques:
                        unique = newUnique(3)
                    item['unique'] = unique
                    uniques.append(unique)
                # Record every item's id, not just freshly minted ones.
                identifiers[item['name']] = item['unique']
    # Write modified json file
    with open(json_url, "w") as jsonFile:
        # Complicated dump so that everytime we modify the json it isn't minified
        json.dump(config, jsonFile, sort_keys=True,indent=2,separators=(',',': '))
# Arbitrary for the time being, but this will lead to the generation page
@app.route('/generate')
def configGenerate():
    """Serve the (work-in-progress) configuration generation page."""
    global identifiers
    page = 'pages/index.html'
    return render_template(page)
# Universal announcer: fan a client's 'reporting' event back out to listeners.
@socketio.on('reporting')
def announce(content):
    """Rebroadcast a module's report on its per-identifier channel."""
    unique, div, data = content['unique'], content['div'], content['data']
    socketio.emit("announce_{}".format(unique), data=(unique, div, data))
@app.route("/simple.png")
def simple():
import datetime
import io
import random
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.dates import DateFormatter
try:
while True:
fig=Figure()
ax=fig.add_subplot(111)
x=[]
y=[]
now=datetime.datetime.now()
delta=datetime.timedelta(days=1)
for i in range(10):
x.append(now)
now+=delta
y.append(random.randint(0, 1000))
ax.plot_date(x, y, '-')
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
fig.autofmt_xdate()
canvas=FigureCanvas(fig)
png_output = io.BytesIO()
canvas.print_png(png_output)
response=make_response(png_output.getvalue())
response.headers['Content-Type'] = 'image/png'
return response
time.sleep(.001)
fig.cla()
except:
print('welp')
def micThread():
    """Infinite loop: sample the microphone FFT and emit it over socketio.

    Emits 'update_450' every iteration (burst_duration == 1) and toggles /
    logs an ON/OFF marker every `toggle_count` iterations. Runs forever;
    intended to be started as a daemon thread.
    """
    # First time setup
    first_time = True
    if ( first_time ):
        wow = SpectrumAnalyzer()
        first_time = False
    # Etc
    unique = 450  # hard-coded channel id the front end listens on
    burst_duration = 1  # emit every iteration
    counter = 0
    toggle_count = 500
    on_state = True
    name = 'joe'  # NOTE(review): unused
    while True:
        counter +=1
        if counter%burst_duration == 0:
            socketio.emit('update_{}'.format(unique),wow.fft())
        if counter%toggle_count == 0:
            counter = 0
            # NOTE(review): prints "OFF" while on_state is True (and vice
            # versa) -- looks inverted; confirm intended meaning.
            if on_state:
                print("OFF")
            else:
                print("ON")
            on_state = not on_state
        time.sleep(0.001)
class SpectrumAnalyzer:
    """Microphone FFT helper: reads CHUNK float32 samples from the default
    PyAudio input device and exposes their magnitude spectrum via fft().

    Fix vs. the original: np.fromstring on binary data is deprecated (and
    removed in newer NumPy); np.frombuffer is the supported equivalent.
    """
    # Start Pyaudio (class-level: runs once at import time)
    p = pyaudio.PyAudio()
    # Select Device (host API 0, device 0 -- the system default)
    device = p.get_device_info_by_host_api_device_index(0, 0)
    # Device Specs
    CHUNK = 1024  # samples per read
    CHANNELS = int(device['maxInputChannels'])
    FORMAT = pyaudio.paFloat32
    RATE = int(device['defaultSampleRate'])
    START = 0  # first sample index of the transform window
    N = CHUNK  # transform length
    wave_x = 0
    wave_y = 0
    spec_x = 0
    spec_y = 0
    data = []
    def __init__(self):
        self.pa = pyaudio.PyAudio()
        self.stream = self.pa.open(
            format = self.FORMAT,
            channels = self.CHANNELS,
            rate = self.RATE,
            input = True,
            output = False,
            input_device_index = 0,
            frames_per_buffer = self.CHUNK)
    def audioinput(self):
        """Read one CHUNK of float32 samples from the input stream."""
        # np.frombuffer yields a read-only view, which is fine here: callers
        # only slice and transform the data, never write into it.
        data = np.frombuffer(self.stream.read(self.CHUNK), dtype=np.float32)
        return data
    def fft(self):
        """Capture a chunk and return its magnitude spectrum as a list."""
        self.data = self.audioinput()
        self.wave_x = range(self.START, self.START + self.N)
        self.wave_y = self.data[self.START:self.START + self.N]
        self.spec_x = np.fft.fftfreq(self.N, d = 1.0 / self.RATE)
        y = np.fft.fft(self.data[self.START:self.START + self.N])
        self.spec_y = [np.sqrt(c.real ** 2 + c.imag ** 2) for c in y]
        return self.spec_y
if __name__ == '__main__' or __name__ == 'server':
socketio.run(app, port=3000, debug=True) | dggsax/vigilaveris | webpage/main.py | main.py | py | 26,956 | python | en | code | 0 | github-code | 13 |
41310510364 | from google.oauth2 import service_account
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaFileUpload
from googleapiclient.discovery import build
import sys
def main():
    """Upload the file named on the command line to Google Drive."""
    # Service Account key file -- replace with the real path.
    credentials = service_account.Credentials.from_service_account_file('your-service-account-key.json')
    # Scope per the Drive API documentation.
    scoped_credentials = credentials.with_scopes(['https://www.googleapis.com/auth/drive.file'])
    # Initialize the Drive v3 API client.
    service = build('drive', 'v3', credentials=scoped_credentials)
    # Guard clause: a file path is required as the first CLI argument.
    if len(sys.argv) < 2:
        print("Please provide a file path in the command line argument.")
        return
    file_path = sys.argv[1]
    try:
        print("Uploading file: %s" % file_path)
        file_metadata = {'name': 'yourfilename.zip'}
        media = MediaFileUpload(file_path, resumable=True)
        # Create the file and upload its contents in one request.
        uploaded = service.files().create(body=file_metadata, media_body=media, fields='id').execute()
        print("File ID: %s" % uploaded.get('id'))
    except HttpError as error:
        print(f"An error occurred: {error}")
        return
if __name__ == '__main__':
main()
| wraith4081/gdrive-upload | index.py | index.py | py | 1,418 | python | en | code | 3 | github-code | 13 |
70652616979 | #!/usr/bin/env python
#Author - Teja Koganti (D3B)
import argparse
import pandas as pd
# CLI: -i histology TSV in, -o notebook TSV out.
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--histologies', required = True,
                    help = 'path to the histology file')
parser.add_argument('-o', '--outnotebook', required = True,
                    help = "output notebook")
args = parser.parse_args()
# Histology file is tab-separated.
pbta_histologies = pd.read_csv(args.histologies, sep="\t")
outnotebook = open(args.outnotebook, "w")
def group_disease(primary_site):
    """Classify a primary site as infratentorial, supratentorial, or undetermined.

    Matching is case-insensitive substring search; infratentorial keywords
    are checked before supratentorial ones, and anything unmatched (e.g.
    "Other locations NOS", "Ventricles") is "undetermined".
    """
    infra = ("posterior fossa", "optic", "spinal", "tectum", "spine")
    supra = ("frontal lobe", "parietal lobe", "occipital lobe", "temporal lobe")
    primary = primary_site.lower()  # guard against case mismatches
    if any(site in primary for site in infra):
        return "infratentorial"
    if any(site in primary for site in supra):
        return "supratentorial"
    return "undetermined"
# Filtering for ependymoma samples
EP = pbta_histologies[pbta_histologies["pathology_diagnosis"]=="Ependymoma"]
EP_rnaseq_samples = EP[EP["experimental_strategy"] == "RNA-Seq"][["Kids_First_Biospecimen_ID","Kids_First_Participant_ID", "sample_id","primary_site"]]
# Filtering for DNA samples
WGS_dnaseqsamples = EP[EP["experimental_strategy"] == "WGS"][["Kids_First_Biospecimen_ID", "Kids_First_Participant_ID", "sample_id","primary_site"]]
# Renaming the column name so they don't conflict in merge step
EP_rnaseq_samples = EP_rnaseq_samples.rename(columns={"Kids_First_Biospecimen_ID":"Kids_First_Biospecimen_ID_RNA"})
WGS_dnaseqsamples = WGS_dnaseqsamples.rename(columns={"Kids_First_Biospecimen_ID":"Kids_First_Biospecimen_ID_DNA"})
# sample_id is common between both dataframes and also unique between RNA and DNA.
# Some DNA BSID's are missing for the corresponding RNA samples, hence outer merge.
EP_rnaseq_WGS = EP_rnaseq_samples.merge(WGS_dnaseqsamples,
                                        on = ["sample_id", "Kids_First_Participant_ID","primary_site"],
                                        how = "outer")
# Fill missing biospecimen ids with the literal string 'NA'.
EP_rnaseq_WGS.fillna('NA', inplace=True)
# add disease group inferred from primary_site
EP_rnaseq_WGS["disease_group"] = [group_disease(primary) for primary in EP_rnaseq_WGS["primary_site"]]
# Sort for consistency
EP_rnaseq_WGS = EP_rnaseq_WGS.sort_values(by = ["Kids_First_Participant_ID", "sample_id"])
# Write out selected columns as TSV to the output notebook file.
EP_rnaseq_WGS[[
    "Kids_First_Participant_ID",
    "sample_id",
    "Kids_First_Biospecimen_ID_DNA",
    "Kids_First_Biospecimen_ID_RNA",
    "disease_group"
]].to_csv(outnotebook, sep="\t", index=False)
outnotebook.close()
| AlexsLemonade/OpenPBTA-analysis | analyses/molecular-subtyping-EPN/01-make_notebook_RNAandDNA.py | 01-make_notebook_RNAandDNA.py | py | 3,009 | python | en | code | 94 | github-code | 13 |
33611716816 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import pandas as pd
from companyMessage.items import CompanymessageItem
from companyMessage.items import DetailedInformation
from scrapy.exporters import CsvItemExporter
class BytesEncoder(json.JSONEncoder):
    """json.JSONEncoder that serializes bytes values as UTF-8 text."""
    def default(self, obj):
        # Scraped fields may arrive as raw bytes; decode them so json.dumps
        # does not raise. Everything else defers to the base encoder.
        if isinstance(obj, bytes):
            return obj.decode('utf-8')
        return super().default(obj)
class CompanymessagePipeline(object):
    """Scrapy pipeline that writes job items to JSON and CSV files.

    CompanymessageItem goes to job_message.json / job.csv,
    DetailedInformation goes to job_company_message.json / company_message.csv.
    """
    def __init__(self):
        # python
        # self.f = open('../../files/pythonSystem/python_job_message.json','w')
        self.f = open('../../files/baiyunqu/job_message.json','w')
        # self.detailf = open('../../files/pythonSystem/python_job_company_message.json','w')
        self.detailf = open('../../files/baiyunqu/job_company_message.json','w')
        # Location of the csv file; no need to create it beforehand
        # Open (create) the file
        # self.file = open('../../files/pythonSystem/python_job.csv','wb')
        self.file = open('../../files/baiyunqu/job.csv','wb')
        # Corresponds to detailf
        # self.company_file = open('../../files/pythonSystem/python_company_message.csv','wb')
        self.company_file = open('../../files/baiyunqu/company_message.csv','wb')
        # CSV export setup
        self.exporter = CsvItemExporter(self.file)
        self.company_exporter = CsvItemExporter(self.company_file)
        self.exporter.start_exporting()
        self.company_exporter.start_exporting()
    def process_item(self, item, spider):
        """Route each item to the matching JSON + CSV sinks.

        NOTE(review): items of any other type fall through and return None,
        which drops them from later pipelines -- confirm that is intended.
        """
        if isinstance(item, CompanymessageItem):
            item = dict(item)
            content = json.dumps(item,cls=BytesEncoder,ensure_ascii = False)+',\n'
            self.f.write(content)
            self.exporter.export_item(item)
            return item
        elif isinstance(item, DetailedInformation):
            item = dict(item)
            content = json.dumps(item,cls=BytesEncoder,ensure_ascii = False)+',\n'
            self.detailf.write(content)
            self.company_exporter.export_item(item)
            return item
    def close_spider(self,spider):
        """Flush exporters and close files when the spider finishes."""
        # NOTE(review): self.company_file is never closed here -- likely a bug.
        self.f.close()
        self.detailf.close()
        self.exporter.finish_exporting()
        self.company_exporter.finish_exporting()
        self.file.close()
| DunShou/ScrapyItem | companyMessage/pipelines.py | pipelines.py | py | 2,571 | python | en | code | 0 | github-code | 13 |
32078177510 | #퍼셉트론 구현하기
def AND(x1, x2):
    """Single perceptron AND gate: fires (1) only when both inputs are 1."""
    w1, w2, theta = 0.5, 0.5, 0.7
    weighted = x1 * w1 + x2 * w2
    # Output 1 when the weighted sum exceeds the threshold, else 0.
    return 1 if weighted > theta else 0
# Truth-table check for AND; the bare string below documents the
# expected output ("출력" means "output").
print(AND(0,0))
print(AND(1,0))
print(AND(0,1))
print(AND(1,1))
"""출력
0
0
0
1
"""
# Expressing the threshold as a bias term (b = -theta)
import numpy as np
def AND2(x1, x2):
    """AND gate rewritten with a weight vector and bias."""
    inputs = np.array([x1, x2])  # input vector
    weights = np.array([0.5, 0.5])
    bias = -0.7
    activation = np.sum(inputs * weights) + bias
    # Fires when the biased weighted sum is positive.
    return 1 if activation > 0 else 0
# Truth-table check for AND2; expected output shown in the string below.
print(AND2(0,0))
print(AND2(1,0))
print(AND2(0,1))
print(AND2(1,1))
"""출력
0
0
0
1
"""
def NAND(x1, x2):
    """NAND gate: AND with negated weights and bias."""
    total = np.dot(np.array([x1, x2]), np.array([-0.5, -0.5])) + 0.7
    return 0 if total <= 0 else 1
def OR(x1, x2):
    """OR gate: fires when at least one input is 1."""
    total = np.dot(np.array([x1, x2]), np.array([0.2, 0.2])) - 0.1
    return 0 if total <= 0 else 1
# Truth-table checks for NAND and OR; expected outputs are in the strings below.
print(NAND(0,0))
print(NAND(0,1))
print(NAND(1,0))
print(NAND(1,1))
"""출력
1
1
1
0
"""
print(OR(0,0))
print(OR(1,0))
print(OR(0,1))
print(OR(1,1))
"""출력
0
1
1
1
"""
# Multi-layer perceptron: XOR is not linearly separable, so it is built
# by stacking the single-layer gates defined above.
def XOR(x1, x2):
    """XOR gate as a two-layer network: AND(NAND(x), OR(x))."""
    hidden = (NAND(x1, x2), OR(x1, x2))
    return AND(*hidden)
# Truth-table check for XOR; expected output shown in the string below.
print(XOR(0,0))
print(XOR(1,0))
print(XOR(0,1))
print(XOR(1,1))
"""출력
0
1
1
0
"""
17831477410 | from .base import BaseTestCase
from .fixtures import (create_customer_string,
create_book_string, borrow_books_string)
class MutationsTestcase(BaseTestCase):
    """GraphQL mutation tests for customer creation, book creation and lending.

    Relies on BaseTestCase for self.client, self.customer and
    self.create_books(), and on the query strings from .fixtures.
    """
    def test_create_customer(self):
        """Creating a new customer returns a success message."""
        response = self.client.execute(
            create_customer_string.format(username='kafuuma')
        )
        self.assertEqual(response['data']['createCustomer']['success'],
                         'customer account kafuuma was created successfully')
    def test_create_duplicate_customer(self):
        """Creating the same username twice surfaces errors in the payload."""
        self.client.execute(
            create_customer_string.format(username='kafuuma')
        )
        response = self.client.execute(
            create_customer_string.format(username='kafuuma')
        )
        self.assertIn('errors', response['data']['createCustomer'])
    def test_create_book(self):
        """Creating a book echoes a success message and the stock count."""
        response = self.client.execute(
            create_book_string.format(
                title='how to learn c++ in 24hrs',
                total_number=25,
                book_kind='regular'
            )
        )
        self.assertEqual(
            response['data']['createBook']['success'],
            'book how to learn c++ in 24hrs was created successfully')
        self.assertEqual(response['data']['createBook']
                         ['book']['totalNumber'], 25)
    def test_create_dupilcate_title_book(self):
        # NOTE(review): "dupilcate" is a typo in the test name; renaming would
        # be harmless but is out of scope for a documentation pass.
        """A second book with an existing title reports an 'already exists' error."""
        self.client.execute(
            create_book_string.format(
                title='how to learn c++ in 24hrs',
                total_number=1,
                book_kind='novel'
            )
        )
        response = self.client.execute(
            create_book_string.format(
                title='how to learn c++ in 24hrs',
                total_number=25,
                book_kind='novel'
            )
        )
        self.assertIn('already exists',
                      response['data']['createBook']['errors'][0])
    def test_borrow_books(self):
        """Borrowing two books returns both records and the combined price."""
        customer_id = self.customer.id
        books_ids = list(self.create_books())
        response = self.client.execute(
            borrow_books_string.format(
                customer_id=customer_id, books_ids=books_ids,
                days=[4, 5]
            )
        )
        self.assertEqual(
            len(response['data']['lendBooks']['borrowedBooks']), 2)
        self.assertEqual(response['data']['lendBooks']['price'], 11.5)
    def test_borrow_books_exceed_and_finish_all(self):
        """Exhausting the stock yields a null price and an 'over' error."""
        customer_id = self.customer.id
        books_ids = list(self.create_books())
        # Borrow repeatedly until stock runs out; the last response is checked.
        for _ in range(6):
            response = self.client.execute(
                borrow_books_string.format(
                    customer_id=customer_id, books_ids=books_ids,
                    days=[7, 8]
                )
            )
        self.assertEqual(response['data']['lendBooks']['price'], None)
        self.assertIn('Books with test book2 are over',
                      response['data']['lendBooks']['errors'])
| kafuuma/Rent-books-app | booksapp/tests/test_mutations.py | test_mutations.py | py | 2,971 | python | en | code | 0 | github-code | 13 |
27389190173 | from flask import Flask, render_template, request, url_for
from datetime import datetime, date
# NOTE(review): computed once at import time -- a long-running server will
# keep serving the startup date across midnight; consider computing per request.
azi = date.today()
app = Flask(__name__)
app.secret_key = "asecretkey"  # NOTE(review): hard-coded secret; move to env for real use
@app.route('/', methods=['POST','GET'])
def home():
    """Landing page: a POSTed name renders the birthday page, else the form.

    Fix vs. the original: comparison to None now uses the identity check
    `is not None` (PEP 8 / E711) instead of `!= None`; behavior is unchanged
    since request.form.get returns None when the field is absent.
    """
    name = request.form.get('name')
    if name is not None:
        return render_template('bday.html', name=name, azi=azi)
    return render_template("home.html")
@app.route('/<name>', methods=['POST','GET'])
def bday(name):
    """Direct link to a personalized birthday page for *name*."""
    return render_template('bday.html', azi=azi, name=name)
def start_ngrok():
    """Open an ngrok tunnel to port 5000 and print its public URL.

    Unused unless the call below is uncommented; requires pyngrok.
    """
    #from flask_ngrok import run_with_ngrok#pip install flask-ngrok
    from pyngrok import ngrok
    url = ngrok.connect(5000)
    print('ngrok url = ', url)
#start_ngrok()
if __name__ == '__main__':
app.run(debug=True)
| iancuioan/DdayFlask | app.py | app.py | py | 766 | python | en | code | 0 | github-code | 13 |
19609893092 | from typing import Dict, List
from src.infra.interfaces import SpaceFlightNewInterfaceRepository
from src.utils.errors import MissingParamError
from src.infra.config import DBConnectionHandler
class SpaceFlightNewRepository(SpaceFlightNewInterfaceRepository):
    """MongoDB-backed repository for the 'spaceFlight' collection.

    Every method opens a connection via DBConnectionHandler. Database
    failures are re-raised as generic Exception (messages preserved from
    the original implementation), now with the underlying error chained as
    __cause__, and the bare `except:` clauses -- which also swallowed
    SystemExit/KeyboardInterrupt -- are narrowed to `except Exception`.
    """
    def insert(self, data: Dict = None) -> Dict:
        """Insert one document; return {'inserted_id': <str>}.

        Raises MissingParamError when data is falsy, Exception on DB failure.
        """
        if not data:
            raise MissingParamError("data")
        try:
            with DBConnectionHandler() as connection:
                collection = connection.get_collection("spaceFlight")
                inserted_space_flight = collection.insert_one(data)
                return {"inserted_id": str(inserted_space_flight.inserted_id)}
        except Exception as e:
            raise Exception("server error") from e
    def find(self, limit_page: int, skip_page: int) -> List[Dict]:
        """Return one page of documents ('_id' stringified); skip_page is 1-based."""
        try:
            data = None
            with DBConnectionHandler() as connection:
                collection = connection.get_collection("spaceFlight")
                skips = limit_page * (skip_page - 1)
                space_flights = [
                    i for i in collection.find().skip(skips).limit(limit_page)
                ]
                data = [{**i, "_id": str(i["_id"])} for i in space_flights]
            return data
        except Exception as e:
            raise Exception("Server error") from e
    def find_one(self, id: int = None) -> Dict:
        """Return the document whose 'id' field matches, with '_id' stringified."""
        try:
            data = None
            with DBConnectionHandler() as connection:
                collection = connection.get_collection("spaceFlight")
                # NOTE(review): an empty projection dict ({}) makes PyMongo
                # return only '_id'; if all fields are wanted, the second
                # argument should be dropped -- confirm intent.
                space_flight = collection.find_one({"id": id}, {})
                data = {**space_flight, "_id": str(space_flight["_id"])}
            return data
        except Exception as e:
            raise Exception("Server error") from e
    def update(self, id: int = None, data: Dict = None) -> Dict:
        """$set the given fields on the matching document; return modified count."""
        try:
            space_flight = None
            with DBConnectionHandler() as connection:
                collection = connection.get_collection("spaceFlight")
                space_flight = collection.update_one({"id": id}, {"$set": data})
            return {"modified_count": space_flight.modified_count}
        except Exception as e:
            raise Exception("Server error") from e
    def delete(self, id: int = None) -> Dict:
        """Delete the matching document; return deleted count."""
        try:
            space_flight = None
            with DBConnectionHandler() as connection:
                collection = connection.get_collection("spaceFlight")
                space_flight = collection.delete_one({"id": id})
            return {"deleted_count": space_flight.deleted_count}
        except Exception as e:
            raise Exception("Server error") from e
| joaoo-vittor/back-end-challenge | src/infra/repo/space_flight_new_repository.py | space_flight_new_repository.py | py | 2,576 | python | en | code | 0 | github-code | 13 |
21651845006 | from tweepy import Stream
from tweepy import OAuthHandler
import time
from tweepy.streaming import StreamListener
# SECURITY: hard-coded Twitter API credentials committed to source control.
# These keys should be considered compromised: rotate them and load from
# environment variables or an untracked config file instead.
ckey='eZSxJEtGtCY5SqcVh3cZUbf27'
csecretkey='Ub7vovbs2M8uCAKBuGslBy4Sb9ArHOXFaRYhtp12k5ZMQDOZOF'
atoken='449479332-1dha1NMfojFmuY1tBuNjrzHmZnJSRt8bhgejrV0p'
asecret='hpmw39VPT5m3XlFmuDW416u24melcqLVur0E8vZQfLefJ'
class listener(StreamListener):
    """tweepy stream listener that appends each raw tweet JSON to twitDB.csv."""
    def on_data(self, data):
        """Echo the payload, persist it, and return True to keep streaming.

        Fixes vs. the original: the file is now opened with a context
        manager, so the handle is closed even if a write raises (the
        original leaked it between open() and close()); the error message
        typo 'filed connect' is corrected to 'failed connect'.
        """
        try:
            print(data)
            with open('twitDB.csv', 'a') as saveFile:
                saveFile.write(data)
                saveFile.write('\n')
            return True
        except Exception as e:
            print('failed connect', e)
            # Back off briefly before tweepy hands us the next payload.
            time.sleep(5)
    def on_error(self, status_code):
        """Log the HTTP status tweepy reports on stream errors."""
        print (status_code)
# Authenticate with Twitter and start streaming tweets that match "car";
# each raw JSON payload is handled by listener.on_data above. Blocks forever.
auth = OAuthHandler(ckey, csecretkey)
auth.set_access_token(atoken, asecret)
twitterStreaming = Stream(auth, listener())
twitterStreaming.filter(track=["car"])
| sdquintana/StreamingAPI | Streaming.py | Streaming.py | py | 924 | python | en | code | 0 | github-code | 13 |
20074687346 | import json
import re
import requests
######################################################
########## TRAITEMENT DU FICHIER LIGNES TAO ##########
def traitement():
    """Fetch the Orleans cycle-path dataset and emit target/lignes_velo.sql.

    Falls back to a local JSON copy when the API is unreachable, normalizes
    each record, converts GeoJSON (Multi)LineString coordinates into a WKT
    coordinate list, then writes a CREATE TABLE plus one INSERT per path.
    """
    raw_data = []
    # rows=-1 asks the Opendatasoft API for every record in one response.
    res = requests.get('https://data.orleans-metropole.fr/api/records/1.0/search/?dataset=referentielbdauao_dep_iti_cyclables&facet=commune&facet=quartier&facet=type_iti&rows=-1')
    if res:
        raw_data = res.json()['records']
    else:
        # Offline fallback: use the checked-in snapshot of the dataset.
        with open('resources/velos/lignes_velo.json') as json_data:
            raw_data = json.load(json_data)
    clean_data = []
    id_generator = 0  # sequential primary key for the SQL rows
    for rd in raw_data:
        id_generator += 1
        new_data = {
            "ligne_id" : id_generator,
            "ligne_name" : "Piste cyclable {}".format(id_generator),
            "geometry" : rd["fields"]["geo_shape"],
            "commune" : rd["fields"]["commune"],
            "longueur" : rd["fields"]["longueur"],
            "sens_cycl" : rd["fields"]["sens_cycl"],
            "numero" : int(rd["fields"]["numero"]),
            "rue" : rd["fields"].get("rue", "0")
        }
        clean_data.append(new_data)
    # Pre-process geometry coordinates: flatten GeoJSON coordinate arrays
    # into a WKT-style "(x y, x y, ...)" string.
    # NOTE(review): for MultiLineString only the first line part ([0]) is
    # kept -- additional parts are silently dropped; confirm intended.
    for data in clean_data:
        if(data["geometry"]["type"] == "MultiLineString"):
            res = "("
            for coord in data["geometry"]["coordinates"][0]:
                res += ""+str(coord[0])+" "+str(coord[1])+", "
            data["geometry"]["coordinates"] = res[:-2]+")"
        elif(data["geometry"]["type"] == "LineString"):
            res = "("
            for coord in data["geometry"]["coordinates"]:
                res += ""+str(coord[0])+" "+str(coord[1])+", "
            data["geometry"]["coordinates"] = res[:-2]+")"
    # Write the table definition (truncates any previous file).
    with open('target/lignes_velo.sql', 'w') as sql_data:
        sql_data.write(
            """-- Table: public.lignes_velo
-- DROP TABLE public.lignes_velo;
CREATE TABLE public.lignes_velo
(
    id integer NOT NULL,
    geom geometry(MULTILINESTRING, 4326),
    name character varying(50),
    commune character varying(50),
    longueur numeric,
    sens character varying(25),
    numero numeric,
    rue character varying(80),
    CONSTRAINT lignes_velo_pkey PRIMARY KEY (id)
);
""")
    # Append one INSERT per path; single quotes in text fields are escaped
    # by doubling ('' is the SQL escape for ').
    with open('target/lignes_velo.sql', "a") as sql_data:
        for elem in clean_data:
            sql_data.write("INSERT INTO public.lignes_velo (id, name, geom, commune, longueur, sens, numero, rue) VALUES ({}, \'{}\', ST_GeomFromText(\'MULTILINESTRING({})\', {}), \'{}\', {}, \'{}\', {}, \'{}\');\n"
            .format(elem["ligne_id"], elem["ligne_name"].replace("'", "''"), elem["geometry"]["coordinates"], 4326, elem["commune"].replace("'", "''"), elem["longueur"], elem["sens_cycl"].replace("'", "''"), elem["numero"], elem["rue"].replace("'", "''")))
| robinanthony/celc | bdd/resources/scripts_bdd/lignes_velo.py | lignes_velo.py | py | 2,833 | python | en | code | 0 | github-code | 13 |
42054042398 | import sys
# Optional pretty-traceback hook; harmless if frosch is not installed.
try:
    import frosch
    frosch.hook()
except ImportError:
    pass
# Competitive-programming input helpers: one int, a list of ints, a raw line.
ini = lambda: int(sys.stdin.readline())
inl = lambda: [int(x) for x in sys.stdin.readline().split()]
ins = lambda: sys.stdin.readline().rstrip()
# Debug print in yellow to stderr so it does not pollute the judged output.
debug = lambda *a, **kw: print("\033[33m", *a, "\033[0m", **dict(file=sys.stderr, **kw))
def solve():
    """Return True iff every character at an even 0-based index is lowercase
    and every character at an odd index is not lowercase."""
    s = ins()
    return all(c.islower() == (i % 2 == 0) for i, c in enumerate(s))
print(["No", "Yes"][solve()])
| keijak/comp-pub | atcoder/abc192/B/main.py | main.py | py | 580 | python | en | code | 0 | github-code | 13 |
928031303 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, AutoToolsBuildEnvironment, tools
import os
class GPGErrorConan(ConanFile):
    """Conan recipe for libgpg-error: fetch the upstream tarball, build it
    with autotools, and package headers, libraries and helper binaries.
    Windows (Visual Studio) builds are not supported.
    """
    name = "libgpg-error"
    version = "1.24"
    url = "http://github.com/DEGoodmanWilson/conan-libgpg-error"
    description = "Libgpg-error is a small library that originally defined common error values for all GnuPG components."
    license = "https://www.gnupg.org/documentation/manuals/gnupg/Copying.html#Copying"
    exports_sources = ["CMakeLists.txt"]
    settings = "os", "arch", "compiler", "build_type"
    options = {"shared": [True, False]}
    default_options = "shared=False"
    def configure(self):
        # Because this is pure C, the C++ standard library setting is irrelevant.
        del self.settings.compiler.libcxx
    def source(self):
        # Download and unpack the release tarball, then normalize the
        # directory name so the rest of the recipe can use "sources".
        source_url = "https://www.gnupg.org/ftp/gcrypt/libgpg-error"
        tools.get("{0}/libgpg-error-{1}.tar.bz2".format(source_url, self.version))
        extracted_dir = self.name + "-" + self.version
        os.rename(extracted_dir, "sources")
    def build(self):
        """Run ./configure && make with flags derived from the conan options."""
        if self.settings.compiler == 'Visual Studio':
            # self.build_vs()
            self.output.fatal("No windows support yet. Sorry. Help a fellow out and contribute back?")
        with tools.chdir("sources"):
            env_build = AutoToolsBuildEnvironment(self)
            env_build.fpic = True
            config_args = []
            # Translate each conan option into the matching configure switch;
            # 'shared' maps to the enable/disable shared/static pair.
            for option_name in self.options.values.fields:
                if(option_name == "shared"):
                    if(getattr(self.options, "shared")):
                        config_args.append("--enable-shared")
                        config_args.append("--disable-static")
                    else:
                        config_args.append("--enable-static")
                        config_args.append("--disable-shared")
                else:
                    activated = getattr(self.options, option_name)
                    if activated:
                        self.output.info("Activated option! %s" % option_name)
                        config_args.append("--%s" % option_name)
            # This is a terrible hack to make cross-compiling on Travis work
            if (self.settings.arch=='x86' and self.settings.os=='Linux'):
                env_build.configure(args=config_args, host="i686-linux-gnu") #because Conan insists on setting this to i686-linux-gnueabi, which smashes gpg-error hard
            else:
                env_build.configure(args=config_args)
            env_build.make()
    def package(self):
        """Copy headers, built libraries and helper binaries into the package."""
        self.copy("*.h", "include", "sources/src", keep_path=True)
        # self.copy(pattern="*.dll", dst="bin", src="bin", keep_path=False)
        self.copy(pattern="*.lib", dst="lib", src="sources", keep_path=False)
        self.copy(pattern="*.a", dst="lib", src="sources", keep_path=False)
        self.copy(pattern="*.so*", dst="lib", src="sources", keep_path=False)
        self.copy(pattern="*.dylib", dst="lib", src="sources", keep_path=False)
        # binaries
        self.copy("gen-posix-lock-obj", dst="bin", src="sources/src", keep_path=False)
        self.copy("gpg-error", dst="bin", src="sources/src", keep_path=False)
        self.copy("gpg-error-config", dst="bin", src="sources/src", keep_path=False)
        self.copy("mkerrcodes", dst="bin", src="sources/src", keep_path=False)
        self.copy("mkheader", dst="bin", src="sources/src", keep_path=False)
    def package_info(self):
        # Expose whatever libraries were actually produced to consumers.
        self.cpp_info.libs = tools.collect_libs(self)
| DEGoodmanWilson/conan-libgpg-error | conanfile.py | conanfile.py | py | 3,511 | python | en | code | 0 | github-code | 13 |
20934317193 | ###############################################
#
# Author: Aniruddha Gokhale
# Vanderbilt University
#
# Purpose: Skeleton/Starter code for the subscriber application
#
# Created: Spring 2023
#
###############################################
# This is left as an exercise to the student. Design the logic in a manner similar
# to the PublisherAppln. As in the publisher application, the subscriber application
# will maintain a handle to the underlying subscriber middleware object.
#
# The key steps for the subscriber application are
# (1) parse command line and configure application level parameters
# (2) obtain the subscriber middleware object and configure it.
# (3) As in the publisher, register ourselves with the discovery service
# (4) since we are a subscriber, we need to ask the discovery service to
# let us know of each publisher that publishes the topic of interest to us. Then
# our middleware object will connect its SUB socket to all these publishers
# for the Direct strategy else connect just to the broker.
# (5) Subscriber will always be in an event loop waiting for some matching
# publication to show up. We also compute the latency for dissemination and
# store all these time series data in some database for later analytics.
# import the needed packages
import os # for OS functions
import sys # for syspath and system exception
import time # for sleep
import argparse # for argument parsing
import configparser # for configuration parsing
import logging # for logging. Use it in place of print statements.
from topic_selector import TopicSelector
from CS6381_MW.SubscriberMW import SubscriberMW
from enum import Enum # for an enumeration we are using to describe what state we are in
from CS6381_MW import discovery_pb2
MAX_MESSAGES_RECEIVED = 1000
##################################
# SubscriberAppln class
##################################
class SubscriberAppln():
    """Subscriber application: registers with discovery, looks up publishers,
    then listens for publications (driven by middleware upcalls)."""
    # these are the states through which our subscriber appln object goes thru.
    # We maintain the state so we know where we are in the lifecycle and then
    # take decisions accordingly
    class State (Enum):
        # NOTE(review): the trailing commas make every value below except
        # COMPLETED a one-element tuple, e.g. (0,). Harmless because the code
        # only compares enum members by identity, but worth normalizing.
        INITIALIZE = 0,
        CONFIGURE = 1,
        REGISTER = 2,
        ISREADY = 3,
        LOOKUP = 4,
        LISTEN = 5,
        COMPLETED = 6
    def __init__(self, logger):
        """Initialize bookkeeping; real setup happens in configure()."""
        self.state = self.State.INITIALIZE  # lifecycle state machine position
        self.name = None
        self.topiclist = None # different topics to which we subscribe
        self.num_topics = None # number of topics to which we subscribe
        self.lookup = None # lookup method to use
        self.dissemination = None # direct or via broker
        self.mw_obj = None # middleware object
        self.logger = logger
        # NOTE(review): "meesages" is a misspelling of "messages"; kept as-is
        # because handle_data() references this exact attribute name.
        self.meesages_received = 0
    ########################################
    # configure/initialize
    ########################################
    def configure(self, args):
        ''' Initialize the object: parse config.ini, pick topics, set up the
        middleware. `args` is the argparse namespace (name, num_topics, config,
        plus middleware options forwarded to SubscriberMW.configure). '''
        try:
            # Here we initialize any internal variables
            self.logger.info("SubscriberAppln::configure")
            # set our current state to CONFIGURE state
            self.state = self.State.CONFIGURE
            # initialize our variables
            self.name = args.name # our name
            self.num_topics = args.num_topics # total num of topics we listen to
            # Now, get the configuration object
            self.logger.debug(
                "SubscriberAppln::configure - parsing config.ini")
            config = configparser.ConfigParser()
            config.read(args.config)
            self.lookup = config["Discovery"]["Strategy"]
            self.dissemination = config["Dissemination"]["Strategy"]
            # Now get our topic list of interest
            self.logger.debug(
                "SubscriberAppln::configure - selecting our topic list")
            ts = TopicSelector()
            # let topic selector give us the desired num of topics
            self.topiclist = ts.interest(self.num_topics)
            # Now setup up our underlying middleware object to which we delegate
            # everything
            self.logger.debug(
                "SubscriberAppln::configure - initialize the middleware object")
            self.mw_obj = SubscriberMW(self.logger, self.topiclist)
            # pass remainder of the args to the m/w object
            self.mw_obj.configure(args)
            self.logger.info(
                "SubscriberAppln::configure - configuration complete")
        except Exception as e:
            raise e
    def dump(self):
        """Log the application's configuration for debugging."""
        try:
            self.logger.info("**********************************")
            self.logger.info("SubscriberAppln::dump")
            self.logger.info("------------------------------")
            self.logger.info(" Name: {}".format(self.name))
            self.logger.info(" Lookup: {}".format(self.lookup))
            self.logger.info(
                " Dissemination: {}".format(self.dissemination))
            self.logger.info(" Num Topics: {}".format(self.num_topics))
            self.logger.info(" TopicList: {}".format(self.topiclist))
            self.logger.info("**********************************")
        except Exception as e:
            raise e
    ########################################
    # handle isready response method called as part of upcall
    #
    # Also a part of upcall handled by application logic
    ########################################
    def isready_response(self, isready_resp):
        ''' handle isready response

        Upcall from the middleware event loop. If discovery is not ready,
        sleep and retry; otherwise advance to LOOKUP. Returns 0 so the event
        loop immediately re-enters invoke_operation().
        '''
        try:
            self.logger.info("SubscriberAppln::isready_response")
            # Notice how we get that loop effect with the sleep (10)
            # by an interaction between the event loop and these
            # upcall methods.
            if not isready_resp.status:
                # discovery service is not ready yet
                self.logger.debug(
                    "SubscriberAppln::driver - Not ready yet; check again")
                # sleep between calls so that we don't make excessive calls
                time.sleep(10)
            else:
                # we got the go ahead
                # set the state to LOOKUP
                self.state = self.State.LOOKUP
            # return timeout of 0 so event loop calls us back in the invoke_operation
            # method, where we take action based on what state we are in.
            return 0
        except Exception as e:
            raise e
    def lookup_response(self):
        ''' Upcall: lookup finished, so advance to LISTEN. Returning None
        leaves the event loop waiting for incoming publications. '''
        try:
            self.logger.info("SubscriberAppln::islookup_response")
            # set the state to listen
            self.state = self.State.LISTEN
            # return timeout of 0 so event loop calls us back in the invoke_operation
            # method, where we take action based on what state we are in.
            return None
        except Exception as e:
            raise e
    def handle_data(self, strRcvd):
        ''' handle data

        Upcall for each received publication. Counts messages; once
        MAX_MESSAGES_RECEIVED arrive, moves to COMPLETED and returns 0 so the
        event loop re-enters invoke_operation(); otherwise returns None to
        keep listening.
        '''
        try:
            self.logger.info("SubscriberAppln::handle_data")
            self.logger.info(
                "SubscriberAppln: Received a message {}".format(strRcvd))
            self.meesages_received += 1
            # return timeout of 0 so event loop calls us back in the invoke_operation
            # method, where we take action based on what state we are in.
            if self.meesages_received == MAX_MESSAGES_RECEIVED:
                self.state = self.State.COMPLETED
                return 0
            return None
        except Exception as e:
            raise e
def invoke_operation(self):
    """Upcall: perform the action associated with our current state.

    Always returns None: after each outgoing request we expect a reply
    from the remote entity, so the event loop should block until the
    reply triggers the next upcall.
    """
    try:
        self.logger.info("SubscriberAppln::invoke_operation")
        current = self.state
        if current == self.State.COMPLETED:
            # All done: tell the middleware to break out of its event loop.
            self.mw_obj.disable_event_loop()
            return None
        if current == self.State.REGISTER:
            # Introduce ourselves (name + topic list) to discovery.
            self.logger.debug(
                "SubscriberAppln::invoke_operation - register with the discovery service")
            self.mw_obj.register(self.name, self.topiclist)
        elif current == self.State.ISREADY:
            # Poll discovery until the whole system is ready to go.
            self.logger.debug(
                "SubscriberAppln::invoke_operation - check if are ready to go")
            self.mw_obj.is_ready()
        elif current == self.State.LOOKUP:
            # Ask discovery where our topics' publishers live.
            self.logger.debug(
                "SubscriberAppln::invoke_operation - start looking up")
            self.mw_obj.lookup()
        else:
            raise ValueError("Undefined state of the appln object")
        # A request has just gone out; block in the event loop until the
        # matching reply arrives as the next upcall.
        return None
    except Exception as e:
        raise e
def register_response(self, reg_resp):
    """Upcall: process the discovery service's register reply.

    On success, advance to ISREADY and return 0 so the event loop
    immediately re-invokes us; on failure, raise ValueError.
    """
    try:
        self.logger.info("SubscriberAppln::register_response")
        if reg_resp.status == discovery_pb2.STATUS_SUCCESS:
            self.logger.debug(
                "SubscriberAppln::register_response - registration is a success")
            # Next step: keep polling the discovery service until ready.
            self.state = self.State.ISREADY
            # Timeout of zero => immediate upcall on the next loop pass.
            return 0
        # BUG FIX: the failure path referenced an undefined name
        # ``response`` (NameError); use the actual reply ``reg_resp``.
        # Also corrected the copy-pasted "Publisher" wording.
        self.logger.debug(
            "SubscriberAppln::register_response - registration is a failure with reason {}".format(reg_resp.reason))
        raise ValueError("Subscriber needs to have unique id")
    except Exception as e:
        raise e
########################################
# driver program
########################################
def driver(self):
    """Driver program.

    Wires the middleware upcall handle back to this object, sets the
    initial REGISTER state, and enters the middleware event loop.
    """
    try:
        self.logger.info("SubscriberAppln::driver")
        # dump our contents (debugging purposes)
        self.dump()
        # Hand the middleware a pointer back to us so that anything
        # needing application-level decisions arrives as an upcall.
        self.logger.debug("SubscriberAppln::driver - upcall handle")
        self.mw_obj.set_upcall_handle(self)
        self.state = self.State.REGISTER
        # Timeout of zero hands control straight back to
        # invoke_operation (which sends the register request); after
        # that the loop blocks whenever we await a remote reply.
        self.mw_obj.event_loop(timeout=0)  # start the event loop
        # BUG FIX: completion message wrongly said "PublisherAppln".
        self.logger.info("SubscriberAppln::driver completed")
    except Exception as e:
        raise e
def parseCmdLineArgs(argv=None):
    """Parse the subscriber's command line arguments.

    :param argv: optional list of argument strings; defaults to
        ``sys.argv[1:]``. Passing an explicit list makes the function
        unit-testable (backward-compatible addition).
    :return: argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description="Subscriber Application")
    # At a minimum we need the lookup service's IP/port, our own
    # advertised endpoint, and how many topics to subscribe to.
    parser.add_argument("-n", "--name", default="sub",
                        help="Some name assigned to us. Keep it unique per subscriber")
    parser.add_argument("-a", "--addr", default="localhost",
                        help="IP addr of this subscriber to advertise (default: localhost)")
    parser.add_argument("-p", "--port", type=int, default=5588,
                        help="Port number on which our underlying subscriber ZMQ service runs, default=5588")
    parser.add_argument("-d", "--discovery", default="localhost:5555",
                        help="IP Addr:Port combo for the discovery service, default localhost:5555")
    parser.add_argument("-T", "--num_topics", type=int, choices=range(1, 10), default=1,
                        help="Number of topics to listen to, currently restricted to max of 9")
    # FIX: help text typo "(de fault: ...)".
    parser.add_argument("-c", "--config", default="config.ini",
                        help="configuration file (default: config.ini)")
    # FIX: help text claimed "default 20=logging.INFO" but the actual
    # default is logging.DEBUG (10).
    parser.add_argument("-l", "--loglevel", type=int, default=logging.DEBUG, choices=[
                        logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL],
                        help="logging level, choices 10,20,30,40,50: default 10=logging.DEBUG")
    return parser.parse_args(argv)
###################################
#
# Main program
#
###################################
def main():
    """Main program: parse arguments, build the subscriber app, run it."""
    # FIX: acquire the child logger *before* the try block so the
    # except clause can never hit an unbound ``logger`` name.
    logger = logging.getLogger("SubscriberAppln")
    try:
        logging.info(
            "Main - acquire a child logger and then log messages in the child")
        # first parse the arguments
        logger.debug("Main: parse command line arguments")
        args = parseCmdLineArgs()
        # reset the log level to as specified
        logger.debug("Main: resetting log level to {}".format(args.loglevel))
        logger.setLevel(args.loglevel)
        logger.debug("Main: effective log level is {}".format(
            logger.getEffectiveLevel()))
        # obtain and configure the subscriber application
        logger.debug("Main: obtain the subscriber appln object")
        sub_app = SubscriberAppln(logger)
        logger.debug("Main: configure the subscriber appln object")
        sub_app.configure(args)
        # now invoke the driver program
        logger.debug("Main: invoke the subscriber appln driver")
        sub_app.driver()
    except Exception as e:
        logger.error("Exception caught in main - {}".format(e))
        return
if __name__ == "__main__":
    # set underlying default logging capabilities: root logger at DEBUG
    # with a timestamped format (main() then adjusts the child logger's
    # level per the -l command line option)
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    main()
| saydus/distributed-hw1 | SubscriberAppln.py | SubscriberAppln.py | py | 16,304 | python | en | code | 0 | github-code | 13 |
30118227450 | from django import forms
from django.contrib.auth.models import User
from .models import Profile
from django.views.generic import FormView
from django.urls import reverse
from paypal.standard.forms import PayPalPaymentsForm
class ProfileUpdateForm(forms.ModelForm):
    """ModelForm for editing a user's Profile (image, bio, location)."""
    class Meta:
        # Bind the form to the Profile model and whitelist the
        # user-editable fields.
        model=Profile
        fields=['image', 'bio', "location"]
class PaypalFormView(FormView):
    """Renders a django-paypal payment form pre-populated with example
    transaction data (see get_initial)."""
    template_name = 'paypal_form.html'
    form_class = PayPalPaymentsForm

    def get_initial(self):
        """Initial data for the PayPal form.

        NOTE(review): the business address, amount, item name and
        invoice number are hard-coded placeholders -- confirm they are
        replaced with real, per-order values before production use.
        """
        return {
            "business": 'your-paypal-business-address@example.com',
            "amount": 20,
            "currency_code": "EUR",
            "item_name": 'Example item',
            "invoice": 1234,
            # IPN/redirect URLs are resolved against the current request.
            "notify_url": self.request.build_absolute_uri(reverse('paypal-ipn')),
            "return_url": self.request.build_absolute_uri(reverse('paypal-return')),
            "cancel_return": self.request.build_absolute_uri(reverse('paypal-cancel')),
            "lc": 'EN',
            "no_shipping": '1',
        }
| dustyj1984/handygig01 | accounts/forms.py | forms.py | py | 1,024 | python | en | code | 0 | github-code | 13 |
7017992250 |
class classBecauseINeedToTurnTheseIntoMethodsForSomeReason:
    """Console input helpers: validated float/int entry and a numbered
    selection menu."""

    def checkFloat(self, inu):
        """Validate *inu* as a non-negative float literal.

        Returns the string form of the number (whole numbers are padded
        with ".00"), or False when the input is not a valid float.

        BUG FIX: the original accepted strings with multiple decimal
        points (e.g. "1.2.3"), a lone ".", and the empty string -- all
        of which made the float() conversion in getFloatOnly crash.
        """
        floatList = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "."]
        t = ""
        r = 0
        for l in inu:
            print(l)  # echo each character (kept from original behavior)
            if l in floatList:
                t += l
            else:
                r += 1
        # Reject: any foreign character, more than one decimal point,
        # or no digits at all (covers "" and ".").
        if r != 0 or t.count(".") > 1 or not any(ch.isdigit() for ch in t):
            return False
        if "." not in t:
            t += ".00"  # normalize whole numbers to a float-looking string
        return t

    def checkInt(self, inu):
        """Return *inu* unchanged when it is a non-empty string of
        digits, otherwise False.

        BUG FIX: the original returned "" for empty input, which made
        int("") in getIntOnly raise ValueError.
        """
        intList = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
        t = ""
        r = 0
        for l in inu:
            print(l)  # echo each character (kept from original behavior)
            if l in intList:
                t += l
            else:
                r += 1
        if r == 0 and t:
            return t
        return False

    def getFloatOnly(self, strMsg):
        """Prompt with *strMsg* until the user enters a valid float;
        return it as a float."""
        while True:
            candidate = self.checkFloat(input(strMsg))
            if candidate is not False:
                return float(candidate)
            print("Please enter a float")

    def getIntOnly(self, strMsg):
        """Prompt with *strMsg* until the user enters only digits;
        return the value as an int."""
        while True:
            candidate = self.checkInt(input(strMsg))
            if candidate is not False:
                return int(candidate)
            print("Please enter only integers")

    def createMenu(self, msg, *argv):
        """Print *msg* and a numbered menu of the *argv* options, then
        loop until the user picks one; return the selection number as a
        string (e.g. "1")."""
        numbList = []
        print(" ")
        print(msg)
        print(" ")
        e = 0
        for i in argv:
            e += 1
            print(f"{e}. {i}")
            numbList += [str(e)]
        print(numbList)  # kept: shows the accepted selections
        while True:
            r = input("Enter your selection: ")
            if r in numbList:
                return r
            print("Invalid input. Please enter one of the selections above")
| landynS8990/posSys2 | inputControl.py | inputControl.py | py | 4,927 | python | en | code | 0 | github-code | 13 |
18254582323 | #coding:utf-8
# 2018-3-16
# build by qianqians
# genjs
import sys
sys.path.append("./")
sys.path.append("./parser")
import os
import jparser
from checkAndPretreatCommon import *
def gen(inputdir, lang, outputdir):
    """Generate per-file "common" code for every .juggle file in
    *inputdir*, writing <name>_common.cs (csharp) or .ts into
    *outputdir*.

    NOTE(review): sys.path is mutated so that the csharp/ts variant of
    ``gen_common_impl`` is imported; ``global_argv`` is presumed to come
    from the ``checkAndPretreatCommon`` star-import -- confirm.
    """
    syspath = "./common/"
    c_suffix = ""
    # Select the language-specific helper module directories.
    if lang == 'csharp':
        sys.path.append("./struct/csharp")
        sys.path.append("./enum/csharp")
        sys.path.append("./tools/csharp")
        syspath += "csharp/"
        c_suffix = "cs"
    elif lang == 'ts':
        sys.path.append("./struct/ts")
        sys.path.append("./enum/ts")
        sys.path.append("./tools/ts")
        syspath += "ts/"
        c_suffix = "ts"
    # Import the matching code-generator implementation, then drop the
    # temporary path entry again.
    sys.path.append(syspath)
    import gen_common_impl
    sys.path.remove(syspath)
    if not os.path.isdir(outputdir):
        os.mkdir(outputdir)
    defmodulelist = []
    lexical_tree = []
    # First pass: parse every .juggle file and collect its lexical tree.
    for filename in os.listdir(inputdir):
        fname = os.path.splitext(filename)[0]
        fex = os.path.splitext(filename)[1]
        if fex != '.juggle':
            continue
        file = open(inputdir + '//' + filename, 'r')
        genfilestr = file.readlines()
        module, enum, struct = jparser.parser(genfilestr)
        checkAndPretreatCommon(fname, module, enum, struct, defmodulelist)
        lexical_tree.append((fname, enum, struct))
    # Second pass: emit the generated code for each parsed file.
    for fname, enum, struct in lexical_tree:
        global_argv.quote_file_list = []
        callercode = gen_common_impl.gen(fname, enum, struct)
        file = open(outputdir + '//' + fname + '_common.' + c_suffix, 'w')
        file.write(callercode)
        file.close()
if __name__ == '__main__':
gen(sys.argv[1], sys.argv[2], sys.argv[3])
| qianqians/discard | abelkan_rpc_typescript_csharp/gencommon.py | gencommon.py | py | 1,915 | python | en | code | 1 | github-code | 13 |
73240313938 | import matplotlib.pyplot as plt
from matplotlib import animation
import numpy as np
from numba import jit
from timeit import default_timer as timer
import random
start = timer()
'''
FRACTAL
Functions and parameters to change the appearance and behavior of the fractals generated
'''
# PARAMETERS TO CHANGE THE FRACTAL GENERATED
seq = "AB" # sequence to alternate r values
a_lb = 2 # b1 lower bound
a_ub = 4 # b1 upper bound
b_lb = 2 # b2 lower bound
b_ub = 4 # b2 upper bound
# PARAMETERS REFINING ACCURACY OF FRACTAL PICTURE GENERATED
num_warmups = 1200 # number of "warmups" or throwaway iterations before computing lyapunov exponent
num_lyap_iterations = 120 # number of iterations used to compute the lyapunov exp
steps = 300 # steps between b1 and b2 values on axes -- higher it is, the better the picture
# LOGISTIC MAP THAT GIVES US THE NEXT X
@jit
def F(x, curr_r):
    """Logistic map: next iterate r * x * (1 - x)."""
    remainder = 1 - x
    return (curr_r * x) * remainder
# DERIVATIVE OF F -- USED TO COMPUTE THE LYAPUNOV EXPONENT
@jit
def Fprime(x, curr_r):
    """Derivative of the logistic map, r * (1 - 2x), with guards.

    Assumes the result is a NumPy array (curr_r is an array of
    parameter values in this script, so element-wise masked assignment
    works) -- TODO confirm for scalar callers. Zeros are nudged to
    0.0001 and infinities clamped to +/-1000 so that the log() taken in
    the Lyapunov sum never blows up.
    """
    ans = curr_r * (1 - (2 * x))
    ans[ans == 0] = 0.0001  # avoid log(0) downstream
    ans[ans == -np.inf] = -1000  # clamp overflow to finite values
    ans[ans == np.inf] = 1000
    return ans
# CREATING RANDOM SEQUENCE WITH A LENGTH OF THE TOTAL NUMBER OF ITERATIONS
# EACH ITERATION THE PROBABILITY WILL BE JUDGED AGAINST THIS LIST
@jit
def getrandomseq():
    """Return one random int in [0, 99] per total iteration
    (warmups + Lyapunov iterations); each iteration's probability test
    is judged against its entry in this list."""
    total = num_lyap_iterations + num_warmups
    return [random.randint(0, 99) for _ in range(total)]
# RETURNS THE CORRECT B-VALUE BASED ON THE CURRENT ITERATION
@jit
def getseqval(curr_iteration, a, b, probability, problist):
    """Pick the r-value (a or b) for this iteration.

    The letter of ``seq`` at position (curr_iteration mod len(seq))
    nominally selects a (for 'A') or b; the pre-drawn random number in
    ``problist`` may flip that choice.
    NOTE(review): ``probability <= randnum`` keeps the *unflipped*
    value, i.e. the switch happens when randnum < probability --
    confirm this matches the intended switch-probability semantics.
    """
    randnum = problist[curr_iteration]
    index = np.mod(curr_iteration, len(seq))
    if (seq[index] == 'A'):
        if (probability <= randnum):
            return a
        else:
            return b
    else:
        if (probability <= randnum):
            return b
        else:
            return a
# RETURNS THE LYAPUNOV EXPONENT BASED ON THE SPECIFIED B1 AND B2 VALUES
@jit
def getlyapexponent(time_scale, probability, problist):
    """Estimate the Lyapunov exponent of the probabilistic logistic map.

    ``time_scale`` is the (a, b) parameter pair; in this script it is a
    meshgrid pair, so the whole parameter plane is computed at once via
    NumPy broadcasting.
    """
    a, b = time_scale
    lyap_prob = probability
    #print("b1", b1, "b2", b2, "prob", lyap_prob)
    x = .5  # initial value of x
    lyapsum = 0  # accumulates log|F'(x)| over the measured iterations
    # do warmups: discard early iterates so the orbit settles down
    # before measuring
    for i in range(num_warmups):
        x = F(x, getseqval(i, a, b, lyap_prob, problist))
    for i in range(num_warmups, num_lyap_iterations + num_warmups):
        lyapsum += np.log( np.abs(Fprime( x, getseqval(i, a, b, lyap_prob, problist) ) ) )
        # get next x
        x = F( x, getseqval(i, a, b, lyap_prob, problist) )
    return (lyapsum / num_lyap_iterations)
'''
ANIMATING FRACTAL
Making each frame and creating animation to then save with ImageMagick
'''
plt.rcParams["animation.convert_path"] = r"C:\Program Files\ImageMagick-7.0.7-Q16\magick.exe"
plt.rcParams['animation.html'] = 'html5'
# INITIALIZING LIN-SPACE FOR FRACTAL GRID
a = np.linspace(a_lb, a_ub, steps) #range of b1 values
b = np.linspace(b_lb, b_ub, steps) #range of b2 values
aa, bb = np.meshgrid(a, b)
# INITIALIZING GRAPHING ELEMENTS FOR ANIMATION
fig, ax = plt.subplots()
# ADJUSTING GRAPH
lyap_cmap = plt.get_cmap('nipy_spectral') # creating our own colormap to use "set_over" with
lyap_cmap.set_over('black') # any value over vmax is colored black
lyap_cmap.set_under('#5e1d77') # any value under vmin is colored dark purple
plt.title("Probabilistic Lyapunov fractal / SEQ: " + seq)
plt.xlabel("a")
plt.ylabel("b")
# HOW TO DRAW FRAME NUMBER i
def animate(i):
print("frame #:", i)
probability = i
lyap_exponents = []
for i in range(10):
problist = getrandomseq()
lyap_exponents.append(getlyapexponent( (bb, aa), probability, problist ))
grid = np.average(lyap_exponents, axis = 0)
return grid
# CREATING FRACTAL IMAGES
ims = []
for i in range(101):
grid = animate(i)
subtitle = ax.text(0.5, 0.95, "Probability values will switch: {}%".format(i), bbox={'facecolor':'w', 'alpha':0.8, 'pad':5}, ha="center", transform=ax.transAxes,)
im = ax.imshow(grid, cmap = lyap_cmap, vmin = -2, vmax = 0, origin = 'lower', extent = [a_lb, a_ub, b_lb, b_ub])
ims.append([im, subtitle])
anim = animation.ArtistAnimation(fig, ims, interval = 200, blit = True)
anim.save(r"Lyap_Prob.gif", writer="imagemagick")
plt.show()
end = timer()
print("elapsed time: " + str(end - start))
| shaunramsey/FractalExploration | Fractals/Markus-Lyapunov Fractals/ANIMATED_lyapunov_fractal_probabilistic_logistic_map.py | ANIMATED_lyapunov_fractal_probabilistic_logistic_map.py | py | 4,684 | python | en | code | 5 | github-code | 13 |
31200963128 | from st2common.constants.pack import SYSTEM_PACK_NAME
from st2common.models.system.common import ResourceReference
__all__ = [
'WEBHOOKS_PARAMETERS_SCHEMA',
'WEBHOOKS_PAYLOAD_SCHEMA',
'INTERVAL_PARAMETERS_SCHEMA',
'DATE_PARAMETERS_SCHEMA',
'CRON_PARAMETERS_SCHEMA',
'TIMER_PAYLOAD_SCHEMA',
'TIMER_TRIGGER_TYPES',
'INTERNAL_TRIGGER_TYPES',
'SYSTEM_TRIGGER_TYPES'
]
# Internal system triggers which are available for each resource
INTERNAL_TRIGGER_TYPES = {
'action': [
{
'name': 'st2.generic.actiontrigger',
'pack': SYSTEM_PACK_NAME,
'description': 'Trigger encapsulating the completion of an action execution.',
'payload_schema': {
'type': 'object',
'properties': {
'execution_id': {},
'status': {},
'start_timestamp': {},
'action_name': {},
'parameters': {},
'result': {}
}
}
}
],
'sensor': [
{
'name': 'st2.sensor.process_spawn',
'pack': SYSTEM_PACK_NAME,
'description': 'Trigger encapsulating spawning of a sensor process.',
'payload_schema': {
'type': 'object',
'properties': {
'id': {},
'timestamp': {},
'pid': {},
'cmd': {}
}
}
},
{
'name': 'st2.sensor.process_exit',
'pack': SYSTEM_PACK_NAME,
'description': 'Trigger encapsulating exit of a sensor process.',
'payload_schema': {
'type': 'object',
'properties': {
'id': {},
'timestamp': {},
'exit_code': {}
}
}
}
]
}
WEBHOOKS_PARAMETERS_SCHEMA = {
'type': 'object',
'properties': {
'url': {
'type': 'string'
}
},
'required': [
'url'
],
'additionalProperties': False
}
WEBHOOKS_PAYLOAD_SCHEMA = {
'type': 'object'
}
WEBHOOK_TRIGGER_TYPES = {
ResourceReference.to_string_reference(SYSTEM_PACK_NAME, 'st2.webhook'): {
'name': 'st2.webhook',
'pack': SYSTEM_PACK_NAME,
'description': ('Trigger type for registering webhooks that can consume'
' arbitrary payload.'),
'parameters_schema': WEBHOOKS_PARAMETERS_SCHEMA,
'payload_schema': WEBHOOKS_PAYLOAD_SCHEMA
}
}
# Timer specs
INTERVAL_PARAMETERS_SCHEMA = {
"type": "object",
"properties": {
"timezone": {
"type": "string"
},
"unit": {
"enum": ["weeks", "days", "hours", "minutes", "seconds"]
},
"delta": {
"type": "integer"
}
},
"required": [
"unit",
"delta"
],
"additionalProperties": False
}
DATE_PARAMETERS_SCHEMA = {
"type": "object",
"properties": {
"timezone": {
"type": "string"
},
"date": {
"type": "string",
"format": "date-time"
}
},
"required": [
"date"
],
"additionalProperties": False
}
CRON_PARAMETERS_SCHEMA = {
"type": "object",
"properties": {
"timezone": {
"type": "string"
},
"year": {
"type": "integer"
},
"month": {
"type": "integer",
"minimum": 1,
"maximum": 12
},
"day": {
"type": "integer",
"minimum": 1,
"maximum": 31
},
"week": {
"type": "integer",
"minimum": 1,
"maximum": 53
},
"day_of_week": {
"type": "integer",
"minimum": 0,
"maximum": 6
},
"hour": {
"type": "integer",
"minimum": 0,
"maximum": 23
},
"minute": {
"type": "integer",
"minimum": 0,
"maximum": 59
},
"second": {
"type": "integer",
"minimum": 0,
"maximum": 59
}
},
"additionalProperties": False
}
TIMER_PAYLOAD_SCHEMA = {
"type": "object",
"properties": {
"executed_at": {
"type": "string",
"format": "date-time",
"default": "2014-07-30 05:04:24.578325"
},
"schedule": {
"type": "object",
"default": {
"delta": 30,
"units": "seconds"
}
}
}
}
TIMER_TRIGGER_TYPES = {
ResourceReference.to_string_reference(SYSTEM_PACK_NAME, 'st2.IntervalTimer'): {
'name': 'st2.IntervalTimer',
'pack': SYSTEM_PACK_NAME,
'description': 'Triggers on specified intervals. e.g. every 30s, 1week etc.',
'payload_schema': TIMER_PAYLOAD_SCHEMA,
'parameters_schema': INTERVAL_PARAMETERS_SCHEMA
},
ResourceReference.to_string_reference(SYSTEM_PACK_NAME, 'st2.DateTimer'): {
'name': 'st2.DateTimer',
'pack': SYSTEM_PACK_NAME,
'description': 'Triggers exactly once when the current time matches the specified time. '
'e.g. timezone:UTC date:2014-12-31 23:59:59.',
'payload_schema': TIMER_PAYLOAD_SCHEMA,
'parameters_schema': DATE_PARAMETERS_SCHEMA
},
ResourceReference.to_string_reference(SYSTEM_PACK_NAME, 'st2.CronTimer'): {
'name': 'st2.CronTimer',
'pack': SYSTEM_PACK_NAME,
'description': 'Triggers whenever current time matches the specified time constaints like '
'a UNIX cron scheduler.',
'payload_schema': TIMER_PAYLOAD_SCHEMA,
'parameters_schema': CRON_PARAMETERS_SCHEMA
}
}
SYSTEM_TRIGGER_TYPES = dict(WEBHOOK_TRIGGER_TYPES.items() + TIMER_TRIGGER_TYPES.items())
| gtmanfred/st2 | st2common/st2common/constants/triggers.py | triggers.py | py | 6,069 | python | en | code | null | github-code | 13 |
27165078205 | from django.urls import path, include
# from app.api.employee import views
from app.api.accountant import views
urlpatterns = [
path('create/', views.Accountant_createAPIView.as_view(), name='api-accountant-create'),
path('update/<int:id>', views.Accountant_updateAPIView.as_view(), name='api-accountant-update'),
path('delete/<int:id>', views.Accountant_deleteAPIView.as_view(), name='api-accountant-delete'),
path('list/', views.Accountant_listAPIView.as_view(), name='api-accountant-list'),
#
] | shadowwa1k3r/osg_employee | app/api/accountant/urls.py | urls.py | py | 520 | python | en | code | 0 | github-code | 13 |
5072810206 | import glob
import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import KFold
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVC, LinearSVC
from sklearn.tree import DecisionTreeClassifier
rng = 42
np.set_printoptions(threshold=np.inf)
def make_confusion_matrix(cf, feat_type, key, exp_name,
                          group_names=None,
                          categories='auto',
                          count=True,
                          percent=True,
                          cbar=True,
                          xyticks=True,
                          xyplotlabels=True,
                          sum_stats=True,
                          figsize=None,
                          cmap='Blues',
                          title=None):
    '''
    Render a seaborn heatmap of an sklearn confusion matrix and save it
    under ../plots/ as <feat_type>_<key>_<exp_name>_pcap_concat.png.

    Arguments
    ---------
    cf:            confusion matrix to be passed in
    feat_type, key, exp_name: strings used to build the output file name
    group_names:   list of strings shown row by row in each square
    categories:    list of strings for the x/y axis labels; default 'auto'
    count:         if True, show the raw count in each cell
    percent:       if True, show each cell as a percentage of its row
                   (FIX: previously documented under the wrong name
                   "normalize")
    cbar:          if True, show the color bar
    xyticks:       if True, show x and y ticks
    xyplotlabels:  if True, show 'True Label' and 'Predicted Label'
    sum_stats:     if True, append summary statistics below the figure
    figsize:       tuple figure size; matplotlib default when None
    cmap:          matplotlib colormap name
    title:         title for the heatmap
    '''
    # CODE TO GENERATE TEXT INSIDE EACH SQUARE
    blanks = ['' for i in range(cf.size)]
    if group_names and len(group_names) == cf.size:
        group_labels = ["{}\n".format(value) for value in group_names]
    else:
        group_labels = blanks
    if count:
        group_counts = ["{0:0.0f}\n".format(value) for value in cf.flatten()]
    else:
        group_counts = blanks
    if percent:
        # each cell expressed as a fraction of its row total
        row_sum = np.sum(cf, axis=1)
        cf_row_sum = np.array([[value] * len(row_sum) for value in row_sum]).flatten()
        group_percentages = ["{0:.2%}".format(value) for value in cf.flatten() / cf_row_sum]
    else:
        group_percentages = blanks
    box_labels = [f"{v1}{v2}{v3}".strip() for v1, v2, v3 in zip(group_labels, group_counts, group_percentages)]
    box_labels = np.asarray(box_labels).reshape(cf.shape[0], cf.shape[1])
    # CODE TO GENERATE SUMMARY STATISTICS & TEXT FOR SUMMARY STATS
    if sum_stats:
        # Accuracy is sum of diagonal divided by total observations
        accuracy = np.trace(cf) / float(np.sum(cf))
        if len(cf) == 2:
            # Metrics for Binary Confusion Matrices
            precision = cf[1, 1] / sum(cf[:, 1])
            recall = cf[1, 1] / sum(cf[1, :])
            f1_score = 2 * precision * recall / (precision + recall)
            stats_text = "\n\nAccuracy={:0.3f}\nPrecision={:0.3f}\nRecall={:0.3f}\nF1 Score={:0.3f}".format(
                accuracy, precision, recall, f1_score)
        else:
            stats_text = "\n\nAccuracy={:0.3f}".format(accuracy)
    else:
        stats_text = ""
    # SET FIGURE PARAMETERS ACCORDING TO OTHER ARGUMENTS
    if figsize is None:
        # Get default figure size if not set
        figsize = plt.rcParams.get('figure.figsize')
    if not xyticks:
        # Do not show categories if xyticks is False
        categories = False
    # MAKE THE HEATMAP VISUALIZATION
    plt.figure(figsize=figsize)
    # BUG FIX: ``cf / np.sum(cf, axis=1)`` broadcast the row-sum vector
    # across *columns*, so cell colors were divided by the wrong row's
    # total. keepdims=True normalizes each row, matching the row-wise
    # percentages annotated above.
    sns.heatmap(cf / np.sum(cf, axis=1, keepdims=True), annot=box_labels, fmt="", cmap=cmap, cbar=cbar,
                xticklabels=categories, yticklabels=categories)
    if xyplotlabels:
        plt.ylabel('True label')
        plt.xlabel('Predicted label' + stats_text)
    else:
        plt.xlabel(stats_text)
    if title:
        plt.title(title)
    plt.savefig('../plots/' + feat_type + '_' + key + '_' + exp_name + '_pcap_concat.png', bbox_inches='tight', dpi=300)
def load_data(in_file):
    """Unpickle and return the object stored in *in_file*."""
    with open(in_file, 'rb') as handle:
        return pickle.load(handle)
#
def load_dataset(feat_type, device="*", vidInte=False, vidType='mp4', concat=True):
X = []
y = []
print(device)
if feat_type == 'keypoint':
for activity in act_label.keys():
dir = f'{feature_path}{activity}/keypoint/'
print(glob.glob(dir + '*.dat'))
for ft_files in glob.glob(dir + '*.dat'):
X_, y_ = load_data(ft_files)
X.append(X_)
y.append(y_)
# print(X)
# print(y)
return X, y
else:
# for activity in act_label.keys():
dir = feature_path + device + '/*/*/pcap-netml/' + feat_type + '/'
print(len(glob.glob(dir + '*.dat')))
for ft_files in glob.glob(dir + '*.dat'):
# scaler = MinMaxScaler()
X_, y_ = load_data(ft_files)
X_ = X_.reshape(1, 36)
# print(X_)
# scaler.fit(X_)
# X_ = scaler.transform(X_)
# print(X_)
y_ = y_[0]
if not vidInte:
X.append(X_)
else:
index = ft_files.split('/')[-1].split('_filtered.dat')[0].split('STATS_')[1]
if concat:
if vidType == "mp4":
vid_dir = f'{vid_feature_path_mp4}'
vid_X = np.load(vid_dir + str(index) + '_1_vgg.npy')
x_li = []
for x in X_:
x = np.concatenate((x, vid_X))
x_li.append(x)
elif vidType == "mkv":
vid_dir = f'{vid_feature_path_mkv}'
vid_X = np.load(vid_dir + str(index) + '_2_vgg.npy')
x_li = []
for x in X_:
x = np.concatenate((x, vid_X))
x_li.append(x)
elif vidType == "both":
vid_X_1 = np.load(vid_feature_path_mp4 + str(index) + '_1_vgg.npy')
vid_X_2 = np.load(vid_feature_path_mkv + str(index) + '_2_vgg.npy')
x_li = []
for x in X_:
x = np.concatenate((x, vid_X_1, vid_X_2))
x_li.append(x)
else:
if vidType == "mp4":
vid_dir = f'{vid_feature_path_mp4}'
vid_X = np.load(vid_dir + str(index) + '_1_vgg.npy')
x_li = []
for x in X_:
x = vid_X
x_li.append(x)
elif vidType == "mkv":
vid_dir = f'{vid_feature_path_mkv}'
vid_X = np.load(vid_dir + str(index) + '_2_vgg.npy')
x_li = []
for x in X_:
x = vid_X
x_li.append(x)
elif vidType == "both":
vid_X_1 = np.load(vid_feature_path_mp4 + str(index) + '_1_vgg.npy')
vid_X_2 = np.load(vid_feature_path_mkv + str(index) + '_2_vgg.npy')
x_li = []
for x in X_:
x = np.concatenate((vid_X_1, vid_X_2))
x_li.append(x)
X_ = np.array(x_li)
X.append(X_)
y.append(y_)
# print(X)
# print(y)
scaler = MinMaxScaler()
X = np.concatenate(X, axis=0)
scaler.fit(X)
X = scaler.transform(X)
# print(X)
return np.array(X), np.array(y)
act_label = {"no_interaction": 0,
"open_close_fridge": 1,
"put_back_item": 2,
"screen_interaction": 3,
"take_out_item": 4}
classifiers = {"OvRLogReg": OneVsRestClassifier(LogisticRegression(random_state=rng)),
"DecTree": DecisionTreeClassifier(random_state=rng),
"LogReg": LogisticRegression(random_state=rng),
"OvOSVC": OneVsOneClassifier(SVC(random_state=rng)),
# # OneVsOneClassifier(NuSVC(random_state=42)),
"OvOGP": OneVsOneClassifier(GaussianProcessClassifier(random_state=rng)),
"OvRLinearSVC": OneVsRestClassifier(LinearSVC(random_state=rng)),
"OvRGP": OneVsRestClassifier(GaussianProcessClassifier(random_state=rng)),
}
#
# # act_label = {"frg_no_interaction": 0,
# # "frg_open_close_fridge": 1,
# # "frg_put_back_item": 2,
# # "frg_screen_interaction": 3,
# # "frg_take_out_item": 4,
# # "alx_no_interaction": 5,
# # "alx_ask_weather": 6,
# # "alx_play_music": 7,
# # "nstc_no_interaction": 8,
# # "nstc_ask_time": 9,
# # "nstc_trigger_motion_detection": 10}
# feat_types = ['STATS']
# # feat_types = ['IAT','SIZE','IAT_SIZE','SAMP_NUM','SAMP_SIZE','STATS',
# # 'FFT-IAT','FFT-IAT_SIZE','FFT-SIZE','FFT-SAMP_NUM', 'FFT-SAMP_SIZE']
# from warnings import filterwarnings
#
# filterwarnings('ignore')
#
# for feat in feat_types:
# print(feat)
# # eval(feat, 'mp4+netml',"refrigerator", True, "mp4")
#
# eval(feat, 'mp4', "refrigerator", True, "mp4", False)
def eval(feat_type, exp, device="*", vid=False, vidType="mp4", concat=True):
    """Run 3-fold cross-validation for every classifier in
    ``classifiers`` and plot a confusion matrix per fold; returns the
    feature matrix wrapped in a DataFrame.

    NOTE(review): this function shadows the builtin ``eval`` (renaming
    would require touching callers, so only flagged here). Also,
    ``gen_video_data`` and ``in_dir`` are not defined anywhere in this
    file as shown -- confirm they exist elsewhere, otherwise this call
    raises NameError. Original indentation was lost in this copy;
    ``plt.show()`` is placed inside the fold loop -- verify placement.
    """
    print(exp)
    # X, y = load_dataset(feat_type, device, vidInte=vid, vidType=vidType, concat=concat)
    X, y = gen_video_data(in_dir)
    df = pd.DataFrame(X)
    # for k in range(0, len(X)):
    #     print(X[k][17], X[k][35], y[k], y[k])
    # X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.33)
    # print(X, y)
    kf = KFold(3, True, rng)  # 3 folds, shuffled with a fixed seed
    for key in classifiers:
        print(key)
        for train_index, test_index in kf.split(X, y):
            # print("TRAIN:", train_index, "TEST:", test_index)
            X_train, X_test = X[train_index], X[test_index]
            y_train, y_test = y[train_index], y[test_index]
            classifier = classifiers[key].fit(X_train, y_train)
            # print(classifier.feature_importances_)
            predicted_labels = classifier.predict(X_test)
            cf_matrix = confusion_matrix(y_test, predicted_labels)
            size = len(act_label.keys())
            make_confusion_matrix(cf_matrix, feat_type, key, exp,
                                  figsize=(size * 1.5, size * 1.5),
                                  categories=act_label.keys(),
                                  cmap='Blues')
            # disp = plot_confusion_matrix(classifier, X_test, y_test,
            #                              cmap=plt.cm.Blues)
            plt.show()
    return df
def main():
vid_feature_path_mp4 = "../../video-feature-clean/output_mp4/"
vid_feature_path_mkv = "../../video-feature-clean/output_mkv/"
#
# X = []
# f = []
# y = []
# act_label = {"frg_no_interaction": 0,
# "frg_open_close_fridge": 1,
# "frg_put_back_item": 2,
# "frg_screen_interaction": 3,
# "frg_take_out_item": 4}
# # act_label = {"frg_no_interaction": 0,
# # "frg_open_close_fridge": 1,
# # "frg_put_back_item": 2,
# # "frg_screen_interaction": 3,
# # "frg_take_out_item": 4,
# # "alx_no_interaction": 5,
# # "alx_ask_weather": 6,
# # "alx_play_music": 7,
# # "nstc_no_interaction": 8,
# # "nstc_ask_time": 9,
# # "nstc_trigger_motion_detection": 10}
#
# for file in glob.glob("../../video-feature-clean/output_mp4/*.npy"):
# X.append(np.load(file))
# f.append(file)
# y.append(label)
# print(file)
# print(np.load(file).shape)
#
# from sklearn.model_selection import KFold
feat = ''
eval(feat, 'mp4', "refrigerator", True, "mp4", False)
if __name__ == '__main__':
main()
| kun0906/activity_recognition | legacy/shinan.py | shinan.py | py | 13,386 | python | en | code | 2 | github-code | 13 |
31044921506 | #!/usr/bin/env python2
# coding: utf8
from __future__ import division, print_function
import itertools
import os
import time
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from .utilities import unicode_dammit, render_dict, by_chunks_of
from .server import show_connection
from .streamer import MetadataInjector
import logging
logger = logging.getLogger(__name__)
ICY_METAINT = 10000
icy_info = [("StreamTitle='{}';".format(s), t) for s, t in [
(u"AD - 1 AD AD", 1),
(u"first - second", 4),
(u"3 AD AD AD", 1),
(u"third - fourth", 4),
(u"AD - 4 AD AD AD AD", 1),
(u"fifth - sixth", 4),
(u"AD - 5 AD AD AD AD AD", 1)
]]
class Handler(BaseHTTPRequestHandler):
    """HTTP handler serving an endless mock SHOUTcast/ICY MP3 stream.

    Every GET receives an ``audio/mpeg`` response announcing
    ``icy-metaint``, then an infinite loop of ``sample.mp3`` bytes with the
    canned ``icy_info`` titles injected every ICY_METAINT bytes.
    (Python 2 code: uses ``itertools.imap``.)
    """
    def do_GET(self):
        # Log the incoming request headers.
        pretty_headers = unicode_dammit(render_dict(self.headers))
        msg = show_connection(pretty_headers)
        logger.info(msg)
        print(msg)
        # ICY/SHOUTcast-style response headers.
        self.send_response(200)
        self.send_header('Content-type', 'audio/mpeg')
        self.send_header('icy-metaint', ICY_METAINT)
        self.end_headers()
        # MetadataInjector interleaves the current `icy` title into the
        # output every ICY_METAINT bytes.
        output_buffer = MetadataInjector(self.wfile, ICY_METAINT)
        icy = itertools.cycle(icy_info)
        filename = os.path.join(os.path.dirname(__file__), 'sample.mp3')
        # NOTE(review): binary data opened in text mode 'r'; on Python 2
        # POSIX this still yields raw bytes -- confirm on other platforms.
        with open(filename, 'r') as f:
            data = itertools.cycle(
                itertools.imap(b''.join, by_chunks_of(1024, f.read())))
        while True:
            # Hold each title for its configured duration `tm` (seconds),
            # streaming chunks the whole time.
            start_time = time.time()
            output_buffer.icy, tm = next(icy)
            logger.info("New ICY: {!r}".format(output_buffer.icy))
            while time.time() - start_time < tm:
                output_buffer.write(next(data))
def serve_on_port(host, port):
    """Run the mock stream server on (host, port) until interrupted,
    always closing the listening socket on the way out."""
    httpd = HTTPServer((host, port), Handler)
    try:
        httpd.serve_forever()
    finally:
        httpd.server_close()
def main():
    """Serve the mock ICY stream on localhost:12345; return 0 on exit."""
    serve_on_port('localhost', 12345)
    return 0
if __name__ == '__main__':
    main()
| johntyree/rio | rio/mock_server.py | mock_server.py | py | 2,034 | python | en | code | 5 | github-code | 13 |
23870796408 |
from email import message
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import permissions
from rest_framework.decorators import action
from rest_framework.filters import SearchFilter
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from django.http import HttpResponse
from .models import *
from .pagination import TaskPaginator
from .permissions import IsMentor, IsStudent
from .serializers import *
from learn.signals import new_signal
from django.core.mail import EmailMessage
def test(request):
    """Debug view: emails a canned "New Task Created" notice to two
    hard-coded addresses and returns a plain-text confirmation.

    NOTE(review): the body f-string carries no task data and the recipient
    list is fixed -- this looks like scaffolding for the task-created
    signal (see the commented-out send below) and should not ship as-is.
    """
    # new_signal.send_robust(__name__)
    for email in ["huzzy@django.com", "kola@go.com"]:
        # NOTE(review): this local `message` shadows the module-level
        # `from email import message` import.
        message = EmailMessage(
            subject="New Task Created",
            body=f"Task title - ",
            to= [email]
        )
        message.send()
        print(email)
    return HttpResponse(" Seems to be working just fine")
class TaskViewSet (ModelViewSet):
    """
    Task can be created, updated, and deleted by Mentors.
    Students will see only tasks on their registered courses.
    """
    pagination_class = TaskPaginator
    serializer_class = TaskSerializer
    queryset = Task.objects.all() #prefetch_related('concepts', 'resources')
    @action(detail=False)
    def me(self, request, **kwargs):
        # Extra route (/tasks/me/): only the tasks belonging to courses
        # the requesting user is registered on.
        if request.method == "GET":
            # Get the user's registered courses
            # The distinct may not be needed.
            user_courses = UserCourse.objects.filter(
                user=request.user).values('course').distinct()
            # Filter the task according to the courses offered by the user
            tasks = Task.objects.filter(
                course__in=user_courses)  # .annotate(hello = 1)
            serializer = self.get_serializer(tasks, many=True)
            return Response(serializer.data)
    @action(detail=False)
    def my_info(self, request):
        # Extra route (/tasks/my_info/): attainable vs. achieved score
        # summary across the user's registered courses.
        if request.method == "GET":
            # Get the user's registered courses
            # The distinct may not be needed.
            user_courses = UserCourse.objects.\
                filter(user=request.user).values('course').distinct()
            # Filter the task according to the courses offered by the user
            tasks = Task.objects.filter(
                course__in=user_courses)  # .annotate(hello = 1)
            # total attainable scores
            total = sum([task.total_grade for task in tasks])
            scores_of_user = []
            for task in tasks:
                # get answers of the current users
                user_answers = task.answer.filter(
                    user=request.user).select_related('user', 'grade')
                print(user_answers)
                for answer in user_answers:
                    try:
                        # try to get scores of those answers
                        scores_of_user.append(answer.grade.score)
                    except:
                        # skip answers that have not been graded yet (no
                        # related `grade` object) and continue the loop
                        continue
            total_scores = sum(scores_of_user)
            return Response(
                {
                    "total_attainable_score": total,
                    "total_scores_gotten": total_scores,
                    "number_of_courses": user_courses.count(),
                    # "number_of_task_attempted": user_answers.count()
                }
            )
    def get_serializer_context(self):
        # had to pass total as None to avoid Key error in the serializer
        return {'user': self.request.user}
    def get_permissions(self):
        # Writes require the Mentor role; reads need only authentication.
        if not self.request.method in permissions.SAFE_METHODS:
            return [IsMentor()]
        return [permissions.IsAuthenticated()]
class AnswerTaskViewSet (ModelViewSet):
    # http_method_names = ['post'] # Answers cannot be edited or updated or deleted
    """
    Only Students can POST their answers to tasks, Mode of submission (URL or Text)
    Filter by user and task, Also search by task(title and submission_date)
    """
    filter_backends = [DjangoFilterBackend, SearchFilter]
    filterset_fields = ['user', 'task']
    # Answers can be created and read, but never edited or deleted.
    http_method_names = ['post', 'get', 'options', 'head']
    pagination_class = TaskPaginator
    permission_classes = [IsStudent]
    # NOTE(review): 'task__submisson_date' looks misspelled -- confirm the
    # Task model field name before changing it.
    search_fields = ['task__title', 'task__submisson_date']
    serializer_class = AnswerTaskSerializer
    def get_queryset(self):
        # Mentors see every answer; students only their own.
        answer = AnswerTask.objects.all().select_related('user', 'task', 'grade')
        if self.request.user.is_mentor:
            return answer
        return answer.filter(user=self.request.user)
    def get_serializer_context(self):
        # `task_pk` comes from the nested route, when present.
        context = {
            'user': self.request.user,
            'task_pk': self.kwargs.get('task_pk', ""),
        }
        return context
class GradeViewSet (ModelViewSet):
    """
    Endpoints for Tasks to be graded.
    """
    # Grades can be created, read and partially updated, never deleted.
    http_method_names = ['get', 'post','patch','head', 'options']
    queryset = Grade.objects.all()
    serializer_class = GradeSerializer
    def get_serializer_context(self):
        # Nested route: `answer_pk` identifies the answer being graded.
        return {'answer_pk': self.kwargs['answer_pk'], 'request': self.request}
    def get_permissions(self):
        # Only Mentors may create or modify grades; reads need auth only.
        if self.request.method in permissions.SAFE_METHODS:
            return [permissions.IsAuthenticated()]
        return [IsMentor()]
class CourseEnrollViewSet(ModelViewSet):
    """Enroll users on courses (create/list/remove; no editing)."""
    http_method_names = ['post', 'delete', 'get']
    serializer_class = CourseEnrollSerializer
    queryset = UserCourse.objects.all().select_related('course', 'user')
    def get_serializer_context(self):
        # The serializer needs the requesting user to create enrollments.
        return {'user': self.request.user}
class ConceptViewSet (ModelViewSet):
    """
    The links to Concept pages to understand a task More.
    """
    # Plain CRUD: default ModelViewSet behavior over all Concepts.
    serializer_class = ConceptSerializer
    queryset = Concept.objects.all()
class ResourceViewSet (ModelViewSet):
    """
    The links to Resource Materials for Tasks.
    """
    # Plain CRUD: default ModelViewSet behavior over all Resources.
    serializer_class = ResourceSerializer
    queryset = Resource.objects.all()
41767966472 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    """LeetCode 993: two nodes are cousins when they sit at the same depth
    but have different parents."""

    def f(self, root, parent, d):
        """DFS that records the depth and parent of the target values.

        Stores results for ``self.x`` in ``self.d1``/``self.parent1`` and
        for ``self.y`` in ``self.d2``/``self.parent2``.
        """
        if root:
            if root.val == self.x:
                self.d1 = d
                self.parent1 = parent
            if root.val == self.y:
                self.d2 = d
                self.parent2 = parent
            self.f(root.left, root.val, d + 1)
            self.f(root.right, root.val, d + 1)

    def isCousins(self, root: "Optional[TreeNode]", x: int, y: int) -> bool:
        """Return True iff x and y are cousins in the tree rooted at root.

        Fix: the search state is initialised to None so a missing x or y
        yields False instead of raising AttributeError.  The annotation is
        quoted so it need not resolve at import time.
        """
        self.x = x
        self.y = y
        self.d1 = self.d2 = None
        self.parent1 = self.parent2 = None
        self.f(root, -1, 0)
        return (self.d1 is not None and self.d1 == self.d2
                and self.parent1 != self.parent2)
| ritwik-deshpande/LeetCode | 993-cousins-in-binary-tree/993-cousins-in-binary-tree.py | 993-cousins-in-binary-tree.py | py | 960 | python | en | code | 0 | github-code | 13 |
11161186630 | import random
import time
from pyinsect.collector.NGramGraphCollector import NGramGraphCollector
if __name__ == "__main__":

    def getRandomText(iSize):
        """Return a random string of exactly iSize characters from 'abcdef'.

        Fix: the original used range(1, iSize), which produced iSize - 1
        characters (off by one versus the requested size).
        """
        lCands = "abcdef"
        return "".join(random.choice(lCands) for _ in range(iSize))

    # Build 50 random texts of 1000-2000 characters each.
    print("Initializing texts...")
    lTexts = []
    for iCnt in range(0, 50):
        # Select text size
        iSize = random.randint(1000, 2000)
        lTexts.append(getRandomText(iSize))
    print("Initializing texts... Done.")

    # Feed every text to the (shallow) collector, reporting progress
    # roughly once per second, then print the elapsed wall-clock time.
    print("Starting shallow...")
    cNoDeep = NGramGraphCollector()
    start = time.time()
    lastStep = start
    iCnt = 0
    for sText in lTexts:
        cNoDeep.add(sText)
        iCnt += 1
        if time.time() - lastStep > 1:
            print("..." + str(iCnt))
            lastStep = time.time()
    end = time.time()
    print(end - start)
    print("End shallow.")
72727926419 | '''
ODEs representing the HIV model.
'''
import warnings
import numpy
from scipy import integrate
import pandas
from . import control_rates
# State-variable names, in the order they appear along the last axis of a
# state array.
variables = (
    'susceptible', # S
    'vaccinated', # Q
    'acute', # A
    'undiagnosed', # U
    'diagnosed', # D
    'treated', # T
    'viral_suppression', # V
    'AIDS', # W
    'dead', # Z
    'new_infections', # R
)
# Compartments that count as alive / as infected, for the totals below.
alive = ('susceptible', 'vaccinated', 'acute', 'undiagnosed',
         'diagnosed', 'treated', 'viral_suppression', 'AIDS')
infected = ('acute', 'undiagnosed', 'diagnosed', 'treated',
            'viral_suppression', 'AIDS')
def get_variable(state, name):
    # Pull the named component from the last axis of `state`.
    i = variables.index(name)
    return state[..., i]
def get_alive(state):
    # Total alive population: sum of the `alive` compartments.
    return sum(get_variable(state, k) for k in alive)
def get_infected(state):
    # Total infected population: sum of the `infected` compartments.
    return sum(get_variable(state, k) for k in infected)
# Variables to log transform: S, U, D, T, V, W
vars_log = [0, 3, 4, 5, 6, 7]
# Variables to not log transform: Q, A, Z, R
vars_nonlog = [1, 2, 8, 9]
def transform(state, _log0 = -20):
    '''
    Log transform some of the state variables.

    Components in `vars_log` are replaced by their natural log; entries
    masked by numpy.ma.log (i.e. non-positive) are filled with `_log0`.
    Components in `vars_nonlog` are copied through unchanged.
    '''
    state_trans = numpy.empty(numpy.shape(state))
    state_trans[..., vars_log] = numpy.ma.log(state[..., vars_log]).filled(
        _log0)
    state_trans[..., vars_nonlog] = state[..., vars_nonlog]
    return state_trans
def transform_inv(state_trans):
    '''
    Inverse log transform some of the state variables.

    Exponentiates the `vars_log` components; inverse of `transform`.
    '''
    state = numpy.empty(numpy.shape(state_trans))
    state[..., vars_log] = numpy.exp(state_trans[..., vars_log])
    state[..., vars_nonlog] = state_trans[..., vars_nonlog]
    return state
def split_state(state):
    # Split a state array into per-component squeezed arrays, accepting
    # pandas Series/DataFrames as well as plain arrays.
    if isinstance(state, (pandas.Series, pandas.DataFrame)):
        state = state.values
    return map(numpy.squeeze, numpy.split(state, state.shape[-1], -1))
def rhs(t, state, target, parameters, vaccine_efficacy):
    '''
    Right-hand side of the HIV model ODEs in natural (untransformed)
    coordinates.  `state` holds (S, Q, A, U, D, T, V, W, Z, R) -- see
    `variables`.  Returns the time derivatives in the same order.
    '''
    # Force the state variables to be non-negative.
    # The last two state variables, dead from AIDS and new infections,
    # are cumulative numbers that are set to 0 at t = 0: these
    # can be negative if time goes backwards.
    state[ : -2] = state[ : - 2].clip(0, numpy.inf)
    S, Q, A, U, D, T, V, W, Z, R = state
    # Total sexually active population.
    N = S + Q + A + U + D + T + V
    control_rates_ = control_rates.get(t, state, target, parameters)
    # Per-susceptible infection rate, weighted by the transmission rate of
    # each infectious compartment.
    force_of_infection = (
        parameters.transmission_rate_acute * A
        + parameters.transmission_rate_unsuppressed * (U + D + T)
        + parameters.transmission_rate_suppressed * V) / N
    dS = (parameters.birth_rate * N
          - control_rates_.vaccination * S
          - force_of_infection * S
          - parameters.death_rate * S)
    # Vaccination reduces (but does not eliminate) infection risk.
    dQ = (control_rates_.vaccination * S
          - (1 - vaccine_efficacy) * force_of_infection * Q
          - parameters.death_rate * Q)
    dA = (force_of_infection * S
          + (1 - vaccine_efficacy) * force_of_infection * Q
          - parameters.progression_rate_acute * A
          - parameters.death_rate * A)
    dU = (parameters.progression_rate_acute * A
          - control_rates_.diagnosis * U
          - parameters.death_rate * U
          - parameters.progression_rate_unsuppressed * U)
    dD = (control_rates_.diagnosis * U
          + control_rates_.nonadherence * (T + V)
          - control_rates_.treatment * D
          - parameters.death_rate * D
          - parameters.progression_rate_unsuppressed * D)
    dT = (control_rates_.treatment * D
          - control_rates_.nonadherence * T
          - parameters.suppression_rate * T
          - parameters.death_rate * T
          - parameters.progression_rate_unsuppressed * T)
    dV = (parameters.suppression_rate * T
          - control_rates_.nonadherence * V
          - parameters.death_rate * V
          - parameters.progression_rate_suppressed * V)
    dW = (parameters.progression_rate_unsuppressed * (U + D + T)
          + parameters.progression_rate_suppressed * V
          - parameters.death_rate_AIDS * W)
    # Z and R are cumulative tallies (AIDS deaths, new infections).
    dZ = parameters.death_rate_AIDS * W
    dR = (force_of_infection * S
          + (1 - vaccine_efficacy) * force_of_infection * Q)
    return [dS, dQ, dA, dU, dD, dT, dV, dW, dZ, dR]
def rhs_log(t, state_trans, target, parameters, vaccine_efficacy):
    '''
    Right-hand side of the HIV model ODEs in the partially log-transformed
    coordinates produced by `transform` (the `vars_log` components are
    stored as logs).  Derivatives of log-transformed components are
    returned as d(log X)/dt; the others as plain dX/dt.
    '''
    state = transform_inv(state_trans)
    S, Q, A, U, D, T, V, W, Z, R = state
    (S_log, U_log, D_log, T_log, V_log, W_log) = state_trans[vars_log]
    # Total sexually active population.
    N = S + Q + A + U + D + T + V
    N_log = numpy.log(N)
    control_rates_ = control_rates.get(t, state, target, parameters)
    # Force of infection, with the log-stored compartments combined as
    # exp(X_log - N_log) = X / N for numerical stability.
    force_of_infection = (
        parameters.transmission_rate_acute * A / N
        + (parameters.transmission_rate_unsuppressed
           * (numpy.exp(U_log - N_log)
              + numpy.exp(D_log - N_log)
              + numpy.exp(T_log - N_log)))
        + parameters.transmission_rate_suppressed * numpy.exp(V_log - N_log))
    dS_log = (parameters.birth_rate * numpy.exp(N_log - S_log)
              - control_rates_.vaccination
              - force_of_infection
              - parameters.death_rate)
    dQ = (control_rates_.vaccination * numpy.exp(S_log)
          - (1 - vaccine_efficacy) * force_of_infection * Q
          - parameters.death_rate * Q)
    dA = (force_of_infection * numpy.exp(S_log)
          + (1 - vaccine_efficacy) * force_of_infection * Q
          - parameters.progression_rate_acute * A
          - parameters.death_rate * A)
    dU_log = (parameters.progression_rate_acute * A * numpy.exp(- U_log)
              - control_rates_.diagnosis
              - parameters.death_rate
              - parameters.progression_rate_unsuppressed)
    dD_log = (control_rates_.diagnosis * numpy.exp(U_log - D_log)
              + control_rates_.nonadherence * (numpy.exp(T_log - D_log)
                                               + numpy.exp(V_log - D_log))
              - control_rates_.treatment
              - parameters.death_rate
              - parameters.progression_rate_unsuppressed)
    dT_log = (control_rates_.treatment * numpy.exp(D_log - T_log)
              - control_rates_.nonadherence
              - parameters.suppression_rate
              - parameters.death_rate
              - parameters.progression_rate_unsuppressed)
    dV_log = (parameters.suppression_rate * numpy.exp(T_log - V_log)
              - control_rates_.nonadherence
              - parameters.death_rate
              - parameters.progression_rate_suppressed)
    dW_log = (
        parameters.progression_rate_unsuppressed * (numpy.exp(U_log - W_log)
                                                    + numpy.exp(D_log - W_log)
                                                    + numpy.exp(T_log - W_log))
        + parameters.progression_rate_suppressed * numpy.exp(V_log - W_log)
        - parameters.death_rate_AIDS)
    # Z and R are cumulative tallies and stay untransformed.
    dZ = parameters.death_rate_AIDS * numpy.exp(W_log)
    dR = (force_of_infection * numpy.exp(S_log)
          + (1 - vaccine_efficacy) * force_of_infection * Q)
    dstate = [dS_log, dQ, dA, dU_log, dD_log, dT_log, dV_log, dW_log, dZ, dR]
    return dstate
def _solve_odeint(t, Y0, fcn, args=()):
    """Integrate with scipy.integrate.odeint, adapting the project's
    fcn(t, Y, ...) calling convention to odeint's fcn(Y, t, ...)."""
    def reordered(Y, t, *fargs):
        # odeint passes (Y, t); the model RHS expects (t, Y).
        return fcn(t, Y, *fargs)
    return integrate.odeint(reordered, Y0, t,
                            args=args,
                            mxstep=2000,
                            mxhnil=1)
def _solve_ode(t, Y0, fcn, args = (), integrator = 'lsoda', use_log = True):
    '''
    Integrate with a :class:`scipy.integrate.ode` solver, returning the
    solution at every time in `t` as an array of shape (len(t), len(Y0)).

    `use_log`: when False, the state (except the last two cumulative
    components) is clipped to be non-negative after every step, matching
    the natural-coordinate model.  Fix: the original body referenced an
    undefined name `use_log` (NameError on every call); it is now an
    explicit keyword argument defaulting to True (no clipping), which is
    correct for log-transformed state.
    '''
    solver = integrate.ode(fcn)
    if integrator == 'lsoda':
        kwds = dict(max_hnil = 1)
    else:
        kwds = {}
    solver.set_integrator(integrator,
                          nsteps = 2000,
                          **kwds)
    solver.set_f_params(*args)
    solver.set_initial_value(Y0, t[0])
    Y = numpy.empty((len(t), len(Y0)))
    Y[0] = Y0
    for i in range(1, len(t)):
        Y[i] = solver.integrate(t[i])
        if not use_log:
            # Force to be non-negative (the last two components are
            # cumulative tallies and are left alone).
            Y[i, : -2] = Y[i, : -2].clip(0, numpy.inf)
        solver.set_initial_value(Y[i], t[i])
        assert solver.successful()
    return Y
def solve(t, target, parameters,
          integrator = 'odeint', use_log = True):
    '''
    Integrate the model over the times `t` from
    `parameters.initial_conditions`, optionally in log-transformed
    coordinates (`use_log`) for numerical stability.

    `integrator` is a
    :class:`scipy.integrate.ode` integrator---``'lsoda'``,
    ``'vode'``, ``'dopri5'``, ``'dop853'``---or
    ``'odeint'`` to use :func:`scipy.integrate.odeint`.
    '''
    assert numpy.isfinite(parameters.R0)
    assert not numpy.all(parameters.initial_conditions == 0)
    Y0 = parameters.initial_conditions.copy().values
    if use_log:
        Y0 = transform(Y0)
        fcn = rhs_log
    else:
        fcn = rhs
    # Scale time to start at 0 to avoid some solver warnings.
    t_scaled = t - t[0]
    def fcn_scaled(t_scaled, *args):
        return fcn(t_scaled + t[0], *args)
    # Vaccine-free targets have no vaccine_efficacy attribute.
    try:
        vaccine_efficacy = target.vaccine_efficacy
    except AttributeError:
        vaccine_efficacy = 0
    args = (target, parameters, vaccine_efficacy)
    if integrator == 'odeint':
        Y = _solve_odeint(t_scaled, Y0, fcn_scaled, args)
    else:
        # NOTE(review): use_log is not forwarded to _solve_ode, so the
        # non-negativity clipping step there cannot be enabled from here.
        Y = _solve_ode(t_scaled, Y0, fcn_scaled, args,
                       integrator = integrator)
    if numpy.any(numpy.isnan(Y)):
        # NaNs in log coordinates: retry once in natural coordinates.
        msg = ("country = '{}': NaN in solution!").format(parameters.country)
        if use_log:
            msg += " Re-running with use_log = False."
            warnings.warn(msg)
            return solve(t, target, parameters,
                         integrator = integrator,
                         use_log = False)
        else:
            raise ValueError(msg)
    elif use_log:
        # Return the solution in natural coordinates.
        return transform_inv(Y)
    else:
        return Y
| janmedlock/HIV-95-vaccine | model/ODEs.py | ODEs.py | py | 9,735 | python | en | code | 1 | github-code | 13 |
27454731834 | import numpy as np
class Renderer:
    """Rasterizes a molecule (atoms + bonds from `config`) into a dict of
    terminal cells, applying rotation, zoom and panning.

    `content` maps (row, col) -> (character, color, z-depth); the caller
    draws whatever `buffer_scene` has put there.
    """
    def __init__(self, height, width, config):
        self.height = height
        self.width = width
        self.content = dict()  # (y, x) -> (char, color, zdepth)
        self.m = None          # screen mid point (x, y); set by resize()
        self.f = None          # x aspect-correction factor; set by resize()
        self.resize(height, width)
        self.config = config
        self.btoggle = 0       # bond style: 0 directional, 1 dots, 2 hidden
        self.active_frame = 0
        self.offset = 0        # index of the active frame's first atom
        self.pos = np.array(self.config.coordinates[:self.config.atm_counts[0]])
        # Centroid of frame 0; all frames are recentered on it.
        self.org_center = 1.0 / self.pos.shape[0] * np.sum(self.pos, axis=0)
        self.zoom = None
        self.rot = None        # accumulated rotation matrix
        self.rotcounter = None # accumulated angles in degrees
        self.rot_cache = None  # pos with rot applied
        self.reset_view()
        self.ztoggle = False
        self.auto_rotate_flags = np.array([False, False, False])
        self.buffer_scene()
    def buffer_scene(self):
        """
        A super simple rasterizer. For now, just draw single character atom symbols at their rounded x and y
        positions.
        :return: True if nothing bad happened.
        """
        mx, my = self.m
        rot = self.rot_cache
        self.clear()
        # Draw bonds
        for bond in self.config.bonds[self.active_frame]:
            i, j = bond
            # if bond is (i, j) with i == j, just draw the label (no bonds)
            if i == j:
                x, y, z = rot[i]
                xp, yp = round(float(x) * self.f * self.zoom + mx), round(float(y) * self.zoom + my)
                self.buffer(yp, xp, self.config.symbols[i + self.offset], self.config.colors[i + self.offset], float(z))
            # else draw the bond with the labels at the end points
            else:
                xa, ya, za = rot[i]
                xa = float(xa) * self.f * self.zoom + mx
                ya = float(ya) * self.zoom + my
                xb, yb, zb = rot[j]
                xb = float(xb) * self.f * self.zoom + mx
                yb = float(yb) * self.zoom + my
                xap, yap = int(round(xa)), int(round(ya))
                xbp, ybp = int(round(xb)), int(round(yb))
                # If desired, draw bonds by starting at xap+1 and going to xbp-1, drawing line segments
                if self.btoggle < 2:
                    sy = -1 if ya > yb else 1
                    sx = -1 if xa > xb else 1
                    sz = -1 if za > zb else 1
                    dx = float((xb - xa) / (yb - ya)) if abs(yb - ya) > 0 else 0
                    dy = float((yb - ya) / (xb - xa)) if abs(xb - xa) > 0 else 0
                    # Step along the dominant axis (x if shallow, y if steep),
                    # interpolating the other coordinate and the depth.
                    if abs(dy) <= 1:
                        dz = float((zb - za) / (xb - xa)) if abs(xb - xa) > 0 else 0
                        for k in range(1, abs(xap - xbp)):
                            xk = xap + sx * k
                            yk = round(float(ya) + sx * k * dy)
                            zk = round((float(za) + sz * k * dz))
                            # Color each half of the bond like its nearer atom.
                            col = self.config.colors[i + self.offset] if k < abs(xap - xbp) / 2 \
                                else self.config.colors[j + self.offset]
                            if 1 < xk < self.width - 2 and 1 < yk < self.height - 3:
                                self.buffer(yk, xk, self.bond_ab(rot, i, j), col, float(zk))
                    else:
                        dz = float((zb - za) / (yb - ya)) if abs(yb - ya) > 0 else 0
                        for k in range(1, abs(yap - ybp)):
                            xk = round((float(xa) + sy * k * dx))
                            yk = yap + sy * k
                            zk = round((float(za) + sz * k * dz))
                            col = self.config.colors[i + self.offset] if k < abs(yap - ybp) / 2 \
                                else self.config.colors[j + self.offset]
                            if 1 < xk < self.width - 2 and 1 < yk < self.height - 3:
                                self.buffer(yk, xk, self.bond_ab(rot, i, j), col, float(zk))
                # Draw the two labels at the end points
                self.buffer(yap, xap, self.config.symbols[i + self.offset], self.config.colors[i + self.offset], float(za))
                self.buffer(ybp, xbp, self.config.symbols[j + self.offset], self.config.colors[j + self.offset], float(zb))
        return True
    def buffer(self, y, x, chars, color, zdepth):
        """Write `chars` at (y, x), honoring the z-buffer: a cell is only
        overwritten by content with a larger zdepth, and only inside the
        drawable area (the border is left alone)."""
        for i, char in enumerate(chars):
            zbuf = self.content[y, x + i][2] if (y, x + i) in self.content else float("-inf")
            if 1 < x + i < self.width - 2 and 1 < y < self.height - 3 and float(zdepth) > zbuf:
                self.content[y, x + i] = (char, color, zdepth)
    def toggle_auto_rotate(self, x=False, y=False, z=False):
        """Toggle continuous rotation for the selected axes as a group."""
        self.auto_rotate_flags[[x, y, z]] = not all(self.auto_rotate_flags[[x, y, z]])
    def get_auto_rotate(self):
        """Return True if any auto-rotation axis is active."""
        return any(self.auto_rotate_flags)
    def auto_rotate(self):
        """Advance the rotation one small step on each active axis."""
        if self.auto_rotate_flags[0]:
            self.rotate(x=0.32)
        if self.auto_rotate_flags[1]:
            self.rotate(y=0.32)
        if self.auto_rotate_flags[2]:
            self.rotate(z=0.32)
    def rotate(self, x=0.0, y=0.0, z=0.0):
        """
        Set an internal rotation matrix that is applied to the coordinates before every render.
        One unit of x/y/z corresponds to pi/36 rad (5 degrees) about that
        axis; `rotcounter` tracks the accumulated angles in degrees,
        wrapped to [0, 360].  Returns True if anything changed.
        """
        if abs(x) > 0:
            increment = np.pi / 36 * x
            sine = np.sin(increment)
            cosine = np.cos(increment)
            self.rot = np.matmul(self.rot, [[1.0, 0.0, 0.0], [0.0, cosine, -sine], [0.0, sine, cosine]])
            if self.rotcounter[0] + 5 * x > 360:
                self.rotcounter[0] -= 360
            elif self.rotcounter[0] + 5 * x < 0:
                self.rotcounter[0] += 360
            self.rotcounter[0] += 5 * x
        if abs(y) > 0:
            increment = np.pi / 36 * y
            sine = np.sin(increment)
            cosine = np.cos(increment)
            self.rot = np.matmul(self.rot, [[cosine, 0.0, sine], [0.0, 1.0, 0.0], [-sine, 0.0, cosine]])
            if self.rotcounter[1] + 5 * y > 360:
                self.rotcounter[1] -= 360
            elif self.rotcounter[1] + 5 * y < 0:
                self.rotcounter[1] += 360
            self.rotcounter[1] += 5 * y
        if abs(z) > 0:
            increment = np.pi / 36 * z
            sine = np.sin(increment)
            cosine = np.cos(increment)
            self.rot = np.matmul(self.rot, [[cosine, -sine, 0.0], [sine, cosine, 0.0], [0.0, 0.0, 1.0]])
            if self.rotcounter[2] + 5 * z > 360:
                self.rotcounter[2] -= 360
            elif self.rotcounter[2] + 5 * z < 0:
                self.rotcounter[2] += 360
            self.rotcounter[2] += 5 * z
        self.update_rot_cache()
        return abs(x) > 0 or abs(y) > 0 or abs(z) > 0
    def navigate(self, dx=0, dy=0):
        """
        Navigate the view so that the new mid point sits at (x + dx, y + dy).
        """
        x, y = self.m
        self.m = (x + dx, y + dy)
        return True
    def modify_zoom(self, factor):
        """Adjust the zoom by `factor`; refuse changes that would drop it
        to 0.2 or below.  Returns True on success."""
        if self.zoom + factor > 0.2:
            self.zoom += factor
            return True
        return False
    def reset_view(self):
        """
        Reset the view to the starting values.
        """
        self.rotcounter = [0, 0, 0]
        self.rot = np.identity(3)
        self.m = round((self.width - 2) / 2), round((self.height - 2) / 2)
        self.refresh_coordinates()
        self.rot_cache = self.pos
        # Initial zoom derived from the molecule's x/y extent (the larger
        # of the two candidate factors is used).
        dx = np.max(self.pos[:, 0]) - np.min(self.pos[:, 0])
        dy = np.max(self.pos[:, 1]) - np.min(self.pos[:, 1])
        fx = 0.9 * self.m[0] / (self.f * dx)
        fy = 0.9 * self.m[1] / dy
        self.zoom = fx if fx > fy else fy
        return True
    def refresh_coordinates(self):
        # Load the active frame's atoms and recenter them on the frame-0
        # centroid.
        self.pos = np.array(
            self.config.coordinates[self.offset:self.offset + self.config.atm_counts[self.active_frame]])
        self.pos -= self.org_center
    def resize(self, height, width):
        """
        Resize the screen. Known issue: crashes if the resize is faster than the framerate.
        """
        self.height = height
        self.width = width
        self.m = round((self.width - 2) / 2), round((self.height - 2) / 2)
        # Since terminal characters are higher than wide, I correct for this by multiplying the x by f
        # so that it appears wider. 2.25 is what looks good on my terminals, but might be
        # nice to have a general way of determining the optimal value
        self.f = 2.25
    def clear(self):
        """
        Clear the canvas and redraw the border.
        """
        self.content.clear()
        for i in range(self.height - 2):
            for j in range(self.width):
                if i == 0 and j == 0:
                    self.content[i, j] = ("┌", 0, -1)
                elif (i == 0 or i == self.height - 3) and 0 < j < self.width - 1:
                    self.content[i, j] = ("─", 0, -1)
                elif i == 0 and j == self.width - 1:
                    self.content[i, j] = ("┐", 0, -1)
                elif i < self.height - 3 and (j == 0 or j == self.width - 1):
                    self.content[i, j] = ("│", 0, -1)
                elif i == self.height - 3 and j == 0:
                    self.content[i, j] = ("└", 0, -1)
                elif i == self.height - 3 and j == self.width - 1:
                    self.content[i, j] = ("┘", 0, -1)
    def principle_axes(self):
        """
        Transform to principle axes of rotation
        (diagonalizes the inertia-like tensor built from the coordinates).
        """
        x, y, z = np.transpose(self.pos)
        xx = np.sum(y * y + z * z)
        yy = np.sum(x * x + z * z)
        zz = np.sum(x * x + y * y)
        xy = -np.sum(x * y)
        yz = -np.sum(y * z)
        xz = -np.sum(x * z)
        i = np.array([[xx, xy, xz], [xy, yy, yz], [xz, yz, zz]])
        w, t = np.linalg.eig(i)
        # Einstein Summation: for the 3x3 matrix t, multiply row j with each of the k rows in pos and sum
        # this is essentially just a basis transformation to the principle axes coordinate system.
        self.pos = np.einsum('ij,kj->ki', np.linalg.inv(t), self.pos)
        self.update_rot_cache()
        return True
    def bond_ab(self, coordinates, i, j):
        """Pick the character for a bond segment between atoms i and j:
        direction-dependent glyphs for btoggle 0, a plain dot for
        btoggle 1, and a blank when bonds are hidden."""
        if self.btoggle == 0:
            v = coordinates[j] - coordinates[i]
            angle = np.arccos(v / np.linalg.norm(v))
            # If angle towards z direction is small, use dot
            if angle[2] < np.pi * 0.33:
                return "·"
            elif angle[2] > np.pi * 0.66:
                return "-"
            # Angle towards x is small or large if horizontal
            if angle[0] < np.pi * 0.25 or angle[0] > np.pi * 0.75:
                return "─"
            # Angle towards y is small or large if vertical
            elif angle[1] < np.pi * 0.25 or angle[1] > np.pi * 0.75:
                return "│"
            # For in between, check y direction
            elif np.pi * 0.25 < angle[0] < np.pi * 0.5:
                return "/" if v[1] < 0 else "\\"
            elif np.pi * 0.5 < angle[0] < np.pi * 0.75:
                return "/" if v[1] > 0 else "\\"
        elif self.btoggle == 1:
            return "·"
        return " "
    def change_frame(self, step=1):
        """Switch to another trajectory frame, wrapping at both ends, and
        rebuild the coordinate caches."""
        if self.active_frame + step > len(self.config.atm_counts) - 1:
            self.active_frame = self.active_frame + step - len(self.config.atm_counts)
        elif self.active_frame + step < 0:
            self.active_frame = self.active_frame + step + len(self.config.atm_counts)
        else:
            self.active_frame += step
        self.offset = sum(self.config.atm_counts[:self.active_frame])
        self.refresh_coordinates()
        self.update_rot_cache()
    def update_rot_cache(self):
        # Re-apply the accumulated rotation to the raw coordinates.
        self.rot_cache = np.matmul(self.pos, self.rot)
| dewberryants/asciiMol | asciimol/app/renderer.py | renderer.py | py | 11,649 | python | en | code | 344 | github-code | 13 |
41491176044 | import os
# Secret key used by Flask to sign session cookies.  Regenerated on every
# start, so sessions do not survive a restart; pin a fixed value in
# production if they must.
SECRET_KEY = os.urandom(32)

# Grabs the folder where the script runs.
basedir = os.path.abspath(os.path.dirname(__file__))

# Enable debug mode (development only -- never in production).
DEBUG = True

# Connect to the database.  Prefer the DATABASE_URL environment variable
# so credentials need not live in source control; fall back to the
# original hard-coded URI for backward compatibility.
# NOTE(review): these credentials are committed to the repository --
# rotate them and move them fully into the environment.
SQLALCHEMY_DATABASE_URI = os.environ.get(
    'DATABASE_URL',
    'mysql+pymysql://sql11496228:fQvMfR2mG8@sql11.freesqldatabase.com/sql11496228')

# Turn off the Flask-SQLAlchemy event system (and its warning).
SQLALCHEMY_TRACK_MODIFICATIONS = False
70674377299 | # Importing necessary modules and libraries for the TextGeneratorServicer class.
import concurrent.futures as futures

import grpc
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModelWithLMHead

# Importing the textgen_pb2 and textgen_pb2_grpc modules generated from protobuf files.
import textgen_pb2
import textgen_pb2_grpc
class TextGeneratorServicer(textgen_pb2_grpc.TextGeneratorServicer):
    """gRPC servicer that generates text with a DistilGPT2 language model."""

    def __init__(self):
        # Tokenizer and model are loaded once at server start-up.
        self.tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        # Fix: AutoModelWithLMHead is deprecated in transformers v4+;
        # AutoModelForCausalLM is the supported equivalent for GPT-2-style
        # (causal) language models.
        self.model = AutoModelForCausalLM.from_pretrained("distilgpt2")

    def GenerateText(self, request, context):
        """Generate up to `request.length` tokens continuing `request.prompt`
        and return them as a TextResponse."""
        input_ids = self.tokenizer.encode(request.prompt, return_tensors="pt")
        output = self.model.generate(
            input_ids,
            max_length=request.length,
            pad_token_id=self.tokenizer.eos_token_id,
            num_beams=5,               # beam search for higher-quality output
            no_repeat_ngram_size=2,    # forbid repeated bigrams
            early_stopping=True,
        )
        # Decode the generated ids back into readable text.
        generated_text = self.tokenizer.decode(output[0], skip_special_tokens=True)
        return textgen_pb2.TextResponse(text=generated_text)
def serve():
    """Create the gRPC server, register the servicer, and block forever."""
    grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    textgen_pb2_grpc.add_TextGeneratorServicer_to_server(
        TextGeneratorServicer(), grpc_server)
    grpc_server.add_insecure_port("[::]:50051")
    print("Starting server. Listening on port 50051.")
    grpc_server.start()
    grpc_server.wait_for_termination()


if __name__ == "__main__":
    serve()
| Codehackerone/storyforge | python-server/server.py | server.py | py | 2,023 | python | en | code | 1 | github-code | 13 |
38035037138 | __doc__ = '''a python script to workaround various limitations of rootmap
files and reflex/cint typename impedance mismatches.
'''
__version__ = '$Revision: 1.1 $'
__author__ = 'Sebastien Binet <binet@cern.ch>'
if __name__ == "__main__":
    import sys
    import os
    import PyUtils.Dso as Dso
    # Default output file name; overridden by the first CLI argument.
    oname = 'typereg_dso_db.csv'
    if len(sys.argv) > 1:
        oname = sys.argv[1]
    else:
        # No explicit output given: write under the first CMT project's
        # InstallArea/share directory, creating it if needed.
        from PyCmt.Cmt import CmtWrapper
        project_root = CmtWrapper().projects()[0]
        from PyUtils.path import path
        oname = path(project_root) / "InstallArea" / "share" / oname
        if not os.path.exists(oname.dirname()):
            os.makedirs(oname.dirname())
        pass
    # Generate the type-registry database (reflex/cint name mapping).
    rflx_names = Dso.gen_typeregistry_dso(oname)
    sys.exit(0)
| rushioda/PIXELVALID_athena | athena/Tools/PyUtils/bin/gen-typereg-dso.py | gen-typereg-dso.py | py | 765 | python | en | code | 1 | github-code | 13 |
24805170602 | import argparse
from randomPredictor import RandomPredictor
from medianPredictor import MedianPredictor
from basePredictor import BasePredictor
from modePredictor import ModePredictor
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Local Value Predictor')
    parser.add_argument('--history', type=int, help="history length", choices=range(2, 21), default=7)
    arguments = parser.parse_args()
    # One instance of each predictor under comparison.
    base_predictor = BasePredictor()
    mode_predictor = ModePredictor(arguments.history)
    median_predictor = MedianPredictor(arguments.history)
    random_predictor = RandomPredictor(arguments.history)
    # Replay the Pin memory trace; fields 0, 2 and 3 of each line are used
    # as the pc address, load address and actually loaded value.
    with open("pinatrace.txt") as fp:
        total_loads = 0
        base_correct_count, mode_correct_count, median_correct_count, random_correct_count = 0, 0, 0, 0
        base_was_correct, mode_was_correct, median_was_correct, random_was_correct = True, True, True, True
        line = fp.readline()
        while line:
            total_loads += 1
            line_split = line.split()
            pc_address, load_address, actual_load_value = line_split[0], line_split[2], line_split[3]
            if base_predictor.checkIfExists(pc_address, load_address):
                base_prediction = base_predictor.getPrediction(pc_address, load_address)
                mode_prediction, mode_confidence = mode_predictor.getPrediction(pc_address, load_address)
                median_prediction, median_confidence = median_predictor.getPrediction(pc_address, load_address)
                random_prediction, random_confidence = random_predictor.getPrediction(pc_address, load_address)
                if base_prediction == actual_load_value:
                    base_correct_count += 1
                    base_was_correct = True
                else:
                    base_was_correct = False
                # A confidence-based predictor scores when it is right
                # while confident (>= 0.5) or wrong while unconfident
                # (< 0.5) -- i.e. its confidence estimate was useful.
                if (mode_prediction == actual_load_value and mode_confidence >= 0.5) or (mode_prediction != actual_load_value and mode_confidence < 0.5):
                    mode_correct_count += 1
                    mode_was_correct = True
                else:
                    mode_was_correct = False
                if (median_prediction == actual_load_value and median_confidence >= 0.5) or (median_prediction != actual_load_value and median_confidence < 0.5):
                    median_correct_count += 1
                    median_was_correct = True
                else:
                    median_was_correct = False
                if (random_prediction == actual_load_value and random_confidence >= 0.5) or (random_prediction != actual_load_value and random_confidence < 0.5):
                    random_correct_count += 1
                    random_was_correct = True
                else:
                    random_was_correct = False
                # Feed the actual value (and correctness) back into each
                # predictor's history.
                base_predictor.addToHistory(pc_address, load_address, actual_load_value, base_was_correct)
                mode_predictor.addToHistory(pc_address, load_address, actual_load_value, mode_was_correct)
                median_predictor.addToHistory(pc_address, load_address, actual_load_value, median_was_correct)
                random_predictor.addToHistory(pc_address, load_address, actual_load_value, random_was_correct)
            else:
                # First sighting of this (pc, address) pair: just record it.
                base_predictor.addToHistory(pc_address, load_address, actual_load_value, True)
                mode_predictor.addToHistory(pc_address, load_address, actual_load_value, True)
                median_predictor.addToHistory(pc_address, load_address, actual_load_value, True)
                random_predictor.addToHistory(pc_address, load_address, actual_load_value, True)
            line = fp.readline()
        # Report each predictor's accuracy over the whole trace.
        base_accuracy = ((base_correct_count)/total_loads) * 100
        mode_accuracy = ((mode_correct_count)/total_loads) * 100
        median_accuracy = ((median_correct_count)/total_loads) * 100
        random_accuracy = ((random_correct_count)/total_loads) * 100
        print()
        print('Base Accuracy : ', round(base_accuracy, 4), '%')
        print('Mode Accuracy : ', round(mode_accuracy, 4), '%')
        print('Median Accuracy : ', round(median_accuracy, 4), '%')
        print('Random Accuracy : ', round(random_accuracy, 4), '%')
        print()
26836924912 | import operator
# (name, load) records for each train run.
train = [('토마스', 5), ('헨리', 8), ('에드워드', 9), ('에밀리', 5), ('토마스', 4),
         ('헨리', 7), ('토마스', 3), ('에밀리', 8), ('퍼시', 5), ('고든', 13)]


def total_per_train(records):
    """Sum the load per train name and return (name, total) pairs sorted
    by total in descending order (stable sort, so first-seen order breaks
    ties -- same result as the original dict-then-sort approach)."""
    totals = {}
    for name, weight in records:
        totals[name] = totals.get(name, 0) + weight
    return sorted(totals.items(), key=operator.itemgetter(1), reverse=True)


def competition_ranks(sorted_totals):
    """Return competition ("1224"-style) ranks for totals sorted in
    descending order: equal totals share a rank, and the next distinct
    total skips ahead by the number of tied entries.

    Fix: the original loop compared index 0 against index -1 (the last
    element) on the first iteration; this only produced the right answer
    because cur_rank happened to start at 1.  The tie check is now guarded
    with i > 0.
    """
    ranks = []
    for i, (_, total) in enumerate(sorted_totals):
        if i > 0 and total == sorted_totals[i - 1][1]:
            ranks.append(ranks[-1])  # tie: share the previous rank
        else:
            ranks.append(i + 1)
    return ranks


if __name__ == '__main__':
    print('-----2021041047 허정윤-----')
    train = total_per_train(train)
    print('기차 수송량 목록 ==>', train)
    print("---------------------")
    print('기차\t 총수송량\t순위')
    print('---------------------\n')
    ranks = competition_ranks(train)
    for i in range(len(train)):
        print(train[i][0], '\t', train[i][1], '\t', ranks[i])
29651883673 | from __future__ import absolute_import, print_function, unicode_literals
__metaclass__ = type
__all__ = [
'RemoveLiloPlugin',
]
import os
import logging
from janitor.plugincore.i18n import setup_gettext
from janitor.plugincore.core.package_cruft import PackageCruft
from janitor.plugincore.plugin import Plugin
_ = setup_gettext()
class RemoveLiloPlugin(Plugin):
    """Plugin to remove lilo if grub is also installed."""

    def __init__(self):
        # Only run during the jaunty post-dist-upgrade cache pass.
        self.condition = ['jauntyPostDistUpgradeCache']

    def get_description(self):
        # Bug fix: the two adjacent string fragments concatenated without a
        # space, producing "installed.(See bug ...". A leading space on the
        # second fragment keeps the sentences apart.
        return _('Remove lilo since grub is also installed.'
                 ' (See bug #314004 for details.)')

    def get_cruft(self):
        """Yield a PackageCruft for lilo when both lilo and grub are
        installed and no /etc/lilo.conf exists (i.e. lilo is unused)."""
        if 'lilo' in self.app.apt_cache and 'grub' in self.app.apt_cache:
            lilo = self.app.apt_cache['lilo']
            grub = self.app.apt_cache['grub']
            if lilo.is_installed and grub.is_installed:
                if not os.path.exists('/etc/lilo.conf'):
                    yield PackageCruft(lilo, self.description)
                else:
                    # Both boot loaders present but lilo.conf exists, so lilo
                    # may actually be in use — warn instead of removing.
                    logging.warning('lilo and grub installed, but '
                                    'lilo.conf exists')
39878884381 | import pygame, random
# World scroll speed (px per frame) and background colour.
TOWER_SPEED = 5
SKY_COLOR = (25, 2, 52)
# Mutable game state shared by the classes below.
score = 0
highscore = 0
started = False
gameOver = False
towers = []
opPlanes = []
deathMessage = None
# Player sprite faces right (horizontal flip); enemy sprite as loaded.
plane = pygame.transform.flip(pygame.image.load('plane.png'), True, False)
opPlane = pygame.image.load('planeBlue.png')
pygame.init()
# Pre-built fonts in the sizes used by the HUD and menus.
font10 = pygame.font.SysFont('Comic Sans MS', 10, 10)
font15 = pygame.font.SysFont('Comic Sans MS', 15, 15)
font20 = pygame.font.SysFont('Comic Sans MS', 20, 20)
font30 = pygame.font.SysFont('Comic Sans MS', 30, 30)
font50 = pygame.font.SysFont('Comic Sans MS', 50, 50)
font200 = pygame.font.SysFont('Comic Sans MS', 200, 200)
def makeTowers():
    """Fill the global ``towers`` list with ten buildings spaced 192px
    apart, starting just past the right edge of the 1920px screen."""
    towers.extend(tower(1920 + slot * 192) for slot in range(10))
class OpPlane():
def __init__(self):
self.y = random.randint(10,500)
self.x = 1920
self.planeSpeed = random.randint(2,5)
self.v = -.5
self.alive = True
self.type = "plane"
self.rot = 0
def move(self):
if self.alive == False:
if self.rot < 90:
self.rot += 1.5
self.y -= self.v
self.v *= 1.1
self.x -= self.planeSpeed+TOWER_SPEED
if self.x < 0:
return "terminate"
class Bird():
    """The player-controlled plane (flappy-bird style physics)."""
    def __init__(self):
        self.set()
    def set(self):
        # Reset to screen centre (1920x1080 playfield) with no motion.
        self.x = 960
        self.y = 540
        self.v = 0
        self.rot = 0
        self.alive = True
    def move(self):
        # Per-frame physics.  Reads module globals `started` and `score`;
        # writes `gameOver`, `highscore` and `deathMessage`.
        global gameOver, highscore, score, deathMessage
        if started == True:
            # Pitch nose-down over time, capped at -90 degrees.
            if self.rot > -90:
                self.rot -= 1.5
            self.y -= self.v
            # Upward speed (v > 0) decays 10% per frame and flips to a small
            # fall once below 0.5; downward speed (v < 0) grows 10% per
            # frame (gravity-like acceleration).
            if self.v < .5 and self.v > 0:
                self.v = -.5
            elif self.v > 0:
                self.v *= .9
            elif self.v < 0:
                self.v *= 1.1
            if self.y > 1080:
                # Fell below the screen: end the game, record high score.
                gameOver = True
                if score > highscore:
                    highscore = score
                if self.alive == True:
                    deathMessage = "You died by crashing into the ground"
            elif self.y < 0:
                # Flew above the screen: mark dead (message text unchanged).
                self.alive = False
                deathMessage = "You flew to close to the sun"
    def jump(self):
        # Flap: pitch up, give an upward impulse, clamp at the top edge.
        self.rot = 45
        self.v = 10
        if self.y < 0:
            self.y = 0
class tower():
    """A scrolling skyscraper obstacle.  Instances are recycled: once one
    scrolls off the left edge it is re-rolled and reused on the right."""

    def __init__(self, x):
        self.y = 0
        self.x = x
        self.windows = []
        self.type = "building"
        self.start()

    def start(self):
        """(Re)roll this building: new random height plus a window grid of
        6 columns and one row per 30px of height, each window in one of 4
        random states."""
        # Bug fix: reset the window list on every re-roll.  Previously
        # windows accumulated across recycles, growing without bound and
        # keeping stale rows from taller past buildings.
        self.windows = []
        self.y = random.randint(100,500)
        for i in range(self.y//30):
            for j in range(6):
                self.windows.append([j, i, random.randint(0,3)])

    def move(self):
        """Scroll left one frame; wrap to the right edge and re-roll once
        fully off-screen."""
        self.x -= TOWER_SPEED
        if self.x <= -100:
            self.x = 1920
            self.start()
21928358173 | # Given an array of positive integers nums and an integer k,
# find the length of the longest subarray whose sum is less than or equal to k.
class Solution:
    @staticmethod
    def find_length(nums, k):
        """Return the length of the longest contiguous subarray of the
        positive integers ``nums`` whose sum is <= ``k``.

        Variable-size sliding window: grow on the right, shrink from the
        left while the running sum exceeds ``k``.  O(n) time, O(1) space.

        Fixes: the original was declared as an instance method without
        ``self`` (so ``Solution().find_length(...)`` crashed); it also
        tracked a redundant ``cur_len`` counter that the answer never used.
        """
        ans = 0
        left = 0
        window_sum = 0
        for right in range(len(nums)):
            window_sum += nums[right]
            # Shrink until back within budget; always terminates because
            # nums are positive, so the window can empty itself.
            while window_sum > k:
                window_sum -= nums[left]
                left += 1
            ans = max(ans, right - left + 1)
        return ans
| dyabk/competitive-programming | LeetCode/find_length.py | find_length.py | py | 525 | python | en | code | 0 | github-code | 13 |
38900165349 | import graphene
from graphene_django import DjangoObjectType
from ..models import (
Author,
Publisher,
Genre,
BookList,
Format,
ReadBy
)
class AuthorOutputType(DjangoObjectType):
    """Relay node type for Author, filterable and orderable by name."""
    # Relay encodes node IDs; expose the raw integer PK alongside it.
    id_int = graphene.Int(description="The integer representation of the ID")

    @staticmethod
    def resolve_id_int(root, info):
        return int(root.pk)

    class Meta:
        model = Author
        fields = '__all__'
        interfaces = (graphene.relay.Node,)
        filter_fields = {
            'id': ['exact'],
            'name': ['iexact', 'icontains', 'istartswith'],
        }
        ordering = ['name']
class PublisherOutputType(DjangoObjectType):
    """Relay node type for Publisher (name and link)."""
    # Relay encodes node IDs; expose the raw integer PK alongside it.
    id_int = graphene.Int(description="The integer representation of the ID")

    @staticmethod
    def resolve_id_int(root, info):
        return int(root.pk)

    class Meta:
        model = Publisher
        fields = '__all__'
        interfaces = (graphene.relay.Node,)
        filter_fields = {
            'id': ['exact'],
            'publisher': ['exact', 'icontains', 'istartswith'],
            'link': ['exact'],
        }
        ordering = ['publisher', 'link']
class GenreOutputType(DjangoObjectType):
    """Relay node type for Genre, filterable and orderable by name."""
    # Relay encodes node IDs; expose the raw integer PK alongside it.
    id_int = graphene.Int(description="The integer representation of the ID")

    @staticmethod
    def resolve_id_int(root, info):
        return int(root.pk)

    class Meta:
        model = Genre
        fields = '__all__'
        interfaces = (graphene.relay.Node,)
        filter_fields = {
            'id': ['exact'],
            'name': ['exact', 'icontains', 'istartswith'],
        }
        ordering = ['name']
class BookListOutputType(DjangoObjectType):
    """Relay node type for BookList entries, with rich related-field
    filtering (publisher, author, tag, genre) and choice-field enums."""
    # Relay encodes node IDs; expose the raw integer PK alongside it.
    id_int = graphene.Int(description="The integer representation of the ID")

    @staticmethod
    def resolve_id_int(root, info):
        return int(root.pk)

    class Meta:
        model = BookList
        fields = '__all__'
        interfaces = (graphene.relay.Node,)
        convert_choices_to_enums = True
        filter_fields = {
            'id': ['exact'],
            'title': ['exact', 'icontains', 'istartswith'],
            'type': ['exact'],
            'publisher__publisher': ['exact', 'icontains', 'istartswith'],
            'link': ['exact', 'icontains'],
            'author__name': ['exact', 'icontains', 'istartswith'],
            'tag__skill': ['exact', 'icontains', 'istartswith'],
            'genre__name': ['exact', 'icontains', 'istartswith'],
            'slug': ['exact'],
        }
        ordering = [
            'title',
            'type',
            'publisher__publisher',
            'link',
            'author__name',
            'tag__skill',
            'genre__name'
        ]
class FormatOutputType(DjangoObjectType):
    """Relay node type for Format (e.g. reading medium)."""
    # Relay encodes node IDs; expose the raw integer PK alongside it.
    id_int = graphene.Int(description="The integer representation of the ID")

    @staticmethod
    def resolve_id_int(root, info):
        return int(root.pk)

    class Meta:
        model = Format
        fields = '__all__'
        interfaces = (graphene.relay.Node,)
        filter_fields = {
            'id': ['exact'],
            'format': ['exact', 'icontains', 'istartswith'],
        }
        ordering = [
            'format'
        ]
class ReadByOutputType(DjangoObjectType):
    """Relay node type for ReadBy (a talent's reading record of a book)."""
    # Relay encodes node IDs; expose the raw integer PK alongside it.
    id_int = graphene.Int(description="The integer representation of the ID")

    @staticmethod
    def resolve_id_int(root, info):
        return int(root.pk)

    class Meta:
        model = ReadBy
        fields = '__all__'
        interfaces = (graphene.relay.Node,)
        filter_fields = {
            'id': ['exact'],
            'talent__alias': ['exact', 'icontains', 'istartswith'],
            'book__title': ['exact', 'icontains', 'istartswith'],
            'type__format': ['exact', 'icontains', 'istartswith'],
            'date': ['exact', 'lt', 'lte', 'gt', 'gte'],
            'review': ['icontains'],
        }
        # Bug fix: the first two entries lacked trailing commas, so implicit
        # string concatenation collapsed 'talent__alias', 'book__title' and
        # 'type' into the single bogus field name
        # 'talent__aliasbook__titletype'.
        ordering = [
            'talent__alias',
            'book__title',
            'type',
            'date',
            '-date',
        ]
41777602056 | # Define the actions we may need during training
# You can define your actions here
import random
from Tool.SendKey import PressKey, ReleaseKey
import time
# Hash code for key we may use: https://docs.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes?redirectedfrom=MSDN
# Windows virtual-key codes for the arrow keys, shift and the letter keys
# the game controller presses.
UP_ARROW = 0x26
DOWN_ARROW = 0x28
LEFT_ARROW = 0x25
RIGHT_ARROW = 0x27
L_SHIFT = 0xA0
A = 0x41
C = 0x43
X = 0x58
Z = 0x5A
F = 0x46
def Look_up():
    """Tap the Up arrow once: press, hold briefly, release."""
    tap_duration = 0.1
    PressKey(UP_ARROW)
    time.sleep(tap_duration)
    ReleaseKey(UP_ARROW)
def restart():
    """Scripted key sequence to restart the game after a death.

    The sleeps are tuned to in-game menu/animation timings — presumably two
    'look up' interactions followed by a Z-key confirm; confirm in-game
    before changing any delay.
    """
    time.sleep(2.5)
    Look_up()
    time.sleep(3)
    Look_up()
    time.sleep(0.7)
    # Confirm with a short Z tap.
    PressKey(Z)
    time.sleep(0.1)
    ReleaseKey(Z)
    time.sleep(2)
def prob(p):
    """Map a probability in [0, 1] to a signed strength in [-1, 1], centred
    on 0.5.  (PEP 8 E731: a named lambda is clearer as a ``def``.)"""
    return (p - 0.5) * 2
def take_action(action):
    """Translate a policy-output dict into key presses.

    ``action['lr']`` selects left (0) / neither (1) / right (2); the other
    entries are values in [0, 1] sampled against ``random.uniform`` so key
    presses are stochastic in proportion to the policy's confidence.
    """
    if action['lr'] == 0:
        ReleaseKey(RIGHT_ARROW)
        PressKey(LEFT_ARROW)
    elif action['lr'] == 2:
        ReleaseKey(LEFT_ARROW)
        PressKey(RIGHT_ARROW)
    else:
        ReleaseKey(LEFT_ARROW)
        ReleaseKey(RIGHT_ARROW)
    # Up/down: `prob` rescales the value around 0.5, so values near the
    # extremes press down/up with high probability, values near 0.5 rarely.
    if action['ud'] > 0.5:
        if prob(action['ud']) > random.uniform(0, 1):
            ReleaseKey(UP_ARROW)
            PressKey(DOWN_ARROW)
        else:
            ReleaseKey(DOWN_ARROW)
            ReleaseKey(UP_ARROW)
    else:
        if prob(action['ud']) < -random.uniform(0, 1):
            ReleaseKey(DOWN_ARROW)
            PressKey(UP_ARROW)
        else:
            ReleaseKey(DOWN_ARROW)
            ReleaseKey(UP_ARROW)
    # Action buttons: pressed with probability equal to the policy value.
    PressKey(Z) if action['Z'] > random.uniform(0, 1) else ReleaseKey(Z)
    PressKey(X) if action['X'] > random.uniform(0, 1) else ReleaseKey(X)
    PressKey(C) if action['C'] > random.uniform(0, 1) else ReleaseKey(C)
    PressKey(F) if action['F'] > random.uniform(0, 1) else ReleaseKey(F)
def ReleaseAll():
    """Release every key this controller may be holding down, in the same
    order as before (arrows first, then action buttons)."""
    for key in (UP_ARROW, DOWN_ARROW, LEFT_ARROW, RIGHT_ARROW, Z, X, C, F):
        ReleaseKey(key)
# Manual smoke test: wait a second, then run the restart key sequence.
# The commented loop below was used to verify repeated X taps.
if __name__ == "__main__":
    time.sleep(1)
    restart()
    # while True:
    #     PressKey(X)
    #     # ReleaseKey(LEFT_ARROW)
    #     print('1')
    #
    #     time.sleep(0.2)
    #     ReleaseKey(X)
    #     print('0')
    #
    #
    #
29416481771 | # creating index based on timestamp and symbol
from openpyxl import load_workbook, Workbook
def mongoDbpopulate(collection):
    """Load five crypto OHLCV workbooks and bulk-insert one document per
    worksheet row into the given MongoDB ``collection``.

    Expected sheet columns: unix | date | symbol | open | high | low |
    close | Volume BTC | Volume USDT | tradecount.
    """
    print(collection.index_information())

    # Loading all the excel files
    wb = load_workbook("./data/BNBUSDT.xlsx", data_only=True)
    shBNB = wb["BNBUSDT"]
    wb = load_workbook("./data/BTCUSDT.xlsx", data_only=True)
    shBTC = wb["BTCUSDT"]
    wb = load_workbook("./data/ETHUSDT.xlsx", data_only=True)
    shETH = wb["ETHUSDT"]
    wb = load_workbook("./data/LTCUSDT.xlsx", data_only=True)
    shLTC = wb["LTCUSDT"]
    wb = load_workbook("./data/NEOUSDT.xlsx", data_only=True)
    shNEO = wb["NEOUSDT"]

    crypto_docs = []
    sheet_list = [
        {"sheet": shBNB, "symbol": "BNB"},
        {"sheet": shBTC, "symbol": "BTC"},
        {"sheet": shETH, "symbol": "ETH"},
        {"sheet": shLTC, "symbol": "LTC"},
        {"sheet": shNEO, "symbol": "NEO"}
    ]

    # Walk every sheet, skipping the header row.  (Locals renamed from
    # `dict`/`map`, which shadowed the Python builtins.)
    for sheet_info in sheet_list:
        symbol = sheet_info["symbol"]
        for row in sheet_info["sheet"].iter_rows(min_row=2):
            doc = {}
            doc["timestamp"] = row[0].value
            doc["date"] = row[1].value
            doc["symbol"] = symbol
            doc["open"] = row[3].value
            doc["high"] = row[4].value
            doc["low"] = row[5].value
            doc["close"] = row[6].value
            doc["VolumeBTC"] = row[7].value
            doc["VolumeUSDT"] = row[8].value
            doc["tradecount"] = row[9].value
            crypto_docs.append(doc)

    # Guard the tail: with empty workbooks, `crypto_docs[0]` would raise
    # IndexError and `insert_many([])` raises InvalidOperation.
    if crypto_docs:
        print(crypto_docs[0])
        print(len(crypto_docs))
        print()
        collection.insert_many(crypto_docs)
cryptoList = []
| tarunsai284/flaskPro | static/mongoDBpopulate.py | mongoDBpopulate.py | py | 1,808 | python | en | code | 0 | github-code | 13 |
41619144916 | import pygame
class Button:
    """A clickable 50x50 image button drawn on a pygame surface."""

    def __init__(self, pos, display_surface, path) -> None:
        self.image = pygame.image.load(path)
        self.image = pygame.transform.scale(self.image, (50, 50))
        self.rect = self.image.get_rect()
        self.rect.topleft = pos
        self.display_surface = display_surface
        # Debounce bookkeeping; not used by this class's own methods —
        # kept for callers that manage input timing externally.
        self.start_time = pygame.time.get_ticks()
        self.allow_input = False
        self.timer_lenght = 500

    def draw(self):
        """Blit the button image at its rect position."""
        self.display_surface.blit(self.image, self.rect)

    def detect_colliction(self):
        """Return True while the left mouse button is pressed over the
        button.  (Method name keeps the original spelling: public API.)"""
        # Removed leftover debug `print(self.allow_input)` that spammed the
        # console on every frame.
        self.pos = pygame.mouse.get_pos()
        pressed = False
        if self.rect.collidepoint((self.pos)):
            if pygame.mouse.get_pressed()[0]:
                pressed = True
        return pressed
7296431355 | # Work №1 - Task №3
# Задача 6: Вы пользуетесь общественным транспортом?
# Вероятно, вы расплачивались за проезд и получали билет с номером.
# Счастливым билетом называют такой билет с шестизначным номером, где сумма первых трех цифр равна сумме последних трех.
# Т.е. билет с номером 385916 – счастливый, т.к. 3+8+5=9+1+6. Вам требуется написать программу, которая проверяет счастливость билета.
# *Пример:*
# 385916 -> yes
# 123456 -> no
# ------------
# Вариант №1
# ------------
# n = input('Enter six-digit number ticket: ')
# list1 = int(n[0]) + int(n[1]) + int(n[2])
# list2 = int(n[3]) + int(n[4]) + int(n[5])
# count = len(n)
# if count == 6:
# if list1 == list2:
# print('Yes, thes lucky ticket')
# else:
# print('Oh no, thes ticket losers')
# else:
# print('Error, enter six-digit number!')
# ------------
# Вариант №2
# ------------
# Lucky-ticket check: a six-digit number is "lucky" when the digit sum of
# one half equals the digit sum of the other half.
n = input('Enter six-digit number tiket: ')
firstn = int(0)
secondn = int(0)
count = len(n)
if count == 6:
    n = int(n)
    # n // 10**i % 10 extracts the i-th digit from the LEAST significant
    # end, so `firstn` actually sums the last three digits and `secondn`
    # the first three — the equality test is symmetric, so the result is
    # still correct despite the misleading names.
    for i in range(6):
        if i < 3:
            firstn = firstn + n // 10 ** i % 10
        else:
            secondn = secondn + n // 10 ** i % 10
    if firstn == secondn:
        print('Yes, thes lucky ticket')
    else:
        print('Oh no, thes ticket losers')
else:
    print('Error, enter six-digit number!')
| Ritorta/HomeWork_Python | Work№1/Task3/W1Z3.py | W1Z3.py | py | 1,659 | python | ru | code | 0 | github-code | 13 |
73226656338 | from fastapi.responses import HTMLResponse, RedirectResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from fastapi import FastAPI
from fastapi import Request, Depends, Request, Form
from config.db import SessionLocal, engine
import model.curso_model
from sqlalchemy.orm import Session
from model.curso_model import CursoM
# Create any missing tables for the curso model before serving requests.
model.curso_model.Base.metadata.create_all(bind=engine)
app = FastAPI()
def get_database_session():
    # FastAPI dependency: yield one SQLAlchemy session per request and
    # close it afterwards.
    # NOTE(review): if SessionLocal() itself raises, `db` is unbound when
    # the finally block runs (NameError) — confirm/refactor.
    try:
        db = SessionLocal()
        yield db
    finally:
        db.close()
# Static assets and Jinja2 templates used by the HTML routes below.
app.mount("/static", StaticFiles(directory="static"), name="static")
templates = Jinja2Templates(directory="templates")
@app.get("/", response_class=HTMLResponse)
def home_page(request: Request):
    # Render the landing page.
    return templates.TemplateResponse("index.html", {"request": request})
@app.get("/curso/{id}", response_class=HTMLResponse)
def get_curso_id(request: Request, id: int, db: Session = Depends(get_database_session)):
    # Render the detail view for one course by primary key.
    # NOTE(review): `curso` is None when the id does not exist — the
    # template presumably handles that; confirm.
    curso = db.query(CursoM).filter(CursoM.id == id).first()
    return templates.TemplateResponse("view_curso.html", {"request": request, "curso": curso})
@app.get("/curso", response_class=HTMLResponse)
def get_curso_all(request: Request, db: Session = Depends(get_database_session)):
    # Render the list view with every course.
    cursos = db.query(CursoM).all()
    return templates.TemplateResponse("list_curso.html", {"request": request, "curso_list": cursos})
@app.get("/create_curso_ui", response_class=HTMLResponse)
async def create_curso_ui(request: Request):
    # Render the "create course" form.
    return templates.TemplateResponse("create_curso.html", {"request": request})
@app.post("/create_curso/", response_class=HTMLResponse)
def create_curso(nombre: str = Form(...), profesor: str = Form(...), creditos: int = Form(...), ciclo: int = Form(...), db: Session = Depends(get_database_session)):
    # Persist a new course from the submitted form and go back to the list
    # (303 so the browser re-issues a GET).
    curso = CursoM(nombre=nombre, profesor=profesor, creditos=creditos, ciclo=ciclo)
    db.add(curso)
    db.commit()
    return RedirectResponse(url="/curso", status_code=303)
@app.get("/asignar_curso_ui", response_class=HTMLResponse)
async def asignar_curso_ui(request: Request):
    # Render the "assign student to course" form.
    return templates.TemplateResponse("asignar_curso.html", {"request": request})
@app.post("/asignar_curso", response_class=HTMLResponse)
def asignar_curso(id_curso: int = Form(...), id_alumno: int = Form(...), db: Session = Depends(get_database_session)):
    # Assign student `id_alumno` to course `id_curso`:
    #  1. append the student id to the course's pipe-separated `alumnos` list;
    #  2. bump the student's enrolled-course counter in Alumnos.Alumnos.
    curso = db.query(CursoM).filter(CursoM.id == id_curso).first()
    # Bug fix: the old code wrapped the appended value in literal single
    # quotes ("'1|2'"); those quotes were stored as part of the column value
    # and corrupted the pipe-separated list.
    if curso.alumnos is None:
        update = str(id_alumno)
    else:
        update = curso.alumnos + "|" + str(id_alumno)
    db.query(CursoM).filter(CursoM.id == id_curso).update({CursoM.alumnos: update})
    db.commit()

    # Bound parameters instead of string-built SQL (avoids injection and
    # quoting mistakes).
    asignacion = db.execute(
        "SELECT a.cursos FROM Alumnos.Alumnos a WHERE id = :id",
        {"id": id_alumno},
    ).fetchone()
    cursos = 1 if asignacion[0] is None else asignacion[0] + 1
    db.execute(
        "UPDATE Alumnos.Alumnos a SET a.cursos = :cursos WHERE id = :id",
        {"cursos": cursos, "id": id_alumno},
    )
    db.commit()
    return RedirectResponse(url="/curso", status_code=303)
@app.get("/curso/delete/{id}", response_class=HTMLResponse)
def delete_curso(id: int, db: Session = Depends(get_database_session)):
    # Delete the course by primary key and return to the list view.
    db.query(CursoM).filter(CursoM.id == id).delete()
    db.commit()
    return RedirectResponse(url="/curso", status_code=303)
@app.get("/curso/update/{id}", response_class=HTMLResponse)
def update_curso(id: int, request: Request, db: Session = Depends(get_database_session)):
    # Render the edit form pre-filled with the current course.
    # NOTE(review): the POST handler below reuses the name `update_curso`,
    # shadowing this function at module level; both routes still work
    # because FastAPI registers them at decoration time.
    result = db.query(CursoM).filter(CursoM.id == id).first()
    return templates.TemplateResponse("update_curso.html", {"request": request, "curso": result})
@app.post("/update_curso", response_class=HTMLResponse)
def update_curso(request: Request, id: int = Form(...), profesor: str = Form(...), creditos: int = Form(...), db: Session = Depends(get_database_session)):
    # Apply the submitted edits (teacher and credits only) and redirect.
    db.query(CursoM).filter(CursoM.id == id).update({CursoM.profesor: profesor, CursoM.creditos: creditos})
    db.commit()
    return RedirectResponse(url="/curso", status_code=303)
13159929455 | import numpy as np
from DriftAnalysisFramework.Optimization import CMA_ES
from DriftAnalysisFramework.Transformation import CMA_ES as TR
from DriftAnalysisFramework.Fitness import Sphere
from alive_progress import alive_bar
# Globals
groove_iteration = 5000
measured_samples = 1000000

# Parameter grid: 64 angles x 256 condition numbers.
alpha_sequence = np.linspace(0, np.pi / 4, num=64)
kappa_sequence = np.geomspace(1 / 20, 20, num=256)

alg = CMA_ES(Sphere(), {
    "d": 2,
    "p_target": 0.1818,
    "c_p": 0.8333,
    "c_cov": 0.2,
    "dim": 2
})

# stable sigma experiment
print("Stable Sigma Experiment")

# prepare algorithm inputs: one column per (alpha, kappa) pair on the grid
alpha, kappa, sigma = np.repeat(alpha_sequence, kappa_sequence.shape[0]), np.tile(kappa_sequence,
                                                                                  alpha_sequence.shape[0]), 1
m, C, sigma = TR.transform_to_parameters(alpha, kappa, sigma)

# Burn-in ("grooving") phase: let sigma settle before measuring.
with alive_bar(groove_iteration, force_tty=True, title="Grooving", bar="notes", title_length=10) as bar:
    for i in range(groove_iteration):
        sigma = alg.step(m, C, sigma)[2]
        bar()

# Bug fix: np.empty() returns UNINITIALISED memory, so the `+=`
# accumulation below started from garbage values — use zeros.
sigma_store = np.zeros([alpha_sequence.shape[0] * kappa_sequence.shape[0]])

with alive_bar(measured_samples, force_tty=True, title="Collecting") as bar:
    for i in range(measured_samples):
        sigma = alg.step(m, C, sigma)[2]
        sigma_store += sigma
        bar()

# store the data in an efficient form to allow for interpolation later
stable_sigma_data = (sigma_store / measured_samples).reshape(alpha_sequence.shape[0], kappa_sequence.shape[0])

# Run data
filename = "./data/stable_sigma.txt"

# Write the array of strings into the file
with open(filename, 'w') as f:
    f.write('./data/stable_sigma.npz\n')
    f.write(str(measured_samples) + '\n')
    f.write(str(groove_iteration) + '\n')

# Save variables into a file
np.savez('./data/stable_sigma.npz',
         alpha=alpha_sequence, kappa=kappa_sequence,
         stable_sigma=stable_sigma_data
         )
| Sm4ster/DriftAnalysisFramework | py/CMA_sigma_analysis.py | CMA_sigma_analysis.py | py | 1,942 | python | en | code | 0 | github-code | 13 |
6213905517 | # 557. Reverse Words in a String III
class Solution:
    def reverseWords(self, s: str) -> str:
        """Reverse the characters of every word in ``s`` while keeping the
        original word order and whitespace exactly as given.

        Splitting on a single space preserves runs of consecutive spaces
        (they become empty "words" that reverse to themselves), matching
        the original index-bookkeeping implementation on all inputs.
        """
        return " ".join(word[::-1] for word in s.split(" "))

    def reverseOneWord(self, s, start, end):
        """Two-pointer in-place reverse of s[start:end+1] (s is a list of
        characters).  Kept for backward compatibility; no longer used by
        reverseWords."""
        i = start
        j = end
        l = (end - start) + 1
        mid = start + int(l / 2)
        while(True):
            if i == mid:
                break
            else: # swap
                temp = s[i]
                s[i] = s[j]
                s[j] = temp
                i += 1
                j -= 1
| feyza-droid/leetcode_solutions | 0557/main.py | main.py | py | 1,098 | python | en | code | 0 | github-code | 13 |
71670952018 | #!/usr/bin/python3
if __name__ == "__main__":
from sys import argv
if len(argv) != 4:
print("Usage: ./100-my_calculator.py <a> <operator> <b>")
quit(1)
a = int(argv[1])
b = int(argv[3])
ops = ["+", "-", "*", "/"]
from calculator_1 import add, sub, mul, div
funcs = [add, sub, mul, div]
for i, s in enumerate(ops):
if argv[2] == s:
print("{} {} {} = {}".format(a, s, b, funcs[i](a, b)))
break
else:
print("Unknown operator. Available operators: +, -, * and /")
quit(1)
| leelshaday/alx-higher_level_programming | 0x02-python-import_modules/100-my_calculator.py | 100-my_calculator.py | py | 568 | python | en | code | 6 | github-code | 13 |
8092978118 | from .bp_lib import bp_types, bp_unit, bp_utils
from . import data_cabinet_parts
from . import data_cabinet_carcass
from . import data_countertops
from . import data_cabinet_doors
from . import kitchen_utils
import time
import math
class Standard_Cabinet(bp_types.Assembly):
    """A parametric kitchen cabinet assembly: carcass plus optional
    countertop, exterior (doors/drawers) and interior components."""

    # Library/browser metadata used by the surrounding add-on.
    show_in_library = True
    category_name = "Cabinets"
    prompt_id = "kitchen.cabinet_prompts"
    placement_id = "kitchen.place_cabinet"

    # Component assemblies assigned by subclasses/callers before draw().
    carcass = None
    interior = None
    exterior = None
    splitter = None

    def draw(self):
        # Build the full assembly hierarchy with driver-linked dimensions.
        start_time = time.time()

        props = kitchen_utils.get_kitchen_scene_props()

        self.create_assembly()
        self.obj_bp["IS_CABINET_BP"] = True
        self.obj_y['IS_MIRROR'] = True

        # Default size: 18" wide, scene-default depth/height (depth is
        # negative because Y is mirrored).
        self.obj_x.location.x = bp_unit.inch(18)
        self.obj_y.location.y = -props.base_cabinet_depth
        self.obj_z.location.z = props.base_cabinet_height

        # User-adjustable countertop overhang prompts.
        ctop_front = self.add_prompt("Countertop Overhang Front",'DISTANCE',bp_unit.inch(1))
        ctop_back = self.add_prompt("Countertop Overhang Back",'DISTANCE',bp_unit.inch(0))
        ctop_left = self.add_prompt("Countertop Overhang Left",'DISTANCE',bp_unit.inch(0))
        ctop_right = self.add_prompt("Countertop Overhang Right",'DISTANCE',bp_unit.inch(0))

        # Driver variables reused in the expression strings below.
        width = self.obj_x.drivers.get_var('location.x','width')
        depth = self.obj_y.drivers.get_var('location.y','depth')
        height = self.obj_z.drivers.get_var('location.z','height')
        ctop_overhang_front = ctop_front.get_var('ctop_overhang_front')
        ctop_overhang_back = ctop_back.get_var('ctop_overhang_back')
        ctop_overhang_left = ctop_left.get_var('ctop_overhang_left')
        ctop_overhang_right = ctop_right.get_var('ctop_overhang_right')

        # Carcass fills the cabinet's full bounding box.
        carcass = self.add_assembly(self.carcass)
        carcass.set_name('Carcass')
        carcass.loc_x(value=0)
        carcass.loc_y(value=0)
        carcass.loc_z(value=0)
        carcass.dim_x('width',[width])
        carcass.dim_y('depth',[depth])
        carcass.dim_z('height',[height])

        material_thickness = carcass.get_prompt('Material Thickness').get_var('material_thickness')
        toe_kick_height = carcass.get_prompt('Toe Kick Height').get_var('toe_kick_height')

        # Countertop only for cabinet types that sit under one.
        if carcass.carcass_type in {'Base','Suspended','Sink'}:
            countertop = self.add_assembly(data_countertops.Countertop())
            countertop.set_name('Countertop')
            countertop.loc_x('-ctop_overhang_left',[ctop_overhang_left])
            countertop.loc_y('ctop_overhang_back',[ctop_overhang_back])
            countertop.loc_z('height',[height])
            countertop.dim_x('width+ctop_overhang_left+ctop_overhang_right',[width,ctop_overhang_left,ctop_overhang_right])
            countertop.dim_y('depth-(ctop_overhang_front+ctop_overhang_back)',[depth,ctop_overhang_front,ctop_overhang_back])
            countertop.dim_z(value=.1)

        # Exterior/interior are inset by material thickness on the sides
        # and sit above the toe kick.
        if self.exterior:
            exterior = self.add_assembly(self.exterior)
            exterior.loc_x('material_thickness',[material_thickness])
            exterior.loc_y('depth',[depth])
            exterior.loc_z('toe_kick_height+material_thickness',[toe_kick_height,material_thickness])
            exterior.dim_x('width-(material_thickness*2)',[width,material_thickness])
            exterior.dim_y('depth',[depth])
            exterior.dim_z('height-toe_kick_height-(material_thickness*2)',[height,toe_kick_height,material_thickness])

        if self.interior:
            # NOTE(review): this branch reuses the local name `exterior`
            # for the interior assembly (possibly copy-paste); placement
            # expressions are identical to the exterior branch — confirm.
            exterior = self.add_assembly(self.interior)
            exterior.loc_x('material_thickness',[material_thickness])
            exterior.loc_y('depth',[depth])
            exterior.loc_z('toe_kick_height+material_thickness',[toe_kick_height,material_thickness])
            exterior.dim_x('width-(material_thickness*2)',[width,material_thickness])
            exterior.dim_y('depth',[depth])
            exterior.dim_z('height-toe_kick_height-(material_thickness*2)',[height,toe_kick_height,material_thickness])

        print("Test_Cabinet: Draw Time --- %s seconds ---" % (time.time() - start_time))
10699623175 |
#!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
from sklearn import svm
import matplotlib.pyplot as plt
from PIL import Image
if __name__ == "__main__":
    '''
    N = 50
    np.random.seed(0)
    x = np.sort(np.random.uniform(0, 6, N), axis=0)
    y = 2*np.sin(x)
    x = x.reshape(-1, 1)
    print ('x =\n', x)
    print ('y =\n', y)
    '''
    #############################################################################
    # Image binarisation
    img = Image.open('Z_SWGO_I_59140_20180923020000_O_INSD_DIS(cutF).jpg')
    # Mode "L" is an 8-bit grayscale image: 0 is black, 255 is white, other
    # values are intermediate grays.
    Img = img.convert('L')
    Img.save("test1.jpg")

    # Grayscale cut-off: values below `threshold` map to 0 (black), values
    # at or above it map to 1 (white) in the lookup table below.
    threshold = 200

    table = []
    for i in range(256):
        if i < threshold:
            table.append(0)
        else:
            table.append(1)

    # Apply the table to binarise the image.
    photo = Img.point(table, '1')
    #photo.save("test2.jpg")
    # `photo` is the binarised image.
    matrix1 = np.array(photo)
    matrix2=matrix1.astype(int)
    width1,height1 = img.size
    i=j=0
    Xa=[]
    Ya=[]
    #X=[x for y in matrix2 for x in y]
    # Flatten the matrix to one dimension.
    X=matrix2.reshape(-1)
    print(len(X))
    # Collect the (column, flipped-row) coordinates of every black pixel;
    # y is flipped so the plot's origin is at the bottom-left.
    for j in range(height1):
        for i in range(width1):
            if X[i+width1*j]==0:
                Xa.append([i])
                m=height1 - j
                Ya.append([m])
    Xa1=np.array(Xa,dtype=float)
    Ya1=np.array(Ya,dtype=float)
    plt.plot(Xa1, Ya1, 'mo', markersize=2)
    #############################################################################
    # Data: fit an RBF support-vector regression to the extracted trace.
    x = np.array(Xa1)
    y = np.array(Ya1)

    print ('SVR - RBF')
    svr_rbf = svm.SVR(kernel='rbf', gamma=0.01, C=20)
    svr_rbf.fit(x, y)
    '''
    print ('SVR - Linear')
    svr_linear = svm.SVR(kernel='linear', C=100)
    svr_linear.fit(x, y)
    '''
    '''
    print ('SVR - Polynomial')
    svr_poly = svm.SVR(kernel='poly', degree=2, C=100)
    svr_poly.fit(x, y)
    '''
    print ('Fit OK.')

    # Exercise: try changing the coefficient 1.1 to 1.5.
    x_test = np.linspace(x.min(), x.max(), 100).reshape(-1, 1)
    y_rbf = svr_rbf.predict(x_test)
    #y_linear = svr_linear.predict(x_test)
    #y_poly = svr_poly.predict(x_test)

    # Plot the RBF fit result.
    #plt.plot(x_test, y_rbf, 'r-', linewidth=2, label='RBF Kernel')
    plt.figure(figsize=(9, 8), facecolor='w')
    plt.plot(x_test, y_rbf, 'r-', linewidth=2, label='RBF Kernel')
    #plt.plot(x_test, y_linear, 'g-', linewidth=2, label='Linear Kernel')
    #plt.plot(x_test, y_poly, 'b-', linewidth=2, label='Polynomial Kernel')
    #plt.plot(x, y, 'mo', markersize=6)
    plt.scatter(x[svr_rbf.support_], y[svr_rbf.support_], s=1, c='r', marker='*', label='RBF Support Vectors')
    plt.legend(loc='lower left')
    plt.title('SVR', fontsize=16)
    plt.xlabel('X')
    plt.ylabel('Y')
    plt.grid(True)
    plt.show()
2462880831 | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 24 18:25:56 2016
@author: Ryan-Rhys
"""
import numpy as np
import matplotlib.pyplot as plt
# Hamaker coefficient values taken from Parsegian and Weiss 1981.
# The values given in table IId in this paper were converted to units of kT.
# For reference: 1 erg = 1*10^-7 joules
# 4.04*10^-21 joules = 1 kT at room temperature.
# Hamaker coefficients for two gold nanospheres across water, no electrolyte.
DESY_Hamaker = 74.26
JC_Hamaker = 24.75
# Nanoparticle Radius in nanometres.
NP_Radius1 = 10.0
NP_Radius2 = 20.0
NP_Radius3 = 40.0
sep_vals = []
# Collecting the x-axis list (separation between nanoparticles in nm).
# Using largest surface to surface distance as 6 times the NP radius.
energy_vals_DESY1 = []
energy_vals_DESY2 = []
energy_vals_DESY3 = []
#energy_vals_JC = []
# Employing the centre-centre distance (holder_2) in the energy equation.
holder1 = (2*NP_Radius1) + 5.0
holder2 = (2*NP_Radius2) + 5.0
holder3 = (2*NP_Radius3) + 5.0
while holder1 < (6*NP_Radius1):
sep_vals.append(holder1 - (2*NP_Radius1))
# Formula below taken from Parsegian's 2006 book, page 155.
energy_vals_DESY1.append((-DESY_Hamaker/3.0)*((NP_Radius1**2)/(holder1**2 - 4*NP_Radius1**2)+(NP_Radius1**2)/(holder1**2)+(0.5*np.log(1-(4*NP_Radius1**2/holder1**2)))))
energy_vals_DESY2.append((-DESY_Hamaker/3.0)*((NP_Radius2**2)/(holder2**2 - 4*NP_Radius2**2)+(NP_Radius2**2)/(holder2**2)+(0.5*np.log(1-(4*NP_Radius2**2/holder2**2)))))
energy_vals_DESY3.append((-DESY_Hamaker/3.0)*((NP_Radius3**2)/(holder3**2 - 4*NP_Radius3**2)+(NP_Radius3**2)/(holder3**2)+(0.5*np.log(1-(4*NP_Radius3**2/holder3**2)))))
#energy_vals_JC.append((-JC_Hamaker/3.0)*((NP_Radius**2)/(holder**2 - 4*NP_Radius**2)+(NP_Radius**2)/(holder**2)+(0.5*np.log(1-(4*NP_Radius**2/holder**2)))))
holder1 += 0.1
holder2 += 0.1
holder3 += 0.1
#energy_vals_DESY2 = []
#sep_vals = []
#holder7 = (2*NP_Radius2) + 0.1
#while holder7 < (6*NP_Radius1):
#sep_vals.append(holder7 - (2*NP_Radius2))
#energy_vals_DESY2.append((-DESY_Hamaker/3.0)*((NP_Radius2**2)/(holder7**2 - 4*NP_Radius2**2)+(NP_Radius2**2)/(holder7**2)+(0.5*np.log(1-(4*NP_Radius2**2/holder7**2)))))
#holder7 += 0.1
plt.figure(1)
plt.title('Identical Gold NPs - Size Comparison')
plt.plot(sep_vals, energy_vals_DESY1, label = 'Radius of 10 nm')
plt.plot(sep_vals, energy_vals_DESY2, label = 'Radius of 20 nm')
plt.plot(sep_vals, energy_vals_DESY3, label = 'Radius of 40 nm')
#plt.plot(sep_vals, energy_vals_JC, label = 'Johnson and Christy')
plt.xlabel('Surface to Surface Separation (nm)')
plt.ylabel('Van der Waals Potential (kT)')
plt.legend(loc=9, bbox_to_anchor=(0.5, -0.2))
plt.xlim(5.0, 40.0)
| Ryan-Rhys/Nanoparticle-Systems | The_Bulk_Error_Margins.py | The_Bulk_Error_Margins.py | py | 2,757 | python | en | code | 2 | github-code | 13 |
38899059956 | # THIS SCRIPT IS PROVIDED BY THE ORGANIZATOR
import re
import pandas as pd
import os
import numpy as np
import gradio as gr
from src.utils.preprocess_utils import preprocess_text
from src.utils.constants import TARGET_DICT, TARGET_INV_DICT
from src.models.bert_model import BertModel
# CV Voting Model Load
# For model class import, model checkpoint looks for models sub-dir
# import sys
# sys.path.append("./src")
#
# Loading and transferring the model
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#
# # Competition Model
# model = torch.load("checkpoint/blend_model.bin",
# map_location=device)
# model.set_device(device)
# Competition Model
model = BertModel(model_path="l2reg/toxic-dbmdz-bert-base-turkish-128k-uncased")
model.load()

# Case-Unbiased Model (trained with casing bias removed)
case_unbiased_model = BertModel(model_path="l2reg/toxic-dbmdz-bert-base-turkish-128k-uncased-casing-unbiased")
case_unbiased_model.load()

# Fully-Unbiased Model (product mode)
fully_unbiased_model = BertModel(model_path="l2reg/toxic-dbmdz-bert-base-turkish-128k-uncased-fully-unbiased")
fully_unbiased_model.load()
# Cased-Sentence ratio
def get_uppercase_sentence_ratio(input_df: pd.DataFrame):
    """
    Share of rows in ``input_df["text"]`` containing at least one ASCII
    uppercase letter (A-Z), rounded to 3 decimals.
    ---------
    :param input_df: input dataframe with a "text" column
    :return: Uppercase sentence ratio
    """
    # Same predicate as the original regex '[A-Z]': strictly ASCII
    # uppercase, so accented uppercase letters do not count.
    has_upper = input_df["text"].apply(
        lambda text: any("A" <= ch <= "Z" for ch in text)
    )
    return np.round(has_upper.astype(int).mean(), 3)
# Authorization routine
def auth(username: str, password: str):
    """Gradio auth callback: accept only the fixed username together with
    the password stored in the ``space_auth_pass`` environment variable."""
    expected_user = "L2_Regulasyon"
    # `and` short-circuits, so the environment variable is only read once
    # the username already matches — same control flow as before.
    return username == expected_user and password == os.environ["space_auth_pass"]
def predict(df: pd.DataFrame):
    """
    Model inference for Gradio app: label each row offensive/target class.
    ---------
    :param df: input dataframe with "id" and "text" columns
    :return: Dataframe with wanted columns (id, text, is_offensive, target).
    """
    df["is_offensive"] = 1
    df["target"] = "OTHER"

    # Case-Specific Competition Routine: pick the model based on how many
    # rows contain uppercase letters (a heuristic tuned for the contest's
    # evaluation data — confirm before reusing outside the competition).
    cased_ratio = get_uppercase_sentence_ratio(df)
    print(f"CR: {cased_ratio}")

    if (cased_ratio <= 0.35) and (cased_ratio >= 0.25):
        print(f"Using # routine...")
        df["proc_text"] = preprocess_text(df["text"],
                                          prevent_bias=0)
        pred_classes, _ = model.predict(df["proc_text"])
    else:
        print(f"Using lower routine...")
        df["proc_text"] = preprocess_text(df["text"],
                                          prevent_bias=1)
        pred_classes, _ = case_unbiased_model.predict(df["proc_text"])

    # Class ID > Text
    for pred_i, pred in enumerate(pred_classes):
        pred_classes[pred_i] = TARGET_INV_DICT[pred] if pred in [0, 1, 2, 3, 4] else pred
    df["target"] = pred_classes

    # "OTHER" means non-toxic, so clear the offensive flag for those rows.
    df.loc[df["target"] == "OTHER", "is_offensive"] = 0
    return df[["id", "text", "is_offensive", "target"]]
def get_file(file):
    """
    Read the uploaded pipe-separated CSV, run toxicity prediction on it
    and dump the labelled rows to a pipe-separated output CSV.
    ---------
    :param file: uploaded file object exposing a ``.name`` path attribute
    :return: path of the processed output .csv file
    """
    output_file = "output_L2_Regulasyon.csv"
    # For Windows users, replace path seperator
    file_name = file.name.replace("\\", "/")

    print("-"*30)
    frame = pd.read_csv(file_name, sep="|")
    print(f"Got {file_name}.\nIt consists of {len(frame)} rows!")

    frame = predict(frame)

    print("-" * 30)
    frame.to_csv(output_file, index=False, sep="|")
    return output_file
def demo_inference(selected_model: str,
                   input_text: str):
    """
    Reads, processes and dumps the results.
    ---------
    :param selected_model: Selected model for demo-inference
    :param input_text: to-be-processed text
    :return: Class probability predictions for the given text
    """
    # Each radio label maps to a model and its matching preprocessing bias mode.
    # NOTE(review): an unknown label would leave pred_classes unbound and raise
    # below — labels come from a fixed Radio widget, so this path is unreachable
    # in practice.
    series = pd.Series([input_text])
    if selected_model == "Yarışma Modeli":
        processed = preprocess_text(series, prevent_bias=0)
        pred_classes, pred_probas = model.predict(processed)
    elif selected_model == "Case-Unbiased Model":
        processed = preprocess_text(series, prevent_bias=1)
        pred_classes, pred_probas = case_unbiased_model.predict(processed)
    elif selected_model == "Fully-Unbiased Model (Ürün Modu)":
        processed = preprocess_text(series, prevent_bias=2)
        pred_classes, pred_probas = fully_unbiased_model.predict(processed)

    # Map numeric class ids (0-4) back to their text labels.
    for idx, cls in enumerate(pred_classes):
        if cls in (0, 1, 2, 3, 4):
            pred_classes[idx] = TARGET_INV_DICT[cls]

    print("*" * 30)
    print("Ran inference on a custom text!")
    print("*" * 30)
    # Pair every known label with its predicted probability.
    return dict(zip(list(TARGET_DICT.keys()), pred_probas[0].tolist()))
# Demo-tab model choices; these labels are matched verbatim inside demo_inference.
model_selector = gr.Radio(["Yarışma Modeli", "Case-Unbiased Model", "Fully-Unbiased Model (Ürün Modu)"])
# Launch the interface with user password
# Competition tab: CSV file in / CSV file out.  Demo tab: one text in, top-5 class probabilities out.
competition_interface = gr.Interface(get_file, "file", gr.File())
demo_interface = gr.Interface(demo_inference, [model_selector, gr.Text()], gr.Label(num_top_classes=5))
if __name__ == "__main__":
    # Serve both screens as tabs; login auth is only enforced when the
    # "space_auth_pass" secret exists in the environment.
    gr.TabbedInterface(
        [competition_interface, demo_interface], ["Yarışma Ekranı", "Demo Ekranı"]
    ).launch(server_name="0.0.0.0",
             share=False,
             auth=None if ("space_auth_pass" not in os.environ) else auth)
| L2-Regulasyon/Teknofest2023 | app.py | app.py | py | 5,610 | python | en | code | 8 | github-code | 13 |
5660744765 | with open("score.txt", "r") as f :
data = f.readlines()
# a: split rows [name, score1, score2]; b: weighted totals; c: letter grades
a=[]; b=[]; c=[]
for i in data :
    a.append(i.split())
# Weighted final score: 40% of the first mark + 60% of the second
for i in a :
    b.append(float(i[1])*0.4+float(i[2])*0.6)
# Map each weighted score onto a letter grade
for i in b :
    if i>=90 :
        c.append('(A)')
    elif i>=80 :
        c.append('(B)')
    elif i>=70 :
        c.append('(C)')
    elif i>=60 :
        c.append('(D)')
    else :
        c.append('(F)')
# NOTE(review): "report.txt" is opened for writing but `g` is never used —
# the print() calls below go to stdout and the report file stays empty.
# `file=g` on the prints looks intended; confirm before changing behavior.
with open("report.txt","w") as g:
    for i in range(len(data)):
        for j in range(3):
            print(a[i][j], end=' ')
print(b[i],c[i]) | JinhoCHOIS/AB-A | 파이썬기초/Day3_최진호.py | Day3_최진호.py | py | 636 | python | en | code | 0 | github-code | 13 |
12139795183 | import jieba.posseg as psg
def pos(text):
    """Print every token of *text* with its part-of-speech tag ("word/tag"), then a newline."""
    for word, tag in psg.cut(text):
        print("%s/%s" % (word, tag), end=" ")
    print("")
# Example 1: short place-name phrase
text = "呼伦贝尔大草原"
pos(text)
# Example 2: full sentence containing a venue, a weekday and a time expression
text = "梅兰芳大剧院里星期六晚上有演出"
pos(text)
| 15149295552/Code | Month09/NLP_DATA/NLP_study/NLP_study/06_jieba_pos.py | 06_jieba_pos.py | py | 261 | python | en | code | 1 | github-code | 13 |
39438590371 | from typing import Union, Tuple, Callable, Optional, List
from time import time
from threading import Thread, Lock
import numpy as np
import matplotlib.pyplot as plt
from PyQt5.QtCore import pyqtSignal, Qt, QObject
from PyQt5.QtWidgets import QHBoxLayout, QVBoxLayout, QApplication
from Utility.Layouts import ListWidget, MplCanvas
from Utility.Indexing import RepeatingList, ElementList
from TableWidgets.CompTable import CompRow
from Containers.Element import Element, Elements
from Containers.Arguments import GeneralBeamArguments, GeneralTargetArguments, GeneralArguments, SimulationArguments, RowArguments
class GeneralSettings:
    """
    Common base for all GeneralSetting layout classes.

    Supplies the two Qt signals plus default (mostly no-op) hooks that
    concrete settings classes override. Subclasses mix this with a QLayout.
    """

    # Emitted with a dict of changed values for sibling settings widgets
    settingsChanged = pyqtSignal(dict)
    # Emitted whenever any input field was edited
    contentChanged = pyqtSignal()

    def __init__(self):
        super().__init__()

    def emit(self, value_dict: dict = None):
        """
        Emits settingsChanged pyqtSignal

        :param value_dict: dictionary to emit (ignored when None)
        """
        if value_dict is not None:
            self.settingsChanged.emit(value_dict)

    def edited(self):
        """Emits contentChanged pyqtSignal"""
        self.contentChanged.emit()

    def receive(self, value_dict: dict):
        """
        Receives another widget's settingsChanged payload; no-op by default.

        :param value_dict: dictionary to be received
        """
        pass

    @staticmethod
    def reset():
        """Resets all input fields; no-op by default."""
        pass

    @staticmethod
    def getArguments():
        """Returns container of parameters for settings; must be overridden."""
        raise NotImplementedError

    @staticmethod
    def loadArguments(arguments: SimulationArguments) -> list:
        """Loads <SimulationArguments> container. Returns list of not loadable parameters (default used)"""
        return []
class HlGeneralBeamSettings(GeneralSettings, QHBoxLayout):
    """
    Horizontal layout holding the general beam settings.

    :param version: version of simulation
    """

    def __init__(self, version: str):
        super().__init__()
        # Simulation version these settings are built for
        self.version = version

    @staticmethod
    def getArguments() -> GeneralBeamArguments:
        """Return a default <GeneralBeamArguments> parameter container."""
        return GeneralBeamArguments()
class HlGeneralTargetSettings(GeneralSettings, QHBoxLayout):
    """
    Horizontal layout holding the general target settings.

    :param version: version of simulation
    """

    def __init__(self, version: str):
        super().__init__()
        # Simulation version these settings are built for
        self.version = version

    @staticmethod
    def getArguments() -> GeneralTargetArguments:
        """Return a default <GeneralTargetArguments> parameter container."""
        return GeneralTargetArguments()
class VlGeneralSimulationSettings(GeneralSettings, QVBoxLayout):
    """
    Vertical layout holding the general simulation settings.

    :param version: version of simulation
    """

    def __init__(self, version: str):
        super().__init__()
        # Simulation version these settings are built for
        self.version = version

    @staticmethod
    def getArguments() -> GeneralArguments:
        """Return a <GeneralArguments> container with a placeholder title."""
        return GeneralArguments(title='SIMULATION TITLE MISSING')
class CompRowBeamSettings(CompRow):
    """
    Component row for one beam species.

    :param version: version of simulation
    """

    # Per-column metadata: a list of CustomRowField() entries, e.g.
    #   rowFields = [
    #       CustomRowField(
    #           unique='unique_specifier',  # links beam/target tables; referenced in the input file
    #           label='label_title',        # column header title
    #           tooltip='tooltip'           # column header tooltip (optional)
    #       ),
    #       ...
    #   ]
    # The general base defines no extra columns.
    rowFields = []

    def __init__(self, *args, version: str = '', **kwargs):
        super().__init__(*args, **kwargs)
        self.version = version
        # Extend with one input widget per extra column
        # (QSpinBox, QDoubleSpinBox, QComboBox, ...); none in the base.
        self.row_widgets += []

    def getArguments(self) -> RowArguments:
        """Returns <RowArguments> container of parameters for row"""
        return super().getArguments()

    def setArguments(self, arguments: RowArguments, general_arguments: GeneralArguments):
        """
        Sets <RowArguments> container of parameters for row

        :param arguments: container of <RowArguments>
        :param general_arguments: container of <GeneralArguments>
        """
        super().setArguments(arguments, general_arguments)
class CompRowTargetSettings(CompRow):
    """
    Component row for one target species.

    Unlike CompRowBeamSettings, this row takes no ``version`` argument.
    """

    # Per-column metadata: a list of CustomRowField() entries, e.g.
    #   rowFields = [
    #       CustomRowField(
    #           unique='unique_specifier',  # links beam/target tables; referenced in the input file
    #           label='label_title',        # column header title
    #           tooltip='tooltip'           # column header tooltip (optional)
    #       ),
    #       ...
    #   ]
    # The general base defines no extra columns.
    rowFields = []

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Extend with one input widget per extra column
        # (QSpinBox, QDoubleSpinBox, QComboBox, ...); none in the base.
        self.row_widgets += []

    def getArguments(self) -> RowArguments:
        """Returns <RowArguments> container of parameters for row"""
        return super().getArguments()

    def setArguments(self, arguments: RowArguments, general_arguments: GeneralArguments):
        """
        Sets <RowArguments> container of parameters for row

        :param arguments: container of <RowArguments>
        :param general_arguments: container of <GeneralArguments>
        """
        # Fix: annotation previously said SimulationArguments, contradicting both
        # this docstring and the matching CompRowBeamSettings.setArguments signature.
        super().setArguments(arguments, general_arguments)
class GeneralElementData(Elements):
"""
Simulation supported elements and element specific data
"""
# default element dict from SDTrimSP v6.01
"""
Example:
elementList = [
Element(
# required
symbol=<str>
name=<dict>
atomic_nr=<int>
period=<int>
group=<int>
atomic_mass=<float>
atomic_density=<float>
# optional
mass_density=<float>
periodic_table_symbol=<str>
surface_binding_energy=<float>
displacement_energy=<float>
cutoff_energy=<float>
dissociation_heat=<float>
melt_enthalpy=<float>
vaporization_energy=<float>
formation_enthalpy=<float>
),
Element(...),
...
]
"""
elementList = [
Element(
symbol='H',
name={
'en': 'hydrogen',
'de': 'Wasserstoff'
},
atomic_nr=1,
period=1,
group=1,
atomic_mass=1.007825,
atomic_density=0.04231,
surface_binding_energy=1.1,
displacement_energy=5.0
),
Element(
symbol='He',
name={
'en': 'helium',
'de': 'Helium'
},
atomic_nr=2,
period=1,
group=18,
atomic_mass=4.002603,
atomic_density=0.01878,
surface_binding_energy=0.0,
displacement_energy=5.0
),
Element(
symbol='Li',
name={
'en': 'lithium',
'de': 'Lithium'
},
atomic_nr=3,
period=2,
group=1,
atomic_mass=6.941,
atomic_density=0.04633,
surface_binding_energy=1.64,
displacement_energy=25.0
),
Element(
symbol='Be',
name={
'en': 'beryllium',
'de': 'Beryllium'
},
atomic_nr=4,
period=2,
group=2,
atomic_mass=9.012182,
atomic_density=0.12347,
surface_binding_energy=3.31,
displacement_energy=15.0
),
Element(
symbol='B',
name={
'en': 'boron',
'de': 'Bor'
},
atomic_nr=5,
period=2,
group=13,
atomic_mass=10.811,
atomic_density=0.1309,
surface_binding_energy=5.76,
displacement_energy=25.0
),
Element(
symbol='C',
name={
'en': 'carbon',
'de': 'Kohlenstoff'
},
atomic_nr=6,
period=2,
group=14,
atomic_mass=12.011,
atomic_density=0.11331,
surface_binding_energy=7.37,
displacement_energy=25.0
),
Element(
symbol='N',
name={
'en': 'nitrogen',
'de': 'Stickstoff'
},
atomic_nr=7,
period=2,
group=15,
atomic_mass=14.00674,
atomic_density=0.03784,
surface_binding_energy=4.9,
displacement_energy=25.0
),
Element(
symbol='O',
name={
'en': 'oxygen',
'de': 'Sauerstoff'
},
atomic_nr=8,
period=2,
group=16,
atomic_mass=15.9994,
atomic_density=0.04291,
surface_binding_energy=1.0,
displacement_energy=5.0
),
Element(
symbol='F',
name={
'en': 'fluorine',
'de': 'Fluor'
},
atomic_nr=9,
period=2,
group=17,
atomic_mass=18.998403,
atomic_density=0.04796,
surface_binding_energy=0.82,
displacement_energy=25.0
),
Element(
symbol='Ne',
name={
'en': 'neon',
'de': 'Neon'
},
atomic_nr=10,
period=2,
group=18,
atomic_mass=20.1797,
atomic_density=0.03603,
surface_binding_energy=0.0,
displacement_energy=5.0
),
Element(
symbol='Na',
name={
'en': 'sodium',
'de': 'Natrium'
},
atomic_nr=11,
period=3,
group=1,
atomic_mass=22.989768,
atomic_density=0.02544,
surface_binding_energy=1.11,
displacement_energy=25.0
),
Element(
symbol='Mg',
name={
'en': 'magnesium',
'de': 'Magnesium'
},
atomic_nr=12,
period=3,
group=2,
atomic_mass=24.305,
atomic_density=0.04306,
surface_binding_energy=1.51,
displacement_energy=10.0
),
Element(
symbol='Al',
name={
'en': 'aluminum',
'de': 'Aluminium'
},
atomic_nr=13,
period=3,
group=13,
atomic_mass=26.981539,
atomic_density=0.06022,
surface_binding_energy=3.42,
displacement_energy=16.0
),
Element(
symbol='Si',
name={
'en': 'silicon',
'de': 'Silicium'
},
atomic_nr=14,
period=3,
group=14,
atomic_mass=28.08553,
atomic_density=0.04994,
surface_binding_energy=4.72,
displacement_energy=13.0
),
Element(
symbol='P',
name={
'en': 'phosphoros',
'de': 'Phosphor(weiss)'
},
atomic_nr=15,
period=3,
group=15,
atomic_mass=30.973761,
atomic_density=0.03544,
surface_binding_energy=3.27,
displacement_energy=25.0
),
Element(
symbol='S',
name={
'en': 'sulfur',
'de': 'Schwefel'
},
atomic_nr=16,
period=3,
group=16,
atomic_mass=32.066,
atomic_density=0.03888,
surface_binding_energy=2.85,
displacement_energy=25.0
),
Element(
symbol='Cl',
name={
'en': 'chlorine',
'de': 'Chlor'
},
atomic_nr=17,
period=3,
group=17,
atomic_mass=35.4527,
atomic_density=0.0256,
surface_binding_energy=1.0,
displacement_energy=25.0
),
Element(
symbol='Ar',
name={
'en': 'argon',
'de': 'Argon'
},
atomic_nr=18,
period=3,
group=18,
atomic_mass=39.948,
atomic_density=0.0208,
surface_binding_energy=0.0,
displacement_energy=5.0
),
Element(
symbol='K',
name={
'en': 'potassium',
'de': 'Kalium'
},
atomic_nr=19,
period=4,
group=1,
atomic_mass=39.0983,
atomic_density=0.01328,
surface_binding_energy=0.93,
displacement_energy=25.0
),
Element(
symbol='Ca',
name={
'en': 'calcium',
'de': 'Calcium'
},
atomic_nr=20,
period=4,
group=2,
atomic_mass=40.078,
atomic_density=0.02314,
surface_binding_energy=2.39,
displacement_energy=25.0
),
Element(
symbol='Sc',
name={
'en': 'scandium',
'de': 'Scandium'
},
atomic_nr=21,
period=4,
group=3,
atomic_mass=44.95591,
atomic_density=0.04004,
surface_binding_energy=3.9,
displacement_energy=25.0
),
Element(
symbol='Ti',
name={
'en': 'titanium',
'de': 'Titan'
},
atomic_nr=22,
period=4,
group=4,
atomic_mass=47.867,
atomic_density=0.05712,
surface_binding_energy=4.84,
displacement_energy=19.0
),
Element(
symbol='V',
name={
'en': 'vanadium',
'de': 'Vanadium'
},
atomic_nr=23,
period=4,
group=5,
atomic_mass=50.9415,
atomic_density=0.07223,
surface_binding_energy=5.33,
displacement_energy=26.0
),
Element(
symbol='Cr',
name={
'en': 'chromium',
'de': 'Chrom'
},
atomic_nr=24,
period=4,
group=6,
atomic_mass=51.9961,
atomic_density=0.08327,
surface_binding_energy=5.22,
displacement_energy=28.0
),
Element(
symbol='Mn',
name={
'en': 'maganese',
'de': 'Mangan'
},
atomic_nr=25,
period=4,
group=7,
atomic_mass=54.938049,
atomic_density=0.08155,
surface_binding_energy=2.92,
displacement_energy=25.0
),
Element(
symbol='Fe',
name={
'en': 'iron',
'de': 'Eisen'
},
atomic_nr=26,
period=4,
group=8,
atomic_mass=55.847,
atomic_density=0.08491,
surface_binding_energy=4.28,
displacement_energy=17.0
),
Element(
symbol='Co',
name={
'en': 'cobalt',
'de': 'Cobalt'
},
atomic_nr=27,
period=4,
group=9,
atomic_mass=58.9332,
atomic_density=0.09084,
surface_binding_energy=4.39,
displacement_energy=22.0
),
Element(
symbol='Ni',
name={
'en': 'nickel',
'de': 'Nickel'
},
atomic_nr=28,
period=4,
group=10,
atomic_mass=58.6934,
atomic_density=0.09134,
surface_binding_energy=4.44,
displacement_energy=23.0
),
Element(
symbol='Cu',
name={
'en': 'copper',
'de': 'Kupfer'
},
atomic_nr=29,
period=4,
group=11,
atomic_mass=63.546,
atomic_density=0.08486,
surface_binding_energy=3.2,
displacement_energy=19.0
),
Element(
symbol='Zn',
name={
'en': 'zinc',
'de': 'Zink'
},
atomic_nr=30,
period=4,
group=12,
atomic_mass=65.39,
atomic_density=0.06569,
surface_binding_energy=1.35,
displacement_energy=14.0
),
Element(
symbol='Ga',
name={
'en': 'gallium',
'de': 'Gallium'
},
atomic_nr=31,
period=4,
group=13,
atomic_mass=69.723,
atomic_density=0.05099,
surface_binding_energy=2.8,
displacement_energy=12.0
),
Element(
symbol='Ge',
name={
'en': 'germanium',
'de': 'Germanium'
},
atomic_nr=32,
period=4,
group=14,
atomic_mass=72.61,
atomic_density=0.04415,
surface_binding_energy=3.85,
displacement_energy=15.0
),
Element(
symbol='As',
name={
'en': 'arsenic',
'de': 'Arsen'
},
atomic_nr=33,
period=4,
group=15,
atomic_mass=74.9216,
atomic_density=0.04603,
surface_binding_energy=3.12,
displacement_energy=25.0
),
Element(
symbol='Se',
name={
'en': 'selenium',
'de': 'Selen'
},
atomic_nr=34,
period=4,
group=16,
atomic_mass=78.96,
atomic_density=0.03653,
surface_binding_energy=2.2,
displacement_energy=25.0
),
Element(
symbol='Br',
name={
'en': 'bromine (liquid)',
'de': 'Brom'
},
atomic_nr=35,
period=4,
group=17,
atomic_mass=79.904,
atomic_density=0.02353,
surface_binding_energy=1.16,
displacement_energy=25.0
),
Element(
symbol='Kr',
name={
'en': 'krypton',
'de': 'Krypton'
},
atomic_nr=36,
period=4,
group=18,
atomic_mass=83.8,
atomic_density=0.01734,
surface_binding_energy=0.0,
displacement_energy=5.0
),
Element(
symbol='Rb',
name={
'en': 'rubidium',
'de': 'Rubidium'
},
atomic_nr=37,
period=5,
group=1,
atomic_mass=85.4678,
atomic_density=0.01078,
surface_binding_energy=0.85,
displacement_energy=25.0
),
Element(
symbol='Sr',
name={
'en': 'strontium',
'de': 'Strontium'
},
atomic_nr=38,
period=5,
group=2,
atomic_mass=87.62,
atomic_density=0.01835,
surface_binding_energy=1.7,
displacement_energy=25.0
),
Element(
symbol='Y',
name={
'en': 'yttrium',
'de': 'Yttrium'
},
atomic_nr=39,
period=5,
group=3,
atomic_mass=88.90585,
atomic_density=0.03029,
surface_binding_energy=4.4,
displacement_energy=25.0
),
Element(
symbol='Zr',
name={
'en': 'zirkonium',
'de': 'Zirkonium'
},
atomic_nr=40,
period=5,
group=4,
atomic_mass=91.224,
atomic_density=0.04296,
surface_binding_energy=6.3,
displacement_energy=21.0
),
Element(
symbol='Nb',
name={
'en': 'niobium',
'de': 'Niob'
},
atomic_nr=41,
period=5,
group=5,
atomic_mass=92.90638,
atomic_density=0.05562,
surface_binding_energy=7.47,
displacement_energy=28.0
),
Element(
symbol='Mo',
name={
'en': 'molybdenum',
'de': 'Molybdaen'
},
atomic_nr=42,
period=5,
group=6,
atomic_mass=95.94,
atomic_density=0.06453,
surface_binding_energy=6.81,
displacement_energy=33.0
),
Element(
symbol='Tc',
name={
'en': 'technetium',
'de': 'Technetium'
},
atomic_nr=43,
period=5,
group=7,
atomic_mass=97.907215,
atomic_density=0.07073,
surface_binding_energy=6.81,
displacement_energy=25.0
),
Element(
symbol='Ru',
name={
'en': 'ruthenium',
'de': 'Ruthenium'
},
atomic_nr=44,
period=5,
group=8,
atomic_mass=101.07,
atomic_density=0.07966,
surface_binding_energy=6.73,
displacement_energy=25.0
),
Element(
symbol='Rh',
name={
'en': 'rhodium',
'de': 'Rhodium'
},
atomic_nr=45,
period=5,
group=9,
atomic_mass=102.9055,
atomic_density=0.07262,
surface_binding_energy=5.72,
displacement_energy=25.0
),
Element(
symbol='Pd',
name={
'en': 'palladium',
'de': 'Palladium'
},
atomic_nr=46,
period=5,
group=10,
atomic_mass=106.42,
atomic_density=0.06802,
surface_binding_energy=3.91,
displacement_energy=26.0
),
Element(
symbol='Ag',
name={
'en': 'silver',
'de': 'Silber'
},
atomic_nr=47,
period=5,
group=11,
atomic_mass=107.8682,
atomic_density=0.05862,
surface_binding_energy=2.95,
displacement_energy=23.0
),
Element(
symbol='Cd',
name={
'en': 'cadmium',
'de': 'Cadmium'
},
atomic_nr=48,
period=5,
group=12,
atomic_mass=112.411,
atomic_density=0.04634,
surface_binding_energy=1.16,
displacement_energy=19.0
),
Element(
symbol='In',
name={
'en': 'indium',
'de': 'Indium'
},
atomic_nr=49,
period=5,
group=13,
atomic_mass=114.818,
atomic_density=0.03834,
surface_binding_energy=2.52,
displacement_energy=15.0
),
Element(
symbol='Sn',
name={
'en': 'tin',
'de': 'Zinn'
},
atomic_nr=50,
period=5,
group=14,
atomic_mass=118.71,
atomic_density=0.03698,
surface_binding_energy=3.15,
displacement_energy=22.0
),
Element(
symbol='Sb',
name={
'en': 'antimony',
'de': 'Antimon'
},
atomic_nr=51,
period=5,
group=15,
atomic_mass=121.757,
atomic_density=0.03306,
surface_binding_energy=2.74,
displacement_energy=25.0
),
Element(
symbol='Te',
name={
'en': 'tellurium',
'de': 'Tellur'
},
atomic_nr=52,
period=5,
group=16,
atomic_mass=127.6,
atomic_density=0.0295,
surface_binding_energy=2.04,
displacement_energy=25.0
),
Element(
symbol='I',
name={
'en': 'iodine',
'de': 'Iod'
},
atomic_nr=53,
period=5,
group=17,
atomic_mass=126.90447,
atomic_density=0.02344,
surface_binding_energy=1.11,
displacement_energy=25.0
),
Element(
symbol='Xe',
name={
'en': 'xenon',
'de': 'Xenon'
},
atomic_nr=54,
period=5,
group=18,
atomic_mass=131.29,
atomic_density=0.01348,
surface_binding_energy=0.0,
displacement_energy=5.0
),
Element(
symbol='Cs',
name={
'en': 'cesium',
'de': 'Caesium'
},
atomic_nr=55,
period=6,
group=1,
atomic_mass=132.90544,
atomic_density=0.00851,
surface_binding_energy=0.8,
displacement_energy=15.0
),
Element(
symbol='Ba',
name={
'en': 'barium',
'de': 'Barium'
},
atomic_nr=56,
period=6,
group=2,
atomic_mass=137.327,
atomic_density=0.01587,
surface_binding_energy=1.89,
displacement_energy=25.0
),
Element(
symbol='La',
name={
'en': 'lanthanum',
'de': 'Lanthan'
},
atomic_nr=57,
period=8,
group=3,
atomic_mass=138.9055,
atomic_density=0.02671,
surface_binding_energy=4.47,
displacement_energy=25.0
),
Element(
symbol='Ce',
name={
'en': 'cerium',
'de': 'Cer'
},
atomic_nr=58,
period=8,
group=4,
atomic_mass=140.115,
atomic_density=0.02911,
surface_binding_energy=4.39,
displacement_energy=25.0
),
Element(
symbol='Pr',
name={
'en': 'praseodymium',
'de': 'Praseodym'
},
atomic_nr=59,
period=8,
group=5,
atomic_mass=140.90765,
atomic_density=0.02767,
surface_binding_energy=3.7,
displacement_energy=25.0
),
Element(
symbol='Nd',
name={
'en': 'neodymium',
'de': 'Neodym'
},
atomic_nr=60,
period=8,
group=6,
atomic_mass=144.24,
atomic_density=0.02924,
surface_binding_energy=3.41,
displacement_energy=25.0
),
Element(
symbol='Pm',
name={
'en': 'promethium',
'de': 'Promethium'
},
atomic_nr=61,
period=8,
group=7,
atomic_mass=145.9127,
atomic_density=0.0298,
surface_binding_energy=3.19,
displacement_energy=25.0
),
Element(
symbol='Sm',
name={
'en': 'samarium',
'de': 'Samarium'
},
atomic_nr=62,
period=8,
group=8,
atomic_mass=150.36,
atomic_density=0.03018,
surface_binding_energy=2.14,
displacement_energy=25.0
),
Element(
symbol='Eu',
name={
'en': 'europium',
'de': 'Europium'
},
atomic_nr=63,
period=8,
group=9,
atomic_mass=151.965,
atomic_density=0.02078,
surface_binding_energy=1.83,
displacement_energy=25.0
),
Element(
symbol='Gd',
name={
'en': 'gadolinium',
'de': 'Gadolinium'
},
atomic_nr=64,
period=8,
group=10,
atomic_mass=157.25,
atomic_density=0.03024,
surface_binding_energy=4.14,
displacement_energy=25.0
),
Element(
symbol='Tb',
name={
'en': 'terbium',
'de': 'Terbium'
},
atomic_nr=65,
period=8,
group=11,
atomic_mass=158.92534,
atomic_density=0.03127,
surface_binding_energy=4.05,
displacement_energy=25.0
),
Element(
symbol='Dy',
name={
'en': 'dysprosium',
'de': 'Dysprosium'
},
atomic_nr=66,
period=8,
group=12,
atomic_mass=162.5,
atomic_density=0.03172,
surface_binding_energy=3.04,
displacement_energy=25.0
),
Element(
symbol='Ho',
name={
'en': 'holmium',
'de': 'Holmium'
},
atomic_nr=67,
period=8,
group=13,
atomic_mass=164.93032,
atomic_density=0.03211,
surface_binding_energy=3.14,
displacement_energy=25.0
),
Element(
symbol='Er',
name={
'en': 'erbium',
'de': 'Erbium'
},
atomic_nr=68,
period=8,
group=14,
atomic_mass=167.26,
atomic_density=0.03264,
surface_binding_energy=3.3,
displacement_energy=25.0
),
Element(
symbol='Tm',
name={
'en': 'thulium',
'de': 'Thulium'
},
atomic_nr=69,
period=8,
group=15,
atomic_mass=168.93421,
atomic_density=0.03323,
surface_binding_energy=2.42,
displacement_energy=25.0
),
Element(
symbol='Yb',
name={
'en': 'ytterbium',
'de': 'Ytterbium'
},
atomic_nr=70,
period=8,
group=16,
atomic_mass=173.04,
atomic_density=0.02424,
surface_binding_energy=1.58,
displacement_energy=25.0
),
Element(
symbol='Lu',
name={
'en': 'litetium',
'de': 'Litetium'
},
atomic_nr=71,
period=6,
group=3,
atomic_mass=174.967,
atomic_density=0.03387,
surface_binding_energy=4.43,
displacement_energy=17.0
),
Element(
symbol='Hf',
name={
'en': 'hafnium',
'de': 'Hafnium'
},
atomic_nr=72,
period=6,
group=4,
atomic_mass=178.49,
atomic_density=0.04491,
surface_binding_energy=6.41,
displacement_energy=25.0
),
Element(
symbol='Ta',
name={
'en': 'tantal',
'de': 'Tantal'
},
atomic_nr=73,
period=6,
group=5,
atomic_mass=180.9479,
atomic_density=0.05543,
surface_binding_energy=8.1,
displacement_energy=32.0
),
Element(
symbol='W',
name={
'en': 'tungsten',
'de': 'Wolfram'
},
atomic_nr=74,
period=6,
group=6,
atomic_mass=183.84,
atomic_density=0.06306,
surface_binding_energy=8.79,
displacement_energy=38.0
),
Element(
symbol='Re',
name={
'en': 'rhenium',
'de': 'Rhenium'
},
atomic_nr=75,
period=6,
group=7,
atomic_mass=186.207,
atomic_density=0.06805,
surface_binding_energy=8.01,
displacement_energy=40.0
),
Element(
symbol='Os',
name={
'en': 'osmium',
'de': 'Osmium'
},
atomic_nr=76,
period=6,
group=8,
atomic_mass=190.23,
atomic_density=0.07151,
surface_binding_energy=8.18,
displacement_energy=25.0
),
Element(
symbol='Ir',
name={
'en': 'iridium',
'de': 'Iridium'
},
atomic_nr=77,
period=6,
group=9,
atomic_mass=192.217,
atomic_density=0.07096,
surface_binding_energy=6.93,
displacement_energy=25.0
),
Element(
symbol='Pt',
name={
'en': 'platinum',
'de': 'Platin'
},
atomic_nr=78,
period=6,
group=10,
atomic_mass=195.08,
atomic_density=0.06622,
surface_binding_energy=5.85,
displacement_energy=33.0
),
Element(
symbol='Au',
name={
'en': 'gold',
'de': 'Gold'
},
atomic_nr=79,
period=6,
group=11,
atomic_mass=196.96655,
atomic_density=0.05907,
surface_binding_energy=3.79,
displacement_energy=36.0
),
Element(
symbol='Hg',
name={
'en': 'mercury',
'de': 'Quecksilber'
},
atomic_nr=80,
period=6,
group=12,
atomic_mass=200.59,
atomic_density=0.04067,
surface_binding_energy=0.67,
displacement_energy=25.0
),
Element(
symbol='Tl',
name={
'en': 'thallium',
'de': 'Thallium'
},
atomic_nr=81,
period=6,
group=13,
atomic_mass=204.3833,
atomic_density=0.03492,
surface_binding_energy=1.88,
displacement_energy=25.0
),
Element(
symbol='Pb',
name={
'en': 'lead',
'de': 'Blei'
},
atomic_nr=82,
period=6,
group=14,
atomic_mass=207.2,
atomic_density=0.03299,
surface_binding_energy=2.03,
displacement_energy=11.0
),
Element(
symbol='Bi',
name={
'en': 'bismuth',
'de': 'Bismuth'
},
atomic_nr=83,
period=6,
group=15,
atomic_mass=208.98038,
atomic_density=0.02821,
surface_binding_energy=2.17,
displacement_energy=25.0
),
Element(
symbol='Po',
name={
'en': 'polonium',
'de': 'Polonium'
},
atomic_nr=84,
period=6,
group=16,
atomic_mass=209.9828,
atomic_density=0.02637,
surface_binding_energy=1.51,
displacement_energy=25.0
),
Element(
symbol='At',
name={
'en': 'astatine',
'de': 'Astatium'
},
atomic_nr=85,
period=6,
group=17,
atomic_mass=209.987126,
atomic_density=0.02509,
surface_binding_energy=0.94,
displacement_energy=25.0
),
Element(
symbol='Rn',
name={
'en': 'radon',
'de': 'Radon'
},
atomic_nr=86,
period=6,
group=18,
atomic_mass=222.01757,
atomic_density=0.01193,
surface_binding_energy=0.0,
displacement_energy=5.0
),
Element(
symbol='Fr',
name={
'en': 'francium',
'de': 'Francium'
},
atomic_nr=87,
period=7,
group=1,
atomic_mass=223.019731,
atomic_density=0.00675,
surface_binding_energy=0.78,
displacement_energy=52.0
),
Element(
symbol='Ra',
name={
'en': 'radium',
'de': 'Radium'
},
atomic_nr=88,
period=7,
group=2,
atomic_mass=226.025402,
atomic_density=0.01465,
surface_binding_energy=1.65,
displacement_energy=25.0
),
Element(
symbol='Ac',
name={
'en': 'actinium',
'de': 'Actinium'
},
atomic_nr=89,
period=9,
group=3,
atomic_mass=227.027747,
atomic_density=0.02669,
surface_binding_energy=4.21,
displacement_energy=25.0
),
Element(
symbol='Th',
name={
'en': 'thorium',
'de': 'Thorium'
},
atomic_nr=90,
period=9,
group=4,
atomic_mass=232.03805,
atomic_density=0.03042,
surface_binding_energy=6.2,
displacement_energy=35.0
),
Element(
symbol='Pa',
name={
'en': 'protactinium',
'de': 'Protactinium'
},
atomic_nr=91,
period=9,
group=5,
atomic_mass=231.035878,
atomic_density=0.04006,
surface_binding_energy=6.29,
displacement_energy=25.0
),
Element(
symbol='U',
name={
'en': 'u238',
'de': 'U238'
},
atomic_nr=92,
period=9,
group=6,
atomic_mass=238.0289,
atomic_density=0.04832,
surface_binding_energy=5.55,
displacement_energy=25.0
),
Element(
symbol='Np',
name={
'en': 'neptunium',
'de': 'Neptunium'
},
atomic_nr=93,
period=9,
group=7,
atomic_mass=237.048166,
atomic_density=0.05195,
surface_binding_energy=4.82,
displacement_energy=25.0
),
Element(
symbol='Pu',
name={
'en': 'plutonium',
'de': 'Plutonium'
},
atomic_nr=94,
period=9,
group=8,
atomic_mass=244.064197,
atomic_density=0.04895,
surface_binding_energy=3.65,
displacement_energy=25.0
),
Element(
symbol='Am',
name={
'en': 'americum',
'de': 'Americum'
},
atomic_nr=95,
period=9,
group=9,
atomic_mass=243.061372,
atomic_density=0.03387,
surface_binding_energy=2.94,
displacement_energy=25.0
),
Element(
symbol='Cm',
name={
'en': 'curium',
'de': 'Curium'
},
atomic_nr=96,
period=9,
group=10,
atomic_mass=247.0703,
atomic_density=0.03293,
surface_binding_energy=3.96,
displacement_energy=25.0
),
Element(
symbol='Bk',
name={
'en': 'berkelium',
'de': 'Berkelium'
},
atomic_nr=97,
period=9,
group=11,
atomic_mass=247.0703,
atomic_density=0.03605,
surface_binding_energy=3.02,
displacement_energy=25.0
),
Element(
symbol='Cf',
name={
'en': 'californium',
'de': 'Californium'
},
atomic_nr=98,
period=9,
group=12,
atomic_mass=251.079579,
atomic_density=0.0,
surface_binding_energy=1.81,
displacement_energy=25.0
),
Element(
symbol='Es',
name={
'en': 'einsteinium',
'de': 'Einsteinium'
},
atomic_nr=99,
period=9,
group=13,
atomic_mass=252.082944,
atomic_density=0.0,
surface_binding_energy=1.55,
displacement_energy=25.0
),
Element(
symbol='Fm',
name={
'en': 'fermium',
'de': 'Fermium'
},
atomic_nr=100,
period=9,
group=14,
atomic_mass=257.075099,
atomic_density=0.0,
surface_binding_energy=1.46,
displacement_energy=25.0
),
Element(
symbol='Md',
name={
'en': 'mendelevium',
'de': 'Mendelevium'
},
atomic_nr=101,
period=9,
group=15,
atomic_mass=258.098427,
atomic_density=0.0,
surface_binding_energy=1.2,
displacement_energy=25.0
),
Element(
symbol='No',
name={
'en': 'nobelium',
'de': 'Nobelium'
},
atomic_nr=102,
period=9,
group=16,
atomic_mass=259.100931,
atomic_density=0.0,
surface_binding_energy=1.12,
displacement_energy=25.0
),
Element(
symbol='Lr',
name={
'en': 'lawrencium',
'de': 'Lawrencium'
},
atomic_nr=103,
period=7,
group=3,
atomic_mass=262.11,
atomic_density=0.0,
surface_binding_energy=3.19,
displacement_energy=25.0
)
]
    def __init__(self):
        # Seed the Elements container with the built-in default element table
        # (elementList) defined on this class.
        super().__init__(self.elementList)
class HlGeneralPlot(QHBoxLayout):
    """
    Horizontal layout holding the general plot settings.

    :param version: version of simulation
    """

    # Emitted with a dict of changed plot settings
    settingsChanged = pyqtSignal(dict)

    def __init__(self, version: str):
        super().__init__()
        # Simulation version these plot settings are built for
        self.version = version

    def emit(self, value_dict: dict = None):
        """
        Emits settingsChanged pyqtSignal

        :param value_dict: dictionary to emit (ignored when None)
        """
        if value_dict is not None:
            self.settingsChanged.emit(value_dict)

    def receive(self, value_dict: dict):
        """
        Receives a settings payload from another widget; no-op by default.

        :param value_dict: dictionary to be received
        """
        pass
class SimulationsInput:
"""
Class for simulation specific parameters
"""
# parameters should be different for each simulation class
# name of the simulation
Name = 'SIMULATION NAME MISSING'
# supported versions
Versions = []
# detailed description of the simulation
Description = 'SIMULATION DESCRIPTION MISSING'
# logo for simulation if exists
Logo = ''
# information for about section
About = 'No information available'
# save folder name of simulation
SaveFolder = 'default'
# input file name
InputFilename = 'input'
# layer file name
LayerFilename = 'layer'
# example for additional settings
ExampleAdditionalSetting = ''
# list of skipped files for preview
SkipList = [InputFilename, LayerFilename]
# dictionary of tooltips for output files
OutputTooltips = {'output': 'Output of simulation'}
# possible input parameters
InputParameters = {}
# list of possible compounds
CompoundList = []
# group elements in beam and target
GroupElements = False
# Reference to classes
HlBeamSettings = HlGeneralBeamSettings
HlTargetSettings = HlGeneralTargetSettings
VlSimulationSettings = VlGeneralSimulationSettings
CompRowBeamSettings = CompRowBeamSettings
CompRowTargetSettings = CompRowTargetSettings
# Maximum number of components
MaxComponents = 10
def __init__(self):
self.element_data = GeneralElementData()
self.element_data_default = True
@staticmethod
def getVersionName(folder: str, binary: str) -> Union[str, bool]:
"""
Returns version of simulation depending on selected folder and binary
:param folder: main folder of simulation
:param binary: binary of simulation
:return: string of version number or False if no version can be determined
"""
return False
@staticmethod
def getDoc(folder: str, binary: str, version: str) -> Union[str, bool]:
"""
Returns path to documentation pdf-file
:param folder: main folder of simulation
:param binary: binary of simulation
:param version: version of simulation
:return: path to documentation pdf-file or
True if no documentation can be found but there is a possibility to download
False if no documentation can be found
"""
return False
@staticmethod
def downloadDoc(parent, folder: str, binary: str, version: str):
"""
Downloads documentation pdf-file and saves it in the default directory
:param parent: parent widget
:param folder: main folder of simulation
:param binary: binary of simulation
:param version: version of simulation
"""
pass
@staticmethod
def update(folder: str, binary: str, version: str):
    """
    Updates on startup
    :param folder: folder of simulation
    :param binary: binary path of simulation
    :param version: version of simulation
    """
    # base implementation: no startup work needed
    pass
def loadDefaultElements(self, version: str):
    """
    Loads default elements in ElementData
    :param version: version of simulation
    """
    # replace any customised element data with the generic defaults
    self.element_data = GeneralElementData()
    self.element_data_default = True
@staticmethod
def updateElements(folder: str, version: str) -> bool:
    """
    Updates list of <Element> for this simulation
    :param folder: main folder of simulation
    :param version: version of simulation
    :return: True on success; the base implementation performs no update and returns False
    """
    return False
def nameInputFile(self, arguments: SimulationArguments, version: str) -> str:
    """
    Returns file-name of input file
    :param arguments: <SimulationArguments> container
    :param version: version of simulation
    :return: file-name of input file
    """
    # generic implementation ignores the arguments and uses the class constant
    return self.InputFilename
@staticmethod
def makeInputFile(arguments: SimulationArguments, folder: str, version: str) -> str:
    """
    Returns input file as string
    :param arguments: <SimulationArguments> container
    :param folder: folder of simulation
    :param version: version of simulation
    :return: input file for simulation as string
    """
    # base implementation: empty input file
    return ''
def nameLayerFile(self, arguments: SimulationArguments, version: str) -> str:
    """
    Returns file-name of layer file
    :param arguments: <SimulationArguments> container
    :param version: version of simulation
    :return: file-name of layer file
    """
    # generic implementation ignores the arguments and uses the class constant
    return self.LayerFilename
@staticmethod
def makeLayerFile(arguments: SimulationArguments, folder: str, version: str) -> str:
    """
    Returns layer input file as string
    :param arguments: <SimulationArguments> container
    :param folder: folder of simulation
    :param version: version of simulation
    :return: layer input file for simulation as string
    """
    # base implementation: empty layer file
    return ''
@staticmethod
def loadFiles(folder: str, version: str) -> Union[Tuple[SimulationArguments, list], str, bool]:
    """
    Returns Tuple of <SimulationArguments> container if it can load input files from folder and list of errors while loading
    Returns string of error if input file can not be opened
    Returns False if not implemented
    :param folder: folder of input files
    :param version: version of simulation
    :return: Tuple(<SimulationArguments>, list), str or False
    """
    # base implementation: loading existing input files is not supported
    return False
@staticmethod
def checkAdditional(settings: str, version: str) -> List[str]:
    """
    Checks the user defined additional settings and returns list of errors
    :param settings: provided additional settings
    :param version: version of simulation
    :return: list of errors (empty list means no errors)
    """
    # base implementation: no additional settings, therefore no errors
    return []
@staticmethod
def cmd(binary: str, save_folder: str, input_file: str, version: str) -> Tuple[str, bool, str]:
    """
    Returns command to be executed.

    Note: the return annotation was ``(str, bool, str)``, which evaluates to a
    tuple *instance* rather than a type hint; it is now a proper ``Tuple``.

    :param binary: path to binary executable
    :param save_folder: path to save folder
    :param input_file: file name of input file
    :param version: version of simulation
    :return: Tuple of
             str: command to be executed in QProcess
             bool: True if input file should be piped
             str: command to be executed in cmd/shell
    """
    # base implementation: just invoke the quoted binary without arguments
    return (
        f'"{binary}"',
        False,
        f'"{binary}"'
    )
@staticmethod
def getProgress(save_folder: str, process_log: str, version: str) -> int:
    """
    Returns progress in % of running simulation.
    Negative return value indicates some error.
    :param save_folder: folder for output files
    :param process_log: most recent output of process
    :param version: version of simulation
    :return: progress percentage, or a negative value on error
    """
    # base implementation: progress cannot be determined
    return -1
class SimulationsOutput(QObject):
    """
    Class for displaying simulation specific parameters and plots
    :param plot: MplCanvas class that is used for plotting
    :param element_data: <Elements> container
    """

    # change to True if calculations should be performed in separate thread
    # caution: this will cause flickering of the plots when updating while simulation is running, needs further work
    Threading = False
    # signals
    hlChange = pyqtSignal(dict)
    # References to classes
    HlPlot = HlGeneralPlot

    def __init__(self, plot: MplCanvas, element_data: Elements):
        super().__init__()
        self.plot = plot
        self.element_data = element_data
        # (data, labels) of the most recent plot, or None if nothing was plotted
        self.data = None
        self.save_folder = ''
        self.elements = ElementList()
        self.masses = np.array([])
        self.first_color = 0, 0, 0  # (r, g, b)
        self.line_width = 2
        # serialises plot updates when Threading is enabled
        self.thread_lock = Lock()
        # timestamp of the calculation that last updated the plot
        self.thread_last_update = 0
        # colors for plots: reorder the 'tab20' colormap so neighbouring plot
        # lines get visually distinct colors
        self.colors = RepeatingList()
        order = [4, 0, 6, 8, 14, 12, 16, 18, 10, 3, 5, 1, 7, 9, 15, 13, 17, 11, 19]
        colors = plt.get_cmap('tab20').colors
        for i in order:
            self.colors.append(colors[i])
        self.first_color = colors[2]

    def emit(self, value_dict: dict = None):
        """
        Emits settingsChanged pyqtSignal
        :param value_dict: dictionary to emit (nothing is emitted for None)
        """
        if value_dict is None:
            return
        self.hlChange.emit(value_dict)

    def receive(self, value_dict: dict):
        """
        Receives pyqtSignal -> dict
        :param value_dict: dictionary to be received
        """
        # base implementation: ignore incoming values
        pass

    def reset(self):
        """Reset class to its initial (empty) state"""
        self.data = None
        self.save_folder = ''
        self.elements = ElementList()
        self.masses = np.array([])

    def clearPlotWindow(self):
        """Clears the plot window and re-creates an empty rectilinear axes"""
        self.plot.fig.clf()
        self.plot.axes = self.plot.fig.add_subplot(projection='rectilinear')
        self.plot.fig.canvas.draw_idle()

    def plotFct(self, plot: Callable = None, plot_args: dict = None):
        """
        Call the plot function in a new thread with function parameters
        :param plot: plot function; expected to return (data, plot_settings) or None
        :param plot_args: plot function parameters as dictionary
        """
        if plot is None:
            return
        if plot_args is None:
            plot_args = {}
        if self.Threading:
            thread = Thread(
                target=self.plotFctThread,
                kwargs={
                    'plot': plot,
                    'plot_args': plot_args
                })
            thread.start()
        else:
            # wait cursor while the (potentially slow) calculation runs
            QApplication.setOverrideCursor(Qt.WaitCursor)
            result = plot(**plot_args)
            if result is None:
                self.data = None
                QApplication.restoreOverrideCursor()
                return
            # NOTE(review): unlike plotFctThread, this path does not verify that
            # result is a 2-tuple before unpacking — confirm callers always
            # return (data, plot_settings) or None
            self.data, plot_settings = result
            plot_settings.apply(self.plot)
            QApplication.restoreOverrideCursor()

    def plotFctThread(self, plot: Callable, plot_args: dict):
        """
        Call the plot function with function parameters (thread worker)
        :param plot: plot function
        :param plot_args: plot function parameters as dictionary
        """
        # remember when this calculation started so stale results can be dropped
        our_time = time()
        # do calculations
        result = plot(**plot_args)
        if not isinstance(result, tuple) or len(result) != 2:
            self.data = None
            return
        self.data, plot_settings = result
        # obtain the lock to update the plot
        with self.thread_lock:
            # check if some other thread has done more recent calculation of same routine
            if our_time <= self.thread_last_update:
                return
            # update plot
            plot_settings.apply(self.plot)
            self.thread_last_update = our_time

    @staticmethod
    def listParameters(save_folder: str, list_widget: ListWidget):
        """
        Builds ListWidget from files in save folder
        :param save_folder: folder for output files
        :param list_widget: empty ListWidget (extension of QListWidget) that should be written to
        """
        # must be provided by the concrete simulation class
        raise NotImplementedError

    def getReturnData(self, precision: int = 7) -> Optional[str]:
        """Returns data from plot as string ready to be written in file or None if no data is available"""
        if self.data is None or len(self.data) != 2:
            return
        data, labels = self.data
        # header line: tab-separated column labels
        output = '\t'.join(labels) + '\n'
        data_len = len(data)
        # one row per data point, columns in scientific notation
        for i in range(len(data[0])):
            data_i = []
            for j in range(data_len):
                data_i.append(f'{data[j][i]:.{precision}E}')
            output += '\t'.join(data_i) + '\n'
        return output
| atomicplasmaphysics/BCA-GUIDE | Simulations/Simulations.py | Simulations.py | py | 57,933 | python | en | code | 4 | github-code | 13 |
1461993515 | __docformat__ = "restructuredtext en"
import roslib
roslib.load_manifest('pr2_plan_utils')
import rospy
import actionlib
import pr2_plan_utils.exceptions as ex
import pr2_controllers_msgs.msg as pr2c
import actionlib_msgs.msg as am
import actionlib as al
class Torso(object):
    """Interface to the PR2 torso via the single joint position action."""

    def __init__(self):
        """
        Represents the torso of the pr2.
        """
        name = 'torso_controller/position_joint_action'
        self._ac = al.SimpleActionClient(name, pr2c.SingleJointPositionAction)
        rospy.loginfo("Waiting for torso controller action server")
        self._ac.wait_for_server()
        rospy.loginfo("Torso control action client ready")

    def move(self, pos):
        """
        Move torso to a given height and wait for the action to finish.

        :param pos: Desired height (m)
        :raise ex.ActionFailedError: if the action does not report SUCCEEDED
        """
        goal = pr2c.SingleJointPositionGoal(position=pos,
                                            min_duration=rospy.Duration(2),
                                            max_velocity=1)
        rospy.loginfo("Sending torso goal and waiting for result")
        self._ac.send_goal_and_wait(goal)
        # success is judged from the goal state; the result payload itself is
        # not needed (the previous unused `res = get_result()` was removed)
        if self._ac.get_state() != am.GoalStatus.SUCCEEDED:
            raise ex.ActionFailedError()

    def up(self):
        """
        Move torso to max height (0.3)
        """
        self.move(0.3)

    def down(self):
        """
        Move torso to min height (0.01)
        """
        self.move(0.01)
| natanaso/active_object_detection | pr2_planning_module/pr2_plan_utils/src/pr2_plan_utils/torso.py | torso.py | py | 1,452 | python | en | code | 3 | github-code | 13 |
12139805943 | # -*- coding: utf-8 -*-
# 通过tf-idf提取高频词汇
import glob
import random
import jieba
# Read a whole file into one string
def get_content(path):
    """Read a GBK-encoded text file and return its content as one string.

    Each line is stripped of surrounding whitespace before being joined, so
    the returned string contains no line breaks. Undecodable bytes are
    ignored (errors="ignore").

    :param path: path of the file to read
    :return: concatenated, stripped lines
    """
    with open(path, "r", encoding="gbk", errors="ignore") as f:
        # join once instead of repeated string += (which is quadratic)
        return "".join(line.strip() for line in f)
# Count word frequencies and return the most frequent words
def get_tf(words, topk=10):
    """Count word frequencies and return the ``topk`` most frequent words.

    :param words: iterable of words
    :param topk: number of (word, count) pairs to return (default 10)
    :return: list of (word, count) tuples sorted by count, descending;
             sorted() is stable, so ties keep first-seen order
    """
    tf_dict = {}
    for w in words:
        # The original guard ``if w not in tf_dict.items()`` compared a word
        # against (key, value) tuples and was therefore always true;
        # dict.get already handles both the new-word and seen-word cases.
        tf_dict[w] = tf_dict.get(w, 0) + 1
    new_list = sorted(tf_dict.items(), key=lambda kv: kv[1], reverse=True)
    return new_list[:topk]
# Load the stop-word list
def get_stop_words(path):
    """Load the stop-word list (one word per line, UTF-8) from *path*."""
    with open(path, encoding="utf8") as fh:
        raw_lines = fh.readlines()
    return [entry.strip() for entry in raw_lines]
if __name__ == "__main__":
# 样本文件
fname = "d:\\NLP_DATA\\chap_3\\news\\C000008\\11.txt"
# 读取文件内容
corpus = get_content(fname)
# 分词
tmp_list = list(jieba.cut(corpus))
# 去除停用词
stop_words = get_stop_words("d:\\NLP_DATA\\chap_3\\stop_words.utf8")
split_words = []
for tmp in tmp_list:
if tmp not in stop_words:
split_words.append(tmp)
# print("样本:\n", corpus)
print("\n分词结果:\n" + "/".join(split_words))
# 统计高频词
tf_list = get_tf(split_words)
print("\ntop10词:\n", str(tf_list))
| 15149295552/Code | Month09/NLP_DATA/NLP_study/NLP_study/ML_NLP/04_get_tf_demo.py | 04_get_tf_demo.py | py | 1,461 | python | en | code | 1 | github-code | 13 |
15540950482 | import numpy as np
import math
# sigmoid
from scipy.special import expit
# returns word index
def getWordIdx(word):
    """Map *word* to its vocabulary index in the module-level ``ptb_wtoi``
    array, falling back to the index of the ``<unk>`` token for
    out-of-vocabulary words."""
    idx = np.where(ptb_wtoi == word)[0]
    if len(idx) == 0:
        # out-of-vocabulary: use the unknown-word token instead
        idx = np.where(ptb_wtoi == "<unk>")[0]
    return idx[0]
# returns embedding given word index
def getEmbedding(idx):
    """Return the embedding row for word index *idx* from the module-level
    ``embedding_mat`` (loaded from parameters/embedding.npy)."""
    return embedding_mat[idx]
def softmax(x):
    """Numerically stable softmax over a score vector.

    The maximum is subtracted before exponentiating so large scores do not
    overflow; the result is unchanged because softmax is shift-invariant.
    """
    shifted = np.exp(x - np.max(x))
    return shifted / shifted.sum(axis=0)
# One step of a single LSTM cell.
def applyLSTM(x, h, c, bias, weight):
    """Advance one LSTM cell by a single timestep.

    :param x: input vector at this timestep
    :param h: hidden state from the previous timestep
    :param c: cell state from the previous timestep
    :param bias: concatenated gate biases (input, candidate, forget, output)
    :param weight: weight matrix applied to [x; h]
    :return: (new_hidden, new_cell)
    """
    gates = np.matmul(np.concatenate((x, h)), weight) + bias
    raw_i, raw_cand, raw_f, raw_o = np.split(gates, 4)
    gate_i = expit(raw_i)
    gate_f = expit(raw_f)
    gate_o = expit(raw_o)
    candidate = np.tanh(raw_cand)
    new_c = gate_f * c + gate_i * candidate
    new_h = gate_o * np.tanh(new_c)
    return new_h, new_c
def applySoftmax(x, bias, weight):
    """Project *x* through the output layer and normalise with softmax."""
    logits = np.matmul(x, weight) + bias
    return softmax(logits)
# Fresh zeroed state for the two-layer LSTM.
def initLSTM():
    """Return zeroed (h_0, c_0, h_1, c_1) states, one hidden/cell pair per layer."""
    h0, c0 = np.zeros(650), np.zeros(650)
    h1, c1 = np.zeros(650), np.zeros(650)
    return h0, c0, h1, c1
# one step of LSTM giving word, and the states of the LSTM
# return output layer and new values for the states
def applyLM(widx, h_0, c_0, h_1, c_1):
    """Run one step of the two-layer LSTM language model.

    :param widx: index of the current word
    :param h_0, c_0: hidden/cell state of the first layer
    :param h_1, c_1: hidden/cell state of the second layer
    :return: (softmax output over the vocabulary, h_0, c_0, h_1, c_1)

    The original copied ``new_h_0`` etc. back into the parameter names before
    returning; those dead reassignments are removed — the new states are
    returned directly.
    """
    emb = getEmbedding(widx)
    # first layer consumes the word embedding
    h_0, c_0 = applyLSTM(emb, h_0, c_0, lstm_0_b, lstm_0_w)
    # second layer consumes the first layer's hidden state
    h_1, c_1 = applyLSTM(h_0, h_1, c_1, lstm_1_b, lstm_1_w)
    # softmax projection to vocabulary probabilities
    o = applySoftmax(h_1, softmax_b, softmax_w)
    return o, h_0, c_0, h_1, c_1
def getPerplexity(sentence):
    """Run the language model over *sentence* (a list of word ids) and print
    the running and final average per-word negative log-likelihood.

    NOTE(review): the printed value is the average cross-entropy in nats,
    not the perplexity itself (which would be exp of this value) — confirm
    the label is intended.
    NOTE(review): only len(sentence) - 1 predictions are made, but the final
    sum is divided by len(sentence) — confirm this off-by-one is intended.
    """
    # initialize hidden and cells
    h_0, c_0, h_1, c_1 = initLSTM()
    loss = 0
    for i in range(len(sentence)-1):
        widx = sentence[i]
        o, h_0, c_0, h_1, c_1 = applyLM(widx, h_0, c_0, h_1, c_1)
        # accumulate negative log-probability of the *next* word
        loss = loss + -math.log(o[sentence[i+1]])
        if (i+1)%1000 == 0:
            print('word ',i+1,' - current ppl:',loss/(i+1))
    print('perplexity: ',loss/len(sentence))
# --- script entry: load trained parameters, then evaluate on the test set ---
print('- loading embedding...')
embedding_mat = np.load('parameters/embedding.npy')
print('- loading word to index')
# vocabulary array: row index == word id used by the model
ptb_wtoi = np.loadtxt('ptb_word_to_id.txt',delimiter='\t',comments = '###',usecols = 0, dtype=bytes).astype(str)
print('- loading LSTM parameters...')
lstm_0_b = np.load('parameters/lstm_0_b.npy')
lstm_0_w = np.load('parameters/lstm_0_w.npy')
lstm_1_b = np.load('parameters/lstm_1_b.npy')
lstm_1_w = np.load('parameters/lstm_1_w.npy')
print('- loading softmax parameters...')
softmax_b = np.load('parameters/softmax_b.npy')
softmax_w = np.load('parameters/softmax_w.npy')
print('- read test file')
# test file: one word id per line
with open("ptb_test_index.txt") as f:
    test = f.readlines()
test = [int(x.strip()) for x in test]
print('- calculate perplexity on test file')
getPerplexity(test)
| sy2358/Word-Embedding-and-LSTM-for-Language-Modelling | rnn.py | rnn.py | py | 2,798 | python | en | code | 0 | github-code | 13 |
13356092137 | # -*- coding: utf-8 -*-
from __future__ import division #1/2 = float, 1//2 = integer, python 3.0 behaviour in 2.6, to make future port to 3 easier.
from __future__ import print_function
from optparse import OptionParser
import os
import struct
import sys
import zlib
import time
# When True, writes go to a local file instead of a serial port (pyserial not imported).
debug = False
if not debug:
    import serial
# The maximum size to transfer if we can determinate the size of the file (if input data comes from stdin).
MAX_SIZE = 2 ** 30
# Line terminator sent to the U-Boot console.
LINE_FEED = "\n"
# Wait for the prompt
def getprompt(ser, addr, verbose):
    """Synchronise with the U-Boot console and return the detected prompt
    (including the leading line feed echoed back by the target).

    :param ser: open serial connection to the target
    :param addr: scratch RAM address used for a harmless ``mw`` command
    :param verbose: print progress information
    """
    # Send a command who does not produce a result so when receiving the next line feed, only show the prompt will be returned
    ser.write("mw {0:08x} 0".format(addr) + LINE_FEED)
    # Flushing read buffer
    while ser.read(256):
        pass
    if verbose:
        print("Waiting for a prompt...")
    while True:
        # Write carriage return and wait for a response
        ser.write(LINE_FEED)
        # Read the response
        buf = ser.read(256);
        if (buf.endswith(b"> ") or buf.endswith(b"# ")):
            # NOTE(review): buf is bytes (compared with b"> ") but is
            # concatenated with str here and returned for str comparisons in
            # writecommand — this looks written for Python 2; confirm before
            # running under Python 3.
            print("Prompt is '" + buf[2:] + "'")
            # The prompt returned starts with a line feed. This is the echo of the line feed we send to get the prompt.
            # We keep this linefeed
            return buf
        else:
            # Flush read buffer
            while ser.read(256):
                pass
# Wait for the prompt and return True if received or False otherwise
def writecommand(ser, command, prompt, verbose):
    """Send *command* to the target and verify both the command echo and the
    prompt come back.

    :param ser: open serial connection
    :param command: command string (without trailing line feed)
    :param prompt: prompt string previously captured by getprompt()
    :param verbose: print progress information
    :return: True if echo and prompt were received, False otherwise
    """
    # Write the command and a line feed, so we must get back the command and the prompt
    ser.write(command + LINE_FEED)
    buf = ser.read(len(command))
    if (buf != command):
        if verbose:
            print("Echo command not received. Instead received '" + buf + "'")
        return False
    if verbose:
        print("Waiting for prompt...")
    buf = ser.read(len(prompt))
    if (buf == prompt):
        if verbose:
            print("Ok, prompt received")
        return True
    else:
        if verbose:
            print("Prompt not received. Instead received '" + buf + "'")
        return False
def memwrite(ser, path, size, start_addr, verbose, debug):
    """Stream a file into target RAM, 4 bytes at a time, via U-Boot ``mw``
    commands, printing progress and a final CRC32 for verification.

    :param ser: serial connection (or an output file handle in debug mode)
    :param path: input file path, or "-" for stdin
    :param size: number of bytes to transfer; <= 0 means auto-detect
    :param start_addr: RAM address where the data is written
    :param verbose: print each command
    :param debug: skip serial interaction (dry run)
    """
    if not debug:
        prompt = getprompt(ser, start_addr, verbose)
    if (path == "-"):
        fd = sys.stdin
        if (size <= 0):
            # stdin has no size; use the transfer cap
            size = MAX_SIZE
    else:
        fd = open(path,"rb")
        if (size <= 0):
            # Get the size of the file
            fd.seek(0, os.SEEK_END);
            size = fd.tell();
            fd.seek(0, os.SEEK_SET);
    addr = start_addr
    bytes_read = 0
    crc32_checksum = 0
    startTime = time.time();
    bytesLastSecond = 0
    while (bytes_read < size):
        # read one 32-bit word (or the remaining tail)
        if ((size - bytes_read) > 4):
            read_bytes = fd.read(4);
        else:
            read_bytes = fd.read(size - bytes_read);
        if (len(read_bytes) == 0):
            # EOF: for stdin the real size is only known now
            if (path == "-"):
                size = bytes_read
            break
        bytesLastSecond += len(read_bytes)
        bytes_read += len(read_bytes)
        # running CRC32 so the transfer can be verified on the target
        crc32_checksum = zlib.crc32(read_bytes, crc32_checksum) & 0xFFFFFFFF
        # pad the final partial word with zero bytes
        while (len(read_bytes) < 4):
            read_bytes += b'\x00'
        (val, ) = struct.unpack(">L", read_bytes)
        read_bytes = "".format(val)
        str_to_write = "mw {0:08x} {1:08x}".format(addr, val)
        if verbose:
            print("Writing:" + str_to_write + "at:", "0x{0:08x}".format(addr))
        if debug:
            str_to_write = struct.pack(">L", int("{0:08x}".format(val), 16))
        else:
            if not writecommand(ser, str_to_write, prompt, verbose):
                print("Found an error, so aborting")
                fd.close()
                return
        # Print progress
        currentTime = time.time();
        if ((currentTime - startTime) > 1):
            print("\rProgress {:2.1f}%".format((bytes_read * 100) / size))
            print(", {:3.1f}kb/s".format(bytesLastSecond / (currentTime - startTime) / 1024))
            print(", ETA {0}s ".format(round((size - bytes_read) / bytesLastSecond / (currentTime - startTime))))
            bytesLastSecond = 0
            startTime = time.time();
        # Increment address
        addr += 4
    if (bytes_read != size):
        # NOTE(review): concatenating a str with the int bytes_read raises
        # TypeError on Python 3 — confirm and format the number instead.
        print("Error while reading file '", fd.name, "' at offset " + bytes_read)
    else:
        print("\rProgress 100% ")
        print("File successfully written. You should run 'crc32" + " {0:08x}".format(start_addr) + " {0:08x}".format(bytes_read) + "' on the modem and the result must be" + " {0:08x}".format(crc32_checksum) + ".")
        print("To copy from RAM to flash, unprotect flash: 'protect off all'...")
        print("Then erase flash: 'erase" + " {0:08x}".format((start_addr - 0x80000000) + 0xb0000000) + " +{0:08x}".format(bytes_read) + "'.")
        print("Then copy from RAM to flash: 'cp.b", " {0:08x}".format(start_addr), " {0:08x}".format((start_addr - 0x80000000) + 0xb0000000) + " {0:08x}".format(bytes_read), "'.")
    fd.close()
    return
def main():
    """Parse command-line options and write a file into target memory over serial."""
    optparser = OptionParser("usage: %prog [options]", version = "%prog 0.2")
    optparser.add_option("--verbose", action = "store_true", dest = "verbose", help = "be verbose", default = False)
    optparser.add_option("--serial", dest = "serial", help = "specify serial port", default = "/dev/ttyUSB0", metavar = "dev")
    optparser.add_option("--write", dest = "write", help = "write mem from file", metavar = "path")
    optparser.add_option("--addr", dest = "addr", help = "mem address", default = "0x80500000", metavar = "addr")
    optparser.add_option("--size", dest = "size", help = "# bytes to write", default = "0", metavar = "size")
    (options, args) = optparser.parse_args()
    if (len(args) != 0):
        optparser.error("incorrect number of arguments")
    if not debug:
        ser = serial.Serial(options.serial, 115200, timeout=0.1)
    else:
        # debug mode: capture output in a local file instead of a serial port
        ser = open(options.write + ".out", "wb")
    if debug:
        # NOTE(review): getprompt takes (ser, addr, verbose) — this call passes
        # only two arguments and would raise TypeError; confirm the debug path
        # is ever exercised.
        prompt = getprompt(ser, options.verbose)
        writecommand(ser, "mw 80500000 01234567", prompt, options.verbose)
        buf = ser.read(256)
        print("buf = '" + buf + "'")
        return
    if options.write:
        # addr/size accept any base int() understands (0x..., 0o..., decimal)
        memwrite(ser, options.write, int(options.size, 0), int(options.addr, 0), options.verbose, debug)
        return


if __name__ == '__main__':
    main()
| HorstBaerbel/ubootwrite | ubootwrite.py | ubootwrite.py | py | 7,502 | python | en | code | 21 | github-code | 13 |
2048451364 | import os.path
import time
from iterfzf import iterfzf
def iter_pokemon(sleep=0.01):
    """Lazily yield Pokémon names from the bundled pokemon.txt.

    A short pause of *sleep* seconds follows every yielded name, simulating a
    slow producer for the fzf demo.
    """
    data_file = os.path.join(os.path.dirname(__file__), 'pokemon.txt')
    with open(data_file) as handle:
        for raw_line in handle:
            yield raw_line.strip()
            time.sleep(sleep)
def main():
    """Run fzf over the Pokémon list and print each selected name."""
    result = iterfzf(iter_pokemon(), multi=True)
    # iterfzf returns None when the user aborts the selection (e.g. Esc);
    # guard before iterating so we do not raise TypeError on None
    if not result:
        return
    for item in result:
        print(repr(item))


if __name__ == '__main__':
    main()
| dahlia/iterfzf | examples/pokemon.py | pokemon.py | py | 419 | python | en | code | 147 | github-code | 13 |
15049000700 | from django.urls import path
from . import views
from . import url_handlers
urlpatterns = [
    #path("", views.index, name="index"),
    # list of all records
    path("klient_index/", views.FilmIndex.as_view(), name="klient_index"),
    # detail view for a single record selected by primary key
    path("<int:pk>/klient_detail/", views.CurrentFilmView.as_view(), name="klient_detail"),
    # create a new record
    path("create_klient/", views.CreateFilm.as_view(), name="novy_klient"),
    # site root is handled by a function-based handler
    path("", url_handlers.index_handler),
    # user registration / login / logout
    path("register/", views.UzivatelViewRegister.as_view(), name = "registrace"),
    path("login/", views.UzivatelViewLogin.as_view(), name = "login"),
    path("logout/", views.logout_user, name = "logout"),
    # edit an existing record selected by primary key
    path("<int:pk>/edit/", views.EditFilm.as_view(), name="edit_klient"),
]
2868328198 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow_probability import distributions as tfd
from dreamer.tools import nested
class MPCAgent(object):
  """Model-predictive-control agent: plans actions with the config's planner
  over the latent dynamics cell, keeping per-environment recurrent state and
  the previous action in local TF variables."""

  def __init__(self, batch_env, step, is_training, should_log, config):
    self._step = step  # Trainer step, not environment step.
    self._is_training = is_training
    self._should_log = should_log
    self._config = config
    self._cell = config.cell
    self._num_envs = len(batch_env)
    state = self._cell.zero_state(self._num_envs, tf.float32)
    # Mirror every tensor of the cell state in a zero-initialised local
    # variable so the state persists between session runs.
    var_like = lambda x: tf.get_local_variable(
        x.name.split(':')[0].replace('/', '_') + '_var',
        shape=x.shape,
        initializer=lambda *_, **__: tf.zeros_like(x), use_resource=True)
    self._state = nested.map(var_like, state)
    batch_action_shape = (self._num_envs,) + batch_env.action_space.shape
    self._prev_action = tf.get_local_variable(
        'prev_action_var', shape=batch_action_shape,
        initializer=lambda *_, **__: tf.zeros(batch_action_shape),
        use_resource=True)

  def reset(self, agent_indices):
    """Zero the recurrent state for the given environments and clear the
    previous action; returns the grouped reset op."""
    state = nested.map(
        lambda tensor: tf.gather(tensor, agent_indices),
        self._state)
    # scatter 0 * val back to zero only the selected rows
    reset_state = nested.map(
        lambda var, val: tf.scatter_update(var, agent_indices, 0 * val),
        self._state, state, flatten=True)
    # NOTE(review): the previous action is zeroed for *all* environments,
    # not only agent_indices — confirm this is intended.
    reset_prev_action = self._prev_action.assign(
        tf.zeros_like(self._prev_action))
    return tf.group(reset_prev_action, *reset_state)

  def step(self, agent_indices, observ):
    """Observe one frame, update the latent state, plan an action (optionally
    with exploration noise) and return it; state variables are updated as a
    side effect via control dependencies."""
    observ = self._config.preprocess_fn(observ)
    # Converts observ to sequence.
    observ = nested.map(lambda x: x[:, None], observ)
    embedded = self._config.encoder(observ)[:, 0]
    state = nested.map(
        lambda tensor: tf.gather(tensor, agent_indices),
        self._state)
    # `+ 0` forces a read of the variable's current value so the dependency
    # below refers to the pre-update action — NOTE(review): confirm.
    prev_action = self._prev_action + 0
    with tf.control_dependencies([prev_action]):
      use_obs = tf.ones(tf.shape(agent_indices), tf.bool)[:, None]
      _, state = self._cell((embedded, prev_action, use_obs), state)
    action = self._config.planner(
        self._cell, self._config.objective, state,
        embedded.shape[1:].as_list(),
        prev_action.shape[1:].as_list())
    action = action[:, 0]
    if self._config.exploration:
      expl = self._config.exploration
      scale = tf.cast(expl.scale, tf.float32)[None]  # Batch dimension.
      if expl.schedule:
        scale *= expl.schedule(self._step)
      if expl.factors:
        scale *= np.array(expl.factors)
      if expl.type == 'additive_normal':
        # Gaussian noise around the planned action
        action = tfd.Normal(action, scale[:, None]).sample()
      elif expl.type == 'epsilon_greedy':
        # with probability `scale`, replace by a uniformly sampled one-hot action
        random_action = tf.one_hot(
            tfd.Categorical(0 * action).sample(), action.shape[-1])
        switch = tf.cast(tf.less(
            tf.random.uniform((self._num_envs,)),
            scale), tf.float32)[:, None]
        action = switch * random_action + (1 - switch) * action
      else:
        raise NotImplementedError(expl.type)
    action = tf.clip_by_value(action, -1, 1)
    # persist the chosen action and updated state before returning the action
    remember_action = self._prev_action.assign(action)
    remember_state = nested.map(
        lambda var, val: tf.scatter_update(var, agent_indices, val),
        self._state, state, flatten=True)
    with tf.control_dependencies(remember_state + (remember_action,)):
      return tf.identity(action)
| google-research/dreamer | dreamer/control/mpc_agent.py | mpc_agent.py | py | 3,431 | python | en | code | 575 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.