seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
from tkinter.messagebox import YES  # NOTE(review): unused import; kept so file-level imports stay intact

# Simple 5-question CLI computer quiz.
print("Welcome to my computer quiz challenge ")
playing = input("Do you want to play? ")
score = 0
if playing.lower() == "yes":
    print("Okay let's play ")
else:
    quit()

answer = input("When was the first computer invented? ")
if answer.lower() == "1943":
    print("Correct!")
    score += 1
else:
    print("Incorrect")

answer = input("What does CPU stand for? ")
if answer.lower() == "central processing unit":
    print("Correct!")
    score += 1
else:
    print("Incorrect")

answer = input("What does GPU stand for? ")
if answer.lower() == "graphics processing unit":
    print('Correct!')
    score += 1
else:
    print("Incorrect!")

answer = input("What does RAM stand for? ")
# FIX: answer.lower() can never equal a capitalized literal — compare lowercase
if answer.lower() == "random access memory":
    print("Correct!")
    score += 1
else:
    print("Incorrect")

answer = input("What does PSU stand for? ")
# FIX: same case bug as above ("Power supply" was unreachable)
if answer.lower() == "power supply":
    print("Correct!")
    score += 1
else:
    print("Incorrect")

print("")
print("Your score is "+ str(score)+ " out of 5 questions")
print("You have scored "+ str((score/5)*100)+"%") | PriyenJoshi/Python-Practice-Programs | Python Programs/quiz_game.py | quiz_game.py | py | 1,162 | python | en | code | 0 | github-code | 13 |
74559992656 | from discord.ext import commands
from discord_components import Button, ButtonStyle
from database.Players import players_info
class Location(commands.Cog):
    """Cog exposing location/teleport chat commands."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name='home')
    async def home_location(self, ctx):
        """Send the in-game teleport command for the invoking player."""
        player = players_info(ctx.author.id)
        teleport_cmd = '.set #Teleport 240610.469 82449.469 26973.654 {}'.format(player[3])
        await ctx.send(teleport_cmd)
def setup(bot):
bot.add_cog(Location(bot)) | GreenDevth/Discord_server_store | cogs/Location.py | Location.py | py | 491 | python | en | code | 0 | github-code | 13 |
#Data Visualization
# Demonstrates four basic matplotlib plot types on the "tips" dataset.
import pandas as pd
import matplotlib.pyplot as plt
# NOTE(review): absolute Windows path — only resolves on the original machine
data=pd.read_csv('C:/Users/SPTINT-09/Desktop/tips (1).csv')
# scatter plot: tip amount per day of week
plt.scatter(data['day'],data['tip'])
plt.show()
# line plots: tip and party size by row index
plt.plot(data['tip'])
plt.plot(data['size'])
plt.show()
# bar chart: tips per day
plt.bar(data['day'],data['tip'])
plt.show()
# histogram of total bill values
plt.hist(data['total_bill'])
plt.show()
| dhanushree1702/AIML | prg13.py | prg13.py | py | 320 | python | en | code | 0 | github-code | 13 |
37646442621 |
from Node import Node
class Queue():
    """Linked-list FIFO queue: ``first`` is the dequeue end, ``last`` the
    enqueue end.

    NOTE(review): assumes ``Node`` exposes a writable ``next`` attribute,
    as the original code already relied on.
    """

    def __init__(self) -> None:
        self.first = None
        self.last = None
        self.length = 0

    def peek(self):
        """Return the front node (next to be dequeued) without removing it."""
        return self.first

    def enqueue(self, value):
        """Append *value* at the tail.

        FIX: the original prepended new nodes at ``first`` (leaving ``last``
        stale), which made enqueue/dequeue behave as a stack (LIFO) rather
        than a queue.
        """
        new_node = Node(value)
        if self.length == 0:
            self.first = new_node
            self.last = new_node
        else:
            self.last.next = new_node
            self.last = new_node
        self.length += 1
        return self

    def dequeue(self):
        """Remove the front node.

        FIX: guard the empty case — the original dereferenced ``None`` and
        decremented ``length`` below zero when called on an empty queue.
        """
        if self.length == 0:
            return self
        if self.length == 1:
            self.first = None
            self.last = None
        else:
            old_first = self.first
            self.first = old_first.next
        self.length -= 1
        return self
def main():
    """Placeholder entry point (no demo code yet)."""
    pass
if __name__ == '__main__':
main() | aaxlss/python-some-datastructures-algoritms | Queue.py | Queue.py | py | 818 | python | en | code | 0 | github-code | 13 |
17493287925 | from opentelemetry import trace
import pyarrow.flight as flight
from opentelemetry.propagate import inject
from opentelemetry.trace import set_span_in_context
class ClientTracingMiddlewareFactory(flight.ClientMiddlewareFactory):
    """Creates one tracing middleware (and one span) per Flight RPC call."""

    def __init__(self):
        super().__init__()
        self._tracer = trace.get_tracer(__name__)

    def start_call(self, info):
        """Open a span named after the RPC method and attach it to the call."""
        rpc_span = self._tracer.start_span(str(info.method))
        return ClientTracingMiddleware(rpc_span)
class ClientTracingMiddleware(flight.ClientMiddleware):
    """Propagates the call's span context over gRPC headers and closes the
    span when the call completes."""

    def __init__(self, span):
        self._span = span

    def sending_headers(self):
        """Return headers carrying the serialized span context."""
        headers = {}
        inject(carrier=headers, context=set_span_in_context(self._span))
        return headers

    def call_completed(self, exception):
        """Record any error on the span, then end it."""
        if exception:
            self._span.record_exception(exception)
        self._span.end()
| amoeba/arrow-flight-playground | test_flight_stress/client_python/tracing_middleware.py | tracing_middleware.py | py | 910 | python | en | code | 1 | github-code | 13 |
41949654368 | import random
import string
import requests
from bs4 import BeautifulSoup
import colorama
from os import getcwd
def randstr():
    """Return a random 3-character string of ASCII letters and digits."""
    charset = string.ascii_lowercase + string.ascii_uppercase + string.digits
    return ''.join(random.choice(charset) for _ in range(3))
def main(tlets, savefile):
    """Endlessly probe random dosyaupload URLs, printing (and optionally
    recording) every link that resolves to a real file."""
    while True:
        url = f'https://dosyaupload.com/{tlets}{randstr()}'
        response = requests.get(url)
        if response.status_code != 200:
            continue
        soup = BeautifulSoup(response.content, "html.parser")
        file_name = soup.find('div', {'class': 'heading-1'}).get_text(strip=True).rsplit('indir')[0]
        if 'Hata' in file_name:
            continue
        print(colorama.Fore.GREEN, f'File name : {file_name} - Download link : {url} - Scraped by blueshillz')
        if savefile == 'y':
            with open('hits.txt', 'a', encoding='UTF-8') as sfile:
                sfile.write(f'File name : {file_name} - Download link : {url} - Scraped by blueshillz\n')
if __name__=='__main__':
    print(colorama.Fore.RED,'Blueshillz dosyaupload scraper\nGithub : https://github.com/blueshillz/dosyaupload-scraper')
    # the first two letters of a known-good link seed the brute-force search space
    tlets = input(f'{colorama.Fore.CYAN}Enter 2 letters of an exist link\n')
    savefile = input(f'{colorama.Fore.CYAN}Do you want to save hits to a file? (y/n)\n')
    if savefile == 'y':
        print(colorama.Fore.GREEN,f'Hits will be saved to {getcwd()}\\hits.txt')
    main(tlets,savefile)
| blueshillz/dosyaupload-scraper | main.py | main.py | py | 1,489 | python | en | code | 0 | github-code | 13 |
8076372640 | from PyQt5 import QtWidgets , uic , QtGui
def somme():
    """Read both operand fields and display their integer sum, or ERROR."""
    a = u.ImpX.text()
    b = u.ImpY.text()
    # FIX: str.isdecimal() is False for "" (making the len checks redundant)
    # and, unlike isnumeric(), rejects characters such as '²' that int()
    # cannot parse (isnumeric() would let them through and crash int()).
    if a.isdecimal() and b.isdecimal():
        u.res.setText(str(int(a) + int(b)))
    else:
        u.res.setText("ERROR")
def fois():
    """Read both operand fields and display their integer product, or ERROR."""
    a = u.ImpX.text()
    b = u.ImpY.text()
    # FIX: isdecimal() handles "" and rejects int()-unparsable numerics like '²'
    if a.isdecimal() and b.isdecimal():
        u.res.setText(str(int(a) * int(b)))
    else:
        u.res.setText("ERROR")
def moins():
    """Read both operand fields and display their integer difference, or ERROR."""
    a = u.ImpX.text()
    b = u.ImpY.text()
    # FIX: isdecimal() handles "" and rejects int()-unparsable numerics like '²'
    if a.isdecimal() and b.isdecimal():
        u.res.setText(str(int(a) - int(b)))
    else:
        u.res.setText("ERROR")
def div():
    """Divide the two integer inputs, guarding against division by zero."""
    a = u.ImpX.text()
    b = u.ImpY.text()
    # FIX: isdecimal() handles "" and rejects int()-unparsable numerics like '²'
    if a.isdecimal() and b.isdecimal():
        if int(b) == 0:
            u.res.setText("division par 0!")
            QtWidgets.QMessageBox.critical(u,"ERROR","DIVISION PAR 0 IMPOSSIBLE ",QtWidgets.QMessageBox.Ok)
        else:
            u.res.setText("{:5.2f}".format(int(a)/int(b)))
    else:
        u.res.setText("ERROR")
def out():
    """Close the calculator: hide the window and stop the Qt event loop."""
    u.hide()
    app.quit()
def clear():
    """Blank both operand fields and the result label."""
    # FIX: the original bound the (None) return values of .clear() to unused
    # locals; just invoke the slots.
    u.ImpX.clear()
    u.ImpY.clear()
    u.res.clear()
# --- application bootstrap -------------------------------------------------
app = QtWidgets.QApplication([])
app.beep()
u = uic.loadUi('calc.ui')  # expects calc.ui next to this script
u.show()
u.ImpX.setFocus()
# wire the four operation buttons plus close/reset
u.add.clicked.connect(somme)
u.fois.clicked.connect(fois)
u.delt.clicked.connect(moins)
u.div.clicked.connect(div)
u.close.clicked.connect(out)
u.reset.clicked.connect(clear)
app.exec_()
| HamoudaBenAbdennebi/calc | calc.py | calc.py | py | 1,594 | python | en | code | 0 | github-code | 13 |
27165930935 | '''
Platform specific file for Raspberry Pi devices. This system does NOT use extlinux for boot so
dynamic overlays are not needed or supported. GPIO's are provided as a simple GPIO # (i.e. 14, 21).
This configuration should also apply to other Raspberry Pi family devices and may be updated in
the future to include other platforms.
'''
from ._base import SbcPlatform_Base
# select the gpio library for the platform
gpiod = None
rpi_gpio = None
try:
# try using The RPi.GPIO library
import sbc_gpio.gpio_libs.rpi_gpio as rpi_gpio #pylint: disable=W0611
except ImportError:
# fallback to gpiod
import sbc_gpio.gpio_libs.gpiod as gpiod #pylint: disable=W0611,C0411
# BCM GPIO numbers accepted on Raspberry Pi headers
GPIO_VALID_VALUES = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27]
# Device-tree files used to identify the board model and serial number
MODEL_FILE = '/sys/firmware/devicetree/base/model'
SERIAL_FILE = '/sys/firmware/devicetree/base/serial-number'
# List of dict - platforms supported by this definition
SUPPORTED_PLATFORMS = [
    {
        'model': 'Pi4B',
        'description': 'Raspberry Pi 4 Model B',
        'gpio_valid_values': GPIO_VALID_VALUES,
        'gpio_lib': rpi_gpio if rpi_gpio is not None else gpiod,
        'identifiers': [{'type': 'file', 'file': MODEL_FILE, 'contents': '^Raspberry Pi 4 Model B'}],
        '_serial_location': {'type': 'file', 'file': SERIAL_FILE, 'contents': '.*'}
    },
    {
        # FIX: 'model' was copy-pasted as 'Pi4B' for every entry; each board
        # now carries a distinct model identifier matching its description.
        'model': 'Pi3B',
        'description': 'Raspberry Pi 3 Model B',
        'gpio_valid_values': GPIO_VALID_VALUES,
        'gpio_lib': rpi_gpio if rpi_gpio is not None else gpiod,
        'identifiers': [{'type': 'file', 'file': MODEL_FILE, 'contents': '^Raspberry Pi 3 Model B'}],
        '_serial_location': {'type': 'file', 'file': SERIAL_FILE, 'contents': '.*'}
    },
    {
        'model': 'PiZeroW',
        'description': 'Raspberry Pi Zero W',
        'gpio_valid_values': GPIO_VALID_VALUES,
        'gpio_lib': rpi_gpio if rpi_gpio is not None else gpiod,
        'identifiers': [{'type': 'file', 'file': MODEL_FILE, 'contents': '^Raspberry Pi Zero W$'}],
        '_serial_location': {'type': 'file', 'file': SERIAL_FILE, 'contents': '.*'}
    },
    {
        'model': 'PiZero',
        'description': 'Raspberry Pi Zero',
        'gpio_valid_values': GPIO_VALID_VALUES,
        'gpio_lib': rpi_gpio if rpi_gpio is not None else gpiod,
        'identifiers': [{'type': 'file', 'file': MODEL_FILE, 'contents': '^Raspberry Pi Zero$'}],
        '_serial_location': {'type': 'file', 'file': SERIAL_FILE, 'contents': '.*'}
    }
]
class SbcPlatformClass(SbcPlatform_Base):
    ''' SBC Platform representing a Raspberry Pi based SBC (docstring
    previously said "Allwinner", copied from a sibling platform file). '''
    # platform descriptors consumed by the base class
    _platforms = SUPPORTED_PLATFORMS
| LearningToPi/sbc_gpio | src/sbc_gpio/platforms/rpi.py | rpi.py | py | 2,647 | python | en | code | 0 | github-code | 13 |
38340454556 | from math import sqrt, pi, cos, sin
import draw
def canon(Xc, Yc, Ra, Rb, scene, pen, drawFlag=True):
    """
    Draw an ellipse from its canonical equation
    x^2/Ra^2 + y^2/Rb^2 = 1 (docstring translated from Russian).

    The first-quadrant arc is traced twice — stepping x while |dy/dx| <= 1,
    then stepping y for the steep part — to avoid gaps.
    drawEllipsePoints presumably mirrors each point into the remaining
    quadrants (defined in the ``draw`` module — confirm there).
    """
    sqrA = Ra * Ra
    sqrB = Rb * Rb
    # x range up to the point where the tangent slope reaches 1
    xRange = (int(round(sqrA / sqrt(sqrA + sqrB)))
              if sqrA or sqrB else 0)
    sqrtCoef = Rb / Ra if Ra else 0
    x = 0
    while x <= xRange:
        y = int(round(sqrtCoef * sqrt(sqrA - x * x)))
        if drawFlag:
            draw.drawEllipsePoints(x, y, Xc, Yc, scene, pen)
        x += 1
    y = 0
    yRange = (int(round(sqrB / sqrt(sqrA + sqrB)))
              if sqrA or sqrB else 0)
    sqrtCoef = Ra / Rb if Rb else 0
    while y <= yRange:
        x = int(round(sqrtCoef * sqrt(sqrB - y * y)))
        if drawFlag:
            draw.drawEllipsePoints(x, y, Xc, Yc, scene, pen)
        y += 1
def parametric(Xc, Yc, Ra, Rb, scene, pen, drawFlag=True):
    """
    Draw an ellipse from its parametric equations
    x = Ra*cos(t), y = Rb*sin(t) over the first quadrant
    (docstring translated from Russian).
    """
    # one step per pixel along the longer semi-axis
    step = 1 / max(Ra, Rb) if Ra or Rb else 1
    limit = pi / 2 + step
    t = 0
    while t < limit:
        px = int(round(Ra * cos(t)))
        py = int(round(Rb * sin(t)))
        if drawFlag:
            draw.drawEllipsePoints(px, py, Xc, Yc, scene, pen)
        t += step
def brezenham(Xc, Yc, Ra, Rb, scene, pen, drawFlag=True):
    """
    Draw an ellipse with the Bresenham algorithm
    (docstring translated from Russian).

    capDelta is the signed ellipse-function error of the current pixel; its
    sign chooses a horizontal, vertical, or diagonal step, and the
    secondary test ``delta`` picks between the two candidates.
    """
    sqrA = Ra * Ra
    sqrB = Rb * Rb
    x = 0
    y = Rb
    capDelta = sqrB - sqrA * (2 * Rb - 1)
    while y >= 0:
        if drawFlag:
            draw.drawEllipsePoints(x, y, Xc, Yc, scene, pen)
        if capDelta < 0:
            # current pixel inside the ellipse: horizontal or diagonal step
            delta = 2 * capDelta + sqrA * (2 * y - 1)
            if delta <= 0:
                x += 1
                capDelta = capDelta + sqrB * (2 * x + 1)
            else:
                x += 1
                y -= 1
                capDelta = capDelta + 2 * x * sqrB - 2 * y * sqrA + sqrA + sqrB
        elif capDelta > 0:
            # current pixel outside the ellipse: vertical or diagonal step
            delta = 2 * capDelta - sqrB * (2 * x + 1)
            if delta <= 0:
                x += 1
                y -= 1
                capDelta = capDelta + 2 * x * sqrB - 2 * y * sqrA + sqrA + sqrB
            else:
                y -= 1
                capDelta = capDelta + sqrA * (- 2 * y + 1)
        else:
            # exactly on the ellipse: diagonal step
            x += 1
            y -= 1
            capDelta = capDelta + 2 * x * sqrB - 2 * y * sqrA + sqrA + sqrB
    if not Rb and drawFlag:
        # degenerate Rb == 0: the ellipse collapses to a horizontal segment
        for x in range(Xc - Ra, Xc + Ra + 1):
            draw.drawPoint(x, Yc, scene, pen)
def midpoint(Xc, Yc, Ra, Rb, scene, pen, drawFlag=True):
    """
    Draw an ellipse with the midpoint algorithm
    (docstring translated from Russian).

    The first quadrant is split at the slope == 1 point: the first loop
    steps x from the top of the arc, the second steps y from the right.
    trialFunc is the ellipse function evaluated at the midpoint between the
    two candidate pixels.
    """
    sqrA = Ra * Ra
    sqrB = Rb * Rb
    x = 0
    y = Rb
    trialFunc = sqrB - sqrA * (Rb - 1 / 4) if Rb else 0
    xRange = (int(round(sqrA / sqrt(sqrA + sqrB)))
              if sqrA or sqrB else 0)
    while x <= xRange:
        if drawFlag:
            draw.drawEllipsePoints(x, y, Xc, Yc, scene, pen)
        x += 1
        if trialFunc > 0:
            # midpoint outside: also step down
            y -= 1
            trialFunc -= 2 * y * sqrA
        trialFunc = trialFunc + sqrB * (2 * x + 1)
    x = Ra
    y = 0
    trialFunc = sqrA - sqrB * (Ra - 1 / 4) if Ra else 0
    yRange = (int(round(sqrB / sqrt(sqrA + sqrB)))
              if sqrA or sqrB else 0)
    while y <= yRange:
        if drawFlag:
            draw.drawEllipsePoints(x, y, Xc, Yc, scene, pen)
        y += 1
        if trialFunc > 0:
            # midpoint outside: also step left
            x -= 1
            trialFunc -= 2 * x * sqrB
        trialFunc = trialFunc + sqrA * (2 * y + 1)
def libfunc(Xc, Yc, Ra, Rb, scene, pen):
    """
    Draw an ellipse using the scene's built-in library primitive
    (docstring translated from Russian; the original said "circle"
    although an ellipse is drawn).
    """
scene.addEllipse(Xc - Ra, Yc - Rb, 2 * Ra, 2 * Rb, pen) | MyMiDiII/bmstu-cg | lab_04/ellipse.py | ellipse.py | py | 3,918 | python | en | code | 0 | github-code | 13 |
25553832762 | import numpy as np
import random
import logging
import math
import gym
from gym import spaces
logger = logging.getLogger(__name__)
# board cell codes
STATE_DESK = 0  # empty cell
STATE_BODY = 1  # snake body segment
STATE_HEAD = 2  # snake head
STATE_FOOD = 3  # food cell

ACTIONS = [[-1, 0], [1, 0], [0, -1], [0, 1]]  # (dy, dx) for up / down / left / right
DIR_UP = 0
DIR_DOWN = 1
DIR_LEFT = 2
DIR_RIGHT = 3
class Snake(object):
    """Grid-based snake game state: board matrix, snake body and food."""

    def __init__(self, width, height):
        self.width = width
        self.height = height
        # board cells hold one of the STATE_* codes
        self.state = np.zeros((height, width)).astype(int)
        self.body = []        # list of [y, x] cells, tail first, head last
        self.food = []        # [y, x] of the current food cell
        self.direction = None  # index into ACTIONS

    def draw_body(self, tail=None):
        """Stamp body/head/food codes onto the board; clear the vacated tail cell."""
        for i, (y, x) in enumerate(self.body):
            if i == len(self.body) - 1:
                self.state[y, x] = STATE_HEAD
                break
            self.state[y, x] = STATE_BODY
        if tail is not None:
            self.state[tail[0], tail[1]] = STATE_DESK
        self.state[self.food[0], self.food[1]] = STATE_FOOD

    def go(self, action):
        """Advance one step; returns (state, reward, done, info)."""
        # a move opposite to the current heading is ignored
        # (0+1 == up+down == 1, 2+3 == left+right == 5)
        if action + self.direction != 1 and action + self.direction != 5:
            self.direction = action
        head = self.body[-1]
        next_head = [head[0] + ACTIONS[self.direction][0], head[1] + ACTIONS[self.direction][1]]
        # hit wall
        if next_head[0] < 0 or next_head[0] >= self.height or next_head[1] < 0 or next_head[1] >= self.width:
            return self.state, -1, True, 'Failed. Hit wall'
        # eat itself
        if next_head in self.body[1:]:
            return self.state, -1, True, 'Failed. Eat itself'
        # eat food (tail is kept, so the snake grows by one segment)
        self.body.append(next_head)
        if next_head == self.food:
            self.generate_food()
            self.draw_body()
            return self.state, 1, False, 'Succeed. Eat food'
        # nothing happened: drop the tail to keep the length constant
        tail = self.body.pop(0)
        self.draw_body(tail)
        return self.state, self.get_reward(), False, None

    def get_reward(self):
        """Shaping reward that grows as the head nears the food (max 0.5)."""
        head = self.body[-1]
        food = self.food
        dis = math.sqrt(pow((food[0] - head[0]), 2) + pow((food[1] - head[1]), 2))  # >= 1
        reward = (1 / dis) * 0.5  # <= 0.5
        return reward

    def generate_snake(self):
        """Place a fresh two-segment snake with a random heading at a random
        interior cell, resetting the board."""
        x = random.randint(1, self.width - 2)
        y = random.randint(1, self.height - 2)
        head = [y, x]
        self.direction = random.randint(0, len(ACTIONS) - 1)
        # tail sits one step behind the head, opposite the heading
        tail = [head[0] - ACTIONS[self.direction][0], head[1] - ACTIONS[self.direction][1]]
        self.body.clear()
        self.body.append(tail)
        self.body.append(head)
        self.state = np.zeros((self.height, self.width)).astype(int)
        self.state[tail[0], tail[1]] = STATE_BODY
        self.state[head[0], head[1]] = STATE_HEAD

    def generate_food(self):
        """Drop food on a random cell not occupied by the snake."""
        y, x = self.body[-1]
        while [y, x] in self.body:
            x = random.randint(0, self.width - 1)
            y = random.randint(0, self.height - 1)
        self.food = [y, x]
        self.state[y, x] = STATE_FOOD

    def reset(self):
        """Re-create snake and food; returns the fresh board."""
        self.generate_snake()
        self.generate_food()
        self.draw_body()
        return self.state
class SnakeEnv(gym.Env):
    """Gym wrapper around ``Snake``; observations are rendered RGB frames."""

    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': 1
    }

    def __init__(self):
        self.width = 10
        self.height = 10
        self.viewer = None
        self.snake = Snake(self.width, self.height)
        self.state = self.render(mode='rgb_array')
        self.action_space = spaces.Discrete(len(ACTIONS))

    def _seed(self, seed=None):
        """Seed the env RNG.

        FIX: the original called ``random.seeding.np_random`` — the stdlib
        ``random`` module has no ``seeding`` attribute (AttributeError on
        every call); gym's seeding helper is the intended utility.
        """
        from gym.utils import seeding
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def step(self, action):
        err_msg = "%r (%s) invalid" % (action, type(action))
        assert self.action_space.contains(action), err_msg
        self.state, reward, done, info = self.snake.go(action)
        # the agent observes the rendered frame, not the raw grid
        return self.render(mode='rgb_array'), reward, done, info

    def reset(self):
        self.state = self.snake.reset()
        return self.render(mode='rgb_array')

    def render(self, mode='human'):
        from gym.envs.classic_control import rendering
        grid = 20       # pixel size of one board cell
        circle_r = 10   # radius of snake/food discs
        screen_width = grid * (self.width + 2)
        screen_height = grid * (self.height + 2)
        if self.viewer is None:
            self.viewer = rendering.Viewer(screen_width, screen_height)
            # draw only the outer border of the mesh world (step == width/height)
            lines = []
            for w in range(1, self.width + 2, self.width):
                line = rendering.Line((grid*w, grid), (grid*w, grid*(self.height + 1)))
                line.set_color(0, 0, 0)
                lines.append(line)
                self.viewer.add_geom(line)
            for h in range(1, self.height + 2, self.height):
                line = rendering.Line((grid, grid*h), (grid*(self.width + 1), grid*h))
                # NOTE(review): colors the previously appended line, not this
                # one — looks like a slip, but harmless (default color is black)
                lines[-1].set_color(0, 0, 0)
                lines.append(line)
                self.viewer.add_geom(line)
        # create snake body (head drawn darker than the body)
        for i, (y, x) in enumerate(self.snake.body):
            body = rendering.make_circle(circle_r)
            bodytrans = rendering.Transform(translation=(grid * (1.5 + x), grid * (1.5 + y)))
            body.add_attr(bodytrans)
            body.set_color(0.5, 0.5, 0.5)
            if i == len(self.snake.body) - 1:
                body.set_color(0, 0, 0)
            self.viewer.add_onetime(body)
        # create food
        if self.snake.food:
            food = rendering.make_circle(circle_r)
            y, x = self.snake.food
            foodtrans = rendering.Transform(translation=(grid * (1.5 + x), grid * (1.5 + y)))
            food.add_attr(foodtrans)
            food.set_color(0, 1, 0)
            self.viewer.add_onetime(food)
        return self.viewer.render(return_rgb_array=mode == 'rgb_array')

    def close(self):
        if self.viewer:
            self.viewer.close()
| zjl-utopia/gym-custom | gym_custom/envs/snake.py | snake.py | py | 6,126 | python | en | code | 0 | github-code | 13 |
32839625135 | import tweetws
class UserProfile:
    """Aggregated per-user statistics derived from a user's tweets."""

    def __init__(self, username, tweets:list[tweetws.Tweetws], sntmntTweets, avglen, positivity, topics):
        self.username = username          # Twitter handle
        self.tweets = tweets              # raw tweet objects
        self.sntmntTweets = sntmntTweets  # sentiment-scored tweets (structure set by caller — TODO confirm)
        self.avglen = avglen              # average tweet length
        self.positivity = positivity      # aggregate sentiment score (range unknown — confirm with producer)
        self.topics = topics              # extracted topics
| MysticMatt/CSE-5914-Automated-Twitter-Matchmaker | userProfile.py | userProfile.py | py | 339 | python | en | code | 0 | github-code | 13 |
5906785614 | from unittest import TestCase
from salesdataanalyzer.helpers import Salesman
from salesdataanalyzer.parser import parse_salesman,\
WrongNumberOfFieldsError, InvalidCpfPatternError, InvalidSalaryPatternError
class ParseSalesmanTest(TestCase):
    """Tests for ``parse_salesman``: the happy path plus each field-count,
    CPF and salary validation error."""

    def test_parse_salesman(self):
        record = '001ç12312312312çJohn H. Pattersonç192000.00'
        expected = Salesman('12312312312', 'John H. Patterson', 192000.00)
        self.assertEqual(expected, parse_salesman(record))

    def test_raise_wrong_number_of_fields_error_for_missing_field(self):
        with self.assertRaises(WrongNumberOfFieldsError):
            parse_salesman('001ç12312312312çJohn H. Patterson')

    def test_raise_wrong_number_of_fields_error_for_extra_field(self):
        with self.assertRaises(WrongNumberOfFieldsError):
            parse_salesman('001ç12312312312çJohn H. Pattersonç'
                           '192000.00çExtra Field')

    def test_raise_invalid_cpf_pattern_error_for_invalid_characters(self):
        with self.assertRaises(InvalidCpfPatternError):
            parse_salesman('001ç123abc12312çJohn H. Pattersonç192000.00')

    def test_raise_invalid_cpf_pattern_error_for_missing_digits(self):
        with self.assertRaises(InvalidCpfPatternError):
            parse_salesman('001ç123123123çJohn H. Pattersonç192000.00')

    def test_raise_invalid_cpf_pattern_error_for_extra_digits(self):
        with self.assertRaises(InvalidCpfPatternError):
            parse_salesman('001ç123123123120çJohn H. Pattersonç192000.00')

    def test_raise_invalid_salary_pattern_error_for_invalid_character(self):
        with self.assertRaises(InvalidSalaryPatternError):
            parse_salesman('001ç12312312312çJohn H. Pattersonç192,000.00')

    def test_raise_invalid_salary_pattern_error_for_empty_field(self):
        with self.assertRaises(InvalidSalaryPatternError):
            parse_salesman('001ç12312312312çJohn H. Pattersonç')
| dmertins/sales-data-analyzer | tests/unit/parse_salesman_test.py | parse_salesman_test.py | py | 2,313 | python | en | code | 0 | github-code | 13 |
14232507587 | import mock
import pytest
from rest_framework import exceptions
from backend.api.authorization.constants import OperateEnum
from backend.api.authorization.mixins import AllowItem, AuthorizationAPIAllowListCheckMixin, AuthViewMixin
from backend.apps.role.models import Role
from backend.biz.policy import PolicyBean, PolicyBeanList
from backend.service.models.subject import Subject
class TestAllowItem:
    """Matching semantics of AllowItem: '*' wildcard, exact id, and
    'starts_with:' prefix patterns; unknown prefixes never match."""

    @pytest.mark.parametrize(
        "object_id,match_object_id,expected_result",
        [
            ("*", "test", True),
            ("test", "test", True),
            ("test", "test1", False),
            ("starts_with:test", "test1", True),
            ("starts_with:test", "tes1t", False),
            ("abc:test", "test", False),
        ],
    )
    def test_allow_item(self, object_id, match_object_id, expected_result):
        result = AllowItem(object_id).match(match_object_id)
        assert result == expected_result
class TestAuthorizationAPIAllowListCheckMixin:
    """verify_api / verify_api_by_object_ids must raise PermissionDenied for
    any object id missing from the (mocked) system allow list."""

    def test_verify_api(self):
        mixin = AuthorizationAPIAllowListCheckMixin()
        mixin._list_system_allow_list = mock.Mock(return_value=[AllowItem("test")])
        mixin.verify_api("system", "test", "authorization_instance")
        with pytest.raises(exceptions.PermissionDenied):
            mixin.verify_api("system", "test1", "authorization_instance")

    def test_verify_api_by_object_ids(self):
        mixin = AuthorizationAPIAllowListCheckMixin()
        mixin._list_system_allow_list = mock.Mock(return_value=[AllowItem("test"), AllowItem("test1")])
        mixin.verify_api_by_object_ids("system", ["test", "test1"], "authorization_instance")
        with pytest.raises(exceptions.PermissionDenied):
            mixin.verify_api_by_object_ids("system", ["test", "test1", "test2"], "authorization_instance")
class TestAuthViewMixin:
    """grant_or_revoke per subject type, scope checks, and response shapes
    of AuthViewMixin (all collaborators mocked)."""

    def test_grant_or_revoke_admin(self):
        # the admin user short-circuits: nothing is altered
        mixin = AuthViewMixin()
        result = mixin.grant_or_revoke(
            OperateEnum.GRANT.value, Subject(type="user", id="admin"), PolicyBeanList("system", [])
        )
        assert result == []

    def test_grant_or_revoke_user(self):
        mixin = AuthViewMixin()
        mixin.policy_operation_biz.alter = mock.Mock(return_value=None)
        mixin.policy_query_biz.list_by_subject = mock.Mock(return_value=[])
        mixin._check_or_sync_user = mock.Mock(return_value=None)
        result = mixin.grant_or_revoke(
            OperateEnum.GRANT.value, Subject(type="user", id="test"), PolicyBeanList("system", [])
        )
        assert result == []

    def test_grant_or_revoke_group(self):
        # group grants go through the scope check instead of the user sync
        mixin = AuthViewMixin()
        mixin.policy_operation_biz.alter = mock.Mock(return_value=None)
        mixin.policy_query_biz.list_by_subject = mock.Mock(return_value=[])
        mixin._check_scope = mock.Mock(return_value=None)
        result = mixin.grant_or_revoke(
            OperateEnum.GRANT.value, Subject(type="group", id="1"), PolicyBeanList("system", [])
        )
        assert result == []

    def test_grant_or_revoke_user_revoke(self):
        mixin = AuthViewMixin()
        mixin.policy_operation_biz.revoke = mock.Mock(return_value=[])
        mixin._check_or_sync_user = mock.Mock(return_value=None)
        result = mixin.grant_or_revoke(
            OperateEnum.REVOKE.value, Subject(type="user", id="test"), PolicyBeanList("system", [])
        )
        assert result == []

    def test_check_scope_assert_group(self):
        # _check_scope only accepts group subjects
        mixin = AuthViewMixin()
        with pytest.raises(AssertionError):
            mixin._check_scope(Subject(type="user", id="test"), PolicyBeanList("system", []))

    def test_check_scope(self):
        mixin = AuthViewMixin()
        mixin.role_biz.get_role_by_group_id = mock.Mock(return_value=Role())
        mixin.role_auth_scope_trans.from_policy_list = mock.Mock(return_value=None)
        mixin.role_biz.incr_update_auth_scope = mock.Mock(return_value=None)
        mixin._check_scope(Subject(type="group", id="1"), PolicyBeanList("system", []))

    def test_policy_response(self):
        mixin = AuthViewMixin()
        resp = mixin.policy_response(PolicyBean(id="create_host", related_resource_types=[]))
        assert resp.data == {"policy_id": 0, "statistics": {"instance_count": 0}}

    def test_batch_policy_response(self):
        mixin = AuthViewMixin()
        resp = mixin.batch_policy_response(
            [
                PolicyBean(id="create_host", related_resource_types=[]),
                PolicyBean(id="delete_host", related_resource_types=[]),
            ]
        )
        assert resp.data == [
            {
                "action": {"id": "create_host"},
                "policy_id": 0,
                "statistics": {"instance_count": 0},
            },
            {
                "action": {"id": "delete_host"},
                "policy_id": 0,
                "statistics": {"instance_count": 0},
            },
        ]
| TencentBlueKing/bk-iam-saas | saas/tests/api/authorization/mixins_tests.py | mixins_tests.py | py | 4,942 | python | en | code | 24 | github-code | 13 |
74370820498 | import numpy as np
def project_depth_to_points(
        intrinsics: np.array,
        depth: np.array,
        instance_mask: np.array = None) -> [np.array, tuple]:
    r"""Back-project a depth map into 3D camera-space points.

    Input:
        intrinsics: camera intrinsics, [3, 3]
        depth: depth map, [H, W]
        instance_mask: mask, [H, W]. Defaults to None.
    Output:
        [pts, ids]
        pts: points, [N, 3]
        ids: x,y indexes of the depth map corresponding to the points, [np.array(N), np.array(N)]
    """
    valid = depth > 0
    if instance_mask is not None:
        valid = np.logical_and(instance_mask, valid)
    ids = np.where(valid)
    # homogeneous pixel coordinates (u, v, 1), one column per valid pixel
    num_pixel = ids[0].shape[0]
    uv_grid = np.vstack((ids[1], ids[0], np.ones(num_pixel)))
    # unproject to rays, then scale each ray so its z equals the depth value
    cam_rays = np.linalg.inv(intrinsics).dot(uv_grid).T  # [num_pixel, 3]
    z = depth[ids[0], ids[1]]
    pts = cam_rays * z[:, np.newaxis] / cam_rays[:, -1:]
    return pts, ids
def project_points_to_2d(intrinsics: np.array, points: np.array) -> np.array:
    r"""Project 3D camera-space points onto the image plane.

    Input:
        intrinsics: camera intrinsics, [3, 3]
        points: coordinates of points, [N, 3]
    Output:
        2d coordinate: [N, 2]
    """
    # perspective divide, then apply the intrinsic matrix
    normalized = points / points[:, -1:]
    pixels = normalized.dot(intrinsics.T)
    # NOTE: int16 truncation matches the original contract
    return pixels[:, :2].astype(np.int16)
| Gorilla-Lab-SCUT/gorilla-3d | gorilla3d/utils/project.py | project.py | py | 1,559 | python | en | code | 7 | github-code | 13 |
28247248876 | import csv
from django.shortcuts import render
from django.http import HttpResponse
from django.conf import settings
from data_handler.models import DataHandler, DataExporter, Charter, Mapper
def index(request):
    """Landing page."""
    return render(request, 'frontend/index.html', context={'current_site': 'index'})

def updates(request):
    """Site updates / changelog page."""
    return render(request, 'frontend/updates.html', context={'current_site': 'updates'})

def aboutus(request):
    """About-us page."""
    return render(request, 'frontend/aboutus.html', context={'current_site': 'about_us'})

def guidelines(request):
    """Usage guidelines page."""
    return render(request, 'frontend/guidelines.html', context={'current_site': 'guidelines'})

def credits(request):
    """Credits page."""
    return render(request, 'frontend/credits.html', context={'current_site': 'credits'})

def further_reading(request):
    """Further-reading page."""
    return render(request, 'frontend/further_reading.html', context={'current_site': 'further_reading'})
def search(request, search_type):
    """Run a search of the given type and render the results page.

    FIX: removed a leftover debug ``print`` of the GET params and a
    commented-out early return.
    """
    data_handler = DataHandler(search_type, 'search')
    data = data_handler.fetch_request_data(request)
    data.update({'current_site': 'search'})
    return render(request, 'frontend/search.html', context=data)
def chart(request, search_type):
    """Render a chart page for the current search.

    NOTE(review): this calls ``Charter.chart(data, request)`` while
    ``chart_better`` calls it with four arguments — confirm which signature
    ``Charter`` actually implements.
    """
    data_handler = DataHandler(search_type, 'charts')
    data = data_handler.fetch_request_data(request)
    charter = Charter()
    labels, values, name = charter.chart(data, request)
    data.update({'chart_labels': list(labels), 'chart_values': list(values), 'chart_name': name})
    return render(request, 'frontend/chart.html', context=data)
def chart_better(request, search_type, normalization_type, date_scope):
    """Render a chart restricted to year- or month-granularity facets,
    optionally normalized."""
    data_handler = DataHandler(search_type, 'charts', extra_args={'normalization_type': normalization_type, 'date_scope': date_scope})
    data = data_handler.fetch_request_data(request)
    # keep only the facets matching the requested date granularity
    new_facets = []
    for facet in data['facets']:
        if date_scope == 'year' and (facet['field'] == 'year' or facet['field'] == 'starting_year'):
            new_facets.append(facet)
        elif date_scope == 'month' and (facet['field'] == 'month' or facet['field'] == 'starting_month'):
            new_facets.append(facet)
    charter = Charter()
    labels, values, name = charter.chart(new_facets, normalization_type, date_scope, request)
    data.update({'chart_labels': list(labels), 'chart_values': list(values), 'chart_name': name})
    return render(request, 'frontend/chart.html', context=data)
def map(request, search_type, flow_type):
    """Render the map page for the given search and flow type.

    NOTE(review): shadows the builtin ``map`` at module level — rename if
    this module ever needs the builtin.
    """
    data_handler = DataHandler(search_type, 'map', extra_args={'flow_type': flow_type})
    data = data_handler.fetch_request_data(request)
    return render(request, 'frontend/map.html', context=data)
def map_data(request, search_type, flow_type, data_type):
    """Stream map points for the given search as a downloadable CSV.

    Only 'cluster' searches are supported; anything else gets HTTP 501.
    """
    data_handler = DataHandler(search_type, 'map')
    if search_type != 'cluster':
        return HttpResponse(status=501)
    data = data_handler.fetch_all_data(request, fields=['date', 'location', 'country', 'coordinates'], data_type='hits')
    mapper = Mapper(flow_type, data_type)
    csv_data = mapper.format_map_data(data)
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="{}.csv"'.format(data_type)
    writer = csv.writer(response)
    writer.writerows(csv_data)
    return response
def download(request, search_type):
    """
    Sends the current request to user as a downloadable TSV.
    """
    data_handler = DataHandler(search_type, 'download')
    data_exporter = DataExporter(data_handler=data_handler)
    lines = data_exporter.export_current_search(request, search_type)
    # NOTE(review): 'text/tsv' is non-standard; the registered MIME type is
    # 'text/tab-separated-values' — confirm clients don't depend on this value
    response = HttpResponse(content_type='text/tsv')
    response['Content-Disposition'] = 'attachment; filename="{}.tsv"'.format('download')
    writer = csv.writer(response, delimiter='\t')
    writer.writerows(lines)
    return response
| avjves/HistSE | frontend/views.py | views.py | py | 3,902 | python | en | code | 0 | github-code | 13 |
39655883300 | from alive_progress import alive_bar
message = ""
def wpmToDps(wpm):
    ''' Words per minute = number of times PARIS can be sent per minute.
    PARIS takes 50 dot lengths to send. Returns dots per seconds. '''
    dots_per_minute = wpm * 50
    return dots_per_minute / 60.0
def farnsworthScaleFactor(wpm, fs=None):
    ''' Returns the multiple that character and word spacing should be multiplied by.

    In Farnsworth timing, elements are keyed at *wpm* but the gaps are
    stretched so the effective speed drops to *fs* words per minute.  The
    magic numbers 9+10+4*DASH_WIDTH come from the dot/dash element counts
    of the reference word PARIS — presumably; confirm against the standard
    PARIS timing breakdown.
    '''
    if fs is None:
        return 1  # Standard (not Farnsworth) word spacing
    slowWordInterval = 1.0/fs  # Minutes per word
    standardWordInterval = 1.0/wpm
    extraSpace = slowWordInterval-standardWordInterval
    extraSpaceDots = (extraSpace/standardWordInterval) * (9+10+4*DASH_WIDTH+4*CHAR_SPACE+WORD_SPACE)
    standardSpaceDots = 4*CHAR_SPACE + WORD_SPACE  # For the word PARIS
    totalSpaceDots = standardSpaceDots + extraSpaceDots
    scaleFactor = totalSpaceDots / standardSpaceDots
    # never compress spacing below standard timing
    if scaleFactor < 1:
        return 1
    return scaleFactor
def string_divide(string, div):
    """Split *string* into consecutive chunks of at most *div* characters."""
    return [string[start:start + div] for start in range(0, len(string), div)]
# Character -> International Morse code; ' ' maps to the word separator '/'.
MORSE_DICT = {
    'a':'.-',
    'b':'-...',
    'c':'-.-.',
    'd':'-..',
    'e':'.',
    'f':'..-.',
    'g':'--.',
    'h':'....',
    'i':'..',
    'j':'.---',
    'k':'-.-',
    'l':'.-..',
    'm':'--',
    'n':'-.',
    'o':'---',
    'p':'.--.',
    'q':'--.-',
    'r':'.-.',
    's':'...',
    't':'-',
    'u':'..-',
    'v':'...-',
    'w':'.--',
    'x':'-..-',
    'y':'-.--',
    'z':'--..',
    '1': '.----',
    '2': '..---',
    '3': '...--',
    '4': '....-',
    '5': '.....',
    '6': '-....',
    '7': '--...',
    '8': '---..',
    '9': '----.',
    '0': '-----',
    '.': '.-.-.-',
    ',': '--..--',
    '?': '..--..',
    ':': '---...',
    # FIX: was '_._._.' (underscores) — the encoder only understands '.'/'-'
    # and would silently drop every element; ITU code for ';' is -.-.-.
    ';': '-.-.-.',
    "'": '.----.',
    '"': '.-..-.',
    '(': '-.--.',
    ')': '-.--.-',
    '/': '-..-.',
    '-': '-....-',
    '=': '-...-',
    '+': '.-.-.',
    '!': '-.-.--',
    '×': '-..-',  # NOTE: same code as 'x' — decryption is ambiguous for -..-
    '@': '.--.-.',
    ' ':'/'
}
# Standard Morse timing, in dot units
DASH_WIDTH = 3   # a dash lasts three dots
CHAR_SPACE = 3   # gap between characters
WORD_SPACE = 7   # gap between words
class Morsepy():
    """Static helpers converting text to/from Morse code using MORSE_DICT."""

    @staticmethod
    def encrypt(str):
        """
        Encrypts any string into morse.

        Characters are separated by single spaces; a space in the input
        becomes the word separator '/'.  Raises ValueError for characters
        missing from MORSE_DICT.
        """
        cipher = ''
        for char in str:
            try:
                cipher += MORSE_DICT[char]
                cipher += ' '
            except KeyError:
                raise ValueError(f' Character "{char}" is not currently supported by morsepy')
        return cipher.strip()

    @staticmethod
    def decrypt(str):
        """
        Decrypts a morse string into english.

        Morse characters should be separated by spaces and words separated
        with a '/', either padded by whitespace (' / ') or not ('/'), used
        consistently.  Raises SyntaxError for unknown morse characters.
        """
        decipher = ''
        wordsplit = ' / ' if ' / ' in str else '/'  # detect the word-separator style in use
        wordlist = [word.split(' ') for word in str.split(wordsplit)]
        for charlist in wordlist:
            for char in charlist:
                if char not in MORSE_DICT.values():
                    raise SyntaxError(f' Morse character "{char}" does not exist')
                for letter, morse in MORSE_DICT.items():
                    if morse == char:
                        decipher += letter
                        # FIX: without this break, duplicate codes (e.g. 'x'
                        # and '×' both map to '-..-') appended BOTH letters;
                        # take the first match only.
                        break
            decipher += ' '
        return decipher.strip()
import pydub
from pydub.generators import Sine
from pydub import AudioSegment
class Encoder():
    """Render Morse strings (output of Morsepy.encrypt) as audio.

    Timing is derived from words-per-minute (wpm) with optional Farnsworth
    spacing (fs). Relies on module-level helpers `wpmToDps`,
    `farnsworthScaleFactor` and `string_divide`, and on `alive_bar`
    (assumed imported earlier in the file — not visible here; confirm).
    """
    def __init__(self, wpm, fs, frequency = 400):
        # frequency: tone pitch in Hz for dots/dashes.
        self.dps = wpmToDps(wpm) # Dots per second
        self.mspd = 1000/self.dps # Dot duration in milliseconds
        self.farnsworthScale = farnsworthScaleFactor(wpm, fs)
        # Durations (ms) for each Morse element; inter-character and
        # inter-word gaps are stretched by the Farnsworth scale factor.
        self.dotLength = round(self.mspd, 1)
        self.dashLength = int(round(self.mspd * DASH_WIDTH))
        self.characterLength = int(round(self.mspd * CHAR_SPACE * self.farnsworthScale))
        self.spaceLength = int(round(self.mspd * WORD_SPACE * self.farnsworthScale))
        self.audioFile = pydub.AudioSegment.empty()
        # Each tone is followed by one dot-length of silence (element gap).
        self.dotSound = Sine(freq=frequency).to_audio_segment(duration=self.dotLength)
        self.dotSound += AudioSegment.silent(duration=self.dotLength)
        self.dashSound = Sine(freq=frequency).to_audio_segment(duration=self.dashLength)
        self.dashSound += AudioSegment.silent(duration=self.dotLength)
        self.spaceSound = AudioSegment.silent(duration=self.spaceLength)
        self.charSpace = AudioSegment.silent(duration=self.characterLength)
        print("\n")
        print('Dot width =', round(self.mspd, 1), 'ms')
        print('Dash width =', int(round(self.mspd * DASH_WIDTH)), 'ms')
        print('Character space =', int(round(self.mspd * CHAR_SPACE * self.farnsworthScale)), 'ms')
        print('Word space =', int(round(self.mspd * WORD_SPACE * self.farnsworthScale)), 'ms')
        print("\n")
    def encodeMorse(self, part):
        # Translate one chunk of Morse text ('.', '-', '/', ' ') into audio.
        # Unrecognised characters are silently skipped.
        audioFile = self.audioFile
        for character in part:
            if character == ".":
                audioFile += self.dotSound
            elif character == "-":
                audioFile += self.dashSound
            elif character == "/":
                audioFile += self.spaceSound
            elif character == " ":
                audioFile += self.charSpace
        return audioFile
    def encode(self, message, n=1000):
        # Encrypt plain text, split into n-character chunks, render each
        # chunk with a progress bar, and concatenate the clips.
        message=Morsepy.encrypt(message)
        splitMessage = string_divide(message, n)
        with alive_bar(len(splitMessage)) as bar:
            clips = []
            for part in splitMessage:
                clips.append(self.encodeMorse(part))
                bar()
            # NOTE(review): sum() starts from int 0 — assumes pydub
            # AudioSegment supports 0 + segment; confirm.
            audioFile = sum(clips)
        print("Encoded")
        return audioFile
| SpaceNerden/TextToMorse | TextToMorse.py | TextToMorse.py | py | 6,006 | python | en | code | 0 | github-code | 13 |
1583264198 | """ Prepare Data to be uploaded to MySQL tables """
import pandas as pd
import pathlib
def prepare_customer_data(sales_df: pd.DataFrame, target_data_dir: pathlib.Path) -> pd.DataFrame:
    """Extract the customer dimension from the raw sales frame.

    Selects the customer-related columns, strips the ``customer_`` prefix
    from their names, drops duplicate rows (keeping the last occurrence),
    writes the result to ``<target_data_dir>/customer.csv`` (``;``-separated,
    NaN written as ``NULL``) and returns the deduplicated frame.
    """
    columns = [
        "customer_name",
        "customer_age",
        "customer_segment",
        "city",
        "zip_code",
        "state",
        "region",
    ]
    # .copy() so the in-place-style operations below act on an independent
    # frame instead of a slice of sales_df (avoids SettingWithCopyWarning).
    customer_df = sales_df.loc[:, columns].copy()
    customer_df.columns = [col.replace("customer_", "") for col in customer_df.columns]
    customer_df = customer_df.drop_duplicates(keep="last")
    customer_df.to_csv(
        target_data_dir / "customer.csv", index=False, sep=";", na_rep="NULL"
    )
    return customer_df
def prepare_product_data(sales_df: pd.DataFrame, target_data_dir: pathlib.Path) -> pd.DataFrame:
    """Extract the product dimension from the raw sales frame.

    Selects the product-related columns, strips the ``product_`` prefix
    from their names, drops duplicate rows (keeping the last occurrence),
    writes the result to ``<target_data_dir>/product.csv`` (``;``-separated,
    NaN written as ``NULL``) and returns the deduplicated frame.
    """
    columns = [
        "product_name",
        "product_container",
        "product_sub_category",
        "product_category",
        "product_base_margin",
    ]
    # .copy() so the operations below act on an independent frame rather
    # than a slice of sales_df (avoids SettingWithCopyWarning).
    product_df = sales_df.loc[:, columns].copy()
    product_df.columns = [col.replace("product_", "") for col in product_df.columns]
    product_df = product_df.drop_duplicates(keep="last")
    product_df.to_csv(
        target_data_dir / "product.csv", index=False, sep=";", na_rep="NULL"
    )
    return product_df
def prepare_order_data(
    sales_df: pd.DataFrame,
    product_df: pd.DataFrame,
    customer_df: pd.DataFrame,
    target_data_dir: pathlib.Path,
):
    """Extract the order fact table and write it to order.csv.

    NOTE(review): product_df and customer_df are currently unused — the
    TODO below intends to replace the denormalised customer/product
    columns with foreign-key lookups against those frames.
    """
    columns = [
        "order_id",
        "order_priority",
        "order_quantity",
        "unit_price",
        "sales",
        "shipping_cost",
        "discount",
        "profit",
        "order_date",
        "ship_date",
        # The following columns are needed for looking up Customer ID
        "customer_name",
        "city",
        "zip_code",
        "state",
        "region",
        # The following columns are needed for looking up Product ID
        "product_name",
        "product_container",
        "product_sub_category",
        "product_category",
        "product_base_margin",
    ]
    order_df = sales_df.loc[:, columns]
    # Stripping the "order_" prefix turns "order_id" into "id" ...
    columns_renamed = [col.replace("order_", "") for col in order_df.columns]
    order_df.columns = columns_renamed
    # ... so restore the full "order_id" name here.
    order_df.rename(columns={"id": "order_id"}, inplace=True)
    # TO-DO: Lookup Customer ID (from customer.csv) and Product ID (from product.csv)
    order_df.to_csv(target_data_dir / "order.csv", index=False, sep=";", na_rep="NULL")
if __name__ == "__main__":
    # Read the raw Walmart sales export and emit the dimension/fact CSVs
    # into data/final for the MySQL load step.
    data_dir = pathlib.Path.cwd() / "data"
    source_data_dir = data_dir / "raw"
    source_file_path = source_data_dir / "walmart_sales.csv"
    sales_df = pd.read_csv(source_file_path)
    target_data_dir = data_dir / "final"
    customer_df = prepare_customer_data(sales_df, target_data_dir)
    product_df = prepare_product_data(sales_df, target_data_dir)
    prepare_order_data(sales_df, product_df, customer_df, target_data_dir)
| m-p-esser/mysql-walmart-data-model | src/process_data.py | process_data.py | py | 2,935 | python | en | code | 0 | github-code | 13 |
15918381694 | """empty message
Revision ID: ee6b7b2e6a16
Revises: 28f62b6e1f24
Create Date: 2016-03-01 11:46:58.147000
"""
# revision identifiers, used by Alembic.
revision = 'ee6b7b2e6a16'
down_revision = '28f62b6e1f24'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import ENUM
# enum type
user_status_enum = sa.Enum('new', 'confirmed', 'deleted', name="status_enum")
def upgrade():
    """Create the status_enum Postgres type and add user.status (NOT NULL)."""
    # create new type (must exist before the column referencing it)
    user_status_enum.create(op.get_bind())
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('user', sa.Column('status', user_status_enum, nullable=False))
    ### end Alembic commands ###
def downgrade():
    """Drop user.status and then the status_enum Postgres type."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('user', 'status')
    ### end Alembic commands ###
    # drop type (only after no column references it)
    user_status_enum.drop(op.get_bind())
| joonaojapalo/trackem | migrations/versions/ee6b7b2e6a16_.py | ee6b7b2e6a16_.py | py | 863 | python | en | code | 0 | github-code | 13 |
71915692179 | import scrapy
import io
import json
from PyPDF2 import PdfReader
from flask import Flask,jsonify
from nltk.sentiment import SentimentIntensityAnalyzer
app = Flask(__name__)
class PdfCrawler(scrapy.Spider):
    """Scrapy spider that downloads PDFs about diabetic foot care, scans
    each page line-by-line for domain keywords, scores matching lines with
    VADER sentiment, and dumps all matches to resultados.json."""
    name = 'pdf_crawler'
    start_urls = [
        'https://iwgdfguidelines.org/wp-content/uploads/2020/12/Brazilian-Portuguese-translation-IWGDF-Guidelines-2019.pdf',
        'https://portal.unicap.br/documents/475032/672293/ebook+Livro+pe+diabetico-2020.pdf/36b829a4-e588-cee9-e4ea-89cf8cf50fc6?t=1608742383653#:~:text=Segundo%20o%20Minist%C3%A9rio%20da%20Sa%C3%BAde,diabetes%20(BRASIL%2C%202016)'
    ]
    # NOTE(review): these are mutable CLASS attributes, shared across all
    # spider instances/runs in the same process.
    output_file = "resultados.json"
    results = []
    sia = SentimentIntensityAnalyzer()
    # Portuguese keywords related to diabetic foot; matching is
    # case-insensitive (see _contains_keywords).
    keywords = ["pé diabético", "neuropatia diabética", "úlcera diabética", "amputação", "neuropatia periférica", "circulação sanguínea", "tratamento", "diagnóstico", "prevenção", "complicações", "diabetes mellitus" , "cuidados", "terapia", "monitoramento"]
    def parse(self, response):
        # response.body holds the raw PDF bytes.
        with io.BytesIO(response.body) as data:
            reader = PdfReader(data)
            for i, page in enumerate(reader.pages):
                text = page.extract_text()
                lines = text.split('\n')
                for j, line in enumerate(lines):
                    if self._contains_keywords(line):
                        result = {
                            'page_number': i + 1,
                            'position': j,
                            # NOTE(review): for j < 2 this negative-index
                            # slice yields an empty/short context — confirm
                            # intended.
                            'context': lines[j-2:j+2],
                            'text': line.strip(),
                            # VADER compound score in [-1, 1].
                            'sentiment_score': self.sia.polarity_scores(line)['compound']
                        }
                        self.results.append(result)
        # Rewrites the full results file after every parsed PDF.
        with open(self.output_file, "w",encoding="utf-8") as f:
            json.dump(self.results, f, indent=4, ensure_ascii=False)
        self.log(f'Informações salvas em {self.output_file}')
    def _contains_keywords(self, text):
        # Case-insensitive substring match against any keyword.
        return any(word in text.lower() for word in self.keywords)
@app.route('/results')
def get_results():
    """Serve the crawl results stored in resultados.json as a JSON response."""
    with open('resultados.json', 'r', encoding='utf-8') as results_file:
        payload = json.load(results_file)
    return jsonify(payload)
@app.route('/')
def index():
    """Serve a static HTML page that fetches /results with jQuery and
    renders the crawl matches as an HTML table (labels in Portuguese)."""
    return '''
    <!DOCTYPE html>
    <html>
    <head>
        <title>Resultados da análise de PDFs</title>
        <script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
        <script>
            $(document).ready(function() {
                $.getJSON('/results', function(data) {
                    var table = '<table>';
                    table += '<tr><th>Página</th><th>Posição</th><th>Contexto</th><th>Texto</th><th>Pontuação de Sentimento</th></tr>';
                    $.each(data, function(index, value) {
                        table += '<tr>';
                        table += '<td>' + value.page_number + '</td>';
                        table += '<td>' + value.position + '</td>';
                        table += '<td>' + value.context.join('<br>') + '</td>';
                        table += '<td>' + value.text + '</td>';
                        table += '<td>' + value.sentiment_score + '</td>';
                        table += '</tr>';
                    });
                    table += '</table>';
                    $('#results').html(table);
                });
            });
        </script>
    </head>
    <body>
        <h1>Resultados da análise de PDFs</h1>
        <div id="results"></div>
    </body>
    </html>
    '''
if __name__ == '__main__':
    # Run the Flask development server; the scrapy spider is run separately.
    app.run()
| dashp21/Crawler-Flask | CrawlerFlask.py | CrawlerFlask.py | py | 3,791 | python | en | code | 0 | github-code | 13 |
36639567358 | from __future__ import division, print_function, unicode_literals
import sys
import codecs
import nscr
class LineReader(object):
    # Nscripter uses commands like "goto" and "skip".
    # Thus, we can't really make the script into trees.
    # So we use a procedural approach.
    #
    # Reads an NScripter script (Shift-JIS), indexes its "*label" lines,
    # and provides goto/gosub/skip flow-control over them.
    #
    # NOTE(review): the b"..." literals compared against decoded (unicode)
    # lines only match under Python 2 (where unicode_literals makes them
    # plain str); on Python 3 line[0] == b"*" is always False.
    START_LABEL = b"*define"
    def __init__(self, filename):
        self._lines = []
        self._labels = dict()        # lowercase "*label" -> line index
        self._gosub_stack = []       # return addresses for gosub/return
        self._read_file(filename)
        self.last_line = len(self._lines)-1
        # Execution starts at the conventional *define entry point.
        self.goto(self.START_LABEL)
    def _read_file(self, filename):
        # reads in lines and lists labels
        with codecs.open(filename, 'r', encoding="sjis") as f:
            for line in f:
                line = line.strip().lstrip()
                # list labels
                if len(line) > 1 and line[0] == b"*":
                    self._labels[line.lower()] = len(self._lines)
                self._lines.append(line)
    def _next_line(self):
        # Advance the cursor and return the new current line.
        self.current_line += 1
        return self._lines[self.current_line]
    def read_next(self):
        return self._next_line()
    def skip(self, n):
        """Skip N lines of text; execution will continue
        on the Nth line from the current one.
        Negative skipping is also supported."""
        self.current_line += n-1
    def goto(self, label):
        """The label string should include the asterisk at the beginning."""
        # We go before it so that the next read will be from the "*label" line
        self.current_line = self._labels[label]-1
    def gosub(self, label):
        # Save the return address, then jump to the label.
        self._gosub_stack.append( self.current_line )
        self.goto(label)
    def cmd_return(self):
        # Pop the saved address; complain (but continue) on underflow.
        try:
            self.current_line = self._gosub_stack.pop()
        except IndexError:
            self.error("Tried to return while not gosub'd.")
    def jumpf(self):
        """The worst form of flow control in history."""
        # Scan forward until a line containing "~", then re-read it next.
        s = ""
        while b"~" not in s:
            s = self._next_line()
        # Currently we don't support mid-line skipping with jumpf
        self.current_line -= 1
    def error(self, msg):
        # Non-fatal diagnostic.
        print("Strange.", msg)
class CmdReader(LineReader):
    """LineReader that parses each script line into individual commands
    (via the external `nscr` grammar) and yields them one at a time.

    Lines ending in a comma are treated as continuations and buffered in
    `unfinished` until the statement is complete.
    """
    ARG_SEP = b","
    def __init__(self, *args, **kwargs):
        super(CmdReader, self).__init__(*args, **kwargs)
        # The commands will be popped out, so the
        # first command should be at the last position!
        self._cmds = []
        # for multi-line statements
        self.unfinished = b""
    def read_next(self):
        # Refill the command queue from whole (possibly multi-line)
        # statements until at least one parsed command is available.
        while self._cmds == []:
            line = super(CmdReader, self).read_next()
            # A trailing comma means the statement continues on the next
            # line — unless this is the start of a text line (backtick).
            if line.endswith(self.ARG_SEP) and (not line.startswith(b"`") or self.unfinished != b""):
                self.unfinished += line
                continue
            else:
                if self.unfinished != b"":
                    line = self.unfinished + line
                    self.unfinished = b""
                self._cmds.extend( self.parse_line(line) )
        return self._cmds.pop()
    def parse_line(self, line):
        got = nscr.parse("goal", line)
        if got == None: # testing
            # Parser failure: report and emit no commands for this line.
            print("Return value replaced for:", line)
            return []
        else:
            # Reversed so pop() returns commands in source order.
            return reversed(got)
def main():
    """Interactive driver: step through a script's commands one per
    keypress, optionally skipping SKIP_TO commands first.

    NOTE(review): uses xrange/raw_input — this entry point is Python 2 only.
    """
    if len(sys.argv) not in (2, 3):
        print("Usage: onscr_parse.py FILENAME [SKIP_TO]")
        exit(1)
    filename = sys.argv[1]
    reader = CmdReader(filename)
    # Optional fast-forward to a given command number.
    if len(sys.argv) == 3:
        cmd_count = int( sys.argv[2] ) - 1
    else:
        cmd_count = 0
    try:
        for i in xrange(cmd_count):
            reader.read_next()
    except IndexError:
        print("Error: Skipping went over bounds!")
        exit(1)
    print("Successfully initialized.")
    # Enter to advance; q/quit/e/exit (or EOF) to stop.
    try:
        while raw_input("").lower() not in ("q", "quit", "e", "exit"):
            if reader.current_line != reader.last_line:
                cmd_count += 1
                cmd = reader.read_next()
                print( cmd_count, cmd )
            else:
                # If the last line contained multiple commands
                # only the first will be displayed and then we enter
                # last line mode. But that's not a very likely
                # circumstance.
                print("That was the last line.")
    except EOFError:
        pass
if __name__ == '__main__':
    main()
| uvthenfuv/npynscr | onscr_parse.py | onscr_parse.py | py | 4,969 | python | en | code | 2 | github-code | 13 |
37153941194 | import sys
sys.path.insert(0,'./Modules/')
import numpy as np
from file_reader import read_file
import pandas as pd
from rdkit import Chem
from mol_utils import get_fragments
import numpy as np
import time
import sys
import matplotlib.pyplot as plt
import pickle
import argparse
import xgboost as xgb
import Show_Epoch
import logging
from keras.utils.generic_utils import get_custom_objects
import keras
sys.path.insert(0,'./Modules/')
from models import maximization
from rewards import get_padel, clean_folder, modify_fragment
from build_encoding import get_encodings, encode_molecule, decode_molecule, encode_list, save_decodings, save_encodings, read_decodings, read_encodings
from global_parameters import MAX_SWAP, MAX_FRAGMENTS, GAMMA, BATCH_SIZE, EPOCHS, TIMES, FEATURES
#similar to bunch_Eval, except that it is without the rewards
def get_pIC50(mols):
    """Predict pIC50 for a list of RDKit molecules.

    Writes each molecule as a .mol file, computes PaDEL descriptors,
    filters to the trained feature set, and scores with the saved XGBoost
    regressor. Molecules whose descriptors contain NaNs are re-run once
    with the "-1" PaDEL option and appended back.
    """
    folder_path = "./generated_molecules/"
    file_path = "./descriptors.csv"
    #Cleaning up the older files
    clean_folder(folder_path)
    i = 0
    for mol in mols:
        # NOTE(review): file handles from open() here are never closed.
        print(Chem.MolToMolBlock(mol),file=open(str(folder_path)+str(i)+'.mol','w'))
        i += 1
    get_padel(folder_path,file_path,'-1')
    #Reading the descriptors
    xg_all = pd.read_csv(file_path)
    names = xg_all['Name']
    # Drop every descriptor column not used by the trained model.
    bad = []
    with open('./saved_models/good_columns','rb') as f:
        cols = pickle.load(f)
    for col in xg_all.columns:
        if col not in cols:
            bad.append(col)
    xg_all.drop(columns=bad,inplace=True)
    #Verifying that all the required columns are there
    assert len(xg_all.columns) == len(cols)
    xg_all['Name'] = names
    # Molecules with any NaN descriptor get a second evaluation pass.
    files = xg_all[pd.isnull(xg_all).any(axis=1)]['Name']
    xg_all.dropna(inplace=True)
    mol= []
    if len(files) !=0:
        # NOTE(review): hard-coded absolute Windows path — breaks on any
        # other machine; should be derived like folder_path above.
        uneval_folder = "C:\\Users\\HP\\AZC_Internship\\DeepFMPO\\3.6\\unevalmol\\"
        clean_folder(uneval_folder)
        for f in files:
            m = Chem.MolFromMolFile(folder_path+str(f)+'.mol')
            print(Chem.MolToMolBlock((m)),file=open(str(uneval_folder)+str(f)+'.mol','w'))
        get_padel(uneval_folder,'./uneval_desc.csv','-1')
        unevalmol = pd.read_csv('./uneval_desc.csv')
        unevalmol.drop(columns=bad,inplace=True)
        print(unevalmol.isna().sum(axis=1))
        xg_all = pd.concat([xg_all,unevalmol])
    #xg_all.to_csv('./xgall.csv')
    # Remaining NaNs are zero-filled before scoring.
    xg_all.fillna(value=0,inplace=True)
    regressor = xgb.XGBRegressor()
    regressor.load_model('./saved_models/best_from_gs38.model')
    # Sort by Name so predictions line up with the caller's input order.
    xg_all.sort_values(by='Name',inplace=True)
    xg_all.drop(columns='Name',inplace=True)
    predictions = regressor.predict(xg_all)
    print('Properties predicted for {} molecules'.format(len(predictions)))
    return predictions
def modify_mols(X,decodings,stoch=1):
    """Run the trained actor over encoded lead molecules for TIMES steps,
    applying fragment bit-flips, and save input/output arrays to ./History.

    X        : encoded molecule batch (batch, MAX_FRAGMENTS, FEATURES)
    stoch    : 1 = sample actions from the policy, else greedy argmax
    decodings: unused here — kept for interface compatibility.
    """
    batch_mol = X.copy()
    org_mols = batch_mol.copy() #saving a copy of the original molecules
    BATCH_SIZE = len(X)
    # One action per (fragment, swap-bit) pair, plus a final "stop" action.
    n_actions = MAX_FRAGMENTS * MAX_SWAP + 1
    stopped = np.zeros(BATCH_SIZE) != 0
    #loss = maximization()
    #get_custom_objects().update({"maximization": loss.computeloss})
    actor = keras.models.load_model('./saved_models/generation', custom_objects={'maximization': maximization})
    TIMES = 8
    rand_select = np.random.rand(BATCH_SIZE)
    for t in range(TIMES):
        #for each mol, a no. between 0-1 indicating the time-step
        tm = (np.ones((BATCH_SIZE,1)) * t) / TIMES
        #predictions for all the 512 molecules: 512*n_actions
        probs = actor.predict([batch_mol, tm])
        actions = np.zeros((BATCH_SIZE))
        if stoch == 1:
            # Find probabilities for modification actions
            # NOTE(review): rand_select is drawn once outside the time loop
            # and decremented cumulatively across steps — confirm intended.
            for i in range(BATCH_SIZE):#for every molecules in the batch
                a = 0
                while True:
                    rand_select[i] -= probs[i,a]
                    if rand_select[i] < 0 or a + 1 == n_actions:
                        break
                    a += 1
                #choosing a random action
                actions[i] = a
        else:
            for i in range(BATCH_SIZE):#for every molecules in the batch
                #choose the action which has maximum probability
                actions[i]=np.argmax(probs[i])
        # Select actions
        for i in range(BATCH_SIZE):
            a = int(actions[i])
            # The last action index means "stop modifying this molecule".
            if stopped[i] or a == n_actions - 1:
                stopped[i] = True
                continue
            #Converting the n_actions*1 position to the actual position
            # NOTE(review): s is taken modulo the ALREADY-divided index;
            # should this be s = int(actions[i]) % MAX_SWAP? Verify against
            # the action encoding.
            a = int(a // MAX_SWAP)#Integer Division, to get the location of the fragment
            s = a % MAX_SWAP# it is the location where the swap happens in that fragment
            if batch_mol[i,a,0] == 1:#Checking whether the fragment is non-empty?
                #In ith molecule, in its ath fragment, the sth position is flipped
                batch_mol[i,a] = modify_fragment(batch_mol[i,a], s)#changes 0 to 1 and 1 to 0
    #Evaluating multiple molecules at the same time
    # Fixed tag 1000 used by the downstream Show_Epoch/main steps.
    e = 1000
    np.save("./History/in-{}.npy".format(e), org_mols)
    np.save("./History/out-{}.npy".format(e), batch_mol)
def main(epoch,gen):
    """End-to-end evaluation: optionally generate modified molecules
    (gen == 1), then score initial vs modified molecules with get_pIC50,
    save CSV summaries, and plot the before/after distributions.

    NOTE(review): the `epoch` argument is overwritten with 1000 below, so
    the -epoch CLI flag currently has no effect.
    """
    if gen == 1:
        lead_file = "Data/trial.csv"
        fragment_file = "Data/molecules.smi"
        fragment_mols = read_file(fragment_file)
        lead_mols = read_file(lead_file)
        fragment_mols += lead_mols
        logging.info("Read %s molecules for fragmentation library", len(fragment_mols))
        logging.info("Read %s lead molecules", len(lead_mols))
        fragments, used_mols = get_fragments(fragment_mols)
        logging.info("Num fragments: %s", len(fragments))
        logging.info("Total molecules used: %s", len(used_mols))
        assert len(fragments)
        assert len(used_mols)
        # Keep only the lead molecules that survived fragmentation.
        lead_mols = np.asarray(fragment_mols[-len(lead_mols):])[used_mols[-len(lead_mols):]]
        decodings = read_decodings()
        encodings = read_encodings()
        logging.info("Loaded encodings and decodings")
        X = encode_list(lead_mols, encodings)
        modify_mols(X,decodings)
    epoch=1000
    file_name = './past outputs/out'+str(epoch)+'.csv'
    logging.info("Collecting and storing all molecules in {}".format(file_name))
    Show_Epoch.main(epoch,file_name)
    df = pd.read_csv('./past outputs/out'+str(epoch)+'.csv',sep=";")
    # Keep only pairs whose modified SMILES parses.
    moli = []
    molm = []
    for i in range(len(df)):
        if (Chem.MolFromSmiles(df.iloc[i,1])) is not None:
            moli.append(Chem.MolFromSmiles(df.iloc[i,0]))
            molm.append(Chem.MolFromSmiles(df.iloc[i,1]))
    logging.info("Predicting pIC50 values of the initial molecules")
    ini = get_pIC50(moli)
    logging.info("Predicting pIC50 values of the predicted molecules")
    mod = get_pIC50(molm)
    ini = np.asarray(ini)
    mod = np.asarray(mod)
    changes = pd.DataFrame(np.transpose(np.asarray([ini,mod])),columns=['Initial_pIC','Modified_pIC'])
    changes['Initial_mol'] = df.iloc[:,0]
    changes['Modified_mol'] = df.iloc[:,1]
    changes['Delta'] = changes['Modified_pIC'] - changes['Initial_pIC']
    changes.sort_values(by='Delta',ascending=False,inplace=True)
    # Molecules that crossed the activity threshold (pIC50 = 7).
    inact_to_act = changes.loc[(changes['Modified_pIC']>7) & (changes['Initial_pIC']<7),['Modified_pIC','Initial_pIC','Delta']].sort_values(by='Delta',ascending=False)
    changes.to_csv('./past outputs/out_pIC'+str(epoch)+'.csv',index=False)
    inact_to_act.to_csv('./past outputs/act_pIC'+str(epoch)+'.csv',index=False)
    print(inact_to_act.head())
    print(changes.head())
    # Draw the top-5 improved pairs (initial next to modified).
    from rdkit.Chem import Draw
    moli = []
    molm = []
    for i in range(5):
        moli.append(Chem.MolFromSmiles(changes.iloc[i,2]))
        moli.append(Chem.MolFromSmiles(changes.iloc[i,3]))
    plot = Draw.MolsToGridImage(moli, molsPerRow=2)
    plot.show()
    #plot.save('/past outputs/epoch.png')
    # Overlaid histograms of initial vs modified pIC50.
    bins = np.linspace(4,10,14)
    #changes = changes.loc[changes.Delta>0]
    plt.hist(changes['Initial_pIC'], bins, alpha=0.5, label='initial',color='blue')
    plt.hist(changes['Modified_pIC'], bins, alpha=0.5, label='modified',color='green')
    plt.legend(loc='upper right')
    plt.show()
    # Summary of positive/negative pIC50 shifts.
    sp = changes.loc[changes['Delta']>0].sum()['Delta']
    sn = changes.loc[changes['Delta']<0].sum()['Delta']
    cp = changes.loc[changes['Delta']>0].count()['Delta']
    cn = changes.loc[changes['Delta']<0].count()['Delta']
    print('Sum of positive changes = {}\tNo. of +ves = {}\nSum of negative changes = {}\tNo. of -ves = {}'.format(sp,cp,sn,cn))
    return 0
# CLI: -epoch selects the epoch to inspect, -gen triggers generation,
# -stoch chooses sampled vs greedy actions.
parser = argparse.ArgumentParser()
parser.add_argument("-epoch", dest="epoch", help="Epoch for which the results are to be viewed", default=0)
parser.add_argument("-gen",dest="gen",help="Pass as 1 if you want to generate new molecules",default=0)
parser.add_argument("-stoch",dest="stoch",help="Pass as 0 if you do not want to generate new molecules by sampling actions from a probability distribution",default=1)
if __name__ == "__main__":
    args = parser.parse_args()
    epoch = int(args.epoch)
    gen = int(args.gen)
    # NOTE(review): stoch is parsed but never passed to main/modify_mols.
    stoch = int(args.stoch)
    start_time = time.time()
    main(int(args.epoch),int(gen))
    print("---Time taken = {} seconds ---".format(time.time() - start_time))
| mew-two-github/de-Novo-drug-Design | 3.6/viewing_outputs.py | viewing_outputs.py | py | 9,246 | python | en | code | 1 | github-code | 13 |
22396644132 | import numpy as np
import os
import sys
import matplotlib.pyplot as plt
import chainconsumer
from math import ceil
# pyburst
from . import mcmc_versions
from . import mcmc_tools
from . import burstfit
from . import mcmc_params
from pyburst.observations import obs_tools
from pyburst.plotting import plot_tools
from pyburst.grids.grid_strings import get_source_path, print_warning
from pyburst.misc.pyprint import printv
GRIDS_PATH = os.environ['KEPLER_GRIDS']
def default_plt_options():
    """Apply the module's default matplotlib settings: serif font, plain
    (upright) mathtext, and no external LaTeX rendering."""
    plt.rcParams.update({
        'mathtext.default': 'regular',
        'font.family': 'serif',
        'text.usetex': False,
    })
# Apply the defaults as soon as the module is imported.
default_plt_options()
def save_plot(fig, prefix, save, source, version, display, chain=None, n_dimensions=None,
              n_walkers=None, n_steps=None, label=None, extension='.png',
              enforce_chain_info=True):
    """Handles saving/displaying of a figure passed to it

    The output filename encodes the chain dimensions, so either `chain`
    or all of (n_dimensions, n_walkers, n_steps) must be supplied when
    enforce_chain_info is True. The figure is closed unless `display`.
    """
    if enforce_chain_info and (None in (n_dimensions, n_walkers, n_steps)):
        if chain is None:
            raise ValueError('Must provide chain, or specify each of '
                             '(n_dimensions, n_walkers, n_steps)')
        else:
            n_walkers, n_steps, n_dimensions = chain.shape
    if save:
        filename = mcmc_tools.get_mcmc_string(source=source, version=version,
                                              n_walkers=n_walkers, n_steps=n_steps,
                                              prefix=prefix, label=label,
                                              extension=extension)
        source_path = get_source_path(source)
        # Fixed: the path previously joined the garbled literal
        # f'(unknown)' instead of the computed filename.
        filepath = os.path.join(source_path, 'plots', prefix, filename)
        fig.savefig(filepath)
    if display:
        plt.show(block=False)
    else:
        plt.close(fig)
def save_multiple_synth(series, source, version, n_steps, discard, n_walkers=960,
                        walkers=True, posteriors=True, contours=False,
                        display=False, mass_radius=True,
                        synth=True, compressed=False):
    """Save plots for multiple series in a synthetic data batch

    series : iterable of series identifiers; when synth is True each is
        appended to `source` as '<source>_<series>'.
    The boolean flags toggle which plot types are produced per series.
    """
    # TODO reuse max_lhood point
    default_plt_options()
    for ser in series:
        if synth:
            full_source = f'{source}_{ser}'
        else:
            full_source = source
        chain = mcmc_tools.load_chain(full_source, n_walkers=n_walkers, n_steps=n_steps,
                                      version=version, compressed=compressed)
        if walkers:
            plot_walkers(chain, source=full_source, save=True,
                         display=display, version=version)
        if posteriors:
            plot_posteriors(chain, source=full_source, save=True, discard=discard,
                            display=display, version=version)
        if contours:
            plot_contours(chain, source=full_source, save=True, discard=discard,
                          display=display, version=version)
        if mass_radius:
            plot_mass_radius(chain, source=full_source, save=True, discard=discard,
                             display=display, version=version)
def save_all_plots(source, version, discard, n_steps, n_walkers=1000, display=False,
                   save=True, cap=None, posteriors=True, contours=True,
                   redshift=True, mass_radius=True, verbose=True, compressed=False):
    """Saves (and/or displays) main MCMC plots

    Loads the chain once, then produces each enabled plot type
    (posteriors, contours, mass-radius, redshift) with the same
    discard/cap slicing.
    """
    chain = mcmc_tools.load_chain(source, version=version, n_steps=n_steps,
                                  n_walkers=n_walkers, verbose=verbose,
                                  compressed=compressed)
    if posteriors:
        printv('Plotting posteriors', verbose=verbose)
        plot_posteriors(chain, source=source, save=save, discard=discard, cap=cap,
                        display=display, version=version)
    if contours:
        printv('Plotting contours', verbose=verbose)
        plot_contours(chain, source=source, save=save, discard=discard, cap=cap,
                      display=display, version=version)
    if mass_radius:
        printv('Plotting mass-radius', verbose=verbose)
        plot_mass_radius(chain, source=source, save=save, discard=discard, cap=cap,
                         display=display, version=version)
    if redshift:
        printv('Plotting redshift', verbose=verbose)
        plot_redshift(chain, source=source, save=save, discard=discard, cap=cap,
                      display=display, version=version)
def plot_contours(chain, discard, source, version, cap=None,
                  display=True, save=False, truth_values=None, parameters=None,
                  sigmas=np.linspace(0, 2, 5), cc=None, summary=False, fontsize=14,
                  max_ticks=4):
    """Plots posterior contours of mcmc chain

    parameters : [str]
        specify which parameters to plot
    cc : ChainConsumer (optional)
        pre-built ChainConsumer; when given, discard/cap/sigmas etc. are
        assumed already applied and are ignored here.
    """
    default_plt_options()
    if cc is None:
        pkeys = mcmc_versions.get_parameter(source, version, 'param_keys')
        pkey_labels = plot_tools.convert_mcmc_labels(param_keys=pkeys)
        cc = mcmc_tools.setup_chainconsumer(chain=chain, param_labels=pkey_labels,
                                            discard=discard, cap=cap, sigmas=sigmas,
                                            summary=summary, fontsize=fontsize,
                                            max_ticks=max_ticks)
    if parameters is not None:
        parameters = plot_tools.convert_mcmc_labels(param_keys=parameters)
    # TODO: figsize
    if truth_values is not None:
        fig = cc.plotter.plot(truth=truth_values, parameters=parameters)
    else:
        fig = cc.plotter.plot(parameters=parameters)
    save_plot(fig, prefix='contours', chain=chain, save=save, source=source,
              version=version, display=display)
    return fig
def plot_posteriors(chain, discard, source, version, cap=None,
                    display=True, save=False, truth_values=None,
                    cc=None):
    """Plots posterior distributions of mcmc chain

    truth_values : list|dict
        Specify parameters of point (e.g. the true value) to draw on the distributions.
    cc : ChainConsumer (optional)
        pre-built ChainConsumer; when given, discard/cap are ignored here.
    """
    default_plt_options()
    pkeys = mcmc_versions.get_parameter(source, version, 'param_keys')
    pkey_labels = plot_tools.convert_mcmc_labels(param_keys=pkeys)
    if cc is None:
        cc = mcmc_tools.setup_chainconsumer(chain=chain, param_labels=pkey_labels,
                                            discard=discard, cap=cap)
    # 4 distributions per row; 3 inches of height per row.
    height = 3 * ceil(len(pkeys) / 4)
    if truth_values is not None:
        fig = cc.plotter.plot_distributions(figsize=[10, height],
                                            truth=truth_values)
    else:
        fig = cc.plotter.plot_distributions(figsize=[10, height])
    plt.tight_layout()
    save_plot(fig, prefix='posteriors', chain=chain, save=save, source=source,
              version=version, display=display)
    return fig
def plot_mass_radius(chain, discard, source, version, cap=None,
                     display=True, save=False, summary=False,
                     sigmas=np.linspace(0, 2, 5), fontsize=18, figsize='column'):
    """Plots contours of mass versus radius from a given chain

    The (R, M) samples are derived from the chain via
    mcmc_params.get_mass_radius_chain using the source/version's
    constant masses.
    """
    default_plt_options()
    mass_nw, mass_gr = mcmc_params.get_constant_masses(source, version)
    mass_radius_chain = mcmc_params.get_mass_radius_chain(chain=chain, discard=discard,
                                                          source=source, version=version,
                                                          cap=cap, mass_nw=mass_nw,
                                                          mass_gr=mass_gr)
    cc = mcmc_tools.setup_custom_chainconsumer(mass_radius_chain, parameters=['R', 'M'],
                                               sigmas=sigmas, summary=summary,
                                               fontsize=fontsize)
    fig = cc.plotter.plot(figsize=figsize)
    # Widen margins so axis labels are not clipped.
    fig.subplots_adjust(left=0.16, bottom=0.15)
    save_plot(fig, prefix='mass-radius', chain=chain, save=save, source=source,
              version=version, display=display)
    return fig
def plot_redshift(chain, discard, source, version, cap=None, display=True, save=False):
    """Plots posterior distribution of redshift given a chain

    The (1+z) samples are derived from the chain via
    mcmc_params.get_redshift_chain using the source/version's constant masses.
    """
    mass_nw, mass_gr = mcmc_params.get_constant_masses(source, version)
    redshift_chain = mcmc_params.get_redshift_chain(chain=chain, discard=discard,
                                                    source=source, version=version,
                                                    cap=cap, mass_nw=mass_nw,
                                                    mass_gr=mass_gr)
    cc = mcmc_tools.setup_custom_chainconsumer(redshift_chain, parameters=['1+z'])
    fig = cc.plotter.plot_distributions(figsize=[5, 5])
    plt.tight_layout()
    save_plot(fig, prefix='redshift', chain=chain, save=save, source=source,
              version=version, display=display)
    return fig
def plot_gravitational_contours(chain, discard, source, version, cap=None, display=True,
                                save=False, r_nw=10, sigmas=np.linspace(0, 2, 5),
                                summary=False, unit_labels=True, fontsize=16,
                                fixed_grav=False, figsize=None):
    """Plots contours of gravitational parameters

    fixed_grav : bool
        when True, only 1D distributions are plotted (gravity held fixed);
        otherwise full 2D contours are drawn.
    r_nw : Newtonian radius (passed through to the chainconsumer setup).
    """
    cc = mcmc_tools.setup_gravitational_chainconsumer(chain=chain, discard=discard,
                                                      source=source, version=version,
                                                      cap=cap, fixed_grav=fixed_grav,
                                                      summary=summary, r_nw=r_nw,
                                                      unit_labels=unit_labels,
                                                      sigmas=sigmas, fontsize=fontsize)
    if fixed_grav:
        fig = cc.plotter.plot_distributions(figsize=figsize)
        plt.tight_layout()
    else:
        fig = cc.plotter.plot()
    save_plot(fig, prefix='gravitational', chain=chain, save=save, source=source,
              version=version, display=display)
    return fig
def plot_inclination(chain, discard, source, version, cap=None, display=True,
                     save=False, disc_model='he16_a', sigmas=np.linspace(0, 2, 5),
                     summary=False, unit_labels=True, figsize=(4, 4), fontsize=18):
    """Plots contours of parameters derived using disc model

    Derives (distance d, inclination i) samples from the chain under the
    given anisotropy disc model and plots their joint contours.
    """
    disc_chain = mcmc_params.get_disc_chain(chain=chain, discard=discard, cap=cap,
                                            source=source, version=version,
                                            disc_model=disc_model)
    cc = mcmc_tools.setup_custom_chainconsumer(disc_chain, parameters=['d', 'i'],
                                               sigmas=sigmas, summary=summary,
                                               unit_labels=unit_labels, fontsize=fontsize)
    fig = cc.plotter.plot(figsize=figsize)
    # Widen margins so axis labels are not clipped.
    fig.subplots_adjust(left=0.15, bottom=0.15)
    save_plot(fig, prefix='disc', chain=chain, save=save, source=source,
              version=version, display=display)
    return fig
def plot_distance_anisotropy(chain, discard, source, version, cap=None, display=True,
                             save=False, sigmas=np.linspace(0, 2, 5), summary=False,
                             figsize=(4, 4), unit_labels=True, fontsize=18):
    """Plots contours of MCMC parameters d_b, xi_ratio

    Extracts the d_b and xi_ratio columns from the chain and plots their
    joint contours.
    """
    d_b_chain = mcmc_params.get_param_chain(chain, param='d_b', discard=discard,
                                            source=source, version=version, cap=cap)
    xi_ratio_chain = mcmc_params.get_param_chain(chain, param='xi_ratio', discard=discard,
                                                 source=source, version=version, cap=cap)
    flat_chain = np.column_stack([d_b_chain, xi_ratio_chain])
    cc = mcmc_tools.setup_custom_chainconsumer(flat_chain, parameters=['d_b', 'xi_ratio'],
                                               sigmas=sigmas, summary=summary,
                                               unit_labels=unit_labels, fontsize=fontsize)
    fig = cc.plotter.plot(figsize=figsize)
    # Widen margins so axis labels are not clipped.
    fig.subplots_adjust(left=0.2, bottom=0.2)
    save_plot(fig, prefix='distance', chain=chain, save=save, source=source,
              version=version, display=display)
    return fig
def plot_xedd(chain, discard, source, version, cap=None, display=True,
              save=False, cloud=True, sigmas=np.linspace(0, 2, 10), figsize=(5, 5)):
    """Plots posterior for Eddington hydrogen composition (X_Edd)

    cloud : bool
        passed through to chainconsumer (scatter-cloud overlay).
    """
    default_plt_options()
    xedd_chain = mcmc_params.get_xedd_chain(chain=chain, discard=discard, source=source,
                                            version=version, cap=cap)
    label = plot_tools.quantity_label('xedd')
    cc = mcmc_tools.setup_custom_chainconsumer(xedd_chain, parameters=[label],
                                               sigmas=sigmas, cloud=cloud)
    fig = cc.plotter.plot(figsize=figsize)
    save_plot(fig, prefix='xedd', chain=chain, save=save, source=source,
              version=version, display=display)
    return fig
def plot_walkers(chain, source, version, params=None, n_lines=30, xlim=-1,
                 display=True, save=False, label=''):
    """Plots walkers vs steps (i.e. "time")

    Parameters
    ----------
    source : str
    version : int
    chain : np.array
        chain as returned by load_chain()
    params : [str]
        parameter(s) of which to plot walkers.
    n_lines : int
        approx number of lines/walkers to plot on parameter
    xlim : int
        x-axis limit to plot (n_steps), i.e. ax.set_xlim((0, xlim));
        -1 means plot all steps.
    label : str
        optional label to add to filename when saving
    display : bool
    save : bool
    """
    default_plt_options()
    pkeys = mcmc_versions.get_parameter(source, version, 'param_keys')
    # ===== Default to splitting all params into 2 plots =====
    # (recurses once per half, labelled P1/P2, then returns)
    if params is None:
        half = int(len(pkeys) / 2)
        for i, param_split in enumerate((pkeys[:half], pkeys[half:])):
            plot_walkers(chain=chain, source=source, version=version,
                         params=param_split, n_lines=n_lines, xlim=xlim,
                         display=display, save=save, label=f'P{i + 1}')
        return
    n_walkers, n_steps, n_dim = chain.shape
    n_params = len(params)
    # Subsample walkers so roughly n_lines traces are drawn per panel.
    jump_size = round(n_walkers / n_lines)
    steps = np.arange(n_steps)
    walker_idxs = np.arange(0, n_walkers, jump_size)
    # noinspection PyTypeChecker
    fig, ax = plt.subplots(n_params, 1, sharex=True, figsize=(10, 12))
    for i in range(n_params):
        p_idx = pkeys.index(params[i])
        for j in walker_idxs:
            walker = chain[j, :, p_idx]
            ax[i].plot(steps, walker, linewidth=0.5, color='black')
        ax[i].set_ylabel(params[i])
    if xlim == -1:
        xlim = n_steps
    ax[-1].set_xlabel('Step')
    ax[-1].set_xlim([0, xlim])
    plt.tight_layout()
    if display:
        plt.show(block=False)
    save_plot(fig, prefix='walkers', chain=chain, save=save, source=source,
              version=version, display=display,
              label=label, extension='.png')
def plot_qb_mdot(chain, source, version, discard, cap=None, display=True, save=False,
                 figsize=(5, 5), fontsize=16, sigmas=(1, 2)):
    """Plots 2D contours of Qb versus Mdot for each epoch (from multi-epoch chain)

    Parameters
    ----------
    chain : np.array
        full MCMC chain (n_walkers, n_steps, n_dim)
    source : str
    version : int
    discard : int
        burn-in steps to drop from the front of the chain
    cap : int (optional)
        last step to keep, passed through to slice_chain()
    sigmas : tuple
        contour levels to draw
    """
    mv = mcmc_versions.McmcVersion(source=source, version=version)
    chain_flat = mcmc_tools.slice_chain(chain, discard=discard, cap=cap, flatten=True)
    system_table = obs_tools.load_summary(mv.system)
    epochs = list(system_table.epoch)
    cc = chainconsumer.ChainConsumer()
    param_labels = []
    for param in ['mdot', 'qb']:
        param_labels += [plot_tools.full_label(param)]
    # One ChainConsumer chain per epoch: each epoch has its own mdotN/qbN columns.
    for i, epoch in enumerate(epochs):
        mdot_idx = mv.param_keys.index(f'mdot{i + 1}')
        qb_idx = mv.param_keys.index(f'qb{i + 1}')
        param_idxs = [mdot_idx, qb_idx]
        cc.add_chain(chain_flat[:, param_idxs], parameters=param_labels,
                     name=str(epoch))
    cc.configure(kde=False, smooth=0, label_font_size=fontsize,
                 tick_font_size=fontsize-2, sigmas=sigmas)
    fig = cc.plotter.plot(display=False, figsize=figsize)
    fig.subplots_adjust(left=0.2, bottom=0.2)
    save_plot(fig, prefix='qb', save=save, source=source, version=version,
              display=display, chain=chain)
    return fig
def plot_epoch_posteriors(master_cc, source, version, display=True, save=False,
                          col_wrap=None, alt_params=True, unit_labels=True,
                          add_text=True, fontsize=16):
    """Plot posteriors for multiple epoch chains

    parameters
    ----------
    master_cc : ChainConsumer
        Contains the multi-epoch chain, created with setup_master_chainconsumer()
    source : str
    version : int
    display : bool (optional)
    save : bool (optional)
    col_wrap : int (optional)
        subplots per row (defaults to the number of epochs)
    """
    # Hard-coded display order of parameters, per source.
    param_order = {
        'grid5': ['mdot1', 'mdot2', 'mdot3', 'qb1', 'qb2', 'qb3', 'x', 'z', 'm_nw',
                  'm_gr', 'd_b', 'xi_ratio'],
        'he2': ['mdot1', 'mdot2', 'qb1', 'qb2', 'm_gr', 'd_b', 'xi_ratio'],
    }
    param_keys = param_order[source]
    # TODO: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    # quick and dirty patch!
    # NOTE(review): alt_params=True silently replaces m_nw/m_gr with g/M and
    # assumes the 'grid5'-style 12-parameter layout — confirm for other sources.
    if alt_params:
        param_keys = ['mdot1', 'mdot2', 'mdot3', 'qb1', 'qb2', 'qb3', 'x', 'z', 'g',
                      'M', 'd_b', 'xi_ratio']
    # TODO: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    formatted_params = plot_tools.convert_mcmc_labels(param_keys, unit_labels=unit_labels)
    n_epochs = len(master_cc.chains) - 1  # last chain is the combined/master one
    if col_wrap is None:
        col_wrap = n_epochs
    height = 3 * ceil(len(param_keys) / n_epochs)
    fig = master_cc.plotter.plot_distributions(parameters=formatted_params,
                                               col_wrap=col_wrap,
                                               figsize=[8, height],
                                               display=False)
    if add_text:
        add_epoch_text(fig, fontsize=fontsize)
    plt.tight_layout()
    save_plot(fig, prefix='multi_posteriors', save=save, source=source, version=version,
              display=display, enforce_chain_info=False)
    return fig
def plot_max_lhood(source, version, n_walkers, n_steps, verbose=True, re_interp=False,
                   display=True, save=False):
    """Re-evaluate the maximum-likelihood sample and plot its burst fit.

    Warns if the recomputed likelihood disagrees with the stored maximum,
    which indicates BurstFit or the interpolator has changed since sampling.
    """
    default_plt_options()
    max_params, max_lhood = mcmc_tools.get_max_lhood_params(source, version=version,
                                                            n_walkers=n_walkers,
                                                            n_steps=n_steps,
                                                            verbose=verbose,
                                                            return_lhood=True)
    bfit = burstfit.BurstFit(source=source, version=version, verbose=False, re_interp=re_interp)
    lhood, fig = bfit.lhood(max_params, plot=True)
    if lhood != max_lhood:
        print_warning(f'lhoods do not match (original={max_lhood:.2f}, current={lhood:.2f}). '
                      + 'BurstFit (e.g. lhood, lnhood) or interpolator may have changed')
    save_plot(fig, prefix='compare', n_dimensions=len(max_params),
              n_walkers=n_walkers, n_steps=n_steps, save=save, source=source,
              version=version, display=display)
def plot_bprop_sample(bp_sample, source, version, bprops=None, legend=True,
                      subplot_figsize=(3, 2.5), bfit=None, fontsize=14,
                      vlines=True):
    """Plot burst properties from large sample against observations

    Parameters
    ----------
    bp_sample : np.array
        obtained using mcmc_tools.bprop_sample()
    bprops : [str] (optional)
        burst properties to plot; defaults to those of the MCMC version
    bfit : BurstFit (optional)
        reuse an existing BurstFit instead of constructing one
    """
    if bfit is None:
        bfit = burstfit.BurstFit(source=source, version=version, verbose=False)
    if bprops is None:
        bprops = bfit.mcmc_version.bprops
    cc = mcmc_tools.setup_bprop_chainconsumer(chain=None, n=None, discard=None,
                                              source=source, version=version,
                                              bp_sample=bp_sample)
    bp_summary = mcmc_tools.extract_bprop_summary(cc, source=source, version=version)
    # Lay subplots out two per row.
    n_bprops = len(bprops)
    n_rows = int(np.ceil(n_bprops / 2))
    n_cols = {False: 1, True: 2}.get(n_bprops > 1)
    figsize = (n_cols * subplot_figsize[0], n_rows * subplot_figsize[1])
    fig, ax = plt.subplots(n_rows, n_cols, sharex=False, figsize=figsize)
    if n_bprops % 2 == 1 and n_bprops > 1:  # blank odd-numbered subplot
        ax[-1, -1].axis('off')
    for i, bprop in enumerate(bprops):
        subplot_row = int(np.floor(i / 2))
        subplot_col = i % 2
        if n_cols > 1:
            axis = ax[subplot_row, subplot_col]
        else:
            axis = ax
        # Rows 0/2 of bp_summary bracket row 1 (the central value); diff gives asymmetric errors.
        u_model = np.diff(bp_summary[:, :, i], axis=0)
        bfit.plot_compare(model=bp_summary[1, :, i], u_model=u_model,
                          bprop=bprop, fontsize=fontsize,
                          ax=axis, display=False, vlines=vlines,
                          legend=True if (i == 0 and legend) else False,
                          xlabel=True if (i in [n_bprops-1, ]) else False)
    fig.subplots_adjust(wspace=0.4)
    plt.show(block=False)
    return fig
def plot_autocorrelation(chain, source, version, n_points=10, load=True, save_tau=True,
                         ylims=None):
    """Plots estimated integrated autocorrelation time

    Note: Adapted from https://dfm.io/posts/autocorr/

    Parameters
    ----------
    load : bool
        load a previously saved tau estimate instead of recomputing it
    save_tau : bool
        save the recomputed estimate (only used when load=False)
    """
    mv = mcmc_versions.McmcVersion(source=source, version=version)
    params_fmt = plot_tools.convert_mcmc_labels(mv.param_keys)
    if load:
        sample_steps, autoc = mcmc_tools.load_autocorrelation(source, version=version,
                                                              n_steps=chain.shape[1])
    else:
        sample_steps, autoc = mcmc_tools.get_autocorrelation(chain, source=source,
                                                             version=version,
                                                             n_points=n_points,
                                                             save=save_tau)
    fig, ax = plt.subplots()
    for i, param in enumerate(mv.param_keys):
        ax.loglog(sample_steps, autoc[i], "o-", label=rf"{params_fmt[i]}")
    # Rule-of-thumb convergence line: tau should fall below N/10.
    ax.plot(sample_steps, sample_steps / 10.0, "--k", label=r"$\tau = N/10$")
    if ylims is None:
        xlim = ax.get_xlim()
        ylims = [5, xlim[1] / 10]
    ax.set_ylim(ylims)
    ax.set_xlabel("N steps")
    ax.set_ylabel(r"$\tau$ estimate (N)")
    ax.legend(fontsize=14, ncol=2, labelspacing=0.3)
    plt.show(block=False)
    return fig
def add_epoch_text(fig, fontsize, epochs=(1998, 2000, 2007),
                   colours=('C0', 'C2', 'C3')):
    """Write each epoch's year in the top-right corner of its subplot."""
    for idx, year in enumerate(epochs):
        axis = fig.axes[idx]
        axis.text(0.95, 0.95, str(year), color=colours[idx], fontsize=fontsize,
                  transform=axis.transAxes, va='top', ha='right')
| zacjohnston/pyburst | pyburst/mcmc/mcmc_plot.py | mcmc_plot.py | py | 23,197 | python | en | code | 3 | github-code | 13 |
41188096434 | import turtle
# Create the drawing turtle and shift it 100 px to the left of centre
# before drawing (pen up while moving, heading restored afterwards).
bob=turtle.Turtle()
bob.speed(0)  # fastest animation speed
bob.shape('turtle')
bob.left(180)
bob.penup()
bob.forward(100)
bob.right(180)
bob.pendown()
def star():
    # Innermost motif: a 5-pointed star with 10 px sides.
    bob.color('cyan')
    for i in range(5):
        bob.forward(10)
        bob.right(144)  # 144 deg turn produces a 5-pointed star
def stars():
    # Mid-level motif: a star path with 50 px sides, drawing a small
    # star() at each vertex.
    bob.color('light blue')
    for i in range(5):
        bob.forward(50)
        star()
        bob.right(144)
def starss():
    # Outer motif: a star path with 100 px sides, drawing a stars()
    # motif at each vertex.
    bob.color('cyan')
    for i in range(5):
        bob.forward(100)
        stars()
        bob.right(144)
# Top level of the fractal: a 300 px star path with a starss() motif
# at each vertex.
for i in range(5):
    bob.forward(300)
    starss()
    bob.right(144)
bob.color  # NOTE(review): no-op — attribute accessed but never called; likely leftover
| Pankuu21/PyFun | python star turtle design.py | python star turtle design.py | py | 635 | python | en | code | 0 | github-code | 13 |
41881731950 | from lib.logging_config import logger
def find_occurrences(s, ch):
    """Return the indices of every character in ``s`` equal to ``ch``.

    Source:
    https://stackoverflow.com/questions/13009675/find-all-the-occurrences-of-a-character-in-a-string
    """
    positions = []
    for index, character in enumerate(s):
        if character == ch:
            positions.append(index)
    return positions
def get_day_time_of_class(url: str) -> str:
    """Extract "ddd hh:mm" (day-of-week and time) from a class URL.

    NOTE(review): assumes the URL contains at least 4 slashes and 2–3
    dashes in the expected positions; anything else raises IndexError
    before the explicit Exception below — confirm the URL format.
    """
    # get the second index of a dash on the url string
    idxs_dashes = find_occurrences(s=url, ch="-")
    idxs_slashes = find_occurrences(s=url, ch="/")
    ## date_class_raw provides ddd-hhmm ; if provides ddd-hh, something must be done
    date_class_raw_opt_1 = url[idxs_slashes[3] + 1:idxs_dashes[1]]
    date_class_raw_opt_2 = url[idxs_slashes[3] + 1:idxs_dashes[2]]
    logger.debug(date_class_raw_opt_1)
    logger.debug(date_class_raw_opt_2)
    date_class_split = date_class_raw_opt_1.split("-")
    logger.debug(date_class_split)
    date_class = date_class_split[0]
    time_class_raw = date_class_split[1]
    logger.debug(len(date_class_split))
    if len(time_class_raw) == 4:
        # "hhmm" -> "hh:mm"
        time_class = f"{time_class_raw[0:2]}:{time_class_raw[2:]}"
    elif len(time_class_raw) < 4:
        # Fall back to the wider slice spanning the third dash, e.g. "ddd - hh - mmhrs".
        date_class_split = date_class_raw_opt_2.split("-")
        time_class_raw = [date_class_split[1].strip(), date_class_split[2].strip().strip("hrs")]
        logger.debug(time_class_raw)
        time_class = f"{time_class_raw[0]}:{time_class_raw[1]}"
    else:
        raise Exception("Error fetching day and time of class")
    logger.debug(f"{date_class} {time_class}")
    return f"{date_class} {time_class}"
| DarkestAbed/dufur | backend/app/parse_dates.py | parse_dates.py | py | 1,518 | python | en | code | 0 | github-code | 13 |
import os

# Demo script: writes a sample sentence word-by-word to a text file
# (hard-coded absolute path), then reads it back in several ways.
arquivo = open(os.path.join("C:/Users/Samsung Max/Desktop/Data Science Academy/Python/Arquivos/ManipulandoTexto.txt"), "w")
texto = "Manipulando arquivo de texto no python"
for palavra in texto.split():
    arquivo.write(f"{palavra} ")
arquivo.close()

# Re-open and echo the file contents.
arquivo = open("C:/Users/Samsung Max/Desktop/Data Science Academy/Python/Arquivos/ManipulandoTexto.txt", "r")
print(arquivo.read())
arquivo.close()

# Preferred pattern: the context manager closes the file automatically.
with open("C:/Users/Samsung Max/Desktop/Data Science Academy/Python/Arquivos/ManipulandoTexto.txt", "r") as arquivo:
    conteudo = arquivo.read()
    print(conteudo)
    print(len(conteudo))

with open(os.path.join("C:/Users/Samsung Max/Desktop/Data Science Academy/Python/Arquivos/ManipulandoTexto2.txt"), "w") as arquivo:
    arquivo.write(texto[:5])
    arquivo.write("\n")
    arquivo.write(texto[:10])

arquivo = open("C:/Users/Samsung Max/Desktop/Data Science Academy/Python/Arquivos/ManipulandoTexto2.txt", "r")
conteudo = arquivo.read()
print(conteudo)
arquivo.close()  # bug fix: was `arquivo.close` (missing parentheses), so the file was never closed
| luizsouza1993/Data_Science_Python | Manipulando Textos.py | Manipulando Textos.py | py | 1,052 | python | pt | code | 0 | github-code | 13 |
1447911471 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Zhoutao
#create_date:2017-01-12-16:59
# Python 3.5
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Zhoutao
#create_date:2017-01-09-10:28
# Python 3.5
import logging
import logging,time,sys,os,time
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from conf import settings
def logger(logtype,message):
    '''
    Logging interface function.

    Log files are kept under <BASE_DIR>/logs/<db name>/<year>/<month>/...,
    with the concrete filename looked up from settings.logfile[logtype].
    Handlers are attached, the message is emitted at DEBUG level to both
    the console and the file, then the handlers are removed again so
    repeated calls do not duplicate output.

    :param logtype: key into settings.logfile selecting the log file
    :param message: text to log
    :return: None
    '''
    #global setting
    logger = logging.getLogger(logtype)
    logger.setLevel(logging.DEBUG)
    # handler that mirrors output to the console
    input_mirror = logging.StreamHandler()
    input_mirror.setLevel(logging.DEBUG)
    # handler that writes to the log file
    dirtime = time.strftime("%Y|%m", time.localtime()).split('|')  # current [year, month]
    user_log_path = "%s/logs/%s/%s/%s"%(settings.BASE_DIR,settings.DATABASE['name'],dirtime[0],dirtime[1])  # directory for this month's logs
    os.makedirs(user_log_path,exist_ok=True)  # create the log directory first
    log_file = "%s/%s"%(user_log_path,settings.logfile[logtype])
    # print(log_file,123)
    input_file = logging.FileHandler(log_file)
    input_file.setLevel(logging.DEBUG)
    # message format shared by both handlers
    format_type = logging.Formatter("%(asctime)s - %(message)s")
    # apply the format
    input_mirror.setFormatter(format_type)
    input_file.setFormatter(format_type)
    logger.addHandler(input_mirror)
    logger.addHandler(input_file)
    logger.debug(message)
    # detach handlers so the next call starts clean
    logger.removeHandler(input_file)
    logger.removeHandler(input_mirror)
    # return logger
| 248808194/python | M2/ATM/core/logger.py | logger.py | py | 1,704 | python | en | code | 0 | github-code | 13 |
def number_of_customers_per_state(customers_dict):
    """Count how many customers live in each state.

    Parameters
    ----------
    customers_dict : dict
        Maps a state code (e.g. 'UT') to a list of customer records
        (dicts such as {'name': ..., 'age': ...}).

    Returns
    -------
    dict
        Maps each state code to the number of customers in that state.
        The mapping is also printed (preserving the original side effect).
    """
    # Bug fix: the original tracked a single running maximum over every
    # customer's 'age' and stored that same number for every state, which
    # is not "number of customers per state". The count is simply the
    # length of each state's customer list.
    final_dict = {state: len(customer_list)
                  for state, customer_list in customers_dict.items()}
    print(final_dict)
    return final_dict
# Sample input: customers grouped by US state abbreviation.
customers = {
    'UT': [{
        'name': 'Mary',
        'age': 28
    }, {
        'name': 'John',
        'age': 31
    }, {
        'name': 'Robert',
        'age': 16
    }],
    'NY': [{
        'name': 'Linda',
        'age': 71
    }],
    'CA': [{
        'name': 'Barbara',
        'age': 15
    }, {
        'name': 'Paul',
        'age': 18
    }]
}
number_of_customers_per_state(customers)
| gsakkas/seq2parse | src/tests/parsing_test_29.py | parsing_test_29.py | py | 827 | python | en | code | 8 | github-code | 13 |
16566211698 | import hashlib
def encode_md5(temp: str) -> str:
    """Return the hexadecimal MD5 digest of ``temp`` encoded as UTF-8."""
    digest = hashlib.md5(temp.encode(encoding='utf-8'))
    return digest.hexdigest()
def file_md5(file_path) -> str:
    """Return the lowercase hex MD5 of the file at ``file_path``.

    The file is read in chunks so arbitrarily large files never have to
    fit in memory.
    """
    hasher = hashlib.md5()
    with open(file_path, 'rb') as stream:
        while chunk := stream.read(8096):
            hasher.update(chunk)
    return str(hasher.hexdigest()).lower()
| 14Days/back_web | app/utils/md5.py | md5.py | py | 453 | python | en | code | 0 | github-code | 13 |
30687027954 |
from django.contrib import admin
from django.urls import path, include
from rest_framework.routers import SimpleRouter
from core.views import (ProdutoAPIView, ProdutosAPIView, PedidoAPIView, PedidosAPIView,EnderecoAPIView,EnderecosAPIView,
                        EmpresasAPIView, EmpresaAPIView,ClientesAPIView, ClienteAPIView, verificar_login, ProdutoViewSet,
                        PedidoViewSet,EnderecoViewSet,EmpresaViewSet,ClienteViewSet)

# NOTE(review): `router.urls` is never added to `urlpatterns`, so these
# ViewSet registrations are currently dead — confirm whether
# `path('', include(router.urls))` was intended. `admin` and `include`
# are also imported but unused below.
router = SimpleRouter()
router.register('produtos',ProdutoViewSet)
router.register('pedidos',PedidoViewSet)
router.register('enderecos',EnderecoViewSet)
router.register('empresas',EmpresaViewSet)
router.register('clientes',ClienteViewSet)

# NOTE(review): most routes call `.as_view()` on an *instance*
# (e.g. PedidoAPIView().as_view()); this works because as_view is a
# classmethod, but the conventional form is PedidoAPIView.as_view().
urlpatterns = [
    path('verificar_login/', verificar_login),
    path('produtos/<int:pk>/',ProdutoAPIView.as_view(), name='produto'),
    path('pedidos/<int:pk>/', PedidoAPIView().as_view(), name='pedido'),
    path('enderecos/<int:pk>/', EnderecoAPIView().as_view(), name='endereco'),
    path('empresas/<int:pk>/', EmpresaAPIView().as_view(), name='empresa'),
    path('clientes/<int:pk>/', ClienteAPIView().as_view(), name='cliente'),
    path('produtos/', ProdutosAPIView.as_view(), name='produtos'),
    path('pedidos/', PedidosAPIView().as_view(), name='pedidos'),
    path('enderecos/', EnderecosAPIView().as_view(), name='enderecos'),
    path('empresas/', EmpresasAPIView().as_view(), name='empresas'),
    path('clientes/', ClientesAPIView().as_view(), name='clientes'),
    # NOTE(review): both nested routes reuse the list/detail views above —
    # 'empresa_produtos' maps to EmpresasAPIView, which looks like a
    # copy-paste; confirm the intended view.
    path('empresas/<int:empresa_pk>/produtos/', EmpresasAPIView().as_view(), name='empresa_produtos'),
    path('empresas/<int:empresa_pk>/produtos/<int:produto_pk>/', ProdutoAPIView().as_view(), name='empresa_produto'),
    # path('', include('produtos.urls')),
]
| Samuelssj/pede_comer | backend_pede_comer/core/urls.py | urls.py | py | 1,748 | python | pt | code | 1 | github-code | 13 |
34672445786 | #融合Gdelt和航空网络数据
# Merge GDELT event data with the aviation (flight) network data.
import pandas as pd

# Column names of the raw GDELT daily export files (tab-separated, no header).
header_names=['GlobalEventID', 'Day', 'MonthYear', 'Year', 'FractionDate',
              'Actor1Code', 'Actor1Name', 'Actor1CountryCode', 'Actor1KnownGroupCode',
              'Actor1EthnicCode', 'Actor1Religion1Code', 'Actor1Religion2Code',
              'Actor1Type1Code', 'Actor1Type2Code', 'Actor1Type3Code', 'Actor2Code',
              'Actor2Name', 'Actor2CountryCode', 'Actor2KnownGroupCode',
              'Actor2EthnicCode', 'Actor2Religion1Code', 'Actor2Religion2Code',
              'Actor2Type1Code', 'Actor2Type2Code', 'Actor2Type3Code', 'IsRootEvent',
              'EventCode', 'EventBaseCode', 'EventRootCode',
              'QuadClass', 'GoldsteinScale', 'NumMentions', 'NumSources',
              'NumArticles', 'AvgTone', 'Actor1Geo_Type', 'Actor1Geo_Fullname',
              'Actor1Geo_CountryCode', 'Actor1Geo_ADM1Code',
              'Actor1Geo_Lat', 'Actor1Geo_Long', 'Actor1Geo_FeatureID', 'Actor2Geo_Type',
              'Actor2Geo_Fullname', 'Actor2Geo_CountryCode', 'Actor2Geo_ADM1Code',
              'Actor2Geo_Lat', 'Actor2Geo_Long',
              'Actor2Geo_FeatureID', 'ActionGeo_Type', 'ActionGeo_Fullname',
              'ActionGeo_CountryCode', 'ActionGeo_ADM1Code',
              'ActionGeo_Lat', 'ActionGeo_Long', 'ActionGeo_FeatureID', 'DATEADDED',
              'SOURCEURL.']
# Dates Jan-Apr 2020, encoded as YYYYMMDD integers (2020 is a leap year, so Feb runs to 29).
date=list(range(20200101,20200132))
date+=list(range(20200201,20200230))
date+=list(range(20200301,20200332))
date+=list(range(20200401,20200431))
output=[]
tag_dict={}  # canonical orientation of each undirected country pair, shared across days
for idt,day in enumerate(date):
    data=pd.read_table("./events/"+str(day)+".export.CSV",engine='python',header=None, names=header_names)
    # Keep only the columns we need and drop rows with any missing values.
    data=data[["Actor1CountryCode","Actor2CountryCode",'QuadClass', 'GoldsteinScale', 'NumMentions', 'NumSources','NumArticles', 'AvgTone']]
    data=data.dropna(axis=0,how='any')
    # Load the country-code lookup table and keep only European countries.
    country=pd.read_csv("country.csv")
    country=country[country["Continent_Code"]=="EU"]
    # Intermediate step: join on Actor1 to map 3-letter -> 2-letter country codes.
    temp_data=pd.merge(data,country[["Continent_Code","Three_Letter_Country_Code","Two_Letter_Country_Code"]],
                       left_on="Actor1CountryCode",right_on="Three_Letter_Country_Code",how="inner")
    del temp_data["Three_Letter_Country_Code"]
    del temp_data["Continent_Code"]
    temp_data=temp_data.rename(columns={"Two_Letter_Country_Code":"Actor1Country2Code"})
    # Final join: the same mapping for Actor2.
    n_data=pd.merge(temp_data,country[["Continent_Code","Three_Letter_Country_Code","Two_Letter_Country_Code"]],
                    left_on="Actor2CountryCode",right_on="Three_Letter_Country_Code",how="inner")
    del n_data["Three_Letter_Country_Code"]
    del n_data["Continent_Code"]
    n_data=n_data.rename(columns={"Two_Letter_Country_Code":"Actor2Country2Code"})
    # Collapse A-B / B-A pairs onto one undirected flow key before aggregating.
    further_group=[]
    for index, row in n_data.iterrows():
        row=dict(row)
        a1=row["Actor1Country2Code"]
        a2=row["Actor2Country2Code"]
        if a1+"-"+a2 in tag_dict.keys():
            row["index"]=a1+"-"+a2
        elif a2+"-"+a1 in tag_dict.keys():
            row["index"]=a2+"-"+a1
        else:
            tag_dict[a1+"-"+a2]=1
            row["index"]=a1+"-"+a2
        further_group.append(row)
    further_group=pd.DataFrame(further_group)
    # Mean event metrics per undirected country pair for this day.
    temp=further_group.groupby(["index"])[["QuadClass","GoldsteinScale","NumMentions","NumSources","NumArticles","AvgTone"]].mean()
    # Intra-European flights of the same day, counted per undirected pair.
    flight=pd.read_csv("../FlightData/day/"+str(day)+".csv")
    flight=flight[(flight["org_continent"]=="EU")&(flight["dst_continent"]=="EU")]
    flight_dict={}
    for index,row in flight.iterrows():
        row=dict(row)
        a1=row["org_country"]
        a2=row["dst_country"]
        if a1+"-"+a2 in flight_dict.keys():
            flight_dict[a1+"-"+a2]=flight_dict[a1+"-"+a2]+1
        elif a2+"-"+a1 in flight_dict.keys():
            flight_dict[a2+"-"+a1]=flight_dict[a2+"-"+a1]+1
        else:
            flight_dict[a1+"-"+a2]=1
    # Attach the flight count to each pair; pairs with no flights are skipped.
    lines=[]
    for index,row in temp.iterrows():
        row=dict(row)
        if index in flight_dict.keys():
            row["lines"]=flight_dict[index]
        else:
            sp=index.split("-")
            new_index=sp[1]+"-"+sp[0]
            if new_index in flight_dict.keys():
                row["lines"]=flight_dict[new_index]
            else:
                continue
        row["date"]=idt
        row["flow"]=index
        lines.append(row)
    output+=lines
    print(day)
pd.DataFrame(output).to_csv("output.csv")
19241863240 | from fvcore.common.registry import Registry
# from .backbone import Backbone
# Registry holding all classification-head builders, looked up by cfg.NAME
# in build_cls_head() below.
CLS_HEAD_REGISTRY = Registry("CLS_HEAD")
# Fix: the original __doc__ was copy-pasted from a localization-head
# registry and described the wrong component.
CLS_HEAD_REGISTRY.__doc__ = """
Registry for CLASSIFICATION HEAD, which produces classification outputs from features.
The registered object must be a callable that accepts two arguments:
1. A :class:`tao.config.CfgNode`
2. A :class:`tracker.layers.ShapeSpec`, which contains the input shape specification.
Registered object must return an instance of the classification head module.
"""
def build_cls_head(cfg, input_shape=None):
    """
    Build a classification head from ``cfg.NAME`` via CLS_HEAD_REGISTRY.

    NOTE(review): ``input_shape`` is currently unused — the registered
    builder is called with ``cfg`` only; confirm whether the shape should
    be forwarded.

    Returns:
        the instantiated classification head
    """
    # if input_shape is None:
    #     input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))
    cls_head = CLS_HEAD_REGISTRY.get(cfg.NAME)(cfg)
    # assert isinstance(backbone, Backbone)
    return cls_head
| Flowerfan/Trackron | trackron/models/cls_heads/build.py | build.py | py | 887 | python | en | code | 46 | github-code | 13 |
34487583696 | import csv
def save_to_file(jobs):
    """Write job dicts to ``jobs.csv`` with a header row.

    Parameters
    ----------
    jobs : iterable of dict
        Each dict's values are written in insertion order, so they are
        expected to be (title, company, location, link).
    """
    # newline='' stops the csv module from emitting a blank line after every
    # row on Windows; see
    # http://pythonstudy.xyz/python/article/207-CSV-%ED%8C%8C%EC%9D%BC-%EC%82%AC%EC%9A%A9%ED%95%98%EA%B8%B0
    # Bug fix: the original opened the file and never closed it; the context
    # manager guarantees the buffer is flushed and the handle released.
    with open("jobs.csv", mode="w", encoding="UTF-8", newline='') as file:
        writer = csv.writer(file)
        writer.writerow(["title", "company", "location", "link"])
        for job in jobs:
            writer.writerow(list(job.values()))
    return
| purple402/webscrapper | lecture/save.py | save.py | py | 629 | python | ko | code | 0 | github-code | 13 |
36590749345 | from mysqlconnection import connectToMySQL
from flask import flash
class Survey:
    """Model for one dojo-survey submission, backed by the `surveys` table."""
    def __init__(self, data):
        # `data` is one row from the surveys table (dict-like).
        self.id = data['id']
        self.name = data['name']
        self.location = data['location']
        self.language = data['language']
        self.comments = data['comments']
        self.created_at = data['created_at']
        self.updated_at = data['updated_at']
    @classmethod
    def save(cls, data):
        """Insert a new survey row; returns the new row id from query_db()."""
        query = "INSERT INTO surveys (name, location, language, comments, created_at, updated_at) VALUES ( %(name)s, %(location)s, %(language)s, %(comments)s, NOW(), NOW() );"
        return connectToMySQL('dojo_survey_schema').query_db(query, data)
    @classmethod
    def get_recent(cls):
        """Return the most recently inserted survey (highest id) as a Survey."""
        query = "SELECT * FROM surveys WHERE id = (SELECT max(id) FROM surveys);"
        result = connectToMySQL('dojo_survey_schema').query_db(query)
        return cls(result[0])
    @staticmethod
    def validate_survey(survey):
        """Flash a message for every invalid field; return True only if all pass."""
        is_valid = True
        if len(survey['name']) < 3:
            flash("Name must be at least 3 characters.")
            is_valid = False
        if len(survey['location']) < 3:
            flash("Location must be at least 3 characters.")
            is_valid = False
        if len(survey['language']) < 1:
            flash("Please Select a Language.")
            is_valid = False
        if len(survey['comments']) < 3:
            flash("Comment must be at least 3 characters.")
            is_valid = False
        return is_valid
15832960846 | import torch
import torch.nn as nn
import torch.optim as optim
class PPO():
    """Proximal Policy Optimization (clipped-surrogate variant).

    Wraps an actor-critic module and updates it from rollout storage using
    the PPO clipped objective plus a value loss and an entropy bonus.
    """
    def __init__(self,
                 actor_critic,
                 value_loss_coef = 0.5,
                 entropy_coef = 0.01,
                 num_mini_batch = 32,
                 clip_param = 0.2,
                 symmetry_coef=0,
                 ppo_epoch = 10,
                 lr=3e-4,
                 eps=1e-5,
                 max_grad_norm=0.5,
                 use_clipped_value_loss=True,
                 mirror_obs=None,
                 mirror_act=None):
        self.actor_critic = actor_critic
        self.clip_param = clip_param          # epsilon of the PPO clip objective
        self.ppo_epoch = ppo_epoch            # optimization passes per rollout
        self.num_mini_batch = num_mini_batch
        self.value_loss_coef = value_loss_coef
        self.entropy_coef = entropy_coef
        self.max_grad_norm = max_grad_norm
        self.use_clipped_value_loss = use_clipped_value_loss
        self.optimizer = optim.Adam(actor_critic.parameters(), lr=lr, eps=eps)
        # Symmetry/mirroring hooks — stored but not used in update();
        # kept for interface compatibility.
        self.symmetry_coef = symmetry_coef
        self.mirror_obs = mirror_obs
        self.mirror_act = mirror_act
        self.is_cuda = next(actor_critic.parameters()).is_cuda

    def update(self, rollouts):
        """Run `ppo_epoch` passes of minibatch updates over `rollouts`.

        Returns
        -------
        (float, float, float)
            Mean value loss, mean action (policy) loss and mean entropy
            per minibatch update.
        """
        advantages = rollouts.returns[:-1] - rollouts.value_preds[:-1]
        # Normalize advantages for gradient stability.
        advantages = (advantages - advantages.mean()) / (
            advantages.std() + 1e-5)
        value_loss_epoch = 0
        action_loss_epoch = 0
        dist_entropy_epoch = 0
        for e in range(self.ppo_epoch):
            data_generator = rollouts.feed_forward_generator(advantages, self.num_mini_batch)
            for sample in data_generator:
                obs_batch, delta_batch, value_preds_batch, return_batch, masks_batch, \
                    old_log_probs, adv_targ, _, _ = sample
                # Single forward pass for all steps in the minibatch.
                values, delta_log_probs, dist_entropy = self.actor_critic.evaluate_actions(
                    obs_batch, masks_batch, delta_batch)
                # Bug fix: `use_clipped_value_loss` was stored but ignored —
                # the clipped value loss was applied unconditionally. The flag
                # now selects between the clipped and the plain MSE value loss
                # (default True preserves previous behaviour).
                if self.use_clipped_value_loss:
                    value_pred_clipped = value_preds_batch + \
                        (values - value_preds_batch).clamp(-self.clip_param, self.clip_param)
                    value_losses = (values - return_batch).pow(2)
                    value_losses_clipped = (
                        value_pred_clipped - return_batch).pow(2)
                    value_loss = 0.5 * torch.max(value_losses,
                                                 value_losses_clipped).mean()
                else:
                    value_loss = 0.5 * (return_batch - values).pow(2).mean()
                # Clipped surrogate policy objective.
                ratio = torch.exp(delta_log_probs - old_log_probs)
                surr1 = ratio * adv_targ
                surr2 = torch.clamp(ratio, 1.0 - self.clip_param,
                                    1.0 + self.clip_param) * adv_targ
                action_loss = -torch.min(surr1, surr2).mean()
                self.optimizer.zero_grad()
                (value_loss * self.value_loss_coef + action_loss
                 - dist_entropy * self.entropy_coef).backward()
                torch.nn.utils.clip_grad_norm_(self.actor_critic.parameters(),
                                               self.max_grad_norm)
                self.optimizer.step()
                value_loss_epoch += value_loss.item()
                action_loss_epoch += action_loss.item()
                dist_entropy_epoch += dist_entropy.item()
        num_updates = self.ppo_epoch * self.num_mini_batch
        value_loss_epoch /= num_updates
        action_loss_epoch /= num_updates
        dist_entropy_epoch /= num_updates
        return value_loss_epoch, action_loss_epoch, dist_entropy_epoch
| aayushwadhwa/av-simulation | ppo.py | ppo.py | py | 3,654 | python | en | code | 0 | github-code | 13 |
74316503377 | import numpy as np
import matplotlib.pyplot as plt
def I(delta_k):
    """Normalized second-harmonic intensity sin^2(2*delta_k) / (2*delta_k)^2.

    Implemented via np.sinc (sin(pi x)/(pi x)) so the removable singularity
    at delta_k = 0 evaluates to its analytic limit 1 instead of producing
    a NaN / divide-by-zero warning.
    """
    return np.sinc(2 * delta_k / np.pi) ** 2
# Bug fix: np.linspace requires an integer sample count; `1e5` is a float
# and raises TypeError on modern NumPy.
Dk = np.linspace(-2 * np.pi, 2 * np.pi, 100000)
plt.figure(figsize=(16, 9))
plt.plot(Dk, I(Dk), linewidth=4)
plt.ylabel(r'$\Gamma \, / \, \mathrm{a.u.} $', fontsize=20)
plt.xlabel(r'$\Delta k \, / \, \mathrm{a.u.} $', fontsize=20)
plt.tick_params(axis='both', labelsize=20)
plt.savefig('./efficiency_second_harmonic.png', bbox_inches='tight')
36941557518 | """This script downloads all the csv files from a s3 bucket, updates the content, then write to another local directory
as the same filename."""
import boto3
import csv
import os
from tempfile import NamedTemporaryFile  # NOTE(review): imported but unused

# Download every CSV under the prefix, rewrite git_datetime values
# ("T" separator -> space), and save the result under a parallel
# local "output" directory with the same relative path.
s3_client = boto3.client('s3', 'us-east-1')
s3_resource = boto3.resource('s3', 'us-east-1')
BUCKET_NAME = 'plant-evaluator-dashboard'
PREFIX = 'FTTA-full-stack'
# We would need to use a paginator here if the number of objects we are listing might be over 1000.
paginator = s3_client.get_paginator('list_objects')
page_iterator = paginator.paginate(Bucket=BUCKET_NAME, Prefix=PREFIX + '/dashboard-data')
objs_list = []
for page in page_iterator:
    try:
        # Extend the list of objects Contents to objs_list on every page.
        objs_list.extend([obj['Key'] for obj in page['Contents'] if '.csv' in obj['Key']])
    except KeyError as e:
        if 'Contents' in str(e):
            print("s3 bucket / prefix empty.")
for obj in objs_list:
    current_csv_name = os.path.basename(obj)  # NOTE(review): assigned but never used
    local_csv_path = os.path.join('/home/yilanzhu/Desktop/', 'plant_eval_csvs', obj)
    output_csv_path = os.path.join('/home/yilanzhu/Desktop/', 'output', obj)
    os.makedirs(os.path.dirname(local_csv_path), exist_ok=True)  # succeeds even if directory exists.
    os.makedirs(os.path.dirname(output_csv_path), exist_ok=True)  # succeeds even if directory exists.
    s3_resource.Bucket(BUCKET_NAME).download_file(obj, local_csv_path)
    print("Downloaded: {}".format(local_csv_path))
    fieldnames = ['git_datetime', 'git_hash', 'error_type', 'rms_error', 'scenario']
    # NOTE(review): writer.writeheader() is never called, so the output file
    # has no header row even though fieldnames are declared — confirm intent.
    with open(local_csv_path, 'r') as read_csv_file, open(output_csv_path, 'w+') as write_csv_file:
        reader = csv.DictReader(read_csv_file)
        writer = csv.DictWriter(write_csv_file, fieldnames=fieldnames)
        for row in reader:
            # Check for 'T's in git_datetime.
            if 'T' in row['git_datetime']:
                row['git_datetime'] = row['git_datetime'].replace('T', ' ')
                writer.writerow(row)
            else:
                writer.writerow(row)
        # NOTE(review): dead code — `reader` is already exhausted by the loop
        # above, so this writes nothing.
        writer.writerows(reader)
| amandazhuyilan/Breakfast-Burrito | CheatSheet/download_and_update_cvs_inline.py | download_and_update_cvs_inline.py | py | 2,211 | python | en | code | 3 | github-code | 13 |
29162771731 | import dataclasses
import typing
from collections.abc import Callable, Iterable
from algokit_utils import ApplicationSpecification, CallConfig, MethodConfigDict, MethodHints, OnCompleteActionName
from algosdk.abi import Method
from algokit_client_generator import utils
@dataclasses.dataclass(kw_only=True)
class ContractArg:
    """A single ABI method argument as rendered into the generated client."""
    name: str  # argument name (or a synthesized "argN" when unnamed)
    abi_type: str  # ABI type string, e.g. "uint64"
    python_type: str  # mapped Python type used in generated signatures
    desc: str | None  # optional description from the ABI spec
    has_default: bool = False  # True when the app spec declares a default argument
@dataclasses.dataclass(kw_only=True)
class ABIStructField:
    """One field of an ABI struct."""
    name: str  # field name
    abi_type: str  # ABI type string of the field
    python_type: str  # mapped Python type of the field
class ABIStruct:
abi_name: str
struct_class_name: str
fields: list[ABIStructField]
@dataclasses.dataclass(kw_only=True)
class ABIContractMethod:
    """Everything needed to render one ABI method in the generated client."""
    method: Method  # underlying algosdk ABI method
    hints: MethodHints  # structs/defaults/call-config hints from the app spec
    abi_type: str  # ABI return type string
    python_type: str  # mapped Python return type (struct class name if a struct)
    result_struct: ABIStruct | None  # struct describing the return value, if any
    args: list[ContractArg]
    structs: list[ABIStruct]  # structs referenced by this method
    args_class_name: str  # generated args class name for this method
    client_method_name: str  # unique method name used on the generated client
    deploy_args_class_name: str
    deploy_create_args_class_name: str
@dataclasses.dataclass(kw_only=True)
class ContractMethod:
    """An invocable method (ABI or bare) with its on-completion actions."""
    abi: ABIContractMethod | None  # None for a bare (non-ABI) call
    on_complete: list[OnCompleteActionName]
    call_config: typing.Literal["call", "create"]
@dataclasses.dataclass(kw_only=True)
class ContractMethods:
    """All contract methods, bucketed by the on-completion action they handle."""
    no_op: list[ContractMethod] = dataclasses.field(default_factory=list)
    create: list[ContractMethod] = dataclasses.field(default_factory=list)
    update_application: list[ContractMethod] = dataclasses.field(default_factory=list)
    delete_application: list[ContractMethod] = dataclasses.field(default_factory=list)
    opt_in: list[ContractMethod] = dataclasses.field(default_factory=list)
    close_out: list[ContractMethod] = dataclasses.field(default_factory=list)

    @property
    def all_methods(self) -> Iterable[ContractMethod]:
        """Yield every method across all buckets."""
        yield from self.no_op
        yield from self.create
        yield from self.update_application
        yield from self.delete_application
        yield from self.opt_in
        yield from self.close_out

    @property
    def all_abi_methods(self) -> Iterable[ContractMethod]:
        """Only the methods that have an ABI definition (excludes bare calls)."""
        return (m for m in self.all_methods if m.abi)

    @property
    def has_abi_methods(self) -> bool:
        return any(self.all_abi_methods)

    def add_method(self, abi: ABIContractMethod | None, method_config: MethodConfigDict) -> None:
        """File `abi` under each on-complete bucket its call config allows.

        CALL configs go into the matching per-on-complete bucket; all
        CREATE-capable on-completes are merged into a single `create` entry.
        """
        create_on_completes = []
        for on_complete, call_config in method_config.items():
            if call_config & CallConfig.CALL != CallConfig.NEVER:
                # bucket attribute names match the OnCompleteActionName values
                collection = getattr(self, on_complete)
                contract_method = ContractMethod(
                    abi=abi,
                    call_config="call",
                    on_complete=[on_complete],
                )
                collection.append(contract_method)
            if call_config & CallConfig.CREATE != CallConfig.NEVER:
                create_on_completes.append(on_complete)
        if create_on_completes:
            contract_method = ContractMethod(
                abi=abi,
                call_config="create",
                on_complete=create_on_completes,
            )
            self.create.append(contract_method)
def group_by_overloads(methods: list[Method]) -> Iterable[list[Method]]:
    """Group ABI methods by name, preserving first-seen order of names."""
    grouped: dict[str, list[Method]] = {}
    for abi_method in methods:
        if abi_method.name not in grouped:
            grouped[abi_method.name] = []
        grouped[abi_method.name].append(abi_method)
    return grouped.values()
def use_method_name(method: Method) -> str:
    """Naming strategy: the plain ABI method name (used when not overloaded)."""
    name: str = method.name
    return name
def use_method_signature(method: Method) -> str:
    """Naming strategy for overloads: the full signature with '(', ')' and ',' mapped to '_'."""
    return method.get_signature().translate(str.maketrans("(),", "___"))
def find_naming_strategy(methods: list[Method]) -> Callable[[Method], str]:
    """Pick a unique-name strategy: plain name when unambiguous, else full signature."""
    return use_method_name if len(methods) == 1 else use_method_signature
def get_contract_methods(
    app_spec: ApplicationSpecification, used_module_symbols: set[str], used_client_symbols: set[str]
) -> ContractMethods:
    """Collect all bare and ABI methods of `app_spec` for client generation.

    Overloaded method names fall back to signature-based naming; struct
    hints are de-duplicated by ABI name and mapped to unique Python class
    names. `used_module_symbols` / `used_client_symbols` are mutated to
    reserve the generated names.
    """
    result = ContractMethods()
    # Bare (non-ABI) calls first.
    result.add_method(None, app_spec.bare_call_config)
    structs: dict[str, ABIStruct] = {}  # ABI struct name -> shared ABIStruct
    for methods in group_by_overloads(app_spec.contract.methods):
        naming_strategy = find_naming_strategy(methods)
        for method in methods:
            method_name = naming_strategy(method)
            hints = app_spec.hints[method.get_signature()]
            args_class_name = utils.get_unique_symbol_by_incrementing(
                used_module_symbols,
                utils.get_class_name(method_name, "args"),
            )
            parameter_type_map: dict[str, str] = {}  # param name -> struct class name
            method_structs: list[ABIStruct] = []
            result_struct: ABIStruct | None = None
            for parameter, struct in hints.structs.items():
                abi_name = struct["name"]
                abi_struct = structs.get(abi_name)
                if not abi_struct:
                    # TODO: check for collisions where the same name refers to different structures
                    struct_class_name = utils.get_unique_symbol_by_incrementing(
                        used_module_symbols, utils.get_class_name(abi_name)
                    )
                    abi_struct = ABIStruct(
                        abi_name=abi_name,
                        struct_class_name=struct_class_name,
                        fields=[
                            ABIStructField(
                                name=name,
                                abi_type=abi_type,
                                python_type=utils.map_abi_type_to_python(abi_type),
                            )
                            # TODO: nested structs?!
                            for name, abi_type in struct["elements"]
                        ],
                    )
                    structs[abi_name] = abi_struct
                if parameter == "output":  # TODO: check return signature
                    result_struct = abi_struct
                parameter_type_map[parameter] = abi_struct.struct_class_name
                method_structs.append(abi_struct)
            abi = ABIContractMethod(
                method=method,
                hints=hints,
                abi_type=str(method.returns),
                python_type=result_struct.struct_class_name
                if result_struct
                else utils.map_abi_type_to_python(str(method.returns)),
                result_struct=result_struct,
                structs=method_structs,
                args=[
                    ContractArg(
                        name=arg.name or f"arg{idx}",
                        abi_type=str(arg.type),
                        python_type=parameter_type_map[arg.name]
                        if arg.name in parameter_type_map
                        else utils.map_abi_type_to_python(str(arg.type)),
                        desc=arg.desc,
                        has_default=arg.name in hints.default_arguments,
                    )
                    for idx, arg in enumerate(method.args)
                ],
                args_class_name=args_class_name,
                deploy_args_class_name=f"Deploy[{args_class_name}]",
                deploy_create_args_class_name=f"DeployCreate[{args_class_name}]",
                client_method_name=utils.get_unique_symbol_by_incrementing(
                    used_client_symbols, utils.get_method_name(method_name)
                ),
            )
            result.add_method(abi, hints.call_config)
    return result
| algorandfoundation/algokit-client-generator-py | src/algokit_client_generator/spec.py | spec.py | py | 7,466 | python | en | code | 2 | github-code | 13 |
26542418440 | import math
import serial
import serial.tools.list_ports
import time
# --- Mechanical dimensions of the arm (millimetres / degrees) ---
upper_arm_length = 220 #mm
lower_arm_length = 160 #mm
basis_height = 253 #mm
hand_length = 65 #mm #65 + 142
hand_radial_offset = 0 #mm
hand_radial_offset_rot=0 #deg
# --- Encoder zero offsets and step counts per full revolution, per joint ---
# Negative step counts indicate the encoder counts opposite to the angle sign.
base_encoder_0 = 9600
base_encoder_steps = 57600
upper_arm_encoder_0 = 2200
upper_arm_encoder_steps = 57600
lower_arm_encoder_0 = 14400
lower_arm_encoder_steps = -57600
hand_roll_encoder_0 = 2000
hand_roll_encoder_steps = -38400
hand_encoder_0 = 9700
hand_encoder_steps = -38400
# Serial port handle; configured and opened later by open_connection().
# (The original used the confusing chained assignment ``ser=ser = serial.Serial()``.)
ser = serial.Serial()
# Set True by send_reset() after homing; send_alphas() refuses to move before that.
calibrated = False
def set_tool(height,radial_offset,offset_rot):
    """Configure the mounted tool for the kinematics functions.

    height        -- tool height in mm (added to the 65 mm bare wrist length)
    radial_offset -- radial offset of the tool tip from the wrist axis, mm
    offset_rot    -- rotation of that offset, degrees

    Mutates the module-level globals read by FK/IK and the *_4DOF variants.
    """
    global hand_length,hand_radial_offset,hand_radial_offset_rot
    hand_length=height+65  # 65 mm = wrist length without any tool
    hand_radial_offset=radial_offset
    hand_radial_offset_rot=offset_rot
def open_connection(i):
    """Open serial connection *i* from the list of detected ports at 500000 baud.

    Blocks (polling once per second) until the port reports open.
    Stores the handle in the module-level ``ser``.
    """
    global ser
    #open serial connection
    ports = serial.tools.list_ports.comports()
    available_ports = []
    for p in ports:
        available_ports.append(p.device)
    print("connecting to port: " + available_ports[i])
    ser = serial.Serial(
        port=available_ports[i],
        baudrate=500000,
    )
    # Close first if it is already open, then (re)open for a clean state.
    if ser.isOpen():
        ser.close()
    ser.open()
    time.sleep(1) #pause needed to open the port
    while (not ser.isOpen()):
        time.sleep(1)
    print("connected")
def close_connection():
    """Close the serial connection to the arm."""
    ser.close()
def FK(alphas):
    """Forward kinematics: joint angles (deg) -> [x, y, z, pitch, roll].

    alphas = [base, upper_arm, lower_arm, hand, hand_roll] in degrees.
    x/y/z are in mm; pitch/roll in degrees. Uses the module-level link
    lengths and tool configuration (see set_tool).
    """
    base_alpha,upper_arm_alpha,lower_arm_alpha,hand_alpha,hand_roll_alpha = alphas
    # Radial distance r and height z of the wrist in the arm's vertical plane.
    r = math.sin(math.radians(upper_arm_alpha))*upper_arm_length
    z = math.cos(math.radians(upper_arm_alpha))*upper_arm_length
    r += math.sin(math.radians(lower_arm_alpha+upper_arm_alpha))*lower_arm_length
    z += math.cos(math.radians(lower_arm_alpha+upper_arm_alpha))*lower_arm_length
    roll = hand_roll_alpha
    # Tool pitch relative to horizontal accumulates all arm joints minus 90 deg.
    pitch=upper_arm_alpha+lower_arm_alpha+hand_alpha-90
    z+=basis_height
    # Extend from wrist to tool tip along the pitch direction.
    z-=math.sin(math.radians(pitch))*hand_length
    r+=math.cos(math.radians(pitch))*hand_length
    # Rotate the planar (r, z) solution around the base axis.
    x = math.cos(math.radians(base_alpha))*r
    y = math.sin(math.radians(base_alpha))*r
    return [x,y,z,pitch,roll]
def IK(pos):
    """Inverse kinematics: [x, y, z, pitch, roll] -> joint angles in degrees.

    Returns the sentinel tuple (9999, 9999, 9999, 9999, 9999) when the target
    is outside the reachable annulus of the two-link arm.
    """
    x,y,z,pitch,roll = pos
    roll = math.radians(roll)
    pitch = math.radians(pitch)
    # Remove base height and tool offset to get the wrist position.
    z-=basis_height
    z+=math.sin(pitch)*hand_length
    r = math.sqrt(pow(x,2)+pow(y,2))
    r-=math.cos(pitch)*hand_length
    # Straight-line distance shoulder -> wrist; must lie between the
    # difference and the sum of the two link lengths to be reachable.
    span = math.sqrt(pow(z,2)+pow(r,2))
    if span > (upper_arm_length+lower_arm_length) or span<abs(upper_arm_length-lower_arm_length):
        return(9999,9999,9999,9999,9999)
    # Law of cosines for the elbow and shoulder angles.
    lower_arm_alpha = math.pi-math.acos((pow(lower_arm_length,2)+pow(upper_arm_length,2)-pow(span,2))/(2.0*lower_arm_length*upper_arm_length))
    upper_arm_alpha = -math.acos((pow(span,2)+pow(upper_arm_length,2)-pow(lower_arm_length,2))/(2*span*upper_arm_length))
    upper_arm_alpha += math.atan2(r,z)
    base_alpha = math.atan2(y,x)
    hand_roll_alpha=roll
    # Hand joint compensates the arm so the tool reaches the requested pitch.
    hand_alpha = pitch-(upper_arm_alpha+lower_arm_alpha-math.pi*0.5)
    return [math.degrees(base_alpha),math.degrees(upper_arm_alpha),math.degrees(lower_arm_alpha),math.degrees(hand_alpha),math.degrees(hand_roll_alpha)]
def FK_4DOF(alphas):
    """Forward kinematics for 4-DOF use (tool pointing down): -> [x, y, z, phi].

    phi is the tool rotation in degrees; the radial tool offset configured via
    set_tool() is applied in the phi direction.
    """
    base_alpha,upper_arm_alpha,lower_arm_alpha,hand_alpha,hand_roll_alpha = alphas
    r = math.sin(math.radians(upper_arm_alpha))*upper_arm_length
    z = math.cos(math.radians(upper_arm_alpha))*upper_arm_length
    r += math.sin(math.radians(lower_arm_alpha+upper_arm_alpha))*lower_arm_length
    z += math.cos(math.radians(lower_arm_alpha+upper_arm_alpha))*lower_arm_length
    pitch=upper_arm_alpha+lower_arm_alpha+hand_alpha-90
    # Tool rotation in world frame: roll joint minus base rotation plus offset.
    phi = hand_roll_alpha-base_alpha+hand_radial_offset_rot
    z+=basis_height
    z-=math.sin(math.radians(pitch))*hand_length
    r+=math.cos(math.radians(pitch))*hand_length
    x = math.cos(math.radians(base_alpha))*r
    y = math.sin(math.radians(base_alpha))*r
    # Apply the tool's radial offset in the phi direction.
    x+= math.cos(math.radians(phi))*hand_radial_offset
    y-= math.sin(math.radians(phi))*hand_radial_offset
    return [x,y,z,phi]
def IK_4DOF(pos):
    """Inverse kinematics for 4-DOF use: [x, y, z, phi] -> joint angles (deg).

    The tool is assumed to point straight down (z is offset by the full
    hand_length, no pitch term). Returns the (9999, ...) sentinel when the
    target is unreachable.
    """
    x,y,z,phi = pos
    phi = math.radians(phi)
    z-=basis_height
    z+=hand_length  # tool points down, so the wrist sits hand_length above the tip
    # Undo the radial tool offset before solving the planar arm.
    x-= math.cos(phi)*hand_radial_offset
    y+= math.sin(phi)*hand_radial_offset
    r = math.sqrt(pow(x,2)+pow(y,2))
    span = math.sqrt(pow(z,2)+pow(r,2))
    if span > (upper_arm_length+lower_arm_length) or span<abs(upper_arm_length-lower_arm_length):
        return(9999,9999,9999,9999,9999)
    # Law of cosines, as in IK().
    lower_arm_alpha = math.pi-math.acos((pow(lower_arm_length,2)+pow(upper_arm_length,2)-pow(span,2))/(2.0*lower_arm_length*upper_arm_length))
    upper_arm_alpha = -math.acos((pow(span,2)+pow(upper_arm_length,2)-pow(lower_arm_length,2))/(2*span*upper_arm_length))
    upper_arm_alpha += math.atan2(r,z)
    base_alpha = math.atan2(y,x)
    hand_roll_alpha=phi+base_alpha-math.radians(hand_radial_offset_rot)
    # Hand joint keeps the tool vertical.
    hand_alpha = math.pi-(upper_arm_alpha+lower_arm_alpha)
    return [math.degrees(base_alpha),math.degrees(upper_arm_alpha),math.degrees(lower_arm_alpha),math.degrees(hand_alpha),math.degrees(hand_roll_alpha)]
def is_valid(alphas):
    """Return True when every joint angle (degrees) lies inside its limits.

    alphas = [base, upper_arm, lower_arm, hand, hand_roll]; the hand-roll
    joint is unconstrained.
    """
    base_alpha, upper_arm_alpha, lower_arm_alpha, hand_alpha, _hand_roll = alphas
    # Chained comparisons against the per-joint (min, max) mechanical limits.
    return (
        -45 <= base_alpha <= 180
        and -10 <= upper_arm_alpha <= 120
        and 0 <= lower_arm_alpha <= 90
        and -90 <= hand_alpha <= 90
    )
def send_reset():
    """Home the arm: send the reset command, wait for the acknowledgement
    byte, mark the arm calibrated, and move to a safe start pose."""
    global calibrated
    print("starting calibration")
    ser.write(bytes("R;", 'utf-8'))
    time.sleep(1)
    ser.read(1)  # blocking read of the firmware's acknowledgement byte
    calibrated=True
    send_alphas([0,30,60,90,0])
    time.sleep(2)
    print("calibration done")
# Last joint-angle set actually transmitted to the arm; read back by
# get_pos() and used as the interpolation start by the lip_* functions.
last_send_alphas=[0,0,0,0,0]
def send_alphas(alphas):
    """Convert joint angles (deg) to encoder targets and send them over serial.

    Refuses to move unless the arm is calibrated and the angles pass
    is_valid(). On success records the command in last_send_alphas.
    """
    global last_send_alphas
    if calibrated:
        if is_valid(alphas):
            base_alpha,upper_arm_alpha,lower_arm_alpha,hand_alpha,hand_roll_alpha = alphas
            # encoder target = zero offset + steps-per-revolution * (angle / 360)
            base_encoder = int(base_encoder_0 + base_encoder_steps*(base_alpha/360.0))
            upper_arm_encoder = int(upper_arm_encoder_0 + upper_arm_encoder_steps*(upper_arm_alpha/360.0))
            lower_arm_encoder = int(lower_arm_encoder_0 + lower_arm_encoder_steps*(lower_arm_alpha/360.0))
            hand_encoder = int(hand_encoder_0+hand_encoder_steps*(hand_alpha/360.0))
            hand_roll_encoder = int(hand_roll_encoder_0+hand_roll_encoder_steps*(hand_roll_alpha/360.0))
            # T3/T4 receive the sum and difference of pitch and roll targets
            # (presumably a differential wrist drive -- TODO confirm).
            cmd = "T0="+str(base_encoder)+";T1="+str(upper_arm_encoder)+";T2="+str(lower_arm_encoder)+";T3="+str(hand_encoder+hand_roll_encoder)+";T4="+str(hand_encoder-hand_roll_encoder)+";"
            ser.write(bytes(cmd, 'utf-8'))
            last_send_alphas=alphas
        else:
            print("out of range alphas")
            #print(alphas)
    else:
        print("not clibrated")  # NOTE(review): typo for "calibrated" in user-facing text
def gripper(d):
    """Drive the gripper motor (axis T5): close for d > 0.5, open for
    d < -0.5, stop otherwise."""
    if d>0.5:
        cmd = "T5=1000;"
        ser.write(bytes(cmd, 'utf-8'))
    elif d<-0.5:
        cmd = "T5=-1000;"
        ser.write(bytes(cmd, 'utf-8'))
    else:
        cmd = "T5=0;"
        ser.write(bytes(cmd, 'utf-8'))
def send_pos(pos):
    """Move directly to a 5-DOF pose [x, y, z, pitch, roll] via IK()."""
    send_alphas(IK(pos))
def send_pos_4DOF(pos):
    """Move directly to a 4-DOF pose [x, y, z, phi] via IK_4DOF()."""
    send_alphas(IK_4DOF(pos))
def lip_alphas(pos_new,time_frame):
    """Linearly interpolate in joint space from the last sent angles to
    *pos_new* over *time_frame* seconds (10 ms steps)."""
    pos_old = last_send_alphas
    timestep = 0.01
    steps = int(time_frame/timestep)
    pos = [0,0,0,0,0]
    for i in range(steps):
        for j in range(5):
            pos[j] = pos_old[j]+(pos_new[j]-pos_old[j])*(i/steps)
        send_alphas(pos)
        time.sleep(timestep)
    # Finish exactly on the target (the loop stops one step short).
    send_alphas(pos_new)
    time.sleep(timestep)
def get_pos():
    """Return the current 5-DOF pose, computed from the last sent angles."""
    return FK(last_send_alphas)
def lip(pos_new,time_frame):
    """Linearly interpolate in Cartesian space (5-DOF pose) to *pos_new*
    over *time_frame* seconds, solving IK at every 10 ms step."""
    pos_old = get_pos()
    timestep = 0.01
    steps = int(time_frame/timestep)
    pos = [0,0,0,0,0]
    for i in range(steps):
        for j in range(5):
            pos[j] = pos_old[j]+(pos_new[j]-pos_old[j])*(i/steps)
        send_alphas(IK(pos))
        time.sleep(timestep)
    # Finish exactly on the target.
    send_alphas(IK(pos_new))
    time.sleep(timestep)
def lip_4DOF(pos_new,time_frame):
    """Linearly interpolate in Cartesian space (4-DOF pose [x, y, z, phi])
    to *pos_new* over *time_frame* seconds, solving IK_4DOF each step."""
    pos_old = FK_4DOF(last_send_alphas)
    timestep = 0.01
    steps = int(time_frame/timestep)
    pos = [0,0,0,0]
    for i in range(steps):
        for j in range(4):
            pos[j] = pos_old[j]+(pos_new[j]-pos_old[j])*(i/steps)
        send_alphas(IK_4DOF(pos))
        time.sleep(timestep)
    # Finish exactly on the target.
    send_alphas(IK_4DOF(pos_new))
    time.sleep(timestep)
| redoxcode/movemaster2-demo | MoveMasterLib.py | MoveMasterLib.py | py | 7,914 | python | en | code | 0 | github-code | 13 |
def recur(cur, s):
    """Depth-first search for BOJ 15664: build every non-descending length-n
    selection of the input numbers without reusing an element.

    cur -- index in ``li`` currently being filled
    s   -- smallest ``arr`` index allowed at this depth; starting each level
           at the previous index keeps selections non-descending
    (Restored the ``def`` line, which was garbled in the dump.)
    """
    if cur == n:
        res.add(" ".join(list(map(str, li))))  # join to a string so the set de-duplicates
        return
    if len(li) > m:
        # len(li) is fixed at n, so this guard only matters when n > m.
        return
    for i in range(s, len(arr)):
        if not v[i]:
            li[cur] = arr[i]
            v[i] = True
            recur(cur + 1, i)
            v[i] = False
# Read M (count of numbers) and N (sequence length), then the numbers.
m, n = map(int, input().split())
arr = sorted(list(map(int, input().split())))
v = [False for _ in range(m+1)]
li = [0 for i in range(n)]
res = set() # a set does not preserve order, but removes duplicate sequences
recur(0, 0)
res = list(res) # convert the set back to a list
for i in range(len(res)):
    res[i] = list(map(int,res[i].split())) # convert each str back to ints
if n > 1: # guard against index errors for length-1 sequences
    res.sort(key = lambda x :(x[0],x[1])) # sort the sequences for output
for i in res:
    print(*i)
| lion1735/Algorithm | Main_15664.py | Main_15664.py | py | 830 | python | ko | code | 0 | github-code | 13 |
import sys
import time
import paho.mqtt.client as mqtt
import json
import random
NETPIE_HOST = "broker.netpie.io"
CLIENT_ID = "4d046c4d-37c7-4978-953a-c851d596fad5" # Client ID of the device created on NETPIE
# NOTE(review): hard-coded device token committed to source control -- should
# be moved to configuration / an environment variable.
DEVICE_TOKEN = "M7ptbnbqoBZWgLzm72Jcb4gfJ2N6ahGd"# Token of the device created on NETPIE
# Shadow payloads: simulated sensor readings and the latest button status.
sensor_data = {'temperature': 0, 'humidity': 0,'sunshine': 0,"farmname":"Not data"}
sensor_datanobutton = {"statusbutton":"no Button"}
def on_connect(client, userdata, flags, rc):
    """paho-mqtt connect callback: report the result and subscribe to the
    shadow-update and LED command topics."""
    print("Result from connect: {}:".format(mqtt.connack_string(rc)))
    client.subscribe("@shadow/data/updated")
    client.subscribe("@msg/led")
def on_message(client, userdata, msg):
    """paho-mqtt message callback.

    Decodes the payload and, when it is one of the recognised LED commands,
    records it as the new button status and publishes the updated shadow.
    (The original repeated an identical branch per command string; every
    branch did the same thing, so they are collapsed into one check.)
    """
    datatrue = msg.payload.decode("utf-8")
    if datatrue in ("ontoggle", "offtoggle", "onbutton", "offbutton"):
        sensor_datanobutton["statusbutton"] = datatrue
        client.publish("@shadow/data/update",
                       json.dumps({"data": sensor_datanobutton}), 1)
# Connect to NETPIE and publish a simulated sensor reading every 10 seconds.
client = mqtt.Client(protocol=mqtt.MQTTv311,client_id=CLIENT_ID, clean_session=True)
client.username_pw_set(DEVICE_TOKEN)
client.on_connect = on_connect
client.on_message = on_message
client.connect(NETPIE_HOST, 1883)
client.loop_start()
try:
    while True:
        # Simulated readings (randint already returns ints, so the round()
        # calls below are no-ops kept from a real-sensor version).
        temperature = random.randint(0, 50)
        humidity = random.randint(0, 100)
        sunshine = random.randint(0, 100)
        farmname = "FARM-A"
        if humidity is not None and temperature is not None and sunshine is not None:
            humidity = round(humidity)
            temperature = round(temperature)
            sunshine = round(sunshine)
            # NOTE(review): the format string has only two placeholders, so
            # the third argument (sunshine) is never printed -- confirm intent.
            print(
                "Temp={0: 0.1f}*C Humidity={1: 0.1f} %".format(temperature, humidity, sunshine))
            sensor_data["temperature"] = temperature
            sensor_data["humidity"] = humidity
            sensor_data["sunshine"] = sunshine
            sensor_data["farmname"] = farmname
            print(json.dumps({"data": sensor_data}))
            client.publish("@shadow/data/update",
                           json.dumps({"data": sensor_data}), 1)
            time.sleep(10)
        else:
            print("Failed to get reading. Try again!")
except KeyboardInterrupt:
    pass
# NOTE(review): calling loop_start() again at shutdown looks like a typo for
# loop_stop() -- confirm.
client.loop_start()
client.disconnect()
| NesJaaTH/python | lab6/LAB6_6_129.py | LAB6_6_129.py | py | 3,009 | python | en | code | 0 | github-code | 13 |
def getBMI(x, y):
    """Return body-mass index: weight *x* (kg) divided by *y* (height in
    metres, already squared by the caller)."""
    bmi = x / y
    return bmi
def display_category(x):
    """Map a BMI value *x* onto its category label.

    The labels themselves live in the module-level *_condition globals,
    which the main script initialises before calling this function.
    """
    if x < 16.5:
        return first_condition
    # Ordered (upper bound, label) table; the first bound x falls under wins.
    for bound, label in (
        (18.4, second_condition),
        (24.9, third_condition),
        (30, fourth_condition),
        (34.9, fifth_condition),
        (40, sixth_condition),
    ):
        if x <= bound:
            return label
    return seventh_condition
# Code to input the user's details.
# Read as floats: the original used int(), which crashed on any realistic
# non-integer height such as 1.75 m (and rejected fractional weights).
weight = float(input("Enter your weight in kilograms: "))
height = float(input("Enter your height in meters: "))
height_squared = height**2
# Initialize the category labels used by display_category()
# (also fixes the "Severly" spelling in the user-facing text).
first_condition = "Severely Underweight"
second_condition = "Underweight"
third_condition = "Normal"
fourth_condition = "Overweight"
fifth_condition = "Obese Class 1"
sixth_condition = "Obese Class 2"
seventh_condition = "Obese Class 3"
# Compute the BMI and its category
BMI = getBMI(weight, height_squared)
category = display_category(BMI)
# Print the output
print (BMI)
print("Your BMI is", BMI, "You are", category)
| sheenabasiga/ITE-260 | BMI.py | BMI.py | py | 1,020 | python | en | code | 0 | github-code | 13 |
import pickle
import pandas as pd
from utils import get_config
# Snakemake-injected script: reads the protein table and pickled protein
# data, attaches integer class labels, and writes the pre-training dataset.
prot_table = pd.read_csv(snakemake.input.prot_table, sep="\t")
prot_data = pd.read_pickle(snakemake.input.prot_data)
# Target_ID -> raw label mapping taken from the table's "Y" column.
prot_y = prot_table.set_index("Target_ID")["Y"].to_dict()
dims_config = get_config(prot_data, "prot")
# NOTE(review): len(prot_y) is the number of proteins, not the number of
# distinct labels -- confirm whether len(set(prot_y.values())) was intended.
dims_config["num_classes"] = len(prot_y)
snakemake.config["prots"]["data"] = dims_config
# Encode the sorted distinct labels as consecutive integers (label -> index).
y_encoder = {v: k for k, v in enumerate(sorted(set(prot_y.values())))}
result = []
for k, v in prot_data["data"].items():
    v["y"] = y_encoder[prot_y[k]]
    v["id"] = k
    result.append(v)
with open(snakemake.output.pretrain_prot_data, "wb") as file:
    pickle.dump(
        {
            "data": result,
            "config": snakemake.config["prots"],
            "decoder": {v: k for k, v in y_encoder.items()},  # index -> label
        },
        file,
    )
| ilsenatorov/rindti | workflow/scripts/pretrain_prot_data.py | pretrain_prot_data.py | py | 828 | python | en | code | 8 | github-code | 13 |
import setuptools
# Use the README as the long description shown on PyPI.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
# Standard setuptools packaging configuration (src/ layout).
setuptools.setup(
    name="learning-map",
    version="0.0.1",
    author="Jeremy Miller",
    author_email="jeremymiller00@gmail.com",
    description="An application for viewing and interacting with my Data Scientist Learning Map",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/jeremymiller00/learning-map",
    project_urls={
        "Bug Tracker": "https://github.com/jeremymiller00/learning-map/issues",
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # Packages live under src/ (src layout).
    package_dir={"": "src"},
    packages=setuptools.find_packages(where="src"),
    python_requires=">=3.6",
)
from tkinter import *
class MortgageCalculator(Frame):
    """Tkinter frame that reads principal, term (years) and annual interest
    rate (%) and displays the monthly repayment and total interest."""

    def __init__(self, master):
        Frame.__init__(self, master)
        self.grid()
        self.create_widgets()

    def create_widgets(self):
        """Build the three labelled entry fields and the Calculate button."""
        self.label = Label(self, text = "Principle: ")
        self.label.grid(row = 0, column = 0, sticky = W)
        self.prince = Entry(self)
        self.prince.grid(row = 0, column = 1, sticky = W)

        self.label = Label(self, text = "Term of Mortgage: ")
        self.label.grid(row = 1, column = 0, sticky = W)
        self.term = Entry(self)
        self.term.grid(row = 1, column = 1, sticky = W)

        self.label = Label(self, text = "Interest Rate: ")
        self.label.grid(row = 2, column = 0, sticky = W)
        self.rate = Entry(self)
        self.rate.grid(row = 2, column = 1, sticky = W)

        self.calc_button = Button(self, text = "Calculate", command = self.calculate)
        self.calc_button.grid(row = 3, column = 1, sticky = W)

    @staticmethod
    def _monthly_payment(principal, years, annual_rate_percent):
        """Return the unrounded monthly payment of a fully amortised loan."""
        monthly_rate = (annual_rate_percent / 100) / 12
        if monthly_rate == 0:
            # Interest-free loan: the amortisation formula below would
            # divide by zero, so fall back to plain division.
            return principal / (years * 12)
        factor = 1 - pow(1 + monthly_rate, -years * 12)
        return (monthly_rate * principal) / factor

    def calculate(self):
        """Read the entries, compute the figures and display them.

        Bug fix: the original rounded with ``round(total * 100, 2) / 100``,
        which rounds to 4 decimal places instead of the intended 2 (cents).
        """
        princeC = float(self.prince.get())
        termC = float(self.term.get())
        rateC = float(self.rate.get())
        total = round(self._monthly_payment(princeC, termC, rateC), 2)
        totalInt = round(total * (termC * 12) - princeC, 2)
        self.outputLabel1 = Label(self , text = "Your monthly repayments will be: " + "€" + str(total))
        self.outputLabel1.grid(row = 4, column = 0, columnspan = 2, sticky = W)
        self.outputLabel2 = Label(self , text = "Your total interest paid for the loan will be: " + "€" + str(totalInt))
        self.outputLabel2.grid(row = 5, column = 0, columnspan = 2, sticky = W)
        self.outputLabel3 = Label(self , text = "Total paid back to the bank: " + "€" + str(totalInt + princeC))
        self.outputLabel3.grid(row = 6, column = 0, columnspan = 2, sticky = W)
#create the window
#create the window
root = Tk()
#modify root window
root.title("Mortgage Calculator")
root.geometry("350x200")
app = MortgageCalculator(root)
#event loop (blocks until the window is closed)
root.mainloop()
| Taylor365/Python | MortCalculator/mortgageCalculator.py | mortgageCalculator.py | py | 2,344 | python | en | code | 0 | github-code | 13 |
from ecpy.curves import Curve,Point
from Crypto.Hash import SHA3_256, SHA256
import Crypto.Random.random # a bit better secure random number generation
import client_basics as cb
import client_basics_Phase2 as cb2
import client_basics_Phase3 as cb3
from Crypto.Hash import HMAC
from Crypto.Cipher import AES
#3.1 Downloading Messages from the server
stuID = 28239
x = 93223115898197558905062012489877327981787036929201444813217704012422483432813
y = 8985629203225767185464920094198364255740987346743912071843303975587695337619
E = Curve.get_curve('secp256k1')
p = E.generator
n = E.order
print("p: ", p)
print("n: ", n)
point = Point(x,y,E)
print("point: ", point)
#sa = Crypto.Random.random.randint(0, n-1) #sa is private - identity key
sa = 17353634583535269100214152160979107048399289142843300833199020552285271875066
print("sa: ", sa)
qa = sa * p #qa is public key
print("qa: ",qa)
#signature generation
stuID = stuID.to_bytes(2,byteorder="big")
print("stuId: ", stuID)
m = 28239
k = Crypto.Random.random.randint(1, n-2)
R = k * p
r = (R.x) % n
print("r:", r)
r_byte = r.to_bytes(32, 'big')
m_byte = m.to_bytes(2, 'big')
h = SHA3_256.SHA3_256_Hash(r_byte+ m_byte, True)
h = SHA3_256.SHA3_256_Hash.digest(h)
h = int.from_bytes(h,"big")
h = h % n
s = (k- (sa*h))
s = s % n
print("h: ", h)
print("s: ", s)
print(E.is_on_curve(qa))
#cb.IKRegReq(h,s,qa.x,qa.y) ---------> ID: 28239 CODE: 106590
"""
ID: 28239 CODE: 106590
Sending message is: {'ID': 28239,
'H': 7483239667947657079221120183470408812468827778297638119224547277257303037189,
'S': 37072444792267534329042480053454078134844330208514188234371033536238826941057,
'IKPUB.X': 39976054850521507574967626301504309882077713530204759279452697237879489454571,
'IKPUB.Y': 42826606605638080211453913302126934486778992853270815969562555968218429004241}
ID: 28239 CODE: 106590
"""
#signature verification
"""
h = 7483239667947657079221120183470408812468827778297638119224547277257303037189
s = 37072444792267534329042480053454078134844330208514188234371033536238826941057
p=(0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798 , 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8)
sa = 17353634583535269100214152160979107048399289142843300833199020552285271875066
"""
qa = sa * p #qa is public key
code = 106590
V = s*p + h*qa
v = V.x % n
v_byte = v.to_bytes(32, 'big')
m_byte = m.to_bytes(2, 'big')
h2 = SHA3_256.SHA3_256_Hash(v_byte+ m_byte, True)
h2 = SHA3_256.SHA3_256_Hash.digest(h2)
h2 = int.from_bytes(h2,"big")
h2 = h2 % n
if (h == h2):
print("Accept!") #verified
else:
print("Not verified!") #not verified
#cb.IKRegVerify(code) Registered successfully reset code: 706974
#2.2 signed pre key
cb3.ResetOTK(h,s) #TODO
spk_priv = 97386945159447522628161478992249335496917184340606559844874149341380030966312
print("skp_priv: ", spk_priv)
spk_pub = spk_priv * p #qa is public key
print("skp_pub: ",spk_pub)
spk_x_bytes = spk_pub.x.to_bytes(32, 'big')
spk_y_bytes = spk_pub.y.to_bytes(32, 'big')
spk_m = spk_x_bytes + spk_y_bytes
k3 = Crypto.Random.random.randint(1,n-2)
r3 = k3*p
r3x = r3.x % n
r3x_bytes = r3.x.to_bytes(32, 'big')
h3 = SHA3_256.SHA3_256_Hash(r3x_bytes+ spk_m, True)
h3 = SHA3_256.SHA3_256_Hash.digest(h3)
h3 = int.from_bytes(h3,"big")
h3 = h3 % n
s3 = (k3 - (sa*h3))
s3 = s3 % n
print("h3: ", h3)
print("s3: ", s3)
print(E.is_on_curve(spk_pub))
x5, y5, h4, s4 = cb.SPKReg(h3,s3,spk_pub.x,spk_pub.y)
sw_pub_ik = Point(x, y, E)
V2 = s4*p + h4*sw_pub_ik
v2 = V2.x % n
v2_byte = v2.to_bytes(32, 'big')
x5_byte = x5.to_bytes(32, 'big')
y5_byte = y5.to_bytes(32, 'big')
h5 = SHA3_256.SHA3_256_Hash(v2_byte + x5_byte + y5_byte, True)
h5 = SHA3_256.SHA3_256_Hash.digest(h5)
h5 = int.from_bytes(h5,"big")
h5 = h5 % n
if (h4 == h5):
print("Accept!") #verified
else:
print("Not verified!") #not verified
#2.3 otk
sw_pub_spk = Point(x5, y5, E)
T = spk_priv * sw_pub_spk
t_byte_x = T.x.to_bytes(32, 'big')
t_byte_y = T.y.to_bytes(32, 'big')
m1_byte = b"NoNeedToRideAndHide"
k_hmac = SHA3_256.SHA3_256_Hash(t_byte_x+ t_byte_y + m1_byte, True)
k_hmac = SHA3_256.SHA3_256_Hash.digest(k_hmac)
def otk_cal (k_hmac, okt):
    """Return the hex HMAC-SHA256 of a one-time key's public point under
    *k_hmac*; the point is serialised as x||y with 32 big-endian bytes each.
    Sent to the server alongside each registered OTK."""
    h_temp = HMAC.new(k_hmac, digestmod=SHA256)
    okt_x_y = okt.x.to_bytes(32, 'big') + okt.y.to_bytes(32, 'big')
    h_temp.update(okt_x_y)
    return h_temp.hexdigest()
#(okt.x.bit_length()+7)//8
otk_priv_arr = []
for i in range(0,10):
otk_priv = Crypto.Random.random.randint(0, n-1) #otk_priv is private key
print("otk_priv_ ", i ,":", otk_priv)
otk_pub = otk_priv * p #otk_pub is public key
print("otk_pub_ ", i ,":",otk_pub)
a = cb.OTKReg(i,otk_pub.x,otk_pub.y,otk_cal(k_hmac, otk_pub)) #TODO
#a = "True"
print("Result :", a)
print("")
otk_priv_arr.append(otk_priv)
print("OTK_PRIV_ARR: ", otk_priv_arr)
#PHASE 2
print("------------------------------------------------------")
print("PHASE 2: ")
print()
cb3.PseudoSendMsgPH3(h, s) #Your favourite pseudo-client sent you 5 messages. You can get them from the server
#PseudoSendMsgPH3
print("1:")
idb1, otkid1, msgid1, msg1, ekx1, eky1 = cb3.ReqMsg(h, s)
print("2:")
idb2, otkid2, msgid2, msg2, ekx2, eky2 = cb3.ReqMsg(h, s)
print("3:")
idb3, otkid3, msgid3, msg3, ekx3, eky3 = cb3.ReqMsg(h, s)
print("4:")
idb4, otkid4, msgid4, msg4, ekx4, eky4 = cb3.ReqMsg(h, s)
print("5:")
idb5, otkid5, msgid5, msg5, ekx5, eky5 = cb3.ReqMsg(h, s)
#3.2.1 Session Key (KS)
def findKS(otkid, ekx, eky):
    """Derive the session key from our OTK private key (index *otkid* in the
    module-level otk_priv_arr) and the sender's ephemeral public point
    (*ekx*, *eky*) via an ECDH shared point T, hashed with SHA3-256."""
    ek = Point(ekx, eky, E)
    T = otk_priv_arr[otkid] * ek
    # NOTE(review): bit_length-based to_bytes gives a variable-length
    # encoding of T.x/T.y -- confirm the peer serialises identically.
    U = (T.x).to_bytes(((T.x).bit_length()+7)//8, "big") + (T.y).to_bytes(((T.y).bit_length()+7)//8, "big") + b'MadMadWorld'
    KS = SHA3_256.new(U).digest()
    return KS
ks1 = findKS(otkid1, ekx1, eky1)
def findKdf(ks):
    """KDF chain step: from key *ks* derive (encryption key, HMAC key, next
    chain key), each as SHA3-256 over the previous key plus a fixed label."""
    kenc = SHA3_256.new(ks + b'LeaveMeAlone').digest()
    khmac = SHA3_256.new(kenc + b'GlovesAndSteeringWheel').digest()
    kkdf = SHA3_256.new(khmac + b'YouWillNotHaveTheDrink').digest()
    return kenc, khmac, kkdf
kenc1, khmac1, kkdf1 = findKdf(ks1)
kenc2, khmac2, kkdf2 = findKdf(kkdf1)
kenc3, khmac3, kkdf3 = findKdf(kkdf2)
kenc4, khmac4, kkdf4 = findKdf(kkdf3)
kenc5, khmac5, kkdf5 = findKdf(kkdf4)
kencs = [kenc1, kenc2, kenc3, kenc4, kenc5]
khmacs = [khmac1, khmac2, khmac3, khmac4, khmac5]
cmsgs = []
nonces = []
hmacs = []
encs = []
ids = []
def findHmac(msg, i):
    """Split received message *i* (an int) into nonce / ciphertext / HMAC and
    verify the HMAC with the i-th chain key.

    Layout after converting to bytes: first 8 bytes nonce, last 32 bytes
    HMAC-SHA256 tag, the middle is the ciphertext. On success the parts are
    appended to the module-level lists cmsgs/nonces/hmacs/encs/ids.
    """
    print("msg: ", i)
    msg = msg.to_bytes((msg.bit_length()+7)//8,"big")
    nonce = msg[:8]
    hmac = msg[-32:]
    theMsg = msg[8:-32]
    hmac_new = HMAC.new(khmacs[i-1], digestmod=SHA256)
    hmac_new.update(theMsg)
    hmac_final= hmac_new.digest()
    print("hmac: ", hmac)
    print("hmac_final: ", hmac_final)
    if(hmac == hmac_final):
        print("True, msg authenticated!")
        cmsgs.append(theMsg)
        nonces.append(nonce)
        hmacs.append(hmac)
        encs.append(kencs[i-1])
        ids.append(i)
    else:
        print("False, not authenticated!")
findHmac(msg1, 1)
findHmac(msg2, 2)
findHmac(msg3, 3)
findHmac(msg4, 4)
findHmac(msg5, 5)
def AesDecrypt(ctext, key, nonce):
    """AES-CTR decrypt *ctext* with *key* and *nonce*, decode as UTF-8,
    print and return the plaintext string."""
    cipher = AES.new(key, AES.MODE_CTR, nonce=nonce) #keyenc, AES.MODE_CTR, nonce=ctext[0:8]
    dtext = cipher.decrypt(ctext)
    dtext = dtext.decode('UTF-8')
    print("plaintext: ", dtext)
    return dtext
dtext1 = AesDecrypt(cmsgs[0], encs[0], nonces[0])
dtext1 = bytes(dtext1, 'utf-8')
dtext2 = AesDecrypt(cmsgs[1], encs[1], nonces[1])
dtext2 = bytes(dtext2, 'utf-8')
dtext3 = AesDecrypt(cmsgs[2], encs[2], nonces[2])
dtext3 = bytes(dtext3, 'utf-8')
dtext4 = AesDecrypt(cmsgs[3], encs[3], nonces[3])
dtext4 = bytes(dtext4, 'utf-8')
dtext5 = AesDecrypt(cmsgs[4], encs[4], nonces[4]) #because now all the msg are correct (phase3)
dtext5 = bytes(dtext5, 'utf-8')
dtext_list = [dtext1, dtext2, dtext3, dtext4, dtext5] #needed at phase3 for grading part. These are the msg we will send to pseudo client
#phase 3
print("")
print("------------------------------------------------------")
print("PHASE 3:")
sa = 17353634583535269100214152160979107048399289142843300833199020552285271875066
print("sa: ", sa)
qa = sa * p #qa is public key
print("qa: ",qa)
#signature generation
print("stuId: ", stuID)
ServerID = 18007
k = 1748178
R = k * p
r = (R.x) % n
print("r:", r)
r_byte = r.to_bytes(32, 'big')
m_byte = m.to_bytes(2, 'big')
h = SHA3_256.SHA3_256_Hash(r_byte+ m_byte, True)
h = SHA3_256.SHA3_256_Hash.digest(h)
h = int.from_bytes(h,"big")
h = h % n
s = (k- (sa*h))
s = s % n
print("h: ", h)
print("s: ", s)
print(E.is_on_curve(qa))
stuID = int.from_bytes(stuID,"big")
print("stuId: ", stuID)
print("reqOTKB: ")
#KEYID, OTK_X, OTK_Y = cb3.reqOTKB(stuID, m, h, s)
KEYID = 54
OTK_X = 49423115135639117780110598800067102951151492842969517804762168007684819320280
OTK_Y = 103838483670827101067809167427012791940728140473666527481560818625322840035032
print("KEYID: ", KEYID)
print("OTK_X: ", OTK_X)
print("OTK_Y: ", OTK_Y)
otk_b = Point(OTK_X,OTK_Y, E) #public client
ek_a_priv = Crypto.Random.random.randint(1, n-2)
print("ek_a_priv: ", ek_a_priv)
ek_a_pub = ek_a_priv * p #qa is public key
print("ek_a_pub: ",ek_a_pub)
T = ek_a_priv * otk_b
print("T: ",T)
U = (T.x).to_bytes(((T.x).bit_length()+7)//8, "big") + (T.y).to_bytes(((T.y).bit_length()+7)//8, "big") + b'MadMadWorld'
print("U: ",U)
KS_P3 = SHA3_256.new(U).digest()
print("KS_P3: ",KS_P3)
#creating the chain
# NOTE(review): identical redefinition of findKdf from the Phase-2 section
# above -- redundant but harmless.
def findKdf(ks):
    """KDF chain step: derive (encryption key, HMAC key, next chain key)."""
    kenc = SHA3_256.new(ks + b'LeaveMeAlone').digest()
    khmac = SHA3_256.new(kenc + b'GlovesAndSteeringWheel').digest()
    kkdf = SHA3_256.new(khmac + b'YouWillNotHaveTheDrink').digest()
    return kenc, khmac, kkdf
kenc1_p3, khmac1_p3, kkdf1_p3 = findKdf(KS_P3)
kenc2_p3, khmac2_p3, kkdf2_p3 = findKdf(kkdf1_p3)
kenc3_p3, khmac3_p3, kkdf3_p3 = findKdf(kkdf2_p3)
kenc4_p3, khmac4_p3, kkdf4_p3 = findKdf(kkdf3_p3)
kenc5_p3, khmac5_p3, kkdf5_p3 = findKdf(kkdf4_p3)
#encryption
def AesEncrypt(ptext, key):
    """AES-CTR encrypt *ptext* with *key*; returns the random 8-byte nonce
    chosen by the cipher prepended to the ciphertext."""
    cipher = AES.new(key, AES.MODE_CTR)
    ctext = cipher.nonce + cipher.encrypt(ptext)
    return ctext
ctext1 = AesEncrypt(dtext_list[0], kenc1_p3)
ctext2 = AesEncrypt(dtext_list[1], kenc2_p3)
ctext3 = AesEncrypt(dtext_list[2], kenc3_p3)
ctext4 = AesEncrypt(dtext_list[3], kenc4_p3)
ctext5 = AesEncrypt(dtext_list[4], kenc5_p3)
print("cipher after decrpyt1: ", ctext1)
print("cipher after decrpyt2: ", ctext2)
print("cipher after decrpyt3: ", ctext3)
print("cipher after decrpyt4: ", ctext4)
print("cipher after decrpyt5: ", ctext5)
def addHmac(ctext, hmac):
    """Append an HMAC-SHA256 tag (keyed with *hmac*) to *ctext* and return
    the result as a string.

    NOTE(review): str() of a bytes object produces its repr ("b'...'"),
    so the server receives the textual representation, not raw bytes --
    presumably that is what the lab server expects; confirm.
    """
    hmac_p3 = HMAC.new(hmac, digestmod=SHA256)
    print("hmac_p3: ",hmac_p3)
    hmac_p3.update(ctext)
    print("hmac_p3: ",hmac_p3)
    hmac_final_p3 = hmac_p3.digest()
    print("hmac_final_p3: ",hmac_final_p3)
    fin = ctext + hmac_final_p3
    print("final: ", fin)
    fin = str(fin)
    return fin
fin1 = addHmac(ctext1, khmac1_p3)
fin2 = addHmac(ctext2, khmac2_p3)
fin3 = addHmac(ctext3, khmac3_p3)
fin4 = addHmac(ctext4, khmac4_p3)
fin5 = addHmac(ctext5, khmac5_p3)
print("")
cb3.SendMsg(stuID, ServerID, 54, 1, fin1, ek_a_pub.x, ek_a_pub.y)
print("")
cb3.SendMsg(stuID, ServerID, 54, 2, fin2, ek_a_pub.x, ek_a_pub.y)
print("")
cb3.SendMsg(stuID, ServerID, 54, 3, fin3, ek_a_pub.x, ek_a_pub.y)
print("")
cb3.SendMsg(stuID, ServerID, 54, 4, fin4, ek_a_pub.x, ek_a_pub.y)
print("")
cb3.SendMsg(stuID, ServerID, 54, 5, fin5, ek_a_pub.x, ek_a_pub.y)
print("")
#4.2
#checking the status
numMSG, numOTK, StatusMSG = cb3.Status(stuID, h, s)
print("Num msg: ", numMSG)
print("Num otk: ", numOTK)
print("Status msg: ", StatusMSG)
#You have only 1 OTK left. Please register new OTKs. The largest key id is 9
largest_key_id = 9
otk_priv_arr2 = []
for i in range(largest_key_id+1, largest_key_id + 11 - numOTK):
otk_priv = Crypto.Random.random.randint(0, n-1) #otk_priv is private key
print("otk_priv_ ", i ,":", otk_priv)
otk_pub = otk_priv * p #otk_pub is public key
print("otk_pub_ ", i ,":",otk_pub)
a = cb.OTKReg(i,otk_pub.x,otk_pub.y,otk_cal(k_hmac, otk_pub))
print("Result :", a)
print("")
otk_priv_arr2.append(otk_priv)
print("otk_priv_arr2: ", otk_priv_arr2)
otk_priv_arr.append(otk_priv_arr2)
numMSG, numOTK, StatusMSG = cb3.Status(stuID, h, s)
#4.3
KEYID = 54
OTK_X = 49423115135639117780110598800067102951151492842969517804762168007684819320280
OTK_Y = 103838483670827101067809167427012791940728140473666527481560818625322840035032
print("KEYID: ", KEYID)
print("OTK_X: ", OTK_X)
print("OTK_Y: ", OTK_Y)
otk_b = Point(OTK_X,OTK_Y, E) #public client
ek_a_priv = Crypto.Random.random.randint(1, n-2)
print("ek_a_priv: ", ek_a_priv)
ek_a_pub = ek_a_priv * p #qa is public key
print("ek_a_pub: ",ek_a_pub)
T = ek_a_priv * otk_b
print("T: ",T)
U = (T.x).to_bytes(((T.x).bit_length()+7)//8, "big") + (T.y).to_bytes(((T.y).bit_length()+7)//8, "big") + b'MadMadWorld'
print("U: ",U)
KS_P3 = SHA3_256.new(U).digest()
print("KS_P3: ",KS_P3)
#creating the chain
# NOTE(review): third identical redefinition of findKdf -- redundant.
def findKdf(ks):
    """KDF chain step: derive (encryption key, HMAC key, next chain key)."""
    kenc = SHA3_256.new(ks + b'LeaveMeAlone').digest()
    khmac = SHA3_256.new(kenc + b'GlovesAndSteeringWheel').digest()
    kkdf = SHA3_256.new(khmac + b'YouWillNotHaveTheDrink').digest()
    return kenc, khmac, kkdf
kenc1_p3, khmac1_p3, kkdf1_p3 = findKdf(KS_P3)
kenc2_p3, khmac2_p3, kkdf2_p3 = findKdf(kkdf1_p3)
kenc3_p3, khmac3_p3, kkdf3_p3 = findKdf(kkdf2_p3)
kenc4_p3, khmac4_p3, kkdf4_p3 = findKdf(kkdf3_p3)
kenc5_p3, khmac5_p3, kkdf5_p3 = findKdf(kkdf4_p3)
khmacs_p3 = [khmac1_p3, khmac2_p3, khmac3_p3, khmac4_p3,khmac5_p3 ]
kencs_p3 = [kenc1_p3, kenc2_p3, kenc3_p3, kenc4_p3,kenc5_p3 ]
#encryption
# NOTE(review): identical redefinition of AesEncrypt from above.
def AesEncrypt(ptext, key):
    """AES-CTR encrypt *ptext* with *key*; nonce is prepended to the output."""
    cipher = AES.new(key, AES.MODE_CTR)
    ctext = cipher.nonce + cipher.encrypt(ptext)
    return ctext
ctext1 = AesEncrypt(dtext_list[0], kenc1_p3)
ctext2 = AesEncrypt(dtext_list[1], kenc2_p3)
ctext3 = AesEncrypt(dtext_list[2], kenc3_p3)
ctext4 = AesEncrypt(dtext_list[3], kenc4_p3)
ctext5 = AesEncrypt(dtext_list[4], kenc5_p3)
print("cipher after decrpyt1: ", ctext1)
print("cipher after decrpyt2: ", ctext2)
print("cipher after decrpyt3: ", ctext3)
print("cipher after decrpyt4: ", ctext4)
print("cipher after decrpyt5: ", ctext5)
#returning final ciphertext as byte and string
#returning final ciphertext as byte and string
# NOTE(review): shadows the earlier addHmac; this version returns both the
# raw bytes (for local re-decryption) and the str() form (sent to the server).
def addHmac(ctext, hmac):
    """Append an HMAC-SHA256 tag (keyed with *hmac*) to *ctext*; return
    (bytes, str-representation) of the tagged message."""
    hmac_p3 = HMAC.new(hmac, digestmod=SHA256)
    print("hmac_p3: ",hmac_p3)
    hmac_p3.update(ctext)
    print("hmac_p3: ",hmac_p3)
    hmac_final_p3 = hmac_p3.digest()
    print("hmac_final_p3: ",hmac_final_p3)
    fin = ctext + hmac_final_p3
    print("final: ", fin)
    fin_s = str(fin)
    return fin, fin_s
fin1, fin1_s = addHmac(ctext1, khmac1_p3)
fin2, fin2_s = addHmac(ctext2, khmac2_p3)
fin3, fin3_s = addHmac(ctext3, khmac3_p3)
fin4, fin4_s = addHmac(ctext4, khmac4_p3)
fin5, fin5_s = addHmac(ctext5, khmac5_p3)
print("")
cb3.SendMsg(stuID, ServerID, 54, 1, fin1_s, ek_a_pub.x, ek_a_pub.y)
print("")
cb3.SendMsg(stuID, ServerID, 54, 2, fin2_s, ek_a_pub.x, ek_a_pub.y)
print("")
cb3.SendMsg(stuID, ServerID, 54, 3, fin3_s, ek_a_pub.x, ek_a_pub.y)
print("")
cb3.SendMsg(stuID, ServerID, 54, 4, fin4_s, ek_a_pub.x, ek_a_pub.y)
print("")
cb3.SendMsg(stuID, ServerID, 54, 5, fin5_s, ek_a_pub.x, ek_a_pub.y)
print("")
#4.3 to check this part we will know decrypt as we did in phase2
to_be_decrypted = [fin1, fin2, fin3, fin4, fin5]
cmsgs_3 = []
nonces_3 = []
hmacs_3 = []
encs_3 = []
#seperating the fins
#seperating the fins
# NOTE(review): shadows the Phase-2 findHmac. This version computes
# hmac_final but never compares it against the received tag -- the
# authentication check was dropped; confirm that is intentional.
def findHmac(msg, i):
    """Split tagged message *i* (bytes: 8-byte nonce, ciphertext, 32-byte
    HMAC) and stash the parts in the module-level *_3 lists."""
    print("msg: ", i)
    nonce = msg[:8]
    hmac = msg[-32:]
    theMsg = msg[8:-32]
    hmac_new = HMAC.new(khmacs_p3[i-1], digestmod=SHA256)
    hmac_new.update(theMsg)
    hmac_final= hmac_new.digest()
    cmsgs_3.append(theMsg)
    nonces_3.append(nonce)
    hmacs_3.append(hmac)
    encs_3.append(kencs_p3[i-1])
findHmac(fin1, 1)
findHmac(fin2, 2)
findHmac(fin3, 3)
findHmac(fin4, 4)
findHmac(fin5, 5)
# NOTE(review): identical redefinition of AesDecrypt from the Phase-2 section.
def AesDecrypt(ctext, key, nonce):
    """AES-CTR decrypt *ctext* with *key*/*nonce*; print and return the
    UTF-8 plaintext."""
    cipher = AES.new(key, AES.MODE_CTR, nonce=nonce) #keyenc, AES.MODE_CTR, nonce=ctext[0:8]
    dtext = cipher.decrypt(ctext)
    dtext = dtext.decode('UTF-8')
    print("plaintext: ", dtext)
    return dtext
dtext1_3 = AesDecrypt(cmsgs_3[0], encs_3[0], nonces_3[0])
dtext1_3 = bytes(dtext1_3, 'utf-8')
dtext2_3 = AesDecrypt(cmsgs_3[1], encs_3[1], nonces_3[1])
dtext2_3 = bytes(dtext2_3, 'utf-8')
dtext3_3 = AesDecrypt(cmsgs_3[2], encs_3[2], nonces_3[2])
dtext3_3 = bytes(dtext3_3, 'utf-8')
dtext4_3 = AesDecrypt(cmsgs_3[3], encs_3[3], nonces_3[3])
dtext4_3 = bytes(dtext4_3, 'utf-8')
dtext5_3 = AesDecrypt(cmsgs_3[4], encs_3[4], nonces_3[4]) #because now all the msg are correct (phase3)
dtext5_3 = bytes(dtext5_3, 'utf-8')
#to check if we sent the correct dtexts to the server
print("Check for dtext1: ", dtext1_3==dtext1)
print("Check for dtext2: ", dtext2_3==dtext2)
print("Check for dtext3: ", dtext3_3==dtext3)
print("Check for dtext4: ", dtext4_3==dtext4)
print("Check for dtext5: ", dtext5_3==dtext5) | kaanatmacaa/cryptography_project | client_final_phase.py | client_final_phase.py | py | 16,719 | python | en | code | 0 | github-code | 13 |
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
import time
from itertools import count
import sys
import pandas as pd
from sklearn import preprocessing
import numpy as np
from scipy.signal import find_peaks
import os
import json
import seaborn
import neurokit2 as nk
import librosa
import librosa.display
def eda_custom_process(eda_signal, sampling_rate=4, method="neurokit"):
    """Decompose a raw EDA signal into tonic/phasic components and detect SCR peaks.

    Parameters
    ----------
    eda_signal : array-like or pd.Series
        Raw electrodermal activity samples.
    sampling_rate : int
        Samples per second of the recording (Empatica EDA is 4 Hz).
    method : str
        Peak-detection method name passed through to ``nk.eda_peaks``.

    Returns
    -------
    (pd.DataFrame, dict)
        Per-sample columns (raw, clean, tonic, phasic, peak markers) and an
        info dict including detected peak indices and the sampling rate.
    """
    eda_signal = nk.signal_sanitize(eda_signal)
    # Series check for non-default index
    if type(eda_signal) is pd.Series and type(eda_signal.index) != pd.RangeIndex:
        eda_signal = eda_signal.reset_index(drop=True)
    # Preprocess
    eda_cleaned = eda_signal #Add your custom cleaning module here or skip cleaning
    eda_decomposed = nk.eda_phasic(eda_cleaned, sampling_rate=sampling_rate)
    # Find peaks (SCRs) on the phasic component only; ignore responses below 0.2 µS.
    peak_signal, info = nk.eda_peaks(
        eda_decomposed["EDA_Phasic"].values,
        sampling_rate=sampling_rate,
        method=method,
        amplitude_min=0.2,
    )
    info['sampling_rate'] = sampling_rate  # Add sampling rate in dict info
    # Store
    signals = pd.DataFrame({"EDA_Raw": eda_signal, "EDA_Clean": eda_cleaned})
    signals = pd.concat([signals, eda_decomposed, peak_signal], axis=1)
    return signals, info
def create_emotion_df(path):
    """Build a one-column DataFrame of DeepFace emotion labels for frames 0..2099.

    Parameters
    ----------
    path : str
        Directory containing ``prediction.json``, keyed by the frame index as a
        string, where each entry holds ``{"DeepFace": [label, confidence], ...}``.

    Returns
    -------
    pd.DataFrame
        2100 rows with a single "Emotion" column.
    """
    # Load and parse the JSON once (the original re-opened and re-parsed the
    # same file on every one of the 2100 iterations and never closed it).
    with open(path + "/prediction.json") as f:
        predictions = json.load(f)
    emotions = [predictions[str(i)]["DeepFace"][0] for i in range(0, 2100)]
    return pd.DataFrame(emotions, columns=["Emotion"])
def nr_similar_predictions(path):
    """Report agreement between DeepFace and ResNet predictions over 2100 frames.

    Prints how many frames both models classified identically and how many were
    predicted with >90% confidence by either model.

    Returns
    -------
    list[int]
        Frame indices on which both models predicted the same class.
    """
    # Parse the JSON once, not once per frame, and close the handle.
    with open(path + "/prediction.json") as f:
        predictions = json.load(f)
    tot_similar = 0
    tot_confident_predictions = 0
    imgs_predicted_similarly = []
    for i in range(0, 2100):
        entry = predictions[str(i)]
        emotion_deepface, pred_score_deepface = entry["DeepFace"][0], entry["DeepFace"][1]
        emotion_resnet, pred_score_resnet = entry["ResNet"][0], entry["ResNet"][1]
        if emotion_deepface == emotion_resnet:
            tot_similar += 1
            imgs_predicted_similarly.append(i)
        if pred_score_deepface > 90 or pred_score_resnet > 90:
            tot_confident_predictions += 1
    print("Total images that are predicted to be same class: " +
          str(tot_similar)+"/2100")
    print("Total images that are predicted with a probability percentage over 90: " +
          str(tot_confident_predictions)+"/2100")
    return imgs_predicted_similarly
def normalize_dataframe_values(dataframe, column_name):
    """Min-max scale a single-feature DataFrame into [0, 1].

    Parameters
    ----------
    dataframe : pd.DataFrame or pd.Series
        The values to scale.
    column_name : str
        Name for the resulting column; 'ACC' marks the 1-D accelerometer
        magnitude series that must be reshaped to 2-D for the scaler.

    Returns
    -------
    pd.DataFrame
        Scaled values in a single column named *column_name*.
    """
    nparray = dataframe.values
    if column_name == 'ACC':
        # Magnitude series arrives 1-D; sklearn scalers require 2-D input.
        nparray = nparray.reshape(-1, 1)
    # (Removed an unused StandardScaler instance left over from experimentation.)
    min_max_scaler = preprocessing.MinMaxScaler()
    nparray_scaled = min_max_scaler.fit_transform(nparray)
    df_scaled = pd.DataFrame(nparray_scaled)
    df_scaled.rename(columns={df_scaled.columns[0]: column_name}, inplace=True)
    return df_scaled
def z_score_normalize(df, column_name):
    """Return a copy of *df* with *column_name* standardised to zero mean and unit variance."""
    result = df.copy()
    col = result[column_name]
    result[column_name] = (col - col.mean()) / col.std()
    return result
def transform_bvp(dataframe, column_name):
    """Turn a raw BVP waveform into a step function holding the last peak amplitude.

    The signal is scaled by its absolute maximum, negative values are clipped,
    peaks at least 40 samples apart are detected, and every sample is assigned
    the amplitude of the most recent qualifying peak (minimum 0.1).

    NOTE(review): ``nparray /= ...`` divides in place; ``dataframe.values`` can
    be a view, so the caller's DataFrame may be mutated — confirm.
    """
    nparray = dataframe.values
    # dataframe.plot()
    nparray /= np.max(np.abs(nparray), axis=0)
    # df = pd.DataFrame(nparray)
    # df.plot()
    nparray = nparray.clip(min=0)
    # df = pd.DataFrame(nparray)
    # df.plot()
    nparray = nparray[:, 0]
    # Peaks must be at least 40 samples apart (debounces the 64 Hz pulse wave).
    peaks, _ = find_peaks(nparray, distance=40)
    single_peak_values_with_min = []
    cur_val = 0.1  # floor value used until the first sufficiently large peak
    start = 0
    # NOTE(review): `i in peaks` is a linear scan of an ndarray each iteration;
    # converting `peaks` to a set first would make the loop O(n).
    for i in range(0, len(nparray)):
        if i in peaks:
            new_val = nparray[i]
            if new_val >= 0.1:
                cur_val = new_val
            # Back-fill every sample since the previous peak with cur_val.
            for j in range(start, i+1):
                single_peak_values_with_min.append(cur_val)
            start = i+1
        if i == len(nparray)-1:
            # Flush the tail after the final peak.
            for j in range(start, i+1):
                single_peak_values_with_min.append(cur_val)
    df_scaled = pd.DataFrame(single_peak_values_with_min)
    # df_scaled.plot()
    df_scaled.rename(columns={df_scaled.columns[0]: column_name}, inplace=True)
    # plt.show()
    return df_scaled
def set_phys_in_json(path, phys_df, phys_type):
    """Annotate each of the 2100 frame entries in prediction.json with a physiological value.

    Parameters
    ----------
    path : str
        Directory containing ``prediction.json`` keyed by frame index (as str).
    phys_df : pd.DataFrame
        Single-column frame whose i-th row is the value for frame i.
    phys_type : str
        Key under which the value is stored (e.g. "BVP", "HR").
    """
    json_path = path + "/prediction.json"
    # Use context managers so both handles are flushed and closed deterministically
    # (the original leaked both the read and the write handle).
    with open(json_path, "r") as f:
        predictions = json.load(f)
    for i in range(0, 2100):
        # Cast to a native float: pandas returns numpy scalars, which
        # json.dump cannot serialize.
        predictions[str(i)][phys_type] = float(phys_df.iloc[i, 0])
    with open(json_path, "w") as f:
        json.dump(predictions, f)
participant = "participant_8"
path = "/Users/andreas/Desktop/master/toadstool/participants/" + \
participant+"/images"
# emotion_df = create_emotion_df(path)
# imgs_predicted_similarly = nr_similar_predictions(path)
df_HR = pd.read_csv("/Users/andreas/Desktop/master/toadstool/participants/" +
participant+"/"+participant+"_sensor/HR_sync_video.csv")
df_EDA = pd.read_csv("/Users/andreas/Desktop/master/toadstool/participants/" +
participant+"/"+participant+"_sensor/EDA_sync_video.csv")
df_BVP = pd.read_csv("/Users/andreas/Desktop/master/toadstool/participants/" +
participant+"/"+participant+"_sensor/BVP_sync_video.csv")
df_ACC = pd.read_csv("/Users/andreas/Desktop/master/toadstool/participants/" +
participant+"/"+participant+"_sensor/ACC_sync_video.csv",sep=';')
# print(df_ACC)
# new_df = df_ACC.apply(lambda r: np.sqrt((r['x']**2)+(r['y']**2)+(r['z']**2)),axis=1)
# norm_acc = normalize_dataframe_values(new_df, 'ACC')
#new_df.plot()
# norm_acc.plot()
# transformed_df_BVP = transform_bvp(df_BVP, "BVP")
# df_BVP_max = transformed_df_BVP.rolling(64).max()
# df_BVP_max = df_BVP_max.iloc[::64, :]
# df_BVP_max = df_BVP_max.iloc[1:, :] # Remove NaN from first index
# df_BVP_max = df_BVP_max.reset_index()
# df_BVP_max = df_BVP_max.drop("index", 1)
# df_BVP_max = df_BVP_max['BVP'].round(decimals=3)
# df_BVP_max = df_BVP_max.to_frame("BVP")
# df_BVP_avg = transformed_df_BVP.rolling(64).mean()
# df_BVP_avg = df_BVP_avg.iloc[::64, :]
# df_BVP_avg = df_BVP_avg.iloc[1:, :] # Remove NaN from first index
# df_BVP_avg = df_BVP_avg.reset_index()
# df_BVP_avg = df_BVP_avg.drop("index", 1)
# df_BVP_avg = df_BVP_avg['BVP'].round(decimals=3)
# df_BVP_avg = df_BVP_avg.to_frame("BVP")
# # df_BVP_avg.plot()
# # df_BVP_max.plot()
# # plt.show()
# df_HR_scaled = normalize_dataframe_values(df_HR, "HR")
# arr = np.repeat(df_EDA['EDA'].to_numpy(),16)
# mfccs = librosa.feature.mfcc(arr,n_mfcc=8, sr=64)
# print(mfccs.shape)
# np.save(participant+'_mfccs.npy', mfccs, allow_pickle=True)
# Decompose the EDA signal into tonic (baseline) and phasic (SCR) components.
signals, info = eda_custom_process(df_EDA["EDA"])
tonic = signals["EDA_Tonic"]
ax = tonic.plot(label="Tonic EDA Level")
phasic = signals["EDA_Phasic"]
# Persist the phasic component for downstream feature extraction.
phasic.to_csv(
    "/Users/andreas/Desktop/master/toadstool/participants/"+participant+"/"+participant+"_sensor/EDA_Phasic.csv", header="EDA")
# features = [info["SCR_Peaks"]]
#plot = nk.events_plot(features, phasic, color=['blue'])
#df_EDA_norm = z_score_normalize(df_EDA, "EDA")
#df_EDA_norm.plot()
# set_phys_in_json(path, df_BVP_avg, "BVP")
# df_BVP_max.to_csv(
# "/Users/andreas/Desktop/master/toadstool/participants/"+participant+"/"+participant+"_sensor/transformed_bvp.csv", header="BVP")
# df_BVP_max.plot(kind="line", y="BVP")
# seaborn.set(style='ticks')
# _emotions = [
# 'angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral', 'contempt'
# ]
# df_bvp_emotion = df_BVP_max[:2100].join(emotion_df)
# sim_samples = df_bvp_emotion.iloc[imgs_predicted_similarly, :]
# print(sim_samples.groupby('Emotion').count())
# print(sim_samples.groupby('Emotion').mean())
# seaborn.relplot(data=sim_samples.reset_index(), x='index',
# y='BVP', hue='Emotion', hue_order=_emotions, aspect=1.61)
# df_eda_emotion = df_EDA_scaled[:2100].join(emotion_df)
# df_hr_emotion = df_HR_scaled[:2100].join(emotion_df)
# seaborn.relplot(data=df_bvp_emotion.reset_index(), x='index',
# y='BVP', hue='Emotion', hue_order=_emotions, aspect=1.61)
# seaborn.relplot(data=df_eda_emotion.reset_index(), x='index',
# y='EDA', hue='Emotion', hue_order=_emotions, aspect=1.61)
# seaborn.relplot(data=df_hr_emotion.reset_index(), x='index',
# y='HR', hue='Emotion', hue_order=_emotions, aspect=1.61)
# print(df_bvp_emotion.groupby('Emotion').count())
# print(df_bvp_emotion.groupby('Emotion').mean())
# print(df_eda_emotion.groupby('Emotion').mean())
# print(df_hr_emotion.groupby('Emotion').mean())
| andmathisen/multimodal_emotion_recognition | misc/plot_phy.py | plot_phy.py | py | 8,851 | python | en | code | 1 | github-code | 13 |
14441023035 | from django.contrib.auth.models import User
from django.shortcuts import render, redirect
from django.db.models import Q
from .models import User, Person,Lecture, Forum, Reply
from .forms import ForumForm, ReplyForm
def home(request):
    """Render the home page for the logged-in user."""
    ctx = {"person": Person.objects.filter(user=request.user)}
    return render(request, 'home.html', ctx)
def programs(request):
    """Render the programs overview page."""
    ctx = {"person": Person.objects.filter(user=request.user)}
    return render(request, 'programs.html', ctx)
def lectures(request):
    """List every lecture alphabetically, together with the current user's records."""
    ctx = {
        'lecture': Lecture.objects.all().order_by('name').values(),
        'user': User.objects.filter(username=request.user.username).all(),
        "person": Person.objects.filter(user=request.user),
    }
    return render(request, 'lectures.html', ctx)
def test(request):
    """Render the test page."""
    ctx = {"person": Person.objects.filter(user=request.user)}
    return render(request, 'test.html', ctx)
def answers(request):
    """Render the answers page."""
    ctx = {"person": Person.objects.filter(user=request.user)}
    return render(request, 'answers.html', ctx)
def profile(request):
    """Render the current user's profile page."""
    ctx = {"person": Person.objects.filter(user=request.user)}
    return render(request, "profile.html", ctx)
def forum(request):
    """Show every forum thread."""
    ctx = {
        "person": Person.objects.filter(user=request.user),
        "forum": Forum.objects.all(),
    }
    return render(request, 'forum.html', ctx)
def replies(request):
    """Show every reply across all forum threads."""
    ctx = {
        "person": Person.objects.filter(user=request.user),
        "replies": Reply.objects.all(),
    }
    return render(request, 'replies.html', ctx)
def replyForum(request):
    """Handle submission of a new reply; redirect to the replies list on success."""
    if request.method == "POST":
        form = ReplyForm(request.POST)
        if form.is_valid():
            reply = form.save(commit=False)
            reply.user = request.user
            reply.save()
            return redirect("replies")
    # GET (or invalid POST): re-render the empty form.
    ctx = {"person": Person.objects.filter(user=request.user), "form": ReplyForm}
    return render(request, "reply.html", context=ctx)
def addForum(request):
    """Handle creation of a new forum thread; redirect to the forum on success."""
    if request.method == "POST":
        form = ForumForm(request.POST)
        if form.is_valid():
            thread = form.save(commit=False)
            thread.user = request.user
            thread.save()
            return redirect("forum")
    # GET (or invalid POST): re-render the empty form.
    ctx = {"person": Person.objects.filter(user=request.user), "form": ForumForm}
    return render(request, "addForum.html", context=ctx)
def contact(request):
    """Render the contact page."""
    ctx = {"person": Person.objects.filter(user=request.user)}
    return render(request, 'contact.html', ctx)
| jovanaivanovska11/LearningProjectDjango | ProjectLearning/views.py | views.py | py | 2,798 | python | en | code | 0 | github-code | 13 |
35403455976 | import os
import asyncio
import json
import uuid
import logging
import importlib
# Resolve the shared s3/comm client modules at runtime: the package path
# contains dashes, so a normal import statement cannot reference it.
s3_client = importlib.import_module("liveness-tests-s3-client.s3-client.s3_client")
comm_client = importlib.import_module("liveness-tests-s3-client.s3-client.comm_client")
# NOTE(review): this module-level client appears unused — main() builds its own.
cc = comm_client.CommClient()
# Log INFO-level progress both to a file and to the console.
logging.basicConfig(
    level=logging.INFO,
    handlers=[logging.FileHandler("outputs/gen_gt.log"), logging.StreamHandler()],
)
logger = logging.getLogger("gen_ground_truths")
class GenGroundTruth:
    """Generates face-regressor ground-truth JSON files for zipped liveness-test instances."""

    def get_data(self, bc):
        """Download and unzip the test data from s3 via boto client *bc*."""
        bc.download()
        bc.unzip()

    def remove_data(self, bc):
        """Remove the data previously downloaded from s3."""
        bc.remove()

    def make_dirs(self, dir_path):
        """Create *dir_path* (and parents) if it does not already exist."""
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)

    def get_zip_paths(self, cc):
        """Collect full paths of all instance .zip files under the configured data
        folder, creating the matching ground-truth output directory for each.
        """
        data_folder = cc.args.test_zips_path
        gt_folder = cc.args.gt_face_regressor_path
        zips = []
        for path, _dirs, files in os.walk(data_folder):
            if any(".zip" in x for x in files) and "reais/" in path:
                zips.append(os.path.join(path, files[0]))
                # Keep the last two path components, ex: reais_compressed/t42
                gt_instance_path = "/".join(path.split("/")[-2:])
                gt_instance_path = os.path.join(gt_folder, gt_instance_path)
                self.make_dirs(gt_instance_path)
        return zips

    def save_json(self, json_name, imgs_roi):
        """Save the per-image bounding boxes of *imgs_roi* as a ground-truth JSON file."""
        json_gt = dict(imgs_roi=imgs_roi.bboxes)
        # Context manager guarantees the handle is flushed and closed.
        with open(json_name, "w") as arq:
            json.dump(json_gt, arq, indent=4)

    async def build_gt(self, instance_paths, cc):
        """Send each instance zip to the face regressor and save the resulting ground truths."""
        loop = asyncio.get_event_loop()
        cc.regressor_comm.set_event_loop(loop)
        for instance in instance_paths:
            try:
                request_id = uuid.uuid4().hex
                json_name = instance.replace(
                    "instances", "ground_truths/face-regressor"
                )
                json_name = json_name.replace(".zip", ".json")
                # Close the zip handle promptly (was left open before).
                with open(instance, "rb") as f:
                    file = f.read()
                images = await cc.parse_zip_file(file)
                imgs_roi = await cc.get_fr_response(images, request_id)
                self.save_json(json_name, imgs_roi)
                logger.info("Done {}, request_id: {}".format(instance, request_id))
            # Was a bare `except:` that also swallowed SystemExit/KeyboardInterrupt;
            # logger.exception additionally records the traceback for debugging.
            except Exception:
                logger.exception(
                    "----> FAIL {}, request_id: {}".format(instance, request_id)
                )
def main():
    """Entry point: gather instance zip paths and generate ground truths for all of them."""
    cc = comm_client.CommClient()
    ggt = GenGroundTruth()
    # bc = s3_client.BotoClient()
    # ggt.get_data(bc)
    zips = ggt.get_zip_paths(cc)
    zips = sorted(zips)  # deterministic processing order
    asyncio.run(ggt.build_gt(zips, cc))
    # ggt.remove_data(bc)
if __name__ == "__main__":
main()
| Guipc10/PFG | detect_faces/liveness-face-regressor/tests/src/gen_ground_truths.py | gen_ground_truths.py | py | 3,207 | python | en | code | 0 | github-code | 13 |
23812143846 | import sys
import threading
from datetime import datetime
from PyQt5.QtCore import QDate
from PyQt5.QtWidgets import *
from matplotlib.figure import Figure
import numpy as np
import matplotlib.dates as mdates
import matplotlib
from mplfinance.original_flavor import candlestick_ohlc
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from data import MarketDB
from strategy.MACD import MACD
from strategy.OHLC import OHLC
from strategy.BollingerBand import BollingerBand
class MainWindow(QMainWindow):
    """Main application window: candlestick/indicator charts plus a scanner
    that flags stocks whose Bollinger-band and MACD signals both say "buy" today.
    """
    # Cooperative cancellation flag read by the background scan thread.
    scanRunning = False
    # Shared market-data access object (class-level, one per process).
    mk = MarketDB.MarketDB()
    def __init__(self):
        """Build all widgets, wire their signals, and lay them out."""
        QMainWindow.__init__(self)
        self.setWindowTitle('Dave')
        self.setGeometry(600, 200, 1200, 800)
        self.statusBar().showMessage('Ready')
        self.widget = QWidget(self)
        self.setCentralWidget(self.widget)
        # Chart area (matplotlib canvas + toolbar).
        self.visualization = WidgetPlot(self)
        # Scan / stop buttons (Korean labels: "scan" / "stop").
        self.scanBtn = QPushButton('스캔', self)
        self.scanBtn.clicked.connect(self.onScanBtnClick)
        self.stopBtn = QPushButton('정지', self)
        self.stopBtn.clicked.connect(self.onStopBtnClick)
        # Combo box filled with scan hits; selecting one loads it into the line edit.
        self.stockComboBox = QComboBox(self)
        self.stockComboBox.activated[str].connect(self.onStockComboBoxActivated)
        # Previous / next buttons step through the combo box entries.
        self.preBtn = QPushButton('이전', self)
        self.preBtn.clicked.connect(self.onPreBtnClick)
        self.nextBtn = QPushButton('다음', self)
        self.nextBtn.clicked.connect(self.onNextBtnClick)
        # Date range: default is the last two years up to today.
        self.startDateEdit = QDateEdit(self)
        self.startDateEdit.setDate(QDate.currentDate().addYears(-2))
        self.endDateEdit = QDateEdit(self)
        self.endDateEdit.setDate(QDate.currentDate())
        self.stockLineEdit = QLineEdit(self)
        self.drawBtn = QPushButton('차트 그리기', self)
        self.drawBtn.clicked.connect(self.onDrawBtnClick)
        self.initLayout()
    def initLayout(self):
        """Arrange the chart on the left and the control column on the right."""
        leftLayout = QVBoxLayout()
        leftLayout.addWidget(self.visualization)
        scanBtnLayout = QHBoxLayout()
        scanBtnLayout.addWidget(self.scanBtn)
        scanBtnLayout.addWidget(self.stopBtn)
        pagingBtnLayout = QHBoxLayout()
        pagingBtnLayout.addWidget(self.preBtn)
        pagingBtnLayout.addWidget(self.nextBtn)
        dateLayout = QHBoxLayout()
        dateLayout.addWidget(self.startDateEdit)
        dateLayout.addWidget(self.endDateEdit)
        rightLayout = QVBoxLayout()
        rightLayout.addLayout(scanBtnLayout)
        rightLayout.addWidget(self.stockComboBox)
        rightLayout.addLayout(pagingBtnLayout)
        rightLayout.addLayout(dateLayout)
        rightLayout.addWidget(self.stockLineEdit)
        rightLayout.addWidget(self.drawBtn)
        rightLayout.addStretch(1)
        layout = QHBoxLayout(self.widget)
        layout.addLayout(leftLayout)
        layout.addLayout(rightLayout)
        # Chart column stretches; control column keeps its natural width.
        layout.setStretchFactor(leftLayout, 1)
        layout.setStretchFactor(rightLayout, 0)
    def onScanBtnClick(self):
        """Clear previous results and start the scan on a background thread."""
        self.statusBar().showMessage("스캔 시작")
        self.stockComboBox.clear()
        threading.Thread(target=self.scan, args=(self.startDateEdit.text(), self.endDateEdit.text())).start()
    def scan(self, start_date, end_date):
        """Worker-thread body: check every stock code and add today's buy
        candidates (Bollinger AND MACD "buy" with combined weight >= 1).
        NOTE(review): this runs on a background thread but calls Qt widget
        methods (statusBar, stockComboBox) directly; Qt requires GUI updates
        on the main thread — confirm or refactor to signals/slots.
        """
        mk = MarketDB.MarketDB()
        self.scanRunning = True
        codes = mk.get_codes().values()
        for i, code in enumerate(codes):
            self.statusBar().showMessage(f"[{start_date} ~ {end_date}] 스캔중... ({i + 1} / {len(codes) + 1})")
            # Stop button flips scanRunning; bail out between codes.
            if not self.scanRunning:
                self.statusBar().showMessage(f"[{start_date} ~ {end_date}] 스캔 종료 ({i + 1} / {len(codes) + 1})")
                break
            df = mk.get_daily_price(code, start_date, end_date)
            today_x_value = mdates.date2num(np.datetime64(datetime.today().strftime('%Y-%m-%d')))
            bollinger_band = BollingerBand(df)
            bollinger_band_trading_points = bollinger_band.get_trading_points()
            if len(bollinger_band_trading_points) == 0:
                continue
            bollinger_band_trading_point = bollinger_band_trading_points[-1]
            macd = MACD(df)
            macd_trading_points = macd.get_trading_points()
            if len(macd_trading_points) == 0:
                continue
            macd_trading_point = macd_trading_points[-1]
            # Both indicators must flag a "buy" dated today.
            if bollinger_band_trading_point.get("x") == today_x_value and \
                    bollinger_band_trading_point.get("trading") == "buy" and \
                    macd_trading_point.get("x") == today_x_value and \
                    macd_trading_point.get("trading") == "buy":
                if bollinger_band_trading_point.get("weight") + macd_trading_point.get("weight") >= 1:
                    self.stockComboBox.addItem(code)
        if self.scanRunning:
            self.statusBar().showMessage(f"[{start_date} ~ {end_date}] 스캔 완료")
        self.scanRunning = False
    def onStopBtnClick(self):
        """Request cancellation of a running scan."""
        self.scanRunning = False
    def onStockComboBoxActivated(self, code):
        """Copy the selected stock code into the line edit."""
        self.stockLineEdit.clear()
        self.stockLineEdit.setText(code)
    def onPreBtnClick(self):
        """Select the previous scan hit (if any) and redraw its charts."""
        text = self.stockComboBox.currentText()
        index = self.stockComboBox.findText(text)
        if index > 0:
            self.stockComboBox.setCurrentIndex(index - 1)
            text = self.stockComboBox.currentText()
            self.onStockComboBoxActivated(text)
            self.onDrawBtnClick()
    def onNextBtnClick(self):
        """Select the next scan hit (if any) and redraw its charts."""
        text = self.stockComboBox.currentText()
        index = self.stockComboBox.findText(text)
        if index < self.stockComboBox.count() - 1:
            self.stockComboBox.setCurrentIndex(index + 1)
            text = self.stockComboBox.currentText()
            self.onStockComboBoxActivated(text)
            self.onDrawBtnClick()
    def onDrawBtnClick(self):
        """Fetch daily prices for the code in the line edit and redraw all charts."""
        code = self.stockLineEdit.text()
        df = self.mk.get_daily_price(code, self.startDateEdit.text(), self.endDateEdit.text())
        self.visualization.changeData(code, df)
class WidgetPlot(QWidget):
    """Composite widget: a matplotlib canvas stacked under its navigation toolbar."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setLayout(QVBoxLayout())
        self.canvas = PlotCanvas(self)
        self.toolbar = NavigationToolbar(self.canvas, self)
        layout = self.layout()
        layout.addWidget(self.toolbar)
        layout.addWidget(self.canvas)
    def changeData(self, code, df):
        """Redraw the charts for *code* and refresh the toolbar state."""
        self.canvas.changeData(code, df)
        self.toolbar.update()
class PlotCanvas(FigureCanvas):
    """Matplotlib canvas with three stacked, x-axis-sharing charts:
    candlesticks, Bollinger bands, and MACD."""
    def __init__(self, parent=None):
        # Korean-capable font; keep minus signs renderable with it.
        matplotlib.rcParams['font.family'] = "Malgun Gothic"
        matplotlib.rcParams['axes.unicode_minus'] = False
        fig = Figure()
        FigureCanvas.__init__(self, fig)
        self.setParent(parent)
        FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)
        self.plot()
    def plot(self):
        """Create the three subplots; lower charts share the candle chart's x-axis."""
        self.candle_chart = self.figure.add_subplot(3, 1, 1)
        self.bollinger_band_chart = self.figure.add_subplot(3, 1, 2, sharex=self.candle_chart)
        self.macd_chart = self.figure.add_subplot(3, 1, 3, sharex=self.candle_chart)
    def changeData(self, code, df):
        """Clear and redraw all three charts from the daily price frame *df*."""
        ohlc = OHLC(df)
        bollinger_band = BollingerBand(df)
        macd = MACD(df)
        # --- Candlestick chart with MACD trade markers overlaid ---
        self.candle_chart.clear()
        self.candle_chart.set_title(code)
        candlestick_ohlc(self.candle_chart, ohlc.get_ohlc_data(), width=.1, colorup='red', colordown='blue')
        for trading_point in macd.get_trading_points():
            self.candle_chart.plot(trading_point.get("x"), trading_point.get("y"), trading_point.get("marker"))
        self.candle_chart.xaxis.set_major_formatter(mdates.DateFormatter('%y-%m-%d'))
        self.candle_chart.yaxis.set_major_formatter(matplotlib.ticker.StrMethodFormatter('{x:,.0f}'))
        self.candle_chart.grid(True)
        # --- Bollinger band chart: close, upper/lower bands, 20-day MA ---
        self.bollinger_band_chart.clear()
        bollinger_band_close_data = bollinger_band.get_close_data()
        self.bollinger_band_chart.plot(bollinger_band_close_data.get("x"), bollinger_band_close_data.get("y"),
                                       color='m', label='Close')
        bollinger_band_upper_band_data = bollinger_band.get_upper_band_data()
        self.bollinger_band_chart.plot(bollinger_band_upper_band_data.get("x"), bollinger_band_upper_band_data.get("y"),
                                       'r--', label='Upper band')
        bollinger_band_ma20_data = bollinger_band.get_ma20_band_data()
        self.bollinger_band_chart.plot(bollinger_band_ma20_data.get("x"), bollinger_band_ma20_data.get("y"), 'k--',
                                       label='Moving average 20')
        bollinger_band_lower_band_data = bollinger_band.get_lower_band_data()
        self.bollinger_band_chart.plot(bollinger_band_lower_band_data.get("x"), bollinger_band_lower_band_data.get("y"),
                                       'c--', label='Lower band')
        self.bollinger_band_chart.xaxis.set_major_formatter(mdates.DateFormatter('%y-%m-%d'))
        self.bollinger_band_chart.yaxis.set_major_formatter(matplotlib.ticker.StrMethodFormatter('{x:,.0f}'))
        self.bollinger_band_chart.grid(True)
        self.bollinger_band_chart.legend(loc='best')
        # --- MACD chart: MACD line, signal line, histogram bars ---
        self.macd_chart.clear()
        macd_data = macd.get_macd_data()
        self.macd_chart.plot(macd_data.get("x"), macd_data.get("y"), color='b', label='MACD')
        macd_signal_data = macd.get_signal_data()
        self.macd_chart.plot(macd_signal_data.get("x"), macd_signal_data.get("y"), 'g--', label='MACD-Signal')
        macd_histogram_data = macd.get_histogram_data()
        self.macd_chart.bar(macd_histogram_data.get("x"), macd_histogram_data.get("y"), color='m', label='MACD-Hist')
        self.macd_chart.xaxis.set_major_formatter(mdates.DateFormatter('%y-%m-%d'))
        self.macd_chart.yaxis.set_major_formatter(matplotlib.ticker.StrMethodFormatter('{x:,.0f}'))
        self.macd_chart.grid(True)
        self.macd_chart.legend(loc='best')
        self.draw()
if __name__ == "__main__":
app = QApplication(sys.argv)
mainWin = MainWindow()
mainWin.show()
sys.exit(app.exec_())
| luckyDaveKim/WUBU | src/main.py | main.py | py | 10,281 | python | en | code | 2 | github-code | 13 |
72388296657 | # @author wangjinzhao on 2020/12/9
import scrapy
from scrapy.utils.response import open_in_browser
from tutorial.items import Item
class AllSpider2(scrapy.Spider):
    """Scrapes quotes.toscrape.com: for each quote, follows the author link
    and yields an item combining the quote with the author's details."""
    name = "all-2"
    allowed_domains = ["toscrape.com"]
    start_urls = [
        "http://quotes.toscrape.com/"
    ]
    def parse(self, response):
        """Extract each quote's text/tags, then follow its author page,
        forwarding the partial item via cb_kwargs."""
        for sel in response.xpath('//div[@class="quote"]'):
            self.logger.info('parse 1')
            item = {
                'text': sel.xpath('span[@class="text"]/text()').get(),
                'tags': sel.css('div.tags a.tag::text').getall()
            }
            yield from response.follow_all(urls=sel.css('.author + a'), callback=self.parse_author, cb_kwargs=dict(item=item))
            # yield from response.follow_all(css='ul.pager a', callback=self.parse)
    def parse_author(self, response, item):
        """Fill in the author's name and birthdate, then yield the completed item."""
        def extract_with_css(query):
            # Missing nodes yield '' rather than None so .strip() is safe.
            return response.css(query).get(default='').strip()
        item['author'] = {
            'name': extract_with_css('h3.author-title::text'),
            'birthdate': extract_with_css('.author-born-date::text')
        }
        yield item
| bigbaldy1128/scrapy-demo | tutorial/spiders/all_spider2.py | all_spider2.py | py | 1,134 | python | en | code | 0 | github-code | 13 |
25552307447 | class Computer:
    def __init__(self):
        # Default identity; callers may overwrite these attributes directly.
        self.name = 'Rachel'
        self.age = 28
    def update(self):
        # Sets age to a fixed 30; demonstrates mutating instance state via a method.
        self.age = 30
# Demo: two independent instances; mutating c1 leaves c2's defaults untouched.
c1 = Computer()
c2 = Computer()
c1.name = 'Emma'
c1.age = 12
c1.update()
print(c1.name)
print(c1.age) | draksha22/python | update.py | update.py | py | 243 | python | en | code | 0 | github-code | 13 |
37274133172 | __author__ = 'DafniAntotsiou'
from gym.envs.registration import register
# Register the extended MuJoCo environments with Gym so they can be created
# via gym.make("<id>"); the entry-point classes live in gym_ext/envs.
register(
    id='InvertedPendulum_ext-v2',
    entry_point='gym_ext.envs:InvertedPendulumEnvExt',
    max_episode_steps=1000,
)
register(
    id='HalfCheetah_ext-v2',
    entry_point='gym_ext.envs:HalfCheetahEnvExt',
    max_episode_steps=1000,
)
| DaphneAntotsiou/Adversarial-Imitation-Learning-with-Trajectorial-Augmentation-and-Correction | gym_ext/__init__.py | __init__.py | py | 325 | python | en | code | 1 | github-code | 13 |
73381534418 | # -*- coding: utf-8 -*-
"""
@description:
@author: LiuXin
@contact: xinliu1996@163.com
@Created on: 2020/11/1 下午10:20
"""
import os
import time
import random
import numpy as np
import argparse
import datetime
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.data import DataLoader
from utils.loss import Loss
from utils.summaries import TensorboardSummary
from utils.modeltools import netParams
from utils.set_logger import get_logger
from train import Trainer
from test import Tester
from dataloader.skmt import SkmtDataSet
from modeling import build_skmtnet
[]
def main(args,logger,summary):
    """Train the SKMT segmentation network and periodically evaluate it.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line options (lr, batch size, epochs, savedir, ...).
    logger : logging.Logger
        Destination for progress and metric reports.
    summary : TensorboardSummary
        Factory for the tensorboard writer used by trainer/tester.
    """
    cudnn.enabled = True # Enables bencnmark mode in cudnn, to enable the inbuilt
    cudnn.benchmark = True # cudnn auto-tuner to find the best algorithm to use for
    # our hardware
    # Seed every RNG with one random value so runs are reproducible given the logged seed.
    seed = random.randint(1, 10000)
    logger.info('======>random seed {}'.format(seed))
    random.seed(seed)  # python random seed
    np.random.seed(seed)  # set numpy random seed
    torch.manual_seed(seed)  # set random seed for cpu
    # train_set = VaiHinGen(root=args.root, split='trainl',outer_size=2*args.image_size,centre_size=args.image_size)
    # test_set = VaiHinGen(root=args.root, split='testl',outer_size=2*args.image_size,centre_size=args.image_size)
    train_set=SkmtDataSet(args,split='train')
    val_set = SkmtDataSet(args, split='val')
    kwargs = {'num_workers': args.workers, 'pin_memory': True}
    train_loader = DataLoader(train_set, batch_size=args.batch_size, drop_last=True, shuffle=False, **kwargs)
    # Validation runs one image at a time.
    test_loader = DataLoader(val_set, batch_size=1, drop_last=True, shuffle=False, **kwargs)
    logger.info('======> building network')
    # set model
    model = build_skmtnet(backbone='resnet50',auxiliary_head=args.auxiliary, trunk_head='deeplab',
                          num_classes=args.num_classes,output_stride = 16)
    logger.info("======> computing network parameters")
    total_paramters = netParams(model)
    logger.info("the number of parameters: " + str(total_paramters))
    # setup optimizer
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.weight_decay)
    # setup savedir
    # NOTE(review): args.savedir was already extended with run_id in __main__;
    # this appends a second level (model/bs/gpu) below it — confirm intended.
    args.savedir = (args.savedir + '/' + args.model + 'bs'
                    + str(args.batch_size) + 'gpu' + str(args.gpus) + '/')
    if not os.path.exists(args.savedir):
        os.makedirs(args.savedir)
    # setup optimization criterion
    criterion = Loss(args)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)  # set random seed for all GPU
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
        model=nn.DataParallel(model).cuda()
        criterion=criterion.cuda()
    start_epoch = 0
    best_epoch = 0.
    best_overall = 0.
    best_mIoU = 0.
    best_F1 = 0.
    trainer=Trainer(args=args,dataloader=train_loader,model=model,
                    optimizer=optimizer,criterion=criterion,logger=logger,summary=summary)
    tester = Tester(args=args,dataloader=test_loader,model=model,
                    criterion=criterion,logger=logger,summary=summary)
    writer=summary.create_summary()
    for epoch in range(start_epoch,args.max_epochs):
        trainer.train_one_epoch(epoch,writer)
        # Evaluate every show_val_interval epochs.
        if(epoch%args.show_val_interval==0):
            score, class_iou, class_acc,class_F1=tester.test_one_epoch(epoch,writer)
            logger.info('======>Now print overall info:')
            for k, v in score.items():
                logger.info('======>{0:^18} {1:^10}'.format(k, v))
            logger.info('======>Now print class acc')
            for k, v in class_acc.items():
                print('{}: {:.5f}'.format(k, v))
                logger.info('======>{0:^18} {1:^10}'.format(k, v))
            logger.info('======>Now print class iou')
            for k, v in class_iou.items():
                print('{}: {:.5f}'.format(k, v))
                logger.info('======>{0:^18} {1:^10}'.format(k, v))
            logger.info('======>Now print class_F1')
            for k, v in class_F1.items():
                logger.info('======>{0:^18} {1:^10}'.format(k, v))
            if score["Mean IoU(8) : \t"] > best_mIoU:
                best_mIoU = score["Mean IoU(8) : \t"]
            # NOTE(review): best_model.pth and best_epoch track overall accuracy
            # only, not mIoU/F1 — confirm that is the intended selection metric.
            if score["Overall Acc : \t"] > best_overall:
                best_overall = score["Overall Acc : \t"]
                # save model in best overall Acc
                model_file_name = args.savedir + '/best_model.pth'
                torch.save(model.state_dict(), model_file_name)
                best_epoch = epoch
            if score["Mean F1 : \t"] > best_F1:
                best_F1 = score["Mean F1 : \t"]
            logger.info("======>best mean IoU:{}".format(best_mIoU))
            logger.info("======>best overall : {}".format(best_overall))
            logger.info("======>best F1: {}".format(best_F1))
            logger.info("======>best epoch: {}".format(best_epoch))
        # save the model (latest checkpoint, overwritten every epoch)
        model_file_name = args.savedir + '/model.pth'
        state = {"epoch": epoch + 1, "model": model.state_dict()}
        logger.info('======> Now begining to save model.')
        torch.save(state, model_file_name)
        logger.info('======> Save done.')
logger.info('======> Save done.')
if __name__ == '__main__':
    import timeit
    start = timeit.default_timer()
    parser = argparse.ArgumentParser(description='Semantic Segmentation...')
    parser.add_argument('--model', default='skmtnet', type=str)
    parser.add_argument('--auxiliary', default=None, type=str)
    parser.add_argument('--batch_size', default=2, type=int)
    parser.add_argument('--image_size', default=512, type=int)
    parser.add_argument('--crop_size', default=512, type=int)
    parser.add_argument('--max_epochs', type=int, help='the number of epochs: default 100 ')
    parser.add_argument('--num_classes', type=int)
    parser.add_argument('--lr', type=float)
    parser.add_argument('--weight_decay', default=4e-5, type=float)
    parser.add_argument('--workers', type=int, default=4, help=" the number of parallel threads")
    parser.add_argument('--show_interval', default=50, type=int)
    parser.add_argument('--show_val_interval', default=1, type=int)
    parser.add_argument('--savedir', default="./runs", help="directory to save the model snapshot")
    # parser.add_argument('--logFile', default= "log.txt", help = "storing the training and validation logs")
    parser.add_argument('--gpus', type=str, default='1')
    parser.add_argument('--resume', default=None, help="the resume model path")
    args = parser.parse_args()
    # Build a unique run id from lr/batch-size plus the current timestamp.
    run_id = 'lr{}_bz{}'.format(args.lr, args.batch_size) \
             + datetime.datetime.now().strftime('%Y-%m-%d_%H:%M')  # now
    args.savedir = os.path.join(args.savedir, str(run_id))
    if not os.path.exists(args.savedir):
        os.makedirs(args.savedir)
    logger = get_logger(args.savedir)
    logger.info('just do it')
    logger.info('Now run_id {}'.format(run_id))
    # Fail fast if a resume checkpoint was requested but does not exist.
    if (args.resume):
        if not os.path.exists(args.resume):
            raise Exception("the path of resume is empty!!")
    # Set up tensorboard output under the run directory.
    summary = TensorboardSummary(args.savedir)
    logger.info('======>Input arguments:')
    for key, val in vars(args).items():
        logger.info('======> {:16} {}'.format(key, val))
    # Start training.
    main(args, logger, summary)
    end = timeit.default_timer()
    logger.info("training time:{:.4f}".format(1.0 * (end - start) / 3600))
    logger.info('model save in {}.'.format(run_id))
| UESTC-Liuxin/SKMT | SkmtSeg/main.py | main.py | py | 7,643 | python | en | code | 0 | github-code | 13 |
25730102895 | """
Crossflow-enabled classes for WElib
Provides crossflow-compatible building blocks to code weighted ensemble simulation
workflows.
Classes:
CrossflowFunctionStepper
CrossflowFunctionProgressCoordinator
"""
from .base import Recorder
class CrossflowFunctionStepper(object):
    """Moves (updates the states of) lists of walkers via a crossflow cluster.

    Attributes
    ----------
    recorder : Recorder
        Records every state each walker passes through.

    Methods
    -------
    run(walkers):
        Update the states of a list of walkers.
    """

    def __init__(self, client, function, *args):
        """Create the stepper.

        Parameters
        ----------
        client : Crossflow Client
            Client connected to a dask.distributed cluster.
        function : function
            Callable with signature ``new_state = function(old_state, *args)``.
        *args : list, optional
            Extra arguments forwarded to *function*.
        """
        self.client = client
        self._function = function
        self._args = args
        self.recorder = Recorder()

    def run(self, walkers):
        """Advance each walker's state by one application of the stepper function.

        Parameters
        ----------
        walkers : list
            Walkers whose states will be updated (a single walker is accepted).

        Returns
        -------
        list
            The same walkers with updated states.
        """
        if not isinstance(walkers, list):
            walkers = [walkers]
        self.recorder.record(walkers)
        # Fan the state updates out to the cluster, then collect results in order.
        futures = self.client.map(self._function, [w.state for w in walkers], *self._args)
        for walker, future in zip(walkers, futures):
            walker.update(future.result())
        self.recorder.record(walkers)
        return walkers
class CrossflowFunctionProgressCoordinator(object):
    """
    Updates walker progress coordinates by running a crossflow-compatible
    function on a dask.distributed cluster.

    Methods
    -------
    run(walkers)
        Update the progress coordinates for a list of walkers
    """

    def __init__(self, client, pcfunc, *args):
        """
        Create a progress coordinator.

        Parameters
        ----------
        client : Crossflow Client
            a client attached to a dask.distributed cluster
        pcfunc : function
            callable with the signature:
            progress_coordinates = pcfunc(state, *args)
        *args : list, optional
            extra arguments forwarded to pcfunc on every call
        """
        self.client = client
        self.pcfunc = pcfunc
        self.args = args

    def run(self, walkers):
        """
        Update the progress coordinates of every walker in *walkers*.

        Parameters
        ----------
        walkers : Walker or list of Walkers
            walkers whose progress coordinates will be updated

        Returns
        -------
        list
            the same walkers, with refreshed progress coordinates
        """
        walker_list = walkers if isinstance(walkers, list) else [walkers]
        futures = self.client.map(
            self.pcfunc, [w.state for w in walker_list], *self.args
        )
        for future, walker in zip(futures, walker_list):
            result = future.result()
            # Normalise the function's result to a list of coordinates.
            walker.pcs = list(result) if isinstance(result, (list, tuple)) else [result]
            # Remember the very first coordinates a walker ever had.
            if walker._initial_pcs is None:
                walker._initial_pcs = walker.pcs
        return walker_list
| CharlieLaughton/WElib | WElib/crossflow.py | crossflow.py | py | 3,535 | python | en | code | 0 | github-code | 13 |
24637196240 | ########################################################################################################
# The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM
########################################################################################################
import gradio as gr
import os, copy, types, gc, sys
import numpy as np
try:
os.environ["CUDA_VISIBLE_DEVICES"] = sys.argv[1]
except:
pass
np.set_printoptions(precision=4, suppress=True, linewidth=200)
args = types.SimpleNamespace()
########################################################################################################
# 选择语言
print('\nPlease choose the language.')
language_list = [('English','English'),('Chinese','简体中文')]
#for i in range(len(language_list)):
# print(f"{i} = {language_list[i][1]}")
#CHAT_LANG = language_list[int(input("Waiting for the language ref (etc. 0): "))][0]
CHAT_LANG = 'Chinese'
# 选择模式
print('\nPlease choose the mode.')
mode_list = [('cuda','GPU'),('cpu','CPU')]
for i in range(len(mode_list)):
print(f"{i} = {mode_list[i][1]}")
#_temp_mode = mode_list[int(input("Waiting for the mode ref (etc. 0): "))][0]
_temp_mode = 'cuda'
if(_temp_mode == "cuda"):
args.RUN_DEVICE = "cuda"
args.FLOAT_MODE = "fp16"
if(_temp_mode == "cpu"):
args.RUN_DEVICE = "cpu"
args.FLOAT_MODE = "fp32"
os.environ["RWKV_JIT_ON"] = '1' # '1' or '0', please use torch 1.13+ and benchmark speed
QA_PROMPT = False # True: Q & A prompt // False: User & Bot prompt
# 中文问答设置QA_PROMPT=True(只能问答,问答效果更好,但不能闲聊) 中文聊天设置QA_PROMPT=False(可以闲聊,但需要大模型才适合闲聊)
# Download RWKV-4 models from https://huggingface.co/BlinkDL
# 选择模型
print('\nPlease choose the model.')
models_list = os.listdir(r'./models/')
for i in range(len(models_list)):
print(f"{i} = {models_list[i]}")
#model_num = int(input("Waiting for the model ref (etc. 0): "))
model_num = 0
args.MODEL_NAME = f"./models/{os.path.splitext(models_list[model_num])[0]}"
# if CHAT_LANG == 'English':
# args.MODEL_NAME = '/fsx/BlinkDL/HF-MODEL/rwkv-4-pile-14b/RWKV-4-Pile-14B-20230204-7324'
# # args.MODEL_NAME = '/fsx/BlinkDL/HF-MODEL/rwkv-4-pile-7b/RWKV-4-Pile-7B-20221115-8047'
# # args.MODEL_NAME = '/fsx/BlinkDL/HF-MODEL/rwkv-4-pile-3b/RWKV-4-Pile-3B-20221110-ctx4096'
# # args.MODEL_NAME = '/fsx/BlinkDL/HF-MODEL/rwkv-4-pile-3b/RWKV-4-Pile-3B-Instruct-test1-20230124'
# # args.MODEL_NAME = '/fsx/BlinkDL/HF-MODEL/rwkv-4-pile-1b5/RWKV-4-Pile-1B5-20220903-8040'
# # args.MODEL_NAME = '/fsx/BlinkDL/HF-MODEL/rwkv-4-pile-430m/RWKV-4-Pile-430M-20220808-8066'
# # args.MODEL_NAME = '/fsx/BlinkDL/HF-MODEL/rwkv-4-pile-169m/RWKV-4-Pile-169M-20220807-8023'
# # args.MODEL_NAME = '/fsx/BlinkDL/CODE/_PUBLIC_/RWKV-LM/RWKV-v4neo/7-run1z/rwkv-340'
# # args.MODEL_NAME = '/fsx/BlinkDL/CODE/_PUBLIC_/RWKV-LM/RWKV-v4neo/14b-run1/rwkv-6210'
# elif CHAT_LANG == 'Chinese':
# args.MODEL_NAME = '/models/RWKV-4-Pile-3B-Instruct-test2-20230209'
# # args.MODEL_NAME = '/fsx/BlinkDL/HF-MODEL/rwkv-4-pile-3b/RWKV-4-Pile-3B-EngChn-test4-20230115'
# # args.MODEL_NAME = '/fsx/BlinkDL/HF-MODEL/rwkv-4-pile-1b5/RWKV-4-Pile-1B5-EngChn-test4-20230115'
# # args.MODEL_NAME = '/fsx/BlinkDL/CODE/_PUBLIC_/RWKV-LM/RWKV-v4neo/7-run1z/rwkv-490'
# # args.MODEL_NAME = '/fsx/BlinkDL/CODE/_PUBLIC_/RWKV-LM/RWKV-v4neo/1.5-run1z/rwkv-415'
args.ctx_len = 1024
CHAT_LEN_SHORT = 40
CHAT_LEN_LONG = 150
FREE_GEN_LEN = 200
GEN_TEMP = 1.0
GEN_TOP_P = 0.85
AVOID_REPEAT = ',。:?!'
########################################################################################################
os.environ["RWKV_RUN_DEVICE"] = args.RUN_DEVICE
print(f'\nLoading ChatRWKV - {CHAT_LANG} - {args.RUN_DEVICE} - {args.FLOAT_MODE} - QA_PROMPT {QA_PROMPT}')
import torch
# please tune these (test True/False for all of them). can significantly improve speed.
# torch._C._jit_set_profiling_executor(True)
# torch._C._jit_set_profiling_mode(True)
# torch._C._jit_override_can_fuse_on_cpu(True)
# torch._C._jit_override_can_fuse_on_gpu(True)
# torch._C._jit_set_texpr_fuser_enabled(False)
# torch._C._jit_set_nvfuser_enabled(False)
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.allow_tf32 = True
torch.backends.cuda.matmul.allow_tf32 = True
from src.model_run import RWKV_RNN
from src.utils import TOKENIZER
tokenizer = TOKENIZER("20B_tokenizer.json")
args.vocab_size = 50277
args.head_qk = 0
args.pre_ffn = 0
args.grad_cp = 0
args.my_pos_emb = 0
MODEL_NAME = args.MODEL_NAME
if CHAT_LANG == 'English':
interface = ":"
if QA_PROMPT:
user = "Q"
bot = "A"
intro = f'The following is a verbose and detailed Q & A conversation of factual information.'
else:
user = "User"
bot = "Bot"
intro = f'The following is a verbose and detailed conversation between an AI assistant called {bot}, and a human user called {user}. {bot} is intelligent, knowledgeable, wise and polite.'
init_prompt = f'''
{intro}
{user}{interface} french revolution what year
{bot}{interface} The French Revolution started in 1789, and lasted 10 years until 1799.
{user}{interface} 3+5=?
{bot}{interface} The answer is 8.
{user}{interface} guess i marry who ?
{bot}{interface} Only if you tell me more about yourself - what are your interests?
{user}{interface} solve for a: 9-a=2
{bot}{interface} The answer is a = 7, because 9 - 7 = 2.
{user}{interface} wat is lhc
{bot}{interface} LHC is a high-energy particle collider, built by CERN, and completed in 2008. They used it to confirm the existence of the Higgs boson in 2012.
'''
HELP_MSG = '''Commands:
say something --> chat with bot. use \\n for new line.
+ --> alternate chat reply
+reset --> reset chat
+gen YOUR PROMPT --> free generation with any prompt. use \\n for new line.
+qa YOUR QUESTION --> free generation - ask any question (just ask the question). use \\n for new line.
+++ --> continue last free generation (only for +gen / +qa)
++ --> retry last free generation (only for +gen / +qa)
Now talk with the bot and enjoy. Remember to +reset periodically to clean up the bot's memory. Use RWKV-4 14B for best results.
This is not instruct-tuned for conversation yet, so don't expect good quality. Better use +gen for free generation.
'''
elif CHAT_LANG == 'Chinese':
interface = ":"
if QA_PROMPT:
user = "Q"
bot = "A"
init_prompt = f'''
Expert Questions & Helpful Answers
Ask Research Experts
'''
else:
user = "User"
bot = "Bot"
init_prompt = f'''
The following is a verbose and detailed conversation between an AI assistant called {bot}, and a human user called {user}. {bot} is intelligent, knowledgeable, wise and polite.
{user}{interface} wat is lhc
{bot}{interface} LHC is a high-energy particle collider, built by CERN, and completed in 2008. They used it to confirm the existence of the Higgs boson in 2012.
{user}{interface} 企鹅会飞吗
{bot}{interface} 企鹅是不会飞的。它们的翅膀主要用于游泳和平衡,而不是飞行。
'''
HELP_MSG = '''指令:
直接输入内容 --> 和机器人聊天(建议问机器人问题),用\\n代表换行
+ --> 让机器人换个回答
+reset --> 重置对话
+gen 某某内容 --> 续写任何中英文内容,用\\n代表换行
+qa 某某问题 --> 问独立的问题(忽略上下文),用\\n代表换行
+qq 某某问题 --> 问独立的问题(忽略上下文),且敞开想象力,用\\n代表换行
+++ --> 继续 +gen / +qa / +qq 的回答
++ --> 换个 +gen / +qa / +qq 的回答
现在可以输入内容和机器人聊天(注意它不大懂中文,它更懂英文)。请经常使用 +reset 重置机器人记忆。
目前没有“重复惩罚”,所以机器人有时会重复,此时必须使用 + 换成正常回答,以免污染电脑记忆。
注意:和上下文无关的独立问题,必须用 +qa 或 +qq 问,以免污染电脑记忆。
'''
# Load Model
print(f'Loading model - {MODEL_NAME}')
model = RWKV_RNN(args)
model_tokens = []
model_state = None
AVOID_REPEAT_TOKENS = []
for i in AVOID_REPEAT:
dd = tokenizer.encode(i)
assert len(dd) == 1
AVOID_REPEAT_TOKENS += dd
########################################################################################################
def run_rnn(tokens, newline_adj = 0):
    """Feed *tokens* through the RNN, extend the running context, and
    return the adjusted output logits.

    ``newline_adj`` is added to the logit of token 187 ('\\n') to make
    ending the current line more or less likely.  Mutates the module-level
    ``model_tokens`` / ``model_state``.
    """
    global model_tokens, model_state

    tokens = [int(x) for x in tokens]
    model_tokens += tokens
    out, model_state = model.forward(tokens, model_state)

    # print(f'### model ###\n{tokens}\n[{tokenizer.decode(model_tokens)}]')

    out[0] = -999999999  # disable <|endoftext|>
    out[187] += newline_adj # adjust \n probability
    # if newline_adj > 0:
    #     out[15] += newline_adj / 2 # '.'
    # Hard-ban repeating the punctuation listed in AVOID_REPEAT back-to-back.
    if model_tokens[-1] in AVOID_REPEAT_TOKENS:
        out[model_tokens[-1]] = -999999999
    return out
all_state = {}
def save_all_stat(srv, name, last_out):
    # Snapshot the current RNN state, token history and last logits under
    # the key "<name>_<srv>" so the conversation can be rewound later.
    key = f'{name}_{srv}'
    all_state[key] = {
        'out': last_out,
        'rnn': copy.deepcopy(model_state),
        'token': copy.deepcopy(model_tokens),
    }
local_state = {}
def save_local_stat(srv, name, last_out):
    # Same snapshot format as save_all_stat, but written to the
    # process-local ``local_state`` dict used to seed each Gradio session.
    key = f'{name}_{srv}'
    local_state[key] = {
        'out': last_out,
        'rnn': copy.deepcopy(model_state),
        'token': copy.deepcopy(model_tokens),
    }
def load_all_stat(srv, name):
    # Restore a snapshot saved by save_all_stat and return its logits.
    # Deep copies keep the stored snapshot immune to later mutation.
    global model_tokens, model_state
    snapshot = all_state[f'{name}_{srv}']
    model_state = copy.deepcopy(snapshot['rnn'])
    model_tokens = copy.deepcopy(snapshot['token'])
    return snapshot['out']
########################################################################################################
# Run inference
print(f'\nRun prompt...')
out = run_rnn(tokenizer.encode(init_prompt))
save_local_stat('', 'chat_init', out)
gc.collect()
torch.cuda.empty_cache()
srv_list = ['dummy_server']
for s in srv_list:
save_local_stat(s, 'chat', out)
print(f'### prompt ###\n[{tokenizer.decode(model_tokens)}]\n')
def reply_msg(msg):
    """Print *msg* to the console, formatted as a bot reply.

    Fix: the original initialised a local ``outtext = ""`` that was never
    read or returned; the dead variable is removed.
    """
    print(f'{bot}{interface} {msg}\n')
def on_message(message, state, turbostats):
    """Gradio callback: process one user message and stream the reply.

    ``message`` is the raw textbox content, ``state`` the per-session
    snapshot dict (gr.State), ``turbostats`` the (user, bot) history list
    shown in the Chatbot widget.  Returns (history, state, history) and
    mutates the module-level ``model_tokens`` / ``model_state`` /
    ``all_state``.  Handles the "+reset", "+gen", "+qa", "+qq", "+++",
    "++" and "+" commands described in HELP_MSG; any other text is a
    normal chat turn.
    """
    print(f'{user}{interface} {message}')

    global model_tokens, model_state, all_state
    all_state = state
    outtext = ""
    srv = 'dummy_server'

    # A literal "\n" typed by the user becomes a real newline.
    msg = message.replace('\\n','\n').strip()
    # if len(msg) > 1000:
    #     reply_msg('your message is too long (max 1000 tokens)')
    #     return

    # Optional per-message sampling overrides "-temp=X" / "-top_p=Y" are
    # stripped out of the text and clamped to safe ranges below.
    x_temp = GEN_TEMP
    x_top_p = GEN_TOP_P
    if ("-temp=" in msg):
        x_temp = float(msg.split("-temp=")[1].split(" ")[0])
        msg = msg.replace("-temp="+f'{x_temp:g}', "")
        # print(f"temp: {x_temp}")
    if ("-top_p=" in msg):
        x_top_p = float(msg.split("-top_p=")[1].split(" ")[0])
        msg = msg.replace("-top_p="+f'{x_top_p:g}', "")
        # print(f"top_p: {x_top_p}")
    if x_temp <= 0.2:
        x_temp = 0.2
    if x_temp >= 5:
        x_temp = 5
    if x_top_p <= 0:
        x_top_p = 0

    if msg == '+reset':
        # Restore the initial prompt snapshot and start the chat over.
        out = load_all_stat('', 'chat_init')
        save_all_stat(srv, 'chat', out)
        reply_msg("Chat reset.")
        return turbostats,state,turbostats

    elif msg[:5].lower() == '+gen ' or msg[:4].lower() == '+qa ' or msg[:4].lower() == '+qq ' or msg.lower() == '+++' or msg.lower() == '++':
        # ---- free-generation commands ----------------------------------
        if msg[:5].lower() == '+gen ':
            # Continue arbitrary text from a fresh (empty) model state.
            new = '\n' + msg[5:].strip()
            # print(f'### prompt ###\n[{new}]')
            model_state = None
            model_tokens = []
            out = run_rnn(tokenizer.encode(new))
            save_all_stat(srv, 'gen_0', out)

        elif msg[:4].lower() == '+qq ':
            # Stand-alone Q/A from a fresh state (more imaginative).
            new = '\nQ: ' + msg[4:].strip() + '\nA:'
            # print(f'### prompt ###\n[{new}]')
            model_state = None
            model_tokens = []
            out = run_rnn(tokenizer.encode(new))
            save_all_stat(srv, 'gen_0', out)

        elif msg[:4].lower() == '+qa ':
            # Stand-alone question asked against the initial chat prompt.
            out = load_all_stat('', 'chat_init')

            real_msg = msg[4:].strip()
            new = f"{user}{interface} {real_msg}\n\n{bot}{interface}"
            # print(f'### qa ###\n[{new}]')

            out = run_rnn(tokenizer.encode(new))
            save_all_stat(srv, 'gen_0', out)

        elif msg.lower() == '+++':
            # Continue the last free generation.
            try:
                out = load_all_stat(srv, 'gen_1')
                save_all_stat(srv, 'gen_0', out)
            except:  # NOTE(review): bare except hides real errors; narrow to KeyError.
                return turbostats,state,turbostats

        elif msg.lower() == '++':
            # Retry the last free generation from its starting snapshot.
            try:
                out = load_all_stat(srv, 'gen_0')
            except:  # NOTE(review): bare except hides real errors; narrow to KeyError.
                return turbostats,state,turbostats

        begin = len(model_tokens)
        out_last = begin
        # Sample up to FREE_GEN_LEN tokens (plus slack for multi-byte UTF-8).
        for i in range(FREE_GEN_LEN+100):
            token = tokenizer.sample_logits(
                out,
                model_tokens,
                args.ctx_len,
                temperature=x_temp,
                top_p=x_top_p,
            )
            if msg[:4].lower() == '+qa ':# or msg[:4].lower() == '+qq ':
                out = run_rnn([token], newline_adj=-2)
            else:
                out = run_rnn([token])

            xxx = tokenizer.decode(model_tokens[out_last:])
            if '\ufffd' not in xxx: # avoid utf-8 display issues
                outtext += xxx
                print(xxx, end='', flush=True)
                out_last = begin + i + 1
            if i >= FREE_GEN_LEN:
                # Length limit reached: commit this turn and return.
                turbostats.append((msg,outtext))
                state = all_state
                return turbostats,state,turbostats
        outtext += '\n'
        print('\n')
        # send_msg = tokenizer.decode(model_tokens[begin:]).strip()
        # print(f'### send ###\n[{send_msg}]')
        # reply_msg(send_msg)
        save_all_stat(srv, 'gen_1', out)

    else:
        # ---- normal chat turn ------------------------------------------
        if msg.lower() == '+':
            # "+" = regenerate: rewind to the state before the last reply.
            try:
                out = load_all_stat(srv, 'chat_pre')
            except:  # NOTE(review): bare except hides real errors; narrow to KeyError.
                return turbostats,state,turbostats
        else:
            out = load_all_stat(srv, 'chat')
            new = f"{user}{interface} {msg}\n\n{bot}{interface}"
            # print(f'### add ###\n[{new}]')

            # newline_adj=-999999999 forbids an immediate newline so the
            # bot cannot reply with an empty line.
            out = run_rnn(tokenizer.encode(new), newline_adj=-999999999)
            save_all_stat(srv, 'chat_pre', out)

        begin = len(model_tokens)
        out_last = begin
        print(f'{bot}{interface}', end='', flush=True)
        i = 0
        while True:
            # Newline logit schedule: forbid early line breaks, stay
            # neutral mid-reply, then push ever harder to end the reply.
            if i == 0:
                newline_adj = -999999999
            elif i <= CHAT_LEN_SHORT:
                newline_adj = (i - CHAT_LEN_SHORT) / 10
            elif i <= CHAT_LEN_LONG:
                newline_adj = 0
            else:
                newline_adj = (i - CHAT_LEN_LONG) * 0.25 # MUST END THE GENERATION
            token = tokenizer.sample_logits(
                out,
                model_tokens,
                args.ctx_len,
                temperature=x_temp,
                top_p=x_top_p,
            )
            out = run_rnn([token], newline_adj=newline_adj)

            xxx = tokenizer.decode(model_tokens[out_last:])
            if '\ufffd' not in xxx: # avoid utf-8 display issues
                outtext += xxx
                print(xxx, end='', flush=True)
                out_last = begin + i + 1

            send_msg = tokenizer.decode(model_tokens[begin:])
            if '\n\n' in send_msg:
                # A blank line marks the end of the bot's turn.
                send_msg = send_msg.strip()
                turbostats.append((msg,outtext))
                state = all_state
                return turbostats,state,turbostats
            i += 1
        # NOTE(review): unreachable -- the while-loop above only exits via
        # return, so the 'chat' snapshot is never refreshed here.
        save_all_stat(srv, 'chat', out)
# print(HELP_MSG)
# Gradio front-end: a single textbox chat wired to on_message.
with gr.Blocks() as demo:
    gr.Markdown(
    """
    # ChatRWKV

    玩的开心!
    """)
    chatbot = gr.Chatbot()
    # Per-session copies of the saved model snapshots and the chat history.
    state = gr.State(local_state)
    turbostats = gr.State([])
    with gr.Row():
        txt = gr.Textbox(show_label=False, placeholder="输入后回车").style(container=False)
    # Pressing Enter routes the text through on_message and refreshes the UI.
    txt.submit(on_message, [txt,state,turbostats], [chatbot,state,turbostats])
demo.launch(share=True)
# while True:
# msg = input(f'{user}{interface} ')
# if len(msg.strip()) > 0:
# on_message(msg)
# else:
# print('Error: please say something')
| 1500231819/ChatRWKV-WebUI | chat.py | chat.py | py | 16,333 | python | en | code | null | github-code | 13 |
12767861870 |
def netIncomeCalculator(state, grossIncome):
    """Return net income after a 10% federal tax and a state-dependent tax.

    State rates: Florida 3%, Texas 5%, Arizona 7%, NewYork 9%.
    For any other state, prints "No state tax" and returns None
    (preserving the original behaviour for unknown states).

    Bug fix: the original unconditionally recomputed
    ``statetax = 9*grossIncome/100`` after the if/elif chain, so every
    state was taxed at the NewYork rate regardless of the branch taken.
    """
    STATE_RATES = {"Florida": 3, "Texas": 5, "Arizona": 7, "NewYork": 9}
    if state not in STATE_RATES:
        print("No state tax")
        return
    federalTax = 10 * grossIncome / 100
    statetax = STATE_RATES[state] * grossIncome / 100
    netIncome = grossIncome - federalTax - statetax
    print("netIncome for %s is %d" % (state, netIncome))
    return netIncome
#statename= input("Enter state name")
#income= input("Enter grossIncome")
z = netIncomeCalculator("Florida",22220) | Nazreen20/NazreenPractice2020 | assignments/calculateTax.py | calculateTax.py | py | 690 | python | en | code | 0 | github-code | 13 |
39896942351 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn import preprocessing
import pandas as pd
import random
import itertools
import seaborn as sns
sns.set(style = 'darkgrid')
# Render a confusion matrix as an annotated heatmap (adapted from the
# scikit-learn plotting examples).
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    Print and plot the confusion matrix *cm* for the given class labels.

    Parameters
    ----------
    cm : ndarray of shape (n_classes, n_classes)
        Confusion matrix as returned by sklearn.metrics.confusion_matrix.
    classes : sequence of str
        Tick labels for both axes, in the same order as ``cm``'s rows.
    normalize : bool
        When True, each row is divided by its sum so cells show per-class
        rates instead of raw counts.
    title : str
        Plot title.
    cmap : matplotlib colormap
        Colour map for the heatmap.
    """
    # Bug fix: normalise *before* drawing.  The original called plt.imshow
    # first and normalised afterwards, so the heatmap colours reflected raw
    # counts while the cell annotations showed normalised rates.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    # Annotate every cell, flipping the text colour on dark backgrounds.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
#read the data file
bc = pd.read_csv('D:\DataSets-master/kaggle.csv')
bc.head(1)
#scale the data into chart and allow better predictive power
bcs = pd.DataFrame(preprocessing.scale(bc.ix[:,2:32]))
bcs.columns = list(bc.ix[:,2:32].columns)
bcs['diagnosis'] = bc['diagnosis']
#unknown diagram corelation btw variable and diagnose
from pandas.tools.plotting import scatter_matrix
p = sns.PairGrid(bcs.ix[:,20:32], hue = 'diagnosis', palette = 'Reds')
p.map_upper(plt.scatter, s = 20, edgecolor = 'w')
p.map_diag(plt.hist)
p.map_lower(sns.kdeplot, cmap = 'GnBu_d')
p.add_legend()
p.figsize = (30,30)
# M & B measurement
mbc = pd.melt(bcs, "diagnosis", var_name="measurement")
fig, ax = plt.subplots(figsize=(10,5))
p = sns.violinplot(ax = ax, x="measurement", y="value", hue="diagnosis", split = True, data=mbc, inner = 'quartile', palette = 'Set2');
p.set_xticklabels(rotation = 90, labels = list(bcs.columns));
#M & B diagram
sns.swarmplot(x = 'diagnosis', y = 'concave points_worst',palette = 'Set2', data = bcs);
#concave point diagram
sns.jointplot(x = bc['concave points_worst'], y = bc['area_mean'], stat_func=None, color="#4CB391", edgecolor = 'w', size = 6);
X = bcs.ix[:,0:30]
y = bcs['diagnosis']
class_names = list(y.unique())
#train the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=42)
#testing accurecy
svc = SVC(kernel = 'linear',C=.1, gamma=10, probability = True)
svc.fit(X,y)
y_pred = svc.fit(X_train, y_train).predict(X_test)
t = pd.DataFrame(svc.predict_proba(X_test))
svc.score(X_train,y_train), svc.score(X_test, y_test)
#confusion matrix
mtrx = confusion_matrix(y_test,y_pred)
np.set_printoptions(precision = 2)
plt.figure()
plot_confusion_matrix(mtrx,classes=class_names,title='Confusion matrix, without normalization')
plt.figure()
plot_confusion_matrix(mtrx, classes=class_names, normalize = True, title='Normalized confusion matrix')
plt.show()
| dante0007/Breast-cancer-prediction-using-ML-algorithms | svm.py | svm.py | py | 3,487 | python | en | code | 0 | github-code | 13 |
11759555806 | from ontology_changes import (
Commit,
CreateProperty,
DeleteClass,
DeleteProperty,
RenameProperty,
SubsumeProperty,
)
from ontology_changes.create_class import CreateClass
from rack.namespaces.rack_ontology import (
AGENTS,
ANALYSIS,
FILE,
PROCESS,
PROV_S,
TESTING,
)
commit = Commit(
number="40955e24b4e38d45df2ffd0ad8aa47a827a4c72f",
changes=[
# AGENTS.sadl
CreateProperty(
name_space=AGENTS,
class_id="TOOL",
property_id="toolInstallationConfiguration",
),
# ANALYSIS.sadl
DeleteProperty(name_space=ANALYSIS, property_id="result"),
DeleteProperty(name_space=ANALYSIS, property_id="metric"),
DeleteProperty(name_space=ANALYSIS, property_id="producedBy"),
DeleteClass(name_space=ANALYSIS, class_id="ANALYSIS_RESULT"),
RenameProperty(
from_name_space=ANALYSIS,
from_class="ANALYSIS",
from_name="performedBy",
to_name_space=ANALYSIS,
to_class="ANALYSIS",
to_name="runBy",
),
CreateProperty(
name_space=ANALYSIS,
class_id="ANALYSIS",
property_id="analyzedWith",
),
CreateProperty(
name_space=ANALYSIS,
class_id="ANALYSIS",
property_id="analysisInput",
),
CreateProperty(
name_space=ANALYSIS,
class_id="ANALYSIS",
property_id="analysisConfiguration",
),
DeleteClass(name_space=ANALYSIS, class_id="ANALYSIS_ANNOTATION_TYPE"),
DeleteClass(name_space=ANALYSIS, class_id="PRECONDITION"),
DeleteClass(name_space=ANALYSIS, class_id="POSTCONDITION"),
DeleteClass(name_space=ANALYSIS, class_id="INVARIANT"),
DeleteClass(name_space=ANALYSIS, class_id="ANALYSIS_ANNOTATION"),
DeleteProperty(name_space=ANALYSIS, property_id="fromReport"),
DeleteProperty(name_space=ANALYSIS, property_id="annotationType"),
# FILE.sadl
RenameProperty(
from_name_space=FILE,
from_class="FILE",
from_name="createBy",
to_name_space=FILE,
to_class="FILE",
to_name="createdBy",
),
# PROCESS.sadl
CreateClass(name_space=PROCESS, class_id="PROPERTY"),
CreateProperty(
name_space=PROCESS, class_id="PROPERTY", property_id="partiallySupports"
),
CreateProperty(name_space=PROCESS, class_id="PROPERTY", property_id="scopeOf"),
CreateProperty(
name_space=PROCESS, class_id="PROPERTY", property_id="mitigates"
),
# PROV-S.sadl
CreateProperty(name_space=PROV_S, class_id="ACTIVITY", property_id="goal"),
# TESTING.sadl
SubsumeProperty(
from_name_space=TESTING,
from_class="TEST",
from_name="producedBy",
to_name_space=PROV_S,
to_class="ENTITY",
to_name="wasGeneratedBy",
),
],
)
| Michielyn/RACK | migration/rack/commits/commit40955e24b4e38d45df2ffd0ad8aa47a827a4c72f.py | commit40955e24b4e38d45df2ffd0ad8aa47a827a4c72f.py | py | 3,085 | python | en | code | null | github-code | 13 |
31320076224 | # noinspection PyUnresolvedReferences
import OPi.GPIO as GPIO # this was installed by sudo, so
from time import sleep # this lets us have a time delay
# --- Pin configuration (BOARD numbering) -------------------------------------
GPIO.setboard(GPIO.PCPCPLUS)  # ZERO
GPIO.setmode(GPIO.BOARD)

clk = 18  # rotary encoder clock pin
dt = 16   # rotary encoder data pin

R_PIN = 33
G_PIN = 35
B_PIN = 37

# The LED lit for a given counter value is LED_PINS[counter % 3]
# (red, green, blue in turn).  This lookup table replaces the three
# duplicated if/elif branches of the original.
LED_PINS = (R_PIN, G_PIN, B_PIN)
for pin in LED_PINS:
    GPIO.setup(pin, GPIO.OUT)

GPIO.setup(clk, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(dt, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)

counter = 0
clkLastState = GPIO.input(clk)
currentLed = R_PIN
GPIO.output(currentLed, 1)

try:
    while True:
        clkState = GPIO.input(clk)
        dtState = GPIO.input(dt)
        if clkState != clkLastState:
            # Whether dt leads or lags clk tells us the rotation direction.
            if dtState != clkState:
                counter += 1
            else:
                counter -= 1
            print(counter)
            # Switch to the LED selected by the new counter value
            # (Python's % is non-negative here even for negative counters).
            GPIO.output(currentLed, 0)
            currentLed = LED_PINS[counter % 3]
            GPIO.output(currentLed, 1)
        clkLastState = clkState
        sleep(0.001)  # If we sleep too long we skip steps when turning fast
finally:
    GPIO.cleanup()
| ClaasM/OrangePiOneExperiments | rotary_encoder_rgb.py | rotary_encoder_rgb.py | py | 1,524 | python | en | code | 0 | github-code | 13 |
15747703886 | class MyClass:
@staticmethod ## static method can be called from an instance or a class
def stat_meth():
print("Look no self was passed")
a = 10 # class variable shared by all instances
def fn(self): # self is representaion of object inside class
print("Hello");
print(MyClass.a)
## print(MyClass.fn()) # will throw error as no object of MyClass is
#specified for self. Rather use MyClass obj to call this method like below
x = MyClass()
x.fn() ## x.fn is a method object
x.stat_meth()
y = MyClass()
y.stat_meth()
# Each value is an object, and therefore has a class (also called its type,
## obtained by type(x)). It is stored as "object.__class__"
print('x.__class__ = ' + str(x.__class__)) #Same is obtained by str(type(x)) #OUTPUT: <class '__main__.MyClass'>
print('type(MyClass) = ' + str(type(MyClass))) #OUTPUT: <class 'type'>
print('value of x.a ' + str(x.a))
print('value of y.a ' + str(y.a))
x.a=100
print('value of x.a after update ' + str(x.a))
print('value of y.a after update ' + str(y.a)) ## only value of x.a is changed
MyClass.fn(MyClass) ## MyClass.fn is a function object
print(MyClass.__doc__)
print('type(MyClass.fn): ',type(MyClass.fn))
objMyClass = MyClass()
print('type(objMyClass.fn): ',type(objMyClass.fn))
## A peculiar thing about methods (in Python) is that the object itself is
# passed on as the first argument to the corresponding function.
## we can write anything "xyz" in place of "self", to represent object inside classs
class ComplexNumber:
    """Toy complex-number container used to demonstrate instance attributes."""

    def __init__(self, x=0, y=0):
        """Store the real part *x* and imaginary part *y* (default 0 + 0j)."""
        self.x = x
        self.y = y

    def displayData(self):
        """Print the number in ``a + bj`` form."""
        message = '{0} + {1}j is the complex number'.format(self.x, self.y)
        print(message)
obj = ComplexNumber()
obj.displayData()
obj1 = ComplexNumber(2, 5)
obj1.displayData()
obj.attr = 100 # attributes of an object can be created on the fly.
# But this attribute is only for obj, not for obj1
print(obj.x, obj.y,obj.attr)
# attributes (variable or method) of an object can be deleted
del obj1.x # deletes attribute x for obj1 only
# print(obj1.x) # this statement will throw error
print(obj.x, obj.y)
# can delete object also
del obj1
# obj1.displayData() # throws error
#### Mutable objects are better not to be used as shared variable
class Dog:
    # NOTE: this class deliberately demonstrates the shared-mutable-state
    # pitfall discussed in the surrounding script: ``tricks`` is a *class*
    # attribute, so the single list object is shared by every Dog instance.
    tricks = []
    z = 30
    def __init__(self, name):
        # Per-instance name.
        self.name = name
    def add_tricks(self,tricks, z):
        # Appends to the SHARED class-level list (visible on all dogs) ...
        self.tricks.append(tricks)
        # ... whereas rebinding ``self.z`` creates an instance attribute
        # that shadows the class attribute, so ``z`` stays per-dog.
        self.z = self.z + z
d = Dog('Fido')
print("-- --d (before calling init() over it)",d.name, id(d))
ret = d.__init__("TESTING") ## __init__() can be called separately too.
print("-----d (after calling init over it)",d.name,id(d))
print("---------------- ret ", type(ret)) ## init() returns None (always)
d.add_tricks('roll over', 100)
e = Dog('Buddy')
e.add_tricks('play dead', 200)
print('Value of mutable list tricks for dog d ' + str(d.tricks))
print('Value of immutable z for dog d ' + str(d.z))
print('Value of immutable z for dog e ' + str(e.z))
# In the above example, mutable attribute tricks is unexpectedly shared by all Dog instances
# to avoid this,
"""
class Dog:
z = 30
def __init__(self, name):
self.name = name
self.tricks = [] ### initailise tricks inside constructor
"""
# Class can be defined in a shorter way using namedtuple
from collections import namedtuple
car = namedtuple("CarClass", "Color1 Mileage2")
carObj = car("Red", "100") #Notice that object is created using "car" name. But on printing carObj, CarClass is considered as class
print(carObj) #prints CarClass(Color1='Red', Mileage2='100')
print("carObj Color: ", carObj.Color1)
print("carObj Mileage: ", carObj.Mileage2)
class A:
    """Tiny demo class: ``fn`` echoes its argument and stores it on the instance."""

    def fn(self, b=0):
        """Print *b* and remember it as ``self.b``."""
        print("I am in A: ", b)
        self.b = b
a = A()
a.fn(100)
| sharmas4/Python_Programs | ClassExample01.py | ClassExample01.py | py | 3,772 | python | en | code | 0 | github-code | 13 |
11628905005 | #!/usr/bin/env python
import pytest
from olympus import Observations, ParameterVector
from olympus.planners import Cma
# use parametrize to test multiple configurations of the planner
@pytest.mark.parametrize("stddev", [0.5, 0.4, 0.6])
def test_planner_ask_tell(two_param_space, stddev):
planner = Cma(stddev=stddev)
planner.set_param_space(param_space=two_param_space)
param = planner.ask()
value = ParameterVector().from_dict({"objective": 0.0})
obs = Observations()
obs.add_observation(param, value)
planner.tell(observations=obs)
| aspuru-guzik-group/olympus | tests/test_planners/test_planner_cma.py | test_planner_cma.py | py | 566 | python | en | code | 70 | github-code | 13 |
16408959877 | import pickle
import pandas as pd
def predict(p1, p2, p3, p4, p5, p6, p7, p8):
    """Predict the group for one student record with the pickled model.

    ``p1`` is accepted for backward compatibility but is not used as a
    feature: the model was trained on seven columns named '1'..'7',
    filled from ``p2``..``p8``.  Returns the single predicted label.
    """
    # Context manager guarantees the handle is closed even if pickle.load
    # raises (the original leaked the file object on error).
    with open('Recom_Pre/pkl/pred_group.pkl', 'rb') as f:
        deci_tree = pickle.load(f)
    # Renamed from ``dict`` to avoid shadowing the builtin.
    features = {'1': [p2], '2': [p3], '3': [p4], '4': [p5],
                '5': [p6], '6': [p7], '7': [p8]}
    X_test = pd.DataFrame(features)
    y_pred = deci_tree.predict(X_test)
    return y_pred[0]
| glorylife/RecommendMining | Recom_Pre/Prediction/load_model.py | load_model.py | py | 502 | python | th | code | 0 | github-code | 13 |
38689358119 | import re
import numpy as np
import pandas as pd
#this function is used to tell if two features are aligned
#for example: key1 = 'humidity', key2 = 'wind',
#return False because this combination is not in the align rule
#and they should be two separate columns in the fusedata
#if key1 = 'humidity', key2 = 'rh'
#return True to indicate that 'humidity' and 'rh' are the same feature
#and their value should be merged together as one column in fusedata
def isaligned(key1, key2):
    """Return True when *key1* and *key2* name the same weather feature.

    Two keys are aligned when they are identical, or when they appear
    together in one of the hard-coded alignment groups (e.g. 'humidity'
    and 'rh' are the same measurement under different names).
    """
    # Alignment rule: each inner list is a group of synonymous feature names.
    groups = [['temp', 'tempreture'], ['humidity', 'rh']]
    if key1 == key2:
        return True
    return any(key1 in group and key2 in group for group in groups)
#look up the input feature (or an aligned synonym of it) among the dataset's features
#NOTE: this returns a (found, matching_name) TUPLE, not a bare bool, e.g.
#featInData('humidity', dataset with ['rh','temp','time']) -> (True, 'rh')
#featInData('pressure', the same dataset) -> (False, '')
#the tuple is always truthy, so callers must unpack it rather than test it directly
def featInData(feat, dataset):
    """Look up *feat* (or an aligned synonym of it) among *dataset*'s features.

    Returns a 2-tuple: ``(True, matching_feature_name)`` when *feat* or one
    of its aligned names is a feature of *dataset*, otherwise ``(False, '')``.
    Callers must unpack the tuple -- the tuple itself is always truthy.
    """
    match = next((name for name in dataset if isaligned(feat, name)), None)
    return (False, '') if match is None else (True, match)
#fuse two weather dataset into one
#the confidence of correctness of website 1 and website2
def weatherfuse(data1, data2, confidence1, confidence2):
    """Fuse two weather tables into one, matching rows on the 'time' column.

    When both sources report the same 'HH:MM' timestamp, each shared
    feature is combined as a confidence-weighted average
    ``(v1*conf1 + v2*conf2) / (conf1 + conf2)`` rounded to 2 decimals.
    Timestamps present in only one source are copied through, with NaN
    filling the other source's columns.  Features that name the same
    quantity under different spellings (see ``isaligned``) share a single
    output column, named after ``data1``'s spelling.

    Parameters
    ----------
    data1, data2 : column-name -> sequence mappings (e.g. pandas DataFrames)
        The two sources; both must contain a 'time' column sorted ascending.
    confidence1, confidence2 : float
        Relative trust in each source, used as averaging weights.

    Returns
    -------
    dict mapping column name -> list of fused values.

    Bug fix: the original tested ``if not featInData(feat2, data1):`` --
    but featInData returns a (found, name) tuple, which is always truthy,
    so columns unique to ``data2`` were silently dropped.  The tuple is
    now unpacked before testing.
    """
    # Start with every data1 column, then add data2-only columns.
    fusedata = {feat: [] for feat in data1}
    for feat2 in data2:
        found, _ = featInData(feat2, data1)
        if not found:
            fusedata[feat2] = []

    # Two-pointer merge over the time-sorted rows of both tables;
    # 10000000 acts as an "exhausted" sentinel that always loses the
    # min-time comparison.
    i = 0
    j = 0
    while i != len(data1) or j != len(data2):
        time1 = 10000000
        time2 = 10000000
        if i < len(data1):
            hh, mm = data1['time'][i].split(':')
            time1 = int(hh) * 60 + int(mm)
        if j < len(data2):
            hh, mm = data2['time'][j].split(':')
            time2 = int(hh) * 60 + int(mm)
        if time1 == time2:
            # Same timestamp: weighted-average the features both tables share.
            for feat in fusedata:
                if feat == 'time':
                    fusedata[feat].append(data1[feat][i])
                    continue
                flag, alignfeat = featInData(feat, data2)
                if feat in data1 and flag:
                    fusedata[feat].append(round(
                        (float(data1[feat][i]) * confidence1 +
                         float(data2[alignfeat][j]) * confidence2)
                        / (confidence1 + confidence2), 2))
                elif feat in data1:
                    # Feature only in data1: nothing to fuse, copy it.
                    fusedata[feat].append(data1[feat][i])
                else:
                    # Feature only in data2: nothing to fuse, copy it.
                    fusedata[feat].append(data2[feat][j])
            i += 1
            j += 1
        elif time1 < time2:
            # Row only in data1: copy it, pad data2-only columns with NaN.
            for feat in fusedata:
                if feat in data1:
                    fusedata[feat].append(data1[feat][i])
                else:
                    fusedata[feat].append(np.nan)
            i += 1
        else:
            # Row only in data2: copy it, pad data1-only columns with NaN.
            for feat in fusedata:
                flag, alignfeat = featInData(feat, data2)
                if flag:
                    fusedata[feat].append(data2[alignfeat][j])
                else:
                    fusedata[feat].append(np.nan)
            j += 1
    return fusedata
if __name__ == '__main__':
data1 = pd.read_csv(r'C:\Users\mypc\Desktop\mds\Data for both\t_2intervaltesta.csv')
data2 = pd.read_csv(r'C:\Users\mypc\Desktop\mds\Data for both\t_2intervaltestb.csv')
#because I use website 1 a lot, thus I have confident to it.
#though I seldom use data2, but it is said that oldersweather
#is one of the best weather app, so I trust it only a bit less.
fusedata = weatherfuse(data1, data2, 0.8, 0.6)
tmp = pd.DataFrame(fusedata)
tmp.to_csv(r'C:\Users\mypc\Desktop\mds\Data for both\myfuse.csv', index=False)
print('End of Test!') | muzzi30/mdsassignment | myfusecode.py | myfusecode.py | py | 5,201 | python | en | code | 0 | github-code | 13 |
27283155662 | import numpy as np
import csv
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
import sys
np.set_printoptions(threshold=sys.maxsize)
with open("data/Grand-slams-men-2013.csv") as f:
teams_comb = list(csv.reader(f))
FSP1 = np.array([])
ACE1 = np.array([])
DBF1 = np.array([])
WNR1 = np.array([])
UFE1 = np.array([])
BPC1 = np.array([])
NPA1 = np.array([])
for rows in teams_comb:
if rows != teams_comb[0]:
FSP1 = np.append(FSP1, int(rows[6]))
ACE1 = np.append(ACE1, int(rows[10]))
DBF1 = np.append(DBF1, int(rows[11]))
WNR1 = np.append(WNR1, int(rows[12]))
UFE1 = np.append(UFE1, int(rows[13]))
BPC1 = np.append(BPC1, int(rows[14]))
NPA1 = np.append(NPA1, int(rows[16]))
X = np.vstack((FSP1, ACE1, DBF1, WNR1, UFE1, BPC1, NPA1)).T
# print(X)
ST1_1 = np.array([])
ST2_1 = np.array([])
ST3_1 = np.array([])
ST4_1 = np.array([])
ST5_1 = np.array([])
for rows in teams_comb:
if rows != teams_comb[0]:
ST1_1 = np.append(ST1_1, int(rows[19]))
ST2_1 = np.append(ST2_1, int(rows[20]))
ST3_1 = np.append(ST3_1, int(rows[21]))
ST4_1 = np.append(ST4_1, int(rows[22]))
ST5_1 = np.append(ST5_1, int(rows[23]))
Y = ST1_1 + ST2_1 + ST3_1 + ST4_1 + ST5_1
X_Train = X[0:200]
X_Test = X[200:240]
# print(X_Train)
# print(X_Test)
Y_Train = Y[0:200]
Y_Test = Y[200:240]
# print(Y_Test)
# print(Y_Train)
def R2(y, y_prediction):
tss = 0
rss = 0
for i in range(len(y)):
tss += (y[i] - np.mean(y)) ** 2
rss += (y[i] - y_prediction[i]) ** 2
r2 = 1 - rss / tss
return r2
R2_1 = []
R2_2 = []
R2_3 = []
for i in range(1, 151):
regression_a = RandomForestRegressor(max_features="auto", max_depth=7, n_estimators=i)
regression_b = RandomForestRegressor(max_features="sqrt", max_depth=7, n_estimators=i)
regression_c = RandomForestRegressor(max_features=4, max_depth=7, n_estimators=i)
regression_a.fit(X_Train, Y_Train)
regression_b.fit(X_Train, Y_Train)
regression_c.fit(X_Train, Y_Train)
y_pred_a = regression_a.predict(X_Test)
y_pred_b = regression_b.predict(X_Test)
y_pred_c = regression_c.predict(X_Test)
R2_1.append(R2(Y_Test, y_pred_a))
R2_2.append(R2(Y_Test, y_pred_b))
R2_3.append(R2(Y_Test, y_pred_c))
regression1 = RandomForestRegressor(max_features=4, max_depth=7, n_estimators=150)
regression2 = RandomForestRegressor(max_features=4, max_depth=1, n_estimators=150)
regression1.fit(X_Train, Y_Train)
regression2.fit(X_Train, Y_Train)
y_pred_1 = regression1.predict(X_Test)
y_pred_2 = regression2.predict(X_Test)
error1 = Y_Test - y_pred_1
error2 = Y_Test - y_pred_2
X_value = np.arange(1, 151, 1)
fig, ax = plt.subplots()
ax.plot(X_value, R2_1, 'g', label="m = p")
ax.plot(X_value, R2_2, 'b', label="m = sqrt(p)")
ax.plot(X_value, R2_3, 'r', label="m = p/2")
plt.xlabel("Number of estimators (decision trees)")
plt.ylabel("R^2 score")
legend = ax.legend(loc='lower right', shadow=True, fontsize='x-large')
plt.show()
plt.scatter(y_pred_1, error1, color='r')
plt.scatter(y_pred_2, error2, color='b')
plt.xlabel("Estimation")
plt.ylabel("Error of estimation")
plt.show()
| ontckr/475Labs | lab8.py | lab8.py | py | 3,208 | python | en | code | 0 | github-code | 13 |
26265819186 | import re
from os.path import join, basename
from glob import glob
DATA_DIR = 'tempdata'
pattern = "(yob19[5-9][0-9]\.txt)|(yob20[01][0-9]\.txt)"
alltxtfiles_names = glob(join(DATA_DIR, '*.txt'))
myfilenames = []
for fname in alltxtfiles_names:
matchobj = re.search(pattern, fname, flags=0)
if matchobj:
myfilenames.append(fname)
tally = {'M': set(), 'F': set()}
for fname in myfilenames:
babyfile = open(fname, "r")
for line in babyfile:
name, gender, babies = line.split(',')
tally[gender].add(name)
babyfile.close()
# Now, tally contains two keys, 'M' and 'F', which both point
# to a set of names
print("F:", str(len(tally['F'])).rjust(6),
"M:", str(len(tally['M'])).rjust(6))
f_to_m_ratio = round(100 * len(tally['F']) / len(tally['M']))
print("F/M baby ratio:", f_to_m_ratio) | kbenitez/compciv-2016 | exercises/0020-gender-detector/c.py | c.py | py | 839 | python | en | code | 0 | github-code | 13 |
1954997884 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 2 11:42:11 2020
Problem 76: Counting summations
It is possible to write five as a sum in exactly six different ways:
4 + 1
3 + 2
3 + 1 + 1
2 + 2 + 1
2 + 1 + 1 + 1
1 + 1 + 1 + 1 + 1
How many different ways can one hundred be written as a sum of at least two positive integers?
@author: Admin
"""
K = 100
def rek(num, n, ilosc=0):
for x in range(1, n + 1):
new_n = num - x
if new_n == 0:
# print('+1')
ilosc += 1
return ilosc
else:
ilosc = rek(new_n, x, ilosc=ilosc)
return ilosc
def solution():
count = 0
for x in range(K - 1, 0, -1):
temp = rek(K - x, x)
count += temp
print(count)
solution()
| KubiakJakub01/ProjectEuler | src/Problem76.py | Problem76.py | py | 764 | python | en | code | 0 | github-code | 13 |
40647694364 | import os
import ast
import json
import torch
import cv2
from data_preperation.utils import read_txt, read_json
from data_preperation.visualize import get_cords_for_pred, display_image, get_cords_from_yolo
from configs.getconfig import GetConfig
import numpy as np
class prediction():
'''This class is uesd to craete a prediction method'''
def __init__(self,class_path, asp_index , dcu, init_config):
"""
class map: mapping of classes
asp_index: index of aspects
dict_cord_units: units of dict cords
"""
class_lines = read_txt(class_path)
n_class = len(class_lines)
self.class_map_dict = {i: cls for i, cls in enumerate(class_lines)}
asp_id = read_json(asp_index)
self.asp_idx = {int(v):k for k,v in asp_id.items()}
self.dcu_dict=read_json(dcu)
self.block_len = n_class+5 #1 for background possible bug in the code
self.iterator_range = len(list(self.dcu_dict.values())[0])
self.boxnm2 = init_config.getboolean('DATA','use_boxnm2')
self.image_shape = ast.literal_eval(init_config['DATA']['image_shape'])
def collect_boxes(self, target, image):
img=image.squeeze()
img= img.permute(1,2,0)
imgnp = img.numpy()
tg = target.squeeze(0)
the_hold_dict={}
#convert tensor to numpy array
for tg_asp in range(tg.shape[0]): #iterate through target channels, ie; aspects
lkt = tg[tg_asp,:,:]
# print("the lkt", lkt)
lkt_list = lkt.squeeze(-1)
feelr = {i:val for i,val in enumerate([lkt_list[m*self.block_len:m*self.block_len+self.block_len]
for m in range(self.iterator_range)]) if torch.count_nonzero(val[0:-4])>0}
# the_hold_dict[tg_asp]=feelr
ifer = self.collect_ni_comparison(feelr,tg_asp)
# print("Ifer keys of detections >>>>>>", ifer)
the_hold_dict[tg_asp] = ifer
self.pred_visualize(the_hold_dict, imgnp)
def pred_visualize(self, the_hold_dict, image):
val_list = [i for i in the_hold_dict.values() if i]
for m in val_list:
img = image.copy()
for v in m:
cnf_scr = v[0].detach().numpy()
class_obj= v[-1][0].detach().numpy()
class_name = self.class_map_dict[int(class_obj)]
class_name = class_name+" : "+str(cnf_scr)[0:4]
img = cv2.rectangle(img, v[-1][1],v[-1][2], (0,255,0),3)
img = cv2.putText(
img,
text=class_name,
org=v[-1][1],
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1,
color=(0, 0, 255),
thickness=3
)
display_image(img,self.image_shape, title="on")
def collect_ni_comparison(self, feelr,tg_asp):
new_ls =[]
the_asp = self.asp_idx[tg_asp]
for idx, pnts in feelr.items():
asp_box = self.dcu_dict[the_asp][idx]
if self.boxnm2:
pred_box, obj_class, obj_cnf =self.box_diff_normalize2(pnts, asp_box)
else:
10/0
obj_cnf=1 ####TOC
obj_class=1
pred_box=1
yolofrm = [obj_class]
yolofrm.extend(pred_box)
# new_ls.append([obj_cnf,get_cords_for_pred(yolofrm, self.image_shape)])
new_ls.append([obj_cnf, get_cords_from_yolo(1,1,yolofrm)])
return new_ls
def box_diff_normalize2(self, pred_an, anc_bx):
new_pred= []
i1 = pred_an[-4]
i2 = pred_an[-3]
i3 = pred_an[-2]
i4 = pred_an[-1]
####
new_pred.append(int(anc_bx[0] - i1 * self.image_shape[1]))
new_pred.append(int(anc_bx[1] - i2 * self.image_shape[0]))
# delx =
new_pred.append(int(i3 * anc_bx[2]))
new_pred.append(int(i4 * anc_bx[3]))
objscore = pred_an[-5]
cls_obj = self.block_len-5 ###################################TOC
class_t = torch.argmax(pred_an[0:cls_obj])
# if backgroun:
# print("dcct would be different")
return new_pred, class_t, objscore*torch.max(pred_an[0:cls_obj])
def box_diff_normalize(self, box, gt_bx):
box_dif = list(np.array(box.copy()) - np.array(gt_bx.copy()))
nw_list = []
for i,val in enumerate(box_dif):
if i in [0,2]:
nw = val/self.image_shape[1]
else:
nw = val/self.image_shape[0]
nw_list.append(nw)
return nw_list
def analyse_preds(self,traindata ):
for i, (images, labels) in enumerate(traindata):
self.collect_boxes(labels, images)
| VipulAlgoSoul/odhybrid | prediction/prediction.py | prediction.py | py | 4,873 | python | en | code | 0 | github-code | 13 |
6993440616 | import torch
from torch import nn
class RS(nn.Module):
def __init__(self, reduced_dim = True):
super(RS, self).__init__()
if reduced_dim:
item_size = 1000
user_size = 1000
else:
item_size = 9560
user_size = 1157633
if reduced_dim:
print("You use the model with reduced dimensions")
self.item_embedding = nn.Sequential(
nn.Embedding(item_size, 200),
nn.Linear(200, 200),
nn.ReLU(),
nn.Linear(200, 100),
nn.ReLU(),
nn.Linear(100, 50),
nn.ReLU()
)
self.user_embedding = nn.Sequential(
nn.Embedding(user_size, 200),
nn.Linear(200, 200),
nn.ReLU(),
nn.Linear(200, 100),
nn.ReLU(),
nn.Linear(100, 50),
nn.ReLU()
)
self.gender_embedding = nn.Sequential(
nn.Embedding(3, 10),
nn.Linear(10, 200),
nn.ReLU(),
nn.Linear(200, 100),
nn.ReLU(),
nn.Linear(100, 50),
nn.ReLU()
)
self.category_embedding = nn.Sequential(
nn.Embedding(10, 10),
nn.Linear(10, 200),
nn.ReLU(),
nn.Linear(200, 100),
nn.ReLU(),
nn.Linear(100, 50),
nn.ReLU()
)
self.output_fc = nn.Sequential(
nn.Linear(200, 200),
nn.ReLU(),
nn.Linear(200, 100),
nn.ReLU(),
nn.Flatten(),
nn.Linear(100, 6),
)
def forward(self, gender, item, user, category):
x_item = self.item_embedding(item)
x_user = self.user_embedding(user)
x_gender = self.gender_embedding(gender)
x_category = self.category_embedding(category)
#print("---------------------size---------------------")
#print("x_item", x_item.size(), "x_user", x_user.size(),"gender", x_gender.size(), "category", x_category.size())
x = torch.cat((x_item, x_user, x_user, x_category), dim = -1)
#print("Total:", x.size())
x = self.output_fc(x)
#print("Output:", x.size())
#print("---------------------size---------------------")
return x
| shengy3/RecomSystem | RecomSysModel.py | RecomSysModel.py | py | 2,198 | python | en | code | 0 | github-code | 13 |
12860537832 | from rest_framework.viewsets import GenericViewSet
from rest_framework.response import Response
from rest_framework import mixins, permissions, decorators, generics, status
from django_filters import rest_framework as filters
from src.core.serializers import CarSerializer
from src.suppliers.models import SupplierModel
from src.suppliers.serializers import SupplierSerializer, SupplierCarSerializer, DiscountSerializer
from src.suppliers.services import SuppliersService
from src.suppliers.filters import SupplierFilter
class SuppliersViewSet(
GenericViewSet,
mixins.ListModelMixin,
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.DestroyModelMixin
):
serializer_class = SupplierSerializer
queryset = SupplierModel.objects.all()
permission_classes = (permissions.AllowAny, )
service = SuppliersService()
filter_backends = (filters.DjangoFilterBackend,)
filterset_class = SupplierFilter
@decorators.action(methods=('GET', ), detail=True)
def get_cars(self, request, pk):
cars = self.service.get_cars(supplier=self.service.get_supplier(pk))
return Response(CarSerializer(cars, many=True).data)
@decorators.action(methods=('GET', ), detail=True)
def get_statistics(self, request, pk):
supplier = self.service.get_supplier(supplier_id=pk)
return Response(self.service.get_statistic(supplier), status=status.HTTP_200_OK)
class SupplierCarViewSet(GenericViewSet, mixins.CreateModelMixin):
serializer_class = SupplierCarSerializer
permission_classes = (permissions.AllowAny, )
class SuppliersDiscountView(generics.CreateAPIView):
serializer_class = DiscountSerializer
permission_classes = (permissions.AllowAny, )
| ChainHokesss/whitesnake_project | CarshowroomProject/src/suppliers/views.py | views.py | py | 1,736 | python | en | code | 0 | github-code | 13 |
1223775377 | # /bin/etc/env Python
# -*- coding: utf-8 -*-
import sys
import pygame
from bullet import Bullet
def check_keydown_events(event, ai_settings, screen, ship, bullets):
"""响应按键"""
if event.key == pygame.K_RIGHT:
ship.moving_right = True
elif event.key == pygame.K_LEFT:
ship.moving_left = True
elif event.key == pygame.K_SPACE: # 创建一颗子弹,加入编组bullet中
fire_bullet(ai_settings, screen, ship, bullets)
def check_keyup_events(event, ship):
if event.key == pygame.K_RIGHT:
ship.moving_right = False
elif event.key == pygame.K_LEFT:
ship.moving_left = False
def check_events(ai_settings, screen, ship, bullets):
"""响应鼠标事件"""
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
check_keydown_events(event, ai_settings, screen, ship, bullets)
elif event.type == pygame.KEYUP:
check_keyup_events(event, ship)
def fire_bullet(ai_settings, screen, ship, bullets):
if len(bullets) < ai_settings.bullets_allowed:
new_bullet = Bullet(ai_settings, screen, ship)
bullets.add(new_bullet)
def update_screen(ai_settings, screen, ship, bullets):
"""每次循环重新绘制屏幕"""
screen.fill(ai_settings.bg_color)
# 在飞船和外星人后绘重绘所有子弹
for bullet in bullets.sprites():
bullet.draw_bullet()
ship.blitme()
# 让最新的屏幕可见。
pygame.display.flip() # 绘制一个空屏幕,并擦去旧屏幕。可以理解为刷新屏幕。
def update_bullets(bullets):
bullets.update()
for bullet in bullets.copy():
if bullet.rect.bottom <= 0:
bullets.remove(bullet)
| AidenSmith09/alien_invasion | game_fuctions.py | game_fuctions.py | py | 1,800 | python | en | code | 0 | github-code | 13 |
27406868767 | import sys
# Hack to work around PySide being imported from nowhere:
import qtpy
from xicam.plugins import GUIPlugin, GUILayout
from xicam.plugins import manager as pluginmanager
from xicam.plugins import manager as pluginmanager
from xicam.core import threads
# Note: qtconsole often fails to guess correctly which qt flavor to use. One of the below snippets will guide it.
# Overload for Py2App
# def new_load_qt(api_options):
# from qtpy import QtCore, QtWidgets, QtSvg
#
# return QtCore, QtWidgets, QtGuiCompat, 'pyqt5'
# from qtconsole import qt_loaders
# qt_loaders.load_qt(['pyqt5'])
if 'PySide.QtCore' in sys.modules and qtpy.API != 'pyside': del sys.modules['PySide.QtCore']
from qtconsole.rich_jupyter_widget import RichJupyterWidget
from qtconsole.inprocess import QtInProcessKernelManager
class IPythonPlugin(GUIPlugin):
name = 'IPython'
def __init__(self):
# # Enforce global style within the console
# with open('xicam/gui/style.stylesheet', 'r') as f:
# style = f.read()
# style = (qdarkstyle.load_stylesheet() + style)
# Setup the kernel
self.kernel_manager = QtInProcessKernelManager()
self.kernel_manager.start_kernel()
kernel = self.kernel_manager.kernel
kernel.gui = 'qt'
# Push Xi-cam variables into the kernel
kernel.shell.push({plugin.name: plugin for plugin in
pluginmanager.get_plugins_of_type("GUIPlugin") +
pluginmanager.get_plugins_of_type("EZPlugin")})
# Observe plugin changes
pluginmanager.attach(self.pluginsChanged)
# Continue kernel setuppluginmanager.getPluginsOfCategory("GUIPlugin")
self.kernel_client = self.kernel_manager.client()
threads.invoke_in_main_thread(self.kernel_client.start_channels)
# Setup console widget
def stop():
self.kernel_client.stop_channels()
self.kernel_manager.shutdown_kernel()
control = RichJupyterWidget()
control.kernel_manager = self.kernel_manager
threads.invoke_in_main_thread(setattr, control, "kernel_client", self.kernel_client)
control.exit_requested.connect(stop)
# control.style_sheet = style
control.syntax_style = u'monokai'
control.set_default_style(colors='Linux')
# Setup layout
self.stages = {'Terminal': GUILayout(control)}
# Save for later
self.kernel = kernel
super(IPythonPlugin, self).__init__()
def pluginsChanged(self):
self.kernel.shell.push({plugin.name: plugin for plugin in
pluginmanager.get_plugins_of_type("GUIPlugin") +
pluginmanager.get_plugins_of_type("EZPlugin")})
| Xi-CAM/Xi-cam.plugins.IPython | xicam/ipython/__init__.py | __init__.py | py | 2,797 | python | en | code | 0 | github-code | 13 |
73009272018 | from microdot_asyncio import Microdot, Response, send_file
from microdot_utemplate import render_template
from microdot_asyncio_websocket import with_websocket
from ldr_photoresistor_module import LDR
import time
# Initialize MicroDot
app = Microdot()
Response.default_content_type = 'text/html'
# LDR module
ldr = LDR(27)
# root route
@app.route('/')
async def index(request):
return render_template('index.html')
@app.route('/ws')
@with_websocket
async def read_sensor(request, ws):
while True:
# data = await ws.receive()
time.sleep(.1)
await ws.send(str(ldr.get_light_percentage()))
# Static CSS/JSS
@app.route("/static/<path:path>")
def static(request, path):
if ".." in path:
# directory traversal is not allowed
return "Not found", 404
return send_file("static/" + path)
# shutdown
@app.get('/shutdown')
def shutdown(request):
request.app.shutdown()
return 'The server is shutting down...'
if __name__ == "__main__":
try:
app.run()
except KeyboardInterrupt:
pass
| donskytech/micropython-raspberry-pi-pico | websocket_using_microdot/main.py | main.py | py | 1,068 | python | en | code | 13 | github-code | 13 |
30970354891 | x = 14 # x variable equal 14 number
y = 23 # y variable equal 23 number
print('x = %d y = %d'%(x,y)) #This line print x and y values on screen
temp = x # X value writing in temporary location
x = y # y value writing on X old value in this way now x value is 23
y = temp # temp value writing on y old value in this way now y value is 14
print('x = %d y = %d'%(x,y)) #This line print x and y new values on screen
| VefikFiratAkman/Introduction-to-Computer-Science | Introduction to Computer Science_FirstTry(Fail)/CSE101_HW02/151044031_Vefik_Firat_Akman.py | 151044031_Vefik_Firat_Akman.py | py | 416 | python | en | code | 0 | github-code | 13 |
31406761579 | # Originally made by Katherine Crowson (https://github.com/crowsonkb, https://twitter.com/RiversHaveWings)
# The original BigGAN+CLIP method was by https://twitter.com/advadnoun
# Adapted from https://github.com/nerdyrodent/VQGAN-CLIP/blob/main/generate.py
# Various functions and classes
import argparse
import math
from tqdm import tqdm
import sys
# pip install taming-transformers doesn't work with Gumbel, but does work with coco etc
# appending the path does work with Gumbel, but gives ModuleNotFoundError: No module named 'transformers' for coco etc
sys.path.append('model/taming-transformers')
from omegaconf import OmegaConf
from taming.models import cond_transformer, vqgan
#import taming.modules
import torch
from torch import nn, optim
from torch.nn import functional as F
from torchvision import transforms
from torchvision.transforms import functional as TF
torch.backends.cudnn.benchmark = False # NR: True is a bit faster, but can lead to OOM. False is more deterministic.
#torch.use_deterministic_algorithms(True) # NR: grid_sampler_2d_backward_cuda does not have a deterministic implementation
from torch_optimizer import DiffGrad, AdamP, RAdam
from model.CLIP import clip
import kornia.augmentation as K
import numpy as np
import imageio
from PIL import ImageFile, Image, PngImagePlugin, ImageChops
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Supress warnings
import warnings
warnings.filterwarnings('ignore')
def sinc(x):
    """Normalized sinc: sin(pi*x) / (pi*x) elementwise, with sinc(0) == 1."""
    pi_x = math.pi * x
    return torch.where(x != 0, torch.sin(pi_x) / pi_x, x.new_ones([]))
def lanczos(x, a):
    """Normalised Lanczos window: sinc(x) * sinc(x/a) inside (-a, a), zero outside.

    The weights are divided by their sum so they form a valid filter kernel.
    """
    in_window = torch.logical_and(-a < x, x < a)
    weights = torch.where(in_window, sinc(x) * sinc(x / a), x.new_zeros([]))
    return weights / weights.sum()
def ramp(ratio, width):
    """Build a symmetric ramp of sample offsets spaced by `ratio`.

    Generates n = ceil(width / ratio + 1) non-negative steps, mirrors them
    about zero, and trims the outermost element on each side. Used to place
    taps for the Lanczos resampling kernel.
    """
    count = math.ceil(width / ratio + 1)
    steps = []
    cur = 0
    for _ in range(count):
        steps.append(cur)
        cur += ratio  # accumulate (not multiply) to match historical rounding
    out = torch.tensor(steps, dtype=torch.float32)
    mirrored = torch.cat([-out[1:].flip([0]), out])
    return mirrored[1:-1]
# For zoom video
def zoom_at(img, x, y, zoom):
    """Zoom into PIL image `img` by `zoom`, centred on pixel (x, y).

    Crops a window 1/zoom of the original size around the centre point and
    scales it back up to the full image size with Lanczos resampling.
    """
    w, h = img.size
    half_w = w / (zoom * 2)
    half_h = h / (zoom * 2)
    window = (x - half_w, y - half_h, x + half_w, y + half_h)
    return img.crop(window).resize((w, h), Image.LANCZOS)
# NR: Testing with different intital images
def random_noise_image(w, h):
    """Return a w x h PIL image of uniform random RGB noise.

    NumPy arrays are indexed (rows, cols) = (height, width), so the array
    must have shape (h, w, 3) for Image.fromarray to produce a w x h image —
    the previous (w, h, 3) shape transposed non-square images. This also
    matches random_gradient_image, whose gradient_3d call yields (h, w, ch).
    randint's upper bound is exclusive, so 256 is used to include pure white.
    """
    pixels = np.random.randint(0, 256, (h, w, 3), dtype=np.dtype('uint8'))
    return Image.fromarray(pixels)
# create initial gradient image
def gradient_2d(start, stop, width, height, is_horizontal):
    """Return a (height, width) array ramping linearly from `start` to `stop`.

    The ramp runs left-to-right when `is_horizontal`, top-to-bottom otherwise.
    """
    if is_horizontal:
        row = np.linspace(start, stop, width)
        return np.tile(row, (height, 1))
    column = np.linspace(start, stop, height)
    return np.tile(column, (width, 1)).T
def gradient_3d(width, height, start_list, stop_list, is_horizontal_list):
    """Stack per-channel 2-D linear gradients into a (height, width, channels) float array.

    Each channel i ramps from start_list[i] to stop_list[i], horizontally or
    vertically according to is_horizontal_list[i].
    """
    channels = len(start_list)
    result = np.zeros((height, width, channels), dtype=float)
    specs = zip(start_list, stop_list, is_horizontal_list)
    for idx, (start, stop, is_horizontal) in enumerate(specs):
        result[:, :, idx] = gradient_2d(start, stop, width, height, is_horizontal)
    return result
def random_gradient_image(w, h):
    """Return a w x h PIL image filled with a random three-channel linear gradient."""
    # Keep the randint call order identical so the RNG stream is unchanged.
    starts = (0, 0, np.random.randint(0, 255))
    stops = (np.random.randint(1, 255), np.random.randint(2, 255), np.random.randint(3, 128))
    array = gradient_3d(w, h, starts, stops, (True, False, False))
    return Image.fromarray(np.uint8(array))
# Used in older MakeCutouts
def resample(input, size, align_corners=True):
    """Resize an NCHW tensor to `size` (dh, dw) with Lanczos anti-alias prefiltering.

    When shrinking along an axis, a separable Lanczos low-pass kernel is
    convolved along that axis first; reflect padding keeps the spatial size
    unchanged because the kernels built from ramp() always have odd length.
    The final resize is bicubic interpolation.
    """
    n, c, h, w = input.shape
    dh, dw = size
    # Fold batch and channels together so conv2d filters each channel independently.
    input = input.view([n * c, 1, h, w])
    if dh < h:
        # Vertical low-pass kernel sized for the downscale ratio.
        kernel_h = lanczos(ramp(dh / h, 2), 2).to(input.device, input.dtype)
        pad_h = (kernel_h.shape[0] - 1) // 2
        input = F.pad(input, (0, 0, pad_h, pad_h), 'reflect')
        input = F.conv2d(input, kernel_h[None, None, :, None])
    if dw < w:
        # Horizontal low-pass kernel.
        kernel_w = lanczos(ramp(dw / w, 2), 2).to(input.device, input.dtype)
        pad_w = (kernel_w.shape[0] - 1) // 2
        input = F.pad(input, (pad_w, pad_w, 0, 0), 'reflect')
        input = F.conv2d(input, kernel_w[None, None, None, :])
    input = input.view([n, c, h, w])
    return F.interpolate(input, size, mode='bicubic', align_corners=align_corners)
class ReplaceGrad(torch.autograd.Function):
    """Straight-through helper: forward returns the first input, while the
    backward pass routes the incoming gradient entirely to the second input
    (summed down to its shape). The first input receives no gradient."""
    @staticmethod
    def forward(ctx, value, grad_target):
        ctx.grad_shape = grad_target.shape
        return value
    @staticmethod
    def backward(ctx, grad_output):
        return None, grad_output.sum_to_size(ctx.grad_shape)
class ClampWithGrad(torch.autograd.Function):
    """Clamp input to [min, max] but keep useful gradients outside the range.

    Plain clamp has zero gradient where values are out of range; here the
    backward pass instead zeroes only those gradient components that would
    push an out-of-range value even further out.
    """
    @staticmethod
    def forward(ctx, input, min, max):
        # Stash the bounds and raw input for use in backward.
        ctx.min = min
        ctx.max = max
        ctx.save_for_backward(input)
        return input.clamp(min, max)
    @staticmethod
    def backward(ctx, grad_in):
        input, = ctx.saved_tensors
        # (input - clamp(input)) is the overshoot: nonzero only out of range.
        # The mask keeps grad_in wherever grad_in * overshoot >= 0 (in-range
        # values always pass); min/max args get no gradient.
        return grad_in * (grad_in * (input - input.clamp(ctx.min, ctx.max)) >= 0), None, None
def vector_quantize(x, codebook):
    """Snap each vector in `x` to its nearest codebook entry.

    Squared distances are expanded as |x|^2 + |c|^2 - 2 x.c; the returned
    tensor carries the quantized values forward while gradients flow straight
    through to `x` via ReplaceGrad.
    """
    # Expression order kept identical to avoid float-rounding argmin flips.
    sq_dists = x.pow(2).sum(dim=-1, keepdim=True) + codebook.pow(2).sum(dim=1) - 2 * x @ codebook.T
    nearest = sq_dists.argmin(-1)
    quantized = F.one_hot(nearest, codebook.shape[0]).to(sq_dists.dtype) @ codebook
    return ReplaceGrad.apply(quantized, x)
class Prompt(nn.Module):
    """Holds one CLIP-encoded prompt and scores batches of embeddings against it.

    embed: pre-encoded prompt embedding (text or image).
    weight: importance multiplier; a negative weight makes this an
        "avoid this" prompt.
    stop: floor at which the (signed) distance stops contributing further.
    """
    def __init__(self, embed, weight=1., stop=float('-inf')):
        super().__init__()
        # Buffers so the tensors move with .to(device) but are not trained.
        self.register_buffer('embed', embed)
        self.register_buffer('weight', torch.as_tensor(weight))
        self.register_buffer('stop', torch.as_tensor(stop))
    def forward(self, input):
        # Normalise both the batch of embeddings and the prompt embed, then
        # compute a squared spherical distance between them.
        input_normed = F.normalize(input.unsqueeze(1), dim=2)
        embed_normed = F.normalize(self.embed.unsqueeze(0), dim=2)
        dists = input_normed.sub(embed_normed).norm(dim=2).div(2).arcsin().pow(2).mul(2)
        # Negative-weight prompts flip the sign so minimising moves away.
        dists = dists * self.weight.sign()
        # Floor at `stop` via ReplaceGrad so gradients still flow, then average.
        return self.weight.abs() * ReplaceGrad.apply(dists, torch.maximum(dists, self.stop)).mean()
#NR: Split prompts and weights
def split_prompt(prompt):
    """Split a 'text:weight:stop' prompt string into its three parts.

    Only the last two ':'-separated fields are treated as weight/stop, so the
    text itself may contain colons. Missing *or empty* fields fall back to
    weight=1 and stop=-inf (an empty field such as in "cat:" previously
    crashed on float('')).

    Returns:
        (text, weight, stop) as (str, float, float).
    """
    vals = prompt.rsplit(':', 2)
    vals = vals + ['', '', ''][len(vals):]  # pad to exactly three fields
    text = vals[0]
    weight = float(vals[1]) if vals[1] else 1.
    stop = float(vals[2]) if vals[2] else float('-inf')
    return text, weight, stop
class MakeCutouts(nn.Module):
    """Generate `cutn` augmented square cutouts of an image for CLIP scoring.

    Augments are chosen by two-letter codes in args.augments[0] and applied
    in the order given. Cutouts come from adaptive avg+max pooling rather
    than random crops, so `cut_pow` is unused here.
    """
    def __init__(self, args, cut_size, cutn, cut_pow=1.):
        super().__init__()
        self.cut_size = cut_size
        self.cutn = cutn
        self.cut_pow = cut_pow # not used with pooling
        # Pick your own augments & their order
        augment_list = []
        for item in args.augments[0]:
            if item == 'Ji':
                augment_list.append(K.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1, p=0.7))
            elif item == 'Sh':
                augment_list.append(K.RandomSharpness(sharpness=0.3, p=0.5))
            elif item == 'Gn':
                augment_list.append(K.RandomGaussianNoise(mean=0.0, std=1., p=0.5))
            elif item == 'Pe':
                augment_list.append(K.RandomPerspective(distortion_scale=0.7, p=0.7))
            elif item == 'Ro':
                augment_list.append(K.RandomRotation(degrees=15, p=0.7))
            elif item == 'Af':
                augment_list.append(K.RandomAffine(degrees=15, translate=0.1, shear=5, p=0.7, padding_mode='zeros', keepdim=True)) # border, reflection, zeros
            elif item == 'Et':
                augment_list.append(K.RandomElasticTransform(p=0.7))
            elif item == 'Ts':
                augment_list.append(K.RandomThinPlateSpline(scale=0.8, same_on_batch=True, p=0.7))
            elif item == 'Cr':
                augment_list.append(K.RandomCrop(size=(self.cut_size,self.cut_size), pad_if_needed=True, padding_mode='reflect', p=0.5))
            elif item == 'Er':
                augment_list.append(K.RandomErasing(scale=(.1, .4), ratio=(.3, 1/.3), same_on_batch=True, p=0.7))
            elif item == 'Re':
                augment_list.append(K.RandomResizedCrop(size=(self.cut_size,self.cut_size), scale=(0.1,1), ratio=(0.75,1.333), cropping_mode='resample', p=0.5))
        self.augs = nn.Sequential(*augment_list)
        self.noise_fac = 0.1
        # self.noise_fac = False
        # Uncomment if you like seeing the list ;)
        # print(augment_list)
        # Pooling
        self.av_pool = nn.AdaptiveAvgPool2d((self.cut_size, self.cut_size))
        self.max_pool = nn.AdaptiveMaxPool2d((self.cut_size, self.cut_size))
    def forward(self, input):
        cutouts = []
        for _ in range(self.cutn):
            # Use Pooling: blend of average and max pooling keeps both smooth
            # colour and salient detail in each cutout.
            cutout = (self.av_pool(input) + self.max_pool(input))/2
            cutouts.append(cutout)
        batch = self.augs(torch.cat(cutouts, dim=0))
        if self.noise_fac:
            # Per-cutout random noise strength in [0, noise_fac).
            facs = batch.new_empty([self.cutn, 1, 1, 1]).uniform_(0, self.noise_fac)
            batch = batch + facs * torch.randn_like(batch)
        return batch
# An updated version with Kornia augments and pooling (where my version started):
class MakeCutoutsPoolingUpdate(nn.Module):
    """Cutout generator using adaptive avg+max pooling plus a fixed Kornia augment stack.

    `cut_pow` is accepted for interface compatibility but unused, since
    cutouts come from pooling rather than random-size crops.
    """
    def __init__(self, cut_size, cutn, cut_pow=1.):
        super().__init__()
        self.cut_size = cut_size
        self.cutn = cutn
        self.cut_pow = cut_pow # Not used with pooling
        self.augs = nn.Sequential(
            K.RandomAffine(degrees=15, translate=0.1, p=0.7, padding_mode='border'),
            K.RandomPerspective(0.7,p=0.7),
            K.ColorJitter(hue=0.1, saturation=0.1, p=0.7),
            K.RandomErasing((.1, .4), (.3, 1/.3), same_on_batch=True, p=0.7),
        )
        self.noise_fac = 0.1
        self.av_pool = nn.AdaptiveAvgPool2d((self.cut_size, self.cut_size))
        self.max_pool = nn.AdaptiveMaxPool2d((self.cut_size, self.cut_size))
    def forward(self, input):
        # NOTE: crop-size locals (sideY/sideX/max_size/min_size) from the
        # random-crop version were never used with pooling and were removed.
        cutouts = []
        for _ in range(self.cutn):
            # Blend of average and max pooling keeps both smooth colour and
            # salient detail in each cutout.
            cutout = (self.av_pool(input) + self.max_pool(input))/2
            cutouts.append(cutout)
        batch = self.augs(torch.cat(cutouts, dim=0))
        if self.noise_fac:
            # Per-cutout random noise strength in [0, noise_fac).
            facs = batch.new_empty([self.cutn, 1, 1, 1]).uniform_(0, self.noise_fac)
            batch = batch + facs * torch.randn_like(batch)
        return batch
# An Nerdy updated version with selectable Kornia augments, but no pooling:
class MakeCutoutsNRUpdate(nn.Module):
    """Cutout generator with selectable Kornia augments and random crops (no pooling).

    Augments are chosen by two-letter codes in args.augments[0] and applied
    in the order given. Crop size is drawn per cutout via `cut_pow`.
    """
    def __init__(self, args, cut_size, cutn, cut_pow=1.):
        super().__init__()
        self.cut_size = cut_size
        self.cutn = cutn
        self.cut_pow = cut_pow
        self.noise_fac = 0.1
        # Pick your own augments & their order
        augment_list = []
        for item in args.augments[0]:
            if item == 'Ji':
                augment_list.append(K.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1, p=0.7))
            elif item == 'Sh':
                augment_list.append(K.RandomSharpness(sharpness=0.3, p=0.5))
            elif item == 'Gn':
                augment_list.append(K.RandomGaussianNoise(mean=0.0, std=1., p=0.5))
            elif item == 'Pe':
                augment_list.append(K.RandomPerspective(distortion_scale=0.5, p=0.7))
            elif item == 'Ro':
                augment_list.append(K.RandomRotation(degrees=15, p=0.7))
            elif item == 'Af':
                augment_list.append(K.RandomAffine(degrees=30, translate=0.1, shear=5, p=0.7, padding_mode='zeros', keepdim=True)) # border, reflection, zeros
            elif item == 'Et':
                augment_list.append(K.RandomElasticTransform(p=0.7))
            elif item == 'Ts':
                augment_list.append(K.RandomThinPlateSpline(scale=0.8, same_on_batch=True, p=0.7))
            elif item == 'Cr':
                augment_list.append(K.RandomCrop(size=(self.cut_size,self.cut_size), pad_if_needed=True, padding_mode='reflect', p=0.5))
            elif item == 'Er':
                augment_list.append(K.RandomErasing(scale=(.1, .4), ratio=(.3, 1/.3), same_on_batch=True, p=0.7))
            elif item == 'Re':
                augment_list.append(K.RandomResizedCrop(size=(self.cut_size,self.cut_size), scale=(0.1,1), ratio=(0.75,1.333), cropping_mode='resample', p=0.5))
        self.augs = nn.Sequential(*augment_list)
    def forward(self, input):
        sideY, sideX = input.shape[2:4]
        max_size = min(sideX, sideY)
        min_size = min(sideX, sideY, self.cut_size)
        cutouts = []
        for _ in range(self.cutn):
            # Higher cut_pow biases the random crop size toward min_size.
            size = int(torch.rand([])**self.cut_pow * (max_size - min_size) + min_size)
            offsetx = torch.randint(0, sideX - size + 1, ())
            offsety = torch.randint(0, sideY - size + 1, ())
            cutout = input[:, :, offsety:offsety + size, offsetx:offsetx + size]
            cutouts.append(resample(cutout, (self.cut_size, self.cut_size)))
        batch = self.augs(torch.cat(cutouts, dim=0))
        if self.noise_fac:
            # Per-cutout random noise strength in [0, noise_fac).
            facs = batch.new_empty([self.cutn, 1, 1, 1]).uniform_(0, self.noise_fac)
            batch = batch + facs * torch.randn_like(batch)
        return batch
# An updated version with Kornia augments, but no pooling:
class MakeCutoutsUpdate(nn.Module):
    """Cutout generator with a fixed Kornia augment stack and random crops (no pooling)."""
    def __init__(self, cut_size, cutn, cut_pow=1.):
        super().__init__()
        self.cut_size = cut_size
        self.cutn = cutn
        self.cut_pow = cut_pow
        self.augs = nn.Sequential(
            K.RandomHorizontalFlip(p=0.5),
            K.ColorJitter(hue=0.01, saturation=0.01, p=0.7),
            # K.RandomSolarize(0.01, 0.01, p=0.7),
            K.RandomSharpness(0.3,p=0.4),
            K.RandomAffine(degrees=30, translate=0.1, p=0.8, padding_mode='border'),
            K.RandomPerspective(0.2,p=0.4),)
        self.noise_fac = 0.1
    def forward(self, input):
        sideY, sideX = input.shape[2:4]
        max_size = min(sideX, sideY)
        min_size = min(sideX, sideY, self.cut_size)
        cutouts = []
        for _ in range(self.cutn):
            # Higher cut_pow biases the random crop size toward min_size.
            size = int(torch.rand([])**self.cut_pow * (max_size - min_size) + min_size)
            offsetx = torch.randint(0, sideX - size + 1, ())
            offsety = torch.randint(0, sideY - size + 1, ())
            cutout = input[:, :, offsety:offsety + size, offsetx:offsetx + size]
            cutouts.append(resample(cutout, (self.cut_size, self.cut_size)))
        batch = self.augs(torch.cat(cutouts, dim=0))
        if self.noise_fac:
            # Per-cutout random noise strength in [0, noise_fac).
            facs = batch.new_empty([self.cutn, 1, 1, 1]).uniform_(0, self.noise_fac)
            batch = batch + facs * torch.randn_like(batch)
        return batch
# This is the original version (No pooling)
class MakeCutoutsOrig(nn.Module):
    """Original cutout sampler: random square crops resampled to `cut_size`.

    No pooling and no augments; output values are clamped to [0, 1] through
    ClampWithGrad so gradients remain useful out of range.
    """
    def __init__(self, cut_size, cutn, cut_pow=1.):
        super().__init__()
        self.cut_size = cut_size
        self.cutn = cutn
        self.cut_pow = cut_pow
    def forward(self, input):
        height, width = input.shape[2:4]
        largest = min(width, height)
        smallest = min(width, height, self.cut_size)
        crops = []
        for _ in range(self.cutn):
            # Same RNG call order as before: size, then x offset, then y offset.
            side = int(torch.rand([])**self.cut_pow * (largest - smallest) + smallest)
            off_x = torch.randint(0, width - side + 1, ())
            off_y = torch.randint(0, height - side + 1, ())
            patch = input[:, :, off_y:off_y + side, off_x:off_x + side]
            crops.append(resample(patch, (self.cut_size, self.cut_size)))
        return ClampWithGrad.apply(torch.cat(crops, dim=0), 0, 1)
def load_vqgan_model(config_path, checkpoint_path, gumbel=False):
    """Load a frozen VQGAN (or GumbelVQ) model from a taming-transformers
    config + checkpoint, for inference only.

    For Net2NetTransformer configs only the first-stage (image) model is
    returned. Raises ValueError for unrecognised config targets. Pass
    gumbel=True when loading a GumbelVQ config (asserted).
    """
    config = OmegaConf.load(config_path)
    if config.model.target == 'taming.models.vqgan.VQModel':
        model = vqgan.VQModel(**config.model.params)
        model.eval().requires_grad_(False)
        model.init_from_ckpt(checkpoint_path)
    elif config.model.target == 'taming.models.vqgan.GumbelVQ':
        assert gumbel
        model = vqgan.GumbelVQ(**config.model.params)
        model.eval().requires_grad_(False)
        model.init_from_ckpt(checkpoint_path)
    elif config.model.target == 'taming.models.cond_transformer.Net2NetTransformer':
        parent_model = cond_transformer.Net2NetTransformer(**config.model.params)
        parent_model.eval().requires_grad_(False)
        parent_model.init_from_ckpt(checkpoint_path)
        model = parent_model.first_stage_model
    else:
        raise ValueError(f'unknown model type: {config.model.target}')
    # The training loss head is not needed for inference; drop it to save memory.
    del model.loss
    return model
def resize_image(image, out_size):
    """Resize a PIL image to roughly out_size's area, keeping its aspect ratio.

    The target area is capped at the image's own area, so images are never
    upscaled. Uses Lanczos resampling.
    """
    aspect = image.size[0] / image.size[1]
    area = min(image.size[0] * image.size[1], out_size[0] * out_size[1])
    new_size = round((area * aspect) ** 0.5), round((area / aspect) ** 0.5)
    return image.resize(new_size, Image.LANCZOS)
def get_opt(opt_name, opt_lr, z):
    """Build the optimiser named `opt_name` over the single latent tensor `z`.

    Unknown names print a warning and fall back to Adam. Constructors are
    wrapped in lambdas so that the optional optimisers (DiffGrad, AdamP,
    RAdam) are only resolved when actually selected.
    """
    builders = {
        "Adam": lambda: optim.Adam([z], lr=opt_lr),
        "AdamW": lambda: optim.AdamW([z], lr=opt_lr),
        "Adagrad": lambda: optim.Adagrad([z], lr=opt_lr),
        "Adamax": lambda: optim.Adamax([z], lr=opt_lr),
        "DiffGrad": lambda: DiffGrad([z], lr=opt_lr, eps=1e-9, weight_decay=1e-9),
        "AdamP": lambda: AdamP([z], lr=opt_lr),
        "RAdam": lambda: RAdam([z], lr=opt_lr),
        "RMSprop": lambda: optim.RMSprop([z], lr=opt_lr),
    }
    try:
        make = builders[opt_name]
    except KeyError:
        print("Unknown optimiser. Are choices broken?")
        make = builders["Adam"]
    return make()
# Vector quantize
def synth(z, model, gumbel=False):
    """Decode latent `z` into an image in [0, 1].

    Quantizes z against the model's codebook (GumbelVQ stores it under
    quantize.embed, plain VQ under quantize.embedding), decodes, then maps
    the decoder output from [-1, 1] to [0, 1] via the ClampWithGrad helper.
    """
    if gumbel:
        z_q = vector_quantize(z.movedim(1, 3), model.quantize.embed.weight).movedim(3, 1)
    else:
        z_q = vector_quantize(z.movedim(1, 3), model.quantize.embedding.weight).movedim(3, 1)
    return ClampWithGrad.apply(model.decode(z_q).add(1).div(2), 0, 1)
#@torch.no_grad()
@torch.inference_mode()
def checkin(i, losses, z, model, prompts, output_path, gumbel=False):
    """Log the loss breakdown for step `i` and save the current image.

    The decoded image is written as a PNG to `output_path`, with the prompt
    list embedded in a 'comment' text chunk. Runs under inference_mode since
    it only reports progress.
    """
    losses_str = ', '.join(f'{loss.item():g}' for loss in losses)
    tqdm.write(f'i: {i}, loss: {sum(losses).item():g}, losses: {losses_str}')
    out = synth(z, model, gumbel=gumbel)
    info = PngImagePlugin.PngInfo()
    info.add_text('comment', f'{prompts}')
    TF.to_pil_image(out[0].cpu()).save(output_path, pnginfo=info)
def ascend_txt(pMs, i, z, model, perceptor, make_cutouts, normalize, init_weight, make_video=False, gumbel=False):
    """Compute the per-prompt CLIP losses for the current latent `z`.

    Returns a list of loss tensors: an optional MSE regulariser (decayed by
    step index `i`) when `init_weight` is set, followed by one loss per
    prompt in `pMs`. When `make_video` is True the current frame is also
    written to ./steps/<i>.png.
    """
    out = synth(z, model, gumbel=gumbel)
    # Encode augmented cutouts of the decoded image with the CLIP perceptor.
    iii = perceptor.encode_image(normalize(make_cutouts(out))).float()
    result = []
    if init_weight:
        # Fix: the original referenced an undefined global `z_orig` here,
        # raising NameError whenever init_weight was truthy.
        # torch.zeros_like(z) is shape-identical to the intended
        # torch.zeros_like(z_orig) (z_orig was the initial latent).
        # result.append(F.mse_loss(z, z_orig) * init_weight / 2)
        result.append(F.mse_loss(z, torch.zeros_like(z)) * ((1/torch.tensor(i*2 + 1))*init_weight) / 2)
    for prompt in pMs:
        result.append(prompt(iii))
    if make_video:
        img = np.array(out.mul(255).clamp(0, 255)[0].cpu().detach().numpy().astype(np.uint8))[:,:,:]
        img = np.transpose(img, (1, 2, 0))
        imageio.imwrite('./steps/' + str(i) + '.png', np.array(img))
    return result  # list of loss tensors
def train(pMs, opt, i, z, model, perceptor, make_cutouts, normalize, z_min, z_max, save_every, init_weight, make_video, prompts, output_path, gumbel=False):
    """Run one optimisation step on the latent `z`.

    Computes the losses via ascend_txt, backprops their sum, steps the
    optimiser, then projects z back into the valid codebook range
    [z_min, z_max]. Every `save_every` steps the current image and losses
    are checkpointed via checkin().
    """
    opt.zero_grad(set_to_none=True)
    lossAll = ascend_txt(pMs, i, z, model, perceptor, make_cutouts, normalize, init_weight, make_video, gumbel=gumbel)
    if i % save_every == 0:
        checkin(i, lossAll, z, model, prompts, output_path, gumbel=gumbel)
    loss = sum(lossAll)
    loss.backward()
    opt.step()
    #with torch.no_grad():
    with torch.inference_mode():
        # Clamp z element-wise into [z_min, z_max] without tracking gradients.
        z.copy_(z.maximum(z_min).minimum(z_max))
| gramhagen/game-paint | server/app/model/utils.py | utils.py | py | 19,513 | python | en | code | 1 | github-code | 13 |
71842177617 | import socket
import os
import threading
import sys
import datetime
# Address of the upstream service this relay forwards each received message to.
HOST = 'localhost'
PORT = 3337
def start_server():
    """Accept TCP clients on 0.0.0.0:4324 and relay each received message to
    the upstream service at (HOST, PORT).

    Fixes: the upstream socket was closed inside the accept loop, so every
    connection after the first failed on a closed socket; accepted
    connections and the listening socket were leaked on exit. Sockets are
    now closed exactly once via finally blocks.
    """
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.bind(('0.0.0.0', 4324))
    server_socket.listen(1)
    # server_socket.settimeout(10)
    print("Server started, waiting for connection")
    cli_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    cli_sock.connect((HOST, PORT))
    try:
        while True:
            conn, addr = server_socket.accept()
            print("Client connected:", addr)
            try:
                data = conn.recv(1024)
                if not data:
                    # Client closed without sending anything; stop serving.
                    break
                print(f"Received data({datetime.datetime.now()}): {data.decode('ascii')}")
                conn.send("Message received".encode('ascii'))
                # Relay upstream; keep the link open for subsequent clients.
                cli_sock.send(data)
                print("Client disconnected")
            finally:
                conn.close()
    finally:
        cli_sock.close()
        server_socket.close()
start_server() | ZacSchepis/acm_bot | server.py | server.py | py | 877 | python | en | code | 0 | github-code | 13 |
17058750894 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class RecruitContentConfig(object):
    """Value object for a recruit content configuration (code/value pair),
    serializable to and from the Alipay wire-format dict."""

    # Serialized keys, in output order.
    _KEYS = ('config_code', 'config_value')

    def __init__(self):
        self._config_code = None
        self._config_value = None

    @property
    def config_code(self):
        return self._config_code

    @config_code.setter
    def config_code(self, value):
        self._config_code = value

    @property
    def config_value(self):
        return self._config_value

    @config_value.setter
    def config_value(self, value):
        self._config_value = value

    def to_alipay_dict(self):
        """Serialize truthy fields to a plain dict, delegating to a nested
        object's to_alipay_dict() when it provides one."""
        params = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a dict; returns None for falsy input."""
        if not d:
            return None
        obj = RecruitContentConfig()
        for key in RecruitContentConfig._KEYS:
            if key in d:
                setattr(obj, key, d[key])
        return obj
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/RecruitContentConfig.py | RecruitContentConfig.py | py | 1,441 | python | en | code | 241 | github-code | 13 |
71139548497 | import os
import discord
from dotenv import load_dotenv
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen
import gzip
import io
import csv
# Pull the bot token from the environment (.env file) and create the client.
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
client = discord.Client()
@client.event
async def on_ready():
    # Fired once the gateway session is established; simple lifecycle log.
    print(f'{client.user} has connected to Discord!')
@client.event
async def on_message(message):
    """When the bot is mentioned, look up the mentioned player's hidden
    ranked 1v1 elo in the AoE4World leaderboard dump and reply with it."""
    # Ignore the bot's own messages to avoid reply loops.
    if message.author == client.user:
        return
    if(len(message.mentions) == 0):
        return
    else:
        # Only react when the first mention is this bot.
        if'AoE4 Elo Bot' in message.mentions[0].name:
            channel = message.channel
            # NOTE(review): assumes the mention markup occupies exactly the
            # first 23 characters of the message -- confirm; breaks otherwise.
            user = str(message.content)[23:]
            url = "https://aoe4world.com/dumps"
            request = Request(url)
            request.add_header('User-Agent','Mozilla/5.0')
            page = urlopen(request).read()
            soup = BeautifulSoup(page,'html.parser')
            elo = "not found on the leaderboard."
            for rows in soup.find_all(class_="hover:underline"):
                # Find the gzipped 1v1 elo CSV dump among the download links.
                if("leadersboards_rm_1v1_elo.csv" in rows.get('href')):
                    response = urlopen(rows.get('href'))
                    zippedData = io.BytesIO(response.read())
                    unZippedData = gzip.GzipFile(fileobj=zippedData).read()
                    # NOTE(review): str(bytes) yields a repr, so rows are split
                    # on the literal backslash-n sequence -- works, but fragile.
                    playerList = str(unZippedData).split('\\n')
                    i = 0
                    while i<len(playerList)-1:
                        # CSV columns: [1] player name, [3] elo.
                        playerStats = playerList[i].split(',')
                        if(user == playerStats[1]):
                            elo = playerStats[3]
                            break
                        i = i+1
            result = user+"'s hidden ranked 1v1 elo is "+str(elo)
            await channel.send(result)
client.run(TOKEN) | Ketsuppimakkara/AoE4-Discord-Elo-bot | bot.pyw | bot.pyw | pyw | 1,832 | python | en | code | 0 | github-code | 13 |
69850321619 | class BSTNode:
    def __init__(self):
        # A detached node: key and all links are filled in by the caller
        # (see insert()), so everything starts as None.
        self.key=None
        self.parent=None
        self.left=None
        self.right=None
    def display(self):
        """Pretty-print the subtree rooted at this node as ASCII art."""
        lines, _, _, _ = self._display_aux()
        for line in lines:
            print(line)
    def _display_aux(self):
        """Returns list of strings, width, height, and horizontal coordinate of the root.

        Recursive layout helper for display(): each child subtree reports its
        rendered lines plus (width, height, root column) so the parent can
        splice the pictures together with '/' and '\\' connectors.
        """
        # No child.
        if self.right is None and self.left is None:
            line = '%s' % self.key
            width = len(line)
            height = 1
            middle = width // 2
            return [line], width, height, middle
        # Only left child.
        if self.right is None:
            # n, p, x = child width, child height, child root column.
            lines, n, p, x = self.left._display_aux()
            s = '%s' % self.key
            u = len(s)
            first_line = (x + 1) * ' ' + (n - x - 1) * '_' + s
            second_line = x * ' ' + '/' + (n - x - 1 + u) * ' '
            shifted_lines = [line + u * ' ' for line in lines]
            return [first_line, second_line] + shifted_lines, n + u, p + 2, n + u // 2
        # Only right child.
        if self.left is None:
            lines, n, p, x = self.right._display_aux()
            s = '%s' % self.key
            u = len(s)
            first_line = s + x * '_' + (n - x) * ' '
            second_line = (u + x) * ' ' + '\\' + (n - x - 1) * ' '
            shifted_lines = [u * ' ' + line for line in lines]
            return [first_line, second_line] + shifted_lines, n + u, p + 2, u // 2
        # Two children.
        left, n, p, x = self.left._display_aux()
        right, m, q, y = self.right._display_aux()
        s = '%s' % self.key
        u = len(s)
        first_line = (x + 1) * ' ' + (n - x - 1) * '_' + s + y * '_' + (m - y) * ' '
        second_line = x * ' ' + '/' + (n - x - 1 + u + y) * ' ' + '\\' + (m - y - 1) * ' '
        # Pad the shorter side so both pictures have the same height.
        if p < q:
            left += [n * ' '] * (q - p)
        elif q < p:
            right += [m * ' '] * (p - q)
        zipped_lines = zip(left, right)
        lines = [first_line, second_line] + [a + u * ' ' + b for a, b in zipped_lines]
        return lines, n + m + u, max(p, q) + 2, n + u // 2
def find(root, key):
    """Iteratively search the BST for `key`; return its node, or None."""
    node = root
    while node is not None:
        if node.key == key:
            return node
        # Larger keys live in the right subtree, everything else on the left.
        node = node.right if node.key < key else node.left
    return None
def insert(root, key):
    """Descend from `root` and attach a fresh BSTNode holding `key` as a leaf.

    Duplicates go to the left subtree (the descent takes the left branch
    whenever node.key >= key). Assumes a non-empty tree: with root None the
    walk never runs and the attach step raises, as before.
    """
    node = root
    while node is not None:
        prev = node
        node = node.right if node.key < key else node.left
    fresh = BSTNode()
    fresh.key = key
    fresh.parent = prev
    if prev.key < key:
        prev.right = fresh
    else:
        prev.left = fresh
def mx(root):
    """Return the maximum key in the subtree (right-most node)."""
    node = root
    while node.right is not None:
        node = node.right
    return node.key
def mn(root):
    """Return the minimum key in the subtree (left-most node)."""
    node = root
    while node.left is not None:
        node = node.left
    return node.key
def succ(root): # successor
    """Return the key of root's in-order successor, or None if root holds
    the maximum key in the tree."""
    f=root.key
    if root.right:
        # With a right subtree, the successor is its minimum.
        return mn(root.right)
    else:
        # Otherwise climb while we are a right child (parent key smaller).
        while root.parent and root.parent.key<root.key:
            root=root.parent
        if root.parent:
            # NOTE(review): parent smaller than the start key means we walked
            # past the maximum -- no successor exists.
            if root.parent.key < f:
                return None
            return root.parent.key
        # Reached the tree root without finding a larger ancestor.
        if root.key < f:
            return None
        return root.key
def pred(root): # predecessor
    """Return the key of root's in-order predecessor, or None if root holds
    the minimum key in the tree. Mirror image of succ()."""
    f=root.key
    if root.left:
        # With a left subtree, the predecessor is its maximum.
        return mx(root.left)
    else:
        # Otherwise climb while we are a left child (parent key larger).
        while root.parent and root.parent.key > root.key:
            root = root.parent
        if root.parent:
            # Parent larger than the start key means no predecessor exists.
            if root.parent.key>f:
                return None
            return root.parent.key
        if root.key>f:
            return None
        return root.key
def delete(root, key):
    """Remove `key` from the subtree rooted at `root`; return the new root.

    A node with two children is replaced by its in-order successor (the
    minimum of the right subtree), which is then removed recursively.
    """
    if root is None:
        return root
    if key < root.key:
        root.left = delete(root.left, key)
    elif key > root.key:
        root.right = delete(root.right, key)
    else:
        # Zero or one child: splice the (possibly absent) child in directly.
        if root.right is None:
            return root.left
        if root.left is None:
            return root.right
        # Two children: adopt the successor's key, then delete it below.
        successor = root.right
        while successor.left is not None:
            successor = successor.left
        root.key = successor.key
        root.right = delete(root.right, root.key)
    return root
def kth_largest(root, k):
    """Return the k-th largest key via a reverse in-order traversal
    (right, node, left) using an explicit stack; None if k exceeds the size."""
    remaining = k
    pending = []
    node = root
    while node or pending:
        while node:
            pending.append(node)
            node = node.right
        node = pending.pop()
        remaining -= 1
        if remaining == 0:
            return node.key
        node = node.left
# Demo: build a small BST by hand, show it, delete a key, show it again.
root=BSTNode()
root.key=9
insert(root,15,)
insert(root,6)
insert(root,10)
insert(root,3)
insert(root,7)
insert(root,8)
insert(root,2)
insert(root,4)
insert(root,1)
insert(root,20)
root.display()
# NOTE(review): delete() returns the new root, which is discarded here; this
# only works because key 9 (the root) is not the one being deleted.
delete(root,15)
root.display()
| rogzan/ASD | trees/001 - BST all.py | 001 - BST all.py | py | 4,673 | python | en | code | 0 | github-code | 13 |
33104140901 | """
Author: Shuoyao Wang From Shenzhen University
Reinforcement Learning (A3C) using PyTorch + multiprocessing for the paper:
S. Wang, S. Bi and Y. A. Zhang,
"Reinforcement Learning for Real-Time Pricing and Scheduling Control in EV Charging Stations,"
in IEEE Transactions on Industrial Informatics, vol. 17, no. 2, pp. 849-859, Feb. 2021.
S. Wang, S. Bi and Y. A. Zhang, "A Reinforcement Learning Approach for
EV Charging Station Dynamic Pricing and Scheduling Control,"
2018 IEEE Power & Energy Society General Meeting (PESGM), Portland, OR, 2018, pp. 1-5.
Find more on our github: 'https://github.com/wsyCUHK/'.
"""
import torch
import torch.nn as nn
from utils import v_wrap, set_init, push_and_pull, record
import torch.nn.functional as F
import torch.multiprocessing as mp
from shared_adam import SharedAdam
import gym
import os
import numpy as np
import random
# Avoid thread oversubscription: each A3C worker is its own process.
os.environ["OMP_NUM_THREADS"] = "1"
UPDATE_GLOBAL_ITER = 5   # sync local -> global net every N steps
GAMMA = 0.9              # discount factor
MAX_EP = 3000            # total training episodes across all workers
MAX_EP_STEP = 200        # steps per episode
import pandas as pd
df=pd.read_excel('../data/Price_CAISO.xlsx')
from scipy.io import loadmat
m = loadmat("../data/testingdata.mat")
import numpy as np
# Tile the per-type EV arrival traces 7x so they cover a full training run.
out1=np.concatenate((m['out1'],m['out1']),axis=1)
for i in range(5):
    out1=np.concatenate((out1,m['out1']),axis=1)
out2=np.concatenate((m['out2'],m['out2']),axis=1)
for i in range(5):
    out2=np.concatenate((out2,m['out2']),axis=1)
out3=np.concatenate((m['out3'],m['out3']),axis=1)
for i in range(5):
    out3=np.concatenate((out3,m['out3']),axis=1)
out1=out1.squeeze().astype('int')
out2=out2.squeeze().astype('int')
out3=out3.squeeze().astype('int')
# Expand hourly CAISO prices to the 5-minute grid, clipped into [1, 100].
mixed_price=df['Unnamed: 4'].values
ISO_eprice=np.zeros((4000,1))
for i in range(1,1001):
    if mixed_price[9*i-6]>1 and mixed_price[9*i-6]<100:
        ISO_eprice[15*i-14:15*i]=mixed_price[9*i-6]
    elif mixed_price[9*i-6]<100:
        ISO_eprice[15*i-14:15*i]=1
    else:
        ISO_eprice[15*i-14:15*i]=100
import operator
# Per-EV-type linear demand curves: demand = beta1*price + beta2.
beta1=[-1,-4,-25]
beta2=[6,15,100]
deadline=[6,24,144] #Unit: 5
theta1=0.1
theta2=0.9
max_charging_rate=5 #Unit: 20 KWh
# Highest price at which any EV type still has positive demand.
price_upper_bound=int(np.max([-beta2[0]/beta1[0],-beta2[1]/beta1[1],-beta2[2]/beta1[2]]))
#print(price_upper_bound)
N_A=max_charging_rate+price_upper_bound   # joint action dim (price + rate logits)
N_S=8                                     # state feature dim
ISO_eprice=ISO_eprice.astype('float64')
eprice_mean=np.mean(ISO_eprice)
def env(action,residual_demand,iternum):
    """One simulation step of the EV charging station.

    action: [price index, number of EVs to charge this slot].
    residual_demand: (k, 2) array of [remaining demand, remaining deadline]
    rows. iternum indexes the global arrival/price traces.
    Returns (reward, new residual_demand, 8-dim feature tensor).
    """
    # Cannot charge more EVs than are currently queued.
    if action[1]>residual_demand.shape[0]:
        action[1]=residual_demand.shape[0]
    ##########################Charging Station Start to Charge##########################################
    if residual_demand.shape[0]>0.5:
        #return reward,residual_demand,torch.tensor([0,0,0,0,0])
        # Least-laxity-first: charge the EVs with the smallest slack
        # (deadline - demand) first.
        least=residual_demand[:,1]-residual_demand[:,0]
        order=[operator.itemgetter(0)(t)-1 for t in sorted(enumerate(least,1), key=operator.itemgetter(1), reverse=True)]
        residual_demand[order[:action[1]],0]=residual_demand[order[:action[1]],0]-1
        residual_demand[:,1]=residual_demand[:,1]-1
    ######################EV Admission##############################################################
    # New arrivals of each EV type buy demand according to its linear
    # demand curve at the posted price action[0].
    reward=0
    for i in range(out1[iternum]):
        dem=beta1[0]*action[0]+beta2[0]
        if dem<0:
            dem=0
        reward+=dem*action[0]
        residual_demand=demand_update(residual_demand,np.array([dem,deadline[0]]).reshape((1,2)))
    for i in range(out2[iternum]):
        dem=beta1[1]*action[0]+beta2[1]
        if dem<0:
            dem=0
        reward+=dem*action[0]
        residual_demand=demand_update(residual_demand,np.array([dem,deadline[1]]).reshape((1,2)))
    for i in range(out3[iternum]):
        dem=beta1[2]*action[0]+beta2[2]
        if dem<0:
            dem=0
        reward+=dem*action[0]
        residual_demand=demand_update(residual_demand,np.array([dem,deadline[2]]).reshape((1,2)))
    if residual_demand.shape[0]<0.5:
        return reward,residual_demand,torch.tensor([0,action[1],0,0,ISO_eprice[iternum+1],out1[iternum+1],out2[iternum+1],out3[iternum+1]])
    #######################Departure#################################################################
    # Drop EVs that are fully charged or whose deadline has expired.
    residual_demand_=[]
    for i in range(residual_demand.shape[0]):
        if residual_demand[i,1]>0.5 and residual_demand[i,0]>0.5:
            residual_demand_.append(residual_demand[i,:])
    residual_demand=np.array(residual_demand_)
    ######################Caculate Reward and Features##############################################
    f1=reward
    f2=action[1]
    #print(f2)
    # NOTE(review): reward_output is unbound on the first line, so the bare
    # except always falls through to the second expression; the try/except
    # appears to be dead scaffolding -- confirm before removing.
    try:
        reward_output=reward_output-action[1]*ISO_eprice[iternum]
    except:
        reward_output=reward-action[1]*ISO_eprice[iternum]
    # f3/f4: urgency penalties over the remaining queue (clipped at -20 below).
    f3=0
    f4=0
    for i in range(residual_demand.shape[0]):
        f3=f3-residual_demand[i,0]*(np.max(residual_demand[:,1])-residual_demand[i,1])*theta1
        f4=f4-residual_demand[i,0]*np.power(theta2,residual_demand[i,1])
    return reward_output, residual_demand, torch.tensor([f1,f2,max(f3,-20),max(f4,-20),5*ISO_eprice[iternum+1]/eprice_mean,out1[iternum+1],out2[iternum+1],out3[iternum+1]])
def demand_update(current, new):
    """Append newly admitted demand rows to the current demand matrix.

    When `current` is empty the `new` rows are returned as-is; otherwise the
    two (k, 2) arrays are stacked row-wise.
    """
    if current.shape[0] < 0.5:
        return new
    return np.concatenate((current, new), axis=0)
class Net(nn.Module):
    """Actor-critic network for the A3C agent.

    A shared input feeds two separate one-hidden-layer heads: a policy head
    producing `a_dim` logits (split into charging-rate and price sub-actions)
    and a value head producing a scalar state value.
    """
    def __init__(self, s_dim, a_dim):
        super(Net, self).__init__()
        self.s_dim = s_dim
        self.a_dim = a_dim
        self.pi1 = nn.Linear(s_dim, 128)
        self.pi2 = nn.Linear(128, a_dim)
        self.v1 = nn.Linear(s_dim, 128)
        self.v2 = nn.Linear(128, 1)
        set_init([self.pi1, self.pi2, self.v1, self.v2])
        self.distribution = torch.distributions.Categorical
        #torch.nn.init.xavier_uniform_(self.pi1)
        #torch.nn.init.xavier_uniform_(self.pi2)
        #torch.nn.init.xavier_uniform_(self.v1)
        #torch.nn.init.xavier_uniform_(self.v2)
    def forward(self, x):
        """Return (policy logits, state value) for state batch x."""
        #print(x.shape)
        pi1 = torch.tanh(self.pi1(x))
        logits = self.pi2(pi1)
        v1 = torch.tanh(self.v1(x))
        values = self.v2(v1)
        return logits, values
    def choose_action(self, s):
        """Sample a joint action [rate, price] from the two logit slices."""
        self.eval()
        logits, _ = self.forward(s)
        # First max_charging_rate logits choose the rate, the rest the price.
        prob1 = F.softmax(logits[0][0][:max_charging_rate], dim=-1).data
        prob2 = F.softmax(logits[0][0][max_charging_rate:], dim=-1).data
        m1 = self.distribution(prob1)
        m2=self.distribution(prob2)
        a1=m1.sample().numpy()
        a2=m2.sample().numpy()
        #print(a1)
        #print(a2)
        return np.array([a1,a2])
    def loss_func(self, s, a, v_t):
        """A3C loss: critic TD error squared plus policy-gradient actor term."""
        self.train()
        logits, values = self.forward(s)
        td = v_t - values
        c_loss = td.pow(2)
        prob1 = F.softmax(logits[:,:max_charging_rate], dim=-1).data
        prob2 = F.softmax(logits[:,max_charging_rate:], dim=-1).data
        m1 = self.distribution(prob1)
        m2=self.distribution(prob2)
        # Joint log-prob of the two sub-actions, weighted by the TD advantage.
        exp_v = m1.log_prob(a[:,0])*m2.log_prob(a[:,1])* td.detach().squeeze()
        a_loss = -exp_v
        total_loss = (c_loss + a_loss).mean()
        return total_loss
class Worker(mp.Process):
    """One A3C worker process: rolls out episodes with its local net and
    periodically pushes gradients to / pulls weights from the global net."""
    def __init__(self, gnet, opt, global_ep, global_ep_r, res_queue, name):
        super(Worker, self).__init__()
        self.name = 'w%02i' % name
        # Shared counters/queue used to coordinate across worker processes.
        self.g_ep, self.g_ep_r, self.res_queue = global_ep, global_ep_r, res_queue
        self.gnet, self.opt = gnet, opt
        self.lnet = Net(N_S, N_A)           # local network
        self.env = env
    def run(self):
        """Episode loop executed in the child process until MAX_EP is reached."""
        total_step = 1
        while self.g_ep.value < MAX_EP:
            ################################
            # Initial state: dummy action, zero features, empty EV queue.
            ################################
            a=np.array([100,0])
            s=torch.tensor([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]).reshape((1,N_S)).unsqueeze(0)
            real_state=np.array([])
            #########################
            buffer_s, buffer_a, buffer_r = [], [], []
            ep_r = 0.
            for t in range(MAX_EP_STEP):
                a = self.lnet.choose_action(s)
                r, real_state_, s_= self.env(a,real_state,t)
                r=np.expand_dims(np.expand_dims(r, 0), 0)
                s_=s_.reshape((1,N_S)).unsqueeze(0).float()
                ep_r += r
                buffer_a.append(np.array(a))
                buffer_s.append(s.squeeze().numpy())
                buffer_r.append(r.squeeze())
                done=False
                if t == MAX_EP_STEP - 1:
                    done = True
                if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
                    # sync: push local gradients into the shared optimiser and
                    # pull back the refreshed global weights.
                    push_and_pull(self.opt, self.lnet, self.gnet, done, s_, buffer_s, buffer_a, buffer_r, GAMMA)
                    buffer_s, buffer_a, buffer_r = [], [], []
                    if done: # done and print information
                        record(self.g_ep, self.g_ep_r, ep_r, self.res_queue, self.name)
                        break
                s = s_
                real_state=real_state_
                total_step += 1
        # Sentinel telling the main process this worker has finished.
        self.res_queue.put(None)
if __name__ == "__main__":
    gnet = Net(N_S, N_A) # global network
    gnet.share_memory() # share the global parameters in multiprocessing
    opt = SharedAdam(gnet.parameters(), lr=1e-4, betas=(0.92, 0.999)) # global optimizer
    global_ep, global_ep_r, res_queue = mp.Value('i', 0), mp.Value('d', 0.), mp.Queue()
    # unparallel training
    #workers = [Worker(gnet, opt, global_ep, global_ep_r, res_queue, i) for i in range(1)]
    # parallel training
    workers = [Worker(gnet, opt, global_ep, global_ep_r, res_queue, i) for i in range(mp.cpu_count())]
    [w.start() for w in workers]
    res = [] # record episode reward to plot
    # Drain episode rewards until a worker posts the None sentinel.
    while True:
        r = res_queue.get()
        if r is not None:
            res.append(r)
        else:
            break
    [w.join() for w in workers]
    # Plot the training curve once all workers have terminated.
    import matplotlib.pyplot as plt
    plt.plot(res)
    plt.ylabel('Moving average ep reward')
    plt.xlabel('Step')
    plt.show()
| wsyCUHK/Reinforcement-Learning-for-Real-time-Pricing-and-Scheduling-Control-in-EV-Charging-Stations | code/HSA.py | HSA.py | py | 10,262 | python | en | code | 97 | github-code | 13 |
def hooke_jeeves(evalf, x0, s, a=1, r=0.5, kmax=1e5, smin=1e-6):
    """Minimize `evalf` with the Hooke-Jeeves direct (pattern) search.

    evalf: objective, maps a numpy vector to a scalar.
    x0:    starting point (numpy array); s: initial step size.
    a:     acceleration factor; r: step-reduction factor.
    kmax:  max objective evaluations; smin: smallest step before stopping.
    Returns (xb, fxb, k, s): best point, its value, evaluation count, final step.

    Fix: the module-level pattern_search() helper referenced `evalf` and `np`
    that were never in its scope (NameError at runtime); the exploratory move
    is now a nested helper that closes over them.
    """
    import numpy as np

    n = x0.size

    def explore(base, fbase, step):
        # Coordinate exploration about `base`: per axis, try +/- step and keep
        # the best of {current, plus, minus}. Costs 2n evalf calls.
        x, fx = base, fbase
        for axis in range(n):
            d = np.zeros(n)
            d[axis] = 1
            candidates = np.array([x, x + step * d, x - step * d])
            values = np.array([fx, evalf(candidates[1]), evalf(candidates[2])])
            best = np.argmin(values)
            x, fx = candidates[best], values[best]
        return x, fx

    k = 0  # objective-evaluation counter
    # Evaluate the start point, then explore until we find a descent step.
    xb = x0
    fxb = evalf(xb); k += 1
    x, fx = explore(xb, fxb, s); k += (2 * n)
    while (fx >= fxb) and (s > smin):
        s = r * s  # shrink and retry the exploration
        x, fx = explore(xb, fxb, s); k += (2 * n)
    # Main loop: accelerate along the last successful direction, explore, and
    # shrink the step whenever both exploratory moves fail.
    delta = x - xb
    xb, fxb = x, fx
    while (k < kmax) and (s > smin):
        xe = xb + a * delta
        fxe = evalf(xe); k += 1
        x, fx = explore(xe, fxe, s); k += (2 * n)
        if fx < fxb:
            delta = x - xe
            xb, fxb = x, fx
        else:
            # Acceleration failed; explore around the incumbent instead.
            x, fx = explore(xb, fxb, s); k += (2 * n)
            if fx < fxb:
                delta = x - xb
                xb, fxb = x, fx
            else:
                s = r * s
    return xb, fxb, k, s
# pattern search function for use with hooke_jeeves()
def pattern_search(x, fx, s):
    """Exploratory move for Hooke-Jeeves: per coordinate, try x +/- s along
    that axis and keep the best of the three candidates.

    NOTE(review): this relies on a global objective `evalf` that is NOT
    defined anywhere in this module -- it must be supplied by the caller's
    namespace before this function can run standalone; confirm before use.
    Fix applied: numpy was previously only imported inside hooke_jeeves, so
    the `np` references here raised NameError; import it locally.
    """
    import numpy as np

    n = x.size
    # loop through each dimension
    for ii in range(n):
        # Basis vector for the current axis.
        d = np.zeros(n)
        d[ii] = 1
        # Candidates: current point, forward step, backward step.
        y = np.array([x, x + s * d, x - s * d])
        fVals = np.array([fx, evalf(y[1]), evalf(y[2])])
        idx = np.argmin(fVals)
        x = y[idx]; fx = fVals[idx]
    return x, fx
# end pattern_search()
| bradling/direct-search-opt | hooke_jeeves.py | hooke_jeeves.py | py | 2,058 | python | en | code | 0 | github-code | 13 |
31494275432 | #Faça um programa que leia 5 números e informe o maior número.
def limp(x):
    """Parse a user-typed number: trim whitespace and accept a comma as the
    decimal separator; return a float."""
    return float(x.strip().replace(',', '.'))
# Read N numbers from the user and report the largest one.
a = int(input('Quantos números você deseja inserir? '))
l = []
for i in range(a):
    b = input(f'Digite o {i+1}º número: ')
    b = limp(b)
    l.append(b)
# Sort descending so the maximum sits at index 0.
l.sort(reverse = True)
print(l[0])
| GuilhermeMastelini/Exercicios_documentacao_Python | Estrutura de Repetição/Lição 7.py | Lição 7.py | py | 370 | python | pt | code | 0 | github-code | 13 |
17050358094 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ContractAttach(object):
    """Value object describing a contract attachment file, serializable to
    and from the Alipay wire-format dict."""

    # Serialized keys, in output order.
    _KEYS = ('biz_status', 'file_location', 'file_name',
             'file_type', 'file_url', 'type_is_http')

    def __init__(self):
        self._biz_status = None
        self._file_location = None
        self._file_name = None
        self._file_type = None
        self._file_url = None
        self._type_is_http = None

    @property
    def biz_status(self):
        return self._biz_status

    @biz_status.setter
    def biz_status(self, value):
        self._biz_status = value

    @property
    def file_location(self):
        return self._file_location

    @file_location.setter
    def file_location(self, value):
        self._file_location = value

    @property
    def file_name(self):
        return self._file_name

    @file_name.setter
    def file_name(self, value):
        self._file_name = value

    @property
    def file_type(self):
        return self._file_type

    @file_type.setter
    def file_type(self, value):
        self._file_type = value

    @property
    def file_url(self):
        return self._file_url

    @file_url.setter
    def file_url(self, value):
        self._file_url = value

    @property
    def type_is_http(self):
        return self._type_is_http

    @type_is_http.setter
    def type_is_http(self, value):
        self._type_is_http = value

    def to_alipay_dict(self):
        """Serialize truthy fields to a plain dict, delegating to a nested
        object's to_alipay_dict() when it provides one."""
        params = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a dict; returns None for falsy input."""
        if not d:
            return None
        obj = ContractAttach()
        for key in ContractAttach._KEYS:
            if key in d:
                setattr(obj, key, d[key])
        return obj
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/ContractAttach.py | ContractAttach.py | py | 3,395 | python | en | code | 241 | github-code | 13 |
73654531217 | #!/usr/bin/python3
'''
Author: Björn Hendriks
See http://adventofcode.com/2017
'''
import sys
sys.path.append('..')
import helpers.puzzleInput
import re
def makeGraph(input):
    '''Build the neighbour graph as a dict: {program: [direct neighbours]}.

    Each input line lists a program id followed by the ids it is connected
    to; all integers on the line are extracted by regex.'''
    graph = {}
    for line in input.inputLineIter():
        ids = [int(num) for num in re.findall(r"\d+", line)]
        # The first id on the line is the program, the rest its neighbours.
        graph[ids[0]] = ids[1:]
    return graph
def allNeighbors(start, neighbors=None):
    '''Return the set of all programs reachable from start (start included).

    Fix: the original used a mutable default argument (neighbors=set()), so
    the accumulator set was silently shared across top-level calls; each call
    now starts from a fresh set unless one is passed in explicitly.
    Recurses through the module-level `graph` adjacency dict.'''
    if neighbors is None:
        neighbors = set()
    assert start in graph
    neighbors.add(start)
    for neighbor in graph[start]:
        if (neighbor not in neighbors):
            allNeighbors(neighbor, neighbors)
    return neighbors
if __name__ == "__main__":
    input = helpers.puzzleInput.Input(2017, 12)
    graph = makeGraph(input)
    # Part 1: size of the connected component containing program 0.
    neighborsOf0 = allNeighbors(0)
    print ("result1 =", len(neighborsOf0))
    # Part 2: count connected components by repeatedly removing one whole
    # reachable group from the remaining-program set.
    groupCount = 0
    remainingPrograms = set(graph.keys())
    while (remainingPrograms):
        # Take any remaining program
        start = remainingPrograms.pop()
        neighbors = allNeighbors(start)
        # Remove neighbors of start from remainingPrograms
        remainingPrograms -= neighbors
        groupCount += 1
    print ("result2 =", groupCount)
| bjhend/adventofcode | 2017/day12.py | day12.py | py | 1,292 | python | en | code | 0 | github-code | 13 |
37184749855 | import Tkinter as Tk
from lib import tkwindows
def boot_strap():
    """Create and return the root Tk application object."""
    return Tk.Tk()
def main():
    # Build the Tk root, wrap it in the application's main window, and enter
    # the event loop.
    application = boot_strap()
    window = tkwindows.TkWindow(application)
    window.bootEvent()
    # Route the window-manager close button through the window's quit handler.
    application.protocol("WM_DELETE_WINDOW", window.fileQuit)
    application.mainloop()
if __name__ == '__main__':
main()
| bluele/PyCliper | pycliper.py | pycliper.py | py | 358 | python | en | code | 1 | github-code | 13 |
28419731772 | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 1 11:54:30 2022
@author: Lucian
"""
import numpy as np
def checkHeader(header):
    """Return True when any character repeats within `header`, else False.

    A set collapses duplicates, so a shorter set means a repeat exists.
    """
    return len(set(header)) < len(header)
def checkMessage(header):
    """Return True when any character repeats within `header`, else False.

    Same duplicate test as checkHeader, kept as a separate name for the
    14-character start-of-message window.
    """
    return len(set(header)) < len(header)
# Scan each input line for the first 4-char window with no repeats (packet
# header) and the first 14-char window with no repeats (message start).
for line in open("day6_input.txt"):
#for line in open("day6_example.txt"):
    # NOTE(review): strip() returns a new string that is discarded here, so
    # this line is a no-op -- confirm whether `line = line.strip()` was meant.
    line.strip()
    for (n, ch) in enumerate(line):
        if n<4:
            continue
        if checkHeader(line[n-4:n]):
            continue
        # NOTE(review): prints for every duplicate-free window, not just the
        # first one -- there is no break after the header is found.
        print("End of header:", n, ch)
        if n<14:
            continue
        if checkMessage(line[n-14:n]):
            continue
        print("End of message:", n, ch)
break | luciansmith/adventOfCode | 2022/day6.py | day6.py | py | 792 | python | en | code | 0 | github-code | 13 |
71109809937 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Nico Colic, June 2015
import nltk
import os.path
class Text_processing(object):
    """Tokenisation and PoS tagging for a given text (NLTK-backed).

    Requires the NLTK models tokenizers/punkt and maxent_treebank_pos_tagger
    to be downloaded manually before use.

    Token tuples: [0] token text, [1] start position, [2] end position;
    tagged tokens additionally carry [3] the PoS tag.
    """

    # Injected via __init__; class-level defaults only keep the names defined.
    sentence_tokenizer = None
    word_tokenizer = None

    def __init__(self, word_tokenizer, sentence_tokenizer):
        # TODO: automatic download of tokenizers/punkt and
        # maxent_treebank_pos_tagger via nltk.download() did not work
        # reliably, so the models must be downloaded manually for now.
        self.sentence_tokenizer = sentence_tokenizer
        self.word_tokenizer = word_tokenizer

    def tokenize_sentences(self, text):
        """Split `text` into a list of sentence strings."""
        return self.sentence_tokenizer.tokenize(text)

    def span_tokenize_words(self, text):
        """Return a list of tokenized sentences, each a list of token tuples.

        Token tuples: [0] token text, [1] begin position in the full text,
        [2] end position, [3] sentence number. Use flatify() to get one flat
        list of tokens.
        """
        sentences = self.tokenize_sentences(text)
        tokens_per_sentence = list()
        sentence_offset = 0
        sentence_counter = 0
        for sentence in sentences:
            sentence_tokens = list()
            for token in self.word_tokenizer.span_tokenize(sentence):
                # Shift sentence-relative spans to offsets in the full text.
                begin = token[0] + sentence_offset
                end = token[1] + sentence_offset
                token_tuple = (text[begin:end], begin, end, sentence_counter)
                sentence_tokens.append(token_tuple)
            tokens_per_sentence.append(sentence_tokens)
            sentence_counter = sentence_counter + 1
            # +1 accounts for the single separator character assumed to sit
            # between consecutive sentences in the original text.
            sentence_offset = sentence_offset + len(sentence) + 1
        return tokens_per_sentence

    def tokenize_words(self, text):
        """Tokenize `text` into plain word strings (no position info)."""
        return self.word_tokenizer.tokenize(text)

    def flatify(self, tokens_per_sentence):
        """Flatten the per-sentence token lists into one flat token list."""
        tokens = list()
        for sentence in tokens_per_sentence:
            for token in sentence:
                tokens.append(token)
        return tokens

    def pos_tag(self, span_tokens):
        """PoS-tag position-annotated tokens.

        Returns tuples of [0] token, [1] start position, [2] end position,
        [3] PoS tag.
        """
        # nltk.pos_tag() expects bare token strings, so strip the positions,
        # tag, then reconcile the tags with the original position info.
        tokens = list()
        for span_token in span_tokens:
            tokens.append(span_token[0])
        tagged_tokens = nltk.pos_tag(tokens)
        span_tagged_tokens = list()
        for i in range(len(span_tokens)):
            # Safety measure: silently skip entries should the tagger ever
            # return tokens out of step with its input.
            if span_tokens[i][0] == tagged_tokens[i][0]:
                span_tagged_token = (span_tokens[i][0], span_tokens[i][1], span_tokens[i][2], tagged_tokens[i][1])
                span_tagged_tokens.append(span_tagged_token)
        return span_tagged_tokens

    def export_tokens_to_xml(self, id, tokens_per_sentence, output_directory, mode=None):
        """Write tokens as XML below <output_directory>/text_processing/.

        mode: 'normal' writes <id>.xml, 'pretty' writes <id>_pretty.xml,
        'both' writes both; the default (None) writes only the pretty file.
        (The parameter name `id` shadows the builtin but is kept for caller
        compatibility.)
        """
        import xml.etree.ElementTree as ET
        sentence_number = 0
        root = ET.Element("root")
        for sentence in tokens_per_sentence:
            S = ET.SubElement(root, "S")
            S.set('i', str(sentence_number))
            sentence_number = sentence_number + 1
            for word in sentence:
                W = ET.SubElement(S, "W")
                W.text = word[0]
                # Store the start/end character offsets on each word element.
                W.set('end', str(word[2]))
                W.set('begin', str(word[1]))
        # Ensure the output subdirectory exists before writing.
        my_directory = self.make_output_subdirectory(output_directory)
        # Compact output via etree.
        file_name = os.path.join(my_directory, id + '.xml')
        if (mode == 'both' or mode == 'normal'):
            with open(file_name, 'wb') as f:
                ET.ElementTree(root).write(f, encoding="UTF-8", xml_declaration=True)
        # Pretty-printed output via minidom.
        file_name = os.path.join(my_directory, id + '_pretty.xml')
        if (mode == 'both' or mode == 'pretty' or mode == None):
            from xml.dom import minidom
            pretty_article = minidom.parseString(ET.tostring(root, 'utf-8')).toprettyxml(indent=" ")
            with open(file_name, 'w') as f:
                f.write(pretty_article)

    def export_tagged_tokens_to_xml(self):
        # Not implemented yet.
        pass

    def make_output_subdirectory(self, output_directory_absolute):
        """Create (if needed) and return the 'text_processing' subdirectory."""
        my_directory = os.path.join(output_directory_absolute, 'text_processing')
        return self.make_directory(my_directory)

    def make_directory(self, directory):
        """Create `directory` if it does not exist; return its path.

        Returns None when creation fails. Fix: the original wrote `except():`
        -- an empty exception tuple that catches nothing -- so the failure
        branch was unreachable and any error escaped as a raw OSError.
        """
        if not os.path.exists(directory):
            try:
                os.makedirs(directory)
                return directory
            except OSError:
                print('Could not create directory ', directory)
                return None
        else:
            return directory
| Aequivinius/python-ontogene | text_processing/text_processing.py | text_processing.py | py | 4,952 | python | en | code | 1 | github-code | 13 |
40406242651 | # Ejercicio 6
#
# Diseñar una función que calcule el área y el perímetro de una circunferencia.
# Utiliza dicha función en un programa principal que lea el radio de una circunferencia y muestre su área y perímetro.
import math
def area_perimetro(radio):
    """Return the (area, perimeter) of a circle with the given radius."""
    return math.pi * radio ** 2, 2 * math.pi * radio
# Read the radius, then report both measures.
radio = float(input("Introduzca el radio de la circunferencia: "))
area, perimetro = area_perimetro(radio)
print("El area de la circunferencia es ", area)
# Fix: the original labelled the perimeter line "El area" as well.
print("El perimetro de la circunferencia es ", perimetro)
| mavb86/ejercicios-python | seccion7/ejercicio06.py | ejercicio06.py | py | 569 | python | es | code | 0 | github-code | 13 |
from typing import List
class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
        """Return the largest sum over all non-empty contiguous subarrays.

        Kadane's algorithm: the best subarray ending at each index either
        extends the previous best or restarts at the current element.
        """
        best = current = nums[0]
        for value in nums[1:]:
            current = max(value, current + value)
            best = max(best, current)
        return best
if __name__ == "__main__":
solution = Solution()
result = solution.maxSubArray([-2,1,-3,4,-1,2,1,-5,4])
print (result) | MateuszKudla/30-day-leet-coding-challange | day-3/maximum-subarray.py | maximum-subarray.py | py | 632 | python | en | code | 0 | github-code | 13 |
import json
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.db import IntegrityError
from django.shortcuts import redirect, render
from django.urls import reverse
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.core.paginator import Paginator
from django.db.models import (
F, Sum, Count, Case, When, Q,
ExpressionWrapper,
IntegerField, FloatField,DecimalField, CharField,
OuterRef, Subquery
)
from django.db.models.functions import (
Cast
)
from django.utils.translation import gettext_lazy as _
from .util import get_prices, get_price, check_stock, monetaryConversor
from .balance import balance
from .models import User, UserPreferences, Type, Institution, Investiment, Transaction, Goal
from .forms import CreateInstitution, CreateType, CreateGoal, CreateInvestiment, EditInvestiment, CreateTransaction, CreateFirstTransaction
def index(request):
    """Dashboard for authenticated users; public landing page otherwise.

    Computes per-type allocation percentages against the current balance and
    the progress towards each savings goal.
    """
    if request.user.is_authenticated:
        user_data = User.objects.get(pk=request.user.id)
        types = user_data.types.order_by("percent").all()
        goals = user_data.goals.order_by("value").all()
        getBalance = balance(request.user.id)
        nowPercetages = {}
        maxTypesPercentage = 100
        for type in types:
            # share of the total balance currently held in this type
            if getBalance['total'] > 0:
                nowPercetages[type.typeName] = round((getBalance['typesTotal'].get(type.typeName) / getBalance['total']) * 100, 2)
            else:
                nowPercetages[type.typeName] = 0
            maxTypesPercentage = maxTypesPercentage - type.percent
        goalsArray = []
        nowGoal = None
        if goals:
            nowGoal = goals[0].value
        biggerGoal = 0
        for goal in goals:
            lack = 0
            status = 'off'
            if getBalance['total'] > 0:
                if getBalance['total'] >= goal.value:
                    status = 'on'
                    lack = 100
                else:
                    lack = round( (getBalance['total'] / float(goal.value)) * 100, 2)
                    status = 'off'
            if biggerGoal == 0:
                biggerGoal = goal.value
            # NOTE(review): nowGoal never advances past the smallest goal with
            # this ordering -- confirm the intended "next goal" semantics.
            if nowGoal < goal.value and goal.value <= biggerGoal:
                nowGoal = goal.value
            if goal.value > biggerGoal:
                # Fix: was `biggerGoal: goal.value` -- a bare annotation
                # statement that silently did nothing instead of assigning.
                biggerGoal = goal.value
            goalsArray.append({
                'id' : goal.id,
                'value' : goal.value,
                'lack' : lack,
                'status' : status,
            })
        return render(request, "finance/dashboard.html",{
            "types": types,
            "maxTypesPercentage": maxTypesPercentage,
            "goals": goalsArray,
            "nowGoal": nowGoal,
            "balance": getBalance,
            "nowPercetages": nowPercetages,
            "type_form": CreateType(),
            "goal_form": CreateGoal(),
        })
    return render(request, "finance/index.html")
def getPrices(request):
    """AJAX endpoint: return current market prices for a list of ticker codes.

    Expects a JSON POST body {"codes": [...]}; responds with
    {"success": {code: regularMarketPrice, ...}}.
    NOTE(review): non-POST requests implicitly return None -- confirm callers
    only ever POST here.
    """
    if request.method == "POST":
        body = json.loads(request.body)
        codes = body.get('codes')
        nowPrices = {}
        # one batched quote lookup for all codes (see util.get_prices)
        prices = get_prices(codes)
        for code in codes:
            nowPrices[code] = prices.tickers[code].info['regularMarketPrice']
        return JsonResponse({"success": nowPrices }, status=201)
def getPrice(request, code):
    """Fetch a single quote and render the landing page.

    NOTE(review): the fetched price is never returned to the client --
    this looks like a development leftover; confirm before relying on it.
    Fix: removed the debug print() calls that leaked into the server log.
    """
    get_price(code)  # result intentionally discarded (see note above)
    return render(request, "finance/index.html")
@login_required(login_url="finance:login")
def investiments(request):
if request.method == "GET":
filtterType = request.GET.get('type')
my_q = Q( user_id=request.user.id)
if filtterType:
my_q = Q( type__id=filtterType)
user_data = User.objects.get(pk=request.user.id)
portfolio = user_data.investiments.order_by("-date").all().filter(my_q).annotate(
lastedTrans=Subquery(
Transaction.objects.filter(
investiment=OuterRef('pk')
).order_by('-transaction_date').values('action')[:1]
),
firstTrans=Subquery(
Transaction.objects.filter(
investiment=OuterRef('pk')
).order_by('-id').values('payprice')[:1]
),
qnt = Case(
When(position="BUY", then=(
Sum(Case(
When(transactions__action="BUY", then='transactions__quantity' ),
When(transactions__action="SELL", then=F('transactions__quantity') * -1 ),
output_field=FloatField()
))
)),
When(position="SELL", then=(
Sum(Case(
When(transactions__action="BUY", then='transactions__quantity' ),
When(transactions__action="SELL", then=F('transactions__quantity') * -1 ),
output_field=FloatField()
))* -1
)),
When(position="NONE", then=0)
,output_field=FloatField()),
allBought= (
Sum(Case(
When(transactions__action="BUY", then=F('transactions__quantity') * F('transactions__payprice')),
output_field=FloatField()
), output_field=FloatField())
* 1.0),
allSales = (
Sum(Case(
When(transactions__action="SELL", then=F('transactions__quantity') * F('transactions__payprice')),
output_field=FloatField()
), output_field=FloatField())
* 1.0),
balance = Sum(Case(
When(transactions__action="BUY", then=F('transactions__quantity') * F('transactions__payprice')),
When(transactions__action="SELL", then=F('transactions__quantity') * F('transactions__payprice') * -1 ),
output_field=FloatField()
)),
total = Case(
When(position="BUY", then=(
Case(
When( balance__gt = 0, then=F('balance')),
When( ~Q(balance__gt = 0), then=(
F('firstTrans') * F('qnt')
))
,output_field=FloatField())
)),
When(position="SELL", then=(
Case(
When( balance__gt = 0, then=F('balance') * -1),
When( ~Q(balance__gt = 0), then=(
F('firstTrans') * F('qnt')
))
,output_field=FloatField())
)),
When(position="NONE", then=('allBought'))
,output_field=FloatField()),
accomplished = Case(
When(~Q(position="NONE"), then=0),
When(position="NONE", then=(
(Case(
When(lastedTrans="SELL", then=('balance')),
When(lastedTrans="BUY", then=(
(F('allBought') * -1) + F('allSales')
))
))
))
,output_field=FloatField()),
accomplishedRevenuePercent = Case(
When(~Q(position="NONE"), then=0),
When(position="NONE", then=(
ExpressionWrapper(
(
(
(Case(
When(lastedTrans="SELL", then=(
F('allSales') - F('allBought')
)),
When(lastedTrans="BUY", then=(
F('allBought') - F('allSales')
))
))
)
/
(Case(
When(lastedTrans="SELL", then=(
F('allBought')
)),
When(lastedTrans="BUY", then=(
Case(
When(allBought__lte=F('allSales'), then=( F('allSales' ) )),
When(allBought__gte=F('allSales'), then=( F('allBought' ) ))
)
))
))
) * 100.00
,output_field=FloatField())
))
,output_field=FloatField()),
)
types = user_data.types.order_by("typeName").all()
institutions = user_data.institutions.order_by("name").all()
maxTypesPercentage = 100
for type in types:
maxTypesPercentage = maxTypesPercentage - type.percent
#Get portfolio each
codes = []
realcodes = []
nowPrices = {}
sumRating = 0
for investiment in portfolio:
sumRating = sumRating + investiment.rating
if investiment.currency == 'R$':
realcodes.append(investiment.code)
codes.append(investiment.code+'.SA')
else:
realcodes.append(investiment.code)
codes.append(investiment.code)
portfolioTotal = 0
currenciesTotal = {
'R$' : 0,
'$' : 0,
'€' : 0,
'£' : 0,
}
prices = get_prices(codes)
for i in range(len(codes)):
price = prices.tickers[codes[i]].info['regularMarketPrice']
nowPrices[codes[i]] = price
# print(realcodes[i], codes[i])
# print(portfolio[i])
# print(portfolio[i].total)
thisInvestimentSum = round(float(price) * float(portfolio[i].qnt),2)
currenciesTotal[portfolio[i].currency] = currenciesTotal[portfolio[i].currency] + thisInvestimentSum
if portfolio[i].currency == user_data.preferences.currency :
portfolioTotal = portfolioTotal + thisInvestimentSum
else:
# print(portfolio[i].currency, user_data.preferences.currency , thisInvestimentSum)
thisInvestimentSumUser = monetaryConversor(portfolio[i].currency, user_data.preferences.currency , thisInvestimentSum)
portfolioTotal = portfolioTotal + thisInvestimentSumUser
portfolioTotal = round(portfolioTotal,2)
if filtterType == None or ( filtterType != None and len(types) == 1):
Balance = {'total': portfolioTotal}
else:
Balance = balance(request.user.id)
# Create page controll
paginator = Paginator(portfolio, 10)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
investiment_form = CreateInvestiment()
investiment_form.fields["type"].queryset = types
investiment_form.fields["institution"].queryset = institutions
edit_investtiment = EditInvestiment()
edit_investtiment.fields["type"].queryset = types
edit_investtiment.fields["institution"].queryset = institutions
transaction_form = CreateTransaction()
transaction_form.fields["investiment"].queryset = portfolio
return render(request, "finance/investiments.html",{
"page_obj": page_obj,
"portfolio": page_obj,
"nowPrices": nowPrices,
"sumRating": sumRating,
"currenciesTotal": currenciesTotal,
"portfolioTotal": portfolioTotal,
"balance": Balance,
"types": types,
"maxTypesPercentage": maxTypesPercentage,
"institutions": institutions,
"type_form": CreateType(),
"institution_form": CreateInstitution(),
"investiment_form": investiment_form,
"transaction_form": transaction_form,
"edit_investiment_form": edit_investtiment,
})
if request.method == "POST":
form = CreateInvestiment(request.POST)
formset = CreateFirstTransaction(request.POST)
if request.POST.get("transaction_date") == '':
messages.error(request, _("You must select the transaction date"))
return redirect('finance:investiments')
if all([form.is_valid(), formset.is_valid()]):
code = form.cleaned_data.get("code")
#Check if stock exist before create
if check_stock(code, form.cleaned_data.get("currency")):
if Investiment.objects.filter(code=code, user=request.user).exists():
CheckInvestiment = Investiment.objects.annotate(
qnt=Sum(Case(
When(transactions__action="BUY", then='transactions__quantity' ),
When(transactions__action="SELL", then=F('transactions__quantity') * -1 ),
output_field=IntegerField()
)),).get(code=code, user=request.user)
investiment_id = CheckInvestiment.id
# print('update investiment')
if formset.cleaned_data.get("action") == 'SELL':
if CheckInvestiment.qnt - formset.cleaned_data.get("quantity") < 0.00:
CheckInvestiment.position = 'SELL'
elif CheckInvestiment.qnt - formset.cleaned_data.get("quantity") > 0.00:
CheckInvestiment.position = 'BUY'
else:
CheckInvestiment.position = 'NONE'
# print(CheckInvestiment.qnt)
# print(CheckInvestiment.qnt + formset.cleaned_data.get("quantity"))
if formset.cleaned_data.get("action") == 'BUY':
if CheckInvestiment.qnt + formset.cleaned_data.get("quantity") < 0.00:
#print('SELL')
CheckInvestiment.position = 'SELL'
elif CheckInvestiment.qnt + formset.cleaned_data.get("quantity") > 0.00:
#print('BUY')
CheckInvestiment.position = 'BUY'
else:
#print('NONE')
CheckInvestiment.position = 'NONE'
CheckInvestiment.save()
else:
#print('create investiment')
#Crete investiment
new_investiment = Investiment(
user = User.objects.get(pk=request.user.id),
code = code,
rating = form.cleaned_data.get("rating"),
currency = form.cleaned_data.get("currency"),
institution = form.cleaned_data.get("institution"),
type = form.cleaned_data.get("type"),
)
if formset.cleaned_data.get("action") == 'SELL':
new_investiment.position = 'SELL'
else:
new_investiment.position = 'BUY'
new_investiment.save()
investiment_id = new_investiment.id
# Crete investiment first transaction
new_transaction = Transaction(
user = User.objects.get(pk=request.user.id),
investiment = Investiment.objects.get(pk=investiment_id),
quantity = formset.cleaned_data.get("quantity"),
payprice = formset.cleaned_data.get("payprice"),
action = formset.cleaned_data.get("action"),
transaction_date = formset.cleaned_data.get("transaction_date"),
)
new_transaction.save()
return HttpResponseRedirect(request.headers['Referer'])
else:
messages.error(request, _("Investiment does not match with any stock"))
return redirect('finance:investiments')
@login_required(login_url="finance:login")
def investiment(request, id):
if request.method == "PUT":
body = json.loads(request.body)
try:
ThisInvestiment = Investiment.objects.get(pk=id, user=request.user)
except (Investiment.DoesNotExist):
return JsonResponse({
"error": _("Investiment does not exist")
}, status=404)
#print('Type',body.get("type"))
ThisInvestiment.type = Type.objects.get(pk=body.get("type"), user=request.user)
ThisInvestiment.institution = Institution.objects.get(pk=body.get("institution"), user=request.user)
ThisInvestiment.rating = body.get("rating")
ThisInvestiment.save()
return HttpResponse(status=204)
if request.method == "DELETE":
try:
object_to_delete = Investiment.objects.get(pk=id, user=request.user)
except (Investiment.DoesNotExist):
return JsonResponse({
"error": _("Investiment does not exist")
}, status=404)
objectId = object_to_delete.id
# Delete the investiment
object_to_delete.delete()
return JsonResponse({"success": objectId }, status=201)
def updateInvestimentPosition(investiment_code, user):
    """Recompute an investiment's position (BUY/SELL/NONE) from its transactions.

    Net quantity = sum(BUY quantities) - sum(SELL quantities); positive net
    means an open BUY position, negative SELL, zero NONE.

    NOTE(review): callers pass an Investiment *instance* (not a code string)
    as `investiment_code`, so the `code=...` lookup depends on the model's
    value coercion -- verify against the Investiment model definition.
    """
    ThisInvestiment = Investiment.objects.annotate(
        qnt=Sum(Case(
            When(transactions__action="BUY", then='transactions__quantity' ),
            When(transactions__action="SELL", then=F('transactions__quantity') * -1 ),
            output_field=FloatField()
        )),).get(code=investiment_code, user=user)
    if ThisInvestiment.qnt < 0.00:
        ThisInvestiment.position = 'SELL'
    elif ThisInvestiment.qnt > 0.00:
        ThisInvestiment.position = 'BUY'
    else:
        ThisInvestiment.position = 'NONE'
    ThisInvestiment.save()
@login_required(login_url="finance:login")
def transactions(request):
if request.method == "GET":
user_data = User.objects.get(pk=request.user.id)
filtterInvest = request.GET.get('i')
my_q = Q( user_id=request.user.id)
if filtterInvest:
my_q = Q( investiment__id=filtterInvest)
transactions = user_data.transactions.order_by("-transaction_date").all().filter(my_q).annotate(
realDate=Cast('transaction_date', CharField()),
)
investiments = user_data.investiments.order_by("code").all()
transaction_form = CreateTransaction()
transaction_form.fields["investiment"].queryset = investiments
# Create page controll
paginator = Paginator(transactions, 20)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
return render(request, "finance/transactions.html",{
"transactions": page_obj,
"page_obj": page_obj,
"investiments": investiments,
"balance": balance(request.user.id),
"transaction_form": transaction_form,
})
if request.method == "POST":
form = CreateTransaction(request.POST)
if request.POST.get("transaction_date") == '' or request.POST.get("transaction_date") == None:
messages.error(request, _("You must select the transaction date"))
return HttpResponseRedirect(request.headers['Referer'])
investiment = Investiment.objects.filter(pk=request.POST.get("investiment"), user=request.user.id)
if not investiment:
messages.error(request, _("Investiment does not exist"))
return HttpResponseRedirect(request.headers['Referer'])
if form.is_valid():
# Crete investiment transaction
new_transaction = Transaction(
user = User.objects.get(pk=request.user.id),
investiment = form.cleaned_data.get("investiment"),
quantity = form.cleaned_data.get("quantity"),
payprice = form.cleaned_data.get("payprice"),
action = form.cleaned_data.get("action"),
transaction_date = form.cleaned_data.get("transaction_date"),
)
new_transaction.save()
#Update investiment position
updateInvestimentPosition(form.cleaned_data.get("investiment"),request.user)
return HttpResponseRedirect(request.headers['Referer'])
@login_required(login_url="finance:login")
def transaction(request, id):
if request.method == "PUT":
body = json.loads(request.body)
try:
object_to_update = Transaction.objects.get(pk=id, user=request.user)
except (Transaction.DoesNotExist):
return JsonResponse({
"error": _("Transaction does not exist")
}, status=404)
# Update transaction
object_to_update.action = body.get('action')
object_to_update.payprice = body.get('payprice')
object_to_update.quantity = body.get('quantity')
object_to_update.transaction_date = body.get('transaction_date')
object_to_update.save()
#Update investiment position
updateInvestimentPosition(object_to_update.investiment,request.user)
return HttpResponse(status=204)
if request.method == "DELETE":
try:
object_to_delete = Transaction.objects.get(pk=id, user=request.user)
except (Transaction.DoesNotExist):
return JsonResponse({
"error": _("Transaction does not exist")
}, status=404)
objectId = object_to_delete.id
# Delete the transaction
object_to_delete.delete()
#Update investiment position
updateInvestimentPosition(object_to_delete.investiment,request.user)
return JsonResponse({"success": objectId }, status=201)
@login_required(login_url="finance:login")
def institutions(request):
if request.method == "POST":
form = CreateInstitution(request.POST)
if form.is_valid():
new_institution = Institution(
user = User.objects.get(pk=request.user.id),
name = form.cleaned_data.get("name")
)
new_institution.save()
institutionData = {
'id': new_institution.id,
'name': new_institution.name,
}
return JsonResponse(institutionData, safe=False)
else:
return JsonResponse({
"error": form.errors
}, status=404)
if request.method == "GET":
user_data = User.objects.get(pk=request.user.id)
institutions = user_data.institutions.order_by("name").all().annotate(
investiments = Count('investimentInstitution')
)
# Create page controll
paginator = Paginator(institutions, 20)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
return render(request, "finance/institutions.html",{
"page_obj": page_obj,
"institutions": page_obj,
"institution_form": CreateInstitution(),
"balance": balance(request.user.id),
})
@login_required(login_url="finance:login")
def institution(request, id):
if request.method == "PUT":
body = json.loads(request.body)
try:
object_to_update = Institution.objects.get(pk=id, user=request.user)
except (Institution.DoesNotExist):
return JsonResponse({
"error": _("Institution does not exist")
}, status=404)
object_to_update.name = body.get('name')
object_to_update.save()
return HttpResponse(status=204)
if request.method == "DELETE":
try:
object_to_delete = Institution.objects.get(pk=id, user=request.user)
except (Institution.DoesNotExist):
return JsonResponse({
"error": _("Institution does not exist")
}, status=404)
objectId = object_to_delete.id
# Delete the institution
try:
object_to_delete.delete()
except:
return JsonResponse({
"error": _("To delete this institution, remove all related investiments first")
}, status=404)
return JsonResponse({"success": objectId }, status=201)
@login_required(login_url="finance:login")
def create_type(request):
if request.method == "POST":
form = CreateType(request.POST)
if form.is_valid():
user_data = User.objects.get(pk=request.user.id)
types = user_data.types.order_by("percent").all()
maxTypesPercentage = 100
for type in types:
maxTypesPercentage = maxTypesPercentage - type.percent
#print('maxTypesPercentage',maxTypesPercentage, form.cleaned_data.get("percent"))
if form.cleaned_data.get("percent") > maxTypesPercentage:
return JsonResponse({
"error": {
"percent": _("The sum of your investiment types percentages must be less than 100%")
}
}, status=404)
new_type = Type(
user = User.objects.get(pk=request.user.id),
typeName = form.cleaned_data.get("typeName"),
percent = form.cleaned_data.get("percent"),
color = form.cleaned_data.get("color")
)
new_type.save()
typenData = {
'id': new_type.id,
'name': new_type.typeName,
'percent': new_type.percent,
}
return JsonResponse(typenData, safe=False)
else:
#print('invalid')
return JsonResponse({
"error": form.errors
}, status=404)
@login_required(login_url="finance:login")
def type(request, id):
if request.method == "PUT":
body = json.loads(request.body)
try:
object_to_update = Type.objects.get(pk=id, user=request.user)
except (Type.DoesNotExist):
return JsonResponse({
"error": _("Type does not exist")
}, status=404)
object_to_update.typeName = body.get('typeName')
object_to_update.percent = body.get('percent')
object_to_update.color = body.get('color')
object_to_update.save()
return HttpResponse(status=204)
if request.method == "DELETE":
try:
object_to_delete = Type.objects.get(pk=id, user=request.user)
except (Type.DoesNotExist):
return JsonResponse({
"error": _("Type does not exist")
}, status=404)
objectId = object_to_delete.id
# Delete type
try:
object_to_delete.delete()
except:
return JsonResponse({
"error": _("To delete this type, remove all related investiments first")
}, status=404)
return JsonResponse({"success": objectId }, status=201)
@login_required(login_url="finance:login")
def create_goal(request):
if request.method == "POST":
form = CreateGoal(request.POST)
#print('form')
#print(form.errors )
if form.is_valid():
#print('valid')
new_goal = Goal(
user = User.objects.get(pk=request.user.id),
value = form.cleaned_data.get("value"),
)
new_goal.save()
goalData = {
'id': new_goal.id,
'value': new_goal.value,
}
return JsonResponse(goalData, safe=False)
else:
#print('invald')
return JsonResponse({
"error": form.errors
}, status=404)
@login_required(login_url="finance:login")
def goal(request, id):
if request.method == "DELETE":
try:
object_to_delete = Goal.objects.get(pk=id, user=request.user)
except (Goal.DoesNotExist):
return JsonResponse({
"error": _("Goal does not exist")
}, status=404)
objectId = object_to_delete.id
# Delete the goal
try:
object_to_delete.delete()
except:
return JsonResponse({
"error": _("Something goes wrong")
}, status=404)
return JsonResponse({"success": objectId }, status=201)
def setCurrency(request):
    """Persist the user's preferred display currency and set a cookie.

    Bug fix: the original attached the 'currency' cookie to a bare
    HttpResponse() that was never returned, then returned a separate
    redirect -- so the cookie never reached the client. The cookie is now
    set on the redirect response that is actually sent back.
    """
    if request.method == "POST":
        body = json.loads(request.body)
        currency = body.get("currency")
        if request.user.is_authenticated:
            try:
                obj = UserPreferences.objects.get(user=request.user.id)
                obj.currency = currency
                obj.save()
            except UserPreferences.DoesNotExist:
                obj = UserPreferences( user=request.user, currency = currency)
                obj.save()
        response = HttpResponseRedirect(request.headers['Referer'])
        response.set_cookie('currency', currency, max_age = 5000000)
        return response
def setTheme(request):
    """Store the user's UI theme preference (POST with JSON body {"theme": ...})."""
    if request.method == "POST":
        payload = json.loads(request.body)
        chosen_theme = payload.get("theme")
        if request.user.is_authenticated:
            # update the existing preferences row, or create one on first use
            try:
                prefs = UserPreferences.objects.get(user=request.user.id)
            except UserPreferences.DoesNotExist:
                prefs = UserPreferences(user=request.user, theme=chosen_theme)
            else:
                prefs.theme = chosen_theme
            prefs.save()
        return HttpResponseRedirect(request.headers['Referer'])
def login_view(request):
    """View: Controls logging in.

    POST authenticates the submitted username/password; on success the user
    is redirected either to the page they originally requested (the hidden
    'next' field) or to the dashboard. GET shows the login form, unless the
    user is already authenticated.
    """
    if request.method == "POST":
        # Attempt to sign user in
        username = request.POST["username"]
        password = request.POST["password"]
        user = authenticate(request, username=username, password=password)
        # Check if authentication successful
        if user is not None:
            login(request, user)
            # If user tried to enter login_required page - go there after login
            if "next" in request.POST:
                # 'next' is a path like '/transactions/3'; strip the leading
                # slash and map segments onto the named URL plus its args
                request_args = request.POST.get("next")[1:].split('/')
                return HttpResponseRedirect(reverse(
                    "finance:" + request_args[0], args=request_args[1:]
                ))
            else:
                return HttpResponseRedirect(reverse("finance:index"))
        else:
            return render(request, "finance/login.html", {
                "message": _("Invalid username and/or password.")
            })
    else:
        # Show login panel only for not login users
        if request.user.is_authenticated:
            return HttpResponseRedirect(reverse("finance:index"))
        else:
            return render(request, "finance/login.html")
def logout_view(request):
    """View: Controls logging out -- ends the session and redirects home."""
    logout(request)
    return HttpResponseRedirect(reverse("finance:index"))
def register(request):
    """View: Controls registration.

    POST validates the submitted fields, creates the account and logs the
    new user in; GET shows the form unless the user is already signed in.
    """
    if request.method == "POST":
        username = request.POST["username"]
        email = request.POST["email"]
        password = request.POST["password"]
        confirmation = request.POST["confirmation"]
        # Ensure no blank fields
        if (not username) or (not email) or (not password):
            return render(request, "finance/register.html", {
                "message": _("You must fill out all fields.")
            })
        # Ensure password matches confirmation
        elif password != confirmation:
            return render(request, "finance/register.html", {
                "message": _("Passwords must match.")
            })
        # Attempt to create new user and its profile
        try:
            user = User.objects.create_user(username, email, password)
            user.save()
        except IntegrityError:
            # username column is unique; a duplicate raises IntegrityError
            return render(request, "finance/register.html", {
                "message": _("Username already taken.")
            })
        login(request, user)
        return HttpResponseRedirect(reverse("finance:index"))
    else:
        # Show register panel only for not login users
        if request.user.is_authenticated:
            return HttpResponseRedirect(reverse("finance:index"))
        else:
            return render(request, "finance/register.html")
from __future__ import print_function
from typing import Dict, Optional
from pathlib2 import Path
from six.moves import input
from six.moves.urllib.parse import urlparse
from clearml_agent.backend_api.session import Session
from clearml_agent.backend_api.session.defs import ENV_HOST
from clearml_agent.backend_config.defs import LOCAL_CONFIG_FILES
from clearml_agent.external.pyhocon import ConfigFactory, ConfigMissingException
# Prompt text shown before reading the credentials block pasted by the user.
description = """
Please create new clearml credentials through the settings page in your `clearml-server` web app,
or create a free account at https://app.clear.ml/settings/webapp-configuration

In the settings > workspace page, press "Create new credentials", then press "Copy to clipboard".

Paste copied configuration here:
"""

# Default web-server URL; an explicitly configured host (CLEARML_WEB_HOST
# et al. via ENV_HOST) takes precedence when available.
def_host = 'http://localhost:8080'
try:
    def_host = ENV_HOST.get(default=def_host) or def_host
except Exception:
    pass

# Help text shown when no host could be derived from the pasted credentials.
host_description = """
Editing configuration file: {CONFIG_FILE}
Enter the url of the clearml-server's Web service, for example: {HOST} or https://app.clear.ml
""".format(
    CONFIG_FILE=LOCAL_CONFIG_FILES[-1],
    HOST=def_host,
)
def main():
    """Interactive clearml-agent setup wizard.

    Reads pasted credentials, derives/confirms the API, web and file-store
    hosts, verifies the credentials against the server, asks for optional
    git and pip extra-index settings, then writes the agent configuration
    file (first entry of LOCAL_CONFIG_FILES that exists, else the default).
    Aborts without writing when a non-empty config file already exists or
    credential verification fails twice.
    """
    print('CLEARML-AGENT setup process')
    # pick the first existing local config file (falls back to the last path)
    for f in LOCAL_CONFIG_FILES:
        conf_file = Path(f).absolute()
        if conf_file.exists():
            break
    if conf_file.exists() and conf_file.is_file() and conf_file.stat().st_size > 0:
        print('Configuration file already exists: {}'.format(str(conf_file)))
        print('Leaving setup. If you\'ve previously initialized the ClearML SDK on this machine, manually add an \'agent\' section to this file.')
        return
    print(description, end='')
    sentinel = ''
    parse_input = ''
    # read pasted lines until a blank line or the closing '}' of the block
    for line in iter(input, sentinel):
        parse_input += line+'\n'
        if line.rstrip() == '}':
            break
    credentials = None
    api_server = None
    web_server = None
    # noinspection PyBroadException
    try:
        parsed = ConfigFactory.parse_string(parse_input)
        if parsed:
            # Take the credentials in raw form or from api section
            credentials = get_parsed_field(parsed, ["credentials"])
            api_server = get_parsed_field(parsed, ["api_server", "host"])
            web_server = get_parsed_field(parsed, ["web_server"])
    except Exception:
        credentials = credentials or None
        api_server = api_server or None
        web_server = web_server or None
    while not credentials or set(credentials) != {"access_key", "secret_key"}:
        print('Could not parse credentials, please try entering them manually.')
        credentials = read_manual_credentials()
    print('Detected credentials key=\"{}\" secret=\"{}\"'.format(credentials['access_key'],
                                                                 credentials['secret_key'][0:4] + "***"))
    web_input = True
    if web_server:
        host = input_url('WEB Host', web_server)
    elif api_server:
        web_input = False
        host = input_url('API Host', api_server)
    else:
        print(host_description)
        host = input_url('WEB Host', 'https://app.clear.ml')
    parsed_host = verify_url(host)
    api_host, files_host, web_host = parse_host(parsed_host, allow_input=True)
    # on of these two we configured
    if not web_input:
        web_host = input_url('Web Application Host', web_host)
    else:
        api_host = input_url('API Host', api_host)
    files_host = input_url('File Store Host', files_host)
    print('\nClearML Hosts configuration:\nWeb App: {}\nAPI: {}\nFile Store: {}\n'.format(
        web_host, api_host, files_host))
    retry = 1
    max_retries = 2
    while retry <= max_retries:  # Up to 2 tries by the user
        if verify_credentials(api_host, credentials):
            break
        retry += 1
        if retry < max_retries + 1:
            credentials = read_manual_credentials()
        else:
            print('Exiting setup without creating configuration file')
            return
    selection = input_options(
        'Default Output URI (used to automatically store models and artifacts)',
        {'N': 'None', 'S': 'ClearML Server', 'C': 'Custom'},
        default='None'
    )
    if selection == 'Custom':
        print('Custom Default Output URI: ', end='')
        default_output_uri = input().strip()
    elif selection == "ClearML Server":
        default_output_uri = files_host
    else:
        default_output_uri = None
    print('\nDefault Output URI: {}'.format(default_output_uri if default_output_uri else 'not set'))
    # get GIT User/Pass for cloning
    print('Enter git username for repository cloning (leave blank for SSH key authentication): [] ', end='')
    git_user = input()
    if git_user.strip():
        print(
            "Git personal token is equivalent to a password, to learn how to generate a token:\n"
            "  GitHub: https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token\n"  # noqa
            "  Bitbucket: https://support.atlassian.com/bitbucket-cloud/docs/app-passwords/\n"
            "  GitLab: https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html\n"
        )
        print('Enter git personal token for user \'{}\': '.format(git_user), end='')
        git_pass = input()
        print('Git repository cloning will be using user={} token={}'.format(git_user, git_pass))
    else:
        git_user = None
        git_pass = None
    # get extra-index-url for pip installations
    extra_index_urls = []
    print('\nEnter additional artifact repository (extra-index-url) to use when installing python packages '
          '(leave blank if not required):', end='')
    index_url = input().strip()
    while index_url:
        extra_index_urls.append(index_url)
        print('Another artifact repository? (enter another url or leave blank if done):', end='')
        index_url = input().strip()
    if len(extra_index_urls):
        print("The following artifact repositories will be added:\n\t- {}".format("\n\t- ".join(extra_index_urls)))
    # load the packaged default agent/sdk config sections to append verbatim
    # noinspection PyBroadException
    try:
        conf_folder = Path(__file__).parent.absolute() / '..' / 'backend_api' / 'config' / 'default'
        default_conf = ''
        for conf in ('agent.conf', 'sdk.conf', ):
            conf_file_section = conf_folder / conf
            with open(str(conf_file_section), 'rt') as f:
                default_conf += conf.split('.')[0] + ' '
                default_conf += f.read()
            default_conf += '\n'
    except Exception:
        print('Error! Could not read default configuration file')
        return
    # noinspection PyBroadException
    try:
        with open(str(conf_file), 'wt') as f:
            header = '# CLEARML-AGENT configuration file\n' \
                     'api {\n' \
                     '    # Notice: \'host\' is the api server (default port 8008), not the web server.\n' \
                     '    api_server: %s\n' \
                     '    web_server: %s\n' \
                     '    files_server: %s\n' \
                     '    # Credentials are generated using the webapp, %s/settings\n' \
                     '    # Override with os environment: CLEARML_API_ACCESS_KEY / CLEARML_API_SECRET_KEY\n' \
                     '    credentials {"access_key": "%s", "secret_key": "%s"}\n' \
                     '}\n\n' % (api_host, web_host, files_host,
                                web_host, credentials['access_key'], credentials['secret_key'])
            f.write(header)
            git_credentials = '# Set GIT user/pass credentials\n' \
                              '# leave blank for GIT SSH credentials\n' \
                              'agent.git_user=\"{}\"\n' \
                              'agent.git_pass=\"{}\"\n' \
                              '\n'.format(git_user or '', git_pass or '')
            f.write(git_credentials)
            extra_index_str = '# extra_index_url: ["https://allegroai.jfrog.io/clearml/api/pypi/public/simple"]\n' \
                              'agent.package_manager.extra_index_url= ' \
                              '[\n{}\n]\n\n'.format("\n".join(map("\"{}\"".format, extra_index_urls)))
            f.write(extra_index_str)
            if default_output_uri:
                default_output_url_str = '# Default Task output_uri. if output_uri is not provided to Task.init, ' \
                                         'default_output_uri will be used instead.\n' \
                                         'sdk.development.default_output_uri="{}"\n' \
                                         '\n'.format(default_output_uri.strip('"'))
                f.write(default_output_url_str)
            default_conf = default_conf.replace('default_output_uri: ""', '# default_output_uri: ""')
            f.write(default_conf)
    except Exception:
        print('Error! Could not write configuration file at: {}'.format(str(conf_file)))
        return
    print('\nNew configuration stored in {}'.format(str(conf_file)))
    print('CLEARML-AGENT setup completed successfully.')
def parse_host(parsed_host, allow_input=True):
    """Derive the api/web/files server URLs from one parsed host URL.

    Recognizes the hosted-server sub-domain conventions (``demoapp.`` /
    ``app.`` / ``demoapi.`` / ``api.``) and the self-hosted port convention
    (8080 = web, 8008 = api, 8081 = files).  When nothing matches and
    ``allow_input`` is True, the user is prompted for the missing values.

    :param parsed_host: ``urlparse`` result for the host the user entered
    :param allow_input: when no convention matches, fall back to interactive
        prompts instead of raising
    :return: tuple of (api_host, files_host, web_host) URL strings
    :raises ValueError: host matches no convention and allow_input is False
    """
    if parsed_host.netloc.startswith('demoapp.'):
        # this is our demo server
        api_host = parsed_host.scheme + "://" + parsed_host.netloc.replace('demoapp.', 'demoapi.', 1) + parsed_host.path
        web_host = parsed_host.scheme + "://" + parsed_host.netloc + parsed_host.path
        files_host = parsed_host.scheme + "://" + parsed_host.netloc.replace('demoapp.', 'demofiles.',
                                                                             1) + parsed_host.path
    elif parsed_host.netloc.startswith('app.'):
        # this is our application server
        api_host = parsed_host.scheme + "://" + parsed_host.netloc.replace('app.', 'api.', 1) + parsed_host.path
        web_host = parsed_host.scheme + "://" + parsed_host.netloc + parsed_host.path
        files_host = parsed_host.scheme + "://" + parsed_host.netloc.replace('app.', 'files.', 1) + parsed_host.path
    elif parsed_host.netloc.startswith('demoapi.'):
        print('{} is the api server, we need the web server. Replacing \'demoapi.\' with \'demoapp.\''.format(
            parsed_host.netloc))
        api_host = parsed_host.scheme + "://" + parsed_host.netloc + parsed_host.path
        web_host = parsed_host.scheme + "://" + parsed_host.netloc.replace('demoapi.', 'demoapp.', 1) + parsed_host.path
        files_host = parsed_host.scheme + "://" + parsed_host.netloc.replace('demoapi.', 'demofiles.',
                                                                             1) + parsed_host.path
    elif parsed_host.netloc.startswith('api.'):
        print('{} is the api server, we need the web server. Replacing \'api.\' with \'app.\''.format(
            parsed_host.netloc))
        api_host = parsed_host.scheme + "://" + parsed_host.netloc + parsed_host.path
        web_host = parsed_host.scheme + "://" + parsed_host.netloc.replace('api.', 'app.', 1) + parsed_host.path
        files_host = parsed_host.scheme + "://" + parsed_host.netloc.replace('api.', 'files.', 1) + parsed_host.path
    elif parsed_host.port == 8008:
        print('Port 8008 is the api port. Replacing 8080 with 8008 for Web application')
        api_host = parsed_host.scheme + "://" + parsed_host.netloc + parsed_host.path
        web_host = parsed_host.scheme + "://" + parsed_host.netloc.replace(':8008', ':8080', 1) + parsed_host.path
        files_host = parsed_host.scheme + "://" + parsed_host.netloc.replace(':8008', ':8081', 1) + parsed_host.path
    elif parsed_host.port == 8080:
        api_host = parsed_host.scheme + "://" + parsed_host.netloc.replace(':8080', ':8008', 1) + parsed_host.path
        web_host = parsed_host.scheme + "://" + parsed_host.netloc + parsed_host.path
        files_host = parsed_host.scheme + "://" + parsed_host.netloc.replace(':8080', ':8081', 1) + parsed_host.path
    elif allow_input:
        api_host = ''
        web_host = ''
        files_host = ''
        if not parsed_host.port:
            print('Host port not detected, do you wish to use the default 8080 port n/[y]? ', end='')
            replace_port = input().lower()
            if not replace_port or replace_port == 'y' or replace_port == 'yes':
                api_host = parsed_host.scheme + "://" + parsed_host.netloc + ':8008' + parsed_host.path
                web_host = parsed_host.scheme + "://" + parsed_host.netloc + ':8080' + parsed_host.path
                files_host = parsed_host.scheme + "://" + parsed_host.netloc + ':8081' + parsed_host.path
            # NOTE(review): 'not replace_port' below can never be True here --
            # the previous branch already captured empty input (dead condition).
            elif not replace_port or replace_port.lower() == 'n' or replace_port.lower() == 'no':
                web_host = input_host_port("Web", parsed_host)
                api_host = input_host_port("API", parsed_host)
                files_host = input_host_port("Files", parsed_host)
        # No api host determined interactively: fall back to the URL as given.
        if not api_host:
            api_host = parsed_host.scheme + "://" + parsed_host.netloc + parsed_host.path
    else:
        raise ValueError("Could not parse host name")
    return api_host, files_host, web_host
def verify_credentials(api_host, credentials):
    """Check whether the given access/secret key pair can open a session.

    :param api_host: API server URL; when falsy, verification is skipped
    :param credentials: dict with 'access_key' and 'secret_key' entries
    :return: True when a Session could be established, False otherwise
    """
    # noinspection PyBroadException
    try:
        print('Verifying credentials ...')
        if not api_host:
            print("Can't verify credentials")
            return False
        # Constructing a Session performs the actual server-side validation.
        Session(
            api_key=credentials['access_key'],
            secret_key=credentials['secret_key'],
            host=api_host,
            http_retries_config={"total": 2},
        )
        print('Credentials verified!')
        return True
    except Exception:
        print('Error: could not verify credentials: key={} secret={}'.format(
            credentials.get('access_key'), credentials.get('secret_key')))
        return False
def get_parsed_field(parsed_config, fields):
    """Return the value for *fields* from a web-profile-page configuration.

    The current web UI nests credentials under the "api" section; older
    versions stored the fields at the top level, so on a
    ConfigMissingException the lookup is retried against the root.

    :param parsed_config: the parsed value from the web ui (Config object)
    :param fields: candidate keys, tried in the given order
    :return: parsed value if found, None otherwise
    """
    try:
        return parsed_config.get("api").get(fields[0])
    except ConfigMissingException:
        # fallback - the field may be laid out like in older web versions
        if len(fields) == 1:
            return parsed_config.get(fields[0])
        if len(fields) == 2:
            return parsed_config.get(fields[1])
        return None
def read_manual_credentials():
    """Prompt the user for an access key and a secret key.

    :return: dict with 'access_key' and 'secret_key' entries
    """
    prompts = (("access_key", 'Enter user access key: '),
               ("secret_key", 'Enter user secret: '))
    creds = {}
    for field, prompt in prompts:
        print(prompt, end='')
        creds[field] = input()
    return creds
def input_url(host_type, host=None):
    """Prompt the user for the URL of the given *host_type*.

    Keeps asking until either the current default *host* is accepted (empty
    input / 'y' / 'yes') or a new value is entered that verify_url() parses.

    :param host_type: label shown in the prompt
    :param host: current default value, accepted on empty/affirmative input
    :return: the confirmed or newly entered host URL
    """
    while True:
        default_hint = '[{}] '.format(host) if host else ''
        print('{} configured to: {}'.format(host_type, default_hint), end='')
        answer = input()
        if host and (not answer or answer.lower() in ('yes', 'y')):
            # keep the current default
            break
        parsed = verify_url(answer) if answer else None
        if answer and parsed:
            host = parsed.scheme + "://" + parsed.netloc + parsed.path
            break
    return host
def input_options(message, options, default=None):
    # type: (str, Dict[str, str], Optional[str]) -> str
    """Prompt the user to pick one of *options* by its shortcut character.

    Each key in *options* is a single shortcut character mapped to the full
    option name; the shortcut letter is shown upper-cased in parentheses
    inside the option word (e.g. "(Y)es").  An empty answer selects
    *default*; otherwise the loop repeats until the answer matches a
    shortcut in either case.
    """
    decorated = []
    for shortcut, option in options.items():
        # highlight the shortcut letter inside the option word
        decorated.append(
            "".join(('(' + ch.upper() + ')') if ch == shortcut else ch for ch in option)
        )
    options_msg = "/".join(decorated)
    if default:
        options_msg += " [{}]".format(default)
    while True:
        print('{}: {} '.format(message, options_msg), end='')
        answer = input().strip()
        if not answer:
            return default
        if answer.lower() in options:
            return options[answer.lower()]
        if answer.upper() in options:
            return options[answer.upper()]
def input_host_port(host_type, parsed_host):
    """Ask the user for the port of *host_type* and rebuild the URL with it.

    An empty answer keeps the URL without an explicit port.
    """
    print('Enter port for {} host '.format(host_type), end='')
    port = input().lower()
    suffix = ':{}'.format(port) if port else ''
    return parsed_host.scheme + "://" + parsed_host.netloc + suffix + parsed_host.path
def verify_url(parse_input):
    """Parse *parse_input* into a urlparse result, adding a scheme if absent.

    A bare host that carries an explicit port gets an ``http://`` prefix,
    otherwise ``https://`` is assumed.  Returns None (after printing a new
    prompt) when the value cannot be parsed or uses a non-http(s) scheme.
    """
    # noinspection PyBroadException
    try:
        if not parse_input.startswith(('http://', 'https://')):
            # if we have a specific port, use http prefix, otherwise assume https
            scheme = 'http://' if ':' in parse_input else 'https://'
            parse_input = scheme + parse_input
        parsed_host = urlparse(parse_input)
        if parsed_host.scheme not in ('http', 'https'):
            parsed_host = None
    except Exception:
        parsed_host = None
        print('Could not parse url {}\nEnter your clearml-server host: '.format(parse_input), end='')
    return parsed_host
# Script entry point: run the interactive agent-configuration wizard.
if __name__ == '__main__':
    main()
| allegroai/clearml-agent | clearml_agent/commands/config.py | config.py | py | 17,027 | python | en | code | 205 | github-code | 13 |
7389241920 | import requests
import pyttsx3
import json
# Initialize the text-to-speech engine (Windows SAPI5 backend).
engine = pyttsx3.init("sapi5")
voices = engine.getProperty('voices')
# Use the first installed voice and a slightly slowed speaking rate.
engine.setProperty("voice", voices[0].id)
engine.setProperty('rate', 170)
def speak(audio):
    """Speak *audio* aloud via the module-level TTS engine; blocks until done."""
    engine.say(audio)
    engine.runAndWait()
def latestnews():
    """Ask the user for a news category, fetch headlines and read them aloud.

    Recognized categories are the keys of the endpoint table below.  If the
    typed field matches none of them, the function reports the problem and
    returns without making a network request (the original code crashed:
    the dict was defined as ``apidict`` but iterated as ``api_dict``, and an
    unmatched field led to ``requests.get(True)``).
    """
    # NewsAPI endpoints per category (India top headlines).
    api_dict = {"sports":"https://newsapi.org/v2/top-headlines?country=in&apiKey=67113e90ecac417c8b781fa6789eee2c",
                "health":"https://newsapi.org/v2/top-headlines?country=in&category=health&apiKey=67113e90ecac417c8b781fa6789eee2c",
                "science":"https://newsapi.org/v2/top-headlines?country=in&category=science&apiKey=67113e90ecac417c8b781fa6789eee2c",
                "business":"https://newsapi.org/v2/top-headlines?country=in&apiKey=67113e90ecac417c8b781fa6789eee2c",
                "technology":"https://newsapi.org/v2/top-headlines?sources=techcrunch&apiKey=67113e90ecac417c8b781fa6789eee2c"}
    url = None
    speak("which field news you want")
    field = input("Type the field you want:")
    for key, value in api_dict.items():
        if key.lower() in field.lower():
            url = value
            print(url)
            print("url was found")
            break
    if url is None:
        # No category matched -- bail out instead of requesting a bogus URL.
        print("url not found")
        return
    news = requests.get(url).text
    news = json.loads(news)
    speak("Here is the first news.")
    arts = news["articles"]
    for articles in arts :
        article = articles["title"]
        print(article)
        speak(article)
        news_url = articles["url"]
        print(f"for more info visit: {news_url}")
        a = input("[press 1 to cont] and [press 2 to stop]")
        if str(a) == "1":
            pass
        elif str(a) == "2":
            break
    speak("thats all")
| kanugoyal/Virtual-Assistant | newsRead.py | newsRead.py | py | 1,819 | python | en | code | 1 | github-code | 13 |
5141561121 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 3 11:56:14 2023
@author: Hannah Germaine
This is a collection of functions for plotting deviation bins
(To eventually replace parts of "plot_funcs.py")
"""
import tqdm, os
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
def plot_deviations(dev_save_dir, num_neur, segment_names, segment_times, dev_thresh,
				 segment_devs, segment_bouts, segment_bout_lengths,
				 segment_ibis, segment_spike_times, num_null_sets,
				 null_segment_dev_counts, null_segment_dev_ibis,
				 null_segment_dev_bout_len, fig_buffer_size):
	"""Master function to generate all deviation plots desired.

	Orchestrates the four plotting routines below; all figures are written
	under dev_save_dir (one sub-folder per segment name).
	"""
	num_segments = len(segment_names)
	#First plot when deviations occur in each segment
	#segment_dev_frac_ind are those that are above the dev_thresh cutoff
	deviation_times(num_segments,num_neur,segment_names,dev_save_dir,segment_devs,dev_thresh)
	#Next plot bout lengths and inter-bout intervals and trends across segments
	segment_bout_stats_plots(num_segments,segment_names,segment_bout_lengths,segment_ibis,dev_save_dir)
	#Zoom in and plot rasters for the deviations
	dev_bin_plots(dev_save_dir,segment_names,segment_times,
			   segment_spike_times,num_neur,fig_buffer_size,
			   segment_bouts)
	#Plot true deviations against null distribution
	null_v_true_dev_plots(dev_save_dir,segment_names,segment_bouts,segment_bout_lengths,segment_ibis,
					   num_null_sets,null_segment_dev_counts,null_segment_dev_ibis,null_segment_dev_bout_len)
def deviation_times(num_segments,num_neur,segment_names,dev_save_dir,segment_devs,dev_thresh):
	"""Plot, for each segment, the deviation fraction across time.

	INPUTS:
		- num_segments: number of segments in experiment
		- num_neur: number of neurons in data
		- segment_names: names of different segments
		- dev_save_dir: where to save plots
		- segment_devs: per-segment (time indices in ms, deviation fractions)
		- dev_thresh: threshold for a good deviation to plot
	OUTPUTS:
		- png and svg deviation-fraction traces, one figure per segment,
		  saved under a per-segment sub-folder of dev_save_dir.
	"""
	print("Beginning Deviation Plots.")
	ms_to_s = 1/1000  # deviation indices are stored on a ms timescale
	for s_i in tqdm.tqdm(range(num_segments)):
		seg_name = segment_names[s_i]
		print("\n\tPlotting deviations for segment " + seg_name)
		seg_dev_save_dir = dev_save_dir + ('_').join(seg_name.split(' ')) + '/'
		if not os.path.isdir(seg_dev_save_dir):
			os.mkdir(seg_dev_save_dir)
		dev_times = segment_devs[s_i][0]*ms_to_s
		# figure width scales with the segment's duration in minutes
		fig_width = (max(dev_times)-min(dev_times))*(1/60)
		fig_i = plt.figure(figsize=(fig_width,10))
		plt.plot(dev_times,segment_devs[s_i][1])
		plt.xlabel('Time (s)')
		plt.ylabel('Deviation Fraction')
		im_name = (' ').join(seg_name.split('_'))
		plt.title(im_name + ' deviation fractions')
		save_name = ('_').join(seg_name.split(' ')) + '_devs'
		for ext in ('.png', '.svg'):
			fig_i.savefig(seg_dev_save_dir + save_name + ext)
		plt.close(fig_i)
def segment_bout_stats_plots(num_segments,segment_names,segment_bout_lengths,segment_ibis,dev_save_dir):
	"""Plot deviation-bout lengths and inter-bout-intervals (IBIs) per segment.

	INPUTS:
		- num_segments: number of experimental segments
		- segment_names: name of each segment (used in titles / file names)
		- segment_bout_lengths: per-segment list of deviation bout lengths (s)
		- segment_ibis: per-segment list of inter-bout-intervals (s)
		- dev_save_dir: base directory in which per-segment folders are made
	OUTPUTS:
		Histogram plots of each segment's bout lengths and IBIs, plus a
		cross-segment figure of the mean +/- std of both statistics.
	"""
	mean_segment_len = []
	std_segment_len = []
	mean_segment_ibis = []
	std_segment_ibis = []
	for i in tqdm.tqdm(range(num_segments)):
		seg_dev_save_dir = dev_save_dir + ('_').join(segment_names[i].split(' ')) + '/'
		if os.path.isdir(seg_dev_save_dir) == False:
			os.mkdir(seg_dev_save_dir)
		# accumulate per-segment summary stats for the cross-segment plot below
		segment_len = segment_bout_lengths[i]
		mean_segment_len.extend([np.mean(segment_len)])
		std_segment_len.extend([np.std(segment_len)])
		segment_ibi = segment_ibis[i]
		mean_segment_ibis.extend([np.mean(segment_ibi)])
		std_segment_ibis.extend([np.std(segment_ibi)])
		#Plot individual histograms
		fig_i = plt.figure(figsize = (10,10))
		plt.subplot(1,2,1)
		plt.hist(segment_len)
		plt.title('Bout lengths (s) histogram')
		plt.xlabel('Bout length (s)')
		plt.ylabel('Counts')
		plt.subplot(1,2,2)
		plt.hist(segment_ibi)
		plt.title('Inter-bout-intervals (s) histogram')
		plt.xlabel('IBI (s)')
		plt.ylabel('Counts')
		save_name = ('_').join(segment_names[i].split(' ')) + '_dev_hist.png'
		fig_i.savefig(seg_dev_save_dir + save_name)
		plt.close(fig_i)
	mean_segment_len = np.array(mean_segment_len)
	std_segment_len = np.array(std_segment_len)
	mean_segment_ibis = np.array(mean_segment_ibis)
	std_segment_ibis = np.array(std_segment_ibis)
	#Plot mean and standard deviations across segments
	# NOTE(review): the +/- std traces below reuse the label
	# 'Mean Across Neurons', so the legend shows the same entry three
	# times -- confirm whether distinct labels were intended.
	fig_i = plt.figure(figsize = (10,10))
	#cm_subsection = np.linspace(0,1,num_neur)
	#cmap = [cm.gist_rainbow(x) for x in cm_subsection]
	plt.subplot(1,2,1)
	plt.plot(segment_names,mean_segment_len,color='k',label='Mean Across Neurons')
	plt.plot(segment_names,mean_segment_len + std_segment_len,alpha=0.25,color='k',linestyle='-',label='Mean Across Neurons')
	plt.plot(segment_names,mean_segment_len - std_segment_len,alpha=0.25,color='k',linestyle='-',label='Mean Across Neurons')
	plt.legend()
	plt.title('Mean Bout Lengths by Segment')
	plt.xlabel('Experimental Segment')
	plt.ylabel('Mean Bout Length (s)')
	plt.subplot(1,2,2)
	plt.plot(segment_names,mean_segment_ibis,color='k',label='Mean Across Neurons')
	plt.plot(segment_names,mean_segment_ibis + std_segment_ibis,alpha=0.25,color='k',linestyle='-',label='Mean Across Neurons')
	plt.plot(segment_names,mean_segment_ibis - std_segment_ibis,alpha=0.25,color='k',linestyle='-',label='Mean Across Neurons')
	plt.legend()
	plt.title('Mean Inter-Bout-Intervals by Segment')
	plt.xlabel('Experimental Segment')
	plt.ylabel('Mean Inter-Bout-Interval (s)')
	fig_i.tight_layout()
	save_name = 'cross-segment_bout_stats.png'
	fig_i.savefig(dev_save_dir + save_name)
	plt.close(fig_i)
def dev_bin_plots(fig_save_dir,segment_names,segment_times,
			   segment_spike_times,num_neur,fig_buffer_size,
			   segment_bouts):
	"""Create spike-raster visualizations around each deviation event.

	INPUTS:
		- fig_save_dir: directory to save visualizations
		- segment_names: names of different experiment segments
		- segment_times: time indices of different segment starts/ends
		- segment_spike_times: when spikes occur in each segment
		- num_neur: the number of neurons
		- fig_buffer_size: how much (in seconds) to plot before and after a deviation event
		- segment_bouts: per-segment array of [start, end] deviation times (ms)
	OUTPUTS:
		- One raster .png/.svg per deviation event under <segment>/dev_rasters/.
	"""
	print("\nBeginning individual deviation segment plots.")
	#Create save directory
	dev_save_dir = fig_save_dir
	if not os.path.isdir(dev_save_dir):
		os.mkdir(dev_save_dir)
	#Convert the bin size from time to samples
	num_segments = len(segment_names)
	local_bin_dt = int(np.ceil(fig_buffer_size*1000)) #in ms timescale
	half_local_bin_dt = int(np.ceil(local_bin_dt/2))
	#Run through deviation times by segment and plot rasters
	for s_i in tqdm.tqdm(range(num_segments)):
		print("\nGrabbing spike rasters for segment " + segment_names[s_i])
		seg_dev_save_dir = dev_save_dir + ('_').join(segment_names[s_i].split(' ')) + '/'
		if not os.path.isdir(seg_dev_save_dir):
			os.mkdir(seg_dev_save_dir)
		seg_rast_save_dir = seg_dev_save_dir + 'dev_rasters/'
		if not os.path.isdir(seg_rast_save_dir):
			os.mkdir(seg_rast_save_dir)
		segment_dev_start_times = segment_bouts[s_i][:,0]
		segment_dev_end_times = segment_bouts[s_i][:,1]
		segment_spikes = [np.array(segment_spike_times[s_i][n_i]) for n_i in range(num_neur)]
		min_seg_time = segment_times[s_i]
		max_seg_time = segment_times[s_i+1]
		for d_i in range(len(segment_dev_start_times)):
			#Clip the plot window to the segment boundaries
			min_time = max(segment_dev_start_times[d_i] - half_local_bin_dt,min_seg_time)
			max_time = min(segment_dev_end_times[d_i] + half_local_bin_dt,max_seg_time)
			s_t = []
			for n_i in range(num_neur):
				try:
					s_t.append(list(segment_spikes[n_i][np.where((segment_spikes[n_i] >= min_time)*(segment_spikes[n_i] <= max_time))[0]]))
				except Exception:
					#Narrowed from a bare except; keep the diagnostic print
					print(segment_spikes[n_i])
			s_t_time = [list(np.array(s_t[i])*(1/1000)) for i in range(len(s_t))] #In seconds
			#Plot segment deviation raster
			plt.figure(figsize=(10,num_neur))
			plt.xlabel('Time (s)')
			plt.ylabel('Neuron Index')
			plt.axvline(segment_dev_start_times[d_i]*(1/1000),color='r',alpha=0.4)
			plt.axvline(segment_dev_end_times[d_i]*(1/1000),color='r',alpha=0.4)
			plt.eventplot(s_t_time,color='b')
			plt.title('Deviation ' + str(d_i))
			plt.tight_layout()
			im_name = 'dev_' + str(d_i)
			plt.savefig(seg_rast_save_dir + im_name + '.png')
			plt.savefig(seg_rast_save_dir + im_name + '.svg')
			plt.close()
def null_v_true_dev_plots(dev_save_dir,segment_names,segment_bouts,segment_bout_lengths,segment_ibis,num_null_sets,null_segment_dev_counts,null_segment_dev_ibis,null_segment_dev_bout_len):
	"""Plot histograms of null-distribution values against true deviation values.

	INPUTS:
		- dev_save_dir: base directory for all output figures
		- segment_names: names of the experimental segments
		- segment_bouts: [start,end] times of true deviation bouts per segment
		- segment_bout_lengths: true deviation bout lengths (s) per segment
		- segment_ibis: true inter-bout-intervals (s) per segment
		- num_null_sets: number of null datasets generated per segment
		- null_segment_dev_counts: deviation counts per null set, per segment
		- null_segment_dev_ibis: IBIs per null set, per segment
		- null_segment_dev_bout_len: bout lengths per null set, per segment
	OUTPUTS:
		- Per-segment and cross-segment null-vs-true histograms (.png/.svg)

	Fixes vs. previous revision: the null IBI histograms were always empty
	because the flatten loops extended the *lens* accumulator instead of the
	*ibis* one; the per-segment lengths figure used plt.title, which
	clobbered the second subplot's title (now suptitle, matching the IBI
	figure).
	"""
	num_segments = len(segment_names)
	#Create save directory
	if not os.path.isdir(dev_save_dir):
		os.mkdir(dev_save_dir)
	#Go through each segment
	for s_i in range(num_segments):
		print("\tPlotting null distributions for segment " + segment_names[s_i])
		#Create Save Directory
		seg_dev_save_dir = dev_save_dir + ('_').join(segment_names[s_i].split(' ')) + '/'
		if not os.path.isdir(seg_dev_save_dir):
			os.mkdir(seg_dev_save_dir)
		seg_null_hist_save_dir = seg_dev_save_dir + 'dev_histograms/'
		if not os.path.isdir(seg_null_hist_save_dir):
			os.mkdir(seg_null_hist_save_dir)
		#Histogram of bout length
		fig_i = plt.figure(figsize=(5,5))
		seg_true_dev_bout_lens = segment_bout_lengths[s_i]
		seg_null_dev_bout_lens = null_segment_dev_bout_len[s_i]
		seg_null_dev_bout_lens_flat = []
		for s_n_i in range(len(seg_null_dev_bout_lens)):
			seg_null_dev_bout_lens_flat.extend(seg_null_dev_bout_lens[s_n_i])
		im_name = (' ').join(segment_names[s_i].split('_'))
		plt.subplot(1,2,1)
		plt.hist(seg_null_dev_bout_lens_flat,bins=20,alpha=0.5,color='blue',label='Null Data')
		plt.axvline(np.mean(seg_true_dev_bout_lens),color='orange',label='Mean of True Data')
		plt.legend()
		plt.title(im_name + ' Null Distribution')
		plt.xlabel('Deviation Bout Length (s)')
		plt.ylabel('Number of Instances')
		plt.subplot(1,2,2)
		plt.hist(seg_true_dev_bout_lens,bins=20,alpha=0.5,color='orange',label='True Data')
		plt.axvline(np.mean(seg_true_dev_bout_lens),color='orange',label='Mean of True Data')
		plt.legend()
		plt.title(im_name + ' True Distribution')
		plt.xlabel('Deviation Bout Length (s)')
		plt.ylabel('Number of Instances')
		#suptitle: figure-level caption must not overwrite subplot 2's title
		plt.suptitle(im_name + ' deviation lengths x null distribution')
		plt.tight_layout()
		save_name = ('_').join(segment_names[s_i].split(' ')) + '_null_v_true_lens'
		fig_i.savefig(seg_null_hist_save_dir + save_name + '.png')
		fig_i.savefig(seg_null_hist_save_dir + save_name + '.svg')
		plt.close(fig_i)
		#Histogram of IBIs
		fig_i = plt.figure(figsize=(5,5))
		seg_true_dev_ibis = segment_ibis[s_i]
		seg_null_dev_bout_ibis = null_segment_dev_ibis[s_i]
		seg_null_dev_bout_ibis_flat = []
		for s_n_i in range(len(seg_null_dev_bout_ibis)):
			#bug fix: previously extended the lens accumulator
			seg_null_dev_bout_ibis_flat.extend(seg_null_dev_bout_ibis[s_n_i])
		im_name = (' ').join(segment_names[s_i].split('_'))
		plt.subplot(1,2,1)
		plt.hist(seg_null_dev_bout_ibis_flat,bins=20,alpha=0.5,color='blue',label='Null Data')
		plt.axvline(np.mean(seg_true_dev_ibis),color='orange',label='Mean of True Data')
		plt.legend()
		plt.title(im_name + ' Null Distribution')
		plt.xlabel('Deviation Inter-Bout-Interals (IBIs) (s)')
		plt.ylabel('Number of Instances')
		plt.subplot(1,2,2)
		plt.hist(seg_true_dev_ibis,bins=20,alpha=0.5,color='orange',label='True Data')
		plt.axvline(np.mean(seg_true_dev_ibis),color='orange',label='Mean of True Data')
		plt.legend()
		plt.title(im_name + ' True Distribution')
		plt.xlabel('Deviation Inter-Bout-Interals (IBIs) (s)')
		plt.ylabel('Number of Instances')
		plt.suptitle(im_name + ' Deviation IBIs x Null Distribution')
		plt.tight_layout()
		save_name = ('_').join(segment_names[s_i].split(' ')) + '_null_v_true_ibis'
		fig_i.savefig(seg_null_hist_save_dir + save_name + '.png')
		fig_i.savefig(seg_null_hist_save_dir + save_name + '.svg')
		plt.close(fig_i)
		#Histogram of deviation counts
		fig_i = plt.figure(figsize=(5,5))
		seg_true_dev_count = len(segment_bout_lengths[s_i])
		seg_null_dev_counts = null_segment_dev_counts[s_i]
		im_name = (' ').join(segment_names[s_i].split('_'))
		plt.hist(seg_null_dev_counts,bins=20,alpha=0.5,color='blue',label='Null Data')
		plt.axvline(seg_true_dev_count,color='orange',label='True Count')
		plt.legend()
		plt.xlabel('Deviation Counts')
		plt.ylabel('Number of Instances')
		plt.title(im_name + ' deviation counts x null distribution')
		plt.tight_layout()
		save_name = ('_').join(segment_names[s_i].split(' ')) + '_null_v_true_counts'
		fig_i.savefig(seg_null_hist_save_dir + save_name + '.png')
		fig_i.savefig(seg_null_hist_save_dir + save_name + '.svg')
		plt.close(fig_i)
	#Now compare segment deviations against each other
	print("\tPlotting null distributions for all segments combined")
	cm_subsection = np.linspace(0,1,num_segments)
	cmap = [cm.gist_rainbow(x) for x in cm_subsection] #Color maps for each segment
	#Bout lengths: null vs true means
	mean_vals = []
	fig_lens = plt.figure(figsize=(5,5))
	for s_i in range(num_segments):
		segment_name = (' ').join(segment_names[s_i].split('_'))
		seg_true_dev_bout_lens = segment_bout_lengths[s_i]
		mean_true = np.mean(seg_true_dev_bout_lens)
		mean_vals.extend([mean_true])
		seg_null_dev_bout_lens = null_segment_dev_bout_len[s_i]
		seg_null_dev_bout_lens_flat = []
		for s_n_i in range(len(seg_null_dev_bout_lens)):
			seg_null_dev_bout_lens_flat.extend(seg_null_dev_bout_lens[s_n_i])
		plt.hist(seg_null_dev_bout_lens_flat,bins=20,color=cmap[s_i],alpha=0.5,label=segment_name + ' null')
		plt.axvline(mean_true,color=cmap[s_i],label=segment_name + ' mean')
	plt.xlim((0,max(mean_vals) + 0.25))
	plt.legend()
	plt.xlabel('Deviation Length (s)')
	plt.ylabel('Number of Instances')
	plt.title('cross-segment deviation lengths x null distribution')
	plt.tight_layout()
	save_name ='all_seg_null_v_true_lengths'
	fig_lens.savefig(dev_save_dir + save_name + '.png')
	fig_lens.savefig(dev_save_dir + save_name + '.svg')
	plt.close(fig_lens)
	#Bout lengths: true distributions only
	mean_vals = []
	fig_lens = plt.figure(figsize=(5,5))
	for s_i in range(num_segments):
		segment_name = (' ').join(segment_names[s_i].split('_'))
		seg_true_dev_bout_lens = segment_bout_lengths[s_i]
		mean_true = np.mean(seg_true_dev_bout_lens)
		mean_vals.extend([mean_true])
		plt.hist(seg_true_dev_bout_lens,bins=20,color=cmap[s_i],alpha=0.5,label=segment_name + ' distribution')
		plt.axvline(mean_true,color=cmap[s_i],label=segment_name + ' mean')
	plt.xlim((0,max(mean_vals) + 0.25))
	plt.legend()
	plt.xlabel('Deviation Length (s)')
	plt.ylabel('Number of Instances')
	plt.title('cross-segment deviation lengths')
	plt.tight_layout()
	save_name ='all_seg_true_lengths'
	fig_lens.savefig(dev_save_dir + save_name + '.png')
	fig_lens.savefig(dev_save_dir + save_name + '.svg')
	plt.close(fig_lens)
	#Bout ibis: null vs true means
	mean_vals = []
	fig_ibis = plt.figure(figsize=(5,5))
	for s_i in range(num_segments):
		segment_name = (' ').join(segment_names[s_i].split('_'))
		seg_true_dev_ibis = segment_ibis[s_i]
		mean_true = np.mean(seg_true_dev_ibis)
		mean_vals.extend([mean_true])
		seg_null_dev_bout_ibis = null_segment_dev_ibis[s_i]
		seg_null_dev_bout_ibis_flat = []
		for s_n_i in range(len(seg_null_dev_bout_ibis)):
			#bug fix: previously extended the lens accumulator
			seg_null_dev_bout_ibis_flat.extend(seg_null_dev_bout_ibis[s_n_i])
		plt.hist(seg_null_dev_bout_ibis_flat,bins=20,color=cmap[s_i],alpha=0.5,label=segment_name + ' null')
		plt.axvline(mean_true,color=cmap[s_i],label=segment_name + ' mean')
	#plt.xlim((0,max(mean_vals) + 0.25))
	plt.legend()
	plt.xlabel('Deviation IBI (s)')
	plt.ylabel('Number of Instances')
	plt.title('cross-segment deviation ibis x null distribution')
	plt.tight_layout()
	save_name ='all_seg_null_v_true_ibis'
	fig_ibis.savefig(dev_save_dir + save_name + '.png')
	fig_ibis.savefig(dev_save_dir + save_name + '.svg')
	plt.close(fig_ibis)
	#Bout ibis: true distributions only
	mean_vals = []
	fig_ibis = plt.figure(figsize=(5,5))
	for s_i in range(num_segments):
		segment_name = (' ').join(segment_names[s_i].split('_'))
		seg_true_dev_ibis = segment_ibis[s_i]
		mean_true = np.mean(seg_true_dev_ibis)
		mean_vals.extend([mean_true])
		plt.hist(seg_true_dev_ibis,bins=20,color=cmap[s_i],alpha=0.5,label=segment_name + ' distribution')
		plt.axvline(mean_true,color=cmap[s_i],label=segment_name + ' mean')
	#plt.xlim((0,max(mean_vals) + 0.25))
	plt.legend()
	plt.xlabel('Deviation IBI (s)')
	plt.ylabel('Number of Instances')
	plt.title('cross-segment deviation ibis')
	plt.tight_layout()
	save_name ='all_seg_true_ibis'
	fig_ibis.savefig(dev_save_dir + save_name + '.png')
	fig_ibis.savefig(dev_save_dir + save_name + '.svg')
	plt.close(fig_ibis)
	#Bout counts
	true_counts = []
	fig_counts = plt.figure(figsize=(5,5))
	for s_i in range(num_segments):
		segment_name = (' ').join(segment_names[s_i].split('_'))
		seg_true_dev_count = len(segment_bout_lengths[s_i])
		seg_null_dev_counts = null_segment_dev_counts[s_i]
		true_counts.extend([seg_true_dev_count])
		plt.hist(seg_null_dev_counts,bins=20,color=cmap[s_i],alpha=0.5,label=segment_name + ' null')
		plt.axvline(seg_true_dev_count,color=cmap[s_i],label=segment_name + ' true')
	plt.legend()
	plt.xlabel('Deviation Count')
	plt.ylabel('Number of Instances')
	plt.title('cross-segment deviation counts x null distribution')
	plt.tight_layout()
	save_name ='all_seg_null_v_true_counts'
	fig_counts.savefig(dev_save_dir + save_name + '.png')
	fig_counts.savefig(dev_save_dir + save_name + '.svg')
	plt.close(fig_counts)
| hfgem/BlechCodes | functions/dev_plots.py | dev_plots.py | py | 18,054 | python | en | code | 1 | github-code | 13 |
22453074953 | import sqlite3
# Insert a batch of student rows into the 'alumno' table of the example
# database and print the full table contents.
conexion = sqlite3.connect("ejemplo.db")
try:
    cursor = conexion.cursor()
    # (name, age, id number, grade) tuples; the two 'Adri' rows are
    # intentionally identical sample data.
    alumnos = [('Derek',21,456789,6), ('Adri',19,563478,10), ('Adri',19,563478,10)]
    # Parameterized executemany avoids SQL quoting/injection issues.
    cursor.executemany("INSERT INTO alumno VALUES(?,?,?,?)", alumnos)
    cursor.execute("SELECT * FROM alumno")
    alumnitos = cursor.fetchall()
    print(alumnitos)
    conexion.commit()
finally:
    # Close the connection even if an insert/select fails (previously it
    # leaked on error).
    conexion.close()
45194878526 | from math import inf
def min_coin(coins, s):
    """Minimum-coin change via bottom-up dynamic programming.

    :param coins: available denominations, assumed sorted ascending
    :param s: target sum
    :return: 0 when s == 0; (inf, []) when s cannot be formed;
             otherwise (min_count, list of coins summing to s)
    """
    n = len(coins)
    # t[i][j] = fewest coins from coins[:i+1] summing to j (inf = impossible)
    t = [[inf for _ in range(s + 1)] for _ in range(n)]
    counter = 1
    t[0][0] = 0
    # first row: only multiples of the smallest coin are reachable
    for i in range(coins[0], s + 1, coins[0]):
        t[0][i] = counter
        counter += 1
    for i in range(1, n):
        t[i][0] = 0
        for j in range(s + 1):
            if coins[i] > j:
                t[i][j] = t[i - 1][j]
            else:
                t[i][j] = min(t[i - 1][j], t[i][j - coins[i]] + 1)
    if t[n - 1][s] == 0:
        return 0
    if t[n - 1][s] == inf:
        # target sum cannot be formed with the given denominations
        return inf, []
    return t[n - 1][s], get_solution(t, n, s, coins)


def get_solution(t, n, s, coins):
    """Walk the DP table backwards and return one optimal coin multiset."""
    index = s
    height = n - 1
    solution = []
    while index > 0 and height > 0:
        if t[height][index] == t[height - 1][index]:
            # coins[height] not needed for this optimum -- drop a row
            height -= 1
        else:
            index -= coins[height]
            solution.append(coins[height])
    if index > 0:
        # Remainder is covered by the smallest coin; append as many copies
        # as needed (the previous version appended only one, undercounting
        # e.g. coins=[2,5], s=4 -> [2] instead of [2, 2]).
        solution.extend([coins[0]] * (index // coins[0]))
    return solution
if __name__ == '__main__':
    # Demo: fewest coins summing to 13 from the (sorted) denominations.
    tab = [5, 2, 3, 1, 7]
    tab.sort()
    print(min_coin(tab, 13))
5007044983 | import sys
import numpy as np
#import matplotlib as plt
import matplotlib.pyplot as plt
# Enlarge the default matplotlib font sizes for all generated figures.
plt.rc("axes", titlesize=14)
plt.rc("axes", labelsize=14)
plt.rc("xtick", labelsize=12)
plt.rc("ytick", labelsize=12)
# Name the output image after the first input file, with the directory
# part and the 4-character extension (e.g. ".txt") stripped.
reduced_filename = sys.argv[1].split("/")[-1][:-4]
def get_comms_time(filename):
    """Parse a latency log and return the total comms time per data line.

    Each data line is whitespace-separated with the ISend, Poll and IRecv
    timings at token indices 4, 6 and 8; the file's final line is treated
    as a footer and skipped.

    :param filename: path to the benchmark output file
    :return: list of ISend + Poll + IRecv totals, one per data line
    """
    # 'with' guarantees the handle is closed (previously it leaked).
    with open(filename, 'r') as datafile:
        lines = datafile.readlines()
    isend = []
    poll = []
    irecv = []
    for line in lines[0:-1]:
        tokens = line.strip().split()
        isend.append(float(tokens[4]))
        poll.append(float(tokens[6]))
        irecv.append(float(tokens[8]))
    return [s + p + r for s, p, r in zip(isend, poll, irecv)]
# Parse every input file given on the command line into a comms-time series.
alltimes = []
for i in range(len(sys.argv) - 1):
    filename = sys.argv[1 + i]
    alltimes.append(get_comms_time(filename))
print(alltimes)
# Message sizes 2^0 .. 2^22 bytes.
# NOTE(review): assumes every input file yields exactly 23 data points and
# at most 3 files are given (markers/legend below) -- confirm.
message_sizes = []
for i in range(0, 23):
    message_sizes.append((2**i))
markers = ["x", "^", "o"]
for i in range(len(alltimes)):
    plt.semilogx(message_sizes, alltimes[i], marker=markers[i], base=2)
#plt.xscale("log", base=2)
#plt.plot(Message_Size, comms_time, marker="x")
#plt.plot(perc_tasks, peak_time, marker="x")
plt.xlabel("Message Size / B")
plt.ylabel("Time / s")
plt.legend(["2 Host Baseline", "4 Bluefields model", "4 Hosts model"])
plt.tight_layout()
#plt.title("Latency of moving messages of size n back and forth between two worker nodes via two communication nodes pt2pt", wrap=True)
plt.savefig(reduced_filename + ".png")
| JosephMoore25/L3-Project-DPUs | Graphing-Code/graph_latency.py | graph_latency.py | py | 1,709 | python | en | code | 0 | github-code | 13 |
28081563890 | import os
import sys
from os.path import join as opj
import base64
import struct
import copy
import numpy as np
from scipy.spatial.distance import cdist
import cv2
def parse_pt(pt_file):
    """Parse a per-camera tracking file into {track_id: [records]}.

    Each CSV line holds: frame id, track id, x, y, w, h, ..., base64 feature.
    The stored record is [fid, x1, y1, x2, y2, feature_tuple], with the box
    converted from (x, y, w, h) to corner coordinates.
    """
    with open(pt_file) as f:
        lines = f.readlines()
    img_rects = dict()
    for line in lines:
        line = line.strip().split(',')
        fid, tid = int(float(line[0])), int(float(line[1]))
        # list(...) fixes Python 3, where map() returns an iterator that
        # cannot be indexed/mutated (still valid on Python 2).
        rect = list(map(lambda x: int(float(x)), line[2:6]))
        rect[2] += rect[0]
        rect[3] += rect[1]
        fea = base64.b64decode(line[-1])
        # // keeps the unpack count an int under Python 3 true division.
        fea = struct.unpack('{}f'.format(len(fea) // 4), fea)
        if tid not in img_rects:
            img_rects[tid] = list()
        rect.insert(0, fid)
        rect.append(fea)
        img_rects[tid].append(rect)
    return img_rects
def parse_bias(timestamp_dir, scene_name):
    """Read per-camera time offsets from <scene>.txt timestamp files.

    Each line looks like "c<ID> <bias>"; the first bias seen for a camera
    id wins when the same camera appears in multiple scenes.

    :param timestamp_dir: directory containing the per-scene .txt files
    :param scene_name: iterable of scene names (file stems) to read
    :return: dict mapping camera id (int) -> time bias (float)
    """
    cid_bias = dict()
    for sname in scene_name:
        with open(opj(timestamp_dir, sname + '.txt')) as f:
            for raw_line in f.readlines():
                parts = raw_line.strip().split(' ')
                cid = int(parts[0][2:])
                # setdefault keeps only the first bias seen per camera
                cid_bias.setdefault(cid, float(parts[1]))
    return cid_bias
def homography(data_dir):
    """Load each camera's homography matrix from c<ID>/calibration.txt.

    The first line of calibration.txt looks like
    "Homography matrix: a b c;d e f;g h i" -- rows are ';'-separated and
    values space-separated.

    :param data_dir: directory containing per-camera sub-folders named c<ID>
    :return: dict mapping camera id (int) -> numpy float matrix
    """
    cid_arr = dict()
    for cid_name in os.listdir(data_dir):
        # skip plain files; camera folders are named like "c001"
        if '.' in cid_name: continue
        cid = int(cid_name[1:])
        cal_path = opj(data_dir, cid_name, 'calibration.txt')
        with open(cal_path) as f:
            lines = f.readlines()
        datas = lines[0].strip().split(':')[-1].strip().split(';')
        arr = list()
        for data in datas:
            # list(...) fixes Python 3, where np.array over bare map()
            # iterators yields an object array instead of a float matrix.
            arr.append(list(map(float, data.split(' '))))
        cid_arr[cid] = np.array(arr)
    return cid_arr
if __name__ == '__main__':
    # Fuse per-camera tracking features into one trajectory file per camera.
    # NOTE(review): this block is Python-2 only -- f_w is opened in 'wb'
    # mode but written with str; under Python 3 open in 'w' or encode.
    data_dir = '../data/feature/'
    roi_dir = '../data/roi/'
    save_dir = '../data/trajectory/'
    scene_name = ['S02', 'S05']
    cid_bias = parse_bias('../data/Track1/cam_timestamp', scene_name)
    cid_arr = homography('../data/Track1/calibration/')
    # keep only .txt files, ordered by the 3-digit camera id in the name
    txt_paths = os.listdir(data_dir)
    txt_paths = filter(lambda x: '.txt' in x, sorted(txt_paths, key=lambda x: int(x.split('.')[0][-3:])))
    for txt_path in txt_paths:
        print('processing {}...'.format(txt_path))
        cid = int(txt_path.split('.')[0][-3:])
        f_w = open(opj(save_dir, txt_path), 'wb')
        cur_bias = cid_bias[cid]
        # region-of-interest mask loaded grayscale; NOTE(review): roi and
        # cid_arr appear unused below -- confirm whether filtering by ROI /
        # homography was intended here.
        roi = cv2.imread(opj(roi_dir, '{}.jpg'.format(txt_path.split('.')[0])), 0)
        img_rects = parse_pt(opj(data_dir, txt_path))
        tid_data = dict()
        for tid in img_rects:
            rects = img_rects[tid]
            if len(rects) == 0: continue
            tid_data[tid] = [cid]
            # order detections by frame id to find enter/leave times
            rects = sorted(rects, key=lambda x: x[0])
            # frame->seconds conversion; presumably camera 15 records at
            # 8 fps while the others record at 10 fps -- confirm.
            if cid != 15:
                tid_data[tid] += [cur_bias + rects[0][0] / 10., cur_bias + rects[-1][0] / 10.] # [enter, leave]
            else:
                tid_data[tid] += [cur_bias + rects[0][0] / 8., cur_bias + rects[-1][0] / 8.] # [enter, leave]
            # average appearance feature over the middle ~40% of the track
            all_fea = np.array([rect[-1] for rect in rects[int(0.3*len(rects)):int(0.7*len(rects)) + 1]])
            mean_fea = np.mean(all_fea, axis=0)
            tid_data[tid] += mean_fea.tolist()
        # output line: cid tid enter leave fea_0 ... fea_n
        for tid in tid_data:
            data = tid_data[tid]
            data.insert(1, tid)
            f_w.write(' '.join(map(str, data)) + '\n')
        f_w.close()
| he010103/Traffic-Brain | AI-City-MTMC/tools/trajectory_fusion.py | trajectory_fusion.py | py | 3,377 | python | en | code | 15 | github-code | 13 |
43972752156 | import random
import datetime
class Person:
    """Base combatant for the turn-based duel simulator.

    Stats: ``act`` attack, ``det`` defence, ``spe`` speed (decides turn
    order), ``hp`` health, ``hit`` hit chance in percent.  ``skip``,
    ``sround`` and ``times`` are status fields written by some fighters'
    skills (``times`` scales this fighter's own outgoing damage).
    Subclasses must implement ``Dmg(p1, round)`` returning raw damage.
    """
    def __init__(self, name, act, det, spe, hp=100, skip=0, sround=0, hit=100, times=1.0):
        self.name = name
        self.act = act
        self.det = det
        self.spe = spe
        self.hp = hp
        self.skip = skip
        self.sround = sround
        self.hit = hit
        self.times = times
    def revage(self, p1):
        # Revenge/counter mechanic: only triggers when the defender is
        # 幽兰黛尔 (Dae).  16% proc chance, then an accuracy roll against
        # her hit stat; on success the attacker hurts itself instead.
        if p1.name != "幽兰黛尔":
            return None
        if random.randint(1, 100) > 16:
            return None
        if random.randint(1, 100) <= p1.hit:
            self.hp -= 30 - self.det
            return 1
        return 0
    def is_dead(self):
        """True once hp has dropped to zero or below."""
        return self.hp <= 0
    def base_attack(self, person):
        """Plain damage formula: own attack minus the target's defence."""
        return self.act - person.det
    def attack(self, p1, round):
        # A stunned fighter forfeits exactly one turn, then recovers.
        if self.skip == 1:
            self.skip = 0
            return
        # `times` is this attacker's damage multiplier (Lit can lower it).
        p1.hp -= self.times * self.Dmg(p1, round)
    def __str__(self):
        return (
            self.name + "\n"
            + "攻击力:" + str(self.act) + "\n"
            + "防御力:" + str(self.det) + "\n"
            + "Hp: " + str(self.hp) + "\n"
        )
    __repr__ = __str__
class Yil(Person): # 樱莲
    """樱莲: 30% self-heal (+25 hp, capped at 100) each turn; a flat-25
    skill on even rounds, a plain defence-reduced attack on odd rounds."""
    def __init__(self):
        Person.__init__(self,"樱莲",20,9,18)
    def Dmg(self,p1,round):
        dmg = 0
        # 30% chance to heal 25 hp (capped at 100) before striking.
        if random.randint(1,100)<=30:
            self.hp += 25
            if self.hp >100 :
                self.hp = 100
        if round %2 ==0:
            # Even round: flat-25 skill; the counter (revage) can negate it.
            if self.revage(p1):
                return dmg
            if random.randint(1, 100) <= self.hit:
                dmg += 25
        else:
            # Odd round: basic attack.
            if random.randint(1, 100) <= self.hit:
                dmg += self.base_attack(p1)
        return dmg
class Dls(Person): # 德莉莎
    """德莉莎: every 3rd round a 5-hit combo at flat (16 - target def)
    per hit; otherwise a basic attack with a 30% chance to permanently
    shred the target's defence by 5."""
    def __init__(self):
        Person.__init__(self,"德莉莎",19,12,22)
    def Dmg(self, p1, round):
        dmg = 0
        if round%3 ==0 :
            # Combo turn; the counter (revage) can negate it entirely.
            if self.revage(p1):
                return dmg
            for i in range(5):
                if random.randint(1, 100) <= self.hit:
                    dmg += (16-p1.det)
        else:
            if random.randint(1, 100) <= self.hit:
                dmg += self.base_attack(p1)
            # 30% chance to permanently lower the target's defence.
            if random.randint(1,100)<=30:
                p1.det -=5
        return dmg
class Dya(Person): # 渡鸦
    """渡鸦: guaranteed +25% attack when the opponent is 琪亚娜,
    otherwise a 25% chance of the same buff at creation time; every
    3rd round a 7-hit combo at flat (16 - target def) per hit."""
    def __init__(self,P1):
        # `P1` is the opponent she is built against (see first()).
        Person.__init__(self, "渡鸦", 23, 14, 14)
        if P1.name == "琪亚娜":
            self.act = 1.25 * self.act
        else:
            # Random 25% chance of the attack buff against anyone else.
            if random.randint(1, 100) <= 25:
                self.act = 1.25 * self.act
    def Dmg(self,p1,round):
        dmg = 0
        if round %3 ==0:
            # Combo turn; the counter (revage) can negate it entirely.
            if self.revage(p1):
                return dmg
            for i in range(7):
                if random.randint(1, 100) <= self.hit:
                    dmg += (16-p1.det)
        else:
            if random.randint(1, 100) <= self.hit:
                dmg += self.base_attack(p1)
        return dmg
class Jiz(Person): # 姬子
    """姬子: deals double damage to the names listed in ``muti``; on
    even rounds trades accuracy (-35 hit) for doubled attack.

    NOTE(review): the act*=2 / hit-=35 adjustment is applied again on
    EVERY even round and never reset, so it compounds over a long
    fight — confirm whether that stacking is intended.
    """
    def __init__(self):
        # Opponents that take double damage from her.
        self.muti = ["阿琳姐妹","德莉莎","樱莲"]
        Person.__init__(self,"姬子",23,9,12)
    def Dmg(self,p1,round):
        dmg = 0
        if round%2 == 0:
            self.act *= 2
            self.hit -= 35
        if random.randint(1, 100) <= self.hit:
            dmg += self.base_attack(p1)
        if p1.name in self.muti:
            dmg = dmg*2
        return dmg
class Yay(Person): # 芽衣
    """芽衣: on even rounds a 5-hit combo (3 damage per hit, or a full
    basic attack on a missed roll) plus a 30% chance to stun the target
    for one turn.

    NOTE(review): odd rounds always return 0 damage — there is no else
    branch for a basic attack; confirm whether that is intended.
    """
    def __init__(self):
        Person.__init__(self,"芽衣",22,12,30)
    def Dmg(self,p1,round):
        dmg = 0
        if round %2 == 0 :
            # Combo turn; the counter (revage) can negate it entirely.
            if self.revage(p1):
                return dmg
            for i in range(5):
                if random.randint(1, 100) <= self.hit:
                    dmg += 3
                else:
                    dmg += self.base_attack(p1)
            # 30% chance to make the target skip its next turn.
            if random.randint(1,100)<=30:
                p1.skip = 1
        return dmg
class Ali(Person): # 阿琳姐妹
    """阿琳姐妹: revives once (back to 20 hp) the first time hp reaches
    zero; the first attack after that revive is a special strike
    (50/50 between 233-def and 50-def damage)."""
    def __init__(self):
        Person.__init__(self,"阿琳姐妹",18,10,10)
        self.dead_attack = 1   # one free revive remaining
        self.speal_attack = 1  # one special (post-revive) attack remaining
    def is_dead(self):
        # The first "death" consumes the revive and restores 20 hp.
        # NOTE(review): when hp > 0 this falls through and returns None
        # (falsy), not False — callers only test truthiness.
        if self.hp<=0:
            if self.dead_attack == 0:
                return True
            else:
                self.dead_attack = 0
                self.hp = 20
                return False
    def Dmg(self,p1,round):
        dmg = 0
        # The special attack is armed only after the revive was used.
        if self.dead_attack == 0 and self.speal_attack == 1:
            self.speal_attack = 0
            if self.revage(p1):
                return dmg
            if random.randint(1,100) <=50:
                if random.randint(1, 100) <= self.hit:
                    dmg += 233-p1.det
            else:
                if random.randint(1, 100) <= self.hit:
                    dmg += 50 -p1.det
        else:
            if random.randint(1, 100) <= self.hit:
                dmg += self.base_attack(p1)
        return dmg
class Kna(Person):
    """琪亚娜 (Kiana): on even rounds a heavy strike with a 35% chance
    of self-stun (she skips her own next turn); odd rounds are basic
    attacks.

    NOTE(review): the heavy strike uses ``self.act + p1.det`` — it ADDS
    the target's defence to the damage; confirm a minus was not intended.
    """
    def __init__(self):
        Person.__init__(self,"琪亚娜",24,11,23)
    def Dmg(self,p1,round):
        dmg = 0
        if round %2 ==0:
            # Heavy-strike turn; the counter (revage) can negate it.
            if self.revage(p1):
                return dmg
            if random.randint(1, 100) <= self.hit:
                dmg += self.act + p1.det
            # Recoil: 35% chance she loses her own next turn.
            if random.randint(1,100) <=35:
                self.skip = 1
        else:
            if random.randint(1, 100) <= self.hit:
                dmg += self.base_attack(p1)
        return dmg
class Bly(Person): # 布洛妮娅
    """布洛妮娅: every 3rd round deals a uniformly random 1-100 burst;
    otherwise a basic attack with a 25% chance of a 4-hit follow-up at
    (12 - target def) per hit."""
    def __init__(self):
        Person.__init__(self,"布洛妮娅",21,10,20)
    def Dmg(self,p1,round):
        dmg = 0
        if round % 3 == 0 :
            # Burst turn; the counter (revage) can negate it entirely.
            if self.revage(p1):
                return dmg
            if random.randint(1, 100) <= self.hit:
                dmg += random.randint(1,100)
        else:
            if random.randint(1, 100) <= self.hit:
                dmg += self.base_attack(p1)
            # 25% chance of a 4-hit follow-up volley.
            if random.randint(1,100) <=25:
                for i in range(4):
                    if random.randint(1, 100) <= self.hit:
                        dmg += (12-p1.det)
        return dmg
class Lit(Person):
    """丽塔 (Rita): every 4th round a no-damage support/debuff turn —
    heals 4 hp (capped at 100), sets ``sround`` and, once per fight,
    cuts the damage multiplier ``times`` to 0.4.  Other rounds: basic
    attack with a 35% chance to sap 4 attack (and 3 of this hit's
    damage).

    NOTE(review): the support turn heals and modifies ``p1`` — i.e. the
    OPPONENT (it heals and buffs them, but also cripples their damage
    via ``times``).  Confirm which effects were meant for ``self``;
    ``p1.sround`` is also never read anywhere visible in this file.
    """
    def __init__(self):
        Person.__init__(self,"丽塔",26,11,17)
        self.flag =1  # one-shot: the times-debuff has not been used yet
    def Dmg(self,p1,round):
        dmg = 0
        if round %4 == 0:
            # Support turn; the counter (revage) can still trigger.
            if self.revage(p1):
                return dmg
            p1.hp +=4
            if p1.hp >100:
                p1.hp = 100
            p1.sround = 2
            if self.flag :
                self.flag = 0
                p1.times = 0.4
        else:
            if random.randint(1, 100) <= self.hit:
                dmg += self.base_attack(p1)
            # 35% chance: this hit loses 3 damage (floored at 0) and the
            # target permanently loses 4 attack (floored at 0).
            if random.randint(1,100)<=35:
                dmg -= 3
                if dmg < 0 :
                    dmg = 0
                p1.act -= 4
                if p1.act <0:
                    p1.act = 0
        return dmg
class Xer(Person):
    """希儿 (Seele): alternates between two stances every turn — one
    gains +10 act / -5 det, the other reverts the stats and heals a
    random 1-15 hp (capped at 100).  Each turn ends with a basic
    attack.  Unlike most fighters, she never calls revage."""
    def __init__(self):
        Person.__init__(self,"希儿",23,13,26)
        self.black_white = 1  # stance toggle
    def Dmg(self,p1,round):
        dmg = 0
        if self.black_white:
            # Offensive stance: trade defence for attack.
            self.black_white=0
            self.act +=10
            self.det -=5
        else:
            # Recovery stance: revert the stats and heal a little.
            self.black_white =1
            self.act -=10
            self.det +=5
            self.hp += random.randint(1,15)
            if self.hp >100 :
                self.hp = 100
        if random.randint(1,100) <= self.hit:
            dmg += self.base_attack(p1)
        return dmg
class Dae(Person):
    """幽兰黛尔 (Durandal): gains +3 attack every time she strikes, so
    her damage ramps steadily.  She is also the only fighter the
    ``revage`` counter in Person can trigger against (it is keyed on
    her name)."""
    def __init__(self):
        Person.__init__(self, "幽兰黛尔", 19, 10, 15)
    def Dmg(self, p1, round):
        # Permanent attack ramp first, then a single accuracy roll.
        self.act += 3
        if random.randint(1, 100) <= self.hit:
            return self.base_attack(p1)
        return 0
class Fuh(Person): # 符华
    """符华 (Fu Hua): every 3rd round a flat-18 strike that permanently
    lowers the target's hit chance by 25; otherwise deals raw ``act``.

    NOTE(review): the odd-round branch adds ``self.act`` directly, not
    ``base_attack`` — it ignores the target's defence; confirm intent.
    """
    def __init__(self):
        Person.__init__(self,"符华",17,15,16)
    def Dmg(self,p1,round):
        dmg = 0
        if round %3 ==0:
            # Debuff-strike turn; the counter (revage) can negate it.
            if self.revage(p1):
                return dmg
            if random.randint(1, 100) <= self.hit:
                dmg += 18
                p1.hit -= 25
        else:
            if random.randint(1, 100) <= self.hit:
                dmg += self.act
        return dmg
def first():
    """Build the two duelists (Dls vs Dya) and order them by speed.

    Dya's constructor receives the opponent because her init may buff
    her attack based on the opponent's name.  Returns (faster, slower);
    the faster fighter attacks first each round.
    """
    challenger = Dls()
    opponent = Dya(challenger)
    if challenger.spe < opponent.spe:
        challenger, opponent = opponent, challenger
    return challenger, opponent
def fight():
    """Run one duel to the death.

    Returns 0 if the first-mover (P1) dies, 1 if P2 dies.  Both
    fighters are checked after EVERY single attack because the counter
    mechanic (Person.revage) can kill the attacker on its own turn.
    """
    P1, P2 = first()
    turn = 0
    while True:
        turn += 1
        # P1 strikes first, then P2, within the same turn number.
        for attacker, defender in ((P1, P2), (P2, P1)):
            attacker.attack(defender, turn)
            if P1.is_dead():
                return 0
            if P2.is_dead():
                return 1
if __name__ == "__main__" :
    # Monte-Carlo estimate of the win rate over `num` independent duels.
    # The P1/P2 built here are only used for their display names; every
    # fight() constructs its own fresh pair via first().
    P1,P2 = first()
    num = 10000
    a= 0
    for i in range(num):
        a += fight()
    # NOTE(review): a/num relies on Python 3 true division; under
    # Python 2 this would print 0 — confirm the target interpreter.
    print (P1.name , "的胜率是:",a/num)
    print (P2.name , "的胜率是:",1-a/num)
| findland/benghuai | simulation.py | simulation.py | py | 8,968 | python | en | code | 0 | github-code | 13 |
10012481416 | ################################################################
#
# File: update_features.py
# Author: Michael Souffront
# Date: 05/22/2018
# Last Modified: 09/12/2018
# Purpose: Update published service with new dissolved FTs
# Requirements: arcpy
#
################################################################
# import modules
import arcpy
import os
from ALFlib.ALFlib import copyFiles
import logging
# create logger function
def init_logger():
    """Attach a UTF-16 file handler (append mode) to the root logger.

    Logs everything at DEBUG and above, formatted as
    '<asctime>: <message>', to a hard-coded workflow.log path.

    NOTE(review): this configures the ROOT logger and adds a new
    handler on every invocation, so calling it more than once per run
    duplicates every log line — only call it once.
    """
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    # FileHandler positional args: filename, mode ('a' = append), encoding.
    handler = logging.FileHandler(r'C:\Users\byuhi\Documents\table_update_workflow\workflow.log', 'a', 'utf-16')
    formatter = logging.Formatter('%(asctime)s: %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
if __name__ == "__main__":
    # Initialize file logging before any work starts.
    init_logger()
    logging.debug('Updating features')
    # Quiet arcpy: keep geoprocessing history out of metadata and allow
    # existing outputs to be overwritten.
    arcpy.gp.logHistory = False
    arcpy.env.overwriteOutput = True
    source = r'D:\dissolved_features.gdb'
    destination = r'C:\Users\byuhi\Documents\ArcGIS\animation_workspace'
    # Compact the source geodatabase before shipping it.
    arcpy.Compact_management(source)
    # Remove the previous copy at the destination...
    arcpy.Delete_management(os.path.join(destination, 'dissolved_features.gdb'))
    # ...then copy the fresh gdb over, skipping ArcGIS lock files.
    copyFiles(source, destination, exclusions=['*.lock'], overwrite=True)
    logging.debug('Finished updating features')
logging.debug('Finished updating features')
| BYU-Hydroinformatics/Esri_Animation_workflow_py | update_features.py | update_features.py | py | 1,453 | python | en | code | 0 | github-code | 13 |
24479408913 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import gym
import numpy as np
from stable_baselines.sac.policies import FeedForwardPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import SAC
import gym
import simglucose
import warnings
def fxn():
    """Emit a DeprecationWarning; exists only to exercise the
    warning-suppression demo that follows in this script."""
    category = DeprecationWarning
    warnings.warn("deprecated", category)
# Fire the demo warning once with all warnings silenced.
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    fxn()
# Register gym environment. By specifying kwargs,
# you are able to choose which patient to simulate.
# patient_name must be 'adolescent#001' to 'adolescent#010',
# or 'adult#001' to 'adult#010', or 'child#001' to 'child#010'
from gym.envs.registration import register
register(
    id='simglucose-adolescent2-v0',
    entry_point='simglucose.envs:T1DSimEnv',
    kwargs={'patient_name': 'adolescent#002'}
)
# Instantiate the just-registered single-patient environment.
env = gym.make('simglucose-adolescent2-v0')
# Custom MLP policy of three layers of size 128 each
class CustomSACPolicy(FeedForwardPolicy):
    """SAC policy network: a fully-connected net with three 128-unit
    layers, no layer normalization, and plain MLP feature extraction.
    All other arguments are forwarded to FeedForwardPolicy."""
    def __init__(self, *args, **kwargs):
        super(CustomSACPolicy, self).__init__(*args, **kwargs,
                                              layers=[128, 128, 128],
                                              layer_norm=False,
                                              feature_extraction="mlp")
# Wrap the raw env in a (single-env) vectorized wrapper for stable-baselines.
# NOTE(review): the lambda closes over the global `env`, which this same
# statement rebinds — fine as long as DummyVecEnv calls the factory during
# construction (before the rebind takes effect), but invoking the factory
# again later would return the wrapper itself; confirm.
env = DummyVecEnv([lambda: env])
# SAC agent using the custom 3x128 MLP policy defined above.
model = SAC(CustomSACPolicy, env, verbose=1)
# Train the agent
# model.learn(total_timesteps=100000)
# model = SAC.load("sac_simglucose")
# obs = env.reset()
# while True:
#     action, _states = model.predict(obs)
#     obs, rewards, dones, info = env.step(action)
# All 30 simulated patients, in group order:
# child#001..#010, adolescent#001..#010, adult#001..#010.
person_options = [
    '{}#0{:02d}'.format(group, idx)
    for group in ('child', 'adolescent', 'adult')
    for idx in range(1, 11)
]
# Sweep every patient: register a per-patient env, briefly train the
# shared SAC model, save it, then roll out 10 evaluation steps.
for i,p in enumerate(person_options):
    # e.g. 'child#003' -> 'child3'; the running index keeps ids unique
    # across groups ('adolescent11', 'adult21', ...).
    patient_id = p.split('#')[0] + str(i + 1)
    register(
        id='simglucose-' + patient_id + '-v0',
        entry_point='simglucose.envs:T1DSimEnv',
        kwargs={'patient_name': p}
    )
    env = gym.make('simglucose-' + patient_id + '-v0')
    print(p, patient_id)
    env = DummyVecEnv([lambda: env])
    # NOTE(review): `model` was constructed against the FIRST env and is
    # never re-bound here (no model.set_env(env)), so learn() may still
    # train on the original environment — confirm intent.
    model.learn(total_timesteps=10)
    model.save("sac_simglucose")
    obs = env.reset()
    # NOTE(review): the inner loop reuses `i`, clobbering the outer
    # patient index (harmless only because enumerate reassigns it).
    for i in range(10):
        print(obs)
        action, _states = model.predict(obs)
        print(action, _states)
        obs, rewards, dones, info = env.step(action)
        print(obs, rewards, dones, info)
        # env.render()
# In[ ]:
| Projna42/Pancreas_controller | Projna/using_the_SAC.py | using_the_SAC.py | py | 2,597 | python | en | code | 0 | github-code | 13 |
38740374162 | #coding: utf-8
from django.shortcuts import render, get_object_or_404,redirect
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import user_passes_test, login_required, permission_required
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, Group, Permission
from django.contrib import messages
from django import forms
from django.core.exceptions import ValidationError
from django.conf import settings
from django.contrib.auth import login
from django.contrib.auth.decorators import user_passes_test, login_required, permission_required
from django.forms.models import model_to_dict
from records.order_model import Order, OrderItem, OrderItemForm
from records.transfer_model import OrderCashFlow, IncomeForm, ExpenseForm
def addOrderIncome(request, order_id):
    """Record a cash inflow (income) against an order.

    GET renders an empty IncomeForm; a valid POST stores the new
    cash-flow row tagged with the order and the current user, flashes a
    success message and redirects back to the order view.  An invalid
    POST falls through and re-renders the page with the bound form so
    field errors are shown.
    """
    order = get_object_or_404(Order, pk=order_id)
    if request.method != 'POST':
        form = IncomeForm()
    else:
        form = IncomeForm(request.POST)
        if form.is_valid():
            income = form.save(commit=False)
            income.order = order
            income.user = request.user
            income.save()
            messages.add_message(request, messages.SUCCESS,
                                 u'成功添加收入 %s' % income.amount)
            return HttpResponseRedirect(
                reverse('vieworder', kwargs={'order_id': order.id}))
    return render(request, 'transfer/addorderincome.html',
                  {'order': order, 'form': form})
def addOrderExpense(request, order_id):
    """Record a cash outflow (expense) against an order.

    Mirrors addOrderIncome, except the entered amount is stored negated
    so expenses appear as negative cash flow.  GET renders an empty
    ExpenseForm; a valid POST saves the row and redirects to the order
    view; an invalid POST re-renders with the bound form.
    """
    # Fetch the order once (the previous version issued this exact
    # get_object_or_404 twice, costing a redundant database query).
    order = get_object_or_404(Order, pk=order_id)
    if request.method == 'POST':
        form = ExpenseForm(request.POST)
        if form.is_valid():
            expense = form.save(commit=False)
            # Expenses are stored as negative amounts.
            expense.amount = expense.amount * -1
            expense.order = order
            expense.user = request.user
            expense.save()
            # NOTE(review): the flashed message shows the already-negated
            # amount (e.g. "-50") — confirm that is the desired display.
            messages.add_message(request, messages.SUCCESS,
                                 u'成功添加支出 %s' % expense.amount)
            return HttpResponseRedirect(
                reverse('vieworder', kwargs={'order_id': order.id}))
    else:
        form = ExpenseForm()
    return render(request, 'transfer/addorderexpense.html',
                  {'order': order, 'form': form})
def deleteTransfer(request, transfer_id):
    """Delete one OrderCashFlow row and return a plain-text confirmation.

    NOTE(review): there is no permission check and no request-method
    check — any GET to this URL deletes the row, which is unsafe
    (CSRF, link prefetchers, crawlers).  Should require POST and an
    authenticated, authorized user.
    """
    cashFlow = get_object_or_404(OrderCashFlow, pk=transfer_id)
    cashFlow.delete()
    return HttpResponse(u'成功删除')
| lianhuness/hdcrm | records/transfer_view.py | transfer_view.py | py | 2,231 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.