seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
13498915187 | import math
#import civq.utils as utils
import sys
import civq.utils as utils
class Encoder:
# Codebook's size
M = 0
# Block's area
L = 0
# Stop condition
epslon = 0
# Codebook
codebook = []
# Regions to associate vector with codebook vectors
regions = {}
indexReconstruct = {}
def __init__(self, m, l, e):
self.M = m
self.L = l
self.epslon = e
self.codebook = []
#print(self.codebook)
def buildInitialCodeBook(self, img):
imgWidth, imgHeight = img.size
initialCodeBook = []
blocks = (imgHeight * imgWidth) // self.L
if blocks >= self.M:
side = int(math.sqrt(self.L))
step = blocks // self.M
counter = 1
for i in range(0, imgHeight // side):
for j in range(0, imgWidth // side):
#print(str(j * width) + " " + str(i * height) + " " + str((j + 1) * width) + " " + str((i + 1) * height))
box = (j * side, i * side, ((j + 1) * side), ((i + 1) * side))
cropped = img.crop(box)
if(not (counter % step)):
initialCodeBook.append(list(cropped.getdata()))
counter += 1
else:
initialCodeBook = self.getUniformInitialCodeBook()
self.codebook = initialCodeBook
def getUniformInitialCodeBook(self):
initialCodeBook = []
product = self.M * self.L
if product <= 256:
step = int((256 / self.M) / self.L)
counter = 1
l = []
for j in range(0, 256, step):
l.append(j)
if(not (counter % self.L)):
initialCodeBook.append(l)
l = []
counter += 1
else:
repetition = product // 256
l = []
counter = 1
for j in range(0, 256):
for k in range(0, repetition):
l.append(j)
#print(l)
#print(counter)
if(not (counter % (self.L // repetition))):
initialCodeBook.append(l)
l = []
counter += 1
return initialCodeBook
def calculateDistortion(self, v1, v2):
size = len(v1)
#print(v1)
#print(v2)
mse = 0
for i in range(0, size):
mse += math.pow(v1[i] - v2[i], 2)
mse /= size
return mse
def findClosest(self, v1):
distortion = sys.float_info.max
index = 0
#print(self.codebook)
counter = 0
for vector in self.codebook:
result = self.calculateDistortion(v1, vector)
if(result < distortion):
distortion = result
index = counter
counter += 1
return index
def allocateRegions(self, vectors):
self.regions = {}
self.indexReconstruct = {}
counter = 0
for vector in vectors:
chosen = self.findClosest(vector)
updatedValue = [vector]
if chosen in self.regions:
for already in self.regions[chosen]:
updatedValue.append(already)
self.regions.update({chosen : updatedValue})
self.indexReconstruct.update({counter : chosen})
counter += 1
def calculateAverageDistortion(self, size):
avg_dist = 0
#print(self.regions)
keys = self.regions.keys()
for index in keys:
values = self.regions[index]
for allocated in values:
avg_dist += self.calculateDistortion(allocated, self.codebook[index])
avg_dist /= size
return avg_dist
def updateCodeBook(self):
for indexKey in self.regions.keys():
values = self.regions[indexKey]
quantity = len(values)
sizeOfEach = len(values[0])
#print(sizeOfEach)
avg = [0] * sizeOfEach
for i in range(0, len(values[0])):
for vector in values:
avg[i] += vector[i]
avg[i] //= quantity
#print(avg)
self.codebook[indexKey] = avg
#print(self.codebook)
def encode(self, img):
#print(self.codebook)
vectors = []
side = int(math.sqrt(self.L))
imgWidth, imgHeight = img.size
## STEP 1 ##
# Generating vectors from image
for i in range(0, imgHeight // side):
for j in range(0, imgWidth // side):
#print(str(j * width) + " " + str(i * height) + " " + str((j + 1) * width) + " " + str((i + 1) * height))
box = (j * side, i * side, ((j + 1) * side), ((i + 1) * side))
cropped = img.crop(box)
vectors.append(list(cropped.getdata()))
# Starting list of distortions for future comparison
distortions = [0]
## STEP 2 ##
# Allocating vector into their regions
self.allocateRegions(vectors)
#print(self.regions)
## STEP 3 ##
# Adding second distortion to start interation
size = len(vectors)
distortions.append(self.calculateAverageDistortion(size))
#print(distortions)
## STEP 4 ##
# Updating codebook until error condition is satisfied
counter = 1
if distortions[counter] != 0.0:
print(((distortions[counter] - distortions[counter - 1]) / distortions[counter]))
while(abs((distortions[counter] - distortions[counter - 1]) / distortions[counter]) > self.epslon):
## STEP 5 ##
self.updateCodeBook()
## BACK TO STEP 2 ##
self.allocateRegions(vectors)
## STEP 3 ##
next = self.calculateAverageDistortion(size)
if next == 0.0:
break
print(next)
distortions.append(next)
print(distortions)
counter += 1
# Making a list from region dictionary
indexList = []
""" for key, value in self.regions.items():
temp = (key,value)
dictList.append(temp) """
for i in range(0, len(vectors)):
indexList.append(self.indexReconstruct[i])
#print(self.regions)
#print(indexList)
# Need to send the codebook!
#print(self.codebook)
return (imgHeight, imgWidth, self.L, self.M - 1, self.codebook, indexList)
| EduardoLR10/imageCompressors | civq/encoder.py | encoder.py | py | 6,772 | python | en | code | 1 | github-code | 13 |
1652918955 | # 这些真的是easy难度吗...
# 一开始想到位运算,但没细想怎么处理进位,然后百度了下得到一个解法 :
class Solution(object):
    def getSum(self, a, b):
        """
        Add two integers without '+' via XOR/carry iteration.

        NOTE(review): per the comments in this file, this first version
        fails on negative operands in Python — unbounded ints mean the
        carry never overflows away, so e.g. getSum(-1, 1) loops forever
        (the corrected, masked version appears below in this file).
        :type a: int
        :type b: int
        :rtype: int
        """
        while b:
            x = a^b
            y = (a&b) << 1 # note: '<<' binds tighter than '&', hence the parentheses
            a = x
            b = y
        return a
# 测试没问题,但提交又报错了,原因是计算不了-1然后超时报错了
# 再次直接百度python解法。
# 报错原因:由于【Python没有无符号右移操作】,并且当左移操作的结果超过最大整数范围时,会自动将int类型转换为long类型,因此需要对上述代码进行调整。
class Solution(object):
    def getSum(self, a, b):
        """
        Add two integers using only bitwise operations.

        Emulates 32-bit wrap-around arithmetic (Python ints are unbounded,
        so every intermediate is reduced modulo 2**32) and finally decodes
        the 32-bit pattern back into Python's signed range.
        :type a: int
        :type b: int
        :rtype: int
        """
        MAX_INT = 0x7FFFFFFF
        MIN_INT = 0x80000000
        MASK = 0x100000000
        while b != 0:
            partial = (a ^ b) % MASK       # bitwise sum with carries dropped
            carry = ((a & b) << 1) % MASK  # carries, shifted into position
            a, b = partial, carry
        if a <= MAX_INT:
            return a
        # Bit 31 set: the value is negative in 32-bit terms; XOR against
        # MAX_INT and complement to recover the signed Python int.
        return ~((a % MIN_INT) ^ MAX_INT)
| fire717/Algorithms | LeetCode/python/_371.SumofTwoIntegers.py | _371.SumofTwoIntegers.py | py | 1,238 | python | zh | code | 6 | github-code | 13 |
34785907190 | import pandas as pd
import matplotlib.pyplot as plt
# Source data: one row per Starbucks location worldwide.
file_path = './directory.csv'
df = pd.read_csv(file_path)
# Keep US stores only.
df = df[df['Country'] == 'US']
# Store count per city, largest first, top 25 cities.
data = df.groupby('City')['Brand'].count().sort_values(ascending = False).head(25)
_x = data.index
_y = data.values
plt.figure(figsize = (20, 8), dpi = 80)
plt.bar(range(len(_x)), _y)
# Slant the city labels so they remain readable.
plt.xticks(range(len(_x)), _x, rotation = 45)
plt.xlabel('City')
plt.ylabel('Number of Starbucks')
plt.savefig('./chart2.png')
plt.show()
32029283965 | import base
from ..items.node import Category, Node
from ..items.edge import Edge
class EclubsSpider(base.BaseSpider):
    """Crawl the MIT entrepreneurship accelerator site, following outbound
    links to company sites and from there to news sites, emitting Node and
    Edge items that describe the resulting link graph."""

    name = 'eclubs'
    start_urls = [
        'http://entrepreneurship.mit.edu/accelerator/demo-day/'
    ]

    def parse(self, response):
        """Walk the seed site: same-host links recurse here, external
        links are handed to the company-site parser."""
        hostname = self.extract_hostname(response)
        for l in self.le.extract_links(response):
            if self.extract_hostname(l) == hostname:
                yield response.follow(l.url, self.parse)
            else:
                yield response.follow(l.url, self.parse_company_site)

    def parse_company_site(self, response):
        """Emit a company Node; each distinct external base host gets one
        auto edge and is followed as a presumed news site."""
        hostname = self.extract_hostname(response)
        seen = set()
        yield Node(hostname=hostname, category=Category.company)
        for l in self.le.extract_links(response):
            l_hostname = self.extract_hostname(l)
            l_base_hostname = self.extract_base_hostname(l)
            if l_hostname == hostname:
                # NOTE(review): self.limited is not defined in this class —
                # presumably provided by base.BaseSpider; confirm.
                self.limited.add(hostname)
                yield response.follow(l.url, self.parse_company_site)
            elif l_base_hostname not in seen:
                seen.add(l_base_hostname)
                yield Edge(from_=hostname, to=l_base_hostname, auto=True)
                yield response.follow(l.url, self.parse_news_site)

    def parse_news_site(self, response):
        """Emit a news Node plus one manual (auto=False) edge per distinct
        outbound base host; news pages are not crawled further."""
        base_hostname = self.extract_base_hostname(response)
        seen = set()
        yield Node(hostname=base_hostname, category=Category.news)
        for l in self.le.extract_links(response):
            l_base_hostname = self.extract_base_hostname(l)
            if l_base_hostname not in seen:
                seen.add(l_base_hostname)
                yield Edge(from_=base_hostname, to=l_base_hostname, auto=False)
| yasyf/vcpr | vcpr/spiders/eclubs.py | eclubs.py | py | 1,579 | python | en | code | 0 | github-code | 13 |
9962084352 | import discord
from discord.ext import commands
import models.functions as func
from models.async_mcrcon import MinecraftClient
class ServerTool(commands.Cog):
    """
    ServerTool
    """
    # Discord cog exposing Minecraft-server utilities over RCON:
    # player listing, raw command passthrough, and username <-> Discord-ID
    # lookup. Command docstrings below are user-facing help text (shown by
    # the bot in Russian) and are therefore left untouched.

    def __init__(self, client):
        self.client = client
        # Minecraft '§'-prefixed formatting codes, stripped from RCON output.
        self.minecraftCharArray = ['§0', '§1', '§2', '§3', '§4', '§5', '§6', '§7', '§8', '§9', '§a', '§b', '§c', '§d', '§e', '§f', '§k', '§l', '§m', '§n', '§o', '§r']
        self.__rcon = func.getSettings('rconData')  # RCON address/port/password

    def charFilter(self, string: str, charArray: list) -> str:
        # Remove every listed substring from *string*.
        for char in charArray:
            string = string.replace(char, '')
        return string

    @commands.command(pass_context=True)
    async def online(self, ctx):
        """
        !online - узнать количество игроков и кто на сервере
        """
        # Ask the server for its player list; expected response shape:
        # "There are N of a max M players online: a, b, ..."
        async with MinecraftClient(self.__rcon['address'], self.__rcon['port'], self.__rcon['password']) as mc:
            response = await mc.send('list')
        response = self.charFilter(response, self.minecraftCharArray + [',', '/'])
        lastPlayers = int(response.split()[1])
        maxPlayers = int(response.split()[3])
        if lastPlayers != 0:
            arrayPlayers = response.split(': ')[1].split()
        if lastPlayers == 0:
            message = "На сервере сейчас никого нету"
        elif lastPlayers == 1:
            message = "На сервере сейчас только **{}**".format(arrayPlayers[0])
        elif lastPlayers > 1:
            message = "На сервере сейчас присутствует такие игроки как:"
            for player in arrayPlayers:
                message += f"\n **`{player}`**"
            message += "\nСумарно **{}** игроков из **{}**".format(lastPlayers, maxPlayers)
        await ctx.send(message)

    @commands.command(aliases=['cmd'])
    @commands.has_permissions(administrator = True)
    async def command_to_server(self, ctx, *, cmd):
        """
        !cmd - отправить команду на сервер
        """
        # Admin-only: forward an arbitrary command over RCON and echo the
        # (formatting-stripped) response back in a code block.
        async with MinecraftClient(self.__rcon['address'], self.__rcon['port'], self.__rcon['password']) as mc:
            response = await mc.send(cmd)
        response = self.charFilter(response, self.minecraftCharArray)
        await ctx.send("```" + response + "```")

    @commands.command(name='player')
    @commands.has_role(func.getSettings('roles_id')['logged_yes'])
    async def get_player_info(self, ctx, user):
        # Resolve a Minecraft username to a Discord mention, or a Discord
        # mention back to a username, via the 'users' collection.
        def get_id(user: str) -> int:
            # Extract the numeric ID from a '<@!12345>'-style mention.
            # NOTE(review): bare except also swallows KeyboardInterrupt.
            try:
                user = user.split('!')
                user = user[1].split('>')
                return int(user[0])
            except:
                return 0
        cursor = func.cursor_database('users')
        if cursor.count_documents({"username": user}):
            for data in cursor.find({"username": user}):
                await ctx.send(f'Это <@{data["discordID"]}>') # balance display disabled: **{ i["money"] }**¥
        elif cursor.count_documents({"discordID": get_id(user)}):
            for data in cursor.find({"discordID": get_id(user)}):
                await ctx.send(f'Это **{data["username"]}**') # balance display disabled: **{ i["money"] }**¥
        else:
            await ctx.send(f"Товарища **{user}** у нас нету и не было!")
def setup(client):
    # discord.py extension hook: register the cog on the bot.
    client.add_cog(ServerTool(client))
| DmytroFrame/dimoxa-bot | extensions/serverTool.py | serverTool.py | py | 3,465 | python | en | code | 0 | github-code | 13 |
10839457252 | from unittest.mock import MagicMock, AsyncMock
from aiogram.types import CallbackQuery, User
# Generic awaitable stand-in for handler events in tests.
fake_event = AsyncMock()


def make_fake_callback(data: str) -> CallbackQuery:
    """Build a minimal aiogram CallbackQuery carrying *data* from a dummy
    non-bot user (id=1), for exercising callback handlers in tests."""
    return CallbackQuery(
        from_user=User(id=1, is_bot=False, first_name='user'),
        id=1,
        chat_instance='1',
        data=data
    )
| DeveloperHackaton2023/tgbot | bot/tests/mocks/message.py | message.py | py | 328 | python | en | code | 0 | github-code | 13 |
23160437348 | # 小明身高1.75,体重80.5kg。请根据BMI公式(体重除以身高的平方)帮小明计算他的BMI指数,并根据BMI指数:
# 低于18.5:过轻
# 18.5-25:正常
# 高于38:严重肥胖
str = input("请输入你的体重: ")
weight = float(str)
if weight < 18.5:
print("你的体重过轻,请加强锻炼")
elif 18.5 <= weight and weight <= 25:
print("你的体重正常")
else:
print("你的体重过于肥胖")
| ywendeng/python_study | If_Else.py | If_Else.py | py | 454 | python | zh | code | 0 | github-code | 13 |
29266963499 | from flask import Flask, render_template, jsonify, request
from predict import predict
from utilities.download_csv_from_current_directory import download_csv_from_current_directory as read_csv
from utilities.find_entries_by_date_station import find_entries_by_date_station
from utilities.label_encoding import label_encoding
import pandas as pd
app = Flask(__name__)


@app.route('/', methods=['GET', 'POST'])
def home():
    """Serve the landing page."""
    return render_template('index.html')
@app.route('/getTemperature', methods=['POST'])
def getTemperature():
    """POST endpoint: look up weather entries for a date/station pair.

    Expects JSON {"date": ..., "station": ...}; responds with
    {"temperature": ...} on success or {"error": ...} with HTTP 400.
    """
    try:
        request_data = request.get_json()
        date = request_data.get('date')
        station = request_data.get('station')
        df = read_csv()
        entries = find_entries_by_date_station(df, date, station)
        print("got dated entries")
        print(entries[0].keys())
        min_temp = 0
        max_temp = 0
        # 'entry' replaces the original loop variable 'dict', which
        # shadowed the builtin.
        for entry in entries:
            # NOTE(review): entries were already filtered by *station*, yet
            # only 'WISDOM' rows are summed here — confirm intent.
            if entry["station_name"] == 'WISDOM':
                min_temp += entry["min_temp"]
                max_temp += entry['max_temp']
        X_test = pd.DataFrame([0, 0, 0, 0, 0, max_temp, min_temp, 0, 0, 0])
        # NOTE(review): the model call is disabled; the endpoint currently
        # echoes the requested date back as "temperature".
        # temperature = predict(X_test)
        temperature = date
        return jsonify({"temperature": temperature})
    except Exception as e:
        return jsonify({"error": str(e)}), 400
if __name__ == '__main__':
    # Development server only; use a WSGI server in production.
    app.run(debug=True)
5002934854 | import sys
import os
import shutil
import json
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
from config import get_config, create_config_file, remove_config_file, get_shell_profile_path, config_file_exists, FILE_PATH
from network_error_handler import remove_offline_store, submit_offline_store, handle_err
# CLI help: maps every supported invocation to its description.
commands = {
    "python nirvana_in.py --install": "Add 'nin' as a shell command",
    "nin --help": "Display a list of all commands",
    "nin INBOX_ITEM": "Adds INBOX_ITEM to your Nirvana Inbox",
    "nin INBOX_ITEM // NOTE": "Adds an inbox item with a note",
    "nin --refresh": "Submits all inbox items that were added offline",
    "nin --reset": "Resets data stored by NirvanaIn.py",
    "nin --uninstall": "Resets NirvanaIn.py and removes the shell command"
}
# Script file name used when building the shell alias.
FILE_NAME = "nirvana_in.py"
# Hidden JSON file tracking the number of successful submissions.
DATA_FILE = FILE_PATH + "/" + ".data"
class InboxService:
    """Sends inbox items to Nirvana by e-mailing them (via SendGrid) to
    the user's personal Nirvana inbox address."""

    def __init__(self, nin_service):
        self.nin_service = nin_service  # NirvanaInService, for stats bookkeeping

    def add_to_inbox(self, task, note, from_offline_store=False):
        """E-mail *task* (subject) and *note* (body) to the inbox address.

        On failure the item is kept in the offline store unless it already
        came from there (from_offline_store=True).
        """
        config = get_config()
        api_key, sender_email, inbox_addr = config["api_key"], config["sender_email"], config["inbox_addr"]
        save_on_err = not from_offline_store # we only want to save in offline store if it is not already in there
        # sendgrid won't send emails with no content. if note is empty, make it a single space.
        note = " " if (note is None or len(note) == 0) else note
        message = Mail(
            from_email=sender_email,
            to_emails=inbox_addr,
            subject=task,
            plain_text_content=note)
        # Terminate if no network connection (and keep task/note in offline store if it isn't already there)
        handle_err(task, note, save_offline=save_on_err)
        try:
            sg = SendGridAPIClient(api_key)
            response = sg.send(message)
            handle_err(task, note, response.status_code, save_offline=save_on_err) # will terminate if response code is success
            self.nin_service.increment_submission_count()
        except Exception as e:
            print(e)
            handle_err(task, note, save_offline=save_on_err, force=True)
        if not from_offline_store:
            # If this submission is not from the offline store, submit the items remaining in our offline store.
            submit_offline_store(self)
class NirvanaInService:
    """Installs/uninstalls the 'nin' shell alias and manages local data."""

    def get_current_path(self):
        # NOTE(review): abspath(__file__) already ends in the script name,
        # so dirname(abspath(__file__) + "/" + FILE_NAME) yields the full
        # script path — it works, but only by accident. Confirm.
        return os.path.dirname(os.path.abspath(__file__) + "/" + FILE_NAME)

    def get_shell_profile_txt(self):
        """Alias line appended to the user's shell profile."""
        return "alias nin='" + sys.executable + " " + self.get_current_path() + "'"

    def reset(self, force=False):
        """Delete config, data file and bytecode cache (best effort)."""
        try:
            remove_config_file(force)
            self.remove_data_file()
            shutil.rmtree("__pycache__")
        except OSError:
            pass

    def install_shell_cmd(self):
        """Append the 'nin' alias to the user's shell profile."""
        shell_profile_path = get_shell_profile_path()
        with open(shell_profile_path, "a") as f:
            f.write("\n" + self.get_shell_profile_txt())
        print("'nin' command has been added to your shell.")

    def uninstall_shell_cmd(self, uninstalled_msg=True):
        """Remove the alias line from the shell profile and wipe all data."""
        def _delete_line(file, prefix):
            # Rewrite *file*, keeping only lines that do not start with *prefix*.
            f = open(file, "r+")
            d = f.readlines()
            f.seek(0)
            for i in d:
                if not i.startswith(prefix):
                    f.write(i)
            f.truncate()
            f.close()
        if config_file_exists():
            # if the config file doesn't exist, calling get_shell_profile_path will trigger the setup process.
            # don't wanna go thru a setup during uninstallation
            # so we'll just remove the shell command during installation.
            _delete_line(get_shell_profile_path(), "alias nin")
        self.reset(force=True)
        remove_offline_store()
        if uninstalled_msg:
            print("NirvanaIn.py uninstalled. Restart your shell.")

    def increment_submission_count(self):
        """Create or bump the submission counter stored in DATA_FILE (JSON)."""
        data = {
            "submission_count": 1
        }
        if not os.path.isfile(DATA_FILE):
            with open(DATA_FILE, "w") as f:
                json.dump(data, f)
                f.close()
        else:
            with open(DATA_FILE, "r+") as f:
                data_obj = json.load(f)
                data_obj["submission_count"] += 1
                f.seek(0)
                f.truncate()
                json.dump(data_obj, f)
                f.close()

    def remove_data_file(self, force=False):
        """Delete DATA_FILE, prompting for confirmation unless *force*."""
        msg = "WARNING: Continuing will remove data file at " + DATA_FILE + ". Continue? y/n "
        continue_reset = "y"
        if not force:
            continue_reset = input(msg)
        if continue_reset == "y" and os.path.isfile(DATA_FILE):
            os.remove(DATA_FILE)
# ---- CLI entry point (runs at import; the original has no __main__ guard).
if (len(sys.argv) <= 1):
    print("usage: nin INBOX_ITEM")
    print("Use 'nin --help' for a list of commands.")
    exit(1)
else:
    nin_service = NirvanaInService()
    inbox_service = InboxService(nin_service)
    arg = sys.argv[1]
    if arg == "--help":
        print(json.dumps(commands, indent=4, sort_keys=True))
    elif arg == "--reset":
        nin_service.reset()
    elif arg == "--install":
        # uninstall if it already exists
        nin_service.uninstall_shell_cmd(uninstalled_msg=False)
        print("Starting setup...")
        create_config_file()
        nin_service.install_shell_cmd()
        print("Install completed. Restart your shell.")
        exit(0)
    elif arg == "--uninstall":
        confirm = input("This will remove your data file, api keys, offline store and all other data. Continue? y/n ")
        if confirm == "y":
            nin_service.uninstall_shell_cmd()
    elif arg == "--refresh":
        submit_offline_store(inbox_service, True)
    elif len(arg) > 2 and arg[0:2] == "--":
        print("Invalid 'nin' command. Type 'nin --help' for a list of commands")
    else:
        # Everything before '//' joins the task; everything after is the note.
        task = arg
        note = ""
        record_as_note = False
        for i in range(2, len(sys.argv)):
            word = sys.argv[i]
            if word == "//":
                record_as_note = True
            elif record_as_note:
                note += " "
                note += word
            else:
                task += " "
                task += word
        inbox_service.add_to_inbox(task, note)
| tash-had/TerminalTodo | nirvana_in.py | nirvana_in.py | py | 6,370 | python | en | code | 3 | github-code | 13 |
1721118717 | import os
# noinspection PyUnresolvedReferences
from pybgfx import bgfx
from pybgfx.utils.imgui_utils import ImGuiExtra
from pybgfx.constants import (
BGFX_CLEAR_COLOR,
BGFX_CLEAR_DEPTH,
BGFX_DEBUG_TEXT,
BGFX_RESET_VSYNC,
)
from examples.example_window import ExampleWindow
from examples.helloworld import python_image
from examples.utils.imgui_utils import show_example_dialog
class HelloWorld(ExampleWindow):
    """bgfx 'helloworld' sample: debug-text Python logo plus an ImGui dialog."""

    def __init__(self, width, height, title):
        super().__init__(width, height, title)
        # Renderer start-up parameters: debug on, vsync'd at window size.
        self.init_conf = bgfx.Init()
        self.init_conf.debug = True
        self.init_conf.resolution.width = self.width
        self.init_conf.resolution.height = self.height
        self.init_conf.resolution.reset = BGFX_RESET_VSYNC

    def init(self, platform_data):
        """Bind bgfx to the native window, configure view 0, start ImGui."""
        bgfx.renderFrame()
        bgfx.setPlatformData(platform_data)
        bgfx.init(self.init_conf)
        bgfx.setDebug(BGFX_DEBUG_TEXT)
        bgfx.setViewClear(0, BGFX_CLEAR_COLOR | BGFX_CLEAR_DEPTH, 0x443355FF, 1.0, 0)
        ImGuiExtra.create()

    def shutdown(self):
        """Tear down ImGui first, then bgfx."""
        ImGuiExtra.destroy()
        bgfx.shutdown()

    def update(self, dt):
        """Per-frame work: ImGui dialog, centered logo, debug-text demo lines."""
        mouse_x, mouse_y, buttons_states = self.get_mouse_state()
        ImGuiExtra.begin_frame(
            int(mouse_x), int(mouse_y), buttons_states, 0, self.width, self.height
        )
        show_example_dialog()
        ImGuiExtra.end_frame()
        bgfx.setViewRect(0, 0, 0, self.width, self.height)
        bgfx.touch(0)
        bgfx.dbgTextClear(0, False)
        # Center the 40x12-character logo on the debug-text grid
        # (debug-text cells are 8x16 pixels).
        bgfx.dbgTextImage(
            int(max(self.width / 2 / 8, 20)) - 20,
            int(max(self.height / 2 / 16, 6)) - 6,
            40,
            12,
            python_image.logo,
            160,
        )
        stats = bgfx.getStats()
        bgfx.dbgTextPrintf(
            1,
            1,
            0x0F,
            "Color can be changed with ANSI \x1b[9;me\x1b[10;ms\x1b[11;mc\x1b[12;ma\x1b[13;mp\x1b[14;me\x1b[0m code too.",
        )
        bgfx.dbgTextPrintf(
            80,
            1,
            0x0F,
            "\x1b[;0m \x1b[;1m \x1b[; 2m \x1b[; 3m \x1b[; 4m \x1b[; 5m \x1b[; 6m \x1b[; 7m \x1b[0m",
        )
        bgfx.dbgTextPrintf(
            80,
            2,
            0x0F,
            "\x1b[;8m \x1b[;9m \x1b[;10m \x1b[;11m \x1b[;12m \x1b[;13m \x1b[;14m \x1b[;15m \x1b[0m",
        )
        bgfx.dbgTextPrintf(
            1,
            2,
            0x0F,
            f"Backbuffer {stats.width}W x {stats.height}H in pixels, debug text {stats.textWidth}W x {stats.textHeight}H in characters.",
        )
        bgfx.frame()

    def resize(self, width, height):
        """Recreate the backbuffer at the stored window size."""
        bgfx.reset(
            self.width, self.height, BGFX_RESET_VSYNC, self.init_conf.resolution.format
        )
if __name__ == "__main__":
test = HelloWorld(1280, 720, "examples/helloworld")
test.run()
| fbertola/bgfx-python | examples/helloworld/helloworld.py | helloworld.py | py | 2,926 | python | en | code | 117 | github-code | 13 |
19428969076 | import time , RP, math
LCD = RP.LCD_1inch28()  # 240x240 round display driver
LCD.set_bl_pwm(15535)   # backlight brightness (PWM duty, 0-65535)
cx , cy =120 ,120 #center of watch
def spin(tic, spinLen, color):
    """Draw a straight hand of length *spinLen* from the watch centre.

    *tic* selects the time.localtime() field (4 = minutes, 5 = seconds);
    each unit maps to 6 degrees of rotation.
    """
    angle = math.radians(time.localtime()[tic] * 6)
    dx = spinLen * math.sin(angle)
    dy = spinLen * math.cos(angle)
    LCD.line(cx, cy, int(cx + dx), int(cy - dy), color)
def hourspin(spinLen, color):
    """Draw the hour hand on a 12-hour dial: 30 degrees per hour plus
    0.5 degrees per elapsed minute."""
    t = time.localtime()
    dial_hour = t[3] % 12  # fold the 24-hour clock onto the 12-hour dial
    angle = math.radians(dial_hour * 30 + (t[4] / 2))
    dx = spinLen * math.sin(angle)
    dy = spinLen * math.cos(angle)
    LCD.line(cx, cy, int(cx + dx), int(cy - dy), color)
def centerCircle(tic, spinLen, color):
    """Fill a disc at the watch centre whose radius grows with the
    selected time field (2 px per unit). Pixel-by-pixel fill is O(r^2);
    *spinLen* is accepted for signature parity but unused."""
    radius = time.localtime()[tic] * 2
    r_sq = radius * radius
    for dx in range(-radius, radius):
        for dy in range(-radius, radius):
            if dx * dx + dy * dy <= r_sq:
                LCD.pixel(cx + dx, cy + dy, color)
def runDotRing(tic, spinLen, color):
    """Draw a filled dot (radius 10) orbiting the centre at distance
    *spinLen*, advancing 6 degrees per unit of the selected time field."""
    dot_r = 10
    angle = math.radians(time.localtime()[tic] * 6)
    ox = int(spinLen * math.sin(angle))
    oy = int(spinLen * math.cos(angle))
    limit = dot_r * dot_r
    for dx in range(-dot_r, dot_r):
        for dy in range(-dot_r, dot_r):
            if dx * dx + dy * dy <= limit:
                LCD.pixel(cx + ox + dx, cy - oy + dy, color)
# Main loop: redraw the whole watch face every frame.
while 1:
    LCD.fill_rect(0,0,240,240,LCD.white)
    centerCircle(5,120,0x180f)# concentric growing disc as second indicator
    runDotRing(5,110,LCD.red)# red dot orbiting with the seconds
    spin(5,120,LCD.red)# plain second hand
    spin(4,100,LCD.black)# minute hand
    hourspin(50 ,LCD.cyan )
    LCD.show()
| chyijiunn/picoWatch | 12_centerSec.py | 12_centerSec.py | py | 1,578 | python | en | code | 0 | github-code | 13 |
34897410489 | import unittest
import pandas as pd
from parameterized import parameterized
import nose2
colum_names = ['Destination.IP', 'Timestamp', 'Flow.Duration', 'Flow.Bytes.s', 'Average.Packet.Size', 'ProtocolName']
def bytes_transfered(flow_duration, flow_bytes):
    """Total bytes moved over a flow: duration times byte rate."""
    return flow_duration * flow_bytes
def number_packets(bytes_transfered, packet_size):
    """Packet count implied by total bytes at a given average packet size.

    Raises ZeroDivisionError when packet_size is 0 (relied on by the tests).
    """
    return bytes_transfered / packet_size
class DfTests(unittest.TestCase):
    """Schema sanity checks on the Unicauca network-traffic dataset."""

    def setUp(self):
        # Load only the columns under test; on I/O failure self.fixture is
        # left unset and the error is printed (tests will then error out).
        try:
            data = pd.read_csv("Dataset-Unicauca-Version2-87Atts.csv",
                usecols=['Destination.IP', 'Timestamp', 'Flow.Duration', 'Flow.Bytes.s', 'Average.Packet.Size', 'ProtocolName'])
            self.fixture = data
        except IOError as e:
            print(e)

    # testing colum names
    def test_colnames(self):
        self.assertListEqual(list(self.fixture.columns), colum_names)

    # testing timestamp format
    def test_timestamp_format(self):
        # Expected shape e.g. '26/04/201711:11' — the \d{6} run is the
        # year fused with the hour; the regex is unanchored.
        ts = self.fixture["Timestamp"]
        [self.assertRegex(i, "\d{2}/\d{2}/\d{6}:\d{2}:\d{2}") for i in ts]
# test cases for bytes_transfered function
def test_cases_1():
    """Create test cases in the format of a dataframe."""
    # Each row: [case name, operand a, operand b, expected result,
    # expected exception (None when the call should succeed)].
    cases = {
        'test_1': ['negative_int_test', -2, 2, -4, None],
        'test_2': ['positive_int_test', 2, 2, 4, None],
        'test_3': ['decimal_test', .5, .4, 0.2, None],
        'test_4': ['none_type_test', None, 2, None, TypeError],
        'test_5': ['string_type_test', '10', 1, None, TypeError],
    }
    frame = pd.DataFrame.from_dict(cases, orient='index')
    frame.columns = ['name', 'a', 'b', 'expected_output', 'expected_error']
    # Hand back plain (name, a, b, expected_output, expected_error) tuples.
    return list(frame.itertuples(index=False, name=None))
# test cases for number_packets function
def test_cases_2():
    """Create test cases in the format of a dataframe."""
    # Each row: [case name, operand a, operand b, expected result,
    # expected exception (None when the call should succeed)].
    cases = {
        'test_1': ['negative_int_test', -2, 2, -1, None],
        'test_2': ['positive_int_test', 2, 2, 1, None],
        'test_3': ['decimal_test', .5, .4, 1.25, None],
        'test_4': ['none_type_test', None, 2, None, TypeError],
        'test_5': ['string_type_test', '10', 1, None, TypeError],
        'test_6': ['zero_division_test', 2, 0, None, ZeroDivisionError],
    }
    frame = pd.DataFrame.from_dict(cases, orient='index')
    frame.columns = ['name', 'a', 'b', 'expected_output', 'expected_error']
    # Hand back plain (name, a, b, expected_output, expected_error) tuples.
    return list(frame.itertuples(index=False, name=None))
class TestSuite(unittest.TestCase):
    """Parameterized unit tests driven by the case tables above."""

    @parameterized.expand(
        test_cases_1()
    )
    # testing bytes_transfered function
    def test_bytes_transfered(self, name, a, b, expected_output, expected_error=None):
        if expected_error is None:
            assert bytes_transfered(a, b) == expected_output
        else:
            with self.assertRaises(expected_error):
                bytes_transfered(a, b)

    @parameterized.expand(
        test_cases_2()
    )
    # testing number_packets function
    def test_number_packets(self, name, a, b, expected_output, expected_error=None):
        if expected_error is None:
            assert number_packets(a, b) == expected_output
        else:
            with self.assertRaises(expected_error):
                number_packets(a, b)
if __name__ == '__main__':
    # nose2 discovers and runs both unittest classes defined above.
    nose2.main()
15271003703 | #############################
## market visualize seperate sources
############################
import pandas as pd
import matplotlib as plt
import numpy as np
from os import listdir
from cryptocmd import CmcScraper
import pickle
from copy import deepcopy
from string import punctuation
from random import shuffle
import gensim
from gensim.models.word2vec import Word2Vec
LabeledSentence = gensim.models.doc2vec.LabeledSentence
from tqdm import tqdm
tqdm.pandas(desc="progress-bar")
from nltk.tokenize import TweetTokenizer # a tweet tokenizer from nltk.
tokenizer = TweetTokenizer()
import re
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import scale
from keras.models import load_model
import matplotlib.pyplot as plt
def ingest(type = 'main'):
    """Load one of the labelled tweet corpora as a DataFrame carrying
    'Sentiment' (0/1) and 'SentimentText' columns.

    type -- 'main' (Sentiment140-style CSV), 'transfer' (stock tweets) or
    'new' (general corpus). NOTE(review): the parameter shadows the
    builtin ``type`` (renaming would break keyword callers), and an
    unrecognised value raises NameError at the final return — confirm.
    """
    if type == "main":
        data = pd.read_csv('../data/trainingandtestdata/tweets.csv',encoding='latin-1',
            header = None, names=['Sentiment','ItemID','Date','Blank','SentimentSource','SentimentText'])
        data.drop(['ItemID', 'SentimentSource'], axis=1, inplace=True)
        data = data[data.Sentiment.isnull() == False]
        data['Sentiment'] = data['Sentiment'].map(int)
        # Sentiment140 labels: 4 = positive -> 1, 0 = negative -> 0.
        data['Sentiment'] = data['Sentiment'].map( {4:1, 0:0} )
        data = data[data['SentimentText'].isnull() == False]
        data.reset_index(inplace=True)
        data.drop('index', axis=1, inplace=True)
        print('dataset loaded with shape', data.shape)
    if type == 'transfer':
        data = pd.read_csv('../data/twitter_stock_1.csv')
        # Shuffle the rows before use.
        data = data.sample(frac=1).reset_index(drop=True)
        data = data[data.sentiment.isnull() == False]
        data['Sentiment'] = data.sentiment.map( {'negative':0, 'positive':1} )
        data['SentimentText'] = data.text
        data.drop(['created_at','text','sentiment'], axis=1, inplace=True)
        data.reset_index(inplace=True)
        data.drop('index', axis=1, inplace=True)
        print('dataset loaded with shape', data.shape)
    if type == 'new':
        data = pd.read_csv('../data/tweets_corpus_general.csv', error_bad_lines=False)
        # Column names in this dump carry literal stray quote characters.
        data['Sentiment'] = data['target""'].map( {'negative""':0, 'positive""':1} )
        data = data[data['Sentiment'].isnull() == False]
    return data
def handle_emojis(tweet):
    """Replace emoticons in *tweet* with EMO_POS / EMO_NEG sentiment tokens.

    Substitutions run in a fixed order so the output is identical to the
    original chained re.sub calls.
    """
    substitutions = (
        (r'(:\s?\)|:-\)|\(\s?:|\(-:|:\'\))', ' EMO_POS '),  # :) : ) :-) (: ( : (-: :')
        (r'(:\s?D|:-D|x-?D|X-?D|;D)', ' EMO_POS '),         # :D : D :-D xD x-D XD X-D
        (r'(<3|:\*)', ' EMO_POS '),                         # <3 :*
        (r'(;-?\)|;-?D|\(-?;)', ' EMO_POS '),               # ;-) ;) ;-D ;D (; (-;
        (r'(:\s?\(|:-\(|\)\s?:|\)-:)', ' EMO_NEG '),        # :-( : ( :( ): )-:
        (r'(:,\(|:\'\(|:"\()', ' EMO_NEG '),                # :,( :'( :"(
    )
    for pattern, token in substitutions:
        tweet = re.sub(pattern, token, tweet)
    return tweet
def tokenize(tweet):
    """Normalise and tokenize a tweet for the sentiment model.

    Lower-cases, maps emoticons to EMO_POS/EMO_NEG, collapses URLs to
    'URL', @handles to 'USER_MENTION' and '#tag' to 'tag', then runs the
    module-level NLTK TweetTokenizer. Returns the string 'NC' ("no
    content") when processing fails for any reason.
    """
    try:
        tweet = handle_emojis(tweet)
        tweet = tweet.lower()
        tweet = re.sub(r'((www\.[\S]+)|(https?://[\S]+))', ' URL ', tweet)
        # Replace @handle with the word USER_MENTION
        tweet = re.sub(r'@[\S]+', 'USER_MENTION', tweet)
        # Replaces #hashtag with hashtag
        tweet = re.sub(r'#(\S+)', r' \1 ', tweet)
        tokens = tokenizer.tokenize(tweet)
        return tokens
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return 'NC'
def postprocess(data, n=1000000):
    """Tokenize every tweet in *data* and drop rows whose tokenization
    failed (sentinel 'NC'); *n* is an unused legacy row cap."""
    data['tokens'] = data['SentimentText'].progress_map(tokenize) ## progress_map is a variant of the map function plus a progress bar. Handy to monitor DataFrame creations.
    data = data[data.tokens != 'NC']
    data.reset_index(inplace=True)
    data.drop('index', inplace=True, axis=1)
    return data
def labelizeTweets(tweets, label_type):
    """Wrap each token list in a gensim LabeledSentence tagged
    '<label_type>_<position>' (doc2vec-style training input)."""
    labelized = []
    for i,v in tqdm(enumerate(tweets)):
        label = '%s_%s'%(label_type,i)
        labelized.append(LabeledSentence(v, [label]))
    return labelized
def buildWordVector(tokens, size, tweet_w2v, tfidf):
    """TF-IDF-weighted mean word vector for a token list, shape (1, size).

    Tokens missing from either the embedding or the tf-idf vocabulary are
    skipped; an input with no known tokens yields the zero vector.
    """
    total = np.zeros((1, size))
    matched = 0
    for word in tokens:
        if word in tweet_w2v and word in tfidf:
            total = total + tweet_w2v[word].reshape((1, size)) * tfidf[word]
            matched += 1
    if matched:
        total = total / matched
    return total
# Pre-trained 300-d Google News embeddings (large binary); loaded once at import.
tweet_w2v = gensim.models.KeyedVectors.load_word2vec_format('../model_saved/GoogleNews-vectors-negative300.bin', binary = True)
# All per-source tweet dumps collected by the streamer.
onlyfiles = [f for f in listdir('../twitter_stream/seperate')]
def get_market(coin = 'BTC'):
    """Download daily market data for *coin* (06-03-2018 .. 30-05-2018)
    from CoinMarketCap and export it as CSV into the market directory."""
    # Fix: use the requested coin instead of the hard-coded 'BTC', which
    # made the parameter a no-op (default keeps the old behaviour).
    scraper = CmcScraper(coin, '06-03-2018', '30-05-2018')
    headers, data = scraper.get_data()
    scraper.export_csv(csv_path='/home/cedric/Documents/UM/Info_mining/twitter_stream/market')
def clean_source(file):
    """Score one per-source tweet dump with the saved sentiment model and
    return a daily sentiment mean/std DataFrame from 2018-03-06 onward."""
    data = pd.read_csv('../twitter_stream/seperate/' + str(file))
    data['SentimentText'] = data.text
    data['tokens'] = data['SentimentText'].progress_map(tokenize) ## progress_map is a variant of the map function plus a progress bar. Handy to monitor DataFrame creations.
    data = data[data.tokens != 'NC']
    data.reset_index(inplace=True)
    data.drop('index', inplace=True, axis=1)
    data.drop('id', inplace=True, axis=1)
    dates = data.created_at
    ## Vectorise tweets: tf-idf-weighted mean word2vec embeddings.
    data = np.array(data.tokens)
    data = labelizeTweets(data, 'TEST')
    print('building tf-idf matrix ...')
    vectorizer = TfidfVectorizer(analyzer=lambda x: x, min_df=10)
    matrix = vectorizer.fit_transform([x.words for x in data])
    tfidf = dict(zip(vectorizer.get_feature_names(), vectorizer.idf_))
    print('vocab size :', len(tfidf))
    import gensim
    n_dim = 300
    n=1000000
    pred_data = np.concatenate([buildWordVector(z, n_dim, tweet_w2v, tfidf) for z in tqdm(map(lambda x: x.words, data))])
    pred_data = scale(pred_data)
    ##################
    ## MODEL
    ##
    # Keras classifier trained elsewhere; outputs sentiment scores in [0, 1].
    model = load_model('../model_saved/current_transfer.h5')
    sentiment = model.predict(pred_data)
    df = pd.DataFrame({'date':dates.tolist(),'sentiment':sentiment.ravel().tolist()})
    df.date = pd.to_datetime(df.date)
    df.index = df.date
    df.drop('date',inplace = True, axis = 1)
    # Centered 25-sample rolling window, then daily mean/std aggregation.
    df = df.sentiment.rolling(25, center = True)
    df = pd.DataFrame({'sentiment':df.mean(),'std':df.std()})
    df = df.groupby(pd.TimeGrouper(freq='1D')).mean()
    df = df.loc['2018-03-06':]
    return df
plt.ion()
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
             (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
             (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
             (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
             (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Matplotlib expects RGB channels in [0, 1], so rescale the 0-255 values.
for i in range(len(tableau20)):
    r, g, b = tableau20[i]
    tableau20[i] = (r / 255., g / 255., b / 255.)
# plot_type 0: one sentiment line per source file;
# any other value: all sources merged, averaged, and overlaid with BTC price.
plot_type =1
if plot_type == 0:
    plt.figure(figsize=(12, 14))
    ax = plt.subplot(111)
    # Remove the plot frame for a cleaner look.
    ax.spines["top"].set_visible(False)
    ax.spines["bottom"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    for i in range(len(onlyfiles)):
        df = clean_source(onlyfiles[i])
        # Forward-fill gaps left by days without tweets.
        df = df.fillna(method='ffill')
        plt.plot(df.sentiment,color=tableau20[i])
        # Label each line at its right end with the source filename (sans '.csv').
        plt.text('2018-05-31', df.sentiment[-1], onlyfiles[i][:-4], fontsize=10, color=tableau20[i])
    plt.show()
else:
    # Merge every source, then average per day across sources.
    merged = pd.DataFrame()
    for i in range(len(onlyfiles)):
        df = clean_source(onlyfiles[i])
        df = df.fillna(method='ffill')
        merged = pd.concat([merged,df])
    merged = merged.groupby(pd.TimeGrouper(freq='1D')).mean()
    plt.figure(figsize=(12, 14))
    ax = plt.subplot(111)
    ax.spines["top"].set_visible(False)
    ax.spines["bottom"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    plt.ylim(0.54, 0.85)
    # Shade +/- one rolling standard deviation around the mean sentiment.
    plt.fill_between(merged.index, merged.sentiment - merged['std'],
                     merged.sentiment + merged['std'], color="#3F5D7D")
    plt.plot(merged.index, merged.sentiment, color="white", lw=2)
    # Overlay the smoothed Bitcoin closing price on a secondary y-axis.
    prices = pd.read_csv('../twitter_stream/market/this.csv')
    prices.Date = pd.to_datetime(prices.Date)
    prices.index = prices.Date
    prices.drop(['Date','Open*', 'High', 'Low', 'Volume', 'Market Cap'],inplace = True, axis = 1)
    prices = prices.Close.rolling(3, center = True)
    prices = pd.DataFrame({'Close':prices.mean()})
    prices = prices.fillna(method='ffill')
    axes2 = plt.twinx()
    axes2.bar(prices.index, prices['Close'], color=tableau20[3], label='Bitcoin Closing Price',linewidth=4)
    #axes2.set_ylim(-1, 1)
    axes2.set_ylabel('USD')
    plt.ylim(6000,11000)
    plt.legend()
    plt.show()
### interpolate nans
### get all sources, average them, get error
### plot each source
### plot all sources and market data
| cedricoeldorf/DeepTransferLearning_BTCPricePrediction | models/seperate_market_visualization.py | seperate_market_visualization.py | py | 9,302 | python | en | code | 1 | github-code | 13 |
3189694533 | import pandas as pd
import numpy as np
import scipy as sc
from scipy import optimize
import cvxpy as cp
def project_first_teams(data, columns, numberOfProjects, maxTeamSize, numberOfChoices, significantCols, isCsv = False):
    """
    Assign each student to one project via mixed-integer linear programming.

    Builds a cost vector from each student's ranked project choices (rank 1-3
    cost 1-3, every other project costs 100), then solves a MILP with CVXPY's
    GLPK_MI solver so that every student gets exactly one project and no team
    exceeds `maxTeamSize` members.

    Parameters
    ----------
    data : str or array-like
        CSV path when `isCsv` is True, otherwise raw row data for a DataFrame.
    columns : list
        Column names used when building the DataFrame from raw data.
    numberOfProjects : int
        Number of teams/projects (m).
    maxTeamSize : int
        Maximum number of students per team (t).
    numberOfChoices : int
        NOTE(review): appears unused in this function — confirm before removing.
    significantCols : list
        Columns of the survey that hold each student's ranked project choices.
    isCsv : bool
        Whether `data` is a CSV path (needed for the Jupyter notebook flow).

    Returns
    -------
    pandas.DataFrame
        The input survey rows with an added 'Team' column, sorted by team.
    """
    # Initialization, Reading the survey information
    if isCsv: ## Needed for the jupyter notebook
        df = pd.read_csv(data)# people, pp-mg-sum21.csv
    else:
        df = pd.DataFrame(data = data, columns = columns)
    n = df.shape[0] # Number of students
    m = int(numberOfProjects) # Number of teams
    t = int(maxTeamSize) # Maximal Team size
    # NOTE: We will number the projects from 1 to numberOfProjects to set up the solver.
    # NOTE: To do this, we need to extract all the projects' names, and assign them to 1-numberOfProjects
    dict_init = pd.unique(df.loc[:,significantCols].values.ravel('K')) # Gathering all the different projects' names
    print("Significant Columns:{}".format(significantCols))
    dict_init.sort()
    print(dict_init)
    # Forward map (name -> number) and inverse map (number -> name).
    dictionary = {dict_init[i-1]: i for i in range(1,len(dict_init)+1)}
    invert_dic = {i: dict_init[i-1] for i in range(1,len(dict_init)+1)}
    #print(dictionary)
    #print(invert_dic)
    choices_raw = df.loc[:,significantCols]
    choices_toNumber = choices_raw.replace(dictionary)
    choices = choices_toNumber.to_numpy()
    #print(choices)
    # OPTIMIZATION
    ## By default, each project for each student has a value of 100.
    ## Each of the 3 student's favorites projects' values will be set to 1, 2, and 3.
    ## The decision variables, Xij, are equal to 1 if the student i is assigned to the project j, 0 otherwise.
    ## CONVENTION: the vectors X (decision variable) & c (weights) dimension is n*m, i.e. numberOfStudents*numberOfProjects
    ## CONVENTION (2): c = (C11, C12,...C1m, C21,...C2m,...Cnm).
    ## CONVENTION (3) I.e., the weight of the project j for the student i will be in position m*i+j-1 (starting at 0) in c.
    ## WEIGHTS
    c_choices = np.full((n*m,1),100) # Setting all the weights to 100.
    for i in range(choices.shape[0]): # For each student: i = student number from 0 to n-1
        for k in range(choices.shape[1]): # For all the possible projects: k = choice number for the student i
            c_choices[i*m+int(choices[i,k])-1]=k+1
    ## CONSTRAINTS
    b_eq = np.ones((n,1)) # -> Equality constraints: each student must be assigned to 1 project
    b_ub = t*np.ones((m,1)) # -> Inequality constraints: each team must have at most t number of students assigned to it
    A_eq = np.zeros((n,n*m)) # -> Initialization
    A_ub = np.zeros((m,n*m)) # -> Initialization
    for row_eq in range(n):
        for i in range(m):
            A_eq[row_eq,row_eq*m+i]=1
    for row_ub in range(m):
        for i in range(n):
            A_ub[row_ub,row_ub+i*m]=1
    ## BOUNDS FOR THE ANSWERS
    # NOTE(review): lb/ub are only used by the legacy scipy path (commented out
    # below); the CVXPY formulation enforces the bounds via x_lb/x_ub instead.
    lb, ub = 0, 1 # Lower and upper bound for the decision variables
    ## MILP Programming - Using CVXPY
    ### Documentation: https://www.cvxpy.org/index.html
    ### Inspiration: https://towardsdatascience.com/integer-programming-in-python-1cbdfa240df2
    x = cp.Variable((n*m,1), integer=True) # Defining variables x - integer
    ### OPTIMIZATION PROBLEM
    objective = cp.Minimize(x.T @ c_choices) # Minimize the weights of the allocations.
    ### CONSTRAINTS
    team_size_constraint = A_ub @ x <= b_ub # Each team must have at most t participants
    assignments_constraint = A_eq @ x == b_eq # Each participant must be assigned to 1 project
    x_lb = 0<=x # Binary var
    x_ub = 1>=x # Binary var
    constraints = [team_size_constraint, assignments_constraint, x_lb, x_ub] # All constraints
    ### SOLVER
    opti_problem = cp.Problem(objective,constraints)
    opti_problem.solve(solver=cp.GLPK_MI)
    # Translate the solved decision vector back into project names per student.
    df['Team'] = 'TBA'
    for i in range(n): # for each student
        sub_list = x.value[i*m:(i+1)*m]
        for j in range(m): # we will search its project
            if sub_list[j]>=1:
                df.at[i, 'Team'] = invert_dic[j+1]
    df = df.sort_values('Team',ascending=True)
    ### LEGACY - IN CASE CVXPY DOES NOT WORK
    ## OPTIMIZATION - Objective: min cTx. Cf doc: https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linprog.html#scipy.optimize.linprog
    #res = sc.optimize.linprog(c = c_choices, A_ub=A_ub, b_ub=b_ub, A_eq=A_eq, b_eq=b_eq, bounds=(lb,ub), method='simplex', callback=None, options=None, x0=None)
    #x_res = res['x']
    #x_res = np.floor(x_res+0.5) # In case results are not integers
    #x_res_team = [[] for i in range(m)] # Final result to return: teammates for each project
    return df
| jojoqjchen/teamFormation | jojoAttempt2/teamFormationCode/project_first.py | project_first.py | py | 4,652 | python | en | code | 1 | github-code | 13 |
29292900398 | # -*- coding: utf-8 -*-
import sys,json
from datetime import datetime
sys.path.append('/root/Project/Api/Class')
sys.path.append('/root/Project/Api/Service')
sys.path.append('/root/Project/Api/Db')
sys.path.append('/root/Project/Api/Util')
sys.path.append('/root/Project/Api/Constant')
#------------------Local Component-------------------
#--Class--
from Offer import Offer
from OfferStatistic import OfferStatistic
#--Db--
from OfferDatabaseLayer import OfferDatabase
#--Others--
from Util import Util
import Constant,ErrorMessages,SuccessMessages
class OfferService(object):
    """
    Service layer for offers: creation, retrieval, likes, statistics and
    agreements. All persistence is delegated to OfferDatabase; responses are
    dicts with 'Status', 'Data', 'Code' and 'Message' keys (as produced here
    or passed through from the database layer).
    """
    def __init__(self):
        # Database access layer and shared utility helpers (URL checks, timestamps).
        self.offer_db_instance = OfferDatabase()
        self.util_instance = Util()
    def AddOfferService(self,offer_json):
        """Deserialize *offer_json*, validate its YouTube URL and persist it."""
        offer_instance = Offer()
        offer_instance.Deserialize(offer_json)
        # Reject the offer early if its URL is not a valid YouTube link.
        url_flag = self.util_instance.VerifyYoutubeLink(offer_instance.url)
        if url_flag['Status'] is not Constant.OK:
            return url_flag
        offer_instance.SetVariablesWithRules()
        result= self.offer_db_instance.AddOfferDb(offer_instance)
        return result
    def AddLikeOfferService(self,like_json):
        """Record that the user liked the offer, timestamped with 'now'."""
        result= self.offer_db_instance.AddOfferLikeDb(like_json['offer_id'], like_json['user_id'], self.util_instance.GetDateTimeNow())
        return result
    def GetOfferGlobalList(self,user_id):
        """Return all global offers, each enriched with profile/like/agreement info for *user_id*."""
        result = self.offer_db_instance.GetOfferGlobalList(user_id)
        offer_obj_list = []
        offer_list = result['Data']
        # Each row is an (offer, profile) pair from the database layer.
        for index in range(0,len(offer_list)):
            offer_obj = self.SetOfferForGlobal(offer_list[index][0], offer_list[index][1], user_id)
            offer_obj_list.append(offer_obj)
        return({'Status': Constant.OK, 'Data':offer_obj_list, 'Code':'', 'Message':''})
    def GetOfferService(self,offer_id):
        """Fetch a single offer by id; passes DB errors straight through."""
        result = self.offer_db_instance.GetOfferDb(offer_id)
        if result['Status'] == Constant.ERROR:
            return result
        offer_instance = Offer()
        offer_instance.SetFromDb(result['Data'])
        return({'Status': Constant.OK, 'Data':offer_instance.ToJson(), 'Code':'', 'Message':''})
    def UpdateOfferService(self, offer_json):
        """Deserialize and persist changes to an existing offer."""
        offer_instance = Offer()
        offer_flag = offer_instance.Deserialize(offer_json)
        if offer_flag is not Constant.OK:
            return offer_flag
        offer_instance.SetVariablesWithRules()
        result = self.offer_db_instance.UpdateOfferDb(offer_instance)
        return result
    def DeleteOfferService(self, offer_json):
        """Delete the offer identified by offer_id, on behalf of user_id."""
        result = self.offer_db_instance.DeleteOfferDb(offer_json['offer_id'], offer_json['user_id'])
        return result
    def OfferStatisticService(self, statistic_json):
        """Record a viewing or share event for the offer, depending on statistic_type."""
        # NOTE(review): statistic types other than VIEWING/SHARE fall through
        # and implicitly return None — confirm this is intended.
        if statistic_json['statistic_type'] == Constant.VIEWING_STATISTIC_TYPE:
            result = self.offer_db_instance.OfferStatisticViewingDb(statistic_json['offer_id'], statistic_json['user_id'], self.util_instance.GetDateTimeNow())
            return result
        elif statistic_json['statistic_type'] == Constant.SHARE_STATISTIC_TYPE:
            result = self.offer_db_instance.OfferStatisticShareDb(statistic_json['offer_id'], statistic_json['user_id'], self.util_instance.GetDateTimeNow())
            return result
    def OfferGetStatisticService(self, statistic_json):
        """Return the aggregated statistics for an offer as a JSON-able dict."""
        result = self.offer_db_instance.GetOfferStatisticDb(statistic_json['offer_id'], statistic_json['user_id'])
        offer_statistic_instance = OfferStatistic()
        offer_statistic_instance.SetFromDb(result['Data'])
        return({'Status': Constant.OK, 'Data':offer_statistic_instance.ToJson(), 'Code':'', 'Message':''})
    def OfferAgreementCompletedService(self,agreement_json):
        """Record that the user completed an agreement for the offer."""
        result= self.offer_db_instance.AddOfferAgreementDb(agreement_json['offer_id'], agreement_json['user_id'], self.util_instance.GetDateTimeNow())
        return result
    def SetOfferForProfile(self,offer_list,user_id):
        """Convert raw DB offer rows into JSON dicts with like/agreement flags for *user_id*."""
        offer_obj_list = []
        for index in range(0,len(offer_list)):
            offer_instance = Offer()
            offer_instance.SetFromDb(offer_list[index])
            offer_instance.is_liked= self.offer_db_instance.GetIsLikedOfferDb(offer_instance.offer_id, user_id)
            offer_instance.is_agreement= self.offer_db_instance.GetIsAgreementOfferDb(offer_instance.offer_id, user_id)
            offer_obj_list.append(offer_instance.ToJson())
        return offer_obj_list
    def SetOfferForGlobal(self, offer, profile, user_id):
        """Build one global-feed offer dict: offer data + owner profile + viewer flags."""
        # Imported here (not at module level) to avoid a circular import with UserService.
        from UserService import UserService
        user_service_instance = UserService()
        offer_instance = Offer()
        offer_instance.SetFromDb(offer)
        offer_instance.is_liked= self.offer_db_instance.GetIsLikedOfferDb(offer_instance.offer_id, user_id)
        offer_instance.is_agreement= self.offer_db_instance.GetIsAgreementOfferDb(offer_instance.offer_id, user_id)
        offer_instance.profile_object = user_service_instance.SetProfileForGlobal(profile)
        return offer_instance.ToJson()
    def DeleteLikeOfferService(self,like_json):
        """Remove the user's like from the offer."""
        result = self.offer_db_instance.DeleteOfferLikeDb(like_json['offer_id'], like_json['user_id'])
        return result
| Fipek/SocialIcon-Backend | Service/OfferService.py | OfferService.py | py | 5,209 | python | en | code | 0 | github-code | 13 |
26016353966 | from typing import Sequence
import numpy as np
from sklearn.metrics import accuracy_score, confusion_matrix, recall_score
from audeep.backend.data.data_set import DataSet, Split, Partition
from audeep.backend.learners import LearnerBase, PreProcessingWrapper
from audeep.backend.log import LoggingMixin
def uar_score(labels: np.ndarray,
              predictions: np.ndarray):
    """
    Compute the unweighted average recall (UAR) of predictions against true labels.

    The UAR is the plain mean of the per-class recalls, so every class counts
    equally regardless of how many instances it contains.

    Parameters
    ----------
    labels: numpy.ndarray
        A one-dimensional numpy array containing the true labels of instances
    predictions
        A one-dimensional numpy array containing the predicted labels of instances

    Returns
    -------
    float
        The unweighted average recall for the specified true labels and predictions
    """
    # "macro" averaging = per-class recall, then an unweighted mean.
    macro_recall = recall_score(labels, predictions, average="macro")
    return macro_recall
class CrossValidatedEvaluation(LoggingMixin):
    """
    Cross-validated evaluation of a learner on some data.

    Given a data set and a learner, this class computes cross-validated accuracy and unweighted average recall, and a
    95% confidence interval for both values. Additionally, a confusion matrix is summed over all cross-validation folds.
    """

    def __init__(self,
                 learner: LearnerBase,
                 upsample: bool,
                 majority_vote: bool):
        """
        Creates and initializes a new cross-validated evaluation of the specified learner.

        The `run` method has to be invoked on some data before results can be retrieved.

        Parameters
        ----------
        learner: LearnerBase
            A learner which should be evaluated
        upsample: bool
            Balance classes in the training splits of each fold by upsampling instances
        majority_vote: bool
            Passed through to the PreProcessingWrapper created for each fold
        """
        super().__init__()

        self._learner = learner
        self._upsample = upsample
        self._majority_vote = majority_vote

        # Results are populated by `run`; None until then.
        self._accuracy = None
        self._accuracy_confidence_interval = None
        self._uar = None
        self._uar_confidence_interval = None
        self._confusion_matrix = None

    @property
    def accuracy(self) -> float:
        """
        Returns the accuracy of the learner.

        This property returns the accuracy on the last data set on which the `run` method has been invoked. As a
        consequence, this property returns None if the `run` method has not yet been invoked.

        Returns
        -------
        float
            Returns the accuracy of the learner on the last data set passed to the `run` method
        """
        return self._accuracy

    @property
    def accuracy_confidence_interval(self) -> float:
        """
        Returns a 95% confidence interval for the accuracy of the learner.

        This property returns the confidence interval on the last data set on which the `run` method has been invoked.
        As a consequence, this property returns None if the `run` method has not yet been invoked.

        Returns
        -------
        float
            Returns a 95% confidence interval for the accuracy of the learner on the last data set passed to the `run`
            method
        """
        return self._accuracy_confidence_interval

    @property
    def uar(self) -> float:
        """
        Returns the unweighted average recall of the learner.

        This property returns the unweighted average recall on the last data set on which the `run` method has been
        invoked. As a consequence, this property returns None if the `run` method has not yet been invoked.

        Returns
        -------
        float
            Returns the unweighted average recall of the learner on the last data set passed to the `run` method
        """
        return self._uar

    @property
    def uar_confidence_interval(self) -> float:
        """
        Returns a 95% confidence interval for the unweighted average recall of the learner.

        This property returns the confidence interval on the last data set on which the `run` method has been invoked.
        As a consequence, this property returns None if the `run` method has not yet been invoked.

        Returns
        -------
        float
            Returns a 95% confidence interval for the unweighted average recall of the learner on the last data set
            passed to the `run` method
        """
        return self._uar_confidence_interval

    @property
    def confusion_matrix(self) -> np.ndarray:
        """
        Returns the confusion matrix of the learner.

        This property returns the confusion matrix on the last data set on which the `run` method has been invoked.
        As a consequence, this property returns None if the `run` method has not yet been invoked. The confusion matrix
        is computed as the sum of the confusion matrices on the individual cross-validation folds.

        Returns
        -------
        numpy.ndarray
            The confusion matrix of the learner on the last data set passed to the `run` method
        """
        return self._confusion_matrix

    def run(self,
            data_set: DataSet):
        """
        Evaluates the learner on the specified data set using cross-validation.

        Sets the various properties of this instance to the values obtained during evaluation on the specified data set.

        Parameters
        ----------
        data_set: DataSet
            The data set on which the learner should be evaluated

        Raises
        ------
        ValueError
            If the specified data set does not have cross-validation information
        """
        if not data_set.has_cv_info:
            raise ValueError("data set does not have cross validation info")

        accuracies = []
        uars = []
        confusion_matrices = []

        # order numeric labels by nominal value
        ordered_labels = sorted(data_set.label_map.items(), key=lambda t: t[0])
        # keep only the numeric labels, now ordered by their nominal names
        ordered_labels = list(zip(*ordered_labels))[1]

        for fold in range(data_set.num_folds):
            self.log.info("processing cross validation fold %d...", fold + 1)

            # a fresh wrapper per fold, so no state leaks between folds
            learner_wrapper = PreProcessingWrapper(learner=self._learner,
                                                   upsample=self._upsample,
                                                   majority_vote=self._majority_vote)

            train_split = data_set.split(fold=fold,
                                         split=Split.TRAIN)
            valid_split = data_set.split(fold=fold,
                                         split=Split.VALID)

            learner_wrapper.fit(train_split)

            # IMPORTANT: these methods return maps of filename to label, since order may (or most certainly will) be
            # different
            predictions = learner_wrapper.predict(valid_split)
            true_labels = valid_split.filename_labels_numeric

            # sort labels and predictions by filename
            predictions = np.array([item[1] for item in sorted(predictions.items(), key=lambda item: item[0])])
            true_labels = np.array([item[1] for item in sorted(true_labels.items(), key=lambda item: item[0])])

            accuracy = accuracy_score(true_labels, predictions)
            uar = uar_score(true_labels, predictions)

            accuracies.append(accuracy)
            uars.append(uar)
            confusion_matrices.append(confusion_matrix(y_true=true_labels,
                                                       y_pred=predictions,
                                                       labels=ordered_labels))

            self.log.info("fold %d accuracy is %2.2f%% (UAR %2.2f%%)", fold + 1, 100 * accuracy, 100 * uar)

        # Mean over folds; +/- 2 standard deviations is treated as a ~95% interval.
        self._accuracy = np.mean(accuracies)
        self._accuracy_confidence_interval = 2 * np.std(accuracies)
        self._uar = np.mean(uars)
        self._uar_confidence_interval = 2 * np.std(uars)
        self._confusion_matrix = np.sum(confusion_matrices, axis=0)
class PartitionedEvaluation(LoggingMixin):
    """
    Partitioned evaluation of a learner on some data set.

    The learner is trained on specific partitions of the data set, and evaluated on some other specific partitions of
    the data set. Typically, a the learner is tuned by training on the train partition and evaluating on the development
    partition of a data set. Once tuning is complete, the learner is trained on the train and development partitions,
    and evaluated on the test partition.
    """

    def __init__(self,
                 learner: LearnerBase,
                 train_partitions: Sequence[Partition],
                 eval_partitions: Sequence[Partition],
                 upsample: bool,
                 majority_vote: bool):
        """
        Create a new partitioned evaluation of the specified learner.

        Evaluation is performed by training the learner on the specified training partitions, and evaluating the
        learner on the specified evaluation partitions.

        Parameters
        ----------
        learner: LearnerBase
            The learner which should be evaluated
        train_partitions: list of Partition
            The partitions on which the learner should be trained
        eval_partitions: list of Partition
            The partitions on which the learner should be evaluted
        upsample: bool
            Balance classes in the training partitions by upsampling instances
        majority_vote: bool
            Passed through to the PreProcessingWrapper used for evaluation
        """
        super().__init__()

        self._learner = learner
        self._train_partitions = train_partitions
        self._eval_partitions = eval_partitions
        self._upsample = upsample
        self._majority_vote = majority_vote

        # Results are populated by `run`; None until then.
        self._accuracy = None
        self._uar = None
        self._confusion_matrix = None

    @property
    def accuracy(self) -> float:
        """
        Returns the accuracy of the learner.

        This property returns the accuracy on the last data set on which the `run` method has been invoked. As a
        consequence, this property returns None if the `run` method has not yet been invoked.

        Returns
        -------
        float
            Returns the accuracy of the learner on the last data set passed to the `run` method
        """
        return self._accuracy

    @property
    def uar(self) -> float:
        """
        Returns the unweighted average recall of the learner.

        This property returns the unweighted average recall on the last data set on which the `run` method has been
        invoked. As a consequence, this property returns None if the `run` method has not yet been invoked.

        Returns
        -------
        float
            Returns the unweighted average recall of the learner on the last data set passed to the `run` method
        """
        return self._uar

    @property
    def confusion_matrix(self) -> np.ndarray:
        """
        Returns the confusion matrix of the learner.

        This property returns the confusion matrix on the last data set on which the `run` method has been invoked.
        As a consequence, this property returns None if the `run` method has not yet been invoked.

        Returns
        -------
        numpy.ndarray
            The confusion matrix of the learner on the last data set passed to the `run` method
        """
        return self._confusion_matrix

    def run(self,
            data_set: DataSet):
        """
        Evaluates the learner on the specified data set.

        Sets the various properties of this instance to the values obtained during evaluation on the specified data set.

        Parameters
        ----------
        data_set: DataSet
            The data set on which the learner should be evaluated

        Raises
        ------
        ValueError
            If the specified data set does not have partition information
        """
        if not data_set.has_partition_info:
            raise ValueError("data set does not have partition info")

        self.log.info("training classifier")

        learner_wrapper = PreProcessingWrapper(learner=self._learner,
                                               upsample=self._upsample,
                                               majority_vote=self._majority_vote)

        train_split = data_set.partitions(self._train_partitions)
        eval_split = data_set.partitions(self._eval_partitions)

        learner_wrapper.fit(train_split)

        # IMPORTANT: these methods return maps of filename to label, since order may (or most certainly will) be
        # different
        predictions = learner_wrapper.predict(eval_split)
        true_labels = eval_split.filename_labels_numeric

        # sort labels and predictions by filename
        predictions = np.array([item[1] for item in sorted(predictions.items(), key=lambda item: item[0])])
        true_labels = np.array([item[1] for item in sorted(true_labels.items(), key=lambda item: item[0])])

        self._accuracy = accuracy_score(true_labels, predictions)
        self._uar = uar_score(true_labels, predictions)

        # order numeric labels by nominal value
        ordered_labels = sorted(data_set.label_map.items(), key=lambda t: t[0])
        # keep only the numeric labels, now ordered by their nominal names
        ordered_labels = list(zip(*ordered_labels))[1]

        self._confusion_matrix = confusion_matrix(y_true=true_labels,
                                                  y_pred=predictions,
                                                  labels=ordered_labels)
| auDeep/auDeep | audeep/backend/evaluation.py | evaluation.py | py | 13,821 | python | en | code | 144 | github-code | 13 |
25273268262 | from tkinter import *
def display():
    """Print whether the user agrees, based on the checkbutton's IntVar state."""
    # x is the module-level IntVar bound to the checkbutton: 1 = checked, 0 = cleared.
    print("You agree" if x.get() == 1 else "You disagree")
window = Tk()
x = IntVar() # IntVar tracking the checkbutton state: 1 when set, 0 when cleared
check_button = Checkbutton(window,
                           text="I agree",
                           variable= x, #tracks the current state of the checkbutton, is an IntVar, and 0 means cleared and 1 means set
                           onvalue= 1, #value stored in x while the box is checked
                           offvalue= 0, #value stored in x while the box is unchecked
                           command=display) #called each time the box is toggled
check_button.pack()
window.mainloop() | Swishxo/VScode | TKinter/checkbox.py | checkbox.py | py | 642 | python | en | code | 0 | github-code | 13 |
25405656718 | from main import *
from skimage import data, color, morphology, img_as_ubyte, measure
from skimage.feature import canny
from skimage.transform import hough_ellipse, hough_circle
from skimage.draw import ellipse_perimeter
import cv2
import matplotlib.pyplot as plt
import numpy as np
from skimage.color import rgb2gray
def GetCircles(image_rgb, maxNoteHeight, drawContours):
    """
    Detect roughly note-sized closed contours in an RGB image and return their centers.

    Runs Canny edge detection, thickens/closes the edges, extracts contours,
    keeps those whose area (via getArea) lies between (0.8*maxNoteHeight)^2 and
    1.5*maxNoteHeight^2, and returns the centroid of each kept contour.

    Parameters
    ----------
    image_rgb : ndarray
        Input RGB image.
    maxNoteHeight : number
        Expected note-head size in pixels; drives the area acceptance window.
    drawContours : bool
        If True, display the image with the accepted contours overlaid.

    Returns
    -------
    list of [x, y]
        Centroid (column, row) of each accepted contour.
    """
    # image_rgb = image[0:220, 0:420]
    image_gray = rgb2gray(image_rgb)
    edges = canny(image_gray, sigma=2.0,
                  low_threshold=0.25, high_threshold=0.2)
    # Two dilations plus a closing to merge broken edge fragments into closed shapes.
    edges = morphology.dilation(edges)
    edges = morphology.dilation(edges)
    edges = morphology.closing(edges)
    #plt.imshow(image_rgb)
    contours = measure.find_contours(edges,0)
    #plt.imshow(contours)
    circles=[]
    for n, contour in enumerate(contours):
        area = getArea(contour)
        # NOTE(review): this plot happens for EVERY contour regardless of
        # drawContours — looks like leftover debug output; confirm.
        plt.plot(contour[:, 1], contour[:, 0], linewidth=2)
        # NOTE(review): len(contour[0]) is the length of a single (row, col)
        # point — presumably len(contour) was intended; verify before changing.
        if(area < maxNoteHeight**2*1.5 and len(contour[0]) < maxNoteHeight and area > (maxNoteHeight*0.8)**2):
            circles.append(contour)
    # Centroid of each accepted contour as [mean column, mean row].
    centers =[]
    for circle in circles:
        sumX, sumY, numPoint = 0,0,0
        for point in circle:
            sumX = sumX + point[1]
            sumY = sumY + point[0]
            numPoint = numPoint+1
        centers.append([sumX/numPoint, sumY/numPoint])
    if(drawContours):
        plt.imshow(image_rgb)
        for n, circle in enumerate(circles):
            plt.plot(circle[:, 1], circle[:, 0], linewidth=2)
        plt.show()
    return centers
def getArea(contour):
    """Approximate a contour's area as (widest row length) * (number of rows)."""
    widest = max(map(len, contour), default=0)
    return widest * len(contour)
| wmatecki97/Python-Notes-Recognition | CirclesDetection.py | CirclesDetection.py | py | 1,712 | python | en | code | 0 | github-code | 13 |
14776934100 | #!/usr/bin/python3
import h5py as h5 # for reading and writing h5 format
import numpy as np # for handling arrays
import os # for directory walking
import subprocess as sp # for executing terminal command from python
"""
This script turns the different outputs from a COMPAS simulation into
a single h5file. If you have a simulation in different subfolders due
to a large run, set the path to the data to the output root directory.
Note that in order to create the data we combine the data from different
folders in a single csv file first. Hence you need enough space to
store a duplicate of the data. The combined csv files will automatically
be removed afterwards.
"""
### User-defined parameters
def setDefaults():
    """
    Return the default configuration for combining COMPAS output.

    Returns a 6-tuple:
        filesToCombine : None means "combine every known output category"
                         (a list of category names would restrict it)
        dataRootDir    : root directory of the simulation data
        prefix         : prefix of the output data files
        delimiter      : delimiter used in the output csv files
        extension      : extension of the data files
        h5Name         : name of the combined h5 output file
    """
    defaults = {
        'filesToCombine': None,
        'dataRootDir': '.',
        'prefix': 'BSE_',
        'delimiter': ',',
        'extension': 'csv',
        'h5Name': 'COMPAS_Output.h5',
    }
    return (defaults['filesToCombine'], defaults['dataRootDir'],
            defaults['prefix'], defaults['delimiter'],
            defaults['extension'], defaults['h5Name'])
###############################################################
###############################################################
#
# Changing code below this line is at own risk
#
###############################################################
################################################################
### Global vars
# Number of lines in each COMPAS data file header: one line each for the
# column data types, the column units, and the column names (in that order).
# Probably will never change, but this avoids having "magic numbers" below.
nLinesInHeader = 3
##################################################################
###
### Main Function
###
##################################################################
def main(filesToCombine=None, dataRootDir=None, prefix=None,\
         delimiter=None, extension=None, h5Name='COMPAS_output.h5'):
    """
    Combines all of the different output files of the same type (e.g RLOF, Supernovae, etc.)
    into one CSV file per type, then synthesizes all of the combined CSV files into one
    single H5 file, before cleaning up the repository.

    Parameters
    ----------
    filesToCombine : list of str or None
        Output categories to combine; None means all known categories.
    dataRootDir : str
        Root directory under which the simulation output lives.
    prefix : str
        Prefix of the data files (e.g. 'BSE_').
    delimiter : str
        Delimiter used in the output csv files.
    extension : str
        Extension of the data files (e.g. 'csv').
    h5Name : str
        Name of the combined h5 file to create under dataRootDir.
    """
    ### Step 0: create dictionary filesToCombine and Paths
    h5GroupDict = createDictionaryGroupPaths(filesToCombine=filesToCombine,\
                                             prefix=prefix, extension=extension)

    ### Step 1: Check that the rootDataDir exists and correct formatting if necessary
    print('Checking if directory and files to combine exist')
    dataRootDir = verifyPathsFiles(dataRootDir=dataRootDir, h5GroupDict=h5GroupDict)

    ### Step 2: Create the combined CSV file for each output type
    print('Combining %s files from subdirectories' %(extension))
    setOfUsedDatafiles = combineOutputsOfFile(dataRootDir=dataRootDir, h5GroupDict=h5GroupDict,\
                         delimiter=delimiter)

    ### Step 3: Create a single H5 file for all the data
    print('Creating H5 file %s' %(h5Name))
    createH5file(dataRootDir=dataRootDir, h5Name=h5Name, h5GroupDict=h5GroupDict)

    ### Step 4: Remove the temporary files
    print('Cleaning up the combined %s files' %(extension))
    cleanUpInAisleNumber2Please(dataRootDir=dataRootDir, h5GroupDict=h5GroupDict)

    print()
    print('-------------------------')
    print('--Overview of your data--')
    print('-------------------------')

    ### Step 5: Print columns in the h5 file
    printH5Columns(dataRootDir=dataRootDir, h5Name=h5Name)

    ### Step 6: Print out which data files were used
    printUsedDataFiles(setOfUsedDatafiles)

    print('Done, :smiling_imp:\n')
##################################################################
###
### Step 0: Create dictionary which links groupnames and paths
### of the files you which to combine
###
##################################################################
def createDictionaryGroupPaths(filesToCombine = None, prefix=None, \
                               extension=None):
    """
    Build a dictionary mapping H5 group names to the COMPAS output filenames
    that should be combined into each group.

    Parameters
    ----------
    filesToCombine : list of str or None
        Group names to include; None (the default) selects every known group.
    prefix : str
        Filename prefix of the data files (e.g. 'BSE_').
    extension : str
        Filename extension of the data files (e.g. 'csv').

    Returns
    -------
    dict
        Mapping of group name -> data filename.

    Raises
    ------
    ValueError
        If a requested group name is not one of the known groups.
    """
    # The current groups we offer are
    optionsDict = {
    'CommonEnvelopes'       : str(prefix) + 'Common_Envelopes.' + str(extension),\
    'DoubleCompactObjects'  : str(prefix) + 'Double_Compact_Objects.' + str(extension),\
    'Supernovae'            : str(prefix) + 'Supernovae.' + str(extension),\
    'SystemParameters'      : str(prefix) + 'System_Parameters.' + str(extension),\
    'RLOF'                  : str(prefix) + 'RLOF.' + str(extension),\
    'errors'                : str(prefix) + 'errorfile.' + str(extension),\
    'output'                : str(prefix) + 'output.' + str(extension)\
    }

    # Default setting None means: use all of the known groups.
    if filesToCombine is None:
        return optionsDict

    # Otherwise keep only the requested subset, validating each name.
    h5GroupDict = {}
    for f in filesToCombine:
        if f not in optionsDict:
            # Fix: the original raise embedded a line-continuation inside the
            # string literal, which baked source indentation into the error text.
            raise ValueError("%s is not a group that exists. Currently we include %s"
                             % (f, list(optionsDict.keys())))
        h5GroupDict[f] = optionsDict[f]

    return h5GroupDict
##################################################################
###
### Step 1: Check that the rootDataDir exists
### and correct formatting if necessary
###
##################################################################
def verifyPathsFiles(dataRootDir=None, h5GroupDict=None):
    """
    Normalize the root data directory path and confirm the directory exists.

    Parameters
    ----------
    dataRootDir : str
        Path to the root directory of the data; a trailing '/' is appended
        when missing.
    h5GroupDict : dict
        Accepted for call-site symmetry with the other pipeline steps;
        not used by this function.

    Returns
    -------
    str
        The normalized root directory path, guaranteed to end with '/'.

    Raises
    ------
    ValueError
        If the directory does not exist.
    """
    normalized = dataRootDir
    if normalized[-1] != "/":
        normalized = normalized + "/"
    if not os.path.isdir(normalized):
        raise ValueError("directory not found with path: %s"%(normalized))
    return normalized
##################################################################
###
### Step 2: Create the combined CSV file for each output type
###
##################################################################
def combineOutputsOfFile(dataRootDir=None, h5GroupDict = None, delimiter=None):
    """
    COMPAS has a strict header format which is assumed here
    1st line = type data (INT, FLOAT BOOL etc)
    2nd line = unit data (Msol, ergs,Tsol etc)
    3th line = column name
    4th ++++ = data

    This function identifies which categories of output file
    were produced and need to be included in the H5 file. Then
    it collects all the data for a given category and puts it
    in a CSV file with the correct header.

    Returns the set of data filenames that were actually found.
    """
    # Keep track of which data files are picked up by the walker
    setOfUsedDatafiles = set()
    for compasDataFilename in h5GroupDict.values():
        combinePath = dataRootDir + 'Combine_' + compasDataFilename
        isHeaderWritten = False   # header is written once per category
        nColumnCheck = None       # column count, fixed by the first header row
        # Iterate through each subdirectory to find all the relevant output
        # files; if none exists, no combined file is produced
        for root, dirs, files in os.walk(dataRootDir):
            for f in files:
                if f != compasDataFilename:
                    continue
                # Add to set of discovered datafiles
                setOfUsedDatafiles.add(f)
                path = os.path.join(root, f)
                # `with` guarantees the input handle is closed (previously one
                # handle was leaked per processed file)
                with open(path) as compasData:
                    if not isHeaderWritten:
                        isHeaderWritten = True
                        # First file of this category: create/truncate the
                        # combined file and write the cleaned header
                        with open(combinePath, 'w') as combineWrite:
                            for i in range(nLinesInHeader):
                                line = compasData.readline()
                                # Verify column count is consistent across rows
                                nCols = len(line.split(delimiter))
                                if i == 0:  # first row - set the required column number
                                    nColumnCheck = nCols
                                elif nCols != nColumnCheck:
                                    raise ValueError('wrong number of columns in header=%s'%(i))
                                line = line.replace(" ", "")          # remove whitespace
                                line = line.replace(delimiter, "\t")  # tabs simplify writing to h5 later
                                combineWrite.write(line)
                    else:
                        # Later files: skip the header (already written)
                        for i in range(nLinesInHeader):
                            compasData.readline()
                    # Append the data rows; the combined file is opened once
                    # per input file rather than once per line
                    with open(combinePath, 'a') as combineWrite:
                        for line in compasData:
                            # Verify column count is consistent across rows
                            nCols = len(line.split(delimiter))
                            if nCols != nColumnCheck:
                                raise ValueError('wrong number of columns in data')
                            line = line.replace(" ", "")          # remove whitespace
                            line = line.replace(delimiter, "\t")  # tabs simplify writing to h5 later
                            combineWrite.write(line)
    # Return the set of all used datafiles to be printed at the end
    return setOfUsedDatafiles
##################################################################
###
### Step 3: Create a single H5 file for all the data
###
##################################################################
def createH5file(dataRootDir=None, h5GroupDict=None, h5Name='COMPAS_output.h5'):
    """
    Function to create the h5 file, extract the details of the
    Combine_ files, and call the functions to fill the h5 with
    headers and data.
    """
    hf = h5.File(dataRootDir + h5Name, 'w')
    # try/finally guarantees the h5 handle is closed even if a group fails,
    # so a partially written file is not left locked/corrupted on disk
    try:
        # For each group, find its Combine file and fill the h5 group with
        # the header and data
        for group in h5GroupDict.keys():
            combineFilePath = dataRootDir + 'Combine_' + h5GroupDict[group]
            # If combine file does not exist, skip it.  This happens if a
            # category of output (e.g RLOF) does not occur in any systems
            # of a given run
            if not os.path.isfile(combineFilePath):
                continue
            # Create h5 group for the given category, then add header + data
            hf.create_group(group)
            addHdf5HeadersAndAttributes(hf, group, combineFilePath)
            addHdf5Data(hf, group, combineFilePath)
    finally:
        hf.close()
##################################################################
def addHdf5HeadersAndAttributes(hf, group, filePath):
    """
    COMPAS has a strict header format which is assumed here
    1st line = data type (INT, FLOAT, BOOL, etc)
    2nd line = parameter unit (Msol, ergs, Tsol, etc)
    3th line = parameter name

    Creates one dataset per column in hf[group], sized to the number of
    data rows, with the unit attached as an attribute.
    """
    # Extract header information
    with open(filePath, 'r') as fileRead:
        types = fileRead.readline()[:-1].split('\t')
        units = fileRead.readline()[:-1].split('\t')
        params = fileRead.readline()[:-1].split('\t')
    # Number of systems = total line count minus header.  Counted in pure
    # Python instead of `wc -l` through a shell, which was non-portable and
    # broke for paths containing spaces.
    with open(filePath, 'r') as fileCount:
        fileLength = sum(1 for _ in fileCount) - nLinesInHeader
    # Need to replace dataType, which is a string, by actual type for h5
    typeMap = {'INT': np.int64, 'FLOAT': np.float64, 'BOOL': bool}
    dtypes = []
    for iType, dataType in enumerate(types):
        if dataType not in typeMap:
            raise ValueError("Unrecognised datatype dataType=%s - for column %s in file%s "\
                             %(dataType, params[iType], group))
        dtypes.append(typeMap[dataType])
    # Create the groups in the h5file and add units
    for param, dtype, unit in zip(params, dtypes, units):
        dset = hf[group].create_dataset(param, dtype=dtype, shape=(fileLength,))
        dset.attrs['units'] = unit
    return
##################################################################
def addHdf5Data(hf, group, filePath):
    """
    Function to append data from Combine_ files
    into the h5 file
    """
    # Too slow to go line by line, so load in a modest (in terms of memory) amount at a time
    chunkSize = 500000 # Binary systems
    # Length of the file minus the header, counted in pure Python instead of
    # shelling out to `wc -l` (portable; safe for paths containing spaces)
    with open(filePath, 'r') as fileCount:
        fileLength = sum(1 for _ in fileCount) - nLinesInHeader
    # Open the file so that lines can be read in succession
    with open(filePath, 'r') as fileRead:
        # Read the header lines first so they don't interfere with the data
        types = fileRead.readline()[:-1].split('\t')
        units = fileRead.readline()[:-1].split('\t')
        params = fileRead.readline()[:-1].split('\t')
        # Initialize parameters
        h5group = hf[group]
        chunkBegin = 0
        chunkEnd = 0
        # Loop over the file in chunkSize'd chunks
        while chunkEnd < fileLength:
            data = []
            chunkEnd = chunkBegin + chunkSize
            # Don't try to load in more data than you've got
            if chunkEnd > fileLength:
                chunkEnd = fileLength
            # Read in a modest number of lines.  Note: split() on whitespace
            # is correct here because the combine step converted the input
            # delimiter to tabs.
            for i in range(chunkEnd-chunkBegin):
                data.append(fileRead.readline()[:-1].split())
            data = np.array(data) # 2d array where each column is a specific variable
            # Add the data array into the h5 file
            for iParam, param in enumerate(params):
                dtype = type(h5group[param][0])
                h5group[param][chunkBegin:chunkEnd] = np.array(data[:,iParam],dtype=dtype)
            # Leapfrog to the next chunk location
            chunkBegin = chunkEnd
##################################################################
###
### Step 4: Remove the temporary files
###
##################################################################
def cleanUpInAisleNumber2Please(dataRootDir='./',h5GroupDict=None):
    """
    Function to remove the temporary Combine_* files
    """
    for group in h5GroupDict.keys():
        combineFilePath = dataRootDir + 'Combine_' + h5GroupDict[group]
        # If combine file does not exist, skip it
        if not os.path.isfile(combineFilePath):
            continue
        # os.remove is synchronous and portable; the previous `rm` via
        # sp.Popen(shell=True) returned before deletion finished and broke
        # on paths containing spaces
        os.remove(combineFilePath)
##################################################################
###
### Step 5: Print columns in the h5 file
###
##################################################################
def printH5Columns(dataRootDir='./', h5Name="COMPAS_output.h5"):
    """
    Function to print all files with their column names/length/unit
    Returns nothing, just prints to cell/terminal
    Most of the function is for nice spacing
    """
    # Check that path is correct
    h5data = dataRootDir + h5Name
    if not os.path.isfile(h5data):
        raise ValueError("h5 file not found. Wrong path given?")
    # Open read-only (older h5py defaulted to append mode) and make sure the
    # handle is closed even if printing fails part-way through
    Data = h5.File(h5data, 'r')
    try:
        for File in Data.keys():
            print('Filename = %s' %(File))
            print('----------------------')
            # Note: X*' ' means X spaces in line
            print('\t column name%sunit%slength'%(29*' ',16*' '))
            print('\t '+'-----------------'*4)
            # For every column in this file
            for nrc, column in enumerate(Data[File].keys()):
                # Column name padded to 40 chars, unit padded to 20 chars
                spaces = ' '*(40 - len(column))
                unit = Data[File][column].attrs['units']
                spaces2 = ' '*(20 - len(unit))
                length = Data[File][column].shape[0]
                print('\t %s%s%s%s%s'%(column,spaces, unit,spaces2, length))
                # Every 5 lines print a dashed line to read output easier
                if (nrc%5==4):
                    print('\t '+'-----------------'*4)
    finally:
        Data.close()
##################################################################
###
### Step 6: Print out which data files were used
###
##################################################################
def printUsedDataFiles(setOfUsedDatafiles=frozenset()):
    """
    Last step: print out the set of all data files
    which were included in the H5. Explicitly, this is the
    intersection of the set of desired data files specified
    by the user and the set of outputted data files from
    COMPAS (since small runs may not produce all the desired
    output files)
    """
    # Default is an immutable empty set (the old default `{}` was actually a
    # mutable dict); a plain loop replaces the side-effect list comprehension
    print("\n###########################################################################\n")
    print("\tThe COMPAS datafiles combined into this HDF5 file are:\n")
    for datafile in setOfUsedDatafiles:
        print("\t" + str(datafile))
    print("\n###########################################################################\n")
##################################################################
###
### With all functions defined, now run the whole script
###
##################################################################
if __name__ == "__main__":
    # Entry point when run from a terminal: pull the default settings defined
    # at the top of this file.  When importing from another script, call
    # main() directly with your own settings instead.
    filesToCombine, dataRootDir, prefix, delimiter, extension, h5Name = setDefaults()
    # Run the full combine -> CSV -> H5 pipeline with those settings
    main(filesToCombine=filesToCombine, dataRootDir=dataRootDir, \
        prefix=prefix, delimiter=delimiter, extension=extension, \
        h5Name=h5Name)
| SimonStevenson/COMPAS | defaults/postProcessingDefault.py | postProcessingDefault.py | py | 20,590 | python | en | code | null | github-code | 13 |
473943256 | #write a program that asks for a letter, then prints if it`s a vowel or consonant
VOWELS = ('a', 'e', 'i', 'o', 'u')


def is_vowel(letter):
    """Return True when *letter* is an English vowel (case-insensitive).

    The original comparison was case-sensitive, so 'A' was wrongly
    reported as a consonant.
    """
    return letter.lower() in VOWELS


def main():
    """Ask for a single letter and report whether it is a vowel or consonant."""
    while True:
        letter = input('Enter a letter: ')
        # Require exactly one alphabetic character; previously any alphabetic
        # string (e.g. 'ab') was accepted
        if letter.isalpha() and len(letter) == 1:
            break
        print('Enter only letters')
    if is_vowel(letter):
        print('The letter %s is a vowel.' % (letter))
    else:
        print('The letter %s is a consonant.' % (letter))


if __name__ == '__main__':
    main()
341536071 | from flask import Blueprint, render_template, request
import json
import random
from flaskr import puzzle as master_puzzle
# Blueprint for the random-puzzle pages; static assets and templates live
# alongside this module.
random_puzzle_bp = Blueprint('random_puzzle_bp', __name__,
	static_folder = 'static', static_url_path = 'static',
	template_folder = 'templates')
@random_puzzle_bp.route('/')
def start_rp():
    """Serve the random-puzzle landing page."""
    page = render_template('random_puzzle.html')
    return page
@random_puzzle_bp.route('/make-random-numlist', methods=['GET', 'POST'])
def make_random_numlist():
    """Return an N*N sliding-puzzle board as JSON: 1..N*N-1 plus a 0 blank."""
    receive_data = json.loads(request.data.decode('utf-8'))
    print('Receive request make random list', receive_data)
    N = int(receive_data['N'])
    # Tiles 1..N*N-1 in order, with the blank (0) in the last slot
    numlist = list(range(1, N * N)) + [0]
    # random.shuffle(numlist) # turn on after dev
    sendDataRaw = {
        'numlist': numlist
    }
    return json.dumps(sendDataRaw)
@random_puzzle_bp.route('/verify-random-answer', methods=['GET', 'POST'])
def verify_random_answer():
    """Check a submitted puzzle answer and report the result as JSON."""
    receive_data = json.loads(request.data.decode('utf-8'))
    print('Receive request verify puzzle answer', receive_data)
    answer = receive_data['answer']
    verify_result = master_puzzle.verify_puzzle(answer)
    sendDataRaw = {
        'verification_result': verify_result
    }
    sendData = json.dumps(sendDataRaw)
    # Bug fix: previously the raw dict was returned while the serialised JSON
    # (sendData) was computed and discarded; return the JSON string, matching
    # the make-random-numlist endpoint
    return sendData
29076721134 | import time
from pyVmomi import vim
from cloudshell.cp.vcenter.exceptions.task_waiter import TaskFaultException
class SynchronousTaskWaiter(object):
    """Blocks the caller until a vSphere task finishes, logging the outcome."""

    def __init__(self):
        pass

    # noinspection PyMethodMayBeStatic
    def wait_for_task(self, task, logger, action_name='job', hide_result=False):
        """
        Waits and provides updates on a vSphere task
        :param task: pyVmomi task object to poll
        :param logger: logger used for progress / failure reporting
        :param action_name: human-readable label used in log messages
        :param hide_result: suppress logging of the task result payload
        :return: task.info.result of the completed task
        :raises TaskFaultException: when the task finishes in an error state
        """
        # Poll every 2s while the task is still queued or running
        while task.info.state in [vim.TaskInfo.State.running, vim.TaskInfo.State.queued]:
            time.sleep(2)

        if task.info.state == vim.TaskInfo.State.success:
            if task.info.result is not None and not hide_result:
                logger.info('%s completed successfully, result: %s' % (action_name, task.info.result))
            else:
                logger.info('%s completed successfully.' % action_name)
        else:  # error state
            # Collect the most specific fault description available
            multi_msg = ''
            if task.info.error.faultMessage:
                multi_msg = ', '.join([err.message for err in task.info.error.faultMessage])
            elif task.info.error.msg:
                multi_msg = task.info.error.msg
            # Failures belong at ERROR level so they are visible under the
            # default logging configuration (previously logged at INFO)
            logger.error("task execution failed due to: {}".format(multi_msg))
            logger.error("task info dump: {0}".format(task.info))
            raise TaskFaultException(multi_msg)

        return task.info.result
| AdamSharon/vCenterShell | package/cloudshell/cp/vcenter/common/vcenter/task_waiter.py | task_waiter.py | py | 1,492 | python | en | code | null | github-code | 13 |
21025717908 | from pythonosc.udp_client import SimpleUDPClient
from pythonosc.dispatcher import Dispatcher
from pythonosc import osc_server
import time
import torch
from torch_models import LSTMMemory
from collections import deque
from typing import List, Any, Union
import random
import numpy as np
from visual.live_plot import LiveMultiPlot
DEBUG = True


def debug(msg: str):
    """Print *msg* to stdout when the module-wide DEBUG flag is enabled."""
    if not DEBUG:
        return
    print(msg)
# OSC endpoints: listen for incoming messages on PORT, send model output to OUTPORT
IP = "127.0.0.1"
PORT = 9004
OUTPORT = 9006
class LiveFilter:
    """Streaming moving-average filter over the last *kernel_size* samples."""

    def __init__(self, kernel_size: int):
        # deque(maxlen=...) discards the oldest sample automatically,
        # replacing the manual popleft trim loop of the original
        self.past_inputs = deque(maxlen=kernel_size)
        self.kernel_size = kernel_size

    def input(self, input: float) -> float:
        """Add one sample and return the mean of the samples currently held."""
        self.past_inputs.append(input)
        return sum(self.past_inputs) / len(self.past_inputs)
class Controller:
    """Wires OSC inputs to per-model handlers and (optionally) a live plot.

    Owns the OSC dispatcher/server and one ModelWrapper per model; run()
    blocks serving OSC messages forever.
    """
    def __init__(self, model_names, input_names, filters, output_names, visual=False):
        # One UDP client for outgoing messages, one threaded server for incoming
        self.dispatcher = Dispatcher()
        self.client = SimpleUDPClient(IP, OUTPORT)
        self.server = osc_server.ThreadingOSCUDPServer((IP, PORT), self.dispatcher)
        self.plot = None
        self.visual = visual
        if visual:
            # Row 0 of the plot shows raw inputs, row 1 shows model outputs
            rows = 2
            all_inputs = set()
            for inputs in input_names:
                all_inputs.update(inputs)
            all_inputs = list(all_inputs)
            cols = max(len(all_inputs), len(output_names))
            # Each distinct input address also feeds the plot at its column index
            for i, input in enumerate(all_inputs):
                self.dispatcher.map(input, self.handle_plot, i)
            # NOTE(review): y-range [-4, 4] and history length 10 are assumed
            # display defaults — confirm against LiveMultiPlot
            self.plot = LiveMultiPlot(rows, cols, 10, [-4, 4], [all_inputs, output_names])
        # One wrapper per model; each registers its own OSC handlers
        self.model_wrappers = []
        for i in range(len(model_names)):
            model_wrap = ModelWrapper(i, model_names[i], input_names[i], filters[i], output_names[i], self.dispatcher, self.client, self.plot)
            self.model_wrappers.append(model_wrap)
    def handle_plot(self, address, index, *data):
        """Forward one incoming OSC value to the input row of the plot."""
        debug(f"Handling plot for {address} at index {index} with data {data}")
        # index arrives as the 1-tuple passed to dispatcher.map, hence index[0]
        self.plot.update(0, index[0], data[0])
    def run(self):
        """Serve OSC messages forever (blocking)."""
        debug("RUNNING SERVER")
        self.server.serve_forever()
class ModelWrapper:
    """Collects one value per OSC input address, then runs the model once all
    inputs have arrived and sends the (scaled) output over OSC.
    """
    def __init__(self, index: int, name: str, input_names: List[str], filters: List[Union[LiveFilter, None]],
                 output_name: str, dispatcher: Dispatcher, client: SimpleUDPClient, plot: Union[LiveMultiPlot, None]):
        self.index = index
        self.plot = plot
        # Loads the serialized model from the local models/ directory
        self.model: LSTMMemory = torch.load("models/" + name)
        self.input_names = input_names
        self.output_name = output_name
        # Latest value received per input address; cleared after each inference
        self.stored_vals = {}
        self.client = client
        self.filters = {}
        # NOTE(review): `{filter}` below interpolates the *builtin* filter
        # function, not the `filters` argument — likely a logging typo
        debug(f"Creating handler for inputs {self.input_names} and outputs {self.output_name} with model {name} and filter {filter}")
        for i, input in enumerate(input_names):
            if filters[i]:
                self.filters[input] = filters[i]
            dispatcher.map(input, self.handle)
    def handle(self, address: str, *args: List[Any]):
        """OSC callback: store the (optionally filtered) value for *address*."""
        inp = args[0]
        if address in self.filters:
            inp = self.filters[address].input(inp)
        debug(f"Received {address} : {inp} for handler for inputs {self.input_names} and outputs {self.output_name}")
        self.stored_vals[address] = inp
        self.check_vals_full()
    def check_vals_full(self):
        '''If all values are populated, we send out an OSC response'''
        if set(self.input_names) == set(self.stored_vals.keys()):
            # Assemble inputs in declaration order as a (1, n_inputs) array
            inp_list = [self.stored_vals[inp] for inp in self.input_names]
            out = self.model.forward_live(np.array(inp_list).reshape(1, len(self.input_names))).item()
            if self.plot:
                # Row 1 of the plot holds model outputs
                self.plot.update(1, self.index, out)
                self.plot.draw_all()
            # NOTE(review): output is scaled by 3 before sending — presumably
            # to map the model range onto the receiver's expected range; confirm
            self.client.send_message(self.output_name, out * 3)
            self.stored_vals.clear()
            debug(f"Values populated, sending output for {self.output_name}")
| trian-gles/aloof-machine | live_unit.py | live_unit.py | py | 4,051 | python | en | code | 0 | github-code | 13 |
32276734373 | # exercise 95: Capitalize It
def capitalize(s):
    """Return *s* with the first character, every sentence start, and each
    standalone word "i" capitalized.

    A sentence start is the character two positions after '.', '!' or '?'
    (i.e. the letter following the space).  Empty input is returned
    unchanged (the original raised IndexError on '' and on strings ending
    in ' i').
    """
    li = list(s)
    if li:
        li[0] = li[0].upper()
    for c in range(1, len(li)):
        # Standalone "i": preceded by a space and followed by a space or
        # the end of the string
        at_end = c == len(li) - 1
        if li[c] == 'i' and li[c - 1] == ' ' and (at_end or li[c + 1] == ' '):
            li[c] = 'I'
        # Capitalize the letter after sentence-ending punctuation + space
        if li[c] in ('.', '!', '?') and c + 2 < len(li):
            li[c + 2] = li[c + 2].upper()
    return ''.join(li)
def main():
    """Prompt for a line of text and echo it with capitalization applied."""
    text = input('please enter: ')
    result = capitalize(text)
    print(result)


if __name__ == '__main__':
    main()
| sara-kassani/1000_Python_example | books/Python Workbook/functions/ex95.py | ex95.py | py | 555 | python | en | code | 1 | github-code | 13 |
41243246814 | # open() is not supported in Online Python Tutor,
# so use io.StringIO to simulate a file (in Python 3)
import io

# ROT1-encoded text: every letter was shifted back by one (with wrap-around),
# so decoding shifts each letter forward by one.  Digits and punctuation are
# left untouched.  The original loop crashed (ord("letter") on a literal
# string, isalpha/isdigit never called, undefined out_file) and had no
# z->a wrap-around.
CIPHERTEXT = '''Cnmzkc Ingm Sqtlo hr sgd 45sg zmc btqqdms Oqdrhcdms ne sgd Tmhsdc Rszsdr, hm neehbd rhmbd Izmtzqx 20, 2017.
Adenqd dmsdqhmf onkhshbr, gd vzr z atrhmdrrlzm zmc sdkduhrhnm odqrnmzkhsx.'''


def decode_caesar(text, shift=1):
    """Shift every ASCII letter in *text* forward by *shift*, wrapping z->a
    and Z->A; all other characters pass through unchanged."""
    decoded = []
    for ch in text:
        if 'a' <= ch <= 'z':
            decoded.append(chr((ord(ch) - ord('a') + shift) % 26 + ord('a')))
        elif 'A' <= ch <= 'Z':
            decoded.append(chr((ord(ch) - ord('A') + shift) % 26 + ord('A')))
        else:
            decoded.append(ch)
    return ''.join(decoded)


def main():
    # The exercise uses io.StringIO to simulate an opened file; iterate it
    # line by line and print the decoded text
    for line in io.StringIO(CIPHERTEXT):
        print(decode_caesar(line.rstrip('\n')))


if __name__ == '__main__':
    main()
42159003510 | import re
import time
from threading import Thread
from PyQt5.QtGui import QPixmap, QIcon
from PyQt5.QtWidgets import QMainWindow, QApplication, QLineEdit, QPushButton, QListWidget, QLabel, QMessageBox, \
QProgressBar
from PyQt5 import uic
import sys
import asyncio
from download_movie import MovieDownloader
from fetch_movie import MovieDetails, MovieFetcherThread
import json
import os
import requests
class MainUI(QMainWindow):
    """Main window for the K-drama downloader.

    Loads the Qt Designer layout (kdrama.ui), looks up its widgets, and wires
    search / selection / download signals to handler methods.  Long-running
    work (fetching search results, downloading) happens on QThread workers
    so the UI stays responsive.
    """
    def __init__(self):
        super(MainUI, self).__init__()
        # Selection / worker state, populated as the user interacts
        self.movie_fetcher_thread = None
        self.image_url = None
        self.selected_movie_title = None
        self.selected_episode_id = None
        self.selected_episode = None
        self.movies_episodes = []
        self.selected_movie_id = None
        self.selected_movie = None
        self.movie_downloader = None
        self.movie_fetcher = None
        self.movies = None
        self.query = None
        # Load the Designer layout and window icon, then show the window
        uic.loadUi("kdrama.ui", self)
        self.setWindowIcon(QIcon("logo.png"))
        self.show()
        # Resolve widgets defined in kdrama.ui by their object names
        self.query_input = self.findChild(QLineEdit, "query_input")
        self.search_btn = self.findChild(QPushButton, "search_btn")
        self.download_btn = self.findChild(QPushButton, "download_btn")
        self.movie_list_widget = self.findChild(QListWidget, "movie_list")
        self.episode_list_widget = self.findChild(QListWidget, "episode_list")
        self.movie_image = self.findChild(QLabel, "movie_image")
        self.progress_bar = self.findChild(QProgressBar, "progress_bar")
        self.download_label = self.findChild(QLabel, "download_label")
        # Signal wiring: search, download, and list-selection handlers
        self.search_btn.clicked.connect(self.start_movie_fetcher_thread)
        self.query_input.returnPressed.connect(self.start_movie_fetcher_thread)
        self.download_btn.clicked.connect(self.download_selected_episode)
        self.movie_list_widget.itemSelectionChanged.connect(self.get_movie_index)
        self.episode_list_widget.itemSelectionChanged.connect(self.get_selected_episode)
    def start_movie_fetcher_thread(self):
        """Kick off a background search for the current query text."""
        self.query = self.query_input.text()
        if self.query:
            # Lock the search controls while the worker runs; re-enabled in
            # display_movies once results arrive
            self.query_input.setReadOnly(True)
            self.search_btn.setEnabled(False)
            self.movie_fetcher_thread = MovieFetcherThread(self.query)
            self.movie_fetcher_thread.movie_object_signal.connect(self.display_movies)
            self.movie_fetcher_thread.start()
        else:
            self.show_popup_error("Search Error", "Search query is empty")
    def display_movies(self, movies):
        """Slot for the fetcher thread: repopulate the movie list widget."""
        self.movies = movies
        self.clear_movie_list()
        if self.movies:
            for movie in self.movies:
                self.movie_list_widget.addItem(movie['title'])
        self.search_btn.setEnabled(True)
        self.query_input.setReadOnly(False)
    def clear_movie_list(self):
        """Remove all entries from the movie list widget."""
        self.movie_list_widget.clear()
    def get_movie_index(self):
        """Selection handler: load details for the highlighted movie."""
        currentRow = self.movie_list_widget.currentRow()
        if currentRow is not None:
            self.selected_movie = self.movies[currentRow]
            self.selected_movie_id = self.selected_movie['id']
            title = self.selected_movie['title']
            self.image_url = self.selected_movie['image']
            self.display_episodes()
            self.set_image()
            self.clean_title(title)
        else:
            self.show_popup_error("Search result", "No Search Result")
    def set_image(self):
        """Fetch the selected movie's poster over HTTP and show it scaled."""
        if self.image_url:
            # NOTE(review): blocking network call on the UI thread — consider
            # moving to a worker if it stalls the interface
            response = requests.get(self.image_url)
            pixmap = QPixmap()
            pixmap.loadFromData(response.content)
            self.movie_image.setScaledContents(True)
            self.movie_image.setPixmap(pixmap)
        else:
            self.movie_image.clear()
    def display_episodes(self):
        """Fetch the episode list for the selected movie and show it."""
        self.movie_fetcher = MovieDetails()
        self.episode_list_widget.clear()
        self.selected_episode_id = None
        self.movies_episodes = self.movie_fetcher.fetch_selected_object_detail(self.selected_movie_id)
        for episode in self.movies_episodes:
            self.episode_list_widget.addItem(episode['id'])
    def get_selected_episode(self):
        """Selection handler: remember which episode is highlighted."""
        selected_item = self.episode_list_widget.currentItem()
        if selected_item is not None:
            selected_index = self.episode_list_widget.row(selected_item)
            self.selected_episode = self.movies_episodes[selected_index]
            self.selected_episode_id = self.selected_episode['id']
    def download_selected_episode(self):
        """Start a background download of the selected episode into movies/<title>/."""
        if self.selected_episode_id is not None:
            self.download_btn.setEnabled(False)
            self.download_label.setText(f'Downloading {self.selected_episode_id}')
            # Ensure a per-title subfolder exists under movies/
            if not os.path.exists(os.path.join('movies', self.selected_movie_title)):
                os.mkdir(os.path.join('movies', self.selected_movie_title))
            try:
                output_path = os.path.join('movies', self.selected_movie_title, f'{self.selected_episode_id}.mp4')
                url = self.movie_fetcher.fetch_movie_streaming_links(self.selected_episode_id, self.selected_movie_id)
                self.movie_downloader = MovieDownloader(url, output_path)
                self.movie_downloader.update_progress.connect(self.update_progress)
                self.movie_downloader.download_finished.connect(self.download_finished)
                self.movie_downloader.start()
            except FileNotFoundError:
                # Raised when the top-level movies/ folder is missing entirely
                self.show_popup_error("Missing Movie Folder", "Please create a folder named movies in the same directory of the kdrama_downloader.exe.", "Please use lowercase in creating a folder.")
        else:
            self.show_popup_error("No Episode Selected", "Please click the episode you want to download")
    def update_progress(self, value):
        """Slot for the downloader: reflect progress (0-100) in the bar."""
        self.progress_bar.setValue(value)
    def download_finished(self):
        """Slot for the downloader: notify the user and reset the controls."""
        self.show_popup_success("Finished Download", f'{self.selected_episode_id} is finished downloading.')
        self.download_btn.setEnabled(True)
        self.download_label.setText("")
        self.progress_bar.setValue(0)
    def clean_title(self, title):
        """Store a filesystem-safe version of *title* (alnum + dashes only)."""
        title = re.sub(r'[^a-zA-Z0-9\s]', '', title)
        self.selected_movie_title = title.replace(' ', '-')
    def show_popup_error(self, title, content, detailedContent=""):
        """Show a modal error dialog with an optional expandable detail text."""
        msg = QMessageBox()
        msg.setWindowTitle(title)
        msg.setText(content)
        msg.setIcon(QMessageBox.Icon.Critical)
        msg.setDetailedText(detailedContent)
        x = msg.exec_()
    def show_popup_success(self, title, content, detailedContent=""):
        """Show a modal information dialog with an optional detail text."""
        msg = QMessageBox()
        msg.setWindowTitle(title)
        msg.setText(content)
        msg.setIcon(QMessageBox.Icon.Information)
        msg.setDetailedText(detailedContent)
        x = msg.exec_()
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the main window,
    # and hand control to the Qt event loop until the window closes
    app = QApplication(sys.argv)
    expense_ui = MainUI()
    sys.exit(app.exec_())
| Hannes0730/Kdrama-Downloader | main.py | main.py | py | 6,926 | python | en | code | 0 | github-code | 13 |
30144027050 | train_rust_input_path = "../data/translate/train_rust_input.txt"
train_rule_output_path = "../data/translate/train_rule_output.txt"
test_rust_input_path = "../data/translate/test_rust_input.txt"
test_rule_output_path = "../data/translate/test_rule_output.txt"
val_rust_input_path = "../data/translate/val_rust_input.txt"
val_rule_output_path = "../data/translate/val_rule_output.txt"
with open(train_rust_input_path, "r") as f_train_rust_input:
train_rust_input_data = f_train_rust_input.readlines()
with open(train_rule_output_path, "r") as f_train_rule_output:
train_rule_output_data = f_train_rule_output.readlines()
with open(test_rust_input_path, "r") as f_test_rust_input:
test_rust_input_data = f_test_rust_input.readlines()
with open(test_rule_output_path, "r") as f_test_rule_output:
test_rule_output_data = f_test_rule_output.readlines()
with open(val_rust_input_path, "r") as f_val_rust_input:
val_rust_input_data = f_val_rust_input.readlines()
with open(val_rule_output_path, "r") as f_val_rule_output:
val_rule_output_data = f_val_rule_output.readlines()
all_rust_input_data = train_rust_input_data + test_rust_input_data + val_rust_input_data
all_rule_output_data = train_rule_output_data + test_rule_output_data + val_rule_output_data
with open("../data/translate/all_rust_input.txt", "w") as f:
for input_rust in all_rust_input_data:
f.write(input_rust)
# f.write("\n")
with open("../data/translate/all_rule_output.txt", "w") as f1:
for output_rule in all_rule_output_data:
f1.write(output_rule)
# f1.write("\n")
| trusted-programming/rulegen_2 | scripts/reprocess_data.py | reprocess_data.py | py | 1,605 | python | en | code | 0 | github-code | 13 |
class Solution:
    def kthSmallest(self, matrix, k):
        """
        :type matrix: List[List[int]]
        :type k: int
        :rtype: int

        Keep a max-heap (negated values) of the k smallest elements seen so
        far; once the whole matrix has been scanned, its top is the kth
        smallest overall.
        """
        import heapq
        max_heap = []
        for row in matrix:
            for value in row:
                heapq.heappush(max_heap, -value)
                # Drop the largest of the retained candidates once we
                # hold more than k of them
                if len(max_heap) > k:
                    heapq.heappop(max_heap)
        return -max_heap[0]
| HzCeee/Algorithms | LeetCode/heap/378_KthSmallestElementInSortedMatrix.py | 378_KthSmallestElementInSortedMatrix.py | py | 551 | python | en | code | 0 | github-code | 13 |
6764537756 | from data import db_session
from data.news import News
from data.jobs import Jobs
from data.users import User
from forms.user import RegisterForm, LoginForm
from forms.news import NewsForm
from flask import Flask, abort, redirect, render_template, request
from flask_login import (
LoginManager,
current_user,
login_required,
logout_user,
login_user,
)
# SQLite database lives two directories above this file, in db/blogs.db
DB_PATH = "/".join(__file__.split("/")[:-2]) + "/db/blogs.db"
app = Flask(__name__)
app.config["SECRET_KEY"] = "yandexlyceum_secret_key"  # NOTE(review): hard-coded secret; load from env/config for production
login_manager = LoginManager()
login_manager.init_app(app)
@login_manager.user_loader
def load_user(id):
    """Flask-Login hook: resolve a session's user id to a User row."""
    session = db_session.create_session()
    return session.get(User, id)
@app.route("/")
def index():
    """Home page: public news for everyone, plus the viewer's private posts."""
    db_sess = db_session.create_session()
    is_public = News.is_private != True
    if current_user.is_authenticated:
        news = db_sess.query(News).filter((News.user == current_user) | is_public)
    else:
        news = db_sess.query(News).filter(is_public)
    return render_template("index.html", news=news)
@app.route("/register", methods=["GET", "POST"])
def reqister():
    """Registration page: validate the form, reject mismatched passwords and
    duplicate emails, otherwise create the user and log them in.

    NOTE(review): the function name "reqister" is a typo, kept because the
    route decorator registers it under this name.
    """
    form = RegisterForm()
    if form.validate_on_submit():
        # Both password fields must match
        if form.password.data != form.password_again.data:
            return render_template(
                "register.html",
                title="Sign up",
                form=form,
                message="Passwords doesn't match",
            )
        db_sess = db_session.create_session()
        # Email must not already be registered
        if db_sess.query(User).filter(User.email == form.email.data).first():
            return render_template(
                "register.html",
                title="Sign up",
                form=form,
                message="This user exists",
            )
        user = User(
            surname=form.surname.data,
            name=form.name.data,
            age=form.age.data,
            position=form.position.data,
            speciality=form.speciality.data,
            address=form.address.data,
            email=form.email.data,
        )
        # Store only the password hash, never the plain text
        user.set_password(form.password.data)
        db_sess.add(user)
        db_sess.commit()
        # Sign the new user in right away
        login_user(user)
        return redirect("/")
    return render_template("register.html", title="Регистрация", form=form)
@app.route("/login", methods=["GET", "POST"])
def login():
    """Sign-in page: authenticate and redirect home, or re-show the form."""
    form = LoginForm()
    if not form.validate_on_submit():
        # GET request or invalid form: show the sign-in page
        return render_template("login.html", title="Sign in", form=form)
    db_sess = db_session.create_session()
    user = db_sess.query(User).filter(User.email == form.email.data).first()
    if user is None or not user.check_password(form.password.data):
        return render_template(
            "login.html", message="Incorrect login or password", form=form
        )
    login_user(user, remember=form.remember_me.data)
    return redirect("/")
@app.route("/logout")
@login_required
def logout():
    """End the current session and return to the home page."""
    logout_user()
    return redirect("/")
@app.route("/news", methods=["GET", "POST"])
@login_required
def add_news():
    """Create a news post owned by the logged-in user."""
    form = NewsForm()
    if not form.validate_on_submit():
        # GET request or invalid form: show the editor
        return render_template("news.html", title="Add", form=form)
    db_sess = db_session.create_session()
    news = News()
    news.title = form.title.data
    news.content = form.content.data
    news.is_private = form.is_private.data
    # Attach the post to the current user and persist via merge
    current_user.news.append(news)
    db_sess.merge(current_user)
    db_sess.commit()
    return redirect("/")
@app.route("/news/<int:id>", methods=["GET", "POST"])
@login_required
def edit_news(id):
    """Edit one of the current user's posts.

    GET pre-fills the form from the database; POST writes the form back.
    Both paths 404 when the post does not exist or belongs to someone else.
    """
    form = NewsForm()
    if request.method == "GET":
        # Pre-fill the form with the existing post (ownership enforced in the query)
        db_sess = db_session.create_session()
        news = (
            db_sess.query(News).filter(News.id == id, News.user == current_user).first()
        )
        if news:
            form.title.data = news.title
            form.content.data = news.content
            form.is_private.data = news.is_private
        else:
            abort(404)
    if form.validate_on_submit():
        # POST with valid data: re-fetch (fresh session) and apply the edits
        db_sess = db_session.create_session()
        news = (
            db_sess.query(News).filter(News.id == id, News.user == current_user).first()
        )
        if news:
            news.title = form.title.data
            news.content = form.content.data
            news.is_private = form.is_private.data
            db_sess.commit()
            return redirect("/")
        else:
            abort(404)
    return render_template("news.html", title="Edit", form=form)
@app.route("/news_delete/<int:id>", methods=["GET", "POST"])
@login_required
def news_delete(id):
    """Delete one of the current user's posts; 404 when absent or not theirs."""
    db_sess = db_session.create_session()
    news = db_sess.query(News).filter(News.id == id, News.user == current_user).first()
    if not news:
        abort(404)
    db_sess.delete(news)
    db_sess.commit()
    return redirect("/")
def row_exists(model, **kwargs):
    """Return True when at least one *model* row matches the column filters."""
    session = db_session.create_session()
    matches = session.query(model).filter_by(**kwargs).count()
    return matches != 0
def register_colonists():
    """Seed the user table with the captain and the initial colonists.

    Idempotent: a row is only inserted when an identical one is absent.
    """
    CAPTAIN = {
        "surname": "Scott",
        "name": "Ridley",
        "age": 21,
        "position": "captain",
        "speciality": "research engineer",
        "address": "module_1",
        "email": "scott_chief@mars.org",
    }
    COLONISTS = (
        {
            "surname": "Nick",
            "name": "Valentine",
            "age": 35,
            "position": "low",
            "speciality": "cleaner",
            "address": "module_5",
            "email": "nick_valley@mars.org",
        },
        {
            "surname": "Elon",
            "name": "Musk",
            "age": 51,
            "position": "high",
            "speciality": "business man",
            "address": "module_7",
            "email": "elon_musk@mars.org",
        },
        {
            "surname": "Tony",
            "name": "Stark",
            "age": 45,
            "position": "captain helper",
            "speciality": "tech engineer",
            "address": "module_4",
            "email": "tony_stark@mars.org",
        },
    )
    db_sess = db_session.create_session()
    # The captain and the colonists are inserted the same way, so handle
    # them in one loop (previously the captain had a duplicated code path)
    for person in (CAPTAIN,) + COLONISTS:
        if not row_exists(User, **person):
            db_sess.add(User(**person))
    db_sess.commit()
def create_initial_job():
    """Insert the initial deployment job unless an identical row exists."""
    # Pre-defined first job of the colony.
    JOB = {
        "team_leader": 1,
        "job": "deployment of residential modules 1 and 2",
        "work_size": 15,
        "collaborators": "2, 3",
        "is_finished": False,
    }
    db_sess = db_session.create_session()
    if not row_exists(Jobs, **JOB):
        db_sess.add(Jobs(**JOB))
        # Commit happens only when a new row was actually added.
        db_sess.commit()
def main():
    """Initialise the database, seed reference data and start the web app."""
    db_session.global_init(DB_PATH)
    register_colonists()
    create_initial_job()
    app.run()


if __name__ == "__main__":
    main()
| genhost/mars | mars/main.py | main.py | py | 6,818 | python | en | code | 0 | github-code | 13 |
72776832977 | from unittest import result
def removeDuplicates(nums):
    """Remove adjacent duplicates from the sorted list *nums* in place.

    Keeps the first occurrence of each value, truncates the list to the
    unique elements and returns the new length (LeetCode 26 contract).

    Fix: the original popped elements while iterating, and list.pop(i)
    shifts the whole tail, making the algorithm O(n^2).  This version
    does a read/write two-pointer sweep plus a single truncation: O(n).
    """
    if not nums:
        return 0
    write = 1  # next slot for a value that differs from its predecessor
    for read in range(1, len(nums)):
        if nums[read] != nums[write - 1]:
            nums[write] = nums[read]
            write += 1
    del nums[write:]  # truncate, preserving the original in-place contract
    return len(nums)
# Quick manual check: the sorted list collapses to [1, 2, 4, 5], length 4.
nums = [1,1,2,2,4,5]
res = removeDuplicates(nums)
print(res)
'''
TRACING:
nums = [1,1,2,2,4,5]
i = 1
while i < len(nums):
=> 1 < 6 -> true
nums[1] == nums[0] => 1 == 1
true
pop(1)
=> nums = [1,2,2,4,5]
=> 1 < 5 -> true
nums[1] == nums[0] => 2 == 1
false
i = i + 1 -> i = 2
=> 2 < 5 -> true
nums[2] == nums[1] => 2 == 2
true
pop(2)
=> nums = [1,2,4,5]
=> 2 < 4 -> true
nums[2] == nums[1] => 4 == 2
false
i = i + 1 -> i = 3
=> 3 < 4 -> true
nums[3] == nums[2] => 5 == 4
false
i = i + 1 -> i = 4
=> 4 < 4 -> false
comes out of while loop
returns length of nums, i.e., 4
OUTPUT = 4
'''
| karthik-karalgikar/coding_practice | Day_7_ArrayEasy/LC26removeDuplicates.py | LC26removeDuplicates.py | py | 901 | python | en | code | 0 | github-code | 13 |
33212668864 | from reading_datasets import read_ud_dataset, reading_tb_ner
# Load the Tweebank POS (UD) and NER splits; each item is a
# (token_list, label_list) pair produced by the project's readers.
pos_train = read_ud_dataset(dataset = 'tb', location = '../Datasets/POSTagging/Tweebank/', split = 'train')
pos_val = read_ud_dataset(dataset = 'tb', location = '../Datasets/POSTagging/Tweebank/', split = 'dev')
pos_test = read_ud_dataset(dataset = 'tb', location = '../Datasets/POSTagging/Tweebank/', split = 'test')

ner_train = reading_tb_ner(location = '../Datasets/NER/Tweebank/', split = 'train')
ner_val = reading_tb_ner(location = '../Datasets/NER/Tweebank/', split = 'dev')
ner_test = reading_tb_ner(location = '../Datasets/NER/Tweebank/', split = 'test')
# Index the NER split once by its space-joined, lower-cased sentence.  The
# original code re-scanned (and re-lower-cased) the entire NER split for
# every POS sentence, which is O(n_pos * n_ner); a one-time dictionary
# makes the join O(n_pos + n_ner).  setdefault keeps the FIRST occurrence,
# matching the original "break on first match" behaviour.
ner_index = {}
for ner_tweet, ner_labels in ner_train:
    ner_tweet = [token.lower() for token in ner_tweet]
    ner_index.setdefault(' '.join(ner_tweet), (ner_tweet, ner_labels))

# Pair every POS-tagged tweet with its NER annotation, keeping a pair only
# when the joined sentences match AND the token sequences agree exactly
# (same double check the original performed).
combined_dataset = []
for tweet, pos_labels in pos_train:
    tweet = [token.lower() for token in tweet]
    match = ner_index.get(' '.join(tweet))
    if match is not None and tweet == match[0]:
        combined_dataset.append([tweet, pos_labels, match[1]])

# Report tokens tagged PROPN by the POS layer but left untagged ('O') by NER.
for tweet, pos_labels, ner_labels in combined_dataset:
    for t, p, n in zip(tweet, pos_labels, ner_labels):
        if p == 'PROPN' and n == 'O': #and t[0] != '@':
            print(t, '\t', p, '\t', n, '\t--> ', ' '.join(tweet) )
| akshat57/Twitter-Seq-Labelling | Code/reconcile_datasets.py | reconcile_datasets.py | py | 1,343 | python | en | code | 0 | github-code | 13 |
680795996 | #!/usr/bin/env python3
"""
Coin flip exersice from lecture.
"""
import random
class Coin():
    """A two-sided coin that remembers which side is facing up."""

    def __init__(self):
        """Start with heads facing up and an empty toss history."""
        self.side_up = "Heads"
        self.results = []

    def toss_coin(self):
        """Flip the coin, update side_up and return the new face."""
        self.side_up = random.choice(["Heads", "Tails"])
        return self.side_up
if __name__ == "__main__":
    # Create the coin and loop until the player declines another toss.
    coin1 = Coin()
    play_again = "yes"

    # Fix: the original condition was `play_again == "yes" != "no"`, a
    # chained comparison whose second half ("yes" != "no") is always true;
    # the chain therefore reduced to the simple test written here.
    while play_again == "yes":
        result = coin1.toss_coin()
        print("You tossed: " + result + ". ")
        print("Would you like to toss the coin again? [yes/no]")
        play_again = input()
| DMoest/ooPython | kmom01/exercises/coin_flip.py | coin_flip.py | py | 942 | python | en | code | 1 | github-code | 13 |
2098280839 | import streamlit as st
import time
# Bare string literals are Streamlit "magic": they render as markdown.
'Starting a long computation...'

# Add a placeholder
latest_iteration = st.empty()
bar = st.progress(0)

# Drive the progress bar from 5% to 100% in 5% steps.
for i in range(20):
    # Update the progress bar with each iteration.
    latest_iteration.text(f'Iteration {(i+1)*5}')
    bar.progress((i+1)*5)
    time.sleep(0.15)

'...and now we\'re done ...almost...'

# Two placeholders: one for the rotating content, one for status messages.
placeholder = st.empty()
msg = st.empty()

msg.text("wait for 5 sec")
time.sleep(5)
# Replace the placeholder with some text:
placeholder.text("Hello")

msg.text("wait for 5 sec again")
time.sleep(5)
# Replace the text with a chart:
placeholder.line_chart({"data": [1, 5, 2, 6]})

msg.text("wait for 5 sec again and again")
time.sleep(5)
# Replace the chart with several elements:
with placeholder.container():
    st.write("This is one element")
    st.write("This is another")

msg.text("the last time to wait for 5 sec befor cleaning containers")
time.sleep(5)
# Clear all those elements:
placeholder.empty()
msg.empty() | jffist/streamlit-sandbox | app_with_progress.py | app_with_progress.py | py | 965 | python | en | code | 0 | github-code | 13 |
40864952114 | """
Implementing Double Linked List.
"""
class Node:
    """A single element of a doubly linked list."""

    def __init__(self, data):
        self.data = data  # payload stored in this node
        self.next = None  # link to the following node
        self.prev = None  # link to the preceding node
class DLL:
    """A minimal doubly linked list of Node objects."""

    def __init__(self):
        self.head = None  # first node, or None for an empty list

    def print(self):
        """Print each node's data from head to tail, one per line."""
        itr = self.head
        while itr is not None:
            print(itr.data)
            itr = itr.next

    def insertAtBeginning(self, new_data):
        """Prepend a node.

        Fix: the original never set the old head's prev pointer, so the
        backward chain was broken after every prepend.
        """
        n1 = Node(new_data)
        n1.next = self.head
        if self.head is not None:
            self.head.prev = n1  # keep the backward chain consistent
        self.head = n1

    def insertAtEnd(self, new_data):
        """Append a node.

        Fix: the original dereferenced self.head.next unconditionally and
        crashed with AttributeError on an empty list; that case now simply
        installs the new node as the head.
        """
        n2 = Node(new_data)
        if self.head is None:
            self.head = n2
            return
        itr = self.head
        while itr.next is not None:
            itr = itr.next
        itr.next = n2
        n2.prev = itr
# Demo: prepend 5, 6, 7 (list becomes 7 -> 6 -> 5), append 10,
# then print 7 6 5 10 one value per line.
d1 = DLL()
d1.insertAtBeginning(5)
d1.insertAtBeginning(6)
d1.insertAtBeginning(7)
d1.insertAtEnd(10)
d1.print()
| kundan123456/100DaysCodeChallengeDS | Day2/double-linked-list.py | double-linked-list.py | py | 789 | python | en | code | 0 | github-code | 13 |
39740091057 | #!/usr/bin/env python
from utils import toUnicode, removeDiacritic
import sqlite3
import sys
# Normalize imported database text fields to utf8.
dbFileNameDefault = 'RaceDB.sqlite3'  # database converted when run as a script

# Text columns of the core_licenseholder table that need re-encoding.
license_holder_fields = [
	'search_text',
	'last_name', 'first_name',
	'license_code', 'uci_code',
	'nationality', 'state_prov', 'city',
	'existing_tag', 'existing_tag2',
]
# Text columns of the core_team table that need re-encoding.
team_fields = [
	'name',
	'team_code',
	'nation_code',
	'search_text',
]
# (table name, text columns) pairs processed by fix_utf8().
table_fields = [
	('core_licenseholder', license_holder_fields),
	('core_team', team_fields),
]
def fix_utf8( dbFileName = dbFileNameDefault ):
	"""Re-encode the text columns listed in table_fields as UTF-8 bytes.

	Opens the SQLite database, reads every configured text column (decoded
	through the project's tolerant toUnicode factory), writes the values
	back UTF-8 encoded, then commits and closes.  Exits the process when
	the database cannot be opened.
	"""
	try:
		db = sqlite3.connect( dbFileName )
	except Exception as e:
		print ( e )
		sys.exit()

	# Decode raw database text through the project's tolerant decoder.
	db.text_factory = toUnicode

	cursor = db.cursor()
	for table, fields in table_fields:
		print ( 'Scanning: {}'.format(table) )
		column_list = ', '.join(fields)
		cursor.execute( 'SELECT {}, id FROM {}'.format( column_list, table ) )
		# Encode every non-NULL text value; the row id stays last so it
		# lines up with the trailing WHERE placeholder below.
		info = []
		for row in cursor.fetchall():
			encoded = [value.encode('utf-8') if value is not None else None for value in row[:-1]]
			info.append( encoded + [row[-1]] )
		assignments = ', '.join('{}=?'.format(field_name) for field_name in fields)
		cursor.executemany( 'UPDATE {} SET {} where id=?'.format( table, assignments ), info )
	db.commit()
	db.close()
# Run the conversion on the default database when invoked as a script.
if __name__ == '__main__':
	fix_utf8()
| esitarski/RaceDB | core/fix_utf8.py | fix_utf8.py | py | 1,219 | python | en | code | 12 | github-code | 13 |
15912891596 | from __future__ import absolute_import
from mom.security.codec import pem
from mom.security.codec.pem import rsa
from mom.security.codec.pem import x509
__author__ = "yesudeep@google.com (Yesudeep Mangalapilly)"
def public_key_pem_decode(pem_key):
  """
  Decodes a PEM-encoded public key/X.509 certificate string into
  internal representation.

  :param pem_key:
      The PEM-encoded key. Must be one of:

      1. RSA public key.
      2. X.509 certificate.
  :returns:
      A dictionary of key information.
  """
  stripped = pem_key.strip()
  if stripped.startswith(pem.CERT_PEM_HEADER):
    return x509.X509Certificate(stripped).public_key
  if stripped.startswith(pem.PUBLIC_KEY_PEM_HEADER):
    return rsa.RSAPublicKey(stripped).public_key
  raise NotImplementedError("Only PEM X.509 certificates & public "
                            "RSA keys can be read.")
def private_key_pem_decode(pem_key):
  """
  Decodes a PEM-encoded private key string into internal representation.

  :param pem_key:
      The PEM-encoded RSA private key.
  :returns:
      A dictionary of key information.
  """
  stripped = pem_key.strip()
  # Either the PKCS#8 or the traditional RSA PEM header is accepted.
  private_headers = (pem.PRIVATE_KEY_PEM_HEADER, pem.RSA_PRIVATE_KEY_PEM_HEADER)
  if stripped.startswith(private_headers):
    return rsa.RSAPrivateKey(stripped).private_key
  raise NotImplementedError("Only PEM private RSA keys can be read.")
| gorakhargosh/mom | mom/security/codec/__init__.py | __init__.py | py | 1,413 | python | en | code | 37 | github-code | 13 |
22196298312 | from jsonasobj2 import as_json
from linkml_runtime.utils.compile_python import compile_python
from linkml.generators.pythongen import PythonGenerator
def test_issue_python_ordering(input_path, snapshot):
    """Make sure that types are generated as part of the output"""
    generated = PythonGenerator(input_path("issue_134.yaml")).serialize()
    assert generated == snapshot("issue_134.py")

    # The generated module must also round-trip through JSON serialization.
    module = compile_python(generated)
    instance_e = module.E("id:1")
    instance_e.has_b = module.B("id:2")
    assert as_json(instance_e) == snapshot("issue_134.json")
| linkml/linkml | tests/test_issues/test_issue_134.py | test_issue_134.py | py | 577 | python | en | code | 228 | github-code | 13 |
74039093138 | #Tahmin yapabilen bir sinir ağı kurmanın son parçası,
# her şeyi bir araya getirmektir.
# Öyleyse, compute_weighted_sum ve node_activation işlevlerini
# ağdaki her düğüme uygulayan ve verileri çıktı katmanına sonuna kadar uygulayan ve
# çıktı katmanındaki her düğüm için bir tahmin çıkaran bir fonksiyon oluşturalım.
#Bunu başaracağımız yol aşağıdaki prosedürdür:
#1. İlk gizli katmana giriş olarak giriş katmanıyla başlayın.
#2. Geçerli katmanın düğümlerindeki ağırlıklı toplamı hesaplayın.
#3. Geçerli katmanın düğümlerinin çıktısını hesaplayın.
#4. Geçerli katmanın çıktısını bir sonraki katmana girilecek şekilde ayarlayın.
#5. Ağdaki bir sonraki katmana gidin.
#6. Çıktı katmanının çıktısını hesaplayana kadar 2-4. Adımları tekrarlayın.
import numpy as np
import Initialize_NeuralNetwork_Function as INF
def forward_propagate(network, inputs):
    """Forward-propagate *inputs* through *network* and return the output
    layer's activations.

    network : dict produced by the project's network initialiser; maps a
        layer name (e.g. 'layer_1', ..., 'output') to a dict of nodes,
        each holding 'weights' and 'bias'.
    inputs : iterable of input values fed to the first hidden layer.
    """
    layer_inputs = list(inputs)  # the input layer feeds the first hidden layer

    for layer in network:

        layer_data = network[layer]

        layer_outputs = []
        for layer_node in layer_data:

            node_data = layer_data[layer_node]

            # Compute each node's weighted sum and activation in one step,
            # rounding to 4 decimals like the course material does.
            node_output = INF.node_activation(INF.compute_weighted_sum(layer_inputs, node_data['weights'], node_data['bias']))
            layer_outputs.append(np.around(node_output[0], decimals=4))

        if layer != 'output':
            # User-facing Turkish message: "Outputs of the nodes in hidden
            # layer {n}: {...}".
            print('{}. Gizli katmandaki düğümlerin çıktıları: {}'.format(layer.split('_')[1], layer_outputs))

        layer_inputs = layer_outputs  # this layer's output feeds the next layer

    network_predictions = layer_outputs

    return network_predictions
| erdiacr/Forward-Propagation_Neural_Network | Forward_Propagation_NeuralNetwork/Forward_Propagate_Function.py | Forward_Propagate_Function.py | py | 1,897 | python | tr | code | 0 | github-code | 13 |
8424762609 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 27 09:30:54 2018
@author: lenovo
"""
# 该文件处理需要用_thread 优化一下
from ProcessFunc import ProcessOp, ProcessF
import csv
import pandas as pd
from DateFuture import future_id, date
import _thread
# Read option quotes (t, ask1, bid1) from an option folder path and write
# a DataForRes.csv file.  (Translated from the original Chinese note.)
def ProcessOp_Res(filename):
    """Dump one option/day quote series to <filename>/DataForRes.csv.

    filename must end with .../<day:8 chars>/<option:8 chars>: the codes
    are sliced from fixed positions, so the path layout must match.
    """
    option = filename[-8:]
    day = filename[-17:-9]
    [t, askprice1, bidprice1, askvol1, bidvol1] = ProcessOp(option, day)
    columns = ['t', 'askprice1', 'bidprice1', 'askvol1', 'bidvol1']
    dataframe = pd.DataFrame({'t': t, 'askprice1': askprice1, 'bidprice1': bidprice1,
                              'askvol1': askvol1, 'bidvol1':bidvol1}, columns=columns)
    dataframe.to_csv(filename+'/DataForRes.csv')
    print('Processed:'+filename)
    return 0
# Read futures quotes (t, ask1, bid1) given the futures folder name and
# write e.g. E:\Pycharm\data\res_data\IH1804\20180315\FutureForRes.csv.
# (Translated from the original Chinese note.)
def ProcessF_Res(filename):
    """Dump one future/day quote series to <filename>/FutureForRes.csv.

    filename must end with .../<future:6 chars>/<date:8 chars>: both codes
    are sliced from fixed positions, so the path layout must match.
    """
    date = filename[-8:]
    future = filename[-15:-9]
    [t, askprice1, bidprice1, askvol1, bidvol1] = ProcessF(future, date)
    columns = ['t', 'askprice1', 'bidprice1', 'askvol1', 'bidvol1']
    dataframe = pd.DataFrame({'t': t, 'askprice1': askprice1, 'bidprice1': bidprice1,
                              'askvol1': askvol1, 'bidvol1':bidvol1}, columns=columns)
    dataframe.to_csv(filename+'/FutureForRes.csv')
    print('Processed:'+filename)
    return 0
# Process futures prices for every date (option processing is below).
# (Translated from the original Chinese note.)
def processAllF():
    """Generate FutureForRes.csv for every (future, date) combination."""
    for future in future_id:
        for day in date:
            filename = 'E:/Pycharm/data/res_data/'+future+'/'+day
            ProcessF_Res(filename)
    return 0
def processAllOp(future):
    """Process every option folder listed in <future>OpFolderPath.csv.

    The csv's second column holds option folder paths; rows with an empty
    first column are skipped.
    """
    filename = 'E:/Pycharm/data/res_data/'+future+'OpFolderPath.csv'
    with open(filename, 'r') as f:
        reader = csv.reader(f)
        for row in reader:
            if row[0]:
                ProcessOp_Res(row[1])
    return 0
def main():
    """Entry point: currently processes only the futures data."""
    # NOTE(review): processAllOp() is defined above but never invoked —
    # confirm whether option folders should be processed here too.
    processAllF()

# NOTE(review): main() runs at import time (there is no __main__ guard) —
# confirm this module is only ever executed as a script.
main()
| Lee052/huatai-intern | code/ResProcess.py | ResProcess.py | py | 2,055 | python | en | code | 0 | github-code | 13 |
# Pool-filling exercise: two pipes with the given flow rates fill a pool
# for `hours` hours while the worker is away; report the fill state or
# the overflow.  (Comments translated from the transliterated originals.)
volume = int(input())  # pool volume (litres)
p1 = int(input())  # flow rate of the first pipe (litres/hour)
p2 = int(input())  # flow rate of the second pipe (litres/hour)
hours = float(input())  # hours during which the worker is away

first_pipe = p1 * hours   # litres delivered by pipe 1
second_pipe = p2 * hours  # litres delivered by pipe 2
sum_pipes_volume = first_pipe + second_pipe

if sum_pipes_volume <= volume:
    # Pool not overflowing: report fill percentage and each pipe's share.
    pool_percent = (sum_pipes_volume / volume) * 100
    first_pipe_percent = (first_pipe / sum_pipes_volume) * 100
    second_pipe_percent = (second_pipe / sum_pipes_volume) * 100
    print(f"The pool is {pool_percent:.2f}% full. Pipe 1: {first_pipe_percent:.2f}%. Pipe 2: {second_pipe_percent:.2f}%")
else:
    # More water than capacity: report the spilled amount.
    overflow = sum_pipes_volume - volume
    print(f"For {hours:.2f} hours the pool overflows with {overflow:.2f} liters.")
| tanchevtony/SoftUni_Python_basic | More exercises/02_Conditional_statements/pipes in pool.py | pipes in pool.py | py | 732 | python | en | code | 0 | github-code | 13 |
43031669204 | # Generic imports
import os
import os.path
import PIL
import math
import scipy.special
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
### ************************************************
### Class defining shape object
class shape:
    """Closed 2-D shape: control points joined by cubic Bezier arcs.

    Control points are sorted counter-clockwise and connected by Bezier
    segments whose curvature is governed by the per-point `radius` and
    `edgy` arrays; build() fills `curve_pts` with the sampled outline.
    """
    ### ************************************************
    ### Constructor
    def __init__(self,
                 name,
                 position,
                 control_pts,
                 n_control_pts,
                 n_sampling_pts,
                 radius,
                 edgy,
                 output_dir):
        """Store the shape definition and create output_dir if needed.

        name           : base name used for the generated png/csv files
        position       : 2-D offset applied to the built shape
        control_pts    : (n_control_pts, 2) array of control points
        n_sampling_pts : sampling-density factor for the Bezier arcs
        radius, edgy   : per-control-point curvature parameters
        output_dir     : directory receiving the generated files
        """
        self.name           = name
        self.position       = position
        self.control_pts    = control_pts
        self.n_control_pts  = n_control_pts
        self.n_sampling_pts = n_sampling_pts
        self.curve_pts      = np.array([])
        self.area           = 0.0
        self.size_x         = 0.0
        self.size_y         = 0.0
        self.radius         = radius
        self.edgy           = edgy
        self.output_dir     = output_dir

        if (not os.path.exists(self.output_dir)): os.makedirs(self.output_dir)

    ### ************************************************
    ### Reset object
    def reset(self):
        """Restore every field to its default (used by read_csv)."""
        # Reset object
        self.name           = 'shape'
        self.control_pts    = np.array([])
        self.n_control_pts  = 0
        self.n_sampling_pts = 0
        self.radius         = np.array([])
        self.edgy           = np.array([])
        self.curve_pts      = np.array([])
        self.area           = 0.0

    ### ************************************************
    ### Build shape
    def build(self):
        """Sample the closed Bezier outline into self.curve_pts.

        Steps: center and ccw-sort the control points, compute tangent
        ("delta"), bisector ("delta_b") and radius data per point, build
        one Bezier segment per consecutive pair, concatenate, remove
        duplicate samples, re-center, then translate to self.position.
        """
        # Center set of points
        center            = np.mean(self.control_pts, axis=0)
        self.control_pts -= center

        # Sort points counter-clockwise
        control_pts, radius, edgy = ccw_sort(self.control_pts,
                                             self.radius,
                                             self.edgy)

        local_curves = []
        delta        = np.zeros([self.n_control_pts,2])
        radii        = np.zeros([self.n_control_pts,2])
        delta_b      = np.zeros([self.n_control_pts,2])

        # Compute all informations to generate curves
        for i in range(self.n_control_pts):
            # Collect points (previous, current, next; wraps around)
            prv  = (i-1)
            crt  = i
            nxt  = (i+1)%self.n_control_pts

            pt_m = control_pts[prv,:]
            pt_c = control_pts[crt,:]
            pt_p = control_pts[nxt,:]

            # Compute delta vector (unit tangent from prev to next point)
            diff         = pt_p - pt_m
            diff         = diff/np.linalg.norm(diff)
            delta[crt,:] = diff

            # Compute edgy vector (toward the midpoint of the neighbours)
            delta_b[crt,:] = 0.5*(pt_m + pt_p) - pt_c

            # Compute radii (half neighbour distance scaled by radius[crt])
            dist         = compute_distance(pt_m, pt_c)
            radii[crt,0] = 0.5*dist*radius[crt]

            dist         = compute_distance(pt_c, pt_p)
            radii[crt,1] = 0.5*dist*radius[crt]

        # Generate curves
        for i in range(self.n_control_pts):
            crt  = i
            nxt  = (i+1)%self.n_control_pts

            pt_c = control_pts[crt,:]
            pt_p = control_pts[nxt,:]

            # Sampling density grows with the segment length
            dist = compute_distance(pt_c, pt_p)
            smpl = math.ceil(self.n_sampling_pts*math.sqrt(dist))

            local_curve = generate_bezier_curve(pt_c,           pt_p,
                                                delta[crt,:],   delta[nxt,:],
                                                delta_b[crt,:], delta_b[nxt,:],
                                                radii[crt,1],   radii[nxt,0],
                                                edgy[crt],      edgy[nxt],
                                                smpl)
            local_curves.append(local_curve)

        # Stitch the segments into one (x, y, z=0) polyline
        curve          = np.concatenate([c for c in local_curves])
        x, y           = curve.T
        z              = np.zeros(x.size)
        self.curve_pts = np.column_stack((x,y,z))
        self.curve_pts = remove_duplicate_pts(self.curve_pts)

        # Center set of points
        center                   = np.mean(self.curve_pts, axis=0)
        self.curve_pts          -= center
        self.control_pts[:,0:2] -= center[0:2]

        # Reprocess to position
        self.control_pts[:,0:2] += self.position[0:2]
        self.curve_pts [:,0:2] += self.position[0:2]

    ### ************************************************
    ### Write image
    def generate_image(self, *args, **kwargs):
        """Render the shape to <output_dir>/<name>.png and trim white margins.

        Optional kwargs xmin/xmax/ymin/ymax set the plotted window.
        NOTE(review): the 'plot_pts' kwarg is read but never used — the
        control points are always drawn; confirm the intended behavior.
        """
        # Handle optional argument
        plot_pts = kwargs.get('plot_pts', True)
        xmin     = kwargs.get('xmin',    -1.0)
        xmax     = kwargs.get('xmax',     1.0)
        ymin     = kwargs.get('ymin',    -1.0)
        ymax     = kwargs.get('ymax',     1.0)

        # Plot shape (grey background rectangle, black filled outline)
        plt.xlim([xmin,xmax])
        plt.ylim([ymin,ymax])
        plt.axis('off')
        plt.gca().set_aspect('equal', adjustable='box')
        plt.fill([xmin,xmax,xmax,xmin],
                 [ymin,ymin,ymax,ymax],
                 color=(0.784,0.773,0.741),
                 linewidth=2.5,
                 zorder=0)
        plt.fill(self.curve_pts[:,0],
                 self.curve_pts[:,1],
                 'black',
                 linewidth=0,
                 zorder=1)

        # Plot points
        # Each point gets a different color
        colors = matplotlib.cm.ocean(np.linspace(0, 1,
                                                 self.n_control_pts))
        plt.scatter(self.control_pts[:,0],
                    self.control_pts[:,1],
                    color=colors,
                    s=16,
                    zorder=2,
                    alpha=0.5)

        # Save image
        filename = self.output_dir+self.name+'.png'

        plt.savefig(filename,
                    dpi=200)
        plt.close(plt.gcf())
        plt.cla()
        trim_white(filename)

    ### ************************************************
    ### Write csv
    def write_csv(self):
        """Dump (x, y, radius, edgy) per control point to <output_dir>/<name>.csv."""
        filename = self.output_dir+self.name+'.csv'
        with open(filename,'w') as file:
            # Write header: point count and sampling density
            file.write('{} {}\n'.format(self.n_control_pts,
                                        self.n_sampling_pts))

            # Write control points coordinates
            for i in range(0,self.n_control_pts):
                file.write('{} {} {} {}\n'.format(self.control_pts[i,0],
                                                  self.control_pts[i,1],
                                                  self.radius[i],
                                                  self.edgy[i]))

    ### ************************************************
    ### Read csv and initialize shape with it
    def read_csv(self, filename, *args, **kwargs):
        """Re-initialise this shape from a csv written by write_csv().

        keep_numbering=True strips a trailing '_<n>' suffix from the name
        derived from the file name.

        NOTE(review): the self.__init__ call below passes six arguments,
        but __init__ takes (name, position, control_pts, ..., output_dir)
        — eight; as written this raises TypeError.  The call looks like
        it predates the position/output_dir parameters — confirm and
        update the argument list.
        """
        # Handle optional argument
        keep_numbering = kwargs.get('keep_numbering', False)

        if (not os.path.isfile(filename)):
            print('I could not find csv file: '+filename)
            print('Exiting now')
            exit()

        self.reset()
        # Derive the shape name from the file name (strip path and suffix)
        sfile = filename.split('.')
        sfile = sfile[-2]
        sfile = sfile.split('/')
        name  = sfile[-1]

        if (keep_numbering):
            sname = name.split('_')
            name  = sname[0]

        x      = []
        y      = []
        radius = []
        edgy   = []

        with open(filename) as file:
            header         = file.readline().split()
            n_control_pts  = int(header[0])
            n_sampling_pts = int(header[1])

            for i in range(0,n_control_pts):
                coords = file.readline().split()
                x.append(float(coords[0]))
                y.append(float(coords[1]))
                radius.append(float(coords[2]))
                edgy.append(float(coords[3]))
            control_pts = np.column_stack((x,y))

        self.__init__(name,
                      control_pts,
                      n_control_pts,
                      n_sampling_pts,
                      radius,
                      edgy)

    ### ************************************************
    ### Modify shape given a deformation field
    def modify_shape_from_field(self, deformation, pts_list):
        """Overwrite selected control points with a deformation field.

        deformation : array whose rows hold (x, y, edgy) values
        pts_list    : indices of the control points to overwrite
        """
        # Deform shape
        for i in range(len(pts_list)):
            self.control_pts[pts_list[i],0] = deformation[i,0]
            self.control_pts[pts_list[i],1] = deformation[i,1]
            self.edgy[pts_list[i]]          = deformation[i,2]
### End of class Shape
### ************************************************
### ************************************************
### Compute distance between two points
def compute_distance(p1, p2):
    """Return the Euclidean distance between two 2-D points."""
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    return np.sqrt(dx**2 + dy**2)
### ************************************************
### Generate cylinder points
def generate_cylinder_pts(n_pts):
    """Return n_pts points evenly spaced on a circle of radius 0.5.

    Raises ValueError when fewer than 4 points are requested.  (The
    original printed a message and killed the whole process with exit(),
    which is hostile behavior for library code.)
    """
    if (n_pts < 4):
        raise ValueError('Not enough points to generate cylinder')

    pts = np.zeros([n_pts, 2])
    ang = 2.0*math.pi/n_pts
    for i in range(0,n_pts):
        pts[i,:] = [0.5*math.cos(float(i)*ang),
                    0.5*math.sin(float(i)*ang)]

    return pts
### ************************************************
### Generate square points
def generate_square_pts(n_pts):
    """Return the 4 corner points of an axis-aligned unit-side square.

    n_pts must be exactly 4; a ValueError is raised otherwise.  (The
    original printed a message and called exit(), tearing down the host
    process from inside library code.)
    """
    if (n_pts != 4):
        raise ValueError('You should have n_pts = 4 for square')

    # Corners in counter-clockwise order, scaled to side length 1.
    pts = 0.5*np.array([[ 1.0, 1.0],
                        [-1.0, 1.0],
                        [-1.0,-1.0],
                        [ 1.0,-1.0]])

    return pts
### ************************************************
### Remove duplicate points in input coordinates array
### (greedy keep-first within tolerance 1.0e-8; vectorized, O(n*k))
def remove_duplicate_pts(pts):
    """Drop points lying within 1.0e-8 of an earlier point.

    Equivalent to the original pairwise scan — of any close pair the
    point with the smaller index is kept — but the original was O(n^3)
    (nested loops plus repeated np.delete).  This version tests each
    candidate against the already-kept points with one vectorized
    distance computation, O(n*k) where k is the number of points kept.
    """
    pts = np.asarray(pts)
    keep_idx = []
    for i in range(len(pts)):
        if keep_idx:
            # Distances from candidate i to every point kept so far.
            d = np.linalg.norm(pts[keep_idx] - pts[i], axis=1)
            if d.min() < 1.0e-8:
                continue  # duplicate of an earlier point: drop it
        keep_idx.append(i)

    return pts[keep_idx]
### ************************************************
### Counter Clock-Wise sort
### - Take a cloud of points and compute its geometric center
### - Translate points to have their geometric center at origin
### - Compute the angle from origin for each point
### - Sort angles by ascending order
def ccw_sort(pts, rad, edg):
    """Sort control points counter-clockwise around their centroid.

    Returns copies of (pts, rad, edg) reordered by ascending polar angle
    measured from the geometric center of the point cloud.
    """
    centered = pts - np.mean(pts, axis=0)
    order = np.arctan2(centered[:, 1], centered[:, 0]).argsort()
    return np.array(pts)[order, :], np.array(rad)[order], np.array(edg)[order]
### ************************************************
### Compute Bernstein polynomial value
def compute_bernstein(n,k,t):
    """Evaluate the Bernstein basis polynomial B_k^n at parameter t."""
    coeff = scipy.special.binom(n, k)
    return coeff * t**k * (1.0 - t)**(n - k)
### ************************************************
### Sample Bezier curves given set of control points
### and the number of sampling points
### Bezier curves are parameterized with t in [0,1]
### and are defined with n control points P_i :
### B(t) = sum_{i=0,n} B_i^n(t) * P_i
def sample_bezier_curve(control_pts, n_sampling_pts):
    """Sample the Bezier curve B(t) = sum_i B_i^n(t) P_i.

    The curve defined by *control_pts* is evaluated at n_sampling_pts
    parameter values t uniformly spaced in [0, 1].
    """
    degree = len(control_pts) - 1
    t = np.linspace(0, 1, n_sampling_pts)
    curve = np.zeros((n_sampling_pts, 2))
    for i, ctrl_pt in enumerate(control_pts):
        curve += np.outer(compute_bernstein(degree, i, t), ctrl_pt)
    return curve
### ************************************************
### Generate Bezier curve between two pts
def generate_bezier_curve(p1, p2,
                          delta1, delta2,
                          delta_b1, delta_b2,
                          radius1, radius2,
                          edgy1, edgy2,
                          n_sampling_pts):
    """Build one cubic Bezier segment joining *p1* and *p2*.

    The two interior control points blend a tangent-based placement (the
    scaled delta vectors) with a bisector-based placement (the delta_b
    vectors), weighted by edgy1/edgy2.  With n_sampling_pts == 0 the
    segment degenerates to the straight line [p1, p2].
    """
    if (n_sampling_pts == 0):
        # Straight segment: just stack the two endpoints.
        return np.vstack([p1, p2])

    # Candidate interior control points along the tangents ("base") and
    # toward the neighbour midpoints ("edgy").
    ctrl_p1_base = radius1*delta1
    ctrl_p2_base =-radius2*delta2
    ctrl_p1_edgy = radius1*delta_b1
    ctrl_p2_edgy = radius2*delta_b2

    # Assemble the four cubic-Bezier control points.
    control_pts = np.zeros((4,2))
    control_pts[0,:] = p1[:]
    control_pts[3,:] = p2[:]
    control_pts[1,:] = p1 + edgy1*ctrl_p1_base + (1.0-edgy1)*ctrl_p1_edgy
    control_pts[2,:] = p2 + edgy2*ctrl_p2_base + (1.0-edgy2)*ctrl_p2_edgy

    # Evaluate the curve at the requested sampling density.
    return sample_bezier_curve(control_pts, n_sampling_pts)
### ************************************************
### Crop white background from image
def trim_white(filename):
    """Crop the uniform white border from the image stored at *filename*.

    Bug fix: the file only does `import PIL`, which does NOT import the
    PIL.Image / PIL.ImageChops submodules; unless some other module had
    already imported them, this function died with AttributeError.  The
    submodules are now imported explicitly.
    """
    from PIL import Image, ImageChops  # explicit submodule import (see above)

    im   = Image.open(filename)
    bg   = Image.new(im.mode, im.size, (255,255,255))
    diff = ImageChops.difference(im, bg)
    bbox = diff.getbbox()
    cp   = im.crop(bbox)
    cp.save(filename)
### ************************************************
### Generate shape
def generate_shape(n_pts,
                   position,
                   shape_type,
                   shape_size,
                   shape_name,
                   n_sampling_pts,
                   output_dir):
    """Create, build and export a shape of the requested type.

    shape_type must be 'cylinder', 'square' or 'random'; otherwise a
    ValueError is raised.  (The original printed a message and called
    exit(), which kills the host process from inside library code.)
    The shape is rendered to <output_dir>/<shape_name>.png and its
    control points are written as CSV.  Returns the shape object.
    """
    # Check input
    if (shape_type not in ['cylinder','square','random']):
        raise ValueError('shape_type must be "cylinder", "square" or '
                         '"random", got {!r}'.format(shape_type))

    # Select shape type: control points, per-point radii and edginess.
    if (shape_type == 'cylinder'):
        radius       = 0.5*np.ones((n_pts))
        edgy         = 1.0*np.ones((n_pts))
        ctrl_pts     = generate_cylinder_pts(n_pts)
        ctrl_pts[:,:] *= shape_size
    if (shape_type == 'square'):
        radius       = np.zeros((n_pts))
        edgy         = np.ones((n_pts))
        ctrl_pts     = generate_square_pts(n_pts)
        ctrl_pts[:,:] *= shape_size
    if (shape_type == 'random'):
        radius       = np.random.uniform(low=0.8,  high=1.0,  size=n_pts)
        edgy         = np.random.uniform(low=0.45, high=0.55, size=n_pts)
        ctrl_pts     = np.random.rand(n_pts,2)
        ctrl_pts[:,:] *= shape_size

    # Initialize and build shape, then export image and csv.
    s = shape(shape_name,
              position,
              ctrl_pts,
              n_pts,
              n_sampling_pts,
              radius,
              edgy,
              output_dir)
    s.build()
    s.generate_image(xmin =-shape_size,
                     xmax = shape_size,
                     ymin =-shape_size,
                     ymax = shape_size)
    s.write_csv()

    return s
| jviquerat/lbm | lbm/src/utils/shapes.py | shapes.py | py | 15,469 | python | en | code | 109 | github-code | 13 |
15061861677 | from django.contrib import admin
from django.urls import path, include
# from django.contrib.auth import views as auth_views
# from django.views.generic.base import TemplateView
from . import views
# Route table for the routes app.  NOTE: the catch-all
# '<slug:route_name>/' pattern must stay below the fixed-name routes,
# otherwise it would shadow them.
urlpatterns = [
    path('route_list/', views.route_list, name='route_list'),
    path('add_route/', views.add_route, name='add_route'),
    path('my_routes/', views.my_routes, name='my_routes'),
    path('upload_csv/', views.upload_csv, name='upload_csv'),
    # Per-route pages (detail plus note/checkout/turnin actions).
    path('<slug:route_name>/', views.route, name='route'),
    path('<slug:route_name>/add_note/', views.add_note, name='add_note'),
    path('<slug:route_name>/checkout/', views.checkout, name='checkout'),
    path('<slug:route_name>/turnin/', views.turnin, name='turnin'),
] | DavKle132/route_manager | routes/urls.py | urls.py | py | 734 | python | en | code | 0 | github-code | 13 |
23911469401 | #! /usr/bin/env python3
import rospy
from sensor_msgs.msg import PointCloud2
import sensor_msgs.point_cloud2 as pcl
import std_msgs.msg
pcl_list = []
def pcl_callback(data):
    """Accumulate unique (x, round(y), round(z)) points from incoming clouds.

    Fixes vs. the original:
    - removed the dead `new_pcl`/`header` locals that were built and
      never used;
    - membership used to be tested with `p_int not in pcl_list`, a linear
      scan that made the callback quadratic as the map grew.  A companion
      set (stored as a function attribute) now gives O(1) duplicate
      checks, while `pcl_list` keeps its role as the ordered list the
      publisher loop consumes.
    """
    seen = getattr(pcl_callback, '_seen', None)
    if seen is None:
        seen = pcl_callback._seen = set(pcl_list)
    for p in pcl.read_points(data, field_names=('x', 'y', 'z'), skip_nans=True):
        p_int = (p[0], round(p[1]), round(p[2]))
        if p_int not in seen:
            seen.add(p_int)
            pcl_list.append(p_int)
# Node setup: subscribe to the depth camera's cloud and republish the
# accumulated model on 'env_model'.
rospy.init_node('point_cloud_modeller', anonymous=True)
sub = rospy.Subscriber('camera/depth/color/points', PointCloud2, pcl_callback)
pub = rospy.Publisher('env_model', PointCloud2, queue_size=10)

# NOTE(review): rospy.spin() (at the end of the loop body, below) blocks
# until shutdown, so this loop publishes only once before spinning.  If
# the model should be re-published continuously, use a rospy.Rate sleep
# inside the loop instead — confirm intent.
while not rospy.is_shutdown():
    header = std_msgs.msg.Header()
    header.stamp = rospy.Time.now()
    header.frame_id = 'map'
    pub.publish(
        pcl.create_cloud_xyz32(
            header,
            pcl_list
        )
    )
rospy.spin() | noorbot/mars-quadcopter | src/point_cloud_modeller.py | point_cloud_modeller.py | py | 971 | python | en | code | 2 | github-code | 13 |
11351552741 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
'''
输入一个链表的头节点,从尾到头反过来返回每个节点的值(用数组返回)。
示例 1:
输入:head = [1,3,2]
输出:[2,3,1]
'''
class Solution:
    def reversePrint(self, head) -> list:
        """Walk the singly linked list from *head* and return its node
        values in tail-to-head order.

        Fixes vs. the original: the annotations referenced `ListNode` and
        `List`, neither of which is defined in this file (the ListNode
        class is commented out and `typing.List` was never imported), so
        merely defining the method raised NameError outside the LeetCode
        sandbox.  The annotations now use only built-ins; the algorithm
        is unchanged — collect values front-to-back, then reverse.
        """
        res = []
        cur = head
        while cur:
            res.append(cur.val)
            cur = cur.next
        return res[::-1]
40518488185 | import urllib
import urllib.request
from bs4 import BeautifulSoup as BFS
import os
def make_soup(url):
    """Fetch *url* and return the parsed BeautifulSoup document.

    Fix: the original never closed the HTTP response object returned by
    urlopen; the context manager now closes the connection
    deterministically once the page has been parsed.
    """
    with urllib.request.urlopen(url) as thepage:
        soupdata = BFS(thepage, "html.parser")
    return soupdata
# Accumulate one comma-separated line per table row; a row's cells are
# the texts of its <a> anchors.
playerdatasaved = ""
soup = make_soup("http://fundamentus.com.br/detalhes.php")
for record in soup.findAll('tr'):
    playerdata = ""
    for data in record.findAll("a"):
        playerdata = playerdata +","+ data.text
    if len(playerdata)!=0:
        # Skip rows without anchors; strip the leading comma before appending.
        playerdatasaved = playerdatasaved + "\n" + playerdata[1:]

print(playerdatasaved)
"""header="Papel"
file = open(os.path.expanduser("papeis.csv"),"wb")
file.write(bytes(header, encoding="ascii",errors='ignore'))
file.write(bytes(playerdatasaved, encoding="ascii",errors='ignore'))"""
| sospsbrasil/ImportaPapeis | ImportaPapeis.py | ImportaPapeis.py | py | 753 | python | en | code | 0 | github-code | 13 |
21503295239 | from jax import numpy as jnp
from flax.core.frozen_dict import FrozenDict
from pinn_jax.derivatives import get_batch_jacobian, get_batch_hessian, get_batch_snap
from typing import Callable, Tuple
def get_burgers(u_hat: Callable, nu: float) -> Callable:
    """Build the Burgers residual u_t + u u_x - nu u_xx for the network u_hat."""
    jac_fn = get_batch_jacobian(u_hat)
    hess_fn = get_batch_hessian(u_hat)

    def burgers_eqn(params: FrozenDict, points: jnp.ndarray) -> dict:
        u = u_hat(params, points).squeeze()
        jac = jac_fn(params, points)    # n_batch x n_output x n_input
        hess = hess_fn(params, points)  # n_batch x n_output x n_input x n_input

        du_x, du_t = jac[:, 0, 0], jac[:, 0, 1]
        du_xx = hess[:, 0, 0, 0]

        return {'burgers_eqn': du_t + u * du_x - nu * du_xx}

    return burgers_eqn
def get_wave_eqn(u_hat: Callable, c: float, v: float) -> Callable:
    """Return the damped wave-equation residual loss for the network u_hat.

    u_hat maps (x, t) -> u; the residual is u_xx - u_tt / c^2 - v u_t.
    Note that, unlike the other factories in this module, the MEAN SQUARED
    residual is returned under the key 'wave'.
    """
    batch_jacobian = get_batch_jacobian(u_hat)
    batch_hessian = get_batch_hessian(u_hat)

    def wave_eqn(params: FrozenDict, points: jnp.ndarray) -> dict:
        hessians = batch_hessian(params, points)  # n_batch x n_output x n_input x n_input
        jacobians = batch_jacobian(params, points)  # n_batch x n_output x n_input
        # u_xx - u_tt / c^2 - v u_t
        residual = hessians[:, 0, 0, 0] - 1. / c**2 * hessians[:, 0, 1, 1] - v * jacobians[:, 0, 1]
        return {'wave': (residual**2).mean()}

    return wave_eqn
def get_allen_cahn(u_hat: Callable, a: float, b: float, c: float) -> Callable:
    """
    u_hat is a map (x, t) -> u

    equation: D(x, t; u) = partial_t u - a u_xx + b u^3 - c u = 0, (x, t) in (-1, 1) x (0, 1)

    note: using the `PeriodicMLP` will introduce an extra singleton dimension when evaluating batched derivatives. to
    mitigate this, we `squeeze` everything. this is generally okay because `n_input > 1`, but this will
    produce bad behavior when `n_batch` is 1.
    """
    batch_jacobian = get_batch_jacobian(u_hat)
    batch_hessian = get_batch_hessian(u_hat)

    def allen_cahn(params: FrozenDict, points: jnp.ndarray) -> dict:
        u = u_hat(params, points).squeeze()
        jacobian = batch_jacobian(params, points).squeeze()  # n_batch x n_input
        hessian = batch_hessian(params, points).squeeze()  # n_batch x n_input x n_input

        u_t = jacobian[:, 1]   # time derivative (t is input column 1)
        u_xx = hessian[:, 0, 0]  # second spatial derivative
        return {'allen_cahn': u_t - a * u_xx + b * u**3 - c * u}

    return allen_cahn
def get_kuramoto_sivashinsky(u_hat: Callable, alpha: float, beta: float, gamma: float) -> Callable:
    """u_hat is a map (x, t) -> u

    Residual: u_t + alpha u u_x + beta u_xx + gamma u_xxxx.  The fourth
    derivative comes from the project's batched "snap" helper.
    """
    batch_jacobian = get_batch_jacobian(u_hat)
    batch_hessian = get_batch_hessian(u_hat)
    batch_snap = get_batch_snap(u_hat)

    def kuramoto_sivashinsky(params: FrozenDict, points: jnp.ndarray) -> dict:
        u = u_hat(params, points).squeeze()
        j = batch_jacobian(params, points).squeeze()  # n_batch x n_input
        h = batch_hessian(params, points).squeeze()  # n_batch x n_input x n_input
        s = batch_snap(params, points).squeeze()  # n_batch x n_input x n_input x n_input x n_inputs

        u_x, u_t = j[:, 0], j[:, 1]
        u_xx = h[:, 0, 0]
        u_xxxx = s[:, 0, 0, 0, 0]
        return {'kuramoto_sivashinksy': u_t + alpha * u * u_x + beta * u_xx + gamma * u_xxxx}

    return kuramoto_sivashinsky
def get_convection_diffusion(u_hat: Callable, alpha: float) -> Tuple[Callable, Callable]:
    """
    u_hat is a map x -> u

    u_x + alpha u_xx = 0, x in (0, 1)

    with the boundary conditions u(0) = 0.5 and u(1) = -0.5, has the analytic solution

    exp(-x / alpha) / (1 - exp(-1 / alpha)) - 0.5

    NOTE(review): u_fn below evaluates exp(-x/alpha) / exp(-1/alpha),
    which does not match the analytic solution quoted above (the
    denominator's "1 -" and the "- 0.5" shift are missing) — confirm
    which form is intended.
    """
    batch_jacobian = get_batch_jacobian(u_hat)
    batch_hessian = get_batch_hessian(u_hat)

    def convection_diffusion(params: FrozenDict, points: jnp.ndarray) -> dict:
        j = batch_jacobian(params, points).squeeze()  # n_batch
        h = batch_hessian(params, points).squeeze()  # n_batch
        # Residual of u_x + alpha u_xx
        return {'convection_diffusion': j + alpha * h}

    def u_fn(points: jnp.ndarray) -> jnp.ndarray:
        return jnp.exp(-points[:, 0] / alpha) / jnp.exp(-1. / alpha)

    return convection_diffusion, u_fn
def get_helmholtz(u_hat: Callable, k: float, a_1: float, a_2: float) -> Tuple[Callable, Callable]:
    """
    u_hat is a map (x, y) -> u

    Delta u + k^2 u - q = 0, (x, y) in [-1, 1] x [-1, 1]

    q = - (a_1 pi)^2 sin(a_1 pi x) sin(a_2 pi y) - (a_2 pi)^2 sin(a_1 pi x) sin(a_2 pi y) + k^2 sin(a_1 pi x) sin(a_2 pi y)

    has the analytic solution u = sin(a_1 pi x) sin(a_2 pi y)

    Returns (residual_fn, exact_solution_fn).
    """
    def q_fn(points: jnp.ndarray) -> jnp.ndarray:
        # Source term chosen so that u = sin(a_1 pi x) sin(a_2 pi y) is exact.
        q = jnp.sin(a_1 * jnp.pi * points[:, 0]) * jnp.sin(a_2 * jnp.pi * points[:, 1])
        return (k**2 - a_1**2 * jnp.pi**2 - a_2**2 * jnp.pi**2) * q

    batch_hessian = get_batch_hessian(u_hat)

    def helmholtz(params: FrozenDict, points: jnp.ndarray) -> dict:
        q = q_fn(points)  # n_batch
        u = u_hat(params, points).squeeze()  # n_batch
        h = batch_hessian(params, points).squeeze()  # n_batch x n_input x n_input
        # u_xx + u_yy + k^2 u - q
        return {'helmholtz': h[:, 0, 0] + h[:, 1, 1] + k**2 * u - q}  # , 'u_xx': h[:,0,0], 'u_yy': h[:,1,1], 'q': q}

    def u_fn(points: jnp.ndarray) -> jnp.ndarray:
        return jnp.sin(a_1 * jnp.pi * points[:, 0]) * jnp.sin(a_2 * jnp.pi * points[:, 1])

    return helmholtz, u_fn
def get_reaction_diffusion(u_hat: Callable, nu: float, rho: float) -> Callable:
    """
    u_hat is a map (x, t) -> u
    Residual of u_t - nu u_xx - rho u (1 - u) = 0.
    When `u` is periodic in `x`, the equation has a solution that can be calculated
    exactly for each time step.
    note: a `PeriodicMLP` adds an extra singleton dimension to batched derivatives,
    so everything is `squeeze`d; this produces bad behavior when `n_batch` is 1.
    """
    jac_fn = get_batch_jacobian(u_hat)
    hess_fn = get_batch_hessian(u_hat)
    def reaction_diffusion(params: FrozenDict, points: jnp.ndarray) -> dict:
        u = u_hat(params, points).squeeze()            # n_batch
        u_t = jac_fn(params, points).squeeze()[:, 1]   # time derivative
        u_xx = hess_fn(params, points).squeeze()[:, 0, 0]
        residual = u_t - nu * u_xx - rho * u * (1. - u)
        return {'reaction_diffusion': residual}
    return reaction_diffusion
def get_brusselator(uv_hat: Callable, d_0: float, d_1: float, a: float, b: float) -> Callable:
    """brusselator reaction-diffusion system presented in: https://doi.org/10.1016/j.jcp.2023.112008
    u_t = d_0 (u_xx + u_yy) + a - (1 + b) u + v u^2
    v_t = d_1 (v_xx + v_yy) + b u - v u^2
    note: using the `PeriodicMLP` will introduce an extra singleton dimension when evaluating batched derivatives. to
    mitigate this, we `squeeze` everything. this will produce bad behavior `n_batch` is 1.
    """
    batch_jacobian = get_batch_jacobian(uv_hat)
    batch_hessian = get_batch_hessian(uv_hat)
    # index aliases: inputs are (x, y, t); outputs are (u, v)
    (_x, _y, _t), (_u, _v) = (0, 1, 2), (0, 1)
    def brusselator(params: FrozenDict, points: jnp.ndarray) -> dict:
        uv = uv_hat(params, points).squeeze()
        jacobian = batch_jacobian(params, points).squeeze()  # n_batch x n_output x n_input
        hessian = batch_hessian(params, points).squeeze()  # n_batch x n_output x n_input x n_input
        u, v = uv[:, _u], uv[:, _v]
        # BUGFIX: v_t previously read jacobian[:, _v, _x] (the x-derivative of v);
        # the residual requires the time derivative jacobian[:, _v, _t].
        u_t, v_t = jacobian[:, _u, _t], jacobian[:, _v, _t]
        u_xx, u_yy = hessian[:, _u, _x, _x], hessian[:, _u, _y, _y]
        v_xx, v_yy = hessian[:, _v, _x, _x], hessian[:, _v, _y, _y]
        return {'u': -u_t + d_0 * (u_xx + u_yy) + a - (1. + b) * u + v * u**2,
                'v': -v_t + d_1 * (v_xx + v_yy) + b * u - v * u**2}
    return brusselator
| newalexander/pinn-jax | pinn_jax/equations/simple_pdes.py | simple_pdes.py | py | 7,743 | python | en | code | 0 | github-code | 13 |
1794407761 | import types,string,sys,os
import logs
import traceback
import matches
# Verilog operator tokens recognized by the expression walkers (support_set etc.).
MathOps = ('/ ** ~| ~& ~^ !^ + - * / ^ % & | && || ! ~ < > << >> >>> == <= >= != ~&').split()
# Identifiers that clash with (System)Verilog keywords; add_sig prefixes them with 'x_'.
RESERVED = ['int']
class module_class:
    def __init__(self,Name,Kind='module'):
        """Create an empty module/interface container named `Name`.

        `Kind` is the emitted keyword ('module', 'interface', ...).
        """
        self.Module=Name
        self.Kind=Kind
        self.defines={}        # `define name -> expansion
        self.includes=[]       # `include file names
        self.nets={}           # net name -> (direction, width)
        self.netParams={}      # net name -> {param: value} (list-style widths)
        self.mems={}           # memory name -> (dir, wid1, wid2)
        self.genvars={}        # declared genvar names
        self.insts={}          # instance name -> instance_class
        self.alwayses=[]       # (sensitivity, body, kind) triples
        self.initials=[]       # initial-block bodies
        self.hard_assigns=[]   # (dst, src, strength, delay) continuous assigns
        self.generates=[]      # generate-region statements
        self.parameters={}     # parameter name -> expression
        self.localparams={}    # localparam name -> expression
        self.functions={}      # function name -> (width, local defs, body)
        self.pragmas=[]
        self.newvers=[]
        self.tasks={}          # task name -> (body, local defs)
        self.typedefs={}
        self.funcheads={}
        self.enums={}
        self.modports={}
        self.Vars={}
        self.interfaces={}     # interface inst -> (dir, type[, conns])
        self.version = 102
        self.stat_types={}     # instance type -> count (see create_stat_types)
        self.deepInstNames = False
        self.inventedNets = 0  # counter for auto-generated net names
        self.extTypes = []
        self.vsignals = []
        self.valwayses=[]
def cleanZeroNets(self):
return
def create_stat_types(self):
for Inst in self.insts:
Type = self.insts[Inst].Type
if Type not in self.stat_types:
self.stat_types[Type]=1
else:
self.stat_types[Type] +=1
def add_interface(self,Type,Inst,Dir):
self.interfaces[Inst]=(Dir,Type)
def add_typedef(self,Name,Items):
self.typedefs[Name]=Items
def add_enum(self,Name,Items):
self.enums[Name]=Items
def add_modport(self,Name,Items):
self.modports[Name]=Items
def add_generate(self,Statement):
self.generates.append(Statement)
def add_task(self,Name,Body,Ldefs=False):
self.tasks[Name]=(Body,Ldefs);
def add_always(self,Body,When=False,Kind='always'):
List = support_set(Body,False)+support_set(When,False)
self.checkDefined(List)
self.alwayses.append((When,Body,Kind))
def add_initial(self,Body):
self.initials.append(Body)
def add_include(self,Expr):
self.includes.append(Expr)
def add_define(self,Name,Expr):
self.defines[Name]=Expr
def add_net(self,Name,Dir,Wid):
self.add_sig(Name,Dir,Wid)
def add_sig(self,Name,Dir,Wid):
if Name in RESERVED: Name = 'x_'+Name
if (type(Name)is str)and('[' in Name):
Name = Name[:Name.index('[')]
if Name=='':
Name = 'net_%d'%self.inventedNets
self.inventedNets += 1
self.add_sig(Name,Dir,Wid)
return Name
ww = Dir.split()
if (type(Wid)is str)and(Wid in Dir):
self.nets[Name]=(Dir,Wid)
elif (len(ww)==2)and(ww[0] in ['input','output'])and(ww[1] in self.extTypes):
self.nets[Name]=(Dir,Wid)
elif Dir not in ['input wire','output wire','wire','reg','input','output','output reg','integer','inout','tri0','tri1','output reg signed','wire signed','signed wire','reg signed','output signed','input logic','output logic','logic','genvar','signed','unsigned','inout logic']:
if Dir not in self.enums:
logs.log_error('add_sig got of %s dir=%s'%(Name,Dir))
logs.pStack()
if Dir=='genvar':
self.genvars[Name]=True
return
if Dir=='input wire': Dir='input'
if Name=='repeat':
traceback.print_stack(None,None,logs.Flog)
sys.exit()
return
if type(Name) is list:
logs.log_error('add_sig got listName %s'%str(Name))
return
if (Dir=='wire')and(Name in self.nets)and(Wid==0):
return
if (Dir=='reg')and(Name in self.nets):
Pdir,Pwid=self.nets[Name]
if Pwid!=Wid:
logs.log_error('module %s sig %s got reg wid=%s was=%s'%(Name,Pwid,Wid,self.Module))
if 'reg' in Pdir:
return
if Pdir!='output':
logs.log_error('module %s sig %s got reg but was=%s'%(Name,Pdir,self.Module))
return
self.nets[Name]='output reg',Wid
return
if Name in self.nets:
WasDir,WasWid = self.nets[Name]
else:
WasDir='wire'
WasWid=0
if Dir=='wire':
Dir=WasDir
if isinstance(Wid,str):
try:
Wid = eval(Wid)
except:
Wid = Wid
if (Wid==0):
self.nets[Name] = Dir,WasWid
elif type(Wid) is tuple:
self.nets[Name]=Dir,Wid
elif (type(Wid) is int)and(Wid>1):
self.nets[Name]=Dir,(Wid-1,0)
elif (type(Wid)is int)and(Wid==1):
self.nets[Name]=Dir,0
elif(type(Wid)is list):
if Name not in self.netParams:
self.netParams[Name]={}
try:
for Prm,Val in Wid:
self.netParams[Name][Prm]=Val
except:
pass
elif (type(Wid)is str)and(Wid in Dir):
pass
else:
logs.log_err('add_sig %s (%s) got width %s'%(Name,Dir,Wid))
traceback.print_stack(None,None,logs.Flog)
def add_mem(self,Name,Dir,Wid,Wid2):
if type(Name)is list:
for NN in Name:
self.nets[NN]=(Dir,('double',Wid,Wid2))
else:
self.nets[Name]=(Dir,('double',Wid,Wid2))
def add_parameter(self,Name,Expr):
self.parameters[Name]=Expr
def add_localparam(self,Name,Expr):
self.localparams[Name]=Expr
def add_hard_assign(self,Dst,Src,Strength='',Delay=''):
if (Dst =='')or(Dst==False):
logs.log_err('add_hard_assign got dst="%s" and src="%s"'%(Dst,Src))
traceback.print_stack(None,None,logs.Flog)
return
L1 = support_set(Dst,False)+support_set(Src,False)
self.checkDefined(L1)
self.hard_assigns.append((Dst,Src,Strength,Delay))
def checkDefined(self,List):
for Net in List:
if '[' in Net:
Net = Net[:Net.index('[')]
if (not myExtras(Net))and(Net not in self.nets)and(Net not in self.parameters)and(Net[0] not in '0123456789')and(Net not in self.localparams)and(Net not in self.genvars):
logs.log_err('net %s used before defined'%Net)
def duplicate_inst(self,Inst,Inst2):
Obj = self.insts[Inst]
NewObj = instance_class(Obj.Type,Inst2)
self.insts[Inst2]=NewObj
for Pin in Obj.conns:
self.add_conn(Inst2,Pin,Obj.conns[Pin])
def add_inst_conns(self,Type,Inst,List):
self.add_inst(Type,Inst)
self.add_conns(Inst,List)
def add_inst(self,Type,Inst):
InstObj = instance_class(Type,Inst)
self.insts[Inst]=InstObj
return InstObj
def add_inst_param(self,Inst,Prm,Val):
Obj = self.insts[Inst]
Obj.add_param(Prm,Val)
def add_conn(self,Inst,Pin,Sig):
if (type(Sig)is list)and(len(Sig)==1):
Sig = Sig[0]
Obj = self.insts[Inst]
Sig1 = busify_x(Sig)
self.check_net_def(Sig1)
if Pin in RESERVED:
Pin = 'x_' + Pin
Obj.add_conn(Pin,Sig1)
def add_conns(self,Inst,List):
for (Pin,Sig) in List:
self.add_conn(Inst,Pin,Sig)
def add_function(self,Name,Wid,Defs,Statement):
self.functions[Name]=(Wid,Defs,Statement)
def dump(self,Fname = '?'):
if Fname == '?':
Fname = '%s.dump'%self.Module
elif (len(Fname)>0)and(Fname[0]=='+'):
Fname = '%s.%s.dump'%(self.Module,Fname[1:])
File = open(Fname,'w')
File.write('module %s\n'%(self.Module))
for Def in self.defines:
File.write(' define %s %s\n'%(Def,self.defines[Def]))
for Prm in self.parameters:
File.write(' parameter %s %s\n'%(Prm,self.parameters[Prm]))
for Sig in self.nets:
File.write(' net %s %s\n'%(Sig,self.nets[Sig]))
for HAS in self.hard_assigns:
File.write(' assign %s\n'%(str(HAS)))
for Func in self.functions:
File.write(' function %s %s\n'%(Func,self.functions[Func]))
for Func in self.tasks:
File.write(' task %s\n'%(Func,self.tasks[Func]))
for Alw in self.alwayses:
File.write(' always %s\n'%(str(Alw)))
for Alw in self.generates:
File.write(' generate %s\n'%(str(Alw)))
for Inst in self.insts:
self.insts[Inst].dump(File)
File.write('endmodule\n')
File.close()
def dump_new_style_header(self,Fout):
Fout.write('%s %s'%(self.Kind,pr_expr(self.Module)))
if list(self.parameters.keys())!=[]:
Pref=''
Fout.write(' #( ')
for Prm in self.parameters:
Fout.write('%sparameter %s = %s'%(Pref,pr_expr(Prm),pr_expr(self.parameters[Prm])))
if Prm in self.nets: self.nets.pop(Prm)
Pref=','
Fout.write(') ')
IOS=[]
NOIOS=[]
IFS=[]
NOIFS=[]
for Sig in self.interfaces:
Def = self.interfaces[Sig]
if Def[0]=='ext':
Dir=Def[1]
IFS.append((Dir,Sig))
else:
NOIFS.append((Sig,Def))
for Sig in self.nets:
Dir,Wid = self.nets[Sig]
if is_external_dir(Dir):
IOS.append((Sig,Dir,Wid))
elif reasonableSig(Sig):
NOIOS.append((Sig,Dir,Wid))
if (IOS==[])and(IFS==[]):
Fout.write(';\n')
else:
Fout.write('(\n')
IOS.sort()
Pref=''
for (Name,Dir,Wid) in IOS:
if is_double_def(Wid):
if (Wid[0]=='packed'):
Fout.write(' %s%s %s %s %s\n'%(Pref,pr_dir(Dir),pr_wid(Wid[1]),pr_wid(Wid[2]),pr_expr(Name)))
else:
Fout.write(' %s%s %s %s %s\n'%(Pref,pr_dir(Dir),pr_wid(Wid[1]),pr_expr(Name),pr_wid(Wid[2])))
else:
Fout.write(' %s%s %s %s\n'%(Pref,pr_dir(Dir),pr_wid(Wid),pr_expr(Name)))
Pref=','
for (Type,Inst) in IFS:
Fout.write(' %s%s %s\n'%(Pref,pr_dir(Type),pr_dir(Inst)))
Pref=','
Fout.write(');\n')
return NOIOS,NOIFS
def dump_old_style_header(self,Fout):
Fout.write('%s %s'%(self.Kind,pr_expr(self.Module)))
IOS=[]
NOIOS=[]
for Sig in self.nets:
Dir,Wid = self.nets[Sig]
if Dir in ['input wire','input','output','inout','output reg','output wire','inout wire']:
IOS.append((Sig,Dir,Wid))
else:
NOIOS.append((Sig,Dir,Wid))
if IOS==[]:
Fout.write(';\n')
else:
IOS.sort()
Fout.write('(%s\n'%pr_expr(IOS[0][0]))
for (Name,_,_) in IOS[1:]:
Fout.write(' ,%s\n'%pr_expr(Name))
Fout.write(');\n')
for Prm in self.parameters:
Fout.write('parameter %s = %s;\n'%(pr_expr(Prm),pr_expr(self.parameters[Prm])))
for (Name,Dir,Wid) in IOS:
if is_double_def(Wid):
if Wid[0]=='packed':
Fout.write('%s %s %s %s;\n'%(pr_dir(Dir),pr_wid(Wid[1]),pr_wid(Wid[2]),pr_expr(Name)))
else:
Fout.write('%s %s %s %s;\n'%(pr_dir(Dir),pr_wid(Wid[1]),pr_expr(Name),pr_wid(Wid[2])))
else:
Fout.write('%s %s %s;\n'%(pr_dir(Dir),pr_wid(Wid),pr_expr(Name)))
return NOIOS,[]
def dump_verilog(self,Fout,Style='new'):
if Style=='new':
NOIOS,NOIFS=self.dump_new_style_header(Fout)
else:
NOIOS,NOIFS=self.dump_old_style_header(Fout)
for Prm in self.includes:
Fout.write('`include "%s"\n'%(Prm))
for Prm in self.localparams:
Fout.write('localparam %s = %s;\n'%(pr_expr(Prm),pr_expr(self.localparams[Prm])))
for Name in self.typedefs:
List = self.typedefs[Name]
PP = pr_net_def(List,'','')
Fout.write('typedef %s %s;\n'%(PP,Name))
# Fout.write('typedef struct packed { %s } %s\n'%(PP,Name))
for Name in self.enums:
List = self.enums[Name]
if type(List)is tuple:
if List[0]=='width_singles':
Wid = List[1]
Str = logs.join(List[2],', ')
Fout.write('typedef enum logic [%s:%s] { %s } %s\n'%(Wid[0],Wid[1],Str,Name))
elif List[0]=='singles':
Str = logs.join(List[1],', ')
Bits = len(bin(len(List[1])))
if len(List[1])<3:
Fout.write('typedef enum logic { %s } %s;\n'%(Str,Name))
else:
Bits = len(bin(len(List[1])))-2
Fout.write('typedef enum logic [%d:0] { %s } %s;\n'%(Bits-1,Str,Name))
else:
logs.log_err('!!!typedef enum { %s } %s;\n'%(self.enums[Name],Name))
else:
logs.log_err('!!!typedef enum { %s } %s\n'%(self.enums[Name],Name))
for (Name,Dir,Wid) in NOIOS:
if Wid==Dir:
Fout.write('%s %s;\n'%(pr_dir(Dir),pr_expr(Name)))
elif is_double_def(Wid):
if Wid[0]=='packed':
Fout.write('%s %s %s %s;\n'%(pr_dir(Dir),pr_wid(Wid[1]),pr_wid(Wid[2]),pr_expr(Name)))
else:
Fout.write('%s %s %s %s;\n'%(pr_dir(Dir),pr_wid(Wid[1]),pr_expr(Name),pr_wid(Wid[2])))
else:
Fout.write('%s %s %s;\n'%(pr_dir(Dir),pr_wid(Wid),pr_expr(Name)))
for (Name,Def) in NOIFS:
Type=Def[1]
Conns=Def[2]
Str=''
res=verilog_conns(Conns)
Fout.write('%s %s(%s);\n'%(Type,Name,logs.join(res,',')))
if self.vsignals!=[]:
for Sig in self.vsignals[0]:
Fout.write('VSIGNAL %s\n' % str(Sig))
if self.valwayses!=[]:
for Sig in self.valwayses[0]:
Fout.write('VALWAYS %s\n' % str(Sig))
for Name in self.mems:
(Dir,Wid1,Wid2)=self.mems[Name]
Fout.write('%s %s %s %s;\n'%(pr_dir(Dir),pr_wid(Wid1),pr_expr(Name),pr_wid(Wid2)))
for (A,B,C,D) in self.pragmas:
Fout.write('(* %s lnum=%s *)\n'%(A,C))
for Name in self.modports:
List = self.modports[Name]
res=[]
for XX in List:
if type(XX)is tuple:
Str = '%s %s'%(XX[0],XX[1])
else:
Str = XX
res.append(Str)
Big = logs.join(res,', ')
Fout.write('modport %s ( %s );\n'%(Name,Big))
for Name in self.genvars:
Fout.write('genvar %s;\n'%(Name))
for (Dst,Src,Strength,Dly) in self.hard_assigns:
if (Dst=='//'):
Fout.write('// %s\n'%Src)
else:
Src1 = clean_br(pr_expr(Src))
if len(Src1)>120:
Src2 = Src1.split('$$$')
Src1 = '\n '.join(Src2)
else:
Src1 = Src1.replace('$$$','')
Src2 = splitLong(Src1)
Fout.write('assign %s %s %s = %s;\n'%(pr_strength(Strength),pr_dly(Dly),pr_expr(Dst),Src2))
for Inst in self.insts:
self.insts[Inst].dump_verilog(Fout)
for Task in self.tasks:
self.dump_task(Task,Fout)
for Func in self.functions:
self.dump_function(Func,Fout)
for Initial in self.initials:
self.dump_initial(Initial,Fout)
for Generate in self.generates:
Fout.write('generate\n')
if Generate[0] in ['for','if','ifelse']:
Statement = pr_stmt(Generate,' ',True)
Fout.write('%s\n'%Statement)
elif Generate[0] in ['named_begin']:
Statement = pr_stmt(Generate,' ',True)
Fout.write('%s\n'%Statement)
else:
for Item in Generate:
Statement = pr_stmt(Item,' ',True)
Fout.write('%s\n'%Statement)
Fout.write('endgenerate\n')
for Always in self.alwayses:
if Always:
dump_always(Always,Fout)
Fout.write('end%s\n\n'%self.Kind)
def dump_function(self,Func,Fout):
X = self.functions[Func]
Fout.write('function %s %s;\n'%(pr_wid(X[0]),Func))
if X[1]!=[]:
for Item in X[1]:
if (type(Item)is list)and(len(Item)==4):
Fout.write('%s %s %s;\n'%(Item[3],pr_wid(Item[2]),Item[1]))
else:
logs.log_err('dump_function ldefs %s '%(str(Item)))
Fout.write(pr_stmt(X[2],''))
Fout.write('endfunction\n')
def dump_task(self,Task,Fout):
X = self.tasks[Task]
if type(X)is list:
Fout.write('task %s;\nbegin\n'%Task)
Y = pr_stmt(X,'')
Fout.write(Y)
Fout.write('end\nendtask\n')
return
if type(X)is tuple:
if len(X)==2:
Body=X[0]
Ldefs = X[1]
Fout.write('task %s;\n'%Task)
if Ldefs:
for Item in Ldefs:
if (type(Item)is tuple)and(len(Item)==3):
Fout.write('%s %s %s;\n'%(Item[2],pr_wid(Item[1]),Item[0]))
else:
logs.log_err('dump_task ldefs %s %s'%(Item,Ldefs))
Fout.write('begin\n')
Fout.write(pr_stmt(Body,''))
Fout.write('end\nendtask\n')
return
logs.log_err('dump_task %s %s'%(Task,X))
def dump_initial(self,Initial,Fout):
if len(Initial)==0:
return
Statement = pr_stmt(Initial,' ',True)
if type(Initial)is tuple:
Fout.write('initial %s'%(Statement.lstrip()))
else:
Fout.write('initial begin\n')
Fout.write('%s'%Statement)
Fout.write('end\n')
def get_width(self,Net):
if type(Net)is str:
if Net in self.nets:
Dir,Wid = self.nets[Net]
return Wid
else:
logs.log_err('get_width got unexist net %s'%Net)
return 0
else:
logs.log_err('get_width expects simple token ,it got %s'%str(Net))
return 0
def check_interfaces(self):
Sigs = {}
for Inst in self.insts:
Obj = self.insts[Inst]
for Pin in Obj.conns:
Sig = Obj.conns[Pin]
if type(Sig)is str:
Sigs[Sig]=1
Dels=[]
for Inst in self.insts:
if Inst in Sigs:
self.interfaces[Inst]=('int',self.insts[Inst].Type,self.insts[Inst].conns)
Dels.append(Inst)
for Inst in Dels:
self.insts.pop(Inst)
def check_net_def(self,Net):
if not Net: return
if type(Net)is int: return
if type(Net)is str:
if "'" in Net: return
if Net in self.insts:
return
if Net in self.interfaces:
return
Sig1 = busify_x(Net)
if Sig1!=Net:
self.check_net_def(Sig1)
return
if Net not in self.nets:
self.add_sig(Net,'wire',0)
return
if type(Net)is tuple:
self.check_net_def(list(Net))
return
if type(Net)is list:
if Net[0]=='dotted':
return
if Net[0]=='define':
return
if Net[0]=='sub_slice':
return
if Net[0] in ['functioncall','funccall']:
return
if Net[0]=='subbit':
Name = Net[1]
Ind = make_int(Net[2])
if type(Ind)is int:
if Name not in self.nets:
self.add_sig(Name,'wire',(Ind,Ind))
return
(Dir,WW) = self.nets[Name]
if (type(WW)is tuple)and(len(WW)==2):
(H,L)=WW
try:
H = max(H,Ind)
except:
pass
try:
L = min(L,Ind)
except:
pass
self.nets[Name]=(Dir,(H,L))
elif (type(WW)is tuple)and(len(WW)==3):
if WW[0] not in ['packed','double']:
logs.log_error('definition(0) of net %s dir=%s and wid "%s" is wrong (%s)'%(Name,Dir,WW,Net))
elif isinstance(WW,int):
self.nets[Name]=(Dir,WW)
else:
logs.log_error('definition(1) of net %s dir=%s and wid "%s" is wrong (%s)'%(Name,Dir,WW,Net))
return
if Net[0]=='subbus':
Name = Net[1]
if len(Net)==3:
Ind0 = self.compute_int(Net[2][0])
Ind1 = self.compute_int(Net[2][1])
else:
Ind0 = (Net[2])
Ind1 = Net[3]
if Name not in self.nets:
logs.log_info('declared new bus, deduced from connections %s: wire [%s:%s] %s;'%(self.Module,Ind0,Ind1,Name))
self.add_sig(Name,'wire',(Ind0,Ind1))
return
(Dir,WW) = self.nets[Name]
if type(WW)is tuple:
if WW[0]=='double':
return
(H,L)=WW
# print('>>>',H,Ind1,L,Ind0)
try:
H = max(H,Ind1)
except:
H = Ind1
try:
L = min(L,Ind0)
except:
L = Ind0
self.nets[Name]=(Dir,(H,L))
else:
logs.log_warning('check net def got width name=%s dir=%s ww=%s'%(Name,Dir,WW))
self.nets[Name]=(Dir,WW)
return
if Net[0]=='curly':
if Net[1]=='repeat':
for NN in Net[2:]:
self.check_net_def(NN)
return
for NN in Net[1:]:
self.check_net_def(NN)
return
if Net[0] in ['const','hex','bin','dig']:
return
if Net[0] == 'repeat':
self.check_net_def(Net[2])
return
if Net[0] in MathOps+['question','?']:
for NN in Net[1:]:
self.check_net_def(NN)
return
logs.log_error('check_net_def module=%s net=%s (%s)'%(self.Module,Net,type(Net)))
traceback.print_stack(None,None,logs.Flog)
def del_inst(self,Inst):
Inst = clean_inst(Inst)
self.insts.pop(Inst)
def relax_instance_wires_names(self):
Insts = self.insts.keys()
for Inst in Insts:
Inst2 = self.relax_name(Inst)
if Inst2!=Inst:
Obj = self.insts[Inst]
self.insts.pop(Inst)
self.insts[Inst2]=Obj
Obj.Name=Inst2
# self.duplicate_inst(Inst,Inst2)
Insts = self.insts.keys()
Renames={}
for Inst in Insts:
Obj = self.insts[Inst]
Pins = Obj.conns.keys()
for Pin in Pins:
Net = Obj.conns[Pin]
Net1 = busify_x(self.relax_name(Net,False))
if Net!=Net1:
Obj.conns[Pin]=Net1
Renames[Net]=Net1
for Net in Renames:
if Net in self.nets:
self.nets.pop(Net)
for Net in Renames:
Net1 = Renames[Net]
self.check_net_def(Net1)
def recompute_instance_wires(self):
Wires = {}
for Inst in self.insts:
Obj=self.insts[Inst]
for Pin in Obj.conns:
Net = Obj.conns[Pin]
if not Net:
pass
elif type(Net)is str:
if "'" in Net:
pass
elif Net not in Wires:
Wires[Net]=(0,0)
elif isinstance(Net,(tuple,list)):
if Net[0] in ['subbit','subbus']:
Name = Net[1]
if Net[0]=='subbit':
Hi = int(Net[2])
Lo = Hi
elif Net[0]=='subbus':
Hi = int(Net[2])
Lo = int(Net[3])
if Name in Wires:
WasHi=Wires[Name][0]
WasLo=Wires[Name][1]
Hi = max(Hi,WasHi)
Lo = min(Lo,WasLo)
Wires[Name]=(Hi,Lo)
else:
logs.log_err('net %s is strange for recompute_instance_wires'%(str(Net)))
Nets = self.nets.keys()
for Net in Nets:
(Dir,WW)=self.nets[Net]
if (Dir=='wire')and(Net not in Wires):
logs.log_info('removing not in wires %s'%(Net))
self.nets.pop(Net)
for Wire in Wires:
if Wire not in self.nets:
Hi,Lo = Wires[Wire]
if Hi==0:
self.add_sig(Wire,'wire',0)
else:
self.add_sig(Wire,'wire',(Hi,Lo))
def prepareNetTable(self):
netTable={}
for Inst in self.insts:
Obj = self.insts[Inst]
Type = Obj.Type
for Pin in Obj.conns:
NN = Obj.conns[Pin]
if not ((type(NN)is list)and(NN[0]=='curly')):
Net = hashit(Obj.conns[Pin])
if Net not in netTable:
netTable[Net]=[]
netTable[Net].append((Inst,Type,Pin))
self.netTable = netTable
return netTable
def relax_name(self,Name,Simple=True):
if not Name:
return Name
if type(Name)is list:
if Name[0] in ['subbit','subbus']:
Name[1]=relax_name(Name[1],Simple)
return Name
if type(Name)is not str:
return Name
if Name[0]=='\\':
Name=Name[1:]
for Char in '/.*':
Name = Name.replace(Char,'_')
if Simple and ('[' in Name):
Name = Name.replace('[','_')
Name = Name.replace(']','_')
return Name
def fix_conn_table(self,Net):
Sigi = self.relax_name(Net,True)
Supset = support_set(Sigi)
for Sigx in Supset:
if Sigx in self.netsTable:
List = self.netsTable[Sigx]
After=[]
for Inst,Type,Pin in List:
if Inst in self.insts:
if Pin in self.insts[Inst].conns:
Con = self.insts[Inst].conns[Pin]
if Con==Net:
After.append((Inst,Type,Pin))
if len(After)!=len(List):
self.netsTable[Sigx]=After
def create_connections_table(self):
self.netsTable={}
for Inst in self.insts:
Obj = self.insts[Inst]
Type = Obj.Type
for Pin in Obj.conns:
Sig = Obj.conns[Pin]
Sigi = self.relax_name(Sig,True)
Supset = support_set(Sigi)
for Sigx in Supset:
if '[' in Sigx:
Sigx = Sigx[:Sigx.index('[')]
if Sigx not in self.netsTable:
self.netsTable[Sigx] = [(Inst,Type,Pin)]
else:
self.netsTable[Sigx].append((Inst,Type,Pin))
def compute_int(self,Item):
if type(Item)is int:
return Item
if type(Item)is str:
if Item[0] in '0123456789':
return int(Item)
if Item in self.parameters:
X = self.parameters[Item]
return self.compute_int(X)
if Item in self.localparams:
X = self.localparams[Item]
return self.compute_int(X)
if type(Item)is list:
if Item[0] in ['-','+','*','/']:
A = self.compute_int(Item[1])
B = self.compute_int(Item[2])
return eval('%s%s%s'%(A,Item[0],B))
if Item[0] in ['dig']:
return self.compute_int(Item[2])
if type(Item)is tuple:
if Item[0] in ['-','+','*','/']:
A = self.compute_int(Item[1])
B = self.compute_int(Item[2])
return eval('%s%s%s'%(A,Item[0],B))
logs.log_err('compute_int failed on "%s" %s'%(str(Item),self.Module),False)
return 0
def createUsedRtlNets(self):
self.usedRtlNets = {}
for Edge,Body,_ in self.alwayses:
Sup = support_set(Body,True)
for Item in Sup:
if Item not in self.usedRtlNets:
self.usedRtlNets[Item] = ['rtl']
for (Dst,Src,_,_) in self.hard_assigns:
Sup = support_set(Src,False)
for Item in Sup:
if Item in self.usedRtlNets:
self.usedRtlNets[Item] = ['rtl','hard']
else:
self.usedRtlNets[Item] = ['hard']
def dump_always(Always,Fout):
    """Render an (edge-list, body, kind) always-block triple as Verilog text.

    Writes to `Fout` when it is truthy and returns the generated string;
    logs an error and returns '' for malformed tuples.
    """
    if len(Always)==3:
        Timing = pr_timing(Always[0])
        Statement = pr_stmt(Always[1],' ',True)
        Kind=Always[2]
        # '$$$' markers are soft line-break hints left by the printers: past
        # column 80 they become a real newline, otherwise they are dropped.
        while '$$$' in Statement:
            ind = Statement.index('$$$')
            if ind>80:
                Repl=' \n'
            else:
                Repl = ''
            # NOTE(review): skips 4 chars although '$$$' is 3 -- this also eats
            # the character that follows the marker; confirm that is intended.
            Statement = Statement[:ind]+Repl+Statement[ind+4:]
        Str = ''
        if Timing=='':
            # Fout.write('%s begin\n'%Kind)
            Str += '%s begin\n'%Kind
        else:
            # Fout.write('%s @(%s) begin\n'%(Kind,Timing))
            Str += '%s @(%s) begin\n'%(Kind,Timing)
        # Fout.write('%s'%Statement)
        # Fout.write('end\n')
        Str += Statement
        Str += 'end\n'
        if Fout: Fout.write(Str)
        return Str
    logs.log_err('dump_always %d %s'%(len(Always),Always))
    return ''
# Operator tokens that may appear at the head of a parsed expression list.
OPS = ['/','~^','^','=','>=','=>','*','/','<','>','+','-','~','!','&','&&','<=','>>','>>>','<<','||','==','!=','|']
# Structural keywords that support_set must never treat as signal names.
KEYWORDS = ('sub_slice sub_slicebit taskcall functioncall named_begin unsigned if for ifelse edge posedge negedge list case default double_sub').split()
def support_set(Sig,Bussed=True):
    """Return the sorted, de-duplicated list of signal names `Sig` depends on.

    `Bussed` controls whether bit/part selects keep their '[...]' suffix.
    """
    # sorted(set(...)) replaces the original sort-then-pop-adjacent-duplicates
    # loop (O(n^2) list pops); the result -- sorted unique names -- is identical.
    return sorted(set(support_set__(Sig, Bussed)))
def support_set__(Sig,Bussed):
if (Sig=='')or not Sig: return []
if (Sig=='$unconnected'): return []
if type(Sig) is int: return []
if type(Sig) is str:
if Sig[0]=='`': return []
if Sig in OPS : return []
if Sig in MathOps : return []
if Sig in KEYWORDS : return []
if Sig[0] in '0123456789': return []
return [Sig]
if isinstance(Sig,(tuple,list)):
if len(Sig)==1:
return support_set__(Sig[0],Bussed)
if Sig[0] in ['const','bin','hex','dig','taskcall']:
return []
if Sig[0]=='curly':
Ind = 1
if Sig[1]=='repeat': Ind=3
res=[]
for X in Sig[Ind:]:
XY = support_set__(X,Bussed)
res.extend(XY)
return res
if Sig[0]=='repeat':
return support_set__(Sig[2],Bussed)
if Sig[0]=='subbit':
Sub = support_set__(Sig[2],Bussed)
if Bussed:
if Sub==[]:
return ['%s[%s]'%(Sig[1],Sig[2])]
else:
return [Sig[1]]+Sub
else:
return [Sig[1]]+Sub
if Sig[0]=='subbus':
if Bussed:
if len(Sig)==3:
return ['%s[%s:%s]'%(Sig[1],Sig[2][0],Sig[2][1])]
elif len(Sig)==4:
return ['%s[%s:%s]'%(Sig[1],Sig[2],Sig[3])]
else:
logs.log_err('untreated (712)support set expr %s'%(str(Sig)))
return []
else:
return [Sig[1]]
if Sig[0] in OPS:
return support_set__(Sig[1:],Bussed)
if Sig[0]in ['question','?']:
while len(Sig)<4: Sig.append('err')
return support_set__(Sig[1],Bussed)+support_set__(Sig[2],Bussed)+support_set__(Sig[3],Bussed)
res=[]
if Sig[0]=='named_begin':
Sigg = Sig[2:]
else:
Sigg = Sig[:]
for X in Sigg:
XY = support_set__(X,Bussed)
res.extend(XY)
return res
logs.log_err('untreated support set expr %s'%(str(Sig)))
return []
def hasit(Net):
    """Return a hashable version of `Net`: lists become tuples, everything
    else passes through unchanged.

    NOTE(review): prepareNetTable calls `hashit`, which does not match this
    name -- confirm which spelling is intended.
    """
    return tuple(Net) if isinstance(Net, list) else Net
class instance_class:
    """One instantiated module: its type, instance name, pin connections,
    parameter overrides, and special annotations."""
    def __init__(self,Type,Name):
        self.Type=Type
        self.Name=Name
        self.conns={}     # pin name -> connected signal/expression
        self.params={}    # parameter name -> override value
        self.specials={}
    def add_conn(self,Pin,Sig):
        """Connect `Sig` to `Pin`, warning when an existing connection is replaced."""
        if Pin in self.conns:
            logs.log_warning('replacing connection pin=%s inst=%s was=%s now=%s'%(Pin,self.Name,self.conns[Pin],Sig))
        self.conns[Pin]=Sig
    def add_param(self,Prm,Val):
        self.params[Prm]=Val
    def dump(self,File):
        """Write a plain-text summary of the instance to an open file."""
        File.write('instance %s %s\n'%(self.Type,self.Name))
        for Pin in self.conns:
            File.write(' conn pin=%s sig=%s\n'%(Pin,self.conns[Pin]))
    def dump_verilog(self,Fout):
        """Emit the instance as Verilog: by-position when pins are integer
        keys, by-name otherwise; long connection lists are wrapped."""
        Prms = pr_inst_params(self.params)
        Many = pr_width_param(self.params)
        Fout.write('%s %s %s %s'%(pr_expr(self.Type),Prms,pr_expr(self.Name),Many))
        # BUGFIX: `self.conns.keys()==[]` is always False under Python 3
        # (dict_keys never compares equal to a list), which made this
        # no-connections early exit dead code.
        if not self.conns:
            Fout.write(';\n')
            return
        Fout.write('(')
        Pref=' '
        res=[]
        if 0 in self.conns:
            # positional connections: keyed by integer pin index
            Pins = list(self.conns.keys())
            Pins.sort()
            for Pin in Pins:
                Expr = pr_expr(self.conns[Pin])
                res.append('%s'%(Expr))
        else:
            res=verilog_conns(self.conns)
        res.sort()
        try1 = logs.join(res,', ')
        if (len(try1)<80):
            Fout.write('%s);\n'%(try1))
        else:
            try2 = logs.join(res,'\n%s,'%Pref)
            Fout.write('%s);\n'%(try2))
def pr_width_param(Dir):
    """Return '#(expr)' for the positional 'inst_width' parameter, or ''."""
    if 'inst_width' not in Dir:  # membership test on the dict directly
        return ''
    return '#(%s)'%pr_expr(Dir['inst_width'])
def pr_inst_params(Dir):
    """Format instance parameter overrides as a Verilog '#(...)' clause.

    Integer-keyed dicts (0, 1, 2, ...) are treated as positional parameters;
    otherwise named '.PARAM(value)' associations are produced. The pseudo
    parameter 'inst_width' is handled by pr_width_param and skipped here.
    Returns '' when there is nothing to print.
    """
    # NOTE: the original guard `Dir.keys() == []` is always False under
    # Python 3; `not Dir` restores the intended empty-dict early exit.
    if not Dir:
        return ''
    if 0 in Dir:
        # positional parameters: consecutive integer keys starting at 0
        res = []
        i = 0
        while i in Dir:
            res.append(str(Dir[i]))
            i += 1
        return '#(%s)'%(logs.join(res,', '))
    res=[]
    for Prm in Dir:
        if Prm != 'inst_width':
            X = '.%s(%s)'%(pr_expr(Prm),pr_expr(Dir[Prm]))
            res.append(X)
    if res==[]: return ''
    return '#(%s)'%(logs.join(res,', '))
def pr_timing(List):
    """Render an always-block sensitivity list; multiple events are joined
    with ' or '. Non-list input is printed as a single expression."""
    if type(List)is list:
        if List[0]=='list':
            events = [str(pr_expr(item)) for item in List[1:]]
            return logs.join(events,' or ')
        if len(List)==1:
            return pr_timing(List[0])
    return str(pr_expr(List))
def pr_stmt(List,Pref='',Begin=False):
if List==None: return '%s;'%Pref
if List=='': return '%s;'%Pref
if type(List)is tuple:
return pr_stmt(list(List),Pref,Begin)
if (type(List)is int):
return str(List)
if (List==[]):
return 'begin /* empty */ end '
if (type(List)is list)and(len(List)==1):
return pr_stmt(List[0],Pref,Begin)
if (type(List)is list)and(len(List)>2)and(List[0]=='list')and(List[1]=='list'):
logs.log_warning('ilia double list def')
return pr_stmt(List[2],Pref,Begin)
if (type(List)is list)and(List[0]=='list'):
if len(List)==2:
return pr_stmt(List[1],Pref,Begin)
if Begin:
Res=''
Pref2=Pref
else:
Res='%sbegin\n'%Pref
Pref2=Pref+' '
for X in List[1:]:
Y = pr_stmt(X,Pref2)
Res = Res + Y
if Begin:
return Res
else:
return Res+'%send\n'%Pref
elif type(List)is list:
if List[0]=='comment':
res = '%s//'%Pref
for II in List[1:]:
X = pr_expr(II)
res += ' %s'%X
return '%s\n'%(res)
if List[0]=='list':
logs.log_warning('ilia double list def')
List = List[1:]
if List[0] in ['genvar','integer']:
Kind = List[0]
LL = pr_expr(List[1])
return '%s%s %s ;\n'%(Pref,Kind,LL)
if List[0]=='assign':
Dst = pr_expr(List[1])
Src = pr_expr(List[2])
return '%sassign %s = %s;\n'%(Pref,Dst,Src)
if List[0]=='fork':
CC = ['list']+List[1:]
Fork = pr_stmt(CC,Pref+' ',True)
return '%sfork\n%s%sjoin\n'%(Pref,Fork,Pref)
if List[0]=='$finish':
return List[0]
if List[0] == 'dotted':
Txt = pr_expr(List)
return '%s%s;\n'%(Pref,Txt)
if List[0] == 'wait':
Dly = clean_br(pr_expr(List[1]))
return '%swait(%s);\n'%(Pref,Dly)
if List[0] == '#':
Dly = clean_br(pr_expr(List[1]))
return '%s#%s;\n'%(Pref,Dly)
if List[0] in ['<=','=']:
Dst = clean_br(pr_expr(List[1]))
Src =split_expr(List[2],Pref+' ')
return '%s%s %s %s;\n'%(Pref,Dst,List[0],Src)
if List[0]=='ifelse':
if len(List)>4:
logs.log_err('ifelse structure has too many items %d > %d %s'%(len(List),4,str(List)))
Cond = clean_br(pr_expr(List[1]))
Yes = pr_stmt(List[2],Pref+' ',True)
if len(List)<4:
logs.log_err('illegal ifelse len=%d'%len(List))
return '<alength is not 4><><><>'
elif List[3]==[]:
logs.log_err('illegal ifelse len=%d'%len(List))
return '<><>empty<>\n'
elif not List[3]:
return '<>fls<><>tooshort'
elif List[3][0] in ['ifelse','if']:
No = pr_stmt(List[3],Pref,True)
No = No.lstrip()
return '%sif(%s) begin\n%s%send else %s'%(Pref,Cond,Yes,Pref,No)
else:
No = pr_stmt(List[3],Pref+' ',True)
return '%sif(%s) begin\n%s%send else begin\n%s%send\n'%(Pref,Cond,Yes,Pref,No,Pref)
if List[0]=='if':
if len(List)>3:
logs.log_err('if structure has too many items %d > %d %s'%(len(List),3,str(List)))
Cond = clean_br(pr_expr(List[1]))
Yes = pr_stmt(List[2],Pref+' ',True)
return '%sif(%s) begin\n%s%send\n'%(Pref,Cond,Yes,Pref)
if List[0] in ['functioncall','funccall']:
res = map(pr_expr,List[2])
res2 = logs.join(res,',')
return '%s%s(%s);\n'%(Pref,List[1],res2)
if List[0]in ['unique_case','case','casez','casex']:
Case = List[0].replace('unique_','unique ')
Cond = clean_br(pr_expr(List[1]))
Str = '%s%s (%s)\n'%(Pref,Case,Cond)
LLL = List[2]
if (len(LLL)>0)and(LLL[0]=='list'):
LLL = LLL[1:]
for Item in LLL:
if len(Item)==2:
Switch,Stmt=Item
if (type(Switch)is list)and(len(Switch)==1):
Str += '%s%s: '%(Pref+' ',pr_expr(Switch[0]))
else:
Str += '%s%s: '%(Pref+' ',pr_expr(Switch))
X = pr_stmt(Stmt,Pref+' ')
X = X.lstrip()
if (X==''): X = ';'
Str += X
elif (len(Item)==4)and(Item[0]=='default'):
Str += '%sdefault: ;\n'%(Pref+' ')
else:
logs.log_err('module_class: bad case item "%s"'%str(Item))
Str = Str + '%sendcase\n'%Pref
return Str
if List[0]=='while':
Cond = clean_br(pr_expr(List[1]))
X = pr_stmt(List[2],Pref+' ',True)
Str = '%swhile(%s)begin\n'%(Pref,Cond)
Str += X
Str += '%send\n'%(Pref)
return Str
if List[0]=='for':
Cond = clean_br(pr_expr(List[2]))
X = pr_stmt(List[4],Pref+' ',True)
A1 = pr_assign_list(List[1])
A2 = pr_assign_list(List[3])
Str = '%sfor(int %s;%s;%s)begin\n'%(Pref,A1,Cond,A2)
Str += X
Str += '%send\n'%(Pref)
return Str
if List[0]=='taskcall':
if List[1]=='break': return ''
if List[2]==[]:
Str = '%s%s;'%(Pref,List[1])
else:
LL = list(map(pr_expr,List[2]))
Pars = ','.join(LL)
Str = '%s%s(%s);\n'%(Pref,List[1],Pars)
return Str
if List[0]=='disable':
Str = '%sdisable %s;\n'%(Pref,List[1])
return Str
if List[0]=='release':
Str = '%srelease %s;\n'%(Pref,List[1])
return Str
if List[0]=='force':
Str = '%sforce %s = %s;\n'%(Pref,List[1],clean_br(pr_expr(List[2])))
return Str
if List[0]=='named_begin':
X = pr_stmt(List[2],Pref+' ',True)
Str = '%sbegin :%s\n%s%send\n'%(Pref,List[1],X,Pref)
return Str
if List[0]=='empty_begin_end':
Str = '%sbegin end\n'%(Pref)
return Str
if List[0]=='instance':
Params = List[3]
if type(Params)is list:
PP = []
for (A,B) in Params:
PP.append('.%s(%s)'%(pr_expr(A),pr_expr(B)))
Prms = '#(%s)'%logs.join(PP,',')
else:
Prms = pr_inst_params(List[3])
Str0 = '%s %s %s('%(pr_expr(List[1]),Prms,pr_expr(List[2]))
Conns = List[4]
Pref=' '
res=[]
if ('pin0' in Conns)and('pin1' in Conns):
Pins = Conns.keys()
Pins.sort()
for Pin in Pins:
Expr = pr_expr(Conns[Pin])
res.append('%s'%(Expr))
else:
res=verilog_conns(Conns)
try1 = logs.join(res,', ')
if (len(try1)<80):
Str0 += '%s);\n'%(try1)
else:
try2 = logs.join(res,'\n%s,'%Pref)
Str0 += '%s);\n'%(try2)
return Str0
if List[0]=='always':
Str = dump_always([List[1],List[2],List[0]],False)
return Str
Vars = matches.matches(List,'declare ? ? ?')
if Vars:
if Vars[2]==0: Vars[2]=''
elif (Vars[2][0]=='double'):
return '%s%s %s %s %s;\n'%(Pref,Vars[0],pr_wid(Vars[2][1]),Vars[1],pr_wid(Vars[2][2]))
else:
return '%s%s %s %s;\n'%(Pref,Vars[0],pr_wid(Vars[2]),Vars[1])
if List[0]=='return':
return 'return %s;\n'%pr_expr(List[1])
if List[0]=='declare':
Vars = matches.matches(List,'declare wire ? ?')
if Vars:
return 'wire %s;\n'%Vars[0]
if List[0]=='assigns':
Vars = matches.matches(List[1],'= ? ?',False)
if Vars:
Dst = pr_expr(Vars[0])
Src = pr_expr(Vars[1])
return 'assign %s = %s;\n'%(Dst,Src)
if List[0] in ['logic','reg']:
if (type(List[1])is str)or(List[1]==0):
if List[1] in ['0',0]:
return ' %s %s;\n'%(List[0],List[2])
elif List[1][0]=='width':
Hi = pr_expr(List[1][1])
Lo = pr_expr(List[1][2])
return ' %s [%s:%s] %s;\n'%(List[0],Hi,Lo,List[2])
else:
return ' reg %s;\n'%str(List[1])
if List in ['ILIA_FALSE','ILIA_TRUE']: return List
if type(List)is str:
if List=='empty_begin_end': return ''
if (type(List)is list)and(len(List)==1):
return pr_stmt(List[0])
if type(List)is tuple:
return pr_stmt(list(List))
if (type(List)is list)and(len(List)>1):
Str = '%sbegin\n' % Pref
for Item in List:
Str += pr_stmt(Item,Pref+' ',True)
return '%s%send\n'%(Str,Pref)
logs.log_err('module_class: untreated for prnt stmt %s %s'%(Pref,List))
traceback.print_stack(None,None,logs.Flog)
return '[error %s]'%str(List)
def split_expr(List, Pref):
    """Print *List* as an expression with redundant outer parens removed.

    *Pref* (an indentation prefix) is accepted for interface
    compatibility but not used by this implementation.
    """
    return clean_br(pr_expr(List))
def pr_assign_list(List):
    """Render a (possibly nested) list of assignments as 'dst=src' text.

    Handles a leading 'list' tag, single '<='/'=' assignment nodes,
    and recurses into plain lists, joining the results with ', '.
    Tuples are returned unchanged; anything else is logged and stringified.
    """
    if List==[]:
        return ''
    res = []
    # A 'list' tag just wraps the real items.
    if List[0]=='list':
        return pr_assign_list(List[1:])
    # A single assignment node: print both sides.
    if List[0] in ['<=','=']:
        Dst = pr_expr(List[1])
        Src = clean_br(pr_expr(List[2]))
        return '%s=%s'%(Dst,Src)
    if type(List) is tuple:
        # Tuples pass through untouched (caller handles them).
        return List
    if type(List) is list:
        # Plain list: render each element and comma-join.
        for Item in List:
            Res = pr_assign_list(Item)
            res.append(Res)
        return logs.join(res,', ')
    logs.log_error('pr_assign_list got "%s" ' % str(List))
    return str(List)
def clean_br(Txt):
    """Strip one pair of redundant outer parentheses from an expression.

    Returns *Txt* unchanged when it is empty, not fully wrapped, or
    contains a ternary '?' (kept wrapped so precedence survives when the
    text is re-embedded in a larger expression).
    """
    Txt = str(Txt)
    if Txt=='':
        return ''
    if (Txt[0]=='(')and(Txt[-1]==')')and('?' not in Txt):
        # Only strip when the first '(' actually matches the final ')';
        # the original blindly sliced, mangling '(a)*(b)' into 'a)*(b'.
        depth = 0
        for pos, ch in enumerate(Txt):
            if ch == '(':
                depth += 1
            elif ch == ')':
                depth -= 1
                if depth == 0 and pos != len(Txt) - 1:
                    return Txt
        return Txt[1:-1]
    return Txt
def pexpr(Src):
    """Shorthand: render *Src* and drop redundant outer parentheses."""
    rendered = pr_expr(Src)
    return clean_br(rendered)
def pr_dly(Dly):
    """Render a delay specification list [(A, B), ...] as '#(b0, b1, ...)'.

    Empty input yields ''.
    """
    if len(Dly)==0:
        return ''
    # NOTE(review): unreachable — len('')==0 is already caught above.
    if Dly=='':
        return ''
    res=[]
    # Only the second member of each pair is printed; the first is ignored.
    for (A,B) in Dly:
        res.append(pr_expr(B))
    return '#(%s)'%(logs.join(res,', '))
def pr_strength(Strength):
    """Render a drive-strength pair as '(s0,s1)'; '' passes through empty."""
    if Strength == '':
        return ''
    first, second = Strength
    return '(%s,%s)' % (first, second)
def pr_dir(Dir):
    """Normalize a port-direction keyword for SystemVerilog output.

    'signed'/'signed wire' become 'wire signed', 'output' becomes
    'output logic'; everything else is passed through unchanged.
    """
    if Dir in ('signed wire', 'signed'):
        return 'wire signed'
    if Dir == 'output':
        return 'output logic'
    # Dir = Dir.replace('logic','wire')
    return Dir
def pr_wid(Wid):
    """Render a width definition as Verilog bracket text.

    Accepts 0 (scalar → ''), an int (→ '[n:0]'), a 2-element [hi, lo]
    pair, or 3-element 'double'/'packed' forms that concatenate two
    bracket groups.  None and other 3-element shapes are logged as errors.
    """
    if Wid==None:
        logs.log_err('wid is none error')
        traceback.print_stack(None,None,logs.Flog)
        return 'wid is none error!!'
    # Width 0 means a plain scalar net: no bracket group at all.
    if Wid==0:
        return ''
    if type(Wid)is int:
        return '[%s:0]'%(pr_expr(Wid))
    # Two-dimensional declarations print both bracket groups back to back.
    if (len(Wid)==3)and(Wid[0]=='double'):
        return '%s%s'%(pr_wid(Wid[1]),pr_wid(Wid[2]))
    if (len(Wid)==3)and(Wid[0]=='packed'):
        return pr_wid(Wid[1])+pr_wid(Wid[2])
    if len(Wid)==3:
        logs.log_err('pr_wid %s'%(str(Wid)))
        traceback.print_stack(None,None,logs.Flog)
        return str(Wid)
    # Default: a [hi, lo] pair.
    return '[%s:%s]'%(pr_expr(Wid[0]),pr_expr(Wid[1]))
def pr_replace(What):
    """Append the trailing space Verilog requires after an escaped
    (backslash-prefixed) identifier; other names pass through unchanged."""
    if What == '':
        return ''
    return What + ' ' if What[0] == '\\' else What
def pr_expr(What):
    """Render one expression-tree node as Verilog source text.

    *What* is an int/float/str leaf, or a tagged list/tuple whose first
    element names the node kind ('subbit', 'subbus', 'curly', 'hex', ...).
    Unknown shapes are logged and returned as an error marker string.

    Fixes vs. the original: the duplicate `int` test is removed; the
    'dotted' branch no longer calls `.join` on the sequence itself
    (which always raised AttributeError); the width-less 'hex' form is
    checked before indexing `What[2]` (previously unreachable).
    """
    if type(What)is int:
        return str(What)
    if type(What) is float:
        return str(What)
    if type(What)is str:
        # VHDL single-bit literals map onto 1-bit Verilog constants.
        if What == "'0'": return "1'b0"
        if What == "'1'": return "1'b1"
        return pr_replace(What)
    if not What:
        return ''
    if What[0]=='wire':
        return What[1]
    if (len(What)>1)and(What[1]=='token'):
        return What[0]
    if What[0]=='dotted':
        # Hierarchical path: join the printed parts with '.'.
        return '.'.join(str(pr_expr(Part)) for Part in What[1])
    if What[0]=='edge':
        return '%s %s'%(What[1],pr_expr(What[2]))
    if What[0] in ['negedge','posedge']:
        return '%s %s'%(What[0],pr_expr(What[1]))
    if What[0]=='subbit':
        if What[1] == 'rising_edge':
            noEdges = logs.getVar('noEdges',False)
            if noEdges:
                return '1'
            return '$rising_edge(%s)' % pr_expr(What[2])
        return '%s[%s]'%(pr_expr(What[1]),pr_expr(What[2]))
    if What[0]=='sub_slice':
        return '%s[%s][%s:%s]'%(pr_expr(What[1]),pr_expr(What[2]),pr_expr(What[3][0]),pr_expr(What[3][1]))
    if What[0] in ['double_sub','sub_slicebit']:
        return '%s[%s][%s]'%(pr_expr(What[1]),pr_expr(What[2]),pr_expr(What[3]))
    if What[0]=='subbus':
        if len(What)==4:
            return '%s[%s:%s]'%(pr_expr(What[1]),pr_expr(What[2]),pr_expr(What[3]))
        elif (len(What)==3):
            try:
                return '%s[%s:%s]'%(pr_expr(What[1]),pr_expr(What[2][0]),pr_expr(What[2][1]))
            except Exception:
                logs.log_error('#1198# whatlen=3 %s'%str(What))
                return '"%s"'%str(What)
        else:
            logs.log_err('pr_expr %s'%(str(What)))
            return 'ERROR%s'%str(What)
    if What[0]=='bin':
        return "%s'b%s"%(What[1],What[2])
    if What[0]=='const':
        return "%s'%s"%(What[1],What[2])
    if What[0]=='hex':
        # Width-less form must be handled before touching What[2].
        if len(What)==2: return What[1]
        Hex = What[2]
        while Hex[0]=='_': Hex = Hex[1:]
        if What[1] in ['0',0]:
            return "'h%s"%(Hex)
        # assumes What[1] is an int digit count (bits = 4*digits);
        # a str here would repeat the text instead -- TODO confirm upstream
        return "%s'h%s"%(4*What[1],Hex)
    if What[0]=='dig':
        if What[2][0]=="'": What[2] = What[2][1:]
        return "%s'd%s"%(What[1],What[2])
    if What[0]=='**':
        # Powers of two render as shifts.
        if What[1] in [2,'2']:
            return '(1<<%s)'%(pr_expr(What[2]))
        return '(%s ** %s)'%(pr_expr(What[1]),pr_expr(What[2]))
    if What[0] in MathOps+['!','~']:
        if len(What)==2:
            return '(%s%s)'%(What[0],pr_expr(What[1]))
        # Constant-fold when the whole subtree is numeric.
        if simply_computable(What):
            X,_ = simply_computable(What)
            return str(X)
        res=[]
        for X in What[1:]:
            Y = str(pr_expr(X))
            res.append(Y)
        res1 = '(%s)'%logs.join(res,' %s '%What[0])
        return res1
    if (type(What[0]) is str) and (What[0] in vhdlOps):
        # Translate VHDL operator tokens and retry.
        List = [vhdlOps[What[0]]] + list(What[1:])
        return pr_expr(List)
    if What[0]in ['?','question']:
        Cond = pr_expr(What[1])
        Yes = pr_expr(What[2])
        if len(What)<4:
            No = 'error!!!!'
        else:
            No = pr_expr(What[3])
        Short = '%s ? %s : %s'%(Cond,Yes,No)
        if len(Short)<120:
            return Short
        return '%s ? %s :\n        %s'%(Cond,Yes,No)
    if What[0]=='width':
        try:
            W = What[1]
            return '[%s:%s]'%(pr_expr(W[0]),pr_expr(W[1]))
        except Exception:
            return 'width'
    if What[0]=='curly':
        if What[1]=='repeat':
            return '{ %s { %s }}'%(pr_expr(What[2]),pr_expr(What[3]))
        # Collapse special concatenation shapes before generic printing.
        if one_bus(What[1:]):
            Bus,H,L=one_bus(What[1:])
            return pr_expr(['subbus',Bus,(H,L)])
        if single_bits(What[1:]):
            Wid,Val=single_bits(What[1:])
            if type(Val)is str:
                return pr_expr(['const',Wid,'b%s'%Val])
            else:
                return pr_expr(['const',Wid,'h%x'%Val])
        res = map(pr_expr,What[1:])
        return '{%s}'%(logs.join(res,', '))
    if What[0]=='define':
        if len(What)==2:
            return What[1]
        Expr = pr_expr(What[2])
        return '%s(%s)'%(What[1],Expr)
    if (What[0] == 'function')and(What[1] == 'edge'):
        return "1"
    if What[0] in ['functioncall','funccall']:
        if len(What[2])==1:
            Str = '%s(%s)'%(What[1],pr_expr(What[2][0]))
        else:
            Str = '%s(%s)'%(What[1],pr_expr(What[2]))
        return Str
    if What[0]=='repeat':
        Str = '{%s{%s}}'%(pr_expr(What[1]),pr_expr(What[2]))
        return Str
    if What[0]=='return':
        Str = 'return %s'%(pr_expr(What[1]))
        return Str
    if What[0]=='aggregate':
        X = pr_expr(What[2])
        Str = '%s%s'%(What[1],X)
        return Str
    if What[0]=='DOWNTO':
        Hi = pr_expr(What[1])
        Lo = pr_expr(What[2])
        return '[%s:%s]'%(Hi,Lo)
    if What[0]=='multing':
        Hi = pr_expr(What[1])
        Lo = pr_expr(What[2])
        return '(%s * %s)'%(Hi,Lo)
    if What[0] == 'func':
        return '%s[%s]' % (What[1],pr_expr(What[2]))
    if What[0]=='expr':
        return pr_expr(What[1:])
    if What[0]=='default':
        return pr_expr(What[1])
    if (type(What)is list):
        # Untagged list: constant-fold if possible, else print elementwise.
        if simply_computable(What):
            X,_ = simply_computable(What)
            return str(X)
        LL=[]
        for X in What:
            Y = pr_expr(X)
            LL.append(Y)
        return logs.join(LL,',')
    logs.pStack('pr_expr %s'%(str(What)))
    return str('error '+str(What))
vhdlOps = {'Slash':'/','Star':'*','EQSym' : '=='}
def splitLong(res1):
    """Wrap a long '+'-separated expression across lines; short text
    (under 120 chars) or text without '+' passes through unchanged."""
    if len(res1) < 120:
        return res1
    if '+' not in res1:
        return res1
    pieces = res1.split('+')
    return logs.join(pieces, '\n        +')
def simply_computable(What):
    """Try to fold a constant arithmetic expression tree to a number.

    *What* is a plain number or a nested [op, lhs, rhs] list with op in
    '-', '+', '*', '/'.  Returns (value, 'aa') when the whole tree is
    constant, otherwise False.
    """
    # Dispatch table instead of the original eval() on built-up text:
    # same arithmetic, no dynamic code execution.
    ops = {
        '-': lambda a, b: a - b,
        '+': lambda a, b: a + b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: a / b,
    }
    if isinstance(What,(float,int)):
        return What,'aa'
    if (What[0] in ops)and(len(What)==3):
        if simply_computable(What[1]) and simply_computable(What[2]):
            A,_ = simply_computable(What[1])
            B,_ = simply_computable(What[2])
            return ops[What[0]](A, B),'aa'
    return False
def one_bus(List):
    """Detect a concatenation of consecutive descending bits of one bus.

    Given [['subbit', net, idx], ...] with a single net name and indices
    stepping down by exactly one, return (net, high, low); otherwise
    False.  An empty list yields the vacuous (False, -1, -1), as before.
    """
    if type(List) is not list:
        return False
    prev = -1
    bus_name = False
    high = -1
    for node in List:
        if type(node) is not list:
            return False
        if node[0] != 'subbit':
            return False
        if not bus_name:
            bus_name = node[1]
        elif bus_name != node[1]:
            return False
        try:
            ind = int(node[2])
        except Exception:
            return False
        if prev < 0:
            high = ind
        elif ind != (prev - 1):
            return False
        prev = ind
    return bus_name, high, prev
def single_bits(List):
    """Collapse a concatenation of 1-bit constant literals into a pair.

    Returns (n_bits, int_value) — or (n_bits, bit_string) when an 'x'
    (unknown) bit is present — and False when any element is not a
    recognizable single-bit 'const'/'bin'/'hex' node.  (The unused
    Run/Bus/H locals from the original are removed.)
    """
    if type(List)is not list:
        return False
    if len(List)==0:
        return False
    Bits = []
    for Item in List:
        if type(Item)is not list:
            return False
        # Any 1-bit literal node contributes one resolved bit.
        if (Item[0] in ('const','bin','hex'))and(Item[1] in ['1',1]):
            Bits.append(compute1(Item[2]))
        else:
            return False
    Bits1 = logs.join(map(str,Bits),'')
    Len1 = len(Bits1)
    if 'x' in Bits1: return Len1,Bits1
    return Len1,int(Bits1,2)
def compute1(Item):
    """Evaluate a small constant expression used inside concatenations.

    Accepts ints, digit strings, 'b0'/'b1' binary literals, the unknown
    bit 'x', ['dig', width, value] nodes and binary arithmetic nodes
    [op, lhs, rhs].  Unrecognized items are logged and returned in
    string form (the original's `''%str(Item)` always raised TypeError).
    """
    if type(Item)is int:
        return Item
    if type(Item)is str:
        if Item[0] in '0123456789':
            return int(Item)
        if Item in ['b0','b1']:
            return int(Item[1])
        if Item=='x': return 'x'
    if type(Item)is list:
        if Item[0] in ['-','+','*','/']:
            A = compute1(Item[1])
            B = compute1(Item[2])
            X = '%s %s %s'%(A,Item[0],B)
            try:
                # NOTE(review): eval() on generated text; operands come
                # from parsed netlist constants, but a dispatch table
                # would be safer.
                return eval(X)
            except Exception:
                logs.log_err('failed compute of "%s"'%str(X))
                return 1
        if Item[0] in ['dig']:
            return compute1(Item[2])
    logs.pStack('compute1 in moduleClass faiuled on "%s" %s'%(Item,type(Item)))
    return str(Item)
def clean_inst(Inst):
    """Make an instance name identifier-safe: drop backslash escapes and
    map index brackets to underscores.  Non-strings pass through."""
    if type(Inst) is not str:
        return Inst
    for old, new in (('\\', ''), ('[', '_'), (']', '_')):
        Inst = Inst.replace(old, new)
    return Inst
def busify_x(Sig):
    """Turn a textual 'net[i]' / 'net[h:l]' reference into its structured
    ['subbit', ...] / ['subbus', ...] form.

    Escaped names (leading backslash) and anything that is not a
    bracket-suffixed string are returned unchanged.
    """
    is_indexed = (
        (type(Sig) is str) and ('[' in Sig)
        and Sig.endswith(']') and not Sig.startswith('\\')
    )
    if not is_indexed:
        return Sig
    base, _, inside = Sig[:-1].partition('[')
    if ':' in inside:
        parts = inside.split(':')
        return ['subbus', base, parts[0], parts[1]]
    return ['subbit', base, inside]
def relax_inst_name(Name):
    """Flatten a 'subbit' node into 'name_index_'; anything else is
    delegated to relax_name."""
    if isinstance(Name, (list, tuple)) and Name[0] in ['subbit']:
        return '%s_%s_' % (Name[1], Name[2])
    return relax_name(Name)
def relax_name(Name, Simple=True):
    """Sanitize a net/instance name: drop a leading backslash escape and
    map '/', '.' and (when *Simple*) '[' / ']' to underscores.

    'subbit'/'subbus' list nodes are sanitized in place on their base-name
    field; falsy values and non-string scalars pass through untouched.
    """
    if not Name:
        return Name
    if isinstance(Name, (list, tuple)):
        if Name[0] in ['subbit', 'subbus']:
            Name[1] = relax_name(Name[1], Simple)
        return Name
    if type(Name) is not str:
        return Name
    if Name.startswith('\\'):
        Name = Name[1:]
    cleaned = Name.replace('/', '_').replace('.', '_')
    if Simple and '[' in cleaned:
        cleaned = cleaned.replace('[', '_').replace(']', '_')
    return cleaned
def hashit(End):
    """Return a hashable version of *End*: lists become tuples, every
    other value is returned unchanged."""
    return tuple(End) if type(End) is list else End
def is_double_def(Wid):
    """True when *Wid* describes a multi-dimensional width definition
    ('packed'/'double' triples or 'triple' quadruples); plain [hi, lo]
    pairs and scalars give False.  Malformed shapes are logged."""
    if not isinstance(Wid, (list, tuple)):
        return False
    size = len(Wid)
    if size == 3 and Wid[0] in ['packed', 'double']:
        return True
    if size == 4 and Wid[0] in ['triple']:
        return True
    if size != 2:
        logs.log_err('bad width definition, ilia! %s ' % (str(Wid)))
        logs.pStack()
    return False
def make_int(Str):
    """Return *Str* converted to int when it parses as one, else unchanged.

    The bare `except:` is narrowed: only conversion failures should fall
    through, not e.g. KeyboardInterrupt.
    """
    try:
        return int(Str)
    except (TypeError, ValueError):
        # Names, expressions, None, etc. pass through untouched.
        return Str
def verilog_conns(Conns):
    """Format instance connections, dispatching on the container type.

    Lists of (pin, sig) pairs go to verilog_conns_list, dicts to
    verilog_conns_dict; any other type yields None.
    """
    if type(Conns) is list:
        return verilog_conns_list(Conns)
    if type(Conns) is dict:
        return verilog_conns_dict(Conns)
def verilog_conns_list(Conns):
    """Render a list of (pin, signal) pairs as '.pin(sig)' strings.

    Empty signals become empty connections; bare 0/1 constants are
    widened to explicit 1-bit literals.
    """
    res=[]
    for PS in Conns:
        [Pin,Sig]=PS
        if Sig=='':
            Expr=''
        else:
            Expr = pr_expr(Sig)
            # Normalize bare constants to sized Verilog literals.
            if Expr in ['1',1]: Expr = "1'b1"
            elif Expr in [0,'0']: Expr = "1'b0"
        res.append('.%s(%s)'%(pr_expr(Pin),Expr))
    return res
def verilog_conns_dict(Conns):
    """Render a {pin: signal} mapping as '.pin(sig)' strings.

    A '*' key emits the SystemVerilog wildcard connection '.*'; empty
    signals become empty connections; bare 0/1 constants are widened to
    explicit 1-bit literals.
    """
    res=[]
    for Pin in Conns:
        if Pin=='*':
            res.append('.*')
        else:
            Sig = Conns[Pin]
            if Sig=='':
                Expr=''
            else:
                Expr = pr_expr(Sig)
                # Normalize bare constants to sized Verilog literals.
                if Expr in ['1',1]: Expr = "1'b1"
                elif Expr in [0,'0']: Expr = "1'b0"
            res.append('.%s(%s)'%(pr_expr(Pin),Expr))
    return res
def orderHardAssigns(Mod,Env,Fout):
    """Topologically order Mod.hard_assigns so each assign reads only
    nets that are inputs, registers, or destinations of earlier assigns.

    *Env* and *Fout* are accepted for interface compatibility but are
    unused here.  Returns the list of hard_assign indices in evaluation
    order (the original computed it and silently dropped it).

    Fixes vs. the original: dict.keys() is wrapped in list() (a Python 3
    view has no pop()), and the scan position now advances when an entry
    is not yet ready (previously the inner loop spun forever).
    """
    HAS = {}
    Ordered=[]
    Ins = []
    Regs = []
    for Net in Mod.nets:
        Dir,_ = Mod.nets[Net]
        if 'input' in Dir: Ins.append(Net)
        if 'reg' in Dir: Regs.append(Net)
    # Comment entries ('//') carry no dependencies and are skipped.
    for ind,(Dst,Src,_,_) in enumerate(Mod.hard_assigns):
        if Dst!='//':
            Sup0 = support_set(Dst,False)
            Sup1 = support_set(Src,False)
            HAS[ind]=(Sup0,Sup1)
    ReadyList = Ins[:]+Regs[:]
    WorkList = list(HAS.keys())
    ok=True
    while ok and (len(WorkList)!=0):
        pos=0
        while pos<len(WorkList):
            ind = WorkList[pos]
            Dst,Src = HAS[ind]
            if allReady(Src,ReadyList):
                WorkList.pop(pos)
                pos=999999
                ReadyList.extend(Dst)
                Ordered.append(ind)
            else:
                pos += 1
        # A full pass with no progress means a dependency cycle; stop.
        ok = pos==999999
    return Ordered
def allReady(Src, Ready):
    """True when every item of *Src* is already present in *Ready*."""
    return all(item in Ready for item in Src)
def is_external_dir(Dir):
    """True for port directions visible on the module boundary."""
    return any(kind in Dir for kind in ('input', 'output', 'inout'))
def myExtras(Token):
    """True for the handful of tokens treated as known built-in extras."""
    return Token in ('$high', '$signed', 'empty_begin_end', 'unique_case')
def pr_net_def(Wid,Dir,Name):
    """Render one net declaration: direction, width bracket(s), name.

    Handles scalar nets (Wid==Dir), 'logic'/'signed' wrappers,
    multi-dimensional ('triple'/'packed'/unpacked) definitions, and
    the plain [hi, lo] case.
    """
    # Scalar net: the caller passes the direction in both slots.
    if Wid==Dir:
        return '%s %s'%(pr_dir(Dir),pr_expr(Name))
    if Wid[0]=='logic':
        return '%s %s %s'%(pr_dir(Dir),pr_wid(Wid[1]),pr_expr(Name))
    if Wid[0]=='signed':
        return '%s %s %s'%(pr_dir(Dir),pr_wid(Wid[1]),pr_expr(Name))
    if is_double_def(Wid):
        if (Wid[0]=='triple'):
            return '%s %s %s %s %s'%(pr_dir(Dir),pr_wid(Wid[1]),pr_wid(Wid[2]),pr_wid(Wid[3]),pr_expr(Name))
        elif (Wid[0]=='packed'):
            return '%s %s %s %s'%(pr_dir(Dir),pr_wid(Wid[1]),pr_wid(Wid[2]),pr_expr(Name))
        else:
            # Unpacked dimension: second width goes after the name.
            return '%s %s %s %s'%(pr_dir(Dir),pr_wid(Wid[1]),pr_expr(Name),pr_wid(Wid[2]))
    else:
        return '%s %s %s'%(pr_dir(Dir),pr_wid(Wid),pr_expr(Name))
    # NOTE(review): unreachable — every branch above returns.
    logs.log_error('pr_net_def got "%s" "%s" "%s"'%(Wid,Dir,Name))
def reasonableSig(Net):
    """False for constant nets (0/1, int or string form), True otherwise."""
    return Net not in [0, 1, '0', '1']
| greenblat/vhdl2v | llbin/module_class.py | module_class.py | py | 61,604 | python | en | code | 1 | github-code | 13 |
2000596264 | from django.shortcuts import render, redirect
from .forms import ProductAddForm
from django.contrib import messages
from .models import ProductDetail
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required(login_url="SignIn")
def AddProduct(request):
    """Render the add-product form and create a product on POST.

    The saved product is stamped with the logged-in user as its
    Merchant before a second save; on success the user is redirected
    back to the empty form.
    """
    form = ProductAddForm()
    if request.method == "POST":
        form = ProductAddForm(request.POST,request.FILES)
        if form.is_valid():
            # First save creates the row; Merchant is attached afterwards.
            product =form.save()
            product.Merchant = request.user
            product.save()
            messages.info(request,"Product Added to list")
            return redirect('AddProduct')
    return render(request,'admin/addproduct.html',{"form":form})
@login_required(login_url="SignIn")
def ProductViewMerchant(request):
    """List every product on the merchant admin page.

    Decorated with login_required for consistency with the other
    merchant views in this module (the original left this view open).
    """
    products = ProductDetail.objects.all()
    context ={
        "products":products
    }
    return render(request,'admin/productlistview.html',context)
@login_required(login_url="SignIn")
def DeleteProduct(request,pk):
    """Delete product *pk* and its image file, then return to the list.

    The image file is removed from storage explicitly before the row is
    deleted, since Django does not delete files on model deletion.
    """
    product = ProductDetail.objects.get(ProductId =pk)
    product.Product_Image.delete()
    product.delete()
    messages.info(request,"Product Deleted")
    return redirect('ProductViewMerchant')
@login_required(login_url="SignIn")
def UpdateProduct(request,pk):
    """Render the edit form for product *pk* and apply posted changes.

    The uploaded image is optional: the stored file is only deleted and
    replaced when a new one is actually submitted (the original used
    request.FILES['img'], which raised MultiValueDictKeyError on any
    text-only edit).
    """
    product = ProductDetail.objects.filter(ProductId =pk)
    if request.method=="POST":
        pname= request.POST['pname']
        pbrand= request.POST['pbrand']
        pdis= request.POST['pdis']
        pstk= request.POST['pstock']
        pcat= request.POST['pcat']
        pprice= request.POST['pprice']
        img= request.FILES.get('img')  # may be absent on a text-only edit
        item=ProductDetail.objects.get(ProductId = pk)
        item.Product_Name = pname
        item.Product_Brand = pbrand
        item.Product_Discription = pdis
        item.Product_Price = pprice
        item.Product_Category = pcat
        item.Product_Stock = pstk
        if img is not None:
            # Replace the stored image only when a new file was uploaded.
            item.Product_Image.delete()
            item.Product_Image = img
        item.save()
        messages.info(request,"item updated")
        return redirect("UpdateProduct",pk=pk)
    context={
        "product":product
    }
    return render(request,'admin/updateproduct.html',context)
@login_required(login_url="SignIn")
def prodctlistview(request,pk):
    """Show the detail page for one product (queryset filtered by pk).

    (The trailing dataset-metadata residue fused onto the return line
    has been removed; it made the file syntactically invalid.)
    """
    item = ProductDetail.objects.filter(ProductId=pk)
    context={
        "product":item
    }
    return render(request,'prodctlistview.html',context)
21890994178 | l=list()
# Beginner exercise: read one value into the list `l` (defined above).
for i in range(1):
    l.append(input("enter the number"))
# Print all elements space-separated, then one per line.
print(*l)
for x in l:
    print(x)
# Replace the first element, then exercise the common list methods.
l[0]="hello"
print(l)
l.pop()
# NOTE(review): insert at position 3 of a now-empty list appends at the end.
l.insert(3,"hai")
l.sort()
l.clear()
print(l)
# Nested lists: l4[2][1] picks the second item of the third sub-list ('cd').
l1=["hai","hlo"]
l2=["a","b","c"]
l3=["ab","cd","de"]
l4=[l1,l2,l3]
print(l4[2][1])
# f-string formatting demo.
z=10
x=11
print(f"the value is {z} and {x}")
# Rebinding z/x to lists and nesting them.
z=[]
y=["hlo","hi"]
x=["vvv","ffff"]
z.append(y)
z.append(x)
print(*(z))
| joyaldevassy14/python | bascis/list.py | list.py | py | 393 | python | en | code | 0 | github-code | 13 |
15469383031 | import copy
import datetime
import os
import re
import sys
import time
#--------------------------------------------------------------------------------
# Local
from jobsuite import *
from pathlib import Path
keyword_command = [ "nodes", "ppn", "suite", ]
keyword_reserved = [ "system", "modules",
# slurm variables
"account", "queue", "date", "time", "user",
]
def read_batch_template(filename):
    """
    Read in Slurm batch submit template and return as a string.

    Uses a context manager so the file handle is closed promptly
    (the original left the handle open until garbage collection).
    """
    with open(filename, "r") as template_file:
        return template_file.read()
def DefaultModules():
    """Return the default Lmod module string used when a configuration
    does not supply its own 'modules' value."""
    return "intel/18.0.2"
def wait_for_jobs( jobs ):
    """Poll the given job objects once per second until none are left
    running ('R') or pending ('PD') in the scheduler, printing progress."""
    while True:
        running = []; pending = []
        for j in jobs:
            id = j.jobid
            status = j.get_status()
            if status=="R":
                running.append(id)
            elif status=="PD":
                pending.append(id)
        print(f"Running: {running} Pending: {pending}")
        # Done when no job is running or queued any more.
        if len(running)+len(pending)==0:
            break
        time.sleep(1)
def get_suite_name(options, values):
    """Pick the suite name: an explicit 'name' option wins, then a
    'name:<x>' entry inside *values*, then the default 'testsuite'."""
    if "name" in options:
        return options["name"]
    for entry in values:
        if re.match("name:", entry):
            _, found = entry.split(":")
            return found
    return "testsuite"
def print_configuration(confdict):
    """Pretty-print the resolved configuration dictionary to stdout.

    The original template had no '{}' placeholder, so .format() silently
    dropped the dictionary from the output.
    """
    print("""
################ Configuration ################
Running as:
{}
################################
""".format(str(confdict)))
def test_job():
    """Smoke test: submit a trivial /bin/true Job and print its script
    and the scheduler id it was submitted as."""
    print("""================
Test job in main
================""")
    # Job comes from the star import of jobsuite at the top of the file.
    job = Job(script="/bin/true")
    id = job.submit()
    print("""Job script
================
{}
================
submitted as {}""".format(str(job),id))
class Configuration():
    """Holds the resolved spawn configuration and parses spawnrc files.

    Fixes vs. the original: `parse` referenced an undefined local
    `jobname` (NameError as soon as a jobname key appeared); the bare
    `except:` clauses in `__init__` are narrowed to KeyError.
    """
    def __init__(self,**kwargs):
        """Seed the configuration from keyword args (must include
        'jobname') plus defaults and the TACC environment."""
        self.configuration = {}
        for key,val in kwargs.items():
            self.configuration[key] = val
        # Fail fast when the mandatory jobname keyword is missing.
        _ = self.configuration["jobname"]
        self.configuration["modules"] = "default"
        self.configuration["time"] = "0:37:0"
        try :
            self.configuration["system"] = os.environ["TACC_SYSTEM"]
        except KeyError:
            self.configuration["system"] = None
        try :
            self.configuration["mpi"] = os.environ["LMOD_FAMILY_MPI"]
        except KeyError:
            self.configuration["mpi"] = "mpich"
        self.configuration["pwd"] = os.getcwd()
    def parse(self,filename,**kwargs):
        """Read a spawnrc/config file and fold its 'key value' lines into
        self.configuration.

        Special keys: system (must match this machine), jobname (set at
        most once), queue (optional 'limit:<n>' suffix), outputdir
        (deprecated), sbatch/env (appended), suite (builds a TestSuite).
        Anything else is stored as a macro value.
        """
        for k in [ "suites","sbatch","env" ]:
            self.configuration[k] = []
        with open(filename,"r") as configuration:
            for specline in configuration:
                specline = specline.strip()
                # skip comments and blank lines
                if re.match("#",specline) or re.match(r'^[ \t]*$',specline):
                    continue
                # otherwise interpret as key/value
                key,value = specline.split(" ",1)
                # special case: system must name the machine we run on
                if key=="system":
                    if value!=self.configuration["system"]:
                        print(f"This configuration can only be run on <<{value}>>")
                        sys.exit(1)
                # substitute any macros
                value = macros_substitute( value,self.configuration )
                # special case: jobname can be set only once
                current_jobname = self.configuration.get("jobname","spawn")
                if key=="jobname" and current_jobname != "spawn":
                    raise Exception(f"Job name can be set only once, current: {current_jobname}")
                # special case: queue, with an optional "limit:<n>" suffix
                elif key=="queue":
                    nam_lim = value.split(); qname = nam_lim[0]; qlimit = 1
                    if len(nam_lim)>1:
                        qlimit = nam_lim[1]
                        if re.match("limit",qlimit):
                            qlimit = qlimit.split(":")[1]
                    Queues().add_queue( qname,qlimit )
                    self.configuration[key] = qname
                # special case: output dir needs to be set immediately
                elif key=="outputdir":
                    raise Exception("outputdir key deprecated")
                # special case: `sbatch' and `env' lines are appended
                elif key in ["sbatch","env"]:
                    self.configuration[key].append(value)
                # suite definition: macro-substitute each field, then build
                elif key=="suite":
                    fields = value.split(" ")
                    suitespec = [ macros_substitute(f,self.configuration) for f in fields ]
                    s = TestSuite( suitespec, copy.copy(self.configuration) )
                    self.configuration["suites"].append(s)
                else:
                    self.configuration[key] = value
    def run(self):
        """Run every parsed test suite with the current flags."""
        for s in self.configuration["suites"]:
            s.run(debug=self.configuration["debug"],
                  submit=self.configuration["submit"],
                  testing=self.configuration["testing"])
if __name__ == "__main__":
    # Enforce the minimum interpreter version before doing anything else.
    if sys.version_info[0]<3:
        print("Please move to python3"); sys.exit(1)
    if sys.version_info[1]<8:
        print("This requires at least python 3.8"); sys.exit(1)
    args = sys.argv[1:]
    # Option defaults: submit for real unless a flag says otherwise.
    testing = False; debug = False; submit = True
    jobname = "spawn"; outputdir = None; comparedir = None
    rootdir = os.getcwd()  # NOTE(review): assigned but unused below
    # Hand-rolled option parsing: consume leading '-...' arguments.
    # NOTE(review): raises IndexError when invoked with no arguments.
    while re.match("^-",args[0]):
        if args[0]=="-h":
            print("Usage: python3 batch.py [ -h ] [ -d --debug ] [ -f --filesonly ] [ -t --test ] [ -n name ] [ -r --regression dir ] [ -o --output dir ] [ -c --compare dir ]")
            sys.exit(0)
        elif args[0] == "-n":
            args = args[1:]; jobname = args[0]
        elif args[0] in [ "-f", "--filesonly" ] :
            # Generate job files but never submit or test.
            submit = False; testing = False
        elif args[0] in [ "-o", "--outputdir" ] :
            args = args[1:]; outputdir = args[0]
        elif args[0] in [ "-r", "--regression" ] :
            # Regression mode: fixed output dir, test without submitting.
            args = args[1:]; outputdir = args[0]
            testing = True; submit = False
        elif args[0] in [ "-c", "--compare" ] :
            args = args[1:]; comparedir = args[0]
            if not os.path.exists(comparedir):
                raise Exception(f"Compare directory <<{comparedir}>> does not exist")
        elif args[0] in [ "-t", "--test" ]:
            testing = True; submit = False
        elif args[0] in [ "-d", "--debug" ]:
            debug = True
            SpawnFiles().debug = True
        args = args[1:]
    # Timestamped output directory and logfile (unless -o gave a dir).
    now = datetime.datetime.now()
    starttime = f"{now.year}{now.month}{now.day}-{now.hour}.{now.minute}"
    print(f"Output dir: {outputdir}")
    if not outputdir:
        outputdir = f"spawn_output_{starttime}"
    SpawnFiles().setoutputdir(outputdir)
    SpawnFiles().open_new(f"logfile-{jobname}-{starttime}",key="logfile")
    configuration = Configuration\
        (jobname=jobname,date=starttime,debug=debug,submit=submit,testing=testing,
         outputdir=outputdir,comparedir=comparedir)
    queues = Queues()
    queues.testing = testing
    # Local .spawnrc wins over the one in the user's home directory.
    if os.path.exists(".spawnrc"):
        configuration.parse(".spawnrc")
    else:
        globalrc = f"{Path.home()}/.spawnrc"
        if os.path.exists( globalrc ):
            configuration.parse(globalrc)
    # The remaining positional argument is the main configuration file.
    configuration.parse(args[0])
    # now activate all the suites
    configuration.run()
    # close all files
    SpawnFiles().__del__()
| TACC/demonspawn | spawn.py | spawn.py | py | 6,750 | python | en | code | 12 | github-code | 13 |
class mass(object):
    """! Particle masses (presumably in GeV, consistent with the PDG-style
    values used throughout this module — TODO confirm).

    (The class-definition line was fused with dataset-id residue that
    made the file syntactically invalid; it has been repaired.)
    """
    ## electron mass
    e = 0.00051
    ## muon mass
    mu = 0.1057
    ## tau mass
    tau = 1.777
    ## down quark mass
    d = 0.32
    ## up quark mass
    u = 0.32
    ## strange quark mass
    s = 0.5
    ## charm quark mass
    c = 1.55
    ## bottom quark mass
    b = 4.95
    ## top quark mass
    t = 172.5
    ## W boson mass
    W = 80.399
    ## Z boson mass
    Z = 91.1876
    ## Higgs boson mass
    H = 125.0
class width(object):
    """! Particle decay widths (same unit convention as the masses above
    — presumably GeV; confirm against the generator interface)."""
    ## top quark width
    t = 1.32
    ## W boson width
    W = 2.085
    ## Z boson width
    Z = 2.4952
    ## Higgs boson width
    H = 0.00407
class branching_ratio(object):
    """! Particle branching ratios (derived values kept consistent:
    leptonic = 3 x electron channel, hadronic = remainder)."""
    ## BR W -> ev
    W_to_enu = 0.1082
    ## BR W -> lv (sum of e, mu, tau; factor 3 assumes equal rates per lepton)
    W_to_leptons = 3 * W_to_enu
    ## BR W -> qq (everything that is not leptonic)
    W_to_hadrons = 1.0 - W_to_leptons
    ## BR t -> Wb (top decays taken as 100% to Wb)
    t_to_Wb = 1.0
    ## BR t -> Ws
    t_to_Ws = 0.0
    ## BR t -> Wd
    t_to_Wd = 0.0
class CKM(object):
    """! CKM quark-mixing matrix elements |V_ij| (rows u,c,t; columns d,s,b)."""
    ## CKM Vud
    Vud = 0.97428
    ## CKM Vus
    Vus = 0.2253
    ## CKM Vub
    Vub = 0.00347
    ## CKM Vcd
    Vcd = 0.2252
    ## CKM Vcs
    Vcs = 0.97345
    ## CKM Vcb
    Vcb = 0.041
    ## CKM Vtd
    Vtd = 0.00862
    ## CKM Vts
    Vts = 0.0403
    ## CKM Vtb
    Vtb = 0.999152
## EM coupling (low-energy value; the inline ratio gives the inverse)
alphaem_0 = 0.00729735252 # 1/137.036
## EM coupling (higher-scale value used alongside the Z parameters)
alphaem = 0.00781653039 # 1/127.934
## Strong coupling
alphaqcd = 0.1185
## Fermi constant
G_F = 0.00001166397
## sin(theta_W) ^ 2 (weak mixing angle)
sin2thetaW = 0.23113
## sin(theta_Cabibbo) ^ 2 (Cabibbo angle)
sin2cabibbo = 0.051
| rushioda/PIXELVALID_athena | athena/Generators/PowhegControl/python/parameters/atlas_common.py | atlas_common.py | py | 1,669 | python | en | code | 1 | github-code | 13 |
24938877053 | """
5) add the choice between a nominal and an effective interest rate
"""
from mortality import *
class Annuity:
    """Expected present value of a temporary life annuity.

    NOTE(review): relies on `mortality_tables` from the star import of
    `mortality`; each table exposes per-gender qx lists plus an
    `age_start` attribute — confirm against that module.
    """
    def __init__(
        self,
        table = 'test',
        gender = 'female',
        interest_rate = 0.03,
        interest_compounding = 12,
    ):
        # qx are one-year death probabilities; px the survival complements.
        self.table = mortality_tables[table]
        qx = mortality_tables[table][gender]
        self.px = [1-x for x in qx]
        # Per-period discount factor derived from the compounded rate.
        # NOTE(review): v = (1+i/m)**(1/m) is an unusual conversion; the
        # module docstring's TODO about nominal vs effective rates
        # suggests this is interim — confirm the intended formula.
        v = (1+interest_rate/interest_compounding)**(1/interest_compounding)
        self.v = 1/v
    def value(
        self,
        age_start = 1,
        annuity_duration = 5,
    ):
        """Return sum over k of v^k * kPx, k = 0..annuity_duration,
        starting from *age_start* (offset into the mortality table)."""
        n = annuity_duration+1
        vk = [1]*n     # discount factors v^k
        kpx = [1]*n    # cumulative survival probabilities kPx
        age_start_qx = self.table.age_start
        offset = age_start - age_start_qx
        for i in range(1, n):
            vk[i] = self.v**i
            kpx[i] = kpx[i-1]*self.px[offset+i-1]
        ax = 0
        for i in range(n):
            ax += vk[i]*kpx[i]
        return ax
# running: demo with all defaults (test table, female, 3% compounded monthly)
ax = Annuity()
r = ax.value()
print('annuity with compounded interest says', r)
17630712060 | from ecom import settings
from django.urls import path
from .views import (
homepage_view,
category_view,
category_gender_view,
category_gender_subcategory_view,
brand_view,
)
from core.models import (
Header,
SubGroup,
GroupItem,
Carousel
)
from products.models import (
Category,
Subcategory,
Brand,
Size,
Product,
Gender,
)
# URL routes for the storefront: home page plus category/brand/gender
# drill-down views.  The single-letter prefixes (A/B/C/D) keep the
# otherwise-identical string patterns from shadowing each other.
urlpatterns = [
    # Landing page.
    path(
        '',
        homepage_view,
        name='homepage_view',
    ),
    # Products filtered by category.
    path(
        'A/<str:category>',
        category_view,
        name='category_view',
    ),
    # Products filtered by brand.
    path(
        'B/<str:brand>',
        brand_view,
        name='brand_view',
    ),
    # Products filtered by category and gender.
    path(
        'C/<str:category>/<str:gender>',
        category_gender_view,
        name='category_gender_view',
    ),
    # Products filtered by category, gender and subcategory.
    path(
        'D/<str:category>/<str:gender>/<str:subcategory>',
        category_gender_subcategory_view,
        name='category_gender_subcategory_view',
    )
]
| AarushJuneja/Reppin | ecom/core/urls.py | urls.py | py | 980 | python | en | code | 1 | github-code | 13 |
19270049255 | from django.http import HttpResponse
from django.shortcuts import redirect
from django.views import View
from django.db import transaction
from django.core.mail import send_mail
from django.core.mail import get_connection
from rest_framework.views import APIView
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.response import Response
from core.models import Orgs
from core.models import User
from core.models import UserInfo
from core.models import Employee
from core.models import OrgPosition
from core.models import CountNumber
from core.models import PropertyType
from core.models import EducationalInstitutionCategory
from .help.roles import default_roles
from .serializer import OrgsSerializer
from .serializer import OrgJsonSerializer
from .serializer import EmployeeSerializer
from .serializer import UserSaveSerializer
from .serializer import UserFirstRegisterSerializer
from .serializer import EmailOrgSerializer
from worker.serializer import EmployeeMigrationsSerializer
from main.decorators import has_permission
from main.decorators import login_required
# Create your views here.
class HomeApiView(View):
    """Placeholder landing endpoint returning a static response."""

    def get(self, request):
        body = "dsadsa"
        return HttpResponse(body)
class OrgRegisterJsonAPIView(APIView):
    """Serve the organization tree as JSON for the orgs jstree widget."""
    @login_required()
    def get(self, request):
        ''' Return the data backing the orgs jstree.
        '''
        # Fetch organizations; non-superusers only see their own org.
        filters = {}
        if "org" in request.org_filter and not request.user.is_superuser:
            filters['id'] = request.org_filter.get("org").id
        orgsData = Orgs.objects.filter(**filters).order_by("created_at")
        serialized_data = OrgJsonSerializer(orgsData, many=True).data
        if request.user.is_superuser:
            # When nothing exists yet, show a "create new" node for admins.
            if orgsData.count() < 1:
                serialized_data.append(
                    {
                        "text": "Шинээр үүсгэх",
                        "a_attr": {
                            "href": "/org/org-register/"
                        },
                        "icon": "fa fa-folder-plus"
                    }
                )
        # Return the JSON payload.
        return Response(serialized_data)
class OrgAPIView(APIView):
    ''' CRUD operations for the top-level Organization.
    '''
    serializer_class = OrgsSerializer
    queryset = Orgs.objects
    crud_serializer = OrgJsonSerializer
    renderer_classes = [TemplateHTMLRenderer]
    template_name = 'pages/org/index.html'
    @login_required()
    @has_permission(must_permissions=['org-read'])
    def get(self, request, pk=None):
        """Render the org register page, either for an existing org (pk)
        or with an empty form; redirects to the last org when one exists."""
        property_type = PropertyType.objects.all().values('name', 'id')
        educational_institution_category = EducationalInstitutionCategory.objects.all().values('name', 'id')
        if pk:
            self.queryset = self.queryset.get(pk=pk)
            serializer = self.serializer_class(instance=self.queryset, many=False)
            return Response({ 'serializer': serializer, "pk": pk, 'user_serializer': UserFirstRegisterSerializer, 'property_type': property_type, 'educational_institution_category': educational_institution_category })
        # If at least one organization exists, jump straight to it.
        orgs_qs = Orgs.objects.filter()
        if orgs_qs.exists():
            return redirect('org-register', pk=orgs_qs.last().id)
        serializer = self.serializer_class()
        return Response({ 'serializer': serializer, 'user_serializer': UserFirstRegisterSerializer, 'property_type': property_type, 'educational_institution_category': educational_institution_category })
    #post
    @login_required()
    @has_permission(allowed_permissions=['org-create', 'org-update'])
    @transaction.atomic
    def post(self, request, pk=None):
        """ Update an existing org (pk given) or create a new one with:
            - ``name`` - the organization name
            plus the default HR role, its user account and employee record.
        """
        if pk:
            instance = self.queryset.get(pk=pk)
            serializer = self.serializer_class(instance=instance, data=request.data)
            # Drop an empty logo field so it does not overwrite the stored one.
            if not request.data['logo']:
                request.data._mutable = True
                request.data.pop('logo')
                request.data._mutable = False
            # if not request.data['todorkhoilolt_signature']:
            #     request.data._mutable = True
            #     request.data.pop('todorkhoilolt_signature')
            #     request.data._mutable = False
            # if not request.data['todorkhoilolt_tamga']:
            #     request.data._mutable = True
            #     request.data.pop('todorkhoilolt_tamga')
            #     request.data._mutable = False
            if not serializer.is_valid():
                request.send_message('error', 'ERR_001')
                return Response({ 'serializer': serializer, 'pk': pk })
            # If a new logo was uploaded, delete the old stored file first.
            # NOTE(review): bare except silently swallows any error here.
            try:
                request.data['logo']
                if instance.logo:
                    instance.logo.delete()
            except:
                None
            serializer.save()
        else:
            with transaction.atomic():
                sid = transaction.savepoint() # savepoint so we can roll back on any error
                employee_body = dict() # holds the employee payload
                user_body = dict() # holds the user payload
                # Create the organization itself.
                serializer = self.serializer_class(data=request.data)
                if not serializer.is_valid():
                    request.send_message('error', 'ERR_001')
                    transaction.savepoint_rollback(sid)
                    return Response({ 'serializer': serializer, 'data': request.data , 'user_serializer': UserFirstRegisterSerializer })
                org = serializer.save()
                # Create the organization's default roles (incl. HR staff role).
                for role_info in default_roles:
                    org_position = self.serializer_class.create_defualt_role(role_info['name'], role_info['description'], role_info['permissions'], org.id)
                    if org_position is False:
                        transaction.savepoint_rollback(sid)
                        request.send_message('error', 'ERR_004')
                user_body = {
                    'password': request.data['phone_number'],
                    'email': request.data['email'],
                    'phone_number': request.data['phone_number'],
                    'mail_verified': True,
                }
                # Create the account for the organization's HR staff member.
                user_serializer = UserSaveSerializer(data=user_body)
                if not user_serializer.is_valid():
                    request.send_message('error', 'ERR_012')
                    user_serializer = UserFirstRegisterSerializer(data=request.data)
                    user_serializer.is_valid()
                    transaction.savepoint_rollback(sid)
                    return Response({ 'serializer': serializer, 'data': request.data, 'user_serializer': user_serializer})
                user = user_serializer.save()
                # Create an empty, pre-approved userinfo for the new account.
                if user:
                    UserInfo.objects.create(user=user, first_name=user.email, action_status=UserInfo.APPROVED, action_status_type=UserInfo.ACTION_TYPE_ALL)
                # Create the employee record tying the user to the new org's HR role.
                org_position = OrgPosition.objects.filter(org_id=org.id, is_hr=True).values('id').first()
                count_number = CountNumber.objects.filter(name='time_register_employee').last()
                time_register_id_count = count_number.count
                employee_body = {
                    'org_position': org_position['id'],
                    'org': org.id,
                    'user': user.id,
                    'time_register_employee': time_register_id_count
                }
                employee_serializer = EmployeeSerializer(data=employee_body)
                if not employee_serializer.is_valid():
                    request.send_message('error', 'ERR_012')
                    user_serializer = UserFirstRegisterSerializer(data=request.data)
                    user_serializer.is_valid()
                    transaction.savepoint_rollback(sid)
                    return Response({ 'serializer': serializer, 'data': request.data, 'user_serializer': user_serializer})
                employee = employee_serializer.save()
                # Record the employee's position-migration history.
                EmployeeMigrationsSerializer.create_from_employee(employee, None, None)
                # Bump the employee counter after adding the employee.
                time_register_id_count = time_register_id_count + 1
                count_number.count = time_register_id_count
                count_number.save()
        request.send_message('success', 'INF_015')
        return redirect('org-register')
class OrgDelete(APIView):
    """Delete an organization by primary key, then return to the
    register page with a success message."""

    @login_required()
    @has_permission(must_permissions=['org-delete'])
    def get(self, request, pk):
        target = Orgs.objects.filter(pk=pk)
        target.delete()
        request.send_message("success", 'INF_003')
        return redirect("org-register")
class SuperUserChangeOrg(APIView):
    """Let a superuser switch their own employee record to another org."""
    @login_required()
    @has_permission(must_permissions=['salbar-delete'])
    def get(self, request, pk):
        if request.user.is_superuser:
            # Re-point the superuser's employee row at org *pk*,
            # clearing sub-org and branch.
            Employee.objects.update_or_create(
                user=request.user,
                defaults={
                    'org_id': pk,
                    'sub_org': None,
                    "salbar": None
                }
            )
        # NOTE(review): the success message and redirect run for
        # non-superusers too (outside the if) — confirm intentional.
        request.send_message("success", 'INF_002')
        return redirect("org-register")
class UserRegisterOrg(APIView):
    """Superuser-only: register an HR user + employee for an existing org."""
    @login_required()
    @has_permission(allowed_permissions=['org-update'])
    @transaction.atomic
    def post(self, request, pk):
        if request.user.is_superuser:
            sid = transaction.savepoint() # savepoint so we can roll back on any error
            employee_body = dict() # holds the employee payload
            user_body = dict() # holds the user payload
            # Reject a duplicate e-mail address up front.
            user = User.objects.filter(email=request.data['email']).first()
            if user:
                raise request.send_error("ERR_002", "и-мэйл")
            # The organization's HR-staff role.
            org_position = OrgPosition.objects.filter(org_id=pk, is_hr=True).values('id').first()
            # Create the user account (initial password = phone number).
            user_body = {
                'password': request.data['phone_number'],
                'email': request.data['email'],
                'phone_number': request.data['phone_number'],
                'mail_verified': True,
            }
            user_serializer = UserSaveSerializer(data=user_body)
            if not user_serializer.is_valid():
                transaction.savepoint_rollback(sid)
                raise request.send_error("ERR_001")
            user = user_serializer.save()
            # Create an empty, pre-approved userinfo for the account.
            if user:
                UserInfo.objects.create(user=user, first_name=user.email, action_status=UserInfo.APPROVED, action_status_type=UserInfo.ACTION_TYPE_ALL)
            count_number = CountNumber.objects.filter(name='time_register_employee').last()
            time_register_id_count = count_number.count
            # Create the employee record tying the user to org *pk*.
            employee_body = {
                'org_position': org_position['id'],
                'org': pk,
                'user': user.id,
                'time_register_employee': time_register_id_count,
            }
            employee_serializer = EmployeeSerializer(data=employee_body)
            if not employee_serializer.is_valid():
                transaction.savepoint_rollback(sid)
                raise request.send_error("ERR_001")
            employee = employee_serializer.save()
            # Record the employee's position-migration history.
            EmployeeMigrationsSerializer.create_from_employee(employee, None, None)
            # Bump the employee counter after adding the employee.
            time_register_id_count = time_register_id_count + 1
            count_number.count = time_register_id_count
            count_number.save()
        return request.send_info("INF_001")
class OrgSystemEmailApiView(APIView):
    """Save an organization's outgoing-mail (SMTP) settings."""
    serializer_class = EmailOrgSerializer
    queryset = Orgs.objects
    def post(self, request, pk=None):
        # The QueryDict is immutable by default; unlock it briefly so the
        # checkbox value can be coerced to a real boolean.
        request.data._mutable = True
        request.data["email_use_tls"] = request.data.get("email_use_tls") == "on"
        request.data._mutable = False
        if not pk:
            request.send_message('error', 'ERR_022')
            return redirect("org-register")
        instance = self.queryset.get(pk=pk)
        serializer = self.serializer_class(instance=instance, data=request.data)
        if serializer.is_valid():
            serializer.save()
        else:
            request.send_message('error', 'ERR_001')
        return redirect("org-register", pk=pk)
class CheckEmailApiView(APIView):
    """Send a test message through the organization's SMTP credentials."""
    @login_required()
    def post(self, request, pk=None):
        address = request.data.get('email')
        org = request.org_filter.get("org")
        # Without stored credentials there is nothing to test.
        if not org.email_host or not org.email_password:
            raise request.send_error("ERR_024")
        smtp_connection = get_connection(
            username=org.email_host,
            password=org.email_password,
            port=587,
            host='smtp.gmail.com',
            use_tls=True,
        )
        # Self-addressed test mail: sender and recipient are the same.
        send_mail(
            'subject',
            'text',
            address,
            recipient_list=[address],
            connection=smtp_connection,
            html_message='Амжилттай илгээгдэж байна.'
        )
        return request.send_info("INF_015")
| FoxLlik/sus_hr | apps/org/views.py | views.py | py | 14,895 | python | en | code | 0 | github-code | 13 |
25105269793 | # -*- coding: utf-8 -*-
from algorithm import yolov3_slideWindows
from easydict import EasyDict as edict
import argparse
import os
import cv2
import sys
yolov3_path=os.path.expanduser('~/git/gnu/code/yolov3')
if yolov3_path not in sys.path:
sys.path.insert(0,yolov3_path)
from utils.utils import plot_one_box
def isObjectInArea(rule, bbox):
    """Decide whether a detection triggers an alarm rule.

    Both ``rule`` and ``bbox`` look like
    ``{'bbox': [x1, y1, x2, y2], 'conf': conf, 'label': 'dog'}``.
    Returns ``(matched, reason)`` with reason one of
    ``'label'``, ``'conf'``, ``'iou'`` or ``'warning'``.
    """
    # The labels must agree before anything else is considered.
    if rule['label'] != bbox['label']:
        return False, 'label'
    # The detection must be at least as confident as the rule requires.
    if rule['conf'] > bbox['conf']:
        return False, 'conf'
    ax1, ay1, ax2, ay2 = rule['bbox']
    bx1, by1, bx2, by2 = bbox['bbox']
    overlap_w = min(ax2, bx2) - max(ax1, bx1)
    overlap_h = min(ay2, by2) - max(ay1, by1)
    # Strictly positive width AND height means the rectangles intersect.
    if overlap_w > 0 and overlap_h > 0:
        return True, 'warning'
    return False, 'iou'
class Area_Detector():
    # Runs YOLOv3 over a video and raises an on-screen alarm whenever a
    # detection of the configured label overlaps a fixed rectangular area.
    def __init__(self,config):
        # Build the yolov3 detector options from the user Config object.
        self.config=config
        opt=edict()
        opt.root_path=config.root_path
        opt.cfg=os.path.join(config.root_path,config.cfg)
        opt.data_cfg=os.path.join(config.root_path,config.data_cfg)
        opt.weights=os.path.join(config.root_path,config.weights)
        opt.img_size=config.img_size
        self.detector=yolov3_slideWindows(opt)
    def process_frame(self,frame):
        # Sliding-window detection on a single frame; returns the annotated
        # image and the list of detections.
        image,bboxes=self.detector.process_slide(frame)
        return image,bboxes
    def process_video(self,video_name):
        # Play the video, draw detections and the alarm area, and save the
        # annotated frames to "<label>_<basename>".  Press 'q' to stop.
        cap=cv2.VideoCapture(video_name)
        if not cap.isOpened():
            assert False,'cannot open video {}'.format(video_name)
        # All colors are BGR (OpenCV convention).
        COLOR_AREA=(0,0,255)
        COLOR_ALARM=(0,0,255)
        COLOR_NORMAL=(0,255,0)
        # config for writer video
        save_video_name='{}_{}'.format(self.config.label,os.path.basename(video_name))
        codec = cv2.VideoWriter_fourcc(*"mp4v")
        fps=30
        writer=None
        # The alarm rule: fixed area + confidence threshold + target label.
        rule={'bbox':self.config.bbox,
              'conf':self.config.conf,
              'label':self.config.label}
        while True:
            ret,frame=cap.read()
            if ret:
                if self.config.slide_window:
                    image,bboxes=self.detector.process_slide(frame)
                else:
                    image,bboxes=self.detector.process(frame)
                # Keep only detections of the label we monitor.
                bboxes=[bbox for bbox in bboxes if bbox['label']==self.config.label]
                for bbox in bboxes:
                    flag,reason=isObjectInArea(rule,bbox)
                    if flag:
                        color=COLOR_ALARM
                    else:
                        color=COLOR_NORMAL
                    plot_one_box(bbox['bbox'], frame, label=bbox['label']+' %s %0.2f'%(reason,bbox['conf']), color=color)
                # Draw the monitored area on top of the detections.
                frame=cv2.rectangle(img=frame, pt1=tuple(rule['bbox'][0:2]), pt2=tuple(rule['bbox'][2:4]), color=COLOR_AREA, thickness=2)
                cv2.imshow(self.config.label,frame)
                key=cv2.waitKey(30)
                if key==ord('q'):
                    break
                # Lazily create the writer once the frame size is known.
                if writer is None:
                    height,width,_=frame.shape
                    writer=cv2.VideoWriter(save_video_name,
                                           codec, fps,
                                           (width, height))
                writer.write(frame)
            else:
                break
        if writer is not None:
            writer.release()
        cap.release()
class Config(edict):
    # Attribute-style configuration: defaults below, then overridden by the
    # command line in get_parser().  Inherits edict, so attributes double
    # as dict keys.
    def __init__(self):
        super().__init__()
        self.cfg=''
        self.data_cfg=''
        self.weights=''
        self.root_path=''
        self.img_size=416
        self.label='car'
        self.conf=0.2
        self.bbox=[20,20,100,100]
        self.video_name='test.mp4'
        self.slide_window=True
        self.get_parser()
    def get_parser(self):
        # Parse the command line and merge the arguments over the defaults
        # stored in this edict, printing each merged/skipped key.
        parser = argparse.ArgumentParser()
        parser.add_argument('--root_path',type=str,default=os.path.expanduser('~/git/gnu/code/yolov3'),help='config root path for cfg, data_cfg and weights')
        parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
        parser.add_argument('--data_cfg', type=str, default='data/coco.data', help='coco.data file path')
        parser.add_argument('--weights', type=str, default='weights/yolov3.weights', help='path to weights file')
        parser.add_argument('--label',default='car',choices=['car','person'],help='the class to detect')
        parser.add_argument('--conf',type=float,default=0.2,help='conf threshold for object')
        parser.add_argument('--bbox',type=int,nargs=4,default=[20,20,100,100],help='x1,y1,x2,y2 for bbox')
        parser.add_argument('--slide_window',default=False,action='store_true',help='use slide window technolegy or not')
        parser.add_argument('--video_name',required=True,help='input video name')
        args = parser.parse_args()
        sort_keys=sorted(list(self.keys()))
        #TODO remove function key words
        for key in sort_keys:
            if hasattr(args,key):
                # Command line wins over the constructor default.
                print('{} = {} (default: {})'.format(key,args.__dict__[key],self[key]))
                self[key]=args.__dict__[key]
            else:
                if callable(self[key]):
                    pass
                else:
                    print('{} : (default:{})'.format(key,self[key]))
        # Report arguments that have no matching config attribute.
        for key in args.__dict__.keys():
            if key not in self.keys():
                print('{} : unused keys {}'.format(key,args.__dict__[key]))
if __name__ == '__main__':
config=Config()
detector=Area_Detector(config)
detector.process_video(config.video_name) | ISCAS007/demo | areadetection/main.py | main.py | py | 5,517 | python | en | code | 0 | github-code | 13 |
14278103086 | import bpy
from mathutils import *
bl_info = {
"name": "Tila : Empty Mesh",
"author": "Tilapiatsu",
"version": (1, 0, 0, 0),
"blender": (2, 80, 0),
"location": "View3D",
"category": "Mesh",
}
class TILA_EmptyMeshOperator(bpy.types.Operator):
    # Adds an "EmptyMesh" object: a mesh datablock containing no geometry.
    # (Deliberately commented instead of a docstring -- Blender shows an
    # operator's docstring as its tooltip.)
    bl_idname = "object.tila_emptymesh"
    bl_label = "TILA: Empty Mesh"
    bl_options = {'REGISTER', 'UNDO'}
    # Name given to the created mesh datablock.
    emptymesh_name = 'tila_emptymesh'
    def execute(self, context):
        # Remember the caller's mode so it can be restored afterwards.
        currentActiveObject = bpy.context.active_object
        if currentActiveObject:
            currentMode = currentActiveObject.mode
        else:
            currentMode = "OBJECT"
        currentSelection = bpy.context.selected_objects  # NOTE(review): unused
        # Object-adding operators require object mode.
        if currentMode == "EDIT":
            bpy.ops.object.mode_set(mode='OBJECT')
        # Add a plane, then delete its geometry in edit mode, leaving an
        # empty mesh object at the world origin.
        bpy.ops.mesh.primitive_plane_add(align='WORLD', enter_editmode=False, location=(0.0, 0.0, 0.0))
        bpy.context.object.data.name = self.emptymesh_name
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.select_all(action='SELECT')
        bpy.ops.mesh.delete(type='FACE')
        bpy.ops.object.mode_set(mode='OBJECT')
        bpy.context.selected_objects[0].name = "EmptyMesh"
        # Restore edit mode if that is where the user started.
        if currentMode == "EDIT":
            bpy.ops.object.mode_set(mode='EDIT')
        return {'FINISHED'}
addon_keymaps = []
classes = (TILA_EmptyMeshOperator,)
def register():
    """Register every class of this add-on with Blender."""
    for operator_cls in classes:
        bpy.utils.register_class(operator_cls)
def unregister():
    """Remove every class of this add-on from Blender."""
    for operator_cls in classes:
        bpy.utils.unregister_class(operator_cls)
if __name__ == "__main__":
register()
| Tilapiatsu/blender-custom_config | scripts/startup/tila_OP_EmptyMesh.py | tila_OP_EmptyMesh.py | py | 1,541 | python | en | code | 5 | github-code | 13 |
17055504064 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MMemberLevel import MMemberLevel
class MPromoConstraint(object):
    """Constraint settings of a marketing promotion: win limits, member
    levels, crowd restrictions and the shops the promotion applies to."""

    # Keys accepted by from_alipay_dict / emitted by to_alipay_dict.
    _FIELDS = ('crowd_type', 'member_levels', 'need_crowd_flag',
               'sub_dimension', 'sub_win_count', 'suit_shop_ids',
               'total_win_count')

    def __init__(self):
        self._crowd_type = None
        self._member_levels = None
        self._need_crowd_flag = None
        self._sub_dimension = None
        self._sub_win_count = None
        self._suit_shop_ids = None
        self._total_win_count = None

    @property
    def crowd_type(self):
        return self._crowd_type

    @crowd_type.setter
    def crowd_type(self, value):
        self._crowd_type = value

    @property
    def member_levels(self):
        return self._member_levels

    @member_levels.setter
    def member_levels(self, value):
        if isinstance(value, list):
            # Accept ready MMemberLevel objects or plain dicts to convert.
            self._member_levels = [
                item if isinstance(item, MMemberLevel)
                else MMemberLevel.from_alipay_dict(item)
                for item in value
            ]

    @property
    def need_crowd_flag(self):
        return self._need_crowd_flag

    @need_crowd_flag.setter
    def need_crowd_flag(self, value):
        self._need_crowd_flag = value

    @property
    def sub_dimension(self):
        return self._sub_dimension

    @sub_dimension.setter
    def sub_dimension(self, value):
        self._sub_dimension = value

    @property
    def sub_win_count(self):
        return self._sub_win_count

    @sub_win_count.setter
    def sub_win_count(self, value):
        self._sub_win_count = value

    @property
    def suit_shop_ids(self):
        return self._suit_shop_ids

    @suit_shop_ids.setter
    def suit_shop_ids(self, value):
        if isinstance(value, list):
            self._suit_shop_ids = list(value)

    @property
    def total_win_count(self):
        return self._total_win_count

    @total_win_count.setter
    def total_win_count(self, value):
        self._total_win_count = value

    @staticmethod
    def _plain(value):
        # Nested SDK objects serialize themselves; plain values pass through.
        return value.to_alipay_dict() if hasattr(value, 'to_alipay_dict') else value

    def _serialize_list_field(self, attr_name):
        # Convert nested objects of a list field in place (mirroring the SDK
        # convention), then return the serialized value for the params dict.
        value = getattr(self, attr_name)
        if isinstance(value, list):
            for pos, item in enumerate(value):
                if hasattr(item, 'to_alipay_dict'):
                    value[pos] = item.to_alipay_dict()
        return self._plain(value)

    def to_alipay_dict(self):
        """Serialize to a plain dict; falsy fields are omitted (SDK style)."""
        params = dict()
        if self.crowd_type:
            params['crowd_type'] = self._plain(self.crowd_type)
        if self.member_levels:
            params['member_levels'] = self._serialize_list_field('member_levels')
        if self.need_crowd_flag:
            params['need_crowd_flag'] = self._plain(self.need_crowd_flag)
        if self.sub_dimension:
            params['sub_dimension'] = self._plain(self.sub_dimension)
        if self.sub_win_count:
            params['sub_win_count'] = self._plain(self.sub_win_count)
        if self.suit_shop_ids:
            params['suit_shop_ids'] = self._serialize_list_field('suit_shop_ids')
        if self.total_win_count:
            params['total_win_count'] = self._plain(self.total_win_count)
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an MPromoConstraint from a response dict (None for empty)."""
        if not d:
            return None
        o = MPromoConstraint()
        for key in MPromoConstraint._FIELDS:
            if key in d:
                setattr(o, key, d[key])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/MPromoConstraint.py | MPromoConstraint.py | py | 5,252 | python | en | code | 241 | github-code | 13 |
20809205379 | from crypto_tulips.dal.objects.block import Block
from crypto_tulips.dal.objects.transaction import Transaction
from crypto_tulips.dal.objects.pos_transaction import PosTransaction
from crypto_tulips.hashing.crypt_hashing_wif import EcdsaHashing
import time
import json
class GenesisBlockService():
    """Builds the hard-coded genesis block of the chain from a private key."""

    @staticmethod
    def generate_from_priv(private_key):
        # The genesis timestamp is a constant so every node derives an
        # identical block from the same key.
        time_now = 1520135639.4713802
        public = EcdsaHashing.recover_public_key_str(private_key)
        # Seed transactions of 1, 10, 100 and 1000 units to the key owner.
        transactions = [
            Transaction('', '', public, '', amount, 1, time_now)
            for amount in (1, 10, 100, 1000)
        ]
        for tx in transactions:
            tx.update_signature(private_key)
            tx.update_hash()
        # A single initial proof-of-stake transaction of 100 units.
        pos_transactions = [PosTransaction('', '', public, 100, 1, time_now)]
        for pos_tx in pos_transactions:
            pos_tx.update_signature(private_key)
            pos_tx.update_hash()
        block = Block('', '', public, '', 0, transactions, pos_transactions, [], [], [], [], time_now)
        block.update_signature(private_key)
        block.update_hash()
        return block

    @staticmethod
    def generate_from_file(filename = 'priv_rsa'):
        # Read the private key from the config directory and delegate.
        with open('crypto_tulips/config/' + filename, 'r') as key_file:
            private_key = key_file.read()
        return GenesisBlockService.generate_from_priv(private_key)
| StevenJohnston/py-crypto-tulips | crypto_tulips/services/genesis_block_service.py | genesis_block_service.py | py | 1,595 | python | en | code | 1 | github-code | 13 |
72725038739 | import sys
import requests
import math
import re
#import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from bs4 import BeautifulSoup
from qtpy import QtWidgets
from PyQt5.QtCore import Qt
from ui.mainwindow import Ui_MainWindow
app = QtWidgets.QApplication(sys.argv)
url = "https://www.ariva.de"
number = 6
class Canvas(FigureCanvas):
    """Matplotlib canvas embedded in a Qt widget, showing a demo pie chart."""

    def __init__(self, parent=None, width=5, height=5, dpi=200):
        figure = Figure(figsize=(width, height), dpi=dpi)
        self.axes = figure.add_subplot(111)
        FigureCanvas.__init__(self, figure)
        self.setParent(parent)
        self.plotter()

    def plotter(self):
        # Hard-coded demo data until real data is wired in.
        shares = [50, 30, 40]
        names = ["Apples", "Bananas", "Melons"]
        axis = self.figure.add_subplot(111)
        axis.pie(shares, labels=names)
class MainWindow(QtWidgets.QMainWindow):
    """Main window: search ariva.de for a company, pick a result in the
    table, and crawl several years of fundamental figures for it."""

    def __init__(self, parent=None, list_of_companies=None, the_company=None, sorted_matrice=None, input_urls=None, row=0,
                 modnumber=0, chosen="", financials="", value="", complete_financials=None):
        super().__init__(parent)
        # Bug fix: the original signature used mutable default arguments
        # ([]), which are shared across instances; default to None and
        # create a fresh list per instance instead.
        self.list_of_companies = [] if list_of_companies is None else list_of_companies
        self.the_company = [] if the_company is None else the_company
        self.sorted_matrice = [] if sorted_matrice is None else sorted_matrice
        self.row = row                      # table row of the chosen company
        self.chosen = chosen                # display name of the chosen company
        self.input_urls = [] if input_urls is None else input_urls  # result links, parallel to list_of_companies
        self.financials = financials        # href of the "Kennzahlen" tab
        self.value = value                  # largest look-back (years) offered
        self.modnumber = modnumber          # number of full extra result pages
        self.complete_financials = [] if complete_financials is None else complete_financials
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.MYUI()
        self.ui.pushButton.clicked.connect(self.disp_input)
        self.ui.pushButton2.clicked.connect(self.clickedStock)
        self.ui.error_info.hide()
        self.ui.error_info_2.hide()
        self.ui.error_info_3.hide()
        self.ui.tableWidget.hide()
        self.ui.pushButton2.hide()
        self.ui.restart.hide()

    def disp_input(self):
        """Run a search for the line-edit text and show the result table."""
        if self.ui.lineEdit.text() == "":
            return  # nothing typed yet
        self.returnInputResults()
        self.ui.tableWidget.show()
        self.ui.pushButton2.show()
        self.ui.pushButton.hide()
        self.ui.restart.hide()
        self.newRow()

    def MYUI(self):
        """Embed the matplotlib canvas into the window."""
        canvas = Canvas(self, width=8, height=4)
        canvas.move(0, 0)

    def keyPressEvent(self, event):
        """Trigger the search when Enter/Return is pressed."""
        if event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return:
            # Bug fix: the original referenced ``self.disp_input`` without
            # calling it, so pressing Enter did nothing.
            self.disp_input()

    def clickedStock(self):
        """Handle a click on a search result and start crawling it."""
        if self.ui.tableWidget.currentItem() is None:
            self.ui.error_info_2.show()
            return
        if not self.ui.error_info_2.isHidden():
            self.ui.error_info_2.hide()
        # Remember the clicked company; its row indexes input_urls.
        self.chosen = self.ui.tableWidget.currentItem().text()
        self.row = self.ui.tableWidget.currentItem().row()
        # Clear the result table and retitle it for the indicator view.
        while self.ui.tableWidget.rowCount() > 0:
            self.ui.tableWidget.removeRow(0)
        self.ui.tableWidget.setHorizontalHeaderItem(0, QtWidgets.QTableWidgetItem("Indicators"))
        self.goToStockPage()

    def newRow(self):
        """Append one table row per company found by the last search."""
        for company in self.list_of_companies:
            rows = self.ui.tableWidget.rowCount()
            self.ui.tableWidget.insertRow(rows)
            self.ui.tableWidget.setItem(rows, 0, QtWidgets.QTableWidgetItem(company))

    def returnInputResults(self):
        """Query the ariva.de search page; collect names and result links."""
        newurl = url + "/search/search.m?searchname=" + self.ui.lineEdit.text()
        print(newurl)
        # TODO(review): surface self.ui.error_info when the request fails.
        r = requests.get(newurl)
        doc = BeautifulSoup(r.text, "html.parser")
        for link in doc.select("tbody a"):
            self.list_of_companies.append(link.text)
            self.input_urls.append(link.attrs["href"])

    def goToStockPage(self):
        """Open the chosen company's page and look for a 'Kennzahlen' tab."""
        self.ui.pushButton2.hide()
        newurl = url + self.input_urls[self.row]
        print(newurl)
        r = requests.get(newurl)
        doc = BeautifulSoup(r.text, "html.parser")
        for tab in doc.select("#pageSnapshotHeader #reiter_Profilseite li a"):
            if tab.text == "Kennzahlen":
                print("Es gibt Kennzahlen zu diesem Unternehmen")
                self.financials = tab.attrs["href"]
                self.goToFinancials()
                return
        # Bug fix: the original read ``self.financial`` (missing 's'), which
        # raised AttributeError whenever no 'Kennzahlen' tab exists.
        if self.financials == "":
            self.ui.error_info_3.show()

    def goToFinancials(self):
        """Crawl the key-figure pages and stitch ``complete_financials``.

        The figures table shows ``number`` (6) years per page; older years
        live on pages addressed via ``?page=N``.  Columns are appended from
        the oldest page forward so each row runs oldest -> newest.
        """
        newurl2 = url + self.financials
        print(newurl2)
        r = requests.get(newurl2)
        doc = BeautifulSoup(r.text, "html.parser")
        # ``value`` is the largest look-back (in years) the page offers.
        tempmax = 0
        for option in doc.select("#pageFundamental select option"):
            if int(option.attrs["value"]) > tempmax:
                tempmax = int(option.attrs["value"])
                self.value = option.attrs["value"]
        self.modnumber = math.floor(tempmax / number)
        # Crawl page 0 (current years) through the oldest partial page.
        # Keep the branch order: the oldest-page test must precede the
        # page-1 test because both match when modnumber == 0.
        for page_index in range(0, self.modnumber + 2):
            if page_index == 0:
                store_temp1 = self.crawl(doc, page_index)
            elif page_index == self.modnumber + 1:
                store_temp4 = self.crawl(doc, page_index)
            elif page_index == 1:
                store_temp2 = self.crawl(doc, page_index)
            elif page_index == 2:
                store_temp3 = self.crawl(doc, page_index)
        self.complete_financials = store_temp4
        for row_idx in range(0, len(self.complete_financials)):
            if self.modnumber == 2:
                for col in range(0, 6):
                    self.complete_financials[row_idx].append(store_temp3[row_idx][col])
                    self.complete_financials[row_idx].append(store_temp2[row_idx][col])
                    if col != 5:
                        self.complete_financials[row_idx].append(store_temp1[row_idx][col])
            elif self.modnumber == 1:
                for col in range(0, 6):
                    self.complete_financials[row_idx].append(store_temp2[row_idx][col])
                    if col != 5:
                        self.complete_financials[row_idx].append(store_temp1[row_idx][col])
            else:
                for col in range(0, 5):
                    self.complete_financials[row_idx].append(store_temp1[row_idx][col])
        print(self.complete_financials)

    def crawl(self, doc, i):
        """Scrape one key-figure page into lists of floats, one per row.

        ``i`` selects the page: 0 is the page already loaded in ``doc``
        (current years), ``self.modnumber + 1`` is the oldest, partially
        filled page, and 1/2 are full intermediate pages.

        Cell parsing: '- ' and ' ' become 0 (NOTE(review): some of these
        literals may be non-breaking spaces in the live page -- confirm);
        values ending in 'M' (millions) are scaled by appending '0000';
        otherwise German number formatting ('.' thousands, ',' decimal)
        is normalized.
        """
        div1 = ".tabelleUndDiagramm.guv.new.abstand tbody tr td.right"
        div2 = ".tabelleUndDiagramm.aktie.new.abstand tbody tr td.right"
        div3 = ".tabelleUndDiagramm.personal.new.abstand tbody tr td.right"
        div4 = ".tabelleUndDiagramm.bewertung.new.abstand tbody tr td.right"
        div = [div1, div2, div3, div4]
        if i == 0:
            store_temp1 = []
            # First page: take every column except the current year's
            # (every ``fixer``-th cell is skipped).
            for j in div:
                fixer = 6
                counter = 1
                tempnumber = []
                for cell in doc.select(j):
                    if cell.attrs["class"] == ["right", "year"]:
                        continue
                    else:
                        if counter % fixer == 0:
                            counter += 1
                            store_temp1.append(tempnumber)
                            tempnumber = []
                            continue
                        elif (cell.text == "- " and counter % fixer != 0) or (cell.text == " " and counter % fixer != 0):
                            tempnumber.append(0)
                        elif re.findall("[M]", cell.text):
                            temp = cell.text.replace(",", "")
                            temp = temp.replace("M", "")
                            temp = temp.replace(" ", "")
                            temp = temp + "0000"
                            temp = temp.replace(" ", "")
                            tempnumber.append(float(temp))
                        else:
                            temp = cell.text.replace(".", "")
                            temp = temp.replace(",", ".")
                            tempnumber.append(float(temp))
                        counter += 1
            return store_temp1
        elif i == self.modnumber + 1:
            # Oldest page: only the first ``temp_mod`` columns are filled.
            temp_mod = int(self.value) % 6
            newurl3 = url + self.financials + "?page=" + str((i - 1) * number + (temp_mod))
            print(newurl3)
            r = requests.get(newurl3)
            temp_doc = BeautifulSoup(r.text, "html.parser")
            store_temp2 = []
            for j in div:
                fixer = 6
                counter = 1
                tempnumber = []
                for cell in temp_doc.select(j):
                    if cell.attrs["class"] == ["right", "year"]:
                        continue
                    elif counter > temp_mod:
                        pass  # beyond the filled columns of this row
                    else:
                        if cell.text == "- " or cell.text == " ":
                            tempnumber.append(0)
                        elif re.findall("[M]", cell.text):
                            temp = cell.text.replace(",", "")
                            temp = temp.replace("M", "")
                            temp = temp.replace(" ", "")
                            temp = temp + "0000"
                            temp = temp.replace(" ", "")
                            tempnumber.append(float(temp))
                        else:
                            temp = cell.text.replace(".", "")
                            temp = temp.replace(",", ".")
                            tempnumber.append(float(temp))
                    if counter % fixer == temp_mod:
                        store_temp2.append(tempnumber)
                        tempnumber = []
                    if counter == 6:
                        counter = 0
                    counter += 1
            return store_temp2
        elif i == 1 or i == 2:
            # Full intermediate page: all six columns of every row.
            newurl3 = url + self.financials + "?page=" + str(i * number)
            print(newurl3)
            r = requests.get(newurl3)
            temp_doc = BeautifulSoup(r.text, "html.parser")
            store_temp2 = []
            for j in div:
                fixer = 6
                counter = 1
                tempnumber = []
                for cell in temp_doc.select(j):
                    if cell.attrs["class"] == ["right", "year"]:
                        continue
                    else:
                        if cell.text == "- " or cell.text == " ":
                            tempnumber.append(0)
                        elif re.findall("[M]", cell.text):
                            temp = cell.text.replace(",", "")
                            temp = temp.replace("M", "")
                            temp = temp.replace(" ", "")
                            temp = temp + "0000"
                            temp = temp.replace(" ", "")
                            tempnumber.append(float(temp))
                        else:
                            temp = cell.text.replace(".", "")
                            temp = temp.replace(",", ".")
                            tempnumber.append(float(temp))
                    if counter % fixer == 0:
                        store_temp2.append(tempnumber)
                        tempnumber = []
                    counter += 1
            return store_temp2
window = MainWindow()
window.show()
sys.exit(app.exec_())
| JanMarcelKezmann/Web-Crawler-with-PyQt-Widget | main.py | main.py | py | 11,900 | python | en | code | 0 | github-code | 13 |
29635856568 | import sys
import pickle
import neat
import visualize
import pygame
from game import Game
from car import Car
from car import CarAction
MAX_GENOME = None
def run_simulation(genomes, config):
    """NEAT fitness function: drive one car per genome until every car has
    crashed or finished.  Pressing 't' aborts training; the best genome so
    far is stashed in the module-level MAX_GENOME before a "Forced stop"
    exception is raised.
    """
    # Create neural networks
    nets = []
    cars = []
    count = 0
    for id, genome in genomes:
        net = neat.nn.FeedForwardNetwork.create(genome, config)
        nets.append(net)
        genome.fitness = 0
        cars.append(Car())
        count += 1
    # Create game instance
    game = Game(cars, genomes, "map5")
    forcedStop = False
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit(0)
        game.flood_fill()
        remaining_cars = 0
        for i, car in enumerate(cars):
            if car.is_alive and not car.has_completed:
                remaining_cars += 1
                # Feed the car's sensor state through its network and act
                # on the strongest output.
                output = nets[i].activate(car.get_state())
                car.update(output.index(max(output)))
                genomes[i][1].fitness = car.get_fitness(game.total_time)
                game.check_collisions(car, i)
        # All cars dead or finished -> this generation is done.
        if remaining_cars == 0:
            break
        game.tick()
        if pygame.key.get_pressed()[pygame.K_t]:
            forcedStop = True
        if forcedStop:
            # Record the best genome of this run, then abort training.
            global MAX_GENOME
            MAX_GENOME = genomes[0][1]
            for genome in genomes:
                if genome[1].fitness > MAX_GENOME.fitness:
                    MAX_GENOME = genome[1]
            raise Exception("Forced stop")
# NEAT setup: load the configuration and evolve for up to 1000 generations.
CONFIG_PATH = "./config.txt"
neat_config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction, neat.DefaultSpeciesSet,
                                 neat.DefaultStagnation, CONFIG_PATH)
population = neat.Population(neat_config)
stats = neat.StatisticsReporter()
population.add_reporter(stats)
population.add_reporter(neat.StdOutReporter(True))
winner = None
try:
    winner = population.run(run_simulation, 1000)
except Exception as e:
    # A "Forced stop" (pressing 't') lands here; fall back to the best
    # genome recorded so far by run_simulation.
    winner = MAX_GENOME
# Plot fitness/species charts and persist the winning genome.
visualize.plot_stats(stats, True, True, "fitness.png")
visualize.plot_species(stats, True, "species.png")
with open('winner', 'wb') as f:
    pickle.dump(winner, f)
| lynconEBB/ai-cars | main.py | main.py | py | 2,137 | python | en | code | 0 | github-code | 13 |
26870162745 | from itertools import permutations
def pan(s):
    """Return True when ``s`` contains each digit 0-9 exactly once
    (i.e. ``s`` is a 0-to-9 pandigital string)."""
    # Any repeated character disqualifies the string immediately.
    if len(set(s)) < len(s):
        return False
    # With no repeats, it only remains to check every digit is present.
    return all(digit in s for digit in "0123456789")
ds=[2,3,5,7,11,13,17]
def fun(n):
    """Project Euler 43 divisibility test: for the decimal digits of ``n``,
    each 3-digit slice d2d3d4 .. d8d9d10 must be divisible by the matching
    prime in the module-level ``ds`` table."""
    digits = str(n)
    for offset in range(1, 7 + 1):
        if int(digits[offset:offset + 3]) % ds[offset - 1] != 0:
            return False
    return True
# All 10-digit 0-9 pandigital numbers (permutations not starting with '0').
pans=[int("".join(s)) for s in list(filter(lambda s:s[0]!="0",list(permutations("0123456789",r=10))))]
print(len(pans))
# NOTE(review): ``sum`` shadows the builtin of the same name below.
sum=0
# Print and accumulate every pandigital satisfying the divisibility rule.
for i in pans:
    if fun(i):
        print(i)
        sum+=i
print("SUM",sum) | zydiig/PESolution | 43.py | 43.py | py | 534 | python | en | code | 0 | github-code | 13 |
35770923422 | import glob
import csv
import os
import pandas as pd
def convert(filenames, filePath):
    """Convert tweet-JSON files (one JSON object per line) into CSVs of
    (tweet_id, text) written under ``filePath + "csv/"``.

    NOTE(review): ids and texts are extracted by string splitting, so this
    relies on the exact key order of the serialized JSON ('id' before
    'text' before 'truncated'); a per-line ``json.loads`` would be more
    robust -- confirm the input format before changing.
    """
    num = 0
    for filename in filenames:
        # print(filename)
        tweetTexts = []
        # tweetTexts.append('text')
        tweetIds = []
        # tweetIds.append('tweet_id')
        i = -1  # NOTE(review): unused
        # print(filename)
        # print(filenames)
        with open(filename) as f:
            for line in f:
                #make a list of all tweet ids
                tweetId = str(line).split(', "id": ')
                tweetId = tweetId[1].split(",")
                tweetIds.append(str(tweetId[0]))
                # print(str(tweetId[0]))
                #make a list of all tweet texts
                text = str(line).split(', "text": "')
                text = text[1].split('", "truncated":')
                tweetTexts.append(text[0])
                # print(text[0])
        #create a csv for the movie and write into csv
        # csvFile = filename.split('/movies-allTime') #add \\ at the end of this exp if required
        # The CSV name mirrors the part of the path after '/tweets',
        # minus the .json extension.
        csvFile = filename.split('/tweets')
        print(csvFile)
        csvFile = csvFile[1].split(".json")
        csvFilename = filePath + "csv/" + csvFile[0] + '.csv'
        dirname = os.path.dirname(csvFilename)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        raw_data = {'tweet_id': tweetIds,'text': tweetTexts}
        df = pd.DataFrame(raw_data, columns=['tweet_id', 'text'])
        df.to_csv(csvFilename)
        # with open(csvFilename, 'rb') as csvfile:
        #     spamreader = csv.reader(csvfile, delimiter='|', quotechar='|')
        #     for row in spamreader:
        #         print ', '.join(row)
        num += 1
        # Progress report in percent of files processed.
        print ("Converting JSONs to CSVs: "+str(num * 100 / len(filenames)) + "% complete")
#test local - only run the convert to see what it does
# filenames = glob.glob("./data/allTimeMovieTweets/movies-allTime/*.json")
# filenames = glob.glob("./data/allTimeMovieTweets/movies-allTime/query-#101dalmatians-2017-05-10-09-43-14.json")
# convert(filenames) | namrata-simha/Slug-MovieBot | DataExtractionAndPreprocess/convertJSON_CSV.py | convertJSON_CSV.py | py | 2,115 | python | en | code | 0 | github-code | 13 |
22036111695 | #
# @lc app=leetcode.cn id=695 lang=python3
#
# [695] 岛屿的最大面积
#
from typing import List
# @lc code=start
# 深度优先(递归)
# class Solution:
# def maxAreaOfIsland(self, grid: List[List[int]]) -> int:
# max_area = 0
# for i, l in enumerate(grid):
# for j, m in enumerate(l):
# max_area = max(max_area, self.dfs(grid, i, j))
# return max_area
# def dfs(self, grid, cur_i, cur_j):
# if cur_i < 0 or cur_j < 0 or cur_i == len(grid) or cur_j == len(grid[0]) or grid[cur_i][cur_j] != 1:
# return 0
# area = 1
# grid[cur_i][cur_j] = 0
# directions = [[0, 1], [1, 0], [0, -1], [-1, 0]] # 以下(x)/右(y)为正方向建立坐标系: 右, 下, 左, 上
# for direction in directions:
# next_i = cur_i+direction[0]
# next_j = cur_j+direction[1]
# if 0 <= next_i < len(grid) and 0 <= next_j < len(grid[0]):
# area += self.dfs(grid, next_i, next_j)
# return area
from collections import deque
class Solution:
    """LeetCode 695: iterative (explicit-stack) DFS over the grid.

    Visited land cells are zeroed in place so each cell is counted once.
    """

    # Neighbor offsets: right, down, left, up.  Hoisted to a class constant
    # -- the original rebuilt this list on every stack pop.
    _DIRECTIONS = ((0, 1), (1, 0), (0, -1), (-1, 0))

    def maxAreaOfIsland(self, grid: List[List[int]]) -> int:
        """Return the area of the largest 4-connected island of 1s.

        Time O(rows*cols); extra space O(rows*cols) for the stack.
        Mutates ``grid`` (visited land cells are set to 0).
        """
        max_area = 0
        for start_i in range(len(grid)):
            for start_j in range(len(grid[0])):
                cur_area = 0
                stack = deque()
                stack.append((start_i, start_j))
                while stack:
                    cur_i, cur_j = stack.pop()
                    # Bounds and land checks happen at pop time; neighbors
                    # are pushed unconditionally below.
                    if cur_i < 0 or cur_j < 0 or cur_i == len(grid) or cur_j == len(grid[0]) or grid[cur_i][cur_j] != 1:
                        continue
                    cur_area += 1
                    grid[cur_i][cur_j] = 0  # mark visited
                    for di, dj in self._DIRECTIONS:
                        stack.append((cur_i + di, cur_j + dj))
                max_area = max(max_area, cur_area)
        return max_area
# @lc code=end
def test():
    # Smoke test: the largest island of the LeetCode sample grid has area 6.
    assert Solution().maxAreaOfIsland(
        [[0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
         [0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
         [0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
         [0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0]]) == 6
| revang/leetcode | 695.岛屿的最大面积.py | 695.岛屿的最大面积.py | py | 2,551 | python | en | code | 0 | github-code | 13 |
13713835332 | # -*- coding: utf-8 -*-
from PIL import Image
from chess_board import WHITE
def draw_choose_mask(color):
    """Build the semi-transparent pawn-promotion overlay.

    Draws the queen, rook, bishop and knight sprites of ``color`` side by
    side on a 750x800 translucent grey canvas and returns the PIL image.
    """
    path = ''
    prefix = 'white' if color == WHITE else 'black'
    # Sprite files in display order, left to right.
    suffixes = ('_queen.png', '_rook.png', '_bishop.png', '_knight.png')
    sprites = [Image.open(path + prefix + suffix) for suffix in suffixes]
    overlay = Image.new('RGBA', (750, 800), (192, 192, 192, 192))
    # Pieces sit 125 px apart, starting at x=150, all at y=360.
    for idx, sprite in enumerate(sprites):
        overlay.paste(sprite, box=(150 + 125 * idx, 360), mask=sprite)
    return overlay
| AleshinAndrei/Chess | draw_mask_of_choose.py | draw_mask_of_choose.py | py | 914 | python | en | code | 0 | github-code | 13 |
17052633834 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class HospitalDTO(object):
    """Basic hospital record: id, name, grade level and ownership type."""

    # Keys understood by from_alipay_dict / produced by to_alipay_dict.
    _FIELDS = ('hospital_id', 'hospital_name', 'level', 'ownership')

    def __init__(self):
        self._hospital_id = None
        self._hospital_name = None
        self._level = None
        self._ownership = None

    @property
    def hospital_id(self):
        return self._hospital_id

    @hospital_id.setter
    def hospital_id(self, value):
        self._hospital_id = value

    @property
    def hospital_name(self):
        return self._hospital_name

    @hospital_name.setter
    def hospital_name(self, value):
        self._hospital_name = value

    @property
    def level(self):
        return self._level

    @level.setter
    def level(self, value):
        self._level = value

    @property
    def ownership(self):
        return self._ownership

    @ownership.setter
    def ownership(self, value):
        self._ownership = value

    @staticmethod
    def _plain(value):
        # Nested SDK objects serialize themselves; plain values pass through.
        return value.to_alipay_dict() if hasattr(value, 'to_alipay_dict') else value

    def to_alipay_dict(self):
        """Serialize to a plain dict; falsy fields are omitted (SDK style)."""
        params = dict()
        for key in self._FIELDS:
            value = getattr(self, key)
            if value:
                params[key] = self._plain(value)
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a HospitalDTO from a response dict (None for empty input)."""
        if not d:
            return None
        o = HospitalDTO()
        for key in HospitalDTO._FIELDS:
            if key in d:
                setattr(o, key, d[key])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/HospitalDTO.py | HospitalDTO.py | py | 2,346 | python | en | code | 241 | github-code | 13 |
23551633400 | import os
import asyncio
import logging
from aiogram import Dispatcher, Bot, types
from aiogram.fsm.storage.memory import MemoryStorage
from src.modules import modules
from src.start import START
def create_dispatcher() -> Dispatcher:
    """Build the dispatcher: in-memory FSM storage plus every module router."""
    dispatcher = Dispatcher(storage=MemoryStorage())
    # The /start router goes first, then each feature module's router.
    dispatcher.include_router(START.router)
    for mod in modules:
        dispatcher.include_router(mod.router)
    return dispatcher
async def main():
    """Entry point: build the bot, advertise /start, and run long polling."""
    # Token comes from the TOKEN environment variable.
    bot = Bot(token=os.getenv('TOKEN'))
    # Register the command list shown in the Telegram client UI.
    await bot.set_my_commands(
        commands=[types.BotCommand(
            command='start', description='Главное меню бота')]
    )
    dispatcher = create_dispatcher()
    logging.basicConfig(level=logging.INFO)
    # Blocks until the process is stopped.
    await dispatcher.start_polling(
        bot
    )
if __name__ == '__main__':
asyncio.run(main())
| ktp0li/summus | src/__main__.py | __main__.py | py | 858 | python | en | code | 3 | github-code | 13 |
23440544469 | # imports os module
# imports os module
# Bug fix: "from os import *" shadowed the builtin open() with os.open(),
# which takes integer flags and breaks every open(path, "w") call below.
import os

# file operators
# w for write, r for read, a for appending
file = open("myfile.txt", "w")  # writing to a file that doesn't exist makes a new file
file.write("Hello world")
file.flush()  # flushes the stream but the file is still open
file.close()  # closing the file stream flushes and closes it (flushing after close raises)
# if you don't want to manually close a file, use a context manager
with open('myfile.txt', "r") as f:  # reading a file that doesn't exist fails
    content = f.read()  # mode "r" is read-only; writing here (as the original did) would raise
print(content)
os.mkdir("test")  # makes a directory named test aka folder
os.chdir("test")  # inside of folder (bug fix: was "Test", which was never created)
os.rename("da.txt", "ja.txt")  # bug fix: os.rename needs a source AND a destination
os.remove("ja.txt")
| Sashe-Bashe/NeuralNine_PCAP | Notes For Begginer Lessons/9.py | 9.py | py | 620 | python | en | code | 0 | github-code | 13 |
24159777926 | import numpy as np
from scipy import ndimage
import torch
import torch.nn as nn
import torch.nn.functional as F
class WingLoss(nn.Module):
    """Wing loss for landmark regression (Feng et al., CVPR 2018).

    Behaves like a log-scaled L1 for small errors (|diff| < omega) and a
    shifted L1 for large errors; the constant C joins both pieces continuously.
    """
    def __init__(self, omega=10.0, epsilon=2.0):
        super(WingLoss, self).__init__()
        self.omega = omega
        self.epsilon = epsilon

    def forward(self, prediction, target):
        # C makes the two branches meet at |diff| == omega.
        C = (self.omega - self.omega * np.log(1.0+self.omega/self.epsilon))
        diff_abs = torch.abs(prediction-target)
        loss = torch.where(diff_abs < self.omega,
                           self.omega * torch.log(1.0+diff_abs/self.epsilon),
                           diff_abs - C
                           )
        # Bug fix: the original computed the loss but never returned it
        # (forward() returned None).  Mean reduction matches AWingLoss below.
        return loss.mean()
class AWingLoss(nn.Module):
    """Adaptive Wing loss for heatmap regression (Wang et al., ICCV 2019).

    The curvature adapts to the target value: foreground (high heatmap) pixels
    get a sharper penalty than background ones.  With weighted_map=True the
    loss is additionally up-weighted around dilated foreground regions.
    """
    def __init__(self, omega=14.0, epsilon=1.0, theta=0.5, alpha=2.1, weighted_map=True):
        super(AWingLoss, self).__init__()
        self.omega = omega
        self.epsilon = epsilon
        self.theta = theta
        self.alpha = alpha
        self.weighted_map = weighted_map

    def forward(self, prediction, target):
        """Return the (optionally weighted) mean AWing loss over all elements."""
        # A and C make the nonlinear part and the linear part continuous
        # and smooth at |diff| == theta (paper eq. 8).
        A = self.omega * (1.0/(1.0+torch.pow(self.theta/self.epsilon, self.alpha-target))) * (self.alpha-target) * torch.pow(self.theta/self.epsilon, self.alpha-target-1.0) * (1.0/self.epsilon)
        C = (self.theta*A - self.omega*torch.log(1.0+torch.pow(self.theta/self.epsilon, self.alpha-target)))
        diff_abs = torch.abs(prediction-target)
        loss = torch.where(diff_abs < self.theta,
                           self.omega * torch.log(1.0+torch.pow(diff_abs/self.epsilon, self.alpha-target)),
                           A * diff_abs - C
                           )
        if self.weighted_map:
            loss *= self.generate_loss_map_mask(target)
        return loss.mean()

    def generate_loss_map_mask(self, target, W=10.0, k_size=3, threshold=0.2):
        """Build the W+1 / 1 weight map: grey-dilate each heatmap and weight
        pixels whose dilated response exceeds *threshold* by W (plus 1 base).

        NOTE(review): assumes target is (batch, channels, H, W) — the two loops
        index the first two axes; confirm against the caller.
        """
        target_array = target.cpu().numpy()
        mask = np.zeros_like(target_array)
        for batch in range(mask.shape[0]):
            for loc in range(mask.shape[1]):
                H_d = ndimage.grey_dilation(target_array[batch, loc], size=(k_size, k_size))
                mask[batch, loc, H_d > threshold] = W
        return torch.Tensor(mask+1).to(target.device)
25980523169 | import random
word_list = ["aardvark", "baboon", "camel"]
chosen_word = random.choice(word_list)

# Testing code
print(f'Pssst, the solution is {chosen_word}.')

# One blank slot per letter of the secret word.
display = ["_" for _ in chosen_word]
print(display)

end_game = False
while not end_game:
    guess = input("Guess a letter: ").lower()

    # Reveal every position where the guessed letter matches.
    for position, letter in enumerate(chosen_word):
        if letter == guess:
            display[position] = letter
    print(display)

    if "_" not in display:
        end_game = True
        print("You Win!")
38406937816 | # import libraries
from segmentation_model import get_model
from classification_model import classification_model
from coef_and_loss import dice_coef, dice_loss
import pandas as pd
import numpy as np
import cv2 as cv
import tensorflow as tf
# read our data
train = pd.read_csv('train_ship_segmentations_v2.csv')
# decode pixels to take mask
def find_mask(encoded_pixels, size):
    """Decode a flat run-length-encoded (start, length) pixel list into a
    size x size binary mask (transposed, i.e. column-major runs)."""
    flat_positions = []
    for start, steps in zip(encoded_pixels[0::2], encoded_pixels[1::2]):
        flat_positions.extend(range(start, start + steps))
    flat = np.zeros((size**2), dtype=np.uint8)
    flat[flat_positions] = 1
    return np.reshape(flat, (size, size)).T
# random our dataset
np.random.seed(0)
# NOTE(review): shuffling .values may not reorder the DataFrame itself on all
# pandas versions — verify train is actually shuffled.
np.random.shuffle(train.values)
# split data into 2 groups: with ships and without
train_without_ship = train[train['EncodedPixels'].isna()]
train_without_ship.index = [i for i in range(len(train_without_ship))]
train_with_ship = train[train['EncodedPixels'].notna()].groupby('ImageId')['EncodedPixels'].apply(lambda x: ' '.join(x)).to_frame()
train_with_ship = train_with_ship.reset_index()
# count of images
n = 3000
# take arrays of images and coordinates, for those images
imgs_to_classification = []
imgs_to_segmentation = []
mask_to_segmentation = []
y = []
for i in range(n):
    try:
        # read and resize image
        img = cv.imread('train_v2/'+train_with_ship['ImageId'][i])
        img = cv.GaussianBlur(img, (3,3), cv.BORDER_DEFAULT)
        img = cv.resize(img, (160,160))
        img = img.astype(np.uint8)
        # decode pixels and take mask
        encoded_pixels = [int(k) for k in train_with_ship['EncodedPixels'][i].split()]
        mask = find_mask(encoded_pixels, 768)
        mask = cv.resize(mask, (160,160))
        imgs_to_segmentation.append(img)
        mask_to_segmentation.append(mask)
        # take 50% of images with ships and 50% without ships
        if i % 2 == 0:
            imgs_to_classification.append(img)
            y.append(np.array([1,0]))
        else:
            img = cv.imread('train_v2/'+train_without_ship['ImageId'][i])
            img = cv.GaussianBlur(img, (3,3), cv.BORDER_DEFAULT)
            img = cv.resize(img, (160,160))
            img = img.astype(np.uint8)
            imgs_to_classification.append(img)
            y.append(np.array([0,1]))
    except Exception:
        # Corrupted img (fix: bare "except:" also swallowed KeyboardInterrupt)
        pass
# change dtypes of our input and output data
imgs_to_classification = np.array(imgs_to_classification, dtype=np.uint8)
y = np.array(y, dtype=np.uint8)
imgs_to_segmentation = np.array(imgs_to_segmentation, dtype=np.float16)
mask_to_segmentation = np.array(mask_to_segmentation, dtype=np.float16)
# compiling two models
model = get_model((160,160))
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), loss=dice_loss, metrics=[dice_coef])
cnn3 = classification_model()
# Bug fix: Keras' compile keyword is "optimizer"; "optimizers=" raised a TypeError.
cnn3.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
from sklearn.model_selection import train_test_split
# splitting data into train and valid groups
X_segm_train, X_segm_valid, y_segm_train, y_segm_valid = train_test_split(imgs_to_segmentation, mask_to_segmentation.reshape(-1,160,160,1), test_size=0.1, random_state=42)
X_class_train, X_class_valid, y_class_train, y_class_valid = train_test_split(imgs_to_classification, y, test_size=0.2, random_state=42)
# fitting model
cnn3.fit(X_class_train, y_class_train, validation_data=(X_class_valid, y_class_valid), epochs=40, batch_size=32)
model.fit(X_segm_train, y_segm_train, validation_data=(X_segm_valid, y_segm_valid), epochs=100, batch_size=32)
| daniltomashi/AirbusShipDetection | train_models.py | train_models.py | py | 3,690 | python | en | code | 0 | github-code | 13 |
2747611762 | from telegram import ParseMode, Update
from telegram.ext import CallbackContext
from tgbot.handlers.vpn import static_text
from tgbot.handlers.utils.info import extract_user_data_from_update
from users.models import User
from payment.utils import create_payment_for_user
from tgbot.handlers.vpn.keyboards import make_keyboard_for_start_command
from tgbot.handlers.vpn.state_handlers import get_trx_hash
from django.conf import settings
def command_start(update: Update, context: CallbackContext) -> None:
    """Handle /start: greet the user, or — if the chat is waiting for a
    transaction hash — route the message to the hash handler instead."""
    u, created = User.get_user_and_created(update, context)
    if u.chat_state == User.ChatStateChoices.GET_TRX_HASH.name:
        # Mid-payment: treat this message as the transaction hash.
        get_trx_hash(update, context)
        return
    text = static_text.start_text.format(first_name=u.first_name)
    update.message.reply_text(text=text,
                              reply_markup=make_keyboard_for_start_command())
def command_buy(update: Update, context: CallbackContext) -> None:
    """Handle /buy: create a payment, then send the wallet QR code and payment
    instructions to the user."""
    u = User.get_user(update, context)
    user_id = u.user_id
    payment, vpn, amount = create_payment_for_user(u)
    # Wallet balance already covers the subscription: nothing to pay.
    if amount == 0:
        context.bot.send_message(
            chat_id=user_id,
            text=static_text.wallet_has_enough_money,
        )
        return
    # Send the wallet QR image with the wallet address as caption.
    with open(settings.BASE_DIR / settings.QR_CODE, 'rb') as qr_code:
        context.bot.send_photo(
            caption=settings.WALLET,
            chat_id=user_id,
            photo=qr_code
        )
    # Payment summary: amount, package duration and usage limit.
    context.bot.send_message(
        text=static_text.create_payment_text.format(
            amount=amount,
            days=vpn.subscription.package_days,
            usage=vpn.subscription.usage_limit
        ),
        chat_id=user_id,
    )
def command_edu(update: Update, context: CallbackContext) -> None:
    """Replace the originating callback message with the (placeholder) edu text."""
    user_id = extract_user_data_from_update(update)['user_id']
    # NOTE(review): assumes this was triggered via an inline keyboard —
    # update.callback_query is None for plain messages; confirm registration.
    context.bot.edit_message_text(
        text="edit for edu",
        chat_id=user_id,
        message_id=update.callback_query.message.message_id,
        parse_mode=ParseMode.HTML
    )
def command_list(update: Update, context: CallbackContext) -> None:
    """Send the user an HTML list of their still-active VPN links."""
    user = User.get_user(update, context)
    active_links = [v for v in user.vpns.all() if v.left_days > 0]
    row_text = "{index}. <a href='{link}'>Link</a> | {days:02d} days\n"
    res_text = "".join(
        row_text.format(index=i + 1, link=v.link, days=v.left_days)
        for i, v in enumerate(active_links)
    )
    # Fall back to the "no links" text when nothing is active.
    context.bot.send_message(
        text=res_text if res_text else static_text.empty_vpn_link,
        chat_id=user.user_id,
        parse_mode=ParseMode.HTML
    )
# def command_history(update: Update, context: CallbackContext) -> None:
# user = User.get_user(update, context)
# res_text = "id. amount | minute left | to address | status | transaction hash\n"
# row_text = "{id}. {amount} USDT | {minute} Minutes left | {address} | {status} | {trx_hash}\n"
# for p in user.payments.all():
# res_text += row_text.format(
# id=p.id,
# amount=p.amount,
# minute=int(p.expired_after),
# address=p.to_address,
# status=p.status,
# trx_hash=p.trx_hash
# )
#
# context.bot.send_message(
# text=res_text,
# chat_id=user.user_id,
# parse_mode=ParseMode.HTML
# )
def command_cancel(update: Update, context: CallbackContext) -> None:
    """Reset the user's chat state and acknowledge the cancellation."""
    user = User.get_user(update, context)
    user.chat_state = User.ChatStateChoices.NONE
    user.save()
    # NOTE(review): assumes cancellation came from an inline-keyboard callback;
    # update.callback_query would be None for a plain /cancel message — confirm.
    context.bot.edit_message_text(
        text="Cancel successfully",
        chat_id=user.user_id,
        message_id=update.callback_query.message.message_id,
        parse_mode=ParseMode.HTML
    )
| amirshabanics/vpn-hiddify-telegram-bot | tgbot/handlers/vpn/handlers.py | handlers.py | py | 3,724 | python | en | code | 4 | github-code | 13 |
71045582417 | from rest_framework import serializers
from .models import Video, Comment, Like
class VideoSerializer(serializers.ModelSerializer):
    """Serializer for Video objects; the upload itself is exposed as `video`."""
    video = serializers.FileField()
    class Meta:
        model = Video
        fields = ('user', 'title', 'video', 'slug', 'created_at', 'updated_at',)
        # Only `title` and `video` are writable by clients.
        read_only_fields = ('user', 'slug', 'created_at', 'updated_at')
class LikeSerializer(serializers.ModelSerializer):
    """Read-only serializer for Like objects (all fields set server-side)."""
    class Meta:
        model = Like
        fields = ('user', 'video', 'created_at',)
        read_only_fields = ('user', 'video', 'created_at',)
class CommentSerializer(serializers.ModelSerializer):
    """Serializer for Comment objects; `comment` is the only writable field."""
    class Meta:
        model = Comment
        fields = ('user', 'video', 'comment', 'created_at', 'updated_at',)
        read_only_fields = ('user', 'video',)
13431904405 | chains = [0] * (10 ** 6 + 2)
def colatz(n):
    """Return the length of the Collatz chain starting at n.

    Uses the module-level `chains` list as a memo.  Improvement: the memo is
    now also written here, so intermediate values reached during recursion
    (including values above the solve() target) are cached instead of being
    recomputed on every call.
    """
    if n == 1:
        return 1
    elif len(chains) > n and not chains[n] == 0:
        return chains[n]
    elif n % 2 == 1:
        length = 1 + colatz(n * 3 + 1)
    else:
        length = 1 + colatz(n // 2)
    if n < len(chains):
        chains[n] = length  # memoize (original only cached inside solve())
    return length
def solve(target):
    """Return the starting number <= target with the longest Collatz chain."""
    best_start, best_length = 1, 1
    for start in range(1, target + 1):
        chains[start] = colatz(start)
        if chains[start] > best_length:
            best_start, best_length = start, chains[start]
    return best_start
def main():
    # Longest Collatz chain below one million (Project Euler problem 14).
    print(solve(10**6))
if __name__ == "__main__":
main()
| thindo/projecteuler | python/pe014.py | pe014.py | py | 537 | python | en | code | 0 | github-code | 13 |
73607329616 | import functools
import logging
import logging.config
from contextlib import contextmanager
from logging import getLogger
from logging.handlers import QueueHandler, QueueListener
from trollflow2 import MP_MANAGER
DEFAULT_LOG_CONFIG = {'version': 1,
'disable_existing_loggers': False,
'formatters': {'pytroll': {'format': '[%(levelname)s: %(asctime)s : %(name)s] %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S'}},
'handlers': {'console': {'class': 'logging.StreamHandler',
'formatter': 'pytroll'}},
'root': {'level': 'DEBUG', 'handlers': ['console']}}
LOG_QUEUE = MP_MANAGER.Queue()
LOG_CONFIG = None
@contextmanager
def logging_on(config=None):
    """Activate queued logging.

    This context activates logging through the use of logging's QueueHandler and
    QueueListener.

    Whether the default config parameters are used or a custom configuration is
    passed, the log handlers are passed to a QueueListener instance, such that
    the subprocesses of trollflow2 can use a QueueHandler to communicate logs.
    """
    root = logging.getLogger()
    # Lift out the existing handlers (we need to keep these for pytest's caplog)
    handlers = root.handlers.copy()
    with configure_logging(config):
        root.handlers.extend(handlers)
        # set up and run listener: it drains LOG_QUEUE and forwards records
        # to every handler attached to the root logger.
        listener = QueueListener(LOG_QUEUE, *(root.handlers))
        listener.start()
        try:
            yield
        finally:
            # Stop the listener even if the body raised.
            listener.stop()
@contextmanager
def configure_logging(config):
    """Configure the logging using the provided *config* dict.

    Publishes the config in the module-level LOG_CONFIG (read by
    create_logged_process) for the duration of the context, and resets
    logging state on exit.
    """
    _set_config(config)
    global LOG_CONFIG
    LOG_CONFIG = config
    try:
        yield
    finally:
        # Clear the published config and wipe handlers/levels so a
        # subsequent activation starts from a clean slate.
        LOG_CONFIG = None
        reset_logging()
def _set_config(config):
if config is None:
config = DEFAULT_LOG_CONFIG
logging.config.dictConfig(config)
def reset_logging():
    """Reset logging.

    Re-enables every logger, restores propagation, clears filters, and
    flushes/closes/removes all handlers so a fresh configuration can be
    applied afterwards.

    Source: https://stackoverflow.com/a/56810619/9112384
    """
    manager = logging.root.manager
    manager.disabled = logging.NOTSET
    for logger in manager.loggerDict.values():
        # loggerDict may also contain PlaceHolder entries; skip those.
        if isinstance(logger, logging.Logger):
            logger.setLevel(logging.NOTSET)
            logger.propagate = True
            logger.disabled = False
            logger.filters.clear()
            handlers = logger.handlers.copy()
            for handler in handlers:
                # Copied from `logging.shutdown`.
                try:
                    handler.acquire()
                    handler.flush()
                    handler.close()
                except (OSError, ValueError):
                    pass
                finally:
                    handler.release()
                logger.removeHandler(handler)
def setup_queued_logging(log_queue, config=None):
    """Set up queued logging in a spawned subprocess.

    The child only forwards records through *log_queue*; real handlers live
    in the parent process, so handler entries are stripped from the config.
    """
    root = getLogger()
    if config:
        remove_handlers_from_config(config)
        _set_config(config)
    root.addHandler(QueueHandler(log_queue))
def remove_handlers_from_config(config):
    """Strip handler references from a logging dict-config in place.

    Removes the top-level "handlers" section and each logger's "handlers"
    list; everything else is left untouched.
    """
    config.pop("handlers", None)
    for logger_cfg in config["loggers"].values():
        logger_cfg.pop("handlers", None)
def queued_logging(func):
    """Decorate a subprocess entry point so it takes `log_queue` and
    `log_config` keyword arguments and wires up queued logging before
    running the wrapped function."""
    @functools.wraps(func)
    def wrapper_decorator(*args, **kwargs):
        # Pop the logging kwargs injected by create_logged_process so
        # the wrapped function never sees them.
        log_queue = kwargs.pop("log_queue")
        log_config = kwargs.pop("log_config")
        setup_queued_logging(log_queue, config=log_config)
        value = func(*args, **kwargs)
        # Pass the wrapped function's result straight through.
        return value
    return wrapper_decorator
def create_logged_process(target, args, kwargs=None):
    """Create a spawned Process whose *target* receives the module's log queue
    and current log config (target is expected to be @queued_logging-wrapped)."""
    from multiprocessing import get_context
    if kwargs is None:
        kwargs = {}
    # Inject the queue/config published by configure_logging().
    kwargs["log_queue"] = LOG_QUEUE
    kwargs["log_config"] = LOG_CONFIG
    # "spawn" gives a clean interpreter: no inherited logging handlers.
    ctx = get_context('spawn')
    proc = ctx.Process(target=target, args=args, kwargs=kwargs)
    return proc
| pytroll/trollflow2 | trollflow2/logging.py | logging.py | py | 4,241 | python | en | code | 7 | github-code | 13 |
8619185158 | import numpy as np
import xarray as xr
import argparse
import configparser
import pickle
import image_generator
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import TensorBoard, ReduceLROnPlateau, ModelCheckpoint, EarlyStopping
from model import leveeNet
parser = argparse.ArgumentParser(description="Train levee detection model")
parser.add_argument("-c", "--config", help="configuration file",
                    type=str, required=True)
parser.add_argument("-w", "--weights", help="weight hdf5 file path",
                    type=str, required=True)
# parse args and check data types
args = parser.parse_args()
weight_path = args.weights
config = configparser.ConfigParser()
config.read(args.config)
n_classes = config.getint("model", "n_classes")
assert isinstance(n_classes, int), "n_classes must be int, but got {0}".format(type(n_classes))
resize_v = config.getint("generator", "resize_v")
# Fix: assertion messages below claimed the wrong expected type/name.
assert isinstance(resize_v, int), "resize_v must be int, but got {0}".format(type(resize_v))
resize_h = config.getint("generator", "resize_h")
assert isinstance(resize_h, int), "resize_h must be int, but got {0}".format(type(resize_h))
n_channels = config.getint("generator", "n_channels")
assert isinstance(n_channels, int), "n_channels must be int, but got {0}".format(type(n_channels))
image_size = (resize_v, resize_h, n_channels)
max_pool = config.getint("generator", "max_pool")
# Fix: use boolean `or` instead of bitwise `|` between conditions.
assert (isinstance(max_pool, int) or (max_pool is None)), "max_pool must be None or int, but got {0}".format(type(max_pool))
shuffle = config.getboolean("generator", "shuffle")
augment = config.getboolean("generator", "augment")
batch_size = config.getint("train", "batch_size")
assert isinstance(batch_size, int), "batch_size must be int, but got {0}".format(type(batch_size))
testpath = config.get("data", "test_path")
# load data
data = xr.open_dataset(testpath)
X_test = data["features"]
Y_test = data["labels"]
X_test, Y_test = image_generator.DataGenerator(X_test, Y_test,
                                               n_classes, batch_size,
                                               image_size, max_pool,
                                               shuffle, augment).testDataGenerator()
model = leveeNet(n_classes, image_size)
model.compile(loss="categorical_crossentropy", optimizer=Adam(), metrics=["accuracy"])
model.load_weights(weight_path)
score = model.evaluate(X_test, Y_test, verbose=0)
print("Test score:", score[0])
# Fix: label ended with ";" instead of ":".
print("Test accuracy:", score[1])
| windsor718/leveeNet | model/cnn/test.py | test.py | py | 2,505 | python | en | code | 4 | github-code | 13 |
44435806066 |
import re

# read the file
text = open('moby_dick.txt').read()

# normalise case and strip punctuation/digits
text = text.lower()
text = re.sub('[\*\.\!\"\;\?,\d]', '', text)
text = re.sub("[\-\(\)\']", ' ', text)

# chop it into a list of words
words = text.split()
out = "Moby Dick contains %8i words." % (len(words))

# count the words
frequencies = {}
for word in words:
    frequencies[word] = frequencies.get(word, 0) + 1

# append counts, most frequent first ((count, word) tuples are unique,
# so descending sort matches the original sort()+reverse())
for num, word in sorted(((c, w) for w, c in frequencies.items()), reverse=True):
    out += "%5i\t%s\n" % (num, word)

open('words.txt', 'w').write(out)
| krother/Python3_Basics_Tutorial | challenges/count_words/count_words.py | count_words.py | py | 684 | python | en | code | 32 | github-code | 13 |
73306389779 | from math import sqrt
def is_prime(x):
    """Return True if x is a prime number, False otherwise.

    Trial division up to the integer square root of x.
    """
    if x < 2:
        return False
    # Use math.isqrt: exact for arbitrarily large ints, whereas
    # int(sqrt(x)) can be off by one for large x due to float rounding.
    from math import isqrt
    for i in range(2, isqrt(x) + 1):
        if x % i == 0:
            return False
    return True
# syntax -> expr(item) for item in iterable if predicate(item)
# note that the most simple expression for the comprehension can be the own item
primes = [n for n in range(101) if is_prime(n)]
print(primes)

# range step replaces the modulo filter: same even/odd keys and squares
even_pows = {n: n * n for n in range(0, 20, 2)}
print(even_pows)

odd_pows = {n: n * n for n in range(1, 20, 2)}
print(odd_pows)
3094417427 | #encoding = utf-8
import torch
import os
import copy
from PIL import Image
import shutil
import numpy as np
import dlib
import cv2
import sys
from config_mask import config
import torchvision.transforms as transforms
from torch.nn.modules.distance import PairwiseDistance
pwd = os.path.abspath(__file__+'../../')
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
pwd = os.path.abspath('./')
version = 'V9'
mask = True  # whether to paint a surgical mask onto the faces
img1_path = os.path.join(pwd, 'Layer_show', 'George_W_Bush_0001.jpg')
# img1_path = os.path.join(pwd, 'Layer_show', 'Michael_Douglas_0003.jpg')
img2_path = os.path.join(pwd, 'Layer_show', 'George_W_Bush_0003.jpg')
# Pick the backbone implementation matching the checkpoint version.
if version=='V1':
    from Models.CBAM_Face_attention_Resnet_maskV1 import resnet18_cbam, resnet50_cbam, resnet101_cbam, resnet34_cbam, \
        resnet152_cbam
elif version=='V6':
    from Models.Resnet34 import resnet34 as resnet34_cbam
elif version=='V2':
    from Models.CBAM_Face_attention_Resnet_maskV2 import resnet18_cbam, resnet50_cbam, resnet101_cbam, resnet34_cbam, \
        resnet152_cbam
elif version=='V8':
    from Models.Resnet34_attention import resnet34 as resnet34_cbam
elif (version=='V3') or (version=='V9'):
    from Models.CBAM_Face_attention_Resnet_notmaskV3 import resnet18_cbam, resnet50_cbam, resnet101_cbam, resnet34_cbam, \
        resnet152_cbam
# Instantiate the ResNet depth selected in the config (128-d embeddings).
if config['model'] == 18:
    model = resnet18_cbam(pretrained=False, showlayer= True, num_classes=128)
elif config['model'] == 34:
    model = resnet34_cbam(pretrained=False, showlayer= True, num_classes=128)
elif config['model'] == 50:
    model = resnet50_cbam(pretrained=False, showlayer= True, num_classes=128)
elif config['model'] == 101:
    model = resnet101_cbam(pretrained=False, showlayer= True, num_classes=128)
elif config['model'] == 152:
    model = resnet152_cbam(pretrained=False, showlayer= True, num_classes=128)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_path = os.path.join(pwd, 'Model_training_checkpoints')
# Find the newest matching checkpoint (epoch number is the 5th "_" field).
x = [int(i.split('_')[4]) for i in os.listdir(model_path) if version in i]
x.sort()
for i in os.listdir(model_path):
    if (len(x)!=0) and ('epoch_'+str(x[-1]) in i) and (version in i):
        model_pathi = os.path.join(model_path, i)
        break
# Hard-coded per-version checkpoints override the search above.
if version=='V1':
    model_pathi = os.path.join(model_path, 'model_34_triplet_epoch_30_rocNotMasked0.819_rocMasked0.764maskV1.pt')
elif version=='V2':
    model_pathi = os.path.join(model_path, 'model_34_triplet_epoch_27_rocNotMasked0.919_rocMasked0.798notmaskV2.pt')
elif version=='V3':
    model_pathi = os.path.join(model_path, 'model_34_triplet_epoch_97_rocNotMasked0.951_rocMasked0.766notmaskV3.pt')
elif version=='V6':
    model_pathi = os.path.join(model_path, 'model_34_triplet_epoch_63_rocNotMasked0.922_rocMasked0.834maskV6.pt')
elif version=='V8':
    model_pathi = os.path.join(model_path, 'model_34_triplet_epoch_39_rocNotMasked0.926_rocMasked0.856maskV8.pt')
elif version=='V9':
    model_pathi = os.path.join(model_path, 'model_34_triplet_epoch_19_rocNotMasked0.918_rocMasked0.831notmaskV9.pt')
print(model_path)
# Load the checkpoint (map to CPU when no GPU is available) or abort.
if os.path.exists(model_pathi) and (version in model_pathi):
    if torch.cuda.is_available():
        model_state = torch.load(model_pathi)
    else:
        model_state = torch.load(model_pathi, map_location='cpu')
    model.load_state_dict(model_state['model_state_dict'])
    start_epoch = model_state['epoch']
    print('loaded %s' % model_pathi)
else:
    print('不存在预训练模型!')
    sys.exit(0)
if torch.cuda.is_available():
    model.cuda()
model.eval()
# Same preprocessing as evaluation: resize, to-tensor, normalize to [-1, 1].
test_data_transforms = transforms.Compose([
    transforms.Resize([config['image_size'], config['image_size']]), # resize
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.5, 0.5, 0.5],
        std=[0.5, 0.5, 0.5]
    )
])
isame = 1
threshold = 0.9
detector = dlib.get_frontal_face_detector()
predicter_path=config['predicter_path']
predictor = dlib.shape_predictor(predicter_path)
img_size = config['image_size']
font = cv2.FONT_HERSHEY_SIMPLEX
masked = os.path.join(pwd, 'Layer_show', 'mask')
notmasked = os.path.join(pwd, 'Layer_show', 'notmask')
# Optionally wipe and recreate the output directory for this run.
delete = input('是否删除文件? Y or N')
if (delete.upper()=='Y') and (mask==True):
    os.system('rm -rf %s'%masked)
    os.mkdir(masked)
elif (delete.upper()=='Y') and (mask==False):
    os.system('rm -rf %s'%notmasked)
    os.mkdir(notmasked)
def preprocess(image_path, detector, predictor, img_size, cl, mask=True):
    """Load an image, align/crop the face, and optionally paint a synthetic
    mask over the lower face; return the result as an RGB PIL image.

    cl selects which color from the hard-coded palette is used for the mask.
    NOTE(review): if no (single) face is detected, face_img stays None and
    Image.fromarray(None) at the end will raise — confirm inputs always
    contain exactly one face.
    """
    image = dlib.load_rgb_image(image_path)
    face_img, TF = None, 0
    # Face alignment and cropping
    dets = detector(image, 1)
    if len(dets) == 1:
        faces = dlib.full_object_detections()
        faces.append(predictor(image, dets[0]))
        images = dlib.get_face_chips(image, faces, size=img_size)
        image = np.array(images[0]).astype(np.uint8)
        # face_img = Image.fromarray(image).convert('RGB') #
        face_img = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    # Generate the synthetic face mask
    dets = detector(image, 1)
    if len(dets) == 1:
        point68 = predictor(image, dets[0])
        landmarks = list()
        INDEX = [0, 2, 14, 16, 17, 18, 19, 24, 25, 26]
        eyebrow_list = [19, 24]
        eyes_list = [36, 45]
        eyebrow = 0
        eyes = 0
        # Average eyebrow/eye heights to estimate a vertical offset.
        for eb, ey in zip(eyebrow_list, eyes_list):
            eyebrow += point68.part(eb).y
            eyes += point68.part(ey).y
        add_pixel = int(eyes / 2 - eyebrow / 2)
        for idx in INDEX:
            x = point68.part(idx).x
            # Lift eyebrow points upward, clamped to the image top.
            if idx in eyebrow_list:
                y = (point68.part(idx).y - 2 * add_pixel) if (point68.part(idx).y - 2 * add_pixel) > 0 else 0
            else:
                y = point68.part(idx).y
            landmarks.append((x, y))
        # Jawline landmarks 2..14 bound the region the mask will cover.
        belows = []
        for i in range(2, 15, 1):
            belows.append([point68.part(i).x, point68.part(i).y])
        belows = np.array(belows)
        colors = [(200, 183, 144), (163, 150, 134), (172, 170, 169), \
                  (167, 168, 166), (173, 171, 170), (161, 161, 160), \
                  (170, 162, 162)]
        # cl = np.random.choice(len(colors), 1)[0]
        # cl = 0
        if mask:
            # Fill the lower-face polygon with the chosen mask color.
            cv2.fillConvexPoly(face_img, belows, colors[cl])
        else:
            pass
    return Image.fromarray(face_img).convert('RGB')
def ishowm(ima, imb):
    """Convert two RGB PIL images to BGR, label the second one, and return
    them concatenated side by side as a numpy array.

    The interactive cv2 display block is intentionally disabled (if False).
    """
    imgone = np.asarray(ima)
    imgtwo = np.asarray(imb)
    imgone = cv2.cvtColor(imgone, cv2.COLOR_RGB2BGR)
    imgtwo = cv2.cvtColor(imgtwo, cv2.COLOR_RGB2BGR)
    cv2.putText(imgtwo, 'lay',(1,25),font,1,[0,0,255],2)
    imgall = np.concatenate([imgone, imgtwo], axis=1)
    if False:
        cv2.namedWindow('images')
        cv2.resizeWindow('images', 600, 600)
        cv2.imshow('images', imgall)
        cv2.waitKey(0)
        cv2.destroyWindow('images')
    return imgall
# Preprocess both images (colors 1 and 3 from the mask palette).
ima = preprocess(img1_path, detector, predictor, img_size, 1, mask)
imb = preprocess(img2_path, detector, predictor, img_size, 3, mask)
imb_ = copy.deepcopy(imb)
ima_ = copy.deepcopy(ima)
ima, imb = test_data_transforms(ima), test_data_transforms(imb)
if torch.cuda.is_available():
    data_a = ima.unsqueeze(0).cuda()
    data_b = imb.unsqueeze(0).cuda()
else:
    data_a, data_b = ima.unsqueeze(0), imb.unsqueeze(0)
imgall=ishowm(ima_, imb_)
# Embed both faces and L2-normalize the embeddings.
output_a, output_b = model(data_a), model(data_b)
output_a = torch.div(output_a, torch.norm(output_a))
output_b = torch.div(output_b, torch.norm(output_b))
l2_distance = PairwiseDistance(2)#.cuda()
distance = l2_distance.forward(output_a, output_b)
print('从两张图片提取出来的特征向量的欧氏距离是:%1.3f' % distance)
# Annotate the side-by-side image with the distance and save it.
cv2.putText(imgall, 'dis:%1.4f'%distance, (1, 19), font, 0.6, [0, 0, 255], 1)
imgall = Image.fromarray(imgall).convert('RGB')
if mask:
    imgall.save(os.path.join(pwd, 'Layer_show', 'mask', 'dis%1.3f_faceshow_%s.jpg'%(distance, version)))
    # Move generated intermediate files (not the source 000x images) away.
    path = os.path.join(os.path.join(pwd,'Layer_show'))
    for i in os.listdir(path):
        if os.path.isfile(os.path.join(path, i)) and '000' not in i:
            shutil.move(os.path.join(path,i),os.path.join(path,'mask',i))
else:
    imgall.save(os.path.join(pwd, 'Layer_show', 'notmask', 'dis%1.3f_faceshow_%s.jpg'%(distance, version)))
    path = os.path.join(os.path.join(pwd, 'Layer_show'))
    for i in os.listdir(path):
        if os.path.isfile(os.path.join(path, i)) and '000' not in i:
            shutil.move(os.path.join(path, i), os.path.join(path, 'notmask', i))
| ZouJiu1/Mask_face_recognitionZ | compare.py | compare.py | py | 8,530 | python | en | code | 29 | github-code | 13 |
6269857607 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 19 22:20:58 2019
@author: E442282
"""
import numpy as np
import cv2
import os,sys
from matplotlib import pyplot as plt
from numpy.lib.stride_tricks import as_strided
def getColorSpaces(image):
    """Return a channel-swapped copy and a grayscale copy of *image*.

    NOTE(review): the first value is named `rgb` but COLOR_RGB2BGR swaps the
    channel order — presumably to display a matplotlib-style RGB image with
    OpenCV; confirm the intended direction.
    """
    rgb = cv2.cvtColor(image,cv2.COLOR_RGB2BGR)
    gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
    return rgb,gray
def getImageDimnesion(image):
    """Return (height, width) of *image*.  (Name keeps the original typo
    because external callers depend on it.)"""
    return image.shape[0], image.shape[1]
def showImage(image,title,cmap):
    """Render *image* on the current matplotlib axes with *title*, axes off."""
    plt.imshow(image,cmap=cmap)
    plt.axis('off')
    plt.title(title)
def splitRGBChannels(image):
    """Split *image* into its three color channels.

    Bug fix: the original called cv2.split(img) on the module-level global
    instead of the `image` parameter, so the argument was ignored.
    NOTE(review): for an OpenCV BGR image the actual channel order is
    (blue, green, red); the names here keep the original convention.
    """
    red, green, blue = cv2.split(image)
    return red, green, blue
def getHistogram(image, bins=256):
    """Return the intensity histogram of *image* as a float array.

    Improvement: np.bincount counts all pixels in one vectorized pass instead
    of a Python loop over every pixel (O(n) at C speed).  As before, pixel
    values are used directly as bin indices; values >= bins (which made the
    original raise IndexError) now simply extend the result.
    """
    counts = np.bincount(image.ravel(), minlength=bins)
    # Original returned np.zeros(bins) incremented in place -> float64 dtype.
    return counts.astype(np.float64)
def applySobel(gray):
    """Return the sum of absolute horizontal and vertical Sobel responses."""
    grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
    grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
    return np.absolute(grad_x) + np.absolute(grad_y)
def applyRoberts(gray):
    """Roberts cross edge response, rescaled and min-max normalized to uint8."""
    filterX = np.array([[0,1],[-1,0]])
    filterY = np.array([[1,0],[0,-1]])
    img_X = cv2.filter2D(gray, -1, filterX)
    img_Y = cv2.filter2D(gray, -1, filterY)
    roberts=img_X+img_Y
    # Rescale so the maximum maps to 255, then normalize in place.
    # NOTE(review): the normalize() call makes the preceding scaling
    # redundant — confirm both steps are intended.
    scale_factor = np.max(roberts)/255
    roberts = (roberts/scale_factor).astype(np.uint8)
    cv2.normalize(roberts, roberts, 0, 255, cv2.NORM_MINMAX, dtype=-1)
    roberts = roberts.astype(np.uint8)
    return roberts
def applyPrewitt(gray):
    """Combined response of horizontal and vertical Prewitt kernels."""
    kernel_x = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
    kernel_y = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])
    grad_x = cv2.filter2D(gray, -1, kernel_x)
    grad_y = cv2.filter2D(gray, -1, kernel_y)
    return grad_x + grad_y
def applyLaplacian(gray):
    """Gaussian-smoothed 4-neighbour Laplacian, scaled to absolute uint8."""
    smoothed = cv2.GaussianBlur(gray, (3, 3), 0)
    laplace_kernel = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]])
    response = cv2.filter2D(smoothed, -1, laplace_kernel)
    return cv2.convertScaleAbs(response)
def applyCanny(gray,low):
    """Canny edges with low threshold *low* and high threshold low*3."""
    ratio=3
    kernel_size = 3
    # Blur first to suppress noise before hysteresis thresholding.
    img_blur = cv2.blur(gray, (3,3))
    detected_edges = cv2.Canny(img_blur, low, low*ratio, kernel_size)
    # mask = detected_edges != 0
    # dst = gray * (mask[:,:,None].astype(gray.dtype))
    return detected_edges
def addGaussianNoise(gray):
    """Add zero-mean Gaussian noise (var=0.1) and min-max normalize to uint8.

    NOTE(review): noise is unseeded, so output is nondeterministic.
    """
    row,col= gray.shape
    mean = 0
    var = 0.1
    sigma = var**0.5
    gaussian = np.random.normal(mean,sigma,(row,col))
    noisy_image = np.zeros(gray.shape, np.float32)
    noisy_image = gray + gaussian
    # Rescale the noisy result back into [0, 255] before the uint8 cast.
    cv2.normalize(noisy_image, noisy_image, 0, 255, cv2.NORM_MINMAX, dtype=-1)
    noisy_image = noisy_image.astype(np.uint8)
    return noisy_image
'''
Sharpen the image
Use a gaussian smoothing filter and subtract the smoothed version from the original
image (in a weighted way so the values of a constant area remain constant).
'''
def unsharp_mask(image, kernel_size=(5, 5), sigma=1.0, amount=1.0, threshold=0):
    """Return a sharpened version of the image, using an unsharp mask.

    sharpened = (1 + amount) * image - amount * GaussianBlur(image), clamped
    to [0, 255] and cast to uint8.  When threshold > 0, low-contrast pixels
    (|image - blurred| < threshold) are copied back unchanged so flat areas
    keep their original values.
    """
    blurred = cv2.GaussianBlur(image, kernel_size, sigma)
    sharpened = float(amount + 1) * image - float(amount) * blurred
    sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))
    sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))
    sharpened = sharpened.round().astype(np.uint8)
    if threshold > 0:
        # Bug fix: the original pre-allocated the mask with np.bool, which was
        # removed in NumPy 1.24 (AttributeError); the allocation was also dead
        # when threshold == 0, so it is dropped entirely.
        low_contrast_mask = np.absolute(image - blurred) < threshold
        np.copyto(sharpened, image, where=low_contrast_mask)
    return sharpened
# Low-pass kernel
def getNormalizedImage(image):
    """Clamp *image* to [0, 255], round, and cast to uint8."""
    return np.clip(image, 0, 255).round().astype(np.uint8)
import time
'''
Refered this material
https://jessicastringham.net/2017/12/31/stride-tricks/
'''
def applyFastFilter(gray, kernel):
    """Vectorized 2-D filtering via numpy stride tricks.

    Builds a (out_h, out_w, kh, kw) sliding-window view of *gray* without
    copying, then contracts each window against *kernel* with einsum.
    Output is "valid"-sized: (H - kh + 1, W - kw + 1).
    NOTE(review): the double transpose applies the kernel in a flipped
    orientation relative to a plain correlation — symmetric kernels are
    unaffected; confirm for asymmetric ones.
    """
    kernel = np.array(kernel)
    view_shape = tuple(np.subtract(gray.shape, kernel.shape) + 1) + kernel.shape
    expanded_input = as_strided(gray, shape = view_shape, strides = gray.strides * 2)
    output=np.einsum('ij,ijkl->kl',kernel,expanded_input .T).T
    return output
def applyLowPass(gray, k, fastImplement=True):
    """k x k mean (box) filter with zero borders.

    fastImplement=True uses the strided/einsum path, False a double loop
    (kept for benchmarking).
    """
    # Bug fix: np.ones((k,k))/k*k evaluates left-to-right to an all-ones
    # kernel (divide then multiply); the box filter needs 1/(k*k) weights.
    kernel = np.ones((k, k)) / (k * k)
    padding = k // 2
    if fastImplement:
        blur = applyFastFilter(gray, kernel)
        # Bug fix: pad by k//2 so output matches input size for every k
        # (was hard-coded to 2, only correct for k == 5).
        blur = np.pad(blur, (padding,), 'constant')
    else:
        h, w = gray.shape
        blur = np.zeros(gray.shape)
        # Bug fix: rows iterate over the height and columns over the width
        # (the original swapped them, breaking non-square images).
        for i in range(padding, h - padding):
            for j in range(padding, w - padding):
                blur[i, j] = np.sum(kernel * gray[i - padding:i + padding + 1, j - padding:j + padding + 1])
    return blur
def applyFastFilterMedian(gray, kernel):
    """Median-filter *gray* with a square window of side *kernel* using a
    strided sliding-window view.

    :param gray: 2-D numpy array.
    :param kernel: window side length (an int despite the name, kept for
        backward compatibility with existing callers).
    :returns: the 'valid' region, shape (H - kernel + 1, W - kernel + 1).
    """
    # The original body read a global ``k`` instead of its parameter, which
    # only worked by accident when called from the benchmark loop below.
    k = int(kernel)
    view_shape = tuple(np.subtract(gray.shape, (k, k)) + 1) + (k, k)
    windows = as_strided(gray, shape=view_shape, strides=gray.strides * 2)
    # Median over the two trailing (window) axes.
    return np.median(windows, axis=(2, 3))
def applyMedianFilter(gray, k, fastImplement=True):
    """Apply a k x k median filter to the 2-D grayscale image *gray*.

    :param gray: 2-D numpy array.
    :param k: odd window side length.
    :param fastImplement: if True use the strided implementation
        (applyFastFilterMedian), otherwise a naive double loop.
    :returns: filtered image of the same shape as *gray*, with a zero border
        of width k // 2 where the window does not fully fit.
    """
    padding = k // 2
    if fastImplement:
        filtered_gray = applyFastFilterMedian(gray, k)
        # Pad by the filter radius (the original padded a fixed 2, which only
        # restores the input shape for k == 5).
        filtered_gray = np.pad(filtered_gray, (padding,), 'constant')
    else:
        h, w = gray.shape
        filtered_gray = np.zeros(gray.shape)
        # Rows are bounded by the height and columns by the width (the
        # original used w for rows, breaking non-square images).
        for i in range(padding, h - padding):
            for j in range(padding, w - padding):
                filtered_gray[i, j] = np.median(
                    gray[i - padding:i + padding + 1,
                         j - padding:j + padding + 1])
    return filtered_gray
# --- Benchmark: naive vs. strided median filtering over odd kernel sizes ---
# NOTE(review): the input path is machine-specific; parameterize before reuse.
img = cv2.imread(r'C:\SAI\IIIT\2019_Monsoon\DIP\Assignment2\input_data\brain.jpg')
plt.figure(figsize=(12, 12))
# getColorSpaces is defined elsewhere in this file; presumably returns the
# RGB and grayscale conversions of img — confirm.
rgb,gray=getColorSpaces(img)
# Odd kernel sizes 3, 5, ..., 17 (must match the benchmark loop's range).
kvals=[k for k in range(3,19,2)]
times_naive=[]
times_fast=[]
for k in range(3,19,2):
    # Time the naive double-loop implementation.
    start = time.time()
    #Get naive implementation time
    blurred_mage = applyMedianFilter(gray,k,False)
    end = time.time()
    times_naive.append(end-start)
    # Time the strided (as_strided) implementation on the same input.
    start = time.time()
    #Get faster implementation(strides of numpy)
    blurred_mage = applyMedianFilter(gray,k,True)
    end = time.time()
    times_fast.append(end-start)
plt.xlabel('K ')
plt.ylabel('Time taken')
plt.plot(kvals,times_naive)
plt.plot(kvals,times_fast)
plt.legend(['Naive','Fast'], loc='upper right')
plt.show()
#kernel_size=7
#blurred_mage1 = applyLowPass(gray,7,False)
#blurred_mage2 = applyLowPass(gray,7,True)
#plt.subplot(1,3,1)
#plt.axis('off')
#plt.title('Original')
#plt.imshow(gray,cmap='gray')
#
#plt.subplot(1,3,2)
#plt.axis('off')
#plt.title('Lowpass - Naive ')
#plt.imshow(blurred_mage1,cmap='gray')
#
#plt.subplot(1,3,3)
#plt.axis('off')
#plt.title('Lowpass - Faster ')
#plt.imshow(blurred_mage2,cmap='gray')
#kvals=[k for k in range(3,19,2)]
#times_naive=[]
#times_fast=[]
#
#for k in range(3,19,2):
#
# start = time.time()
# #Get naive implementation time
# blurred_mage = applyLowPass(gray,k,False)
# end = time.time()
# times_naive.append(end-start)
#
# start = time.time()
# #Get faster implementation(strides of numpy)
# blurred_mage = applyLowPass(gray,k,True)
# end = time.time()
# times_fast.append(end-start)
#
#plt.xlabel('K ')
#plt.ylabel('Time taken')
#
#
#plt.plot(kvals,times_naive)
#plt.plot(kvals,times_fast)
#
#plt.legend(['Naive','Fast'], loc='upper right')
#
#plt.show()
| ddurgaprasad/DIP | Assignment2/src/Q5.py | Q5.py | py | 8,234 | python | en | code | 0 | github-code | 13 |
9521163577 | from __future__ import absolute_import, unicode_literals
import logging
from dataclasses import dataclass
from datetime import datetime, timedelta
from decimal import Decimal
from django.apps import apps
from fractions import Fraction
from functools import reduce
from typing import Tuple, List
from annoying.functions import get_object_or_None
from dateutil import rrule
from dateutil.relativedelta import relativedelta
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models import JSONField
from django_fsm import FSMField, transition, TransitionNotAllowed
from model_utils import Choices
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator
from django.db import models
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.timezone import utc
from django.utils.translation import gettext_lazy as _
from silver.models import Plan
from silver.models.documents.entries import OriginType
from silver.models.billing_entities import Customer
from silver.models.documents import DocumentEntry
from silver.models.fields import field_template_path
from silver.utils.dates import ONE_DAY, first_day_of_month, first_day_of_interval, end_of_interval, monthdiff, \
monthdiff_as_fraction
from silver.utils.numbers import quantize_fraction
from silver.validators import validate_reference
logger = logging.getLogger(__name__)
class MeteredFeatureUnitsLog(models.Model):
    """Units of a metered feature consumed by a subscription during one
    bucket, i.e. the interval [start_datetime, end_datetime]."""
    metered_feature = models.ForeignKey('MeteredFeature', related_name='consumed',
                                        on_delete=models.CASCADE)
    subscription = models.ForeignKey('Subscription', related_name='mf_log_entries',
                                     on_delete=models.CASCADE)
    # Non-negative quantity consumed within the bucket.
    consumed_units = models.DecimalField(max_digits=19, decimal_places=4,
                                         validators=[MinValueValidator(0.0)])
    start_datetime = models.DateTimeField()
    end_datetime = models.DateTimeField()
    # Optional discriminator allowing multiple logs for the same bucket.
    annotation = models.CharField(max_length=256, null=True, blank=True)

    class Meta:
        unique_together = ('metered_feature', 'subscription', 'start_datetime', 'end_datetime',
                           'annotation')

    def clean(self):
        """Reject creating/changing logs on ended or inactive subscriptions,
        and reject a second log for the subscription's current bucket.

        :raises ValidationError: on either violation.
        """
        super(MeteredFeatureUnitsLog, self).clean()
        if self.subscription.state in [Subscription.STATES.ENDED,
                                       Subscription.STATES.INACTIVE]:
            # A missing primary key means the instance was never saved.
            if not self.id:
                action_type = "create"
            else:
                action_type = "change"
            err_msg = 'You cannot %s a metered feature units log belonging to '\
                      'an %s subscription.' % (action_type,
                                               self.subscription.state)
            raise ValidationError(err_msg)

        if not self.id:
            # Duplicate check against the subscription's current
            # metered-features bucket.
            start_datetime = self.subscription.bucket_start_datetime(origin_type=OriginType.MeteredFeature)
            end_datetime = self.subscription.bucket_end_datetime(origin_type=OriginType.MeteredFeature)
            if get_object_or_None(MeteredFeatureUnitsLog, start_datetime=start_datetime,
                                  end_datetime=end_datetime,
                                  metered_feature=self.metered_feature,
                                  subscription=self.subscription):
                err_msg = 'A %s units log for the current date already exists.'\
                          ' You can edit that one.' % self.metered_feature
                raise ValidationError(err_msg)

    def save(self, *args, **kwargs):
        """Normalize the annotation, default the bucket boundaries on first
        save, and on updates restrict ``update_fields`` so the immutable
        ``metered_feature`` (and ``id``) are never rewritten."""
        if self.annotation == "":
            # Store empty annotations as NULL so the unique constraint treats
            # them consistently.
            self.annotation = None

        if not self.id:
            # First save: default missing boundaries to the current bucket.
            if not self.start_datetime:
                self.start_datetime = self.subscription.bucket_start_datetime(origin_type=OriginType.MeteredFeature)
            if not self.end_datetime:
                self.end_datetime = self.subscription.bucket_end_datetime(origin_type=OriginType.MeteredFeature)

            super(MeteredFeatureUnitsLog, self).save(*args, **kwargs)
        else:
            # Update: write every field except the immutable ones, unless the
            # caller explicitly narrowed update_fields.
            update_fields = []
            for field in self._meta.fields:
                if field.name != 'metered_feature' and field.name != 'id':
                    update_fields.append(field.name)
            kwargs['update_fields'] = kwargs.get('update_fields', update_fields)

            super(MeteredFeatureUnitsLog, self).save(*args, **kwargs)

    def __str__(self):
        return self.metered_feature.name
@dataclass
class OverageInfo:
    """Breakdown of metered-feature overage for a billing computation."""
    # Units consumed beyond what is included — presumably after bonuses are
    # taken into account; confirm against the call sites.
    extra_consumed_units: Decimal
    # Annotations of the unit logs that contributed to the overage.
    annotations: List[str]
    # Bonuses applied directly to the consumed units vs. those billed as
    # separate document entries.
    directly_applied_bonuses: List["silver.models.Bonus"]
    separately_applied_bonuses: List["silver.models.Bonus"]
class Subscription(models.Model):
    """A customer's subscription to a plan, with an FSM-managed lifecycle
    (inactive -> active -> canceled -> ended) and billing-cycle helpers."""

    class STATES(object):
        # Lifecycle states used by the FSMField below.
        ACTIVE = 'active'
        INACTIVE = 'inactive'
        CANCELED = 'canceled'
        ENDED = 'ended'

    STATE_CHOICES = Choices(
        (STATES.ACTIVE, _('Active')),
        (STATES.INACTIVE, _('Inactive')),
        (STATES.CANCELED, _('Canceled')),
        (STATES.ENDED, _('Ended'))
    )

    class CANCEL_OPTIONS(object):
        # Accepted values for cancel(when=...).
        NOW = 'now'
        END_OF_BILLING_CYCLE = 'end_of_billing_cycle'

    # Plan interval name -> dateutil rrule frequency constant.
    _INTERVALS_CODES = {
        'year': rrule.YEARLY,
        'month': rrule.MONTHLY,
        'week': rrule.WEEKLY,
        'day': rrule.DAILY
    }

    plan = models.ForeignKey(
        'Plan', on_delete=models.CASCADE,
        help_text='The plan the customer is subscribed to.'
    )
    description = models.TextField(max_length=1024, blank=True, null=True)
    customer = models.ForeignKey(
        'Customer', related_name='subscriptions', on_delete=models.CASCADE,
        help_text='The customer who is subscribed to the plan.'
    )
    trial_end = models.DateField(
        blank=True, null=True,
        help_text='The date at which the trial ends. '
                  'If set, overrides the computed trial end date from the plan.'
    )
    start_date = models.DateField(
        blank=True, null=True,
        help_text='The starting date for the subscription.'
    )
    cancel_date = models.DateField(
        blank=True, null=True,
        help_text='The date when the subscription was canceled.'
    )
    ended_at = models.DateField(
        blank=True, null=True,
        help_text='The date when the subscription ended.'
    )
    reference = models.CharField(
        max_length=128, blank=True, null=True, validators=[validate_reference],
        help_text="The subscription's reference in an external system."
    )
    state = FSMField(
        choices=STATE_CHOICES, max_length=12, default=STATES.INACTIVE,
        help_text='The state the subscription is in.'
    )
    # Free-form metadata stored as JSON.
    meta = JSONField(blank=True, null=True, default=dict, encoder=DjangoJSONEncoder)
def clean(self):
errors = dict()
if self.start_date and self.trial_end:
if self.trial_end < self.start_date:
errors.update(
{'trial_end': "The trial end date cannot be older than "
"the subscription's start date."}
)
if self.ended_at:
if self.state not in [self.STATES.CANCELED, self.STATES.ENDED]:
errors.update(
{'ended_at': 'The ended at date cannot be set if the '
'subscription is not canceled or ended.'}
)
elif self.ended_at < self.start_date:
errors.update(
{'ended_at': "The ended at date cannot be older than the"
"subscription's start date."}
)
if errors:
raise ValidationError(errors)
    @property
    def provider(self):
        """The provider of the subscribed plan (shortcut for ``plan.provider``)."""
        return self.plan.provider
def _get_aligned_start_date_after_date(self, reference_date, interval_type,
bymonth=None, byweekday=None, bymonthday=None):
return list(
rrule.rrule(interval_type,
count=1, # align the cycle to the given rules as quickly as possible
bymonth=bymonth,
bymonthday=bymonthday,
byweekday=byweekday,
dtstart=reference_date)
)[-1].date()
def _get_last_start_date_within_range(self, range_start, range_end,
interval_type, interval_count,
bymonth=None, byweekday=None, bymonthday=None):
# we try to obtain a start date aligned to the given rules
aligned_start_date = self._get_aligned_start_date_after_date(
reference_date=range_start,
interval_type=interval_type,
bymonth=bymonth,
bymonthday=bymonthday,
byweekday=byweekday,
)
relative_start_date = range_start if aligned_start_date > range_end else aligned_start_date
dates = list(
rrule.rrule(interval_type,
dtstart=relative_start_date,
interval=interval_count,
until=range_end)
)
return aligned_start_date if not dates else dates[-1].date()
def _get_interval_rules(self, granulate, origin_type: OriginType = None):
if not origin_type:
origin_type = OriginType.Plan
interval = self.plan.base_interval if origin_type == OriginType.Plan else self.plan.metered_features_interval
interval_count = (self.plan.base_interval_count if origin_type == OriginType.Plan
else self.plan.metered_features_interval_count)
rules = {
'interval_type': self._INTERVALS_CODES[interval],
'interval_count': 1 if granulate else interval_count,
}
if interval == self.plan.INTERVALS.MONTH:
rules['bymonthday'] = 1 # first day of the month
elif interval == self.plan.INTERVALS.WEEK:
rules['byweekday'] = 0 # first day of the week (Monday)
elif interval == self.plan.INTERVALS.YEAR:
# first day of the first month (1 Jan)
rules['bymonth'] = 1
rules['bymonthday'] = 1
return rules
    def _cycle_start_date(self, reference_date=None, ignore_trial=None, granulate=None,
                          ignore_start_date=None, origin_type: OriginType = None):
        """Return the start date of the cycle containing *reference_date*.

        :param reference_date: date to locate the cycle for; defaults to today.
        :param ignore_trial: if truthy, the trial period does not affect the
            cycle boundaries.
        :param granulate: if truthy, cycles are split at every base interval
            regardless of the plan's interval count.
        :param ignore_start_date: accepted for interface compatibility; only
            normalized, never read afterwards in this method.
        :param origin_type: use the plan or the metered-features interval
            settings; defaults to the plan's.
        :returns: a ``date``, or ``None`` when *reference_date* precedes the
            subscription start (or no start date is set).
        """
        if not origin_type:
            origin_type = OriginType.Plan

        ignore_trial_default = False
        granulate_default = False
        ignore_start_date_default = False

        # Coerce the tri-state (None/True/False) flags to plain booleans.
        ignore_trial = ignore_trial_default or ignore_trial
        granulate = granulate_default or granulate
        ignore_start_date = ignore_start_date_default or ignore_start_date

        if reference_date is None:
            reference_date = timezone.now().date()
        # NOTE(review): this local is never read afterwards — pre-existing.
        start_date = reference_date

        if not self.start_date or reference_date < self.start_date:
            return None

        rules = self._get_interval_rules(granulate, origin_type)

        # Latest aligned cycle start inside [subscription start, reference].
        start_date_ignoring_trial = self._get_last_start_date_within_range(
            range_start=self.start_date,
            range_end=reference_date,
            **rules
        )

        if ignore_trial or not self.trial_end:
            return start_date_ignoring_trial
        else:  # Trial period is considered
            if self.trial_end < reference_date:  # Trial period ended
                # The day after the trial ended can be a start date (once, right after trial ended)
                date_after_trial_end = self.trial_end + ONE_DAY
                return max(date_after_trial_end, start_date_ignoring_trial)
            else:  # Trial is still ongoing
                if granulate or self.separate_cycles_during_trial:
                    # The trial period is split into cycles according to the rules defined above
                    return start_date_ignoring_trial
                else:
                    # Otherwise, the start date of the trial period is the subscription start date
                    return self.start_date
    def _cycle_end_date(self, reference_date=None, ignore_trial=None, granulate=None, origin_type: OriginType = None):
        """Return the end date of the cycle containing *reference_date*.

        The end date is the day before the next cycle's start date, capped at
        ``ended_at`` when set.

        :returns: a ``date``, or ``None`` when no cycle start exists for
            *reference_date* (or the search degenerates).
        """
        if not origin_type:
            origin_type = OriginType.Plan

        ignore_trial_default = False
        granulate_default = False

        # Coerce the tri-state (None/True/False) flags to plain booleans.
        ignore_trial = ignore_trial or ignore_trial_default
        granulate = granulate or granulate_default

        if reference_date is None:
            reference_date = timezone.now().date()

        real_cycle_start_date = self._cycle_start_date(reference_date, ignore_trial, granulate, origin_type=origin_type)

        # we need a current start date in order to compute a current end date
        if not real_cycle_start_date:
            return None

        # during trial and trial cycle is not separated into intervals
        if self.on_trial(reference_date) and not (self.separate_cycles_during_trial or granulate):
            return min(self.trial_end, (self.ended_at or datetime.max.date()))

        interval = self.plan.base_interval if origin_type == OriginType.Plan else self.plan.metered_features_interval
        interval_count = (self.plan.base_interval_count if origin_type == OriginType.Plan
                          else self.plan.metered_features_interval_count)

        maximum_cycle_end_date = end_of_interval(
            real_cycle_start_date, interval, interval_count
        )

        # We know that the cycle end_date is the day before the next cycle start_date,
        # therefore we check if the cycle start_date for our maximum cycle end_date is the same
        # as the initial cycle start_date.
        while True:
            reference_cycle_start_date = self._cycle_start_date(maximum_cycle_end_date,
                                                                ignore_trial, granulate, origin_type=origin_type)
            # it means the cycle end_date we got is the right one
            if reference_cycle_start_date == real_cycle_start_date:
                return min(maximum_cycle_end_date, (self.ended_at or datetime.max.date()))
            elif reference_cycle_start_date < real_cycle_start_date:
                # This should never happen in normal conditions, but it may stop infinite looping
                return None

            # Step back to just before the later cycle and retry.
            maximum_cycle_end_date = reference_cycle_start_date - ONE_DAY
@property
def prebill_plan(self):
if self.plan.prebill_plan is not None:
return self.plan.prebill_plan
return self.provider.prebill_plan
@property
def cycle_billing_duration(self):
if self.plan.cycle_billing_duration is not None:
return self.plan.cycle_billing_duration
return self.provider.cycle_billing_duration
@property
def separate_cycles_during_trial(self):
if self.plan.separate_cycles_during_trial is not None:
return self.plan.separate_cycles_during_trial
return self.provider.separate_cycles_during_trial
@property
def generate_documents_on_trial_end(self):
if self.plan.generate_documents_on_trial_end is not None:
return self.plan.generate_documents_on_trial_end
return self.provider.generate_documents_on_trial_end
@property
def _ignore_trial_end(self):
return not self.generate_documents_on_trial_end
def cycle_start_date(self, reference_date=None, origin_type: OriginType = None):
if not origin_type:
origin_type = OriginType.Plan
return self._cycle_start_date(ignore_trial=self._ignore_trial_end,
granulate=False,
reference_date=reference_date,
origin_type=origin_type)
def cycle_end_date(self, reference_date=None, origin_type: OriginType = None):
if not origin_type:
origin_type = OriginType.Plan
return self._cycle_end_date(ignore_trial=self._ignore_trial_end,
granulate=False,
reference_date=reference_date,
origin_type=origin_type)
def bucket_start_date(self, reference_date=None, origin_type: OriginType = None):
if not origin_type:
origin_type = OriginType.Plan
granulate = True
if origin_type == OriginType.Plan:
granulate = (
self.plan.separate_plan_entries_per_base_interval != Plan.SEPARATE_ENTRIES_BY_INTERVAL.DISABLED
)
return self._cycle_start_date(reference_date=reference_date,
ignore_trial=False, granulate=granulate,
origin_type=origin_type)
def bucket_end_date(self, reference_date=None, origin_type: OriginType = None):
if not origin_type:
origin_type = OriginType.Plan
granulate = True
if origin_type == OriginType.Plan:
granulate = (
self.plan.separate_plan_entries_per_base_interval != Plan.SEPARATE_ENTRIES_BY_INTERVAL.DISABLED
)
return self._cycle_end_date(reference_date=reference_date,
ignore_trial=False, granulate=granulate,
origin_type=origin_type)
def bucket_start_datetime(self, reference_datetime=None, origin_type: OriginType = None):
if not origin_type:
origin_type = OriginType.Plan
reference_date = reference_datetime.date() if reference_datetime else None
return datetime.combine(
self._cycle_start_date(reference_date=reference_date,
ignore_trial=False,
granulate=True,
origin_type=origin_type),
datetime.min.time(),
tzinfo=timezone.utc,
)
def bucket_end_datetime(self, reference_datetime=None, origin_type: OriginType = None):
if not origin_type:
origin_type = OriginType.Plan
reference_date = reference_datetime.date() if reference_datetime else None
return datetime.combine(
self._cycle_end_date(reference_date=reference_date,
ignore_trial=False,
granulate=True,
origin_type=origin_type),
datetime.max.time(),
tzinfo=timezone.utc,
).replace(microsecond=0)
    def updateable_buckets(self):
        """Return the buckets whose unit logs may still be edited, newest
        first: the current metered-features bucket plus any previous buckets
        still inside the plan's ``generate_after`` grace period.

        :returns: list of {'start_date': date, 'end_date': date} dicts.
        """
        buckets = []

        if self.state in ['ended', 'inactive']:
            return buckets

        start_date = self.bucket_start_date(origin_type=OriginType.MeteredFeature)
        end_date = self.bucket_end_date(origin_type=OriginType.MeteredFeature)

        if start_date is None or end_date is None:
            return buckets

        # A canceled subscription has no editable bucket after its cancel date.
        if self.state == self.STATES.CANCELED:
            if self.cancel_date < start_date:
                return buckets

        buckets.append({'start_date': start_date, 'end_date': end_date})

        generate_after = timedelta(seconds=self.plan.generate_after)
        # Walk backwards one bucket at a time while the bucket's documents
        # have not yet passed their generation time.
        while (timezone.now() - generate_after <
                datetime.combine(start_date, datetime.min.time()).replace(
                    tzinfo=timezone.get_current_timezone())):
            end_date = start_date - ONE_DAY
            start_date = self.bucket_start_date(end_date, origin_type=OriginType.MeteredFeature)
            if start_date is None:
                return buckets
            buckets.append({'start_date': start_date, 'end_date': end_date})

        return buckets
@property
def is_on_trial(self):
"""
Tells if the subscription is currently on trial.
:rtype: bool
"""
if self.state == self.STATES.ACTIVE and self.trial_end:
return timezone.now().date() <= self.trial_end
return False
def on_trial(self, date):
"""
Tells if the subscription was on trial at the date passed as argument.
:param date: the date for which the check is made.
:type date: datetime.date
:rtype: bool
"""
if self.trial_end:
return date <= self.trial_end
return False
def _log_should_be_billed_result(self, billing_date, interval_end):
logger.debug('should_be_billed result: %s', {
'subscription': self.id,
'billing_date': billing_date.strftime('%Y-%m-%d'),
'interval_end': interval_end.strftime('%Y-%m-%d')
})
@property
def billed_up_to_dates(self):
last_billing_log = self.last_billing_log
return {
'metered_features_billed_up_to': last_billing_log.metered_features_billed_up_to,
'plan_billed_up_to': last_billing_log.plan_billed_up_to
} if last_billing_log else {
'metered_features_billed_up_to': self.start_date - ONE_DAY,
'plan_billed_up_to': self.start_date - ONE_DAY
}
def should_be_billed(self, billing_date, generate_documents_datetime=None):
return (
self.should_plan_be_billed(billing_date, generate_documents_datetime=generate_documents_datetime) or
self.should_mfs_be_billed(billing_date, generate_documents_datetime=generate_documents_datetime)
)
    def should_plan_be_billed(self, billing_date, generate_documents_datetime=None):
        """Decide whether the plan amount is due on *billing_date*.

        :param billing_date: the date for which billing is evaluated.
        :param generate_documents_datetime: "now" from the document
            generator's perspective; defaults to the current time.
        :rtype: bool
        """
        if self.state not in [self.STATES.ACTIVE, self.STATES.CANCELED]:
            return False

        if not generate_documents_datetime:
            generate_documents_datetime = timezone.now()

        if self.cycle_billing_duration:
            if self.start_date > first_day_of_month(billing_date) + self.cycle_billing_duration:
                # There was nothing to bill on the last day of the first cycle billing duration
                return False

            # We need the full cycle here (ignoring trial ends)
            cycle_start_datetime_ignoring_trial = self._cycle_start_date(billing_date,
                                                                         ignore_trial=False)
            latest_possible_billing_datetime = (
                cycle_start_datetime_ignoring_trial + self.cycle_billing_duration
            )
            # Cap the billing date to the cycle's allowed billing window.
            billing_date = min(billing_date, latest_possible_billing_datetime)

        # Never bill in the future relative to document generation time.
        if billing_date > generate_documents_datetime.date():
            return False

        cycle_start_date = self.cycle_start_date(billing_date)
        if not cycle_start_date:
            return False

        if self.state == self.STATES.CANCELED:
            if billing_date <= self.cancel_date:
                return False
            # After cancellation, billing starts the day after the cancel date.
            cycle_start_date = self.cancel_date + ONE_DAY

        cycle_start_datetime = datetime.combine(cycle_start_date,
                                                datetime.min.time()).replace(tzinfo=utc)

        # Respect the plan's post-cycle generation delay.
        generate_after = timedelta(seconds=self.plan.generate_after)
        if generate_documents_datetime < cycle_start_datetime + generate_after:
            return False

        plan_billed_up_to = self.billed_up_to_dates['plan_billed_up_to']

        # We want to bill the subscription if the plan hasn't been billed for this cycle or
        # if the subscription has been canceled and the plan won't be billed for this cycle.
        if self.prebill_plan or self.state == self.STATES.CANCELED:
            return plan_billed_up_to < cycle_start_date

        # wait until the cycle that is going to be billed ends:
        billed_cycle_end_date = self.cycle_end_date(plan_billed_up_to + ONE_DAY)
        return billed_cycle_end_date < cycle_start_date
    def should_mfs_be_billed(self, billing_date, generate_documents_datetime=None, billed_up_to=None):
        """Decide whether metered-feature consumption is due on *billing_date*.

        Mirrors :meth:`should_plan_be_billed` but uses the metered-features
        interval settings.

        :param billed_up_to: optional override of the date metered features
            were last billed up to.
        :rtype: bool
        """
        if self.state not in [self.STATES.ACTIVE, self.STATES.CANCELED]:
            return False

        if not generate_documents_datetime:
            generate_documents_datetime = timezone.now()

        # When MFs are only billed together with a base amount, defer to the
        # plan's decision first.
        if (
            self.plan.only_bill_metered_features_with_base_amount and
            not self.should_plan_be_billed(billing_date, generate_documents_datetime)
        ):
            return False

        if self.cycle_billing_duration:
            if self.start_date > first_day_of_month(billing_date) + self.cycle_billing_duration:
                # There was nothing to bill on the last day of the first cycle billing duration
                return False

            # We need the full cycle here (ignoring trial ends)
            cycle_start_datetime_ignoring_trial = self._cycle_start_date(billing_date,
                                                                         ignore_trial=False,
                                                                         origin_type=OriginType.MeteredFeature)
            latest_possible_billing_datetime = (
                cycle_start_datetime_ignoring_trial + self.cycle_billing_duration
            )
            # Cap the billing date to the cycle's allowed billing window.
            billing_date = min(billing_date, latest_possible_billing_datetime)

        # Never bill in the future relative to document generation time.
        if billing_date > generate_documents_datetime.date():
            return False

        cycle_start_date = self.cycle_start_date(billing_date, origin_type=OriginType.MeteredFeature)
        if not cycle_start_date:
            return False

        if self.state == self.STATES.CANCELED:
            if billing_date <= self.cancel_date:
                return False
            # After cancellation, billing starts the day after the cancel date.
            cycle_start_date = self.cancel_date + ONE_DAY

        cycle_start_datetime = datetime.combine(cycle_start_date,
                                                datetime.min.time()).replace(tzinfo=utc)

        # Respect the plan's post-cycle generation delay.
        generate_after = timedelta(seconds=self.plan.generate_after)
        if generate_documents_datetime < cycle_start_datetime + generate_after:
            return False

        metered_features_billed_up_to = billed_up_to or self.billed_up_to_dates['metered_features_billed_up_to']

        # We want to bill the subscription if the subscription has been canceled.
        if self.state == self.STATES.CANCELED:
            return metered_features_billed_up_to < cycle_start_date

        # wait until the cycle that is going to be billed ends:
        billed_cycle_end_date = self.cycle_end_date(metered_features_billed_up_to + ONE_DAY,
                                                    origin_type=OriginType.MeteredFeature)
        return billed_cycle_end_date < cycle_start_date and billed_cycle_end_date < billing_date
@property
def _has_existing_customer_with_consolidated_billing(self):
# TODO: move to Customer
return (
self.customer.consolidated_billing and
self.customer.subscriptions.filter(state=self.STATES.ACTIVE).count() > 1
)
@property
def is_billed_first_time(self):
return self.billing_logs.all().count() == 0
    @property
    def last_billing_log(self):
        """The most recent billing log by ``billing_date``, or None."""
        return self.billing_logs.order_by('billing_date').last()

    @property
    def last_billing_date(self):
        """Billing date of the latest billing log, or None.

        NOTE(review): relies on the related manager's default ordering
        putting the newest entry first, while ``last_billing_log`` orders
        explicitly — confirm both agree.
        """
        # ToDo: Improve this when dropping Django 1.8 support
        try:
            return self.billing_logs.all()[0].billing_date
        except (BillingLog.DoesNotExist, IndexError):
            # It should never get here.
            return None
def _should_activate_with_free_trial(self):
return Subscription.objects.filter(
plan__provider=self.plan.provider,
customer=self.customer,
state__in=[Subscription.STATES.ACTIVE, Subscription.STATES.CANCELED,
Subscription.STATES.ENDED]
).count() == 0
@property
def applied_discounts(self):
Discount = apps.get_model('silver.Discount')
return Discount.for_subscription(self)
@property
def applied_bonuses(self):
Bonus = apps.get_model('silver.Bonus')
return Bonus.for_subscription(self)
    ##########################################################################
    # STATE MACHINE TRANSITIONS
    ##########################################################################
    @transition(field=state, source=[STATES.INACTIVE, STATES.CANCELED],
                target=STATES.ACTIVE)
    def activate(self, start_date=None, trial_end_date=None):
        """Activate the subscription, fixing its start date and — for the
        customer's first subscription with this provider — its trial end.

        :param start_date: requested start date; clamped to today at most.
        :param trial_end_date: explicit trial end; clamped to at least the
            start date.
        """
        # The subscription can never start in the future.
        if start_date:
            self.start_date = min(timezone.now().date(), start_date)
        else:
            if self.start_date:
                self.start_date = min(timezone.now().date(), self.start_date)
            else:
                self.start_date = timezone.now().date()

        # Free trials only apply to the first subscription per
        # provider/customer pair.
        if self._should_activate_with_free_trial():
            if trial_end_date:
                self.trial_end = max(self.start_date, trial_end_date)
            else:
                if self.trial_end:
                    # Drop a trial end that would precede the start date.
                    if self.trial_end < self.start_date:
                        self.trial_end = None
                elif self.plan.trial_period_days:
                    self.trial_end = self.start_date + timedelta(
                        days=self.plan.trial_period_days - 1)
    @transition(field=state, source=STATES.ACTIVE, target=STATES.CANCELED)
    def cancel(self, when):
        """Cancel the subscription, either immediately or at the end of the
        current billing cycle.

        :param when: one of ``CANCEL_OPTIONS`` (NOW / END_OF_BILLING_CYCLE).
        """
        now = timezone.now()
        bsdt = self.bucket_start_datetime()
        bedt = self.bucket_end_datetime()

        if when == self.CANCEL_OPTIONS.END_OF_BILLING_CYCLE:
            if self.is_on_trial:
                # Cancel at the end of the bucket containing the trial end.
                self.cancel_date = self.bucket_end_date(reference_date=self.trial_end)
            else:
                self.cancel_date = self.cycle_end_date()
        elif when == self.CANCEL_OPTIONS.NOW:
            # Truncate the current bucket's unit logs to "now".
            for metered_feature in self.plan.metered_features.all():
                MeteredFeatureUnitsLog.objects.filter(
                    start_datetime__gte=bsdt, end_datetime=bedt,
                    metered_feature=metered_feature.pk,
                    subscription=self.pk
                ).update(end_datetime=now)
            # An ongoing trial ends on the cancellation day.
            if self.on_trial(now.date()):
                self.trial_end = now.date()
            self.cancel_date = now.date()

        self.save()
    @transition(field=state, source=STATES.CANCELED, target=STATES.ENDED)
    def end(self):
        """Mark the canceled subscription as ended today."""
        self.ended_at = timezone.now().date()
    ##########################################################################

    def _cancel_now(self):
        # Convenience wrapper around cancel(when=NOW).
        self.cancel(when=self.CANCEL_OPTIONS.NOW)

    def _cancel_at_end_of_billing_cycle(self):
        # Convenience wrapper around cancel(when=END_OF_BILLING_CYCLE).
        self.cancel(when=self.CANCEL_OPTIONS.END_OF_BILLING_CYCLE)
def _get_interval_end_date(self, date=None):
"""
:returns: the end date of the interval that should be billed. The
returned value is a function f(subscription_state, date)
:rtype: datetime.date
"""
if self.state == self.STATES.ACTIVE:
end_date = self.bucket_end_date(reference_date=date)
elif self.state == self.STATES.CANCELED:
if self.trial_end and date <= self.trial_end:
if self.trial_end <= self.cancel_date:
end_date = self.trial_end
else:
end_date = self.cancel_date
else:
end_date = self.cancel_date
return end_date
    def _log_value_state(self, value_state):
        """Debug-log an intermediate value-computation step."""
        logger.debug('Adding value: %s', {
            'subscription': self.id,
            'value_state': value_state
        })
    def _add_plan_trial(self, start_date, end_date, invoice=None,
                        proforma=None):
        """
        Adds the plan trial to the document, by adding an entry with positive
        prorated value and one with prorated, negative value which represents
        the discount for the trial period.

        :returns: the net amount added to the document — always 0, since the
            two entries cancel out.
        """
        prorated, fraction = self._get_proration_status_and_fraction(start_date,
                                                                     end_date,
                                                                     OriginType.Plan)
        # Prorated plan price for [start_date, end_date].
        plan_price = quantize_fraction(Fraction(str(self.plan.amount)) * fraction)

        # NOTE(review): 'proration_percentage' receives the prorated price,
        # not a percentage — confirm against the entry templates.
        context = self._build_entry_context({
            'name': self.plan.name,
            'unit': self.plan.base_interval,
            'product_code': self.plan.product_code,
            'start_date': start_date,
            'end_date': end_date,
            'prorated': prorated,
            'proration_percentage': plan_price,
            'context': 'plan-trial'
        })

        unit = self._entry_unit(context)
        description = self._entry_description(context)

        # Add plan with positive value
        DocumentEntry.objects.create(
            invoice=invoice, proforma=proforma, description=description,
            unit=unit, unit_price=plan_price, quantity=Decimal('1.00'),
            product_code=self.plan.product_code, prorated=prorated,
            start_date=start_date, end_date=end_date
        )

        context.update({
            'context': 'plan-trial-discount'
        })

        description = self._entry_description(context)

        # Add plan with negative value
        DocumentEntry.objects.create(
            invoice=invoice, proforma=proforma, description=description,
            unit=unit, unit_price=-plan_price, quantity=Decimal('1.00'),
            product_code=self.plan.product_code, prorated=prorated,
            start_date=start_date, end_date=end_date
        )

        return Decimal("0.00")
# def _get_proration_status_and_fraction_during_trial(self, start_date, end_date) -> Tuple[bool, Fraction]:
# """
# Returns the proration percent (how much of the interval will be billed)
# and the status (if the subscription is prorated or not) during the trial period.
# If start_date and end_date are not from the trial period, you are entering
# undefined behaviour territory.
#
# :returns: a tuple containing (status, Decimal(percent)) where status
# can be one of [True, False]. The Decimal will have values in the
# [0.00, 1.00] range.
# :rtype: tuple
# """
#
# if self.on_trial(end_date):
# fraction = Fraction((end_date - start_date).days + 1, (self.start_date - self.trial_end).days + 1)
#
# return fraction != Fraction(1), fraction
#
# return False, Fraction(1)
    def _get_consumed_units_from_total_included_in_trial(self, metered_feature, start_date, end_date,
                                                         consumed_units, bonuses=None):
        """
        :returns: (consumed_units, free_units) — units to bill beyond the
            trial inclusion, and units covered by it. A ``None`` inclusion
            means everything consumed during trial is free.
        """
        # _, extra_proration_fraction = self._get_proration_status_and_fraction_during_trial(start_date, end_date)
        #
        # included_units_during_trial = quantize_fraction(
        #     Fraction(str(metered_feature.included_units_during_trial)) * extra_proration_fraction
        # )
        included_units_during_trial = metered_feature.included_units_during_trial

        # None -> unlimited inclusion during trial: nothing billable.
        if included_units_during_trial is None:
            return 0, consumed_units

        # Fully covered by the trial inclusion.
        if consumed_units <= included_units_during_trial:
            return 0, consumed_units

        # Bill only the excess; the inclusion itself stays free.
        return consumed_units - included_units_during_trial, included_units_during_trial
    def _get_extra_consumed_units_during_trial(self, metered_feature, start_date, end_date,
                                               consumed_units, bonuses=None):
        """
        :returns: (extra_consumed, free_units)

            extra_consumed - units consumed extra during trial that will be
                             billed
            free_units     - the units included in trial
        """
        if self.is_billed_first_time:
            # It's on trial and is billed first time
            return self._get_consumed_units_from_total_included_in_trial(
                metered_feature, start_date, end_date, consumed_units, bonuses=bonuses
            )
        else:
            # It's still on trial but has been billed before
            # The following part tries to handle the case when the trial
            # spans over 2 months and the subscription has been already billed
            # once => this month it is still on trial but it only
            # has remaining = consumed_last_cycle - included_during_trial
            last_log_entry = self.billing_logs.all()[0]
            if last_log_entry.invoice:
                qs = last_log_entry.invoice.invoice_entries.filter(
                    product_code=metered_feature.product_code)
            elif last_log_entry.proforma:
                qs = last_log_entry.proforma.proforma_entries.filter(
                    product_code=metered_feature.product_code)
            else:
                qs = DocumentEntry.objects.none()

            # No previous entries for this feature -> treat like first billing.
            if not qs.exists():
                return self._get_consumed_units_from_total_included_in_trial(
                    metered_feature, start_date, end_date, consumed_units, bonuses=bonuses
                )

            # Sum only the positively-priced (actually consumed) entries.
            consumed = [qs_item.quantity
                        for qs_item in qs if qs_item.unit_price >= 0]
            consumed_in_last_billing_cycle = sum(consumed)

            included_during_trial = metered_feature.included_units_during_trial or Decimal(0)
            # The previous cycle already exhausted the inclusion.
            if consumed_in_last_billing_cycle > included_during_trial:
                return consumed_units, 0

            # Otherwise only the leftover inclusion is still free.
            remaining = included_during_trial - consumed_in_last_billing_cycle
            if consumed_units > remaining:
                return consumed_units - remaining, remaining

            return 0, consumed_units
def _add_mfs_for_trial(self, start_date, end_date, invoice=None, proforma=None, bonuses=None):
start_datetime = datetime.combine(
start_date,
datetime.min.time(),
tzinfo=timezone.utc,
).replace(microsecond=0)
end_datetime = datetime.combine(
end_date,
datetime.max.time(),
tzinfo=timezone.utc,
).replace(microsecond=0)
prorated, fraction = self._get_proration_status_and_fraction(start_date,
end_date,
OriginType.MeteredFeature)
context = self._build_entry_context({
'product_code': self.plan.product_code,
'start_date': start_date,
'end_date': end_date,
'prorated': prorated,
'proration_percentage': quantize_fraction(fraction),
'bonuses': bonuses,
'context': 'metered-feature-trial'
})
total = Decimal("0.00")
# Add all the metered features consumed during the trial period
for metered_feature in self.plan.metered_features.all():
context.update({'metered_feature': metered_feature,
'unit': metered_feature.unit,
'name': metered_feature.name,
'product_code': metered_feature.product_code})
unit = self._entry_unit(context)
qs = self.mf_log_entries.filter(metered_feature=metered_feature,
start_datetime__gte=start_datetime,
end_datetime__lte=end_datetime)
log = [qs_item.consumed_units for qs_item in qs]
total_consumed_units = sum(log)
mf_bonuses = [bonus for bonus in bonuses if bonus.applies_to_metered_feature(metered_feature)]
extra_consumed, free = self._get_extra_consumed_units_during_trial(
metered_feature, start_date, end_date, total_consumed_units, bonuses=mf_bonuses
)
if extra_consumed > 0:
charged_units = extra_consumed
free_units = free
else:
free_units = total_consumed_units
charged_units = 0
if free_units > 0:
description = self._entry_description(context)
# Positive value for the consumed items.
DocumentEntry.objects.create(
invoice=invoice, proforma=proforma, description=description,
unit=unit, quantity=free_units,
unit_price=metered_feature.price_per_unit,
product_code=metered_feature.product_code,
start_date=start_date, end_date=end_date,
prorated=prorated
)
context.update({
'context': 'metered-feature-trial-discount'
})
description = self._entry_description(context)
# Negative value for the consumed items.
DocumentEntry.objects.create(
invoice=invoice, proforma=proforma, description=description,
unit=unit, quantity=free_units,
unit_price=-metered_feature.price_per_unit,
product_code=metered_feature.product_code,
start_date=start_date, end_date=end_date,
prorated=prorated
)
# Extra items consumed items that are not included
if charged_units > 0:
context.update({
'context': 'metered-feature-trial-not-discounted'
})
description_template_path = field_template_path(
field='entry_description',
provider=self.plan.provider.slug)
description = render_to_string(
description_template_path, context
)
total += DocumentEntry.objects.create(
invoice=invoice, proforma=proforma,
description=description, unit=unit,
quantity=charged_units, prorated=prorated,
unit_price=metered_feature.price_per_unit,
product_code=metered_feature.product_code,
start_date=start_date, end_date=end_date
).total
return total
def _add_plan_entries(self, start_date, end_date, invoice=None, proforma=None) \
-> Tuple[Decimal, List['silver.models.DocumentEntry']]:
"""
Adds to the document the cost of the plan.
:returns: A tuple consisting of:
- The plan cost after proration and PER ENTRY discounts have been applied.
- A list of entries that have been added to the documents. The first one is the (prorated)
cost of the plan, followed by PER ENTRY discount entries if applicable. It is possible
that PER DOCUMENT or PER ENTRY TYPE discount entries to be created later down the
document generation process.
"""
prorated, fraction = self._get_proration_status_and_fraction(start_date,
end_date,
OriginType.Plan)
plan_price = quantize_fraction(Fraction(str(self.plan.amount)) * fraction)
base_context = {
'name': self.plan.name,
'unit': self.plan.base_interval,
'product_code': self.plan.product_code,
'start_date': start_date,
'end_date': end_date,
'prorated': prorated,
'proration_percentage': quantize_fraction(fraction),
}
plan_context = base_context.copy()
plan_context.update({
'context': 'plan'
})
context = self._build_entry_context(plan_context)
description = self._entry_description(context)
unit = self._entry_unit(context)
entries = [
DocumentEntry.objects.create(
invoice=invoice, proforma=proforma, description=description,
unit=unit, unit_price=plan_price, quantity=Decimal('1.00'),
product_code=self.plan.product_code, prorated=prorated,
start_date=start_date, end_date=end_date
)
]
return entries[0].total_before_tax, entries
def _included_units_from_bonuses(
self, metered_feature, start_date, end_date, extra_proration_fraction: Fraction, bonuses: List
):
included_units = extra_proration_fraction * Fraction(metered_feature.included_units or Decimal(0))
return sum(
[
(
Fraction(str(bonus.amount)) if bonus.amount else
Fraction(str(bonus.amount_percentage)) / 100 * included_units
) * bonus.extra_proration_fraction(self, start_date, end_date, OriginType.MeteredFeature)[0]
for bonus in bonuses
]
)
def _get_extra_consumed_units(self, metered_feature, extra_proration_fraction: Fraction,
start_datetime, end_datetime, bonuses=None) -> OverageInfo:
included_units = extra_proration_fraction * Fraction(metered_feature.included_units or Decimal(0))
log_entries = self.mf_log_entries.filter(
metered_feature=metered_feature,
start_datetime__gte=start_datetime,
end_datetime__lte=end_datetime
)
consumed_units = [entry.consumed_units for entry in log_entries]
total_consumed_units = reduce(lambda x, y: x + y, consumed_units, 0)
annotations = list({log_entry.annotation for log_entry in log_entries})
start_date = start_datetime.date()
end_date = end_datetime.date()
if bonuses:
bonuses = [bonus for bonus in bonuses if bonus.matches_metered_feature_units(metered_feature, annotations)]
applied_directly_bonuses = [
bonus for bonus in bonuses
if bonus.document_entry_behavior == bonus.ENTRY_BEHAVIOR.APPLY_DIRECTLY_TO_TARGET_ENTRIES
]
applied_separately_bonuses = [
bonus for bonus in bonuses
if bonus.document_entry_behavior == bonus.ENTRY_BEHAVIOR.APPLY_AS_SEPARATE_ENTRY_PER_ENTRY
]
included_units += self._included_units_from_bonuses(
metered_feature, start_date, end_date, extra_proration_fraction, applied_directly_bonuses
)
included_units = quantize_fraction(included_units)
extra_consumed_units = max(total_consumed_units - included_units, Decimal(0))
return OverageInfo(
extra_consumed_units, annotations, applied_directly_bonuses, applied_separately_bonuses
)
    def _add_mfs_entries(self, start_date, end_date, invoice=None, proforma=None, bonuses=None) \
            -> Tuple[Decimal, List['silver.models.DocumentEntry']]:
        """Add document entries for metered-feature overage (non-trial).

        For every metered feature of the plan an entry is created for the
        units consumed beyond the (prorated) included amount. Bonuses that
        apply as separate entries then add negative (discount) entries for
        the overage units they cover.

        :param start_date: start of the billed period.
        :param end_date: end of the billed period.
        :param invoice: the invoice the entries are attached to (or None).
        :param proforma: the proforma the entries are attached to (or None).
        :param bonuses: optional list of bonuses to take into account.
        :returns: (total amount of the added entries, list of added entries).
        """
        # Expand the date interval to full-day datetime bounds used to select
        # the consumption log entries.
        start_datetime = datetime.combine(
            start_date,
            datetime.min.time(),
            tzinfo=timezone.utc,
        ).replace(microsecond=0)
        end_datetime = datetime.combine(
            end_date,
            datetime.max.time(),
            tzinfo=timezone.utc,
        ).replace(microsecond=0)
        prorated, fraction = self._get_proration_status_and_fraction(start_date, end_date, OriginType.MeteredFeature)
        base_context = self._build_entry_context({
            'start_date': start_date,
            'end_date': end_date,
            'prorated': prorated,
            'proration_percentage': quantize_fraction(fraction),
            'context': 'metered-feature'
        })
        mfs_total = Decimal('0.00')
        entries = []
        for metered_feature in self.plan.metered_features.all():
            overage_info = self._get_extra_consumed_units(
                metered_feature, fraction, start_datetime, end_datetime, bonuses=bonuses
            )
            extra_consumed_units = overage_info.extra_consumed_units
            entry_context = base_context.copy()
            entry_context.update({
                'metered_feature': metered_feature,
                'unit': metered_feature.unit,
                'name': metered_feature.name,
                'product_code': metered_feature.product_code,
                'annotations': overage_info.annotations,
                'directly_applied_bonuses': overage_info.directly_applied_bonuses,
            })
            description = self._entry_description(entry_context)
            unit = self._entry_unit(entry_context)
            # Positive entry for the overage units of this feature.
            entry = DocumentEntry.objects.create(
                invoice=invoice, proforma=proforma,
                description=description, unit=unit,
                quantity=overage_info.extra_consumed_units, prorated=prorated,
                unit_price=metered_feature.price_per_unit,
                start_date=start_date, end_date=end_date
            )
            entries.append(entry)
            # Separately-applied bonuses each discount part of the overage
            # with a dedicated negative entry.
            for separate_bonus in overage_info.separately_applied_bonuses:
                if extra_consumed_units <= 0:
                    # Nothing left to discount for this feature.
                    break
                bonus_included_units = quantize_fraction(
                    self._included_units_from_bonuses(
                        metered_feature, start_date, end_date,
                        extra_proration_fraction=fraction, bonuses=[separate_bonus]
                    )
                )
                if not bonus_included_units:
                    continue
                # The discount cannot exceed the remaining overage.
                bonus_consumed_units = min(bonus_included_units, extra_consumed_units)
                extra_consumed_units -= bonus_consumed_units
                bonus_entry_context = base_context.copy()
                bonus_entry_context.update({
                    'metered_feature': metered_feature,
                    'unit': metered_feature.unit,
                    'name': metered_feature.name,
                    'product_code': metered_feature.product_code,
                    'annotations': overage_info.annotations,
                    'directly_applied_bonuses': overage_info.directly_applied_bonuses,
                    'context': 'metered-feature-bonus'
                })
                description = self._entry_description(bonus_entry_context)
                # Negative unit price => the entry lowers the document total;
                # the entry carries the bonus's own product code.
                bonus_entry = DocumentEntry.objects.create(
                    invoice=invoice, proforma=proforma,
                    description=description, unit=unit,
                    quantity=bonus_consumed_units, prorated=prorated,
                    unit_price=-metered_feature.price_per_unit,
                    product_code=separate_bonus.product_code,
                    start_date=start_date, end_date=end_date
                )
                entries.append(bonus_entry)
                mfs_total += bonus_entry.total_before_tax
            mfs_total += entry.total_before_tax
        return mfs_total, entries
    def _get_proration_status_and_fraction(self, start_date, end_date, entry_type: OriginType) -> Tuple[bool, Fraction]:
        """
        Returns the proration status (whether only part of the interval is
        billed) and the fraction of the interval that will be billed.
        If start_date and end_date are not from the same billing cycle, you are entering
        undefined behaviour territory.
        :returns: a tuple (prorated, fraction) where prorated is True when
            the period does not span the whole interval, and fraction is a
            Fraction in the (0, 1] range.
        :rtype: tuple

        NOTE(review): only DAY/WEEK/YEAR and MONTH intervals are handled
        below; any other interval value falls through and implicitly returns
        None — confirm the interval set is exhaustive.
        """
        # Pick the interval settings matching the entry's origin (plan base
        # cost vs. metered features).
        interval = self.plan.base_interval if entry_type == OriginType.Plan else self.plan.metered_features_interval
        interval_count = (self.plan.base_interval_count if entry_type == OriginType.Plan else
                          self.plan.metered_features_interval_count)
        cycle_start_date = self._cycle_start_date(
            ignore_trial=True,
            reference_date=start_date,
            origin_type=entry_type
        )
        first_day_of_full_interval = first_day_of_interval(cycle_start_date, interval)
        last_day_of_full_interval = end_of_interval(
            first_day_of_full_interval, interval, interval_count
        )
        # The period covers the full interval => no proration.
        if start_date == first_day_of_full_interval and end_date == last_day_of_full_interval:
            return False, Fraction(1, 1)
        if interval in (Plan.INTERVALS.DAY, Plan.INTERVALS.WEEK, Plan.INTERVALS.YEAR):
            # Day-based proration; both bounds are inclusive, hence the +1.
            full_interval_days = (last_day_of_full_interval - first_day_of_full_interval).days + 1
            billing_cycle_days = (end_date - start_date).days + 1
            return (
                True,
                Fraction(billing_cycle_days, full_interval_days)
            )
        elif interval == Plan.INTERVALS.MONTH:
            # Month-based proration handles intervals with unequal day counts.
            billing_cycle_months = monthdiff_as_fraction(end_date + ONE_DAY, start_date)
            full_interval_months = monthdiff_as_fraction(last_day_of_full_interval + ONE_DAY,
                                                         first_day_of_full_interval)
            return True, Fraction(billing_cycle_months, full_interval_months)
def _entry_unit(self, context):
unit_template_path = field_template_path(
field='entry_unit', provider=self.plan.provider.slug)
return render_to_string(unit_template_path, context)
def _entry_description(self, context):
description_template_path = field_template_path(
field='entry_description', provider=self.plan.provider.slug
)
return render_to_string(description_template_path, context)
@property
def _base_entry_context(self):
return {
'name': None,
'unit': 1,
'subscription': self,
'plan': self.plan,
'provider': self.plan.provider,
'customer': self.customer,
'product_code': None,
'start_date': None,
'end_date': None,
'prorated': None,
'proration_percentage': None,
'metered_feature': None,
'context': None
}
def _build_entry_context(self, context):
base_context = self._base_entry_context
base_context.update(context)
return base_context
def __str__(self):
return u'%s (%s)' % (self.customer, self.plan.name)
class BillingLog(models.Model):
    """Record of one billing run for a subscription.

    Stores which document was generated, when it was generated, how far the
    plan and the metered features have been billed, and the resulting
    amounts. The default ordering puts the most recent run first, which the
    billing code relies on when it reads `billing_logs.all()[0]`.
    """
    subscription = models.ForeignKey('Subscription', on_delete=models.CASCADE,
                                     related_name='billing_logs')
    invoice = models.ForeignKey('BillingDocumentBase', null=True, blank=True,
                                on_delete=models.SET_NULL, related_name='invoice_billing_logs')
    proforma = models.ForeignKey('BillingDocumentBase', null=True, blank=True,
                                 on_delete=models.SET_NULL, related_name='proforma_billing_logs')
    billing_date = models.DateField(
        help_text="The date when the invoice/proforma was generated."
    )
    plan_billed_up_to = models.DateField(
        help_text="The date up to which the plan base amount has been billed."
    )
    metered_features_billed_up_to = models.DateField(
        help_text="The date up to which the metered features have been billed."
    )
    total = models.DecimalField(
        decimal_places=2, max_digits=12,
        null=True, blank=True
    )
    plan_amount = models.DecimalField(
        decimal_places=2, max_digits=12,
        null=True, blank=True
    )
    metered_features_amount = models.DecimalField(
        decimal_places=2, max_digits=12,
        null=True, blank=True
    )
    # auto_now_add expects a boolean; the previous `auto_now_add=timezone.now`
    # only worked because any truthy value enables the behavior.
    created_at = models.DateTimeField(auto_now_add=True)

    class Meta:
        ordering = ['-billing_date']

    def __str__(self):
        return u'{sub} - {pro} - {inv} - {date}'.format(
            sub=self.subscription, pro=self.proforma,
            inv=self.invoice, date=self.billing_date)
@receiver(pre_delete, sender=Customer)
def cancel_billing_documents(sender, instance, **kwargs):
    """On customer deletion, cancel and end all of their active subscriptions.

    Runs as a pre_delete signal receiver. Fixture loading (`raw=True`) and
    unsaved instances are skipped. A failed state transition is logged and
    does not block processing of the remaining subscriptions.
    """
    if instance.pk and not kwargs.get('raw', False):
        subscriptions = Subscription.objects.filter(
            customer=instance, state=Subscription.STATES.ACTIVE
        )
        for subscription in subscriptions:
            try:
                subscription.cancel()
                subscription.end()
                subscription.save()
            except TransitionNotAllowed:
                # Log and continue with the next subscription; the dead
                # `pass` that used to follow this call has been removed.
                logger.error(
                    'Couldn\'t end subscription on customer delete: %s', {
                        'subscription': subscription.id,
                    }
                )
| silverapp/silver | silver/models/subscriptions.py | subscriptions.py | py | 56,720 | python | en | code | 292 | github-code | 13 |
16824599754 | """Schema for database name-space."""
from marshmallow import Schema, post_load
from marshmallow.fields import Float, Integer, List, Nested, String
from hyrisecockpit.api.app.database.model import (
AvailableWorkloadTables,
Database,
DetailedDatabase,
WorkloadTables,
)
class DatabaseSchema(Schema):
"""Schema of a Database."""
id = String(
title="Database ID",
description="Used to identify a database.",
required=True,
example="hyrise-1",
)
@post_load
def make_database(self, data, **kwargs):
"""Return database object."""
return Database(**data)
class DetailedDatabaseSchema(Schema):
"""Schema of a detailed Database."""
id = String(
title="Database ID",
description="Used to identify a database.",
required=True,
example="hyrise-1",
)
host = String(
title="Host",
description="Host to log in to.",
required=True,
example="vm.example.com",
)
port = String(
title="Port",
description="Port of the host to log in to.",
required=True,
example="1234",
)
number_workers = Integer(
title="Number of initial database worker processes.",
description="",
required=True,
example=8,
)
dbname = String(
title="",
description="Name of the database to log in to.",
required=True,
example="mydb",
)
user = String(
title="Username",
description="Username used to log in.",
required=True,
example="user123",
)
password = String(
title="Password",
description="Password used to log in.",
required=True,
example="password123",
)
@post_load
def make_detailed_database(self, data, **kwargs):
"""Return detailed database object."""
return DetailedDatabase(**data)
class WorkloadTablesSchema(Schema):
"""Schema of loading workload tables."""
workload_type = String(
title="Workload type",
description="Name of the workload type that includes all needed tables.",
required=True,
example="tpch",
)
scale_factor = Float(
title="Scale factor of tables",
description="Scale factor of tables for workload type.",
required=True,
example=1.0,
)
@post_load
def make_benchmark_tables(self, data, **kwargs):
"""Return available benchmark tables object."""
return WorkloadTables(**data)
class AvailableWorkloadTablesSchema(Schema):
"""Schema of available workload tables."""
workload_tables = List(Nested(WorkloadTablesSchema))
@post_load
def make_available_benchmark_tables(self, data, **kwargs):
"""Return available benchmark tables object."""
return AvailableWorkloadTables(**data)
| hyrise/Cockpit | hyrisecockpit/api/app/database/schema.py | schema.py | py | 2,903 | python | en | code | 14 | github-code | 13 |
74079555856 | from bs4 import BeautifulSoup
import requests
import time
#url = 'http://jimo.baixing.com/ershouqiche/a1184041036.html'
# 获取一页的网页信息
def get_links_from():
urls = []
list_view = 'http://qingdao.baixing.com/ershouqiche/'
wb_data = requests.get(list_view)
soup = BeautifulSoup(wb_data.text,'lxml')
htmls = soup.select('li > div > div.media-body-title > a.ad-title')
for link in htmls:
urls.append(link.get('href').split('?')[0])
print (soup)
def get_item_info():
urls = get_links_from()
for url in urls:
wb_data = requests.get(url)
soup = BeautifulSoup(wb_data.text,'lxml')
data = {
'title':soup.title.text,
# 'data':soup.select('li.time')[0].get_text(), # 时间
'price':soup.select('span.price')[0].get_text(), # 价格
#'quality':soup.select('ul > li:nth-of-type(2) > div.su_con > span')[0].get_text() # 品质
}
print(data)
time.sleep(2) # 延迟时间
get_item_info()
#get_links_from()
| HanChanXiaMing/Crawler | baixinwang.py | baixinwang.py | py | 1,058 | python | en | code | 0 | github-code | 13 |
14766814210 | # preprocess data
from sklearn.svm import SVC
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from keras.models import load_model
from .Scraper import *
class PreProcess:
def __init__(self, url):
self.url = url
def process(self):
scraper = Scraper(self.url)
self.reviews = scraper.get_reviews()
self.data = scraper.get_data()
self.count = scraper.get_count()
self.name = scraper.get_name()
self.is_applicable = scraper.is_applicable() # check is reviews compatiable
def return_vectorized_reviews(self):
# performing vectorizer
vectorizer = CountVectorizer(stop_words=("english"),max_features=100) # to convert the main reviews into a matrix of shape[1]=100
tfvect = TfidfVectorizer(stop_words="english", max_features=100)
vect_r = vectorizer.fit_transform(self.reviews)
return vect_r
| aaditkapoor/Rate-My-Professor-Sentiment-Analysis | rmpsentiment/preprocess.py | preprocess.py | py | 900 | python | en | code | 1 | github-code | 13 |
7165417781 | import os
import argparse
import json
import numpy as np
import pandas as pd
from flask import Flask, render_template, request, redirect, send_file
from flask_talisman import Talisman
from plots.climate import get_PM25_plot, get_NO2_plot, get_PM25_plot_diff, get_NO2_plot_diff
from plots.dark import get_cases_plot, get_deaths_plot
from plots.finance import get_finance_plot
import config
parser = argparse.ArgumentParser()
parser.add_argument("--host")
parser.add_argument("--port")
app = Flask(__name__)
talisman = Talisman(app, content_security_policy=None)
cases_data_update_date = os.environ.get('CASES_DATA_UPDATE_DATE', config.CASES_DATA_UPDATE_DATE)
climate_data_update_date = os.environ.get('CLIMATE_DATA_UPDATE_DATE', config.CLIMATE_DATA_UPDATE_DATE)
# Home page
@app.route('/')
def home():
return render_template("index.html")
@app.route('/contact')
def contact():
return render_template("contact.html")
@app.route('/dark/cases')
def cases_confirmed():
script, div = get_cases_plot()
return render_template("dark.html", script=script, div=div, date=cases_data_update_date, toggle=True)
@app.route('/dark/deaths')
def cases_deaths():
script, div = get_deaths_plot()
return render_template("dark.html", script=script, div=div, date=cases_data_update_date, toggle=False)
# NO2 page
@app.route('/NO2')
def climate_no2():
script, div = get_NO2_plot()
return render_template("climate.html", script=script, div=div, date=climate_data_update_date, toggle=1)
# PM25 page
@app.route('/PM25')
def climate_pm25():
script, div = get_PM25_plot()
return render_template("climate.html", script=script, div=div, date=climate_data_update_date, toggle=0)
# 2019 NO2 page
@app.route('/NO2_19')
def climate_no2_19():
script, div = get_NO2_plot_diff()
return render_template("climate.html", script=script, div=div, date=climate_data_update_date, toggle=3)
# 2019 PM25 page
@app.route('/PM25_19')
def climate_pm25_19():
script, div = get_PM25_plot_diff()
return render_template("climate.html", script=script, div=div, date=climate_data_update_date, toggle=2)
# 2019 NO2 page
@app.route('/finance')
def finance():
script, div = get_finance_plot()
return render_template("finance.html", script=script, div=div, date=climate_data_update_date, toggle=3)
@app.route('/download/<int:idx>')
def downloadFile(idx):
dir_path = os.path.dirname(os.path.realpath(__file__))
data_dir = os.path.join(dir_path, "data")
if idx < 2:
path = os.path.join(data_dir, "aqi_df.csv")
return send_file(path, as_attachment=True)
else:
path = os.path.join(data_dir, "aqi_df_diff.csv")
return send_file(path, as_attachment=True)
if __name__ == '__main__':
args = parser.parse_args()
#app.run(debug=True)
app.run(host=args.host, port=args.port)
| mayukh18/covidexplore | app.py | app.py | py | 2,849 | python | en | code | 5 | github-code | 13 |
72842986578 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import re
import os
import sys
import configparser
from slackclient import SlackClient
try:
syspath = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.insert(0, syspath)
from settings import LACK_BOT_TOKEN as TOKEN
except Exception:
raise
def TrustUserExist(conf, filepath):
conf.read(filepath)
sections = conf.sections()
if 'trustuser' in sections:
return True
else:
return False
def UserListExist(conf, filepath):
conf.read(filepath)
sections = conf.sections()
if 'userlist' in sections:
return True
else:
return False
pass
def CheckUserExist(conf, filepath, user):
if TrustUserExist(conf, filepath):
conf.read(filepath)
if user in conf.options['trustuser']:
return True
else:
return False
else:
return False
def CheckUserExistAndGetID(conf, filepath, user):
conf.read(filepath)
if user in conf['userlist']:
userid = conf['userlist'][user]
return True, userid
else:
return False, 'user_error_7'
def ValueAuthority(authority):
roles = "^([1-5]){1}$"
if re.match(roles, authority):
return True
else:
return False
class UserCommandConf(object):
"""docstring for UserCommandConf"""
def __init__(self, filename='trust.conf', ):
super(UserCommandConf, self).__init__()
self.dirpath = os.path.dirname(os.path.realpath(__file__))
self.filepath = os.path.join(self.dirpath, filename)
self.conf = configparser.ConfigParser()
readconfig = self.ReadConf()
if readconfig[0] is not True:
return readconfig[1]
def ReadConf(self):
if os.path.isfile(self.filepath):
if (TrustUserExist(self.conf, self.filepath) and
UserListExist(self.conf, self.filepath)):
return True, 'success'
else:
return False, 'user_error_2'
else:
return False, 'user_error_1'
def UserListUpdate(self):
sc = SlackClient(TOKEN)
api_call = sc.api_call("users.list")
if api_call.get('ok'):
userlist = api_call.get('members')
for user in userlist:
if not user['deleted']:
self.conf.set('userlist', user['name'], user.get('id'))
with open(self.filepath, 'w') as configfile:
self.conf.write(configfile)
return True, 'user_success_1'
else:
return False, 'user_error_1'
def AddUser(self, USER, authority):
if not ValueAuthority(authority):
return False, 'user_error_9'
checklist, userID = CheckUserExistAndGetID(
self.conf, self.filepath, USER)
checktrust = CheckUserExist(self.conf, self.filepath, USER)
if (checklist is True and checktrust is not True):
self.conf.set('trustuser', USER, "{},{}".format(userID, authority))
with open(self.filepath, 'w') as configfile:
self.conf.write(configfile)
return True, 'user_success_1'
elif (checklist is True and checktrust is True):
keyvalue = self.conf.get('trusthost', USER).split(',')
if int(keyvalue[1]) == 0:
if self.UnmaskUser(USER, authority)[0] is True:
return True, 'user_success_2'
else:
return False, 'user_error_4'
return False, 'user_error_3'
elif checklist is False:
return False, 'user_error_8'
else:
return False, 'user_error'
def RemoveUser(self, USER):
checklist, userID = CheckUserExistAndGetID(
self.conf, self.filepath, USER)
checktrust = CheckUserExist(self.conf, self.filepath, USER)
if (checklist is True and checktrust is True):
self.conf.remove_option('trustuser', USER)
with open(self.filepath, 'w') as configfile:
self.conf.write(configfile)
return True, 'host_success_3'
elif (checklist is True and checktrust is not True):
return True, 'host_success_3'
elif checklist is False:
userfile = self.conf.read(self.filepath)
userlist = userfile.options('userlist')
usertrust = userfile.options('trustuser')
for user in usertrust:
if user not in userlist:
self.conf.remove_option('trustuser', user)
with open(self.filepath, 'w') as configfile:
self.conf.write(configfile)
return True, 'user_success_5'
else:
return False, 'user_error'
def ChageUser(self, USER, authority):
if not ValueAuthority(authority):
return False, 'user_error_9'
checklist, userID = CheckUserExistAndGetID(
self.conf, self.filepath, USER)
checktrust = CheckUserExist(self.conf, self.filepath, USER)
if checklist is True and checktrust is True:
self.conf.set('trustuser', USER, "{},{}".format(userID, authority))
with open(self.filepath, 'w') as configfile:
self.conf.write(configfile)
return True, 'user_success_7'
elif checklist is True and checktrust is not True:
return False, 'user_error_5'
elif checklist is False:
return False, 'user_error_7'
else:
return False, 'user_error'
def MaskUser(self, USER):
checklist, userID = CheckUserExistAndGetID(
self.conf, self.filepath, USER)
checktrust = CheckUserExist(self.conf, self.filepath, USER)
if (checklist is True and checktrust is True):
self.conf.set('trustuser', USER, "{},0".format(userID))
with open(self.filepath, 'w') as configfile:
self.conf.write(configfile)
return True, 'user_success_2'
else:
return False, 'host_error_4'
def UnmaskUser(self, USER, authority=5):
if not ValueAuthority(authority):
return False, 'user_error_9'
checklist, userID = CheckUserExistAndGetID(
self.conf, self.filepath, USER)
checktrust = CheckUserExist(self.conf, self.filepath, USER)
if (checklist is True and checktrust is True):
self.conf.set('trustuser', USER, "{},{}".format(userID, authority))
with open(self.filepath, 'w') as configfile:
self.conf.write(configfile)
return True, 'host_success_2'
else:
return False, 'host_error_4'
def CheckUser(self, USER):
checklist, userID = CheckUserExistAndGetID(
self.conf, self.filepath, USER)
checktrust = CheckUserExist(self.conf, self.filepath, USER)
if checklist is True and checktrust is True:
keyvalue = self.conf.get('trustuser', USER).split(',')
return True, "{}=> userID:{} authority:{}".format(
USER, keyvalue[0], keyvalue[1])
else:
return False, 'host_error_4'
def ListAllUser(self):
for item in self.conf.items('trusthost'):
keyvalue = item[1].split(',')
print("User: {}, UserID: {}, Authority: {}".format(
item[0], keyvalue[0], keyvalue[1]))
def UserCommand(command):
commandList = command.split(' ')
userconfig = UserCommandConf()
if commandList[1] is 'add':
userconfig.UserListUpdate()
return userconfig.AddUser(commandList[2], commandList[3])
elif commandList[1] is 'remove':
return userconfig.RemoveUser(commandList[2])
elif commandList[1] is 'update':
return userconfig.ChageUser(commandList[2], commandList[3])
elif commandList[1] is 'mask':
return userconfig.MaskUser(commandList[2])
elif commandList[1] is 'unmask':
return userconfig.UnmaskUser(commandList[2], commandList[3])
else:
return userconfig.ListAllHost()
# Test class
def main():
filename = 'test_trust.conf'
USER = '140.115.31.245'
hostconfig = UserCommandConf(filename)
print(hostconfig.AddUser(USER, 1))
print(hostconfig.ListAllUser())
print(hostconfig.RemoveUser(USER))
print(hostconfig.AddUser(USER, 1))
print(hostconfig.MaskUser(USER))
print(hostconfig.CheckUser(USER))
print(hostconfig.ListAllUser())
print(hostconfig.UnmaskUser(USER, 1))
print(hostconfig.CheckUser(USER))
print(hostconfig.ListAllUser())
if __name__ == '__main__':
main()
| gra230434/slackbot_smartone | Functions/trustuser.py | trustuser.py | py | 8,698 | python | en | code | 0 | github-code | 13 |
37482029634 | '''
기준 값을 몇으로 둘것이냐 ?
--> 배열의 최소값과 최대값사이에 모든 값들(X)
--> 0부터 최대값 사이에 모든 값들(O)
'''
import sys
sys.setrecursionlimit(100000)
rainList = [] # 기준 장마 리스트
resultList = [] # 각 영역마다 리스트
maxs = 0 # 장마 리스트 안의 최대값
N = int(input())
for i in range(N):
val = list(map(int,input().split()))
if max(val) > maxs:
maxs = max(val)
rainList.append(val)
def dfs(x,y,mins,visited):
if x <= -1 or x >= N or y >= N or y <= -1:
return False
if rainList[x][y] > mins and visited[x][y] == True:
visited[x][y] = False
dfs(x+1,y,mins,visited)
dfs(x-1,y,mins,visited)
dfs(x,y+1,mins,visited)
dfs(x,y-1,mins,visited)
return True
return False
visited = []
for _ in range(N): # 방문 리스트 초기화
val = [0] * N
visited.append(val)
small = 1 # 1부터 max까지 모든 경우의 수를 구함
while(small < maxs):
cnt = 0
for i in range(N): # 방문 리스트 작성
for j in range(N):
if rainList[i][j] > small:
visited[i][j] = True
else:
visited[i][j] = False
for i in range(N):
for j in range(N):
if rainList[i][j] > small:
if dfs(i,j,small,visited) == True:
cnt += 1
resultList.append(cnt)
small += 1
if resultList:
print(max(resultList))
else:
print(1)
| Choi-Seong-Hyeok/Algorithm | DFS/안전영역(rt).py | 안전영역(rt).py | py | 1,560 | python | ko | code | 0 | github-code | 13 |
71497060499 | # 언어 : Python
# 날짜 : 2021.09.21
# 문제 : KOREATECH JUDGE > 쉬운 수학, 어려운 프로그래밍 (2020년도 F번 문제)
# 풀이 : 이분 탐색으로 풀어야지 풀리는 문제..
# ========================================================================
import math
import sys
def solution():
available = []
num = 1
low, high = 0, math.pow(2, 32) - 1
while low <= high:
mid = (low + high) // 2
n1 = mid // 3
n2 = (mid // K) * 3
np = n1 + n2
if np == P:
available.append(mid)
high -= 1
elif np < P:
low = mid + 1
else:
high = mid - 1
if not available:
return -1
return int(sorted(available)[0])
for _ in range(int(input())):
P, K = map(int, input().split())
print(solution())
| eunseo-kim/Algorithm | Koreatech Judge/F_이 회사의 순이익이 궁금해.py | F_이 회사의 순이익이 궁금해.py | py | 846 | python | ko | code | 1 | github-code | 13 |
33436027531 | import cv2 as cv
import numpy as np
from tkinter import *
from PIL import ImageTk, Image
from copy import deepcopy
class MainSolution():
    """Image-processing backend for the Tkinter demo.

    Each public method runs one OpenCV operation on the loaded photograph and
    returns the result as a 300x300 Tk PhotoImage ready to show in a Label.
    Call order matters: filt() must run before the threshold methods (they
    read self.imgray), and adaptive_threshold() before methods reading
    self.trsh2 (vincent, segmentation, canny, shi_tomasi).
    """

    def __init__(self):
        # NOTE(review): cv.imread returns BGR while PIL assumes RGB, so the
        # previews show swapped color channels — confirm this is intended.
        self.image = cv.imread("Adams_The_Tetons_and_the_Snake_River.jpg")
        self.imgray = None  # grayscale of mean-shift-filtered image (set by filt)
        self.trsh1 = None   # Otsu threshold result (set by local_threshold)
        self.trsh2 = None   # adaptive threshold result (set by adaptive_threshold)

    def _to_photo(self, array):
        """Shared tail of every operation: numpy image -> 300x300 PhotoImage.

        Replaces twelve identical fromarray/resize/PhotoImage sequences.
        """
        img = Image.fromarray(array)
        img = img.resize((300, 300))
        return ImageTk.PhotoImage(img)

    def original(self):
        """Preview of the unmodified source image."""
        return self._to_photo(self.image)

    def vincent(self):
        """Thinning (skeletonization) of the adaptive-threshold mask."""
        return self._to_photo(cv.ximgproc.thinning(self.trsh2))

    def filt(self):
        """Mean-shift filter then grayscale conversion; caches self.imgray."""
        self.imgray = cv.cvtColor(cv.pyrMeanShiftFiltering(
            self.image, 15, 50), cv.COLOR_BGR2GRAY)
        return self._to_photo(self.imgray)

    def local_threshold(self):
        """Inverted Otsu global threshold of the filtered grayscale image."""
        ret, thresh1 = cv.threshold(self.imgray, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)
        self.trsh1 = deepcopy(thresh1)
        return self._to_photo(thresh1)

    def adaptive_threshold(self):
        """Gaussian adaptive threshold; caches self.trsh2 for later stages."""
        thresh2 = cv.adaptiveThreshold(self.imgray, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 2)
        self.trsh2 = deepcopy(thresh2)
        return self._to_photo(thresh2)

    def niblack(self):
        """Niblack local threshold of the filtered grayscale image."""
        return self._to_photo(cv.ximgproc.niBlackThreshold(self.imgray, 255, cv.THRESH_BINARY, 11, 0.2))

    def segmentation(self):
        """Keep only pixels selected by the adaptive-threshold mask."""
        return self._to_photo(cv.bitwise_and(self.image, self.image, mask=self.trsh2))

    def bilateral_filter(self):
        """Edge-preserving bilateral smoothing of the original image."""
        return self._to_photo(cv.bilateralFilter(self.image, 9, 75, 75))

    def edge_detection(self):
        """Canny edges of the original image."""
        return self._to_photo(cv.Canny(self.image, 100, 200))

    def canny(self):
        """Canny edges of the adaptive-threshold mask."""
        return self._to_photo(cv.Canny(self.trsh2, 100, 200))

    def euclidean_distance(self):
        """L2 distance transform of the Otsu threshold mask."""
        return self._to_photo(cv.distanceTransform(self.trsh1, cv.DIST_L2, 3))

    def shi_tomasi(self):
        """Shi-Tomasi corners drawn (in place) onto the adaptive mask."""
        corners = np.int0(cv.goodFeaturesToTrack(self.trsh2, 25, 0.01, 10))
        for corner in corners:
            x, y = corner.ravel()
            cv.circle(self.trsh2, (x, y), 3, 255, -1)  # mutates self.trsh2
        return self._to_photo(self.trsh2)
class MainApp():
    """Tkinter front-end wiring toolbar buttons to MainSolution operations.

    Layout: original image preview on the left (label3/image1), processed
    result on the right (label4/image2).
    """

    def __init__(self, master):
        self.master = master
        self.master.title("Image Processing")
        self.master.geometry("715x740")
        self.master.resizable(False, False)
        self.master.configure(background="darkgrey")
        self.solution = MainSolution()
        self.frame = Frame(self.master, bg="darkgrey")
        self.frame.pack()
        self.label = Label(self.frame, text="Image Processing", font=("Bahnschrift Condensed", 22), bg="darkgrey")
        self.label.grid(row=0, column=0, columnspan=2, padx=273, pady=20)
        # NOTE(review): button1/button2 use columnspan=3 on rows also occupied
        # by button7/button5 etc., so some cells overlap — layout kept as-is.
        self.button1 = Button(self.frame, text="Original Image", font=("Bahnschrift Condensed", 14), bg="snow", width=15, command=self.original_image)
        self.button1.grid(row=4, column=0, columnspan=3, pady=5)
        self.button2 = Button(self.frame, text="Mean Shift Filter", font=("Bahnschrift Condensed", 14), bg="snow", width=15, command=self.filt)
        self.button2.grid(row=3, column=0, columnspan=3, pady=5)
        self.button3 = Button(self.frame, text="Local Threshold", font=("Bahnschrift Condensed", 14), bg="snow", width=15, command=self.local_threshold)
        self.button3.grid(row=2, column=0, pady=5)
        self.button4 = Button(self.frame, text="Adaptive Threshold", font=("Bahnschrift Condensed", 14), bg="snow", width=15, command=self.adaptive_threshold)
        self.button4.grid(row=2, column=1, pady=5)
        self.button5 = Button(self.frame, text="Segmentation", font=("Bahnschrift Condensed", 14), bg="snow", width=15, command=self.segmentation)
        self.button5.grid(row=3, column=0, pady=5)
        self.button6 = Button(self.frame, text="Bilateral Filter", font=("Bahnschrift Condensed", 14), bg="snow", width=15, command=self.bilateral_filter)
        self.button6.grid(row=3, column=1, pady=5)
        self.button7 = Button(self.frame, text="Edge Detection", font=("Bahnschrift Condensed", 14), bg="snow", width=15, command=self.edge_detection)
        self.button7.grid(row=4, column=0, pady=5)
        self.button8 = Button(self.frame, text="Euclidean Distance", font=("Bahnschrift Condensed", 14), bg="snow", width=15, command=self.euclidean_distance)
        self.button8.grid(row=4, column=1, pady=5)
        self.button9 = Button(self.frame, text="Niblack", font=("Bahnschrift Condensed", 14), bg="snow", width=15, command=self.niblack)
        self.button9.grid(row=5, column=0, pady=5)
        self.button10 = Button(self.frame, text="Close", font=("Bahnschrift Condensed", 14), bg="rosybrown", width=20, command=self.exit)
        self.button10.grid(row=9, column=0, columnspan=3, pady=0)
        self.label1 = Label(self.frame, text="Original Image", font=("Bahnschrift Condensed", 14), bg="darkgrey")
        self.label1.grid(row=7, column=0, pady=5)
        self.label2 = Label(self.frame, text="Processed Image", font=("Bahnschrift Condensed", 14), bg="darkgrey")
        self.label2.grid(row=7, column=1, padx=0, pady=5)
        self.button23 = Button(self.frame, text="Shi-Tomasi", font=("Bahnschrift Condensed", 14), width=15, bg="snow",
                               command=self.shi_tomasi)
        self.button23.grid(row=5, column=1, pady=5)
        self.button20 = Button(self.frame, text="Vincent", font=("Bahnschrift Condensed", 14), bg="snow", width=15,
                               command=self.vincent)
        self.button20.grid(row=6, column=0, pady=5)
        self.button21 = Button(self.frame, text="Canny", font=("Bahnschrift Condensed", 14), bg="snow", width=15,
                               command=self.canny)
        self.button21.grid(row=6, column=1, pady=5)
        self.image1 = self.solution.original()
        self.label3 = Label(self.frame, image=self.image1, bg="darkgrey")
        self.label3.grid(row=8, column=0, pady=10)
        self.image2 = self.solution.original()
        self.label4 = Label(self.frame, image=self.image2, bg="darkgrey")
        self.label4.grid(row=8, column=1, pady=10)

    def _show_processed(self, photo):
        """Route a processed PhotoImage to the right-hand preview label.

        Tk keeps only a weak reference to a label's image, so the photo is
        also stashed on self/label4 to prevent garbage collection.
        """
        self.image2 = photo
        self.label4.configure(image=self.image2)
        self.label4.image = self.image2

    def shi_tomasi(self):
        self._show_processed(self.solution.shi_tomasi())

    def canny(self):
        self._show_processed(self.solution.canny())

    def vincent(self):
        self._show_processed(self.solution.vincent())

    def original_image(self):
        # The only handler that refreshes the LEFT (original) preview.
        self.image1 = self.solution.original()
        self.label3.configure(image=self.image1)
        self.label3.image = self.image1

    def filt(self):
        self._show_processed(self.solution.filt())

    def local_threshold(self):
        # CONSISTENCY FIX: the old handler stored the result in self.image1
        # (the original-preview slot) while displaying it on label4; route it
        # through the processed slot like every other operation.
        self._show_processed(self.solution.local_threshold())

    def adaptive_threshold(self):
        self._show_processed(self.solution.adaptive_threshold())

    def segmentation(self):
        self._show_processed(self.solution.segmentation())

    def bilateral_filter(self):
        self._show_processed(self.solution.bilateral_filter())

    def furie(self):
        # NOTE(review): MainSolution defines no furie() and no button is wired
        # here — dead code that would raise AttributeError if ever called.
        self._show_processed(self.solution.furie())

    def edge_detection(self):
        self._show_processed(self.solution.edge_detection())

    def euclidean_distance(self):
        self._show_processed(self.solution.euclidean_distance())

    def niblack(self):
        self._show_processed(self.solution.niblack())

    def exit(self):
        """Destroy the root window, ending mainloop()."""
        self.master.destroy()
if __name__ == "__main__":
    # Build the root window, hand it to the app, and enter the Tk event loop.
    root = Tk()
    root.title("Image Processing")
    root.geometry("1000x700")
    root.resizable(False, False)
    app = MainApp(root)  # keep a reference for the lifetime of the loop
    root.mainloop()
14386187385 | #
# @lc app=leetcode.cn id=102 lang=python3
#
# [102] 二叉树的层序遍历
#
from typing import List
class TreeNode:
    """Binary-tree node holding a value and optional left/right children."""

    def __init__(self, val: int = 0, left: "TreeNode" = None, right: "TreeNode" = None):
        self.val = val
        self.left = left
        self.right = right

    def __repr__(self) -> str:
        # Debug aid only; not part of the LeetCode-required interface.
        return f"TreeNode({self.val!r})"
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def levelOrder(self, root: TreeNode) -> List[List[int]]:
        """Collect node values grouped by depth, left to right per level."""
        result = []
        frontier = [root] if root else []
        while frontier:
            # Record the current level, then advance to all its children.
            result.append([node.val for node in frontier])
            nxt = []
            for node in frontier:
                if node.left:
                    nxt.append(node.left)
                if node.right:
                    nxt.append(node.right)
            frontier = nxt
        return result
# @lc code=end
# Smoke test with the LeetCode 102 sample tree: expected [[3], [9, 20], [15, 7]].
left_subtree = TreeNode(9)
right_subtree = TreeNode(20, TreeNode(15), TreeNode(7))
root = TreeNode(3, left_subtree, right_subtree)
r = Solution().levelOrder(root)
print(r)
| largomst/leetcode-problem-solution | 102.二叉树的层序遍历.2.py | 102.二叉树的层序遍历.2.py | py | 1,060 | python | en | code | 0 | github-code | 13 |
21660436864 | import jax
import jax.numpy as jnp
import functools
import psutil
import diffrax as dfx
import sys, gc
def value_and_jacfwd(f, x):
    """Evaluate ``f(x)`` and its forward-mode Jacobian in one pass.

    Only works on ndarrays, not pytrees.
    Source: https://github.com/google/jax/pull/762#issuecomment-1002267121
    """
    one_hots = jnp.eye(x.size, dtype=x.dtype)
    jvp_at_x = lambda tangent: jax.jvp(f, (x,), (tangent,))
    # vmap over the one-hot tangents; out_axes=(None, 1) leaves the (shared)
    # primal output unmapped and stacks the tangents as Jacobian columns.
    y, jac = jax.vmap(jvp_at_x, out_axes=(None, 1))(one_hots)
    return y, jac
def value_and_jacrev(f, x):
    """Evaluate ``f(x)`` and its reverse-mode Jacobian in one pass.

    Only works on ndarrays, not pytrees.
    Source: https://github.com/google/jax/pull/762#issuecomment-1002267121

    NOTE(review): the vjp pullback yields one cotangent per primal, so the
    returned jac is a 1-tuple wrapping the Jacobian matrix — unlike
    value_and_jacfwd, which returns the bare matrix. Preserved as-is.
    """
    y, pullback = jax.vjp(f, x)
    cotangents = jnp.eye(y.size, dtype=y.dtype)
    # Each vmapped pullback call produces one Jacobian row.
    rows = jax.vmap(pullback)(cotangents)
    return y, rows
def clear_caches():
    """Clear all kind of jax and diffrax caches.

    Only acts when the current process uses more than 4 GiB of RSS; otherwise
    this is a no-op.
    """
    # see https://github.com/patrick-kidger/diffrax/issues/142
    process = psutil.Process()
    if process.memory_info().rss > 4 * 2**30:  # >4GB memory usage
        # Call cache_clear() on every cached attribute of every loaded jax module.
        for module_name, module in sys.modules.items():
            if module_name.startswith("jax"):
                for obj_name in dir(module):
                    obj = getattr(module, obj_name)
                    if hasattr(obj, "cache_clear"):
                        obj.cache_clear()
        # NOTE(review): _cached is a private diffrax attribute — may break
        # across diffrax versions.
        dfx.diffeqsolve._cached.clear_cache()
        gc.collect()
        print("Cache cleared")
| fhchl/quant-comp-ls-mod-ica22 | src/jaxutil.py | jaxutil.py | py | 1,395 | python | en | code | 7 | github-code | 13 |
3995532547 | # Aumentos Múltiplos
# Read the salary and apply the raise bracket: 10% at or above R$1250.00,
# 15% below it (exact arithmetic kept to preserve float results).
sal = float(input('Digite o Valor do seu Salário: '))
if sal < 1250.0:
    salario_corrigido = sal + (sal * 0.15)
    print(f'Seu Salário de R${sal} receberá um aumento de 15%, você receberá {salario_corrigido} ')
else:
    salario_corrigido = sal + (sal * 0.10)
    print(f'Seu Salário de R${sal} receberá um aumento de 10%, você receberá {salario_corrigido} ')
26948285633 | import pyrebase
import json
class DBhandler:
    """Data-access layer over the app's Firebase realtime database (pyrebase).

    Collections used: ``user`` (accounts), ``STORE`` (restaurant records),
    ``MENU/<store>/<food>`` (menu items) and ``REVIEW/<store>/<user>``
    (one review per user per store).
    """

    def __init__(self):
        # Credentials live outside version control in a local JSON file.
        with open('flask-server/authentication/firebase_auth.json') as f:
            config = json.load(f)
        firebase = pyrebase.initialize_app(config)
        self.db = firebase.database()

    # ---------------- users ----------------

    def insert_user(self, data, pwd):
        """Store a new account record; always returns True.

        *pwd* is stored as given — presumably hashed by the caller; confirm.
        NOTE(review): the duplicate-email check is currently bypassed (as in
        the original), so two accounts may share an email.
        """
        user_info = {
            "nickname": data['nickname'],
            "email": data['email'],
            "pwd": pwd
        }
        self.db.child("user").push(user_info)
        return True

    def user_duplicate_check(self, id_string):
        """Return True when no existing account uses *id_string* as its email."""
        users = self.db.child("user").get()
        if str(users.val()) == "None":  # empty collection: first registration
            return True
        for res in users.each():
            value = res.val()
            if value['email'] == id_string:
                return False
        return True

    def find_user(self, email_, pwd_):
        """Login check: True iff an account matches both email and password.

        NOTE(review): plaintext comparison of stored pwd — confirm hashing
        happens upstream.
        """
        users = self.db.child("user").get()
        # `or []` guards the empty-collection case (each() returns None).
        for res in (users.each() or []):
            value = res.val()
            if value['email'] == email_ and value['pwd'] == pwd_:
                return True
        return False

    # ---------------- reviews / scores ----------------

    def AverageScore(self, storename):
        """Recompute the mean review score for *storename*, push it under
        STORE/<storename>, and return it (None when there are no reviews)."""
        users = self.db.child("REVIEW").child(storename).get().val()
        if not users:
            return None
        total = 0.0
        count = 0
        for user in users:
            # BUG FIX: the old code called .get("storescore"), which passes
            # "storescore" as pyrebase's auth-token argument and then summed
            # raw response objects (TypeError). Read the child value instead.
            score = self.db.child("REVIEW").child(storename).child(user).child("storescore").get().val()
            total += float(score)  # scores arrive as form strings — TODO confirm
            count += 1
        avg = total / count
        # BUG FIX: {"avg_score" ,avg} was a *set* literal; push a mapping.
        self.db.child("STORE").child(storename).push({"avg_score": avg})
        return avg

    # ---------------- stores ----------------

    def insert_store(self, name, data, img_path):
        """Register a new store; returns False when *name* is already taken."""
        store_info = {
            "storename": data['storename'],
            "location": data['location'],
            "phonenumber": data['phonenumber'],
            "time1": data['time1'],
            "time2": data['time2'],
            "food": data['food'],
            "park": data['park'],
            "price1": data['price1'],
            "price2": data['price2'],
            "site": data['site'],
            "img_path": img_path,
        }
        if img_path == "":
            store_info["img_path"] = "symbol-mark.png"  # default placeholder
        if self.store_duplicate_check(name):
            self.db.child("STORE").push(store_info)
            return True
        return False

    def store_duplicate_check(self, name):
        """Return True when no store named *name* exists yet."""
        stores = self.db.child("STORE").get()
        for res in (stores.each() or []):
            value = res.val()
            if value['storename'] == name:
                return False
        return True

    def get_store(self):
        """Return every store record (raw Firebase payload)."""
        return self.db.child("STORE").get().val()

    def get_store_info(self, storename):
        """Return the record stored at STORE/<storename>."""
        # BUG FIX: the old code returned the bound method ``.val`` instead of
        # calling it, so callers received a function object, never data.
        return self.db.child("STORE").child(storename).get().val()

    def get_store_byname(self, storename):
        """Linear search for a store by its 'storename' field; '' when absent."""
        stores = self.db.child("STORE").get()
        target_value = ""
        for res in (stores.each() or []):
            value = res.val()
            if value['storename'] == storename:
                target_value = value
                break
        return target_value

    def update_store(self, storename, data, img_path):
        """Overwrite an existing store's fields, keeping the old image when no
        new one is supplied. Returns True on success, False when not found."""
        store_info = {
            "location": data['location'],
            "phonenumber": data['phonenumber'],
            "time1": data['time1'],
            "time2": data['time2'],
            "food": data['food'],
            "park": data['park'],
            "price1": data['price1'],
            "price2": data['price2'],
            "site": data['site'],
        }
        if img_path:
            store_info["img_path"] = img_path
        stores = self.db.child("STORE").get()
        for res in (stores.each() or []):
            value = res.val()
            if value['storename'] == storename:
                self.db.child("STORE").child(res.key()).update(store_info)
                return True
        # Explicit failure result instead of the old implicit None (both falsy).
        return False

    # ---------------- menus ----------------

    def insert_menu(self, storename, data, img_path):
        """Add one menu item under MENU/<storename>/<food name>."""
        menu_info = {
            "food": data['food'],
            "money": data['money'],
            "img_path": img_path,
            "storename": storename
        }
        if img_path == "":
            menu_info["img_path"] = "symbol-mark.png"
        menuname = data['food']
        if self.menu_duplicate_check(storename, menuname):
            self.db.child("MENU").child(storename).child(menuname).set(menu_info)
            return True
        return False

    def menu_duplicate_check(self, storename, menuname):
        """Return True when the store has no menu item called *menuname* yet."""
        menudata = self.db.child("MENU").child(storename).get()
        if isinstance(menudata.val(), type(None)):
            return True
        for res in menudata.each():
            if res.key() == menuname:
                return False
        return True

    def get_menu(self, storename):
        """Return all menu items of one store (raw Firebase payload)."""
        return self.db.child("MENU").child(storename).get().val()

    # ---------------- reviews ----------------

    def insert_review(self, storename, data, img_path):
        """Create or overwrite the review of *storename* by data['username']."""
        review_info = {
            "timestamp": data['timestamp'],
            "storename": storename,
            "storescore": data['storescore'],
            "username": data['username'],
            "reviewtitle": data['reviewtitle'],
            "reviewdesc": data['reviewdesc'],
            "img_path": img_path
        }
        if img_path == "":
            review_info["img_path"] = "symbol-mark.png"
        username = data['username']
        self.db.child("REVIEW").child(storename).child(username).set(review_info)
        return True

    def get_review(self, storename):
        """Return all reviews of one store (raw Firebase payload)."""
        return self.db.child("REVIEW").child(storename).get().val()

    def get_all_review(self):
        """Return the whole REVIEW collection keyed by store name."""
        # Cleanup: the old version also re-fetched every store's reviews in a
        # loop and discarded the results; only this payload was ever returned.
        return self.db.child("REVIEW").get().val()
| euWna/osp_project | flask-server/database.py | database.py | py | 8,697 | python | en | code | 0 | github-code | 13 |
def proteins(strand):
    """Translate an RNA *strand* into the list of protein names it encodes.

    Codons (3 bases) are read left to right; translation stops at the first
    stop codon (UAA / UAG / UGA). Unknown codons map to None, matching the
    original ``dict.get`` behaviour.
    """
    codon_table = {
        'AUG': 'Methionine',
        'UUU': 'Phenylalanine', 'UUC': 'Phenylalanine',
        'UUA': 'Leucine', 'UUG': 'Leucine',
        'UCU': 'Serine', 'UCC': 'Serine', 'UCA': 'Serine', 'UCG': 'Serine',
        'UAU': 'Tyrosine', 'UAC': 'Tyrosine',
        'UGU': 'Cysteine', 'UGC': 'Cysteine',
        'UGG': 'Tryptophan',
        'UAA': 'STOP', 'UAG': 'STOP', 'UGA': 'STOP',
    }
    # Avoid shadowing the builtin `list` (the old code did) and step the
    # index by codon width instead of hand-comparing the three stop strings.
    result = []
    for i in range(0, len(strand), 3):
        protein = codon_table.get(strand[i:i + 3])
        if protein == 'STOP':
            break
        result.append(protein)
    return result
| CatalinPetre/Exercism | python/protein-translation/protein_translation.py | protein_translation.py | py | 669 | python | en | code | 0 | github-code | 13 |
#4) Go to https://catalog.umkc.edu/course-offerings/graduate/comp-sci/ and fetch the name and overview of
# each course. Hint: use the BeautifulSoup package.
from bs4 import BeautifulSoup
import requests
# Enter URL to fetch the data
# Fetch the catalog page and parse it.
url = requests.get("https://catalog.umkc.edu/course-offerings/graduate/comp-sci/")
data = url.text
output = BeautifulSoup(data, "html.parser")
# BUG FIX: the old outer loop over every 'courseblocktitle' element re-ran
# this zip and re-printed the entire catalog once per course; zip() already
# pairs each title with its description, so one pass suffices.
for title, detail in zip(output.findAll('span', {'class': 'title'}),
                         output.findAll('p', {'class': 'courseblockdesc'})):
    # Print course name and course description.
    print("Course Name: ", title.string)
    print("Course Description: ", detail.string)
    print('\n')
| adtmv7/CS5590-490-Python-Deep-Learning | LAB1/Source/Lab1_Q4.py | Lab1_Q4.py | py | 823 | python | en | code | 2 | github-code | 13 |
29330455573 | import random
import math
import copy
import numpy as np
from config.slam_settings import unitGridSize
from config.ik_settings import MOVING_FRONT_SAFE_DISTANCE, MOVING_REAR_SAFE_DISTANCE
from matplotlib import pyplot as plt
class Node():
    """A single RRT* tree vertex in occupancy-grid (pixel) coordinates."""
    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.cost = 0.0      # accumulated path cost from the start node
        self.parent = None   # key of the parent node in RRTStar.node_list
        self.leaf = True     # True while no other node has this one as parent
class RRTStar():
    """Path finding RRT* og-based routine"""

    def __init__(self,
                 state,
                 real_goal,
                 expand_dist=10,
                 goal_sample_rate=15,
                 max_iter=1000,
                 near_search_rcoef=4):
        """
        Parameters
        ----------
        state: ParticleFilter class object
            Contains SLAM calculated parameters such as occupation grid map, trajectory...
            ParticleFilter class object
        real_goal: list
            Goal point in global coordinates in millimeters
            Position [x,y]
        expand_dist: int
            Distance increment in map pixels for Boundiung box collision algorithm.
            Should be in range of wall thickness(OGrig parameter)
        goal_sample_rate: int
            Rate to perform sampling from goal point
        max_iter: int
            Number of sampling iterations
        near_search_rcoef: int
            coefficient of searching radius for nearest nodes.
            Result search radius is: expandDis * near_search_rcoef
        """
        # Robot body parameters
        # assumes robot bbox as rectangle
        self.robot_half_length = MOVING_FRONT_SAFE_DISTANCE//unitGridSize
        self.robot_half_width = MOVING_REAR_SAFE_DISTANCE//unitGridSize
        # Occupancy probability = visited/total; cells > 0.5 become obstacles (1).
        self.obstacle_map = np.flipud(state.og.occupancyGridVisited / state.og.occupancyGridTotal)
        self.obstacle_map = np.where(self.obstacle_map > 0.5, 1, 0)
        # Sampling limits shrunk by the robot half-width plus a 20-px margin.
        self.x_limit = (self.obstacle_map.shape[1] - self.robot_half_width-20)
        self.y_limit = (self.obstacle_map.shape[0] - self.robot_half_width-20)
        self.map_xrange = state.og.mapXLim
        self.map_yrange = state.og.mapYLim
        self.unit_grid_size = unitGridSize
        self.real_pos_x = state.xTrajectory[-1]
        self.real_pos_y = state.yTrajectory[-1]
        # Note the y sign flip: map rows grow downward while world y grows upward.
        self.map_pos_x, self.map_pos_y = self.real_to_map_position(self.real_pos_x, -self.real_pos_y)
        self.map_theta = state.thetaTrajectory[-1]
        self.map_goal_x, self.map_goal_y = self.real_to_map_position(real_goal[0], -real_goal[1])
        self.start = Node(self.map_pos_x, self.map_pos_y)
        self.end = Node(self.map_goal_x, self.map_goal_y)
        self.expand_dist = expand_dist
        self.goal_sample_rate = goal_sample_rate
        self.max_iter = max_iter
        self.near_search_rcoef = near_search_rcoef
        self.get_bbox_body_contour()

    def get_bbox_body_contour(self):
        """
        Calculate robot body contour points as rectangular bbox bias around center point
        Rotation applied in future steps
        """
        corners = np.array([
            [-self.robot_half_length, -self.robot_half_width],
            [self.robot_half_length, -self.robot_half_width],
            [self.robot_half_length, self.robot_half_width],
            [-self.robot_half_length, self.robot_half_width]
        ])
        x1, y1, x2, y2, x3, y3, x4, y4 = corners.flatten()
        # Calculate integer points on the edges of the rectangle
        x_top = np.arange(x1, x2 + 1)
        y_top = np.full_like(x_top, y1)
        x_right = np.full(y3 - y2 + 1, x2)
        y_right = np.arange(y2, y3 + 1)
        x_bottom = np.arange(x3, x4 - 1, -1)
        y_bottom = np.full_like(x_bottom, y3)
        x_left = np.full(y4 - y1 + 1, x4)
        y_left = np.arange(y4, y1 - 1, -1)
        x = np.concatenate((x_top, x_right, x_bottom, x_left))
        y = np.concatenate((y_top, y_right, y_bottom, y_left))
        # Remove duplicate points
        points = np.column_stack((x, y))
        self.bbox_contour = np.unique(points, axis=0)

    def get_bbox_anchor_rotation_bias(self, estimated_theta):
        """
        Get bbox corners as offset from zero center point in map coordinates
        Further this precalc used to calculate bbox points around moving center point
        """
        rotation_matrix = np.array([[np.cos(estimated_theta), -np.sin(estimated_theta)],
                                    [np.sin(estimated_theta), np.cos(estimated_theta)]])
        rotated_bbox_bias = np.dot(self.bbox_contour, rotation_matrix).astype(int)
        return rotated_bbox_bias

    def get_bbox_anchor_points(self, estimated_x, estimated_y, bbox_bias):
        """
        Calculate anchor points of bbox for center point in map space base on raw biases
        see get_bbox_anchor_rotation_bias()
        input and return format: [(x1,y1),..., (xn, yn)]
        """
        bbox_anchors = bbox_bias + [estimated_x, estimated_y]
        return bbox_anchors

    def real_to_map_position(self, x, y):
        # World millimeters -> occupancy-grid pixel indices.
        map_x = int((x - self.map_xrange[0]) / self.unit_grid_size)
        map_y = int((y + self.map_yrange[1]) / self.unit_grid_size)  # y is inverted
        return (map_x, map_y)

    def map_path_to_real_position(self, path):
        # Inverse of real_to_map_position applied column-wise, in place.
        path[:, 0] = (path[:, 0] * self.unit_grid_size) + self.map_xrange[0]
        path[:, 1] = (path[:, 1] * self.unit_grid_size) - self.map_yrange[1]
        return path

    def planning(self):
        """Main path planning RRT function"""
        self.node_list = {0: self.start}
        for i in range(self.max_iter):
            rnd = self.get_random_point()
            nind = self.get_nearest_list_index(self.node_list, rnd)  # get nearest node index to random point
            newNode, theta = self.steer(rnd, nind)  # generate new node from that nearest node in direction of random point
            bbox_theta_bias = self.get_bbox_anchor_rotation_bias(theta)
            if self.is_not_collide(newNode, bbox_theta_bias):  # if it does not collide
                nearinds = self.find_near_nodes(newNode, self.near_search_rcoef)  # find nearest nodes to newNode in search range
                newNode = self.choose_parent(newNode,
                                             nearinds)  # from that nearest nodes find the best parent to newNode
                self.node_list[newNode.parent].leaf = False
                self.node_list[i + 100] = newNode  # add newNode to nodeList
                self.rewire(i + 100, newNode, nearinds)  # make newNode a parent of another node if necessary
            # NOTE(review): `i` never exceeds max_iter inside range(max_iter),
            # so this leaf-pruning branch is unreachable dead code.
            if i > self.max_iter:
                leaves = [key for key, node in self.node_list.items() if node.leaf == True]
                ind = leaves[random.randint(0, len(leaves) - 1)]
                self.node_list[self.node_list[ind].parent].leaf = True
                for value in self.node_list.values():
                    if value.parent == self.node_list[ind].parent and value != self.node_list[ind]:
                        self.node_list[self.node_list[ind].parent].leaf = False
                        break
                self.node_list.pop(ind)
        # generate course
        lastIndex = self.get_best_last_index()
        if lastIndex is None:
            return None
        path = self.gen_final_course(lastIndex)
        return path

    def choose_parent(self, newNode, nearinds):
        # Pick the collision-free near node that minimizes cost-to-come.
        if len(nearinds) == 0:
            return newNode
        dlist = []
        for i in nearinds:
            dx = newNode.x - self.node_list[i].x
            dy = newNode.y - self.node_list[i].y
            d = math.sqrt(dx ** 2 + dy ** 2)
            theta = math.atan2(dy, dx)
            if self.check_collision_extend(self.node_list[i], theta, d):
                dlist.append(self.node_list[i].cost + d)
            else:
                dlist.append(float("inf"))  # unreachable candidate
        mincost = min(dlist)
        minind = nearinds[dlist.index(mincost)]
        if mincost == float("inf"):
            # Every candidate collides; keep the parent assigned by steer().
            return newNode
        newNode.cost = mincost
        newNode.parent = minind
        return newNode

    def steer(self, rnd, nind):
        """Generate node for one step in direction of random point"""
        # expand tree
        nearestNode = self.node_list[nind]
        theta = math.atan2(rnd[1] - nearestNode.y, rnd[0] - nearestNode.x)
        newNode = copy.deepcopy(nearestNode)
        newNode.x += int(round(self.expand_dist * math.cos(theta)))
        newNode.y += int(round(self.expand_dist * math.sin(theta)))
        newNode.cost += self.expand_dist
        newNode.parent = nind
        newNode.leaf = True
        return newNode, theta

    def get_random_point(self):
        # Uniform sample over the map, biased toward the goal goal_sample_rate% of the time.
        if random.randint(0, 100) > self.goal_sample_rate:
            rnd = [random.uniform(0, self.x_limit), random.uniform(0, self.y_limit)]
        else:  # goal point sampling
            rnd = [self.end.x, self.end.y]
        return rnd

    def get_best_last_index(self):
        # Among nodes within one expansion step of the goal, pick the cheapest.
        disglist = [(key, self.calc_dist_to_goal(node.x, node.y)) for key, node in self.node_list.items()]
        goalinds = [key for key, distance in disglist if distance <= self.expand_dist]
        if len(goalinds) == 0:
            return None
        mincost = min([self.node_list[key].cost for key in goalinds])
        for i in goalinds:
            if self.node_list[i].cost == mincost:
                return i
        return None

    def gen_final_course(self, goalind):
        # Walk parent links back to the start, then reverse to start->goal order.
        path = [[self.end.x, self.end.y]]
        while self.node_list[goalind].parent is not None:
            node = self.node_list[goalind]
            path.append([node.x, node.y])
            goalind = node.parent
        path.append([self.start.x, self.start.y])
        return np.array(path)[::-1]

    def calc_dist_to_goal(self, x, y):
        # Euclidean distance from (x, y) to the goal node.
        return np.linalg.norm([x - self.end.x, y - self.end.y])

    def find_near_nodes(self, newNode, value):
        # Keys of all nodes within radius expand_dist * value of newNode.
        r = self.expand_dist * value
        dlist = [(key, (node.x - newNode.x) ** 2 + (node.y - newNode.y) ** 2) for key, node in self.node_list.items()]
        nearinds = [key for key, distance in dlist if distance <= r ** 2]
        return nearinds

    def rewire(self, newNodeInd, newNode, nearinds):
        # Re-parent any near node that becomes cheaper to reach via newNode.
        # nnode = len(self.nodeList)
        for i in nearinds:
            nearNode = self.node_list[i]
            dx = newNode.x - nearNode.x
            dy = newNode.y - nearNode.y
            d = math.sqrt(dx ** 2 + dy ** 2)
            scost = newNode.cost + d
            if nearNode.cost > scost:
                theta = math.atan2(dy, dx)
                if self.check_collision_extend(nearNode, theta, d):
                    # Old parent may become a leaf again unless another child remains.
                    self.node_list[nearNode.parent].leaf = True
                    for value in self.node_list.values():
                        if value.parent == nearNode.parent and value != nearNode:
                            self.node_list[nearNode.parent].leaf = False
                            break
                    nearNode.parent = newNodeInd
                    nearNode.cost = scost
                    newNode.leaf = False

    def check_collision_extend(self, nearNode, theta, d):
        # Sweep the robot bbox along the segment in expand_dist increments.
        tmpNode = copy.deepcopy(nearNode)
        bbox_theta_bias = self.get_bbox_anchor_rotation_bias(theta)
        for i in range(int(d / self.expand_dist)):
            tmpNode.x += int(round(self.expand_dist * math.cos(theta)))
            tmpNode.y += int(round(self.expand_dist * math.sin(theta)))
            if not self.is_not_collide(tmpNode, bbox_theta_bias):
                return False
        return True

    def get_nearest_list_index(self, nodeList, rnd):
        # Key of the node closest (squared distance) to the sampled point.
        dlist = [(key, (node.x - rnd[0]) ** 2 + (node.y - rnd[1]) ** 2) for key, node in nodeList.items()]
        minind = min(dlist, key=lambda d: d[1])
        return minind[0]

    def is_not_collide(self, node, bbox_bias):
        """
        Check collision if any of anchor point in obstacle
        """
        bbox_contour = self.get_bbox_anchor_points(node.x, node.y, bbox_bias)  # get bbox rectangle contour points
        for x, y in bbox_contour:
            if self.obstacle_map[y][x] == 1:
                return False  # collision case
        return True  # safe case

    def plot_state(self, bbox_contour=None):
        """For debugging use"""
        plt.figure(figsize=(20, 20))
        obstacle_map = self.obstacle_map
        obstacle_map = 1 - obstacle_map  # invert so free space renders white
        im = plt.imshow(obstacle_map, cmap='gray')
        plt.scatter(self.map_pos_x, self.map_pos_y, c='r', marker='.')
        plt.scatter(self.map_goal_x, self.map_goal_y, c='b', marker='.')
        if bbox_contour is not None:
            for pt in bbox_contour:
                plt.scatter(pt[0], pt[1], c='red', marker=',')
        # Draw every tree edge parent -> child.
        for node in self.node_list.values():
            if node.parent is not None:
                plt.plot([self.node_list[node.parent].x, node.x], [self.node_list[node.parent].y, node.y], c='orange')
        # plt.xlim((450, 850))
        # plt.ylim((450, 850))
        msg = f"Real pos: {round(self.real_pos_x)}, {round(self.real_pos_y)}, theta {round(self.map_theta)}"
        plt.colorbar(im)
        plt.grid()
        plt.title(msg)
        plt.show()
| Nickel-nc/Sprite | scripts/path_finding/rrt_star.py | rrt_star.py | py | 13,289 | python | en | code | 0 | github-code | 13 |
70327376979 | import sys
import math
import re
import os
import base64
import streamlit as st
def amorpm(temp):
    """Minutes between two clock readings that share the same am/pm suffix.

    *temp* holds two regex matches: ((hour, minute, suffix), ...). A 12
    o'clock start bumps the end hour into the next 12-hour block.
    """
    start_h, start_m = int(temp[0][0]), int(temp[0][1])
    end_h, end_m = int(temp[1][0]), int(temp[1][1])
    if start_h == 12 and end_h != 12:
        end_h += 12
    if start_h < end_h:
        total = (end_h - start_h) * 60
    elif start_h == end_h:
        total = 0
    else:
        total = (start_h - end_h) * 60
    # Minute difference may be negative; same net effect as the original's
    # add/subtract branches.
    return total + (end_m - start_m)
def ampm(temp):
    """Minutes from an am start to a pm end.

    The pm end hour is shifted to 24-hour form (+12 unless it is 12); a
    12 o'clock start first pulls the end hour back by 12.
    """
    start_h, start_m = int(temp[0][0]), int(temp[0][1])
    end_h, end_m = int(temp[1][0]), int(temp[1][1])
    if start_h == 12:
        end_h -= 12
    if end_h != 12:
        end_h += 12
    if start_h < end_h:
        total = (end_h - start_h) * 60
    elif start_h == end_h:
        total = 0
    else:
        total = (start_h - end_h) * 60
    return total + (end_m - start_m)
def pmam(temp):
    """Minutes from a pm start to an am end.

    NOTE(review): this body performs exactly the same arithmetic as ampm()
    (it was a byte-for-byte copy in the original) and does not add the
    midnight wrap one might expect for pm->am spans — behavior preserved,
    confirm intent.
    """
    start_h, start_m = int(temp[0][0]), int(temp[0][1])
    end_h, end_m = int(temp[1][0]), int(temp[1][1])
    if start_h == 12:
        end_h -= 12
    if end_h != 12:
        end_h += 12
    if start_h < end_h:
        total = (end_h - start_h) * 60
    elif start_h == end_h:
        total = 0
    else:
        total = (start_h - end_h) * 60
    return total + (end_m - start_m)
def expression(data):
    """Extract (hour, minute, am/pm-suffix) triples from every line of *data*.

    Returns one list of matches per input line, in order; lines without any
    clock reading yield an empty list.
    """
    clock_pattern = re.compile(r'([01][0-9]|[0-9]):([0-5][0-9]|[0-9])([apAP][mM])')
    return [clock_pattern.findall(line) for line in data.split("\n")]
def output(m):
    """Render *m* total minutes in several equivalent breakdowns via streamlit."""
    raw_h = m/60  # total duration in fractional hours
    floor_h = math.floor(raw_h)  # whole hours
    ceil_m = math.ceil((raw_h - floor_h)*60)  # remaining minutes, rounded up
    raw_d = math.floor(raw_h/24)  # whole days
    hour = math.floor((((m/60)/24)-raw_d)*24)  # hours left after whole days
    mi = math.ceil((((((m/60)/24)-raw_d)*24)-hour)*60)  # minutes left after that
    # Omit the "<hours>hours" field when the day-remainder has no whole hour.
    if(hour == 0):
        st.write(f"{floor_h}hours {ceil_m}minutes -- {m} minutes -- {raw_d}days {mi}minutes -- {raw_h}hours")
    else:
        st.write(f"{floor_h}hours {ceil_m}minutes -- {m} minutes -- {raw_d}days {hour}hours {mi}minutes -- {raw_h}hours")
def number_of_mins(data):
    """Sum the durations (in minutes) of all time-log pairs in *data*.

    *data* is the output of expression(): one list of (hour, minute, suffix)
    matches per line. Lines without matches are skipped. Each remaining line
    is assumed to hold at least two readings (start, end); a single reading
    raises IndexError, exactly as in the original.
    """
    total = 0
    for matches in data:
        if len(matches) != 0:
            pair = list(matches)
            start_suffix = pair[0][2].lower()
            end_suffix = pair[1][2].lower()
            # Dispatch on the am/pm halves; .lower() accepts exactly the same
            # four case variants per suffix that the original enumerated.
            if start_suffix == 'am' and end_suffix == 'am':
                total += amorpm(pair)
            elif start_suffix == 'pm' and end_suffix == 'pm':
                total += amorpm(pair)
            elif start_suffix == 'am' and end_suffix == 'pm':
                total += ampm(pair)
            elif start_suffix == 'pm' and end_suffix == 'am':
                total += pmam(pair)
    return total
if __name__ == "__main__" :
    st.title("TL parser")
    # Page background is inlined as a base64 data URI so no static route is needed.
    background = "background.jpg"
    background_ext = "jpg"
    st.markdown(
        f"""
        <style>
        .reportview-container {{
            background: url(data:image/{background_ext};base64,{base64.b64encode(open(background, "rb").read()).decode()})
        }}
        </style>
        """,
        unsafe_allow_html=True
    )
    # Button color overrides (plain CSS, so no f-string brace escaping here).
    st.markdown("""
    <style>
    div.stButton > button:first-child {
        background-color: #f44336;
        color:black;
    }
    div.stButton > button:hover {
        background-color: white;
        color:black;
    }
    </style>""", unsafe_allow_html=True)
    file = st.file_uploader("", "txt")
    if st.button("Submit"):
        doc = str(file.read(),"utf-8")
        if not doc:
            st.write("Not Found")
        else:
            # Only the first 8 characters are checked — exactly the length of
            # the required "Time Log" header.
            s = doc[0:8]
            x = re.search("Time Log", s)
            if x:
                formatted = expression(doc)
                # NOTE(review): this rebinds the module-level function name
                # number_of_mins to an int; harmless per rerun (streamlit
                # re-executes the script from the top) but worth renaming.
                number_of_mins = number_of_mins(formatted)
                output(number_of_mins)
            else:
                st.write(" Time Log not mentioned")
| kingcv/tlparser | main.py | main.py | py | 5,451 | python | en | code | 0 | github-code | 13 |
72951884177 | import json
from collections import Counter
def is_balanced(inp_str):
    '''
    Check if input string has balanced brackets, e.g.:
    {[()]} - balanced
    {{[ }}] - unbalanced
    {()}[]() - balanced

    Uses a stack so that both the counts AND the nesting order of
    '{}', '[]' and '()' are verified; the previous count-only check
    wrongly accepted strings such as '}{' and ignored parentheses
    entirely.  Non-bracket characters are ignored.

    :param inp_str: string to inspect
    :return: bool: True if every bracket is properly matched and nested
    '''
    # Map each closing bracket to the opener it must match.
    closers = {
        '}': '{',
        ']': '[',
        ')': '(',
    }
    openers = set(closers.values())
    stack = []
    for ch in inp_str:
        if ch in openers:
            stack.append(ch)
        elif ch in closers:
            # A closer with no opener waiting, or the wrong opener on
            # top of the stack, means the string is unbalanced.
            if not stack or stack.pop() != closers[ch]:
                return False
    # Any leftover openers were never closed.
    return not stack
def get_json_data_from_file(path):
    '''
    Load and decode the JSON content of a file.

    :param path: str path to a file containing JSON data
    :return: the decoded JSON object (typically a dict)
    :raise
        FileNotFoundError if path is not correct
    '''
    with open(path, 'r') as json_file:
        return json.load(json_file)
| broHeryk/test_fwrks | pytest_samples/logic_functions.py | logic_functions.py | py | 789 | python | en | code | 0 | github-code | 13 |
16980065514 | import socket
# Minimal remote-command client.  Protocol per command:
#   1. send the command text,
#   2. receive the byte length of the upcoming response,
#   3. send a one-byte acknowledgement so the server starts streaming,
#   4. read until the advertised number of bytes has arrived.
client = socket.socket()
client.connect(("localhost", 20000))
while True:
    cmd = input(">>:")
    if not len(cmd): continue  # ignore empty input; prompt again
    client.send(cmd.encode("utf-8"))
    # The server first reports the payload size as a decimal string.
    data_size = client.recv(1024).decode()
    client.send(b"1")  # ack: tells the server to begin sending the payload
    data = b""
    data_size = int(data_size)
    print(data_size)
    # recv() may deliver the payload in several chunks; accumulate until
    # the full advertised size has been received.
    while data_size > 0:
        tmp = client.recv(1024)
        data_size -= len(tmp)
        data += tmp
    else:
        # while/else: runs once the byte count reaches zero.
        print(f"输出{data.decode()}")
client.close()  # NOTE: unreachable -- the outer `while True` never breaks
71065924498 | import nasapy
import os
import pandas
from datetime import date, timedelta
import urllib.request
from database import Image, db
# NASA API key for the Astronomy-Picture-of-the-Day endpoint.
# NOTE(review): hard-coded credential committed to source -- should be read
# from an environment variable or config instead.
key = "3pdvIP08fK1EEb7QJ1HaJliJVaahITfuWeJ36hkF"
nasa = nasapy.Nasa(key = key)

# Fetch one APOD entry per day for 2019 (inclusive of 2020-01-01) and stage
# an Image row for each.
start_date = date(2019, 1, 1)
end_date = date(2020, 1, 1)
delta = timedelta(days=1)
while start_date <= end_date:
    apod = nasa.picture_of_the_day(date=start_date.strftime('%Y-%m-%d'))
    # NOTE(review): assumes every APOD response carries "url" and "hdurl";
    # video-of-the-day entries may lack these keys -- confirm before a
    # full-year run.
    title = apod["title"]
    explanation = apod["explanation"]
    url = apod["url"]
    hdurl = apod["hdurl"]
    image = Image(title=title,explanation=explanation,url=url,hdurl=hdurl)
    db.session.add(image)  # staged only; committed in one batch below
    start_date += delta

# Single commit after the loop: one database round-trip for all rows.
db.session.commit()
| EstebanLeiva/AstronomyAPI | src/image_population.py | image_population.py | py | 662 | python | en | code | 0 | github-code | 13 |
9384350177 |
from distutils.core import setup, Extension
import sys
# Pass -py3 to SWIG when building under Python 3 so the generated wrapper
# targets the Python 3 API; otherwise define PYTHON2 for the C side.
if sys.version_info[0] > 2:
    swig_opts = ['-py3']
else:
    swig_opts = ['-DPYTHON2']

# 'build_ext' must be run before 'build_py' to create 'corpustools.py'
# (SWIG emits the Python shim as a side effect of the extension build).
# Inject a 'build_ext' step ahead of the first build/install command,
# unless the caller already requested build_ext explicitly.
for i in range(1, len(sys.argv)):
    if sys.argv[i] == 'build_ext':
        break
    if sys.argv[i] == 'build' or sys.argv[i] == 'build_py' or sys.argv[i].startswith('install'):
        sys.argv = sys.argv[:i] + ['build_ext'] + sys.argv[i:]
        break

# C extension wrapping libtok via the SWIG interface file corpustools.i.
corpustools_module = Extension('_corpustools',
                               sources = ['corpustools.c', 'libtok.c', 'corpustools.i'],
                               depends = ['corpustools.h'],
                               swig_opts = swig_opts,
                               )

# NOTE(review): distutils is deprecated (removed in Python 3.12); a port to
# setuptools would be needed for modern interpreters.
setup(name = 'corpustools',
      version = '0.1',
      author = 'Peter Kleiweg',
      description = '''Tools for working with corpora''',
      ext_modules = [corpustools_module],
      py_modules = ['corpustools'],
      )
| rug-compling/Alpino | Suites/ChildesDutch/setup.py | setup.py | py | 987 | python | en | code | 19 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.