blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 246
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ff30e8932a6292b69bb900155874ffcfa1e06431 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_136/2930.py | 97be09473c78d8ee4bccfb81bd58eb99d9cd14ca | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | from __future__ import division
# Google Code Jam 2014 qualification, "Cookie Clicker Alpha" (Python 2).
# T = number of test cases.
T = input()
for i in range(T):
    # C = cost of one farm, F = extra cookies/sec per farm, X = target cookies.
    C, F, X = [float(x) for x in raw_input().split()]
    # production starts at 2 cookies per second.
    cookiesRate = 2
    # if a farm costs at least the target, buying can never pay off: just wait.
    if C >= X : print "Case #%d: %.7f" % (i+1, X/cookiesRate)
    else:
        timeElapsed = 0
        # keep buying farms while (time to afford a farm now + time to reach X
        # at the boosted rate) beats waiting for X at the current rate.
        while(C/cookiesRate + X/(cookiesRate+F) < X/cookiesRate):
            timeElapsed += C/cookiesRate
            cookiesRate += F
        timeElapsed += X/cookiesRate
print "Case #%d: %.7f" % (i+1, timeElapsed) | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
3b3e969d18a5a1b06d75911178cd64cceed64785 | e65fba18d73b9a0d2bb51b8903d9bfff4d621da6 | /pytd6.py | 068d407edadaf6b07cdbac1acfa3dca93671d843 | [] | no_license | stevendevine/pyTD6 | 23fb16c3afd00235759f6ecba6b7a13cf0ecef77 | 7c701db14fe9b286dc0fc29e8a1e580e249889c8 | refs/heads/main | 2023-07-15T22:16:20.458018 | 2021-08-24T21:47:17 | 2021-08-24T21:47:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,536 | py | import keyboard, mouse, json, pygetwindow, pywinauto, time, ctypes, pyautogui, PIL, pytesseract
from PIL import ImageFilter
from PIL import ImageChops
from typing import Tuple, Union
from exceptions import *
# required for the mouse.move() to not be offset when display scaling is enabled.
user32 = ctypes.windll.user32
user32.SetProcessDPIAware()
# point pytesseract at the locally installed Tesseract OCR binary (used to read the HUD).
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract"
# load json file with monkey information in it.
with open("monkeys.json") as monkeys_json:
    monkeys = json.load(monkeys_json)
# load json file with hero information in it.
with open("heroes.json") as heroes_json:
    heroes = json.load(heroes_json)
# load json file with hotkey information in it.
with open("hotkeys.json") as hotkeys_json:
    hotkeys = json.load(hotkeys_json)
# used to focus btd6 window without IPython error (https://github.com/asweigart/PyGetWindow/issues/16s)
def focus_window(window_title=None):
    """Give keyboard focus to the first window whose title matches window_title.

    Focusing goes through pywinauto rather than pygetwindow.activate() to
    avoid the IPython error described in
    https://github.com/asweigart/PyGetWindow/issues/16
    """
    target = pygetwindow.getWindowsWithTitle(window_title)[0]
    if not target.isActive:
        app = pywinauto.application.Application().connect(handle=target._hWnd)
        app.top_window().set_focus()
# used to round price to the nearest 5.
def price_round(x, base=5):
    """Round x to the nearest multiple of base (default 5).

    Uses Python's built-in round(), so exact halves follow its
    round-half-to-even behavior.
    """
    multiples = round(x / base)
    return multiples * base
# these 3 functions are used to get health, cash, and the round respectively.
def get_health():
    """OCR the current health value from the BTD6 HUD.

    Returns the health as an int, or 1 when the OCR text is unreadable.
    """
    focus_window("BloonsTD6")
    # grab only the HUD region that shows the health counter.
    screenshot = pyautogui.screenshot(region=[120, 20, 150, 40])
    # tesseract reads dark-on-light text best: invert, then binarize hard.
    inverted = ImageChops.invert(screenshot)
    binary = inverted.convert("L").point(lambda px: 255 if px > 10 else 0, mode="1")
    # restrict recognition to digits only.
    text = pytesseract.image_to_string(
        binary, config="-c tessedit_char_whitelist=0123456789 --psm 6",
    )
    try:
        return int(text)
    except ValueError:
        # unreadable OCR output; fall back to a minimal health of 1.
        return 1
def get_cash():
    """OCR the player's cash from the BTD6 HUD.

    Returns the cash amount as an int (leading "$" stripped), or 0 when
    the OCR output cannot be parsed.
    """
    focus_window("BloonsTD6")
    # grab only the HUD region that shows the cash counter.
    screenshot = pyautogui.screenshot(region=[320, 20, 240, 40])
    # tesseract reads dark-on-light text best: invert, then binarize hard.
    inverted = ImageChops.invert(screenshot)
    binary = inverted.convert("L").point(lambda px: 255 if px > 10 else 0, mode="1")
    # restrict recognition to "$" plus digits.
    text = pytesseract.image_to_string(
        binary, config="-c tessedit_char_whitelist=$0123456789 --psm 6",
    )
    try:
        # drop the leading "$" before converting.
        return int(text[1:])
    except ValueError:
        return 0
def get_round():
    """OCR the round counter ("current/total") from the BTD6 HUD.

    Returns a list of ints such as [13, 40], or [0] when unreadable.
    """
    focus_window("BloonsTD6")
    # grab only the HUD region that shows the round counter.
    screenshot = pyautogui.screenshot(region=[1360, 30, 200, 40])
    # tesseract reads dark-on-light text best: invert, then binarize hard.
    inverted = ImageChops.invert(screenshot)
    binary = inverted.convert("L").point(lambda px: 255 if px > 10 else 0, mode="1")
    # restrict recognition to "/" plus digits.
    text = pytesseract.image_to_string(
        binary, config="-c tessedit_char_whitelist=/0123456789 --psm 6",
    )
    try:
        return [int(part) for part in text.split("/")]
    except ValueError:
        return [0]
class Monkey:
    """Automates one BTD6 monkey through synthetic keyboard/mouse input.

    Tracks the monkey's placement, upgrade tiers, targeting mode and sold
    state, and can look up description/price metadata from monkeys.json.
    BTD6 must be running for the input-sending methods to work.
    """

    def __init__(self, monkey: str, delay: float = 0.1):
        """Create an (unplaced) monkey.

        monkey: name key used in monkeys.json and hotkeys.json.
        delay: pause in seconds between synthetic input events.
        """
        # initialize monkey's attributes.
        self.name = monkey
        self.delay = delay
        # upgrade tiers for the three upgrade paths.
        self.upgrades = [0, 0, 0]
        self.targeting_options = ["First", "Last", "Close", "Strong"]
        self.targeting = "First"
        self.sold = False
        self.placed = False
        # update information about monkey
        # self.get_info()

    def place(self, coordinates: Tuple[int, int]):
        """Place the monkey at screen position (x, y) via its hotkey.

        Raises MonkeyPlaced if already placed, CoordinateError if the
        coordinates are not a 2-element list/tuple.
        """
        # raise MonkeyPlaced if the monkey has already been placed.
        if self.placed:
            raise MonkeyPlaced
        # raise CoordinateError if invalid type or tuple length.
        if (type(coordinates) != list) and (type(coordinates) != tuple):
            raise CoordinateError
        if len(coordinates) != 2:
            raise CoordinateError
        # activate Bloons TD 6 window.
        focus_window("BloonsTD6")
        # move to the monkey's position
        # send the hotkey for the monkey
        # left click to place the monkey
        # time.sleep required for the monkey to be placed in time.
        mouse.move(coordinates[0], coordinates[1])
        time.sleep(self.delay)
        keyboard.send(hotkeys["Monkeys"][self.name])
        time.sleep(self.delay)
        mouse.click()
        time.sleep(self.delay)
        # record the coordinates of the monkey.
        self.coordinates = coordinates
        # record that the monkey has been placed.
        self.placed = True

    def select(self):
        """Click the monkey on screen, opening its in-game menu."""
        # raise exceptions if the monkey hasn't been placed or has been already sold.
        if not self.placed:
            raise MonkeyNotPlaced
        if self.sold:
            raise MonkeySold
        # raise CoordinateError if invalid type or tuple length.
        if (type(self.coordinates) != list) and (type(self.coordinates) != tuple):
            raise CoordinateError
        if len(self.coordinates) != 2:
            raise CoordinateError
        mouse.move(self.coordinates[0], self.coordinates[1])
        time.sleep(self.delay)
        mouse.click()
        time.sleep(self.delay)

    def upgrade(self, upgrades: Tuple[int, int, int], skip_esc: bool = False):
        """Buy upgrades until the monkey reaches the given path tiers.

        upgrades: target tiers for the three paths; must satisfy BTD6 rules
        (at most two paths in use, only one path above tier 2, tiers 0-5),
        otherwise UpgradeError is raised.
        skip_esc: when True, leave the upgrade menu open afterwards.
        """
        # raise exceptions if the monkey hasn't been placed or has been already sold.
        if not self.placed:
            raise MonkeyNotPlaced
        if self.sold:
            raise MonkeySold
        # raise UpgradeError if invalid type or tuple length.
        if (type(upgrades) != list) and (type(upgrades) != tuple):
            raise UpgradeError
        # raise UpgradeError if all paths have tiers active.
        if len(upgrades) != 3:
            raise UpgradeError
        if upgrades.count(0) == 0:
            raise UpgradeError
        # raise UpgradeError there is a path above the 5th tier or below the base tier.
        if max(upgrades) > 5 or min(upgrades) < 0:
            raise UpgradeError
        # raise UpgradeError if there is more than one path at tier 3 or higher
        third_tier_upgrade_count = len([i for i in upgrades if i >= 3])
        if third_tier_upgrade_count > 1:
            raise UpgradeError
        # activate Bloons TD 6 window.
        focus_window("BloonsTD6")
        # move to the monkey's position
        # send the hotkey for (current upgrade - previous upgrade)
        # send escape to get out of upgrade menu
        self.select()
        for path in range(len(upgrades)):
            # only buy the tiers the monkey does not have yet on this path.
            for tier in range(upgrades[path] - self.upgrades[path]):
                keyboard.send(hotkeys["Monkeys"]["Upgrades"][path])
                time.sleep(self.delay)
        if not skip_esc:
            keyboard.send("esc")
            time.sleep(self.delay)
        # record the upgrades of the monkey.
        self.upgrades = upgrades
        # update information about monkey
        # self.get_info(self.name)

    def target(self, targeting: str):
        """Switch the monkey's targeting to one of targeting_options.

        Cycles the in-game targeting with the forward/backward hotkeys,
        choosing the direction from the index difference.
        """
        # raise TargetingError if targeting not in targeting_options.
        if targeting not in self.targeting_options:
            raise TargetingError
        # find difference between indexes of new targeting and old targeting
        targeting_index_old = self.targeting_options.index(self.targeting)
        targeting_index = self.targeting_options.index(targeting)
        targeting_index_change = targeting_index - targeting_index_old
        self.select()
        # if new targeting index is lower than old one, use reverse targeting hotkey
        if targeting_index_change <= 0:
            for i in range(abs(targeting_index_change)):
                keyboard.send(hotkeys["Monkeys"]["Change Targeting"][0])
                time.sleep(self.delay)
        # if new targeting index is higher than old one, use normal targeting hotkey
        else:
            for i in range(targeting_index_change):
                keyboard.send(hotkeys["Monkeys"]["Change Targeting"][1])
                time.sleep(self.delay)
        # send escape to get out of upgrade menu
        keyboard.send("esc")
        time.sleep(self.delay)
        # record the targetting of the monkey.
        self.targeting = targeting

    def sell(self):
        """Sell the monkey with the sell hotkey and mark it as sold."""
        # raise exceptions if the monkey hasn't been placed or has been already sold.
        if not self.placed:
            raise MonkeyNotPlaced
        if self.sold:
            raise MonkeySold
        # move to the monkey's position
        # sell monkey
        self.select()
        keyboard.send(hotkeys["Gameplay"]["Sell"])
        time.sleep(self.delay)
        # record that the monkey has been sold.
        self.sold = True

    def get_info(self, upgrades: Tuple[int, int, int] = None):
        """Load description and price metadata from monkeys.json.

        Computes base, current-upgrade and cumulative prices per difficulty
        (easy = 0.85x, hard = 1.08x, impoppable = 1.2x of medium, rounded to
        the nearest 5) and stores them as attributes. Raises UpgradeError
        for an invalid upgrade tuple.
        """
        # if no upgrade path is passed, use the one provided when the monkey was generated.
        if upgrades == None:
            upgrades = self.upgrades
        # raise UpgradeError if invalid type or tuple length.
        if (type(upgrades) != list) and (type(upgrades) != tuple):
            raise UpgradeError
        if len(upgrades) != 3:
            raise UpgradeError
        # raise UpgradeError if all paths have tiers active.
        if upgrades.count(0) == 0:
            raise UpgradeError
        # raise UpgradeError there is a path above the 5th tier or below the base tier.
        if max(upgrades) > 5 or min(upgrades) < 0:
            raise UpgradeError
        # raise UpgradeError if there is more than one path at tier 3 or higher
        third_tier_upgrade_count = len([i for i in upgrades if i >= 3])
        if third_tier_upgrade_count > 1:
            raise UpgradeError
        # get main path from the 3, represented by highest tier.
        self.main_tier = max(upgrades)
        self.main_path = upgrades.index(self.main_tier)
        # set basic monkey data
        self.monkey_description = monkeys[self.name]["description"]
        # calculate monkey prices for different difficulties.
        self.monkey_price_medium = monkeys[self.name]["price"]
        self.monkey_price_easy = price_round(0.85 * self.monkey_price_medium)
        self.monkey_price_hard = price_round(1.08 * self.monkey_price_medium)
        self.monkey_price_impoppable = price_round(1.2 * self.monkey_price_medium)
        # reset upgrade info every time this method is called.
        self.upgrade_name = None
        self.upgrade_description = None
        self.upgrade_price_medium = 0
        self.upgrade_price_easy = 0
        self.upgrade_price_hard = 0
        self.upgrade_price_impoppable = 0
        # only run this if the monkey has been upgraded.
        if upgrades != [0, 0, 0]:
            # get basic upgrade data from monkeys.json
            self.upgrade_name = monkeys[self.name]["upgrades"][self.main_path][
                self.main_tier - 1
            ]["name"]
            self.upgrade_description = monkeys[self.name]["upgrades"][self.main_path][
                self.main_tier - 1
            ]["description"]
            # calculate upgrade prices for different difficulties.
            self.upgrade_price_medium = monkeys[self.name]["upgrades"][self.main_path][
                self.main_tier - 1
            ]["price"]
            self.upgrade_price_easy = price_round(0.85 * self.upgrade_price_medium)
            self.upgrade_price_hard = price_round(1.08 * self.upgrade_price_medium)
            self.upgrade_price_impoppable = price_round(1.2 * self.upgrade_price_medium)
        # calculate total prices for different difficulties.
        self.total_price_medium = self.monkey_price_medium
        for path in range(len(upgrades)):
            for tier in range(upgrades[path]):
                self.total_price_medium += monkeys[self.name]["upgrades"][path][tier][
                    "price"
                ]
        self.total_price_easy = price_round(0.85 * self.total_price_medium)
        self.total_price_hard = price_round(1.08 * self.total_price_medium)
        self.total_price_impoppable = price_round(1.2 * self.total_price_medium)
class Hero:
    """Automates the BTD6 hero, mirroring the Monkey class.

    Heroes share one placement hotkey and level up along a single
    20-level track instead of three upgrade paths.
    """

    def __init__(self, hero: str, delay: float = 0.1):
        """hero: the hero's name. delay: pause between input events."""
        self.name = hero
        self.delay = delay
        self.level = 0
        self.targeting = "First"
        self.targeting_options = ["First", "Last", "Close", "Strong"]
        self.sold = False
        self.placed = False

    def place(self, coordinates: Tuple[int, int]):
        """Place the hero at screen position (x, y); sets level to 1."""
        # raise MonkeyPlaced if the monkey has already been placed.
        if self.placed:
            raise MonkeyPlaced
        # raise CoordinateError if invalid type or tuple length.
        if (type(coordinates) != list) and (type(coordinates) != tuple):
            raise CoordinateError
        if len(coordinates) != 2:
            raise CoordinateError
        # activate Bloons TD 6 window.
        focus_window("BloonsTD6")
        # move to the monkey's position
        # send the hotkey for the monkey
        # left click to place the monkey
        # time.sleep required for the monkey to be placed in time.
        mouse.move(coordinates[0], coordinates[1])
        time.sleep(self.delay)
        keyboard.send(hotkeys["Monkeys"]["Heroes"])
        time.sleep(self.delay)
        mouse.click()
        time.sleep(self.delay)
        # record the coordinates of the monkey.
        self.coordinates = coordinates
        # record that the monkey has been placed.
        self.placed = True
        self.level = 1

    def select(self, coordinates: Tuple[int, int] = None):
        """Click the hero (or an explicit position), opening its menu."""
        # raise exceptions if the monkey hasn't been placed or has been already sold.
        if not self.placed:
            raise MonkeyNotPlaced
        if self.sold:
            raise MonkeySold
        # if no coordinates are passed, use the ones provided when the monkey was placed.
        if coordinates == None:
            coordinates = self.coordinates
        # raise CoordinateError if invalid type or tuple length.
        if (type(coordinates) != list) and (type(coordinates) != tuple):
            raise CoordinateError
        if len(coordinates) != 2:
            raise CoordinateError
        mouse.move(coordinates[0], coordinates[1])
        time.sleep(self.delay)
        mouse.click()
        time.sleep(self.delay)

    def set_level(self, level: int = 1):
        """Record the hero's current level without sending any input."""
        self.level = level

    def upgrade(self, level: int = 1, skip_esc: bool = False):
        """Buy hero levels until the given level is reached.

        level: target level, 1-20 and not below the current level,
        otherwise UpgradeError is raised.
        skip_esc: when True, leave the upgrade menu open afterwards.
        """
        # raise exceptions if the monkey hasn't been placed or has been already sold.
        if not self.placed:
            raise MonkeyNotPlaced
        if self.sold:
            raise MonkeySold
        # if no upgrade path is passed, use the one provided when the monkey was generated.
        # raise UpgradeError if invalid type.
        if type(level) != int:
            raise UpgradeError
        # raise UpgradeError there is a path above the 5th tier or below the base tier.
        if level > 20 or level < 1 or level < self.level:
            raise UpgradeError
        # move to the monkey's position
        # send the hotkey for (current level - previous level)
        # send escape to get out of upgrade menu
        self.select()
        for l in range(level - self.level):
            # heroes level up via the first upgrade-path hotkey.
            keyboard.send(hotkeys["Monkeys"]["Upgrades"][0])
            time.sleep(self.delay)
        if not skip_esc:
            keyboard.send("esc")
            time.sleep(self.delay)
        # record the level of the hero.
        self.set_level(level)
        # update information about hero
        # self.get_info(self.name)

    def target(self, targeting: str = None):
        """Switch the hero's targeting to one of targeting_options."""
        # if no targeting is passed, use the one provided when the monkey was generated.
        if targeting == None:
            targeting = self.targeting
        # raise TargetingError if targeting not in targeting_options.
        if targeting not in self.targeting_options:
            raise TargetingError
        # find difference between indexes of new targeting and old targeting
        self.targeting_index_old = self.targeting_options.index(self.targeting)
        self.targeting_index = self.targeting_options.index(targeting)
        self.targeting_index_change = self.targeting_index - self.targeting_index_old
        self.select()
        # if new targeting index is lower than old one, use reverse targeting hotkey
        if self.targeting_index_change <= 0:
            for i in range(abs(self.targeting_index_change)):
                keyboard.send(hotkeys["Monkeys"]["Change Targeting"][0])
                time.sleep(self.delay)
        # if new targeting index is higher than old one, use normal targeting hotkey
        else:
            for i in range(self.targeting_index_change):
                keyboard.send(hotkeys["Monkeys"]["Change Targeting"][1])
                time.sleep(self.delay)
        # send escape to get out of upgrade menu
        keyboard.send("esc")
        time.sleep(self.delay)
        # record the targetting of the monkey.
        self.targeting = targeting

    def sell(self):
        """Sell the hero with the sell hotkey and mark it as sold."""
        # raise exceptions if the monkey hasn't been placed or has been already sold.
        if not self.placed:
            raise MonkeyNotPlaced
        if self.sold:
            raise MonkeySold
        # move to the monkey's position
        # sell monkey
        self.select()
        keyboard.send(hotkeys["Gameplay"]["Sell"])
        time.sleep(self.delay)
        # record that the monkey has been sold.
        self.sold = True

    def get_info(self, name: str = None, upgrades: Tuple[int, int, int] = None):
        """Load description/price metadata, copied from Monkey.get_info.

        NOTE(review): Hero never sets self.upgrades, so calling this without
        an explicit `upgrades` argument raises AttributeError; it also looks
        the hero name up in monkeys.json, not heroes.json — confirm whether
        this method is actually usable for heroes.
        """
        # if no upgrade path is passed, use the one provided when the monkey was generated.
        if upgrades == None:
            upgrades = self.upgrades
        # if no monkey name is passed, use the one provided when the monkey was generated.
        if name == None:
            name = self.name
        # raise UpgradeError if invalid type or tuple length.
        if (type(upgrades) != list) and (type(upgrades) != tuple):
            raise UpgradeError
        if len(upgrades) != 3:
            raise UpgradeError
        # raise UpgradeError if all paths have tiers active.
        if upgrades.count(0) == 0:
            raise UpgradeError
        # raise UpgradeError there is a path above the 5th tier or below the base tier.
        if max(upgrades) > 5 or min(upgrades) < 0:
            raise UpgradeError
        # raise UpgradeError if there is more than one path at tier 3 or higher
        third_tier_upgrade_count = len([i for i in upgrades if i >= 3])
        if third_tier_upgrade_count > 1:
            raise UpgradeError
        # get main path from the 3, represented by highest tier.
        self.main_tier = max(upgrades)
        self.main_path = upgrades.index(self.main_tier)
        # set basic monkey data
        self.name = name
        self.monkey_description = monkeys[name]["description"]
        # calculate monkey prices for different difficulties.
        self.monkey_price_medium = monkeys[name]["price"]
        self.monkey_price_easy = price_round(0.85 * self.monkey_price_medium)
        self.monkey_price_hard = price_round(1.08 * self.monkey_price_medium)
        self.monkey_price_impoppable = price_round(1.2 * self.monkey_price_medium)
        # reset upgrade info every time this method is called.
        self.upgrade_name = None
        self.upgrade_description = None
        self.upgrade_price_medium = 0
        self.upgrade_price_easy = 0
        self.upgrade_price_hard = 0
        self.upgrade_price_impoppable = 0
        # only run this if the monkey has been upgraded.
        if upgrades != [0, 0, 0]:
            # get basic upgrade data from monkeys.json
            self.upgrade_name = monkeys[name]["upgrades"][self.main_path][
                self.main_tier - 1
            ]["name"]
            self.upgrade_description = monkeys[name]["upgrades"][self.main_path][
                self.main_tier - 1
            ]["description"]
            # calculate upgrade prices for different difficulties.
            self.upgrade_price_medium = monkeys[name]["upgrades"][self.main_path][
                self.main_tier - 1
            ]["price"]
            self.upgrade_price_easy = price_round(0.85 * self.upgrade_price_medium)
            self.upgrade_price_hard = price_round(1.08 * self.upgrade_price_medium)
            self.upgrade_price_impoppable = price_round(1.2 * self.upgrade_price_medium)
        # calculate total prices for different difficulties.
        self.total_price_medium = self.monkey_price_medium
        for path in range(len(upgrades)):
            for tier in range(upgrades[path]):
                self.total_price_medium += monkeys[name]["upgrades"][path][tier][
                    "price"
                ]
        self.total_price_easy = price_round(0.85 * self.total_price_medium)
        self.total_price_hard = price_round(1.08 * self.total_price_medium)
        self.total_price_impoppable = price_round(1.2 * self.total_price_medium)
class Ability:
    """An activated ability belonging to a placed Monkey or Hero.

    Resolves the ability's metadata (name, cooldown, activation type) from
    monkeys.json / heroes.json and can trigger it through its hotkey slot.
    """

    def __init__(
        self,
        monkey: Union[Monkey, Hero],
        hotkey_index: int,
        ability_name: str = None,
        upgrades: Union[Tuple[int, int, int], int] = None,
    ):
        """monkey: the placed Monkey/Hero that owns the ability.
        hotkey_index: 1-based slot in the activated-abilities hotkey list.
        ability_name: pick a specific ability; defaults to the first one.
        upgrades: optional override of the owner's upgrades/level.

        Raises AbilityError when the owner's current upgrade/level has no
        abilities.
        """
        # initialize ability's attributes.
        self.monkey = monkey
        self.name = monkey.name
        self.hotkey_index = hotkey_index
        self.ability_name = ability_name
        if type(monkey) == Monkey:
            # if no upgrade path is passed, use the one provided when the monkey was generated.
            # NOTE(review): an explicitly passed `upgrades` value is otherwise
            # ignored — the lookups below always use monkey.main_path/main_tier.
            if upgrades == None:
                self.upgrades = monkey.upgrades
            # raise AbilityError if the monkey's upgrade doesn't have an ability.
            if (
                "abilities"
                not in monkeys[self.name]["upgrades"][monkey.main_path][
                    monkey.main_tier - 1
                ].keys()
            ):
                raise AbilityError
            # set list of monkey's abilities in ability_list
            self.ability_list = monkeys[self.name]["upgrades"][monkey.main_path][
                monkey.main_tier - 1
            ]["abilities"]
            # if ability_name isn't passed, default to the first ability that the monkey has.
            # if it is, then find the matching entry and use that.
            if ability_name == None:
                self.ability_dict = self.ability_list[0]
            else:
                # NOTE(review): if ability_name matches no entry, ability_dict
                # stays unset and later use raises AttributeError.
                for ability_dict in self.ability_list:
                    if ability_dict["name"] == ability_name:
                        self.ability_dict = ability_dict
            # update information about ability
            self.get_info()
        elif type(monkey) == Hero:
            # if no level override is passed, use the hero's current level.
            if upgrades == None:
                self.level = monkey.level
            # raise AbilityError if the hero's level doesn't have an ability.
            if "abilities" not in heroes[self.name]["levels"][self.level - 1]:
                # debug output showing the level entry that lacked abilities.
                print(heroes[self.name]["levels"][self.level - 1])
                raise AbilityError
            # set list of the hero's abilities in ability_list
            self.ability_list = heroes[self.name]["levels"][self.level - 1]["abilities"]
            # if ability_name isn't passed, default to the first ability that the hero has.
            # if it is, then find the matching entry and use that.
            if ability_name == None:
                self.ability_dict = self.ability_list[0]
            else:
                for ability_dict in self.ability_list:
                    if ability_dict["name"] == ability_name:
                        self.ability_dict = ability_dict
            # update information about ability
            self.get_info()

    def activate(
        self,
        hotkey_index=None,
        coordinates_1: Tuple[int, int] = None,
        coordinates_2: Tuple[int, int] = None,
    ):
        """Trigger the ability via its hotkey.

        The ability's "type" decides the follow-up input: 0 = no clicks
        (e.g. Super Monkey Fan Club), 1 = one click at coordinates_1
        (e.g. Overclock), 2 = clicks at coordinates_1 then coordinates_2
        (e.g. Chinook Reposition).
        """
        # if no hotkey_index is passed, use the one provided when the ability was generated.
        if hotkey_index == None:
            hotkey_index = self.hotkey_index
        # type 0 - just activate ability
        if self.ability_dict["type"] == 0:
            keyboard.send(hotkeys["Gameplay"]["Activated Abilities"][hotkey_index - 1])
            time.sleep(self.monkey.delay)
        # type 1 - activate ability then click once.
        elif self.ability_dict["type"] == 1:
            keyboard.send(hotkeys["Gameplay"]["Activated Abilities"][hotkey_index - 1])
            time.sleep(self.monkey.delay)
            mouse.move(coordinates_1[0], coordinates_1[1])
            time.sleep(self.monkey.delay)
            mouse.click()
            time.sleep(self.monkey.delay)
        # type 2 - activate ability then click twice.
        elif self.ability_dict["type"] == 2:
            keyboard.send(hotkeys["Gameplay"]["Activated Abilities"][hotkey_index - 1])
            time.sleep(self.monkey.delay)
            # BUGFIX: the first click previously moved to
            # (coordinates_1[0], coordinates_2[1]), mixing the x of the first
            # point with the y of the second; it must click coordinates_1.
            mouse.move(coordinates_1[0], coordinates_1[1])
            time.sleep(self.monkey.delay)
            mouse.click()
            time.sleep(self.monkey.delay)
            mouse.move(coordinates_2[0], coordinates_2[1])
            time.sleep(self.monkey.delay)
            mouse.click()
            time.sleep(self.monkey.delay)

    def get_info(self, ability_dict=None):
        """Cache the ability's name, cooldown and type as attributes."""
        # if no ability_dict is provided, use the one resolved at construction.
        if ability_dict == None:
            ability_dict = self.ability_dict
        # turn ability dictionary values into attributes.
        self.ability_name = ability_dict["name"]
        self.ability_cooldown = ability_dict["cooldown"]
        self.ability_type = ability_dict["type"]
def play():
    """Press the play / fast-forward hotkey."""
    play_key = hotkeys["Gameplay"]["Play/Fast Forward"]
    keyboard.send(play_key)
    time.sleep(0.1)
def confirm():
    """Confirm a dialog with Enter, then dismiss any remaining menu with Esc."""
    for key in ("enter", "esc"):
        keyboard.send(key)
        time.sleep(0.1)
| [
"vidh.bhatt@gmail.com"
] | vidh.bhatt@gmail.com |
9242c6b500fb9f1bf7ec63e07416b8c4618c046e | 1ea4892f4a8f3ef4c1aad0d69ad6800dfbb74661 | /actions/send_email.py | 7be87c16bf8585d3c6a48a22d77aeed5972ae858 | [] | no_license | qube-ai/rule_vm | 55c32715d3828bbb4d97438c80758631857b89e8 | 4e7cd890535077eb79bb8647fd5ea2542fd5c34d | refs/heads/main | 2023-05-08T15:55:04.247590 | 2021-05-31T22:36:26 | 2021-05-31T22:36:26 | 304,846,945 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,716 | py | import os
import trio
from loguru import logger
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail, Email, To
from .base import BaseAction
from .base import ActionConstant
from typing import Dict
class SendEmailAction(BaseAction):
    """Action that sends an email through SendGrid.

    Expects action_data matching `schema`: a subject, an HTML body, and a
    list of recipient addresses.
    """

    action_type = ActionConstant.SEND_EMAIL

    # JSON schema used to validate the action payload.
    schema = {
        "$schema": "http://json-schema.org/draft-07/schema",
        "type": "object",
        "properties": {
            "type": {"type": "string", "enum": ["send_email"]},
            "subject": {"type": "string"},
            "body": {"type": "string"},
            "to": {"type": "array"},
        },
        "required": ["type", "subject", "body", "to"],
    }

    def __init__(self, action_data: Dict):
        super(SendEmailAction, self).__init__(action_data)
        self.subject = action_data["subject"]
        self.body = action_data["body"]
        self.to = action_data["to"]

    async def perform(self):
        """Send the email without blocking the trio event loop.

        The synchronous SendGrid call runs in a worker thread via
        trio.to_thread.run_sync. Failures are logged, never raised.
        """
        from_email = Email("automated@thepodnet.com", name="Podnet")
        to_email = [To(address) for address in self.to]
        message = Mail(
            from_email=from_email,
            to_emails=to_email,
            subject=self.subject,
            html_content=self.body,
        )

        def send_blocking():
            # SendGrid's client is blocking; keep it off the trio loop.
            sg = SendGridAPIClient(os.getenv("SENDGRID_API_KEY"))
            response = sg.send(message)
            logger.info(
                f"Email sent. Status Code: {response.status_code}, Body: {response.body}"
            )

        try:
            await trio.to_thread.run_sync(send_blocking)
        except Exception as e:
            logger.error(f"Unable to send the email due to some error. Error: {e}")
            # BUGFIX: only SendGrid HTTP errors carry a `body` attribute;
            # other exceptions (e.g. network failures) do not, so accessing
            # e.body directly raised AttributeError inside the handler.
            logger.error(f"Error body: {getattr(e, 'body', None)}")
| [
"apoorvasingh157@gmail.com"
] | apoorvasingh157@gmail.com |
6b02940ed353c8a99eb9742927bfe2c36301da96 | c388f70bec95795dd8fdea56dc8c9beea49aa564 | /diststore/settings.py | 9a2c84a0d1c9ad3f286bff6b4bb37109c619066d | [
"MIT"
] | permissive | santeri/pydiststore | 6d6a5c8d33ba2147da106b0877067c23f591a45d | 227074a152e9e6a86abf22c3d4b2e8d43ec2f659 | refs/heads/master | 2021-01-22T02:53:32.767149 | 2012-10-04T23:54:28 | 2012-10-04T23:54:28 | 145,312 | 1 | 1 | null | 2012-10-04T23:54:29 | 2009-03-07T17:33:18 | Python | UTF-8 | Python | false | false | 843 | py | #!/usr/bin/env python
# encoding: utf-8
# Accessor functions exported by this module.
__all__ = ['http_port', 'multicast_addr', 'multicast_port', 'multicast_dst', 'multicast_timeout']

import ConfigParser

# Read the daemon configuration; values from later files in the list
# override those from earlier ones.
config = ConfigParser.ConfigParser()
config.read(['diststored.cfg', '/usr/local/etc/diststored.cfg', '/etc/diststored.cfg'])

# Parsed settings, kept module-private behind the accessor functions below.
_http_port = int(config.get("diststored", "http_port"))
_multicast_addr = config.get("diststored", "multicast_addr")
_multicast_port = int(config.get("diststored", "multicast_port"))
# (address, port) pair for the multicast destination.
_multicast_dst = (_multicast_addr, _multicast_port)
_multicast_timeout = float(config.get("diststored", "multicast_timeout"))
def http_port():
    """Return the configured HTTP port as an int."""
    return _http_port
def multicast_addr():
    """Return the configured multicast address string."""
    return _multicast_addr
def multicast_port():
    """Return the configured multicast port as an int."""
    return _multicast_port
def multicast_dst():
    """Return the multicast destination as an (address, port) tuple."""
    return _multicast_dst
def multicast_timeout():
    """Return the configured multicast timeout in seconds as a float."""
    return _multicast_timeout
"santeri@santeri.se"
] | santeri@santeri.se |
cca76db479141c09148d375544d94ff41aad4e76 | 2b87db0ada3c2d016df891761855e9a9dc5b81fe | /DP/matrixMultiplication_tabulation.py | 5dccacdc6120e0b39b9f754361beaf58b5d63d59 | [] | no_license | drdcs/Algorithms-and-System-Design | 9b201ba47bda14ca8fcd9aeddcfee760b3194f2d | 656fafbd758c30f5bd7a73a7d677562d5ae1f39f | refs/heads/main | 2023-04-11T10:25:02.992297 | 2021-04-22T05:57:10 | 2021-04-22T05:57:10 | 329,364,127 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | """
Input: p[] = {40, 20, 30, 10, 30}
Output: 26000
There are 4 matrices of dimensions 40x20, 20x30, 30x10 and 10x30.
Let the input 4 matrices be A, B, C and D. The minimum number of
multiplications are obtained by putting parenthesis in following way
(A(BC))D --> 20*30*10 + 40*20*10 + 40*10*30
Input: p[] = {10, 20, 30, 40, 30}
Output: 30000
There are 4 matrices of dimensions 10x20, 20x30, 30x40 and 40x30.
Let the input 4 matrices be A, B, C and D. The minimum number of
multiplications are obtained by putting parenthesis in following way
((AB)C)D --> 10*20*30 + 10*30*40 + 10*40*30
"""
| [
"diptihdl@gmail.com"
] | diptihdl@gmail.com |
c95bfa05173b3c57990cc2d53cf42b70060aa4c2 | 934c3ed7a95d6509d63c099df9481aa35c2a7c56 | /app/view_models/__init__.py | 79902edd521f6ccbc41b47d3c82e0bf184b70ae1 | [] | no_license | meto001/label | b072c9ab0e581799a3eb6a7b4175afae9d04ca10 | 8f2af923379eea9bb89350b2f6b794b4964a1301 | refs/heads/master | 2021-06-10T13:11:26.307612 | 2020-08-25T08:41:23 | 2020-08-25T08:41:23 | 181,861,296 | 3 | 0 | null | 2021-03-25T22:56:53 | 2019-04-17T09:37:58 | Python | UTF-8 | Python | false | false | 71 | py | # _*_ coding:utf-8 _*_
# Module authorship metadata.
__author__ = 'meto'
__date__ = '2019/4/11 14:42'
"756246975@qq.com"
] | 756246975@qq.com |
bca492a81fe23d315cbb69434d85d776cc8e630a | 774413d52ecdfad882bc79f325f6d6b282b336cb | /src/face_api_client.py | c849e0456c44d3ae95e4d294a36909c9ee1ec024 | [] | no_license | omribeni/BestFaceImageRecognition | 6cde0d7dce9c743001aa34121eead85517b93a4c | 633ab4cf8200bff47a730982b8d41e816aae916f | refs/heads/master | 2022-12-29T11:07:05.260349 | 2020-10-11T15:04:06 | 2020-10-11T15:04:06 | 289,653,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,458 | py | import os
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
import Face
class FaceApiClient(object):
    """Lazy wrapper around the Azure Cognitive Services Face API.

    Credentials come from environment variables; the underlying FaceClient
    is only created when first needed.
    """

    # Set the FACE_SUBSCRIPTION_KEY environment variable with your key as the value.
    KEY = os.environ.get('FACE_SUBSCRIPTION_KEY')
    # Set the FACE_ENDPOINT environment variable with the endpoint from your Face service in Azure.
    ENDPOINT = os.environ.get('FACE_ENDPOINT')

    def __init__(self):
        # The FaceClient instance; created lazily by __initialize().
        self.__client = None

    def __initialize(self):
        """Create the authenticated FaceClient if it does not exist yet."""
        if not self.__client:
            # Create an authenticated FaceClient.
            self.__client = FaceClient(self.ENDPOINT, CognitiveServicesCredentials(self.KEY))

    @property
    def client(self):
        """The underlying FaceClient, creating it on first access."""
        if not self.__client:
            self.__initialize()
        return self.__client

    def detect_image_faces(self, image_path):
        """Detect faces in the image at image_path.

        Returns the API's detection result, or None when analysis failed.
        """
        response = self.analyze_image(image_path)
        if not response:
            return None
        return response

    def recognize_similar_faces(self, face, all_faces):
        """Return faces from all_faces the API judges similar to `face`.

        NOTE(review): accesses self.__client directly, so the client must
        already be initialized (e.g. by a prior analyze_image call) —
        confirm callers guarantee that.
        """
        face_ids = [f.face_id for f in all_faces]
        if face_ids:
            response = self.__client.face.find_similar(face.face_id, face_ids=face_ids)
            return response
        return []

    def analyze_image(self, image_path):
        """Run face detection with all attributes and landmarks on a local image.

        Returns the API response, or None on any error (errors are
        deliberately swallowed).
        """
        # init on demand
        self.__initialize()
        try:
            # convert local image to stream
            # file_path = image_path.replace('/', '\\')
            with open(image_path, "rb") as image_bytes_stream:
                return self.__client.face.detect_with_stream(image_bytes_stream,
                    return_face_attributes=Face.FaceFields.all(),
                    detection_model="detection_01", return_face_landmarks=True)
        except Exception as e:
            return None

    def get_most_popular_face_images(self, faces_list):
        """Greedily find the largest group of faces belonging to one person.

        Repeatedly takes the first remaining face, groups it with all faces
        the API reports as similar, and keeps the biggest group seen.
        NOTE(review): the similar-face results are different SDK objects than
        the detected faces in faces_list, so the `x not in cur_group` filter
        may only ever remove the seed face — verify against the SDK types.
        """
        biggest_person_group = []
        while faces_list:
            face = faces_list[0]
            cur_group = [face]
            # add all similar faces of the current face
            cur_group.extend(self.recognize_similar_faces(face, faces_list[1:]))
            if cur_group and len(cur_group) > len(biggest_person_group):
                biggest_person_group = cur_group
            # remove all of the current group members from the faces list
            faces_list = [x for x in faces_list if x not in cur_group]
        return biggest_person_group

    @classmethod
    def get_best_resolution_face_image(cls, person_faces, all_faces, face_to_resolution_map):
        """Pick the detected face whose face box covers the largest fraction of its image.

        person_faces: similar-face results carrying face_id.
        all_faces: detected faces, matched by face_id.
        face_to_resolution_map: face_id -> image dimensions (presumably
        (width, height) — TODO confirm against the caller).
        """
        best_image = None
        best_resolution = 0
        for face in person_faces:
            # get the detectedFace object form the similarFace list by face_id
            detected_face = next((x for x in all_faces if x.face_id == face.face_id), None)
            if detected_face:
                cur_res = face_to_resolution_map[detected_face.face_id]
                picture_surface_area = cur_res[0] * cur_res[1]
                face_box_surface_area = detected_face.face_rectangle.height * detected_face.face_rectangle.width
                current_resolution = face_box_surface_area / picture_surface_area
                if current_resolution > best_resolution:
                    best_resolution = current_resolution
                    best_image = detected_face
        return best_image
| [
"omribenishai015@gmail.com"
] | omribenishai015@gmail.com |
30e8a20521a6bc8d67539d32f44ca040fae0ad73 | 292c19b382dd882a60a31cbe291ca036f39ebb7f | /args.py | 3dba48b23e9683711ac9610852aeb74c3c154d93 | [] | no_license | shailzajolly/EaSe | 687e947319f008240db352119b0b9363e2be8d5a | 53518e8273b7ee01d5c005f052671c2e96ab29a4 | refs/heads/main | 2023-04-05T04:13:55.864936 | 2021-04-06T13:22:41 | 2021-04-06T13:22:41 | 355,194,977 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,440 | py | import argparse
def get_args():
    """Parse command-line options for VQA / VizWiz answer-score computation.

    Unknown arguments are tolerated (parse_known_args); the parsed namespace
    is echoed to stdout before being returned by the line that follows.
    """
    parser = argparse.ArgumentParser(description="data parameters")
    # Annotation / question files for the training and validation splits.
    parser.add_argument("--annotation_path_tr",
                        help="Path to file that contains annotations for training data.",
                        type=str,
                        default='v2_mscoco_train2014_annotations.json')
    parser.add_argument("--question_path_tr",
                        help="Path to file that contains questions for training data.",
                        type=str,
                        default='v2_OpenEnded_mscoco_train2014_questions.json')
    parser.add_argument("--annotation_path_val",
                        help="Path to file that contains annotations for validation data.",
                        type=str,
                        default='v2_mscoco_val2014_annotations.json')
    parser.add_argument("--question_path_val",
                        help="Path to file that contains questions for validation data.",
                        type=str,
                        default='v2_OpenEnded_mscoco_val2014_questions.json')
    parser.add_argument("--data_split",
                        help="Split of the dataset whose scores are to be computed. Possible options: train/val",
                        type=str,
                        default='train')
    # Embedding model and dataset/model identifiers.
    parser.add_argument("--word2vec",
                        help="Path for word2vec model",
                        type=str,
                        default='fastText/wiki-news-300d-1M-subword.vec')
    parser.add_argument("--data_name",
                        help="Dataset name used in the experiment. Possible Types: VQA/VizWiz",
                        type=str,
                        default='VQA')
    parser.add_argument("--pred_file",
                        help="Prediction file generated by the model.",
                        type=str,
                        default='LXMERTpreds/VQ_pred_id2scr_TrHE')
    parser.add_argument("--pred_model",
                        help="Model used to create prediction file.",
                        type=str,
                        default='lxmert')
    parser.add_argument("--id_dir",
                        help="Directory where question ids computed by SeS scores are present.",
                        type=str,
                        default='VQA2.0_ids/entropy/E_')
    args, unparsed = parser.parse_known_args()
    print(args)
return args | [
"shailzajolly@gmail.com"
] | shailzajolly@gmail.com |
141edf402032a4bbe9c3349258944e9dcfa2c803 | fb7efe44f4d9f30d623f880d0eb620f3a81f0fbd | /chrome/browser/android/digital_asset_links/DEPS | 7023254e344e39b9b94c5db81d7a70a7df505240 | [
"BSD-3-Clause"
] | permissive | wzyy2/chromium-browser | 2644b0daf58f8b3caee8a6c09a2b448b2dfe059c | eb905f00a0f7e141e8d6c89be8fb26192a88c4b7 | refs/heads/master | 2022-11-23T20:25:08.120045 | 2018-01-16T06:41:26 | 2018-01-16T06:41:26 | 117,618,467 | 3 | 2 | BSD-3-Clause | 2022-11-20T22:03:57 | 2018-01-16T02:09:10 | null | UTF-8 | Python | false | false | 296 | # It is likely that this code will eventually be shared across platforms, so
# excluding dependencies that would make this being a component impossible.
# Chromium DEPS include rules for this directory: '-' forbids includes from
# a path, '+' re-allows a specific sub-path.
include_rules = [
  "-content",
  "-chrome",
  "+base",
  "+content/public/test",
  "+chrome/browser/android/digital_asset_links",
  "+net",
]
| [
"jacob-chen@iotwrt.com"
] | jacob-chen@iotwrt.com | |
4c269658b728a3ab97fc12a26d4e6a217994a423 | fb0906f77a44e2e2875ee5dbc16a14f5d70df50e | /fotok/config.py | edebde423a287e62642cffceca7e034c08c99d55 | [] | no_license | gsdu8g9/fotok | 3f30f14a5a195531cbf1ee7ecaa5d87d702a701d | bdb9b823c000e9bb9cb465f4e0401f1c093b63f0 | refs/heads/master | 2021-01-22T20:44:44.406304 | 2016-02-16T20:20:54 | 2016-02-16T20:20:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | """
This file stores configuration for both the web app and feed server.
Should be changed on deployment.
"""
import os
# Directory containing this config module; anchors relative paths below.
basedir = os.path.abspath(os.path.dirname(__file__))
# Flask core settings.
DEBUG = True
SECRET_KEY = 'abcdimverysilly'
# Maximum accepted request body size: 1 MiB.
MAX_CONTENT_LENGTH = 1024*1024
# SQLite database file stored next to this module.
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'fotok.db')
# Cache backend and Redis connection settings.
CACHE_KIND = 'redis'
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0
# Token lifetime in seconds.
TOKEN_TTL = 1800
# How many recent items the feeds show.
RECENT_PHOTOS = 10
RECENT_COMMENTS = 3
# Feed server address; the port is omitted from the URL when it is 80.
FEEDSERVER_HOST = '127.0.0.1'
FEEDSERVER_PORT = 8000
if FEEDSERVER_PORT == 80:
    FEEDSERVER_URL = 'http://{}/'.format(FEEDSERVER_HOST)
else:
    FEEDSERVER_URL = 'http://{}:{}/'.format(FEEDSERVER_HOST, FEEDSERVER_PORT)
# Accepted photo dimensions in pixels.
MAX_WIDTH = 1920
MIN_WIDTH = 100
MAX_HEIGHT = 1080
MIN_HEIGHT = 100
| [
"maksbotan@gentoo.org"
] | maksbotan@gentoo.org |
488374d79aa245ef32483dbbbe87d6d7825895ad | 4cc4f6f49bcd1031f017a144e18a7ea68ea7cfeb | /Lab09/.svn/text-base/generateReport.py.svn-base | e96764d152e3d9da24cd8b7572b3d2a740e12461 | [] | no_license | apoorvvw/Python | abeddddbe59dfe0d17865bfbf06232565dbff104 | 1b2661e5835f9e01ae10a91da21bfe768a858a13 | refs/heads/master | 2020-12-24T16:40:34.717506 | 2016-04-03T05:53:16 | 2016-04-03T05:53:16 | 40,106,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,185 | #!/usr/local/bin/python3.4
__author__ = 'ee364e10'
import re
def moreReg(filename):
    """Convert raw per-student grade records in *filename* to finalGrades.xml.

    An input line such as ``<abc>Full Name:[4377:95,4388:85]</abc>`` becomes a
    <student> element containing one self-closed <ECE{course}> tag per record,
    each carrying the numeric score and its letter grade. Lines without a
    student record are skipped. Output always goes to 'finalGrades.xml' in the
    current directory; returns None.

    Fixes vs. the original: course tags are now self-closing ("/>") so the
    XML is well formed, both files are closed via context managers, the
    unused `dict=[]` shadowing of a builtin is gone, debug prints were
    removed, and record splitting accepts ',' or ';' directly instead of
    sniffing a fixed character position.
    """
    with open(filename, "r") as input_file, open('finalGrades.xml', "w") as output_file:
        output_file.write("<?xml version=\"1.0\"?>\n")
        output_file.write("<students>\n")
        for line in input_file:
            student = re.search(r'<(?P<id>\w{3})>(?P<name>.*):\[', line)
            if not student:
                continue  # not a student record line (e.g. XML header)
            output_file.write("    <student name=\"" + student.group("name") + "\" id=\"" + student.group("id") + "\">\n")
            # Everything between the first ':' and the last '<' is the score
            # list, e.g. "[4377:95,4388:85]"; entries may use ',' or ';'.
            score_blob = re.search(r':(.*)<', line).group(1)
            for record in re.split(r'[,;]', score_blob.strip()):
                entry = record.strip().strip("[").strip("]")
                course = entry.split(":")[0]
                marks = entry.split(":")[1]
                score = int(marks)
                # Letter grade; scores above 100 fall through to the initial
                # 'A' default (original behavior).
                letter = 'A'
                if 90 <= score <= 100:
                    letter = 'A'
                elif 80 <= score < 90:
                    letter = 'B'
                elif 70 <= score < 80:
                    letter = 'C'
                elif 60 <= score < 70:
                    letter = 'D'
                elif score < 60:
                    letter = 'F'
                # Bug fix: emit a self-closing tag so the document parses.
                output_file.write("        <ECE" + course + " score=\"" + marks + "\" grade=\"" + letter + "\"/>\n")
            output_file.write("    </student>\n")
        output_file.write("</students>\n")
# Script entry point: convert the default input file when run directly.
if __name__ == "__main__":
    a= moreReg('rawGrades.xml')
    # f = re.search(r'<(?P<course>ECE\d{3})\sscore=\"(?P<marks>\d{2})\"\sgrade=\"(?P<grade>[A-Z]?)\"/>',line)
| [
"awairaga@purdue.edu"
] | awairaga@purdue.edu | |
9f060361d6de995b683995985d8c9f9c463b1669 | d54dfc623e1916c5cf08b738ff4a3d88a923fdb7 | /NumberGuessingGame.py | 0527a7d767b7c0c03f242a1d91d2b5670c745784 | [] | no_license | sandeep1108/Code | 1d2004987d55058368149873944ebc631b43fe4e | 1f97e2f6494a40bec14915a288cbfe8e23c7e6eb | refs/heads/master | 2021-05-01T04:23:47.511245 | 2016-09-28T20:44:35 | 2016-09-28T20:44:35 | 69,502,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,662 | py | """
Author: Sandeep Das
This program asks the user to guess a random number between 1 and 100
"""
import random
import os.path
import json
from sys import argv
def main():
    """Greet the player, read a user id and start (or resume) their game."""
    print "Hello There! Welcome!"
    playerName = str (raw_input("Please enter your user id: "))
    print "How are you doing %s?" %playerName
    checkUser(playerName) # chk if the user already exists?
def checkUser(playerName):
    """Welcome the player (new or returning) and launch a game round."""
    if not checkIfFileExist():
        print "Looks like this is your first attempt %s. Let's begin" %playerName
        game(playerName)
    else:
        # NOTE(review): if Score.txt exists but this player is new, readFile
        # returns [] and max([]) below raises ValueError -- confirm/handle.
        playerScore = readFile(playerName) # Check if the Paleyer already exists in the file, check for key as playerName
        print "Welcome back %s. Looks like your last score was %d. Let's play again. " %(playerName, max(playerScore))
        game(playerName)
def readFile(playerName):
    """Load Score.txt and return the saved attempt list for playerName.

    An unknown player (or an empty stored list) yields [].
    """
    with open('Score.txt') as score_file:
        saved_scores = json.load(score_file)
    return saved_scores.get(playerName) or []
def game(playerName):
    """Play one guessing round and append the total guess count to the
    player's saved history in Score.txt."""
    print "Guess a number between 1 and 100!"
    randomNumber = random.randint(1, 100)
    found = False
    attemptCounter = 0
    while not found:
        # Python 2: input() evaluates the typed text, so a typed integer
        # compares correctly against randomNumber. NOTE(review): this also
        # executes arbitrary expressions -- int(raw_input(...)) would be safer.
        userGuess = input("Your Guess: ")
        if userGuess == randomNumber:
            print "That's the number!"
            found = True
        elif userGuess < randomNumber:
            print "Thats's not it. Try biggger.."
            attemptCounter += 1
        else:
            print "Thats's not it. Try smaller!"
            attemptCounter += 1
    # attemptCounter counts only the wrong guesses; +1 adds the final
    # correct guess.
    print "Congratulations, you found it in %s attempts."%str(attemptCounter+1)
    if not checkIfFileExist():
        playerScore = []
        playerScore.append(int(attemptCounter+1))
        writeScore(playerName, playerScore)
    else:
        playerScore =readFile(playerName)
        playerScore.append(int(attemptCounter+1))
        writeScore(playerName, playerScore)
    print "Have a goood day!"
def checkIfFileExist():
    """Return True when Score.txt exists and holds at least one byte."""
    score_path = 'Score.txt'
    return os.path.isfile(score_path) and os.stat(score_path).st_size != 0
def writeScore(playerName, playerScore):
    """Persist playerScore for playerName in Score.txt (JSON dict of
    name -> list of attempt counts), creating the file when needed."""
    if not checkIfFileExist():
        # First ever save: start a fresh score dictionary.
        playerDict = {}
        playerDict[playerName] = playerScore
        with open ('Score.txt', 'w') as fileOpen:
            json.dump(playerDict, fileOpen)
        print "New Player %s data saved." %playerName
    else:
        # Merge into the existing file, overwriting this player's entry.
        with open ('Score.txt') as fileOpen:
            playerDict = json.load(fileOpen)
        playerDict[playerName] = playerScore
        with open ('Score.txt', 'w') as fileOpen:
            json.dump(playerDict, fileOpen)
        print "Player %s data saved." %playerName
# Run the game when executed as a script.
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | sandeep1108.noreply@github.com |
3791527cea4f9b19510cd2511f27d307b569de22 | 4d2de834ecea6ef444b1c45afb5a41e717900858 | /app/app_todo/__init__.py | 33a8204e60a9ea5ebfaf02b5c996d4aafaf808af | [] | no_license | florije1988/flask_regular | 19da04c59fbf600274d206750ccb8cf355db2d24 | 1219e4efbad76202d6dca7e4b2148344ea9edf8c | refs/heads/master | 2020-12-24T13:21:29.840919 | 2014-12-16T00:58:15 | 2014-12-16T00:58:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | # -*- coding: utf-8 -*-
__author__ = 'florije'
# Flask blueprint for the todo sub-application.
# NOTE(review): the blueprint is registered under the name 'app_task' while
# the variable/package is app_todo -- confirm the name is intentional.
from flask import Blueprint
from app.custom_api import Api
app_todo = Blueprint('app_task', __name__)
api_todo = Api(app_todo, catch_all_404s=True)
# Imported after api_todo exists -- presumably to avoid a circular import;
# verify against views.py.
from . import views
api_todo.add_resource(views.HelloHandler, '/hello') | [
"florije1988@gmail.com"
] | florije1988@gmail.com |
9c1b67405acfc447e0bcde61a0b406ab29189c33 | f4713830c8519daca9d75ec692a6937ee03c74d4 | /Problems/Algorithms/953. Verifying an Alien Dictionary/alien_dictionary.py | af8a014ae986a3a0467e9a3207355cbfdb4b4240 | [
"MIT"
] | permissive | xuedong/leet-code | a0dd38cb884292de9d947718bb00160eff2b0f00 | 285d49cd7061ec43368d63b7c7c56763be520570 | refs/heads/master | 2023-09-03T02:38:55.932182 | 2023-09-02T18:35:42 | 2023-09-02T18:35:42 | 189,745,542 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | #!/usr/bin/env python3
from typing import List
class Solution:
    def isAlienSorted(self, words: List[str], order: str) -> bool:
        """Return True if *words* is sorted under the alphabet *order*.

        Each word is translated into its per-character ranks in *order*;
        the list is sorted iff every adjacent pair compares <=.
        (Cleanup: no longer shadows the builtin `map` or rebinds `words`.)
        """
        rank = {ch: idx for idx, ch in enumerate(order)}
        ranked = [[rank[ch] for ch in word] for word in words]
        return all(a <= b for a, b in zip(ranked, ranked[1:]))
| [
"shang.xuedong@yahoo.fr"
] | shang.xuedong@yahoo.fr |
ff0d63e5404e77e306860887c8d0ff6eb8b09f4a | 426e3b7138107d4ef6fb048a0f7b1c4d72617ee6 | /MRI_ProstateCancer_Classification/test.py | e2fca76eb8b994975e620311388537eb98390f09 | [] | no_license | andrewmlu/MRI-prostate | 5d8d16987ab197e79dd231cad61ce65c48cd50d1 | 760552cfea0a3056d21ba1ac81261f5e2af1fe46 | refs/heads/master | 2023-01-23T10:29:46.507468 | 2019-09-09T15:20:31 | 2019-09-09T15:20:31 | 198,726,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | #%%
import keras
import numpy as np
import matplotlib.pyplot as plt
#%%
# Load pre-registered validation volumes. The 5-index access below implies
# an array of at least 5 dimensions -- presumably
# (sample, slice, height, width, channel); TODO confirm.
# NOTE(review): `import keras` appears unused in this cell block.
X = np.load('./CoRegistration/img_valid_data_3d_t2_tse_tra.npy')
#%%
print(X.shape)
# Display the first 16 slices of sample 0, channel 0.
for i in range(16):
    plt.imshow(X[0,i,:,:,0])
    plt.show()
#%%
print(X.shape)
# Single slice (index 3) from sample 4500.
plt.imshow(X[4500,3,:,:,0])
plt.show() | [
"lu.andrew.m@gmail.com"
] | lu.andrew.m@gmail.com |
3e2d840dab7fafdba0367c9dcc831eaf950482c9 | 7554c1309bb2409618ced1747b0c6052fe7b963c | /audio-streaming-client-python/client_demo_simple.py | 92cdbe562c3a1d069688c4370cc2515b5a7b9926 | [] | no_license | TheCarryKing/pie | ec464e1a459bfc602cdfeabac4f07318273df485 | e18301e26c94004e84be73e2d31489537c2989ef | refs/heads/master | 2022-04-24T22:49:53.692507 | 2020-04-26T12:00:05 | 2020-04-26T12:00:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,816 | py | # -*-coding:utf-8-*-
from baidu_acu_asr.asr_client import AsrClient
from baidu_acu_asr.asr_product import AsrProduct
import time
import logging
import baidu_acu_asr.audio_streaming_pb2
def run():
    """Stream a test audio file to the ASR service, retrying up to 5 times.

    On any exception while consuming the response stream, a brand-new client
    (gRPC channel) is created and the same audio is resent after a short
    back-off. Recognized fragments are logged; any non-fragment response is
    logged as a warning and ends the current attempt.

    Fixes vs. the original: the module-level setting is named `product_id`
    (the original referenced an undefined `product`, a NameError at runtime),
    and the exception object itself is logged (`Exception.message` does not
    exist on Python 3).
    """
    for attempt in range(5):
        client = AsrClient(url, port, product_id, enable_flush_data,
                           log_level=log_level,
                           user_name=user_name,
                           password=password)
        responses = client.get_result("testaudio/xeq16k.wav")
        try:
            for response in responses:
                if response.type == baidu_acu_asr.audio_streaming_pb2.FRAGMENT_DATA:
                    logging.info("%s\t%s\t%s\t%s",
                                 response.audio_fragment.start_time,
                                 response.audio_fragment.end_time,
                                 response.audio_fragment.result,
                                 response.audio_fragment.serial_num)
                else:
                    # NOTE(review): error_message is formatted with %d --
                    # likely should be %s if the field is a string; confirm.
                    logging.warning("type is: %d, error code: %d, error message: %d",
                                    response.type, response.error_code, response.error_message)
                    break
        except Exception as ex:
            # On error, retry the current audio clip on a fresh channel.
            logging.error("encounter an error: %s, will create a new channel and retry audio! times : %d",
                          ex, attempt + 1)
            time.sleep(0.5)
if __name__ == '__main__':
    logging.basicConfig(filename="asr_result.log")
    # Module-level settings consumed by run() above.
    log_level = 0
    url = "127.0.0.1"
    port = "8050"
    # product_id = AsrProduct.INPUT_METHOD
    product_id = "888"
    # NOTE(review): sample_rate is defined but not passed to AsrClient in
    # run() -- confirm whether it should be.
    sample_rate = 16000
    enable_flush_data = True
    user_name = "abc"
    password = "123"
run() | [
"xiashuai01@baidu.com"
] | xiashuai01@baidu.com |
3342dbd03130abc2b867b2e3e093a75c7f00aafa | 1e177ebdcb470f738c058606ac0f86a36085f661 | /Python3/Tkinter/tkinter020.py | 23c5a22cc77a5d384d51239477848c13a696f07a | [] | no_license | robingreig/raspi-git | 5cbdd295c1048a0571aa2c2f8576438269439f07 | 7373bf94557d7a88c8f343362ba64f9cd19c8ce7 | refs/heads/master | 2023-08-31T03:16:17.286700 | 2023-08-26T11:54:23 | 2023-08-26T11:54:23 | 16,873,881 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | #!/usr/bin/python3
from tkinter import *
# Minimal Tk demo: a window containing one (empty) Frame, then the event loop.
root = Tk()
myContainer1 = Frame(root)
myContainer1.pack()
root.mainloop()
| [
"robin.greig@calalta.com"
] | robin.greig@calalta.com |
b6cb0d98aa20ba4ad45816517fb090f55425bab8 | 493fb070c7f60137ae5f0f92e64f416445e2d126 | /managers/swap.py | ac5abed4965adfdbb709030be04f58be42461a64 | [] | no_license | nyko27/algorithms_lab1 | a93365c4481c6a86c719e3d3127ca45b8004df83 | a52871b620ab343de7af0e58eabb35fb82e50228 | refs/heads/main | 2023-01-05T16:25:13.652155 | 2020-10-27T14:43:34 | 2020-10-27T14:43:34 | 303,790,346 | 0 | 0 | null | 2020-10-27T14:43:35 | 2020-10-13T18:09:54 | null | UTF-8 | Python | false | false | 169 | py | def swap_elements(array, element_index, second_element_index):
array[element_index], array[second_element_index] = array[second_element_index], array[element_index]
| [
"yuranykolysak@gmail.com"
] | yuranykolysak@gmail.com |
2735e4cab031de78116192c8f4f40ac4584083ce | 2dd7c708b76d6affba1323ff7b57b768200e863b | /2015/Day_18/day18_1.py | 84ebf3f30809064e6995eb1362784a4a635bd467 | [] | no_license | Daniel-Fernandez-951/AdventOfCode | c8cccf9742e3996ef1235ac7f9ab0391ecc10157 | d708d089fc1170a76e54c8b25332063e55fa8329 | refs/heads/main | 2023-02-01T04:27:17.216222 | 2020-12-18T08:04:10 | 2020-12-18T08:04:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,303 | py | from copy import deepcopy
# Parse the puzzle grid: '#' marks a lit cell (True), anything else is off.
# NOTE(review): 'AOC_2015\day18.txt' works only because '\d' is not an escape
# sequence -- a raw string would be safer.
lights = [[True if x == '#' else False for x in y] for y in open('AOC_2015\day18.txt').read().split('\n')]
def get_num_neighbors(x, y, lights):
    """Count how many of the (up to eight) neighbours of (x, y) are lit.

    Negative indices are rejected explicitly (they would wrap around in
    Python); positions past the grid edge are ignored via IndexError.
    """
    lit = 0
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            if dx == 0 and dy == 0:
                continue
            nx, ny = x + dx, y + dy
            if nx < 0 or ny < 0:
                continue
            try:
                if lights[nx][ny]:
                    lit += 1
            except IndexError:
                pass
    return lit
# Run 100 steps of the Game-of-Life-style animation:
# a lit cell stays on with 2 or 3 lit neighbours; an off cell turns on
# with exactly 3 lit neighbours.
for step in range(100):
    new_lights = []
    for x in range(len(lights)):
        row = []
        for y in range(len(lights[x])):
            a = get_num_neighbors(x, y, lights)
            if lights[x][y]:
                if a == 3 or a == 2:
                    row.append(True)
                else:
                    row.append(False)
            else:
                if a == 3:
                    row.append(True)
                else:
                    row.append(False)
        new_lights.append(row)
    # NOTE(review): new_lights is rebuilt from scratch each step, so a plain
    # assignment would suffice here; deepcopy is redundant but harmless.
    lights = deepcopy(new_lights)
    # print(*(''.join(['#' if r else '.' for r in l]) for l in lights), sep='\n')
    # print()
# Count lit cells after the final step.
total = 0
for x in lights:
    total += x.count(True)
print(total) | [
"Max.Techniche@gmail.com"
] | Max.Techniche@gmail.com |
13c365ba493e7a5703589a2025cbc4b9bf7f895a | 0de1ffce4bc996e44561907e31fa3e25d93be03b | /lab 2/main.py | 131d7cde3f017742bfbef0849a641a6752639361 | [] | no_license | moxxiq/db-2y-labs | 59b891ac3c6b9e224957fc73de7336c251d2244c | 966c277471d4f1afabadf4ad882d05887018d46c | refs/heads/master | 2023-03-23T10:37:05.771818 | 2021-02-02T22:48:10 | 2021-02-02T22:48:10 | 257,173,634 | 1 | 0 | null | 2021-03-19T12:36:26 | 2020-04-20T04:44:55 | Python | UTF-8 | Python | false | false | 1,430 | py | import cx_Oracle
import cred
# Connect to the local Oracle XE instance; credentials live in the
# (uncommitted) cred module.
conn = cx_Oracle.connect(cred.name, cred.passw, "localhost/XE")
cur = conn.cursor()
# Query 1: top-10 artists by number of artworks. The LEFT JOIN keeps artists
# with zero artworks; ROWNUM limits the ordered subquery to ten rows.
cur.execute("""
    SELECT *
    FROM (SELECT ARTIST_ID, ARTIST_NAME, COUNT(RAA.ARTWORK_ARTWORK_ID) ARTWORKS_COUNT
    FROM ARTIST
    LEFT JOIN RELATION_ARTWORK_ARTIST RAA on ARTIST.ARTIST_ID = RAA.ARTIST_ARTIST_ID
    GROUP BY ARTIST_ID, ARTIST_NAME
    ORDER BY ARTWORKS_COUNT DESC)
    WHERE ROWNUM <= 10
    """)
query1 = cur.fetchmany(10)
# 'Запит' is Ukrainian for 'Query'.
print('\nЗапит 1')
print(*list(i[0] for i in cur.description))
for row in query1:
    print(*row)
# Query 2: share (%) of all artworks credited to each processing officer.
cur.execute("""
SELECT PROC_OFFICER.PROC_OFFICER_NAME, round(COUNT(RELATION_AO.ARTWORK_ARTWORK_ID)*100/all_count,2) ARTWORKS_CREDITED_COUNT
FROM PROC_OFFICER
JOIN RELATION_AO on PROC_OFFICER.PROC_OFFICER_NAME = RELATION_AO.PROC_OFFICER_NAME
, (select count(ARTWORK_ID) as all_count from ARTWORK)
GROUP BY PROC_OFFICER.PROC_OFFICER_NAME, all_count
ORDER BY ARTWORKS_CREDITED_COUNT DESC
""")
print('\nЗапит 2')
query2 = cur.fetchmany(10)
print(*list(i[0] for i in cur.description))
for row in query2:
    print(*row)
# Query 3: number of artworks per creation year, chronologically.
cur.execute("""
SELECT ARTWORK_CREATION_YEAR, COUNT(ARTWORK_ID) NUMBER_OF_ARTWORKS
FROM ARTWORK
GROUP BY ARTWORK_CREATION_YEAR
ORDER BY ARTWORK_CREATION_YEAR
""")
print('\nЗапит 3')
query3 = cur.fetchmany(10)
print(*list(i[0] for i in cur.description))
for row in query3:
    print(*row)
cur.close()
conn.close() | [
"dimanavsisto@gmail.com"
] | dimanavsisto@gmail.com |
3e33a95740de2a609722833a58f612500ad751a6 | 0bcb70eceea00ed1ed3627eee0be0b72e3112dd8 | /seok/views/vote_views.py | 7b9ea427e2020168ece3bb569c5f0557716045b2 | [] | no_license | odongs/dong | d6afad4e7fe58d8e1e71cd265a280b5228643320 | 17ae5f8eafefe13939d8bc2a6573a99312256b9c | refs/heads/master | 2023-03-19T11:46:35.394134 | 2021-03-16T06:40:48 | 2021-03-16T06:40:48 | 328,822,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404, redirect
from ..models import Question, Answer
@login_required(login_url='common:login')
def vote_question(request, question_id):
    """
    seok: record the logged-in user's recommendation (upvote) of a question.

    Self-votes are rejected with an error message (Korean text: "you cannot
    recommend your own post"). Always redirects to the question detail page.
    """
    question = get_object_or_404(Question, pk=question_id)
    if request.user == question.author:
        messages.error(request, '본인이 작성한 글은 추천할수 없습니다')
    else:
        question.voter.add(request.user)
    return redirect('seok:detail', question_id=question.id)
@login_required(login_url='common:login')
def vote_answer(request, answer_id):
    """
    seok: record the logged-in user's recommendation (upvote) of an answer.

    Self-votes are rejected with an error message (Korean). Redirects to the
    parent question's detail page.
    """
    answer = get_object_or_404(Answer, pk=answer_id)
    if request.user == answer.author:
        messages.error(request, '본인이 작성한 글은 추천할수 없습니다')
    else:
        answer.voter.add(request.user)
return redirect('seok:detail', question_id=answer.question.id) | [
"dhehdtjr007@naver.com"
] | dhehdtjr007@naver.com |
0ba9aca97b1c1f59da1afb823752e4f46a680b96 | feae88b4a8bc0aba388dcc2eeb7debb49d736809 | /apps/second_app/urls.py | fb99d9914ffc2c2fedcdee10fd14c61afe4e550b | [] | no_license | john-gore/belt3_retry | ec8a5582382fc00f0bcb3cf973fe9cd073ed571c | 03aa6d7ff9988615a96d2c882282107d389b1c52 | refs/heads/master | 2021-07-21T11:11:42.972344 | 2017-10-29T21:34:09 | 2017-10-29T21:34:09 | 108,772,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from django.conf.urls import url
from django.contrib import admin
from . import views
from ..first_app.models import User # This line is new!
# Route table: the app root maps to views.index.
urlpatterns = [
    url(r'^$', views.index, name='index') # This line has changed!
] | [
"johngore@Johns-MBP.localdomain"
] | johngore@Johns-MBP.localdomain |
958e3c40bcd396ccc9057e1589dd0d12791f5c09 | b4e8b85f5154bba569b83227d5d1a57acc9c10f7 | /Script/createdata.py | e0ccdc861d699f22b344f5489274d6c1994f61be | [] | no_license | Lazerbeam50/Roguelike-TBS | 1380bd2ce34705e9c3ff8bd77cf2c8ce8a5e3aac | 711e92439e91d0bf95cef6137d20653e0e504088 | refs/heads/main | 2023-03-06T04:19:42.842811 | 2021-02-20T21:28:27 | 2021-02-20T21:28:27 | 340,178,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,428 | py | """
Used to set up the empty database and tables.
"""
import sqlite3
def set_up_empty_database():
    """
    Create the SQLite database 'Game data/game_data' and all game tables.

    Idempotent: every statement uses CREATE TABLE IF NOT EXISTS, so running
    it against an existing database changes nothing. Fixes vs. the original:
    the AmbiguousNames statement was issued twice (duplicate removed), the
    repeated cursor.execute calls are driven from one tuple of DDL, and the
    connection is closed even when a statement raises (try/finally).
    """
    ddl_statements = (
        # Name pools used by character generation.
        '''
        CREATE TABLE IF NOT EXISTS AmbiguousNames
        (
        id INTEGER PRIMARY KEY,
        name TEXT
        )
        ''',
        '''
        CREATE TABLE IF NOT EXISTS FemaleNames
        (
        id INTEGER PRIMARY KEY,
        name TEXT
        )
        ''',
        '''
        CREATE TABLE IF NOT EXISTS MaleNames
        (
        id INTEGER PRIMARY KEY,
        name TEXT
        )
        ''',
        '''
        CREATE TABLE IF NOT EXISTS LastNames
        (
        id INTEGER PRIMARY KEY,
        name TEXT
        )
        ''',
        # Character classes: weapon/magic aptitude flags, base stats with
        # their standard deviations, growth rates (with SDs), movement and
        # display data.
        '''
        CREATE TABLE IF NOT EXISTS Classes
        (
        id INTEGER PRIMARY KEY,
        name TEXT,
        usesSwords INTEGER,
        usesAxes INTEGER,
        usesLances INTEGER,
        usesBows INTEGER,
        usesDaggers INTEGER,
        usesDestruction INTEGER,
        usesRestoration INTEGER,
        catForm INTEGER,
        bearForm INTEGER,
        breathWeapon INTEGER,
        usesTime INTEGER,
        usesEnchanting INTEGER,
        usesWitchcraft INTEGER,
        hpBase INTEGER,
        strengthBase INTEGER,
        magicBase INTEGER,
        dexterityBase INTEGER,
        speedBase INTEGER,
        defenceBase INTEGER,
        resistanceBase INTEGER,
        luckBase INTEGER,
        hpBaseSD INTEGER,
        strengthBaseSD INTEGER,
        magicBaseSD INTEGER,
        dexterityBaseSD INTEGER,
        speedBaseSD INTEGER,
        defenceBaseSD INTEGER,
        resistanceBaseSD INTEGER,
        luckBaseSD INTEGER,
        hpGrowth INTEGER,
        strengthGrowth INTEGER,
        magicGrowth INTEGER,
        dexterityGrowth INTEGER,
        speedGrowth INTEGER,
        defenceGrowth INTEGER,
        resistanceGrowth INTEGER,
        luckGrowth INTEGER,
        hpGrowthSD INTEGER,
        strengthGrowthSD INTEGER,
        magicGrowthSD INTEGER,
        dexterityGrowthSD INTEGER,
        speedGrowthSD INTEGER,
        defenceGrowthSD INTEGER,
        resistanceGrowthSD INTEGER,
        luckGrowthSD INTEGER,
        movement INTEGER,
        flying INTEGER,
        mounted INTEGER,
        critBonus INTEGER,
        sprite TEXT
        )
        ''',
        # Weapons and spells.
        '''
        CREATE TABLE IF NOT EXISTS WeaponsSpells
        (
        id INTEGER PRIMARY KEY,
        name TEXT,
        magic INTEGER,
        itemType INTEGER,
        rank INTEGER,
        uses INTEGER,
        power INTEGER,
        hit INTEGER,
        crit INTEGER,
        scalesWith INTEGER,
        minRange INTEGER,
        maxRange INTEGER,
        weight INTEGER,
        exp INTEGER,
        value INTEGER,
        effect INTEGER,
        bonus_vs1 INTEGER,
        bonus_vs2 INTEGER
        )
        ''',
        # Maps, their per-coordinate tiles and unit deployment slots.
        '''
        CREATE TABLE IF NOT EXISTS Maps
        (
        id INTEGER PRIMARY KEY,
        name TEXT,
        tileset INTEGER
        )
        ''',
        '''
        CREATE TABLE IF NOT EXISTS MapTiles
        (
        id INTEGER,
        x TEXT,
        y INTEGER
        )
        ''',
        '''
        CREATE TABLE IF NOT EXISTS MapDeployment
        (
        id INTEGER,
        force INTEGER,
        boss INTEGER,
        x INTEGER,
        y INTEGER
        )
        ''',
        # Terrain tile definitions (defence/avoid/heal bonuses, move cost).
        '''
        CREATE TABLE IF NOT EXISTS Tiles
        (
        id INTEGER PRIMARY KEY,
        name TEXT,
        def INTEGER,
        avoid INTEGER,
        heal INTEGER,
        moveCost INTEGER,
        sprite INTEGER
        )
        ''',
    )
    db = sqlite3.connect('Game data/game_data')
    try:
        cursor = db.cursor()
        for ddl in ddl_statements:
            cursor.execute(ddl)
        db.commit()
        cursor.close()
    finally:
        db.close()
set_up_empty_database() | [
"FSolofin@gmail.com"
] | FSolofin@gmail.com |
50f0a61a6acffc49e6f9907a711fbfbce9e14a7c | 9fa6547def43d4d56892eafa3f0a7f7b54a50227 | /src/backend/app/tasks/config.py | 3d9fb115ecd781a3ffd1f1a581be539c4afd6933 | [
"Apache-2.0"
] | permissive | code4security/LuWu | f66c23fe17605840c2781e38d2378f5ede03c6bd | 47ae6e1c5a177b1c5e4253f4158db5a50c04d1dc | refs/heads/master | 2022-11-26T08:40:29.565173 | 2020-07-08T01:14:01 | 2020-07-08T01:14:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,813 | py | from typing import List
from pydantic import parse_obj_as
from app.crud.crud_config import crud_isp
from app.db.session import session_manager
from app.schemas.config import IspItem
from app.tasks.base import BaseTask
from utils.redis import RedisPool
class ReloadVpsIspConfigTask(BaseTask):
    """Task: refresh cached VPS spec data for every configured ISP.

    Each distinct provider name is refreshed at most once per run; the task
    result lists the providers that reloaded successfully.
    """
    name = 'reload_vps_isp_config'
    def run(self, *args, **kwargs) -> dict:
        """Iterate all VPS ISP records and force-reload spec data per provider."""
        task_result = {
            'handled_vps_isp_list': []
        }
        with session_manager() as db_session:
            vps_isp_obj_list = crud_isp.get_vps_isp_list(db_session).all()
            vps_isp_data_list = parse_obj_as(List[IspItem], vps_isp_obj_list)
            for vps_isp_data in vps_isp_data_list:
                isp_provider_name = vps_isp_data.provider_name
                # Skip providers already refreshed during this run.
                if isp_provider_name in task_result['handled_vps_isp_list']:
                    continue
                reload_result = self.reload_vps_isp_config(vps_isp_data.id)
                if reload_result:
                    task_result['handled_vps_isp_list'].append(isp_provider_name)
        return self.set_result(task_result)
    def reload_vps_isp_config(self, vps_isp_id: int) -> bool:
        """Force-refresh the Redis-cached spec data for one ISP; True on success."""
        with session_manager() as db_session:
            rp = RedisPool()
            try:
                vps_raw_spec_data = rp.get_vps_spec_data(
                    db_session=db_session, isp_id=vps_isp_id, reload=True
                )
            # NOTE(review): bare except also swallows SystemExit and
            # KeyboardInterrupt -- prefer `except Exception`.
            except:
                vps_raw_spec_data = None
            return bool(vps_raw_spec_data)
class CreateVpsIspSshKey(BaseTask):
    """Task stub: create and register an SSH key pair for a VPS ISP.

    The full flow is currently disabled, so the task only returns an empty
    result payload.
    """
    name = 'create_vps_isp_ssh_key'
    def run(self, vps_isp_id: int):
        # The disabled draft (removed commented-out code) fetched the ISP
        # record, got-or-created local key material via crud_vps_ssh_key,
        # registered the public key with the provider and stored the
        # returned ssh_key_id before populating create_result.
        create_result = {}
        return self.set_result(create_result)
| [
"root@lazymaple.pw"
] | root@lazymaple.pw |
bea2ef34103c1be80a214a122e6c43d572c7b693 | f4f09a1a04ff6f3029b5bd4c7f16a7315387cdd3 | /avatar_pose_controls/handctrl_gestures/__i.py | 308487f520d39e61e5666a7e30a22a54ac1c8d1e | [
"MIT"
] | permissive | YetzabethGC/chilean_sign_language_speller | 469d268c90c8d1b379a7ae95fe655cfe0c7ea23b | c7fa69321ef4a6073b88979432eca98f27f7bf47 | refs/heads/master | 2023-02-02T12:52:21.211099 | 2020-12-21T22:30:05 | 2020-12-21T22:30:05 | 284,134,122 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | import bpy
import sys
sys.path.append("/home/vpoblete/yetzabethg/New Folder/") # directory containing the control modules
import handctrl as hc
##### HAND CONTROL #####
# Left hand
hc.hLH(0) # HEIGHT (0=rest, 1=stomach, 2=chest, 3=neck, 4=face, 5=head)
hc.dLH(0) # DISTANCE FROM BODY (0=center, 1=away)
hc.lhF(0,0,0,0,0) # FINGER CONTROL (1=thumb, 2=index, 3=middle, 4=ring, 5=pinky), values 0 (open) to 6 (closed)
##### HAND CONTROL #####
# Right hand
hc.hRH(3) # HEIGHT (0=rest, 1=stomach, 2=chest, 3=neck, 4=face, 5=head)
hc.dRH(0) # DISTANCE FROM BODY (0=center, 1=away, 2=opposite side)
hc.rhF(6,6,6,6,0) # FINGER CONTROL (1=thumb, 2=index, 3=middle, 4=ring, 5=pinky), values 0 (open) to 6 (closed)
# Fine right-finger detail and wrist rotation -- argument meanings are not
# documented here; see the handctrl module.
hc.detRF(2,0,0)
hc.detRF(1,0,0)
hc.rotRH(0,0)
| [
"yetzabeth.gonzalez@hotmail.com"
] | yetzabeth.gonzalez@hotmail.com |
96871bfb4da9eeaef49b953463556f6f8f8b9173 | 254db35021faf41430b3ffc98f1deae17c881ea5 | /wine_recipes/urls.py | 2b49eee6357796ac7b3ded5a70da46ad6d1e6e2b | [] | no_license | smiledt/IS411-Winesitters | 4e4f71f9eaf8f9468d2c05413f97ac5bad4bdeaa | 2e6946a6b7f558e3ec441d0a77505587fda48c37 | refs/heads/master | 2023-07-31T03:52:56.209082 | 2020-05-12T11:17:19 | 2020-05-12T11:17:19 | 238,153,903 | 1 | 0 | null | 2021-09-22T18:59:20 | 2020-02-04T08:05:11 | HTML | UTF-8 | Python | false | false | 350 | py | from django.urls import path
from wine_recipes import views
# URL namespace used when reversing these routes (e.g. 'wine_recipes:index').
app_name = 'wine_recipes'
urlpatterns = [
    # Index pattern
    path('', views.index, name='index'),
    # /wine_recipes pattern, this one will be the final
    path('wine_recipes/', views.recipes, name='wine_recipes'),
    # Form for adding a new recipe.
    path('new_recipe/', views.new_recipe, name='new_recipe')
]
| [
"dereksmilees@gmail.com"
] | dereksmilees@gmail.com |
add8e9fd2525edaad3e388569632dcd7a8a6e0c0 | abc4991a47998d5a6946c2609cc9b6926b95c59e | /get_by_tor.py | e0fb5200e94e646706f21f1cd2f52066f1600114 | [
"MIT"
] | permissive | Guiraud/Formation-Scraping-1 | 09ad6651d3d210f41300fd126008d7bfcc6f685f | 2194a06da9fc727abe160157295f7e984161c0a3 | refs/heads/master | 2022-12-12T21:02:11.477275 | 2019-08-07T21:22:52 | 2019-08-07T21:22:52 | 201,124,297 | 0 | 0 | MIT | 2022-12-08T05:59:20 | 2019-08-07T20:37:08 | Python | UTF-8 | Python | false | false | 872 | py | from selenium import webdriver
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
# NOTE(review): `import os` appears unused in this script.
import os
# Reuse the local Tor Browser profile and route traffic through the Tor
# SOCKS proxy at 127.0.0.1:9050.
profile = FirefoxProfile(r'/Users/mguiraud/Library/Application Support/TorBrowser-Data/Browser/o45k8lgc.default')
profile.set_preference('network.proxy.type', 1)
profile.set_preference('network.proxy.socks', '127.0.0.1')
profile.set_preference('network.proxy.socks_port', 9050)
# NOTE(review): remote DNS is disabled here, so DNS lookups may resolve
# outside Tor -- confirm this is intended.
profile.set_preference("network.proxy.socks_remote_dns", False)
profile.update_preferences()
#driver = webdriver.Firefox()
driver = webdriver.Firefox(firefox_profile=profile)
# check.torproject.org reports whether the request arrived through Tor.
driver.get("http://check.torproject.org")
print("done !")
print(driver.find_elements_by_css_selector(".small")[0].text)
# CSS classes of the success / failure banner on the check page.
ok_tor = ".on"
no_tor = ".off"
if driver.find_elements_by_css_selector(ok_tor):
    print("ça marche :) ")
if driver.find_elements_by_css_selector(no_tor):
print("ça marche pô :(") | [
"mehdi.guiraud@gmail.com"
] | mehdi.guiraud@gmail.com |
458f132534c8a8adb8eae398e8594983f35991ad | 4501dc41b82969b665f9d68ffc8729fabbd50b60 | /1-flask/venv/bin/flask | 961e2559b9e1cecd0dcd703ed36f1016a11f90f3 | [] | no_license | iancmason/100daysOfPython | f12ad1498a7f1d6a635fc73cfb0aaf09c59fcb68 | a93ba6ed1b4ab009734c0454f4f132114a16abb9 | refs/heads/master | 2021-06-21T07:18:42.825660 | 2021-01-22T19:35:05 | 2021-01-22T19:35:05 | 201,953,731 | 0 | 0 | null | 2021-03-30T02:44:51 | 2019-08-12T15:06:21 | Python | UTF-8 | Python | false | false | 260 | #!/Users/imason/code/Python/100daysOfCode/1-flask/venv/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated console-script shim (pip/setuptools entry point for the
# `flask` CLI inside this virtualenv) -- normally not edited by hand.
import re
import sys
from flask.cli import main
if __name__ == '__main__':
    # Strip a trailing "-script.py(w)"/".exe" (Windows launcher artifacts)
    # from argv[0] before handing control to flask.cli.main.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"ian.christopher.mason@gmail.com"
] | ian.christopher.mason@gmail.com | |
f205239515d090b06d7334b477d110d97c8c5e8c | 33b97572f76c7f6ebfac4407ba2bca467edf0745 | /batting_order_back_test.py | 37253b7d77e4920adf5e9e0e14ce386e307c059f | [] | no_license | fidler-analyst/Batting-Order-Optimization | 0e83247cf4a4df226a6130a45e4c2421ff9a143e | 88a7eddaa43f245cc2cd7030f3ac5d3527d76967 | refs/heads/main | 2023-08-24T06:14:41.964184 | 2021-10-16T22:24:22 | 2021-10-16T22:24:22 | 389,487,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,712 | py | #batting_order_back_test
# Back-test of a proposed Rockies batting order against the 2021
# play-by-play feed: for every game in which exactly the eight proposed
# position players batted, replay each batter's first four real
# plate-appearance outcomes in the proposed order through a simple
# base-out simulation, then compare the simulated run total with the
# game's recorded score.
import pandas as pd

df3 = pd.read_csv(r'C:\Users\maxfi\Desktop\Python\baseball\2021-mlb-season-pbp-feed.csv',
                  usecols=['GAME ID', 'BATTER', 'PLAYTYPE', 'ROADSCORE', 'HOMESCORE', 'INNING'])
df3 = df3.dropna(axis=0)
# Team codes sit at fixed offsets inside the game-id string.
df3.insert(2, 'HM_TEAM', df3['GAME ID'].str[10:13])
df3.insert(3, 'RD_TEAM', df3['GAME ID'].str[6:9])
df3.INNING = df3['INNING'].str[-1]  # keep only the T(op)/B(ottom) marker
# Rockies plate appearances: bottom halves at home, top halves on the road.
home_games = df3[df3['HM_TEAM'] == 'COL']
home_games = home_games[home_games['INNING'] == 'B']
road_games = df3[df3['RD_TEAM'] == 'COL']
road_games = road_games[road_games['INNING'] == 'T']
# Proposed order, ranked by weighted OBP.
best_lup = ['Yonathan Daza', 'C.J. Cron', 'Ryan McMahon', 'Joshua Fuentes',
            'Raimel Tapia', 'Charlie Blackmon', 'Garrett Hampson', 'Dom Nunez']
all_rocky_games = pd.concat([home_games, road_games])
list_of_games = pd.Series(all_rocky_games['GAME ID']).drop_duplicates().to_list()
# Keep only the games whose first eight batters are exactly the proposed eight.
games_with_the_boys = []
for game in list_of_games:
    single_game = all_rocky_games[all_rocky_games['GAME ID'] == game]
    players_in_game = pd.Series(single_game['BATTER']).head(n=8)
    if sorted(players_in_game) == sorted(best_lup):
        games_with_the_boys.append(game)
score_comparison = pd.DataFrame(index=games_with_the_boys, columns=['Actual Score', 'My Model'])
for game in games_with_the_boys:
    game_with_my_order = all_rocky_games[all_rocky_games['GAME ID'] == game]
    # First four plate-appearance outcomes per batter, in the proposed order.
    # NOTE(review): assumes every listed batter had at least four plate
    # appearances in the game; a shorter game raises here.
    my_order_df = pd.DataFrame(index=best_lup, columns=['1st', '2nd', '3rd', '4th'])
    for player in best_lup:
        player_df = game_with_my_order[game_with_my_order['BATTER'] == player]
        player_df = player_df.reset_index(drop=True)
        for slot, col in enumerate(['1st', '2nd', '3rd', '4th']):
            my_order_df.loc[player, col] = player_df.PLAYTYPE[slot]
    # Ninth spot (pitcher) is modelled as an automatic strikeout.
    my_order_df.loc[len(my_order_df)] = ['STRIKEOUT'] * 4
    my_order_df = my_order_df.rename(index={8: 'Pitcher'})
    # Flatten column-wise: everyone's 1st PA, then everyone's 2nd, etc.
    # (pd.concat replaces the Series.append chain, which pandas 2.0 removed.)
    my_event_list = pd.concat([my_order_df[col] for col in ['1st', '2nd', '3rd', '4th']])
    # --- replay the events through a simple base-out simulation ---
    inning = 1
    runs = 0
    outs = 0
    r1, r2, r3 = 0, 0, 0  # occupied flags for first/second/third base
    for play in my_event_list:
        if play == 'SINGLE':
            # Batter to first; runners on second and third score.
            if r1 == 0:
                if r2 == 0:
                    if r3 == 0:
                        r1 = 1
                    else:
                        r1, r3 = 1, 0
                        runs += 1
                else:
                    if r3 == 0:
                        r1, r2 = 1, 0
                        runs += 1
                    else:
                        r1, r2, r3 = 1, 0, 0
                        runs += 2
            else:
                if r2 == 0:
                    if r3 == 0:
                        r2 = 1
                    elif r3 == 1:
                        r2, r3 = 1, 0
                        runs += 1
                elif r2 == 1:
                    if r3 == 0:
                        runs += 1
                    elif r3 == 1:
                        r3 = 0
                        runs += 2
        elif play == 'DOUBLE':
            # Batter to second; runners on second and third score,
            # a runner on first moves to third.
            runs += r2 + r3
            if r1 == 1:
                r1, r2, r3 = 0, 1, 1
            else:
                r1, r2, r3 = 0, 1, 0
        elif play == 'TRIPLE':
            runs += r1 + r2 + r3
            r1, r2, r3 = 0, 0, 1
        elif play == 'HOME RUN':
            runs += r1 + r2 + r3 + 1
            r1, r2, r3 = 0, 0, 0
        elif play in ('WALK', 'INTENT WALK', 'HIT BY PITCH'):
            # Batter takes first; runners advance only when forced.
            # (These three play types previously shared identical
            # copy-pasted branches; merged with no behavior change.)
            if r1 == 0:
                if r2 == 0:
                    if r3 == 0:
                        r1 = 1
                    else:
                        r1, r3 = 1, 1
                else:
                    if r3 == 0:
                        r1, r2 = 1, 1
                    else:
                        r1, r2, r3 = 1, 1, 1
            else:
                if r2 == 0:
                    if r3 == 0:
                        r2 = 1
                    elif r3 == 1:
                        r2, r3 = 1, 1
                elif r2 == 1:
                    if r3 == 0:
                        r3 = 1
                    elif r3 == 1:
                        runs += 1
        elif play in ('FLYOUT', 'POP OUT', 'LINEOUT', 'STRIKEOUT', 'GROUNDOUT',
                      'FORCEOUT', 'FIELDERS CHOICE'):
            # Simple outs: no base-runner movement is modelled.
            outs += 1
        elif play == 'SAC BUNT':
            # Out recorded; runner on third is cleared but (as in the
            # original model) the run is deliberately not credited.
            outs += 1
            r3 = 0
        elif play == 'SAC FLY':
            # NOTE(review): credits a run even with third base empty --
            # preserved from the original model.
            runs += 1
            outs += 1
            r3 = 0
        elif play == 'FIELD ERROR':
            # Batter reaches first; forced runners move up one base each.
            if r1 == 0:
                r1 = 1
            elif r1 == 1:
                if r2 == 0:
                    r2 = 1
                elif r2 == 1:
                    if r3 == 0:
                        r3 = 1
                    elif r3 == 1:
                        runs += 1
        elif play in ('DOUBLE PLAY', 'GROUNDED INTO DP'):
            outs += 2
            r1, r2, r3 = 0, 0, 0
        if outs == 3:
            # Side retired: next inning, bases cleared.
            inning += 1
            outs = 0
            r1, r2, r3 = 0, 0, 0
    # BUG FIX: 'Actual Score' previously read from `single_game`, a stale
    # frame left over from the lineup-screening loop above, so every row
    # recorded the last screened game's score.  Use this game's frame.
    score_comparison.loc[game, 'Actual Score'] = game_with_my_order['HOMESCORE'].iloc[-1]
    score_comparison.loc[game, 'My Model'] = runs
score_comparison.to_excel(r'C:\Users\maxfi\Desktop\Python\baseball\Batting Order Optimizer\batting_order_back_test_results.xlsx')
| [
"noreply@github.com"
] | fidler-analyst.noreply@github.com |
a66a276129d5106408fd6ed8913421cd6fca40fd | 2c09147aa1622d749d7356eb941ef53ec4793184 | /phase_1.py | 3502d36f447ec37638e4d2b3e2c4e0bfdce4ef38 | [] | no_license | TheBroMoe/Berkeley-DB-Kijiji-Query | 7812ee9daa9c6e538643a9a8d65c26b5cd23074c | dea68293824889157c3336579c009614ca1f13e4 | refs/heads/master | 2020-04-07T18:40:42.484991 | 2019-07-22T00:24:41 | 2019-07-22T00:24:41 | 158,619,521 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,607 | py | import re
'''
This file includes terms extracted from ad titles and descriptions; for our purpose, suppose a term is a consecutive sequence of alphanumeric,
underscore '_' and dashed '-' characters, i.e [0-9a-zA-Z_-]. The format of the file is as follows: for every termT in the title or the
description of an ad with id a, there is a row in this file of the form t:a where t is the lowercase form of T. Ignore all special characters
coded as &#number; such as 産 which represents 産 as well as ', " and & which respectively represent ', " and &. Also ignore terms
of length 2 or less. Convert the terms to all lowercase before writing them out.
'''
def write_terms(path):
    """Emit one "t:a" line per term to <path>-terms.txt.

    A term is a maximal run of [0-9a-zA-Z_-] characters found in an ad's
    title or description, lowercased; terms of length 2 or less are
    skipped.  HTML entities are discarded first: &quot; &apos; and &amp;
    become spaces, any remaining &...; entity (e.g. &#28955;) is removed.

    BUG FIX: the entity-stripping regex literals had been HTML-decoded in
    place (leaving a syntax error in the description branch); they are
    reconstructed here from the behaviour the module docstring specifies.
    """
    with open(path + '-terms.txt', 'w') as fout:
        with open(path + '.txt', 'r') as fin:
            for line in fin:
                if not re.match("<ad>.*</ad>", line):
                    continue
                a = re.search("(<aid>)(.*)(</aid>)", line).group(2)
                # Title terms first, then description terms, as before.
                for tag in ('ti', 'desc'):
                    text = re.search("(<{0}>)(.*)(</{0}>)".format(tag), line).group(2).lower()
                    text = re.sub("(&quot;)|(&apos;)|(&amp;)", " ", text)
                    text = re.sub("&.*?;", "", text)
                    text = re.sub("[^0-9a-zA-Z-_]", " ", text)
                    for term in text.split():
                        # split() already yields pure [0-9a-zA-Z-_] runs.
                        if len(term) > 2:
                            fout.write("{}:{}\n".format(term, a))
    print("written to " + path + '-terms.txt')
'''
This file includes one line for each ad that has a non-empty price field in the form of p:a,c,l
where p is a number indicating the price and a, c, and l are respectively the ad id, category and location of the ad.
'''
def write_prices(path):
    """Emit one "p:a,c,l" line per ad to <path>-prices.txt.

    p is the ad's price right-justified to 12 characters; a, c and l are
    the ad id, category and location taken from the same <ad> record in
    <path>.txt.
    """
    out_name = path + '-prices.txt'
    with open(out_name, 'w') as fout:
        with open(path + '.txt', 'r') as fin:
            for line in fin:
                if not re.match("<ad>.*</ad>", line):
                    continue
                price, aid, cat, loc = (
                    re.search("(<{0}>)(.*)(</{0}>)".format(tag), line).group(2)
                    for tag in ('price', 'aid', 'cat', 'loc'))
                # Right-justify the price in a 12-character column.
                fout.write("{}:{},{},{}\n".format(price.rjust(12), aid, cat, loc))
    print("written to " + out_name)
'''
d:a,c,l where d is a non-empty date at which the ad is posted and a, c, and l are respectively the ad id, category and location of the ad.
'''
def write_pdates(path):
    """Emit one "d:a,c,l" line per ad to <path>-pdates.txt, where d is the
    posting date and a, c, l are the ad id, category and location."""
    out_name = path + '-pdates.txt'
    with open(out_name, 'w') as fout:
        with open(path + '.txt', 'r') as fin:
            for line in fin:
                if re.match("<ad>.*</ad>", line):
                    date, aid, cat, loc = (
                        re.search("(<{0}>)(.*)(</{0}>)".format(tag), line).group(2)
                        for tag in ('date', 'aid', 'cat', 'loc'))
                    fout.write("{}:{},{},{}\n".format(date, aid, cat, loc))
    print("written to " + out_name)
'''
This file includes one line for each ad in the form of a:rec where a is the ad id and rec is the full ad record in xml.
'''
def write_ads(path):
    """Emit one "a:rec" line per ad to <path>-ads.txt, where a is the ad id
    and rec is the full ad record (the original xml line)."""
    out_name = path + '-ads.txt'
    with open(out_name, 'w') as fout:
        with open(path + '.txt', 'r') as fin:
            for line in fin:
                if re.match("<ad>.*</ad>", line):
                    a = re.search("(<aid>)(.*)(</aid>)", line).group(2)
                    fout.write("{}:{}".format(a, line))
    # BUG FIX: this used to report "-pdates.txt" although the data was
    # written to "-ads.txt".
    print("written to " + out_name)
def main():
    """Prompt for the xml feed's base name (without ".txt") and generate
    all four derived index files next to it."""
    print("================")
    path = input("enter xml file: ")
    write_terms(path)
    write_pdates(path)
    write_prices(path)
    write_ads(path)
    print("================")
if __name__ =='__main__':
main()
| [
"wespeterson2@gmail.com"
] | wespeterson2@gmail.com |
e0e6bb89daac937ec046ab4dbb02ff8fd1a54917 | 59e3082ce3a2f9e873b908c669009acbcbf79d78 | /speech.py | 1d53100f60e3e8a122f23199e4b459480580971e | [] | no_license | PROTO204/chef | 0e5529f8f3b582677fe6ea98521ad113596be7a0 | 1df3b34cfd5596f06776b764d5284b539dc44904 | refs/heads/master | 2020-05-19T19:22:12.098925 | 2019-05-02T05:36:14 | 2019-05-02T05:36:14 | 185,178,035 | 0 | 0 | null | 2019-05-06T10:50:57 | 2019-05-06T10:50:57 | null | UTF-8 | Python | false | false | 341 | py | import speech_recognition as sr
# One-shot demo: record from the default microphone and print Google's
# speech-to-text transcription of the recording.
r = sr.Recognizer()
with sr.Microphone() as source:
    print("Say something")
    audio = r.listen(source)
try:
    print("You said:" + r.recognize_google(audio))
except sr.UnknownValueError:
    # The recognizer could not make out any speech in the recording.
    print("could not understand audio")
except sr.RequestError as e:
    # The recognition request itself failed (e.g. network problem).
    print(format(e))
| [
"siba@nyu.edu"
] | siba@nyu.edu |
7c84eefc810d6353c7ea038bea797d19876b7ea6 | ded95a944d97f8892f0041242181d035ef120644 | /sigmaz_nonadiabatic/nlevels=20/non_adiabtic.py | 90125498bab58acdd70f4858a82f18aebbc30adf | [] | no_license | Mulliken/dataAnalysis | ed844fbe7366a19ac3b47754efe5edb137ec8eb4 | 0f5a891587e791ae607fb7f6fb033f6088ae39d0 | refs/heads/main | 2023-08-19T09:50:20.621929 | 2021-10-08T15:57:52 | 2021-10-08T15:57:52 | 391,789,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,231 | py | import numpy as np
from scipy.optimize import curve_fit
# from fishbonett.starSpinBoson import SpinBoson, SpinBoson1D
from fishbonett.backwardSpinBoson import SpinBoson, SpinBoson1D, calc_U
from fishbonett.stuff import sigma_x, sigma_z, temp_factor, sd_zero_temp, drude1, lemmer, drude, _num, sigma_1
from scipy.linalg import expm
from time import time
import sys
# Command-line parameters (all integers): system energy, bath temperature,
# Drude gamma, and system-bath coupling strength.
ene = int(sys.argv[1])
temp = int(sys.argv[2])
gam=int(sys.argv[3])
coupling = int(sys.argv[4])
bath_length =int(200*5*1.5)
phys_dim = 20
bond_dim = 1000
# NOTE(review): the two comprehensions below are dead code -- `a` is
# immediately overwritten with the uniform [phys_dim]*bath_length list.
a = [np.ceil(phys_dim - N*(phys_dim -2)/ bath_length) for N in range(bath_length)]
a = [int(x) for x in a]
a = [phys_dim]*bath_length
print(a)
# Physical dimensions per site: reversed bath sites plus the 2-level system.
pd = a[::-1] + [2]
eth = SpinBoson(pd)
etn = SpinBoson1D(pd)
# set the initial state of the system. It's in the high-energy state |0>:
etn.B[-1][0, 1, 0] = 0.
etn.B[-1][0, 0, 0] = 1.
# spectral density parameters
g = 2000
eth.domain = [-g, g]
# Drude spectral density scaled by the temperature factor.
j = lambda w: drude(w, lam=3952.11670, gam=gam)* temp_factor(temp,w)
eth.sd = j
eth.he_dy = np.diag([-1, 1])/2
eth.h1e = np.diag([ene, 0]) + coupling*sigma_x
eth.build(g=1., ncap=50000)
print(eth.w_list)
print(eth.k_list)
# 0.5 ps ~ 0.1T
p = []
threshold = 1e-3
dt = 0.005/10
num_steps = 100*5 # Due to 2nd order Trotter, actual time is dt*2*num_steps
t = 0.
tt0=time()
for tn in range(num_steps):
    U1, U2 = eth.get_u(2*tn*dt, 2*dt, factor=2)
    t0 = time()
    etn.U = U1
    # Backward sweep over the chain.
    # NOTE(review): `j` (the spectral-density lambda above) is shadowed as
    # a loop index here; eth.sd already holds a reference, so behaviour is
    # unaffected, but a different name would be clearer.
    for j in range(bath_length-1,0,-1):
        print("j==", j, tn)
        etn.update_bond(j, bond_dim, threshold, swap=1)
    etn.update_bond(0, bond_dim, threshold, swap=0)
    etn.update_bond(0, bond_dim, threshold, swap=0)
    t1 = time()
    t = t + t1 - t0
    t0 = time()
    etn.U = U2
    # Forward sweep.
    for j in range(1, bath_length):
        print("j==", j, tn)
        etn.update_bond(j, bond_dim, threshold,swap=1)
    dim = [len(s) for s in etn.S]
    theta = etn.get_theta1(bath_length) # c.shape vL i vR
    # Reduced density matrix of the system site; record the |0> population.
    rho = np.einsum('LiR,LjR->ij', theta, theta.conj())
    pop = np.abs(rho[0,0])
    p = p + [pop]
    t1 = time()
    t = t + t1 - t0
tt1 = time()
print(tt1-tt0)
pop = [x.real for x in p]
print("population", pop)
pop = np.array(pop)
pop.astype('float32').tofile(f'./output/pop_sigmaz_{coupling}_{temp}_{ene}_{gam}.dat')
| [
"kavat.lux@gmail.com"
] | kavat.lux@gmail.com |
fbc05970539a311c1532e03d1461d962abe1cae2 | 5b4312ddc24f29538dce0444b7be81e17191c005 | /autoware.ai/1.12.0/devel/.private/vector_map_msgs/lib/python2.7/dist-packages/vector_map_msgs/msg/_PointArray.py | 302c83b9f7d628767effb2ae4bd898435e6dc65f | [
"MIT"
] | permissive | muyangren907/autoware | b842f1aeb2bfe7913fb2be002ea4fc426b4e9be2 | 5ae70f0cdaf5fc70b91cd727cf5b5f90bc399d38 | refs/heads/master | 2020-09-22T13:08:14.237380 | 2019-12-03T07:12:49 | 2019-12-03T07:12:49 | 225,167,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,546 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from vector_map_msgs/PointArray.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import vector_map_msgs.msg
import std_msgs.msg
class PointArray(genpy.Message):
_md5sum = "6d79425254a86e33112d6737776efb2b"
_type = "vector_map_msgs/PointArray"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
Point[] data
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: vector_map_msgs/Point
# Ver 1.00
int32 pid
float64 b
float64 l
float64 h
float64 bx
float64 ly
int32 ref
int32 mcode1
int32 mcode2
int32 mcode3
"""
__slots__ = ['header','data']
_slot_types = ['std_msgs/Header','vector_map_msgs/Point[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,data
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(PointArray, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.data is None:
self.data = []
else:
self.header = std_msgs.msg.Header()
self.data = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.data)
buff.write(_struct_I.pack(length))
for val1 in self.data:
_x = val1
buff.write(_get_struct_i5d4i().pack(_x.pid, _x.b, _x.l, _x.h, _x.bx, _x.ly, _x.ref, _x.mcode1, _x.mcode2, _x.mcode3))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.data is None:
self.data = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.data = []
for i in range(0, length):
val1 = vector_map_msgs.msg.Point()
_x = val1
start = end
end += 60
(_x.pid, _x.b, _x.l, _x.h, _x.bx, _x.ly, _x.ref, _x.mcode1, _x.mcode2, _x.mcode3,) = _get_struct_i5d4i().unpack(str[start:end])
self.data.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.data)
buff.write(_struct_I.pack(length))
for val1 in self.data:
_x = val1
buff.write(_get_struct_i5d4i().pack(_x.pid, _x.b, _x.l, _x.h, _x.bx, _x.ly, _x.ref, _x.mcode1, _x.mcode2, _x.mcode3))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.data is None:
self.data = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.data = []
for i in range(0, length):
val1 = vector_map_msgs.msg.Point()
_x = val1
start = end
end += 60
(_x.pid, _x.b, _x.l, _x.h, _x.bx, _x.ly, _x.ref, _x.mcode1, _x.mcode2, _x.mcode3,) = _get_struct_i5d4i().unpack(str[start:end])
self.data.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
# Lazily-created struct.Struct singletons used by the (auto-generated)
# PointArray serializers above.
_struct_I = genpy.struct_I
def _get_struct_I():
    # Shared Struct for a single little-endian uint32 (array lengths).
    global _struct_I
    return _struct_I
_struct_3I = None
def _get_struct_3I():
    # Struct for three uint32s: header.seq, stamp.secs, stamp.nsecs.
    global _struct_3I
    if _struct_3I is None:
        _struct_3I = struct.Struct("<3I")
    return _struct_3I
_struct_i5d4i = None
def _get_struct_i5d4i():
    # Struct for one Point record: int32 pid, five float64s (b,l,h,bx,ly),
    # four int32s (ref, mcode1..3).
    global _struct_i5d4i
    if _struct_i5d4i is None:
        _struct_i5d4i = struct.Struct("<i5d4i")
    return _struct_i5d4i
| [
"907097904@qq.com"
] | 907097904@qq.com |
2326a5cd67d0e36dfc987657a3b77f64b1108019 | 5de646fb3ecf10ecb45e05018a23b6345fb9ca53 | /codejam/2020 Qualification Round/d.py | e358bdc477498577b9dcea874b2bbacb4f08905f | [] | no_license | PPinto22/LeetCode | 5590d6ca87efcd29f9acd2eaed1bcf6805135e29 | 494a35542b61357c98c621202274d774e650a27c | refs/heads/master | 2022-04-29T20:37:31.085120 | 2022-04-02T12:02:30 | 2022-04-02T12:02:30 | 201,478,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,118 | py | from typing import Union, List, Tuple, Optional
def solve(B):
    """Interactively recover a B-bit array from the fluctuating judge.

    Queries are 1-indexed bit positions printed to stdout; the judge's
    '1'/'0' answer is read from stdin.  Every 10th query the hidden array
    may be complemented and/or reversed, so two control bits (one member
    of an equal mirror-pair and one of a complementary mirror-pair) are
    re-queried to identify which fluctuation occurred.
    """
    def set(index, value):
        # Record a freshly queried bit and maintain the control pairs.
        nonlocal control_equal, control_complement, known
        # Fix to prevent unpaired bits right before a fluctuation
        if (not control_complement or not control_equal) \
                and (query % 10 == 0) \
                and (known % 2 == 0):
            return
        solution[index] = value
        known += 1
        pair = get_pair(index)
        if not control_equal and value == pair[1]:
            control_equal = pair
        elif not control_complement \
                and pair[1] is not None \
                and value != pair[1]:
            control_complement = pair
    def get_pair(index):
        # Mirror position of `index` and its currently-known value (or None).
        pair_index = B - 1 - index
        return [pair_index, solution[pair_index]]
    def determine_fluctuation():
        # Spend up to two queries on the control bits to narrow down which
        # of the four fluctuations just happened; returns its name.
        nonlocal control_complement, control_equal
        possibilities = ['complement', 'reverse', 'both', 'none']
        if control_equal:
            index, old = control_equal
            new = ask(index)
            if old == new:
                possibilities = [p for p in possibilities if p in {'reverse', 'none'}]
            else:
                possibilities = [p for p in possibilities if p in {'complement', 'both'}]
            control_equal = index, new
        if control_complement:
            index, old = control_complement
            new = ask(index)
            if old == new:
                possibilities = [p for p in possibilities if p in {'both', 'none'}]
            else:
                possibilities = [p for p in possibilities if p in {'complement', 'reverse'}]
            control_complement = index, new
        return possibilities[0]
    def apply_fluctuation(fluctuation):
        # Mirror the judge's transformation onto the known bits.
        def complement():
            for i in range(B):
                if solution[i] is not None:
                    solution[i] = not solution[i]
        if fluctuation == 'complement':
            complement()
        elif fluctuation == 'reverse':
            solution.reverse()
        elif fluctuation == 'both':
            complement()
            solution.reverse()
    def ask(i):
        # Query bit i (0-indexed here, 1-indexed on the wire).
        nonlocal query
        query += 1
        print(i + 1, flush=True)
        response = input()
        return True if response == '1' else False
    def next_index():
        # Alternate between the front and the back of the array so bits are
        # learned in mirror pairs.
        return (known // 2) if (known % 2 == 0) else (B - (known // 2) - 1)
    solution: List[Union[bool, None]] = [None] * B
    control_equal: Optional[Tuple[int, bool]] = None
    control_complement: Optional[Tuple[int, bool]] = None
    query = 0
    known = 0
    while known < B and query < 150:
        if query > 0 and query % 10 == 0:
            fluctuation = determine_fluctuation()
            apply_fluctuation(fluctuation)
        else:
            index = next_index()
            set(index, ask(index))
    return ''.join(map(lambda x: '1' if x else '0', solution))
if __name__ == '__main__':
    # T test cases over B bits each; the judge replies 'N' on failure,
    # in which case we stop early.
    T, B = map(int, input().split())
    for Ti in range(1, T + 1):
        solution = solve(B)
        print(solution, flush=True)
        if input() == 'N':
            break
| [
"pedropinto24@hotmail.com"
] | pedropinto24@hotmail.com |
704af671c34b28fe54897638a551ae17e85e2de8 | 77f880463d3dc6a7d5e314c1001641370b00e49b | /tests/test_client.py | d47c69a88a4435a499209cb2f66606431a49934a | [
"MIT"
] | permissive | IngoKl/simple-xapi-client | 81e615698a33f01bd76d4e5b2dee9000b1397d58 | 23258d2af5d461f95d431265f8af6674f7f05b32 | refs/heads/master | 2022-12-08T08:01:48.160952 | 2020-08-24T10:57:25 | 2020-08-24T10:57:25 | 289,899,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,812 | py | import uuid
import configparser
from simple_xapi_client import LRS, XapiStatement, XapiActor, XapiVerb, XapiObject
config = configparser.ConfigParser()
config.read('settings.conf')
test_cfg = config['test']
def test_config():
    """settings.conf must contain exactly the [test] section."""
    assert(config.sections() == ['test'])
def test_simple_put_statement():
    """A raw dict statement (id/actor/verb/object) is accepted by the LRS."""
    statement = {
        'id': str(uuid.uuid1()),
        'actor': {'name': 'TestUser', 'mbox': 'mailto:test@simple-xapi-client.com'},
        'verb': {'id': 'http://activitystrea.ms/schema/1.0/accept'},
        'object': {'id': 'http://simple-xapi-client.com/test'}
    }
    client = LRS(test_cfg['xapi_endpoint'], test_cfg['xapi_basica_username'], test_cfg['xapi_basica_password'])
    assert(client.put_statement(statement) in [200, 204])
def test_statement():
    """A minimal actor/verb/object XapiStatement is accepted by the LRS."""
    client = LRS(test_cfg['xapi_endpoint'], test_cfg['xapi_basica_username'], test_cfg['xapi_basica_password'])
    statement = XapiStatement(
        XapiActor('Tester', 'tester@simple-xapi-client.com'),
        XapiVerb('created'),
        XapiObject('http://simple-xapi-client.com/essay', 'Essay'))
    assert client.put_statement(statement) in [200, 204]
def test_statement_custom_object():
    """An object carrying a full activity-definition dict is accepted."""
    actor = XapiActor('Tester', 'tester@simple-xapi-client.com')
    custom_object_definition = {
        'type': 'http://adlnet.gov/expapi/activities/course',
        'name': {'en-US': 'Python Test'},
        'description': {'en-US': 'A simple test'}
    }
    obj = XapiObject('http://simple-xapi-client.com/custom', custom_object_definition)
    verb = XapiVerb('created')
    statement = XapiStatement(actor, verb, obj)
    client = LRS(test_cfg['xapi_endpoint'], test_cfg['xapi_basica_username'], test_cfg['xapi_basica_password'])
    assert(client.put_statement(statement) in [200, 204])
def test_statement_context():
    """A statement carrying an instructor context is accepted by the LRS."""
    who = XapiActor('Tester', 'tester@simple-xapi-client.com')
    what = XapiObject('http://simple-xapi-client.com/essay', 'Essay')
    did = XapiVerb('created')
    ctx = {'instructor': {'name': 'Tester', 'mbox': 'mailto:tester@simple-xapi-client.com'}}
    lrs = LRS(test_cfg['xapi_endpoint'], test_cfg['xapi_basica_username'], test_cfg['xapi_basica_password'])
    assert lrs.put_statement(XapiStatement(who, did, what, context=ctx)) in [200, 204]
def test_statement_result():
    """A statement carrying a completion/success result is accepted."""
    actor = XapiActor('Tester', 'tester@simple-xapi-client.com')
    obj = XapiObject('http://simple-xapi-client.com/essay', 'Essay')
    verb = XapiVerb('created')
    result = {'completion': True, 'success': True}
    statement = XapiStatement(actor, verb, obj, result=result)
    client = LRS(test_cfg['xapi_endpoint'], test_cfg['xapi_basica_username'], test_cfg['xapi_basica_password'])
    assert(client.put_statement(statement) in [200, 204]) | [
"ikleiber@gmail.com"
] | ikleiber@gmail.com |
232daecfb8fa2a362ef8328b7c89287ecd2fb7aa | ec44c3fff2eb9179b3c5e3386467563492b66387 | /globbing .py | b53b658c5247417a7a115ffd0676cdf952981737 | [] | no_license | SonNinh/terminal | 41a3ecb61a6a067af4d308ce247f41dd73f86db9 | 61ac6605235f22248733870a0b7cb253fb153359 | refs/heads/master | 2021-07-07T20:46:16.262103 | 2020-08-25T15:28:10 | 2020-08-25T15:28:10 | 179,713,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 958 | py | #!/usr/bin/env python3
from re import findall, match, split
from glob import glob
def get_token_list(input_from_command_line):
    """Split a command-line string into tokens.

    A token is either a plain run of non-space characters, or a run that
    opens a backquote / single quote / double quote, in which case it
    extends (spaces included) up to the matching closing quote plus any
    trailing non-space characters.
    Returns the list of tokens with empty matches dropped.
    """
    pattern = "[^ ]*[`\'\"][^ ].*?[`\'\"][^ ]*|[^ ]*"
    raw_matches = findall(pattern, input_from_command_line)
    return [token for token in raw_matches if token != '']
def get_possible_name(path_name_list):
    """Map each glob pattern in path_name_list to a newline-joined string
    of the filesystem paths currently matching it ('' when none match)."""
    return {pattern: '\n'.join(glob(pattern)) for pattern in path_name_list}
def main():
    """Quick demo: tokenize a sample command line and print the result."""
    args = "echo \"asddasd\""
    print(get_token_list(args))
if __name__ == '__main__':
main()
| [
"sonninhngocba@gmail.com"
] | sonninhngocba@gmail.com |
62755ebfabc1c13420961bb6af14666e8286ca85 | a477e529b162fe45ef4ba0a73c847096e73dae57 | /source_view/urls.py | 0bc818995892077897730d447d324f0a53e6211e | [] | no_license | ubhisat/slack_source_view | 13ff8f0869a1957f7c13bdeaba4e32a4e5bbc42f | f939fc7672a0808702d5becbc74ba580ae6ae002 | refs/heads/master | 2021-01-01T18:37:08.772781 | 2014-11-12T08:15:16 | 2014-11-12T08:15:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | from django.conf.urls import include, url
from .views import IndexView
# Route the app's root URL ('') to the class-based IndexView.
urlpatterns = [
    url(r'^$', IndexView.as_view(),
        name='index'),
]
| [
"satmeet@nclouds.com"
] | satmeet@nclouds.com |
12efbe3435603eb9e953e20a0996af85885357c7 | dc931801b35d0e2a05209378e9e1a148c73e2a98 | /crawlers/smackjeeves_crawler.py | a034849493110476ff37ebfe00da7a379ff6901b | [] | no_license | regeto/comics-crawler | 102dfe217634c3dd8cf7c14f3c11af7564e1562a | 6f86621acf031bdfd05761278fb29bcbd07b12fa | refs/heads/master | 2016-09-06T07:09:20.825799 | 2015-01-02T21:02:51 | 2015-01-02T21:02:51 | 28,471,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | import re
from crawlers import Crawler
class SmackjeevesCrawler(Crawler):
    """Crawler backend for comics hosted on smackjeeves.com."""

    site = "Smackjeeves"
    url = "smackjeeves.com"
    search = "http://www.smackjeeves.com/search.php?submit=submit&comic_title=%s"

    def get_chapters(self, series_url):
        """Return [{'url': ..., 'name': ...}] for every chapter <option>
        found on the series page."""
        # Each comic lives on its own subdomain, so keep the leading part
        # of the series URL and re-attach the site's domain per chapter.
        subdomain = series_url.split(".")[0] + "."
        page = self.get_html(series_url)
        option_re = re.compile("<option.*?value=\"(/comics/.*?)\".*?>(.*?)<\/option>")
        return [dict(url=subdomain + self.url + href, name=title)
                for href, title in option_re.findall(page)]

    def get_pages(self, chapter_url):
        """Return a one-element list holding the chapter's comic image URL."""
        page = self.get_html(chapter_url)
        image_re = re.compile("src=\"(.*?)\".*?\"comic_image\"")
        return [dict(url=image_re.findall(page)[0])]
| [
"regendokira@gmail.com"
] | regendokira@gmail.com |
ec987b94aa935fcb73e88d0b7806f58298647634 | 62bf4ccfe2693157a838fb9f5759f912e79c669f | /workshop/lab_price_deviation_corr.py | 124bcab22c249fa55db0707f686462ce489bff0d | [] | no_license | rafaelxiao/athena | 249ecbe56816f9ec9e7f6d4e1d0075a0b5f10e44 | 7501d7b0991e29a35c574ede4f3a4338e26649c5 | refs/heads/master | 2021-01-13T13:14:41.247613 | 2018-01-29T14:13:24 | 2018-01-29T14:13:24 | 72,708,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | import messenger as ms
import analyst as al
import numpy as np
import assistant as at
import matplotlib.pyplot as plt
# Build (or refresh) the smoothed price-deviation series for stock 002415,
# then plot the rolling 30-day correlation between the smoothed difference
# and the smoothed theoretical price.
h = al.PriceDeviation()
h.price_diff_list_save('002415', date='2017-06-28', duration=600, smooth=3)
origin_data = h.price_diff_list_load('002415', date='2017-06-28', duration=600, smooth=3)
origin_data = origin_data[-200:]  # keep only the most recent 200 records
result = []
date_list = []
for i in range(30, len(origin_data)):
    date = origin_data[i]['date']
    # Trailing 30-record window ending just before index i.
    sub_list = origin_data[i-30:i]
    # NOTE(review): the comprehension variable `i` shadows the loop index;
    # harmless (comprehensions have their own scope) but confusing.
    diff_list = [i['smoothed difference'] for i in sub_list]
    theo_list = [i['smoothed theoretical'] for i in sub_list]
    corr = np.correlate(diff_list, theo_list)
    print(date, corr)
    result.append(corr)
    date_list.append(at.date_encoding(date))
plt.plot(date_list, result)
plt.show() | [
"rafaelxiao@hotmail.com"
] | rafaelxiao@hotmail.com |
d27e3a0a5682a13390fb54adcdbba787c16b8c52 | 3c81565e599b4db30c58c5eac871becd7739231a | /Backend/DjangoProject_1/users/views.py | d95ba18d24db3904668e2a510d32eb2fe6f91831 | [] | no_license | alifelham/Bus_Management_System | 8517be21d8715eb4fd56117c880eb7cffa460247 | f8ef3ffebccc6f560fc2624af53d040911cd8f31 | refs/heads/main | 2023-08-01T07:17:28.215221 | 2021-09-19T18:01:42 | 2021-09-19T18:01:42 | 388,483,823 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | from django.shortcuts import render, redirect
# importing django forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib import messages
# defining a function for registration
def register(request):
    """Sign-up view backed by Django's UserCreationForm.

    GET  -> render a blank form.
    POST -> validate; on success persist the new user, flash a success
    message and redirect to 'blog-home'; on failure re-render the bound
    form so field errors are shown.
    """
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            # BUG FIX: the form was validated but never saved, so the
            # success message fired without any user actually being created.
            form.save()
            username = form.cleaned_data.get('username')
            messages.success(request, f'Account created for {username}!')
            return redirect('blog-home')
    else:
        form = UserCreationForm()
    return render(request, 'users/register.html', {'form': form})
# the {'form': form} part is used for accessing form content from within the reg template
| [
"noreply@github.com"
] | alifelham.noreply@github.com |
8aadc45ac8ed74363bbca2826d0e6709ba10a478 | fbf19ea11edf7e862a10ba5e1ac2d45a97b65c5a | /authentication/middleware.py | 2daa243dc6edf7743dbd7d8ddfdf4d766a0cfbe7 | [] | no_license | AlexandruGhergut/epic-online-judge | c4bed0e69a98d369a9f95c00b9529a4967db9818 | 1af79e9343e722f352d974f445ad641c79016667 | refs/heads/master | 2021-03-22T03:22:45.447253 | 2017-06-29T06:19:48 | 2017-06-29T06:19:48 | 86,570,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | from django.urls import reverse
from django.utils.deprecation import MiddlewareMixin
from django.http import HttpResponseRedirect
class SetUsernameRedirectMiddleware(MiddlewareMixin):
def process_request(self, request):
user = request.user
if user.is_authenticated:
redirect_url = reverse('authentication:set_username',
kwargs={'pk': user.pk})
if (request.path != redirect_url and user.username_set is False):
request.session['set_username'] = True
return HttpResponseRedirect(redirect_url)
| [
"alexandru.ghergut94@gmail.com"
] | alexandru.ghergut94@gmail.com |
d5c5909ea6644335136f2d82bcda8a30fa14ccab | 48477a15ad96505def8097a6c098826b1e5cfe1a | /2_basic_algorithms/2_sorting_algorithms/14_pair_sum.py | 9e1422278c00aead20f8116beaac4b3230077a6d | [] | no_license | 450703035/Data-Structures-Algorithms | 02cd5bbb92ce25019fce4955af38b0317b4f4cac | dde33560fcb3e3ff41cf8bd37a454f8c13b15138 | refs/heads/master | 2021-05-22T02:25:03.554870 | 2020-06-27T14:23:24 | 2020-06-27T14:23:24 | 252,927,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,832 | py | # Pair Sum
'''
Problem Statement
Given an input array and a target value (integer), find two values
in the array whose sum is equal to the target value.
Solve the problem without using extra space.
You can assume the array has unique values and will never have
more than one solution.
'''
def pair_sum(arr, target):
    """
    :param: arr - input array (unique values, at most one solution)
    :param: target - target value

    Find two values in ``arr`` whose sum equals ``target`` using the
    sort + two-pointer technique (O(n log n) time). Works on a sorted
    copy, so the caller's list is never mutated.

    Return the two numbers as a sorted list, or [None, None] when no
    pair adds up to the target (including empty or single-element input).
    """
    values = sorted(arr)          # sorted copy; do not mutate the input
    front_index = 0               # points at the smallest remaining value
    back_index = len(values) - 1  # points at the largest remaining value
    while front_index < back_index:
        pair_total = values[front_index] + values[back_index]
        if pair_total == target:
            # values is sorted, so [front, back] is already in order
            return [values[front_index], values[back_index]]
        elif pair_total < target:
            # Sum < target --> shift front pointer forwards
            front_index += 1
        else:
            # Sum > target --> shift back pointer backwards
            back_index -= 1
    return [None, None]
# Test of pair/sum function.
def test_function(test_case):
input_list = test_case[0]
target =test_case[1]
solution = test_case[2]
output = pair_sum(input_list, target)
if output == solution:
print("Pass")
else:
print("False")
input_list = [2, 7, 11, 15]
target = 9
solution = [2, 7]
test_case = [input_list, target, solution]
test_function(test_case)
input_list = [0, 8, 5, 7, 9]
target = 9
solution = [0, 9]
test_case = [input_list, target, solution]
test_function(test_case)
input_list = [110, 9, 89]
target = 9
solution = [None, None]
test_case = [input_list, target, solution]
test_function(test_case)
| [
"danny.wangle@gmail.com"
] | danny.wangle@gmail.com |
e79db74e458b1f23bf9c7d355f33c7457e7e49b8 | 45272da6d64161a586b1dd41df63b8f701f38e39 | /Easy Problems/1-10/1easy.py | 075277c849e0a410bcde57f4d5bf459e7c1e8fad | [] | no_license | Lucas-Guimaraes/Reddit-Daily-Programmer | 559f813d2ee1a06e80a2b260bcb43718ae50b8bf | 45d554d0e0f8bc67e2111bede3a45f77f5512d7b | refs/heads/main | 2023-07-31T18:36:48.774791 | 2021-09-13T04:08:09 | 2021-09-13T04:08:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | #https://www.reddit.com/r/dailyprogrammer/comments/pih8x/easy_challenge_1/
user_name = raw_input("Put in your name: ")
user_age = raw_input("Whhat's your age?: ")
user_screenname = raw_input("How about a username?: ")
print("Your name is " + user_name + " your are " + user_age + " years old, and your username is " + user_screenname)
raw_input()
| [
"noreply@github.com"
] | Lucas-Guimaraes.noreply@github.com |
37bf81f3ad11ff153ef7e0c65f8e73638bd8e747 | 76ae6d1194c4440b86eac56e1ed2d42f745e612c | /mcds_dcl2isa-pre-v1.py | c673f914ebc0520107f6229d628e42b73a175689 | [] | no_license | rheiland/mcds2isa | 76a551df09233bd976268c44cf0fa7968f87c075 | c0b1245fafd133701ff41fe12153543b73cb94e6 | refs/heads/master | 2021-07-21T00:11:43.103167 | 2019-08-27T17:23:19 | 2019-08-27T17:23:19 | 143,934,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,636 | py | #
# mcds_dcl2isa.py - using a MultiCellDS digital cell line XML file, generate associated ISA-Tab files
#
# Input:
# a MultiCellDS digital cell line file <DCL-root-filename>.xml
# Output:
# 3 ISA files:
# i_<DCL-root-filename>.txt
# s_<DCL-root-filename>.txt
# a_<DCL-root-filename>.txt
#
# Author: Randy Heiland
# Date:
# v0.1 - May 2018
# v0.2 - Oct 2018 : add more tab sep_char in various rows
#
import os
import sys
import re
import xml.etree.ElementTree as ET
from pathlib import Path # Python 3?
# Parse the command line: the single required argument is the DCL XML file.
if (len(sys.argv) < 2):
    print("Usage: " + sys.argv[0] + " <MultiCellDS Digital Cell Line XML file>")
    sys.exit(0)
else:
    xml_file = sys.argv[1]
# for testing, just set it
#xml_file = "MCDS_L_0000000052.xml"
# Fixed ISA-Tab "ONTOLOGY SOURCE REFERENCE" preamble written verbatim at the
# top of the investigation file.
header = '\
ONTOLOGY SOURCE REFERENCE\n\
Term Source Name "NCIT" "UO" "NCBITAXON" "EDDA"\n\
Term Source File "https://ncit.nci.nih.gov/ncitbrowser/" "https://bioportal.bioontology.org/ontologies/UO" "http://purl.obolibrary.org/obo/NCBITaxon_1" "http://bioportal.bioontology.org/ontologies/EDDA"\n\
Term Source Version "17.02d" "" "" "2.0"\n\
Term Source Description "NCI Thesarus" "" "" "Evidence in Documents, Discovery, and Analytics (EDDA)"\
'
# NOTE(review): the message below prints without a space before "does".
if not Path(xml_file).is_file():
    print(xml_file + 'does not exist!')
    sys.exit(-1)
# Strip any directory path; output names are derived from the bare file name.
if (os.sep in xml_file):
    xml_base_filename = xml_file[xml_file.rfind(os.sep)+1:]
else:
    xml_base_filename = xml_file
# ISA-Tab triplet: investigation (i_), study (s_) and assay (a_) files, with
# the ".xml" extension replaced by ".txt".
investigation_filename = "i_" + xml_base_filename[:-4] + ".txt"
study_filename = "s_" + xml_base_filename[:-4] + ".txt"
assay_filename = "a_" + xml_base_filename[:-4] + ".txt"
#=======================================================================
# --- Pass 1: write the ISA-Tab investigation file (i_*.txt). ---
fp = open(investigation_filename, 'w')
tree = ET.parse(xml_file) # TODO: relative path using env var?
xml_root = tree.getroot()
sep_char = '\t' # tab
fp.write(header + '\n')
fp.write('INVESTIGATION\n')
#print(xml_root.find(".//MultiCellDB").find(".//ID").text)
# Investigation identity comes from <metadata>: ID, name, description.
i_identifier = '"' + xml_root.find(".//metadata").find(".//ID").text + '"'
#i_title = '"' + xml_root.find(".//metadata").find(".//name").text + '"'
i_title = '"' + xml_root.find(".//metadata").find(".//name").text + ' Digital Cell Line"'
i_desc = '"' + xml_root.find(".//metadata").find(".//description").text + '"'
i_desc = re.sub('\t','',i_desc)
i_desc = re.sub('\n','',i_desc)
fp.write('Investigation Identifier' + sep_char + i_identifier + '\n')
fp.write('Investigation Title' + sep_char + i_title + '\n')
fp.write('Investigation Description' + sep_char + i_desc + '\n')
fp.write('Investigation Submission Date' + sep_char + '""\n')
fp.write('Investigation Public Release Date \t "" \n')
citation_str = '"' + re.sub('[\t\n]','',xml_root.find(".//citation").find(".//text").text) + '"' # remove all tabs and newlines
fp.write('Comment [MultiCellDS/cell_line/metadata/citation/text]' + sep_char + citation_str + '\n')
# TODO: check that "citation" exists first?
if (xml_root.find(".//citation").find(".//notes")):
  fp.write('Comment [MultiCellDS/cell_line/metadata/citation/notes]' + sep_char + xml_root.find(".//citation").find(".//notes").text + '\n')
fp.write('INVESTIGATION PUBLICATIONS\n')
# Extract over all <PMID> in <data_origin> and <data_analysis>
#print('Investigation PubMed ID "21988888" "23084996"  "22342935" ' )
# Extract <PMID> and <DOI> in all <data_origin> and <data_analysis>
# TODO? will we have matching # of each?
pmid = []
doi = []
url = []
# Collect PMID/DOI/URL from every <data_origin>; a missing tag becomes "".
uep = xml_root.find('.//data_origins')  # uep = unique entry point
for elm in uep.findall('data_origin'):
#  doi.append(elm.find('.//DOI').text)
  doi_ptr = elm.find('.//DOI')
  if (doi_ptr == None):
    doi_value = ""
  else:
    doi_value = doi_ptr.text
  doi.append(doi_value)  # do we want to append "" if none??
#  pmid.append(elm.find('.//PMID').text)
  pmid_ptr = elm.find('.//PMID')
  if (pmid_ptr == None):
    pmid_value = ""
  else:
    pmid_value = pmid_ptr.text
  pmid.append(pmid_value)
#  pmid.append(pmid_value)
  url_ptr = elm.find('.//URL')
  if (url_ptr == None):
    url_value = ""
  else:
    url_value = url_ptr.text
  url.append(url_value)
#print("(post data_origin) pmid=",pmid)
#print("(post data_origin) url=",url)
# Likewise collect PMID/DOI from every <data_analysis> under <metadata>.
uep = xml_root.find('.//metadata')
for elm in uep.findall('data_analysis'):
#  print(' "' + el.find('.//PMID').text + '"', end='')
#  doi.append(elm.find('.//DOI').text)
#  pmid.append(elm.find('.//PMID').text)
  doi_ptr = elm.find('.//DOI')
  if (doi_ptr == None):
    doi_value = ""
  else:
    doi_value = doi_ptr.text
  doi.append(doi_value)  # do we want to append "" if none??
#  pmid.append(elm.find('.//PMID').text)
  pmid_ptr = elm.find('.//PMID')
  if (pmid_ptr == None):
    pmid_value = ""
  else:
    pmid_value = pmid_ptr.text
  pmid.append(pmid_value)
#  pmid.append(pmid_value)
#print("(post data_analysis) pmid=",pmid)
# Build one tab-separated, quoted column per collected publication.
sep_char_sq = sep_char + '"'  # tab + single quote
pmid_str = ''
for elm in pmid:
  pmid_str += sep_char + '"' + elm + '"'
fp.write('Investigation PubMed ID' + sep_char + pmid_str + '\n')
doi_str = ''
for elm in doi:
  doi_str += sep_char + '"' + elm + '"'
fp.write('Investigation Publication DOI' + sep_char + doi_str + '\n')
empty_str = ''.join(sep_char + '""' for x in pmid)
fp.write('Investigation Publication Author List' + sep_char + empty_str + '\n')
fp.write('Investigation Publication Title' + sep_char + empty_str + '\n')
pub_status_str = ''.join('\t"Published"' for x in pmid)
pub_title_str = ''.join('\t""' for x in pmid)
fp.write('Investigation Publication Status' + sep_char + pub_status_str + '\n')
pub_status_TA_str = ''.join('\t"C19026"' for x in pmid)
fp.write('Investigation Publication Status Term Accession' + sep_char + pub_status_TA_str + '\n')
pub_status_TSR_str = ''.join('\t"NCIT"' for x in pmid)
fp.write('Investigation Publication Status Term Source REF' + sep_char + pub_status_TSR_str + '\n')
fp.write('INVESTIGATION CONTACTS\n')
# Contact rows come from <current_contact>.
fp.write('Investigation Person Last Name' + sep_char_sq + xml_root.find(".//current_contact").find(".//family-name").text + '"\t\n')
fp.write('Investigation Person First Name' + sep_char_sq + xml_root.find(".//current_contact").find(".//given-names").text + '"\n')
fp.write('Investigation Person Mid Initials' + sep_char + '""\n')
fp.write('Investigation Person Email' + sep_char_sq + xml_root.find(".//current_contact").find(".//email").text + '"\n')
fp.write('Investigation Person Phone' + sep_char + '""\n')
fp.write('Investigation Person Fax' + sep_char + '""\n')
fp.write('Investigation Person Address' + sep_char + '""\n')
fp.write('Investigation Person Affiliation' + sep_char_sq + xml_root.find(".//current_contact").find(".//organization-name").text +
 ', ' + xml_root.find(".//current_contact").find(".//department-name").text + '"\n')
fp.write('Investigation Person Roles' + sep_char + '""\n')
fp.write('Investigation Person Roles Term Accession Number' + sep_char + '""\n')
fp.write('Investigation Person Roles Term Source REF' + sep_char + '""\n')
fp.write('Comment[Investigation Person REF]' + sep_char + '""\n')
# STUDY section of the investigation file: reuses the identifier, title,
# description and publication strings gathered above.
fp.write('STUDY\n')
fp.write('Study Identifier\t' + i_identifier + '\n')
fp.write('Study Title\t' + i_title + '\n')
fp.write('Study Description\t' + i_desc + '\n')
fp.write('Comment[Study Grant Number]\t""\n')
fp.write('Comment[Study Funding Agency]\t""\n')
fp.write('Study Submission Date\t""\n')
fp.write('Study Public Release Date\t""\n')
fp.write('Study File Name\t' + '"' + study_filename + '"\n')
fp.write('STUDY DESIGN DESCRIPTORS\n')
fp.write('Study Design Type\t""\n')
fp.write('Study Design Type Term Accession Number\t""\n')
fp.write('Study Design Type Term Source REF\t""\n')
# TODO? are these different than the previous pubs?
fp.write('STUDY PUBLICATIONS\n')
fp.write('Study PubMed ID' + sep_char + pmid_str + '\n')
fp.write('Study Publication DOI' + sep_char + doi_str + sep_char + '\n')
fp.write('Study Publication Author List' + sep_char + empty_str + '\n')
fp.write('Study Publication Title' + sep_char + pub_title_str + '\n')
fp.write('Study Publication Status' + sep_char + pub_status_str + sep_char + '\n')
fp.write('Study Publication Status Term Accession Number' + sep_char + pub_status_TA_str + sep_char + '\n')
fp.write('Study Publication Status Term Source REF' + sep_char + pub_status_TSR_str + '\n')
fp.write('STUDY FACTORS' + 3*sep_char + '\n')
fp.write('Study Factor Name\t"phenotype_dataset"\n')
fp.write('Study Factor Type\t""\n')
fp.write('Study Factor Type Term Accession Number\t""\n')
fp.write('Study Factor Type Term Source REF\t""\n')
#fp.write('Comment[phenotype_dataset_keywords] "viable; hypoxic; physioxia(standard); physioxia(breast); necrotic,chronic hypoxia"\n')
#fp.write('Comment[phenotype_dataset_keywords] "')
# One keywords entry per <phenotype_dataset>, joined by "; " (the trailing
# separator is sliced off before writing).
comment_str = 'Comment[phenotype_dataset_keywords]\t"'
uep = xml_root.find('.//cell_line')
for elm in uep.findall('phenotype_dataset'):
  comment_str += elm.attrib['keywords'] + '; '
#  print(comment_str)
fp.write(comment_str[:-2] + '"\n')
fp.write('STUDY ASSAYS\t\n')
fp.write('Study Assay Measurement Type\t""\n')
fp.write('Study Assay Measurement Type Term Accession Number\t""\n')
fp.write('Study Assay Measurement Type Term Source REF\t""\n')
fp.write('Study Assay Technology Type\t"Digital Cell Line"\n')
fp.write('Study Assay Technology Type Term Accession Number\t""\n')
fp.write('Study Assay Technology Type Term Source REF\t""\n')
fp.write('Study Assay Technology Platform\t""\n')
fp.write('Study Assay File Name\t' + '"' + assay_filename + '"\n')
fp.write('STUDY PROTOCOLS\t\n')
fp.write('Study Protocol Name\t"microenvironment.measurement"\n')
fp.write('Study Protocol Type\t""\n')
fp.write('Study Protocol Type Term Accession Number\t""\n')
fp.write('Study Protocol Type Term Source REF\t""\n')
fp.write('Study Protocol Description\t""\n')
fp.write('Study Protocol URI\t""\n')
fp.write('Study Protocol Version\t""\n')
#fp.write('Study Protocol Parameters Name "oxygen.partial_pressure; DCIS_cell_density(2D).surface_density; DCIS_cell_area_fraction.area_fraction; DCIS_cell_volume_fraction.volume_fraction"\n')
# Protocol parameter names come from the first <variables> element, formatted
# "name.type" when a type attribute exists, plain "name" otherwise.
comment_str = 'Study Protocol Parameters Name\t"'
# TODO? search for all phenotype_dataset/microenvironment/domain/variables/...
uep = xml_root.find('.//variables')
if (uep):
  for elm in uep.findall('variable'):
    if ('type' in elm.attrib.keys()):  # TODO: what's desired format if 'type' is missing?
      comment_str += elm.attrib['name'] + '.' + elm.attrib['type'] + '; '
    else:
      comment_str += elm.attrib['name'] + '; '
#    comment_str += '; '
#print(comment_str)
fp.write(comment_str[:-2] + '"\n')
semicolon_sep_empty_str = ''.join('; ' for x in pmid)
fp.write('Study Protocol Parameters Name Term Accession Number\t" ' + semicolon_sep_empty_str + ' "\n')
fp.write('Study Protocol Parameters Name Term Source REF\t" ' + semicolon_sep_empty_str + ' "\n')
fp.write('Study Protocol Components Name\t"' + semicolon_sep_empty_str + ' "\n')
fp.write('Study Protocol Components Type\t"' + semicolon_sep_empty_str + ' "\n')
fp.write('Study Protocol Components Type Term Accession Number\t"' + semicolon_sep_empty_str + ' "\n')
fp.write('Study Protocol Components Type Term Source REF\t"' + semicolon_sep_empty_str + ' "\n')
fp.write('STUDY CONTACTS\t\n')
fp.write('Study Person Last Name\t"' + xml_root.find(".//current_contact").find(".//family-name").text + '"\n')
fp.write('Study Person First Name\t"' + xml_root.find(".//current_contact").find(".//given-names").text + '"\n')
fp.write('Study Person Mid Initials\t""\n')
fp.write('Study Person Email\t"' + xml_root.find(".//current_contact").find(".//email").text + '"\n')
fp.write('Study Person Phone\t""\n')
fp.write('Study Person Fax\t""\n')
fp.write('Study Person Address\t""\n')
fp.write('Study Person Affiliation\t"' + xml_root.find(".//current_contact").find(".//organization-name").text +
 ', ' + xml_root.find(".//current_contact").find(".//department-name").text + '"\n')
fp.write('Study Person Roles\t""\n')
fp.write('Study Person Roles Term Accession Number\t""\n')
fp.write('Study Person Roles Term Source REF\t""\n')
fp.write('Comment[creator_orcid-id_family-name]\t"' + xml_root.find(".//creator").find(".//family-name").text + '"\n')
fp.write('Comment[creator_orcid-id_given-names]\t"' + xml_root.find(".//creator").find(".//given-names").text + '"\n')
fp.write('Comment[creator_orcid-id_email]\t"' + xml_root.find(".//creator").find(".//email").text + '"\n')
fp.write('Comment[creator_orcid-id_organization-name]\t"' + xml_root.find(".//creator").find(".//organization-name").text +
 ', ' + xml_root.find(".//creator").find(".//department-name").text + '"\n')
#curator_ptr = xml_root.find(".//curator").find(".//family-name").text + '"\n')
# Curator fields are optional: default every piece to "" and fill in only
# what actually exists in the XML.
family_name = ""
given_names = ""
email = ""
org = ""
dept = ""
curator_ptr = xml_root.find(".//curator")
if (curator_ptr):
  family_name_ptr = curator_ptr.find(".//family-name")
  given_names_ptr = curator_ptr.find(".//given-names")
  email_ptr = curator_ptr.find(".//email")
  org_ptr = curator_ptr.find(".//organization-name")
  dept_ptr = curator_ptr.find(".//department-name")
  if (family_name_ptr):
    family_name = family_name_ptr.find(".//family-name").text
  if (given_names_ptr):
    given_names = given_names_ptr.find(".//given-names").text
  if (email_ptr):
    email = email_ptr.find(".//email").text
  if (org_ptr):
    org = org_ptr.find(".//organization-name").text
  if (dept_ptr):
    dept = dept_ptr.find(".//department-name").text
#fp.write('Comment[curator_orcid-id_family-name]\t"' + xml_root.find(".//curator").find(".//family-name").text + '"\n')
fp.write('Comment[curator_orcid-id_family-name]\t"' + family_name + '"\n')
#fp.write('Comment[curator_orcid-id_given-names]\t"' + xml_root.find(".//curator").find(".//given-names").text + '"\n')
fp.write('Comment[curator_orcid-id_given-names]\t"' + given_names + '"\n')
#fp.write('Comment[curator_orcid-id_email]\t"' + xml_root.find(".//curator").find(".//email").text + '"\n')
fp.write('Comment[curator_orcid-id_email]\t"' + email + '"\n')
fp.write('Comment[curator_orcid-id_organization-name]\t"' + org + ', ' + dept + '"\n')
fp.write('Comment[last_modified_by_orcid-id_family-name]\t"' + xml_root.find(".//last_modified_by").find(".//family-name").text + '"\n')
fp.write('Comment[last_modified_by_orcid-id_given-names]\t"' + xml_root.find(".//last_modified_by").find(".//given-names").text + '"\n')
fp.write('Comment[last_modified_by_orcid-id_email]\t"' + xml_root.find(".//last_modified_by").find(".//email").text + '"\n')
fp.write('Comment[last_modified_by_orcid-id_organization-name]\t"' + xml_root.find(".//last_modified_by").find(".//organization-name").text +
 ', ' + xml_root.find(".//last_modified_by").find(".//department-name").text + '"\n')
fp.write('Comment[Study Person REF]' + sep_char + '""' + '\n')
fp.close()
print(' --> ' + investigation_filename)
#=======================================================================
# --- Pass 2: write the study file (s_*.txt): one header row, then one
# sample row per <phenotype_dataset>. ---
fp = open(study_filename, 'w')
# row #1 (column titles)
fp.write('Source Name' + sep_char)
source_name = i_identifier[1:-1] + '.0'
uep = xml_root.find('.//data_origins')  # uep = unique entry point
for elm in uep.findall('data_origin'):
  for elm2 in elm.findall('citation'):
    fp.write('Comment[citation]' + sep_char)
# TODO: why did I insert the following line?
#    pmid_origin = elm.find('.//PMID').text
uep = xml_root.find('.//metadata')
for elm in uep.findall('data_analysis'):
  for elm2 in elm.findall('citation'):
    fp.write('Comment[citation]' + sep_char)
# Characteristics columns: one per child element of <cell_origin>; the cell
# text (whitespace-normalized) is reused later in every sample row.
uep = xml_root.find('.//cell_origin')
cell_origin_characteristics = []
if (uep):
  for elm in uep.getchildren():
    fp.write('Characteristics[' + elm.tag + ']' + sep_char)
    text_val = elm.text
    text_val = ' '.join(text_val.split())  # strip out tabs and newlines
    cell_origin_characteristics.append(text_val)
#  print("cell_origin_characteristics----->",cell_origin_characteristics,"<-------")
fp.write('Factor Value[phenotype_dataset]' + sep_char + 'Sample Name\n')
# remaining rows
uep = xml_root.find('.//cell_line')
suffix = 0
for elm in uep.findall('phenotype_dataset'):
  row_str = source_name + sep_char
  # do we want a hierarchy of preferred citation types? (e.g., PMID,PMCID,DOI,URL)
  if (len(pmid) > 0):
    for p in pmid:
      row_str += 'PMID: ' + p + sep_char
  elif (len(url) > 0):
    for p in url:
      row_str += 'URL: ' + p + sep_char
#  print("cell_origin_characteristics=",cell_origin_characteristics)
  for c in cell_origin_characteristics:
    row_str += c + sep_char
  row_str += elm.attrib['keywords'] + sep_char + source_name + '.' + str(suffix)
  suffix += 1
#  print(row_str)
  fp.write(row_str + '\n')
fp.close()
print(' --> ' + study_filename)
#=======================================================================
# --- Pass 3: write the assay file (a_*.txt). ---
fp = open(assay_filename, 'w')
"""
Sample Name	Protocol REF	Parameter Value[oxygen.partial_pressure]	Unit	Parameter Value[DCIS_cell_density(2D).surface_density]	Unit	Parameter Value[DCIS_cell_area_fraction.area_fraction]	Unit	Parameter Value[DCIS_cell_volume_fraction.volume_fraction]	Unit	Data File
MCDS_L_0000000052.0.0	microenvironment.measurement	6.17	mmHg	0.00883	1/micron^2	0.8	dimensionless	0.8	dimensionless	MCDS_L_0000000052.xml
MCDS_L_0000000052.0.1	microenvironment.measurement	8	mmHg								MCDS_L_0000000052.xml
MCDS_L_0000000052.0.2	microenvironment.measurement	38	mmHg								MCDS_L_0000000052.xml
MCDS_L_0000000052.0.3	microenvironment.measurement	52	mmHg								MCDS_L_0000000052.xml
MCDS_L_0000000052.0.4	microenvironment.measurement	5	mmHg								MCDS_L_0000000052.xml
"""
# We will do a two-pass approach:
# 1st pass: parse the first instance of the <variables> element to generate the header row.
# UPDATE: cannot assume the first instance of <variables> will be sufficient. The HUVEC data proves otherwise.
#
# Columns' titles
fp.write('Sample Name' + sep_char + 'Protocol REF' + sep_char )
uep = xml_root.find('.//variables')  # TODO: also req: keywords="viable"?
# TODO: what to do if there are no
if (uep):
  num_vars = 0
  for elm in uep.findall('variable'):
    if ('type' in elm.attrib.keys()):  # TODO: what's desired format if 'type' is missing?
      pval_str = elm.attrib['name'] + '.' + elm.attrib['type']
    else:
      pval_str = elm.attrib['name']
#    pval_str = elm.attrib['name'] + '.' + elm.attrib['type']
    fp.write('Parameter Value[' + pval_str + '] ' + sep_char + 'Unit' + sep_char)
    num_vars += 1
fp.write('Data File\n')
#print('num_vars=',num_vars)
# 2nd pass: for each <phenotype_dataset>, each <variables>, and each <variable>, extract a row of relevant
# info to match the column headings.
count = 0
# TODO: am I making too many assumptions about elements - existence, ordering, etc.?
id = xml_root.find(".//metadata").find(".//ID").text
# One assay row per <phenotype_dataset>; sample names are "<ID>.0.<n>".
uep = xml_root.find('.//cell_line')
for elm in uep.findall('phenotype_dataset'):
  vs = elm.find('.//variables')
#  print("----- found <variables>, count=",count)
  nvar = 0
#  for ma in v.findall('material_amount'):
  if vs:
    comment_str = id + '.0.' + str(count) + '\t' + 'microenvironment.measurement'
#    print(comment_str)
    for v in vs.findall('variable'):
      nvar += 1
#      print(v.attrib['units'])
#      print(v.find('.//material_amount').text)
      # Need to strip out tabs here (sometimes)
      text_val = v.find('.//material_amount').text
#      print('------ text_val --->',text_val,'<---------')
      text_val = ' '.join(text_val.split())
#      print('------ text_val --->',text_val,'<---------')
      if ('units' in v.attrib.keys()):  # TODO: what's desired format if missing?
        comment_str += sep_char + text_val + sep_char + v.attrib['units']
      else:
        comment_str += sep_char + text_val + sep_char + ""
#      comment_str += sep_char + v.find('.//material_amount').text + sep_char + v.attrib['units']
#    print(comment_str)
#    print('nvar=',nvar)
    fp.write(comment_str)
    # Pad missing variable columns with tabs so rows line up with the header.
    if (nvar == num_vars):
      fp.write(sep_char)
    else:
      for idx in range(nvar,2*num_vars):
        fp.write(sep_char)
#    fp.write(comment_str + sep_char + xml_file + '\n')
#    fp.write(xml_file + '\n')
#    print("----- ",xml_base_filename, " + CR")
    fp.write(xml_base_filename + '\n')
    count += 1
  else:  # if no 'variables' present, just print minimal info
#    comment_str = id + '.0.' + str(count) + '\t' + '' + '\t' + xml_file + '\n'
    comment_str = id + '.0.' + str(count) + '\t' + '' + '\t' + xml_base_filename + '\n'
    count += 1
    fp.write(comment_str)
fp.close()
print(' --> ' + assay_filename)
| [
"heiland@indiana.edu"
] | heiland@indiana.edu |
ddeb97e4d5e9190c47dcbecb8b9a7dbe7ca9dfe4 | bfd33836d69dd05d6e7720216cef6d341c35147b | /User/DanHD/class/Main.py | c45cc8fe6017a72125ec2f8a7222b820242a7acb | [] | no_license | ducdan/PYTHON-COURSE | 5371b65685769e88c2e14a38b9c86a2bdfc82b79 | cc5a26d0d8124087f711ee0ca354065df6ea4fcf | refs/heads/master | 2020-05-25T15:00:00.465760 | 2017-06-15T11:49:54 | 2017-06-15T11:49:54 | 84,941,845 | 1 | 2 | null | 2017-06-15T11:44:32 | 2017-03-14T11:34:28 | HTML | UTF-8 | Python | false | false | 612 | py | #thua ke
# class Chu(Ong):
# def print_ho(self):
# print(self.ho)
class Ong:
def __init__(self):
self.ho='Tran'
self.__ten='Hung'
def __tai_san(self):
print("100 cay vang")
class Me:
def __init__(self):
self.tai_san='200 cay vang'
class Cha(Ong):
def __init__(self):
self.ho='Nguyen'
def print_name(self):
pass
# def tai_san(self):
# print("10 cay vang")
class Con(Cha,Me):
def print_ho(self):
print(self.ho)
print(self.tai_san)
nam=Con()
# nam.print_ho()
nam.tai_san()
# chu=Chu()
# chu.print_ho()
| [
"51200482@hcmut.edu.vn"
] | 51200482@hcmut.edu.vn |
3f127a05979e2337238db15b7faefd643873ce8f | 4bdd0fa82a314f7d005400fe1bb8bb7c42e9fa85 | /manage.py | aaa1f9e04c23e569b10fefac82ec289aeca70ba1 | [] | no_license | PavloKuptsov/TestTask2021 | 05bf9d9fd1141eac7e029519b86a398422ea5fb7 | 19e66ea74065da1609e3abbb7046e5e90164049f | refs/heads/main | 2023-04-24T13:34:00.921427 | 2021-05-12T18:54:36 | 2021-05-12T18:54:36 | 366,821,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'socialTestTask.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"pavlo.kupstov@sigma.software"
] | pavlo.kupstov@sigma.software |
19b4e9540fda721864be49fb8fc6b663fcd29f9f | e500f23b931008b33e8e8f46e4ebc46bd5e35cd6 | /data_layer.py | a0caf0643ec1b8071963348f249c279580a31e4c | [] | no_license | spathak99/Covid-19-Visualization | 5e30123b059f48348bd6fb415483811814509f1a | 3d80c2aae77c5025644a367168b2dafab3817136 | refs/heads/master | 2022-05-20T20:55:58.905677 | 2020-04-23T06:44:19 | 2020-04-23T06:44:19 | 258,126,435 | 1 | 0 | null | 2020-04-23T07:24:50 | 2020-04-23T07:24:49 | null | UTF-8 | Python | false | false | 4,513 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 20 05:24:53 2020
@author: lukas
"""
#imports
import pandas as pd
import global_code
#Data columns as a variable struct
class Columns:
    """Column names of the input CSV; ALL lists them in file order."""
    SNO = 'SNo'
    OBSERVATION_DATE = 'ObservationDate'
    PROVINCE = 'Province/State'
    COUNTRY = 'Country/Region'
    LAST_UPDATE = 'Last Update'
    CONFIRMED = 'Confirmed'
    DEATHS = 'Deaths'
    RECOVERED = 'Recovered'
    ALL = [SNO, OBSERVATION_DATE, PROVINCE, COUNTRY, LAST_UPDATE, CONFIRMED, DEATHS, RECOVERED]
logger = global_code.getLogger()
#start code
class DataLayer:
    '''
    Handles all data access
    ## documentation about all fields stored: ##
    -dataframe
        contains basically all data as table
    -all_countries
        a list of all countries were data is available
    -province_mapping
        a list for each country of its subregion/regions
        e.g. province_mapping = ['USA':['Massach.', 'Ohio', ..], 'Spain':[..], ..]
    -total_days_recorded
        for each day recored 1 entry n format 'MM/DD/YYYY'
    -accum_deaths_for_day
        a mapping of accumuated deaths per day [03/27/2020:151, 03/28/2020:162,..]
    -accum_recovery_for_day
        a mapping of accumuated recovery per day [03/27/2020:151, 03/28/2020:162,..]
    -accum_confirms_for_day
        a mapping of accumuated confirmed cases per day [03/27/2020:151, 03/28/2020:162,..]
    '''
    def __init__(self, datafile):
        """Load the CSV at ``datafile`` and precompute all lookup structures."""
        #load the data given
        #store it in a more specific data format
        #answer queries from the view controller
        self.__process_input_data(datafile)
    #--- private util methods ----------------------------------------------------
    def __process_input_data(self, datafile):
        """Read the CSV and build all the fields documented on the class."""
        logger.log('Preprocessing data structures..')
        dframe = pd.read_csv(datafile)
        self.dataframe = dframe
        #get all countries
        countries = dframe[Columns.COUNTRY].unique()
        self.all_countries = countries
        #add all provinces to country
        self.province_mapping = {}
        for country in self.all_countries:
            country_indices = dframe[Columns.COUNTRY] == country
            provinces = dframe[country_indices][Columns.PROVINCE].unique()
            self.province_mapping[country] = list(provinces)
        #get all dates available
        self.total_days_recorded = dframe[Columns.OBSERVATION_DATE].unique()
        #for each day, record how many people died, recovered or infected
        # (sums over every row observed on that day, across all regions)
        self.accum_deaths_for_day = {}
        self.accum_recovery_for_day = {}
        self.accum_confirms_for_day = {}
        for day in self.total_days_recorded:
            current_day_indices = dframe[Columns.OBSERVATION_DATE] == day
            total_day_deaths = dframe[current_day_indices][Columns.DEATHS].sum()
            total_day_recovers = dframe[current_day_indices][Columns.RECOVERED].sum()
            total_day_confirmed = dframe[current_day_indices][Columns.CONFIRMED].sum()
            self.accum_deaths_for_day[day] = total_day_deaths
            self.accum_recovery_for_day[day] = total_day_recovers
            self.accum_confirms_for_day[day] = total_day_confirmed
        #example filter
        # datafilter1 = dframe[Columns.DEATHS] > 100
        # datafilter2 = dframe[Columns.PROVINCE] == 'Hubei'
        # print(dframe[datafilter1 & datafilter2].head(5))
        logger.log('Preprocessing geomap')
        # geo_map_dataframe renames columns for the map view: rename() without
        # inplace returns a copy; the second rename mutates that copy in place.
        self.geo_map_dataframe = self.dataframe.rename(columns={'Country/Region':'Country'}) #copy & rename
        self.geo_map_dataframe.rename(columns={'ObservationDate':'Date'}, inplace=True) #only rename
        logger.log('Preprocessing finished')
    #--- public methods ----------------------------------------------------------
    def get_map_dataframe(self):
        """Return per-(Date, Country) sums for rows with at least one confirmed case."""
        final_df = self.geo_map_dataframe[self.geo_map_dataframe['Confirmed']>0]
        return final_df.groupby(['Date','Country']).sum().reset_index()
    def get_as_of_date(self):
        """Return the last (most recent) recorded observation date."""
        no_days = len(self.total_days_recorded)
        return self.total_days_recorded[no_days - 1]
    def get_all_countries(self):
        """Return the array of all country names present in the data."""
        return self.all_countries
    #Extend methods here as needed
"checker-tracker@web.de"
] | checker-tracker@web.de |
76d2a881bf9e6396738d1fd20530828d0772d4dc | 3b298a922bea8249f4459f4252605c0774cc00f9 | /test/test_cliffworld.py | ac44640d760d7d32e0a3332e227f04cc5b3eba52 | [
"MIT",
"Python-2.0"
] | permissive | bouali-meriem-estin/tabular-methods | 536fd07ec05ccfb5fc3109cdc59e7bd2e79590a5 | 05ee6488feffc64d3bb7335f26b2e9688d90a57b | refs/heads/master | 2023-06-09T14:01:01.687901 | 2020-07-03T15:23:03 | 2020-07-03T15:23:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,474 | py | import sys
sys.path.append("..")
import numpy as np
from env.grid_world import GridWorld
from scipy.io import loadmat
def test_cliffworld():
    """Compare the generated cliff-world MDP against the MATLAB reference model."""
    # reference model produced by the original MATLAB implementation
    reference = loadmat('../data/test_data/cliffworld.mat')['model']

    # world layout: 5x10 grid, cliff cells along the bottom row, wall in
    # the last column, start bottom-left, goal next to the cliff's end
    n_rows = 5
    n_cols = 10
    cliff_cells = np.array([[4, col] for col in range(1, 8)])
    wall_cells = np.array([[row, 9] for row in range(5)])
    start = np.array([[4, 0]])
    goals = np.array([[4, 8]])

    # assemble the grid world with the same rewards/dynamics as the reference
    world = GridWorld(num_rows=n_rows,
                      num_cols=n_cols,
                      start_state=start,
                      goal_states=goals)
    world.add_obstructions(obstructed_states=wall_cells,
                           restart_states=cliff_cells)
    world.add_rewards(step_reward=-1,
                      goal_reward=10,
                      restart_state_reward=-100)
    world.add_transition_probability(p_good_transition=1,
                                     bias=0)
    world.add_discount(discount=0.9)
    model = world.create_gridworld()

    # rewards and all four per-action transition matrices must match exactly
    assert np.all(model.R == reference['R'][0][0][:, 0].reshape(-1, 1))
    for action in range(4):
        assert np.all(model.P[:, :, action] == reference['P'][0][0][:, :, action])
| [
"david.lines.dl@gmail.com"
] | david.lines.dl@gmail.com |
fe096733995179810a8f5a1cd1a2c0c8aa89bd5d | 5fe7929f129584688b75706334921a02bfa93a20 | /aide_design/unit_process_design/prefab/lfom_prefab_functional.py | 470d5e65aea0ede5f5d25e09810f58fc0555ea2f | [] | no_license | FelixYuHengYang/moops | ff1b1c416151d873a13d91109b17683d6656fbed | 82f018b2f39b58cf08c990cc986d21f67ac265f9 | refs/heads/master | 2021-04-30T06:13:07.663966 | 2019-05-05T18:38:11 | 2019-05-05T18:38:11 | 121,437,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,872 | py | # -*- coding: utf-8 -*-
"""
Edited on September 1, 2017
@author: Monroe Weber-Shirk
Created on Wed Jun 21 17:16:46 2017
@author: cc2467
"""
#Here we import packages that we will need for this notebook. You can find out about these packages in the Help menu.
# although math is "built in" it needs to be imported so it's functions can be used.
import math
#see numpy cheat sheet https://www.dataquest.io/blog/images/cheat-sheets/numpy-cheat-sheet.pdf
#The numpy import is needed because it is renamed here as np.
import numpy as np
# add imports for AguaClara code that will be needed
# physchem has functions related to hydraulics, fractal flocs, flocculation, sedimentation, etc.
from aide_design import physchem as pc
# pipedatabase has functions related to pipe diameters
from aide_design import pipedatabase as pipe
# units allows us to include units in all of our calculations
from aide_design.units import unit_registry as u
# utility has the significant digit display function
from aide_design import utility as ut
# import export inputs and define the VC coefficient
from aide_design import expert_inputs as exp
# vena-contracta coefficient used in every orifice-flow calculation below
ratio_VC_orifice= exp.RATIO_VC_ORIFICE
# The following constants need to go into the constants file
# safety factor applied when sizing the LFOM pipe cross-section
Pi_LFOM_safety = 1.2
# pipe schedule for LFOM
#SDR_LFOM = 26
#FLOW = 10*u.L/u.s
#HL_LFOM = 20*u.cm
#primary outputs from this file are
#Nominal diameter nom_diam_lfom_pipe(FLOW,HL_LFOM,Pi_LFOM_safety,SDR_LFOM)
#number of rows n_lfom_rows(FLOW,HL_LFOM)
#orifice diameter orifice_diameter(FLOW,HL_LFOM,drill_series_uom)
#number of orifices in each row n_lfom_orifices(FLOW,HL_LFOM,drill_series_uom,SDR_LFOM)
#height of the center of each row height_lfom_orifices(FLOW,HL_LFOM,drill_series_uom)
# output is width per flow rate.
@u.wraps(u.s/(u.m**2), [u.m, u.m], False)
def width_stout(HL_LFOM, z):
    """Stout weir width per unit flow rate at depth z below the free surface."""
    fall_velocity = (2 * pc.gravity * z) ** (1 / 2)
    width = 2 / (fall_velocity * ratio_VC_orifice * np.pi * HL_LFOM)
    return width.magnitude
@u.wraps(None, [u.m**3/u.s,u.m], False)
def n_lfom_rows(FLOW,HL_LFOM):
    """Return the number of rows of orifices in the LFOM (currently fixed at 10).

    The open area corresponding to one row can be set equal to two orifices
    of diameter equal to the row height; more orifices per row at the top of
    the LFOM than that are harder to drill and unnecessary for accuracy, so
    in principle the row count could be derived from FLOW and HL_LFOM. Until
    a reasonable system of constraints that reliably returns a valid solution
    is worked out, the design always uses 10 rows, so both arguments are
    currently unused. (A dead, unused estimate based on width_stout that was
    previously computed here has been removed; it had no effect on the
    returned value.)
    """
    return 10
def dist_center_lfom_rows(FLOW, HL_LFOM):
    """Vertical spacing between the centers of consecutive orifice rows."""
    n_rows = n_lfom_rows(FLOW, HL_LFOM)
    return HL_LFOM / n_rows
def vel_lfom_pipe_critical(HL_LFOM):
    """Average vertical water velocity in the LFOM pipe at the very bottom
    of the bottom row of orifices.

    For any linear flow orifice meter of height 20 cm this evaluates to
    0.841 m/s, independent of total plant flow rate.
    """
    fall_speed = (2 * u.g_0 * HL_LFOM) ** (1 / 2)
    return (4 * fall_speed / (3 * math.pi)).to(u.m / u.s)
def area_lfom_pipe_min(FLOW, HL_LFOM, Pi_LFOM_safety):
    """Minimum LFOM pipe cross-sectional area, including the safety factor."""
    v_critical = vel_lfom_pipe_critical(HL_LFOM)
    return (Pi_LFOM_safety * FLOW / v_critical).to(u.m**2)
def nom_diam_lfom_pipe(FLOW, HL_LFOM, Pi_LFOM_safety, SDR_LFOM):
    """Nominal diameter of the LFOM pipe, rounded up to an available size."""
    inner_diameter = pc.diam_circle(area_lfom_pipe_min(FLOW, HL_LFOM, Pi_LFOM_safety))
    return pipe.ND_SDR_available(inner_diameter, SDR_LFOM)
def area_lfom_orifices_max(FLOW, HL_LFOM):
    """Estimate the orifice area corresponding to the top row of orifices.

    The width of the Stout weir at the center of the top row is used to
    approximate the required open area (integration would be an alternative
    solution method).
    """
    row_height = dist_center_lfom_rows(FLOW, HL_LFOM)
    depth_at_top_row = HL_LFOM - 0.5 * row_height
    return (FLOW * width_stout(HL_LFOM, depth_at_top_row) * row_height).to(u.m**2)
def d_lfom_orifices_max(FLOW, HL_LFOM):
    """Diameter of a circle whose area equals the maximum top-row orifice area."""
    top_row_area = area_lfom_orifices_max(FLOW, HL_LFOM)
    return pc.diam_circle(top_row_area)
def orifice_diameter(FLOW, HL_LFOM, drill_bits):
    """Largest available drill bit that fits both the row spacing and the
    maximum top-row orifice diameter."""
    row_spacing = (dist_center_lfom_rows(FLOW, HL_LFOM)).to(u.m).magnitude
    d_max = (d_lfom_orifices_max(FLOW, HL_LFOM)).to(u.m).magnitude
    upper_bound = min(row_spacing, d_max) * u.m
    return ut.floor_nearest(upper_bound, drill_bits)
def drillbit_area(FLOW, HL_LFOM, drill_bits):
    """Cross-sectional area of the selected orifice drill bit."""
    bit_diameter = orifice_diameter(FLOW, HL_LFOM, drill_bits)
    return pc.area_circle(bit_diameter)
def n_lfom_orifices_per_row_max(FLOW, HL_LFOM, drill_bits, SDR_LFOM):
    """Upper bound on the number of orifices allowed in each row.

    At least 3 mm of pipe wall must remain between consecutive orifices to
    retain the structural integrity of the pipe.
    """
    S_lfom_orifices_Min = 3 * u.mm
    pipe_id = pipe.ID_SDR(nom_diam_lfom_pipe(FLOW, HL_LFOM, Pi_LFOM_safety, SDR_LFOM), SDR_LFOM)
    center_to_center = orifice_diameter(FLOW, HL_LFOM, drill_bits) + S_lfom_orifices_Min
    return math.floor(math.pi * pipe_id / center_to_center)
def flow_ramp(FLOW, HL_LFOM):
    """Target cumulative flow at each row, rising linearly up to FLOW."""
    n_rows = n_lfom_rows(FLOW, HL_LFOM)
    first_step = FLOW.magnitude / n_rows
    return np.linspace(first_step, FLOW.magnitude, n_rows) * FLOW.units
def height_lfom_orifices(FLOW, HL_LFOM, drill_bits):
    """Heights of the center of each row of orifices.

    The bottom of the bottom row of orifices sits at the zero elevation
    point of the LFOM so that the flow goes to zero when the water height
    is zero.
    """
    bottom = ((orifice_diameter(FLOW, HL_LFOM, drill_bits) * 0.5).to(u.m)).magnitude
    top = (HL_LFOM.to(u.m)).magnitude
    spacing = ((dist_center_lfom_rows(FLOW, HL_LFOM)).to(u.m)).magnitude
    return np.arange(bottom, top, spacing) * u.m
#print(height_lfom_orifices(10*u.L/u.s,20*u.cm,[0.75]*u.inch))
def flow_lfom_actual(FLOW,HL_LFOM,drill_bits,Row_Index_Submerged,N_LFOM_Orifices):
    """Calculates the flow for a given number of submerged rows of orifices

    Args:
        FLOW -- design flow rate through the LFOM
        HL_LFOM -- headloss (total height) of the LFOM
        drill_bits -- available drill bit diameters
        Row_Index_Submerged -- index of the highest submerged row
        N_LFOM_Orifices -- number of orifices in each row (bottom row first)

    Returns the summed orifice flow over all submerged rows, as a pint
    quantity in m**3/s.
    """
    D_LFOM_Orifices=orifice_diameter(FLOW,HL_LFOM,drill_bits)
    row_height=dist_center_lfom_rows(FLOW,HL_LFOM)
    #harray is the distance from the water level to the center of the orifices when the water is at the max level
    harray = (np.linspace(row_height.to(u.mm).magnitude,HL_LFOM.to(u.mm).magnitude,n_lfom_rows(FLOW,HL_LFOM)))*u.mm -0.5* D_LFOM_Orifices
    FLOW_new=0*u.m**3/u.s
    # accumulate the flow contribution of each submerged row; indexing
    # harray[Row_Index_Submerged-i] pairs row i with its depth below the
    # water surface at the given submergence level
    for i in range(Row_Index_Submerged+1):
        FLOW_new = FLOW_new + (N_LFOM_Orifices[i]*(pc.flow_orifice_vert(D_LFOM_Orifices,harray[Row_Index_Submerged-i],ratio_VC_orifice)))
    return FLOW_new
#Calculate number of orifices at each level given a diameter
def n_lfom_orifices(FLOW,HL_LFOM,drill_bits,SDR_LFOM):
    """Calculate the number of orifices in each row (bottom row first).

    Each row is sized so that the cumulative flow through all rows at or
    below it matches the linear flow ramp as closely as possible, subject
    to a per-row maximum and a non-negative integer count.
    """
    FLOW_ramp_local = flow_ramp(FLOW,HL_LFOM)
    n_orifices_max =n_lfom_orifices_per_row_max(FLOW,HL_LFOM,drill_bits,SDR_LFOM)
    n_rows = (n_lfom_rows(FLOW,HL_LFOM))
    D_LFOM_Orifices = orifice_diameter(FLOW,HL_LFOM,drill_bits)
    # H is distance from the elevation between two rows of orifices down to the center of the orifices
    H=dist_center_lfom_rows(FLOW,HL_LFOM)-D_LFOM_Orifices*0.5
    n=[]
    for i in range(n_rows):
        #place zero in the row that we are going to calculate the required number of orifices
        n=np.append(n,0)
        #calculate the ideal number of orifices at the current row without constraining to an integer
        n_orifices_real=((FLOW_ramp_local[i]-flow_lfom_actual(FLOW,HL_LFOM,drill_bits,i,n))/
                         pc.flow_orifice_vert(D_LFOM_Orifices,H,ratio_VC_orifice)).to(u.dimensionless).magnitude
        #constrain number of orifices to be less than the max per row and greater or equal to 0
        n[i]=min((max(0,round(n_orifices_real))),n_orifices_max)
    return n
#This function calculates the error of the design based on the differences between the predicted flow rate
#and the actual flow rate through the LFOM.
def flow_lfom_error(FLOW, HL_LFOM, drill_bits, SDR_LFOM):
    """Relative error (fraction of FLOW) between the actual and target flow
    for each submerged-row count except the last."""
    orifice_counts = n_lfom_orifices(FLOW, HL_LFOM, drill_bits, SDR_LFOM)
    target_flows = flow_ramp(FLOW, HL_LFOM)
    errors = []
    for j in range(len(orifice_counts) - 1):
        actual = flow_lfom_actual(FLOW, HL_LFOM, drill_bits, j, orifice_counts)
        errors.append((actual - target_flows[j]) / FLOW)
    return errors
def flow_lfom_ideal(FLOW, HL_LFOM, H):
    """Ideal (perfectly linear) flow through the LFOM at water depth H."""
    return FLOW * H / HL_LFOM
def flow_lfom(FLOW,HL_LFOM,drill_bits,SDR_LFOM,H):
    """Total flow through the LFOM when the water surface is at depth H.

    Sums the orifice-equation flow through each submerged row, pairing the
    per-row orifice counts with the depth of each row center below the
    water surface.
    """
    D_lfom_orifices=orifice_diameter(FLOW,HL_LFOM,drill_bits)
    # depths of submerged row centers below the water surface; dtype=object
    # keeps the pint quantities intact inside the numpy array
    # NOTE(review): the arange step is H - row spacing, which looks odd for a
    # row-by-row traversal -- confirm the intended step is the row spacing.
    H_submerged=np.arange(H-0.5*D_lfom_orifices,HL_LFOM,H-dist_center_lfom_rows(FLOW,HL_LFOM),dtype=object)
    N_lfom_orifices=n_lfom_orifices(FLOW,HL_LFOM,drill_bits,SDR_LFOM)
    flow=[]
    for i in range (len(H_submerged)):
        flow.append(pc.flow_orifice_vert(D_lfom_orifices,H_submerged[i],ratio_VC_orifice)*N_lfom_orifices[i])
    return sum (flow)
| [
"31310824+skittlesburst@users.noreply.github.com"
] | 31310824+skittlesburst@users.noreply.github.com |
8735f5b0e9167684495efe5852cebc7defa664f7 | 930309163b930559929323647b8d82238724f392 | /abc155_c.py | 6b0f7c6960bceb99ef3c1e6274c2f06a7b5baa8f | [] | no_license | GINK03/atcoder-solvers | 874251dffc9f23b187faa77c439b445e53f8dfe1 | b1e7ac6e9d67938de9a85df4a2f9780fb1fbcee7 | refs/heads/master | 2021-11-07T14:16:52.138894 | 2021-09-12T13:32:29 | 2021-09-12T13:32:29 | 11,724,396 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | import collections
# Read N strings from stdin, then print every string that occurs the maximum
# number of times, in lexicographic order.
n = int(input())
counts = collections.Counter(input() for _ in range(n))
most_common_count = max(counts.values())
winners = sorted(name for name, cnt in counts.items() if cnt == most_common_count)
for name in winners:
    print(name)
"gim.kobayashi@gmail.com"
] | gim.kobayashi@gmail.com |
410559c8f26e95c96374a7fea4724d3d00169ba7 | 10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94 | /Python/number-of-ways-to-earn-points.py | 6707c76b184e8c02c07e41ef08fcbd9b81e9220e | [
"MIT"
] | permissive | kamyu104/LeetCode-Solutions | f54822059405ef4df737d2e9898b024f051fd525 | 4dc4e6642dc92f1983c13564cc0fd99917cab358 | refs/heads/master | 2023-09-02T13:48:26.830566 | 2023-08-28T10:11:12 | 2023-08-28T10:11:12 | 152,631,182 | 4,549 | 1,651 | MIT | 2023-05-31T06:10:33 | 2018-10-11T17:38:35 | C++ | UTF-8 | Python | false | false | 1,069 | py | # Time: O(n * t * c)
# Space: O(t)
# knapsack dp
class Solution(object):
    def waysToReachTarget(self, target, types):
        """
        :type target: int
        :type types: List[List[int]]
        :rtype: int

        Bounded-knapsack DP. dp[i] is the number of ways to earn exactly
        i points using the question types processed so far. Iterating i
        downward lets each type be added once per pass, while j enumerates
        how many questions of that type (worth `marks` each, at most
        `count` available) are answered.
        """
        MOD = 10**9 + 7
        dp = [0]*(target+1)
        dp[0] = 1  # one way to earn zero points: answer nothing
        for count, marks in types:
            # range() instead of the Python-2-only xrange() so the solution
            # runs unchanged on both Python 2 and Python 3
            for i in reversed(range(1, target+1)):
                for j in range(1, min(i//marks, count)+1):
                    dp[i] = (dp[i]+dp[i-j*marks])%MOD
        return dp[-1]
# Time: O(n * t * c)
# Space: O(t)
# knapsack dp
class Solution2(object):
    def waysToReachTarget(self, target, types):
        """
        :type target: int
        :type types: List[List[int]]
        :rtype: int

        Same bounded-knapsack DP as Solution, but building a fresh dp
        array per question type and iterating forward: for every reachable
        score i, fan out to i + j*marks for each usable count j.
        """
        MOD = 10**9 + 7
        dp = [0]*(target+1)
        dp[0] = 1  # one way to earn zero points: answer nothing
        for count, marks in types:
            new_dp = [0]*(target+1)
            # range() instead of the Python-2-only xrange() so the solution
            # runs unchanged on both Python 2 and Python 3
            for i in range(target+1):
                for j in range(min((target-i)//marks, count)+1):
                    new_dp[i+j*marks] = (new_dp[i+j*marks]+dp[i])%MOD
            dp = new_dp
        return dp[-1]
| [
"noreply@github.com"
] | kamyu104.noreply@github.com |
526abb44076323b13492031101bc312d813868d2 | 40796d49a6d50237900ac1a1a20648b546613d18 | /python/applications/mobdat/common/graph/LayoutNodes.py | 333b5941c57af701d1ba71064bdf91f907c25351 | [] | no_license | Mondego/spacetime-apps | c32abca98134d80f5bff965c8d74550c8109821d | c2d3a714cc2819f4a72d2d0b1b8c129d69c4de7c | refs/heads/master | 2021-01-23T03:43:08.197768 | 2019-07-27T22:08:58 | 2019-07-27T22:08:58 | 86,112,423 | 3 | 3 | null | 2019-07-27T22:08:59 | 2017-03-24T21:34:10 | Python | UTF-8 | Python | false | false | 10,237 | py | #!/usr/bin/env python
"""
Copyright (c) 2014, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@file LayoutNodes.py
@author Mic Bowman
@date 2013-12-03
This file defines routines used to build features of a mobdat traffic
network such as building a grid of roads.
"""
import os, sys
import logging
# we need to import python modules from the $SUMO_HOME/tools directory
sys.path.append(os.path.join(os.environ.get("OPENSIM","/share/opensim"),"lib","python"))
sys.path.append(os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "..")))
sys.path.append(os.path.realpath(os.path.join(os.path.dirname(__file__), "..")))
sys.path.append(os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "lib")))
import Node, LayoutDecoration
logger = logging.getLogger(__name__)
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class IntersectionType(Node.Node) :
    """
    The IntersectionType class is used to specify parameters for rendering
    intersections in Sumo and OpenSim. Individual Intersection nodes are
    registered as members of an IntersectionType so they share these
    rendering settings.
    """
    # -----------------------------------------------------------------
    def __init__(self, name, itype, render) :
        """
        Args:
            name -- string
            itype -- string, indicates the stop light type for the intersection
            render -- boolean, flag to indicate that opensim should render the object
        """
        Node.Node.__init__(self, name = name)
        # the rendering parameters are stored in a decoration on the node
        self.AddDecoration(LayoutDecoration.IntersectionTypeDecoration(name, itype, render))
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class Intersection(Node.Node) :
    """An intersection node placed at integer grid coordinates; each
    intersection belongs to an IntersectionType that controls rendering."""
    # -----------------------------------------------------------------
    def __init__(self, name, itype, x, y) :
        """
        Args:
            name -- string
            itype -- object of type Layout.IntersectionType
            x, y -- integer coordinates
        """
        Node.Node.__init__(self, name = name)
        # position and (initially empty) map of connected edges
        self.AddDecoration(LayoutDecoration.CoordDecoration(x, y))
        self.AddDecoration(LayoutDecoration.EdgeMapDecoration())
        # register with the type so rendering parameters are shared
        itype.AddMember(self)
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
##class EndPoint(Node.Node) :
class EndPoint(Intersection) :
    """
    EndPoint

    This graph node class (a subset of intersections) is the destination
    for a trip. It is an Intersection with an extra EndPointDecoration.

    Members: None
    Decorations:
        EndPointDecoration
    Edges: None
    """

    # -----------------------------------------------------------------
    def __init__(self, name, itype, x, y) :
        """
        Args:
            name -- string
            itype -- object of type Layout.IntersectionType
            x, y -- integer coordinates
        """
        Intersection.__init__(self, name, itype, x, y)
        # marks this intersection as a valid trip origin/destination
        self.AddDecoration(LayoutDecoration.EndPointDecoration())
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class LocationCapsule(Node.Node) :
    """
    LocationCapsule

    This graph node class manages a collection of EndPoint nodes.

    Members: EndPoints, typically one endpoint for a residential
    location and multiple endpoints for a business location
    Decorations:
        CapsuleDecoration
    Edges: None
    """
    # -----------------------------------------------------------------
    def __init__(self, name) :
        """
        Args:
            name -- string
        """
        Node.Node.__init__(self, name = name)
        self.AddDecoration(LayoutDecoration.CapsuleDecoration())

    # -----------------------------------------------------------------
    def AddEndPointToCapsule(self, endpoint) :
        """Add an EndPoint node to this capsule's membership.

        Args:
            endpoint -- object of type LayoutNodes.EndPoint
        """
        self.AddMember(endpoint)
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class BusinessLocation(Node.Node) :
    """
    BusinessLocation

    This graph node class manages a business neighborhood consisting of
    a collection of LocationCapsule objects.

    Members: typically one LocationCapsule node that contains multiple
    EndPoint nodes
    MemberOf:
        BusinessLocationProfile
    Decorations:
        BusinessLocationDecoration
    Edges: None
    """
    # -----------------------------------------------------------------
    def __init__(self, name, profile) :
        """
        Args:
            name -- string
            profile -- object of type BusinessLocationProfile
        """
        Node.Node.__init__(self, name = name)
        self.AddDecoration(LayoutDecoration.BusinessLocationDecoration())
        # register this location with its profile
        profile.AddMember(self)

    # -----------------------------------------------------------------
    def AddCapsuleToLocation(self, capsule) :
        """Add a LocationCapsule node to this location's membership.

        Args:
            capsule -- object of type LayoutNodes.LocationCapsule
        """
        self.AddMember(capsule)
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class ResidentialLocation(Node.Node) :
    """
    ResidentialLocation

    This graph node class manages a residential neighborhood consisting of
    a collection of LocationCapsule objects.

    Members: typically several LocationCapsule nodes that each contain
    a single EndPoint node
    MemberOf:
        ResidentialLocationProfile
    Decorations:
        ResidentialLocationDecoration
    Edges: None
    """
    # -----------------------------------------------------------------
    def __init__(self, name, profile) :
        """
        Args:
            name -- string
            profile -- object of type ResidentialLocationProfile
        """
        Node.Node.__init__(self, name = name)
        self.AddDecoration(LayoutDecoration.ResidentialLocationDecoration())
        # register this location with its profile
        profile.AddMember(self)

    # -----------------------------------------------------------------
    def AddCapsuleToLocation(self, capsule) :
        """Add a LocationCapsule node to this location's membership.

        Args:
            capsule -- object of type LayoutNodes.LocationCapsule
        """
        self.AddMember(capsule)
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class BusinessLocationProfile(Node.Node) :
    """Profile node shared by BusinessLocation members; holds per-node
    capacity limits and the mix of business types, stored as a decoration."""
    # -----------------------------------------------------------------
    def __init__(self, name, employees, customers, types) :
        """
        Args:
            name -- string
            employees -- integer, max number of employees per node
            customers -- integer, max number of customers per node
            types -- dict mapping Business.BusinessTypes to count
        """
        Node.Node.__init__(self, name = name)
        self.AddDecoration(LayoutDecoration.BusinessLocationProfileDecoration(employees, customers, types))
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class ResidentialLocationProfile(Node.Node) :
    """Profile node shared by ResidentialLocation members; holds the
    per-node resident capacity, stored as a decoration."""
    # -----------------------------------------------------------------
    def __init__(self, name, residents) :
        """
        Args:
            name -- string
            residents -- integer, max number of residents per node
        """
        Node.Node.__init__(self, name = name)
        self.AddDecoration(LayoutDecoration.ResidentialLocationProfileDecoration(residents))
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
## XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class RoadType(Node.Node) :
    """
    The RoadType class is used to specify parameters for rendering roads
    in Sumo and OpenSim. All parameters are stored in a RoadTypeDecoration.
    """

    # -----------------------------------------------------------------
    def __init__(self, name, lanes, pri, speed, wid, sig, render, center) :
        """
        Args:
            name -- string
            lanes -- integer, number of lanes in the road
            pri -- integer, priority for stop lights
            speed -- float, maximum speed allowed on the road
            wid -- width of the road (units as expected by the renderer)
            sig -- string, signature
            render -- boolean, flag to indicate whether opensim should render
            center -- boolean, flag to indicate the coordinate origin
        """
        Node.Node.__init__(self, name = name)
        self.AddDecoration(LayoutDecoration.RoadTypeDecoration(name, lanes, pri, speed, wid, sig, render, center))
| [
"ra.rohan@gmail.com"
] | ra.rohan@gmail.com |
25cc35183020aef1120dbf3d18e7b3ab3b4fbcd8 | 202539c64a109ea1d59b2ea91fd4eaf99b9ef407 | /world/settings.py | e42be1f327bc5c3336f3948fec10939a49b737a6 | [] | no_license | Ahmansee/newman | 95a3ee006248c93d7fc8a2ecf30e2168784d587b | 3f6d982b3701b88b2d53af6938441c8b98728853 | refs/heads/main | 2022-12-24T01:47:31.839599 | 2020-10-02T11:20:25 | 2020-10-02T11:20:25 | 300,582,816 | 0 | 0 | null | 2020-10-02T11:20:26 | 2020-10-02T10:42:51 | null | UTF-8 | Python | false | false | 3,249 | py | """
Django settings for world project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
TEMPLATE_DIR = os.path.join(BASE_DIR,"template")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'jb4mhnnbfl(1razgjgw#3a+()pn7pe=q(-tjxb3ys+!cb8d8&_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'world.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'world.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR,'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,'world/static')
] | [
"samyisotop@gmail.com"
] | samyisotop@gmail.com |
a65dddcf69a9be612371a4aa3fd4b4d9a448c0ba | dea03a31e322a06d743b48d714389724591dd45d | /testProject/app/migrations/0002_auto_20191017_2144.py | d424e86a3efcdb901d835ff701542334e57e3659 | [] | no_license | alanpolimentes/test | c5d4f50148649e047c2e9bdca502cb83ad8ff36b | d3c0d40d3292cefd6241e2f9ffe930a701ceaa33 | refs/heads/master | 2020-08-17T00:56:20.518223 | 2019-10-18T22:55:58 | 2019-10-18T22:55:58 | 215,583,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,624 | py | # Generated by Django 2.2.6 on 2019-10-17 21:44
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Auto-generated 2019-10-17, then hand-fixed.

    The generated version captured a *called* uuid4 as each field default
    (``default=uuid.UUID('...')``), which would give every new row the same
    primary-key value and fail with an integrity error on the second insert.
    The defaults below pass the ``uuid.uuid4`` callable instead, so Django
    generates a fresh UUID per row (matching the intended model definition).
    """

    dependencies = [
        ('app', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='categoria',
            name='id',
            field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='marca',
            name='id',
            field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='producto',
            name='id',
            field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='solicitante',
            name='id',
            field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='solicitud',
            name='cantidad',
            field=models.CharField(max_length=100),
        ),
        migrations.AlterField(
            model_name='subcategoria',
            name='id',
            field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
        ),
    ]
| [
"alan.garcia@polimentes.mx"
] | alan.garcia@polimentes.mx |
26520cf0e4d572626cca7f3ae58470069e37fd63 | 5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5 | /blimgui/dist/OpenGL/raw/GLES2/NV/read_buffer.py | 638349916933fad25c3ba754755ffda4f1e717dc | [
"MIT"
] | permissive | juso40/bl2sdk_Mods | 8422a37ca9c2c2bbf231a2399cbcb84379b7e848 | 29f79c41cfb49ea5b1dd1bec559795727e868558 | refs/heads/master | 2023-08-15T02:28:38.142874 | 2023-07-22T21:48:01 | 2023-07-22T21:48:01 | 188,486,371 | 42 | 110 | MIT | 2022-11-20T09:47:56 | 2019-05-24T20:55:10 | Python | UTF-8 | Python | false | false | 617 | py | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_NV_read_buffer'
def _f( function ):
    # Bind the declared stub against the GLES2 platform with the standard
    # error checker; applied as a decorator to the generated entry points.
    return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_NV_read_buffer',error_checker=_errors._error_checker)
# Token for querying/selecting the current read buffer under this extension.
GL_READ_BUFFER_NV=_C('GL_READ_BUFFER_NV',0x0C02)
# Generated stub: the body is replaced by the platform binding above.
@_f
@_p.types(None,_cs.GLenum)
def glReadBufferNV(mode):pass
| [
"justin.sostmann@googlemail.com"
] | justin.sostmann@googlemail.com |
3ed100785341bbd1cd924b27b04e790838d9c78d | bbe169a02aaec4e6744350530d835af17587cf09 | /models/vgg19_localization.py | d743a694819efc1af8a7865e8222a5d65f3eab69 | [] | no_license | atwang16/sp19-6s897-colon | af4fca0b472ca9e9d4039794b4e33fa9640d1e73 | e414708d1af6953d89ea7cdf88ceeabd04a56136 | refs/heads/master | 2021-08-06T20:42:29.518055 | 2020-06-01T06:27:34 | 2020-06-01T06:27:34 | 182,820,179 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,190 | py | #!/usr/bin/env python3
# 64c-64c-p-128c-128c-p-(256c)4-p-(512c)4-p-(512c)4-p-1560fc-1560fc
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import MaxPool2D, Input, Flatten, GlobalAveragePooling2D
from keras.models import Model
from keras.applications.vgg19 import VGG19
def convs(input, num_filters, kernel_size, stride, num_layers=1):
    """Apply `num_layers` same-padded Conv2D layers, then one 2x2 max pool."""
    x = input
    for _ in range(num_layers):
        conv = Conv2D(num_filters,
                      kernel_size=kernel_size,
                      strides=stride,
                      padding="same",
                      kernel_initializer="he_normal")
        x = conv(x)
    return MaxPool2D(pool_size=2)(x)
def vgg19(input_shape, pretrained_weights=None, use_sigmoid=False):
    """Build a VGG19-based bounding-box localization model.

    Args:
        input_shape: (height, width, channels) of the input images.
        pretrained_weights: optional path to a weights file, loaded by name.
        use_sigmoid: if True, predict coordinates with a sigmoid output
            scaled by the image size (requires a square input); otherwise
            use a linear output layer.

    Returns:
        A keras Model mapping images to 4 bounding-box coordinates, with
        the ImageNet-pretrained VGG19 base frozen.
    """
    base_model = VGG19(weights='imagenet', include_top=False, input_shape=input_shape)
    inputs = base_model.inputs
    x = base_model.output

    # regression head on top of the convolutional base
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, kernel_initializer="he_normal")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    if use_sigmoid:
        assert input_shape[0] == input_shape[1], "Currently only support equal width and height"
        outputs = Dense(4, activation="sigmoid")(x)
        # scale sigmoid outputs from [0, 1] to pixel coordinates
        outputs = keras.layers.Lambda(lambda x: x * input_shape[0])(outputs)
    else:
        outputs = Dense(4, activation="linear")(x)

    model = Model(inputs=inputs, outputs=outputs)

    if pretrained_weights is not None:
        model.load_weights(pretrained_weights, by_name=True)

    # freeze the VGG19 base so only the new head trains
    for layer in base_model.layers:
        layer.trainable = False

    return model
| [
"austinw@mit.edu"
] | austinw@mit.edu |
fd88c9382e2933697e301ad7e1c6fb31402f8d9c | 9f91d88b96ce10aadd0759132330a6b353b5cbe2 | /LeetCode/RE_add-two-numbers.py | dcd4373bac4d9c40521795a20dd62e5776694ff5 | [] | no_license | shivanishimpi/CP | b1c9bbace29892fa79183a941655ba216a79d1da | f74197fde545c90a8339f162adae532478ff451c | refs/heads/main | 2023-09-01T16:02:39.116495 | 2020-12-31T12:30:39 | 2020-12-31T12:30:39 | 308,958,764 | 2 | 0 | null | 2021-10-09T18:49:44 | 2020-10-31T19:39:38 | Python | UTF-8 | Python | false | false | 1,906 | py | """
You are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order, and each of their nodes contains a single digit. Add the two numbers and return the sum as a linked list.
You may assume the two numbers do not contain any leading zero, except the number 0 itself.
"""
#RUNTIME ERROR
#CORRECT OUTPUTS
class Node:
    # Singly linked node
    def __init__(self, data=None):
        # data -- payload stored at this node; next -- following node
        # (None until another node is linked after this one)
        self.data = data
        self.next = None
class singly_linked_list:
    # NOTE(review): the attribute names are inverted relative to convention:
    # `tail` holds the FIRST node appended and `head` the LAST, so iteration
    # starts from `tail` and yields items in insertion order.
    def __init__(self):
        # Create an empty list
        self.tail = None
        self.head = None
        self.count = 0

    def iterate_item(self):
        # Iterate the list, yielding each node's data in insertion order.
        current_item = self.tail
        while current_item:
            val = current_item.data
            current_item = current_item.next
            yield val

    def append_item(self, data):
        # Append an item at the end of the list (O(1) via the `head` pointer).
        node = Node(data)
        if self.head:
            self.head.next = node
            self.head = node
        else:
            # first element: both ends point at the new node
            self.tail = node
            self.head = node
        self.count += 1
# For each test case: read two numbers whose digits are given in reverse
# order (ones digit first), add them, print the sum's reversed digit list,
# then echo the digits one per line via a linked list.
for _ in range(int(input())):
    l1 = list(map(int, input().split(' ')))
    l2 = list(map(int, input().split(' ')))

    # Undo the reversal once and read each digit list as an integer.
    # (The original recomputed this join once per digit inside loops.)
    num1 = int(''.join(map(str, l1[::-1])))
    num2 = int(''.join(map(str, l2[::-1])))

    # Sum, stored as reversed digits again.
    ints = [int(d) for d in str(num1 + num2)[::-1]]
    print(ints)

    # Mirror the digits into a linked list and emit them one per line.
    items = singly_linked_list()
    for digit in ints:
        items.append_item(digit)
    for val in items.iterate_item():
        print(val)
| [
"shivanishimpi9@gmail.com"
] | shivanishimpi9@gmail.com |
c19420c7182d6d7cac399f8ef28bbe96ae2b4058 | e00870366e2db04dfe739a5b1b17e9682de6a129 | /Geometric/Graphics/cube_model.py | 59859edf4ef6724693cc29e8fae753ddb936a2cc | [] | no_license | jwatson-CO-edu/py_toybox | 06b0243626fbe0a136b1fa4f9a74783321ee1601 | 7f3b2aaeb24e41002e9dee2f2af669006e1cbd5c | refs/heads/master | 2022-03-25T05:11:55.724434 | 2022-01-27T21:07:26 | 2022-01-27T21:07:26 | 230,351,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,535 | py | #! /usr/bin/env python
#
# <one line to give the program's name and a brief idea of what it does.>
# Copyright (C) 2001 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""Loads a simple model format and draws it with opengl.
allows rotations and whatnot based on simple cursor input"""
__version__ = '0.05'
__date__ = '2002/05/23'
__author__ = 'Michael Urman (mu on irc.openprojects.net)'
import sys, math
try:
import pygame
from pygame.locals import *
from OpenGL.GL import *
except:
print 'model requires pygame and pyopengl'
raise SystemExit
try:
from OpenGL.GLU import *
GLU = 1
except:
print "Warning: OpenGL.GLU did not import correctly."
GLU = None
if len(sys.argv) < 2:
print 'Usage: model.py model.mf'
raise SystemExit
import Numeric as N
import types
def _cross(a, b):
return N.array((\
a[1]*b[2]-a[2]*b[1], \
a[2]*b[0]-a[0]*b[2], \
a[0]*b[1]-a[1]*b[0]))
class Quaternion:
    """Unit quaternion representing a 3-D rotation.

    Stored as scalar part ``r`` (= cos(angle/2)) and vector part ``v``
    (a Numeric array, = sin(angle/2) * axis).  Python 2 code: uses
    ``types.TupleType`` and unconventional ``q`` instead of ``self``.
    """
    def __init__(q, quat=None):
        """Quaternion(q, quat=None) -> Quaternion
        Creates a new "pure" quaternion (of no rotation). If a quaternion
        is passed in, it is copied. If a tuple of the format (degrees, (x,
        y, z)) or (x, y, z, w) is passed in, it is turned into a
        quaternion."""
        if quat:
            if isinstance(quat, Quaternion):
                # copy constructor; the slice copies the vector part
                q.r = quat.r
                q.v = quat.v[:]
            elif isinstance(quat, types.TupleType):
                if len(quat) == 4:
                    # (x, y, z, w): raw component form
                    q.v = N.array(quat[0:3])
                    q.r = quat[3]
                elif len(quat) == 2 and isinstance(quat[1], types.TupleType)\
                    and len(quat[1]) == 3:
                    # (degrees, (x, y, z)): angle/axis form.
                    # half-angle in radians = degrees * pi / 360
                    angle = quat[0] * math.pi/360
                    q.r = math.cos(angle)
                    sin_a = math.sin(angle)
                    q.v = sin_a * N.array(quat[1])
                else:
                    raise TypeError("Invalid tuple for argument 2")
            else:
                raise TypeError("Argument 2 must be a tuple")
        else:
            # default: zero scalar, unit x axis
            q.v = N.array((1,0,0))
            q.r = 0
    def rotate(self, angle, axis):
        """rotate(self, angle, axis) -> None
        Rotate Quaternion self in place by angle (degrees) around axis
        (x, y, z).
        NOTE(review): the original docstring claimed "return self for
        chaining", but nothing is returned."""
        q = Quaternion((angle,axis)) * self
        self.r = q.r
        self.v = q.v
    def __mul__(q, o):
        """Quaternion * Quaternion -> Quaternion
        Quaternion * float -> Quaternion
        Multiplies a Quaternion by a constant or by another Quaternion.
        Remember to do (b * a) for a Quaternion representation of a rotation
        by a then by b. Not Commutative!"""
        if isinstance(o, Quaternion):
            r = q.r; v = q.v
            s = o.r; w = o.v
            # Hamilton product; result is normalized before returning
            angle = r*s - N.dot(v,w)
            vec = r*w + s*v + _cross(v,w)
            n = math.sqrt(angle*angle + N.dot(vec,vec))
            return Quaternion((vec[0]/n, vec[1]/n, vec[2]/n, angle/n))
        else:
            return Quaternion((q.v[0]*o, q.v[1]*o, q.v[2]*o, q.r*o))
    def __imul__(q, o):
        """Quaternion *= Quaternion -> Quaternion
        Quaternion *= float -> Quaternion
        Multiplies a Quaternion in place by a constant or by another
        Quaternion. Remember to do (b * a) for a Quaternion representation
        of a rotation by a then by b. Not Commutative!
        Note: the quaternion-by-quaternion path normalizes; the scalar
        path does not (normalize() relies on that)."""
        if isinstance(o, Quaternion):
            r = q.r; v = q.v
            s = o.r; w = o.v
            q.r = r*s - N.dot(v,w)
            q.v = r*w + s*v + _cross(v,w)
            q.normalize()
        else:
            q.r *= o
            q.v *= o
        return q
    def __abs__(q):
        """abs(Quaternion) -> float
        Returns the magnitude of the Quaternion = sqrt(x*x+y*y+z*z+w*w)"""
        return math.sqrt(q.r*q.r + N.dot(q.v,q.v))
    def normalize(q):
        """normalize(q) -> q
        Normalizes the Quaternion such that the magnitude is 1.0"""
        n = abs(q)
        if n: q *= (1.0/n)
        return q
    def angle(q):
        """angle(q) -> float
        Return the angle of rotation (in degrees) represented by q"""
        # r = cos(angle/2), so angle_deg = 2*acos(r)*180/pi = acos(r)*360/pi
        return math.acos(q.r)*360/math.pi
    def axis(q):
        """axis(q) -> [x,y,z]
        Returns a (Numeric) array of the axis of rotation represented by q.
        Normalizes the vector to have magnitude of 1.0"""
        n = math.sqrt(N.dot(q.v,q.v))
        if not n: n = 1.0
        return q.v / n
    def __repr__(q):
        """repr(Quaternion) -> string
        Return a string of the format '<w [x y z]>' (direct Quaternion
        values)"""
        return '<%f %s>'%(q.r, q.v)
    def __str__(q):
        """str(Quaternion) -> string
        Return a string of the format '<angle (x y z)>' (angle in degrees
        and normalized axis of rotation)"""
        ax = q.axis()
        return '<%0.2f (%0.2f %0.2f %0.2f)>'%(q.angle(), ax[0], ax[1], ax[2])
class ModelNode:
    """Node base class for the hierarchical modeling system.

    A node holds an ordered list of child nodes; draw() recurses into
    every child.  Subclasses (Transform, SMF) override draw() to add
    behaviour around the recursive call.
    """
    def __init__(self, *children):
        # BUG FIX: the original kept the raw *children tuple whenever it
        # was non-empty, so a later add() failed with AttributeError
        # (tuples have no extend()).  Always store a list.
        self._children = list(children)
    def __repr__(self):
        return '<ModelNode id %s>'%id(self)
    def draw(self, *args, **kvargs):
        """Recursively draw every child node, forwarding all arguments."""
        for c in self._children: c.draw(*args, **kvargs)
    def add(self, *children):
        """add(self, children) -> None
        Add all children under the current Node"""
        self._children.extend(children)
class Transform(ModelNode):
    """Transformation nodes for hierarchical models.

    Wraps the children's draw() in a push/pop of the OpenGL modelview
    matrix, applying translate -> rotate -> scale (in that order).
    """
    def __init__(self, translate=None, rotate=None, scale=None):
        ModelNode.__init__(self)
        # defaults: no translation, identity rotation, unit scale
        self._x_translate = translate or [0,0,0]
        self._x_rotate = rotate or Quaternion((0, (1,0,0)))
        self._x_scale = scale or [1,1,1]
    def draw(self, *args, **kvargs):
        """draw(self, *args, **kvargs) -> None
        Applies transformations and calls children's draw() routine"""
        glPushMatrix()
        glTranslate(*self._x_translate)
        glRotatef(self._x_rotate.angle(), *self._x_rotate.axis())
        glScale(*self._x_scale)
        ModelNode.draw(self, *args, **kvargs)
        glPopMatrix()
    def rotate(self, angle, axis):
        """rotate(self, angle, axis) -> Quaternion
        Rotate model by angle (degrees) around the axis (x,y,z); returns
        the accumulated rotation quaternion (used by main() for display)."""
        # normalize axis
        al = 1/math.sqrt(axis[0]*axis[0]+axis[1]*axis[1]+axis[2]*axis[2])
        axis = axis[0] * al, axis[1]*al, axis[2]*al
        self._x_rotate.rotate(angle, axis)
        return self._x_rotate
    def translate(self, delta):
        """translate(self, delta) -> list
        Translate model by delta (x,y,z); returns a copy of the new
        accumulated translation."""
        self._x_translate[0] += delta[0]
        self._x_translate[1] += delta[1]
        self._x_translate[2] += delta[2]
        return self._x_translate[:]
    def scale(self, factor):
        """scale(self, factor) -> list
        Scale model by factor (x,y,z) component-wise; returns a copy of
        the new accumulated scale."""
        if factor: self._x_scale = [self._x_scale[i]*factor[i] for i in range(3)]
        return self._x_scale[:]
class SMF(ModelNode):
    """Handles loading and drawing of a simple model format.

    File format (one record per line):
      v X Y Z   vertex position
      f A B C   triangle using 1-based vertex indices A, B, C
      c R G B   per-vertex color (parallel to the 'v' records)
      t S T     per-vertex texture coordinate
      t NAME    texture image filename (at most one; 2**k x 2**k pixels)
      #...      comment; blank lines ignored

    Python 2 code: relies on f.xreadlines(), dict.has_key() and list
    results from map().
    """
    def __init__(self, filename=None, calcnormals=None):
        """SMF([filename]) optionally loads model stored in passed filename"""
        # NOTE(review): calcnormals is accepted but never used; self.normals
        # is never populated.
        ModelNode.__init__(self)
        self.vertices = [] # list of vertices
        self.colors = [] # corresponding colors
        self.faces = [] # list of references to vertices
        self.normals = [] # normals correspond to each face
        self.texture = None
        self.texturecoords = [] # S,T corresponds to each vertex
        self.usedrawlist = None
        self.drawlist = None
        if filename:
            # NOTE(review): the file handle is never closed explicitly
            f = open(filename)
            linecount = 0
            for line in f.xreadlines():
                linecount += 1
                items = line.split()
                # v X Y Z defines a vertex at (x, y, z)
                if len(items) == 4 and items[0] == 'v':
                    self.vertices.append(map(lambda x:float(x), items[1:4]))
                # f A B C defines a face using vertices A, B, C
                elif len(items) == 4 and items[0] == 'f':
                    # stored 0-based (file indices are 1-based)
                    self.faces.append(map(lambda x:int(x)-1, items[1:4]))
                # c R G B defines a color for corresponding vertex
                elif len(items) == 4 and items[0] == 'c':
                    self.colors.append(map(lambda x:float(x), items[1:4]))
                # t S T defines a texture coordinate for corresponding vertex
                elif len(items) == 3 and items[0] == 't':
                    self.texturecoords.append(map(lambda x:float(x), items[1:3]))
                # t filename defines a texture for the model
                # should be 2**k x 2**k pixels
                elif len(items) == 2 and items[0] == 't':
                    if self.texture:
                        raise RuntimeError("Can't handle multiple textures")
                    self.texture = items[1]
                elif line[0] == '#' or len(items) == 0:
                    pass
                else:
                    raise RuntimeError("Invalid syntax on line %d '%s'"%(linecount, line))
        if self.texture:
            if not GLU:
                raise NotImplementedError("textures require mipmaps require OpenGL.GLU")
            # load and prepare texture image for opengl
            img = pygame.image.load(self.texture)
            w, h = img.get_width(), img.get_height()
            rgb = pygame.image.tostring(img, "RGB", 0)
            #assign a texture
            self.textureid = glGenTextures(1)
            glBindTexture(GL_TEXTURE_2D, self.textureid)
            #glPixelStorei(GL_UNPACK_ALIGNMENT,1)
            #build MIPMAP levels
            gluBuild2DMipmaps(GL_TEXTURE_2D, GL_RGB, w, h, GL_RGB, GL_UNSIGNED_BYTE, rgb)
            glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
            glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
            glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
            glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    def draw(self, *args, **kvargs):
        """Draw the model to the waiting screen.

        Keyword 'wireframe' draws line loops instead of filled triangles;
        otherwise uses the compiled display list when one was built, else
        immediate-mode triangles (textured when a texture is loaded)."""
        if kvargs.has_key('wireframe') and kvargs['wireframe']:
            for face in self.faces:
                glBegin(GL_LINE_LOOP)
                for vert in face:
                    glColor3fv(self.colors[vert])
                    glVertex3fv(self.vertices[vert])
                glEnd()
        elif self.usedrawlist:
            glCallList(self.drawlist)
        else:
            if self.texture:
                glPushAttrib(GL_ENABLE_BIT) # save old enables
                glColor4f(1,1,1,1)
                glEnable(GL_TEXTURE_2D)
                glBindTexture(GL_TEXTURE_2D, self.textureid)
                glBegin(GL_TRIANGLES)
                for face in self.faces:
                    for vert in face:
                        glTexCoord2fv(self.texturecoords[vert])
                        glVertex3fv(self.vertices[vert])
                glEnd()
                glPopAttrib()
            else:
                glBegin(GL_TRIANGLES)
                for face in self.faces:
                    for vert in face:
                        glColor3fv(self.colors[vert])
                        glVertex3fv(self.vertices[vert])
                glEnd()
    def build_display_list(self):
        """Try to optimize the draw routine by using a display list"""
        self.drawlist = glGenLists(1)
        glNewList(self.drawlist, GL_COMPILE)
        self.draw()
        glEndList()
        self.usedrawlist = 1
class OGLSprite:
    """Implement the ugly details of "blitting" to OpenGL.

    Wraps a pygame Surface in a power-of-two OpenGL texture so 2-D
    images (e.g. rendered text) can be drawn over a 3-D scene.  Usage:
    ogs.enter().blit_at(rect).exit() once per batch.
    """
    def __init__(self, surf, rect=None, mipmap=None):
        """OGLSprite(self, surf, rect=None) -> OGLSprite
        Create a drawable texture out of a given surface."""
        if not rect: rect = surf.get_rect()
        w, h = surf.get_width(), surf.get_height()
        # round the texture size up to the next powers of two
        w2, h2 = 1, 1
        while w2 < w: w2 <<= 1
        while h2 < h: h2 <<= 1
        #surfr = pygame.surfarray.pixels3d(surf)
        #surfa = pygame.surfarray.alpha(surf)
        img = pygame.Surface((w2, h2), SRCALPHA, surf)
        #imgr = pygame.surfarray.pixels3d(img)
        #imga = pygame.surfarray.pixels_alpha(img)
        #putmask(imgr,
        #putmask(imga,
        # paste at the bottom-left so the image sits at texcoord origin
        img.blit(surf, (0,h2-h), rect)
        rgba = pygame.image.tostring(img, "RGBA", 0)
        # prove that blitting sucks?
        #print "0:",surf.get_at((0,0))
        #print "1:",img.get_at((0,0))
        #assign a texture
        texid = glGenTextures(1)
        glBindTexture(GL_TEXTURE_2D, texid)
        #glPixelStorei(GL_UNPACK_ALIGNMENT,1)
        if mipmap:
            if not GLU:
                raise NotImplementedError("OGLSprite mipmaps require OpenGL.GLU")
            #build MIPMAP levels. Ths is another slow bit
            gluBuild2DMipmaps(GL_TEXTURE_2D, GL_RGBA, w2, h2, GL_RGBA, GL_UNSIGNED_BYTE, rgba)
            glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
            glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
        else:
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w2, h2, 0, GL_RGBA, GL_UNSIGNED_BYTE, rgba)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
        self.mipmap = mipmap
        self.srcsize = w, h              # size of the source image
        self.texsize = w2, h2            # power-of-two texture size
        self.coords = float(w)/w2, float(h)/h2   # texcoord extent of the image
        self.texid = texid
        #print "TEX", self.srcsize, self.texsize, self.coords
    def update(self, surf, rect=None):
        """update(self, surf, rect=None) -> None
        Replace the texture contents with a new surface (non-mipmap only)."""
        if self.mipmap:
            raise TypeError("Cannot update a mipmap enabled OGLSprite")
        if not rect: rect = surf.get_rect()
        w, h = surf.get_width(), surf.get_height()
        w2, h2 = 1, 1
        while w2 < w: w2 <<= 1
        while h2 < h: h2 <<= 1
        img = pygame.Surface((w2, h2), SRCALPHA, surf)
        img.blit(surf, (0,h2-h), rect)
        rgba = pygame.image.tostring(img, "RGBA", 0)
        glBindTexture(GL_TEXTURE_2D, self.texid)
        # NOTE(review): 'glTexSubImage2D' in dir() checks the local scope,
        # which is almost certainly always false here (author flagged it
        # as untested below), so the else branch normally runs.
        if 'glTexSubImage2D' in dir() \
            and w2 <= self.texsize[0] and h2 <= self.texsize[1]:
            # untested; i suspect it doesn't work
            w2, h2 = self.texsize
            glTexSubImage2D(GL_TEXTURE_RECTANGLE_EXT, 0,
                0, 0, w2, h2, GL_RGBA, GL_UNSIGNED_BYTE, rgba);
            if (w, h) != self.srcsize:
                self.coords = float(w)/w2, float(h)/h2
        else:
            glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA,
                w2, h2, 0, GL_RGBA, GL_UNSIGNED_BYTE, rgba)
            self.coords = float(w)/w2, float(h)/h2
            self.texsize = w2, h2
        self.srcsize = w, h
        #print "TEX", self.srcsize, self.texsize, self.coords
    def blit_at(self, *rects):
        """blit_at(self, *rects) -> self
        Draw the texture at the supplied position(s). If a tuple and width and
        height are not specified, the original size is used (just like you'd
        expect). Returns self so ogs.enter().blit().exit() works"""
        for rect in rects:
            x0, y0 = rect[0:2]
            try:
                x1, y1 = x0 + rect[2], y0 + rect[3]
            except IndexError:
                # no explicit size given: use the source image size
                x1, y1 = x0 + self.srcsize[0] - 1, y0 + self.srcsize[1] - 1
            glBindTexture(GL_TEXTURE_2D, self.texid)
            glBegin(GL_TRIANGLE_STRIP)
            glTexCoord2f(0, 0); glVertex2f(x0, y0)
            glTexCoord2f(self.coords[0], 0); glVertex2f(x1, y0)
            glTexCoord2f(0, self.coords[1]); glVertex2f(x0, y1)
            glTexCoord2f(self.coords[0], self.coords[1]); glVertex2f(x1, y1)
            glEnd()
        return self
    def enter(self):
        """enter(self) -> self
        Set up OpenGL for drawing textures; do this once per batch of
        textures. Returns self so ogs.enter().blit().exit() works"""
        glPushAttrib(GL_ENABLE_BIT) # save old enables
        glDisable(GL_DEPTH_TEST)
        glDisable(GL_CULL_FACE)
        glColor4f(1,1,1,1)
        glEnable(GL_TEXTURE_2D)
        # XXX: in pre pygame1.5, there is no proper alpha, so this makes
        # the entire texture transparent. in 1.5 and forward, it works.
        # NOTE(review): lexicographic string comparison of version numbers
        # ('1.4.9' vs '1.10.0') is fragile -- verify against modern pygame.
        if pygame.version.ver >= '1.4.9':
            glEnable(GL_BLEND)
            glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        #glEnable(GL_ALPHA_TEST)
        #glAlphaFunc(GL_GREATER, 0.5)
        # switch to a 2-D orthographic projection covering the window
        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        glOrtho(0.0, 640.0, 480.0, 0.0, 0.0, 1.0)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()
        #glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)
        return self
    def exit(self):
        """exit(self) -> None
        Return OpenGL to previous settings; do this once per batch."""
        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()
        glPopAttrib()
    def get_width(self):
        """get_width(self) -> int"""
        return self.srcsize[0]
    def get_height(self):
        """get_height(self) -> int"""
        return self.srcsize[1]
def main():
    """loads and runs the model display.

    Controls: left-drag rotates, right-drag translates (shift-right-drag
    scales), middle click compiles a display list, 'w' toggles wireframe,
    't' re-renders the caption, Return toggles fullscreen, Esc quits.
    """
    pygame.display.init()
    pygame.font.init()
    screen = pygame.display.set_mode((640,480), OPENGL|DOUBLEBUF)
    glEnable(GL_DEPTH_TEST) # use zbuffer
    # boring camera setup
    glMatrixMode(GL_PROJECTION)
    if GLU:
        gluPerspective(45.0, 640/480.0, 0.1, 100.0)
    else:
        # hand-rolled perspective matrix equivalent to the gluPerspective
        # call above (fallback when OpenGL.GLU failed to import)
        f = 1.3 / (math.tan(45.0/2))
        glMultMatrix((f*480/640, 0, 0, 0,
            0, f, 0, 0,
            0, 0, 100.1/-99.9, -1,
            0, 0, 2*100*0.1/-99.9, 0))
    glTranslatef(0.0, 0.0, -3.0)
    glRotatef(25, 1,0,0)
    cube = SMF(sys.argv[1])
    model = Transform()
    model.add(cube)
    font = pygame.font.Font(None, 48)
    text = OGLSprite(font.render('Pygame', 1, (255, 0, 0)))
    update = 1
    do_wireframe = 0
    quit = 0
    hide = 0       # depth count of pressed rotate/translate buttons
    while 1:
        # block until at least one event arrives, then drain the queue
        events = [pygame.event.wait()]
        if pygame.event.peek():
            events.extend(pygame.event.get())
        for event in events:
            if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
                quit = 1
            elif event.type == KEYDOWN and event.key == K_RETURN:
                pygame.display.toggle_fullscreen()
            elif event.type == KEYDOWN and event.key == K_t:
                text.update(font.render('Pygame', 1, (255, 0, 0)))
                update = 1
            elif event.type == KEYDOWN and event.key == K_w:
                do_wireframe = not do_wireframe
                update = 1
            elif event.type == VIDEOEXPOSE:
                update = 1
            elif event.type == MOUSEBUTTONDOWN:
                if event.button in (1,3):
                    # hide/grab the cursor while dragging
                    hide += 1
                    pygame.mouse.set_visible(0)
                    pygame.event.set_grab(1)
                elif event.button == 2:
                    print 'building display list'
                    cube.build_display_list()
            elif event.type == MOUSEBUTTONUP:
                if event.button in (1,3):
                    hide -= 1
                    if not hide:
                        pygame.mouse.set_visible(1)
                        pygame.event.set_grab(0)
            if event.type == MOUSEMOTION and not update:
                if event.buttons[0]:
                    # rotate by drag distance around the axis perpendicular
                    # to the drag direction (screen y maps to model x)
                    dx, dy = event.rel
                    dist = math.sqrt(dx*dx+dy*dy)
                    q = model.rotate(dist, (event.rel[1], event.rel[0], 0))
                    text.update(font.render(str(q), 1, (255, 127, 127)))
                    update = 1
                if event.buttons[2]:
                    if pygame.key.get_mods() & KMOD_SHIFT:
                        # shift-right-drag: uniform scale around 1.0
                        s = 100 + event.rel[0]+event.rel[1]
                        s *= 0.01
                        x, y, z = model.scale((s,s,s))
                        text.update(font.render('<%0.2f %0.2f %0.2f>'%(x,y,z), 1, (126, 127, 255)))
                    else:
                        x, y, z = model.translate((event.rel[0]*0.02, -event.rel[1]*0.02, 0))
                        text.update(font.render('<%0.2f %0.2f %0.2f>'%(x,y,z), 1, (255, 127, 127)))
                    update = 1
        if quit:
            break
        if update:
            # redraw only when something changed
            update = 0
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
            model.draw(wireframe=do_wireframe)
            text.enter()
            text.blit_at(
                (screen.get_width()-text.get_width()+1,
                screen.get_height()-text.get_height()+1))
            text.exit()
            pygame.display.flip()
if __name__ == '__main__': main()
| [
"james.watson-2@colorado.edu"
] | james.watson-2@colorado.edu |
c23dd561e8a88cfa7422931a62513d59e6b6cbf6 | 6effd74388d87b4fbad1e2b858571189f575a850 | /5.py | 2e463301ee81a67ecde78622424e6cd2500244b5 | [] | no_license | wiesson/tdd-project-euler | b1521d521f1e2304b6180337c592716b68e3b055 | 189b772172ed723658924d6832f3c403bcc3f56e | refs/heads/master | 2021-01-17T20:16:40.359816 | 2015-06-23T16:27:40 | 2015-06-23T16:27:40 | 37,330,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,844 | py | #! /usr/bin/env python
from functools import reduce
from math import ceil, sqrt
import unittest
def is_prime(prime):
    """Return True iff `prime` is a prime number.

    BUG FIX: the original bound `range(2, ceil(sqrt(prime)))` excluded
    the square root itself, so perfect squares of primes (4, 9, 25, ...)
    were reported as prime.  Values below 2 are now also rejected.
    (Based on http://stackoverflow.com/a/4117879/2050629.)
    """
    from math import isqrt  # exact integer sqrt; avoids float rounding
    if prime < 2:
        return False
    # trial division by every candidate up to and including isqrt(prime)
    return all(prime % i for i in range(2, isqrt(prime) + 1))
def prime_factors(n):
    """Return the prime factorisation of n as an ascending list.

    e.g. prime_factors(12) -> [2, 2, 3].  For n <= 1 an empty list is
    returned (same as the original).

    Improvement: the original restarted its trial divisor at 2 after
    every factor found; here the divisor only ever increases, and the
    bound tracks the shrinking remainder instead of the original n.
    """
    factors = []
    remainder = n
    divisor = 2
    while remainder > 1:
        if divisor * divisor > remainder:
            # no divisor up to sqrt(remainder): remainder itself is prime
            factors.append(remainder)
            break
        if remainder % divisor == 0:
            factors.append(divisor)
            remainder //= divisor
        else:
            divisor += 1
    return factors
def smallest_multiple_slow(n_range):
    """Brute-force the smallest n divisible by every value in n_range.

    Prints "> n" and returns True when found.

    BUG FIX: the original success test
    (`each == 10 and each == len(n_range) + 1`) only ever fired for
    range(2, 11); for any other range (e.g. range(2, 21)) the function
    looped forever.  The for/else below detects completion correctly
    for every range.
    """
    n = 1
    while True:
        for each in n_range:
            if n % each:
                break  # n fails this divisor; try n + 1
        else:
            # inner loop finished without break: n divides everything
            print("> " + str(n))
            return True
        n = n + 1
def smallest_multiple_new(n_range):
    """Compute the LCM of every value in n_range via maximal prime powers.

    Prints the result and returns True (mirrors smallest_multiple_slow's
    interface).  For each prime in the range, the exponent used is the
    largest number of times that prime occurs in any single value's
    factorisation.
    """
    values = list(n_range)
    factorisations = [prime_factors(value) for value in values]
    primes = [value for value in values if is_prime(value)]
    # highest multiplicity of each prime across all factorisations
    max_power = {}
    for p in primes:
        max_power[p] = max([f.count(p) for f in factorisations] or [0])
    num = 1
    for p, exponent in max_power.items():
        num *= p ** exponent
    print(num)
    return True
class TestCase(unittest.TestCase):
    """Smoke tests for smallest_multiple_new (range(2,11) and range(2,21))."""
    # @unittest.skip
    def test_smallest_multiple(self):
        # only the True return value is asserted; the LCM itself is printed
        self.assertEqual(smallest_multiple_new(range(2, 11)), True) # [2, 13]
        self.assertEqual(smallest_multiple_new(range(2, 21)), True) # [2, 13]
self.assertEqual(smallest_multiple_new(range(2, 21)), True) # [2, 13]
unittest.main()
| [
"wiese@sipgate.de"
] | wiese@sipgate.de |
4530a5673d72ef9a8eac58ce77431858da59fecf | b47853b6dcf8ab14f1dafeb2d613212a71329206 | /war/prefabs/warrior/mage.py | 8cc1d91efe572268825c7516d0560767f4180e6b | [] | no_license | hittun/pygamel | b75f5b340825a3d99a0087eb6d396dc1a5f26802 | adc294f076d7659dd6357a3365053c62a1868876 | refs/heads/master | 2021-05-16T20:46:53.776772 | 2020-03-30T15:48:08 | 2020-03-30T15:48:08 | 250,463,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
法师(Mage):
法师俗称AP或APC,是具有强大魔法伤害技能、但防守能力和移动能力偏低的英雄。
一些法师可以在短时间内造成巨大伤害,一些则是以长期持续伤害为主,爆发性的法师和刺客间的界线很模糊。
# @File : mage.py
# @Time : 2020/3/28 4:46
# @GitHub: https://github.com/hittun/pygamel
"""
from .warrior import Warrior
class Mage(Warrior):
    """Base class for mage heroes (a.k.a. AP / APC).

    Per the module docstring: high magic damage, low defence and low
    mobility.  Currently adds no behaviour beyond Warrior.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
class DaJi(Mage):
    """Da Ji (妲己) -- a concrete mage hero; no extra behaviour yet."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
| [
"hittun@163.com"
] | hittun@163.com |
06b5353e742f734e8a4918ac8cd03995c46fc663 | aed424076253479f698cb13acecb1800c9647092 | /Qlearn_proto/manual_control_data.py | 0e01b7793e33078e25cfe05dca7d8188e7c0739c | [] | no_license | starjh94/Drone-Hover | c2e93578ccf47c0e72b38f49441a0a393272ee4d | 60e3f7f56ed5c884bb860f2c1630b03d419b67c2 | refs/heads/master | 2021-01-22T17:42:54.250271 | 2017-12-23T00:20:04 | 2017-12-23T00:20:04 | 85,031,328 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,165 | py | import subprocess
subprocess.Popen(["python","degree_process.py"])
import sys, time
import navio.rcinput
import navio.util
import navio.pwm
import degree_gyro
import numpy as np
import Servo
import threading
# Shared mutable state between the RC-input thread (Manual_control) and
# main().  period1/period3 hold the latest raw readings of RC channels
# 0 and 2 (main() converts them with int() -- presumably PWM pulse
# widths in microseconds; confirm against navio.rcinput docs).
period1 = 0
period3 = 0
start_time = 0
## Data numpy value initialize ##
# Each log array starts with a single all-zero row; rows are appended
# per control-loop iteration and saved with np.save() at shutdown.
np_gyro_degree = np.array([[0, 0]])
np_acc_degree = np.array([[0, 0]])
np_acc_gyro = np.array([[0, 0]])
np_left_motor = np.array([[0, 0]])
np_right_motor = np.array([[0, 0]])
# columns: time, fused pitch, accel pitch, gyro pitch, left pwm, right pwm
np_ML_data = np.array([[0, 0, 0, 0, 0, 0]])
class Manual_control(threading.Thread):
    """Daemon thread that continuously polls the RC receiver.

    Publishes channels 0 and 2 through the module-level globals
    `period1` and `period3`, which the control loop in main() reads.
    """
    def run(self):
        global period1
        global period3
        rcin = navio.rcinput.RCInput()
        while(True):
            # busy-poll; values are raw channel readings (strings --
            # main() applies int()).  No sleep: relies on read() blocking
            # or on the daemon flag for shutdown -- TODO confirm.
            period1 = rcin.read(0)
            period3 = rcin.read(2)
def main():
    """Drone manual-control loop: fuse pitch estimates, drive two servos
    from RC input, and log every iteration into np_ML_data.

    Python 2 code (print statements, raw_input).  Pitch fusion is a
    complementary filter: 97% integrated gyro + 3% accelerometer.
    """
    manual = Manual_control(name='recv_rc')
    global period1
    global period3
    global np_gyro_degree
    global np_acc_degree
    global np_acc_gyro
    global np_left_motor
    global np_right_motor
    global np_ML_data
    global start_time
    # base servo pulse width in milliseconds -- TODO confirm units
    pwm_1 = 1.1
    pwm_2 = 1.1
    print "start"
    a = Servo.servo()
    b = degree_gyro.acc()
    timecheck_list = []
    # seed all pitch estimates from the accelerometer reading
    pitch_aver = acc_gyro_pitch = gyro_pitch_degree = b.pitch()
    ## matplotlib data initialization ##
    """
    np_gyro_degree = np.array([[0, gyro_pitch_degree]])
    np_acc_degree = np.array([[0, b.pitch()]])
    np_acc_gyro = np.array([[0, acc_gyro_pitch]])
    np_left_motor = np.array([[0, pwm_1]])
    np_right_motor = np.array([[0, pwm_2]])
    """
    np_ML_data = np.array([[0, acc_gyro_pitch, b.pitch(), gyro_pitch_degree, pwm_1, pwm_2]])
    manual.daemon = True
    manual.start()
    start_time = time.time()
    timecheck_list.append(start_time)
    # wait for operator confirmation before entering the control loop
    while True:
        start_action = raw_input("\nI'm ready\nAre you ready?(Y / N): ")
        if start_action.upper() == "Y":
            print "\nGame will be started! "
            break
        else:
            print "\nOK! let me do it again ~"
    while(True):
        # two-element sliding window gives the elapsed time of one loop
        timecheck_list.append(time.time())
        loop_time = timecheck_list[1] - timecheck_list[0]
        timecheck_list.pop(0)
        acc_pitch_degree = b.pitch()
        gyro_pitch_degree = b.gyro_pitch(loop_time, gyro_pitch_degree)
        get_gyro_degree = b.gyro_pitch(loop_time, acc_gyro_pitch)
        # complementary filter on magnitudes, sign taken from the gyro
        acc_gyro_pitch = np.sign(get_gyro_degree) * ((0.97 * abs(get_gyro_degree)) + (0.03 * abs(acc_pitch_degree)))
        ## servo part ##
        # map raw RC readings (offset 982) onto the base pulse widths
        servo_pwm1 = pwm_1 + (int(period3) - 982) * 0.00049
        servo_pwm2 = pwm_2 + (int(period1) - 982) * 0.00049
        a.servo_1(servo_pwm1)
        a.servo_2(servo_pwm2)
        ## for matplotlib ##
        data_time = time.time() - start_time
        """
        np_gyro_degree = np.append(np_gyro_degree, [[data_time, gyro_pitch_degree]], axis=0)
        np_acc_degree = np.append(np_acc_degree, [[data_time, acc_pitch_degree]], axis=0)
        np_acc_gyro = np.append(np_acc_gyro, [[data_time, acc_gyro_pitch]], axis=0)
        np_left_motor = np.append(np_left_motor, [[data_time, servo_pwm1]], axis=0)
        np_right_motor = np.append(np_right_motor, [[data_time, servo_pwm2]], axis=0)
        """
        np_ML_data = np.append(np_ML_data, [[data_time, acc_gyro_pitch, acc_pitch_degree, gyro_pitch_degree, servo_pwm1, servo_pwm2]], axis=0)
        print "<time: %.16s> : degree= %.16s \tpwm_1= %.5s pwm2= %.5s" % (data_time, acc_gyro_pitch, servo_pwm1, servo_pwm2)
        #print "pwm_v1 = %s pwm_v2 = %s degree = C: %s\t<-\tG: %s vs A: %s" % (servo_pwm1, servo_pwm2, acc_gyro_pitch, gyro_pitch_degree, acc_pitch_degree)
        time.sleep(0.01)
if __name__ == '__main__':
    try :
        main()
    # NOTE(review): bare except also swallows KeyboardInterrupt -- here
    # that is how the operator stops the loop, but it hides real errors
    # too; catching (KeyboardInterrupt, Exception) explicitly and logging
    # the exception would be safer.
    except :
        print("finish")
        """
        np.save('gyro_degree_Data', np_gyro_degree)
        np.save('acc_degree_Data', np_acc_degree)
        np.save('accGyro_degree_Data', np_acc_gyro)
        np.save('left_motor_Data', np_left_motor)
        np.save('right_motor_Data', np_right_motor)
        """
        # persist the per-iteration log for offline ML / plotting
        np.save('M_L_Data', np_ML_data)
        print "time: %s, number of numpy data: %s" % (time.time() - start_time, len(np_ML_data))
| [
"lbaksa21@gmail.com"
] | lbaksa21@gmail.com |
e5238361b8ddfb4b94907a15d69e41fc53033e74 | 7bc7fe046ce8393870ebba5b744d8809932f036f | /leetcode/344-Reverse-String.py | 606fa4bc8a47658edd61da99b3a186270acc50dc | [] | no_license | BrandonBlimp/Interview-Prep | 2d6376396f8349818c6ebd58130a95acbaa0df30 | b309883565bdc82a4a95dc441bbf0c2d1f89c3ae | refs/heads/master | 2020-06-07T08:52:40.148138 | 2019-06-20T20:20:09 | 2019-06-20T20:20:09 | 192,979,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | # Write a function that reverses a string. The input string is given as an array of characters char[].
# Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.
# You may assume all the characters consist of printable ascii characters.
# Example 1:
# Input: ["h","e","l","l","o"]
# Output: ["o","l","l","e","h"]
# Example 2:
# Input: ["H","a","n","n","a","h"]
# Output: ["h","a","n","n","a","H"]
class Solution:
    def reverseString(self, s):
        """
        :type s: List[str]
        :rtype: void Do not return anything, modify s in-place instead.

        Classic two-pointer reversal: swap the outermost pair and walk
        the pointers inward.  O(n) time, O(1) extra memory.
        """
        left, right = 0, len(s) - 1
        while left < right:
            s[left], s[right] = s[right], s[left]
            left += 1
            right -= 1
| [
"brandonloong.lim@gmail.com"
] | brandonloong.lim@gmail.com |
c606b6292b29221a89c5a63f93c792418b45da08 | d58ab1038a1fb412a45c7e471ec3bcbde1ce66a2 | /theZoo/pipelines.py | 7133a4d2045de932e6fb358a6c45da9dfb1a3251 | [
"MIT"
] | permissive | webclinic017/Tarantula | af316134bead2694a4501149def06aa2a8876420 | 1eb10c7d588493fabee0f8cf5269c737389dd193 | refs/heads/main | 2023-08-24T05:06:22.976241 | 2021-10-27T18:35:27 | 2021-10-27T18:35:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,582 | py | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
# from itemadapter import ItemAdapter
from sqlalchemy.orm import sessionmaker
from theZoo.models import Items, create_items_table, db_connect
from scrapy.exporters import JsonItemExporter
class ThezooPipeline:
    """Scrapy item pipeline: exports items to item.json AND stores them
    in the database via SQLAlchemy."""
    def __init__(self):
        """
        Initializes database connection and sessionmaker.
        Creates items table.
        """
        engine = db_connect()
        create_items_table(engine)
        self.Session = sessionmaker(bind=engine)
    # class-level default; open_spider() replaces it with a real handle
    file = None
    def open_spider(self, spider):
        # JsonItemExporter requires a binary-mode file
        self.file = open('item.json', 'wb')
        self.exporter = JsonItemExporter(self.file)
        self.exporter.start_exporting()
    def close_spider(self, spider):
        self.exporter.finish_exporting()
        self.file.close()
    def process_item(self, item, spider):
        """
        Here we are processing our item and storing to the database
        """
        self.exporter.export_item(item)
        session = self.Session()
        # instance = session.query(Items).filter_by(**item).one_or_none()
        # if instance:
        #     return instance
        scrape_item = Items(**item)
        try:
            session.add(scrape_item)
            session.commit()
        # roll back and re-raise on any insert/commit failure, always
        # closing the session
        except:
            session.rollback()
            raise
        finally:
            session.close()
        return item
| [
"georgereyes103@gmail.com"
] | georgereyes103@gmail.com |
9e36dee4daba7a090054dbc2c153be60056ec165 | 99f9b73530de837297f095689b250b8ebda8f836 | /project euler/prob1.py | b3fba06d1275ea9ae970cd68d930db97fe063ef9 | [] | no_license | ManimaranN/Competitive_solutions | 18cfcf648d85ac4c253690348c180836257cb6d4 | 34f49f9b05b674796335125f7087c4393c05d886 | refs/heads/master | 2020-03-25T05:18:32.165026 | 2018-08-07T16:24:59 | 2018-08-07T16:24:59 | 143,440,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | list = []
for i in range(3,1000):
if i%3 == 0:
list.append(i)
elif i%5 == 0 :
list.append(i)
i += 1
add = sum(list)
print("sum :", add)
| [
"manimaran.n36@gmail.com"
] | manimaran.n36@gmail.com |
9619928a1ab7f5e6f1375ccdfbd3e23c15e65270 | 1296db988387cc1e6a14db76f777cf2cdfca97ba | /corregraphe/core.py | 7a8795545a34fc696fd2fd741e8d584dbba8d89a | [
"MIT"
] | permissive | theodcr/corregraphe | 30529650a86a0722ff83aacbb2fc3556258cff11 | fec9bf844d7ea2dc870197580a4c59a1fad258b8 | refs/heads/master | 2020-06-25T21:25:21.888291 | 2019-09-15T08:11:53 | 2019-09-15T08:11:53 | 199,425,868 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,976 | py | from typing import Dict
import hvplot.networkx as hvnx
import networkx as nx
from holoviews import Overlay
from pandas import DataFrame
class CorrelationGraph(object):
    """
    Creates a correlation graph from dataframe.
    Parameters
    ----------
    data : DataFrame
        data to use to compute correlations
    method : str = 'kendall' {'pearson', 'kendall', 'spearman'}
        correlation method, see pandas.DataFrame.corr
    Attributes
    ----------
    correlations : DataFrame
        dataframe of correlations, columns and indexes are columns of `data`
    graph : Graph
        NetworkX graph object representing the correlations,
        each node is a column of `data`, edges are correlations
    pos : Dict
        positions of nodes, keys are node names, value are (x, y) positions
    Usage
    -----
    >>> df = DataFrame({'a': [1, 2, 3, 4], 'b': [2, 4, 6, 8]})
    >>> cg = CorrelationGraph(df)
    >>> cg.correlations
         a    b
    a  1.0  1.0
    b  1.0  1.0
    >>> fig = cg.draw()
    """
    def __init__(self, data: DataFrame, method: str = "kendall") -> None:
        self._data = data
        self._method = method
        # pipeline: correlations -> complete weighted graph -> 2-D layout
        self.correlations = self._compute_correlations(self._data, self._method)
        self.graph = self._create_graph(self.correlations)
        self.pos = self._compute_positions(self.graph)
    def draw(self, **kwargs: Dict) -> Overlay:
        """Draws the graph and returns the hvplot object.
        Parameters
        ----------
        **kwargs : Dict[str, Any]
            keyword arguments given to the hvplot.networkx.draw method
        Returns
        -------
        Overlay
            HoloViews Overlay representing the correlation graph
        """
        # edge width encodes |correlation|; node color encodes the
        # weighted clustering coefficient computed in _create_graph
        return hvnx.draw(
            self.graph,
            pos=self.pos,
            edge_width="weight",
            node_color="cluster_corr",
            labels="name",
            colorbar=True,
            **kwargs
        )
    @staticmethod
    def _compute_correlations(data: DataFrame, method: str) -> DataFrame:
        """Computes correlation between columns of dataframe.
        Parameters
        ----------
        data : DataFrame
        method : str
            correlation method
        Returns
        -------
        DataFrame
            dataframe of absolute correlations, columns and indexes are
            columns of `data` (absolute values keep edge weights
            non-negative for the layout and edge widths)
        """
        return data.corr(method=method).abs()
    @staticmethod
    def _create_graph(correlations: DataFrame) -> nx.Graph:
        """Creates a graph object to represent correlations.
        Parameters
        ----------
        correlations : DataFrame
            square dataframe of correlations, columns and indexes must be identical
        Returns
        -------
        Graph
            NetworkX graph object representing the correlations
        """
        # complete graph: one weighted edge for every pair of columns
        graph = nx.complete_graph(correlations.shape[1])
        graph = nx.relabel_nodes(
            graph, {i: col for i, col in enumerate(correlations.columns)}
        )
        for edge in graph.edges:
            graph.edges[edge]["weight"] = correlations[edge[0]][edge[1]]
        for node in graph.nodes:
            graph.nodes[node]["name"] = node
        # weighted clustering coefficient, used as the node color in draw()
        for node, coef in nx.clustering(graph, weight="weight").items():
            graph.nodes[node]["cluster_corr"] = coef
        return graph
    @staticmethod
    def _compute_positions(graph: nx.Graph) -> Dict:
        """Returns positions of nodes using a spring layout.
        Random seed is set and not changeable to make graphs always reproductible.
        Parameters
        ----------
        graph : Graph
            correlation graph, each node is a column, each link is a correlation
        Returns
        -------
        Dict
            positions of nodes, keys are node names, value are (x, y) positions
        """
        return nx.spring_layout(graph, seed=42)
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
"theo.delecour@gmx.com"
] | theo.delecour@gmx.com |
6f0cf4d61aa094e7e4958d5d2d42c7ee379e097f | 942a82cd1e34cd8f57e1d7f3272e4086605256ee | /config/settings.py | 4ab609f97c0680e52cc1f2490a6f0d441b5e6b02 | [] | no_license | hanieh-mav/SocialNetwork-with-drf | d451126f93e3735a8c9d6dbf714a8179785e15cc | d929704a3d9f26e1e0ca5d961a01ba7dd5c6bf84 | refs/heads/main | 2023-06-13T08:17:46.591597 | 2021-07-09T13:37:06 | 2021-07-09T13:37:06 | 353,754,846 | 2 | 0 | null | 2021-07-09T13:27:27 | 2021-04-01T16:04:26 | Python | UTF-8 | Python | false | false | 4,482 | py | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@81g)s8gw+7-84o%ks%*8&j$cbb+&m%(#)+e6getb5o40@vil)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'crispy_forms',
'posts.apps.PostsConfig',
'accounts.apps.AccountsConfig',
'postapi.apps.PostapiConfig',
'accountapi.apps.AccountapiConfig',
'rest_framework',
'rest_framework.authtoken',
'dj_rest_auth',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'dj_rest_auth.registration',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
#LOGIN_URL
LOGIN_URL = 'accounts:login'
LOGIN_REDIRECT_URL = 'posts:post-list'
#LOGOUT_URL
LOGOUT_REDIRECT_URL = 'posts:post-list'
STATIC_URL = '/static/'
#MEDIA
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
AUTH_USER_MODEL = 'accounts.User'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated'
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'dj_rest_auth.jwt_auth.JWTCookieAuthentication',
],
}
SITE_ID = 1
REST_USE_JWT = True
JWT_AUTH_COOKIE = 'my-app-auth'
JWT_AUTH_REFRESH_COOKIE = 'my-refresh-token'
#EMAIL SETTING
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'haniehproject.it@gmail.com'
EMAIL_HOST_PASSWORD = 'xxxxxxxxxxxxxxxxxxx'
EMAIL_PORT = 587
EMAIL_USE_TLS = True | [
"h.mehdiabadi@gmail.com"
] | h.mehdiabadi@gmail.com |
8723a4a6f9bb16968b5f83ec44895b30cb9da123 | d82b879f41e906589a0a6ad5a6a09e0a0032aa3f | /ObservationScripts/on_off/observe_moon_spec_analyser.py | 176f9c75c90dd4f6945052404f93c17615964d9f | [] | no_license | SETIatHCRO/ATA-Utils | 66718eed669882792148fe0b7a2f977cd0f6ac2e | 59f4d21b086effaf41d5e11e338ce602c803cfd0 | refs/heads/master | 2023-08-16T20:41:44.233507 | 2023-08-10T20:39:13 | 2023-08-10T20:39:13 | 137,617,987 | 5 | 5 | null | 2023-08-10T20:39:14 | 2018-06-17T00:07:05 | Jupyter Notebook | UTF-8 | Python | false | false | 867 | py | #!/home/obsuser/miniconda3/envs/ATAobs/bin/python
from ATATools import ata_control, logger_defaults
from SNAPobs import snap_dada, snap_if
import time
import atexit
import numpy as np
import sys
import argparse
import logging
import os
def main():
logger = logger_defaults.getProgramLogger("observe",
loglevel=logging.INFO)
az_offset = 20.
el_offset = 0.
ant_list = ["2b"]
source = "moon"
ata_control.reserve_antennas(ant_list)
atexit.register(ata_control.release_antennas,ant_list, False)
ata_control.create_ephems2(source, az_offset, el_offset)
ata_control.point_ants2(source, "off", ant_list)
#ata_control.autotune(ant_list)
_ = input("Press any key to switch to on source")
ata_control.point_ants2(source, "on", ant_list)
print("on source acquired")
if __name__ == "__main__":
main()
| [
"wael.a.farah@gmail.com"
] | wael.a.farah@gmail.com |
21ac72b7d43b4b2cd092a87758265844e747314f | 7be17509048a46e8d33d051495f6c01d97aa1b14 | /plot_price.py | f735c59fce937df3e662928fdf125d59f865db16 | [
"Apache-2.0"
] | permissive | javierarilos/market_session | 7aca2d1fc0aaa37825d44fa6d4e15af67a2b93f6 | 35f3020acfdc771cb6a7bfddabb7ca481c792aa5 | refs/heads/master | 2021-01-10T08:56:45.200902 | 2016-03-17T14:13:40 | 2016-03-17T14:13:40 | 52,529,884 | 0 | 0 | null | 2016-03-17T14:13:40 | 2016-02-25T14:14:33 | Python | UTF-8 | Python | false | false | 327 | py | """ Load data from pickle session file, with only one instrument.
Plot prices after removing zeroes
"""
import matplotlib.pyplot as plt
import preprocess_data
session_file = 'f_mupssan20140901.F:FESXU4.pkl'
mkt = preprocess_data.load_session(session_file)
ts = mkt[:, 1]
last = mkt[:, 4]
plt.plot(ts, last)
plt.show()
| [
"javier.arilos@gmail.com"
] | javier.arilos@gmail.com |
aaa0827ca1960e9bbf5f709391f05f25418b11bd | 61594a19ffaca4b97f7905a82844132df6860837 | /trio2o/tests/unit/common/scheduler/test_pod_manager.py | 307430b4e306486df2ce0fcfff758d6dd657b1d0 | [
"Apache-2.0"
] | permissive | OpenCloudNeXt/trio2o | db679ab292162e564145fddaa55aa8f8c3c6c0b8 | f4d2d5458fbba71414edebf5e9f69b98abd2d080 | refs/heads/master | 2020-03-14T03:26:16.805389 | 2017-10-18T07:03:20 | 2017-10-18T07:03:38 | 131,419,855 | 1 | 0 | Apache-2.0 | 2018-04-28T15:09:21 | 2018-04-28T15:09:21 | null | UTF-8 | Python | false | false | 5,910 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from stevedore import driver
from trio2o.common import context
from trio2o.db import api
from trio2o.db import core
from trio2o.db import models
import unittest
class PodManagerTest(unittest.TestCase):
def setUp(self):
core.initialize()
core.ModelBase.metadata.create_all(core.get_engine())
self.context = context.Context()
self.project_id = 'test_pm_project'
self.az_name_2 = 'b_az_pm_2'
self.az_name_1 = 'b_az_pm_1'
self.pod_manager = driver.DriverManager(
namespace='trio2o.common.schedulers',
name='pod_manager',
invoke_on_load=True
).driver
self.b_pod_1 = {'pod_id': 'b_pod_pm_uuid_1',
'pod_name': 'b_region_pm_1',
'az_name': self.az_name_1}
self.b_pod_2 = {'pod_id': 'b_pod_pm_uuid_2',
'pod_name': 'b_region_pm_2',
'az_name': self.az_name_2}
self.b_pod_3 = {'pod_id': 'b_pod_pm_uuid_3',
'pod_name': 'b_region_pm_3',
'az_name': self.az_name_2}
self.b_pod_4 = {'pod_id': 'b_pod_pm_uuid_4',
'pod_name': 'b_region_pm_4',
'az_name': self.az_name_2}
def test_get_current_binding_and_pod(self):
api.create_pod(self.context, self.b_pod_1)
api.create_pod_binding(
self.context, self.project_id, self.b_pod_1['pod_id'])
pod_b_1, pod_1 = self.pod_manager.get_current_binding_and_pod(
self.context, self.az_name_1, self.project_id, pod_group='')
binding_q = core.query_resource(
self.context, models.PodBinding,
[{'key': 'tenant_id',
'comparator': 'eq',
'value': self.project_id}], [])
self.assertEqual(len(binding_q), 1)
self.assertEqual(binding_q[0]['id'], pod_b_1['id'])
pod_b_2, pod_2 = self.pod_manager.get_current_binding_and_pod(
self.context, self.az_name_1, 'new_project_pm_1', pod_group='')
binding_q = core.query_resource(
self.context, models.PodBinding,
[{'key': 'tenant_id',
'comparator': 'eq',
'value': 'new_project_pm_1'}], [])
self.assertEqual(len(binding_q), 0)
self.assertEqual(pod_b_2, None)
self.assertEqual(pod_2, None)
pod_b_3, pod_3 = self.pod_manager.get_current_binding_and_pod(
self.context, 'unknown_az', self.project_id, pod_group='')
binding_q = core.query_resource(
self.context, models.PodBinding,
[{'key': 'tenant_id',
'comparator': 'eq',
'value': self.project_id}], [])
self.assertEqual(len(binding_q), 1)
self.assertEqual(pod_b_3, None)
self.assertEqual(pod_3, None)
pod_b_4, pod_4 = self.pod_manager.get_current_binding_and_pod(
self.context, self.az_name_1, self.project_id, pod_group='test')
binding_q = core.query_resource(
self.context, models.PodBinding,
[{'key': 'tenant_id',
'comparator': 'eq',
'value': self.project_id}], [])
self.assertEqual(len(binding_q), 1)
self.assertEqual(pod_b_4['id'], binding_q[0]['id'])
self.assertEqual(pod_4, None)
def test_create_binding(self):
api.create_pod(self.context, self.b_pod_2)
flag = self.pod_manager.create_binding(
self.context, 'new_project_pm_2', self.b_pod_2['pod_id'])
self.assertEqual(flag, True)
binding_q = core.query_resource(
self.context, models.PodBinding,
[{'key': 'tenant_id',
'comparator': 'eq',
'value': 'new_project_pm_2'}], [])
self.assertEqual(len(binding_q), 1)
self.assertEqual(binding_q[0]['pod_id'], self.b_pod_2['pod_id'])
self.assertEqual(binding_q[0]['tenant_id'], 'new_project_pm_2')
self.assertEqual(binding_q[0]['is_binding'], True)
def test_update_binding(self):
api.create_pod(self.context, self.b_pod_4)
api.create_pod(self.context, self.b_pod_3)
flag = self.pod_manager.create_binding(
self.context, 'new_project_pm_3', self.b_pod_3['pod_id'])
self.assertEqual(flag, True)
current_binding = core.query_resource(
self.context, models.PodBinding,
[{'key': 'tenant_id',
'comparator': 'eq',
'value': 'new_project_pm_3'}], [])
flag = self.pod_manager.update_binding(
self.context, current_binding[0], self.b_pod_4['pod_id'])
self.assertEqual(flag, True)
binding_q = core.query_resource(
self.context, models.PodBinding,
[{'key': 'tenant_id',
'comparator': 'eq',
'value': 'new_project_pm_3'}], [])
self.assertEqual(len(binding_q), 2)
self.assertEqual(binding_q[0]['pod_id'], self.b_pod_3['pod_id'])
self.assertEqual(binding_q[0]['tenant_id'], 'new_project_pm_3')
self.assertEqual(binding_q[0]['is_binding'], False)
self.assertEqual(binding_q[1]['pod_id'], self.b_pod_4['pod_id'])
self.assertEqual(binding_q[1]['tenant_id'], 'new_project_pm_3')
self.assertEqual(binding_q[1]['is_binding'], True)
| [
"yingfeicaozhang100527@gmail.com"
] | yingfeicaozhang100527@gmail.com |
76efc587e9870c67ce18c75d2a19291da2b320b2 | d051b7c8e9375beea6e4ee1a3375d120fde178ac | /moveit_experimental/moveit_jog_arm/test/python_tests/vel_accel_limits/test_vel_accel_limits.py | 2baf6a594a6baf04492bfe3afda8e1b276b15442 | [
"BSD-3-Clause"
] | permissive | anion0278/moveit | 3e4a35f5c37efb1c79cf6c7e52e49c9e2c43bf94 | 7c733c576d34bada28ff8986b1f71f06712d34a6 | refs/heads/master | 2023-02-23T17:01:51.947763 | 2021-01-31T14:28:45 | 2021-01-31T14:28:45 | 267,310,801 | 0 | 0 | BSD-3-Clause | 2020-05-27T12:16:41 | 2020-05-27T12:16:40 | null | UTF-8 | Python | false | false | 2,749 | py | #!/usr/bin/env python
import time
import pytest
import rospy
from control_msgs.msg import JointJog
from trajectory_msgs.msg import JointTrajectory
# Import common Python test utilities
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import util
# Test that commands that are too fast are caught and flagged
# This can be run as part of a pytest, or like a normal ROS executable:
# rosrun moveit_jog_arm test_vel_accel_limits.py
JOINT_JOG_COMMAND_TOPIC = 'jog_server/joint_delta_jog_cmds'
COMMAND_OUT_TOPIC = 'jog_server/command'
# Check if jogger is initialized with this service
SERVICE_NAME = 'jog_server/change_drift_dimensions'
@pytest.fixture
def node():
return rospy.init_node('pytest', anonymous=True)
class JointJogCmd(object):
def __init__(self):
self._pub = rospy.Publisher(JOINT_JOG_COMMAND_TOPIC, JointJog, queue_size=1)
def send_joint_velocity_cmd(self, joint_pos):
jj = JointJog()
jj.header.stamp = rospy.Time.now()
jj.joint_names = ['joint_{}'.format(i) for i in range(len(joint_pos))]
jj.velocities = list(map(float, joint_pos))
self._pub.publish(jj)
def test_vel_limit(node):
# Test sending a joint command
assert util.wait_for_jogger_initialization(SERVICE_NAME)
received = []
sub = rospy.Subscriber(
COMMAND_OUT_TOPIC, JointTrajectory, lambda msg: received.append(msg)
)
joint_cmd = JointJogCmd()
TEST_DURATION = 1
PUBLISH_PERIOD = 0.01 # 'PUBLISH_PERIOD' from jog_arm config file
# Panda arm limit, from joint_limits.yaml
VELOCITY_LIMIT = rospy.get_param("/robot_description_planning/joint_limits/panda_joint1/max_velocity")
# Send a velocity command that exceeds the limit
velocities = [10 * VELOCITY_LIMIT]
# Send a command to start the jogger
joint_cmd.send_joint_velocity_cmd(velocities)
start_time = rospy.get_rostime()
received = []
while (rospy.get_rostime() - start_time).to_sec() < TEST_DURATION:
joint_cmd.send_joint_velocity_cmd(velocities)
time.sleep(0.1)
# Period of outgoing commands from the jogger, from yaml
JOGGER_COMMAND_PERIOD = rospy.get_param("/jog_server/publish_period")
# Should be no velocities greater than the limit
assert len(received) > 2
for msg_idx in range(1, len(received)):
velocity = \
(received[msg_idx].points[0].positions[0] - received[msg_idx - 1].points[0].positions[0]) / JOGGER_COMMAND_PERIOD
assert abs(velocity) <= VELOCITY_LIMIT
if __name__ == '__main__':
node = node()
test_vel_limit(node)
# TODO(andyz): add an acceleration limit test (the Panda joint_limits.yaml doesn't define acceleration limits)
| [
"noreply@github.com"
] | anion0278.noreply@github.com |
b2df86aff826ef401a2e795e3a010d9464621cb0 | 2a0ec173d3e60ee01e07d348728b8517a07d6d0d | /light/select_max_region.py | 7746c4e201022dae8d4055452a0c275874b51860 | [] | no_license | pabogdan/spinnaker_vision | 62c22572e95cfc30a1ad60ef7473d215326b1dfa | b1939a432a672cb6dcd33966a175eb9f2027aca9 | refs/heads/master | 2021-01-18T10:33:41.928880 | 2015-10-21T14:07:45 | 2015-10-21T14:07:45 | 42,939,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | __author__ = 'bogdanp'
import nengo
import nengo_spinnaker
import nengo_pushbot
import numpy | [
"dwTheresnosugar2013"
] | dwTheresnosugar2013 |
3f35bb4275325e1c72d173246eef8e4413773be3 | 25b1d29a9e25629b1fcd469641cf871354b78761 | /Exercicios Listas/listas-17.py | 2ad5b9d2a814128ac5a02e43e1edc9a57313ce68 | [] | no_license | AndersonBatalha/Programacao1 | 0a73302e216ddd189f75231cbbae910743ab67b9 | 07ef756d6984f25d294ce4e758e8a671942581fa | refs/heads/master | 2021-07-15T14:17:34.661718 | 2017-10-15T17:33:20 | 2017-10-15T17:33:20 | 106,857,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,344 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 17. Em uma competição de salto em distância cada atleta tem direito a cinco saltos. O resultado do atleta será determinado pela média dos cinco valores restantes. Você deve fazer um programa que receba o nome e as cinco distâncias alcançadas pelo atleta em seus saltos e depois informe o nome, os saltos e a média dos saltos. O programa deve ser encerrado quando não for informado o nome do atleta. A saída do programa deve ser conforme o exemplo abaixo:
# Atleta: Rodrigo Curvêllo
# Primeiro Salto: 6.5 m / Segundo Salto: 6.1 m / Terceiro Salto: 6.2 m
# Quarto Salto: 5.4 m / Quinto Salto: 5.3 m
# Resultado final:
# Atleta: Rodrigo Curvêllo
# Saltos: 6.5 - 6.1 - 6.2 - 5.4 - 5.3
# Média dos saltos: 5.9 m
nomes = []
saltos = []
while True:
nome = raw_input("Nome do atleta: ")
if len(nome) > 0:
for i in range(1, 6):
salto = float(raw_input("Salto %d: " % i))
saltos.append(salto)
media = sum(saltos) / len(saltos)
nomes.append(nome)
else:
if len(nomes) == 0:
print "Sem atletas cadastrados."
else:
print "Encerrado"
for i in range(len(nomes)):
print "\nAtleta: %s" % nomes[i]
for i in range(len(saltos)):
for i in range(len(saltos)):
print "Salto %d: %.2f m" % (i + 1, saltos[i])
print "Média dos saltos: %.2f m" % media
break
| [
"andersonpbatalha@gmail.com"
] | andersonpbatalha@gmail.com |
dfe222014934e58ee6918b968a783bb1b48102ec | 10475b80244955f380820898b0197de8b82cf41e | /user_mailbox/models/res_users.py | d1ef7eb440898de1151113aef51d2830182be539 | [] | no_license | marionumza/base | ec92de4ee50d319b4e9b95309059c99b766c5b11 | 044a5f5da659957d31e1c063375c5e83fc5d5134 | refs/heads/master | 2020-12-28T06:39:40.489441 | 2019-10-28T06:18:39 | 2019-10-28T06:18:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | # -*- coding: utf-8 -*-
# See LICENSE file for full copyright and licensing details.
"""Res Users Model."""
from odoo import models, fields
class ResUsers(models.Model):
"""Added list of incoming and outgoing mail server for user."""
_inherit = 'res.users'
incoming_mail_server_ids = fields.One2many("fetchmail.server", "user_id",
string="Incoming Mail Servers")
outgoing_mail_server_ids = fields.One2many("ir.mail_server", "user_id",
string="Outgoing Mail Servers")
| [
"promitgt@gmail.com"
] | promitgt@gmail.com |
7f6f59e39a0cf95c2270f55d00248d46fc8634da | b1abf1f549b9e3029f3fb56bfcf3bf91a4258f4e | /Class_Weights_Mutual_Cold/MLP_Cold_hyper_KHI_500.py | eae1a01308f40d1e39b5393411f0135f15c7927f | [] | no_license | tundik/coldcompare2017 | d223954b62bc004781884a8f103bcae3c1c48c96 | 0d69828cf22198e31cc441a1c750d3c0f4fb3888 | refs/heads/master | 2021-01-18T04:10:53.003705 | 2017-04-01T19:29:17 | 2017-04-01T19:29:17 | 85,758,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,893 | py |
# coding: utf-8
from __future__ import print_function
import os
import numpy as np
from numpy import newaxis
np.random.seed(1337)
import keras
from keras.utils.np_utils import to_categorical
from keras.layers import Dense, Input, Flatten
from keras.layers import Convolution1D, MaxPooling1D, Embedding, Dense,Dropout , Activation
from keras.models import Model,Sequential
import sys
from keras.optimizers import SGD
from sklearn.metrics import classification_report,recall_score,accuracy_score,confusion_matrix,roc_curve,roc_auc_score
import pandas as pd
import random
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
from keras.optimizers import RMSprop
from keras.datasets import mnist
from keras.utils import np_utils
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import mutual_info_classif
from myfunc import encode
from myfunc import create_class_weight
from myfunc import delete_column
def data():
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import mutual_info_classif
print("Data mfcc read started...")
data = pd.read_csv("ComParE2017_Cold.ComParE.train.arff",delimiter=',',skiprows=range(0, 6379))
data=data.as_matrix()
print ("Data mfcc read finished.")
data=data[:,1:6375]
Y_train=[x[6373] for x in data]
Y_train[300]
x_train=data[:,0:6373]
labels = ['C','NC']
label2ind = {label: (index + 1) for index, label in enumerate(labels)}
ind2label = {(index + 1): label for index, label in enumerate(labels)}
print (label2ind)
max_label = max(label2ind.values())+1
y_enc = [[label2ind[ey] for ey in Y_train]]
y_enc = [[encode(c, max_label) for c in ey] for ey in y_enc]
y_enci =delete_column(y_enc)
y_train=y_enci[0]
print("Data mfcc read started...")
data2 = pd.read_csv("ComParE2017_Cold.ComParE.devel.arff",delimiter=',',skiprows=range(0, 6379))
data2=data2.as_matrix()
print ("Data mfcc read finished.")
data2=data2[:,1:6375]
Y_val=[x[6373] for x in data2]
x_val=data2[:,0:6373]
y_enc = [[label2ind[ey] for ey in Y_val]]
y_enc = [[encode(c, max_label) for c in ey] for ey in y_enc]
y_enci =delete_column(y_enc)
y_val=y_enci[0]
scaler = preprocessing.StandardScaler().fit(x_train)
train_data_input = scaler.transform(x_train)
valid_data_input = scaler.transform(x_val)
test_data_input = scaler.transform(x_val)
train_output = y_train
validation_output = y_val
test_output = y_val
print(train_data_input.shape)
print(test_data_input.shape)
print(valid_data_input.shape)
print(train_output.shape)
print(test_output.shape)
print(validation_output.shape)
kbest = SelectKBest(score_func=mutual_info_classif, k=500).fit(train_data_input, train_output[:,0])
train_input = kbest.transform(train_data_input)
validation_input = kbest.transform(valid_data_input)
test_input = kbest.transform(test_data_input)
import pickle
pickle.dump( kbest, open( "best_mut500.pickle", "wb" ) )
label_count={}
for i in range(train_output.shape[-1]):
label_count.update({int(i):len(train_output[train_output[:,int(i)]==1])})
cweights=create_class_weight(label_count)
return train_input, train_output, validation_input, validation_output, test_input, test_output, cweights
def model(train_input, train_output, validation_input, validation_output, test_input, test_output, cweights):
earlyStopping=keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')
model = Sequential()
model.add(Dense({{choice([150,300,500,750,1000])}}, input_shape=(500,), init={{choice(['glorot_normal','glorot_uniform'])}}))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense({{choice([150,300,500,750,1000])}}, activation='relu', init={{choice(['glorot_normal','glorot_uniform'])}}))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense({{choice([150,300,500,750,1000])}}, activation='relu', init={{choice(['glorot_normal','glorot_uniform'])}}))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense(2, activation='softmax'))
epochs = 100
model.compile(loss='binary_crossentropy',optimizer={{choice(['rmsprop','adam'])}},metrics=['acc'])
model.fit(train_input, train_output, nb_epoch=epochs,batch_size={{choice([50,100,150,200,250,300])}}, callbacks=[earlyStopping], shuffle=True, validation_data = (validation_input, validation_output), class_weight=cweights)
score = model.evaluate(test_input, test_output)
accuracy = score[1]
loss = score[0]
print("Accuracy: ", accuracy, " Loss: ", loss)
pr = model.predict_classes(test_input)
yh = test_output.argmax(1)
print("\n")
print (recall_score(yh, pr, average="macro"))
uar=recall_score(yh, pr, average="macro")
print (uar)
return {'loss': -uar, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
train_input, train_output, validation_input, validation_output, test_input, test_output, cweights = data()
best_run, best_model = optim.minimize(model=model,
data=data,
algo=tpe.suggest,
max_evals=70,
trials=Trials())
print("Evalutation of best performing model:")
print(best_model.evaluate(test_input, test_output))
print (best_run)
pr = best_model.predict_classes(test_input)
yh = test_output.argmax(1)
print (recall_score(yh, pr, average="macro"))
print (classification_report(yh,pr))
best_model.save('best_model500.h5')
| [
"czbalint14@gmail.com"
] | czbalint14@gmail.com |
0c0dd439cd2989daf5ab75b214422e48e706d813 | 023c5085b5aa58b1ee07e602ac2afdb17fc11ec5 | /Decision_Tree_Classifier/ReadData.py | 2463bb556dcc60857eb27d7fdcd7fe4e0579912e | [] | no_license | AnubhavGupta3377/Machine-Learning-Algorithms | b87b7d6c5934ca4f1c6cce6bcf6988156518faa3 | c454f88387f4e6a8cb5357826d793f0582df1efc | refs/heads/master | 2021-05-06T06:29:01.262205 | 2017-12-11T13:47:28 | 2017-12-11T13:47:28 | 113,857,275 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,242 | py | from collections import defaultdict
class ReadData:
'''
1. labels : list of all the labels of all the examples
2. features : list of list of feature values of all the examples
3. values : dictionary that maps every feature to the set of
possible values it can take
4. numExamples : number of (training) examples in the dataset
5. n : #features + 1(label)
6. names : maps every number to associated name of the attribute
7. nums : Reverse of names
8. outputLabel : Label of predicted output
'''
def __init__(self, labels=[], features=[], values=defaultdict(set), numExamples=0, n=0):
self.labels = labels[:]
self.features = features[:]
self.values = values
self.numExamples = numExamples
self.n = n
#self.m = m
self.names = defaultdict(str)
self.nums = defaultdict(int)
self.outputLabel = None
'''
This function reads the data from the file named filename and
stores the information in the form that is easier to process
for the decision tree.
'''
def readData(self, filename):
dataFile = open(filename, 'r')
linesRead = 0
for line in dataFile:
linesRead += 1
if linesRead == 1:
self.n, self.m = map(int, line.split())
elif linesRead == 2:
attrs = line.split()
self.outputLabel = attrs[0]
for i in range(1,self.n):
self.names[i-1] = attrs[i]
self.nums[attrs[i]] = i-1
elif linesRead == 3:
labelType = line[0]
types = [0 for i in range(self.n)]
for i in range(1,self.n+1):
types[i-1] = line[i]
if labelType != 'b':
print 'Only binary classification is allowed'
return 2
else:
line = line.split()
self.labels.append(int(line[0]))
self.features.append(line[1:])
for i in range(1,self.n):
self.values[i-1].add(line[i])
self.numExamples = linesRead - 3
| [
"anubhagu@adobe.com"
] | anubhagu@adobe.com |
f65170ad67be87a0b687ba1d668c0e6b07848267 | fa03d0932cda3030a1b8da8f6ecfe1fe7314ba72 | /pyproject/scenario04/server/config.py | d4cb406de011985c9701cbfa9a0e9480aa41a2cc | [] | no_license | mathcircle/ccircle | 05d9310c535b5a7f613b3a8fda2eebc105f17b0c | dfc672a1b6a96169d179bb7292a43dd9d9510640 | refs/heads/master | 2020-12-03T00:43:11.324286 | 2019-06-02T05:21:45 | 2019-06-02T05:22:00 | 96,068,861 | 1 | 8 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | SERVER_HOST = '130.39.91.40'
SERVER_HOST_LOCAL = '127.0.0.1'
SERVER_PORT = 50382
SOCKET_BUFFER_SIZE = 4096
MAX_MESSAGE_SIZE = 2048
MIN_REQUEST_WAIT = 1.0 / 10.0
STATUS_GOOD = 'good' | [
"josh@ltheory.com"
] | josh@ltheory.com |
b6d70d3cd0bcef780e9d1bf21d1470f79ecdd2e7 | 2fc197681ac9cdd0346fe9ab56d9aa4d59b6f1d0 | /polyaxon/db/migrations/0001_initial.py | 27bab9b4b5e9c713404ae0d918e70a6b313ea7ff | [
"MIT"
] | permissive | dtaniwaki/polyaxon | 32e0fcfc4cd4b46d1d502ae26cd285dc9c11d55a | 04e3c9c9a732a2128233e8d1db1bdc1647fe7c55 | refs/heads/master | 2020-03-20T08:16:33.334881 | 2018-06-13T22:40:17 | 2018-06-13T22:40:17 | 137,303,634 | 0 | 0 | null | 2018-06-14T03:53:13 | 2018-06-14T03:53:13 | null | UTF-8 | Python | false | false | 52,402 | py | # Generated by Django 2.0.3 on 2018-06-12 13:31
import db.models.abstract_jobs
import db.models.repos
import db.models.utils
from django.conf import settings
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import libs.blacklist
import libs.resource_validation
import libs.spec_validation
import re
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='ActivityLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('event_type', models.CharField(max_length=128)),
('context', django.contrib.postgres.fields.jsonb.JSONField(help_text='Extra context information.')),
('created_at', models.DateTimeField()),
('object_id', models.PositiveIntegerField()),
('actor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
],
options={
'verbose_name': 'activity log',
'verbose_name_plural': 'activities logs',
},
),
migrations.CreateModel(
name='BuildJob',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this job.')),
('definition', django.contrib.postgres.fields.jsonb.JSONField(default={}, help_text='The specific values/manifest for this job.')),
('config', django.contrib.postgres.fields.jsonb.JSONField(help_text='The compiled polyaxonfile for the build job.', validators=[libs.spec_validation.validate_build_spec_config])),
('dockerfile', models.TextField(blank=True, help_text='The dockerfile used to create the image with this job.', null=True)),
],
bases=(models.Model, db.models.utils.LastStatusMixin, db.models.abstract_jobs.JobMixin),
),
migrations.CreateModel(
name='BuildJobStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('Created', 'Created'), ('Building', 'Building'), ('Scheduled', 'Scheduled'), ('Running', 'Running'), ('Succeeded', 'Succeeded'), ('Failed', 'Failed'), ('Stopped', 'Stopped'), ('UNKNOWN', 'UNKNOWN')], default='Created', max_length=64, null=True)),
('details', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={}, null=True)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.BuildJob')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Build Job Statuses',
'abstract': False,
},
),
migrations.CreateModel(
name='ChartVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('version', models.CharField(max_length=16)),
],
),
migrations.CreateModel(
name='CliVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('latest_version', models.CharField(max_length=16)),
('min_version', models.CharField(max_length=16)),
],
),
migrations.CreateModel(
name='Cluster',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('version_api', django.contrib.postgres.fields.jsonb.JSONField(help_text='The cluster version api info')),
],
),
migrations.CreateModel(
name='ClusterEvent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('data', django.contrib.postgres.fields.jsonb.JSONField()),
('meta', django.contrib.postgres.fields.jsonb.JSONField()),
('level', models.CharField(max_length=16)),
('cluster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='events', to='db.Cluster')),
],
),
migrations.CreateModel(
name='ClusterNode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this node within the cluser.')),
('name', models.CharField(help_text='Name of the node', max_length=256)),
('hostname', models.CharField(blank=True, max_length=256, null=True)),
('role', models.CharField(choices=[('master', 'master'), ('agent', 'agent')], help_text='The role of the node', max_length=16)),
('docker_version', models.CharField(blank=True, max_length=128, null=True)),
('kubelet_version', models.CharField(max_length=64)),
('os_image', models.CharField(max_length=128)),
('kernel_version', models.CharField(max_length=128)),
('schedulable_taints', models.BooleanField(default=False)),
('schedulable_state', models.BooleanField(default=False)),
('memory', models.BigIntegerField()),
('cpu', models.FloatField()),
('n_gpus', models.PositiveSmallIntegerField()),
('status', models.CharField(choices=[('UNKNOWN', 'UNKNOWN'), ('Ready', 'Ready'), ('NotReady', 'NotReady'), ('Deleted', 'Deleted')], default='UNKNOWN', max_length=24)),
('is_current', models.BooleanField(default=True)),
('cluster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='nodes', to='db.Cluster')),
],
options={
'ordering': ['sequence'],
},
),
migrations.CreateModel(
name='CodeReference',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('commit', models.CharField(blank=True, max_length=40, null=True)),
],
),
migrations.CreateModel(
name='Experiment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this experiment within the project.')),
('declarations', django.contrib.postgres.fields.jsonb.JSONField(blank=True, help_text='The parameters used for this experiment.', null=True)),
('config', django.contrib.postgres.fields.jsonb.JSONField(blank=True, help_text='The compiled polyaxon with specific values for this experiment.', null=True, validators=[libs.spec_validation.validate_experiment_spec_config])),
('cloning_strategy', models.CharField(blank=True, choices=[('copy', 'copy'), ('restart', 'restart'), ('resume', 'resume')], default='restart', max_length=16, null=True)),
('build_job', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.BuildJob')),
('code_reference', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.CodeReference')),
],
options={
'ordering': ['sequence'],
},
bases=(models.Model, db.models.utils.LastStatusMixin),
),
migrations.CreateModel(
name='ExperimentGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this group within the project.')),
('content', models.TextField(blank=True, help_text='The yaml content of the polyaxonfile/specification.', null=True, validators=[libs.spec_validation.validate_group_spec_content])),
('hptuning', django.contrib.postgres.fields.jsonb.JSONField(blank=True, help_text='The experiment group hptuning params config.', null=True, validators=[libs.spec_validation.validate_group_hptuning_config])),
('code_reference', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.CodeReference')),
],
options={
'ordering': ['sequence'],
},
bases=(models.Model, db.models.utils.LastStatusMixin),
),
migrations.CreateModel(
name='ExperimentGroupIteration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('data', django.contrib.postgres.fields.jsonb.JSONField(help_text='The experiment group iteration meta data.')),
('experiment_group', models.ForeignKey(help_text='The experiment group.', on_delete=django.db.models.deletion.CASCADE, related_name='iterations', to='db.ExperimentGroup')),
],
options={
'ordering': ['created_at'],
},
),
migrations.CreateModel(
name='ExperimentGroupStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('Created', 'Created'), ('Running', 'Running'), ('Succeeded', 'Succeeded'), ('Failed', 'Failed'), ('Stopped', 'Stopped')], default='Created', max_length=64, null=True)),
('experiment_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.ExperimentGroup')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Experiment group Statuses',
},
),
migrations.CreateModel(
name='ExperimentJob',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this job.')),
('definition', django.contrib.postgres.fields.jsonb.JSONField(default={}, help_text='The specific values/manifest for this job.')),
('role', models.CharField(default='master', max_length=64)),
('experiment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='jobs', to='db.Experiment')),
],
options={
'ordering': ['sequence'],
},
bases=(models.Model, db.models.utils.LastStatusMixin),
),
migrations.CreateModel(
name='ExperimentJobStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('Created', 'Created'), ('Building', 'Building'), ('Scheduled', 'Scheduled'), ('Running', 'Running'), ('Succeeded', 'Succeeded'), ('Failed', 'Failed'), ('Stopped', 'Stopped'), ('UNKNOWN', 'UNKNOWN')], default='Created', max_length=64, null=True)),
('details', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={}, null=True)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.ExperimentJob')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Experiment Job Statuses',
'abstract': False,
},
),
migrations.CreateModel(
name='ExperimentMetric',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('values', django.contrib.postgres.fields.jsonb.JSONField()),
('experiment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='metrics', to='db.Experiment')),
],
options={
'ordering': ['created_at'],
},
),
migrations.CreateModel(
name='ExperimentStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('Created', 'Created'), ('Resuming', 'Resuming'), ('Building', 'Building'), ('Scheduled', 'Scheduled'), ('Starting', 'Starting'), ('Running', 'Running'), ('Succeeded', 'Succeeded'), ('Failed', 'Failed'), ('Stopped', 'Stopped'), ('UNKNOWN', 'UNKNOWN')], default='Created', max_length=64, null=True)),
('experiment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.Experiment')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Experiment Statuses',
},
),
migrations.CreateModel(
name='ExternalRepo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('git_url', models.URLField()),
('is_public', models.BooleanField(default=True, help_text='If repo is public or private.')),
],
bases=(models.Model, db.models.repos.RepoMixin),
),
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this job.')),
('definition', django.contrib.postgres.fields.jsonb.JSONField(default={}, help_text='The specific values/manifest for this job.')),
('config', django.contrib.postgres.fields.jsonb.JSONField(help_text='The compiled polyaxonfile for the run job.', validators=[libs.spec_validation.validate_job_spec_config])),
('cloning_strategy', models.CharField(blank=True, choices=[('copy', 'copy'), ('restart', 'restart'), ('resume', 'resume')], default='restart', max_length=16, null=True)),
('build_job', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.BuildJob')),
('code_reference', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.CodeReference')),
('original_job', models.ForeignKey(blank=True, help_text='The original job that was cloned from.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='clones', to='db.Job')),
],
bases=(models.Model, db.models.utils.LastStatusMixin, db.models.abstract_jobs.JobMixin),
),
migrations.CreateModel(
name='JobResources',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cpu', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True, validators=[libs.resource_validation.validate_resource])),
('memory', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True, validators=[libs.resource_validation.validate_resource])),
('gpu', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True, validators=[libs.resource_validation.validate_resource])),
],
options={
'verbose_name': 'job resources',
'verbose_name_plural': 'jobs resources',
},
),
migrations.CreateModel(
name='JobStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('Created', 'Created'), ('Building', 'Building'), ('Scheduled', 'Scheduled'), ('Running', 'Running'), ('Succeeded', 'Succeeded'), ('Failed', 'Failed'), ('Stopped', 'Stopped'), ('UNKNOWN', 'UNKNOWN')], default='Created', max_length=64, null=True)),
('details', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={}, null=True)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.Job')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Run Job Statuses',
'abstract': False,
},
),
migrations.CreateModel(
name='LibVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('latest_version', models.CharField(max_length=16)),
('min_version', models.CharField(max_length=16)),
],
),
migrations.CreateModel(
name='NodeGPU',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('index', models.PositiveSmallIntegerField()),
('serial', models.CharField(max_length=256)),
('name', models.CharField(max_length=256)),
('memory', models.BigIntegerField()),
('cluster_node', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='gpus', to='db.ClusterNode')),
],
options={
'ordering': ['index'],
},
),
migrations.CreateModel(
name='NotebookJob',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this job.')),
('definition', django.contrib.postgres.fields.jsonb.JSONField(default={}, help_text='The specific values/manifest for this job.')),
('config', django.contrib.postgres.fields.jsonb.JSONField(help_text='The compiled polyaxonfile for the notebook job.', validators=[libs.spec_validation.validate_notebook_spec_config])),
('build_job', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.BuildJob')),
('code_reference', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.CodeReference')),
],
bases=(models.Model, db.models.utils.LastStatusMixin, db.models.abstract_jobs.JobMixin),
),
migrations.CreateModel(
name='NotebookJobStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('Created', 'Created'), ('Building', 'Building'), ('Scheduled', 'Scheduled'), ('Running', 'Running'), ('Succeeded', 'Succeeded'), ('Failed', 'Failed'), ('Stopped', 'Stopped'), ('UNKNOWN', 'UNKNOWN')], default='Created', max_length=64, null=True)),
('details', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={}, null=True)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.NotebookJob')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Notebook Job Statuses',
'abstract': False,
},
),
migrations.CreateModel(
name='Operation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('execute_at', models.DateTimeField(blank=True, help_text='When this instance should be executed. default None which translate to now', null=True)),
('timeout', models.PositiveIntegerField(blank=True, help_text='specify how long this instance should be up before timing out in seconds.', null=True)),
('trigger_policy', models.CharField(blank=True, choices=[('all_succeeded', 'all_succeeded'), ('all_failed', 'all_failed'), ('all_done', 'all_done'), ('one_succeeded', 'one_succeeded'), ('one_failed', 'one_failed'), ('one_done', 'one_done')], default='all_succeeded', help_text='defines the rule by which dependencies are applied, default is `all_success`.', max_length=16, null=True)),
('max_retries', models.PositiveSmallIntegerField(blank=True, help_text='the number of retries that should be performed before failing the operation.', null=True)),
('retry_delay', models.PositiveIntegerField(blank=True, default=60, help_text='The delay between retries.', null=True)),
('retry_exponential_backoff', models.BooleanField(default=False, help_text='allow progressive longer waits between retries by using exponential backoff algorithm on retry delay.')),
('max_retry_delay', models.PositiveIntegerField(blank=True, default=3600, help_text='maximum delay interval between retries.', null=True)),
('concurrency', models.PositiveSmallIntegerField(blank=True, help_text='When set, an operation will be able to limit the concurrent runs across execution_dates', null=True)),
('run_as_user', models.CharField(blank=True, help_text='unix username to impersonate while running the operation.', max_length=64, null=True)),
('config', models.TextField(blank=True, null=True)),
('celery_task', models.CharField(help_text='The celery task name to execute.', max_length=128)),
('celery_queue', models.CharField(blank=True, help_text='The celery queue name to use for the executing this task. If provided, it will override the queue provided in CELERY_TASK_ROUTES.', max_length=128, null=True)),
],
),
migrations.CreateModel(
name='OperationRun',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('celery_task_context', django.contrib.postgres.fields.jsonb.JSONField(blank=True, help_text='The kwargs required to execute the celery task.', null=True)),
('celery_task_id', models.CharField(blank=True, max_length=36)),
('operation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='runs', to='db.Operation')),
],
bases=(models.Model, db.models.utils.LastStatusMixin),
),
migrations.CreateModel(
name='OperationRunStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('created', 'created'), ('scheduled', 'scheduled'), ('running', 'running'), ('finished', 'finished'), ('stopped', 'stopped'), ('skipped', 'skipped')], default='created', max_length=64, null=True)),
('operation_run', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.OperationRun')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Operation Run Statuses',
},
),
migrations.CreateModel(
name='Pipeline',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('execute_at', models.DateTimeField(blank=True, help_text='When this instance should be executed. default None which translate to now', null=True)),
('timeout', models.PositiveIntegerField(blank=True, help_text='specify how long this instance should be up before timing out in seconds.', null=True)),
('name', models.CharField(max_length=256, validators=[django.core.validators.RegexValidator(re.compile('^[-a-zA-Z0-9_]+\\Z'), "Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens.", 'invalid'), libs.blacklist.validate_blacklist_name])),
('concurrency', models.PositiveSmallIntegerField(blank=True, help_text='If set, it determines the number of operation instances allowed to run concurrently.', null=True)),
],
),
migrations.CreateModel(
name='PipelineRun',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('pipeline', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='runs', to='db.Pipeline')),
],
bases=(models.Model, db.models.utils.LastStatusMixin),
),
migrations.CreateModel(
name='PipelineRunStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('created', 'created'), ('scheduled', 'scheduled'), ('running', 'running'), ('finished', 'finished'), ('stopped', 'stopped'), ('skipped', 'skipped')], default='created', max_length=64, null=True)),
('pipeline_run', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.PipelineRun')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Pipeline Run Statuses',
},
),
migrations.CreateModel(
name='PlatformVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('latest_version', models.CharField(max_length=16)),
('min_version', models.CharField(max_length=16)),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('name', models.CharField(max_length=256, validators=[django.core.validators.RegexValidator(re.compile('^[-a-zA-Z0-9_]+\\Z'), "Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens.", 'invalid'), libs.blacklist.validate_blacklist_name])),
('is_public', models.BooleanField(default=True, help_text='If project is public or private.')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='projects', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Repo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('is_public', models.BooleanField(default=True, help_text='If repo is public or private.')),
('project', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='repo', to='db.Project')),
],
bases=(models.Model, db.models.repos.RepoMixin),
),
migrations.CreateModel(
name='Schedule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('frequency', models.CharField(blank=True, help_text="Defines how often to run, this timedelta object gets added to your latest operation instance's execution_date to figure out the next schedule", max_length=64, null=True)),
('start_at', models.DateTimeField(blank=True, help_text='When this instance should run, default is None which translate to now.', null=True)),
('end_at', models.DateTimeField(blank=True, help_text='When this instance should stop running, default is None which translate to open ended.', null=True)),
('depends_on_past', models.BooleanField(default=False, help_text="when set to true, the instances will run sequentially while relying on the previous instances' schedule to succeed.")),
],
),
migrations.CreateModel(
name='SSOIdentity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('provider', models.CharField(choices=[('github', 'github'), ('bitbucket', 'bitbucket'), ('gitlab', 'gitlab')], max_length=32)),
('external_id', models.CharField(max_length=64, null=True)),
('valid', models.BooleanField(default=False)),
('last_verified', models.DateTimeField(default=django.utils.timezone.now)),
('last_synced', models.DateTimeField(default=django.utils.timezone.now)),
('scopes', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=200), blank=True, null=True, size=None)),
('data', django.contrib.postgres.fields.jsonb.JSONField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='identities', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'SSO identity',
'verbose_name_plural': 'SSO identities',
},
),
migrations.CreateModel(
name='TensorboardJob',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('sequence', models.PositiveSmallIntegerField(editable=False, help_text='The sequence number of this job.')),
('definition', django.contrib.postgres.fields.jsonb.JSONField(default={}, help_text='The specific values/manifest for this job.')),
('config', django.contrib.postgres.fields.jsonb.JSONField(help_text='The compiled polyaxonfile for the tensorboard job.', validators=[libs.spec_validation.validate_tensorboard_spec_config])),
('build_job', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.BuildJob')),
('code_reference', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.CodeReference')),
('experiment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tensorboard_jobs', to='db.Experiment')),
('experiment_group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tensorboard_jobs', to='db.ExperimentGroup')),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tensorboard_jobs', to='db.Project')),
],
bases=(models.Model, db.models.utils.LastStatusMixin, db.models.abstract_jobs.JobMixin),
),
migrations.CreateModel(
name='TensorboardJobStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
('message', models.CharField(blank=True, max_length=256, null=True)),
('status', models.CharField(blank=True, choices=[('Created', 'Created'), ('Building', 'Building'), ('Scheduled', 'Scheduled'), ('Running', 'Running'), ('Succeeded', 'Succeeded'), ('Failed', 'Failed'), ('Stopped', 'Stopped'), ('UNKNOWN', 'UNKNOWN')], default='Created', max_length=64, null=True)),
('details', django.contrib.postgres.fields.jsonb.JSONField(blank=True, default={}, null=True)),
('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statuses', to='db.TensorboardJob')),
],
options={
'ordering': ['created_at'],
'verbose_name_plural': 'Tensorboard Job Statuses',
'abstract': False,
},
),
migrations.AddField(
model_name='tensorboardjob',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.TensorboardJobStatus'),
),
migrations.AddField(
model_name='tensorboardjob',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='pipelinerun',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.PipelineRunStatus'),
),
migrations.AddField(
model_name='pipeline',
name='project',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='pipelines', to='db.Project'),
),
migrations.AddField(
model_name='pipeline',
name='schedule',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='db.Schedule'),
),
migrations.AddField(
model_name='pipeline',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='pipelines', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='operationrun',
name='pipeline_run',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='operation_runs', to='db.PipelineRun'),
),
migrations.AddField(
model_name='operationrun',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.OperationRunStatus'),
),
migrations.AddField(
model_name='operationrun',
name='upstream_runs',
field=models.ManyToManyField(blank=True, related_name='downstream_runs', to='db.OperationRun'),
),
migrations.AddField(
model_name='operation',
name='pipeline',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='operations', to='db.Pipeline'),
),
migrations.AddField(
model_name='operation',
name='schedule',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='db.Schedule'),
),
migrations.AddField(
model_name='operation',
name='upstream_operations',
field=models.ManyToManyField(blank=True, related_name='downstream_operations', to='db.Operation'),
),
migrations.AddField(
model_name='notebookjob',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notebook_jobs', to='db.Project'),
),
migrations.AddField(
model_name='notebookjob',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.NotebookJobStatus'),
),
migrations.AddField(
model_name='notebookjob',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='job',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='jobs', to='db.Project'),
),
migrations.AddField(
model_name='job',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.JobStatus'),
),
migrations.AddField(
model_name='job',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='externalrepo',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='external_repos', to='db.Project'),
),
migrations.AddField(
model_name='experimentjob',
name='resources',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.JobResources'),
),
migrations.AddField(
model_name='experimentjob',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.ExperimentJobStatus'),
),
migrations.AddField(
model_name='experimentgroup',
name='project',
field=models.ForeignKey(help_text='The project this polyaxonfile belongs to.', on_delete=django.db.models.deletion.CASCADE, related_name='experiment_groups', to='db.Project'),
),
migrations.AddField(
model_name='experimentgroup',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.ExperimentGroupStatus'),
),
migrations.AddField(
model_name='experimentgroup',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='experiment_groups', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='experiment',
name='experiment_group',
field=models.ForeignKey(blank=True, help_text='The experiment group that generate this experiment.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='experiments', to='db.ExperimentGroup'),
),
migrations.AddField(
model_name='experiment',
name='metric',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.ExperimentMetric'),
),
migrations.AddField(
model_name='experiment',
name='original_experiment',
field=models.ForeignKey(blank=True, help_text='The original experiment that was cloned from.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='clones', to='db.Experiment'),
),
migrations.AddField(
model_name='experiment',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='experiments', to='db.Project'),
),
migrations.AddField(
model_name='experiment',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.ExperimentStatus'),
),
migrations.AddField(
model_name='experiment',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='experiments', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='codereference',
name='external_repo',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='references', to='db.ExternalRepo'),
),
migrations.AddField(
model_name='codereference',
name='repo',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='references', to='db.Repo'),
),
migrations.AddField(
model_name='buildjob',
name='code_reference',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.CodeReference'),
),
migrations.AddField(
model_name='buildjob',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='build_jobs', to='db.Project'),
),
migrations.AddField(
model_name='buildjob',
name='status',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='db.BuildJobStatus'),
),
migrations.AddField(
model_name='buildjob',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='ssoidentity',
unique_together={('provider', 'user'), ('provider', 'external_id')},
),
migrations.AlterUniqueTogether(
name='project',
unique_together={('user', 'name')},
),
migrations.AlterUniqueTogether(
name='nodegpu',
unique_together={('cluster_node', 'index')},
),
migrations.AlterUniqueTogether(
name='externalrepo',
unique_together={('project', 'git_url')},
),
migrations.AlterUniqueTogether(
name='experimentjob',
unique_together={('experiment', 'sequence')},
),
migrations.AlterUniqueTogether(
name='experimentgroup',
unique_together={('project', 'sequence')},
),
migrations.AlterUniqueTogether(
name='experiment',
unique_together={('project', 'sequence')},
),
migrations.AlterUniqueTogether(
name='clusternode',
unique_together={('cluster', 'sequence')},
),
]
| [
"mouradmourafiq@gmail.com"
] | mouradmourafiq@gmail.com |
13420aecf149f66ef9cb63a68a5a090dbc8a2e3c | 6c3e475dcd95d14875a199466b8a7c744f61478b | /userProfile/userProfile.py | 1395f4986a45fed5e4b88da12ed0cb114aa8c04b | [] | no_license | webclinic017/tripleATradeBot | b4cce7a330e76f9f207c4d6f4d16327b1717a17a | 40b6130f52eb969336c7b602e698f41a2d8f947b | refs/heads/main | 2023-01-04T04:16:38.338563 | 2020-10-29T10:33:34 | 2020-10-29T10:33:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | from eventLogger import eventLogger as logger
from pathList import pathList
from alice_blue import *
import openpyxl
class userProfile:
    """AliceBlue trading-account profile.

    Credentials are read from the Excel workbook at
    ``pathList.userProfileFileName``; each profile is one worksheet whose
    cells A1/A2/A3 hold the username, password and API secret respectively.
    """

    # Class-level defaults; real values are assigned per instance in
    # __init__() and login().
    userProfileWorkBook = ""
    profileName = ""
    userName = ""
    password = ""
    apiSecret = ""
    accessToken = ""
    aliceObj = ""
    # Exchanges whose master contracts are downloaded at login.
    exchangeList = ['NSE']

    def __init__(self, profileName):
        self.userProfileWorkBook = openpyxl.load_workbook(pathList.userProfileFileName)
        self.profileName = profileName
        # Subscript access (wb[name]) replaces Workbook.get_sheet_by_name(),
        # which was deprecated in openpyxl 2.4 and removed in 3.0; hoisting
        # the sheet also avoids looking it up three times.
        sheet = self.userProfileWorkBook[self.profileName]
        self.userName = sheet['A1'].value
        self.password = sheet['A2'].value
        self.apiSecret = sheet['A3'].value
        logger.info(self.userName)
        # NOTE(review): logging the plain-text password and API secret is a
        # security risk — consider redacting these two lines.
        logger.info(self.password)
        logger.info(self.apiSecret)

    def login(self):
        """Authenticate with AliceBlue and build the API client object."""
        logger.info("login")
        self.accessToken = AliceBlue.login_and_get_access_token(
            username=self.userName, password=self.password, twoFA='a',
            api_secret=self.apiSecret)
        self.aliceObj = AliceBlue(
            username=self.userName, password=self.password,
            access_token=self.accessToken,
            master_contracts_to_download=self.exchangeList)

    def profileData(self):
        """Print the profile and balance reported by the AliceBlue API."""
        logger.info("profileData")
        print(self.aliceObj.get_profile())
        print(self.aliceObj.get_balance())
| [
"noreply@github.com"
] | webclinic017.noreply@github.com |
b38a5417f4cf586733ab782b41c420ea59c10d53 | 6a084a2df2869ce3ad565610cbf92eccf00a233e | /states/postgres_user.py | c65e8d42284fe1a2a9ce2c6f70f436a95ff64235 | [] | no_license | ltxin/saltstack | 95b5356715cc918afec378e2926d9f9a1c7a85d5 | 30a493ef5e46bd7629c8ba400e559dab023c1431 | refs/heads/master | 2021-01-16T17:52:56.939714 | 2017-08-11T10:13:41 | 2017-08-11T10:13:41 | 100,019,324 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 9,166 | py | # -*- coding: utf-8 -*-
'''
Management of PostgreSQL users (roles)
======================================
The postgres_users module is used to create and manage Postgres users.
.. code-block:: yaml
frank:
postgres_user.present
'''
from __future__ import absolute_import
# Import Python libs
# Import salt libs
import logging
# Salt imports
from salt.modules import postgres
import salt.ext.six as six
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Load this state module only when the postgres execution module is available.
    '''
    if 'postgres.user_exists' in __salt__:
        return True
    return (False, 'Unable to load postgres module. Make sure `postgres.bins_dir` is set.')
def present(name,
            createdb=None,
            createroles=None,
            createuser=None,
            encrypted=None,
            superuser=None,
            replication=None,
            inherit=None,
            login=None,
            password=None,
            default_password=None,
            refresh_password=None,
            groups=None,
            user=None,
            maintenance_db=None,
            db_password=None,
            db_host=None,
            db_port=None,
            db_user=None):
    '''
    Ensure that the named user is present with the specified privileges

    Please note that the user/group notion in postgresql is just abstract, we
    have roles, where users can be seen as roles with the LOGIN privilege
    and groups the others.

    name
        The name of the system user to manage.

    createdb
        Is the user allowed to create databases?

    createroles
        Is the user allowed to create other users?

    createuser
        Alias to create roles

    encrypted
        Should the password be encrypted in the system catalog?

    login
        Should the group have login perm

    inherit
        Should the group inherit permissions

    superuser
        Should the new user be a "superuser"

    replication
        Should the new user be allowed to initiate streaming replication

    password
        The system user's password. It can be either a plain string or a
        md5 postgresql hashed password::

            'md5{MD5OF({password}{role}}'

        If encrypted is None or True, the password will be automatically
        encrypted to the previous
        format if it is not already done.

    default_password
        The password used only when creating the user, unless password is set.

        .. versionadded:: 2016.3.0

    refresh_password
        Password refresh flag

        Boolean attribute to specify whether to password comparison check
        should be performed.

        If refresh_password is ``True``, the password will be automatically
        updated without extra password change check.

        This behaviour makes it possible to execute in environments without
        superuser access available, e.g. Amazon RDS for PostgreSQL

    groups
        A string of comma separated groups the user should be in

    user
        System user all operations should be performed on behalf of

        .. versionadded:: 0.17.0

    db_user
        Postgres database username, if different from config or default.

    db_password
        Postgres user's password, if any password, for a specified db_user.

    db_host
        Postgres database host, if different from config or default.

    db_port
        Postgres database port, if different from config or default.
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'User {0} is already present'.format(name)}
    # `createuser` is kept as a backwards-compatible alias of `createroles`.
    if createuser:
        createroles = True
    # default to encrypted passwords
    if encrypted is not False:
        encrypted = postgres._DEFAULT_PASSWORDS_ENCRYPTION
    # maybe encrypt if not already encrypted and necessary
    password = postgres._maybe_encrypt_password(name,
                                                password,
                                                encrypted=encrypted)
    if default_password is not None:
        default_password = postgres._maybe_encrypt_password(name,
                                                            default_password,
                                                            encrypted=encrypted)
    # Connection / run-as arguments forwarded to every postgres.* call below.
    db_args = {
        'maintenance_db': maintenance_db,
        'runas': user,
        'host': db_host,
        'user': db_user,
        'port': db_port,
        'password': db_password,
    }
    # check if user exists; an existing role switches us into 'update' mode
    mode = 'create'
    user_attr = __salt__['postgres.role_get'](
        name, return_password=not refresh_password, **db_args)
    if user_attr is not None:
        mode = 'update'
    cret = None
    # `update` collects only the attributes that actually differ from the
    # current role, so an idempotent run reports no changes.
    update = {}
    if mode == 'update':
        user_groups = user_attr.get('groups', [])
        if (
            createdb is not None
            and user_attr['can create databases'] != createdb
        ):
            update['createdb'] = createdb
        if (
            inherit is not None
            and user_attr['inherits privileges'] != inherit
        ):
            update['inherit'] = inherit
        if login is not None and user_attr['can login'] != login:
            update['login'] = login
        if (
            createroles is not None
            and user_attr['can create roles'] != createroles
        ):
            update['createroles'] = createroles
        if (
            replication is not None
            and user_attr['replication'] != replication
        ):
            update['replication'] = replication
        if superuser is not None and user_attr['superuser'] != superuser:
            update['superuser'] = superuser
        # With refresh_password the stored hash was not fetched above, so the
        # password is unconditionally treated as changed.
        if password is not None and (refresh_password or user_attr['password'] != password):
            update['password'] = True
        if groups is not None:
            lgroups = groups
            if isinstance(groups, (six.string_types, six.text_type)):
                lgroups = lgroups.split(',')
            if isinstance(lgroups, list):
                missing_groups = [a for a in lgroups if a not in user_groups]
                if missing_groups:
                    update['groups'] = missing_groups
    # On creation only, fall back to the one-time default password.
    if mode == 'create' and password is None:
        password = default_password
    if mode == 'create' or (mode == 'update' and update):
        # Dry run (test=True): report what would change without touching the
        # database.
        if __opts__['test']:
            if update:
                ret['changes'][name] = update
            ret['result'] = None
            ret['comment'] = 'User {0} is set to be {1}d'.format(name, mode)
            return ret
        # Dispatch to postgres.user_create or postgres.user_update.
        cret = __salt__['postgres.user_{0}'.format(mode)](
            username=name,
            createdb=createdb,
            createroles=createroles,
            encrypted=encrypted,
            superuser=superuser,
            login=login,
            inherit=inherit,
            replication=replication,
            rolepassword=password,
            groups=groups,
            **db_args)
    else:
        cret = None
    if cret:
        ret['comment'] = 'The user {0} has been {1}d'.format(name, mode)
        if update:
            ret['changes'][name] = update
        else:
            ret['changes'][name] = 'Present'
    elif cret is not None:
        ret['comment'] = 'Failed to create user {0}'.format(name)
        ret['result'] = False
    else:
        # Nothing needed doing; keep the default "already present" comment.
        ret['result'] = True
    return ret
def absent(name,
           user=None,
           maintenance_db=None,
           db_password=None,
           db_host=None,
           db_port=None,
           db_user=None):
    '''
    Ensure that the named user has been removed from the database

    name
        The username of the user to remove

    user
        System user all operations should be performed on behalf of

        .. versionadded:: 0.17.0

    db_user
        database username if different from config or default

    db_password
        user password if any password for a specified user

    db_host
        Database host if different from config or default

    db_port
        Database port if different from config or default
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    db_args = {
        'maintenance_db': maintenance_db,
        'runas': user,
        'host': db_host,
        'user': db_user,
        'port': db_port,
        'password': db_password,
    }
    # Nothing to do when the role does not exist.
    if not __salt__['postgres.user_exists'](name, **db_args):
        ret['comment'] = 'User {0} is not present, so it cannot ' \
                         'be removed'.format(name)
        return ret
    # Dry run: only report the pending removal.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'User {0} is set to be removed'.format(name)
        return ret
    if __salt__['postgres.user_remove'](name, **db_args):
        ret['comment'] = 'User {0} has been removed'.format(name)
        ret['changes'][name] = 'Absent'
    else:
        ret['result'] = False
        ret['comment'] = 'User {0} failed to be removed'.format(name)
    return ret
| [
"it_ltxin@163.com"
] | it_ltxin@163.com |
44cea4fe3778010257338bd771d9d18ac7df6092 | 125c9f3d0180e2f11dce03c8edbd5f5924c26165 | /api/envProjet/Scripts/pasteurize-script.py | a41b70ca648548d889ad5856e91983d007be18b7 | [] | no_license | pasterp/WhatBrandIsThat | 816bbfd4918ad60e5f74952b1d50ec60b799d7cc | 3e70b7578d6029a3367630863db93064d2a6185a | refs/heads/master | 2023-01-23T10:26:02.119102 | 2020-05-26T13:26:26 | 2020-05-26T13:26:26 | 163,280,347 | 0 | 0 | null | 2022-12-09T18:26:23 | 2018-12-27T10:20:52 | Objective-C | UTF-8 | Python | false | false | 443 | py | #!C:\Users\Vbourdon\PycharmProjects\testRest\envProjet\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.17.1','console_scripts','pasteurize'
__requires__ = 'future==0.17.1'
import re
import sys
from pkg_resources import load_entry_point
# Standard setuptools console-script shim: normalise argv[0] by stripping the
# ``-script.py``/``.exe`` wrapper suffix added on Windows installs, then hand
# control to the ``pasteurize`` entry point of the pinned ``future==0.17.1``
# distribution and exit with its return code.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('future==0.17.1', 'console_scripts', 'pasteurize')()
    )
| [
"32929884+ValentinBou@users.noreply.github.com"
] | 32929884+ValentinBou@users.noreply.github.com |
208e6516ab0e2ac5658d992587c4ae1606eb7ff5 | c473ecff4600ade1ad126e483999a89677bfd635 | /lightcone_FRB_decreasingz_xlos_forHaloFinder.py | 8e9aa60945fd1516774db04bbf6f17c6fec64a77 | [] | no_license | pagano-michael/FRB | 1f34b5adac49ddfa09cd781e37a71f3ad173e5b9 | 33c17ed854ea2e37b44e350dbbdc6dba4ac16975 | refs/heads/master | 2023-02-06T07:54:51.310251 | 2020-12-27T15:18:26 | 2020-12-27T15:18:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,268 | py | #!/usr/bin/env python
# coding: utf-8
#This code takes in an arbitrary set of boxes, whose directory and redshift ranges you must provide, and makes a lightcone
import numpy as np
import matplotlib.pyplot as pl
import sys
import os
import astropy
from astropy.cosmology import Planck15 as p15
from astropy.cosmology import z_at_value
from astropy.cosmology import WMAP9 as cosmo
def lightcone(**kwargs ):
    """Stitch simulation boxes spanning decreasing redshifts into a light cone.

    Each output slice at redshift z is a linear interpolation (in z) between
    the two input boxes whose redshifts bracket z, sampled at the
    comoving-distance pixel the line of sight has reached.

    Keyword Args:
        marker: filename tag identifying the box type ('smoothed' density,
            'xH' ionization, or 'halos').
        DIM: number of pixels per box side.
        z_range_of_boxes: redshift of each input box; assumed to be in
            strictly decreasing order (largest z first).
        N: number of lightcone slices (defaults to 50 per box).
        Box_length: comoving box size (Mpc).
        box_slice: transverse slice index used for the first slice
            (defaults to DIM - 1).
        return_redshifts: if True, also return the redshift of every slice.
        sharp_cutoff: stop and return after this many slices.
        halo_boxes_z: sequence of box arrays, one per entry of
            z_range_of_boxes.
            NOTE(review): effectively required — the main loop reads
            ``halo_boxes_z`` unconditionally, so omitting this kwarg raises
            NameError.  Confirm intended usage.

    Returns:
        ndarray of shape (N - 1, DIM, DIM) (or (sharp_cutoff, DIM, DIM) when
        `sharp_cutoff` triggers), optionally paired with the array of slice
        redshifts when `return_redshifts` is True.
    """
    #set defaults:
    mode = 'xH'  # NOTE(review): unused below
    marker = 'xH_'
    N = 500
    DIM = 200
    Box_length = 300
    z_start = 10
    z_end = 6
    nboxes = 21
    directory = '/Users/michael/Documents/MSI-C/21cmFAST/Boxes/z6-10/OriginalTemperatureBoxesNoGaussianities/'  # NOTE(review): unused below
    slice = DIM - 1  # shadows the builtin `slice`
    #sort arguments
    if 'marker' in kwargs:
        marker = kwargs.get('marker')
    if 'DIM' in kwargs:
        DIM = kwargs.get('DIM')
    if 'z_range_of_boxes' in kwargs:
        # Derive box count and redshift span from the user-supplied grid.
        z_range_of_boxes = kwargs.get('z_range_of_boxes')
        nboxes = len(z_range_of_boxes)
        z_start = np.max(z_range_of_boxes)
        z_end = np.min(z_range_of_boxes)
        print(z_start,z_end, nboxes)
    if 'N' in kwargs:
        N = kwargs.get('N')
    else:
        N = 50*nboxes
    if 'Box_length' in kwargs:
        Box_length = kwargs.get('Box_length')
    if 'box_slice' in kwargs:
        slice = kwargs.get('box_slice')
    if 'return_redshifts' in kwargs:
        return_redshifts = kwargs.get('return_redshifts')
    else:
        return_redshifts = False
    if 'sharp_cutoff' in kwargs:
        sharp_cutoff = kwargs.get('sharp_cutoff')
    else:
        sharp_cutoff = np.inf
    if 'halo_boxes_z' in kwargs:
        halo_boxes_z = kwargs.get('halo_boxes_z')
    #21cmFAST boxes have different naming tags, if it is an ionization box the redshifts info will be found
    #at different parts of the filename as compared to a density box
    if 'smoothed' in marker:
        #this is a density box
        s,e=25,31
    else:
        if 'xH' in marker:
            s,e = 10, 20
        else:
            if 'halos' in marker:
                s , e = 5, 10
            else:
                print('We can not identify what box this is')
                return -1
    # NOTE(review): `s` and `e` (filename substring offsets) are not used
    # anywhere below — presumably leftovers from a file-loading version.
    #the total range of redshifts that this lightcone will span
    # NOTE(review): this overwrites any user-supplied z_range_of_boxes with a
    # uniform grid spanning the same endpoints — confirm the input boxes are
    # uniformly spaced in redshift.
    z_range_of_boxes = np.linspace(z_start,z_end,nboxes)
    ####################################################
    # useful functions
    ####################################################
    #this function determines which boxes a given redshift lies between
    def find_sandwiched_bins(z_range, z):
        # z_range is ordered high -> low; walk the bin edges until z falls in
        # (z_ceil, z_floor].  Returns (lower_z, upper_z).
        # NOTE(review): implicitly returns None when z is never bracketed —
        # the caller would then fail when subscripting the result.
        z_floor = np.max(z_range)
        z_ceil = z_range[1]
        binn = 1
        while(z_ceil >= np.min(z_range)):
            if ((z <= z_floor) and (z > z_ceil)):
                return ( z_ceil, z_floor)
            z_floor = z_ceil
            if z_ceil == np.max(z_range):
                print('looking for ' , z_range, z)
                break
            z_ceil = z_range[binn+1]
            binn += 1
            #safety net
            if binn > 1000:
                print('breaking')
                break
    #function which converts a comoving distance to a pixel location within the box
    def comoving2pixel(DIM, Box_length, comoving_distance):
        return int(float(comoving_distance * DIM)/float(Box_length))
    #function which determines whether we have exceeded the maximum allowable redshift for a box
    def didweswitchbox(historyofzminus, z_plus, ctr):
        # True when the upper bracketing box differs from the previous step's.
        if z_plus < historyofzminus[ctr - 1 ]:
            return True
        else:
            return False
    ####################################################
    # initialize all relevant arrays
    ####################################################
    lightcone = np.zeros((N, DIM, DIM))
    lightcone_halo = np.zeros((N))  # NOTE(review): never used
    z_range = np.linspace(z_start,z_end,N)
    zs = []
    z = z_range[0]
    ctr = 0
    comoving_distance_z0_zstart = cosmo.comoving_distance(z_range[0]).value
    prev_pix_loc = 0
    pixel_addition = 0
    pixel_origin = 0
    pixel_location_relative_to_origin = 0
    historyofzminus = []
    ####################################################
    # loop through redshifts
    ####################################################
    box_path_redshifts = z_range_of_boxes
    #scroll through all the redshifts and pick out the slice of the box that corresponds to that z
    while(z > np.min(z_range)):
        #this redshift is sandwiched between the following z
        z_sandwhich = find_sandwiched_bins(box_path_redshifts, z)
        z_minus = z_sandwhich[0]
        z_plus = z_sandwhich[1]
        historyofzminus.append(z_plus)
        #these are the boxes that z is sandwiched between
        xH_minus = halo_boxes_z[list(box_path_redshifts).index(z_minus)]
        xH_plus = halo_boxes_z[list(box_path_redshifts).index(z_plus)]
        #convert that redshift to a comoving distance
        comoving_distance_z = cosmo.comoving_distance(z).value
        comoving_distance_z0_to_z = comoving_distance_z0_zstart - comoving_distance_z
        comoving_distance_from_last_switch = cosmo.comoving_distance(z_plus).value
        if ctr == 0:
            # First slice: anchor the line of sight at `slice` and record the
            # pixel offset travelled since z_start.
            pixel_addition = comoving2pixel(DIM,Box_length, comoving_distance_z0_to_z)
            prev_pix_loc = -pixel_addition + slice
            pixel_origin = slice
            #save this redshift
            zs.append(z)
            # Linear interpolation in redshift between the bracketing boxes.
            # NOTE(review): the first slice samples along the third axis
            # ([:,:,slice]) while all later slices sample along the first
            # ([pixel_addition,:,:]) — confirm this axis swap is intended.
            lightcone[ctr,:,:] = (xH_plus[:,:,slice] - xH_minus[:,:,slice])*((z - z_minus)/(z_plus - z_minus)) + xH_minus[:,:,slice]
            #increment counter and redshift
            ctr += 1
            z = z_range[ctr]
            #skip to the next step
            continue
        else:
            # On a box switch, re-anchor the pixel origin at the previous
            # slice position so the scroll through the box stays continuous.
            if didweswitchbox(historyofzminus, z_plus, ctr):
                pixel_origin = prev_pix_loc
            pixel_location_relative_to_origin = -comoving2pixel(DIM,Box_length, comoving_distance_from_last_switch - comoving_distance_z)
            # Modulo wraps around the (assumed periodic) box.
            pixel_addition = (pixel_location_relative_to_origin + pixel_origin)%DIM
            prev_pix_loc = pixel_addition
            #save this redshift
            zs.append(z)
            #save the box information for this particular lightcone slice
            lightcone[ctr,:,:] = (xH_plus[pixel_addition,:,:] - xH_minus[pixel_addition,:,:])*((z - z_minus)/(z_plus - z_minus)) + xH_minus[pixel_addition,:,:]
            ctr += 1
            z = z_range[ctr]
            #pl.savefig(str(ctr)+'.png')
        #safety net
        if ctr > N:
            break
        #does the user want us to stop the z scroll after a particular value?
        if ctr >= sharp_cutoff:
            if return_redshifts:
                return lightcone[0:sharp_cutoff,:,] , np.array(zs[0:])
            else:
                return lightcone[0:sharp_cutoff,:,]
    #return the lightcone history as the redshift log (should the user specify that)
    if return_redshifts:
        return lightcone[0:int(N-1),:,] , np.array(zs)
    else:
        return lightcone[0:int(N-1),:,]
#lightconepng = lightcone(N = 500 )
#directory = '/Users/michael/Research/LAE_Clustering/Boxes_w_HaloFinder/'
#pl.imshow(np.swapaxes(lightconepng,0,2)[100])
#pl.savefig('Lightcone.png')
#pl.ylabel('Box slice at x = 0')
#pl.xlabel('Redshift')
#pl.show()
#pl.close()
| [
"michael.pagano@mail.mcgill.ca"
] | michael.pagano@mail.mcgill.ca |
567c4f1b87268b45b3e5955082e71554b4e4551e | e3abb55ba514fb102ce01601ab0e9ebc15f5d26f | /code/l010_await.py | 1c1c6228bf6292b72ebae15c80d040f4c8a0b5a4 | [] | no_license | joshmarshall/coroutine-presentation | 1d8dec7a6c31a0ee5e8875883a326ea801300e93 | a6d07e70bdff286f45785f4127d854ea701a6a08 | refs/heads/master | 2023-09-03T04:23:20.422823 | 2018-01-03T10:19:50 | 2018-01-03T10:19:50 | 64,452,999 | 1 | 0 | null | 2017-11-19T21:17:58 | 2016-07-29T05:29:08 | Python | UTF-8 | Python | false | false | 1,248 | py | import asyncio
class Session(object):
    """Toy async database session demonstrating the async context-manager
    (``async with``) and async-iterator (``async for``) protocols."""

    @classmethod
    def connect(cls):
        # Returns an un-opened session; the "connection" is established when
        # the async context manager is entered.
        return Session()

    async def __aenter__(self):
        print("Creating session...")
        await asyncio.sleep(1)  # simulate connection latency
        return self

    async def __aexit__(self, exc_typ, exc, tb):
        # can also handle exceptions as necessary
        await asyncio.sleep(1)  # simulate teardown latency
        print("Disconnected.")

    def __aiter__(self):
        # FIX: __aiter__ must be a plain (synchronous) method returning an
        # async iterator.  Defining it with ``async def`` was deprecated in
        # Python 3.5.2 and makes ``async for`` raise TypeError since 3.8.
        self.records = [Record(), Record()]
        return self

    async def __anext__(self):
        print("Finding record...")
        await asyncio.sleep(1)  # simulate query latency
        if not self.records:
            raise StopAsyncIteration()
        return self.records.pop(0)

    def find(self):
        # Fluent no-op: the session itself is the (async-)iterable result set.
        return self


class Record(object):
    """Toy record whose update is an awaitable operation."""

    async def update(self, **kwargs):
        await asyncio.sleep(1)  # simulate write latency
        print("Updating record: {0}".format(kwargs))
async def wait():
    """Open a session and apply an incrementing ``foo`` update to every record."""
    async with Session.connect() as session:
        counter = 0
        async for record in session.find():
            counter += 1
            await record.update(foo=counter)
def main():
    """Drive the ``wait`` coroutine to completion on the default event loop."""
    event_loop = asyncio.get_event_loop()
    print("Starting...")
    event_loop.run_until_complete(wait())
    print("Finishing...")
    event_loop.close()
# Run the demo only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| [
"catchjosh@gmail.com"
] | catchjosh@gmail.com |
62abf1b5cf573596ca943d290748c41b37bd2e49 | 4dfb1731e42654d2694b9ea109b0da26f0e6215c | /qbittorrent_mod.py | d6a8fc5bab59ef956ce8f458554ba67a2d766cb4 | [
"MIT"
] | permissive | y2038558528/flexget_qbittorrent_mod | 3e89e13c8814e21de51e101f3430ce660b4cfcb5 | a49dacf0b4bf20217cb43df0ad94112b7dc67364 | refs/heads/master | 2023-03-22T22:00:04.330858 | 2021-03-15T13:45:02 | 2021-03-15T13:45:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,029 | py | import math
import os
import re
from datetime import datetime
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from loguru import logger
from .ptsites.client.qbittorrent_client import QBittorrentClientFactory
class QBittorrentModBase:
    """Shared base for the qBittorrent plugins: normalises configuration and
    manages the client handle used during task execution."""

    def __init__(self):
        # The client is (re)created at task start; None until then.
        self.client = None

    def prepare_config(self, config):
        """Normalise *config* in place: a bare boolean becomes
        ``{'enabled': bool}`` and missing connection keys get defaults.
        Returns the same dict that is mutated."""
        if isinstance(config, bool):
            config = {'enabled': config}
        defaults = (
            ('enabled', True),
            ('host', 'localhost'),
            ('port', 8080),
            ('use_ssl', False),
            ('verify_cert', True),
        )
        for key, value in defaults:
            config.setdefault(key, value)
        return config

    def create_client(self, config):
        """Return a qBittorrent client for *config* via the shared factory."""
        return QBittorrentClientFactory().get_client(config)

    def on_task_start(self, task, config):
        """Reset the client; in ``--test`` runs verify connectivity early."""
        self.client = None
        config = self.prepare_config(config)
        if not config['enabled']:
            return
        if not task.options.test:
            return
        logger.info('Trying to connect to qBittorrent...')
        self.client = self.create_client(config)
        if self.client:
            logger.info('Successfully connected to qBittorrent.')
        else:
            logger.error('It looks like there was a problem connecting to qBittorrent.')
class PluginQBittorrentModInput(QBittorrentModBase):
    """Input plugin: emits qBittorrent torrents as entries, or a single
    server-state entry when ``server_state`` is configured."""

    schema = {
        'type': 'object',
        'properties': {
            'host': {'type': 'string'},
            'use_ssl': {'type': 'boolean'},
            'port': {'type': 'integer'},
            'username': {'type': 'string'},
            'password': {'type': 'string'},
            'verify_cert': {'type': 'boolean'},
            'server_state': {'oneOf': [{'type': 'boolean'}, {'type': 'string'}]},
            'force_update': {'type': 'boolean'},
            'enabled': {'type': 'boolean'},
        },
        'additionalProperties': False
    }

    def prepare_config(self, config):
        # Delegates to the shared normalisation in the base class.
        return super().prepare_config(config)

    def on_task_input(self, task, config):
        config = self.prepare_config(config)
        if not config['enabled']:
            return
        server_state = config.get('server_state')
        if not server_state:
            # Torrent mode: one entry per torrent from the client snapshot.
            self.client = self.create_client(config)
            snapshot = self.client.get_main_data_snapshot(
                id(task), force_update=config.get('force_update', False))
            return list(snapshot.get('entry_dict').values())
        # Server-state mode: emit a single status entry.
        title = 'qBittorrent Server State' if isinstance(server_state, bool) else server_state
        entry = Entry(title=title, url=config.get('host'))
        entry['time'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        entry['server_state'] = {}
        try:
            self.client = self.create_client(config)
            entry['server_state'] = self.client.get_main_data_snapshot(id(task)).get('server_state')
            entry['server_state']['flexget_connected'] = True
        except plugin.PluginError:
            entry['server_state']['flexget_connected'] = False
        return [entry]
class PluginQBittorrentMod(QBittorrentModBase):
    """Output/action plugin for qBittorrent.

    Exactly one action (add / remove / resume / pause / modify /
    manage_conn / limit_upload_by_tracker) is applied per task; see the
    matching ``<action>_entries`` methods below.
    """

    schema = {
        'type': 'object',
        'properties': {
            'host': {'type': 'string'},
            'use_ssl': {'type': 'boolean'},
            'port': {'type': 'integer'},
            'username': {'type': 'string'},
            'password': {'type': 'string'},
            'verify_cert': {'type': 'boolean'},
            'action': {
                'type': 'object',
                'properties': {
                    # Options forwarded to the torrents/add API call.
                    'add': {
                        'type': 'object',
                        'properties': {
                            'savepath': {'type': 'string'},
                            'cookie': {'type': 'string'},
                            'category': {'type': 'string'},
                            'skip_checking': {'type': 'boolean'},
                            'paused': {'type': 'string'},
                            'root_folder': {'type': 'string'},
                            'rename': {'type': 'string'},
                            'upLimit': {'type': 'integer'},
                            'dlLimit': {'type': 'integer'},
                            'autoTMM': {'type': 'boolean'},
                            'sequentialDownload': {'type': 'string'},
                            'firstLastPiecePrio': {'type': 'string'},
                            # Thresholds that make on_task_download reject
                            # entries when the server is saturated.
                            'reject_on': {
                                'type': 'object',
                                'properties': {
                                    'bandwidth_limit': {'type': 'integer'},
                                    # A number in [0.1, 0.9] is treated as a
                                    # fraction of the current download limit
                                    # (see on_task_download).
                                    'dl_speed': {
                                        'oneOf': [
                                            {'type': 'boolean'},
                                            {'type': 'integer'},
                                            {'type': 'number', 'minimum': 0.1, 'maximum': 0.9},
                                        ]
                                    },
                                    'dl_limit': {'oneOf': [{'type': 'boolean'}, {'type': 'integer'}]}
                                }
                            }
                        }
                    },
                    # Exactly one removal strategy: keeper or cleaner.
                    'remove': {
                        'type': 'object',
                        'properties': {
                            'keeper': {
                                'type': 'object',
                                'properties': {
                                    'keep_disk_space': {'type': 'integer'},
                                    'check_reseed': {
                                        'oneOf': [{'type': 'boolean'}, {'type': 'array', 'items': {'type': 'string'}}]},
                                    'delete_files': {'type': 'boolean'},
                                    'dl_limit_on_succeeded': {'type': 'integer'},
                                    'alt_dl_limit_on_succeeded': {'type': 'integer'},
                                    'dl_limit_interval': {'type': 'integer'}
                                },
                            },
                            'cleaner': {
                                'type': 'object',
                                'properties': {
                                    'delete_files': {'type': 'boolean'}
                                }
                            }
                        },
                        "minProperties": 1,
                        "maxProperties": 1,
                    },
                    'resume': {
                        'type': 'object',
                        'properties': {
                            'recheck_torrents': {'type': 'boolean'}
                        }
                    },
                    'pause': {
                        'type': 'boolean'
                    },
                    'modify': {
                        'type': 'object',
                        'properties': {
                            'tag_by_tracker': {'type': 'boolean'},
                            # Mapping of old tracker URL -> replacement URL
                            # (an empty replacement removes the tracker).
                            'replace_trackers': {
                                'type': 'object',
                                'properties': {
                                }
                            }
                        }
                    },
                    'manage_conn': {
                        'type': 'object',
                        'properties': {
                            'min': {'type': 'integer'},
                            'max': {'type': 'integer'}
                        }
                    },
                    'limit_upload_by_tracker': {
                        'type': 'object',
                        'properties': {
                            'working': {'type': 'integer'},
                            'not_working': {'type': 'integer'}
                        }
                    }
                },
                # Exactly one action may be configured per task.
                "minProperties": 1,
                "maxProperties": 1,
            },
            'fail_html': {'type': 'boolean'},
        },
        'additionalProperties': False
    }
def prepare_config(self, config):
    """Extend the base config with plugin-specific defaults."""
    config = super().prepare_config(config)
    # Treat HTML payloads served for .torrent URLs as download failures
    # unless explicitly disabled.
    config.setdefault('fail_html', True)
    return config
@plugin.priority(120)
def on_task_download(self, task, config):
    """Pre-download hook: reject accepted entries when the server is
    saturated, otherwise fetch their torrent files.

    Runs at higher priority than the built-in download plugin so rejected
    entries never hit the network.
    """
    config = self.prepare_config(config)
    add_options = config.get('action').get('add')
    if not add_options or not task.accepted:
        return
    if not self.client:
        self.client = self.create_client(config)
        if self.client:
            logger.debug('Successfully connected to qBittorrent.')
        else:
            raise plugin.PluginError("Couldn't connect to qBittorrent.")
    main_data_snapshot = self.client.get_main_data_snapshot(id(task))
    server_state = main_data_snapshot.get('server_state')
    # BUG FIX: 'reject_on' is optional in the schema; fall back to an empty
    # dict so the .get() calls below cannot raise AttributeError on None.
    reject_on = add_options.get('reject_on') or {}
    bandwidth_limit = reject_on.get('bandwidth_limit')
    reject_on_dl_speed = reject_on.get('dl_speed')
    reject_on_dl_limit = reject_on.get('dl_limit')
    reject_reason = ''
    dl_rate_limit = server_state.get('dl_rate_limit')
    if reject_on_dl_limit:
        # Reject when the global download limit has already been throttled
        # below the configured threshold.
        if dl_rate_limit and dl_rate_limit < reject_on_dl_limit:
            reject_reason = 'dl_limit: {:.2F} MiB < reject_on_dl_limit: {:.2F} MiB'.format(
                dl_rate_limit / (1024 * 1024), reject_on_dl_limit / (1024 * 1024))
    if reject_on_dl_speed:
        # A float threshold is a fraction of the current (or configured)
        # download limit; resolve it to an absolute byte rate.
        if isinstance(reject_on_dl_speed, float):
            dl_rate_limit = dl_rate_limit if dl_rate_limit else bandwidth_limit
            reject_on_dl_speed = int(dl_rate_limit * reject_on_dl_speed)
        dl_info_speed = server_state.get('dl_info_speed')
        if dl_info_speed and dl_info_speed > reject_on_dl_speed:
            reject_reason = 'dl_speed: {:.2F} MiB > reject_on_dl_speed: {:.2F} MiB'.format(
                dl_info_speed / (1024 * 1024), reject_on_dl_speed / (1024 * 1024))
    for entry in task.accepted:
        if reject_reason:
            entry.reject(reason=reject_reason, remember=True)
            site_name = self._get_site_name(entry.get('url'))
            logger.info('reject {}, because: {}, site: {}', entry['title'], reject_reason, site_name)
            continue
        if 'download' not in task.config:
            download = plugin.get('download', self)
            download.get_temp_files(task, handle_magnets=True, fail_html=config['fail_html'])
@plugin.priority(135)
def on_task_output(self, task, config):
    """Dispatch the single configured action to its ``<name>_entries`` method."""
    config = self.prepare_config(config)
    action_config = config.get('action')
    if len(action_config) != 1:
        raise plugin.PluginError('There must be and only one action')
    # Don't touch the server when only learning seen entries.
    if task.options.learn:
        return
    # 'remove' actions may run without accepted entries (space keeping);
    # everything else needs accepted entries to act on.
    if not task.accepted and not action_config.get('remove'):
        return
    if not self.client:
        self.client = self.create_client(config)
        if self.client:
            logger.debug('Successfully connected to qBittorrent.')
        else:
            raise plugin.PluginError("Couldn't connect to qBittorrent.")
    (action_name, option), = action_config.items()
    action = getattr(self, action_name + '_entries', None)
    if action:
        action(task, option)
    else:
        raise plugin.PluginError('Unknown action.')
def add_entries(self, task, add_options):
    """Add every accepted entry to qBittorrent.

    Per-entry fields override the task-level 'add' options.  Magnet links
    are added by URL, everything else from the downloaded torrent file.
    """
    for entry in task.accepted:
        # BUG FIX: the options dict used to be built once outside the loop,
        # so option values from one entry leaked into all later entries.
        options = {}
        for attr_str in ['savepath',
                         'cookie',
                         'category',
                         'skip_checking',
                         'paused',
                         'root_folder',
                         'rename',
                         'upLimit',
                         'dlLimit',
                         'autoTMM',
                         'sequentialDownload',
                         'firstLastPiecePrio']:
            attr = entry.get(attr_str, add_options.get(attr_str))
            if attr:
                options[attr_str] = attr
        # With automatic torrent management the category determines the
        # save path, so an explicit savepath must not be sent along.
        if options.get('autoTMM') and options.get('category') and options.get('savepath'):
            del options['savepath']
        is_magnet = entry['url'].startswith('magnet:')
        if not is_magnet:
            if 'file' not in entry:
                entry.fail('File missing?')
                return
            if not os.path.exists(entry['file']):
                tmp_path = os.path.join(task.manager.config_base, 'temp')
                logger.debug('entry: {}', entry)
                logger.debug('temp: {}', ', '.join(os.listdir(tmp_path)))
                entry.fail("Downloaded temp file '%s' doesn't exist!?" % entry['file'])
                return
            self.client.add_torrent_file(entry['file'], options)
        else:
            self.client.add_torrent_url(entry['url'], options)
def remove_entries(self, task, remove_options):
    """Dispatch to ``remove_entries_<mode>`` for the single configured mode."""
    (mode_name, option), = remove_options.items()
    handler = getattr(self, 'remove_entries_' + mode_name, None)
    if handler is None:
        raise plugin.PluginError('Unknown mode.')
    handler(task, option)
def remove_entries_keeper(self, task, keeper_options):
    """Free disk space by deleting torrents, honouring reseed groups.

    Torrents sharing one save path are treated as a unit: the whole group
    is deleted or none of it.  When space is still short afterwards, a
    global download limit is applied instead of deleting more.
    All sizes below are bytes unless noted otherwise.
    """
    delete_files = keeper_options.get('delete_files')
    check_reseed = keeper_options.get('check_reseed')
    keep_disk_space = keeper_options.get('keep_disk_space')
    dl_limit_interval = keeper_options.get('dl_limit_interval', 24 * 60 * 60)
    main_data_snapshot = self.client.get_main_data_snapshot(id(task))
    server_state = main_data_snapshot.get('server_state')
    dl_rate_limit = server_state.get('dl_rate_limit')
    use_alt_speed_limits = server_state.get('use_alt_speed_limits')
    free_space_on_disk = server_state.get('free_space_on_disk')
    dl_limit_mode = 'dl_limit'
    dl_limit_on_succeeded = keeper_options.get('dl_limit_on_succeeded', 0)
    alt_dl_limit_on_succeeded = keeper_options.get('alt_dl_limit_on_succeeded', 0)
    if use_alt_speed_limits:
        # Alternative speed limits are active -> drive the alt limit instead.
        dl_limit_mode = 'alt_dl_limit'
        dl_limit_on_succeeded = alt_dl_limit_on_succeeded
    # Config value is GiB; convert to bytes for comparison.
    keep_disk_space = keep_disk_space * 1024 * 1024 * 1024
    if keep_disk_space < free_space_on_disk:
        # Enough free space: restore the configured "succeeded" limit.
        # NOTE(review): dl_limit_on_succeeded defaults to 0 above, so this
        # None check can never be False -- confirm the intended semantics.
        if dl_limit_on_succeeded is not None:
            dl_limit = math.floor(dl_limit_on_succeeded / 1024) * 1024
            if dl_limit != dl_rate_limit:
                self.client.set_application_preferences('{{"{}": {}}}'.format(dl_limit_mode, dl_limit))
                logger.info("set {} to {} KiB/s", dl_limit_mode, dl_limit / 1024)
        return
    accepted_entry_hashes = []
    delete_hashes = []
    delete_size = 0
    if not task.accepted:
        # Nothing deletable this run: maybe throttle downloads and bail out.
        self.calc_and_set_dl_limit(keep_disk_space, free_space_on_disk, delete_size, dl_limit_interval,
                                   dl_limit_on_succeeded, dl_rate_limit, dl_limit_mode)
        return
    entry_dict = main_data_snapshot.get('entry_dict')
    reseed_dict = main_data_snapshot.get('reseed_dict')
    for entry in task.accepted:
        accepted_entry_hashes.append(entry['torrent_info_hash'])
    for entry_hash in accepted_entry_hashes:
        if entry_hash in delete_hashes:
            continue
        server_entry = entry_dict.get(entry_hash)
        if not server_entry:
            # Torrent vanished since the snapshot; force a fresh sync.
            # NOTE(review): execution still falls through to the .get()
            # below with server_entry = None -- confirm a 'continue' is
            # not missing here (compare remove_entries_cleaner).
            self.client.reset_rid()
        save_path_with_name = server_entry.get('qbittorrent_save_path_with_name')
        reseed_entry_list = reseed_dict.get(save_path_with_name)
        check_hashes = []
        torrent_hashes = []
        torrent_size = 0
        for reseed_entry in reseed_entry_list:
            if reseed_entry['qbittorrent_completed'] != 0:
                torrent_size = reseed_entry['qbittorrent_completed']
            if isinstance(check_reseed, list):
                # Only the listed sites count towards the reseed check.
                trackers = reseed_entry['qbittorrent_trackers']
                site_names = []
                for tracker in trackers:
                    site_names.append(self._get_site_name(tracker.get('url')))
                if len(set(check_reseed) & set(site_names)) > 0:
                    check_hashes.append(reseed_entry['torrent_info_hash'])
            else:
                check_hashes.append(reseed_entry['torrent_info_hash'])
            torrent_hashes.append(reseed_entry['torrent_info_hash'])
        if check_reseed and not set(accepted_entry_hashes) >= set(check_hashes):
            # Some reseeds of this payload were not accepted: keep the group.
            for torrent_hash in torrent_hashes:
                entry_dict.get(torrent_hash).reject(
                    reason='torrents with the same save path are not all tested')
            continue
        else:
            if keep_disk_space > free_space_on_disk + delete_size:
                delete_size += torrent_size
                self._build_delete_hashes(delete_hashes, torrent_hashes, entry_dict, keep_disk_space,
                                          free_space_on_disk, delete_size)
        if keep_disk_space < free_space_on_disk + delete_size:
            # Enough space reclaimed; stop selecting victims.
            break
    self.calc_and_set_dl_limit(keep_disk_space, free_space_on_disk, delete_size, dl_limit_interval,
                               dl_limit_on_succeeded, dl_rate_limit, dl_limit_mode)
    if len(delete_hashes) > 0:
        self.client.delete_torrents(str.join('|', delete_hashes), delete_files)
def calc_and_set_dl_limit(self, keep_disk_space, free_space_on_disk, delete_size, dl_limit_interval,
                          dl_limit_on_succeeded, dl_rate_limit, dl_limit_mode):
    """Throttle downloads so remaining space lasts ``dl_limit_interval`` seconds.

    Sizes are bytes; the computed limit is floored to whole KiB and only
    pushed to the server when it differs from the current limit.
    """
    if keep_disk_space > free_space_on_disk + delete_size:
        # Still short on space even after the planned deletions.
        dl_limit = (free_space_on_disk + delete_size) / dl_limit_interval
        if dl_limit_on_succeeded and dl_limit > dl_limit_on_succeeded:
            dl_limit = dl_limit_on_succeeded
        dl_limit = math.floor(dl_limit / 1024) * 1024
        if dl_limit != dl_rate_limit:
            self.client.set_application_preferences('{{"{}": {}}}'.format(dl_limit_mode, dl_limit))
            logger.warning("not enough disk space, set {} to {} KiB/s", dl_limit_mode, dl_limit / 1024)
def _build_delete_hashes(self, delete_hashes, torrent_hashes, all_entry_map, keep_disk_space, free_space_on_disk,
                         delete_size):
    """Mark a whole reseed group for deletion and log each member.

    Mutates ``delete_hashes`` in place; entries are accepted so later
    phases see the decision, and logged most-recently-active first.
    """
    delete_hashes.extend(torrent_hashes)
    logger.info('keep_disk_space: {:.2F} GiB, free_space_on_disk: {:.2f} GiB, delete_size: {:.2f} GiB',
                keep_disk_space / (1024 * 1024 * 1024), free_space_on_disk / (1024 * 1024 * 1024),
                delete_size / (1024 * 1024 * 1024))
    entries = []
    for torrent_hash in torrent_hashes:
        entry = all_entry_map.get(torrent_hash)
        entry.accept(reason='torrent with the same save path are all pass tested')
        entries.append(entry)
    entries.sort(key=lambda e: e['qbittorrent_last_activity'], reverse=True)
    for entry in entries:
        logger.info(
            '{}, size: {:.2f} GiB, seeding_time: {:.2f} h, share_ratio: {:.2f}, last_activity: {}, site: {}',
            entry['title'],
            entry['qbittorrent_completed'] / (1024 * 1024 * 1024),
            entry['qbittorrent_seeding_time'] / (60 * 60),
            entry['qbittorrent_share_ratio'],
            entry['qbittorrent_last_activity'],
            entry['qbittorrent_tags'])
def remove_entries_cleaner(self, task, cleaner_options):
    """Remove all accepted torrents, deleting payload files only when the
    entire reseed group (same save path) was accepted."""
    delete_files = cleaner_options.get('delete_files')
    delete_hashes = []
    delete_files_hashes = []
    accepted_entry_hashes = []
    main_data_snapshot = self.client.get_main_data_snapshot(id(task))
    entry_dict = main_data_snapshot.get('entry_dict')
    reseed_dict = main_data_snapshot.get('reseed_dict')
    for entry in task.accepted:
        accepted_entry_hashes.append(entry['torrent_info_hash'])
    for entry_hash in accepted_entry_hashes:
        if entry_hash in delete_hashes or entry_hash in delete_files_hashes:
            continue
        server_entry = entry_dict.get(entry_hash)
        if not server_entry:
            # Torrent vanished since the snapshot; force a fresh sync and skip.
            self.client.reset_rid()
            continue
        save_path_with_name = server_entry.get('qbittorrent_save_path_with_name')
        reseed_entry_list = reseed_dict.get(save_path_with_name)
        torrent_hashes = []
        for reseed_entry in reseed_entry_list:
            torrent_hashes.append(reseed_entry['torrent_info_hash'])
        if not set(accepted_entry_hashes) >= set(torrent_hashes):
            # Only part of the group was accepted: remove those torrents but
            # keep the payload on disk for the remaining reseeds.
            delete_hashes.extend(set(accepted_entry_hashes) & set(torrent_hashes))
        else:
            delete_files_hashes.extend(torrent_hashes)
    if len(delete_hashes) > 0:
        self.client.delete_torrents(str.join('|', delete_hashes), False)
        self.print_clean_log(entry_dict, delete_hashes, False)
    if len(delete_files_hashes) > 0:
        self.client.delete_torrents(str.join('|', delete_files_hashes), delete_files)
        self.print_clean_log(entry_dict, delete_files_hashes, delete_files)
def print_clean_log(self, entry_dict, hashes, delete_files):
    """Log details for every torrent removed by the cleaner."""
    for info_hash in hashes:
        torrent = entry_dict.get(info_hash)
        logger.info(
            '{}, size: {:.2f} GiB, seeding_time: {:.2f} h, share_ratio: {:.2f}, last_activity: {}, tracker_msg: {}, site: {}, delete_files: {}',
            torrent['title'],
            torrent['qbittorrent_completed'] / (1024 * 1024 * 1024),
            torrent['qbittorrent_seeding_time'] / (60 * 60),
            torrent['qbittorrent_share_ratio'],
            torrent['qbittorrent_last_activity'],
            torrent['qbittorrent_tracker_msg'],
            torrent['qbittorrent_tags'],
            delete_files,
        )
def resume_entries(self, task, resume_options):
    """Resume accepted torrents whose save path has at least one seeding
    sibling; optionally recheck the ones that have none."""
    recheck_torrents = resume_options.get('recheck_torrents')
    main_data_snapshot = self.client.get_main_data_snapshot(id(task))
    reseed_dict = main_data_snapshot.get('reseed_dict')
    hashes = []
    recheck_hashes = []
    for entry in task.accepted:
        save_path_with_name = entry['qbittorrent_save_path_with_name']
        reseed_entry_list = reseed_dict.get(save_path_with_name)
        resume = False
        for reseed_entry in reseed_entry_list:
            # States like 'uploading'/'stalledUP' count as seeding,
            # but an explicitly paused-complete torrent does not.
            seeding = 'up' in reseed_entry['qbittorrent_state'].lower() and reseed_entry[
                'qbittorrent_state'] != 'pausedUP'
            if seeding:
                hashes.append(entry['torrent_info_hash'])
                logger.info('{}', entry['title'])
                resume = True
                break
        if not resume and entry['qbittorrent_state'] != 'checkingUP':
            entry.reject(reason='can not find seeding torrent in same save path')
            recheck_hashes.append(entry['torrent_info_hash'])
    if recheck_torrents and len(recheck_hashes) > 0:
        logger.info('recheck {}', recheck_hashes)
        self.client.recheck_torrents(str.join('|', recheck_hashes))
    self.client.resume_torrents(str.join('|', hashes))
def pause_entries(self, task, pause_options):
    """Pause every accepted torrent (no-op when the option is falsy)."""
    if not pause_options:
        return
    info_hashes = []
    for entry in task.accepted:
        logger.info('pause: {}', entry['title'])
        info_hashes.append(entry['torrent_info_hash'])
    self.client.pause_torrents('|'.join(info_hashes))
def modify_entries(self, task, modify_options):
    """Tag torrents after their tracker's site and/or rewrite tracker URLs."""
    tag_by_tracker = modify_options.get('tag_by_tracker')
    replace_trackers = modify_options.get('replace_trackers')
    for entry in task.accepted:
        tags = entry.get('qbittorrent_tags')
        torrent_trackers = entry.get('qbittorrent_trackers')
        for tracker in torrent_trackers:
            if tag_by_tracker:
                site_name = self._get_site_name(tracker.get('url'))
                if site_name and site_name not in tags:
                    self.client.add_torrent_tags(entry['torrent_info_hash'], site_name)
                    # Track locally so the same tag isn't added twice
                    # while iterating this entry's trackers.
                    tags += ', {}'.format(site_name)
                    logger.info('{} add tag {}', entry.get('title'), site_name)
            if replace_trackers:
                for orig_url, new_url in replace_trackers.items():
                    if tracker.get('url') == orig_url:
                        # An empty replacement URL means: remove the tracker.
                        if new_url:
                            logger.info('{} update tracker {}', entry.get('title'), new_url)
                            self.client.edit_trackers(entry.get('torrent_info_hash'), orig_url, new_url)
                        else:
                            logger.info('{} remove tracker {}', entry.get('title'), orig_url)
                            self.client.remove_trackers(entry.get('torrent_info_hash'), orig_url)
def manage_conn_entries(self, task, manage_conn_options):
    """Adjust qBittorrent's global max connections by a per-entry 'step',
    clamped to the configured [min, max] range."""
    min_conn = manage_conn_options.get('min')
    max_conn = manage_conn_options.get('max')
    for entry in task.accepted:
        step = entry.get('step')
        if not step:
            # No adjustment requested; nothing else to do for this task.
            return
        server_state = entry.get('server_state')
        server_queued_io_jobs = server_state.get('queued_io_jobs')
        server_total_peer_connections = server_state.get('total_peer_connections')
        application_preferences = self.client.get_application_preferences()
        max_connect = application_preferences.get('max_connec')
        # -1 means "unlimited" in qBittorrent preferences.
        if max_connect == -1:
            max_connect = float('inf')
        # Only grow when the current cap is actually being hit; shrink always.
        if (step > 0 and max_connect <= server_total_peer_connections) or step < 0:
            max_connect_changed = server_total_peer_connections + step
            if max_connect_changed < min_conn:
                max_connect_changed = min_conn
            elif max_connect_changed > max_conn:
                max_connect_changed = max_conn
            self.client.set_application_preferences('{{"max_connec": {}}}'.format(max_connect_changed))
            logger.info('queued_io_jobs: {} , total_peer_connections: {}, set max_connec to {}',
                        server_queued_io_jobs, server_total_peer_connections, max_connect_changed)
def limit_upload_by_tracker_entries(self, task, limit_when_not_working_options):
    """Set per-torrent upload limits depending on tracker health.

    Tracker status codes observed here: 2 = working, 3 = updating (skip
    the torrent until the tracker settles).
    """
    working_speed = limit_when_not_working_options.get('working')
    not_working_speed = limit_when_not_working_options.get('not_working')
    working_hashes = []
    not_working_hashes = []
    for entry in task.accepted:
        torrent_trackers = entry.get('qbittorrent_trackers')
        is_working = False
        updating = False
        for tracker in torrent_trackers:
            status = tracker.get('status')
            if status == 2:
                is_working = True
            elif status == 3:
                updating = True
        if updating:
            continue
        # qBittorrent reports "no limit" as -1; normalise to 0 for comparison.
        up_limit = 0 if entry['qbittorrent_up_limit'] == -1 else entry['qbittorrent_up_limit']
        if is_working:
            # A per-entry 'working' speed overrides the task-level one and is
            # applied individually; task-level limits are applied in one batch.
            entry_working = entry.get('working') if entry.get('working') else working_speed
            if up_limit != entry_working:
                if entry.get('working'):
                    self.client.set_torrent_upload_limit(entry['torrent_info_hash'], entry_working)
                else:
                    working_hashes.append(entry['torrent_info_hash'])
                logger.debug(
                    f'{entry["title"]} site: {entry["qbittorrent_tags"]} tracker is working, set torrent upload limit to {entry_working} B/s')
        else:
            if up_limit != not_working_speed:
                not_working_hashes.append(entry['torrent_info_hash'])
                logger.debug(
                    f'{entry["title"]} site: {entry["qbittorrent_tags"]} tracker is not working, set torrent upload limit to {not_working_speed} B/s')
    if working_hashes:
        self.client.set_torrent_upload_limit(str.join('|', working_hashes), working_speed)
    if not_working_hashes:
        self.client.set_torrent_upload_limit(str.join('|', not_working_hashes), not_working_speed)
def _get_site_name(self, tracker_url):
    """Extract the site identifier (second-level domain label) from a
    tracker URL.

    Examples: 'http://tracker.example.com/announce' -> 'example',
    'https://t.hd.edu.cn/ann' -> 'hd' (skips the 'edu' registry label).
    Returns None when no host can be extracted.
    """
    # BUG FIX: the original pattern '(?<=//).*?(?=/)' required a '/' after
    # the host, so URLs without a path yielded no site at all.
    re_object = re.search('(?<=//)[^/]+', tracker_url)
    if not re_object:
        return None
    # Strip an optional :port so it cannot pollute the last label.
    host = re_object.group().split(':')[0]
    labels = host.split('.')
    if len(labels) > 1:
        site_name = labels[-2]
        # 'edu' is a registry label, not the site: step one label left, but
        # only when one exists.  BUG FIX: the original indexed
        # domain[len - 3], which silently wraps to a negative index for
        # two-label hosts like 'edu.cn'.
        if site_name == 'edu' and len(labels) > 2:
            site_name = labels[-3]
        return site_name
    return None
def on_task_learn(self, task, config):
    """ Make sure all temp files are cleaned up when entries are learned """
    # If download plugin is enabled, it will handle cleanup.
    if 'download' not in task.config:
        download = plugin.get('download', self)
        download.cleanup_temp_files(task)

# Aborted tasks need the same temp-file cleanup as learned ones.
on_task_abort = on_task_learn
@event('plugin.register')
def register_plugin():
    """Register the output/action plugin and its companion input plugin."""
    plugin.register(PluginQBittorrentMod, 'qbittorrent_mod', api_ver=2)
    plugin.register(PluginQBittorrentModInput, 'from_qbittorrent_mod', api_ver=2)
| [
"12468675@qq.com"
] | 12468675@qq.com |
cada27b71e62290be03d07aabfae414035418c63 | 465422bf294af104fe6963900f93d89280c211ec | /generator.py | 5ec266646aac7b586f740b247cc3c0a034133759 | [] | no_license | hevervie/Python | 9272215d31ba8cd83741beb4db80b9fe810be94d | eda55dd49d4b405cf434bdb96357bdf4c4856107 | refs/heads/master | 2021-06-04T10:49:04.122122 | 2016-10-18T13:22:05 | 2016-10-18T13:22:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | #!/usr/bin/env python
#coding=utf-8
# Print the squares of 1..10, produced lazily by a generator expression.
squares = (number * number for number in range(1, 11))
for square in squares:
    print(square)
# Generate the Fibonacci sequence.
def fib(max):
    """Print the first *max* Fibonacci numbers and return 'done'."""
    previous, current = 0, 1
    produced = 0
    while produced < max:
        print(current)
        previous, current = current, previous + current
        produced = produced + 1
    return 'done'


fib(6)
| [
"zhoupans_mail@163.com"
] | zhoupans_mail@163.com |
ee80ca6b31092e5fc1369e74a7618bf280402a55 | cf0ee22c5e880eae8098b09a5476e293cdd5c15e | /mod1.py | b0e32edc899d1fb3d8140d4222272f99e2b40b70 | [] | no_license | 90075sourab/daydictionary | 94124c52fe38b654b8a3025506fb1393c2e3d1fd | 43c37ca1f11fd0a93db6fec0d3768b649c8ea4fb | refs/heads/main | 2023-01-28T22:05:51.152902 | 2020-12-07T17:45:24 | 2020-12-07T17:45:24 | 319,397,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,203 | py | import math
#l=[1,2,None,None,None,8,10,9,None,None,8]
def fillNone(num_list):
    """Replace each bounded run of None values in *num_list*, in place, by
    linear interpolation between the neighbouring numbers.

    A trailing run of None (no right endpoint) is left untouched, as
    before.  NOTE(review): a run starting at index 0 uses num_list[-1] as
    its left endpoint (pre-existing wraparound quirk, preserved).
    """
    in_gap = False
    start_index = 0
    count = 0
    for i in range(len(num_list)):
        if num_list[i] is None and not in_gap:
            # First None of a new gap.
            start_index = i
            in_gap = True
            count = 1
        elif num_list[i] is None and in_gap:
            count += 1
        elif num_list[i] is not None and in_gap:
            # Gap just ended: fill it, then reset the counters.
            # BUG FIX: ``count`` was never reset between gaps, so every gap
            # after the first divided by an inflated (count + 1) and filled
            # with a too-small step.
            setMean(num_list, start_index, i - 1, count)
            in_gap = False
            count = 0


def setMean(num_list, start_index, end_index, count):
    """Fill num_list[start_index..end_index] (inclusive) with evenly spaced
    values between the numbers just outside that range.

    ``count`` is the number of None cells; the step is the floor of the
    endpoint difference divided by (count + 1), negated when descending.
    """
    f_num = num_list[start_index - 1]
    l_num = num_list[end_index + 1]
    if l_num > f_num:
        diff = math.floor((l_num - f_num) / (count + 1))
    else:
        diff = -math.floor((f_num - l_num) / (count + 1))
    for i in range(start_index, end_index + 1):
        num_list[i] = num_list[i - 1] + diff


# Example: l = [1, 2, None, None, None, 8]; fillNone(l); print(l)
| [
"sourabmajh@gmail.com"
] | sourabmajh@gmail.com |
7d167e1feb92203517a6bf08e8597b19369c565e | 42ffa887ca0ac7b54f0473880613865fe523fbfc | /src/viztracer/__init__.py | 38fd0acde24ec07503595c6da251f4e74a45e921 | [
"Apache-2.0"
] | permissive | tianxie1989/viztracer | e61090ac286a5b4ffe4c8f0265fde38bca68837b | 39a6314b2a5a30ede71be96bd5e174b2bdaa2664 | refs/heads/master | 2022-12-11T08:21:25.415858 | 2020-08-21T00:21:00 | 2020-08-21T00:21:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/gaogaotiantian/viztracer/blob/master/NOTICE.txt
from .viztracer import VizTracer
from .flamegraph import FlameGraph
from .decorator import ignore_function
# Package version -- keep in sync with the build metadata.
__version__ = "0.3.0"

# Public API re-exported from the submodules imported above.
__all__ = [
    "__version__",
    "VizTracer",
    "FlameGraph",
    "ignore_function"
]
| [
"gaogaotiantian@hotmail.com"
] | gaogaotiantian@hotmail.com |
9967bfbb48682fff74e8fa93da453b918a2d908b | 43715a10381ec37c275850c2e4f5302cde18de8c | /rooms/models.py | 8544758b5c7d49ad504a4a43c4f38656f611174b | [] | no_license | dongdong-e/airbnb-clone | 443f290baca4ea5c8f22f6c573383d11de4140f4 | 32c083c4e7f562d968639099d8439f26a666b175 | refs/heads/master | 2023-05-02T22:08:32.232594 | 2019-11-25T12:13:13 | 2019-11-25T12:13:13 | 219,305,006 | 0 | 0 | null | 2023-04-21T20:42:00 | 2019-11-03T13:27:34 | Python | UTF-8 | Python | false | false | 2,842 | py | from django.db import models
from django.urls import reverse
from django_countries.fields import CountryField
from core import models as core_models
class AbstractItem(core_models.TimeStampedModel):

    """ Abstract base for simple named lookup models (a name field only). """

    name = models.CharField(max_length=80)

    class Meta:
        # No table is created for this model; subclasses get the fields.
        abstract = True

    def __str__(self):
        return self.name
class RoomType(AbstractItem):

    """ RoomType Model Definition """

    class Meta:
        verbose_name = "Room Type"
        # Alphabetical default ordering in querysets and the admin.
        ordering = ["name"]
class Amenity(AbstractItem):

    """ Amenity Model Definition """

    class Meta:
        # Avoids the auto-generated plural "Amenitys".
        verbose_name_plural = "Amenities"
class Facility(AbstractItem):

    """ Facility Model Definition """

    class Meta:
        # Avoids the auto-generated plural "Facilitys".
        verbose_name_plural = "Facilities"
class HouseRule(AbstractItem):

    """ HouseRule Model Definition """

    class Meta:
        verbose_name = "House Rule"
class Photo(core_models.TimeStampedModel):

    """ Photo Model Definition """

    caption = models.CharField(max_length=80)
    # Uploaded image files are stored under MEDIA_ROOT/room_photos.
    file = models.ImageField(upload_to="room_photos")
    # Deleting a room deletes its photos (CASCADE).
    room = models.ForeignKey("Room", related_name="photos", on_delete=models.CASCADE)

    def __str__(self):
        return self.caption
class Room(core_models.TimeStampedModel):

    """ Room Model Definition """

    name = models.CharField(max_length=140)
    description = models.TextField()
    country = CountryField()
    city = models.CharField(max_length=80)
    price = models.IntegerField()
    address = models.CharField(max_length=140)
    guests = models.IntegerField()
    beds = models.IntegerField()
    bedrooms = models.IntegerField()
    baths = models.IntegerField()
    check_in = models.TimeField()
    check_out = models.TimeField()
    instant_book = models.BooleanField(default=False)
    host = models.ForeignKey(
        "users.User", related_name="rooms", on_delete=models.CASCADE
    )
    room_type = models.ForeignKey(
        "RoomType", related_name="rooms", on_delete=models.SET_NULL, null=True
    )
    amenities = models.ManyToManyField("Amenity", related_name="rooms", blank=True)
    facilities = models.ManyToManyField("Facility", related_name="rooms", blank=True)
    house_rules = models.ManyToManyField("HouseRule", related_name="rooms", blank=True)

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        # Normalise the city name before every save.
        # NOTE(review): str.capitalize lowercases everything after the first
        # letter ('New York' -> 'New york'); str.title may be what's wanted.
        self.city = str.capitalize(self.city)
        super().save(*args, **kwargs)

    def get_absolute_url(self):
        """Return the URL of this room's detail page."""
        return reverse("rooms:detail", kwargs={"pk": self.pk})

    def total_rating(self):
        """Average of the reviews' rating averages, rounded to 2 decimals.

        Returns 0 when the room has no reviews.
        """
        all_reviews = self.reviews.all()
        if not all_reviews:
            return 0
        total = sum(review.rating_average() for review in all_reviews)
        return round(total / len(all_reviews), 2)
| [
"youngdong9800@gmail.com"
] | youngdong9800@gmail.com |
c97349deca021fa02bd1829b3ff9ee1936879849 | e943b5a6580cac653272c5cf85d4d46867c419fb | /3/3_dht.py | 358d35357e9ceca52e82169fdd97479ecad20227 | [] | no_license | afrizaloky/Praktikum-IoT | a505b078136d4e1c5961a3f34c997ee5a39b5856 | b719020bc2e6cff6adf298b24973fc23e93552ff | refs/heads/master | 2022-11-30T01:21:58.045866 | 2020-08-18T14:17:49 | 2020-08-18T14:17:49 | 287,882,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,707 | py | # Libraries import os import time import sys
import paho.mqtt.client as mqtt
import json
import RPi.GPIO as GPIO
import time
# GPIO mode (BOARD / BCM) -- BCM selects Broadcom chip pin numbering.
GPIO.setmode(GPIO.BCM)  # set GPIO pins
# Data pin of the DHT sensor (BCM numbering).
GPIO_DATA = 4
# set GPIO direction (IN / OUT)
GPIO.setup(GPIO_DATA, GPIO.IN)
# ThingsBoard server and the device access token used to authenticate.
THINGSBOARD_HOST = 'demo.thingsboard.io'
ACCESS_TOKEN = 'LD3XPkG3Q4hgCaVqXrBs'
# Data capture and upload interval in seconds; a smaller interval means
# more frequent readings and more traffic.
INTERVAL = 1
sensor_data = {'temperature': 0, 'humidity': 0}
next_reading = time.time()
client = mqtt.Client()
# Authenticate with the device access token.
client.username_pw_set(ACCESS_TOKEN)
# Connect to ThingsBoard using default MQTT port and 60 seconds keepalive interval
client.connect(THINGSBOARD_HOST, 1883, 60)
client.loop_start()
if __name__ == "__main__":
    try:
        while True:
            # NOTE(review): 'dht' is never imported in this file (the header
            # comment suggests the import lines were lost); presumably the
            # Adafruit DHT library -- as written this raises NameError.
            humidity,temperature = dht.read_retry(dht.DHT22, 4)
            humidity = round(humidity, 2)
            temperature = round(temperature, 2)
            print(u"Temperature: {:g}\u00b0C, Humidity: {:g}%".format(temperature, humidity))
            sensor_data['temperature'] = temperature
            sensor_data['humidity'] = humidity
            # Sending humidity and temperature data to ThingsBoard
            client.publish('v1/devices/me/telemetry', json.dumps(sensor_data), 1)
            # Keep a fixed cadence: advance the schedule and sleep only for
            # whatever time remains after reading/publishing.
            next_reading += INTERVAL
            sleep_time = next_reading-time.time()
            if sleep_time > 0:
                time.sleep(sleep_time)
    # Reset by pressing CTRL + C
    except KeyboardInterrupt:
        print("Measurement stopped by User")
        GPIO.cleanup()
        client.loop_stop()
        client.disconnect()
| [
"afrizaloky@outlook.com"
] | afrizaloky@outlook.com |
84bb2ebc15bbae9433e6730fe93e4c27375addca | f19937a6e18fe1b3e53d082e00bb00e931c9f4bb | /element/app/main/__init__.py | 234e186929b64d10542c905c3489a16baafd17bb | [] | no_license | 239103/hello-world | 3ac9a2bb5d61ed22baa30a209f20a8beaf0219fd | ad6434b6d95c8186df387254670f02c735339346 | refs/heads/master | 2021-01-13T00:37:13.662641 | 2016-05-10T06:32:26 | 2016-05-10T06:32:26 | 47,692,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | from flask import Blueprint
main = Blueprint('main', __name__)
from . import views
views.init_apis(main) | [
"cw0319@qq.com"
] | cw0319@qq.com |
d2a67d571a6ae128e18235f827a76b271bc6e6e8 | cbd2eee46663fad5b5375b13c8c21b1b06eb4c6b | /ecloud/code/src/main/python/manor/streamlet/create_nodes.py | 159486c27b7fd7132e26361dfada9a5c35673aba | [] | no_license | 1026237416/Python | ef474ee40d7efcd6dabb6fb0ecba81b4dcfc7e14 | ffa8f9ffb8bfec114b0ca46295db05c4213c4c30 | refs/heads/master | 2021-07-05T00:57:00.456886 | 2019-04-26T10:13:46 | 2019-04-26T10:13:46 | 114,510,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,090 | py | # coding=utf-8
import yaml
from tornado import gen
from manor.screwdriver import stack_util
from manor.screwdriver.vendor_ecloud import list_app_resources
from manor.streamlet import StreamletBase,get_stack_resources
from manor.streamlet import download_path
from manor.util import generals
from manor.util import redis_tool
# Heat stack status values polled by CreateNodes.check_finish().
SUCCESS_FLAG='CREATE_COMPLETE'
CREATING_FLAG='CREATE_IN_PROGRESS'
def get_instance(params,node_id,serial):
    """Factory used by the streamlet framework to build this step.

    Note the argument order changes: CreateNodes takes (params, serial,
    node_id).
    """
    return CreateNodes(params,serial,node_id)
class CreateNodes(StreamletBase):
    """Streamlet step that creates a group of nodes via a heat stack."""

    def __init__(self,params,serial,node_id):
        # Base class expects (node_id, params, serial) -- note the reorder.
        super(CreateNodes,self).__init__(node_id,params,serial)
        self.result=None
        self.created_resources=[]  # stack resources, resolved lazily
        self.stack_status=''       # last polled heat stack status
        self.ips=[]                # ips of the created nodes
@gen.coroutine
def execute(self):
    """Build the heat data module and create the stack (runs only once
    per step instance; subsequent calls are no-ops)."""
    if not self.executed:
        self.executed=True
        # todo: check input parameters...
        self.log.debug('params:')
        self.log.debug(self.params)
        data_module={
            'name':'create node',
            'resources':{},
            'group_name':self.get_resource('group_name')
        }
        self.log.debug('calculate data module ..')
        try:
            if self.get_resource('group_name')=='':
                raise Exception('group name is empty.')
            # Enforce the optional per-group instance cap ('max').
            if self.get_resource('max')!='':
                _max=int(self.get_resource('max'))
                group_name=self.get_resource('group_name')
                rs=yield list_app_resources(self.serial)
                rs=[_ for _ in rs if _['group_name']==group_name]
                if len(rs)>=_max:
                    raise Exception('manor.create.node.upper.limited')
            os_name=yield download_path(self.get_resource('image'))
            data_module['resources'][self.get_resource('group_name')]={
                "count":self.get_resource('amount'),
                "group_name":self.get_resource('group_name'),
                "image":self.get_resource('image'),
                'flavor':self.get_resource('flavors'),
                "memory":self.get_resource('memory'),
                "cores":self.get_resource('cores'),
                'tenant':self.get_resource('tenant'),
                'size':self.get_resource('disk_capacity'),
                "os":os_name,
                "network":[
                    {
                        "network":self.get_resource('network'),
                        "subnet":self.get_resource('subnet')
                    }
                ]
            }
            self.log.debug(data_module)
            self.stack_id=yield stack_util.create_action(data_module,
                                                         self.serial)
        except Exception as e:
            self.log.error(generals.trace())
            raise e
@gen.coroutine
def calculate_created_resources(self):
    """Fetch and cache the resources of the created stack."""
    resources=yield get_stack_resources(self.stack_id)
    self.log.debug('calculate created:\n %s'%yaml.safe_dump(resources))
    self.created_resources=resources
@gen.coroutine
def get_stack_status(self):
    """Refresh self.stack_status from the heat API."""
    future=yield stack_util.get_stack(self.stack_id)
    self.stack_status=future.to_dict()['stack_status']
def get_resource(self, key):
    """Return the step parameter *key*, or '' when it is absent."""
    # Idiom: single dict lookup via .get() instead of a membership test
    # followed by a second lookup.
    return self.params.get(key, '')
def __ips_not_in_road_map(self, ips):
    """Return the subset of *ips* not yet present in the redis road map."""
    # PERF FIX: the original evaluated self.__get_road_map() -- a redis
    # query -- once per element inside the comprehension condition; fetch
    # it once and use a set for O(1) membership tests.
    road_map = set(self.__get_road_map())
    return [_ for _ in ips if _ not in road_map]
def __get_road_map(self):
    """Return the list of node identifiers registered in redis.

    Keys match 'mapup*' and are '_$_'-separated; field index 3 is the
    piece compared against node ips in check_finish().
    """
    r=redis_tool.get_it()
    road_map=r.keys('mapup*')
    return [_.split('_$_')[3] for _ in road_map]
def check_finish(self):
    """Poll for completion.

    Note: this method runs on a worker thread and is invoked once per
    second (translated from the original Chinese docstring).  Returns
    True only once the stack is CREATE_COMPLETE and every created ip has
    appeared in the redis road map.
    """
    try:
        self.log.debug('create_nodes step. check finish. stack_id %s'%
                       self.stack_id)
        if self.stack_id is None:
            # Stack creation has not started (or failed before id assignment).
            return False
        if self.stack_status!=CREATING_FLAG:
            if self.stack_status==SUCCESS_FLAG:
                # Resolve the stack's resources and their ips once, lazily.
                if len(self.created_resources)==0:
                    self.calculate_created_resources()
                if len(self.ips)==0:
                    self.ips=[_['ip'] for _ in self.created_resources]
                checked=[_ for _ in self.ips if _ in self.__get_road_map()]
                self.log.debug('%s - %s'%(self.ips,checked))
                if len(self.ips)>0 and self.ips==checked:
                    return True
                else:
                    return False
            else:
                # Neither creating nor complete (e.g. FAILED): refresh status.
                self.get_stack_status()
        else:
            self.log.debug('the stack stack_status is %s'%self.stack_status)
            self.get_stack_status()
        return False
    except:
        # NOTE(review): bare except catches everything, including
        # KeyboardInterrupt; 'except Exception' would be safer.
        self.log.error(generals.trace())
        raise Exception('error.manor.stream.check.create.node.finish')
| [
"1026237416@qq.com"
] | 1026237416@qq.com |
02b6660eee24fb762865d74b05c9a9efb1bdd81b | 0de10fdcc1ef06a33611219a464fbdd1c7c4ae8b | /pickupfinder/trunk/bin/pip-2.7 | 9de063f867c62edc35320e97bffd0d7c012351db | [] | no_license | steinbachr/WebDev | aa3f1577aed94be4ef9dbb3170c5ddebe309df66 | 7a8c53e1816ba0155ac818a83027000212396180 | refs/heads/master | 2020-06-04T08:59:04.309930 | 2016-12-07T01:04:34 | 2016-12-07T01:04:34 | 7,824,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | 7 | #!/Users/Bobby/Documents/Projects/PickupFinder/trunk/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==1.1','console_scripts','pip-2.7'
# Auto-generated setuptools console-script wrapper; do not edit by hand.
__requires__ = 'pip==1.1'
import sys
from pkg_resources import load_entry_point

# Resolve the registered 'pip-2.7' entry point and exit with its status.
sys.exit(
    load_entry_point('pip==1.1', 'console_scripts', 'pip-2.7')()
)
| [
"steinbach.r@husky.neu.edu"
] | steinbach.r@husky.neu.edu |
7ab917ac2d5b6dbd613df8ad73eaa04c6fd703b9 | e042a2437aa60fdc966c4bb97d87f27fb6378c9c | /vae-mnist/utils.py | cbc53886b453559793ea1d4b8a743196b76eca8f | [] | no_license | JZDBB/OOC-for-research | a8653f69a01fe9edd024411234ca422e220a437f | 265fbd1732460acbe2a36f4273635485abf0eb0c | refs/heads/master | 2020-07-04T04:08:51.130198 | 2019-08-21T13:00:38 | 2019-08-21T13:00:38 | 202,150,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | import numpy as np
def merge(images, size):
    """Tile a batch of equal-sized 2-D images into one big image.

    Parameters
    ----------
    images : array-like of shape (count, h, w)
        Batch of single-channel images.
    size : (rows, cols)
        Grid layout; ``rows * cols`` should cover ``len(images)``.

    Returns
    -------
    numpy.ndarray of shape (h * rows, w * cols)
    """
    h, w = images.shape[1], images.shape[2]
    img = np.zeros((h * size[0], w * size[1]))
    for idx, image in enumerate(images):
        col = idx % size[1]
        # Bug fix: floor division -- plain `/` yields a float under
        # Python 3, which is not a valid slice index.
        row = idx // size[1]
        img[row * h:row * h + h, col * w:col * w + w] = image
    return img
| [
"oxuyining@gmail.com"
] | oxuyining@gmail.com |
c8856f83a16833e017ba42874f29a120fe5c05d5 | 8d46b7767ee1dc8737247772fe8696263676fa09 | /app/recipe/serializers.py | 488f4974a99559614e326bb88b86aa0e6a8e0975 | [] | no_license | Diaga/recipe-api | 866997d58348923d039fcf18049f0f8e5738ce13 | e3796253c82e447566e46978e7fd67b0b01ce892 | refs/heads/master | 2020-06-22T18:40:32.851888 | 2019-07-26T17:59:51 | 2019-07-26T17:59:51 | 197,775,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,422 | py | from rest_framework import serializers
from core.models import Tag, Ingredient, Recipe
class TagSerializer(serializers.ModelSerializer):
    """Serializer for the Tag model: exposes ``id`` and ``name``."""
    class Meta:
        model = Tag
        fields = ('id', 'name')
        read_only_fields = ('id',)  # primary key is assigned by the server
class IngredientSerializer(serializers.ModelSerializer):
    """Serializer for the Ingredient model: exposes ``id`` and ``name``."""
    class Meta:
        model = Ingredient
        fields = ('id', 'name')
        read_only_fields = ('id',)  # primary key is assigned by the server
class RecipeSerializer(serializers.ModelSerializer):
    """Serializer for the Recipe model.

    Tags and ingredients are represented by their primary keys only.
    """
    # Related objects are serialized as lists of primary keys.
    ingredients = serializers.PrimaryKeyRelatedField(
        many=True,
        queryset=Ingredient.objects.all()
    )
    tags = serializers.PrimaryKeyRelatedField(
        many=True,
        queryset=Tag.objects.all()
    )
    class Meta:
        model = Recipe
        fields = ('id', 'title', 'time_minutes', 'price',
                  'link', 'tags', 'ingredients')
        read_only_fields = ('id',)  # primary key is assigned by the server
class RecipeDetailSerializer(RecipeSerializer):
    """Recipe serializer variant with nested, read-only tags/ingredients."""
    ingredients = IngredientSerializer(many=True, read_only=True)
    tags = TagSerializer(many=True, read_only=True)
class RecipeImageSerializer(serializers.ModelSerializer):
    """Serializer used only for uploading an image to a recipe."""
    class Meta:
        model = Recipe
        fields = ('id', 'image')
        read_only_fields = ('id',)  # primary key is assigned by the server
| [
"diagaaos@gmail.com"
] | diagaaos@gmail.com |
3890719b1de619a46527dd653f3b42ca89a5dcb1 | 430cfece27c54180baf29b3199a67f79fe7d155c | /pygmt/tests/test_grdimage.py | 5ad3913c5bef4b786a5d1de55c30b647ec31619e | [
"BSD-3-Clause"
] | permissive | JamieJQuinn/pygmt | 139f25a3f4280b2d2d43c3fa63179437a9227d31 | 9269fbcb2fc7fca2d5c412acdb794be375c260ab | refs/heads/main | 2023-08-24T16:19:27.673739 | 2021-10-29T09:51:44 | 2021-10-29T09:51:44 | 384,119,354 | 0 | 0 | BSD-3-Clause | 2021-07-08T12:37:21 | 2021-07-08T12:37:21 | null | UTF-8 | Python | false | false | 7,213 | py | """
Test Figure.grdimage.
"""
import numpy as np
import pytest
import xarray as xr
from pygmt import Figure
from pygmt.datasets import load_earth_relief
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers.testing import check_figures_equal
@pytest.fixture(scope="module", name="grid")
def fixture_grid():
    """
    Load the grid data from the sample earth_relief file.

    Uses gridline registration so grid nodes fall exactly on grid lines.
    """
    return load_earth_relief(registration="gridline")
@pytest.fixture(scope="module", name="grid_360")
def fixture_grid_360(grid):
    """
    Earth relief grid with longitude range from 0 to 360 (instead of -180 to
    180).
    """
    _grid = grid.copy()  # get a copy of original earth_relief grid
    # Dropping "source" prevents the copy from being re-read from the file.
    _grid.encoding.pop("source")  # unlink earth_relief NetCDF source
    _grid["lon"] = np.arange(0, 361, 1)  # convert longitude from -180:180 to 0:360
    return _grid
@pytest.fixture(scope="module", name="xrgrid")
def fixture_xrgrid():
    """
    Create a sample xarray.DataArray grid for testing.

    The data is the outer product of a 0-to-1 latitude ramp and a sinusoid
    in longitude, giving a smooth (179, 360) global grid.
    """
    longitude = np.arange(0, 360, 1)
    latitude = np.arange(-89, 90, 1)
    x = np.sin(np.deg2rad(longitude))
    y = np.linspace(start=0, stop=1, num=179)
    data = y[:, np.newaxis] * x  # shape (179, 360): lat rows, lon columns
    return xr.DataArray(
        data,
        coords=[
            ("latitude", latitude, {"units": "degrees_north"}),
            ("longitude", longitude, {"units": "degrees_east"}),
        ],
        attrs={"actual_range": [-1, 1]},
    )
@pytest.mark.mpl_image_compare
def test_grdimage(grid):
    """
    Plot an image using an xarray grid.
    """
    fig = Figure()
    # Mollweide (W) projection centered on longitude 0, 6-inch width.
    fig.grdimage(grid, cmap="earth", projection="W0/6i")
    return fig
@pytest.mark.mpl_image_compare
def test_grdimage_slice(grid):
    """
    Plot an image using an xarray grid that has been sliced.
    """
    # Keep only latitudes between 30S and 30N before plotting.
    grid_ = grid.sel(lat=slice(-30, 30))
    fig = Figure()
    fig.grdimage(grid_, cmap="earth", projection="M6i")
    return fig
@pytest.mark.mpl_image_compare
def test_grdimage_file():
    """
    Plot an image using file input.
    """
    fig = Figure()
    fig.grdimage(
        "@earth_relief_01d_g",  # remote GMT sample grid, fetched by name
        cmap="ocean",
        region=[-180, 180, -70, 70],
        projection="W0/10i",
        shading=True,
    )
    return fig
@check_figures_equal()
@pytest.mark.parametrize(
    "shading",
    [True, 0.5, "+a30+nt0.8", "@earth_relief_01d_g+d", "@earth_relief_01d_g+a60+nt0.8"],
)
def test_grdimage_shading_xarray(grid, shading):
    """
    Test that shading works well for xarray.

    The ``shading`` can be True, a constant intensity, some modifiers, or
    a grid with modifiers.

    See https://github.com/GenericMappingTools/pygmt/issues/364 and
    https://github.com/GenericMappingTools/pygmt/issues/618.
    """
    # Reference plots from the remote file, test from the in-memory xarray
    # grid; both must render identically for every shading variant.
    fig_ref, fig_test = Figure(), Figure()
    kwargs = dict(
        region=[-180, 180, -90, 90],
        frame=True,
        projection="Cyl_stere/6i",
        cmap="geo",
        shading=shading,
    )
    fig_ref.grdimage("@earth_relief_01d_g", **kwargs)
    fig_test.grdimage(grid, **kwargs)
    return fig_ref, fig_test
@pytest.mark.xfail(
    reason="Incorrect scaling of geo CPT on xarray.DataArray grdimage plot."
    "See https://github.com/GenericMappingTools/gmt/issues/5294",
)
@check_figures_equal()
def test_grdimage_grid_and_shading_with_xarray(grid, xrgrid):
    """
    Test that shading works well when xarray.DataArray is input to both the
    ``grid`` and ``shading`` arguments.
    """
    # Same shading grid on both figures; only the main grid source differs.
    fig_ref, fig_test = Figure(), Figure()
    fig_ref.grdimage(
        grid="@earth_relief_01d_g", region="GL", cmap="geo", shading=xrgrid, verbose="i"
    )
    fig_ref.colorbar()
    fig_test.grdimage(grid=grid, region="GL", cmap="geo", shading=xrgrid, verbose="i")
    fig_test.colorbar()
    return fig_ref, fig_test
def test_grdimage_fails():
    """
    Should fail for unrecognized input.
    """
    fig = Figure()
    with pytest.raises(GMTInvalidInput):
        # A bare 2-D numpy array (neither a file name nor an xarray grid
        # with coordinates) must be rejected.
        fig.grdimage(np.arange(20).reshape((4, 5)))
@pytest.mark.mpl_image_compare
def test_grdimage_over_dateline(xrgrid):
    """
    Ensure no gaps are plotted over the 180 degree international dateline.

    Specifically checking that `xrgrid.gmt.registration = 0` sets `GMT_GRID_NODE_REG`,
    and that `xrgrid.gmt.gtype = 1` sets `GMT_GRID_IS_GEO`. Note that
    there would be a gap over the dateline if a pixel registered grid is used.

    See also https://github.com/GenericMappingTools/pygmt/issues/375.
    """
    fig = Figure()
    assert xrgrid.gmt.registration == 0  # gridline registration
    xrgrid.gmt.gtype = 1  # geographic coordinate system
    # A global region on a tiny 1 cm plot is enough to reveal a dateline gap.
    fig.grdimage(grid=xrgrid, region="g", projection="A0/0/1c")
    return fig
@pytest.mark.mpl_image_compare
def test_grdimage_global_subset(grid_360):
    """
    Ensure subsets of grids are plotted correctly on a global map.

    Specifically checking that xarray.DataArray grids can wrap around the left
    and right sides on a Mollweide projection (W) plot correctly. Note that a
    Cartesian grid is used here instead of a Geographic grid (i.e.
    GMT_GRID_IS_CARTESIAN). This is a regression test for
    https://github.com/GenericMappingTools/pygmt/issues/732.
    """
    # Get a slice of South America and Africa only (lat=-90:31, lon=-180:41)
    sliced_grid = grid_360[0:121, 0:221]
    assert sliced_grid.gmt.registration == 0  # gridline registration
    assert sliced_grid.gmt.gtype == 0  # Cartesian coordinate system
    # Plot the subset on a full global region to exercise the wrap-around.
    fig = Figure()
    fig.grdimage(
        grid=sliced_grid, cmap="vik", region="g", projection="W0/3.5c", frame=True
    )
    return fig
@check_figures_equal()
@pytest.mark.parametrize("lon0", [0, 123, 180])
@pytest.mark.parametrize("proj_type", ["H", "W"])
def test_grdimage_central_meridians(grid, proj_type, lon0):
    """
    Test that plotting a grid with different central meridians (lon0) using
    Hammer (H) and Mollweide (W) projection systems work.
    """
    # Reference uses the remote file; test uses the in-memory xarray grid.
    fig_ref, fig_test = Figure(), Figure()
    fig_ref.grdimage(
        "@earth_relief_01d_g", projection=f"{proj_type}{lon0}/15c", cmap="geo"
    )
    fig_test.grdimage(grid, projection=f"{proj_type}{lon0}/15c", cmap="geo")
    return fig_ref, fig_test
# Cylindrical Equidistant (Q) projections plotted with xarray and NetCDF grids
# are still slightly different with an RMS error of 25, see issue at
# https://github.com/GenericMappingTools/pygmt/issues/390
# TO-DO remove tol=1.5 and pytest.mark.xfail once bug is solved in upstream GMT
@check_figures_equal(tol=1.5)
@pytest.mark.parametrize("lat0", [0, 30])
@pytest.mark.parametrize("lon0", [0, 123, 180])
@pytest.mark.parametrize("proj_type", [pytest.param("Q", marks=pytest.mark.xfail), "S"])
def test_grdimage_central_meridians_and_standard_parallels(grid, proj_type, lon0, lat0):
    """
    Test that plotting a grid with different central meridians (lon0) and
    standard_parallels (lat0) using Cylindrical Equidistant (Q) and General
    Stereographic (S) projection systems work.
    """
    # Reference uses the remote file; test uses the in-memory xarray grid.
    fig_ref, fig_test = Figure(), Figure()
    fig_ref.grdimage(
        "@earth_relief_01d_g", projection=f"{proj_type}{lon0}/{lat0}/15c", cmap="geo"
    )
    fig_test.grdimage(grid, projection=f"{proj_type}{lon0}/{lat0}/15c", cmap="geo")
    return fig_ref, fig_test
| [
"noreply@github.com"
] | JamieJQuinn.noreply@github.com |
f74296653aa5f909d55be6b01db02cd11a8f0142 | 69533190b829ae8d37fe87e6990ecb9cc250bef3 | /old/teach_pendant/switch_map.py | d91d5db1a81cd2eaa23f0f5cc8e4f22691e1cba2 | [] | no_license | chxb1987/idx6dof | a3ebd70d9901845b3a72f611e021caaba8814602 | b6a2a1b79673cdc3d929c469116ff4eaf3f7583d | refs/heads/master | 2020-08-03T21:46:51.620409 | 2017-06-14T20:50:22 | 2017-06-14T20:50:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,074 | py | SWITCH_UP=1
SWITCH_DOWN=3
# Scan-code table: (scan_code, switch_number, lever_position).
# Positions use the SWITCH_UP / SWITCH_DOWN constants.
sw_map = (
    ( 4, 2, SWITCH_UP),
    ( 12, 2, SWITCH_DOWN),
    ( 16, 17, SWITCH_UP),
    ( 17, 16, SWITCH_UP),
    ( 18, 15, SWITCH_UP),
    ( 19, 14, SWITCH_UP),
    ( 20, 13, SWITCH_UP),
    ( 21, 12, SWITCH_UP),
    ( 22, 10, SWITCH_UP),
    ( 23, 11, SWITCH_UP),
    ( 24, 17, SWITCH_DOWN),
    ( 25, 16, SWITCH_DOWN),
    ( 26, 15, SWITCH_DOWN),
    ( 27, 14, SWITCH_DOWN),
    ( 28, 13, SWITCH_DOWN),
    ( 29, 12, SWITCH_DOWN),
    ( 30, 10, SWITCH_DOWN),
    ( 31, 11, SWITCH_DOWN),
    ( 32, 7, SWITCH_UP),
    ( 33, 6, SWITCH_UP),
    ( 34, 5, SWITCH_UP),
    ( 35, 4, SWITCH_UP),
    ( 36, 3, SWITCH_UP),
    ( 37, 8, SWITCH_UP),
    ( 38, 1, SWITCH_UP),
    ( 39, 9, SWITCH_UP),
    ( 40, 7, SWITCH_DOWN),
    ( 41, 6, SWITCH_DOWN),
    ( 42, 5, SWITCH_DOWN),
    ( 43, 4, SWITCH_DOWN),
    ( 44, 3, SWITCH_DOWN),
    ( 45, 8, SWITCH_DOWN),
    ( 46, 1, SWITCH_DOWN),
    ( 47, 9, SWITCH_DOWN),
    )
# Code generator (Python 2): emits one C "case" statement per scan code,
# invoking the SET_SW_UP / SET_SW_DOWN macro with the switch number.
for sw_code, sw_n, sw_pos in sw_map:
    if sw_pos == SWITCH_UP:
        vn = 'this->swbits_ups'  # NOTE(review): vn is never used in the output
        mn = 'SET_SW_UP'
    else:
        vn = 'this->swbits_downs'
        mn = 'SET_SW_DOWN'
    print "case {sw_code}: {mn}({sw_n}); break; ".format(**locals())
"eric@clarinova.com"
] | eric@clarinova.com |
28c30da6018c2bb5ce3deb9c731631034ca15789 | 25ec110360c3d743c33528a5dac59cf0dd997254 | /site_KFU/registration/functions.py | 8d875a482538a22a53b3fed38f47e186d999aeb1 | [] | no_license | Nikiouch/KFUsite | 9130b92d6daceb4ef1ac9ded327d0d3e4ecba3e1 | c786d31723b89710200de23086d5b338522b8258 | refs/heads/master | 2021-08-29T01:35:29.637617 | 2017-12-11T15:01:17 | 2017-12-11T15:01:17 | 113,872,790 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py | import string, random
# Cyrillic-to-Latin transliteration table; every key is a single character.
alphabet = {'а': 'a', 'б': 'b', 'в': 'v', 'г': 'g', 'д': 'd', 'е': 'e',
            'ё': 'yo', 'ж': 'zh', 'з': 'z', 'и': 'i', 'й': 'y', 'к': 'k',
            'л': 'l', 'м': 'm', 'н': 'n', 'о': 'o', 'п': 'p', 'р': 'r',
            'с': 's', 'т': 't', 'у': 'u', 'ф': 'f', 'х': 'h', 'ц': 'c',
            'ч': 'ch', 'ш': 'sh', 'щ': 'sh', 'ъ': 'y', 'ы': 'y', 'ь': "'",
            'э': 'e', 'ю': 'yu', 'я': 'ya',
            'А': 'A', 'Б': 'B', 'В': 'V', 'Г': 'G', 'Д': 'D', 'Е': 'E',
            'Ё': 'Yo', 'Ж': 'Zh', 'З': 'Z', 'И': 'I', 'Й': 'Y', 'К': 'K',
            'Л': 'L', 'М': 'M', 'Н': 'N', 'О': 'O', 'П': 'P', 'Р': 'R',
            'С': 'S', 'Т': 'T', 'У': 'U', 'Ф': 'F', 'Х': 'H', 'Ц': 'Ts',
            'Ч': 'Ch', 'Ш': 'Sh', 'Щ': 'Sh', 'Ъ': 'Y', 'Ы': 'Y',
            'Ь': "'", 'Э': 'E', 'Ю': 'Yu', 'Я': 'Ya'}

# Precomputed translation table: str.translate makes a single pass over the
# input instead of one full str.replace scan per alphabet entry.
_TRANSLIT_TABLE = str.maketrans(alphabet)


def eng_translate(word):
    """Return *word* with Cyrillic characters transliterated to Latin.

    Characters not present in ``alphabet`` are left unchanged.
    """
    return word.translate(_TRANSLIT_TABLE)
def GeneratePassword(login):
    """Build a numeric password by concatenating the code point of each
    character in *login* (empty login yields an empty password)."""
    return "".join(str(ord(ch)) for ch in login)
"hanouchh@gmail.com"
] | hanouchh@gmail.com |
84c9f0305d09c62aeebc0f058ea4b46557611c42 | 627eccf21233ec99a8fb0a770d4f11a7f887eac7 | /Lab-7/es3.py | dad87720f1f3f43914e0d5c634e4f520f03ea65e | [] | no_license | frollo/AdvancedProgramming | 76e15cc5fd9ac21fb1db36806d8a3d2136da1252 | f4c27a9b1067ead9720ea23d6630d5b7611d4d68 | refs/heads/master | 2021-01-21T04:40:35.984135 | 2016-06-14T16:46:09 | 2016-06-14T16:46:09 | 49,653,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,419 | py | from functools import reduce
class Matrix(object):
    """Dense matrix stored row-major as a list of row lists.

    Supports equality, element-wise addition, matrix and scalar
    multiplication, transposition, copying and pretty-printing.
    """

    def __init__(self, height, width, values):
        """Build a ``height`` x ``width`` matrix from flat row-major *values*.

        Only complete rows are kept, mirroring the original behaviour when
        ``len(values)`` is not an exact multiple of ``width``.
        """
        self.height = height
        self.width = width
        self.matrix = [values[r * width:(r + 1) * width]
                       for r in range(len(values) // width)]

    def __eq__(self, other):
        """Two matrices are equal iff dimensions and every entry match."""
        if self.height != other.height or self.width != other.width:
            return False
        return self.matrix == other.matrix

    def __add__(self, other):
        """Element-wise sum; raises if the dimensions differ."""
        if self.height != other.height or self.width != other.width:
            raise Exception("Cannot sum matrix of different dimensions!")
        summed = [a + b
                  for row_a, row_b in zip(self.matrix, other.matrix)
                  for a, b in zip(row_a, row_b)]
        return Matrix(self.height, self.width, summed)

    def __mul__(self, other):
        """Matrix product when *other* is a Matrix, scalar product otherwise.

        Bug fixes vs. the original: the compatibility check now compares
        ``self.width`` with ``other.height`` (not height vs. width), the
        result has shape (self.height, other.width) so non-square products
        work, and the scalar path no longer relies on catching arbitrary
        exceptions (which also printed debug output).
        """
        if isinstance(other, Matrix):
            if self.width != other.height:
                raise Exception("Cannot multiply: incompatible dimensions!")
            product = [sum(self.matrix[j][x] * other.matrix[x][i]
                           for x in range(self.width))
                       for j in range(self.height)
                       for i in range(other.width)]
            return Matrix(self.height, other.width, product)
        # Scalar multiplication.
        scaled = [el * other for row in self.matrix for el in row]
        return Matrix(self.height, self.width, scaled)

    def tras(self):
        """Return the transpose (bug fix: result is width x height)."""
        flat = [self.matrix[j][i]
                for i in range(self.width)
                for j in range(self.height)]
        return Matrix(self.width, self.height, flat)

    def copy(self):
        """Return an independent copy of this matrix."""
        return Matrix(self.height, self.width,
                      [el for row in self.matrix for el in row])

    def __repr__(self):
        """Render each row as a '| a b |' line."""
        rep = ""
        for line in self.matrix:
            rep += "|"
            for el in line:
                rep += " {0}".format(el)
            rep += " |\n"
        return rep
if __name__ == '__main__':
    # Ad-hoc demo of the Matrix operations (prints results, no assertions).
    muno = Matrix(2,2,[1,2,3,4])
    mdue = Matrix(2,2,[1,2,3,4])
    mtre = Matrix(2,2,[5,6,7,8])
    mquattro = Matrix(1,1, [0])
    print(muno == mdue)  # identical contents
    print(mtre == mdue)  # different contents
    print(mtre == mquattro)  # different dimensions
    mcinque = muno.copy()
    print(mcinque == muno)  # a copy compares equal to its source
    print (muno + mdue)
    print (mquattro * mquattro)  # 1x1 product
    print(muno * mdue)
    mid = Matrix(2,2,[1,0,0,1])  # 2x2 identity
    print(mid * muno)  # identity * A == A
| [
"rossi.lorenzo@yandex.com"
] | rossi.lorenzo@yandex.com |
8b10787326d6e24fc474df2d039a94863b0a3aab | 9969ee7cfa666c3ba4ec101fd983284d31eadf35 | /leetcode/remove-element.py | f14c018552b59c22efba9b82ec09b70be98fa85a | [] | no_license | QinGeneral/Algorithm | d0385575fdde3aa0b535b3c15ecbadf2c3cc3ff4 | 251b2b8bab88d3cbd9463b7a3c3120587aa281a2 | refs/heads/master | 2023-08-18T18:45:28.958941 | 2023-08-08T14:46:53 | 2023-08-08T14:46:53 | 265,273,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 913 | py | class BetterSolution:
def removeElement(self, nums, val: int) -> int:
length_range = range(len(nums))
length = 0
for i in length_range:
if nums[i] != val:
nums[length] = nums[i]
length += 1
return length
class Solution:
    """Quadratic variant: each kept element is swapped back over the first
    earlier slot still holding ``val``, pushing the ``val``s toward the tail."""

    def removeElement(self, nums, val: int) -> int:
        kept = 0
        for idx in range(len(nums)):
            if nums[idx] == val:
                continue
            kept += 1
            # Move this kept element into the earliest slot occupied by val.
            for earlier in range(idx):
                if nums[earlier] == val:
                    nums[earlier], nums[idx] = nums[idx], val
                    break
        return kept
# NOTE(review): rebinding the class names to instances makes the classes
# unreachable afterwards; fine for a one-shot script, but rename if reused.
Solution = Solution()
print(Solution.removeElement([0, 1, 2, 2, 3, 0, 4, 2], 2))
BetterSolution = BetterSolution()
print(BetterSolution.removeElement([0, 1, 2, 2, 3, 0, 4, 2], 2))
| [
"qingeneral@gmail.com"
] | qingeneral@gmail.com |
8dacb18cc5f4b37537297886d225802a28f1729d | 7746611d41fb12db544bb871b270258b563fd108 | /Instanciating and global variable.py | 5d3723d6b589de161e2c4122ac197b1608e8b740 | [] | no_license | christianjoy/OOP | d1c5411e83b8c51ae4cf0898fc85e53e06ef80c2 | a17de30c7f98dbfa48857e69d6179d7d417fe46b | refs/heads/master | 2021-01-19T14:56:50.828685 | 2012-08-29T02:33:24 | 2012-08-29T02:33:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py |
class Score:
    """Running score counter for a single game.

    Bug fixes vs. the original: the class statement was missing its colon
    (SyntaxError), and ``show`` referenced an undefined global ``score``
    instead of ``self.score`` (NameError at runtime).
    """

    def __init__(self):
        self.score = 0  # current score

    def Add(self):
        """Increase the score by one point."""
        self.score += 1

    def show(self):
        """Print the current score."""
        print("The score is %i" % (self.score))
# Build one score counter per game.
Basketball = Score()
Volleyball = Score()
Chess = Score()

# Record the points scored in each game.
for _ in range(10):
    Basketball.Add()
for _ in range(5):
    Volleyball.Add()
for _ in range(3):
    Chess.Add()

# Display the final tallies.
Basketball.show()
Volleyball.show()
Chess.show()
| [
"christianjoyventura@yahoo.com"
] | christianjoyventura@yahoo.com |
9c5ae5f21eb5f1a36093fe5f764a1835128a01d2 | dc67e70a303f265ee6cb4c1a2d61fe811053fb3d | /beginner/095/C.py | e641e597678f29556c9fceffadc8270b970f8ac8 | [] | no_license | cry999/AtCoder | d39ce22d49dfce805cb7bab9d1ff0dd21825823a | 879d0e43e3fac0aadc4d772dc57374ae72571fe6 | refs/heads/master | 2020-04-23T13:55:00.018156 | 2019-12-11T05:23:03 | 2019-12-11T05:23:03 | 171,214,066 | 0 | 0 | null | 2019-05-13T15:17:02 | 2019-02-18T04:24:01 | Python | UTF-8 | Python | false | false | 623 | py | def half_and_half(
A: int, B: int, C: int, X: int, Y: int) -> int:
"""
:param A: A ピザの値段
:param B: B ピザの値段
:param C: AB ピザの値段
:param X: A ピザの必要数
:param Y: B ピザの必要数
"""
min_price = float('inf')
for num_ab in range(max(X, Y)+1):
num_a, num_b = max(0, X-num_ab), max(0, Y-num_ab)
price = num_a*A + num_b*B + 2*num_ab*C
min_price = min(min_price, price)
return min_price
if __name__ == "__main__":
A, B, C, X, Y = map(int, input().split())
ans = half_and_half(A, B, C, X, Y)
print(ans)
| [
"when.the.cry999@gmail.com"
] | when.the.cry999@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.