seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
73605360338 | # !/usr/bin/python3
# -*- coding: utf-8 -*-
from typing import List
# @Author: 花菜
# @File: 46全排列.py
# @Time : 2022/11/2 23:09
# @Email: lihuacai168@gmail.com
class Solution:
    def permute(self, nums: List[int]) -> List[List[int]]:
        """Return all permutations of nums via backtracking.

        Each recursion level appends one still-unused element to `path`;
        `used` marks elements already on the current path.
        """
        def backtracking(nums, used, path, paths):
            if len(path) == len(nums):
                # Full-length path: record a copy of the permutation.
                paths.append(path[:])
                # Prune: no unused elements remain, so skip the loop below
                # (the original fell through and scanned all of nums for nothing).
                return
            # Candidates always start from index 0; `used` filters out the
            # elements already placed on the current path.
            for i in range(len(nums)):
                if used[i]:
                    # Skip elements already in the current permutation.
                    continue
                path.append(nums[i])
                used[i] = True
                backtracking(nums, used, path, paths)
                # Backtrack: undo the choice before trying the next candidate.
                used[i] = False
                path.pop()

        paths = []
        # One flag per element, tracking which values are on the current path.
        used = [False for _ in range(len(nums))]
        backtracking(nums, used, [], paths)
        return paths
| lihuacai168/LeetCode | 排列组合/46全排列.py | 46全排列.py | py | 1,079 | python | en | code | 4 | github-code | 13 |
11670484523 | from requests import *
# pymongo restored below: the module calls pymongo.MongoClient in get_secrets()
# and the commented-out import made that a NameError.
import json
import pymongo
import time
key = ""              # Congress API key; populated by get_secrets()/get_secrets_just_key()
mongo = None          # pymongo client; populated by get_secrets()
db = None             # handle to the PoliSee database; populated by get_secrets()
request_counter = 0   # total API requests issued this run (progress reporting)
# DONE
def get_secrets(filename: str):
    """Load credentials from `filename` and open the MongoDB connection.

    The secrets file has four lines, in order: API key, Mongo username,
    Mongo password, Mongo cluster name. Populates the module globals
    `key`, `mongo` and `db`. Requires the module-level `import pymongo`.
    """
    with open(filename) as file:
        global key
        key = file.readline().strip("\n")
        username = file.readline().strip("\n")
        password = file.readline().strip("\n")
        cluster_name = file.readline().strip("\n")
        mongo_uri = f'mongodb+srv://{username}:{password}@{cluster_name}.buaixd5.mongodb.net/?retryWrites=true&w=majority'
        global mongo
        mongo = pymongo.MongoClient(mongo_uri)
        global db
        db = mongo.PoliSee
def get_secrets_just_key(filename: str):
    """Load only the API key (first line of `filename`) into the global `key`."""
    with open(filename) as file:
        global key
        key = file.readline().strip("\n")
# DONE
def get_until_success(endpoint, params):
    """GET `endpoint` repeatedly until HTTP 200; return the decoded JSON body.

    Mutates `params` in place to inject the API key and JSON format, sleeps
    300s after an error response and 1.4s after success (rate limiting), and
    retries forever on network failures.
    """
    # get_secrets_just_key("secrets.txt")
    while True:
        try:
            global request_counter
            request_counter += 1
            params["api_key"] = key
            params["format"] = "json"
            req = get(endpoint, params)
            while req.status_code != 200:
                print()
                print(f"Error Fetching following endpoint: {endpoint}, {req.text}")
                time.sleep(300)
                req = get(endpoint, params)
            time.sleep(1.4)
            return req.json()
        except Exception:
            # The original used a bare `except:` plus self-recursion, which
            # swallowed KeyboardInterrupt and could overflow the stack during
            # long outages; loop instead and catch only Exception.
            print("network did an oopsie")
# DONE
def get_bills(congress_number):
    """Fetch every House (hr) and Senate (s) bill for a congress.

    Pages through the Congress API 250 bills at a time until an empty page
    comes back; returns the combined list of bill dicts.
    """
    params = {
        "api_key": key,
        "format": "json",
        "offset": 0,
        "limit": 250
    }
    bills = []
    url = f"https://api.congress.gov/v3/bill/{congress_number}/hr"
    response = get_until_success(url, params)
    while len(response["bills"]) != 0:  # page until an empty page is returned
        bills.extend(response["bills"])
        print(
            f"\rFetching House bills: {params['offset']}/{response['pagination']['count']}", end="")
        params["offset"] += 250
        response = get_until_success(url, params)
    # Same pagination for Senate bills, restarting the offset.
    url = f"https://api.congress.gov/v3/bill/{congress_number}/s"
    params["offset"] = 0
    response = get_until_success(url, params)
    while len(response["bills"]) != 0:
        bills.extend(response["bills"])
        print(
            f"\rFetching Senate bills: {params['offset']}/{response['pagination']['count']}", end="")
        params["offset"] += 250
        response = get_until_success(url, params)
    return bills
# DONE
def update_edge(congress_number: int, from_node: str, to_node: str, chamber: str):
    """Increment (or create) the sponsor→cosponsor edge document in Mongo.

    Edges live in the "<congress>_edges" collection, keyed by the
    (from_node, to_node, chamber) triple; `count` is the number of
    cosponsorships along that edge.
    """
    collection = db[str(congress_number) + "_edges"]
    edge_document = collection.find_one(
        {"$and": [{"from_node": from_node}, {"to_node": to_node}, {"chamber": chamber}]})
    if edge_document is not None:
        # Existing edge: bump its cosponsorship count.
        collection.update_one({"$and": [{"from_node": from_node}, {"to_node": to_node}, {
            "chamber": chamber}]}, {"$inc": {"count": 1}})
    else:
        doc = {"_id": from_node + "," + to_node, "from_node": from_node,
               "to_node": to_node, "chamber": chamber, "count": 1}
        collection.insert_one(doc)
# DONE
def update_node(congress_number: int, bioguide_id: str, first_name: str, last_name: str, state: str, party: str, chamber: str):
    """Increment a member's sponsorship count, creating the node if missing.

    Nodes live in "<congress>_nodes", keyed by bioguide ID; names/state are
    upper-cased and `party` is reduced to its first letter (D/R/I...).
    """
    collection = db[str(congress_number) + "_nodes"]
    node_document = collection.find_one({"_id": bioguide_id})
    if node_document is not None:
        collection.update_one({"_id": bioguide_id}, {
            "$inc": {"sponsorships_this_congress": 1}})
    else:
        doc = {"_id": bioguide_id, "first_name": first_name.upper(), "last_name": last_name.upper(
        ), "state": state.upper(), "party": party[:1], "chamber": chamber, "sponsorships_this_congress": 1}
        collection.insert_one(doc)
# DONE
def get_num_cosponsorships(congress_number: int, bioguide_id: str):
    """Sum the counts of all edges pointing at this member (times they cosponsored)."""
    edges = db[str(congress_number) + "_edges"]
    cosponsorships = 0
    for edge in edges.find({"to_node": bioguide_id}):
        cosponsorships += edge["count"]
    return cosponsorships
# DONE
def get_num_aisle_crosses(congress_number: int, bioguide_id: str):
    """Count this member's sponsorships/cosponsorships with the opposite party.

    Only counts within the member's own chamber; third-party members score 0.
    """
    nodes = db[str(congress_number) + "_nodes"]
    edges = db[str(congress_number) + "_edges"]
    current_node = nodes.find_one({"_id": bioguide_id})
    current_party = current_node["party"]
    # Eliminates third parties
    if current_party not in ["D", "R"]:
        return 0
    # Swaps member party for opposition
    if current_party == "D":
        current_party = "R"
    else:
        current_party = "D"
    aisle_crosses = 0
    # Outgoing edges: bills this member sponsored, cosponsored by the opposition.
    for edge in edges.find({"$and": [{"chamber": current_node["chamber"]}, {"from_node": bioguide_id}]}):
        if nodes.find_one({"_id": edge["to_node"]})["party"] == current_party:
            aisle_crosses += edge["count"]
    # Incoming edges: opposition bills this member cosponsored.
    for edge in edges.find({"$and": [{"chamber": current_node["chamber"]}, {"to_node": bioguide_id}]}):
        if nodes.find_one({"_id": edge["from_node"]})["party"] == current_party:
            aisle_crosses += edge["count"]
    return aisle_crosses
# DONE
def get_prolific_rank(congress_number: int, bioguide_id: str):
    """1-based rank of this member by sponsorship count within their chamber.

    Rank = 1 + number of chamber colleagues with strictly more sponsorships.
    """
    nodes = db[str(congress_number) + "_nodes"]
    current_node = nodes.find_one({"_id": bioguide_id})
    rank = 1
    for node in nodes.find({"chamber": current_node["chamber"]}):
        if node["sponsorships_this_congress"] > current_node["sponsorships_this_congress"]:
            rank += 1
    return rank
# DONE
def get_collaborative_rank(congress_number: int, bioguide_id: str):
    """1-based rank by cosponsorship count within the member's chamber.

    Requires `cosponsorships_this_congress` to have been written to every
    node first (see augment_existing_nodes).
    """
    nodes = db[str(congress_number) + "_nodes"]
    current_node = nodes.find_one({"_id": bioguide_id})
    rank = 1
    for node in nodes.find({"chamber": current_node["chamber"]}):
        if node["cosponsorships_this_congress"] > current_node["cosponsorships_this_congress"]:
            rank += 1
    return rank
# DONE
def get_bipartisan_rank(congress_number: int, bioguide_id: str):
    """1-based rank by aisle-cross count within the member's chamber.

    Requires `aisle_crosses_this_congress` to have been written to every
    node first (see augment_existing_nodes).
    """
    nodes = db[str(congress_number) + "_nodes"]
    current_node = nodes.find_one({"_id": bioguide_id})
    rank = 1
    for node in nodes.find({"chamber": current_node["chamber"]}):
        if node["aisle_crosses_this_congress"] > current_node["aisle_crosses_this_congress"]:
            rank += 1
    return rank
# DONE
def clean_unpaired_ids(congress_number: int):
    """Create node documents for members who appear as edge targets but have
    no node entry, fetching their details from the Congress API.
    """
    params = {
        "api_key": key,
        "format": "json"
    }
    edges = db[str(congress_number) + "_edges"]
    nodes = db[str(congress_number) + "_nodes"]
    n_docs = edges.count_documents({})
    ctr = 0
    for edge in edges.find():
        node_document = nodes.find_one({"_id": edge["to_node"]})
        if node_document is None:
            member = get_until_success(
                f"https://api.congress.gov/v3/member/{edge['to_node']}", params)["member"]
            print(f"Adding {member['firstName']} {member['lastName']}")
            new_node = {"_id": edge["to_node"], "first_name": member["firstName"].upper(),
                        "last_name": member["lastName"].upper(), "state": member["state"].upper(),
                        "party": member["party"][:1].upper(), "chamber": edge["chamber"],
                        "sponsorships_this_congress": 0}
            nodes.insert_one(new_node)
            print("SUCCESS: Added missing member")
            time.sleep(3)  # extra pause after an API hit
        ctr += 1
        print(f"{ctr}/{n_docs}")  # progress: edges checked so far
# DONE
def augment_existing_nodes(congress_number: int):
    """Two passes over the node collection: (1) recompute cosponsorship and
    aisle-cross counts from the edges; (2) compute ranks and fetch portraits.

    Pass 2 depends on the counts written by pass 1.
    """
    params = {
        "api_key": key,
        "format": "json"
    }
    nodes = db[str(congress_number) + "_nodes"]
    # Reset derived fields so reruns don't double-count.
    nodes.update_many({}, {"$set": {"cosponsorships_this_congress": 0}})
    nodes.update_many({}, {"$set": {"aisle_crosses_this_congress": 0}})
    ctr = 0
    for node in nodes.find():
        new_fields = {"cosponsorships_this_congress": get_num_cosponsorships(
            congress_number, node["_id"]), "aisle_crosses_this_congress": get_num_aisle_crosses(congress_number, node["_id"])}
        nodes.update_one({"_id": node["_id"]}, {"$set": new_fields})
        ctr += 0.5  # each node is visited twice across the two passes
        print(f"Augmented {ctr} nodes")
    for node in nodes.find():
        member = get_until_success(
            f"https://api.congress.gov/v3/member/{node['_id']}", params)["member"]
        new_fields = {"prolific_rank": get_prolific_rank(congress_number, node["_id"]),
                      "collaborative_rank": get_collaborative_rank(congress_number, node["_id"]),
                      "bipartisan_rank": get_bipartisan_rank(congress_number, node["_id"]),
                      # NOTE(review): raises KeyError if the API returns no
                      # "depiction" for a member — confirm every member has one.
                      "image_link": member["depiction"]["imageUrl"]}
        nodes.update_one({"_id": node["_id"]}, {"$set": new_fields})
        ctr += 0.5
        print(f"Augmented {ctr} nodes")
# DONE
def get_bill_info(bill: dict, congress_number: int):
    """Return (sponsor, cosponsors) for one bill from the Congress API.

    The sponsor dict gains a "chamber" field derived from the bill type;
    cosponsors are fetched with 250-per-page pagination.
    """
    params = {
        "api_key": key,
        "format": "json"
    }
    bill_type = bill["type"].upper()
    bill_number = bill["number"]
    current_sponsor = get_until_success(
        f"https://api.congress.gov/v3/bill/{congress_number}/{bill_type}/{bill_number}", params)["bill"]["sponsors"][0]
    if bill_type == "S":
        current_sponsor["chamber"] = "Senate"
    elif bill_type == "HR":
        current_sponsor["chamber"] = "House of Representatives"
    # Fixes J. Gresham Barrett-style names from just being J. Barrett
    # NOTE(review): this concatenates with no separating space ("J.Gresham")
    # and assumes "middleName" is present — confirm both are intended.
    if len(current_sponsor["firstName"]) == 2 and current_sponsor["firstName"][1] == ".":
        current_sponsor["firstName"] += current_sponsor["middleName"]
    params = {
        "api_key": key,
        "format": "json",
        "offset": 0,
        "limit": 250
    }
    current_cosponsors = []
    url = f"https://api.congress.gov/v3/bill/{congress_number}/{bill_type}/{bill_number}/cosponsors"
    response = get_until_success(url, params)
    while len(response["cosponsors"]) != 0:  # page until an empty page returns
        current_cosponsors.extend(response["cosponsors"])
        params["offset"] += 250
        response = get_until_success(url, params)
    return current_sponsor, current_cosponsors
# DONE
def get_congress_data(congress_number: int):
    """Full ingest pipeline for one congress: fetch every bill, write the
    sponsor/cosponsor graph to Mongo, then repair and augment the nodes.
    """
    global request_counter
    bills = get_bills(congress_number)
    ctr = 0
    for bill in bills:
        ctr += 1
        print(
            f"\rProcessing bills: {ctr}/{len(bills)}; {request_counter} requests", end="")
        current_sponsor, current_cosponsors = get_bill_info(
            bill, congress_number)
        # One edge per cosponsor, then one sponsorship credit for the sponsor.
        for cosponsor in current_cosponsors:
            update_edge(congress_number, current_sponsor["bioguideId"], cosponsor["bioguideId"],
                        current_sponsor["chamber"])
        update_node(congress_number, current_sponsor["bioguideId"], current_sponsor["firstName"],
                    current_sponsor["lastName"], current_sponsor["state"], current_sponsor["party"], current_sponsor["chamber"])
    clean_unpaired_ids(congress_number)
    augment_existing_nodes(congress_number)
# TODO TEMP
# TODO TEMP
def fix_sponsorless_congress(congress_number: int):
    """One-off repair: recount sponsorships_this_congress for every node by
    re-fetching each bill's sponsor from the API.

    Bills whose sponsor has no node document are skipped.
    """
    bills = get_bills(congress_number)
    nodes = db[str(congress_number) + "_nodes"]
    print(f"Initializing {congress_number} sponsorship counts...")
    nodes.update_many({}, {"$set": {"sponsorships_this_congress": 0}})
    print("Done")
    params = {
        "api_key": key,
        "format": "json"
    }
    ctr = 0
    for bill in bills:
        bill_type = bill["type"].upper()
        bill_number = bill["number"]
        current_sponsor = get_until_success(
            f"https://api.congress.gov/v3/bill/{congress_number}/{bill_type}/{bill_number}", params)["bill"]["sponsors"][0]
        current_node = nodes.find_one({"_id": current_sponsor["bioguideId"]})
        if current_node is None:
            # Sponsor has no node document; nothing to credit.
            continue
        current_sponsorships = current_node["sponsorships_this_congress"] + 1
        nodes.update_one({"_id": current_sponsor["bioguideId"]}, {
            "$set": {"sponsorships_this_congress": current_sponsorships}})
        ctr += 1
        print(
            f"\rFixing nodes: {ctr}/{len(bills)} of the way there; {request_counter} requests", end="")
def clean_unpaired_ids_json(congress_number: int):
    """Backfill Mongo node documents for cosponsor IDs that appear in the
    exported JSON edge list but have no node entry there.
    """
    params = {
        "api_key": key,
        "format": "json"
    }
    nodes = db[str(congress_number) + "_nodes"]
    # `with` closes the export even on error — the original leaked the handle.
    with open(f"./client/public/data/{congress_number}.json", "r") as f:
        unified = json.load(f)
    n_docs = len(unified["edges"])
    ctr = 0
    # Index existing nodes by bioguide ID for O(1) membership checks.
    members = {}
    for node in unified["nodes"]:
        members[node["_id"]] = node
    for edge in unified["edges"]:
        if edge["to_node"] not in members:
            member = get_until_success(
                f"https://api.congress.gov/v3/member/{edge['to_node']}", params)["member"]
            print(f"Adding {member['firstName']} {member['lastName']}")
            new_node = {"_id": edge["to_node"], "first_name": member["firstName"].upper(),
                        "last_name": member["lastName"].upper(), "state": member["state"].upper(),
                        "party": member["party"][:1].upper(), "chamber": edge["chamber"],
                        "sponsorships_this_congress": 0}
            nodes.insert_one(new_node)
            print("SUCCESS: Added missing member")
            time.sleep(3)  # extra pause after an API hit
        ctr += 1
        print(f"{ctr}/{n_docs}")  # progress: edges checked so far
if __name__ == "__main__":
    # Entry point: load credentials, then backfill missing cosponsor nodes
    # for the 112th and 113th congresses from the exported JSON files.
    get_secrets("secrets.txt")
    clean_unpaired_ids_json(112)
    clean_unpaired_ids_json(113)
| jackwilmerding/polisee | PoliSee.py | PoliSee.py | py | 13,231 | python | en | code | 2 | github-code | 13 |
26890649594 | from bs4 import BeautifulSoup
import pandas as pd
import requests, os
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
import pymongo
import geckodriver_autoinstaller
from webdriver_manager.firefox import GeckoDriverManager
from flask import jsonify
# profile = webdriver.FirefoxProfile()
# NOTE(review): this launches a Firefox instance at import time, and `browser`
# is never used below (scrape() builds its own drivers via get_html) — confirm
# this module-level side effect is intentional.
browser = webdriver.Firefox(
    executable_path=GeckoDriverManager().install()
)

CONN = os.getenv("CONN")  # MongoDB connection string from the environment
client = pymongo.MongoClient(CONN)
db = client.marscrape  # scrape results land in the `marscrape` database
def get_html(url, wait):
    """Load `url` in a headless Firefox, wait up to `wait` seconds for the
    page to settle, and return the rendered page source.
    """
    options = Options()
    options.headless = True
    driver = webdriver.Firefox(options=options)
    try:
        driver.get(url)
        driver.implicitly_wait(wait)
        html = driver.page_source
    finally:
        # Always close the browser, even if the page load raises — the
        # original leaked a Firefox process on every failure.
        driver.close()
    return html
def scrape():
    """Scrape Mars news, the featured JPL image, the facts table and the four
    hemisphere images; replace the single document in db.marscrape with the
    assembled dict and return it.
    """
    ###################################################################################################
    #### Scrape NASA Mars News ####
    print("""
    #### Scrape NASA Mars News ####
    """)
    mars_url = "https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest"
    mars_html = get_html(mars_url, wait=10)
    soup = BeautifulSoup(mars_html, "html.parser") # Parse HTML with Beautiful Soup
    # URL = "https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest"
    # soup = BeautifulSoup(URL, "html.parser")
    # Retrieve the latest element that contains news title and news_paragraph
    news_t = soup.find("div", class_="content_title").text
    # for new in news_t:
    #     print(new)
    news_p = soup.find("div", class_="article_teaser_body").text
    # for newsp in news_p:
    #     print(newsp)
    news = [news_t, news_p]
    print(news_t)
    print(news_p)
    ###################################################################################################
    #### Scrape Mars Featured Image ####
    print("""
    #### Scrape Mars Featured Image ####
    """)
    featured_image_url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
    # Use Selenium to scrape URL
    featured_image_html = get_html(featured_image_url, wait=10)
    # Create beautiful soup object
    soup = BeautifulSoup(featured_image_html, "html.parser")
    featured_image = soup.find_all("a", id="full_image")[0]
    # for image in images:
    # Build the full-resolution URL from the image's numeric id.
    image_number = featured_image["data-link"].replace("/spaceimages/details.php?id=", "")
    image_link = "https://www.jpl.nasa.gov/spaceimages/images/largesize/" + image_number + "_hires.jpg"
    featured_image_link = [image_link]
    ###################################################################################################
    #### Scrape Mars Facts ####
    print("""
    #### Scrape Mars Facts ####
    """)
    mars_facts_url = "https://space-facts.com/mars/"
    # Call selenium function to scrape url
    mars_facts_html = get_html(mars_facts_url, wait=10)
    soup = BeautifulSoup(mars_facts_html, "html.parser")
    table = soup.find_all("table")[0]
    table_html_str = str(table)
    # Let pandas parse the HTML table, then tidy the column names.
    dfs = pd.read_html(table_html_str)
    df = dfs[0]
    df = df.rename(columns={0: "Characteristic", 1: "Value"})
    df = df.set_index("Characteristic")
    df_dict = df.T.to_dict()
    # print(df_dict)
    print(df)
    facts = [table_html_str]  # stored as raw HTML for client-side rendering
    ###################################################################################################
    #### Scrape Mars Hemispheres ####
    print("""
    #### Scrape Mars Hemispheres ####
    """)
    hemispheres_url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
    # Scrape URL with Selenium
    hemispheres_html = get_html(hemispheres_url, wait=10)
    # Create BeautifulSoup object
    soup = BeautifulSoup(hemispheres_html, "html.parser")
    hemispheres = soup.find_all("a", class_="product-item")
    hemispheres_list = []
    for hemisphere in hemispheres:
        if hemisphere.find("h3"):  # only anchors that carry a title
            hem_name = hemisphere.find("h3").text.replace("Enhanced", "").strip()
            hem_link = "https://astropedia.astrogeology.usgs.gov/download" + hemisphere["href"].replace("/search/map", "") + ".tif/full.jpg"
            hemispheres_list.append({"title": hem_name, "img_url": hem_link})
    print(hemispheres_list)
    # Mars dictionary
    print("""
    #### Create Mars dictionary ####
    """)
    # mars_dict = {}
    mars_dict = {
        "news": news,
        "featured_image": featured_image_link,
        "facts": facts,
        "hemispheres": hemispheres_list
    }
    print(mars_dict)
    print("""
    ***** Inserting data into Mongo Database... *****
    """)
    # Keep exactly one document: drop the collection, then insert the new dict.
    db.marscrape.drop()
    db.marscrape.insert_one(mars_dict)
    print("""
    ***** ...Successfully inserted data. *****
    """)
    return mars_dict
def get_mongo_dict():
    """Return (and echo) the single scraped-Mars document stored in Mongo.

    NOTE(review): if scrape() has never run, find_one() returns None and the
    .items() loop raises AttributeError — confirm callers guard against this.
    """
    print("""
    #### Mongo Query ####
    """)
    mongo_dict = db.marscrape.find_one()
    print(type(mongo_dict))
    for key, value in mongo_dict.items():
        print(key)
        print(value)
    return mongo_dict
| DMVance/web-scraping-challenge | Missions_to_Mars/scrape_mars.py | scrape_mars.py | py | 5,170 | python | en | code | 0 | github-code | 13 |
19103188367 | import numpy as np
import seaborn as sns
from tensorflow.keras import Model
from matplotlib import pyplot as plt
from sklearn.metrics import confusion_matrix
from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, Reshape, LSTM, TimeDistributed
from tensorflow.keras.callbacks import EarlyStopping
class TimeSeriesNetworks:
    """Builds, trains and compares randomized CNN / LSTM / CNN-LSTM
    classifiers (4-way softmax) on the time-series data (X, y).

    Keras History objects land in self.history and the best training
    accuracy per model in self.maxAccuracy. Call model() to run everything.
    """

    #Initializer
    def __init__(self, X, y) -> None:
        self.X = X  # training inputs
        self.y = y  # integer class labels (sparse categorical)
        # One dict per architecture family, keyed by model index.
        self.models = {'cnn': {}, 'lstm': {}, 'cnn-lstm': {}}
        self.history = {'cnn': {}, 'lstm': {}, 'cnn-lstm': {}}
        self.maxAccuracy = {'cnn': {}, 'lstm': {}, 'cnn-lstm': {}}

    #Builds a Dense Block
    def denseBlock(self, x):
        # Random depth 0-10; widths shrink as 100/(i+2).
        # NOTE(review): 100/(i+2) is a float — Keras Dense expects integer
        # units; confirm this runs on the TF version in use.
        for i in range(round(np.random.random()*10)):
            x = Dense(units = 100/(i+2), activation = 'relu')(x)
        return x

    #Builds a ConvBlock
    def convBlock(self, x):
        # Random depth 0-5 of width-1 Conv1D layers (float filter counts —
        # see the note in denseBlock).
        for i in range(round(np.random.random()*5)):
            x = Conv1D(128/(i+2), 1, activation='relu')(x)
        return x

    #Builds a LSTM Block
    def lstmBlock(self, x):
        # Random depth 0-10 of stacked LSTMs that keep the time dimension.
        for i in range(round(np.random.random()*10)):
            x = LSTM(50, activation='relu', return_sequences=True)(x)
        return x

    #Builds a TimeDistributed CNN Block
    def timeBlock(self, x):
        # Random depth 0-5 of per-timestep Conv1D layers.
        for i in range(round(np.random.random()*5)):
            x = TimeDistributed(Conv1D(64, 1, activation='relu'))(x)
        return x

    #Build CNN
    def cnn(self) -> Model:
        """Random-depth Conv1D classifier over the (re-shaped) input."""
        input_tensor = Input(shape = self.input)
        x = Flatten()(input_tensor)
        if len(self.input) == 1:
            # 1-D input: append a channel axis so Conv1D can operate.
            x = Reshape(self.input + (1,))(x)
        else:
            x = Reshape(self.input)(x)
        x = Conv1D(128, 2, activation = 'relu')(x)
        x = self.convBlock(x)
        x = MaxPooling1D()(x)
        x = Flatten()(x)
        x = Dense(100, activation = 'relu')(x)
        x = self.denseBlock(x)
        output_tensor = Dense(4, activation = 'softmax')(x)
        model = Model(inputs = input_tensor, outputs = output_tensor)
        return model

    #Build LSTM
    def lstm(self) -> Model:
        """Random-depth stacked-LSTM classifier."""
        input_tensor = Input(shape = self.input)
        x = Flatten()(input_tensor)
        if len(self.input) == 1:
            x = Reshape(self.input + (1,))(x)
        else:
            x = Reshape(self.input)(x)
        x = self.lstmBlock(x)
        x = LSTM(50, activation='relu')(x)  # final LSTM collapses the sequence
        output_tensor = Dense(4, activation = 'softmax')(x)
        model = Model(inputs = input_tensor, outputs = output_tensor)
        return model

    #Build CNN-LSTM
    def cnnLstm(self) -> Model:
        """TimeDistributed-Conv front end feeding stacked LSTMs."""
        input_tensor = Input(shape = self.input)
        x = Flatten()(input_tensor)
        if len(self.input) < 3:
            # NOTE(review): hard-coded (2, 41, 223) = 18,286 elements — only
            # valid for one specific input size; confirm against X.shape.
            x = Reshape((2, 41, 223))(x)
        else:
            x = Reshape(self.input)(x)
        x = TimeDistributed(Conv1D(64, 1, activation='relu'))(x)
        x = self.timeBlock(x)
        x = TimeDistributed(MaxPooling1D(pool_size=2))(x)
        x = TimeDistributed(Flatten())(x)
        x = self.lstmBlock(x)
        x = LSTM(50, activation='relu')(x)
        output_tensor = Dense(4, activation = 'softmax')(x)
        model = Model(inputs = input_tensor, outputs = output_tensor)
        return model

    #Build Models
    def build(self):
        """Instantiate three CNNs and one each of LSTM / CNN-LSTM."""
        self.input = self.X.shape[1:]  # per-sample input shape
        for i in range(3):
            self.models['cnn'][i] = self.cnn()
        for i in range(1):
            self.models['lstm'][i] = self.lstm()
            self.models['cnn-lstm'][i] = self.cnnLstm()

    #Train Models
    def train(self):
        """Compile and fit every model (10 epochs, early stopping on accuracy)."""
        es = EarlyStopping(monitor = 'accuracy', patience = 10, restore_best_weights = True)
        #CNN
        for k, i in self.models['cnn'].items():
            print(f"Now training cnn {k}")
            i.compile(optimizer='adam', loss=['sparse_categorical_crossentropy'], metrics=['accuracy'])
            self.history['cnn'][k] = i.fit(self.X, self.y, epochs=10, verbose = False, callbacks=[es])
        #LSTM
        for k, i in self.models['lstm'].items():
            print(f"Now training lstm {k}")
            i.compile(optimizer='RMSprop', loss=['sparse_categorical_crossentropy'], metrics=['accuracy'])
            self.history['lstm'][k] = i.fit(self.X, self.y, epochs=10, verbose = False, callbacks=[es])
        #CNN-LSTM
        for k, i in self.models['cnn-lstm'].items():
            print(f"Now training cnn-lstm {k}")
            i.compile(optimizer='adam', loss=['sparse_categorical_crossentropy'], metrics=['accuracy'])
            self.history['cnn-lstm'][k] = i.fit(self.X, self.y, epochs=10, verbose = False, callbacks=[es])

    #Provides best results
    def results(self):
        """Record each model's best training accuracy into self.maxAccuracy."""
        #CNN
        for k, h in self.history['cnn'].items():
            self.maxAccuracy['cnn'][k] = max(h.history['accuracy'])
        #LSTM
        for k, h in self.history['lstm'].items():
            self.maxAccuracy['lstm'][k] = max(h.history['accuracy'])
        #CNN-LSTM
        for k, h in self.history['cnn-lstm'].items():
            self.maxAccuracy['cnn-lstm'][k] = max(h.history['accuracy'])

    #Function to execute to do all models
    def model(self):
        """Build, train and score everything in one call."""
        self.build()
        self.train()
        self.results()
def plot_confusion_matrix(true, pred):
    """Render a confusion-matrix heatmap for predictions vs. true labels.

    `true` must expose .unique() (e.g. a pandas Series) to fix label order.
    """
    cm = confusion_matrix(true, pred, labels=true.unique())
    f, ax = plt.subplots(figsize =(7,7))
    sns.heatmap(cm, annot = True, linewidths=0.2, linecolor="black", fmt = ".0f", ax=ax, cmap="Purples")
    plt.xlabel("PREDICTED LABEL")
    plt.ylabel("TRUE LABEL")
    # NOTE(review): title says "SVM Classifier" but this module builds
    # CNN/LSTM models — confirm the intended title.
    plt.title('Confusion Matrix for SVM Classifier')
    plt.show()
30631573517 | """
MAIN FUNCTION
"""
from machine import Pin, SPI, I2C, PWM, Timer
import time
import rp2
# DRIVERS
import gc9a01 as lcd
from imu import MPU6050
from rotary_irq_rp2 import RotaryIRQ
# HELPER
import italicc
import NotoSansMono_32 as font
print("BOOTING", end='')

# I2C bus 0 for the MPU6050 accelerometer/gyro (orientation sensing).
i2c = I2C(0, sda=Pin(0), scl=Pin(1), freq=400000)
gyro = MPU6050(i2c)
tolerence = 0.05  # NOTE(review): never referenced below — confirm it is needed
current_orientation = "UNKNOWN"  # latest label; refreshed by getOrientationLabel()
print('>', end='')

# 240x240 round GC9A01 LCD on SPI1.
LCD_SIZE = [240, 240]
spi = SPI(1, baudrate=40000000, sck=Pin(14), mosi=Pin(15))
screen = lcd.GC9A01(
    spi,
    LCD_SIZE[0],
    LCD_SIZE[1],
    reset=Pin(11, Pin.OUT),
    cs=Pin(13, Pin.OUT),
    dc=Pin(12, Pin.OUT),
    backlight=Pin(10, Pin.OUT),
    rotation=0)

BG = lcd.BLACK  # Background Color
FG = lcd.WHITE  # Foreground Color
AC = lcd.MAGENTA  # Accent Color

# UI state-machine labels -> mode numbers (the global MODE holds the int).
MODES = {
    "IDLE":0,
    "RUNNING":1,
    "SET":2,
    "MENU":3
}
class DisplayTimer:
    """Countdown timer measured with MicroPython's time.ticks_ms()."""

    def __init__(self, minutes, seconds):
        self.minutes = minutes
        self.seconds = seconds
        self.startTime = 0   # tick count captured by start()
        self.isActive = False

    def start(self):
        """Begin (or restart) the countdown from now."""
        self.startTime = time.ticks_ms()
        self.isActive = True

    def percentComplete(self):
        """Return elapsed time as a percentage (0-100); deactivates at 100."""
        delta = time.ticks_diff(time.ticks_ms(), self.startTime)
        totalTime = ((60 * self.minutes) + self.seconds) * 1000
        # Bug fix: the original returned the raw fraction (0.0-1.0) but
        # compared it against 100, so the timer only "completed" after 100x
        # its configured duration. Scale to a true percentage first.
        percent_complete = (delta / totalTime) * 100
        if percent_complete < 100:
            return percent_complete
        else:
            self.isActive = False
            return 100
print('>', end='')

# Rotary encoder with push switch; value wraps around in 0-99.
encoder_switch = Pin(20, Pin.OUT, Pin.PULL_UP)
encoder = RotaryIRQ(pin_num_clk=18,
                    pin_num_dt=19,
                    min_val=0,
                    max_val=99,
                    range_mode=RotaryIRQ.RANGE_WRAP)

times = [0, 0]  # Time in Minutes and Seconds
timeMode = 0  # Time Mode 0 = seconds 1 = minutes
switch_holdTime = 0  # tick count when the encoder switch was last pressed
print('>', end='')

buzzer = PWM(Pin(16))
print('>', end='')
#%% Helper Functions
"""
Get the label for the current orientation
"""
def getOrientationLabel(irq = None):
    """Classify device orientation from the accelerometer.

    Returns one of FLAT/SIDEWAYS/FACE with an "-UP"/"-DOWN" suffix,
    "ANGLE-UP/DOWN-R/L", or "UNKNOWN". When called as a timer/IRQ callback
    (irq is not None) it also stores the result in current_orientation.
    """
    global current_orientation
    # Take Gyroscope Reading
    x = gyro.accel.x
    y = gyro.accel.y
    z = gyro.accel.z
    LOOK_RANGE = 0.9  # dominant-axis threshold (in g)
    output = ""
    look_val = 0
    if abs(z) > LOOK_RANGE:
        output = "FLAT"
        look_val = z
    elif abs(x) > LOOK_RANGE:
        output = "SIDEWAYS"
        look_val = x
    elif abs(y) > LOOK_RANGE:
        output = "FACE"
        look_val = y
    elif abs(x) + abs(z) > 1:
        # No single dominant axis: device is tilted at an angle.
        output = "ANGLE"
        if x > 0:
            output += "-UP"
        else:
            output += "-DOWN"
        if z > 0:
            output += "-R"
        else:
            output += "-L"
    else:
        output = "UNKNOWN"
    if output == "FACE" or output == "SIDEWAYS" or output == "FLAT":
        # Sign of the dominant axis distinguishes up from down.
        if look_val > 0:
            output += "-UP"
        else:
            output += "-DOWN"
    # If being handled by an event, publish the new orientation globally.
    if irq != None:
        current_orientation = output
    return output
def writeCenter(text = "", y = 120):
    """Draw `text` horizontally centered on the display at row `y`."""
    # (removed unused local `h = screen.height()` from the original)
    l = screen.width()
    length = screen.write_len(font, text)  # rendered pixel width of the text
    screen.write(font, text, int((l/2)-(length/2)), y, FG, BG)
def updateScreenRotation():
    """Rotate the LCD so text stays upright for the current orientation.

    Orientations without a case here (ANGLE-*, UNKNOWN, FACE-DOWN) leave the
    rotation unchanged.
    """
    if current_orientation == "FLAT-UP":
        screen.rotation(0)
    elif current_orientation == "FLAT-DOWN":
        screen.rotation(2)
    elif current_orientation == "SIDEWAYS-UP":
        screen.rotation(1)
    elif current_orientation == "SIDEWAYS-DOWN":
        screen.rotation(3)
    elif current_orientation == "FACE-UP":
        screen.rotation(0)
def updateScreen(irq = None):
    """Redraw the display for the current mode and orientation."""
    global MODE
    screen.off()  # blank while redrawing
    updateScreenRotation()
    screen.fill(BG)
    # Bug fix: MODE is an int, so the original `MODE == MODE["MENU"]` raised
    # TypeError; the lookup belongs on the MODES dict.
    if MODE == MODES["MENU"]:
        writeCenter("MENU", 60)
    writeCenter(current_orientation, 120)
    screen.on()
last_orientation = getOrientationLabel()  # seed so tickEvent sees no change at boot
last_switch_state = 0
SWITCH_TO_MENU_TIME = 2000  # ms the switch must be held to enter the menu
# Current UI mode. NOTE(review): the original comment said
# "0 = Idle, 1 = Timer, 2 = Menu", which disagrees with the MODES dict
# (IDLE/RUNNING/SET/MENU) — confirm which mapping is intended.
MODE = 0
def tickEvent(irq = None):
    """Main-loop tick: redraw the screen when the orientation has changed."""
    global last_orientation, last_switch_state, switch_holdTime, MODE
    if last_orientation != current_orientation:
        last_orientation = current_orientation
        updateScreen(irq)
def switchTimeMode(switch):
    """Record when the encoder switch was pressed (while in mode 0).

    NOTE(review): the global list names `switch_down`, which is never defined
    at module level — confirm whether it was meant to be used here.
    """
    global switch_holdTime, switch_down, MODE
    #print("SWITCH PRESSED")
    if(MODE == 0):
        switch_holdTime = time.ticks_ms()
def switchPressed(switch):
    # IRQ handler for the encoder push switch; delegates to switchTimeMode.
    switchTimeMode(switch)
test = DisplayTimer(0, 10)  # 10-second demo timer

# SETUP
print('\nSETUP', end='')
getOrientationLabel()
# Re-sample orientation once per second in the background.
orientation_timer = Timer(period=1000, mode=Timer.PERIODIC, callback=getOrientationLabel)
print('>', end='')
#tick = Timer(period=100, mode=Timer.PERIODIC, callback=tickEvent)
print('>', end='')
screen.init()
print('>', end='')
encoder_switch.irq(trigger=Pin.IRQ_RISING, handler=switchPressed)
print('>', end='')
test.start()

# Main loop: poll for orientation changes and refresh the screen.
while True:
    tickEvent()
17231994875 | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 24 18:31:37 2019
从txt里读取数据 写入excel
@author: Administrator
"""
import numpy as np
import openpyxl
import os,sys
sys.path.append('D:/Refresh/py36')
import support_pie
def check_data(data):
    """Warn (print '数据有误') if column 1 of `data` is not non-decreasing.

    Only the first violation is reported; the function always returns None.
    """
    row_count = data.shape[0]
    for idx in range(row_count - 1):
        if data[idx + 1, 1] - data[idx, 1] < 0:
            print('数据有误')
            break
# Load the target workbook; pigment areas from per-station txt exports are
# written into the 'station' sheet.
excel_dir = 'D:/Refresh/data/南极色素-20201028-冯毓彬/text.xlsx'
file_dir = 'D:/Refresh/data/南极色素-20201028-冯毓彬/pdfplumber_txt_440/'
wb = openpyxl.load_workbook(excel_dir)
#sheet_name = wb.sheetnames
sheet_r = wb['station']
#station_y = 6
# Each station occupies 5 columns; the first 3 columns are shared metadata.
station_num = int((sheet_r.max_column-3)/5)
error_names =[]
for i in range(station_num):
    station_y = 6 + i * 5  # peak-area column for this station
    station_name = sheet_r.cell(1,station_y).value
    txtnames = support_pie.file_name(file_dir,'.txt')
    for txtname in txtnames:
        split_ser = txtname.split('-')
        stationname = split_ser[3]  # station name is the 4th '-'-separated field
        if station_name == stationname:
            # print(station_name)
            data = np.loadtxt(txtname,dtype='float',delimiter=',')
            check_data(data)  # sanity check: retention times should ascend
            pig_index =np.linspace(6,32,num=27,dtype='int')  # pigment rows 6..32
            for j in pig_index:
                blank = sheet_r.cell(1,1).value
                res_time = sheet_r.cell(j,station_y-1).value  # retention time
                pig_area = sheet_r.cell(j,station_y).value  # peak area (target cell)
                if res_time != blank and pig_area == blank:
                    # Find the txt row whose retention time matches exactly;
                    # require a unique match before writing the area.
                    res = data[:,1] - res_time
                    if len(np.where(res == 0)[0]) == 1:
                        value = float(data[np.where(res == 0),2][0])
                        sheet_r.cell(j,station_y,value)
                        value = -1
                    else:
                        # wb.save(excel_dir)
                        print('%s保留时间错误:错误值为:%.3f'%(station_name,res_time))
                else:
                    continue
        # elif txtname == txtnames[-1]:
        #     error_names.append(station_name)
        #     print('未找到' +station_name +'站位')
wb.save(excel_dir)
21538617524 | '''
Example: Countdown
'''
def countdown_to_one(n):
    """Print the integers 1..n in ascending order using recursion."""
    if n == 0:  # base case
        return
    countdown_to_one(n-1)  # recursive call and moving towards a "base case"
    print(n)  # printed after the recursion returns -> ascending order

countdown_to_one(3)
'''
Example: Double Countdown
'''
def countdown(n):
    """Print a branching countdown: each level recurses twice, so a call with
    n prints 2**n - 1 lines in total."""
    if n == 0:
        return
    print(n)  # print before recursing -> descending order per branch
    countdown(n-1)
    countdown(n-1)  # second call repeats the remaining countdown

countdown(3)
'''
Example: Recursive Sum
'''
def sum_list(items):  # Time complexity: O(n) calls; slicing copies make it O(n^2) overall
    """Recursively sum a list of numbers.

    Returns 0 for an empty list (the original raised IndexError on []).
    """
    if not items:  # base case: empty list sums to 0
        return 0
    return items[0] + sum_list(items[1:])  # recursive case
'''
Example: Recursive Factorial
'''
def recursive_factorial(n):
    """Return n! computed recursively; defined for n >= 0."""
    if n <= 1:  # base case; also covers n == 0 (the original recursed forever on 0)
        return 1
    return n * recursive_factorial(n - 1)  # recursive call

recursive_factorial(5)
| Mark-McAdam/cs_lambda | recursive-sorting/basic_recursion.py | basic_recursion.py | py | 802 | python | en | code | 0 | github-code | 13 |
13377926967 | from flask import render_template, make_response, request
from flask_restful import Resource, reqparse
from flask import json
from pymongo import ASCENDING, DESCENDING
from common.util import mongo
from bson.json_util import dumps, default
class CategoriesResource(Resource):
    """REST resource for listing and adding bookstore categories.

    Categories are stored in Mongo as one document per initial letter:
    {letter: "A", categories: [...]}.
    """

    def get(self):
        """Return all category buckets, or one letter's list via ?initial=X."""
        parser = reqparse.RequestParser()
        parser.add_argument('initial', type=str, location='args')
        args = parser.parse_args()
        if (not args['initial']):
            # No filter: return every letter bucket, sorted.
            try:
                cursor = mongo.db.categories.find({}, {"letter":1, "categories": 1, "_id": 0}).sort([('categories', ASCENDING), ('letter', ASCENDING)])
                jsonstring = dumps(cursor, default=default)
                return json.loads(jsonstring), 200
            except Exception as e:
                print(e)
                return {"message": "Error calling all categories"}, 400
        _initial = args['initial']
        try:
            cursor = mongo.db.categories.find_one({'letter': _initial}, {'categories': 1})
            jsonstring = dumps(cursor, default=default)
            return json.loads(jsonstring), 200
        except Exception as e:
            print(e)
            return {"message": "Error calling categories filtered by initial {}".format(_initial)}, 400

    def category_exists(self, categories):
        """Check if category exists
        Parameters: array of input categories
        Return: array of new (not-yet-stored) categories, or None if all exist
        """
        new_categories = []
        for cat in categories:
            cursor = mongo.db.categories.find_one({'categories': cat})
            if cursor == None:
                new_categories.append(cat)
        if len(new_categories) == 0:
            return None
        else:
            return new_categories

    def post(self):
        """Add new categories from the JSON body's "categories" array."""
        req_json = request.get_json(force=True)
        try:
            _categoriesArr = req_json.get('categories')
        except Exception as e:
            print(e)
            return {"message": "category is a required field"}, 400
        add_categories = self.category_exists(_categoriesArr)
        if add_categories != None:
            for cat in add_categories:
                letter = cat[0].upper()  # bucket by the category's first letter
                try:
                    # NOTE(review): $push without upsert silently no-ops when no
                    # document exists for this letter — confirm the letter
                    # buckets are pre-seeded.
                    mongo.db.categories.update_one({'letter': letter}, {'$push': {'categories': cat}})
                except Exception as e:
                    print(e)
                    return {"message": "error adding new category {}".format(cat)}, 400
            return {"message": "Successfully added new categories {}".format(add_categories)}, 200
        return {"message": "No new categories to add"}, 200
| andrehadianto/50043_isit_database | server/resources/categories.py | categories.py | py | 2,807 | python | en | code | 0 | github-code | 13 |
12090915193 | """
练习:创建两个分支线程,一个用于打印1-52这52个数字
另一个用于打印 A-Z这26个字母。要求最终打印顺序为
12A34B56C .... 5152Z
"""
from threading import Thread,Lock
# lock1 gates the number thread's next pair; lock2 gates the letter thread.
lock1 = Lock()
lock2 = Lock()
def print_num():
    """Print 1..52 two at a time, alternating with print_chr via the locks."""
    for i in range(1,53,2):
        lock1.acquire()  # wait for our turn
        print(i)
        print(i+1)
        lock2.release()  # hand off to the letter thread
def print_chr():
    """Print A..Z one at a time, alternating with print_num via the locks."""
    for i in range(65,91):
        lock2.acquire()  # wait until a number pair has been printed
        print(chr(i))
        lock1.release()  # hand back to the number thread
t1 = Thread(target=print_num)
t2 = Thread(target=print_chr)
lock2.acquire()  # ensure the letter thread blocks so the numbers print first
t1.start()
t2.start()
t1.join()
t2.join()
25005850090 | import pandas as pd
import requests
base_url = 'https://fantasy.premierleague.com/api/'  # FPL public API root
def get_bootstrap_data(data_type):
    """Fetch the FPL bootstrap-static payload and return one of its tables
    (e.g. 'elements', 'teams') as a DataFrame.

    Raises on non-200 responses; prints and returns None when `data_type`
    is not a key in the payload.
    """
    resp = requests.get(base_url + 'bootstrap-static/')
    if resp.status_code != 200:
        raise Exception('Response was status code ' + str(resp.status_code))
    data = resp.json()
    try:
        elements_data = pd.DataFrame(data[data_type])
        return elements_data
    except KeyError:
        print('Unable to reach bootstrap API successfully')
def get_manager_history_data(manager_id):
    """Fetch an FPL manager's gameweek-by-gameweek history as a DataFrame.

    Raises Exception on a non-200 response; returns None (after printing a
    diagnostic) when the 'current' key is absent from the payload.
    """
    manager_hist_url = base_url + 'entry/' + str(manager_id) + '/history/'
    # A timeout prevents the request from hanging indefinitely.
    resp = requests.get(manager_hist_url, timeout=30)
    if resp.status_code != 200:
        raise Exception('Response was status code ' + str(resp.status_code))
    payload = resp.json()  # renamed from 'json' to avoid shadowing the stdlib module
    try:
        data = pd.DataFrame(payload['current'])
        return data
    except KeyError:
        print('Unable to reach bootstrap API successfully')
        return None
manager_data = get_manager_history_data(392357)
print(manager_data.head(5)) | timyouell-servian/fantasy_premier_league | fpl_functions.py | fpl_functions.py | py | 1,012 | python | en | code | 0 | github-code | 13 |
28778771542 | from math import sqrt
def prime_sieve(max_num):
    """Return all primes <= max_num using the Sieve of Eratosthenes.

    Returns an empty list for max_num < 2 (the original indexed
    primality[1] unconditionally and raised IndexError for max_num < 1).
    """
    if max_num < 2:
        return []
    primality = [True] * (max_num + 1)
    primality[0] = False
    primality[1] = False
    # Only trial values up to sqrt(max_num) are needed; start crossing off
    # at i*i because smaller multiples were removed by smaller primes.
    for i in range(2, int(sqrt(max_num)) + 1):
        if primality[i]:
            for j in range(i * i, max_num + 1, i):
                primality[j] = False
    return [i for i, is_prime in enumerate(primality) if is_prime]
def find_factors(num, primes, factors):
    """Return the prime factorisation of num as a list (unsorted),
    reusing any memoised result found in the `factors` dict.

    `primes` must contain every prime factor of num.
    """
    if num in factors:
        return factors[num]
    if num in primes:
        return [num]
    # Peel off the smallest prime divisor and recurse on the quotient.
    for p in primes:
        quotient = float(num) / float(p)
        if quotient.is_integer():
            return find_factors(int(quotient), primes, factors) + [p]
def prime_factors(max_num):
    """Build a dict mapping each integer in [2, max_num) to its sorted
    list of prime factors (with multiplicity).

    NOTE(review): max_num itself is excluded by range(); matches the
    original behaviour -- confirm intended.
    """
    primes = prime_sieve(max_num)
    factors = {}
    for value in range(2, max_num):
        decomposition = sorted(find_factors(value, primes, factors))
        factors[value] = decomposition
    return factors
def n_consecutive_distinct_factors(n, max_num):
    # Search for the first run of n consecutive integers whose prime-factor
    # sets each have at least n members and are pairwise disjoint between
    # neighbours (Project Euler 47 style).
    # NOTE(review): `len(set1) >= n` accepts numbers with MORE than n
    # distinct prime factors -- confirm whether exactly-n was intended.
    factors = prime_factors(max_num)
    for i in range(2, max_num-n+1, 1):
        distinct = True
        for j in range(0, n-1, 1):
            set1 = set(factors[i+j])
            set2 = set(factors[i+j+1])
            # Union preserves distinctness iff the neighbouring sets share
            # no prime: |A| + |B| == |A ∪ B|.
            setU = set(factors[i+j] + factors[i+j+1])
            if not (len(set1) >= n and len(set2) >= n and len(set1) + len(set2) == len(setU)):
                distinct = False
        if distinct:
            return i
#print(list(range(0, 5, 1)))
print(n_consecutive_distinct_factors(4, 1000000))
| Pineci/ProjectEuler | Problem47.py | Problem47.py | py | 1,566 | python | en | code | 0 | github-code | 13 |
def save_file(boy, girl, count):
    """Write the collected dialogue lines to boy_<count>.txt and
    girl_<count>.txt in the current directory.

    `boy` and `girl` are lists of already newline-terminated strings.
    """
    file_name_boy = 'boy_' + str(count) + '.txt'
    file_name_girl = 'girl_' + str(count) + '.txt'
    # Context managers guarantee the handles are closed even if
    # writelines() raises (the original leaked them on error).
    with open(file_name_boy, 'w') as boy_file:
        boy_file.writelines(boy)
    with open(file_name_girl, 'w') as girl_file:
        girl_file.writelines(girl)
def split_file(file_name):
    """Split a dialogue transcript into per-speaker numbered files.

    Sections are delimited by lines beginning with '======'; each section
    is flushed to a boy_<n>.txt / girl_<n>.txt pair via save_file().
    """
    boy = []
    girl = []
    count = 1
    # BUG FIX: the original ignored file_name and always opened
    # 'record.txt'. Also use `with` so the handle is closed on error.
    with open(file_name) as f:
        for each_line in f:
            if each_line[:6] != '======':
                # Split only on the first ':' so spoken text may contain colons.
                (role, line_spoken) = each_line.split(':', 1)
                if role == '小甲鱼':
                    boy.append(line_spoken)
                if role == '小客服':
                    girl.append(line_spoken)
            else:
                save_file(boy, girl, count)
                boy = []
                girl = []
                count += 1
    save_file(boy, girl, count)
split_file('record.txt')
| DodgeV/learning-programming | books/python/零基础入门学习Python(小甲鱼)全套源码课件/029文件:一个任务(课件+源代码)/课堂练习/test_2.py | test_2.py | py | 930 | python | en | code | 3 | github-code | 13 |
20573561380 | from language import choose_language
from modifiers import draconic_lines, clearscreen, validate_choice
from spells import single_spell_select
class Race:
    """Base D&D race: ability bonuses, languages, traits, and appearance.

    Subclasses override the defaults set here; `level` defaults to 1.
    """

    def __init__(self, level=None):
        # Identity and movement.
        self.name = ''
        self.speed = 25
        # Ability-score modifiers (all zero for the generic base race).
        self.intelligence = 0
        self.dexterity = 0
        self.wisdom = 0
        self.charisma = 0
        self.constitution = 0
        self.strength = 0
        # Proficiencies and innate traits.
        self.language = ["Common"]
        self.weaponpro = []
        self.cantrip = []
        self.abilities = []
        # Physical description; subclasses replace these with ranges/choices.
        self.age = 0
        self.height = 0
        self.weight = 0
        self.eyes = ''
        self.skin = ''
        self.hair = ''
        self.level = 1 if level is None else level

    def get_intelligence(self):
        return self.intelligence

    def get_dexterity(self):
        return self.dexterity

    def get_wisdom(self):
        return self.wisdom

    def get_strength(self):
        return self.strength

    def get_charisma(self):
        return self.charisma

    def get_constitution(self):
        return self.constitution
# Start Aarokocra --------------------------------------------------
class Aarakocra(Race):
def __init__(self):
super(Aarakocra, self).__init__()
self.name = 'Aarakocra'
self.dexterity += 1
self.wisdom += 1
self.abilities = []
self.attacks = ["TALONS, 1d4 Slashing"]
self.language.append("Aarakocra")
self.language.append("Auran")
self.speed = 25
self.age = [3, 30]
self.height = [53, 60]
self.weight = [80, 100]
self.skin = []
self.hair = ['Blue', 'Green', 'Red', 'Orange', 'Yellow', 'Brown', 'Gray']
self.eyes = ['Blue', 'Black', 'Brown', 'Green', 'Hazel', 'Amber']
# Start Dragonborn --------------------------------------------------
class Dragonborn(Race):
    """Dragonborn race (+2 Str, +1 Cha); interactively prompts the player
    for the draconic ancestry, which determines the breath-weapon shape."""

    def __init__(self, character_name):
        super(Dragonborn, self).__init__()
        self.name = 'Dragonborn'
        self.strength += 2
        self.charisma += 1
        self.speed = 30
        self.age = [15, 80]
        self.height = [66, 96]
        self.weight = [220, 280]
        self.skin = ['Brass', 'Bronze', 'Scarlet', 'Rust', 'Gold', 'Copper-Green']
        self.hair = []
        self.eyes = ['Blue', 'Black', 'Brown', 'Green', 'Hazel', 'Amber']
        self.abilities = ["DAMAGE RESISTANCE"]
        clearscreen()
        print("What type of Dragonborn is {}".format(character_name))
        for key, value in draconic_lines().items():
            print(key, value)
        x = validate_choice(len(draconic_lines().items()))
        if x <= 5:
            xtype = "5 by 30 ft. line (Dex Save)"
        else:
            xtype = "15 ft. Cone (Dex Save)"
        # BUG FIX: a separating space was missing, producing strings like
        # "BREATH WEAPON5 by 30 ft. line (Dex Save)".
        self.attacks = ["BREATH WEAPON " + xtype]
        self.language.append("Draconic")
# Start Dwarf --------------------------------------------------
class Dwarf(Race):
def __init__(self):
super(Dwarf, self).__init__()
self.constitution += 2
self.abilities = ["DARKVISION", "DWARVEN RESILIENCE", "DWARVEN COMBAT TRAINING", "STONECUNNING"]
self.language.append("Dwarvish")
self.weaponpro = ['Battleaxe', 'Handaxe', 'Light hammer', 'Warhammer']
self.speed = 25
self.age = [50, 350]
self.height = [48, 60]
self.weight = [110, 170]
self.skin = ['Fair', 'Bronze', 'Ruddy', 'Ash', 'Olive', 'Ebony']
self.hair = ['Black', 'Brunette', 'Auburn', 'Wildfire', 'Blonde']
self.eyes = ['Blue', 'Silver-Blue', 'Black', 'Brown', 'Green', 'Hazel', 'Amber']
class HillDwarf(Dwarf):
def __init__(self):
super(HillDwarf, self).__init__()
self.wisdom += 1
self.abilities.append("DWARVEN TOUGHNESS")
self.name = 'Hill Dwarf'
class MountainDwarf(Dwarf):
def __init__(self):
super(MountainDwarf, self).__init__()
self.strength += 2
self.abilities.append("DWARVEN ARMOUR TRAINING")
self.name = 'Mountain Dwarf'
# Start Elf --------------------------------------------------
class Elf(Race):
def __init__(self):
super(Elf, self).__init__()
self.dexterity += 2
self.name = 'Elf'
self.abilities = ["FEY ANCESTRY", "TRANCE"]
self.language.append("Elvish")
self.speed = 30
self.age = [100, 750]
self.height = [60, 86]
self.weight = [110, 170]
self.skin = ['Fair', 'Bronze', 'Ruddy', 'Ash', 'Olive', 'Ebony', 'Silver', 'Azure']
self.hair = ['Black', 'Brunette', 'Auburn', 'Wildfire', 'Blonde']
self.eyes = ['Blue', 'Silver-Blue' 'Black', 'Brown', 'Green', 'Hazel', 'Amber']
class HighElf(Elf):
def __init__(self):
super(HighElf, self).__init__()
self.intelligence += 1
self.name = 'High Elf'
self.magic = True
x = single_spell_select('Wizard', 0)
self.cantrip = [x]
clearscreen()
self.language.append(str(choose_language(self.language)))
class WoodElf(Elf):
def __init__(self):
super(WoodElf, self).__init__()
self.wisdom += 1
self.name = 'Wood Elf'
self.abilities = ["ELF WEAPON TRAINING", "FLEET OF FOOT", "MASK OF THE WILD"]
class Eladrin(Elf):
def __init__(self):
super(Eladrin, self).__init__()
self.intelligence += 1
self.name = 'Eladrin'
self.abilities = ["ELF WEAPON TRAINING", "FEY STEP"]
class DrowElf(Elf):
    """Drow (dark elf) subrace (+1 Cha); innate Drow Magic grows with level."""

    def __init__(self, level):
        super(DrowElf, self).__init__()
        self.charisma += 1
        self.name = 'Drow Elf'
        self.abilities = ["SUPERIOR DARKVISION", "DROW MAGIC", "DROW WEAPON TRAINING", "SUNLIGHT SENSITIVITY"]
        self.magic = True
        self.cantrip = ["Dancing Lights"]
        # BUG FIX: the original tested `if level > 3` first, which made the
        # `elif level > 5` branch unreachable -- high-level drow never got
        # DARKNESS. Check the stricter condition first.
        if level > 5:
            self.cantrip.append("FAIRIE FIRE")
            self.cantrip.append("DARKNESS")
        elif level > 3:
            self.cantrip.append("FAIRIE FIRE")
# Start Genasi --------------------------------------------------
class Genasi(Race):
def __init__(self):
super(Genasi, self).__init__()
self.constitution += 2
self.name = 'Genasi'
self.speed = 30
self.abilities = []
self.language.append("Primordial")
self.age = [15, 120]
self.height = [60, 80]
self.weight = [110, 170]
self.skin = []
self.hair = []
self.eyes = []
class AirGenasi(Genasi):
def __init__(self):
super(AirGenasi, self).__init__()
self.dexterity += 1
self.name = 'Air Genasi'
self.abilities.append("UNENDING BREATH")
self.abilities.append("MINGLE WITH THE WIND")
self.magic = True
self.cantrip = ["Levitate"]
self.skin = ['Light Blue', 'Cerulean', 'Cobalt']
self.hair = ['Midnight Blue', 'Electric Blue', 'Azure']
self.eyes = ['Silver-Blue', 'Midnight Blue', 'Violet-Blue']
class EarthGenasi(Genasi):
def __init__(self):
super(EarthGenasi, self).__init__()
self.strength += 1
self.name = 'Earth Genasi'
self.abilities.append("EARTH WALK")
self.abilities.append("MERGE WITH STONE")
self.magic = True
self.cantrip = ["Pass without Trace"]
self.skin = ['Smooth Black Metallic', 'Polished Gold', 'Dull Iron', 'Rusted Copper', 'Shining White Gemstone']
self.hair = ['Dusty Brown', 'Muddy Hide', 'Waves of Smooth Copper']
self.eyes = ['Diamond', 'Tiger\'s Eye', 'Rose Quartz']
class FireGenasi(Genasi):
def __init__(self):
super(FireGenasi, self).__init__()
self.intelligence += 1
self.name = 'Fire Genasi'
self.abilities.append("DARKVISION")
self.abilities.append("FIRE RESISTANCE")
self.abilities.append("REACH TO THE BLAZE")
self.magic = True
self.cantrip = ["Produce Flame"]
self.skin = ['Flaming Red', 'Coal Black', 'Ash Gray', 'White Hot', 'Oxidising Blue']
self.hair = ['Red Flames', 'Blue Flames', 'White Flames', 'Black Flames']
self.eyes = ['White', 'Scarlet', 'Azure', 'Midnight']
class WaterGenasi(Genasi):
def __init__(self):
super(WaterGenasi, self).__init__()
self.wisdom += 1
self.name = 'Water Genasi'
self.abilities.append("ACID RESISTANCE")
self.abilities.append("AMPHIBIOUS")
self.abilities.append("SWIM")
self.abilities.append("CALL TO THE WAVE")
self.magic = True
self.cantrip = ["Shape Water"]
self.skin = ['Aqua Blue', 'Aqua Green', 'Pale White', 'Midnight Black', 'Ash Gray']
self.hair = ['Seafoam Green', 'Dark Green', 'Emerald', 'Azure', 'Sky Blue']
self.eyes = ['White', 'Cyan', 'Mint', 'Midnight']
# Start Gnome --------------------------------------------------
class Gnome(Race):
def __init__(self):
super(Gnome, self).__init__()
self.intelligence += 2
self.name = 'Gnome'
self.speed = 25
self.abilities = ["DARKVISION", "GNOME CUNNING"]
self.language.append("Gnomish")
self.age = [40, 450]
self.height = [36, 48]
self.weight = [30, 60]
self.skin = ['Fair', 'Bronze', 'Ruddy', 'Ash', 'Olive', 'Ebony', 'Silver', 'Azure']
self.hair = ['Black', 'Brunette', 'Auburn', 'Wildfire', 'Blonde']
self.eyes = ['Blue', 'Silver-Blue', 'Black', 'Brown', 'Green', 'Hazel', 'Amber']
class RockGnome(Gnome):
def __init__(self):
super(RockGnome, self).__init__()
self.constitution += 1
self.name = 'Rock Gnome'
self.abilities.append("ARTIFICER'S LORE")
self.abilities.append("TINKER")
class DeepGnome(Gnome):
def __init__(self):
super(DeepGnome, self).__init__()
self.dexterity += 1
self.name = 'Deep Gnome'
self.abilities.append("SUPERIOR DARKVISION")
self.abilities.append("STONE CAMOUFLAGE")
self.language.append("Undercommon")
# Start Goliath --------------------------------------------------
class Goliath(Race):
def __init__(self):
super(Goliath, self).__init__()
self.strength += 2
self.constitution += 1
self.name = 'Goliath'
self.speed = 30
self.abilities = ["NATURAL ATHLETE", "STONE'S ENDURANCE", "POWERFUL BUILD", "MOUNTAIN BORN"]
self.language.append("Giant")
self.age = [15, 80]
self.height = [84, 96]
self.weight = [280, 340]
self.skin = ['Fair', 'Bronze', 'Ruddy', 'Ash', 'Olive', 'Ebony', 'Silver', 'Azure']
self.hair = ['Black', 'Brunette', 'Auburn', 'Wildfire', 'Blonde']
self.eyes = ['Blue', 'Silver-Blue', 'Black', 'Brown', 'Green', 'Hazel', 'Amber']
# Start Half-Elf --------------------------------------------------
class HalfElf(Race):
def __init__(self):
super(HalfElf, self).__init__()
self.charisma += 2
self.name = 'Half-Elf'
self.speed = 30
self.abilities = ["DARKVISION", "FEY ANCESTRY", "SKILL VERSATILITY"]
self.age = [20, 180]
self.height = [60, 76]
self.weight = [120, 200]
self.skin = ['Fair', 'Bronze', 'Ruddy', 'Ash', 'Olive', 'Ebony', 'Silver', 'Azure']
self.hair = ['Black', 'Brunette', 'Auburn', 'Wildfire', 'Blonde']
self.eyes = ['Blue', 'Silver-Blue', 'Black', 'Brown', 'Green', 'Hazel', 'Amber']
self.language.append("Elvish")
clearscreen()
self.language.append(str(choose_language(self.language)))
# Start HalfOrc --------------------------------------------------
class HalfOrc(Race):
    """Half-Orc race: +2 Strength, +1 Constitution (5e PHB)."""

    def __init__(self):
        super(HalfOrc, self).__init__()
        self.strength += 2
        # BUG FIX: the original applied `self.strength += 1` a second time;
        # per the 5e Player's Handbook the Half-Orc bonus is +2 Str / +1 Con.
        self.constitution += 1
        self.name = 'Half-Orc'
        self.speed = 30
        self.abilities = ["DARKVISION", "MENACING", "RELENTLESS ENDURANCE", "SAVAGE ATTACKS"]
        self.language.append("Orc")
        self.age = [14, 75]
        self.height = [60, 86]
        self.weight = [120, 220]
        self.skin = ['Pale Gray', 'Scarlet', 'Ash', 'Pickle', 'Emerald', 'Dark Green', 'Midnight Blue']
        self.hair = ['Black', 'Brunette', 'Auburn', 'Wildfire', 'Blonde']
        self.eyes = ['Blue', 'Silver-Blue', 'Black', 'Brown', 'Green', 'Hazel', 'Amber']
# Start Halfling --------------------------------------------------
class Halfling(Race):
def __init__(self):
super(Halfling, self).__init__()
self.dexterity += 2
self.name = 'Halfling'
self.speed = 25
self.abilities = ["LUCKY", "BRAVE", "HALFLING NIMBLENESS"]
self.language.append("Halfling")
self.age = [20, 160]
self.height = [28, 38]
self.weight = [30, 50]
self.skin = ['Fair', 'Bronze', 'Ruddy', 'Ash', 'Olive', 'Ebony', 'Silver', 'Azure']
self.hair = ['Black', 'Brunette', 'Auburn', 'Wildfire', 'Blonde']
self.eyes = ['Blue', 'Silver-Blue', 'Black', 'Brown', 'Green', 'Hazel', 'Amber']
class LightfootHalfling(Halfling):
def __init__(self):
super(LightfootHalfling, self).__init__()
self.charisma += 1
self.name = 'Lightfoot Halfling'
self.abilities.append("NATURALLY STEALTHY")
class StoutHalfling(Halfling):
def __init__(self):
super(StoutHalfling, self).__init__()
self.constitution += 1
self.name = 'Stout Halfling'
self.abilities.append("STOUT RESILIENCE")
# Start Human --------------------------------------------------
class Human(Race):
def __init__(self):
super(Human, self).__init__()
self.constitution += 1
self.charisma += 1
self.dexterity += 1
self.intelligence += 1
self.wisdom += 1
self.strength += 1
self.name = 'Human'
self.speed = 30
self.age = [20, 80]
self.height = [48, 80]
self.weight = [90, 200]
self.skin = ['Fair', 'Bronze', 'Ruddy', 'Ash', 'Olive', 'Ebony']
self.hair = ['Black', 'Brunette', 'Auburn', 'Wildfire', 'Blonde']
self.eyes = ['Blue', 'Silver-Blue', 'Black', 'Brown', 'Green', 'Hazel', 'Amber']
self.language.append(str(choose_language(self.language)))
self.abilities = list()
# Start Tiefling --------------------------------------------------
class Tiefling(Race):
def __init__(self):
super(Tiefling, self).__init__()
self.intelligence += 1
self.charisma += 2
self.name = 'Tiefling'
self.speed = 30
self.age = [20, 80]
self.height = [48, 80]
self.weight = [90, 200]
self.skin = ['Pink', 'Scarlet', 'Blood']
self.hair = ['Violet', 'Midnight Blue', 'Azure', 'Wildfire', 'Blonde']
self.eyes = ['Blue', 'Silver-Blue', 'Black', 'Brown', 'Green', 'Hazel', 'Amber']
self.abilities = ["DARKVISION", "HELLISH RESISTANCE", "INFERNAL LEGACY"]
self.magic = True
self.cantrip = ["Thaumaturgy"]
self.language.append("Infernal")
| brian-chalfant/CharacterBuilder | Race.py | Race.py | py | 15,442 | python | en | code | 1 | github-code | 13 |
3281297110 | from pytrends.request import TrendReq
from google_trends.models import Trend
from .serializers import TrendSerializer
from rest_framework import generics, decorators
class TrendsView(generics.ListAPIView):
    # Read-only DRF endpoint listing the 10 most recently created Trend rows.
    # NOTE(review): a sliced class-level queryset can cache results across
    # requests in some DRF configurations -- confirm freshness is acceptable.
    queryset = Trend.objects.all().order_by('-created_on')[:10]
    serializer_class = TrendSerializer
def get_trends():
    """Collect the current trending Google searches for each configured
    country and return them as a flat list (10 entries per country)."""
    pytrends = TrendReq(hl='en-US')
    countries = ['united_states', 'united_kingdom']
    trends = []
    def trending_searches(country):
        # trending_searches returns a DataFrame; column 0 holds the queries.
        data = pytrends.trending_searches(country)
        for trend in data.head(10)[0].array:
            trends.append(trend)
    for country in countries:
        trending_searches(country)
    return trends
| gicu-90/phemecheck | google_trends/views.py | views.py | py | 690 | python | en | code | 0 | github-code | 13 |
11603524851 | #
# @lc app=leetcode.cn id=167 lang=python3
#
# [167] 两数之和 II - 输入有序数组
#
# @lc code=start
class Solution:
    def twoSum(self, numbers: List[int], target: int) -> List[int]:
        """Return the 1-based indices of the two entries of the sorted,
        non-decreasing list `numbers` that sum to `target`, or [] when no
        such pair exists.

        Uses the canonical two-pointer scan: O(n) time, O(1) extra space.
        This replaces the original per-element binary search, which was
        O(n log n) and, on failure, executed `return ans.sort()` -- i.e.
        returned None (and raised NameError for empty input).
        """
        lo, hi = 0, len(numbers) - 1
        while lo < hi:
            current = numbers[lo] + numbers[hi]
            if current == target:
                return [lo + 1, hi + 1]
            if current < target:
                lo += 1   # need a larger sum: advance the low pointer
            else:
                hi -= 1   # need a smaller sum: retreat the high pointer
        return []
# @lc code=end
| RGBRYANT24/LeetCodePractice_PY | 167.两数之和-ii-输入有序数组.py | 167.两数之和-ii-输入有序数组.py | py | 1,166 | python | en | code | 0 | github-code | 13 |
30545073878 | # https://github.com/huggingface/transformers/blob/master/examples/pytorch/question-answering/run_qa_no_trainer.py
import re
import copy
def overflow_to_sample_mapping(tokens, offsets, idx, max_len = 384, doc_stride = 128):
    # Split a (question, context) token sequence into overlapping fixed-size
    # windows, mimicking HuggingFace's return_overflowing_tokens behaviour.
    # `tokens` contains the question, a -100 separator, then the context;
    # `offsets` holds the parallel (start, end) character spans.
    # Returns (windowed tokens, windowed offsets, per-window sample index,
    # per-window sequence ids where 0=question, 1=context, None=special/pad).
    fixed_tokens = []
    fixed_offsets = []
    sep_index = tokens.index(-100)
    question = tokens[:sep_index]
    context = tokens[sep_index+1:]
    q_offsets = offsets[:sep_index]
    c_offsets = offsets[sep_index+1:]
    q_len = len(question)
    c_len = len(context)
    st_idx = 0
    samplings = []
    sequences = []
    while True:
        # Each window holds the full question, one separator token (0), and
        # as much context as fits; the remainder is zero-padded to max_len.
        ed_idx = st_idx+max_len-q_len-1
        pad_re = max_len - len(question+ [0] + context[st_idx:ed_idx])
        if len(context[st_idx:ed_idx]) == 0:
            break
        curr_tokens = question+[0] + context[st_idx:ed_idx] + [0] * pad_re
        curr_offset = q_offsets+[(0,0)] + c_offsets[st_idx:ed_idx] + [(0,0)] * pad_re
        curr_seq = [0]*q_len+[None]+[1]*len(context[st_idx:ed_idx])+[None] * pad_re
        assert len(curr_tokens) == len(curr_offset) == len(curr_seq) == max_len, f"curr_tokens: {len(curr_tokens)}, curr_seq: {len(curr_seq)}"
        fixed_tokens.append(curr_tokens[:max_len])
        fixed_offsets.append(curr_offset[:max_len])
        samplings.append(idx)
        sequences.append(curr_seq)
        # Advance by doc_stride so consecutive windows overlap.
        st_idx += doc_stride
        # Padding present means the remaining context fit entirely: done.
        if pad_re > 0:
            break
    for i in range(len(fixed_tokens)):
        assert len(fixed_tokens[i]) == len(fixed_offsets[i])
    return fixed_tokens, fixed_offsets, samplings, sequences
def prepare_features(examples, tokenizer, data_config, model_type = 'transformer', max_len = 384):
# Tokenize our examples with truncation and padding, but keep the overflows using a stride. This results
# in one example possible giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
if 'transformer' in model_type:
tokenized_examples = tokenizer(
examples["question"],
examples["context"],
truncation="only_second",
max_length=max_len,
stride=128,
return_overflowing_tokens=True,
return_offsets_mapping=True,
padding="max_length",
)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
# The offset mappings will give us a map from token to character position in the original context. This will
# help us compute the start_positions and end_positions.
offset_mapping = tokenized_examples["offset_mapping"]
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
# Let's label those examples!
tokenized_examples["start_positions"] = []
tokenized_examples["end_positions"] = []
for i, offsets in enumerate(offset_mapping):
# We will label impossible answers with the index of the CLS token.
input_ids = tokenized_examples["input_ids"][i]
cls_index = input_ids.index(tokenizer.cls_token_id)
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_examples.sequence_ids(i)
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = sample_mapping[i]
answers = examples["answers"][sample_index]
# If no answers are given, set the cls_index as answer.
if len(answers["answer_start"]) == 0:
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
else:
# Start/end character index of the answer in the text.
start_char = answers["answer_start"][0]
end_char = start_char + len(answers["text"][0])
# Start token index of the current span in the text.
token_start_index = 0
while sequence_ids[token_start_index] != 1:
token_start_index += 1
# End token index of the current span in the text.
token_end_index = len(input_ids) - 1
while sequence_ids[token_end_index] != 1:
token_end_index -= 1
# Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
else:
# Otherwise move the token_start_index and token_end_index to the two ends of the answer.
# Note: we could go after the last offset if the answer is the last word (edge case).
while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
token_start_index += 1
tokenized_examples["start_positions"].append(token_start_index - 1)
while offsets[token_end_index][1] >= end_char:
token_end_index -= 1
tokenized_examples["end_positions"].append(token_end_index + 1)
tokenized_examples["example_id"] = []
for i in range(len(tokenized_examples["input_ids"])):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_examples.sequence_ids(i)
context_index = 1
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = sample_mapping[i]
tokenized_examples["example_id"].append(examples["id"][sample_index])
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
tokenized_examples["offset_mapping"][i] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["offset_mapping"][i])
]
return tokenized_examples
else:
question_col, context_col = data_config['text'].split(",")
tokenized_examples = copy.deepcopy(examples)
input_ids = []
offset_mapping = []
sequence_ids = []
sample_mapping = []
for i, (question, context) in enumerate(zip(examples[question_col], examples[context_col])):
offsets = []
tokens = []
sequences = []
question_context = question + " <sp> "+context
st = 0
for word in question_context.split(" "):
if len(word) == 0:
st += 1
continue
word = word.strip()
if word == "<sp>":
offsets.append((0, 0))
tokens.append(-100)
st = 0
else:
token_ids = tokenizer._encode_word(word)
token_ids = [token_id for token_id in token_ids]
token_strs = tokenizer._tokenize_word(word, remove_sow=True)
if token_ids[0] == tokenizer.sow_idx:
token_strs = [tokenizer.sow] + token_strs
for j, token_id in enumerate(token_ids):
token_str = token_strs[j]
tokens.append(token_id)
if token_str == tokenizer.sow:
offsets.append((st, st))
else:
offsets.append((st, st+len(token_str)))
st += len(token_str)
st += 1 # for space
tokens, offsets, samplings, sequences = overflow_to_sample_mapping(tokens, offsets, i, max_len = max_len)
sample_mapping += samplings
input_ids += tokens
offset_mapping += offsets
sequence_ids += sequences
tokenized_examples = {'input_ids':input_ids, 'sequence_ids':sequence_ids, 'offset_mapping': offset_mapping, 'overflow_to_sample_mapping': sample_mapping}
tokenized_examples["start_positions"] = []
tokenized_examples["end_positions"] = []
for i, offsets in enumerate(offset_mapping):
# We will label impossible answers with the index of the CLS token.
input_ids = tokenized_examples["input_ids"][i]
# cls_index = input_ids.index(tokenizer.cls_token_id)
cls_index = 0
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_examples['sequence_ids'][i]
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = tokenized_examples["overflow_to_sample_mapping"][i]
answers = examples["answers"][sample_index]
# If no answers are given, set the cls_index as answer.
if len(answers["answer_start"]) == 0:
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
else:
# Start/end character index of the answer in the text.
start_char = answers["answer_start"][0]
end_char = start_char + len(answers["text"][0])
# Start token index of the current span in the text.
token_start_index = 0
while sequence_ids[token_start_index] != 1:
token_start_index += 1
# End token index of the current span in the text.
token_end_index = len(input_ids) - 1
while sequence_ids[token_end_index] != 1:
token_end_index -= 1
# Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
else:
# Otherwise move the token_start_index and token_end_index to the two ends of the answer.
# Note: we could go after the last offset if the answer is the last word (edge case).
while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
token_start_index += 1
tokenized_examples["start_positions"].append(token_start_index - 1)
while offsets[token_end_index][1] >= end_char:
token_end_index -= 1
tokenized_examples["end_positions"].append(token_end_index + 1)
tokenized_examples["example_id"] = []
for i in range(len(tokenized_examples["input_ids"])):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_examples['sequence_ids'][i]
context_index = 1
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = sample_mapping[i]
tokenized_examples["example_id"].append(examples["id"][sample_index])
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
tokenized_examples["offset_mapping"][i] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["offset_mapping"][i])
]
return tokenized_examples | ARBML/nmatheg | nmatheg/preprocess_qa.py | preprocess_qa.py | py | 12,624 | python | en | code | 21 | github-code | 13 |
7676923882 | import sympy
def number_of_ways(arr, total):
    """Count the order-insensitive combinations of values from `arr`
    (repetition allowed) that sum to `total` -- classic coin-change DP."""
    # ways[s] = number of combinations summing to s; processing one coin at
    # a time (outer loop) keeps the count order-insensitive.
    ways = [1] + [0] * total
    for coin in arr:
        for amount in range(coin, total + 1):
            ways[amount] += ways[amount - coin]
    return ways[total]
primes = list(sympy.sieve.primerange(1, 100000))
array = []
n = 10
for idx in primes:
if idx >= 10:
break
array.append(idx)
while 1:
if number_of_ways(array, n) > 5000:
break
n += 1
array.clear()
for idx in primes:
if idx >= n:
break
array.append(idx)
print(n)
print(number_of_ways(array, n)) | notBlurryFace/project-euler | PE077.py | PE077.py | py | 631 | python | en | code | 1 | github-code | 13 |
17794939140 | ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: oneview_appliance_device_snmp_v3_trap_destinations
short_description: Manage the Appliance Device SNMPv3 Trap Destinations.
description:
- Provides an interface to manage the Appliance Device SNMPv3 Trap Destinations.
version_added: "2.5"
requirements:
- "python >= 2.7.9"
- "hpOneView >= 4.8.0"
author:
"Gianluca Zecchi (@gzecchi)"
options:
state:
description:
- Indicates the desired state for the Appliance Device SNMPv3 Trap Destinations.
C(present) ensures data properties are compliant with OneView.
C(absent) removes the resource from OneView, if it exists.
choices: ['present', 'absent']
data:
description:
- List with the SNMPv3 Trap Destinations properties
required: true
extends_documentation_fragment:
- oneview
'''
EXAMPLES = '''
- name: Ensure that the SNMPv3 Trap Destination is present
oneview_appliance_device_snmp_v3_trap_destinations:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
state: present
data:
type: "Destination"
destinationAddress: "10.0.0.1"
port: 162
userId: "8e57d829-2f17-4167-ae23-8fb46607c76c"
delegate_to: localhost
- debug:
var: oneview_appliance_device_snmp_v3_trap_destinations
- name: Update the userId of specified SNMPv3 Trap Destination
oneview_appliance_device_snmp_v3_trap_destinations:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
state: present
data:
destinationAddress: "10.0.0.1"
userId: "3953867c-5283-4059-a9ae-33487f901e85"
delegate_to: localhost
- debug:
var: oneview_appliance_device_snmp_v3_trap_destinations
- name: Ensure that the SNMPv3 Trap Destination is absent
oneview_appliance_device_snmp_v3_trap_destinations:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
state: absent
data:
destinationAddress: "10.0.0.1"
delegate_to: localhost
'''
RETURN = '''
oneview_appliance_device_snmp_v3_trap_destinations:
description: Has all the OneView facts about the OneView appliance SNMPv3 Trap Destination.
returned: On state 'present'. Can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase, OneViewModuleException, OneViewModuleValueError, OneViewModuleResourceNotFound
class ApplianceDeviceSnmpV3TrapDestinationsModule(OneViewModuleBase):
MSG_CREATED = 'Appliance Device SNMPv3 Trap Destination created successfully.'
MSG_UPDATED = 'Appliance Device SNMPv3 Trap Destination updated successfully.'
MSG_DELETED = 'Appliance Device SNMPv3 Trap Destination deleted successfully.'
MSG_USER_NOT_FOUND = 'Appliance Device SNMPv3 User not found.'
MSG_ALREADY_PRESENT = 'Appliance Device SNMPv3 Trap Destination is already present.'
MSG_ALREADY_ABSENT = 'Appliance Device SNMPv3 Trap Destination is already absent.'
MSG_VALUE_ERROR = 'The destinationAddress or the id attrbiutes must be specfied'
MSG_API_VERSION_ERROR = 'This module requires at least OneView 4.0 (API >= 600)'
RESOURCE_FACT_NAME = 'appliance_device_snmp_v3_trap_destinations'
argument_spec = dict(
data=dict(required=True, type='dict'),
state=dict(
required=True,
choices=['present', 'absent'])
)
def __init__(self):
super(ApplianceDeviceSnmpV3TrapDestinationsModule, self).__init__(additional_arg_spec=self.argument_spec, validate_etag_support=True)
self.resource_client = self.oneview_client.appliance_device_snmp_v3_trap_destinations
def execute_module(self):
if self.oneview_client.api_version < 600:
raise OneViewModuleValueError(self.MSG_API_VERSION_ERROR)
self.__replace_snmpv3_username_by_uri(self.data)
if self.data.get('id'):
query = self.resource_client.get_by_id(self.data.get('id'))
resource = query[0] if query and query[0].get('id') == self.data['id'] else None
elif self.data.get('destinationAddress'):
query = self.resource_client.get_by('destinationAddress', self.data.get('destinationAddress'))
resource = query[0] if query and query[0].get('destinationAddress') == self.data['destinationAddress'] else None
else:
raise OneViewModuleValueError(self.MSG_VALUE_ERROR)
if self.state == 'present':
return self.resource_present(resource, self.RESOURCE_FACT_NAME)
elif self.state == 'absent':
return self.resource_absent(resource)
def __get_snmpv3_user_by_username(self, username):
result = self.oneview_client.appliance_device_snmp_v3_users.get_by('userName', username)
return result[0] if result else None
def __get_snmpv3_user_by_uri(self, snmpv3_user_name_or_uri):
    """Resolve a user name *or* URI to a canonical SNMPv3 user URI.

    Raises OneViewModuleResourceNotFound when a name cannot be resolved.
    """
    if snmpv3_user_name_or_uri.startswith('/rest/appliance/snmpv3-trap-forwarding/users'):
        # Already a URI — nothing to resolve.
        return snmpv3_user_name_or_uri
    snmpv3_user = self.__get_snmpv3_user_by_username(snmpv3_user_name_or_uri)
    if not snmpv3_user:
        raise OneViewModuleResourceNotFound(self.MSG_USER_NOT_FOUND)
    return snmpv3_user['uri']
def __replace_snmpv3_username_by_uri(self, data):
    """In place, replace data['userUri'] (name or URI) with the resolved URI.

    No-op when the key is absent.
    """
    try:
        user_ref = data['userUri']
    except KeyError:
        return
    data['userUri'] = self.__get_snmpv3_user_by_uri(user_ref)
def main():
    # Ansible entry point: instantiate the module wrapper and run it.
    ApplianceDeviceSnmpV3TrapDestinationsModule().run()
if __name__ == '__main__':
    main()
| bryansullins/baremetalesxi-hpesynergyoneview | library/oneview_appliance_device_snmp_v3_trap_destinations.py | oneview_appliance_device_snmp_v3_trap_destinations.py | py | 5,767 | python | en | code | 1 | github-code | 13 |
71964744978 | # -*- coding: utf-8 -*-
"""
Django settings for VideoPair project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_PATH = os.path.abspath(os.path.dirname(__name__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!$__6pm@zeu3355q-zs7o#h6cn=c@0#mii_u&yu+m)7j+3#7kn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'VideoPair.urls'
WSGI_APPLICATION = 'VideoPair.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(PROJECT_PATH, 'static'),
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_DIRS = (
'./Templates',
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
SESSION_COOKIE_AGE = 5 * 24 * 60 * 60
FIXTURE_DIRS = (
'./Fixtures/',
)
ITEMS_ON_PAGE = 25
EMAIL_HOST = 'smtp.yandex.ru'
EMAIL_PORT = '465'
EMAIL_HOST_USER = 'video.pair@yandex.ru' # account
EMAIL_HOST_PASSWORD = 'videomatting' # password
EMAIL_USE_SSL = True
ALERT_EMAIL = 'merofeev@graphics.cs.msu.ru' # email for alerts
VIDEO_CORE_PATH = 'http://ec2-54-218-67-110.us-west-2.compute.amazonaws.com/content/videos/composites'
| avrybintsev/VideoPair | VideoPair/settings.py | settings.py | py | 4,115 | python | en | code | 0 | github-code | 13 |
20941913846 | '''
A
B C
D E F
G H I J
K L M N O
'''
# Read the number of rows for the right-aligned letter triangle.
s = int(input('Enter the number:'))
asciiValue = 65  # ord('A'); incremented for every letter printed
m = (2 * s) - 2  # width of the leading-space margin for the first row
for i in range(0, s):
    # Print the left margin, shrinking it by one column per row.
    for j in range(0, m):
        print(end=" ")
    m = m - 1
    # Print i + 1 consecutive letters, continuing the alphabet across rows.
    for j in range(0, i + 1):
        alphabate = chr(asciiValue)
        print(alphabate, end=' ')
        asciiValue += 1
print() | Rohit-saxena125/Python-code | Loop/pattern16.py | pattern16.py | py | 374 | python | en | code | 0 | github-code | 13 |
35931640253 | from collections import deque
# Board dimensions: M = columns, N = rows per layer, H = layers.
M, N, H = map(int, input().split())
# graph[z][x][y]: -1 = wall, 0 = unripe tomato, 1 = ripe tomato.
graph = [[list(map(int, input().split())) for _ in range(N)] for _ in range(H)]
# Six axis-aligned neighbour offsets (±row, ±column, ±layer).
dx = [-1, 1, 0, 0, 0, 0]
dy = [0, 0, -1, 1, 0, 0]
dz = [0, 0, 0, 0, -1, 1]
queue = deque()
def bfs():
    # Multi-source BFS from every ripe tomato: an unripe neighbour gets the
    # current cell's value + 1, so a cell's final value is 1 + days to ripen.
    while queue:
        z, x, y= queue.popleft()
        for i in range(6):
            nx = x + dx[i]
            ny = y + dy[i]
            nz = z + dz[i]
            if 0 <= nx < N and 0 <= ny < M and 0 <= nz < H:
                if graph[nz][nx][ny] == 0:
                    graph[nz][nx][ny] = graph[z][x][y] + 1
                    queue.append((nz, nx, ny))
# Find every ripe tomato (value 1) and seed the BFS with it.
for i in range(H):
    for j in range(N):
        for k in range(M):
            if graph[i][j][k] == 1:
                queue.append((i, j, k))
bfs()
flag = False
maxDay = 0
for i in range(H):
    for j in range(N):
        for k in range(M):
            if graph[i][j][k] == 0:
                # A tomato the BFS never reached stays unripe -> answer is -1.
                flag = True
                break
            maxDay = max(maxDay, graph[i][j][k])
if flag:
    print(-1)
else:
    # Cell values start at 1 on day 0, so elapsed days = maxDay - 1.
    print(maxDay-1)
| YJeongs/Backjoon | 백준/Gold/7569. 토마토/토마토.py | 토마토.py | py | 1,148 | python | en | code | 0 | github-code | 13 |
44974392676 | from .objects import Ray, Sphere, Triangle, Point, Vector
import numpy as np
def intersect(first_object, second_object):
    """Dispatch intersection between two scene objects — not implemented yet."""
    ...
def _intersect_ray_with_sphere(ray, sphere):
    """Ray-sphere intersection via the quadratic discriminant.

    NOTE(review): this function *mutates its arguments* — it overwrites
    ray._origin / ray._direction / sphere._center with their raw numpy
    arrays, so the wrapper objects are degraded afterwards. It also appears
    to assume ray._direction is a unit vector in the discriminant (TODO
    confirm), and on a miss it returns the misspelled sentinel string
    "No Intersecction", which callers must match exactly.
    """
    # Unwrap the wrapper objects into raw numpy arrays (destructive).
    ray._origin = ray._origin._point
    ray._direction = ray._direction._vector
    sphere._center= sphere._center._point
    # Discriminant of |o + t*d - c|^2 = r^2 (with d presumed normalized).
    nabla = (np.dot(ray._direction,(ray._origin - sphere._center))**2 - (np.linalg.norm((ray._origin - sphere._center))**2 - sphere._radius**2))
    if nabla < 0:
        # No real roots: the ray misses the sphere.
        return "No Intersecction"
    if nabla == 0:
        # Tangent case: returns the scalar parameter t, not a Point.
        return -1*np.dot(ray._direction,(ray._origin - sphere._center))
    if nabla > 0:
        # Origin inside (or on) the sphere: only the forward hit exists.
        if np.sum((ray._origin - sphere._center)**2) <= sphere._radius**2:
            d1 = -1*np.dot(ray._direction,(ray._origin - sphere._center)) + np.sqrt(nabla)
            vector_1 = d1*(ray._direction/np.sqrt(np.sum(ray._direction**2))) + ray._origin
            point_1 = np.round(((ray._origin - 0*ray._origin) + (vector_1 - ray._origin)),decimals=2)
            return Point(point_1)
        # Origin outside the sphere: two candidate hits (d1 = near, d2 = far).
        if np.sum((ray._origin - sphere._center)**2 )> sphere._radius**2:
            d2 = -1*np.dot(ray._direction,(ray._origin - sphere._center)) + np.sqrt(nabla)
            d1 = -1*np.dot(ray._direction,(ray._origin - sphere._center)) - np.sqrt(nabla)
            vector_1 = d1*(ray._direction/np.sqrt(np.sum(ray._direction**2))) + ray._origin
            vector_2 = d2*(ray._direction/np.sqrt(np.sum(ray._direction**2))) + ray._origin
            point_1 = np.round(((ray._origin - 0*ray._origin) + (vector_1 - ray._origin)),decimals=2)
            point_2 = np.round(((ray._origin - 0*ray._origin) + (vector_2 - ray._origin)),decimals=2)
            # Accept only when point_1 lies along the ray's forward direction.
            if np.isclose(((point_1 - ray._origin )/(np.sqrt(np.sum((point_1 - ray._origin)**2)))), ray._direction).all():
                return Point(point_1),Point(point_2)
            else:
                return "No Intersecction"
    ...
def _intersect_ray_with_triangle(ray, triangle):
    """Ray-triangle intersection — not implemented yet."""
    ...
| neo0311/pygeo_64845 | src/pygeo/intersect.py | intersect.py | py | 1,954 | python | en | code | 0 | github-code | 13 |
12343974255 | # Program to check if the given binary tree is height balanced or not.
# Height Balanced Tree means for every node difference bewteen height of left subtree and right subtree should be atmost 1.
# If height of left sub tree is h1, if height of right sub tree is h2, then at every node, |h1 - h2| <= 1.
# IDEA: so, logic is to simply we can get height of left subtree and right subtree at every node using postorder traversal.
# So, basically we can get something like :
# def isBalanced(root):
# if root == None:
# return True
# lheight, rheight = isBalanced(root.left), isBalanced(root.right)
# then , we can check here, if abs(lheight - rheight) <= 1 and isBalanced(root.left) and isBalanced(root.right):
# return True
# otherwise finally return False
# This would take upto 0(N^2) and SPACE : 0(N).
# ---------------------------------------------------------
# So, can we do better than this ?
# Yes, we can follow bottom up approach and return a tuple containing (height at that node, True/False based on if balanced or not).
# In this way, we can move till upto root, which will return if tree is height balanced or not.
# BELOW CODE IS FOR THIS OPTIMIZED APPROACH :
# TIME : 0(N), SPACE : 0(N).
# ------------------------------------------------------------
# We can take the following tree as an example and visualize the call stack :
# 1
# / \
# 2 3
# / \
# 4 5
# --------------------------------
class Node:
    """A single binary-tree node: a payload plus two child links."""

    def __init__(self, data):
        self.data = data    # value stored at this node
        self.left = None    # left child, None when absent
        self.right = None   # right child, None when absent
# Result record carrying a subtree's height and whether it is height-balanced.
class Pair:
    def __init__(self):
        self.height = 0
        self.balance = True


# Bottom-up (post-order) check: each call returns the Pair for its subtree,
# so every node is visited exactly once — O(n) time, O(h) recursion depth.
def isBalanced(root):
    result = Pair()
    if root is None:
        # An empty subtree has height 0 and is trivially balanced.
        return result
    left_info = isBalanced(root.left)
    right_info = isBalanced(root.right)
    result.height = 1 + max(left_info.height, right_info.height)
    result.balance = (
        left_info.balance
        and right_info.balance
        and abs(left_info.height - right_info.height) <= 1
    )
    return result
if __name__ == '__main__':
    # Build the sample tree:
    #         1
    #        / \
    #       2   3
    #      / \
    #     4   5
    root = Node(1)
    root.left = Node(2)
    root.right = Node(3)
    root.left.left = Node(4)
    root.left.right = Node(5)
    # Heights differ by at most 1 at every node, so this prints True.
    print(isBalanced(root).balance)
| souravs17031999/100dayscodingchallenge | binary trees/check_Height_balanced_binary_tree.py | check_Height_balanced_binary_tree.py | py | 2,450 | python | en | code | 43 | github-code | 13 |
42302036384 | n = int(input())
total = 0
graph = [[0]*101 for _ in range(101)] # build a 101 x 101 board covering coordinates 1..100
for _ in range(n):
    a,b = map(int,input().split())
    for i in range(a,a+10): # over the 10 x 10 square anchored at (a, b)
        for j in range(b,b+10): # mark every covered cell with 1
            graph[i][j] = 1
for i in range(1,101):
    cnt = graph[i].count(1) # count the cells marked 1 in this row and add them to total
    total += cnt
print(total) | Lee-GS/Algorithm-with-python | 구현/색종이_2563.py | 색종이_2563.py | py | 503 | python | ko | code | 0 | github-code | 13 |
13103756784 | # https://school.programmers.co.kr/learn/courses/30/lessons/87946
def solution(k, dungeons, num_dungeons=0):
    """Return the maximum number of dungeons clearable with stamina *k*.

    dungeons: list of [required_stamina, consumed_stamina] pairs.
    num_dungeons: dungeons already cleared on this recursion branch.
    Brute-forces every visiting order by recursing on the remaining list.
    """
    if len(dungeons) == 0:
        return num_dungeons
    cur_max = num_dungeons
    for i in range(len(dungeons)):
        # Enterable when k meets the requirement (the k >= consumed check is
        # presumably redundant when required >= consumed — kept conservatively).
        if k >= dungeons[i][0] and k >= dungeons[i][1]:
            cur_max = max(cur_max, solution(k-dungeons[i][1], dungeons[:i]+dungeons[i+1:], num_dungeons+1))
return cur_max | olwooz/algorithm-practice | practice/2022_12/221204_Programmers_Dungeons/221204_Programmers_Dungeons.py | 221204_Programmers_Dungeons.py | py | 420 | python | en | code | 0 | github-code | 13 |
21658524374 | import json
import urllib
import os
import driver_helper
import consts
import logger
class Tab4uCrawler:
def __init__(self):
self.my_driver = None
self.crush_msg = "unknown error: session deleted because of page crash"
self.skipped_artists = []
def handle_crash(self, url, e):
if self.crush_msg in str(e):
logger.warning(f"Reloading the chrome driver")
self.my_driver = driver_helper.DriverHelper()
self.my_driver.get_chrome_driver(consts.CHROME_DRIVER_PATH)
self.my_driver.driver.get(url)
return True
else:
return False
# ##################################################################################################################
# ########################################### pages navigation #####################################################
    def navigate_all_pages_by_letters(self, url):
        """Iterate the per-letter artist index pages reachable from *url* and
        crawl each one (recording artist URLs into their JSON files).

        Every failure reloads *url* and continues; artists skipped along the
        way (accumulated in self.skipped_artists) are logged at the end.
        """
        full_artists_lists_a_xpath = "//li[@class='more']/a"
        full_artists_lists_xpath = "//li[@class='more']"
        # check if there are multiple pages
        try:
            artists_by_letter_pages = self.my_driver.find_elements_by_xpath(full_artists_lists_a_xpath)
            for idx, _ in enumerate(artists_by_letter_pages):
                try:
                    # Re-resolve the element by index each pass — the DOM is
                    # reloaded after every navigation.
                    artist_a_xpath_by_idx = f"{self.my_driver.xpath_by_idx(full_artists_lists_xpath, idx)}/a"
                    artist_a_element = self.my_driver.find_element_by_xpath(artist_a_xpath_by_idx)
                    next_url = artist_a_element.get_attribute('href')
                    logger.notice(f"Moving to a page of artists by letter, url is {urllib.parse.unquote(next_url)}")
                    self.my_driver.try_click(url, artist_a_element)
                    # self.navigate_artists(url)
                    self.navigate_artists_to_add_urls(url)
                    self.my_driver.driver.get(url)
                except Exception as e:
                    # NOTE(review): "loaf" in the message below is a typo for
                    # "load" (runtime string left untouched here).
                    logger.warning(f"Could not loaf this index page: {idx}, Reloading")
                    self.my_driver.driver.get(url)
        except Exception as e:
            logger.warning(f"Could not load this letter page: {url}, Reloading")
            self.my_driver.driver.get(url)
        logger.notice(f"\nSkipped artists list:")
        for skipped_artist in self.skipped_artists:
            logger.log(skipped_artist)
def navigate_artists_to_add_urls(self, url):
""" navigates artist pages, add to the json files the url for each artist """
self.navigate_pages(url, None, None, self.add_url_for_artist)
    def add_url_for_artist(self, url):
        """For each artist link on the current page, copy json_files/new<name>.json
        into json_files_edit/<name>.json with the artist's page URL added under
        consts.URL. Artists without an existing JSON file are recorded in
        self.skipped_artists.
        """
        artists_a_xpath = "//a[@class='searchLink']"
        try:
            artists_a_element = self.my_driver.find_elements_by_xpath(artists_a_xpath)
            for idx, _ in enumerate(artists_a_element):
                artist_a_xpath_by_idx = self.my_driver.xpath_by_idx(artists_a_xpath, idx)
                artist_a_element = self.my_driver.find_element_by_xpath(artist_a_xpath_by_idx)
                artist_name = artist_a_element.text
                # get the url for the artist's page
                try:
                    artist_url = artist_a_element.get_attribute('href')
                    logger.log(f"Artist name {artist_name}, url is {urllib.parse.unquote(artist_url)}")
                    # fix artist name to the new schema: apostrophes become underscores
                    if "' " in artist_name:
                        new_artist_name = artist_name.replace("' ", "_")
                    elif "'" in artist_name:
                        new_artist_name = artist_name.replace("'", "_")
                    else:
                        new_artist_name = artist_name
                    artist_json_file_path = f"json_files/new{new_artist_name}.json"
                    new_artist_json_file_path = f"json_files_edit/{artist_name}.json"
                    # add to the json file - check if the artist exists - if not - skip
                    if os.path.exists(artist_json_file_path):
                        with open(artist_json_file_path) as original_json_file:
                            original_json = json.load(original_json_file)
                        original_json.update({consts.URL: urllib.parse.unquote(artist_url)})
                        with open(new_artist_json_file_path, 'w', encoding='utf8') as outfile:
                            json.dump(original_json, outfile, ensure_ascii=False)
                    else:
                        logger.warning(f"Skipped artist: {artist_name} because it does not exists")
                        self.skipped_artists.append(artist_name)
                except Exception as e:
                    if not self.handle_crash(url, e):
                        logger.warning(f"Failed to click on the artist {artist_name} page, exception: {e}. Reloading")
                        self.my_driver.driver.get(url)
        except Exception as e:
            if not self.handle_crash(url, e):
                logger.warning(f"Couldn't get artists for this url: {url}. Reloading")
                self.my_driver.driver.get(url)
    def navigate_pages(self, url, data_lst, artist_name, single_page_func):
        """Run *single_page_func* over one page — or over every page when a
        pagination bar is present (either 'pagination row' or 'pagination'
        markup) — and return the accumulated result.

        When data_lst is None the handler is called as f(url); otherwise as
        f(url, data_lst, artist_name).
        """
        next_page_nav_xpath = "//div[@class='pagination row']"
        # check if there are multiple pages
        try:
            self.my_driver.find_element_by_xpath(next_page_nav_xpath)
            return self.navigate_multiple_pages(url, data_lst, artist_name, single_page_func)
        except Exception as e:
            # Fall back to the alternative pagination markup.
            next_page_nav_xpath = "//div[@class='pagination']"
            # check if there are multiple pages
            try:
                self.my_driver.find_element_by_xpath(next_page_nav_xpath)
                return self.navigate_multiple_pages(url, data_lst, artist_name, single_page_func)
            except Exception as e:
                # No pagination at all: process just the current page.
                return single_page_func(url) if data_lst is None else single_page_func(url, data_lst, artist_name)
    def navigate_multiple_pages(self, url, data_dict, artist_name, single_page_func):
        """Process the current page with *single_page_func*, then recurse into
        the "next page" link (identified by the Hebrew label below) until no
        further page exists. Returns *data_dict* (mutated by the handler).
        """
        next_page_str = "עמוד הבא"
        next_page_a_xpath = "//a[@class='nextPre']"
        # get current page data
        single_page_func(url) if data_dict is None else single_page_func(url, data_dict, artist_name)
        try:
            # check for more pages for this page
            next_prev_page_a_elements = self.my_driver.find_elements_by_xpath(next_page_a_xpath)
            for next_prev_page_a_element in next_prev_page_a_elements:
                next_page_a_text = next_prev_page_a_element.text
                if next_page_str in next_page_a_text:
                    try:
                        url = next_prev_page_a_element.get_attribute('href') # no point to go back to the prev url
                        self.my_driver.try_click(url, next_prev_page_a_element)
                        logger.notice(f"Moved to the next page for this artist, current url is {urllib.parse.unquote(url)}")
                        self.navigate_multiple_pages(url, data_dict, artist_name, single_page_func)
                    except Exception as e:
                        if not self.handle_crash(url, e):
                            logger.warning(f"Failed to go to the next page, exception: {e}. Reloading")
                            self.my_driver.driver.get(url)
        except Exception as e:
            if not self.handle_crash(url, e):
                logger.warning(f"Failed to find next page element by xpath: {next_page_a_xpath}, exception: {e}. Reloading")
                self.my_driver.driver.get(url)
        return data_dict
def navigate_artists(self, url):
""" navigates artist pages, dump a json file for each artist """
self.navigate_pages(url, None, None, self.navigate_artists_single_page)
    def navigate_artists_single_page(self, url):
        """Visit every artist link on the current page and dump one JSON file
        per artist; artists whose json_files/<name>.json already exists are
        skipped. Crashes rebuild the driver and the loop continues.
        """
        artists_a_xpath = "//a[@class='searchLink']"
        artist_name = ""  # this is needed in case of warning
        # this shouldn't fail all page - might give empty data
        artists_albums_songs_cnt_dict = self.get_albums_songs_cnt_data(url)
        try:
            artists_a_element = self.my_driver.find_elements_by_xpath(artists_a_xpath)
            for idx, _ in enumerate(artists_a_element):
                # Re-resolve the link by index — the DOM reloads between iterations.
                artist_a_xpath_by_idx = self.my_driver.xpath_by_idx(artists_a_xpath, idx)
                artist_a_element = self.my_driver.find_element_by_xpath(artist_a_xpath_by_idx)
                artist_name = artist_a_element.text
                # check if the artist already exists - if so - skip
                artist_json_file_path = f"json_files/{artist_name}.json"
                if os.path.exists(artist_json_file_path):
                    logger.notice(f"Skipped artist: {artist_name} because it already exists")
                    continue
                artist_albums_cnt, artist_songs_cnt = self.get_artist_albums_songs_cnt(artist_name,
                                                                                      artists_albums_songs_cnt_dict)
                # go to the artist's page and create a json file for him
                try:
                    artist_url = artist_a_element.get_attribute('href')
                    self.my_driver.try_click(url, artist_a_element)
                    logger.notice(f"clicked successfully, current url is {urllib.parse.unquote(artist_url)}")
                    self.get_data_as_json_file_by_artist(artist_url, artist_name, artist_albums_cnt, artist_songs_cnt)
                    # go back to the previous page
                    try:
                        self.my_driver.go_back(url)
                    except Exception as e:
                        # Back-navigation failed: rebuild the driver from scratch.
                        logger.warning(f"Reloading the chrome driver")
                        self.my_driver = driver_helper.DriverHelper()
                        self.my_driver.get_chrome_driver(consts.CHROME_DRIVER_PATH)
                        self.my_driver.driver.get(url)
                except Exception as e:
                    if not self.handle_crash(url, e):
                        logger.warning(f"Failed to click on the artist {artist_name} page, exception: {e}. Reloading")
                        self.my_driver.driver.get(url)
        except Exception as e:
            if not self.handle_crash(url, e):
                logger.warning(f"Failed to get artists links, exception: {e}.")
    def navigate_songs_single_page(self, url, songs_data_lst, artist_name):
        """Visit every song link on the current page, parse each song's data
        and append it to *songs_data_lst*, which is returned.

        NOTE(review): the failure path returns {} while the success path
        returns a list — callers receive an empty dict when the song table
        can't be located; confirm this asymmetry is intended.
        """
        songs_xpath = "//td[@class='song']"
        songs_a_xpath = "//td[@class='song']/a"
        try:
            songs_elements = self.my_driver.find_elements_by_xpath(songs_xpath)
            song_url = ""  # needed for warning msg in case of failure
            song_name = ""  # needed for warning msg in case of failure
            for idx, _ in enumerate(songs_elements):
                try:
                    songs_a_xpath_by_idx = self.my_driver.xpath_by_idx(songs_a_xpath, idx)  # note: xpath arr starts from 1
                    song_a_element = self.my_driver.find_element_by_xpath(songs_a_xpath_by_idx)
                    # get song's name
                    song_name = song_a_element.text
                    # get the link to the song's page and move to the page
                    song_url = song_a_element.get_attribute('href')
                    self.my_driver.try_click(url, song_a_element)
                    # get song's chords and words data
                    song_data_dict = self.get_song_data_init_page(song_url, artist_name, song_name)
                    songs_data_lst.append(song_data_dict)
                    logger.log(f"parsed song {(idx+1)} for this page. name: {song_name}, href: {urllib.parse.unquote(song_url)}")
                except Exception as e:
                    if not self.handle_crash(url, e):
                        logger.warning(f"Failed to parse a song {idx} of current page, song href: "
                                       f"{urllib.parse.unquote(song_url)}, song text: "
                                       f"{song_name}, current url: {urllib.parse.unquote(url)}, exception: {e}. Reloading")
                        self.my_driver.driver.get(url)
                # go back to the previous page
                try:
                    self.my_driver.go_back(url)
                except Exception as e:
                    if not self.handle_crash(url, e):
                        # Back-navigation failed: rebuild the driver from scratch.
                        logger.warning(f"Reloading the chrome driver")
                        self.my_driver = driver_helper.DriverHelper()
                        self.my_driver.get_chrome_driver(consts.CHROME_DRIVER_PATH)
                        self.my_driver.driver.get(url)
            return songs_data_lst
        except Exception as e:
            if not self.handle_crash(url, e):
                logger.warning(f"Failed to find songs by xpath: {songs_xpath}, exception: {e}. Reloading")
                self.my_driver.driver.get(url)
            return {}
# ##################################################################################################################
# ########################################### get artist data ######################################################
def get_artist_albums_songs_cnt(self, artist_name, artists_albums_songs_cnt_dict):
""" separates the albums and songs count dictionaty into 2 parameters """
if artist_name not in artists_albums_songs_cnt_dict:
artist_albums_cnt = None
artist_songs_cnt = None
else:
artist_albums_cnt = artists_albums_songs_cnt_dict[artist_name][consts.ALBUMS_CNT]
artist_songs_cnt = artists_albums_songs_cnt_dict[artist_name][consts.SONGS_CNT]
return artist_albums_cnt, artist_songs_cnt
def get_albums_songs_cnt_data(self, url):
""" returns a dictionary of key is the artist name, value is a dictionary of albums cnt and songs cnt """
artists_table_trs_xpath = "//table[@class='tbl_type5']/tbody/tr"
albums_songs_cnt_dct = {}
try:
artists_table_trs = self.my_driver.find_elements_by_xpath(artists_table_trs_xpath)[1:] # remove table header
for idx, _ in enumerate(artists_table_trs):
try:
artist_data = artists_table_trs[idx].text
if artist_data is not None:
artist_data_lst = artist_data.split(" ")
if len(artist_data) >= 3:
artist_name = " ".join(artist_data_lst[:-2])
albums_songs_cnt_dct.update(
{artist_name: {
consts.ALBUMS_CNT: artist_data_lst[-2],
consts.SONGS_CNT: artist_data_lst[-1]
}})
except Exception as e:
if not self.handle_crash(url, e):
logger.warning(f"Failed to get artist data, return empty albums and songs cnt, exception: {e}")
except Exception as e:
if not self.handle_crash(url, e):
logger.warning(f"Failed to get artists table, return empty albums and songs cnt, exception: {e}")
logger.log(f"Parsed artists albums and songs count")
return albums_songs_cnt_dct
    def get_data_as_json_file_by_artist(self, curr_url, artist_name, artist_albums_cnt, artist_songs_cnt):
        """Crawl the artist page at *curr_url* (biography + all songs) and dump
        the collected data to json_files/<artist_name>.json.

        The returned values of get_artist_data / navigate_pages are embedded
        under consts.ARTIST_DATA and consts.SONGS_DATA respectively.
        """
        # get data
        data_by_artist_dict = {
            consts.ARTIST_DATA: {
                consts.ARTIST_NAME: artist_name,
                consts.ARTIST_BIO: self.get_artist_data(curr_url),
                consts.ALBUMS_CNT: artist_albums_cnt,
                consts.SONGS_CNT: artist_songs_cnt
            },
            consts.SONGS_DATA: self.navigate_pages(curr_url, [], artist_name, self.navigate_songs_single_page)
        }
        # dump dictionary to json file by artist name
        try:
            file_name = f"json_files/{artist_name}.json"
            with open(file_name, 'w', encoding='utf-8') as f:
                json.dump(data_by_artist_dict, f, ensure_ascii=False, indent=4)
        except Exception as e:
            if not self.handle_crash(curr_url, e):
                logger.warning(f"Failed to dump artist {artist_name} to json file, exception: {e}.")
    def get_artist_data(self, url):
        """Parse the artist biography list into a {field: value} dict.

        A line containing ": " starts a new field; a line without it is treated
        as a continuation of the previous field's value. Returns whatever was
        collected so far when scraping fails.
        """
        artist_bios_xpath = "//ul[@class='artist_block_bio']/li"
        artist_bio_dict = {}
        last_artist_bio_key = None
        try:
            artist_bios_elements = self.my_driver.find_elements_by_xpath(artist_bios_xpath)
            for artist_bio_e in artist_bios_elements:
                if ": " in artist_bio_e.text:
                    artist_bio = artist_bio_e.text.split(": ")
                    artist_bio_dict[artist_bio[0]] = artist_bio[1]
                    last_artist_bio_key = artist_bio[0]
                elif last_artist_bio_key:
                    # Continuation line: append to the most recent field.
                    artist_bio_dict[last_artist_bio_key] += artist_bio_e.text
            logger.log(f"parsed artist's biography:' {artist_bio_dict}")
            return artist_bio_dict
        except Exception as e:
            if not self.handle_crash(url, e):
                logger.warning(f"Failed to get the artist's biography information, exception: {e}. Reloading")
                self.my_driver.driver.get(url)
            return artist_bio_dict
# ##################################################################################################################
# ########################################### get song data ########################################################
    def get_song_data_init_page(self, url, artist_name, song_name):
        """Assemble the full data dict for one song from its page: ranking,
        author/composer, categories, collaborators, paragraph content and
        section definitions (keys come from consts).
        """
        author, composer = self.get_song_author_composer(url, song_name)
        paragraph_content, definitions = self.get_song_paragraphs_content(url, song_name)
        song_data_dict = {
            consts.SONG_NAME: song_name,
            consts.RANKING: self.get_song_ranking(url, song_name),
            consts.AUTHOR: author,
            consts.COMPOSER: composer,
            consts.CATEGORIES: self.get_song_categories(url, song_name),
            consts.COLLABORATORS: self.get_song_collaborators(url, artist_name, song_name),
            consts.PARAGRAPHS: paragraph_content,
            consts.DEFINITIONS: definitions
        }
        logger.log(f"Found song data for song: {song_data_dict[consts.SONG_NAME]}")
        return song_data_dict
    def fix_tab_paragraphs(self, song_paragraphs):
        """Merge consecutive tab fragments into single tab paragraphs.

        Scraping splits a tab section across several raw paragraphs; this fold
        accumulates their tab/chord lines until a non-tab paragraph or a new
        definition is reached, then flushes the accumulated section (via
        append_fixed_tabs_paragraph). Non-tab paragraphs pass through with
        their IS_TAB_PARA marker removed.
        """
        fixed_song_paragraphs = []
        paragraph_type = consts.UNIQUE
        definition_name = ""
        tabs_lines = []
        chords_lines = []
        # has_chords flags that a chords-only fragment preceded the next tab
        # fragment, so that fragment is marked HAS_CHORDS.
        has_chords = False
        for paragraph in song_paragraphs:
            if not paragraph[consts.IS_TAB_PARA]:
                # if there is a tabs paragraph in fixing progress, it is now finished and should be appended
                chords_lines, definition_name, paragraph_type, tabs_lines = self.append_fixed_tabs_paragraph(
                    chords_lines, definition_name, fixed_song_paragraphs, paragraph, paragraph_type, tabs_lines, False)
                paragraph.pop(consts.IS_TAB_PARA)
                fixed_song_paragraphs.append(paragraph)
            # check if this is tabs paragraph of tabs
            elif len(paragraph[consts.TABS_LINES]) > 0:
                # when reaching to a definition of a new tab paragraph, append the last tab paragraph, if such exists
                if paragraph[consts.TYPE] == consts.DEFINITION:
                    chords_lines, definition_name, paragraph_type, tabs_lines = self.append_fixed_tabs_paragraph(
                        chords_lines, definition_name, fixed_song_paragraphs, paragraph, paragraph_type, tabs_lines)
                tabs_lines.append({
                    consts.TABS_LINE: '\n'.join(paragraph[consts.TABS_LINES]),
                    consts.HAS_CHORDS: has_chords
                })
                has_chords = False
            # check if this is tabs paragraph of chords
            else:
                has_chords = True
                # when reaching to a definition of a new tab paragraph, append the last tab paragraph, if such exists
                if paragraph[consts.TYPE] == consts.DEFINITION:
                    chords_lines, definition_name, paragraph_type, tabs_lines = self.append_fixed_tabs_paragraph(
                        chords_lines, definition_name, fixed_song_paragraphs, paragraph, paragraph_type, tabs_lines)
                chords_lines.append(paragraph[consts.CHORDS_LINES][0])
        # if there is a tabs paragraph in fixing progress, it is now finished and should be appended
        if len(tabs_lines) > 0:
            fixed_song_paragraphs.append({
                consts.TYPE: paragraph_type,
                consts.DEFINITION_NAME: definition_name,
                consts.CHORDS_LINES: chords_lines,
                consts.TABS_LINES: tabs_lines,
                consts.LYRICS_LINES: []
            })
        return fixed_song_paragraphs
def append_fixed_tabs_paragraph(self, chords_lines, definition_name, fixed_song_paragraphs, paragraph, paragraph_type,
tabs_lines, is_definition=True):
""" appends the fixed paragraph and initiate the parameters again to be ready for the next one """
if len(tabs_lines) > 0:
fixed_song_paragraphs.append({
consts.TYPE: paragraph_type,
consts.DEFINITION_NAME: definition_name,
consts.CHORDS_LINES: chords_lines,
consts.TABS_LINES: tabs_lines,
consts.LYRICS_LINES: []
})
# create the parameters for the new paragraph
tabs_lines = []
chords_lines = []
if is_definition:
definition_name = paragraph[consts.DEFINITION_NAME]
paragraph_type = paragraph[consts.TYPE]
else:
definition_name = ""
paragraph_type = consts.UNIQUE
return chords_lines, definition_name, paragraph_type, tabs_lines
def get_song_paragraphs_content(self, url, song_name):
""" Returns a list of the song's paragraphs. Each item in the list contains the paragraph definition, chords, tabs
and lyrics. """
song_paragraphs_xpath = "//div[@id='songContentTPL']/*"
song_paragraphs = []
definitions = {} # key = definition name, value = paragraph number
is_current_a_definition = False
definition_name = ""
try:
song_paragraphs_elements = self.my_driver.find_elements_by_xpath(song_paragraphs_xpath)
# go through all paragraphs (and br)
for paragraph_idx, song_paragraph_element in enumerate(song_paragraphs_elements):
# Do not try to parse tag br paragraphs
if song_paragraph_element.tag_name == "br":
continue
chords_lines = []
tabs_lines = []
song_lines = []
is_tabs_paragraph = False
try:
song_lines_xpath = f"{self.my_driver.xpath_by_idx(song_paragraphs_xpath, paragraph_idx)}/tbody/tr/td"
song_lines_elements = self.my_driver.find_elements_by_xpath(song_lines_xpath)
# go through all lines in the paragraph
for line_idx, song_line_element in enumerate(song_lines_elements):
line_text = song_line_element.text
line_type = song_line_element.get_attribute("class")
if line_type == consts.CHORDS_CLASS:
chords_lines.append(line_text)
elif line_type == consts.TABS_CLASS:
tabs_lines.append(line_text)
is_tabs_paragraph = True
elif line_type == consts.SONG_CLASS:
song_lines.append(line_text)
# check if this paragraph belongs to a larger tab paragraph - it has no br at the end
next_idx = paragraph_idx + 1
if next_idx < len(song_paragraphs_elements) and song_paragraphs_elements[next_idx].tag_name != "br":
is_tabs_paragraph = True
# check if this paragraph is part of a definition started at the previous paragraph
if is_current_a_definition:
song_paragraphs.append({
consts.TYPE: consts.DEFINITION,
consts.DEFINITION_NAME: definition_name,
consts.CHORDS_LINES: chords_lines,
consts.TABS_LINES: tabs_lines,
consts.LYRICS_LINES: song_lines,
consts.IS_TAB_PARA: is_tabs_paragraph
})
is_current_a_definition = False
else:
is_current_a_definition, definition_name, paragraph_type, song_lines = \
self.get_paragraph_definition(chords_lines, definitions, song_lines, song_paragraphs)
if not is_current_a_definition:
song_paragraphs.append({
consts.TYPE: paragraph_type,
consts.DEFINITION_NAME: definition_name,
consts.CHORDS_LINES: chords_lines,
consts.TABS_LINES: tabs_lines,
consts.LYRICS_LINES: song_lines,
consts.IS_TAB_PARA: is_tabs_paragraph
})
except Exception as e:
if not self.handle_crash(url, e):
logger.warning(
f"Failed to find song's paragraph for song {song_name}, exception: {e}. Reloading")
self.my_driver.driver.get(url)
fixed_paragraphs = self.fix_tab_paragraphs(song_paragraphs)
return fixed_paragraphs, definitions
except Exception as e:
if not self.handle_crash(url, e):
logger.warning(f"Failed to find song's words and chords for song {song_name}, exception: {e}. Reloading")
self.my_driver.driver.get(url)
return ""
def get_paragraph_definition(self, chords_lines, definitions, song_lines, song_paragraphs):
""" decide if this paragraph is type definition/repetitive/unique """
is_next_define = False
# decide if this is a definition of a repetitive section
if len(song_lines) > 0 and song_lines[0][-1] == ":":
definition_name = song_lines[0].replace(":", "")
definition_paragraph_num = len(song_paragraphs)
definitions.update({definition_name: definition_paragraph_num})
# decide if the defined paragraph is the next or the current
if len(song_lines) == 1 and len(chords_lines) == 0:
is_next_define = True
paragraph_type = None
else:
paragraph_type = consts.DEFINITION
song_lines = song_lines[1:]
# decide if this paragraph was already defined before
elif len(song_lines) == 1 and len(chords_lines) == 0 and song_lines[0] in definitions:
paragraph_type = consts.REPETITIVE
definition_name = song_lines[0]
song_lines = []
else:
paragraph_type = consts.UNIQUE
definition_name = ""
return is_next_define, definition_name, paragraph_type, song_lines
def get_song_collaborators(self, url, artist_name, song_name):
""" Returns a string with the names of the other artists that worked on this song """
# TODO: we can make this at the end of the crawling to be a list -
# TODO: by looking for words that starts with " ו" and checking to see if there is an artist with this name....
all_artists_xpath = "//div[@class='data_block_title_text']/a"
and_artist = " ו" + artist_name
collaborators = None
try:
all_artists = self.my_driver.find_element_by_xpath(all_artists_xpath).text
collaborators = all_artists.replace(and_artist, "") if and_artist in all_artists else all_artists.replace(artist_name, "")
except Exception as e:
if not self.handle_crash(url, e):
logger.log(f"Could not to find collaborators for song {song_name}, exception: {e}")
# self.my_driver.driver.get(url)
return collaborators
def get_song_categories(self, url, song_name):
""" Returns a list of the song's categories """
categories_xpath = "//a[@class='catLinkInSong']"
categories_lst = []
try:
categories_elements = self.my_driver.find_elements_by_xpath(categories_xpath, wait=False)
categories_lst = [category_element.text for category_element in categories_elements]
except Exception as e:
if not self.handle_crash(url, e):
logger.log(f"Could not to find categories for song {song_name}, exception: {e}")
# self.my_driver.driver.get(url)
return categories_lst
def get_song_author_composer(self, url, song_name):
""" returns author and composer names """
author_composer_headers_spans_xpath = "//div[@id='aAndcArea']/span[@id='koteretInSong']"
author_composer_info_spans_xpath = "//div[@id='aAndcArea']/span[@id='textInSong']"
author = None
composer = None
try:
author_composer_headers_spans = self.my_driver.find_elements_by_xpath(author_composer_headers_spans_xpath)
author_composer_info_spans = self.my_driver.find_elements_by_xpath(author_composer_info_spans_xpath)
if not isinstance(author_composer_headers_spans, list) or \
not isinstance(author_composer_info_spans, list) or \
not len(author_composer_headers_spans) == len(author_composer_info_spans):
raise Exception()
for idx, (author_composer_header_span, author_composer_info_span) in \
enumerate(zip(author_composer_headers_spans, author_composer_info_spans)):
author_composer_header = author_composer_header_span.text.replace(":", "")
author_composer_info = author_composer_info_span.text
if author_composer_header == consts.AUTHOR_AND_COMPOSER_HEB or \
author_composer_header == consts.COMPOSER_AND_AUTHOR_HEB:
author = author_composer_info
composer = author_composer_info
elif author_composer_header == consts.AUTHOR_HEB:
author = author_composer_info
elif author_composer_header == consts.COMPOSER_HEB:
composer = author_composer_info
except Exception as e:
if not self.handle_crash(url, e):
logger.warning(f"Failed to find composer and author for song {song_name}, exception: {e}")
# self.my_driver.driver.get(url)
return author, composer
def get_song_ranking(self, url, song_name):
""" returns the song's ranking as a float """
ranking = None
try:
ranking_xpath = "//span[@class='rankPre']"
ranking_element = self.my_driver.find_element_by_xpath(ranking_xpath, wait=False)
ranking = float(ranking_element.text)
except Exception as e:
if not self.handle_crash(url, e):
logger.log(f"Could not to find ranking for song {song_name}, exception: {e}")
# my_driver.driver.get(url)
return ranking
def run(self):
self.my_driver = driver_helper.DriverHelper()
self.my_driver.get_chrome_driver(consts.CHROME_DRIVER_PATH)
try:
all_pages_url = "https://www.tab4u.com/tabs/"
self.my_driver.driver.get(all_pages_url)
self.navigate_all_pages_by_letters(all_pages_url)
# navigate each letter in separate
# url = input("Enter url\n")
# self.my_driver.driver.get(url)
# print(self.navigate_artists(url))
finally:
self.my_driver.driver.close()
# ######################################################################################################################
# ################################################ main ################################################################
if __name__ == "__main__":
    # Run the crawler only when executed as a script (not on import).
    crawler = Tab4uCrawler()
    crawler.run()
| yuvallhv/ChordsAnalizer | tab4u_crawl.py | tab4u_crawl.py | py | 33,634 | python | en | code | 0 | github-code | 13 |
8525097026 | import os
import numpy as np
import pathlib
import pandas as pd
import keras.api._v2.keras as keras
from sklearn.metrics import confusion_matrix, classification_report
from keras.api._v2.keras import layers, \
losses, regularizers, optimizers, applications
from keras.api._v2.keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
import tensorflow_hub as hub
from util.my_tf_callback import LearningRateA, saver
import util.datasets_util as ds_util
from util.util import print_in_color
import matplotlib.pyplot as plt
import math
plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']
# 定义一个函数创建混淆矩阵和分类报告
def print_info(test_gen, preds, print_code, save_dir, subject):
    """
    Report misclassifications: list the worst errors, bar-plot per-class error
    counts, draw the confusion matrix and print a classification report.

    :param test_gen: test-set data generator (holds class indices, labels and file names)
    :param preds: model predictions, one probability vector per sample
    :param print_code: max number of individual errors to print (0 = none)
    :param save_dir: save directory (currently unused)
    :param subject: dataset subject tag (currently unused)
    :return: None
    """
    # class name -> index mapping from the generator
    class_dict = test_gen.class_indices
    # true label index of every test sample
    labels = test_gen.labels
    # file name of every test sample
    file_names = test_gen.filenames
    error_list = []
    true_class = []
    pred_class = []
    prob_list = []
    # inverted mapping: index -> class name
    new_dict = {}
    error_indies = []
    # predicted index of every sample
    y_pred = []
    for key, value in class_dict.items():
        new_dict[value] = key
    # all class names, in index order
    classes = list(new_dict.values())
    # number of misclassified samples
    errors = 0
    for i, p in enumerate(preds):
        pred_index = np.argmax(p)
        true_index = labels[i]
        if pred_index != true_index:
            # record the misclassification details
            error_list.append(file_names[i])
            true_class.append(new_dict[true_index])
            pred_class.append(new_dict[pred_index])
            # highest predicted probability
            prob_list.append(p[pred_index])
            error_indies.append(true_index)
            errors = errors + 1
        y_pred.append(pred_index)
    if print_code != 0:
        if errors > 0:
            # print at most print_code rows
            if print_code > errors:
                r = errors
            else:
                r = print_code
            msg = '{0:^28s}{1:^28s}{2:^28s}{3:^16s}' \
                .format('Filename', 'Predicted Class', 'True Class', 'Probability')
            print_in_color(msg, (0, 255, 0), (55, 65, 80))
            for i in range(r):
                # keep only "<class dir>/<file name>" of the misclassified path
                split1 = os.path.split(error_list[i])
                split2 = os.path.split(split1[0])
                fname = split2[1] + '/' + split1[1]
                msg = '{0:^28s}{1:^28s}{2:^28s}{3:4s}{4:^6.4f}'.format(fname, pred_class[i], true_class[i], ' ',
                                                                       prob_list[i])
                print_in_color(msg, (255, 255, 255), (55, 65, 60))
        else:
            msg = '精度为100%,没有错误'
            print_in_color(msg, (0, 255, 0), (55, 65, 80))
    if errors > 0:
        plot_bar = []
        plot_class = []
        for key, value in new_dict.items():
            # count how often samples of this true class were misclassified
            count = error_indies.count(key)
            if count != 0:
                plot_bar.append(count)
                plot_class.append(value)
        fig = plt.figure()
        fig.set_figheight(len(plot_class) / 3)
        fig.set_figwidth(10)
        for i in range(0, len(plot_class)):
            c = plot_class[i]
            x = plot_bar[i]
            plt.barh(c, x)
        plt.title("测试集错误分类")
    y_true = np.array(labels)
    y_pred = np.array(y_pred)
    # only draw the confusion matrix for up to 30 classes
    if len(classes) <= 30:
        cm = confusion_matrix(y_true, y_pred)
        length = len(classes)
        if length < 8:
            fig_width = 8
            fig_height = 8
        else:
            fig_width = int(length * 0.5)
            fig_height = int(length * 0.5)
        plt.figure(figsize=(fig_width, fig_height))
        # BUG FIX: the matrix itself was never drawn, and the tick positions
        # used np.array(length) (a scalar) instead of np.arange(length),
        # which fails whenever there is more than one class label.
        plt.imshow(cm, interpolation='nearest', cmap='Blues')
        plt.xticks(np.arange(length), classes, rotation=90)
        plt.yticks(np.arange(length), classes, rotation=0)
        plt.xlabel("预测的")
        plt.ylabel("真实的")
        plt.title("混淆矩阵")
        plt.show()
    clr = classification_report(y_true, y_pred, target_names=classes)
    print("Classification Report:\n----------------------\n", clr)
# 定义一个函数绘制训练数据
def tr_plot(tr_data, start_epoch):
    """Plot training/validation loss and accuracy curves side by side.

    :param tr_data: keras History object returned by model.fit()
    :param start_epoch: epoch number training resumed from (x-axis offset)
    """
    tacc = tr_data.history["accuracy"]
    tloss = tr_data.history["loss"]
    vacc = tr_data.history["val_accuracy"]
    vloss = tr_data.history["val_loss"]
    # total number of epochs actually run
    Epoch_count = len(tacc) + start_epoch
    Epochs = [i + 1 for i in range(start_epoch, Epoch_count)]
    index_loss = np.argmin(vloss)
    val_lowest = vloss[index_loss]
    index_acc = np.argmax(vacc)
    acc_highest = vacc[index_acc]
    sc_label = 'best epoch=' + str(index_loss + 1 + start_epoch)
    vc_label = 'best epoch=' + str(index_acc + 1 + start_epoch)
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 8))
    axes[0].plot(Epochs, tloss, 'r', label='训练损失')
    axes[0].plot(Epochs, vloss, 'g', label='验证损失')
    axes[0].scatter(index_loss + 1 + start_epoch, val_lowest, s=150, c="blue", label=sc_label)
    axes[0].set_title('训练和验证损失')
    axes[0].set_xlabel("迭代次数")
    axes[0].set_ylabel("损失")
    axes[0].legend()
    axes[1].plot(Epochs, tacc, 'r', label='训练准确率')
    axes[1].plot(Epochs, vacc, 'g', label='验证准确率')
    # BUG FIX: the best-accuracy marker was labelled with the float
    # `val_lowest` instead of the prepared `vc_label` string.
    axes[1].scatter(index_acc + 1 + start_epoch, acc_highest, s=150, c='blue', label=vc_label)
    # BUG FIX: this panel shows accuracy, not loss.
    axes[1].set_title("训练和验证准确率")
    axes[1].set_xlabel("迭代次数")
    axes[1].set_ylabel("准确率")
    axes[1].legend()
    plt.show()
# 定义一个函数,该函数对图片像素值进行压缩(0-1),
# 但由于EfficientNet网络需要0-1所以不需要进行缩放
def scalar(img):
    """Rescale pixel values from the [0, 255] range down to [0, 1]."""
    return img / 255.
# Build the train / test / validation splits from the local dataset folder.
train_df, test_df, valid_df = ds_util.preprocessing("datasets")
# Hyper-parameters for the run.
model_name = "ViT-B_32"
ask_epoch = None
dwell = True
stop_patience = 3
patience = 1
epochs = 10
learning_rate = 0.001
factor = 0.5
dropout_p = 0.2
threshold = 0.95
freeze = True
batch_size = 128
num_classes = 325
image_size = (224, 224)
channels = 3
max_num = 140
min_num = 0
label_column_name = "labels"
work_dir = "./datasets"
test_len = len(test_df)
# Largest divisor of the test-set size that is <= 80, so evaluation covers
# the whole test set in an integral number of batches.
test_batch_size = sorted([int(test_len / n) for n in range(1, test_len + 1)
                          if test_len % n == 0 and test_len / n <= 80], reverse=True)[0]
# Balance the class distribution of the training set.
dataset_name = "balance"
train_df = ds_util.balance(train_df, min_num, max_num, work_dir,
                           label_column_name, image_size)
# Wrap the dataframes in TF image data generators.
trgen = ImageDataGenerator(
    preprocessing_function=scalar,
    # random rotation up to 15 degrees, plus random horizontal/vertical flips
    rotation_range=15, horizontal_flip=True, vertical_flip=True)
tvgen = ImageDataGenerator(preprocessing_function=scalar)
msg = '训练集生成器'
print_in_color(msg, (0, 255, 0), (55, 65, 80))
train_gen = trgen.flow_from_dataframe(
    train_df, x_col='filepaths', y_col='labels',
    target_size=image_size, class_mode='categorical',
    color_mode='rgb', shuffle=True, batch_size=batch_size)
msg = '测试集生成器'
print_in_color(msg, (0, 255, 255), (55, 65, 80))
test_gen = tvgen.flow_from_dataframe(
    test_df, x_col='filepaths', y_col='labels',
    target_size=image_size, class_mode='categorical',
    color_mode='rgb', shuffle=False, batch_size=test_batch_size)
msg = '验证集生成器'
print_in_color(msg, (0, 255, 255), (55, 65, 80))
valid_gen = tvgen.flow_from_dataframe(
    valid_df, x_col='filepaths', y_col='labels',
    target_size=image_size, class_mode='categorical',
    color_mode='rgb', shuffle=True, batch_size=batch_size)
# Steps per epoch for each split.
train_steps = int(np.ceil(len(train_gen.labels) / batch_size))
test_steps = int(test_len / test_batch_size)
valid_steps = int(np.ceil(len(valid_gen.labels) / batch_size))
batches = train_steps
# Build the model: a frozen ViT backbone from TF-Hub plus a small
# classification head.
version = 1
model = tf.keras.Sequential([
    layers.Input(shape=(224, 224, 3)),
    # layers.InputLayer((image_size, image_size, 3)),
    hub.KerasLayer(r"transformer/models", trainable=False),
    layers.Dropout(dropout_p),
    layers.Dense(1024, activation="relu", use_bias=True,
                 kernel_regularizer=regularizers.l2(0.02), name="fc1"),
    layers.Dense(num_classes, activation="softmax", name="fc2")
])
# Print the layer summary for inspection.
print(model.summary())
model.compile(optimizer=optimizers.Adam(learning_rate=learning_rate),
              loss=losses.CategoricalCrossentropy(),
              metrics=["accuracy"])
tensorboard = keras.callbacks.TensorBoard("tmp", histogram_freq=1)
# Custom learning-rate schedule/early-stop callback plus TensorBoard logging.
callbacks = [
    LearningRateA(model=model, base_model=None, patience=patience,
                  stop_patience=stop_patience, threshold=threshold,
                  factor=factor, dwell=dwell, batches=batches, initial_epoch=0,
                  epochs=epochs, ask_epoch=ask_epoch), tensorboard]
history = model.fit(x=train_gen, epochs=epochs, verbose=0,
                    callbacks=callbacks, validation_data=valid_gen,
                    validation_steps=None, shuffle=False, initial_epoch=0)
tr_plot(history, 0)
# NOTE: a hand-written custom training loop (GradientTape train/valid steps
# with per-epoch learning-rate decay) used to live here as commented-out code;
# it was superseded by the model.fit() call above and has been removed as
# dead code.
subject = 'birds'
# Evaluate on the whole test set; index 1 of the returned list is accuracy.
acc = model.evaluate(test_gen, steps=test_steps, return_dict=False)[1] * 100
msg = f'accuracy on the test set is {acc:5.2f} %'
print_in_color(msg, (0, 255, 0), (55, 65, 80))
generator = train_gen
scale = 1
# Persist the trained model and the class-index CSV.
model_save_loc, csv_save_loc = saver(
    f"model/{model_name}", model, model_name, subject, acc, image_size, scale,
    generator, epochs=epochs, version=version, dataset_name=dataset_name)
print_code = 0
# Predict on the test set and print the misclassification report.
preds = model.predict(test_gen, steps=test_steps)
print_info(test_gen, preds, print_code, work_dir, subject)
| NCcoco/kaggle-project | Bird-Species/train-py-ViT.py | train-py-ViT.py | py | 12,952 | python | en | code | 0 | github-code | 13 |
73777584019 | import random
import numpy as np
from math import *
import cv2
import matplotlib.pyplot as plt
import scipy as sc
import scipy.optimize as opt
from sklearn.linear_model import LinearRegression
lidarGeneratedData = []
pic = cv2.imread("whiteboard.png")
#lidarinput keeps distance to each point at each degree with step of 1 degree
def drawData(coordsArray):
    """Placeholder renderer.

    The original cv2 drawing code is disabled; this currently just prints one
    blank line per coordinate and is kept only for call-site compatibility.
    """
    for _ in coordsArray:
        print('')
#cv2.imwrite("result.png", pic)
def generateLine():
    """Generate a random line segment for the simulated obstacle.

    Returns [x0, y0, x1, y1, x_low, x_high, slope, intercept], where
    (x0, y0)-(x1, y1) are two points on the line, [x_low, x_high] bounds the
    visible segment along x, and intercept is the value at x = 0.
    """
    print('generated random line')
    x0 = np.random.uniform(-5.0, 5.0)
    y0 = np.random.uniform(-5.0, 5.0)
    x1 = x0 + np.random.uniform(-10.0, 10.0)
    y1 = y0 + np.random.uniform(-10.0, 10.0)
    x_low = np.random.uniform(-1.0, 1.0)
    x_high = np.random.uniform(x_low, x_low + np.random.uniform(0, 50.0))
    slope = (y1 - y0) / (x1 - x0)
    # y value at x = 0, from the two-point form of the line.
    const = ((0 - x0) / (x1 - x0)) * (y1 - y0) + y0
    print(f'slope is {slope}')
    print(f'const is {const}')
    return [x0, y0, x1, y1, x_low, x_high, slope, const]
twoPoints = generateLine()
print(f'twoPoints is {twoPoints}')
# BUG FIX: the x-limits live at indices 4 and 5; index 6 is the slope.
print(f'limits are {twoPoints[4], twoPoints[5]}')
#def randFunc(x):
# print(twoPoints[4] , x[0] , twoPoints[5])
# if twoPoints[4]<x[0]<twoPoints[5]:
# x0 = twoPoints[0]
# y0 = twoPoints[1]
# x1 = twoPoints[2]
# y1 = twoPoints[3]
# func = ((x[0] - x0) / (x1 - x0)) * (y1 - y0) + y0
# return func
# else:
# return "out of bounds"
#
#
#def angleFunc(alpha, x):
# y = -alpha[0] + 90
# func = np.tan(y)*x
# return (func)
#
#def difFunc(alpha, x):
# func = randFunc(x[0])-angleFunc(alpha, x[0])
# return func
#
#def sqDifFunc(alpha, x):
# func = difFunc(alpha, x[0])**2
# return func
#
#xIntersection = opt.minimize(sqDifFunc, [0], options={'eps': 0.1})
#print(xIntersection)
def findIntersections():
    """For each lidar beam angle (0-359°), intersect the beam with the random
    line held in the module-global ``twoPoints`` and collect
    [degree, x_of_intersection] pairs (x is None when the hit is outside the
    segment's x-limits or behind the sensor).

    NOTE(review): this depends on the module-global ``twoPoints`` and compares
    floats with exact inequality (``tan(j) != k``); degrees where the beam is
    exactly parallel to the line produce no entry at all, so the returned list
    can be shorter than 360 — confirm callers tolerate that.
    """
    intersectionsX = []
    b = twoPoints[7]
    k = twoPoints[6]
    print(f'b is {b}')
    print(f'k is {k}')
    for i in range (360):
        j = -i + 90 #angle in normal coords (0° lidar = straight up = +90° math)
        if -90<=j<=90:
            j = radians(j)
            if (tan(j) != k):
                # Beam y = tan(j)*x meets line y = k*x + b at this x.
                xIntersect = b / (tan(j) - k)
                if twoPoints[4] < xIntersect < twoPoints[5] and xIntersect>=0:
                    xIntersect = xIntersect
                    # intersectionsX.append([i,xIntersect])
                    intersectionsX.append([i, xIntersect])
                else:
                    intersectionsX.append([i, None])
                    # intersectionsX.append([i,'none'])
        if j< -90:
            # Rear half of the scan: only hits with negative x are valid.
            j = radians(j)
            if (tan(j) != k):
                xIntersect = b / (tan(j) - k)
                if twoPoints[4] < xIntersect < twoPoints[5] and xIntersect < 0:
                    xIntersect = xIntersect
                    # intersectionsX.append([i,xIntersect])
                    intersectionsX.append([i, xIntersect])
                else:
                    intersectionsX.append([i, None])
                    # intersectionsX.append([i,'none'])
    return intersectionsX
intersectionsX = findIntersections()
#pointx = [3]
#print(f"Func value in point {pointx} is: {randFunc(pointx)}")
#print('')
#print(f'Intersection points of lidar with obstacles are: {intersectionsX}')
print(len(intersectionsX))
# Convert each intersection's x coordinate back into a beam range
# (r = x / sin(angle)), rounding to centimetres.
for unit in intersectionsX:
    alpha = unit[0]
    lengthX = unit[1]
    # NOTE(review): sin(radians(180)) is ~1e-16, not 0, so a hit at exactly
    # 180° would blow up the range — confirm the geometry excludes it.
    if lengthX is not None and sin(radians(alpha))!=0:
        lengthR = lengthX/sin(radians(alpha))
        lidarGeneratedData.append(round(lengthR,2))
    else:
        lidarGeneratedData.append(None)
print('')
def generateNoise(lidarGeneratedData):
    """Add measurement noise to simulated lidar ranges.

    Each real reading receives up to +0.2 of additive noise. A missing
    reading (None) has a 0.5% chance of becoming a spurious range in
    [0, 20) and is otherwise kept as None. Returns a numpy array.
    """
    noised = []
    for reading in lidarGeneratedData:
        if reading is None:
            # Occasional false echo on an empty beam.
            if np.random.uniform(0, 100) < 0.5:
                reading = np.random.uniform(0, 20)
        else:
            reading = reading + np.random.uniform(0, 0.2)
        noised.append(reading)
    return np.array(noised)
# Build the noisy simulated scan that the analysis section below consumes.
noisedData = generateNoise(lidarGeneratedData)
"""
THIS WAS THE INFORMATION GENERATION PART
FROM NOW ON, DATA ANALYSIS IS BEING IMPLEMENTED
"""
def data2coords(dist):
    """Convert per-degree ranges to Cartesian coordinates.

    Index i of ``dist`` is the beam angle in degrees; None entries are
    skipped. Returns two parallel numpy arrays (x values, y values).
    """
    xs, ys = [], []
    for angle_deg, r in enumerate(dist):
        if r is None:
            continue
        theta = radians(angle_deg)
        xs.append(r * sin(theta))
        ys.append(r * cos(theta))
    return np.array(xs), np.array(ys)
#return overall
def data2coords4line (line):
    """Convert one detected line's [range, degree] samples to Cartesian x/y.

    Returns two parallel numpy arrays (x values, y values).
    """
    xs, ys = [], []
    for sample in line:
        r, theta = sample[0], radians(sample[1])
        xs.append(r * sin(theta))
        ys.append(r * cos(theta))
    return np.array(xs), np.array(ys)
def data2coordsov(dist):
    """Like data2coords() but returns a list of [x, y] pairs instead of
    two separate arrays. None entries are skipped."""
    pairs = []
    for angle_deg, r in enumerate(dist):
        if r is not None:
            theta = radians(angle_deg)
            pairs.append([r * sin(theta), r * cos(theta)])
    return pairs
def LinesSplit(inputData):
    """Group a circular scan into runs of consecutive valid readings.

    The data is padded with its wrap-around neighbours (last value in front,
    first value at the back) so runs touching the 0°/359° seam are detected.
    A reading belongs to a run only when at least one neighbour is also
    valid; isolated readings are dropped. Each run is a list of
    [value, degree] pairs.
    """
    padded = [inputData[-1]] + list(inputData) + [inputData[0]]
    runs = []
    in_run = False
    for i in range(1, len(padded) - 1):
        value = padded[i]
        if value is None:
            # A gap in the scan terminates the current run.
            in_run = False
            continue
        if padded[i - 1] is None and padded[i + 1] is None:
            # Isolated reading with no valid neighbour: discard.
            continue
        if not in_run:
            runs.append([])
            in_run = True
        runs[-1].append([value, i - 1])
    return runs
def process_data(data): #data is an array of length with each degree
    """Fit one straight line per contiguous run of lidar readings.

    ``data`` is a 360-entry scan of ranges indexed by degree, with None for
    missing beams. Returns one entry per detected run:
    [intercepts, slopes, x_min, x_max], where the x limits bound the fitted
    segment along the x axis.
    """
    lines = LinesSplit(data)  # runs of [range, degree] pairs
    print(f'data is {data}')
    outData = []
    for line in lines:
        print(f'line is: {line}')
        X, Y = data2coords4line(line)
        # sklearn expects a 2-D feature matrix.
        xS = X.reshape(-1, 1)
        yS = Y
        model = LinearRegression().fit(xS, yS)
        modelIntercepts = [model.intercept_]
        modelCoefs = [model.coef_[0]]
        xSSorted = np.sort(xS, axis=0)
        print(f'xSorted are {xSSorted}')
        # BUG FIX: the limits were swapped — xSSorted is ascending, so the
        # first element is the lower limit and the last is the upper limit.
        xLowerLimit = xSSorted[0][0]
        xHigherLimit = xSSorted[-1][0]
        outData.append([modelIntercepts, modelCoefs, xLowerLimit, xHigherLimit])
    print(f'outData is {outData}')
    return outData
def displayPoints(dataset):
    """Scatter-plot the scan both in Cartesian form and as raw
    range-vs-degree, plus the sensor origin."""
    cart_x, cart_y = data2coords(dataset)
    plt.scatter(cart_x, cart_y)
    print(cart_x)
    ranges = np.array(dataset)
    plt.scatter(ranges, range(360))
    plt.scatter(0, 0)  # mark the sensor origin
    plt.show()
# Visualise the simulated scan, then extract line segments from it.
displayPoints(noisedData)
process_data(noisedData)
| l3cire/path-planning | lidar.py | lidar.py | py | 9,056 | python | en | code | 0 | github-code | 13 |
def frequency(data, cols):
    """Count bit occurrences per column of a list of binary strings.

    Args:
        data: iterable of binary strings (possibly newline-terminated).
        cols: number of bit columns to tally.

    Returns:
        A 2 x cols table where freq[0][i] counts '0' bits and freq[1][i]
        counts '1' bits in column i.
    """
    rows = 2
    freq = [[0 for _ in range(cols)] for _ in range(rows)]
    for line in data:
        # BUG FIX: this function previously ignored its `cols` parameter and
        # read the module-level global `digit_count` instead.
        if len(line) >= cols:
            stripped = line.rstrip()
            for i in range(cols):
                if stripped[i] == '1':
                    freq[1][i] += 1
                elif stripped[i] == '0':
                    freq[0][i] += 1
                else:
                    print("Invalid input")
        else:
            print("Invalid line length")
    return freq
with open("input.txt") as f:
    data = f.readlines()

digit_count = len(data[0].rstrip())
freq = frequency(data, digit_count)
# Gamma rate: the most common bit of every column.
gamma_rate = 0
for i in range(digit_count):
    if (freq[1][i] > freq[0][i]):
        gamma_rate = gamma_rate | (1 << (digit_count - 1 - i))

# Epsilon is the bitwise complement of gamma within digit_count bits.
epsilon_rate = gamma_rate ^ (2 ** digit_count - 1)
pc = gamma_rate * epsilon_rate
print("==== Part 1 ====")
print("Gamma rate: %d, epsilon rate: %d, power consumtion: %d" %
      (gamma_rate, epsilon_rate, pc))

# Oxygen generator rating: repeatedly keep lines whose bit i matches the
# most common bit (ties -> '1'), recomputing frequencies each round.
kept_lines = data
for i in range(digit_count):
    tmp = []
    for line in kept_lines:
        line = line.rstrip()
        if len(line) >= digit_count:
            if (freq[1][i] >= freq[0][i]):
                if line[i] == '1':
                    tmp.append(line)
            else:
                if line[i] == '0':
                    tmp.append(line)
    kept_lines = tmp
    freq = frequency(kept_lines, digit_count)
    if len(kept_lines) == 1:
        break

# Convert the single surviving bit string into an integer.
oxygen_gen_rating = 0
for i in range(len(kept_lines[0])):
    if kept_lines[0][i] == '1':
        oxygen_gen_rating = oxygen_gen_rating | (1 << (digit_count - 1 - i))

print("==== Part 2 ====")
print("Oxygen generator rating: %d" % oxygen_gen_rating)
kept_lines = data
# BUG FIX: `freq` still held the frequencies of the single line left over
# from the oxygen-generator filtering above; the CO2 filter's first round
# must start from the frequencies of the full input.
freq = frequency(data, digit_count)
# CO2 scrubber rating: keep lines whose bit i matches the LEAST common bit
# (ties -> '0'), recomputing frequencies each round.
for i in range(digit_count):
    tmp = []
    for line in kept_lines:
        line = line.rstrip()
        if len(line) >= digit_count:
            if (freq[0][i] <= freq[1][i]):
                if line[i] == '0':
                    tmp.append(line)
            else:
                if line[i] == '1':
                    tmp.append(line)
    kept_lines = tmp
    freq = frequency(kept_lines, digit_count)
    if len(kept_lines) == 1:
        break

# Convert the single surviving bit string into an integer.
co2_scrub_rating = 0
for i in range(len(kept_lines[0])):
    if kept_lines[0][i] == '1':
        co2_scrub_rating = co2_scrub_rating | (1 << (digit_count - 1 - i))

print("CO2 scrub rating: %d" % co2_scrub_rating)
print("Life support rating: %d" % (co2_scrub_rating * oxygen_gen_rating))
| iceaway/advent-of-code-2021 | day3/day3.py | day3.py | py | 2,719 | python | en | code | 0 | github-code | 13 |
29102227709 | import numpy as np
import pandas as pd
import pymongo
import os
import errno
import logging
from urllib import parse, request
# from urllib.error import HTTPError
from func import get_InstitutionSearch, get_aff_id, read_credentials
from my_scival import InstitutionSearch, MetricSearch
import pickle as pk
from pprint import pprint as pp
import urllib.error
# Resolve the project root (parent of this file's directory) and chdir there
# so all relative data paths work regardless of the caller's cwd.
BASE_DIR = os.path.abspath(os.path.realpath(__file__))
BASE_DIR = os.path.join(os.path.dirname(BASE_DIR), '..')
os.chdir(BASE_DIR)
FOLNAME_AFF_SEARCH = os.path.join(BASE_DIR, 'data', 'aff_search')
FOLNAME_METRIC_RESPONSE = os.path.join(BASE_DIR, 'data', 'metric_response')
# Column/key names used throughout the affiliation table.
key_aff = 'Institution'
key_acc = 'id_downloaded'
key_id = 'id'
key_met = 'metrics'
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# create file handler which logs debug-and-above messages (file is truncated
# on every run)
fh = logging.FileHandler(os.path.join(BASE_DIR, 'logs', 'logs.txt'), 'w', 'utf-8')
fh.setLevel(logging.DEBUG)
# create console handler with a debug log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# creating a formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)-8s: %(message)s')
# setting handler format
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
class my_df_id():
    """Cursor-style helper over a DataFrame of affiliations.

    Walks the rows in index order and yields the name (index label) of the
    next affiliation whose 'is_downloaded' flag is 0.
    """

    def __init__(self, df):
        self.i = 0
        # Work on a copy with NaN flags normalised to 0 ("not downloaded").
        self.df = df.replace(np.nan, 0).copy()
        self.n_inds = len(self.df.index.tolist())
        self.inds = self.df.index.tolist()

    def next_row(self):
        """Return the next row and advance the cursor; -1 when exhausted."""
        if self.i < self.n_inds:
            ind = self.inds[self.i]
            # BUG FIX: read from self.df (the code previously referenced a
            # module-level global `df`) and advance the cursor (previously
            # `self.i` was never incremented, so next_aff_name() looped
            # forever on the same row).
            row = self.df.loc[ind, :]
            self.i += 1
            return row
        else:
            return -1

    def next_aff_name(self):
        """Return the next not-yet-downloaded affiliation name, or '-1'."""
        while True:
            row = self.next_row()
            if isinstance(row, int):
                # BUG FIX: the original `raise("sorry ...")` raised a
                # TypeError (strings are not exceptions); return the '-1'
                # sentinel the code already prepared instead.
                return '-1'
            if row['is_downloaded'] == 0:
                return row.name
def pd_write_data(df, d, aux_key=None, aux_val=None):
    """
    write data from dict d to pandas.dataframe df
    data will be written to columns of df by using keys

    The row is keyed by d['name']; a new (all-NaN) row is appended when that
    name is not yet in the index. When aux_key/aux_val are both given, that
    extra column is also set on the same row. Returns the resulting frame.
    """
    # if the university not in the table
    if not (d['name'] in df.index.tolist()):
        # FIX: DataFrame.append() was removed in pandas 2.0 — build the new
        # empty row explicitly and concatenate instead.
        new_row = pd.DataFrame(index=pd.Index([d['name']], name=df.index.name))
        df = pd.concat([df, new_row])
    for key, value in d.items():
        df.at[d['name'], key] = value
    if aux_key is not None and aux_val is not None:
        df.at[d['name'], aux_key] = aux_val
    return df
if __name__=="__main__":
    """
    get the InstitutionSearch output for every affiliation that was not already downloaded
    """
    MY_API_KEY = read_credentials("MY_API_KEY")
    # MY_API_KEY = "e53785aedfc1c54942ba237f8ec0f891"
    # MY_API_KEY = "7f59af901d2d86f78a1fd60c1bf9426a"
    logger.debug('loading university names')
    fname_aff_names = os.path.join(BASE_DIR, 'data', "universities_table.csv")
    df = pd.read_csv(fname_aff_names).set_index(key_aff)
    a = df[key_acc].replace(np.nan,0) == 0 # which universities are not downloaded
    all_aff_names = df.index[a].tolist()
    # Process only the first n pending affiliations per run.
    n = 15
    responses = []
    jsons = []
    dff = df.copy()
    dff = dff.replace(0, '')
    logger.debug('finalizing affiliation ids to get')
    aff_name = []
    for i in range(n):
        # NOTE(review): aff_name grows each iteration, so every API call
        # re-sends all names collected so far — confirm that is intended.
        aff_name.append(all_aff_names[i])
        logger.debug('aff_name is {}'.format(aff_name[i]))
        # res = get_InstitutionSearch(aff_name, MY_API_KEY)
        try:
            logger.debug('retrieving aff ids')
            res = InstitutionSearch(query_type="name", universityName=aff_name, apiKey=MY_API_KEY, logger=logger).get_jres()
            dict_res, json_res = get_aff_id(res.jres)
            logger.debug('aff ids retrieval is successful')
            responses.append(dict_res)
            jsons.append(json_res)
            # Pickle the raw response batch for offline inspection.
            fname_save_responses = 'responses_{}_{}.pickle'.format(aff_name[0], n)
            fname_save_responses = os.path.join(FOLNAME_AFF_SEARCH, fname_save_responses)
            pk.dump(dict_res, open(fname_save_responses, 'wb'))
            """
            fname_save_json = 'json{}.pickle'.format(aff_name)
            fname_save_responses = os.path.join(FOLNAME_AFF_SEARCH, fname_save_responses)
            fname_save_json = os.path.join(FOLNAME_AFF_SEARCH, fname_save_json)
            logger.debug('saving responses and json response to {} and {} respectively'.format(fname_save_responses, fname_save_json))
            pk.dump(dict_res, open(fname_save_responses, 'wb'))
            pk.dump(json_res, open(fname_save_json, 'wb'))
            """
            # pp(res.jres)
            # Mark every queried and every resolved affiliation as downloaded.
            for x in aff_name:
                dff.at[x, key_acc] = 1
            for x in dict_res:
                logger.debug('updating acknowledgement in the table for affiliation {}'.format(x))
                dff = pd_write_data(dff, x, key_acc, 1)
        except Exception as e:
            # NOTE(review): `res` is unbound here if InstitutionSearch itself
            # raised, which would turn this handler into a NameError; also
            # `dff.at[aff_name, ...]` indexes with a *list* of names — verify
            # both against a real failure.
            if res.http_error in [401, 429]:
                logger.debug("error retrieved, error is {}".format(res.http_error))
            else:
                logger.debug("error retrieved, error is {}".format(res.http_error))
                dff.at[aff_name, key_acc] = -1
        # Persist progress after every affiliation so a crash loses little.
        logger.debug('updating csv file {}'.format(fname_aff_names))
        dff.to_csv(fname_aff_names)
        """
        if res.jres is not None:
            dict_res, json_res = get_aff_id(res.jres)
            responses.append(dict_res)
            jsons.append(json_res)
            fname_save_responses = 'responses_{}.pickle'.format(aff_name)
            fname_save_json = 'json{}.pickle'.format(aff_name)
            fname_save_responses = os.path.join(FOLNAME_AFF_SEARCH, fname_save_responses)
            fname_save_json = os.path.join(FOLNAME_AFF_SEARCH, fname_save_json)
            logger.debug('saving responses and json response to {} and {} respectively'.format(fname_save_responses, fname_save_json))
            pk.dump(dict_res, open(fname_save_responses, 'wb'))
            pk.dump(json_res, open(fname_save_json, 'wb'))
            # pp(res.jres)
            dff.at[aff_name, key_acc] = 1
            for x in dict_res:
                dff = pd_write_data(dff, x, key_acc, 1)
        elif res.jres is None:
            if res.http_error in [401, 429]:
                logger.debug("error retrieved, error is {}".format(res.http_error))
            else:
                logger.debug("error retrieved, error is {}".format(res.http_error))
                dff.at[aff_name, key_acc] = -1
        logger.debug('updating csv file {}'.format(fname_aff_names))
        dff.to_csv(fname_aff_names)
        """
| gnukinad/scival | src/get_aff_ids.py | get_aff_ids.py | py | 6,792 | python | en | code | 1 | github-code | 13 |
35297246598 | # @nzm_ort
# https://github.com/nozomuorita/atcoder-workspace-python
# import module ------------------------------------------------------------------------------
from collections import defaultdict, deque, Counter
import math
from itertools import combinations, permutations, product, accumulate, groupby, chain
from heapq import heapify, heappop, heappush
import bisect
import sys
# sys.setrecursionlimit(100000000)
inf = float('inf')
mod1 = 10**9+7
mod2 = 998244353
def ceil_div(x, y): return -(-x//y)
# main code ------------------------------------------------------------------------------------
n = int(input())
s = input()
ans = ''
for ch in s:
    # Rotate each uppercase letter forward by n, wrapping past 'Z'.
    # Modular arithmetic generalises the original single "-26" correction
    # (which only handled shifts up to 26) to any shift amount.
    ans += chr((ord(ch) - ord('A') + n) % 26 + ord('A'))
print(ans) | nozomuorita/atcoder-workspace-python | abc/abc146/B/answer.py | answer.py | py | 760 | python | en | code | 0 | github-code | 13 |
33100022424 | from oled.device import sh1106
from oled.render import canvas
from PIL import ImageDraw, ImageFont
from datetime import datetime
FONT_FILE0 = 'Roboto-BoldCondensed.ttf'
FONT_FILE1 = 'wwDigital.ttf'
class SSPMeteoOled:
oled = sh1106()
font0 = ImageFont.truetype(FONT_FILE0, 30)
font1 = ImageFont.truetype(FONT_FILE0, 24)
font2 = ImageFont.truetype(FONT_FILE1, 12)
@classmethod
def begin(cls):
with canvas(cls.oled) as draw:
draw.text((8, 14), 'SSPMeteo2', 1, cls.font1)
draw.text((0, 50), 'Esperando datos...', 1, cls.font2)
@classmethod
def update(cls, ddatos):
for k, v in ddatos.items():
ddatos[k] = float(v)
with canvas(cls.oled) as draw:
# Line 1 - Temp. and humidity
line = '{:.1f}º {:.0f}%'.format(ddatos['temp'], ddatos['humi'])
font = cls.font0
if draw.textsize(line, font)[0] > cls.oled.width:
font = cls.font1
of = int((cls.oled.width - draw.textsize(line, font)[0]) / 2)
draw.text((0 + of, 0), line, 1, font)
# Line 2 - Rain
line = ''
if ddatos['llud'] > 0 or ddatos['lluh'] > 0:
if ddatos['lluh'] > 0:
line = '{:.1f}mm/h {:.0f}mm'.format(ddatos['lluh'], ddatos['llud'])
#~ status = '¡ LLUEVE ! '
else:
line = 'Lluvia diaria {:.0f}mm'.format(ddatos['llud'])
draw.text((0, 28), line, 1, cls.font2)
# Line 3 - Pressure and wind
line = '{:.0f}mb {:.0f}kph {:.0f}º'.format(ddatos['pres'], ddatos['vven'], ddatos['dven'])
font = cls.font2
draw.text((0, 40), line, 1, font)
# Line 4 - Status
d, resto = divmod(ddatos['wdog'] * 5, 24 * 60)
h, m = divmod(resto, 60)
line = '{} {}d{}:{}'.format(datetime.now().strftime('%H:%M:%S'), int(d), int(h), int(m))
draw.text((0, 52), line, 1, cls.font2)
if __name__ == "__main__":
SSPMeteoOled.begin()
| sersope/sspmeteo2 | sspmeteo2_oled.py | sspmeteo2_oled.py | py | 2,091 | python | en | code | 0 | github-code | 13 |
31440268213 | #!/usr/bin/env python3
# This file is part of krakenex.
# Licensed under the Simplified BSD license. See `examples/LICENSE.txt`.
# Demonstrate use of json_options().
from types import SimpleNamespace
import krakenex
kraken = krakenex.API().json_options(object_hook=lambda kv: SimpleNamespace(**kv))
response = kraken.query_public('Time')
if response.error:
print('error:', response.error)
else:
result = response.result
print('unixtime:', result.unixtime)
print('rfc1123:', result.rfc1123)
| veox/python3-krakenex | examples/json-options.py | json-options.py | py | 513 | python | en | code | 688 | github-code | 13 |
39667073784 | __author__ = 'rsimpson'
from constraintSatisfaction import *
from math import sqrt
# This variable defines the size of the grids within the Sudoku puzzle - N x N x N (N grids, each with NxN cells)
# This value needs to have an integer square root, i.e., 4, 9, 16, 25...
gridSize = 4
class CSPGraphSudoku(CSPGraph):
def __init__(self):
# call parent constructor
CSPGraph.__init__(self)
def createNotEqualConstraints(_cellList, _cspGraph):
'''
cellList contains a list of lists. Each sublist is a list of cells that should not be equal.
This function creates the constraint objects to implement the not-equals constraints.
'''
# for each list of cells in a single column (or row, or square)...
for cells in _cellList:
# get a cell...
for c1 in range(0, len(cells)-1):
# create a not equal constraint for all the cells after it in the list
for c2 in range(c1+1, len(cells)):
# get the two cells we're creating a constraint for
ftrTail = str(cells[c1])
ftrHead = str(cells[c2])
# create a new constraint object from tail to head
newConstraint = CSPConstraintNotEqual(_cspGraph.getFeature(ftrTail), '!=', _cspGraph.getFeature(ftrHead))
# put the new constraint in the graph's list of constraints
_cspGraph.addConstraint(newConstraint)
# create a new constraint object from head to tail
newConstraint = CSPConstraintNotEqual(_cspGraph.getFeature(ftrHead), '!=', _cspGraph.getFeature(ftrTail))
# put the new constraint in the graph's list of constraints
_cspGraph.addConstraint(newConstraint)
class CSPFeatureGridCell(CSPFeature):
def __init__(self, _strName, _lstDomain):
# call parent constructor
CSPFeature.__init__(self, _strName, _lstDomain)
def sudoku():
# create a csp graph
cspGraph = CSPGraphSudoku()
# add a feature for every cell in the puzzle
# the puzzle consists of a GxG puzzle with G^2 grids of size NxN
for row in range(0, gridSize):
for col in range(0, gridSize):
# cell name is a combination of grid, row and column
cellName = str(row * gridSize + col)
# create a feature corresponding to the cell
cspGraph.addFeature(CSPFeatureGridCell(cellName, range(1, gridSize + 1)))
#
# add not-equal constraints
#
#
# start with column constraints
#
# begin with an empty list
colList = []
# fill the list with an empty list for each column in the puzzle
for c in range(0, gridSize):
# add an empty list
colList.append([])
# for each row...
for row in range(0, gridSize):
# for each column...
for col in range(0, gridSize):
# cell name is a combination of grid, row and column
cellName = row * gridSize + col
# add the cell name to the correct sub-list
colList[col].append(cellName)
#
# row constraints
#
# begin with an empty list
rowList = []
# fill the list with an empty list for each row in the puzzle
for r in range(0, gridSize):
# add a list with all the cells in a single row
rowList.append(range(r * gridSize, r * gridSize + gridSize))
#
# sub-grid constraints
#
# start with an empty list
sqrList = []
# fill the list with an empty list for each sub-grid in the puzzle
for s in range(0, gridSize):
# add an empty list
sqrList.append([])
# the number of sqrs in each row and column is the square-root of the total grid size
sqrSize = int(sqrt(gridSize))
# for each row of squares...
for sqrRow in range(0, sqrSize):
# for each column of squares...
for sqrCol in range(0, sqrSize):
# for each row within the square
for row in range(0, sqrSize):
# for each column within the square
for col in range(0, sqrSize):
# cell name is a combination of grid, row and column
cellName = sqrRow*gridSize*sqrSize + sqrCol*sqrSize + row*gridSize + col
# add the cell name to the correct sub-list
sqrList[sqrRow*sqrSize + sqrCol].append(cellName)
# this is a list of all the cells in each column of the grid
#colList = [['0', '2', '8', '10'], ['1', '3', '9', '11'], ['4', '6', '12', '14'], ['5', '7', '13', '15']]
createNotEqualConstraints(colList, cspGraph)
# this is a list of all the cells in each row of the grid
#rowList = [['0', '1', '4', '5'], ['2', '3', '6', '7'], ['8', '9', '12', '13'], ['10', '11', '14', '15']]
createNotEqualConstraints(rowList, cspGraph)
# this is a list of all the cells in each 2x2 square within the grid
#sqrList = [['0', '1', '2', '3'], ['4', '5', '6', '7'], ['8', '9', '10', '11'], ['12', '13', '14', '15']]
createNotEqualConstraints(sqrList, cspGraph)
hillClimbingSearch(cspGraph)
#backtrackingSearch(cspGraph)
sudoku()
| richs1000/Constraint-Satisfaction | sudokuBig.py | sudokuBig.py | py | 5,149 | python | en | code | 1 | github-code | 13 |
39140780576 | from typing import Optional, Union
from sqlalchemy import select
from sqlalchemy.orm import Session
from ...models import CalculatedPotential, ScoreBest, ScoreCalculated
from .account import AndrealImageGeneratorAccount
class AndrealImageGeneratorApiDataConverter:
def __init__(
self,
session: Session,
account: AndrealImageGeneratorAccount = AndrealImageGeneratorAccount(),
):
self.session = session
self.account = account
def account_info(self):
return {
"code": self.account.code,
"name": self.account.name,
"is_char_uncapped": self.account.character_uncapped,
"rating": self.account.rating,
"character": self.account.character,
}
def score(self, score: Union[ScoreCalculated, ScoreBest]):
return {
"score": score.score,
"health": 75,
"rating": score.potential,
"song_id": score.song_id,
"modifier": score.modifier or 0,
"difficulty": score.rating_class,
"clear_type": score.clear_type or 1,
"best_clear_type": score.clear_type or 1,
"time_played": score.date * 1000 if score.date else 0,
"near_count": score.far,
"miss_count": score.lost,
"perfect_count": score.pure,
"shiny_perfect_count": score.shiny_pure,
}
def user_info(self, score: Optional[ScoreCalculated] = None):
if not score:
score = self.session.scalar(
select(ScoreCalculated).order_by(ScoreCalculated.date.desc()).limit(1)
)
if not score:
raise ValueError("No score available.")
return {
"content": {
"account_info": self.account_info(),
"recent_score": [self.score(score)],
}
}
def user_best(self, song_id: str, rating_class: int):
score = self.session.scalar(
select(ScoreBest).where(
(ScoreBest.song_id == song_id)
& (ScoreBest.rating_class == rating_class)
)
)
if not score:
raise ValueError("No score available.")
return {
"content": {
"account_info": self.account_info(),
"record": self.score(score),
}
}
def user_best30(self):
scores = list(
self.session.scalars(
select(ScoreBest).order_by(ScoreBest.potential.desc()).limit(40)
)
)
if not scores:
raise ValueError("No score available.")
best30_avg = self.session.scalar(select(CalculatedPotential.b30))
best30_overflow = (
[self.score(score) for score in scores[30:40]] if len(scores) > 30 else []
)
return {
"content": {
"account_info": self.account_info(),
"best30_avg": best30_avg,
"best30_list": [self.score(score) for score in scores[:30]],
"best30_overflow": best30_overflow,
}
}
| 283375/arcaea-offline | src/arcaea_offline/external/andreal/api_data.py | api_data.py | py | 3,161 | python | en | code | 21 | github-code | 13 |
2334554298 | from typing import List
class Solution:
def kidsWithCandies(self, candies: List[int], extraCandies: int) -> List[bool]:
result = []
for idx, val in enumerate(candies):
candies[idx] = val + extraCandies
if max(candies) <= val + extraCandies:
result.append(True)
else:
result.append(False)
candies[idx] = val
return result
if __name__ == '__main__':
# Test Case 1
candies = [4, 2, 1, 1, 2]
extraCandies = 1
print(Solution().kidsWithCandies(candies, extraCandies)) # [true, false, false, false, false]
# Test Case 2
candies = [12, 1, 12]
extraCandies = 10
print(Solution().kidsWithCandies(candies, extraCandies)) # [true, false, true]
| k1m743hyun/algorithm-in-python | LeetCode/1431. Kids With the Greatest Number of Candies.py | 1431. Kids With the Greatest Number of Candies.py | py | 779 | python | en | code | 0 | github-code | 13 |
7397871253 | class Node:
def __init__(self,val,next):
self.val=val
self.next=next
def printfn(self):
dummy=self
while dummy!=None:
print(dummy.val,end="->")
dummy=dummy.next
print()
def insert_At(self,v,position):
dummy=self
ind=0
if position==0:
return Node(v,self)
while ind<position-1:
ind+=1
dummy=dummy.next
if dummy==None:
print("Index reached")
return self
next=dummy.next
dummy.next=Node(v,next)
return self
def insert_At_rec(self,v,pos):
if pos==0:
return Node(v,self)
if pos==1:
dummy=self
next=dummy.next
dummy.next=Node(v,next)
return self
else:
if self.next==None:
print("index out of range")
return self
self.next.insert_At_rec(v,pos-1)
return self
n1=Node(1,Node(2,Node(3,Node(4,None))))
n1.printfn()
n1=n1.insert_At(9,2)
n1=n1.insert_At(6,1)
n1=n1.insert_At(114,8)
n1=n1.insert_At(114,0)
n1.printfn()
n1=n1.insert_At_rec(3,13)
n1.printfn() | NandhniV25/Data-Structures | 01_linked_list/04_insert_at_position_and_length_of_the_node.py | 04_insert_at_position_and_length_of_the_node.py | py | 1,210 | python | en | code | 0 | github-code | 13 |
74481971538 | import time
import argparse
import hashlib
import json
import logging
import os
import signal
import sys
# Status Constants
FILE_KNOWN_UNTOUCHED = "FILE_KNOWN_UNTOUCHED"
FILE_KNOWN_TOUCHED = "FILE_KNOWN_TOUCHED"
FILE_UNKNOWN = "FILE_UNKNOWN"
# List of dangerous file extensions
dangerous_extensions = set([
"DMG", "DLL", "ACTION", "APK", "APP", "BAT", "BIN", "CMD", "COM",
"COMMAND", "CPL", "CSH", "EXE", "GADGET", "INF1", "INS", "INX", "IPA",
"ISU", "JOB", "JSE", "KSH", "LNK", "MSC", "MSI", "MSP", "MST", "OSX",
"OUT", "PAF", "PIF", "PRG", "PS1", "REG", "RGS", "RUN", "SCT", "SH",
"SHB", "SHS", "U3P", "VB", "VBE", "VBS", "VBSCRIPT", "WORKFLOW", "WS",
"WSF"])
# Global variables
cached_db = None
###########
#Utilities#
###########
def shellquote(s):
return "'" + s.replace("'", "'\\''") + "'"
def sha256_checksum(filename, block_size=65536):
sha256 = hashlib.sha256()
with open(filename, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
sha256.update(block)
return sha256.hexdigest()
def check_file_status(file_info):
global cached_db
known_path = False
for db_file in cached_db:
if db_file["path"] == file_info["path"]:
known_path = True
if file_info["sha256"] in db_file["sha256"]:
return FILE_KNOWN_UNTOUCHED
else:
return FILE_KNOWN_TOUCHED
if not known_path:
return FILE_UNKNOWN
def add_alert_to_db(file_info, status):
global cached_db
with open("binsnitch_data/db.json") as data_file:
db_data = json.load(data_file)
for db_file in db_data:
if db_file["path"] == file_info["path"]:
if file_info["sha256"] not in db_file["sha256"]:
db_file["sha256"].append(file_info["sha256"])
if status == FILE_UNKNOWN:
logging.info("New file detected: " + db_file["path"] +
" - hash: " + file_info["sha256"])
if status == FILE_KNOWN_TOUCHED:
logging.info("Modified file detected: " + db_file["path"] +
" - new hash: " + file_info["sha256"])
cached_db = db_data
write_to_db(cached_db)
def write_to_db(db_data):
s = signal.signal(signal.SIGINT, signal.SIG_IGN)
json.dump(
db_data, open("binsnitch_data/db.json", 'w'), sort_keys=False,
indent=4, separators=(',', ': '))
signal.signal(signal.SIGINT, s)
def add_file_to_db(file_info):
global cached_db
with open("binsnitch_data/db.json") as data_file:
db_data = json.load(data_file)
file_info_to_add = {"path": file_info["path"], "sha256": [file_info["sha256"]]}
db_data.append(file_info_to_add)
cached_db = db_data
write_to_db(cached_db)
def refresh_cache():
global cached_db
try:
file = open("binsnitch_data/db.json", 'r')
cached_db = json.load(file)
except Exception as exc:
print(str(sys.exc_info()))
def prepare_data_files(args):
global cached_db
# Wipe both alerts and db file in case the user wants to start fresh
try:
if args.wipe:
os.remove("binsnitch_data/db.json")
os.remove("binsnitch_data/alerts.log")
except (IOError, OSError):
pass # if the files are not there yet, then the wipe does not do anything anyway
# Make sure the data folders exist
if not os.path.exists("./binsnitch_data"):
os.makedirs("./binsnitch_data")
try:
file = open("binsnitch_data/db.json", 'r')
except IOError:
json.dump([], open("binsnitch_data/db.json", 'w'))
try:
file = open("binsnitch_data/alerts.log", 'r')
except IOError:
open("binsnitch_data/alerts.log", 'a').close()
refresh_cache()
############
#Entrypoint#
############
parser = argparse.ArgumentParser()
parser.add_argument("dir", type=str, help="the directory to monitor")
parser.add_argument("-v", "--verbose", action="store_true",
help="increase output verbosity")
parser.add_argument("-s", "--singlepass", action="store_true",
help="do a single pass over all files")
parser.add_argument("-a", "--all", action="store_true",
help="keep track of all files, not only executables")
parser.add_argument("-n", "--new", action="store_true",
help="alert on new files too, not only on modified files")
parser.add_argument("-b", "--baseline", action="store_true",
help="do not generate alerts (useful to create baseline)")
parser.add_argument("-w", "--wipe", action="store_true",
help="start with a clean db.json and alerts.log file")
args = parser.parse_args()
prepare_data_files(args)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
filename="binsnitch_data/alerts.log",
level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler())
logging.info("binsnitch.py started")
if not os.path.isdir(args.dir):
print("Error: " + args.dir + " could not be read, exiting.")
exit()
print("Loaded " + str(len(cached_db)) + " items from db.json into cache")
keepRunning = True
while keepRunning:
logging.info("Scanning " + str(args.dir) + " for new and modified files, this can take a long time")
for dirName, subdirList, fileList in os.walk(args.dir, topdown=False):
try:
if args.verbose:
print('Scanning %s' % dirName)
except UnicodeEncodeError as e:
continue
for filename in fileList:
full_path = os.path.join(dirName, filename)
file_extension = str.upper(os.path.splitext(full_path)[1][1:])
try:
process_file = False
if args.all:
process_file = True
else:
if file_extension in dangerous_extensions:
process_file = True
if process_file:
file_hash = sha256_checksum(full_path)
file_info = dict()
file_info["path"] = full_path
file_info["sha256"] = file_hash
status = check_file_status(file_info)
if status == FILE_UNKNOWN:
add_file_to_db(file_info)
if args.new and not args.baseline:
add_alert_to_db(file_info, FILE_UNKNOWN)
elif status == FILE_KNOWN_TOUCHED:
if not args.baseline:
add_alert_to_db(file_info, FILE_KNOWN_TOUCHED)
elif status == FILE_KNOWN_UNTOUCHED:
pass
except Exception as exc:
print(str(sys.exc_info()))
if not args.singlepass:
logging.info("Finished! Sleeping for a minute before scanning " + args.dir + " for changes again")
time.sleep(60)
else:
logging.info("Finished!")
keepRunning = False
| NVISOsecurity/binsnitch | binsnitch.py | binsnitch.py | py | 7,175 | python | en | code | 155 | github-code | 13 |
71292261778 | import numpy as np
import tensorflow as tf
import os
import cv2
from model import vgg
VIEWS = 6 # Total views
# loads the evaluation images
def load_eval(dimension):
images0 = []
images1 = []
images2 = []
images3 = []
images4 = []
images5 = []
ls = 200
# change before running
folder = "./MVCNN/Data/images/c/"
length = len(os.listdir(folder)) // VIEWS
files = os.listdir((folder))
files = sorted(files)
for filename in files:
view = int(filename.split("_")[1].split('.')[0])
view = view % VIEWS
img = cv2.imread(folder + filename, cv2.IMREAD_GRAYSCALE)
if img is not None:
if view == 0:
images0.append(img / 255.)
elif view == 1:
images1.append(img / 255.)
elif view == 2:
images2.append(img / 255.)
elif view == 3:
images3.append(img / 255.)
elif view == 4:
images4.append(img / 255.)
else:
images5.append(img / 255.)
images0 = np.array(images0)
images1 = np.array(images1)
images2 = np.array(images2)
images3 = np.array(images3)
images4 = np.array(images4)
images5 = np.array(images5)
images0 = np.reshape(images0, (ls, dimension * dimension))
images1 = np.reshape(images1, (ls, dimension * dimension))
images2 = np.reshape(images2, (ls, dimension * dimension))
images3 = np.reshape(images3, (ls, dimension * dimension))
images4 = np.reshape(images4, (ls, dimension * dimension))
images5 = np.reshape(images5, (ls, dimension * dimension))
images = [images0, images1, images2, images3, images4, images5]
return images
#load chairs dataset
test_images = load_eval(64)
test_evaluations = [[], [], [], [], [], []]
for id, view in enumerate([0,1,2,3,4,5]):
classifier = tf.estimator.Estimator(model_fn=vgg, model_dir="checkpoint/"+str(view)+"/")
eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(x={"x": np.array(test_images[id])},
num_epochs=1,
shuffle=False)
eval_results = classifier.predict(input_fn=eval_input_fn)
for eval in eval_results:
#print("probability that this instance is positive is %3.2f " % eval['probabilities'][1])
test_evaluations[id].append(eval['probabilities'][1])
evaluation_chairs = np.amin(test_evaluations, axis=0)
# print results
print(len(evaluation_chairs))
print("______")
print (np.where(evaluation_chairs > 0.99))
print(np.sort(evaluation_chairs)) | balashanmugam/mix-match-part-assembler | MVCNN/evaluate_sample.py | evaluate_sample.py | py | 2,664 | python | en | code | 1 | github-code | 13 |
17060017884 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class SpecEntity(object):
def __init__(self):
self._id = None
self._shop_id = None
self._spec_name = None
self._system = None
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def shop_id(self):
return self._shop_id
@shop_id.setter
def shop_id(self, value):
self._shop_id = value
@property
def spec_name(self):
return self._spec_name
@spec_name.setter
def spec_name(self, value):
self._spec_name = value
@property
def system(self):
return self._system
@system.setter
def system(self, value):
self._system = value
def to_alipay_dict(self):
params = dict()
if self.id:
if hasattr(self.id, 'to_alipay_dict'):
params['id'] = self.id.to_alipay_dict()
else:
params['id'] = self.id
if self.shop_id:
if hasattr(self.shop_id, 'to_alipay_dict'):
params['shop_id'] = self.shop_id.to_alipay_dict()
else:
params['shop_id'] = self.shop_id
if self.spec_name:
if hasattr(self.spec_name, 'to_alipay_dict'):
params['spec_name'] = self.spec_name.to_alipay_dict()
else:
params['spec_name'] = self.spec_name
if self.system:
if hasattr(self.system, 'to_alipay_dict'):
params['system'] = self.system.to_alipay_dict()
else:
params['system'] = self.system
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = SpecEntity()
if 'id' in d:
o.id = d['id']
if 'shop_id' in d:
o.shop_id = d['shop_id']
if 'spec_name' in d:
o.spec_name = d['spec_name']
if 'system' in d:
o.system = d['system']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/SpecEntity.py | SpecEntity.py | py | 2,134 | python | en | code | 241 | github-code | 13 |
70349005458 | from datetime import datetime, timedelta
from xivo_dao import cel_dao
from xivo_dao.alchemy.cel import CEL
from xivo_dao.helpers.cel_exception import CELException
from xivo_dao.tests.test_dao import DAOTestCase
def _new_datetime_generator(step=timedelta(seconds=1)):
base_datetime = datetime.now()
cur_datetime = base_datetime
while True:
yield cur_datetime
cur_datetime = cur_datetime + step
def _new_cel(**kwargs):
cel_kwargs = {
'eventtype': '',
'eventtime': datetime.now(),
'userdeftype': '',
'cid_name': u'name1',
'cid_num': u'num1',
'cid_ani': '',
'cid_rdnis': '',
'cid_dnid': '',
'exten': u'1',
'context': 'default',
'channame': u'SIP/A',
'appname': '',
'appdata': '',
'amaflags': 3,
'accountcode': '',
'peeraccount': '',
'uniqueid': '1',
'linkedid': '1',
'userfield': '',
'peer': '',
}
cel_kwargs.update(kwargs)
return CEL(**cel_kwargs)
class TestCELDAO(DAOTestCase):
def _insert_cels(self, cels):
self.session.begin()
for cel in cels:
self.session.add(cel)
self.session.commit()
def test_caller_id_by_unique_id_when_unique_id_is_present(self):
self._insert_cels([
_new_cel(eventtype='CHAN_START', cid_name='name1', cid_num='num1',
uniqueid='1'),
_new_cel(eventtype='APP_START', cid_name='name2', cid_num='num2',
uniqueid='2'),
])
self.assertEqual('"name2" <num2>', cel_dao.caller_id_by_unique_id('2'))
def test_caller_id_by_unique_id_when_unique_id_is_present_no_app_start(self):
self._insert_cels([
_new_cel(eventtype='CHAN_START', cid_name='name1', cid_num='num1',
uniqueid='1'),
])
self.assertEqual('"name1" <num1>', cel_dao.caller_id_by_unique_id('1'))
def test_caller_id_by_unique_id_when_unique_id_is_missing(self):
self._insert_cels([
_new_cel(eventtype='CHAN_START', cid_name='name1', cid_num='num1',
uniqueid='1'),
])
self.assertRaises(CELException, cel_dao.caller_id_by_unique_id, '2')
def test_channel_by_unique_id_when_channel_is_present(self):
self._insert_cels([
_new_cel(eventtype='CHAN_START', uniqueid='1', exten=u'100'),
_new_cel(eventtype='HANGUP', uniqueid='1'),
_new_cel(eventtype='CHAN_END', uniqueid='1'),
])
channel = cel_dao.channel_by_unique_id('1')
self.assertEqual(u'100', channel.exten())
def test_channel_by_unique_id_when_channel_is_missing(self):
self._insert_cels([
_new_cel(eventtype='CHAN_START', uniqueid='2'),
_new_cel(eventtype='HANGUP', uniqueid='2'),
_new_cel(eventtype='CHAN_END', uniqueid='2'),
])
self.assertRaises(CELException, cel_dao.channel_by_unique_id, '1')
def test_channels_for_phone_sip(self):
phone = {'protocol': 'sip',
'name': 'abcdef'}
cels = [
_new_cel(eventtype='CHAN_START', channame=u'SIP/abcdef-001', uniqueid=u'1',),
_new_cel(eventtype='HANGUP', uniqueid='1', linkedid=u'1'),
_new_cel(eventtype='CHAN_END', uniqueid='1', linkedid=u'1'),
_new_cel(eventtype='CHAN_START', channame=u'SIP/ghijkl-001', uniqueid=u'2', linkedid=u'2'),
_new_cel(eventtype='HANGUP', uniqueid='2', linkedid=u'2'),
_new_cel(eventtype='CHAN_END', uniqueid='2', linkedid=u'2'),
]
self._insert_cels(cels)
channels = cel_dao.channels_for_phone(phone)
self.assertEqual(len(channels), 1)
self.assertEqual(channels[0].linked_id(), u'1')
def test_channels_for_phone_sccp(self):
phone = {'protocol': 'sccp',
'name': '101'}
cels = [
_new_cel(eventtype='CHAN_START', channame=u'SCCP/101-001122334455-1', uniqueid=u'1',),
_new_cel(eventtype='HANGUP', uniqueid='1', linkedid=u'1'),
_new_cel(eventtype='CHAN_END', uniqueid='1', linkedid=u'1'),
_new_cel(eventtype='CHAN_START', channame=u'SCCP/102-001122334466-1', uniqueid=u'2', linkedid=u'2'),
_new_cel(eventtype='HANGUP', uniqueid='2', linkedid=u'2'),
_new_cel(eventtype='CHAN_END', uniqueid='2', linkedid=u'2'),
]
self._insert_cels(cels)
channels = cel_dao.channels_for_phone(phone)
self.assertEqual(len(channels), 1)
self.assertEqual(channels[0].linked_id(), u'1')
| jaunis/xivo-dao | xivo_dao/tests/test_cel_dao.py | test_cel_dao.py | py | 4,667 | python | en | code | 0 | github-code | 13 |
31495553094 | from advent_day import AdventDay
class Day(AdventDay):
test_files = {"data/day24/example.txt": [18, 54]}
data_file = "data/day24/data.txt"
clock = ["<", "^", ">", "v"]
directions = {"<": (0, -1), ">": (0, 1), "^": (-1, 0), "v": (1, 0)}
def parse_file(self, data):
data = data.split("\n")[:-1]
blizzards = []
for i, line in enumerate(data):
line = []
for j, char in enumerate(data[i]):
if char in {"<", ">", "^", "v"}:
blizzards.append((i, j, char))
line.append(".")
else:
line.append(char)
data[i] = line
start = (0, data[0].index("."))
end = (len(data) - 1, data[-1].index("."))
return data, blizzards, start, end
def move_blizzard(self, blizzard, chart):
direction = self.directions[blizzard[2]]
pos = (blizzard[0] + direction[0], blizzard[1] + direction[1])
if chart[pos[0]][pos[1]] == "#":
return self.teleport_blizzard((pos[0], pos[1], blizzard[2]), chart)
return pos[0], pos[1], blizzard[2]
@staticmethod
def teleport_blizzard(blizzard, chart):
x, y, direction = blizzard
if x == len(chart) - 1:
x = 1
elif x == 0:
x = len(chart) - 2
elif y == len(chart[x]) - 1:
y = 1
elif y == 0:
y = len(chart[x]) - 2
return x, y, direction
def move_blizzards(self, blizzards, chart):
for i, blizzard in enumerate(blizzards):
blizzards[i] = self.move_blizzard(blizzard, chart)
return blizzards
def debug(self, chart, blizzards):
for i, row in enumerate(chart):
line = []
for j, char in enumerate(row):
blizzards_at = []
for direction in self.clock:
if (i, j, direction) in blizzards:
blizzards_at.append(direction)
if len(blizzards_at) == 0:
line.append(char)
elif len(blizzards_at) == 1:
line.append(blizzards_at[0])
else:
line.append(str(len(blizzards_at)))
print("".join(line))
@staticmethod
def get_next_states(position, chart, blizzards):
moves = [(0, 0), (1, 0), (-1, 0), (0, 1), (0, -1)]
x, y = position
blizzard_positions = {(blizzard[0], blizzard[1]) for blizzard in blizzards}
next_states = set()
for (mx, my) in moves:
new_x, new_y = x + mx, y + my
if 0 <= new_x < len(chart) and 0 <= new_y < len(chart[new_x]):
if (new_x, new_y) not in blizzard_positions and chart[new_x][
new_y
] != "#":
next_states.add((new_x, new_y))
return next_states
def bfs(self, chart, blizzards, start, end):
states = {start}
depth = 0
while True:
blizzards = self.move_blizzards(blizzards, chart)
all_next_states = set()
for state in states:
all_next_states.update(self.get_next_states(state, chart, blizzards))
states = all_next_states
depth += 1
if end in states:
return depth
def part_1_logic(self, data):
chart, blizzards, start, end = data
depth = self.bfs(chart, blizzards, start, end)
return depth
def part_2_logic(self, data):
chart, blizzards, start, end = data
depth_1 = self.bfs(chart, blizzards, start, end)
depth_2 = self.bfs(chart, blizzards, end, start)
depth_3 = self.bfs(chart, blizzards, start, end)
return depth_1 + depth_2 + depth_3
day = Day()
| lukap3/adventofcode2022 | days/day24.py | day24.py | py | 3,832 | python | en | code | 0 | github-code | 13 |
28003112649 | #Faça um programa que, leia uma matriz 5x2 com os números de telefones dos clientes, as linhas representam os clientes, as colunas representam os telefones. E uma lista de 5 elementos com os nomes dos clientes. Depois de preenchidos a lista e a matriz, deverá ser feito uma busca pelo nome do cliente, se o nome existir, deverá ser mostrado na tela, os telefones desse cliente.
from random import randint
lista_nomes = []
matriz_telefones = []
for i in range(5):
lista_nomes.append('Cliente '+str(i+1))
linha = []
for j in range(2):
linha.append(randint(1000000,2000000))
matriz_telefones.append(linha)
print(lista_nomes)
for i in range(5):
print(matriz_telefones[i])
busca = str(input('Qual nome você deseja procurar na lista? '))
if busca in lista_nomes:
if True:
print('Os telefones são: ', matriz_telefones[lista_nomes.index(busca)][0],'e', matriz_telefones[lista_nomes.index(busca)][1])
else:
print('Nome não está na lista')
#-------------------------
print('--------------')
tel = []
nome = []
for i in range(5):
nome.append(input('Nome: '))
lin = []
for j in range(2):
lin.append(randint(10000000,20000000))
tel.append(lin)
for i in range(5):
print(nome[i],' - ', tel[i])
pesq = input('Digite um nome para buscar: ')
while pesq.upper() != "FIM":
if pesq in nome:
idx = nome.index(pesq)
print(pesq,' - telefones:', tel[idx])
else:
print('NOme não foi encontrado')
pesq = input('Digite um nome para busca: ')
| felipefporto/FATEC-Itapetininga | Linguagem-de-Programacao/aula_19_05_22_exer_2.py | aula_19_05_22_exer_2.py | py | 1,477 | python | pt | code | 0 | github-code | 13 |
38586411930 | """
Return a dictionary representing the header block - a block in JSON
representing metadata about the file.
"""
from ..util import chunk_sequence, hasher
import datetime
import os
import pathlib
def metadata(desc, source):
stat = os.stat(source)
block_count, rem = divmod(stat.st_size, desc.qr.block_size)
block_count += bool(rem)
c, r = desc.dimensions
metadata_every = chunk_sequence.guess_metadata_every(c * r)
chunk_count = 1 + block_count + (block_count // metadata_every)
return {
'block': {'count': block_count, 'size': desc.qr.block_size},
'chunk': {'count': chunk_count, 'size': desc.qr.chunk_size},
'dimensions': desc.dimensions,
'file_bytes': stat.st_size,
'file_name': pathlib.Path(source).name,
'sha256': hasher.hash_file(source).hexdigest(),
'timestamp': str(datetime.datetime.utcfromtimestamp(stat.st_mtime)),
}
def format(sha256, **metadata):
return FORMAT.format(s1=sha256[:32], s2=sha256[32:], **metadata)
FORMAT = """\
<pre>
file_name: {file_name}
file_bytes: {file_bytes}
timestamp: '{timestamp}'
sha256: "\\
{s1}\\
{s2}\\
"
block: {{count: {block[count]}, size: {block[size]}}}
chunk: {{count: {chunk[count]}, size: {chunk[size]}}}
dimensions: {dimensions}
</pre>
"""
| rec/hardback | hardback/book/metadata.py | metadata.py | py | 1,300 | python | en | code | 1 | github-code | 13 |
6188635555 | # birthdaySingAlong_cc.py
# Created by Jo Narvaez-Jensen
# Project 2C
# This program inputs a user's name and sings them happy birthday with a bouncing ball.
from graphics import *
from random import *
# textFormat: apply the card's standard styling to a graphics Text object
# and build a matching drop shadow for it.
def textFormat (text, size):
    """Style *text* in place and return (shadow, text).

    text -- a graphics Text object to style.
    size -- point size; it also selects the colour, since every caller
            uses a fixed size (20 -> blue, 34 -> dodgerblue,
            anything else -> darkmagenta).

    The shadow is a dimgray clone of the styled text, nudged by (-2, -2)
    so the coloured text can be drawn on top of it.
    """
    text.setSize(size)
    text.setFace('arial')
    text.setStyle('bold italic')
    # Colour keyed off the size rather than an if/elif ladder.
    sizeColors = {20: 'blue', 34: 'dodgerblue'}
    text.setTextColor(sizeColors.get(size, 'darkmagenta'))
    shadow = text.clone()
    shadow.setTextColor('dimgray')
    shadow.move(-2, -2)
    return shadow, text
# Function to change the various text colors
def colorChange (text):
    """Give *text* a random RGB colour and return it.

    Bug fix: the file does 'from random import *', which binds the name
    'random' to the random() *function*, so the original call
    random.randint(...) raised AttributeError.  The star import already
    puts randint itself in scope, so call it directly.
    """
    text.setTextColor(color_rgb(randint(0, 255), randint(0, 255), randint(0, 255)))
    return text
# Function to draw the various graphic features
def featureDraw (feature, window):
    """Draw every graphics object in the feature list onto the window."""
    for item in feature:
        item.draw (window)
# Function to clear features from the graphic window
def featureClear (feature, window):
    """Remove every graphics object in the feature list from the window.

    Bug fix: the original body was a copy-paste of featureDraw and
    *drew* the features again instead of clearing them; undraw() is the
    graphics-library call that removes an object from its window.  The
    unused window parameter is kept so the signature mirrors featureDraw.
    """
    for i in range (len(feature)):
        feature[i].undraw ()
# method getNam: explains program and instructs the user for their name
def getName (window):
    """Build the name-prompt features: an instruction banner (with its
    shadow) and an Entry box for the birthday person's name.

    Bug fix: the original created these objects, assigned textFormat's
    (shadow, text) tuple to a single name, and returned None -- yet
    main() passes the result straight to featureDraw() and expects a
    feature list.  Now returns [shadow, banner, entry]; the Entry is
    last so callers can fish it back out for getText().
    """
    welcome = Text (Point(250, 350), "Please Tell Me\nWho Has a Birthday!")
    welcomeShadow, welcome = textFormat (welcome, 22)
    name = Entry (Point (250, 280), 30)
    name.setText("Name Here")
    return [welcomeShadow, welcome, name]
# Method to create and set text for the programs start button, with associated
# assoicated instruction
def button (window):
    """Build the start-button features: the rectangle plus its label and
    the label's shadow, returned as a feature list for featureDraw().

    Bug fixes: the original never appended the widgets it built to
    buttonFeature (so an empty list was returned and nothing was drawn)
    and assigned textFormat's (shadow, text) tuple to a single name.
    """
    buttonFeature = []
    # the "physical" button
    button = Rectangle (Point (150, 250), Point (350, 200))
    button.setFill ("royalblue")
    buttonFeature.append (button)
    # the button label (with drop shadow from textFormat)
    buttonText = Text (Point (250, 225), "Click here to start!")
    labelShadow, buttonText = textFormat (buttonText, 19)
    buttonFeature.append (labelShadow)
    buttonFeature.append (buttonText)
    return buttonFeature
# Method cretas a pretty birthday card congratulating the user on their birthday
def birthdayCard (window, name):
    """Draw the birthday-card backdrop rectangle onto the window.

    The name argument is unused here; it is kept so the card-building
    helpers share a common signature.
    """
    card = Rectangle (Point (50, 450), Point (450, 190))
    card.setFill ('lightpink')
    card.setOutline ('darkslateblue')
    card.setWidth (9)
    card.draw (window)
def birthdayMessage (window, name):
    """Build the card's text features -- title, recipient name, and the
    bottom message, each paired with its drop shadow -- and return them
    as one feature list suitable for featureDraw().

    Bug fixes: the original assigned textFormat's (shadow, text) tuple
    to a single name for the title, unpacked it in the wrong order for
    the recipient name (shadow landed in ``name``), and then dropped
    everything without drawing or returning it.
    """
    size = 36
    # title text
    title = Text (Point (250, 390), "!! Happy Birthday !!")
    titleShadow, title = textFormat (title, size)
    # user's name
    name = Text (Point (250, 320), "{0}".format(name.getText()))
    nameShadow, name = textFormat (name, size)
    # bottom message
    message = Text (Point (250, 245), "! Congatulations !")
    messageShadow, message = textFormat (message, 30)
    return [titleShadow, title, nameShadow, name, messageShadow, message]
# method that determines which verse is needed for the song
def lyrics (i, name):
    """Return the (shadow, text) pair for verse i of the song.

    Verse 2 is the "dear <name>" line; every other verse is the refrain.
    """
    if i == 2:
        verse = Text (Point (250, 75), "Hap - py Birth - day dear {0}".format (name.getText()))
    else:
        verse = Text (Point (250, 75), "Hap - py Birth - day to you ")
    return textFormat (verse, 20)
# graphic method for the ball, left seperate to ensure limited conflicts with
# the bounce method
def drawBall (window):
    """Create the bouncing ball at the left edge of the lyric line,
    draw it, and return it for the caller to animate."""
    ball = Circle (Point (5, 160), 12)
    for paint in (ball.setFill, ball.setOutline):
        paint ('slategray1')
    ball.draw (window)
    return ball
# graphic method for moving the ball object across the graphic window
def bouncing (ball):
    """Bounce the ball across the window in six arcs.

    Bug fix: this function calls time.sleep, but the module never
    imports ``time`` itself -- it only worked when the name happened to
    leak in through ``from graphics import *``.  Import it locally so
    the dependency is explicit.
    """
    import time
    # Each arc is 10 small up-steps then 10 down-steps so the motion
    # looks fluid; ~12 ms per step is near the API's practical minimum.
    for bounce in range (6):
        for step in range (10):
            ball.move (4, -7.3)
            time.sleep (.012)
        for step in range (10):
            ball.move (4, 7.3)
            time.sleep (.012)
# method for the lyrics, ball, and bounce output
def birthdaySong (window, name):
    """Play the four verses of the song, each with a bouncing ball.

    Bug fixes: the original built each verse but never drew it, so the
    lyrics were invisible while the ball bounced; it also contained a
    no-op ``i=+1`` (which assigns +1, it is not ``i += 1``) inside the
    for loop.  Returns the last verse's (shadow, text) pair, undrawn,
    so main() can relabel and redraw it as the exit prompt.
    """
    for i in range (4):
        ball = drawBall(window)
        verse = lyrics (i, name)
        featureDraw (verse, window)
        bouncing (ball)
        ball.undraw ()
        for part in verse:
            part.undraw ()
    return verse
# calls all funcations and closes the window when the user is done.
def main ():
    """Drive the whole program: prompt for a name, then show the card."""
    # Create a graphing window
    window = GraphWin ("Birthday Sing Along", 500, 500)
    window.setCoords (0, 0, 500, 500)
    window.setBackground ("darkcyan")
    # NOTE(review): getName() is called twice (here and below), creating
    # duplicate widgets; it also returns None, so featureDraw(None, ...)
    # fails at len() -- confirm what was intended.
    getName (window)
    featureDraw (getName (window), window)
    featureDraw (button (window), window)
    if window.getMouse ():
        # NOTE(review): this passes the getName *function* object, not a
        # feature list, so featureClear raises TypeError at len().
        featureClear (getName, window)
        # NOTE(review): 'name' is never defined in this scope -- this
        # raises NameError; presumably the Entry built inside getName
        # was meant to be captured and passed here.
        birthdayCard (window, name)
        verse = birthdaySong (window, name)
        for i in range (2):
            verse[i].setText ("Click to Exit")
        featureDraw (verse, window)
        window.getMouse ()
        window.close ()
main ()
#IDEAS Crete function to print the text, iterating thru a list of each in order to save repeating myself and keep a text with it's shadow form
| thenobleone/Programming | CSC-110/Project2/birthdaySingAlongCC.py | birthdaySingAlongCC.py | py | 5,139 | python | en | code | 1 | github-code | 13 |
73845779858 | import argparse
from gdl_apps.EMOCA.utils.io import save_obj, save_images, save_codes, test
import os
import gc
import librosa
import PIL.Image as Image
import numpy as np
from pathlib import Path
import torch
import math
import tgm
### rotational conversion
def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor:
    """Convert an angle axis to a quaternion.
    DECA project adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
    Args:
        angle_axis (torch.Tensor): tensor with angle axis.
    Return:
        torch.Tensor: tensor with quaternion, ordered (w, x, y, z).
    Shape:
        - Input: :math:`(*, 3)` where `*` means, any number of dimensions
        - Output: :math:`(*, 4)`
    """
    if not torch.is_tensor(angle_axis):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(angle_axis)))
    if not angle_axis.shape[-1] == 3:
        raise ValueError("Input must be a tensor of shape Nx3 or 3. Got {}"
                         .format(angle_axis.shape))
    # unpack input and compute conversion
    a0: torch.Tensor = angle_axis[..., 0:1]
    a1: torch.Tensor = angle_axis[..., 1:2]
    a2: torch.Tensor = angle_axis[..., 2:3]
    theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2
    theta: torch.Tensor = torch.sqrt(theta_squared)
    half_theta: torch.Tensor = theta * 0.5
    mask: torch.Tensor = theta_squared > 0.0
    ones: torch.Tensor = torch.ones_like(half_theta)
    # Bug fix: sin(theta/2)/theta used to be evaluated for *all* rows,
    # so zero-angle rows produced 0/0 = NaN before torch.where masked
    # them; such NaNs still poison gradients through where().  Guard the
    # denominator instead: masked-out rows divide by 1, and where()
    # discards that value, so results are unchanged everywhere.
    safe_theta: torch.Tensor = torch.where(mask, theta, ones)
    k_neg: torch.Tensor = 0.5 * ones  # small-angle limit of sin(t/2)/t
    k_pos: torch.Tensor = torch.sin(half_theta) / safe_theta
    k: torch.Tensor = torch.where(mask, k_pos, k_neg)
    w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones)
    quaternion: torch.Tensor = torch.zeros_like(angle_axis)
    quaternion[..., 0:1] += a0 * k
    quaternion[..., 1:2] += a1 * k
    quaternion[..., 2:3] += a2 * k
    return torch.cat([w, quaternion], dim=-1)
def rot_mat_to_euler(rot_mats):
    """Extract a single Euler angle from a batch of 3x3 rotation
    matrices: atan2(-R[2,0], sqrt(R[0,0]^2 + R[1,0]^2)) -- for a pure
    Y-axis rotation Ry(theta) this evaluates to theta.
    Careful for extreme cases of euler angles like [0.0, pi, 0.0].
    """
    sy = torch.sqrt(rot_mats[:, 0, 0] ** 2 + rot_mats[:, 1, 0] ** 2)
    return torch.atan2(-rot_mats[:, 2, 0], sy)
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
    ''' same as batch_matrix2axis
    Calculates the rotation matrices for a batch of rotation vectors
    Parameters
    ----------
    rot_vecs: torch.tensor Nx3
        array of N axis-angle vectors
        x: pitch. positive for looking down.
        y: yaw. positive for looking left.
        z: roll. positive for tilting head right.
    epsilon: float
        small offset added to the vectors before taking the norm so the
        normalization below never divides by exactly zero
    Returns
    -------
    R: torch.tensor Nx3x3
        The rotation matrices for the given axis-angle parameters
    '''
    batch_size = rot_vecs.shape[0]
    device = rot_vecs.device
    # Bug fix: the epsilon parameter was accepted but ignored -- the
    # guard was hard-coded to 1e-8.  Default behavior is unchanged.
    # (NOTE: epsilon is added to each *component*, slightly perturbing
    # the axis direction; kept as-is to preserve the original results.)
    angle = torch.norm(rot_vecs + epsilon, dim=1, keepdim=True)
    rot_dir = rot_vecs / angle
    cos = torch.unsqueeze(torch.cos(angle), dim=1)
    sin = torch.unsqueeze(torch.sin(angle), dim=1)
    # Bx1 arrays
    rx, ry, rz = torch.split(rot_dir, 1, dim=1)
    zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
    # Skew-symmetric cross-product matrix K of the (unit) rotation axis.
    # (A dead "K = torch.zeros(...)" pre-allocation was removed; it was
    # immediately overwritten by this torch.cat.)
    K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
        .view((batch_size, 3, 3))
    ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
    # Rodrigues' formula: R = I + sin(a) K + (1 - cos(a)) K^2
    rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
    return rot_mat
def batch_axis2euler(r):
    """Axis-angle vectors (Nx3) -> one Euler angle per batch element,
    by composing batch_rodrigues with rot_mat_to_euler."""
    rot_mats = batch_rodrigues(r)
    return rot_mat_to_euler(rot_mats)
def angle_axis_to_quaternion_numpy(angle_axis):
    """Convert an angle axis to a quaternion.
    DECA project adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
    Args:
        angle_axis (np.ndarray): numpy array with angle axis.
    Return:
        np.ndarray: numpy array with quaternion, ordered (w, x, y, z).
    Shape:
        - Input: `(N, 3)`
        - Output: `(N, 4)`
    Example:
        >>> angle_axis = np.random.rand(2, 3)
        >>> quaternion = angle_axis_to_quaternion_numpy(angle_axis)
    """
    if not isinstance(angle_axis, np.ndarray):
        raise TypeError("Input type is not a np.ndarray. Got {}".format(
            type(angle_axis)))
    if not angle_axis.shape[-1] == 3:
        raise ValueError("Input must be a numpy array of shape Nx3 or 3. Got {}"
                         .format(angle_axis.shape))
    # unpack input and compute conversion
    a0 = angle_axis[..., 0:1]
    a1 = angle_axis[..., 1:2]
    a2 = angle_axis[..., 2:3]
    theta_squared = a0 * a0 + a1 * a1 + a2 * a2
    theta = np.sqrt(theta_squared)
    half_theta = theta * 0.5
    mask = theta_squared > 0.0
    ones = np.ones_like(half_theta)
    # Bug fix: the unconditional sin(theta/2)/theta emitted a
    # RuntimeWarning (0/0) for zero-angle rows even though np.where
    # discarded those entries.  np.divide with where= only performs the
    # division where theta > 0; elsewhere the 0.5 small-angle limit of
    # sin(t/2)/t is used (also fixed the doctest above, which called
    # the torch variant by mistake).
    k = np.divide(np.sin(half_theta), theta, out=0.5 * ones, where=mask)
    w = np.where(mask, np.cos(half_theta), ones)
    quaternion = np.zeros_like(angle_axis)
    quaternion[..., 0:1] += a0 * k
    quaternion[..., 1:2] += a1 * k
    quaternion[..., 2:3] += a2 * k
    return np.concatenate([w, quaternion], axis=-1)
def quaternion_to_euler_xyz(quaternion: torch.Tensor) -> torch.Tensor:
    """Convert a quaternion (w, x, y, z) to Euler angles in XYZ order
    (roll, pitch, yaw).
    Shape:
        - Input: :math:`(*, 4)` where `*` means, any number of dimensions
        - Output: :math:`(*, 3)`
    """
    if not torch.is_tensor(quaternion):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(quaternion)))
    if not quaternion.shape[-1] == 4:
        raise ValueError("Input must be a tensor of shape Nx4 or 4. Got {}"
                         .format(quaternion.shape))
    w = quaternion[..., 0:1]
    x = quaternion[..., 1:2]
    y = quaternion[..., 2:3]
    z = quaternion[..., 3:4]
    # Standard quaternion -> Euler formulas.
    roll = torch.atan2(2 * (w * x + y * z), 1 - 2 * (x * x + y * y))
    sinp = 2 * (w * y - z * x)
    # Clamp pitch to +/- pi/2 at the gimbal-lock boundary (|sinp| >= 1).
    half_pi = torch.tensor(math.pi / 2, device=sinp.device)
    pitch = torch.where(torch.abs(sinp) >= 1,
                        torch.sign(sinp) * half_pi,
                        torch.asin(sinp))
    yaw = torch.atan2(2 * (w * z + x * y), 1 - 2 * (y * y + z * z))
    return torch.cat([roll, pitch, yaw], dim=-1)
#
# data_con = np.load("data/incoming_data/machine_learning_output_sd.npy")
# print(data_con[0,0,0,50:].shape)
#
#
# posecode_tensor = torch.from_numpy(data_con[0,0,0,50:])
#
# glob_qat = angle_axis_to_quaternion(posecode_tensor[:3]) # Nx4
# jaw_qat = angle_axis_to_quaternion(posecode_tensor[3:]) # Nx4
#
#
#
# print("glob_qat",glob_qat, "jaw_qat", jaw_qat)
#
# glob_euler_xyz = quaternion_to_euler_xyz(glob_qat)
# jaw_euler_xyz = quaternion_to_euler_xyz(jaw_qat)
#
# print("glob_euler_xyz",glob_euler_xyz, "jaw_euler_xyz", jaw_euler_xyz) | Daksitha/ReNeLib | IVA/fastApi_backend/rotation_conversion.py | rotation_conversion.py | py | 7,098 | python | en | code | 3 | github-code | 13 |
72393229459 | import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import random
#Read file
data = pd.read_csv('linear_regression_data.csv')
#Split data to training data & testing data by random
def train_test_split(dataset):
list = []
size = len(dataset)
a = int(len(dataset) * 0.8)
training_data = dataset.iloc[:a].reset_index(drop=True)
for i in range(a):
num = random.randrange(0,size)
while num in list:
num = random.randrange(0,size)
list.append(num)
list.sort(reverse=True)
for j in range(a):
training_data.loc[j] = dataset.iloc[list[j]]
dataset = dataset.drop(list[j],0)
testing_data = dataset.iloc[:].reset_index(drop=True)
return training_data,testing_data
#print trainind data & testing data
training_data = train_test_split(data)[0]
print("Training Dataset : \n",training_data,"\n")
testing_data = train_test_split(data)[1]
print("Testing Dataset : \n",testing_data,"\n")
#Linear Regression
fit = np.polyfit(training_data.iloc[:,0],training_data.iloc[:,1],1)
fit_fn = np.poly1d(fit)
print('E = ',fit_fn,"\n")
#RSS
predict = fit_fn(testing_data.iloc[:,0]) #predict to use E
predict = np.array(predict)
output = np.array(testing_data.iloc[:,1]) #Real output of testing set
print('Predict of testing data = ' , predict,"\n")
print('Output of testing data = ', output,"\n")
residual = output - predict
print('Residual of testing data = ',residual,"\n")
RSS = sum(residual*residual)
print('RSS = ',RSS)
#Graph
plt.scatter(testing_data.iloc[:,0],testing_data.iloc[:,1])
plt.plot(training_data.iloc[:,0], fit_fn(training_data.iloc[:,0]),c='r')
plt.show()
| starfishda/Data-Science | DS - 실습4/linear_regression.py | linear_regression.py | py | 1,681 | python | en | code | 0 | github-code | 13 |
11898007221 | """
Created on Mon Feb 23 20:10:35 2015
@author: Rodolfo Viana
"""
# In this program it is possible search for tweets that say anything (bad or not bad) about Apple at Times Square.
import twitter
import sys
import json
reload(sys)
sys.setdefaultencoding("utf-8")
# Load twitter api with consumer key and access token
myApi = twitter.Api(consumer_key='Q6KsZDoY5vNUAyeQhY1Xaw', consumer_secret='mvO4XXnPz2KLvPD6KR5N2S19a1CTiHkN8PKZRv1KQ',
access_token_key='44794860-ShHgE1f3MI6TqJ5cyJt7DWTzQ8vVxjbaWC5MMGZSy',
access_token_secret='i2r27D09rJiwn4wNs1QBe0uRmsMTor2SzK0iV8e9AD006')
# Search for anything related to Apple at Times Square
def search_query(set_Tweets):
geo = ('40.76222', '-73.9844', '15mi') # Times Square (NYC)
query = '(apple OR iPhone OR iWatch OR iOS)'
max_id = None
tweets = []
K = 18
x = 0
for it in range(K): # Retrieve up to K * 100 tweets
temp_tweets = [json.loads(str(raw_tweet)) for raw_tweet \
in myApi.GetSearch(query, geo, count=100, max_id=max_id)] #result_type='recent')]
print('Tweets retrieved: %d' % len(tweets))
if temp_tweets:
max_id = temp_tweets[-1]['id']
file_id = open('id.txt', 'a+')
file_out = open('data_tweet.txt', 'a+')
count_new_tweets = 0
unique_tweets = set_Tweets
for raw_tweet in tweets:
if unique_tweets.__contains__(str(raw_tweet['id'])):
pass
else:
unique_tweets.append(str(raw_tweet['id']))
file_out.write('Id: ' + str(raw_tweet['id']) + "," + 'Text: ' + raw_tweet['text'] + ' \n')
file_id.write(str(raw_tweet['id']) + '\n')
count_new_tweets += 1
print ('%d New Tweets' % count_new_tweets)
file_id.close()
file_out.close()
# Read file and return all tweets ID in a list of tweets ID
def read_file(file_in):
file_in = open(file_in, "r")
set_tweets = []
for line in file_in:
set_tweets.append(str(line).split("\n")[0])
file_in.close()
return set_tweets
# Main
def main():
search_query(read_file("id.txt"))
pass
if __name__ == '__main__':
main()
| RodolfoViana/NeuralNetwork | code/search_query.py | search_query.py | py | 2,269 | python | en | code | 0 | github-code | 13 |
25296515350 | from frontend.tests.base_view import RegisteredBaseViewTestBase
from frontend.views.home import HomeView
class TestHomeView(RegisteredBaseViewTestBase):
view_name = 'home'
view_cls = HomeView
def test_organisation_selector(self):
self.do_test_anonymous_user()
self.do_test_superuser()
self.do_test_user_with_organisations()
self.do_test_user_without_organisation()
self.do_test_get_current_organisation_with_profile()
self.do_test_get_or_set_current_organisation_with_superuser()
| kartoza/sawps | django_project/frontend/tests/test_home_view.py | test_home_view.py | py | 543 | python | en | code | 0 | github-code | 13 |
72413614738 | inventario = []
resposta = "S"
while resposta == "S":
inventario.append(input("Equipamento: ")) #O append tem por funcção adicionar um objeto à lista
inventario.append(float(input("Valor: ")))
inventario.append(int(input("Número Serial: ")))
inventario.append(input("Departamento: "))
resposta=input("Digite \"S\" para continuar: ").upper() #As barras invertidas antes das aspas fazem com que as aspas sejam impressas no output, sem serem interpretadas pelo código
for elemento in inventario:
print(elemento)
#Com uma lista podemos inserir múltiplos valores no mesmo identificador, neste caso utilizamos o exemplo de um inventário, e o nome do identidicador da lista foi inventario | castrogh/lists_python | listas.py | listas.py | py | 700 | python | pt | code | 0 | github-code | 13 |
42000091862 | #
# @lc app=leetcode.cn id=746 lang=python3
#
# [746] 使用最小花费爬楼梯
# Time: O(n) 单层循环
# Space: O(n) 辅助空间长度为给定数组长度
# @lc code=start
class Solution:
def minCostClimbingStairs(self, cost: List[int]) -> int:
# 这里dp保存的是从到达第i格消耗的最佳cost
# 题目规定可以走1、2格
# 根据题意判断, 从起点出发不消耗cost
# 所以从起点开始, 能够到达第0格, 第1格, 所以dp[0] == dp[1] == 0
dp = [0] * (len(cost) + 1)
for i in range(2, len(cost) + 1):
# 从第i格出发消耗的cost == 到达这格的cost + 移动消耗的cost
# 即为dp[i] + cost[i]
# 为了到达第i格, 可以走1、2格, 且需要找到它的最小值
dp[i] = min(dp[i - 1] + cost[i - 1], dp[i - 2] + cost[i - 2])
return dp[-1]
# @lc code=end
| WeiS49/leetcode | Solution/动态规划/一维/746. 最小花费爬楼梯/动态规划_逆.py | 动态规划_逆.py | py | 913 | python | zh | code | 0 | github-code | 13 |
30964794665 | ##Useful physical constants and unit conversions.
#Note: This file is all in mks.
#@author Alexander Adams
#edited for psi=6894.757 Juha Nieminen 5/11/1014
h = 6.62606957e-34 #planck's constant
kb = 1.3806488e-23#boltzman constant
Runiv = 8.3144621e3#J/kmolK universal gass constant
Navo = 6.022e23#avagadros number
G = 6.67384e-11#gravitational constant
sigma = 5.67e-8#stephen boltzmann constant
epsilon0 = 8.85e-12#C^2/Nm^2 permativity of free space/electric constnat
mu0 = 1.25663706e-6#permeability of free space/magnetic constant
c = 299792458. #speed of light
gearth = 9.80665#m/s^2
##########################
#unit convertion factors##
##########################
poise = 0.1
#to use, multiply something in non mks by its conversion to factor to get
#in mks. For example 9*eV will give the value of 9eV in J
eV = 1.602176565e-19 #electron volt
qe = eV#fundamental charge
#distance units
nmi = 1852.#nautical miles
miles = 1609.
ft = 0.3048#feet
inches = ft/12
#pressure units
atm = 101325. #atmosphere
psi = 6894.757#pounds per square inch
mmHg = 133.322387415 #mm mercury
inHg = 3386.#inches mercury
#force units
lbf = 4.448#pound force
#mass units
lbm = 0.4536#pound mass
amu = 1.660468e-27#atomic mass unit
me = 9.1093821545e-31#mass of electron
#volume units
liters = 1/1000.
gallons = 0.00378541
#units of time
minutes = 60.
hours = 60*minutes
sidereal_days = 86164.0905
jyear = 86400*365.25#julian year
| USCLiquidPropulsionLaboratory/Engine-sizing-snake | physical_constants.py | physical_constants.py | py | 1,467 | python | en | code | 2 | github-code | 13 |
5525343423 | import json
import logging
import os
import datetime
import boto3
logger = logging.getLogger()
logger.setLevel(logging.INFO)
dynamodb_client = boto3.client('dynamodb')
def lambda_handler(event, context):
logger.info('Event: {}'.format(event))
user_table = os.environ["USERS_TABLE"]
if event['triggerSource'] == 'PostConfirmation_ConfirmSignUp':
user_sub = event['request']['userAttributes']['sub']
user_name = event['request']['userAttributes']['name']
user_email = event['request']['userAttributes']['email']
user_status = event['request']['userAttributes']['cognito:user_status']
user_email_status = event['request']['userAttributes']['email_verified']
user = {
'id': {
'S': user_sub
},
'name': {
'S': user_name
},
'email': {
'S': user_email
},
'status': {
'S': user_status
},
'email_status': {
'S': user_email_status
},
'createdOn': {
'S': datetime.datetime.now().astimezone().isoformat()
}
}
dynamodb_client.put_item(
TableName = user_table,
Item = user,
ConditionExpression = 'attribute_not_exists(id)'
)
return event
else:
return event | ParthTrambadiya/congito-iac-sf | functions/confirm_user_signup.py | confirm_user_signup.py | py | 1,428 | python | en | code | 1 | github-code | 13 |
44193953541 | from .exceptions import NotificationKeyError
from .exceptions import RegistrationError
from .exceptions import CallbackFailed
from .callback import Callback
import logging
def id_generator():
x = 0
while True:
x += 1
yield x
class NotificationManager:
"""Manages invocation of callback functions in response to a notification
The notification manager registers listeners to receive callbacks if and
when a given notification key is received.
The power of the notificaiton manager is that it does not require the
source of the notifcation to know anything about what (if anything) may
be listening for the notification. This decouples the logic of events
occurring and the logic of how to respond to those events.
A registered callback consists of:
- the function to be invoked
- a priority value (default of 0)
Mutiple callbacks may be registered for a given notification key.
If more than one callback is registered with a given key:
- They are invoked in order of decreasing priority.
- The default priority is 0.
- The callback order is not defined for callbacks of equal priority.
When the callback is invoked, it will be passed the notification key
as the sole positional argument. All other arguments are passed
by keyword. The keyword arguments may be specified:
- when the callback is first registered
- when the notification is posted
- in case of conflict, the latter takes precedence
There is a shared notificaition manager that can be created on demand.
Alternatively, notification manager instances can be created as desired.
"""
_shared = None
_ids = id_generator()
def __init__(self,name=None):
self._name = name
self._queues = dict()
@classmethod
@property
def shared(cls):
"""Returns the default (shared) NotificationManager instance"""
if not cls._shared:
cls._shared = NotificationManager("shared")
return cls._shared
@property
def name(self):
return self._name
@property
def keys(self):
"""Returns a set of all the currently registered notification keys"""
return set(self._queues.keys())
def register(self, key, callback, *args, priority=0, **kwargs):
"""Registers a new notification callback
Args:
key (str): notification key
callback (Callback or callable): see below
priority (float): used to determine order of callback invocation
args (list): positional arguments passed to callback (optional)
kwargs (dict): keyword arguments passed to callback (optional)
The callback may be specified either as a Callback instance
or as any callable function or method (bound or unbound).
Returns:
registration_id (int): unique id for each registered callback
Raises: AssertionError if callback
- is not callable
- is an instance of Callable and args or kwargs are specified
Any positional arguments specified here will be passed to the callback
function immediately after the notification key. They will appear
before any positional arguments specifed when the notification is
invoked.
Any keyword arguments specified here will be passed to the callback
function, but may be overridden by any keyword arguments with the same
keyword specified when the notification is invoked.
"""
if isinstance(callback,Callback):
if args:
raise RegistrationError("Cannot specify both Callback and args")
if kwargs:
raise RegistrationError("Cannot specify both Callback and kwargs")
else:
if not callable(callback):
raise RegistrationError("callback must be callable")
callback = Callback(callback,*args,**kwargs)
try:
priority = float(priority)
except ValueError:
raise RegistrationError(f"priority must be a float, not {priority}")
try:
queue = self._queues[key]
except KeyError:
queue = dict()
self._queues[key] = queue
try:
pri_queue = queue[priority]
except KeyError:
pri_queue = dict()
queue[priority] = pri_queue
cb_id = next(self._ids)
pri_queue[cb_id] = callback
return cb_id
def notify(self,key,*args,**kwargs):
"""Invokes the callbacks associated with the specified key
Args:
key(str): notification key
args (list): positional arguments passed to callback (optional)
kwargs (dict): keyword arguments passed to callback (optional)
Raises: nothing
If any of the invoked callbacks raise an exception, the
exception will be logged, but otherwise ignored.
Any positional arguments specified here will be passed to the callback
function immediately after the notification key and any positional
arguments specified when the callback was registered.
Any keyword arguments specified here will be passed to the callback
function. They will override any keyword arguments with the same
keyword specified when the callback was registered.
If there are no callbacks registered for the specified notification
key, this method simply returns without doing anything else.
"""
try:
queue = self._queues[key]
except KeyError:
return
for priority in sorted(queue.keys(),reverse=True):
for cb_id,cb in queue[priority].items():
try:
cb(*args,key=key,**kwargs)
except CallbackFailed as e:
logging.warning(
"Exception raised while invoking notification callback\n"
+ f" key: {key}\n"
+ f" priority: {priority}\n"
+ f" callback: {cb_id}\n"
+ f" function: {e.callback}\n"
+ f" reason: {e.reason}"
)
def reset(self):
"""Forgets ALL registered callbacks immediately"""
self._queues = dict()
def forget(self, key=None, priority=None, cb_id=None, callback=None):
"""Forgets the specified callbacks that match the specified criteria
Args:
key (str): notification key
priority (float): used to determine order of callback invocation
cb_id (int): callback id returned when it was registered
callback (Callback or callable): registered callback
Raises:
AssertionError if both cb_id and callback are specified
If no criteria are specified, this has the same effect
as calling `reset` but is not as efficient.
"""
assert cb_id is None or callback is None, (
"Cannot specify both cb_id and callback"
)
keys = [key] if key is not None else list(self._queues.keys())
for key in keys:
self._forget_key(key,priority, cb_id, callback)
def _forget_key(self,key, priority=None, cb_id=None, callback=None):
"""Internal method to support `forget`"""
try:
queue = self._queues[key]
except KeyError:
return
priorities = [priority] if priority is not None else list(queue.keys())
for priority in priorities:
self._forget_priority(key,priority,cb_id,callback)
if not self._queues[key]:
del self._queues[key]
def _forget_priority(self,key,priority,cb_id,callback):
"""Internal method to support `forget`"""
if cb_id:
self._forget_cb_id(key,priority,cb_id)
elif callback:
self._forget_callback(key,priority,callback)
else:
self._queues[key][priority].clear()
if not self._queues[key][priority]:
del self._queues[key][priority]
def _forget_callback(self,key,priority,callback):
"""Internal method to support `forget`"""
self._queues[key][priority] = {
k:v
for k,v in self._queues[key][priority].items()
if id(v.func) != id(callback)
}
def _forget_cb_id(self,key,priority,cb_id):
"""Internal method to support `forget`"""
try:
del self._queues[key][priority][cb_id]
except KeyError:
pass
| mikemayer67/pynm | pynm/manager.py | manager.py | py | 8,715 | python | en | code | 0 | github-code | 13 |
12918122580 | import sys
f = open( sys.argv[1] )
ls = f.readlines( )
f.close( )
x1 = None
for l in ls :
x2, y2 = map( float, l.split( ) )
y2 *= 3
if( x1 is not None ) : print("\drawline(%s, %s)(%s, %s)" % (x1, y1, x2, y2))
x1, y1 = x2, y2
| LLNL/gidiplus | numericalFunctions/Doc/Misc/pointsToLatexCurve.py | pointsToLatexCurve.py | py | 242 | python | en | code | 10 | github-code | 13 |
74319495379 | import re
from unittest import mock
import httpx
import pytest
from fedora.clients.fasjson import FasjsonClient
from fedora.exceptions import InfoGatherError
@pytest.mark.parametrize(
"groupname,membership_type,expected_url",
[
("sysadmin-main", "members", "groups/sysadmin-main/members"),
("sysadmin-main", "sponsors", "groups/sysadmin-main/sponsors"),
],
)
async def test_get_group_membership(monkeypatch, groupname, membership_type, expected_url):
result = [{"username": "member1"}, {"username": "member2"}]
client = FasjsonClient("http://fasjson.example.com")
mock__get = mock.AsyncMock(
return_value=httpx.Response(
200,
json={"result": result},
)
)
monkeypatch.setattr(client, "_get", mock__get)
response = await client.get_group_membership(groupname, membership_type)
mock__get.assert_called_once_with(
expected_url, params=None, headers={"X-Fields": "username,human_name,ircnicks"}
)
assert response == result
@pytest.mark.parametrize(
"errorcode,expected_result",
[
(404, "Sorry, but group 'biscuits_group' does not exist"),
(403, "Sorry, could not get info from FASJSON (code 403)"),
],
)
async def test_get_group_membership_errors(respx_mock, errorcode, expected_result):
client = FasjsonClient("http://fasjson.example.com")
respx_mock.get("http://fasjson.example.com").mock(
return_value=httpx.Response(
errorcode,
json={"result": "biscuits"},
)
)
with pytest.raises(InfoGatherError, match=(re.escape(expected_result))):
await client.get_group_membership("biscuits_group", "members")
@pytest.mark.parametrize(
"groupname,expected_url",
[
("sysadmin-main", "groups/sysadmin-main"),
],
)
async def test_get_group(monkeypatch, groupname, expected_url):
result = {
"groupname": groupname,
"description": "A test group",
}
client = FasjsonClient("http://fasjson.example.com")
mock__get = mock.AsyncMock(
return_value=httpx.Response(
200,
json={"result": result},
)
)
monkeypatch.setattr(client, "_get", mock__get)
response = await client.get_group(groupname)
mock__get.assert_called_once_with(expected_url, params=None)
assert response == result
@pytest.mark.parametrize(
"errorcode,expected_result",
[
(404, "Sorry, but group 'biscuits_group' does not exist"),
(403, "Sorry, could not get info from FASJSON (code 403)"),
],
)
async def test_get_group_errors(respx_mock, errorcode, expected_result):
client = FasjsonClient("http://fasjson.example.com")
respx_mock.get("http://fasjson.example.com").mock(
return_value=httpx.Response(
errorcode,
json={"result": "biscuits"},
)
)
with pytest.raises(InfoGatherError, match=(re.escape(expected_result))):
await client.get_group("biscuits_group", "members")
@pytest.mark.parametrize(
"username,expected_url",
[
("biscuit_eater", "users/biscuit_eater"),
],
)
async def test_get_user(monkeypatch, username, expected_url):
result = {
"username": username,
}
client = FasjsonClient("http://fasjson.example.com")
mock__get = mock.AsyncMock(
return_value=httpx.Response(
200,
json={"result": result},
)
)
monkeypatch.setattr(client, "_get", mock__get)
response = await client.get_user(username)
mock__get.assert_called_once_with(expected_url, params=None)
assert response == result
@pytest.mark.parametrize(
"errorcode,expected_result",
[
(404, "Sorry, but Fedora Accounts user 'biscuits_eater' does not exist"),
(403, "Sorry, could not get info from FASJSON (code 403)"),
],
)
async def test_get_user_errors(respx_mock, errorcode, expected_result):
client = FasjsonClient("http://fasjson.example.com")
respx_mock.get("http://fasjson.example.com").mock(
return_value=httpx.Response(
errorcode,
json={"result": "biscuits"},
)
)
with pytest.raises(InfoGatherError, match=(re.escape(expected_result))):
await client.get_user("biscuits_eater")
@pytest.mark.parametrize(
"mxid", ["#scotchfinger:biscuits.test", "!icedvovo:biscuits.test", "timtam@biscuits.test"]
)
async def test_get_users_by_matrix_id_invalid_mxid(mxid):
client = FasjsonClient("http://fasjson.example.com")
with pytest.raises(
InfoGatherError,
match=(
re.escape(
f"Sorry, {mxid} does not look like a valid matrix user ID "
"(e.g. @username:homeserver.com )"
)
),
):
await client.get_users_by_matrix_id(mxid)
async def test_get_users_by_matrix_id_multiple_users(monkeypatch):
client = FasjsonClient("http://fasjson.example.com")
mock_search_users = mock.AsyncMock(
return_value=[{"username": "biscuit_eater"}, {"username": "cookie_eater"}]
)
monkeypatch.setattr(client, "search_users", mock_search_users)
with pytest.raises(
InfoGatherError,
match=(
re.escape(
"2 Fedora Accounts users have the @cookie:biscuit.test Matrix Account "
"defined: \nbiscuit_eater \ncookie_eater"
)
),
):
await client.get_users_by_matrix_id("@cookie:biscuit.test")
| fedora-infra/maubot-fedora | tests/clients/test_fasjson.py | test_fasjson.py | py | 5,509 | python | en | code | 0 | github-code | 13 |
1289680287 | from collections import deque
temp = deque([a for a in range(1,int(input())+1)])
while(True):
if len(temp) == 1:
break
temp.popleft()
temp.rotate(-1)
print(temp[0]) | junhaalee/Algorithm | problems/2164.py | 2164.py | py | 189 | python | en | code | 0 | github-code | 13 |
42945492450 | from datetime import date, datetime, timedelta, timezone
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.views import View
from django.contrib.auth.models import User
from .forms import MessageForm
from .models import Task, Categories, PRIORITY, MyMessage
class LoginView(View):
    """Log a user in and show their task list.

    Bug fixes: the original queried last_log/tasks *before*
    authentication, so both were built from the anonymous request.user;
    it also left five debugging print() calls in the success path and
    computed several values it never used.
    """
    def get(self, request):
        return render(request, 'login.html')

    def post(self, request):
        # .get() instead of [] so a missing form field redirects back
        # to the login page rather than raising KeyError (HTTP 500).
        loginuser = request.POST.get('login')
        passworduser = request.POST.get('password')
        if not (loginuser and passworduser):
            return redirect('login-view')
        user = authenticate(username=loginuser, password=passworduser)
        if user is None:
            return HttpResponse('bad pass or login')
        login(request, user)
        # Query with the *authenticated* user, not the anonymous one.
        last_log = User.objects.filter(username=request.user)
        tasks = Task.objects.filter(create_user=request.user.id).order_by('deadline')
        return render(request, 'main.html', {'last_log': last_log,
                                             'tasks': tasks})
class LastAccessMixin(object):
def dispatch(self, request):
if request.user.is_authenticated():
request.user.accessdata.last_access = timezone.now()
request.user.accessdata.save(update_fields=['last_access'])
return super(LastAccessMixin, self).dispatch(request)
class LogoutView(LastAccessMixin, View):
def get(self, request):
logout(request)
return redirect('login-view')
class AddUserView(View):
def get(self, request):
return render(request, 'sign.html')
def post(self, request):
loginuser = request.POST['login']
passworduser = request.POST['password']
emailuser = request.POST['email']
if loginuser and passworduser and emailuser:
User.objects.create_user(username=loginuser, password=passworduser, email=emailuser)
return redirect('login-view')
else:
return redirect('adduser-view')
class CategoriesView(View):
def get(self, request):
user = request.user
compare_date = date.today()
if user.is_authenticated:
categories = Categories.objects.filter(owner=user)
tasks = Task.objects.filter(create_user=request.user.id).order_by('deadline').filter(deadline__range=[compare_date,
'2020-01-01'])
return render(request, 'categories.html', {
'categories': categories,
'tasks': tasks
})
def post(self, request):
name = request.POST['name']
owner = request.user
categories = Categories.objects.all()
if name:
Categories.objects.create(name=name, owner=owner)
return redirect('categories-view')
class EditCategoriesView(View):
def get(self, request, category_id):
category = Categories.objects.get(id=category_id)
return render(request, 'editcategories.html', {'category': category})
def post(self, request, category_id):
category = Categories.objects.get(id=category_id)
name = request.POST['name']
category.name = name
category.save()
return redirect('categories-view')
class DeleteCategoriesView(View):
def get(self, request, category_id):
category = Categories.objects.get(id=category_id)
category.delete()
return redirect('categories-view')
class DetailCategoriesView(View):
def get(self, request, category_id):
compare_date = date.today()
tasks = Task.objects.filter(categorie__owner=request.user).filter(categorie__id=category_id).order_by('deadline').filter(deadline__range=[compare_date,
'2020-01-01'])
return render(request, 'detailscategories.html', {'tasks': tasks})
class TasksView(View):
def get(self, request):
user = request.user
if user.is_authenticated:
categories = Categories.objects.filter(owner=request.user.id)
compare_date = date.today()
tasks = Task.objects.filter(create_user=user.id).order_by('deadline').filter(deadline__range=[compare_date,
'2020-01-01'])
for t in tasks:
print(date.today())
print('zmiana')
print(t.deadline)
if t.deadline < date.today():
print('ok')
out = 0
return render(request, 'tasks.html', {
'categories': categories,
'tasks': tasks,
'PRIORITY': PRIORITY,
})
def post(self, request):
name = request.POST['name']
priority = request.POST.get('priority')
categorie = request.POST.get('cat')
deadline = request.POST['deadline']
Task.objects.create(name=name, priority=priority, categorie_id=categorie, deadline=deadline, create_user=request.user)
return redirect('tasks-view')
class MessageView(View):
def get(self, request):
compare_date = date.today()
messages = MyMessage.objects.filter(towho=request.user)
tasks = Task.objects.filter(create_user=request.user.id).order_by('deadline').filter(deadline__range=[compare_date,
'2020-01-01'])
form = MessageForm()
return render(request, 'messages.html', {'messages': messages,
'form': form,
'tasks': tasks })
def post(self, request):
messages = MyMessage.objects.filter(towho=request.user)
form = MessageForm(request.POST)
if form.is_valid():
MyMessage.objects.create(fromwho=request.user,
towho=form.cleaned_data['towho'],
description=form.cleaned_data['description'])
return redirect('message-view')
class ArchivesView(View):
def get(self, request):
user = request.user
if user.is_authenticated:
compare_date = date.today()
categories = Categories.objects.filter(owner=request.user.id)
tasks = Task.objects.filter(create_user=request.user).filter(deadline__range=['1990-01-01',compare_date])
return render(request, 'archives.html', {
'categories': categories,
'tasks': tasks,
'PRIORITY': PRIORITY,
})
class DeleteTaskView(View):
def get(self, request, task_id):
task = Task.objects.get(id=task_id)
task.delete()
return redirect('tasks-view')
| zawadzkijakub/taskmanager | taskmanagerenv/taskmanager/MyApp/views.py | views.py | py | 7,530 | python | en | code | 0 | github-code | 13 |
29314503873 | from sys import exit
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import yfinance as yf
import scipy.signal as sps
gme_info = yf.Ticker('GME')
gme_df_2y = gme_info.history(period='2y',
interval='1h',
actions=False)
gme_2y = gme_df_2y['Open'].values
time = np.linspace(0, 1, len(gme_2y))
# find peaks and proiminces
peak_idx, _ = sps.find_peaks(gme_2y, distance=140)
prom = sps.peak_prominences(gme_2y, peak_idx)
peak_dom = np.array([time[int(i)] for i in peak_idx])
peak_val = np.array([gme_2y[int(i)] for i in peak_idx])
# worthy peaks
peak_pair = np.array([[pid, pp, pd, pv]
for pid, pp, pd, pv in zip(peak_idx, prom[0], peak_dom, peak_val)])
peak_pair = peak_pair[peak_pair[:, 1].argsort()[::-1]]
worthy_peak_pair = peak_pair[:10][peak_pair[:10][:,0].argsort()]
plt.plot(time, gme_2y)
plt.scatter(worthy_peak_pair[:,2], worthy_peak_pair[:,3], marker='x', c='red')
plt.show()
| nickeisenberg/Phython | Notebook/gme_peaks.py | gme_peaks.py | py | 997 | python | en | code | 1 | github-code | 13 |
17053908894 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.UserAssetInfoVO import UserAssetInfoVO
class JointAccountBillDetailDTO(object):
def __init__(self):
self._account_id = None
self._amount = None
self._bill_no = None
self._biz_date = None
self._biz_no = None
self._in_out = None
self._open_id = None
self._out_trade_no = None
self._payer_asset_info = None
self._seller_full_name = None
self._seller_logon_id = None
self._title = None
self._user_id = None
@property
def account_id(self):
return self._account_id
@account_id.setter
def account_id(self, value):
self._account_id = value
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, value):
self._amount = value
@property
def bill_no(self):
return self._bill_no
@bill_no.setter
def bill_no(self, value):
self._bill_no = value
@property
def biz_date(self):
return self._biz_date
@biz_date.setter
def biz_date(self, value):
self._biz_date = value
@property
def biz_no(self):
return self._biz_no
@biz_no.setter
def biz_no(self, value):
self._biz_no = value
@property
def in_out(self):
return self._in_out
@in_out.setter
def in_out(self, value):
self._in_out = value
@property
def open_id(self):
return self._open_id
@open_id.setter
def open_id(self, value):
self._open_id = value
@property
def out_trade_no(self):
return self._out_trade_no
@out_trade_no.setter
def out_trade_no(self, value):
self._out_trade_no = value
@property
def payer_asset_info(self):
return self._payer_asset_info
@payer_asset_info.setter
def payer_asset_info(self, value):
if isinstance(value, UserAssetInfoVO):
self._payer_asset_info = value
else:
self._payer_asset_info = UserAssetInfoVO.from_alipay_dict(value)
@property
def seller_full_name(self):
return self._seller_full_name
@seller_full_name.setter
def seller_full_name(self, value):
self._seller_full_name = value
@property
def seller_logon_id(self):
return self._seller_logon_id
@seller_logon_id.setter
def seller_logon_id(self, value):
self._seller_logon_id = value
@property
def title(self):
return self._title
@title.setter
def title(self, value):
self._title = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.account_id:
if hasattr(self.account_id, 'to_alipay_dict'):
params['account_id'] = self.account_id.to_alipay_dict()
else:
params['account_id'] = self.account_id
if self.amount:
if hasattr(self.amount, 'to_alipay_dict'):
params['amount'] = self.amount.to_alipay_dict()
else:
params['amount'] = self.amount
if self.bill_no:
if hasattr(self.bill_no, 'to_alipay_dict'):
params['bill_no'] = self.bill_no.to_alipay_dict()
else:
params['bill_no'] = self.bill_no
if self.biz_date:
if hasattr(self.biz_date, 'to_alipay_dict'):
params['biz_date'] = self.biz_date.to_alipay_dict()
else:
params['biz_date'] = self.biz_date
if self.biz_no:
if hasattr(self.biz_no, 'to_alipay_dict'):
params['biz_no'] = self.biz_no.to_alipay_dict()
else:
params['biz_no'] = self.biz_no
if self.in_out:
if hasattr(self.in_out, 'to_alipay_dict'):
params['in_out'] = self.in_out.to_alipay_dict()
else:
params['in_out'] = self.in_out
if self.open_id:
if hasattr(self.open_id, 'to_alipay_dict'):
params['open_id'] = self.open_id.to_alipay_dict()
else:
params['open_id'] = self.open_id
if self.out_trade_no:
if hasattr(self.out_trade_no, 'to_alipay_dict'):
params['out_trade_no'] = self.out_trade_no.to_alipay_dict()
else:
params['out_trade_no'] = self.out_trade_no
if self.payer_asset_info:
if hasattr(self.payer_asset_info, 'to_alipay_dict'):
params['payer_asset_info'] = self.payer_asset_info.to_alipay_dict()
else:
params['payer_asset_info'] = self.payer_asset_info
if self.seller_full_name:
if hasattr(self.seller_full_name, 'to_alipay_dict'):
params['seller_full_name'] = self.seller_full_name.to_alipay_dict()
else:
params['seller_full_name'] = self.seller_full_name
if self.seller_logon_id:
if hasattr(self.seller_logon_id, 'to_alipay_dict'):
params['seller_logon_id'] = self.seller_logon_id.to_alipay_dict()
else:
params['seller_logon_id'] = self.seller_logon_id
if self.title:
if hasattr(self.title, 'to_alipay_dict'):
params['title'] = self.title.to_alipay_dict()
else:
params['title'] = self.title
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = JointAccountBillDetailDTO()
if 'account_id' in d:
o.account_id = d['account_id']
if 'amount' in d:
o.amount = d['amount']
if 'bill_no' in d:
o.bill_no = d['bill_no']
if 'biz_date' in d:
o.biz_date = d['biz_date']
if 'biz_no' in d:
o.biz_no = d['biz_no']
if 'in_out' in d:
o.in_out = d['in_out']
if 'open_id' in d:
o.open_id = d['open_id']
if 'out_trade_no' in d:
o.out_trade_no = d['out_trade_no']
if 'payer_asset_info' in d:
o.payer_asset_info = d['payer_asset_info']
if 'seller_full_name' in d:
o.seller_full_name = d['seller_full_name']
if 'seller_logon_id' in d:
o.seller_logon_id = d['seller_logon_id']
if 'title' in d:
o.title = d['title']
if 'user_id' in d:
o.user_id = d['user_id']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/JointAccountBillDetailDTO.py | JointAccountBillDetailDTO.py | py | 6,968 | python | en | code | 241 | github-code | 13 |
72139298578 | import pandas as pd
import random
import time
import boto3
import json
import os
AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
AWS_REGION = os.getenv("AWS_REGION")
# Carregar o arquivo CSV
csv_file = '../data/streaming_data/olist_order_reviews_dataset.csv'
df = pd.read_csv(csv_file)
# Configuração do cliente do Amazon Kinesis
delivery_stream_name = 'firehose-reviews-stream'
session = boto3.Session(aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
# Criar um cliente do Kinesis Data Firehose usando a sessão
firehose_client = session.client('firehose', region_name=AWS_REGION)
while True:
# Gerar um número aleatório de linhas para selecionar
num_rows = random.randint(1, 5)
# Selecionar linhas aleatórias do DataFrame
selected_rows = df.sample(num_rows)
# Enviar registros para o Amazon Kinesis
for _, row in selected_rows.iterrows():
data = json.dumps(row.to_dict())
response = firehose_client.put_record(
DeliveryStreamName=delivery_stream_name,
Record={"Data": data}
)
print(f'Registro enviado: {response}')
# Aguardar um intervalo de tempo antes de continuar o loop
time.sleep(random.uniform(1, 5)) | lucas-placido/E-CommerceProject | files/scripts/kinesis_data_producer.py | kinesis_data_producer.py | py | 1,338 | python | pt | code | 0 | github-code | 13 |
14646767685 | from sqlalchemy import Column, ForeignKey, Identity, Integer, String, Table
from . import metadata
SetupIntentPaymentMethodOptionsMandateOptionsBlikJson = Table(
"setup_intent_payment_method_options_mandate_options_blikjson",
metadata,
Column(
"expires_after",
Integer,
comment="Date at which the mandate expires",
nullable=True,
),
Column(
"off_session",
MandateOptionsOffSessionDetailsBlik,
ForeignKey("MandateOptionsOffSessionDetailsBlik"),
nullable=True,
),
Column("type", String, comment="Type of the mandate", nullable=True),
Column("id", Integer, primary_key=True, server_default=Identity()),
)
__all__ = ["setup_intent_payment_method_options_mandate_options_blik.json"]
| offscale/stripe-sql | stripe_openapi/setup_intent_payment_method_options_mandate_options_blik.py | setup_intent_payment_method_options_mandate_options_blik.py | py | 776 | python | en | code | 1 | github-code | 13 |
28311782079 | # Imports
import gradio as gr
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
from string import punctuation
from heapq import nlargest
from textblob import TextBlob
stop_words = list(STOP_WORDS)
nlp = spacy.load('en_core_web_sm')
# Adding "\n" to the puctuation list to remove it
punctuation = punctuation + "\n"
def summarise(text):
# Creating the spacy object
doc = nlp(text)
# Counting the frequency of each word
word_frequency = {}
for word in doc:
if word.text.lower() not in stop_words and word.text.lower() not in punctuation:
if word.text.lower() not in word_frequency.keys():
word_frequency[word.text.lower()] = 1
else:
word_frequency[word.text.lower()] += 1
maxFrequency = max(word_frequency.values())
# Normalise the importance of each word
for word in word_frequency.keys():
word_frequency[word] = word_frequency[word]/maxFrequency
# Giving each sentence scores based on importance and words in it
sent_tokens = [sent for sent in doc.sents]
sentence_scores = {}
for sentence in sent_tokens:
for word in sentence:
if word.text.lower() in word_frequency.keys():
if sentence not in sentence_scores.keys():
sentence_scores[sentence] = word_frequency[word.text.lower()]
else:
sentence_scores[sentence] += word_frequency[word.text.lower()]
# Taking 30% of best describing sentences from the text
summary_size = int(len(sent_tokens)*0.3)
if summary_size == 0:
summary_size = 1
summary = nlargest(summary_size, sentence_scores, key=sentence_scores.get)
final_summary = [word.text for word in summary]
final_summary = ' '.join(final_summary)
# Also predicting the sentiment of the text
def sentiment(text):
polarity = TextBlob(text).sentiment.polarity
if polarity < 0:
return 'Negetive'
elif polarity == 0:
return 'Neutral'
else:
return 'Positive'
return final_summary, sentiment(text)
# Creating interface for our model
iface = gr.Interface(
fn=summarise,
inputs=gr.inputs.Textbox(lines=15, label="ORIGINAL TEXT"),
outputs=[gr.outputs.Textbox(label="SUMMARY"),
gr.outputs.Textbox(label="SENTIMENT")],
title="Text Summariser",
theme="dark-grass",
allow_flagging='never',
layout='vertical',
)
iface.launch(server_port=8000, debug=False)
| MeghanshBansal/Text-Summarizer | main.py | main.py | py | 2,531 | python | en | code | 0 | github-code | 13 |
42658536509 | ########################
## Created by Cue Hyunkyu Lee
## Date Nov 28 2017
##
## import
import os, time
import numpy as np
output_file = sys.argv[1]
n_argv = int(sys.argv[2])
main_argv = list(map(float,sys.argv[3:]))
cors=[x-1 for x in main_argv]
print("output will be generated at: {}".format(output_file))
## define parameters
print("n_argv: {}".format(n_argv))
corMat = np.zeros((n_argv,n_argv))
np.fill_diagonal(corMat,1)
ind = 0
for i in range(n_argv - 1):
for j in range( i+1 , n_argv,1):
corMat[i,j] = cors[ind]
corMat[j,i] = cors[ind]
ind = ind + 1
print(corMat)
np.savetxt(fname=output_file,X=corMat,fmt='%1.5f',delimiter=' ',newline='\n',header='',footer='',comments='#')
| cuelee/regen | 06_2_intercept_matrix.py | 06_2_intercept_matrix.py | py | 701 | python | en | code | 1 | github-code | 13 |
33651768326 | from django.shortcuts import render,get_object_or_404
from django.views.generic.list import ListView
from .models import MainMarket, Sport, League, Event
from django.core import serializers
from django.views.generic.base import TemplateResponseMixin, View
import re
import json
from django.http import JsonResponse
def Sport_list(request):
context = {"sports": Sport.objects.all(),}
return render(request,"odds/sport_list.html",context)
class ManageSportListView(ListView):
model = Sport
template_name = 'courses/manage/course/list.html'
class EventContentView(TemplateResponseMixin, View):
template_name = 'odds/Events.html'
def get(self, request, Event_id):
events = get_object_or_404(Event,id=Event_id)
g=MainMarket.objects.datetimes('date_update', 'second', order='DESC')[0]
events_main_market = MainMarket.objects.filter(event_id=Event_id)
events_main_market_current = MainMarket.objects.filter(event_id=Event_id, date_update=g)
# with open('D:\WiseBet\parsers\Книга1.csv','wb') as csv_file:
# write_csv(events_main_market_for_graphs, csv_file, field_order=["type","date","bwin","onexstavka","fonbet","tennesi"])
sports=League.objects.raw("select* from odds_sport a inner join(SELECT count (title), sport_id FROM public.odds_league group by sport_Id) b on a.id=b.sport_id")
graphs=MainMarket.objects.raw("select* from odds_mainmarket WHERE event_id = %s" % Event_id)
h=serializers.serialize('json', events_main_market, fields=('bmk', 'cf1', 'cf2','cfX','date_update'))
# h=json.load(h)
json_graphs=h.replace('"model": "odds.mainmarket", "pk": ','')
json_graphs=re.sub('{(\d){,5}, "fields": ','',json_graphs)
#json_graphs=json_graphs.replace('{, "fields": ','')
json_graphs=json_graphs.replace('}}','}')
#print(json_graphs)
x=json.loads(json_graphs)
return self.render_to_response({'events_main_market_last': events_main_market,'current_events':events_main_market_current, 'event_main': Event_id,'events': events, 'sports': sports, 'current_date':g, 'for_graphs': x})
class AjaxInfo(TemplateResponseMixin, View):
def get(self, request, Event_id):
g=MainMarket.objects.datetimes('date_update', 'second', order='DESC')[0]
events_main_market = MainMarket.objects.filter(event_id=Event_id,date_update=g )
a=[]
for i in events_main_market:
a.append({
"cf1": i.cf1,
"cfX": i.cfX,
"cf2": i.cf2,
"bmk": i.bmk,
"date_update": i.date_update})
#print(a)
return JsonResponse({"current_info":a}, status=200)
class LeagueContentListView(TemplateResponseMixin, View):
template_name = 'odds/league_list.html'
def get(self, request, league_id):
league = get_object_or_404(League,id=league_id)
events = Event.objects.filter(League_id=league_id)
sports=League.objects.raw("select* from odds_sport a inner join(SELECT count (title), sport_id FROM public.odds_league group by sport_Id) b on a.id=b.sport_id")
return self.render_to_response({'league': league,'events': events, 'sports': sports})
class LeagueListView(TemplateResponseMixin, View):
template_name = 'odds/Sports.html'
def get(self, request, sport_id):
sport = get_object_or_404(Sport,id=sport_id)
sports=League.objects.raw("select* from odds_sport a inner join(SELECT count (title), sport_id FROM public.odds_league group by sport_Id) b on a.id=b.sport_id")
leagues = League.objects.filter(sport_id=sport_id)
return self.render_to_response({'sport': sport,'leagues': leagues, 'sports': sports})
def index(request):
# sports = Sport.objects.all
sports=League.objects.raw("select* from odds_sport a inner join(SELECT count (title), sport_id FROM public.odds_league group by sport_Id) b on a.id=b.sport_id")
context = {'sports': sports}
return render(request, 'odds/bsae.html', context)
#def index(request):
# events = MainMarket.objects.all()
# context = {'events': events}
# return render(request, 'odds/bet_page.html', context)
## Create your views here.
##class ArticleListView(ListView):
##
## model = League
##
## def get_context_data(self, **kwargs):
## context = super().get_context_data(**kwargs)
## return context
##
#class PublisherBookList(ListView):
#
# template_name = 'odds/league_list.html'
#
# def get_queryset(self):
# return League.objects.filter(sport=self.sport) | asavitsky/Bets | odds/views.py | views.py | py | 4,544 | python | en | code | 0 | github-code | 13 |
1046224612 | from abc import abstractmethod
from vec3 import *
from ray import *
class hit_record:
def __init__(self):
self.p = point3()
self.normal = vec3()
self.mat_ptr = None
self.t = 0.0
self.front_face = True
def copy(self, rec):
self.p = rec.p
self.normal = rec.normal
self.mat_ptr = rec.mat_ptr
self.t = rec.t
self.front_face = rec.front_face
def set_face_normal(self, r: ray, outward_normal: vec3):
self.front_face = (dot(r.direction(), outward_normal) < 0.0)
self.normal = outward_normal if self.front_face else -outward_normal
class hittable:
@abstractmethod
def hit(self, r: ray, t_min: float, t_max: float, rec: hit_record) -> bool:
pass
| songjiahuan/a_slow_ray_tracer | hittable.py | hittable.py | py | 767 | python | en | code | 0 | github-code | 13 |
30968461135 | import os
import shutil
import json
import gzip
import csv
import sys
from collections import defaultdict
import glob
import warnings
import pandas as pd
import pyrallel
from config import config, get_logger
from common import exec_sh
import re
# ldc_kg = None
# df_wd_fb = None
# kb_to_fb_mapping = None
# Cache mapping qnode -> label, read by Importer.assign_qnode_label.
# Presumably populated elsewhere in the pipeline — it is never filled in this
# module chunk; verify against the caller before relying on its contents.
kgtk_labels = {}
# Matches AIF InterchangeOntology cluster-related IRIs in N-Triples lines.
re_cluster = re.compile(r'<.*InterchangeOntology#(clusterMember|ClusterMembership|SameAsCluster|cluster|prototype)>')
# Matches AIF InterchangeOntology core KE type IRIs (Event / Entity / Relation).
re_entity = re.compile(r'<.*InterchangeOntology#(Event|Entity|Relation)>')
# Captures a blank-node label (e.g. "_:b0" -> "b0") in an N-Triples line.
re_bnode = re.compile(r'_:([^\s]*)')
class Importer(object):
def __init__(self, source):
self.source = source
self.logger = get_logger('importer-' + source)
self.infile = os.path.join(config['input_dir'], config['run_name'], config['subrun_name'], f'{source}.ttl')
self.temp_dir = os.path.join(config['temp_dir'], config['run_name'], config['subrun_name'], source)
self.stat_info = {}
def run(self):
# global ldc_kg, df_wd_fb, kb_to_fb_mapping
os.makedirs(self.temp_dir, exist_ok=True)
try:
nt_file = os.path.join(self.temp_dir, '{}.nt'.format(self.source))
cleaned_nt_file = os.path.join(self.temp_dir, '{}.cleaned.nt'.format(self.source))
kgtk_file = os.path.join(self.temp_dir, '{}.tsv'.format(self.source))
kgtk_db_file = os.path.join(self.temp_dir, '{}.sqlite'.format(self.source))
entity_outfile = os.path.join(self.temp_dir, '{}.entity.h5'.format(self.source))
event_outfile = os.path.join(self.temp_dir, '{}.event.h5'.format(self.source))
relation_outfile = os.path.join(self.temp_dir, '{}.relation.h5'.format(self.source))
role_outfile = os.path.join(self.temp_dir, '{}.role.h5'.format(self.source))
self.convert_ttl_to_nt(self.infile, nt_file)
self.clean_nt(nt_file, cleaned_nt_file)
self.convert_nt_to_kgtk(nt_file, kgtk_file)
self.create_entity_df(kgtk_file, kgtk_db_file, entity_outfile, self.source)
self.create_event_df(kgtk_file, kgtk_db_file, event_outfile, self.source)
self.create_relation_df(kgtk_file, kgtk_db_file, relation_outfile, self.source)
self.create_role(kgtk_file, kgtk_db_file, role_outfile, self.source)
except:
self.logger.exception('Exception caught in Importer.run()')
os.remove(nt_file)
os.remove(kgtk_file)
os.remove(kgtk_db_file)
self.clean_temp_files()
def create_namespace_file(self, outfile):
os.makedirs(self.temp_dir, exist_ok=True)
nt_file = os.path.join(self.temp_dir, '{}.nt'.format(self.source))
kgtk_file = os.path.join(self.temp_dir, '{}.tsv'.format(self.source))
self.convert_ttl_to_nt(self.infile, nt_file)
exec_sh('''kgtk import-ntriples -i {nt_file} > {kgtk_file}'''
.format(nt_file=nt_file, kgtk_file=kgtk_file), self.logger)
shutil.copy(kgtk_file, outfile)
def tmp_file_path(self, x=None):
suffix = '' if not x else '.{}'.format(x)
return os.path.join(self.temp_dir, 'tmp{}'.format(suffix))
def clean_temp_files(self):
for f in glob.glob(os.path.join(self.temp_dir, 'tmp*')):
os.remove(f)
def predicate_path(self, dbfile, infile, path, quoting=0, doublequote=True):
all_p = path.split('/')
all_p = [f'-[:`{p}`]->' for p in all_p]
all_p_str = ''.join([f'{all_p[idx]}(t{idx})' for idx in range(len(all_p)-1)]) \
+ all_p[-1] # create temp nodes in the middle
exec_sh('kgtk query --graph-cache "{dbfile}" -i "{infile}" --match \'(s){p}(o)\' --return \'s,o\' > {tmp_file}'
.format(dbfile=dbfile, infile=infile, p=all_p_str, tmp_file=self.tmp_file_path()), self.logger)
pd_tmp = pd.read_csv(self.tmp_file_path(), delimiter='\t', quoting=quoting, doublequote=doublequote)
return pd_tmp
def kgtk_query(self, dbfile, infile, match, option=None, return_=None, where=None, quoting=csv.QUOTE_MINIMAL):
query = f'kgtk query --graph-cache "{dbfile}" -i "{infile}"'
if match:
query += f' --match \'{match}\''
if where:
query += f' --where \'{where}\''
if option:
for opt in option:
query += f' --opt \'{opt}\''
if return_:
query += f' --return \'{return_}\''
query += f' > {self.tmp_file_path()}'
# print(query)
exec_sh(query, self.logger)
# kgtk query set quoting to csv.QUOTE_NONE by default
# https://github.com/usc-isi-i2/kgtk/blob/6168e06fac121f2e60b687ff90ee6f5cc3d074b5/kgtk/cli/query.py#L288
pd_tmp = pd.read_csv(self.tmp_file_path(), delimiter='\t', quoting=quoting)
return pd_tmp
def convert_ttl_to_nt(self, ttl_file, nt_file):
self.logger.info('converting ttl to nt')
exec_sh('apache-jena-3.16.0/bin/riot --syntax=ttl --output=nt < {ttl} > {nt}'
.format(ttl=ttl_file, nt=self.tmp_file_path()), self.logger)
# normalization (make iri globally unique)
with open(self.tmp_file_path(), 'r') as fin:
with open(nt_file, 'w') as fout:
for line in fin:
line = line.strip()
# normalize bnode
line = re_bnode.sub(f'<http://www.isi.edu/gaia/bnode/{self.source}/' + r'\1' + '>', line)
fout.write(line + '\n')
def execute_update(self, infile, query):
query_file = self.tmp_file_path('query')
tmp_outfile = self.tmp_file_path('out')
with open(query_file, 'w') as f:
f.write(query)
exec_sh(f'apache-jena-3.16.0/bin/update --data={infile} --update={query_file} --dump > {tmp_outfile}', self.logger)
shutil.move(tmp_outfile, infile)
os.remove(query_file)
    def clean_nt(self, nt_file, cleaned_nt_file):
        """Copy *nt_file* to *cleaned_nt_file* and strip conflicting TA1 triples.

        Four successive SPARQL UPDATEs remove, in dependency order, the
        entity-prototype cluster structures produced at TA1 level:
        associatedKEs links, claimSemantics links, ClusterMembership nodes,
        and finally the SameAsCluster / prototype triples themselves.
        Each update is applied in place to *cleaned_nt_file* via execute_update.
        """
        # remove conflict TA1 triples
        self.logger.info('cleaning nt')
        # remove clusters
        self.logger.info('Loading TA1 graph')
        # load ns
        # Build the PREFIX header from the shared namespace TSV (node1 = prefix,
        # node2 = IRI); it is prepended to every SPARQL update below.
        str_ns = ''
        with open(config['namespace_file'], 'r') as f:
            for row in csv.DictReader(f, delimiter='\t'):
                str_ns += f'PREFIX {row["node1"]}: <{row["node2"]}>\n'
        # make a copy to work on
        shutil.copy(nt_file, cleaned_nt_file)
        # remove associatedKEs
        self.logger.info('Removing TA1 associatedKEs')
        str_update = '''
        DELETE {
            ?claim aida:associatedKEs ?cluster .
        }
        WHERE {
            ?cluster a aida:SameAsCluster .
            ?cluster aida:prototype ?proto.
            ?proto a aida:Entity .
            ?claim a aida:Claim .
            ?claim aida:associatedKEs ?cluster .
        }
        '''
        self.execute_update(cleaned_nt_file, str_ns + str_update)
        # remove claimSemantics
        self.logger.info('Removing TA1 claimSemantics')
        str_update = '''
        DELETE {
            ?claim aida:claimSemantics ?cluster .
        }
        WHERE {
            ?cluster a aida:SameAsCluster .
            ?cluster aida:prototype ?proto.
            ?proto a aida:Entity .
            ?claim a aida:Claim .
            ?claim aida:claimSemantics ?cluster .
        }'''
        self.execute_update(cleaned_nt_file, str_ns + str_update)
        # remove cluster member
        self.logger.info('Removing TA1 ClusterMembership')
        str_update = '''
        DELETE {
            ?cm a aida:ClusterMembership .
            ?cm aida:cluster ?cluster .
        }
        WHERE {
            ?cluster a aida:SameAsCluster .
            ?cluster aida:prototype ?proto.
            ?proto a aida:Entity .
            ?cm a aida:ClusterMembership .
            ?cm aida:cluster ?cluster .
        }'''
        self.execute_update(cleaned_nt_file, str_ns + str_update)
        # remove cluster & prototype
        # Must run last: the previous updates still match on these triples.
        self.logger.info('Removing TA1 SameAsCluster')
        str_update = '''
        DELETE {
            ?cluster a aida:SameAsCluster .
            ?cluster aida:prototype ?proto.
        }
        WHERE {
            ?cluster a aida:SameAsCluster .
            ?cluster aida:prototype ?proto.
            ?proto a aida:Entity .
        }'''
        self.execute_update(cleaned_nt_file, str_ns + str_update)
    def convert_nt_to_kgtk(self, nt_file, kgtk_file):
        """Convert an N-Triples file to KGTK TSV with `kgtk import-ntriples`.

        Uses the shared namespace table and the source name as the local
        namespace prefix so node IDs stay readable and source-scoped.

        NOTE(review): `--local-namespace-use-uuid` is passed twice with
        conflicting values (True, then False) — confirm which one kgtk
        actually honors and drop the other.
        """
        self.logger.info('convert nt to kgtk')
        exec_sh('''kgtk import-ntriples \
            --namespace-file {ns_file} \
            --namespace-id-use-uuid False \
            --newnode-use-uuid False \
            --build-new-namespaces=False \
            --local-namespace-use-uuid True \
            --local-namespace-prefix {prefix} \
            --local-namespace-use-uuid False \
            -i {nt_file} > {kgtk_file}'''
                .format(ns_file=config['namespace_file'], prefix=self.source, # prefix here would produce an invalid triple files
                        nt_file=nt_file, kgtk_file=kgtk_file), self.logger)
def merge_values(self, values):
# print(type(values))
# print(values, values.index > 0)
ret = {}
for col in values.columns:
if not values.empty:
ret[col] = tuple(values[col].tolist())
else:
ret[col] = tuple([])
return pd.Series(ret)
def assign_qnode_label(self, value):
global kgtk_labels
return tuple([kgtk_labels.get(v) for v in value])
    def create_entity_df(self, kgtk_file, kgtk_db_file, output_file, source):
        """Build the per-document entity DataFrame and export it.

        Runs a series of kgtk queries against the imported KGTK edge file,
        aggregates each aspect (type, confidence, name, link, informative
        justification, associated claims, claim semantics, TA1 cluster) to one
        row per entity, merges them, and writes the result as HDF5 plus a CSV
        copy.

        :param kgtk_file: KGTK edge file for this document
        :param kgtk_db_file: kgtk query cache/db file
        :param output_file: destination .h5 path (a ``.csv`` sibling is also written)
        :param source: document/source id stored in the ``source`` column
        """
        self.logger.info('create entity df for ' + source)
        ### id
        self.logger.info('creating id')
        df_entity = self.kgtk_query(kgtk_db_file, kgtk_file,
                                    match='(e)-[:`rdf:type`]->(:`aida:Entity`)',
                                    # '(e)-[:`aida:justifiedBy`]->(just)',
                                    return_='e AS e'
                                    )
        df_entity = df_entity.drop_duplicates().reset_index(drop=True)
        # df_entity = df_entity.groupby('e')[['e_just']].apply(self.merge_values).reset_index()
        ### type
        self.logger.info('creating type')
        df_type = self.kgtk_query(kgtk_db_file, kgtk_file,
                                  match='(stmt)-[:`rdf:type`]->(stmt_type),'+
                                        '(stmt)-[:`rdf:subject`]->(e),'+
                                        '(stmt)-[:`rdf:predicate`]->(:`rdf:type`),'+
                                        '(stmt)-[:`rdf:object`]->(type),'+
                                        '(stmt)-[:`aida:confidence`]->(c)-[:`aida:confidenceValue`]->(cv),'+
                                        '(stmt)-[:`aida:justifiedBy`]->(just)',
                                  where='stmt_type IN ["rdf:Statement", "aida:TypeStatement"]',
                                  return_='e AS e,type AS type,cv AS type_cv,just AS type_just'
                                  )
        df_type = pd.merge(df_entity, df_type, left_on='e', right_on='e')
        df_type = df_type.groupby('e')[['type', 'type_cv', 'type_just']].apply(self.merge_values).reset_index()

        def merge_just(v):
            # Deduplicate type assertions per entity: for each distinct type
            # keep the maximum confidence and collect all its justifications.
            result = {'e': v['e'], 'type': [], 'type_cv': [], 'type_just': []}
            type_, type_cv, type_just = v['type'], v['type_cv'], v['type_just']
            unique_type = set(type_)
            for t in unique_type:
                # use the maximum cv
                # aggregate justification
                indices = [i for i, x in enumerate(type_) if x == t]
                cv = max([type_cv[i] for i in indices])
                justs = tuple([type_just[i] for i in indices])
                result['type'].append(t)
                result['type_cv'].append(cv)
                result['type_just'].append(justs)
            result['type'] = tuple(result['type'])
            result['type_cv'] = tuple(result['type_cv'])
            result['type_just'] = tuple(result['type_just'])
            return pd.Series(result)
        df_type = df_type.apply(merge_just, axis=1).reset_index(drop=True)
        ### assign type label
        self.logger.info('assigning type label')
        df_type['type_label'] = df_type['type'].apply(self.assign_qnode_label)
        ### confidence
        self.logger.info('creating confidence')
        df_confidence = self.predicate_path(kgtk_db_file, kgtk_file, 'aida:confidence/aida:confidenceValue')\
            .rename(columns={'node1': 'e', 'node2': 'cv'})
        df_confidence = pd.merge(df_entity, df_confidence, left_on='e', right_on='e')
        ### name
        self.logger.info('creating name')
        df_name = self.predicate_path(kgtk_db_file, kgtk_file, 'aida:hasName')\
            .rename(columns={'node1': 'e', 'node2': 'name'})
        df_name = pd.merge(df_entity, df_name, left_on='e', right_on='e')
        df_name = df_name.groupby('e')[['name']].apply(self.merge_values).reset_index()
        ### link
        self.logger.info('creating link')
        df_link = self.kgtk_query(kgtk_db_file, kgtk_file,
                                  match='(e)-[:`aida:link`]->(t1)-[:`aida:linkTarget`]->(link),'+
                                        '(e)-[:`aida:link`]->(t1)-[:`aida:confidence`]->(t2)-[:`aida:confidenceValue`]->(cv)',
                                  return_='e AS e,link AS link,cv AS link_cv'
                                  )
        df_link = pd.merge(df_entity, df_link, left_on='e', right_on='e')
        df_link = df_link.groupby('e')[['link', 'link_cv']].apply(self.merge_values).reset_index()
        ### assign link label
        # NOTE(review): log message says 'type label' but link labels are
        # assigned here -- probably a copy/paste slip in the message.
        self.logger.info('assigning type label')
        df_link['link_label'] = df_link['link'].apply(self.assign_qnode_label)
        ### informative justification
        self.logger.info('creating informative justification')
        df_infojust = self.kgtk_query(kgtk_db_file, kgtk_file,
                                      match='(e)-[:`rdf:type`]->(:`aida:Entity`),'+
                                            '(e)-[:`aida:informativeJustification`]->(ij)',
                                      return_='e AS e, ij AS info_just'
                                      )
        df_infojust = pd.merge(df_entity, df_infojust, left_on='e', right_on='e')
        ### informative justification extension
        if config.get('extract_mention', False):
            self.logger.info('creating informative justification extension')
            df_infojust_ext = self.kgtk_query(kgtk_db_file, kgtk_file,
                                              match='(e)-[:`rdf:type`]->(:`aida:Entity`),'+
                                                    '(e)-[:`aida:informativeJustification`]->(ij),'+
                                                    '(ij)-[:`rdf:type`]->(:`aida:TextJustification`),'+
                                                    '(ij)-[:`aida:startOffset`]->(ij_start),'+
                                                    '(ij)-[:`aida:endOffsetInclusive`]->(ij_end),'+
                                                    '(ij)-[:`aida:privateData`]->(p),'+
                                                    '(p)-[:`aida:jsonContent`]->(j),'+
                                                    '(p)-[:`aida:system`]->(:`http://www.uiuc.edu/mention`)',
                                              return_='ij AS info_just, ij_start AS ij_start, ij_end AS ij_end, j AS mention',
                                              quoting=csv.QUOTE_NONE  # this makes the mention string parse properly
                                              )

            def parse_private_date(v):
                # The private data arrives as a quoted/escaped JSON string;
                # eval() unquotes it before JSON parsing.  NOTE(review):
                # eval on file-derived data is unsafe if the input is
                # untrusted -- consider ast.literal_eval.
                try:
                    v = json.loads(eval(v))
                    return v
                    # return v.get('mention_string')
                except:
                    return None
            df_infojust_ext['mention'] = df_infojust_ext['mention'].apply(parse_private_date)
            df_infojust = pd.merge(df_infojust, df_infojust_ext, left_on='info_just', right_on='info_just', how='left')
        ### associated claims
        self.logger.info('creating associated claims')
        df_asso_claim = self.kgtk_query(kgtk_db_file, kgtk_file,
                                        match='(cluster)-[:`rdf:type`]->(:`aida:SameAsCluster`),'+
                                              '(cluster)-[:`aida:prototype`]->(proto)-[:`rdf:type`]->(:`aida:Entity`),'+
                                              '(cm)-[:`rdf:type`]->(:`aida:ClusterMembership`),'+
                                              '(cm)-[:`aida:cluster`]->(cluster),'+
                                              '(cm)-[:`aida:clusterMember`]->(e),'+
                                              '(claim)-[:`rdf:type`]->(:`aida:Claim`),'+
                                              '(claim)-[:`aida:associatedKEs`]->(cluster)',
                                        return_='e AS e, claim AS asso_claim'
                                        )
        df_asso_claim = pd.merge(df_entity, df_asso_claim, left_on='e', right_on='e')
        df_asso_claim = df_asso_claim.groupby('e')[['asso_claim']].apply(self.merge_values).reset_index()
        ### claim semantics
        self.logger.info('creating claim semantics')
        df_claim_seman = self.kgtk_query(kgtk_db_file, kgtk_file,
                                         match='(cluster)-[:`rdf:type`]->(:`aida:SameAsCluster`),'+
                                               '(cluster)-[:`aida:prototype`]->(proto)-[:`rdf:type`]->(:`aida:Entity`),'+
                                               '(cm)-[:`rdf:type`]->(:`aida:ClusterMembership`),'+
                                               '(cm)-[:`aida:cluster`]->(cluster),'+
                                               '(cm)-[:`aida:clusterMember`]->(e),'+
                                               '(claim)-[:`rdf:type`]->(:`aida:Claim`),'+
                                               '(claim)-[:`aida:claimSemantics`]->(cluster)',
                                         return_='e AS e, claim AS claim_seman'
                                         )
        df_claim_seman = pd.merge(df_entity, df_claim_seman, left_on='e', right_on='e')
        df_claim_seman = df_claim_seman.groupby('e')[['claim_seman']].apply(self.merge_values).reset_index()
        ### cluster
        self.logger.info('creating associated cluster')
        df_cluster = self.kgtk_query(kgtk_db_file, kgtk_file,
                                     match='(cluster)-[:`rdf:type`]->(:`aida:SameAsCluster`),'+
                                           '(cluster)-[:`aida:prototype`]->(proto)-[:`rdf:type`]->(:`aida:Entity`),'+
                                           '(cm)-[:`rdf:type`]->(:`aida:ClusterMembership`),'+
                                           '(cm)-[:`aida:cluster`]->(cluster),'+
                                           '(cm)-[:`aida:clusterMember`]->(e)',
                                     return_='e AS e, proto AS ta1_proto, cluster AS ta1_cluster'
                                     )
        df_cluster = pd.merge(df_entity, df_cluster, left_on='e', right_on='e')
        df_cluster = df_cluster.groupby('e')[['ta1_proto', 'ta1_cluster']].apply(self.merge_values).reset_index()
        ### merge
        self.logger.info('merging all dfs to entity df')
        df_entity_complete = df_entity
        df_entity_complete = pd.merge(df_entity_complete, df_type, how='left')
        df_entity_complete = pd.merge(df_entity_complete, df_confidence, how='left')
        df_entity_complete = pd.merge(df_entity_complete, df_name, how='left')
        df_entity_complete = pd.merge(df_entity_complete, df_link, how='left')
        df_entity_complete = pd.merge(df_entity_complete, df_infojust, how='left')
        df_entity_complete = pd.merge(df_entity_complete, df_asso_claim, how='left')
        df_entity_complete = pd.merge(df_entity_complete, df_claim_seman, how='left')
        df_entity_complete = pd.merge(df_entity_complete, df_cluster, how='left')
        df_entity_complete['source'] = source
        # NOTE(review): the result of drop_duplicates() is discarded -- this
        # looks like a missing assignment (df_entity_complete = ...).
        df_entity_complete.drop_duplicates(subset=['e']).reset_index(drop=True)
        ### export
        self.logger.info('exporting df')
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            df_entity_complete.to_hdf(output_file, 'entity', mode='w', format='fixed')
            df_entity_complete.to_csv(output_file + '.csv')
    def create_event_df(self, kgtk_file, kgtk_db_file, output_file, source):
        """Build the per-document event DataFrame (id, type, LDC time spans,
        TA1 cluster) and export it as HDF5 plus a CSV copy.

        :param kgtk_file: KGTK edge file for this document
        :param kgtk_db_file: kgtk query cache/db file
        :param output_file: destination .h5 path (a ``.csv`` sibling is also written)
        :param source: document/source id stored in the ``source`` column
        """
        self.logger.info('create event df for ' + source)
        ### id
        self.logger.info('creating id')
        df_event = self.kgtk_query(kgtk_db_file, kgtk_file,
                                   match='(e)-[:`rdf:type`]->(:`aida:Event`)',
                                   return_='e AS e'
                                   )
        df_event = df_event.drop_duplicates().reset_index(drop=True)
        ### type
        self.logger.info('creating type')
        df_type = self.kgtk_query(kgtk_db_file, kgtk_file,
                                  match='(stmt)-[:`rdf:type`]->(stmt_type),'+
                                        '(stmt)-[:`rdf:subject`]->(e),'+
                                        '(stmt)-[:`rdf:predicate`]->(:`rdf:type`),'+
                                        '(stmt)-[:`rdf:object`]->(type),'+
                                        '(stmt)-[:`aida:confidence`]->(c)-[:`aida:confidenceValue`]->(cv)',
                                  where='stmt_type IN ["rdf:Statement", "aida:TypeStatement"]',
                                  return_='e AS e,type AS type,cv AS type_cv'
                                  )
        df_type = pd.merge(df_event, df_type, left_on='e', right_on='e')
        df_type = df_type.groupby('e')[['type', 'type_cv']].apply(self.merge_values).reset_index()
        df_type['type_label'] = df_type['type'].apply(self.assign_qnode_label)
        ### time
        self.logger.info('creating datetime')
        # df_time = self.kgtk_query(kgtk_db_file, kgtk_file,
        #     match='(e)-[:`aida:ldcTime`]->(dt)',
        #     option=('(dt)-[:`aida:end`]->(end)',
        #             '(end)-[:`aida:timeType`]->(dte_type)',
        #             '(end)-[:`aida:day`]->(e1)-[:`kgtk:structured_value`]->(dte_day)',
        #             '(end)-[:`aida:month`]->(e2)-[:`kgtk:structured_value`]->(dte_month)',
        #             '(end)-[:`aida:year`]->(e3)-[:`kgtk:structured_value`]->(dte_year)',
        #             '(dt)-[:`aida:start`]->(start)',
        #             '(start)-[:`aida:timeType`]->(dts_type)',
        #             '(start)-[:`aida:day`]->(s1)-[:`kgtk:structured_value`]->(dts_day)',
        #             '(start)-[:`aida:month`]->(s2)-[:`kgtk:structured_value`]->(dts_month)',
        #             '(start)-[:`aida:year`]->(s3)-[:`kgtk:structured_value`]->(dts_year)'),
        #     return_='e AS e,'+
        #             'dte_type AS dte_type, dte_day AS dte_day, dte_month AS dte_month, dte_year AS dte_year,'+
        #             'dts_type AS dts_type, dts_day AS dts_day, dts_month AS dts_month, dts_year AS dts_year'
        #     )

        def merge_time(values):
            # Collect each group's rows as a list of plain dicts under key 'dt'.
            output = []
            for idx, row in values.iterrows():
                output_inner = {}
                for k, v in row.items():
                    output_inner[k] = v
                output.append(output_inner)
            return pd.Series({'dt': output})
        df_time_end = self.kgtk_query(kgtk_db_file, kgtk_file,
                                      match='(e)-[:`aida:ldcTime`]->(dt)-[:`aida:end`]->(end)-[:`aida:timeType`]->(type)',  # dt_type: ON, BEFORE, AFTER, UNKNOWN
                                      option=('(end)-[:`aida:day`]->(e1)-[:`kgtk:structured_value`]->(day)',
                                              '(end)-[:`aida:month`]->(e2)-[:`kgtk:structured_value`]->(month)',
                                              '(end)-[:`aida:year`]->(e3)-[:`kgtk:structured_value`]->(year)'),
                                      return_='e AS e, type AS type, day AS day, month AS month, year AS year'
                                      )
        df_time_end = df_time_end.groupby('e')[['type', 'day', 'month', 'year']].apply(merge_time).rename(columns={'dt':'dt_end'}).reset_index()
        df_time_start = self.kgtk_query(kgtk_db_file, kgtk_file,
                                        match='(e)-[:`aida:ldcTime`]->(dt)-[:`aida:start`]->(start)-[:`aida:timeType`]->(type)',  # dt_type: ON, BEFORE, AFTER, UNKNOWN
                                        option=('(start)-[:`aida:day`]->(e1)-[:`kgtk:structured_value`]->(day)',
                                                '(start)-[:`aida:month`]->(e2)-[:`kgtk:structured_value`]->(month)',
                                                '(start)-[:`aida:year`]->(e3)-[:`kgtk:structured_value`]->(year)'),
                                        return_='e AS e, type AS type, day AS day, month AS month, year AS year'
                                        )
        df_time_start = df_time_start.groupby('e')[['type', 'day', 'month', 'year']].apply(merge_time).rename(columns={'dt':'dt_start'}).reset_index()
        df_time = pd.merge(df_time_start, df_time_end)
        # associated cluster
        self.logger.info('creating associated cluster')
        df_cluster = self.kgtk_query(kgtk_db_file, kgtk_file,
                                     match='(cluster)-[:`rdf:type`]->(:`aida:SameAsCluster`),'+
                                           '(cluster)-[:`aida:prototype`]->(proto)-[:`rdf:type`]->(:`aida:Event`),'+
                                           '(cm)-[:`rdf:type`]->(:`aida:ClusterMembership`),'+
                                           '(cm)-[:`aida:cluster`]->(cluster),'+
                                           '(cm)-[:`aida:clusterMember`]->(e)',
                                     return_='e AS e, proto AS proto, cluster AS cluster'
                                     )
        df_cluster = pd.merge(df_event, df_cluster, left_on='e', right_on='e')
        df_cluster = df_cluster.groupby('e')[['proto', 'cluster']].apply(self.merge_values).reset_index()
        ### merge
        self.logger.info('merging dfs')
        df_event_complete = df_event
        df_event_complete = pd.merge(df_event_complete, df_type, how='left')
        df_event_complete = pd.merge(df_event_complete, df_time, how='left')
        df_event_complete = pd.merge(df_event_complete, df_cluster, how='left')
        df_event_complete['source'] = source
        # NOTE(review): result of drop_duplicates() is discarded -- likely a
        # missing assignment (df_event_complete = ...).
        df_event_complete.drop_duplicates(subset=['e']).reset_index(drop=True)
        ### export
        self.logger.info('exporting df')
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            df_event_complete.to_hdf(output_file, 'event', mode='w', format='fixed')
            df_event_complete.to_csv(output_file + '.csv')
def create_relation_df(self, kgtk_file, kgtk_db_file, output_file, source):
self.logger.info('create relation df for ' + source)
### id
self.logger.info('creating id')
df_relation = self.kgtk_query(kgtk_db_file, kgtk_file,
match='(e)-[:`rdf:type`]->(:`aida:Relation`)',
return_='e AS e'
)
df_relation = df_relation.drop_duplicates().reset_index(drop=True)
### type
self.logger.info('creating type')
df_type = self.kgtk_query(kgtk_db_file, kgtk_file,
match='(stmt)-[:`rdf:type`]->(stmt_type),'+
'(stmt)-[:`rdf:subject`]->(e),'+
'(stmt)-[:`rdf:predicate`]->(:`rdf:type`),'+
'(stmt)-[:`rdf:object`]->(type),'+
'(stmt)-[:`aida:confidence`]->(c)-[:`aida:confidenceValue`]->(cv)',
where='stmt_type IN ["rdf:Statement", "aida:TypeStatement"]',
return_='e AS e,type AS type,cv AS type_cv'
)
df_type = pd.merge(df_relation, df_type, left_on='e', right_on='e')
df_type = df_type.groupby('e')[['type', 'type_cv']].apply(self.merge_values).reset_index()
# associated cluster
self.logger.info('creating associated cluster')
df_cluster = self.kgtk_query(kgtk_db_file, kgtk_file,
match='(cluster)-[:`rdf:type`]->(:`aida:SameAsCluster`),'+
'(cluster)-[:`aida:prototype`]->(proto)-[:`rdf:type`]->(:`aida:Relation`),'+
'(cm)-[:`rdf:type`]->(:`aida:ClusterMembership`),'+
'(cm)-[:`aida:cluster`]->(cluster),'+
'(cm)-[:`aida:clusterMember`]->(e)',
return_='e AS e, proto AS proto, cluster AS cluster'
)
df_cluster = pd.merge(df_relation, df_cluster, left_on='e', right_on='e')
df_cluster = df_cluster.groupby('e')[['proto', 'cluster']].apply(self.merge_values).reset_index()
### merge
self.logger.info('merging dfs')
df_relation_complete = df_relation
df_relation_complete = pd.merge(df_relation_complete, df_type, how='left')
df_relation_complete = pd.merge(df_relation_complete, df_cluster, how='left')
df_relation_complete['source'] = source
df_relation_complete.drop_duplicates(subset=['e']).reset_index(drop=True)
### export
self.logger.info('exporting df')
with warnings.catch_warnings():
warnings.simplefilter('ignore')
df_relation_complete.to_hdf(output_file, 'relation', mode='w', format='fixed')
df_relation_complete.to_csv(output_file + '.csv')
    def create_role(self, kgtk_file, kgtk_db_file, output_file, source):
        """Extract all non-type argument statements (role edges between two
        KEs, with confidence and justification) and export them as HDF5 plus
        a CSV copy.

        :param kgtk_file: KGTK edge file for this document
        :param kgtk_db_file: kgtk query cache/db file
        :param output_file: destination .h5 path (a ``.csv`` sibling is also written)
        :param source: document/source id stored in the ``source`` column
        """
        self.logger.info('creating role')
        # entities = set(pd.read_hdf(entity_outfile)['e'].to_list())
        # events = set(pd.read_hdf(event_outfile)['e'].to_list())
        # relations = set(pd.read_hdf(relation_outfile)['e'].to_list())
        df_role = self.kgtk_query(kgtk_db_file, kgtk_file,
                                  match='(stmt)-[:`rdf:type`]->(stmt_type),'+
                                        '(stmt)-[:`rdf:subject`]->(e1),'+
                                        '(stmt)-[:`rdf:predicate`]->(role),'+
                                        '(stmt)-[:`rdf:object`]->(e2),'+
                                        '(stmt)-[:`aida:confidence`]->(c)-[:`aida:confidenceValue`]->(cv),'+
                                        '(stmt)-[:`aida:justifiedBy`]->(just),'+
                                        '(e1)-[:`rdf:type`]->(e1_type),'+
                                        '(e2)-[:`rdf:type`]->(e2_type)',
                                  where='role != "rdf:type" AND stmt_type IN ["rdf:Statement", "aida:ArgumentStatement"]',
                                  return_='e1 AS e1, e2 AS e2, e1_type AS e1_type, e2_type AS e2_type, role AS role, cv AS cv, just AS just'
                                  )
        df_role['source'] = source
        self.logger.info('exporting df')
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            df_role.to_hdf(output_file, 'role', mode='w', format='fixed')
            df_role.to_csv(output_file + '.csv')
def load_resource():
    """Populate the module-level ``kgtk_labels`` qnode->label lookup from the
    gzipped KGTK labels TSV named by ``config['kgtk_labels']``."""
    global kgtk_labels
    with gzip.open(config['kgtk_labels'], 'rt') as f:
        reader = csv.DictReader(f, delimiter='\t')
        # The original enumerated rows but never used the index.
        for row in reader:
            kgtk_labels[row['node1']] = row['node2']
# def convert_nan_to_none(df):
# return df.where(pd.notnull(df), None)
#
#
# def create_wd_to_fb_mapping():
# # only need to run once
# # step 1
# # kgtk import-wikidata \
# # -i wikidata-20200803-all.json.bz2 \
# # --node wikidata-20200803-all-nodes.tsv \
# # --edge wikidata-20200803-all-edges.tsv \
# # --qual wikidata-20200803-all-qualifiers.tsv \
# # --explode-values False \
# # --lang en,ru,uk \
# # --procs 24 \
# # |& tee wikidata-20200803-all-import.log
# # step 2
# # kgtk filter -p ';P646;' wikidata-20200803-all-edges.tsv.gz > qnode_to_freebase_20200803.tsv
# # step 3
# # kgtk ifexists --filter-on qnode_to_freebase.tsv --filter-keys node1 \
# # --input-keys id -i wikidata-20200803-all-nodes.tsv.gz > qnode_freebase_20200803.tsv
#
# # df_wdid_fb = pd.read_csv('qnode_to_free_tsv_file_path', delimiter='\t').drop(columns=['id', 'label', 'rank'])\
# # .rename(columns={'node1': 'qnode', 'node2': 'fbid'})
# # df_wd_node = pd.read_csv('filtered_qnode_file_path', delimiter='\t').drop(columns={'type'})\
# # .rename(columns={'id': 'qnode'})
# # df_wd_fb = pd.merge(df_wd_node, df_wdid_fb, left_on='qnode', right_on='qnode')
# # df_wd_fb.to_csv(config['wd_to_fb_file'], index=False)
# pass
#
#
# def load_ldc_kb():
# kb_names = defaultdict(lambda: {'type': None, 'names': []})
#
# # entities
# with open(os.path.join(config['ldc_kg_dir'], 'entities.tab')) as f:
# for idx, line in enumerate(f):
# if idx == 0:
# continue
# line = line.strip().split('\t')
# type_, id_, name1 = line[1], line[2], line[3]
# kb_names[id_]['type'] = type_
# kb_names[id_]['names'].append(name1)
# if len(line) >= 5:
# name2 = line[4]
# kb_names[id_]['names'].append(name2)
#
# # alternative names
# with open(os.path.join(config['ldc_kg_dir'], 'alternate_names.tab')) as f:
# for idx, line in enumerate(f):
# if idx == 0:
# continue
# line = line.strip().split('\t')
# id_, name_ = line[0], line[1]
# kb_names[id_]['names'].append(name_)
#
# return kb_names
#
#
# def load_kb_to_fb_mapping():
# mapping = None
# if config['kb_to_fbid_mapping']:
# with open(config['kb_to_fbid_mapping'], 'r') as f:
# mapping = json.load(f)
# return mapping
#
#
# def load_wd_to_fb_df():
# return convert_nan_to_none(pd.read_csv(config['wd_to_fb_file']))
def worker(source, logger=None, message=None):
    """Run one import task for ``source``; log ``message`` first when both a
    logger and a non-empty message are supplied (pyrallel mapper entry point)."""
    if logger and message:
        logger.info(message)
    Importer(source=source).run()
def process():
    """Import every ``*.ttl`` file of the configured run in parallel.

    Loads the shared qnode-label table, then fans one `worker` task per input
    file out over a pyrallel ParallelProcessor pool sized by
    ``config['num_of_processor']``.
    """
    # global ldc_kg, df_wd_fb, kb_to_fb_mapping
    logger = get_logger('importer-main')
    logger.info('loading resource')
    load_resource()
    # ldc_kg = load_ldc_kb()
    # df_wd_fb = load_wd_to_fb_df()
    # kb_to_fb_mapping = load_kb_to_fb_mapping()
    logger.info('starting multiprocessing mode')
    pp = pyrallel.ParallelProcessor(
        num_of_processor=config['num_of_processor'],
        mapper=worker,
        # Bound the per-mapper queue so the feeder does not outrun the pool.
        max_size_per_mapper_queue=config['num_of_processor'] * 2
    )
    pp.start()
    all_infiles = glob.glob(os.path.join(config['input_dir'], config['run_name'], config['subrun_name'], '*.ttl'))
    logger.info(f'{len(all_infiles)} files to process')
    for idx, infile in enumerate(all_infiles):
        # Source id is the file name without extension(s).
        source = os.path.basename(infile).split('.')[0]
        pp.add_task(source, logger, f'starting task {source} [{idx+1}/{len(all_infiles)}]')
    pp.task_done()
    pp.join()
    logger.info('all tasks are finished')
# integrity check
# logger.info('checking file integrity')
# all_ta1_files = set()
# all_ta2_nt_files = set()
# for infile in glob.glob(os.path.join(config['input_dir'], config['run_name'], '*.ttl')):
# source = os.path.basename(infile).split('.')[0]
# all_ta1_files.add(source)
# for infile in glob.glob(os.path.join(config['temp_dir'], config['run_name'], '*/*.cleaned.nt')):
# source = os.path.basename(infile).split('.')[0]
# all_ta2_nt_files.add(source)
# fn = os.path.join(config['temp_dir'], config['run_name'], source, source)
# if not os.path.exists(fn + '.tsv'):
# logger.error('Incorrect KGTK file: {}'.format(source))
# if not os.path.exists(fn + '.entity.h5'):
# logger.error('Incorrect entity df: {}'.format(source))
# if not os.path.exists(fn + '.event.h5'):
# logger.error('Incorrect event df: {}'.format(source))
# if not os.path.exists(fn + '.relation.h5'):
# logger.error('Incorrect relation df: {}'.format(source))
# if not os.path.exists(fn + '.event_role.h5'):
# logger.error('Incorrect event role df: {}'.format(source))
# if not os.path.exists(fn + '.relation_role.h5'):
# logger.error('Incorrect relation role df: {}'.format(source))
# ta2_missing = all_ta1_files - all_ta2_nt_files
# if len(ta2_missing) > 0:
# for source in ta2_missing:
# logger.error('{} has not been parsed'.format(source))
# logger.info('integrity check completed')
# def generate_kb_to_wd_mapping(run_name, outfile):
# df_entity = pd.DataFrame()
# for infile in glob.glob(os.path.join(config['temp_dir'], run_name, '*/*.entity.h5')):
# df_entity = df_entity.append(pd.read_hdf(infile))
# df_entity = df_entity.reset_index(drop=True)
#
# mapping = defaultdict(lambda: defaultdict(float))
# for idx, e in df_entity.iterrows():
# targets = e['target']
# target_scores = e['target_score']
# fbs = e['fbid']
# fb_scores = e['fbid_score_avg']
# if pd.notna(targets) and pd.notna(fbs):
# for i, t in enumerate(targets):
# t_score = target_scores[i]
# for j, fb in enumerate(fbs):
# fb_score = fb_scores[j]
# curr_score = 1.0 * t_score * fb_score
# prev_score = mapping[t].get(fb)
# if prev_score:
# mapping[t][fb] = max(curr_score, prev_score)
# else:
# mapping[t][fb] = curr_score
# with open(outfile, 'w') as f:
# json.dump(mapping, f)
if __name__ == '__main__':
    argv = sys.argv
    # FIX: guard against a missing subcommand -- bare `argv[1]` raised
    # IndexError when the script was invoked with no arguments.
    if len(argv) > 1 and argv[1] == 'process':
        process()
# elif argv[1] == 'kb_to_wd':
# run_name = argv[2]
# outfile = argv[3]
# generate_kb_to_wd_mapping(outfile)
# elif argv[1] == 'create_namespace':
# outfile = argv[2]
#
# # pick the file with biggest size
# source = None
# source_size = 0
# for infile in glob.glob(os.path.join(config['input_dir'], config['run_name'], '*.ttl')):
# if not source:
# source = infile
# file_size = os.stat(infile).st_size
# if file_size > source_size:
# source = os.path.basename(infile).split('.')[0]
# im = Importer(source=source)
# im.create_namespace_file(outfile)
| usc-isi-i2/gaia-ta2pipeline | pipeline2/importer.py | importer.py | py | 39,036 | python | en | code | 1 | github-code | 13 |
8163149349 | """
File: Game File
Authors: Spencer Wheeler, Benjamin Paul, Troy Scites
Description: Set of API classes for post/get methods for game information
"""
import sqlite3
from User import User
from flask_restful import Resource, reqparse
#using reqparse despite its depreciated status
class total_games(Resource):
    """
    API resource for per-run aggregate game information (level reached and
    total play time), including leaderboard maintenance on PUT.
    """
    global class_user
    class_user = User()
    TABLE_NAME = '_game_total_table'
    # Set up parser for json input; declares the accepted input variables.
    parser = reqparse.RequestParser()
    parser.add_argument('username',
                        required=True,
                        help="Error: Generic"
                        )
    # FIX: get() reads data['game_run'] but the argument was never declared
    # on this parser, so every GET request failed; declare it (optional).
    parser.add_argument('game_run',
                        type=int,
                        required=False,
                        help="Not accepted format for Game run"
                        )
    parser.add_argument('level_reached',
                        type=int,
                        required=False,
                        help="No level entered"
                        )
    parser.add_argument('total_game_time',
                        type=float,
                        required=False,
                        help="No time added"
                        )

    def find_current_game_run_number(self, username):
        """Return the highest game_run recorded for ``username``."""
        table_name = username.capitalize() + self.TABLE_NAME
        connection = sqlite3.connect('data.db')
        cursor = connection.cursor()
        query = '''SELECT game_run FROM {table} ORDER BY game_run DESC'''.format(table=table_name)
        rows = cursor.execute(query).fetchone()[0]
        connection.close()
        return rows

    @classmethod
    def find_game(cls, table_name, game_run):
        """Fetch the row for ``game_run`` from ``table_name``, or None."""
        connection = sqlite3.connect('data.db')
        cursor = connection.cursor()
        query = "SELECT * FROM {table} WHERE game_run =?".format(table=table_name)
        rows = cursor.execute(query, (game_run,))
        row = rows.fetchone()
        # FIX: the connection was previously leaked on every lookup.
        connection.close()
        return row

    def get(self):
        """Return total time of run as JSON.

        Parameters
        ----------
        username: str, required
        game_run: int, required
        """
        data = total_games.parser.parse_args()
        if not User.find_user(data['username'].capitalize()):
            return {"message": "{name} was not found.".format(name=data['username'].capitalize())}
        global class_user
        table_name = data['username'].capitalize() + self.TABLE_NAME
        row = self.find_game(table_name, data['game_run'])
        if row:
            return {"game_run": row[0], "game_mode": row[1], "Level Reached": row[2], "total_game_time": row[3]}
        return {"message": "No Game Found"}

    def put(self, game_run):
        """Update level/total time for ``game_run`` and, for ranked modes,
        try to place the run on the current mode's leaderboard."""
        data = total_games.parser.parse_args()
        if not User.find_user(data['username'].capitalize()):
            return {"message": "No user was found."}
        global class_user
        table_name = data['username'].capitalize() + self.TABLE_NAME
        connection = sqlite3.connect('data.db')
        cursor = connection.cursor()
        query = '''UPDATE {table} SET level_reached = ?, game_total_time =? WHERE game_run = ?'''.format(table=table_name)
        cursor.execute(query, (data['level_reached'], data['total_game_time'], game_run))
        connection.commit()
        # FIX: close right after the write -- the connection previously
        # leaked on the "infinite" and failure paths.
        connection.close()
        current_game_mode = class_user.get_game_mode()
        # Infinite mode has no leaderboard.
        if current_game_mode == "infinite":
            return {"Message": "Updated"}
        add_leaderboard = self.check_leaderboard(data['username'].capitalize(), data['level_reached'], data['total_game_time'])
        if add_leaderboard:
            return {"message": "Added"}
        return {"message": "something went wrong"}

    def check_leaderboard(self, username, level_reached, total_game_time):
        """Insert the run into the current mode's leaderboard if it ranks.

        Leaderboard table layout:
        Position | Username | Game Level | Game_time | Game_Mode
        (game_level == -1 marks an empty slot).

        :return: True when the run was inserted, False otherwise.
        """
        global class_user
        current_game_mode = class_user.get_game_mode()
        # Connect outside the try so the except branch never sees an
        # unbound `connection`.
        connection = sqlite3.connect('data.db')
        try:
            cursor = connection.cursor()
            table_name = "{type}_leaderboard".format(type=current_game_mode)
            query = '''SELECT * FROM {table} ORDER BY position ASC'''.format(table=table_name)
            rows = cursor.execute(query).fetchall()
            for row in rows:
                if row[2] == -1:
                    # Empty slot: claim it directly.
                    query = '''UPDATE {table} SET username = ?, game_level =?, game_time = ? WHERE position is ?'''.format(table=table_name)
                    cursor.execute(query, (username, level_reached, total_game_time, row[0]))
                    connection.commit()
                    connection.close()
                    return True
                elif level_reached > row[2]:  # if current run is further along
                    # Push existing entries down before taking this position.
                    self.shift_rows(row[0], username)
                    query = '''UPDATE {table} SET username = ?, game_level =?, game_time = ? WHERE position is ?'''.format(table=table_name)
                    cursor.execute(query, (username, level_reached, total_game_time, row[0]))
                    connection.commit()
                    connection.close()
                    return True
                elif level_reached == row[2] and current_game_mode != "speed":
                    # NOTE(review): on a level tie a faster time merely
                    # continues to the next row -- confirm intended.
                    if total_game_time < row[3]:
                        # move the rest down
                        continue
            connection.close()
            return False
        # FIX: the original `except Error:` referenced an undefined name and
        # raised NameError instead of handling database failures.
        except sqlite3.Error:
            connection.close()
            return False

    def shift_rows(self, current_row, username):
        """Shift leaderboard rows from ``current_row`` down by one position
        (the entry at position 100 falls off)."""
        current_game_mode = class_user.get_game_mode()
        table_name = current_game_mode + "_leaderboard"
        connection = sqlite3.connect('data.db')
        try:
            cursor = connection.cursor()
            # Walk bottom-up so each row is copied before being overwritten.
            for number in range(99, current_row - 1, -1):
                query = ''' SELECT * FROM {table} WHERE position = ?'''.format(table=table_name)
                row = cursor.execute(query, (number,)).fetchone()
                query = '''UPDATE {table} SET username = ?, game_level =?, game_time = ? WHERE position is ?'''.format(table=table_name)
                adjusted_number = number + 1
                cursor.execute(query, (row[1], row[2], row[3], adjusted_number))
            connection.commit()
            # FIX: was `connection.close` (attribute access, never called).
            connection.close()
        # FIX: `except Error:` referenced an undefined name (NameError).
        except sqlite3.Error:
            connection.close()
            return None
    # NOTE: a deprecated, string-quoted post() implementation that previously
    # trailed this class has been removed as dead code.
class single_games(Resource):
    """
    API resource for individual game-type time records within a run/level.
    """
    TABLE_NAME = '_game_times_table'
    parser = reqparse.RequestParser()
    parser.add_argument('username',
                        required=True,
                        help="Error: Generic"
                        )
    parser.add_argument('game_run',
                        type=int,
                        required=False,
                        help="Not accepted format for Game run"
                        )
    parser.add_argument('game_mode',
                        required=False,
                        help="no game mode"
                        )
    parser.add_argument('game_level',
                        type=int,
                        required=False,
                        help="No level entered"
                        )
    parser.add_argument('game_type',
                        required=True,
                        help="No games Found"
                        )
    parser.add_argument('game_time',
                        type=float,
                        required=True,
                        help="No time added"
                        )

    def post(self):
        """Store one per-level game record for the named user.

        Parameters: username (required), game_run, game_mode, game_level,
        game_type (required), game_time (required).
        """
        data = single_games.parser.parse_args()
        if not User.find_user(data['username'].capitalize()):
            return {"message": "No user was found."}
        table_name = data['username'].capitalize() + self.TABLE_NAME
        connection = sqlite3.connect('data.db')
        cursor = connection.cursor()
        query = "INSERT INTO {table} VALUES (?,?,?,?,?)".format(table=table_name)
        cursor.execute(query, (data['game_run'], data['game_mode'].lower(), data['game_level'],
                               data['game_type'], data['game_time']))
        connection.commit()
        connection.close()
        return {"message": "Game Run added", "game_run": data['game_run'], "game_mode": data['game_mode'],
                "game_level": data['game_level'], "game_type": data['game_type'], "game_time": data['game_time']}

    def get(self):
        """GET is intentionally unimplemented for single-game records."""
        return {"message": "No functionality"}

    @classmethod
    def find_game(cls, table_name, game_run):
        """Fetch the first row for ``game_run`` from ``table_name``; None on
        a database error or when no row exists."""
        # Connect outside the try so the except branch never sees an
        # unbound `connection`.
        connection = sqlite3.connect('data.db')
        try:
            cursor = connection.cursor()
            query = "SELECT * FROM {table} WHERE game_run =?".format(table=table_name)
            rows = cursor.execute(query, (game_run,))
            row = rows.fetchone()
            # FIX: the connection previously leaked on the success path.
            connection.close()
            return row
        # FIX: `except Error:` referenced an undefined name (NameError).
        except sqlite3.Error:
            connection.close()
            return
37993694528 | import ROOT
import cppyy
import AthenaROOTAccess.transientTree
import sys
from AthenaROOTAccess.dumpers import Evdump, try_autokey
if not globals().has_key ('onlykeys'):
onlykeys = []
if not globals().has_key ('onlytypes'):
onlytypes = []
class Files:
    """Bundle an input ROOT file/tree with its transient tree and the two
    bzip2-compressed output streams used by the dumper."""

    def __init__(self, f, fout_base):
        self.f = f
        self.tt = AthenaROOTAccess.transientTree.makeTree(f)
        if isinstance(fout_base, Files):
            # Share the output streams of an existing Files instance.
            self.fout = fout_base.fout
            self.fout_rand = fout_base.fout_rand
        else:
            import bz2
            self.fout = bz2.BZ2File(fout_base + '.out.bz2', 'w')
            self.fout_rand = bz2.BZ2File(fout_base + '-rand.out.bz2', 'w')
        return

    def file(self):
        """Return the TFile behind a chain, the TDirectory behind a tree,
        or the wrapped object itself otherwise."""
        source = self.f
        if isinstance(source, ROOT.TChain):
            return source.GetFile()
        if isinstance(source, ROOT.TTree):
            return source.GetDirectory()
        return source
def _getTokenField (token, field):
ipos = token.find ('[' + field + '=')
if ipos < 0: return None
beg = ipos + 2 + len(field)
end = token.find (']', beg)
if end < 0: return None
return token[beg:end]
def get_dhes (files, entry):
    """Return [(key, token)] for all DataHeader elements of `entry`,
    filling in any empty CNT token field from the tree's link table."""
    dh = AthenaROOTAccess.transientTree.get_dataheader (files.file(), entry)
    # Loop over elements and repair tokens with an empty container name.
    for elem in dh.elements():
        s = elem.token()
        cnt = _getTokenField (s, 'CNT')
        if cnt == "":
            # Derive the container from the OID offset (hex, before the '-').
            oid = _getTokenField (s, 'OID')
            offset = oid[0:oid.find ('-')]
            cnt = files.tt.getLink( int(offset, 16) )
            tokenStr = s.replace("[CNT=]", "[CNT=" + cnt + "]");
            elem.setToken(tokenStr)
    return [(elem.key(), elem.token()) for elem in dh.elements()]
def run (files, first=0, n=-1):
    """Dump events [first, first+n) (all remaining when n < 0) of the
    transient tree in `files` through an Evdump instance."""
    evdump = Evdump(files.fout, files.fout_rand,
                    onlykeys = onlykeys, onlytypes = onlytypes)
    last = files.tt.GetEntries()
    if n >= 0:
        last = min (last, first + n)
    for i in range(first, last):
        # For a chain, translate the global entry to the current file's
        # local entry number.
        if isinstance (files.f, ROOT.TChain):
            local_entry = files.f.LoadTree (i)
        else:
            local_entry = i
        # Branch accessor kept for the dump_event_common path below.
        def getter (d, dhe, keyprint):
            br = files.tt.GetBranch(keyprint)
            if not br:
                return try_autokey (files.tt, d, dhe, keyprint)
            files.tt.GetBranch(keyprint).GetEntry(i)
            return getattr (files.tt, keyprint)
        files.tt.GetEntry (i)
        evdump.dump_event_tree (get_dhes(files, local_entry), files.tt)
        #files.tt.setEntry(i)
        #evdump.dump_event_common (get_dhes(files, local_entry), getter)
    evdump.print_missed (sys.stdout)
    return
import sys
import os
def make_fin (fin):
    """Resolve the input file name, honouring an optional command-line tag.

    With no argv[1], `fin` is returned unchanged.  If argv[1] names an
    existing file it is used directly; otherwise it is treated as a dataset
    tag and spliced into both the 'AOD-' file name and its directory.
    """
    if len(sys.argv) > 1:
        if os.path.isfile (sys.argv[1]):
            return sys.argv[1]
        # NOTE(review): fin[3:] keeps the '-' after 'AOD', yielding
        # 'AOD-<tag>-<rest>' -- confirm that is the intended pattern.
        if fin.startswith ('AOD-'):
            fin = 'AOD-' + sys.argv[1] + fin[3:]
        fin = 'AOD-' + sys.argv[1] + '/' + fin
    return fin
| rushioda/PIXELVALID_athena | athena/PhysicsAnalysis/AthenaROOTAccess/test/ara_dumper_common.py | ara_dumper_common.py | py | 3,036 | python | en | code | 1 | github-code | 13 |
37165115543 | import contextlib
import os
import threading
import time
from .test_utils import (
TempDirectoryTestCase,
skip_unless_module,
skip_without_drmaa,
restartable_pulsar_app_provider,
integration_test,
)
from pulsar.manager_endpoint_util import (
submit_job,
)
from pulsar.managers.stateful import ActiveJobs
from pulsar.client.amqp_exchange import ACK_FORCE_NOACK_KEY
from pulsar.client.amqp_exchange_factory import get_exchange
from pulsar.managers.util.drmaa import DrmaaSessionFactory
class StateIntegrationTestCase(TempDirectoryTestCase):
    """Integration tests for Pulsar's stateful manager restart/recovery.

    Each test spins up a restartable Pulsar app against an in-memory
    Kombu broker and observes the status-update messages it publishes.
    """
    @skip_without_drmaa
    @skip_unless_module("kombu")
    @integration_test
    def test_restart_finishes_job(self):
        """A job killed while the app is down is reported 'complete' on restart."""
        test = "restart_finishes"
        with self._setup_app_provider(test) as app_provider:
            job_id = '12345'
            with app_provider.new_app() as app:
                manager = app.only_manager
                job_info = {
                    'job_id': job_id,
                    'command_line': 'sleep 1000',
                    'setup': True,
                }
                submit_job(manager, job_info)
                # Poll briefly until DRMAA assigns an external id.
                external_id = None
                for i in range(10):
                    time.sleep(.05)
                    # TODO: unfortunate breaking of abstractions here.
                    external_id = manager._proxied_manager._external_id(job_id)
                    if external_id:
                        break
                if external_id is None:
                    assert False, "Test failed, couldn't get exteranl id for job id."
            # App is now down; kill the job behind its back.
            drmaa_session = DrmaaSessionFactory().get()
            drmaa_session.kill(external_id)
            drmaa_session.close()
            consumer = self._status_update_consumer(test)
            consumer.start()
            with app_provider.new_app() as app:
                consumer.wait_for_messages()
            consumer.join()
            assert len(consumer.messages) == 1, len(consumer.messages)
            assert consumer.messages[0]["status"] == "complete"
    @skip_unless_module("drmaa")
    @skip_unless_module("kombu")
    @integration_test
    def test_recovery_failure_fires_lost_status(self):
        """Activating a job that never existed yields a 'lost' status on restart."""
        test = "restart_failure_fires_lost"
        with self._setup_app_provider(test) as app_provider:
            job_id = '12345'
            with app_provider.new_app() as app:
                persistence_directory = app.persistence_directory
            # Break some abstractions to activate a job that
            # never existed.
            manager_name = "manager_%s" % test
            active_jobs = ActiveJobs(manager_name, persistence_directory)
            active_jobs.activate_job(job_id)
            consumer = self._status_update_consumer(test)
            consumer.start()
            with app_provider.new_app() as app:
                consumer.wait_for_messages()
            consumer.join()
            assert len(consumer.messages) == 1, len(consumer.messages)
            assert consumer.messages[0]["status"] == "lost"
    @skip_unless_module("kombu")
    @integration_test
    def test_staging_failure_fires_failed_status(self):
        """An invalid remote_staging description produces a 'failed' status."""
        test = "stating_failure_fires_failed"
        with self._setup_app_provider(test, manager_type="queued_python") as app_provider:
            job_id = '12345'
            consumer = self._status_update_consumer(test)
            consumer.start()
            with app_provider.new_app() as app:
                manager = app.only_manager
                job_info = {
                    'job_id': job_id,
                    'command_line': 'sleep 1000',
                    'setup': True,
                    # Invalid staging description...
                    'remote_staging': {"setup": [{"moo": "cow"}]}
                }
                # TODO: redo this with submit_job coming through MQ for test consistency.
                submit_job(manager, job_info)
                import time
                time.sleep(2)
            consumer.wait_for_messages()
            consumer.join()
            assert len(consumer.messages) == 1, len(consumer.messages)
            assert consumer.messages[0]["status"] == "failed"
    @skip_unless_module("kombu")
    @integration_test
    def test_async_request_of_mq_status(self):
        """An explicit MQ status request re-publishes the job's failed status."""
        test = "async_request_of_mq_status"
        with self._setup_app_provider(test, manager_type="queued_python") as app_provider:
            job_id = '12345'
            consumer = self._status_update_consumer(test)
            consumer.start()
            with app_provider.new_app() as app:
                manager = app.only_manager
                job_info = {
                    'job_id': job_id,
                    'command_line': 'sleep 1000',
                    'setup': True,
                    # Invalid staging description...
                    'remote_staging': {"setup": [{"moo": "cow"}]}
                }
                # TODO: redo this with submit_job coming through MQ for test consistency.
                submit_job(manager, job_info)
                self._request_status(test, job_id)
                import time
                time.sleep(2)
            consumer.wait_for_messages()
            consumer.join()
            messages = consumer.messages
            assert len(messages) == 2, len(messages)
            assert messages[0]["status"] == "failed"
            assert messages[1]["status"] == "failed", messages[1]
    @skip_unless_module("kombu")
    @integration_test
    def test_async_request_of_mq_status_lost(self):
        """Status requests for an unknown job id come back as 'lost'."""
        test = "async_request_of_mq_status_lost"
        with self._setup_app_provider(test, manager_type="queued_python") as app_provider:
            job_id = '12347'  # should be lost? - never existed right?
            consumer = self._status_update_consumer(test)
            consumer.start()
            with app_provider.new_app() as app:
                # Touch the manager so the app is fully initialized —
                # presumably; confirm this is why the bare attribute
                # access is here.
                app.only_manager
                # do two messages to ensure generation of status message doesn't
                # create a job directory we don't mean to or something like that
                self._request_status(test, job_id)
                self._request_status(test, job_id)
                import time
                time.sleep(2)
            consumer.wait_for_messages()
            consumer.join()
            messages = consumer.messages
            assert len(messages) == 2, len(messages)
            assert messages[0]["status"] == "lost", messages[0]
            assert messages[1]["status"] == "lost", messages[1]
    @skip_unless_module("kombu")
    @integration_test
    def test_setup_failure_fires_failed_status(self):
        """If job setup itself fails (staging dir blocked), status is 'failed'.

        NOTE(review): the `test` string here duplicates the one in
        test_staging_failure_fires_failed_status, so both tests share a
        broker URL and manager name — confirm they cannot interfere.
        """
        test = "stating_failure_fires_failed"
        with self._setup_app_provider(test, manager_type="queued_python") as app_provider:
            job_id = '12345'
            consumer = self._status_update_consumer(test)
            consumer.start()
            with app_provider.new_app() as app:
                manager = app.only_manager
                job_info = {
                    'job_id': job_id,
                    'command_line': 'sleep 1000',
                    'setup': True,
                }
                # Occupy the path where the staging directory should go so
                # setup cannot create it.
                with open(os.path.join(app_provider.staging_directory, job_id), "w") as f:
                    f.write("File where staging directory should be, setup should fail now.")
                # TODO: redo this with submit_job coming through MQ for test consistency,
                # would eliminate the need for the exception catch as well.
                try:
                    submit_job(manager, job_info)
                except Exception:
                    pass
            consumer.wait_for_messages()
            consumer.join()
            assert len(consumer.messages) == 1, len(consumer.messages)
            assert consumer.messages[0]["status"] == "failed"
    @contextlib.contextmanager
    def _setup_app_provider(self, test, manager_type="queued_drmaa"):
        # Build a restartable Pulsar app backed by a per-test in-memory broker.
        mq_url = "memory://test_%s" % test
        manager = "manager_%s" % test
        app_conf = dict(message_queue_url=mq_url)
        app_conf["managers"] = {manager: {'type': manager_type}}
        with restartable_pulsar_app_provider(app_conf=app_conf, web=False) as app_provider:
            yield app_provider
    def _status_update_consumer(self, test):
        # Consumer bound to the same per-test broker/manager as the app.
        mq_url = "memory://test_%s" % test
        manager = "manager_%s" % test
        consumer = SimpleConsumer(queue="status_update", url=mq_url, manager=manager)
        return consumer
    def _request_status(self, test, job_id):
        # Publish a fire-and-forget (no-ack) status request for job_id.
        mq_url = "memory://test_%s" % test
        manager = "manager_%s" % test
        exchange = get_exchange(mq_url, manager, {})
        params = {
            "job_id": job_id,
            ACK_FORCE_NOACK_KEY: True,
        }
        exchange.publish("status", params)
class SimpleConsumer:
    """Collects status-update messages published on a Pulsar exchange.

    The consume loop runs in a background thread; the exchange's ``check``
    hook evaluates this object's truthiness, so flipping ``active`` to
    False (done by :meth:`join`) stops the loop.
    """

    def __init__(self, queue, url, manager="_default_"):
        self.queue = queue
        self.url = url
        self.manager = manager
        self.active = True
        self.exchange = get_exchange(url, manager, {})
        self.messages = []

    def start(self):
        """Start consuming in a background thread."""
        t = threading.Thread(target=self._run)
        t.start()
        self.thread = t

    def join(self):
        """Signal the consume loop to stop and wait (up to 10s) for the thread."""
        self.active = False
        self.thread.join(10)

    def wait_for_messages(self, n=1):
        """Poll until at least ``n`` messages have arrived or ~3s elapsed."""
        accumulate_time = 0.0
        while len(self.messages) < n:
            time.sleep(.1)
            # Bug fix: advance by the actual sleep interval (0.1); the old
            # 0.05 increment made the effective timeout twice the intended 3s.
            accumulate_time += 0.1
            if accumulate_time > 3.0:
                raise Exception("Waited too long for messages.")

    def _run(self):
        # check=self: the exchange consults bool(self) to decide whether
        # to keep consuming.
        self.exchange.consume("status_update", self._callback, check=self)

    def _callback(self, body, message):
        self.messages.append(body)
        message.ack()

    def __nonzero__(self):
        return self.active

    __bool__ = __nonzero__  # Both needed Py2 v 3
| galaxyproject/pulsar | test/integration_test_state.py | integration_test_state.py | py | 9,859 | python | en | code | 37 | github-code | 13 |
37647319374 | from typing import Optional, Any
from pathlib import Path
from fastapi import FastAPI, APIRouter, Query, HTTPException, Request
from fastapi.templating import Jinja2Templates
from models import Recipe, RecipeSearchResults, RecipeCreate
from recipes_data import RECIPES
# Templates are resolved relative to this file's directory.
BASE_PATH = Path(__file__).resolve().parent
TEMPLATES = Jinja2Templates(directory=str(BASE_PATH / "templates"))
# Application object plus a router that all endpoints attach to.
app = FastAPI(title="Recipe API", openapi_url="/openapi.json")
api_router = APIRouter()
# Root Get
@api_router.get("/", status_code=200)
async def root(request: Request) -> dict:
    """Render the home page listing all recipes."""
    context = {"request": request, "recipes": RECIPES}
    return TEMPLATES.TemplateResponse("index.html", context)
# Fetch a single recipe by ID
@api_router.get("/recipe/{recipe_id}", status_code=200, response_model=Recipe)
async def fetch_recipe(*, recipe_id: int) -> dict:
result = [recipe for recipe in RECIPES if recipe["id"] == recipe_id]
if not result:
raise HTTPException(status_code=404, detail=f"Recipe with ID {recipe_id} not found")
return result[0]
# Search for recipes based on label keyword
@api_router.get("/search/", status_code=200, response_model=RecipeSearchResults)
async def search_recipes(
*, keyword: Optional[str] = Query(None, min_length=3, example="chicken"), max_results: Optional[int] = 10
) -> dict:
if not keyword:
return {"results": RECIPES[:max_results]}
results = filter(lambda recipe: keyword.lower() in recipe["label"].lower(), RECIPES)
return {"results": list(results)[:max_results]}
@api_router.post("/recipe/", status_code=201, response_model=Recipe)
async def create_recipe(*, recipe_in: RecipeCreate) -> dict:
new_entry_id = len(RECIPES) + 1
recipe_entry = Recipe(
id=new_entry_id,
label=recipe_in.label,
source=recipe_in.source,
url=recipe_in.url,
)
RECIPES.append(recipe_entry.dict())
return recipe_entry
# Register all routes on the application.
app.include_router(api_router)
if __name__ == "__main__":
    # Dev-only entry point; use a proper server/process manager in production.
    import uvicorn
    uvicorn.run("main:app", host="0.0.0.0", port=8001, log_level="debug", reload=True)
| kev-luo/fast_api_playground | main.py | main.py | py | 2,064 | python | en | code | 0 | github-code | 13 |
21793277130 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def isCousins(self, root: Optional[TreeNode], x: int, y: int) -> bool:
        """Return True if x and y are cousins: same depth, different parents.

        BFS over the tree recording (parent, depth) for each target value.
        Uses a deque so each dequeue is O(1) instead of list.pop(0)'s O(n),
        and tracks found-state explicitly so the method returns False
        (rather than raising UnboundLocalError) if a value is absent.
        """
        from collections import deque

        queue = deque([(root, None, 0)])
        x_info = y_info = None
        while queue:
            node, parent, depth = queue.popleft()
            if node.val == x:
                x_info = (parent, depth)
            elif node.val == y:
                y_info = (parent, depth)
            if x_info and y_info:
                break
            if node.left:
                queue.append((node.left, node, depth + 1))
            if node.right:
                queue.append((node.right, node, depth + 1))
        return (x_info is not None and y_info is not None
                and x_info[0] is not y_info[0] and x_info[1] == y_info[1])
18902673263 | from django.forms import ModelForm, ModelMultipleChoiceField
from django.forms.fields import (
BooleanField,
CharField,
ChoiceField,
IntegerField,
)
from django.forms.widgets import PasswordInput
from .models import MassMail
from django_summernote.widgets import SummernoteWidget
from django.contrib.admin.widgets import FilteredSelectMultiple
from agendas.models import Agenda
from contacts.models import Contact
from mailtemplates.models import MailTemplate
# Default SMTP endpoint pre-filled in the mailing form (UGR mail relay).
SMTP_SERVER_INITIAL = "correo.ugr.es"
SMTP_PORT_INITIAL = 587
class MassMailForm(ModelForm):
    """Compose-and-send form for a :class:`MassMail`.

    Besides the model fields it exposes helper selectors (agenda, template,
    header variables) whose choices are rebuilt from the database on every
    instantiation, plus the SMTP credentials used to send the mailing.
    """

    agenda = ChoiceField()
    template = ChoiceField()
    maintain = BooleanField(required=False, label="Conservar contactos existentes")
    headers = ChoiceField()
    sender_name = CharField(required=True, label="Nombre del remitente")
    sender_email = CharField(required=True, label="Email del remitente")
    sender_user = CharField(required=True, label="Usuario (sin @ugr.es)")
    sender_password = CharField(widget=PasswordInput(), label="Contraseña")
    smtp_server = CharField(
        max_length=200, required=False, label="Servidor", initial=SMTP_SERVER_INITIAL
    )
    smtp_port = IntegerField(
        min_value=0,
        max_value=9999,
        required=False,
        label="Puerto",
        initial=SMTP_PORT_INITIAL,
    )

    def __init__(self, *args, **kwargs):
        # `category` pre-selects recipients belonging to that category;
        # `headers` lists agenda column names rendered as template
        # variables ("{{ name }}").
        category = kwargs.pop("category") if "category" in kwargs else None
        headers = kwargs.pop("headers") if "headers" in kwargs else []
        headers = [
            ("{{ " + header + " }}", "{{ " + header + " }}") for header in headers
        ]
        agendas = [(agenda.slug, agenda.__str__) for agenda in Agenda.objects.all()]
        templates = [
            (template.slug, template.__str__) for template in MailTemplate.objects.all()
        ]
        # Blank first entry so "nothing selected" is representable.
        headers.insert(0, (None, "---"))
        agendas.insert(0, (None, "---"))
        templates.insert(0, (None, "---"))
        queryset = Contact.objects.all()
        initial_queryset = (
            Contact.objects.filter(agenda__category__slug=category.slug)
            if category
            else []
        )
        super(MassMailForm, self).__init__(*args, **kwargs)
        self.fields["agenda"] = ChoiceField(
            choices=agendas,
            required=False,
            label="Agenda",
        )
        self.fields["template"] = ChoiceField(
            choices=templates, required=False, label="Plantilla"
        )
        self.fields["headers"] = ChoiceField(
            choices=headers, required=False, label="Variables de la agenda"
        )
        self.fields["recipients"] = ModelMultipleChoiceField(
            label="Destinatarios del mensaje",
            required=False,
            queryset=queryset,
            initial=initial_queryset,
            widget=FilteredSelectMultiple("contacts", is_stacked=True),
        )
        # NOTE(review): this re-declaration duplicates the class-level
        # `smtp_server` field; kept for behaviour, but it now reuses the
        # shared constant instead of repeating the literal host name.
        self.fields["smtp_server"] = CharField(
            max_length=200, required=False, label="Servidor", initial=SMTP_SERVER_INITIAL
        )

    class Meta:
        model = MassMail
        fields = [
            "sender_name",
            "sender_email",
            "recipients",
            "subject",
            "headers",
            "content",
            "sender_user",
            "sender_password",
            "smtp_server",
            "smtp_port",
            "agenda",
            "maintain",
            "template",
        ]
        widgets = {
            "content": SummernoteWidget(),
            # 'recipients': FilteredSelectMultiple("contacts", is_stacked=False),
        }

    class Media:
        css = {
            "all": (
                "/static/admin/css/widgets.css",
                "/static/css/styles.css",
            ),
        }
        js = (
            "/admin/jsi18n",
            "/static/js/custom-menu.js",
        )
# class RecipientsForm(ModelForm):
# agenda=ChoiceField(choices=[(agenda.id, agenda.__str__) for agenda in Agenda.objects.all()], required = False)
# class TemplateForm(ModelForm):
# template=ChoiceField(choices=[(template.id, template.__str__) for template in MailTemplate.objects.all()], required = False)
| GabCas28/Agenda-Movilidad | src/mailsender/forms.py | forms.py | py | 4,180 | python | en | code | 0 | github-code | 13 |
17956901457 | from pickle import TRUE
from unicodedata import category
from lifestore_file import lifestore_searches, lifestore_sales, lifestore_products
"""
La info de LifeStore_file:
lifestore_searches = [id_search, id product]
lifestore_sales = [id_sale, id_product, score (from 1 to 5), date, refund (1 for true or 0 to false)]
lifestore_products = [id_product, n ame, price, category, stock]
"""
"""
login
credenciales:
usuario:
lalo96
contraseña:
dudu96
"""
def login():
    """Prompt for credentials on stdin; exit the process after 3 failures.

    Fixes: the attempt-limit check used to sit outside the failure branch,
    so a *successful* login on the third attempt still called exit().
    NOTE(review): credentials are hard-coded in source; move them to a
    secure store before real use.
    """
    usuarioAccedio = False
    intentos = 0
    mensajeBienvenida = 'Bienvenida al sistema\nAccede con tus credenciales'
    print(mensajeBienvenida)
    while not usuarioAccedio:
        usuario = input('usuario: ')
        contraseña = input('contraseña: ')
        intentos += 1
        if usuario == 'lalo96' and contraseña == 'dudu96':
            usuarioAccedio = True
            print('Hola de nuevo')
        else:
            if usuario == 'lalo96':
                print('Te equivocaste en la contraseña')
            else:
                print(f'El usuario: "{usuario}" no esta registrado')
            print(f'tienes {3 - intentos} intentos restantes ')
            # Only give up after a *failed* third attempt.
            if intentos == 3:
                exit()
def punto_1():
    # Intended report: most-viewed/most-sold vs lagging products.
    # NOTE(review): this function crashes as written — see the flagged
    # lines below; the intended aggregation needs to be clarified before
    # it can be fixed.
    # Group searches by the searched product id.
    prod_vistas = {}
    for busqueda in lifestore_searches:
        prod_id = busqueda[0]
        busq_id= busqueda[1]
        if busq_id not in prod_vistas.keys():
            prod_vistas[busq_id] = []
        prod_vistas[busq_id].append(busqueda)
    for key in prod_vistas.keys():
        print(key)
        print(prod_vistas[key])
    # Group sale product ids by... the same product id (key == value here;
    # presumably a category id was intended — confirm).
    category_ids = {}
    for venta in lifestore_sales:
        id_venta = venta[1]
        if id_venta not in category_ids.keys():
            category_ids[id_venta] = []
        category_ids[id_venta].append(id_venta)
    # NOTE(review): this is a tuple, but items are assigned into it below,
    # which raises TypeError; it should almost certainly be a dict ({}).
    resultado_por_cateria = ()
    for category, venta_id_lista in category_ids.items():
        prod_busq = []
        busq = 0
        ventas = 0
        for venta_id in venta_id_lista:
            if venta_id not in prod_vistas.keys():
                continue
            ventas_busqueda = prod_vistas[venta_id]
            # NOTE(review): `sale` and `total_sales` are unused, and
            # len(venta_id) is len() of an int — TypeError.
            sale = lifestore_sales[venta_id][1]
            total_sales = len(venta_id)
            prod_busq += ventas_busqueda
            # NOTE(review): rebinds the prod_vistas dict to a float inside
            # the loop; subsequent iterations then fail on .keys().
            prod_vistas = sum(prod_vistas) / len(prod_vistas)
        resultado_por_cateria[category] = {
            'prod_busq' : prod_busq,
            'ventas' : ventas,
        }
    print(resultado_por_cateria)
def punto_2():
    """Rank products by average review score and print the best/worst five."""
    # Collect every review score, grouped by product id (refunds included,
    # matching the data as recorded in lifestore_sales).
    reviews_by_id = {}
    for venta in lifestore_sales:
        reviews_by_id.setdefault(venta[1], []).append(venta[2])

    # Build [id, truncated average (2 decimals), number of reviews],
    # best average first (stable sort keeps first-seen order on ties).
    ranking = []
    for prod_id, reviews in reviews_by_id.items():
        promedio = int(sum(reviews) / len(reviews) * 100) / 100
        ranking.append([prod_id, promedio, len(reviews)])
    ranking.sort(key=lambda fila: fila[1], reverse=True)

    for fila in ranking:
        print(fila)

    def imprimir(fila):
        # Product name truncated to its first four words.
        prod_id, promedio, cantidad = fila
        nombre = ' '.join(lifestore_products[prod_id - 1][1].split(' ')[:4])
        print(f'El producto "{nombre}":\n\trev_prom: {promedio}, num de ventas: {cantidad}')

    print('\nTop 5 productos con las mejores reseñas\n')
    for fila in ranking[:5]:
        imprimir(fila)

    print('\nTop 5 productos con las peores reseñas\n')
    for fila in ranking[-5:]:
        imprimir(fila)
def punto_3():
    # Monthly income/sales report plus a per-product (non-refunded) sale count.
    # Keep only non-refunded sales as [sale_id, date] pairs.
    id_fecha = [ [sale[0], sale[3]] for sale in lifestore_sales if sale[4] == 0 ]
    # Para categorizar usamos un diccionario
    categorizacion_meses = {}
    for par in id_fecha:
        # Tengo ID y Mes
        id = par[0]
        _, mes, _ = par[1].split('/')
        # Si el mes aun no existe como llave, la creamos
        if mes not in categorizacion_meses.keys():
            categorizacion_meses[mes] = []
        categorizacion_meses[mes].append(id)
    # mes : [ids de venta]
    # for key in categorizacion_meses.keys():
    #     print(key)
    #     print(categorizacion_meses[key])
    # crear dic
    # mes -> [total income, number of sales]
    mes_info = {}
    for mes, ids_venta in categorizacion_meses.items():
        lista_mes = ids_venta
        suma_venta = 0
        for id_venta in lista_mes:
            # Sale ids are assumed to be 1-based positions in
            # lifestore_sales — TODO confirm against the data file.
            indice = id_venta - 1
            info_venta = lifestore_sales[indice]
            id_product = info_venta[1]
            info_prod = lifestore_products[id_product-1]
            precio = info_prod[2]
            suma_venta += precio
        print(mes, suma_venta, f'ventas totales: {len(lista_mes)}')
        mes_info[mes] = [suma_venta, len(lista_mes)]
    # Flatten to [mes, ganancias, ventas] rows for sorting.
    mes_ganancia_ventas = []
    for mes, datos in mes_info.items():
        ganancias, ventas = datos
        sub = [mes, ganancias, ventas]
        mes_ganancia_ventas.append(sub)
    # NOTE(review): ord_mes and ord_gancia are computed but never used.
    ord_mes = sorted(mes_ganancia_ventas)
    ord_gancia = sorted(mes_ganancia_ventas, key=lambda x:x[1], reverse=True)
    ord_ventas = sorted(mes_ganancia_ventas, key=lambda x:x[2], reverse=True)
    print(ord_ventas)
    # Count non-refunded sales per product id.
    id_ventas = []
    for prod in lifestore_products:
        id_prod = prod[0]
        sub = [id_prod, 0]
        id_ventas.append(sub)
    for sale in lifestore_sales:
        id_prod = sale[1]
        indice = id_prod - 1
        if sale[-1] == 1:
            continue
        id_ventas[indice][1] += 1
    print(id_ventas)
def menu():
    """Entry point: authenticate, then loop over the report menu forever."""
    login()
    opciones = {'1': punto_1, '2': punto_2, '3': punto_3}
    while True:
        print("inicio del proyecto")
        print("\t1: Producto más vendido y productos rezagados")
        print("\t2: productos por reseñas en el servicio")
        print("\t3: promedio mensual,anual y meses con mas demanda")
        print("\t0: Salir")
        seleccion = input('> ')
        if seleccion == '0':
            exit()
        accion = opciones.get(seleccion)
        if accion is None:
            print('Opcion invalida, solamente son los numeros que aparecen, proximamente nuevas opciones')
        else:
            accion()
            print('\n')
menu()  # start the application
| lalo0596/PROYECTO-01-JAVIER-EDUARDO | PROYECTO-01-JAVIER-EDUARDO.py | PROYECTO-01-JAVIER-EDUARDO.py | py | 7,112 | python | es | code | 0 | github-code | 13 |
30552721443 | from flask import Flask
from flask import jsonify
from datetime import date
import urllib.request
import json
app = Flask(__name__)
@app.route("/getExchangeRate/<fromCurrency>/<toCurrency>")
def profile(fromCurrency, toCurrency):
    """Return NBP table A exchange rates as JSON.

    NOTE(review): fromCurrency/toCurrency are accepted but not yet used to
    filter the table — the whole table is returned as-is.
    """
    print("From currency: " + fromCurrency)
    print("To currency: " + toCurrency)
    url = "http://api.nbp.pl/api/exchangerates/tables/a/"
    # Close the HTTP response deterministically instead of leaking it.
    with urllib.request.urlopen(url) as json_obj:
        data = json.load(json_obj)
    return jsonify(data)
if __name__ == "__main__":
    # Start Flask's development server (not suitable for production).
    app.run()
| wojciodataist/currency-service | application.py | application.py | py | 572 | python | en | code | 0 | github-code | 13 |
31178606432 | import json
from flask import Flask, redirect, url_for, session, request, jsonify
from flask_oauthlib.client import OAuth
from . import weibo_bp
from .. import app,utils
oauth = OAuth(app)
# Remote-app descriptor for Weibo's OAuth2 endpoints.
# NOTE(review): consumer_key/consumer_secret are empty; they must be
# supplied (ideally from configuration, not source) for login to work.
weibo = oauth.remote_app(
    'weibo',
    consumer_key='',
    consumer_secret='',
    request_token_params={'scope': 'email,statuses_to_me_read'},
    base_url='https://api.weibo.com/2/',
    authorize_url='https://api.weibo.com/oauth2/authorize',
    request_token_url=None,
    access_token_method='POST',
    access_token_url='https://api.weibo.com/oauth2/access_token',
    # Weibo's token response is not standards-compliant; force JSON parsing.
    content_type='application/json',
)
def json_to_dict(x):
    """Parse ``x`` as JSON; return the original value unchanged on failure.

    The ``encoding`` keyword was removed from json.loads in Python 3.9, so
    passing it raised TypeError — which the old bare ``except`` silently
    swallowed, making the function always return its input.  Dropping the
    argument and catching only decode errors fixes both problems.
    """
    try:
        return json.loads(x)
    except (ValueError, TypeError):
        return x
@weibo_bp.route('/login/weibo')
def weibo_login():
    """Kick off the OAuth dance by redirecting the user to Weibo."""
    callback_url = url_for('.authorized',
                           next=request.args.get('next') or request.referrer or None,
                           _external=True)
    return weibo.authorize(callback=callback_url)
# @app.route('/logout')
# def logout():
# session.pop('oauth_token', None)
# return redirect(url_for('index'))
@weibo_bp.route('/login_weibo/authorized')
def authorized():
    # OAuth callback: exchange the code for a token, fetch the user's
    # profile and log in (or create) the matching local user.
    try:
        resp = weibo.authorized_response()
        if resp is None:
            return 'Access denied: reason=%s error=%s' % (
                request.args['error_reason'],
                request.args['error_description']
            )
    # NOTE(review): bare except hides all failures (including KeyError on
    # the error args above) behind a silent redirect — consider narrowing.
    except:
        return redirect(url_for('home'))
    # Stored as (token, secret) tuples as flask-oauthlib's tokengetter expects.
    session['oauth_token'] = (resp['access_token'], '')
    session['uid'] = (resp['uid'], '')
    resp = weibo.get('/users/show.json', {'access_token': session['oauth_token'][0],'uid':session['uid'][0]})
    user_info = json_to_dict(resp.data)
    # Only a parsed dict means the profile call succeeded; otherwise the
    # raw payload is silently ignored.
    if type(user_info) == dict:
        user = utils.login_or_create_union_user(user_info.get("id"), user_info, "weibo")
    return redirect(url_for('home'))
@weibo.tokengetter
def get_weibo_oauth_token():
    # Used by flask-oauthlib to attach the stored token to API requests.
    return session.get('oauth_token')
def change_weibo_header(uri, headers, body):
    """Rewrite the Authorization scheme from 'Bearer' to 'OAuth2'.

    Weibo's API does not follow the OAuth2 standard and expects the
    non-standard 'OAuth2' scheme in the Authorization header.
    """
    auth_value = headers.get('Authorization')
    if auth_value:
        headers['Authorization'] = auth_value.replace('Bearer', 'OAuth2')
    return uri, headers, body
weibo.pre_request = change_weibo_header  # patch every outgoing Weibo request
73291332816 | import base64
import hashlib
import itertools
import json
import struct
import time
import uuid
import cloudant
from hamcrest import *
def b64url(val):
    # Python 2 only: relies on str and bytes being the same type.
    # Wraps `val` in a binary framing (0x83 version byte, 0x6d tag,
    # big-endian length) — this appears to mirror Erlang's
    # term_to_binary encoding of a binary; confirm against the server
    # side.  The MD5 of that term is then base64'd with the URL-safe
    # alphabet and padding stripped.
    term = chr(131) + chr(109) + struct.pack("!I", len(val)) + str(val)
    md5 = hashlib.md5(term).digest()
    b64 = base64.b64encode(md5)
    return b64.rstrip("=").replace("/", "_").replace("+", "-")
def mk_docid(src_val, tgt_val):
    """Build the _local shard-sync checkpoint doc id for a src/tgt pair."""
    return "_local/shard-sync-{0}-{1}".format(b64url(src_val), b64url(tgt_val))
def test_basic_internal_replication():
    # Python 2 test: seeds one shard replica with docs, waits for internal
    # replication to copy them to the other nodes, then checks the
    # _local/shard-sync checkpoint docs agree pairwise across all nodes.
    srv = cloudant.get_server()
    db = srv.db("test_suite_db")
    db.reset(q=1)
    private_nodes = cloudant.nodes()
    # Look up the shard suffix so we can address the single shard file
    # directly on each node's private interface.
    dbsdb = private_nodes[0].db("dbs")
    dbdoc = dbsdb.doc_open("test_suite_db")
    suffix = "".join(map(chr, dbdoc["shard_suffix"]))
    pdbname = "shards%2F00000000-ffffffff%2Ftest_suite_db" + suffix
    srcdb = private_nodes[0].db(pdbname)
    tgtdbs = [s.db(pdbname) for s in private_nodes[1:]]
    def make_docs(count):
        # `count` fresh docs with random ids.
        ret = []
        for i in range(count):
            ret.append({"_id": uuid.uuid4().hex})
        return ret
    # Write 1000 docs directly into the source replica.
    for i in range(10):
        srcdb.bulk_docs(make_docs(100))
    total_docs = srcdb.info()["doc_count"]
    # Poll (up to ~8s per target) until every target has caught up.
    for tdb in tgtdbs:
        i = 0
        while tdb.info()["doc_count"] < total_docs:
            i += 1
            if i > 32:
                raise AssertionError("Timeout during internal replication")
            time.sleep(0.25)
    # There's a race with the next tests on
    # who writes/reads the _local doc first.
    time.sleep(0.25)
    # Every ordered node pair must share an identical checkpoint doc.
    for (src, tgt) in itertools.permutations(private_nodes, 2):
        sdb = src.db(pdbname)
        tdb = tgt.db(pdbname)
        docid = mk_docid(sdb.info()["uuid"], tdb.info()["uuid"])
        doc1 = sdb.doc_open(docid)
        doc2 = tdb.doc_open(docid)
        assert_that(doc1, is_(doc2))
        assert_that(doc1, has_key("seq"))
        assert_that(doc1["seq"], is_(total_docs))
        assert_that(doc1, has_key("history"))
        assert_that(doc1["history"], has_length(equal_to(1)))
        # .values()[0] is fine here: Python 2 returns a list.
        assert_that(doc1["history"].values()[0], has_length(greater_than(0)))
        entry = has_entries({
            "source_node": contains_string("@"),
            "source_uuid": has_length(32),
            "source_seq": greater_than(0),
            "target_node": contains_string("@"),
            "target_uuid": has_length(32),
            "target_seq": greater_than(0),
            "timestamp": instance_of(basestring)
        })
        assert_that(doc1["history"].values()[0], only_contains(entry))
| cloudant/quimby | internal_replication/1000-basic-internal-rep-test.py | 1000-basic-internal-rep-test.py | py | 2,571 | python | en | code | 0 | github-code | 13 |
17114353194 | """ondelete_cascade_on_tags
Revision ID: 5471c0ac2e0a
Revises: 6b245dc1afdc
Create Date: 2022-08-25 22:16:20.171838
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5471c0ac2e0a'
down_revision = '6b245dc1afdc'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Recreate both reference_id foreign keys with ON DELETE CASCADE so
    # tag rows are removed automatically when their reference is deleted.
    # The new constraints are created with name=None, so they receive the
    # backend's default name.
    op.drop_constraint('topic_entity_tag_reference_id_fkey', 'topic_entity_tag', type_='foreignkey')
    op.create_foreign_key(None, 'topic_entity_tag', 'reference', ['reference_id'], ['reference_id'], ondelete='CASCADE')
    op.drop_constraint('workflow_tag_reference_id_fkey', 'workflow_tag', type_='foreignkey')
    op.create_foreign_key(None, 'workflow_tag', 'reference', ['reference_id'], ['reference_id'], ondelete='CASCADE')
    # ### end Alembic commands ###
def downgrade():
    """Restore the original (non-cascading) reference_id foreign keys.

    The autogenerated script dropped the constraints by name ``None``,
    which fails at runtime; use the PostgreSQL default names
    (``<table>_<column>_fkey``) that the anonymous constraints created in
    ``upgrade`` received.
    """
    op.drop_constraint('workflow_tag_reference_id_fkey', 'workflow_tag', type_='foreignkey')
    op.create_foreign_key('workflow_tag_reference_id_fkey', 'workflow_tag', 'reference', ['reference_id'], ['reference_id'])
    op.drop_constraint('topic_entity_tag_reference_id_fkey', 'topic_entity_tag', type_='foreignkey')
    op.create_foreign_key('topic_entity_tag_reference_id_fkey', 'topic_entity_tag', 'reference', ['reference_id'], ['reference_id'])
| alliance-genome/agr_literature_service | alembic/versions/5471c0ac2e0a_ondelete_cascade_on_tags.py | 5471c0ac2e0a_ondelete_cascade_on_tags.py | py | 1,370 | python | en | code | 1 | github-code | 13 |
14131385203 | # poissionian distribution
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.special import factorial
# Load the measured count rates from the lab spreadsheet.
excel_1 = "lab1.xlsx"
df_first = pd.read_excel(excel_1,sheet_name = "Sheet5")
x1 = list(df_first['Count Rate'])
plt.hist(x1, bins = 200, rwidth= 0.7)
# Overlay candidate pdfs; the fit parameters below use mean ~= 30.506
# (scale factors *100 / *30 just match the curves to the histogram counts).
x = np.arange(10,60,.001)
# Gaussian
y1 = 0.0595*np.exp(-((x-30.506)**2)/89.86)
plt.plot(x,y1*100)
plt.hist(x1, bins = 200, rwidth= 0.7)
plt.xlabel('Count Rate (1/s)')
plt.ylabel('pdf*100')
plt.title("Gaussian Distribution")
plt.show()
# Poisson
mu =30.506
y = np.exp(-mu)*np.power(mu,x)/(factorial(x))
plt.plot(x,y*30)
plt.hist(x1, bins = 200, rwidth= 0.7)
plt.xlabel('Count Rate (1/s)')
plt.ylabel('pdf*30')
plt.title("Poisson Distribution")
plt.show()
# Binomial
p = 0.30506
y3 = ((factorial(100))/((factorial(x))*(factorial(100-x))))*(np.power(p,x))*(np.power(1-p,100-x))
plt.plot(x,y3*30)
plt.hist(x1, bins = 200, rwidth= 0.7)
plt.xlabel('Count Rate (1/s)')
plt.ylabel('pdf*30')
plt.title("Binomial Distribution")
plt.show()
6971439003 | student_score={
"Harry":81,
"Pranav":99,
"Jhon":78,
"SRK":74,
"Tiger":10
}
print(student_score)
student_grade={} # creating an empty dictionary
for key in student_score:
score=student_score[key]
if(score>=91 and score<=100):
grade="Outstanding"
elif(score>=81 and score<=90):
grade="Exceeds Exellence"
elif(score>=71 and score<=80):
grade="Acceptable"
elif(score<=70):
grade="Fail"
student_grade[key]=grade
print(student_grade) | malpani2003/100_days_Python_bootcamp | Code_Challenge/challenge_grade_program.py | challenge_grade_program.py | py | 530 | python | en | code | 0 | github-code | 13 |
31467127762 | # coding=utf-8
class State:
    '''
    State (abstract): declares the interface that encapsulates the
    behaviour associated with one particular state of the context.
    '''
    def write_program(self, w):
        pass
class Work:
    '''
    Context: holds an instance of a concrete State subclass; that
    instance defines the current state.
    '''
    def __init__(self):
        # Start the day at 9 o'clock in the morning state.
        self.hour = 9
        self.current = ForenoonState()
    def set_state(self, temp):
        # Transition to another state object.
        self.current = temp
    def write_program(self):
        # Delegate behaviour to the current state.
        self.current.write_program(self)
class NoonState(State):
    '''
    Concrete state: implements the behaviour associated with the
    context's afternoon state.
    '''
    def write_program(self, w):
        print('noon working')
        if w.hour < 13:
            print('fun')
        else:
            print('need to rest')
class ForenoonState(State):
    # Concrete morning state: hands the context over to NoonState
    # (and re-dispatches) once it is 12 o'clock or later.
    def write_program(self, w):
        if w.hour < 12:
            print('morning working')
            print('energetic')
        else:
            w.set_state(NoonState())
            w.write_program()
if __name__ == '__main__':
    # Demo: morning behaviour at 9, then the noon-state transition at 14.
    mywork = Work()
    mywork.hour = 9
    mywork.write_program()
    mywork.hour = 14
    mywork.write_program()
| hflyf123/Python_design_mode | State.py | State.py | py | 1,208 | python | en | code | 0 | github-code | 13 |
4537486650 | '''
NAME: VAIBHAV SUDHAKAR BHAVSAR
TE-B
ROLL NO: 08
ASSIGNMENT NO: 4
PROBLEM STATEMENT: Write a program using TCP socket for wired network for following
Calculator (Arithmetic) : server side '''
import socket
import sys
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 23000))
sock.listen(1)
# Serve a single client; each request is three 16-byte messages:
# operator, operand 1, operand 2.  Reply with the result for + / -.
clisock, (ip, port) = sock.accept()
while True:
    data = clisock.recv(16)
    if not data:
        # Client closed the connection: recv() returns b''.  Without this
        # check the loop spun forever and crashed on int('').
        break
    op = data.decode()
    v1 = clisock.recv(16).decode()
    v2 = clisock.recv(16).decode()
    print(op, v1, v2)
    w1 = int(v1)
    w2 = int(v2)
    if op == '+':
        res = w1 + w2
        clisock.send(str.encode(str(res)))
    if op == '-':
        res = w1 - w2
        clisock.send(str.encode(str(res)))
# Reached on client disconnect; previously sock.close() was unreachable
# and the client socket was never closed.
clisock.close()
sock.close()
| IamVaibhavsar/Third_Year_Lab_Assignments | Computer Networks Lab/A4TCPSocket/Calculator/calculator_server.py | calculator_server.py | py | 728 | python | en | code | 20 | github-code | 13 |
31806646115 | from aiogram import types
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters import Command
from aiogram.types import ReplyKeyboardRemove
from create_bot import dp, bot, db
from keyboards.adminbuttons import adminpanelcontinue, startposting, adminpanelmenu
from states.moderator_states import ModeratorStates
@dp.message_handler(Command('rassilka'))
async def show_rassilka(message: types.Message):
    # /rassilka: start the broadcast wizard — ask for the post text and
    # move the FSM into the text-collection state.
    await message.answer('Введите текст поста:', reply_markup=ReplyKeyboardRemove())
    await ModeratorStates.text.set()
@dp.message_handler(state=ModeratorStates.text)
async def get_posttext(message: types.Message, state: FSMContext):
    """Store the post text, show the attachment menu, advance the FSM."""
    await state.update_data(textpost=message.text)
    await message.answer('Выберите то, что вам нужно :', reply_markup=adminpanelmenu)
    await ModeratorStates.next_stage.set()
@dp.message_handler(state=ModeratorStates.next_stage, text='С фото 🏞')
async def get_photo(message: types.Message, state: FSMContext):
    # Moderator chose "with photo": prompt for the image upload.
    await message.answer('Отправьте фото 🏞 :')
    await ModeratorStates.get_img.set()
@dp.message_handler(state=ModeratorStates.get_img, content_types=types.ContentType.PHOTO)
async def get_photo_id(message: types.Message, state: FSMContext):
    """Store the uploaded photo's file_id for the broadcast.

    Telegram sends Message.photo as PhotoSize entries ordered from
    smallest to largest, so take the last one — the old ``photo[0]``
    broadcast the low-resolution thumbnail.
    """
    fileid = message.photo[-1].file_id
    await state.update_data(photoid=fileid)
    await ModeratorStates.finishpost.set()
    await message.answer('✅ Данные получены нажмите - продолжить', reply_markup=adminpanelcontinue)
@dp.message_handler(state=ModeratorStates.next_stage, text='С видео 🎥')
async def get_video(message: types.Message, state: FSMContext):
    # Moderator chose "with video": prompt for the video upload.
    await message.answer('Отправьте видео 🎥 :')
    await ModeratorStates.get_video.set()
@dp.message_handler(state=ModeratorStates.get_video, content_types=types.ContentType.VIDEO)
async def get_video_id(message: types.Message, state: FSMContext):
    # Remember the uploaded video's file_id, then let the moderator continue.
    fileid = message.video.file_id
    await state.update_data(videoid=fileid)
    await ModeratorStates.finishpost.set()
    await message.answer('✅ Данные получены нажмите - продолжить', reply_markup=adminpanelcontinue)
@dp.message_handler(state=ModeratorStates.next_stage, text='Пропустить ➡️')
@dp.message_handler(state=ModeratorStates.finishpost)
async def get_testpost(message: types.Message, state: FSMContext):
    """Send a preview of the collected post back to the moderator.

    Reached either via 'Пропустить ➡️' (text-only post) or after an
    attachment was stored. On a Telegram formatting error (e.g. unclosed
    HTML tags in the text) the flow is aborted and the FSM is reset.
    """
    data = await state.get_data()
    post_text = data.get('textpost')
    photoid = data.get('photoid')
    videoid = data.get('videoid')
    user = message.from_user.id
    try:
        if photoid:
            await bot.send_photo(user, photo=photoid, caption=post_text,
                                 parse_mode='HTML', reply_markup=startposting)
        elif videoid:
            await bot.send_video(user, video=videoid, caption=post_text,
                                 parse_mode='HTML', reply_markup=startposting)
        else:
            await bot.send_message(user, disable_web_page_preview=True, text=post_text, parse_mode='HTML',
                                   reply_markup=startposting)
        await ModeratorStates.publish.set()
    except Exception as e:
        print(e)
        # 'заново' spelling fixed (was 'заного') for consistency with cancel_post.
        await bot.send_message(user,
                               text=f'Введенный текст не правильно форматирован! Убедитесь, что все теги закрыты.\n Начните всё заново : /rassilka')
        await state.finish()
        await state.reset_data()
@dp.callback_query_handler(state=ModeratorStates.publish, text='startposting')
async def sendposts(call: types.CallbackQuery, state: FSMContext):
    """Broadcast the prepared post to every known user.

    Deduplicates the recipient list, skips users that fail delivery
    (blocked bot, deleted chat, ...) and finally reports how many
    messages went out.
    """
    data = await state.get_data()
    post_text = data.get('textpost')
    photoid = data.get('photoid')
    videoid = data.get('videoid')
    senpostcol = 0
    # Set comprehension replaces the manual append-then-set() loop;
    # each row's first column is the chat/user id.
    recipients = {row[0] for row in db.get_all_users()}
    for user in recipients:
        try:
            if photoid:
                await bot.send_photo(user, photo=photoid, caption=post_text,
                                     parse_mode='HTML')
            elif videoid:
                await bot.send_video(user, video=videoid, caption=post_text,
                                     parse_mode='HTML')
            else:
                await bot.send_message(chat_id=user, disable_web_page_preview=True, text=post_text, parse_mode='HTML')
            senpostcol += 1
        except Exception as e:
            # Delivery failures are expected for some users; log and continue.
            print(e)
    await call.message.answer(f'✅ Пост успешно отправлен {senpostcol} пользователям \n',
                              reply_markup=ReplyKeyboardRemove())
    await state.finish()
    await state.reset_data()
@dp.callback_query_handler(state=ModeratorStates.publish, text='cancelposting')
async def cancel_post(call: types.CallbackQuery, state: FSMContext):
    """Abort the broadcast: drop collected data and reset the FSM."""
    notice = f'✅ Данные удалены.\n Начните всё заново : /rassilka'
    await call.message.answer(notice, reply_markup=ReplyKeyboardRemove())
    await state.finish()
    await state.reset_data()
| jackflaggg/telegram-bot-barbershop | handlers/rassilka.py | rassilka.py | py | 5,169 | python | en | code | 0 | github-code | 13 |
17090562834 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.AlipayUserCustIdentifyActivity import AlipayUserCustIdentifyActivity
class AlipayUserCustomerIdentifyResponse(AlipayResponse):
    """Response model for the alipay.user.customer.identify API call.

    Carries an optional list of identify activities and an optional
    user profile extracted from the raw response payload.
    """

    def __init__(self):
        super(AlipayUserCustomerIdentifyResponse, self).__init__()
        self._activity_list = None
        self._user_profile = None

    @property
    def activity_list(self):
        return self._activity_list

    @activity_list.setter
    def activity_list(self, value):
        # Only list inputs are accepted; each element may be either an
        # already-built model instance or a raw dict to be converted.
        if isinstance(value, list):
            self._activity_list = [
                element if isinstance(element, AlipayUserCustIdentifyActivity)
                else AlipayUserCustIdentifyActivity.from_alipay_dict(element)
                for element in value
            ]

    @property
    def user_profile(self):
        return self._user_profile

    @user_profile.setter
    def user_profile(self, value):
        self._user_profile = value

    def parse_response_content(self, response_content):
        """Populate this object's fields from the decoded response dict."""
        response = super(AlipayUserCustomerIdentifyResponse, self).parse_response_content(response_content)
        if 'activity_list' in response:
            self.activity_list = response['activity_list']
        if 'user_profile' in response:
            self.user_profile = response['user_profile']
| alipay/alipay-sdk-python-all | alipay/aop/api/response/AlipayUserCustomerIdentifyResponse.py | AlipayUserCustomerIdentifyResponse.py | py | 1,458 | python | en | code | 241 | github-code | 13 |
12304649248 | # https://developers.google.com/accounts/docs/OAuth2ForDevices
import sys
import urllib
import httplib2
import os.path
import json
import time
from oauth2client import client
from datetime import datetime, timedelta
class DeviceOAuth:
    """Google OAuth2 "device flow" client.

    Obtains an access/refresh token for an input-constrained device,
    caches it in ``oauth_token.json`` and refreshes it on demand.
    See https://developers.google.com/accounts/docs/OAuth2ForDevices
    """

    def __init__(self, client_id, client_secret, scopes):
        self.client_id = client_id
        self.client_secret = client_secret
        self.token = None               # dict with access_token/refresh_token/expires_at
        self.retry_interval = 10        # polling interval (s); overwritten by get_user_code
        self.device_code = None
        self.verification_url = None
        self.user_code = None
        self.conn = None
        self.token_file = 'oauth_token.json'
        self.scope = scopes
        self.reset_connection()

    def get_token(self, on_user_code):
        """Return a token dict, running the device flow if none is cached.

        ``on_user_code(user_code, verification_url)`` is called so the
        caller can display the code to the user before we start polling.
        """
        token = self.load_token()
        if token is None:
            user_code = self.get_user_code()
            on_user_code(user_code, self.verification_url)  # prompt user
            token = self.get_new_token()
        return token

    def get_credentials(self):
        """Wrap the stored token in an oauth2client GoogleCredentials object."""
        isoFormat = "%Y-%m-%dT%H:%M:%S.%f"
        access_token = self.token['access_token']
        refresh_token = self.token['refresh_token']
        expires_at = datetime.strptime(self.token['expires_at'], isoFormat)
        token_uri = 'https://accounts.google.com/o/oauth2/token'
        user_agent = 'gaugette/1.0'
        credentials = client.GoogleCredentials(access_token, self.client_id, self.client_secret, refresh_token, expires_at, token_uri, user_agent)
        return credentials

    # this setup is isolated because it eventually generates a BadStatusLine
    # exception, after which we always get httplib.CannotSendRequest errors.
    # When this happens, we try re-creating the connection.
    def reset_connection(self):
        """(Re)create the underlying httplib2 connection object."""
        # httplib.HTTPConnection.debuglevel = 1
        self.conn = httplib2.Http()

    def load_token(self):
        """Load the cached token from disk; return the dict or None."""
        self.token = None
        if os.path.isfile(self.token_file):
            with open(self.token_file) as file:
                self.token = json.load(file)
        return self.token

    def save_token(self):
        """Persist the current token dict to ``self.token_file``."""
        with open(self.token_file, 'w') as file:
            file.write(json.dumps(self.token))

    def has_token(self):
        """True when a token dict is currently loaded."""
        return self.token is not None

    def get_user_code(self):
        """Start the device flow: request a device/user code pair.

        On success stores device_code, verification_url and the
        server-mandated polling interval; exits the process on an HTTP
        error (matches the original behaviour).
        """
        (response, content) = self.conn.request(
            "https://accounts.google.com/o/oauth2/device/code",
            "POST",
            urllib.parse.urlencode({
                'client_id': self.client_id,
                'scope' : ' '.join(self.scope)
            }),
            {"Content-type": "application/x-www-form-urlencoded"}
        )
        content_utf8 = content.decode('utf-8')
        if response.status == 200:
            data = json.loads(content_utf8)
            self.device_code = data['device_code']
            self.user_code = data['user_code']
            self.verification_url = data['verification_url']
            self.retry_interval = data['interval']
        else:
            print(response.status)
            print(content)
            sys.exit()
        return self.user_code

    def set_token_expiry(self):
        """Convert the relative 'expires_in' into an absolute ISO 'expires_at'."""
        expires_in = timedelta(seconds=self.token['expires_in'])
        expires_at = datetime.now() + expires_in
        self.token['expires_at'] = expires_at.isoformat()

    def get_new_token(self):
        """Poll the token endpoint until the user approves the device."""
        while self.token is None:
            (response, content) = self.conn.request(
                "https://accounts.google.com/o/oauth2/token",
                "POST",
                urllib.parse.urlencode({
                    'client_id' : self.client_id,
                    'client_secret' : self.client_secret,
                    'code' : self.device_code,
                    'grant_type' : 'http://oauth.net/grant_type/device/1.0'
                }),
                {"Content-type": "application/x-www-form-urlencoded"}
            )
            content_utf8 = content.decode('utf-8')
            if response.status == 200:
                data = json.loads(content_utf8)
                if 'access_token' in data:
                    self.token = data
                    self.set_token_expiry()
                    self.save_token()
                else:
                    # authorization still pending: wait and poll again
                    time.sleep(self.retry_interval + 2)
        return self.token

    def refresh_token(self):
        """Exchange the refresh token for a fresh access token.

        Returns True on success, False otherwise.

        Bug fix: the original used the httplib calling convention
        (method-first arguments plus ``conn.getresponse()``), neither of
        which exists on ``httplib2.Http`` — the call could never work.
        Rewritten in the same httplib2 ``request()`` style as the other
        methods in this class.
        """
        refresh_token = self.token['refresh_token']
        (response, content) = self.conn.request(
            "https://accounts.google.com/o/oauth2/token",
            "POST",
            urllib.parse.urlencode({
                'client_id' : self.client_id,
                'client_secret' : self.client_secret,
                'refresh_token' : refresh_token,
                'grant_type' : 'refresh_token'
            }),
            {"Content-type": "application/x-www-form-urlencoded"}
        )
        if response.status == 200:
            data = json.loads(content.decode('utf-8'))
            if 'access_token' in data:
                self.token = data
                # we NEVER get a new refresh token at this point
                self.token['refresh_token'] = refresh_token
                self.set_token_expiry()
                self.save_token()
                return True
        else:
            print("Unexpected response %d to renewal request" % response.status)
            print(content)
        return False
| guyc/py-gaugette | gaugette/oauth.py | oauth.py | py | 5,387 | python | en | code | 120 | github-code | 13 |
42402720391 | #! /usr/bin/env python
#
# Generalize rotation curve plotter in order to compare rotation curves
#
# -u rotcur format
# -i ringfit format
# -s velfitss07 format [not activated yet]
#
# -r keep in radio format (or whichever format it was)
# -o convert from radio to optical convention (needs vsys)
#
# -p points
# -l lines
# -e add error bar to points
#
# -z (not implemented yet) convert everything to relativistic format
#
# key=value allowed to override
#
# Example of use:
# rotcur2.py NGC2347 NGC2347.co.ringfit -o NGC2347.ha.ringfit -u -r try2.rotcur
#
#
# Example rotcur2.txt file: (first non-comment line needs to be the column names!!!)
#<<
## name rmax vsys inc w50 w90
## NGC2347 30.0 4421 50.2 416 445
#>>
#
from __future__ import print_function
from astropy.io import ascii
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import itertools
import math
import os,sys
version = "9-may-2017 PJT"
degrad = 57.2957795      # degrees per radian (180/pi)
c = 299792.458           # speed of light in km/s
parfile = 'rotcur2.txt'  # default galaxy-parameter table read by properties()
def properties(name, file='rotcur2.txt'):
    """
    Return the parameter row for a galaxy/object by name.

    The table's first non-comment line must contain the column names
    (name, rmax, vsys, inc, w50, w90); the matching astropy table row
    is printed and returned. Raises ValueError if *name* is absent.

    (A dead ``if True/else`` fallback returning a hard-coded dict was
    removed — the else branch was unreachable.)
    """
    data = ascii.read(file)
    names = data['name'].data.tolist()
    idx = names.index(name)
    print(data[idx])
    return data[idx]
def rotcurtab(file):
    """Read a rotcur ``tab=`` output file and return it sorted by radius.

    The NEMO 'tabcomment' tool is run first to strip comments into a
    temporary ``<file>.tmp`` copy, which is then parsed with astropy.
    (A dead ``if True/else`` branch was removed.)

    NOTE(review): the file name is interpolated straight into a shell
    command, so names containing shell metacharacters will misbehave.
    """
    os.system('tabcomment %s - > %s.tmp' % (file, file))
    data = ascii.read('%s.tmp' % file)
    data.sort('col1')
    return data
def get_ringplot(data, efactor=1):
    """
    Extract (radius, vrot, vrot_error) from a ringfit-format table.

    Column layout: radius [arcsec], vrot, error, vrad, error, vsys, error.
    Errors are scaled by *efactor*.
    """
    radius = data['col1']
    vrot = data['col2']
    vrot_err = data['col3'] * efactor
    # The remaining columns are read so a malformed table still fails
    # loudly, even though they are not part of the return value.
    vrad = data['col4']
    vrad_err = data['col5'] * efactor
    vsys = data['col6']
    vsys_err = data['col7'] * efactor
    return (radius, vrot, vrot_err)
def get_rotcur(data, efactor=1):
    """
    Extract (radius, vrot, vrot_error) from a rotcur-format table.
    The error column is scaled by *efactor*.
    """
    return (data['col1'], data['col4'], data['col5'] * efactor)
def get_velfit(data, efactor=1):
    """
    Extract (radius, vt, vt_error) from a runvelfitss07 '.p' table.

    Column layout: r npt vt eVt Vr eVr Vm,t eVm,t Vm,r eVm,r.
    Note: *efactor* is accepted for signature symmetry with the other
    readers but is not applied here (matching the original behaviour).
    """
    return (data['col1'], data['col3'], data['col4'])
def junk():
    # NOTE(review): apparently dead leftover plotting code — it references
    # names (fig, r, inc, d_inc, vrms, v, d_v, vsini, xpos, d_xpos, ypos,
    # d_ypos, vsys, d_vsys, pa, d_pa, rmax, ax1) that are not defined in
    # this scope, so calling it raises NameError. Kept verbatim; looks
    # like an eight-panel rotcur diagnostic plot salvaged from another
    # script.
    ax1.scatter(r,inc)
    ax1.errorbar(r,inc,yerr=d_inc,fmt='ro')
    ax1.set_title('Inclination (arcsec)')
    ax1.xaxis.label.set_size(10)
    ax1.yaxis.label.set_size(10)
    ax1.set_xlim([0,rmax])
    ax1.set_ylim([0,90])
    ##
    ax2 = fig.add_subplot(2,4,8)
    #ax2.set_title('RMS velocities in ring')
    ax2.scatter(r,vrms)
    ax2.set_title('RMS (km/s)')
    ax2.xaxis.label.set_size(10)
    ax2.yaxis.label.set_size(10)
    ax2.set_xlim([0,rmax])
    ax2.set_ylim([0,10])
    ##
    ax3 = fig.add_subplot(2,4,1)
    ax3.scatter(r,v)
    ax3.errorbar(r,v,yerr=d_v,fmt='ro')
    ax3.set_title('Vrot (km/s)')
    ax3.xaxis.label.set_size(10)
    ax3.yaxis.label.set_size(10)
    ax3.set_xlim([0,rmax])
    ax3.set_ylim([0,110])
    ##
    ax4 = fig.add_subplot(2,4,5)
    #ax2.set_title('V.sin(INC)')
    ax4.scatter(r,vsini)
    ax4.set_xlabel('Radius (arcsec)')
    ax4.set_title('V.sin(i) (km/s)')
    ax4.xaxis.label.set_size(10)
    ax4.yaxis.label.set_size(10)
    ax4.set_xlim([0,rmax])
    ax4.set_ylim([0,110])
    ##
    ax5 = fig.add_subplot(2,4,3)
    ax5.set_title('X-center')
    ax5.scatter(r,xpos)
    ax5.errorbar(r,xpos,yerr=d_xpos,fmt='ro')
    ax5.xaxis.label.set_size(10)
    ax5.yaxis.label.set_size(10)
    ax5.set_xlim([0,rmax])
    ax5.set_ylim([150,400])
    #
    ax6 = fig.add_subplot(2,4,7)
    ax6.set_title('Y-center')
    ax6.scatter(r,ypos)
    ax6.errorbar(r,ypos,yerr=d_ypos,fmt='ro')
    ax6.xaxis.label.set_size(10)
    ax6.yaxis.label.set_size(10)
    ax6.set_xlim([0,rmax])
    ax6.set_ylim([250,500])
    #
    ax7 = fig.add_subplot(2,4,4)
    ax7.set_title('VSYS')
    ax7.scatter(r,vsys)
    ax7.errorbar(r,vsys,yerr=d_vsys,fmt='ro')
    ax7.set_xlim([0,rmax])
    ax7.set_ylim([140,190])
    ax7.xaxis.label.set_size(10)
    ax7.yaxis.label.set_size(10)
    #
    ax8 = fig.add_subplot(2,4,6)
    ax8.set_title('PA')
    ax8.scatter(r,pa)
    ax8.errorbar(r,pa,yerr=d_pa,fmt='ro')
    ax8.set_xlim([0,rmax])
    ax8.set_ylim([0,90])
    ax8.xaxis.label.set_size(10)
    ax8.yaxis.label.set_size(10)
    #
    plt.show()
    fig.savefig('junk.pdf')
def region_ds9(data,ds9,scale=0.0083333):
    """ create a ds9 region of the ellipses found in rotcur solution
    Also needs the scale to convert to pixels in a map

    data : rotcur table (columns: radius=col1, pa=col6, inc=col8,
           xpos=col10, ypos=col12); ds9 : output region file name;
    scale : degrees per pixel used to convert ring radii to pixels.
    """
    (xpos,ypos) = data['col10'],data['col12']
    inc = data['col8']
    pa = data['col6']
    # radius converted from arcsec to degrees
    r = data['col1']/3600.0
    # NOTE(review): 'min' shadows the builtin within this function
    maj = r / scale
    min = r*np.cos(inc/degrad) / scale
    if False:
        print("Center: ",xpos,ypos)
        print("PA,INC: ",pa,inc)
        print("Radius: ",r)
    r1='global color=green dashlist=8 3 width=1 font="helvetica 10 normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1\n'
    r2='image\n'
    #
    # File is opened/closed manually (not a with-block); an exception in
    # the loop would leave it unclosed.
    fp = open(ds9,'w')
    fp.write('# Region file format: DS9 version 4.1\n')
    fp.write('# Filename: smc.mom1f/fits\n')
    fp.write(r1)
    fp.write(r2)
    for i in range(len(data)):
        r3='ellipse(%f,%f,%f,%f,%f) # color=black width=2\n' % (xpos[i],ypos[i],min[i],maj[i],pa[i])
        print(r3)
        fp.write(r3)
    fp.close()
    print(data)
def region_cgdisp(data,ds9,scale=0.0083333):
    """ create an overlay file for miriad::cgdisp

    NOTE(review): unimplemented stub — the body contains only this
    docstring and the function returns None.
    """
def plabel(umode, scale):
    """Build the legend tag for a curve.

    'u' for rotcur tables, 'i' for ringfit tables, followed by '-o'
    (optical convention) or '-r' (radio convention).
    """
    fmt = 'u' if umode else 'i'
    conv = '-o' if scale else '-r'
    return fmt + conv
def print_usage(argv):
    """Print the command-line help for rotcur2 and terminate the process."""
    usage_lines = (
        "Multi-table rotation curve plotter and comparisons - version %s " % version,
        "Usage:",
        "%s name [key=val] [-u] [-i] [-r] [-o] curve1 [-u] [-i] [-r] [-o] curve2 ..." % argv[0],
        " name Required name, an ID grab default values from %s" % parfile,
        " key=val Optional re-assigned keyword",
        " -u rotcur type table (for this and all following tables until reset) ",
        " -i ringfit type table (for this and all following tables until reset) ",
        " -r radio convention (for this and all following tables until reset) [default] ",
        " -o optical convention (for this and all following tables until reset) ",
        " -l use lines",
        " -p use points",
        " -e plot error bars (for points only)",
        "Currently all curves are *plotted* in the radio convention",
        "",
        "In addition, for a limited number of keywords, a new value can be given:",
        " rmax",
        " vsys",
        " inc",
        " w50",
        " w90",
    )
    for usage_line in usage_lines:
        print(usage_line)
    sys.exit(0)
if __name__ == "__main__":
    print("LEN:",len(sys.argv))
    if len(sys.argv) < 2: print_usage(sys.argv)
    # First positional argument: galaxy name looked up in rotcur2.txt.
    gal = sys.argv[1]
    p = properties(gal)
    rmax = p['rmax']
    vsys = p['vsys']
    inc = p['inc']
    w50 = p['w50']
    w90 = p['w90']
    #
    fig = plt.figure()
    plt.title('%s : VSYS=%g INC=%g' % (gal,vsys,inc))
    ax = fig.add_subplot(1,1,1)
    # Per-curve mode flags; each -u/-i/-r/-o/-l/-p/-e flag stays in effect
    # for all following tables until it is reset by its counterpart.
    scale = False # scale from optical to radio convention? (-o and -r)
    umode = False # -u: rotcur format -i: ringfit format (default)
    lines = True # -l: lines -p: points
    errors = False
    colors = itertools.cycle(["r", "g", "b", "c"])
    for name in sys.argv[2:]:
        if name.find('=') > 0:
            print("EXEC: ",name)
            # NOTE(review): key=val overrides are applied by exec()'ing raw
            # command-line text — powerful but unsafe for untrusted input.
            exec(name)
            continue
        if name=='-o':
            scale = True
            continue
        if name=='-r':
            scale = False
            continue
        if name=='-u':
            umode = True
            continue
        if name=='-i':
            umode = False
            continue
        if name=='-l':
            lines = True
            continue
        if name=='-p':
            lines = False
            continue
        if name=='-e':
            errors = True
            continue
        # Anything that is not a flag or key=val is a curve table to plot.
        data = rotcurtab(name)
        if umode:
            (r1,v1,ve1) = get_rotcur(data) # 'u'
        else:
            (r1,v1,ve1) = get_ringplot(data) # 'i'
            #(r1,v1,ve1) = get_velfit(data) # 's'
        if scale:
            # optical -> radio convention conversion factor
            o2r = 1.0-2.0*vsys/c
            v1 = v1 * o2r
        n = len(data)
        print("Found %d radii for %s" % (n,name))
        if lines:
            ax.plot(r1,v1,label="%s[%s]" % (name,plabel(umode,scale)))
        else:
            color = next(colors)
            ax.scatter(r1,v1,label="%s[%s]" % (name,plabel(umode,scale)), color=color)
            if errors:
                ax.errorbar(r1,v1,yerr=ve1,color=color)
    # Pin the axes to start at the origin, keeping the auto upper limits.
    (rmin,rmax) = ax.get_xlim()
    (vmin,vmax) = ax.get_ylim()
    ax.set_xlim([0.0,rmax])
    ax.set_ylim([0.0,vmax])
    # Reference lines for the HI line widths, deprojected by inclination.
    sini = math.sin(inc*math.pi/180.0)
    v50 = 0.5*w50/sini
    v90 = 0.5*w90/sini
    print(v50,v90)
    ax.plot([0.9*rmax,rmax],[v50,v50],'k-',label='HI-W50',linestyle='-',linewidth=2)
    ax.plot([0.8*rmax,rmax],[v90,v90],'k-',label='HI-W90',linestyle='-',linewidth=2)
    ax.plot([0.0,0.1*rmax],[v50,v50],'k-',linestyle='-',linewidth=2)
    ax.plot([0.0,0.2*rmax],[v90,v90],'k-',linestyle='-',linewidth=2)
    ax.legend(loc='best',prop={'size':8})
    ax.set_xlabel('Radius (arcsec)')
    ax.set_ylabel('Velocity (km/s)')
    plt.savefig("rotcur2.png")
    plt.show()
| teuben/nemo | src/scripts/python/rotcur2.py | rotcur2.py | py | 9,965 | python | en | code | 53 | github-code | 13 |
27264462749 | #%%
import pandas as pd
import torch
from tqdm import tqdm
import ijson
from transformers import pipeline
#%%
# Dataset root and scratch directory for intermediate artefacts.
datasets_root = r"E:\social-bot-data\datasets\Twibot-20"
tmp_files_root = r"E:\social-bot-data\code\First-HGT-Detector\twibot-20\preprocess\tmp-files"
#%%
# Mapping table from string node ids to integer ids.
node2id_list = pd.read_csv(rf"{datasets_root}\node2id.csv", dtype={"node_id": str, "num_id": int})
# tweets: 1-33488192, users: 33488193-33713010
node2id = {}
for row in tqdm(node2id_list.iterrows(), desc="Generate node2id dict."):
    node2id[row[1]["node_id"]] = row[1]["num_id"]
#%% md
### Generate, in node-file order, the embedding vectors for all tweets
#%%
# RoBERTa feature-extraction pipeline on GPU 0; inputs truncated to 50 tokens.
tweet_feature_extract = pipeline('feature-extraction', model='roberta-base', tokenizer='roberta-base', device=0,
                                 padding=True, truncation=True, max_length=50, add_special_tokens=True)
#%%
tweet_tensors_dicts = []
with open(rf"{datasets_root}\mini-nodes-for-test.json", 'r', encoding="utf-8") as f:
    for record in tqdm(ijson.items(f, "item"), desc="Reading node.json with each item."):
        if record.get("text"):
            word_tensors = torch.tensor(tweet_feature_extract(record.get("text")))
            # Sum of the per-token vectors (768-dim) as the tweet embedding.
            each_tweet_tensor = torch.zeros(768)
            for each_word_tensor in word_tensors[0]:
                each_tweet_tensor += each_word_tensor
            tweet_tensors_dicts.append({"node_id": record.get("id"), "tweet_tensor": each_tweet_tensor})
# Attach numeric ids and order rows by them so tensor row == num_id order.
tweet_tensors_df = pd.DataFrame(tweet_tensors_dicts)
tweet_tensors_df = pd.merge(tweet_tensors_df, node2id_list, on="node_id", how="inner")
tweet_tensors_df.sort_values(by="num_id", inplace=True, ascending=True)
#%%
tweet_tensors_df.to_pickle(rf"{tmp_files_root}\tweet_tensors_df.pkl")
tweet_tensors = torch.stack(tweet_tensors_df["tweet_tensor"].tolist())
torch.save(tweet_tensors, rf"{tmp_files_root}\tweet_tensors.pt")
| jbk-xiao/First-HGT-Detector | twibot-20/preprocess/gen_tweets.py | gen_tweets.py | py | 1,841 | python | en | code | 1 | github-code | 13 |
1070663263 | import fnmatch
from pathlib import Path
from typing import Any

import yaml

from core.Constants import MigrationKey, LibPairKey
# A single YAML document parsed into a plain dict keyed by string.
# Fix: the original alias was ``dict[str, any]`` — ``any`` is the builtin
# function, not a type; ``typing.Any`` is the intended annotation.
DataItem = dict[str, Any]


class Db:
    """In-memory database of migration / library-pair records, each loaded
    from a YAML file under ``data_root``."""

    migrations: dict[str, DataItem]
    lib_pairs: dict[str, DataItem]
    _mapping: dict[str, dict[str, DataItem]]

    def __init__(self, data_root: str):
        self.data_root = data_root

    def load(self) -> None:
        """Read all YAML files and index the items by data-type key."""
        self.migrations = self.load_items("migration")
        self.lib_pairs = self.load_items("libpair")
        self._mapping = {
            MigrationKey: self.migrations,
            LibPairKey: self.lib_pairs,
        }

    def get_list(self, data_type: str):
        """Return every item of the given data type."""
        return self._mapping[data_type].values()

    def filter_list(self, data_type: str, filters: dict[str, str]):
        """Return the items of *data_type* matching ALL glob-style filters.

        (Local variable renamed — the original shadowed the builtin ``list``.)
        """
        items = self.get_list(data_type)
        for key, pattern in filters.items():
            items = [item for item in items if self.item_satisfies_filter(item, key, pattern)]
        return items

    def get_item(self, data_type: str, id: str) -> DataItem:
        """Look up a single item by its id."""
        return self._mapping[data_type][id]

    def load_items(self, data_folder: str) -> dict[str, DataItem]:
        """Load every ``*.yaml`` file in *data_folder* into an id -> item dict."""
        paths = Path(self.data_root, data_folder).glob("*.yaml")
        items = (self.load_item(p) for p in paths)
        return {item["id"]: item for item in items}

    @staticmethod
    def item_satisfies_filter(item: DataItem, filter_key: str, filter_value: str) -> bool:
        """True when ``item[filter_key]`` matches the glob *filter_value*.

        A list-valued property matches when any element matches; an empty
        list matches only the empty-string filter. (A stray dead ``pass``
        after the return was removed.)
        """
        prop = item[filter_key]
        if isinstance(prop, list):
            if not filter_value and not prop:
                # If the user passes empty string and the list property is empty, consider it matching
                return True
            return any(fnmatch.fnmatch(prop_item, filter_value) for prop_item in prop)
        else:
            return fnmatch.fnmatch(prop, filter_value)

    @staticmethod
    def load_item(yaml_path: Path) -> DataItem:
        """Parse one YAML file into a DataItem.

        (Local variable renamed — the original shadowed the builtin ``dict``.)
        """
        with open(yaml_path) as f:
            content = f.read()
        item: DataItem = yaml.safe_load(content)
        return item
| ualberta-smr/PyMigBench | code/db/Db.py | Db.py | py | 1,945 | python | en | code | 3 | github-code | 13 |
20428398138 | from Cells import *
import config
import random
import time
import math
from tkinter import messagebox
def all_children(wid):
    """Debug helper: query every item id on the given canvas-like widget.

    The result is intentionally discarded; the length print-out stays
    disabled.
    """
    item_ids = wid.find_all()
    # print(len(item_ids))
def getCoordinates(event):
    """Convert an event's pixel position into (column, row) grid indices."""
    return (event.x // config.SquareSize, event.y // config.SquareSize)
def clearWalls():
    # Reset every cell (except the start/end markers) to an empty white,
    # unvisited, wall-free cell. Only allowed while no generated maze is
    # present and no algorithm is running.
    if not config.MazeDrawn and not config.AlgoWorking:
        for i in range(len(config.Grid)):
            for j in range(len(config.Grid[0])):
                if config.Grid[i][j] != config.StartCell and config.Grid[i][j] != config.EndCell: #Cant use IsWall, edges blead to other cells
                    ChangeColorTo(config.Grid[i][j], "White")
                    config.Grid[i][j].SearchVisited = False
                    config.Grid[i][j].isWall = False
                    config.Grid[i][j].WallUp = False
                    config.Grid[i][j].WallDown = False
                    config.Grid[i][j].WallRight = False
                    config.Grid[i][j].WallLeft = False
        config.canvas.update()
def clearSearch():
    # Undo the visual/bookkeeping traces of a completed path search:
    # non-wall cells lose their SearchVisited/parent state and revert to
    # their pre-search colour. Walls and the start/end cells are kept.
    if not config.AlgoWorking:
        for i in range(len(config.Grid)):
            for j in range(len(config.Grid[0])):
                if config.Grid[i][j] != config.StartCell and config.Grid[i][j] != config.EndCell:
                    if not config.Grid[i][j].isWall:
                        config.Grid[i][j].SearchVisited = False
                        config.Grid[i][j].parentCell = 0
                        config.Grid[i][j].RevertColor()
        config.JustSearched = False
        config.StartCell.SearchVisited = False
        config.EndCell.SearchVisited = False
def replaceDrawCanvas():
    """Wipe the canvas and rebuild the grid as all-white drawable cells."""
    # Deleting everything first prevents a memory leak: canvas items piled
    # up and the program ran slower after every "clear".
    config.canvas.delete("all")
    for row in range(config.VCells):
        for col in range(config.HCells):
            # The Cell constructor draws its own square on creation.
            config.Grid[row][col] = Cell(col, row, config.canvas, config.SquareSize, config.root, "White", True)
def bindDrawingMode():
    """Enter manual wall-drawing mode (left-drag paints walls).

    Fix: the 'JustSearched' guard was a separate ``if``, so after showing
    the 'Clear Search' error the function still fell through and enabled
    drawing; it is now part of the elif chain, matching the guard style
    of bindPlaceStart/bindPlaceEnd. ('Plesse' typo also corrected.)
    """
    if config.JustSearched:
        messagebox.showerror("Error Notice: ", "Please 'Clear Search' before trying to draw new walls!")
    elif config.AlgoWorking:
        messagebox.showerror("Error Notice: ", "Please wait for the algorithm to stop working!")
    elif not config.DrawingMode:
        # Fresh drawing session: rebuild a blank drawable grid first.
        replaceDrawCanvas()
        config.DrawingMode = True
        config.MazeDrawn = False
        config.canvas.bind('<B1-Motion>', DrawingMode)
    else:
        # Already in drawing mode: just (re)attach the drag handler.
        config.canvas.bind('<B1-Motion>', DrawingMode)
def DrawingMode(event):
    # <B1-Motion> handler: paint a wall cell at the dragged-over grid
    # position, skipping the start/end cells, and close off the shared
    # edges of the four in-bounds neighbours.
    if (not config.AlgoWorking and not config.pausePlay) and config.DrawingMode:
        a = getCoordinates(event)
        (x, y) = a
        if x <= config.HCells - 1 and y <= config.VCells -1 and x >= 0 and y >= 0:
            if config.StartCell == None or a[0] != config.StartCell.x or a[1] != config.StartCell.y:
                if config.EndCell == None or a[0] != config.EndCell.x or a[1] != config.EndCell.y:
                    DrawCell = config.Grid[y][x]
                    tempChangeColorTo(DrawCell, "Black")
                    DrawCell.isWall = True
                    DrawCell.WallUp = True
                    DrawCell.WallDown = True
                    DrawCell.WallRight = True
                    DrawCell.WallLeft = True
                    # Mirror the wall on each neighbour's facing edge.
                    if (x >= 0 and x < config.HCells - 1): #Restricts the horizontal bounds
                        config.Grid[y][x+1].WallLeft = True
                    if (x >= 1 and x <= config.HCells - 1): #Restricts the horizontal bounds
                        config.Grid[y][x-1].WallRight = True
                    if (y >= 0 and y < config.VCells - 1):
                        config.Grid[y+1][x].WallUp = True
                    if (y > 0 and y <= config.VCells - 1):
                        config.Grid[y-1][x].WallDown = True
def bindPlaceStart():
    """Arm a one-shot click handler for placing the start cell.

    Fix: parenthesized the guard — ``and`` binds tighter than ``or``, so
    the original condition ``DrawingMode or MazeDrawn and not AlgoWorking
    and not JustSearched`` let DrawingMode alone bypass the AlgoWorking /
    JustSearched guards that the elif error branches clearly intend.
    """
    if (config.DrawingMode or config.MazeDrawn) and not config.AlgoWorking and not config.JustSearched:
        config.canvas.unbind('<B1-Motion>')
        config.canvas.bind('<Button-1>', PlaceStart)
    elif config.AlgoWorking:
        messagebox.showerror("Error Notice: ", "Please wait for the algorithm to finish working!")
    elif config.JustSearched:
        messagebox.showerror("Error Notice: ", "Please 'Clear Search' before trying to place a start cell")
    else:
        messagebox.showerror("Error Notice: ", "Please generate/manually draw a maze first before trying to place cells!")
def PlaceStart(event):
    # Click handler: move the start marker to the clicked cell, reverting
    # the previous marker's colour, then disarm the handler (one-shot).
    a = getCoordinates(event)
    if not config.Grid[a[1]][a[0]].isWall: #Fixes bug where start / cell was placed on a wall
        if (config.pausePlay or (not config.AlgoWorking and not config.pausePlay)):
            if config.StartCell != None:
                config.StartCell.RevertColor()
            config.StartCell = config.Grid[a[1]][a[0]]
            # Light blue marks the start cell.
            tempChangeColorTo(config.StartCell, "#4cdfff")
            config.canvas.unbind('<Button-1>')
def bindPlaceEnd():
    """Arm a one-shot click handler for placing the end cell.

    Fix: same operator-precedence bug as bindPlaceStart — the guard is
    now parenthesized so AlgoWorking/JustSearched actually block binding.
    """
    if (config.DrawingMode or config.MazeDrawn) and not config.AlgoWorking and not config.JustSearched:
        config.canvas.unbind('<B1-Motion>')
        config.canvas.bind('<Button-1>', PlaceEnd)
    elif config.AlgoWorking:
        messagebox.showerror("Error Notice: ", "Please wait for the algorithm to finish working!")
    elif config.JustSearched:
        messagebox.showerror("Error Notice :", "Please 'Clear Search' before trying to place an end cell")
    else:
        messagebox.showerror("Error Notice: ", "Please generate/manually draw a maze before trying to place cells!")
def PlaceEnd(event):
    # Click handler: move the end marker to the clicked cell, reverting
    # the previous marker's colour, then disarm the handler (one-shot).
    a = getCoordinates(event)
    if not config.Grid[a[1]][a[0]].isWall:
        if (config.pausePlay or (not config.AlgoWorking and not config.pausePlay)):
            if config.EndCell != None:
                config.EndCell.RevertColor()
            config.EndCell = config.Grid[a[1]][a[0]]
            # Orange marks the end cell.
            tempChangeColorTo(config.EndCell, "#ffb763")
            config.canvas.unbind('<Button-1>')
def adjustSpeed(value):
    """Slider callback: store the animation delay (ms) as an int."""
    new_speed = int(value)
    config.Speed = new_speed
def replaceGrid():
    """Wipe the canvas and rebuild the grid in the background colour."""
    # Deleting everything first prevents a memory leak: canvas items piled
    # up and the program ran slower after every "clear".
    config.canvas.delete("all")
    for row in range(config.VCells):
        for col in range(config.HCells):
            # The Cell constructor draws its own square on creation.
            config.Grid[row][col] = Cell(col, row, config.canvas, config.SquareSize, config.root, config.BackgroundColor, False)
def clearCanvas(HCells, VCells, start, canvas, root, BackgroundColor):
    # Full reset of the application state: rebuild the grid and clear all
    # mode flags and the start/end markers.
    # NOTE(review): all six parameters are unused — the function reads
    # everything from the config module instead.
    #print("Algoworking: ", config.AlgoWorking, "Drawing Mode: ", config.DrawingMode, "MazeDrawn: ", config.MazeDrawn)
    if config.pausePlay or (config.AlgoWorking == False and config.pausePlay == False):
        replaceGrid() ## This will be used in the findPossibleMoves Method
        config.Stack = [config.Grid[0][0]]
        config.AlgoWorking = False
        config.pausePlay = False
        config.DrawingMode = False
        config.MazeDrawn = False
        config.StartCell = None
        config.EndCell = None
def TrackPlacedColor(Cell):
    # Flash the cell orange for one animation tick before settling on
    # white; when animation is disabled (Speed == 0) or the algorithm is
    # stopped, just paint it white immediately.
    if config.Speed != 0 and config.AlgoWorking:
        config.canvas.itemconfig(Cell.SquareCell, fill = "Orange")
        config.root.after(config.Speed, config.canvas.update())
        config.canvas.itemconfig(Cell.SquareCell, fill = "White")
        Cell.color = "White"
    else:
        config.canvas.itemconfig(Cell.SquareCell, fill = "White")
        Cell.color = "White"
def ChangeColorTo(Cell, color):
    """Permanently recolor a cell: update the canvas item and the cell's
    stored colour attribute together."""
    config.canvas.itemconfig(Cell.SquareCell, fill=color)
    Cell.color = color
def tempChangeColorTo(Cell, color):
    """Recolor a cell on the canvas WITHOUT touching Cell.color, so
    RevertColor() can later restore the remembered colour."""
    config.canvas.itemconfig(Cell.SquareCell, fill=color)
def TrackColor(Cell):
    # Flash the cell blue for one animation tick, then settle on white —
    # used to visualize scanning. Does nothing when animation is off
    # (Speed == 0) or the algorithm has been stopped.
    if config.Speed != 0 and config.AlgoWorking:
        config.canvas.itemconfig(Cell.SquareCell, fill = "Blue")
        config.root.after(config.Speed, config.canvas.update())
        config.canvas.itemconfig(Cell.SquareCell, fill = "White")
        Cell.color = "White"
def ChangeColorBlue(Cell):
    """Paint a cell blue (current DFS frontier) and remember the colour."""
    # Simplified with De Morgan: 'AlgoWorking or (not AlgoWorking and not
    # pausePlay)' reduces to 'AlgoWorking or not pausePlay'.
    if config.AlgoWorking or not config.pausePlay:
        config.canvas.itemconfig(Cell.SquareCell, fill="Blue")
        Cell.color = "Blue"
def DebuggerColorChange(Cell):
    """Debug helper: paint a cell blue without updating Cell.color."""
    config.canvas.itemconfig(Cell.SquareCell, fill="Blue")
def findBadMoves(Cell, Grid, canvas):
    """Return ((cell, relation), ...) for the in-bounds neighbours of
    *Cell* that HAVE already been visited.

    Relations are "Right"/"Left"/"Bot"/"Top", checked in that order.
    The Grid/canvas parameters are unused; config.Grid is read directly
    (matching the original).
    """
    neighbours = []
    relations = []
    max_x = len(config.Grid[0]) - 1  # last valid column index
    max_y = len(config.Grid) - 1     # last valid row index
    for dx, dy, label in ((1, 0, "Right"), (-1, 0, "Left"), (0, 1, "Bot"), (0, -1, "Top")):
        nx = Cell.x + dx
        ny = Cell.y + dy
        if 0 <= nx <= max_x and 0 <= ny <= max_y and config.Grid[ny][nx].visited:
            neighbours.append(config.Grid[ny][nx])
            relations.append(label)
    return tuple(zip(neighbours, relations))
def openPossibleWall(Cell, Possibilities):
    """Knock down a randomly chosen shared wall between *Cell* and one of
    the (neighbour, relation) pairs in *Possibilities*.

    Returns the neighbour moved into, or None when the list is empty or
    the algorithm has been stopped.

    Fix: the original called random.randint(0, len-1) BEFORE checking the
    list length, raising ValueError on an empty list (HuntAndKill calls
    this without pre-checking emptiness).
    """
    if not config.AlgoWorking or len(Possibilities) == 0:
        return None
    ChosenCell, relation = Possibilities[random.randint(0, len(Possibilities) - 1)]
    # Open both sides of the shared edge so the passage is consistent.
    if relation == "Top":
        Cell.deleteTopWall()
        ChosenCell.deleteBotWall()
    elif relation == "Bot":
        Cell.deleteBotWall()
        ChosenCell.deleteTopWall()
    elif relation == "Left":
        Cell.deleteLeftWall()
        ChosenCell.deleteRightWall()
    elif relation == "Right":
        Cell.deleteRightWall()
        ChosenCell.deleteLeftWall()
    return ChosenCell
def findGoodMoves(Cell, Grid, canvas):
    """Return ((cell, relation), ...) for the in-bounds neighbours of
    *Cell* that have NOT been visited yet.

    Relations are "Right"/"Left"/"Bot"/"Top", checked in that order.
    The Grid/canvas parameters are unused; config.Grid is read directly
    (matching the original).
    """
    neighbours = []
    relations = []
    max_x = len(config.Grid[0]) - 1  # last valid column index
    max_y = len(config.Grid) - 1     # last valid row index
    for dx, dy, label in ((1, 0, "Right"), (-1, 0, "Left"), (0, 1, "Bot"), (0, -1, "Top")):
        nx = Cell.x + dx
        ny = Cell.y + dy
        if 0 <= nx <= max_x and 0 <= ny <= max_y and not config.Grid[ny][nx].visited:
            neighbours.append(config.Grid[ny][nx])
            relations.append(label)
    return tuple(zip(neighbours, relations))
def BinaryTreeSortBotRight(possibilities):
    """Keep only the (cell, relation) pairs pointing Right or Bot — the
    two directions the binary-tree maze algorithm is allowed to carve."""
    return [combo for combo in possibilities if combo[1] in ("Right", "Bot")]
def RecursiveBackTrack(Cell, Stack, canvas, root): #Recursive Back Track Algo
    # Randomized-DFS maze generator. Carves forward into random unvisited
    # neighbours (pushing onto config.Stack); on a dead end, pops back
    # until a cell with unvisited neighbours is found, then recurses.
    # Terminates (returning None) once config.Stack is empty.
    # NOTE(review): the 'len(config.Stack) >= 0' test is always true, so
    # the trailing 'return True' branch is unreachable.
    pauseStall(config.root) #Checks if pause is active, ifso, will freeze program until otherwise
    if config.AlgoWorking: #Needs thsi to fix the pause / play glitch. Where pause then clear then resume starts at where it previously left off
        GoodMoves = findGoodMoves(Cell, config.Grid, config.canvas)
        Cell.visited = True
        while len(config.Stack) != 0:
            GoodMoves = findGoodMoves(Cell, config.Grid, config.canvas)
            config.root.after(config.Speed, config.canvas.update())
            if (len(GoodMoves) > 0):
                ChosenCell = openPossibleWall(Cell, GoodMoves)
                config.Stack.append(ChosenCell)
                ChangeColorBlue(ChosenCell)
                Cell = ChosenCell
            else:
                while True:
                    Cell = config.Stack.pop()
                    ChangeColorTo(Cell, "White")
                    GoodMoves = findGoodMoves(Cell, config.Grid, config.canvas)
                    if len(GoodMoves) > 0 or len(config.Stack) == 0:
                        break
            config.root.after(config.Speed, config.canvas.update())
            if len(config.Stack) >= 0:
                return RecursiveBackTrack(Cell, config.Stack, config.canvas, config.root)
            else:
                return True
def RecursiveBackTrackButton():
    '''Button handler for the recursive-backtracker maze generator.

    Uses a stack: keeps pushing as it randomly walks the grid, and on a
    dead end pops until a cell with an untried direction is found. Only
    starts when no algorithm is running and no maze exists yet.'''
    if not (config.AlgoWorking or config.DrawingMode or config.MazeDrawn):
        config.AlgoWorking = True
        finished = RecursiveBackTrack(config.Stack[0], config.Stack, config.canvas, config.root)
        if config.AlgoWorking:
            # Completed without being cancelled: mark the maze as drawn.
            config.MazeDrawn = True
            config.AlgoWorking = False
        config.root.after(config.Speed, config.canvas.update())
def HuntAndKill(row, Cell, canvas, root):
    """Generate a maze with the hunt-and-kill algorithm.

    Random-walks from Cell carving into unvisited neighbours until a dead end,
    then scans ('hunts') the grid from row `row` downward for an unvisited
    cell, connects it to a visited neighbour, and recurses from there.
    """
    Finished = False
    if config.AlgoWorking:
        while not Finished and config.AlgoWorking:
            Cell.visited = True
            TrackPlacedColor(Cell)
            pauseStall(config.root) #Pause / Play mechanism
            GoodMoves = findGoodMoves(Cell, config.Grid, config.canvas)
            if (len(GoodMoves) > 0): #If it can keep finding a new move
                ChosenCell = openPossibleWall(Cell, GoodMoves) #Opens a possible wall and returns the cell it opened into
                config.root.after(config.Speed, config.canvas.update()) #Slows down the visual
                Cell = ChosenCell #Reassign; keep walking until a dead end is hit
            else: #Else we hunt for a new cell
                for i in range (row, len(config.Grid)): #Start at `row` so the hunt does not rescan finished rows
                    for j in range(len(config.Grid[0])):
                        pauseStall(config.root)
                        TrackColor(config.Grid[i][j]) #Shows left-to-right scanning
                        if config.Grid[i][j].visited == False: #Stops when it hits a new node
                            BadMoves = findBadMoves(config.Grid[i][j], config.Grid, config.canvas)
                            openPossibleWall(config.Grid[i][j], BadMoves) #Connect the new cell to a visited neighbour
                            return HuntAndKill(i, config.Grid[i][j], config.canvas, config.root) #Recurse from row i, where the hunt left off
                else: #for-else: the hunt found no unvisited cell -> maze complete
                    Finished = True
        else: #while-else: loop ended normally (finished, or algorithm stopped)
            return
def HuntAndKillButton():
    '''Similar to recursive back track. Walks grid until finds a dead end.
    When it finds dead end, walk row then columns until an unvisited node is found.
    Connect that node first with an adjacent visited node, then repeat
    the process with the newly retrieved node'''
    # Guard mirrors the other generator buttons: only start when idle, not in
    # drawing mode, and no maze already on screen. (Removed a leftover debug
    # print of the state flags that none of the sibling buttons had.)
    if config.AlgoWorking == False and not config.DrawingMode and not config.MazeDrawn:
        config.AlgoWorking = True
        HuntAndKill(0, config.Stack[0], config.canvas, config.root)
        if config.AlgoWorking:
            config.MazeDrawn = True
        config.AlgoWorking = False
def BinaryTreeAlgorithm():
    """Binary-tree maze generator with a south-east bias.

    Visits every cell in reading order and opens either its bottom or right
    wall, chosen among the south/east neighbours (visited or not).
    """
    for i in range(len(config.Grid)):
        for j in range(len(config.Grid[0])):
            if config.AlgoWorking:
                config.Grid[i][j].visited = True
                ChangeColorTo(config.Grid[i][j], "Orange")
                config.root.after(config.Speed, config.canvas.update())
                ChangeColorTo(config.Grid[i][j], "White")
                pauseStall(config.root)
                # Candidate openings: any south/east neighbour, visited or not.
                PossibleMoves = BinaryTreeSortBotRight(findGoodMoves(config.Grid[i][j], config.Grid, config.canvas) + findBadMoves(config.Grid[i][j], config.Grid, config.canvas))
                if len(PossibleMoves) > 0 and config.AlgoWorking: #config.AlgoWorking is a temporary fix to clear an error
                    ChosenCell = openPossibleWall(config.Grid[i][j], PossibleMoves)
                    ChosenCell.visited = True
                    TrackColor(ChosenCell)
            else:
                return #Shuts off if clear was performed
def BinaryTreeButton():
    """Run the binary-tree maze generator when the board is idle and empty.

    Very simple algorithm with a south-east bias: every node opens either its
    east or its south wall, producing an easily solved maze.
    """
    if config.AlgoWorking or config.DrawingMode or config.MazeDrawn:
        return
    config.AlgoWorking = True
    BinaryTreeAlgorithm()
    if config.AlgoWorking:
        config.MazeDrawn = True
    config.AlgoWorking = False
def PrimsAlgorithm():
    """Generate a maze with randomized Prim's algorithm.

    Grows the maze outward from the centre cell: keeps a frontier of cells
    adjacent to the maze, repeatedly picks a random frontier cell and joins
    it to an already-visited neighbour.
    """
    Cell = config.Grid[int(config.VCells / 2)][int(config.HCells / 2)] #Start from the centre of the grid
    FrontiereSet = [Cell]
    while len(FrontiereSet) > 0:
        pauseStall(config.root)
        if config.AlgoWorking:
            Cell.ChangeColor()
            Cell.visited = True
            FrontiereSet.remove(Cell)
            FrontiereAdjacents = findGoodMoves(Cell, config.Grid, config.canvas)
            for Combo in FrontiereAdjacents:
                if not Combo[0] in FrontiereSet: #Prevents duplicate frontier entries
                    FrontiereSet.append(Combo[0])
                    ChangeColorBlue(Combo[0])
            if len(FrontiereSet) > 0:
                config.root.after(config.Speed, config.canvas.update())
                ChosenFrontiere = random.choice(FrontiereSet)
                # Join the chosen frontier cell to an already-visited neighbour.
                VisitedPossibles = findBadMoves(ChosenFrontiere, config.Grid, config.canvas)
                openPossibleWall(ChosenFrontiere, VisitedPossibles)
                Cell = ChosenFrontiere
        else:
            return
def PrimsAlgorithmButton():
    """Run randomized Prim's maze generation when the board is idle and empty."""
    if config.AlgoWorking or config.DrawingMode or config.MazeDrawn:
        return
    config.AlgoWorking = True
    PrimsAlgorithm()
    if config.AlgoWorking:
        config.MazeDrawn = True
    config.AlgoWorking = False
def SidewinderAlgorithm():
    """Generate a maze with the sidewinder algorithm.

    The top row becomes one open corridor; every other row is carved in
    random 'runs' that each open exactly one passage north into the row above.
    """
    for i in range(len(config.Grid[0])): #Takes care of the first row: carve one open corridor
        pauseStall(config.root)
        if config.AlgoWorking:
            config.Grid[0][i].visited = True
            if i < len(config.Grid[0]) - 1:
                openPossibleWall(config.Grid[0][i], [[config.Grid[0][i+1], "Right"]])
            TrackPlacedColor(config.Grid[0][i])
        else:
            return
    for i in range(1, len(config.Grid)):
        TempSet = [config.Grid[i][0]] #Current run; 2d list because openPossibleWall expects one
        config.Grid[i][0].visited = True
        TrackPlacedColor(config.Grid[i][0])
        for j in range(1, len(config.Grid[0])):
            pauseStall(config.root)
            if config.AlgoWorking:
                config.Grid[i][j].visited = True
                MoveForward = random.choice([True, False]) #Coin flip: extend the run or close it
                if MoveForward:
                    TempSet.append(config.Grid[i][j])
                    ChangeColorTo(config.Grid[i][j], "Blue")
                    openPossibleWall(config.Grid[i][j-1], [[config.Grid[i][j], "Right"]])
                else:
                    # Close the run: open one passage north from a random member.
                    for thing in TempSet:
                        ChangeColorTo(thing, "White")
                    ChosenOpening = random.choice(TempSet)
                    openPossibleWall(ChosenOpening, [[config.Grid[ChosenOpening.y - 1][ChosenOpening.x], "Top"]])
                    TempSet = [config.Grid[i][j]] #Start a new run at the current cell
                    ChangeColorTo(config.Grid[i][j], "Blue")
                config.root.after(config.Speed, config.canvas.update())
            else:
                return
        if len(TempSet) > 0: #The row ended mid-run: still open one passage north
            ChosenOpening = random.choice(TempSet)
            openPossibleWall(ChosenOpening, [[config.Grid[ChosenOpening.y-1][ChosenOpening.x], "Top"]])
            for thing in TempSet:
                ChangeColorTo(thing, "White")
def SidewinderButton():
    """Run the sidewinder maze generator when the board is idle and empty."""
    if config.AlgoWorking or config.DrawingMode or config.MazeDrawn:
        return
    config.AlgoWorking = True
    SidewinderAlgorithm()
    if config.AlgoWorking:
        config.MazeDrawn = True
    config.AlgoWorking = False
def DjikstrasAlgorithm():
    """Animate Dijkstra's shortest-path search from StartCell to EndCell.

    Repeatedly expands the unvisited cell with the smallest tentative
    distance, relaxing each wall-free neighbour, then traces parent pointers
    back from the end cell to paint the path yellow.
    """
    Curr = config.StartCell
    Curr.distance = 0
    Unvisited = [Curr]
    End = config.EndCell
    while (Curr != End):
        if config.AlgoWorking: #Fixes bug where it continued to draw after being stopped
            X = Curr.x
            Y = Curr.y
            pauseStall(config.root)
            # Relax each in-bounds neighbour not blocked by a wall.
            # NOTE(review): the neighbour's distance is overwritten without a
            # min() check; a queued cell re-reached from a farther cell gets a
            # larger distance — confirm this cannot happen before expansion.
            if not Curr.WallUp and Y != 0 and config.AlgoWorking:
                if not config.Grid[Y-1][X].SearchVisited:
                    config.Grid[Y-1][X].distance = Curr.distance + 1
                    if config.Grid[Y-1][X] not in Unvisited:
                        Unvisited.append(config.Grid[Y-1][X])
                    config.Grid[Y-1][X].parentCell = Curr
                    if config.Grid[Y-1][X] != config.EndCell:
                        tempChangeColorTo(config.Grid[Y-1][X], "Blue") #Doesn't alter root color; allows clear-search
            if not Curr.WallRight and X != config.HCells - 1 and config.AlgoWorking:
                if not config.Grid[Y][X+1].SearchVisited:
                    config.Grid[Y][X+1].distance = Curr.distance + 1
                    if config.Grid[Y][X+1] not in Unvisited:
                        Unvisited.append(config.Grid[Y][X+1])
                    config.Grid[Y][X+1].parentCell = Curr
                    if config.Grid[Y][X+1] != config.EndCell:
                        tempChangeColorTo(config.Grid[Y][X+1], "Blue")
            if not Curr.WallLeft and X != 0 and config.AlgoWorking:
                if not config.Grid[Y][X-1].SearchVisited:
                    config.Grid[Y][X-1].distance = Curr.distance + 1
                    if config.Grid[Y][X-1] not in Unvisited:
                        Unvisited.append(config.Grid[Y][X-1])
                    config.Grid[Y][X-1].parentCell = Curr
                    if config.Grid[Y][X-1] != config.EndCell:
                        tempChangeColorTo(config.Grid[Y][X-1], "Blue")
            if not Curr.WallDown and Y != config.VCells -1 and config.AlgoWorking:
                if not config.Grid[Y+1][X].SearchVisited: #Ensures unvisited node
                    config.Grid[Y+1][X].distance = Curr.distance + 1
                    if config.Grid[Y+1][X] not in Unvisited:
                        Unvisited.append(config.Grid[Y+1][X])
                    config.Grid[Y+1][X].parentCell = Curr
                    if config.Grid[Y+1][X] != config.EndCell:
                        tempChangeColorTo(config.Grid[Y+1][X], "Blue")
            Curr.SearchVisited = True
            Unvisited.remove(Curr)
            config.root.after(config.Speed, config.canvas.update())
            if len(Unvisited) == 0: # Meaning no path was found
                messagebox.showerror("Error Notice: ", "No Path Found :(")
                return
            # Select the unvisited cell with the smallest tentative distance.
            Curr = Unvisited[0]
            for Cell in Unvisited:
                if Cell.distance < Curr.distance:
                    Curr = Cell
        else:
            return
    # Trace parent pointers back from the end cell, painting the path yellow.
    temp = config.EndCell.parentCell
    while temp != config.StartCell and len(Unvisited) != 0:
        pauseStall(config.root)
        tempChangeColorTo(temp, "Yellow")
        config.root.after(10,config.canvas.update())
        temp = temp.parentCell
def DijkstrasAlgorithmButton():
    """Validate preconditions, then run Dijkstra's search between start and end.

    Shows an error dialog when a previous search result is still on screen or
    either endpoint is missing; otherwise runs DjikstrasAlgorithm once.
    """
    if config.JustSearched:
        # Fixed typo in the user-facing message ("Plesse" -> "Please").
        messagebox.showerror("Error Notice: ", "Please 'Clear Search' before trying to search again!")
    elif config.StartCell == None:
        messagebox.showerror("Error Notice: ", "Please place start node before starting a search!")
    elif config.EndCell == None:
        messagebox.showerror("Error Notice: ", "Please place end node before starting a search!")
    elif config.AlgoWorking == False and config.StartCell != None and config.EndCell != None:
        config.AlgoWorking = True
        DjikstrasAlgorithm()
        config.AlgoWorking = False
        config.JustSearched = True
    else:
        return
def aStarAlgorithm():
    """Animate an A* search from StartCell to EndCell.

    g-cost is the step count from the start; h-cost is the Manhattan distance
    to the end; expansion always takes the unvisited cell with the smallest
    f = g + h, then parent pointers are traced back to paint the path yellow.
    """
    Curr = config.StartCell
    Unvisited = [Curr]
    Curr.gCost = 0
    Curr.hCost = abs((config.EndCell.x - Curr.x)) + abs((config.EndCell.y - Curr.y))
    Curr.fCost = Curr.gCost + Curr.hCost
    while (Curr != config.EndCell):
        if config.AlgoWorking:
            X = Curr.x
            Y = Curr.y
            pauseStall(config.root)
            # Relax each in-bounds, wall-free neighbour; costs are only
            # lowered (guarded by the gCost comparison), unlike the Dijkstra
            # variant above.
            if not Curr.WallUp and Y != 0 and config.AlgoWorking:
                if not config.Grid[Y-1][X].SearchVisited:
                    if config.Grid[Y-1][X].gCost > Curr.gCost + 1:
                        config.Grid[Y-1][X].gCost = Curr.gCost + 1
                        config.Grid[Y-1][X].hCost = abs(config.EndCell.x - config.Grid[Y-1][X].x) + abs(config.EndCell.y - config.Grid[Y-1][X].y)
                        config.Grid[Y-1][X].fCost = config.Grid[Y-1][X].gCost + config.Grid[Y-1][X].hCost
                    if config.Grid[Y-1][X] not in Unvisited:
                        Unvisited.append(config.Grid[Y-1][X])
                    config.Grid[Y-1][X].parentCell = Curr
                    if config.Grid[Y-1][X] != config.EndCell:
                        tempChangeColorTo(config.Grid[Y-1][X], "Blue") #Doesn't alter root color; allows clear-search
            if not Curr.WallRight and X != config.HCells - 1 and config.AlgoWorking:
                if not config.Grid[Y][X+1].SearchVisited:
                    if config.Grid[Y][X+1].gCost > Curr.gCost +1:
                        config.Grid[Y][X+1].gCost = Curr.gCost + 1
                        config.Grid[Y][X+1].hCost = abs(config.EndCell.x - config.Grid[Y][X+1].x) + abs(config.EndCell.y - config.Grid[Y][X+1].y)
                        config.Grid[Y][X+1].fCost = config.Grid[Y][X+1].gCost + config.Grid[Y][X+1].hCost
                    if config.Grid[Y][X+1] not in Unvisited:
                        Unvisited.append(config.Grid[Y][X+1])
                    config.Grid[Y][X+1].parentCell = Curr
                    if config.Grid[Y][X+1] != config.EndCell:
                        tempChangeColorTo(config.Grid[Y][X+1], "Blue")
            if not Curr.WallLeft and X != 0 and config.AlgoWorking:
                if not config.Grid[Y][X-1].SearchVisited:
                    if config.Grid[Y][X-1].gCost > Curr.gCost +1:
                        config.Grid[Y][X-1].gCost = Curr.gCost +1
                        config.Grid[Y][X-1].hCost = abs(config.EndCell.x - config.Grid[Y][X-1].x) + abs(config.EndCell.y - config.Grid[Y][X-1].y)
                        config.Grid[Y][X-1].fCost = config.Grid[Y][X-1].gCost + config.Grid[Y][X-1].hCost
                    if config.Grid[Y][X-1] not in Unvisited:
                        Unvisited.append(config.Grid[Y][X-1])
                    config.Grid[Y][X-1].parentCell = Curr
                    if config.Grid[Y][X-1] != config.EndCell:
                        tempChangeColorTo(config.Grid[Y][X-1], "Blue")
            if not Curr.WallDown and Y != config.VCells -1 and config.AlgoWorking:
                if not config.Grid[Y+1][X].SearchVisited: #Ensures unvisited node
                    if config.Grid[Y+1][X].gCost > Curr.gCost +1:
                        config.Grid[Y+1][X].gCost = Curr.gCost +1
                        config.Grid[Y+1][X].hCost = abs(config.EndCell.x - config.Grid[Y+1][X].x) + abs(config.EndCell.y - config.Grid[Y+1][X].y)
                        config.Grid[Y+1][X].fCost = config.Grid[Y+1][X].gCost + config.Grid[Y+1][X].hCost
                    if config.Grid[Y+1][X] not in Unvisited:
                        Unvisited.append(config.Grid[Y+1][X])
                    config.Grid[Y+1][X].parentCell = Curr
                    if config.Grid[Y+1][X] != config.EndCell:
                        tempChangeColorTo(config.Grid[Y+1][X], "Blue")
            Curr.SearchVisited = True
            Unvisited.remove(Curr)
            config.root.after(config.Speed, config.canvas.update())
            if len(Unvisited) == 0: # Meaning no path was found
                messagebox.showerror("Error Notice: ", "No Path Found :(")
                return
            # Select the unvisited cell with the smallest f-cost.
            Curr = Unvisited[0]
            for Cell in Unvisited:
                if Cell.fCost < Curr.fCost:
                    Curr = Cell
        else:
            return
    # Trace parent pointers back from the end cell, painting the path yellow.
    temp = config.EndCell.parentCell
    while temp != config.StartCell and len(Unvisited) != 0:
        pauseStall(config.root)
        tempChangeColorTo(temp, "Yellow")
        config.root.after(10,config.canvas.update())
        temp = temp.parentCell
def aStarAlgorithmButton():
    """Validate preconditions, then run the A* search between start and end.

    Shows an error dialog when a previous search result is still on screen or
    either endpoint is missing; otherwise runs aStarAlgorithm once.
    """
    if config.JustSearched:
        # Fixed typo in the user-facing message ("Plesse" -> "Please").
        messagebox.showerror("Error Notice: ", "Please 'Clear Search' before trying to search again!")
    elif config.StartCell == None:
        messagebox.showerror("Error Notice: ", "Please place start node before starting a search!")
    elif config.EndCell == None:
        messagebox.showerror("Error Notice: ", "Please place end node before starting a search!")
    elif config.AlgoWorking == False and config.StartCell != None and config.EndCell != None:
        config.AlgoWorking = True
        aStarAlgorithm()
        config.AlgoWorking = False
        config.JustSearched = True
    else:
        return
def pausePlay():
    """Toggle the pause flag while an algorithm runs; force it off otherwise."""
    config.pausePlay = (not config.pausePlay) if config.AlgoWorking else False
def pauseStall(root):
    """Block (busy-wait) while the pause flag is set, re-checking every 50 ms."""
    while config.pausePlay:
        # NOTE(review): canvas.update() is CALLED here and its result passed
        # to after(), so the redraw happens immediately and after(50, ...)
        # only schedules a no-op — the same pattern appears throughout this
        # module; confirm it is intentional.
        config.root.after(50, config.canvas.update()) #Poll every 50 ms until pausePlay is cleared
####################################################################################################
# Debugger - Helps find valid walls
####################################################################################################
def moveUp(event):
    """Arrow-key handler: step the debug cursor to the cell above when open."""
    cur = config.CurrentCellDebug
    if cur.WallUp or cur.y <= 0:
        return
    cur.RevertColor()
    config.CurrentCellDebug = config.Grid[cur.y - 1][cur.x]
    DebuggerColorChange(config.CurrentCellDebug)
    config.root.after(config.Speed, config.canvas.update())
def moveLeft(event):
    """Arrow-key handler: step the debug cursor one cell left when open."""
    cur = config.CurrentCellDebug
    if cur.WallLeft or cur.x <= 0:
        return
    cur.RevertColor()
    config.CurrentCellDebug = config.Grid[cur.y][cur.x - 1]
    DebuggerColorChange(config.CurrentCellDebug)
    config.root.after(config.Speed, config.canvas.update())
def moveDown(event):
    """Arrow-key handler: step the debug cursor to the cell below when open.

    Bounds check is y < config.VCells - 1 (was `< config.VCells`): the target
    row is y + 1, so allowing y == VCells - 1 indexed one past the last row.
    """
    if config.CurrentCellDebug.WallDown == False and config.CurrentCellDebug.y < config.VCells - 1:
        config.CurrentCellDebug.RevertColor()
        config.CurrentCellDebug = config.Grid[config.CurrentCellDebug.y + 1][config.CurrentCellDebug.x]
        DebuggerColorChange(config.CurrentCellDebug)
        config.root.after(config.Speed, config.canvas.update())
def moveRight(event):
    """Arrow-key handler: step the debug cursor one cell right when open.

    Bounds check is x < config.HCells - 1 (was `< config.HCells`): the target
    column is x + 1, so allowing x == HCells - 1 indexed past the last column.
    """
    if config.CurrentCellDebug.WallRight == False and config.CurrentCellDebug.x < config.HCells - 1:
        config.CurrentCellDebug.RevertColor()
        config.CurrentCellDebug = config.Grid[config.CurrentCellDebug.y][config.CurrentCellDebug.x + 1]
        DebuggerColorChange(config.CurrentCellDebug)
        config.root.after(config.Speed, config.canvas.update())
def WallDebugger():
    """Bind the four arrow keys to the debug-cursor movement handlers."""
    bindings = {
        '<Left>': moveLeft,
        '<Right>': moveRight,
        '<Down>': moveDown,
        '<Up>': moveUp,
    }
    for key, handler in bindings.items():
        config.root.bind(key, handler)
def WallDebuggerButton():
    """Start the wall debugger with the cursor placed on the top-left cell."""
    config.CurrentCellDebug = config.Grid[0][0]
    WallDebugger()
#################################################################################################### | Chris-Abboud/Pathfinding-Maze-Generation-Visualizer | Helpers.py | Helpers.py | py | 33,930 | python | en | code | 1 | github-code | 13 |
30490235532 | import time
from openerp.osv import osv, fields
from openerp.tools.translate import _
class account_invoice(osv.Model):
    # Extend the core invoice model so printing goes through the custom
    # tax-invoice QWeb report instead of the stock report.
    _inherit="account.invoice"
    def invoice_print(self, cr, uid, ids, context=None):
        '''
        This function prints the invoice and mark it as sent, so that we can see more easily the next step of the workflow
        '''
        assert len(ids) == 1, 'This option should only be used for a single id at a time.'
        # Flag the invoice as sent before printing.
        self.write(cr, uid, ids, {'sent': True}, context=context)
        datas = {
            'ids': ids,
            'model': 'account.invoice',
            'form': self.read(cr, uid, ids[0], context=context)
        }
        # report_name must match the QWeb report's registered template id.
        return {
            'type': 'ir.actions.report.xml',
            'report_name': 'tax_invoice_qweb_report.tax_invoice_report_template_id',
            'datas': datas,
            'nodestroy' : True
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| genpexdeveloper/tax_invoice_qweb_report | account_invoice_extended.py | account_invoice_extended.py | py | 971 | python | en | code | 0 | github-code | 13 |
75052932176 | from mock import patch
from mock import MagicMock
from device_notifications.tests.utils import DeviceNotificationTestCase
from device_notifications.tests.utils import ConcreteTestDevice
from device_notifications.spi.gcm import gcm_send_message
class FakeGCMResponse(object):
    """Minimal stand-in for a GCM response object, used by the tests below."""
    # NOTE(review): these lists are class-level attributes shared across all
    # instances — fine for these read-only tests, but mutate with care.
    canonical = []
    not_registered = []
    failed = []
    needs_retry_ctl = False
    def needs_retry(self):
        # Tests flip needs_retry_ctl to simulate a retryable failure.
        return self.needs_retry_ctl
class GCMSendMessageTests(DeviceNotificationTestCase):
    """Tests for gcm_send_message with the GCM client patched out."""
    def setUp(self):
        super(GCMSendMessageTests, self).setUp()
        gcm_mock = MagicMock(name='gcm')
        self.send_mock = gcm_mock.send  # handle kept so tests can assert on send()
        # Replace the GCM class in the module under test with a factory that
        # returns our mock client for the duration of each test.
        self.GCM_patcher = patch(
            'device_notifications.spi.gcm.GCM',
            return_value=gcm_mock)
        self.GCM_patcher.start()
    def tearDown(self):
        self.GCM_patcher.stop()
        super(GCMSendMessageTests, self).tearDown()
    def test_basic_send_message(self):
        # gcm_send_message should call GCM.send for a device with an id.
        logger = MagicMock(name='logger')
        device = ConcreteTestDevice(device_id='testid')
        self.send_mock.return_value = FakeGCMResponse()
        gcm_send_message(device, 'test message', 0, logger)
        self.assertTrue(self.send_mock.called)
| roverdotcom/django-device-notifications | device_notifications/tests/gcm_tests.py | gcm_tests.py | py | 1,200 | python | en | code | 4 | github-code | 13 |
24150158393 | import numpy as np
# Load the precomputed shield table: np.save stored a Python dict (hence
# allow_pickle), mapping state/action keys to Q-max values.
prob_dict = np.load('shields_RAL/Qmax_values_0_td.npy', allow_pickle = True).item()
# prob_dict = np.load('shields/state_action_values_1_td.npy', allow_pickle = True).item()
print(prob_dict.keys())
# print(prob_dict[((0, 0, 6, 7, 0, 1), 0)])
print(type(prob_dict))
num_xbins=8  # number of discretisation bins along each spatial axis
def convert_state_to_int(state, num_xbins=8):
    """Flatten a 5-component discrete state into a single integer index.

    The components are treated as digits of a mixed-radix number with place
    values (num_xbins^3 * 2, num_xbins^2 * 2, num_xbins * 2, 2, 1). The
    default num_xbins=8 matches the module-level setting, so existing
    single-argument calls behave exactly as before; other bin counts are now
    supported as well.

    Args:
        state: sequence of 5 non-negative ints (the discretised state).
        num_xbins: number of bins per spatial axis (default 8).

    Returns:
        int: the flattened state index.
    """
    increments = [(num_xbins ** 3) * 2, (num_xbins ** 2) * 2, num_xbins * 2, 2, 1]
    # Plain Python dot product; np.multiply/np.sum added nothing here.
    return sum(component * place for component, place in zip(state, increments))
# Sanity check: print the flattened index of a sample state.
state = (0,0,0,2,1)
print(convert_state_to_int(state))
42670655062 | import re
from collections import deque
from pathlib import Path
def read_input() -> list[str]:
    """Read this puzzle's input file and return its lines, stripped.

    The input lives at ../inputs/<this file's stem>.txt relative to the
    directory containing this script.
    """
    filepath = Path(__file__).resolve()
    stem = filepath.name.split(".")[0]
    input_file = filepath.parent / f"../inputs/{stem}.txt"
    with open(input_file) as infile:
        return [line.strip() for line in infile.readlines()]
def get_state_and_instructions(input: list[str]) -> tuple[list[str], list[str]]:
    """Split the input at its first blank line into (crate drawing, move list)."""
    split_index = None
    for index, line in enumerate(input):
        if line:
            continue
        split_index = index
        break
    return input[:split_index], input[split_index + 1 :]
def get_starter_crate_stacks(input: list[str]) -> dict[int, deque]:
    """Parse the crate drawing into deques of crates keyed by 1-based stack id.

    The drawing's last line holds the stack ids; crate letters sit every 4
    characters starting at column 1. Each deque is ordered bottom -> top.
    """
    reversed_lines = input[::-1]
    id_line, crate_lines = reversed_lines[0], reversed_lines[1:]
    crate_stacks: dict[int, deque] = {
        int(token): deque() for token in id_line.split(" ") if token
    }
    for line in crate_lines:
        for stack_id, crate in enumerate(line[1::4], 1):
            if crate.strip():
                crate_stacks[stack_id].append(crate)
    return crate_stacks
def apply_instructions(
    crate_stacks: dict[int, deque], instructions: list[str], can_move_multi=False
) -> dict[int, deque]:
    """Mutate crate_stacks by applying each 'move N from A to B' instruction.

    With can_move_multi=False crates move one at a time (their order reverses,
    CrateMover 9000); with True the whole group keeps its original order
    (CrateMover 9001). Returns the same (mutated) dict for convenience.
    """
    pattern = re.compile(r"move (?P<qty>\d.*) from (?P<src>\d.*) to (?P<dst>\d.*)")
    for step in instructions:
        parsed = re.match(pattern, step)
        qty = int(parsed.group("qty"))
        src = int(parsed.group("src"))
        dst = int(parsed.group("dst"))
        picked = deque(crate_stacks[src].pop() for _ in range(qty))
        if can_move_multi:
            picked.reverse()  # restore original order for the multi-crate mover
        crate_stacks[dst].extend(picked)
    return crate_stacks
def get_top_crates(crate_stacks: dict[int, deque]) -> str:
    """Return the top (last) crate of each stack, concatenated in key order.

    Raises IndexError if any stack is empty, as the original did.
    """
    # Only the stacks are used, so iterate .values() rather than .items().
    return "".join(stack[-1] for stack in crate_stacks.values())
def run_part_1(input: list[str]) -> str:
    """Solve part 1: CrateMover 9000 semantics (one crate moved at a time)."""
    drawing, moves = get_state_and_instructions(input)
    stacks = get_starter_crate_stacks(drawing)
    apply_instructions(stacks, moves, can_move_multi=False)
    return get_top_crates(stacks)
def run_part_2(input: list[str]) -> str:
    """Solve part 2: CrateMover 9001 semantics (crates moved as one group)."""
    drawing, moves = get_state_and_instructions(input)
    stacks = get_starter_crate_stacks(drawing)
    apply_instructions(stacks, moves, can_move_multi=True)
    return get_top_crates(stacks)
# Script entry point: solve both parts against the real puzzle input.
if __name__ == "__main__":
    input = read_input()
    print(run_part_1(input))  # SBPQRSCDF - correct
    print(run_part_2(input))  # RGLVRCQSB - correct
| dlstadther/advent_of_code_2022 | solutions/05.py | 05.py | py | 3,187 | python | en | code | 0 | github-code | 13 |
12117379413 | """
3. 需求:
定义函数,在电影列表中删除阿凡达2
定义函数,在汽车列表中删除雅阁
步骤:
-- 根据需求,写出函数。
-- 因为主体逻辑相同,核心算法不同.
所以使用函数式编程思想(分、隔、做)
创建通用函数delete_single
-- 在当前模块中调用
"""
from common.iterable_tools import IterableHelper
class Car:
    """Simple record describing a car listing."""

    def __init__(self, brand="", price=0, rank=""):
        # brand: model name; price: list price; rank: market segment.
        self.rank = rank
        self.price = price
        self.brand = brand
class Movie:
    """Simple record describing a movie listing."""

    def __init__(self, name="", type="", actor=""):
        # `type` shadows the builtin, but the keyword-argument name is part of
        # the public interface, so it is kept.
        self.actor = actor
        self.type = type
        self.name = name
# Sample car listings used by the deletion demo at the bottom of the module.
list_car = [
    Car("五菱宏光", 46000, "微面"),
    Car("迈腾", 19000, "中型车"),
    Car("雅阁", 170000, "中型车"),
]
# Sample movies used by the deletion demo.
list_movie = [
    Movie("独行月球", "搞笑", "沈腾"),
    Movie("阿凡达2", "冒险", "萨姆·沃辛顿"),
    Movie("万里归途", "战争", "张译"),
    Movie("疯狂72小时", "搞笑", "闫妮"),
]
"""
def delete_single01():
for i in range(len(list_movie)):
if list_movie[i].name == "阿凡达2":
del list_movie[i]
return True
return False
def delete_single02():
for i in range(len(list_car)):
if list_car[i].brand == "雅阁":
del list_car[i]
return True
return False
print(delete_single01())
def condition01(item):
return item.name == "阿凡达2"
def condition02(item):
return item.brand == "雅阁"
def delete_single(iterable,condition):
for i in range(len(iterable)):
# if list_movie[i].name == "阿凡达2":
# if condition01(iterable[i]):
# if condition02(iterable[i]):
if condition(iterable[i]):
del iterable[i]
return True
return False
print(delete_single(list_movie, condition01))
print(delete_single(list_movie, lambda m: m.type == "战争"))
"""
print(IterableHelper.delete_single(list_movie, lambda m: m.type == "战争")) | 15149295552/Code | Month07/day14_python/homework/exercise02.py | exercise02.py | py | 2,080 | python | en | code | 1 | github-code | 13 |
12355796803 | import json
def split_by_brackets(str1):
    """Return the text after the first '{' in str1 (up to the next '{'), or ''.

    Args:
        str1: a chunk of log text.

    Returns:
        str: the segment following the first '{', or '' when no '{' exists.
    """
    try:
        return str1.split('{')[1]
    except IndexError:
        # No '{' present: split() produced a single element. Narrowed from a
        # bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        return ''
def get_ydas_data(file):
    """Parse a ydas log file into a list of dicts.

    The log is a stream of '{...}' records: the text is split on '}', each
    chunk is reduced to the text after its '{', empty chunks are dropped, and
    the remainder is evaluated as a dict literal.

    SECURITY NOTE: eval() executes arbitrary code from the log file. If the
    file can ever come from an untrusted source, switch to ast.literal_eval.

    Args:
        file: path to the log file.

    Returns:
        list[dict]: one dict per record.
    """
    # with-statement guarantees the file handle is closed even on error
    # (the original leaked it if parsing raised before f.close()).
    with open(file, 'r') as f:
        content = f.read()
    f_content_list = content.split('}')
    f_content_list = list(map(lambda x: split_by_brackets(x), f_content_list))
    f_content_list = list(filter(lambda x: x != '', f_content_list))
    f_content_list = list(map(lambda x: eval('{' + x + '}'), f_content_list))
    return f_content_list
# Raw ydas log for the week 2022-03-22 .. 03-28, parsed into dict records.
file2 = r'E:\广州项目\acceptydas_bk_0322_0328.log' #2
f_content_list = get_ydas_data(file2)
# Map from station name to the platform/track codes belonging to it.
CMCS_STATION_MAP = {
    "员村": [101, 102],
    "天河公园": [104, 144, 103, 143],
    "棠东": [106, 105],
    "黄村": [108, 107],
    "大观南路": [110, 109],
    "天河智慧城": [112, 111],
    "神舟路": [114, 113],
    "科学城": [116, 115],
    "苏元": [118, 117],
    "水西": [120, 119],
    "长平": [122, 121],
    "金坑": [124, 123, 145, 146],
    "镇龙西": [126, 147, 125],
    "镇龙": [128, 127],
    "中新": [130, 129],
    "坑贝": [132, 131],
    "凤岗": [134, 133],
    "朱村": [136, 150, 135, 149],
    "山田": [138, 151, 152, 137],
    "钟岗": [140, 139],
    "增城广场": [142, 141],
    "0": [0, ],# next_station value reported while the train is stopped
    "1": [154,184,185],# codes before Yuancun station / after Zengcheng Square
}
# Flatten the map into the list of all known station codes.
exist_stations = []
for k, v in CMCS_STATION_MAP.items():
    for station in v:
        exist_stations.append(station)
# Collect (and print) every record whose now_station code is unknown,
# ignoring records with year == 0 (invalid timestamps).
not_exist_stations = []
for data in f_content_list:
    if data['now_station'] not in exist_stations and data['year'] != 0:
        print(data, "\n")
        not_exist_stations.append(data)
# print('not_exist_stations', not_exist_stations)
def write_ydas_txt(file, list1):
    """Write each element of list1 to `file`, one per line, suffixed with ','.

    Args:
        file: destination path (overwritten).
        list1: items to write; each is converted with str().
    """
    # with-statement guarantees the file is closed (and flushed) even if a
    # write fails; the original leaked the handle on error.
    with open(file, 'w') as f:
        for x in list1:
            f.write(str(x) + "," + "\n")
# Dump the unknown-station records next to the source log for inspection.
file2_2 = r'E:\广州项目\acceptydas_bk_0322_0328_result.txt'
write_ydas_txt(file2_2, not_exist_stations)
37529462510 | # Question 1) - Your function will take N arrays as Arguments and a string X, Integer Y,
# you must return a final list of all possible elements from all Arrays ;
# - Whose length is greater than length of string X by at-least twice,
# - Whose value contains the pattern string x
#
# Conditions for an element to be shortlisted;
# --------------------------------------------
# 1. The element in the list must not be == string X
# 2. The total number of elements in each list must be greater than 1
# 3. The elements in the list have to be Alphabets alone , no other special chars or types allowed
# 4. The max number of lists that can be passed is Integer Y
def func(argument1, argument2, **arguments):
    """Collect shortlisted elements from the given keyword-argument lists.

    An element is kept when: it contains argument1 case-insensitively, its
    length exceeds twice len(argument1) (which also rules out argument1
    itself), it is purely alphabetic, and its list holds more than one
    element. If more than argument2 lists are supplied, nothing matches.

    Args:
        argument1: the pattern string X.
        argument2: the maximum number of lists Y.
        **arguments: candidate lists of strings.

    Returns:
        list[str]: all shortlisted elements, in argument order.
    """
    # Hoisted out of the per-element filter: the list-count limit is a
    # property of the whole call, not of any single element.
    if len(arguments) > argument2:
        return []
    result = []
    for arg in arguments.values():
        if len(arg) <= 1:
            continue  # condition 2: each list must have more than one element
        result.extend(
            match for match in arg
            if argument1.lower() in match.lower()
            and len(match) > len(argument1) * 2
            # Condition 3: alphabets only. The original tested
            # `match.isalpha() or '' in match`, which is always true because
            # '' is a substring of every string, so non-alphabetic elements
            # slipped through.
            and match.isalpha()
        )
    return result
| valliammai-tech/Python | pythonProject/PythonEx1.py | PythonEx1.py | py | 1,081 | python | en | code | 0 | github-code | 13 |
38925663965 |
def read_parquet() -> None:
    """Load a sample Coinbase order-book parquet file and print its first rows."""
    import pandas as pd
    sample = 'utils/parquet/parquet_sample_files/COINBASE-BCH-USD-l2_book-1614500246.parquet'
    frame = pd.read_parquet(sample)
    print(frame.head(3))
def read_s3() -> None:
    """Read the FTX BTC-USD order-book parquet dataset from S3 and print its timestamps.

    Requires AWS credentials discoverable by s3fs.
    """
    import pyarrow.parquet as pq
    import s3fs

    s3 = s3fs.S3FileSystem()
    bucket = 's3://svoe.test.1/parquet/FTX/l2_book/BTC-USD'
    df = pq.ParquetDataset(bucket, filesystem=s3).read_pandas().to_pandas()
    print(df.timestamp)
    # Removed a leftover bare `wr.s3.store_parquet_metadata()` call (and its
    # awswrangler import): that function requires arguments (path, database,
    # table), so the call unconditionally raised TypeError.
| dirtyValera/svoe | utils/parquet/parquet_test.py | parquet_test.py | py | 652 | python | en | code | 12 | github-code | 13 |
26863182105 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.postgres.fields
class Migration(migrations.Migration):
    # Auto-generated migration: makes Chart.ordinals a nullable/blank
    # ArrayField whose elements are CharField(max_length=100).
    dependencies = [
        ('countries', '0003_auto_20150903_0156'),
    ]
    operations = [
        migrations.AlterField(
            model_name='chart',
            name='ordinals',
            field=django.contrib.postgres.fields.ArrayField(size=None, null=True, base_field=models.CharField(max_length=100), blank=True),
        ),
    ]
| sentinel-project/sentinel-app | sentinel/countries/migrations/0004_auto_20150903_0156.py | 0004_auto_20150903_0156.py | py | 532 | python | en | code | 0 | github-code | 13 |
9801550536 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 18 11:11:54 2022
@author: 983045
"""
from rdkit import Chem
test_mol = Chem.MolFromSmiles("CC(=O)NC1=C(C=C(C=C1)O)O")
def get_bonds(mol):
    """List every bond in `mol` as [begin_atom_idx, end_atom_idx, bond_type_name].

    Args:
        mol: an RDKit Mol.

    Returns:
        list[list]: one [int, int, str] triple per bond, e.g. [0, 1, 'SINGLE'].
    """
    all_bonds = []
    for b in mol.GetBonds():
        begin_idx = b.GetBeginAtomIdx()
        end_idx = b.GetEndAtomIdx()
        # repr() of a BondType looks like 'rdkit.Chem.rdchem.BondType.SINGLE';
        # the fifth dot-separated token is the plain name.
        bond_type = repr(b.GetBondType()).split('.')[4]
        all_bonds.append([begin_idx, end_idx, bond_type])
    return all_bonds
    # Removed: an unused `b = repr(a)` local and the intermediate tuple that
    # also fetched GetStereo() without ever using it.
a = get_bonds(test_mol)
'''
The 'get_bonds' function takes an RDkit mol object and returns a 2d array
detailing each bond. It provides the numbers of atoms either end of the bond
and the type of bond.
'''
def get_ring_atoms(mol):
    """Return the set of atom indices that belong to any ring in `mol`."""
    ring_info = mol.GetRingInfo()
    rings = ring_info.AtomRings()
    return {atom_idx for ring in rings for atom_idx in ring}
#b = get_ring_atoms(test_mol)
'''
The 'get_ring_atoms' function takes an RDkit mol object and returns a set. This
set contains the numbers of the atoms that comprise rings within the molecule.
Returns a set of which atoms make up rings within the molecule.
'''
def fragment_simple(mol, atom1, atom2):
    """Cut the atom1-atom2 bond and cap both cut ends with wildcard ('*') atoms.

    Returns a new Mol; the wildcards make it easy to reassemble fragments
    later. Per the module notes, chirality is NOT preserved.
    """
    rwmol = Chem.RWMol(mol)
    rwmol.RemoveBond(atom1, atom2)
    wildcard1 = rwmol.AddAtom(Chem.Atom(0))  # atomic number 0 == '*' wildcard
    wildcard2 = rwmol.AddAtom(Chem.Atom(0))
    rwmol.AddBond(atom1, wildcard1, Chem.BondType.SINGLE)
    rwmol.AddBond(atom2, wildcard2, Chem.BondType.SINGLE)
    return rwmol.GetMol()
'''
The 'fragment_simple' takes an RDkit mol object and the atom numbers of atoms
either side of the bond. The bond between these two atoms is then split and wildcard
atoms are added to each end of the cut bond - which are useful for reforming the molecule.
Returns a fragmented mol
This function is taken from:
http://www.dalkescientific.com/writings/diary/archive/2016/08/14/fragment_chiral_molecules.html
NOTE: This only works properly for achiral molecules and chirality should be preserved
somehow - allowing for the construction of even more unique molecules.
'''
def cut_bonds_to_core(mol):
    """Cut every bond joining a ring atom to a non-ring atom; return the SMILES.

    Only meaningful for molecules with a ring core: each ring-to-substituent
    bond is severed with fragment_simple, which caps the cut ends with '*'
    wildcard atoms.

    Args:
        mol: an RDKit Mol.

    Returns:
        str: SMILES of the fragmented molecule. When no ring/non-ring bonds
        exist the input's SMILES is returned unchanged (the original raised
        NameError in that case because frag_time was never bound).

    Changes from the original: removed debug prints and the unused
    wildcard_replacer / wildcard / wildcard_branch locals.
    """
    bonds = get_bonds(mol)
    ring_atoms = get_ring_atoms(mol)
    # A bond should be cut exactly when one (and only one) of its two atoms
    # is part of a ring — i.e. ring membership differs between the ends.
    bond_pairs = [
        (atom_one, atom_two)
        for atom_one, atom_two, _bond_type in bonds
        if (atom_one in ring_atoms) != (atom_two in ring_atoms)
    ]
    fragmented = mol
    for atom_one, atom_two in bond_pairs:
        # fragment_simple only appends wildcard atoms, so the original atom
        # indices stay valid across successive cuts (as in the original loop).
        fragmented = fragment_simple(fragmented, atom_one, atom_two)
    return Chem.MolToSmiles(fragmented)
'''
The 'cut_bonds_to_core' function takes a single RDkit mol object and returns a SMILES
where any bonds to a ring core have been cut. This will only work with ring core structures
and aliphatic molecules with chains must be considered differently.
Returns a smiles string of the snipped up molecule.
NOTE: No function for removing branches from aliphatic molecules currently exists.
'''
def has_rings(mol):
    """Name the ring sizes (5- to 8-membered) present in `mol`.

    Args:
        mol: an RDKit Mol.

    Returns:
        list[str]: names like '6-membered ring' for each matching SMARTS ring
        query, or ['N'] when none match. (The original tested
        len(ring_groups) — always 2 — so the no-ring branch was unreachable
        and an empty list leaked out instead of ['N'].)
    """
    ring_smarts = ["[r5]", "[r6]", "[r7]", "[r8]"]
    ring_names = ["5-membered ring", "6-membered ring", "7-membered ring", "8-membered ring"]
    ring_groups_in_mol = []
    for smarts, name in zip(ring_smarts, ring_names):
        pattern_smarts = Chem.MolFromSmarts(smarts)
        if mol.HasSubstructMatch(pattern_smarts) == True:
            ring_groups_in_mol.append(name)
    if ring_groups_in_mol:
        return ring_groups_in_mol
    return ["N"]
def get_atom_set(mol):
    """Return the set {1, ..., n} where n is the number of atoms in `mol`.

    NOTE(review): the returned indices are 1-based while RDKit atom indices
    are 0-based — confirm downstream usage before relying on them.
    """
    atom_count = sum(1 for _ in mol.GetAtoms())
    return set(range(1, atom_count + 1))
| DanielYyork/MChem- | functions.py | functions.py | py | 5,459 | python | en | code | 0 | github-code | 13 |
15309381938 | import numpy as np
import pandas as pd
import datetime as dt
from util.data_operations import get_dataset
from util.distance_operations import harversine, distance_df
from util.trip_enhancement import TripEnhancer, SNAP_TO_ROAD_KEY
from conf.settings import FilesConfig
# A gap of more than 7 minutes (in ms) between datapoints starts a new trip.
TRIP_DEFINITON = 7 * 60 * 1000
# Cutoff (~Oct 2017) for road-snapping trips.
# NOTE(review): create_trips compares this against year + month (not
# year + month/12), e.g. Jan 2017 gives 2018 — confirm the intended cutoff.
TRIP_RELEVANCE = 2017 + 10/12
SAVE = True  # module-level switch: persist outputs to CSV
def create_table(save=False):
    """Load the raw datapoints, sort them chronologically, optionally save.

    Args:
        save: when True, also write the sorted frame to the configured
            datapoints CSV path.

    Returns:
        pd.DataFrame: datapoints sorted by their integer timestamp column.
    """
    df = get_dataset()
    df["timestamp"] = df.timestamp.astype(int).values
    df = df.sort_values("timestamp")
    if save:
        df.to_csv(FilesConfig.FileNames.datapoints_csv, index=False)
    return df
def create_trips(df, save=False):
    """Segment the IN_VEHICLE datapoints of *df* into trips.

    A new trip starts whenever the gap between consecutive driving
    datapoints exceeds TRIP_DEFINITON. Trips with fewer than 5 datapoints
    are discarded. Trips starting after the fractional-year threshold
    TRIP_RELEVANCE additionally get road-snapped coordinates via the
    snap-to-road service (best effort: failures leave the enhanced
    coordinate lists empty).

    Parameters
    ----------
    df : pandas.DataFrame
        Output of create_table(); expected to contain 'likely_activity',
        'timestamp' (assumed ms since epoch — divided by 1000 below),
        'lat' and 'lng' columns.
    save : bool
        When True, persist the resulting trips table to the configured CSV.

    Returns
    -------
    pandas.DataFrame
        One row per trip with distance, start time, duration, raw and
        (optionally) road-snapped coordinate arrays.
    """
    trip_enhancer = TripEnhancer(SNAP_TO_ROAD_KEY)
    driving_df = df.query("likely_activity == 'IN_VEHICLE'").copy()
    driving_df["delta_time"] = [0] + list(driving_df.timestamp.values[1:] - driving_df.timestamp.values[:-1])
    # Each gap larger than TRIP_DEFINITON increments the running trip id.
    driving_df["trip_id"] = np.cumsum(driving_df.delta_time.values > TRIP_DEFINITON)
    trips = pd.DataFrame([], columns=["id", "distance", "time", "lat", "lng"])
    ids, distance, time, lats, lngs = [], [], [], [], []
    enhanced_lats, enhanced_lngs = [], []
    initial_unix_time, initial_date_time = [], []
    for trip in driving_df.trip_id.unique():
        trip_data = driving_df.query("trip_id == {}".format(trip))
        if len(trip_data) < 5:
            # Too few datapoints to be a meaningful trip; skip it.
            continue
        distance.append(distance_df(trip_data).apply(harversine, 1).sum())
        time.append(trip_data.timestamp.values[-1] - trip_data.timestamp.values[0])
        initial_unix_time.append(trip_data.timestamp.values[0])
        temp_time = int(trip_data.timestamp.values[0] / 1000)  # ms -> s
        start_dt = dt.datetime.fromtimestamp(temp_time)
        initial_date_time.append(start_dt)
        ids.append(trip)
        lats.append(trip_data.lat.values)
        lngs.append(trip_data.lng.values)
        # Bug fix: the original compared year + month (e.g. 2017 + 10 = 2027)
        # against the fractional-year threshold TRIP_RELEVANCE (~2017.83),
        # which was always true. The month must be scaled to a year fraction
        # to match how TRIP_RELEVANCE is defined (2017 + 10/12).
        if start_dt.year + start_dt.month / 12 > TRIP_RELEVANCE:
            try:
                temp = trip_enhancer.snap_road(pd.DataFrame({"lat": trip_data.lat.values, "lng": trip_data.lng.values}))
                enhanced_lats.append(list(temp.lat.values))
                enhanced_lngs.append(list(temp.lng.values))
            except Exception:
                # Best-effort enhancement: keep the trip, drop the snapped path.
                enhanced_lats.append([])
                enhanced_lngs.append([])
        else:
            enhanced_lats.append([])
            enhanced_lngs.append([])
    trips["id"] = ids
    trips["distance"] = distance
    trips["start_unix_time"] = initial_unix_time
    trips["start_date_time"] = initial_date_time
    trips["time"] = time
    trips["lat"] = lats
    trips["enhanced_lats"] = enhanced_lats
    trips["lng"] = lngs
    trips["enhanced_lngs"] = enhanced_lngs
    trips["n_datapoints"] = trips.apply(lambda x: len(x["lat"]), 1).values
    if save:
        trips.to_csv(FilesConfig.FileNames.trips_csv, index=False)
    return trips
def main():
    """Run the pipeline: build the datapoints table, then derive trips."""
    datapoints = create_table(save=SAVE)
    create_trips(datapoints, save=SAVE)


if __name__ == "__main__":
    main()
| RHDZMOTA/gmaps-data-analysis | main.py | main.py | py | 3,029 | python | en | code | 1 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.