seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
# Greedy string-shift: for every character except the last, spend budget k to
# turn it into 'a' whenever the forward distance (wrapping z -> a) is affordable;
# any leftover budget is applied entirely to the final character.
s = input()
k = int(input())
ans = []
for c in s[:-1]:
    # Forward shifts needed to reach 'a' from c (0 when c is already 'a').
    to_a = (ord('z') - ord(c) + 1) % 26
    if k < to_a:
        ans.append(c)  # can't afford to make it 'a'; keep it unchanged
    else:
        k -= to_a
        ans.append('a')
else:
    # for/else with no break: this always runs after the loop finishes.
    # Shift the last character forward by whatever budget remains.
    c = s[-1]
    x = ord(c) - ord('a')
    y = (k + x) % 26
    ans.append(chr(ord('a') + y))
print(''.join(ans))
| Aasthaengg/IBMdataset | Python_codes/p03994/s121699193.py | s121699193.py | py | 300 | python | en | code | 0 | github-code | 90 |
30072402830 | from django_filters import rest_framework as filters
from django_filters.utils import get_model_field
class BaseFilters(filters.FilterSet):
    """FilterSet base class that copies model-field ``help_text`` onto filters.

    django-filter does not propagate the model's ``help_text`` to generated
    filter fields, so it is attached manually here.  See:
    https://django-filter.readthedocs.io/en/master/guide/tips.html?highlight=help_text#adding-model-field-help-text-to-filters
    """

    @classmethod
    def filter_for_field(cls, f, field_name, lookup_expr='exact'):
        # Auto-generated filters: attach the model field's help_text.
        flt = super().filter_for_field(f, field_name, lookup_expr)
        flt.extra['help_text'] = f.help_text
        return flt

    @classmethod
    def get_filters(cls):
        # Explicitly declared filters: backfill help_text from the model
        # field only when the filter does not already define one.
        for field_name, flt in cls.declared_filters.items():
            if flt.extra.get('help_text'):
                continue
            model_field = get_model_field(cls._meta.model, field_name)
            if model_field:
                flt.extra['help_text'] = model_field.help_text
        return super().get_filters()
| liushiwen555/unified_management_platform_backend | utils/core/filters.py | filters.py | py | 997 | python | en | code | 0 | github-code | 90 |
20712281599 | import re
import itertools
# Parse input.txt into field-rule lines and comma-separated ticket rows.
with open('input.txt') as file:
    data = file.readlines()
data = [line.strip() for line in data]
fields, tickets = [], []
for line in data:
    if 'or' in line:
        # Rule lines look like "name: a-b or c-d".
        fields.append(line)
    elif ',' in line:
        # Ticket lines are comma-separated numbers (your ticket first,
        # then the nearby tickets).
        tickets.append(line.split(','))
def get_ranges(a, b, c, d):
    """Return all integers in [a, b] followed by all integers in [c, d].

    Both endpoints are inclusive, matching the "a-b or c-d" field ranges
    in the puzzle input.
    """
    # Idiomatic replacement for the two manual append loops.
    return list(range(a, b + 1)) + list(range(c, d + 1))
def get_error_rate():
    """Sum every nearby-ticket value that is valid for no field (AoC day 16 part 1).

    Reads the module-level ``fields`` and ``tickets`` lists parsed from
    input.txt; ``tickets[0]`` is your own ticket and is skipped.
    """
    sequences = []
    for row in fields:
        # Extract the four bounds of "name: a-b or c-d" directly as digits.
        # The old re.split('-|or|:', ...) broke on field names containing
        # "or" (e.g. "platform"), and the char[1] == 'm' workaround then
        # silently dropped those fields' ranges entirely.
        a, b, c, d = map(int, re.findall(r'\d+', row))
        sequences.append(get_ranges(a, b, c, d))
    # A set gives O(1) membership tests instead of scanning a combined list.
    valid_values = set(itertools.chain.from_iterable(sequences))
    error_rate_count = 0
    for row in tickets[1:]:
        for num in row:
            num = int(num)
            if num not in valid_values:
                error_rate_count += num
    return error_rate_count
if '__main__' == __name__:
    # Part 1 answer: the ticket scanning error rate.
    print(get_error_rate())
| tabers77/Advent-of-Code | 2020/day16/day16_p1.py | day16_p1.py | py | 1,072 | python | en | code | 1 | github-code | 90 |
4956362405 | import os, io
from subprocess import call
import requests
import base64
import yaml
import json
# Load Home Assistant secrets (Baidu body-analysis keys, HA token, camera URL).
with open("/home/pi/.homeassistant/secrets.yaml", 'r') as secrets:
    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on untrusted input; prefer yaml.safe_load here.
    secret = yaml.load(secrets)
apikey_baidu = secret['baidu_body_apikey']
secretkey_baidu = secret['baidu_body_secretkey']
ha_token = secret['token']
ffmpeg_input = secret['ffmpeg_input']
auth = 'Bearer ' + ha_token
# Home Assistant TTS service endpoint (Baidu TTS platform).
url = 'http://localhost:8123/api/services/tts/baidu_say'
timeout = 10  # max number of snapshot retries before giving up
headers = {
    'Authorization': auth,
    'content-type': 'application/json',
}
entity_id = 'media_player.vlc'
# Greeting messages (Chinese): group greeting, two named greetings,
# and a generic fallback used when gender confidence is low.
message1 = '欢迎你们'
message2 = '欢迎淡淡'
message3 = '欢迎哲哥'
message4 = '欢迎回家'
# Obtain a Baidu API access token via the client-credentials OAuth flow.
host_baidu_token = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials'
token_baidu = requests.post(host_baidu_token, data={'client_id':apikey_baidu, 'client_secret':secretkey_baidu}).json()
# Disabled alternative: fetch a snapshot from the YS7 cloud camera API instead.
# result = requests.post('https://open.ys7.com/api/lapp/device/capture', data={'accessToken':'','deviceSerial':'','channelNo':1}).json()
# if (result['code']=='200'):
#     imgurl = result['data']['picUrl']
# else:
#     imgurl = ''
# base64_data = base64.b64encode(io.BytesIO(requests.get(imgurl).content).read())
# Grab a single frame from the camera stream and base64-encode it.
call(["ffmpeg", "-i", ffmpeg_input, "-f", "image2", "-t", "0.001", "-y", "/home/pi/tmpimg/snapshot.jpg"])
with open("/home/pi/tmpimg/snapshot.jpg", "rb") as f:
    base64_data = base64.b64encode(f.read())
# Ask Baidu's body-attribute API for gender/age/glasses of detected people.
host_baidu_body = 'https://aip.baidubce.com/rest/2.0/image-classify/v1/body_attr'
result = requests.post(host_baidu_body, data={'access_token':token_baidu['access_token'],'image':base64_data, 'type': 'gender,age,glasses'}).json()
# Keep re-capturing until somebody is in frame, or the retry budget runs out.
while result['person_num'] == 0:
    call(["ffmpeg", "-i", ffmpeg_input, "-f", "image2", "-t", "0.001", "-y", "/home/pi/tmpimg/snapshot.jpg"])
    with open("/home/pi/tmpimg/snapshot.jpg", "rb") as f:
        base64_data = base64.b64encode(f.read())
    result = requests.post(host_baidu_body, data={'access_token':token_baidu['access_token'],'image':base64_data, 'type': 'gender,age,glasses'}).json()
    timeout = timeout-1
    if timeout == 0:
        quit()
print(result['person_info'])
# Pick a greeting based on head count and detected gender, then speak it
# through Home Assistant's TTS service on the VLC media player.
if result['person_num'] > 1:
    r = requests.post(url, data=json.dumps({'entity_id': entity_id, 'message': message1}), headers=headers)
elif result['person_info'][0]['attributes']['gender']['score'] < 0.75:
    # Low-confidence gender classification: use the generic greeting.
    r = requests.post(url, data=json.dumps({'entity_id': entity_id, 'message': message4}), headers=headers)
elif result['person_info'][0]['attributes']['gender']['name'] == "女性":
    r = requests.post(url, data=json.dumps({'entity_id': entity_id, 'message': message2}), headers=headers)
else:
    r = requests.post(url, data=json.dumps({'entity_id': entity_id, 'message': message3}), headers=headers)
| wittyfilter/homeassistant | recog_people.py | recog_people.py | py | 2,707 | python | en | code | 2 | github-code | 90 |
13306881071 | """
Kernel estimation methods.
"""
from numpy import atleast_1d as in1d, atleast_2d as in2d
from sklearn import neighbors
import numpy as np
ROOT_2PI = np.sqrt(2 * np.pi)  # Gaussian normalization constant sqrt(2*pi)
KERNEL_RADIUS_RATIO = 0.35     # default kernel bandwidth as a fraction of the cutoff radius
NUM_NEIGHBORS = 10             # default number of nearest neighbors in the model
class AdaptiveGaussianKernel(object):
    """
    Nearest neighbors method for estimating density or custom functions.
    """

    def __init__(self, dataset, values=None, k_neighbors=NUM_NEIGHBORS):
        """Set up the nearest neighbors model for evaluation.
        Arguments:
        dataset -- (n,2)-shaped array of spatial points
        Keyword arguments:
        values -- scalar values for each point in the dataset
        k_neighbors -- number of neighbors to consider in the model
        """
        dataset = in2d(dataset)
        # Accept (2,n) input by transposing to the expected (n,2) layout.
        if dataset.shape[0] == 2:
            dataset = dataset.T
        if values is None:
            # Unit weights: evaluate() then computes a density estimate.
            self.values = np.ones(dataset.shape[0])
        else:
            values = in1d(values)
            if values.ndim != 1:
                raise ValueError("values can be at most 1-dimensional")
            if values.size != dataset.shape[0]:
                raise ValueError("size mismatch with values (%d)" % values.size)
            self.values = values
        self.model = neighbors.NearestNeighbors(
                n_neighbors=k_neighbors,
                algorithm='kd_tree')
        self.model.fit(dataset)

    def _Gk(self, dists, radius, kfrac):
        """Gaussian kernel with bandwidth kfrac*radius, shifted so the
        weight is exactly zero at the cutoff radius."""
        H = kfrac * radius
        G = lambda x: np.exp(-x**2 / (2 * H**2)) / (ROOT_2PI * H)
        return G(dists) - G(radius)

    def evaluate(self, points, minrad=0.0, maxrad=100.0, kernel_func=None,
            n_outputs=1, mask_value=np.nan, kernel_ratio=KERNEL_RADIUS_RATIO,
            debug=None):
        """Evaluate the nearest-neighbors model at test points.
        Arguments:
        points -- (n,2)-shaped array of test points
        Keyword arguments:
        minrad -- minimum allowed kernel radius
        maxrad -- maximum allowed kernel radius
        kernel_func -- neighbor function that produces `n_outputs` values
        n_outputs -- number of outputs generated by the `kernel_func`
        mask_value -- value used for points that could not be evaluated
        kernel_ratio -- Gaussian kernel size as fraction of radius
        debug -- optional dict populated with per-point radius diagnostics
        Returns:
        (n,)-shaped array, or tuple of arrays (see Notes)
        Notes:
        If neither `values` nor `kernel_func` are provided, then this method
        computes a probability density estimate of the data points by default.
        If `values` was provided for the training set, then a weighted average
        of these data point values is calculated instead of the density
        estimate.
        Providing a kernel function as `kernel_func` can generate multiple
        output evaluations organized along the first axis of the evaluated
        matrix. The kernel function must have the following form:
        ```
        def foo(weights, values):
            ...
            return output
        ```
        where each argument is a (n,k)-shaped array where `n` is some number of
        test points, `k` is the number of nearest neighbors for that test point,
        and `output` is a (n_outputs,n)-shaped array (or (n,)-shaped if only one
        output). If `values` was not provided, then the argument passed will be
        the nearest-neighbor distances.
        """
        do_density = False
        if kernel_func is None:
            # Default kernel: plain weight sum -> (normalized) density estimate.
            kernel_func = lambda w, v: np.sum(w, axis=-1)
            do_density = True
            n_outputs = 1
        points = in2d(points)
        # Accept (2,n) test points by transposing to (n,2).
        if points.shape[0] == 2:
            points = points.T
        # Output starts fully masked; entries are overwritten as evaluated.
        L = np.zeros((n_outputs, points.shape[0])) + mask_value
        try:
            D, I = self.model.kneighbors(points)
            V = self.values[I]
        except ValueError:
            # Query failed (e.g. malformed points): return the masked output.
            return L
        # Separate adaptive test points from points that hit the bounds.
        # rad is each point's k-th (largest) neighbor distance.
        rad = np.sort(D)[:,-1].reshape(-1,1)
        ihigh = (rad > maxrad).nonzero()[0]
        iadapt = np.logical_and(rad >= minrad, rad <= maxrad).nonzero()[0]
        ilow = (rad < minrad).nonzero()[0]
        def eval_fixed_radius(index, radius):
            # Re-query with a fixed radius for points whose adaptive radius
            # fell outside [minrad, maxrad], then apply the kernel per point.
            dists, inds = self.model.radius_neighbors(points[index],
                    radius=radius)
            for i, (d, ix) in enumerate(zip(dists, inds)):
                vals = self.values[ix]
                g = self._Gk(d, radius, kernel_ratio)
                L[:,index[i]] = kernel_func(g, vals)
        if ihigh.size:
            eval_fixed_radius(ihigh, maxrad)
        if iadapt.size:
            # Fully vectorized path for points with an in-bounds radius.
            vals = V[iadapt]
            g = self._Gk(D[iadapt], rad[iadapt], kernel_ratio)
            L[:,iadapt] = kernel_func(g, vals)
        if ilow.size:
            eval_fixed_radius(ilow, minrad)
        L = L.squeeze()
        if debug is not None:
            # 0 = clamped to minrad, 1 = adaptive, 2 = clamped to maxrad.
            debug['rad'] = rad
            debug['adapt'] = a = rad.copy()
            a[ilow] = 0
            a[iadapt] = 1
            a[ihigh] = 2
        if do_density:
            if np.isnan(L).all():
                return L
            # Normalize so the finite entries sum to one.
            return L / L[np.isfinite(L)].sum()
        return L

    # Calling the instance evaluates the model directly.
    __call__ = evaluate
| jdmonaco/spikemaps | spikemaps/kernels.py | kernels.py | py | 5,093 | python | en | code | 0 | github-code | 90 |
41244018483 | import streamlit as st
from src.data_management import load_house_data
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import ppscore as pps
sns.set_style("whitegrid")
def page2_house_price_study():
    """Render the correlation-study page: dataset preview, correlation
    heatmaps (Spearman / Pearson / PPS) and per-variable scatterplots
    against SalePrice (business requirement 1)."""
    # load data
    df = load_house_data()
    # hard copied from sales price correlation study notebook
    vars_to_study = ['1stFlrSF', 'GarageArea', 'GrLivArea',
                     'KitchenQual', 'TotalBsmtSF', 'OverallQual', 'YearBuilt']
    st.write("## House Price Correlation Study")
    st.info(
        f"** Business requirement 1** \n\n"
        f"* The client is interested in discovering how the house "
        f" attributes correlate with the sale price."
    )
    # inspect data
    if st.checkbox("Inspect House Dataset"):
        st.write(
            f"* The dataset has {df.shape[0]} rows and {df.shape[1]} columns, "
            f"Find below the first 10 rows of the dataset.")
        st.write(df.head(10))
    st.write("---")
    st.write(
        f"We conducted a correlation study in the notebook to better "
        f" understand how the variables are correlated to the sales price.\n "
        f"We found the following most correlated "
        f" variables: **{vars_to_study}**"
    )
    # Text based on "SalePrice Correlation" notebook - "Conclusions" section
    st.info(
        f"**From the Scatterplot, coupled with the Correlation "
        f"and PPS analysis, we were able to draw the following insight:**\n "
        f"* There is mostly a positive correlation with houses of "
        f" higher overall quality material and finish of the "
        f" house with sales price.\n "
        f"* Large first floors, garages, above-ground living areas "
        f"and basements in square feet tend to sell for higher price.\n"
        f"* Recently built houses tend to sell at higher prices.\n\n "
    )
    st.info(
        f"* Our client expects a data visualisations of the correlated "
        f" variables against the sale price."
    )
    st.write(
        f"We used Spearman and Pearson Correlation and the "
        f"Power Predictive Score (PPS) Heatmap to demonstrate "
        f"variable correlations below"
    )
    # Code copied from 'SalePrice study' notebook - Correlation Study section
    # Checkbox widget displays the Spearman correlation Heatmap
    if st.checkbox("Spearman Correlations"):
        st.write(
            f"* This plot displays a monotonic correlation and a threshold "
            f" set to 0.6 (moderate correlation).\n"
        )
        df_corr_pearson, df_corr_spearman, pps_matrix = CalculateCorrAndPPS(df)
        heatmap_corr(df=df_corr_spearman, threshold=0.6,
                     figsize=(20, 12), font_annot=12)
    # Checkbox widget displays the Pearson correlation Heatmap
    # (typo fix: label previously read "Peason Correlations")
    if st.checkbox("Pearson Correlations"):
        st.write(
            f"* This plot displays a linear correlation and a threshold "
            f" set to 0.6 (moderate correlation).\n"
        )
        df_corr_pearson, df_corr_spearman, pps_matrix = CalculateCorrAndPPS(df)
        heatmap_corr(df=df_corr_pearson, threshold=0.6,
                     figsize=(20, 12), font_annot=15)
    # Checkbox widget displays the PPS Heatmap
    if st.checkbox("Predictive Power Score"):
        # typo fix: "btween" -> "between"
        st.write(
            f"* This plot displays correlation between variable x and y "
            f"and a threshold set to 0.15 (moderate correlation).\n"
        )
        df_corr_pearson, df_corr_spearman, pps_matrix = CalculateCorrAndPPS(df)
        heatmap_pps(df=pps_matrix, threshold=0.15,
                    figsize=(20, 12), font_annot=15)
    st.write("---")
    st.write(
        f"The scatterplots displays each selected variable "
        f" against the SalePrice. "
    )
    # Create a Dataframe for the Selected Variables
    # for study with the SalesPrice
    df_to_study = df.filter(vars_to_study + ['SalePrice'])
    if st.checkbox("Variable Correlation to Sale Price"):
        Var_corr_to_sale_price(df_to_study)
# Displays heatmaps for correlation and PPS
def heatmap_corr(df, threshold, figsize=(20, 12), font_annot=15):
    """Render a masked correlation heatmap in Streamlit.

    Cells on/above the diagonal and cells whose absolute correlation is
    below ``threshold`` are hidden.
    """
    if len(df.columns) > 1:
        # Bug fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the correct dtype here.
        mask = np.zeros_like(df, dtype=bool)
        mask[np.triu_indices_from(mask)] = True
        mask[abs(df) < threshold] = True
        fig, axes = plt.subplots(figsize=figsize)
        sns.heatmap(df, annot=True, xticklabels=True, yticklabels=True,
                    mask=mask, cmap='viridis',
                    annot_kws={"size": font_annot}, ax=axes,
                    linewidth=0.5
                    )
        axes.set_yticklabels(df.columns, rotation=0, fontsize=20)
        axes.set_xticklabels(df.columns, fontsize=20)
        plt.ylim(len(df.columns), 0)
        st.pyplot(fig)
def heatmap_pps(df, threshold, figsize=(20, 12), font_annot=15):
    """Render a Predictive Power Score heatmap in Streamlit.

    Cells whose absolute PPS is below ``threshold`` are hidden.
    """
    if len(df.columns) > 1:
        # Bug fix: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the correct dtype here.
        mask = np.zeros_like(df, dtype=bool)
        mask[abs(df) < threshold] = True
        fig, ax = plt.subplots(figsize=figsize)
        ax = sns.heatmap(df, annot=True, xticklabels=True, yticklabels=True,
                         mask=mask, cmap='rocket_r',
                         annot_kws={"size": font_annot},
                         linewidth=0.05, linecolor='grey')
        ax.set_yticklabels(df.columns, rotation=0, fontsize=20)
        ax.set_xticklabels(df.columns, fontsize=20)
        plt.ylim(len(df.columns), 0)
        st.pyplot(fig)
# Function to calculate correlations and PPS
def CalculateCorrAndPPS(df):
    """Compute the Pearson and Spearman correlation matrices plus the
    Predictive Power Score matrix (pivoted to an x-by-y grid)."""
    pearson = df.corr(method="pearson")
    spearman = df.corr(method="spearman")
    raw_pps = pps.matrix(df)
    pps_matrix = raw_pps.filter(['x', 'y', 'ppscore']).pivot(
        columns='x', index='y', values='ppscore')
    return pearson, spearman, pps_matrix
# Functions to display scatterplots to show correlations
def Var_corr_to_sale_price(df_to_study):
    """Draw one scatterplot per study variable against the SalePrice target."""
    target_var = 'SalePrice'
    feature_columns = df_to_study.drop([target_var], axis=1).columns.to_list()
    for feature in feature_columns:
        plot_scatter(df_to_study, feature, target_var)
        st.write("\n\n")
def plot_scatter(df, col, target_var):
    """Render a single seaborn scatterplot of *col* vs *target_var* in Streamlit."""
    figure, _ = plt.subplots(figsize=(8, 4))
    sns.scatterplot(data=df, x=col, y=target_var)
    plt.title(f"{col}", fontsize=20, y=1.05)
    st.pyplot(figure)
| Shida18719/heritage-housing-issues | app_pages/page_house_price_study.py | page_house_price_study.py | py | 6,285 | python | en | code | 0 | github-code | 90 |
29542913917 | # -*- coding: utf-8 -*-
# @Time : 2022/5/7 17:18
# @Author : 模拟卷
# @Github : https://github.com/monijuan
# @CSDN : https://blog.csdn.net/qq_34451909
# @File : 1901. 找出顶峰元素 II.py
# @Software: PyCharm
# ===================================
"""一个 2D 网格中的 顶峰元素 是指那些 严格大于 其相邻格子(上、下、左、右)的元素。
给你一个 从 0 开始编号 的 m x n 矩阵 mat ,其中任意两个相邻格子的值都 不相同 。找出 任意一个 顶峰元素 mat[i][j] 并 返回其位置 [i,j] 。
你可以假设整个矩阵周边环绕着一圈值为 -1 的格子。
要求必须写出时间复杂度为 O(m log(n)) 或 O(n log(m)) 的算法
示例 1:
输入: mat = [[1,4],[3,2]]
输出: [0,1]
解释: 3和4都是顶峰元素,所以[1,0]和[0,1]都是可接受的答案。
示例 2:
输入: mat = [[10,20,15],[21,30,14],[7,16,32]]
输出: [1,1]
解释: 30和32都是顶峰元素,所以[1,1]和[2,2]都是可接受的答案。
提示:
m == mat.length
n == mat[i].length
1 <= m, n <= 500
1 <= mat[i][j] <= 105
任意两个相邻元素均不相等.
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/find-a-peak-element-ii
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
from leetcode_python.utils import *
class Solution:
def findPeakGrid(self, mat: List[List[int]]) -> List[int]:
l, r = 0, len(mat) - 1
while l <= r:
m = (l + r) >> 1
localMax = max(mat[m]) # 一行最大
localCol = mat[m].index(localMax)
if m + 1 < len(mat) and mat[m + 1][localCol] > localMax:
l = m + 1
elif m - 1 >= 0 and mat[m - 1][localCol] > localMax:
r = m - 1
else:
return [m, localCol]
def test(data_test):
    """Run the solution on one test case; *data_test* is the argument list.

    Bug fix: the template called ``s.getResult(*data)``, but Solution
    defines ``findPeakGrid`` and no ``getResult``, so every run raised
    AttributeError.
    """
    s = Solution()
    data = data_test  # normal
    # data = [List2Node(data_test[0])]  # convert a list to a linked list when needed
    return s.findPeakGrid(*data)
def test_obj(data_test):
    # Template runner for design-style problems: data_test[0] lists method
    # names (constructor first), data_test[1] the matching argument lists.
    # NOTE(review): unused for this problem — Solution takes no constructor
    # arguments; kept as author boilerplate.
    result = [None]
    obj = Solution(*data_test[1][0])
    for fun,data in zip(data_test[0][1::],data_test[1][1::]):
        if data:
            res = obj.__getattribute__(fun)(*data)
        else:
            res = obj.__getattribute__(fun)()
        result.append(res)
    return result
if __name__ == '__main__':
    # Each entry of datas is one argument list forwarded to test().
    # NOTE(review): the single empty entry calls findPeakGrid with no
    # arguments — fill in a matrix, e.g. [[[1, 4], [3, 2]]], to run a case.
    datas = [
        [],
    ]
    for data_test in datas:
        t0 = time.time()
        print('-'*50)
        print('input:', data_test)
        print('output:', test(data_test))
        print(f'use time:{time.time() - t0}s')
| monijuan/leetcode_python | code/AC2_normal/1901. 找出顶峰元素 II.py | 1901. 找出顶峰元素 II.py | py | 2,621 | python | zh | code | 0 | github-code | 90 |
20638586281 | import random
import time
import us
import geonamescache
from selenium import webdriver
from selenium.webdriver.common.by import By
from fake_useragent import UserAgent
import requests
import re
from config import BaseConfig as conf
from app.models import Site, Location, URL, City
from app.logger import log
def set_browser():
    """Build a Firefox webdriver with a randomized user-agent string.

    NOTE(review): despite the original variable names, this configures
    Firefox (FirefoxOptions + geckodriver), not Chrome.
    """
    options = webdriver.FirefoxOptions()
    # options.add_argument("--headless")
    options.add_argument("--no-sandbox")
    options.add_argument("--disable-dev-shm-usage")
    options.add_argument("--disable-infobars")
    options.add_argument("--disable-extensions")
    # Bug fix: the original discarded UserAgent().random and interpolated
    # the UserAgent object itself into the argument, which does not yield
    # a user-agent string. Capture the random UA value and use it.
    user_agent = UserAgent().random
    options.add_argument(f"user-agent={user_agent}")
    browser = webdriver.Firefox(
        executable_path="app\controllers\selenium\webdriver\geckodriver.exe",
        options=options,
    )
    log(log.INFO, "UserAgent: [%s]", user_agent)
    return browser
def scrape(query: str, location_name):
    """Walk Google result pages for *query*, probing every outbound link and
    saving Sierra Interactive real-estate sites to the Site table.

    Non-matching but reachable URLs are stored in the URL table. Stops when
    there is no next page or conf.MAX_PAGES_AMOUNT pages were parsed.
    """
    log(log.INFO, "run controllers.scrape()")
    log(log.INFO, "query: [%s]", query)
    # Fingerprint strings that identify a Sierra Interactive site.
    REAL_ESTATE_TEXT = "Real Estate Websites by"
    SIERRA_TEXT = "Sierra Interactive"
    urls = []
    browser = set_browser()
    try:
        log(log.INFO, "try")
        browser.get(query)
        log(log.INFO, "browser.get(query): [%s]", query)
        pages_counter = 0
        while True:
            time.sleep(1)
            # Busy-wait while Google shows a CAPTCHA (solved manually).
            while "captcha" in browser.page_source:
                log(log.WARNING, "CAPTCHA DETECTED!")
                time.sleep(3)
            results = browser.find_elements(By.TAG_NAME, "a")
            for page in results:
                link = page.get_attribute("href")
                if not link:
                    continue
                # Reduce the link to its scheme+domain prefix when possible.
                url_pattern = r"https://[www\.]?[\w\-]+\.[a-z]{2,3}"
                matches = re.findall(url_pattern, link)
                url = matches[0] if matches else link
                if "google.com" in url:
                    continue
                # Skip domains already recorded as matches.
                current_site: Site = Site.query.filter_by(url=url).first()
                if current_site:
                    continue
                try:
                    page_response = requests.get(url, timeout=15)
                except Exception:
                    # Unreachable host — move on to the next link.
                    continue
                log(
                    log.INFO,
                    "page_response.status_code: %s, REAL_ESTATE_TEXT: %s",
                    page_response.status_code,
                    REAL_ESTATE_TEXT in page_response.text,
                )
                if (
                    page_response.status_code == 200
                    and REAL_ESTATE_TEXT in page_response.text
                    and SIERRA_TEXT in page_response.text
                ):
                    # A Sierra Interactive site: record it as a match.
                    new_site = Site(url=url)
                    new_site.save()
                    urls.append(url)
                    log(log.INFO, "[%d] Saved URL: %s", new_site.id, url)
                elif page_response.status_code == 200:
                    # Reachable but not a match: record in the generic table.
                    other_site = URL(url=url)
                    other_site.save()
            pages_counter += 1
            log(log.INFO, "Pages parsed: %d", pages_counter)
            try:
                # "pnnext" is Google's next-page link.
                next_button = browser.find_element(By.ID, "pnnext")
            except Exception:
                try:
                    log(log.ERROR, "No next button")
                    # location = Location(name=location_name)
                    # location.save()
                    # Fall back to the "more results" element.
                    next_button = browser.find_element(By.TAG_NAME, "i")
                except Exception:
                    # NOTE(review): if both lookups fail, next_button keeps a
                    # stale value (or is unbound on the first page) and the
                    # get_attribute call below raises — caught by the outer
                    # except, ending the scrape.
                    log(log.ERROR, "No extended results")
            if pages_counter >= conf.MAX_PAGES_AMOUNT:
                log(log.INFO, "Max pages reached")
                break
            new_page = next_button.get_attribute("href")
            browser.get(new_page)
        log(log.INFO, "urls just saved: [%d]", len(urls))
    except Exception as e:
        log(log.ERROR, "Error: %s", e)
    finally:
        browser.close()
        browser.quit()
browser.quit()
def scrape_states():
    """Run one Google scrape per US state, searching by state abbreviation."""
    for state in us.states.STATES:
        state: us.states.State = state
        query_str = "+".join([state.abbr, conf.SEARCH_STR])
        query = conf.BASE_GOOGLE_GET.format(query_str)
        # Bug fix: scrape() requires a location name as its second argument;
        # calling it with only the query raised TypeError on every state.
        scrape(query, state.name)
def scrape_cities():
    """Scrape every US city known to geonamescache, in random order,
    skipping cities already recorded in Location."""
    cache = geonamescache.GeonamesCache()
    all_cities = cache.get_cities()
    us_cities = [
        all_cities[key]["name"].replace(" ", "+")
        for key in all_cities
        if all_cities[key]["countrycode"] == "US"
    ]
    random.shuffle(us_cities)
    total = len(us_cities)
    for index, us_city in enumerate(us_cities):
        query = conf.BASE_GOOGLE_GET.format("+".join([us_city, conf.SEARCH_STR]))
        log(log.INFO, "-------City %d of %d: %s-------", index, total, us_city)
        if not Location.query.filter_by(name=us_city).first():
            scrape(query, us_city)
def scrape_counties():
    """Scrape every US county known to geonamescache, in random order,
    skipping counties already recorded in Location."""
    cache = geonamescache.GeonamesCache()
    counties = cache.get_us_counties()
    random.shuffle(counties)
    total = len(counties)
    for index, county in enumerate(counties):
        county_name = county["name"]
        query = conf.BASE_GOOGLE_GET.format(
            "+".join([county_name.replace(" ", "+"), conf.SEARCH_STR]))
        log(log.INFO, "-------County %d of %d: %s-------", index, total, county)
        if not Location.query.filter_by(name=county_name).first():
            scrape(query, county_name)
def scrape_db_cities():
    """Run a Google scrape for every city stored in the City table,
    iterating the table in pages of 100 rows."""
    CITIES_PER_PAGE = 100
    first_page = City.query.paginate(page=1, per_page=CITIES_PER_PAGE)
    # Bug fix: the original floor-divided the total and iterated
    # range(1, pages_amount), which skipped the final partial page and the
    # last full page. Ceil-divide and make the range inclusive.
    pages_amount = -(-first_page.total // CITIES_PER_PAGE)
    for page in range(1, pages_amount + 1):
        log(log.INFO, "-------Checking page %d of %d-------", page, pages_amount)
        cities: list[City] = City.query.paginate(page=page, per_page=CITIES_PER_PAGE)
        for city in cities:
            log(log.INFO, "Checking city %d: %s, %s", city.id, city.name, city.state)
            query_str = "+".join([city.name, conf.SEARCH_STR])
            query = conf.BASE_GOOGLE_GET.format(query_str)
            scrape(query, city.name)
| Simple2B/RealEstateParser | app/controllers/selenium/urls.py | urls.py | py | 6,127 | python | en | code | 0 | github-code | 90 |
34855182473 | from functools import cache
import json
import typing
from dataclasses import dataclass, field
from fastapi import FastAPI, HTTPException, Response
from cached import cached
app = FastAPI()
@dataclass
class Profile:
    # Public profile record served by the API; populated from profile.json.
    id: str          # URL slug used as the lookup key in `profiles`
    name: str
    website: str
    github: str
    linkedin: str
    twitter: str
    education: str
    languages: list[str] = field(default_factory=list)   # programming languages
    frameworks: list[str] = field(default_factory=list)  # frameworks/libraries
    about_me: str = ""
# Registry of profiles keyed by profile id. profile.json holds a single
# profile object, so exactly one entry is loaded at import time.
profiles: dict[str, Profile] = {}
with open("profile.json", encoding="utf8") as profile:
    raw_profile = json.load(profile)
    profile = Profile(**raw_profile)  # note: rebinds the file-handle name
    profiles[profile.id] = profile
@app.get("/")
def get_root() -> Response:
    """Root endpoint: point callers at the profile route."""
    # Fixed the garbled message ("TO check profile gotto /profile").
    return Response("To check the profile go to /profile")
class PrettyJSONResponse(Response):
    """JSON response rendered with 5-space indentation for readability."""

    media_type = "application/json"

    def render(self, content: typing.Any) -> bytes:
        pretty = json.dumps(
            content,
            ensure_ascii=False,
            allow_nan=False,
            indent=5,
            separators=(',', ': '),
        )
        return pretty.encode('utf-8')
@app.get("/{profile_id}", response_model=Profile, response_class=PrettyJSONResponse)
@cached
def read_profile(profile_id: str):
    """Return the profile for *profile_id*, or 404 if unknown.

    Decorator-order fix: ``@cached`` must sit below ``@app.get`` so that
    FastAPI registers the cached wrapper as the route handler. Previously
    ``@app.get`` ran first and registered the uncached function, so the
    cache never applied to requests.
    """
    if profile_id not in profiles:
        raise HTTPException(status_code=404, detail="Profile not found")
    return profiles[profile_id]
| rein14/fastapi-profile | main.py | main.py | py | 1,360 | python | en | code | 0 | github-code | 90 |
# Competitive-programming script: pick the m best ordered pairs (i, j) with
# value A_i + A_j and print the maximum total (AtCoder "handshake"-style
# problem — TODO confirm exact task from the source repo path).
n, m = map(int, input().split())
l = list(map(int, input().split()))
l.sort()
import bisect
def func(x):
    # True if at least m ordered pairs (p, q) from l satisfy p + q >= x.
    C = 0
    for p in l:
        q = x -p
        j = bisect.bisect_left(l, q)
        C += n-j
    if C >= m:
        return True
    else:
        return False
# Binary search the largest threshold l_ such that at least m pairs sum >= l_.
# Invariant: func(l_) is True, func(r_) is False.
l_ = 0
r_ = 2*10**5 +1
while l_+1 < r_:
    c_ = (l_+r_)//2
    if func(c_):
        l_ = c_
    else:
        r_ = c_
# Sum every pair with sum >= l_ via a prefix sum over the descending list,
# then subtract the (cnt - m) surplus pairs, each counted at exactly l_.
ans = 0
cnt = 0
lr = sorted(l, reverse=True)
from itertools import accumulate
cum = [0] + list(accumulate(lr))
for i in lr:
    j = bisect.bisect_left(l, l_-i)
    ans += i*(n-j) + cum[n-j]
    cnt += n -j
ans -= (cnt-m)*l_
print(ans)
print(ans) | Aasthaengg/IBMdataset | Python_codes/p02821/s181670765.py | s181670765.py | py | 584 | python | en | code | 0 | github-code | 90 |
25769833906 | # multiAgents.py
# --------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from inspect import currentframe
from stat import FILE_ATTRIBUTE_NOT_CONTENT_INDEXED
from util import manhattanDistance
from game import Directions
import random, util
from game import Agent
from pacman import GameState
class ReflexAgent(Agent):
    """
    A reflex agent chooses an action at each choice point by examining
    its alternatives via a state evaluation function.
    The code below is provided as a guide. You are welcome to change
    it in any way you see fit, so long as you don't touch our method
    headers.
    """

    def getAction(self, gameState: GameState):
        """
        You do not need to change this method, but you're welcome to.
        getAction chooses among the best options according to the evaluation function.
        Just like in the previous project, getAction takes a GameState and returns
        some Directions.X for some X in the set {NORTH, SOUTH, WEST, EAST, STOP}
        """
        # Collect legal moves and successor states
        legalMoves = gameState.getLegalActions()
        # Choose one of the best actions
        scores = [self.evaluationFunction(gameState, action) for action in legalMoves]
        bestScore = max(scores)
        bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]
        chosenIndex = random.choice(bestIndices)  # Pick randomly among the best
        "Add more of your code here if you want to"
        return legalMoves[chosenIndex]

    def evaluationFunction(self, currentGameState: GameState, action):
        """
        Design a better evaluation function here.
        The evaluation function takes in the current and proposed successor
        GameStates (pacman.py) and returns a number, where higher numbers are better.
        The code below extracts some useful information from the state, like the
        remaining food (newFood) and Pacman position after moving (newPos).
        newScaredTimes holds the number of moves that each ghost will remains
        scared because of Pacman having eaten a power pellet.
        Print out these variables to see what you're getting, then combine them
        to create a masterful evaluation function.
        """
        # Useful information you can extract from a GameState (pacman.py)
        successorGameState = currentGameState.generatePacmanSuccessor(action)
        newPos = successorGameState.getPacmanPosition()
        newFood = successorGameState.getFood()
        newGhostStates = successorGameState.getGhostStates()
        newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]
        "*** YOUR CODE HERE ***"
        newFoods = newFood.asList()
        # Manhattan distance to the closest ghost in the successor state.
        nearestGhostDistance = min([util.manhattanDistance(newPos, ghostPos) for ghostPos in successorGameState.getGhostPositions()])
        # All ghosts stay scared longer than the nearest one is away: chase it.
        if min(newScaredTimes) > nearestGhostDistance:
            return 10 / nearestGhostDistance
        # A ghost is dangerously close (< 3 steps): heavily penalize this move.
        if nearestGhostDistance < 3:
            return - len(newFoods) - 200
        # Otherwise prefer fewer remaining pellets and a nearby pellet.
        minManhattanDistance = 10000  # sentinel larger than any board distance
        for foodPos in newFoods:
            currManhattanDistance = util.manhattanDistance(newPos, foodPos)
            if currManhattanDistance < minManhattanDistance:
                minManhattanDistance = currManhattanDistance
        return - len(newFoods) + 1 / (minManhattanDistance + 1)
def scoreEvaluationFunction(currentGameState: GameState):
    """
    This default evaluation function just returns the score of the state.
    The score is the same one displayed in the Pacman GUI.
    This evaluation function is meant for use with adversarial search agents
    (not reflex agents).
    """
    # (Removed a stray ")" that had leaked into the docstring.)
    return currentGameState.getScore()
class MultiAgentSearchAgent(Agent):
    """
    This class provides some common elements to all of your
    multi-agent searchers. Any methods defined here will be available
    to the MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent.
    You *do not* need to make any changes here, but you can if you want to
    add functionality to all your adversarial search agents. Please do not
    remove anything, however.
    Note: this is an abstract class: one that should not be instantiated. It's
    only partially specified, and designed to be extended. Agent (game.py)
    is another abstract class.
    """

    def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'):
        self.index = 0  # Pacman is always agent index 0
        # Resolve the evaluation function by name at construction time.
        self.evaluationFunction = util.lookup(evalFn, globals())
        # depth arrives as a string from the command line; store it as int.
        self.depth = int(depth)
class MinimaxAgent(MultiAgentSearchAgent):
    """
    Your minimax agent (question 2)
    """

    def getAction(self, gameState: GameState):
        """
        Returns the minimax action from the current gameState using self.depth
        and self.evaluationFunction.
        Here are some method calls that might be useful when implementing minimax.
        gameState.getLegalActions(agentIndex):
        Returns a list of legal actions for an agent
        agentIndex=0 means Pacman, ghosts are >= 1
        gameState.generateSuccessor(agentIndex, action):
        Returns the successor game state after an agent takes an action
        gameState.getNumAgents():
        Returns the total number of agents in the game
        gameState.isWin():
        Returns whether or not the game state is a winning state
        gameState.isLose():
        Returns whether or not the game state is a losing state
        """
        "*** YOUR CODE HERE ***"
        # Index 0 is Pacman (maximizer); all other agents are ghosts (minimizers).
        agentList = ['Pacman'] + ['Ghost'] * (gameState.getNumAgents() - 1)
        def maxAndmin(currState, currDepth, index):
            # Recursive minimax returning (value, action). Search depth is
            # consumed only on Pacman's turn, so one ply = every agent moving.
            currAgent = agentList[index]
            if currAgent == 'Pacman':
                nextDepth = currDepth - 1
                agentMode, optimalValue = max, -float('inf')
            elif currAgent == "Ghost":
                nextDepth = currDepth
                agentMode, optimalValue = min, float('inf')
            # Terminal: win/lose state or search depth exhausted.
            if currState.isWin() or currState.isLose() or nextDepth < 0:
                return (self.evaluationFunction(currState), None)
            nextAgentIndex = index + 1 if index != len(agentList) - 1 else 0
            optimalAction = None
            for childAction in currState.getLegalActions(index):
                currentValue = maxAndmin(currState.generateSuccessor(index, childAction), nextDepth, nextAgentIndex)[0]
                # On ties agentMode(a, b) == b holds, so the LAST equally-good
                # action wins — preserved deliberately.
                if agentMode(optimalValue, currentValue) == currentValue:
                    optimalValue, optimalAction = currentValue, childAction
            return (optimalValue, optimalAction)
        return maxAndmin(gameState, self.depth, 0)[1]
class AlphaBetaAgent(MultiAgentSearchAgent):
    """
    Your minimax agent with alpha-beta pruning (question 3)
    """

    def getAction(self, gameState: GameState):
        """
        Returns the minimax action using self.depth and self.evaluationFunction
        """
        "*** YOUR CODE HERE ***"
        # Index 0 is Pacman (maximizer); all other agents are ghosts (minimizers).
        agentList = ['Pacman'] + ['Ghost'] * (gameState.getNumAgents() - 1)
        def alphaBeta(currState, currDepth, index, alpha, beta):
            # Minimax with alpha-beta pruning, returning (value, action).
            # Depth is consumed only on Pacman's turn.
            currAgent = agentList[index]
            if currAgent == 'Pacman':
                nextDepth = currDepth - 1
                agentMode, optimalValue = max, -float('inf')
            elif currAgent == "Ghost":
                nextDepth = currDepth
                agentMode, optimalValue = min, float('inf')
            if currState.isWin() or currState.isLose() or nextDepth < 0:
                return (self.evaluationFunction(currState), None)
            nextAgentIndex = index + 1 if index != len(agentList) - 1 else 0
            optimalAction = None
            for childAction in currState.getLegalActions(index):
                currentValue = alphaBeta(currState.generateSuccessor(index, childAction), nextDepth, nextAgentIndex, alpha, beta)[0]
                if agentMode(optimalValue, currentValue) == currentValue:
                    optimalValue, optimalAction = currentValue, childAction
                if agentMode == max:
                    # Prune on strict violation only (course convention:
                    # no pruning on equality), then raise alpha.
                    if optimalValue > beta:
                        return (optimalValue, optimalAction)
                    alpha = agentMode(alpha, optimalValue)
                else:
                    # Minimizer: prune below alpha, then lower beta.
                    if optimalValue < alpha:
                        return (optimalValue, optimalAction)
                    beta = agentMode(beta, optimalValue)
            return (optimalValue, optimalAction)
        return alphaBeta(gameState, self.depth, 0, -float('inf'), float('inf'))[1]
class ExpectimaxAgent(MultiAgentSearchAgent):
    """
    Your expectimax agent (question 4)
    """

    def getAction(self, gameState: GameState):
        """
        Returns the expectimax action using self.depth and self.evaluationFunction
        All ghosts should be modeled as choosing uniformly at random from their
        legal moves.
        """
        "*** YOUR CODE HERE ***"
        # Index 0 is Pacman (maximizer); ghosts are uniform-random chance nodes.
        agentList = ['Pacman'] + ['Ghost'] * (gameState.getNumAgents() - 1)
        def expectiMax(currState, currDepth, index):
            # Recursive expectimax returning (value, action). Depth is
            # consumed only on Pacman's turn.
            currAgent = agentList[index]
            if currAgent == 'Pacman':
                nextDepth = currDepth - 1
                agentMode, optimalValue = max, -float('inf')
            elif currAgent == "Ghost":
                nextDepth = currDepth
                # 'exp' marks a chance node; its value is the uniform average.
                agentMode, optimalValue = 'exp', 0
            if currState.isWin() or currState.isLose() or nextDepth < 0:
                return (self.evaluationFunction(currState), None)
            nextAgentIndex = index + 1 if index != len(agentList) - 1 else 0
            optimalAction = None
            for childAction in currState.getLegalActions(index):
                currentValue = expectiMax(currState.generateSuccessor(index, childAction), nextDepth, nextAgentIndex)[0]
                if agentMode == max:
                    # Maximizer: on ties the last equally-good action wins.
                    if agentMode(optimalValue, currentValue) == currentValue:
                        optimalValue, optimalAction = currentValue, childAction
                else:  # chance node: accumulate the uniform expectation
                    optimalValue += 1 / len(currState.getLegalActions(index)) * currentValue
            # Chance nodes return optimalAction=None (no choice is made).
            return (optimalValue, optimalAction)
        return expectiMax(gameState, self.depth, 0)[1]
def betterEvaluationFunction(currentGameState: GameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).

    DESCRIPTION: scores a state, not an action.
    Priority: 1. Avoid dead ends  2. Ghost hunting  3. Avoid ghosts  4. Food-gobbling
    """
    currPos = currentGameState.getPacmanPosition()
    currFoods = currentGameState.getFood().asList()
    currScaredTimes = [ghostState.scaredTimer for ghostState in currentGameState.getGhostStates()]
    # NOTE(review): assumes at least one ghost exists, as in the standard
    # multiagent layouts — min() over an empty sequence would raise.
    nearestGhostDistance = min([util.manhattanDistance(currPos, ghostPos)
                                for ghostPos in currentGameState.getGhostPositions()])

    # Penalize cramped positions (fewer than 3 legal moves) on non-terminal states.
    if len(currentGameState.getLegalActions()) < 3 and not currentGameState.isLose() and not currentGameState.isWin():
        return - len(currFoods) - 500

    # Ghost hunting: every ghost stays scared longer than it takes to reach the
    # nearest one, so chasing is safe; closer is better.
    if min(currScaredTimes) > nearestGhostDistance:
        # max(..., 1) guards against ZeroDivisionError when Pacman shares a
        # cell with a scared ghost (distance 0).
        return 10 / max(nearestGhostDistance, 1)

    # An active ghost is adjacent: strongly penalize.
    if nearestGhostDistance < 2:
        return - len(currFoods) - 200

    # Food gobbling: fewer remaining pellets dominates; proximity to the
    # nearest pellet breaks ties. default=10000 keeps the old sentinel when
    # no food remains.
    minManhattanDistance = min(
        (util.manhattanDistance(currPos, foodPos) for foodPos in currFoods),
        default=10000,
    )
    return - len(currFoods) + 1 / (minManhattanDistance + 1)
# Abbreviation: short alias, presumably looked up by name by the course
# harness (e.g. -a evalFn=better) — confirm before renaming.
better = betterEvaluationFunction
| psxxj/Pacman_AI | PJ2_MultiagentSearch/multiAgents.py | multiAgents.py | py | 12,352 | python | en | code | 0 | github-code | 90 |
71213154537 | from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class CustomerRemovalRequest(Document):
	"""Request to scrub personal data from a Customer and its linked Contact."""

	def validate(self):
		# Warn when the customer has no linked Contact: the contact-flagged
		# fields in this request will then have no effect.
		if not _get_contact(self.customer):
			frappe.msgprint("There is no Contact found on this customer. Contact fields will not be used.")

		# Blank every requested field that lives on the Customer itself.
		for field in self.fields:
			if field.is_contact_field:
				continue
			frappe.db.set_value("Customer", self.customer, field.fieldname, None)

	def on_submit(self):
		# On final submission, also blank all requested fields on the Contact.
		contact = _get_contact(self.customer)
		if contact:
			fieldnames = [field.fieldname for field in self.fields]
			_set_contact_fields_to_empty(contact, fieldnames)
def _set_contact_fields_to_empty(contact, fields):
	"""Set each of the given fieldnames on the Contact record to None."""
	for fieldname in fields:
		frappe.db.set_value("Contact", contact, fieldname, None)
def _get_contact(customer):
	"""Return the name of the Contact dynamically linked to `customer`, or None."""
	links = frappe.db.get_all(
		"Dynamic Link",
		filters={
			"link_doctype": "Customer",
			"link_name": customer,
		},
		fields=["parent"],
	)
	# The Dynamic Link's parent is the Contact document name.
	return links[0].get("parent") if links else None
| iRaySpace/pdpl | pdpl/pdpl/doctype/customer_removal_request/customer_removal_request.py | customer_removal_request.py | py | 998 | python | en | code | 0 | github-code | 90 |
10052642653 | from django import template
register = template.Library()
# Maps the integer extract_type value from the extracted-source table to the
# human-readable label rendered in templates. Note that 0 is a valid key.
extract_types = {
    0: "blind (0)",
    1: "forced (1)",
    2: "manual (2)",
}
@register.filter
def extract_type(value):
    """
    Returns a string format for the extract_type column in the
    extracted source table.

    Returns None for a missing value. Only None counts as missing:
    0 is a valid extract type ("blind"), so a falsy check would
    silently swallow it (that was the previous bug).
    """
    if value is None:
        return None
    return extract_types[value]
7694916989 | """Do the image"""
import time
from dataclasses import dataclass
from typing import Iterable
import os
import logging
import easygui
from PIL import Image
class NotRGBRGBA(Exception):
    """Raised when an opened image is in a mode other than RGB or RGBA."""

    def __str__(self) -> str:
        """Placeholder message — callers are expected to catch this, not print it."""
        return 'you are not supposed to see this'
# File listing the STI image names; only referenced by the commented-out
# file-based loader inside generate_export_image_names.
STI_NAME = 'sti_names.txt'
def open_file(file: str) -> str:
    """Return the full contents of the plain-text file at path `file`."""
    with open(file) as handle:
        return handle.read()
def check_allowed_suffix(filename: str, allowed_suffix: Iterable) -> bool:
    """Return True if `filename` ends with any suffix in `allowed_suffix`."""
    # Slice comparison (rather than str.endswith) keeps the original handling
    # of a zero-length suffix, which only matches an empty filename.
    return any(filename[-len(suffix):] == suffix for suffix in allowed_suffix)
@dataclass
class Preferences:
    """Runtime preferences for the image-export pipeline.
    """
    image_dim: tuple[int, int]  # target (width, height) exported images are resized to
    export_filetype: str = '.jpg'  # intended export extension (not referenced elsewhere in this file — TODO confirm)
    process_type: str = 'crop'  # processing mode; only a plain resize is implemented — TODO confirm intent
    allowed_dir_names: tuple[str, ...] = ('.png', '.jpg')  # input extensions accepted when scanning the import folder
def import_image(directory: str) -> Image:
    """Open the image at `directory` and return it in RGB mode.

    RGB images are returned untouched. RGBA images are composited onto a
    white background using their alpha channel. Any other mode raises
    NotRGBRGBA.
    """
    image = Image.open(directory)
    if image.mode == 'RGB':
        return image
    if image.mode == 'RGBA':
        image.load()  # force-load pixel data; required before split() on PNGs
        background = Image.new("RGB", image.size, (255, 255, 255))
        background.paste(image, mask=image.split()[3])  # band 3 is the alpha channel
        return background
    raise NotRGBRGBA
def transform_image(image: Image, prefs: Preferences, outfile: str = 'testoutfile.jpg') -> None:
    """Shrink `image` in place to fit within prefs.image_dim and save it as JPEG.

    `outfile` defaults to the previously hard-coded test filename so existing
    callers keep their behavior; new callers can direct the output elsewhere.
    """
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use Image.LANCZOS
    # there); left as-is to match whatever Pillow this project pins.
    image.thumbnail(prefs.image_dim, Image.ANTIALIAS)
    image.save(outfile, 'JPEG')
# def main() -> None:
# image_dir = 'sample_image.jpg'
# prefs = Preferences((425, 320))
# image = import_image(image_dir)
# transform_image(image, prefs)
class ImageSystem:
    """Drives the whole export pipeline: pick an import folder via a GUI
    dialog, collect the allowed image files in it, and save them (resized)
    into ./export under the fixed STI image names.
    """
    prefs: Preferences
    dir_list = list[str] # not used here (note: '=' stores the type object itself, this is not an annotation)
    import_folder_path: str # the folder you wish to process
    export_folder_path: str # always export
    images_to_process: list[str] # list of images to process
    images_to_process_full: list[str] # that with the dir
    export_image_names: list[str] # list of STI image names
    def __init__(self) -> None:
        """Set up preferences and folders, then immediately run the export.

        Side effects: creates ./export if needed, opens a blocking easygui
        folder-picker dialog, then processes every collected image.
        """
        self.prefs = Preferences((425, 320))
        self.export_folder_path = 'export'
        self.create_new_dir()
        self.import_folder_path = easygui.diropenbox('Select import folder', 'Test')
        self.images_to_process = self.collect_files_from_dir()
        self.export_image_names = self.generate_export_image_names()
        self.images_to_process_full = [self.image_path_to_dir(x) for x in self.images_to_process]
        self.comp_main()
    def comp_main(self) -> None:
        """The entire thing: resize each input image and save it under the next STI name."""
        if len(self.images_to_process_full) < len(self.export_image_names):
            logging.warning('Input folder has less images than total STI images.')
        # zip() stops at the shorter list, so extra inputs (or extra names) are ignored.
        for raw_image_dir, export_image_name in zip(self.images_to_process_full,
                                                    self.export_image_names):
            try:
                img = import_image(raw_image_dir)
            except NotRGBRGBA:
                # Skip images in unsupported modes instead of aborting the run.
                continue
            img = img.resize(self.prefs.image_dim)
            # img.thumbnail(self.prefs.image_dim, Image.ANTIALIAS)
            # NOTE(review): hard-codes the 'export\\' prefix (Windows separator)
            # instead of using self.export_folder_path — confirm before porting.
            img.save('export\\' + export_image_name, 'JPEG', quality=100, subsampling=0)
            logging.info(f'Saved image of filename {export_image_name}')
    def generate_export_image_names(self) -> list[str]:
        """Return the fixed, hard-coded list of STI image names.
        """
        return ['Airplane.jpg', 'ApplePicking.jpg', 'ArtMuseum.jpg', 'AvocadoToast.jpg', 'BabyAnnouncement.jpg',
                'Barn.jpg', 'Baseball.jpg', 'Basketball.jpg', 'Bat.jpg', 'BathroomStall.jpg', 'BBQ.jpg', 'Beach.jpg',
                'Bear.jpg', 'Bicycle.jpg', 'BirthdayParty.jpg', 'Bonfire.jpg', 'BookStore.jpg', 'Boxing.jpg',
                'BubbleBath.jpg', 'Cabin.jpg', 'Camping.jpg', 'Cat.jpg', 'Chimpanzee.jpg', 'Chinese.jpg',
                'CoffeeShop.jpg', 'CompactCar.jpg', 'ConcertStage.jpg', 'Condo.jpg', 'CondoInterior.jpg',
                'Convertible.jpg', 'Dentist.jpg', 'Desert.jpg', 'DMV.jpg', 'Dog.jpg', 'Dolphin.jpg', 'Elephant.jpg',
                'EngagementRing.jpg', 'FarmHouse.jpg', 'FeetOnBeach.jpg', 'Ferret.jpg', 'Field.jpg', 'Fireworks.jpg',
                'Football.jpg', 'Forest.jpg', 'Funeral.jpg', 'GasStation.jpg', 'Giraffe.jpg', 'Goldfish.jpg',
                'Gorilla.jpg', 'Graduation.jpg', 'Guacamole.jpg', 'Guacamole.png', 'Gym.jpg', 'Gyro.jpg',
                'Hamburger.jpg', 'Hockey.jpg', 'HomeRepair.jpg', 'Horse.jpg', 'HorseRace.jpg', 'HotAirBalloon.jpg',
                'HotDogs.jpg', 'HouseParty.jpg', 'IceCream.jpg', 'Igloo.jpg', 'Indian.jpg', 'Island.jpg', 'Jeep.jpg',
                'Jungle.jpg', 'Kite.jpg', 'Kittens.jpg', 'Laundromat.jpg', 'Lion.jpg', 'London.jpg', 'Mansion.jpg',
                'MeatSkewer.jpg', 'Meditation.jpg', 'Mexican.jpg', 'Minivan.jpg', 'Moose.jpg', 'Motel.jpg',
                'Motorcycle.jpg', 'Mountains.jpg', 'MovieTheater.jpg', 'Moving.jpg', 'Museum.jpg', 'Nascar.jpg',
                'NewYorkCity.jpg', 'Office.jpg', 'Orangutan.jpg', 'Parade.jpg', 'Paris.jpg', 'Park.jpg',
                'ParkingTicket.jpg', 'PickupTruck.jpg', 'Pig.jpg', 'Pizza.jpg', 'PoolParty.jpg', 'PostOffice.jpg',
                'Puppies.jpg', 'Rabbit.jpg', 'Rainstorm.jpg', 'Rollerblades.jpg', 'RollerCoaster.jpg',
                'RomanticGetaway.jpg', 'Russia.jpg', 'Sailboat.jpg', 'Salad.jpg', 'Salmon.jpg', 'SanFrancisco.jpg',
                'Segway.jpg', 'SemiTruck.jpg', 'ShootingStar.jpg', 'Skyscraper.jpg', 'Snake.jpg', 'Soccer.jpg',
                'Spider.jpg', 'StationWagon.jpg', 'Statue.jpg', 'Steak.jpg', 'SubwayCar.jpg', 'Suitcase.jpg',
                'Sunset.jpg', 'Tennis.jpg', 'Traffic.jpg', 'Train.jpg', 'Vegas.jpg', 'Volleyball.jpg', 'Voting.jpg',
                'VR.jpg', 'Wedding.jpg', 'Wrestling.jpg', 'WritingDesk.jpg', 'Yoga.jpg']
        # this method of pasting lists is dumb
        # image_list_str = open_file(STI_NAME)
        # image_list = image_list_str.split('\n')
        # return image_list
    def create_new_dir(self) -> None:
        """Create the export directory. If it already exists, do nothing.
        """
        current_directory = os.getcwd()
        final_directory = os.path.join(current_directory, self.export_folder_path)
        if not os.path.exists(final_directory):
            os.makedirs(final_directory)
        else:
            logging.warning('export path already exists')
    def collect_files_from_dir(self) -> list[str]:
        """Return the filenames in the import folder whose extension is allowed.
        """
        arr = os.listdir(self.import_folder_path)
        allowed_img_files = []
        for filename in arr:
            if check_allowed_suffix(filename, self.prefs.allowed_dir_names):
                allowed_img_files.append(filename)
        return allowed_img_files
    def image_path_to_dir(self, image_path: str) -> str:
        """Return the full path of `image_path` inside the import folder.
        Uses a Windows-style '\\' separator, so this script is Windows-only as written.
        """
        return self.import_folder_path + '\\' + image_path
def try_everything() -> None:
    """Entry point: constructing ImageSystem runs the entire export pipeline."""
    ImageSystem()
if __name__ == '__main__':
    try:
        try_everything()
        print('Exporting complete. Closing in 4 seconds.')
    finally:
        # Keep the console window open briefly so the user can read the
        # final message (or the traceback) before the window closes.
        time.sleep(4)
| i-winxd/STIPhoto-generator | main.py | main.py | py | 7,689 | python | en | code | 1 | github-code | 90 |
37931081734 | import unittest
from insert_into_a_sorted_circular_linked_list import Node, Solution
class TestInsertIntoASortedCircularLinkedList(unittest.TestCase):
    """Tests for Solution.insert on a sorted circular singly linked list."""

    @staticmethod
    def _circular(values):
        """Build a circular linked list from `values` and return its head."""
        head = Node(val=values[0])
        tail = head
        for value in values[1:]:
            tail.next = Node(val=value)
            tail = tail.next
        tail.next = head
        return head

    def test_example_1(self):
        node = Solution().insert(head=self._circular([1, 3, 4]), insertVal=2)
        assert node.val == 1
        assert node.next.val == 2
        assert node.next.next.val == 3
        assert node.next.next.next.val == 4
        assert node.next.next.next.next == node

    def test_example_2(self):
        # Inserting into an empty list creates a one-node cycle.
        node = Solution().insert(head=None, insertVal=2)
        assert node.val == 2
        assert node.next == node

    def test_example_3(self):
        node = Solution().insert(head=self._circular([1]), insertVal=2)
        assert node.val == 1
        assert node.next.val == 2
        assert node.next.next == node
| saubhik/leetcode | tests/test_insert_into_a_sorted_circular_linked_list.py | test_insert_into_a_sorted_circular_linked_list.py | py | 957 | python | en | code | 3 | github-code | 90 |
7134974576 | from functools import wraps
import asyncio
def retry_timeout(times=3, sleep=5):
    """Decorator factory: retry an async callable on asyncio TimeoutError.

    The wrapped coroutine is attempted up to max(times, 1) times. The first
    attempt runs immediately; `sleep` seconds elapse between attempts (the
    old version slept before the first attempt too, and slept twice between
    retries). After the final timeout the TimeoutError is re-raised. Any
    other exception propagates unchanged — the old `raise Exception` was a
    bug that replaced the real error with a bare Exception.
    """
    def inner_decorator(func):
        @wraps(func)
        async def decorated(*args, **kwargs):
            attempts = max(1, times)  # always try at least once
            for attempt in range(1, attempts + 1):
                try:
                    return await func(*args, **kwargs)
                except asyncio.exceptions.TimeoutError:
                    if attempt == attempts:
                        raise  # retries exhausted: surface the timeout
                    await asyncio.sleep(sleep)
        return decorated
    return inner_decorator
| JoryPein/book-spider | utils/aio_retry.py | aio_retry.py | py | 793 | python | en | code | 0 | github-code | 90 |
33666772685 | import numpy as np
import pandas as pd
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
#주어진 데이터를 바탕으로 따릉이 대여량을 예측 해보세요!
# 1. 데이터
path = './_data/ddarung/'
#인공지능을 다룰 때 대부분의 데이터들은 csv 파일로 관리 (csv : 엑셀파일)
#index 컬럼은 0번째
train_csv = pd.read_csv(path + 'train.csv', index_col=0) # [715 rows x 9 columns]
test_csv = pd.read_csv(path + 'test.csv', index_col=0) #[1459 rows x 10 columns]
submission = pd.read_csv(path + 'submission.csv', index_col=0) #[715 rows x 1 columns], 2개중 count 컬럼을 제외한 나머지 1개
print(train_csv)
print(test_csv.shape)
print(submission)
print(train_csv.columns) #컬럼명 추출
# Index(['hour', 'hour_bef_temperature', 'hour_bef_precipitation',
# 'hour_bef_windspeed', 'hour_bef_humidity', 'hour_bef_visibility',
# 'hour_bef_ozone', 'hour_bef_pm10', 'hour_bef_pm2.5', 'count'],
# dtype='object')
print(train_csv.info()) #컬럼 정보, non-null = 결측지
# non-null (결측지) 데이터 처리
# 1. 결측지가 들어있는 데이터를 삭제한다.
# 2. 임의의 숫자를 넣는다. ex) 0, 100, 아래 데이터 기준 등
print(test_csv.info()) #컬럼 정보, non-null = 결측지
print(train_csv.describe())
#axis : 인덱스, 행 : 0(default), 열 : 1,
x = train_csv.drop(['count'], axis=1) # 10개 중 count 컬럼을 제외한 나머지 9개만 inputing
print(x) # [1459 rows x 9 columns]
#count 컬럼 제거
y = train_csv['count']
x_train, x_test, y_train, y_test = train_test_split(x, y,
train_size=0.7, shuffle=True, random_state=123
)
print(x_train.shape, x_test.shape)
print(y_train.shape, y_test.shape)
#2. 모델 구성
model = Sequential()
model.add(Dense(10, input_dim=9))
model.add(Dense(10))
model.add(Dense(10))
model.add(Dense(10))
model.add(Dense(1))
#3. 컴파일, 훈련
model.compile(loss='mse', optimizer='adam')
model.fit(x_train, y_train , epochs=100, batch_size=32)
#4. 예측, 평가
loss = model.evaluate(x_test, y_test)
y_predict = model.predict(x_test)
def RMSE(y_test, y_predict) :
    """Root mean squared error between targets and predictions."""
    return np.sqrt(mean_squared_error(y_test, y_predict))
r2 = r2_score(y_test, y_predict)
# Submission: predict on the competition's test set
y_submit = model.predict(test_csv)
# Damn missing values!
# To be continued — blocked by the missing values!
print("===================================")
print(y_test)
print(y_predict)
print("R2 : " , r2)
print("RMSE : ", RMSE(y_test, y_predict))
print("===================================")
'''
[결측지가 있는 경우]
loss : nan
ValueError: Input contains NaN.
[결측지 제거]
R2 : 0.5738889026735445
RMSE : 53.12491189278038
# [결측지 제거 전]
#(1021, 9) (438, 9)
#(1021, ) (438, )
# [결측지 제거 후]
# (929, 9) (399, 9)
# (929,) (399,)
''' | JDanmuji/BitCamp_AI | keras/keras15_1_dacon_ddarung1.py | keras15_1_dacon_ddarung1.py | py | 3,147 | python | ko | code | 0 | github-code | 90 |
# Read two words and report whether they are anagrams of each other.
word1 = input("Enter a word: ").lower()
word2 = input('pick a second word').lower()

# Two words are anagrams exactly when their sorted letters match.
if "".join(sorted(word1)) == "".join(sorted(word2)):
    print('these are anagrams ')
else:
    print('these are not anagrams')
| davidl0673/pythonstuff | lab17_part2.py | lab17_part2.py | py | 307 | python | en | code | 0 | github-code | 90 |
11188971284 | from flask import request
from demo_api.common import create_api
from sgnlp.models.rst_pointer import (
RstPointerParserConfig,
RstPointerParserModel,
RstPointerSegmenterConfig,
RstPointerSegmenterModel,
RstPreprocessor,
RstPostprocessor,
)
app = create_api(app_name=__name__, model_card_path="model_card/rst_pointer.json")

# Load processors and models
# NOTE: pretrained configs/weights are fetched over HTTP at import time, so
# starting this API requires network access to the sgnlp storage bucket.
preprocessor = RstPreprocessor()
postprocessor = RstPostprocessor()

segmenter_config = RstPointerSegmenterConfig.from_pretrained(
    "https://storage.googleapis.com/sgnlp/models/rst_pointer/segmenter/config.json"
)
segmenter = RstPointerSegmenterModel.from_pretrained(
    "https://storage.googleapis.com/sgnlp/models/rst_pointer/segmenter/pytorch_model.bin",
    config=segmenter_config,
)
segmenter.eval()  # inference mode (disables dropout etc.)

parser_config = RstPointerParserConfig.from_pretrained(
    "https://storage.googleapis.com/sgnlp/models/rst_pointer/parser/config.json"
)
parser = RstPointerParserModel.from_pretrained(
    "https://storage.googleapis.com/sgnlp/models/rst_pointer/parser/pytorch_model.bin",
    config=parser_config,
)
parser.eval()  # inference mode (disables dropout etc.)

app.logger.info("Model initialization complete")
@app.route("/predict", methods=["POST"])
def predict():
req_body = request.get_json()
sentence = req_body["sentence"]
sentence = [sentence] # Treat it as a batch size of 1
tokenized_sentence_ids, tokenized_sentence, length = preprocessor(sentence)
segmenter_output = segmenter(tokenized_sentence_ids, length)
end_boundaries = segmenter_output.end_boundaries
parser_output = parser(tokenized_sentence_ids, end_boundaries, length)
hierplane_tree = postprocessor(
sentences=sentence,
tokenized_sentences=tokenized_sentence,
end_boundaries=end_boundaries,
discourse_tree_splits=parser_output.splits,
)
return {"tree": hierplane_tree[0]}
if __name__ == "__main__":
    # Run Flask's built-in development server when executed directly.
    app.run()
| aisingapore/sgnlp | demo_api/rst_pointer/api.py | api.py | py | 1,914 | python | en | code | 32 | github-code | 90 |
10829926382 |
#****************************************************************************************************************************
#Importing libraries
#****************************************************************************************************************************
import argparse, os, time, glob, sys
import tensorflow as tf;
import tensorflow_hub as hub;
import numpy as np
from PIL import Image
import argparse
import json
import matplotlib.pylab as plt
tf.config.list_physical_devices('GPU')
#****************************************************************************************************************************
#Functions
#****************************************************************************************************************************
#Function to normalize images for processing
def process_image(image):
    '''
    Normalize an image for use by the classifier.
    inputs:
        image -> Image as a numpy array
    output:
        Numpy array resized to 224x224 with pixel values scaled into [0, 1]
    '''
    tensor = tf.convert_to_tensor(image, dtype=tf.float32)
    tensor = tf.image.resize(tensor, (224, 224))
    return (tensor / 255).numpy()
#Function that make predictions based on a loaded model
def predict(image_path, model_path, top_k=5):
    '''
    Function that predicts the class (or classes) of an image using a trained deep learning model.
    inputs:
        image_path -> Path to the image to be classified
        model_path -> Path to the saved Keras model used for the classification
        top_k -> Number of top predictions to return
    output:
        prob -> Probabilities of the top_k predicted classes
        classes -> Class labels (1-based, as strings) matching those probabilities
    '''
    # Upper bound on how many classes can be requested — presumably the label
    # map has ~102 entries (TODO confirm against label_map.json).
    if top_k > 101:
        print("Too many classes to predict (top_k)")
        return

    model = tf.keras.models.load_model(model_path, custom_objects={'KerasLayer': hub.KerasLayer})
    model.summary()  # summary() prints itself; print(model.summary()) also printed a stray None

    image = np.asarray(Image.open(image_path))
    image = process_image(image)

    prediction = model.predict(np.expand_dims(image, axis=0))
    # Fix: honor top_k instead of the previously hard-coded 5.
    indexs = [clase for clase in (-prediction[0]).argsort()[:top_k]]
    classes = [str(clase + 1) for clase in indexs]  # labels in the JSON map are 1-based
    prob = prediction[0][indexs]
    return prob, classes
#Function to classified images with a preloaded model
def Classification(img_path, model_path, class_names, top_k=5):
    '''
    Classify an image with a pretrained model and print the top predictions.
    inputs:
        img_path -> Path to the image of interest
        model_path -> Path to the pretrained model
        class_names -> Dictionary mapping class ids (as strings) to class names
        top_k -> Number of predictions to report for the classified image
    '''
    prob, classes = predict(img_path, model_path, top_k)
    # Windows-style path: the image name is the last path component, minus extension.
    image_name = img_path.split('\\')[-1].split('.')[0]
    print('Image to classify: ', image_name, '\n')
    print('Probabilities for the classified image: ')
    for p, cls in zip(prob, classes):
        # Fix: corrected the 'Probabiity' typo in the user-facing output.
        print('Predicted class: ', class_names[cls], ' ----- ', 'Probability: ', p, '\n')
if __name__ == '__main__':
    # Command-line entry point: classify one image with a saved model.
    # Removed leftover debug prints and a dead hard-coded classes_path, and
    # replaced the placeholder argparse help strings with real descriptions.
    parser = argparse.ArgumentParser(description='Classify an image with a trained Keras model.')
    parser.add_argument("img_path", help="Path to the image to classify")
    parser.add_argument('model_path', help="Path to the saved Keras model")
    parser.add_argument('classes_path', help="Path to the JSON file mapping class ids to names")
    parser.add_argument('top_k', type=int, help="Number of top predictions to report")
    args = parser.parse_args()

    with open(args.classes_path, 'r') as f:
        class_names = json.load(f)

    Classification(args.img_path, args.model_path, class_names, args.top_k)
| hazutecuhtli/Create_Your_Own_Image_Classifier | Predict.py | Predict.py | py | 4,090 | python | en | code | 0 | github-code | 90 |
25961932270 | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.applications import MobileNetV2
from functools import partial
import ee_strats.uncertainty_sampling as uncertainty_sampling
'''Configuration file for the Trainer'''
LABELER_IP = "http://127.0.0.1:3334"
BATCH_SIZE = 4 # Batch size for the training and test datasets
SHUFFLE_BUFFER_SIZE = 1000 # Buffer for the train and test datasets shuffling
TRAIN_CONTINUOUSLY = False # Whether or not the trainer should wait for new training data between each training loop
QUERY_EVERY = 1 # Make query every x batches sent by the labeler
EVAL_EVERY = 2 # Evaluate every x batches sent by the labeler
INPUT_SHAPE = (224, 224) # Model input shape
NUM_EPOCHS_PER_LOOP = 5 # Number of epochs to do per training loop
TRAINING_VERBOSITY = 2 # Keras model.fit verbosity level
ANNOTATIONS_SAVE_PATH = "./annotations" # Save directory for the annotations, should be the same as the one from the Labeler configuration.
SAVED_MODEL_PATH = "./saved_model" # Save directory for the keras model
FORCE_ON_CPU = True # Force tensorflow to use the CPU
## Set an early stopping treshold per metric. Dict of metrics as keys and thresholds as values.
## You can skip some metrics if you want to. We can also define a threshold for the "loss".
## If you don't want early stopping logic, set the variable to an empty dict
EARLY_STOPPING_METRICS_THRESHOLDS = {"accuracy": [0.95, "upper_bound"],
"loss": [0, "lower_bound"]}
def setup_model(num_classes, input_shape=INPUT_SHAPE+(3,)):
    '''Build and compile a MobileNetV2-based classifier with a small dense head.

    The ImageNet-pretrained backbone is frozen; only the head is trainable.
    '''
    base = MobileNetV2(weights="imagenet", include_top=False,
                       input_shape=input_shape,
                       input_tensor=layers.Input(shape=input_shape))
    base.trainable = False  # train only the classification head

    head = layers.AveragePooling2D(pool_size=(3, 3))(base.output)
    head = layers.Flatten(name="flatten")(head)
    head = layers.Dense(128, activation="relu")(head)
    head = layers.Dropout(0.5)(head)
    head = layers.Dense(num_classes, activation="softmax")(head)

    model = keras.Model(inputs=base.input, outputs=head)
    model.compile(loss='binary_crossentropy',
                  optimizer=keras.optimizers.Adam(),
                  metrics=['accuracy'])
    return model
def reload_model(*_):
    '''Use this if you want to reload a model saved from a previous run'''
    # Accepts and ignores any arguments (e.g. num_classes) so it can be
    # assigned to model_fn interchangeably with setup_model.
    return keras.models.load_model(SAVED_MODEL_PATH)
## Select your model function here, num_classes has to be the only argument. Must return a compiled model.
model_fn = setup_model
# model_fn = reload_model # Use this if you want to reload the previous model.
## Choose your Exploration/Exploitation strat from the ee_strats module.
## This base strat uses entropy-based uncertainty sampling, randomized with
## probability p=0.8.
ee_strat = partial(uncertainty_sampling.randomize_sampling,
                   sampling=uncertainty_sampling.entropy, p=0.8)
| kangzi/Online-Active-Learning | trainer/config.py | config.py | py | 3,010 | python | en | code | 0 | github-code | 90 |
43835949230 | from PyWebSystem.PyUtil.pw_logger import logmessage
from PyWebSystem.PyUtil.pw_extra_methods import id_generator
from django.template import Template
from PyWebSystem.PyUtil.DickUpdate import pw_loop
from PyWebSystem.customtags.pw_definePrimaryNode import definePrimaryNode
def LayoutRepeat(context, *args, **kwargs):
    """Render a repeating row-group layout for the current ElementPrimary.

    Builds an HTML <div data-uitype='rowgroup'> containing one
    <div data-uitype='row'> per item yielded by pw_loop over the primary
    node, renders each column via Django templates, appends the optional
    add-row layout, and renders the assembled HTML once more against the
    context. Temporarily mutates context["ElementPrimary"]/["elpath"] while
    rendering and restores ElementPrimary before returning.
    """
    logmessage("LayoutRepeat", "warning")
    # Remember the caller's primary element so it can be restored at the end.
    backupPrimary = context["ElementPrimary"]
    primaryNode = context["ElementPrimary"]
    # logmessage("LayoutRepeat", "warning", primaryNode.get("elementpath", ""))
    # A PrimaryNode starting with '.' is relative to the current elementpath.
    if primaryNode.get("PrimaryNode", "")[0] == ".":
        elepath = primaryNode.get("elementpath", "") + primaryNode.get("PrimaryNode", "")
    else:
        elepath = primaryNode.get("PrimaryNode", "")
    # logmessage("LayoutRepeat", "warning", elepath)
    Primary = definePrimaryNode(context, "", "", elepath)
    # logmessage("LayoutRepeat", "warning", Primary)
    gen_id = id_generator(10)
    repeathtml = "<div data-uitype='rowgroup' data-path='"+elepath+"' data-target=" + gen_id + " id="+gen_id+">"
    primaryNodeColumns = context["ElementPrimary"]["columns"]
    addrowicon = context["ElementPrimary"].get("addrowlay", {})
    logmessage("LayoutRepeat", "warning", primaryNodeColumns)
    # Pre-render the optional "add row" layout appended after all data rows.
    # NOTE(review): rowhtml is only assigned inside this branch, but it is
    # used unconditionally below — a missing/non-Layout "addrowlay" would
    # raise NameError at `repeathtml += rowhtml`. Confirm intended behavior.
    if addrowicon.get("controltype", "") == "Layout":
        addrowicon["elementpath"] = elepath
        context["ElementPrimary"] = addrowicon
        context["elpath"] = addrowicon["elementpath"]
        html = '{%load TagUtility%} {%includeTag layout %}'
        t = Template(html)
        rowhtml = t.render(context)
    # pw_loop yields (index, key, path, value); index == -1 appears to be a
    # sentinel for non-row entries and is skipped — TODO confirm.
    for index, key, path, value in pw_loop(Primary):
        if index == -1:
            pass
        else:
            value["elementpath"] = elepath + path
            repeathtml += "<div data-uitype='row' data-path='" + value["elementpath"] + "'>"
            for index, key, path, cvalue in pw_loop(primaryNodeColumns):
                bodyhtml = ""
                """if cvalue.get("PrimaryNode", "") == "":
                    cvalue["elementpath"] = value.get("elementpath", "")
                elif cvalue.get("PrimaryNode", "")[0] == ".":
                    cvalue["elementpath"] = cvalue.get("PrimaryNode", "")
                else:
                    cvalue["elementpath"] = value.get("elementpath", "") + cvalue.get("PrimaryNode", "")"""
                cvalue["elementpath"] = value["elementpath"]
                context["ElementPrimary"] = cvalue
                context["elpath"] = value["elementpath"]
                # NOTE(review): if controltype is neither "Layout" nor
                # "section", `html` keeps its value from the previous
                # iteration (or is undefined on the first) — confirm.
                if cvalue.get("controltype", "") == "Layout":
                    # cvaluestr = json.dumps(cvalue)
                    html = '{%load TagUtility%} {%includeTag layout %}'
                elif cvalue.get("controltype", "") == "section":
                    html = '{%load TagUtility%} {%includeTag findelement as elementconfig%}{%with elpath=ElementPrimary.elementpath ElementPrimary=elementconfig%}{%includeTag addsection %}{%endwith%}'
                t = Template(html)
                bodyhtml += t.render(context)
                repeathtml += bodyhtml
            repeathtml += "</div>"
    # logmessage("LayoutRepeat", "warning", repeathtml)
    repeathtml += rowhtml+"</div>"
    # Render the assembled markup itself as a template against the context.
    t = Template(repeathtml)
    html = t.render(context)
    context["ElementPrimary"] = backupPrimary
    return html
| anji-a/PyWebSystem | PyWebSystem/customtags/pw_LayoutRepeat.py | pw_LayoutRepeat.py | py | 3,307 | python | en | code | 0 | github-code | 90 |
18681156748 | import yaml
from ax.util.const import SECONDS_PER_MINUTE
VALID_VERSIONS = ["v1"]
class AXPlatformConfigDefaults:
    """Default file locations and timeout/retry tuning for platform object operations."""

    # Default platform manifest / config file info
    DefaultManifestRoot = "/ax/config/service/standard/"
    DefaultPlatformConfigFile = "/ax/config/service/config/platform-bootstrap.cfg"

    # Create timeouts
    ObjCreateWaitTimeout = 25 * SECONDS_PER_MINUTE
    ObjCreatePollInterval = 3
    ObjCreatePollMaxRetry = ObjCreateWaitTimeout / ObjCreatePollInterval

    # Create extra poll timeouts
    ObjCreateExtraPollTimeout = 15 * SECONDS_PER_MINUTE
    ObjCreateExtraPollInterval = 3
    ObjCreateExtraPollMaxRetry = ObjCreateExtraPollTimeout / ObjCreateExtraPollInterval

    # Delete timeouts
    ObjDeleteWaitTimeout = 2 * SECONDS_PER_MINUTE
    ObjDeletePollInterval = 3
    ObjDeletePollMaxRetry = ObjDeleteWaitTimeout / ObjDeletePollInterval

    # Jitters
    # Max jitter (seconds) — presumably added to spread out bursts of
    # simultaneous operations; confirm against the call sites.
    ObjectOperationJitter = 5
class ObjectGroupPolicy:
    """How many times an object group may be created over the cluster's life."""

    # CreateOnce means it should not be recreated or upgraded. i.e. it can only be
    # created once during the cluster's entire life cycle
    # Example would be volume, cluster ELB, namespace, etc
    CreateOnce = "CreateOnce"

    # CreateMany means it can be created multiple times during cluster's life cycle
    # These object groups will be teared down / brought up again during pause/restart/upgrade
    # Example would be all Argo micro-services
    CreateMany = "CreateMany"
class ObjectGroupPolicyPredicate:
    """
    ObjectGroupPolicy can be attached with a predicate.
    e.g. CreateOnce:PrivateRegistryOnly
    """
    NoPredicate = ""
    # Presumably restricts the group to clusters using a private registry —
    # confirm where the predicate is evaluated.
    PrivateRegistryOnly = "PrivateRegistryOnly"
class ObjectGroupConsistency:
    """How an object group is reconciled when the platform starts."""

    # CreateIfNotExist means during platform start, we will check if the
    # object is there or not. We only create it if the object does not exist
    # Note that if the object is not healthy, i.e. not all Pods are in "Running"
    # state, we delete and recreate
    CreateIfNotExist = "CreateIfNotExist"
class AXPlatformConfig(object):
    """Parsed platform bootstrap config: a version, a name, and ordered object-group steps."""

    def __init__(self, config_file):
        self._config_file = config_file
        self.version = ""
        self.name = ""
        self.steps = []
        self._load_config()

    def _load_config(self):
        """Parse the YAML config file and populate version/name/steps.

        Raises ValueError when the config declares an unsupported version.
        """
        with open(self._config_file, "r") as f:
            # safe_load avoids arbitrary Python object construction from the
            # config file and the PyYAML >= 5 "load() without Loader" warning.
            config_raw = yaml.safe_load(f)
        self.version = config_raw["version"]
        if self.version not in VALID_VERSIONS:
            raise ValueError("Invalid platform config version: {}".format(self.version))
        self.name = config_raw["name"]
        for s in config_raw["spec"].get("steps", []):
            self.steps.append(AXPlatformObjectGroup(s))
class AXPlatformObjectGroup(object):
    """One step of the platform config: a named group of objects plus its lifecycle settings."""

    def __init__(self, object_group):
        self.name = object_group["name"]
        self.policy = object_group.get("policy", ObjectGroupPolicy.CreateMany)
        self.policy_predicate = object_group.get("policy_predicate", ObjectGroupPolicyPredicate.NoPredicate)
        self.consistency = object_group.get("consistency", ObjectGroupConsistency.CreateIfNotExist)

        # Reject anything outside the known enumerations up front.
        valid_policies = (ObjectGroupPolicy.CreateOnce, ObjectGroupPolicy.CreateMany)
        if self.policy not in valid_policies:
            raise ValueError("Invalid object group policy: {}.".format(self.policy))
        valid_predicates = (ObjectGroupPolicyPredicate.NoPredicate, ObjectGroupPolicyPredicate.PrivateRegistryOnly)
        if self.policy_predicate not in valid_predicates:
            raise ValueError("Invalid object group policy predicate {}".format(self.policy_predicate))
        if self.consistency not in (ObjectGroupConsistency.CreateIfNotExist,):
            raise ValueError("Invalid object group consistency: {}".format(self.consistency))

        self.object_set = {AXPlatformObject(o) for o in object_group.get("objects", [])}
class AXPlatformObject(object):
    """A single deployable object: a name, its manifest file, and an optional namespace."""

    def __init__(self, object_input):
        # "name" and "file" are required; "namespace" is not enforced because
        # the object might be creating the namespace itself.
        self.name = object_input["name"]
        self.namespace = object_input.get("namespace", None)
        self.manifest = object_input["file"]
| zhan849/argo | platform/source/lib/ax/platform/component_config/platform_config.py | platform_config.py | py | 4,112 | python | en | code | null | github-code | 90 |
23301495216 | '''
Given a non-empty string s and a dictionary wordDict containing a list of non-empty words, add spaces in s to construct a sentence where each word is a valid dictionary word. Return all such possible sentences.
Note:
The same word in the dictionary may be reused multiple times in the segmentation.
You may assume the dictionary does not contain duplicate words.
Example 1:
Input:
s = "catsanddog"
wordDict = ["cat", "cats", "and", "sand", "dog"]
Output:
[
"cats and dog",
"cat sand dog"
]
Example 2:
Input:
s = "pineapplepenapple"
wordDict = ["apple", "pen", "applepen", "pine", "pineapple"]
Output:
[
"pine apple pen apple",
"pineapple pen apple",
"pine applepen apple"
]
Explanation: Note that you are allowed to reuse a dictionary word.
Example 3:
Input:
s = "catsandog"
wordDict = ["cats", "dog", "sand", "and", "cat"]
Output:
[]
'''
class Solution:
    def wordBreak(self, s: str, wordDict: List[str]) -> List[str]:
        """Return every sentence obtained by splitting s into dictionary words.

        Phase 1: a DP pass records, for each end index, which start indices
        yield a dictionary word reachable from the beginning of s.
        Phase 2: a BFS walks those links backwards from len(s) to 0,
        assembling each sentence right-to-left.
        """
        words = set(wordDict)
        n = len(s)

        breakable = [False] * (n + 1)  # breakable[i]: s[:i] splits into words
        breakable[0] = True
        starts = defaultdict(list)  # end index -> start indices of valid last words
        for start in range(n):
            if not breakable[start]:
                continue
            for end in range(start, n + 1):
                if s[start:end] in words:
                    breakable[end] = True
                    starts[end].append(start)

        if not breakable[n]:
            return []

        sentences = []
        # Queue entries: [start of last segment, exclusive end, suffix built so far].
        queue = [[start, n + 1, ""] for start in starts[n]]
        while queue:
            start, end, suffix = queue.pop(0)
            piece = s[start:end] + " " + suffix
            if start == 0:
                sentences.append(piece.rstrip())
            else:
                for prev in starts[start]:
                    queue.append([prev, start, piece])
        return sentences
| ahujapankaj16/CompetitiveProgramming | WordBreakII.py | WordBreakII.py | py | 1,907 | python | en | code | 0 | github-code | 90 |
18397469349 | # bit全順列列挙
def bitall(n):
    """Enumerate every on/off assignment of n switches.

    Returns a list of 2**n lists; element i holds the binary digits of i,
    least-significant bit first (0 = off, 1 = on).
    """
    return [[(mask >> bit) & 1 for bit in range(n)] for mask in range(1 << n)]
# Read N switches and M bulbs; for each bulb its switch list, then the parity
# list P (a bulb lights when the number of its switches that are on has
# parity p_i).
N, M = map(int, input().split())
K = []
S = []
for m in range(M):
    tmp = list(map(int, input().split()))
    K.append(tmp[0])
    S.append(tmp[1:])
P = list(map(int, input().split()))
# Brute-force every on/off combination of the N switches.
swi_parms = bitall(N)
ans =0
for swi_parm in swi_parms:
    flag = 0
    for k in range(len(K)):
        cnt = 0
        for s in S[k]:
            # Count how many of this bulb's switches are on.
            if swi_parm[s-1] == 1: cnt += 1
        if cnt%2 == P[k]:
            continue
        else:
            flag = 1
            break
    if flag == 0:
        ans += 1
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03031/s903884300.py | s903884300.py | py | 1,026 | python | ja | code | 0 | github-code | 90 |
40662814392 | #!/usr/bin/env python
# *-* encoding: utf-8 *-*
import numpy as np
import scipy.spatial
from scipy.special import lambertw
from typing import Optional, Tuple
class PoolTest:
    """Distortion-rate analysis of pooled (group) testing strategies.

    Parameters for the I subpopulations are supplied through :meth:`set`;
    the remaining methods evaluate bounds (``distortion_rate``,
    ``entropy_bound``, ``binary_splitting``, ``individual_testing``) and
    achievable strategies (``SG1``, ``SG2``).
    """
    # Numerical guard used to keep v strictly inside (0, 1) and to absorb
    # floating-point noise in cost comparisons.
    EPSILON = 1e-12
    a: np.ndarray
    b: np.ndarray
    N: np.ndarray
    p: np.ndarray
    n_subpop: int
    n_indiv: np.ndarray
    @staticmethod
    def _binent(t: np.ndarray):
        """
        Evaluates the binary entropy function in nat.
        :param t: Numpy array of values 0 <= t <= 1
        :return: H_2(t), evaluated element-wise, in nat
        """
        assert np.all(np.logical_and(0.0 <= t, t <= 1.0))
        with np.errstate(divide='ignore', invalid='ignore'):
            binent = -t * np.log(t) - (1 - t) * np.log(1 - t)
        # 0*log(0) is taken as 0 at both endpoints.
        binent[t == 0.0] = 0.0
        binent[t == 1.0] = 0.0
        return binent
    def _D(self, a: np.ndarray, p: np.ndarray, v: np.ndarray) -> np.ndarray:
        """
        Evaluate \bar D(p, a, v) as defined in the reference.
        All broadcasting is applied by numpy implicitly.
        :param a: a > 0
        :param p: 0 < p < 1
        :param v: 0 <= v < 1
        :return:
        """
        assert np.all(np.logical_and(0.0 < p, p < 1.0))
        assert np.all(np.logical_and(0.0 <= v, v < 1.0))
        assert np.all(0.0 < a)
        return p * (v / (1 - v) - a * v ** a / (1 - v ** a)) + a / (1 - v ** a) - (a + v ** (a + 1)) / (
                1 - v ** (a + 1))
    def _R(self, a: np.ndarray, p: np.ndarray, v: np.ndarray) -> np.ndarray:
        """
        Evaluate \bar R(p, a, v) as defined in the reference.
        All broadcasting is applied by numpy implicitly.
        :param a: a > 0
        :param p: 0 < p < 1.0
        :param v: 0 <= v <=1
        :return:
        """
        assert np.all(np.logical_and(0.0 < p, p < 1.0))
        assert np.all(np.logical_and(0.0 <= v, v <= 1.0))
        assert np.all(0.0 < a)
        with np.errstate(divide='ignore', invalid='ignore'):
            R = self._D(a, p, v) * np.log(v) + self._binent(p) - np.log((1 - v ** (a + 1)) / (1 - v ** a)) + \
                p * np.log((1 - v) / (1 - v ** a))
        # We take care of NaN values arising from v == 0.0
        R_fallback = np.broadcast_to(self._binent(p), R.shape)
        R[v == 0.0] = R_fallback[v == 0.0]
        return R
    def __init__(self):
        pass
    def set(self,
            a: np.ndarray,
            p: np.ndarray,
            b: np.ndarray,
            N: np.ndarray,
            ):
        """
        Set the properties of the pooltest. This method can be called later on to change the parameters
        for an already initialized pooltest.
        For all numpy arrays, the first dimension contains the subpopulation index 0 <= i <= I-1
        The total number of subpopulations is limited to 30.
        :param a: 0 < a
        :param p: 0 < p <= 0.5
        :param b: 0 < b
        :param N: 0 < N
        """
        a = np.array(a)
        p = np.array(p)
        b = np.array(b)
        N = np.array(N)
        assert np.all(np.logical_and(0.0 < p, p <= 0.5))
        assert np.all(0.0 < a)
        assert np.all(0.0 < b)
        assert np.all(0.0 < N)
        # We put the subpopulation along the first dimension
        self.a = a.reshape((-1, 1))
        self.p = p.reshape((-1, 1))
        self.b = b.reshape((-1, 1))
        self.N = N.reshape((-1, 1))
        # Make sure that we have all parameters for all subpopulations
        assert a.shape == p.shape == b.shape == N.shape
        # Number of subpopulations
        self.n_subpop = a.shape[0]
        assert self.n_subpop > 0
        assert self.n_subpop <= 30
        # Total number of individuals
        self.n_indiv = np.sum(self.N)
    def Dmax(self) ->np.ndarray:
        """
        Compute the maximum cost, i.e., the minimal cost that can be obtained with zero tests.
        :return: [1,1] numpy array
        """
        return np.sum(self.N * self.b * np.minimum(self.a * self.p, 1 - self.p)) / self.n_indiv
    def entropy_bound(self) -> np.ndarray:
        """
        Compute average binary entropy per individual in bit.
        This is the lower bound for testing with zero cost.
        :return: [1,1] numpy array
        """
        entropy = self._binent(self.p) / np.log(2) # we use bits
        return np.sum(self.N * entropy) / self.n_indiv
    def distortion_rate(self, n_plot: int = 1000):
        """
        Computes the distortion-rate function in bits.
        The integer n_plot specifies the number of sample points.
        A convex envelope is returned, removing spurious points due to v not in [0, v_0].
        :param n_plot: number of sample points (positive integer)
        :return: a tuple (D,R) of 1d-arrays is returned; the distortion-rate function is given in bits
        """
        # Get v for each subpopulation
        v0 = np.linspace(self.EPSILON, 1-self.EPSILON, n_plot)
        v = v0 ** self.b
        D = self._D(self.a, self.p, v)
        R = self._R(self.a, self.p, v) / np.log(2) # We use bits
        idx1 = (1 - self.p) * v ** (self.a + 1) + self.p - v ** self.a >= 0.0
        idx2 = 1 - v - self.p * (1 - v ** (self.a + 1)) >= 0.0
        idx = np.logical_not(np.logical_and(idx1, idx2))
        # Outside the valid v-range, fall back to the trivial (no-test) cost.
        D_fallback = np.broadcast_to(np.minimum(self.a * self.p, 1 - self.p), D.shape)
        D[idx] = D_fallback[idx]
        R[idx] = 0.0
        # Aggregate over the subpopulations, weighted by subpopulation size.
        D = np.sum(self.b * self.N * D / self.n_indiv, axis=0)
        R = np.sum(R * self.N / self.n_indiv, axis=0)
        D = D.reshape((-1,))
        R = R.reshape((-1,))
        # We add the trivial points by hand
        D = np.r_[[self.Dmax()], D, [0.0]]
        R = np.r_[[0.0], R, [self.entropy_bound()]]
        # We sort the output in R, ascending
        D, R = self._cvx_hull(D, R)
        return D, R
    def _cvx_hull(self,
                  D: np.ndarray,
                  R: np.ndarray,
                  labels: Optional[np.ndarray] = None):
        """
        Computes the lower convex envelope of the function D(R).
        The output is flattened to a 1d-array and always in ascending order by rate.
        Optionally, preserves some labels for these points.
        :param D: cost
        :param R: rate
        :param labels: optional labels; shape [D.shape, ...]
        :return: return a tuple (D', R'), a subset of input values, flattened to 1D; if labels is not None,
        (D', R', labels') is returned.
        """
        assert D.shape == R.shape
        if labels is not None:
            assert labels.shape[:len(D.shape)] == D.shape
            lbl_shape = labels.shape[len(D.shape):]
        D = D.reshape((-1,))
        R = R.reshape((-1,))
        if labels is not None:
            labels = labels.reshape((-1,) + lbl_shape)
        # Combine D, R into one array
        DR = np.c_[D, R]
        # Append (Dmax, 1.0) as a catch-all on the "top-right"
        DR = np.r_['0,2,-1', DR, [self.Dmax(), 1.0]]
        # use Qhull to compute the convex hull
        hull = scipy.spatial.ConvexHull(DR)
        idx = hull.vertices
        # Remove the catch-all point again
        idx = idx[idx < DR.shape[0] - 1]
        DR = DR[idx]
        if labels is not None:
            labels = labels[idx]
        D, R = DR[:, 0], DR[:, 1]
        idx = R.argsort()
        R = R[idx]
        D = D[idx]
        if labels is not None:
            labels = labels[idx]
        r = (D, R)
        if labels is not None:
            r += (labels,)
        return r
    def _mix_points(self, D, R):
        """
        Given k values for costs and tests per individual, for each of the I subpopulations,
        this function tries all possible pairs of these k^I strategies and returns D' and R',
        each with shape [k, k, k ... k] (I dimensions in total), holding all these values.
        :param D: [I, k] numpy array
        :param R: [I, k] numpy array
        :return: (D', R') numpy arrays, each with shape [k, k ... k]
        """
        # Normalize R, D by their subpopulation sizes
        R = R * self.N / self.n_indiv
        D = D * self.N / self.n_indiv
        # Prepare large array
        shp = (D.shape[1],) * self.n_subpop
        R_ = np.zeros(shp)
        D_ = np.zeros(shp)
        for n in range(self.n_subpop):
            # We put the n-th subpopulation in position n in the array
            # and let broadcasting do the rest
            R_ += R[n, :].reshape((1,) * n + (-1,) + (1,) * (self.n_subpop - n - 1))
            D_ += D[n, :].reshape((1,) * n + (-1,) + (1,) * (self.n_subpop - n - 1))
        return D_, R_
    def SG1(self, u: Tuple[int,...] = tuple(range(100))):
        """
        Provides the distortion-rate pairs achieved by the SG1 strategy.
        The D/R pairs as well as the corresponding group sizes are returned. A convex envelope
        is computed to yield only the significant results. Output is sorted by rate in ascending
        order.
        A group size of 0 corresponds to no testing of the group, i.e., group size of infinity.
        :param u: tuple of group sizes to consider
        :return: (D, R, lbl): D and R are 1-d arrays, lbl is a [:, I] array containing the
        group sizes for each of the I subpopulations
        """
        # We use the second dimension for the group sizes
        u: np.ndarray = np.array(u, dtype=int).reshape((1, -1))
        # Compute the rate and the cost
        with np.errstate(divide='ignore'):
            R = 1 / u
        D = self.b * (1 - (1 - self.p) ** u - self.p)
        # for u == 0 we set R = 0 and D = Dmax
        idx = (u.squeeze() == 0).nonzero()[0]
        D[:, idx] = self.b * np.minimum((1 - self.p), self.a * self.p)
        R[:, idx] = 0.0
        # Mix the points to obtain all possible combinations of group sizes
        D, R = self._mix_points(D, R)
        # We now construct the labels, using the u_label array with shape
        # [u_max, u_max, ...., u_max, I] with I+1 dimensions.
        # we prepare it, such that u_label[a_1, a_2, ... a_I, k] == a_k for k in {1,...,I} .
        # Probably there is a nicer way of doing this.
        shp = (u.shape[1],) * self.n_subpop
        u_1d = u.astype(dtype=int).squeeze()
        u_label = np.ndarray(shp + (self.n_subpop,), dtype=int)
        for n in range(self.n_subpop):
            u_label[(slice(None),) * self.n_subpop + (slice(n, n + 1),)] = u_1d.reshape(
                (1,) * n + (-1,) + (1,) * (self.n_subpop - n))
        # Disregard everything larger than Dmax
        idx = D <= self.Dmax() + self.EPSILON # add EPSILON to provide room for numerical inaccuracy
        D = D[idx]
        R = R[idx]
        u_label = u_label[idx, :]
        return self._cvx_hull(D, R, labels=u_label)
    def _SG2_tests(self, u1: np.ndarray, u2: np.ndarray):
        """
        Evaluates the necessary tests per individual for SG2(u1, u2).
        Special cases:
        - if u2 == 0, we have u1 == 0 and zero tests.
        - if u1 == 0, but u2 != 0 we have 1SG(u2) with 1/u2 tests per individual.
        We need u1 >= u2 or u1 == 0.
        :param u1: first group size (integer)
        :param u2: second group size (integer)
        :return:
        """
        assert np.all(np.logical_or(u1 >= u2, u1 == 0.0))
        # A group size of 0 is mapped to infinity so 1/u becomes 0.
        u1_ = u1.astype(dtype=float)
        u1_[u1 == 0] = np.inf
        u2_ = u2.astype(dtype=float)
        u2_[u2 == 0] = np.inf
        with np.errstate(divide='ignore'):
            n_tests = 1 / u1_ + (1 - (1 - self.p) ** u1_) / u2_
        n_tests[np.broadcast_to(u2 == 0, n_tests.shape)] = 0.0
        return n_tests
    def SG2(self, u2: Tuple[int,...] = tuple(range(100))):
        """
        Provides the distortion-rate pairs achieved by the SG2 strategy.
        The D/R pairs as well as the corresponding group sizes are returned. A convex envelope
        is computed to yield only the significant results. Output is sorted by rate in ascending
        order.
        A group size of (0, 0) corresponds to no testing of the group, i.e., group size of infinity.
        A group size of (0, k) corresponds to a 1SG(k) strategy for this group.
        :param u2: group sizes for the second group to consider
        :return: (D, R, lbl): D and R are 1-d arrays, lbl is a [:, I, 2] array containing the two
        group sizes for each of the I subpopulations
        """
        # We use the second dimension for the group sizes
        u2: np.ndarray = np.array(u2, dtype=int).reshape((1, -1))
        # Optimize u1 for the particular u2
        u1opt_c = np.real(2 / np.log(1 - self.p) * lambertw(-np.sqrt(-u2 * np.log(1 - self.p)) / 2))
        with np.errstate(invalid='ignore'):
            u1 = np.maximum(np.floor(u1opt_c / u2) * u2, u2)
            u1opt_hi = np.maximum(np.ceil(u1opt_c / u2) * u2, u2)
        # if u2 is zero, we don't test at all
        idx = (u2.squeeze() == 0).nonzero()[0]
        u1[:, idx] = 0
        u1opt_hi[:, idx] = 0
        # Pick whichever rounding of the continuous optimum needs fewer tests.
        idx = (self._SG2_tests(u1, u2) > self._SG2_tests(u1opt_hi, u2))
        u1[idx] = u1opt_hi[idx]
        # Check if u1 = 0 yields a better result
        idx = self._SG2_tests(u1, u2) > self._SG2_tests(np.zeros(u1.shape), u2)
        u1[idx] = 0
        # Compute Rate and Cost
        D = self.b * (1 - (1 - self.p) ** u2 - self.p)
        R = self._SG2_tests(u1, u2)
        # if u2 == 0 we set R = 0 and D = Dmax
        idx = (u2.squeeze() == 0).nonzero()[0]
        D[:, idx] = self.b * np.minimum((1 - self.p), self.a * self.p)
        R[:, idx] = 0.0
        # Mix the points to obtain all possible combinations of group sizes
        D, R = self._mix_points(D, R)
        # We now construct the labels, using the u_label array with shape
        # [u_max, u_max, ...., u_max, I, 2] with I+2 dimensions.
        # we prepare it, such that u_label[a_1, a_2, ... a_I, k, i] == a_k for k in {1,...,I} and i == 1. For i == 0
        # it yields the optimal 1st stage group size for 2nd stage group size a_k for the k-th subpopulation.
        #
        # Probably there is also a nicer way of doing this.
        shp = (u2.shape[1],) * self.n_subpop
        u_label = np.ndarray(shp + (self.n_subpop, 2), dtype=int)
        for n in range(self.n_subpop):
            u_label[(slice(None),) * self.n_subpop + (slice(n, n + 1), slice(1, 2))] = u2.reshape(
                (1,) * n + (-1,) + (1,) * (self.n_subpop - n) + (1,))
            u_label[(slice(None),) * self.n_subpop + (slice(n, n + 1), slice(0, 1))] = u1[n,:].reshape(
                (1,) * n + (-1,) + (1,) * (self.n_subpop - n) + (1,))
        # Disregard everything larger than Dmax
        idx = D <= self.Dmax() + self.EPSILON # add EPSILON to provide room for numerical inaccuracy
        D = D[idx]
        R = R[idx]
        u_label = u_label[idx, :, :]
        return self._cvx_hull(D, R, labels=u_label)
    def binary_splitting(self):
        """
        Provides the binary splitting upper bound from
        Aldridge, M. (2019). Rates of adaptive group testing in the linear regime.
        In: 2019 IEEE International Symposium on Information Theory (ISIT). IEEE. pp. 236–240.
        :return: tuple (D, R) with the extreme points describing the achievable
        region by binary splitting.
        """
        m = 2 ** np.floor(np.log2(1/self.p - 1))
        R = np.c_[(1 / m + (1 + np.log2(m) - 1 / m) * self.p), [0.0] * self.n_subpop]
        D = np.c_[[0.0] * self.n_subpop, self.b * np.minimum(self.a * self.p, 1 - self.p)]
        D, R = self._mix_points(D, R)
        D, R = self._cvx_hull(D, R)
        return D, R
    def individual_testing(self):
        """
        Provides the bound for individual testing.
        Each subpop is individually tested. The convex envelope is computed.
        :return: tuple (D, R) giving the I+1 extreme points.
        """
        # We start at (1,0) and move up to (0, Dmax)
        DR = np.r_['-1,2,1', self.b * np.minimum(self.a * self.p, 1 - self.p) * self.N, -self.N] / self.n_indiv
        idx = (-DR[:, 0] / DR[:, 1]).argsort()
        DR = DR[idx, :]
        DR = np.cumsum(np.r_['0,2,1', [0.0, 1.0], DR], axis=0)
        D, R = DR[:, 0], DR[:, 1]
        idx = R.argsort()
        D, R = D[idx], R[idx]
        return D, R
| g-pichler/group-testing | pooltesting/pooltest.py | pooltest.py | py | 16,203 | python | en | code | 0 | github-code | 90 |
35645216645 | # -*- coding: utf-8 -*-
'''
from wordcloud import WordCloud
import jieba
import time
seg_list = jieba.cut("Python123!Python123为你提供优秀的 Python 学习工具、教程、平台和更好的学习体验。", cut_all=True)
word_split = " ".join(seg_list)
# 显示中文需要的字体,以下是 Windows 系统的设置
# MacOS 中 font_path 可以设置为:"/System/Library/fonts/PingFang.ttc"
my_wordclud = WordCloud(background_color='white', font_path = 'C:\Windows\Fonts\simhei.ttf', max_words = 100, width = 1600, height = 800)
# 产生词云
my_wordclud = my_wordclud.generate(word_split)
# 以当前时间为名称存储词云图片
now = time.strftime('%Y-%m-%d-%H_%M_%S', time.localtime(time.time()))
my_wordclud.to_file(now + '.png')
'''
from apscheduler.schedulers.blocking import BlockingScheduler
from requests_html import HTMLSession
import jieba
from wordcloud import WordCloud
import time
def get_news():
    """Scrape headlines from Baidu News and NetEase News, then render them
    into a word cloud saved as '<timestamp>.png' in the working directory.

    Network failures or selector misses propagate as exceptions (e.g.
    AttributeError when a CSS selector finds nothing).
    """
    print('开始爬取热点新闻')
    ans_news_titles = []
    session = HTMLSession()
    # Collect Baidu News headlines
    r = session.get('https://news.baidu.com/')
    title1_baidu = r.html.find('#pane-news > div > ul > li.hdline0 > strong > a', first=True)
    ans_news_titles.append(title1_baidu.text)
    titles_baidu = r.html.find('#pane-news > ul:nth-child(n) > li.bold-item > a')
    for title in titles_baidu:
        ans_news_titles.append(title.text)
    # Collect NetEase (163) News headlines
    r = session.get('https://news.163.com/')
    title1_163 = r.html.find('#js_top_news > h2:nth-child(1) > a', first=True)
    title2_163 = r.html.find('#js_top_news > h2.top_news_title > a', first=True)
    titles_163 = r.html.find('#js_top_news > ul:nth-child(n) > li:nth-child(n)')
    ans_news_titles.append(title1_163.text)
    ans_news_titles.append(title2_163.text)
    for title in titles_163:
        ans_news_titles.append(title.text)
    word_jieba = jieba.cut(' '.join(ans_news_titles), cut_all=True)
    word_split = " ".join(word_jieba)
    # Raw string for the font path: '\W', '\F' and '\s' are invalid escape
    # sequences that only work by accident and will become a SyntaxError in
    # future Python versions.
    my_wordclud = WordCloud(background_color='white', font_path = r'C:\Windows\Fonts\simhei.ttf', max_words = 100, width = 1600, height = 800)
    # Generate the word cloud
    my_wordclud = my_wordclud.generate(word_split)
    # Save the word cloud image named after the current time
    now = time.strftime('%Y-%m-%d-%H_%M_%S', time.localtime(time.time()))
    my_wordclud.to_file(now + '.png')
sched = BlockingScheduler()
# Run once immediately ...
get_news()
# ... and then every 30 seconds.
sched.add_job(get_news, 'interval', seconds = 30)
sched.start() | zzm99/Simple-code-demo | py123/untitled1.py | untitled1.py | py | 2,480 | python | en | code | 1 | github-code | 90 |
19181537096 | import os
from dotenv import load_dotenv
from flask import Flask, flash, request, redirect, url_for, render_template
from steganography import encrypt, decrypt
load_dotenv()  # pulls UPLOAD_FOLDER, ALLOWED_EXTENSIONS, FILE_EXT, HOST, PORT from a .env file
app = Flask(__name__)
app.secret_key = "cairocoders-endnalan"  # required for flash() messages
app.config['UPLOAD_FOLDER'] = os.getenv("UPLOAD_FOLDER")
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024  # cap uploads at 16 MiB
def allowed_file(filename: str) -> bool:
    """Return True when *filename* has an extension listed in the
    ALLOWED_EXTENSIONS environment variable (comma-separated, case-insensitive)."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    allowed = set(os.getenv("ALLOWED_EXTENSIONS").split(","))
    return extension in allowed
@app.route('/')
def home():
    """ Render the encode (upload) page. """
    return render_template('index.html')
@app.route('/', methods=['POST'])
def upload_image():
    """ Route for upload an image and the text to be hidden.

    Saves the uploaded file into UPLOAD_FOLDER, hides the submitted message
    in it via encrypt(), and renders the result page.  The client-supplied
    filename is sanitized with os.path.basename so a name such as
    '../../etc/x.png' cannot escape the upload folder (path traversal).
    """
    message = request.form['mes']
    if 'file' not in request.files:
        flash('No file part')
        return redirect(request.url)
    file = request.files['file']
    if file.filename == '':
        flash('No image selected for uploading')
        return redirect(request.url)
    if file and allowed_file(file.filename):
        # Strip any directory components from the untrusted filename.
        filename = os.path.basename(file.filename)
        upload_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        # Save file
        file.save(upload_path)
        result = encrypt(image=upload_path, text=message)
        if result:
            new_filename = os.path.join(app.config['UPLOAD_FOLDER'], f"{filename.split('.')[0]}{os.getenv('FILE_EXT')}")
            result.save(new_filename)
            flash('Image successfully upload and displayed below this alert')
            return render_template('index.html', filename=new_filename, msg=message)
        flash('Image error')
        return redirect(request.url)
    flash('Allowed images types are only png')
    return redirect(request.url)
@app.route('/decode')
def decode():
    """ Render the decode page. """
    return render_template('decode.html')
@app.route('/decode', methods=['POST'])
def decode_image():
    """ Route to load an image to extract hidden information.

    Saves the uploaded file and runs decrypt() on it.  The client-supplied
    filename is sanitized with os.path.basename so it cannot escape the
    upload folder (path traversal).
    """
    if 'file' not in request.files:
        flash('No file part')
        return redirect(request.url)
    file = request.files['file']
    if file.filename == '':
        flash('No image selected for uploading')
        return redirect(request.url)
    if file and allowed_file(file.filename):
        # Strip any directory components from the untrusted filename.
        filename = os.path.basename(file.filename)
        upload_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        file.save(upload_path)
        hidden_message = decrypt(image=upload_path)
        if hidden_message:
            flash(f'Text successfully detected: { hidden_message }')
            return render_template('/decode.html', filename=filename)
        flash('Image error')
        return redirect(request.url)
    flash('Allowed images types are only png')
    return redirect(request.url)
@app.route('/display/<filename>')
def display_image(filename):
    """Redirect to the uploaded image inside the static folder.

    Fix: the f-string previously contained no placeholder, so *filename*
    was ignored and every request redirected to the same bogus path.
    """
    return redirect(url_for('static', filename=f'/uploads/{filename}'), code=301)
@app.route('/<filename>')
def display_decode_image(filename):
    """Redirect to the decoded image inside the static folder.

    Fix: the f-string previously contained no placeholder, so *filename*
    was ignored and every request redirected to the same bogus path.
    """
    return redirect(url_for('static', filename=f'/uploads/{filename}'), code=301)
if __name__ == "__main__":
    # Host and port come from the environment so deployments can override them.
    app.run(host=os.getenv("HOST"), port=int(os.getenv("PORT")))
| python237/steganography | app.py | app.py | py | 3,362 | python | en | code | 0 | github-code | 90 |
20672243486 | # ****************************************************************************
#
# Plot the last configuration encountered in the simulation of a specific file
#
# ****************************************************************************
import pyalps
import matplotlib.pyplot as plt
import numpy as np
import sys
import re
def add_arrow(ax, x,y,size,c,theta=0):
ax.arrow(x-size/2*np.cos(theta),y-size/2*np.sin(theta),size*np.cos(theta),size*np.sin(theta),width=0.1*size,length_includes_head=True,color=c)
def plot_spins(T, spins, coordinates, is_deleted):
plt.figure()
ax=plt.gca()
for i in range(len(coordinates)/2):#only 2D
x=coordinates[2*i]
y=coordinates[2*i+1]
if not(bool(is_deleted[i])):
add_arrow(ax,x,y,0.5,'#000000',spins[i])
w=max(coordinates)-min(coordinates)
ax.set_xlim(min(coordinates)-w/10, max(coordinates)+w/10)
ax.set_ylim(min(coordinates)-w/10, max(coordinates)+w/10)
plt.title('T='+str(T))
if not SAVE:
plt.show()
else:
plt.savefig('last-configuration.'+SAVENAME+'.png')
if (len(sys.argv)<2 or '-h' in sys.argv or '--help' in sys.argv):
print("""Usage: alpspython """+str(sys.argv[0])+""" parm.task[X].out.h5
--help shows this message
--save Save the file as a png
--ssf-clones switches for multiple-clone mode, the usage changes slightly to:
alpspython """+str(sys.argv[0])+"""--ssf-clones parm.task[X].clone[Y].h5
--exmc switches for exmc mode, the usage changes slightly to:
alpspython """+str(sys.argv[0])+"""--exmc parm.task[X].clone[Y].h5 [sector]""")
else:
FILENAME=''
SAVE=False
SSF =True
EXMC=False
SSF_CLONES=False
if ('--ssf-clones' in sys.argv):
SSF_CLONES=True
SSF=False
if ('--exmc' in sys.argv):
EXMC=True
SSF =False
if SSF_CLONES:
FILENAME=sys.argv[-1]
ar=pyalps.h5.archive(FILENAME,'r')
try:
CLONE_NR=re.match('.*clone(.*)\..*',sys.argv[-1]).group(1)
except:
print('Regex failed...')
exit()
data= ar['simulation/realizations/0/clones/'+str(CLONE_NR)+'/results/']
spins =data['Last Configuration']['mean']['value']
coords =data['Coordinates']['mean']['value']
is_deleted=data['Is Deleted']['mean']['value']
T=ar['parameters/T']
if SSF:
FILENAME=sys.argv[-1]
data=pyalps.ResultsToXY(pyalps.loadMeasurements([FILENAME],['Last Configuration']),x='EXMC: Temperature',y='Last Configuration')[0]
spins=data.y
if EXMC:
FILENAME=sys.argv[-2]
ar=pyalps.h5.archive(FILENAME,'r')
data= ar['simulation/realizations/0/clones/0/results/sections/'+sys.argv[-1]]
T =data['EXMC: Temperature']['mean']['value']
spins =data['Last Configuration']['mean']['value']
coords =data['Coordinates']['mean']['value']
is_deleted=data['Is Deleted']['mean']['value']
if ('--save' in sys.argv):
SAVE=True
SAVENAME=FILENAME[:-3]
if(EXMC):
SAVENAME=SAVENAME+'.sector'+((sys.argv[-1]).rjust(4,str(0)))
plot_spins(T,spins, coords, is_deleted)
| domischi/mcpp | scripts/plot-last-configuration.py | plot-last-configuration.py | py | 3,256 | python | en | code | 3 | github-code | 90 |
41808326684 |
from turtle import back
from numpy import roll
from student.models import Batch,Branch, Performance,Semester, Student
def get_select_sem_backlog_analysis(sem):
    """Return {"semName": ..., "backlogCount": ...} for a single semester.

    Counts the students of the semester whose Performance record for this
    semester has had_backlog set.
    """
    sem = Semester.objects.get(id=sem.id)
    students = Student.objects.filter(sem=sem.id)
    backlog_count = 0
    for student in students:
        # Filter on roll AND sem so the lookup matches the record we read.
        # The original checked existence with roll only and then fetched
        # with .get(roll=..., sem=...), which raises DoesNotExist when the
        # student has performance rows only for other semesters.
        perf = Performance.objects.filter(roll=student.id, sem=sem.id).first()
        if perf is not None and perf.had_backlog == True:
            backlog_count += 1
    return {"semName": sem.name, "backlogCount": backlog_count}
def get_sem_wise_backlog_analysis(sems, batch, branch):
    """Collect per-semester backlog statistics for every semester in *sems*.

    *batch* and *branch* are currently unused; they are kept so existing
    callers keep working.
    """
    data = [get_select_sem_backlog_analysis(sem) for sem in sems]
    print(data)
    return data
| nikhilap784/Student-Result-Analysis--main | result/student/multi_sem_analysis/Sem_backlog_data_analysis.py | Sem_backlog_data_analysis.py | py | 756 | python | en | code | 0 | github-code | 90 |
18115334079 | #coding:utf-8
#1_5_B
def merge_sort(array):
    """Recursively merge-sort *array*.

    Returns a two-element list [sorted_list, count], where count is the
    total number of element placements performed by all merge steps.
    The empty list now returns [[], 0] (the original returned None, which
    crashed callers that unpack the result).
    """
    if len(array) > 1:
        L, countL = merge_sort(array[0:len(array)//2])
        R, countR = merge_sort(array[len(array)//2:])
        return merge(L, R, countL+countR)
    # Base case: lists of length 0 or 1 are already sorted.
    return [array, 0]

def merge(L, R, count=0):
    """Merge two sorted lists of values < 10**9 + 1 (the sentinel).

    Returns [merged, count + len(L) + len(R)].  Unlike the original
    implementation the input lists are NOT mutated: the sentinel is
    appended to local copies instead of the caller's lists.
    """
    SENTINEL = 10**9+1
    L = L + [SENTINEL]
    R = R + [SENTINEL]
    response = []
    i = 0
    j = 0
    for k in range(len(L)-1 + len(R)-1):
        if L[i] <= R[j]:
            response.append(L[i])
            i += 1
        else:
            response.append(R[j])
            j += 1
        # One placement per merged element.
        count += 1
    return [response, count]
# Read the element count (consumed but otherwise unused here) and the values.
n = int(input())
S = list(map(int, input().split()))
numbers, count = merge_sort(S)
print(*numbers)
print(count) | Aasthaengg/IBMdataset | Python_codes/p02272/s287066787.py | s287066787.py | py | 752 | python | en | code | 0 | github-code | 90 |
15045425380 | #!/usr/bin/env python
"""
Summarize pipeline output across data sets.
# TODO:
- handle separator in _convert_filepath_to_dataframe
"""
import pathlib
import re
from functools import partial
from functools import wraps

import numpy as np
import pandas as pd
import StyleFrame
def convert_filepaths_to_dataframes(func):
    """Given a potentially mixed list of file paths or dataframes, replace
    filepaths by their corresponding dataframes before passing the list
    to the function.

    The wrapper is decorated with functools.wraps so the wrapped function
    keeps its __name__/__doc__ (the original lost them, which breaks
    introspection and help()).
    """
    @wraps(func)
    def wrapped_func(dataframes_or_filenames, *args, **kwargs):
        dataframes = [_convert_filepath_to_dataframe(obj) for obj in dataframes_or_filenames]
        return func(dataframes, *args, **kwargs)
    return wrapped_func
def convert_filepath_to_dataframe(func):
    """Given an object that is either a file path or dataframe, replace
    the filepath by its corresponding dataframe before passing the object
    to the function.

    The wrapper is decorated with functools.wraps so the wrapped function
    keeps its __name__/__doc__ (the original lost them, which breaks
    introspection and help()).
    """
    @wraps(func)
    def wrapped_func(obj, *args, **kwargs):
        dataframe = _convert_filepath_to_dataframe(obj)
        return func(dataframe, *args, **kwargs)
    return wrapped_func
def _convert_filepath_to_dataframe(obj, *args, **kwargs):
    """Return *obj* unchanged when it already is a DataFrame; read it from
    disk with pd.read_csv when it is a str or pathlib.Path.  Anything else
    yields None."""
    if isinstance(obj, pd.DataFrame):
        return obj
    if isinstance(obj, (str, pathlib.Path)):
        return pd.read_csv(obj, *args, **kwargs)

# Default every downstream load to tab-separated files.
_convert_filepath_to_dataframe = partial(_convert_filepath_to_dataframe, sep='\t')
@convert_filepaths_to_dataframes
def concatenate_tables(dataframes):
    """Row-wise concatenate the given tables (file paths are loaded by the decorator)."""
    return pd.concat(dataframes, axis=0, ignore_index=True, sort=False)
@convert_filepaths_to_dataframes
def write_to_excel_workbook(dataframes,
                            output_filepath,
                            sheet_names=None,
                            *args, **kwargs):
    """Save multiple dataframes as individual sheets in a single excel workbook.

    When *sheet_names* is not given, sheets are named 'Sheet0', 'Sheet1', ...
    """
    if not sheet_names:
        sheet_names = [f'Sheet{index}' for index in range(len(dataframes))]
    # One shared style: monospace font, no wrapping, default alignment.
    shared_style = StyleFrame.Styler(wrap_text=False,
                                     horizontal_alignment='general',
                                     font='Courier',
                                     )
    with StyleFrame.StyleFrame.ExcelWriter(output_filepath) as writer:
        for table, sheet_name in zip(dataframes, sheet_names):
            column_names = tuple(table.columns.values)
            styled = StyleFrame.StyleFrame(table)
            styled.apply_column_style(column_names, shared_style)
            styled.to_excel(writer, sheet_name=sheet_name, best_fit=column_names)
def sort_strings_ending_in_numbers(strings):
    """Sort a list of strings ending in numbers.

    ['bcd3', 'abc1', 'abc11', 'abc2'] -> ['abc1', 'abc2', 'abc11', 'bcd3']

    Adapted from:
    -------------
    # stackoverflow.com/a/4318182/2912349
    """
    def split_key(item):
        prefix, digits = re.search(r"^(\D+)(\d+)", item).groups()
        return prefix, int(digits)
    return sorted(strings, key=split_key)
def argsort_strings_ending_in_numbers(strings):
    """Return the indices that would sort a given list of strings ending in numbers.

    unsorted = ['bcd3', 'abc1', 'abc11', 'abc2']
    sorted = [unsorted[idx] for idx in argsort(unsorted)]
    print(sorted)  # ['abc1', 'abc2', 'abc11', 'bcd3']

    Adapted from:
    -------------
    # stackoverflow.com/a/4318182/2912349
    # stackoverflow.com/a/6979121/2912349
    """
    def split_key(item):
        prefix, digits = re.search(r"^(\D+)(\d+)", item).groups()
        return prefix, int(digits)
    return sorted(range(len(strings)), key=lambda idx: split_key(strings[idx]))
@convert_filepath_to_dataframe
def get_barcode_recovery_statistics(dataframe):
    """Classify every entry in the 'barcode' column with is_barcode and return
    (total_recovered, total_partials, total_missing, total_samples, recovery_rate)."""
    scores = np.array([is_barcode(candidate, 12) for candidate in dataframe['barcode']])
    total_recovered = np.sum(scores == 1)
    total_partials = np.sum(scores == 0.5)
    total_missing = np.sum(scores == 0.)
    total_samples = len(dataframe)
    # Partial matches count towards recovery.
    recovery_rate = (total_recovered + total_partials) / total_samples
    return total_recovered, total_partials, total_missing, total_samples, recovery_rate
def is_barcode(candidate, expected_length):
    """
    Classifies candidate barcode sequences into
    - barcodes (1.0): sequences containing only ATGC bases and having the right length
    - partials (0.5): sequences containing some ATGC bases, and not necessarily being of the right length
    - nonsense (0.0): sequences containing only N bases; length is irrelevant
    """
    all_bases = all(char in 'ATGC' for char in candidate)
    if all_bases and len(candidate) == expected_length:
        return 1.
    if all(char == 'N' for char in candidate):
        return 0.
    import warnings
    warnings.warn(f'Barcode is only a partial match: {candidate}.')
    return 0.5
if __name__ == '__main__':
    # --------------------------------------------------------------------------------
    # concatenate all cloneseq summary.tsv tables into one excel workbook
    project_dir = pathlib.Path('/ifs/projects/proj093/analysis/cloneseq/')
    filepaths = list(project_dir.glob('*/summary.tsv'))
    plate_names = [p.parent.name for p in filepaths]
    # sort plates by plate name and number;
    # plate names are assumed to follow the format <organism>-Plate<number>, e.g. mouse-Plate15
    order = argsort_strings_ending_in_numbers(plate_names)
    filepaths = [filepaths[idx] for idx in order]
    plate_names = [plate_names[idx] for idx in order]
    # add a table that is a concatenation of all other tables
    summary = concatenate_tables(filepaths)
    filepaths.append(summary)
    plate_names.append('summary')
    write_to_excel_workbook(filepaths,
                            project_dir.joinpath('summary.xlsx'),
                            sheet_names=plate_names)
    # --------------------------------------------------------------------------------
    # compute barcode recovery rate per table
    total_recovered, total_partials, total_missing, total_samples, recovery_rate = \
        zip(*[get_barcode_recovery_statistics(filepath) for filepath in filepaths])
    df = pd.DataFrame(dict(
        plate = plate_names,
        total_recovered = total_recovered,
        total_partials = total_partials,
        total_missing = total_missing,
        total_samples = total_samples,
        recovery_rate = recovery_rate,
    ))
    # NOTE(review): the file gets an .xlsx suffix but is written with to_csv —
    # confirm which format is actually wanted.
    df.to_csv(project_dir.joinpath('barcode_recovery_statistics.xlsx'))
    print(df)
| sims-lab/cloneseq | summarize_cloneseq_results.py | summarize_cloneseq_results.py | py | 6,333 | python | en | code | 0 | github-code | 90 |
70386599016 | # from trie import Trie
from DB.trie import Trie
from os import getcwd,walk
from DB.archiveDB import ArchiveDB
def initializeDB():
    """Load every archive file under <cwd>/2021-archive/RFC and return a Trie
    built from the resulting ArchiveDB contents."""
    archive_root = getcwd() + '/2021-archive'
    rfc_path = archive_root + '/RFC'
    initialize_from_directories(rfc_path)
    return insert_archiveDB_to_trie()
def initialize_from_directories(path):
    """Load every file in the directory tree rooted at *path*.

    os.walk already traverses the whole tree, so a single pass visits each
    file exactly once.  The original additionally recursed manually into
    every subdirectory, which processed nested files repeatedly, and joined
    file names against the top-level *path* instead of the directory that
    actually contains them.
    """
    for subdir, dirs, files in walk(path):
        for file in files:
            initialize_from_file(subdir + '/' + file)
def initialize_from_file(file_path):
    """Add every line of *file_path* to the ArchiveDB, tagged with its source path."""
    archiveDB = ArchiveDB()
    try:
        with open(file_path, encoding='utf-8') as source:
            # Iterate the file lazily; each line keeps its trailing newline.
            for line in source:
                archiveDB.add_line(line, file_path)
    except IOError as e:
        print("Error:", e)
def insert_archiveDB_to_trie():
    """Copy every stored text from the ArchiveDB into a fresh Trie and return it."""
    trie = Trie()
    for id, text_and_source in ArchiveDB().get_db().items():
        trie.insert_text(text_and_source.get_text(), id)
    return trie
| ChavaIsrael/Auto-Complete | DB/initializationDB.py | initializationDB.py | py | 1,226 | python | en | code | 0 | github-code | 90 |
def dfs(graph, v, visited):
    """Depth-first search over an adjacency-list graph, printing nodes in
    visit order (space separated, no trailing newline)."""
    # Entering the call *is* the visit: print the node and mark it.
    print(v, end=" ")
    visited[v] = True
    # Recurse into each unvisited neighbour, in adjacency-list order.
    for neighbour in graph[v]:
        if not visited[neighbour]:
            dfs(graph, neighbour, visited)
# Nodes are numbered 1..8; index 0 is unused padding.
# Adjacency lists of an unweighted graph.
graph = [[], [2, 3, 8], [1, 7], [1, 4, 5], [3, 5], [3, 4], [7], [2, 6, 8],
        [1, 7]]
visited = [False] * 9
dfs(graph, 1, visited)
# Basic DFS code from p.142 of the book.
# 정답은 1 2 7 6 8 3 4 5 | ldgeao99/Python-Algorithm-Study | 이것이 코딩 테스트다/CHAPTER 05 - DFS BFS/5-8(DFS Basic).py | 5-8(DFS Basic).py | py | 691 | python | ko | code | 0 | github-code | 90 |
41473147454 | from flask import Flask, render_template, request
from flask_cors import CORS, cross_origin
import assignment
app = Flask('app')
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
@app.route('/stats', methods = ['GET'])
@cross_origin()
def get_oregon_trail_stats():
    """Report the current trail state (day, distance, food, health) as JSON."""
    return {
        'day': assignment.day,
        'distance': assignment.distance,
        'food': assignment.food,
        'health': assignment.health,
    }
@app.route('/action', methods = ['POST'])
@cross_origin()
def get_oregon_trail_result():
    """Apply one player action from the POSTed JSON and return the output lines."""
    payload = request.get_json(force=True)
    assignment.clear_outputs()
    # Map action names onto the game handlers; an unknown action is a no-op.
    handlers = {
        'travel': assignment.travel,
        'rest': assignment.rest,
        'hunt': assignment.hunt,
        'status': assignment.status,
        'help': assignment.help,
    }
    handler = handlers.get(payload['action'])
    if handler is not None:
        handler()
    print(assignment.outputs)
    return {'results': assignment.outputs}
@app.route('/')
def index():
    # Serve the single-page front end; all game state lives server-side.
    return render_template('index.html')
app.run(host='0.0.0.0', port=8080)
| andreybutenko/oregon-trail-flask | main.py | main.py | py | 1,106 | python | en | code | 0 | github-code | 90 |
22762077739 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from dragon.vm import torch
from dragon.vm.torch import nn
from seetadet.core.config import cfg
from seetadet.data.targets.ssd import AnchorTargets
from seetadet.ops.build import build_loss
from seetadet.ops.conv import ConvNorm2d
class SSDHead(nn.Module):
    """SSD detection head: one classification conv and one box-regression conv
    per input feature level, plus the training-time target/loss computation."""

    def __init__(self, in_dims):
        super(SSDHead, self).__init__()
        self.targets = AnchorTargets()
        # Parallel per-level predictor lists, filled in the loop below.
        self.cls_score = nn.ModuleList()
        self.bbox_pred = nn.ModuleList()
        self.num_classes = len(cfg.MODEL.CLASSES)
        conv_module = nn.Conv2d
        if cfg.FPN.CONV == 'SepConv2d':
            conv_module = functools.partial(ConvNorm2d, conv_type='SepConv2d')
        conv_module = functools.partial(conv_module, kernel_size=3, padding=1)
        for i, dim in enumerate(in_dims):
            # Each spatial cell predicts num_anchors boxes at this level.
            num_anchors = self.targets.generator.num_cell_anchors(i)
            self.cls_score.append(conv_module(dim, num_anchors * self.num_classes))
            self.bbox_pred.append(conv_module(dim, num_anchors * 4))
        # Sum-reduced so losses can be normalized by the sampled-box count in get_losses.
        self.cls_loss = nn.CrossEntropyLoss(ignore_index=-1, reduction='sum')
        self.bbox_loss = build_loss(cfg.SSD.BBOX_REG_LOSS_TYPE)

    def get_outputs(self, inputs):
        """Return raw class scores (N, A, num_classes) and box deltas (N, A, 4),
        concatenated over all feature levels."""
        features = list(inputs['features'])
        cls_score, bbox_pred = [], []
        for i, x in enumerate(features):
            # NCHW -> NHWC, then flatten each image's predictions into one row.
            cls_score.append(self.cls_score[i](x).permute(0, 2, 3, 1).flatten_(1))
            bbox_pred.append(self.bbox_pred[i](x).permute(0, 2, 3, 1).flatten_(1))
        cls_score = torch.cat(cls_score, 1) if len(features) > 1 else cls_score[0]
        bbox_pred = torch.cat(bbox_pred, 1) if len(features) > 1 else bbox_pred[0]
        cls_score = cls_score.reshape_((0, -1, self.num_classes))
        bbox_pred = bbox_pred.reshape_((0, -1, 4))
        return {'cls_score': cls_score, 'bbox_pred': bbox_pred}

    def get_losses(self, inputs, targets):
        """Return the classification and box-regression losses, normalized by
        the number of sampled boxes (``bbox_inds``)."""
        cls_score = inputs['cls_score'].flatten_(0, 1)
        bbox_pred = inputs['bbox_pred'].flatten_(0, 1)
        # Regression loss only over the sampled (foreground) anchors.
        bbox_pred = bbox_pred[targets['bbox_inds']]
        cls_loss = self.cls_loss(cls_score, targets['labels'])
        bbox_loss = self.bbox_loss(bbox_pred, targets['bbox_targets'],
                                   targets['bbox_anchors'])
        normalizer = targets['bbox_inds'].size(0)
        cls_loss_weight = 1.0 / normalizer
        bbox_loss_weight = cfg.SSD.BBOX_REG_LOSS_WEIGHT / normalizer
        cls_loss = cls_loss.mul_(cls_loss_weight)
        bbox_loss = bbox_loss.mul_(bbox_loss_weight)
        return {'cls_loss': cls_loss, 'bbox_loss': bbox_loss}

    def forward(self, inputs):
        outputs = self.get_outputs(inputs)
        cls_score = outputs['cls_score']
        if self.training:
            # Detached softmax scores feed the target computation -- presumably
            # for score-aware sampling inside AnchorTargets; confirm there.
            cls_score_data = nn.functional.softmax(cls_score.data, dim=2)
            targets = self.targets.compute(cls_score=cls_score_data, **inputs)
            logits = {'cls_score': cls_score.float(),
                      'bbox_pred': outputs['bbox_pred'].float()}
            return self.get_losses(logits, targets)
        else:
            # Inference: return probabilities and raw deltas for post-processing.
            cls_score = nn.functional.softmax(cls_score, dim=2, inplace=True)
            return {'cls_score': cls_score.float(),
                    'bbox_pred': outputs['bbox_pred'].float()}
| seetaresearch/seetadet | seetadet/models/dense_heads/ssd.py | ssd.py | py | 3,428 | python | en | code | 1 | github-code | 90 |
42528108891 | import cv2 as cv
# Load the test image and force it to 700x600 (aspect ratio is NOT preserved).
imag=cv.imread('pmo.jpg')
img=cv.resize(imag,(700,600)) #without considering the aspect ratio
cv.imshow('resized',img)
cv.imshow('image',img)
# Haar cascades operate on grayscale input.
gray=cv.cvtColor(img,cv.COLOR_BGR2GRAY)
cv.imshow('gray',gray)
haar_cascade =cv.CascadeClassifier('haar_face.xml')
# scaleFactor/minNeighbors trade detection recall against false positives.
faces_rect=haar_cascade.detectMultiScale(gray,scaleFactor=1.1,minNeighbors=5)
print(f'number of faces found{len(faces_rect)}')
# Draw a green rectangle around each detected face.
for (x,y,w,h) in faces_rect:
    img = cv.rectangle(img,(x,y),(x+w,y+h),(90,250,100),4)
    cv.imshow('img',img)
cv.waitKey(0)
43121606277 | from django import forms
from django.contrib.auth.models import User
from django.contrib.admin import widgets
from app.librarys.models import Librarys, LibrarysStorage, Librarian
from app.librarys.validators import validation_librarys_address, validation_librarys_name
class LibrarysForm(forms.ModelForm):
    """Create/edit form for a library, with custom name/address validation and
    a checkbox-multiple selector for the librarians assigned to it."""

    librarys_name = forms.CharField(
        required=True,
        min_length=4,
        label='Название библиотеки',
        validators=[validation_librarys_name]
    )
    librarys_address = forms.CharField(
        required=True,
        min_length=10,
        label='Адрес библиотеки',
        validators=[validation_librarys_address]
    )
    # NOTE(review): this queryset and the derived choices are evaluated once,
    # at class-definition (import) time, so librarians added later will not
    # appear until the process restarts. A ModelMultipleChoiceField with a
    # lazy queryset would avoid that -- confirm before changing, since it
    # also changes the submitted value type.
    librarian_query = Librarian.objects.all()
    librarian_array = [
        (librarian.id, f'{librarian.last_name} {librarian.first_name} {librarian.father_name}') for librarian in librarian_query
    ]
    librarian = forms.MultipleChoiceField(
        widget=forms.CheckboxSelectMultiple(),
        choices=librarian_array,
        required=True,
        label='Библиотекари'
    )

    class Meta:
        model = Librarys
        fields = ['librarys_name', 'librarys_address', 'description', 'librarian', 'id_librarys_storage', 'library_img']
        widgets = {
            'librarys_name': forms.TextInput(attrs={'class': "librarys_text"}),
            'librarys_address': forms.TextInput(attrs={'class': "librarys_text"}),
            'description': forms.Textarea(attrs={'class': "librarys_textarea"}),
        }
| IgorCurukalo/first1 | first/app/librarys/forms.py | forms.py | py | 1,528 | python | en | code | 0 | github-code | 90 |
import random

# ERC-20 contract address of the Exceed token being air-dropped.
exceed_smart_contract:str = '0x1eae15d9f4fa16f5278d02d2f8bda8b0dcd31f71'
#max_drop_per_person = int(input('What is the maximum amount of Exceed are we are dropping per person here?'))

# Draw 100 random drop amounts in [0, 1200), rounded to 8 decimal places.
drop_amounts = [round(random.random() * 1200, 8) for _ in range(100)]

# The airdrop tool wants a comma-separated list with no whitespace.
final_list_values = str(drop_amounts).replace(" ", "")

print('Copy and Paste this into the "ERC20 Address" box: {}'.format(exceed_smart_contract))
print("Printing out comma separated list...")
print(final_list_values)
| Exceed-Arthur/airdroptool | arbitrary_values.py | arbitrary_values.py | py | 573 | python | en | code | 0 | github-code | 90 |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 17 18:25:00 2020

@author: Rajesh
"""

'''
Pie chart demo: the same expense breakdown rendered with progressively
more pie() options (percentage format, shadow, explode, start angle).
All calls draw onto the same axes; plt.savefig() writes the final state.
'''

import matplotlib.pyplot as plt

# Monthly expenses and their category labels (parallel lists).
exp_vals = [1400,600,300,410,250]
exp_labels = ['Home Rent','Food','Phone/Internet bill','Car','Other Utilities']

plt.axis()
plt.pie(exp_vals,labels=exp_labels , radius=1.5 , autopct='%0.2f%%') # percentages with 2 decimal places
plt.pie(exp_vals,labels=exp_labels , radius=1.5 , autopct='%0.0f%%') # whole-number percentages
plt.pie(exp_vals,labels=exp_labels , radius=1.5 , autopct='%0.0f%%',shadow=True)

# explode pulls the listed wedges (Food, Other Utilities) out of the pie.
plt.pie(exp_vals,labels=exp_labels , radius=1.5 , autopct='%0.0f%%',shadow=True , explode=[0,0.2,0,0,0.1])

# startangle rotates where the first wedge begins (degrees, counter-clockwise).
plt.pie(exp_vals,labels=exp_labels , radius=1.5 , autopct='%0.0f%%',\
        shadow=True , explode=[0,0.2,0,0,0.1],startangle=45)
plt.pie(exp_vals,labels=exp_labels , radius=1.5 , autopct='%0.0f%%',\
        shadow=True , explode=[0,0.2,0,0,0.2],startangle=180)

# Earlier savefig experiments (PNG/JPG, padding, transparency), kept for reference:
# plt.savefig('E:\CodeBasics_Pandas\Matplotlib_CB\Pie_Chart.png')
# plt.savefig('E:\CodeBasics_Pandas\Matplotlib_CB\Pie_Chart2.png' ,bbox_inches='tight')
# plt.savefig('E:\CodeBasics_Pandas\Matplotlib_CB\Pie_Chart3.png' ,bbox_inches='tight',pad_inches=0.5)
# plt.savefig('E:\CodeBasics_Pandas\Matplotlib_CB\Pie_Chart4.png' ,bbox_inches='tight',pad_inches=0.5,transparent=True)
# plt.savefig('E:\CodeBasics_Pandas\Matplotlib_CB\Pie_Chart5.jpg' ,bbox_inches='tight')
# bbox_inches='tight' trims margins so the enlarged (radius=1.5) pie is not clipped.
plt.savefig('E:\CodeBasics_Pandas\Matplotlib_CB\Pie_Chart5.pdf' ,bbox_inches='tight')
| Rajesh-sharma92/FTSP_2020 | CodeBasics_Pandas/Matplotlib_CB/Pie_Chart_Matplotlib.py | Pie_Chart_Matplotlib.py | py | 1,768 | python | en | code | 3 | github-code | 90 |
4964488822 | def smooth_grid_table() :
import dismod_at
import copy
import collections
#
file_name = 'example.db'
connection = dismod_at.create_connection(
file_name, new = True, readonly = False
)
cursor = connection.cursor()
#
# create smooth table
ptype = 'integer primary key'
col_name2type = collections.OrderedDict( [
('smooth_name', 'text' ),
('n_age', 'integer' ),
('n_time', 'integer' ),
('mulstd_value_prior_id', 'integer' ),
('mulstd_dage_prior_id', 'integer' ),
('mulstd_dtime_prior_id', 'integer' )
] )
col_name = list(col_name2type.keys())
col_type = list(col_name2type.values())
row_list = [
[ 'constant', 1, 1, 1, 1, 1],
[ 'age_only', 3, 1, 1, 1, 1],
[ 'time_only', 1, 2, 1, 1, 1],
[ 'bilinear', 3, 2, 1, 1, 1]
]
tbl_name = 'smooth'
dismod_at.create_table(connection, tbl_name, col_name, col_type, row_list)
#
# smooth_grid table column names
col_name2type = collections.OrderedDict( [
('smooth_id', 'integer' ),
('age_id', 'integer' ),
('time_id', 'integer' ),
('value_prior_id', 'integer' ),
('dage_prior_id', 'integer' ),
('dtime_prior_id', 'integer' ),
('const_value', 'real' )
] )
col_name = list(col_name2type.keys())
col_type = list(col_name2type.values())
#
# smooth_grid table values
row_list = list()
default = [
3, # smooth_id (smooth_id == 3 is bilinear)
None, # age_id (age_id index is 1 in default)
None, # time_id (time_id index is 2 in default)
1, # value_prior_id
2, # dage_prior_id
3, # dtime_prior_id
None # const_value
]
age_time_list = list()
for age_id in [0, 1, 2] : # n_age is 3
for time_id in [0, 1] : # n_time is 2
default[1] = age_id
default[2] = time_id
row = copy.copy(default)
if age_id == 2 :
row[4] = None # dage_prior_id null for this case
if time_id == 1 :
row[5] = None # dtime_prior_id null for this case
row_list.append( row )
age_time_list.append( (age_id, time_id) )
#
# write the table
tbl_name = 'smooth_grid'
dismod_at.create_table(connection, tbl_name, col_name, col_type, row_list)
#
# check values in the table
columns = ','.join(col_name)
columns = 'smooth_grid_id,' + columns
cmd = 'SELECT ' + columns + ' FROM smooth_grid'
cmd += ' JOIN smooth USING(smooth_id) '
cmd += ' WHERE smooth_name = "bilinear"'
count = 0
cursor = connection.cursor()
for row in cursor.execute(cmd) :
assert len(row) == 8
assert row[0] == count
assert row[1] == 3
assert row[2] == age_time_list[count][0]
assert row[3] == age_time_list[count][1]
assert row[4] == 1
if row[2] == 2 :
assert row[5] == None
else :
assert row[5] == 2
if row[3] == 1 :
assert row[6] == None
else :
assert row[6] == 3
assert row[7] == None
count += 1
assert count == len(row_list)
#
connection.close()
print('smooth_grid_table: OK')
# END PYTHON
| bradbell/dismod_at | example/table/smooth_grid_table.py | smooth_grid_table.py | py | 3,507 | python | en | code | 6 | github-code | 90 |
18731104670 | # This code is written by harsh.
def uidCheck(s):
    """Validate a UID string (HackerRank 'Validating UID' rules).

    A UID is valid iff it is exactly 10 characters long, strictly
    alphanumeric, has no repeated character, and contains at least
    2 uppercase letters and at least 3 digits.

    Improvements over the original: the O(n^2) nested duplicate scan is
    replaced with a set-length comparison, and the manual counters with
    generator sums.
    """
    if len(s) != 10:
        return False
    if not s.isalnum():
        return False
    if len(set(s)) != len(s):  # every character must be unique
        return False
    upper = sum(1 for c in s if c.isupper())
    digits = sum(1 for c in s if c.isdigit())
    return upper >= 2 and digits >= 3
if __name__ == "__main__":
    # First line: number of UIDs; then one UID per line.
    n = int(input())
    for _ in range(n):
        uid = input()
        if uidCheck(uid):
            print("Valid")
        else:
            print("Invalid")
| harshsinghs1058/python_hackerrank_solutions | Validating_UID.py | Validating_UID.py | py | 692 | python | en | code | 1 | github-code | 90 |
71027570217 | from sklearn.ensemble import RandomForestRegressor
from sklearn.utils.validation import check_is_fitted
from joblib import Parallel, delayed
from sklearn.ensemble._base import _partition_estimators
import threading
import numpy as np
class RandomForestRegressor2(RandomForestRegressor):
    """RandomForestRegressor whose ``predict`` can also return the per-sample
    standard deviation across the ensemble's trees (an uncertainty estimate).

    NOTE(review): the positional ``super().__init__`` call below depends on
    this exact parameter order in the installed scikit-learn version --
    confirm against the version the project pins.
    """

    def __init__(self,
                 n_estimators=10,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 min_impurity_decrease=0.,
                 min_impurity_split=None,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        super(RandomForestRegressor2, self).__init__(
            n_estimators,
            criterion,
            max_depth,
            min_samples_split,
            min_samples_leaf,
            min_weight_fraction_leaf,
            max_features,
            max_leaf_nodes,
            min_impurity_decrease,
            min_impurity_split,
            bootstrap,
            oob_score,
            n_jobs,
            random_state,
            verbose,
            warm_start)

    def predict(self, X, return_std=False):
        """Predict with the forest.

        When ``return_std`` is True, return ``(mean, std)`` of the individual
        trees' predictions; otherwise behave exactly like the parent class.
        """
        if return_std:
            check_is_fitted(self, 'estimators_')
            # Check data
            X = self._validate_X_predict(X)

            # Assign chunk of trees to jobs
            n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)

            # Collect every tree's predictions: shape (n_estimators, n_samples[, n_outputs]).
            y_pred = np.array(Parallel(n_jobs=n_jobs, verbose=self.verbose, backend="threading")(
                delayed(e.predict)(X) for e in self.estimators_))
            # Mean/std across the ensemble axis.
            ypred_mean = np.mean(y_pred, axis=0)
            ypred_std = np.std(y_pred, axis=0)
            if len(ypred_std.shape) > 1:
                # Multi-output case: report the largest std across outputs.
                ypred_std = np.max(ypred_std, 1)
            return ypred_mean, ypred_std
        else:
            return super(RandomForestRegressor2, self).predict(X)
| KastnerRG/sherlock | src/RandomForest.py | RandomForest.py | py | 2,532 | python | en | code | 6 | github-code | 90 |
9777450270 | from PyQt5.QtCore import pyqtSignal, Qt
from brickv.plugin_system.plugin_base import PluginBase
from brickv.plugin_system.plugins.nfc_rfid.ui_nfc_rfid import Ui_NFCRFID
from brickv.bindings.bricklet_nfc_rfid import BrickletNFCRFID
from brickv.async_call import async_call
from brickv.spin_box_hex import SpinBoxHex
class NFCRFID(PluginBase, Ui_NFCRFID):
qtcb_state = pyqtSignal(int, bool)
def __init__(self, *args):
PluginBase.__init__(self, BrickletNFCRFID, *args)
self.setupUi(self)
self.nfc = self.device
self.qtcb_state.connect(self.cb_state)
self.nfc.register_callback(self.nfc.CALLBACK_STATE_CHANGED,
self.qtcb_state.emit)
self.label_id.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.write_page_was_clicked = False
self.key_read_spinbox = []
for i in range(6):
sb = SpinBoxHex()
sb.setRange(0, 255)
self.key_read_spinbox.append(sb)
self.widget_read_spinbox.layout().addWidget(sb)
self.key_write_spinbox = []
for i in range(16):
sb = SpinBoxHex()
sb.setRange(0, 255)
self.key_write_spinbox.append(sb)
if i < 4:
self.layout_write1.addWidget(sb)
elif i < 8:
self.layout_write2.addWidget(sb)
elif i < 12:
self.layout_write3.addWidget(sb)
else:
self.layout_write4.addWidget(sb)
self.scan_clicked_type = -1
self.read_page_clicked_first_page = 0
self.read_page_clicked_page_range = ''
self.write_page_clicked_first_page = 0
self.write_page_clicked_page_range = ''
self.write_page_clicked_data = []
doc = self.textedit_read_page.document()
font = doc.defaultFont()
font.setFamily('Courier New')
doc.setDefaultFont(font)
self.button_scan.clicked.connect(self.scan_clicked)
self.button_read_page.clicked.connect(self.read_page_clicked)
self.button_write_page.clicked.connect(self.write_page_clicked)
self.combo_box_tag_type.currentIndexChanged.connect(self.tag_type_changed)
self.spinbox_read_page.valueChanged.connect(self.page_changed)
self.index0_show = [self.widget_read_spinbox, self.label_read_key, self.combobox_read_key]
self.index1_hide = [self.widget_read_spinbox, self.label_read_key, self.combobox_read_key]
self.index2_hide = [self.widget_read_spinbox, self.label_read_key, self.combobox_read_key]
self.disable = [self.widget_read_spinbox, self.label_read_key, self.combobox_read_key, self.label_read_page, self.spinbox_read_page, self.button_read_page, self.button_write_page]
self.tag_type_changed(0)
def get_current_page_range(self):
tt = self.combo_box_tag_type.currentIndex()
page = self.spinbox_read_page.value()
page_range = ''
if tt == self.nfc.TAG_TYPE_MIFARE_CLASSIC:
page_range = '{0}'.format(page)
elif tt == self.nfc.TAG_TYPE_TYPE1:
page_range = '{0}-{1}'.format(page, page+1)
elif tt == self.nfc.TAG_TYPE_TYPE2:
page_range = '{0}-{1}'.format(page, page+3)
return page_range
def tag_type_changed(self, index):
s = ''
self.label_id.setText(s)
self.textedit_read_page.setPlainText(s)
if index == self.nfc.TAG_TYPE_MIFARE_CLASSIC:
for show in self.index0_show:
show.show()
elif index in (self.nfc.TAG_TYPE_TYPE1, self.nfc.TAG_TYPE_TYPE2):
for hide in self.index1_hide:
hide.hide()
for sp in self.key_write_spinbox:
sp.setEnabled(False)
for disable in self.disable:
disable.setEnabled(False)
self.page_changed(self.spinbox_read_page.value())
def page_changed(self, _page):
text_read = 'Read Page ' + self.get_current_page_range()
text_write = 'Write Page ' + self.get_current_page_range()
self.button_read_page.setText(text_read)
self.button_write_page.setText(text_write)
def scan_clicked(self):
t = self.combo_box_tag_type.currentIndex()
self.scan_clicked_type = t
self.nfc.request_tag_id(t)
def read_page_clicked(self):
page = self.spinbox_read_page.value()
self.read_page_clicked_first_page = page
self.read_page_clicked_page_range = self.get_current_page_range()
if self.scan_clicked_type == self.nfc.TAG_TYPE_MIFARE_CLASSIC:
key_number = self.combobox_read_key.currentIndex()
key = []
for sb in self.key_read_spinbox:
key.append(sb.value())
self.nfc.authenticate_mifare_classic_page(page, key_number, key)
else:
self.nfc.request_page(page)
def write_page_clicked(self):
self.write_page_was_clicked = True
page = self.spinbox_read_page.value()
self.write_page_clicked_first_page = page
self.write_page_clicked_page_range = self.get_current_page_range()
self.write_page_clicked_data = []
for sp in self.key_write_spinbox:
self.write_page_clicked_data.append(sp.value())
if self.scan_clicked_type == self.nfc.TAG_TYPE_MIFARE_CLASSIC:
key_number = self.combobox_read_key.currentIndex()
key = []
for sb in self.key_read_spinbox:
key.append(sb.value())
self.nfc.authenticate_mifare_classic_page(page, key_number, key)
else:
self.nfc.write_page(page, self.write_page_clicked_data)
def start(self):
pass
def stop(self):
pass
def cb_state(self, state, idle):
if state & (self.nfc.STATE_ERROR & ~self.nfc.STATE_IDLE):
self.tag_type_changed(self.combo_box_tag_type.currentIndex())
if (state & 0xF) == self.nfc.STATE_REQUEST_TAG_ID:
s = 'Could not find {0} tag'.format(self.combo_box_tag_type.currentText())
self.label_id.setText(s)
elif (state & 0xF) == self.nfc.STATE_AUTHENTICATING_MIFARE_CLASSIC_PAGE:
s = 'Error: Could not authenticate page {0}'.format(self.read_page_clicked_page_range)
self.textedit_read_page.setPlainText(s)
elif (state & 0xF) == self.nfc.STATE_WRITE_PAGE:
self.write_page_was_clicked = False
s = 'Error: Could not write page {0}'.format(self.write_page_clicked_page_range)
self.textedit_read_page.setPlainText(s)
elif (state & 0xF) == self.nfc.STATE_REQUEST_PAGE:
s = 'Error: Could not read page {0}'.format(self.read_page_clicked_page_range)
self.textedit_read_page.setPlainText(s)
elif state & self.nfc.STATE_IDLE:
if (state & 0xF) == self.nfc.STATE_REQUEST_TAG_ID:
async_call(self.nfc.get_tag_id, None, self.get_tag_id_async, self.increase_error_count)
elif (state & 0xF) == self.nfc.STATE_AUTHENTICATING_MIFARE_CLASSIC_PAGE:
if self.write_page_was_clicked:
self.write_page_was_clicked = False
self.nfc.write_page(self.write_page_clicked_first_page, self.write_page_clicked_data)
else:
self.nfc.request_page(self.read_page_clicked_first_page)
elif (state & 0xF) == self.nfc.STATE_REQUEST_PAGE:
async_call(self.nfc.get_page, None, self.get_page_async, self.increase_error_count)
elif (state & 0xF) == self.nfc.STATE_WRITE_PAGE:
self.write_page_was_clicked = False
s = 'Successfully wrote page {0}'.format(self.write_page_clicked_page_range)
self.textedit_read_page.setPlainText(s)
def get_page_async(self, page):
if self.scan_clicked_type == self.nfc.TAG_TYPE_TYPE2:
s = 'Page {0}: '.format(self.read_page_clicked_first_page)
s += '{0:02X} {1:02X} {2:02X} {3:02X}\n'.format(*page[0:4])
s += 'Page {0}: '.format(self.read_page_clicked_first_page+1)
s += '{0:02X} {1:02X} {2:02X} {3:02X}\n'.format(*page[4:8])
s += 'Page {0}: '.format(self.read_page_clicked_first_page+2)
s += '{0:02X} {1:02X} {2:02X} {3:02X}\n'.format(*page[8:12])
s += 'Page {0}: '.format(self.read_page_clicked_first_page+3)
s += '{0:02X} {1:02X} {2:02X} {3:02X}'.format(*page[12:16])
elif self.scan_clicked_type == self.nfc.TAG_TYPE_TYPE1:
s = 'Page {0}: '.format(self.read_page_clicked_first_page)
s += '{0:02X} {1:02X} {2:02X} {3:02X} {4:02X} {5:02X} {6:02X} {7:02X}\n'.format(*page[0:8])
s += 'Page {0}: '.format(self.read_page_clicked_first_page+1)
s += '{0:02X} {1:02X} {2:02X} {3:02X} {4:02X} {5:02X} {6:02X} {7:02X}'.format(*page[8:16])
elif self.scan_clicked_type == self.nfc.TAG_TYPE_MIFARE_CLASSIC:
s = 'Page {0}: '.format(self.read_page_clicked_first_page)
s += '{0:02X} {1:02X} {2:02X} {3:02X} {4:02X} {5:02X} {6:02X} {7:02X} {8:02X} {9:02X} {10:02X} {11:02X} {12:02X} {13:02X} {14:02X} {15:02X}'.format(*page[0:16])
else:
return
self.textedit_read_page.setPlainText(s)
for i, sp in enumerate(self.key_write_spinbox):
sp.setValue(page[i])
def get_tag_id_async(self, ret):
if self.scan_clicked_type != ret.tag_type:
return
if ret.tid_length == 4:
s = 'Found tag with ID <b>{0:02X} {1:02X} {2:02X} {3:02X}</b>'.format(*ret.tid)
elif ret.tid_length == 7:
s = 'Found tag with ID <b>{0:02X} {1:02X} {2:02X} {3:02X} {4:02X} {5:02X} {6:02X}</b>'.format(*ret.tid)
else:
s = 'Found tag with unsupported ID length ({0})'.format(ret.tid_length)
self.label_id.setText(s)
for sp in self.key_write_spinbox:
sp.setEnabled(True)
for disable in self.disable:
disable.setEnabled(True)
def destroy(self):
pass
@staticmethod
def has_device_identifier(device_identifier):
return device_identifier == BrickletNFCRFID.DEVICE_IDENTIFIER
| Tinkerforge/brickv | src/brickv/plugin_system/plugins/nfc_rfid/nfc_rfid.py | nfc_rfid.py | py | 10,335 | python | en | code | 18 | github-code | 90 |
def resolve():
    """Decide whether K is reachable from the given numbers.

    Reads "N K" and then N integers; prints POSSIBLE iff K does not exceed
    the maximum value and is a multiple of the gcd of all values (when K is
    below the maximum), or K is itself one of the values.
    """
    import math
    from functools import reduce
    _, target = [int(tok) for tok in input().split()]
    values = [int(tok) for tok in input().split()]
    # gcd of all values, folded left-to-right.
    g = reduce(math.gcd, values)
    if max(values) > target:
        answer = 'POSSIBLE' if target % g == 0 else 'IMPOSSIBLE'
    else:
        answer = 'POSSIBLE' if target in values else 'IMPOSSIBLE'
    print(answer)


if __name__ == "__main__":
    resolve()
| Aasthaengg/IBMdataset | Python_codes/p03651/s927744113.py | s927744113.py | py | 472 | python | en | code | 0 | github-code | 90 |
8880338037 | """
autor: Valentina Garrido
Main game module
"""
import glfw
from OpenGL.GL import *
import sys
import scene_graph_3D as sg
import easy_shaders as es
import lighting_shaders as ls
import basic_shapes as bs
from model import *
from controller import Controller
import basic_shapes_extended as bs_ext
from models.Enemy import Enemy
from models.gameCameras import FollowCamera, FrontCamera, EagleCamera
from models.platforms.platform import FakePlatform
import lights as light
from models.staticViewObjects import Text3D
from ex_lighting_texture2 import createDice
if __name__ == '__main__':
if len(sys.argv) == 1:
print("Using the csv platform file from the homework example.")
csv_file = "ex_structure.csv"
else:
csv_file = sys.argv[1]
# Initialize glfw
if not glfw.init():
sys.exit()
width = 558
height = 992
window = glfw.create_window(width, height, 'Cat Jump 3D', None, None)
if not window:
glfw.terminate()
sys.exit()
glfw.make_context_current(window)
controlador = Controller()
glfw.set_key_callback(window, controlador.on_key)
# Assembling the shader program (pipeline) with both shaders
#pipeline = es.SimpleModelViewProjectionShaderProgram()
#texture_pipeline = es.SimpleTextureModelViewProjectionShaderProgram()
lightShaderProgram = ls.SimpleTexturePhongShaderProgram()
simpleLightShaderProgram = ls.SimplePhongShaderProgram()
#lightShaderProgram = es.SimpleTextureGouraudShaderProgram()
# Telling OpenGL to use our shader program
#glUseProgram(texture_pipeline.shaderProgram)
# Setting up the clear screen color
glClearColor(0.85, 0.85, 0.85, 1.0)
# As we work in 3D, we need to check which part is in front,
# and which one is at the back
glEnable(GL_DEPTH_TEST)
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
# Enabling transparencies
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
# Using the same view and projection matrices in the whole application
projection = tr.perspective(45, float(width) / float(height), 0.1, 100)
# TODO: use better camera classes
# Generaremos diversas cámaras.
static_view = tr.lookAt(
np.array([0, 5, 0]), # eye
np.array([0, 0, 0]), # at
np.array([0, 0, 1]) # up
)
# HACEMOS LOS OBJETOS
world = TextureScrollableWorld(csv_file)
world.enable_game_over()
controlador.set_model(world.mc)
controlador.set_world(world)
world.mc.model.set_shader(simpleLightShaderProgram)
world.lose_cg.text_3D.set_shader(simpleLightShaderProgram)
world.win_cg.text_3D.set_shader(simpleLightShaderProgram)
t0 = 0
while not glfw.window_should_close(window): # Dibujando --> 1. obtener el input
# Calculamos el dt
ti = glfw.get_time()
dt = ti - t0
t0 = ti
glfw.poll_events() # OBTIENE EL INPUT --> CONTROLADOR --> MODELOS
# Clearing the screen in both, color and depth
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
world.update(dt, ti)
glUseProgram(lightShaderProgram.shaderProgram)
# Setting all uniform shader variables
# region lighting texture
# White light in all components: ambient, diffuse and specular.
glUniform3f(glGetUniformLocation(lightShaderProgram.shaderProgram, "La"), 1.0, 1.0, 1.0)
glUniform3f(glGetUniformLocation(lightShaderProgram.shaderProgram, "Ld"), 1.0, 1.0, 1.0)
glUniform3f(glGetUniformLocation(lightShaderProgram.shaderProgram, "Ls"), 1.0, 1.0, 1.0)
# Object is barely visible at only ambient. Bright white for diffuse and specular components.
glUniform3f(glGetUniformLocation(lightShaderProgram.shaderProgram, "Ka"), 0.6, 0.6, 0.6)
glUniform3f(glGetUniformLocation(lightShaderProgram.shaderProgram, "Kd"), 0.9, 0.9, 0.9)
glUniform3f(glGetUniformLocation(lightShaderProgram.shaderProgram, "Ks"), 1.0, 1.0, 1.0)
glUniform3f(glGetUniformLocation(lightShaderProgram.shaderProgram, "lightPosition"), -5, -5, 5)
glUniform3f(glGetUniformLocation(lightShaderProgram.shaderProgram, "viewPosition"), 2.12132034, 2.12132034, 2.)
glUniform1ui(glGetUniformLocation(lightShaderProgram.shaderProgram, "shininess"), 100)
glUniform1f(glGetUniformLocation(lightShaderProgram.shaderProgram, "constantAttenuation"), 0.0001)
glUniform1f(glGetUniformLocation(lightShaderProgram.shaderProgram, "linearAttenuation"), 0.03)
glUniform1f(glGetUniformLocation(lightShaderProgram.shaderProgram, "quadraticAttenuation"), 0.01)
# endregion
# region lighting simple
glUseProgram(simpleLightShaderProgram.shaderProgram)
# White light in all components: ambient, diffuse and specular.
glUniform3f(glGetUniformLocation(simpleLightShaderProgram.shaderProgram, "La"), 1.0, 1.0, 1.0)
glUniform3f(glGetUniformLocation(simpleLightShaderProgram.shaderProgram, "Ld"), 1.0, 1.0, 1.0)
glUniform3f(glGetUniformLocation(simpleLightShaderProgram.shaderProgram, "Ls"), 1.0, 1.0, 1.0)
# Object is barely visible at only ambient. Bright white for diffuse and specular components.
glUniform3f(glGetUniformLocation(simpleLightShaderProgram.shaderProgram, "Ka"), 0.6, 0.6, 0.6)
glUniform3f(glGetUniformLocation(simpleLightShaderProgram.shaderProgram, "Kd"), 0.9, 0.9, 0.9)
glUniform3f(glGetUniformLocation(simpleLightShaderProgram.shaderProgram, "Ks"), 1.0, 1.0, 1.0)
# TO DO: Explore different parameter combinations to understand their effect!
glUniform3f(glGetUniformLocation(simpleLightShaderProgram.shaderProgram, "lightPosition"), -5, -5, 5)
glUniform3f(glGetUniformLocation(simpleLightShaderProgram.shaderProgram, "viewPosition"), 2.12132034, 2.12132034, 2.)
glUniform1ui(glGetUniformLocation(simpleLightShaderProgram.shaderProgram, "shininess"), 100)
glUniform1f(glGetUniformLocation(simpleLightShaderProgram.shaderProgram, "constantAttenuation"), 0.0001)
glUniform1f(glGetUniformLocation(simpleLightShaderProgram.shaderProgram, "linearAttenuation"), 0.03)
glUniform1f(glGetUniformLocation(simpleLightShaderProgram.shaderProgram, "quadraticAttenuation"), 0.01)
# endregion
world.draw(lightShaderProgram, projection)
world.check_collisions()
#text.draw(projection)
glfw.swap_buffers(window)
glfw.terminate() | malva28/Cat-Jump-3D | codigo/cat_jump.py | cat_jump.py | py | 6,535 | python | en | code | 0 | github-code | 90 |
3161446457 | # -*- coding: utf-8 -*-
# @Author : DevinYang(pistonyang@gmail.com)
__all__ = ['summary']
from collections import OrderedDict
import torch
import torch.nn as nn
import numpy as np
def _flops_str(flops):
preset = [(1e12, 'T'), (1e9, 'G'), (1e6, 'M'), (1e3, 'K')]
for p in preset:
if flops // p[0] > 0:
N = flops / p[0]
ret = "%.1f%s" % (N, p[1])
return ret
ret = "%.1f" % flops
return ret
def _cac_grad_params(p, w):
t, n = 0, 0
if w.requires_grad:
t += p
else:
n += p
return t, n
def _cac_conv(layer, input, output):
    """Count (trainable params, non-trainable params, FLOPs) of a Conv2d forward."""
    out_h, out_w = output.shape[-2:]
    k_h, k_w = layer.kernel_size
    in_c, out_c = layer.in_channels, layer.out_channels
    groups = layer.groups
    trainable = non_trainable = flops = 0
    weight = getattr(layer, 'weight', None)
    if hasattr(weight, 'shape'):
        t, n = _cac_grad_params(np.prod(weight.shape), weight)
        trainable += t
        non_trainable += n
        # Each output element costs 2*ic*kh*kw - 1 multiply-adds per group slice.
        flops += (2 * in_c * k_h * k_w - 1) * out_h * out_w * (out_c // groups)
    bias = getattr(layer, 'bias', None)
    if hasattr(bias, 'shape'):
        t, n = _cac_grad_params(np.prod(bias.shape), bias)
        trainable += t
        non_trainable += n
        # One extra addition per output element.
        flops += out_h * out_w * (out_c // groups)
    return trainable, non_trainable, flops
def _cac_xx_norm(layer, input, output):
    """Count (trainable params, non-trainable params, FLOPs) of a BatchNorm/GroupNorm forward."""
    trainable = non_trainable = 0
    # Affine scale/shift parameters, split by requires_grad.
    for attr in ('weight', 'bias'):
        tensor = getattr(layer, attr, None)
        if hasattr(tensor, 'shape'):
            t, n = _cac_grad_params(np.prod(tensor.shape), tensor)
            trainable += t
            non_trainable += n
    # Running statistics are buffers: never trainable.
    for attr in ('running_mean', 'running_var'):
        buf = getattr(layer, attr, None)
        if hasattr(buf, 'shape'):
            non_trainable += np.prod(buf.shape)
    # One normalization op per input element; affine doubles the work.
    flops = np.prod(input[0].shape)
    if layer.affine:
        flops *= 2
    return trainable, non_trainable, flops
def _cac_linear(layer, input, output):
    """Count (trainable params, non-trainable params, FLOPs) of a Linear forward."""
    in_f, out_f = layer.in_features, layer.out_features
    trainable = non_trainable = flops = 0
    weight = getattr(layer, 'weight', None)
    if hasattr(weight, 'shape'):
        t, n = _cac_grad_params(np.prod(weight.shape), weight)
        trainable += t
        non_trainable += n
        # One dot product of length in_f per output unit: 2*in_f - 1 ops.
        flops += (2 * in_f - 1) * out_f
    bias = getattr(layer, 'bias', None)
    if hasattr(bias, 'shape'):
        t, n = _cac_grad_params(np.prod(bias.shape), bias)
        trainable += t
        non_trainable += n
        flops += out_f
    return trainable, non_trainable, flops
@torch.no_grad()
def summary(model, x, target, return_results=False, is_splitnet=True):
    """
    Print a per-layer table (output shape, parameter count, FLOPs) for `model`.

    Args:
        model (nn.Module): model to summary
        x (torch.Tensor): input data
        target: labels forwarded to the model when `is_splitnet` is True
        return_results (bool): when True, return (total_params, total_flops)
        is_splitnet (bool): call ``model(x, target=target, mode='summary')``
            instead of a plain ``model(x)`` forward
    Returns:
        (total_params, total_flops) when `return_results` is True, else None.
    """
    # change bn work way: eval() so BatchNorm uses running stats during the probe forward
    model.eval()
    def register_hook(layer):
        def hook(layer, input, output):
            # One table row per executed leaf module, keyed "Type-<order>".
            model_name = str(layer.__class__.__name__)
            module_idx = len(model_summary)
            s_key = '{}-{}'.format(model_name, module_idx + 1)
            model_summary[s_key] = OrderedDict()
            model_summary[s_key]['input_shape'] = list(input[0].shape)
            if isinstance(output, (tuple, list)):
                model_summary[s_key]['output_shape'] = [
                    list(o.shape) for o in output
                ]
            else:
                model_summary[s_key]['output_shape'] = list(output.shape)
            tb_params = 0
            ntb__params = 0
            flops = 0
            # Only conv / norm / linear layers contribute params and FLOPs;
            # everything else is listed with zeros.
            if isinstance(layer, nn.Conv2d):
                tb_params, ntb__params, flops = _cac_conv(layer, input, output)
            elif isinstance(layer, (nn.BatchNorm2d, nn.GroupNorm)):
                tb_params, ntb__params, flops = _cac_xx_norm(
                    layer, input, output)
            elif isinstance(layer, nn.Linear):
                tb_params, ntb__params, flops = _cac_linear(
                    layer, input, output)
            model_summary[s_key]['trainable_params'] = tb_params
            model_summary[s_key]['non_trainable_params'] = ntb__params
            model_summary[s_key]['params'] = tb_params + ntb__params
            model_summary[s_key]['flops'] = flops
        # Skip pure containers so their children are not double counted.
        if not isinstance(layer, (nn.Sequential, nn.ModuleList,
                                  nn.Identity, nn.ModuleDict)):
            hooks.append(layer.register_forward_hook(hook))
    model_summary = OrderedDict()
    hooks = []
    model.apply(register_hook)
    # One probe forward pass fills model_summary via the hooks.
    if is_splitnet:
        model(x, target=target, mode='summary')
    else:
        model(x)
    # Always detach the hooks so the model is left unmodified.
    for h in hooks:
        h.remove()
    print('-' * 80)
    line_new = "{:>20} {:>25} {:>15} {:>15}".format(
        "Layer (type)", "Output Shape", "Params", "FLOPs(M+A) #")
    print(line_new)
    print('=' * 80)
    total_params = 0
    trainable_params = 0
    total_flops = 0
    for layer in model_summary:
        line_new = "{:>20} {:>25} {:>15} {:>15}".format(
            layer,
            str(model_summary[layer]['output_shape']),
            model_summary[layer]['params'],
            model_summary[layer]['flops'],
        )
        print(line_new)
        total_params += model_summary[layer]['params']
        trainable_params += model_summary[layer]['trainable_params']
        total_flops += model_summary[layer]['flops']
    param_str = _flops_str(total_params)
    flop_str = _flops_str(total_flops)
    # FLOPs were counted as multiply+add; halving gives multiply-only.
    flop_str_m = _flops_str(total_flops // 2)
    # assumes 4 bytes (fp32) per parameter -- TODO confirm for fp16 models
    param_size = total_params * 4 / (1024 ** 2)
    print('=' * 80)
    print('    Total parameters: {:,} {}'.format(total_params, param_str))
    print('    Trainable parameters: {:,}'.format(trainable_params))
    print(
        'Non-trainable parameters: {:,}'.format(total_params - trainable_params))
    print('Total flops(M) : {:,} {}'.format(total_flops // 2, flop_str_m))
    print('Total flops(M+A): {:,} {}'.format(total_flops, flop_str))
    print('-' * 80)
    print('Parameters size (MB): {:.2f}'.format(param_size))
    if return_results:
        return total_params, total_flops
if __name__ == '__main__':
    # Smoke test: summarize a grouped convolution (5 groups) on random input,
    # then list its parameter tensors for manual cross-checking.
    A = nn.Conv2d(50, 10, 3, padding=1, groups=5, bias=True)
    summary(A, torch.rand((1, 50, 10, 10)),
            target=torch.ones(1, dtype=torch.long),
            is_splitnet=False)
    for name, p in A.named_parameters():
        print(name, p.size())
| FreeformRobotics/Divide-and-Co-training | utils/summary.py | summary.py | py | 7,086 | python | en | code | 99 | github-code | 90 |
73827948778 | #!/usr/bin/env python
from time import sleep
import PySimpleGUI as sg
from matplotlib.pyplot import pause
# Usage of Graph element: a point walks along the diagonal and bounces
# between (0, 0) and (2000, 2000).
layout = [[sg.Graph(canvas_size=(500, 100), graph_bottom_left=(0, 0), graph_top_right=(2000, 2000), background_color='white', enable_events=True, key='graph')]]

window = sg.Window('Graph test', layout, finalize=True, no_titlebar=True,
                   grab_anywhere=True,)

graph = window['graph']   # type: sg.Graph

i = 0
direction = 1
lastpoint = (0, 0)
while True:
    # BUG FIX: the original used `==` (comparison) instead of `=` here, so
    # `direction` was never reassigned and the point walked past the
    # top-right corner forever instead of bouncing.
    if i == 2000:
        direction = -1
    elif i == 0:
        direction = 1
    sleep(0.1)
    i += direction
    graph.draw_line((i, i), lastpoint)
    lastpoint = (i, i)
    print(i)

window.close()
32322385999 | #!/usr/bin/env python3
import gzip
import sys
import struct
import io
import mmap
# much of this is based on https://github.com/HearthSim/UnityPack/wiki/Format-Documentation
class bytestream:
    """Sequential binary reader over a bytes-like object.

    The buffer is wrapped in a memoryview so each read narrows the view
    instead of copying the remaining data.  Naming conventions: a trailing
    'l'/'b' is little/big endian, a leading 'a' aligns to 4 bytes first,
    and 'uu' marks an unaligned 64-bit read.
    """
    def __init__(self, by):
        self._full_by = by
        self._by = memoryview(self._full_by)
    def bytes(self, n):
        """Consume and return the next n bytes (short read at end of data)."""
        ret = bytes(self._by[0:n])
        self._by = self._by[n:]
        return ret
    def peekbytes(self, n):
        """Return the next n bytes without consuming them."""
        return bytes(self._by[0:n])
    def signature(self, s):
        """If the stream starts with s, consume it and return True."""
        if isinstance(s, str):
            s = s.encode("ascii")
        if self.peekbytes(len(s)) == s:
            self.bytes(len(s))
            return True
        return False
    def u8(self): return self.bytes(1)[0]
    def u8l(self): return self.u8()
    def u8b(self): return self.u8()
    def u16l(self): return struct.unpack("<H", self.bytes(2))[0]
    def u16b(self): return struct.unpack(">H", self.bytes(2))[0]
    def u32l(self): return struct.unpack("<I", self.bytes(4))[0]
    def u32b(self): return struct.unpack(">I", self.bytes(4))[0]
    def uu64l(self): return struct.unpack("<Q", self.bytes(8))[0] # unaligned - rarely used by Unity
    def uu64b(self): return struct.unpack(">Q", self.bytes(8))[0]
    def f32l(self): return struct.unpack("<f", self.bytes(4))[0]
    def f32b(self): return struct.unpack(">f", self.bytes(4))[0]
    def f64l(self): return struct.unpack("<d", self.bytes(8))[0]
    def f64b(self): return struct.unpack(">d", self.bytes(8))[0]
    def align(self, n):
        """Skip to the next multiple of n (a power of two); padding must be zero."""
        skip = (-self.tell())&(n-1)
        assert self.zeroes(skip)
    def a32(self):
        self.align(4)
    def au64l(self):
        self.a32()
        return self.uu64l()
    def au64b(self):
        self.a32()
        return self.uu64b()
    def str(self, n):
        """Read n bytes and decode them as UTF-8."""
        return self.bytes(n).decode("utf-8")
    def strnul(self):
        """Read a NUL-terminated UTF-8 string; the terminator is consumed.

        BUG FIX: the previous implementation searched only the next 256
        bytes (`peekbytes(256).index(b'\\0')`), so any longer string raised
        ValueError.  The search window now grows geometrically until the
        whole remaining buffer has been checked.
        """
        window = 256
        while True:
            buf = self.peekbytes(window)
            l = buf.find(b'\0')
            if l >= 0:
                break
            if len(buf) < window:
                # Reached end of stream without finding a terminator.
                raise ValueError("unterminated string")
            window *= 2
        ret = self.str(l)
        self.bytes(1)
        return ret
    def str32la(self):
        """Little-endian length-prefixed UTF-8 string, then align to 4 bytes."""
        ret = self.bytes(self.u32l()).decode("utf-8")
        self.a32()
        return ret
    def str32ba(self):
        ret = self.bytes(self.u32b()).decode("utf-8")
        self.a32()
        return ret
    def str32lu(self):
        # 'u' variant: no alignment after the string.
        return self.bytes(self.u32l()).decode("utf-8")
    def str32bu(self):
        return self.bytes(self.u32b()).decode("utf-8")
    def str32lx(self):
        # 'x' variant: the string must already end on a 4-byte boundary.
        ret = self.bytes(self.u32l()).decode("utf-8")
        assert self.tell()&3 == 0
        return ret
    def str32bx(self):
        ret = self.bytes(self.u32b()).decode("utf-8")
        assert self.tell()&3 == 0
        return ret
    def zeroes(self, n):
        """Consume n bytes and return True only if they are all zero."""
        if sum(self.peekbytes(n)) == 0:
            self.bytes(n)
            return True
        return False
    def pptrl(self):
        """Read a little-endian PPtr: (file index, 4-aligned 64-bit object id)."""
        return self.u32l(), self.au64l()
    def pptrb(self):
        return self.u32b(), self.au64b()
    def tell(self):
        return len(self._full_by) - len(self._by)
    def size(self):
        return len(self._full_by)
    def remaining(self):
        return len(self._by)
files = {}  # registry of every discovered file: name -> bytes-like contents
remaining_files = []  # queue of file names not yet scanned for nested container formats
unity_objects = {} # filename -> list (unsorted) of UnityObject parsed from that asset file
class UnityObject:
    """One serialized object inside a Unity asset file."""

    def __init__(self, obj_id, obj_type, by_src, references):
        self.id = obj_id              # object's path id within its file
        self.type = obj_type          # numeric class id (see t_* constants)
        self.bytes_src = by_src       # zero-arg callable yielding raw bytes
        self.references = references  # external-file name table for PPtrs

    def bytes(self):
        """Lazily fetch this object's raw serialized bytes."""
        return self.bytes_src()

    def follow_pptr(self, pptr):
        """Resolve a (file_id, object_id) PPtr to its UnityObject.

        Raises ValueError unless exactly one object matches, like the
        original single-element unpacking did.
        """
        file_index, target_id = pptr
        matches = [obj
                   for obj in unity_objects[self.references[file_index]]
                   if obj.id == target_id]
        (found,) = matches
        return found
# Numeric object class ids, compared against UnityObject.type below.
t_Texture2D = 28
t_TextAsset = 49
t_AudioClip = 83
t_MonoBehavior = 114
t_AssetBundle = 142
t_ResourceManager = 147
t_Sprite = 213
t_VideoClip = 329
def add_file(fn, by, source, scan_file=True):
    """Register a discovered file and optionally queue it for scanning.

    *source* names the container the file came from (None for files given
    on the command line, which are logged by the caller instead).
    """
    name = fn.decode("utf-8") if isinstance(fn, bytes) else fn
    files[name] = by
    if scan_file:
        remaining_files.append(name)
    if source is not None:
        print("(" + source + ") ->", name)
def gzip_fname(by):
    """Extract the stored original-filename (FNAME) field from a gzip header.

    Returns the file name as bytes, or None when the member was compressed
    without one.  Fixes two defects of the original: it crashed with a bare
    ``1/0`` (ZeroDivisionError) on non-gzip input, and it assumed FNAME
    always starts at offset 10, ignoring an optional FEXTRA field
    (RFC 1952, FLG bit 2) that precedes it.
    """
    if by[0:2] != b"\x1F\x8B":
        raise ValueError("not a gzip stream")
    flags = by[3]
    pos = 10  # fixed-size part of the gzip member header
    if flags & 4:  # FEXTRA: 2-byte little-endian length, then payload
        xlen = by[pos] | (by[pos + 1] << 8)
        pos += 2 + xlen
    if flags & 8:  # FNAME: NUL-terminated original file name
        rest = by[pos:]
        return rest[0:rest.index(b'\0')]
    return None
def lz4_decompress(by):
    """Decompress a raw LZ4 block (sequence format, no frame header).

    Layout reference: https://ticki.github.io/blog/how-lz4-works/
    Returns a bytearray with the decompressed data.
    """
    pos = 0
    out = bytearray()

    def extended_length(total):
        # Lengths of 15 overflow into extra bytes; 255 means "keep reading".
        nonlocal pos
        while True:
            extra = by[pos]
            pos += 1
            total += extra
            if extra != 255:
                return total

    while True:
        token = by[pos]
        pos += 1
        literal_len = token >> 4
        if literal_len == 15:
            literal_len = extended_length(15)
        out += by[pos:pos + literal_len]
        pos += literal_len
        if pos >= len(by):
            # The final sequence carries only literals -- end of stream.
            return out
        offset = by[pos] | (by[pos + 1] << 8)
        pos += 2
        src = len(out) - offset
        match_len = token & 15
        if match_len == 15:
            match_len = extended_length(15)
        match_len += 4  # minimum match length in LZ4
        # Byte-by-byte copy on purpose: matches may overlap their own output.
        for _ in range(match_len):
            out.append(out[src])
            src += 1
def unity_decompress(by, flags):
    """Decompress a UnityFS block according to its compression flags.

    The low 6 bits of *flags* select the scheme: 0 means stored
    (uncompressed) and 3 means LZ4; other schemes are not implemented.

    Raises:
        NotImplementedError: for any unsupported scheme (the original
            crashed with a bare ``1/0`` ZeroDivisionError instead).
    """
    scheme = flags & 63
    if scheme == 0:
        return by
    if scheme == 3:
        return lz4_decompress(by)
    raise NotImplementedError("unsupported compression scheme %d" % scheme)
def unity_enumerate_objects(f):
    """Parse a Unity serialized asset file and return its objects.

    *f* is a seekable file-like positioned anywhere; returns a list of
    UnityObject whose bytes are fetched lazily from *f*.  Supports metadata
    versions 21 and 22 only.  Layout follows the community documentation at
    https://github.com/HearthSim/UnityPack/wiki/Format-Documentation
    """
    # --- fixed-size big-endian header ---
    f.seek(0)
    s = bytestream(f.read(48))
    metasz = s.u32b()
    filesz = s.u32b()
    version = s.u32b()
    assert version in (21,22)
    if version == 21:
        dataoffs = s.u32b()
        assert s.u32b() == 0 # big endian
    if version == 22:
        # Version 22 moved the sizes/offset to 64-bit fields after the
        # legacy 32-bit ones, which must then be zero.
        assert metasz == 0
        assert filesz == 0
        assert s.u32b() == 0 # unknown, probably padding
        metasz = s.au64b()
        filesz = s.au64b()
        dataoffs = s.au64b()
        assert s.au64b() == 0 # big endian
    # --- metadata block (little endian from here on) ---
    f.seek(s.tell())
    s = bytestream(f.read(metasz))
    s.strnul() # 2022.1.11f1
    s.u32l() # platform
    # types
    types = []
    type_trees = s.u8()
    for n in range(s.u32l()):
        cls = s.u32l()
        s.u8() # unknown
        s.u16l() # unknown
        if cls == t_MonoBehavior: s.bytes(16) # script hash
        s.bytes(16) # type hash
        types.append(cls)
    if type_trees:
        # Embedded type trees: dump-only debug path, never exercised so far.
        1/0 # untested
        n_nodes = s.u32l()
        len_strbuf = s.u32l()
        s_t = bytestream(s.bytes(n_nodes*32))
        local_strbuf = s.bytes(len_strbuf)
        # Well-known field/type names shared by all Unity files; string
        # offsets with the high bit set index into this table.
        global_strbuf = \
            b"AABB\0AnimationClip\0AnimationCurve\0AnimationState\0Array\0Base\0BitField\0bitset\0bool\0char\0ColorRGBA\0Comp" \
            b"onent\0data\0deque\0double\0dynamic_array\0FastPropertyName\0first\0float\0Font\0GameObject\0Generic Mono\0Grad" \
            b"ientNEW\0GUID\0GUIStyle\0int\0list\0long long\0map\0Matrix4x4f\0MdFour\0MonoBehaviour\0MonoScript\0m_ByteSize\0m" \
            b"_Curve\0m_EditorClassIdentifier\0m_EditorHideFlags\0m_Enabled\0m_ExtensionPtr\0m_GameObject\0m_Index\0m_IsA" \
            b"rray\0m_IsStatic\0m_MetaFlag\0m_Name\0m_ObjectHideFlags\0m_PrefabInternal\0m_PrefabParentObject\0m_Script\0m" \
            b"_StaticEditorFlags\0m_Type\0m_Version\0Object\0pair\0PPtr<Component>\0PPtr<GameObject>\0PPtr<Material>\0PPtr" \
            b"<MonoBehaviour>\0PPtr<MonoScript>\0PPtr<Object>\0PPtr<Prefab>\0PPtr<Sprite>\0PPtr<TextAsset>\0PPtr<Texture" \
            b">\0PPtr<Texture2D>\0PPtr<Transform>\0Prefab\0Quaternionf\0Rectf\0RectInt\0RectOffset\0second\0set\0short\0size\0" \
            b"SInt16\0SInt32\0SInt64\0SInt8\0staticvector\0string\0TextAsset\0TextMesh\0Texture\0Texture2D\0Transform\0Typele" \
            b"ssData\0UInt16\0UInt32\0UInt64\0UInt8\0unsigned int\0unsigned long long\0unsigned short\0vector\0Vector2f\0Vec" \
            b"tor3f\0Vector4f\0m_ScriptingClassIdentifier\0Gradient\0Type*\0int2_storage\0int3_storage\0BoundsInt\0m_Corre" \
            b"spondingSourceObject\0m_PrefabInstance\0m_PrefabAsset\0"
        def get_str(off):
            if off & 0x80000000:
                src = global_strbuf
            else:
                src = local_strbuf
            ret = src[off&0x7FFFFFFF:]
            ret = ret[:ret.index(b'\0')]
            return ret
        for n in range(n_nodes):
            version = s_t.u16l()
            depth = s_t.u8()
            is_array = s_t.u8()
            type_off = s_t.u32l()
            name_off = s_t.u32l()
            index = s_t.u32l()
            flags = s_t.u32l() # 0x4000 means align stream after this field, others unknown
            unk1 = s_t.u32l()
            unk2 = s_t.u32l()
            print("version",version,"depth",depth,"is_array",is_array,"type",get_str(type_off),"name",get_str(name_off),
                "index",index,"flags",flags,"unk1",unk1,"unk2",unk2)
    assert s.u32l() == 0
    ext_files = []
    objs = []
    # Closure factory: binds (start, len) per object so reads stay lazy.
    def get_obj_bytes(obj_start, obj_len):
        def inner():
            f.seek(obj_start)
            return f.read(obj_len)
        return inner
    # objects
    for n in range(s.u32l()):
        obj_id = s.au64l()
        if version <= 21:
            local_start = s.u32l()
        if version >= 22:
            local_start = s.au64l()
        obj_start = dataoffs + local_start
        obj_len = s.u32l()
        type_idx = s.u32l()
        obj_type = types[type_idx]
        # ext_files is shared and still empty here; it is filled below and
        # resolved lazily when follow_pptr() is called.
        objs.append(UnityObject(obj_id, obj_type, get_obj_bytes(obj_start, obj_len), ext_files))
    # adds
    for n in range(s.u32l()):
        # unclear what these are
        s.u32l()
        s.au64l()
    # external files
    for n in range(s.u32l()):
        assert s.strnul() == ""
        s.bytes(16)
        assert s.u32l() == 0
        ext_files.append(s.strnul())
    # unknown what this is
    # I've seen it only in a single game, containing the values
    # ffffffff0000000000000000000000000000000000000000000000000000000000000000000000
    # ffffffff0001000000000000000000000000000000000000000000000000000000000000000000
    for n in range(s.u32l()):
        print(s.bytes(39).hex())
        assert s.u8() == 0
    assert s.remaining() == 0
    return objs
def process_unitywebdata(by):
    """Split a UnityWebData archive into its member files via add_file().

    Keeping the whole buffer in memory for random access is acceptable
    because unitywebdata files tend to be relatively small.
    """
    s = bytestream(by)
    assert s.signature("UnityWebData1.0\0")
    # FIX: removed a dead local `files = {}` that shadowed the module-level
    # file registry and was never read or written.
    head_len = s.u32l()
    # Directory entries run from here up to head_len: (offset, size, name).
    while s.tell() != head_len:
        off = s.u32l()
        size = s.u32l()
        name = s.str(s.u32l())
        add_file(name, by[off:off + size], "UnityWebData")
def process_unityfs(by):
    """Unpack a UnityFS bundle: register member files and parse asset files.

    Compressed payload blocks are decompressed lazily, one at a time, as
    UnityFSFile readers pull data from them.
    """
    head = bytestream(by)
    assert head.signature("UnityFS\0")
    version = head.u32b()
    assert version in (6,7,8)
    head.strnul() # 5.x.x
    head.strnul() # 2022.1.11f1
    assert head.uu64b() == len(by)
    ciblock = head.u32b()  # compressed size of the directory block
    uiblock = head.u32b()  # uncompressed size of the directory block
    flags = head.u32b()
    if version >= 7:
        head.align(16)
    # The directory (block list + file list) is itself compressed.
    block = unity_decompress(head.bytes(ciblock), flags)
    if version >= 7:
        head.align(16)
    body = bytestream(block)
    body.bytes(16) # guid
    in_pos = head.tell()
    # NOTE(review): read_single_from below addresses blocks as if every
    # payload block were exactly BLOCK_SIZE bytes uncompressed; bundles with
    # a different/variable u_size would be mis-addressed -- confirm.
    BLOCK_SIZE = 131072
    BLOCK_FLAGS = 3
    class UnityFS:
        """Random-access view over the bundle's payload blocks."""
        def __init__(self, blocks):
            self.blocks = blocks
        def read_single_from(self, pos):
            # Decompress the owning block on first touch, then cache it.
            block_idx = pos//BLOCK_SIZE
            block_off = pos%BLOCK_SIZE
            block = self.blocks[block_idx]
            if isinstance(block, tuple):
                block = unity_decompress(*self.blocks[block_idx])
                self.blocks[block_idx] = block
            return block[block_off:]
        def read_from(self, pos, n):
            # Stitch together reads that cross block boundaries.
            ret = self.read_single_from(pos)
            if len(ret) < n:
                ret = bytearray(ret)
                while len(ret) < n:
                    ret += self.read_single_from(pos+len(ret))
            return ret[:n]
    class UnityFSFile:
        """Minimal file-like (seek/read) over a slice of a UnityFS."""
        def __init__(self, fs, fs_pos, fs_len):
            self.fs = fs
            self.fs_pos = fs_pos
            self.fs_len = fs_len
            self.pos = 0
        def seek(self, pos):
            self.pos = pos
        def read(self, n=-1):
            pos = self.pos
            if n == -1:
                n = self.fs_len - pos
            self.pos += n
            return self.fs.read_from(self.fs_pos + pos, n)
    # Block table: compressed payload bytes are sliced off `head` but kept
    # compressed (as (bytes, flags) tuples) until first read.
    blocks = []
    nblocks = body.u32b()
    for n in range(nblocks):
        u_size = body.u32b()
        c_size = body.u32b()
        flags = body.u16b()
        blocks.append(( head.bytes(c_size), flags ))
    fs = UnityFS(blocks)
    # File table: each entry is a named slice of the payload; flag bit 2
    # marks serialized asset files whose objects we enumerate.
    nfile = body.u32b()
    for n in range(nfile):
        offset = body.uu64b()
        size = body.uu64b()
        flags = body.u32b()
        name = body.strnul()
        assert flags&~4 == 0
        f = UnityFSFile(fs, offset, size)
        add_file(name, f, "UnityFS", False)
        if flags & 4:
            print("Extracting")
            unity_objects[name] = unity_enumerate_objects(f)
    assert body.tell() == uiblock
# Seed the registry with the files named on the command line, memory-mapped
# read-only so huge bundles are not copied into RAM.
for fn in sys.argv[1:]:
    with open(fn, "rb") as f:
        add_file(fn, mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ), None)

# Breadth-first scan: container handlers append the members they extract
# back onto remaining_files, so nested containers get unpacked too.
while remaining_files:
    fn = remaining_files.pop(0)
    print(fn)
    by = files[fn]
    by_head = bytes(by[0:64])
    if by_head.startswith(b"\x1F\x8B"):  # gzip magic number
        add_file(gzip_fname(by), gzip.open(io.BytesIO(by), mode='rb').read(), "gzip")
    elif by_head.startswith(b"UnityWebData1.0\0"):
        process_unitywebdata(by)
    elif by_head.startswith(b"UnityFS\0"):
        process_unityfs(by)
    else:
        # Unknown format: show the first bytes to aid manual identification.
        print("-> none ("+str(by[:64])+")")
def sanitize_filename(fn):
    """Make *fn* safe to use as a bare file name in the output directory.

    Replaces both kinds of path separators and neutralizes the special
    names "." and ".." (the original let those through and relied on the
    subsequent open() failing).
    """
    fn = fn.replace("/", "_").replace("\\", "_")
    if fn in (".", ".."):
        fn = "_" + fn
    return fn
# Pass 1: walk every ResourceManager object.  Currently only consumes the
# (name, PPtr) table; the commented-out code shows how entries would be
# resolved.
resmgr_objs = {}
for objs in unity_objects.values():
    for obj in objs:
        if obj.type == t_ResourceManager:
            s = bytestream(obj.bytes())
            for n in range(s.u32l()):
                # todo: if this contains any music, use it
                name = s.str32la()
                body_ref = s.pptrl()
                # if name == "audiomanager":
                #     body = obj.follow_pptr(body_ref)
                #     print(body)
# Pass 2: for every AudioClip, locate its raw payload (stored out-of-line in
# a resource file referenced by name/offset/length) and report it.
for objs in unity_objects.values():
    for obj in objs:
        # print(obj.type)
        if obj.type == t_AudioClip:
            s = bytestream(obj.bytes())
            outname = s.str32la()
            s.bytes(32) # don't know what's in here
            srcfn = s.str32la()
            srcpos = s.au64l()
            srclen = s.au64l()
            by = files[srcfn][srcpos:srcpos+srclen]
            print(outname)
            # open("out/"+sanitize_filename(outname)+".bin","wb").write(by)
# unity3d music is often aac in mp4 container
# to check contents, use
# ffprobe "$sfn" -loglevel warning -select_streams a:0 -show_entries stream=codec_name -of csv=p=0
# and to transcode to a proper sound-only container,
# ffmpeg -loglevel warning -i "$sfn" -acodec copy -vcodec none "$tfn"
32631391761 | #!/usr/bin/env python3
# Author: DMR
import os
import sys
def cube_root(num):
    """Print and return an approximate cube root of *num* via bisection.

    Fixes two infinite loops in the original: negative inputs (the
    bisection comparisons were inverted for them) and inputs in (0, 1),
    whose cube root is larger than the number itself and thus lay outside
    the original [0, num] search interval.  Solves on |num| with an upper
    bound of max(|num|, 1) and restores the sign afterwards.

    Returns:
        float: the approximation (new; the original returned None, so
        existing callers that ignore the return value are unaffected).
    """
    epsilon = 0.01
    sign = -1 if num < 0 else 1
    target = abs(num)
    low = 0.0
    high = max(target, 1.0)  # cube root of x < 1 exceeds x itself
    guess = (high + low) / 2.0
    while abs(guess ** 3 - target) >= epsilon:
        if guess ** 3 < target:
            low = guess
        else:
            high = guess
        guess = (high + low) / 2.0
    result = sign * guess
    print(f"{result:.3f} is close to the cube root of {num:,}")
    return result
def main():
    """CLI entry point: clear the screen and run cube_root on argv[1]."""
    try:
        # os.system("clear") assumes a POSIX terminal (no-op/error on Windows)
        os.system("clear")
        if len(sys.argv) == 2:
            argument = int(sys.argv[1])
            cube_root(argument)
        else:
            print("Usage: cube_root <int>")
    except ValueError:
        # int() failed: the argument was not a whole number.
        print("Usage: cube_root <int>")


if __name__ == "__main__":
    main()
71019959977 | from http.server import HTTPServer, BaseHTTPRequestHandler
from pathlib import Path
import socket
import pygame as pg
import threading
import time
HOST = socket.gethostbyname(socket.gethostname())  # this machine's LAN address
PORT = 9999
print(socket.gethostname(), HOST)
# Flat-file game state shared by every request handler (read/rewritten whole).
save = Path("saves/save1.txt")
class HTTPRequestHandler(BaseHTTPRequestHandler):
    """Serves the shared save file over HTTP with a tiny text protocol.

    Requests carry space-separated commands in the body.  GET returns the
    whole save (or answers a "joinPlayerTest <username>" membership probe
    with "1"/"0"); POST mutates the save via "attr", "grid" and
    "joinPlayer" commands.  Save-file layout assumed by this code: line 0
    holds "<9-char prefix><comma-separated usernames>", lines 1-5 hold
    per-player attributes, line 6 holds defaults after a 20-char prefix,
    and the grid starts at line 8 -- TODO confirm against the game client.
    """
    def do_GET(self):
        self.send_response(200)
        self.send_header("Content-type", "text.html")
        self.end_headers()
        content_length = 0
        # Content-Length may be absent (TypeError) or malformed (ValueError).
        try:
            content_length = int(self.headers['Content-Length'])
        except TypeError:
            content_length = 0
        except ValueError:
            content_length = 0
        finished = False
        # NOTE(review): retrying forever on IndexError can spin if the body
        # stays malformed; rfile can only be read once per request.
        while not finished:
            try:
                result = self.rfile.read(content_length).decode()
                resultSplit = result.split(" ")
                print(resultSplit)
                if resultSplit[0] != "joinPlayerTest": # joinPlayerTest [username]
                    # Default GET: ship the entire save file.
                    self.send_response(200)
                    self.wfile.write(bytes(save.read_text(), "utf-8"))
                    finished = True
                else:
                    print("Join player test: " + resultSplit[1])
                    if len(save.read_text()) > 0:
                        # Line 0 after its 9-char prefix lists usernames.
                        if resultSplit[1] in save.read_text().split("\n")[0][9:].split(", "):
                            self.send_response(200)
                            self.wfile.write(
                                bytes("1", "utf-8"))
                            finished = True
                        else:
                            self.send_response(200)
                            self.wfile.write(
                                bytes("0", "utf-8"))
                            finished = True
            except IndexError:
                continue

    def do_POST(self):
        # Status 200 is committed before parsing; later send_response(400)
        # calls below write a second status line -- NOTE(review): confirm the
        # client tolerates this.
        self.send_response(200)
        self.send_header("Content-type", "application/json")
        self.end_headers()
        content_length = int(self.headers['Content-Length'])
        result = self.rfile.read(content_length).decode()
        resultSplit = result.split(" ")
        finished = False
        while not finished:
            try:
                if resultSplit[0] == "attr": # attr [player #] [attr #] [result]
                    if len(save.read_text().split("\n")[0].split(", ")) > int(resultSplit[1]):
                        if int(resultSplit[2]) > 5 or int(resultSplit[2]) < 0:
                            self.send_response(400)
                            self.wfile.write(
                                bytes('{"response":"Unavailable attribute index"}', "utf-8"))
                            finished = True
                        # NOTE(review): no else/return here -- after the 400
                        # above, execution falls through and still performs
                        # the write with the out-of-range index.
                        lines = save.read_text().split("\n")
                        prefix = lines[int(resultSplit[2])].split(": ")[0]
                        allAttributeValues = lines[int(resultSplit[2])].split(": ")[
                            1].split(", ")
                        # The value may itself contain spaces; rejoin the tail.
                        allAttributeValues[int(resultSplit[1])] = " ".join(
                            resultSplit[3:])
                        lines[int(resultSplit[2])] = prefix + \
                            ": " + ", ".join(allAttributeValues)
                        save.write_text("\n".join(lines))
                        self.wfile.write(
                            bytes('{"response":"Request completed"}', "utf-8"))
                        finished = True
                    else:
                        self.send_response(400)
                        self.wfile.write(
                            bytes('{"response":"Unavailable player index"}', "utf-8"))
                        finished = True
                elif resultSplit[0] == "grid": # grid [y] [x] [result]
                    try:
                        # Grid rows start at save-file line 8.
                        lines = save.read_text().split("\n")
                        cells = lines[int(resultSplit[2])+8].split(",")
                        cells[int(resultSplit[1])] = " ".join(resultSplit[3:])
                        lines[int(resultSplit[2])+8] = ",".join(cells)
                        save.write_text("\n".join(lines))
                        self.wfile.write(
                            bytes('{"response":"Request completed"}', "utf-8"))
                        finished = True
                    except IndexError:
                        self.send_response(400)
                        self.wfile.write(
                            bytes('{"response":"Out-of-bounds"}', "utf-8"))
                        finished = True
                elif resultSplit[0] == "joinPlayer": # joinPlayer [username] [color]
                    lines = save.read_text().split("\n")
                    # Defaults for a new player live on line 6 past a
                    # 20-character prefix.
                    defaultAtributes = lines[6][20:].split(", ")
                    # First player appends without the ", " separator.
                    if len(lines[0].split(": ")[1]) == 1:
                        lines[0] += resultSplit[1] # Username
                        lines[1] += defaultAtributes[0] # Location
                        lines[2] += defaultAtributes[1] # Inventory
                        lines[3] += defaultAtributes[2] # Last signal
                        lines[4] += resultSplit[2] # Color
                        lines[5] += defaultAtributes[3] # Direction
                    else:
                        lines[0] += ", " + resultSplit[1] # Username
                        lines[1] += ", " + defaultAtributes[0] # Location
                        lines[2] += ", " + defaultAtributes[1] # Inventory
                        lines[3] += ", " + defaultAtributes[2] # Last signal
                        lines[4] += ", " + resultSplit[2] # Color
                        lines[5] += ", " + defaultAtributes[3] # Direction
                    save.write_text("\n".join(lines))
                    self.wfile.write(
                        bytes('{"response":"Request completed"}', "utf-8"))
                    finished = True
            except IndexError:
                continue
if __name__ == "__main__":
server = HTTPServer((HOST, PORT), HTTPRequestHandler)
print("Server now running...")
running = True
pg.init()
pg.display.set_mode([500, 400])
threading.Thread(target=server.serve_forever).start()
while running:
for event in pg.event.get():
if event.type == pg.QUIT:
running = False
server.shutdown()
| SwinkyWorks/Top-down-game | httpServer.py | httpServer.py | py | 6,458 | python | en | code | 0 | github-code | 90 |
70725932778 | from django.test import TestCase
from django.urls import resolve, reverse
from question.models import Answer, Question, Tag
from user.models import User
class QuestionView(TestCase):
    """List-view tests: search by subject/description, tag filter, top, latest.

    Fixture: one user with 11 questions "Subject_0".."Subject_10", each
    tagged {tag1, tag2, otus}.
    """

    @classmethod
    def setUpTestData(cls):
        # The original declared `setUp` as a @classmethod (an accident that
        # happened to work) and rebuilt the fixture before every test;
        # setUpTestData is Django's documented once-per-class hook for
        # read-only fixtures and behaves identically for these tests.
        user = User(
            username="testuser", email="test@usertest.com", password="testuserpass"
        )
        user.save()
        for i in range(11):
            question = Question(
                subject="Subject_{0}".format(i),
                description="description_{0}".format(i),
                author=user,
                # Descending vote counts; the original's `99 - 1` gave every
                # question the same score and reads like a typo for `99 - i`.
                votes=99 - i,
            )
            question.save()
            question.add_tags({"tag1", "tag2", "otus"})

    def test_queryset_subject_search(self):
        # "Subject_2" is unique; "Subject_1" also prefixes "Subject_10".
        response = self.client.get(reverse("question:search"), {"q": "Subject_2"})
        self.assertEqual(response.context[-1]["object_list"].count(), 1)
        response = self.client.get(reverse("question:search"), {"q": "Subject_1"})
        self.assertEqual(response.context[-1]["object_list"].count(), 2)
        response = self.client.get(reverse("question:search"), {"q": "Subject"})
        self.assertEqual(response.context[-1]["object_list"].count(), 11)

    def test_queryset_description_search(self):
        response = self.client.get(reverse("question:search"), {"q": "description_5"})
        self.assertEqual(response.context[-1]["object_list"].count(), 1)
        response = self.client.get(reverse("question:search"), {"q": "description_1"})
        self.assertEqual(response.context[-1]["object_list"].count(), 2)

    def test_queryset_title_and_content_search(self):
        # A bare digit matches subject and description of the same question.
        response = self.client.get(reverse("question:search"), {"q": "2"})
        self.assertEqual(response.context[-1]["object_list"].count(), 1)
        response = self.client.get(reverse("question:search"), {"q": "1"})
        self.assertEqual(response.context[-1]["object_list"].count(), 2)

    def test_queryset_tag_search(self):
        response = self.client.get(reverse("question:tag", kwargs={"tag": "otus"}))
        self.assertEqual(response.context[-1]["object_list"].count(), 11)

    def test_queryset_top(self):
        response = self.client.get(reverse("question:top"))
        self.assertEqual(response.context[-1]["object_list"].count(), 11)

    def test_queryset_latest(self):
        response = self.client.get(reverse("question:latest"))
        self.assertEqual(response.context[-1]["object_list"].count(), 11)
3827848158 | import os
import logging
config_path = 'config/'
def dir_check(directory):
    """Create *directory* (including missing parents) if it does not exist.

    Uses exist_ok=True, which removes the check-then-create race of the
    original (isdir followed by makedirs could raise FileExistsError when
    two processes created the directory concurrently).
    """
    os.makedirs(directory, exist_ok=True)
def image_to_binary(file_name):
    """Return the raw bytes of *file_name*, or None (with a warning) if missing."""
    if not os.path.isfile(file_name):
        # Early exit keeps the happy path unindented.
        logging.warning('{} does not exist returning None'.format(file_name))
        return None
    with open(file_name, 'rb') as image_file:
        return image_file.read()
| Jamezzz5/screenshotmaster | ssm/utils.py | utils.py | py | 443 | python | en | code | 0 | github-code | 90 |
40851531341 | import openai
import base64
import os
testKey = os.environ.get('API_KEY')
openai.api_key = testKey

# FIX: a stray breakpoint() used to halt the script unconditionally here.

# `file` shadowed the builtin and was never closed; read the storyboard
# lines inside a context manager instead.
with open("result.txt", "r") as storyboard:
    storyboard_lines = storyboard.readlines()

for i, a in enumerate(storyboard_lines):
    # Ask GPT-4 to turn each storyboard point into an image-generator prompt.
    completion = openai.ChatCompletion.create(
        model='gpt-4',
        messages=[
            {"role": "system", "content": "your job is to provide concise, vivid descriptions of the given storyboard point to make an optimal prompt for an AI image generator like DALL-E. Make sure to make the prompt generates a panel with a top down view of a cute pixel art styled video game."},
            {"role": "user", "content": a}
        ]
    )
    # Trim to the image API's 1000-character prompt limit.
    story = completion.choices[0].message.content[0:1000]
    print(i, story)

    images = openai.Image.create(
        prompt=story,
        n=3,
        size="1024x1024",
        response_format="b64_json",
    )

    path = './images/image_' + str(i)
    # FIX: os.mkdir failed when ./images itself was missing; makedirs with
    # exist_ok also replaces the exists-check.
    os.makedirs(path, exist_ok=True)

    # FIX: the prompt file was opened but never closed.
    with open(path + "/prompt.txt", 'w') as promptFile:
        promptFile.write(a + "better prompt: " + story)

    for j, x in enumerate(images.data):
        fileName = path + "/image_" + str(j) + ".png"
        with open(fileName, 'wb') as imgFile:
            imgFile.write(base64.b64decode(x.b64_json))
18204978329 | import numpy as np
# Read N pairs (a_i, b_i); the answer is derived from the medians of the
# first and second columns.
N = int(input())
pairs = [tuple(map(int, input().split())) for _ in range(N)]
lows = np.array([p[0] for p in pairs])
highs = np.array([p[1] for p in pairs])
median_low = np.median(lows)
median_high = np.median(highs)
if N % 2 == 0:
    # Even N: the median moves in half-integer steps, so twice the span.
    count = 2 * (median_high - median_low) + 1
else:
    count = median_high - median_low + 1
print(int(count))
16466829364 | import re
from flask_restful import Resource, reqparse
from models import Scoreboard
from views import DBSession, get_rankings
from sqlalchemy.orm.exc import NoResultFound
# Username legality check: 4-50 alphanumeric characters
USERNAME_REGEX = re.compile('^[a-zA-Z0-9._-]{4,50}$')
# accept and parse POST
score_parser = reqparse.RequestParser()
score_parser.add_argument('username',
help='Your username should contain 4-50 '
'characters, with only letters and numbers.',
required=True)
score_parser.add_argument('score',
help='This field is required', required=True)
# Score API
class PublishScore(Resource):
    """Scoreboard REST resource: GET the current top ten, POST a score."""

    TOP_N = 10  # number of leaders returned by GET

    def get(self):
        """Return up to TOP_N highest-scoring users keyed by list position."""
        board = DBSession.query(Scoreboard).order_by(Scoreboard.score.desc()).all()
        # Compute rankings once per request; the original called
        # get_rankings(board) again for every row (O(n^2) overall).
        rankings = get_rankings(board)
        top_ten = {
            i: self.build_user_data_json(board, i, rankings)
            for i in range(min(self.TOP_N, len(board)))
        }
        DBSession.close()
        return top_ten, 200

    def post(self):
        """Record a submitted score; a stored score is only ever raised."""
        data = score_parser.parse_args()
        new_score = int(data['score'])
        try:
            user_data = DBSession.query(Scoreboard).\
                filter_by(username=data['username']).one()
            # FIX: the original assigned the raw request string
            # (`data.score`) here, storing a str where an int belongs.
            if user_data.score < new_score:
                user_data.score = new_score
        except NoResultFound:
            # First submission for this username: create the row.
            user_data = Scoreboard(username=data['username'], score=new_score)
            DBSession.add(user_data)
        DBSession.commit()
        # Re-read the board to locate the submitter's current ranking.
        board = DBSession.query(Scoreboard).order_by(Scoreboard.score.desc()).all()
        index = board.index(user_data)
        # Serialize before closing the session so attribute access does not
        # touch detached instances.
        result = self.build_user_data_json(board, index)
        DBSession.close()
        return result, 201

    def build_user_data_json(self, board, i, rankings=None):
        """Serialize board[i]; pass precomputed *rankings* to avoid rework.

        The optional parameter is backward compatible: omitted, the
        rankings are computed on demand exactly as before.
        """
        if rankings is None:
            rankings = get_rankings(board)
        return {'username': board[i].username,
                'score': board[i].score,
                'ranking': rankings[i]
                }
| chrisx8/Ballzzz | scoreboard/api.py | api.py | py | 2,710 | python | en | code | 0 | github-code | 90 |
# Read grid dimensions and count the cells that are not on the border.
n, m = map(int, input().split())
if n > 1 and m > 1:
    # Both dimensions at least 2: the interior is an (n-2) x (m-2) block.
    ans = (n - 2) * (m - 2)
elif (n == 1) ^ (m == 1):
    # Exactly one dimension is 1: a single row/column, minus its two ends.
    length = m if n == 1 else n
    ans = length - 2
else:
    # 1 x 1 grid.
    ans = 1
print(ans)
27087651458 | from __future__ import print_function
import sys
import re
import argparse
from llnl.util.argparsewriter import ArgparseWriter, ArgparseRstWriter
import spack.main
from spack.main import section_descriptions
description = "list available spack commands"
section = "developer"
level = "long"
#: registry mapping a formatter's name to its implementation
formatters = {}


def formatter(func):
    """Decorator used to register *func* in the ``formatters`` table."""
    formatters[func.__name__] = func
    return func
def setup_parser(subparser):
    """Register the arguments of `spack commands` on *subparser*."""
    subparser.add_argument(
        '--format', default='names', choices=list(formatters),
        help='format to be used to print the output (default: names)')
    subparser.add_argument(
        'documented_commands', nargs=argparse.REMAINDER,
        help='list of documented commands to cross-references')
class SpackArgparseRstWriter(ArgparseRstWriter):
    """RST writer tailored for spack documentation."""

    def __init__(self, documented_commands, out=sys.stdout):
        super(SpackArgparseRstWriter, self).__init__(out)
        # Commands with a hand-written docs section to cross-reference.
        self.documented = documented_commands or []

    def usage(self, *args):
        """Emit the usage block, plus a docs link when one exists."""
        super(SpackArgparseRstWriter, self).usage(*args)
        cmd = self.parser.prog.replace(' ', '-')
        if cmd in self.documented:
            self.line()
            self.line(':ref:`More documentation <cmd-%s>`' % cmd)
class SubcommandWriter(ArgparseWriter):
    """Writer that prints each command name indented by its nesting depth."""

    def begin_command(self, prog):
        indent = ' ' * self.level
        print(indent + prog)
@formatter
def subcommands(args):
    """Print the full command hierarchy, one command per line."""
    root = spack.main.make_argument_parser()
    spack.main.add_all_commands(root)
    SubcommandWriter().write(root)
def rst_index(out=sys.stdout):
    """Write an RST table mapping each command category to its commands."""
    out.write('\n')
    index = spack.main.index_commands()
    sections = index['long']
    # Column widths: widest category description + padding, and a command
    # column sized from the section contents.
    dmax = max(len(section_descriptions.get(s, s)) for s in sections) + 2
    # NOTE(review): len(c) here is the *number* of commands in the section,
    # not the length of the longest command name -- looks suspicious but the
    # +60 padding hides it; confirm intent.
    cmax = max(len(c) for _, c in sections.items()) + 60
    row = "%s %s\n" % ('=' * dmax, '=' * cmax)
    line = '%%-%ds %%s\n' % dmax
    out.write(row)
    out.write(line % (" Category ", " Commands "))
    out.write(row)
    for section, commands in sorted(sections.items()):
        description = section_descriptions.get(section, section)
        for i, cmd in enumerate(sorted(commands)):
            # Show the category only on the section's first row.
            description = description.capitalize() if i == 0 else ''
            ref = ':ref:`%s <spack-%s>`' % (cmd, cmd)
            comma = ',' if i != len(commands) - 1 else ''
            bar = '| ' if i % 8 == 0 else ' '
            out.write(line % (description, bar + ref + comma))
    out.write(row)
@formatter
def rst(args):
    """Print an index table followed by full RST docs for every command."""
    # Index table first, separated from the sections by a blank line.
    rst_index()
    print()
    parser = spack.main.make_argument_parser()
    spack.main.add_all_commands(parser)
    # Commands named on the command line get cross-reference links.
    documented = set(args.documented_commands)
    SpackArgparseRstWriter(documented).write(parser, root=1)
@formatter
def names(args):
    """Print the name of every spack command, one per line."""
    for command in spack.cmd.all_commands():
        print(command)
def commands(parser, args):
    """Entry point for `spack commands`: run the formatter chosen by --format."""
    formatters[args.format](args)
| matzke1/spack | lib/spack/spack/cmd/commands.py | commands.py | py | 3,198 | python | en | code | 2 | github-code | 90 |
3388727325 | import socket
#server = socket.gethostname()
server = "192.168.0.9"
port = 8080
print(f"server is {server}. port no is {port}.start service.")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((server, port)) # IPとポート番号を指定します
s.listen(5)
while True:
clientsocket, address = s.accept()
print(f"Connection from {address} has been established!")
clientsocket.send(bytes("Welcome to the server!", 'utf-8'))
clientsocket.close() | WhiteRabbit82651/study | python3/server.py | server.py | py | 479 | python | en | code | 0 | github-code | 90 |
29570836116 | # Test Part 1
# def yield_next_multiple(input_number):
# output_number = 0
# while True:
# yield output_number
# output_number += input_number
#
#
# sequence = yield_next_multiple(5)
# for i in range(5):
# print(next(sequence))
# Test Part 2
# main_sequence = range(10000000000000000)
# main_seed_list = [1789,37,47,1889]
#
# def yield_iterator(seed, sequence, offset):
# for i in sequence:
# if (i + offset) % seed == 0:
# yield i
#
#
# def yield_looper(seed_list, sequence):
# next_seq = sequence
# offset = 0
# for seed in seed_list:
# if seed != 0:
# next_seq = yield_iterator(seed, next_seq, offset)
# offset += 1
# return next_seq
#
#
# seed_iterator = yield_looper(main_seed_list, main_sequence)
# print(next(seed_iterator))
# Test Part 4 - Nope
# import math
#
# f = open("TestData2.txt", "r")
# raw_data = f.read()
#
# reqd_time, time_intervals = int(raw_data.split('\n')[0]), raw_data.split('\n')[1].split(',')
# essential_intervals = [int(i) for i in time_intervals if i != 'x']
#
# t_list = [int(i) if i != 'x' else int(0) for i in time_intervals]
# i_list = [i for i, v in enumerate(t_list) if v != 0]
# t_list[:] = [i for i in t_list if i != 0]
#
# print(f"{t_list}\n{i_list}")
#
# t_1 = sum(int(math.prod(t_list) / i) for i in t_list)
# t_2 = sum(i_list[i] * int(math.prod(t_list) / v) for i, v in enumerate(t_list))
# t_prod = math.prod(t_list)
#
# print(f"T1:\t{t_1}\n"
# f"T2:\t{t_2}\n"
# f"T_PROD:\t{t_prod}\n")
#
#
# def calculate_if_integer(num):
# y = ((num * t_1) + t_2) / t_prod
# if y.is_integer():
# return num
# else:
# return False
#
#
# def get_multiples(num):
# inp = num
# while True:
# yield inp
# inp += num
#
#
# get_multiple = get_multiples(1)
# while True:
# next_multiple = next(get_multiple)
# if calculate_if_integer(next_multiple):
# print(next_multiple)
# break
# Method 5
from functools import reduce
f = open("TestData2.txt", "r")
raw_data = f.read()
reqd_time, time_intervals = int(raw_data.split('\n')[0]), raw_data.split('\n')[1].split(',')
essential_intervals = [int(i) for i in time_intervals if i != 'x']
t_list = [int(i) if i != 'x' else int(0) for i in time_intervals]
i_list = [i for i, v in enumerate(t_list) if v != 0]
t_list[:] = [i for i in t_list if i != 0]
def chinese_remainder(n, a):
sum = 0
prod = reduce(lambda a, b: a * b, n)
for n_i, a_i in zip(n, a):
p = prod // n_i
sum += a_i * mul_inv(p, n_i) * p
return sum % prod
def mul_inv(a, b):
b0 = b
x0, x1 = 0, 1
if b == 1: return 1
while a > 1:
q = a // b
a, b = b, a % b
x0, x1 = x1 - q * x0, x0
if x1 < 0: x1 += b0
return x1
t_i_list = []
for i in range(len(t_list)):
t_i_list.append(t_list[i] - i_list[i])
print(chinese_remainder(t_list, t_i_list))
| Surya-77/personal-advent-of-code-2020 | Day13/13_test.py | 13_test.py | py | 2,951 | python | en | code | 1 | github-code | 90 |
14065240491 | import unittest
from datetime import datetime
import iris
import numpy as np
from iris.coord_systems import GeogCS, TransverseMercator
from iris.coords import DimCoord
from iris.tests import IrisTest
from improver.metadata.constants.attributes import MANDATORY_ATTRIBUTE_DEFAULTS
from improver.metadata.constants.mo_attributes import MOSG_GRID_ATTRIBUTES
from improver.orographic_enhancement import OrographicEnhancement
from improver.synthetic_data.set_up_test_cubes import construct_scalar_time_coords
from improver.utilities.cube_manipulation import sort_coord_in_cube
# UKPP projection
TMercCS = TransverseMercator(
latitude_of_projection_origin=49.0,
longitude_of_central_meridian=-2.0,
false_easting=400000.0,
false_northing=-100000.0,
scale_factor_at_central_meridian=0.9996013045310974,
ellipsoid=GeogCS(semi_major_axis=6377563.396, semi_minor_axis=6356256.91),
)
def set_up_variable_cube(
data, name="temperature", units="degC", xo=400000.0, yo=0.0, attributes=None
):
"""
Set up cube containing diagnostic variable data for regridding tests.
Data are on a 2 km Transverse Mercator grid with an inverted y-axis,
located in the UK.
"""
y_points = 2000.0 * (data.shape[0] - np.arange(data.shape[0])) + yo
x_points = 2000.0 * np.arange(data.shape[1]) + xo
y_coord = DimCoord(
y_points, "projection_y_coordinate", units="m", coord_system=TMercCS
)
x_coord = DimCoord(
x_points, "projection_x_coordinate", units="m", coord_system=TMercCS
)
time_coords = construct_scalar_time_coords(
datetime(2015, 11, 23, 4, 30), None, datetime(2015, 11, 22, 22, 30)
)
cube = iris.cube.Cube(
data,
long_name=name,
units=units,
attributes=attributes,
dim_coords_and_dims=[(y_coord, 0), (x_coord, 1)],
aux_coords_and_dims=time_coords,
)
return cube
def set_up_invalid_variable_cube(valid_cube):
"""
Generate a new cube with an extra dimension from a 2D variable cube, to
create an invalid cube for testing the process method.
"""
realization_coord = DimCoord(np.array([0], dtype=np.int32), "realization")
cube1 = valid_cube.copy()
cube1.add_aux_coord(realization_coord)
cube2 = cube1.copy()
cube2.coord("realization").points = [1]
return iris.cube.CubeList([cube1, cube2]).merge_cube()
def set_up_orography_cube(data, xo=400000.0, yo=0.0):
"""
Set up cube containing high resolution UK orography data.
"""
y_points = 1000.0 * (data.shape[0] - np.arange(data.shape[0])) + yo
x_points = 1000.0 * np.arange(data.shape[1]) + xo
y_coord = DimCoord(
y_points, "projection_y_coordinate", units="m", coord_system=TMercCS
)
x_coord = DimCoord(
x_points, "projection_x_coordinate", units="m", coord_system=TMercCS
)
cube = iris.cube.Cube(
data,
long_name="topography",
units="m",
dim_coords_and_dims=[(y_coord, 0), (x_coord, 1)],
)
cube.attributes["mosg__grid_type"] = "standard"
cube.attributes["mosg__grid_version"] = "1.0.0"
cube.attributes["mosg__grid_domain"] = "uk"
return cube
class Test__init__(IrisTest):
"""Test the __init__ method"""
def test_basic(self):
"""Test initialisation with no arguments"""
plugin = OrographicEnhancement()
self.assertAlmostEqual(plugin.orog_thresh_m, 20.0)
self.assertAlmostEqual(plugin.rh_thresh_ratio, 0.8)
self.assertAlmostEqual(plugin.vgradz_thresh_ms, 0.0005)
self.assertAlmostEqual(plugin.upstream_range_of_influence_km, 15.0)
self.assertAlmostEqual(plugin.efficiency_factor, 0.23265)
self.assertAlmostEqual(plugin.cloud_lifetime_s, 102.0)
none_type_attributes = [
"topography",
"temperature",
"humidity",
"pressure",
"uwind",
"vwind",
"svp",
"vgradz",
"grid_spacing_km",
]
for attr in none_type_attributes:
self.assertIsNone(getattr(plugin, attr))
class Test__repr__(IrisTest):
"""Test the __repr__ method"""
def test_basic(self):
"""Test string representation of plugin"""
plugin = OrographicEnhancement()
self.assertEqual(str(plugin), "<OrographicEnhancement()>")
class Test__orography_gradients(IrisTest):
"""Test the _orography_gradients method"""
def setUp(self):
"""Set up an input cube"""
self.plugin = OrographicEnhancement()
data = np.array(
[[200.0, 450.0, 850.0], [320.0, 500.0, 1000.0], [230.0, 600.0, 900.0]]
)
x_coord = DimCoord(np.arange(3), "projection_x_coordinate", units="km")
y_coord = DimCoord(np.arange(3), "projection_y_coordinate", units="km")
self.plugin.topography = iris.cube.Cube(
data,
long_name="topography",
units="m",
dim_coords_and_dims=[(y_coord, 0), (x_coord, 1)],
)
def test_basic(self):
"""Test outputs are cubes"""
gradx, grady = self.plugin._orography_gradients()
self.assertIsInstance(gradx, iris.cube.Cube)
self.assertIsInstance(grady, iris.cube.Cube)
def test_values(self):
"""Test output values and units"""
expected_gradx = np.array(
[
[0.12333333, 0.33, 0.53666667],
[0.2, 0.33333333, 0.46666667],
[0.27666667, 0.33666667, 0.39666667],
]
)
expected_grady = np.array(
[
[0.15833333, 0.175, 0.19166667],
[0.035, 0.03833333, 0.04166667],
[-0.08833333, -0.09833333, -0.10833333],
]
)
gradx, grady = self.plugin._orography_gradients()
self.assertArrayAlmostEqual(gradx.data, expected_gradx)
self.assertArrayAlmostEqual(grady.data, expected_grady)
for cube in [gradx, grady]:
self.assertEqual(cube.units, "1")
class Test__regrid_variable(IrisTest):
"""Test the _regrid_variable method"""
def setUp(self):
"""Set up input cubes"""
temperature = np.arange(6).reshape(2, 3)
self.temperature_cube = set_up_variable_cube(temperature)
orography = np.array(
[
[20.0, 30.0, 40.0, 30.0, 25.0, 25.0],
[30.0, 50.0, 80.0, 60.0, 50.0, 45.0],
[50.0, 65.0, 90.0, 70.0, 60.0, 50.0],
[45.0, 60.0, 85.0, 65.0, 55.0, 45.0],
]
)
orography_cube = set_up_orography_cube(orography)
self.plugin = OrographicEnhancement()
self.plugin.topography = sort_coord_in_cube(
orography_cube, orography_cube.coord(axis="y")
)
def test_basic(self):
"""Test cube of the correct shape and type is returned"""
expected_data = np.array(
[
[4.5, 5.0, 5.5, 6.0, 6.5, 7.0],
[3.0, 3.5, 4.0, 4.5, 5.0, 5.5],
[1.5, 2.0, 2.5, 3.0, 3.5, 4.0],
[0.0, 0.5, 1.0, 1.5, 2.0, 2.5],
]
)
result = self.plugin._regrid_variable(self.temperature_cube, "degC")
self.assertIsInstance(result, iris.cube.Cube)
self.assertArrayAlmostEqual(result.data, expected_data)
self.assertEqual(result.data.dtype, "float32")
def test_axis_inversion(self):
"""Test axes are output in ascending order"""
result = self.plugin._regrid_variable(self.temperature_cube, "degC")
x_points = result.coord(axis="x").points
y_points = result.coord(axis="y").points
self.assertTrue(x_points[1] > x_points[0])
self.assertTrue(y_points[1] > y_points[0])
def test_unit_conversion(self):
"""Test units are correctly converted"""
expected_data = np.array(
[
[277.65, 278.15, 278.65, 279.15, 279.65, 280.15],
[276.15, 276.65, 277.15, 277.65, 278.15, 278.65],
[274.65, 275.15, 275.65, 276.15, 276.65, 277.15],
[273.15, 273.65, 274.15, 274.65, 275.15, 275.65],
],
dtype=np.float32,
)
result = self.plugin._regrid_variable(self.temperature_cube, "kelvin")
self.assertEqual(result.units, "kelvin")
self.assertArrayAlmostEqual(result.data, expected_data)
def test_null(self):
"""Test cube is unchanged if axes and grid are already correct"""
correct_cube = self.plugin.topography.copy()
result = self.plugin._regrid_variable(correct_cube, "m")
self.assertArrayAlmostEqual(result.data, correct_cube.data)
self.assertEqual(result.metadata, correct_cube.metadata)
def test_input_unchanged(self):
"""Test the input cube is not modified in place"""
reference_cube = self.temperature_cube.copy()
_ = self.plugin._regrid_variable(self.temperature_cube, "degC")
self.assertArrayAlmostEqual(self.temperature_cube.data, reference_cube.data)
self.assertEqual(self.temperature_cube.metadata, reference_cube.metadata)
class DataCubeTest(IrisTest):
"""Shared setUp function for tests requiring full input data cubes
with an inverted y-axis"""
def setUp(self):
"""Set up input cubes"""
temperature = np.arange(6).reshape(2, 3)
self.temperature = set_up_variable_cube(temperature)
humidity = np.arange(0.75, 0.86, 0.02).reshape(2, 3)
self.humidity = set_up_variable_cube(humidity, "relhumidity", "1")
pressure = np.arange(820, 921, 20).reshape(2, 3)
self.pressure = set_up_variable_cube(pressure, "pressure", "hPa")
uwind = np.full((2, 3), 20.0, dtype=np.float32)
self.uwind = set_up_variable_cube(uwind, "wind-u", "knots")
vwind = np.full((2, 3), 12.0, dtype=np.float32)
self.vwind = set_up_variable_cube(vwind, "wind-v", "knots")
orography = np.array(
[
[20.0, 30.0, 40.0, 30.0, 25.0, 25.0],
[30.0, 50.0, 80.0, 60.0, 50.0, 45.0],
[50.0, 65.0, 90.0, 70.0, 60.0, 50.0],
[45.0, 60.0, 85.0, 65.0, 55.0, 45.0],
]
)
self.orography_cube = set_up_orography_cube(orography)
self.plugin = OrographicEnhancement()
class Test__regrid_and_populate(DataCubeTest):
"""Test the _regrid_and_populate method"""
def test_basic(self):
"""Test function populates class instance"""
self.plugin._regrid_and_populate(
self.temperature,
self.humidity,
self.pressure,
self.uwind,
self.vwind,
self.orography_cube,
)
plugin_cubes = [
self.plugin.temperature,
self.plugin.humidity,
self.plugin.pressure,
self.plugin.uwind,
self.plugin.vwind,
self.plugin.topography,
]
for cube in plugin_cubes:
self.assertIsInstance(cube, iris.cube.Cube)
self.assertIsInstance(self.plugin.vgradz, np.ndarray)
def test_variables(self):
"""Test variable values are sensible"""
expected_temperature = np.array(
[
[277.65, 278.15, 278.65, 279.15, 279.65, 280.15],
[276.15, 276.65, 277.15, 277.65, 278.15, 278.65],
[274.65, 275.15, 275.65, 276.15, 276.65, 277.15],
[273.15, 273.65, 274.15, 274.65, 275.15, 275.65],
],
dtype=np.float32,
)
expected_humidity = np.array(
[
[0.84, 0.85, 0.86, 0.87, 0.88, 0.89],
[0.81, 0.82, 0.83, 0.84, 0.85, 0.86],
[0.78, 0.79, 0.80, 0.81, 0.82, 0.83],
[0.75, 0.76, 0.77, 0.78, 0.79, 0.80],
],
dtype=np.float32,
)
expected_pressure = np.array(
[
[91000.0, 92000.0, 93000.0, 94000.0, 95000.0, 96000.0],
[88000.0, 89000.0, 90000.0, 91000.0, 92000.0, 93000.0],
[85000.0, 86000.0, 87000.0, 88000.0, 89000.0, 90000.0],
[82000.0, 83000.0, 84000.0, 85000.0, 86000.0, 87000.0],
],
dtype=np.float32,
)
expected_uwind = np.full((4, 6), 10.288889, dtype=np.float32)
expected_vwind = np.full((4, 6), 6.1733336, dtype=np.float32)
self.plugin._regrid_and_populate(
self.temperature,
self.humidity,
self.pressure,
self.uwind,
self.vwind,
self.orography_cube,
)
plugin_cubes = [
self.plugin.temperature,
self.plugin.humidity,
self.plugin.pressure,
self.plugin.uwind,
self.plugin.vwind,
]
expected_data = [
expected_temperature,
expected_humidity,
expected_pressure,
expected_uwind,
expected_vwind,
]
for cube, array in zip(plugin_cubes, expected_data):
self.assertArrayAlmostEqual(cube.data, array)
self.assertArrayAlmostEqual(
self.plugin.topography.data, np.flipud(self.orography_cube.data)
)
def test_vgradz(self):
"""Test values of vgradz are sensible"""
expected_vgradz = np.array(
[
[
0.20577779,
0.29837778,
0.10803331,
-0.07716664,
-0.03086665,
-0.03601114,
],
[
0.07888144,
0.19205923,
0.01371852,
-0.16976666,
-0.10460369,
-0.08231109,
],
[
0.02229258,
0.07030742,
-0.10288889,
-0.25722224,
-0.17148148,
-0.12175184,
],
[
0.05315927,
-0.01543331,
-0.22464074,
-0.36525553,
-0.24864818,
-0.17148149,
],
]
)
self.plugin._regrid_and_populate(
self.temperature,
self.humidity,
self.pressure,
self.uwind,
self.vwind,
self.orography_cube,
)
self.assertArrayAlmostEqual(self.plugin.vgradz, expected_vgradz)
class Test__generate_mask(IrisTest):
"""Test the _generate_mask method"""
def setUp(self):
"""Set up and populate a plugin instance"""
self.plugin = OrographicEnhancement()
x_coord = DimCoord(np.arange(5), "projection_x_coordinate", units="km")
y_coord = DimCoord(np.arange(5), "projection_y_coordinate", units="km")
# this is neighbourhood-processed as part of mask generation
topography_data = np.array(
[
[0.0, 10.0, 20.0, 50.0, 100.0],
[10.0, 20.0, 50.0, 100.0, 200.0],
[25.0, 60.0, 80.0, 160.0, 220.0],
[50.0, 80.0, 100.0, 200.0, 250.0],
[50.0, 80.0, 100.0, 200.0, 250.0],
]
)
self.plugin.topography = iris.cube.Cube(
topography_data,
long_name="topography",
units="m",
dim_coords_and_dims=[(y_coord, 0), (x_coord, 1)],
)
humidity_data = np.full((5, 5), 0.9)
humidity_data[1, 3] = 0.5
self.plugin.humidity = iris.cube.Cube(
humidity_data,
long_name="relhumidity",
units="1",
dim_coords_and_dims=[(y_coord, 0), (x_coord, 1)],
)
self.plugin.vgradz = np.full((5, 5), 0.01)
self.plugin.vgradz[3:, :] = 0.0
def test_basic(self):
"""Test output is array"""
result = self.plugin._generate_mask()
self.assertIsInstance(result, np.ndarray)
def test_values(self):
"""Test output mask is correct"""
expected_output = np.full((5, 5), False, dtype=bool)
expected_output[0, :2] = True # orography too low
expected_output[1, 3] = True # humidity too low
expected_output[3:, :] = True # vgradz too low
result = self.plugin._generate_mask()
self.assertArrayEqual(result, expected_output)
class Test__point_orogenh(IrisTest):
"""Test the _point_orogenh method"""
def setUp(self):
"""Set up and populate a plugin instance"""
x_coord = DimCoord(np.arange(3), "projection_x_coordinate", units="km")
y_coord = DimCoord(np.arange(3), "projection_y_coordinate", units="km")
temperature = np.array(
[[277.1, 278.2, 277.7], [278.6, 278.4, 278.9], [278.9, 279.0, 279.6]]
)
humidity = np.array(
[[0.74, 0.85, 0.94], [0.81, 0.82, 0.91], [0.86, 0.93, 0.97]]
)
self.plugin = OrographicEnhancement()
self.plugin.temperature = iris.cube.Cube(
temperature,
long_name="temperature",
units="kelvin",
dim_coords_and_dims=[(y_coord, 0), (x_coord, 1)],
)
self.plugin.humidity = iris.cube.Cube(
humidity,
long_name="relhumidity",
units="1",
dim_coords_and_dims=[(y_coord, 0), (x_coord, 1)],
)
self.plugin.svp = np.array(
[[813.6, 878.0, 848.3], [903.2, 890.5, 922.2], [922.1, 928.4, 967.9]]
)
self.plugin.vgradz = np.array(
[[0.02, 0.08, 0.2], [-0.06, 0.12, 0.22], [0.08, 0.16, 0.23]]
)
topography_data = np.full((3, 3), 50.0, dtype=np.float32)
self.plugin.topography = iris.cube.Cube(
topography_data,
long_name="topography",
units="m",
dim_coords_and_dims=[(y_coord, 0), (x_coord, 1)],
)
def test_basic(self):
"""Test output is an array"""
result = self.plugin._point_orogenh()
self.assertIsInstance(result, np.ndarray)
def test_values(self):
"""Test output values are as expected"""
expected_values = np.array(
[
[0.0, 1.67372072, 4.47886658],
[0.0, 2.45468903, 5.1627059],
[1.77400422, 3.86162901, 6.02323198],
]
)
result = self.plugin._point_orogenh()
self.assertArrayAlmostEqual(result, expected_values)
class Test__get_point_distances(IrisTest):
"""Test the _get_point_distances function"""
def setUp(self):
"""Define input matrices and plugin"""
self.wind_speed = np.ones((3, 4), dtype=np.float32)
sin_wind_dir = np.linspace(0, 1, 12).reshape(3, 4)
cos_wind_dir = np.sqrt(1.0 - np.square(sin_wind_dir))
self.max_sin_cos = np.where(
abs(sin_wind_dir) > abs(cos_wind_dir), abs(sin_wind_dir), abs(cos_wind_dir)
)
self.plugin = OrographicEnhancement()
self.plugin.grid_spacing_km = 3.0
def test_basic(self):
"""Test the function returns an array of the expected shape"""
distance = self.plugin._get_point_distances(self.wind_speed, self.max_sin_cos)
self.assertIsInstance(distance, np.ndarray)
self.assertSequenceEqual(distance.shape, (5, 3, 4))
def test_values_with_nans(self):
"""Test for expected values including nans"""
slice_0 = np.zeros((3, 4), dtype=np.float32)
slice_1 = np.array(
[
[1.0, 1.00415802, 1.01695037, 1.03940225],
[1.07349002, 1.12268281, 1.1931175, 1.2963624],
[1.375, 1.22222221, 1.10000002, 1.0],
]
)
slice_2 = 2.0 * slice_1
slice_3 = 3.0 * slice_1
slice_3[1, 3] = np.nan
slice_3[2, 0] = np.nan
slice_4 = np.full_like(slice_0, np.nan)
slice_4[0, 0] = 4.0
slice_4[-1, -1] = 4.0
expected_data = np.array([slice_0, slice_1, slice_2, slice_3, slice_4])
distance = self.plugin._get_point_distances(self.wind_speed, self.max_sin_cos)
np.testing.assert_allclose(distance, expected_data, equal_nan=True)
class Test__locate_source_points(IrisTest):
"""Test the _locate_source_points method"""
def setUp(self):
"""Define input matrices and plugin"""
self.wind_speed = np.ones((3, 4), dtype=np.float32)
self.sin_wind_dir = np.full((3, 4), 0.4, dtype=np.float32)
self.cos_wind_dir = np.full((3, 4), np.sqrt(0.84), dtype=np.float32)
self.plugin = OrographicEnhancement()
self.plugin.grid_spacing_km = 3.0
def test_basic(self):
"""Test location of source points"""
distance = self.plugin._get_point_distances(self.wind_speed, self.cos_wind_dir)
xsrc, ysrc = self.plugin._locate_source_points(
self.wind_speed, distance, self.sin_wind_dir, self.cos_wind_dir
)
expected_xsrc = np.array(
[
[[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]],
[[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]],
[[0, 0, 1, 2], [0, 0, 1, 2], [0, 0, 1, 2]],
[[0, 0, 1, 2], [0, 0, 1, 2], [0, 0, 1, 2]],
]
)
expected_ysrc = np.array(
[
[[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]],
[[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
]
)
self.assertArrayEqual(xsrc, expected_xsrc)
self.assertArrayEqual(ysrc, expected_ysrc)
class Test__compute_weighted_values(IrisTest):
"""Test the _compute_weighted_values method"""
def setUp(self):
"""Set up plugin and some inputs"""
self.plugin = OrographicEnhancement()
self.plugin.grid_spacing_km = 3.0
self.point_orogenh = np.array(
[
[4.1, 4.6, 5.6, 6.8, 5.5],
[4.4, 4.6, 5.8, 6.2, 5.5],
[5.2, 3.0, 3.4, 5.1, 3.3],
[0.6, 2.0, 1.8, 4.2, 2.5],
[0.0, 0.0, 0.2, 3.2, 1.8],
]
)
self.wind_speed = np.full((5, 5), 25.0, dtype=np.float32)
sin_wind_dir = np.full((5, 5), 0.4, dtype=np.float32)
cos_wind_dir = np.full((5, 5), np.sqrt(0.84), dtype=np.float32)
self.distance = self.plugin._get_point_distances(self.wind_speed, cos_wind_dir)
self.xsrc, self.ysrc = self.plugin._locate_source_points(
self.wind_speed, self.distance, sin_wind_dir, cos_wind_dir
)
def test_basic(self):
"""Test output is two arrays"""
orogenh, weights = self.plugin._compute_weighted_values(
self.point_orogenh, self.xsrc, self.ysrc, self.distance, self.wind_speed
)
self.assertIsInstance(orogenh, np.ndarray)
self.assertIsInstance(weights, np.ndarray)
def test_values(self):
"""Test values are as expected"""
expected_orogenh = np.array(
[
[6.0531969, 6.7725644, 8.2301264, 9.9942646, 8.1690931],
[6.3531971, 6.7725644, 8.4301271, 9.3942642, 8.1690931],
[7.2848172, 5.1725645, 6.1178742, 8.0310230, 5.9690924],
[3.0469213, 3.4817038, 3.4649093, 6.6558237, 4.1816435],
[0.4585612, 1.0727906, 1.1036499, 5.1721582, 3.0895371],
]
)
expected_weights = np.full((5, 5), 1.4763895, dtype=np.float32)
orogenh, weights = self.plugin._compute_weighted_values(
self.point_orogenh, self.xsrc, self.ysrc, self.distance, self.wind_speed
)
self.assertArrayAlmostEqual(orogenh, expected_orogenh)
self.assertArrayAlmostEqual(weights, expected_weights)
class Test__add_upstream_component(IrisTest):
"""Test the _add_upstream_component method"""
def setUp(self):
"""Set up a plugin with wind components"""
x_coord = DimCoord(3.0 * np.arange(5), "projection_x_coordinate", units="km")
y_coord = DimCoord(3.0 * np.arange(5), "projection_y_coordinate", units="km")
uwind = np.full((5, 5), 20.0, dtype=np.float32)
vwind = np.full((5, 5), 12.0, dtype=np.float32)
self.plugin = OrographicEnhancement()
self.plugin.uwind = iris.cube.Cube(
uwind,
long_name="grid_eastward_wind",
units="m s-1",
dim_coords_and_dims=[(y_coord, 0), (x_coord, 1)],
)
self.plugin.vwind = iris.cube.Cube(
vwind,
long_name="grid_northward_wind",
units="m s-1",
dim_coords_and_dims=[(y_coord, 0), (x_coord, 1)],
)
self.plugin.grid_spacing_km = 3.0
self.point_orogenh = np.array(
[
[4.1, 4.6, 5.6, 6.8, 5.5],
[4.4, 4.6, 5.8, 6.2, 5.5],
[5.2, 3.0, 3.4, 5.1, 3.3],
[0.6, 2.0, 1.8, 4.2, 2.5],
[0.0, 0.0, 0.2, 3.2, 1.8],
]
)
def test_basic(self):
"""Test output is an array"""
result = self.plugin._add_upstream_component(self.point_orogenh)
self.assertIsInstance(result, np.ndarray)
def test_values(self):
"""Test output values are sensible"""
expected_values = np.array(
[
[0.953865, 1.039876, 1.241070, 1.506976, 1.355637],
[1.005472, 1.039876, 1.275474, 1.403762, 1.355637],
[1.161275, 0.782825, 0.863303, 1.226206, 0.942638],
[0.418468, 0.659300, 0.496544, 0.927728, 0.735382],
[0.036423, 0.036423, 0.152506, 0.660092, 0.558801],
]
)
result = self.plugin._add_upstream_component(self.point_orogenh)
self.assertArrayAlmostEqual(result, expected_values)
class Test__create_output_cube(IrisTest):
"""Test the _create_output_cube method"""
def setUp(self):
"""Set up a plugin instance, data array and cubes"""
self.plugin = OrographicEnhancement()
topography = set_up_orography_cube(np.zeros((3, 4), dtype=np.float32))
self.plugin.topography = sort_coord_in_cube(
topography, topography.coord(axis="y")
)
t_attributes = {
"institution": "Met Office",
"source": "Met Office Unified Model",
"mosg__grid_type": "standard",
"mosg__grid_version": "1.2.0",
"mosg__grid_domain": "uk_extended",
"mosg__model_configuration": "uk_det",
}
self.temperature = set_up_variable_cube(
np.full((2, 4), 280.15, dtype=np.float32),
units="kelvin",
xo=398000.0,
attributes=t_attributes,
)
self.orogenh = np.array(
[[1.1, 1.2, 1.5, 1.4], [1.0, 1.3, 1.4, 1.6], [0.8, 0.9, 1.2, 0.9]]
)
def test_basic(self):
"""Test that the cube is returned with float32 coords"""
output = self.plugin._create_output_cube(self.orogenh, self.temperature)
self.assertIsInstance(output, iris.cube.Cube)
for coord in output.coords(dim_coords=True):
self.assertEqual(coord.points.dtype, "float32")
def test_values(self):
"""Test the cube is changed only in units (to m s-1)"""
original_converted = 2.7777778e-07 * self.orogenh
output = self.plugin._create_output_cube(self.orogenh, self.temperature)
self.assertArrayAlmostEqual(output.data, original_converted)
def test_metadata(self):
"""Check output metadata on cube is as expected"""
expected_attributes = {
"title": MANDATORY_ATTRIBUTE_DEFAULTS["title"],
"source": self.temperature.attributes["source"],
"institution": self.temperature.attributes["institution"],
}
for attr in MOSG_GRID_ATTRIBUTES:
expected_attributes[attr] = self.plugin.topography.attributes[attr]
output = self.plugin._create_output_cube(self.orogenh, self.temperature)
for axis in ["x", "y"]:
self.assertEqual(
output.coord(axis=axis), self.plugin.topography.coord(axis=axis)
)
self.assertEqual(output.name(), "orographic_enhancement")
self.assertEqual(output.units, "m s-1")
for t_coord in ["time", "forecast_period", "forecast_reference_time"]:
self.assertEqual(output.coord(t_coord), self.temperature.coord(t_coord))
self.assertDictEqual(output.attributes, expected_attributes)
class Test_process(DataCubeTest):
"""Test the process method"""
def test_basic(self):
"""Test output is float32 cube with float32 coordinates"""
orogenh = self.plugin.process(
self.temperature,
self.humidity,
self.pressure,
self.uwind,
self.vwind,
self.orography_cube,
)
self.assertIsInstance(orogenh, iris.cube.Cube)
self.assertEqual(orogenh.data.dtype, "float32")
for coord in orogenh.coords(dim_coords=True):
self.assertEqual(coord.points.dtype, "float32")
def test_unmatched_coords(self):
"""Test error thrown if input variable cubes do not match"""
self.temperature.coord("forecast_reference_time").points = (
self.temperature.coord("forecast_reference_time").points - 3600
)
self.temperature.coord("forecast_period").points = (
self.temperature.coord("forecast_period").points - 3600
)
msg = "Input cube coordinates"
with self.assertRaisesRegex(ValueError, msg):
_ = self.plugin.process(
self.temperature,
self.humidity,
self.pressure,
self.uwind,
self.vwind,
self.orography_cube,
)
def test_extra_dimensions(self):
"""Test error thrown if input variable cubes have an extra dimension"""
temperature = set_up_invalid_variable_cube(self.temperature)
humidity = set_up_invalid_variable_cube(self.humidity)
pressure = set_up_invalid_variable_cube(self.pressure)
uwind = set_up_invalid_variable_cube(self.uwind)
vwind = set_up_invalid_variable_cube(self.vwind)
msg = "Require 2D fields as input; found 3 dimensions"
with self.assertRaisesRegex(ValueError, msg):
_ = self.plugin.process(
temperature, humidity, pressure, uwind, vwind, self.orography_cube
)
def test_inputs_unmodified(self):
"""Test the process method does not modify any of the input cubes"""
cube_list = [
self.temperature,
self.humidity,
self.pressure,
self.uwind,
self.vwind,
self.orography_cube,
]
copied_cubes = []
for cube in cube_list:
copied_cubes.append(cube.copy())
_ = self.plugin.process(
self.temperature,
self.humidity,
self.pressure,
self.uwind,
self.vwind,
self.orography_cube,
)
for cube, copy in zip(cube_list, copied_cubes):
self.assertArrayAlmostEqual(cube.data, copy.data)
self.assertEqual(cube.metadata, copy.metadata)
def test_values(self):
"""Test values of output"""
expected_data = np.array(
[
[
2.6524199e-07,
3.4075157e-07,
2.5099993e-07,
9.1911055e-08,
1.7481890e-08,
1.5676112e-09,
],
[
1.6797775e-07,
2.4365076e-07,
1.7639361e-07,
9.1911055e-08,
1.7481890e-08,
1.5676112e-09,
],
[
4.1531862e-08,
4.1531862e-08,
8.9591637e-08,
2.8731334e-08,
5.3441389e-09,
1.5676112e-09,
],
[
8.5711110e-10,
8.5711110e-10,
8.5711110e-10,
8.5711110e-10,
2.1291666e-09,
2.4547223e-10,
],
],
dtype=np.float32,
)
orogenh = self.plugin.process(
self.temperature,
self.humidity,
self.pressure,
self.uwind,
self.vwind,
self.orography_cube,
)
self.assertArrayAlmostEqual(orogenh.data, expected_data)
self.assertAlmostEqual(self.plugin.grid_spacing_km, 1.0)
if __name__ == "__main__":
unittest.main()
| metoppv/improver | improver_tests/orographic_enhancement/test_OrographicEnhancement.py | test_OrographicEnhancement.py | py | 33,266 | python | en | code | 95 | github-code | 90 |
18311241399 | import sys
read = sys.stdin.read
T1, T2, A1, A2, B1, B2 = map(int, read().split())
answer = 0
v1 = A1 - B1
v2 = A2 - B2
d = v1 * T1 + v2 * T2
if d == 0:
print('infinity')
exit()
elif v1 * d > 0:
print(0)
exit()
if v1 * T1 % -d == 0:
print(v1 * T1 // -d * 2)
else:
print(v1 * T1 // -d * 2 + 1) | Aasthaengg/IBMdataset | Python_codes/p02846/s086829182.py | s086829182.py | py | 319 | python | en | code | 0 | github-code | 90 |
6381336038 | points = [
{
'name': 'Ariful Islam',
'point': 2425,
'answer': 1625,
'explanation': 15,
'subject': 785,
'refer': 0,
'image': 'profile-pic.jpeg',
},
{
'name': 'পিপীলিকা পাঠান',
'point': 340,
'answer': 50,
'explanation': 15,
'subject': 60,
'refer': 0,
'image': None,
},
{
'name': 'Sheikh Mohammad Sagor Ahmmed',
'point': 266,
'answer': 50,
'explanation': 15,
'subject': 60,
'refer': 0,
'image': None,
},
{
'name': 'Md. Sohel Rana',
'point': 142,
'answer': 115,
'explanation': 24,
'subject': 0,
'refer': 0,
'image': None,
},
] | naimurhasan/python-pillow-info-graphic-image-point | points.py | points.py | py | 805 | python | en | code | 0 | github-code | 90 |
18245320259 |
from collections import defaultdict
N, X, Y = map(int, input().split())
ctr = defaultdict(int)
for i in range(1, N + 1):
for j in range(i + 1, N + 1):
d = min(j - i, abs(i - X) + 1 + abs(j - Y))
ctr[d] += 1
for i in range(1, N):
print(ctr[i])
| Aasthaengg/IBMdataset | Python_codes/p02726/s460725783.py | s460725783.py | py | 272 | python | en | code | 0 | github-code | 90 |
9894798838 | from rest_framework.exceptions import PermissionDenied
from rest_framework import status
class InvalidUserException(PermissionDenied):
status_code = status.HTTP_403_FORBIDDEN
default_detail = "User information inconsistent"
default_code = 'invalid'
def __init__(self, detail, status_code=None):
self.detail = detail
if status_code is not None:
self.status_code = status_code | Wkeirn7/drip_backend | api/exceptions.py | exceptions.py | py | 421 | python | en | code | 0 | github-code | 90 |
9322350695 | from assertpy import assert_that
import server
class TestLoadClubs:
def test_load_clubs_data(self):
# initialisation
clubs_json = [
{"name": "Simply Lift", "email": "john@simplylift.co", "points": "13"},
{"name": "Iron Temple", "email": "admin@irontemple.com", "points": "4"},
{"name": "She Lifts", "email": "kate@shelifts.co.uk", "points": "12"},
]
# expected
expected_result = clubs_json
# method call
result = server.loadClubs()
# assertions
assert_that(result).is_equal_to(expected_result)
class TestLoadCompetitions:
def test_load_competitions_nominal(self):
# initialisation
competitions_json = [
{
"name": "Spring Festival",
"date": "2022-03-27 10:00:00",
"numberOfPlaces": "25",
},
{
"name": "Fall Classic",
"date": "2022-10-22 13:30:00",
"numberOfPlaces": "13",
},
]
# expected
expected_result = competitions_json
# method call
result = server.loadCompetitions()
# assertions
assert_that(result).is_equal_to(expected_result)
| PierreRtec/P11_Rondeau_Pierre | tests/tests_unitaires/test_server_unit.py | test_server_unit.py | py | 1,277 | python | en | code | 0 | github-code | 90 |
15454835224 | # -*- coding: utf-8 -*-
import sys
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import (QApplication, QMainWindow, QFrame,
QMenu, QMenuBar, QStatusBar, QAction,
QLabel, QPushButton, QWidget)
from mainframe import MainFrame
from client import Client
from order import Order
from car import Car
from logolabael import Logo
from templateCSS import CSS
class Ui_MainWindow(object):
    """Qt-Designer-style UI builder for the BaseSTO main window.

    Aggregates the sub-views (main frame, client, order, car, logo widgets)
    and wires frames, menus, buttons and translated texts onto a QMainWindow.
    """
    # NOTE(review): these class attributes are evaluated at import time, and
    # `MainWindow` is not defined in class-body scope — this first line raises
    # NameError when the module is imported. The construction likely belongs
    # inside setupUi()/__init__; confirm and restructure.
    mainframe = MainFrame(MainWindow)
    client = Client()
    order = Order()
    car = Car()
    logo = Logo()
    # NOTE(review): '__int__' looks like a typo for '__init__'; as written it
    # is never invoked by Python, so Ui_MainWindow() silently ignores it.
    def __int__(self, MainWindow):
        mainframe = MainFrame(MainWindow)
    def set_frame(self):
        """Create the bottom, car and client frames inside the main frame."""
        # self.main_frame = QFrame(parent=self.centralwidget)
        # self.main_frame.setGeometry(QtCore.QRect(0, 0, 1024, 600))
        # self.main_frame.setFrameShape(QFrame.StyledPanel)
        # self.main_frame.setFrameShadow(QFrame.Raised)
        # self.main_frame.setObjectName("main_frame")
        self.frame_buttom = QFrame(self.mainframe.main_frame)
        self.frame_buttom.setGeometry(QtCore.QRect(0, 520, 801, 41))
        self.frame_buttom.setFrameShape(QFrame.StyledPanel)
        self.frame_buttom.setFrameShadow(QFrame.Raised)
        self.frame_buttom.setObjectName("frame_buttom")
        # Car frame: created hidden; shown on demand elsewhere.
        self.frame_car = QFrame(self.mainframe.main_frame)
        self.frame_car.setEnabled(True)
        self.frame_car.setGeometry(QtCore.QRect(0, 30, 420, 490))
        self.frame_car.setFrameShape(QFrame.StyledPanel)
        self.frame_car.setFrameShadow(QFrame.Raised)
        self.frame_car.setObjectName("frame_car")
        self.frame_car.hide()
        # Client frame: created hidden; shown on demand elsewhere.
        self.frame_client = QFrame(self.mainframe.main_frame)
        self.frame_client.setEnabled(True)
        self.frame_client.setGeometry(QtCore.QRect(0, 30, 420, 491))
        self.frame_client.setFrameShape(QFrame.StyledPanel)
        self.frame_client.setFrameShadow(QFrame.Raised)
        self.frame_client.setObjectName("frame_client")
        self.frame_client.hide()
    def set_bottom(self):
        """Create the bottom button bar widgets and their separator line."""
        self.pushButton_quit = QPushButton(self.frame_buttom)
        self.pushButton_quit.setGeometry(QtCore.QRect(720, 15, 56, 21))
        self.pushButton_quit.setStyleSheet("background-color: rgb(205, 205, 205);")
        self.pushButton_quit.setObjectName("pushButton_quit")
        self.pushButton_old_order_list = QPushButton(self.frame_buttom)
        self.pushButton_old_order_list.setGeometry(QtCore.QRect(10, 15, 80, 21))
        self.pushButton_old_order_list.setStyleSheet("background-color: rgb(205, 205, 205);")
        self.pushButton_old_order_list.setObjectName("pushButton_old_order_list")
        self.pushButton_client_list = QPushButton(self.frame_buttom)
        self.pushButton_client_list.setGeometry(QtCore.QRect(90, 15, 80, 21))
        self.pushButton_client_list.setStyleSheet("background-color: rgb(205, 205, 205);")
        self.pushButton_client_list.setObjectName("pushButton_client_list")
        self.pushButton_car_list = QPushButton(self.frame_buttom)
        self.pushButton_car_list.setGeometry(QtCore.QRect(170, 15, 80, 21))
        self.pushButton_car_list.setStyleSheet("background-color: rgb(205, 205, 205);")
        self.pushButton_car_list.setObjectName("pushButton_car_list")
        self.pushButton_new_order = QPushButton(self.frame_buttom)
        self.pushButton_new_order.setGeometry(QtCore.QRect(250, 15, 80, 21))
        self.pushButton_new_order.setStyleSheet("background-color: rgb(205, 205, 205);")
        self.pushButton_new_order.setObjectName("pushButton_new_order")
        self.pushButton_help = QPushButton(self.frame_buttom)
        self.pushButton_help.setGeometry(QtCore.QRect(660, 15, 56, 21))
        self.pushButton_help.setStyleSheet("background-color: rgb(205, 205, 205);")
        self.pushButton_help.setObjectName("pushButton_help")
        # Horizontal separator line above the button row.
        self.line_hbot_1 = QFrame(self.frame_buttom)
        self.line_hbot_1.setGeometry(QtCore.QRect(10, 1, 791, 16))
        self.line_hbot_1.setFrameShape(QFrame.HLine)
        self.line_hbot_1.setFrameShadow(QFrame.Sunken)
        self.line_hbot_1.setObjectName("line_hbot_1")
    def set_menu_bar(self, MainWindow):
        """Create the menu bar, status bar and all actions, then assemble menus."""
        self.menubar = QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 797, 29))
        self.menubar.setObjectName("menubar")
        self.menuFile = QMenu(self.menubar)
        self.menuFile.setObjectName("menuFile")
        self.menuView = QMenu(self.menubar)
        self.menuView.setObjectName("menuView")
        # "New Car" is a nested sub-menu of "View".
        self.menuNewCar = QMenu(self.menuView)
        self.menuNewCar.setObjectName("menuNewCar")
        self.menuSettings = QMenu(self.menubar)
        self.menuSettings.setObjectName("menuSettings")
        self.menuAbaut = QMenu(self.menubar)
        self.menuAbaut.setObjectName("menuAbaut")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        # Actions; texts are assigned later in retranslateUi().
        self.actionFile = QAction(MainWindow)
        self.actionFile.setObjectName("actionFile")
        self.actionPrint = QAction(MainWindow)
        self.actionPrint.setObjectName("actionPrint")
        self.actionSave = QAction(MainWindow)
        self.actionSave.setObjectName("actionSave")
        self.actionExit = QAction(MainWindow)
        self.actionExit.setObjectName("actionExit")
        self.actionNew_order = QAction(MainWindow)
        self.actionNew_order.setObjectName("actionNew_order")
        self.actionNew_client = QAction(MainWindow)
        self.actionNew_client.setObjectName("actionNew_client")
        self.action_order = QAction(MainWindow)
        self.action_order.setObjectName("action_order")
        self.action_client = QAction(MainWindow)
        self.action_client.setObjectName("action_client")
        self.action_car = QAction(MainWindow)
        self.action_car.setObjectName("action_car")
        self.actionSee_old_order = QAction(MainWindow)
        self.actionSee_old_order.setObjectName("actionSee_old_order")
        self.actionHelp = QAction(MainWindow)
        self.actionHelp.setObjectName("actionHelp")
        self.actionAbaut_of_programm = QAction(MainWindow)
        self.actionAbaut_of_programm.setObjectName("actionAbaut_of_programm")
        # Assemble menu contents and attach menus to the menu bar.
        self.menuFile.addAction(self.actionFile)
        self.menuFile.addAction(self.actionPrint)
        self.menuFile.addAction(self.actionSave)
        self.menuFile.addSeparator()
        self.menuFile.addAction(self.actionExit)
        self.menuNewCar.addAction(self.action_order)
        self.menuNewCar.addAction(self.action_client)
        self.menuNewCar.addAction(self.action_car)
        self.menuView.addAction(self.menuNewCar.menuAction())
        self.menuView.addAction(self.actionSee_old_order)
        self.menuAbaut.addAction(self.actionHelp)
        self.menuAbaut.addSeparator()
        self.menuAbaut.addAction(self.actionAbaut_of_programm)
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuView.menuAction())
        self.menubar.addAction(self.menuSettings.menuAction())
        self.menubar.addAction(self.menuAbaut.menuAction())
    def setupUi(self, MainWindow):
        """Build the full window: icon, central widget, sub-views and menus."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.setWindowModality(QtCore.Qt.WindowModal)
        MainWindow.resize(1024, 600)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("../photo.jpg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        MainWindow.setWindowIcon(icon)
        self.centralwidget = QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        MainWindow.setCentralWidget(self.centralwidget)
        # self.set_frame()
        # Delegate frame/body construction to the aggregated sub-views.
        Ui_MainWindow.mainframe.main_frame(self.centralwidget)
        Ui_MainWindow.mainframe.set_body()
        self.order.set_body(Ui_MainWindow.mainframe.main_frame)
        # Ui_MainWindow.logo.set_logo()
        # self.set_bottom()
        self.set_menu_bar(MainWindow)
        # self.add_functions()
        #
        # self.frame_order.raise_()
        # self.frame_logo.raise_()
        # self.listWidget_order.raise_()
        # self.listWidget_client.raise_()
        # self.listWidget_car.raise_()
        # self.frame_buttom.raise_()
        # self.line_vfrm_1.raise_()
        # self.line_vfrm_2.raise_()
        # self.label_frame_client.raise_()
        # self.label_frame_car.raise_()
        # self.label_frame_order.raise_()
        # self.pushButton_update_client.raise_()
        # self.pushButton_update_car.raise_()
        # self.pushButton_update_order.raise_()
        # self.frame_client.raise_()
        # self.frame_car.raise_()
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def add_functions(self):
        """Connect the live signal handlers (quit button and Exit action)."""
        # Ui_MainWindow.order.pushButton_print.clicked.connect(lambda: self.action_to_change_window(Ui_MainWindow.order.pushButton_print))
        # Ui_MainWindow.order.pushButton_order_close.clicked.connect(
        #     lambda: self.action_to_change_window(Ui_MainWindow.order.pushButton_order_close)
        # )
        # Ui_MainWindow.order.pushButton_new_client.clicked.connect(
        #     lambda: self.action_to_change_window(Ui_MainWindow.order.pushButton_new_client, 'новый клиент')
        # )
        # Ui_MainWindow.order.pushButton_new_car.clicked.connect(
        #     lambda: self.action_to_change_window(Ui_MainWindow.order.pushButton_new_car, 'новая машина')
        # )
        # Ui_MainWindow.client.pushButton_save_client.clicked.connect(
        #     lambda: self.action_to_change_window(Ui_MainWindow.client.pushButton_save_client)
        # )
        #
        # Ui_MainWindow.car.pushButton_save_car.clicked.connect(
        #     lambda: self.action_to_change_window(Ui_MainWindow.car.pushButton_save_car)
        # )
        # Ui_MainWindow.car.pushButton_new_client.clicked.connect(
        #     lambda: self.action_to_change_window(Ui_MainWindow.car.pushButton_new_client)
        # )
        # Ui_MainWindow.mainframe.pushButton_update_client.clicked.connect(
        #     lambda: self.action_to_change_window(Ui_MainWindow.mainframe.pushButton_update_client)
        # )
        # Ui_MainWindow.mainframe.pushButton_update_order.clicked.connect(
        #     lambda: self.action_to_change_window(Ui_MainWindow.mainframe.pushButton_update_order)
        # )
        # Ui_MainWindow.mainframe.pushButton_update_car.clicked.connect(
        #     lambda: self.action_to_change_window(Ui_MainWindow.mainframe.pushButton_update_car)
        # )
        # self.pushButton_car_list
        # self.pushButton_client_list
        # self.pushButton_old_order_list
        # self.pushButton_new_order.clicked.connect(lambda: self.action_to_change_window(self.pushButton_new_order))
        self.pushButton_quit.clicked.connect(lambda: QApplication.quit())
        # self.pushButton_help.clicked.connect(lambda: self.action_to_change_window(self.pushButton_help))
        self.actionExit.triggered.connect(lambda: QApplication.quit())
        # self.action_client.triggered.connect(lambda: self.action_to_change_window(self.action_client))
        # self.action_car.triggered.connect(lambda: self.action_to_change_window(self.action_car))
    # @classmethod
    # def body_frame(cls, frame: QFrame = None, hide: bool = True):
    #     if hide:
    #         frame.hide()
    #     else:
    #         frame.show()
    #
    # def clicked(self, act):
    #     print(12123)
    #     print(act.text())
    #
    # def action_to_change_window(self, btn, text):
    #     self.body_frame(self.frame_order, hide=True)
    #     self.body_frame(self.frame_client, hide=True)
    #     self.body_frame(self.frame_car, hide=True)
    #     change_body_and_action_dict = {
    #         'новый клиент': lambda: Ui_MainWindow.client.set_body(self.frame_client),
    #         '... client': lambda: Ui_MainWindow.client.set_body(self.frame_client),
    #         'новый акт': lambda: Ui_MainWindow.order.set_body(self.frame_order),
    #         'новая машина': lambda: Ui_MainWindow.car.set_body(self.frame_car),
    #         '... car': lambda: Ui_MainWindow.car.set_body(self.frame_car),
    #         'save': print,
    #         'справка': print,
    #         'quit': QApplication.quit,
    #         'закрыть акт': print
    #     }
    #     if btn.objectName().startswith('pushButton'):
    #         btn.setDown(True)
    #     change_body_and_action_dict[text.lower().strip()]()
    def retranslateUi(self, MainWindow):
        """Assign all user-visible texts (partly Russian UI labels)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "BaseSTO"))
        # self.label_logo.setText(_translate("MainWindow", "TextLabel"))
        self.pushButton_quit.setText(_translate("MainWindow", "Quit"))
        self.pushButton_old_order_list.setText(_translate("MainWindow", "Старые заказы"))
        self.pushButton_client_list.setText(_translate("MainWindow", "Список клиентов"))
        self.pushButton_car_list.setText(_translate("MainWindow", "Список машин"))
        self.pushButton_new_order.setText(_translate("MainWindow", "Новый Акт"))
        self.pushButton_help.setText(_translate("MainWindow", "Справка"))
        self.menuFile.setTitle(_translate("MainWindow", "File"))
        self.menuView.setTitle(_translate("MainWindow", "View"))
        self.menuNewCar.setTitle(_translate("MainWindow", "New Car"))
        self.menuSettings.setTitle(_translate("MainWindow", "Settings"))
        self.menuAbaut.setTitle(_translate("MainWindow", "Help"))
        self.actionFile.setText(_translate("MainWindow", "File"))
        self.actionPrint.setText(_translate("MainWindow", "Print"))
        self.actionSave.setText(_translate("MainWindow", "Save"))
        self.actionExit.setText(_translate("MainWindow", "Exit"))
        self.actionNew_order.setText(_translate("MainWindow", "New order"))
        self.actionNew_client.setText(_translate("MainWindow", "New client"))
        self.action_order.setText(_translate("MainWindow", "... order"))
        self.action_client.setText(_translate("MainWindow", "... client"))
        self.action_car.setText(_translate("MainWindow", "... car"))
        self.actionSee_old_order.setText(_translate("MainWindow", "See old order"))
        self.actionHelp.setText(_translate("MainWindow", "Help"))
        self.actionAbaut_of_programm.setText(_translate("MainWindow", "Abaut of programm"))
def Main():
    """Application entry point: create the Qt application and main window,
    wire the UI onto it, and hand control to the Qt event loop."""
    qt_app = QApplication(sys.argv)
    main_window = QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(main_window)
    main_window.show()
    # exec_() blocks until the window closes; propagate its exit status.
    sys.exit(qt_app.exec_())
if __name__ == "__main__":
    Main()
| markizdesadist/BaseSTO | windowstomodel/BaseSTO.py | BaseSTO.py | py | 14,843 | python | en | code | 0 | github-code | 90 |
4342251549 | from django.db import models
from django.contrib.auth import get_user_model
from account.models import LevelAndSection, Level
from datetime import datetime
from django.utils.text import slugify
from account.models import FacultyProfile, StudentProfile
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
# School-year choices for CharField(choices=...): pairs like
# ("2023-2024", "2023-2024"), from 2010 up to 14 years past the current year.
SY = [
    ("{}-{}".format(year, year + 1), "{}-{}".format(year, year + 1))
    for year in range(2010, datetime.now().year + 15)
]
User = get_user_model()
class Subject(models.Model):
    """A subject taught to one level/section, optionally with an instructor."""
    subject_name = models.CharField(max_length=50)
    level_and_section = models.ForeignKey(
        LevelAndSection, on_delete=models.SET_NULL, null=True, blank=True)
    designated_instructor = models.ForeignKey(
        FacultyProfile, related_name="assigned_subject", on_delete=models.SET_NULL, null=True, blank=True)
    # Free-form schedule label plus explicit start/end times.
    schedule = models.CharField(max_length=255, null=True, blank=True)
    start_time = models.TimeField(null=True, blank=True)
    end_time = models.TimeField(null=True, blank=True)
    # Filled by the post_save handler below (name + id); not user-editable.
    slug = models.SlugField(
        max_length=250, null=True, blank=True, editable=False)
    date_created = models.DateTimeField(auto_now=False, auto_now_add=True)
    updated = models.DateTimeField(auto_now=True, auto_now_add=False)
    class Meta:
        ordering = ['-date_created', ]
        unique_together = ('subject_name', 'level_and_section')
    def __str__(self):
        # Include the level/section suffix only when one is assigned.
        if self.level_and_section:
            return "{}, {}-{}".format(
                self.subject_name,
                self.level_and_section.level,
                self.level_and_section.section
            )
        return self.subject_name
class SubjectGrade(models.Model):
    """Quarterly grades of one student in one subject (one row per pair)."""
    student = models.ForeignKey(
        StudentProfile, on_delete=models.CASCADE, related_name='grade')
    school_year = models.CharField(
        max_length=25, choices=SY, default=str(datetime.now().year) + "-" + str(datetime.now().year + 1))
    subject = models.ForeignKey(
        Subject, on_delete=models.DO_NOTHING, null=True, blank=True, related_name='subject_grade')
    instructor = models.ForeignKey(
        FacultyProfile, null=True, blank=True, on_delete=models.SET_NULL, related_name='given_grade')
    first_quarter = models.DecimalField(
        max_digits=10, decimal_places=4, null=True, blank=True)
    second_quarter = models.DecimalField(
        max_digits=10, decimal_places=4, null=True, blank=True)
    third_quarter = models.DecimalField(
        max_digits=10, decimal_places=4, null=True, blank=True)
    fourth_quarter = models.DecimalField(
        max_digits=10, decimal_places=4, null=True, blank=True)
    # Derived field: recomputed automatically in save() below.
    final_subject_grade = models.DecimalField(
        max_digits=10, decimal_places=4, null=True, blank=True)
    is_finalized = models.BooleanField(
        verbose_name="Finalized?", default=False,
        help_text="Once finalized, you can no longer make any more changes.")
    date_created = models.DateTimeField(auto_now=False, auto_now_add=True)
    updated = models.DateTimeField(auto_now=True, auto_now_add=False)
    class Meta:
        unique_together = ('student', 'subject')
    def __str__(self):
        return "Subject: {} Student: {}".format(
            self.subject.subject_name,
            self.student.user.get_full_name
        )
    def save(self, *args, **kwargs):
        """Recompute final_subject_grade as the mean of the four quarters."""
        # NOTE(review): truthiness check means a quarter grade of exactly 0
        # counts as "missing" and forces the final grade to 0 — confirm this
        # is intended (otherwise compare against None explicitly).
        if self.first_quarter and self.second_quarter and self.third_quarter and self.fourth_quarter:
            self.final_subject_grade = (
                self.first_quarter + self.second_quarter +
                self.third_quarter + self.fourth_quarter
            ) / 4
        else:
            self.final_subject_grade = 0
        super(SubjectGrade, self).save(*args, **kwargs)
class FinalGrade(models.Model):
    """A student's general average for one level in a given school year."""
    student = models.ForeignKey(
        StudentProfile, on_delete=models.CASCADE, related_name='finalGrade')
    level = models.ForeignKey(
        Level, on_delete=models.SET_NULL, related_name='final', null=True, blank=True)
    school_year = models.CharField(
        max_length=25, choices=SY,
        default=str(datetime.now().year) + "-" + str(datetime.now().year + 1))
    grade = models.DecimalField(
        max_digits=10, decimal_places=4, null=True, blank=True)
    comment = models.TextField(null=True, blank=True)
    # Once set, the record is considered immutable by the application.
    is_finalized = models.BooleanField(
        verbose_name="Finalized?", default=False,
        help_text="Once finalized, you can no longer make any more changes."
    )
    date_created = models.DateTimeField(auto_now=False, auto_now_add=True)
    updated = models.DateTimeField(auto_now=True, auto_now_add=False)
    class Meta:
        unique_together = ('student', 'level')
    def __str__(self):
        return "{} GWA:{} SY: {}".format(self.student.user.get_full_name, self.grade, self.school_year)
@receiver(post_save, sender=Subject)
def create_dynamic_subject_slug(sender, **kwargs):
    """On first save of a Subject, derive its slug from the name and pk."""
    if not kwargs['created']:
        return
    subject = kwargs['instance']
    subject.slug = slugify("{} {}".format(subject.subject_name, subject.id))
    # Second save persists the slug; it re-enters this handler with
    # created=False and returns immediately.
    subject.save()
@receiver(post_save, sender=FinalGrade)
def sync_subject_school_year(sender, **kwargs):
    """Propagate a saved FinalGrade's school_year to the SubjectGrade rows
    of the same level.

    Fix: guard against an empty queryset — ``subjects.first()`` returns
    ``None`` when no SubjectGrade exists for the level, which previously
    raised ``AttributeError`` on ``sample.school_year``.
    """
    final_grade = kwargs['instance']
    subjects = SubjectGrade.objects.filter(
        subject__level_and_section__level=final_grade.level)
    sample = subjects.first()
    if sample is None:
        # No subject grades exist for this level yet; nothing to sync.
        return
    if sample.school_year != final_grade.school_year:
        for subject in subjects:
            subject.school_year = final_grade.school_year
            # Per-row save() keeps model save() logic and signals intact
            # (a bulk .update() would bypass them).
            subject.save()
| joshuariveramnltech/projectDMCA- | grading_system/models.py | models.py | py | 5,608 | python | en | code | 0 | github-code | 90 |
73411567977 | # Author: Zhang Huangbin <zhb _at_ iredmail.org>
#
# Purpose: Reject senders listed in per-user blacklists, bypass senders listed
# in per-user whitelists stored in Amavisd database (@lookup_sql_dsn).
#
# Note: Amavisd is configured to be an after-queue content filter in iRedMail.
# with '@lookup_sql_dsn' setting enabled in Amavisd config file, Amavisd
# will query per-recipient, per-domain and server-wide (a.k.a. catch-all)
# white/blacklists and policy rules (tables: `mailaddr`, `users`,
# `wblist`, `policy`) stored in Amavisd SQL database.
#
# if you don't enable this plugin, Amavisd will quarantine emails sent
# from blacklisted senders, and bypass spam scanning for emails sent from
# whitelisted senders (note: other checkings like banned filename, bad
# headers, virus are still checked - if you didn't disable them in
# `amavisd.policy`). With this plugin, we can tell Postfix to reject
# blacklisted sender BEFORE email enter mail queue, or bypass emails sent
# from whitelisted senders directly.
#
# How to use this plugin:
#
# *) Enable `@lookup_sql_dsn` with correct SQL account credential in Amavisd
# config file.
#
# *) Set Amavisd lookup SQL database related parameters (`amavisd_db_*`) in
# iRedAPD config file `/opt/iredapd/settings.py`.
#
# *) Enable this plugin in iRedAPD config file `/opt/iredapd/settings.py`,
# parameter `plugins =`.
#
# *) Restart iRedAPD service.
#
# Formats of valid white/blacklist senders:
#
# - user@domain.com: single sender email address
# - @domain.com: entire sender domain
# - @.domain.com: entire sender domain and all sub-domains
# - @.: all senders
# - 192.168.1.2: single sender ip address
# - 192.168.1.0/24: CIDR network.
# - 192.168.1.*, 192.168.*.2: wildcard sender ip addresses. [DEPRECATED]
# NOTE: if you want to use wildcard IP address like
# '192.*.1.2', '192.*.*.2', please set
# 'WBLIST_ENABLE_ALL_WILDCARD_IP = True' in
# /opt/iredapd/settings.py.
import ipaddress
from web import sqlquote
from libs.logger import logger
from libs import SMTP_ACTIONS, utils
import settings
# Run this plugin in Postfix's RCPT protocol state (per-recipient checks).
SMTP_PROTOCOL_STATE = ["RCPT"]
# iRedAPD must provide an Amavisd SQL database connection to this plugin.
REQUIRE_AMAVISD_DB = True
# Domain-lookup helpers come from the LDAP or SQL backend, per deployment.
if settings.backend == "ldap":
    from libs.ldaplib.conn_utils import is_local_domain, get_alias_target_domain
else:
    from libs.sql import is_local_domain, get_alias_target_domain
# Action applied to blacklisted senders: silent discard or SMTP reject,
# chosen by site policy.
if settings.WBLIST_DISCARD_INSTEAD_OF_REJECT:
    reject_action = SMTP_ACTIONS["discard"]
else:
    reject_action = SMTP_ACTIONS["reject_blacklisted"]
def get_id_of_possible_cidr_network(conn, client_address):
    """Return ids of `mailaddr` rows whose stored address is an IPv4 CIDR
    network containing `client_address`.

    :param conn: open connection to the Amavisd SQL database.
    :param client_address: client IP address string. IPv6 or malformed
        addresses yield an empty list (only IPv4 CIDR records are matched).

    Fixes: the two bare ``except:`` clauses are narrowed to the exceptions
    actually raised (they previously swallowed SystemExit/KeyboardInterrupt
    too), and the client address is parsed only once.
    """
    ids = []

    if not client_address:
        logger.debug("No client address.")
        return ids

    try:
        _ip = ipaddress.ip_address(client_address)
    except ValueError:
        # Not a valid IP address literal at all.
        return ids

    if _ip.version != 4:
        # Only IPv4 CIDR records are queried.
        return ids

    # Cheap SQL pre-filter on the first octet; exact containment is verified
    # in Python below. `%` acts as the SQL LIKE wildcard.
    first_field = client_address.split(".")[0]
    sql_cidr = first_field + r".%%"

    sql = """SELECT id, email
             FROM mailaddr
             WHERE email LIKE %s
             ORDER BY priority DESC""" % sqlquote(sql_cidr)
    logger.debug("[SQL] Query CIDR network: \n{}".format(sql))

    try:
        qr = conn.execute(sql)
        qr_cidr = qr.fetchall()
    except Exception as e:
        logger.error("Error while querying CIDR network: {}, SQL: \n{}".format(repr(e), sql))
        return ids

    if qr_cidr:
        _cidrs = [(int(r.id), utils.bytes2str(r.email)) for r in qr_cidr]

        # Keep only candidates which parse as valid CIDR networks.
        _ip_networks = set()
        for (_id, _cidr) in _cidrs:
            try:
                _ip_networks.add((_id, ipaddress.ip_network(_cidr)))
            except ValueError:
                pass

        # Reuse `_ip` parsed above instead of re-parsing the client address.
        for (_id, _net) in _ip_networks:
            if _ip in _net:
                ids.append(_id)

    logger.debug("IDs of CIDR network(s): {}".format(ids))
    return ids
def get_id_of_external_addresses(conn, addresses):
    """Return ids (`mailaddr.id`) of the given external addresses,
    ordered by descending priority."""
    if not addresses:
        logger.debug("No addresses, return empty list of ids.")
        return []

    sql = """SELECT id, email
             FROM mailaddr
             WHERE email IN %s
             ORDER BY priority DESC""" % sqlquote(addresses)
    logger.debug("[SQL] Query external addresses: \n{}".format(sql))

    try:
        rows = conn.execute(sql).fetchall()
    except Exception as e:
        logger.error("Error while getting list of id of external addresses: {}, SQL: {}".format(repr(e), sql))
        return []

    if not rows:
        # Nothing stored for these senders; skip further work.
        logger.debug("No record found in SQL database.")
        return []

    logger.debug("Addresses (in `mailaddr`): {}".format(rows))
    return [int(r.id) for r in rows]
def get_id_of_local_addresses(conn, addresses):
    """Return ids (`users.id`) of the given local addresses,
    ordered by descending priority."""
    sql = """SELECT id, email
             FROM users
             WHERE email IN %s
             ORDER BY priority DESC""" % sqlquote(addresses)
    logger.debug("[SQL] Query local addresses: \n{}".format(sql))

    ids = []
    try:
        rows = conn.execute(sql).fetchall()
        if rows:
            ids = [int(r.id) for r in rows]
            logger.debug("Local addresses (in `amavisd.users`): {}".format(rows))
    except Exception as e:
        logger.error("Error while executing SQL command: {}".format(repr(e)))

    if ids:
        return ids
    # Nothing stored for these recipients; skip further work.
    logger.debug("No record found in SQL database.")
    return []
def apply_inbound_wblist(conn, sender_ids, recipient_ids):
    """Match inbound mail against per-recipient white/blacklist entries.

    Returns the whitelist action, the configured reject action, or the
    default action when no entry matches.
    """
    if not (sender_ids and recipient_ids):
        logger.debug("No valid sender id or recipient id.")
        return SMTP_ACTIONS["default"]

    sql = """SELECT rid, sid, wb
             FROM wblist
             WHERE sid IN %s
             AND rid IN %s""" % (sqlquote(sender_ids), sqlquote(recipient_ids))
    logger.debug("[SQL] Query inbound wblist (in `wblist`): \n{}".format(sql))
    wblists = conn.execute(sql).fetchall()

    if not wblists:
        logger.debug("No wblist found.")
        return SMTP_ACTIONS["default"]

    logger.debug("Found inbound wblist: {}".format(wblists))

    # Recipients (outer loop) and senders (inner loop) are both ordered by
    # priority; for a given pair a whitelist entry wins over a blacklist one.
    for rid in recipient_ids:
        for sid in sender_ids:
            if (rid, sid, "W") in wblists:
                logger.info("Whitelisted: wblist=({}, {}, 'W')".format(rid, sid))
                return SMTP_ACTIONS["whitelist"]
            if (rid, sid, "B") in wblists:
                logger.info("Blacklisted: wblist=({}, {}, 'B')".format(rid, sid))
                return reject_action
    return SMTP_ACTIONS["default"]
def apply_outbound_wblist(conn, sender_ids, recipient_ids):
    """Match outgoing mail against per-sender outbound white/blacklist
    entries; whitelist hits are annotated in the returned default action."""
    if not (sender_ids and recipient_ids):
        logger.debug("No valid sender id or recipient id.")
        return SMTP_ACTIONS["default"]

    if settings.WBLIST_BYPASS_OUTGOING_EMAIL:
        # Site policy: skip outbound wblist checks entirely.
        logger.debug("Bypass outgoing email as defined in WBLIST_BYPASS_OUTGOING_EMAIL.")
        return SMTP_ACTIONS["default"]

    sql = """SELECT rid, sid, wb
             FROM outbound_wblist
             WHERE sid IN %s
             AND rid IN %s""" % (sqlquote(sender_ids), sqlquote(recipient_ids))
    logger.debug("[SQL] Query outbound wblist: \n{}".format(sql))
    wblists = conn.execute(sql).fetchall()

    if not wblists:
        logger.debug("No wblist found.")
        return SMTP_ACTIONS["default"]

    logger.debug("Found outbound wblist: {}".format(wblists))

    # Senders (outer loop) and recipients (inner loop) are ordered by
    # priority; a whitelist entry wins over a blacklist one for a pair.
    for sid in sender_ids:
        for rid in recipient_ids:
            if (rid, sid, "W") in wblists:
                logger.info("Whitelisted: outbound_wblist=({}, {}, 'W')".format(rid, sid))
                return SMTP_ACTIONS["default"] + " outbound_wblist=({}, {}, 'W')".format(rid, sid)
            if (rid, sid, "B") in wblists:
                logger.info("Blacklisted: outbound_wblist=({}, {}, 'B')".format(rid, sid))
                return reject_action
    return SMTP_ACTIONS["default"]
def restriction(**kwargs):
    """Plugin entry point invoked by iRedAPD for each smtpd policy request.

    Builds the candidate sender/recipient policy addresses, then applies the
    outbound wblist (for authenticated senders) and/or the inbound wblist,
    returning the first non-default SMTP action that matches.
    """
    conn = kwargs["conn_amavisd"]
    conn_vmail = kwargs["conn_vmail"]
    if not conn:
        logger.error("Error, no valid Amavisd database connection.")
        return SMTP_ACTIONS["default"]
    # Get sender and recipient
    sender = kwargs["sender_without_ext"]
    sender_domain = kwargs["sender_domain"]
    recipient = kwargs["recipient_without_ext"]
    recipient_domain = kwargs["recipient_domain"]
    if kwargs["sasl_username"]:
        # Use sasl_username as sender for outgoing email
        sender = kwargs["sasl_username"]
        sender_domain = kwargs["sasl_username_domain"]
    if not sender:
        logger.debug("SKIP: no sender address.")
        return SMTP_ACTIONS["default"]
    if sender == recipient:
        logger.debug("SKIP: Sender is same as recipient.")
        return SMTP_ACTIONS["default"]
    # Expand both addresses into every policy form they can match
    # (full address, domain, sub-domain wildcard, catch-all, ...).
    valid_senders = utils.get_policy_addresses_from_email(mail=sender)
    valid_recipients = utils.get_policy_addresses_from_email(mail=recipient)
    if not kwargs["sasl_username"]:
        # Sender `username@*` (address extension stripped if present).
        sender_username = sender.split("@", 1)[0]
        if "+" in sender_username:
            valid_senders.append(sender_username.split("+", 1)[0] + "@*")
        else:
            valid_senders.append(sender_username + "@*")
    # Append original IP address
    client_address = kwargs["client_address"]
    valid_senders.append(client_address)
    # Append all possible wildcard IP addresses
    if utils.is_ipv4(client_address):
        valid_senders += utils.wildcard_ipv4(client_address)
    # If either domain is an alias domain, also match against the target
    # domain's equivalent address.
    alias_target_sender_domain = get_alias_target_domain(alias_domain=sender_domain, conn=conn_vmail)
    if alias_target_sender_domain:
        _mail = sender.split("@", 1)[0] + "@" + alias_target_sender_domain
        valid_senders += utils.get_policy_addresses_from_email(mail=_mail)
    alias_target_rcpt_domain = get_alias_target_domain(alias_domain=recipient_domain, conn=conn_vmail)
    if alias_target_rcpt_domain:
        _mail = recipient.split("@", 1)[0] + "@" + alias_target_rcpt_domain
        valid_recipients += utils.get_policy_addresses_from_email(mail=_mail)
    logger.debug("Possible policy senders: {}".format(valid_senders))
    logger.debug("Possible policy recipients: {}".format(valid_recipients))
    # The CIDR lookup is shared between the outbound and inbound passes;
    # this flag avoids querying it twice.
    id_of_client_cidr_networks = []
    client_cidr_network_checked = False
    # Outbound
    if kwargs["sasl_username"]:
        logger.debug("Apply wblist for outbound message.")
        id_of_local_addresses = get_id_of_local_addresses(conn, valid_senders)
        id_of_ext_addresses = []
        if id_of_local_addresses:
            id_of_ext_addresses = get_id_of_external_addresses(conn, valid_recipients)
            id_of_client_cidr_networks = get_id_of_possible_cidr_network(conn, client_address)
            client_cidr_network_checked = True
        action = apply_outbound_wblist(conn,
                                       sender_ids=id_of_local_addresses + id_of_client_cidr_networks,
                                       recipient_ids=id_of_ext_addresses)
        if not action.startswith("DUNNO"):
            return action
    # Decide whether the inbound wblist must also be checked.
    check_inbound = False
    if not kwargs["sasl_username"]:
        check_inbound = True
    if (not check_inbound) and kwargs["sasl_username"] and (sender_domain == recipient_domain):
        # Local user sends to another user in same domain
        check_inbound = True
    if not check_inbound:
        rcpt_domain_is_local = is_local_domain(conn=conn_vmail, domain=recipient_domain, include_alias_domain=False)
        if alias_target_rcpt_domain or rcpt_domain_is_local:
            # Local user sends to another local user in different domain
            check_inbound = True
    if check_inbound:
        logger.debug("Apply wblist for inbound message.")
        id_of_ext_addresses = []
        id_of_local_addresses = get_id_of_local_addresses(conn, valid_recipients)
        if id_of_local_addresses:
            id_of_ext_addresses = get_id_of_external_addresses(conn, valid_senders)
            if not client_cidr_network_checked:
                id_of_client_cidr_networks = get_id_of_possible_cidr_network(conn, client_address)
        action = apply_inbound_wblist(conn,
                                      sender_ids=id_of_ext_addresses + id_of_client_cidr_networks,
                                      recipient_ids=id_of_local_addresses)
        if not action.startswith("DUNNO"):
            return action
    return SMTP_ACTIONS["default"]
| iredmail/iRedAPD | plugins/amavisd_wblist.py | amavisd_wblist.py | py | 13,634 | python | en | code | 42 | github-code | 90 |
70747815336 | # This is config_load.py in the Config_files directory; it loads all JSON config files.
import json
import os
def load_json_config(file_path):
    """Load and return the parsed contents of a JSON config file.

    :param file_path: path to the JSON file.
    :return: the deserialized JSON document (typically a dict).

    Fix: decode explicitly as UTF-8 instead of relying on the
    locale-dependent default encoding, which breaks non-ASCII config
    values on some systems.
    """
    with open(file_path, 'r', encoding='utf-8') as file:
        return json.load(file)
def load_all_configs(base_dir):
    """Load the gNodeB, cell and UE JSON configs from <base_dir>/Config_files.

    :param base_dir: project base directory containing a `Config_files` folder.
    :return: tuple (gNodeBs_config, cells_config, ue_config).
    """
    config_dir = os.path.join(base_dir, 'Config_files')

    def config_path(file_name):
        # Helper: absolute path of one config file inside the config folder.
        return os.path.join(config_dir, file_name)

    gNodeBs_config = load_json_config(config_path('gNodeB_config.json'))
    cells_config = load_json_config(config_path('cell_config.json'))
    ue_config = load_json_config(config_path('ue_config.json'))
    return gNodeBs_config, cells_config, ue_config
| natanzi/RAN-Fusion | Config_files/config_load.py | config_load.py | py | 703 | python | en | code | 0 | github-code | 90 |
18285568039 | N=int(input())
# Each robot at position x with arm length l occupies the interval
# (x - l, x + l); store [right_end, diameter] per robot.
xl=[]
for _ in range(N):
    x,l=map(int,input().split())
    xl.append([x+l,2*l])
# Greedy: sort by right end and keep every robot whose left end
# (right_end - diameter) does not overlap the last kept right end.
xl.sort()
r=-10**9
ans=0
for i in range(N):
    if xl[i][0]-xl[i][1]>=r:
        ans+=1
        r=xl[i][0]
print(ans) | Aasthaengg/IBMdataset | Python_codes/p02796/s298618554.py | s298618554.py | py | 217 | python | en | code | 0 | github-code | 90 |
32087559760 | import sys
word_total = int(input())
# Deduplicate the words, then order by length with alphabetical order as
# tie-breaker (the alphabetical sort is stable under the later length sort).
unique_words = {sys.stdin.readline().strip() for _ in range(word_total)}
for word in sorted(sorted(unique_words), key=len):
    print(word)
| denmark-dangnagui/baekjoon | 1181.py | 1181.py | py | 182 | python | en | code | 0 | github-code | 90 |
41566380084 | from dash import Dash, dcc, html
import plotly.graph_objs as go
# Data for the graph
x_data = [1, 2, 3, 4, 5]
y_data = [2, 4, 6, 8, 10]
# Create the Plotly trace (a simple line plot)
trace = go.Scatter(
    x=x_data,
    y=y_data,
    mode='lines',
    name='Linien'
)
# Layout for the Dash dashboard (axis labels are user-facing German strings)
layout = go.Layout(
    title='Test',
    xaxis=dict(title='Jahr'),
    yaxis=dict(title='Anzahl Gesamtkrebsfälle')
)
# Create the Dash app
app = Dash(__name__)
# Define the layout of the Dash app: a heading plus the graph component
app.layout = html.Div(children=[
    html.H1(children='Test'),
    dcc.Graph(
        id='graph',
        figure={
            'data': [trace],
            'layout': layout
        }
    )
])
# Start the Dash development server
if __name__ == '__main__':
    app.run_server(debug=True, port=8054)
| malikljajic/DSE_Projekt_SoSe2023 | Graphtest.py | Graphtest.py | py | 765 | python | en | code | 0 | github-code | 90 |
33661669907 | # 20191117
"""
DP:
1. 分治
2. 状态定义
3. DP方程
"""
class Solution:
    """House Robber via a two-state DP table."""

    def rob(self, nums: List[int]) -> int:
        """Return the maximum sum of non-adjacent elements of nums."""
        if not nums:
            return 0
        count = len(nums)
        # table[i] = (best up to i while skipping house i,
        #             best up to i while robbing house i)
        table = [(0, 0)] * count
        table[0] = (0, nums[0])
        for i in range(1, count):
            skip_prev, rob_prev = table[i - 1]
            table[i] = (max(skip_prev, rob_prev), skip_prev + nums[i])
        return max(table[-1])
class Solution:
    """House Robber via O(1)-space dynamic programming."""

    def rob(self, nums: List[int]) -> int:
        """Return the maximum sum of non-adjacent elements of nums.

        Recurrence: best[i] = max(best[i-1], best[i-2] + nums[i]).
        Only the last two values are ever read, so two rolling variables
        replace the original O(n) table; the original `res` accumulator was
        redundant because best[] is non-decreasing. Also handles the empty
        and single-element inputs without special cases.
        """
        prev, curr = 0, 0  # best up to i-2, best up to i-1
        for value in nums:
            prev, curr = curr, max(curr, prev + value)
        return curr
| algorithm004-04/algorithm004-04 | Week 05/id_069/LeetCode-198-069.py | LeetCode-198-069.py | py | 913 | python | en | code | 66 | github-code | 90 |
11673634037 | import torchvision.datasets.video_utils
from torchvision.datasets.video_utils import VideoClips
from torchvision.datasets.utils import list_dir
from torchvision.datasets.folder import make_dataset
from torchvision.datasets.vision import VisionDataset
import numpy as np
import ffmpeg
import random
def get_video_size(filename):
probe = ffmpeg.probe(filename)
video_info = next(s for s in probe['streams'] if s['codec_type'] == 'video')
try:
n_frames = int(video_info['nb_frames'])
except KeyError:
n_frames = float(video_info['duration']) * eval(video_info['r_frame_rate'])
frame_rate = eval(video_info['r_frame_rate'])
width = int(video_info['width'])
height = int(video_info['height'])
return width, height, int(n_frames), frame_rate, float(video_info['duration'])
class VideoDataset(VisionDataset):
def __init__(
self,
root,
extensions=('mp4', 'avi'),
transform=None,
):
super(VideoDataset, self).__init__(root)
extensions = extensions
classes = list(sorted(list_dir(root)))
class_to_idx = {classes[i]: i for i in range(len(classes))}
self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None)
self.classes = classes
self.transform = transform
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
# Get random sample
success = False
while not success:
try:
path, target = self.samples[idx]
_, _, _, _, duration = get_video_size(path)
start = random.uniform(0., duration)
frame, _, _ = torchvision.io.read_video(
path,
start_pts=start,
end_pts=start,
pts_unit='sec',
output_format="TCHW"
)
success = True
except Exception as e:
print(e)
print('skipped idx', idx)
idx = np.random.randint(self.__len__())
# Seek and return frames
frame = self.transform(frame[0])
return frame, target
# class VideoDataset(VisionDataset):
# def __init__(
# self,
# root,
# frames_per_clip,
# step_between_clips=1,
# frame_rate=None,
# extensions=('mp4',),
# transform=None,
# _precomputed_metadata=None
# ):
# super(VideoDataset, self).__init__(root)
# extensions = extensions
# classes = list(sorted(list_dir(root)))
# class_to_idx = {classes[i]: i for i in range(len(classes))}
# self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None)
# self.classes = classes
# video_list = [x[0] for x in self.samples]
# self.video_clips = VideoClips(
# video_list,
# frames_per_clip,
# step_between_clips,
# frame_rate,
# _precomputed_metadata,
# )
# self.transform = transform
# def __len__(self):
# return self.video_clips.num_clips()
# def __getitem__(self, idx):
# success = False
# while not success:
# try:
# video, _, info, video_idx = self.video_clips.get_clip(idx)
# success = True
# except:
# print('skipped idx', idx)
# idx = np.random.randint(self.__len__())
# label = self.samples[video_idx][1]
# if self.transform is not None:
# video = self.transform(video)
# return video, label
| jeffhernandez1995/VideoVIT | old/default_torch_videodataset.py | default_torch_videodataset.py | py | 3,821 | python | en | code | 0 | github-code | 90 |
28890128928 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 2 11:49:50 2018
@author: eo
"""
# ---------------------------------------------------------------------------------------------------------------------
#%% Imports
import cv2
import numpy as np
from collections import deque
# ---------------------------------------------------------------------------------------------------------------------
#%% Callback classes
class Mouse_Follower:
def __init__(self,
state_list,
borderWH = (0, 0),
initial_state = None,
follow_states = None,
no_follow_states = None):
# Store mouse co-ords and any frame border offsets
self.mouse_xy = np.array((0, 0), dtype=np.int32)
self.borderWH = np.array(borderWH, dtype=np.int32)
# Store state list and count, for indexing
self.state_list = state_list
self._state_count = len(state_list)
# Set the initial state index
self._state_idx = 0
if initial_state is not None:
if initial_state in state_list:
self._state_idx = state_list.index(initial_state)
else:
print("")
print("Initial state '{}' is not in state list!".format(initial_state))
print("Defaulting to {}".format(state_list[0]))
# Check that all follow states are valid (remove non-valids)
follow_check = state_list if follow_states is None else follow_states
follow_check = follow_check if type(follow_check) in {list, tuple} else [follow_check]
for idx, each_state in enumerate(reversed(follow_check)):
if each_state not in state_list:
print("")
print("Follow state: '{}' is not in state list!".format(each_state))
del follow_check[idx]
# Check that all non-follow states are valid (remove non-valids)
no_follow_check = [] if no_follow_states is None else no_follow_states
no_follow_check = no_follow_check if type(no_follow_check) in {list, tuple} else [no_follow_check]
for idx, each_state in enumerate(reversed(no_follow_check)):
if each_state not in state_list:
print("")
print("No follow state: '{}' is not in state list!".format(each_state))
del no_follow_check[idx]
# Remove no-follows from the follow list
follow_set = set(follow_check)
no_follow_set = set(no_follow_check)
self.follow_states = list(follow_set.difference(no_follow_set))
# .................................................................................................................
def callback(self, event, mx, my, flags, param):
# Record mouse xy position
if self._enable_following():
self.mouse_xy = np.array((mx, my)) - self.borderWH
# Increment state on left click
if event == cv2.EVENT_LBUTTONDOWN:
self._state_idx = (1 + self._state_idx) % self._state_count
# Reset state on double right click
if event == cv2.EVENT_RBUTTONDBLCLK:
self._state_idx = 0
# .................................................................................................................
def in_state(self, *states_to_check):
current_state = self.state_list[self._state_idx]
for each_state in states_to_check:
# Sanity check. Make sure state to check is actually in the state list
if each_state not in self.state_list:
print("State: '{}' is not in state list!".format(each_state))
return False
# Check if we're currently in the target state
if each_state == current_state:
return True
return False
# .................................................................................................................
def xy(self):
return self.mouse_xy
# .................................................................................................................
def draw_mouse_xy(self, display_frame,
font = cv2.FONT_HERSHEY_SIMPLEX,
scale = 0.5,
color = (255, 255, 255),
thickness = 1,
line_type = cv2.LINE_AA,
with_background = True):
xy = tuple(self.mouse_xy)
text_string = "({}, {})".format(*xy)
if with_background:
bg_color = [255 - each_col for each_col in color]
bg_thickness = 2*thickness
cv2.putText(display_frame, text_string, xy, font, scale, bg_color, bg_thickness, line_type)
cv2.putText(display_frame, text_string, xy, font, scale, color, thickness, line_type)
# .................................................................................................................
def _enable_following(self):
current_state = self.state_list[self._state_idx]
return (current_state in self.follow_states)
# .................................................................................................................
# =====================================================================================================================
# =====================================================================================================================
# =====================================================================================================================
class Polygon_Drawer:
# .................................................................................................................
def __init__(self,
frameWH,
borderWH = (0, 0),
hover_distance_threshold = 50,
max_zones = 20,
min_points_per_poly = 3,
max_points_per_poly = 1000):
# Get proper frame size and scaling
self.frameWH = np.array(frameWH)
self._frame_scaling = self.frameWH - np.array((1,1))
self._changed = True
self.mouse_xy = np.array((0, 0), dtype=np.int32)
self.borderWH = np.array(borderWH, dtype=np.int32)
self._max_zones = max_zones
self._min_poly_pts = min_points_per_poly
self._max_poly_pts = max_points_per_poly
self.zone_hover = None
self.point_hover = None
self.zone_select = None
self.point_select = None
self.zone_list = None
self.new_points = None
# Initialize zones and points
self._new_zone_list()
self._clear_points_in_progress()
self.hover_distance_sq = hover_distance_threshold**2
self.points_in_progress = False
# .................................................................................................................
def mouse_hover(self):
# Check for the closest point
minSqDist = 1E9
# Loop through each zone, then loop through every point in each zone
# Find which point is closest to the mouse
for zoneIdx, eachZone in enumerate(self.zone_list):
for pointIdx, eachPoint in enumerate(eachZone):
# Calculate the distance between mouse and point
distSq = np.sum(np.square(self.mouse_xy - eachPoint))
# Record the closest point
if distSq < minSqDist:
minSqDist = distSq
best_match_zone = zoneIdx
best_match_point = pointIdx
# Record the closest zone and point if it's close enough to the mouse
if minSqDist < self.hover_distance_sq:
self.zone_hover = best_match_zone
self.point_hover = best_match_point
else:
self.zone_hover = None
self.point_hover = None
# .................................................................................................................
def left_click(self):
if not self.points_in_progress:
# Select the point nearest to the mouse when left clicking (and not in the middle of drawing new points)
self.zone_select = self.zone_hover
self.point_select = self.point_hover
# .................................................................................................................
def left_double_click(self):
if self.points_in_progress:
if len(self.new_points) >= self._min_poly_pts:
self._create_zone_from_points()
self._flag_change()
# Request a return from main callback
return True
# .................................................................................................................
def shift_left_click(self):
# Add new point to list
self.new_points.append(self.mouse_xy)
# If we pass the polygon point limit, create a new zone
if len(self.new_points) >= self._max_poly_pts:
self._create_zone_from_points()
self._flag_change()
# .................................................................................................................
def ctrl_left_click(self):
if not self.points_in_progress:
# Insert points into existing zones
pass
# .................................................................................................................
def left_drag(self):
# Update the dragged points based on the mouse position
if None not in {self.zone_select, self.point_select}:
self.zone_list[self.zone_select][self.point_select] = self.mouse_xy
self._flag_change()
# .................................................................................................................
def left_release(self):
# Unselect points when releasing left click
self.zone_select = None
self.point_select = None
# Request a return from main callback
return True
# .................................................................................................................
def middle_click(self):
pass
# .................................................................................................................
def middle_double_click(self):
pass
# .................................................................................................................
def shift_middle_click(self):
pass
# .................................................................................................................
def ctrl_middle_click(self):
pass
# .................................................................................................................
def middle_drag(self):
pass
# .................................................................................................................
def middle_release(self):
# Request a return from main callback
return True
# .................................................................................................................
def right_click(self):
if self.points_in_progress:
self._clear_points_in_progress()
else:
# Clear zone that are moused over, but only if we aren't currently drawing a new region
new_zone_list = [eachZone for eachZone in self.zone_list
if (cv2.pointPolygonTest(eachZone, tuple(self.mouse_xy), measureDist=False) < 0)]
self._new_zone_list(new_zone_list)
self._flag_change()
# Clear selections, since the zone indexing could be off
self.zone_select = None
self.point_select = None
self.zone_hover = None
self.point_hover = None
# .................................................................................................................
def right_double_click(self):
# Clear all zones on double right click
if not self.points_in_progress:
self._new_zone_list([])
self._flag_change()
return True
# .................................................................................................................
def shift_right_click(self):
pass
# .................................................................................................................
def ctrl_right_click(self):
if not self.points_in_progress:
# Delete nearest point from zone, assuming it doesn't shrink below minimum point count
self._flag_change()
pass
pass
# .................................................................................................................
def right_drag(self):
pass
# .................................................................................................................
def right_release(self):
# Request a return from main callback
return True
# .................................................................................................................
def mouse_move(self):
#print("XY: {}\nEVENT: {}\nFLAGS: {}\n".format(self.mouse_xy, self.event, self.flags))
pass
# .................................................................................................................
def mouse_check(self, event, mx, my, flags):
# Record events & corrected flag
self.points_in_progress = len(self.new_points) > 0
self.flags = flags & 0x1F # Mask numbers >= 32 (alt and numlock both report 32)
self.event = event
# Record mouse x and y positions at all times for hovering
self.mouse_xy = np.array((mx, my)) - self.borderWH
# Left mouse events
self.is_left_click = (self.event == cv2.EVENT_LBUTTONDOWN)
self.is_left_double_click = (self.event == cv2.EVENT_LBUTTONDBLCLK)
self.is_left_drag = ((self.flags & cv2.EVENT_FLAG_LBUTTON) > 0)
self.is_left_release = (self.event == cv2.EVENT_LBUTTONUP)
# Right mouse events
self.is_right_click = (self.event == cv2.EVENT_RBUTTONDOWN)
self.is_right_double_click = (self.event == cv2.EVENT_RBUTTONDBLCLK)
self.is_right_drag = ((self.flags & cv2.EVENT_FLAG_RBUTTON) > 0)
self.is_right_release = (self.event == cv2.EVENT_RBUTTONUP)
# Middle mouse events
self.is_middle_click = (self.event == cv2.EVENT_MBUTTONDOWN)
self.is_middle_double_click = (self.event == cv2.EVENT_MBUTTONDBLCLK)
self.is_middle_drag = ((self.flags & cv2.EVENT_FLAG_MBUTTON) > 0)
self.is_middle_release = (self.event == cv2.EVENT_MBUTTONUP)
# Modifiers
self.is_shift = ((self.flags & cv2.EVENT_FLAG_SHIFTKEY) > 0)
self.is_ctrl = ((self.flags & cv2.EVENT_FLAG_CTRLKEY) > 0)
# Alt key is not properly recognized! Gets confused with numlock
# Passive events
self.is_mouse_move = (self.event == cv2.EVENT_MOUSEMOVE)
self.is_hovering = (self.is_mouse_move
and not self.is_left_click
and not self.is_middle_click
and not self.is_right_click
and not self.points_in_progress)
# .................................................................................................................
def callback(self, event, mx, my, flags, param):
# Figure out what events are occurring
self.mouse_check(event, mx, my, flags)
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Check for passive events
if self.is_mouse_move:
self.mouse_move()
if self.is_hovering:
self.mouse_hover()
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Check for left click events
if self.is_left_click:
if self.is_shift:
req_return = self.shift_left_click()
elif self.is_ctrl:
req_return = self.ctrl_left_click()
else:
req_return = self.left_click()
# Stop if needed
if req_return: return
if self.is_left_double_click:
self.left_double_click()
if self.is_left_drag:
self.left_drag()
if self.is_left_release:
req_return = self.left_release()
if req_return: return
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Check for right click events
if self.is_middle_click:
if self.is_shift:
req_return = self.shift_middle_click()
elif self.is_ctrl:
req_return = self.ctrl_middle_click()
else:
req_return = self.middle_click()
# Stop if needed
if req_return: return
if self.is_middle_double_click:
self.middle_double_click()
if self.is_middle_drag:
self.middle_drag()
if self.is_middle_release:
req_return = self.middle_release()
if req_return: return
# . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
# Check for right click events
if self.is_right_click:
if self.is_shift:
req_return = self.shift_right_click()
elif self.is_ctrl:
req_return = self.ctrl_right_click()
else:
req_return = self.right_click()
# Stop if needed
if req_return: return
if self.is_right_double_click:
self.right_double_click()
if self.is_right_drag:
self.right_drag()
if self.is_right_release:
req_return = self.right_release()
if req_return: return
# .................................................................................................................
def arrow_keys(self, key_press):
# Check for arrow key presses (left: 81, up: 82, right: 83, down: 84)
arrow_pressed = (80 < key_press < 85)
if arrow_pressed:
if None not in (self.zone_hover, self.point_hover):
multiplier = 1 + 19*int(self.is_shift)
arrowX = (int(key_press == 83) - int(key_press == 81))*multiplier
arrowY = (int(key_press == 84) - int(key_press == 82))*multiplier
self.zone_list[self.zone_hover][self.point_hover] += (arrowX, arrowY)
self._flag_change()
# .................................................................................................................
def snap_to_border(self, key_press, snap_key = 'b', snap_threshold_distance = 0.05):
# Only snap if the snap key is pressed
snap_key_pressed = (key_press == ord(snap_key))
if snap_key_pressed:
# Make sure a point is being hovered (the point that will be snapped)
if None not in (self.zone_hover, self.point_hover):
# Get hovered point position and frame bounaries for convenience
point_x, point_y = self.zone_list[self.zone_hover][self.point_hover]
frameW, frameH = self._frame_scaling
# Set the threshold for how close a point can be 'inside' the frame and still be snapped
distance_threshold = 0.05*min(frameW, frameH)
# Check if point is snappable in x
snapXleft = (point_x < distance_threshold)
snapXright = (point_x > frameW - distance_threshold)
if snapXleft: point_x = 0
if snapXright: point_x = frameW
# Check if point is snappable in y
snapYleft = (point_y < distance_threshold)
snapYright = (point_y > frameH - distance_threshold)
if snapYleft: point_y = 0
if snapYright: point_y = frameH
# Update the quad points if any of the co-ords were snapped
if any([snapXleft, snapXright, snapYleft, snapYright]):
self.zone_list[self.zone_hover][self.point_hover] = np.array((point_x, point_y))
self._flag_change()
# .................................................................................................................
def draw_poly_in_progress(self, display_frame,
line_color = (255, 255, 0),
line_thickness = 1,
line_type = cv2.LINE_AA,
circle_radius = 5,
show_circles = True):
# Don't draw anything if there aren't any new points!
if len(self.new_points) < 1: return
# Set up points for drawing
points_and_mouse_list = self.new_points.copy()
points_and_mouse_list.append(self.mouse_xy)
draw_points = np.array(points_and_mouse_list, dtype=np.int32) + self.borderWH
# Draw region in-progress
cv2.polylines(display_frame, [draw_points], True,
line_color,
line_thickness,
line_type)
# Draw circles at all new points (not counting mouse)
if show_circles:
for each_point in draw_points[:-1]:
cv2.circle(display_frame, tuple(each_point), circle_radius, line_color, -1, line_type)
# .................................................................................................................
def draw_zones(self, display_frame,
line_color = (0, 255, 255),
line_thickness = 1,
line_type = cv2.LINE_AA,
circle_radius = 5,
show_circles = True):
# Don't draw anything if there aren't any zones!
if len(self.zone_list) < 1: return
# Draw zones
for each_zone in self.zone_list:
# Add border offsets before drawing
draw_points = np.int32(each_zone) + self.borderWH
# Draw each zone as a closed polygon
cv2.polylines(display_frame,
[draw_points],
True,
line_color,
line_thickness,
line_type)
# Draw circles at all points
if show_circles:
for each_point in draw_points:
cv2.circle(display_frame, tuple(each_point), circle_radius, line_color, -1, line_type)
# .................................................................................................................
def add_zone(self, point_list, normalized_input = True):
if point_list is not None:
if normalized_input:
self.zone_list.append(np.int32(np.round(np.array(point_list) * self._frame_scaling)))
else:
self.zone_list.append(np.int32(point_list))
# Trigger change flag
self._flag_change()
# .................................................................................................................
def add_zone_list(self, zone_list, normalized_input = True):
if zone_list is not None:
for each_point_list in zone_list:
self.add_zone(each_point_list, normalized_input)
# .................................................................................................................
def add_frame_borders(self, display_frame, border_color = (20,20,20), border_type = cv2.BORDER_CONSTANT):
# Add borders to the frame for drawing 'out-of-bounds'
return cv2.copyMakeBorder(display_frame,
top=self.borderWH[1],
bottom=self.borderWH[1],
left=self.borderWH[0],
right=self.borderWH[0],
borderType=border_type,
value=border_color)
# .................................................................................................................
def fetch_zone_list(self, normalize = False, force_frame_boundaries = True):
# Create copy for output (don't want to modify internal zone list)
output_zone_list = self.zone_list.copy()
# Force values to fit properly in frame, if needed
if force_frame_boundaries:
min_x, min_y = 0, 0
max_x, max_y= self._frame_scaling
for zone_idx, each_zone in enumerate(self.zone_list):
for point_idx, each_point in enumerate(each_zone):
# Force each point to be within the frame
x_pt = self.zone_list[zone_idx][point_idx][0]
y_pt = self.zone_list[zone_idx][point_idx][1]
output_zone_list[zone_idx][point_idx][0] = min(max_x, max(min_x, x_pt))
output_zone_list[zone_idx][point_idx][1] = min(max_y, max(min_y, y_pt))
# Normalize if needed
if normalize:
for zone_idx, each_zone in enumerate(self.zone_list):
output_zone_list[zone_idx] = each_zone / self._frame_scaling
return list(output_zone_list)
# .................................................................................................................
def changed(self):
changed_state = self._changed
self._clear_change_flag()
return changed_state
# .................................................................................................................
def _flag_change(self, changed = True):
self._changed = True
# .................................................................................................................
def _clear_change_flag(self):
self._changed = False
# .................................................................................................................
def _create_zone_from_points(self):
self.zone_list.append(np.array(self.new_points))
self._clear_points_in_progress()
# .................................................................................................................
def _new_zone_list(self, new_list = []):
self.zone_list = deque(new_list, maxlen = self._max_zones)
# .................................................................................................................
def _clear_points_in_progress(self):
self.new_points = deque([], maxlen = self._max_poly_pts)
# .................................................................................................................
# =====================================================================================================================
# =====================================================================================================================
# =====================================================================================================================
class Interactive_Quadrilateral(Polygon_Drawer):
def __init__(self,
frameWH,
borderWH = (0, 0),
hover_distance_threshold = 50,
max_zones = 1,
min_points_per_poly = 4,
max_points_per_poly = 4,
initial_quad = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]):
# Fill out base polygon drawing object
super().__init__(frameWH,
borderWH,
hover_distance_threshold,
max_zones,
min_points_per_poly,
max_points_per_poly)
# Add initial quadrilateral zone
self.add_zone(initial_quad, normalized_input = True)
# .................................................................................................................
# Remove undesirable left click functions
def left_double_click(self): pass
def shift_left_click(self): pass
def ctrl_left_click(self): pass
# .................................................................................................................
# Remove undesirable middle click functions
def middle_click(self): pass
def middle_double_click(self): pass
def shift_middle_click(self): pass
def ctrl_middle_click(self): pass
# .................................................................................................................
# Remove undesirable right click functions
def right_click(self): pass
def shift_right_click(self): pass
def ctrl_right_click(self): pass
# .................................................................................................................
# Reset quad with double right click
def right_double_click(self):
reset_zone = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
self.zone_list[0] = np.int32(np.round(np.float32(reset_zone) * self._frame_scaling))
self._flag_change()
# .................................................................................................................
# =====================================================================================================================
# =====================================================================================================================
# =====================================================================================================================
class Interactive_Rectangle(Interactive_Quadrilateral):
def __init__(self,
frameWH,
borderWH = (0, 0),
hover_distance_threshold = 50,
max_zones = 1,
min_points_per_poly = 4,
max_points_per_poly = 4,
initial_rectangle = [(0.05, 0.05), (0.95, 0.05), (0.95, 0.95), (0.05, 0.95)]):
# Fill out base polygon drawing object
super().__init__(frameWH,
borderWH,
hover_distance_threshold,
max_zones,
min_points_per_poly,
max_points_per_poly,
initial_rectangle)
# Add initial rectangular zone
self.add_zone(initial_rectangle, normalized_input = True)
# .................................................................................................................
# Override the regular point dragging
def left_drag(self):
# Update the dragged points based on the mouse position
if None not in {self.zone_select, self.point_select}:
self._rectangularize(self.zone_select, self.point_select)
# .................................................................................................................
def fetch_tl_br(self, normalize = False, force_frame_boundaries = True):
# Get the rectangle for easier reference
rect_zone = self.zone_list[0]
# Get the corner points
min_x, min_y = np.min(rect_zone, axis = 0)
max_x, max_y = np.max(rect_zone, axis = 0)
# Pull out the convenient corners for describing the rectangle
top_left = [min_x, min_y]
bot_right = [max_x, max_y]
# Force values to fit properly in frame, if needed
if force_frame_boundaries:
top_left[0] = min(self._frame_scaling[0], max(0, top_left[0]))
top_left[1] = max(0, min(self._frame_scaling[1], top_left[1]))
bot_right[0] = min(self._frame_scaling[0], max(0, bot_right[0]))
bot_right[1] = max(0, min(self._frame_scaling[1], bot_right[1]))
# Normalize if needed
if normalize:
top_left = top_left/self._frame_scaling
bot_right = bot_right/self._frame_scaling
return tuple(top_left), tuple(bot_right)
# .................................................................................................................
def _rectangularize(self, zone_select, modified_point_index):
# Get the two opposing corners from the quadrilateral
opposite_index = (modified_point_index + 2) % 4
opposite_corner = self.zone_list[zone_select][opposite_index]
far_corner_list = [self.mouse_xy, opposite_corner]
# Figure out the bounding rectangle based on the corner points
min_x, min_y = np.min(far_corner_list, axis = 0)
max_x, max_y = np.max(far_corner_list, axis = 0)
# Build the new quad corner points
tl = (min_x, min_y)
tr = (max_x, min_y)
br = (max_x, max_y)
bl = (min_x, max_y)
rect_zone = np.int32([tl, tr, br, bl])
# Update closest point selection to allow for cross-overs
self.point_select = np.argmin(np.sqrt(np.sum(np.square(rect_zone - self.mouse_xy), axis=1)))
# Replace the existing zone with updated rectangular zone
self.zone_list[zone_select] = rect_zone
self._flag_change()
# .................................................................................................................
# =====================================================================================================================
# =====================================================================================================================
# =====================================================================================================================
class Interactive_Line(Polygon_Drawer):

    """
    Polygon_Drawer specialized to a single draggable two-point line segment.

    All click interactions inherited from Polygon_Drawer except endpoint
    dragging are disabled, so the user can only move the two endpoints.
    A double right click resets the line to its default position.
    """

    # Default endpoints in normalized (x, y) frame co-ordinates.
    # Kept as a class constant so __init__ and right_double_click agree.
    _default_line = [(0.15, 0.5), (0.85, 0.5)]

    def __init__(self,
                 frameWH,
                 borderWH = (0, 0),
                 hover_distance_threshold = 50,
                 max_zones = 1,
                 min_points_per_poly = 2,
                 max_points_per_poly = 2,
                 initial_line = None):
        # initial_line defaults to None (instead of a mutable list literal)
        # and falls back to the shared class default below
        # Fill out base polygon drawing object
        super().__init__(frameWH,
                         borderWH,
                         hover_distance_threshold,
                         max_zones,
                         min_points_per_poly,
                         max_points_per_poly)
        # Add initial line
        if initial_line is None:
            initial_line = self._default_line
        self.add_zone(initial_line, normalized_input = True)

    # .................................................................................................................

    # Remove undesirable left click functions
    def left_double_click(self): pass
    def shift_left_click(self): pass
    def ctrl_left_click(self): pass

    # .................................................................................................................

    # Remove undesirable middle click functions
    def middle_click(self): pass
    def middle_double_click(self): pass
    def shift_middle_click(self): pass
    def ctrl_middle_click(self): pass

    # .................................................................................................................

    # Remove undesirable right click functions
    def right_click(self): pass
    def shift_right_click(self): pass
    def ctrl_right_click(self): pass

    # .................................................................................................................

    # Reset line with double right click
    def right_double_click(self):
        # Re-use the shared default instead of a duplicated magic literal
        default_px = np.float32(self._default_line) * self._frame_scaling
        self.zone_list[0] = np.int32(np.round(default_px))
        self._flag_change()

    # .................................................................................................................
# =====================================================================================================================
# =====================================================================================================================
# =====================================================================================================================
class Button_Grid:

    """
    Grid of clickable buttons rendered as an OpenCV image.

    Usage: create the grid, register `callback` with cv2.setMouseCallback,
    call `draw_buttons` each frame to get the image to display, and poll
    `button_pressed` / `read_all_buttons` to consume click events.
    """

    def __init__(self, buttonWH, num_rows, num_cols,
                 padding = 5,
                 bg_color = (0, 0, 0),
                 hover_color = (200, 200, 200),
                 click_color = (255, 255, 255)):

        # Make sure we have sane values
        buttonWH = (int(round(buttonWH[0])), int(round(buttonWH[1])))
        num_rows = max(1, num_rows)
        num_cols = max(1, num_cols)

        # Store grid layout/appearance settings
        self._buttonWH = buttonWH
        self._num_rows = num_rows
        self._num_cols = num_cols
        self._padding = padding
        self._bg_color = bg_color
        self._hover_color = hover_color
        self._click_color = click_color

        # Per-button records, all keyed by button label
        self._button_row_col_indices = {}   # label -> (row, col)
        self._button_configs = {}           # label -> color/shadow config (used by rename_button)
        self._button_tlbr_record = {}       # label -> [(tlx, tly), (brx, bry)] bounding box
        self._button_hover_record = {}      # label -> mouse currently hovering
        self._button_pressed_events = {}    # label -> unread click event

        # Get button frame sizing
        frame_w = (buttonWH[0] * num_cols) + (num_cols + 1) * padding
        frame_h = (buttonWH[1] * num_rows) + (num_rows + 1) * padding
        self.frameWH = (frame_w, frame_h)

        # Create blank frame for drawing buttons
        self._blank_frame = np.full((frame_h, frame_w, 3), bg_color, dtype=np.uint8)

        # Allocate space for an image with buttons drawn into it, and storage for each button image
        self._button_frame = self._blank_frame.copy()
        self._button_images = {}

        # Store default config for all button text
        self._text_config = {"fontFace": cv2.FONT_HERSHEY_SIMPLEX,
                             "fontScale": 0.75,
                             "thickness": 1}

    # .................................................................................................................

    def add_button(self, button_label, row_index = 0, col_index = 0,
                   button_color = (80,80,80),
                   text_color = (230, 230, 230),
                   draw_text_shadow = True):

        """Create a button at grid cell (row_index, col_index) and draw it into the frame."""

        if (1 + row_index > self._num_rows):
            raise ValueError("Cannot index row {}, only {} rows available!".format(row_index, self._num_rows))
        if (1 + col_index > self._num_cols):
            raise ValueError("Cannot index column {}, only {} columns available!".format(col_index, self._num_cols))

        # First get the button bounding box
        tlx = self._padding*(1 + col_index) + self._buttonWH[0]*(col_index)
        tly = self._padding*(1 + row_index) + self._buttonWH[1]*(row_index)
        brx = tlx + self._buttonWH[0]
        bry = tly + self._buttonWH[1]
        self._button_tlbr_record[button_label] = [(tlx, tly), (brx, bry)]

        # Initialize interaction state
        self._button_pressed_events[button_label] = False
        self._button_hover_record[button_label] = False

        # Draw the button image (background + centered label) and store it for later use
        b_w, b_h = self._buttonWH
        button_image = np.full((b_h, b_w, 3), button_color, dtype=np.uint8)
        label_conf = self._get_label_config(button_label, text_color)

        # Draw text, with an inverted-color shadow if desired
        if draw_text_shadow:
            shadow_conf = label_conf.copy()
            shadow_conf["color"] = [255 - each_col for each_col in text_color]
            shadow_conf["thickness"] = 2
            cv2.putText(button_image, **shadow_conf)
        cv2.putText(button_image, **label_conf)

        # Store button index and config settings (needed to rebuild on rename)
        self._button_row_col_indices[button_label] = (row_index, col_index)
        self._button_configs[button_label] = {"button_color": button_color,
                                              "text_color": text_color,
                                              "draw_text_shadow": draw_text_shadow}

        # Store button image and stamp it into the full button frame
        self._button_images[button_label] = button_image
        self._button_frame[tly:bry, tlx:brx, :] = button_image

    # .................................................................................................................

    def rename_button(self, old_label, new_label,
                      new_button_color = None,
                      new_text_color = None,
                      new_draw_text_shadow = None):

        """
        Replace a button with a re-labelled copy in the same grid cell.

        Any new_* argument left as None falls back to the old button's config.
        (Bug fix: explicitly provided new values were previously discarded and
        the old configuration was always reused.)
        """

        row_idx, col_idx = self._button_row_col_indices[old_label]
        old_config = self._button_configs[old_label]
        new_config = {"button_color": old_config["button_color"] if new_button_color is None else new_button_color,
                      "text_color": old_config["text_color"] if new_text_color is None else new_text_color,
                      "draw_text_shadow": old_config["draw_text_shadow"] if new_draw_text_shadow is None else new_draw_text_shadow}

        self.remove_button(old_label)
        self.add_button(new_label,
                        row_index = row_idx,
                        col_index = col_idx,
                        **new_config)

    # .................................................................................................................

    def remove_button(self, button_label = None, row_index = None, col_index = None):

        """Remove a button, selected either by label or by (row, col) indices."""

        # Can supply either a button label or row/col indices to get button
        if button_label is None and row_index is None and col_index is None:
            raise AttributeError("Must supply either a button label or row/col indices to delete a button!")

        # Use row/col indices to look up the label, if no label was provided
        # (Bug fix: this previously indexed the row/col dictionary with the
        #  None label, which always raised a KeyError)
        if button_label is None:
            for each_label, each_row_col in self._button_row_col_indices.items():
                if each_row_col == (row_index, col_index):
                    button_label = each_label
                    break
            else:
                raise KeyError("No button at row {}, column {}!".format(row_index, col_index))

        # Erase button image from the button frame
        (tlx, tly), (brx, bry) = self._button_tlbr_record[button_label]
        self._button_frame[tly:bry, tlx:brx, :] = self._blank_frame[tly:bry, tlx:brx, :]

        # Delete all recorded info about the button
        # (also clears the config entry, which was previously leaked)
        del self._button_tlbr_record[button_label]
        del self._button_pressed_events[button_label]
        del self._button_hover_record[button_label]
        del self._button_row_col_indices[button_label]
        del self._button_configs[button_label]
        del self._button_images[button_label]

    # .................................................................................................................

    def callback(self, event, mx, my, flags, param):

        """
        OpenCV mouse callback. Updates hover state and latches click events.

        Press events are latched (set True on click, cleared only when read by
        button_pressed/read_all_buttons) so a mouse-move event arriving between
        the click and the next poll cannot erase an unread click.
        """

        left_clicked = (event == cv2.EVENT_LBUTTONDOWN)
        for each_button, each_tlbr in self._button_tlbr_record.items():
            (tlx, tly), (brx, bry) = each_tlbr

            # Figure out if the mouse is hovering over the button
            in_x = (tlx < mx < brx)
            in_y = (tly < my < bry)
            hovering = (in_x and in_y)

            # Record hovering, latch click events
            self._button_hover_record[each_button] = hovering
            if left_clicked and hovering:
                self._button_pressed_events[each_button] = True

    # .................................................................................................................

    def draw_buttons(self):

        """Return a copy of the button frame with hover/press highlights drawn in."""

        # Get a copy of the button frame so we can draw without ruining the original image
        button_frame = self._button_frame.copy()
        for each_button in self._button_images.keys():

            # Get the button bounding box so we can draw outlines/highlights
            tl, br = self._button_tlbr_record[each_button]

            # Thin outline on hover, filled highlight while a press is unread
            if self._button_hover_record[each_button]:
                cv2.rectangle(button_frame, tl, br, self._hover_color, 1, cv2.LINE_AA)
            if self._button_pressed_events[each_button]:
                cv2.rectangle(button_frame, tl, br, self._click_color, -1, cv2.LINE_AA)

        return button_frame

    # .................................................................................................................

    def button_pressed(self, button_label, clear_on_read = True, error_if_missing = True):

        """
        Report (and by default consume) the pending click event for one button.
        Raises KeyError for unknown labels unless error_if_missing is False.
        """

        if button_label not in self._button_pressed_events:
            if error_if_missing:
                raise KeyError("Button '{}' is not in button grid!".format(button_label))
            else:
                return False

        button_is_pressed = self._button_pressed_events[button_label]
        if clear_on_read:
            self._button_pressed_events[button_label] = False

        return button_is_pressed

    # .................................................................................................................

    def read_all_buttons(self, clear_on_read = True):

        """Return a {label: pressed} snapshot, clearing all pending events by default."""

        button_states = self._button_pressed_events.copy()
        if clear_on_read:
            self._button_pressed_events = {each_key: False for each_key in self._button_pressed_events.keys()}

        return button_states

    # .................................................................................................................

    def _get_label_config(self, button_label, text_color = (230, 230, 230)):

        """Build a cv2.putText config dict, shrinking the font until the label fits the button."""

        text_is_too_big = True
        text_config = self._text_config.copy()
        while text_is_too_big:

            # cv2.getTextSize returns ((width, height), baseline)
            text_xy, text_base = cv2.getTextSize(button_label, **text_config)

            # Stop once the text fits inside the button (with a small margin)
            if ((text_xy[0] + 5) < self._buttonWH[0]) and ((text_xy[1] + 5) < self._buttonWH[1]):
                text_is_too_big = False

            # If we can't fit the text in the button, shrink it a bit and try again
            if text_is_too_big:
                text_config["fontScale"] -= 0.25

                # Stop text from shrinking forever
                if text_config["fontScale"] <= 0.15:
                    text_config["fontScale"] = 0.15
                    text_is_too_big = False

        # Center the text in the button
        # (Bug fix: the horizontal offset previously truncated (w - text_w)/2
        #  instead of rounding it, due to misplaced parentheses)
        text_x = int(round((self._buttonWH[0] - text_xy[0]) / 2))
        text_y = int(round(self._buttonWH[1] / 2)) + text_base

        # Add remaining config data
        text_config["text"] = button_label
        text_config["org"] = (text_x, text_y)
        text_config["color"] = text_color
        text_config["lineType"] = cv2.LINE_AA

        return text_config

    # .................................................................................................................
# =====================================================================================================================
# =====================================================================================================================
# =====================================================================================================================
# ---------------------------------------------------------------------------------------------------------------------
#%% Useful functions
# .....................................................................................................................
def crop_drawer_util(video_obj_ref,
                     initial_crop_tl_br = None,
                     borderWH = (40, 40),
                     displayWH = None,
                     normalize_output = True,
                     window_title = "Crop Frame"):

    """
    Interactively draw a cropping rectangle over a looping video preview.

    Blocks until the user closes the window or presses enter/escape
    (via breakByKeypress), then returns the rectangle's top-left/bottom-right
    corners from Interactive_Rectangle.fetch_tl_br (normalized co-ordinates
    when normalize_output is True). The video object's frame position is
    restored before returning.

    initial_crop_tl_br: optional ((left_x, top_y), (right_x, bot_y)) in
    normalized co-ordinates; defaults to the full frame.
    displayWH: optional (width, height) to resize the preview; defaults to
    the video's native size.
    """

    # eolib windowing helpers are imported lazily, after making sure the
    # library root is on sys.path
    _add_eolib_to_path()
    from eolib.video.windowing import SimpleWindow, breakByKeypress

    # Get important video parameters
    initial_frame_position = video_obj_ref.get(cv2.CAP_PROP_POS_FRAMES)
    vid_width = int(video_obj_ref.get(cv2.CAP_PROP_FRAME_WIDTH))
    vid_height = int(video_obj_ref.get(cv2.CAP_PROP_FRAME_HEIGHT))
    vid_fps = video_obj_ref.get(cv2.CAP_PROP_FPS)
    vid_frame_delay_ms = 1000/vid_fps
    # Clamp the per-frame wait to [1, 1000] ms for breakByKeypress
    frame_delay = max(1, min(1000, int(vid_frame_delay_ms)))

    # Set the initial cropping area if needed (corners ordered TL, TR, BR, BL)
    if initial_crop_tl_br is None:
        initial_rectangle = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
    else:
        (left_x, top_y), (right_x, bot_y) = initial_crop_tl_br
        initial_rectangle = [(left_x, top_y), (right_x, top_y), (right_x, bot_y), (left_x, bot_y)]

    # Figure out frame (re-)sizing
    resize_frame = (displayWH is not None)
    frameWH = displayWH if resize_frame else (vid_width, vid_height)

    # Build cropping object to get drawing callback
    cropper = Interactive_Rectangle(frameWH = frameWH,
                                    borderWH = borderWH,
                                    initial_rectangle=initial_rectangle)

    # Create window for display and attach cropping callback function
    cropWindow = SimpleWindow(window_title)
    cropWindow.attachCallback(cropper.callback)

    # Video loop
    while True:

        (rec, inFrame) = video_obj_ref.read()
        if not rec:
            # Loop the video: rewind and try again when frames run out
            video_obj_ref.set(cv2.CAP_PROP_POS_FRAMES, 0)
            continue

        # Resize the frame if needed
        if resize_frame:
            inFrame = cv2.resize(inFrame, dsize = frameWH)

        # Add borders to help with drawing out of bounds
        inFrame = cropper.add_frame_borders(inFrame)
        cropper.draw_zones(inFrame, show_circles=False)
        winExists = cropWindow.imshow(inFrame)
        if not winExists:
            break

        # Get keypresses
        reqBreak, keyPress = breakByKeypress(frame_delay, break_on_enter = True)
        if reqBreak:
            break

    # Clean up. Reset video back to initial frame and close windows
    video_obj_ref.set(cv2.CAP_PROP_POS_FRAMES, initial_frame_position)
    cv2.destroyAllWindows()

    return cropper.fetch_tl_br(normalize = normalize_output)
# .....................................................................................................................
def multizone_drawer_util(video_obj_ref,
                          max_zones = 100,
                          initial_zone_list = None,
                          borderWH = (40, 40),
                          displayWH = None,
                          normalize_output = True,
                          show_zone_circles = True,
                          window_title = "Drawing Frame"):

    """
    Interactively draw multiple polygon zones over a looping video preview.

    Blocks until the user closes the window or presses enter/escape
    (via breakByKeypress), then returns Polygon_Drawer.fetch_zone_list
    (normalized when normalize_output is True, without forcing points to
    frame boundaries). Arrow keys nudge the selected point while drawing.
    The video object's frame position is restored before returning.
    """

    # eolib windowing helpers are imported lazily, after making sure the
    # library root is on sys.path
    _add_eolib_to_path()
    from eolib.video.windowing import SimpleWindow, breakByKeypress

    # Get important video parameters
    initial_frame_position = video_obj_ref.get(cv2.CAP_PROP_POS_FRAMES)
    vid_width = int(video_obj_ref.get(cv2.CAP_PROP_FRAME_WIDTH))
    vid_height = int(video_obj_ref.get(cv2.CAP_PROP_FRAME_HEIGHT))
    vid_fps = video_obj_ref.get(cv2.CAP_PROP_FPS)
    vid_frame_delay_ms = 1000/vid_fps
    # Clamp the per-frame wait to [1, 1000] ms for breakByKeypress
    frame_delay = max(1, min(1000, int(vid_frame_delay_ms)))

    # Figure out frame (re-)sizing
    resize_frame = (displayWH is not None)
    frameWH = displayWH if resize_frame else (vid_width, vid_height)

    # Build drawing object to get the mouse callback
    zone_drawer = Polygon_Drawer(frameWH = frameWH,
                                 borderWH = borderWH,
                                 max_zones = max_zones)

    # Add any initial zones
    zone_drawer.add_zone_list(initial_zone_list)

    # Create window for display and attach drawing callback function
    zoneWindow = SimpleWindow(window_title)
    zoneWindow.attachCallback(zone_drawer.callback)

    # Video loop
    while True:

        (rec, inFrame) = video_obj_ref.read()
        if not rec:
            # Loop the video: rewind and try again when frames run out
            video_obj_ref.set(cv2.CAP_PROP_POS_FRAMES, 0)
            continue

        # Resize the frame if needed
        if resize_frame:
            inFrame = cv2.resize(inFrame, dsize = frameWH)

        # Add borders to help with drawing out of bounds
        inFrame = zone_drawer.add_frame_borders(inFrame)
        zone_drawer.draw_poly_in_progress(inFrame)
        zone_drawer.draw_zones(inFrame, show_circles=show_zone_circles)
        winExists = zoneWindow.imshow(inFrame)
        if not winExists:
            break

        # Get keypresses
        reqBreak, keyPress = breakByKeypress(frame_delay, break_on_enter = True)
        if reqBreak:
            break

        # Nudge mask points with arrow keys
        zone_drawer.arrow_keys(keyPress)

    # Clean up. Reset video back to initial frame and close windows
    video_obj_ref.set(cv2.CAP_PROP_POS_FRAMES, initial_frame_position)
    cv2.destroyAllWindows()

    return zone_drawer.fetch_zone_list(normalize=normalize_output, force_frame_boundaries=False)
# .....................................................................................................................
def _add_eolib_to_path():
import os
import sys
# Use the location of this file as the starting search path. The root of eolib is a few folders backwards from here
search_directory = os.path.dirname(os.path.abspath(__file__))
for k in range(5):
if "eolib" in os.listdir(search_directory):
if search_directory not in sys.path:
sys.path.append(search_directory)
break
search_directory = os.path.dirname(search_directory)
# .....................................................................................................................
# ---------------------------------------------------------------------------------------------------------------------
#%% Demo
if __name__ == "__main__":

    # Older crop/zone drawing demo, kept for reference (disabled)
    '''
    video_source = "/home/eo/Desktop/PythonData/Shared/videos/pl_part1_rot720.mp4"
    videoObj = cv2.VideoCapture(video_source)
    #crop_tl_br = crop_drawer_util(videoObj, displayWH=(320, 180))
    ini_list = [[[ 0.45454545, 0.37988827], [-0.06583072, 0.36871508], [-0.00626959, -0.06145251], [ 0.67398119, -0.06145251]]]
    #zone_list = multizone_drawer_util(videoObj, displayWH=(320,180), initial_zone_list=ini_list)
    one_zone_list = singlezone_drawer_util(videoObj, displayWH=(640, 360))
    videoObj.release()
    cv2.destroyAllWindows()
    '''

    # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .

    # Button grid demo
    cv2.destroyAllWindows()

    # Build button grid object: 120x40 px buttons in a 2x3 grid
    btn_grid = Button_Grid((120, 40), 2, 3,)
    btn_grid.add_button("ABC", 0, 0, (255, 0, 0))
    btn_grid.add_button("Hello", 0, 1, (0, 255, 0), (0, 0, 255))
    btn_grid.add_button("Goodbye", 1, 2, (200, 200, 200), (20, 20, 20))
    btn_grid.add_button("Close", 1, 1)

    # Add button callback to window
    win_name = "Button Demo"
    demo_window = cv2.namedWindow(win_name)
    cv2.setMouseCallback(win_name, btn_grid.callback, {})

    # Display loop: redraw the grid and poll for presses (~500 frames max)
    for k in range(500):

        btn_img = btn_grid.draw_buttons()
        cv2.imshow(win_name, btn_img)

        # Read a specific button
        if btn_grid.button_pressed("Close"):
            cv2.destroyAllWindows()
            break

        # Read all buttons (as a dictionary with values of True/False depending on press state)
        for each_button, each_state in btn_grid.read_all_buttons().items():
            if each_state:
                print("Button pressed:", each_button)

        cv2.waitKey(20)

    # . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
13267819422 | import os
import torch
import torch.distributed as dist
import pdb
os.makedirs('results/', exist_ok=True)
os.makedirs('weights/', exist_ok=True)
class res50_1x:
    """
    Base config: ResNet-50 backbone with the 1x (90k iteration) COCO schedule.

    `args` carries the parsed command-line options. With val_mode=True only
    the evaluation-related fields are populated; all training-only fields
    (batch sizes, LR schedule, loss weights, ...) are skipped.
    """

    def __init__(self, args, val_mode=False):
        # Dataset locations -------------------------------------------------
        data_root = '/home/feiyu/Data/coco2017/'
        self.gpu_id = args.gpu_id
        if not val_mode:
            self.train_bs = args.train_bs        # total batch size across all GPUs
            self.bs_per_gpu = args.bs_per_gpu
        self.test_bs = args.test_bs
        if not val_mode:
            self.train_imgs = data_root + 'train2017/'
            self.train_ann = data_root + 'annotations/instances_train2017.json'
        self.val_imgs = data_root + 'val2017/'
        self.val_ann = data_root + 'annotations/instances_val2017.json'
        self.val_num = args.val_num
        self.val_api = 'Improved COCO' if args.improved_coco else 'Original COCO'

        # Model -------------------------------------------------------------
        self.num_classes = 81                    # 80 COCO classes + background
        self.backbone = 'res50'
        # Pretrained backbone for training; user-supplied checkpoint for eval
        self.weight = 'weights/R-50.pkl' if not val_mode else args.weight
        if not val_mode:
            self.resume = args.resume
        self.stage_with_dcn = (False, False, False, False)
        self.dcn_tower = False

        # Anchors: one stride/size per FPN level, single aspect ratio -------
        self.anchor_strides = (8, 16, 32, 64, 128)
        self.anchor_sizes = ((64,), (128,), (256,), (512,), (1024,))
        self.aspect_ratios = (1.,)

        # Training schedule and losses --------------------------------------
        if not val_mode:
            self.min_size_train = 800
            self.max_size_train = 1333
            self.val_interval = args.val_interval
            self.box_loss_w = 1.3
            self.iou_loss_w = 0.5
            # Linear scaling rule: reference total batch size is 16
            self.bs_factor = self.train_bs / 16
            self.base_lr = 0.01 * self.bs_factor
            # Iteration counts scale inversely with the batch size
            self.max_iter = int(90000 / self.bs_factor)
            self.decay_steps = (int(60000 / self.bs_factor), int(80000 / self.bs_factor))

        # Inference / NMS ---------------------------------------------------
        self.min_size_test = 800
        self.max_size_test = 1333
        self.nms_topk = 1000
        self.nms_score_thre = 0.05
        self.nms_iou_thre = 0.6
        self.test_score_voting = args.score_voting

        # rarely used parameters ----------------------------------
        if not val_mode:
            self.fl_gamma = 2.  # focal loss gamma, alpha
            self.fl_alpha = 0.25
            self.weight_decay = 0.0001
            self.momentum = 0.9
            self.warmup_factor = 1 / 3
            self.warmup_iters = 500
            self.freeze_backbone_at = 2

        self.fpn_topk = 9
        self.match_iou_thre = 0.1
        self.max_detections = 100
        self.val_mode = val_mode

    def print_cfg(self):
        """Print every config field except internal helpers (bs_factor, val_mode)."""
        print()
        print('-' * 30 + self.__class__.__name__ + '-' * 30)
        for k, v in vars(self).items():
            if k not in ('bs_factor', 'val_mode'):
                print(f'{k}: {v}')
        print()
class res50_15x(res50_1x):
    """res50_1x with a stretched 1.5x (135k iteration) training schedule."""

    def __init__(self, args_attr, val_mode=False):
        super().__init__(args_attr, val_mode)
        if val_mode:
            return
        # Longer schedule; iteration counts scale inversely with batch size
        scale = self.bs_factor
        self.max_iter = int(135000 / scale)
        self.decay_steps = tuple(int(step / scale) for step in (90000, 120000))
class res101_2x(res50_1x):
    """ResNet-101 backbone with a 2x (180k iteration) training schedule."""

    def __init__(self, args_attr, val_mode=False):
        super().__init__(args_attr, val_mode)
        self.backbone = 'res101'
        if val_mode:
            return
        # Training-only overrides: ImageNet weights, multi-scale short side,
        # and a doubled schedule (scaled inversely with batch size)
        scale = self.bs_factor
        self.weight = 'weights/R-101.pkl'
        self.min_size_train = (640, 800)
        self.max_iter = int(180000 / scale)
        self.decay_steps = tuple(int(step / scale) for step in (120000, 160000))
class res101_dcn_2x(res50_1x):
    """ResNet-101 + deformable convolutions, 2x (180k iteration) schedule."""

    def __init__(self, args_attr, val_mode=False):
        super().__init__(args_attr, val_mode)
        # DCN is enabled in backbone stages 2-4 and in the detection tower
        self.backbone = 'res101'
        self.stage_with_dcn = (False, True, True, True)
        self.dcn_tower = True
        if val_mode:
            return
        scale = self.bs_factor
        self.weight = 'weights/R-101.pkl'
        self.min_size_train = (640, 800)
        self.max_iter = int(180000 / scale)
        self.decay_steps = tuple(int(step / scale) for step in (120000, 160000))
def get_config(args, val_mode=False):
    """
    Build and return the config object whose class name is given by args.cfg.

    val_mode=True: pins a single GPU via CUDA_VISIBLE_DEVICES (args.gpu_id
    must be a single digit string).
    val_mode=False: initializes the NCCL process group for distributed
    training and derives the per-GPU batch size from WORLD_SIZE.
    """
    if val_mode:
        assert args.gpu_id.isdigit(), f'Only one GPU can be used in val mode, got {args.gpu_id}.'
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    else:
        torch.cuda.set_device(args.local_rank)
        dist.init_process_group(backend="nccl", init_method="env://")
        # 'WORLD_SIZE' is added to the environment by torch.distributed.launch
        num_gpus = int(os.environ["WORLD_SIZE"])
        assert args.train_bs % num_gpus == 0, 'Training batch size must be divisible by GPU number.'
        args.bs_per_gpu = int(args.train_bs / num_gpus)
        args.gpu_id = os.environ.get('CUDA_VISIBLE_DEVICES') if os.environ.get('CUDA_VISIBLE_DEVICES') else 0

    # Look up the config class by name (see the classes defined in this file)
    cfg = globals()[args.cfg](args, val_mode)  # change the desired config here

    # Only rank 0 prints the config when training with multiple processes
    if val_mode:
        cfg.print_cfg()
    elif dist.get_rank() == 0:
        cfg.print_cfg()

    return cfg
| feiyuhuahuo/PAA_minimal | config.py | config.py | py | 4,896 | python | en | code | 10 | github-code | 90 |
3639833295 | import pytest
import os
import numpy as np
import datetime
import spiceypy as spice
from importlib import reload
import json
import unittest
from unittest.mock import MagicMock, PropertyMock, patch
from conftest import get_isd, get_image_label, get_image_kernels, convert_kernels, compare_dicts
import ale
from ale.drivers.selene_drivers import KaguyaTcPds3NaifSpiceDriver
image_dict = {
'TC1S2B0_01_06691S820E0465' : get_isd("kaguyatc"),
'MVA_2B2_01_02329N002E0302' : get_isd("kaguyami")
}
@pytest.fixture(scope='module')
def test_kernels():
    """
    Module-scoped fixture mapping each test image name to its converted
    (text) kernel list. On teardown, the binary kernel files recorded by
    convert_kernels are deleted.
    """
    updated_kernels = {}
    binary_kernels = {}
    for image in image_dict.keys():
        kernels = get_image_kernels(image)
        updated_kernels[image], binary_kernels[image] = convert_kernels(kernels)
    # Hand the converted kernels to the tests
    yield updated_kernels
    # Teardown: remove the binary kernels created during conversion
    for kern_list in binary_kernels.values():
        for kern in kern_list:
            os.remove(kern)
@pytest.mark.parametrize("label_type", ['pds3'])
@pytest.mark.parametrize("image", image_dict.keys())
def test_kaguya_load(test_kernels, label_type, image):
    """Generate an ISD from each Kaguya test label and compare it to the stored ISD."""
    label_file = get_image_label(image, label_type)
    isd_str = ale.loads(label_file, props={'kernels': test_kernels[image]})
    isd_obj = json.loads(isd_str)
    # Print the full ISD so differences are visible in pytest's captured output
    print(json.dumps(isd_obj, indent=2))
    # compare_dicts reports differences; an empty list is expected on a match
    assert compare_dicts(isd_obj, image_dict[image]) == []
# ========= Test pdslabel and naifspice driver =========
class test_pds_naif(unittest.TestCase):
    """
    Unit tests for individual KaguyaTcPds3NaifSpiceDriver properties.

    Each SPICE routine the driver relies on is patched with a fixed return
    value, so every test can check both the derived property value and the
    arguments the driver passed to SPICE.
    """

    def setUp(self):
        # Driver under test, built from the PDS3 label of a known TC1 image
        label = get_image_label("TC1S2B0_01_06691S820E0465", "pds3")
        self.driver = KaguyaTcPds3NaifSpiceDriver(label)

    def test_short_mission_name(self):
        assert self.driver.short_mission_name == 'selene'

    def test_utc_start_time(self):
        assert self.driver.utc_start_time == datetime.datetime(2009, 4, 5, 20, 9, 53, 607478, tzinfo=datetime.timezone.utc)

    def test_utc_stop_time(self):
        assert self.driver.utc_stop_time == datetime.datetime(2009, 4, 5, 20, 10, 23, 864978, tzinfo=datetime.timezone.utc)

    def test_instrument_id(self):
        assert self.driver.instrument_id == 'LISM_TC1_STF'

    def test_sensor_frame_id(self):
        # Frame id comes from spice.namfrm, queried with the TC1 head frame name
        with patch('ale.drivers.selene_drivers.spice.namfrm', return_value=12345) as namfrm:
            assert self.driver.sensor_frame_id == 12345
            namfrm.assert_called_with('LISM_TC1_HEAD')

    def test_instrument_host_name(self):
        assert self.driver.instrument_host_name == 'SELENE-M'

    def test_ikid(self):
        # Instrument kernel id comes from spice.bods2c on the instrument name
        with patch('ale.drivers.selene_drivers.spice.bods2c', return_value=12345) as bods2c:
            assert self.driver.ikid == 12345
            bods2c.assert_called_with('LISM_TC1')

    def test_spacecraft_name(self):
        assert self.driver.spacecraft_name == 'SELENE'

    def test_spacecraft_clock_start_count(self):
        assert self.driver.spacecraft_clock_start_count == 922997380.174174

    def test_spacecraft_clock_stop_count(self):
        assert self.driver.spacecraft_clock_stop_count == 922997410.431674

    def test_ephemeris_start_time(self):
        # Ephemeris time converts the clock start count via spice.sct2e
        with patch('ale.drivers.selene_drivers.spice.sct2e', return_value=12345) as sct2e, \
             patch('ale.drivers.selene_drivers.spice.bods2c', return_value=-12345) as bods2c:
            assert self.driver.ephemeris_start_time == 12345
            sct2e.assert_called_with(-12345, 922997380.174174)

    def test_detector_center_line(self):
        # Center is read from the kernel pool as (sample, line); the driver
        # subtracts 0.5 from the pool value
        with patch('ale.drivers.selene_drivers.spice.gdpool', return_value=np.array([54321, 12345])) as gdpool, \
             patch('ale.drivers.selene_drivers.spice.bods2c', return_value=-12345) as bods2c:
            assert self.driver.detector_center_line == 12344.5
            gdpool.assert_called_with('INS-12345_CENTER', 0, 2)

    def test_detector_center_sample(self):
        with patch('ale.drivers.selene_drivers.spice.gdpool', return_value=np.array([54321, 12345])) as gdpool, \
             patch('ale.drivers.selene_drivers.spice.bods2c', return_value=-12345) as bods2c:
            assert self.driver.detector_center_sample == 54320.5
            gdpool.assert_called_with('INS-12345_CENTER', 0, 2)

    def test_focal2pixel_samples(self):
        # Transform is 1/pixel_size, negated for samples
        with patch('ale.drivers.selene_drivers.spice.gdpool', return_value=np.array([2])) as gdpool, \
             patch('ale.drivers.selene_drivers.spice.bods2c', return_value=-12345) as bods2c:
            assert self.driver.focal2pixel_samples == [0, 0, -1/2]
            gdpool.assert_called_with('INS-12345_PIXEL_SIZE', 0, 1)

    def test_focal2pixel_lines(self):
        # The sign of the line transform follows spacecraft_direction
        with patch('ale.drivers.selene_drivers.spice.gdpool', return_value=np.array([2])) as gdpool, \
             patch('ale.drivers.selene_drivers.spice.bods2c', return_value=-12345) as bods2c, \
             patch('ale.drivers.selene_drivers.KaguyaTcPds3NaifSpiceDriver.spacecraft_direction', \
             new_callable=PropertyMock) as spacecraft_direction:
            spacecraft_direction.return_value = 1
            assert self.driver.focal2pixel_lines == [0, 1/2, 0]
            spacecraft_direction.return_value = -1
            assert self.driver.focal2pixel_lines == [0, -1/2, 0]
            gdpool.assert_called_with('INS-12345_PIXEL_SIZE', 0, 1)

    def test_spacecraft_direction(self):
        assert self.driver.spacecraft_direction == 1
| victoronline/ale | tests/pytests/test_kaguya_drivers.py | test_kaguya_drivers.py | py | 5,259 | python | en | code | null | github-code | 90 |
299205835 |
from motor import motor
import RPi.GPIO as GPIO
# Set this equal to the number of steps in a 360 degree rotation of your stepper motor
MAX_STEPS = 360


class water_gun:
    """
    Two-axis stepper-motor water gun turret.

    Tracks the current aim (in motor steps) on both axes so that
    return_to_center() can always bring the turret back to the home
    position. Movement amounts are given in steps; whole rotations are
    removed before moving.
    """

    # Class-level defaults; overridden per-instance once the gun moves/shoots
    shots_remaining = 100
    current_x = (MAX_STEPS / 2)
    current_y = (MAX_STEPS / 4)

    def __init__(self, motor_x_gpio, motor_y_gpio, motor_shoot_gpio):
        # One motor per axis, plus the trigger motor
        self.motor_x = motor(motor_x_gpio)
        self.motor_y = motor(motor_y_gpio)
        self.motor_shoot = motor(motor_shoot_gpio)

    @staticmethod
    def _clip_steps(degrees):
        """Reduce a signed step count to less than one full rotation, keeping its sign."""
        steps = abs(degrees) % MAX_STEPS
        return -steps if degrees < 0 else steps

    def move_x(self, degrees):
        """Rotate the horizontal axis by `degrees` steps (signed)."""
        steps = self._clip_steps(degrees)
        self.motor_x.shift(steps)
        # Bug fix: track the new position, otherwise return_to_center()
        # has no idea where the turret is currently aimed
        self.current_x = (self.current_x + steps) % MAX_STEPS

    def move_y(self, degrees):
        """Rotate the vertical axis by `degrees` steps (signed)."""
        steps = self._clip_steps(degrees)
        self.motor_y.shift(steps)
        self.current_y = (self.current_y + steps) % MAX_STEPS

    def shoot(self):
        """Fire one shot and decrement the remaining-shots counter."""
        self.motor_shoot.shoot()
        self.shots_remaining -= 1

    def return_to_center(self):
        """Move both axes back to the home (center) position."""
        self.move_x((MAX_STEPS / 2) - self.current_x)
        self.move_y((MAX_STEPS / 4) - self.current_y)

    def __del__(self):
        # Re-center on teardown; guarded so a partially constructed instance
        # (e.g. a motor init failure) cannot raise during garbage collection
        try:
            self.return_to_center()
        except AttributeError:
            pass
if __name__ == "__main__":
    # Manual hardware test: sweep both axes through a series of moves
    GPIO.setwarnings(False)
    # GPIO pin sets, 4 control pins per stepper motor
    pin_set_x = [14,15,18,17]
    pin_set_y = [27,22,23,24]
    pin_set_s = [10,9,25,8]
    pi_water_controller = water_gun(pin_set_x, pin_set_y, pin_set_s)
    # Horizontal-axis sweep
    pi_water_controller.move_x(100)
    pi_water_controller.move_x(280)
    pi_water_controller.move_x(360)
    pi_water_controller.move_x(180)
    pi_water_controller.move_x(75)
    # Vertical-axis sweep
    pi_water_controller.move_y(90)
    pi_water_controller.move_y(180)
    pi_water_controller.move_y(45)
    pi_water_controller.move_y(32)
| myoj/pi-water | water_gun.py | water_gun.py | py | 1,778 | python | en | code | 0 | github-code | 90 |
25787318834 | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 13 10:55:46 2017
@author: Lucie
"""
# AFCM
## Librairies utilisées
import pandas as pd
from mca import mca
import numpy as np
import matplotlib.pyplot as plt
import pylab
## Lecture des données
# Definition du chemin où sont situées les données :
path = 'C:/Users/Richard/Documents/GitHub/Segmentation-multicanale2/Données'
# Import des données
base_quali = pd.read_table(path + '/base_variables_quali.txt', delimiter=";", dtype={"IDPART_CALCULE":object})
types = base_quali.dtypes # Ok
print(types)
del types
# Import de la base quanti pour recuperation des tops en ligne et dépose
quanti_trans = pd.read_csv(path + '/v2/quanti_trans2.csv', delimiter=",", \
dtype={"IDPART_CALCULE":object})
# Concatenation des variables qui nous interessent
base_quali_V2 = pd.concat([base_quali, quanti_trans['nb_contrats_enligne'], quanti_trans['nb_contrats_depose']], axis=1)
# Creation de la variable top
base_quali_V2['top_enligne'] = np.where(base_quali_V2['nb_contrats_enligne'] == 0, 'pas_enligne', 'enligne')
base_quali_V2['top_depose'] = np.where(base_quali_V2['nb_contrats_depose'] == 0, 'pas_depose', 'depose')
base_quali_V2 = base_quali_V2.drop(['nb_contrats_enligne', 'nb_contrats_depose'], axis=1)
base_quali2_V2 = base_quali_V2.drop(['IDPART_CALCULE'], axis=1)
del(base_quali, quanti_trans, path, base_quali_V2)
## Transformation en dummies
dc = pd.DataFrame(pd.get_dummies(base_quali2_V2))
# Selection des 15000 premiers individus (la mca ne tourne pas sur 20000 :-( )
dc_sample = dc.iloc[0:15000, :]
del(dc, base_quali2_V2)
# Code mca
mca_df = mca(dc_sample, benzecri=False)
del dc_sample
coor = mca_df.fs_r(N=10)
plt.boxplot(coor)
coor = pd.DataFrame(coor)
# Enregistrement pour eviter de faire retourner les codes
coor.to_csv(path + '/coor_afcm.csv', index=False)
# Variance expliquée
var_exp = mca_df.L/sum(mca_df.L)
# Variance cumulée
var_cumul = np.cumsum(mca_df.L)/sum(mca_df.L)
## Graph de la variance expliquée par les composantes
plt.title('Var expliquée AFCM')
plt.ylabel('Part de la variance expliquée')
plt.xlabel('Composantes')
plt.plot(var_exp, 'bo')
plt.show()
## Graph de la variance cumulée expliquée
plt.title('Var expliquée cumulée AFCM')
plt.ylabel('Part de la variance expliquée')
plt.xlabel('Composantes')
plt.plot(var_cumul, 'bo')
plt.show()
# eigenvalues
print(mca_df.L)
# Composantes principales des colonnes (modalités)
print(mca_df.fs_c())
# Représentation des modalités dans les plans principaux
# Comp 1 et 2
plt.figure(figsize=(16, 8))
plt.scatter(mca_df.fs_c()[:, 0], mca_df.fs_c()[:, 1])
for i, j, nom in zip(mca_df.fs_c()[:, 0], mca_df.fs_c()[:, 1], dc_sample.columns):
plt.text(i, j, nom)
plt.show()
# Comp 1 et 3
plt.figure(figsize=(16, 8))
plt.scatter(mca_df.fs_c()[:, 0], mca_df.fs_c()[:, 2])
for i, j, nom in zip(mca_df.fs_c()[:, 0], mca_df.fs_c()[:, 2], dc_sample.columns):
plt.text(i, j, nom)
plt.show()
# Comp 2 et 3
plt.figure(figsize=(16, 8))
plt.scatter(mca_df.fs_c()[:, 1], mca_df.fs_c()[:, 2])
for i, j, nom in zip(mca_df.fs_c()[:, 1], mca_df.fs_c()[:, 2], dc_sample.columns):
plt.text(i, j, nom)
plt.show()
| OliviaJly/segmentation-multicanale | AFCM.py | AFCM.py | py | 3,194 | python | fr | code | 0 | github-code | 90 |
18483839869 | from collections import deque
# Reads N sorted values and prints the maximum achievable sum of absolute
# differences between adjacent elements over all orderings of the values.
N = int(input())
A_list = sorted([int(input()) for _ in range(N)])

# Small cases handled directly
if N == 2:
    print(abs(A_list[0] - A_list[1]))
    exit()

if N == 3:
    # Try every middle element; the middle value contributes to both gaps
    a0,a1,a2 = A_list
    print(max(abs(a0-a1) + abs(a1-a2),abs(a1-a2) + abs(a2-a0) ,abs(a0-a2) + abs(a0-a1)))
    exit()

# General case: greedily grow the arrangement from both ends.
# l/r are the current left/right end values of the arrangement;
# v1/v2 are the smallest/largest remaining candidates (deque ends).
q = deque(A_list)
res_q = deque([q.popleft(), q.pop()])
l = res_q[0]
r = res_q[-1]
v1 = v2 =0
v1 = q.popleft()
v2 = q.pop()
res = abs(l-r)
for _ in range(2,N-2):
    # Pick whichever end-extension adds the largest gap
    diff_l1 = abs(l-v1)
    diff_l2 = abs(l-v2)
    diff_r1 = abs(r-v1)
    diff_r2 = abs(r-v2)
    max_diff = max(diff_l1,diff_l2, diff_r1, diff_r2)
    res += max_diff
    if diff_l1 == max_diff:
        l = v1
        v1 = q.popleft()
    elif diff_l2 == max_diff:
        l = v2
        v2 = q.pop()
    elif diff_r1 == max_diff:
        r = v1
        v1 = q.popleft()
    else:
        r = v2
        v2 = q.pop()
# Handle the two leftover values: try every way of attaching v1/v2
# to the two ends and keep the best total
diff_list = [abs(l-v1) + abs(v1-v2),
             abs(l-v1) + abs(r-v2),
             abs(l-v2) + abs(v1-v2),
             abs(l-v2) + abs(r-v1),
             abs(r-v1) + abs(l-v2),
             abs(r-v1) + abs(v1-v2),
             abs(r-v2) + abs(l-v1),
             abs(r-v2) + abs(v1-v2)]
res += max(diff_list)
print(res)
20512269257 | from django.urls import path
from . import views
# Movie API routes; handlers live in this app's views module.
urlpatterns = [
    # recommend
    path('recommend/anonymous/', views.recommend_anonymous),  # main recommendation list (guest)
    path('recommend/<int:user_pk>/', views.recommend_user),  # main recommendation list (member)
    path('recommend/allrandom/', views.all_random),  # one random pick from all movies
    # search movie page(all_list)
    path('listall/', views.movie_list),
    # Detail modal page
    path('<int:movie_pk>/', views.movie_detail),
    # like_movie
    path('<int:movie_pk>/like/', views.like_movie),
    # wish_watch
    path('<int:movie_pk>/wish/', views.add_watchlist),
    # is_watched
    path('<int:movie_pk>/watched/', views.add_watched),
    # add_prefer
    path('<int:movie_pk>/prefer/', views.add_user_prefer),
]
| jonghopark1014/Clickflix | Clickflix_back/movies/urls.py | urls.py | py | 795 | python | en | code | 0 | github-code | 90 |
16316969463 | from tkinter import ttk
import cv2
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
from torchvision import models
import librosa
import librosa.display
# import torchaudio
import copy
import scipy.ndimage as ndimage
from config import *
# from visualization import *
def load_one_video_audio(path, final_frame, fps, input_dim, hop_len=20, hop_sample_num=512,
                         audio_sample_rate=22050, raw_h=720, raw_w=1280):
    """Load a video's audio track as one log-mel image per visual frame.

    A window of ``hop_len`` spectrogram columns (~20 * 23 ms = 460 ms)
    centred on each visual frame's time point is cut out of the log-mel
    spectrogram and resized to ``input_dim`` x ``input_dim``.

    Parameters
    ----------
    path : str
        Audio file readable by librosa.
    final_frame : int
        Number of visual frames to produce windows for.
    fps : float
        Frame rate of the corresponding video.
    input_dim : int
        Side length of each returned square window.
    hop_len : int
        Window width in spectrogram hops per visual frame.
    hop_sample_num : int
        Audio samples per spectrogram hop (librosa default 512).
    audio_sample_rate : int
        Sample rate used to convert hops to milliseconds.
    raw_h, raw_w : int
        Raw video resolution; only used in the progress print-out.

    Returns
    -------
    list of ndarray
        ``final_frame`` arrays of shape (input_dim, input_dim), scaled to [0, 255].
    """
    y, sr = librosa.load(path)  # y: audio data; sr: sample rate, default 22050
    # Keyword arguments: librosa >= 0.10 no longer accepts y/sr positionally,
    # and keywords have always been accepted by older versions.
    melspec = librosa.feature.melspectrogram(y=y, sr=sr)
    logmelspec = librosa.power_to_db(melspec, ref=np.max)
    # Normalise the log-mel energies into [0, 255] like an 8-bit image.
    logmelspec = logmelspec - np.min(logmelspec)
    logmelspec = logmelspec / np.max(logmelspec) * 255
    ### convert to frames
    one_video_audio = []
    one_frame_time = 1000.0 / fps  # ms per visual frame
    one_hop_time = hop_sample_num * 1000 / audio_sample_rate  # ms per spectrogram hop
    mel_spectr_len = np.shape(logmelspec)[1]
    half_win = int(hop_len / 2)  # hoisted: loop-invariant
    for i_frame in range(final_frame):
        frame_point = int(one_frame_time * i_frame / one_hop_time)
        one_frame_start = frame_point - half_win
        one_frame_end = frame_point + half_win
        # Shift the window back inside the spectrogram when it sticks out.
        if one_frame_start < 0:
            off_set = 0 - one_frame_start
        elif one_frame_end > mel_spectr_len:
            off_set = mel_spectr_len - one_frame_end
        else:
            off_set = 0
        one_frame_start += off_set  # keeps (end - start) == hop_len
        one_frame_end += off_set
        one_frame_audio = logmelspec[:, one_frame_start:one_frame_end]
        one_frame_audio = cv2.resize(one_frame_audio, (input_dim, input_dim))
        one_video_audio.append(one_frame_audio)
    print(">>>> load one video audio: {} {} {}".format(raw_w, raw_h, np.shape(one_video_audio)))
    return one_video_audio
def load_one_video_frames(read_one_video_path):
    """Decode every frame of a video file into memory.

    Parameters
    ----------
    read_one_video_path : str
        Path to a video file readable by OpenCV.

    Returns
    -------
    (list of ndarray, int)
        All decoded BGR frames and their count.
    """
    one_video_all_frames = []
    cap = cv2.VideoCapture(read_one_video_path)
    try:
        success, image = cap.read()
        # Guard the very first frame too: on an unreadable file cap.read()
        # yields (False, None) and None must not be appended (the original
        # appended the first result unconditionally).
        if success and len(np.shape(image)) == 3:
            one_video_all_frames.append(image)
        while success:
            success, image = cap.read()
            # Past the last frame cap.read() returns (False, None).
            if len(np.shape(image)) == 3:
                one_video_all_frames.append(image)
    finally:
        cap.release()  # free the decoder handle (previously leaked)
    actal_video_frames = np.shape(one_video_all_frames)[0]
    print(">>>> load one video all frames done!, data_shape: {}\n".format(
        np.shape(one_video_all_frames)))
    return one_video_all_frames, actal_video_frames
def load_one_video_gmm(path):
    """Load one video's ground-truth GMM saliency maps as grayscale frames.

    Returns (list of 2-D ndarrays, frame count).

    NOTE(review): assumes the directory contains files named
    0.jpg .. N-1.jpg with no gaps -- confirm against the data generator.
    """
    one_video_all_frames = []
    frame_list = os.listdir(path)
    # print(frame_list[:10])
    # t
    for i_frame in range(len(frame_list)):
        frame_path = path + "/%d.jpg"%(i_frame)
        one_frame_data = cv2.imread(frame_path, 0) ## 0 means read as gray format
        one_video_all_frames.append(one_frame_data)
    hmap_frame_num = np.shape(one_video_all_frames)[0]
    print(">>>> load one video gmm maps done!, data_shape: {}\n".format(
        np.shape(one_video_all_frames)))
    # t
    return one_video_all_frames, hmap_frame_num
def load_one_batch_image(one_video_frames, i_batch, train_batch_size, input_dim, frame_offset=0,
                         final_frame=None):
    """Assemble one training batch of RGB frames.

    Each frame is resized to ``input_dim`` square, converted BGR->RGB,
    transposed to channel-first (C, H, W) and mean-subtracted.

    Parameters
    ----------
    one_video_frames : sequence of ndarray
        Decoded BGR frames of one video.
    i_batch, train_batch_size : int
        Batch index and size; frames [start, start+size) are used.
    input_dim : int
        Target square side length.
    frame_offset : int
        Extra frame offset (used when loading images for optical flow).
    final_frame : int or None
        When set, batches that would run past it return the sentinel.

    Returns
    -------
    (torch.Tensor, list) or (-1, -1)
        Normalised batch plus un-normalised frames, or the (-1, -1)
        sentinel when the batch exceeds ``final_frame``.
    """
    one_batch_image = []
    one_batch_raw_image = []
    one_batch_start = frame_offset + i_batch * train_batch_size
    one_batch_end = frame_offset + (i_batch + 1) * train_batch_size
    # Idiomatic None test; keep the (-1, -1) sentinel callers test against.
    if final_frame is not None and one_batch_end > final_frame:
        return -1, -1
    for i_frame in range(one_batch_start, one_batch_end):
        one_frame = one_video_frames[i_frame]
        one_frame = cv2.resize(one_frame, (input_dim, input_dim))
        inputimage = cv2.cvtColor(one_frame, cv2.COLOR_BGR2RGB)
        ## put the channel dim first: (H, W, C) -> (C, H, W)
        inputimage = np.transpose(inputimage, (2, 0, 1))
        one_batch_raw_image.append(inputimage)
        # MEAN_VALUE comes from `from config import *` at the top of the file.
        inputimage = inputimage - MEAN_VALUE
        inputimage = inputimage.astype(np.dtype(np.float32))
        one_batch_image.append(inputimage)
    one_batch_image = torch.from_numpy(np.array(one_batch_image))
    return one_batch_image, one_batch_raw_image
def load_one_batch_audio_melspectr(one_video_audio, i_batch, train_batch_size, frame_offset=0,
                         final_frame=None, one_visual_frame_3D_frames=16, debug=False):
    """Assemble one batch of audio "3-D clips" for the audio branch.

    For every visual frame in the batch, a temporal stack of
    ``one_visual_frame_3D_frames`` neighbouring per-frame audio images
    (about 16 * 23 ms = 528 ms of audio) is gathered, centred on that frame
    and shifted back inside [0, final_frame) at the boundaries.

    Output tensor dimension: [batch, channel=1, frames, h, w].

    NOTE(review): the early-exit returns the pair (-1, -1) while the normal
    path returns a single tensor -- callers must handle both arities.
    """
    one_batch_audio = []
    one_batch_start = frame_offset + i_batch * train_batch_size
    one_batch_end = frame_offset + (i_batch + 1) * train_batch_size
    if not final_frame == None:
        if one_batch_end > final_frame:
            return -1, -1
    for i_frame in range(one_batch_start, one_batch_end):
        one_frame_start = i_frame - int(one_visual_frame_3D_frames/2)
        one_frame_end = i_frame + int(one_visual_frame_3D_frames/2)
        # Shift the temporal window back in range when it sticks out.
        if one_frame_start < 0:
            off_set = 0 - one_frame_start
        elif one_frame_end > final_frame:
            off_set = final_frame - one_frame_end
        else:
            off_set = 0
        one_frame_start += off_set ## keeps (end - start) == one_visual_frame_3D_frames
        one_frame_end += off_set
        # print(">>>> i_batch: {} i_frame: {}, one_frame_s: {}, end: {}".format(i_batch, i_frame,
        #     one_frame_start, one_frame_end))
        one_frame_audio = []
        for i_step in range(one_frame_start, one_frame_end):
            one_step_audio = one_video_audio[i_step]
            # Add the leading channel dim: (h, w) -> (1, h, w).
            one_step_audio = np.expand_dims(one_step_audio, axis=0)
            one_frame_audio.append(one_step_audio)
            # print(np.shape(one_step_audio))
            # t
        one_batch_audio.append(one_frame_audio)
    # (batch, frames, channel, h, w) -> (batch, channel, frames, h, w)
    one_batch_audio = np.array(one_batch_audio).transpose(0, 2, 1, 3, 4)
    one_batch_audio = torch.from_numpy(np.array(one_batch_audio)) # (64, 113)
    # print(">>>> shape: {}, max: {}".format(np.shape(one_batch_audio), np.shape(one_batch_audio[0]))) # torch.Size([12, 1, 16, 112, 112])
    # t
    return one_batch_audio
def load_one_batch_gmm(one_video_frames, i_batch, train_batch_size, output_dim,
                       frame_offset=0, final_frame=None):
    """Assemble one batch of ground-truth GMM saliency maps.

    Each map is min-shifted, rescaled to [0, 255] and offset by -14.9 (the
    training-set mean, per the inline comment), then stacked into a tensor
    of shape (batch, 1, h, w).

    0809 update by ml: there is a problem that the batches do not overlap.

    NOTE(review): the early-exit returns the pair (-1, -1) while the normal
    path returns a single tensor; `output_dim` is currently unused because
    the resize call is commented out.
    """
    # print(">>>> d1 {}".format(np.shape(one_video_frame)))
    # t
    one_batch_hmap = []
    one_batch_start = frame_offset + i_batch * train_batch_size
    one_batch_end = frame_offset + (i_batch + 1) * train_batch_size
    if not final_frame == None:
        if one_batch_end > final_frame:
            return -1, -1
    # print(">>>> ", one_batch_end, final_frame, final_frame)
    for i_frame in range(one_batch_start, one_batch_end):
        one_frame = one_video_frames[i_frame]
        saliencyimage = one_frame # cv2.resize(one_frame, (output_dim, output_dim))
        # print(">>>> d2: {}".format(np.shape(saliencyimage)))
        # t
        saliencyimage = saliencyimage - np.min(saliencyimage)
        if np.max(saliencyimage) > 0:
            saliencyimage = saliencyimage/np.max(saliencyimage) * 255 - 14.9 ## 14.9 is the ave value of all training gmms
        else:
            print(">>>>>>>>> np.max(gmm) <= 0")
        # print(">>>> {}".format(np.max(saliencyimage)))
        # t
        ## put the channnel dim first
        saliencyimage = saliencyimage.astype(np.dtype(np.float32))
        saliencyimage = np.expand_dims(saliencyimage, axis=0) # 1 * 37 * 33
        one_batch_hmap.append(saliencyimage) # (8, 3, 448, 448)
        # print("np.shape(saliencyimage), np.shape(one_batch_hmap): ", np.shape(saliencyimage), np.shape(one_batch_hmap))
    one_batch_hmap = torch.from_numpy(np.array(one_batch_hmap))
    # print(">>>> load one batch hmaps done!, data_shape: {}\n".format(
    #     np.shape(one_batch_hmap)))
    # t
    return one_batch_hmap
def process_output(outputs_map, image_size_W, image_size_H):
    """Post-process predicted saliency maps.

    Each map is normalised to [0, 255] and resized (bilinear) back to the
    original video resolution.

    Returns
    -------
    ndarray
        Array of shape (batch, image_size_H, image_size_W).
    """
    batch_size = len(outputs_map)
    saliency_map = []
    for i_image in range(batch_size):
        sal_map = outputs_map[i_image, :, :]
        sal_map = sal_map - np.min(sal_map)
        peak = np.max(sal_map)
        # Guard: an all-constant map would otherwise divide by zero.
        if peak > 0:
            sal_map = sal_map / peak * 255
        sal_map = cv2.resize(sal_map, (image_size_W, image_size_H))  ## INTER_LINEAR interpolation
        saliency_map.append(sal_map)
    return np.array(saliency_map)
6330855645 | from plasma.flex.messaging.selector import LexerError
# Characters the lexer returns as literal one-character tokens.
literals = ('(', ')', ',')
# Keywords; t_VAR() promotes matching identifiers (case-insensitively)
# to these token types.
reserved = ('AND', 'BETWEEN', 'IN', 'IS', 'LIKE', 'NOT', 'NULL', 'OR',
            'ESCAPE')
# List of token names. This is always required
tokens = (
    'NUMBER',
    'STRING',
    'BOOLEAN',
    'VARIABLE',
    'PLUS',
    'MINUS',
    'TIMES',
    'DIVIDE',
    'GT',
    'GTE',
    'LT',
    'LTE',
    'EQ',
    'NEQ') + reserved
# Regular expression rules for simple tokens
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_GT = r'>'
t_GTE = r'>='
t_LT = r'<'
t_LTE = r'<='
t_EQ = r'='
t_NEQ = r'<>'
# ply token rule: the raw-string docstring is the token's regular
# expression -- do not edit it.
def t_HEXNUMBER(t):
    r'0x[0-9a-fA-F]+'
    # Hex literals are folded into the generic NUMBER token type.
    t.type = 'NUMBER'
    # NOTE: `long` makes this module Python 2-only.
    t.value = long(t.value[2:], 16)
    return t
def t_NUMBER(t):
    r'(?P<main>\d*\.?\d+)([eE](?P<exponent>(\+|-)?\d+))?'
    # Mantissa and optional exponent come from the named regex groups.
    main = t.lexer.lexmatch.group('main')
    exponent = t.lexer.lexmatch.group('exponent')
    # A decimal point selects float; otherwise a (Python 2) long integer.
    t.value = float(main) if '.' in main else long(main)
    if exponent:
        t.value *= 10 ** int(exponent)
    return t
# ply token rules: each function's raw-string docstring is its token regex.
def t_STRING(t):
    r"'([^'\\]|\\.)*'"
    # Strip the surrounding single quotes; escape sequences stay verbatim.
    t.value = t.value[1:-1]
    return t
def t_BOOLEAN(t):
    r"(true|false)"
    t.value = (t.value == 'true')
    return t
def t_VAR(t):
    r"[a-zA-Z_][a-zA-Z0-9_]*"
    # Identifiers matching a reserved word (case-insensitively) are
    # re-typed as that keyword; everything else is a VARIABLE.
    uval = t.value.upper()
    if uval in reserved:
        t.type = t.value = uval
    else:
        t.type = 'VARIABLE'
    return t
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t\r\n\f'
# Error handling rule
def t_error(t):
    # Any character that matches no rule aborts lexing with a LexerError.
    raise LexerError(u"Illegal character '%s'" % t.value[0])
| hydralabs/plasma | plasma/flex/messaging/selector/sql92lexer.py | sql92lexer.py | py | 1,574 | python | en | code | 1 | github-code | 90 |
15206158179 | # Given a number N find the sum of all the even valued terms in the fibonacci sequence less than or equal to N.
# Try generating only even fibonacci numbers instead of iterating over all Fibonacci numbers.
# Sample Input 1:
# 8
# Sample Output 1 :
# 10
# Sample Input 2:
# 400
# Sample Output 2:
# 188
n = int(input())


def even_fib_sum(limit):
    """Sum of the even-valued Fibonacci terms that are <= limit.

    Every third Fibonacci number is even, and consecutive even terms obey
    E(k) = 4*E(k-1) + E(k-2) (2, 8, 34, 144, ...), so only even terms are
    generated.  This replaces the exponential recursive getFib() that was
    re-evaluated from scratch for every i, and fixes the off-by-one where
    a term equal to N was excluded: per the samples above, N=8 must give
    10 (= 2 + 8), but the old `>=` break produced 2.
    """
    total = 0
    a, b = 2, 8
    while a <= limit:
        total += a
        a, b = b, 4 * b + a
    return total


ans = even_fib_sum(n)
print(ans)
| farhan528/Coding-Problems | Problems/even_fibonacci_sum.py | even_fibonacci_sum.py | py | 572 | python | en | code | 0 | github-code | 90 |
807661572 | import random
import re
from .fuzz_utils import (
replace_random,
filter_candidates,
random_string,
num_tautology,
string_tautology,
num_contradiction,
string_contradiction,
)
def reset_inline_comments(payload: str):
    """Collapse one randomly chosen inline C-style comment down to `/**/`.

    Payloads containing no inline comment are returned untouched.
    """
    comment_spans = [m.span() for m in re.finditer(r"/\*[^(/\*|\*/)]*\*/", payload)]
    if not comment_spans:
        return payload
    start, end = random.choice(comment_spans)
    replacement = random.choice(["/**/"])
    return payload[:start] + replacement + payload[end:]
def logical_invariant(payload):
    """Insert a truth-preserving clause right before the payload's comment.

    A tautology is AND-ed (or a contradiction OR-ed) just in front of the
    first `#` / `-- ` comment marker; payloads without a comment are
    returned unchanged.
    """
    marker = re.search("(#|-- )", payload)
    if marker is None:
        # No comments found
        return payload
    cut = marker.start()
    candidates = [
        # AND-ed tautologies keep the condition true...
        " AND 1",
        " AND True",
        " AND " + num_tautology(),
        " AND " + string_tautology(),
        # ...and OR-ed contradictions leave it unchanged as well.
        " OR 0",
        " OR False",
        " OR " + num_contradiction(),
        " OR " + string_contradiction(),
    ]
    return payload[:cut] + random.choice(candidates) + payload[cut:]
def change_tautologies(payload):
    """Swap one `n=n` numeric tautology for a freshly generated one.

    Payloads containing no numeric self-comparison are returned as-is.
    """
    matches = list(re.finditer(r'((?<=[^\'"\d\wx])\d+(?=[^\'"\d\wx]))=\1', payload))
    if not matches:
        return payload
    chosen = random.choice(matches)
    start, end = chosen.span()
    new_clause = random.choice([num_tautology(), string_tautology()])
    return payload[:start] + new_clause + payload[end:]
def spaces_to_comments(payload):
    """Mutate one whitespace-like separator into an equivalent one.

    `[blank]`, `/**/`, `%20` and `+` are interchangeable; a single
    occurrence of one of them is rewritten into one of the others.
    """
    alternatives = {
        "[blank]": ["/**/", "%20", "+"],
        "/**/": ["[blank]", "%20", "+"],
        "%20": ["[blank]", "/**/", "+"],
        "+": ["[blank]", "/**/", "%20"],
    }
    present = filter_candidates(alternatives, payload)
    if not present:
        return payload
    # Pick which separator to touch, then what to turn it into.
    symbol = random.choice(present)
    substitute = random.choice(alternatives[symbol])
    return replace_random(payload, symbol, substitute)
def spaces_to_whitespaces_alternatives(payload):
    """Swap one URL-encoded whitespace for a different whitespace encoding."""
    encodings = {
        "%20": ["%2f", "%09", "%0A", "%0C", "%0D"],
        "%2f": ["%20", "%09", "%0A", "%0C", "%0D"],
        "%09": ["%2f", "%20", "%0A", "%0C", "%0D"],
        "%0A": ["%2f", "%09", "%20", "%0C", "%0D"],
        "%0C": ["%2f", "%09", "%0A", "%20", "%0D"],
        "%0D": ["%2f", "%09", "%0A", "%0C", "%20"],
    }
    present = filter_candidates(encodings, payload)
    if not present:
        return payload
    # Pick which encoding to touch, then what to turn it into.
    symbol = random.choice(present)
    substitute = random.choice(encodings[symbol])
    return replace_random(payload, symbol, substitute)
def random_case(payload):
    """Flip each character's case independently with probability 0.5."""
    return "".join(
        ch.swapcase() if random.random() > 0.5 else ch
        for ch in payload
    )
def comment_rewriting(payload):
    """Stuff random characters into an existing comment, if any."""
    coin = random.random()
    has_line_comment = "#" in payload or "-- " in payload
    if coin < 0.5 and has_line_comment:
        # Line comments swallow the rest of the string, so append freely.
        return payload + random_string(2)
    if coin >= 0.5 and "*/" in payload:
        # Pad one inline-comment terminator from the inside.
        return replace_random(payload, "*/", random_string() + "*/")
    return payload
def swap_int_repr(payload):
    """Rewrite one integer literal into an equivalent representation.

    The chosen number becomes either its hex form or a scalar subquery;
    payloads without a free-standing integer are left alone.
    """
    numbers = list(re.finditer(r'(?<=[^\'"\d\wx])\d+(?=[^\'"\d\wx])', payload))
    if not numbers:
        return payload
    start, end = random.choice(numbers).span()
    literal = payload[start:end]
    equivalents = [
        hex(int(literal)),
        "(SELECT {})".format(literal),
    ]
    return payload[:start] + random.choice(equivalents) + payload[end:]
def swap_keywords(payload):
    """Replace one logical/comparison operator with an equivalent spelling."""
    equivalents = {
        # OR
        "||": ["or"],
        "or": ["||"],
        # AND
        "&&": ["and"],
        "and": ["&&"],
        # Not equals
        "<>": ["!=", " NOT LIKE "],
        "!=": [" != ", "<>", " <> ", " NOT LIKE "],
        # Equals
        " = ": [" like "],
        " like ": [" = "],
    }
    present = filter_candidates(equivalents, payload)
    if not present:
        return payload
    # Pick an operator present in the payload, then one of its synonyms.
    symbol = random.choice(present)
    substitute = random.choice(equivalents[symbol])
    return replace_random(payload, symbol, substitute)
class Fuzzer(object):
    """Applies one randomly picked mutation strategy to a seed payload."""

    # Kept in this exact order/content: random.choice draws from it.
    strategies = [
        spaces_to_comments,
        random_case,
        swap_keywords,
        # swap_int_repr,
        spaces_to_whitespaces_alternatives,
        comment_rewriting,
        change_tautologies,
        # logical_invariant,
        reset_inline_comments,
    ]

    def __init__(self, payload):
        # Keep the pristine payload so every fuzz() starts from the same seed.
        self.initial_payload = payload
        self.payload = payload

    def fuzz(self):
        """Mutate the initial payload with one random strategy."""
        mutate = random.choice(self.strategies)
        self.payload = mutate(self.initial_payload)
        return self.payload

    def current(self):
        """Return the most recent mutation."""
        return self.payload

    def reset(self):
        """Drop any mutation and go back to the seed payload."""
        self.payload = self.initial_payload
        return self.payload
| yangheng95/DaNuoYi | DaNuoYi/evolution/fuzzer.py | fuzzer.py | py | 5,900 | python | en | code | 5 | github-code | 90 |
18405646949 | from collections import deque
# Reads a weighted tree (n vertices, n-1 edges "u v w") from stdin and
# prints one 0/1 label per vertex -- apparently a 2-colouring by the
# parity of each vertex's distance from vertex 1.
n = int(input())
e = {}
# Build an undirected adjacency map: e[u] = list of [neighbour, weight].
for i in range(n-1):
    u,v,w = map(int,input().split())
    if u not in e:
        e[u] = [[v,w]]
    else:
        e[u].append([v,w])
    if v not in e:
        e[v] = [[u,w]]
    else:
        e[v].append([u,w])
# BFS from vertex 1.  Queue entries are [vertex, dist0, dist1]; a distance
# slot is reset to 0 when the corresponding colour is assigned.
que = deque()
que.append([1,0,0])
ans = [-1 for i in range(n)]
ans[0] = 0
# NOTE(review): no_one is never set to False, so the `else: flag = False`
# branch below is unreachable and flag stays True for the whole run.
no_one = True
flag = True
while True:
    if len(que) == 0:
        break
    top = que.popleft()
    st = top[0]
    for edge in e[st]:
        ecos = edge[1]
        nex = edge[0]
        if ans[nex-1] == -1:  # vertex not coloured yet
            cos0 = top[1]+ecos
            cos1 = top[2]+ecos
            # Even accumulated distance -> colour 0, otherwise colour 1.
            if cos0 %2 == 0:
                que.append([nex,0,cos1])
                ans[nex-1] = 0
            elif cos1 %2 == 0:
                que.append([nex,cos0,0])
                ans[nex-1] = 1
            else:
                if no_one:
                    que.append([nex, cos0, 0])
                    ans[nex - 1] = 1
                else:
                    flag = False
                    break
    if not flag:
        break
# print(que)
for a in ans:
    print(a)
# print(ans) | Aasthaengg/IBMdataset | Python_codes/p03044/s074170156.py | s074170156.py | py | 1,126 | python | en | code | 0 | github-code | 90 |
40271072537 | import pandas as pd
from rdkit.Chem import Descriptors
from rdkit import Chem
df_guts = pd.read_csv('../results/guts_smiles.csv', sep=";", encoding='unicode_escape')
df_drugs = pd.read_csv('../results/drugs_smiles.csv', sep=";", encoding='unicode_escape')


def _add_descriptors(df):
    """Attach RDKit molecules plus the physico-chemical descriptors used
    downstream (TPSA, logP, Lipinski counts, MW, QED, ring counts, Fsp3).

    Mutates ``df`` in place and returns it for convenience.  This helper
    replaces the identical 11-line block that was copy-pasted for both
    dataframes.
    """
    df['mol'] = df.canonical_smiles.apply(lambda x: Chem.MolFromSmiles(x))
    df["tpsa"] = df.mol.apply(Descriptors.TPSA)
    df["logp"] = df.mol.apply(Descriptors.MolLogP)
    df["rb"] = df.mol.apply(Chem.Lipinski.NumRotatableBonds)
    df["hbd"] = df.mol.apply(Chem.Lipinski.NumHDonors)
    df["hba"] = df.mol.apply(Chem.Lipinski.NumHAcceptors)
    df["mw"] = df.mol.apply(Descriptors.ExactMolWt)
    df["qed"] = df.mol.apply(Descriptors.qed)
    df["nring"] = df.mol.apply(Chem.Lipinski.RingCount)
    df["naring"] = df.mol.apply(Chem.Lipinski.NumAromaticRings)
    df["fsp3"] = df.mol.apply(Chem.Lipinski.FractionCSP3)
    return df


_add_descriptors(df_drugs)
df_drugs["set"] = "DB"

_add_descriptors(df_guts)
# Glycerolipids get their own label within the gut set.
df_guts["set"] = df_guts.ccl.apply(lambda x: "G (GL)" if x == "Glycerolipids" else "G (NoGL)")

df_full = pd.concat([df_drugs, df_guts]).drop_duplicates()
def df2sdf(fname, mblist, idlist, idfield, addid=True):
    """Write a list of RDKit mol blocks out as one SDF file.

    Parameters
    ----------
    fname : str
        Output path (truncated if it exists).
    mblist : sequence of str
        Mol blocks (e.g. from ``Chem.MolToMolBlock``).
    idlist : sequence of str
        One identifier per mol block.
    idfield : str
        SDF data-field header line, e.g. ``'> <full_dataset >'``.
    addid : bool
        When True, drop the first character of each mol block (presumably
        the leading newline of an untitled block -- TODO confirm) and
        prepend a tab-indented identifier title line.
    """
    # `with` guarantees the handle is closed even if a write raises
    # (the original open()/close() pair could leak it on error).
    with open(fname, "w+") as f:
        for i, molblock in enumerate(mblist):
            if i > 0:
                f.write("$$$$\n")  # record separator between molecules
            if addid is True:
                f.write(f'\t{idlist[i]}\n{molblock[1:]}')
            else:
                f.write(molblock)
            f.write(f'{idfield}\n')
            f.write(f'{idlist[i]}\n\n')
        f.write("$$$$\n")  # terminate the final record
df2sdf("df_full.sdf", [Chem.MolToMolBlock(x) for x in df_full[df_full.rb<9].mol], df_full[df_full.rb<9].name.values.tolist(), '> <full_dataset >', addid = True) | corgazp/tfm-uoc | scripts/prepareSNF_files.py | prepareSNF_files.py | py | 2,405 | python | en | code | 0 | github-code | 90 |
33510139438 | import pinocchio as pin
import numpy as np
# Create model and data
model = pin.buildSampleModelHumanoidRandom()
data = model.createData()
# Set bounds (by default they are undefinded)
model.lowerPositionLimit = -np.matrix(np.ones((model.nq,1)))
model.upperPositionLimit = np.matrix(np.ones((model.nq,1)))
q = pin.randomConfiguration(model) # joint configuration
v = np.matrix(np.random.rand(model.nv,1)) # joint velocity
a = np.matrix(np.random.rand(model.nv,1)) # joint acceleration
# Evaluate all the terms required by the kinematics derivatives
pin.computeForwardKinematicsDerivatives(model,data,q,v,a)
# Evaluate the derivatives for a precise joint (e.g. rleg6_joint)
joint_name = "rleg6_joint"
joint_id = model.getJointId(joint_name)
# Derivatives of the spatial velocity with respect to the joint configuration and velocity vectors
(dv_dq,dv_dv) = pin.getJointVelocityDerivatives(model,data,joint_id,pin.ReferenceFrame.WORLD)
# or to get them in the LOCAL frame of the joint
(dv_dq_local,dv_dv_local) = pin.getJointVelocityDerivatives(model,data,joint_id,pin.ReferenceFrame.LOCAL)
# Derivatives of the spatial acceleration of the joint with respect to the joint configuration, velocity and acceleration vectors
(dv_dq,da_dq,da_dv,da_da) = pin.getJointAccelerationDerivatives(model,data,joint_id,pin.ReferenceFrame.WORLD)
# or to get them in the LOCAL frame of the joint
(dv_dq_local,da_dq_local,da_dv_local,da_da_local) = pin.getJointAccelerationDerivatives(model,data,joint_id,pin.ReferenceFrame.LOCAL)
| zhangOSK/pinocchio | examples/python/kinematics-derivatives.py | kinematics-derivatives.py | py | 1,523 | python | en | code | 0 | github-code | 90 |
8292115818 | from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import Vendor, OpeningHours, Day
@receiver(post_save, sender=Vendor)
def post_save_user(sender, instance, created, **kwargs):
    """Create default opening hours for a newly created Vendor.

    Runs on every Vendor save, but rows (09:00-20:00 for each day in
    Day.choices) are only created on the initial insert (``created``).
    """
    print("signals called for vendor")
    try:
        if created:
            # Day.choices yields (value, display) pairs; only the value is used.
            for day, _day_display in Day.choices:
                OpeningHours.objects.create(
                    restaurant=instance,
                    day=day,
                    opening_time='09:00:00',
                    closing_time='20:00:00'
                )
            print("Created Days timings")
    except Exception as e:
        # Best-effort: a failure here must not abort the Vendor save itself.
        print(e)
29425363678 | from django.shortcuts import redirect, render
from django.contrib import messages
from user.models import UserDetails
def profile(request):
    """Render the logged-in user's profile page; bounce guests to login."""
    if not request.user.is_authenticated:
        messages.error(request,"Kindly Log In")
        return redirect("/user/loginpage")
    # NOTE(review): assumes a UserDetails row exists for every user --
    # .get() raises DoesNotExist otherwise; confirm with the signup flow.
    userd = UserDetails.objects.get(user=request.user)
    return render(request,'dashboard/profile.html',{'userd':userd})
def dashboard(request):
    """Render the dashboard; unauthenticated visitors are sent to login."""
    if not request.user.is_authenticated:
        messages.error(request,"Kindly Log In")
        return redirect("/user/loginpage")
    return render(request,'dashboard/dashboard.html')
def works(request):
    """Render the works page; unauthenticated visitors are sent to login."""
    if not request.user.is_authenticated:
        messages.error(request,"Kindly Log In")
        return redirect("/user/loginpage")
    return render(request,'dashboard/works.html')
43663374963 | # Code for producing Figure 2 in the report.
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
# Global plot font.  NOTE(review): 'normal' is not a real font family name;
# matplotlib warns and falls back to the default -- probably intended.
font = {'family' : 'normal',
        'weight' : 'bold',
        'size'   : 22}
matplotlib.rc('font', **font)
# Monte-Carlo sample count used to turn hit counts into area estimates.
samples = 100000
# Each data line is "<iterations>,<hit count>".
with open("data/probs_different_its.txt") as f:
    lines = f.readlines()
values = np.zeros((2, len(lines)))
for i, line in enumerate(lines):
    line = line.replace('\n', '')
    x = line.split(",")
    values[0][i] = float(x[0])
    # hits/samples times the sampling-region area (presumably 6,
    # e.g. the [-2,1]x[-1,1] rectangle -- TODO confirm).
    values[1][i] = float(x[1])/samples * 6
# Finite-difference derivative of the area estimate w.r.t. iterations.
deriv = np.diff(values[1])
# Create left and right axes objects
fig, axl = plt.subplots(figsize=(16,12))
axr = axl.twinx()
axl.set_xlabel('Iterations')
# Create left side line plot, color accordingly
color = "blue"
axl.plot(values[0], values[1], color=color)
axl.tick_params(axis="y", color=color, labelcolor=color)
axl.set_ylabel('Mandelbrot area')
# Create right side line plot, color accordingly
color = "red"
axr.plot(values[0][7:], deriv[6:],'-.', color=color)
axr.tick_params(axis="y", color=color, labelcolor=color)
axr.set_ylabel(r'$\frac{dA_{i,n}}{di}$')
plt.grid()
plt.show()
| Snoeprol/stochastic_simulations | Assignment_code/iterations_derivative.py | iterations_derivative.py | py | 1,198 | python | en | code | 0 | github-code | 90 |
30382898848 | from json import dumps as json_dumps
from requests import get as requests_get, put as requests_put, post as requests_post
from lib.helper import ssdp_discovery, hex_to_hue, portup
# ---------------------------
# HUE Handling
# ---------------------------
class Hue:
    """Philips Hue bridge client used for light effects.

    When enabled in settings, construction locates the bridge (SSDP
    discovery if the configured IP is missing or unreachable), pairs to
    obtain an API token if needed, enumerates the reachable lights and
    validates the PoseMapping light names.  ``self.active`` is only True
    once the bridge answered with a valid config.
    """
    def __init__(self, settings):
        self.settings = settings
        self.enabled = self.settings.HueEnabled
        self.active = False
        self.ip = self.settings.HueIP
        self.token = self.settings.HueToken
        # light name -> bridge light id, filled by detect_lights()
        self.lights = {}
        if not self.enabled:
            return
        # getch is imported lazily so the platform dependency is only
        # pulled in when Hue support is actually enabled.
        if self.settings.os == "win":
            from msvcrt import getch
        elif self.settings.os == "lx":
            from getch import getch
        self.settings.log("Initializing Philips HUE...")
        #
        # IP Not set
        #
        if not self.ip or not portup(self.ip, 80):
            self.ip = self.detect_hue()
            settings.HueIP = self.ip
        #
        # Token not set
        #
        url = "http://{}:80/api/{}".format(self.ip, self.token)
        result = requests_get(
            url, data=json_dumps({"devicetype": "wooferbot"}), timeout=5
        )
        output_json = result.json()
        if result.status_code != 200 or len(output_json) == 0:
            self.settings.log("Philips HUE Bridge did not responding correctly")
            return
        # An "unauthorized user" style error means the stored token is
        # invalid -- run the interactive pairing loop until it succeeds
        # or the user cancels with C.
        if (
            isinstance(output_json, list)
            and "error" in output_json[0]
            and "description" in output_json[0]["error"]
            and (
                output_json[0]["error"]["description"] == "unauthorized user"
                or output_json[0]["error"]["description"]
                == "method, GET, not available for resource, /"
            )
        ):
            while not self.auth():
                self.settings.log("Press C to cancel or any key to try again")
                if self.settings.os == "win":
                    input_char = getch().decode("utf-8").upper()
                elif self.settings.os == "lx":
                    input_char = getch().upper()
                if input_char == "C":
                    return
            settings.HueToken = self.token
        # Re-query with the (possibly new) token and verify the bridge id.
        url = "http://{}:80/api/{}".format(self.ip, self.token)
        result = requests_get(
            url, data=json_dumps({"devicetype": "wooferbot"}), timeout=5
        )
        output_json = result.json()
        if (
            result.status_code == 200
            and "config" in output_json
            and "bridgeid" in output_json["config"]
            and len(output_json["config"]["bridgeid"]) > 2
        ):
            self.detect_lights()
            self.active = True
            self.check_mappings()
    # ---------------------------
    # check_mappings
    # ---------------------------
    def check_mappings(self):
        """Log an error for every PoseMapping light that was not discovered."""
        # Check if hue is active
        if not self.active:
            return
        for action in self.settings.PoseMapping:
            if "Hue" in self.settings.PoseMapping[action]:
                for light in self.settings.PoseMapping[action]["Hue"]:
                    if light not in self.lights:
                        self.settings.log(
                            'Error: Hue light "{}" defined in PoseMapping "{}" has not been detected.'.format(
                                light, action
                            )
                        )
    # ---------------------------
    # state
    # ---------------------------
    def state(self, device, col="", bri=100):
        """Switch one light on with colour ``col`` (hex, presumably RGB --
        converted via hex_to_hue) or off when ``col`` is empty.

        NOTE(review): "bri" is never inserted into ``data`` before the
        `"bri" in data` check below, so the brightness argument is
        effectively ignored -- confirm and fix upstream.
        """
        # Check if hue is active
        if not self.active:
            return
        # Check if light has been detected on startup
        if device not in self.lights:
            self.settings.log(
                'Philips HUE Device "{}" does not detected'.format(device)
            )
            return
        data = {}
        if col:
            # Turn hue light on
            data["on"] = True
            tmp = hex_to_hue(col)
            data["hue"] = tmp[0]
            data["sat"] = tmp[1]
        else:
            # Turn hue light off
            data["on"] = False
        if "bri" in data:
            data["bri"] = round(bri * 2.54)
        # Send API request to Hue Bridge
        url = "http://{}:80/api/{}/lights/{}/state".format(
            self.ip, self.token, str(self.lights[device])
        )
        requests_put(url, data=json_dumps(data), timeout=5)
    # ---------------------------
    # detect_lights
    # ---------------------------
    def detect_lights(self):
        """Query /lights and remember every reachable, named light in self.lights."""
        url = "http://{}:80/api/{}/lights".format(self.ip, self.token)
        result = requests_get(url, timeout=5)
        if result.status_code == 200:
            output_json = result.json()
            i = -1
            # Iterating the dict yields the bridge's light ids (keys).
            for items in output_json:
                i = i + 1
                if "error" in items and output_json[i]["error"]["type"] == 1:
                    self.settings.log("Philips HUE: Unauthorized user")
                    return False
                if not output_json[items]["state"]["reachable"]:
                    continue
                if len(output_json[items]["name"]) > 0:
                    self.lights[output_json[items]["name"]] = items
    # ---------------------------
    # auth
    # ---------------------------
    def auth(self):
        """Pair with the bridge (requires the physical link button).

        Returns True on success, False on failure; falls through with an
        implicit None (also falsy to the caller's retry loop) when the
        bridge does not answer with HTTP 200.
        """
        self.settings.log("Registering HueBridge...")
        # Send API request
        data = {"devicetype": "wooferbot"}
        url = "http://{}:80/api".format(self.ip)
        result = requests_post(url, data=json_dumps(data), timeout=5)
        if result.status_code == 200:
            output_json = result.json()
            i = -1
            for items in output_json:
                i = i + 1
                # Authorization requires hardware confirmation
                if "error" in items:
                    error_type = output_json[i]["error"]["type"]
                    if error_type == 101:
                        self.settings.log("Error: Press link button and try again")
                        return False
                # Authorization successful
                if "success" in items:
                    self.token = output_json[i]["success"]["username"]
                    self.settings.log("Authorized successfully")
                    return True
            # General error
            self.settings.log("Error connecting")
            return False
    # ---------------------------
    # detect_hue
    # ---------------------------
    def detect_hue(self):
        """SSDP-scan for a Hue bridge, retrying interactively.

        The scan window grows by 5 s per failed attempt (capped at 20 s);
        returns the first discovered IP, or None if the user cancels.
        """
        if self.settings.os == "win":
            from msvcrt import getch
        elif self.settings.os == "lx":
            from getch import getch
        ip_list = []
        discovery_time = 5
        while len(ip_list) == 0:
            self.settings.log("Starting Hue Bridge discovery.")
            ip_list = ssdp_discovery(
                searchstr="ipbridge", discovery_time=discovery_time
            )
            if len(ip_list) == 0:
                self.settings.log("Philips HUE Bridge not found")
                self.settings.log("Press C to cancel or any key to scan again")
                if self.settings.os == "win":
                    input_char = getch().decode("utf-8").upper()
                elif self.settings.os == "lx":
                    input_char = getch().upper()
                if discovery_time < 20:
                    discovery_time = discovery_time + 5
                if input_char == "C":
                    return
        return ip_list[0]
| tomaae/WooferBot | src/lib/hue.py | hue.py | py | 7,516 | python | en | code | 6 | github-code | 90 |
3849032062 | import os
import time
import traceback
import json
import decimal
import boto3
import twint
from boto3.dynamodb.conditions import Key, Attr
from botocore.exceptions import ClientError
from scrapelog import ScrapeLog
logger = ScrapeLog()
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that renders DynamoDB Decimal values as plain numbers.

    Whole-valued Decimals become ints, fractional ones become floats;
    any other type falls back to the stock JSONEncoder behaviour.
    """

    def default(self, o):
        if not isinstance(o, decimal.Decimal):
            return super(DecimalEncoder, self).default(o)
        return float(o) if o % 1 > 0 else int(o)
class ScrapeBot(object):
    """Scrapes Twitter profiles and tweets via twint and stores them in AWS.

    Tweet text goes to an S3 bucket (``BUCKET_NAME`` env var); profile
    metadata and scrape status live in the DynamoDB 'person' table
    (region us-east-2).
    """
    def __init__(self, handle=None, i=None):
        self.handle = handle
        # S3 object key of the last tweet upload; set by add_tweet().
        self.filename = None
        # Index into twint's accumulated users_list in run() -- presumably
        # because the module-level list grows across calls; TODO confirm.
        self.i = i
        # logger.info("Scraping for handle: {}".format(self.handle))
    def __get_users(self):
        """Return handles of all 'person' items not yet scraped (is_scraped == 0)."""
        dynamodb = boto3.resource("dynamodb", region_name='us-east-2')
        users = []
        table = dynamodb.Table('person')
        try:
            response = table.scan(
                FilterExpression=Attr('is_scraped').eq(0)
            )
        except ClientError as e:
            logger.warn(e.response['Error']['Message'])
        else:
            users = response['Items']
            print("GetItem succeeded:")
            # users = json.dumps(item, indent=4, cls=DecimalEncoder)
            users = [user['handle'] for user in users]
        return users
    def __lookup(self, handle):
        """Run a twint profile lookup; returns twint's module-level users_list."""
        user_config = twint.Config()
        user_config.Username = handle
        # self.user_config.Format = "ID {id} | Name {name} | Bio {bio} | Location {location} | Join Date {join_date}"
        user_config.Store_object = True
        user_config.User_full = True
        twint.run.Lookup(user_config)
        return twint.output.users_list
    def __scrape_tweets(self, handle):
        """Collect up to 120 tweets for ``handle`` via a twint search."""
        tweet_config = twint.Config()
        tweet_config.Username = handle
        tweet_config.Limit = 120
        tweet_config.Store_object = True
        twint.run.Search(tweet_config)
        return twint.output.tweets_list
    def add_user_dynamo(self,
                        name,
                        date_joined,
                        handle,
                        location,
                        website,
                        bio,
                        tweets,
                        user_id
                        ):
        """Insert a fully populated 'person' item, already flagged as scraped.

        ``tweets`` is the S3 filename of the uploaded tweet text, not the
        text itself.
        """
        # logger.info("Adding user to DynamoDB...")
        dynamodb = boto3.resource("dynamodb", region_name='us-east-2')
        table = dynamodb.Table('person')
        response = table.put_item(
            Item={
                'handle': handle,
                'userid': user_id,
                'is_scraped': 1,
                'twitter_name': name,
                'date_joined': date_joined,
                'twitter_location': location,
                'website': website,
                'bio': bio,
                'tweets': tweets
            }
        )
        print("User Added to Dynamo DB:")
        print(json.dumps(response, indent=4, cls=DecimalEncoder))
    def add_tweet(self, tweets):
        """Upload the concatenated tweet text to S3.

        The object key (handle + current time) is remembered in
        ``self.filename`` for the DynamoDB record.
        """
        logger.info("Adding tweets to S3...")
        s3 = boto3.resource('s3')
        t = time.localtime()
        current_time = time.strftime("%H:%M:%S", t)
        self.filename = "{}_tweets_{}.txt".format(self.handle, current_time)
        tweet_file = s3.Object(os.environ.get('BUCKET_NAME'), self.filename)
        tweet_file.put(Body=tweets)
    def mark_as_scraped_dynamo(self, handle):
        """Flag ``handle`` as scraped and attach the S3 tweets filename."""
        dynamodb = boto3.resource('dynamodb', region_name='us-east-2')
        table = dynamodb.Table('person')
        response = table.update_item(
            Key={
                'handle': handle,
            },
            UpdateExpression="set is_scraped= :s, tweets=:t",
            ExpressionAttributeValues={
                ':s': 1,
                ':t': self.filename,
            },
            ReturnValues="UPDATED_NEW"
        )
        print("User updated:")
        print(json.dumps(response, indent=4, cls=DecimalEncoder))
    def run(self):
        """Scrape one handle end-to-end: profile lookup, tweets, S3 upload,
        then insert the person item into DynamoDB.
        """
        logger.info("User lookup: {}".format(self.i))
        user = self.__lookup(self.handle)
        print(user)
        # Pick this bot's entry out of twint's accumulated users list.
        user = user[self.i]
        print(user.name)
        logger.info("Tweets lookup")
        tweets = self.__scrape_tweets(self.handle)
        tweets = [tweet.tweet for tweet in tweets]
        tweets = ' '.join(tweets)
        self.add_tweet(tweets)
        self.add_user_dynamo(
            handle=self.handle,
            user_id=user.id if user.id else 'empty',
            bio=user.bio if user.bio else 'empty',
            date_joined=user.join_date if user.join_date else 'empty',
            location=user.location if user.location else 'empty',
            name=user.name if user.name else 'empty',
            tweets=self.filename,
            website=user.url if user.url else 'empty'
        )
    def run2(self):
        """Scrape every not-yet-scraped user from DynamoDB and mark each done."""
        users = self.__get_users()
        logger.info("Length of Users: {}".format(len(users)))
        for user in users:
            self.handle = user
            logger.info("User lookup: {}".format(user))
            logger.info("Tweets lookup")
            tweets = self.__scrape_tweets(user)
            tweets = [tweet.tweet for tweet in tweets]
            tweets = ' '.join(tweets)
            self.add_tweet(tweets)
            self.mark_as_scraped_dynamo(user)
class TestSelenium1:
    """Drivers that feed Twitter handles into ScrapeBot."""

    def test_scrape(self):
        """Scrape every handle listed in demola_followers.txt (one per line)."""
        with open('demola_followers.txt', 'r') as src:
            handles = [line.strip() for line in src.readlines()]
        position = 0
        for handle in handles:
            try:
                ScrapeBot(handle=handle, i=position).run()
                # Advance the twint users_list index only on success --
                # a failed run leaves the counter untouched.
                position += 1
            except (Exception, IndexError) as err:
                logger.warn(err)
                time.sleep(3)

    def scrape2(self):
        """Scrape all not-yet-scraped users recorded in DynamoDB."""
        try:
            ScrapeBot().run2()
        except (Exception, IndexError) as err:
            logger.warn(err)
            time.sleep(3)
if __name__ == '__main__':
    # Script entry point: drain every pending (is_scraped == 0) user.
    logger.info("Starting Update Scraper in Parallel")
    TestSelenium1().scrape2()
| enoreese/project_scrape | tests/single_scrape.py | single_scrape.py | py | 6,127 | python | en | code | 0 | github-code | 90 |
19862714954 | from functools import wraps
from queue import Queue
import numpy as np
from copy import copy, deepcopy
from queue import Queue
def bfs(all_sons):
    """Decorator factory: apply the wrapped visitor to a graph in BFS order.

    The decorated function takes an ``executor`` whose ``graph.nodes[0]`` is
    the traversal root.  The visitor is called as
    ``fun(executor, node, visited=..., info=...)``; when it returns a falsy
    value the node is removed from the visited set (so another path may
    revisit it) and its successors are not enqueued.

    all_sons: when True expand ``node.sons``, otherwise ``node.next_nodes()``.
    """
    def bfs_(fun):
        @wraps(fun)
        def decorated(executor):
            start = executor.graph.nodes[0]
            pending = Queue()
            pending.put(start)
            seen = {start}
            shared_info = {}
            while not pending.empty():
                node = pending.get()
                if not fun(executor, node, visited=seen, info=shared_info):
                    # Failed visit: allow this node to be reached again later.
                    seen.discard(node)
                    continue
                children = node.sons if all_sons else node.next_nodes()
                for child in children:
                    if child not in seen:
                        seen.add(child)
                        pending.put(child)
        return decorated
    return bfs_
def check_buffer(buffer, batch_size, bigger_than_buffer=True):
    """Report whether any buffered array violates the batch-size bound.

    With ``bigger_than_buffer=True`` (the default) this returns True as soon
    as any entry has fewer than ``batch_size`` rows; with False it returns
    True when any entry has more than ``batch_size`` rows.  Returns False
    when every entry satisfies the bound (including for an empty buffer).
    """
    if bigger_than_buffer:
        return any(item.shape[0] < batch_size for item in buffer)
    return any(item.shape[0] > batch_size for item in buffer)
| GIS-PuppetMaster/DB4AI | utils.py | utils.py | py | 1,433 | python | en | code | 0 | github-code | 90 |
35410742875 | #_*_coding:utf-8_*_
from django.conf.urls import url
from .views import *
# URL routes for the users app (views are star-imported from .views).
urlpatterns = [
    url(r"^login",Logins.as_view()),  # class-based login view; prefix match (no trailing $)
    url(r"^register$", register),  # user registration (function view)
    url(r"^db_movie$", db_movie),  # Douban movie listing
    # url(r"^index", index),
] | zzdn/douban_project | apps/users/urls.py | urls.py | py | 237 | python | en | code | 0 | github-code | 90 |
72365619177 | from ast import iter_fields
import numpy
# Per-frame displacement of the ball relative to the previous frame.
def diff_xy(coords):
    """Return (|dx|, |dy|) arrays between consecutive ball coordinates.

    ``coords`` is a sequence of (x, y) tuples with None where the ball was
    not detected; any pair involving a missing frame yields NaN in both
    output arrays.  The result has length ``len(coords) - 1``.
    """
    deltas = []
    for prev, cur in zip(coords, coords[1:]):
        if prev is None or cur is None:
            deltas.append(None)
        else:
            deltas.append([abs(cur[0] - prev[0]), abs(cur[1] - prev[1])])
    xx = numpy.array([numpy.nan if d is None else d[0] for d in deltas])
    yy = numpy.array([numpy.nan if d is None else d[1] for d in deltas])
    return xx, yy
# Outlier removal: drop detections that jump implausibly far between frames.
def remove_outliers(x, y, coords):
    """Null out coordinate entries around large frame-to-frame jumps.

    x, y: per-frame |dx|, |dy| arrays from diff_xy (NaN where unknown).
    coords: the coordinate list; mutated in place -- the chosen outlier
    entry is replaced with None.
    """
    # Frames where BOTH the x and y displacement exceed 50 px.
    # NOTE(review): '&' means a point moving >50 px along only one axis is
    # never flagged -- confirm whether '|' (either axis) was intended.
    ids = set(numpy.where(x > 50)[0]) & set(numpy.where(y > 50)[0])
    for id in ids:
        left, middle, right = coords[id-1], coords[id], coords[id+1]
        # Missing neighbours are replaced by the sentinel [0] so max() below
        # never picks them.
        if left is None:
            left = [0]
        if right is None:
            right = [0]
        if middle is None:
            middle = [0]
        # NOTE(review): max() on [x, y] lists compares lexicographically
        # (x first, then y), so "largest" here means largest x coordinate,
        # not largest displacement -- confirm this is intended.
        MAX = max(map(list, (left, middle, right)))
        if MAX == [0]:
            pass
        else:
            # coords may hold tuples or lists; try the tuple form first.
            # NOTE(review): .index finds the FIRST equal coordinate in the
            # whole list, which may not be the one near ``id``.
            try:
                coords[coords.index(tuple(MAX))] = None
            except ValueError:
                coords[coords.index(MAX)] = None
def nan_helper(y):
    """Locate NaNs in a 1-D array and map logical masks to integer indices.

    Returns:
        (nan_mask, index_fn): ``nan_mask`` is a boolean array marking the
        NaN positions of *y*; ``index_fn(mask)`` converts such a logical
        mask into the corresponding integer indices.

    Example -- linear interpolation of NaNs::

        >>> nans, idx = nan_helper(y)
        >>> y[nans] = np.interp(idx(nans), idx(~nans), y[~nans])
    """
    nan_mask = numpy.isnan(y)

    def to_indices(logical_mask):
        return logical_mask.nonzero()[0]

    return nan_mask, to_indices
# Interpolation: fill in frames where the ball was not detected.
def interpolation(coords):
    """Return coords with None entries replaced by linearly interpolated (x, y).

    The input list is not modified; each axis is interpolated independently
    with numpy.interp over the frame index.
    """
    coords = coords.copy()
    xs = numpy.array([numpy.nan if c is None else c[0] for c in coords])
    ys = numpy.array([numpy.nan if c is None else c[1] for c in coords])
    for axis in (xs, ys):
        missing, to_idx = nan_helper(axis)
        axis[missing] = numpy.interp(to_idx(missing), to_idx(~missing), axis[~missing])
    return [*zip(xs, ys)]
| kpuce2022CD/Pierrot | analysis_application/Functions/bounce.py | bounce.py | py | 2,348 | python | en | code | 2 | github-code | 90 |
class Settings:
    # Container for all of the game's tunable settings.
    def __init__(self):
        """Initialise screen, ship, bullet and alien-fleet settings."""
        # Screen settings
        self.screen_width = 1200
        self.screen_height = 600
        self.bg_color = (230, 230, 230)  # light-grey background (RGB)
        # Ship settings
        self.ship_speed = 0.5
        self.ship_limit = 3  # number of ships (lives) per game
        # Bullet settings
        self.bullet_speed = 1.5
        self.bullet_width = 3
        self.bullet_height = 15
        self.bullet_color = (60, 60, 60)
        self.bullet_allowed = 3  # max simultaneous bullets on screen
        # Alien settings
        self.alien_speed = 0.5
        self.fleet_drop_speed = 5
        # fleet_direction of 1 means move right; -1 means move left.
        self.fleet_direction = 1
| lijikun123/plane_game | setting.py | setting.py | py | 642 | python | en | code | 0 | github-code | 90 |
40562609281 | #!/usr/lib/python3
import requests
import json
import yaml
def LineFilter(workdirectory, AuthToken, tenantid, portal, starttimenanosec, endtimenanosec):
    """Validate the log "line filter" feature and record the verdict.

    Queries the portal's RQL log API for agent logs filtered on the literal
    "connectivity" within [starttimenanosec, endtimenanosec] and writes a
    pass/fail status under 'LineFilter_Functionality' in
    <workdirectory>/Report.yml.

    Args:
        workdirectory: directory containing Report.yml.
        AuthToken: value for the HTTP Authorization header.
        tenantid: tenant whose logs are queried.
        portal: API host name.
        starttimenanosec, endtimenanosec: query window in nanoseconds.
    """
    # Read the existing report; the context manager closes the handle that a
    # bare open() previously leaked.
    with open(workdirectory + "/Report.yml") as reportfile:
        parsedreportfile = yaml.load(reportfile, Loader=yaml.FullLoader)
    payload = {}
    headers = {
        'Authorization': AuthToken,
        'Content-Type': 'application/json'
    }
    # Built by concatenation on purpose: the RQL braces/pipes must reach the
    # server unescaped, which requests' params= encoding would mangle.
    logsurl = (
        "https://" + portal
        + "/logsrql/api/v7/tenants/" + tenantid
        + '/logs?query={source="agent"}|="connectivity"&limit=51&start='
        + str(starttimenanosec)
        + "&end=" + str(endtimenanosec)
    )
    log_response = requests.request("GET", logsurl, headers=headers, data=payload)
    if log_response.status_code == 200:
        logsresultdata = log_response.json()['data']['result']
        saw_line = False
        all_match = True
        # Every returned line must contain "connectivity".  Previously the
        # status was recomputed per line, so a later matching line could
        # overwrite an earlier failure verdict.
        for stream in logsresultdata:
            for value in stream['values']:
                saw_line = True
                message = json.loads(value[1])['message']
                if "connectivity" not in message:
                    all_match = False
        if saw_line:
            if all_match:
                status = "Validation Pass - Line Filter Functionality is Working Properly"
            else:
                status = "Validation Fail - Line Filter Functionality is not Working Properly"
            parsedreportfile['LineFilter_Functionality'] = status
        # As before, an empty result set leaves the report entry untouched.
    else:
        # Propagate the HTTP error reason into the report.
        parsedreportfile['LineFilter_Functionality'] = log_response.reason
    with open(workdirectory + "/Report.yml", "w") as file:
        yaml.dump(parsedreportfile, file)
| UditOpsramp/UATPipeline | TestCases/LineFilter.py | LineFilter.py | py | 1,762 | python | en | code | 0 | github-code | 90 |
13942237464 | import win32com.client
import json
import base64
# Path of the JSON file that persists registered users (relative to CWD).
metadata_filename = "metadata.json"
class UserSystem:
    """Credential store persisted in ``metadata.json``.

    The JSON file holds a three-element list; slot 2 maps obfuscated
    usernames to obfuscated passwords (see ``encrypt``).
    """

    def __init__(self):
        # Open INSIDE the try: previously the open() sat outside it, so a
        # missing metadata.json raised FileNotFoundError instead of falling
        # back to an empty store as the except branch intends.
        try:
            with open(metadata_filename) as f:
                self.j = json.load(f)
            self.user_information = self.j[2]
        except (OSError, ValueError, IndexError, TypeError):
            # Missing file, malformed JSON, or unexpected structure.
            self.j = [{}, {}, {}]
            self.user_information = {}
        self.key = "database"

    def save_user_keys(self, user_name, key):
        """Store the (obfuscated) user_name -> key pair and persist the file."""
        user_name = self.encrypt(self.key, user_name)
        key = self.encrypt(self.key, key)
        self.user_information[user_name] = key
        self.j[2] = self.user_information
        with open(metadata_filename, "w") as f:
            json.dump(self.j, f)

    def check_username(self, user_name):
        """Return True if *user_name* is registered."""
        return self.encrypt(self.key, user_name) in self.user_information

    def check_key(self, user_name, key):
        """Return True if *key* matches the stored value for *user_name*.

        As before, an unknown user raises KeyError -- callers are expected
        to call check_username first.
        """
        user_name = self.encrypt(self.key, user_name)
        return self.user_information[user_name] == self.encrypt(self.key, key)

    def encrypt(self, key, content):
        """Obfuscate *content* for storage (key is ignored).

        NOTE(review): despite the name this is base64 obfuscation, not
        encryption.  ``base64.encodestring`` was removed in Python 3.9;
        ``encodebytes`` is its exact drop-in replacement (identical output).
        """
        return str(base64.encodebytes(content.encode()))

    def decrypt(self, key, content):
        """Decrypt *content* with *key* via the Windows CAPICOM COM object.

        NOTE(review): this is NOT the inverse of ``encrypt`` above (which is
        plain base64), requires Windows + win32com, and the algorithm
        constants (Name=2, KeyLength=5) should be verified against the
        CAPICOM documentation -- confirm this path is still used.
        """
        EncryptedData = win32com.client.Dispatch('CAPICOM.EncryptedData')
        EncryptedData.Algorithm.KeyLength = 5
        EncryptedData.Algorithm.Name = 2
        EncryptedData.SetSecret(key)
        EncryptedData.Decrypt(content)
        plaintext = EncryptedData.Content  # avoid shadowing builtin 'str'
        return plaintext
if __name__ == "__main__":
user_system = UserSystem()
user_system.save_user_keys("ys", "123456")
user_system.save_user_keys("database", "111111") | sunyiwei24601/MiniSql | SystemManagement/System_Login.py | System_Login.py | py | 1,882 | python | en | code | 0 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.