seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
17669251690 | #Trick is to first cyclic sort without caring for anything outside 1 to n. Then start adding missing values in result
#and while doing this keep track of all ignored values since k might not be over till len(nums) so the ignored values might
#be those nums. Then add additional values outside len till len(reult) not less than k and make sure not to insert the ignored values.
#TC = O(N) + O(N) + O(K) => O(N+K) and O(K)
def find_first_k_missing_positive(nums, k):
    """Return the first k missing positive integers from nums.

    Strategy (cyclic sort): place every value v in 1..len(nums) at index
    v-1; afterwards every index i whose value is not i+1 marks i+1 as
    missing.  Values displaced by a missing slot (out-of-range values and
    duplicates) are remembered in ``ignored`` so they are not reported as
    missing when the search is extended past len(nums).

    Time: O(N + K).  Space: O(K).  Mutates ``nums`` in place.
    """
    i = 0
    result = []
    ignored = {}
    # Cyclic sort: only values in 1..len(nums) participate.  The original
    # guard was `nums[i] >= 0`, which let 0 through: j = -1 then indexed
    # the LAST element, producing wrong swaps and an infinite loop on
    # inputs such as [3, 0, 2].  Zero is not a positive integer, so it
    # must be skipped like the negatives.
    while i < len(nums):
        j = nums[i] - 1
        if nums[i] >= 1 and nums[i] <= len(nums) and nums[i] != nums[j]:
            nums[i], nums[j] = nums[j], nums[i]
        else:
            i += 1
    # Every out-of-place index identifies a missing value in 1..N; the
    # value sitting there is one that actually occurred, so ignore it later.
    for i in range(len(nums)):
        if len(result) < k:
            if nums[i] != i + 1:
                result.append(i + 1)
                ignored[nums[i]] = True
    # Extend beyond len(nums) until k values are collected, skipping
    # values that appeared in the input.
    i = 1
    while len(result) < k:
        candidate_num = i + len(nums)
        if candidate_num not in ignored:
            result.append(candidate_num)
        i += 1
    return result
def main():
    """Exercise the finder on a few sample inputs, printing each result."""
    samples = (
        ([3, -1, 4, 5, 5], 3),
        ([2, 3, 4], 3),
        ([-2, -3, 4], 2),
    )
    for nums, k in samples:
        print(find_first_k_missing_positive(nums, k))
main() | SharmaManjul/DS-Algo | LeetCode/Hard/grok_findKthMissingPositiveNum.py | grok_findKthMissingPositiveNum.py | py | 1,284 | python | en | code | 0 | github-code | 13 |
import pygame
import sys
from pygame.locals import *

# Minimal Pygame demo: display a turtle sprite centred in a 640x480 window,
# outlined with a 1px rectangle, at 30 FPS.
pygame.init()
size = width, height = 640, 480
bg = (255, 255, 255)  # white background
clock = pygame.time.Clock()
screen = pygame.display.set_mode(size)
pygame.display.set_caption("FishC Demo")
oturtle = pygame.image.load("turtle.png")
# NOTE(review): pygame.transform.chop REMOVES the (207, 200, 50, 50)
# rectangle and joins the remaining edges — it does not crop TO that
# rectangle.  Confirm this (rather than a subsurface crop) is the intent.
turtle = pygame.transform.chop(oturtle, (207, 200, 50, 50))
# turtle = oturtle
position = turtle.get_rect()
position.center = width // 2, height // 2
# Main loop: process the event queue, redraw, cap at 30 FPS.
while True:
    for event in pygame.event.get():
        if event.type == QUIT:
            sys.exit()
    screen.fill(bg)
    screen.blit(turtle, position)
    pygame.draw.rect(screen, (0, 0, 0), position, 1)  # outline the sprite
    pygame.display.flip()
    clock.tick(30)
| DodgeV/learning-programming | books/python/零基础入门学习Python(小甲鱼)全套源码课件/082Pygame:提高游戏的颜值2(源代码)/课堂演示/py_1.py | py_1.py | py | 722 | python | en | code | 3 | github-code | 13 |
import matplotlib.pyplot as plt
import pandas as pd
import pylab as pl
import numpy as np

# Fit a degree-2 polynomial regression of wine pH on alcohol content,
# then evaluate it (MAE, MSE, R2) on a held-out split.

# read data
df = pd.read_csv("winequality-red.csv")
print(df.head(9))
# NOTE(review): the stock UCI winequality-red.csv is ';'-separated with
# spaces in the column names ("fixed acidity") — confirm this copy really
# uses comma separators and underscore headers.
cdf = df[['fixed_acidity','volatile_acidity','citric_acid','residual_sugar','chlorides','density','pH','alcohol','quality']]
print(cdf.head(9))

# Quick look at the raw alcohol/pH relationship.
plt.scatter(cdf.alcohol, cdf.pH, color='blue')
plt.xlabel("alcohol")
plt.ylabel("pH")
plt.show()

# Train/test split: ~80% of rows (randomly masked) for training, the rest
# held out for evaluation.  The two sets are mutually exclusive.
msk = np.random.rand(len(df)) < 0.8
train = cdf[msk]
test = cdf[~msk]

# Polynomial regression: model y = b + theta_1*x + theta_2*x^2.
# PolynomialFeatures expands each sample x into [1, x, x^2]; treating
# x and x^2 as two independent features (x_1, x_2) turns the problem into
# ordinary multiple linear regression, so LinearRegression can solve it.
from sklearn.preprocessing import PolynomialFeatures
from sklearn import linear_model
train_x = np.asanyarray(train[['alcohol']])
train_y = np.asanyarray(train[['pH']])
test_x = np.asanyarray(test[['alcohol']])
test_y = np.asanyarray(test[['pH']])
poly = PolynomialFeatures(degree=2)
train_x_poly = poly.fit_transform(train_x)
print(train_x_poly)

# Fit the expanded features with plain least-squares linear regression.
clf = linear_model.LinearRegression()
train_y_ = clf.fit(train_x_poly, train_y)
# The coefficients: clf.coef_[0] = [0, theta_1, theta_2] (the constant
# column is absorbed by the intercept), clf.intercept_[0] = b.
print ('Coefficients: ', clf.coef_)
print ('Intercept: ',clf.intercept_)

# Plot the fitted curve over the training scatter.
plt.scatter(train.alcohol, train.pH, color='blue')
XX = np.arange(0.0, 10.0, 0.1)
yy = clf.intercept_[0]+ clf.coef_[0][1]*XX+ clf.coef_[0][2]*np.power(XX, 2)
plt.plot(XX, yy, '-r' )
plt.xlabel("Alcohol")
plt.ylabel("pH")
plt.show()

""" Evaluation """
from sklearn.metrics import r2_score
test_x_poly = poly.fit_transform(test_x)
test_y_ = clf.predict(test_x_poly)
print("Mean absolute error: %.2f" % np.mean(np.absolute(test_y_ - test_y)))
print("Residual sum of squares (MSE): %.2f" % np.mean((test_y_ - test_y) ** 2))
# BUG FIX: r2_score's signature is r2_score(y_true, y_pred) and it is NOT
# symmetric; the original passed the predictions first.
print("R2-score: %.2f" % r2_score(test_y, test_y_))
| spider2510/Regression | Regression/polynomial.py | polynomial.py | py | 4,750 | python | en | code | 0 | github-code | 13 |
10273634689 | # Importamos las clases y funciones del archivo protocolo.py
from protocolo import *
# Sender side of the "utopia" protocol (Tanenbaum's protocol 1: simplex,
# error-free channel, no flow control, no acknowledgements).
def sender(socketio,____error,___secuencia):
    """Fetch one packet from the network layer, wrap it in a DATA frame
    and hand it to the physical layer on channel "A".

    ``____error`` and ``___secuencia`` are unused here; the parameters
    exist so every protocol variant shares the same call signature.
    """
    ___buffer = from_network_layer() # Get something to send from the network layer
    s = Frame() # Create a frame object
    s.info = ___buffer # Copy the packet into s for transmission
    s.kind = FrameKind.DATA # Set the frame kind as data
    to_physical_layer(s,socketio,"A") # Send the frame to the physical layer
# Receiver side of the "utopia" protocol: block until a frame arrives and
# deliver its payload upward.
def receiver(socketio,error,___secuencia):
    """Wait for the (only possible) frame-arrival event, read the frame
    from the physical layer on channel "B" and pass its payload to the
    network layer.  ``___secuencia`` is unused in this protocol variant.
    """
    event = wait_for_event(error,"utopia") # Wait for an event; the only possibility is frame arrival
    if event == EventType.FRAME_ARRIVAL: # A frame has arrived
        r = from_physical_layer(socketio,"B") # Fetch the frame from the physical layer
        to_network_layer(r.info,socketio,"B") # Deliver the frame's payload to the network layer
| johanec/Proyecto1-Redes | backend/utopia.py | utopia.py | py | 924 | python | es | code | 0 | github-code | 13 |
from http import HTTPStatus
from typing import Optional
class CityNotFoundError(Exception):
    """Raised when a requested city cannot be found.

    Attributes:
        message: either the caller-supplied string or a structured dict
            describing the expected vs. received payload.
        status_code: HTTP status to return (defaults to 404 NOT FOUND).
    """

    def __init__(
        self,
        expected_type: Optional[dict] = None,
        received_type: Optional[dict] = None,
        message: str = "",
        status_code: int = HTTPStatus.NOT_FOUND,
        *args,
        **kwargs
    ):
        # Forward extra positionals properly; the original called
        # super().__init__(args, kwargs), storing the raw (tuple, dict)
        # pair in Exception.args.
        super().__init__(*args)
        if message:
            self.message = message
        else:
            self.message = {
                "error": "city not found",
                # `or {}` replaces the shared mutable-default dicts, which
                # could be mutated through self.message by one caller and
                # leak into every later instance.
                "expected_type": expected_type or {},
                "received_type": received_type or {},
            }
        self.status_code = status_code
class CityOutOfRangeError(Exception):
    """Raised when a city exists but lies outside the service coverage.

    Attributes:
        message: either the caller-supplied string or a structured dict
            naming the offending city and the covered cities.
        status_code: HTTP status to return (defaults to 400 BAD REQUEST).
    """

    def __init__(
        self,
        cities_coverage: Optional[dict] = None,
        received_city: Optional[dict] = None,
        message: str = "",
        status_code: int = HTTPStatus.BAD_REQUEST,
        *args,
        **kwargs
    ):
        # Forward extra positionals properly; the original stored the raw
        # (args, kwargs) pair in Exception.args.
        super().__init__(*args)
        if message:
            self.message = message
        else:
            self.message = {
                "error": "city out of coverage",
                # `or {}` replaces the shared mutable-default dicts.
                "received_city": received_city or {},
                "cities_coverage": cities_coverage or {},
            }
        self.status_code = status_code
class ZipCodeNotFoundError(Exception):
    """Raised when no record matches the requested zip code.

    Attributes:
        message: caller-supplied string, or the canonical error payload.
        status_code: HTTP status to return (defaults to 404 NOT FOUND).
    """

    def __init__(
        self,
        message: str = "",
        status_code: int = HTTPStatus.NOT_FOUND,
        *args,
        **kwargs
    ):
        # Forward extra positionals properly; the original stored the raw
        # (args, kwargs) pair in Exception.args.
        super().__init__(*args)
        if message:
            self.message = message
        else:
            self.message = {
                "error": "zip code not found",
            }
        self.status_code = status_code
class InvalidZipCodeFormatError(Exception):
    """Raised when a zip code does not match the "99999-999" format.

    Attributes:
        message: caller-supplied string, or a structured dict showing the
            expected format and the value actually received.
        status_code: HTTP status to return (defaults to 400 BAD REQUEST).
    """

    def __init__(
        self,
        zip_code: str,
        message: str = "",
        status_code: int = HTTPStatus.BAD_REQUEST,
        *args,
        **kwargs
    ):
        # Forward extra positionals properly; the original stored the raw
        # (args, kwargs) pair in Exception.args.
        super().__init__(*args)
        if message:
            self.message = message
        else:
            self.message = {
                "error": "invalid zip code format",
                "expected_type": "99999-999",
                "received_type": zip_code,
            }
        self.status_code = status_code
| penguinuux/rain-forecast-risk-alert-api | app/exceptions/city_exc.py | city_exc.py | py | 2,186 | python | en | code | 3 | github-code | 13 |
import numpy as np
import tensorflow as tf
import nrrd
import os
import matplotlib.pyplot as plt

# Round-trip experiment: normalise a volume to [0, 1], quantise it to
# uint16, then undo both steps and compare the element sums to gauge the
# quantisation loss.
input_data_path = os.path.join('test_data', 'test_output_data.nrrd')
# Load the volume
input_data, input_data_header = nrrd.read(input_data_path) # XYZ
# Normalize the data -> Values between 0 and 1
scaled_data = (input_data - np.min(input_data))/(np.max(input_data)-np.min(input_data))
scaled_data = scaled_data.astype('float')
# Scale the data -> Values between 0 and 65535
scaled_data = scaled_data*65535
scaled_data = scaled_data.astype('uint16')
###############################################################################
# Generate patches
###############################################################################
# Undo the scaling
# NOTE(review): the next line is a dead store — it is overwritten
# immediately by `scaled_data/65535` (which yields float64 on its own).
output_data = scaled_data.astype('float')
output_data = scaled_data/65535
# Undo the normalization
output_data = output_data.astype('float64')
output_data = output_data*(np.max(input_data)-np.min(input_data))+np.min(input_data)
# Compare the sum
# NOTE(review): these sums are computed but not printed or asserted;
# presumably inspected interactively.
np.sum(input_data)
np.sum(output_data)
| ChrisE087/3D_cell_counting | 00-playground/scaling_target_data.py | scaling_target_data.py | py | 1,040 | python | en | code | 0 | github-code | 13 |
37154810064 | from __future__ import print_function, absolute_import, division
import pytest
from distutils.version import LooseVersion
from astropy import units as u
from astropy import wcs
import numpy as np
from . import path
from .helpers import assert_allclose, assert_array_equal
from .test_spectral_cube import cube_and_raw
from ..spectral_axis import doppler_gamma, doppler_beta, doppler_z, get_rest_value_from_wcs
try:
import regions
regionsOK = True
REGIONS_GT_03 = LooseVersion(regions.__version__) >= LooseVersion('0.3')
except ImportError:
regionsOK = REGIONS_GT_03 = False
try:
import scipy
scipyOK = True
except ImportError:
scipyOK = False
def test_subcube():
    """Spatial subcube extraction: pixel limits and the equivalent world
    (degree) limits must select the same region regardless of the order
    the world bounds are given in, and a no-argument subcube() must be a
    full copy of the cube (shape, WCS and data)."""
    cube, data = cube_and_raw('advs.fits')
    # x cut: pixels 1..3 vs. the same range expressed in degrees (both orders).
    sc1 = cube.subcube(xlo=1, xhi=3)
    sc2 = cube.subcube(xlo=24.06269*u.deg, xhi=24.06206*u.deg)
    sc2b = cube.subcube(xlo=24.06206*u.deg, xhi=24.06269*u.deg)
    assert sc1.shape == (2,3,2)
    assert sc2.shape == (2,3,2)
    assert sc2b.shape == (2,3,2)
    assert sc1.wcs.wcs.compare(sc2.wcs.wcs)
    assert sc1.wcs.wcs.compare(sc2b.wcs.wcs)
    # y cut: same check along the other spatial axis.
    sc3 = cube.subcube(ylo=1, yhi=3)
    sc4 = cube.subcube(ylo=29.93464 * u.deg,
                       yhi=29.93522 * u.deg)
    assert sc3.shape == (2, 2, 4)
    assert sc4.shape == (2, 2, 4)
    assert sc3.wcs.wcs.compare(sc4.wcs.wcs)
    # No limits at all: must reproduce the original cube exactly.
    sc5 = cube.subcube()
    assert sc5.shape == cube.shape
    assert sc5.wcs.wcs.compare(cube.wcs.wcs)
    assert np.all(sc5._data == cube._data)
@pytest.mark.skipif('not scipyOK', reason='Could not import scipy')
@pytest.mark.skipif('not regionsOK', reason='Could not import regions')
@pytest.mark.skipif('not REGIONS_GT_03', reason='regions version should be >= 0.3')
@pytest.mark.parametrize('regfile',
                         ('255-fk5.reg', '255-pixel.reg'),
                         )
def test_ds9region_255(regfile):
    """Correctness check on the 255.fits test cube: the same region given
    in sky (fk5) and in pixel coordinates must cut out exactly the known
    2x2 block of pixel values in the first spectral channel."""
    # specific test for correctness
    cube, data = cube_and_raw('255.fits')
    shapelist = regions.read_ds9(path(regfile))
    subcube = cube.subcube_from_regions(shapelist)
    assert_array_equal(subcube[0, :, :].value,
                       np.array([11, 12, 16, 17]).reshape((2, 2)))
@pytest.mark.skipif('not scipyOK', reason='Could not import scipy')
@pytest.mark.skipif('not regionsOK', reason='Could not import regions')
@pytest.mark.skipif('not REGIONS_GT_03', reason='regions version should be >= 0.3')
@pytest.mark.parametrize(('regfile', 'result'),
                         (('fk5.reg', [slice(None), 1, 1]),
                          ('image.reg', [slice(None), 1, slice(None)]),
                          (
                          'partial_overlap_image.reg', [slice(None), 1, 1]),
                          ('no_overlap_image.reg', ValueError),
                          ('partial_overlap_fk5.reg', [slice(None), 1, 1]),
                          ('no_overlap_fk5.reg', ValueError),
                          ))
def test_ds9region_new(regfile, result):
    """Each ds9 region file must select the expected numpy index into the
    raw data (``result`` as a fancy index); region files with no overlap
    must raise ValueError instead."""
    cube, data = cube_and_raw('adv.fits')
    regionlist = regions.read_ds9(path(regfile))
    if isinstance(result, type) and issubclass(result, Exception):
        # Expected-failure branch: region does not overlap the cube.
        with pytest.raises(result) as exc:
            sc = cube.subcube_from_regions(regionlist)
        # this assertion is redundant, I think...
        assert exc.errisinstance(result)
    else:
        # The subcube sum must equal the sum over the selected raw slice.
        sc = cube.subcube_from_regions(regionlist)
        scsum = sc.sum()
        dsum = data[result].sum()
        assert_allclose(scsum, dsum)
    #region = 'fk5\ncircle(29.9346557, 24.0623827, 0.11111)'
    #subcube = cube.subcube_from_ds9region(region)
    # THIS TEST FAILS!
    # I think the coordinate transformation in ds9 is wrong;
    # it uses kapteyn?
    #region = 'circle(2,2,2)'
    #subcube = cube.subcube_from_ds9region(region)
@pytest.mark.skipif('not scipyOK', reason='Could not import scipy')
@pytest.mark.skipif('not regionsOK', reason='Could not import regions')
@pytest.mark.skipif('not REGIONS_GT_03', reason='regions version should be >= 0.3')
def test_regions_spectral():
    """The spectral 'range' restriction on a region must select the same
    channels whether it is expressed as optical velocity, frequency, or
    velocity in the gamma/beta/z doppler conventions (given 'restfreq' and
    'veltype' metadata): all five subcube sums must agree with the raw
    data sum over the expected slice."""
    cube, data = cube_and_raw('adv.fits')
    rf_cube = get_rest_value_from_wcs(cube.wcs).to("GHz",
                                                   equivalencies=u.spectral())
    # content of image.reg
    regpix = regions.RectanglePixelRegion(regions.PixCoord(0.5, 1), width=4, height=2)
    # Velocity range in doppler_optical same as that of the cube.
    vel_range_optical = u.Quantity([-318 * u.km/u.s, -320 * u.km/u.s])
    regpix.meta['range'] = list(vel_range_optical)
    sc1 = cube.subcube_from_regions([regpix])
    scsum1 = sc1.sum()
    # Same range expressed as frequency.
    freq_range = vel_range_optical.to("GHz",
                                      equivalencies=u.doppler_optical(rf_cube))
    regpix.meta['range'] = list(freq_range)
    sc2 = cube.subcube_from_regions([regpix])
    scsum2 = sc2.sum()
    # Same range in the relativistic (gamma) velocity convention.
    regpix.meta['restfreq'] = rf_cube
    vel_range_gamma = freq_range.to("km/s", equivalencies=doppler_gamma(rf_cube))
    regpix.meta['range'] = list(vel_range_gamma)
    regpix.meta['veltype'] = 'GAMMA'
    sc3 = cube.subcube_from_regions([regpix])
    scsum3 = sc3.sum()
    # Same range in the beta (v/c) convention.
    vel_range_beta = freq_range.to("km/s",
                                   equivalencies=doppler_beta(rf_cube))
    regpix.meta['range'] = list(vel_range_beta)
    regpix.meta['veltype'] = 'BETA'
    sc4 = cube.subcube_from_regions([regpix])
    scsum4 = sc4.sum()
    # Same range in the redshift (z) convention.
    vel_range_z = freq_range.to("km/s",
                                equivalencies=doppler_z(rf_cube))
    regpix.meta['range'] = list(vel_range_z)
    regpix.meta['veltype'] = 'Z'
    sc5 = cube.subcube_from_regions([regpix])
    scsum5 = sc5.sum()
    # Expected: channels 1..-2 of row 1, all columns (per image.reg).
    dsum = data[1:-1, 1, :].sum()
    assert_allclose(scsum1, dsum)
    # Proves that the vel/freq conversion works
    assert_allclose(scsum1, scsum2)
    assert_allclose(scsum2, scsum3)
    assert_allclose(scsum3, scsum4)
    assert_allclose(scsum4, scsum5)
| mevtorres/astrotools | spectral_cube/tests/test_subcubes.py | test_subcubes.py | py | 5,901 | python | en | code | 0 | github-code | 13 |
from setuptools import setup, find_packages
from aminoed import __version__

# Packaging script for Amino.ed: long description comes straight from the
# README, version from the package itself.
with open("README.md", "r") as stream:
    long_description = stream.read()
setup(
    name="Amino.ed",
    version=__version__,
    url="https://github.com/Alert-Aigul/Amino.ed",
    download_url="https://github.com/Alert-Aigul/Amino.ed/archive/refs/heads/main.zip",
    license="MIT",
    author="Alert Aigul",
    author_email="alertaigul@gmail.com",
    description="A library to create Amino bots.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    # PyPI search keywords.
    keywords=[
        "aminoapps",
        "amino-py",
        "amino",
        "amino-bot",
        "amino.py",
        "amino.ed",
        "amino-ed",
        "narvii",
        "api",
        "python",
        "python3",
        "python3.x",
        "official",
        "alert",
        "fix",
        "ed"
    ],
    # Runtime dependencies.
    # NOTE(review): "requests" is listed twice here.
    install_requires=[
        "requests",
        "setuptools",
        "six",
        "aiohttp",
        "ujson",
        "requests",
        "eventemitter",
        "pydantic"
    ],
    setup_requires=[
        "wheel"
    ],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Programming Language :: Python :: 3.6",
    ],
    python_requires=">=3.6",
    packages=find_packages()
)
| Zetsu00167373/Amino.ed | setup.py | setup.py | py | 1,339 | python | en | code | 0 | github-code | 13 |
25451219828 | # coding:utf-8
# from celery import Celery,platforms
import time
from core.Subdomain_Baidu import Baidu
from core.Subdomain_Brute import Brute
from core.Subdomain_Crawl import Crawl
from core.Subdomain_Api import Api
from core.Url_Info import Get_Url_Info
from core.Host_Info import Get_Ip_Info,Get_Alive_Url
from core.Cor import Cor
import pymysql
import random
import socket
import django
import os
import sys
# Bootstrap Django outside manage.py: put the project root on sys.path and
# load the settings module before importing any models.
pathname = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0,pathname)
sys.path.insert(0,os.path.abspath(os.path.join(pathname,'..')))
os.environ.setdefault("DJANGO_SETTINGS_MODULE","LangSrcCurise.settings")
django.setup()
from app.models import Other_Url,IP,URL,Show_Data,Error_Log,Cpu_Min,Domains,Setting,Content
from django.db import connections
from concurrent.futures import ThreadPoolExecutor,ProcessPoolExecutor
from multiprocessing import Pool
import threading
import multiprocessing
# Tracks how many consecutive times the cleaner saw the same domain stuck
# in the "scanning" state (see Change_ShowData_Info).
Gloval_Check = {'domain':'qq.com','counts':0}
#Sem = multiprocessing.Manager().BoundedSemaphore(1)
# IP blacklist loaded once at import time; blacklisted IPs are skipped.
Dicts = os.path.join('Auxiliary','Black_Ip.list')
black_ip = list(set([x.strip() for x in open(Dicts, 'r', encoding='utf-8').readlines()]))
def Except_Log(stat, url, error):
    """Print an error marker and persist it to the Error_Log table,
    recycling stale DB connections once and retrying if the first
    insert fails."""
    entry = '错误代码 [{}] {}'.format(stat, str(error))
    print(entry)
    try:
        Error_Log.objects.create(url=url, error=entry)
    except:
        # The insert may die on a stale MySQL connection; refresh and retry.
        close_old_connections()
        Error_Log.objects.create(url=url, error=entry)
def close_old_connections():
    """Drop every Django DB connection that has become unusable or has
    outlived CONN_MAX_AGE, so the next ORM call opens a fresh one.
    Called from worker threads/processes whenever a query blows up."""
    for conn in connections.all():
        conn.close_if_unusable_or_obsolete()
# Global scanner configuration, read once from the first Setting row.
Set = Setting.objects.all()[0]
pool_count = int(Set.Pool)  # worker count shared by all thread/process pools
# NOTE(review): eval() of DB-stored text — acceptable only because Setting
# rows are admin-controlled; expected to yield a list of HTTP status codes.
Alive_Status = eval(Set.Alive_Code)
BA = Domains.objects.all()
ALL_DOMAINS = [x.get('url') for x in BA.values()]  # root domains under management
def Run_Cpu_Min():
    """Resource monitor loop: sample CPU, memory and network counters via
    Cor() forever and persist one Cpu_Min row per sample, retrying once
    after recycling stale DB connections."""
    while 1:
        c = Cor()
        # Cor() returns (cpu, memory, bytes_sent, bytes_received).
        cpu, men, new_send, new_recv = c[0], c[1], c[2], c[3]
        try:
            Cpu_Min.objects.create(cpu=cpu,menory=men,network_send=new_send,network_recv=new_recv)
        except Exception as e:
            close_old_connections()
            Cpu_Min.objects.create(cpu=cpu,menory=men,network_send=new_send,network_recv=new_recv)
            Except_Log(stat=16,url='资源监控消耗',error=str(e))
#
# def on_done(future):
# # 因为每一个线程都有一个 connections,所以这里可以调用 close_all(),把本线程名下的所有连接关闭。
# connections.close_all()
def get_host(url):
    """Resolve the host part of *url* to an IPv4 address string.

    Accepts "scheme://host[:port]"; a bare "host[:port]" is now tolerated
    too (the original ``url.split('//')[1]`` raised IndexError when no
    scheme separator was present).  Retries once after 2 s on resolver
    failure; on final failure logs via Except_Log and returns the sentinel
    '获取失败' ("lookup failed") that callers compare against.
    """
    # [-1] works whether or not a '//' separator exists.
    host = url.split('//', 1)[-1]
    if ':' in host:
        host = host.split(':')[0]
    try:
        return str(socket.gethostbyname(host))
    except Exception:
        # Transient DNS hiccup: wait briefly and retry once.
        time.sleep(2)
        try:
            return str(socket.gethostbyname(host))
        except Exception as e:
            Except_Log(stat=24, url=host + '|获取IP失败', error=str(e))
            return '获取失败'
def Add_Data_To_Url(url):
    """Ingest one discovered URL into the asset tables.

    Pipeline: jittered sleep (to spread DB load across workers) ->
    resolve IP (skip failures, blacklist, .gov.cn/.edu.cn) -> upsert the
    raw-asset row (Other_Url) -> create the index row (URL), its page
    snapshot (Content) and display row (Show_Data), update the per-domain
    counter -> seed the IP table so the IP scanner picks the host up.

    NOTE(review): the unconditional self-call at the end makes every
    invocation recurse once; it returns on the second pass because the URL
    is then present in the URL table, but repeated insert failures recurse
    deeper — presumably intended as a retry loop; confirm.
    """
    time.sleep(random.randint(5,20))
    time.sleep(random.randint(5,20))
    time.sleep(random.randint(5,20))
    close_old_connections()
    # Never touch government / education domains.
    if '.gov.cn' in url or '.edu.cn' in url:
        return
    try:
        ip = get_host(url)
        # '获取失败' is get_host's "lookup failed" sentinel.
        if ip == '获取失败':
            return
        if ip in black_ip:
            # print('{} : {}所属IP触发IP黑名单 已过滤'.format(url,ip))
            return
        # print('[+ Domain UrlIP] IP解析 --> {} IP --> {}'.format(url, ip))
        # Sem.acquire()
        try:
            test_url = list(URL.objects.filter(url=url))
        except:
            try:
                test_url = list(URL.objects.filter(url=url))
            except:
                close_old_connections()
                test_url = list(URL.objects.filter(url=url))
        # Sem.release()
        # If the URL is already indexed, there is nothing to do.
        if test_url != []:
            return
        try:
            Test_Other_Url = Other_Url.objects.filter(url=url)
            # Insert into the raw network-asset table only when absent.
            if list(Test_Other_Url) == []:
                res = Get_Url_Info(url).get_info()
                res_url = res.get('url')
                try:
                    # Escape the title before it reaches MySQL.
                    res_title = pymysql.escape_string(res.get('title'))
                except Exception as e:
                    res_title = 'Error'
                    Except_Log(stat=11, url=url + '|网页内容转码失败', error=str(e))
                res_power = res.get('power')
                res_server = res.get('server')
                res_status = res.get('status')
                res_ip = ip
                try:
                    Other_Url.objects.create(url=res_url, title=res_title, power=res_power, server=res_server,status=res_status,ip=res_ip)
                except Exception as e:
                    # Retry with a placeholder title after recycling connections
                    # (title may contain bytes the table encoding rejects).
                    close_old_connections()
                    Except_Log(stat=17, url=url + '|标题等信息编码不符合', error=str(e))
                    Other_Url.objects.create(url=res_url, title='Error', power=res_power, server=res_server,status=res_status,ip=res_ip)
        except Exception as e:
            Except_Log(stat=29, url=url + '|网络资产表错误', error=str(e))
        try:
            # res = Get_Url_Info(url).get_info()
            # res_status = res.get('status')
            # (Disabled) re-check of the status code against Alive_Status
            # before indexing, to keep the index consistent.
            # if int(res_status) not in Alive_Status:
            #     return
            '''
            这里添加网址资产到 索引表 和 清洗表
            '''
            test_url1 = list(URL.objects.filter(url=url))
            # Re-check just before insert: another worker may have won the race.
            if test_url1 == []:
                URL.objects.create(url=url,ip=ip)
                # URL index row created; now build the display-side records.
                try:
                    try:
                        # First page of content, escaped for MySQL.
                        Show_contents = pymysql.escape_string(Get_Url_Info(url).Requests()[0])
                        Cont = Content()
                        Cont.url = url
                        Cont.content = Show_contents
                        IP_Res = Get_Ip_Info(ip)
                        Show_cs = IP_Res.get_cs_name(ip)
                        Cont.save()
                        Show_Data.objects.create(url=url, ip=ip,cs=Show_cs, content=Cont)
                    except Exception as e:
                        # Fall back to an 'Error' snapshot so the foreign key
                        # chain (Content -> Show_Data) still gets created.
                        close_old_connections()
                        Except_Log(stat=4, url=url + '|外键添加错误', error=str(e))
                        Show_contents = 'Error'
                        Cont = Content()
                        Cont.url = url
                        Cont.content = Show_contents
                        IP_Res = Get_Ip_Info(ip)
                        Show_cs = IP_Res.get_cs_name(ip)
                        Cont.save()
                        Show_Data.objects.create(url=url, ip=ip,cs=Show_cs, content=Cont)
                except Exception as e:
                    Except_Log(stat=8, url=url + '|外键添加错误', error=str(e))
                # Find which managed root domain this subdomain belongs to.
                This_Sub = [x for x in ALL_DOMAINS if x in url]
                try:
                    # Refresh that root domain's asset counter.
                    if This_Sub != []:
                        Domain_Count = Domains.objects.filter(url=This_Sub[0])[0]
                        counts = Other_Url.objects.filter(url__contains=This_Sub[0])
                        Domain_Count.counts = str(len(counts))
                        # counts = int(Domain_Count.counts)+1
                        # Domain_Count.counts = counts
                        Domain_Count.save()
                except Exception as e:
                    # NOTE(review): This_Sub is a list — this str+list concat
                    # would itself raise TypeError inside the handler.
                    Except_Log(stat=15, url=url +'|获取归属域名失败|'+This_Sub, error=str(e))
        except Exception as e:
            Except_Log(stat=22, url=url + '|添加到网址索引表失败|', error=str(e))
        try:
            test_ip = list(IP.objects.filter(ip=ip))
        except:
            close_old_connections()
            test_ip = list(IP.objects.filter(ip=ip))
        # Seed the IP table (insert-then-enrich pattern: the heavy port scan
        # happens later in the dedicated IP-scanner loop, not here).
        # if test_ip != []:
        #     test_ip_0 = IP.objects.filter(ip=ip)[0]
        #     # (Disabled) skip IPs already scanned or being scanned.
        #     if test_ip_0.get == '是' or test_ip_0.get == '中':
        #         return
        if test_ip ==[]:
            try:
                IP_Res = Get_Ip_Info(ip)
                area = IP_Res.get_ip_address(ip)
                cs_name = IP_Res.get_cs_name(ip)
                try:
                    # Placeholder row; the IP scanner fills servers/host_type/
                    # alive_urls asynchronously (keeps this thread fast).
                    IP.objects.create(ip=ip, servers='None', host_type='None', cs=cs_name,alive_urls='None', area=area)
                except Exception as e:
                    Except_Log(stat=86, url=url + '|转换IP地区编码失败|', error=str(e))
                    IP.objects.create(ip=ip, servers='None', host_type='None', cs=cs_name,alive_urls='None', area='获取失败')
            except Exception as e:
                Except_Log(stat=21, url=url + '|添加IP资源失败|', error=str(e))
    except Exception as e:
        Except_Log(stat=30, url=url + '|维护传入网址失败|', error=str(e))
    # Tail self-call: second pass exits early once the URL row exists.
    Add_Data_To_Url(url)
def Change_IP_Info():
    """IP enrichment worker: claim one unscanned IP row (get='否'), mark it
    in-progress ('中'), then nmap its ports, probe http(s) on each open
    port for live sites, detect the OS family and geo area, and write
    everything back with get='是'.  On any failure the row is rolled back
    to '否' so another worker can retry."""
    # Jittered start so parallel workers do not hammer the DB in lockstep.
    time.sleep(random.randint(1,20))
    time.sleep(random.randint(1,20))
    time.sleep(random.randint(1,20))
    time.sleep(random.randint(1,20))
    # Claim a target and flip its state first, transaction-style.
    try:
        target_ip = IP.objects.filter(get='否')[0]
        ip = target_ip.ip
        target_ip.get = '中'
        # Marked '中' (in progress) immediately so no other worker grabs the
        # same row; NOTE(review): if this worker dies, the row stays stuck
        # in '中' — the cleaner loop compensates for that.
        target_ip.save()
    except Exception as e:
        Except_Log(stat=19, url='|扫描IP资产并设置扫描状态失败|', error='获取预扫描IP失败')
        time.sleep(360)
        # Nothing to scan right now; back off and let the caller loop retry.
        return
    try:
        print('[+ Host Scaner] 当前扫描主机 : {}'.format(ip))
        IP_Res = Get_Ip_Info(ip)
        # Dict of {port: service} from the nmap wrapper.
        servers = IP_Res.get_server_from_nmap(ip)
        open_port = servers.keys()
        check_alive_url = []
        # Probe both schemes on every open port.
        for port in open_port:
            check_alive_url.append('http://{}:{}'.format(ip, port))
            check_alive_url.append('https://{}:{}'.format(ip, port))
        # List of dicts describing the live web services on this IP.
        alive_url = Get_Alive_Url(check_alive_url)
        # OS family, e.g. windows/linux.
        host_type = IP_Res.get_host_type(ip)
        # Geographic area string.
        area = IP_Res.get_ip_address(ip)
        cs = IP_Res.get_cs_name(ip)
        IP_Obj_ip = ip
        IP_Obj_servers = str(servers)
        IP_Obj_host_type = host_type
        IP_Obj_alive_urls = str(alive_url)
        IP_Obj_area = area
        IP_Obj_cs = cs
        IP_Obj_get = '是'
        try:
            IP_Obj = IP.objects.filter(ip=ip)[0]
            IP_Obj.ip = IP_Obj_ip
            IP_Obj.host_type = IP_Obj_host_type
            IP_Obj.alive_urls = IP_Obj_alive_urls
            IP_Obj.servers = IP_Obj_servers
            IP_Obj.area = IP_Obj_area
            IP_Obj.cs = IP_Obj_cs
            IP_Obj.get = IP_Obj_get
            IP_Obj.save()
        except:
            # The save may fail on a dropped connection; the scan results are
            # expensive, so recycle connections and retry once before giving up.
            try:
                close_old_connections()
                IP_Obj = IP.objects.filter(ip=ip)[0]
                IP_Obj.ip = IP_Obj_ip
                IP_Obj.host_type = IP_Obj_host_type
                IP_Obj.alive_urls = IP_Obj_alive_urls
                IP_Obj.servers = IP_Obj_servers
                IP_Obj.area = IP_Obj_area
                IP_Obj.cs = IP_Obj_cs
                IP_Obj.get = IP_Obj_get
                IP_Obj.save()
            except:
                # Still cannot persist: roll the row back to '否' for a rescan.
                close_old_connections()
                IP_Obj_fx = IP.objects.filter(ip=ip)[0]
                IP_Obj_fx.get = '否'
                IP_Obj_fx.save()
    except Exception as e:
        Except_Log(stat=28, url=ip+'|清洗 IP 资产失败|', error=str(e))
        # Scan itself failed: roll the claim back so another worker retries.
        close_old_connections()
        IP_Obj_f = IP.objects.filter(ip=ip)[0]
        IP_Obj_f.get = '否'
        IP_Obj_f.save()
    '''
    下面的代码,是获取ip的c段存活主机,然后加载到扫描计划中,老夫先注释了
    '''
    # try:
    #     cs_ips = [str(x) for x in list(IP_Res.get_cs_ips(ip).values())[0]]
    #     cs_name = cs
    #     # 整个 C 段的数据ip
    #
    #     if ip in cs_ips:
    #         cs_ips.remove(ip)
    #
    #     Read_to_check_host = set()
    #     for cs_ip in cs_ips:
    #         indata = list(IP.objects.filter(ip=str(cs_ip)))
    #         if indata == [] and cs_ip != ip:
    #             Read_to_check_host.add(cs_ip)
    #
    #     Alive_Hosts = IP_Res.get_alive_hosts(Read_to_check_host)
    #     print('[+ CHost Scaner] {} 段存活主机 : {}台'.format(cs_name, len(Alive_Hosts)))
    #     if Alive_Hosts == []:
    #         return
    #     for alive_host in Alive_Hosts:
    #         try:
    #             checkindata = list(IP.objects.filter(ip=str(alive_host)))
    #             if checkindata == []:
    #                 # 最后一次数据判断校验
    #                 c_ip = str(alive_host)
    #                 c_cs = cs_name
    #                 c_area = IP_Res.get_ip_address(c_ip)
    #                 IP.objects.create(ip=c_ip, servers='None', host_type='None', cs=c_cs, alive_urls='None',
    #                                   area=c_area)
    #         except Exception as e:
    #             print('错误代码 [03] {}'.format(str(e)))
    #             Error_Log.objects.create(url=ip, error='错误代码 [03] {}'.format(str(e)))
    #
    # except Exception as e:
    #     print('错误代码 [38] {}'.format(str(e)))
    #     Error_Log.objects.create(url='获取 IP 失败', error='错误代码 [38] {}'.format(str(e)))
def Change_ShowData_Info(Sub_Domains):
    """Data-cleaning worker: claim one unfinished Show_Data row
    (success='否'), wait until its IP has been enriched by the IP scanner,
    then merge the URL-level (Other_Url) and IP-level (IP) attributes into
    the display row and mark it success='是'.  Rolls back to '否' on
    failure so it gets retried."""
    try:
        try:
            target_info = Show_Data.objects.filter(success='否')[0]
            ip = target_info.ip
            url = target_info.url
            Data_IP = IP.objects.filter(ip=ip)[0]
            if Data_IP.get == '否':
                # IP enrichment has not started yet; wait without touching the
                # row (keeps the IP-scanner's DB connections undisturbed).
                time.sleep(300)
                return
            elif Data_IP.get == '中':
                # Stuck "scanning" can mean either a crashed worker or a scan
                # genuinely still running — track repeats before forcing it.
                time.sleep(60)
                Gloval_Check['domain'] = url
                Gloval_Check['counts'] = Gloval_Check['counts']+1
                if Gloval_Check['counts'] == 5:
                    # Five consecutive misses (~6 min each): stop waiting and
                    # claim the row anyway.
                    Gloval_Check['counts'] = 0
                    target_info.get = '中'
                    target_info.save()
                else:
                    return
            else:
                # IP data is ready: claim this row for cleaning.
                target_info.get = '中'
                target_info.save()
        except Exception as e:
            Except_Log(stat=35, url='|清洗数据并设置扫描状态失败|', error='获取预清洗数据失败')
            time.sleep(300)
            # Nothing to clean right now; back off and let the caller retry.
            return
        print('[+ DataInfo Collection] 数据整理清洗 : {} --> {}'.format(url,ip))
        try:
            Data_IP = IP.objects.filter(ip=ip)[0]
            try:
                Data_URL = Other_Url.objects.filter(url=url)[0]
                Show_title = Data_URL.title
                Show_power = Data_URL.power
                # Web container/server the site runs on.
                Show_server = Data_URL.server
                Show_status = Data_URL.status
            except Exception as e:
                # No Other_Url row: fall back to placeholder attributes.
                Show_title = 'None'
                Show_power = 'None'
                Show_server = 'None'
                Show_status = '404'
                Except_Log(stat=12, url=url + '|清洗数据流程设置数据异常|', error=str(e))
            # Open ports and services (stringified dict from the IP scan).
            Show_servers = Data_IP.servers
            # Sibling sites hosted on the same IP.
            Show_alive_urls = Data_IP.alive_urls
            Show_host_type = Data_IP.host_type
            Show_area = Data_IP.area
            Show_cs = Data_IP.cs
            # IP_Res = Get_Ip_Info(ip)
            # Show_cs = IP_Res.get_cs_name(ip)
            # Root domain this URL belongs to, if any.
            Show_belong_domain = [x for x in Sub_Domains if x in url]
            if Show_belong_domain == []:
                Show_belong_domain = 'None'
            else:
                Show_belong_domain = Show_belong_domain[0]
            # All attributes gathered — the row can be marked complete.
            Show_success = '是'
            ShowS_DataD = Show_Data.objects.filter(url=url)[0]
            ShowS_DataD.title = Show_title
            ShowS_DataD.power = Show_power
            ShowS_DataD.server = Show_server
            ShowS_DataD.status = Show_status
            # ShowS_DataD.content = Cont
            ShowS_DataD.servers = Show_servers
            ShowS_DataD.cs = Show_cs
            ShowS_DataD.alive_urls = Show_alive_urls
            ShowS_DataD.host_type = Show_host_type
            ShowS_DataD.area = Show_area
            ShowS_DataD.belong_domain = Show_belong_domain
            ShowS_DataD.success = Show_success
            ShowS_DataD.save()
        except Exception as e:
            # Merge failed: roll the row back to '否' so it is retried.
            Except_Log(stat=43, url=url + '|'+ip+'|清洗数据流程获取数据失败|', error=str(e))
            close_old_connections()
            ShowS_DataD_f = Show_Data.objects.filter(url=url)[0]
            ShowS_DataD_f.success = '否'
            ShowS_DataD_f.save()
    except Exception as e:
        Except_Log(stat=13, url='|清洗数据流程失败|', error=str(e))
        close_old_connections()
# def Run_Baidu(url):
# # 这里对传入Baidu进行重写,该函数接受一个参数域名,返回参数对应的网址,列表格式
#
def Run_Crawl(Domains):
    """Crawler worker: claim one uncrawled URL row (get='否'), crawl it,
    then route every discovered link — links under a managed root domain
    go through Add_Data_To_Url (full ingestion); all other links are
    recorded in the raw Other_Url asset table only."""
    # Prefix each root domain with '.' so substring matching ('.qq.com' in
    # url) does not hit lookalike domains.
    Domains = ['.'+str(x) for x in Domains]
    # Jittered start to spread DB load across parallel workers.
    time.sleep(random.randint(1, 20))
    time.sleep(random.randint(1, 20))
    time.sleep(random.randint(1, 20))
    try:
        target_url = URL.objects.filter(get='否')[0]
        url = target_url.url
        target_url.get = '是'
        target_url.save()
        # Claimed up-front so the next worker cannot pick the same row.
    except Exception as e:
        time.sleep(600)
        Except_Log(stat=31, url='|获取URL并设置扫描状态失败|', error='获取预爬行网址失败')
        # Queue empty (or DB hiccup): back off and let the caller loop retry.
        return
    try:
        All_Urls = Crawl(url)
        if All_Urls == []:
            return
        All_Urls = set(All_Urls)
        Other_Domains = []
        if list(All_Urls) != [] and All_Urls != None:
            try:
                # Links that fall under one of the managed root domains.
                Sub_Domains1 = set([y for x in Domains for y in All_Urls if x in y])
                if list(Sub_Domains1) != []:
                    with ThreadPoolExecutor(max_workers=pool_count) as pool1:
                        result = pool1.map(Add_Data_To_Url, list(Sub_Domains1))
                # Everything else is treated as a third-party asset.
                Other_Domains = list(All_Urls-Sub_Domains1)
            except Exception as e:
                Except_Log(stat=11, url='|获取URL失败|', error=str(e))
        if Other_Domains != [] and Other_Domains != None:
            try:
                for urle in Other_Domains:
                    # Government/education domains are never recorded.
                    if '.gov.cn' not in urle and '.edu.cn' not in urle:
                        try:
                            try:
                                Test_Other_Url = list(Other_Url.objects.filter(url=urle))
                            except:
                                close_old_connections()
                                Test_Other_Url = list(Other_Url.objects.filter(url=urle))
                            # Only insert previously unseen URLs.
                            if Test_Other_Url == []:
                                ip = get_host(urle)
                                res = Get_Url_Info(urle).get_info()
                                res_url = res.get('url')
                                try:
                                    res_title = pymysql.escape_string(res.get('title'))
                                except:
                                    res_title = 'Error'
                                res_power = res.get('power')
                                res_server = res.get('server')
                                status = res.get('status')
                                res_ip = ip
                                #if int(status) in Alive_Status:
                                try:
                                    Other_Url.objects.create(url=res_url, title=res_title, power=res_power, server=res_server,status=status,ip=res_ip)
                                except Exception as e:
                                    # Retry with a placeholder title after
                                    # recycling connections (encoding issues).
                                    Except_Log(stat=33, url=url+'|资产爬行错误|', error=str(e))
                                    close_old_connections()
                                    Other_Url.objects.create(url=res_url, title='Error', power=res_power, server=res_server,status=status,ip=res_ip)
                        except Exception as e:
                            Except_Log(stat=37, url=url + '|资产爬行错误|', error=str(e))
            except Exception as e:
                Except_Log(stat=36, url=url + '|资产爬行错误|', error=str(e))
    except Exception as e:
        Except_Log(stat=32, url=url + '|网址爬行错误|', error=str(e))
def Heartbeat():
    """Connection keep-alive loop: poll the URL table every 2 s (60 s
    while it is still empty) and recycle broken Django connections
    whenever the probe query itself fails."""
    while True:
        try:
            rows = list(URL.objects.all())
            # Longer back-off while there is no data to keep warm yet.
            time.sleep(60 if rows == [] else 2)
        except:
            print('[+ HeartBeat] 维持心跳包失败,清洗失败链接')
            close_old_connections()
def Sub_Api(Sub_Domains):
    """Subdomain discovery via third-party APIs: every ~48 h, query the
    Api() source for each root domain and feed the unique hits into
    Add_Data_To_Url on a thread pool."""
    while True:
        for sub_domain in Sub_Domains:
            found = Api(sub_domain)
            if found not in ([], None):
                with ThreadPoolExecutor(max_workers=pool_count) as workers:
                    workers.map(Add_Data_To_Url, list(set(found)))
            # Short pause after finishing each domain.
            time.sleep(60)
        time.sleep(3600 * 48)
def Sub_Baidu(Sub_Domains):
    """Periodically mine the Baidu search engine for subdomains of each
    root domain and feed the results into the URL pipeline.

    Runs forever: sleeps 60 s between domains and 24 h between full passes.
    """
    while True:
        close_old_connections()
        for sub_domain in Sub_Domains:
            res = Baidu(sub_domain)
            # Truthiness also guards against None; the original `res != []`
            # check would crash in list(set(res)) if Baidu() ever returned
            # None (as the sibling Api() helper can).
            if res:
                with ThreadPoolExecutor(max_workers=pool_count) as pool3:
                    pool3.map(Add_Data_To_Url, list(set(res)))
            # 每次扫完一个域名等待一小会儿
            time.sleep(60)
        time.sleep(3600*24)
def Sub_Brute(Sub_Domains):
    """Brute-force subdomains for each root domain in an endless cycle.

    Pauses 60 s after each domain and 48 h after a full pass.
    """
    while True:
        for domain in Sub_Domains:
            bruteforcer = Brute(domain)
            found = list(set(bruteforcer.start()))
            if found:
                with ThreadPoolExecutor(max_workers=pool_count) as executor:
                    executor.map(Add_Data_To_Url, found)
            # 每爆破一个子域名,歇会儿 (rest briefly after each domain)
            time.sleep(60)
        time.sleep(3600*48)
def Start_Crawl_Run(Sub_Domains):
    """Crawl the given root domains forever (worker-process entry point)."""
    while True:
        Run_Crawl(Sub_Domains)
def Start_ChangeIp_Run():
    """Refresh IP information forever (worker-process entry point)."""
    while True:
        Change_IP_Info()
def Start_ChangeInf_Run(Sub_Domains):
    """Refresh displayed data for the given domains forever."""
    while True:
        Change_ShowData_Info(Sub_Domains)
def Sub_Crawl(pax, Sub_Domains):
    """Fan crawling of the given domains out over a process pool.

    `pax` is unused; it is kept only for signature compatibility with
    existing callers.
    """
    workers = Pool(processes=pool_count)
    for _ in range(pool_count):
        workers.apply_async(Start_Crawl_Run, args=(Sub_Domains,))
    workers.close()
    workers.join()
def Sub_ChangeIp(pax):
    """Run IP-refresh loops across a process pool.

    `pax` is unused; it is kept only for signature compatibility.
    """
    workers = Pool(processes=pool_count)
    for _ in range(pool_count):
        workers.apply_async(Start_ChangeIp_Run)
    workers.close()
    workers.join()
def Sub_ChangeInf(Sub_Domains):
    """Refresh the displayed data for the given domains.

    Runs Start_ChangeInf_Run directly in this process; the pooled
    variant below was disabled and kept only for reference.
    """
    Start_ChangeInf_Run(Sub_Domains)
    # p2 = Pool(processes=pool_count)
    # for i in range(pool_count):
    #     p2.apply_async(Start_ChangeInf_Run,args=(Sub_Domains,))
    # p2.close()
    # p2.join()
if __name__ == '__main__':
    pass
    # Root domains to scan; the file-based loader below is kept for reference.
    #Domains = list(set([x.strip() for x in open('domains.list', 'r', encoding='utf-8').readlines()]))
    Domains = ['baidu.com','qq.com','jd.com','iqiyi.com','kuaishou.com','sina.com']
    # NOTE(review): Sub_Brute loops forever, so the commented alternatives
    # below would never run after it.
    res = Sub_Brute(Domains)
    # Sub_Baidu(Domains)
    # Sub_Crawl(range(20),Domains)
import tornado.web
import traceback
class BaseHandler(tornado.web.RequestHandler):
    """Shared base handler: resolves the signed-in user before each request
    and renders themed error pages."""

    async def prepare(self):
        # Resolve the current user (and their role) from the signed cookie.
        user_id = self.get_secure_cookie("fyssionmediaserver_user")
        if not user_id:
            return
        self.current_user = await self.db.get_user(int(user_id))
        if self.current_user:
            await self.current_user.get_role()

    def write_error(self, status_code, **kwargs):
        if status_code == 404:
            self.render("errors/404.html")
            return
        if self.settings.get("serve_traceback") and "exc_info" in kwargs:
            # in debug mode, try to send a traceback
            exc = traceback.format_exception(*kwargs["exc_info"])
            self.render("errors/traceback.html", traceback=exc)
            return
        self.render("errors/any.html", code=status_code, message=self._reason)

    @property
    def db(self):
        return self.application.db
| Fyssion/FyssionMediaServer | server/handlers/base.py | base.py | py | 906 | python | en | code | 0 | github-code | 13 |
"""
Provides date conversion functions, HistDate, and date scales.
"""
# ========== For conversion between calendars and Julian day numbers. ==========
# Algorithms were obtained from:
# https://en.wikipedia.org/wiki/Julian_day#Converting_Gregorian_calendar_date_to_Julian_Day_Number.
def gregorianToJdn(year: int, month: int, day: int) -> int:
    """
    Convert a Gregorian calendar date into the Julian day number of the
    noon-to-noon 'Julian day' that starts within that date.
    A year of -1 means 1 BC (0 is treated like -1); month 1 is January.
    Overflow forms (month 13, day 0) are accepted.
    Valid for dates from 24th Nov 4714 BC onwards.
    """
    y = year + 1 if year < 0 else year
    # int() truncates toward zero here, which this algorithm relies on;
    # floor division would give a different result for Jan/Feb.
    x = int((month - 14) / 12)
    return (int(1461 * (y + 4800 + x) / 4)
            + int((367 * (month - 2 - 12 * x)) / 12)
            - int((3 * int((y + 4900 + x) / 100)) / 4)
            + day - 32075)
def julianToJdn(year: int, month: int, day: int) -> int:
    """
    Like gregorianToJdn(), but converts a Julian calendar date.
    Valid for dates from 1st Jan 4713 BC onwards.
    """
    y = year + 1 if year < 0 else year
    # int() truncation toward zero is intentional (matters for months 1-8).
    return (367 * y
            - int(7 * (y + 5001 + int((month - 9) / 7)) / 4)
            + int(275 * month / 9)
            + day + 1729777)
def jdnToGregorian(jdn: int) -> tuple[int, int, int]:
    """
    Convert a Julian day number into the (year, month, day) of the Gregorian
    calendar date on which that noon-to-noon 'Julian day' begins.
    Years are shifted so -1 means 1 BC. Valid for non-negative input.
    """
    shifted = jdn + 1401 + (((4 * jdn + 274277) // 146097) * 3) // 4 - 38
    quad = 4 * shifted + 3
    day_of_cycle = (quad % 1461) // 4
    pentad = 5 * day_of_cycle + 2
    day = (pentad % 153) // 5 + 1
    month = (pentad // 153 + 2) % 12 + 1
    year = (quad // 1461) - 4716 + (12 + 2 - month) // 12
    # There is no year 0: step straight from 1 AD to 1 BC (-1).
    return (year, month, day) if year > 0 else (year - 1, month, day)
def jdnToJulian(jdn: int) -> tuple[int, int, int]:
    """ Like jdnToGregorian(), but converts to a Julian calendar date """
    quad = 4 * (jdn + 1401) + 3
    day_of_cycle = (quad % 1461) // 4
    pentad = 5 * day_of_cycle + 2
    day = (pentad % 153) // 5 + 1
    month = (pentad // 153 + 2) % 12 + 1
    year = (quad // 1461) - 4716 + (12 + 2 - month) // 12
    # There is no year 0: step straight from 1 AD to 1 BC (-1).
    return (year, month, day) if year > 0 else (year - 1, month, day)
def julianToGregorian(year: int, month: int, day: int) -> tuple[int, int, int]:
    """Convert a Julian calendar date to the equivalent Gregorian date."""
    jdn = julianToJdn(year, month, day)
    return jdnToGregorian(jdn)
def gregorianToJulian(year: int, month: int, day: int) -> tuple[int, int, int]:
    """Convert a Gregorian calendar date to the equivalent Julian date."""
    jdn = gregorianToJdn(year, month, day)
    return jdnToJulian(jdn)
# ========== For date representation ==========
MIN_CAL_YEAR = -4713 # Year before which JDNs are not usable
# Sentinel scale values (see SCALES): months and days are below year scales.
MONTH_SCALE = -1;
DAY_SCALE = -2;
SCALES: list[int] = [int(s) for s in [1e9, 1e8, 1e7, 1e6, 1e5, 1e4, 1e3, 100, 10, 1, MONTH_SCALE, DAY_SCALE]];
# The timeline is divided into units of SCALES[0], then SCALES[1], etc
# Positive ints represent numbers of years, -1 represents 1 month, -2 represents 1 day
if __debug__: # Validate SCALES
    # Sanity checks enforcing the structural assumptions dateToUnit relies on;
    # skipped when Python runs with -O.
    if SCALES[-1] != DAY_SCALE or SCALES[-2] != MONTH_SCALE or SCALES[-3] != 1:
        raise Exception('SCALES must end with [1, MONTH_SCALE, DAY_SCALE]');
    for i in range(1, len(SCALES) - 2):
        if SCALES[i] <= 0:
            raise Exception('SCALES must only have positive ints before MONTH_SCALE')
        if SCALES[i-1] <= SCALES[i]:
            raise Exception('SCALES must hold decreasing values')
        if SCALES[i-1] % SCALES[i] > 0:
            raise Exception('Each positive int in SCALES must divide the previous int')
class HistDate:
"""
Represents a historical date
- 'year' may be negative (-1 means 1 BC)
- 'month' and 'day' are at least 1, if given
- 'gcal' may be:
- True: Indicates a Gregorian calendar date
- False: Indicates a Julian calendar date
- None: 'month' and 'day' are 1 (required for dates before MIN_CAL_YEAR)
"""
# Note: Intentionally not enforcing, for gcal=None, that year < MIN_CAL_YEAR
def __init__(self, gcal: bool | None, year: int, month=1, day=1):
self.gcal = gcal
self.year = year
self.month = month
self.day = day
def __eq__(self, other): # Used in unit testing
return isinstance(other, HistDate) and \
(self.gcal, self.year, self.month, self.day) == (other.gcal, other.year, other.month, other.day)
def __repr__(self): # Used in unit testing
return str(self.__dict__)
def dbDateToHistDate(n: int, fmt: int, end=False) -> HistDate:
    """ Converts a start/start_upper/etc and fmt value in the 'events' db table, into a HistDate """
    if fmt == 0:
        # Plain year value.
        return HistDate(None, n)
    if fmt == 1 or (fmt == 3 and end):
        # JDN interpreted on the Gregorian calendar.
        return HistDate(True, *jdnToGregorian(n))
    # fmt == 2, or fmt == 3 with end=False: JDN on the Julian calendar.
    return HistDate(False, *jdnToJulian(n))
def dateToUnit(date: HistDate, scale: int) -> int:
    """Map a date onto an integer unit index at the given scale.

    Year scales (scale >= 1) bucket by year; MONTH_SCALE returns the JDN
    of the first day of the month, and DAY_SCALE the JDN of the day itself.
    A gcal of None is treated like Gregorian (month/day are 1 in that case).
    """
    if scale >= 1:
        return date.year // scale
    toJdn = julianToJdn if date.gcal is False else gregorianToJdn
    if scale == MONTH_SCALE:
        return toJdn(date.year, date.month, 1)
    return toJdn(date.year, date.month, date.day)  # scale == DAY_SCALE
| terry06890/chrona | backend/hist_data/cal.py | cal.py | py | 4,976 | python | en | code | 1 | github-code | 13 |
from datetime import datetime, timedelta
from logging import getLogger
import pytz
import time
from random import Random
from threading import Condition, Event, Thread
from crab import CrabError, CrabEvent, CrabStatus
from crab.service import CrabMinutely
from crab.util.schedule import CrabSchedule
# Number of recent non-trivial statuses kept per job for reliability stats.
HISTORY_COUNT = 10
# Extra slack after the scheduled minute before a LATE alarm is raised.
LATE_GRACE_PERIOD = timedelta(seconds=30)
logger = getLogger(__name__)
class JobDeleted(Exception):
    """Raised by _initialize_job when the requested job no longer exists."""
class CrabMonitor(CrabMinutely):
    """A class implementing the crab monitor thread."""
    def __init__(self, store, passive=False):
        """Constructor.
        Saves the given storage backend and prepares the instance
        data.
        If "passive" mode is requested, then the monitor will watch the
        job status but will not write alarms into the store. This could
        be used, for example, to implement a web interface (which requires
        a monitor) separately from the active monitor.
        """
        CrabMinutely.__init__(self)
        self.store = store
        self.passive = passive
        # Per-job state, keyed by job id:
        #   sched        -> CrabSchedule (only for jobs with a "time" string)
        #   status       -> dict with status/running/history/... (see _initialize_job)
        #   config       -> graceperiod/timeout timedeltas
        #   last_start   -> datetime of the most recent start event
        #   timeout / late_timeout / miss_timeout -> pending alarm deadlines
        self.sched = {}
        self.status = {}
        self.status_ready = Event()
        self.config = {}
        self.last_start = {}
        self.timeout = {}
        self.late_timeout = {}
        self.miss_timeout = {}
        # High-water marks of event ids already processed.
        self.max_startid = 0
        self.max_alarmid = 0
        self.max_finishid = 0
        self.new_event = Condition()
        self.num_warning = 0
        self.num_error = 0
        self.random = Random()
    def run(self):
        """Monitor thread main run function.
        When the thread is started, this function will run. It begins
        by fetching a list of jobs and using them to populate its
        data structures. When this is complete, the Event status_ready
        is fired.
        It then goes into a loop, and every few seconds it checks
        for new events, processing any which are found. The new_event
        Condition is fired if there were any new events.
        We call _check_minute from CrabMinutely to check whether the
        minute has changed since the last time round the loop."""
        jobs = self.store.get_jobs()
        for job in jobs:
            id_ = job['id']
            try:
                self._initialize_job(id_, load_events=True)
            except JobDeleted:
                logger.warning('Warning: job {} has vanished'.format(id_))
        self.status_ready.set()
        while True:
            time.sleep(5)
            datetime_ = datetime.now(pytz.UTC)
            # Retrieve events. Trap exceptions in case of database
            # disconnection.
            events = []
            try:
                events = self.store.get_events_since(
                    self.max_startid, self.max_alarmid, self.max_finishid)
            except Exception as e:
                logger.exception('Error: monitor exception getting events')
            for event in events:
                id_ = event['jobid']
                self._update_max_id_values(event)
                try:
                    if id_ not in self.status:
                        self._initialize_job(id_)
                    self._process_event(id_, event)
                    self._compute_reliability(id_)
                # If the monitor is loaded when a job has just been
                # deleted, then it may have events more recent
                # than those of the events that still exist.
                except JobDeleted:
                    pass
                # Also trap other exceptions, in case a database disconnection
                # causes a failure from _initialize_job. Do this separately,
                # inside the events loop so that we keep the max_id_values
                # up to date with the other events.
                except Exception as e:
                    logger.exception('Error: monitor exception handling event')
            # Recount warnings/errors from scratch on every pass.
            self.num_error = 0
            self.num_warning = 0
            for id_ in self.status:
                jobstatus = self.status[id_]['status']
                if (jobstatus is None or CrabStatus.is_ok(jobstatus)):
                    pass
                elif (CrabStatus.is_warning(jobstatus)):
                    self.num_warning += 1
                else:
                    self.num_error += 1
            if events:
                with self.new_event:
                    self.new_event.notify_all()
            # Allow superclass CrabMinutely to call our run_minutely
            # method as required. Note: the call back to run_minutely
            # is protected by a try-except block in the superclass.
            self._check_minute()
            # Check status of timeouts - need to get a list of keys
            # so that we can delete from the dict while iterating.
            # Note: _write_alarm uses a try-except block for CrabErrors.
            for id_ in list(self.late_timeout.keys()):
                if self.late_timeout[id_] < datetime_:
                    self._write_alarm(id_, CrabStatus.LATE)
                    del self.late_timeout[id_]
            for id_ in list(self.miss_timeout.keys()):
                if self.miss_timeout[id_] < datetime_:
                    self._write_alarm(id_, CrabStatus.MISSED)
                    del self.miss_timeout[id_]
            for id_ in list(self.timeout.keys()):
                if self.timeout[id_] < datetime_:
                    self._write_alarm(id_, CrabStatus.TIMEOUT)
                    del self.timeout[id_]
    def run_minutely(self, datetime_):
        """Every minute the job scheduling is checked.
        At this stage we also check for new / deleted / updated jobs."""
        if not self.passive:
            for id_ in self.sched:
                if self.sched[id_].match(datetime_):
                    # Only arm alarms if the job has not just started
                    # within its grace period.
                    if ((id_ not in self.last_start) or
                            (self.last_start[id_] +
                             self.config[id_]['graceperiod'] < datetime_)):
                        # No need to check if the late timeout is already
                        # running as the grace period is currently less
                        # than the minimum scheduling interval.
                        self.late_timeout[id_] = datetime_ + LATE_GRACE_PERIOD
                        # Do not reset the miss timeout if it is already
                        # "running".
                        if id_ not in self.miss_timeout:
                            self.miss_timeout[id_] = (
                                datetime_ + self.config[id_]['graceperiod'])
        # Look for new or deleted jobs.
        currentjobs = set(self.status.keys())
        jobs = self.store.get_jobs()
        for job in jobs:
            id_ = job['id']
            if id_ in currentjobs:
                currentjobs.discard(id_)
                # Compare installed timestamp is case we need to
                # reload the schedule.
                if job['installed'] > self.status[id_]['installed']:
                    self._schedule_job(id_)
                    self.status[id_]['installed'] = job['installed']
                    # TODO: is there a quick way to check whether we
                    # need to do this?
                    self._configure_job(id_)
            else:
                try:
                    self._initialize_job(id_, load_events=True)
                except JobDeleted:
                    logger.warning('Warning: job {} has vanished'.format(id_))
        # Remove (presumably deleted) jobs.
        for id_ in currentjobs:
            self._remove_job(id_)
    def _initialize_job(self, id_, load_events=False):
        """Fetches information about the specified job and records it
        in the instance data structures. Includes a call to _schedule_job."""
        jobinfo = self.store.get_job_info(id_)
        if jobinfo is None or jobinfo['deleted'] is not None:
            raise JobDeleted
        self.status[id_] = {'status': None, 'running': False, 'history': [],
                            'installed': jobinfo['installed']}
        self._schedule_job(id_, jobinfo)
        self._configure_job(id_)
        if load_events:
            # Allow a margin of events over HISTORY_COUNT to allow
            # for start events and alarms.
            events = self.store.get_job_events(id_, 4 * HISTORY_COUNT)
            # Events are returned newest-first but we need to work
            # through them in order.
            for event in reversed(events):
                self._update_max_id_values(event)
                self._process_event(id_, event)
            self._compute_reliability(id_)
    def _schedule_job(self, id_, jobinfo=None):
        """Sets or updates job scheduling information.
        The job information can either be passed in as a dict, or it
        will be fetched from the storage backend. If scheduling information
        (i.e. a "time" string, and optionally a timezone) is present,
        a CrabSchedule object is constructed and stored in the sched dict."""
        if jobinfo is None:
            jobinfo = self.store.get_job_info(id_)
        self.status[id_]['scheduled'] = False
        if jobinfo is not None and jobinfo['time'] is not None:
            try:
                self.sched[id_] = CrabSchedule(jobinfo['time'],
                                               jobinfo['timezone'])
            except CrabError as err:
                logger.exception('Warning: could not add schedule')
            else:
                self.status[id_]['scheduled'] = True
    def _configure_job(self, id_):
        """Sets the job configuration.
        The configuration will be fetched from the storage backend
        and stored in the config dict."""
        # Fallback values, in minutes, used when the store holds no setting.
        default_time = {'graceperiod': 2, 'timeout': 5}
        if id_ not in self.config:
            self.config[id_] = {}
        dbconfig = self.store.get_job_config(id_)
        for parameter in default_time:
            if dbconfig is not None and dbconfig[parameter] is not None:
                self.config[id_][parameter] = timedelta(
                    minutes=dbconfig[parameter])
            else:
                self.config[id_][parameter] = timedelta(
                    minutes=default_time[parameter])
    def _remove_job(self, id_):
        """Removes a job from the instance data structures."""
        try:
            del self.status[id_]
            if id_ in self.config:
                del self.config[id_]
            if id_ in self.sched:
                del self.sched[id_]
            if id_ in self.last_start:
                del self.last_start[id_]
            if id_ in self.timeout:
                del self.timeout[id_]
            if id_ in self.late_timeout:
                del self.late_timeout[id_]
            if id_ in self.miss_timeout:
                del self.miss_timeout[id_]
        except KeyError:
            logger.warning(
                'Warning: stopping monitoring job but it is not in monitor.')
    def _update_max_id_values(self, event):
        """Updates the instance max_startid, max_alarmid and max_finishid
        values if they are outdate by the event, which is passed as a dict."""
        if (event['type'] == CrabEvent.START and
                event['eventid'] > self.max_startid):
            self.max_startid = event['eventid']
        if (event['type'] == CrabEvent.ALARM and
                event['eventid'] > self.max_alarmid):
            self.max_alarmid = event['eventid']
        if (event['type'] == CrabEvent.FINISH and
                event['eventid'] > self.max_finishid):
            self.max_finishid = event['eventid']
    def _process_event(self, id_, event):
        """Processes the given event, updating the instance data
        structures accordingly."""
        datetime_ = event['datetime']
        if event['status'] is not None:
            status = event['status']
            prevstatus = self.status[id_]['status']
            # Avoid overwriting a status with a less important one.
            if status == CrabStatus.CLEARED:
                self.status[id_]['status'] = status
            elif CrabStatus.is_trivial(status):
                if prevstatus is None or CrabStatus.is_ok(prevstatus):
                    self.status[id_]['status'] = status
            elif CrabStatus.is_warning(status):
                if prevstatus is None or not CrabStatus.is_error(prevstatus):
                    self.status[id_]['status'] = status
            # Always set success / failure status (the remaining options).
            else:
                self.status[id_]['status'] = status
            if not CrabStatus.is_trivial(status):
                # Keep a bounded FIFO of recent meaningful statuses for
                # the reliability calculation.
                history = self.status[id_]['history']
                if len(history) >= HISTORY_COUNT:
                    del history[0]
                history.append(status)
        # Handle ALREADYRUNNING as a 'start' type event, so that
        # the MISSED alarm is not raised and the timeout period
        # is extended.
        if (event['type'] == CrabEvent.START or
                event['status'] == CrabStatus.ALREADYRUNNING):
            self.status[id_]['running'] = True
            if not self.passive:
                self.last_start[id_] = datetime_
                self.timeout[id_] = datetime_ + self.config[id_]['timeout']
                if id_ in self.late_timeout:
                    del self.late_timeout[id_]
                if id_ in self.miss_timeout:
                    del self.miss_timeout[id_]
        elif (event['type'] == CrabEvent.FINISH or
                event['status'] == CrabStatus.TIMEOUT):
            self.status[id_]['running'] = False
            if not self.passive:
                if id_ in self.timeout:
                    del self.timeout[id_]
    def _compute_reliability(self, id_):
        """Uses the history list of the specified job to recalculate its
        reliability percentage and store it in the 'reliability'
        entry of the status dict."""
        history = self.status[id_]['history']
        if len(history) == 0:
            self.status[id_]['reliability'] = 0
        else:
            self.status[id_]['reliability'] = int(
                100 * len([x for x in history if x == CrabStatus.SUCCESS]) /
                len(history))
    def _write_alarm(self, id_, status):
        """Inserts an alarm into the storage backend."""
        if self.passive:
            logger.warning('Warning: trying to write alarm in passive mode')
            return
        try:
            self.store.log_alarm(id_, status)
        except CrabError as err:
            logger.exception('Could not record alarm')
    def get_job_status(self, id_=None):
        """Fetches the status of all jobs as a dict.
        For efficiency this returns a reference to our job status dict.
        Callers should not modify it. If a job ID is specified, the
        status entry for that job is returned, or a dummy entry if it is not
        in the status dict."""
        self.status_ready.wait()
        if id_ is None:
            return self.status
        else:
            if id_ in self.status:
                return self.status[id_]
            else:
                return {'status': None, 'running': False}
    def wait_for_event_since(self, startid, alarmid, finishid, timeout=120):
        """Function which waits for new events.
        It does this by comparing the IDs with our maximum values seen so
        far. If no new events have already be seen, wait for the new_event
        Condition to fire.
        A random time up to 20s is added to the timeout to stagger requests."""
        if (self.max_startid > startid or
                self.max_alarmid > alarmid or
                self.max_finishid > finishid):
            pass
        else:
            with self.new_event:
                self.new_event.wait(timeout + self.random.randint(0, 20))
        return {'startid': self.max_startid, 'alarmid': self.max_alarmid,
                'finishid': self.max_finishid, 'status': self.status,
                'numwarning': self.num_warning, 'numerror': self.num_error}
| grahambell/crab | lib/crab/service/monitor.py | monitor.py | py | 16,297 | python | en | code | 61 | github-code | 13 |
import torch
# import numpy as np
# import cv2
from torch.utils.data import Dataset
from pathlib import Path
# from pycocotools.coco import COCO
# from pycocotools import mask as cocomask
import cv2
import numpy as np
import skimage.io as io
# import matplotlib.pyplot as plt
# import pylab
# import random
# import prepare_data
import os
data_path = Path('data')
data_directory = "data/"
# annotation_file_template = "{}/{}/annotation{}.json"
TRAIN_IMAGES_DIRECTORY = "data/train/images"
TRAIN_MASKS_DIRECTORY = "data/train/masks"
VAL_IMAGES_DIRECTORY = "data/train/images"
TEST_IMAGES_DIRECTORY = "data/test/images"
class SaltDataset(Dataset):
    """Torch dataset serving TGS-salt images (and masks, outside predict mode).

    Images are loaded as padded, normalised grayscale arrays; the return
    shape of __getitem__ depends on `mode` and `problem_type` (see below).
    """
    def __init__(self, file_names: str, to_augment=False, transform=None, mode='train', problem_type=None):
        # file_names: iterable of image file names relative to the
        # train/test directories (the str annotation looks inaccurate).
        self.file_names = file_names
        self.to_augment = to_augment
        self.transform = transform
        self.mode = mode
        self.problem_type = problem_type
        ## !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        # self.coco = COCO(self.file_names)
        # self.image_ids = self.coco.getImgIds(catIds=self.coco.getCatIds())
    def __len__(self):
        # if self.mode == 'valid':
        return len(self.file_names)
        # else:
        #     return 2
    def __getitem__(self, idx):
        # Returns, depending on configuration:
        #   binary/train   -> (image tensor, mask tensor)
        #   binary/valid   -> (image tensor, mask tensor, idx)
        #   predict        -> (image tensor, file name)
        #   otherwise      -> (image tensor, mask tensor) via to_float_tensor
        # print(self.file_names)
        # print(idx)
        # print(self.file_names[idx], len(self.file_names), idx)
        # img = self.coco.loadImgs(self.image_ids[idx])[0]
        # annotation_ids = self.coco.getAnnIds(imgIds=img['id'])
        # annotations = self.coco.loadAnns(annotation_ids)
        if self.mode == 'predict':
            img_file_name = os.path.join(TEST_IMAGES_DIRECTORY, self.file_names[idx])
            pic = load_image(img_file_name, self.mode)
            pic, _ = self.transform(pic, None)
            # pic, _ = self.transform(pic[0], None)
        else:
            img_file_name = os.path.join(TRAIN_IMAGES_DIRECTORY, self.file_names[idx])
            mask_file_name = os.path.join(TRAIN_MASKS_DIRECTORY, self.file_names[idx])
            pic = load_image(img_file_name, self.mode)
            mask = load_image(mask_file_name, 'mask')
            pic, mask = self.transform(pic, mask)
            # plot_aug(pic, mask)
        if self.problem_type == 'binary' and self.mode == 'train':
            # return to_float_tensor(pic),\
            #        torch.from_numpy(np.expand_dims(mask, 0)).float()
            # expand_dims adds the channel axis expected by the network.
            return torch.from_numpy(np.expand_dims(pic, 0)).float(), \
                   torch.from_numpy(np.expand_dims(mask, 0)).float()
        elif self.problem_type == 'binary' and self.mode == 'valid':
            # return to_float_tensor(pic),\
            #        torch.from_numpy(np.expand_dims(mask, 0)).float(), idx
            return torch.from_numpy(np.expand_dims(pic, 0)).float(), \
                   torch.from_numpy(np.expand_dims(mask, 0)).float(), idx
        elif self.mode == 'predict':
            # return to_float_tensor(pic), self.file_names[idx]
            return torch.from_numpy(np.expand_dims(pic, 0)).float(), self.file_names[idx]
        else:
            # return to_float_tensor(img), torch.from_numpy(mask).long()
            # NOTE(review): this branch converts via to_float_tensor, which
            # moves the LAST axis to the front — for 2-D inputs this differs
            # from the expand_dims layout above; confirm intended.
            return to_float_tensor(pic), to_float_tensor(mask)
        # else:
        #     return to_float_tensor(img)# , str(img_file_name)
def to_float_tensor(img):
    """Convert an H x W x C array into a C x H x W float32 torch tensor."""
    channels_first = np.moveaxis(img, -1, 0)
    return torch.from_numpy(channels_first).float()
def load_image(image_path, mode):
    """Load a grayscale image, pad it to a multiple of 32 and scale to [0, 1].

    `mode` == 'mask' and any other mode currently follow the same code path
    (grayscale read, pad, divide by 255); the branches differ only in the
    commented-out experiments kept for reference.
    Returns a 2-D float array (the padding offsets from pad() are discarded).
    """
    # if mode == 'valid':
    #     image_path = os.path.join(VAL_IMAGES_DIRECTORY, img["file_name"])
    # else:
    #     image_path = os.path.join(TRAIN_IMAGES_DIRECTORY, img)
    if mode == 'mask':
        # Flag 0 = cv2.IMREAD_GRAYSCALE.
        I = cv2.imread(image_path, 0)
        I = pad(I, pad_size=32)
        I = I[0]/255
    else:
        I = cv2.imread(image_path, 0)
        # arr = np.linspace(0, 100, 101)
        # mesh = np.meshgrid(arr, arr)[0]
        # I[:,:,1] = mesh
        # I[:,:,2] = mesh.transpose(1, 0)
        I = pad(I, pad_size=32)
        I = I[0]/255
    # I1 = cv2.imread(image_path)
    # print(path_, img.shape)
    return I
def pad(img, pad_size=32):
    """Pad an image so each side is divisible by pad_size (network requirement).

    Padding is mirrored (BORDER_REFLECT_101) and split evenly between the
    two sides of each dimension.

    Returns:
        (padded_image, (x_min_pad, y_min_pad, x_max_pad, y_max_pad))

    Fix: the original returned a bare `img` (no tuple) when pad_size == 0,
    unlike every other path — callers that unpack `img, pads = pad(...)`
    would crash. Now a (img, zero-pads) tuple is always returned.
    """
    if pad_size == 0:
        return img, (0, 0, 0, 0)
    height, width = img.shape[:2]
    if height % pad_size == 0:
        y_min_pad = 0
        y_max_pad = 0
    else:
        y_pad = pad_size - height % pad_size
        y_min_pad = int(y_pad / 2)
        y_max_pad = y_pad - y_min_pad
    if width % pad_size == 0:
        x_min_pad = 0
        x_max_pad = 0
    else:
        x_pad = pad_size - width % pad_size
        x_min_pad = int(x_pad / 2)
        x_max_pad = x_pad - x_min_pad
    img = cv2.copyMakeBorder(img, y_min_pad, y_max_pad, x_min_pad, x_max_pad, cv2.BORDER_REFLECT_101)
    return img, (x_min_pad, y_min_pad, x_max_pad, y_max_pad)
def unpad(img, pads):
    """Crop away the borders previously added by pad().

    img: numpy array of shape (height, width[, channels])
    pads: (x_min_pad, y_min_pad, x_max_pad, y_max_pad)
    Returns the central, unpadded region of the image.
    """
    left, top, right, bottom = pads
    h, w = img.shape[:2]
    return img[top:h - bottom, left:w - right]
# def load_mask(annotations, img):
# mask = np.zeros((img['height'], img['width']))
# for i in annotations:
# rle = cocomask.frPyObjects(i['segmentation'], img['height'], img['width'])
# m = cocomask.decode(rle)
# # m.shape has a shape of (300, 300, 1)
# # so we first convert it to a shape of (300, 300)
# m = m.reshape((img['height'], img['width']))
# mask += m
# path_ = "data/stage1_train_/{}/masksmask.png".format(path)
# if mode != 'train':
# path_ = "data/stage2_test/{}/images/{}.png".format(path, path)
# if not os.path.isfile(path_):
# print('{} was empty'.format(path_))
# factor = prepare_data.binary_factor
# mask = cv2.imread(str(path_))
# kernel = np.ones((5, 5), np.uint8)
# mask = mask.astype(np.uint8)
# kernel = np.ones((5, 5), np.uint8)
# dilation = cv2.dilate(mask, kernel, iterations=10)
# mask = dilation - mask
# seed = cv2.erode(mask[:, :, 0], kernel, iterations=1)
# border = mask[:, :, 0] - seed
# mask[:, :, 1] = np.zeros(seed.shape)
# mask[:, :, 1] = seed
# mask[:, :, 2] = np.zeros(seed.shape)
# mask[:, :, 2] = border
# return mask
class MapDatasetTest(Dataset):
    """Inference-time dataset for the mapping-challenge images.

    __getitem__ loads an image by name, applies the (image, mask=None)
    transform, and yields (tensor, file_name) pairs.
    """
    def __init__(self, file_names: str, to_augment=False, transform=None, mode='predict', problem_type=None):
        # to_augment / mode / problem_type are stored but unused here;
        # kept for interface parity with SaltDataset.
        self.file_names = file_names
        self.to_augment = to_augment
        self.transform = transform
        self.mode = mode
        self.problem_type = problem_type
    def __len__(self):
        # if self.mode == 'valid':
        return len(self.file_names)
        # else:
        #     return 10
    def __getitem__(self, idx):
        # print(self.file_names)
        # print(idx)
        # print(self.file_names[idx], len(self.file_names), idx)
        img_file_name = self.file_names[idx]
        img = load_image_test(img_file_name)
        # mask = None
        # Transform expects (image, mask); no mask exists at predict time.
        img, mask = self.transform(img, None)
        return to_float_tensor(img), img_file_name
def load_image_test(img):
    """Read a validation image by file name from the mapping-challenge
    data directory; prints a warning if the path does not exist."""
    # path_ = "data/stage1_train_/{}/images/{}.png".format(path, path)
    # path_ = "data/stage1_test/{}/images/{}.png".format(path, path)
    # path_ = "../mapping-challenge-starter-kit/data/test_images/{}".format(img)
    path_ = "../mapping-challenge-starter-kit/data/val/images/{}".format(img)
    if not os.path.isfile(path_):
        print('{} was empty'.format(path_))
    # NOTE(review): io.imread is still attempted even when the file is
    # missing, so the print above is followed by an exception.
    I = io.imread(path_)
    # I1 = cv2.imread(image_path)
    # print(path_, img.shape)
    return I
# def plot_aug(pic, mask):
# fig = plt.figure(figsize=(8, 8))
# fig.add_subplot(1, 2, 1)
# plt.imshow(pic)
# fig.add_subplot(1, 2, 2)
# plt.imshow(mask)
# # plt.imshow(pic)
# # plt.imshow(mask)
# plt.show()
| Justdjent/kaggle_salt | dataset.py | dataset.py | py | 8,191 | python | en | code | 0 | github-code | 13 |
import random
import time
from email_build import buildEmail
from confirmation_email import buildKeyEmail
from lsts import giver_list, key_master
# Everyone who gives a gift also receives one, so the receiver pool
# starts as a copy of the giver list.
giver = giver_list
receiver = [name for name in giver]
# Chooses a random index within the given list and removes that entry.
def pop_random(lst):
    """Remove and return a uniformly random element of lst."""
    return lst.pop(random.randrange(len(lst)))
# loops through list and removes similar "pairing" of current item in list, adds the pair to a dictionary and then adds back in the "pairing" after the email is sent
# Module-level result: pairs[n] = {"giver", "email", "receiver"} for the
# n-th giver (1-based).
pairs = {}
def randomize_pairs(list1, list2):
    """Assign each giver in list1 a random receiver from list2, never
    matching two people who share the same 'pairing' group.

    Mutates list2 (temporarily, entries are re-added) and fills the
    module-level `pairs` dict. Raises ValueError (via pop_random on an
    empty list) when the last giver can only match their own group —
    the caller retries in that case.
    """
    for counter, name in enumerate(list1):
        findPairing = name['pairing']
        # assumes each person's 'pairing' group contains themselves,
        # which also prevents self-matches — TODO confirm
        pairingMatch = [x for x in list2 if x['pairing'] == findPairing]
        for person in pairingMatch:
            list2.remove(person)
        chosenPerson = pop_random(list2)
        for person in pairingMatch:
            list2.append(person)
        pairs[counter+1] = {"giver": name['name'], "email": name['email'], "receiver": chosenPerson['name'],}
# loops over pairs and sends email based on info provided
def sendEmail(match):
    """Email each giver the name of their assigned receiver.

    `match` is the pairs dict built by randomize_pairs; the key (p_Id)
    is unused.
    """
    for p_Id, p_info in match.items():
        buildEmail(p_info['giver'], p_info['email'], p_info['receiver'])
working = False
# this while function will only work if no exception is thrown, which in this case is when the last person matches with a similar pairing. It will auto restart the function until it completes without the exception
# NOTE(review): ValueError here comes from pop_random's randrange on an
# empty list; by that point some pairing emails may already have been
# sent by sendEmail — confirm whether a retry can double-send.
while not working:
    try:
        randomize_pairs(giver, receiver)
        sendEmail(pairs)
        buildKeyEmail(pairs, key_master)
        working = True
    except ValueError as e:
        # the receiver reset is needed, otherwise the randomize_pairs() function doesn't work. All dictionaries are removed after function is run
        receiver = [name for name in giver]
        print(e)
        pass
startSequence = []
substitutions = {}
# Simple two-phase input parser: sequence line first, then pair-insertion
# rules after the blank separator line.
READ_SEQUENCE=0
READ_SUBSTITUTIONS=1
inputMode = READ_SEQUENCE
with open("input.txt") as FILE:
    for line in FILE.readlines():
        line = line.strip()
        if inputMode == READ_SEQUENCE:
            if len(line)==0:
                inputMode = READ_SUBSTITUTIONS
            else:
                startSequence = list(line)
        else:
            tokens = line.split(' -> ')
            substitutions[tokens[0]] = tokens[1]
#part a
# Build the polymer explicitly for 10 steps: for each adjacent pair,
# look up the inserted element, then interleave old and new elements.
currentSequence = startSequence
for step in range(10):
    newElements = []
    for i in range(0,len(currentSequence)-1):
        pair = ''.join(currentSequence[i:i+2])
        newElements += substitutions[pair]
    newSequence = currentSequence+newElements
    # Slice assignment interleaves: evens = old chars, odds = insertions.
    newSequence[::2] = currentSequence
    newSequence[1::2] = newElements
    currentSequence = newSequence
from collections import Counter
counter = Counter(currentSequence)
# NOTE(review): `sorted` shadows the builtin from here on.
sorted = counter.most_common()
print(sorted[0][1]-sorted[-1][1])
#part b, aww peas
#assumption: start and end character unchanged (for resolving boundary conditions. appears to always be B...N for input)
# Track pair counts instead of the exponentially-growing sequence.
startingPairs = Counter()
for i in range(0,len(startSequence)-1):
    pair = ''.join(startSequence[i:i+2])
    startingPairs[pair]+=1
currentPairs = startingPairs
for step in range(40):
    newPairs = Counter()
    for pair, value in currentPairs.items():
        # Each pair AB with rule AB -> C becomes the two pairs AC and CB.
        newChar = substitutions[pair]
        newPairs[pair[0]+newChar] += value
        newPairs[newChar+pair[1]] += value
    currentPairs = newPairs
elementCounter = Counter()
# Every element is counted twice here (once per pair it belongs to),
# except the fixed first/last characters — GetHalvedCount corrects this.
for pair, count in currentPairs.items():
    for c in pair:
        elementCounter[c] += count
sorted = elementCounter.most_common()
def GetHalvedCount(count):
    """Return ceil(count / 2): each element was double-counted by the
    pair tally, except the sequence's first/last character (odd totals)."""
    return (count + 1) // 2
print(GetHalvedCount(sorted[0][1])-GetHalvedCount(sorted[-1][1]))
day = 0
age = 0
#Taking input
age = int(input("Enter Age = "))
day = int(input("Enter Day of Week 1-7 :"))
# The branches must be mutually exclusive: the original used independent
# `if` statements, so day 1 printed both "The museum is closed." and
# "You pay full price.", and the day-2/4 discount message was always
# followed by "You pay full price." as well.
if day == 1:
    print("The museum is closed.")
elif day == 2 or day == 4:
    print("You get half price discount!")
elif 13 <= age <= 20 and day == 3:
    # Teenagers (13-20) get the discount on day 3.
    print("You get half price discount!")
else:
    print("You pay full price.")
| Umair-Manzoor-47/Semester-2 | week 1/Meuseum.py | Meuseum.py | py | 395 | python | en | code | 0 | github-code | 13 |
import cozmo
import time
from iudrl_agent import IUDRL_Agent as Agent
from torchvision.transforms.functional import *
# Primitive robot actions as one-shot lambdas. Distances/speeds are
# cozmo.util values (presumably millimetres and mm/s — TODO confirm
# against the Cozmo SDK); angles are in degrees.
straight_drive = lambda robot: robot.drive_straight(cozmo.util.Distance(100), cozmo.util.Speed(100))
straight_drive_backwards = lambda robot: robot.drive_straight(cozmo.util.Distance(-100), cozmo.util.Speed(100))
# NOTE(review): names say 90 degrees but the turns are 30 degrees.
rot_90_left = lambda robot: robot.turn_in_place(cozmo.util.Angle(degrees=30))
rot_90_right = lambda robot: robot.turn_in_place(cozmo.util.Angle(degrees=-30))
do_nothing = lambda robot: robot.drive_straight(cozmo.util.Distance(0), cozmo.util.Speed(0))
move_head_up = lambda robot: robot.set_head_angle(cozmo.util.Angle(degrees=robot.head_angle.degrees+5))
move_head_down = lambda robot: robot.set_head_angle(cozmo.util.Angle(degrees=robot.head_angle.degrees-5))
def get_shot(robot: cozmo.robot.Robot, size):
    """Grab the latest camera frame, resize it, and return it as a
    batched tensor of shape (1, C, H, W).

    `resize` and `to_tensor` come from the star import of
    torchvision.transforms.functional at the top of the file.
    """
    image = robot.world.latest_image.raw_image
    pillow = resize(image, size)
    tensor = to_tensor(pillow)
    # Add the leading batch dimension expected by the agent.
    tensor = torch.unsqueeze(tensor, dim=0)
    return tensor
def main(robot: cozmo.robot.Robot):
robot.camera.image_stream_enabled = True
time.sleep(1)
agent = Agent((100,100), 7)
state = get_shot(robot, (100,100))
replay_process = torch.multiprocessing.Process(target=agent.replay)
acc = 0
while True:
action, probs, features = agent.act(state)
try:
if action == 0:
straight_drive(robot)
elif action == 1:
straight_drive_backwards(robot)
elif action == 2:
rot_90_left(robot)
elif action == 3:
rot_90_right(robot)
next_state = get_shot(robot, (100,100))
reward = 0
agent.remember(features, state, probs, reward, next_state, acc)
if acc == 0:
replay_process.start()
prev_state = state
state = next_state
except cozmo.exceptions.RobotBusy:
pass
acc += 1
cozmo.run_program(main) | reeshogue/Cozminimum | test.py | test.py | py | 2,001 | python | en | code | 0 | github-code | 13 |
4849863092 | from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class InputPanel(QDialog):
def __init__(self, parent, label, defaulttext):
super(InputPanel, self).__init__(parent=parent)
self.setWindowFlags(Qt.FramelessWindowHint)
self._text = ''
self.setObjectName("self")
hlay = QHBoxLayout(self)
hlay.addWidget(QLabel(label))
self.line_edit = QLineEdit()
self.line_edit.setText(defaulttext)
hlay.addWidget(self.line_edit)
self.setLayout(hlay)
self.setMinimumWidth(600)
self.line_edit.returnPressed.connect(self._ok_clicked)
self.line_edit.setFocus()
self.setStyleSheet("""
QLineEdit{ background-color: #ffffff; }
InputPanel { background: #f0f0f0; }
QLabel{ background: #d08080; }
QDialog {
background: #d08080;
}
""")
def _ok_clicked(self):
self._text = self.line_edit.text()
self.done(1)
def text(self):
return self._text
def show_input_panel(parent, label, defaulttext):
ip = InputPanel(parent, label, defaulttext)
if parent:
ip.move(parent.rect().center() - ip.rect().center())
ret = ip.exec_()
if ret:
return ip.text()
else:
return None
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
QApplication.setStyle(QStyleFactory.create('Fusion'))
gui = QMainWindow()
gui.setFocus()
gui.show()
show_input_panel(gui, 'Label', 'Default')
sys.exit(app.exec_())
| renerocksai/sublimeless_zk | src/inputpanel.py | inputpanel.py | py | 1,626 | python | en | code | 185 | github-code | 13 |
38921860304 | # avg O(Log(n)) where n is the number of nodes. It's log of n because you eliminate half of the tree | O(n)
def closest_value_in_bst_recursive(tree, target):
return findClosestValueInBstHelper(tree, target, float("inf"))
# Average: O(log(n)) time | O(log(n) space
# worst: O(n) time | O(n) space
def findClosestValueInBstHelper(tree, target, closest):
if tree is None:
return closest
if abs(target - closest) > abs(target - tree.value):
closest = tree.value
if target < tree.value:
return findClosestValueInBstHelper(tree.left, target, closest)
elif target > tree.value:
return findClosestValueInBstHelper(tree.right, target, closest)
else:
return closest
# Average: O(log(n)) time | O(log(1) space
# worst: O(n) time | O(1) space
def closest_value_in_bst_iteratively(tree, target):
return findClosestValueInBstHelper(tree, target, float("inf"))
def findClosestValueInBstHelperIteratively(tree, target, closest):
currentNode = tree
while currentNode is not None:
if abs(target - closest) > abs(target - tree.value):
closest = currentNode.value
if target < tree.value:
currentNode = currentNode.left
elif target > currentNode.value:
currentNode = currentNode.right
else:
break
return closest
| RysanekRivera/DataStructuresAndAlgorithmsPractice | src/problems/closestvalueinbst/ClosestValueInBST.py | ClosestValueInBST.py | py | 1,357 | python | en | code | 0 | github-code | 13 |
16393503047 | import logging
import os
import re
import sys
import traceback
import yaml
from datetime import datetime, timedelta, timezone
from nexus_helper.nexus_helper import NexusHelper
from zmpe import raise_error, zabbix_sender
# Program settings
settings = {}
# Program logger
logger = logging.getLogger()
def main():
mine_time = datetime.now()
# Get app settings
try:
get_settings()
except Exception:
print(f"ERROR: app cannot get settings\n{traceback.format_exc()}")
exit(1)
# Create app logger
set_logger()
logger.debug(settings)
# Читаем списки правил для каждого репо из rules.yaml
with open("rules.yaml", "r") as stream:
try:
rules_yaml = yaml.safe_load(stream)
except yaml.YAMLError as err:
raise_error(settings, logger, program=settings.PROGRAM, hostname=settings.HOSTNAME,
message=f"{err}\n{traceback.format_exc()}", do_error_exit=True)
logger.debug(rules_yaml)
repos = rules_yaml['repos']
try:
count = 0
# Обрабатываем каждый репозиторий
for repo in repos:
logger.info(f"Work with {repo} repo")
settings.nexus_repo = repo
nexus = NexusHelper(settings, logger)
if settings.DEV:
result = nexus.fake_get_list_component_items()
else:
result = nexus.get_list_component_items()
logger.debug(result)
logger.debug(f"Всего {len(result)} items")
# Из списка result удалить все образы которые соответствуют правилам exclude_rules.
logger.info(f" ")
logger.info(f"Apply exclude_rules")
exclude_rules = repos[repo]['exclude_rules']
logger.debug(exclude_rules)
for rule in exclude_rules:
rule = rule['rule']
logger.debug(f"{type(rule)} | {rule}")
for name in list(result.keys()):
if re.search(rule, name):
logger.debug(f"{rule} | {name}")
del result[name]
logger.info(f" ")
logger.debug(f"После exclude_rules осталось {len(result)} items")
logger.debug(result)
# Оставшиеся образы обрабатываем правилами include_rules.
logger.info(f" ")
logger.info(f"Apply include_rules")
include_rules = repos[repo]['include_rules']
logger.debug(include_rules)
for rule in include_rules: # Цикл по каждому правилу
rule_rule = rule['rule']
logger.debug(f" ")
logger.debug(f" {type(rule_rule)} | {rule_rule}")
for name in list(result.keys()): # Выбираем из списка всех образов те которые соответствуют правилу
if re.search(rule_rule, name):
logger.debug(f" {rule_rule} | {name}")
logger.info(f"Save last {rule['last']} images")
# Сортируем list of components (NexusComponent) по reverse last_modified и берём всё что далее last.
# Т.е. сохраняем [rule['last'] образов.
result[name].sort(reverse=True, key=lambda comp: comp.last_modified)
for i in result[name][0:rule['last']]:
logger.debug(f" {i.name}:{i.version} | {i.last_modified}")
list_for_check_days = result[name][rule['last']:]
logger.info(f"Delete images older than {rule['days']} days")
for comp in list_for_check_days:
# logger.debug(f" {comp.name}:{comp.version} | {comp.last_modified}")
if (datetime.now(timezone.utc) - comp.last_modified).days > rule['days']:
logger.info(f"Delete component {comp.name}:{comp.version} | {comp.last_modified}")
nexus.delete_component(comp_id=comp.id)
count += 1
except Exception as err:
result = {'cleaner-result': 1, 'cleaner-time': 0, 'cleaner-count': 0}
zabbix_sender(settings, logger, result)
raise_error(settings, logger, program=settings.PROGRAM, hostname=settings.HOSTNAME,
message=f"{err}\n{traceback.format_exc()}", do_error_exit=True)
time_execution = round((datetime.now() - mine_time).total_seconds())
logger.info(f"Process executed in {time_execution} sec.")
# Send Zabbix info
if settings.ZM_ZABBIX_SEND:
try:
result = {'cleaner-result': 0, 'cleaner-time': time_execution, 'cleaner-count': count}
zabbix_sender(settings, logger, result)
except Exception as err:
raise_error(settings, logger, program=settings.PROGRAM, hostname=settings.HOSTNAME,
message=f"Cannot send data to Zabbix\n{err}\n{traceback.format_exc()}", do_error_exit=True)
class Settings(object):
"""
Program settings like class
"""
def __init__(self, iterable=(), **kwargs):
self.__dict__.update(iterable, **kwargs)
def __str__(self):
return str(self.__dict__)
def get_settings():
global settings
# Enable DEBUG mode?
settings['DEBUG'] = os.getenv("NX_DEBUG", 'False').lower() in 'true'
settings['nexus_host'] = os.getenv('NX_HOST', "Unknown")
settings['nexus_username'] = os.getenv('NX_USERNAME', "Unknown")
settings['nexus_password'] = os.getenv('NX_PASSWORD', "Unknown")
settings['nexus_repo'] = os.getenv('NX_REPO', "Unknown")
# Get program name (without extension so that telegram does not convert the program name into a link)
settings['PROGRAM'] = os.path.splitext(os.path.basename(__file__))[0]
# Telegram settings
settings['ZM_TELEGRAM_NOTIF'] = os.getenv("ZM_TELEGRAM_NOTIF", 'True').lower() in 'true'
settings['ZM_TELEGRAM_CHAT'] = os.getenv('ZM_TELEGRAM_CHAT', "Unknown")
settings['ZM_TELEGRAM_BOT_TOKEN'] = os.getenv('ZM_TELEGRAM_BOT_TOKEN', "Unknown")
# Zabbix settings
# Should app send data to Zabbix?
settings['ZM_ZABBIX_SEND'] = os.getenv("ZM_ZABBIX_SEND", 'True').lower() in 'true'
# Zabbix server ip address.
settings['ZM_ZABBIX_IP'] = os.getenv('ZM_ZABBIX_IP', None)
# Zabbix "Host name". How is the host named in Zabbix.
settings['ZM_ZABBIX_HOST_NAME'] = os.getenv('ZM_ZABBIX_HOST_NAME', None)
# Add settings
settings['DEV'] = os.getenv("NX_DEV", 'False').lower() in 'true'
settings['HOSTNAME'] = settings['ZM_ZABBIX_HOST_NAME']
settings = Settings(settings)
# Check required setting
if settings.ZM_TELEGRAM_NOTIF and \
(not settings.ZM_TELEGRAM_BOT_TOKEN or not settings.ZM_TELEGRAM_CHAT):
print(f"ERROR: Telegram notifications are enabled but the parameters ZM_TELEGRAM_BOT_TOKEN, ZM_TELEGRAM_CHAT "
f"are not defined")
exit(1)
if settings.ZM_ZABBIX_SEND and \
(not settings.ZM_ZABBIX_IP or not settings.ZM_ZABBIX_HOST_NAME):
print(f"ERROR: Send data to Zabbix are enabled but the parameters ZM_ZABBIX_IP, "
f"ZM_ZABBIX_HOST_NAME are not defined")
exit(1)
def set_logger():
# Log to stdout
global logger
if settings.DEBUG:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
stdout_log_formatter = logging.Formatter('%(asctime)s|%(levelname)-5s|%(funcName)s| %(message)s')
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(stdout_log_formatter)
logger.addHandler(stdout_handler)
if __name__ == '__main__':
main()
| MinistrBob/MyPythonTools | Nexus/nexus_repo_cleaner.py | nexus_repo_cleaner.py | py | 8,032 | python | en | code | 0 | github-code | 13 |
1702506415 | import requests
from bs4 import BeautifulSoup
class Movie:
def __init__(self, link):
headers = {"Accept-Language": "en-US,en;q=0.5",
'User-Agent': 'Mozilla/5.0'}
response = requests.get(link, headers=headers)
self.soup = BeautifulSoup(response.text, 'html.parser')
def get_name(self):
return self.soup.select_one('h1').text + f' ({self.get_year()})'
def get_year(self):
return self.soup.select_one('.sc-f26752fb-1').text
# ipc-metadata-list-item__list-content baseAlt
def get_directors(self):
directors = []
directors_raw = self.soup.select_one(
'.ipc-inline-list.ipc-inline-list--show-dividers.ipc-inline-list--inline')
try:
directors_list = directors_raw.select('li')
for director in directors_list:
director_name = director.text
directors.append(director_name)
return directors
except AttributeError:
return
def get_actors(self):
actors_raw = self.soup.select(
'.sc-bfec09a1-1')
return [actor.text for actor in actors_raw]
| alexrotaru15/actors-crawler | classes.py | classes.py | py | 1,168 | python | en | code | 0 | github-code | 13 |
37882360920 | from copy import deepcopy
readline = lambda: list(map(int, input().split()))
N, M = readline()
cloud_cord = [[N, 1], [N, 2], [N - 1, 1], [N - 1, 2]]
basket = []
visited = []
move = []
DIRECTION = {
1: (0, -1),
2: (-1, -1),
3: (-1, 0),
4: (-1, 1),
5: (0, 1),
6: (1, 1),
7: (1, 0),
8: (1, -1)
}
CROSS_CORD = [(-1, 1),(1, 1),(1, -1),(-1, -1)]
def make_cloud_entirely():
global cloud_cord
new_cloud_cord = []
for i in range(1, N + 1):
for j in range(1, N + 1):
if basket[i][j] >= 2 and not visited[i][j]:
basket[i][j] -= 2
new_cloud_cord.append([i, j])
if visited[i][j]:
visited[i][j] = False
cloud_cord = deepcopy(new_cloud_cord)
def cast_water_copy_bug():
for r, c in cloud_cord:
for cr, cc in CROSS_CORD:
dr = r + cr
dc = c + cc
# 범위 넘어가면 skip
if dr < 1 or dr > N or dc < 1 or dc > N:
continue
# 해당 바구니에 물이없으면 스킵
if basket[dr][dc] == 0:
continue
basket[r][c] += 1
def rain():
# 비내리기 1증가
for r, c in cloud_cord:
basket[r][c] += 1
def move_cloud(dir, mov):
# 구름 입력 방향으로 이동
for cord in cloud_cord:
cord[0] += DIRECTION[dir][0] * mov
cord[1] += DIRECTION[dir][1] * mov
# print("r:",cord[0], "c:", cord[1])
cord[0] %= N
cord[1] %= N
if cord[0] == 0:
cord[0] = N
if cord[1] == 0:
cord[1] = N
visited[cord[0]][cord[1]] = True
def sum_water():
count = 0
for i in basket:
count += sum(i)
return count
def init():
# 초기화
basket.append([0] * (N + 1))
visited.append([False] * (N + 1))
for _ in range(N):
data = readline()
# 첫번째 자리 인덱스 1을 맞춰 주기 위한 0
basket.append([0, *data])
visited.append([False] * (N + 1))
for _ in range(M):
dir, mov = readline()
move.append((dir, mov))
# 출력부
init()
for dir, mov in move:
move_cloud(dir, mov)
rain()
cast_water_copy_bug()
make_cloud_entirely()
print(sum_water()) | kod4284/kod-algo-note | 백준/21610-마법사-상어와-비바라기/answer.py | answer.py | py | 2,297 | python | en | code | 0 | github-code | 13 |
35654688522 |
file = open('abc.txt', 'w+')
file.write('Linha 1\n')
file.write('Linha 2\n')
file.seek(0, 0)
print('Lendo linhas: ', file.read())
print('########')
file.seek(0, 0)
for linha in file.readlines():
print(linha, end='')
file.seek(0, 0)
file.close()
#############################################
try:
file = open('abc.txt', 'w+')
file.write('Linha')
file.seek(0, 0)
print(file.read())
finally:
file.close()
#############################################
with open('abc.txt', 'w+') as file:
file.write('Teste da linha1\n')
file.write('Teste da linha2\n')
file.seek(0)
print(file.read())
#############################################
with open('abc.txt', 'r') as file:
print(file.read())
file.seek(0)
with open('abc.txt', 'a+') as file:
file.write('\nLinha adicional')
file.seek(0)
print(file.read())
#############################################
try:
import os
os.remove('abc1.txt')
except:
pass
#############################################
import json
from aula_80_map.aula_80_data import pessoas2
pessoas_json = json.dumps(pessoas2, indent=True)
with open('abc.json', 'w+') as file:
file.write(pessoas_json) | JonasFiechter/UDEMY-Python | aula_89_arquivos/aula_89.py | aula_89.py | py | 1,198 | python | en | code | 0 | github-code | 13 |
73291335376 |
from hamcrest import *
import cloudant
import streaming.util
def test_red_view():
db = streaming.util.create_streaming_db()
v = db.view("foo", "bam")
assert_that(v.rows, has_length(streaming.util.NUM_RED_ROWS[0]))
assert_that(v.rows[0], has_entry("value", 1000))
def test_red_view_group_true():
db = streaming.util.create_streaming_db()
v = db.view("foo", "bam", group=True)
# Technically we could have random numbers collide
# but it shouldn't be very many
assert_that(streaming.util.NUM_RED_ROWS[2] - len(v.rows), less_than(3))
def test_red_view_group_level():
db = streaming.util.create_streaming_db()
v = db.view("foo", "bam", group_level=1)
# Technically we're relying on a uniform random
# distribution so. Make sure we cover at least
# seven of the eight possibilities
assert_that(streaming.util.NUM_RED_ROWS[1] - len(v.rows), less_than(2))
def test_red_view_stale_ok():
db = streaming.util.create_streaming_db()
v = db.view("foo", "bam", stale="ok")
assert_that(v.rows, has_length(streaming.util.NUM_RED_ROWS[0]))
assert_that(v.rows[0], has_entry("value", 1000))
| cloudant/quimby | streaming/1002-reduce-views-test.py | 1002-reduce-views-test.py | py | 1,157 | python | en | code | 0 | github-code | 13 |
26079235305 | from math import pi
def circle_area(radius):
if type(radius) not in [int, float]:
raise TypeError("Radius must be real non-negative number")
if radius < 0:
raise ValueError("Radius must be non-negative")
return pi * radius ** 2
check_list = [1, 20, 37, -5, 2+3j, [1, 2,], True, 'string']
for i in check_list:
print(f'Площадь окружности с радиусом {i} равна {circle_area(i)}') | PaulusCereus/unittests | testing_circle_area/circle.py | circle.py | py | 441 | python | en | code | 0 | github-code | 13 |
70549207059 | f = open("input")
nums = [[int(c) for c in l] for l in f.read().splitlines()]
width = len(nums[0])
height = len(nums)
padding = 10
border = [padding] * (width + 2)
nums = [border, *([padding, *l, padding] for l in nums), border]
def get(x, y):
return nums[y + 1][x + 1]
def is_low(x, y):
h = get(x, y)
return (h < get(x - 1, y) and
h < get(x + 1, y) and
h < get(x, y - 1) and
h < get(x, y + 1))
risk = sum(get(x, y) + 1 for x in range(width) for y in range(height) if is_low(x, y))
print(risk) | Leowbattle/aoc2021 | day09/day9a.py | day9a.py | py | 512 | python | en | code | 0 | github-code | 13 |
31181390583 | import os
import re
from collections import namedtuple
import numpy as np
from nltk.corpus import stopwords as sw
from gensim.utils import simple_preprocess
# Helper functions
punctuations = '!"#$%&()\*\+,-\./:;<=>?@[\\]^_`{|}~'
re_punc = re.compile(r"["+punctuations+r"]+")
re_space = re.compile(r" +")
stopwords = set(sw.words('english'))
Definition = namedtuple('Definition', ['word', 'type', 'def_sent', 'meta_data'])
# For slang data entries
SlangEntry = namedtuple('SlangEntry', ['word', 'def_sent', 'meta_data'])
DataIndex = namedtuple('DataIndex', ['train', 'dev', 'test'])
Triplet = namedtuple('Triplet', ['anchor', 'positive', 'negative'])
def tokenize(sentence):
return re.compile(r"(?:^|(?<=\s))\S+(?=\s|$)").findall(sentence)
def processTokens(fun, sentence):
return re.compile(r"(?:^|(?<=\s))\S+(?=\s|$)").sub(fun, sentence)
def normalize(array, axis=1):
denoms = np.sum(array, axis=axis)
if axis == 1:
return array / denoms[:,np.newaxis]
if axis == 0:
return array / denoms[np.newaxis, :]
def normalize_L2(array, axis=1):
if axis == 1:
return array / np.linalg.norm(array, axis=1)[:, np.newaxis]
if axis == 0:
return array / np.linalg.norm(array, axis=0)[np.newaxis, :]
def acronym_check(entry):
if 'acronym' in entry.def_sent:
return True
for c in str(entry.word):
if ord(c) >= 65 and ord(c) <= 90:
continue
return False
return True
def is_close_def(query_sent, target_sent, threshold=0.5):
query_s = [w for w in simple_preprocess(query_sent) if w not in stopwords]
target_s = set([w for w in simple_preprocess(target_sent) if w not in stopwords])
overlap_c = 0
for word in query_s:
if word in target_s:
overlap_c += 1
return overlap_c >= len(query_s) * threshold
def has_close_conv_def(word, slang_def_sent, conv_data, threshold=0.5):
conv_sents = [d['def'] for d in conv_data[word].definitions]
for conv_sent in conv_sents:
if is_close_def(slang_def_sent, conv_sent, threshold):
return True
return False
def create_directory(path):
try:
if not os.path.exists(path):
os.mkdir(path)
except OSError:
print ("Creation of the directory %s failed" % path)
# For conventional data entries
class Word:
def __init__(self, word):
self.word = word
self.pos_tags = set()
self.definitions = []
def attach_def(self, word_def, pos, sentences):
new_def = {'def':word_def, 'pos':pos, 'sents':sentences}
self.pos_tags.add(pos)
self.definitions.append(new_def)
# Evaluation helpers
def get_rankings(l_model, inds, labels):
N = l_model.shape[0]
ranks = np.zeros(l_model.shape, dtype=np.int32)
rankings = np.zeros(N, dtype=np.int32)
for i in range(N):
ranks[i] = np.argsort(l_model[i])[::-1]
rankings[i] = ranks[i].tolist().index(labels[inds[i]])+1
return rankings
def get_roc(rankings, N_cat):
roc = np.zeros(N_cat+1)
for rank in rankings:
roc[rank]+=1
for i in range(1,N_cat+1):
roc[i] = roc[i] + roc[i-1]
return roc / rankings.shape[0] | zhewei-sun/slanggen | Code/util.py | util.py | py | 3,244 | python | en | code | 5 | github-code | 13 |
13081561491 | from shop.sales import Sales
# Config what to buy:
unavailable_black_totle = {
'name': 'black tote',
'url': 'https://www.hermes.com/us/en/product/herbag-zip-cabine-bag-H082835CKAC/'
}
available_hand_bag = {
'name': 'white hand bag',
'url': 'https://www.hermes.com/us/en/product/herbag-zip-cabine-bag-H077787CKAA/'
}
hand_bag2 = {
'name': 'gold hand bag',
'url': 'https://www.hermes.com/us/en/product/herbag-zip-cabine-bag-H077787CKAA/'
}
items_to_buy = [unavailable_black_totle, available_hand_bag, hand_bag2]
# Config run:
CheckFrequencyInSecond = 10
MaxRunTimeInHour = 0.02
try:
sales_person = Sales()
sales_person.add_items(items_to_buy)
sales_person.set_wait_time_second(CheckFrequencyInSecond)
sales_person.set_max_duration_hour(MaxRunTimeInHour)
sales_person.run()
except RuntimeError as e:
print(e)
| carerley/hermes | main.py | main.py | py | 863 | python | en | code | 0 | github-code | 13 |
74166694417 | import threading
import time
def thread_job():
# print("this is add_thread,number is %s"% threading.current_thread())
print("T1 start\n")
for i in range(10):
time.sleep(0.1)
print("T1 finish\n")
def T2_job():
print("T2 start\n")
print("T2 finish\n")
def main():
add_thread=threading.Thread(target=thread_job,name="T1")
thread2 = threading.Thread(target=T2_job,name="T2")
add_thread.start()
thread2.start()
thread2.join()
add_thread.join()
# print(threading.active_count())
# print(threading.enumerate())
# print(threading.current_thread())
print("all done\n")
if __name__ == '__main__':
main()
| levinyi/scripts | crawler/thread_day1.py | thread_day1.py | py | 620 | python | en | code | 8 | github-code | 13 |
9889031834 |
import cv2
import numpy as np
import os
import argparse
import logging
log_format = '%(created)f:%(levelname)s:%(message)s'
logging.basicConfig(level=logging.DEBUG, format=log_format) # log to file filename='example.log',
TAG = "edge-detector-full:"
def detectEdges(img):
height, width, depth = img.shape
# pyr = np.zeros((height/2, width/2, depth), np.uint8)
# cv2.pyrDown(img, pyr)
timg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(timg, 10, 100, apertureSize=3)
return edges
def main():
logging.debug(TAG + "inside main")
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--video", help="capture from video file instead of from camera")
args = parser.parse_args()
logging.debug(TAG + "done parsing arguments")
capture = cv2.VideoCapture()
if args.video:
capture.open(args.video)
else:
capture.open(0)
if not capture.isOpened():
# Failed to open camera
return False
logging.debug(TAG + "camera opened")
# Create window
cv2.namedWindow('edge_detector')
while True:
logging.debug(TAG + "before reading frame")
retval, frame = capture.read()
if not retval:
break # end of video
logging.debug(TAG + "after reading frame")
edges = detectEdges(frame)
out = np.zeros(frame.shape, np.uint8)
contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
for i in xrange(len(contours)):
# has parent, inner (hole) contour of a closed edge
# print hierarchy[i]
# print hierarchy[0][i][3]
if (hierarchy[0][i][3] >= 0):
cv2.drawContours(out, contours, i, (255, 255, 255), 1, 8)
cv2.imshow("frame", frame)
cv2.imshow("edges", out)
logging.debug(TAG + "displayed image")
if cv2.waitKey(5) == 27: # exit on escape
logging.debug(TAG + "received escape key")
break
return True
logging.debug(TAG + "starting module")
if __name__ == "__main__":
logging.debug(TAG + "starting main")
main()
| christhompson/recognizers-arch | apps/darkly/downsampling_edge_detector/edges.py | edges.py | py | 2,153 | python | en | code | 1 | github-code | 13 |
8183857073 | import Extract
import Transform
import Load
import psutil
import datetime
def pipeline(url):
start = datetime.datetime.now()
print("pipeline started ...")
print(
f"extractData ended, CPU : {psutil.cpu_percent()}, Memory: {psutil.virtual_memory().percent}"
)
extractedData = Extract.extractData(url)
transformedDataDict = Transform.transforData(extractedData)
Load.loadData(transformedDataDict)
print(
f"extractData ended, CPU : {psutil.cpu_percent()}, Memory: {psutil.virtual_memory().percent}"
)
end = datetime.datetime.now()
print("pipeline ended ...")
print(f"total time to execute {end - start}")
dataApiSource = "https://storage.googleapis.com/uber-data-storage-bucket/uberData2023-06-07%2019%3A48%3A49.092952.csv"
pipeline(dataApiSource)
| 3zHrb/DataEngineering-UberProject | uber_project/uber_project_pipeline/pipeline.py | pipeline.py | py | 814 | python | en | code | 0 | github-code | 13 |
39099868450 | __author__ = 'Hakan Uyumaz'
import json
from django.shortcuts import get_object_or_404
from django.http import HttpResponse
from ..models import User, FriendshipRequest
from ..views import file
responseJSON = {}
def is_POST(request):
if request.method != "POST":
fail_response()
responseJSON["message"] = "No request found."
return False
return True
def success_response(responseJSON):
responseJSON["status"] = "success"
def fail_response():
responseJSON["status"] = "failed"
def create_user_JSON(user):
userJSON = {}
userJSON["name"] = user.name
userJSON["surname"] = user.surname
userJSON["username"] = user.username
userJSON["email"] = user.email
return userJSON
def search_user(request, search_field):
responseJSON = {}
user = None
if request.user.is_authenticated():
user = request.user
else:
user = get_object_or_404(User, username=request.POST["username"])
if len(search_field) > 0:
users_list = set()
for result_user in User.objects.filter(name__contains=search_field).exclude(username__exact=user.username):
users_list.add(result_user)
for result_user in (User.objects.filter(surname__contains=search_field)).exclude(username__exact=user.username):
users_list.add(result_user)
for result_user in User.objects.filter(username__contains=search_field).exclude(username__exact=user.username):
users_list.add(result_user)
success_response(responseJSON)
#print(json.dumps(responseJSON))
if len(users_list) > 0:
responseJSON["message"] = "Users found."
responseJSON["users"] = []
for user in users_list:
responseJSON["users"].append(create_user_JSON(user))
else:
responseJSON["message"] = "Users not found."
else:
fail_response(responseJSON)
responseJSON["message"] = "No search field found."
#print(json.dumps(responseJSON))
file.create_file(request, responseJSON, "search_user", request.method)
return HttpResponse(json.dumps(responseJSON))
def send_friend_request(request):
responseJSON = {}
if is_POST(request):
sender_username = request.POST["sender"]
receiver_username = request.POST["receiver"]
sender = get_object_or_404(User, username=sender_username)
receiver = get_object_or_404(User, username=receiver_username)
if FriendshipRequest.objects.filter(sender=sender, receiver=receiver).count() > 0:
friend_request = get_object_or_404(FriendshipRequest, sender=sender, receiver=receiver)
friend_request.status = 'P'
friend_request.save()
success_response(responseJSON)
responseJSON["message"] = "Existing friend request updated."
else:
friend_request = FriendshipRequest(sender=sender, receiver=receiver, status='P')
friend_request.save()
success_response(responseJSON)
responseJSON["message"] = "Friend request created."
file.create_file(request, responseJSON, "send_friend_request", request.method)
return HttpResponse(json.dumps(responseJSON))
def accept_friend_request(request):
responseJSON = {}
if is_POST(request):
sender_username = request.POST["sender"]
receiver_username = request.POST["receiver"]
sender = get_object_or_404(User, username=sender_username)
receiver = get_object_or_404(User, username=receiver_username)
if FriendshipRequest.objects.filter(sender=sender, receiver=receiver, status='P').count() > 0:
friend_request = get_object_or_404(FriendshipRequest, sender=sender, receiver=receiver)
friend_request.status = 'A'
friend_request.save()
sender.friends.add(receiver)
success_response(responseJSON)
responseJSON["message"] = "Existing friend request updated."
else:
fail_response(responseJSON)
responseJSON["message"] = "Pending friend request cannot be found."
file.create_file(request, responseJSON, "accept_friend_request", request.method)
return HttpResponse(json.dumps(responseJSON))
def reject_friend_request(request):
responseJSON = {}
if is_POST(request):
sender_username = request.POST["sender"]
receiver_username = request.POST["receiver"]
sender = get_object_or_404(User, username=sender_username)
receiver = get_object_or_404(User, username=receiver_username)
if FriendshipRequest.objects.filter(sender=sender, receiver=receiver, status='P').count() > 0:
friend_request = get_object_or_404(FriendshipRequest, sender=sender, receiver=receiver)
friend_request.status = 'R'
friend_request.save()
success_response(responseJSON)
responseJSON["message"] = "Existing friend request updated."
else:
fail_response(responseJSON)
responseJSON["message"] = "Pending friend request cannot be found."
file.create_file(request, responseJSON, "reject_friend_request", request.method)
return HttpResponse(json.dumps(responseJSON))
def get_friendship_request_situation(request):
responseJSON = {}
if is_POST(request):
sender_username = request.POST["sender"]
receiver_username = request.POST["receiver"]
sender = get_object_or_404(FriendshipRequest, sender=sender_username)
receiver = get_object_or_404(FriendshipRequest, receiver=receiver_username)
if FriendshipRequest.objects.filter(sender=sender, receiver=receiver).count() > 0:
friendship_request_situation = get_object_or_404(FriendshipRequest, sender=sender, receiver=receiver).status
success_response(responseJSON)
friendship_requestJSON = {}
friendship_requestJSON["sender"] = sender_username
friendship_requestJSON["receiver"] = receiver_username
friendship_requestJSON["status"] = friendship_request_situation
friendship_requestJSON["friendship_request"].append(friendship_requestJSON)
else:
fail_response()
responseJSON["message"] = "Pending friend request cannot be found."
file.create_file(request, responseJSON, "get_friendship_request_situation", request.method)
return HttpResponse(json.dumps(responseJSON))
def get_friend_list(request):
responseJSON = {}
if is_POST(request):
username = request.POST["username"]
user = get_object_or_404(User, username=username)
friend_list = user.friends.all()
success_response(responseJSON)
responseJSON["message"] = "Friends found."
responseJSON["friends"] = []
for friend in friend_list:
responseJSON["friends"].append(create_user_JSON(friend))
file.create_file(request, responseJSON, "get_friend_list", request.method)
return HttpResponse(json.dumps(responseJSON))
def get_friend_requests(request):
responseJSON = {}
if is_POST(request):
username = request.POST["username"]
#print(json.dumps(responseJSON))
user = get_object_or_404(User, username=username)
friend_request_list = FriendshipRequest.objects.filter(receiver=user, status='P')
success_response(responseJSON)
responseJSON["senders"] = []
sender_list = []
for friend_request in friend_request_list:
sender_list.append(friend_request.sender)
if len(sender_list) > 0:
responseJSON["message"] = "Requests found."
else:
responseJSON["message"] = "Requests not found."
for sender in sender_list:
responseJSON["senders"].append(create_user_JSON(sender))
file.create_file(request, responseJSON, "get_friend_requests", request.method)
return HttpResponse(json.dumps(responseJSON))
def remove_friend(request):
responseJSON = {}
if is_POST(request):
username = request.POST["username"]
friend_username = request.POST["friend"]
user = get_object_or_404(User, username=username)
friend = get_object_or_404(User, username=friend_username)
user.friends.remove(friend)
success_response(responseJSON)
responseJSON["message"] = "Friend removed"
file.create_file(request, responseJSON, "remove_friend", request.method)
return HttpResponse(json.dumps(responseJSON)) | OEA/LetsEat | web/api/views/friendship_views.py | friendship_views.py | py | 8,531 | python | en | code | 6 | github-code | 13 |
14502215246 | input = open("../input/day02/input.txt", "r").readlines()
horizontal = 0
depth = 0
aim = 0
for line in input:
(command, x) = line.split(" ")
x = int(x)
if command == "forward":
horizontal += x
depth += aim * x
elif command == "up":
# depth -= x
aim -= x
elif command == "down":
# depth += x
aim += x
print(depth * horizontal) | verysamuel/advent-of-code-2021 | python/day02.py | day02.py | py | 395 | python | en | code | 1 | github-code | 13 |
73685126417 | #To create SSL keys use:
#
# openssl genrsa -out webhook_pkey.pem 2048
# openssl req -new -x509 -days 3650 -key webhook_pkey.pem -out weebhook_cert.pem
#
# in "Common Name write the same value as in WEBHOOK_HOST"
import telebot, json, requests, time, ssl, logging
import ToPng
from io import BytesIO
from aiohttp import web
#load and set configs
def load_config():
    """Load the bot configuration dict from token.json in the working directory."""
    with open('token.json') as config_handle:
        parsed_config = json.load(config_handle)
    return parsed_config
config = load_config()
API_TOKEN = config['api']['token']
WEBHOOK_HOST = config['webhook']['host']
WEBHOOK_PORT = int(config['webhook']['port'])
WEBHOOK_LISTEN = config['webhook']['listen']
WEBHOOK_SSL_CERT = './webhook_cert.pem'
WEBHOOK_SSL_PRIV = './webhook_pkey.pem'
WEBHOOK_URL_BASE = "https://{}:{}".format(WEBHOOK_HOST, WEBHOOK_PORT)
WEBHOOK_URL_PATH = "/{}/".format(API_TOKEN)
logging.basicConfig(filename='logs.txt', filemode='a',
format='%(asctime)s,%(msecs) %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
logger = telebot.logger
telebot.logger.setLevel(logging.INFO)
telebot.apihelper.READ_TIMEOUT = 5
bot = telebot.TeleBot(API_TOKEN, threaded=False)
app = web.Application()
# webhook calls
async def handle(request):
    """aiohttp webhook endpoint: dispatch Telegram updates posted to /<token>/."""
    # Only accept updates addressed to the secret bot-token path; anything
    # else is rejected so third parties cannot inject updates.
    if request.match_info.get('token') == bot.token:
        request_body_dict = await request.json()
        # Convert the raw webhook JSON into a telebot Update and dispatch it.
        update = telebot.types.Update.de_json(request_body_dict)
        bot.process_new_updates([update])
        return web.Response()
    else:
        return web.Response(status=403)
app.router.add_post('/{token}/', handle)
#download file from telegram
def download_file(file_path):
    """Fetch *file_path* from the Telegram Bot file API; returns the raw response."""
    file_url = 'https://api.telegram.org/file/bot{0}/{1}'.format(config['api']['token'], file_path)
    return requests.get(file_url)
#reply to message
def replyto(message, text):
    """Reply to *message* with *text*, retrying on read timeouts.

    Any non-timeout failure is logged and swallowed so message handlers
    never crash the bot.
    """
    # BUG FIX: the original caught a bare `ReadTimeout`, which is not a
    # defined name in this module and raised NameError whenever reply_to
    # failed; it also retried via unbounded recursion.
    while True:
        try:
            bot.reply_to(message, text)
            return
        except requests.exceptions.ReadTimeout as e:
            # Transient network stall: log and retry the send.
            logging.error(e)
        except Exception as e:
            logging.error(e)
            return
#download photo, convert it and send as document
def download_and_send_photo(message, file_path):
    """Download a Telegram-hosted image, convert it to PNG and send it back
    as an uncompressed document."""
    req = download_file(file_path)
    if req.status_code == 200:
        try:
            png_file = ToPng.convert_to_png(BytesIO(req.content))
            png_file.name = 'image.png'
            bot.send_document(message.chat.id, png_file)
        except Exception as e:
            # The original bare `except:` hid every failure; log it before
            # reporting — most likely the payload was not an image.
            logging.error(e)
            bot.reply_to(message, 'something went wrong, maybe file is not an image')
    else:
        replyto(message, 'something went wrong, please try again')
@bot.message_handler(content_types=['document'])
def get_document(message):
    """Handle a document upload: convert it to PNG and send it back."""
    # Resolve the Telegram file path for the uploaded document.
    file_info = bot.get_file(message.document.file_id)
    # NOTE(review): the size check could run before get_file to save one
    # API call — confirm before reordering.
    if message.document.file_size > 2000000:
        replyto(message, 'maximum file size is 2MB')
        return
    download_and_send_photo(message, file_info.file_path)
@bot.message_handler(commands=['start', 'help'])
def handle_help(message):
    # /start and /help share the same one-line usage hint.
    replyto(message, 'send your image(as photo or document) or sticker to get its raw copy in png')
@bot.message_handler(content_types=['photo'])
def handle_photo(message):
    """Send back the highest-resolution variant of an incoming photo as a PNG
    document."""
    # Telegram delivers several downscaled variants of the same photo;
    # pick the one with the largest file size (the original loop did this
    # manually and also left a debug print behind).
    max_photo = max(message.photo, key=lambda photo: photo.file_size)
    file_info = bot.get_file(max_photo.file_id)
    download_and_send_photo(message, file_info.file_path)
@bot.message_handler(content_types=['sticker'])
def handle_sticker(message):
    """Send back an incoming sticker: animated ones as the raw .tgs file,
    static ones converted to PNG."""
    file_info = bot.get_file(message.sticker.file_id)
    if message.sticker.is_animated:
        # Animated stickers cannot be converted to PNG; forward the raw .tgs.
        req = download_file(file_info.file_path)
        if req.status_code == 200:
            file2 = BytesIO(req.content)
            file2.name = 'image.tgs'
            bot.send_document(message.chat.id, file2)
            replyto(message, 'just download your animated sticker with right-click. If you are using mobile version of telegram, accept my condolences.' )
        else:
            # BUG FIX: the original call dropped the `message` argument,
            # raising TypeError on this error path.
            replyto(message, 'something went wrong, please try again')
        return
    download_and_send_photo(message, file_info.file_path)
#remove webhook cause previous may be set
bot.remove_webhook()
bot.set_webhook(url=WEBHOOK_URL_BASE + WEBHOOK_URL_PATH, certificate=open(WEBHOOK_SSL_CERT, 'r'))
print('set webhook', WEBHOOK_URL_BASE, WEBHOOK_URL_PATH, sep=' ')
#build ssl context
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.load_cert_chain(WEBHOOK_SSL_CERT, WEBHOOK_SSL_PRIV)
print('kek ' + str(WEBHOOK_LISTEN) + ' ' + str(WEBHOOK_PORT))
web.run_app(
app,
host=WEBHOOK_LISTEN,
port=WEBHOOK_PORT,
ssl_context=context,
)
| Graftiger/Lab4_XLA | ImageToSticker_converter/src/main.py | main.py | py | 4,560 | python | en | code | 0 | github-code | 13 |
class Rekins():
    """Invoice for an engraved item: customer, dedication text, dimensions
    (cm) and material price (EUR/m2)."""

    def __init__(self, klients, veltijums, izmers, materials):
        self.klients = klients
        self.veltijums = veltijums
        # BUG FIX: the original split `izmers` into strings and then called
        # .split(",") a second time on the resulting list (AttributeError);
        # the string dimensions also broke the arithmetic in aprekins().
        # Parse "platums,garums,augstums" straight into floats.
        self.izmers = [float(dim) for dim in izmers.split(",")]
        self.materials = float(materials)
        self.aprekins()

    def izdrukat(self):
        """Print the invoice to stdout."""
        print("Klients:", self.klients)
        print("Veltījums:", self.veltijums)
        print("Izmērs:", self.izmers)
        print("Materiāls:", self.materials)
        print("Apmaksas summa:", self.aprekins())

    def aprekins(self):
        """Return the total invoice amount: engraving + material + labour, plus VAT."""
        darba_samaksa = 15  # flat labour fee, EUR
        PVN = 21            # VAT, percent
        # 1.20 EUR per engraved character + material volume (m3) / 3 * price.
        produkta_cena = (len(self.veltijums)) * 1.2 + (self.izmers[0] / 100 * self.izmers[1] / 100 * self.izmers[2] / 100) / 3 * self.materials
        PVN_summa = (produkta_cena + darba_samaksa) * PVN / 100
        rekina_summa = (produkta_cena + darba_samaksa) + PVN_summa
        return rekina_summa
klients = input("Ievadi vārdu: ")
veltijums = input("Ievadi veltījumu: ")
izmers = input("Ievadi izmēru (platums,garums,augstums): ")
materials = input("Ievadi materiāla cenu EUR/m2: ")
pirmais = Rekins(klients,veltijums,izmers,materials)
pirmais.izdrukat()
| malinovskiss/2022-2023.g | 13.09.2022.py | 13.09.2022.py | py | 1,215 | python | lv | code | 0 | github-code | 13 |
37451261520 | import numpy as np
import kcorrect as kc
import astropy.table
kc.load_templates()
kc.load_filters(f='myfilter.dat')
catalogues={}
#=======================================================================
balogh_data2 = astropy.table.Table.read('/home/dannyluo/cosmosdata/balogh_data2bands.csv', format='ascii.csv')
catalogues['balogh_data2'] = balogh_data2
print('Imported Balogh Datatable 2 as balogh_data2')
#=======================================================================
ultravista = astropy.table.Table.read('/home/dannyluo/cosmosdata/UVISTA_final_v4.1_modified.csv', format='ascii.csv')
catalogues['ultravista'] = ultravista
print('Imported Ultravista Final v.4.1 MODIFIED as ultravista')
#=======================================================================
ultravista_main = astropy.table.Table.read('/home/dannyluo/cosmosdata/UVISTA_Main.csv', format='ascii.csv')
catalogues['ultravista_main'] = ultravista_main
print('Imported Ultravista Main as ultravista_main')
#=======================================================================
bands = ['ch1', 'J', 'V', 'zp']
bd = np.empty((len(balogh_data2), len(bands)))
bdivar = np.empty((len(balogh_data2), len(bands)))
uv = np.empty((len(ultravista_main), len(bands)))
uvivar = np.empty((len(ultravista_main), len(bands)))
bd = np.array([ultravista_main[band][balogh_data2['ultravista_match']].data for band in bands]).T
bdivar = (np.array([ultravista_main['e'+band][balogh_data2['ultravista_match']].data for band in bands]).T)**(-2)
uv = np.array([ultravista_main[band].data for band in bands]).T
uvivar = np.array([ultravista_main['e'+band].data for band in bands]).T**(-2)
#uv = []
#this way to ensure no decimal funny business
for i in range(len(bands)):
#uv.append(ultravista_main[bands[i]].data)
uv[:, i] = ultravista_main[bands[i]].data
uvivar[:, i] = (ultravista_main['e'+bands[i]].data)**(-2)
bd[:, i] = ultravista_main[bands[i]][balogh_data2['ultravista_match']].data
bdivar[:, i] = (ultravista_main['e'+bands[i]][balogh_data2['ultravista_match']].data)**(-2)
bdzp = balogh_data2['zp'].data
bdzs = balogh_data2['zs'].data
#Not the same as 'zp' in ultravista_main, which is the band flux
uvzp = ultravista['zp'].data
#balogh_data2 photometric kcorrection
bdpkc = np.zeros((len(balogh_data2), len(bands)))
#balogh_data2 spectroscopic kcorrection
bdskc = np.zeros((len(balogh_data2), len(bands)))
#ultravista kcorrection
uvkc = np.zeros((len(ultravista), len(bands)))
#performing kcorrection------------------------------------------------------
for i in range(len(bdzp)):
coeffs = kc.fit_nonneg(bdzp[i], bd[i], bdivar[i])
rm = kc.reconstruct_maggies(coeffs)
rm0 = kc.reconstruct_maggies(coeffs, redshift=0.9)
kcorrection = -2.5*np.log10(rm[1:]/rm0[1:])
bdpkc[i] = kcorrection
for i in range(len(bdzs)):
coeffs = kc.fit_nonneg(bdzs[i], bd[i], bdivar[i])
rm = kc.reconstruct_maggies(coeffs)
rm0 = kc.reconstruct_maggies(coeffs, redshift=0.9)
kcorrection = -2.5*np.log10(rm[1:]/rm0[1:])
bdskc[i] = kcorrection
for i in range(len(uvzp)):
coeffs = kc.fit_nonneg(uvzp[i], uv[i], uvivar[i])
rm = kc.reconstruct_maggies(coeffs)
rm0 = kc.reconstruct_maggies(coeffs, redshift=0.9)
kcorrection = -2.5*np.log10(rm[1:]/rm0[1:])
uvkc[i] = kcorrection
#General Bad Flag, If at least one filter flux is -99.99
uv_flag = ((uv==-9.9999e+1).sum(axis=1)!=0)
#Same for balogh_data2
bdzp_flag = ((bd==-9.9999e+1).sum(axis=1)!=0)
#But include, for spectrocospic data, null zs values
bdzs_flag = bdzp_flag+(bdzs==-99.99)
#Setting null entries
uvkc[uv_flag] = -99.99
bdpkc[bdzp_flag] = -99.99
bdskc[bdzs_flag] = -99.99
print('There are ', (uv_flag==0).sum(), ' good entries out of ', len(ultravista_main), ' for ultravista')
print('There are ', (bdzp_flag==0).sum(), ' good entries out of ', len(balogh_data2),' for balogh_data photometric redshifts')
print('There are ', (bdzs_flag==0).sum(), ' good entries out of ', len(balogh_data2), ' for ultravista spectroscopic redshifts')
#Writing Data---------------------------------------------------------------
astropy.table.Table(bdpkc, names=['kc_'+band for band in bands]).write('balogh_data2_kczp', format='ascii')
astropy.table.Table(bdskc, names=['kc_'+band for band in bands]).write('balogh_data2_kczs', format='ascii')
astropy.table.Table(uvkc, names=['kc_'+band for band in bands]).write('ultravista_kczp', format='ascii')
| PiercingDan/cosmos-analysis | kcorrectscript.py | kcorrectscript.py | py | 4,459 | python | en | code | 0 | github-code | 13 |
7579618412 | #!/usr/bin/python3
'''Module for: function that divides all elements of a matrix'''
def matrix_divided(matrix, div):
    '''Divide every element of a matrix by a number.

    Args:
        matrix (list): A list of lists of integers or floats.
        div (int or float): The divisor applied to every element.

    Returns:
        A new matrix whose quotients are rounded to 2 decimal places.

    Raises:
        TypeError: On a malformed matrix, a non-numeric div, or ragged rows.
        ZeroDivisionError: When div equals 0.
    '''
    matrix_error = 'matrix must be a matrix (list of lists) of integers/floats'
    if not isinstance(matrix, list) or any(
            not isinstance(row, list) for row in matrix):
        raise TypeError(matrix_error)
    if any(not isinstance(item, (int, float))
           for row in matrix for item in row):
        raise TypeError(matrix_error)
    if not isinstance(div, (int, float)):
        raise TypeError('div must be a number')
    if div == 0:
        raise ZeroDivisionError('division by zero')
    if len({len(row) for row in matrix}) != 1:
        raise TypeError('Each row of the matrix must have the same size')
    return [[round(item / div, 2) for item in row] for row in matrix]
| janymuong/alx-higher_level_programming | 0x07-python-test_driven_development/2-matrix_divided.py | 2-matrix_divided.py | py | 1,445 | python | en | code | 0 | github-code | 13 |
14606304607 | from param import *
from sensors import GPS
import Adafruit_BBIO.GPIO as GPIO
import time
import csv
import math
import traceback
# GPS sample time
sample_time = 1.0/gps_dataUpdateRate
# Create GPS object
gps = GPS()
# Setup CSV file
file_name = raw_input('Input the name of the file where the path will be recorded: ')
timestr = time.strftime("%Y%m%d-%H%M%S")
csv_path = './paths/%s-%s.csv' % (file_name,timestr)
results_gps = open(csv_path, 'wb')
writer_gps = csv.writer(results_gps)
# writer_gps.writerow(('Time (s)', 'latitude', 'longitude'))
# Wait before starting experiment
print("")
for i in range(0, int(math.ceil(start_up_interval))):
time.sleep(1)
print("Please start walking the bike in %is" % (int(math.ceil(start_up_interval)) - i))
print("")
# Initialize lat/lon
lat_measured_GPS_raw = 0
lon_measured_GPS_raw = 0
while lat_measured_GPS_raw == 0 or lat_measured_GPS_raw == 0:
# Get GPS position
gpspos = gps.get_position()
lat_measured_GPS_raw = gpspos[2]
lon_measured_GPS_raw = gpspos[3]
if ((lat_measured_GPS_raw >= 53) and (lat_measured_GPS_raw <= 70) and (lon_measured_GPS_raw >= 8) and (lon_measured_GPS_raw <= 26)): # The location should be in SWEDEN
window_movingAverage = 10 # Samples
lat_movingAverage = np.full(window_movingAverage,lat_measured_GPS_raw)
lon_movingAverage = np.full(window_movingAverage,lon_measured_GPS_raw)
lat_LP = np.average(lat_movingAverage)
lon_LP = np.average(lon_movingAverage)
# Write position to CSV file
writer_gps.writerow((0.0, lat_LP, lon_LP, lat_measured_GPS_raw, lon_measured_GPS_raw))
# Save start time of the recording
start_time = time.time()
while 1:
try:
# Save time of the start of the current loop
time_start_current_loop = time.time()
# Get GPS position
gpspos = gps.get_position()
lat_measured_GPS_raw = gpspos[2]
lon_measured_GPS_raw = gpspos[3]
# Check that we are in Sweden
if ((lat_measured_GPS_raw >= 53) and (lat_measured_GPS_raw <= 70) and (lon_measured_GPS_raw >= 8) and (lon_measured_GPS_raw <= 26)): # The location should be in SWEDEN
lat_movingAverage = np.append(lat_movingAverage[1:], lat_measured_GPS_raw)
lon_movingAverage = np.append(lon_movingAverage[1:], lon_measured_GPS_raw)
lat_LP = np.average(lat_movingAverage)
lon_LP = np.average(lon_movingAverage)
# Write position to CSV file
writer_gps.writerow((time.time() - start_time, lat_LP, lon_LP, lat_measured_GPS_raw, lon_measured_GPS_raw))
# Compute total time for current loop
loop_time = time.time() - time_start_current_loop
# Sleep to match sampling time
if loop_time < sample_time:
time.sleep(sample_time - loop_time)
except Exception as e:
print('Path file saved : %s' % (csv_path))
# e = sys.exc_info()[0]
print("Detected error :")
print(e)
print(traceback.print_exc())
exc_msg = 'Error or keyboard interrupt, aborting the experiment'
print(exc_msg) | Hannnes1/autobike | Python_backup_20220402/record_path_latlon_old.py | record_path_latlon_old.py | py | 3,139 | python | en | code | 0 | github-code | 13 |
43357078507 | from collections import deque
def solution(n, computers):
    """Count connected components ("networks") among n computers.

    computers[i][j] is truthy when computers i and j are directly linked;
    the matrix is symmetric with computers[i][i] == 1.
    """
    def bfs(start):
        queue = deque([start])
        # Mark on enqueue (the original marked on dequeue, so the same node
        # could sit in the queue several times and be re-expanded).
        visited[start] = True
        while queue:
            node = queue.popleft()
            for neighbor in range(n):
                if computers[node][neighbor] and not visited[neighbor]:
                    visited[neighbor] = True
                    queue.append(neighbor)

    visited = [False] * n
    count = 0
    for node in range(n):
        if not visited[node]:
            bfs(node)
            count += 1
    return count
| tr0up2r/coding-test | website/programmers/level3/113_network.py | 113_network.py | py | 480 | python | en | code | 0 | github-code | 13 |
11888689164 |
import numpy as np
import tensorflow as tf
class FeaturesLoss:
    """Per-sample loss: distance from a prediction's feature vector to the
    nearest template feature vector."""

    def __init__(self, templates_images, model):
        self.templates_features = self.build_templates(templates_images, model)

    def build_templates(self, templates_images, model):
        """Run each template image through *model* (inference mode) and stack
        the resulting feature vectors into a (templates, features) array."""
        templates = []
        for i in range(templates_images.shape[0]):
            image = np.expand_dims(templates_images[i], axis=0)
            templates.append(
                np.squeeze(model(image, training=False), axis=0))
        return np.array(templates)

    def __call__(self, labels, preds):
        """Return, for each row of *preds*, the Euclidean distance to its
        closest template.  `labels` is unused but kept for loss-callback
        signature compatibility."""
        losses = np.zeros(preds.shape[0])
        for i in range(preds.shape[0]):
            # Vectorized over templates (the original looped over them in
            # Python, computing one np.dot per template).
            diffs = self.templates_features - preds[i]
            losses[i] = np.sqrt((diffs * diffs).sum(axis=1)).min()
        return losses
| LotanLevy/affordance_visualization | losses.py | losses.py | py | 1,028 | python | en | code | 0 | github-code | 13 |
10591600758 | from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
import wandb
import os
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
from torchvision.utils import save_image
from torchvision.utils import make_grid
import torch.nn as nn
import torch.nn.functional as F
class Generator(nn.Module):
    """DCGAN generator: maps a latent vector to a 3x64x64 image in [-1, 1]."""

    def __init__(self, latent_size=128):
        super(Generator, self).__init__()
        layers = []
        # latent_size x 1 x 1 -> 512 x 4 x 4
        layers += [nn.ConvTranspose2d(latent_size, 512, kernel_size=4, stride=1, padding=0, bias=False),
                   nn.BatchNorm2d(512),
                   nn.ReLU(True)]
        # 512 x 4 x 4 -> 256 x 8 x 8
        layers += [nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1, bias=False),
                   nn.BatchNorm2d(256),
                   nn.ReLU(True)]
        # 256 x 8 x 8 -> 128 x 16 x 16
        layers += [nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, bias=False),
                   nn.BatchNorm2d(128),
                   nn.ReLU(True)]
        # 128 x 16 x 16 -> 64 x 32 x 32
        layers += [nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1, bias=False),
                   nn.BatchNorm2d(64),
                   nn.ReLU(True)]
        # 64 x 32 x 32 -> 3 x 64 x 64, squashed to [-1, 1]
        layers += [nn.ConvTranspose2d(64, 3, kernel_size=4, stride=2, padding=1, bias=False),
                   nn.Tanh()]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)
class Discriminator(nn.Module):
    """DCGAN discriminator: maps a 3x64x64 image to a real/fake probability."""
    def __init__(self):
        super(Discriminator, self).__init__()
        self.model = nn.Sequential(
            # in: 3 x 64 x 64

            nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.2, inplace=True),
            # out: 64 x 32 x 32

            nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2, inplace=True),
            # out: 128 x 16 x 16

            nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2, inplace=True),
            # out: 256 x 8 x 8

            nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.2, inplace=True),
            # out: 512 x 4 x 4

            nn.Conv2d(512, 1, kernel_size=4, stride=1, padding=0, bias=False),
            # out: 1 x 1 x 1

            nn.Flatten(),
            nn.Sigmoid())

    def forward(self, x):
        # Returns a (batch, 1) tensor of probabilities in (0, 1).
        validity = self.model(x)
        return validity
class GAN():
    """Training harness for a DCGAN: alternates discriminator and generator
    updates over `trainloader`, and after every epoch saves a sample grid
    (from a fixed latent batch) plus a checkpoint."""
    def __init__(self, device,
                batch_size,
                latent_size,
                discriminator,
                generator,
                trainloader):
        super(GAN, self).__init__()
        self.discriminator = discriminator
        self.generator = generator
        self.trainloader = trainloader
        self.device = device
        self.batch_size = batch_size
        self.latent_size = latent_size
        # Fixed noise so the sample grids are comparable across epochs.
        self.fixed_latent = torch.randn(64, latent_size, 1, 1, device=device)
        self.sample_dir = 'generated'
        os.makedirs(self.sample_dir, exist_ok=True)

    def train_discriminator(self, real_images, opt_d):
        """One discriminator step; returns (loss, mean real score, mean fake score)."""
        # Clear discriminator gradients
        opt_d.zero_grad()

        # Pass real images through discriminator
        real_preds = self.discriminator(real_images)
        real_targets = torch.ones(real_images.size(0), 1, device=self.device)
        real_loss = F.binary_cross_entropy(real_preds, real_targets)
        real_score = torch.mean(real_preds).item()

        # Generate fake images
        latent = torch.randn(self.batch_size, self.latent_size, 1, 1, device=self.device)
        fake_images = self.generator(latent)

        # Pass fake images through discriminator
        fake_targets = torch.zeros(fake_images.size(0), 1, device=self.device)
        fake_preds = self.discriminator(fake_images)
        fake_loss = F.binary_cross_entropy(fake_preds, fake_targets)
        fake_score = torch.mean(fake_preds).item()

        # Update discriminator weights
        loss = real_loss + fake_loss
        loss.backward()
        opt_d.step()
        return loss.item(), real_score, fake_score

    def train_generator(self, opt_g):
        """One generator step (maximise D's belief that fakes are real); returns the loss."""
        # Clear generator gradients
        opt_g.zero_grad()

        # Generate fake images
        latent = torch.randn(self.batch_size, self.latent_size, 1, 1, device=self.device)
        fake_images = self.generator(latent)

        # Try to fool the discriminator
        preds = self.discriminator(fake_images)
        targets = torch.ones(self.batch_size, 1, device=self.device)
        loss = F.binary_cross_entropy(preds, targets)

        # Update generator weights
        loss.backward()
        opt_g.step()

        return loss.item()

    def save_checkpoint(self, state, filename):
        """Serialize a checkpoint dict (model + optimizer states) to *filename*."""
        torch.save(state, filename)

    def denorm(self, img_tensors):
        """Undo the (0.5, 0.5) normalisation so images display in [0, 1]."""
        stats = (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
        return img_tensors * stats[1][0] + stats[0][0]

    def save_samples(self, index, latent_tensors, show=True):
        """Generate a sample grid from *latent_tensors*, save it under
        sample_dir, and optionally display it inline."""
        fake_images = self.generator(latent_tensors)
        fake_fname = 'generated-images-{0:0=4d}.png'.format(index)
        save_image(self.denorm(fake_images), os.path.join(self.sample_dir, fake_fname), nrow=8)
        print('Saving', fake_fname)
        if show:
            fig, ax = plt.subplots(figsize=(8, 8))
            ax.set_xticks([]); ax.set_yticks([])
            ax.imshow(make_grid(fake_images.cpu().detach(), nrow=8).permute(1, 2, 0))

    def fit(self, epochs, lr, start_idx=1):
        """Train for *epochs* epochs with Adam(lr, betas=(0.5, 0.999));
        returns per-batch histories (losses_g, losses_d, real_scores, fake_scores)."""
        torch.cuda.empty_cache()

        # Losses & scores
        losses_g = []
        losses_d = []
        real_scores = []
        fake_scores = []

        # Create optimizers
        opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=(0.5, 0.999))
        opt_g = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=(0.5, 0.999))

        for epoch in range(epochs):
            for i, (real_images, _) in tqdm(enumerate(self.trainloader)):
                # Train discriminator
                loss_d, real_score, fake_score = self.train_discriminator(real_images.to(self.device), opt_d)
                # Train generator
                loss_g = self.train_generator(opt_g)

                # Record losses & scores
                losses_g.append(loss_g)
                losses_d.append(loss_d)
                real_scores.append(real_score)
                fake_scores.append(fake_score)

            # Log losses & scores (last batch)
            print("Epoch [{}/{}], loss_g: {:.4f}, loss_d: {:.4f}, real_score: {:.4f}, fake_score: {:.4f}".format(
                epoch+1, epochs, loss_g, loss_d, real_score, fake_score))

            # Save generated images
            self.save_samples(epoch+start_idx, self.fixed_latent, show=False)
            self.save_checkpoint({
                'generator_dict': self.generator.state_dict(),
                'discriminator_dict': self.discriminator.state_dict(),
                'optimizer_g_dict': opt_g.state_dict(),
                'optimizer_d_dict': opt_d.state_dict(),
            }, filename=f"Compression-GAN/W-Gan/gan.pt")

        return losses_g, losses_d, real_scores, fake_scores
if __name__ == "__main__":
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = 16
stats = (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)
# Configure data loader
os.makedirs("../../data/cifar-10", exist_ok=True)
trainloader = torch.utils.data.DataLoader(
datasets.CIFAR10(
"../../data/cifar-10",
train=True,
download=True,
transform=transforms.Compose([
transforms.Resize(64),
transforms.RandomCrop(64, padding=4, padding_mode="reflect"),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(*stats)])),
batch_size=batch_size,
shuffle=False,
)
# Initialize generator and discriminator
generator = Generator().to(device)
discriminator = Discriminator().to(device)
gan = GAN(generator=generator,
discriminator=discriminator,
trainloader=trainloader,
device=device,
latent_size=128,
batch_size=batch_size,)
wandb.init(project="Vanilla-GAN", entity="harsh1729")
# Train
losses_g, losses_d, real_scores, fake_scores = gan.fit(100, 0.0002)
wandb.finish() | harshraj172/Compression-GAN | W-Gan/gan.py | gan.py | py | 9,594 | python | en | code | 0 | github-code | 13 |
class Solution:
    """LeetCode 38 "Count and Say"."""

    def nt(self, s):
        """Return the run-length "say" description of s as a list of
        alternating count/character tokens, e.g. '1211' -> ['1','1','1','2','2','1'].

        Uses itertools.groupby instead of the original manual index
        bookkeeping (which also raised IndexError on an empty string).
        """
        from itertools import groupby
        res = []
        for ch, run in groupby(s):
            res.append(str(len(list(run))))
            res.append(ch)
        return res

    # @return a string
    def countAndSay(self, n):
        """Return the n-th term of the count-and-say sequence (1-indexed)."""
        res = '1'
        for _ in range(n - 1):
            res = ''.join(self.nt(res))
        return res
| eric6356/LeetCode | countAndSay.py | countAndSay.py | py | 522 | python | en | code | 0 | github-code | 13 |
15219581756 | from multiprocessing import Process, Queue
from robot_controller import Controller
from text_parser import Parser
import pyaudio
import librosa
import pickle
import sounddevice as sd
import numpy as np
import threading
from array import array
import wave
mapping = ['tien', 'lui', 'len', 'xuong', 'trai', 'phai', 'quay', 'dung', 'thoat']
CHUNK_SIZE = 1024
MIN_VOLUME = 1000
RECORD_SECONDS = 1
RATE = 44100
CHANNELS = 2
FORMAT = pyaudio.paInt16
MODEL = pickle.load(open('./train_hmm/hmm.pk', 'rb'))
def softmax(x):
    """Return the softmax of the score vector x, shifted by its max for
    numerical stability."""
    shifted = x - np.max(x)
    exp_scores = np.exp(shifted)
    return exp_scores / np.sum(exp_scores)
def parseText(text_queue, action_queue, parser, DEBUG=True):
    '''Consume recognised words from text_queue, map them to robot actions
    via parser.apply(), and push non-None actions onto action_queue.

    Runs forever; intended as the target of a worker Process.
    NOTE(review): DEBUG is never read — presumably a leftover flag; confirm
    before removing it.
    '''
    while True:
        text = text_queue.get()
        if text is not None:
            action = parser.apply(text)
            if action is not None:
                action_queue.put(action)
def transcribe(data, fs=44100):
    """Classify a recorded utterance against the per-word HMMs in MODEL.

    Returns the word from `mapping` whose HMM scores highest, but only when
    its softmaxed score exceeds 0.8; otherwise returns None.
    """
    global MODEL
    # MFCC features transposed to (frames, coefficients); assumes librosa's
    # default coefficient count — confirm it matches what the HMMs trained on.
    mfcc = librosa.feature.mfcc(data, sr=fs, n_fft=1024, hop_length=128).T
    # Log-likelihood of the utterance under each word's HMM.
    score = [MODEL[i].score(mfcc) for i in range(len(mapping))]
    score = softmax(np.array(score))
    if max(score) > 0.8:
        idx = np.argmax(score)
        text = mapping[idx]
        # print(score[idx], end=' ')
        return text
    return None
def saveFile(filename, frames):
    """Write recorded PCM frames to *filename* as a WAV file, using the
    module-level CHANNELS / FORMAT / RATE recording parameters."""
    wf = wave.open(filename, 'wb')
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(pyaudio.PyAudio().get_sample_size(FORMAT))
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
    wf.close()
def record(stopped, audio_queue, text_queue):
    """Consume 1-second frame batches from audio_queue; when a batch is loud
    enough, save it to command.wav and push its transcription onto text_queue.

    NOTE(review): `stopped` is accepted but never checked — presumably meant
    as a shutdown flag; confirm.
    """
    while True:
        frames = audio_queue.get()
        isContinue = True
        for frame in frames:
            # Peak amplitude of the signed-16-bit chunk; skip silent batches.
            vol = max(array('h', frame))
            if vol >= MIN_VOLUME:
                isContinue = False
        if isContinue:
            continue
        filename = 'command.wav'
        saveFile(filename, frames)
        # Load the freshly written WAV back at its native sample rate (sr=None).
        data, fs = librosa.load(filename, sr=None)
        text_queue.put(transcribe(data, fs))
def listen(stopped, audio_queue):
    """Continuously capture RECORD_SECONDS-long batches of microphone chunks
    and push each batch (a list of raw byte frames) onto audio_queue.

    NOTE(review): `stopped` is accepted but never checked — confirm the
    intended shutdown path.
    """
    stream = pyaudio.PyAudio().open(
        format=FORMAT,
        channels=CHANNELS,
        rate=RATE,
        input=True,
        frames_per_buffer=CHUNK_SIZE,
    )
    while True:
        frames = []
        # One batch = RECORD_SECONDS worth of CHUNK_SIZE reads.
        for i in range(0, int(RATE / CHUNK_SIZE * RECORD_SECONDS)):
            data = stream.read(CHUNK_SIZE)
            frames.append(data)
        audio_queue.put(frames)
if __name__ == '__main__':
parser = Parser()
controller = Controller()
text_queue = Queue()
action_queue = Queue()
audio_queue = Queue()
stopped = threading.Event()
listen_t = threading.Thread(target=listen, args=(stopped, audio_queue))
listen_t.start()
record_t = threading.Thread(target=record, args=(stopped, audio_queue, text_queue))
record_t.start()
parser_process = Process(target=parseText, args=(
text_queue, action_queue, parser
))
parser_process.start()
controller.start()
prev_action = None
while True:
action = action_queue.get()
if action is not None:
if action == 'quit':
print('thoat')
break
else:
controller.apply(action)
prev_action = action
print(action)
elif prev_action == 'tien' or prev_action == 'lui':
controller.apply(prev_action)
# Clean up the multiprocessing process.
controller.stop()
listen_t.join()
record_t.join()
parser_process.terminate()
parser_process.join() | ductm104/project_speech_processing | src/main.py | main.py | py | 3,553 | python | en | code | 0 | github-code | 13 |
39349849965 | def quicksorting(array):
if len(array) < 2:
return array
else:
pivot = array[0]
less =[ i for i in array[1:] if i < pivot]
greater =[ i for i in array[1:] if i > pivot]
return quicksorting(less) + [pivot] + quicksorting(greater)
print (quicksorting([10, 5, 2, 3, 6, 7, 6])) | Not-user-1984/My_Stady_lvl_0 | Training_task/quick_sorting.py | quick_sorting.py | py | 322 | python | en | code | 0 | github-code | 13 |
873276257 | from termcolor import colored
from pyfiglet import figlet_format
from random import choice
import colorama
import requests
colorama.init()
txt = "DAD JOKE BY GOURAV"
text= figlet_format(txt)
print(colored(text,color="green"))
topic = input("Let me tell you a joke! Give me a topic : ")
url = "https://icanhazdadjoke.com/search"
response = requests.get(url,
headers={"Accept": "application/json"},
params={"term": topic}
)
data = response.json()
count=data["total_jokes"]
if count > 1 :
print(f"I have got {count} jokes about {topic}. Here's one: ")
print(choice(data["results"])["joke"])
elif count == 1:
print(f"I have got {count} jokes about {topic}. Here's one: ")
print(data["results"][0]['joke'])
else:
print(f"Sorry, I don't have any jokes about {topic}! Please try again.")
| gouravt38/Dad-Jokes | Dad_Joke.py | Dad_Joke.py | py | 820 | python | en | code | 0 | github-code | 13 |
70361154898 | import pymysql
from app import app
from db_config import mysql
from flask import jsonify
from flask import flash, request
from util.lastId import get_last_id
from util.sendGetResponse import send_get_response
from LoginSignUp.util.required2 import token_required
def update_days(cursor,data,res_id):
    """Overwrite the per-weekday columns of a restaurant's Day row.

    data: dict keyed by weekday name ('Monday'..'Sunday') — presumably
    availability flags; confirm against the Day schema.
    """
    # try:
    sql="UPDATE Day SET Monday=%s,Tuesday=%s,Wednesday=%s,Thursday=%s,Friday=%s,Saturday=%s,Sunday=%s WHERE restaurant_id=%s"
    values=(data['Monday'],data['Tuesday'],data['Wednesday'],data['Thursday'],data['Friday'],data['Saturday'],data['Sunday'],res_id)
    cursor.execute(sql,values)
    # except Exception as e:
    #     print("Days ",e)
def update_location(cursor,data,loc_id):
    """Update the Location row *loc_id* from a client-supplied address dict."""
    # try:
    _address=data['address']['line_1']+data['address']['line_2']
    _city=data['city']
    # zipcode is stored as NULL unless the supplied string is purely numeric.
    _zipcode=None
    if data['zipcode'].isdigit():
        _zipcode=int(data['zipcode'])
    _locality=data['locality']
    _loc_verb=data['locality_verbose']
    sql="UPDATE Location SET city=%s,zipcode=%s,locality=%s,address=%s,locality_verbose=%s WHERE id=%s"
    values=(_city,_zipcode,_locality,_address,_loc_verb,loc_id)
    cursor.execute(sql,values)
    # except Exception as e:
    #     print("LOCATION ",e," LOCATION")
def _sync_lookup_table(cursor, table, names):
    """Insert into *table* any name from *names* not already present, then
    return *names* joined as a comma-separated string.

    The three lookup tables (Highlights, Establishments, Cuisines) share the
    same (id, name) shape, so the three public helpers below — previously
    three identical copies — all delegate here.  The table name is one of
    three hard-coded constants, never user input.
    """
    cursor.execute("SELECT name FROM " + table)
    existing = [name for row in cursor.fetchall() for name in row]
    for name in set(names) - set(existing):
        cursor.execute("INSERT INTO " + table + "(name) values(%s)", name)
    return ", ".join(names)

def update_highlights(data,cursor):
    """Sync *data* into the Highlights table; return it comma-joined."""
    return _sync_lookup_table(cursor, "Highlights", data)

def update_establishments(data,cursor):
    """Sync *data* into the Establishments table; return it comma-joined."""
    return _sync_lookup_table(cursor, "Establishments", data)

def update_cuisines(data,cursor):
    """Sync *data* into the Cuisines table; return it comma-joined."""
    return _sync_lookup_table(cursor, "Cuisines", data)
def update_restaurant_table(cursor,data,res_id):
    """Update the main Restaurant row *res_id* from the request payload.

    Multi-valued fields (cuisines, establishment, highlights) are first
    synced into their lookup tables and stored denormalised as
    comma-joined strings.
    """
    # try:
    # int("0" + s): maps an empty string to 0 instead of raising ValueError.
    _ave_cost=int("0"+data['average_cost_for_two'])
    _cuisines=update_cuisines(data['cuisines'],cursor)
    _establishment=update_establishments(data['establishment'],cursor)
    _highlights=update_highlights(data['highlights'],cursor)
    _name=data['name']
    _phone=data['phone']['std']+" , "+data['phone']['number']
    _thumb=data['thumb']
    _timings=data['timings']
    _opening_status=data['opening_status']
    _email=data['email']
    _website=data['website']
    _capacity=int("0"+data['capacity'])
    sql="""UPDATE Restaurant SET
    name=%s,email=%s,average_cost_for_two=%s,cuisines=%s,timings=%s,establishment=%s,highlights=%s,thumb=%s,phone_numbers=%s,capacity=%s,opening_status=%s,website=%s
    WHERE id=%s"""
    values=(_name,_email,_ave_cost,_cuisines,_timings,_establishment,_highlights,_thumb,_phone,_capacity,_opening_status,_website,res_id)
    cursor.execute(sql,values)
    # except Exception as e:
    #     print("resta",e,"resta")
def update_slot(cursor, data, res_id):
    """Replace all booking slots of restaurant *res_id* with the given list
    of {'start_time', 'end_time'} dicts."""
    # Drop the old slots, then insert the new set one row at a time.
    cursor.execute("DELETE FROM Slot WHERE restaurant_id=%s", res_id)
    insert_sql = "INSERT INTO Slot(restaurant_id,start_time,end_time) VALUES(%s,%s,%s)"
    for slot in data:
        cursor.execute(insert_sql, (res_id, slot['start_time'], slot['end_time']))
@app.route('/api/restaurants',methods=['PUT'])
@token_required
def update_restaurant(current_user):
    """PUT /api/restaurants — update the authenticated restaurant's profile
    (location, main row, weekdays, slots) from the JSON body, in one commit."""
    conn = None
    cursor = None
    try:
        data = request.json
        conn = mysql.connect()
        cursor = conn.cursor()
        res_id = current_user['id']
        # BUG FIX: cursor.execute() returns the affected-row count, not the
        # selected value — the original passed that count to update_location.
        # Fetch the actual location_id from the result row instead.
        cursor.execute("SELECT location_id FROM Restaurant WHERE id=%s", res_id)
        loc_id = cursor.fetchone()[0]
        update_location(cursor, data[0]['location'], loc_id)
        update_restaurant_table(cursor, data[0], res_id)
        update_days(cursor, data[0]['days'], res_id)
        update_slot(cursor, data[0]['slots'], res_id)
        conn.commit()
        return send_get_response(data, "No header")
    except Exception as e:
        print(e)
        resp = jsonify("ERROR")
        resp.status_code = 500
        return resp
    finally:
        # Guard: connect()/cursor() may have failed before these were bound,
        # which previously raised NameError here and masked the real error.
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
| garganshul108/BookMyTable | BackEnd/API/Restaurant/update.py | update.py | py | 4,451 | python | en | code | 0 | github-code | 13 |
15912902356 | from __future__ import absolute_import
import functools
import time
from mom import codec
__author__ = "yesudeep@google.com (Yesudeep Mangalapilly)"
__all__ = [
"cert_time_to_seconds",
"der_to_pem",
"der_to_pem_certificate",
"der_to_pem_private_key",
"der_to_pem_private_rsa_key",
"der_to_pem_public_key",
"pem_to_der",
"pem_to_der_certificate",
"pem_to_der_private_key",
"pem_to_der_private_rsa_key",
"pem_to_der_public_key",
]
CERT_PEM_HEADER = "-----BEGIN CERTIFICATE-----"
CERT_PEM_FOOTER = "-----END CERTIFICATE-----"
PRIVATE_KEY_PEM_HEADER = "-----BEGIN PRIVATE KEY-----"
PRIVATE_KEY_PEM_FOOTER = "-----END PRIVATE KEY-----"
PUBLIC_KEY_PEM_HEADER = "-----BEGIN PUBLIC KEY-----"
PUBLIC_KEY_PEM_FOOTER = "-----END PUBLIC KEY-----"
RSA_PRIVATE_KEY_PEM_HEADER = "-----BEGIN RSA PRIVATE KEY-----"
RSA_PRIVATE_KEY_PEM_FOOTER = "-----END RSA PRIVATE KEY-----"
def cert_time_to_seconds(cert_time):
    """Convert an ASN1-print style timestamp to seconds past the epoch.

    The expected layout is "MON DAY 24HOUR:MINUTE:SEC YEAR GMT"
    (e.g. "Jun 15 12:00:00 2017 GMT"), as printed in certificates.

    :param cert_time:
        Time value in the certificate.
    :returns:
        Python time value (float seconds since the epoch, local time rules).
    """
    parsed = time.strptime(cert_time, "%b %d %H:%M:%S %Y GMT")
    return time.mktime(parsed)
def pem_to_der(pem_cert_string, pem_header, pem_footer):
    """
    Extracts the DER as a byte sequence out of an ASCII PEM formatted
    certificate or key.
    Taken from the Python SSL module.
    :param pem_cert_string:
        The PEM certificate or key string.
    :param pem_header:
        The PEM header to find.
    :param pem_footer:
        The PEM footer to find.
    :returns:
        The base64-decoded DER bytes between header and footer.
    :raises ValueError:
        If the (stripped) input does not start with *pem_header* or does
        not end with *pem_footer*.
    """
    # Be a little lenient.
    pem_cert_string = pem_cert_string.strip()
    if not pem_cert_string.startswith(pem_header):
        raise ValueError("Invalid PEM encoding; must start with %s"
                         % pem_header)
    if not pem_cert_string.endswith(pem_footer):
        raise ValueError("Invalid PEM encoding; must end with %s"
                         % pem_footer)
    # Slice off the armor lines and decode the base64 body.
    encoded = pem_cert_string[len(pem_header):-len(pem_footer)]
    return codec.base64_decode(encoded)
def der_to_pem(der_cert_bytes, pem_header, pem_footer):
    """
    Takes a certificate in binary DER format and returns the
    PEM version of it as a string.
    Taken from the Python SSL module.
    :param der_cert_bytes:
        A byte string of the DER.
    :param pem_header:
        The PEM header to use.
    :param pem_footer:
        The PEM footer to use.
    :returns:
        PEM-armored string: header line, base64 body wrapped at 64
        columns, and footer line, each newline-terminated.
    """
    # Does what base64.b64encode without the `altchars` argument does.
    import textwrap
    encoded = codec.base64_encode(der_cert_bytes)
    return (pem_header + "\n" +
            textwrap.fill(encoded, 64) + "\n" +
            pem_footer + "\n")
# Helper functions. Use these instead of calling pem_to_der / der_to_pem
# directly: each partial pre-binds the matching PEM header/footer pair.
pem_to_der_private_key = functools.partial(pem_to_der,
                                           pem_header=PRIVATE_KEY_PEM_HEADER,
                                           pem_footer=PRIVATE_KEY_PEM_FOOTER)
pem_to_der_private_rsa_key = functools.partial(pem_to_der,
                                               pem_header=RSA_PRIVATE_KEY_PEM_HEADER,
                                               pem_footer=RSA_PRIVATE_KEY_PEM_FOOTER)
pem_to_der_public_key = functools.partial(pem_to_der,
                                          pem_header=PUBLIC_KEY_PEM_HEADER,
                                          pem_footer=PUBLIC_KEY_PEM_FOOTER)
pem_to_der_certificate = functools.partial(pem_to_der,
                                           pem_header=CERT_PEM_HEADER,
                                           pem_footer=CERT_PEM_FOOTER)
der_to_pem_private_key = functools.partial(der_to_pem,
                                           pem_header=PRIVATE_KEY_PEM_HEADER,
                                           pem_footer=PRIVATE_KEY_PEM_FOOTER)
der_to_pem_private_rsa_key = functools.partial(der_to_pem,
                                               pem_header=RSA_PRIVATE_KEY_PEM_HEADER,
                                               pem_footer=RSA_PRIVATE_KEY_PEM_FOOTER)
der_to_pem_public_key = functools.partial(der_to_pem,
                                          pem_header=PUBLIC_KEY_PEM_HEADER,
                                          pem_footer=PUBLIC_KEY_PEM_FOOTER)
der_to_pem_certificate = functools.partial(der_to_pem,
                                           pem_header=CERT_PEM_HEADER,
                                           pem_footer=CERT_PEM_FOOTER)
| gorakhargosh/mom | mom/security/codec/pem/__init__.py | __init__.py | py | 4,490 | python | en | code | 37 | github-code | 13 |
18157680264 | """Day03 - puzzle solutions for day 03."""
def load_data(path: str) -> list[str]:
    """Read *path* (ASCII text) and return its lines stripped of trailing whitespace."""
    with open(path, encoding="ascii") as handle:
        return [line.rstrip() for line in handle]
def part1(input):
sumOfItemPriorities = 0
for line in input:
firstComp = line[0:int(len(line)/2)]
secondComp = line[int(len(line)/2):len(line)]
wrongItemType = ''.join(set(firstComp).intersection(secondComp))
sumOfItemPriorities += itemPriorities.index(wrongItemType) + 1
return sumOfItemPriorities
def part2(input):
sumOfBadgePriorities = 0
for group in range(1, int(len(input)/3)+1):
badgeItemType = ''.join(set(
''.join(set(input[(group*3)-3]).intersection(input[(group*3)-2]))
).intersection(input[(group*3)-1]))
sumOfBadgePriorities += itemPriorities.index(badgeItemType.strip()) + 1
return sumOfBadgePriorities
if __name__ == "__main__":
itemPriorities = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z",
"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]
data = load_data("3.txt")
print(f'Part 1 answer: {part1(data)}')
print(f'Part 2 answer: {part2(data)}') | Sfera-IT/adventofcode2022 | maramazza/3/3.py | 3.py | py | 1,412 | python | en | code | 2 | github-code | 13 |
13670822233 | DOCUMENTATION = r"""
---
module: iam_server_certificate_info
version_added: 1.0.0
short_description: Retrieve the information of a server certificate
description:
- Retrieve the attributes of a server certificate.
author:
- "Allen Sanabria (@linuxdynasty)"
options:
name:
description:
- The name of the server certificate you are retrieving attributes for.
type: str
extends_documentation_fragment:
- amazon.aws.common.modules
- amazon.aws.region.modules
- amazon.aws.boto3
"""
EXAMPLES = r"""
- name: Retrieve server certificate
community.aws.iam_server_certificate_info:
name: production-cert
register: server_cert
- name: Fail if the server certificate name was not found
community.aws.iam_server_certificate_info:
name: production-cert
register: server_cert
failed_when: "{{ server_cert.results | length == 0 }}"
"""
RETURN = r"""
server_certificate_id:
description: The 21 character certificate id
returned: success
type: str
sample: "ADWAJXWTZAXIPIMQHMJPO"
certificate_body:
description: The asn1der encoded PEM string
returned: success
type: str
sample: "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----"
server_certificate_name:
description: The name of the server certificate
returned: success
type: str
sample: "server-cert-name"
arn:
description: The Amazon resource name of the server certificate
returned: success
type: str
sample: "arn:aws:iam::123456789012:server-certificate/server-cert-name"
path:
description: The path of the server certificate
returned: success
type: str
sample: "/"
expiration:
description: The date and time this server certificate will expire, in ISO 8601 format.
returned: success
type: str
sample: "2017-06-15T12:00:00+00:00"
upload_date:
description: The date and time this server certificate was uploaded, in ISO 8601 format.
returned: success
type: str
sample: "2015-04-25T00:36:40+00:00"
"""
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
def get_server_certs(iam, name=None):
    """Retrieve the attributes of a server certificate if it exists or all certs.
    Args:
        iam (botocore.client.IAM): The boto3 iam instance.
    Kwargs:
        name (str): The name of the server certificate.
    Returns:
        dict: mapping of certificate name to its attribute dict; empty on
        any ClientError (best-effort lookup).
    Basic Usage:
        >>> import boto3
        >>> iam = boto3.client('iam')
        >>> name = "server-cert-name"
        >>> results = get_server_certs(iam, name)
        {
            "upload_date": "2015-04-25T00:36:40+00:00",
            "server_certificate_id": "ADWAJXWTZAXIPIMQHMJPO",
            "certificate_body": "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----",
            "server_certificate_name": "server-cert-name",
            "expiration": "2017-06-15T12:00:00+00:00",
            "path": "/",
            "arn": "arn:aws:iam::123456789012:server-certificate/server-cert-name"
        }
    """
    results = dict()
    try:
        if name:
            server_certs = [iam.get_server_certificate(ServerCertificateName=name)["ServerCertificate"]]
        else:
            server_certs = iam.list_server_certificates()["ServerCertificateMetadataList"]
        for server_cert in server_certs:
            # The list call returns only metadata; fetch the full cert body.
            if not name:
                server_cert = iam.get_server_certificate(ServerCertificateName=server_cert["ServerCertificateName"])[
                    "ServerCertificate"
                ]
            cert_md = server_cert["ServerCertificateMetadata"]
            results[cert_md["ServerCertificateName"]] = {
                "certificate_body": server_cert["CertificateBody"],
                "server_certificate_id": cert_md["ServerCertificateId"],
                "server_certificate_name": cert_md["ServerCertificateName"],
                "arn": cert_md["Arn"],
                "path": cert_md["Path"],
                "expiration": cert_md["Expiration"].isoformat(),
                "upload_date": cert_md["UploadDate"].isoformat(),
            }
    except botocore.exceptions.ClientError:
        # Deliberate swallow: a missing certificate or an access error is
        # reported to the caller as an empty result set.
        pass
    return results
def main():
    """Ansible module entry point: look up IAM server certificate(s) and exit."""
    argument_spec = dict(
        name=dict(type="str"),
    )
    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    try:
        iam = module.client("iam")
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        module.fail_json_aws(e, msg="Failed to connect to AWS")
    # With no name, get_server_certs returns every certificate.
    cert_name = module.params.get("name")
    results = get_server_certs(iam, cert_name)
    module.exit_json(results=results)
if __name__ == "__main__":
main()
| ansible-collections/community.aws | plugins/modules/iam_server_certificate_info.py | iam_server_certificate_info.py | py | 4,833 | python | en | code | 174 | github-code | 13 |
5210242311 | """
Given a string s, return the longest palindromic substring in s.
Example 1:
Input: s = "babad"
Output: "bab"
Note: "aba" is also a valid answer.
Example 2:
Input: s = "cbbd"
Output: "bb"
Example 3:
Input: s = "a"
Output: "a"
Example 4:
Input: s = "ac"
Output: "a"
Constraints:
1 <= s.length <= 1000
s consist of only digits and English letters (lower-case and/or upper-case),
"""
class Solution:
    """
    Start by checking for a few edge cases. Either the string is 1 long, the string is two identical characters or the string is two different characters. These must be done seperately due to out of bound index concerns
    Create a queue with the two possible starting points and a array that stores the index of the longest substring that defaults to have a value of nothing.
    For every coordinate in the queue check if it is a palindrome and expand until it is the largest possible palindrome from this start point. If these compare these coordinates to the greatest and keep the one with the greatest difference.
    Once all starting points have been checked return the substring with the coordinates stored in longest.
    """
    def longestPalindrome(self, s: str) -> str:
        # Edge cases: length 1, or length 2 handled without the queue below.
        length = len(s)
        if length == 1 or (s[0] != s[1] and length == 2):
            return s[0]
        elif length == 2:
            return s
        longest = [0, 0]
        # [0,1] seeds the even-length centers, [0,2] the odd-length ones;
        # the loop appends the shifted pair for every position.
        queue = [[0,1],[0,2]]
        for coords in queue:
            start = coords[0]
            end = coords[1]
            if end < length - 1:
                queue.append([start + 1, end + 1])
            # Expand outward while the characters keep matching.
            if s[start] == s[end]:
                while start > 0 and end < length - 1 and s[start - 1] == s[end + 1]:
                    start -= 1
                    end += 1
                if end - start > longest[1] - longest[0] and s[start] == s[end]:
                    longest = [start, end]
        return s[longest[0]:longest[1] + 1]
    """
    Make a queue with every possible substring until you find a substring that is a palindrome.
    """
    def checkIfPalindrome(self, s: str) -> bool:
        # Two-pointer scan from both ends (helper for the failed attempt).
        i = 0
        j = len(s) - 1
        while i < j:
            if s[i] != s[j]:
                return False
            i += 1
            j -= 1
        return True
    def longestPalindromeFailed(self, s: str) -> str:
        # Kept for reference: BFS over substrings, too slow for large inputs.
        longest = f"{s[0]}"
        cache = set()
        queue = [s]
        for curString in queue:
            if curString[0] == curString[-1]:
                if self.checkIfPalindrome(curString):
                    return curString
            if len(curString) > 1:
                if curString[1:] not in cache:
                    queue.append(curString[1:])
                    cache.add(curString[1:])
                if curString[:-1] not in cache:
                    queue.append(curString[:-1])
                    cache.add(curString[:-1])
        return longest
30182526521 | from proton.reactor import Reactor
from datawire import Agent, Container, Linker, Tether, Processor
import common
class BizLogic(object):
    """Barker business logic agent: fans each bark out to its audience.

    Periodically re-reads the user table and, on every incoming bark,
    forwards the message to the inboxes of the author, their followers and
    anyone @mentioned in the text.
    """
    def __init__(self, args):
        self.host = args.host
        self.port = args.port
        self.tether = Tether(None, "//%s/bizlogic" % self.host, None,
                             host=self.host, port=self.port,
                             agent_type="bizlogic", policy="ordered")
        self.users = {}
        self.user_reread_period = 30 # seconds
        self.linker = Linker()
        self.container = Container(Processor(self))
        self.container[self.tether.agent] = Agent(self.tether)
    def on_reactor_init(self, event):
        # Start listening, register the tether and schedule the first reload.
        event.reactor.acceptor(self.host, self.port, self.container)
        self.tether.start(event.reactor)
        event.reactor.schedule(0, self)
        self.linker.start(event.reactor)
    def on_timer_task(self, event):
        # Refresh the user table and re-arm the timer.
        self.users = common.load_data("users.pickle")
        event.reactor.schedule(self.user_reread_period, self)
    def on_message(self, event):
        # Fan a bark out to mentions, followers and the author (deduped).
        bark = common.Bark(*event.message.body)
        words = bark.content.split()
        mentions = [word[1:] for word in words if word.startswith("@")]
        user = self.users[bark.user]
        followers = user.getFollowers(self.users)
        targets = set(mentions + followers + [bark.user])
        for target in targets:
            sender = self.linker.sender("//%s/inbox/%s" % (self.host, target))
            sender.send(event.message.body)
from argparse import ArgumentParser
def main():
    """Parse host/port arguments and run the BizLogic reactor loop."""
    parser = ArgumentParser()
    parser.add_argument("-n", "--host", default="127.0.0.1", help="network hostname")
    parser.add_argument("-p", "--port", default="5680", help="network port")
    args = parser.parse_args()
    Reactor(BizLogic(args)).run()
main()
| datawire/datawire-common | barker/bizlogic.py | bizlogic.py | py | 1,873 | python | en | code | 2 | github-code | 13 |
17351740316 | import torch, glob, os
def checkpoint_restore(model,exp_name,name2,use_cuda=True,epoch=0):
    """Load model weights from a checkpoint file.

    With epoch > 0 the exact file '<exp_name>-<epoch:09d>-<name2>.pth' must
    exist; otherwise the lexically-latest matching checkpoint is used (the
    zero-padded epoch makes lexical order equal numeric order).
    Returns the epoch to resume from (restored epoch + 1, or 1 when no
    checkpoint was found).
    """
    # Load on CPU, then move back to GPU afterwards if requested.
    if use_cuda:
        model.cpu()
    if epoch>0:
        f=exp_name+'-%09d-'%epoch+name2+'.pth'
        assert os.path.isfile(f)
        print('Restore from ' + f)
        model.load_state_dict(torch.load(f))
    else:
        f=sorted(glob.glob(exp_name+'-*-'+name2+'.pth'))
        if len(f)>0:
            f=f[-1]
            print('Restore from ' + f)
            model.load_state_dict(torch.load(f))
            # Parse the 9-digit epoch back out of the filename.
            epoch=int(f[len(exp_name)+1:-len(name2)-5])
    if use_cuda:
        model.cuda()
    return epoch+1
def is_power2(num):
    """Return True iff *num* is a power of two (1, 2, 4, ...); 0 is not."""
    if num == 0:
        return False
    # A power of two has exactly one set bit, so n & (n-1) clears it to 0.
    return (num & (num - 1)) == 0
def checkpoint_save(model,exp_name,name2,epoch, use_cuda=True):
    """Save the model as '<exp_name>-<epoch:09d>-<name2>.pth'.

    The state dict is saved from CPU (then the model is moved back to GPU
    if requested).  The previous epoch's file is deleted unless its epoch
    number is a power of two, keeping a sparse history.
    """
    f=exp_name+'-%09d-'%epoch+name2+'.pth'
    model.cpu()
    torch.save(model.state_dict(),f)
    if use_cuda:
        model.cuda()
    #remove previous checkpoints unless they are a power of 2 to save disk space
    epoch=epoch-1
    f=exp_name+'-%09d-'%epoch+name2+'.pth'
    if os.path.isfile(f):
        if not is_power2(epoch):
            os.remove(f)
| Benzlxs/spconv_scannet | spconv/util.py | util.py | py | 1,112 | python | en | code | 2 | github-code | 13 |
73606120016 | # %%
import pandas as pd
import requests as rq
import time
import os
import hashlib
import hmac
from apikey_bitmex import API_KEY, API_SECRET
BASE_URL = 'https://www.bitmex.com'
DATE_FROM = pd.Timestamp('2022-01-30')
DATE_TILL = pd.Timestamp('2022-08-04')
SYMBOLS = ['XBTUSD', 'ETHUSD']
PATH = os.path.abspath(os.path.dirname(__file__) + '../../../ticks/bitmex')
DEFAULT_WAIT = 0.5
# %%
def get_chunk(symbol, date, start):
    """Fetch up to 1000 trades for *symbol* on *date*, starting at offset *start*.

    Signs the request with the BitMEX api-expires/api-key/api-signature
    headers (HMAC-SHA256 over method + query + expiry).  Returns the parsed
    JSON list on HTTP 200, or None on any API or network error.
    """
    st = date.strftime('%Y-%m-%d')
    et = (date + pd.Timedelta(days=1)).strftime('%Y-%m-%d')
    try:
        query = f'/api/v1/trade?symbol={symbol}&count=1000&start={start}&reverse=false&startTime={st}&endTime={et}'
        # Signature expires 5 seconds from now.
        expires = str(int(round(time.time()) + 5))
        message = bytes('GET' + query + expires, 'utf-8')
        signature = hmac.new(bytes(API_SECRET, 'utf-8'), message, digestmod=hashlib.sha256).hexdigest()
        headers= {
            'api-expires': expires,
            'api-key': API_KEY,
            'api-signature': signature
        }
        res = rq.get(BASE_URL + query, headers=headers)
        json = res.json()
        if res.status_code == 200:
            print(f'\r{symbol}:{st}:{start} -> {len(json)} rows', end='')
            return json
        else:
            msg = json['error']['message']
            print(f'\r{symbol}:{st}:{start} -> {msg}', end='')
            return None
    except Exception as e:
        print(f'\r{symbol}:{st}:{start} -> error: {e}', end='')
        return None
# %%
def get_day(symbol, date):
    """Download all trades for *symbol* on *date*, paging through the API.

    Keeps requesting chunks until an empty chunk signals end of day.
    On an error the wait time grows (bounded exponential backoff) and the
    function gives up after 10 consecutive failures, returning None;
    otherwise it returns the full list of trade dicts.
    """
    wait_time = DEFAULT_WAIT
    err_cnt = 0
    res = []
    start = 0
    ds = date.strftime('%Y-%m-%d')
    while True:
        chunk = get_chunk(symbol, date, start)
        # Idiom fix: identity test against None instead of `== None`.
        if chunk is None:
            err_cnt += 1
            wait_time = min(wait_time + 20, wait_time * 2)
        else:
            err_cnt = 0
            wait_time = DEFAULT_WAIT
            res.extend(chunk)
            start += len(chunk)
            if len(chunk) == 0: # no more data for that day
                print(f'\r{symbol}:{ds} -> {start} rows.          ')
                return res
        if err_cnt >= 10:
            print(f'\r{symbol}:{ds} -> too many errors.          ')
            return None
        time.sleep(wait_time)
# %%
def make_df(items):
    """Normalize raw trade records into a tidy DataFrame.

    Drops duplicate trades (by 'trdMatchID'), parses timestamps, encodes
    side as +1 ('Buy') / -1 (anything else) and keeps only the columns
    timestamp/price/size/side.
    """
    frame = pd.DataFrame(items).drop_duplicates(subset='trdMatchID')
    frame['timestamp'] = pd.to_datetime(frame['timestamp'])
    frame['side'] = frame['side'].map(lambda side: 1 if side == 'Buy' else -1)
    return frame[['timestamp', 'price', 'size', 'side']]
# %%
def save_day(symbol, date, items):
    """Write one day of trades to '<PATH>/<symbol>/<YYYY-MM-DD>.csv'."""
    ds = date.strftime('%Y-%m-%d')
    filename = f'{PATH}/{symbol}/{ds}.csv'
    df = make_df(items)
    df.to_csv(filename, date_format='%Y-%m-%d %H:%M:%S.%f', index=False)
# %%
# Ensure one output directory per symbol, then walk the date range a day at
# a time, downloading and saving each symbol's trades.
# NOTE(review): get_day can return None after repeated errors, which would
# make save_day/make_df fail — confirm whether that case needs handling.
for symbol in SYMBOLS:
    os.makedirs(f'{PATH}/{symbol}', exist_ok=True)
date = DATE_FROM
while date <= DATE_TILL:
    for symbol in SYMBOLS:
        items = get_day(symbol, date)
        save_day(symbol, date, items)
    date = date + pd.Timedelta(days=1)
# %%
| bellerofonte/skillfactory-dst-50 | final/src/history/hst-bitmex.py | hst-bitmex.py | py | 2,999 | python | en | code | 0 | github-code | 13 |
34131806897 | import pandas as pd
import numpy as np
from sklearn.linear_model import Lasso, LassoCV
from sklearn.model_selection import train_test_split
from scipy.stats import pearsonr
from collections import Counter
from tqdm import tqdm
import utils
import gnk_model
"""
Code for running LASSO experiments on empirical fitness functions, the results of
which are shown in Figure 4D.
"""
def run_lasso_experiment(X, y, alpha, ns, savefile, save_example_at=None, 
                         example_savefile=None, replicates=50):
    """
    Runs the LASSO algorithm for a given data set (X, y), regularization parameter (alpha),
    and ns, a list of the number of data points to subsample from the data. At each value in
    ns, data are randomly subsampled and and the LASSO algorithm is run on the subsampled 
    data. This process is repeated a number of times given by 'replicates'. For each 
    replicate at each value in ns, the mse and pearson correlation between the predicted
    and true y values are saved in a dictionary, which is also returned.
    """
    ns = np.array(np.sort(list(ns))).astype(int)
    # One (sample size, replicate) cell per metric.
    mse = np.zeros((len(ns), replicates))
    pearson = np.zeros((len(ns), replicates))
    print("Running LASSO tests...")
    for i, n in enumerate(tqdm(ns)):
        for j in tqdm(range(replicates)):
            model = Lasso(alpha=alpha)
            # Fit on a random subsample of n points, evaluate on the full set.
            X_train, _, y_train, _ = train_test_split(X, y, train_size=n)
            model.fit(X_train, y_train)
            pred = model.predict(X)
            pearson[i, j] = pearsonr(y, pred)[0]
            mse[i, j] = np.mean((pred-y)**2)
            # Optionally save one (y, prediction) example at a chosen n.
            if save_example_at is not None and n == save_example_at and j==0:
                np.save(example_savefile, np.array([y, pred]))
    results_dict = {'n': ns, 'pearson': pearson, 'mse': mse, 'alpha': alpha}
    np.save(savefile, results_dict)
    return results_dict
def determine_alpha(X, y, n, replicates=10):
    """
    Determines the optimal regularization parameter for n data points randomly subsampled from
    a given data set (X, y).  Runs LassoCV on `replicates` independent
    subsamples and returns the alpha selected most often (ties broken by
    Counter.most_common order).
    """
    alphas = [5e-8, 1e-8, 5e-7, 1e-7, 5e-6, 1e-6, 5e-5, 1e-5, 5e-4, 1e-4, 5e-3, 1e-3]
    opt_vals = np.zeros(replicates)
    for j in range(replicates):
        model = LassoCV(alphas=alphas, n_jobs=10)
        X_train, _, y_train, _ = train_test_split(X, y, train_size=n)
        model.fit(X_train, y_train)
        opt_vals[j] = model.alpha_
    # Majority vote over the replicate-selected alphas.
    cts = Counter(opt_vals)
    opt_alpha = cts.most_common(1)[0][0]
    return opt_alpha
class Fuctura():
    """Student record for the Fuctura course: name, enrolment number and contacts."""
    def __init__(self, nome, matricula, telefone, email):
        self.nome = nome
        self.matricula = matricula
        self.telefone = telefone
        self.email = email

    def __repr__(self):
        # Debug-friendly representation (additive; no caller relies on it).
        return f"Fuctura(nome={self.nome!r}, matricula={self.matricula!r})"
# Demo: create two students and print their names when the module runs.
aluno1 = Fuctura('André', '123', '987648036', 'andregabriel_lima@hotmail.com.br')
print(aluno1.nome)
aluno2 = Fuctura('Bianca', '124', '997330372', 'biancamarques@gmail')
print(aluno2.nome)
| AndreGabrielLima/aulasDePython1 | class2.py | class2.py | py | 398 | python | pt | code | 1 | github-code | 13 |
998041337 | from django.contrib import admin
from django.urls import path
from . import views
from .views import detalhe
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.home, name='home'),
path('links', views.links, name='links'),
path('departamento', views.departamento, name='departamento'),
path('lojas', views.lojas, name='lojas'),
path('empresa_avenida', views.empresa_avenida, name='empresa_avenida'),
path('empresa_giovanna', views.empresa_giovanna, name='empresa_giovanna'),
path('empresa_cd', views.empresa_cd, name='empresa_cd'),
path('empresa_esc', views.empresa_esc, name='empresa_esc'),
path('teste', views.teste, name='teste'),
path('etiqueta_correio', views.etiqueta_correio, name='etiqueta_correio'),
path('filtro/<int:loja>/', views.filtro, name='filtro'),
path('detalhe/<int:loja>', detalhe.as_view(), name='detalhe'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | CleverssonHames/intranetgav | urls.py | urls.py | py | 1,024 | python | es | code | 0 | github-code | 13 |
7125320647 | import openslide
from PIL import Image
from math import ceil
import os
from os import listdir
from os.path import isfile, join, isdir
import glob
def get_image_paths(folder):
    """Return full paths of the regular files in *folder*, excluding macOS '.DS_Store'."""
    paths = []
    for entry in listdir(folder):
        full = join(folder, entry)
        if isfile(full):
            paths.append(full)
    ds_store = join(folder, '.DS_Store')
    if ds_store in paths:
        paths.remove(ds_store)
    return paths
def get_subfolder_paths(folder):
    """Return full paths of the subdirectories in *folder*, skipping '.DS_Store' entries."""
    subfolder_paths = [join(folder, f) for f in listdir(folder) if (isdir(join(folder, f)) and '.DS_Store' not in f)]
    # NOTE(review): this branch is unreachable — the comprehension above
    # already excludes any entry containing '.DS_Store'.
    if join(folder, '.DS_Store') in subfolder_paths:
        subfolder_paths.remove(join(folder, '.DS_Store'))
    return subfolder_paths
def get_num_horizontal_positions(input_folder):
    """Count distinct horizontal tile indices among patch files in *input_folder*.

    Patch filenames follow '<name>_<x>_<y>.<ext>'; the second underscore
    field is the horizontal (x) index.
    """
    horizontal_positions = []
    image_paths = get_image_paths(input_folder)
    for image_path in image_paths:
        x_increment = int(image_path.split('/')[-1].split('.')[0].split('_')[1])
        horizontal_positions.append(x_increment)
    return len(set(horizontal_positions))
def get_num_vertical_positions(input_folder):
    """Count distinct vertical tile indices among patch files in *input_folder*.

    Patch filenames follow '<name>_<x>_<y>.<ext>'; the third underscore
    field is the vertical (y) index.
    """
    vertical_positions = []
    image_paths = get_image_paths(input_folder)
    for image_path in image_paths:
        # Renamed from the copy-pasted `x_increment`: this field is the y index.
        y_increment = int(image_path.split('/')[-1].split('.')[0].split('_')[2])
        vertical_positions.append(y_increment)
    return len(set(vertical_positions))
def output_repieced_image(input_folder, output_image_path, window_size, compression_factor):
    """Stitch the compressed tile images in *input_folder* back into one image.

    Tile positions come from the '_<x>_<y>' fields of each filename; each
    tile is pasted at (x, y) * compressed window size.  The overall canvas
    size accounts for the (possibly smaller) edge tiles.
    """
    Image.MAX_IMAGE_PIXELS = 1e10
    compressed_window_size = int(window_size / compression_factor)
    num_horizontal_positions = get_num_horizontal_positions(input_folder)
    num_vertical_positions = get_num_vertical_positions(input_folder)
    image_paths = get_image_paths(input_folder)
    images = map(Image.open, image_paths)
    widths, heights = zip(*(i.size for i in images))
    # The smallest tiles are the right/bottom edge remainders.
    last_width = min(widths)
    last_height = min(heights)
    total_width = (num_horizontal_positions - 1)*compressed_window_size + last_width
    total_height = (num_vertical_positions - 1)*compressed_window_size + last_height
    new_im = Image.new('RGB', (total_width, total_height))
    for image_path in image_paths:
        x_increment = int(image_path.split('/')[-1].split('.')[0].split('_')[1])
        y_increment = int(image_path.split('/')[-1].split('.')[0].split('_')[2])
        image = Image.open(image_path)
        new_im.paste(image, (compressed_window_size*x_increment, compressed_window_size*y_increment))
    new_im.save(output_image_path)
def main():
    """Convert every 'input/*.svs' slide to a compressed 'output/*.tiff'.

    Each slide is read patch-by-patch (window_size pixels at level 0), each
    patch is flattened to RGB, downscaled by compression_factor and written
    to 'tmp_patches/', then the patches are stitched back together and the
    temporary tiles are removed.
    """
    window_size=224*20
    compression_factor=1.5
    images=glob.glob("input/*.svs")
    for image in images:
        img = openslide.OpenSlide(image)
        image_name=image.split('/')[-1][:-4]
        output_path=("tmp_patches")
        width, height = img.level_dimensions[0]
        increment_x = int(ceil(width / window_size))
        increment_y = int(ceil(height / window_size))
        print("converting", image_name, "with width", width, "and height", height)
        for incre_x in range(increment_x): # read the image in patches
            for incre_y in range(increment_y):
                begin_x = window_size * incre_x
                end_x = min(width, begin_x + window_size)
                begin_y = window_size * incre_y
                end_y = min(height, begin_y + window_size)
                patch_width = end_x - begin_x
                patch_height = end_y - begin_y
                patch = img.read_region((begin_x, begin_y), 0, (patch_width, patch_height))
                patch.load()
                # Drop the alpha channel against a white background.
                patch_rgb = Image.new("RGB", patch.size, (255, 255, 255))
                patch_rgb.paste(patch, mask=patch.split()[3])
                # compress the image
                patch_rgb = patch_rgb.resize((int(patch_rgb.size[0] / compression_factor), int(patch_rgb.size[1] / compression_factor)), Image.ANTIALIAS)
                # save the image
                #output_subfolder = os.path.join(output_path, image_name)
                if not os.path.exists(output_path):
                    os.makedirs(output_path)
                output_image_name = os.path.join(output_path,image_name+ '_' + str(incre_x) + '_' + str(incre_y) + '.tiff')
                print(output_image_name)
                patch_rgb.save(output_image_name, 'tiff')
        output_repieced_image(output_path, "output/"+image_name+".tiff", window_size, compression_factor)
        removing_files = glob.glob('tmp_patches/*.tiff')
        for i in removing_files:
            os.remove(i)
        print('#################################################')
if __name__ == '__main__':
main()
| GeNeHetX/Histology_ResNet50_Features | svs_conversion/convert_svs.py | convert_svs.py | py | 4,241 | python | en | code | 0 | github-code | 13 |
41766793632 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    """LeetCode 1530: count pairs of leaves whose path length <= distance.

    f(node) returns the distances from node to each leaf in its subtree;
    pairs spanning the left and right subtrees are counted at their lowest
    common ancestor via findPairs.
    """
    def findPairs(self, d1, d2):
        # Count cross-subtree leaf pairs within the allowed distance.
        # (Distances are non-negative, so abs() is redundant here.)
        for d_i in d1:
            for d_j in d2:
                if abs(d_i + d_j) <= self.d:
                    self.ans += 1
    def f(self, root):
        # Return leaf distances for this subtree; a leaf is distance 0.
        if root is None:
            return []
        if root.left is None and root.right is None:
            return [0]
        # (root is always truthy here after the None check above.)
        if root:
            d_l = [p + 1 for p in self.f(root.left)]
            d_r = [p + 1 for p in self.f(root.right)]
            self.findPairs(d_l, d_r)
            # print(d_l, d_r, root.val)
            return d_l + d_r
    def countPairs(self, root: TreeNode, distance: int) -> int:
        # Entry point: accumulate into self.ans while traversing.
        self.d = distance
        self.ans = 0
        self.f(root)
        return self.ans
| ritwik-deshpande/LeetCode | 1530-number-of-good-leaf-nodes-pairs/1530-number-of-good-leaf-nodes-pairs.py | 1530-number-of-good-leaf-nodes-pairs.py | py | 1,041 | python | en | code | 0 | github-code | 13 |
17050089094 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class CodeResult(object):
    """Alipay API value object holding a code, its token and its URL.

    Refactored: to_alipay_dict/from_alipay_dict now loop over the field
    names instead of repeating the same per-field boilerplate three times.
    The public interface (properties and both conversion methods) is
    unchanged.
    """

    # Field names shared by the dict conversions below.
    _FIELDS = ('code', 'code_token', 'code_url')

    def __init__(self):
        self._code = None
        self._code_token = None
        self._code_url = None

    @property
    def code(self):
        return self._code

    @code.setter
    def code(self, value):
        self._code = value

    @property
    def code_token(self):
        return self._code_token

    @code_token.setter
    def code_token(self, value):
        self._code_token = value

    @property
    def code_url(self):
        return self._code_url

    @code_url.setter
    def code_url(self, value):
        self._code_url = value

    def to_alipay_dict(self):
        """Serialize the set (truthy) fields, recursing into nested objects."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[name] = value.to_alipay_dict()
                else:
                    params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a CodeResult from a dict; returns None for a falsy input."""
        if not d:
            return None
        o = CodeResult()
        for name in CodeResult._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/CodeResult.py | CodeResult.py | py | 1,755 | python | en | code | 241 | github-code | 13 |
24376457332 | from reboot.common.dbutils import MySQLHelper
CNT = 1
CPU_PERCENT = 0.2
RAM_PERCENT = 50
db = MySQLHelper()
def has_alarm(ip):
    """Return (cpu_alarm, ram_alarm) for the latest CNT readings of *ip*.

    NOTE(review): the original comment claimed "CPU & RAM greater than 80%",
    but the thresholds are CPU_PERCENT=0.2 and RAM_PERCENT=50 — confirm the
    units stored in the `performs` table.
    """
    _sql = 'select cpu,ram from performs where ip=%s order by time desc limit %s'
    _args = (ip, CNT)
    count, rt_list = db.fetch_all(sql=_sql, args=_args)
    cpu = False
    ram = False
    # Any single reading over its threshold trips the corresponding alarm.
    for _cpu, _ram in rt_list:
        if _cpu > CPU_PERCENT:
            cpu = True
        if _ram > RAM_PERCENT:
            ram = True
    return cpu, ram
def monitor():
    """Check each configured host and print an HTML-ish alarm summary.

    Always returns '' (the message is only printed, not sent anywhere).
    """
    ip_list = ['10.0.2.15',]
    for ip in ip_list:
        cpu, ram = has_alarm(ip)
        content = ['<b>主機{ip}資源報警</b><br>'.format(ip=ip)]
        if cpu:
            content.append('CPU 告警!')
        if ram:
            content.append('RAM 告警!')
        # NOTE(review): strip(',') looks like a no-op — the joined string
        # contains no commas; confirm the intent.
        content = ''.join(content).strip(',')
        print(content)
    return ''
if __name__ == '__main__':
monitor()
| chenjinhui520/Python | reboot/common/monitor.py | monitor.py | py | 922 | python | en | code | 0 | github-code | 13 |
10609925127 | # coding:utf-8
"""
Filename : api.py
Role : api with pipelines hosting
@author : Sunwaee
"""
import os
import time
import fasttext
from fastapi import FastAPI
def ready_api_content():
    """
    Load the models used by the API and return them.

    Temporarily chdirs into 'system/' so the relative model paths and the
    local `pipelines` import resolve, then restores the working directory.
    Returns (language_detector, classic_pipeline); the classic pipeline is
    currently stubbed to None.
    """
    # Changing os dir
    dir_buffer = os.getcwd()
    os.chdir('system/')
    # Necessary imports (must happen after the chdir above).
    import pipelines
    # Loading models
    language_detector = fasttext.load_model('language_detector/lid.176.ftz')
    classic_pipeline = None # pipelines.run()
    # Returning in api path
    os.chdir(dir_buffer)
    return language_detector, classic_pipeline
language_detector, classic_pipeline = ready_api_content()
app = FastAPI(title='MT5 API')
@app.get(path="/classic", tags=['inference'])
async def classic(text: str = ""):
"""
Infers using classic pipeline.
:param text: text on which apply classic task
:return: dictionary which contains classic output
"""
start = time.time()
language = language_detector.predict(text=[text])
# output = classic_pipeline(inputs=text)
reponse_time = time.time() - start
return {
"input": text,
"language": language[0][0][0].replace('__label__', ''),
# "output": output,
"response time": reponse_time
}
@app.get(path='/pipeline', tags=['pipeline'])
async def pipeline(text: str = ""):
    """
    Generate a response for the given input text.
    Placeholder implementation: returns static strings until the real
    pipeline is plugged in.
    :param text: text from which to generate a response
    :return: a dictionary which contains the given response
    """
    return {
        'text': text,
        'output': 'Here is the pipeline output.',
        'features': 'Here are the features.',
        'response': 'Here is the response.'
    }
@app.get(path='/HelloWorld', tags=['test'])
async def hello():
    """Health-check endpoint: returns a static greeting."""
    return {'text': 'Hello World!'}
| DvdNss/sunwaee-mt5-api | api.py | api.py | py | 1,859 | python | en | code | 1 | github-code | 13 |
26436456730 | """Test delete_flashcards_row application use case for Flashcards entity"""
from logogram.tests.base_test import BaseTestCase
from logogram.common.execute.execute_command_fetch_data import (
ExecuteCommandFetchData)
from logogram.users.insert_rows.insert_rows import insert_user_row
from logogram.flashcards.insert_rows.insert_row import insert_flashcard_row
from django.core import management
from logogram.flashcards.delete_rows.delete_row import delete_flashcard_row
class DeleteFlashcardsTable(BaseTestCase):
    """Integration test for delete_flashcard_row against real tables.

    setUp/tearDown create and drop the schema around each test via the
    project's management commands.
    """
    def setUp(self):
        super(DeleteFlashcardsTable, self).setUp()
        management.call_command('createtables')
    def tearDown(self):
        management.call_command('droptables')
    def test_delete_flashcard(self):
        """
        Test that when you pass values to the delete_flashcard_row method for
        a user's table that the function deletes a flashcard with the specified
        value.
        """
        # Insert the owning user (id 1), then a flashcard referencing it.
        values = (
            """
            'password', 'pointer@gmail.com', 'false', 'false',
            NULL, NULL, now(), NULL
            """
        )
        insert_user_row(values)
        values = (
            """
            'Computer Science', 'Computer Science Flashcard', 1
            """
        )
        insert_flashcard_row(values)
        select_flashcard_command = (
            """
            SELECT * FROM flashcards
            WHERE id = 1;
            """
        )
        # Sanity check: the row exists before deletion...
        before_delete_data = ExecuteCommandFetchData().execute_command(
            select_flashcard_command)
        self.assertEqual(before_delete_data[0][1], 'Computer Science')
        delete_flashcard_row('1')
        # ...and is gone afterwards.
        after_delete_data = ExecuteCommandFetchData().execute_command(
            select_flashcard_command)
        self.assertEqual(after_delete_data, [])
| WinstonKamau/DatabasePlayBook | src/database_playbook/logogram/tests/flashcards/test_delete_row.py | test_delete_row.py | py | 1,826 | python | en | code | 0 | github-code | 13 |
19527920532 | import queue
from flask import Flask, jsonify,request,Blueprint
import pymongo
from bson.objectid import ObjectId
from datetime import date, datetime, timedelta
### integration
from Database.Database import Database as mydb
from flask_cors import cross_origin
from functools import wraps
import jwt
# myclient = pymongo.MongoClient("mongodb+srv://karimhafez:KojGCyxxTJXTYKYV@cluster0.buuqk.mongodb.net/twitter?retryWrites=true&w=majority", connect=True)
# mydb = myclient["Twitter_new"]
# app = Flask(__name__)
notification_by_id = Blueprint('notification_by_id', __name__)
#################
def token_required(f):
    """Decorator: require a valid 'x-access-token' JWT on the request.

    Decodes the token, loads the matching User document and passes it to
    the wrapped view as the first argument; responds 401 when the token is
    missing or invalid.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        token = None
        if 'x-access-token' in request.headers:
            token = request.headers['x-access-token']
        if not token:
            return jsonify({'message': 'Token is missing!'}), 401
        try:
            # SECURITY NOTE(review): the JWT secret is hardcoded here; it
            # should come from configuration/environment instead.
            data = jwt.decode(token, "SecretKey1911", "HS256")
            user_id = ObjectId(data['_id'])
            current_user = mydb.User.find_one({'_id': user_id})
        except:
            return jsonify({'message': 'Token is invalid!'}), 401
        return f(current_user, *args, **kwargs)
    return decorated
########################################
@notification_by_id.route('/notifications')
@cross_origin(allow_headers=['Content-Type', 'x-access-token', 'Authorization'])
@token_required
def get_by_notification_id(current_user):
    """Return one notification (by ``notification_id``) of the user given by
    ``user_id``; an empty JSON object when no notification matches.

    Responds 400 on a missing/invalid user id and 404 when the user does
    not exist.
    """
    user_id = request.args.get('user_id')
    notification_id = request.args.get('notification_id')
    # Validate presence BEFORE building the ObjectId: bson happily creates a
    # brand-new id from None, so the original late None-check never fired.
    if user_id is None:
        return jsonify({"Error message": "Please, Eneter a User ID"}), 400
    ## targeting the given user
    user_collection = mydb['User']
    try:
        query = {"_id": ObjectId(user_id)}
    except Exception:
        return jsonify({'Error Message: ': "Enter a valid user id"}), 400
    target_user_document = user_collection.find_one(query)
    if target_user_document is None:
        return jsonify({'Error Message: ': "User Not found"}), 404
    ## Getting the notification using its id
    the_notification = {}
    for notification in target_user_document['notifications']:
        notification['_id'] = str(notification['_id'])
        if notification['_id'] == notification_id:
            the_notification = notification
            break
    return jsonify(the_notification), 200
| OmarNashat01/Back-End-Twitter-Clone | Routes/notifications/get_by_notification_id.py | get_by_notification_id.py | py | 2,498 | python | en | code | 2 | github-code | 13 |
23793192606 | from flask import Flask, render_template, abort, request, jsonify
from models import *
from controllers import *
from db import create_db
import subprocess
# -----
# index
# -----
@app.route('/')
def index():
return render_template('index.html')
# --------
# about
# --------
@app.route('/about/')
def about():
return render_template('about.html')
@app.route('/api/v1.0/tests/', methods=['GET'])
def get_test_results():
    """Run the project's test suite (``make test``) and return its output.

    Returns JSON ``{'results': {'out': <stdout>, 'err': <stderr>}}``.
    """
    # NOTE(review): shell=True is acceptable only because the command string
    # is fixed; never interpolate request data into it.
    script = subprocess.Popen("make test", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    try:
        out, errs = script.communicate()
    except Exception:
        # The original killed the process but then used the never-assigned
        # `out`/`errs`, raising NameError. Kill, then reap the process and
        # collect whatever output is available.
        script.kill()
        out, errs = script.communicate()
    return jsonify({'results': {'out': out.decode(), 'err': errs.decode()}})
# --------
# location
# --------
@app.route('/location/')
@app.route('/locations/')
@app.route('/locations/<name>')
@app.route('/location/<name>')
def location_id(name=None):
if name is not None:
return render_template("route_data.html", route=Routes.get(name), pokemon=RoutePokemon.get(name), images=RouteImages.get(name), trainers=RouteTrainers.get(name), items=RouteItems.get(name))
return render_template('location.html', routes=Routes.get_all())
#=============API==========#
@app.route('/api/v1.0/locations/', methods=['GET'])
def get_locations():
    """Serialize every Routes row to a column-name dict and return as JSON."""
    serialized = [
        {column.name: getattr(route, column.name) for column in route.__table__.columns}
        for route in Routes.get_all()
    ]
    return jsonify({'locations': serialized})
@app.route('/api/v1.0/locations/<int:id>/', methods=['GET'])
def get_locations_name(id):
    """Return a single Routes row, looked up by primary key, as JSON."""
    row = Routes.get_id(id)
    as_dict = {}
    for column in row.__table__.columns:
        as_dict[column.name] = getattr(row, column.name)
    return jsonify({'location': as_dict})
'''
@app.route('/api/v1.0/locations/', methods=['POST'])
def create_locations():
if not request.json:
abort(400)
return jsonify({'locations': 'test'}), 201
@app.route('/api/v1.0/locations/<int:id>/', methods=['PUT'])
def update_locations(id):
if not request.json:
abort(400)
return jsonify({'locations': 'test'})
@app.route('/api/v1.0/locations/<int:id>/', methods=['DELETE'])
def delete_locations(id):
return jsonify({'result': True})
'''
@app.route('/about_pokemon')
def aboutPokemon():
return render_template('about_pokemon.html')
# -------
# pokemon
# -------
@app.route('/pokemon/')
@app.route('/pokemon/<name>/')
def pokemon(name=None):
if name is not None:
return render_template('pokemon_details.html', pokemon=Pokemon.get(name), moves=PokemonMoves.get_for_pokemon(name), locations=RoutePokemon.get_pokemon_routes(name), types=Types.get_pokemon_types(name), evos=Evolutions.get_pokemon_evo(name), abilities=Abilities.get_pokemon_abilities(name))
return render_template('pokemon.html', pokemon=Pokemon.get_all())
#=============API==========#
@app.route('/api/v1.0/pokemon/', methods=['GET'])
def get_pokemon():
pokemon = Pokemon.get_all()
poke_list = []
for poke in pokemon:
poke_list.append({c.name: getattr(poke, c.name) for c in poke.__table__.columns})
return jsonify({'pokemon': poke_list})
@app.route('/api/v1.0/pokemon/<int:id>/', methods=['GET'])
def get_pokemon_id(id):
pokemon = Pokemon.get_id(id)
poke_dic = {c.name: getattr(pokemon, c.name) for c in pokemon.__table__.columns}
return jsonify({'pokemon': poke_dic})
'''
@app.route('/api/v1.0/pokemon', methods=['POST'])
def create_pokemon():
if not request.json:
abort(400)
return jsonify({'pokemon': 'test'}), 201
@app.route('/api/v1.0/pokemon/<int:id>', methods=['PUT'])
def update_pokemon(id):
if not request.json:
abort(400)
return jsonify({'pokemon': 'test'})
@app.route('/api/v1.0/pokemon/<int:id>', methods=['DELETE'])
def delete_task(id):
return jsonify({'result': True})
'''
# -----
# moves
# -----
@app.route('/moves')
@app.route('/moves/<name>')
def moves(name=None):
if name is not None:
return render_template('moves.html', move=Move.get(name), pokemonLVL=PokemonMoves.get_for_level(name), pokemonTM=PokemonMoves.get_for_machine(name), pokemonEGG=PokemonMoves.get_for_egg(name), pokemonTUT=PokemonMoves.get_for_tutor(name))
# change name of the above html to move and the below one to moves
return render_template('allMoves.html', moves=Move.get_all())
#=============API==========#
@app.route('/api/v1.0/moves/', methods=['GET'])
def get_moves():
moves = Move.get_all()
moves_list = []
for move in moves:
moves_list.append({c.name: getattr(move, c.name) for c in move.__table__.columns})
return jsonify({'moves': moves_list})
@app.route('/api/v1.0/moves/<int:id>/', methods=['GET'])
def get_moves_id(id):
move = Move.get_id(id)
move_dic = {c.name: getattr(move, c.name) for c in move.__table__.columns}
return jsonify({'moves': move_dic})
'''
@app.route('/api/v1.0/moves/', methods=['POST'])
def create_moves():
if not request.json:
abort(400)
return jsonify({'moves': 'test'}), 201
@app.route('/api/v1.0/moves/<int:id>/', methods=['PUT'])
def update_moves(id):
if not request.json:
abort(400)
return jsonify({'moves': 'test'})
@app.route('/api/v1.0/moves/<int:id>/', methods=['DELETE'])
def delete_moves(id):
return jsonify({'result': True})
'''
# -----------
# politicians
# -----------
@app.route('/politicians')
def politicians():
democratic_politicians = politician_controller()
return render_template('politicians.html', politicians=democratic_politicians)
# -----
# search
# -----
@app.route('/search/<query>')
def search(query):
    """Run the search query against each model and render the results page."""
    terms = query.split()
    results = {}
    # Each model exposes search(query) -> (AND-matches, OR-matches).
    for label, model in (("pokemon", Pokemon), ("moves", Move), ("locations", Routes)):
        and_hits, or_hits = model.search(query)
        results[label] = {"and": and_hits, "or": or_hits}
    return render_template('search.html', query=query, terms=terms, results=results)
if __name__ == '__main__':
#create_db()
app.run(host="0.0.0.0", port=8000, debug=True) | RobinsonNguyen/cs373-idb | main.py | main.py | py | 6,254 | python | en | code | 0 | github-code | 13 |
38433681797 | import win32event, time
# "Vaccine" script: create/hold a named mutex — presumably the same one the
# malware checks before running (see the "already infected" message below);
# TODO confirm the mutex name against the malware sample.
mutex = win32event.CreateMutex(None, True, "WEBLAUNCHASSIST_MUTEX")
# Try to acquire the mutex with a 0 ms timeout: fail immediately if it is
# already held by another process.
result = win32event.WaitForSingleObject(mutex, 0)
if result == win32event.WAIT_OBJECT_0:
    print("Acquired the mutex, going to sleep for a minute")
    time.sleep(60)
    win32event.ReleaseMutex(mutex)
else:
    print("Something went wrong; you maybe already infected or another instance of this program is running")
mutex.Close()
| MocanuAlexandru/LabWork | Reverse Engineering/Laboratory 1/Solutions/taskbonus_vaccine.py | taskbonus_vaccine.py | py | 459 | python | en | code | 0 | github-code | 13 |
72436173777 | import json
import uuid
from tornado.web import RequestHandler
from tornado.web import Application
from tornado.ioloop import IOLoop
from tornado.options import options, define, parse_command_line
class LoginHandler(RequestHandler):
users = [{
'id': 1,
'name': 'disen',
'pwd': '123',
'last_login_device': 'Android 5.1 Oneplus5'
}]
def set_default_headers(self):
self.set_header('Access-Control-Allow-Origin', '*')
self.set_header('Access-Control-Allow-Headers', 'x-requested-with')
self.set_header('Access-Control-Allow-Methods', 'GET,POST,DELETE,PUT')
def get(self):
# 读取json数据
bytes = self.request.body
# 从请求头中读取请求上传的类型
content_type = self.request.headers.get('Content-Type')
if content_type.startswith('application/json'):
json_str = bytes.decode('utf-8')
json_data = json.loads(json_str)
resp_data = {}
login_user = None
for user in self.users:
if json_data['name'] == user['name']:
if json_data['pwd'] == user['pwd']:
login_user = user
break
if login_user:
resp_data['msg'] = 'success'
resp_data['token'] = uuid.uuid4().hex
else:
resp_data['msg'] = '查无此用户'
self.set_header('Content-Type', 'application/json')
self.write(resp_data)
else:
self.write('uploda data 必须是json类型')
def post(self, *args, **kwargs):
bytes = self.request.body
# 从请求头中获取请求上传的类型
content_type = self.request.headers.get('Content-Type')
if content_type.startswith('application/json'):
json_str = bytes.decode('utf-8')
json_data = json.loads(json_str)
name = json_data['name']
pwd = json_data[('pwd')]
phone = json_data[('phone')]
last_login_device = json_data.setdefault('last_login_device', 'PC')
resp_data = {}
if all((name, pwd)):
yonghu = {
'id': self.users[len(self.users) - 1]['id'] + 1,
'name': name,
'pwd': pwd,
'phone': phone,
'last_login_device': last_login_device
}
self.users.append(yonghu)
resp_data['msg'] = '添加成功'
resp_data['user'] = self.users
print(self.users)
else:
resp_data['msg'] = '添加失败'
self.write('填写正确的用户名或者密码')
self.set_header('Content', 'application/json')
self.write(resp_data)
else:
print('请上传正确的类型')
def put(self):
bytes = self.request.body
content_type = self.request.headers.get('Content-Type')
if content_type.startswith('application/json'):
json_str = bytes.decode('utf-8')
json_data = json.loads(json_str)
resp_data = {}
for u in self.users:
if u['id'] == int(json_data['id']):
for kay, value in json_data.items():
u[kay] = value
resp_data['msg'] = '修改成功'
resp_data['user'] = self.users
else:
resp_data['msg'] = '您输入的信息不存在,请重新输入'
self.set_header('Content-Type', 'application/json')
self.write(resp_data)
else:
self.write('请输入正确的类型')
def delete(self, *args, **kwargs):
bytes = self.request.body
content_type = self.request.headers.get('Content-Type')
if content_type.startswith('application/json'):
json_str = bytes.decode('utf-8')
json_data = json.loads(json_str)
resp_data = {}
for u in self.users:
if u['id'] == int(json_data['id']):
index = int(json_data['id']) - 1
self.users.remove(u)
resp_data['msg'] = '删除成功'
resp_data['user'] = self.users
else:
resp_data['msg'] = '请输入正确的信息'
self.set_header('Content-Type', 'application/json')
self.write(resp_data)
else:
print('请输入正确的格式')
def options(self, *args, **kwargs):
self.set_status(200)
def on_finish(self):
pass
def make_app():
return Application(
handlers=[
('/user', LoginHandler)
],
default_host=options.h)
if __name__ == '__main__':
# 绑定命令行参数
define('p', default=8000, type=int, help='绑定的port端口')
define('h', default='localhost', type=str, help='绑定的主机IP')
# 解析命令行参数
parse_command_line()
app = make_app()
app.listen(options.p)
print('Running http://%s:%s' % (options.h, options.p))
IOLoop.current().start()
| yixialei0215/microServer | other/api_server.py | api_server.py | py | 5,288 | python | en | code | 0 | github-code | 13 |
5991246147 | import pandas as pd
import numpy as np
import os
import sys
import time
import logging
from logging.handlers import RotatingFileHandler
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from config import Config
class Dataset:
def __init__(self, config):
self.config = config
self.data, self.data_column_name = self.read_data()
self.data_num = self.data.shape[0]
print("all data number:",self.data_num )
self.train_num = int(self.data_num * self.config.train_data_rate)
print("train data number:",self.train_num)
self.mean = np.mean(self.data, axis=0) # 数据的均值和方差
self.std = np.std(self.data, axis=0)
self.norm_data = (self.data - self.mean)/self.std # 归一化,去量纲
self.start_num_in_test = 0 # 测试集中前几天的数据会被删掉,因为它不够一个time_step
    def read_data(self):  # load the raw data
        """Load the raw CSV, sort it chronologically, restrict it to the
        configured date window and keep only the feature columns.

        Returns ``(values_ndarray, column_name_list)``.
        """
        init_data = pd.read_csv(self.config.train_data_path)
        init_data['datetime'] = pd.to_datetime(init_data['datetime'])
        # Sort the dataframe by time, then preview the first three records.
        init_data=init_data.sort_values(by=['datetime'])#
        init_data = init_data.reset_index(drop=True)  # rebuild the index
        print(init_data.head(n=3))
        print(init_data.tail(n=3))
        # Restrict to the training date window (string compare works because
        # the column is datetime and pandas parses the literals).
        init_data = init_data[(init_data['datetime'] > '2012-10-01') &
                      (init_data['datetime'] < '2018-10-31')]#.sort_values(by=['datetime'])
        print(init_data.head(n=3))
        init_data=init_data[init_data.columns[self.config.feature_columns]]  # keep columns by positional index
        print(init_data.head(n=3))
        return init_data.values, init_data.columns.tolist()  # .columns.tolist() yields the column names
    def get_train_and_valid_data(self):
        """Build (train_x, valid_x, train_y, valid_y) windows from the
        normalized data; labels are the target columns shifted forward by
        ``predict_future_day`` days.
        """
        feature_data = self.norm_data[:self.train_num]
        label_data = self.norm_data[self.config.predict_future_day : self.config.predict_future_day + self.train_num,
                            self.config.label_in_feature_index]   # use the data shifted a few days ahead as the label
        # NOTE(review): label_data1 (un-normalized labels) is computed but never used.
        label_data1 = self.data[self.config.predict_future_day : self.config.predict_future_day + self.train_num,
                            self.config.label_in_feature_index]   # use the data shifted a few days ahead as the label
        # Every time_step rows form one sample, e.g. rows 1-50, then rows 2-51.
        train_x = [feature_data[i:i+self.config.time_step] for i in range(self.train_num-self.config.time_step)]
        train_y = [label_data[i:i+self.config.time_step] for i in range(self.train_num-self.config.time_step)]
        train_x, train_y = np.array(train_x), np.array(train_y)
        #print(train_x.shape)
        # shuffle=False keeps chronological order in the train/valid split.
        train_x, valid_x, train_y, valid_y = train_test_split(train_x, train_y, test_size=self.config.valid_data_rate,
                                                              random_state=self.config.random_seed,
                                                              shuffle=False)   # split into train and validation sets
        return train_x, valid_x, train_y, valid_y
    def get_test_data(self, return_label_data=False):
        """Slice the held-out tail of the normalized data into non-overlapping
        time_step windows; optionally also return the matching labels.
        """
        feature_data = self.norm_data[self.train_num:]
        sample_interval = min(feature_data.shape[0], self.config.time_step)     # guard against time_step > test-set size
        self.start_num_in_test = feature_data.shape[0] % sample_interval  # leading days that do not fill a sample_interval
        time_step_size = feature_data.shape[0] // sample_interval
        # Every time_step rows form one sample, e.g. rows 1-50, then rows 2-51.
        test_x = [feature_data[self.start_num_in_test+i*sample_interval : self.start_num_in_test+(i+1)*sample_interval]
                   for i in range(time_step_size)]
        if return_label_data:       # in real deployment the test set has no label data
            label_data = self.norm_data[self.train_num + self.start_num_in_test:, self.config.label_in_feature_index]
            return np.array(test_x), label_data
        return np.array(test_x)
np.random.seed(Config.random_seed) # 设置随机种子,保证可复现
data_g = Dataset(Config) | moujin/soothsayer | dataset_stock.py | dataset_stock.py | py | 4,244 | python | en | code | 0 | github-code | 13 |
12256054290 | class DetectPPE:
def __init__(self, client, imgfilename):
self.dict = []
with open(imgfilename, 'rb') as imgfile:
self.imgbytes = imgfile.read()
self.response = client.detect_protective_equipment(Image={'Bytes': self.imgbytes},
SummarizationAttributes={'MinConfidence':80, 'RequiredEquipmentTypes':['FACE_COVER', 'HAND_COVER', 'HEAD_COVER']})
self.response_data = self.response
# if have equipment on head, it will return true.
# if someone doesn't have equipment on head, it will print his Id and return false.
def detect_hand_eqp(self):
self.right_hand_list = []
self.left_hand_list = []
self.persons = self.response_data['Persons']
for self.person in self.persons:
self.Id = self.person['Id']
self.body_parts = self.person['BodyParts']
if len(self.body_parts) == 0:
print(f'{self.Id}:No body_part detected')
else:
for self.body_part in self.body_parts:
# check have detected head.
if self.body_part['Name'] == 'RIGHT_HAND':
self.ppe_items = self.body_part['EquipmentDetections']
if not self.ppe_items:
self.right_hand_list.append(self.Id)
print(f'{self.Id}, not have right_hand_epq')
for self.ppe_item in self.ppe_items:
self.right_hand = self.ppe_item['CoversBodyPart']['Value']
if self.right_hand == 'False':
self.right_hand_list.append(self.Id)
if self.body_part['Name'] == 'LEFT_HAND':
self.ppe_items = self.body_part['EquipmentDetections']
if not self.ppe_items:
self.left_hand_list.append(self.Id)
print(f'{self.Id}, not have left_hand_epq')
for self.ppe_item in self.ppe_items:
self.left_hand = self.ppe_item['CoversBodyPart']['Value']
if self.left_hand == 'False':
self.left_hand_list.append(self.Id)
self.right_hand_set = set(self.right_hand_list)
self.left_hand_set = set(self.left_hand_list)
self.hand_set = self.right_hand_set | self.left_hand_set
#If anyone doesn't have hand_eqp, he will be in the set.
if len(self.hand_set) != 0:
return False, self.hand_set
else:
return True, self.hand_set
def detect_face_eqp(self):
self.no_eqp_list = []
self.persons = self.response_data['Persons']
for self.person in self.persons:
self.Id = self.person['Id']
self.body_parts = self.person['BodyParts']
if len(self.body_parts) == 0:
print(f'{self.Id}:No body_part detected')
else:
for self.body_part in self.body_parts:
# check have detected head.
if self.body_part['Name'] == 'FACE':
self.ppe_items = self.body_part['EquipmentDetections']
if not self.ppe_items:
self.no_eqp_list.append(self.Id)
print(f'{self.Id}, not have face_epq')
for self.ppe_item in self.ppe_items:
self.head_eqp = self.ppe_item['CoversBodyPart']['Value']
if self.head_eqp == 'False':
self.no_eqp_list.append(self.Id)
print(f'{self.Id}, not have face_epq')
if len(self.no_eqp_list) != 0:
return False, self.no_eqp_list
else:
return True, self.no_eqp_list
# if have equipment on head, it will return true.
# if someone doesn't have equipment on head, it will print his Id and return false.def detect_head_eqp(client, imgfilename):
def detect_head_eqp(self):
self.no_eqp_list = []
self.persons = self.response_data['Persons']
for self.person in self.persons:
self.Id = self.person['Id']
self.body_parts = self.person['BodyParts']
if len(self.body_parts) == 0:
print(f'{self.Id}:No body_part detected')
else:
for self.body_part in self.body_parts:
# check have detected head.
if self.body_part['Name'] == 'HEAD':
self.ppe_items = self.body_part['EquipmentDetections']
if not self.ppe_items:
self.no_eqp_list.append(self.Id)
print(f'{self.Id}, not have head_epq')
for self.ppe_item in self.ppe_items:
self.head_eqp = self.ppe_item['CoversBodyPart']['Value']
if self.head_eqp == 'False':
self.no_eqp_list.append(self.Id)
#If anyone doesn't have head_eqp, it will be append to the list.
if len(self.no_eqp_list) != 0:
return False, self.no_eqp_list
else:
return True, self.no_eqp_list
class FaceDetails:
def __init__(self, client, imgfilename):
with open(imgfilename, 'rb') as imgfile:
self.imgbytes = imgfile.read()
self.response = client.detect_faces(Image={'Bytes': self.imgbytes}, Attributes=['ALL'])
self.response_data = self.response
try:
self.rekfd = self.response_data['FaceDetails'][0]
except IndexError:
print('偵測不到臉部!')
def emotion(self):
try:
self.emotion = self.rekfd['Emotions'][0]['Type']
return self.emotion
except (IndexError, AttributeError):
print('沒偵測到臉部!')
# This function will return the number of people
def count_face(self):
self.count_face = self.response_data['FaceDetails']
return (len(self.count_face))
# This function will return bolling, if eyes are close, it return 'True'
def eyes_open(self):
self.eyes_open_list = []
for self.eop in self.response_data['FaceDetails']:
self.eyes_open = self.eop['EyesOpen']
self.eyes_open_list.append(self.eyes_open)
return self.eyes_open_list
#This function will return list
def age(self):
self.age_list = []
try:
for self.rd in self.response_data['FaceDetails']:
self.age = self.rd['AgeRange']
self.age_list.append(self.age)
except (NameError, IndexError):
print('沒有偵測到臉部!無法判斷年齡')
return self.age_list
#This function will return list
def smile(self):
self.smile_list = []
for self.sm in self.response_data['FaceDetails']:
self.smile = self.sm['Smile']
self.smile_list.append(self.smile)
return self.smile_list
#This function will return list
def mouth_open(self):
self.mouth_open_list = []
for self.mo in self.response_data['FaceDetails']:
self.mouth_open = self.mo['MouthOpen']
self.mouth_open_list.append(self.mouth_open)
return self.mouth_open_list
| welly50704/AWS-API-MODULE | AWS-API-MODULE/Recognition.py | Recognition.py | py | 8,203 | python | en | code | 1 | github-code | 13 |
1619799670 | import httplib
import traceback
import os,sys
def webscale_errorhook(excType, excValue, traceback):
    """``sys.excepthook`` replacement: report every uncaught exception by
    GET-ing the configured error API and printing its response body.

    ``ERROR_API`` / ``ERROR_API_PARAMS`` environment variables override the
    endpoint; note the params string is appended verbatim to the query string.
    """
    api = os.environ.get('ERROR_API', "api.error.technology")
    params = os.environ.get("ERROR_API_PARAMS", "")
    conn = httplib.HTTPConnection(api)  # Python 2 httplib (http.client in Py3)
    conn.request("GET", "/?lang=python&full=true"+ params)
    resp = conn.getresponse()
    print(resp.read())
    excValue = None  # NOTE(review): rebinds the local name only; has no effect
sys.excepthook = webscale_errorhook
| euank/error.technology | pythonlib/errortech.py | errortech.py | py | 423 | python | en | code | 1 | github-code | 13 |
15495575751 | from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
setup(
name='nglutils',
version='0.1.1',
description='NGL utils for trajectory visualization',
long_description=readme,
author='Simon Grosse-Holz',
url='https://github.com/mirnylab/nglutils',
packages=find_packages(exclude=('tests', 'docs')),
install_requires=[
'matplotlib', 'numpy', 'mdtraj', 'nglview'
# Jupyter and widgets are currently not included in the dependencies
])
# For installation of the nglview widget for ipython one might need:
# $jupyter-nbextension enable --py --user widgetsnbextension
# $jupyter-nbextension enable --py --user nglview
| mirnylab/nglutils | setup.py | setup.py | py | 706 | python | en | code | 2 | github-code | 13 |
9484972317 | #!/usr/bin/env python
# -*- coding=utf-8 -*-
__author__ = "柯博文老師 Powen Ko, www.powenko.com"
from sklearn import datasets
from sklearn.model_selection import train_test_split
import tensorflow as tf
import numpy as np
iris = datasets.load_iris()
category=3
dim=4
x_train , x_test , y_train , y_test = train_test_split(iris.data,iris.target,test_size=0.2)
y_train2=tf.contrib.keras.utils.to_categorical(y_train, num_classes=(category))
y_test2=tf.contrib.keras.utils.to_categorical(y_test, num_classes=(category))
print("x_train[:4]",x_train[:4])
print("y_train[:4]",y_train[:4])
print("y_train2[:4]",y_train2[:4])
# 建立模型
model = tf.contrib.keras.models.Sequential()
model.add(tf.contrib.keras.layers.Dense(units=10,
activation=tf.nn.relu,
input_dim=dim))
model.add(tf.contrib.keras.layers.Dense(units=10,
activation=tf.nn.relu ))
model.add(tf.contrib.keras.layers.Dense(units=category,
activation=tf.nn.softmax ))
model.compile(optimizer='adam',
loss=tf.contrib.keras.losses.categorical_crossentropy,
metrics=['accuracy'])
history=model.fit(x_train, y_train2,
epochs=200,
batch_size=128)
#測試
score = model.evaluate(x_test, y_test2, batch_size=128)
print("score:",score)
predict = model.predict(x_test)
print("Ans:",np.argmax(predict[0]),np.argmax(predict[1]),np.argmax(predict[2]),np.argmax(predict[3]))
predict2 = model.predict_classes(x_test)
print("predict_classes:",predict2)
print("y_test",y_test[:])
import matplotlib.pyplot as plt
plt.plot(history.history['acc'])
plt.plot(history.history['loss'])
plt.title('model accuracy')
plt.ylabel('acc & loss')
plt.xlabel('epoch')
plt.legend(['acc', 'loss'], loc='upper left')
plt.show()
| jtlai0921/sampleCode | ch29/01-Iris-MLP_show.py | 01-Iris-MLP_show.py | py | 1,711 | python | en | code | 0 | github-code | 13 |
72947644819 | # python standard
# JSON support to load the database info
from typing import Dict, Union
from datetime import datetime, timedelta
# components of the factory
from .object import Material, Producer, Obj_Initial
from .object.tool_data import SQL
from graphviz import Digraph
# pytorch
import torch
# load database information
def _load_database_info():
return{
"host": "localhost",
"port": 114,
"user": "reader",
"password": "666666",
"database": "Factory"
}
class Factory:
def __init__(self):
"""
Initialize a factory environment.
:param int day_plus: The number of days to advance the factory's date.
:ivar list[Material] materials: A list of Material objects representing materials in the factory.
:ivar list[Producer] producers: A list of Producer objects representing producers in the factory.
:ivar dict raw: A dictionary containing raw Nanjing for materials and producers.
:ivar SQL database: An SQL object for connecting to a MySQL database.
:ivar Obj_Initial obj_ini: An Obj_Initial object for initializing raw Nanjing.
"""
# factory inner Nanjing in gaming
self.materials: list[Material] = []
self.producers: list[Producer] = []
self.price_source: Dict[datetime, Union[float]] = {}
# origin Nanjing
self.raw = {
"material": list[Material],
"producer": list[Producer],
}
# other Nanjing that will be used in the environment
# connect mySQL database
_database_info = _load_database_info()
# database start date
self.date_start: datetime = datetime(2022, 2, 1)
self.date: datetime = self.date_start
# pass database to obj_initial get the raw Nanjing of material and producer
self.obj_ini = Obj_Initial(DB_type="CSV")
# get the raw Nanjing
self.raw["material"] = self.obj_ini.material_initialize()
self.raw["producer"] = self.obj_ini.producer_initialize()
self.price_source = self.obj_ini.price_initialize()
# initialize
self.reset(0)
def relationship(self):
# Generate product relationship
graph_product = Digraph(comment='Product Relation')
# get dots
for material in self.materials:
graph_product.node(material.un_id, label=material.name)
# get edges
for producer in self.producers:
graph_product.node(producer.un_id, label=f"Pro: {producer.un_id}", shape="house")
for id, amount in producer.material.items():
if amount > 0:
graph_product.edge(producer.un_id, id)
else:
graph_product.edge(id, producer.un_id)
with open('./Demo_data/Base/product_structure.dot', 'w') as f:
f.write(graph_product.source)
graph_product.render('./Demo_data/Base/product_structure', format='png', view=True)
    def reset(self, day_plus: int = 0) -> None:
        """
        Reset the factory to its initial state.

        Restores materials and producers from the raw snapshots and rewinds
        the calendar to ``date_start`` advanced by ``day_plus`` days.

        :param int day_plus: days to advance past the database start date.
        """
        # NOTE(review): these assignments alias the raw lists (no copy), so
        # the per-object reset() below also touches the snapshots — this
        # assumes Material.reset()/Producer.reset() return the object itself;
        # verify against their definitions.
        self.materials = self.raw["material"]
        self.producers = self.raw["producer"]
        self.materials = [i.reset() for i in self.materials]
        self.producers = [i.reset() for i in self.producers]
        self.date = self.date_start + timedelta(day_plus)
def info(self) -> (torch.Tensor, list, int):
"""
Get information about the factory's materials and producers.
:returns: A tuple containing the environment tensor and matrix size.
:rtype: tuple[torch.tensor, list[int]]
"""
mat_info = []
pro_info = []
# generate the material Nanjing matrix
for item in self.materials:
item.update_price(self.date, self.price_source[item.un_id])
mat_info.append([
int(item.un_id),
item.inventory,
item.inventory_cap,
item.cache,
item.cache_cap,
1 if item.trade_permit["purchase"] else 0,
1 if item.trade_permit["sale"] else 0,
item.price["price_now"],
])
# generate the producer Nanjing matrix
for item in self.producers:
for mat_key, mat_value in item.material.items():
pro_info.append([
int(item.un_id),
item.daily_low_cost,
item.daily_produce_cap,
int(mat_key),
mat_value,
])
mat_count = len(mat_info)
pro_count = len(pro_info)
mat_colum = len(mat_info[-1])
pro_colum = len(pro_info[-1])
matrix_size = [1, mat_colum + pro_colum+1, mat_count if mat_count > pro_count else pro_count]
# transfer list matrix into tensor
def write_tensor(target: torch.tensor, matrix: list[list], m_count: int, m_colum: int, start: list):
for m_c in range(m_count):
for m_l in range(m_colum):
target[m_l + start[0], m_c + start[1]] = matrix[m_c][m_l]
env_tensor = torch.zeros(matrix_size, dtype=torch.float32)
write_tensor(env_tensor[0], mat_info, mat_count, mat_colum, [0, 0])
write_tensor(env_tensor[0], pro_info, pro_count, pro_colum, [mat_colum + 1, 0])
num_actions = len(self.materials)+len(self.producers)
return env_tensor, matrix_size, num_actions
def step(self, action: list[float], mode: str = "train") -> Dict[str, torch.tensor]: # make one step forward
"""
Take one step forward in the factory environment.
:param list[float] action: A list of actions to be performed.
:param str mode: The mode in which the factory is running ("train" or "play"). Default is "train".
:returns: A dictionary containing relevant information based on the chosen mode.
:rtype: Dict[str, torch.tensor]
This method simulates one step in the factory environment based on the provided actions.
It takes actions for trading and producing, computes the results, and returns information
such as earnings, rewards, and outputs. The returned information depends on the mode specified.
"""
action_mode = {
"train": "normal",
"play": "normal",
"mock": "mock",
}[mode]
# action amount needs
mat_act_count = len(self.materials)
pro_act_count = len(self.producers)
# record actions
trade_action = action[:mat_act_count]
produce_action = action[mat_act_count:]
# record result
trade_result: list[Dict[str, Union[int, float, str]]] = []
produce_result: list[Dict[str, Union[int, float, str]]] = []
# trade
for act in range(mat_act_count):
trade_result.append(
self.materials[act].trade(amount=trade_action[act], date=self.date, price_source=self.price_source, mode=action_mode)
)
# produce
for act in range(pro_act_count):
produce_result.append(
self.producers[act].produce(amount=produce_action[act], materials=self.materials, mode=action_mode)
)
# get result unpacked
def unpack_result(target: list[Dict[str, Union[int, float, str]]]):
Earn: list[float] = []
Reward: list[float] = []
Output: list[str] = []
for item in target:
Earn.append(item["Earn"])
Reward.append(item["Reward"])
Output.append(item["Output"])
return Earn, Reward, Output
trade_earn, trade_reward, trade_output = unpack_result(trade_result)
produce_earn, produce_reward, produce_output = unpack_result(produce_result)
# choose return values by mode choice
# return when train mode
def train_return() -> Dict[str, torch.tensor]:
total_earn = torch.tensor(trade_earn + produce_earn)
total_reward = torch.tensor(trade_reward + produce_reward)
return {
"total_earn": total_earn,
"total_reward": total_reward
}
# return when play mode
def play_return() -> Dict[str, torch.Tensor]:
total_earn = torch.tensor(trade_earn + produce_earn)
total_reward = torch.tensor(trade_reward + produce_reward)
total_output = trade_output + produce_output
return {
"total_earn": total_earn,
"total_reward": total_reward,
"total_output": total_output
}
# match dictionary
if mode == "train":
self.date += timedelta(days=1)
for i in self.materials:
i.inventory_change("refresh")
return train_return()
elif mode == "play":
self.date += timedelta(days=1)
for i in self.materials:
i.inventory_change("refresh")
return play_return()
elif mode == "mock":
return train_return()
else:
return {}
def action_mock(self, action_genes: torch.Tensor) -> (torch.Tensor, torch.Tensor):
shape = action_genes.shape
num_actions = shape[0]
num_choices = shape[1]
mock_earn = torch.zeros(shape, dtype=torch.float32)
mock_reward = torch.zeros(shape, dtype=torch.float32)
for NO_choice in range(num_choices):
result = self.step(action_genes.t()[NO_choice], mode="mock")
temp_earn = result["total_earn"]
temp_reward = result["total_reward"]
for NO_action in range(num_actions):
mock_earn[NO_action, NO_choice] = temp_earn[NO_action]
mock_reward[NO_action, NO_choice] = temp_reward[NO_action]
return mock_earn, mock_reward
    def forward(self) -> None:
        """Advance the factory clock by one day without taking any action."""
        self.date += timedelta(days=1)
if __name__ == '__main__':
# a demo to test info and step
example = Factory()
example.reset(6)
_, _, act = example.info()
print(act)
print(
example.step([66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "train")
)
| RuihanRZhao/Efficiency_RL | src/game/factory/environment.py | environment.py | py | 10,368 | python | en | code | 2 | github-code | 13 |
26943696493 | import os
import server
import unittest
import tempfile
class FlaskrTestCase(unittest.TestCase):
    """Integration tests for the ``server`` Flask app via its test client."""
    def setUp(self):
        # Point the app at a throwaway temp file so tests never touch real data.
        self.db_fd, server.app.config['DATABASE'] = tempfile.mkstemp()
        server.app.config['TESTING'] = True
        self.app = server.app.test_client()
        #server.init_db()
    def tearDown(self):
        # Close and remove the temporary database created in setUp.
        os.close(self.db_fd)
        os.unlink(server.app.config['DATABASE'])
    def test_index_page(self):
        # NOTE(review): on Python 3 / modern Flask, rv.data is bytes, so a
        # str-in-bytes membership test raises TypeError -- confirm the
        # targeted runtime (these checks only work as-is on Python 2).
        rv = self.app.get('/')
        assert 'localhost:8080' in rv.data
    def test_add_client(self):
        # Posting the edit form should echo the new client back in the page.
        rv = self.app.post('/edit_client', data=dict(
            name='First client',
            clientid='clientid'
        ), follow_redirects=True)
        assert 'First client' in rv.data
        assert 'clientid' in rv.data
if __name__ == '__main__':
unittest.main() | bds-orsk/1CMonitorService | ObmenMonitor_test.py | ObmenMonitor_test.py | py | 814 | python | en | code | 1 | github-code | 13 |
70865475537 | import torch
import math
import random
# Op-code -> augmentation name for the augmentation search space.
# NOTE(review): augmentation() below also implements 'equalize', which is
# missing from this table -- confirm whether that is intentional.
AUG_TYPE = {0: 'resize_padding', 1: 'translation', 2: 'rotation',
            3: 'gaussian_noise', 4: 'horizontal_flip', 5: 'vertical_flip',
            6: 'scaling', 7: 'invert', 8: 'solarize'}
def augmentation(img_tensor, op_type, magnitude):
    ''' Differentiable (backward-capable) image augmentation.
    Directional ops (translation, rotation, padding split) pick a random
    direction each call; the magnitude only controls the strength.
    Inputs: img_tensor, a batched image tensor with values in [0, 1],
            op_type, operation name as a string (see AUG_TYPE),
            magnitude, strength in roughly 0..9.
    Return: augmented img tensor.
    Raises: ValueError for an unknown op_type (was a bare assert before,
            which is stripped under ``python -O`` and left dead code behind).
    '''
    if op_type == 'resize_padding':
        # shrink the image, then pad back to the original size with a
        # randomly split border on each side
        img_w = img_tensor.shape[2]
        img_h = img_tensor.shape[3]
        w_modified = 2*int(0.01*magnitude*img_w)
        h_modified = 2*int(0.01*magnitude*img_h)
        img_tensor = torch.nn.functional.interpolate(img_tensor,
                                                     [img_w-w_modified, img_h-h_modified])
        h_padding_t = random.choice(range(0, h_modified+1))
        h_padding_b = h_modified - h_padding_t
        w_padding_l = random.choice(range(0, w_modified+1))
        w_padding_r = w_modified - w_padding_l
        #h_padding = h_modified//2
        #w_padding = w_modified//2
        img_tensor = torch.nn.functional.pad(img_tensor, (h_padding_t, h_padding_b, w_padding_l, w_padding_r),
                                             mode='constant', value=0)
        return img_tensor
    elif op_type == 'translation':
        # affine shift of up to 2% * magnitude in a random direction per axis
        w_direction = random.choice([-1, 1])
        h_direction = random.choice([-1, 1])
        #magnitude_ = magnitude-5 # 0to11 -> -5to5
        magnitude_ = magnitude
        w_modified = w_direction*0.02*magnitude_
        h_modified = h_direction*0.02*magnitude_
        trans_M = torch.Tensor([[1., 0., w_modified],
                                [0., 1., h_modified]])
        batch_size = img_tensor.shape[0]
        trans_M = trans_M.unsqueeze(0).repeat(batch_size, 1, 1)
        grid = torch.nn.functional.affine_grid(trans_M, img_tensor.shape)
        img_tensor = torch.nn.functional.grid_sample(img_tensor, grid.to(img_tensor.device))
        return img_tensor
    elif op_type == 'rotation':
        # rotation of up to pi/6, direction chosen at random
        rot_direction = random.choice([-1, 1])
        #magnitude_ = magnitude-5 # 0to11 -> -5to5
        magnitude_ = magnitude
        rot_deg = torch.tensor(rot_direction*math.pi*magnitude_/60.) # -pi/6 to pi/6
        rot_M = torch.Tensor([[torch.cos(rot_deg), -torch.sin(rot_deg), 0],
                              [torch.sin(rot_deg), torch.cos(rot_deg), 0]])
        batch_size = img_tensor.shape[0]
        rot_M = rot_M.unsqueeze(0).repeat(batch_size, 1, 1)
        grid = torch.nn.functional.affine_grid(rot_M, img_tensor.shape)
        img_tensor = torch.nn.functional.grid_sample(img_tensor, grid.to(img_tensor.device))
        return img_tensor
    elif op_type == 'gaussian_noise':
        # additive noise scaled by magnitude, clamped back into [0, 1]
        noise = torch.randn_like(img_tensor)
        img_tensor = img_tensor + noise * magnitude/60
        img_tensor = torch.clamp(img_tensor, 0, 1)
        return img_tensor
    elif op_type == 'horizontal_flip':
        return torch.flip(img_tensor, [3])
    elif op_type == 'vertical_flip':
        return torch.flip(img_tensor, [2])
    elif op_type == 'scaling':
        # Refer to ICLR 2020 paper:
        # "NESTEROV ACCELERATED GRADIENT AND SCALE INVARIANCE FOR ADVERSARIAL ATTACKS"
        # https://arxiv.org/abs/1908.06281
        # And its implementation:
        # https://github.com/JHL-HUST/SI-NI-FGSM/blob/master/si_mi_fgsm.py
        # In its implementation, the scaling op is performed on image scaled to [-1, 1].
        # We don't know if such op is resonable because it is actually reduing contrast
        # to a biased mean [125, 125, 125]. However, we still follow such implementation here.
        # Meanwhile, the original implementation uses 1, 1/2, 1/4, 1/8, 1/16
        # which is actually 1, 0.5, 0.25, 0.125, 0.0625.
        # Here we make search range roughly contains such scales, 0.1 to 1.0
        img_tensor = img_tensor*2.0 -1.0
        magnitude = 1.0 - 0.1*magnitude
        img_tensor = img_tensor * magnitude
        img_tensor = (img_tensor + 1.0)/2.0
        return img_tensor
    elif op_type == 'invert':
        return 1.0 - img_tensor
    elif op_type == 'solarize':
        # invert only pixels at or above the magnitude-dependent threshold
        solarize_threshold = 1.0 - 0.09*magnitude
        return torch.where(img_tensor < solarize_threshold, img_tensor, 1.0-img_tensor)
    elif op_type == 'equalize':
        # code taken from https://github.com/kornia/
        img_tensor = img_tensor * 255. #0~1 to 0~255
        def scale_channel(im, c):
            # histogram-equalize one channel of one image
            im = im[c, :, :]
            histo = torch.histc(im, bins=256, min=0, max=255).detach()
            nonzero_histo = torch.reshape(histo[histo!=0], [-1])
            step = (torch.sum(nonzero_histo)-nonzero_histo[-1]) // 255
            def build_lut(histo, step):
                lut = (torch.cumsum(histo, 0)) + (step//2)//step
                lut = torch.cat([torch.zeros(1).to(im.device), lut[:-1]])
                return torch.clamp(lut, 0, 255)
            if step == 0:
                result = im
            else:
                result = torch.gather(build_lut(histo, step), 0, im.flatten().long())
                result = result.reshape_as(im)
            return result/255.
        res = []
        for image in img_tensor:
            scaled_image = torch.stack([scale_channel(image, i) for i in range(len(image))])
            res.append(scaled_image)
        return torch.stack(res)
    else:
        # fix: was print(op_type) + `assert False` followed by an unreachable
        # return; asserts vanish under -O, silently returning None
        raise ValueError("Unknown augmentation type: %s" % op_type)
| HaojieYuan/autoAdv | aug_search.py | aug_search.py | py | 5,664 | python | en | code | 1 | github-code | 13 |
31494146312 | # Faça um Programa que verifique se uma letra digitada é "F" ou "M".
# Conforme a letra escrever: F - Feminino, M - Masculino, Sexo Inválido.
a = input('Digite (M) para masculino \n Digite (F) para feminino')
a = a.upper()
a = a.strip()
if a == 'M':
print('Sexo Masculino selecionado.')
elif a == 'F':
print('Sexo Feminino selecionado.')
else:
print('Comando Inválido')
| GuilhermeMastelini/Exercicios_documentacao_Python | Estrutura de Decisão/Lição 3.py | Lição 3.py | py | 401 | python | pt | code | 0 | github-code | 13 |
74852657616 | from __future__ import unicode_literals
import logging
import sys
import json
import click
try:
from importlib import metadata
except ImportError: # for Python<3.8
import importlib_metadata as metadata
from colorama import init
from .api import get_threads, get_posts
from .threads import (
parse_threads,
search_threads,
sort_threads,
generate_thread_output,
ThreadEncoder,
)
from .posts import generate_posts_output, PostEncoder
# colorama: enable ANSI colour handling (required on Windows terminals)
init()

# Configure the root logger once at import time.
# fix: removed a bare `logging.getLogger()` statement whose return value
# was discarded -- it had no effect.
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler())
def get_version():
    """Return the human-readable version string, e.g. ``rfd v1.2.3``."""
    return "rfd v{0}".format(metadata.version("rfd"))
def print_version(ctx, _, value):
    """Eager click callback for --version: echo the version and exit."""
    if value and not ctx.resilient_parsing:
        click.echo(get_version(), nl=True)
        ctx.exit()
@click.group(invoke_without_command=True)
@click.option(
    "-v",
    "--version",
    is_flag=True,
    callback=print_version,
    expose_value=False,
    is_eager=True,
)
@click.pass_context
def cli(ctx):
    """CLI for https://forums.redflagdeals.com"""
    # Invoked with no subcommand: show the help text instead of doing nothing.
    if not ctx.invoked_subcommand:
        click.echo(ctx.get_help())
@cli.command(short_help="Display all posts in a thread.")
@click.argument("post_id")
@click.option(
"--output", default=None, help="Defaults to custom formatting. Other options: json"
)
def posts(post_id, output):
"""Iterate all pages and display all posts in a thread.
post_id can be a full url or post id only
Example:
\b
rfd posts https://forums.redflagdeals.com/koodo-targeted-public-mobile-12-120-koodo-5gb-40-no-referrals-2173603
"""
try:
if output == "json":
click.echo_via_pager(
json.dumps(
get_posts(post=post_id),
cls=PostEncoder,
indent=2,
sort_keys=True,
)
)
else:
click.echo_via_pager(generate_posts_output(get_posts(post=post_id)))
except ValueError:
click.echo("Invalid post id.")
sys.exit(1)
except AttributeError as err:
click.echo("The RFD API did not return the expected data. %s", err)
sys.exit(1)
@cli.command(short_help="Displays threads in the forum. Defaults to hot deals.")
@click.option("--forum-id", default=9, help="The forum id number")
@click.option("--pages", default=1, help="Number of pages to show. Defaults to 1.")
@click.option("--sort-by", default=None, help="Sort threads by")
@click.option(
"--output", default=None, help="Defaults to custom formatting. Other options: json"
)
def threads(forum_id, pages, sort_by, output):
"""Display threads in the specified forum id. Defaults to 9 (hot deals).
Popular forum ids:
\b
9 \t hot deals
14 \t computer and electronics
15 \t offtopic
17 \t entertainment
18 \t food and drink
40 \t automotive
53 \t home and garden
67 \t fashion and apparel
74 \t shopping discussion
88 \t cell phones
"""
_threads = sort_threads(
parse_threads(get_threads(forum_id, pages)), sort_by=sort_by
)
if output == "json":
click.echo_via_pager(
json.dumps(
sort_threads(_threads, sort_by=sort_by),
cls=ThreadEncoder,
indent=2,
sort_keys=True,
)
)
else:
click.echo_via_pager(generate_thread_output(_threads))
@cli.command(short_help="Search deals based on a regular expression.")
@click.option("--pages", default=5, help="Number of pages to search.")
@click.option(
"--forum-id", default=9, help="The forum id number. Defaults to 9 (hot deals)."
)
@click.option("--sort-by", default=None, help="Sort threads by")
@click.option(
"--output", default=None, help="Defaults to custom formatting. Other options: json"
)
@click.argument("regex")
def search(pages, forum_id, sort_by, output, regex):
"""Search deals based on regex.
Popular forum ids:
\b
9 \t hot deals
14 \t computer and electronics
15 \t offtopic
17 \t entertainment
18 \t food and drink
40 \t automotive
53 \t home and garden
67 \t fashion and apparel
74 \t shopping discussion
88 \t cell phones
"""
matched_threads = []
_threads = parse_threads(get_threads(forum_id, pages=pages))
for thread in search_threads(threads=_threads, regex=regex):
matched_threads.append(thread)
if output == "json":
click.echo_via_pager(
json.dumps(
sort_threads(matched_threads, sort_by=sort_by),
indent=2,
sort_keys=True,
cls=ThreadEncoder,
)
)
else:
click.echo_via_pager(
generate_thread_output(sort_threads(matched_threads, sort_by=sort_by))
)
| davegallant/rfd | rfd/cli.py | cli.py | py | 4,859 | python | en | code | 9 | github-code | 13 |
7494436973 | from tkinter import *
from tkinter import filedialog
class FileDir:
    """Tkinter widget row that lets the user pick an Excel directory.

    The chosen path is cached on ``self.path`` and persisted to ``dir.txt``
    in the working directory so it survives application restarts.
    """

    def __init__(self, root, GUIManagerWidgetsList):
        self.path = None  # last directory chosen by the user (None = unset)
        self.readSettings()
        row = Frame(root)
        # File Path Button
        lab = Label(row, width=20, text="Excel Directory", anchor='w')
        userPath = Button(row, text='Browse', command=self.getPath)
        row.pack(side=TOP, fill=X, padx=5, pady=5)
        lab.pack(side=LEFT)
        # User Path Text
        userPath.pack(side=LEFT, expand=NO, fill=X)
        self.userPath = Label(row, width=30, text=str(self.path), anchor='w')
        self.userPath.pack(side=LEFT)
        GUIManagerWidgetsList.extend((row, lab, self.userPath))

    def getPath(self):
        """Open a directory chooser, then persist and display the selection."""
        self.path = filedialog.askdirectory()
        if self.path == '':
            # dialog cancelled: clear the stored setting
            self.path = None
            self.userPath.config(text=str(self.path))
            self.writeSettings('')
        else:
            self.writeSettings(str(self.path))
            self.userPath.config(text=str(self.path))

    def readSettings(self):
        """Load the persisted directory from dir.txt (create it if absent)."""
        try:
            # use a context manager so the handle is closed even on error
            with open('dir.txt', 'r') as f:
                self.path = f.read().replace('\n', '')
            if self.path == '':
                self.path = None
        except FileNotFoundError:
            # first run: create an empty settings file
            open('dir.txt', 'w').close()

    def writeSettings(self, userPath):
        """Persist userPath to dir.txt; passing '\\n' clears the setting.

        fix: the clear branch previously truncated the file and then fell
        through to write '\\n' anyway -- it now returns after clearing.
        """
        if userPath == '\n':
            with open('dir.txt', 'w') as f:
                f.write('')
            self.path = None
            return
        with open('dir.txt', 'w') as f:
            f.write(userPath)
| DavidCastillo2/GrouponScraper | tKinter/fileDir.py | fileDir.py | py | 1,709 | python | en | code | 0 | github-code | 13 |
3356302380 | # print("Hello World!")
# print("Day 1 - Python Print Function")
# print("The function is declared like this:")
# print("print('what to print')")
# \n makes a new line
# print("Hello World\nHello World\nHello World")
# print("Hello" + " Sean")
#Fix the code below
# print("Day 1 - String Manipulation")
# print("String Concatenation is done with the" ' +' " sign.")
# print('e.g. print("Hello " + "World")')
# print("New lines can be created\nwith a backslash and n.")
# print("What is your name?")
# input("What is your name?")
# function to calculate length of string
# string = "sean"
# print(len(string))
# print(len(input("What is your name?")))
# name = input("What is your name?")
# print(name)
# name = "Jack"
# print(name)
# name = "Angela"
# print(name)
# name = input("What is your name?")
# length = len(name)
# print(length)
print("Welcome to the name generator")
city = input("What city did you grow up in?\n")
pet_name = input("What is the name of your pet?\n")
length = len(city)
print("Your band name is " + city + " " + pet_name)
| SeanUnland/Python | Python Day 1/main.py | main.py | py | 1,065 | python | en | code | 0 | github-code | 13 |
36815579547 | from CoolProp.CoolProp import PropsSI as prop
from CoolProp.CoolProp import PhaseSI as fase
from matplotlib import pyplot as plot
from tabulate import tabulate
# Air-cycle state sweep using CoolProp property lookups.
# States 1..4; temperatures in K, pressures in Pa.
t1 = 300
t3 = 300
p1 = 0.1 * (10**6)
p4 = 1.2 * (10**6)
# Entropy at state 1; process 1->2 is treated as isentropic (s2 = s1).
s1 = prop('S', 'T', t1, 'P', p1, 'air')
s2 = s1
for i in range(100):
    # Sweep the intermediate pressure p2 in tiny increments near 0.346574 MPa
    # (presumably a bisection-style hand search for a balance point -- TODO confirm).
    p2 = (i/100000000000 + .346574194) * (10**6)
    p3 = p2
    t2 = prop('T', 'P', p2, 'S', s2, 'air')
    # Process 3->4 is also treated as isentropic (s4 = s3).
    s3 = prop('S', 'T', t3, 'P', p3, 'air')
    s4 = s3
    t4 = prop('T', 'P', p4, 'S', s4, 'air')
    h4 = prop('H', 'P', p4, 'T', t4, 'air')
    h3 = prop('H', 'P', p3, 'T', t3, 'air')
    h2 = prop('H', 'P', p2, 'T', t2, 'air')
    h1 = prop('H', 'P', p1, 'T', t1, 'air')
    # Print the mismatch between the two enthalpy differences at each p2.
    h43 = h4-h3
    h21 = h2-h1
    print(p2, h43-h21)
| fttaunton/Tarea-3-Conversi-n-de-Energ-a | Codigo/T3P42.py | T3P42.py | py | 723 | python | en | code | 0 | github-code | 13 |
40640501305 | # exact data from orca output
import re
import os
import sys
import numpy as np
from tqdm import tqdm
from energy_calc import calculate_energy_xtb
# file path
# One "<label> x y z" coordinate line; the label may carry a numeric prefix.
re_coor = re.compile(r'\s*(\d{0,3}[A-Z][a-z]?)\s+(\S+)\s+(\S+)\s+(\S+)\s*')
# Blank line(s) separate consecutive geometries in a multi-xyz file.
re_double_end_line = re.compile(r'\n\s*\n')
# Element symbol -> atomic number for the elements this tool supports.
atom_index_table = {
    'H': 1,
    'He': 2,
    'C': 6,
    'O': 8,
    'F': 9,
    'P': 15,
    'S': 16,
    'Cl': 17,
}
def text_converter(text):
    """Parse one xyz geometry block into (atomic numbers, Nx3 coordinates).

    Returns None when *text* contains no coordinate lines.
    """
    matches = re.findall(re_coor, text)
    if not matches:
        return None
    numbers = [atom_index_table[m[0]] for m in matches]
    coords = [[float(component) for component in m[1:]] for m in matches]
    return np.array(numbers), np.array(coords)
def sort_one_file(file_path):
    """Score geometries in a multi-xyz file and return {geometry_text: energy}.

    Splits the file on blank lines and evaluates at most the module-global
    `count` geometries with the xtb energy calculator.
    """
    global count  # capped below to the number of geometries actually found
    with open(file_path) as f:
        t = f.read()
    output_dict = {}
    split_lines = re.split(re_double_end_line, t)
    total_len = len(split_lines)
    if total_len <= count:
        count = total_len
    with tqdm(total=count) as pbar:
        calc_num = 0
        for sep_coor in split_lines:
            # skip fragments that contain no coordinate lines
            r = text_converter(sep_coor)
            if r:
                energy = calculate_energy_xtb(*r)
                output_dict[sep_coor] = energy
                calc_num += 1
                pbar.update(1)
                if calc_num == count:
                    break
    return output_dict
if __name__ == '__main__':
    count = 99999  # default: process (up to) every geometry in the file
    if len(sys.argv) == 2:
        file_path = sys.argv[1]
    elif len(sys.argv) == 3:
        # fix: file_path was never assigned on this branch, so supplying an
        # explicit count crashed with NameError further down
        file_path = sys.argv[1]
        count = int(sys.argv[2])
    else:
        sys.exit()
    results = sort_one_file(file_path)
    # write the geometries back out, lowest energy first
    with open(file_path.replace('.xyz', 'xtb_sort.xyz'), 'w') as f:
        for t, e in sorted(results.items(), key=lambda x: x[1]):
            f.write(f'{e:5f}')
            f.write('\n')
            f.write(t)
            f.write('\n\n')
| MWFudan/MolStruFitting | XTB_sort/xtb_sort.py | xtb_sort.py | py | 1,896 | python | en | code | 0 | github-code | 13 |
38035526825 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 19 12:36:44 2021
@author: Maxi
"""
#Decimal to binary
num = 11
# remember the sign, then work with the magnitude only
isNeg = num < 0
num = abs(num)
result = ''
if num == 0:
    result = '0'
# peel off binary digits least-significant first, prepending each one
while num > 0:
    result = str(num % 2) + result
    num = num // 2
if isNeg:
    result = '-' + result
# Decimal to binary for fraction numbers
x = float(input('Enter a decimal number between 0 and 1: '))
# Find the smallest p with x * 2**p integral.  NOTE(review): this loop only
# terminates for inputs exactly representable in binary floating point.
p = 0
while ((2**p)*x)%1 != 0:
    print('Remainder = ' + str((2**p)*x - int((2**p)*x)))
    p += 1
# Convert the scaled integer to binary digits as usual.
num = int(x*(2**p))
result = ''
if num == 0:
    result = '0'
while num > 0:
    result = str(num%2) + result
    num = num//2
# Left-pad with zeros so there are exactly p fractional digits.
for i in range(p - len(result)):
    result = '0' + result
# Re-insert the binary point p digits from the right.
result = result[0:-p] + '.' + result[-p:]
print('The binary representation of the decimal ' + str(x) + ' is ' + str(result))
## Newton - Raphson
# Approximate sqrt(y) by Newton's method on f(g) = g*g - y.
epsilon = 0.01
y = 54.0
guess = y / 2.0
numGuesses = 0
while abs(guess * guess - y) >= epsilon:
    numGuesses += 1
    # Newton update: g <- g - f(g) / f'(g)
    guess = guess - (guess * guess - y) / (2.0 * guess)
print('numGuesses = ' + str(numGuesses))
print('Square root of ' + str(y) + ' is about ' + str(guess))
| Maxdelsur/Introduction-to-Computer-Science-and-Programming-Using-Python | Unit two/lect 3/Lect 3 - Floats and Fractions.py | Lect 3 - Floats and Fractions.py | py | 1,209 | python | en | code | 0 | github-code | 13 |
71139106259 | from __future__ import print_function
from option import *
from model import *
from load_data import *
import torch
# Parse CLI/config options and build the requested GAN model.
opt = Option()()
model = create_model(opt)
batch_size = opt.batch_size
is_small = opt.is_small
# Select the training dataset matching the model family (EFG vs NFG).
if 'EFG' in opt.model:
    if 'CYC' in opt.model:
        transformed_dataset = EFGDataset(mode='training', transform=transforms.Compose(
            [ToTensor(),
             Normalize([0.5,0.5,0.5],[0.5,0.5,0.5])]), is_unpaired=True, is_small=is_small)
    else:
        # NOTE(review): identical to the CYC branch above -- confirm whether the
        # non-cycle case was meant to use is_unpaired=False.
        transformed_dataset = EFGDataset(mode='training', transform=transforms.Compose(
            [ToTensor(),
             Normalize([0.5,0.5,0.5],[0.5,0.5,0.5])]), is_unpaired=True, is_small=is_small)
elif 'NFG' in opt.model:
    if 'CYC' in opt.model:
        transformed_dataset = NFGDataset(mode='training',transform=transforms.Compose(
            [ToTensor(),
             Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]), is_small=is_small)
    else:
        # NOTE(review): also identical to its CYC branch -- confirm intent.
        transformed_dataset = NFGDataset(mode='training',transform=transforms.Compose(
            [ToTensor(),
             Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]), is_small=is_small)
data_loader = torch.utils.data.DataLoader(transformed_dataset, batch_size=batch_size, shuffle=False)
epoch_num = opt.epoch_num
data_size = len(data_loader)
total_step = 0
print('data_size: %d, scheduled_iter_num: %d' %(data_size, epoch_num*data_size))
which_model = opt.model
for e in range(epoch_num):
    for i, data in enumerate(data_loader):
        model.set_input(data)
        if 'LSGAN' in which_model:
            # LSGAN models encapsulate their own optimization step.
            model.optimize()
        elif 'WGAN' in which_model:
            # WGAN: update the critic(s) 5 times per generator update.
            model.forward()
            if 'CYC' in which_model:
                for _ in range(5):
                    model.optimizer_D_A.zero_grad()
                    model.backward_D_A()
                    model.optimizer_D_A.step()
                    model.optimizer_D_B.zero_grad()
                    model.backward_D_B()
                    model.optimizer_D_B.step()
                model.optimizer_G.zero_grad()
                model.backward_G()
                model.optimizer_G.step()
            else:
                for _ in range(5):
                    model.optimizer_D.zero_grad()
                    model.backward_D()
                    model.optimizer_D.step()
                model.optimizer_G.zero_grad()
                model.backward_G()
                model.optimizer_G.step()
        else:
            raise ValueError('%s is not supported.' % which_model)
        model.save_loss()
        # periodic console reporting and checkpointing
        if total_step % opt.disp_freq == 0:
            print("iter: {0:5d} ".format(total_step), end='')
            model.print_current_loss()
        if total_step != 0 and total_step % opt.save_freq == 0:
            print('saving model at iteration {0}...'.format(total_step))
            model.save(total_step)
        total_step += 1
# final checkpoint after the last iteration
print('saving model at iteration {0}...'.format(total_step))
model.save(total_step)
print('Training complete.')
| klory/s2f2e | train.py | train.py | py | 3,010 | python | en | code | 1 | github-code | 13 |
29008104720 |
from typing import Tuple, List, Dict, Union
import numpy as np
from dataloaders.batchdatagenerator import BatchDataGenerator
class NetworkCheckerBase(object):
    """Base class for probing a trained network's intermediate feature maps.

    Subclasses implement layer discovery and the per-layer feature-map
    computation; this base only provides the fan-out over one, several,
    or all layers.
    """

    def __init__(self, size_image: Union[Tuple[int, int, int], Tuple[int, int]]) -> None:
        self._size_image = size_image

    def get_network_layers_names_all(self) -> List[str]:
        """Return the names of every layer in the network (subclass hook)."""
        raise NotImplementedError

    def _compute_feature_maps(self, image_data_loader: BatchDataGenerator,
                              in_layer_name: str,
                              index_first_featmap: int = None,
                              max_num_featmaps: int = None
                              ) -> np.ndarray:
        """Compute the feature maps of a single layer (subclass hook)."""
        raise NotImplementedError

    def get_feature_maps(self, image_data_loader: BatchDataGenerator,
                         in_layer_name: str,
                         index_first_featmap: int = None,
                         max_num_featmaps: int = None
                         ) -> np.ndarray:
        """Feature maps of one named layer."""
        return self._compute_feature_maps(image_data_loader, in_layer_name,
                                          index_first_featmap, max_num_featmaps)

    def get_feature_maps_list_layers(self, image_data_loader: BatchDataGenerator,
                                     in_list_layers_names: List[str],
                                     index_first_featmap: int = None,
                                     max_num_featmaps: int = None
                                     ) -> Dict[str, np.ndarray]:
        """Feature maps of each listed layer, keyed by layer name."""
        return {layer_name: self._compute_feature_maps(image_data_loader,
                                                       layer_name,
                                                       index_first_featmap,
                                                       max_num_featmaps)
                for layer_name in in_list_layers_names}

    def get_feature_maps_all_layers(self, image_data_loader: BatchDataGenerator,
                                    index_first_featmap: int = None,
                                    max_num_featmaps: int = None
                                    ) -> Dict[str, np.ndarray]:
        """Feature maps of every layer reported by get_network_layers_names_all()."""
        return self.get_feature_maps_list_layers(image_data_loader,
                                                 self.get_network_layers_names_all(),
                                                 index_first_featmap, max_num_featmaps)
| antonioguj/bronchinet | src/models/networkchecker.py | networkchecker.py | py | 2,355 | python | en | code | 42 | github-code | 13 |
6727542080 | import talib
import configargparse
import datetime as dt
import numpy as np
import pandas as pd
from .base import Base
import core.common as common
from .enums import TradeState
from core.bots.enums import BuySellMode
from core.tradeaction import TradeAction
from lib.indicators.stoploss import StopLoss
from sklearn import tree
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPRegressor
from strategies.predictioncommon.feature_engineering import feature_engineering
from strategies.predictioncommon.model_visualization import model_visualization
#from strategies.predictioncommon.data_loading import read_co_data
from strategies.predictioncommon.model_evaluation import model_evaluation
from termcolor import colored
# --- module-level state shared by algot() and the strategy class ---
acc = 10  # sliding-window length (number of prices) used by algot()
Points = []  # close prices appended on every calculate() call
Highs = []  # candle highs
Lows = []  # candle lows
Volumes = []  # traded volumes
dates = []  # candle timestamps
CashRecords = []  # NOTE(review): appears unused in this module -- confirm
Cash = 100  # NOTE(review): appears unused in this module -- confirm
days = 0
decision = 0  # last signal (0 = price expected up, 1 = expected down)
stockSymbol = 'AAPL'  # NOTE(review): appears unused in this module -- confirm
def algot(t):
    """Predict the next price direction from the price series *t*.

    Trains a fresh decision tree on sliding windows of ``acc`` prices:
    each feature row is the first ``acc - 1`` prices of a window and its
    label records whether the window's last price rose above the previous
    one.  Returns 0 when the model predicts a rise, 1 when it predicts a
    fall (matching the caller's 0 = UP / 1 = DOWN convention).
    """
    features = []
    labels = []
    # fix: the original loop appended the same trailing window t[-acc:-1]
    # and the same label on every iteration, so the tree was fitted on one
    # repeated sample; use a true sliding window over the series instead
    for i in range(len(t) - acc + 1):
        window = t[i:i + acc]
        features.append(window[:-1])
        # label 1 means the price went up at the end of this window
        labels.append(1 if window[-1] > window[-2] else 0)
    clf = tree.DecisionTreeClassifier()
    clf.fit(features, labels)
    print(len(features))
    if clf.predict([t[-1 * acc + 1:]]) == 1:
        return 0
    else:
        return 1
class Emasuperprediction(Base):
    """
    Ema strategy
    About: Buy when close_price > ema20, sell when close_price < ema20 and below death_cross
    """
    #fields
    arg_parser = configargparse.get_argument_parser()
    def __init__(self):
        # Read parsed CLI/config options (pairs, ticker size) and initialise
        # strategy state plus back-test tuning knobs.
        args = self.arg_parser.parse_known_args()[0]
        super(Emasuperprediction, self).__init__()
        self.name = 'emasuperprediction'
        self.min_history_ticks = 60
        self.pair = self.parse_pairs(args.pairs)[0]
        self.buy_sell_mode = BuySellMode.all
        self.stop_loss = StopLoss(int(args.ticker_size))
        self.Bought = False
        self.interval = int(args.ticker_size)
        self.standardizationFeatureFlag = True
        self.numStudyTrial = 50
        self.backTestInitialFund = 1000
        self.backTestDays = 15
        self.backTestSpread = 0
        self.marginTrade = False
    def calculate(self, look_back, wallet):
        """
        Main strategy logic (the meat of the strategy)
        """
        (dataset_cnt, _) = common.get_dataset_count(look_back, self.group_by_field)
        # Wait until we have enough data
        if dataset_cnt < self.min_history_ticks:
            print('dataset_cnt:', dataset_cnt)
            return self.actions
        self.actions.clear()
        new_action = TradeState.none
        # Calculate indicators
        df = look_back.tail(self.min_history_ticks)
        self.chartData_ = df
        # Append the latest window's OHLCV values to the module-level lists
        # (NOTE(review): these grow without bound across calls -- confirm).
        for i in df[['close']]:
            for j in df[i]:
                last_price = round(j,10)
                Points.append(last_price)
        for i in df[['high']]:
            for j in df[i]:
                Highs.append(round(j,8))
        for i in df[['low']]:
            for j in df[i]:
                Lows.append(round(j,8))
        for i in df[['volume']]:
            for j in df[i]:
                Volumes.append(round(j,8))
        for i in df[['date']]:
            for j in df[i]:
                last_time = dt.datetime.fromtimestamp(j)
                dates.append(last_time)
        self.appreciationRate_ = self.getAppreciationRate(self.chartData_.open)
        self.quantizer_ = self.quantizer(self.appreciationRate_)
        print("Format appreciation {}".format(self.appreciationRate_))
        print("Format quantizer {}".format(self.quantizer_))
        #self.prediction(self.appreciationRate_, self.quantizer_, 0, 30, 30)
        #bactTest_ = self.backTest(self.appreciationRate_, self.quantizer_, 30, 15, False)
        #print(bactTest_)
        fed_data = feature_engineering(df)
        # feature vector
        X = fed_data.take(list(range(fed_data.shape[1] - 1)), axis=1)
        # target
        y = np.ravel(fed_data.take([fed_data.shape[1] - 1], axis=1))
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
        print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
        # build a BP (backprop) neural-network regressor
        reg = MLPRegressor(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(15,), random_state=1)
        # train
        print("start training...")
        reg.fit(X_train, y_train)
        print("training finished.")
        # predict
        print("start predicting...")
        y_pred = reg.predict(X_test)
        print("predicting finished.")
        y_pred = pd.DataFrame(y_pred)
        y_pred.index = X_test.index
        y_test = pd.DataFrame(y_test)
        y_test.index = X_test.index
        # write the results to a file
        # pd.DataFrame(y_pred).to_excel('y_pred.xlsx')
        # model evaluation
        model_evaluation(y_test, y_pred)
        # visualization
        model_visualization(y_test, y_pred)
        print(type(X), type(y), type(X_train), type(X_test), type(y_train), type(y_test), type(y_pred))
        # days is the row count of the window (min_history_ticks = 60 > acc,
        # so `decision` below is always assigned before use).
        days = len(df[['close']])
        print("Last time: "+str(last_time)+" "+str(last_price))
        if days > acc:
            decision = algot(Points[:days])
        new_last_time = last_time + dt.timedelta(minutes = self.interval)
        print('-----------------------------------------------------------------------------------------')
        print('|                                                                                       |')
        print('|                                                                                       |')
        print('|                                                                                       |')
        #if self.Bought == True:
        if decision == 0:
            self.Bought = False
            print(colored("* Buy now or wait, price will went UP at {} *".format(new_last_time), 'green'))
            #new_action = TradeState.sell
        #else:
        elif decision == 1:
            self.Bought = True
            #new_action = TradeState.buy
            print(colored("* Sell now or wait, price will went DOWN at {} *".format(new_last_time), 'red'))
        print('|                                                                                       |')
        print('|                                                                                       |')
        print('|                                                                                       |')
        print('-----------------------------------------------------------------------------------------')
        # NOTE(review): new_action stays TradeState.none (the buy/sell
        # assignments above are commented out), so the emitted TradeAction is
        # always a no-op -- confirm whether this is intentional.
        trade_price = self.get_price(new_action, df.tail(), self.pair)
        # Get stop-loss
        #if new_action == TradeState.buy and self.stop_loss.calculate(close):
        #    print('stop-loss detected,..selling')
        #    new_action = TradeState.sell
        action = TradeAction(self.pair,
                             new_action,
                             amount=None,
                             rate=trade_price,
                             buy_sell_mode=self.buy_sell_mode)
        self.actions.append(action)
        return self.actions
    def prediction(self, sampleData, classData, trainStartIndex, numFeature, numTrainSample):
        """Return probability of price rise (mean vote of repeated trees)."""
        train_X, train_y = self.preparationTrainSample(sampleData, classData, trainStartIndex, numFeature, numTrainSample)
        X = np.array([sampleData[trainStartIndex:trainStartIndex + numFeature]])
        if self.standardizationFeatureFlag:
            train_X, X = self.standardizationFeature(train_X, X)
        y = []
        # average numStudyTrial independently fitted decision trees
        for i in range(0, self.numStudyTrial):
            clf = tree.DecisionTreeClassifier()
            clf.fit(train_X, train_y)
            y.append(clf.predict(X)[0])
        return sum(y) * 1.0 / len(y)
    def getAppreciationRate(self,price):
        """Translate a chart price series into appreciation rates (0-padded)."""
        return np.append(-np.diff(price) / price[1:].values,0)
    def quantizer(self, y):
        """Translate appreciation rates into -1 or 1 teacher labels."""
        return np.where(np.array(y) >= 0.0, 1, -1)
    def preparationTrainSample(self,sampleData,classData,trainStartIndex, numFeature, numTrainSample):
        """Prepare training samples as sliding windows starting one step ahead."""
        train_X = []
        train_y = []
        for i in range(numTrainSample):
            train_X.append(sampleData[trainStartIndex + i + 1:trainStartIndex + numFeature + i + 1])
            train_y.append(classData[trainStartIndex + i])
            print("Length of class data "+str(trainStartIndex + i + 1)+" : "+str(trainStartIndex + numFeature + i + 1))
        return np.array(train_X), np.array(train_y)
    def standardizationFeature(self, train_X, test_X):
        """Standardize features; the scaler is fitted on the training set only."""
        sc = StandardScaler()
        train_X_std = sc.fit_transform(train_X)
        test_X_std = sc.transform(test_X)
        return train_X_std, test_X_std
    def backTest(self, sampleData, classData, numFeature, numTrainSample, saveBackTestGraph):
        """Do back test and return the result as a one-row DataFrame.

        NOTE(review): the plotting branch references `plt`, `self.currentPair`
        and `self.workingDirPath`, none of which are defined in this module --
        confirm they exist before calling with saveBackTestGraph=True.
        """
        Y = []
        YPrediction = []
        fund = [self.backTestInitialFund]
        pastDay = 0
        accuracyUp = 0
        accuracyDown = 0
        for trainStartIndex in range(self.backTestDays):
            print("Train start index: "+str(trainStartIndex))
            yPrediction = self.quantizer(self.prediction(sampleData, classData, trainStartIndex, numFeature, numTrainSample))
            y = self.quantizer(classData[trainStartIndex - 1])
            Y.append(y.tolist())
            YPrediction.append(yPrediction.tolist())
            pastDay += 1
            # grow/shrink the fund depending on whether the call was right;
            # short-side gains only apply when margin trading is enabled
            if yPrediction == y:
                if yPrediction == 1:
                    accuracyUp += 1
                    fund.append(fund[pastDay - 1] * (1 + abs(self.appreciationRate_[trainStartIndex - 1]) - self.backTestSpread))
                else:
                    accuracyDown += 1
                    if self.marginTrade:
                        fund.append(fund[pastDay - 1] * (1 + abs(self.appreciationRate_[trainStartIndex - 1]) - self.backTestSpread))
                    else:
                        fund.append(fund[pastDay - 1])
            else:
                if yPrediction == 1:
                    fund.append(fund[pastDay - 1] * (1 - abs(self.appreciationRate_[trainStartIndex - 1]) - self.backTestSpread))
                else:
                    if self.marginTrade:
                        fund.append(fund[pastDay - 1] * (1 - abs(self.appreciationRate_[trainStartIndex - 1]) - self.backTestSpread))
                    else:
                        fund.append(fund[pastDay - 1])
        backTestAccuracyRateUp = float(accuracyUp) / sum(np.array(YPrediction)[np.where(np.array(YPrediction) == 1)])
        backTestAccuracyRateDown = -float(accuracyDown) / sum(np.array(YPrediction)[np.where(np.array(YPrediction) == -1)])
        trainStartIndex = 0
        backTestCurrentPrice = self.chartData_.open[trainStartIndex:trainStartIndex + self.backTestDays + 1]
        backTestCurrentPrice = backTestCurrentPrice[::-1].tolist()
        backTestDate = self.chartData_.date[trainStartIndex:trainStartIndex + self.backTestDays + 1]
        backTestDate = backTestDate[::-1].tolist()
        backTestFinalFund = fund[-1]
        backTestInitialCurrentPrice = backTestCurrentPrice[0]
        backTestFinalCurrentPrice = backTestCurrentPrice[-1]
        backTestIncreasedFundRatio = (backTestFinalFund - self.backTestInitialFund) / self.backTestInitialFund
        backTestIncreasedCurrentPriceRatio = (backTestFinalCurrentPrice - backTestInitialCurrentPrice) / backTestInitialCurrentPrice
        columnNames = ["AccuracyRateUp", "AccuracyRateDown",
                       "InitialFund", "FinalFund", "IncreasedFundRatio",
                       "InitialCurrentPrice", "FinalCurrentPrice", "IncreasedCurrentPriceRatio"]
        columnValues = [backTestAccuracyRateUp, backTestAccuracyRateDown,
                        self.backTestInitialFund, backTestFinalFund, backTestIncreasedFundRatio,
                        backTestInitialCurrentPrice, backTestFinalCurrentPrice, backTestIncreasedCurrentPriceRatio]
        backTestResult = pd.DataFrame(np.array([columnValues]), columns=columnNames)
        if saveBackTestGraph:
            fig1, ax1 = plt.subplots(figsize=(11, 6))
            p1, = ax1.plot(backTestDate, fund, "-ob")
            ax1.set_title("Back test (" + self.currentPair + ")")
            ax1.set_xlabel("Day")
            ax1.set_ylabel("Fund")
            plt.grid(fig1)
            ax2 = ax1.twinx()
            p2, = ax2.plot(backTestDate, backTestCurrentPrice, '-or')
            ax2.set_ylabel("Price[" + self.currentPair + "]")
            ax1.legend([p1, p2], ["Fund", "Price_" + self.currentPair], loc="upper left")
            plt.savefig(self.workingDirPath + "/backTest_" + self.currentPair + ".png", dpi=50)
            plt.close()
        self.backTestResult_ = backTestResult
        return backTestResult
| OlzhasAldabergenov/trading_bot_huobi | strategies/emasuperprediction.py | emasuperprediction.py | py | 13,415 | python | en | code | 1 | github-code | 13 |
42125658463 | import os.path
import pandas as pd
import skbio
import qiime2
from ._utilities import (_get_group_pairs, _extract_distance_distribution,
_visualize, _validate_metadata_is_superset,
_get_pairwise_differences, _stats_and_visuals,
_add_metric_to_metadata, _linear_effects,
_regplot_subplots_from_dataframe, _load_metadata,
_validate_input_values, _validate_input_columns,
_control_chart_subplots, _nmit,
_validate_is_numeric_column, _tabulate_matrix_ids,
_first_differences)
def pairwise_differences(output_dir: str, metadata: qiime2.Metadata,
                         group_column: str, metric: str, state_column: str,
                         state_1: str, state_2: str, individual_id_column: str,
                         parametric: bool=False, palette: str='Set1',
                         replicate_handling: str='error',
                         table: pd.DataFrame=None) -> None:
    """Test and visualize paired differences of a metric between two states.

    For each subject (``individual_id_column``) the metric's change between
    ``state_1`` and ``state_2`` is computed, stratified by ``group_column``.
    Writes stats tables, boxplots, and a ``pairs.tsv`` of the raw paired
    differences into ``output_dir``.
    """
    # find metric in metadata or derive from table and merge into metadata
    metadata = _add_metric_to_metadata(table, metadata, metric)
    _validate_input_values(metadata, metric, individual_id_column,
                           group_column, state_column, state_1, state_2)
    # calculate paired difference distributions
    pairs = {}
    pairs_summaries = {}
    errors = []
    pairs_summary = pd.DataFrame()
    group_names = metadata[group_column].unique()
    for group in group_names:
        # Pair each subject's state_1/state_2 samples; `error` collects
        # subjects that could not be paired (per replicate_handling).
        group_pairs, error = _get_group_pairs(
            metadata, group_value=group,
            individual_id_column=individual_id_column,
            group_column=group_column, state_column=state_column,
            state_values=[state_1, state_2],
            replicate_handling=replicate_handling)
        pairs[group], pairs_summaries[group] = _get_pairwise_differences(
            metadata, group_pairs, metric, individual_id_column, group_column)
        pairs_summary = pd.concat([pairs_summary, pairs_summaries[group]])
        errors.extend(error)
    pairs_summary.to_csv(os.path.join(output_dir, 'pairs.tsv'), sep='\t')
    # Calculate test statistics and generate boxplots
    y_label = 'Difference in {0} ({1} {2} - {1} {3})'.format(
        metric, state_column, state_2, state_1)
    _stats_and_visuals(
        output_dir, pairs, y_label, group_column, state_column, state_1,
        state_2, individual_id_column, errors, parametric, palette,
        replicate_handling, multiple_group_test=True, pairwise_tests=True,
        paired_difference_tests=True, boxplot=True)
def pairwise_distances(output_dir: str, distance_matrix: skbio.DistanceMatrix,
                       metadata: qiime2.Metadata, group_column: str,
                       state_column: str, state_1: str, state_2: str,
                       individual_id_column: str, parametric: bool=False,
                       palette: str='Set1', replicate_handling: str='error',
                       ) -> None:
    """Test and visualize within-subject distances between two states.

    For each subject, the distance between its ``state_1`` and ``state_2``
    samples is pulled out of ``distance_matrix`` and the resulting
    distributions are compared across groups. Writes stats tables,
    boxplots, and ``pairs.tsv`` into ``output_dir``.
    """
    metadata = _load_metadata(metadata)
    # metric is None: the compared value is the distance itself.
    _validate_input_values(metadata, None, individual_id_column, group_column,
                           state_column, state_1, state_2)
    # calculate pairwise distance distributions
    pairs = {}
    pairs_summaries = {}
    errors = []
    pairs_summary = pd.DataFrame()
    group_names = metadata[group_column].unique()
    for group in group_names:
        group_pairs, error = _get_group_pairs(
            metadata, group_value=group,
            individual_id_column=individual_id_column,
            group_column=group_column, state_column=state_column,
            state_values=[state_1, state_2],
            replicate_handling=replicate_handling)
        pairs[group], pairs_summaries[group] = _extract_distance_distribution(
            distance_matrix, group_pairs, metadata, individual_id_column,
            group_column)
        pairs_summary = pd.concat([pairs_summary, pairs_summaries[group]])
        errors.extend(error)
    pairs_summary.to_csv(os.path.join(output_dir, 'pairs.tsv'), sep='\t')
    # Calculate test statistics and generate boxplots
    _stats_and_visuals(
        output_dir, pairs, 'distance', group_column,
        state_column, state_1, state_2, individual_id_column, errors,
        parametric, palette, replicate_handling, multiple_group_test=True,
        pairwise_tests=True, paired_difference_tests=False, boxplot=True,
        plot_name='Pairwise distance boxplot')
def linear_mixed_effects(output_dir: str, metadata: qiime2.Metadata,
                         metric: str, state_column: str,
                         individual_id_column: str, group_categories: str=None,
                         random_effects: str=None, table: pd.DataFrame=None,
                         palette: str='Set1', lowess: bool=False, ci: int=95
                         ) -> None:
    """Fit a linear mixed-effects model of ``metric`` over ``state_column``.

    ``group_categories`` and ``random_effects`` are comma-separated column
    lists. Writes the model summary, per-term results, regression
    scatterplots, and the raw data used into ``output_dir``.
    """
    raw_data_columns = [metric, state_column, individual_id_column]
    # split group_categories into list of categories
    if group_categories is not None:
        group_categories = group_categories.split(",")
        raw_data_columns.extend(group_categories)
    if random_effects is not None:
        random_effects = random_effects.split(",")
        raw_data_columns.extend(random_effects)
    # find metric in metadata or derive from table and merge into metadata
    metadata = _add_metric_to_metadata(table, metadata, metric)
    _validate_input_columns(metadata, individual_id_column, group_categories,
                            state_column, metric)
    # separately validate random_effects, since these can recycle state_column
    # and individual_id_column and group_column values, but not metric
    _validate_input_columns(metadata, None, random_effects, None, metric)
    # let's force states to be numeric
    _validate_is_numeric_column(metadata, state_column)
    # Generate LME model summary
    model_summary, model_results = _linear_effects(
        metadata, metric, state_column, group_categories,
        individual_id_column, random_effects=random_effects)
    # Plot dependent variable as function of independent variables
    g = _regplot_subplots_from_dataframe(
        state_column, metric, metadata, group_categories, lowess=lowess,
        ci=ci, palette=palette)
    # summarize parameters and visualize
    summary = pd.Series(
        [metric, group_categories, state_column,
         individual_id_column, random_effects],
        index=['Metric', 'Group column', 'State column',
               'Individual ID column', 'Random effects'],
        name='Linear mixed effects parameters')
    # set() dedupes columns that were named more than once (e.g. a column
    # used both as a fixed and a random effect).
    raw_data = metadata[list(set(raw_data_columns))]
    _visualize(output_dir, model_summary=model_summary,
               model_results=model_results, plot=g, summary=summary,
               raw_data=raw_data,
               plot_name='Regression scatterplots')
def volatility(output_dir: str, metadata: qiime2.Metadata, group_column: str,
               metric: str, state_column: str, individual_id_column: str,
               table: pd.DataFrame=None, palette: str='Set1', ci: int=95,
               plot_control_limits: bool=True, xtick_interval: int=None,
               yscale: str='linear', spaghetti: str='no') -> None:
    """Plot control charts of a metric's volatility across states per group.

    Writes the control-chart figure, the chart parameters (including the
    global mean/std used for control limits), and the raw data into
    ``output_dir``.
    """
    # find metric in metadata or derive from table and merge into metadata
    metadata = _add_metric_to_metadata(table, metadata, metric)
    _validate_input_columns(metadata, individual_id_column, group_column,
                            state_column, metric)
    # let's force states to be numeric
    _validate_is_numeric_column(metadata, state_column)
    # plot control charts
    chart, global_mean, global_std = _control_chart_subplots(
        state_column, metric, metadata, group_column, individual_id_column,
        ci=ci, palette=palette, plot_control_limits=plot_control_limits,
        xtick_interval=xtick_interval, yscale=yscale, spaghetti=spaghetti)
    # summarize parameters and visualize
    summary = pd.Series(
        [metric, group_column, state_column, individual_id_column, global_mean,
         global_std],
        index=['Metric', 'Group column', 'State column',
               'Individual ID column', 'Global mean',
               'Global standard deviation'],
        name='Volatility test parameters')
    raw_data = metadata[[
        metric, state_column, individual_id_column, group_column]]
    _visualize(output_dir, plot=chart, summary=summary, raw_data=raw_data,
               plot_name='Control charts')
def nmit(table: pd.DataFrame, metadata: qiime2.Metadata,
         individual_id_column: str, corr_method: str="kendall",
         dist_method: str="fro") -> skbio.DistanceMatrix:
    """Run nonparametric microbial interdependence testing (NMIT).

    Builds a between-subject distance matrix from per-subject feature
    correlation matrices.
    """
    # Metadata must describe every sample in the table; then restrict it to
    # exactly the table's samples before validating the subject-id column.
    md = _load_metadata(metadata)
    _validate_metadata_is_superset(md, table)
    md = md[md.index.isin(table.index)]
    _validate_input_columns(md, individual_id_column, None, None, None)
    return _nmit(
        table, md, individual_id_column=individual_id_column,
        corr_method=corr_method, dist_method=dist_method)
def first_differences(metadata: qiime2.Metadata, state_column: str,
                      individual_id_column: str, metric: str,
                      replicate_handling: str='error', baseline: float=None,
                      table: pd.DataFrame=None) -> pd.Series:
    """Compute each subject's change in ``metric`` between successive states
    (or against a fixed ``baseline`` state)."""
    # The metric either comes from the supplied feature table or must
    # already exist as a numeric metadata column.
    if table is None:
        md = _load_metadata(metadata)
        _validate_is_numeric_column(md, metric)
    else:
        _validate_metadata_is_superset(metadata.to_dataframe(), table)
        md = _add_metric_to_metadata(table, metadata, metric)
    _validate_input_columns(
        md, individual_id_column, None, state_column, metric)
    return _first_differences(
        md, state_column, individual_id_column, metric,
        replicate_handling, baseline=baseline, distance_matrix=None)
def first_distances(distance_matrix: skbio.DistanceMatrix,
                    metadata: qiime2.Metadata, state_column: str,
                    individual_id_column: str, baseline: float=None,
                    replicate_handling: str='error') -> pd.Series:
    """Compute each subject's distance between successive states (or from a
    fixed ``baseline`` state) using a precomputed distance matrix."""
    md = _load_metadata(metadata)
    # Every ID in the distance matrix must be described by the metadata.
    _validate_metadata_is_superset(
        md, _tabulate_matrix_ids(distance_matrix))
    # "Distance" is not a metadata column, so no metric is validated here.
    _validate_input_columns(
        md, individual_id_column, None, state_column, None)
    return _first_differences(
        md, state_column, individual_id_column, metric=None,
        replicate_handling=replicate_handling, baseline=baseline,
        distance_matrix=distance_matrix)
| gregcaporaso/q2-longitudinal | q2_longitudinal/_longitudinal.py | _longitudinal.py | py | 10,952 | python | en | code | null | github-code | 13 |
# Build script for the cmlxztp package.
import os
import sys
# setuptools is a hard build requirement; fail with a helpful message if absent.
try:
    from setuptools import setup, find_packages
    from setuptools.command.install_lib import install_lib as InstallLib
except ImportError:
    print("cmlxztp now needs setuptools in order to build. Install it using"
          " your package manager (usually python-setuptools) or via pip (pip"
          " install setuptools).")
    sys.exit(1)
# Make the in-tree package importable so the version module can be read
# without installing the package first.
sys.path.insert(0, os.path.abspath('lib'))
from cmlxztp import version as NV
# Version/release strings consumed by the setup() call below.
curr_ver = NV.CMLXZTP_VER
curr_rel = NV.CMLXZTP_REL
def _get_conf_files():
content = os.listdir('conf')
return ['conf/%s' % file_name for file_name in content]
class install_lib(InstallLib):
    """
    setuptools ``install_lib`` command that installs only compiled files:
    the ``.py`` sources are removed after installation and excluded from
    the command's reported outputs.
    """
    def run(self):
        """
        Install as usual, then remove the .py files from the destination
        folder so only bytecode remains.
        """
        InstallLib.run(self)
        # NOTE(review): self.install() is invoked again here (after run()
        # above) to obtain the list of installed files — confirm this
        # double-copy is intentional before changing it.
        outfiles = self.install()
        for file_name in outfiles:
            if file_name.endswith(".py"):
                os.remove(file_name)
    def get_outputs(self):
        """Return the list of files that would be installed, with the
        .py files filtered out to match what run() leaves on disk.
        """
        output = InstallLib.get_outputs(self)
        outputs_without_py = [file_name for file_name in output
                              if not file_name.endswith(".py")]
        return outputs_without_py
# Long description for PyPI comes straight from the README.
with open('README.rst', 'r') as f:
    long_description = f.read()
# Runtime dependencies come from requirements.txt; an empty file means the
# source tree is incomplete, so abort the build.
with open('requirements.txt') as requirements_file:
    install_requirements = requirements_file.read().splitlines()
    if not install_requirements:
        print("Unable to read requirements from the requirements.txt file"
              "That indicates this copy of the source code is incomplete.")
        sys.exit(2)
setup(name='cmlxztp',
      version=curr_ver,
      description='NEO Cumulus ZTP Manager',
      long_description=long_description,
      url='http://www.mellanox.com/content/pages.php?pg='
      'products_dyn&product_family=100&menu_section=55',
      author='Samer Deeb',
      author_email='samerd@mellanox.com',
      # Package sources live under lib/.
      packages=find_packages('lib'),
      package_dir={'': 'lib'},
      # Install logrotate config, conf files, and an empty data dir.
      data_files=[('/etc/logrotate.d', ['conf/cmlxztp']),
                  ('/etc/cmlxztp', _get_conf_files()),
                  ('/var/lib/cmlxztp/data', []), ],
      scripts=["scripts/cmlxztp", 'scripts/cmlxztp_uninstall.sh'],
      install_requires=install_requirements,
      # Use the custom install_lib above to strip .py sources on install.
      cmdclass={'install_lib': install_lib})
| Mellanox/cmlx_ztp | setup.py | setup.py | py | 2,477 | python | en | code | 0 | github-code | 13 |
27152236144 | from unicodedata import name
from xml.etree.ElementTree import Comment
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.template import loader
from django.shortcuts import render, get_object_or_404, redirect
from user.models import UserAccount
from home.views import repository
from .models import Repository
from django.contrib.auth.models import User
from pullrequest.models import Pullrequest
from milestone.models import Milestone
from issue.models import Issue
from branch.models import Branch
from commit.models import Commit
from label.models import Label
from django.contrib import messages
def index(request, id):
    """Render the repository detail page.

    Pulls milestones, pull requests, issues, branches, the default
    branch's commits, and the watch/star/fork user lists via
    get_repo_infos(), and flags the page when the repository is a fork of
    another user's repository.

    Fix: removes an unused loader.get_template() call from the original.
    """
    repository = Repository.objects.get(id=id)
    (my_milestones, my_pullrequests, issues, branch_list, default_branch,
     commit_list, repo_watchers, repo_stargazers, repo_forks,
     forked_from) = get_repo_infos(request, id)
    # Show a "Forked repo" badge unless the fork origin is the viewer.
    show = '' if forked_from == request.user else 'Forked repo'
    return render(request, "repository/index.html", {
        'repository': repository,
        'milestones': my_milestones,
        'pullrequests': my_pullrequests,
        'issues': issues,
        'branch_list': branch_list,
        'commit_list': commit_list,
        'selected_branch': default_branch,
        'logged_user_id': request.user.id,
        'watchers': repo_watchers,
        'stargazers': repo_stargazers,
        'forks': repo_forks,
        'forked_from': forked_from,
        'show': show})
def get_repo_infos(request,id):
    """Gather everything the repository detail template needs.

    Returns a 10-tuple: (milestones, pullrequests, issues, branch_list,
    default_branch, commit_list, watchers, stargazers, forks, forked_from).
    Raises IndexError if the repository has no default branch.
    """
    repository = Repository.objects.get(id=id)
    my_milestones = get_my_milestones(request,id)
    my_pullrequests = get_my_pullrequests(request, id)
    issues = get_issues_by_repo(request, id)
    branch_list = Branch.objects.all().filter(repository = id)
    # [0] assumes exactly one default branch exists (created with the repo).
    default_branch = Branch.objects.all().filter(is_default = True, repository = repository)[0]
    commit_list = Commit.objects.all().filter(branch = default_branch)
    watchers = User.objects.all().filter(user_watchers = repository)
    stargazers = User.objects.all().filter(user_stargazers = repository)
    forks = User.objects.all().filter(user_forks = repository)
    # Only forked_from is returned; forkers/forked_repo/repo_copy are unused here.
    forkers,forked_from, forked_repo, repo_copy = find_forkers_info(request,id, repository)
    return my_milestones, my_pullrequests,issues,branch_list,default_branch,commit_list,watchers,stargazers,forks,forked_from
def get_my_milestones(request, id):
    """Return the milestones belonging to the repository with the given id.

    Fix: the original fetched every Milestone in the database and filtered
    by repository id in Python; filter in the database instead.
    """
    return list(Milestone.objects.filter(repository_id=id))
def get_my_pullrequests(request, id):
    """Return the pull requests opened against the given repository.

    NOTE(review): an identical duplicate of this function is defined again
    further down in this module and shadows this definition.
    """
    repo = get_object_or_404(Repository, id=id)
    return Pullrequest.objects.filter(prRepository=repo)
def get_issues_by_repo(request, id):
    """Return all issues filed against the given repository (404 if the
    repository does not exist)."""
    repo = get_object_or_404(Repository, id=id)
    return Issue.objects.filter(repository=repo)
@login_required(login_url="login")
def newRepository(request):
    # Display the empty "create repository" form.
    return render(request, "repository/newRepository.html")
@login_required(login_url="login")
def addRepository(request):
    """Create a new repository owned by the logged-in user.

    Validates that a name was provided, seeds the default label set and a
    default 'master' branch, and registers the creator as developer and
    watcher.

    Fixes vs. original: non-POST requests returned None (HTTP 500) — now
    the creation form is shown; removed a redundant ``name is not None``
    check (POST values are strings) and a redundant re-fetch of the
    just-saved repository.
    """
    if request.method != 'POST':
        return render(request, "repository/newRepository.html")
    name = request.POST['name']
    status = request.POST['status']
    creator = request.user
    if not name:
        return render(request, "repository/newRepository.html",
                      {"errorTitle": "Please enter name!"})
    newRepository = Repository(name=name, status=status, creator=creator)
    newRepository.save()
    messages.success(request, 'Repository has been created.')
    # The creator is both a developer and a watcher of their own repository.
    newRepository.developers.add(creator)
    newRepository.watchers.add(creator)
    add_initial_labels(newRepository)
    # Every repository starts with a default 'master' branch.
    Branch.objects.create(
        name='master',
        is_default=True,
        repository=newRepository)
    return redirect("all_repositories")
def add_initial_labels(newRepository):
    """Seed a newly created repository with GitHub-style default labels.

    Fix: the original stored the misspelled label name 'enhancment';
    corrected to 'enhancement' (matching GitHub's default label set), and
    the seven near-identical statements were collapsed into a data table.
    """
    # (name, description, color)
    defaults = [
        ('bug', "Something isn't working", '#ff2e1f'),
        ('documentation', "Improvements or additions to documentation",
         '#0073ff'),
        ('enhancement', "New feature or request", '#30feff'),
        ('good first issue', "Good first issue", '#8974c5'),
        ('help wanted', "Extra attention is needed", '#ffee00'),
        ('question', "Further information is requested", '#e816ff'),
        ('invalid', "This doesn't seem right", '#7efa19'),
    ]
    for label_name, description, color in defaults:
        Label(name=label_name, description=description, color=color,
              repository=newRepository).save()
@login_required(login_url="login")
def transferToEditRepository(request, id):
    """Show the edit form for a repository; only its creator may open it."""
    repo = Repository.objects.get(id=id)
    # Guard clause: anyone other than the creator gets a 401.
    if request.user.id != repo.creator_id:
        return HttpResponse('401 Unauthorized', status=401)
    return render(request, "repository/editRepository.html",
                  {'repository': repo})
@login_required(login_url="login")
def editRepository(request):
    """Update a repository's name and status from the edit form.

    Fix (security): the original performed no authorization check, so any
    logged-in user could edit any repository by POSTing its id. Only the
    creator may edit, mirroring transferToEditRepository and
    deleteRepository.
    """
    id = request.POST['id']
    repo = Repository.objects.get(id=id)
    if request.user.id != repo.creator_id:
        return HttpResponse('401 Unauthorized', status=401)
    repo.name = request.POST['name']
    repo.status = request.POST['status']
    repo.save()
    messages.success(request, 'Repository has been updated.')
    return redirect("/repository/all_repositories")
@login_required(login_url="login")
def deleteRepository(request,id):
    """Delete a repository (creator only), cleaning up fork relationships.

    Three cases: the repo is the origin of a fork, the repo is itself a
    fork copy, or it is a plain repository (pull requests are detached
    first).
    """
    repo = Repository.objects.get(id=id)
    # NOTE(review): `forks` is assigned but never read below.
    forks = User.objects.all().filter(user_forks = repo)
    forkers,forked_from, forked_repo, repo_copy = find_forkers_info(request,id, repo)
    if request.user.id == repo.creator_id:
        if (forked_repo is not None and forked_repo.id == repo.id):
            # Deleting the fork ORIGIN: clear fork links on both sides.
            forked_repo.forks.clear()
            repo_copy.forks.clear()
            forked_repo.delete()
        elif (repo_copy is not None and repo_copy.id == repo.id):
            # Deleting a fork COPY: detach its creator from the origin's
            # fork list before removing the copy.
            user = User.objects.get(id=repo_copy.creator.id)
            forked_repo.forks.remove(user)
            repo_copy.forks.clear()
            repo_copy.delete()
        else: # this part is for a regular (non-forked) repo
            # NOTE(review): pr.prRepository is set to None but pr.save() is
            # never called, so the detachment is not persisted — confirm.
            pullrequests = Pullrequest.objects.all()
            for pr in pullrequests:
                if pr.prRepository == repo:
                    pr.prRepository = None
            repo.delete()
        messages.success(request, 'Repository has been deleted.')
        return redirect("/repository/all_repositories")
    else:
        return HttpResponse('401 Unauthorized', status=401)
def get_my_pullrequests(request, id):
    # NOTE(review): this is a byte-for-byte duplicate of
    # get_my_pullrequests defined earlier in this module; Python keeps this
    # later definition. One of the two copies should be deleted.
    repository = get_object_or_404(Repository, id=id)
    pullrequests = Pullrequest.objects.all().filter(prRepository=repository)
    return pullrequests
@login_required(login_url="login")
def all_repositories(request):
    """List the repositories created by the logged-in user.

    Used on the profile page. Only repositories the user created are
    shown; repositories where they are merely a developer are not (a
    known limitation noted in the original).
    """
    owned = Repository.objects.filter(creator=request.user)
    return render(request, 'repository/all_repositories.html',
                  {'all_repositories': owned})
def repo_branch(request, id, branch_id):
    """Render the repository page with a specific branch selected.

    Fix: removes an unused loader.get_template() call from the original.
    """
    repository = Repository.objects.get(id=id)
    branch = get_object_or_404(Branch, id=branch_id)
    return render(request, "repository/index.html", {
        'repository': repository,
        'milestones': get_my_milestones(request, id),
        'pullrequests': get_my_pullrequests(request, id),
        'issues': get_issues_by_repo(request, id),
        'branch_list': Branch.objects.filter(repository=id),
        # Commits shown are those of the selected branch, not the default.
        'commit_list': Commit.objects.filter(branch=branch),
        'selected_branch': branch, })
@login_required(login_url="login")
def watchRepository(request, id):
    """Toggle the logged-in user's "watch" status on a repository.

    Fix: the original materialized the full watcher list and fetched the
    User row a second time just for a membership test; use an EXISTS
    query and request.user directly instead.
    """
    repository = Repository.objects.get(id=id)
    if repository.watchers.filter(id=request.user.id).exists():
        repository.watchers.remove(request.user)
    else:
        repository.watchers.add(request.user)
    return redirect('/repository/' + str(repository.id))
@login_required(login_url="login")
def watchers(request, id):
    """List a repository's watchers (stargazers/forkers are supplied for
    the page's sibling tabs)."""
    repository = Repository.objects.get(id=id)
    context = {
        "repository": repository,
        "watchers": User.objects.filter(user_watchers=repository),
        "stargazers": User.objects.filter(user_stargazers=repository),
        "forkers": User.objects.filter(user_forks=repository),
    }
    return render(request, 'repository/watchers.html', context)
@login_required(login_url="login")
def starRepository(request, id):
    """Toggle the logged-in user's star on a repository.

    Fix: the original materialized the full stargazer list and fetched the
    User row a second time just for a membership test; use an EXISTS
    query and request.user directly instead.
    """
    repository = Repository.objects.get(id=id)
    if repository.stargazers.filter(id=request.user.id).exists():
        repository.stargazers.remove(request.user)
    else:
        repository.stargazers.add(request.user)
    return redirect('/repository/' + str(repository.id))
@login_required(login_url="login")
def stargazers(request, id):
    """List a repository's stargazers (watchers/forkers are supplied for
    the page's sibling tabs)."""
    repository = Repository.objects.get(id=id)
    context = {
        "repository": repository,
        "stargazers": User.objects.filter(user_stargazers=repository),
        "watchers": User.objects.filter(user_watchers=repository),
        "forkers": User.objects.filter(user_forks=repository),
    }
    return render(request, 'repository/stargazers.html', context)
@login_required(login_url="login")
def forkRepository(request,id):
    """Fork a repository for the logged-in user.

    Creates a copy of the repository (with default labels, all branches
    and their commits) and links both repositories' fork lists. Users
    cannot fork their own repository or fork the same repository twice;
    in those cases the repository page is re-rendered with a message.
    """
    repository = Repository.objects.get(id=id)
    # NOTE(review): `repositories` is assigned but never read below.
    repositories = Repository.objects.all().filter(creator=request.user)
    forks = User.objects.all().filter(user_forks = repository)
    user = User.objects.get(id=request.user.id)
    if (repository.creator == user):
        # Forking one's own repository is not allowed.
        message = 'You can not fork your own repository!'
        my_milestones, my_pullrequests, issues,branch_list,default_branch,commit_list,watchers,stargazers,forks,forked_from_user = get_repo_infos(request,id)
        return render(request, "repository/index.html", {'repository':repository,'milestones': my_milestones,'pullrequests': my_pullrequests,
        'issues': issues,'branch_list': branch_list,'commit_list': commit_list,'selected_branch': default_branch,'watchers':watchers,
        'stargazers': stargazers,'forks':forks,'forked_from': forked_from_user,'message':message})
    newRepository = None
    if request.user not in forks:
        # Create the fork copy and make the forker its sole developer.
        newRepository = Repository(name = repository.name, status = repository.status, creator = request.user)
        newRepository.save()
        add_initial_labels(newRepository)
        newRepository.developers.add(request.user)
        # Copy every branch (preserving the default flag) and its commits.
        repo_branches = Branch.objects.all().filter(repository = repository)
        for branch in repo_branches:
            if (branch.is_default):
                newBranch = Branch.objects.create(name = branch.name, is_default = True , repository = newRepository )
                newBranch.save()
            else:
                newBranch = Branch.objects.create(name = branch.name, is_default = False , repository = newRepository )
            branch_commits = Commit.objects.all().filter(branch = branch)
            for commit in branch_commits:
                newCommit = Commit.objects.create(message = commit.message, date_time = commit.date_time,
                hash_id = commit.hash_id, branch = newBranch, author = commit.author, repository = newRepository)
                newCommit.save()
        # Record the fork relationship on both origin and copy.
        repository.forks.add(user)
        newRepository.forks.add(user)
    else:
        # Re-forking an already-forked repository is not allowed.
        message = 'You have already forked this repo'
        my_milestones, my_pullrequests, issues,branch_list,default_branch,commit_list,watchers,stargazers,forks,forked_from_user = get_repo_infos(request,id)
        return render(request, "repository/index.html", {'repository':repository,'milestones': my_milestones,'pullrequests': my_pullrequests,
        'issues': issues,'branch_list': branch_list,'commit_list': commit_list,'selected_branch': default_branch,'watchers':watchers,
        'stargazers': stargazers,'forks':forks,'forked_from': forked_from_user,'message':message})
    return redirect('/repository/'+ str(newRepository.id))
@login_required(login_url="login")
def forkers(request, id):
    """List users who forked the repository, with fork-provenance info."""
    repository = Repository.objects.get(id=id)
    repo_watchers = User.objects.filter(user_watchers=repository)
    repo_stargazers = User.objects.filter(user_stargazers=repository)
    fork_users, forked_from, forked_repo, repo_copy = find_forkers_info(
        request, id, repository)
    # Label the page as a fork unless the fork origin is the viewer.
    banner = 'Forked repo' if forked_from != request.user else ''
    return render(request, 'repository/forkers.html', {
        "repository": repository, "watchers": repo_watchers,
        "stargazers": repo_stargazers, "forks": fork_users,
        "forked_from": forked_from, "forked_repo": forked_repo,
        "repo_copy": repo_copy, "show": banner})
def find_forkers_info(request,id,repository):
    """Work out a repository's fork provenance.

    Returns (forkers, forked_from, forked_repo, repo_copy) where
    forkers is the queryset of users who forked `repository`,
    forked_from is the origin repo's creator (or None), forked_repo the
    origin repository, and repo_copy the forked copy.

    NOTE(review): origin/copy matching relies on both repositories sharing
    the same name (filter(name=repo)); repositories with colliding names
    from unrelated users would confuse this logic — confirm.
    """
    forkers = User.objects.all().filter(user_forks = repository)
    forked_from = None
    forked_repo = None
    repo_copy = None
    for f in forkers:
        if (f.id == repository.creator.id):
            # This forker is the repo's own creator: `repository` is the
            # origin; look for a copy with the same name by another user.
            repo = Repository.objects.get(id=repository.id)
            repos_with_same_name = Repository.objects.all().filter(name = repo)
            for r in repos_with_same_name:
                if (r.creator.id != repo.creator.id):
                    forked_from = get_object_or_404(User, id=r.creator.id)
                    forked_repo = r
                    repo_copy = repo
                    break
                else:
                    forked_from = get_object_or_404(User, id=r.creator.id)
        else:
            # This forker is a different user: `repository` may be the copy;
            # find the matching origin by name.
            repo = Repository.objects.get(id = repository.id)
            repos_with_same_name = Repository.objects.all().filter(name = repo)
            if (f.id != repo.creator.id):
                repos = Repository.objects.all().filter(creator = f, name = repo.name)
                repo_copy = repos[0]
            for r in repos_with_same_name:
                if (r.creator.id != repo.creator.id):
                    forked_from = get_object_or_404(User, id=r.creator.id)
                    forked_repo = r
                else:
                    forked_from = get_object_or_404(User, id=repo.creator.id)
                    forked_repo = repo
                    break
    return forkers, forked_from, forked_repo, repo_copy
@login_required(login_url="login")
def collaborators(request, id):
    """Show the collaborator-management page for a repository."""
    repository = Repository.objects.get(id=id)
    current = User.objects.all().filter(user_developers=repository)
    # The creator is implicitly a developer; never list them as a
    # collaborator nor as an addition candidate.
    only_collaborators = [u for u in current
                          if u.id != repository.creator.id]
    candidates = [u for u in User.objects.all()
                  if u not in current and u.id != repository.creator.id]
    return render(request, "repository/collaborators.html", {
        'repository': repository,
        'collaborators': only_collaborators,
        'selected_developer': User.objects.first(),
        'developers': candidates,
        'logged_user_id': request.user.id})
@login_required(login_url="login")
def repo_developer(request, id, developer_id):
    """Render the collaborators page with a specific developer selected.

    Fixes: removes two pieces of dead code from the original — an unused
    loader.get_template() call and an unused (and broken) filter()
    expression that compared User objects against the creator's integer
    id.
    """
    repository = Repository.objects.get(id=id)
    current = User.objects.all().filter(user_developers=repository)
    # The creator is implicitly a developer; never list them.
    only_collaborators = [u for u in current
                          if u.id != repository.creator.id]
    candidates = [u for u in User.objects.all()
                  if u not in current and u.id != repository.creator.id]
    selected_developer = get_object_or_404(User, id=developer_id)
    return render(request, "repository/collaborators.html", {
        'repository': repository,
        'selected_developer': selected_developer,
        'collaborators': only_collaborators,
        'developers': candidates})
@login_required(login_url="login")
def add_collaborator(request, id, developer_id):
    """Add a developer as collaborator and re-render the management page."""
    repository = Repository.objects.get(id=id)
    developer = User.objects.get(id=developer_id)
    all_users = User.objects.all()
    current = add_collaborator_on_repository(repository, developer)
    only_collaborators = [u for u in current
                          if u.id != repository.creator.id]
    candidates = [u for u in all_users
                  if u not in current and u.id != repository.creator.id]
    # Pre-select the next developer that can still be added, if any.
    if candidates:
        selected_developer = candidates[0]
    else:
        selected_developer = User.objects.first()
    return render(request, "repository/collaborators.html", {
        'repository': repository,
        'selected_developer': selected_developer,
        'collaborators': only_collaborators,
        'developers': candidates})
def add_collaborator_on_repository(repository, developer):
    """Persist the repository, register the developer as collaborator and
    return the repository's full developer queryset."""
    repository.save()
    repository.developers.add(developer)
    return User.objects.filter(user_developers=repository)
@login_required(login_url="login")
def remove_collaborator(request, id, developer_id):
    """Remove a collaborator and re-render the management page."""
    repository = Repository.objects.get(id=id)
    developer = User.objects.get(id=developer_id)
    remove_collaborato_from_repository(repository, developer)
    current = User.objects.all().filter(user_developers=repository)
    only_collaborators = [u for u in current
                          if u.id != repository.creator.id]
    candidates = [u for u in User.objects.all()
                  if u not in current and u.id != repository.creator.id]
    # The just-removed developer stays selected in the page.
    selected_developer = get_object_or_404(User, id=developer_id)
    return render(request, "repository/collaborators.html", {
        'repository': repository,
        'selected_developer': selected_developer,
        'collaborators': only_collaborators,
        'developers': candidates})
def remove_collaborato_from_repository(repository, developer):
    # Detach the developer from the repository and return the remaining
    # developer queryset.
    # NOTE(review): the function name has a typo ('collaborato'); it is
    # kept because callers in this module reference it by this name.
    repository.developers.remove(developer)
    collaborators = User.objects.all().filter(user_developers = repository)
    return collaborators
def search_in_this_repo(request, id):
    """Search a repository's issues and commits for the posted words.

    Fix: the original returned None (HTTP 500) on non-POST requests; we
    now redirect back to the repository page. Manual id-collection loops
    were replaced with comprehensions.
    """
    repository = Repository.objects.get(id=id)
    if request.method != 'POST':
        return redirect('/repository/' + str(repository.id))
    searchedWord = request.POST['search']
    words = searchedWord.split()
    issues = checkIssues(words, repository)
    commits = checkCommits(words, repository)
    return render(request, 'repository/searchedRepoResult.html', {
        "foundCommits": [c.id for c in commits],
        "commits": commits,
        "foundIssues": [i.id for i in issues],
        "issues": issues,
        "repository": repository,
        "searchedWords": searchedWord})
def checkIssues(words, repository):
    """Return the repository's issues whose title or description contains
    any of the search words (case-insensitive), preserving query order.

    Fix: the original's redundant ``len(issues) == 0`` branch and
    per-word dedup check are replaced by a single any() test per issue.
    """
    matches = []
    for issue in Issue.objects.filter(repository=repository):
        title = issue.issue_title.lower()
        description = issue.description.lower()
        if any(w.lower() in title or w.lower() in description
               for w in words):
            matches.append(issue)
    return matches
def checkCommits(words, repository):
    """Return commits on any branch of the repository whose message
    contains any of the search words (case-insensitive).

    Fix: the original iterated EVERY commit in the database once per
    branch, comparing branch ids in Python; filter commits by branch in
    the database instead. The redundant ``len(commits) == 0`` branch is
    folded into the membership check.
    """
    matches = []
    for branch in Branch.objects.filter(repository=repository):
        for commit in Commit.objects.filter(branch=branch):
            message = commit.message.lower()
            if (any(w.lower() in message for w in words)
                    and commit not in matches):
                matches.append(commit)
    return matches
def searched_repo_issues(request, id):
    """Re-render the repository search results on the issues tab."""
    if request.method == 'POST':
        (repository, issues, issuesIds, commits, commitsIds,
         searchedWords) = find_all_searched_items(request, id)
        return render(request, 'repository/searchedRepoIssues.html', {
            "foundCommits": commitsIds, "commits": commits,
            "foundIssues": issuesIds, "issues": issues,
            "repository": repository, "searchedWords": searchedWords})
def searched_repo_commits(request, id):
    """Re-render the repository search results on the commits tab."""
    if request.method == 'POST':
        (repository, issues, issuesIds, commits, commitsIds,
         searchedWords) = find_all_searched_items(request, id)
        return render(request, 'repository/searchedRepoCommits.html', {
            "foundCommits": commitsIds, "commits": commits,
            "foundIssues": issuesIds, "issues": issues,
            "repository": repository, "searchedWords": searchedWords})
def findIssues(request):
    """Rebuild the Issue list from the 'foundIssues' id-list string posted
    back by the search-results templates (e.g. "[3, 7]")."""
    raw = request.POST.get('foundIssues')
    found = []
    if raw != '[]':
        # The templates render a Python list literal; strip the brackets
        # and split on the ", " separator to recover the ids.
        for issue_id in raw.strip('][').split(', '):
            found.append(get_object_or_404(Issue, id=issue_id))
    return found
def findIssuesIds(request):
    """Return just the ids of the issues recovered by findIssues()."""
    return [issue.id for issue in findIssues(request)]
def findCommits(request):
    """Rebuild the Commit list from the 'foundCommits' id-list string
    posted back by the search-results templates (e.g. "[3, 7]")."""
    raw = request.POST.get('foundCommits')
    found = []
    if raw != '[]':
        # The templates render a Python list literal; strip the brackets
        # and split on the ", " separator to recover the ids.
        for commit_id in raw.strip('][').split(', '):
            found.append(get_object_or_404(Commit, id=commit_id))
    return found
def findCommitsIds(request):
    """Return just the ids of the commits recovered by findCommits()."""
    return [commit.id for commit in findCommits(request)]
def find_all_searched_items(request,id):
repository = Repository.objects.get(id=id)
issues = findIssues(request)
issuesIds = findIssuesIds(request)
commits = findCommits(request)
commitsIds = findCommitsIds(request)
searchedWords = request.POST.get('searchedWords')
return repository, issues, issuesIds, commits, commitsIds,searchedWords
| marijamilanovic/UksGitHub | Uks/repository/views.py | views.py | py | 23,726 | python | en | code | 0 | github-code | 13 |
# Definition for a Node.
class Node:
    """A node in an n-ary tree: a value plus a list of child nodes."""
    def __init__(self, val=None, children=None):
        # children defaults to None (not []) so no mutable default is shared.
        self.val = val
        self.children = children
class Solution:
    def maxDepth(self, root: 'Node') -> int:
        """Return the maximum depth of an n-ary tree (iterative DFS).

        Depth is the number of nodes on the longest root-to-leaf path;
        an empty tree has depth 0.

        Fixes vs. original: ``!= None`` replaced with the idiomatic
        ``is not None`` (PEP 8), and the manual depth comparison replaced
        with max().
        """
        if not root:
            return 0
        # Stack of (node, depth-of-node) pairs.
        stack = [(root, 1)]
        max_depth = 0
        while stack:
            node, depth = stack.pop()
            max_depth = max(max_depth, depth)
            # Leaf nodes may carry children=None rather than [].
            if node.children is not None:
                for child in node.children:
                    stack.append((child, depth + 1))
        return max_depth
| dark-shade/CompetitiveCoding | LeetCode/332/559.py | 559.py | py | 648 | python | en | code | 0 | github-code | 13 |
34315934793 | from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
# Train and evaluate a 5-nearest-neighbours classifier on the iris dataset,
# holding out 10% of the samples for testing.
# NOTE(review): train_test_split has no random_state, so the split (and the
# printed predictions) differ between runs — presumably intentional.
iris = load_iris()
train_X, test_X, train_y, test_y = train_test_split(
    iris['data'], iris['target'], test_size=0.1)
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(train_X, train_y)
predictions = knn.predict(test_X)
print(predictions)
print(test_y)
| fauwara/aiml | p8/test.py | test.py | py | 408 | python | en | code | 0 | github-code | 13 |
22483786873 | import os
import requests
# Fetch today's pollen count via an Apify crawler and push it as a Pushover
# notification.  Script is fully sequential: configure -> crawl -> notify.
# get environment variables
# NOTE(review): these three fall back to '' (producing an invalid Apify URL),
# while the Pushover keys below use os.environ and raise KeyError if unset —
# inconsistent, but left as-is.
APIFY_USER_ID = os.getenv('APIFY_USER_ID', '')
APIFY_CRAWLER_ID = os.getenv('APIFY_CRAWLER_ID', '')
APIFY_TOKEN = os.getenv('APIFY_TOKEN', '')
# start crawler execution run
# wait=60 blocks up to 60s for the crawler run to finish before returning.
r = requests.post(f'https://api.apify.com/v1/{APIFY_USER_ID}/crawlers/{APIFY_CRAWLER_ID}/execute?token={APIFY_TOKEN}&wait=60')
crawler_run = r.json()
# get results from crawler run
r = requests.get(crawler_run['resultsUrl'])
crawler_results = r.json()
rawdata = crawler_results[0]['pageFunctionResult']
# format results
# Index crawler items by day name; assumes each item carries 'day', 'count'
# and 'desc' keys — TODO confirm against the crawler's page function.
pollen = {
    'yesterday': {},
    'today': {},
    'tomorrow': {}
}
for item in rawdata:
    pollen[item['day'].lower()] = { 'count': item['count'], 'desc': item['desc'] }
# create pushover notification
title = f"pollen count is {pollen['today']['count']} ({pollen['today']['desc']})"
msg = f"""Tomorrow: {pollen['tomorrow']['count']} ({pollen['tomorrow']['desc']})
Yesterday: {pollen['yesterday']['count']} ({pollen['yesterday']['desc']})
"""
r = requests.post('https://api.pushover.net/1/messages.json', data = {
    'token': os.environ['POLLEN_PUSHOVER_APP_KEY'],
    'user': os.environ['PUSHOVER_USER_KEY'],
    'message': msg,
    'title': title,
    'url': os.getenv('POLLEN_URL', 'https://www.pollen.com'),
    'device': os.environ['PUSHOVER_DEVICE']
})
| stacybrock/pollenwatch | pollenwatch.py | pollenwatch.py | py | 1,316 | python | en | code | 0 | github-code | 13 |
42418244385 | import pandas
from matplotlib import pyplot as plt
from sklearn.feature_selection import RFECV
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
# Configuration and data loading: read the CSV dataset and drop the two
# non-feature columns before feature selection.
cv = 4
input_filename = 'dataset.csv'
print('==> Reading file (' + input_filename + ')')
data_frame = pandas.read_csv(input_filename, header=0, encoding="ISO-8859-1") # header: Row number(s) to use as the
print('==> File successfully loaded:')
print(data_frame.head())
print("==> Dropping columns 'name' & 'Class'")
data_frame = data_frame.drop(['name', 'Class'], axis=1) # Dropping columns 'name' and 'Class'
print('==> Columns dropped: ')
print(data_frame.head())
'''
###########################################################################
print('###########################################################################')
if_more_than_percent = 0.25
columns_to_drop = []
for column in data_frame:
zero_counter = 0
for row in data_frame[column]:
if row == 0:
zero_counter += 1
if zero_counter / 1072 > if_more_than_percent:
columns_to_drop.append(column)
print(len(columns_to_drop), columns_to_drop)
data_frame = data_frame.drop(columns_to_drop, axis=1) # Dropping columns 'name' and 'Class'
###########################################################################
###########################################################################
print('###########################################################################')
if_more_than_percent = 0.25
rows_to_drop = []
for row in data_frame.iloc[:, 0:-1].itertuples():
zero_counter = 0
for element in row:
if element == 0:
zero_counter += 1
if zero_counter / 126 > if_more_than_percent:
rows_to_drop.append(row[0])
print(len(rows_to_drop), rows_to_drop)
#print(rows_to_drop[0])
#print(data_frame.iloc[rows_to_drop[0], :])
data_frame = data_frame.drop(rows_to_drop, axis=0) # Dropping columns 'name' and 'Class'
###########################################################################
'''
# Split the frame into features (all but last column) and target (last column).
# NOTE(review): `array` appears unused in the code shown here — likely a
# leftover from an earlier version.
array = data_frame.values
X = data_frame.iloc[:, 0:-1]
Y = data_frame.iloc[:, -1]
print('==> X:')
print(X)
print('==> Y:')
print(Y)
'''
###########################################################################
print('###########################################################################')
columns_means_excluding_zero_values = []
for column in X:
print(column)
num_valid_elements = 0
sum = 0
for row in data_frame[column]:
if row != 0:
num_valid_elements += 1
sum += row
mean = sum / num_valid_elements
print(column, mean)
X[column] = X[column].replace(0, mean)
#for row in data_frame[column]:
# if row == 0:
# row = mean
'''
print('================================= Scaling data(NORMALIZATION) =================================')
# SVMs assume that the data it works with is in a standard range
scaler = StandardScaler().fit(X) # Standardize features by removing the mean and scaling to unit variance
X = scaler.transform(X)
print('Scaled data')
print('==> X:')
print(X)
print('================================= FEATURE SELECTION =================================')
# Esteban:
# as for penalty=: l2 works better
# as for loss. No real change
# as for dual. This must set to false because n_samples > n_features
# as tolerance. The lower this number, the higher the accuracy
# as for max_iter: graph is sharper with more iter.
# A supervised learning estimator with a fit method that updates a coef_ attribute that holds the
# fitted parameters. Important features must correspond to high absolute values in the coef_ array.
estimator = LinearSVC(dual=False, tol=1e-5, random_state=0)
# RFECV: recursive feature elimination with cross-validation; n_jobs=-1 uses
# every available core.
wrapper = RFECV(estimator, cv=cv, verbose=True, n_jobs=-1)
wrapper.fit(X, Y)
# Plot CV score versus number of selected features.
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score \n of number of selected features")
plt.title("RFECV(LinearSVC(penalty='l2', tol=1e-5)) cv=" + str(cv))
plt.plot(range(1, len(wrapper.grid_scores_) + 1), wrapper.grid_scores_)
plt.show()
print(wrapper.support_)
# Collect the indices of the features RFECV kept (support_ is a boolean mask).
counter = 0
selected_features = []
for element in wrapper.support_:
    if element:
        selected_features.append(counter)
    counter += 1
print('select (', wrapper.n_features_, ') features at score:', max(wrapper.grid_scores_))
print(selected_features)
print()
| festeban26/data_mining_usfq_projects | [4] Normalización y reducción/src/main.py | main.py | py | 4,377 | python | en | code | 0 | github-code | 13 |
class Solution:
    def wiggleMaxLength(self, nums: List[int]) -> int:
        """Return the length of the longest wiggle subsequence of ``nums``.

        Greedy O(n) time / O(1) space: count one element for every change
        of direction in the sequence of consecutive differences.  Zero
        differences are skipped, since equal neighbours never extend a
        wiggle.  (The commented-out O(n^2) DP variant was removed.)
        """
        n = len(nums)
        if n < 2:
            return n
        prev_diff = nums[1] - nums[0]
        # The first two elements form a wiggle only if they differ.
        count = 2 if prev_diff != 0 else 1
        for i in range(2, n):
            diff = nums[i] - nums[i - 1]
            # A new wiggle appears when the sign flips; a zero prev_diff
            # means "no direction established yet", so any non-zero diff
            # starts one.
            if (diff > 0 and prev_diff <= 0) or (diff < 0 and prev_diff >= 0):
                count += 1
                prev_diff = diff
        return count
40451873025 | import graphviz
from bs4 import BeautifulSoup
import threading
import os
from fiber import fiber
# Module-wide configuration and shared state.
filePathName = "test-output/round-table.gv"  # main graph output (without extension)
filePublic = "test-output"                   # folder for per-relation sub-graphs
mode = "svg"                                 # default graphviz render format
eventMap = {}    # nodeKey/nodeName -> event metadata, filled by pushEvent()
eventList = ["click"]  # DOM events we generate listeners for
def pushData(initList, viewData, childName):
    """Append one parent->child edge record (dicts in, dict out) to *initList*."""
    edge = {
        "nodeName": viewData.get("nodeName"),
        "childName": childName.get("nodeName"),
        "childKey": childName.get("nodeKey"),
        "nodeEvent": viewData.get("nodeEvent"),
        "nodeKey": viewData.get("nodeKey"),
    }
    initList.append(edge)
def pushEvent(viewData):
    """Record a node's event metadata in the module-level eventMap.

    The entry is keyed by the node's key when present (truthy), otherwise
    by its name.
    """
    key = viewData.get("nodeKey") or viewData["nodeName"]
    eventMap[key] = {
        "nodeEvent": viewData.get("nodeEvent"),
        "relation": viewData.get("relation"),
        "nodeKey": viewData.get("nodeKey"),
        "nodeName": viewData["nodeName"],
    }
def recursion(viewData, initList, mainstream=False):
    """Depth-first walk of the view tree.

    Collects parent->child edges into *initList*; when *mainstream* is
    true, also registers every visited node's event in eventMap.
    """
    if mainstream:
        pushEvent(viewData)
    for child in viewData.get("child", ()):
        pushData(initList, viewData, child)
        recursion(child, initList, mainstream)
def renderDigraph(initList,eventMap,fileName,**kwargs):
    """Build a graphviz Digraph from collected nodes/edges and render it.

    initList: edge dicts produced by pushData; eventMap: node metadata
    (may be empty for sub-graphs); fileName: output path without extension.
    kwargs: ``views`` (bool, required) opens the result; ``mode`` overrides
    the module-level render format.
    """
    print(initList,eventMap,fileName,kwargs)
    u = graphviz.Digraph('unix', filename='unix.gv',
                         node_attr={'color': 'lightblue2', 'style': 'filled'})
    u.attr(size='6,6')
    # Declare a labelled node for every entry that has an explicit key;
    # keyless nodes are created implicitly by u.edge() below.
    for item in eventMap:
        # print(item, "initList[item].")
        if eventMap[item].get("nodeKey"):
            u.node(eventMap[item].get("nodeKey"),eventMap[item].get("nodeName"))
    # Edges: prefer keys over display names on both endpoints.
    for node in initList:
        # print(initList,"initList")
        if node.get("nodeKey"):
            if node.get("childKey"):
                u.edge(node["nodeKey"], node["childKey"])
            else:
                u.edge(node["nodeKey"], node["childName"])
        else:
            if node.get("childKey"):
                u.edge(node["nodeName"], node["childKey"])
            else:
                u.edge(node["nodeName"], node["childName"])
    u.format = kwargs["mode"] if kwargs.get("mode") else mode
    u.render(fileName, view=kwargs['views'])
def handleRelationData(relation,map,list):
    """Render one node's nested relation tree as a PNG sub-graph.

    Runs in a worker thread (see relationList).  NOTE(review): the
    parameters ``map`` and ``list`` shadow the builtins of the same name.
    """
    # Sub-graph file lives under the public folder, named after the node key.
    fileAbsName = os.path.join(os.getcwd(), filePublic,map+".gv")
    recursion(relation, list, False)
    renderDigraph(list,{},fileAbsName,views = False,mode="png")
    print(fileAbsName+ "." + mode)
    # relationSvg = readTemplate("embed.js")
    # print(relationSvg)
    # print("handleRelationData",list)
def relationList(eventMap):
    """Spawn a worker thread for every node whose relation is a nested tree."""
    for key in eventMap:
        relation = eventMap[key].get("relation")
        # Only structured (non-string) relations get their own sub-graph.
        if relation and type(relation) != str:
            print(relation)
            worker = threading.Thread(
                target=handleRelationData, args=(relation, key, []))
            worker.start()
def createSvgFactory(viewData):
    """Render the main interactive graph for *viewData*, then its relations."""
    print(viewData, "viewData")
    # Only plain dicts are accepted (exact type check, as before).
    if type(viewData) is not dict:
        return
    edges = []
    recursion(viewData, edges, True)
    renderDigraph(edges, eventMap, filePathName, views=True)
    relationList(eventMap)
def xpathDom(readFilesLine, **kwargs):
    """Build listener snippets for every ``g g title`` node of an SVG string.

    Bug fix: ``kwargs`` is a plain dict, so the original attribute accesses
    (``kwargs.index``, ``kwargs.readTemplateStr``, ...) raised
    AttributeError on every call.  State is now read/written with item
    access, and the accumulated script text is returned (the in-dict
    updates alone would be invisible to the caller, since ``**kwargs``
    packs a fresh dict).  Expects keys: index, readTemplateStr,
    readStringHoders.
    """
    html = BeautifulSoup(readFilesLine, 'html.parser')
    for item in html.select("g g title"):
        kwargs["index"] += 1
        if item.text in eventMap:
            text = item.text
            if eventMap[text]["nodeEvent"] in eventList:
                snippet = kwargs["readTemplateStr"].format(
                    index=kwargs["index"], node=item.parent['id'],
                    eventName=eventMap[text]["nodeEvent"],
                    relation=eventMap[text]["relation"],
                    nodeKeyName=eventMap[text]['nodeKey'] if eventMap[text]['nodeKey'] else eventMap[text]['nodeName']
                )
                kwargs["readStringHoders"] += snippet + "\r\n"
    return kwargs["readStringHoders"]
def readFile(filePathName):
    """Inject generated event-listener JavaScript into the rendered SVG.

    Reads ``<filePathName>.<mode>``, builds one listener snippet per
    ``g g title`` element whose text matches an eventMap entry with a
    supported event, writes the combined script to scriptLister.js, and
    rewrites the SVG with the script embedded before ``</svg>``.
    """
    # NOTE(review): the parameter shadows the module-level filePathName;
    # `listenerEvent` below is never used.
    filePathName = filePathName + "." + mode
    fileAbsName = os.path.join(os.getcwd(),filePathName)
    # print(fileAbsName)
    listenerEvent = ""
    with open(fileAbsName,"r") as f:
        readFiles = f.read()
        html = BeautifulSoup(readFiles,'html.parser')
        # print("f.read()",readFiles, "f.read()")
        index = 0
        # Listener template plus the shared custom-function prelude.
        readTemplateStr = readTemplate("template.js")
        customEvent = readTemplate("customFunction.js")
        readStringHoders = customEvent + "\r\n"
        for item in html.select("g g title"):
            index += 1
            if item.text in eventMap:
                text = item.text
                if eventMap[text]["nodeEvent"] in eventList:
                    # Fill the JS template for this node; the parent <g>'s id
                    # is what the listener attaches to.
                    readStringHoder = readTemplateStr.format(
                        index=index,node=item.parent['id'],
                        eventName=eventMap[text]["nodeEvent"],
                        relation=eventMap[text]["relation"],
                        nodeKeyName = eventMap[text]['nodeKey'] if eventMap[text]['nodeKey'] else eventMap[text]['nodeName']
                    )
                    readStringHoders += readStringHoder + "\r\n"
        createFileName = os.path.join(os.getcwd(), "scriptLister.js")
        # NOTE(review): createFunctionFactory returns None, so
        # scriptPathName is always None.
        scriptPathName = createFunctionFactory(readStringHoders,createFileName)
        # Embed the script inside the SVG and overwrite the file in place.
        readF = readFiles.replace("</svg>","<script>"+readStringHoders+"</script></svg>")
        # print(readF)
        with open(fileAbsName,"w") as fw:
            fw.write(readF)
def createFunctionFactory(readStringHoder, createFileName):
    """Persist the generated listener script to *createFileName* (overwrites)."""
    with open(createFileName, "w") as out_file:
        out_file.write(readStringHoder)
def readTemplate(fileName):
    """Read *fileName* (relative to the current working directory) as text."""
    template_path = os.path.join(os.getcwd(), fileName)
    with open(template_path, "r") as template_file:
        return template_file.read()
if __name__ == "__main__":
    # Build the interactive SVG: render the graph from the fiber fixture,
    # then inject the generated event listeners into the output file.
    createSvgFactory(fiber)
    readFile(filePathName)
    # readTemplate()
| cailuan/graphvizView | main.py | main.py | py | 5,857 | python | en | code | 0 | github-code | 13 |
8249689442 | #!/usr/bin/python3
import i3
# Swap the visible workspaces between i3 outputs: for each active output,
# focus its current workspace and push it one output to the right, then
# restore focus to the workspace that was focused initially.
# retrieve only active outputs
outputs = list(filter(lambda output: output['active'], i3.get_outputs()))
# Remember the currently focused workspace so we can return to it at the end.
current_ws = i3.filter(i3.get_workspaces(), focused=True)[0]['name']
for output in outputs:
    # set current workspace to the one active on that output
    i3.workspace(output['current_workspace'])
    # ..and move it to the output to the right.
    # outputs wrap, so the right of the right is left ;)
    i3.command('move', 'workspace to output right')
i3.workspace(current_ws)
| JonaLoeffler/dotfiles | .config/i3/swap_workspaces.py | swap_workspaces.py | py | 510 | python | en | code | 0 | github-code | 13 |
17189724214 | import random
import structlog
from locust import FastHttpUser, constant, task
from .gql.mutations import dasri_create, form_create, form_update
from .gql.queries import (base_bsdas_query, base_bsffs_query, base_dasri_query,
base_form_query, base_forms_query, base_vhus_query,
bsd_query, formslifecycle_query, light_dasri_query,
me_query)
from .mixins import TDUserMixin
from .settings.locust_settings import (DEFAULT_PASS, LOGGING_DISABLED,
REDIRECT_LOGIN_URL, WAIT_TIME)
# Shared logger and pre-rendered GraphQL query variants ("#extra" is a
# placeholder for optional filter clauses inside the base query).
logger = structlog.get_logger()
forms_query = base_forms_query.replace("#extra", "")
form_query_filter_sent = base_forms_query.replace("#extra", "status: SENT")
form_query = base_form_query
def random_custom_id():
    """Return a pseudo-random 6-character id made of the digits 1-9."""
    return "".join(str(random.randint(1, 9)) for _ in range(6))
def log_response_many(res, name, sub_field=None):
    """Log how many items a GraphQL list response holds, or the error payload.

    No-op when logging is globally disabled.
    """
    if LOGGING_DISABLED:
        return
    payload = res.json()
    try:
        items = payload["data"][name]
        if sub_field:
            items = items[sub_field]
        logger.msg(name, data=len(items))
    except Exception as exc:
        # Any shape mismatch (missing key, non-sized value) is logged with
        # the full payload for debugging.
        logger.error(name, fail=str(exc), response=payload)
def log_response_unique(res, name):
    """Log the id of a single-object GraphQL response, or the error payload.

    No-op when logging is globally disabled.
    """
    if LOGGING_DISABLED:
        return
    payload = res.json()
    try:
        logger.msg(name, data=payload["data"][name]["id"])
    except Exception as exc:
        logger.error(name, fail=str(exc), response=payload)
class UIUser(TDUserMixin, FastHttpUser):
    """Simulates a front-end (browser) user: cookie-based login on start,
    then repeatedly exercises the dashboard GraphQL queries (bsds tabs,
    form reads/updates)."""
    wait_time = constant(WAIT_TIME)
    def on_start(self):
        # Cookie login; success is a redirect to the dashboard URL.
        with self.client.post(
            "login",
            json={"email": self.email, "password": DEFAULT_PASS},
            name="ui-login",
            catch_response=True,
        ) as res:
            if res.url == REDIRECT_LOGIN_URL:
                res.success()
            else:
                logger.error("login-error", email=self.email, url=res.url)
        self.get_user_forms()
    @task
    def me(self):
        self.client.post("", json={"query": me_query}, name="ui-me")
    # The bsds_* tasks below all run the same query with a different
    # dashboard tab filter spliced into the "#tab" placeholder.
    @task
    def bsds_archived(self):
        res = self.client.post(
            "",
            json={
                "query": bsd_query.replace("#tab", "isArchivedFor"),
                "variables": {"siret": self.siret},
            },
            name="ui-bsds-archived",
        )
        log_response_many(res, "bsds", "edges")
    @task
    def bsds_draft(self):
        res = self.client.post(
            "",
            json={
                "query": bsd_query.replace("#tab", "isDraftFor"),
                "variables": {"siret": self.siret},
            },
            name="ui-bsds-draft",
        )
        log_response_many(res, "bsds", "edges")
    @task
    def bsds_action(self):
        res = self.client.post(
            "",
            json={
                "query": bsd_query.replace("#tab", "isForActionFor"),
                "variables": {"siret": self.siret},
            },
            name="ui-bsds-action",
        )
        log_response_many(res, "bsds", "edges")
    @task
    def bsds_follow(self):
        res = self.client.post(
            "",
            json={
                "query": bsd_query.replace("#tab", "isFollowFor"),
                "variables": {"siret": self.siret},
            },
            name="ui-bsds-follow",
        )
        log_response_many(res, "bsds", "edges")
    @task
    def bsds_to_collect(self):
        res = self.client.post(
            "",
            json={
                "query": bsd_query.replace("#tab", "isToCollectFor"),
                "variables": {"siret": self.siret},
            },
            name="ui-bsds-to-collect",
        )
        log_response_many(res, "bsds", "edges")
    @task
    def bsds_collected_for(self):
        res = self.client.post(
            "",
            json={
                "query": bsd_query.replace("#tab", "isCollectedFor"),
                "variables": {"siret": self.siret},
            },
            name="ui-bsds-collected-for",
        )
        log_response_many(res, "bsds", "edges")
    @task(10)
    def form_update(self):
        # Mutation: set a fresh random customId on one editable BSDD.
        # NOTE: inside this method, `form_update` resolves to the imported
        # module-level mutation string, not to this method.
        if not self.editableBsddIds:
            return
        custom_id = random_custom_id()
        bsd_id = random.choice(self.editableBsddIds)
        self.client.post(
            "",
            json={
                "query": form_update,
                "variables": {"updateFormInput": {"customId": custom_id, "id": bsd_id}},
            },
            name="ui-form-update",
        )
        logger.msg("ui-form-update", id=bsd_id, custom_id=custom_id)
    @task(10)
    def form(self):
        if not self.bsddIds:
            return
        res = self.client.post(
            "",
            json={
                "query": form_query,
                "variables": {"id": random.choice(self.bsddIds)},
            },
            name="ui-form",
        )
        log_response_unique(res, "form")
class ApiUser(TDUserMixin, FastHttpUser):
    """Simulates an API client: bearer-token authentication, then a mix of
    list queries, single reads, and create/update mutations across all
    waste-tracking form types (bsdd, bsdasri, bsvhu, bsda, bsff)."""
    wait_time = constant(WAIT_TIME)
    def __init__(self, environment):
        super().__init__(environment)
        # All API requests carry the user's bearer token.
        self.headers = {"Authorization": f"bearer {self.token}"}
    @task
    def me(self):
        self.client.post(
            "", json={"query": me_query}, headers=self.headers, name="api-me"
        )
    def on_start(self):
        self.get_user_forms()
    @task
    def forms(self):
        res = self.client.post(
            "",
            json={"query": forms_query, "variables": {"siret": self.siret}},
            headers=self.headers,
            name="api-forms",
        )
        return res
    @task(10)
    def form(self):
        if not self.bsddIds:
            return
        res = self.client.post(
            "",
            json={
                "query": form_query,
                "variables": {"id": random.choice(self.bsddIds)},
            },
            headers=self.headers,
            name="api-form",
        )
        log_response_unique(res, "form")
    # @tag("slow-request")
    # @task
    # def forms_lifecycle(self):
    #     self.client.post(
    #         "",
    #         json={"query": formslifecycle_query, "variables": {"siret": self.siret}},
    #         headers=self.headers,
    #         name="api-forms-lifecycle",
    #     )
    @task
    def forms_by_status(self):
        res = self.client.post(
            "",
            json={"query": form_query_filter_sent, "variables": {"siret": self.siret}},
            headers=self.headers,
            name="api-forms-filter-status",
        )
        log_response_many(res, "forms")
    @task
    def bsdasris_full(self):
        res = self.client.post(
            "",
            json={"query": base_dasri_query},
            headers=self.headers,
            name="api-bsdasris-full",
        )
        log_response_many(res, "bsdasris", "edges")
    @task
    def bsdasris_light(self):
        res = self.client.post(
            "",
            json={"query": light_dasri_query},
            headers=self.headers,
            name="api-bsdasris-light",
        )
        log_response_many(res, "bsdasris", "edges")
    @task
    def bsvhus(self):
        res = self.client.post(
            "",
            json={"query": base_vhus_query},
            headers=self.headers,
            name="api-bsvhus",
        )
        log_response_many(res, "bsvhus", "edges")
    @task
    def bsdas(self):
        res = self.client.post(
            "",
            json={"query": base_bsdas_query},
            headers=self.headers,
            name="api-bsdas",
        )
        log_response_many(res, "bsdas", "edges")
    @task
    def bsff(self):
        res = self.client.post(
            "",
            json={"query": base_bsffs_query},
            headers=self.headers,
            name="api-bsff",
        )
        log_response_many(res, "bsffs", "edges")
    @task(4)
    def form_create(self):
        # NOTE: inside this method, `form_create` resolves to the imported
        # module-level mutation string, not to this method.
        self.client.post(
            "",
            json={
                "query": form_create,
                "variables": {
                    "createFormInput": {"emitter": {"company": {"siret": self.siret}}}
                },
            },
            name="api-form-create",
            headers=self.headers,
        )
    @task(25)
    def form_update(self):
        if not self.editableBsddIds:
            return
        custom_id = random_custom_id()
        bsd_id = random.choice(self.editableBsddIds)
        self.client.post(
            "",
            json={
                "query": form_update,
                "variables": {"updateFormInput": {"customId": custom_id, "id": bsd_id}},
            },
            name="api-form-update",
            headers=self.headers,
        )
    @task
    def dasri_create(self):
        # Creates a minimal but valid BSDASRI with a fixed payload; the
        # method name shadows the imported mutation only at class scope.
        self.client.post(
            "",
            json={
                "query": dasri_create,
                "variables": {
                    "input": {
                        "waste": {"adr": "lorem", "code": "18 01 03*"},
                        "emitter": {
                            "company": {
                                "siret": self.siret,
                                "name": "lorem",
                                "address": "rue x",
                                "phone": "123",
                                "mail": "user@test.fr",
                                "contact": "john doe",
                            },
                            "emission": {
                                "packagings": [
                                    {
                                        "type": "BOITE_CARTON",
                                        "volume": 22,
                                        "quantity": 88,
                                    }
                                ]
                            },
                        },
                    },
                },
            },
            name="api-dasri-create",
            headers=self.headers,
        )
| MTES-MCT/td-load-testing | src/locustfiles/scenario_more_queries.py | scenario_more_queries.py | py | 10,029 | python | en | code | 0 | github-code | 13 |
12548976459 | import csv, requests, re
from httplib2 import Response
# Script configuration: fill these in before running.
# repo: expected as user/repository or company/repository
repo = ''
# token: have a look here https://github.com/settings/tokens
token = ''
out_path = "."
# first (there is a link pagination) github url.
# in this case I'll download all issues
# more filters available here: https://docs.github.com/en/rest/reference/issues#list-issues-for-a-repository)
gh_url = f'https://api.github.com/repos/{repo}/issues?state=all'
# download JSON issues and write them properly
def handle_gh_request_and_write_issues(url: str) -> Response:
    """Fetch one page of issues from *url* and append each row to the CSV.

    Bug fix: the CSV header declares seven columns (..., 'Created At',
    'Updated At', 'Closed At') but the original wrote only six values,
    silently shifting 'Closed At' into the 'Updated At' column.  The row
    now includes ``updated_at``.  Non-200 responses are reported and
    skipped.  Returns the response so the caller can read pagination
    headers.
    """
    response = requests.get(url, headers={'Authorization': f'token {token}'})
    if response.status_code != 200:
        print(f'{response.status_code} - {response.reason}')
        return response
    for issue in response.json():
        writer.writerow([issue['number'], issue['state'], issue['title'],
                         issue['body'], issue['created_at'],
                         issue['updated_at'], issue['closed_at']])
    return response
# note headers - link: <https://api.github.com/repositories/000000000/issues?page=2>; rel="next", <https://api.github.com/repositories/000000000/issues?page=29>; rel="last"
def get_gh_issue_urls(response_headers: list) -> dict:
    """Parse a GitHub ``Link`` response header into a rel-name -> URL map.

    Example header value:
    ``<https://api.../issues?page=2>; rel="next", <...?page=29>; rel="last"``.
    Returns an empty dict when no ``link`` header is present.
    """
    pages = {}
    if 'link' not in response_headers:
        return pages
    for part in response_headers['link'].split(','):
        url, rel = re.search(r'<(.*)>; rel="(.*)"', part).groups()
        pages[rel] = url
    return pages
# Main loop: open the CSV, then follow GitHub's Link-header pagination
# until the last page has been processed.
file_path = f'{out_path}/{repo.replace("/", "-")}-issues.csv'
file = open(file_path, 'w', encoding='utf-8', newline='')
writer = csv.writer(file)
writer.writerow(['id', 'State', 'Title', 'Body', 'Created At', 'Updated At', 'Closed At'])
# response headers contains first, previous, next, last pages
gh_urls = dict()
while gh_url != None:
    print(f"Downloadin' from { gh_url }")
    # handle current page
    response = handle_gh_request_and_write_issues(gh_url)
    # check if we are at the EoL
    if 'last' in gh_urls and gh_url == gh_urls['last']:
        gh_url = None
        break
    # get links for next url
    gh_urls = get_gh_issue_urls(response.headers)
    if 'next' not in gh_urls:
        break;
    gh_url = gh_urls['next']
file.close()
print ('...aaaand done!') | gigadr3w/github-issues-to-csv | github-issues-to-csv.py | github-issues-to-csv.py | py | 2,250 | python | en | code | 1 | github-code | 13 |
11236608855 | import os
import random
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint, RichModelSummary, RichProgressBar
from pytorch_lightning.loggers import TensorBoardLogger
from rindti.data import DTIDataModule
from rindti.models import ClassificationModel, RegressionModel
from rindti.utils import get_git_hash, read_config
# Registry mapping the config's model.module key to the model class.
models = {
    "class": ClassificationModel,
    "reg": RegressionModel,
}
def train(**kwargs):
    """Train the whole model: run the experiment ``runs`` times, each with
    its own random seed, logging all runs under a single new TensorBoard
    version directory."""
    seed_everything(kwargs["seed"])
    seeds = random.sample(range(1, 100), kwargs["runs"])
    folder = os.path.join(
        "tb_logs",
        f"dti_{kwargs['datamodule']['exp_name']}",
        f"{kwargs['datamodule']['filename'].split('/')[-1].split('.')[0]}",
    )
    os.makedirs(folder, exist_ok=True)
    # Determine the next version number *numerically*.  The original code
    # took the last entry of os.listdir(), whose order is arbitrary and at
    # best lexicographic ("version_10" sorts before "version_2"), so it
    # could reuse an existing version directory.
    existing_versions = [
        int(d.split("_")[1])
        for d in os.listdir(folder)
        if "version" in d and os.path.isdir(os.path.join(folder, d))
    ]
    next_version = max(existing_versions) + 1 if existing_versions else 0
    for i, seed in enumerate(seeds):
        print(f"Run {i+1} of {kwargs['runs']} with seed {seed}")
        kwargs["seed"] = seed
        single_run(folder, next_version, **kwargs)
def single_run(folder, version, **kwargs):
    """Does a single run.

    Builds the datamodule, logger, callbacks and trainer from *kwargs*,
    then fits and tests the selected model.  Logs go under
    ``folder/version_<version>/<seed>``.
    """
    seed_everything(kwargs["seed"])
    datamodule = DTIDataModule(**kwargs["datamodule"])
    datamodule.setup()
    # Let the datamodule inject data-dependent settings into the config.
    datamodule.update_config(kwargs)
    logger = TensorBoardLogger(
        save_dir=folder,
        name=f"version_{version}",
        version=kwargs["seed"],
        default_hp_metric=False,
    )
    # Keep the 3 best checkpoints and stop early, both on the configured
    # monitor metric (minimised).
    callbacks = [
        ModelCheckpoint(monitor=kwargs["model"]["monitor"], save_top_k=3, mode="min"),
        EarlyStopping(monitor=kwargs["model"]["monitor"], mode="min", **kwargs["early_stop"]),
        RichModelSummary(),
        RichProgressBar(),
    ]
    trainer = Trainer(
        callbacks=callbacks,
        logger=logger,
        log_every_n_steps=25,
        enable_model_summary=False,
        **kwargs["trainer"],
    )
    model = models[kwargs["model"]["module"]](**kwargs)
    # NOTE(review): leftover debug output — prints the full config on every
    # run; consider removing.
    from pprint import pprint
    pprint(kwargs)
    trainer.fit(model, datamodule)
    trainer.test(model, datamodule)
if __name__ == "__main__":
    # CLI entry point: read the YAML config, stamp it with the current git
    # hash for reproducibility, and launch training.
    from argparse import ArgumentParser
    parser = ArgumentParser(prog="Model Trainer")
    parser.add_argument("config", type=str, help="Path to YAML config file")
    args = parser.parse_args()
    orig_config = read_config(args.config)
    orig_config["git_hash"] = get_git_hash()  # to know the version of the code
    train(**orig_config)
| ilsenatorov/rindti | train.py | train.py | py | 2,765 | python | en | code | 8 | github-code | 13 |
74165325136 | #!/usr/bin/python3
from __future__ import print_function
import sys
# Convert a gopher menu file into a Python literal: each line becomes a
# tuple of (type character, *tab-separated fields), printed one per line.
if len(sys.argv) > 1:
    entries = []
    with open(sys.argv[1], "r") as menu_file:
        for line in menu_file:
            entry = (line[0],) + tuple(line[1:].rstrip().split("\t"))
            entries.append(entry)
    print(str(entries).replace("), ", "),\n\t"))
else:
    print("Usage:\n\tdir2python <filename>")
| felixp7/gophersnake | dir2python.py | dir2python.py | py | 318 | python | en | code | 15 | github-code | 13 |
30584286008 | # coding=utf-8
"""
Utilities for ETL part
"""
import collections
import collections.abc
import functools
from datetime import datetime

from dateutil.parser import parse
from requests.exceptions import ConnectionError
ERROR_MSG_DATEIFY_INVALID_DATE = (
'dateify: ' + 'Invalid date without null values allowed')
# Requests error management
def connection_error_handler(func):
    """
    Return a function wrapper to handle requests exceptions

    The wrapped function returns None instead of propagating a requests
    ConnectionError; every other exception passes through unchanged.
    """
    # functools.wraps preserves the wrapped function's name/docstring,
    # which the original decorator lost.
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except ConnectionError:
            return None
    return func_wrapper
# Dict manipulation
def dictionarize(el):
    """
    Returns a dict representation without enumerable types

    Nested lists/tuples become dicts keyed by their stringified index;
    empty nested sequences and scalar values are kept as-is.
    """
    if isinstance(el, dict):
        converted = {}
        for key, value in el.items():
            if isinstance(value, dict):
                converted[key] = dictionarize(value)
            elif isinstance(value, (list, tuple)) and value:
                converted[key] = {str(idx): dictionarize(item)
                                  for idx, item in enumerate(value)}
            else:
                converted[key] = value
        return converted
    if isinstance(el, list):
        return {str(idx): dictionarize(item) for idx, item in enumerate(el)}
    return el
def flatten(d, parent_key='', sep='.'):
    """
    Flattens dict arborescence to a single level.

    Nested mapping keys are joined with *sep*, e.g.
    ``{'a': {'b': 1}} -> {'a.b': 1}``.
    """
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        # Bug fix: collections.MutableMapping was removed from the
        # `collections` namespace in Python 3.10; the ABC lives in
        # collections.abc on all supported versions.
        if isinstance(v, collections.abc.MutableMapping):
            items.extend(flatten(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
def flatten_unformatted(nested):
    """Flatten *nested* after first normalising lists/tuples into dicts."""
    normalised = dictionarize(nested)
    return flatten(normalised)
# Dict rules utilities
def dateify(item, allow_null=False):
    """Coerce *item* to a datetime.

    None is passed through when *allow_null* is true, otherwise it raises
    ValueError.  datetime instances are returned unchanged; anything else
    is handed to dateutil's parser.
    """
    if item is None:
        if allow_null:
            return item
        raise ValueError(ERROR_MSG_DATEIFY_INVALID_DATE)
    if isinstance(item, datetime):
        return item
    return parse(item)
def substring(item, start=0, end=10):
    """Return ``item[start:end]``; falsy input (None, '') yields None."""
    return item[start:end] if item else None
def booleanify(item, false_condition_value=None):
    """True unless *item* equals the configured "false" marker value."""
    is_false = (item == false_condition_value)
    return not is_false
def value_or_null(item, condition_value=True, replacement=None):
    """Return *replacement* when *item* equals *condition_value*, else None."""
    return replacement if item == condition_value else None
def listify(item, separator=','):
    """Split *item* on *separator* into a list.

    Fixes two defects in the original: the ValueError for non-string
    input was constructed but never *raised*, and the *separator*
    argument was ignored (the split was hard-coded to ',').
    """
    if not isinstance(item, str):
        raise ValueError('Item must be a string')
    return item.split(separator)
# Aggregation rules
def first_not_null(current_value, item_key, item_value):
    """Aggregation rule: keep the first truthy value encountered."""
    if current_value:
        return current_value
    return item_value
def aggregate_dict(current_value, item_key, item_value):
    """Aggregation rule: collect values keyed by the key's last dotted segment."""
    accumulator = current_value or {}
    leaf_key = item_key.split('.')[-1]
    accumulator[leaf_key] = item_value
    return accumulator
def aggregate_version(current_value, item_key, item_value):
    """Aggregation rule: join successive version fragments with dots.

    A falsy fragment resets the accumulator to None.
    """
    if not item_value:
        return None
    if not current_value:
        return '%s' % item_value
    return '%s.%s' % (current_value, item_value)
# Other utils
def explode_as_dict(value, separator, keys):
    """Split *value* and zip the pieces with *keys*; None when counts differ."""
    pieces = value.split(separator)
    if len(pieces) != len(keys):
        return None
    return dict(zip(keys, pieces))
def first_available(_dict, keys):
    """Return the value of the first key from *keys* present in *_dict*.

    Bug fix: the original ``return KeyError('No available key')`` handed
    the exception *instance* back to the caller instead of raising it;
    the error is now raised properly.
    """
    for key in keys:
        if key in _dict:
            return _dict[key]
    raise KeyError('No available key')
# Business utils
def to_dvr_interval(data):
    """Translate an Interval rule dict into a (lower, upper) pair of bounds.

    Relations encode inclusivity: GTE/LTE when the bound is inclusive,
    GT/LT otherwise.
    """
    rule_name = data['_class'].split("$")[-1]
    lower = {
        'rule': rule_name,
        'value': data['lowerBound'],
        'relation': 'GTE' if data["lowerBoundInclusive"] else 'GT',
    }
    upper = {
        'rule': rule_name,
        'value': data['upperBound'],
        'relation': 'LTE' if data["upperBoundInclusive"] else 'LT',
    }
    return (lower, upper)
DVR_VALUE_KEYS = ['upperBound', 'lowerBound', 'bound', 'version', 'prefix']
def to_dvrs(items):
    """Convert raw dependency-version-rule dicts into normalised entries.

    NOTE(review): hitting an Interval rule returns its bound pair
    *immediately*, discarding any rules accumulated so far — presumably
    intervals are expected to appear alone; confirm against callers.
    """
    dvrs = []
    for dvr in items:
        if dvr['_class'].endswith('$Interval'):
            return to_dvr_interval(dvr)
        dvrs.append({
            'rule': dvr['_class'].split('$')[-1],
            'relation': dvr.get('relation'),
            # The value field's name varies by rule type; take the first
            # one present (raises KeyError when none is).
            'value': first_available(dvr, DVR_VALUE_KEYS)
        })
    return dvrs
| dbenlopers/SANDBOX | misc/data_quality_is/ge.ibis.etl/ge/ibis/etl/utilities.py | utilities.py | py | 4,204 | python | en | code | 0 | github-code | 13 |
26755603622 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Hyper-parameters and training data (MNIST-style digits, 5000 examples
# of 400 pixel features each).
debug=False # flag to debug
lamb_=3 # for regularization term in loss function (given in question)
lr=0.2 #learning rate (given in question)
iter_num=500 #given number of iterations for batch gradient
input_features = pd.read_csv('./data/X.csv',index_col=False,header= None)
X = input_features.to_numpy() # input_matrix 5000X400
output_labels = pd.read_csv('./data/Y.csv',index_col = False,header = None)
Y = output_labels.to_numpy() # given labels vector 5000X1
# function for one hot encoding
def one_hot_vector_encoder(Y):
    """One-hot encode a label column Y (values 1..10) into an (m, 10) int matrix.

    Label d lights index d-1, so digit 1 maps to index 0 and label 10
    (which represents the digit '0' in this dataset) maps to index 9.
    Vectorised replacement for the original O(m*11) double loop.
    """
    row_num = Y.shape[0]
    one_hot_vector = np.zeros([row_num, 10], dtype=int)
    # Fancy indexing: one assignment sets the single 1 in every row.
    one_hot_vector[np.arange(row_num), Y[:, 0] - 1] = 1
    return one_hot_vector
# Encode labels, set up the bias column, and load the provided initial
# weight matrices (hidden layer: 25 units + bias, output layer: 10 units).
Y= one_hot_vector_encoder(Y) #one_hot_encoded (shape=5000X10)
b = np.ones([X.shape[0],1]) # bias initialization (5000X1) value =1
## Loading given Weight matrix
init_w1 = pd.read_csv('./data/initial_W1.csv',index_col = False, header = None)
init_w2 = pd.read_csv('./data/initial_W2.csv',index_col = False, header = None)
initial_W1 = init_w1.to_numpy() # weight matrix for hidden layer (25X401)
initial_W2 = init_w2.to_numpy() # weight matrix for hidden layer (10X26)
# Accuracy checking
def calc_accuracy(Y_actual, Y_pred):
    """Percentage of rows whose argmax prediction matches the argmax label."""
    predicted = np.argmax(Y_pred, axis=1)
    truth = np.argmax(Y_actual, axis=1)
    wrong = sum(p != t for p, t in zip(predicted, truth))
    return (1 - wrong / len(predicted)) * 100
#activation function
def logistic(z):
    """Element-wise sigmoid activation: 1 / (1 + e^-z)."""
    return 1.0 / (1.0 + np.exp(-z))
# Gradient of logistic function
def grad_logistic(z):
    """Derivative of the sigmoid, expressed via the activation itself."""
    activation = logistic(z)
    return activation * (1 - activation)
#Forward propagation
def forward_propagation(X, b, W1, W2):
    """One forward pass through the 2-layer network.

    Returns (predictions, hidden activations with bias column,
    hidden pre-activations) — H and Z1 are reused by back-propagation.
    """
    X_biased = np.concatenate((b, X), axis=1)      # prepend bias column
    Z1 = np.dot(X_biased, W1.T)                    # hidden pre-activation
    H = np.concatenate((b, logistic(Z1)), axis=1)  # hidden output + bias
    Z2 = np.dot(H, W2.T)                           # output pre-activation
    Y_pred = logistic(Z2)
    return Y_pred, H, Z1
# Optional sanity check: run a forward pass with the reference weight
# matrices and report accuracy (only when the debug flag is set).
if debug:
    ## Debugging Forward Propagation part
    W1 = pd.read_csv('./data/W1.csv', index_col= False, header = None)
    forward_W1 = W1.to_numpy()
    W2 = pd.read_csv('./data/W2.csv', index_col = False, header = None)
    forward_W2 = W2.to_numpy()
    forward_Y,forward_H,forward_Z1 = forward_propagation(X,b,forward_W1,forward_W2)
    accuracy = calc_accuracy(Y, forward_Y)
    print("Accuracy in Forward Propagation Debugging: {}% ".format(accuracy))
    ##----------------------------------------------------------------------------
##loss function
def loss_function(Y_actual, Y_pred, lamb_, W1, W2):
    """Regularised cross-entropy cost; bias columns excluded from the penalty."""
    m = Y_pred.shape[0]
    ones = np.ones([len(Y_actual), 1], dtype=int)
    cross_entropy = 1/m*(np.sum(
        -Y_actual * np.log(Y_pred) - (ones - Y_actual) * np.log(ones - Y_pred)))
    # Skip column 0 of each weight matrix: bias weights are not penalised.
    penalty = lamb_/(2 * m)*(np.sum(W1[:, 1:] ** 2) + np.sum(W2[:, 1:] ** 2))
    return cross_entropy + penalty
# Optional sanity check for the loss implementation (debug flag only;
# relies on the forward_* variables computed in the debug block above).
if debug:
    ## ----------debugging loss function implementation------------
    cost=loss_function(Y,forward_Y,lamb_,forward_W1,forward_W2)
    print("Cost after forward propagation: ",cost)
    ##----------------------------------------------------------------------------------------------------------
# Back propagation
def back_propagation(Y_pred, Y_actual, X, b, W1, W2, H, Z1, lamb_):
    """Compute the regularised gradients (dW1, dW2) for one batch.

    Bug fix: the original aliased ``regu_W1 = W1`` (and likewise W2), so
    zeroing the first column of the regularisation mask silently zeroed
    the *caller's* bias weights in place on every call.  The masks are
    now explicit copies.
    """
    X = np.concatenate((b, X), axis=1)
    # Bias columns (index 0) are excluded from regularisation.
    regu_W1 = W1.copy()
    regu_W1[:, 0] = 0
    regu_W2 = W2.copy()
    regu_W2[:, 0] = 0
    m = len(Y_pred)
    beta2 = Y_pred - Y_actual                       # output-layer error
    beta1 = np.dot(beta2, W2[:, 1:]) * grad_logistic(Z1)  # hidden-layer error
    dW2 = 1/m*(np.dot(beta2.T, H) + lamb_*(regu_W2))
    dW1 = 1/m*(np.dot(beta1.T, X) + lamb_*(regu_W1))
    return dW1, dW2
def batch_grad(X,b,Y,W1,W2,lamb_, lr, iter_num,debug):
    """Full-batch gradient descent for *iter_num* iterations.

    Returns (cost history including the initial cost, final W1, final W2).
    When *debug* is true, every iteration's gradients are dumped to CSV
    files under ./data for comparison against reference values.
    """
    costHistory = []
    # Record the cost of the initial weights before any update.
    Y_pred_1,h,z = forward_propagation(X,b,W1,W2)
    cost = loss_function(Y, Y_pred_1,lamb_,W1,W2)
    costHistory.append(cost)
    for i in range(iter_num):
        Y_pred, H, Z1 = forward_propagation(X,b,W1,W2)
        dW1, dW2 = back_propagation(Y_pred,Y,X,b,W1,W2,H,Z1,lamb_)
        if debug==True:
            np.savetxt("./data/W1_result_"+str(i)+".csv", dW1, delimiter=",")
            np.savetxt("./data/W2_result_"+str(i)+".csv", dW2, delimiter=",")
        # Standard gradient step.
        W1 = W1 - lr*dW1
        W2 = W2 - lr*dW2
        # NOTE(review): this cost uses the *pre-update* predictions with the
        # *post-update* weights' regularisation term — confirm intended.
        cost = loss_function(Y,Y_pred,lamb_,W1,W2)
        costHistory.append(cost)
    return costHistory,W1,W2
## ------------------------------------------------Back propagation debugging -------------------------
if debug:
    cost,W1,W2=batch_grad(X,b,Y,initial_W1,initial_W2,lamb_, lr,3,True)
# Calling batch_gradient to get results after 500 iterations
final_cost,W1,W2=batch_grad(X,b,Y,initial_W1,initial_W2,lamb_, lr,iter_num,False)
# Plot the training curve.
plt.plot(final_cost)
plt.ylabel('Loss Function')
plt.xlabel('Number of Iterations')
plt.title("Loss vs number of iterations")
plt.show()
#checking accuracy after training the weights
Pred_Y,H,Z1 = forward_propagation(X,b,W1,W2)
accuracy = calc_accuracy(Y, Pred_Y)
print("Accuracy after 500 iterations: {}% ".format(accuracy))
# checking the 10 examples given in the question
# For each 1-based example index, print the predicted label (argmax + 1,
# computed via np.where against the row maximum).
print("Checking the given indices: ")
check_index=[2171,145 ,1582, 2446 ,3393, 815, 1378, 529, 3945, 4628]
for i in range(len(check_index)):
    print("index:",check_index[i], "Prediction:", np.where(Pred_Y[check_index[i]-1]==max(Pred_Y[check_index[i]-1]))[0]+1)
| ialrazi/CSCE-5063-Machine-Learning- | Assignment_3_Solution/assignment_3_solution_010850660.py | assignment_3_solution_010850660.py | py | 6,079 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.