seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
15441316510 | """
Entity class for iDiamant.
"""
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN, NAME, VERSION, MANUFACTURER
class IdiamantEntity(CoordinatorEntity):
    """Base Home Assistant entity for the iDiamant integration.

    Couples the config entry to the update coordinator and exposes the
    unique id / device info shared by every iDiamant entity.
    """

    def __init__(self, coordinator, config_entry):
        """Store the config entry alongside the coordinator."""
        super().__init__(coordinator)
        self.config_entry = config_entry

    @property
    def unique_id(self):
        """Unique ID for this entity (the config entry id)."""
        return self.config_entry.entry_id

    @property
    def device_info(self):
        """Device registry information for the iDiamant device."""
        return dict(
            identifiers={(DOMAIN, self.unique_id)},
            manufacturer=MANUFACTURER,
            model=VERSION,
            name=NAME,
        )

    @property
    def device_state_attributes(self):
        """Extra state attributes exposed by the entity."""
        return dict(
            id=str(self.coordinator.data.get("id")),
            integration=DOMAIN,
        )
| clementprevot/home-assistant-idiamant | custom_components/idiamant/entity.py | entity.py | py | 1,067 | python | en | code | 4 | github-code | 36 |
17256388628 | execfile('simple_map.py')
def viterbi(states, piarr, trans_p, emit_p, obs):
    """Run a Viterbi-style forward pass over an observation sequence.

    Args:
        states: ordered list of state names (keys into trans_p / emit_p).
        piarr: prior probability for each state, parallel to `states`.
        trans_p: transition probabilities, trans_p[from_state][to_state].
        emit_p: emission probabilities, emit_p[state][observation].
        obs: sequence of observations.

    Returns:
        [opt, T1, T2] where T1[t][s] is the best path probability of being
        in state index s at time t, T2[t][s] holds the per-predecessor
        probabilities that produced T1[t][s], and opt lists the per-step
        argmax state index (every tied index is appended).

    NOTE(review): `opt` picks the most likely state independently at each
    time step instead of backtracking through T2, so it is not guaranteed
    to be the single most likely *path*; that behavior is kept as-is.
    """
    n_states = len(states)
    # Length of the observation sequence.
    T = len(obs)

    # T1[t][s]: probability of the most likely path so far ending in state s.
    T1 = [{}]
    T2 = [{}]

    # Initialization: prior * emission (0.0 when the state never emits obs[0]).
    for s in range(n_states):
        st = states[s]
        T1[0][s] = piarr[s] * emit_p[st].get(obs[0], 0.0)

    for t in range(1, T):
        T1.append({})
        T2.append({})
        for s in range(n_states):
            st = states[s]
            # Fix: use .get(..., 0.0) so an observation unseen by this
            # state's emission table scores 0 instead of raising KeyError
            # (matches how the initialization step handles missing
            # emissions).
            emit = emit_p[st].get(obs[t], 0.0)
            # Probability of reaching state s from each possible predecessor.
            prob_each_step = [
                T1[t - 1][y0] * trans_p[states[y0]][st] * emit
                for y0 in range(n_states)
            ]
            T1[t][s] = max(prob_each_step)
            T2[t][s] = prob_each_step

    # Per-step argmax; appends every state index tied for the maximum.
    opt = []
    for step in T1:
        best = max(step.values())
        for idx, prob in step.items():
            if prob == best:
                opt.append(idx)

    return [opt, T1, T2]
# --- Script driver: expects `states`, `trans_p`, `emit_p` and the pretty
# printer `pp` to have been defined by simple_map.py, loaded above via
# execfile (so this file targets Python 2).

# Prior probability of state space
piarr = [0.0]*len(states)
# NOTE(review): this assigns 3 items to a 2-element slice, so the list grows
# by one; the trailing extra 0.0 is never read by viterbi (which only indexes
# 0..len(states)-1) — possibly [0:3] was intended. Behavior left unchanged.
piarr[0:2] = [0.05]*3
piarr[3] = 0.9  # state 3 is by far the most likely start state
# observations: down, right, down, right, right, etc.
#obs = (2,3,2,3,3,0,0,1,3)
obs = (2,2,3,3,3,0,0,1,3)
vit = viterbi(states, piarr, trans_p, emit_p, obs)
path = vit[0]  # per-step most-likely state indices
path_states = [states[step] for step in path]
pp.pprint(path_states)
import json
# Persist the decoded path for the front-end visualization.
with open('site/public/js/path.json', 'w') as outfile:
    json.dump(path_states, outfile, sort_keys = True, indent = 4, ensure_ascii=False)
| abarciauskas-bgse/stochastic | project/viterbi.py | viterbi.py | py | 1,593 | python | en | code | 0 | github-code | 36 |
20693896588 | #!/usr/bin/python
# -- coding: utf8 --
"""
Django settings for Russian Learner Corpus project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os
import json
from django.utils.translation import ugettext_lazy as _
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Use a separate file for storing usernames and passwords
with open(os.path.join(BASE_DIR, '.secure.settings.json')) as secret:
SECRET = json.loads(secret.read())
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = SECRET["SECRET_KEY"]
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = SECRET["DEBUG"]
TEMPLATE_DEBUG = DEBUG
# Identifies whether the code is running in prod
PROD = '/home/elmira' in BASE_DIR
if PROD:
ALLOWED_HOSTS = ['.web-corpora.net']
else:
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Corpus',
'annotator',
'news'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
# 'django.contrib.admindocs.middleware.XViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'heritage_corpus.urls'
WSGI_APPLICATION = 'heritage_corpus.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'TEST_CHARSET': 'UTF8',
'HOST': '',
'PORT': '3306',
}
}
if PROD:
DATABASES['default']['NAME'] = SECRET['PROD_DATABASES_NAME']
DATABASES['default']['USER'] = SECRET['PROD_DATABASES_USER']
DATABASES['default']['PASSWORD'] = SECRET['PROD_DATABASES_PASSWORD']
else:
DATABASES['default']['NAME'] = SECRET['DEV_DATABASES_NAME']
DATABASES['default']['USER'] = SECRET['DEV_DATABASES_USER']
DATABASES['default']['PASSWORD'] = SECRET['DEV_DATABASES_PASSWORD']
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
LANGUAGES = (
('ru', _('Russian')),
('en', _('English')),
)
TIME_ZONE = 'UTC'
# todo set timezone
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
if PROD:
STATIC_URL = '/RLC/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
else:
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static/'),
)
MEDIA_ROOT = os.path.dirname(BASE_DIR) + '/public/media/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages'
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale'),
)
# Corpus related settings
if PROD:
PATH_TO_MYSTEM = os.path.join(BASE_DIR, 'mystem')
else:
PATH_TO_MYSTEM = SECRET["DEV_PATH_TO_MYSTEM"]
TEMPORARY_FILE_LOCATION = os.path.join(BASE_DIR, 'tempfiles') | elmiram/russian_learner_corpus | heritage_corpus/settings.py | settings.py | py | 4,402 | python | en | code | 3 | github-code | 36 |
17895997999 | import argparse
import time
from modulos.AOJApp import AOJ
from modulos.Acciones import Acciones
from modulos.BarraMenu import irA
from modulos.Cartas import EmitirCarta, BlanquearCarta
from modulos.ConsultaRespuesta import CR
from modulos.Reporte import Reporte
# GUI smoke test: lock an "oficio", then edit the blanked letter attached
# to it, recording every step into a Reporte.
newInstance = AOJ()
app = newInstance.retornarAOJApp()

reporte = Reporte("Smoke de Cartas", "CPMB09.37 Editar Carta")

# Optional CLI override of the target oficio number and year.
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--oficio")
parser.add_argument("-a", "--anio")
args = parser.parse_args()
if args.oficio and args.anio:
    numOficio = args.oficio
    anioOficio = args.anio
else:
    # Defaults when no CLI arguments are given.
    # NOTE(review): 20201 looks like a typo for a year (2020/2021) — confirm.
    numOficio = 25
    anioOficio = 20201

# Enter the answers-lookup screen
newConsultaRespuesta = CR(app, reporte)
irA(app, "Oficios->Consulta respuestas", reporte)
# Select an oficio
newConsultaRespuesta.seleccionarOficio(anioOficio, numOficio)
# Click the Bloq/DbloqRta (lock/unlock answer) button
newConsultaRespuesta.presionarBloqDbloq()
time.sleep(1)
# Click the Update button
newConsultaRespuesta.presionarActualizar()
time.sleep(1)
# Validate the locked state of the oficio
emitirCarta = EmitirCarta(app, reporte)
time.sleep(1)
emitirCarta.obtenerEstadoBloqueado("NO")
time.sleep(1)
# Press the "edit letter" button
emitirCarta.clickearEmitirCarta()
# Edit the letter
editarCartaBlanqueada = BlanquearCarta(app,reporte)
editarCartaBlanqueada.editarCartaBlanqueada()
# Finish the report
reporte.terminarReporte()
# Close the answers-lookup window
newConsultaRespuesta.presionarSalir()
# Close the app
newInstance.closeAOJApp()
29719252617 | """
Day 9 part 2
"""
from utils import read_input
def find_window(opts, total):
    """Find the first contiguous window of `opts` summing exactly to `total`.

    Args:
        opts: list of numbers to scan.
        total: target sum.

    Returns:
        The list of values in the first matching window, or None when no
        contiguous window sums to `total`.

    Uses a running sum over a sliding window, so the scan is O(n) instead
    of recomputing sum(window) on every step (previously O(n^2)).
    """
    lo = 0
    running = 0
    for hi, value in enumerate(opts):
        running += value
        # Shrink from the left while the window overshoots the target.
        while running > total and lo <= hi:
            running -= opts[lo]
            lo += 1
        if running == total:
            return opts[lo:hi + 1]
    return None
def find_missing(vals, pre):
    """Locate the first value (after a `pre`-long preamble) that is not the
    sum of two values among the preceding `pre` numbers, then return
    min + max of the contiguous window of `vals` summing to it.

    NOTE(review): the pair check uses a set, so a target equal to twice a
    preamble value that occurs only once still counts as a valid sum —
    this matches the original behavior.
    """
    for idx in range(pre, len(vals)):
        target = vals[idx]
        preamble = set(vals[idx - pre:idx])
        if not any(target - p in preamble for p in preamble):
            window = find_window(vals, target)
            return min(window) + max(window)
# Worked example from the Day 9 puzzle (preamble length 5): 127 is the first
# value that is not a sum of two of the previous five, and the contiguous
# window 15+25+47+40 = 127 gives min + max = 15 + 47 = 62.
test_input = [
    35,
    20,
    15,
    25,
    47,
    40,
    62,
    55,
    65,
    95,
    102,
    117,
    150,
    182,
    127,
    219,
    299,
    277,
    309,
    576,
]
assert find_missing(test_input, 5) == 62
# Real puzzle input: one integer per line, preamble length 25.
inpt = read_input(9, line_parser=int)
assert find_missing(inpt, 25) == 4794981
| yknot/adventOfCode | 2020/09_02.py | 09_02.py | py | 975 | python | en | code | 0 | github-code | 36 |
29788651993 | from os.path import join, isdir
from os import listdir, mkdir
from importlib import import_module
# Directory for generated, non-source artifacts (created on first run).
NOTCODE_DIR = 'notcode'

if not isdir(NOTCODE_DIR):
    mkdir(NOTCODE_DIR)

# Discover available profiles (one module per .py file in profiles/).
# Fix: str.rstrip('.py') strips any trailing '.', 'p' or 'y' characters
# (e.g. 'happy.py' -> 'hap'), so slice off the fixed '.py' suffix instead.
profiles = [x[:-len('.py')] for x in listdir('profiles') if x.endswith('.py')]
profile = None
if not profiles:
    raise Exception('No profile found.')
elif len(profiles) == 1:
    # Exactly one profile: select it without prompting.
    profile = profiles[0]
# Otherwise keep prompting until the user names an existing profile.
while profile not in profiles:
    profile = input('Profile: ')

# Re-export the selected profile's constants for the rest of the package.
profmodule = import_module('profiles.' + profile)
ANKI_USER = profmodule.ANKI_USER
DECK_NAME = profmodule.DECK_NAME
NOT_FOUND_PATH = profmodule.NOT_FOUND_PATH
PONS_KEYS = profmodule.PONS_KEYS
getmarkings = profmodule.getmarkings

DONE_PATH = join(NOTCODE_DIR, 'donemarkings_' + profile + '.txt')
9955744674 | #!/usr/bin/env python3
from PIL import ImageEnhance
from PIL import Image
def get_average_color(img):
    """Return the (r, g, b) average color of the central region of *img*.

    The image is shrunk to 50x50, cropped to its central 20x20 patch and
    saturation-boosted before the per-pixel average is computed.

    Fix: the function previously ignored its `img` argument and re-opened a
    hard-coded file (/home/pi/Desktop/img5.jpg); it now operates on the
    image the caller passes in.
    """
    # Normalize to RGB so getpixel always yields a 3-tuple (handles RGBA/L).
    img = img.convert('RGB')
    img = img.resize((50, 50))
    # Keep only the central 20x20 patch to ignore background at the edges.
    img = img.crop((15, 15, 35, 35))
    # Boost saturation so the dominant hue stands out.
    converter = ImageEnhance.Color(img)
    img = converter.enhance(2.5)
    w, h = img.size
    r_tot = 0
    g_tot = 0
    b_tot = 0
    count = 0
    for i in range(0, w):
        for j in range(0, h):
            r, g, b = img.getpixel((i, j))
            r_tot += r
            g_tot += g
            b_tot += b
            count += 1
    return (r_tot / count, g_tot / count, b_tot / count)
# Sample usage: average color of the captured button snapshot.
my_img = Image.open('/home/pi/Desktop/button.jpg')
average_color = get_average_color(my_img)
#print(average_color)
2114104151 | import aiohttp
import uvicorn
from fastapi import FastAPI
from fastapi import Request
from starlette.responses import Response
# FastAPI application hosting the catch-all proxy endpoint below.
app = FastAPI(title="Yhop Proxy",
              version="0.0.1",
              openapi_url="/openapi.json", )
# Fix: 'OPTIONS' was previously misspelled 'OPTION' (not a real HTTP method).
@app.route("/", methods=['HEAD', 'OPTIONS', 'GET', 'POST'])
async def proxy(request: Request):
    """Replay the incoming request upstream and relay the answer.

    The client's method, headers and body are forwarded with aiohttp; the
    upstream status, body and (filtered) headers are returned.
    """
    url = request.url
    method = request.method
    headers = request.headers
    data = await request.body()
    # Forward the request to the target server.
    # aiohttp expects a ClientTimeout instance rather than a bare number.
    timeout = aiohttp.ClientTimeout(total=5)
    async with aiohttp.ClientSession(timeout=timeout) as session:
        async with session.request(method=method, url=str(url), headers=headers, data=data) as response:
            # response.content is a StreamReader, not bytes — read the full
            # payload before the session closes so the body can be relayed.
            body = await response.read()
            # Drop hop-by-hop / encoding headers: aiohttp already decoded
            # the body, so relaying them would desynchronize the client.
            skip = ('content-encoding', 'transfer-encoding', 'content-length', 'connection')
            resp_headers = {k: v for k, v in response.headers.items()
                            if k.lower() not in skip}
            # Build the response object and hand the upstream answer back
            # to the client.
            return Response(status_code=response.status,
                            content=body,
                            headers=resp_headers)
def start_server(ip: str = '0.0.0.0', port: int = 1080, timeout: int = 60):
    """Run the proxy app under uvicorn, listening on ip:port.

    NOTE(review): `timeout` is accepted but never forwarded to uvicorn —
    possibly intended for uvicorn's timeout_keep_alive; confirm intent.
    """
    uvicorn.run(app, host=ip, port=port)
| sdliang1013/caul-proxy | src/caul_proxy/server_uvicorn.py | server_uvicorn.py | py | 1,038 | python | en | code | 0 | github-code | 36 |
43143608911 | from rest_framework import serializers
from apps.categories.serializers import CategorySerializer
from apps.media.models import Image
from apps.media.serializers import ImageSerializer
from apps.products.models import Product, Variant
from apps.reviews.serializers import ReviewSerializer
class ProductSerializer(serializers.ModelSerializer):
    """Product representation with a category summary and its image list."""

    # Computed, read-only projections of related objects.
    product_category = serializers.SerializerMethodField()
    product_images = serializers.SerializerMethodField()

    class Meta:
        model = Product
        fields = "__all__"
        extra_kwargs = {
            "category": {
                "required": True,
            },
        }
        # Optionally-expanded relations (flex-fields style).
        expandable_fields = {
            'reviews': (ReviewSerializer, {'many': True}),
        }

    def get_product_category(self, obj):
        """Return a compact {id, name} summary of the product's category."""
        return {
            'id': obj.category.id,
            'name': obj.category.name,
        }

    def get_product_images(self, obj):
        """Return the serialized Image records attached to this product.

        (Removed: an unused `request = self.context.get('request')` local
        and several commented-out alternates.)
        """
        images = Image.objects.filter(product_id=obj.id)
        serializer = ImageSerializer(images, many=True)
        return serializer.data
class VariantSerializer(serializers.ModelSerializer):
    """Plain ModelSerializer exposing every field of Variant."""

    class Meta:
        model = Variant
        fields = '__all__'
| mushfiq1998/bkpe-multivendor-ecommerce | apps/products/serializers.py | serializers.py | py | 2,117 | python | en | code | 0 | github-code | 36 |
74367228902 | import json
from flask import Flask, render_template, request, jsonify
import requests
app = Flask(__name__)

# NOTE(review): the weatherstack access key is hard-coded in source; move it
# to an environment variable / config file before publishing this code.
API_KEY = '0c20320445392a19d9b2a02ae290502c'
BASE_URL = 'http://api.weatherstack.com/current'
def get_weather(city):
    """Query weatherstack for *city* and return a dict of weather fields.

    On success the dict contains temperatures in C/F/K, a description,
    coordinates and assorted conditions. On failure it contains a single
    'error' key with a human-readable message (the template renders it).
    """
    params = {
        'access_key': API_KEY,
        'query': city,
    }
    try:
        response = requests.get(BASE_URL, params=params)
        response.raise_for_status()
        data = response.json()

        # Fix: weatherstack reports failures (bad key, unknown city) inside
        # a 200 body, so raise_for_status() never sees them; surface the
        # API's own code/message instead of a generic "invalid data" error.
        if data.get('success') is False:
            err = data.get('error', {})
            code = err.get('code', 400)
            info = err.get('info', 'Unknown API error.')
            return {
                'error': f'Error {code}: {info}',
            }

        current = data['current']
        location = data['location']
        temperature_celsius = current['temperature']
        temperature_fahrenheit = (temperature_celsius * 9 / 5) + 32
        temperature_kelvin = temperature_celsius + 273.15
        return {
            'city': city,
            'country': location['country'],
            'temperature': temperature_celsius,
            'fahrenheit': temperature_fahrenheit,
            'kelvin': temperature_kelvin,
            'description': current['weather_descriptions'][0],
            'longitude': location['lon'],
            'latitude': location['lat'],
            'humidity': current['humidity'],
            'visibility': current['visibility'],
            'wind_speed': current['wind_speed'],
            'wind_direction': current['wind_dir'],
            'atmospheric_pressure': current['pressure'],
            'time_zone': location['utc_offset'],
        }
    except requests.exceptions.RequestException as e:
        # e.response is None for pure connection failures.
        status_code = e.response.status_code if e.response is not None else None
        error_message = 'Network error. Please check your internet connection and try again.'
        return {
            'error': f'Error {status_code}: {error_message}',
        }
    except (KeyError, ValueError):
        # Unexpected/partial payload shape.
        status_code = 400
        error_message = 'Invalid data received from the server.'
        return {
            'error': f'Error {status_code}: {error_message}',
        }
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the search form; on POST, look up and display the weather."""
    error_message = None
    if request.method == 'POST':
        city = request.form.get('city')
        weather_data = get_weather(city)
        if 'error' not in weather_data:
            # Successful lookup: render all weather fields directly.
            return render_template('index.html', **weather_data)
        error_message = weather_data['error']
    # Initial GET, or a failed lookup: show the empty form (plus any error).
    return render_template('index.html', city='', country='', temperature='', description='', error=error_message)
if __name__ == '__main__':
    # Listen on all interfaces for LAN testing; disable debug in production.
    app.run(host='0.0.0.0', port=5000, debug=True)
    # app.run(debug=True)
13100916881 | from numpy import matrix, array, linalg, random, amax, asscalar
from time import time
def linpack(N):
eps=2.22e-16
ops=(2.0*N)*N*N/3.0+(2.0*N)*N
# Create AxA array of random numbers -0.5 to 0.5
A=random.random_sample((N,N))-0.5
B=A.sum(axis=1)
# Convert to matrices
A=matrix(A)
B=matrix(B.reshape((N,1)))
na=amax(abs(A.A))
start = time()
X=linalg.solve(A,B)
latency = time() - start
mflops = (ops*1e-6/latency)
result = {
'mflops': mflops,
'latency': latency
}
return result
def function_handler(request):
    """Cloud-function entry point: run linpack for the requested size N."""
    payload = request.get_json(silent=True)
    outcome = linpack(payload['N'])
    print(outcome)
    # Human-readable one-line summary for the HTTP response body.
    return "latency : " + str(outcome['latency']) + " mflops : " + str(outcome['mflops'])
41928732216 | from __future__ import absolute_import
import xadmin
from .models import UserSettings, Log
from xadmin import views
from xadmin.layout import *
from django.utils.translation import ugettext_lazy as _, ugettext
class BaseSetting(object):
    """xadmin base-view settings: enable theme switching."""
    enable_themes = True
    use_bootswatch = True


xadmin.site.register(views.BaseAdminView, BaseSetting)  # register with xadmin
class GlobalSetting(object):
    """Global xadmin chrome: page title, footer text and the side menu."""
    # Title used by base_site.html ("Backend administration")
    site_title = '后台管理'
    # Footer used by base_site.html
    site_footer = '我的脚丫'
    menu_style = 'accordion'

    def get_site_menu(self):
        """Return the custom two-section admin menu (score tables /
        meeting management).

        NOTE(review): the "meeting settings" item links to
        /xadmin/meetingManage while a separate meetingSetting view is
        registered below — confirm which URL is intended.
        """
        return [
            {
                'title': '赋分表',
                'menus': (
                    {
                        'title': '分配赋分表',
                        'url': '/xadmin/assignTables'
                    },
                )
            },
            {
                'title': "会议管理",
                # # 'icon': 'fa fa-bar-chart-o',
                'menus': (
                    {
                        'title': '会议设置',
                        'url': '/xadmin/meetingManage'
                    },
                )
            },
        ]
from rewardSystem.adminViews import MeetingManage, ImportStudent, Download_student_xls, AssignTables, \
    MeetingSetting, AllotJury, JuryList, ImportStudentGrade, StatisticsQuestion, StatisticsResult

# Register the custom score-table management page
xadmin.site.register_view('meetingManage', MeetingManage, name='meetingManage')
# Score-table assignment page
xadmin.site.register_view("assignTables", AssignTables, name='assignTables')
# Meeting settings page
xadmin.site.register_view('meetingSetting', MeetingSetting, name="meetingSetting")
# Import student grades
xadmin.site.register_view('importStudentGrade', ImportStudentGrade, name="importStudentGrade")
# Jury list
xadmin.site.register_view('juryList', JuryList, name="juryList")
# Assign jury members
xadmin.site.register_view('allotJury', AllotJury, name="allotJury")
xadmin.site.register_view('importStudent', ImportStudent, name="importStudent")
xadmin.site.register_view('downloadStudent', Download_student_xls, name="downloadStudent")
# Meeting statistics - questions
xadmin.site.register_view('statisticsQuestion', StatisticsQuestion, name="statisticsQuestion")
# Meeting statistics - results
xadmin.site.register_view('statisticsResult', StatisticsResult, name="statisticsResult")
# Register the global chrome settings (original comment "注册F" was truncated)
xadmin.site.register(xadmin.views.CommAdminView, GlobalSetting)
class UserSettingsAdmin(object):
    """Hidden admin entry for per-user xadmin settings."""
    model_icon = 'fa fa-cog'
    hidden_menu = True


xadmin.site.register(UserSettings, UserSettingsAdmin)
class LogAdmin(object):
    """Changelist configuration for xadmin's Log entries."""

    def link(self, instance):
        """Render an 'Admin Object' link to the logged object's change
        page; empty for deletions or entries without a target object.

        NOTE(review): assumes action_flag is the string 'delete' for
        deletion entries — confirm against the Log model.
        """
        if instance.content_type and instance.object_id and instance.action_flag != 'delete':
            admin_url = self.get_admin_url(
                '%s_%s_change' % (instance.content_type.app_label, instance.content_type.model),
                instance.object_id)
            return "<a href='%s'>%s</a>" % (admin_url, _('Admin Object'))
        else:
            return ''
    link.short_description = ""
    link.allow_tags = True  # render the raw HTML instead of escaping it
    link.is_column = False

    list_display = ('action_time', 'user', 'ip_addr', '__str__', 'link')
    list_filter = ['user', 'action_time']
    search_fields = ['ip_addr', 'message']
    model_icon = 'fa fa-cog'


xadmin.site.register(Log, LogAdmin)
| SweetShance/rewardSystem | rewardSystem/extra_apps/xadmin/adminx.py | adminx.py | py | 3,368 | python | en | code | 0 | github-code | 36 |
2954449489 | import argparse
import logging
def main(pretrained_graph_path, dataset_path):
    """Load the pretrained Inception graph, then train facial recognition
    on the dataset (evaluating against test_set.csv)."""
    # Imported lazily so CLI parsing/--help stays fast.
    from model import FacialRecognition
    from dataloader import load_inception_graph

    load_inception_graph(pretrained_graph_path)
    FacialRecognition(dataset_path, 'test_set.csv').train()
if __name__ == '__main__':
    # CLI: positional graph/dataset paths plus verbosity flags.
    parser = argparse.ArgumentParser()
    parser.add_argument('graph_path', nargs=1)
    parser.add_argument('dataset_path', nargs=1)
    parser.add_argument('--v', action='store_true')
    parser.add_argument('--vv', action='store_true')
    args = parser.parse_args()

    if args.vv:
        print('Super Verbose Mode enabled')
        logging.getLogger().setLevel(logging.DEBUG)
    elif args.v:
        print('Verbose Mode enabled')
        logging.getLogger().setLevel(logging.INFO)

    # nargs=1 yields one-element lists; unwrap before calling main.
    main(args.graph_path[0], args.dataset_path[0])
13860968318 | import pygame
import assets
clock = pygame.time.Clock()
win = pygame.display.set_mode((1365, 768))

# 3x3 board of clickable cells; Button(x, y, w, h, bg_color, label, fg_color).
#=================ROW 1====================
button1 = assets.Button(0, 0, 452, 253, (12, 12, 12), "", (255, 255, 255))
button2 = assets.Button(455, 0, 452, 253, (12, 12, 12), "", (255, 255, 255))
button3 = assets.Button(910, 0, 452, 253, (12, 12, 12), "", (255, 255, 255))
#=================ROW 2======================
button4 = assets.Button(0, 256, 452, 253, (12, 12, 12), "", (255, 255, 255))
button5 = assets.Button(455, 256, 452, 253, (12, 12, 12), "", (255, 255, 255))
button6 = assets.Button(910, 256, 452, 253, (12, 12, 12), "", (255, 255, 255))
#=================ROW 3=======================
button7 = assets.Button(0, 512, 452, 253, (12, 12, 12), "", (255, 255, 255))
button8 = assets.Button(455, 512, 452, 253, (12, 12, 12), "", (255, 255, 255))
button9 = assets.Button(910, 512, 452, 253, (12, 12, 12), "", (255, 255, 255))

# Flat board, row-major: index 0 is top-left, 8 is bottom-right.
buttons = [button1, button2, button3, button4, button5, button6, button7, button8, button9]
# Turn flag: True -> O's turn, False -> X's turn.
click = True
def messagebox(text):
    """Draw *text* centered on the window in the accent color."""
    button_font = pygame.font.SysFont("Impact", 22)
    rendered = button_font.render(text, 1, (45, 167, 235))
    # Center the rendered surface on the 1365x768 window.
    center_x = 1365 / 2 - rendered.get_width() / 2
    center_y = 768 / 2 - rendered.get_height() / 2
    win.blit(rendered, (center_x, center_y))
def anyone_won():
    """Check the board for a finished game.

    Returns "X has won a game" / "O has won a game" when a player has three
    in a row, "Tie" when every cell is filled, and "" while play continues.

    Fixes:
    - The tie counter compared each Button *object* to "" (always false),
      so it counted every cell as filled and reported "Tie" immediately.
      It now inspects button.text.
    - Win/ongoing paths previously returned None, which the main loop then
      passed to messagebox()/font.render(); every path now returns a string.
    """
    # All 8 winning lines, as index triples over the flat buttons list.
    lines = (
        (0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
        (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
        (0, 4, 8), (2, 4, 6),              # diagonals
    )
    for mark in ("X", "O"):
        for a, b, c in lines:
            if buttons[a].text == mark and buttons[b].text == mark and buttons[c].text == mark:
                # Blank the board area before announcing the winner.
                win.fill((0, 0, 0))
                return mark + " has won a game"
    # Tie: every cell carries a mark.
    filled = 0
    for cell in buttons:
        if cell.text == "":
            continue
        filled += 1
    if filled == 9:
        return "Tie"
    return ""
def whoose_turn():
    """Flip the global turn flag (O's turn <-> X's turn)."""
    global click
    click = not click
# Main event loop: poll input, update the board, redraw, repeat.
run = True
while run:
    clock.tick(100)  # cap the loop at ~100 iterations/second
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
    win.fill((45, 167, 235))
    left, middle, right = pygame.mouse.get_pressed()
    if left:
        pos = pygame.mouse.get_pos()
        for i in buttons:
            x = i.clicked(pos)
            if x:
                # Claim an empty cell for the player whose turn it is,
                # then pass the turn.
                if click and i.text == "":
                    i.text = "O"
                elif i.text == "" and click == False:
                    i.text = "X"
                # NOTE(review): the turn flips even when the clicked cell
                # was already taken, so clicking a filled cell skips a turn.
                click = not click
    for i in buttons:
        i.draw(win)
    messagebox(anyone_won())
    pygame.display.flip()
| tanmay440/Game-Hub-Mega | Tic Tack Toe/main.pyw | main.pyw | pyw | 3,872 | python | en | code | 0 | github-code | 36 |
21548587442 | import pytest
from mixer.backend.django import mixer
from apps.edemocracia.models import EdemocraciaGA
from apps.edemocracia.tasks import (get_ga_edemocracia_daily,
get_ga_edemocracia_monthly,
get_ga_edemocracia_yearly,)
from django.db import IntegrityError
class TestGAEdemocracia:
    """Tests for the EdemocraciaGA model and its Google Analytics tasks."""

    @pytest.mark.django_db
    def test_edemocracia_ga_create(self):
        # A blended record should persist.
        mixer.blend(EdemocraciaGA)
        assert EdemocraciaGA.objects.count() == 1

    @pytest.mark.django_db
    def test_edemocracia_ga_integrity_error(self):
        # (period, start_date) must be unique together - duplicates raise.
        content = mixer.blend(EdemocraciaGA)
        with pytest.raises(IntegrityError) as excinfo:
            mixer.blend(EdemocraciaGA,
                        period=content.period,
                        start_date=content.start_date)
        assert 'duplicate key value violates unique constraint' in str(
            excinfo.value)

    @pytest.mark.django_db
    def test_monthly_get_ga_data(self):
        # Five daily rows of 10 each roll up into one monthly row of 50.
        json_data = {"date": "00000000", "users": 10, "newUsers": 10,
                     "sessions": 10, "pageViews": 10}
        mixer.cycle(5).blend(EdemocraciaGA, period='daily', data=json_data,
                             start_date=mixer.sequence('2020-10-1{0}'),
                             end_date=mixer.sequence('2020-10-1{0}'))
        get_ga_edemocracia_monthly.apply(args=(['2020-10-01']))
        monthly_data = EdemocraciaGA.objects.filter(period='monthly').first()
        assert monthly_data.data['users'] == 50
        assert monthly_data.data['newUsers'] == 50
        assert monthly_data.data['sessions'] == 50
        assert monthly_data.data['pageViews'] == 50

    @pytest.mark.django_db
    def test_yearly_get_ga_data(self):
        # Three monthly rows of 10 each roll up into one yearly row of 30.
        json_data = {"users": 10, "newUsers": 10,
                     "sessions": 10, "pageViews": 10}
        start_dates = ['2019-01-01', '2019-02-01', '2019-03-01']
        end_dates = ['2019-01-31', '2019-02-28', '2019-03-31']
        for i in range(3):
            mixer.blend(EdemocraciaGA, period='monthly', data=json_data,
                        start_date=start_dates[i], end_date=end_dates[i])
        get_ga_edemocracia_yearly.apply(args=(['2019-01-01']))
        monthly_data = EdemocraciaGA.objects.filter(period='yearly').first()
        assert monthly_data.data['users'] == 30
        assert monthly_data.data['newUsers'] == 30
        assert monthly_data.data['sessions'] == 30
        assert monthly_data.data['pageViews'] == 30

    @pytest.mark.django_db
    def test_get_ga_edemocracia_daily(self, mocker):
        # The daily task stores the analytics row under the expected keys.
        ga_data = ['20201208', '647', '446', '830', '1692']
        mocker.patch(
            'apps.edemocracia.tasks.get_analytics_data',
            return_value=[ga_data])
        get_ga_edemocracia_daily.apply()
        data = {
            "date": ga_data[0],
            "users": ga_data[1],
            "newUsers": ga_data[2],
            "sessions": ga_data[3],
            "pageViews": ga_data[4],
        }
        adiencias_ga = EdemocraciaGA.objects.first()
        assert EdemocraciaGA.objects.count() > 0
        assert adiencias_ga.data == data
| labhackercd/cpp-participacao-backend | src/apps/edemocracia/tests/test_analytics_edemocracia.py | test_analytics_edemocracia.py | py | 3,150 | python | en | code | 2 | github-code | 36 |
37639995341 | from block import Block
from hashlib import sha256
from collections import deque
class Blockchain():
    """A minimal append-only chain of fixed-size vote blocks."""

    # Set the parameters for the blockchain
    def __init__(self, block_size, genesis_block_secret):
        """Create the chain with one empty block seeded from the secret.

        Args:
            block_size: maximum number of transactions per block.
            genesis_block_secret: secret string whose SHA-256 digest seeds
                the first block's starting hash.
        """
        self.block_size = block_size
        self.genesis_block_hash = sha256(genesis_block_secret.encode('utf-8')).hexdigest()
        # Create the blockchain and the first block on it
        self.blockchain = deque()
        self.blockchain.append(Block(0, self.block_size, self.genesis_block_hash))

    def get_genesis_block_hash(self):
        """Return the starting hash for the blockchain."""
        return self.genesis_block_hash

    def new_vote(self, vote_for):
        """Record a vote, opening a new block when the last one is full.

        Returns the status reported by Block.new_vote (fix: it was
        previously assigned to a local and silently discarded).
        """
        # Try to add the transaction to the last block; if it is full,
        # chain a fresh block onto the previous block's ending hash first.
        current_block = self.blockchain[-1]
        if len(current_block.block) >= self.block_size:
            self.blockchain.append(Block(len(self.blockchain), self.block_size, current_block.get_ending_hash()))
            current_block = self.blockchain[-1]
        # The current block is now guaranteed to have room.
        return current_block.new_vote(vote_for)

    def summary(self, only_hashes = True):
        """Print every block and its transaction hashes (plus votes and
        timestamps when only_hashes is False)."""
        for block in self.blockchain:
            print("\nBlock: " + str(block.block_id))
            t = 0
            for transaction in block.block:
                print(" " + str(t) + ": " + transaction['hash'])
                if not only_hashes:
                    print(" vote_for: " + str(transaction['vote_for']) + " timestamp: " + str(transaction['timestamp']))
                t += 1
| ketanv3/blockchain-evm | blockchain.py | blockchain.py | py | 2,050 | python | en | code | 0 | github-code | 36 |
42491423724 | import pytest
from demo_app import create_app
from demo_app import db as _db
from demo_app.blog.models import Author, Category, Entry
@pytest.fixture(scope='session')
def app():
    """Session-wide Flask app in the 'testing' config, with an app context
    pushed for the whole test run and popped on teardown."""
    app = create_app('testing')
    app_context = app.app_context()
    app_context.push()
    yield app
    app_context.pop()
@pytest.fixture(scope='session')
def app_client(app):
    """Flask test client bound to the session-wide app."""
    client = app.test_client()
    return client
@pytest.fixture(scope='module')
def db():
    """Create all tables for a module's tests, dropping them afterwards."""
    _db.create_all()
    yield _db
    _db.drop_all()
@pytest.fixture(scope='function')
def session(db):
    """Per-test SQLAlchemy scoped session, installed as db.session and
    removed on teardown so tests stay isolated."""
    session = db.create_scoped_session()
    db.session = session
    yield session
    session.remove()
@pytest.fixture()
def create_authors(session):
    """Seed two authors (Mike and Jane) into the test database."""
    mike = Author(name='Mike', description="Hi, I'm Mike Doe", email='mike@example.com')
    jane = Author(name='Jane', description="Hi, I'm Jane Doe", email='jane@example.com')
    session.add_all([mike, jane])
    session.commit()
@pytest.fixture()
def create_categories(session):
    """Seed two categories (Python and Javascript) into the test database."""
    python = Category(name='Python')
    javascript = Category(name='Javascript')
    session.add_all([python, javascript])
    session.commit()
@pytest.fixture()
def create_entries(session, create_authors, create_categories):
    """Seed one entry authored by Mike, tagged with both categories.

    NOTE(review): `en_ca` is presumably the entry<->category association
    attribute on the Entry model — confirm against apps/blog/models.
    """
    mike = Author.query.filter_by(name='Mike').first()
    javascript = Category.query.filter_by(name='Javascript').first()
    python = Category.query.filter_by(name='Python').first()
    entry1 = Entry(title='Hello World', body='This is my first entry', \
                   author=mike)
    entry1.en_ca.append(javascript)
    entry1.en_ca.append(python)
    session.add(entry1)
    session.commit()
| AlexPG/flask-demo-app | tests/conftest.py | conftest.py | py | 1,640 | python | en | code | 0 | github-code | 36 |
72240231143 | from datetime import datetime
from typing import Any, Dict, List
import jsonlines
from tinydb import TinyDB, where
from higgins import const
class DateTimeSerializer():
    """(De)serialize datetime objects as second-resolution ISO-8601 text.

    Microseconds and tzinfo are not round-tripped by this format.
    """

    OBJ_CLASS = datetime  # The class this serializer handles

    # Storage format: 'YYYY-MM-DDTHH:MM:SS'.
    _FORMAT = '%Y-%m-%dT%H:%M:%S'

    def encode(self, obj):
        """Render *obj* in the storage format."""
        return obj.strftime(self._FORMAT)

    def decode(self, s):
        """Parse storage-format text back into a datetime."""
        return datetime.strptime(s, self._FORMAT)
def load_database(db_path: str = const.TINY_DB_PATH) -> TinyDB:
    """Open (creating if needed) the TinyDB file at *db_path*."""
    return TinyDB(db_path)
def truncate(table_name: str, db: TinyDB) -> None:
    """Delete every record in the named table."""
    db.table(table_name).truncate()
def insert(table_name: str, records: List[Dict], db: TinyDB) -> None:
    """Bulk-insert *records* into the named table."""
    db.table(table_name).insert_multiple(records)
def query(table_name: str, field_name: str, field_value: Any, db: TinyDB) -> List[Dict]:
    """Return every record in the table where field_name == field_value."""
    return db.table(table_name).search(where(field_name) == field_value)
def export_openai_jsonl(table_name: str, field_name: str, db: TinyDB, export_path: str):
# Export in the openai format needed for search: https://beta.openai.com/docs/guides/search
table = db.table(table_name)
with jsonlines.open(export_path, 'w') as writer:
for record in table:
writer.write({"text": record[field_name], "metadata": ""})
def load_jsonl(jsonl_path: str, table_name: str, db: TinyDB):
table = db.table(table_name)
with jsonlines.open(jsonl_path) as reader:
for record in reader:
table.insert(record)
if __name__ == "__main__":
    # Smoke-test the helpers above against the default database:
    # insert two fake episodes, query them back, export to JSONL,
    # re-read the export, then clear the table.
    db = load_database()
    print(db)
    table = db.table("episodes")
    print(table)
    chat_text = 'Brendan: Hello. Higgins: How can I help you?'
    insert(
        table_name="episodes",
        records=[
            {'context': {'active_window': 'Google Chrome', 'running_applications': []}, 'chat_text': chat_text, 'start_time': '2021-09-03T11:54:54'},
            {'context': {'active_window': 'App Store', 'running_applications': []}, 'chat_text': chat_text, 'start_time': '2021-09-03T11:54:54'}
        ],
        db=db
    )
    print(table.all())
    rows = query(table_name="episodes", field_name="chat_text", field_value=chat_text, db=db)
    print(rows)
    export_path = "data/episode_openai.jsonl"
    export_openai_jsonl(
        table_name="episodes",
        field_name="chat_text",
        db=db,
        export_path=export_path
    )
    # Round-trip check: re-read the file just exported.
    from higgins.utils import jsonl_utils
    records = jsonl_utils.open_jsonl(export_path)
    print(records)
    truncate("episodes", db)
| bfortuner/higgins | higgins/database/tiny.py | tiny.py | py | 2,550 | python | en | code | 7 | github-code | 36 |
7748613059 | from tensorflow.keras.preprocessing import image as imageprep
import os
import numpy as np
from PIL import Image
import json
import requests
from io import BytesIO
def image_to_np_array(img_path, image_size):
    """Load the image at *img_path*, resized to (image_size, image_size).

    Returns a Keras-style float array of shape (image_size, image_size, channels).
    """
    img = imageprep.load_img(img_path, target_size=(image_size, image_size))
    img = imageprep.img_to_array(img)
    return img


def to_np_array(img, image_size):
    """Convert a PIL image to a resized numpy array, dropping any alpha channel.

    Args:
        img: a PIL.Image instance.
        image_size: target width/height in pixels.

    Returns:
        Array of shape (image_size, image_size, 3).
    """
    img = img.resize((image_size, image_size))
    img = imageprep.img_to_array(img)
    # RGBA input -> keep only the RGB channels.
    if (img.shape[2] == 4):
        img = img[..., :3]
    return img
def file_to_np_array(file, image_size):
    """Load an image from a file object (or path) as a numpy array.

    The image is resized to (image_size, image_size) and any alpha channel
    is dropped, so the result is always (image_size, image_size, 3).

    Args:
        file: a file-like object or path accepted by PIL.Image.open.
        image_size: target width/height in pixels.

    Returns:
        Array of shape (image_size, image_size, 3).
    """
    # Delegate to to_np_array so the resize/RGBA-stripping logic lives in
    # exactly one place (it was previously duplicated verbatim here).
    return to_np_array(Image.open(file), image_size)
def url_to_np_array(url, image_size):
    """Download an image URL and return it as a resized numpy array.

    Returns None when the URL does not end in .png/.jpg/.jpeg.
    """
    if url.endswith(('.png', '.jpg', '.jpeg')):
        response = requests.get(url)
        img = file_to_np_array(BytesIO(response.content), image_size)
        return img
    else:
        return None


def mkdir(dir_path):
    """Create *dir_path* (and any parents) if it does not already exist."""
    os.makedirs(dir_path, exist_ok=True)
def image_array_from_dir(dir_path, image_size, valid_file_types):
    """Load every image in *dir_path* whose extension is allowed.

    Args:
        dir_path: directory to scan (non-recursive). Paths are expected to
            contain a "build/" segment -- see the note on image_links.
        image_size: target width/height passed to image_to_np_array.
        valid_file_types: iterable of allowed extensions without the dot,
            e.g. {"jpg", "png"}.

    Returns:
        Tuple of (array of shape (n, image_size, image_size, 3),
        list of paths relative to the "build/" directory).
    """
    # os.path.splitext handles names with no dot or multiple dots; the
    # previous name.split(".")[1] raised IndexError on extension-less files
    # and picked the wrong segment for names like "a.tar.gz".
    image_paths = [
        os.path.join(dir_path, file_)
        for file_ in os.listdir(dir_path)
        if os.path.splitext(file_)[1].lstrip(".") in valid_file_types
    ]
    # NOTE(review): assumes every path contains "build/" -- raises
    # IndexError otherwise. TODO confirm against callers.
    image_links = [file_.split("build/")[1] for file_ in image_paths]
    image_holder = []
    for img_path in image_paths:
        # image_paths entries are already joined with dir_path above; the
        # previous extra os.path.join(dir_path, img_path) double-prefixed
        # relative paths and broke file loading.
        image_holder.append(image_to_np_array(img_path, image_size))
    return np.asarray(image_holder), image_links
def load_json_file(file_path):
    """Read *file_path* and return its parsed JSON content."""
    with open(file_path, 'r') as handle:
        return json.load(handle)


def save_json_file(file_path, data):
    """Serialize *data* as JSON into *file_path*, overwriting it."""
    with open(file_path, 'w') as handle:
        json.dump(data, handle)
| cloudera/CML_AMP_Image_Analysis | lib/utils.py | utils.py | py | 1,813 | python | en | code | 10 | github-code | 36 |
17109457983 | from .code_ast import ASTFile
from .goto import Goto
from .util import bf_move
class CodeLinker:
    """Resolves Goto placeholders in processed output into pointer moves."""

    def __init__(self, code: ASTFile):
        # The parsed source file whose processed stream will be linked.
        self.code: ASTFile = code

    def process(self) -> str:
        """Flatten the processed code stream into one output string.

        Literal string fragments are emitted as-is; each Goto is replaced
        by the move sequence bf_move() produces for the distance from the
        current cell to the target variable's cell.

        Raises:
            Exception: for any stream item that is neither str nor Goto.
        """
        code, declarations = self.code.process()
        pos = 0  # current tape position while emitting moves
        data = ""
        for i in code:
            if isinstance(i, str):
                data += i
                continue
            if isinstance(i, Goto):
                # Emit the relative move and remember where we ended up.
                var = i.var.get_var(declarations).pos
                data += bf_move(var - pos)
                pos = var
                continue
            raise Exception
        return data
| PashkovD/braincompiler | braincompiler/linker.py | linker.py | py | 629 | python | en | code | 0 | github-code | 36 |
30064471237 | from aljoadmin.models import Comment
from django import forms
class CommentForm(forms.ModelForm):
    """ModelForm for posting a Comment; exposes only the text body."""

    # Single label-less textarea; the inline style fixes its size.
    content = forms.CharField(
        widget=forms.Textarea(attrs={'style':'width:100%; height:80px;'}),
        label=''
    )

    class Meta:
        model = Comment
        fields = ('content',) | 97kim/aljo | aljoadmin/forms.py | forms.py | py | 264 | python | en | code | 1 | github-code | 36 |
73923124264 | #!/usr/bin/env python3
import requests
import json
import sys
from collections import OrderedDict
def get_versions():
    """Scrape released plugin versions from the GitHub releases API.

    Returns:
        Set of version strings taken from release tags named 'swamp-<version>'.
        Empty when the API call does not return HTTP 200.
    """
    url = 'https://api.github.com/repos/jenkinsci/swamp-plugin/releases'
    versions = set()
    response = requests.get(url)
    if response.status_code == 200:
        response = response.json()
        for rp in response:
            if 'tag_name' in rp.keys() and rp['tag_name'].startswith('swamp'):
                versions.add(rp['tag_name'].partition('swamp-')[-1])
    return versions


def get_stats():
    """Query the Jenkins artifact repository for per-version download counts.

    Returns:
        OrderedDict mapping 'swamp-jenkins-plugin-<version>' -> download count.
        Failed lookups are reported on stderr and skipped.
    """
    versions = get_versions()
    # NOTE(review): get_versions() returns a set, so this str branch looks
    # like dead code left from an earlier signature -- confirm before removing.
    if isinstance(versions, str):
        versions = versions.split()
    stats = OrderedDict()
    for version in versions:
        data = {"type": "file",
                "repoKey": "releases",
                "path": "org/continuousassurance/swamp/jenkins/swamp/{version}/swamp-{version}.hpi".format(version=version)}
        response = requests.post('https://repo.jenkins-ci.org/ui/artifactgeneral', json=data)
        if response.status_code == 200:
            info = json.loads(response.text)
            stats['swamp-jenkins-plugin-{version}'.format(version=version)] = info['info']['downloaded']
        else:
            # Surface failed lookups without aborting the whole run.
            print(response, file=sys.stderr)
    return stats


if __name__ == '__main__':
    print(get_stats())
| vamshikr/swamp-plugin-stats | src/jenkins.py | jenkins.py | py | 1,306 | python | en | code | 0 | github-code | 36 |
36002000545 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 27 15:26:23 2022
@author: lidon
"""
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 13 14:57:16 2021
@author: a
"""
import numpy as np
import scipy.stats
import math
class Markov:
    """A finite-state Markov chain that can sample random state paths.

    Attributes:
        state: array of state labels.
        transition: row-stochastic transition matrix; row order follows `state`.
        pi: initial state distribution over `state`.
    """

    def __init__(self, state, transition, pi=None):
        self.state = state
        self.transition = transition
        # Fall back to a uniform initial distribution when pi is not given.
        if pi:
            self.pi = pi
        else:
            self.pi = np.ones(len(state)) / len(state)

    def sample(self, length):
        """Draw a random path of `length` states, starting from pi."""
        current = np.random.choice(self.state, 1, p=self.pi)[0]
        path = [current]
        for _ in range(length - 1):
            # Look up the row of the current state, then step once.
            row = np.where(self.state == current)[0][0]
            current = np.random.choice(self.state, 1, p=self.transition[row, :])[0]
            path.append(current)
        return np.array(path)
# hidden markov model class
# this class is capable of generating simulated paths with missing observations
class HMM(Markov):
    """Hidden Markov model built on top of the Markov chain sampler."""

    def __init__(self, h_state, o_state, trans_prob, obs_prob, pi):
        """
        Args:
            h_state: array of hidden state labels.
            o_state: array of observable state labels.
            trans_prob: hidden-state transition matrix.
            obs_prob: emission matrix; row i gives P(obs | hidden state i).
            pi: initial hidden-state distribution.
        """
        self.h_state = h_state
        # `state` aliases h_state so the inherited Markov.sample() walks
        # the hidden chain.
        self.state = h_state
        self.o_state = o_state
        self.transition = trans_prob
        self.obs_prob = obs_prob
        self.pi = pi

    def sample_obs(self, hidden_path):
        """Emit one observation per hidden state in *hidden_path*."""
        obs = []
        for i in range(0, len(hidden_path)):
            index = np.where(self.state == hidden_path[i])[0][0]
            new_obs = np.random.choice(self.o_state, 1, p=self.obs_prob[index, :])[0]
            obs.append(new_obs)
        obs = np.array(obs)
        return obs

    def hidden_index(self, h_var):
        """Return the position of hidden label *h_var* in h_state."""
        index = np.where(self.h_state == h_var)[0][0]
        return index

    def obs_index(self, o_var):
        """Return the position of observation label *o_var* in o_state."""
        index = np.where(self.o_state == o_var)[0][0]
        return index

    def generate_seq(self, size, length):
        """Sample *size* (hidden, observed) sequence pairs of given *length*."""
        hidden_data = []
        observe_data = []
        for i in range(0, size):
            h = self.sample(length)
            o = self.sample_obs(h)
            hidden_data.append(h)
            observe_data.append(o)
        hidden_data = np.array(hidden_data)
        observe_data = np.array(observe_data)
        return hidden_data, observe_data

    def generate_partial_seq(self, size, length, p=0.3):
        """Like generate_seq, but masks each observation as None with prob. *p*."""
        hidden_data = []
        observe_data = []
        for i in range(0, size):
            h = self.sample(length)
            o = self.sample_obs(h)
            hidden_data.append(h)
            observe_data.append(o)
        for i in range(0, len(observe_data)):
            for j in range(0, len(observe_data[0])):
                if np.random.binomial(1, p):
                    observe_data[i][j] = None
            # if a whole sequence is missing, delete it
            # NOTE(review): at i == 0 this copies observe_data[-1] (the last
            # sequence), and the matching hidden sequence is left unchanged
            # either way -- confirm this is intended.
            if sum(observe_data[i] == None) == len(observe_data[i]):
                observe_data[i] = observe_data[i - 1]
        hidden_data = np.array(hidden_data)
        observe_data = np.array(observe_data)
        return hidden_data, observe_data
'''
# HMM construction
transition=np.array(
[[0.6,0.2,0.1,0.05,0.05],[0.05,0.6,0.2,0.1,0.05],[0.05,0.05,0.6,0.2,0.1],[0.05,0.05,0.1,0.6,0.2],
[0.05,0.05,0.1,0.2,0.6]]
)
state=np.array(['A','B','C','D','E'])
hidden_state=state
obs_state=np.array(['Blue','Red','Green','Purple','Grey'])
obs_prob=np.array([[0.5,0.3,0.05,0.05,0.1],[0.1,0.5,0.3,0.05,0.05],[0.05,0.1,0.5,0.3,0.05],
[0.05,0.05,0.1,0.5,0.3],[0.3,0.05,0.05,0.1,0.5]
])
pi=[0.5,0.2,0.2,0.1,0]
MC=HMM(hidden_state,obs_state,transition,obs_prob,pi)
'''
| lidongrong/miss_hmm | code/HMM.py | HMM.py | py | 4,510 | python | en | code | 0 | github-code | 36 |
35909639327 | import pygame
import pytest
from scoreboard import Scoreboard
from settings import Settings
from game_stats import GameStats
@pytest.fixture
def scoreboard():
    """Create a fresh Scoreboard backed by a real pygame display and stats."""
    pygame.init()
    ai_settings = Settings()
    screen = pygame.display.set_mode((ai_settings.screen_width, ai_settings.screen_height))
    stats = GameStats(ai_settings)
    return Scoreboard(ai_settings, screen, stats)
def test_prep_score(scoreboard):
    """Test the prep_score method."""
    scoreboard.prep_score()
    # "score_iamge" mirrors the (misspelled) attribute name that Scoreboard
    # itself defines; renaming it here would break the assertion.
    assert isinstance(scoreboard.score_iamge, pygame.Surface)
    #assert scoreboard.score_iamge.get_rect().top == 12


def test_prep_high_score(scoreboard):
    """Test the prep_high_score method."""
    scoreboard.prep_high_score()
    assert isinstance(scoreboard.high_score_iamge, pygame.Surface)
    #assert scoreboard.high_score_iamge.get_rect().centerx == scoreboard.screen_rect.centerx


def test_prep_level(scoreboard):
    """Test the prep_level method."""
    scoreboard.prep_level()
    assert isinstance(scoreboard.level_image, pygame.Surface)


def test_prep_ships(scoreboard):
    """Test the prep_ships method."""
    scoreboard.prep_ships()
    assert len(scoreboard.ships.sprites()) == scoreboard.stats.ships_left


def test_show_score(scoreboard):
    """Test the show_score method."""
    scoreboard.show_score()
    assert isinstance(scoreboard.score_iamge, pygame.Surface)
    assert isinstance(scoreboard.high_score_iamge, pygame.Surface)
    assert isinstance(scoreboard.level_image, pygame.Surface)
    assert isinstance(scoreboard.ships, pygame.sprite.Group)
    #assert scoreboard.score_iamge.get_rect().top == 12
    #assert scoreboard.high_score_iamge.get_rect().centerx == scoreboard.screen_rect.centerx
    #assert scoreboard.level_image.get_rect().top == scoreboard.score_rect.bottom + 10
| shixiaoxiya/py_course_zly_ | Projects/project_code/third2_left _test/test_scoreboard.py | test_scoreboard.py | py | 1,852 | python | en | code | 0 | github-code | 36 |
310628557 | import logging
import collections
import html
import gw2buildutil
from . import util as gw2util
logger = logging.getLogger(__name__)
PAGE_ID = 'build'
PAGE_ID_PREFIX = 'builds/'
PAGE_TITLE_PREFIX = 'Guild Wars 2 build: '
def build (gw2site):
    """Render one HTML page per build registered on *gw2site*.

    For each build, the description/notes/usage text bodies (when present)
    are rendered to RST-flavoured HTML and handed to the page template.
    """
    textbody_renderer = gw2buildutil.textbody.Renderer(
        gw2buildutil.textbody.RenderFormat.RST_HTML, {'heading level': 3})
    # FileStorage provides cached API data for the renderer.
    with gw2buildutil.api.storage.FileStorage() as api_storage:
        for build in gw2site.builds.values():
            logger.info(f'render {build.metadata}')
            dest_page_id = PAGE_ID_PREFIX + gw2util.get_build_id(build)
            page_title = PAGE_TITLE_PREFIX + str(build.metadata)
            # Only render the optional sections that are actually present.
            texts = {}
            if build.intro.description is not None:
                texts['desc'] = textbody_renderer.render(
                    build.intro.description, build.metadata, api_storage)
            if build.notes is not None:
                texts['notes'] = textbody_renderer.render(
                    build.notes, build.metadata, api_storage)
            if build.usage is not None:
                texts['usage'] = textbody_renderer.render(
                    build.usage, build.metadata, api_storage)
            gw2site.render_page_template(PAGE_ID, page_title, {
                'build': build,
                'texts': texts,
            }, dest_page_id=dest_page_id)
| ikn/ikn.org.uk | lib/iknsite/gw2/build.py | build.py | py | 1,379 | python | en | code | 0 | github-code | 36 |
34398257612 | import qrcode
# Payload to encode in the QR code.
data = "Winson is the goat no cappa"

# Build the QR code explicitly so size and colours can be controlled.
# (The previous `img = qrcode.make(data)` call was dead code: its result
# was immediately overwritten by qr.make_image() below.)
qr = qrcode.QRCode(version=1, box_size=10, border=5)
qr.add_data(data)
qr.make(fit=True)
img = qr.make_image(fill_color='red', back_color='white')
img.save('C:/Users/wilko/Desktop/python12projects/qrcode/qrcode.png') | Riamuwilko/python_beginner_projects | qrcode/main.py | main.py | py | 303 | python | en | code | 0 | github-code | 36 |
937990517 | import logging
from django.core.exceptions import ValidationError
from django import forms
from django.utils.translation import gettext as _
from custody.models import MultiSigAddress
from coldstoragetransfers.helpers.btc import BTCHelper
class MultiSigAddressForm(forms.ModelForm):
    """Admin form that validates and creates a BTC multisig address.

    `address` and `redeem_script` are excluded from the form because they
    are derived in clean() from the child addresses.
    """

    class Meta:
        model = MultiSigAddress
        exclude = ['address', 'redeem_script']

    def clean(self):
        """Validate currency/threshold consistency, then derive the address.

        Raises:
            ValidationError: when a child address has a different currency
                than the selected one, or minimum_signatures exceeds the
                number of child addresses.
        """
        #{'currency': <Currency: ETH>, 'user_addresses': <QuerySet [<UserAddress: BTC_tipu>, <UserAddress: ETH_tipu>]>, 'minimum_signatures': 2}
        this_currency = self.cleaned_data['currency'].symbol
        errors = {}
        for a in self.cleaned_data['user_addresses']:
            if a.currency.symbol != this_currency:
                errors['user_addresses'] = _("Child addresses' currency must match selected currency.")
        if self.cleaned_data['minimum_signatures'] > len(self.cleaned_data['user_addresses']):
            errors['minimum_signatures'] = _("This amount can't be less than the number of child addresses.")
        if errors:
            raise ValidationError(errors)
        super().clean()
        """
        doesn't work on save_model in the MultisigAddressAdmin due to:
        <MultiSigAddress: BTC_None>" needs to have a value for field "id" before this many-to-many relationship can be used.
        because new models don't exist in the db, relationship can't be accessed. must happen here.
        """
        raw_public_keys = [ua.address for ua in self.cleaned_data['user_addresses']]
        # predictable ordering is required for multisig creation
        raw_public_keys.sort()
        #should only happen once !
        # Only derive address/redeem_script on first save (no pk yet);
        # edits must not regenerate them.
        if not self.instance.pk:
            create_payload = BTCHelper().add_multisig_address(self.cleaned_data['minimum_signatures'], raw_public_keys)
            self.instance.address = create_payload['address']
            self.instance.redeem_script = create_payload['redeemScript']
| chriscslaughter/nodestack | custody/forms.py | forms.py | py | 1,960 | python | en | code | 0 | github-code | 36 |
33953035551 | import messageUtils as mu
import threading
class Listener(threading.Thread):
    """Background thread that reads from a socket and forwards messages.

    Works for both sides: a Client passes connection=None (the Client itself
    carries the connectionOn flag); a Server passes the Connection object for
    the remote peer, whose flag is watched instead.
    """

    def __init__(self, socketO, caller, connection=None):
        threading.Thread.__init__(self)
        self.caller = caller  # Client or Server object
        self.connection = connection  # Connection object or None if Client is calling this
        # `check` is whichever object owns the connectionOn flag.
        if self.connection == None:  # caller is a Client
            self.check = self.caller
        else:  # caller is a Server
            self.check = self.connection
        self.socketO = socketO  # socket object
        print("[DEBUG] listener set up")

    def run(self):
        """Poll the socket until the owner's connectionOn flag goes False."""
        while self.check.connectionOn:
            received = self.socketO.recv(1024)
            if len(received) != 0:
                # Hand the raw bytes to the caller; for servers this
                # includes the originating connection.
                self.caller.receivedMsg(self.connection, received)
            received = ""
        return
class Sender(threading.Thread):
    """Background thread that reads console input and sends it to the peer."""

    def __init__(self, caller):
        threading.Thread.__init__(self)
        self.caller = caller  # Client or Server object
        print("[DEBUG] sender set up")

    def run(self):
        """Forward each typed line until the caller's connectionOn goes False."""
        while self.caller.connectionOn:
            # NOTE(review): raw_input is Python 2 only; under Python 3 this
            # raises NameError and should be input().
            msg = raw_input()
            self.caller.sendMessage(msg)
        return
| rehnarehu/netproj | chat/mythreads.py | mythreads.py | py | 997 | python | en | code | 0 | github-code | 36 |
19450895845 | # -*- coding: utf-8 -*-
import os
import sys
import datetime
import struct
import wave
def argumentsparser():
    """Validate sys.argv for a single .kamata_programs input file.

    Returns:
        The usage string on any problem; None when the arguments are valid.

    Side effect: pops the script name off sys.argv, so after a successful
    call sys.argv[0] is the input file path -- the __main__ block below
    relies on this.
    """
    usage = "Usage: python {} inputfile.kamata_programs".format(__file__)
    arguments = sys.argv
    if len(arguments) == 1 or len(arguments) > 2:
        return usage
    arguments.pop(0)
    if not arguments[0].endswith('.kamata_programs') or arguments[0].startswith('-'):
        return usage
if __name__ == '__main__' :
    if argumentsparser() is None :
        # After argumentsparser() popped the script name, sys.argv[0] is the
        # input .kamata_programs file.
        filesize = os.path.getsize(sys.argv[0])
        # NOTE(review): the offsets below (header 96, record stride 616,
        # 12 bytes per sample, 4-byte trailer) appear to be the record
        # layout of the .kamata_programs format -- confirm against a spec.
        readpoint = 96
        filenumber = 1
        # Output directory named after the current timestamp.
        now = datetime.datetime.now()
        dirname = "{0:%y%m%d%H%M%S}".format(now)
        fin = open(sys.argv[0], mode="rb")
        if readpoint < filesize :
            os.makedirs(dirname, exist_ok=True)
            print("inputfile size =", filesize)
        # One iteration per embedded program record -> one .wav file.
        while readpoint < filesize :
            fin.seek(readpoint)
            # File name: zero-padded counter plus up to 12 bytes of
            # NUL-terminated name from the record header.
            filename = "{0:03d}".format(filenumber)
            data = struct.unpack("B", fin.read(1))
            i = 0
            while not data[0] == 0 and i < 12 :
                filename += chr(data[0])
                data = struct.unpack("B", fin.read(1))
                i += 1
            filename += ".wav"
            readpoint += 616
            fout = wave.Wave_write(dirname + "/" + filename)
            fout.setparams((
                1, # mono
                1, # 8 bits = 1 byte
                48000, # sampling bitrate
                32, # samples
                "NONE", # not compressed
                "not compressed" # not compressed
            ))
            # 32 little-endian floats in [0, 1], spaced 12 bytes apart,
            # scaled to unsigned 8-bit samples.
            for i in range(32):
                fin.seek(readpoint)
                valuekey = fin.read(4)
                bitvalue = round(struct.unpack('<f', valuekey)[0] * 255)
                fout.writeframesraw(struct.pack("B", bitvalue))
                print("readpoint =", readpoint, " , ", bitvalue)
                readpoint += 12
            print("-----------------")
            fout.close()
            filenumber += 1
            readpoint += 4
        fin.close()
        print(filenumber - 1, "wave files are created in the", dirname, "folder successfully.")
        print("The format is monoral, 8-bit, 48kHz and 32 samples. Files are expected to be readable for an ELZ_1 synthesizer.")
    else:
        print(argumentsparser())
| amariichi/kamata2wav | kamata2wav.py | kamata2wav.py | py | 2,462 | python | en | code | 0 | github-code | 36 |
5913567690 | import torch
from torch import optim, nn
import os
from tqdm.auto import tqdm
from model import *
from data import *
from torch.cuda.amp import autocast, GradScaler
from validate_and_test import *
def load_checkpointed_model_params(model, optimizer, resume_checkpoint):
    """Restore model/optimizer state plus training history from a checkpoint.

    Args:
        model: network whose weights are loaded in place.
        optimizer: optimizer whose state is loaded in place.
        resume_checkpoint: path to a file written by save_model_checkpoint.

    Returns:
        Tuple of (model, optimizer, start_epoch, epoch_numbers,
        training_losses, training_accuracy, validation_losses,
        validation_accuracy).
    """
    state = torch.load(resume_checkpoint)
    model.load_state_dict(state['model_state_dict'])
    optimizer.load_state_dict(state['optimizer_state_dict'])
    start_epoch = state['epoch']
    # History lists that the training loop keeps appending to.
    epoch_numbers = state['epoch_numbers']
    training_losses = state['training_losses']
    validation_losses = state['validation_losses']
    training_accuracy = state['training_accuracy']
    validation_accuracy = state['validation_accuracy']
    print(f"Model checkpoint {resume_checkpoint} loaded! Will resume the epochs from number #{start_epoch}")
    # Note the interleaving: training loss/accuracy first, then validation.
    return (
        model,
        optimizer,
        start_epoch,
        epoch_numbers,
        training_losses,
        training_accuracy,
        validation_losses,
        validation_accuracy,
    )
def save_model_checkpoint(experiment, model, optimizer, params, epoch, epoch_numbers, training_losses,
                          validation_losses, training_accuracy, validation_accuracy):
    """Write a resumable checkpoint for the given epoch to disk.

    The file lands at <params['save_dir']>/<experiment>/model_epoch_<epoch+1>.pt
    and contains the model/optimizer state plus the loss/accuracy history in
    the shape expected by load_checkpointed_model_params.
    """
    # Switch back to train mode first: a validation pass just before saving
    # leaves the model in eval mode (dropout disabled), and the author's
    # intent is to checkpoint in training configuration.
    model.train()
    # Ensure the per-experiment directory exists before writing into it.
    save_dir = os.path.join(params["save_dir"], experiment)
    os.makedirs(save_dir, exist_ok=True)
    checkpoint_path = os.path.join(save_dir, f'model_epoch_{epoch + 1}.pt')
    payload = {
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'epoch': epoch + 1,
        'epoch_numbers': epoch_numbers,
        'training_losses': training_losses,
        'validation_losses': validation_losses,
        'training_accuracy': training_accuracy,
        'validation_accuracy': validation_accuracy,
    }
    torch.save(payload, checkpoint_path)
    print(f"Save checkpointed the model at the path {checkpoint_path}")
def train_model(model, train_loader, val_loader, num_epochs, params, experiment, epoch_saver_count=5,
                resume_checkpoint=None):
    """Train *model* with Adam + one-cycle LR, validating and checkpointing per epoch.

    Args:
        model: network to train. NOTE(review): a device is selected below but
            the model is never moved to it here -- assumes the caller already
            did model.to(device); confirm.
        train_loader: DataLoader of (images, labels) training batches.
        val_loader: DataLoader consumed by perform_validation each epoch.
        num_epochs: number of additional epochs to run.
        params: dict with 'learning_rate', 'weight_decay', 'grad_clip', 'save_dir'.
        experiment: name of the checkpoint sub-directory.
        epoch_saver_count: write a checkpoint every N epochs.
        resume_checkpoint: optional checkpoint path to resume state/history from.

    Returns:
        Tuple of (epoch_numbers, training_losses, training_accuracy,
        validation_losses, validation_accuracy) accumulated across epochs
        (including any history restored from the checkpoint).
    """
    # Device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    torch.cuda.empty_cache()
    # Things we are keeping track of
    start_epoch = 0
    epoch_numbers = []
    training_losses = []
    validation_losses = []
    training_accuracy = []
    validation_accuracy = []
    # Adam optimizer
    optimizer = optim.Adam(model.parameters(), lr=params['learning_rate'], weight_decay=params['weight_decay'])
    # loss
    criterion = nn.CrossEntropyLoss()
    # load checkpoint
    if resume_checkpoint:
        model, optimizer, start_epoch, epoch_numbers, training_losses, training_accuracy, validation_losses, validation_accuracy = load_checkpointed_model_params(
            model,
            optimizer,
            resume_checkpoint
        )
    # Set up one-cycle learning rate scheduler
    # NOTE(review): when resuming, the one-cycle schedule restarts from
    # scratch over the new num_epochs rather than continuing -- confirm
    # this is intended.
    sched = torch.optim.lr_scheduler.OneCycleLR(
        optimizer, params['learning_rate'],
        epochs=num_epochs,
        steps_per_epoch=len(train_loader)
    )
    # Custom progress bar for total epochs with color and displaying average epoch loss
    total_progress_bar = tqdm(total=num_epochs, desc=f"Total Epochs", position=0,
                              bar_format="{desc}: {percentage}% |{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]",
                              dynamic_ncols=True, ncols=100, colour='red')
    # training loop
    for epoch in range(start_epoch, start_epoch + num_epochs):
        # set to train mode
        model.train()
        epoch_training_loss = 0.0
        train_correct_predictions = 0
        total_samples = 0
        # Custom progress bar for each epoch with color
        epoch_progress_bar = tqdm(total=len(train_loader), desc=f"Epoch {epoch + 1}/{start_epoch + num_epochs}",
                                  position=1, leave=False, dynamic_ncols=True, ncols=100, colour='green')
        for batch_idx, data in enumerate(train_loader):
            # get the data and outputs
            images, labels = data
            images = images.to(device)
            labels = labels.to(device)
            output_logits = model(images)
            loss = criterion(output_logits, labels)
            loss.backward()
            # Gradient clipping
            nn.utils.clip_grad_value_(model.parameters(), params['grad_clip'])
            optimizer.step()
            # Gradients are cleared after the step, so each iteration starts
            # from zeroed grads.
            optimizer.zero_grad()
            # print(f"Curr LR -> {optimizer.param_groups[0]['lr']}")
            # scheduler update (one-cycle steps once per batch)
            sched.step()
            epoch_training_loss += loss.item()
            # batch stats
            # Compute training accuracy for this batch
            output_probs = nn.Softmax(dim=1)(output_logits)
            predicted = torch.argmax(output_probs, 1)
            batch_correct_predictions = (predicted == labels).sum().item()
            batch_size = labels.size(0)
            train_correct_predictions += batch_correct_predictions
            total_samples += batch_size  # batch size basically
            # Update the epoch progress bar (overwrite in place)
            epoch_progress_bar.set_postfix({
                "loss": loss.item(),
                "batch_acc": batch_correct_predictions / batch_size
            })
            epoch_progress_bar.update(1)
        # Close the epoch progress bar
        epoch_progress_bar.close()
        # Calculate average loss for the epoch
        avg_training_loss_for_epoch = epoch_training_loss / len(train_loader)
        # Calculate training accuracy for the epoch
        avg_training_accuracy = train_correct_predictions / total_samples
        # Validation loop
        avg_val_accuracy, avg_val_loss_for_epoch = perform_validation(criterion, device, model, val_loader)
        # Store values
        training_accuracy.append(avg_training_accuracy)
        training_losses.append(avg_training_loss_for_epoch)
        validation_accuracy.append(avg_val_accuracy)
        validation_losses.append(avg_val_loss_for_epoch)
        epoch_numbers.append(epoch + 1)
        # Update the total progress bar
        total_progress_bar.set_postfix(
            {
                "loss": avg_training_loss_for_epoch,
                "train_acc": avg_training_accuracy,
                "val_loss": avg_val_loss_for_epoch,
                "val_acc": avg_val_accuracy,
            }
        )
        # Close the tqdm bat
        total_progress_bar.update(1)
        # Print state
        print(
            f'Epoch {epoch + 1}: train_loss: {avg_training_loss_for_epoch} | train_accuracy: {avg_training_accuracy} | val_loss: {avg_val_loss_for_epoch} | val_accuracy: {avg_val_accuracy} '
        )
        # Save model checkpoint periodically
        need_to_save_model_checkpoint = (epoch + 1) % epoch_saver_count == 0
        if need_to_save_model_checkpoint:
            print(f"Going to save model @ Epoch:{epoch + 1}")
            save_model_checkpoint(
                experiment,
                model,
                optimizer,
                params,
                epoch,
                epoch_numbers,
                training_losses,
                validation_losses,
                training_accuracy,
                validation_accuracy
            )
    # Close the total progress bar
    total_progress_bar.close()
    # Return things needed for plotting
    return epoch_numbers, training_losses, training_accuracy, validation_losses, validation_accuracy
# %%
# if __name__ == '__main__':
# params = {
# 'batch_size': 32,
# 'learning_rate': 0.0045,
# 'save_dir': 'model_ckpts'
# }
# train_data_loader = create_train_data_loader(32)
# test_data_loader, validation_data_loader = create_test_and_validation_data_loader(32)
#
# full_experiment = "Full Data"
# # Check if GPU is available, otherwise use CPU
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# full_cifar_model = CIFARClassifier()
# full_cifar_model.to(device)
#
# train_model(
# full_cifar_model,
# train_data_loader,
# validation_data_loader,
# 2,
# params,
# full_experiment,
# epoch_saver_count=1,
# resume_checkpoint=None
# )
| ParasharaRamesh/NUS-CS5242-Neural-Networks-and-Deep-Learning | Assignment 2 (Autoencoders & CNNs)/Question-5_CIFAR10/train.py | train.py | py | 8,557 | python | en | code | 0 | github-code | 36 |
35493639057 | from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views import View
from main.models import *
from main.forms import *
from cart.forms import CartAddProductForm
def base_context(request):
    """Build the template context shared by every page.

    Keys:
        user: the user attached to the request.
        site_name: text before the "|" in the page <title>.
        page_name: text after the "|" in the page <title>.
        page_header: page heading rendered in display-3 style.
    """
    return {
        'user': request.user,
        'site_name': "Sushiman",
        'page_name': "Главная",
        'page_header': "",
    }
# Landing page.
def index(request):
    """Render the start page listing all menu categories."""
    c = base_context(request)
    c["page_header"] = "Меню"
    c["categories"] = Category.objects.all()
    return render(request, 'pages/index.html', c)


# View for browsing products of one menu category.
def view_category(request, category_slug):
    """Render one category (404 on unknown slug) with its products and an
    add-to-cart form."""
    c = base_context(request)
    category = get_object_or_404(Category, slug=category_slug)
    c["page_name"] = category.name
    c["page_header"] = category.name
    c['products'] = Product.objects.filter(category=category)
    c['form'] = CartAddProductForm()
    return render(request, 'pages/category.html', c)


def adresses(request):
    """Render the store addresses page.

    (The function name keeps the existing "adresses" spelling because URL
    configuration may reference it.)
    """
    c = base_context(request)
    c["page_header"] = "Адреса"
    c["page_name"] = "Адреса"
    return render(request, 'pages/adresses.html', c)
| SwAsKk/Online_Shop_Django | main/views.py | views.py | py | 1,497 | python | en | code | 0 | github-code | 36 |
43228825355 | # %%
from datasets import load_dataset
from transformers import AutoTokenizer, BertForSequenceClassification, TrainingArguments, Trainer
from transformers import pipeline
# %%
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = BertForSequenceClassification.from_pretrained("distilbert-base-uncased").cuda()
# %%
dataset = load_dataset("NgThVinh/dsc_model")
dataset.with_format("torch")
dataset
# %%
# dataset.push_to_hub('NgThVinh/dsc_model')
# %%
dataset['train'][:5]
# %%
dataset['train'].features
# %%
dataset['train'][0]
# %%
# max_length = 0
# for sen in dataset['train']['document']:
# length = len(tokenizer.tokenize(sen))
# max_length = max(length, max_length)
# max_length
# %%
def create_input_sentence(document, claim):
    """Compose the classification prompt for one (document, claim) pair.

    The prompt embeds the claim and document verbatim and describes the
    three target labels (SUPPORTED / REFUTED / NEI) to the model.
    """
    instructions = (
        "Classify the claim to which class it belongs. "
        "If the claim contains information about the document, its label will be SUPPORTED, "
        "otherwise, its label will be REFUTED. "
        "In case the information of the claim cannot be verified based on the given document, "
        "its label will be NEI"
    )
    return f'Given claim-document pair where claim: "{claim}", document: "{document}". ' + instructions
# %%
print(create_input_sentence(dataset['train'][100]['document'], dataset['train'][100]['claim']))
# %%
def preprocess_function(examples):
    """Tokenize one dataset example into model inputs and label ids.

    Adds 'input_ids' and 'attention_mask' (from the claim/document prompt)
    plus 'labels' (from the tokenized label string) to the example dict and
    returns it.
    """
    # create_input_sentence's signature is (document, claim); the arguments
    # were previously passed swapped, so the prompt labelled the claim as
    # the document and vice versa (compare the correct call used in the
    # earlier sample cell).
    inputs = tokenizer.encode_plus(
        create_input_sentence(examples["document"], examples["claim"]),
        truncation=True,
        padding="max_length",
        return_tensors='pt'
    )
    label = tokenizer.encode_plus(
        examples["label"],
        truncation=True,
        padding="max_length",
        return_tensors='pt'
    )
    # encode_plus returns batch-shaped tensors; keep the single row.
    examples["input_ids"] = inputs['input_ids'][0]
    examples["attention_mask"] = inputs['attention_mask'][0]
    examples['labels'] = label['input_ids'][0]
    return examples
# %%
print(preprocess_function(dataset['train'][100]))
# %%
train_dataset = dataset["train"].map(preprocess_function, remove_columns=dataset["train"].column_names)
test_dataset = dataset["test"].map(preprocess_function, remove_columns=dataset["test"].column_names)
# %%
# from transformers import DefaultDataCollator
# data_collator = DefaultDataCollator()
# %%
# Fine-tuning configuration: evaluate once per epoch and push checkpoints
# to the Hugging Face Hub under "dsc_model".
training_args = TrainingArguments(
    output_dir="dsc_model",
    evaluation_strategy="epoch",
    learning_rate=2e-5,
    # per_device_train_batch_size=16,
    # per_device_eval_batch_size=16,
    num_train_epochs=3,
    weight_decay=0.01,
    push_to_hub=True,
)
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=test_dataset,
    tokenizer=tokenizer,
    # data_collator=data_collator,
)
trainer.train()
| NgThVinh/dsc_uit | main.py | main.py | py | 2,653 | python | en | code | 0 | github-code | 36 |
31826733278 | import argparse
import pprint
import sys
from designspaceProblems import DesignSpaceChecker
def main(args=None):
    """CLI entry point: check a .designspace file and print its problems.

    Args:
        args: argv list for argparse; None means sys.argv[1:].

    Always returns None, so the process exits 0 even when problems were
    found. NOTE(review): returning a status derived from dc.problems may be
    intended -- confirm before changing.
    """
    parser = argparse.ArgumentParser(
        description='Check designspace data.')
    parser.add_argument(
        'input_ds',
        metavar='PATH',
        help='path to designspace file',
        type=argparse.FileType())
    options = parser.parse_args(args)
    # argparse.FileType() opens the file (validating it exists), but only
    # its .name (the path) is passed on to the checker.
    dc = DesignSpaceChecker(options.input_ds.name)
    dc.checkEverything()
    pprint.pprint(dc.problems)


if __name__ == '__main__':
    sys.exit(main())
| LettError/DesignspaceProblems | Lib/designspaceProblems/__main__.py | __main__.py | py | 542 | python | en | code | 18 | github-code | 36 |
30898137207 | """
Graph implementation
class GraphMatrix - adjacency matrix
"""
from collections import deque
class GraphMatrix:
    """Undirected graph backed by a square adjacency matrix.

    Vertices are the integers 0..size-1; matrix[i][j] == 1 marks an edge.
    Every edge is stored symmetrically (both [i][j] and [j][i]).
    """

    def __init__(self, size: int):
        """Allocate a size x size matrix with no edges."""
        self.graph_matrix = [[0] * size for _ in range(size)]
        self.size = size

    def add_edge(self, edge: list):
        """Connect the two vertices in *edge*; False when already linked."""
        if self.has_edge(edge):
            return False
        a, b = edge
        self.graph_matrix[a][b] = 1
        self.graph_matrix[b][a] = 1
        return True

    def delete_edge(self, edge: list):
        """Disconnect the two vertices in *edge*; False when not linked."""
        if not self.has_edge(edge):
            return False
        a, b = edge
        self.graph_matrix[a][b] = 0
        self.graph_matrix[b][a] = 0
        return True

    def has_edge(self, vertices: list):
        """True when *vertices* is a pair joined in both directions."""
        if len(vertices) != 2:
            return False
        a, b = vertices
        return bool(self.graph_matrix[a][b] and self.graph_matrix[b][a])

    def bfs_traversal(self, node):
        """Print vertices reachable from *node* in level order - O(v^2)."""
        self._bfs(node)

    def _bfs(self, node):
        """Breadth-first walk: print each vertex as it leaves the queue."""
        seen = [node]
        pending = deque([node])
        while pending:
            current = pending.popleft()
            print(current)
            for neighbor, connected in enumerate(self.graph_matrix[current]):
                if connected != 0 and neighbor not in seen:
                    seen.append(neighbor)
                    pending.append(neighbor)

    def dfs_traversal(self, node):
        """Print vertices reachable from *node* depth-first - O(v^2)."""
        self._dfs(set(), node)

    def _dfs(self, visited, node):
        """Recursive depth-first walk; *visited* accumulates printed vertices."""
        if node in visited:
            return
        print(node)
        visited.add(node)
        for neighbor, connected in enumerate(self.graph_matrix[node]):
            if connected != 0:
                self._dfs(visited, neighbor)
if __name__ == '__main__':
    # Demo: build this 6-vertex graph, toggle one edge, then traverse.
    # 0 -- 1
    # |
    # |
    # 2 -- 3 -- 4
    #      |   /
    #      |  /
    #      5
    g = GraphMatrix(6)
    # Add the sample edges.
    print('Adding edges...')
    g.add_edge([0,1])
    g.add_edge([0,2])
    g.add_edge([2,3])
    g.add_edge([2,5])
    g.add_edge([3,4])
    g.add_edge([3,5])
    # Edge query before deletion.
    print('***Has edge?***')
    print( g.has_edge([5,3]) )
    # Delete the 5-3 edge.
    print('***Delete edge: 5,3:***')
    print( g.delete_edge([5,3]) )
    # Edge query after deletion (now False).
    print('***Has edge?***')
    print( g.has_edge([5,3]) )
    # Both traversals start from vertex 0.
    print('*** DFS ***')
    g.dfs_traversal(0)
    print('*** BFS ***')
    g.bfs_traversal(0)
| g-areth/algos | src/algorithms/datastructures/graphs/graph_adj_matrix.py | graph_adj_matrix.py | py | 3,053 | python | en | code | 0 | github-code | 36 |
37936099877 | import apache_beam as beam
# Minimal Beam pipeline demonstrating GroupIntoBatches: produce items keyed
# by season are grouped into batches of at most 3 values per key, and each
# (key, batch) pair is printed.
with beam.Pipeline() as pipeline:
    batches_with_keys = (
        pipeline
        | 'Create produce' >> beam.Create([
            ('spring', '🍓'),
            ('spring', '🥕'),
            ('spring', '🍆'),
            ('spring', '🍅'),
            ('summer', '🥕'),
            ('summer', '🍅'),
            ('summer', '🌽'),
            ('fall', '🥕'),
            ('fall', '🍅'),
            ('winter', '🍆'),
        ])
        | 'Group into batches' >> beam.GroupIntoBatches(3)
        | beam.Map(print)) | ezeparziale/apache-beam-start | examples/groupby_batches.py | groupby_batches.py | py | 529 | python | en | code | 0 | github-code | 36 |
28891383601 | """Initializes and checks the environment needed to run pytype."""
import logging
import sys
from typing import List
from pytype.imports import typeshed
from pytype.platform_utils import path_utils
from pytype.tools import runner
def check_pytype_or_die():
  """Exit the process if the `pytype` binary cannot be executed."""
  if not runner.can_run("pytype", "-h"):
    logging.critical(
        "Cannot run pytype. Check that it is installed and in your path")
    sys.exit(1)
def check_python_version(exe: List[str], required):
  """Report whether *exe* launches Python at the *required* version.

  Args:
    exe: command-line prefix for the interpreter, e.g. ["python3"].
    required: version prefix to match, e.g. "3.10".

  Returns:
    (True, None) on a match; (False, version_banner) on a version mismatch;
    (False, None) if the command could not be executed at all.
  """
  try:
    # Older interpreters print the version banner on stderr, newer on stdout.
    _, out, err = runner.BinaryRun(exe + ["--version"]).communicate()  # pylint: disable=unpacking-non-sequence
    banner = (out or err).decode("utf-8")
  except OSError:
    return False, None
  if banner.startswith(f"Python {required}"):
    return True, None
  return False, banner.rstrip()
def check_python_exe_or_die(required) -> List[str]:
  """Check if a python executable with the required version is in path.

  Tries platform-appropriate launcher names in order and returns the first
  command (as an argv prefix list) whose reported version matches
  *required*.  If none matches, logs every version banner that was seen and
  exits the process.
  """
  error = []
  if sys.platform == "win32":
    # The Windows "py" launcher accepts "-X.Y" to select a version.
    possible_exes = (["py", f"-{required}"], ["py3"], ["py"])
  else:
    possible_exes = ([f"python{required}"], ["python3"], ["python"])
  for exe in possible_exes:
    valid, out = check_python_version(exe, required)
    if valid:
      return exe
    elif out:
      # Remember mismatching banners for the final error message.
      error.append(out)
  logging.critical(
      "Could not find a valid python%s interpreter in path (found %s)",
      required, ", ".join(sorted(set(error))))
  sys.exit(1)
def initialize_typeshed_or_die():
  """Initialize a Typeshed object or die.

  Returns:
    An instance of Typeshed()

  Exits the process with status 1 if the typeshed directory cannot be read.
  """
  try:
    return typeshed.Typeshed()
  except OSError as e:
    logging.critical(str(e))
    sys.exit(1)
def compute_pythonpath(filenames):
  """Compute a list of dependency paths for the given source files.

  For a file inside a package (its directory contains __init__.py), the
  parent of the top-level package is added so that
  'from package import module' resolves.  For a standalone script, its own
  directory is added so 'import module_in_same_directory' works.

  Returns:
    The paths reverse-sorted, which guarantees that every child directory
    appears before its parent.  This matters because the first matching
    path determines a module's name: given foo/bar1.py and foo/baz/qux1.py,
    listing foo/ before foo/baz/ would name the latter baz.qux1 instead of
    qux1; reverse order yields bar1 and qux1 as expected.
  """
  paths = set()
  for filename in filenames:
    directory = path_utils.dirname(filename)
    if not path_utils.exists(path_utils.join(directory, "__init__.py")):
      # Standalone script: same-directory imports must resolve.
      paths.add(directory)
      continue
    # Part of a (sub)package: climb to the outermost directory that still
    # contains an __init__.py, then record its parent.
    parent = path_utils.dirname(directory)
    while path_utils.exists(path_utils.join(parent, "__init__.py")):
      parent = path_utils.dirname(parent)
    paths.add(parent)
  return sorted(paths, reverse=True)
| google/pytype | pytype/tools/environment.py | environment.py | py | 3,242 | python | en | code | 4,405 | github-code | 36 |
37129616360 | import numpy as np
import cv2
import NeuralNetwork
import json
import os
import matplotlib.pyplot as plt
#defining the initial parameters and the learning rate
batch_size = 10        # mini-batch size used during training
nn_hdim = 2048         # hidden-layer width
learning_rate = 0.1    # initial SGD learning rate (halved every 10 epochs in main)
f1 = "relu"            # hidden-layer activation name
f2 = "sigmoid"         # output-layer activation name
threshold = 0.0001     # loss-delta threshold used by the convergence test
sd_init = 0.01         # std-dev for weight initialization (layer 1)
sd_init_w2 = sd_init   # std-dev for weight initialization (layer 2)
def make_json(W1, W2, b1, b2, id1, id2, activation1, activation2, nn_h_dim, path_to_save):
    """Serialize trained network parameters to a JSON file.

    The file is written into ``path_to_save`` under the name
    ``trained_dict_<id1>_<id2>``.

    Args:
        W1: numpy array of shape (1024, nn_h_dim), first-layer weights.
        W2: numpy array of shape (nn_h_dim, 1), second-layer weights.
        b1: numpy array of shape (1, nn_h_dim), first-layer bias.
        b2: numpy array of shape (1, 1), second-layer bias.
        id1: first student id string, e.g. '204214928'.
        id2: second student id string, e.g. '308407907'.
        activation1: hidden activation name, e.g. 'ReLU'.
        activation2: output activation name, e.g. 'sigmoid'.
        nn_h_dim: hidden-layer width (2048).
        path_to_save: directory in which to write the file.
    """
    payload = {
        'weights': (W1.tolist(), W2.tolist()),
        'biases': (b1.tolist(), b2.tolist()),
        'nn_hdim': nn_h_dim,
        'activation_1': activation1,
        'activation_2': activation2,
        'IDs': (id1, id2),
    }
    ids = payload.get('IDs')
    target = os.path.join(path_to_save, 'trained_dict_{}_{}'.format(ids[0], ids[1]))
    with open(target, 'w') as handle:
        json.dump(payload, handle, indent=4)
def load_image(prefix, number, data_vec, label_vec, is_training):
    """Load one grayscale PNG, normalize it to [0, 1] and append it in place.

    :param prefix: "pos_" or "neg_"; "pos_" samples get label 1, others 0
    :param number: image index as a string
    :param data_vec: list receiving the flattened, /255-scaled pixel vector
    :param label_vec: list receiving the matching 0/1 label
    :param is_training: selects the training vs. validation directory
    """
    # NOTE(review): paths use Windows-style backslash separators, so this
    # only works on Windows — consider os.path.join for portability.
    if is_training:
        path = "data\\training\\"
    else:
        path = "data\\validation\\"
    path = path + prefix + number + ".png"
    image = cv2.imread(path, flags=cv2.IMREAD_GRAYSCALE)
    # flatten() yields a 1-D vector; dividing by 255.0 scales pixels to [0, 1]
    data_vec.append(image.flatten() / 255.0)
    if prefix == "pos_":
        label_vec.append(1)
    else:
        label_vec.append(0)
def load_data(train_data, val_data, train_label, val_label):
    """Load the full dataset: images 0-255 into the training lists and
    256-333 into the validation lists (negative sample first, then positive,
    per index), then return all four as numpy arrays.
    """
    for index in range(334):
        is_training = index < 256
        data = train_data if is_training else val_data
        labels = train_label if is_training else val_label
        for prefix in ("neg_", "pos_"):
            load_image(prefix, str(index), data, labels, is_training)
    return (np.asarray(train_data), np.asarray(val_data),
            np.asarray(train_label), np.asarray(val_label))
def main():
    """Train the two-layer network with mini-batch SGD until convergence,
    save the trained parameters to JSON and plot loss/accuracy curves.

    Convergence test: after 100 epochs, stop once the validation-loss delta
    has stayed at or below `threshold` for 3 consecutive epochs.
    """
    convergence_flag = False
    previous_loss = np.inf
    counter = 0
    accuracy_per_training_epoch = 0
    loss_per_training_epoch = 0
    train_data = []
    val_data = []
    train_label = []
    val_label = []
    epoch_training_loss = []
    epoch_validation_loss= []
    epoch_training_accuracy = []
    epoch_validation_accuracy = []
    train_data, val_data, train_label, val_label = load_data(train_data, val_data, train_label, val_label)
    my_net = NeuralNetwork.NeuralNetwork(learning_rate, f1, f2, sd_init, sd_init_w2)
    epoc = 0
    # Baseline evaluation on the validation set before any training.
    my_net.forward_pass(val_data, val_label)
    my_net.calculate_accuracy(val_label)
    print("Inintial validation loss: ", my_net.loss, "Inintial accuracy: ", my_net.accuracy)
    while not convergence_flag:
        batch_count = 0
        # Reshuffle samples and labels with the same permutation each epoch.
        shuffler = np.random.permutation(len(train_label))
        train_label = train_label[shuffler]
        train_data = train_data[shuffler]
        # Learning-rate schedule: halve every 10 epochs (not on epoch 0).
        if (not epoc % 10) and (epoc != 0):
            my_net.learning_rate = my_net.learning_rate / 2
        for i in range(0, len(train_label), batch_size):
            batch = train_data[i:batch_size + i, :]
            batch_labels = train_label[i:batch_size + i]
            my_net.forward_pass(batch, batch_labels)
            my_net.calculate_accuracy(batch_labels)
            accuracy_per_training_epoch += my_net.accuracy
            loss_per_training_epoch += my_net.loss
            # print("epoc:", epoc, "batch:", batch_count, "loss:", my_net.loss, "accuracy:",
            #       my_net.accuracy, "prediction:", my_net.a2, np.round(my_net.a2).squeeze(), "real labels:", batch_labels)
            my_net.backward_pass(batch_labels)
            my_net.compute_gradient(batch)
            batch_count += 1
        # Average per-batch metrics over the number of batches in the epoch.
        accuracy_per_training_epoch = accuracy_per_training_epoch/(len(train_label)/batch_size)
        loss_per_training_epoch = loss_per_training_epoch/(len(train_label)/batch_size)
        epoch_training_accuracy.append(accuracy_per_training_epoch)
        epoch_training_loss.append(loss_per_training_epoch)
        accuracy_per_training_epoch = 0
        loss_per_training_epoch = 0
        # End-of-epoch validation pass.
        my_net.forward_pass(val_data, val_label)
        my_net.calculate_accuracy(val_label)
        if (my_net.loss - previous_loss) <= threshold:
            counter += 1
        else:
            counter = 0
        if epoc > 100:
            convergence_flag = (counter >= 3)
        print("Validation loss: ", my_net.loss, "Accuracy:", my_net.accuracy, "learning rate:", my_net.learning_rate)
        previous_loss = my_net.loss
        epoch_validation_accuracy.append(my_net.accuracy)
        epoch_validation_loss.append(my_net.loss)
        epoc += 1
    ## plotting section-----------------------------------------------------------------------------------------------
    # NOTE(review): trained_dict below is never used — make_json builds its
    # own dictionary from the arguments.
    trained_dict = {
        'weights': (my_net.W1, my_net.W2),
        'biases': (my_net.b1, my_net.b2),
        'nn_hdim': 2048,
        'activation_1': 'relu',
        'activation_2': 'sigmoid',
        'IDs': (204214928, 308407907)
    }
    json_path = ''
    make_json(my_net.W1,my_net.W2,my_net.b1,my_net.b2,'204214928','308407907','relu','sigmoid',nn_hdim, json_path)
    # Top panel: training vs. validation loss, final values annotated.
    plt.subplot(2, 1, 1)
    plt.plot(range(epoc), epoch_training_loss)
    plt.plot(range(epoc), epoch_validation_loss)
    plt.scatter(epoc, epoch_training_loss[epoc-1], marker='o')
    plt.scatter(epoc, epoch_validation_loss[epoc-1], marker='o')
    x = [epoc, epoc]
    n = [round(epoch_training_loss[epoc-1], 2), round(epoch_validation_loss[epoc-1], 2)]
    for i, txt in enumerate(n):
        plt.annotate(txt, (x[i], n[i]))
    plt.legend(["training", "validation"])
    plt.title('loss and accuracy as function of epoc number')
    plt.ylabel('loss [au]')
    # Bottom panel: training vs. validation accuracy, final values annotated.
    plt.subplot(2, 1, 2)
    plt.plot(range(epoc), epoch_training_accuracy)
    plt.plot(range(epoc), epoch_validation_accuracy)
    plt.scatter(epoc, epoch_training_accuracy[epoc-1], marker='o')
    plt.scatter(epoc, epoch_validation_accuracy[epoc-1], marker='o')
    y = [epoc, epoc]
    s = [round(epoch_training_accuracy[epoc-1], 2), round(epoch_validation_accuracy[epoc-1], 2)]
    for i, txt in enumerate(s):
        plt.annotate(txt, (y[i], s[i]))
    plt.legend(["training", "validation"])
    plt.xlabel('epoc number')
    plt.ylabel('accuracy [%]')
    plt.show()
74667010345 | from math import sqrt, cos, sin, pi
import numpy as np
import pyvista as pv
# Affine rotation ####
#' Matrix of the affine rotation around an axis
#' @param theta angle of rotation in radians
#' @param P1,P2 the two points defining the axis of rotation
def AffineRotationMatrix(theta, P1, P2):
    """Return the 4x4 affine matrix of the rotation by `theta` radians
    about the axis through points `P1` and `P2` (length-3 numpy arrays).

    Construction: translate P1 to the origin, align the axis with z via
    rotations about x then y, rotate by theta about z, then undo each step
    in reverse order.
    """
    translate = np.eye(4)
    translate[:3, 3] = -P1
    untranslate = np.eye(4)
    untranslate[:3, 3] = P1
    # Unit direction vector of the rotation axis.
    a, b, c = (P2 - P1) / np.linalg.norm(P2 - P1)
    d = sqrt(b * b + c * c)
    if d > 0:
        rot_x = np.eye(4)
        rot_x[1, 1] = rot_x[2, 2] = c / d
        rot_x[1, 2] = -b / d
        rot_x[2, 1] = b / d
        # Pure rotation: the inverse is the transpose.
        unrot_x = rot_x.T
    else:
        # Axis already lies along x: no rotation about x is needed.
        rot_x = unrot_x = np.eye(4)
    rot_y = np.eye(4)
    rot_y[0, 0] = rot_y[2, 2] = d
    rot_y[0, 2] = -a
    rot_y[2, 0] = a
    unrot_y = rot_y.T
    rot_z = np.eye(4)
    rot_z[0, 0] = rot_z[1, 1] = cos(theta)
    rot_z[0, 1] = -sin(theta)
    rot_z[1, 0] = sin(theta)
    return untranslate @ unrot_x @ unrot_y @ rot_z @ rot_y @ rot_x @ translate
O = np.array([0.0, 0.0, 0.0])
A = np.array([0.0, 10.0, 0.0])
Rot = AffineRotationMatrix(3*pi/4, O, A)
def f(x, y, z, a, b):
    """Implicit-surface function whose zero level set is the surface being
    meshed; `a` and `b` are shape parameters.  Uses only elementwise
    arithmetic, so it works on scalars and numpy arrays alike.
    """
    r2 = x * x + y * y
    quad = a * x * x + b * y * y
    core = (
        (r2 + 1) * quad
        + z * z * (b * x * x + a * y * y)
        - 2 * (a - b) * x * y * z
        - a * b * r2
    )
    rim = quad - x * y * z * (a - b)
    return core ** 2 - 4 * r2 * rim ** 2
def inversion(omega, M):
    """Invert point `M` (length-3 numpy array) in the unit-power sphere
    centered at (omega, 0, 0): M maps to center + (M - center)/|M - center|^2.
    """
    center = np.array([omega, 0.0, 0.0])
    offset = M - center
    return center + offset / np.dot(offset, offset)
def params(alpha, gamma, mu):
    """Derived parameters for the inversion construction.

    Requires alpha > gamma > 0 and mu > gamma so both square roots are real.

    Returns:
        (omega, omegaT, ratio, R): inversion center abscissa, translated
        center abscissa, scale ratio, and scale factor R.
    """
    beta = sqrt(alpha * alpha - gamma * gamma)
    theta = beta * sqrt(mu * mu - gamma * gamma)
    omega = (alpha * mu + theta) / gamma
    # Common differences/sums reused by the formulas below.
    a_minus = alpha - gamma
    a_plus = alpha + gamma
    m_minus = mu - gamma
    ratio = m_minus * (a_minus * (mu + gamma) + theta) / (a_plus * m_minus + theta) / a_minus
    R = 1 / ratio * gamma * gamma / (a_minus * m_minus + theta) * m_minus / (a_plus * m_minus + theta)
    beta2 = beta * beta
    w_minus = omega - gamma
    omegaT = omega - beta2 * w_minus / (a_minus * (mu + omega) - beta2) / (a_plus * w_minus + beta2)
    return (omega, omegaT, ratio, R)
# Shape parameters for the inversion construction.
alpha = 0.97
gamma = 0.32
mu = 0.56
omega, omegaT, ratio, R = params(alpha, gamma, mu)
OmegaT = np.array([omegaT, 0.0, 0.0])
# Parameters of the implicit surface f.
a = ratio*ratio
b = 0.06
# generate data grid for computing the values
X, Y, Z = np.mgrid[(-1.3):1.3:350j, (-1.6):1.6:350j, (-0.6):0.6:350j]
# create a structured grid
grid = pv.StructuredGrid(X, Y, Z)
# compute and assign the values
values = f(X, Y, Z, a, b)
grid.point_data["values"] = values.ravel(order="F")
# compute the isosurface f(x, y, z) = 0
isosurf = grid.contour(isosurfaces=[0])
# convert to a PolyData mesh
mesh = isosurf.extract_geometry()
# rotate the mesh about the axis O-A computed above
mesh.transform(Rot)
# scale by R, translate by OmegaT and apply the sphere inversion per point
points = R * mesh.points
points = np.apply_along_axis(lambda M: inversion(omega, M + OmegaT), 1, points)
newmesh = pv.PolyData(points, mesh.faces)
# color by distance of the pre-inversion points from the origin
newmesh["dist"] = np.linalg.norm(mesh.points, axis=1)
pltr = pv.Plotter(window_size=[512, 512])
pltr.set_focus(newmesh.center)
pltr.set_position(newmesh.center - np.array([0.0, 0.0, 7.0]))
pltr.add_background_image("SpaceBackground.png")
pltr.add_mesh(
    newmesh, smooth_shading=True, cmap="turbo", specular=25,
    show_scalar_bar=False
)
pltr.show()
33779283072 | #!/usr/local/bin/python3.7
#############
# Imports #
#############
import globalvars
import modules.conf as conf
import modules.misc as misc
import modules.platform as platform
import modules.special as special
import modules.subst as subst
import configparser
import os
import shutil
import subprocess
###############
# Functions #
###############
def print_info():
    """Print info about operating system and target triple.

    Reads the globalvars.* fields populated earlier in main() and the
    module-level packages_present / packages_missing lists.
    """
    print("\nInformation summary:\n--------------------------------------")
    print("OSNAME: " + globalvars.OSNAME)
    print("OSVERSION: " + globalvars.OSVERSION)
    print("OSRELEASE: " + globalvars.OSRELEASE)
    print("OSMAJOR: " + globalvars.OSMAJOR)
    print("OSARCH: " + globalvars.OSARCH)
    print("STDARCH: " + globalvars.STDARCH)
    print("TARGET_TRIPLE: " + globalvars.TGT_TRIPLE)
    print("--------------------------------------\n")
    print(str(len(packages_present)) + " packages present:")
    for p in packages_present:
        print(p + ' ', end='')
    print("\n" + str(len(packages_missing)) + " packages missing:")
    for p in packages_missing:
        print(p + ' ', end='')
def ensure_distfile(mode, package):
    """Ensure that the compressed or uncompressed ("mode") distfile for a package is present.

    Fetches (compressed) or extracts (uncompressed) the file when missing,
    then verifies it against the configured md5/umd5 checksum.  On a first
    mismatch the compressed file is re-fetched once; a second mismatch, or
    any mismatch of an uncompressed file, aborts via misc.die().
    """
    if mode == "compressed":
        distdir = globalvars.SUBSTITUTION_MAP['rbuild_dist_comp_dir']
        filename = misc.get_filename('distfiles', package)
        hashtype = "md5"
    elif mode == "uncompressed":
        distdir = globalvars.SUBSTITUTION_MAP['rbuild_dist_uncomp_dir']
        filename = os.path.basename(misc.get_tarball_uri(package))
        hashtype = "umd5"
    else:
        misc.die("Invalid ensure_distfile mode \"" + mode + "\"! Aborting...")
    absolute_path = distdir + '/' + filename
    if not os.path.isfile(absolute_path):
        # Missing: fetch the tarball, or decompress the already-fetched one.
        if mode == "compressed":
            misc.fetch_file(conf.get_config_value('distfiles', package), distdir, filename)
        else:
            misc.decompress_file(globalvars.SUBSTITUTION_MAP['rbuild_dist_comp_dir'], misc.get_filename('distfiles', package), distdir)
    checksum = misc.get_distfile_checksum(hashtype, package)
    misc.verbose_output("Checksum for \"" + package + "\": Comparing for " + mode + " distfile... ")
    if checksum == None:
        # No checksum configured for this package: nothing to verify.
        misc.verbose_output("skipping (not available)\n")
    else:
        if misc.get_file_hash(absolute_path) == checksum:
            misc.verbose_output("ok (matches)\n")
        else:
            if mode == "compressed":
                # One retry: delete and re-fetch, then verify again.
                misc.verbose_output("Mismatch! Fetching again...\n")
                misc.remove_file_or_dir(absolute_path)
                misc.fetch_file(conf.get_config_value('distfiles', package), globalvars.SUBSTITUTION_MAP['rbuild_dist_comp_dir'], filename)
                misc.verbose_output("Comparing checksums once more... ")
                if misc.get_file_hash(absolute_path) == checksum:
                    misc.verbose_output("ok (matches)\n")
                else:
                    misc.die("Mismatch again! Bailing out...")
            else:
                # Re-extraction is not implemented; bail out.
                misc.verbose_output("Mismatch! Extracting again...\n")
                misc.die("Extract again!")
def ensure_extrafiles_present(package):
    """Ensure that the extra files for a package are present.

    Creates the per-package extra-files directory if needed, fetches any
    missing files listed in the 'extrafiles' config entry and verifies each
    against the optional '<package>_md5' checksum list (re-fetching once on
    mismatch, dying on a second mismatch).
    """
    extradir = globalvars.SUBSTITUTION_MAP['rbuild_extra_dir'] + '/' + package
    extrafiles = conf.get_config_value('extrafiles', package).split(", ")
    md5s = None
    if package + "_md5" in conf.config['extrafiles']:
        md5s = conf.get_config_value('extrafiles', package + "_md5").split(", ")
    misc.verbose_output("Extra files: Ensuring directory \"" + extradir + "\" exists... ")
    if not os.path.isdir(extradir):
        try:
            os.makedirs(extradir)
        except OSError as e:
            misc.die("\nPatches error: Could not create directory \"" + extradir + "\"! Exiting.")
    misc.verbose_output("ok\n")
    # i indexes the parallel md5s list.
    i = 0
    for f in extrafiles:
        filename = os.path.basename(f)
        absolute_path = extradir + '/' + filename
        if not os.path.isfile(absolute_path):
            misc.fetch_file(f, extradir, filename)
        misc.verbose_output("Comparing checksums for extra file " + str(i) + "... ")
        if md5s == None:
            misc.verbose_output("skipping (not available)\n")
        else:
            if misc.get_file_hash(absolute_path) == md5s[i]:
                misc.verbose_output("ok (matches)\n")
            else:
                # One retry: delete, re-fetch and verify again.
                misc.verbose_output("Mismatch! Fetching again...\n")
                misc.remove_file_or_dir(absolute_path)
                misc.fetch_file(f, extradir, filename)
                misc.verbose_output("Comparing checksums once more... ")
                if misc.get_file_hash(absolute_path) == md5s[i]:
                    misc.verbose_output("ok (matches)\n")
                else:
                    misc.die("Mismatch again! Bailing out...")
        i = i + 1
def ensure_clean_wrkdir(package):
    """Ensure that a fresh work directory is present for the package to build.

    Deletes any stale work directory, re-extracts the distfile (if the
    package has one) and copies the package's extra files into the work
    directory (if it has any).
    """
    wrkdir = misc.get_wrkdir(package)
    if os.path.exists(wrkdir):
        print("Old workdir found. Deleting... ", end='', flush=True)
        misc.remove_file_or_dir(wrkdir)
        print("ok")
    if package in conf.config['distfiles']:
        ensure_distfile("compressed", package)
        ensure_distfile("uncompressed", package)
        misc.extract_tarball(package)
    if package in conf.config['extrafiles']:
        # Packages without a distfile have no extracted tree, so the work
        # directory may still be missing at this point.
        if not os.path.exists(wrkdir):
            try:
                os.makedirs(wrkdir)
            except OSError as e:
                # Bug fix: this error path previously referenced the
                # undefined name "directory" and raised a NameError instead
                # of dying with the intended message.
                misc.die("\nFilesystem error: Could not create directory \"" + wrkdir + "\"! Exiting.")
        # (A duplicated `if package in conf.config['extrafiles']` guard used
        # to wrap the rest of this branch; it was redundant and is removed.)
        ensure_extrafiles_present(package)
        extradir = globalvars.SUBSTITUTION_MAP['rbuild_extra_dir'] + '/' + package
        extrafiles = conf.get_config_value('extrafiles', package).split(", ")
        for f in extrafiles:
            absolute_path = extradir + '/' + os.path.basename(f)
            try:
                shutil.copy(absolute_path, wrkdir)
            except IOError as e:
                misc.die("\nFilesystem error: Could not copy \"" + absolute_path + "\" to \"" + wrkdir + "\"! Exiting.")
def ensure_patchfiles_present(package):
    """Check if patches required to build the package are present, try to fetch them otherwise.

    Creates the per-package patch directory if needed, fetches any missing
    patch files listed in the 'patches' config entry and verifies each
    against the optional '<package>_md5' checksum list (re-fetching once on
    mismatch, dying on a second mismatch).
    """
    patches = conf.get_config_value('patches', package).split(", ")
    md5s = None
    if package + "_md5" in conf.config['patches']:
        md5s = conf.get_config_value('patches', package + "_md5").split(", ")
    patchdir = globalvars.SUBSTITUTION_MAP['rbuild_patches_dir'] + '/' + package
    misc.verbose_output("Patches: Ensuring directory \"" + patchdir + "\" exists... ")
    if not os.path.isdir(patchdir):
        try:
            os.makedirs(patchdir)
        except OSError as e:
            misc.die("\nPatches error: Could not create directory \"" + patchdir + "\"! Exiting.")
    misc.verbose_output("ok\n")
    # i indexes the parallel md5s list.
    i = 0
    for uri in patches:
        filename = os.path.basename(uri)
        absolute_path = patchdir + '/' + filename
        if not os.path.isfile(absolute_path):
            misc.fetch_file(uri, patchdir, filename)
        misc.verbose_output("Comparing checksums for patch " + str(i) + "... ")
        if md5s == None:
            misc.verbose_output("skipping (not available)\n")
        else:
            if misc.get_file_hash(absolute_path) == md5s[i]:
                misc.verbose_output("ok (matches)\n")
            else:
                # One retry: delete, re-fetch and verify again.
                misc.verbose_output("Mismatch! Fetching again...\n")
                misc.remove_file_or_dir(absolute_path)
                misc.fetch_file(uri, patchdir, filename)
                misc.verbose_output("Comparing checksums once more... ")
                if misc.get_file_hash(absolute_path) == md5s[i]:
                    misc.verbose_output("ok (matches)\n")
                else:
                    misc.die("Mismatch again! Bailing out...")
        i = i + 1
def build_package(phase, package):
    """Run one build phase ("configure", "make" or "install") for *package*.

    The commands come from the "<phase>_cmds" config entry and are executed
    in the package's work directory; any failing command aborts the whole
    run via misc.die().
    """
    # phase -> (progress label, environment profile passed to prepare_env);
    # "install" reuses the "make" environment.
    phase_table = {
        "configure": ("Configuring", "configure"),
        "make": ("Building", "make"),
        "install": ("Installing", "make"),
    }
    if phase not in phase_table:
        misc.die("\nError: Unknown build phase \"" + phase + "\"! Exiting.")
    activity, env_name = phase_table[phase]
    env = misc.prepare_env(env_name, package)
    print(activity + " \"" + package + "\"... ", end='', flush=True)
    wrkdir = misc.get_wrkdir(package)
    for cmd in conf.get_config_value(phase + "_cmds", package).split(', '):
        if misc.do_shell_cmd(cmd, wrkdir, env) != 0:
            misc.die("\nError: " + activity + " failed for package \"" + package + "\"! Exiting.")
    print("ok")
def ensure_missing_packages():
    """Build and install missing packages.

    For every package in the module-level packages_missing list: prepare a
    clean work directory, apply configured patches (with special handling
    for "uname" and "bmake"), then run the configure/make phases when the
    config defines them and always run the install phase.
    """
    print()
    for p in packages_missing:
        ensure_clean_wrkdir(p)
        if p == "uname":
            # uname needs its source generated before anything else.
            special.prepare_uname_source()
        if p in conf.config['patches']:
            ensure_patchfiles_present(p)
            if p == "bmake":
                # bmake's patch must itself be prepared before applying.
                special.prepare_bmake_patch()
            misc.patch_source(p)
        if p in conf.config['configure_cmds']:
            build_package('configure', p)
        if p in conf.config['make_cmds']:
            build_package('make', p)
        build_package('install', p)
##########
# Main #
##########
# Load and validate the configuration file.
conf.assert_conf_file_present()
conf.config = configparser.ConfigParser()
conf.config.read(globalvars.CONFNAME)
conf.assert_config_valid()
# Detect the host platform and refuse unsupported operating systems.
globalvars.OSNAME = platform.get_osname()
if not globalvars.OSNAME in globalvars.OPERATING_SYSTEMS_SUPPORTED:
    misc.die("Unsupported OS: \"" + globalvars.OSNAME + "\"!")
globalvars.OSRELEASE = platform.get_os_release()
globalvars.OSVERSION = platform.get_os_version()
globalvars.OSMAJOR = platform.get_os_major()
globalvars.OSARCH = platform.get_os_arch()
globalvars.STDARCH = platform.get_stdarch()
globalvars.TGT_TRIPLE = platform.assemble_triple()
print("System: Set for " + globalvars.TGT_TRIPLE + ".")
# Prepare substitutions, required tools and the directory hierarchy.
subst.populate_substitution_map()
misc.assert_external_binaries_available()
misc.ensure_fs_hierarchy('rjail')
misc.ensure_fs_hierarchy('rbuild')
print("Filesystem: Hierarchy is in place.")
packages_present, packages_missing = misc.detect_packages()
print_info()
# Interactively confirm before building anything that is missing.
if len(packages_missing) > 0:
    a = 0
    while (a != "N" and a != "Y"):
        a = input("\n\nBuild missing packages now? (Y/N) ").upper()
    if (a == "N"):
        exit(0)
    ensure_missing_packages()
print("All done!")
5808173653 | from datetime import datetime
class DateBuilder:
    """Parse event dates of the form "Month D[, YYYY][, H:MMam to H:MMpm]".

    Times are represented as fractional 24-hour floats in which the
    fractional part encodes minutes/100 (e.g. 13.3 means 1:30pm).
    """

    # Month name -> month number.  Bug fix: the table previously misspelled
    # "february" as "febuary", so any correctly spelled February date
    # raised a KeyError in get_month().
    _MONTHS = {name: number for number, name in enumerate(
        ["january", "february", "march", "april", "may", "june", "july",
         "august", "september", "october", "november", "december"], start=1)}

    def __init__(self, raw_date: str):
        self.raw_date = raw_date

    def get_month(self):
        """Return the month number (1-12) parsed from the leading word."""
        return self._MONTHS[self.raw_date.split(" ")[0].strip().lower()]

    def get_day(self):
        """Return the day of the month (int) that follows the month name."""
        return int(self.raw_date.split(",", 1)[0].split(" ", 1)[1].split(" - ")[0].strip())

    def get_year(self):
        """Return the year; defaults to the current year when absent."""
        try:
            return int(self.raw_date.split(",")[1].strip())
        except (IndexError, ValueError):
            # No ", YYYY" component (or it was not numeric): assume this year.
            return datetime.now().year

    def get_time(self):
        """Return (start, end) times as fractional 24-hour floats (0-23)."""
        def convert(time):
            # 12-hour "H:MMam"/"H:MMpm" -> fractional 24-hour float.
            if "pm" in time:
                hr, minute = time.split("pm")[0].split(":")
                value = float(hr)
                if value < 12.0:
                    value += 12.0
            else:
                hr, minute = time.split("am")[0].split(":")
                value = float(hr)
                if value == 12.0:
                    # 12:xx am is midnight.
                    value -= value
            return value + float(minute) / 100.0

        temp = self.raw_date.split(",")[-1].split(" to ")
        if len(temp) == 1:
            # Single time given: treat it as both start and end.
            start = end = convert(temp[0].strip())
            return start, end
        start, end = temp
        return convert(start.strip()), convert(end.strip())

    def less_than_equal(self, date):
        """Return True when self sorts at or before *date*.

        NOTE(review): the original operator grouping is preserved verbatim;
        combining the year test with `and` may not match strict
        chronological ordering across year boundaries — confirm intent.
        """
        return self.get_year() <= date.get_year() and (self.get_month() < date.get_month() or (self.get_month() == date.get_month() and (self.get_day() < date.get_day() or (self.get_day() == date.get_day() and self.get_time()[-1] <= date.get_time()[-1]))))

    def makeDate(self):
        """Return the parsed (year, month, day, start_time, end_time) tuple."""
        start, end = self.get_time()
        return (self.get_year(), self.get_month(), self.get_day(), start, end)

    def displayDate(self):
        """Return the makeDate() fields joined by single spaces."""
        return " ".join(str(value) for value in self.makeDate())
15478699282 | import os
import copy
import sys
import glog
import tifffile
try: from .tools import uity
except: from tools import uity
import numpy as np
from absl import flags, app
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import controller.processing
class TissueCut(object):
    """ONNX-based tissue segmentation model wrapper."""
    def __init__(self, gpu="-1", num_threads=0):
        """
        :param gpu: GPU id as a string; "-1" selects CPU inference
        :param num_threads: number of inference threads (0 = runtime default)
        """
        # Filled in by net_cfg() from weights.json.
        self._WIN_SIZE = None
        self._model_path = None
        self.net_cfg()
        self._gpu = gpu
        self._model = None
        self._num_threads = num_threads
        self._init_model()
    def net_cfg(self, cfg='weights.json'):
        """Read the network config (input size and weights path) from the
        JSON file located next to this module."""
        cfg = os.path.join(os.path.dirname(os.path.abspath(__file__)), cfg)
        import json
        with open(cfg, 'r') as fd:
            dct = json.load(fd)
        self._WIN_SIZE = dct['tissue']['input']
        self._model_path = dct['tissue']['weights_path']
        if not os.path.exists(self._model_path):
            glog.error('Not found weights file in {}.'.format(self._model_path))
        else: glog.info(f"Start load weights from {self._model_path}")
    def _init_model(self):
        """Instantiate the ONNX inference net with the configured weights."""
        from net.onnx_net import cl_onnx_net
        self._model = cl_onnx_net(self._model_path, self._gpu, self._num_threads)
    def f_predict(self, img):
        """Run tissue segmentation on a single image.

        :param img: input image array; squeezed to 2-D before preprocessing
        :return: (preprocessed model-input image, mask resized to the
            original image shape)
        """
        img = np.squeeze(img)
        src_shape = img.shape[:2]
        img = controller.processing.f_tissue_preprocess(img, self._WIN_SIZE)
        # deepcopy keeps the preprocessed image intact for the caller.
        pred = self._model.f_predict(copy.deepcopy(img))
        pred = controller.processing.f_post_process(pred)
        pred = uity.f_resize(pred, src_shape)
        return img, pred
def tissue_cut(input: str, output: str, gpu: str = "-1", num_threads: int = 0):
    """Run tissue segmentation on a TIFF image and write the mask.

    :param input: path of the input TIFF image
    :param output: path of the output mask (zlib-compressed TIFF)
    :param gpu: GPU id as a string; "-1" selects CPU.  (Fixed: the default
        used to be the int -1, contradicting the ``str`` annotation and the
        CLI flag's string default '-1'.)
    :param num_threads: number of inference threads (0 = runtime default)
    """
    if input is None or output is None:
        print("please check your parameters")
        return
    img = tifffile.imread(input)
    sg = TissueCut(gpu=gpu, num_threads=int(num_threads))
    img, pred = sg.f_predict(img)
    glog.info("Predict finish, start write.")
    tifffile.imwrite(output, pred, compression="zlib", compressionargs={"level": 8})
    glog.info("Work Finished.")
def main(argv):
    """absl entry point: forward the parsed FLAGS (defined in the
    __main__ block below) to tissue_cut(); *argv* is unused."""
    tissue_cut(input=FLAGS.input,
               output=FLAGS.output,
               gpu=FLAGS.gpu,
               num_threads=FLAGS.num_threads)
if __name__ == '__main__':
    # Command-line interface: input/output TIFF paths, GPU id and thread count.
    FLAGS = flags.FLAGS
    flags.DEFINE_string('input', '', 'the input img path')
    flags.DEFINE_string('output', '', 'the output file')
    flags.DEFINE_string('gpu', '-1', 'output path')
    flags.DEFINE_integer('num_threads', 0, 'num threads.', lower_bound=0)
    app.run(main)
| BGIResearch/StereoCell | stereocell/segmentation/tissue.py | tissue.py | py | 2,701 | python | en | code | 18 | github-code | 36 |
28068881292 | from itertools import combinations
import sys
input = sys.stdin.readline
def solution(orders, course):
    """Menu-renewal problem: for every course size in *course*, count each
    size-n item combination across all orders, keep the combinations tied
    for that size's maximum count, and finally return — sorted — those that
    appear in at least two orders.
    """
    answer = {}
    for size in course:
        counts = {}
        for order in orders:
            # Sort the order so combinations are canonical strings.
            for combo in combinations(sorted(order), size):
                key = ''.join(combo)
                counts[key] = counts.get(key, 0) + 1
        best = max(counts.values(), default=0)
        answer.update({menu: cnt for menu, cnt in counts.items() if cnt == best})
    # A course must have been ordered by at least two customers.
    answer = {menu: cnt for menu, cnt in answer.items() if cnt >= 2}
    return sorted(answer)
40164893998 | from django.shortcuts import render
# Create your views here.
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from django.http.response import JsonResponse
from djangoapi.models import Department,Employee
from djangoapi.serializers import DepartmentSerializer,EmployeeSerializer
@csrf_exempt
def departmentApi(request, id=0):
    """CRUD endpoint for Department objects.

    GET    -> list all departments.
    POST   -> create a department from the JSON request body.
    PUT    -> update the department identified by DepartmentId in the body.
    DELETE -> delete the department with DepartmentId ``id`` from the URL.
    """
    if request.method == 'GET':
        departments = Department.objects.all()
        departments_serializer = DepartmentSerializer(departments, many=True)
        return JsonResponse(departments_serializer.data, safe=False)
    elif request.method == 'POST':
        department_data = JSONParser().parse(request)
        # Bug fix: the serializer variable names were inconsistent
        # (departments_serializers vs. departments_serializer), so
        # is_valid()/save() raised NameError instead of persisting.
        departments_serializer = DepartmentSerializer(data=department_data)
        if departments_serializer.is_valid():
            departments_serializer.save()
            return JsonResponse("Added Sucessfully", safe=False)
        return JsonResponse("Failed to Add", safe=False)
    elif request.method == 'PUT':
        department_data = JSONParser().parse(request)
        department = Department.objects.get(DepartmentId=department_data['DepartmentId'])
        departments_serializer = DepartmentSerializer(department, data=department_data)
        if departments_serializer.is_valid():
            departments_serializer.save()
            # Bug fix: the PUT branch previously reported "Added"/"Failed
            # to Add" (copy-pasted from POST).
            return JsonResponse("Updated Sucessfully", safe=False)
        return JsonResponse("Failed to Update", safe=False)
    elif request.method == 'DELETE':
        # Bug fixes: Manager has no .GET attribute (the method is .get),
        # and the fetched object was bound to "departments" but deleted
        # via the undefined name "department".
        department = Department.objects.get(DepartmentId=id)
        department.delete()
        return JsonResponse("Deleted Sucessfully", safe=False)
| 00karina/FullStackApp | djangoapi/views.py | views.py | py | 1,633 | python | en | code | 0 | github-code | 36 |
10503132777 | import re
# Character -> stroke count, loaded from the pipe-separated data file.
# Each line looks like "<index>|<character>|<strokes>".
stroke_dic = dict()
with open('data/stoke.dat', encoding='utf-8') as f:
    data = f.readlines()
    for string in data:
        temp = string.split("|")
        temp[2] = temp[2].replace("\n", "")
        stroke_dic[temp[1]] = int(temp[2])
# Character -> list of component parts, loaded from the chaizi
# (traditional decomposition) table: "<character> <part> <part> ...".
split_dic = dict()
with open('data/chaizi-ft.dat', encoding='utf-8') as f:
    data = f.readlines()
    for string in data:
        # NOTE(review): "\s" is an invalid escape in a plain string literal
        # (DeprecationWarning); a raw string r"\s" would be cleaner.
        temp = re.split("\s", string)
        if len(temp) < 2:
            continue
        split_list = list()
        # The last element is the empty string left by the trailing newline.
        for index in range(1, len(temp) - 1):
            split_list.append(temp[index])
        split_dic[temp[0]] = split_list
def get_stroke_number(word):
    """Return the total stroke count of *word*.

    The Chinese numerals 一..十 are counted by their numeric value; every
    other character is looked up in the module-level stroke_dic.  The raw
    total is then adjusted for radical conventions by get_final_number().
    """
    numeral_values = {
        "一": 1, "二": 2, "三": 3, "四": 4, "五": 5,
        "六": 6, "七": 7, "八": 8, "九": 9, "十": 10,
    }
    total = 0
    for char in word:
        if char in numeral_values:
            total += numeral_values[char]
        else:
            total += stroke_dic[char]
    return get_final_number(word, total)
# 检查特殊部首笔画
def get_final_number(word, number):
for i in word:
if i in split_dic:
splits = split_dic[i]
if "氵" in splits:
# 水
number += 1
if "扌" in splits:
# 手
number += 1
if splits[0] == "月":
# 肉
number += 2
if "艹" in splits:
# 艸
number += 3
if "辶" in splits:
# 辵
number += 4
if splits[0] == "阜":
# 左阝 阜
number += 6
if "邑" in splits and "阝" in splits:
# 右阝 邑
number += 5
if splits[0] == "玉":
# 王字旁 玉
number += 1
if splits[0] == "示":
# 礻 示
number += 1
if splits[0] == "衣":
# 衤 衣
number += 1
if splits[0] == "衣":
# 犭 犬
number += 1
if splits[0] == "心":
# 忄 心
number += 1
return number
| NanBox/PiPiName | stroke_number.py | stroke_number.py | py | 2,480 | python | en | code | 503 | github-code | 36 |
35936751903 | import covasim as cv
import pandas as pd
import sciris as sc
import pylab as pl
import numpy as np
from matplotlib import ticker
import datetime as dt
import matplotlib.patches as patches
import seaborn as sns
import matplotlib as mpl
from matplotlib.colors import LogNorm
# Filepaths
resultsfolder = 'sweeps'       # main sweep results
sensfolder = 'sweepssens'      # sensitivity-analysis sweep results
figsfolder = 'figs'
process = False                # set True to (re)aggregate the raw msim files
# Parameter levels swept over: testing probability, venue-tracing
# probability and mask-uptake fraction.
T = sc.tic()
tlevels = [0.067, 0.1, 0.15, 0.19]
vlevels = np.arange(0, 5) / 4
mlevels = np.arange(0, 4) / 4
nt, nv, nm = len(tlevels), len(vlevels), len(mlevels)
# Fonts and sizes for all figures
font_size = 26
font_family = 'Proxima Nova'
pl.rcParams['font.size'] = font_size
pl.rcParams['font.family'] = font_family
################################################################################################
# Do processing if required
################################################################################################
if process:
    for thisfig in [resultsfolder,sensfolder]:
        results = {'cum_infections': {}, 'r_eff': {}, 'new_infections':{}, 'cum_quarantined':{}}
        for future_test_prob in tlevels:
            for name in ['cum_infections', 'r_eff', 'new_infections','cum_quarantined']: results[name][future_test_prob] = {}
            for venue_trace_prob in vlevels:
                for name in ['cum_infections', 'r_eff', 'new_infections','cum_quarantined']: results[name][future_test_prob][venue_trace_prob] = []
                for mask_uptake in mlevels:
                    print(f'mask_uptake: {mask_uptake}, venue_trace_prob: {venue_trace_prob}, future_test_prob: {future_test_prob}')
                    msim = sc.loadobj(f'{thisfig}/nsw_tracingsweeps_T{int(future_test_prob * 100)}_M{int(mask_uptake * 100)}_V{int(venue_trace_prob * 100)}.obj')
                    # Cumulative counts are taken relative to day 244 (the
                    # start of the analysis window); r_eff is the final value.
                    results['cum_quarantined'][future_test_prob][venue_trace_prob].append(msim.results['cum_quarantined'].values[-1]-msim.results['cum_quarantined'].values[244])
                    results['cum_infections'][future_test_prob][venue_trace_prob].append(msim.results['cum_infections'].values[-1]-msim.results['cum_infections'].values[244])
                    results['r_eff'][future_test_prob][venue_trace_prob].append(msim.results['r_eff'].values[-1])
                    results['new_infections'][future_test_prob][venue_trace_prob].append(msim.results['new_infections'].values)
        sc.saveobj(f'{thisfig}/nsw_sweep_results.obj', results)
#else:
#    results = sc.loadobj(f'{resultsfolder}/nsw_sweep_results.obj')
################################################################################################################
# Figure 2 and S2: grids of new infections
################################################################################################################
for thisfig in [resultsfolder, sensfolder]:
# Fonts and sizes
fig = pl.figure(figsize=(24,16))
results = sc.loadobj(f'{thisfig}/nsw_sweep_results.obj')
# Subplot sizes
xgapl = 0.05
xgapm = 0.017
xgapr = 0.05
ygapb = 0.05
ygapm = 0.017
ygapt = 0.05
nrows = nt
ncols = nv
dx = (1-(ncols-1)*xgapm-xgapl-xgapr)/ncols
dy = (1-(nrows-1)*ygapm-ygapb-ygapt)/nrows
nplots = nrows*ncols
ax = {}
colors = pl.cm.GnBu(np.array([0.4,0.6,0.8,1.]))
labels = ['0% masks', '25% masks', '50% masks', '75% masks']
epsx = 0.003
epsy = 0.008
llpad = 0.01
rlpad = 0.005
if thisfig==resultsfolder:
pl.figtext(xgapl+dx*nv+xgapm*(nv-1)+rlpad, ygapb+(ygapm+dy)*0+epsy, ' 90% testing ', rotation=90, fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+dx*nv+xgapm*(nv-1)+rlpad, ygapb+(ygapm+dy)*1+epsy, ' 80% testing ', rotation=90, fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+dx*nv+xgapm*(nv-1)+rlpad, ygapb+(ygapm+dy)*2+epsy, ' 65% testing ', rotation=90, fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+dx*nv+xgapm*(nv-1)+rlpad, ygapb+(ygapm+dy)*3+epsy, ' 50% testing ', rotation=90, fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
elif thisfig==sensfolder:
pl.figtext(xgapl+dx*nv+xgapm*(nv-1)+rlpad, ygapb+(ygapm+dy)*0+epsy, ' 90% symp. testing \n 60% contact testing ', rotation=90, fontsize=26, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+dx*nv+xgapm*(nv-1)+rlpad, ygapb+(ygapm+dy)*1+epsy, ' 80% symp. testing \n 50% contact testing ', rotation=90, fontsize=26, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+dx*nv+xgapm*(nv-1)+rlpad, ygapb+(ygapm+dy)*2+epsy, ' 65% symp. testing \n 40% contact testing ', rotation=90, fontsize=26, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+dx*nv+xgapm*(nv-1)+rlpad, ygapb+(ygapm+dy)*3+epsy, ' 50% symp. testing \n 30% contact testing ', rotation=90, fontsize=26, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+(dx+xgapm)*0+epsx, ygapb+dy*nm+ygapm*(nm-1)+llpad, ' 0% tracing ', fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+(dx+xgapm)*1+epsx, ygapb+dy*nm+ygapm*(nm-1)+llpad, ' 25% tracing ', fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+(dx+xgapm)*2+epsx, ygapb+dy*nm+ygapm*(nm-1)+llpad, ' 50% tracing ', fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+(dx+xgapm)*3+epsx, ygapb+dy*nm+ygapm*(nm-1)+llpad, ' 75% tracing ', fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+(dx+xgapm)*4+epsx, ygapb+dy*nm+ygapm*(nm-1)+llpad, ' 100% tracing ', fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
# Extract plot values
def plinf(pn, what='new_infections'):
# Get series for this plot number
t = list(results['new_infections'].keys())[(nplots-1-pn)//nv]
v = list(results['new_infections'][t].keys())[pn%nv]
if what =='new_infections':
#return np.array(([results['new_infections'][t][v][mm][214:] for mm in range(nm)]))
return np.array([[results['new_infections'][t][v][mm][200+i:214+i].sum() / 14 for i in range(306-214)] for mm in range(nm)])
elif what == 'cum_infections':
return results['cum_infections'][t][v]
@ticker.FuncFormatter
def date_formatter(x, pos):
return (cv.date('2020-09-30') + dt.timedelta(days=x)).strftime('%d-%b')
for pn in range(nplots):
ax[pn] = pl.axes([xgapl+(dx+xgapm)*(pn%ncols), ygapb+(ygapm+dy)*(pn//ncols), dx, dy])
data = plinf(pn)
for mi,mval in enumerate(mlevels):
ax[pn].plot(range(len(data[mi,:])), data[mi,:], '-', lw=4, c=colors[mi], label=labels[mi], alpha=1.0)
val = sc.sigfig(plinf(pn, what='cum_infections')[mi],3) if plinf(pn, what='cum_infections')[mi]<100 else sc.sigfig(plinf(pn, what='cum_infections')[mi],2)
ax[pn].text(0.1, 180-mi*15, val.rjust(6), fontsize=20, family='monospace', color=colors[mi])
ax[pn].set_ylim(0, 200)
ax[pn].xaxis.set_major_formatter(date_formatter)
if pn==4: pl.legend(loc='upper right', frameon=False, fontsize=20)
if pn not in [0,5,10,15]:
ax[pn].set_yticklabels([])
else:
ax[pn].set_ylabel('New infections')
if pn not in range(nv):
ax[pn].set_xticklabels([])
else:
xmin, xmax = ax[pn].get_xlim()
ax[pn].set_xticks(pl.arange(xmin+5, xmax, 40))
if thisfig==resultsfolder: figname = figsfolder+'/fig2_grid.png'
elif thisfig==sensfolder: figname = figsfolder+'/figS2_grid.png'
cv.savefig(figname, dpi=100)
#d = {'testing': [0.067]*nv*nm+[0.1]*nv*nm+[0.15]*nv*nm+[0.19]*nv*nm, 'tracing': [0.0]*nm+[0.25]*nm+[0.5]*nm+[0.75]*nm+[1.0]*nm+[0.0]*nm+[0.25]*nm+[0.5]*nm+[0.75]*nm+[1.0]*nm+[0.0]*nm+[0.25]*nm+[0.5]*nm+[0.75]*nm+[1.0]*nm+[0.0]*nm+[0.25]*nm+[0.5]*nm+[0.75]*nm+[1.0]*nm, 'masks': [0.0,0.25,0.5,0.75]*nt*nv}
#d['val'] = []
#for t in tlevels:
# for v in vlevels:
# d['val'].extend(sc.sigfig(results['cum_infections'][t][v],3))
#import pandas as pd
#df = pd.DataFrame(d)
#df.to_excel('sweepresults.xlsx')
################################################################################################################
# Figure 3: bar plot of cumulative infections
################################################################################################################
mainres = sc.loadobj(f'{resultsfolder}/nsw_sweep_results.obj')
sensres = sc.loadobj(f'{sensfolder}/nsw_sweep_results.obj')
# Subplot sizes
xgapl = 0.07
xgapm = 0.02
xgapr = 0.02
ygapb = 0.1
ygapm = 0.02
ygapt = 0.08
nrows = 1
ncols = 2
dx = (1-(ncols-1)*xgapm-xgapl-xgapr)/ncols
dy = (1-(nrows-1)*ygapm-ygapb-ygapt)/nrows
nplots = nrows*ncols
ax = {}
colors = pl.cm.GnBu(np.array([0.4,0.6,0.8,1.]))
mlabels = ['0% masks', '25% masks', '50% masks', '75% masks']
tlabels = ['50%', '65%', '80%', '90%']
fig = pl.figure(figsize=(24,8*nrows))
x = np.arange(len(tlabels))
width = 0.2 # the width of the bars
# Extract data
datatoplot = {}
datatoplot[0] = np.array([[mainres['cum_infections'][t][1.0][mi] for t in tlevels] for mi in range(nm)])
datatoplot[1] = np.array([[sensres['cum_infections'][t][1.0][mi] for t in tlevels] for mi in range(nm)])
#datatoplot[2] = np.array([[mainres['cum_quarantined'][t][1.0][mi] for t in tlevels] for mi in range(nm)])
#datatoplot[3] = np.array([[sensres['cum_quarantined'][t][1.0][mi] for t in tlevels] for mi in range(nm)])
# Headings
pl.figtext(xgapl+0.001, ygapb+dy*nrows+ygapm*(nrows-1)+0.01, ' Asymptomatic testing equal to symptomatic testing ',
fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
pl.figtext(xgapl+xgapm+dx+0.001, ygapb+dy*nrows+ygapm*(nrows-1)+0.01, ' Asymptomatic testing lower than symptomatic testing ',
fontsize=30, fontweight='bold', bbox={'edgecolor':'none', 'facecolor':'silver', 'alpha':0.5, 'pad':4})
# Make plots
for pn in range(nplots):
ax[pn] = pl.axes([xgapl+(dx+xgapm)*(pn%ncols), ygapb+(ygapm+dy)*(pn//ncols), dx, dy])
data = datatoplot[pn]
for mi,mval in enumerate(mlevels):
ax[pn].bar(x+width*(mval*4-1.5), data[mi,:], width, color=colors[mi], label=mlabels[mi], alpha=1.0)
ax[pn].set_xticks(x)
ax[pn].set_xticklabels(tlabels)
if pn <2:
ax[pn].set_ylim(0, 20e3)
ax[pn].set_xlabel('Symptomatic testing rate')
else:
ax[pn].set_ylim(0, 250e3)
sc.boxoff()
if pn in [0,2]:
ax[pn].set_ylabel('Cumulative infections')
if pn==1:
pl.legend(loc='upper right', frameon=False, fontsize=20)
ax[pn].set_yticklabels([])
cv.savefig(f'{figsfolder}/fig3_bars.png', dpi=100)
################################################################################################################
# Figure X: trade-off heatmaps
################################################################################################################
# Subplot sizes
xgapl = 0.07
xgapm = 0.02
xgapr = 0.02
ygapb = 0.3
ygapm = 0.02
ygapt = 0.08
nrows = 1
ncols = nv
dx = (1-(ncols-1)*xgapm-xgapl-xgapr)/ncols
dy = (1-(nrows-1)*ygapm-ygapb-ygapt)/nrows
nplots = nrows*ncols
ax = {}
# Create figure
fig = pl.figure(figsize=(24,10))
colors = pl.cm.GnBu(np.array([0.4,0.6,0.8,1.]))
mlabels = ['0% masks', '25% masks', '50% masks', '75% masks']
tlabels = ['50%', '65%', '80%', '90%']
vlabels = ['0% tracing', '25% tracing', '50% tracing', '75% tracing', '100% tracing']
# Build the (mask, testing) parameter grid for the heatmaps.
# NOTE: the meshgrid outputs must NOT be named ``T`` -- the original code
# clobbered the timer handle from ``T = sc.tic()`` at the top of the file,
# so the final ``sc.toc(T)`` received an array instead of the start time.
M_grid, T_grid = np.meshgrid(mlevels, tlevels)
mt = M_grid.reshape(nt*nm,)
tt = T_grid.reshape(nt*nm,)
cmin, cmax = 0., 5.
# Logarithmically spaced contour levels, 10**0 .. 10**5.
lev_exp = np.arange(0., 5.1, 0.1)
levs = np.power(10, lev_exp)
# Load objects
for pn,vl in enumerate(vlevels):
# Load in scenario multisims
zi1 = np.array([mainres['cum_infections'][ti][vl] for ti in tlevels])
z = zi1.reshape(nt*nm,)
# Set axis and plot
ax[pn] = pl.axes([xgapl+(dx+xgapm)*(pn%ncols), ygapb+(ygapm+dy)*(pn//ncols), dx, dy])
im = ax[pn].imshow(zi1, cmap='Oranges')
# Annotate
for i in range(nm):
for j in range(nt):
c = sc.sigfig(zi1[j, i],3)
ax[pn].text(i, j, str(c), va='center', ha='center')
# Axis and plot labelling
if pn == 0:
ax[pn].set_ylabel('Symptomatic testing rate', fontsize=24, labelpad=20)
ax[pn].set_xlabel('Mask uptake')
ax[pn].set_title(vlabels[pn])
ax[pn*100] = pl.axes([xgapl+(dx+xgapm)*(pn%ncols), 0.05, dx, 0.1])
cbar = pl.colorbar(im, cax=ax[pn*100])
cv.savefig(f'{figsfolder}/figX_heatmaps.png', dpi=100)
sc.toc(T)
| optimamodel/covid_nsw | 1_submission/plot_nsw_sweeps.py | plot_nsw_sweeps.py | py | 13,309 | python | en | code | 2 | github-code | 36 |
3703533790 | from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from myapp.forms import MyModelForm
from myapp.models import MyModel
def form_request(request, url, template):
    """Shared handler: create a MyModel from a valid POST, else render the form.

    A valid POST stores the submitted name in the session, persists a new
    MyModel row and redirects to *url* (post/redirect/get). GET requests and
    invalid POSTs render *template* with the (possibly bound) form and, when
    any rows exist, the most recently created object as ``last_item``.
    """
    if request.method == 'POST':
        form = MyModelForm(request.POST)
        if form.is_valid():
            nome = form.cleaned_data['name']
            request.session['name'] = nome
            MyModel.objects.create(name=nome).save()
            return HttpResponseRedirect(url)  # post/redirect/get
    else:
        form = MyModelForm()

    context = {'form': form}
    registros = MyModel.objects.all()
    if registros:
        # Newest row by primary key.
        context['last_item'] = MyModel.objects.all().order_by('pk').reverse()[0]
    return render(request, template, context)
def main(request):
    """Landing page: delegates to the shared form handler."""
    return form_request(request, '/add/', 'main.html')
def form_1(request):
    """First form page; a successful POST redirects back to /1/."""
    return form_request(request, '/1/', 'form_1.html')
def form_2(request):
    """Second form page; a successful POST redirects back to /2/."""
    return form_request(request, '/2/', 'form_2.html')
def form_add(request):
    """Confirmation page showing the most recently submitted name.

    Uses ``session.get`` so a direct visit without a prior form POST renders
    with an empty name instead of raising ``KeyError`` (an HTTP 500).
    """
    name = request.session.get('name', '')
    return render(request, 'add.html', {'name': name})
| msampaio/estudo_django | myapp/views.py | views.py | py | 1,153 | python | en | code | 0 | github-code | 36 |
29588126043 | import re
# Token patterns: each named group doubles as the token type.
NAME = r'(?P<NAME>[a-zA-Z_][a-zA-Z_0-9]*)'  # '*' was missing: names longer than 2 chars never matched fully
NUM = r'(?P<NUM>\d+)'
PLUS = r'(?P<PLUS>\+)'
TIMES = r'(?P<TIMES>\*)'
EQ = r'(?P<EQ>=)'
WS = r'(?P<WS>\s+)'
master_pat = re.compile('|'.join([NAME, NUM, PLUS, TIMES, EQ, WS]))

if __name__ == "__main__":
    scanner = master_pat.scanner('foo=42')
    # Scanner objects expose match()/search(); the original called the
    # non-existent matcher(), which raises AttributeError. Calling match()
    # repeatedly walks the string one token at a time.
    for tok in iter(scanner.match, None):
        print(tok.lastgroup, tok.group())
| chen19901225/SimplePyCode | SimpleCode/PY_CookBook/chapter2/chapter2.py | chapter2.py | py | 301 | python | en | code | 0 | github-code | 36 |
29126825853 | #Grupo PHP
#Kevin Cevallos
#María Camila Navarro
#Joffre Ramírez
import ply.lex as lex
# Reserved words of the PHP subset: maps keyword text -> token name.
# PLY token names are appended to the ``tokens`` tuple via reserved.values().
reserved = {
    'if': 'IF',
    'else': 'ELSE',
    'elseif': 'ELSEIF',
    #'boolean': 'BOOLEAN',
    #'float': 'FLOAT',
    #'string': 'STRING',
    'null': 'NULL',
    'array': 'ARRAY',
    #'object': 'OBJECT',
    'break': 'BREAK',
    'continue': 'CONTINUE',
    'return': 'RETURN',
    # fixed: the key was 'for each' (with a space); the PHP keyword is 'foreach'
    'foreach': 'FOREACH',
    'echo': 'ECHO',
    'print': 'PRINT',
    'print_r': 'PRINT_R',
    'var_dump': 'VAR_DUMP',
    'fgets': 'FGETS',
    'fread': 'FREAD',
    'fscanf': 'FSCANF',
    'fpassthru': 'FPASSTHRU',
    'fgetcsv': 'FGETCSV',
    'fgetc': 'FGETC',
    'file_get_contents': 'FILE_GET_CONTENTS',
    'readfile': 'READFILE',
    'file': 'FILE',
    'parse_ini_file': 'PARSE_INI_FILE',
    'implode': 'IMPLODE',
    'explode': 'EXPLODE',
    'new': 'NEW',
    'class': 'CLASS',
    'count': 'COUNT',
    'sizeof': 'SIZEOF',
    'array_push': 'ARRAY_PUSH',
    'sort': 'SORT',
    'asort': 'ASORT',
    'ksort': 'KSORT',
    'unset': 'UNSET',
    'var_export': 'VAR_EXPORT',
    'shuffle': 'SHUFFLE',
    'array_merge': 'ARRAY_MERGE',
    'array_search': 'ARRAY_SEARCH',
    'array_rand': 'ARRAY_RAND',
    'array_chunk': 'ARRAY_CHUNK',
    'str_split': 'STR_SPLIT',
    'preg_split': 'PREG_SPLIT',
    'array_unique': 'ARRAY_UNIQUE',
    'function': 'FUNCTION',
    'while': 'WHILE',
    'as': 'AS'
}
tokens =(
[
#Operadores Matemáticos
'PLUS',
'MINUS',
'TIMES',
'DIVIDE',
'EQUALS',
'MODULO',
#Operadores Lógicos
'AND',
'OR',
'XOR',
'NOT',
#Símbolos
'LPAREN',
'RPAREN',
#'PEIROT',
'RCORCHET',
'LCORCHET',
'OBJECT_OPERATOR',
'COMA',
'OPEN',
'CLOSE',
'END',
'FLECHA',
#Variable
'ID',
#Número
'NUMBER',
'DECIMAL',
#Valor Boolean
'TRUE',
'FALSE',
#Cadena de texto
'TEXT',
#Operadores Comparación
'MAYORQUE',
'MENORQUE',
'IS_EQUAL',
'IS_IDENTICAL',
'IS_NOT_EQUAL',
'IS_NOT_IDENTICAL',
'IS_GREATER_OR_EQUAL',
'IS_SMALLER_OR_EQUAL',
'SPACESHIP',
#Nombre de Funciones
'FNOMBRE'
] + list(reserved.values()))
#Operadores Matemáticos
t_MODULO=r'%'
t_PLUS=r'\+'
t_MINUS=r'-'
t_TIMES=r'\*'
t_DIVIDE=r'/'
t_EQUALS = r'='
#Operadores Lógicos
t_AND = r'and'
t_OR = r'or'
t_XOR = r'xor'
t_NOT = r'!'
#Símbolos
t_OBJECT_OPERATOR=r'->'
t_LPAREN=r'\('
t_RPAREN=r'\)'
t_END = r';'
t_TEXT = r'".*"'
t_FLECHA = r'=>'
#t_PEIROT = r'\.'
t_OPEN = r'<\?php'
t_CLOSE = r'\?>'
t_RCORCHET=r'\}'
t_LCORCHET=r'\{'
t_COMA=r','
#Variable
t_ID = r'(\$([a-z]|[A-Z]))([a-zA-Z0-9]+)?'
#Valor Boolean
#t_TRUE = r'true'
#t_FALSE = r'false'
#Operadores Comparación
t_MAYORQUE = r'>'
t_MENORQUE = r'<'
t_IS_EQUAL = r'=='
t_IS_IDENTICAL = r'==='
t_IS_NOT_EQUAL= r'!='
t_IS_NOT_IDENTICAL= r'!=='
t_IS_GREATER_OR_EQUAL=r'>='
t_IS_SMALLER_OR_EQUAL=r'<='
t_SPACESHIP = r'<=>'
t_ignore = ' \t'
# Numeric literals. PLY uses each function's raw-string docstring as the
# token regex, and tries function rules in definition order, so t_DECIMAL
# must come before t_NUMBER ("3.14" lexes as one DECIMAL token).
def t_DECIMAL(t):
    r'\d+\.\d+'
    # convert the matched text to a Python float
    t.value = float(t.value)
    return t
def t_NUMBER(t):
    r'\d+'
    # convert the matched digits to a Python int
    t.value = int(t.value)
    return t
#Cadena de texto
#Palabras reservadas
def t_CLASS(t):
r'class'
return t
def t_ECHO(t):
r'echo'
return t
def t_NEW(t):
r'new'
return t
'''
def t_BOOLEAN(t):
r'boolean'
return t
def t_STRING(t):
r'string'
return t
'''
def t_TRUE(t):
r'true'
return t
def t_FALSE(t):
r'false'
return t
def t_NULL(t):
r'null'
return t
'''
def t_OBJECT(t):
r'object'
return t
'''
def t_BREAK(t):
r'break'
return t
def t_CONTINUE(t):
r'continue'
return t
def t_RETURN(t):
r'return'
return t
def t_FUNCTION(t):
r'function'
return t
def t_AS(t):
r'as'
return t
#Sentencia if
def t_IF(t):
r'if'
return t
def t_ELSE(t):
r'else'
return t
def t_ELSEIF(t):
r'elseif'
return t
#Lazo
def t_FOREACH(t):
r'foreach'
return t
def t_WHILE(t):
r'while'
return t
#Funciones print
def t_PRINT(t):
r'print'
return t
def t_PRINT_R(t):
r'print_r'
return t
def t_VAR_DUMP(t):
r'var_dump'
return t
#Funciones
def t_FGETS(t):
r'fgets'
return t
def t_FREAD(t):
r'fread'
return t
def t_FSCANF(t):
r'fscanf'
return t
def t_FPASSTHRU(t):
r'fpassthru'
return t
def t_FGETCSV(t):
r'fgetcsv'
return t
def t_FGETC(t):
r'fgetc'
return t
def t_FILE_GET_CONTENTS(t):
r'file_get_contents'
return t
def t_READFILE(t):
r'readfile'
return t
def t_FILE(t):
r'file'
return t
def t_PARSE_INI_FILE(t):
r'parse_ini_file'
return t
def t_IMPLODE(t):
r'implode'
return t
def t_EXPLODE(t):
r'explode'
return t
def t_ARRAY(t):
r'array'
return t
def t_COUNT(t):
r'count'
return t
def t_SIZEOF(t):
r'sizeof'
return t
def t_ARRAY_PUSH(t):
r'array_push'
return t
def t_SORT(t):
r'sort'
return t
def t_ASORT(t):
r'asort'
return t
def t_KSORT(t):
r'ksort'
return t
def t_UNSET(t):
r'unset'
return t
def t_VAR_EXPORT(t):
r'var_export'
return t
def t_SHUFFLE(t):
r'shuffle'
return t
def t_ARRAY_MERGE(t):
r'array_merge'
return t
def t_ARRAY_SEARCH(t):
r'array_search'
return t
def t_ARRAY_RAND(t):
r'array_rand'
return t
def t_ARRAY_CHUNK(t):
r'array_chunk'
return t
def t_STR_SPLIT(t):
r'str_split'
return t
def t_PREG_SPLIT(t):
r'preg_split'
return t
def t_ARRAY_UNIQUE(t):
r'array_unique'
return t
#Nombre de funciones
def t_FNOMBRE(t):
r'(?!or|and|xor)([a-z]|[A-Z])([a-zA-Z0-9_]+)?'
return t
# Error handling: report the offending character and resume one char later.
def t_error(t):
    # t.value holds the remaining input; only its first character is bad here
    print("No es reconocido '%s'"%t.value[0])
    t.lexer.skip(1)
# Track line numbers so tokens/errors can reference their source line.
def t_newline(t):
    r'\n+'
    # a single token may consume several consecutive newlines
    t.lexer.lineno += len(t.value)
lexer=lex.lex()
def analizar(dato):
    """Feed *dato* to the module-level lexer and print every token."""
    lexer.input(dato)
    # lexer.token() returns None when the input is exhausted.
    for tok in iter(lexer.token, None):
        print(tok)
# Iterate over the sample source file; the per-line analysis calls are
# currently disabled. A context manager guarantees the handle is closed
# (the original bare open() leaked it).
with open("archivo.txt") as archivo:
    for linea in archivo:
        #print(">>"+linea)
        #analizar(linea)
        if len(linea) == 0:
            break
def ImprimirAnalizar(dato):
    """Tokenize *dato* line by line and return a printable report string.

    Each input line appears prefixed with "-> ", followed by one line per
    token produced by the module-level lexer.
    """
    partes = []
    for linea in dato.split("\n"):
        partes.append("-> " + linea)
        lexer.input(linea)
        # lexer.token() yields None once the line is exhausted.
        for tok in iter(lexer.token, None):
            partes.append("\n" + str(tok))
        partes.append("\n")
    return "".join(partes)
| keanceva/ProyectoLP | lexicoLP.py | lexicoLP.py | py | 6,448 | python | en | code | 0 | github-code | 36 |
40109668297 | import tkinter as tk
import tkFont
from tkinter import font
def list_fonts():
    """Print every font family known to the Tk default root.

    The original body called ``font.families()`` twice (the first result
    was discarded) and wrapped the second call in a redundant ``list()``;
    a single iteration is sufficient.
    """
    for f in font.families():
        print("Font: ", f)
root = tk.Tk()  # main application window
# Clicking the button dumps all font family names to stdout.
btn = tk.Button(root, text="List families", command=list_fonts)
btn.grid(row=0, column=0)
root.mainloop()  # enter the Tk event loop (blocks until the window closes)
| ekim197711/python-tkinter | print_fonts.py | print_fonts.py | py | 299 | python | en | code | 0 | github-code | 36 |
def new_filter(lines, index, inverted):
    """Filter *lines* by the bit criteria at position *index*.

    With ``inverted=False`` keep the lines whose bit equals the most common
    bit at *index* (ties favour '1'); with ``inverted=True`` keep the least
    common bit (ties favour '0'). Returns the surviving lines as a new list.

    (Also removes the leftover debug ``print("get 1")`` from the original.)
    """
    total = len(lines)
    ones = sum(1 for line in lines if line[index] == "1")
    # 2*ones >= total  <=>  '1' is the most common bit (or tied)
    most_common = "1" if 2 * ones >= total else "0"
    if inverted:
        keep = "0" if most_common == "1" else "1"
    else:
        keep = most_common
    return [line for line in lines if line[index] == keep]
if __name__ == '__main__':
    # Read the diagnostic report, one binary string per line. The context
    # manager closes the handle (the original leaked it) and rstrip('\n')
    # does not eat a digit when the final line lacks a newline (the
    # original used line[:-1]). The unused ``l = []`` was dropped.
    with open("input.txt") as fh:
        lines = [line.rstrip("\n") for line in fh]
    # Oxygen generator rating: repeatedly keep the most-common-bit lines.
    a_l = list(lines)
    i = 0
    while len(a_l) > 1:
        a_l = new_filter(a_l, i, False)
        i += 1
    # CO2 scrubber rating: repeatedly keep the least-common-bit lines.
    b_l = list(lines)
    i = 0
    while len(b_l) > 1:
        b_l = new_filter(b_l, i, True)
        i += 1
    # Life support rating = product of the two ratings (binary -> int).
    print(int(a_l[0], 2) * int(b_l[0], 2))
29073464319 | import csv
import datetime
import pathlib
from typing import Generator
import click
from case_rate._types import Cases, CaseTesting, PathLike
from case_rate.sources._utilities import download_file
from case_rate.storage import InputSource
def _to_date(date: str) -> datetime.date:
'''Converts a date string into a date object.
Parameters
----------
date : str
input date string of the form "DD-MM-YYYY"
Returns
-------
datetime.date
output ``date`` object
'''
dt = datetime.datetime.strptime(date, '%d-%m-%Y')
return dt.date()
def _to_int(number: str) -> int:
'''Converts a numerical string into an integer.
This performs an extra check to see if the input string is ``''``. This is
then treated as a zero. Anything else will result in a ``ValueError``.
Parameters
----------
number : str
input string as a number
Returns
-------
int
the string's integer value
Throws
------
:exc:`ValueError`
if the string is not actually a number
'''
if len(number) == 0:
return 0
if number == 'N/A':
return 0
try:
count = int(number)
except ValueError:
count = int(float(number))
return count
class PublicHealthAgencyCanadaSource(InputSource):
    '''Uses reporting data published by the PHAC.
    This input source uses a CSV file that's regularly updated by the Public
    Health Agency of Canada (PHAC). The default source is
    https://health-infobase.canada.ca/src/data/covidLive/covid19.csv. The
    data source will link back to the original PHAC site rather than to the
    file.
    '''
    def __init__(self, path: PathLike, url: str, info: str,
                 update: bool = True):
        '''
        Parameters
        ----------
        path : path-like object
            directory (on disk) where the cached CSV file is stored
        url : str
            the URL of the Government of Canada's COVID-19 report (CSV file)
        info : str
            the URL to the main information page (not the CSV file)
        update : bool, optional
            if ``True`` then updates an existing CSV file to the latest version
        '''
        # The CSV is always cached under a fixed name inside *path*.
        path = pathlib.Path(path) / 'covid19.csv'
        if path.exists():
            # Cached copy present: only re-download when asked to update.
            if update:
                click.echo('Updating PHAC COVID-19 report.')
                download_file(url, path)
        else:
            # No cached copy yet: always download.
            click.echo('Accessing PHAC COVID-19 report.')
            download_file(url, path)
        self._info = info  # human-facing info page URL, returned by url()
        self._path = path  # local path of the cached CSV
    @classmethod
    def name(cls) -> str:
        # Short machine-readable identifier for this input source.
        return 'public-health-agency-canada'
    @classmethod
    def details(cls) -> str:
        # Human-readable description shown in listings.
        return 'Public Health Agency of Canada - Current Situation'
    def url(self) -> str:
        # Link back to the PHAC information page rather than the raw CSV.
        return self._info
    def cases(self) -> Generator[Cases, None, None]:
        '''Yield one :class:`Cases` record per provincial row in the CSV.'''
        with self._path.open() as f:
            contents = csv.DictReader(f)
            for entry in contents:
                # Skip the national aggregate row; only provinces are yielded.
                if entry['prname'] == 'Canada':
                    continue
                # NOTE: PHAC doesn't report resolved cases as of 2022-08-26
                yield Cases(
                    date=_to_date(entry['date']),
                    province=entry['prname'],
                    country='Canada',
                    confirmed=_to_int(entry['totalcases']),
                    resolved=-1,  # sentinel: not reported by PHAC
                    deceased=_to_int(entry['numdeaths'])
                )
    def testing(self) -> Generator[CaseTesting, None, None]:
        '''Yield one :class:`CaseTesting` record per provincial row in the CSV.'''
        with self._path.open() as f:
            contents = csv.DictReader(f)
            for entry in contents:
                # Skip the national aggregate row; only provinces are yielded.
                if entry['prname'] == 'Canada':
                    continue
                # NOTE: PHAC doesn't report testing counts as of 2022-08-26
                yield CaseTesting(
                    date=_to_date(entry['date']),
                    province=entry['prname'],
                    country='Canada',
                    tested=-1,               # sentinel: not reported
                    under_investigation=-1   # sentinel: not reported
                )
| richengguy/case-rate | src/case_rate/sources/public_health_agency_canada.py | public_health_agency_canada.py | py | 4,097 | python | en | code | 0 | github-code | 36 |
38066575543 | # import the argmax function from numpy to get the index of the maximum value in an array
from numpy import argmax
# import the mnist dataset from keras, which contains 60,000 images of handwritten digits for training and 10,000 images for testing
from keras.datasets import mnist
# import the to_categorical function from keras to convert integer labels to one-hot encoded vectors
from keras.utils import to_categorical
# import the load_img function from keras to load an image from a file
from keras.utils import load_img
# import the img_to_array function from keras to convert an image to a numpy array
from keras.utils import img_to_array
# import the load_model function from keras to load a saved model from a file
from keras.models import load_model
# import the Sequential class from keras to create a linear stack of layers for the model
from keras.models import Sequential
# import the Conv2D class from keras to create a convolutional layer that applies filters to the input image and produces feature maps
from keras.layers import Conv2D
# import the MaxPooling2D class from keras to create a pooling layer that reduces the size of the feature maps by taking the maximum value in each region
from keras.layers import MaxPooling2D
# import the Dense class from keras to create a fully connected layer that performs a linear transformation on the input vector and applies an activation function
from keras.layers import Dense
# import the Flatten class from keras to create a layer that flattens the input tensor into a one-dimensional vector
from keras.layers import Flatten
# import the SGD class from keras to create a stochastic gradient descent optimizer with a learning rate and a momentum parameter
from keras.optimizers import SGD
# import matplotlib.pyplot as plt to plot and show images using matplotlib library
import matplotlib.pyplot as plt
# import os.path to check if a file exists in the current directory
import os.path
# import sys to exit the program if an invalid input is given by the user
import sys
from sklearn.model_selection import KFold
# define the model file name as a global variable
model_file_name = 'mnist_cnn_test_1.h5'
# define a function to load and prepare the train and test dataset
def load_dataset():
    """Load MNIST, add a channel axis and one-hot encode the labels.

    Returns (trainX, trainY, testX, testY) where the images are shaped
    (n, 28, 28, 1) and the labels are 10-way one-hot vectors.
    """
    (trainX, trainY), (testX, testY) = mnist.load_data()
    # Grayscale images need an explicit single-channel axis for Conv2D.
    trainX = trainX.reshape((-1, 28, 28, 1))
    testX = testX.reshape((-1, 28, 28, 1))
    # Integer class labels -> one-hot vectors.
    trainY = to_categorical(trainY)
    testY = to_categorical(testY)
    return trainX, trainY, testX, testY
# define a function to scale the pixel values of the train and test images
def prep_pixels(train, test):
    """Scale uint8 image arrays to float32 values in [0, 1].

    Returns the normalized (train, test) pair; the inputs are not modified.
    """
    train_norm = train.astype('float32') / 255.0
    test_norm = test.astype('float32') / 255.0
    return train_norm, test_norm
# define a function to create and compile a CNN model
def define_model():
    """Build and compile the CNN used for MNIST digit classification.

    Architecture: Conv(32) -> MaxPool -> Conv(64) x2 -> MaxPool -> Flatten
    -> Dense(100) -> Dense(10, softmax). Compiled with SGD(lr=0.01,
    momentum=0.9) and categorical cross-entropy.
    """
    model = Sequential([
        Conv2D(32, (3, 3), activation='relu',
               kernel_initializer='he_uniform', input_shape=(28, 28, 1)),
        MaxPooling2D((2, 2)),
        Conv2D(64, (3, 3), activation='relu',
               kernel_initializer='he_uniform'),
        Conv2D(64, (3, 3), activation='relu',
               kernel_initializer='he_uniform'),
        MaxPooling2D((2, 2)),
        Flatten(),
        Dense(100, activation='relu', kernel_initializer='he_uniform'),
        # 10 output units, one probability per digit class.
        Dense(10, activation='softmax'),
    ])
    model.compile(optimizer=SGD(learning_rate=0.01, momentum=0.9),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
# define a function to run the test harness for evaluating a model
def run_test_harness():
    """Train the CNN on MNIST, print the test accuracy and save the model."""
    # load the train/test splits (images reshaped, labels one-hot encoded)
    trainX, trainY, testX, testY = load_dataset()
    # normalise pixel values to [0, 1]
    trainX, testX = prep_pixels(trainX, testX)
    # fresh, compiled model
    model = define_model()
    # 10 epochs, mini-batches of 32, with progress output
    model.fit(trainX, trainY, epochs=10, batch_size=32, verbose=1)
    # held-out accuracy on the test split
    _, acc = model.evaluate(testX, testY, verbose=1)
    print('evaluate result > %.3f' % (acc * 100.0))
    # persist the trained model for later prediction runs
    model.save(model_file_name)
# define a function to load and prepare an image for prediction
def load_image(filename):
    """Load *filename* as a 28x28 grayscale image scaled to [0, 1].

    Returns a float32 array shaped (1, 28, 28, 1), ready to be passed to
    ``model.predict``.
    """
    # ``grayscale=True`` is deprecated in Keras; ``color_mode`` replaces it.
    img = load_img(filename, color_mode='grayscale', target_size=(28, 28))
    img = img_to_array(img)
    # single sample, single channel
    img = img.reshape(1, 28, 28, 1)
    # match the training preprocessing (see prep_pixels)
    img = img.astype('float32') / 255.0
    return img
# define a function to load an image and predict the class using the model
def run_example(path):
    """Predict the digit in the image at *path* and display the result."""
    img = load_image(path)
    model = load_model(model_file_name)
    # index of the highest class probability = predicted digit
    digit = argmax(model.predict(img))
    print(digit)
    # show the input image with the prediction as the title
    plt.imshow(img[0], cmap='gray')
    plt.title('Predicted label: ' + str(digit))
    plt.show()
# ----------------------------------------------------------------------
# Entry point
# ----------------------------------------------------------------------
# Ask whether to retrain or reuse a previously saved model.
re_train = input('re train data and evaluate model? (0 -> false | 1 -> true): ')

# Abort on anything other than "0", "1" or an empty answer.
if re_train not in ("0", "1", ""):
    print("input condition not satisfied")
    sys.exit()

# Reuse the saved model unless retraining was requested or no model exists.
if os.path.isfile(model_file_name) and re_train in ("0", ""):
    model = load_model(model_file_name)
else:
    run_test_harness()

# Classify a sample image with the (possibly freshly trained) model.
p = 'test_0_1.png'
run_example(path=p)
72092398505 | day1 = ("monday", "tuesday", "wednesday") # 변수 day1에 문자열이 요소인 튜플 만들어 대입
day2 = ("thursday", "friday", "saturday") # tuple of weekday-name strings
day3 = ("sunday", ) # one-element tuple: the trailing comma is mandatory
day = day1 + day2 + day3 # + concatenates the tuples into a new tuple
print(type(day)) # prints the type of day: <class 'tuple'>
print(day) # print the combined tuple
day = day1 + day2 + day3 * 3 # * repeats day3 three times; * binds tighter than +
print(day) # print the tuple with the repeated tail
| jectgenius/python | ch05/05-13daytuple.py | 05-13daytuple.py | py | 966 | python | ko | code | 0 | github-code | 36 |
42153809968 | # 1012 유기농배추
import sys
sys.setrecursionlimit(10**6)
case = int(input())
moves = [[0, 1], [0, -1], [-1, 0], [1, 0]]
def dfs(graph, x, y):
graph[y][x] = 2
for move in moves:
nx = x+move[0]
ny = y+move[1]
if 0 <= nx < len(graph[0]) and 0 <= ny < len(graph):
if graph[ny][nx] == 1:
dfs(graph, nx, ny)
for _ in range(case):
cnt = 0
m, n, cabbages = map(int, input().split())
graph = [[0 for _ in range(m)] for _ in range(n)]
for _ in range(cabbages):
x, y = map(int, input().split())
graph[y][x] = 1
for x in range(m):
for y in range(n):
if graph[y][x] == 1:
dfs(graph, x, y)
cnt += 1
print(cnt)
| FeelingXD/algorithm | beakjoon/1012.py | 1012.py | py | 763 | python | en | code | 2 | github-code | 36 |
27142738401 | from django.shortcuts import render, get_object_or_404
from .models import Animal
def index(request):
animais = Animal.objects.all()
return render(request, 'clientes/index.html', {
'animais': animais
})
def ver_animal(request, animal_id):
animal = get_object_or_404(Animal, id=animal_id)
return render(request, 'clientes/ver_cliente.html', {
'animal': animal
})
| LorenzoBorges/Projeto-Veterinario | clientes/views.py | views.py | py | 406 | python | en | code | 0 | github-code | 36 |
74117892903 | # Números primos: Escreva um programa que determine se um número é primo ou não.
num = int(input("Digite um número para verificar se é primo ou não: "))
if num < 2:
print(f"{num} não é primo")
for i in range(2, num):
if num % i == 0:
print(f"{num} não é primo")
break
else:
print(f"{num} é primo")
| kingprobr/Python-Exercises | PrimeNumber.py | PrimeNumber.py | py | 341 | python | pt | code | 0 | github-code | 36 |
11539702751 | import os
import pygame
import pygame.color
from views.panelview import PanelView
class MenuView(PanelView):
def __init__(self, config, bus):
PanelView.__init__(self, config, bus)
self.fntRegText = pygame.font.Font(os.path.join(self.config.script_directory, "assets/Roboto-Regular.ttf"), 16)
dashboard_icon = pygame.image.load(os.path.join(self.config.script_directory, 'assets/icon-dashboard.png'))
graph_icon = pygame.image.load(os.path.join(self.config.script_directory, 'assets/icon-graph.png'))
control_icon = pygame.image.load(os.path.join(self.config.script_directory, 'assets/icon-control.png'))
setting_icon = pygame.image.load(os.path.join(self.config.script_directory, 'assets/icon-setting.png'))
self.menu_items = [{"text": "Dashboard", "icon": dashboard_icon, "name": "dashboard"},
{"text": "Temperature Graph", "icon": graph_icon, "name": "graph"},
{"text": "Control", "icon": control_icon, "name": "control"},
{"text": "Settings", "icon": setting_icon, "name": "settings"}]
self.items_per_page = 4
self.page = 0
self.background_color = pygame.color.Color("#EF3220")
self.divider_color = pygame.color.Color("#CC302B")
def handle_event(self, event):
PanelView.handle_event(self, event)
if event.type == pygame.MOUSEBUTTONUP:
if 40 <= event.pos[1] < 200:
item_pos = ((event.pos[1] - 40) / 40) + (self.page * self.items_per_page)
if len(self.menu_items) > item_pos:
self.bus.publish("viewchange", self.menu_items[item_pos]["name"])
def draw(self, screen):
PanelView.draw(self, screen)
s = pygame.Surface((320, 200))
s.fill(self.background_color)
screen.blit(s, (0, 0))
pygame.draw.line(screen, self.divider_color, (0, 40), (320, 40))
for index, item in enumerate(self.menu_items[self.page * self.items_per_page: self.page + 1 * self.items_per_page]):
file_name_lbl = self.fntRegText.render(item["text"], 1, (255, 255, 255))
ypos = 40 + (index * 40)
screen.blit(file_name_lbl, (40, ypos + 12))
pygame.draw.line(screen, self.divider_color, (0, ypos + 40), (320, ypos + 40))
screen.blit(item["icon"], (0, ypos)) | mcecchi/OctoPiControlPanel | views/menuview.py | menuview.py | py | 2,401 | python | en | code | 1 | github-code | 36 |
27182055536 | # Viết chương trình in bảng cửu chương từ 2 đến n (Xuất ra theo cột)
while True:
n=int(input("Nhập số nguyên n: "))
if n <= 2:
print("Nhập số nguyên n > 2 nha, please")
continue
break
for i in range(1,10):
for j in range(2, n+1):
print("{}x{}={}".format(i, j, i *j), end='\t')
print("\n") | hanhkim/py_fundamental | Tuan3_300923/bai5.py | bai5.py | py | 363 | python | vi | code | 0 | github-code | 36 |
17210427292 | #!/usr/bin/env python3
import rospy
import numpy as np
from nav_msgs.msg import Odometry
from rosflight_msgs.msg import Command
from diff_flatness import diff_flatness
from traj_planner import trajectory_planner
from controller import controller
import yaml
# import matplotlib.pyplot as plt
class simTester:
def __init__(self):
self.pub = rospy.Publisher('/command',Command, queue_size=10, latch=True)
self.command_msg = Command()
self.command_msg.mode = self.command_msg.MODE_ROLL_PITCH_YAWRATE_THROTTLE
self.count = True # used for initiating time
self.past_start = True # used for initiating time for starting trajectory
rospy.Subscriber('/odom',Odometry,self.callback)
# with open('/home/matiss/demo_ws/src/demo/scripts/demo.yaml','r') as f:
# param = yaml.safe_load(f)
param = rospy.get_param('~')
self.mass = param['dynamics']['mass']
self.g = param['dynamics']['g']
self.controller = controller(param) # initiate controller
# calculate the equilibrium force
self.force_adjust = param['dynamics']['mass']*param['dynamics']['g']/param['controller']['equilibrium_throttle']
# plotting variables
# self.time = []
# self.pos_x_des = []
# self.pos_x_actual = []
# self.pos_y_des = []
# self.pos_y_actual = []
# self.pos_z_des = []
# self.pos_z_actual = []
def callback(self,msg):
# for some reason time did not initiate correctly if done
# in the __init__ function
if self.count:
self.start_time = rospy.Time.now()
self.count = False
time = rospy.Time.now()
time_from_start = time.to_sec() - self.start_time.to_sec()
# First 8 seconds stand still
if (time_from_start <= 8.0) and self.past_start:
self.command_msg.x = 0.0
self.command_msg.y = 0.0
self.command_msg.z = 0.0
self.command_msg.F = 0.2
else:
if self.past_start:
self.start_time = rospy.Time.now()
time_from_start = time.to_sec() - self.start_time.to_sec()
self.past_start = False
pose = trajectory_planner(time_from_start)
pos = msg.pose.pose.position
# The mekf gives unusual odometry message, the coordinates are different than NED
pos = np.array([-1.*pos.y,-1.*pos.z,pos.x])
attitude = msg.pose.pose.orientation
vel = msg.twist.twist.linear
vel = np.array([-1.*vel.y,-1.*vel.z,vel.x])
ang_curr = self.euler(attitude)
# plotting variables used for testing in simulation
# self.time.append(time_from_start)
# self.pos_x_actual.append(pos[0])
# self.pos_x_des.append(pose[0][0])
# self.pos_y_actual.append(pos[1])
# self.pos_y_des.append(pose[0][1])
# self.pos_z_actual.append(pos[2])
# self.pos_z_des.append(pose[0][2])
reference = np.array([pose[0],pose[2],pose[4],pose[1]])
state = np.array([pos,vel,ang_curr[2]])
control_inputs = self.controller.update(reference,state)
accel_input = np.array([control_inputs[0],pose[3]+control_inputs[1]])
states = diff_flatness(pose, accel_input, ang_curr,mass=self.mass,g=self.g)
# R = states[1]
# angle = self.angles(R)
force = states[2] / self.force_adjust
force = self.controller.saturate(force)
roll = states[0]
pitch = states[1]
yaw_rate = accel_input[1]
self.command_msg.x = roll
self.command_msg.y = pitch
self.command_msg.z = 0.0 # yaw_rate if variable desired yaw
self.command_msg.F = force
# Ignores x,y,z for testing purposes (delete afterwards)
self.command_msg.ignore = Command.IGNORE_X | Command.IGNORE_Y | Command.IGNORE_Z
self.pub.publish(self.command_msg)
def update(self):
rospy.spin()
def angles(self,R): # ends up unused for now
yaw = np.arctan2(-R[0][1],R[1][1])
pitch = np.arctan2(-R[2][0],R[2][2])
roll = np.arctan2(R[2][1]*np.cos(pitch),R[2][2])
return np.array([roll,pitch,yaw])
def euler(self, quat):
w = quat.w
x = -1.*quat.y
y = -1.*quat.z
z = quat.x
roll = np.arctan2(2.0 * (w * x + y * z), 1. - 2. * (x * x + y * y))
pitch = np.arcsin(2.0 * (w * y - z * x))
yaw = np.arctan2(2.0 * (w * z + x * y), 1. - 2. * (y * y + z * z))
return np.array([roll,pitch,yaw])
if __name__ == '__main__':
rospy.init_node('sim_tester', anonymous=True)
sim_tester = simTester()
while not rospy.is_shutdown():
try:
sim_tester.update()
except rospy.ROSInterruptException:
print("exiting....")
# Plot the states over time and relative to each other
# plt.figure(1)
# plt.subplot(311)
# plt.plot(sim_tester.time,sim_tester.pos_x_actual)
# plt.plot(sim_tester.time,sim_tester.pos_x_des)
# plt.subplot(312)
# plt.plot(sim_tester.time,sim_tester.pos_y_actual)
# plt.plot(sim_tester.time,sim_tester.pos_y_des)
# plt.subplot(313)
# plt.plot(sim_tester.time,sim_tester.pos_z_actual)
# plt.plot(sim_tester.time,sim_tester.pos_z_des)
# plt.figure(2)
# plt.subplot(311)
# plt.plot(sim_tester.pos_x_actual,sim_tester.pos_y_actual)
# plt.plot(sim_tester.pos_x_des,sim_tester.pos_y_des)
# plt.subplot(312)
# plt.plot(sim_tester.pos_x_actual,sim_tester.pos_z_actual)
# plt.plot(sim_tester.pos_x_des,sim_tester.pos_z_des)
# plt.subplot(313)
# plt.plot(sim_tester.pos_y_actual,sim_tester.pos_z_actual)
# plt.plot(sim_tester.pos_y_des,sim_tester.pos_z_des)
# plt.show() | malioni/demo | scripts/sim_tester.py | sim_tester.py | py | 5,990 | python | en | code | 0 | github-code | 36 |
70887541865 | #!/usr/bin/env python
# coding: utf-8
# Leet Code problem: 206
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
# Iteratively
# class Solution:
# def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:
# prev = None
# curr = head
# while curr:
# next_p = curr.next
# curr.next = prev
# prev = curr
# curr = next_p
# return prev
# Recursively
# class Solution:
# def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:
# if head == None or head.next == None:
# return head
# new_head = self.reverseList(head.next)
# head.next.next = head
# head.next = None
# return new_head
# Recursively 2
class Solution:
def reverseList(self, head: Optional[ListNode], prev=None) -> Optional[ListNode]:
if head == None: return prev
next_head = head.next
head.next = prev
return self.reverseList(next_head, head)
| jwilliamn/trenirovka-code | leetc_206.py | leetc_206.py | py | 1,138 | python | en | code | 0 | github-code | 36 |
2050897159 | from openpyxl import load_workbook, Workbook
from django.core.management import BaseCommand
from django.db.utils import IntegrityError
from nomenclature.models import *
SERVICE_TYPES = [
'Not defined',
'ПРОФ',
'Лабораторное исследование',
'Коммерческий профиль',
'Услуга'
]
class Command(BaseCommand):
def handle(self, *args, **kwargs):
ServiceType.objects.all().delete()
for type in SERVICE_TYPES:
new_type = ServiceType(name=type)
new_type.save()
Group.objects.all().delete()
SubGroup.objects.all().delete()
wb = load_workbook('nomenclature/data/Список групп и подгрупп.xlsx', read_only=True)
first_sheet = wb.worksheets[0]
groups = {}
for row in first_sheet.rows:
if str(row[0].value) not in groups.keys():
groups[str(row[0].value)] = {'name':str(row[1].value), 'subgroups':[]}
groups[str(row[0].value)]['subgroups'].append({'number':str(row[2].value), 'name':str(row[3].value)})
continue
groups[str(row[0].value)]['subgroups'].append({'number':str(row[2].value), 'name':str(row[3].value)})
for group in groups.keys():
new_group = Group(number=group, name=groups[group]['name'])
new_group.save()
for sg in groups[group]['subgroups']:
new_sg = SubGroup(number=sg['number'], name=sg['name'], group=new_group)
new_sg.save()
new_group = Group(number='99', name='не определена!')
new_group.save()
new_sg = SubGroup(number='99', name='не определена!', group=new_group)
new_sg.save()
Service.objects.all().delete()
wb = load_workbook('nomenclature/data/nomenclature.xlsx', read_only=True)
first_sheet = wb.worksheets[0]
for row in first_sheet:
if row[0].value is not None:
try:
gr = Group.objects.get(number=str(row[0].value))
except:
print(f'группа {row[0].value} не найдена, для теста {row[7].value}')
else:
try:
gr = Group.objects.get(number='99')
except:
print(f'группа 99 не найдена, для теста {row[7].value}')
if row[1].value is not None:
try:
sg = SubGroup.objects.get(number=str(row[1].value), group=gr)
except:
print(f'подгруппа {row[1].value} не найдена, для теста {row[7].value}')
else:
try:
sg = SubGroup.objects.get(number='99')
except:
print(f'подгруппа 99 не найдена, для теста {row[7].value}')
if row[3].value is not None:
type = ServiceType.objects.get(name=row[3].value)
else:
type = ServiceType.objects.get(name='Not defined')
if type.name == 'Коммерческий профиль':
new_record = Profile()
else:
new_record = Service()
try:
new_record.subgroup=sg
new_record.salesability=True
new_record.clients_group=row[2].value
new_record.type=type
new_record.classifier_1с=row[4].value
new_record.tcle_code=row[5].value
new_record.tcle_abbreviation=row[6].value
new_record.code=row[7].value
new_record.name=row[8].value
new_record.blanks=row[9].value
new_record.biomaterials=row[10].value
new_record.container=row[11].value
new_record.result_type=row[12].value
new_record.due_date=row[13].value
new_record.save()
except IntegrityError:
print(f'код {row[7].value} - не уникальный, второй раз не добавлен')
profiles = Profile.objects.all()
for profile in profiles:
profile.services.clear()
wb = load_workbook('nomenclature/data/profile.xlsx', read_only=True)
first_sheet = wb.worksheets[0]
for row in first_sheet.rows:
try:
profile = Profile.objects.get(code=row[0].value)
except:
print(f'Профиля {row[0].value} нет в номенклатуре')
try:
service = Service.objects.get(code=row[1].value)
except:
print(f'Услуги {row[1].value} нет в номенклатуре')
continue
profile.services.add(service)
wb = load_workbook('nomenclature/data/test_set.xlsx', read_only=True)
first_sheet = wb.worksheets[0]
for row in first_sheet.rows:
test_set = TestSet(key_code=row[1].value, name=row[2].value, department=row[3].value, addendum_key=row[4].value)
test_set.save()
try:
service = Service.objects.get(code=row[0].value)
except:
print(f'Услуги {row[0].value} - нет в номенклатуре')
service.test_set=test_set
service.save()
wb = load_workbook('nomenclature/data/test.xlsx', read_only=True)
first_sheet = wb.worksheets[0]
for num, row in enumerate(first_sheet.rows):
check_test = Test.objects.filter(keycode=row[0].value)
if not check_test:
test = Test(
keycode=row[0].value,
name=row[1].value,
short_name=row[1].value[:50],
result_type=row[4].value,
decimal_places=5,
kdl_test_key=row[2].value,
measure_unit=row[3].value,
)
test.save()
if row[10].value is not None:
test = Test.objects.get(keycode=row[0].value)
new_reference = Reference(
test=test,
position=int(row[10].value[:-4])
)
if row[5].value is None:
new_reference.sex = 'Любой'
if row[6].value is not None:
if '.' in row[6].value:
age = row[6].value.split('.')
yy = '00' if not age[0] else age[0]
mm = age[1][:2]
dd = age[1][2:]
age_from = f'{yy}:{mm}:{dd}'
new_reference.age_from = age_from
else:
new_reference.age_from = f'{row[6].value}:00:00'
if row[7].value is not None:
if '.' in row[7].value:
age = row[7].value.split('.')
yy = '00' if not age[0] else age[0]
mm = age[1][:2]
dd = age[1][2:]
age_to = f'{yy}:{mm}:{dd}'
new_reference.age_to = age_to
else:
new_reference.age_to = f'{row[7].value}:00:00'
if row[8].value is not None:
new_reference.lower_normal_value = row[8].value
if row[9].value is not None:
new_reference.upper_normal_value = row[9].value
if row[13].value is not None:
new_reference.normal_text = row[13].value
if row[11].value is not None:
new_reference.normal_text = row[13].value
if row[11].value is not None:
new_reference.clinic_interpretation_key = row[11].value
if row[12].value is not None:
new_reference.clinic_interpretation_text = row[12].value
new_reference.save()
wb = load_workbook('nomenclature/data/med.xlsx', read_only=True)
first_sheet = wb.worksheets[0]
for row in first_sheet.rows:
try:
service = Service.objects.get(code=row[0].value)
except:
print(f'Услуги {row[0].value} - нет в номенклатуре')
continue
new_record = MadicineData(
service=service,
alter_name_KC=row[1].value,
alter_name=row[2].value,
note=row[3].value,
volume_pp=row[4].value,
container_pp=row[5].value,
guide_pp=row[6].value,
transport_conditions=row[7].value,
term_assign=row[8].value,
description=row[9].value,
method=row[10].value,
factors=row[11].value,
preparation=row[12].value,
)
new_record.save()
| Sin93/lab | nomenclature/management/commands/import.py | import.py | py | 9,200 | python | en | code | 0 | github-code | 36 |
14299284987 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/1/9 17:59
# @Author : lingxiangxiang
# @File : demonpyexcele.py
import pyExcelerator
#创建workbook和sheet对象
wb = pyExcelerator.Workbook()
ws = wb.add_sheet(u'第一页')
#设置样式
myfont = pyExcelerator.Font()
myfont.name = u'Times New Roman'
myfont.bold = True
mystyle = pyExcelerator.XFStyle()
mystyle.font = myfont
#写入数据,使用样式
ws.write(0, 0, u'hello lingxiangxinag!', mystyle)
#保存该excel文件,有同名文件时直接覆盖
wb.save('mini.xls')
print('创建excel文件完成!')
import pyExcelerator
#parse_xls返回一个列表,每项都是一个sheet页的数据。
#每项是一个二元组(表名,单元格数据)。其中单元格数据为一个字典,键值就是单元格的索引(i,j)。如果某个单元格无数据,那么就不存在这个值
sheets = pyExcelerator.parse_xls('mini.xls')
print(sheets)
| ajing2/python3 | tmptestdemon/dataprocess/demonpyexcele.py | demonpyexcele.py | py | 930 | python | zh | code | 2 | github-code | 36 |
34408041143 | class Solution:
def maxProfit(self, prices: list[int]) -> int:
left, right = 0, 1
max_profit = 0
while right < len(prices):
if prices[left] < prices[right]:
# Calculate profit
profit = prices[right] - prices[left]
max_profit = max(max_profit, profit)
else:
# Update left as the right is smaller now
left = right
right += 1
return max_profit
| anuragMaravi/LeetCode-Solutions | python3/121. Best Time to Buy and Sell Stock.py | 121. Best Time to Buy and Sell Stock.py | py | 494 | python | en | code | 0 | github-code | 36 |
25874083390 | #! python3
# scraper for dark souls armor
import requests
import re
import sqlite3
from bs4 import BeautifulSoup
import time
# connecting to actual database
conn = sqlite3.connect("./databases/armor.db")
# testing connection
# conn = sqlite3.connect(":memory:")
c = conn.cursor()
with conn:
c.execute("""CREATE TABLE IF NOT EXISTS armor(
slot TEXT,
name TEXT,
durability REAL,
weight REAL,
physical REAL,
strike REAL,
slash REAL,
thrust REAL,
magic REAL,
fire REAL,
lightning REAL,
poise REAL,
bleed REAL,
poison REAL,
curse REAL)""")
# create tuple of item types which will be added to table
# tables on website contain no slot info but are always
# in this order so we map these to the items
item_slots = ("helmet", "chest", "gauntlets", "boots")
r = requests.get("https://darksouls.wiki.fextralife.com/Armor").text
soup = BeautifulSoup(r, "lxml")
delay = 0
for a in soup.find_all('a', class_ = "wiki_link wiki_tooltip", href=re.compile(r"\+Set")):
# adaptive delay based on website response time
time.sleep(delay)
start = time.time()
# website has both local and url reference links, this formats the request correctly
if ".com" in a["href"]:
page = requests.get(a["href"])
else:
page = requests.get(f"https://darksouls.wiki.fextralife.com{a['href']}")
end = time.time()
response_time = end - start
delay = response_time * 10
# if bad response from the link we skip attempting to process and move to next link
if not page.ok:
continue
# print(page.url)
info = BeautifulSoup(page.text, "lxml")
# attempts to find second table on page, which has relevant info
try:
table = info.find_all("table")[1]
except IndexError:
continue
# creates the iterator for pulling item type since info is not in table
slots = iter(item_slots)
# each row contains the name and stats of one armor item in the set
for row in table.tbody:
# list for containing scraped info to be stored in db
vals = []
# exception handling when trying to parse table data
try:
data = row.find_all("td")
except AttributeError as e:
pass
# print(e)
else:
# first row only has <th> tags, this skips it
if not data:
continue
# Names of items are contained in <a> tags which link to their pages
# This check skips the total row at bottom of table preventing StopIteration Exception
elif data[0].find('a') is not None:
# each page's table is in order of the slots iterator
vals.append(next(slots))
for line in data:
vals.append(line.text)
# print(line.text)
# once vals is populated we print the values and insert them into db
with conn:
print(f"Inserting {vals}")
c.execute(f"INSERT INTO armor VALUES ({', '.join('?' for i in range(15))})", tuple(vals))
# finally insertion is confirmed by printing all values from db
with conn:
for line in c.execute("SELECT * FROM armor"):
print(line)
| Bipolarprobe/armorcalc | armorscrape.py | armorscrape.py | py | 3,289 | python | en | code | 0 | github-code | 36 |
41774888432 | import time
import sys
sys.path.append("../")
from Utils_1 import Util
import pymysql
from lxml import etree
import requests
import http
from Utils_1.UA import User_Agent
import random
"""
数据来源:中华人民共和国商务部
来源地址:http://femhzs.mofcom.gov.cn/fecpmvc/pages/fem/CorpJWList_nav.pageNoLink.html?session=T&sp=1&sp=S+_t1.CORP_CDE%2C+_t1.id&sp=T&sp=S
数据描述:境外投资企业(机构)备案结果公开名录列表
目标表中文名:境外投资企业公开名录列表
目标表英文名:EXT_INV_ENTP_LST_INF
数据量:3 - 4 (万条)
作者:mcg
状态:完成
记录时间:2019.08.02
备注:对于cookie值,可以再优化。
"""
class FemhzsMofcomGov:
def __init__(self):
self.base_url = "http://femhzs.mofcom.gov.cn/fecpmvc/pages/fem/CorpJWList_nav.pageNoLink.html?" \
"session=T&sp={}&sp=S+_t1.CORP_CDE%2C+_t1.id&sp=T&sp=S"
self.headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,"
"application/signed-exchange;v=b3",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Connection": "keep-alive",
"Cookie": "JSESSIONID=ACBDC30A40FD783627A075ADB9440B4D; insert_cookie=56224592 ",
"Host": "femhzs.mofcom.gov.cn",
"Referer": "http://femhzs.mofcom.gov.cn/fecpmvc/pages/fem/CorpJWList.html",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/75.0.3770.100 Safari/537.36",
}
self.f_headers = {
"Host": "femhzs.mofcom.gov.cn",
"Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
"Referer": "http://www.mofcom.gov.cn/publicService.shtml",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9"
}
self.util = Util()
self.conn = self.util.MySQL()
self.page = 0
def insert2mysql(self, sql):
try:
self.conn.cursor().execute(sql)
self.conn.commit()
print("插入成功")
except pymysql.err.IntegrityError:
print("插入失败,数据重复")
self.conn.rollback()
except pymysql.err.ProgrammingError:
print("数据异常,已回滚")
self.conn.rollback()
def run(self):
first_req = requests.get(url="http://femhzs.mofcom.gov.cn/fecpmvc/pages/fem/CorpJWList.html",
headers=self.f_headers)
cookies = first_req.headers["Set-Cookie"].replace(" Path=/fecpmvc,", "").replace("; path=/", "")
try:
page = etree.HTML(first_req.text).xpath(
"//em[@class=\"m-page-total-num\"]/text()")[0]
except TimeoutError:
time.sleep(10)
page = etree.HTML(first_req.text).xpath(
"//em[@class=\"m-page-total-num\"]/text()")[0]
except http.client.RemoteDisconnected:
time.sleep(10)
self.headers["User-Agent"] = random.choice(User_Agent)
page = etree.HTML(first_req.text).xpath(
"//em[@class=\"m-page-total-num\"]/text()")[0]
print("共有:{} 页".format(page))
for i in range(1, int(page)):
print(i)
data = {
"session": "T",
"sp": i,
"sp": "S _t1.CORP_CDE, _t1.id",
"sp": "T",
"sp": "S",
}
self.headers["Cookie"] = cookies
url = self.base_url.format(i)
try:
res = requests.get(url=url, headers=self.headers, data=data, timeout=15)
except TimeoutError:
time.sleep(10)
res = requests.get(url=url, headers=self.headers, data=data, timeout=15)
time.sleep(2)
if res.status_code == 200:
print("请求成功,开始解析")
html = etree.HTML(res.text)
for tr in html.xpath("//table[@class=\"m-table\"]/tbody/tr"):
company_name = tr.xpath("./td[1]/text()")[0].strip()
investor_name = tr.xpath("./td[2]/text()")[0].strip()
country = tr.xpath("./td[3]/text()")[0].strip()
# 公司名称编码作为id
md5_company = self.util.MD5(company_name)
# 获取当前时间
otherStyleTime = self.util.get_now_time()
sql = "insert into EXT_INV_ENTP_LST_INF(ID, OVS_INV_ENTP_NM, OVS_INV_NM, INV_CNR, INPT_DT)values('%s','%s','%s','%s','%s')" % (md5_company, company_name, investor_name, country, otherStyleTime)
self.insert2mysql(sql)
else:
print("请求失败, HTTP Code:{}".format(res.status_code))
if __name__ == '__main__':
while True:
f = FemhzsMofcomGov()
f.run()
time.sleep(86400)
| 921016124/Spiders | module/对外投资/femhzs_mofcom_gov.py | femhzs_mofcom_gov.py | py | 5,506 | python | en | code | 0 | github-code | 36 |
74339876262 | #!/usr/bin/env python3
# pip install unittest-xml-reporting
# pip install coverage
# sudo pip install flake8
# pip install --upgrade --pre pybuilder
# sudo pyb install_dependencies publish
# See also as example : https://github.com/yadt/shtub/blob/master/build.py
# sudo apt-get install python-setuptools python-all debhelper dpkg-dev
# sudo -H pip install stdeb
# https://github.com/antevens/listen/blob/master/build.py
from pybuilder.core import Author, init, use_plugin, task
import os, sys
from subprocess import Popen, TimeoutExpired, STDOUT, CalledProcessError, check_output, PIPE
main_rel_version = "1"
name = "restsyscollector"
summary = "REST SYSTEM COLLECTOR"
description = "Rest System Collector for collection and offer system metrics"
authors = [Author("Kay Vogelgesang", "kay.vogelgesang@apachefriends.org")]
url = "http://www.onlinetech.de"
license = "GPL"
def getversion():
try:
bugfix_version = os.environ['BUILD_NUMBER']
release_version = main_rel_version + ".0." + str(bugfix_version)
except:
release_version = main_rel_version + ".0.0"
return release_version
@task
def upload_to_pypi_server():
timeout=10
pypi_server = "testserver01"
twine = "twine"
artifact = "target/dist/" + name + "-" + getversion() + "/dist/" + name + "-" + getversion()
cur_version = sys.version_info
python_short_version = str(cur_version[0]) + "." + str(cur_version[1])
print("[INFO] Use Python in version " + python_short_version + "for building artifacts")
try:
os.environ['BUILD_NUMBER']
jenkins_workspace = os.environ['WORKSPACE']
artifact = jenkins_workspace + "/" + artifact
#twine = jenkins_workspace + "/venv/bin/" + twine
except:
print("[INFO] Not a Jenkins build - do not upload artifacts to pypi-server " + pypi_server)
return
try:
result = check_output(twine + " --version", stderr=STDOUT, shell=True)
result = result.decode("utf-8")
print("[INFO] Find twine as -> " + str(result))
egg_artifact = artifact + "-py" + python_short_version + ".egg"
tar_artifact = artifact + ".tar.gz"
if not os.path.isfile(egg_artifact):
print("[WARN] Cannot find " + egg_artifact + " to upload")
else:
egg_artifact = artifact + "-py" + python_short_version + ".egg"
print("[INFO] Start upload " + egg_artifact + " with twine to -> " + pypi_server)
twine_command = twine + " upload -r " + pypi_server + " " + egg_artifact
print("[INFO] Start -> " + twine_command)
try:
proc = Popen(twine_command, shell=True, stdout=PIPE, stderr=STDOUT)
out, err = proc.communicate(timeout=timeout)
except TimeoutExpired:
proc.kill()
print("[ERROR] Cannot upload " + egg_artifact + " to pypi server " + pypi_server + ". Timeout after "
+ str(timeout) + " seconds ")
sys.exit(1)
except Exception as e:
print("[ERROR] Cannot upload " + egg_artifact + " to pypi server " + pypi_server + "\n" + str(e))
sys.exit(1)
print(str(out.decode("utf-8")))
if "HTTPError:" in out.decode("utf-8"):
print("[ERROR] Cannot upload " + egg_artifact + " to pypi server " + pypi_server + "\n")
sys.exit(1)
if not os.path.isfile(tar_artifact):
print("[WARN] Cannot find " + tar_artifact + ".to upload")
else:
print("[INFO] Start upload " + tar_artifact + " with twine to -> " + pypi_server)
twine_command = twine + " upload -r " + pypi_server + " " + tar_artifact
print("[INFO] Start -> " + twine_command)
try:
proc = Popen(twine_command, shell=True, stdout=PIPE, stderr=STDOUT)
out, err = proc.communicate(timeout=timeout)
except TimeoutExpired:
proc.kill()
print("[ERROR] Cannot upload " + tar_artifact + " to pypi server " + pypi_server + ". Timeout after "
+ str(timeout) + " seconds ")
sys.exit(1)
except Exception as e:
print("[ERROR] Cannot upload " + tar_artifact + " to pypi server " + pypi_server + "\n" + str(e))
sys.exit(1)
print(str(out.decode("utf-8")))
if "HTTPError:" in out.decode("utf-8"):
print("[ERROR] Cannot upload " + tar_artifact + " to pypi server " + pypi_server + "\n")
sys.exit(1)
except Exception as e:
print("[ERROR] Cannot find/execute twine -> " + twine + "\n" + str(e) + "\nPerhaps ~/.pypirc not exists?")
sys.exit(1)
use_plugin("filter_resources")
use_plugin("python.core")
use_plugin("python.unittest")
#use_plugin("python.pyfix_unittest")
use_plugin("python.install_dependencies")
use_plugin("python.pydev")
use_plugin("python.distutils")
use_plugin("copy_resources")
#use_plugin("source_distribution")
#use_plugin("python.flake8")
use_plugin("python.coverage")
#use_plugin('python.integrationtest')
#use_plugin("python.stdeb")
version = getversion()
# default_task = ["analyze", "publish", "package", "make_deb"]
default_task = ["analyze", "publish", "package"]
@init
def initialize(project):
project.build_depends_on('setuptools')
project.build_depends_on('unittest-xml-reporting')
project.build_depends_on('coverage')
project.build_depends_on('flake8')
project.build_depends_on('mock')
project.build_depends_on('unittest2')
project.build_depends_on('Flask')
project.depends_on('argparse')
project.depends_on('Flask')
project.set_property("coverage_threshold_warn", 85)
project.set_property("coverage_break_build", False)
#project.set_property("coverage_reset_modules", True)
#project.set_property('coverage_threshold_warn', 50)
#project.set_property('coverage_branch_threshold_warn', 50)
#project.set_property('coverage_branch_partial_threshold_warn', 50)
project.set_property("dir_dist_scripts", 'scripts')
project.set_property("copy_resources_target", "$dir_dist")
project.set_property('verbose', True)
project.set_property('flake8_verbose_output', True)
project.set_property('flake8_include_test_sources', True)
project.set_property('flake8_ignore', 'E501,E402,E731')
project.set_property('flake8_break_build', False)
project.set_property('deb_package_maintainer', 'Kay Vogelgesang <kay.vogelgesang@apachefriends.org>')
project.set_property('teamcity_output', False)
project.set_property("integrationtest_inherit_environment", True)
#project.get_property('filter_resources_glob', ['**/riainfocli/__init__.py'])
#project.include_file("riainfocli", "*.py")
#project.set_property('filter_resources_glob', ['**/zabbix_json_client.py'])
# project.depends_on('simplejson')
# project.get_property('copy_resources_glob').append('setup.cfg')
project.set_property("distutils_classifiers", [
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Development Status :: 2',
'Environment :: Console',
'Intended Audience :: Systems Administration',
'License :: OSI Approved :: GPL',
'Topic :: Software Development :: REST SYSTEM COLLECTOR'])
project.set_property('distutils_commands', ['bdist'])
project.set_property('distutils_commands', ['sdist'])
project.get_property('distutils_commands').append('bdist_egg')
| kvogelgesang/py-rest-sys-collect | build.py | build.py | py | 7,931 | python | en | code | 0 | github-code | 36 |
18798753930 |
class Song:
    """Represent a single song.

    Attributes:
        title (str): the song's title.
        artist (str): name of the song's creator.
        duration (int): length of the song in seconds; may be zero if unknown.
    """

    def __init__(self, title, artist, duration=0):
        self.title = title
        self.artist = artist
        self.duration = duration

    def get_title(self):
        """Return the song's title."""
        return self.title

    # ``name`` aliases ``title`` so generic name-based lookups
    # (e.g. find_object) work uniformly across Song/Album/Artist.
    name = property(get_title)
class Album:
    """Represent a song album.

    Attributes:
        name (str): the album's title.
        year (int): release year.
        artist (str): name of the album's creator ("various artist" if omitted).
        tracks (list[Song]): the album's track list.
    """

    def __init__(self, name, year, artist=None):
        self.name = name
        self.year = year
        self.artist = "various artist" if artist is None else artist
        self.tracks = []

    def add_song(self, song, position=None):
        """Add a song (by title) to the track list.

        Args:
            song: title of the song to add.
            position: optional index at which to insert; appended when None.

        Note: the matched/created track is always (re)inserted, even when a
        song with that name is already present in the list.
        """
        track = find_object(song, self.tracks)
        if track is None:
            track = Song(song, self.artist)
        if position is None:
            self.tracks.append(track)
        else:
            self.tracks.insert(position, track)
class Artist:
    """Represent an artist and their collection of albums.

    Attributes:
        name (str): the artist's name.
        albums (list[Album]): albums credited to this artist.
    """

    def __init__(self, name):
        self.name = name
        self.albums = []

    def add_album(self, album):
        """Append *album* to this artist's collection.

        Args:
            album (Album): the album object to store.
        """
        self.albums.append(album)

    def add_song(self, name, year, title):
        """Add *title* to the album called *name*, creating the album if needed.

        Args:
            name (str): album name to look up or create.
            year (int): release year used when the album must be created.
            title (str): song title to add to the album.
        """
        match = find_object(name, self.albums)
        if match is None:
            print(name + " not found")
            match = Album(name, year, self.name)
            self.add_album(match)
        else:
            print("foud album" + name)
        match.add_song(title)
def find_object(field, object_list):
    """Return the first item in *object_list* whose ``name`` equals *field*.

    Args:
        field: value to compare against each item's ``name`` attribute.
        object_list: iterable of objects exposing a ``name`` attribute.

    Returns:
        The first matching object, or None when no item matches.
    """
    # BUG FIX: the comparison was `item.name == False`, which never matches a
    # real name, so every lookup failed and duplicate artists/albums/songs
    # were created on each call.
    for item in object_list:
        if item.name == field:
            return item
    return None
def load_data():
    """Parse albums.txt (tab-separated) into a list of Artist objects.

    Each line is expected to hold: artist, album, year, song title.

    Returns:
        list[Artist]: one entry per distinct artist name in the file.
    """
    artists = []
    with open("albums.txt", "r") as album_file:
        for line in album_file:
            artist_field, album_field, year_field, song_field = line.strip('\n').split('\t')
            year_field = int(year_field)
            print("{}:{}:{}:{}".format(artist_field, album_field, year_field, song_field))
            artist = find_object(artist_field, artists)
            if artist is None:
                artist = Artist(artist_field)
                artists.append(artist)
            artist.add_song(album_field, year_field, song_field)
    return artists
def create_checkfile(artist_list):
    """Write every artist/album/track combination to checkfile.txt.

    Args:
        artist_list (list[Artist]): the loaded music library.
    """
    with open("checkfile.txt", 'w') as checkfile:
        for artist in artist_list:
            for album in artist.albums:
                for song in album.tracks:
                    print("{0.name}\t{1.name}\t{1.year}\t{2.title}".format(artist, album, song),
                          file=checkfile)
if __name__ == '__main__':
    # Load the library from albums.txt, report the artist count, then write a
    # round-trip check file for manual comparison against the source data.
    artists = load_data()
    print("there are {} artist".format(len(artists)))
    create_checkfile(artists)
#help(Song.__init__)
#print(Song.__doc__)
#print(Song.__init__.__doc__)
#print(Album.__doc__)
1544319051 | import json
import json
import pandas as pd

# Convert the raw IMDb actors.tsv dump into a slim actors.json file.
print("reading actors.tsv")
df = pd.read_csv('actors.tsv', sep='\t')

print("processing data")
# keep only the name columns and lowercase them for case-insensitive matching
df = df.drop(columns=['nconst', 'birthYear', 'primaryProfession', 'knownForTitles'])
df['primaryName'] = df['primaryName'].str.lower()
df = df.drop_duplicates()

print("serializing to json")
json_object = json.dumps(json.loads(df.to_json(orient="records")), indent=4)

print("writing to json file")
with open("actors.json", "w") as outfile:
    outfile.write(json_object)
print("Done converting actors.tsv to actors.json. Upload this to MongoDB")
| stephanieyaur/gg-project | actors_modifier.py | actors_modifier.py | py | 742 | python | en | code | null | github-code | 36 |
36772978754 | """
Given a binary tree, return the level order traversal of its nodes' values. (ie, from left to right, level by level).
For example:
Given binary tree [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
return its level order traversal as:
[
[3],
[9,20],
[15,7]
]
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def levelOrder(self, root):
        """Return the level-order (BFS) traversal of a binary tree.

        :type root: TreeNode
        :rtype: List[List[int]]

        Walks the tree one level at a time: collect the values of the current
        level, then gather all children into the next level's worklist.
        """
        levels = []
        current = [root] if root else []
        while current:
            levels.append([node.val for node in current])
            nxt = []
            for node in current:
                if node.left:
                    nxt.append(node.left)
                if node.right:
                    nxt.append(node.right)
            current = nxt
        return levels
| narendra-solanki/python-coding | BinaryTreeLevelOrder.py | BinaryTreeLevelOrder.py | py | 1,730 | python | en | code | 0 | github-code | 36 |
16968865127 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from django.utils.translation import ugettext_lazy as _
USERNAME_FIELD_HELP_TEXT = _(
'Required field. Length must not exceed 30 characters. Only letters, digits and symbols @/./+/-/_ are accepted.'
)
USERNAME_LENGTH_VALIDATION_TEXT = _('Username length must not exceed 30 characters.')
# ==============================================================================
# CustomerCreationForm
# ==============================================================================
class CustomerCreationForm(UserCreationForm):
    """Admin form for creating a customer; created users are flagged as staff."""

    class Meta(UserChangeForm.Meta):
        model = get_user_model()

    def __init__(self, *args, **kwargs):
        # NOTE(review): deliberately calls the grandparent's __init__ (skips
        # UserCreationForm.__init__) -- confirm this is intended.
        super(UserCreationForm, self).__init__(*args, **kwargs)
        self.fields['username'].help_text = USERNAME_FIELD_HELP_TEXT

    def clean(self):
        """Enforce the 30-character username limit on top of parent cleaning.

        BUG FIX: the original override neither called ``super().clean()`` nor
        returned ``cleaned_data``, silently dropping the parent form's
        cross-field validation.
        """
        cleaned_data = super(CustomerCreationForm, self).clean()
        if 'username' in self.cleaned_data:
            username = self.cleaned_data['username']
            if username and len(username) > 30:
                raise forms.ValidationError(USERNAME_LENGTH_VALIDATION_TEXT)
        return cleaned_data

    def save(self, commit=True):
        # Mark every new customer as staff; the instance is intentionally not
        # committed here (commit=False) so the admin machinery persists it.
        self.instance.is_staff = True
        return super(CustomerCreationForm, self).save(commit=False)
# ==============================================================================
# CustomerChangeForm
# ==============================================================================
class CustomerChangeForm(UserChangeForm):
    """Admin form for editing a customer, with an optional but unique e-mail."""

    email = forms.EmailField(required=False)

    class Meta(UserChangeForm.Meta):
        model = get_user_model()

    def __init__(self, *args, **kwargs):
        # Seed the optional email field from the bound instance.
        initial = kwargs.get('initial', {})
        instance = kwargs.get('instance')
        initial['email'] = instance.email or ''
        super(CustomerChangeForm, self).__init__(initial=initial, *args, **kwargs)
        self.fields['username'].help_text = USERNAME_FIELD_HELP_TEXT

    def clean(self):
        """Enforce the 30-character username limit on top of parent cleaning.

        BUG FIX: the original override neither called ``super().clean()`` nor
        returned ``cleaned_data``.
        """
        cleaned_data = super(CustomerChangeForm, self).clean()
        if 'username' in self.cleaned_data:
            username = self.cleaned_data['username']
            if username and len(username) > 30:
                raise forms.ValidationError(USERNAME_LENGTH_VALIDATION_TEXT)
        return cleaned_data

    def clean_email(self):
        # BUG FIX: guard against a missing/None value before calling .strip().
        email = (self.cleaned_data.get('email') or '').strip()
        if not email:
            # nullify empty email field in order to prevent unique index collisions
            return None
        customers = get_user_model().objects.filter(email=email)
        if len(customers) and (len(customers) > 1 or self.instance != customers[0]):
            msg = _("A customer with the e-mail address ‘{email}’ already exists.")
            raise forms.ValidationError(msg.format(email=email))
        return email

    def save(self, commit=False):
        self.instance.email = self.cleaned_data['email']
        return super(CustomerChangeForm, self).save(commit)
| infolabs/django-edw | backend/edw/admin/customer/forms.py | forms.py | py | 2,967 | python | en | code | 6 | github-code | 36 |
35396451951 | #!/usr/bin/env python3
# coding=utf-8
'''MDMForm 系统配置主窗口'''
import os
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QPalette, QPixmap, QIcon
from PyQt5.QtWidgets import QMainWindow,QMessageBox,QTableWidgetItem,QFileDialog
from PyQt5 import QtSql
from PyQt5.QtSql import QSqlQuery
from openpyxl import load_workbook,Workbook
import warnings
warnings.filterwarnings('ignore')
BASE_DIR= os.path.dirname(os.path.dirname(os.path.abspath(__file__) ) )
sys.path.append( BASE_DIR )
from ui.Ui_MDMForm import Ui_MDMForm
class MDMForm(QMainWindow,Ui_MDMForm):
    """Main master-data configuration window.

    Lists the configured tables (from xt_objects in the local SQLite db) and
    supports viewing their rows, importing/exporting them from/to Excel files,
    and pushing current data back into the bound Excel template.
    """

    def __tablesql(self):
        """Build the SQL that lists every configured table, used to match
        against the Excel template metadata.

        Returns:
            string: SELECT over xt_objects restricted to object_type='T'.
        """
        sql= 'SELECT object_id, \
            object_name, \
            object_name_cn, \
            object_desc, \
            template_file, \
            template_sheet, \
            start_row, \
            end_row \
            FROM xt_objects \
            where object_type=\'T\' \
            order by object_id ASC'
        return sql

    def __columnsql(self,id):
        """Build the SQL listing a table's columns and their Excel mappings.

        Args:
            id (int): configured table ID.

        Returns:
            string: SELECT over xt_objects restricted to object_type='C'.
        """
        sql= 'SELECT  \
            object_name, \
            object_name_cn, \
            object_desc, \
            column_mapping \
            FROM xt_objects \
            where object_type=\'C\' \
            and parent_object_id= '
        sql += str(id)
        #sql += ' and rim(column_mapping) !=\'\' '
        sql += ' order by column_mapping asc'
        return sql

    def __columns(self,id):
        """Map a table's column names to their display (Excel header) names.

        Args:
            id (int): configured table ID.

        Returns:
            dict: {column_name: display_name}.
        """
        cols = dict()
        if not self.db.isOpen():
            if not self.db.open():
                QMessageBox.critical(self, 'MDM', self.db.lastError().text())
                # NOTE(review): returns None here while callers expect a dict
                return
        query = QSqlQuery()
        if not query.exec(self.__columnsql(str(id))):
            QMessageBox.critical(self,'MDM', query.lastError().text())
        else:
            while query.next():
                cols[query.value('object_name')] = query.value('object_name_cn')
        return cols

    def __columnmap(self,id):
        """Map a table's column names to their Excel column letters.

        Args:
            id (int): configured table ID.

        Returns:
            dict: {column_name: excel_column_letter} (letter may be '').
        """
        cols = dict()
        if not self.db.isOpen():
            if not self.db.open():
                QMessageBox.critical(self, 'MDM', self.db.lastError().text())
                # NOTE(review): returns None here while callers expect a dict
                return
        query = QSqlQuery()
        if not query.exec(self.__columnsql(str(id))):
            QMessageBox.critical(self,'MDM', query.lastError().text())
        else:
            while query.next():
                cols[query.value('object_name')] = query.value('column_mapping')
        return cols

    def __insertsql(self,columnmap,tbname):
        """Build a parameterised INSERT for the mapped columns of *tbname*.

        Args:
            columnmap (dict): column name -> Excel column letter; columns with
                an empty mapping are skipped.
            tbname (string): target table name.

        Returns:
            string: INSERT statement with '?' placeholders.
        """
        sqlk = ''
        sqlv = ''
        for k,v in columnmap.items():
            if len(str(v))>0:
                if len(sqlk) > 0:
                    sqlk +=','
                    sqlv +=','
                sqlk += str(k)
                sqlv += '?'
        sql = 'insert into '
        sql += tbname
        sql += ' ('
        sql += sqlk
        sql += ') values ('
        sql += sqlv
        sql +=')'
        return sql

    def __init__(self):
        """Set up the UI, open the SQLite database and select the first table."""
        super().__init__()
        self.setupUi(self)
        self.setupUiEx()
        self.addConnect()
        self.db = QtSql.QSqlDatabase.addDatabase('QSQLITE')
        self.db.setDatabaseName(os.path.join(BASE_DIR,'db\\mdm.db'))
        self.initData()
        if self.MDMListWidget.count()>0:
            self.MDMListWidget.setCurrentItem(self.MDMListWidget.item(0))
            self.mdmListClick()

    def closeEvent(self, QCloseEvent):
        """Close the SQLite connection when the window closes."""
        # NOTE(review): `self.db.isOpen` is not called (missing parentheses),
        # so this condition is always truthy -- likely meant isOpen().
        if self.db.isOpen:
            self.db.close()

    def setupUiEx(self):
        """Extra UI setup: install the window icon."""
        palette = QPalette()
        icon = QIcon()
        appPath=os.path.join(BASE_DIR,u'res\\icon\\mdmconf.ico')
        icon.addPixmap(QPixmap(appPath))
        self.setWindowIcon(icon)

    def addConnect(self):
        """Wire widget signals to their handlers."""
        self.MDMListWidget.clicked.connect(self.mdmListClick)
        self.btnTemplate.clicked.connect(self.templateClick)
        self.btnImport.clicked.connect(self.importClick)
        self.btnExport.clicked.connect(self.exportClick)
        self.btnUpdate.clicked.connect(self.updateClick)

    def initData(self):
        """Populate the list widget with every configured table.

        Each list item stores the table's full configuration dict in its
        UserRole data for later retrieval by the click handlers.
        """
        self.MDMListWidget.clear()
        if not self.db.isOpen():
            if not self.db.open():
                QMessageBox.critical(self, 'MDM', self.db.lastError().text())
                return
        query = QSqlQuery()
        if query.exec(self.__tablesql()):
            while query.next():
                qItem = QtWidgets.QListWidgetItem()
                cols = dict()
                cols['object_id'] = query.value('object_id')
                cols['object_name'] = query.value('object_name')
                cols['object_name_cn'] = query.value('object_name_cn')
                cols['object_desc'] = query.value('object_desc')
                cols['template_file'] = query.value('template_file')
                cols['template_sheet'] = query.value('template_sheet')
                cols['start_row'] = query.value('start_row')
                cols['end_row'] = query.value('end_row')
                qItem.setData(QtCore.Qt.ItemDataRole.UserRole,cols)
                qItem.setText(query.value('object_name_cn'))
                self.MDMListWidget.addItem(qItem)
        else:
            QMessageBox.critical(self,'MDM', query.lastError().text())

    def showData(self,id,name):
        """Bind a configured table's rows to the data grid.

        Args:
            id (int): configured table ID.
            name (string): configured table name.
        """
        cols = self.__columns(str(id))
        self.dataTableWidget.clear()
        self.dataTableWidget.setRowCount(0)
        self.dataTableWidget.setColumnCount(len(cols))
        self.dataTableWidget.setHorizontalHeaderLabels(cols.values())
        # build "select col1,col2,... from <name>" from the configured columns
        sql = ''
        for col in cols.keys():
            if(len(sql))>0:
                sql += ','
            sql += str(col)
        sql = 'select ' + sql
        sql +=' from '
        sql += str(name)
        if not self.db.isOpen():
            if not self.db.open():
                QMessageBox.critical(self, 'MDM', self.db.lastError().text())
                return
        query = QSqlQuery()
        if not query.exec(sql):
            QMessageBox.critical(self,'MDM', query.lastError().text())
        else:
            while query.next():
                rows=self.dataTableWidget.rowCount()
                self.dataTableWidget.insertRow(rows)
                for i in range(len(cols)):
                    qtitem=QTableWidgetItem(str(query.value(list(cols.keys())[i])))
                    self.dataTableWidget.setItem(rows,i,qtitem)

    def mdmListClick(self):
        """Show the selected table's configuration and load its rows."""
        qItem=self.MDMListWidget.currentItem()
        tconfs = dict(qItem.data(QtCore.Qt.ItemDataRole.UserRole))
        self.dataLabel.setText(str(tconfs['object_name_cn']) + ' : ' + str(tconfs['object_desc']))
        self.temFile.setText(str(tconfs['template_file']))
        self.temSheet.setText(str(tconfs['template_sheet']))
        self.temStart.setText(str(tconfs['start_row']))
        self.temEnd.setText(str(tconfs['end_row']))
        self.showData(str(tconfs['object_id']),str(tconfs['object_name']))

    def templateClick(self):
        """Open the Excel template file bound to the selected table."""
        if self.MDMListWidget.count()<=0:
            QMessageBox.information(self,'MDM', '请先选择要打开配置文件对应的基础数据配置表')
            return
        qItem=self.MDMListWidget.currentItem()
        tconfs = dict(qItem.data(QtCore.Qt.ItemDataRole.UserRole))
        if str(tconfs['template_file']) =='':
            QMessageBox.information(self,'MDM', '当前基础数据表尚未配置对应的配置文件')
            return
        appPath=os.path.join(BASE_DIR,str(tconfs['template_file']))
        #subprocess.run(appPath)
        # Windows-only: let the shell open the file with its default program
        os.system('start ' + appPath)
        #os.startfile(appPath)
        return

    def importClick(self):
        """Import rows from a user-chosen Excel file into the selected table.

        Clears the target table first, then inserts one row per non-empty
        spreadsheet row between the configured start/end rows.
        """
        if self.MDMListWidget.count()<=0:
            QMessageBox.information(self,'MDM', '请先选择要重新导入数据的基础数据配置表')
            return
        fNames= QFileDialog.getOpenFileName(self,'导入基础数据', '/','Excel File (*.xlsx)')
        if not fNames[0]:
            return
        qItem=self.MDMListWidget.currentItem()
        tconfs = dict(qItem.data(QtCore.Qt.ItemDataRole.UserRole))
        sheetName = str(tconfs['template_sheet'])
        if QMessageBox.question(self, 'MDM', '确认更新模板配置表[' +sheetName + ']的数据?',QMessageBox.Yes|QMessageBox.No) == QMessageBox.No:
            return
        startRow = 2 # default when start_row is unset: data begins at row 2 (row 1 is the header)
        if str(tconfs['start_row']).isdigit():
            startRow=int(str(tconfs['start_row']))
        endRow = 0 # 0 means "no end row configured": load every following data row
        if str(tconfs['end_row']).isdigit():
            endRow=int(str(tconfs['end_row']))
        columnMap =dict()
        columnMap = self.__columnmap(str(tconfs['object_id']))
        try:
            wb= load_workbook(filename=fNames[0],read_only=True,data_only=True)
            if not (wb.sheetnames.index(sheetName) >= 0):
                QMessageBox.warning(self,'MDM', '选择的文件:' + fNames[0] + ',未包含配置指定的Sheet[' +sheetName + ']')
                wb.close()
                return
            ws=wb[sheetName]
            if endRow == 0:
                endRow = ws.max_row # type: ignore
            sql = 'delete from ' + str(tconfs['object_name'])
            if not self.db.isOpen():
                if not self.db.open():
                    QMessageBox.critical(self, 'MDM', self.db.lastError().text())
                    return
            query = QSqlQuery()
            if not query.exec_(sql):
                QMessageBox.warning(self,'MDM', '清空数据表[' + str(tconfs['object_name_cn'] + ':' + query.lastQuery() + ']失败' + query.lastError().text()))
                wb.close()
                return
            sql = self.__insertsql(columnMap,str(tconfs['object_name']) )
            query.prepare(sql)
            for iRow in (range(startRow,endRow+1)):
                # rows whose mapped cells are all empty/whitespace are skipped
                bAllEmptyflag = True
                for k,v in columnMap.items():
                    if len(str(v))<=0:
                        continue
                    if ws[str(v)+str(iRow)].value is None:
                        qvalue =''
                    else:
                        qvalue = str(ws[str(v)+str(iRow)].value)# type: ignore
                    if not(len(qvalue)==0 or qvalue.isspace()):
                        bAllEmptyflag = False
                    query.addBindValue(qvalue)
                # NOTE(review): values are bound via addBindValue before the
                # empty-row skip -- confirm skipped rows don't shift bindings
                # for the next exec().
                if bAllEmptyflag:
                    continue
                elif not query.exec():
                    QMessageBox.warning(self,'MDM', '执行语句[' + query.lastQuery() + ']失败,' + query.lastError().text())
                    wb.close()
                    return
            wb.close()
            self.showData(str(tconfs['object_id']),str(tconfs['object_name']))
            QMessageBox.information(self,'MDM', '导入数据[' + str(tconfs['object_name_cn'])+ ']完成')
        except (NameError,ZeroDivisionError):
            QMessageBox.critical(self, '动力电缆计算', '变量名错误或除数为0')
        except OSError as reason:
            QMessageBox.critical(self, '动力电缆计算', str(reason))
        except TypeError as reason:
            QMessageBox.critical(self, '动力电缆计算', str(reason))
        except :
            QMessageBox.information(self,'动力电缆计算','导出数据文件失败')

    def exportClick(self):
        """Export the selected table's rows to a new Excel file.

        Writes the display headers on the row above the configured start row,
        then one spreadsheet row per database row.
        """
        if self.MDMListWidget.count()<=0:
            QMessageBox.information(self,'MDM', '请先选择要重新导入数据的基础数据配置表')
            return
        fNames= QFileDialog.getSaveFileName(self,'下载基础数据', '/','Excel File (*.xlsx)')
        if not fNames[0]:
            return
        qItem=self.MDMListWidget.currentItem()
        tconfs = dict(qItem.data(QtCore.Qt.ItemDataRole.UserRole))
        sheetName = str(tconfs['template_sheet'])
        startRow = 2 # default when start_row is unset: data begins at row 2 (row 1 is the header)
        if str(tconfs['start_row']).isdigit():
            startRow=int(str(tconfs['start_row']))
        columnMap = dict()
        column = dict()
        columnMap = self.__columnmap(str(tconfs['object_id']))
        column = self.__columns(str(tconfs['object_id']))
        try:
            wb = Workbook()
            ws = wb.active
            ws.title = sheetName
            # header row: write each mapped column's display name
            for k,v in columnMap.items():
                if len(str(v))<=0:
                    continue
                ws[str(v)+str(startRow-1)] = column[k] # type: ignore
            sql = ''
            for col in columnMap.keys():
                if(len(sql))>0:
                    sql += ','
                sql += str(col)
            sql = 'select ' + sql
            sql +=' from '
            sql += str(tconfs['object_name'])
            if not self.db.isOpen():
                if not self.db.open():
                    QMessageBox.critical(self, 'MDM', self.db.lastError().text())
                    return
            query = QSqlQuery()
            if not query.exec(sql):
                QMessageBox.critical(self,'MDM', query.lastError().text())
                return
            iRow = startRow
            while query.next():
                for k,v in columnMap.items():
                    if len(str(v))<=0:
                        continue
                    ws[str(v)+str(iRow)] = str(query.value(str(k))) # type: ignore
                iRow += 1
            wb.save(fNames[0])
            # NOTE(review): wb.close is not called (missing parentheses)
            wb.close
            QMessageBox.information(self,'MDM','导出数据完成,文件名:' + fNames[0])
        except (NameError,ZeroDivisionError):
            QMessageBox.critical(self, '动力电缆计算', '变量名错误或除数为0')
        except OSError as reason:
            QMessageBox.critical(self, '动力电缆计算', str(reason))
        except TypeError as reason:
            QMessageBox.critical(self, '动力电缆计算', str(reason))
        except :
            QMessageBox.information(self,'动力电缆计算','导出数据文件失败')
        return

    def updateClick(self):
        """Refresh the bound Excel template file with the table's current rows."""
        if self.MDMListWidget.count()<=0:
            QMessageBox.information(self,'MDM', '请先选择要更新模板文件数据的基础数据配置表')
            return
        qItem=self.MDMListWidget.currentItem()
        tconfs = dict(qItem.data(QtCore.Qt.ItemDataRole.UserRole))
        if str(tconfs['template_file']) =='':
            QMessageBox.information(self,'MDM', '当前基础数据表尚未配置对应的配置文件')
            return
        tempfile=os.path.join(BASE_DIR,str(tconfs['template_file']))
        sheetName = str(tconfs['template_sheet'])
        if QMessageBox.question(self, 'MDM', '确认更新本地模板文件:' + tempfile + ',配置表[' + sheetName + ']的数据?',QMessageBox.Yes|QMessageBox.No) == QMessageBox.No:
            return
        startRow = 2 # default when start_row is unset: data begins at row 2 (row 1 is the header)
        if str(tconfs['start_row']).isdigit():
            startRow=int(str(tconfs['start_row']))
        try:
            columnMap = dict()
            column = dict()
            columnMap = self.__columnmap(str(tconfs['object_id']))
            column = self.__columns(str(tconfs['object_id']))
            wb = load_workbook(tempfile,False)
            if not (wb.sheetnames.index(sheetName) >= 0):
                QMessageBox.warning(self,'MDM', '选择的文件:' + tempfile + ',未包含配置指定的Sheet[' + sheetName + ']')
                wb.close()
                return
            ws=wb[sheetName]
            #maxRow = ws.max_row # type: ignore
            # clearing the document's old rows is not implemented yet (the
            # template may hold extra columns, e.g. pictures, not in the db)
            '''
            if startRow >1:
                for k,v in columnMap.items():
                    if len(str(v))<=0:
                        continue
                    QMessageBox.information(self,'MDM', str(column[k]))
                    ws[str(v)+str(startRow-1)] = str(column[k]) # type: ignore
            #更新标题暂未实现
            '''
            sql = ''
            for col in columnMap.keys():
                if(len(sql))>0:
                    sql += ','
                sql += str(col)
            sql = 'select ' + sql
            sql +=' from '
            sql += str(tconfs['object_name'])
            if not self.db.isOpen():
                if not self.db.open():
                    QMessageBox.critical(self, 'MDM', self.db.lastError().text())
                    return
            query = QSqlQuery()
            if not query.exec(sql):
                QMessageBox.critical(self,'MDM', query.lastError().text())
                return
            iRow = startRow
            while query.next():
                for k,v in columnMap.items():
                    if len(str(v))<=0:
                        continue
                    ws[str(v)+str(iRow)] = str(query.value(str(k))) # type: ignore
                iRow += 1
            wb.save(tempfile)
            # NOTE(review): wb.close is not called (missing parentheses)
            wb.close
            QMessageBox.information(self,'MDM','更新模板文件数据:' + tempfile + '完成')
        except (NameError,ZeroDivisionError):
            QMessageBox.critical(self, '动力电缆计算', '变量名错误或除数为0')
        except OSError as reason:
            QMessageBox.critical(self, '动力电缆计算', str(reason))
        except TypeError as reason:
            QMessageBox.critical(self, '动力电缆计算', str(reason))
        except :
            QMessageBox.information(self,'动力电缆计算','导出数据文件失败')
        return
16816398616 | '''import random
countries = ['gt', 'nic', 'cr']
population = {country: random.randint(1, 100)
for country in countries}
print(population)
result2 = {country: population for (country, population) in population.items() if population > 50}
print(result2)
text = 'Hola, si soy una mierda'
unique = {c: text.count(c) for c in text if c in 'aeiou'}
print(unique)'''
def message_creator(text):
    """Return the canned message for *text*, or a not-found notice.

    Args:
        text: article name to look up ('computadora', 'celular' or 'cable').

    Returns:
        str: the message associated with *text*, or 'Artículo no encontrado'.
    """
    respuestas = {'computadora': "Con mi computadora puedo programar usando Python",
                  'celular': "En mi celular puedo aprender usando la app de Platzi",
                  'cable': "¡Hay un cable en mi bota!"}
    # BUG FIX: removed a stray debug print that always emitted the 'cable'
    # message on any hit, and repaired the mojibake in the fallback string
    # ('ArtÃ­culo' -> 'Artículo').
    return respuestas.get(text, 'Artículo no encontrado')
# demo: look up a known article and print its message
text = 'celular'
response = message_creator(text)
print(response)
18187575174 | from casinos_manager import CasinosManager
from player import EmptyPlayer, HumanPlayer, MLPlayer, RuleBasePlayer, RandomPlayer
class PlayersManager:
    """Manage the five player slots of a Las Vegas game.

    Slots are 1-based for callers; empty slots hold EmptyPlayer placeholders.
    """

    def __init__(self, print_game: bool = True):
        # five fixed slots, initially all empty (slot indices are 1-based)
        self._player_slots = [EmptyPlayer(index=i + 1) for i in range(5)]
        self._print_game = print_game

    def __len__(self):
        """Return the number of occupied (non-empty) slots."""
        cnt = 0
        for _ in self._get_exist_players():
            cnt += 1
        return cnt

    def __str__(self):
        string = "[PLAYERS]"
        for player in self._player_slots:
            if isinstance(player, EmptyPlayer):
                continue
            string += '\n' + str(player)
        return string

    def reset_game(self):
        """Recompute the white-dice count and reset every slot for a new game."""
        self.set_num_white_dice()
        for player in self._player_slots:
            player.reset_game()

    def reset_round(self):
        """Reset every slot (including empty ones) for a new round."""
        for player in self._player_slots:
            player.reset_round()

    def set_num_white_dice(self, num_white_dice: int = None):
        """Set the number of neutral (white) dice each player rolls.

        When *num_white_dice* is None it is derived from the number of empty
        slots: 0 empty -> 0, 1-2 empty -> 2, 3 empty -> 4, otherwise 8.
        """
        if num_white_dice is None:
            empty_cnt = 0
            for player in self._player_slots:
                if isinstance(player, EmptyPlayer):
                    empty_cnt += 1
            if empty_cnt == 0:
                num_white_dice = 0
            elif empty_cnt == 1 or empty_cnt == 2:
                num_white_dice = 2
            elif empty_cnt == 3:
                num_white_dice = 4
            else:
                num_white_dice = 8
        for player in self._player_slots:
            player.set_num_white_dice(num_white_dice)

    def add_player(self, slot_index: int, player_type: str = "Human"):
        """Fill slot *slot_index* with a player of *player_type*.

        Returns:
            bool: True when the player was added; False when the slot was
            occupied or the type string is unknown.
        """
        if isinstance(self._player_slots[slot_index - 1], EmptyPlayer):
            if player_type == 'Human':
                self._player_slots[slot_index - 1] = HumanPlayer(index=slot_index, print_game=self._print_game)
                return True
            elif player_type == 'MLPlayer':
                self._player_slots[slot_index - 1] = MLPlayer(index=slot_index, print_game=self._print_game)
                return True
            elif player_type == 'MLPlayerTraining':
                self._player_slots[slot_index - 1] = MLPlayer(index=slot_index, print_game=self._print_game, train=True)
                return True
            elif player_type == 'RuleBase':
                self._player_slots[slot_index - 1] = RuleBasePlayer(index=slot_index, print_game=self._print_game)
                return True
            elif player_type == 'Random':
                self._player_slots[slot_index - 1] = RandomPlayer(index=slot_index, print_game=self._print_game)
                return True
            else:
                print("Player type {} not yet implemented".format(player_type))
                return False
        else:
            print("Slot {} is not empty".format(slot_index)) if self._print_game else None
            return False

    def del_player(self, slot_index: int):
        """Empty slot *slot_index* by replacing it with an EmptyPlayer."""
        self._player_slots[slot_index - 1] = EmptyPlayer(index=slot_index)

    def _get_exist_players(self):
        # yield the occupied slots in slot order
        for player in self._player_slots:
            if not isinstance(player, EmptyPlayer):
                yield player

    def get_num_players(self):
        """Return the number of occupied slots (same as len(self))."""
        return len(self)

    def get_players_info(self):
        """Return {player_index: {'num_dice', 'num_dice_white', 'money'}}."""
        players_info = {}
        for player in self._get_exist_players():
            num_dice, num_dice_white = player.get_num_dice()
            players_info[player.index] = {'num_dice': num_dice, 'num_dice_white': num_dice_white, 'money': player.get_money()}
        return players_info

    def get_ranking(self):
        """Return occupied slot indices sorted by player money, descending."""
        players_money = {}
        for player_i, player in enumerate(self._player_slots):
            if not isinstance(player, EmptyPlayer):
                players_money[player_i + 1] = player.get_money()
        ranking = []
        for player_index, player_money in sorted(players_money.items(), key=lambda x: x[1], reverse=True):
            ranking.append(player_index)
        return ranking

    def add_banknotes(self, players_win: dict):
        """Distribute won banknotes to players; index 0 (the bank) is skipped."""
        for player_index, banknotes in players_win.items():
            if player_index == 0:
                continue
            self._player_slots[player_index - 1].add_banknotes(banknotes)

    def run_turn(self, casinos_manager: CasinosManager, game_info=None):
        """Let every player take a turn, placing their dice on casinos.

        Mutates game_info['casinos'] after each placement so later players see
        the updated board.

        Returns:
            bool: True when no player placed any dice (all are done).
        """
        all_done = True
        for player in self._get_exist_players():
            casino_index, dice = player.run_turn(game_info=game_info)
            if not casino_index:
                continue
            casinos_manager.add_dice(casino_index=casino_index, dice=dice)
            game_info['casinos'] = casinos_manager.get_casinos_info()
            all_done = False
        return all_done
| KeunhoByeon/LasVegas_Python | players_manager.py | players_manager.py | py | 4,667 | python | en | code | 0 | github-code | 36 |
27702917459 | from re import L
from flask import Flask
from flask import jsonify
from flask import request
from flask_restful import Api, Resource, reqparse
import json
import sys
from get_details import get_name
from process_swipe import process_swipe
import requests
import random
from eventlet import wsgi
import eventlet
from redis_instance import get_instance
from RecommendationEngine import get_swipe_stack
from mysql_connection import get_cursor
import traceback
import pymongo
# Storage handles shared by the resources below.
# NOTE(review): the MongoDB connection string is empty here -- presumably
# stripped before publishing; confirm it is injected in deployment.
mongod = pymongo.MongoClient("")
db = mongod["tat"]
collection = db["sessions"]
r = get_instance()
# 1 day
EXPIRATION_TIME = 86400
# Request parser for POST /swipe payloads.
# NOTE(review): "restuarantid" is misspelled but is part of the client-facing
# API contract -- do not rename without coordinating with the clients.
like_post_args = reqparse.RequestParser()
like_post_args.add_argument(
    "foodid", type=int, help="The ID of the food item swiped on")
like_post_args.add_argument("userid", type=str, help="Your UserID")
like_post_args.add_argument("restuarantid", type=int, help="The restaurants ID UserID")
like_post_args.add_argument("authtoken", type=str, help="Authorisation token")
like_post_args.add_argument(
    "islike", type=bool, help="If the like was like / dislike")
like_post_args.add_argument(
    "isfavourite", type=bool, help="If it was a super like")
like_post_args.add_argument("isGroup", type=bool, help="If the swipe stack you're getting is for a group")

# Request parser for POST /swipestack payloads.
swipestack_args = reqparse.RequestParser()
swipestack_args.add_argument(
    "lat", type=float, help="Lattitude of where to search recommendations"
)
swipestack_args.add_argument(
    "lng", type=float, help="Longitude of where to search recommendations"
)
swipestack_args.add_argument(
    "userid", type=str, help="The userID"
)
swipestack_args.add_argument("authtoken", type=str, help="Authorisation token")
swipestack_args.add_argument("code", type=int, help="Room code")
swipestack_args.add_argument("isGroup", help="If the swipe stack you're getting is for a group")
swipestack_args.add_argument(
    "distance", type=float, help="Radius of the circle to search within"
)

app = Flask(__name__)
api = Api(app)
class RecommenderController(Resource):
    """REST resource serving a personalised (or group) swipe stack."""

    # POST /swipestack
    def post(self):
        args = swipestack_args.parse_args()
        print(args)
        payload = {"authtoken": args.authtoken, "userid": args.userid}
        # FIX: use a dedicated name for the auth response; the original bound
        # it to `r`, shadowing the module-level Redis client of that name.
        resp = requests.post(
            'http://devapi.trackandtaste.com/user/authcheck', json=payload)
        if resp.status_code != 200:
            return '', resp.status_code
        try:
            data = get_swipe_stack(args.lat, args.lng, args.userid, args.distance, args.isGroup == "True", str(args.code))
        except Exception as e:
            print(e)
            print(traceback.format_exc())
            return '', 404  # We couldn't find any restaurants
        return json.loads(data), 200
class SwipeController(Resource):
    """REST resource recording a single swipe (like / dislike / favourite)."""

    # POST /swipe
    def post(self):
        args = like_post_args.parse_args()
        payload = {"authtoken": args.authtoken, "userid": args.userid}
        res = requests.post(
            'http://devapi.trackandtaste.com/user/authcheck', json=payload)
        if res.status_code != 200:
            # BUG FIX: previously returned `r.status_code`, but `r` is the
            # module-level Redis client (no such attribute -> AttributeError).
            return '', res.status_code
        try:
            process_swipe(args.userid, args.foodid,
                          args.islike, args.isfavourite)
            # If the swipe doesn't come from the group page, cache it in Redis.
            # Otherwise caching is handled by the websocket server / MongoDB.
            if not args.isGroup:
                print(args)
                r.lpush(f"Recommendations-{args.userid}", args.foodid)
                r.expire(f"Recommendations-{args.userid}", 7200)
                if args.islike or args.isfavourite:
                    r.lpush(f"Likes-{args.userid}", f"{args.foodid},{args.restuarantid}")
                    r.expire(f"Likes-{args.userid}", 7200)
        except Exception as e:
            print(e)
            print(traceback.format_exc())
            # Food item not found
            return '', 404
        return '', 201
class ItemController(Resource):
    """REST resource returning the items a user liked at a restaurant."""

    # GET /likeditems
    def get(self):
        args = request.args
        # If we're dealing with a group, the likes live in the Mongo session
        filtered = []
        if(args["isGroup"] == "true"):
            print("is group")
            room = collection.find_one({"code" : args["room"]}, {"restaurantsLiked": 1})
            userid = str(args["userID"])
            restaurantid = int(args["restaurantID"])
            for restaurant in room["restaurantsLiked"]:
                if restaurant["restaurantID"] == restaurantid:
                    for user in restaurant["likes"]:
                        if user["userID"] == userid:
                            for item in user["items"]:
                                filtered.append(str(item))
        else:
            print("Nota group")
            # We're dealing with individual swipe: likes are cached in Redis
            # as "foodid,restaurantid" strings
            likedItems = [i.decode("UTF-8") for i in r.lrange(f"Likes-{args['userID']}", 0, -1)]
            for item in likedItems:
                if item.split(',')[1] == str(args["restaurantID"]):
                    filtered.append(item.split(',')[0])
        if len(filtered) == 0:
            return '', 404
        cursor = get_cursor()
        # NOTE(review): the IN(...) clause is built by string joining; the
        # values come from Redis/Mongo rather than directly from the client,
        # but a parameterised query would still be safer.
        cursor.execute(f"SELECT Price, FoodNameShort, FoodID FROM FoodItem WHERE FoodID IN({','.join(filtered)});")
        result = cursor.fetchall()
        items = []
        for item in result:
            items.append({"price": str(item[0]),"name": item[1], "id": item[2]})
        print(items)
        return items, 200
# Route registrations for the REST API.
api.add_resource(SwipeController, "/swipe")
api.add_resource(RecommenderController, "/swipestack")
api.add_resource(ItemController, "/likeditems")
if __name__ == "__main__":
    # serve with eventlet's blocking WSGI server on all interfaces, port 8000
    wsgi.server(eventlet.listen(('', 8000)), app)
| mbruty/COMP2003-2020-O | recommender/main.py | main.py | py | 5,688 | python | en | code | 3 | github-code | 36 |
8857084207 | from tkinter import *
import core
class GUI:
    """
    py2048 GUI: main window with a top bar, the tile board, and a bottom bar.
    """
    windowtitle = "py2048"
    tilesize = 50
    tilepadding = 5
    topheight = 50
    bottomheight = 50

    def __init__(self, core):
        """Build the main window sized to the core's board dimensions.

        Args:
            core: game core exposing board_width and board_height.
        """
        self.core = core
        self.window = Tk()
        self.window.title(GUI.windowtitle)
        # board pixel size = tiles plus the padding gaps around/between them
        self.windowwidth = self.core.board_width * GUI.tilesize + (self.core.board_width + 1) * GUI.tilepadding
        self.windowheight = self.core.board_height * GUI.tilesize + (self.core.board_height + 1) * GUI.tilepadding + GUI.topheight + GUI.bottomheight
        self.window.geometry(str(self.windowwidth) + "x" + str(self.windowheight) + "+100+100")
        self.window.resizable(True, True)
        self.topframe = Frame(self.window, height=GUI.topheight)
        self.topframe.pack(side="top", fill="x")
        self.mainframe = Frame(self.window)
        self.mainframe.pack(expand=True, fill="both")
        # BUG FIX: the frame was assigned to `self.bottmframe` (typo), so the
        # pack() call below raised AttributeError.
        self.bottomframe = Frame(self.window, height=GUI.bottomheight)
        self.bottomframe.pack(side="bottom", fill="x")
34086497372 | from subprocess import call
import os,sys
def create_udb(repo_):
    """Create an Understand database (.udb) for *repo_* and analyze it.

    Args:
        repo_: filesystem path of the repository to index.

    Returns:
        str: name of the generated database (repo basename + '.udb').

    NOTE(review): shell=True with concatenated paths breaks on paths that
    contain spaces or shell metacharacters.
    """
    repo_name = os.path.basename(repo_)
    udb_name = repo_name + '.udb'
    print(repo_name, udb_name)
    commands = [
        'und create -languages python c++ java ' + udb_name,
        'und add -db ' + udb_name + ' ' + repo_,
        'und analyze -all ' + udb_name,
    ]
    for cmd in commands:
        call(cmd, shell=True)
    return udb_name
# python c++ java | akhilsinghal1234/mdd-intern-work | Extraction/batch.py | batch.py | py | 404 | python | en | code | 0 | github-code | 36 |
33677703357 | from app.models import TraceLog
import os
import sys
class Logger:
    """Static console logger that colours its output with ANSI escape codes.

    All methods are classmethods and write exclusively to stdout via print().
    """
    # HTTP method name wrapped in its display colour
    # (94 blue, 92 green, 93 yellow, 96 cyan, 91 red; "\033[m" resets).
    METHOD = {
        "GET": "\033[94mGET\033[m",
        "POST": "\033[92mPOST\033[m",
        "PUT": "\033[93mPUT\033[m",
        "PATCH": "\033[96mPATCH\033[m",
        "DELETE": "\033[91mDELETE\033[m"
    }
    @classmethod
    def log(cls, type: str, message: str):
        """Print *message* behind a bracketed, colour-tagged *type* label."""
        # NOTE(review): the second "\033[93m" looks like it was meant to be
        # the reset "\033[m"; as written the trailing "]:" stays yellow.
        # Confirm before changing — it alters the console output.
        log_type = f"\033[93m[{type}\033[93m]:"
        # Left-pad to 24 columns; the invisible escape bytes count toward the
        # padding, so the visible label is narrower than 24 characters.
        print(f"{log_type: <24}\033[m {message}")
    @classmethod
    def info(cls, message: str):
        """Log *message* with a green INFO tag."""
        cls.log("\033[92mINFO", message)
    @classmethod
    def error(cls, message: str):
        """Log *message* with a red ERROR tag."""
        cls.log("\033[91mERROR", message)
    @classmethod
    def exception(cls, exception: Exception):
        """Log an unexpected exception, adding file/line details when a
        traceback is available (expects to run inside an ``except`` block,
        since it reads ``sys.exc_info()``)."""
        exception_type, exception_value, exception_traceback = sys.exc_info()
        exception_name = getattr(exception_type, "__name__", "Exception")
        log_type = "\033[91mEXCEPTION"
        log_exception = f"<{exception_name}({exception.args}): {exception_value}>"
        cls.log(log_type, f"Unexpected Error {log_exception}")
        if exception_traceback is not None:
            # (head, tail) tuple of the offending source file's path.
            fname = os.path.split(exception_traceback.tb_frame.f_code.co_filename)
            cls.log(log_type, f"Unexpected Error {exception_type} {fname} {exception_traceback.tb_lineno}")
    @classmethod
    def middleware(cls, data: TraceLog):
        """Log one request/response trace: method, full URL, status, timing.

        Reads method/host/port/url/status_code/status_phrase/process_time
        from *data* (a TraceLog model instance).
        """
        point = "\033[95m\u2022\033[m"
        log_url = f"{cls.METHOD[data.method]: <6} {point} http://{data.host}:{data.port}{data.url}"
        # Green (32) for 200/201 responses, red (31) for everything else.
        status_color = 32 if data.status_code in (200, 201) else 31
        log_status = f"\033[{status_color}m{data.status_code} {data.status_phrase}"
        log_message = f"{log_url} {point} {log_status} \033[93m{data.process_time}ms\033[m"
        cls.log("\033[95mTRACE", log_message)
    @classmethod
    def service(cls, url: str, status_code: str, status_phrase: str, process_time: str):
        """Log one outbound service call (always rendered with the POST tag)."""
        point = "\033[95m\u2022\033[m"
        log_url = f"{cls.METHOD['POST']: <6} {point} {url}"
        status_color = 32 if status_code in (200, 201) else 31
        log_status = f"\033[{status_color}m{status_code} {status_phrase}"
        log_message = f"{log_url} {point} {log_status} \033[93m{process_time}ms\033[m"
        cls.log("\033[95mSERVICE", log_message)
| Mauricio-Silva/backend-user | app/utils/logger.py | logger.py | py | 2,230 | python | en | code | 0 | github-code | 36 |
876066722 | from invertpy.brain.mushroombody import PerfectMemory, WillshawNetwork
from invertpy.sense import CompoundEye
from invertsy.agent import VisualNavigationAgent
from invertsy.env.world import Seville2009, SimpleWorld
from invertsy.sim.simulation import VisualNavigationSimulation
from invertsy.sim.animation import VisualNavigationAnimation
import numpy as np
def main(*args):
    """Run a visual-navigation simulation on the first recorded Seville route."""
    route_db = Seville2009.load_routes(degrees=True)

    # Experiment configuration.
    display = True            # show the animation instead of saving it
    force_replace = True      # re-place the agent on the route when it drifts
    use_pca = True            # calibrate the memory input with PCA
    scan_count = 31           # number of scanning directions per step
    omm_count = 2000          # number of ommatidia in the compound eye

    print("Simple World simulation")
    for ant_id, route_id, route in zip(route_db['ant_no'], route_db['route_no'], route_db['path']):
        print("Ant#: %d, Route#: %d, steps#: %d" % (ant_id, route_id, route.shape[0]), end='')

        memory = PerfectMemory(omm_count)
        # mem = WillshawNetwork(nb_cs=nb_ommatidia, nb_kc=nb_ommatidia * 40, sparseness=0.01, eligibility_trace=.1)
        label = "vnsw-%s%s-scan%d-ant%d-route%d%s" % (
            memory.__class__.__name__.lower(),
            "-pca" if use_pca else "",
            scan_count, ant_id, route_id,
            "-replace" if force_replace else "")
        if omm_count is not None:
            label += "-omm%d" % omm_count
        print(" - Agent: %s" % label)

        eye = CompoundEye(nb_input=omm_count, omm_pol_op=0, noise=0., omm_rho=np.deg2rad(4),
                          omm_res=10., c_sensitive=[0, 0., 1., 0., 0.])
        agent = VisualNavigationAgent(eye, memory, nb_scans=scan_count, speed=.01)
        sim = VisualNavigationSimulation(route, agent=agent, world=SimpleWorld(), calibrate=use_pca,
                                         nb_scans=scan_count, nb_ommatidia=omm_count, name=label,
                                         free_motion=not force_replace)
        animation = VisualNavigationAnimation(sim)
        animation(save=not display, show=display, save_type="mp4", save_stats=not display)
        # sim(save=True)

        # Only the first route is simulated.
        break
# Script entry point: run the simulation with all warnings silenced
# (the simulation libraries emit many numeric/deprecation warnings).
if __name__ == '__main__':
    import warnings
    import sys
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")  # suppress warnings for the whole run
        main(*sys.argv)
| InsectRobotics/InvertSy | examples/test_vis_nav_simple_world.py | test_vis_nav_simple_world.py | py | 2,060 | python | en | code | 1 | github-code | 36 |
73547213863 | #!/usr/local/bin/python3
# coding=utf-8
import random
import copy
from Chord import Chord
from BaseEvent import BaseEvent
import Midi
from Utils import *
class Event (BaseEvent):
    """A musical event: a named chord/note reference with timing, octave,
    volume and channel information, parsed from the textual song format.

    Extends BaseEvent with a chord index (the ".0" in "@I.0"), an octave
    offset, an explicit pitch list and a mode code.
    """
    def __init__(self, name = None, index = None, time = None, duration = 4, octave = 0, volume = 100, pitches = None, mode = None, channel = 0):
        super().__init__(name, [], time, duration, volume, channel)
        self.index = index
        self.octave = octave
        # Mutable-default fix: the parameter used to default to a shared []
        # list; every argument-less Event aliased the same object.
        self.pitches = pitches if pitches is not None else []
        self.mode = mode
        self.ch = None          # resolved Chord, set by resolved_event()
        self.rescale = True     # rescale parsed notes into the chord's scale
        # BUG FIX: dict.update() returns None, so the original
        # "self.defaults = self.defaults.update({...})" replaced the defaults
        # dict inherited from BaseEvent with None. Update it in place.
        self.defaults.update({
            'N' : [],
            'o' : 0,
            'M' : None
        })
    def copy(self):
        """Return a shallow copy of this event (the chord, if any, is copied)."""
        e = Event(self.name, self.index, self.time, self.duration, self.octave, self.volume, self.pitches, self.mode, self.channel)
        e.midinotes = self.midinotes
        if self.ch != None:
            e.ch = self.ch.copy()
        return e
    def is_valid(self):
        """True when all fields required to render the event are set."""
        return None not in [self.name, self.time, self.duration, self.octave, self.volume]
    def parse_name(self, name):
        """Split "name.index" into the name and the (normalized) index part."""
        self.name = name
        parts = self.name.split('.')
        self.name = parts[0]
        if len(parts) > 1 and len(parts[1]) > 0:
            self.index = parts[1]
            self.index = self._normalize_index()
            #print("!!INDEX = " + str(self.index))
        return self
    def parse_arguments(self, text):
        """Parse the N (pitches), M (mode) and o (octave) options from text."""
        super().parse_arguments(text)
        value = BaseEvent.parse_op_int_array('N', text)
        if value != None: self.pitches = value
        value = BaseEvent.parse_op_int('M', text)
        if value != None: self.mode = value
        value = BaseEvent.parse_op_int('o', text)
        if value != None: self.octave = value
        return self
    def normalize(self):
        """Recompute midinotes from the resolved chord, applying the octave shift."""
        if self.ch != None and self.midinotes != None and len(self.midinotes) > 0:
            notes = self.notes(self.ch)
            if notes != None and len(notes) > 0:
                self.midinotes = [n + 12 * self.octave for n in notes]
                #print("OCTAVE %d" % self.octave)
        return self
    def renormalize(self):
        """Reset midinotes and normalize again.

        NOTE(review): normalize() skips events whose midinotes list is empty,
        so after the reset below it appears to be a no-op — confirm intended.
        """
        self.midinotes = []
        return self.normalize()
    def notes(self, chord):
        """Return the note numbers selected by self.index within *chord*."""
        notes = self._parse_index(self.index, chord)
        if self.rescale:
            return chord.rescale_notes(notes)
        return notes
    def resolve_chord_name(self, map, with_index = True):
        """Translate a symbolic name through *map* into its "@<chord>" form."""
        name = self.name
        if self.name == None:
            raise RuntimeError("Event name is none")
        if not self.name.startswith('@'):
            if not self.name in map:
                raise RuntimeError("Unrecognized event chord/note name %s, missing from map" % self.name)
            else:
                name = '@' + str(map[self.name])
        if with_index and self.index != None:
            name = name + (".%s" % self.index)
        return name
    def resolved_event(self, map, root, resolve_index = True):
        """Return a copy of this event with its chord resolved against map/root."""
        e = self.copy()
        e.name = e.resolve_chord_name(map, False)
        c = e.chord(map, root, 0)
        notes = copy.deepcopy(c.get_notes())
        e.ch = c
        if e.mode == None:
            e.mode = c.mode_code()
        e.pitches = notes
        e.midinotes = e.pitches[:]
        if resolve_index:
            if len(notes) < 3:
                names = ["@%d" % n for n in notes]
                e.name = ','.join(names)
            e.index = None
        e = e.normalize()
        return e
    def chord_symbol(self, map):
        """Build the "<chord>:<beats>" symbol string for this event."""
        if self.name == None:
            raise RuntimeError("No event name given")
        chord = None
        if self.name.startswith('@'):
            chord = self.name[1:]
        elif not self.name in map:
            raise RuntimeError("Unrecognized event chord/note name %s, missing from map" % self.name)
        else:
            chord = str(map[self.name])
        return chord + ":%.3f" % (self.duration / 4.0)
    def chord(self, map, root, time = 0):
        """Build a Chord object for this event at the given root and time."""
        symbol = self.chord_symbol(map)
        c = Chord().symbol_chord(symbol)
        if self.index != None:
            c.set_notes(self.notes(c), c.scale, c.chord_root)
        c.set_time((self.time + time) / 4.0)
        if not self.octave:
            self.octave = 0
        c.set_root(root + self.octave * 12)
        return c
    def get_defaults(self):
        """Default values for the formatting options this subclass adds."""
        defaults = super().get_defaults()
        # (The original literal listed 'N' twice; only the last value, [],
        # ever took effect, so the dead 'N': None entry is dropped.)
        defaults.update({
            'N' : [],
            'o' : 0,
            'M' : None
        })
        return defaults
    def get_final_notes(self):
        """Final MIDI notes with the octave shift applied."""
        return [n + 12 * self.octave for n in super().get_final_notes()]
    def format_name(self):
        """Format "name.index" (or just the name) for serialization."""
        name = self.name
        if name == None:
            return ''
        if self.index != None and len(self.index.strip()) > 0:
            name = name + (".%s" % self.index)
        return name
    def format(self):
        """Serialize the event, encoding pitches as a comma-joined list."""
        pitches = self.pitches
        if pitches != None and len(pitches) != 0:
            pitches = ','.join(["%d" % p for p in pitches])
        else:
            pitches = None
        return super().format({ 'o' : self.octave, 'M' : self.mode, 'N' : pitches })
    def _parse_num(self, value):
        """int(value), or None when value is not an integer string."""
        try:
            return int(value)
        except (TypeError, ValueError):  # was a bare except: narrow it
            pass
        return None
    def _parse_note_number(self, index):
        """Parse a bare note number; '*' picks a random pitch class 0-11."""
        num = None
        if index == None or len(index) == 0:
            return None
        if index == '*':
            num = random.choice(range(0, 12))
        else:
            num = self._parse_num(index)
        if num == None:
            return None
        return num
    @staticmethod
    def _split_num(index, delimiter):
        """Split "a<delim>b" into (a, b), mapping empty parts to None.

        NOTE(review): more than one delimiter (e.g. "1+2+3") yields three
        pieces and raises ValueError on the unpack below.
        """
        if index == None or len(index) == 0:
            return (None, None)
        pieces = index.split(delimiter)
        if len(pieces) < 2:
            return (None, None)
        (first, last) = pieces
        if len(first) == 0:
            first = None
        if len(last) == 0:
            last = None
        return (first, last)
    def _normalize_index(self):
        """Normalize the raw index string (currently disabled: returns as-is)."""
        # The early return below disables the normalization pass that used to
        # cancel out sharp/flat and +/- pairs; the dead code is kept verbatim.
        return self.index
        if self.index == None:
            return self.index
        self.index = str(self.index)
        if len(self.index) == 0:
            self.index = None
            return self.index
        prevlen = 0
        replacements = {
            '#b' : '', 'b#' : '', '♭#' : '', '#♭' : '',
            '♯b' : '', 'b♯' : '', '♭♯' : '', '♯♭' : '',
            '+-' : '', '-+' : ''
        }
        while prevlen != len(self.index):
            prevlen = len(self.index)
            for k, v in replacements.items():
                self.index = self.index.replace(k, v)
        return self.index
    def _parse_note_indicator(self, index, chord):
        """Parse an "n+m"/"n-m" offset form; returns chord.note(...) or None."""
        num = None
        if index == None or len(index) == 0 or \
            index.startswith('-') or index.startswith('+') or \
            (not '+' in index and not '-' in index):
            return None
        if '+' in index:
            (first, last) = self._split_num(index, '+')
        if '-' in index:
            (first, last) = self._split_num(index, '-')
        if first == None:
            return None
        num = self._parse_note_index(first, chord)
        if num == None:
            return None
        # NOTE(review): this parses the WHOLE index (e.g. "2+3") as the
        # offset, which always fails int() and falls back to 0; "last" is
        # never used. It looks like it should be self._parse_num(last) —
        # confirm against the song format before changing.
        offset = self._parse_num(index)
        if offset == None:
            offset = 0
        return chord.note(num, offset)
    def _parse_note_index(self, index, chord):
        """Parse a plain note number and map it through chord.note()."""
        num = self._parse_note_number(index)
        if num == None:
            return None
        # remap = [2, 1, 0]
        # if num in remap:
        #     num = remap[num] #dis gon get crazay
        return chord.note(num)
    def _parse_single_altered_index(self, index, chord):
        """Parse an index with a sharp/flat suffix; a backtick prefix on the
        suffix ("`b") marks the "no rescale" variant."""
        if any_suffix(index, ['`b', '`♭', '`#', '`♯']):
            pair = (index[-2:], index[0:-2])
        elif any_suffix(index, ['b', '♭', '#', '♯']):
            pair = (index[-1:], index[0:-1])
        else:
            return None
        (suffix, index) = pair
        num = self._parse_index(index, chord)[0]
        num += (0, -1)[suffix in ['b', '♭', '`b', '`♭']]
        num += (0, 1)[suffix in ['#', '♯', '`#', '`♯']]
        if not suffix.startswith('`'):
            return num #chord.rescale_note(num)
        return num
    def _parse_index(self, index, chord):
        """Parse an index expression into a list of note numbers.

        Tries, in order: offset forms ("2+1"), plain numbers, comma lists
        (recursively), then sharp/flat-suffixed forms. Raises RuntimeError
        for anything unrecognized.
        """
        initial = index
        if index == None or len(index) == 0:
            return None
        num = self._parse_note_indicator(index, chord)
        if num != None:
            return [num]
        num = self._parse_note_index(index, chord)
        if num != None:
            return [num]
        parts = index.split(',')
        if len(parts) > 1:
            result = []
            for part in parts:
                nums = self._parse_index(part, chord)
                result = result + nums
            return list(set(result))
        index = parts[0]
        num = self._parse_single_altered_index(parts[0], chord)
        if num == None:
            raise RuntimeError("Unrecognized format in _parse_index: %s" % initial)
        return [num]
        """
        # do the single parse
        last = index[-1]
        rest = index[0:-1]
        nums = self._parse_index(rest, chord)
        if last in ['b', '♭']:
            return [chord.rescale_note(nums[0] - 1)]
        if last in ['#', '♯']:
            return [chord.rescale_note(nums[0] + 1)]
        raise RuntimeError("Unrecognized format in _parse_index: %s" % initial)
        """
    @staticmethod
    def _variable(var, index):
        """Join a variable name and index as "var.index"."""
        if index != None and len(index) > 0:
            return var + "." + index
        return var
    @staticmethod
    def _time(time = 0):
        """Format a time option ("T<n>"); None means 0."""
        if time == None:
            time = 0
        return "T%d" % int(time)
    @staticmethod
    def _duration(duration = 4):
        """Format a duration option ("D<n>"); None means 4."""
        if duration == None:
            duration = 4
        return "D%d" % int(duration)
    @staticmethod
    def _octave(octave = None):
        """Format an octave option ("o+n"/"o-n"), or None when unset."""
        if octave == None or len(str(octave)) == 0:
            return None
        octave = int(octave)
        if octave > 0:
            return "o+%d" % octave
        return "o%d" % octave
def _test_events():
    """Smoke test: parse a spec string, resolve and format the events, then
    split them and write the result to a test MIDI file ('miditest')."""
    spec = "@IIIMaj7:T0:D4:c1 @I.0:T4.2:D4 @I.0♭:T4.2:D4 @I.0`♭:T4.2:D4 @IIIMaj7.*:T0:D4:c1"
    #spec = ":[60:T4:D4:c1"
    events = [e.resolved_event(None, 60, False).renormalize() for e in Event().parse_all(spec)]
    for e in events:
        print (e.format())
    print (Event.format_all(events))
    midi = Midi.Midi(1)
    midi.open('miditest')
    # Expand each event via bisect() before writing (bisect presumably
    # splits an event into smaller pieces — confirm in BaseEvent).
    newlist = []
    for e in events:
        newlist.extend(e.bisect())
    events = newlist
    for e in events:
        e.write(midi)
    print (Event.format_all(events))
    midi.close()
# Run the smoke test when this module is executed directly.
if __name__ == '__main__':
    _test_events()
| psenzee/MuGen | src/Event.py | Event.py | py | 9,557 | python | en | code | 0 | github-code | 36 |
def elimduplicados():
    """Read a count N from stdin, then N integers, and print the list with
    duplicates removed. Prints an error message when the count is not a
    non-negative integer.
    """
    lista = []
    entrada = input("Ingrese la cantidad de numeros en la lista: ")
    # BUG FIX: the original converted to int first and then called .isdigit()
    # on the int, which raises AttributeError on every call. Validate the raw
    # string before converting.
    if entrada.isdigit():
        n = int(entrada)
        for i in range(0, n):
            ele = int(input())
            lista.append(ele)
        print(list(set(lista)))
    else:
        print("El valor insertado no es un numero.")
69822409383 | # -*- coding: utf-8 -*-
"""Subclass of ``BasisSet`` designed to represent an OpenMX configuration."""
import collections
import json
import pathlib
from typing import Sequence
from importlib_resources import files
from aiida_basis.data.basis import PaoData
from ...metadata import openmx as openmx_metadata
from ..mixins import RecommendedOrbitalConfigurationMixin
from .basis import BasisSet
__all__ = ('OpenmxConfiguration', 'OpenmxBasisSet')
# Immutable (version, protocol, hardness) triple naming one released OpenMX
# PAO basis-set configuration, e.g. ('19', 'standard', 'soft').
OpenmxConfiguration = collections.namedtuple('OpenmxConfiguration', ['version', 'protocol', 'hardness'])
class OpenmxBasisSet(RecommendedOrbitalConfigurationMixin, BasisSet):
    """Subclass of ``BasisSet`` designed to represent a set of OpenMX PAOs.
    The `OpenmxBasisSet` is essentially a `BasisSet` with some additional constraints. It can only
    be used to contain the bases and corresponding metadata of the PAO basis sets included with
    the OpenMX source code.
    """
    _basis_types = (PaoData,)
    label_template = 'OpenMX/{version}/{protocol}/{hardness}'
    default_configuration = OpenmxConfiguration('19', 'standard', 'soft')
    valid_configurations = (
        OpenmxConfiguration('19', 'quick', 'soft'), OpenmxConfiguration('19', 'quick', 'hard'),
        OpenmxConfiguration('19', 'standard', 'soft'), OpenmxConfiguration('19', 'standard', 'hard'),
        OpenmxConfiguration('19', 'precise', 'soft'), OpenmxConfiguration('19', 'precise', 'hard')
        # FUTURE: add 2013 configurations
    )
    url_base = 'https://t-ozaki.issp.u-tokyo.ac.jp/'
    url_version = {'19': 'vps_pao2019/', '13': 'vps_pao2013/'}
    @classmethod
    def get_valid_labels(cls) -> Sequence[str]:
        """Return the tuple of labels of all valid OpenMX basis set configurations.
        :return: valid configuration labels.
        """
        configurations = set(cls.valid_configurations)
        return tuple(cls.format_configuration_label(configuration) for configuration in configurations)
    @classmethod
    def format_configuration_label(cls, configuration: OpenmxConfiguration) -> str:
        """Format a label for an `OpenmxConfiguration` with the required syntax.
        :param configuration: OpenMX basis set configuration.
        :returns: label.
        """
        return cls.label_template.format(
            version=configuration.version, protocol=configuration.protocol, hardness=configuration.hardness
        )
    @classmethod
    def get_configuration_metadata_filepath(cls, configuration: OpenmxConfiguration) -> pathlib.Path:
        """Return the filepath to the metadata JSON of a given `OpenmxConfiguration`.
        :param configuration: OpenMX basis configuration.
        :return: metadata filepath.
        """
        metadata_filename = f'{configuration.version}_{configuration.protocol}_{configuration.hardness}.json'
        return files(openmx_metadata) / metadata_filename
    @classmethod
    def get_configuration_metadata(cls, configuration: OpenmxConfiguration):
        """Return the metadata dictionary for an `OpenmxConfiguration`.
        :param configuration: OpenMX basis set configuration.
        :returns: metadata dictionary.
        :raises FileNotFoundError: if the metadata JSON does not exist.
        :raises OSError: if the metadata file exists but cannot be read.
        """
        metadata_filepath = cls.get_configuration_metadata_filepath(configuration)
        try:
            with open(metadata_filepath, 'r') as stream:
                metadata = json.load(stream)
        except FileNotFoundError as exception:
            raise FileNotFoundError(
                f'Metadata JSON for {cls.format_configuration_label(configuration)} could not be found'
            ) from exception
        except OSError as exception:
            raise OSError(
                f'Error while opening the metadata file for {cls.format_configuration_label(configuration)}'
            ) from exception
        return metadata
    @classmethod
    def get_element_metadata(cls, element: str, configuration: OpenmxConfiguration):
        """Return the metadata dictionary for an element from an OpenMX basis set configuration.
        :param element: IUPAC element symbol.
        :param configuration: OpenMX basis set configuration.
        :returns: element metadata.
        :raises: `ValueError` if the element does not exist in the configuration metadata.
        """
        configuration_metadata = cls.get_configuration_metadata(configuration)
        try:
            metadata = configuration_metadata[element]
        except KeyError as exception:
            # BUG FIX: the second part of this message was a plain (non-f)
            # string, so "{cls.format_configuration_label(configuration)}"
            # appeared literally in the error text instead of the label.
            raise ValueError(
                f'The element {element} does not have an entry in the metadata of '
                f'{cls.format_configuration_label(configuration)}'
            ) from exception
        return metadata
    # @classmethod
    # def get_url_file(cls, element: str, configuration: OpenmxConfiguration):
    #     """Return the URL for the PAO file for a given basis set label and element.
    #     :param element: IUPAC element symbol.
    #     :param configuration: basis set configuration.
    #     :returns: the URL from which the PAO basis file can be downloaded.
    #     :raises: `ValueError` if the configuration or the element symbol is invalid.
    #     """
    #     if configuration not in cls.valid_configurations:
    #         raise ValueError(f'{cls.format_configuration_label(configuration)} is not a valid configuration')
    #     element_metadata = cls.get_pao_metadata(element, configuration)
    #     url = cls.url_base + cls.url_version[configuration.version] + f'{element}/' + element_metadata['filename']
    #     return url
    # @classmethod
    # def get_urls_configuration(cls, configuration: OpenmxConfiguration):
    #     """Return the URLs for all the PAO files of a given OpenMX basis set configuration.
    #     :param configuration: OpenMX basis set configuration.
    #     :returns: list of URLs
    #     :raises: `ValueError` is the configuration is invalid.
    #     """
    #     if configuration not in cls.valid_configurations:
    #         raise ValueError(f'{cls.format_configuration_label(configuration)} is not a valid configuration')
    #     configuration_metadata = cls.get_configuration_metadata(configuration)
    #     url_base = cls.url_base + cls.url_version[configuration.version]
    #     urls = [
    #         url_base + f'{element}/' + metadata['filename'] for element, metadata in configuration_metadata.items()
    #     ]
    #     return urls
    @classmethod
    def get_md5s_configuration(cls, configuration: OpenmxConfiguration):
        """Return the MD5s for all the PAO files of a given OpenMX basis set configuration.
        :param configuration: OpenMX basis set configuration.
        :returns: dictionary of MD5s
        :raises: `ValueError` if the configuration is invalid.
        """
        if configuration not in cls.valid_configurations:
            raise ValueError(f'{cls.format_configuration_label(configuration)} is not a valid configuration')
        configuration_metadata = cls.get_configuration_metadata(configuration)
        md5s = {element: metadata['md5'] for element, metadata in configuration_metadata.items()}
        return md5s
    @classmethod
    def get_orbital_configs_configuration(cls, configuration: OpenmxConfiguration):
        """Return the orbital configuration tuples for all the PAO files of a given OpenMX basis set configuration.
        :param configuration: OpenMX basis set configuration.
        :returns: dictionary of orbital configurations, keyed by element.
        :raises: `ValueError` if the configuration is invalid.
        """
        if configuration not in cls.valid_configurations:
            raise ValueError(f'{cls.format_configuration_label(configuration)} is not a valid configuration')
        configuration_metadata = cls.get_configuration_metadata(configuration)
        orbital_configs = {
            element: metadata['orbital_configuration'] for element, metadata in configuration_metadata.items()
        }
        return orbital_configs
    def __init__(self, label=None, **kwargs):
        """Construct a new instance, validating that the label matches the required format."""
        if label not in self.get_valid_labels():
            raise ValueError(f'the label `{label}` is not a valid OpenMX basis set configuration label.')
        super().__init__(label=label, **kwargs)
| azadoks/aiida-basis | aiida_basis/groups/set/openmx.py | openmx.py | py | 8,289 | python | en | code | 0 | github-code | 36 |
74436895785 | # Author Chaudhary Hamdan
from functools import reduce
def factors(n):
    """Return the set of all positive divisors of n.

    Trial-divides only up to sqrt(n): every divisor i <= sqrt(n) is paired
    with its cofactor n // i. Using pure integer arithmetic (i * i <= n)
    instead of int(n ** 0.5) avoids float-precision errors for very large n,
    and n == 0 now returns an empty set instead of crashing reduce() with a
    TypeError on an empty sequence.
    """
    divisors = set()
    i = 1
    while i * i <= n:
        if n % i == 0:
            divisors.add(i)
            divisors.add(n // i)
        i += 1
    return divisors
# Driver: t test cases; each line gives n and k. For k > 0 count the divisors
# of n whose bit (k-1) is set; k == 0 or n == 0 short-circuit to 0.
t = int(input())
for _ in range(t):
    n,k = [int(x) for x in input().split()]
    if k == 0:
        print(0)
        continue
    if n == 0:
        print(0)
        continue
    setbit = 1<<(k-1)  # mask with only bit (k-1) set
    c = 0
    fact = list(factors(n))
    for a in fact:
        if a & setbit:
            c += 1
    print(c)
| hamdan-codes/codechef-unrated-contests | Codingo21_CODINGO01.py | Codingo21_CODINGO01.py | py | 536 | python | en | code | 2 | github-code | 36 |
43511996346 | import os
from django.test import TestCase
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured
from hitparade.models import *
from hitparade.utils import *
from hitparade.tests.helpers import HPIntegrationTestCase
from django_dynamic_fixture import G, get
import random
import sure
class HPEndpointTestCase(HPIntegrationTestCase):
    """Integration tests for the public v1 REST endpoints.

    Fixtures are created with django-dynamic-fixture's G(); assertions use
    the `sure` fluent DSL (resp.json.should...).
    """
    def test_teams(self):
        """GET /v1/teams/ returns one paginated result for one Team fixture."""
        t = G(Team)
        resp = self.get("/v1/teams/")
        resp.status_int.should.equal(200)
        # Paginated payload shape: next/previous links plus count/results.
        resp.json.should.have.key('next')
        resp.json.should.have.key('previous')
        resp.json['count'].should.equal(1)
        len(resp.json['results']).should.equal(1)
    def test_games(self):
        """GET /v1/games/ lists only upcoming games by default; the ?status=
        query parameter switches the filter."""
        g = G(Game, status=Game.STATUS_UPCOMING)
        g2 = G(Game, status=Game.STATUS_CLOSED)
        resp = self.get("/v1/games/")
        resp.status_int.should.equal(200)
        resp.json.should.have.key('next')
        resp.json.should.have.key('previous')
        # Only the upcoming game is listed by default.
        resp.json['count'].should.equal(1)
        len(resp.json['results']).should.equal(1)
        resp.json['results'][0]['status'].should.equal(Game.STATUS_UPCOMING)
        # Explicit status filter returns the closed game instead.
        resp = self.get("/v1/games/?status=%s" % Game.STATUS_CLOSED)
        resp.json['results'][0]['status'].should.equal(Game.STATUS_CLOSED)
    def test_players(self):
        """GET /v1/players/ lists players; filtering by an unknown team_id
        yields an empty result set."""
        t = G(Team)
        p = G(Player, team=t)
        resp = self.get("/v1/players/")
        resp.status_int.should.equal(200)
        resp.json.should.have.key('next')
        resp.json.should.have.key('previous')
        resp.json['count'].should.equal(1)
        len(resp.json['results']).should.equal(1)
        # A random (almost certainly non-existent) team id matches nothing.
        non_existant_team_id = random.randrange(0, 1000000)
        resp = self.get("/v1/players/?team_id=%i" % non_existant_team_id)
        len(resp.json['results']).should.equal(0)
| HitParade/hitparade | web/hitparade/hitparade/tests/integration/test_endpoints.py | test_endpoints.py | py | 1,924 | python | en | code | 1 | github-code | 36 |
72721111143 | from utilities import util
import binascii
# Challenge 52
STATE_LEN = 2 # 16 bits
AES_BLOCK_SIZE = 16  # AES operates on 16-byte blocks; inputs are padded to this
# merkle damgard construction using AES-128 as a compression function
def md_hash(message, state_len = STATE_LEN, H = None):
    """Merkle-Damgard hash over *message* with a state_len-byte chaining
    value, returning the digest as a hex bytes object.

    H, when given, overrides the fixed initial state (used to resume the
    chain mid-message when searching for collisions).
    """
    # initial state
    h = b''.join([util.int_to_bytes((37*i + 42) % 256) for i in range(state_len)])
    if not H:
        H = h
    M = util.padding(message, AES_BLOCK_SIZE)
    for i in range(len(M)//AES_BLOCK_SIZE):
        Mi = util.get_ith_block(M, i, AES_BLOCK_SIZE)
        # Compression step: AES-encrypt the block with the (padded) chaining
        # value and truncate to state_len bytes. (Argument order per
        # utilities.util.ecb_encrypt — presumably (plaintext, key); confirm.)
        H = util.ecb_encrypt(Mi, util.padding(H, AES_BLOCK_SIZE))[0:state_len]
    return binascii.hexlify(H)
# finds two colliding blocks for a given initial state
def find_block_collision(h):
    """Brute-force two distinct STATE_LEN-byte blocks that hash to the same
    chaining value when the chain starts from state *h*.

    Returns (block_a, block_b, new_state) where new_state is the shared
    raw digest. Scans pairs in ascending order, so the first collision in
    lexicographic (first, second) order is returned.
    """
    space = pow(256, STATE_LEN)
    for first in range(space):
        block_a = first.to_bytes(STATE_LEN, 'big')
        digest_a = md_hash(block_a, H = h)
        for second in range(first + 1, space):
            block_b = second.to_bytes(STATE_LEN, 'big')
            if md_hash(block_b, H = h) == digest_a:
                return (block_a, block_b, binascii.unhexlify(digest_a))
# generates 2^rounds colliding messages
def generate_many_collisions(rounds):
    """Build 2**rounds messages that all share one md_hash digest.

    Each round finds one single-block collision for the current chaining
    state; concatenating either colliding block onto every existing message
    doubles the set while preserving the common digest.
    """
    state = b''.join([util.int_to_bytes((37 * i + 42) % 256) for i in range(STATE_LEN)])
    messages = {b''}
    for _ in range(rounds):
        left, right, state = find_block_collision(state)
        left = util.padding(left, AES_BLOCK_SIZE)
        right = util.padding(right, AES_BLOCK_SIZE)
        # Every prefix forks into two: prefix+left and prefix+right.
        messages = {prefix + suffix for prefix in messages for suffix in (left, right)}
    return messages
# The "expensive" hash: same construction, one extra byte of internal state.
def md_hash_hard(m):
    return md_hash(m, state_len = STATE_LEN + 1)
# f(x) = cheap_hash(x) || hard_hash(x): the cascaded hash this challenge
# shows is no stronger than its strongest component.
def composed_hash(m):
    h1 = binascii.unhexlify(md_hash(m))
    h2 = binascii.unhexlify(md_hash_hard(m))
    return binascii.hexlify(h1 + h2)
if __name__ == '__main__':
    # Part 1: chaining per-block collisions through the Merkle-Damgard
    # structure gives 2**4 = 16 messages sharing one digest after 4 rounds.
    print('Part 1: Generating 16 colliding messages:')
    colliding_messages = generate_many_collisions(4)
    for m in colliding_messages:
        print('{}\t{}'.format(binascii.hexlify(m), md_hash(m)))
    print('Success!\n')
    # Part 2: among 2**22 cheap-hash collisions, the birthday bound makes a
    # pair collide under the wider "hard" hash too — and hence under the
    # concatenation of both hashes.
    print('Part 2: Generating two colliding messages:')
    colliding_messages = generate_many_collisions(22)
    hash_dict = {}
    for m in colliding_messages:
        h = md_hash_hard(m)
        if h in hash_dict:
            m1 = m
            m2 = hash_dict[h]
            break
        else:
            hash_dict[h] = m
    assert m1 != m2
    assert composed_hash(m1) == composed_hash(m2)
    print('m1: {}'.format(composed_hash(m1)))
    print('m2: {}'.format(composed_hash(m2)))
    print('Success!')
| fortenforge/cryptopals | challenges/iterated_hash_multicollisions.py | iterated_hash_multicollisions.py | py | 2,422 | python | en | code | 13 | github-code | 36 |
36315576627 | from __future__ import print_function
import sys
import json
import collections
import getopt
g_debug = False   # set by -d/--debug: echo intermediate values to stdout
g_indent = 4      # set by -t/--indent: indentation width of the output

def debug(s):
    """Print *s* with a DEBUG prefix, but only when --debug was given."""
    if g_debug:
        print("DEBUG> " + s)

def usage(s):
    """Write a usage summary for program name *s* to stderr."""
    sys.stderr.write("Usage: %s [-t <indent>] [-d] <[-f <json file>] | txt>\n"
                     % s)
    sys.stderr.write("\t-t: --indent\n")
    sys.stderr.write("\t-d: --debug\n")
    sys.stderr.write("\t-f: --file\n")
    sys.stderr.write("e.g.\n")
    sys.stderr.write("  %s -t 8 -d -f foo.json\n" % s)
    sys.stderr.write("  %s --indent=4 --debug -f foo.json\n" % s)
    sys.stderr.write("  %s '{\"A\": 123, \"B\": \"bcd\"}'\n" % s)

def main(argc, argv):
    """Parse options, load JSON (from -f <file> or the first positional
    argument), and pretty-print it with the configured indent.

    *argc* is accepted for call-compatibility but unused; the remaining
    argument count is taken from the parsed argv instead.

    :returns: 0 on success, 1 on a usage error.
    """
    json_file = None
    options, rargv = getopt.getopt(argv[1:],
                                   ":f:t:dh",
                                   ["file=", "indent=", "debug", "help"])
    for opt, arg in options:
        if opt in ("-d", "--debug"):
            global g_debug
            g_debug = True
        elif opt in ("-t", "--indent"):
            global g_indent
            g_indent = int(arg)
        elif opt in ("-f", "--file"):
            json_file = arg
        else:
            # -h/--help and anything unexpected both show usage and fail
            # (the two original branches were identical, so they are merged).
            usage(argv[0])
            return 1
    if json_file is None:
        if not rargv:
            usage(argv[0])
            return 1
        txt = rargv[0]
    else:
        with open(json_file, 'r') as f:
            txt = f.read()
    # The OrderedDict hook preserves key order from the input document.
    obj = json.loads(txt, object_pairs_hook=collections.OrderedDict)
    debug(str(type(txt)))
    debug(txt)
    debug(str(type(obj)))
    debug(str(obj))
    print(json.dumps(obj, indent=g_indent))
    return 0

if __name__ == '__main__':
    sys.exit(main(len(sys.argv), sys.argv))
| idorax/vCodeHub | sharpsword/python/jsonfmt.py | jsonfmt.py | py | 1,840 | python | en | code | 1 | github-code | 36 |
75071109224 | import requests
import pandas as pd
import numpy as np
import seaborn as sns
from bs4 import BeautifulSoup
import warnings
import nltk
#import surprise
import scipy as sp
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import stopwords
from nltk import word_tokenize, RegexpTokenizer
from nltk.stem import SnowballStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from datetime import datetime, time
import movieMender
class Generos:
    """Genre-based movie recommender backed by MovieLens-style CSV files in
    csv/ (movies, ratings, tags, plus a local "user 0" ratings file)."""
    def __init__(self):
        self.cargaDocumentos()
    def cargaDocumentos(self):
        """Load every CSV and build the merged ratings/tags dataframes plus
        the user x title ratings pivot table the recommenders rely on."""
        self.df_usuaarioO = pd.read_csv('csv/Usuario_0.csv', sep=';')
        self.df_usuaarioO = self.df_usuaarioO.drop(columns=["title"])
        # Mark every row of the local file as user 0, stamped with "now".
        # (NOTE(review): each iteration assigns whole columns, so one
        # assignment outside the loop would have the same effect.)
        for usuario_nuevo in range(len(self.df_usuaarioO["movieId"])):
            self.df_usuaarioO["userId"] = 0
            self.df_usuaarioO["timestamp"] = datetime.now()
        self.df_movies = pd.read_csv('csv/movies.csv')
        # Load the movies dataframe (titles/genres), dropping incomplete rows.
        self.df_movies = self.df_movies.dropna()
        self.df_ratings = pd.read_csv('csv/ratings.csv')
        self.df_ratings = self.df_ratings.dropna()
        self.df_tags = pd.read_csv('csv/tags.csv')
        self.df_tags = self.df_tags.dropna()
        # Prepend user 0's ratings to the global ratings table.
        self.df_ratings = pd.concat([self.df_usuaarioO, self.df_ratings], axis=0)
        self.df_movies_ratings = self.df_ratings.merge(self.df_movies)[
            ['userId', 'movieId', 'title', 'rating', 'genres']]
        self.df_movies_ratings_tags = pd.merge(self.df_movies_ratings, self.df_tags, how='outer')[
            ['userId', 'movieId', 'title', 'rating', 'genres', 'tag']]
        self.df_movies_ratings_tags["tag"] = self.df_movies_ratings_tags["tag"].str.lower()
        # self.df_movies_ratings_tags.fillna("vacio", inplace = True)
        self.ratings_table = self.df_movies_ratings.pivot_table(index='userId', columns='title', values='rating')
        # Replace the NaNs (unrated titles) with 0:
        self.ratings_table.fillna(0, inplace=True)
    def recomedacionPorGenero(self, nombrePelicula, n_similares):
        """Return up to n_similares movie titles whose genre vectors are most
        cosine-similar to the given movie's, excluding the movie itself."""
        n_similares=int(n_similares)
        # One-hot genre matrix: one row per movie, one column per known genre.
        genres = list(set([genre for genres in self.df_movies["genres"].str.split("|") for genre in genres]))
        genre_matrix = []
        for index, row in self.df_movies.iterrows():
            genre_list = row["genres"].split("|")
            genre_vector = [1 if genre in genre_list else 0 for genre in genres]
            genre_matrix.append(genre_vector)
        genre_matrix = pd.DataFrame(genre_matrix, columns=genres)
        contador = 1
        selected_movie = self.df_movies[self.df_movies["title"] == nombrePelicula]
        selected_movie_index = selected_movie.index[0]
        # Genre similarities between the selected movie and every movie.
        similarities = cosine_similarity(genre_matrix[selected_movie_index:selected_movie_index+1], genre_matrix).flatten()
        # Pair each similarity with its index and sort best-first.
        movie_list = [(index, similarity) for index, similarity in enumerate(similarities)]
        movie_list.sort(key=lambda x: x[1], reverse=True)
        listaSimilar = []
        for i in movie_list[0:n_similares]:
            listaSimilar.append(i)
        # The flag (bandera) records that the searched movie itself appeared
        # among the top matches and was skipped; it stays False otherwise.
        # When it is True after the loop, one extra candidate is appended so
        # the caller still receives the requested number of titles.
        listaPeliculasMostrar = []
        bandera=False
        if(n_similares>len(self.df_movies)):
            n_similares=len(self.df_movies)-1
        for movie in movie_list[0:n_similares]:
            if(nombrePelicula != self.df_movies.iloc[movie[0]]["title"]):
                listaPeliculasMostrar.append(self.df_movies.iloc[movie[0]]["title"])
                contador+=1
            else:
                bandera=True
        if(bandera):
            mov=movie_list[n_similares][0]
            listaPeliculasMostrar.append(self.df_movies.iloc[mov]["title"])
        return listaPeliculasMostrar #listaSimilar
    def predecirRatingDeUserAPeliculaPorSusGeneros(self, nombrePelicula, user_id):
        """Predict (as a string) user_id's rating for the given movie: the
        actual rating when already voted, otherwise the mean of the user's
        ratings on movies sharing at least one genre; "Vacio" when none."""
        user_id=int(user_id)
        yaVotado = self.df_movies_ratings[(self.df_movies_ratings['title']==nombrePelicula) & (self.df_movies_ratings['userId']==user_id)]["rating"].unique()
        if(len(yaVotado)!=0):
            prediction = yaVotado[0]
            return str(prediction)
        else:
            # Get the genres of the movie whose rating is being predicted.
            movie_genres = self.df_movies_ratings[self.df_movies_ratings['title']==nombrePelicula]["genres"].unique()
            generosPeli = movie_genres[0].split("|")
            # Filter the user's ratings down to movies with genres in common.
            user_ratings_ID = self.df_movies_ratings[self.df_movies_ratings['userId'] == user_id]
            user_ratings = user_ratings_ID.loc[user_ratings_ID['genres'].str.split('|').apply(lambda x: any(i in x for i in generosPeli))]
            # Average the user's ratings for the movies with shared genres.
            if user_ratings.empty:
                print()
                return "Vacio"
            else:
                #prediction = user_ratings_ID['rating'].mean()
                prediction = format(user_ratings['rating'].mean(), '.3f')
                return str(prediction)
    def recomendacionEnBaseGeneroPelisQueNoHaVistoUsuario(self, user_id, n_similares):
        """Recommend n_similares titles the user has not rated, ranked by how
        strongly their genres overlap the genres of the user's top-10 rated
        movies (each genre weighted by its frequency in that top 10)."""
        warnings.filterwarnings('ignore')
        user_id=int(user_id)
        n_similares=int(n_similares)
        #warnings.filterwarnings('ignore')
        df_movies_rating_user = self.df_movies_ratings[self.df_movies_ratings['userId']==user_id]
        df_movies_rating_user = df_movies_rating_user.sort_values(by='rating',ascending=False)
        # Take the user's 10 best-rated movies to see which genres they like.
        genero_mejor_rating_unicos = list(set([genre for genres in df_movies_rating_user.head(10)["genres"].str.split("|") for genre in genres]))
        # Count how often each of those genres appears in the top 10.
        genre_count = {}
        for g in genero_mejor_rating_unicos:
            genre_count[g] = df_movies_rating_user.head(10)['genres'].str.count(g).sum()
        # Sort the genre counts from most to least frequent.
        genero_mejor_rating = dict(sorted(genre_count.items(), key=lambda x: x[1], reverse=True))
        # Movies the user has not rated yet.
        df_movies_no_rating_user = self.df_movies[self.df_movies['movieId'].isin(df_movies_rating_user['movieId']) == False]
        # One column per liked genre holding its weight when the movie has it.
        for genre, weight in genero_mejor_rating.items():
            df_movies_no_rating_user[genre] = df_movies_no_rating_user["genres"].str.contains(genre).apply(lambda x: weight if x else 0)
        # Row-wise weight sum: higher means more liked genres in the movie.
        df_movies_no_rating_user["sumaPesos"] = df_movies_no_rating_user[genero_mejor_rating.keys()].sum(axis=1)
        # Rank candidates by that combined genre weight, best first.
        df_movies_no_rating_user = df_movies_no_rating_user.sort_values(by='sumaPesos',ascending=False)
        df_peliculas_mostrar = df_movies_no_rating_user['title'][0:n_similares]
        listaPeliculasMostrar = []
        contador = 1
        for movie in df_peliculas_mostrar:
            listaPeliculasMostrar.append(movie)
            contador+=1
        return listaPeliculasMostrar
17793223864 | from dgl.nn.pytorch.conv import SAGEConv
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
import numpy as np
from dgl import DGLGraph
from dgl.data import citation_graph as citegrh
import networkx as nx
class GraphSAGE(nn.Module):
    """GraphSAGE node classifier: an input conv, (n_layers - 1) hidden convs
    of width `n_hidden`, and an output conv producing `n_classes` scores.
    Dropout is applied to the input and after every non-final layer."""

    def __init__(self,
                 in_feats,
                 n_hidden,
                 n_classes,
                 n_layers,
                 activation,
                 dropout,
                 aggregator_type):
        super(GraphSAGE, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.activation = activation
        # Build the (in, out) dimension pairs for the whole conv stack,
        # then materialise it as a ModuleList in one go.
        dims = ([(in_feats, n_hidden)]
                + [(n_hidden, n_hidden)] * (n_layers - 1)
                + [(n_hidden, n_classes)])  # output layer: activation None
        self.layers = nn.ModuleList(
            SAGEConv(src_dim, dst_dim, aggregator_type) for src_dim, dst_dim in dims)

    def forward(self, graph, inputs):
        """Run the conv stack; the final layer's raw scores are returned."""
        h = self.dropout(inputs)
        last_idx = len(self.layers) - 1
        for idx, conv in enumerate(self.layers):
            h = conv(graph, h)
            if idx != last_idx:
                h = self.dropout(self.activation(h))
        return h
def load_cora_data():
    """Load the Cora citation dataset.

    Returns a tuple (n_classes, graph, features, labels, train_mask) with the
    tensors already converted to the dtypes the training loop expects.
    """
    data = citegrh.load_cora()
    return (data.num_classes,
            DGLGraph(data.graph),
            torch.FloatTensor(data.features),
            torch.LongTensor(data.labels),
            torch.BoolTensor(data.train_mask))
n_classes, g, features, labels, mask = load_cora_data()
# create GraphSAGE model
model = GraphSAGE(in_feats=features.size()[1],
                  n_hidden=16,
                  n_classes=n_classes,
                  n_layers=1,
                  activation=F.relu,
                  dropout=0.5,
                  aggregator_type='gcn')
# use optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# Per-epoch wall-clock durations; the first 3 epochs are skipped as warm-up.
dur = []
for epoch in range(50):
    model.train()
    if epoch >= 3:
        t0 = time.time()
    logits = model(g, features)
    loss = F.cross_entropy(logits[mask], labels[mask])
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if epoch >= 3:
        dur.append(time.time() - t0)
    # BUG FIX: the original passed only two arguments to a three-placeholder
    # format string (np.mean(dur) was a separate print() argument), which
    # raises IndexError on the first iteration; np.mean([]) during the
    # warm-up epochs is also undefined, so guard it.
    mean_t = np.mean(dur) if dur else float('nan')
    print("Epoch {:05d} | Loss {:.4f} | Time(s) {:.4f}".format(epoch, loss.item(), mean_t))
| Gabtakt/GNN-lab | GraphSAGE.py | GraphSAGE.py | py | 2,428 | python | en | code | 1 | github-code | 36 |
22564966347 | class Solution:
    def maximumDetonation(self, bombs: List[List[int]]) -> int:
        """LeetCode 2101: max bombs detonated by triggering a single bomb.

        Each bomb is [x, y, r]. Bomb i detonates bomb j when j's center lies
        within i's blast radius (directed — radii differ). Build that graph,
        DFS from every bomb, and return the largest reachable count.
        """
        # Edge i -> j iff dist(i, j)^2 <= r_i^2 (squared to avoid sqrt).
        graph = defaultdict(list)
        for i in range(len(bombs)):
            for j in range(len(bombs)):
                if i != j:
                    if( bombs[i][0] - bombs[j][0]) ** 2 + (bombs[i][1] - bombs[j][1]) ** 2 <= (bombs[i][2]) **2:
                        graph[i].append(j)
        def dfs(node, visited):
            # Returns the size of the visited set once the chain finishes.
            # NOTE(review): neighbours are added to `visited` both here and at
            # the top of the recursive call — redundant but harmless.
            visited.add(node)
            for neigh in graph[node]:
                if neigh not in visited:
                    visited.add(neigh)
                    dfs(neigh,visited)
            return len(visited)
        ans = 0
        # Try every bomb as the one detonated first.
        for i in range(len(bombs)):
            ans = max(ans,dfs(i,set([i])))
return ans | miedan/competetive-programming | detonate-the-maximum-bombs.py | detonate-the-maximum-bombs.py | py | 756 | python | en | code | 0 | github-code | 36 |
42222604118 | """ new visualizations 2020
Revision ID: 437ffc36a821
Revises: d73f1a3bccf3
Create Date: 2020-07-16 19:48:01.228630
"""
from alembic import op
from sqlalchemy import String, Integer
from sqlalchemy.sql import table, column, text
from caipirinha.migration_utils import get_enable_disable_fk_command
# revision identifiers, used by Alembic.
revision = '437ffc36a821'
down_revision = 'd73f1a3bccf3'
branch_labels = None
depends_on = None
def insert_visualization_type():
    """Bulk-insert the 2020 visualization types into `visualization_type`."""
    vis_type = table(
        'visualization_type',
        column('id', Integer),
        column('name', String),
        column('help', String),
        column('icon', String))
    new_types = [
        (130, 'indicator', 'Gauge', 'fa-chart'),
        (131, 'markdown', 'Markdown text', 'fa-chart'),
        (132, 'word-cloud', 'Word cloud', 'fa-chart'),
        (133, 'heatmap', 'Heatmap', 'fa-chart'),
        (134, 'bubble-chart', 'Bubble chart', 'fa-chart'),
        (135, 'force-direct', 'Network graphs', 'fa-chart'),
        (136, 'iframe', 'HTML iframe', 'fa-chart'),
        (137, 'treemap', 'Treemap', 'fa-chart'),
    ]
    # Pair each tuple with the table's column names to build insert dicts.
    col_names = [c.name for c in vis_type.columns]
    op.bulk_insert(vis_type, [dict(zip(col_names, row)) for row in new_types])
def upgrade():
    """Apply this revision: insert the new visualization types atomically."""
    # ### commands auto generated by Alembic - please adjust! ###
    try:
        op.execute(text('BEGIN'))
        insert_visualization_type()
        op.execute(text('COMMIT'))
    # FIX: the bare `except:` (PEP 8 E722) is made explicit; BaseException
    # keeps the original semantics of rolling back on ANY failure
    # (including KeyboardInterrupt) before re-raising.
    except BaseException:
        op.execute(text('ROLLBACK'))
        raise
def downgrade():
    """Revert this revision: delete the visualization types it inserted."""
    try:
        op.execute(text('BEGIN'))
        op.execute(text(get_enable_disable_fk_command(False)))
        # BUG FIX: the original deleted ids (123, 124), which belong to an
        # earlier revision; this revision inserts ids 130-137, so those are
        # the rows that must be removed on downgrade.
        ids = '(130, 131, 132, 133, 134, 135, 136, 137)'
        op.execute(
            text("DELETE FROM visualization WHERE type_id IN " + ids))
        op.execute(
            text("DELETE FROM visualization_type WHERE id IN " + ids))
        op.execute(text(get_enable_disable_fk_command(True)))
        op.execute(text('COMMIT'))
    # Roll back on ANY failure and re-raise (explicit form of the original
    # bare `except:`).
    except BaseException:
        op.execute(text('ROLLBACK'))
        raise
| eubr-bigsea/caipirinha | migrations/versions/437ffc36a821_new_visualizations_2020.py | 437ffc36a821_new_visualizations_2020.py | py | 2,171 | python | en | code | 1 | github-code | 36 |
17792225774 | from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
import re
from builtins import open
from pants.backend.codegen.antlr.java.java_antlr_library import JavaAntlrLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.exceptions import TaskError
from pants.java.jar.jar_dependency import JarDependency
from pants.task.simple_codegen_task import SimpleCodegenTask
from pants.util.dirutil import safe_mkdir, safe_walk
from pants.util.memo import memoized_method
logger = logging.getLogger(__name__)
def antlr4_jar(name):
    # Pin every ANTLR 4 artifact to the same org and revision.
    return JarDependency(org='org.antlr', name=name, rev='4.1')
# Maps compiler key -> (tool classpath spec, default jar coordinates); used by
# AntlrJavaGen.register_options to register the antlr3/antlr4 JVM tools.
_DEFAULT_ANTLR_DEPS = {
    'antlr3': ('//:antlr-3.4', [JarDependency(org='org.antlr', name='antlr', rev='3.4')]),
    'antlr4': ('//:antlr-4', [antlr4_jar(name='antlr4'),
                              antlr4_jar(name='antlr4-runtime')])
}
# TODO: Refactor this and AntlrPyGen to share a common base class with most of the functionality.
# See comments there for what that would take.
class AntlrJavaGen(SimpleCodegenTask, NailgunTask):
  """Generate .java source code from ANTLR grammar files."""
  gentarget_type = JavaAntlrLibrary
  sources_globs = ('**/*.java',)

  class AmbiguousPackageError(TaskError):
    """Raised when a java package cannot be unambiguously determined for a JavaAntlrLibrary."""

  # TODO: Do we need this?
  def find_sources(self, target, target_dir):
    """Restrict the discovered generated sources to .java files."""
    sources = super(AntlrJavaGen, self).find_sources(target, target_dir)
    return [source for source in sources if source.endswith('.java')]

  @classmethod
  def register_options(cls, register):
    super(AntlrJavaGen, cls).register_options(register)
    # Register one JVM tool per supported compiler (antlr3 / antlr4).
    for key, (classpath_spec, classpath) in _DEFAULT_ANTLR_DEPS.items():
      cls.register_jvm_tool(register, key, classpath=classpath, classpath_spec=classpath_spec)

  def is_gentarget(self, target):
    return isinstance(target, JavaAntlrLibrary)

  def synthetic_target_type(self, target):
    return JavaLibrary

  def execute_codegen(self, target, target_workdir):
    """Run the ANTLR tool for `target`, emitting .java files under `target_workdir`."""
    args = ['-o', target_workdir]
    compiler = target.compiler
    if target.package is None:
      java_package = self._get_sources_package(target)
    else:
      java_package = target.package
    if compiler == 'antlr3':
      if target.package is not None:
        # FIX: logging.Logger.warn is a deprecated alias of warning().
        logger.warning("The 'package' attribute is not supported for antlr3 and will be ignored.")
      java_main = 'org.antlr.Tool'
    elif compiler == 'antlr4':
      args.append('-visitor')  # Generate Parse Tree Visitor As Well
      # Note that this assumes that there is no package set in the antlr file itself,
      # which is considered an ANTLR best practice.
      args.append('-package')
      args.append(java_package)
      java_main = 'org.antlr.v4.Tool'
    else:
      raise TaskError('Unsupported ANTLR compiler: {}'.format(compiler))

    antlr_classpath = self.tool_classpath(compiler)
    sources = self._calculate_sources([target])
    args.extend(sources)
    result = self.runjava(classpath=antlr_classpath, main=java_main, args=args,
                          workunit_name='antlr')
    if result != 0:
      raise TaskError('java {} ... exited non-zero ({})'.format(java_main, result))

    self._rearrange_output_for_package(target_workdir, java_package)
    if compiler == 'antlr3':
      self._scrub_generated_timestamps(target_workdir)

  def synthetic_target_extra_dependencies(self, target, target_workdir):
    # Fetch the right java dependency from the target's compiler option.
    return self._deps(target.compiler)

  @memoized_method
  def _deps(self, compiler):
    # Resolve the runtime jar spec configured for this compiler (cached).
    spec = self.get_options()[compiler]
    return list(self.resolve_deps([spec])) if spec else []

  # This checks to make sure that all of the sources have an identical package source structure,
  # and if they do, uses that as the package. If they are different, then the user will need to
  # set the package as it cannot be correctly inferred.
  def _get_sources_package(self, target):
    parents = {os.path.dirname(source) for source in target.sources_relative_to_source_root()}
    if len(parents) != 1:
      raise self.AmbiguousPackageError('Antlr sources in multiple directories, cannot infer '
                                       'package. Please set package member in antlr target.')
    return parents.pop().replace('/', '.')

  def _calculate_sources(self, targets):
    """Collect grammar sources (relative to the build root) from `targets` and their deps."""
    sources = set()

    def collect_sources(tgt):
      if self.is_gentarget(tgt):
        sources.update(tgt.sources_relative_to_buildroot())
    for target in targets:
      target.walk(collect_sources)
    return sources

  _COMMENT_WITH_TIMESTAMP_RE = re.compile(r'^//.*\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d')

  def _rearrange_output_for_package(self, target_workdir, java_package):
    """Rearrange the output files to match a standard Java structure.

    Antlr emits a directory structure based on the relative path provided
    for the grammar file. If the source root of the file is different from
    the Pants build root, then the Java files end up with undesired parent
    directories.
    """
    package_dir_rel = java_package.replace('.', os.path.sep)
    package_dir = os.path.join(target_workdir, package_dir_rel)
    safe_mkdir(package_dir)
    for root, dirs, files in safe_walk(target_workdir):
      # NOTE(review): `root` is an absolute path under target_workdir while
      # `package_dir_rel` is relative, so this guard appears to never match
      # (`package_dir` was probably intended) — confirm before relying on it.
      # Files already in package_dir are then renamed onto themselves, which
      # is a harmless no-op.
      if root == package_dir_rel:
        # This path is already in the correct location
        continue
      for f in files:
        os.rename(
          os.path.join(root, f),
          os.path.join(package_dir, f)
        )
    # Remove any empty directories that were left behind
    for root, dirs, files in safe_walk(target_workdir, topdown = False):
      for d in dirs:
        full_dir = os.path.join(root, d)
        if not os.listdir(full_dir):
          os.rmdir(full_dir)

  def _scrub_generated_timestamps(self, target_workdir):
    """Remove the first line of comment from each file if it contains a timestamp."""
    for root, _, filenames in safe_walk(target_workdir):
      for filename in filenames:
        source = os.path.join(root, filename)
        with open(source, 'r') as f:
          lines = f.readlines()
        # NOTE(review): this `return` aborts the whole scrub on the first
        # empty file, not just this file — confirm whether `continue` was meant.
        if len(lines) < 1:
          return
        with open(source, 'w') as f:
          if not self._COMMENT_WITH_TIMESTAMP_RE.match(lines[0]):
            f.write(lines[0])
          for line in lines[1:]:
            f.write(line)
| fakeNetflix/twitter-repo-pants | src/python/pants/backend/codegen/antlr/java/antlr_java_gen.py | antlr_java_gen.py | py | 6,475 | python | en | code | 0 | github-code | 36 |
22346293555 | class Game:
    def __init__(self, id):
        # id identifies this game session; per-player state (moves, wins,
        # moved flags) is indexed by player number 0 or 1.
        self.p_one_moved = False
        self.p_two_moved = False
        self.ready = False
        self.id = id
        self.moves = [None, None]
        self.wins = [0,0]
        self.ties = 0
    def get_player_move(self, p):
        # Return the stored move of player index p (0 or 1); None if not played.
        return self.moves[p]
    def play(self, player, move):
        # Record `move` for `player` and mark that they have moved this round.
        self.moves[player] = move
        if player == 0:
            self.p_one_moved = True
        else:
            self.p_two_moved = True
    def connected(self):
        # Reports the `ready` flag (initialised False; presumably flipped
        # elsewhere once both players have joined — set outside this class).
        return self.ready
    def both_p_moved(self):
        # True when both players have submitted a move this round.
        return self.p_one_moved and self.p_two_moved
def winner(self):
p_one = self.moves[0].upper()[0]
p_two = self.moves[1].upper()[0]
winner = -1
if p_one == "R" and p_two == "S":
winner = 0
elif p_one == "S" and p_two == "R":
winner = 1
elif p_one == "P" and p_two == "R":
winner = 0
elif p_one == "R" and p_two == "P":
winner = 1
elif p_one == "S" and p_two == "P":
winner = 0
elif p_one == "P" and p_two == "S":
winner = 1
return winner
    def reset_moves(self):
        # Start a new round: clear both players' "has moved" flags.
        self.p_one_moved = False
        self.p_two_moved = False
| guiltylogik/BasicPythonGames | multi_player/game.py | game.py | py | 1,259 | python | en | code | 0 | github-code | 36 |
74779852584 | from unittest import TestCase
from collections import namedtuple
from P2_Sorting.HeapSort.heap_sort import heap_sort
class Task(object):
    """An immutable unit-time task with a positive integer deadline and a
    positive penalty (incurred when the task is scheduled late)."""

    def __init__(self, deadline, penalty):
        assert isinstance(deadline, int) and deadline > 0
        assert penalty > 0
        self._d = deadline
        self._p = penalty

    @property
    def deadline(self):
        """Positive int: the time slot by which the task should run."""
        return self._d

    @property
    def penalty(self):
        """Positive number: the cost of missing the deadline."""
        return self._p
def _check_input_or_error(tasks):
if not tasks:
return
n = len(tasks)
for task in tasks:
assert task.deadline <= n
def _check_independent(tasks, deadline_counts, indices, length):
"""
Ex 16.5-2. O(|A|) running time algorithm to check whether a set A of tasks are independent.
:param tasks: all the tasks.
:param deadline_counts: a helper array, where deadline_counts[i] denotes how many tasks have deadlines no greater
than i + 1.
:param indices: indices of tasks to consider.
:param length: indices[0:length] will be considered, which means that length = |A|.
:return: whether the tasks considered are independent.
"""
for i in range(0, len(tasks)):
deadline_counts[i] = 0
for i in range(0, length):
task = tasks[indices[i]]
deadline_counts[task.deadline - 1] += 1
cumulative_deadline_counts = 0
for i in range(0, len(tasks)):
cumulative_deadline_counts += deadline_counts[i]
if cumulative_deadline_counts > i + 1:
return False
return True
def schedule_task(tasks):
    """
    O(n^2) running time algorithm to schedule unit-time tasks with deadlines and penalties to get
    the minimum total penalty (greedy over the scheduling matroid, CLRS 16.5).
    :param tasks: tasks to consider.
    :return: the optimal schedule of 'early' tasks, as a tuple of the tasks'
        original positions, ordered by increasing deadline.
    """
    _check_input_or_error(tasks)
    n = len(tasks)
    # Copy the list so reordering doesn't affect the caller's sequence.
    # NOTE(review): the Task objects themselves are shared, and an `index`
    # attribute is set on them below — a visible side effect on the inputs.
    tasks = list(tasks)
    for i in range(0, n):
        tasks[i].index = i
    # Greedy: consider tasks in order of decreasing penalty.
    heap_sort(tasks, key=lambda t: -t.penalty)
    schedule_on_sorted = [-1] * n
    early_count = 0
    deadline_counts = [0] * n
    for i in range(0, n):
        # Tentatively add task i; keep it only if the selection stays independent.
        schedule_on_sorted[early_count] = i
        if _check_independent(tasks, deadline_counts, schedule_on_sorted, early_count + 1):
            early_count += 1
    schedule = [-1] * early_count
    for i in range(0, early_count):
        schedule[i] = schedule_on_sorted[i]
    # Order the accepted tasks by deadline, then map back to original positions.
    heap_sort(schedule, key=lambda index: tasks[index].deadline)
    for i in range(0, early_count):
        schedule[i] = tasks[schedule[i]].index
    return tuple(schedule)
class TestTaskScheduling(TestCase):
    # Table-driven check of schedule_task; each case lists every acceptable
    # optimal schedule (some inputs admit more than one).
    def test_task_scheduling(self):
        case_class = namedtuple('Case', 'desc tasks schedules')
        cases = (
            case_class(desc='Empty', tasks=(), schedules=(
                (),
            )),
            case_class(desc='Single', tasks=(
                Task(1, 10),
            ), schedules=(
                (0,),
            )),
            case_class(desc='Two early', tasks=(
                Task(1, 10),
                Task(2, 20)
            ), schedules=(
                (0, 1),
            )),
            case_class(desc='Two late', tasks=(
                Task(1, 10),
                Task(1, 20)
            ), schedules=(
                (1,),
            )),
            # CLRS 16.5 worked example.
            case_class(desc='Example in textbook', tasks=(
                Task(4, 70),
                Task(2, 60),
                Task(4, 50),
                Task(3, 40),
                Task(1, 30),
                Task(4, 20),
                Task(6, 10),
            ), schedules=(
                (1, 3, 0, 2, 6),
            )),
            case_class(desc='Ex 16.5-1', tasks=(
                Task(4, 10),
                Task(2, 20),
                Task(4, 30),
                Task(3, 40),
                Task(1, 50),
                Task(4, 60),
                Task(6, 70),
            ), schedules=(
                (4, 3, 2, 5, 6),
                (4, 3, 5, 2, 6),
            )),
        )
        for case in cases:
            schedule = schedule_task(case.tasks)
            self.assertTrue(schedule in case.schedules, msg='%s, wrong schedule %s' % (case.desc, schedule))
| GarfieldJiang/CLRS | P4_AdvancedTech/Greedy/task_scheduling_with_matroid.py | task_scheduling_with_matroid.py | py | 4,191 | python | en | code | 0 | github-code | 36 |
71967566185 | import subprocess
import pytest
from pipfile2req.requirements import requirement_from_pipfile
def compare_requirements(left, right):
    # True when every line of `left` also appears somewhere in `right` —
    # a subset check on the line sets, NOT symmetric equality.
    return set(left.splitlines()).issubset(set(right.splitlines()))
# CLI round-trip tests: run pipfile2req with various flags and compare stdout
# with a golden requirements file (line-set comparison via compare_requirements).
@pytest.mark.parametrize(
    "command,golden_file",
    [
        ("pipfile2req -p tests", "tests/requirements.txt"),
        ("cd tests && pipfile2req", "tests/requirements.txt"),
        ("pipfile2req -p tests -d", "tests/dev-requirements.txt"),
        ("pipfile2req -p tests Pipfile", "tests/requirements-pipfile.txt"),
        ("pipfile2req -d tests/Pipfile", "tests/dev-requirements-pipfile.txt"),
        ("pipfile2req -d tests/Pipfile.lock", "tests/dev-requirements.txt"),
        ("pipfile2req -p tests --sources", "tests/requirements-sources.txt"),
        ("pipfile2req -p tests Pipfile --sources", "tests/requirements-pipfile-sources.txt"),
    ],
)
def test_convert_pipfile(command, golden_file):
    # Normalise CRLF so the comparison is platform-independent.
    proc = subprocess.Popen(
        command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    output, err = proc.communicate()
    with open(golden_file) as f:
        assert compare_requirements(
            output.decode("utf-8").strip().replace("\r\n", "\n"),
            f.read().strip().replace("\r\n", "\n"),
        )
def test_convert_include_hash():
    # The CLI should exit 0 when asked to emit hashes.
    command = "pipfile2req -p tests --hashes"
    proc = subprocess.Popen(
        command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    _, err = proc.communicate()
    print(err)
    assert proc.returncode == 0
# Unit tests for the Pipfile-entry -> requirement-string conversion.
@pytest.mark.parametrize("name,package,req", [
    ("foo", "*", "foo"),
    ("foo", {"version": "*"}, "foo"),
    ("foo", {"version": ">=1.0", "extras": ["test", "sec"]}, "foo[test,sec]>=1.0"),
    ("foo", {"file": "file:///data/demo-0.0.1.tar.gz"}, "foo @ file:///data/demo-0.0.1.tar.gz"),
    ("foo", {"file": "file:///data/demo-0.0.1.tar.gz", "extras": ["test", "sec"]}, "foo[test,sec] @ file:///data/demo-0.0.1.tar.gz"),
    ("foo", {"path": ".", "editable": True, "extras": ["test", "sec"]}, "-e .[test,sec]"),
    ("foo", {"version": ">=1.0", "markers": "os_name=='nt'", "python_version": "~='3.7'"}, 'foo>=1.0; os_name == "nt" and python_version ~= "3.7"'),
    ("foo", {"git": "https://github.com/foo/foo.git", "ref": "master", "subdirectory": "sub"}, "git+https://github.com/foo/foo.git@master#egg=foo&subdirectory=sub")
])
def test_convert_requirement(name, package, req):
    result = requirement_from_pipfile(name, package)
    assert result == req
| frostming/pipfile-requirements | test_pipfile_requirements.py | test_pipfile_requirements.py | py | 2,499 | python | en | code | 49 | github-code | 36 |
29498462869 | from main import validate_amount_payment, define_amount_hour, get_amount_hour, read_data_file
from constant.days_info import list_days
import pytest
# Happy path: a well-formed schedule line yields the expected payment message.
def test_validate_valid_line():
    assert validate_amount_payment(
        "THOMAS=MO08:00-12:00,TU10:00-13:00,TH01:00-04:00,SA14:00-18:00,SU20:00-23:00",
        define_amount_hour(list_days), 1) == "The amount to pay : THOMAS is 345"
# Malformed input: the line number is reported back instead of an amount.
def test_validate_invalide_line():
    assert validate_amount_payment(
        "INVALID LINE",
        define_amount_hour(list_days), 1) == "The line number 1 is not valid for process"
# Table-driven variant covering several employees plus the invalid case.
@pytest.mark.parametrize(
    "info_payment, hour_amount, line, expected",
    [
        (
            "THOMAS=MO08:00-12:00,TU10:00-13:00,TH01:00-04:00,SA14:00-18:00,SU20:00-23:00",
            define_amount_hour(list_days),
            1,
            "The amount to pay : THOMAS is 345"
        ),
        (
            "INVALID LINE",
            define_amount_hour(list_days),
            1,
            "The line number 1 is not valid for process"
        ),
        (
            "JHON=MO10:00-12:00,TH12:00-14:00,FR07:00-11:00,SU20:00-21:00",
            define_amount_hour(list_days),
            1,
            "The amount to pay : JHON is 165"
        ),
        (
            "RENE=MO10:00-12:00,TU10:00-12:00,TH01:00-03:00,SA14:00-18:00,SU20:00-21:00",
            define_amount_hour(list_days),
            1,
            "The amount to pay : RENE is 215"
        ),
        (
            "ASTRID=MO10:00-12:00,TH12:00-14:00,SU20:00-21:00",
            define_amount_hour(list_days),
            1,
            "The amount to pay : ASTRID is 85"
        )
    ]
)
def test_validate_valid_multiple_line(info_payment, hour_amount, line, expected):
    assert validate_amount_payment(info_payment, hour_amount, line) == expected
# None inputs produce the defined-amounts error message.
def test_get_amount_hour_fail_attribute_error():
    assert get_amount_hour(None, None, None) == "Invalid values for defined amounts"
# File reading: first element of the returned pair signals success/failure.
def test_open_file_success():
    result, _ = read_data_file('files/data.txt')
    assert result is True
def test_open_file_file_not_exists():
    result, _ = read_data_file('../files/data.txt')
assert result is False | jefvasquezg/acme | test/test_main.py | test_main.py | py | 2,193 | python | en | code | 0 | github-code | 36 |
42469623972 | import os
import morfeusz2
import pandas as pd
from sklearn.metrics import classification_report
def lemmatize_text(text):
    """Lemmatize Polish text with Morfeusz2.

    Accepts either a whitespace-separated string or a pre-tokenized list of
    words; returns the space-joined lemmas as a single string.
    """
    if isinstance(text, str):
        text = text.split()
    # NOTE(review): a Morfeusz instance is constructed on every call —
    # consider reusing one if this function is called in a loop.
    morf = morfeusz2.Morfeusz(expand_dag=True, expand_tags=True)
    text_new = []
    for word in text:
        # Take the first analysis; element [0][1] presumably holds the lemma,
        # with any ':'-suffixed tag stripped — TODO confirm against morfeusz2 docs.
        w = morf.analyse(word)[0][0][1].split(':')[0]
        if w == 'oko':
            # Special-case: presumably undoes "ok" being lemmatized to "oko".
            w = 'ok'
        text_new.append(w)
    return " ".join(text_new)
def create_dir(directory):
    """Create `directory` (including any missing parents) if it doesn't exist.

    BUG FIX: the original split os.path.abspath(directory) on '\\' and joined
    the pieces with '//', which only worked for Windows-style paths — on POSIX
    the split yields a single element and intermediate parents were never
    created. os.makedirs with exist_ok=True is the portable equivalent.
    """
    os.makedirs(directory, exist_ok=True)
def mapping_from_clusters(x):
    """Map a cluster label to a sentiment tag: -1 -> 'neg', anything else -> 'pos'."""
    return 'neg' if x == -1 else 'pos'
def classification_report_to_excel(y_test, y_pred, filename):
    # Build a two-class sklearn classification report as a dict, transpose it
    # so classes/averages become rows, and write it to an Excel file.
    cr = classification_report(y_test, y_pred, output_dict=True, target_names=['Negative', 'Positive'])
    pd.DataFrame(cr).T.to_excel(filename)
| kingagla/reviews_classification | scripts/utils.py | utils.py | py | 1,049 | python | en | code | 3 | github-code | 36 |
70942499944 | from pyspark.sql import SparkSession
from pyspark.sql.functions import col
import boto3
session = boto3.Session(profile_name="***_AdministratorAccess",region_name="us-east-1")
s3 = boto3.resource('s3')
# Inicialize a sessão do Spark
spark = SparkSession.builder.getOrCreate()
# Leia os arquivos Parquet e crie os dataframes
df_imdb = spark.read.parquet("natalias-s3-bucket/Trusted/Parquet/Movies/CSV/")
df_tmdb = spark.read.parquet("natalias-s3-bucket/Trusted/Parquet/Movies/JSON/")
# Selecione as colunas necessárias do dataframe do IMDB
df_imdb = df_imdb.select(
col("id").alias("idImdb"),
col("anolancamento").alias("anoLancamento"),
col("genero").alias("genero"),
col("tituloprincipal").alias("tituloPrincipal"),
col("notamedia").alias("notaMedia")
)
# Selecione as colunas necessárias do dataframe do TMDB
df_tmdb = df_tmdb.select(
col("id").alias("idTmdb"),
col("popularity").alias("popularity"),
col("vote_average").alias("voteAverage"),
col("vote_count").alias("voteCount"),
col("release_date").alias("releaseDate")
)
# Crie a tabela FatoFilmes
df_fato_filmes = df_imdb.join(df_tmdb, "idImdb")
# Crie a tabela DimensaoTmdb
df_dimensao_tmdb = df_tmdb.select(
col("idTmdb"),
col("genre_ids").alias("generos"),
col("original_language").alias("originalLanguage"),
col("releaseDate")
)
# Crie a tabela DimensaoImdb
df_dimensao_imdb = df_imdb.select(
col("idImdb"),
col("anoLancamento"),
col("genero"),
col("tituloPrincipal")
)
# Salve os dataframes resultantes como tabelas temporárias
df_fato_filmes.createOrReplaceTempView("FatoFilmes")
df_dimensao_tmdb.createOrReplaceTempView("DimensaoTmdb")
df_dimensao_imdb.createOrReplaceTempView("DimensaoImdb")
# Execute uma consulta para visualizar os resultados
result = spark.sql("""
SELECT
FatoFilmes.idImdb,
FatoFilmes.idTmdb,
FatoFilmes.notaMedia,
FatoFilmes.numeroVotos,
FatoFilmes.popularity,
FatoFilmes.voteAverage,
FatoFilmes.voteCount,
DimensaoTmdb.genres,
DimensaoTmdb.originalLanguage,
DimensaoTmdb.releaseDate,
DimensaoImdb.anoLancamento,
DimensaoImdb.genero,
DimensaoImdb.tituloPrincipal
FROM
FatoFilmes
JOIN
DimensaoTmdb ON FatoFilmes.idTmdb = DimensaoTmdb.idTmdb
JOIN
DimensaoImdb ON FatoFilmes.idImdb = DimensaoImdb.idImdb
""")
# Salve o DataFrame resultante no S3 em formato Parquet
result.write.parquet("s3://natalias-s3-bucket/Processed-Trusted/Parquet/Movies/resultedparquet") | nataliasguimaraes/compassuol | sprint_09/desafio_etl/processed_trusted/proc_trusted.py | proc_trusted.py | py | 2,573 | python | pt | code | 0 | github-code | 36 |
3060572520 | #!/usr/bin/python
# BUG FIX: the package is 'xlsxwriter' ('xlswriter' does not exist on PyPI);
# the API used below (Workbook/add_worksheet/set_column/merge_range) is
# XlsxWriter's.
import xlsxwriter

workbook = xlsxwriter.Workbook('merge1.xlsx')
worksheet = workbook.add_worksheet()

# BUG FIX: the closing quote was misplaced — set_column takes the column
# range and the width as two separate arguments, not one string 'B:D, 12'.
worksheet.set_column('B:D', 12)
worksheet.set_row(3, 30)
worksheet.set_row(6, 30)
worksheet.set_row(7, 30)

merge_format = workbook.add_format({
    'bold': 1,
    'border': 1,
    'align': 'center',
    'valign': 'vcenter',
    # NOTE(review): 'amber' is not one of XlsxWriter's named colors — an RGB
    # string such as '#FFBF00' may be needed; confirm against the docs.
    'fg_color': 'amber'})

worksheet.merge_range('B4:D4', 'Merged Range', merge_format)
worksheet.merge_range('B7:D8', 'Merged Range', merge_format)
workbook.close()
| psmano/pythonworks | pyworks/testxlswriter.py | testxlswriter.py | py | 497 | python | en | code | 0 | github-code | 36 |
9993947987 | import paho.mqtt.client as mqtt
import os, time
import random
from threading import Thread
import sys
# NOTE(review): broker credentials are hard-coded in source — they should be
# read from the environment or a config file, and rotated now that they are
# committed.
USERNAME = "ttdqymlc"
PASSWORD = "x8cN-GqZBJPK"
SERVER = "m16.cloudmqtt.com"
PORT = 14023
QOS = 0
# MQTT topics: commands arrive on topic_sub, replies/requests go to topic_pub.
topic_sub = "edgex2device"
topic_pub = "device2edgex"
# ------------------- data-processing helpers' constants -------------------
DEFAULT_NAME = "MasterDevice"
CMD_PUSH = "a"
CMD_PUT = "c"
URL_GET_DEVICE_BY_LABEL = "http://localhost:48081/api/v1/device/label/{}"
URL_POST_DISCOVERY = "http://localhost:49990/api/v1/discovery"
URL_PUT_COMMAND = "http://localhost:48082/api/v1/device/name/{}/command/{}"
URL_ADD_DEVICE = "http://localhost:48081/api/v1/device"
BODY_ADD_DEVICE = "{\"name\":\"%s\",\"adminState\":\"unlocked\",\"operatingState\":\"enabled\",\"protocols\":{\"zigbee\":{\"address\":\"%s\"}},\"service\":{\"name\":\"device-random\"},\"profile\":{\"name\":\"%s\"}}"
template_push = "a#{}#0#MasterRequest#3#value#{}#{}#{}#\n" # name#origin#MasterRequest#size#value#30#40#
# Field positions in a '#'-split PUT message: arr[3] = value count,
# arr[4:] = the values themselves.
INDEX_SIZE_PUT = 3
INDEX_VALUE_PUT = 4
my_name = DEFAULT_NAME
# Main-loop flag polled by th_process; cleared on shutdown.
flag_main_continous = True
# Queue of raw payloads filled by on_message and drained by th_process.
device_to_edgex_buf = []
# ------- Define Classes -------
class ClassThread(Thread):
    """Thread that runs a single callable supplied at construction time."""

    def __init__(self, func):
        # Not a daemon by default, matching the original behaviour.
        Thread.__init__(self)
        print(func.__name__)
        self.func = func

    def run(self):
        self.func()
# -------------------------------- Define Functions ----------------------------------
def set_value(str_):
    """Placeholder: applying a "name:value" resource update is not implemented yet;
    only prints a notice."""
    print("sorry, I haven't code fo this part")
def set_arr_values(arr):
    """Apply set_value() to every entry in `arr`."""
    for entry in arr:
        set_value(entry)
def device_repput_edgex(arr):
    # Handle a PUT request: arr[INDEX_SIZE_PUT] holds the number of values,
    # the following `size` fields are the values to apply locally.
    print("Thuc hien lenh PUT:")
    size = arr[INDEX_SIZE_PUT]
    arr_values = arr[INDEX_VALUE_PUT: (INDEX_VALUE_PUT + int(size))]
    set_arr_values(arr_values)
def receive_proccess_edgex(input):
    # Parse a '#'-separated message from EdgeX; act only when it is addressed
    # to this device (field 1 == my_name) and is a PUT command (field 0).
    global my_name
    # NOTE(review): str.strip() returns a new string — this call has no
    # effect; `input = input.strip()` was probably intended.
    input.strip()
    arr = input.split('#')
    if str(arr[1]) == my_name:
        print("Device nhan duoc yeu cau:\n\t",input)
        if (arr[0] == CMD_PUT ):
            device_repput_edgex(arr)
        else:
            pass
def th_process():
    # Worker loop: drain messages queued by on_message and process them.
    # NOTE(review): this busy-waits when the buffer is empty — a
    # queue.Queue with a blocking get would avoid spinning.
    while flag_main_continous:
        try:
            if len(device_to_edgex_buf) > 0:
                data = device_to_edgex_buf.pop(0)
                receive_proccess_edgex(data)
        except IndexError:
            # pop(0) raced with another consumer — ignore and retry.
            pass
# -----------------------------------------------------------------------
def on_connect(client, userdata, flags, rc):
    # paho-mqtt connect callback: rc == 0 means success; flip the global
    # `Connected` flag that the main thread polls before subscribing.
    if rc == 0:
        print("Connected to broker")
        global Connected
        Connected = True
    else:
        print("Connection failed")
def on_message(client, userdata, message):
    # paho-mqtt message callback: queue the decoded payload for th_process().
    device_to_edgex_buf.append(message.payload.decode('utf-8'))
# --------------------------Console Menu---------------------------------------------
def menu():
    """Interactive console menu: build an EdgeX request from user input,
    publish it on topic_pub, then re-display the menu.

    NOTE(review): each action re-enters menu() recursively instead of
    looping, so the call stack grows with every action taken.
    """
    os.system("clear")
    print("---------------------> Tin hoc cong nghiep - DHBKHN <---------------------")
    print("1: Trigger Discovery")
    print("2: Add new device")
    print("3: Get new device by label")
    print("4: Send PUT Command")
    print("5: Quit")
    choice = input(">>> ")
    if choice == "1":
        # POST to the discovery endpoint; body is unused by this request type.
        url = URL_POST_DISCOVERY
        body = "body"
        request = template_push.format(my_name, "POST", url, body)
        client.publish(topic_pub, request)
        os.system("clear")
        print(request)
        print("Send Request: Discovery")
        input("Press to Return Menu")
        menu()
    elif choice == "2":
        # POST a new device description built from the BODY_ADD_DEVICE template.
        url = URL_ADD_DEVICE
        devname = input("name new device >>> ")
        address = input("protocols: zigbee\n\taddress >>> ")
        profile = input("name profile >>> ")
        body = BODY_ADD_DEVICE % (devname, address, profile)
        request = template_push.format(my_name, "POST", url, body)
        client.publish(topic_pub, request)
        os.system("clear")
        print(request)
        print("Add new device :", devname)
        input("Press to Return Menu")
        menu()
    elif choice == "3":
        # GET the devices carrying a given label.
        label = input("label >>> ")
        url = URL_GET_DEVICE_BY_LABEL.format(label)
        body = "body"
        request = template_push.format(my_name, "GET", url, body)
        client.publish(topic_pub, request)
        os.system("clear")
        print(request)
        print("GET devices by label:", label)
        input("Press to Return Menu")
        menu()
    elif choice == "4":
        # PUT a command with a user-supplied JSON body to a named device.
        print ("-------------> Send PUT command <-------------")
        device = input("name Device >>> ")
        command = input("command >>> ")
        url = URL_PUT_COMMAND.format(device, command)
        body = input("body {\"a\":\"b\"} >>> ")
        request = template_push.format(my_name, "PUT", url, body)
        client.publish(topic_pub, request)
        os.system("clear")
        print(request)
        print("Send PUT to device:", device, "command:", command)
        input("Press to Return Menu")
        menu()
    elif choice == "5":
        pass
    os.system("clear")
if __name__ == "__main__":
    # Optional CLI argument overrides the device name.
    if (len(sys.argv) >= 2):
        my_name = sys.argv[1]
    print(my_name)
    Connected = False
    client = mqtt.Client()
    client.on_connect= on_connect
    client.on_message= on_message
    client.username_pw_set(username=USERNAME, password=PASSWORD)
    client.connect(SERVER, PORT, keepalive=30)
    # client.connect("localhost", 1883)
    client.loop_start()
    # Busy-wait until on_connect flips the Connected flag.
    while Connected != True:
        time.sleep(0.1)
    client.subscribe(topic_sub)
    # Background worker that drains the incoming-message buffer.
    # NOTE(review): Thread.setDaemon() is a deprecated alias of the
    # `daemon` attribute.
    thread_mqtt = ClassThread(th_process)
    thread_mqtt.setDaemon(True)
    thread_mqtt.start()
    try:
        menu()
    except KeyboardInterrupt:
        print("exiting")
    # Signal the worker loop to stop, then shut the MQTT client down.
    flag_main_continous = False
    client.disconnect()
    client.loop_stop()
| phanvanhai/DeviceService-Zigbee | demo/master_device.py | master_device.py | py | 6,353 | python | en | code | 0 | github-code | 36 |
71277311783 | class ListNode:
    def __init__(self, val=0, next=None):
        # Singly linked list node: payload `val` and pointer to the next node.
        self.val = val
        self.next = next
# 21-Merge-Two-Sorted-Lists
""" Logic: Use a loop to go through the linked
lists, store the smaller value in the
new result linkedlist."""
def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:
result = ListNode()
head = result
while list1 and list2:
if list1.val < list2.val:
head.next = list1
list1 = list1.next
else:
head.next = list2
list2 = list2.next
head = head.next
if list1:
head.next = list1
elif list2:
head.next = list2
return result.next
| aryanv175/leetcode | 21-Merge-Two-Sorted-Lists/solution.py | solution.py | py | 721 | python | en | code | 2 | github-code | 36 |
75084616422 | """
firebase.py
This module caches video information in Firebase using the user's id as the key.
Cached video entries include duration, title, channel name, category, and timestamp.
The timestamp acts as a TTL of 24 hours, and entries older than the TTL are updated by requesting
the video information from the YouTube API.
Functions:
- is_video_cached(): Checks if a video is cached in Firebase and not expired.
- get_uncached_video_ids(): Returns a list of uncached or expired video IDs.
- cache_video_data(): Caches video information in Firebase for a given video ID.
- cache_request(): Caches video information in Firebase if not already cached or expired.
"""
# Necessary imports
import os
import sys
import json
from datetime import datetime, timedelta
# Add the parent directory to sys.path to import local modules
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
# Local modules
from utils.imports import *
from utils.youtube_utils import get_video_information
# Credentials
from config.credentials import *
def is_video_cached(video_id, data_from_cache):
    """Return True when `video_id` has a complete cache entry younger than 24h.

    An entry is complete when it carries all of: timestamp, duration, title,
    channel_name and category. The timestamp (ISO-like, microsecond format)
    acts as the TTL reference point.
    """
    if not data_from_cache:
        return False
    entry = data_from_cache.get(video_id)
    if entry is None:
        return False
    required = ('timestamp', 'duration', 'title', 'channel_name', 'category')
    if any(field not in entry for field in required):
        return False
    cached_at = datetime.strptime(entry['timestamp'], "%Y-%m-%dT%H:%M:%S.%f")
    return datetime.now() - cached_at < timedelta(days=1)
def get_uncached_video_ids(video_ids, data_from_cache):
    """Return the subset of *video_ids* that lack a fresh cache entry.

    Order of *video_ids* is preserved; each id is kept when
    ``is_video_cached`` reports it as missing or expired.
    """
    return [vid for vid in video_ids if not is_video_cached(vid, data_from_cache)]
def cache_video_data(user_email, video_id, video_data):
    """Write *video_data* to Firebase under ``/{user_email}/{video_id}``.

    Best-effort, fire-and-forget: the HTTP response is deliberately not
    inspected, matching the module's caching style (a failed write simply
    means the video is re-fetched next time).
    """
    url = f'{FIREBASE_DB_URL}/{user_email}/{video_id}.json?auth={FIREBASE_API_KEY}'
    # PUT replaces the node wholesale; the previously captured (and unused)
    # response object has been dropped.
    requests.put(url, json.dumps(video_data))
def cache_request(youtube, video_ids):
    """Return per-video info for *video_ids*, serving from the Firebase cache.

    Loads the user's whole cache node, fetches any missing or expired videos
    from the YouTube API (writing them back to Firebase with a fresh
    timestamp), and returns a dict mapping video id to
    ``{'duration': timedelta, 'title': str, 'channel_name': str, 'category': str}``.
    Videos with unparseable cached durations are silently omitted.
    """
    # Firebase keys may not contain '@' or '.'; derive a safe node name.
    user_email = USER_ID.replace('@', '-').replace('.', '-')
    video_info = {}
    # Fetch the entire cache node for this user in a single request.
    url = f'{FIREBASE_DB_URL}/{user_email}.json?auth={FIREBASE_API_KEY}'
    response = requests.get(url)
    data_from_cache = response.json()
    if data_from_cache is None:
        data_from_cache = {}
    uncached_video_ids = get_uncached_video_ids(video_ids, data_from_cache)
    # Always bind video_data so the assembly loop below can consult it even
    # when everything was served from cache (previously a possible NameError).
    video_data = {}
    if uncached_video_ids:
        video_data = get_video_information(youtube, uncached_video_ids)
        # Write each freshly fetched video back to the cache.
        for video_id, data in video_data.items():
            # Store duration as an ISO 8601 string; timedelta is not
            # JSON-serializable.
            data['duration'] = isodate.duration_isoformat(data['duration'])
            # Timestamp starts the 24h TTL for this entry.
            data['timestamp'] = datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")
            data['category'] = data.get('category', 'Unknown')
            cache_video_data(user_email, video_id, data)
    # Assemble the result, preferring freshly fetched data over the cache
    # copy: an expired entry is refreshed above, so the stale cached values
    # must not shadow the new ones.
    for video_id in video_ids:
        source = video_data.get(video_id) or data_from_cache.get(video_id)
        if not source:
            continue
        try:
            video_info[video_id] = {
                'duration': timedelta(seconds=isodate.parse_duration(source['duration']).total_seconds()),
                'title': source['title'],
                'channel_name': source['channel_name'],
                # .get() tolerates legacy entries cached without a category.
                'category': source.get('category', 'Unknown'),
            }
        except isodate.isoerror.ISO8601Error:
            # Corrupt duration string: skip this video rather than crash.
            pass
    return video_info
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.