seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
21946827715 | import datetime
import time
from constants import STARTED
def get_uptime():
delta = datetime.datetime.utcnow() - STARTED
hours, remainder = divmod(int(delta.total_seconds()), 3600)
minutes, seconds = divmod(remainder, 60)
days, hours = divmod(hours, 24)
uptime = ("{} days, {:0=2}:{:0=2}:{:0=2}".format(days, hours, minutes, seconds))
return uptime
def get_uptime_fmt():
return "⌚ 稼働時間: {}".format(get_uptime())
| iamtakagi-lab/ai | src/uptime.py | uptime.py | py | 452 | python | en | code | 2 | github-code | 13 |
20221311264 | import json
import boto3
import base64
def lambda_handler(event, context):
client = boto3.resource("dynamodb") #Da acesso aos recursos do DynamoDB
table = client.Table("Secrets") #Da acesso a tabela "Secrets"
#Try e Except para capturar e tratar Exceções que possam ocorrer
try:
id = str(event['token']) #Pega o valor do token que foi passado pelo fetch
response = table.get_item( #Procura na tabela o item com o id, se não encontrar levanta uma exception
Key={
'secret_id': id
}
)
valor = response['Item'] #Pega o item da response e coloca num dictionary
encoded_secret = valor['secret'] #acessa o valor de secret no dictionary valor, o valor esta codificado
decoded = base64.b64decode(encoded_secret.encode('utf-8')) #decodifica o secret usando o decodificado de base64, utilizando o encode utf-8
decoded_secret = decoded.decode('utf-8')
return { #Sucesso, retorna um json com o segredo e o tempo de vida do segredo
'statusCode': 200,
'body': json.dumps(decoded_secret),
'lifetime':valor['lifetime']
}
except:
return { #A exception foi levantada, não havia qualquer token referente ao ID, retorna o status code de not found, 404
'statusCode': 404,
'message': 'Not found'
}
| ayrtonmarinho/getmysecret | back-end/gmsGetSecret/lambda_function.py | lambda_function.py | py | 1,519 | python | pt | code | 0 | github-code | 13 |
23617487370 | from __future__ import unicode_literals
import os
from django.db import models
from django.conf import settings
from django.utils.safestring import mark_safe
from django.template.defaultfilters import truncatechars
from django.db.models.signals import *
from django.dispatch import receiver
# monitoring.tools.pathAndRename import *
from .paketModels import *
from ..tools.pathAndRename import *
from ..tools.imageResize import *
# path_and_rename = PathAndRename(os.path.join(settings.MEDIA_ROOT, 'dokumentasi/'))
path_and_rename = PathAndRename('dokumentasi/')
class Monitoring(models.Model):
nama_paket = models.ForeignKey(Paket, on_delete=models.CASCADE)
catatan = models.TextField(blank=True)
# foto = models.ImageField(upload_to='dokumentasi/')
# foto1 = models.ImageField(upload_to=path_and_rename, default='-')
foto1 = models.ImageField(upload_to=path_and_rename, default='-')
foto2 = models.ImageField(upload_to=path_and_rename, default='-')
foto3 = models.ImageField(upload_to=path_and_rename, default='-')
foto4 = models.ImageField(upload_to=path_and_rename, default='-')
tanggal_upload = models.DateTimeField(auto_now_add=True)
tanggal_monitoring = models.DateField()
persen_fisik = models.DecimalField(default=0, max_digits=5, decimal_places=2)
persen_keuangan = models.DecimalField(default=0, max_digits=5, decimal_places=2)
tampil = models.BooleanField(default=True)
prioritas = models.PositiveSmallIntegerField(default=99)
nama_pptk = models.CharField(max_length=255, default="Nama PPTK", blank=False)
def save(self):
self.lebar = 900
self.tinggi = 700
resize1 = ImageResize(self.foto1, self.lebar, self.tinggi)
resize2 = ImageResize(self.foto2, self.lebar, self.tinggi)
resize3 = ImageResize(self.foto3, self.lebar, self.tinggi)
resize4 = ImageResize(self.foto4, self.lebar, self.tinggi)
self.foto1 = resize1.proses()
self.foto2 = resize2.proses()
self.foto3 = resize3.proses()
self.foto4 = resize4.proses()
super(Monitoring,self).save()
def get_nama_opd(self):
return Paket.nama_opd
def get_sumber_dana(self):
return self.nama_paket.dana
@property
def short_description(self):
return truncatechars(self.keterangan, 20)
def admin_photo(self):
return mark_safe('<img src="{}" width="100" />'.format(self.foto1.url))
admin_photo.short_description = 'Photo'
admin_photo.allow_tags = True
def sumber_dana(self):
return self.dana, " Tahun Anggaran ", self.tahun
def url_foto1(self):
# # returns a URL for either internal stored or external image url
# if self.externalURL:
# return self.externalURL
# else:
# # is this the best way to do this??
return os.path.join('/',settings.MEDIA_URL, 'dokumentasi/',os.path.basename(str(self.foto1)))
def url_foto2(self):
return os.path.join('/',settings.MEDIA_URL, 'dokumentasi/',os.path.basename(str(self.foto2)))
def url_foto3(self):
return os.path.join('/',settings.MEDIA_URL, 'dokumentasi/',os.path.basename(str(self.foto3)))
def url_foto4(self):
return os.path.join('/',settings.MEDIA_URL, 'dokumentasi/',os.path.basename(str(self.foto4)))
def image_tag(self):
# used in the admin site model as a "thumbnail"
return mark_safe('<img src="{}" width="100" height="100"/> '.format(self.url_foto1()) +
'<img src="{}" width="100" height="100"/> '.format(self.url_foto2()) +
'<img src="{}" width="100" height="100"/> '.format(self.url_foto3()) +
'<img src="{}" width="100" height="100"/>'.format(self.url_foto4()))
def __unicode__(self):
# add __str__() if using Python 3.x
return self.keterangan
# ==== delete image file when database is removed
@receiver(models.signals.post_delete, sender=Monitoring)
def auto_delete_file_on_delete(sender, instance, **kwargs):
"""
Deletes file from filesystem
when corresponding `MediaFile` object is deleted.
"""
if instance.foto1:
if os.path.isfile(instance.foto1.path):
os.remove(instance.foto1.path)
if instance.foto2:
if os.path.isfile(instance.foto2.path):
os.remove(instance.foto2.path)
if instance.foto3:
if os.path.isfile(instance.foto3.path):
os.remove(instance.foto3.path)
if instance.foto4:
if os.path.isfile(instance.foto4.path):
os.remove(instance.foto4.path)
@receiver(models.signals.pre_save, sender=Monitoring)
def auto_delete_file_on_change(sender, instance, **kwargs):
"""
Deletes old file from filesystem
when corresponding `MediaFile` object is updated
with new file.
"""
if not instance.pk:
return False
try:
old_foto1 = sender.objects.get(pk=instance.pk).foto1
except sender.DoesNotExist:
return False
new_foto1 = instance.foto1
if not old_foto1 == new_foto1:
if os.path.isfile(old_foto1.path):
os.remove(old_foto1.path)
# try remove file 2
try:
old_foto2 = sender.objects.get(pk=instance.pk).foto2
except sender.DoesNotExist:
return False
new_foto2 = instance.foto2
if not old_foto2 == new_foto2:
if os.path.isfile(old_foto2.path):
os.remove(old_foto2.path)
# try remove file 3
try:
old_foto3 = sender.objects.get(pk=instance.pk).foto3
except sender.DoesNotExist:
return False
new_foto3 = instance.foto3
if not old_foto3 == new_foto3:
if os.path.isfile(old_foto3.path):
os.remove(old_foto3.path)
# try remove file 4
try:
old_foto4 = sender.objects.get(pk=instance.pk).foto4
except sender.DoesNotExist:
return False
new_foto4 = instance.foto4
if not old_foto4 == new_foto4:
if os.path.isfile(old_foto4.path):
os.remove(old_foto4.path)
| BangHeru/p2kacehtengah | monitoring/submodels/monitoringModels.py | monitoringModels.py | py | 6,309 | python | en | code | 0 | github-code | 13 |
36553240073 |
# 다중분류는 이진분류와 달리 y 레이블의 범주 수가 3개 이상
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data = pd.read_csv('Fvote.csv', encoding='utf-8')
X = data[data.columns[1:13]]
y = data[['parties']]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X_train, y_train)
pred_train = model.predict(X_train)
model.score(X_train, y_train)
# 0.6139240506329114
pred_test = model.predict(X_test)
model.score(X_test, y_test)
# 0.5283018867924528
from sklearn.metrics import confusion_matrix
confusion_train = confusion_matrix(y_train, pred_train)
print("훈련데이터 오차행렬:\n", confusion_train)
# 훈련데이터 오차행렬:
# [[21 2 3 11]
# [ 1 25 2 12]
# [ 6 2 5 6]
# [ 7 8 1 46]]
confusion_test = confusion_matrix(y_test, pred_test)
print("테스트데이터 오차행렬:\n", confusion_test)
# 테스트데이터 오차행렬:
# [[ 6 1 2 4]
# [ 1 9 1 2]
# [ 1 2 1 2]
# [ 2 5 2 12]]
# =================================================================
# Grid Search
# =================================================================
from sklearn.model_selection import GridSearchCV
param_grid = {'C':[0.001, 0.01, 0.1, 1, 10, 100]}
grid_search = GridSearchCV(LogisticRegression(), param_grid, cv=5, return_train_score=True)
grid_search.fit(X_train, y_train)
print("Best Parameter: {}".format(grid_search.best_params_))
print("Best Cross-validity Score: {:.3f}".format(grid_search.best_score_))
# Best Parameter: {'C': 0.1}
# Best Cross-validity Score: 0.544
print("Test set Score: {:.3f}".format(grid_search.score(X_test, y_test)))
# =================================================================
# Random Search
# =================================================================
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import randint
param_distribs = {'C': randint(low=0.001, high=100)}
random_search = RandomizedSearchCV(LogisticRegression(),
param_distributions=param_distribs,
cv=5,
n_iter=100,
return_train_score=True)
random_search.fit(X_train, y_train)
print("Best Parameter: {}".format(random_search.best_params_))
print("Best Cross-validity Score: {:.3f}".format(random_search.best_score_))
# Best Parameter: {'C': 4}
# Best Cross-validity Score: 0.544
print("Test set Score: {:.3f}".format(random_search.score(X_test, y_test)))
# Test set Score: 0.509
| reasonmii/ref_DataScience | certificate_BigDataAnalytics/08_multiclassification.py | 08_multiclassification.py | py | 2,783 | python | en | code | 14 | github-code | 13 |
74469111058 | from django.conf.urls import url
from django.views.generic import TemplateView
from . import views
app_name = 'heartrisk'
urlpatterns = [
url(r'^index/', views.index, name='index'),
url(r'^get_probability/', views.get_probability, name='get_probability'),
url(r'^final_probability/', views.final_probability, name='final_probability'),
url(r'^upload_file/', views.upload_file, name='upload_file'),
url(r'^final_heartbeat/', views.final_heartbeat, name='final_heartbeat'),
] | himanshumangla/hackData | heartrisk/urls.py | urls.py | py | 476 | python | en | code | 0 | github-code | 13 |
17084752434 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.PetProfiles import PetProfiles
class AlipayInsSceneInsassetprodPetprofilelistQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayInsSceneInsassetprodPetprofilelistQueryResponse, self).__init__()
self._pet_profiles = None
@property
def pet_profiles(self):
return self._pet_profiles
@pet_profiles.setter
def pet_profiles(self, value):
if isinstance(value, list):
self._pet_profiles = list()
for i in value:
if isinstance(i, PetProfiles):
self._pet_profiles.append(i)
else:
self._pet_profiles.append(PetProfiles.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(AlipayInsSceneInsassetprodPetprofilelistQueryResponse, self).parse_response_content(response_content)
if 'pet_profiles' in response:
self.pet_profiles = response['pet_profiles']
| alipay/alipay-sdk-python-all | alipay/aop/api/response/AlipayInsSceneInsassetprodPetprofilelistQueryResponse.py | AlipayInsSceneInsassetprodPetprofilelistQueryResponse.py | py | 1,126 | python | en | code | 241 | github-code | 13 |
17636562699 | """
Advent of Code 2022 - Day 3
"""
with open("day3.txt", "r") as file:
input = file.read().split("\n")
input = [(sack[0:len(sack)//2], sack[len(sack)//2:]) for sack in input] #split in half
def getPriority(letter):
"""
Calculate the priority of a letter char
"""
return ord(letter)-ord("a")+1 if letter.islower() else ord(letter)-ord("A")+27
score = 0
for sack in input:
print(sack)
for letter in sack[0]:
if letter in sack[1]: #check if letter in other sack
score += getPriority(letter)
break
print(score)
| lab57/Advent-of-Code | day3.py | day3.py | py | 592 | python | en | code | 0 | github-code | 13 |
41719963422 | import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision
from torchvision import datasets
from torch.utils.data import DataLoader
from torchvision.utils import make_grid
transform = transforms.Compose([
transforms.Resize((32,32)),
transforms.ToTensor(),
])
def get_mnist_train_dataloader(args):
mnist_trainset = datasets.FashionMNIST(
root = args['input_dir'],
train = True,
download=True,
transform = transform
)
mnist_train_dataloader = DataLoader(
mnist_trainset,
batch_size = args['batch_size'],
shuffle=True,
)
return mnist_train_dataloader
def mnist_valid_dataloader(args):
mnist_validset = datasets.FashionMNIST(
root = args['input_dir'],
train=False,
download=True,
transform = transform
)
mnist_valid_dataloader = DataLoader(
mnist_validset,
batch_size = args['batch_size'],
shuffle = False,
)
return mnist_valid_dataloader
DATALOADERDICT = {}
DATALOADERDICT["mnist"] = {"train":get_mnist_train_dataloader,"valid":mnist_valid_dataloader}
| celestialxevermore/DL_Implementations | Autoencoders/ConvolutionalVAE/dataloaders/data_dataloader.py | data_dataloader.py | py | 1,169 | python | en | code | 0 | github-code | 13 |
29881900000 | import socket
import os
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Hello World! Hostname: {hostname} | Version: {version}'.format(
hostname=socket.gethostname(),
version=open('./VERSION.md').read().rstrip('\n')
)
if __name__ == '__main__':
app.run(host="0.0.0.0", debug=True)
| michaltrmac/docker-first-steps | example/docker/python-simple-app/src/app.py | app.py | py | 364 | python | en | code | 0 | github-code | 13 |
14880477577 | '''
https://docs.python.org/3/library/collections.html#namedtuple-factory-function-for-tuples-with-named-fields
'''
import collections
import sys
# print(*(i for i in dir(collections) if not i.startswith("_")), sep = "\n")
Point = collections.namedtuple('Point', ['x', 'y']) # Returns a new tuple subclass named Point
print(Point)
p = Point(11, y=22)
print(p, hash(p), sys.getsizeof(p))
print(p[0:2], p.x, p.y)
print(*(i for i in dir(p) if not i.startswith("_")), sep = "\n")
# наследование класса namedtuple
class P(collections.namedtuple('Point', ['x', 'y'])):
def __str__(self):
return "namedtuple Point x={}, y={}: {}".format(self.x, self.y, type(self))
l = P("aaa","bbb")
print(l) | ekomissarov/edu | py-basics/stdlib/namedtuple.py | namedtuple.py | py | 726 | python | en | code | 0 | github-code | 13 |
72569725459 | import cv2
import numpy as np
import matplotlib.pyplot as plt
def create_vert_alpha_matte(dims, f_width, f_location) -> np.ndarray:
"""
dims: (height, width)
f_width: width
f_location: (height, width)
b_width: (height, width)
b_location: (height, width)
"""
mask = np.zeros(dims, dtype=np.float32)
start_feather = f_location - f_width // 2
end_feather = f_location + f_width // 2
if f_width % 2 != 0:
end_feather += 1
mask[:, end_feather:] = 1.0
mask[:, start_feather:end_feather] = np.linspace(0.0, 1.0, f_width)
return mask
def blend_images_alpha(im1, im2, alpha) -> np.ndarray:
"""
im1: (height, width, 3)
im2: (height, width, 3)
alpha: (height, width)
"""
# error checking <3
assert im1.shape == im2.shape
assert im1.shape[:2] == alpha.shape
assert im2.shape[:2] == alpha.shape
return im2 * alpha[..., np.newaxis] + im1 * (1 - alpha[..., np.newaxis])
if __name__ == '__main__':
apple = cv2.cvtColor(cv2.imread('images/burt_apple.png'), cv2.COLOR_BGR2RGB)
orange = cv2.cvtColor(cv2.imread('images/burt_orange.png'), cv2.COLOR_BGR2RGB)
apple = (apple / 255.0).astype(np.float32)
orange = (orange / 255.0).astype(np.float32)
alpha = create_vert_alpha_matte(apple.shape[:2], 120, apple.shape[1] // 2)
plt.plot()
plt.imshow(alpha, cmap='gray')
plt.show()
blended = blend_images_alpha(apple, orange, alpha)
plt.plot()
plt.imshow(blended)
plt.show()
| christian-armstrong25/lab-compositing | alpha_blending.py | alpha_blending.py | py | 1,547 | python | en | code | 0 | github-code | 13 |
40857471411 | import pandas as pd
from debal_scrap.models import Debal
def app():
debal = Debal()
group = debal.select_group()
data = list(group.expenses())
df = pd.DataFrame(data)
df.to_csv(input("save as <filename.csv>: "))
| tewfik/debal_scrap | debal_scrap/app.py | app.py | py | 236 | python | en | code | 0 | github-code | 13 |
19505765793 | import spotipy
from sys import argv
from pprint import pprint
from spotipy.oauth2 import SpotifyOAuth
# ###########################
# ON REPEAT PLAYLIST IDS
# ###########################
# OMRI - 37i9dQZF1Epk3rCnDbRzoW
# RYAN - 37i9dQZF1EpjwNta6kRS75
# JACK - 37i9dQZF1EpkeEt7H42BOM
# JORDAN - 37i9dQZF1EprSmFIIwNCNf
# BEN - 37i9dQZF1EpywHzaNngt66
# NOAH - 37i9dQZF1EpsXgi13RB1Os
# ##############################
# DISCOVER WEEKLY PLAYLIST IDS
# ##############################
# OMRI - 37i9dQZEVXcQstduaQO7HA
# RYAN - 37i9dQZEVXcDPwgxAaFEWl
# JACK - 37i9dQZEVXcKuLHb4mDlTg
# JORDAN - 37i9dQZEVXcR2OKTWtMNZF
# BEN - 37i9dQZEVXcP1dezheto8s
# NOAH - 37i9dQZEVXcDNQOBvWPlJH
def concat_playlists(playlists):
for val in playlists:
pl = sp.playlist(val)
print("Processing --- Playlist ID:", val, " --- Name: ", pl["name"])
offset = 0
playlistDict = {}
while True:
playlist_items = sp.playlist_items(val,
offset=offset,
fields='items.track.id,total',
additional_types=['track'])
for playlist_item in playlist_items['items']:
trackId = playlist_item['track']['id']
trackObject = sp.track(trackId)
playlistDict[trackId] = trackObject['name']
if len(playlist_items['items']) == 0:
break
offset = offset + len(playlist_items['items'])
for key, val in playlistDict.items():
if key not in trackSet:
trackSet.add(key)
else:
duplicates.add(sp.track(key)["name"])
def split_list(a_list):
half = len(a_list) // 2
return a_list[:half], a_list[half:]
if __name__ == '__main__':
sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope="playlist-modify-public"))
if len(argv) < 4:
print("Must provide at least 2 playlists to join")
else:
playlist_name = argv[1]
playlists = argv[2:]
# to avoid duplicates in playlist
trackSet = set()
duplicates = set()
concat_playlists(playlists)
trackList = list(trackSet)
print(trackList)
print("DUPLICATES: ")
print(duplicates)
# Noah user id: 1222971566
playlist = sp.user_playlist_create(user="1222971566", name=playlist_name)
playlistId = playlist['id']
playlistHalf1, playlistHalf2 = split_list(trackList)
sp.playlist_add_items(playlistId, playlistHalf1)
sp.playlist_add_items(playlistId, playlistHalf2)
| noahgorstein/spotify_scripts | concatenate_playlists.py | concatenate_playlists.py | py | 2,654 | python | en | code | 1 | github-code | 13 |
28363776980 | """Тут код, результаты в result.json"""
import json
import pandas as pd
from yargy import Parser, rule, or_
from yargy.pipelines import morph_pipeline, caseless_pipeline
from yargy.interpretation import fact
from yargy.predicates import in_
data = pd.read_csv('pristavki.csv', header=None, names=['text'])
Game = fact(
'game',
['series', 'name', 'release'],
)
zelda = rule(
morph_pipeline([
'The Legend of Zelda',
'Зельда',
'Zelda',
]).interpretation(Game.series.const('The Legend of Zelda')),
morph_pipeline([
'Ocarina of Time',
'Breath of the Wild',
'Majors mask',
'A Link Between Worlds',
'Skyward Sword',
'Wind Waker',
]).interpretation(Game.name).optional(),
morph_pipeline(['3D', 'HD']).interpretation(Game.release).optional(),
)
gta = rule(
morph_pipeline([
'Grand Theft Auto',
'GTA',
'ГТА',
]).interpretation(Game.series.const('Grand Theft Auto')),
morph_pipeline([
'3', '4', '5',
'San Andreas',
'Vice City',
'Chinatown Wars',
'Liberty City Stories',
'Vice City Stories',
]).interpretation(Game.name),
morph_pipeline([]).interpretation(Game.release).optional()
)
megaten = rule(
morph_pipeline([
'Shin Megami Tensei',
'Megami Tensei',
]).interpretation(Game.series),
morph_pipeline([
'Persona',
'Devil Summoner',
'Digital Devil Saga',
'Devil Children Shiro',
]).interpretation(Game.name),
in_('234').interpretation(Game.release).optional(),
)
assassin = rule(
morph_pipeline([
"Assassin's Creed",
'Ассассин',
'Ассассин Крид',
'Ассассинс Крид',
]).interpretation(Game.series.const("Assassin's Creed")),
morph_pipeline([
'2', '3', '4',
'II', 'III', 'IV',
'Unity', 'Единство',
'Syndicate', 'Синдикат',
'Rogue', 'Изгой',
]).interpretation(Game.name),
morph_pipeline([
'Bloodlines',
'Liberation',
'Black Flag',
'Чёрный флаг',
]).interpretation(Game.release).optional(),
)
colda = rule(
morph_pipeline([
'Call Of Duty',
'Gall Of Duty',
'кол оф дьюти',
'колда',
]).interpretation(Game.series.const('Call Of Duty')),
morph_pipeline([
'MW',
'Modern Warfare',
'Advanced Warfare',
'Black Ops',
]).interpretation(Game.name),
morph_pipeline([
'1', '2', '3',
'I', 'II', 'III',
'Declassified',
]).interpretation(Game.release).optional(),
)
#можно нанять эксперта и продолжать, но в идеале тут нужна языковая модель: слишком много вариантов
gm = or_(zelda, gta, megaten, assassin, colda).interpretation(Game)
parser = Parser(gm)
matches = []
for sent in data.text:
for match in parser.findall(sent):
matches.append(match.fact.as_json)
print(len(matches))#622
with open('result.json', 'w') as f:
json.dump(matches, f, ensure_ascii=False, indent=4)
| OneAdder/compling2019 | hm_yargy/1/extract_games.py | extract_games.py | py | 3,256 | python | en | code | 0 | github-code | 13 |
30902870779 | # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Finetuning torchvision models for the purpose of predicting
# Tinder swipes, left or right
#
# Based on https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html
#
# Lots of things could be tweaked here:
# - NN architecture
# - Use a model pretrained on face tasks (instead of ImageNet)
# - Grid search hyperparameters
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
from __future__ import print_function, division
from utilities import folder_assertions
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
import uuid
import data
class Libido:
def __init__(self, sorted_data_dir, temp_data_dir, trained_models_dir, pretrained=True, feature_extraction=True):
""" Assertions """
assert sorted_data_dir is not None, "Invalid sorted data folder name"
assert temp_data_dir is not None, "Invalid temporary data folder name"
assert trained_models_dir is not None, "Invalid trained models folder name"
""" Variables """
# Folders
self.sorted_data_dir = sorted_data_dir
self.temp_dir = temp_data_dir
self.temp_dir_save = os.path.join(self.temp_dir, "1/")
self.models_dir = trained_models_dir
self.trainable_model_path = str(uuid.uuid1()) + ".pth"
assert folder_assertions([self.sorted_data_dir, self.temp_dir, self.temp_dir_save, self.models_dir]) == True, "Couldn't create data folders"
# Training hyperparameters
self.num_epochs = 25
self.batch_size = 16
self.pretrained = pretrained
self.feature_extraction = feature_extraction
self.initial_learning_rate = 0.0012
self.learning_rate_decay = 0.0001
self.learning_rate_decay_rate = 2
# Dataset transformations
# -> Augmentation and normalization for training
# -> Normalization for testing
self.data_transforms = {
'train': transforms.Compose([
transforms.Resize(224), # RandomResizedCrop(224), - currently assumes input images are square
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'test': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
# Dataset loaders
self.image_datasets = {x: datasets.ImageFolder(os.path.join(self.sorted_data_dir, x), self.data_transforms[x]) for x in ['train', 'test']}
self.dataloaders = {x: torch.utils.data.DataLoader(self.image_datasets[x], batch_size=self.batch_size, shuffle=True, num_workers=4) for x in
['train', 'test']}
self.dataset_sizes = {x: len(self.image_datasets[x]) for x in ['train', 'test']}
self.class_names = self.image_datasets['train'].classes
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Using CUDA? {} - {}".format(torch.cuda.is_available(), self.device))
# Initialize model
self.model_ft = models.resnet34(pretrained=self.pretrained)
self.set_parameter_requires_grad(self.model_ft, feature_extraction=self.feature_extraction)
num_ftrs = self.model_ft.fc.in_features
num_classes = len(self.class_names)
self.model_ft.fc = nn.Linear(num_ftrs, num_classes)
self.model_ft = self.model_ft.to(self.device)
def train_model(self, num_epochs=25):
""" Train a model """
""" Assumes self.sorted_data_dir containes train/left, train/right, test/left, test/right """
""" You can obtain this format by using data.setup_entire_dataset """
# Reassign intrinsic parameters
self.num_epochs = num_epochs
# Define loss criteria
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized - finetuning
optimizer_ft = optim.SGD(self.model_ft.parameters(), lr=self.initial_learning_rate, momentum=0.9)
# Decay LR by a factor of 0.001 every <step_size> epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=self.learning_rate_decay_rate, gamma=self.learning_rate_decay)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
self.model_ft = self._train_model(self.model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=self.num_epochs)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def _train_model(self, model, criterion, optimizer, scheduler, num_epochs=25):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'test']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in self.dataloaders[phase]:
inputs = inputs.to(self.device)
labels = labels.to(self.device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if training
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'train':
scheduler.step()
epoch_loss = running_loss / self.dataset_sizes[phase]
epoch_acc = running_corrects.double() / self.dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'test' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('Best accuracy: {:4f}'.format(best_acc))
# Save after training
torch.save(best_model_wts, os.path.join(self.models_dir, self.trainable_model_path))
# Load model
model.load_state_dict(best_model_wts)
# Visualize best model
self.visualize_model(model)
plt.show()
# Return model
return model
def imshow(self, inp, title=None):
"""Imshow for Tensor"""
inp = inp.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
if title is not None:
plt.title(title)
plt.pause(0.001) # pause a bit so that plots are updated
def visualize_model(self, model, num_images=6):
was_training = model.training
model.eval()
images_so_far = 0
fig = plt.figure()
with torch.no_grad():
for i, (inputs, labels) in enumerate(self.dataloaders['test']):
inputs = inputs.to(self.device)
labels = labels.to(self.device)
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
for j in range(inputs.size()[0]):
images_so_far += 1
ax = plt.subplot(num_images // 2, 2, images_so_far)
ax.axis('off')
ax.set_title('predicted: {}'.format(self.class_names[preds[j]]))
self.imshow(inputs.cpu().data[j])
if images_so_far == num_images:
model.train(mode=was_training)
return
model.train(mode=was_training)
def _dataloader_from_temp_folder(self):
"""
TODO: Instead of creating this every time a new batch of images needs to be predicted,
we could just add this dataloader initialization in the beginning (["train", "test", "infer"]), under the assumption
thid dataloader will get updated as files in __temp__ come and go. To be tested.
"""
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
image_dataset = datasets.ImageFolder(self.temp_dir, transform)
dataloader = torch.utils.data.DataLoader(image_dataset, batch_size=self.batch_size, shuffle=False, num_workers=4)
return dataloader
def _get_batch_predictions(self, dataloader):
latest_model_name = self.get_latest_model()
self.load_pretrained(latest_model_name)
self.model_ft.eval()
with torch.no_grad():
for i, (inputs, labels) in enumerate(dataloader):
inputs = inputs.to(self.device)
labels = labels.to(self.device)
outputs = self.model_ft(inputs)
#_, preds = torch.max(outputs, 1)
return outputs
# TODO: Some math to round up the predictions
def infer(self):
data.preprocess_pipeline(self.temp_dir_save)
dataloader = self._dataloader_from_temp_folder()
# Mean approach
preds = self._get_batch_predictions(dataloader)
mean_per_class = torch.mean(preds, 1)
mean_np = torch.mean(mean_per_class)
mean = int(np.round(mean_np))
result = self.class_names[mean]
# Mode approach
#value = stats.mode(preds.cpu().detach().numpy())[0]
print(result)
return result
def set_parameter_requires_grad(self, model, feature_extraction=False):
if feature_extraction:
for param in model.parameters():
param.requires_grad = False
def show_pretrained_model(self, model_name=None):
if not model_name:
latest_model_name = self.get_latest_model()
self.load_pretrained(latest_model_name) # if it fails, load latest model
else:
self.load_pretrained(model_name) # try to load specified model
self.visualize_model(self.model_ft)
plt.show()
def load_pretrained(self, model_name):
model_path = os.path.join(self.models_dir, model_name)
if not os.path.isfile(model_path):
raise Exception("No model named at {}".format(model_path))
try:
self.model_ft.load_state_dict(torch.load(model_path))
except RuntimeError: # Happens if loaded model was trained on GPU and only CPU is available
self.model_ft.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
def get_latest_model(self):
all_models = [os.path.join(self.models_dir, m) for m in os.listdir(self.models_dir) if not os.path.isdir(m)]
latest_model_path = max(all_models, key=os.path.getmtime)
print(f"latest_model_path {latest_model_path}")
return os.path.basename(latest_model_path)
| abreu4/wing-man | libido.py | libido.py | py | 12,755 | python | en | code | 0 | github-code | 13 |
25566125873 | size = int(input())
territory = []
alice_position = []
for row in range(size):
territory.append(input().split())
if "A" in territory[row]:
alice_position = [row, territory[row].index("A")]
territory[alice_position[0]][alice_position[1]] = "*"
moves = {
"up": (-1, 0),
"down": (1, 0),
"left": (0, -1),
"right": (0, 1),
}
collected_tea_bags = 0
while True:
move = input()
move_row, move_col = moves[move]
move_row += alice_position[0]
move_col += alice_position[1]
if move_row < 0 or move_row >= size or move_col < 0 or move_col >= size:
break
current_element = territory[move_row][move_col]
territory[move_row][move_col] = "*"
if current_element == "R":
break
alice_position = [move_row, move_col]
if current_element.isdigit():
collected_tea_bags += int(current_element)
if collected_tea_bags >= 10:
break
if collected_tea_bags < 10:
print("Alice didn't make it to the tea party.")
else:
print("She did it! She went to the party.")
[print(*row, sep=" ") for row in territory] | mustanska/SoftUni | Python_Advanced/Multidimensional Lists/alice_in_wonderland.py | alice_in_wonderland.py | py | 1,122 | python | en | code | 0 | github-code | 13 |
72545300499 | # create by fanfan on 2019/11/14 0014
import tensorflow as tf
from tensorflow.contrib.rnn import GRUCell,LSTMCell,DropoutWrapper,ResidualWrapper,MultiRNNCell
def create_single_cell(num_units,keep_prob,use_residual,cell_type='lstm'):
if cell_type == 'lstm':
cell = LSTMCell(num_units)
else:
cell = GRUCell(num_units)
cell = DropoutWrapper(cell, output_keep_prob=keep_prob)
if use_residual:
cell = ResidualWrapper(cell)
return cell
def create_cell_list(num_layers,num_units,keep_prob,use_residual,cell_type,return_list=False):
cell_list = [create_single_cell(num_units,keep_prob,use_residual,cell_type) for _ in range(num_layers)]
if num_layers == 1:
return cell_list[0]
else:
if return_list:
return cell_list
else:
return MultiRNNCell(cell_list)
def create_encoder(source_emb,enc_seq_len,num_units,use_residual,keep_prob,num_layers,cell_type='lstm'):
'''
:param source_emb: 经过embedding处理的source向量
:param enc_seq_len: source的长度
:param num_units: lstm的size
:param use_residual: 是否使用残差
:param keep_prob: 保留概率,1 - dropout
:param num_layers: rnn的层数
:param cell_type: rnn的类型
:return: output,output_states
'''
enc_cells_fw = create_cell_list(num_layers,num_units,keep_prob,use_residual,cell_type)
enc_cells_bw = create_cell_list(num_layers,num_units,keep_prob,use_residual,cell_type)
enc_outputs, enc_states = tf.nn.bidirectional_dynamic_rnn(enc_cells_fw, enc_cells_bw, source_emb,
sequence_length=enc_seq_len,
dtype=tf.float32)
enc_outputs = tf.concat(enc_outputs, 2)
# 合并输入的states
encoder_states = []
for i in range(num_layers):
if isinstance(enc_states[0][i], tf.contrib.rnn.LSTMStateTuple):
encoder_state_c = tf.concat(values=(enc_states[0][i].c, enc_states[1][i].c), axis=1,
name="encoder_fw_state_c")
encoder_state_h = tf.concat(values=(enc_states[0][i].h, enc_states[1][i].h), axis=1,
name="encoder_fw_state_h")
encoder_state = tf.contrib.rnn.LSTMStateTuple(c=encoder_state_c, h=encoder_state_h)
elif isinstance(enc_states[0][i], tf.Tensor):
encoder_state = tf.concat(values=(enc_states[0][i], enc_states[1][i]), axis=1,
name='bidirectional_concat')
else:
raise TypeError("cell type error in encoder cell")
encoder_states.append(encoder_state)
enc_states = tuple(encoder_states)
return enc_outputs,enc_states | fanfanfeng/nlp_research | dialog_system/attention_seq2seq/tf_model/encoder.py | encoder.py | py | 2,797 | python | en | code | 8 | github-code | 13 |
32053558525 | """The 4Heat integration switch."""
from __future__ import annotations
# from collections.abc import Callable
from dataclasses import dataclass
from typing import Any
from homeassistant.components.switch import SwitchEntity, SwitchEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import SERVICE_TURN_OFF, SERVICE_TURN_ON, STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import LOGGER
from .coordinator import FourHeatCoordinator
from .entity import (
FourHeatAttributeEntity,
FourHeatEntityDescription,
_setup_descriptions,
async_setup_entry_attribute_entities,
)
from .fourheat import FourHeatDevice
@dataclass
class FourHeatSwitchDescription(FourHeatEntityDescription, SwitchEntityDescription):
"""Class to describe a device switch."""
# description: Callable[[str, FourHeatEntityDescription]] | None = None
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up switch for the device."""
return async_setup_entry_attribute_entities(
hass,
config_entry,
async_add_entities,
_setup_descriptions(
FourHeatSwitch,
FourHeatSwitchDescription,
),
FourHeatSwitch,
)
class FourHeatSwitch(FourHeatAttributeEntity, SwitchEntity):
"""Representation of a 4Heat switch."""
entity_description: FourHeatSwitchDescription
def __init__(
self,
coordinator: FourHeatCoordinator,
device: FourHeatDevice,
attribute: str,
description: FourHeatSwitchDescription,
) -> None:
"""Initialize the switch."""
super().__init__(coordinator, device, attribute, description)
self.control_result: str | None = None
# self._attr_device_class = description.device_class
LOGGER.debug("Additing switch: %s", attribute)
@property
def is_on(self) -> bool:
"""If switch is on."""
if self.control_result:
return self.control_result == STATE_ON
return self.device.status == "on"
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn on 4heat."""
# await self.set_state(command=SERVICE_TURN_ON)
await self.device.async_send_command(SERVICE_TURN_ON)
self.control_result = STATE_ON
self.async_write_ha_state()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn off 4heat."""
# self.control_result = await self.set_state(command=SERVICE_TURN_OFF)
await self.device.async_send_command(SERVICE_TURN_OFF)
self.control_result = STATE_OFF
self.async_write_ha_state()
@callback
def _update_callback(self) -> None:
"""When device updates, clear control result that overrides state."""
self.control_result = None
super()._update_callback()
| anastas78/homeassistant-fourheat | custom_components/fourheat/switch.py | switch.py | py | 3,045 | python | en | code | 0 | github-code | 13 |
36223362496 | from cx_Freeze import setup, Executable
executables = [Executable('hrtfmixer.py', base='Win32GUI')]
build_exe_options = {'packages': ['pysofaconventions', 'scipy.spatial', 'matplotlib.pyplot','mpl_toolkits.mplot3d','scipy.signal','numpy','pyaudio','wave','time','pygame'],
'include_files': ['resources/THK_FFHRIR/HRIR_L2354.sofa']}
setup(name='hrtf_mixer',
version='0.0',
options={'build_exe': build_exe_options},
description='Spatialize audio in real time',
executables=executables) | aechoi/hrtfmixer | setup.py | setup.py | py | 501 | python | en | code | 28 | github-code | 13 |
28905150348 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sqlite3
read_file = "../data/twitter_id_append.txt"
f = open(read_file,"r")
# how many program finished
s = 0
sep_point = 0
count = 0
lines = f.readlines()
conn = sqlite3.connect('./sqlite.db')
cur = conn.cursor()
try:
cur.execute("""CREATE TABLE corr2(post_id serial,comment_id serial,post text,comment text);""")
except sqlite3.OperationalError:
print("---table corr already exist---")
#cur.execute("""DROP TABLE corr;""")
#cur.execute("""CREATE TABLE corr(post_id serial,comment_id serial,post text,comment text);""")
# detect correspondence from ID_list file
for index,line in enumerate(lines):
tweet_dict = {}
tweet_IDs = line.split(",")
# remove \n from IDlist
for t in range(0,len(tweet_IDs)):
tweet_IDs[t] = tweet_IDs[t].strip("\n")
# # make associative array
# for i in range(0,int(len(tweet_IDs)/2)):
# tweet_dict.update({tweet_IDs[2*i]:tweet_IDs[(2*i)+1]})
# detect value from DB
cur.execute("select post_id,comment_id,post,comment from tweet")
fetch = cur.fetchall()
#print(sep_point)
# search correspondence
for i in range(0,int(len(tweet_IDs)/2)):
print(str(i + s) + "correspondence was found")
post_id = tweet_IDs[2*i]
comment_id = tweet_IDs[(2*i)+1]
column = {"post_id":post_id,"comment_id":comment_id,"post":"","comment":""}
#print("index : " + str(index) )
sep=int(s/2)
# print(type(sep))
for fetch_column in fetch[sep_point:sep_point + 50]:
[fetch_post_id,fetch_comment_id,fetch_post,fetch_comment] = fetch_column
#search post by post_id
if(int(post_id) == int(fetch_post_id)):
column["post"] = fetch_post
if(int(post_id) == int(fetch_comment_id)):
column["post"] = fetch_comment
#search comment by comment_id
if(int(comment_id) == int(fetch_post_id)):
column["comment"] = fetch_post
if(int(comment_id) == int(fetch_comment_id)):
column["comment"] = fetch_comment
if(column["post"] != "" and column["comment"] != ""):
sep_point = sep_point + 1
break
if(column["post"] == ""):
print("text of post_id " + str(post_id) + " was not found.")
if(column["comment"] == ""):
print("text of comment_id " + str(comment_id) + " was not found.")
# write correspondence to DB
cur.execute("""INSERT INTO corr(post_id,comment_id,post,comment) VALUES(%d,%d,'%s','%s')"""\
%(int(column["post_id"]),int(column["comment_id"]),column["post"],column["comment"]))
conn.commit()
print(tweet_dict)
s = s + i + 1
cur.close()
| pauwau/workspace | get_tweet/old_src/post_comment.py | post_comment.py | py | 2,484 | python | en | code | 0 | github-code | 13 |
17191480786 | def is2k_arr(arr):
sum1 = 0
sum2 = 0
if len(arr) % 2 ==0 :
for i in range(len(arr)//2):
sum1 += int(arr[i])
sum2 += int(arr[len(arr)-i-1])
if sum1 == sum2:
return True
return False
if __name__ == '__main__':
times = int(input())
for i in range(times):
line = input()
arr = list(line)
count = len(arr)
result = 0
while count > 0:
# print(count)
if count % 2 == 0:
for j in range(len(arr)-count+1):
# print("j :"+str(j))
# print(arr[0+j:count+j])
x = is2k_arr(arr[0+j:count+j])
if x:
result = count
break
count -= 2
else:
count -= 1
if result > 0:
break
print(result) | yzgqy/myacm | acm/kt3/m1.py | m1.py | py | 927 | python | en | code | 0 | github-code | 13 |
6881894476 | import tkinter as tk
import tkinter.font as tkFont
from Model import Model
from Objective1 import Objective1
from Objective2 import Objective2
from Objective3 import Objective3
from Objective4 import Objective4
from Objective5 import Objective5
from Objective6 import Objective6
class Home:
def __init__(self,root):
self.root=root
self.model=Model()
#setting title
self.root.title("Home")
#setting window size
width=800
height=500
screenwidth = self.root.winfo_screenwidth()
screenheight = self.root.winfo_screenheight()
alignstr = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 2)
self.root.geometry(alignstr)
self.root.resizable(width=False, height=False)
GLabel_653=tk.Label(self.root)
GLabel_653["activebackground"] = "#a75454"
GLabel_653["activeforeground"] = "#954747"
ft = tkFont.Font(family='Times',size=54)
GLabel_653["font"] = ft
GLabel_653["fg"] = "#040404"
GLabel_653["justify"] = "center"
GLabel_653["text"] = "Result Analysis"
GLabel_653["relief"] = "flat"
GLabel_653.place(x=120,y=30,width=524,height=72)
GLabel_654=tk.Message(self.root,width=320)
ft = tkFont.Font(family='Times',size=29)
GLabel_654["font"] = ft
GLabel_654["fg"] = "#040404"
GLabel_654["justify"] = "left"
GLabel_654["text"] = "Select From the options given on the left navigation bar to obtain desired results."
GLabel_654.place(x=320,y=120,width=320,height=302)
GButton_887=tk.Button(self.root)
GButton_887["bg"] = "#efefef"
ft = tkFont.Font(family='Times',size=10)
GButton_887["font"] = ft
GButton_887["fg"] = "#000000"
GButton_887["justify"] = "center"
GButton_887["text"] = "Performance in a subject"
GButton_887.place(x=30,y=110,width=224,height=41)
GButton_887["command"] = self.Performance_subject
GButton_637=tk.Button(self.root)
GButton_637["bg"] = "#efefef"
ft = tkFont.Font(family='Times',size=10)
GButton_637["font"] = ft
GButton_637["fg"] = "#000000"
GButton_637["justify"] = "center"
GButton_637["text"] = "Performance prediction of a subject"
GButton_637.place(x=30,y=170,width=224,height=41)
GButton_637["command"] = self.prediction_subject
GButton_932=tk.Button(self.root)
GButton_932["bg"] = "#efefef"
ft = tkFont.Font(family='Times',size=10)
GButton_932["font"] = ft
GButton_932["fg"] = "#000000"
GButton_932["justify"] = "center"
GButton_932["text"] = "Backlog Analysis"
GButton_932.place(x=30,y=230,width=224,height=41)
GButton_932["command"] = self.Backlog
GButton_513=tk.Button(self.root)
GButton_513["bg"] = "#efefef"
ft = tkFont.Font(family='Times',size=10)
GButton_513["font"] = ft
GButton_513["fg"] = "#000000"
GButton_513["justify"] = "center"
GButton_513["text"] = "Pass/Fail Analysis "
GButton_513.place(x=30,y=290,width=224,height=41)
GButton_513["command"] = self.Pass_Fail
GButton_84=tk.Button(self.root)
GButton_84["bg"] = "#efefef"
ft = tkFont.Font(family='Times',size=10)
GButton_84["font"] = ft
GButton_84["fg"] = "#000000"
GButton_84["justify"] = "center"
GButton_84["text"] = "Semester wise performance"
GButton_84.place(x=30,y=350,width=224,height=41)
GButton_84["command"] = self.Semester_performance
GButton_180=tk.Button(self.root)
GButton_180["bg"] = "#efefef"
ft = tkFont.Font(family='Times',size=10)
GButton_180["font"] = ft
GButton_180["fg"] = "#000000"
GButton_180["justify"] = "center"
GButton_180["text"] = "Consecutive Year Score Prediction"
GButton_180.place(x=30,y=410,width=224,height=41)
GButton_180["command"] = self.Consecutive_Prediction
def Performance_subject(self):
newWindow = tk.Toplevel(self.root)
Objective1(newWindow,self.model)
def prediction_subject(self):
newWindow = tk.Toplevel(self.root)
Objective2(newWindow,self.model)
def Backlog(self):
newWindow = tk.Toplevel(self.root)
Objective3(newWindow,self.model)
def Pass_Fail(self):
newWindow = tk.Toplevel(self.root)
Objective4(newWindow,self.model)
def Semester_performance(self):
newWindow = tk.Toplevel(self.root)
Objective5(newWindow,self.model)
def Consecutive_Prediction(self):
newWindow = tk.Toplevel(self.root)
Objective6(newWindow,self.model)
if __name__ == "__main__":
root = tk.Tk()
app = Home(root)
root.mainloop()
| RakshithJKashyap/PySpark | DesktopApp/Home.py | Home.py | py | 4,892 | python | en | code | 0 | github-code | 13 |
41687513952 | import csv
import cv2
import os
import numpy as np
from PIL import Image
import cv2
import imutils
import time
# counting the numbers
def is_number(s):
try:
float(s)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
# Take image function
bg = None
def resizeImage(imageName):
basewidth = 100
img = Image.open(imageName)
wpercent = (basewidth/float(img.size[0]))
hsize = int((float(img.size[1])*float(wpercent)))
img = img.resize((basewidth,hsize), Image.ANTIALIAS)
img.save(imageName)
def run_avg(image, aWeight):
global bg
# initialize the background
if bg is None:
bg = image.copy().astype("float")
return
# compute weighted average, accumulate it and update the background
cv2.accumulateWeighted(image, bg, aWeight)
def segment(image, threshold=1):
global bg
# find the absolute difference between background and current frame
diff = cv2.absdiff(bg.astype("uint8"), image)
# threshold the diff image so that we get the foreground
thresholded = cv2.threshold(diff,
threshold,
255,
cv2.THRESH_BINARY)[1]
# get the contours in the thresholded image
(_,cnts, _) = cv2.findContours(thresholded.copy(),
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
# return None, if no contours detected
if len(cnts) == 0:
return
else:
# based on contour area, get the maximum contour which is the hand
segmented = max(cnts, key=cv2.contourArea)
return (thresholded, segmented)
x=0.5 # start point/total width
y=0.8 # start point/total width
threshold = 50 # BINARY threshold
blurValue = 4 # GaussianBlur parameter
bgSubThreshold = 00
learningRate = 0
bgModel = cv2.createBackgroundSubtractorMOG2(0, bgSubThreshold)
# variables
isBgCaptured = 0 # whether the background captured
def removeBG(frame): #Subtracting the background
fgmask = bgModel.apply(frame,learningRate=learningRate)
kernel = np.ones((3, 3), np.uint8)
fgmask = cv2.erode(fgmask, kernel, iterations=1)
res = cv2.bitwise_and(frame, frame, mask=fgmask)
return res
def capture():
print("1 2 3 4 5")
let = input("Enter Number ")
data="Number/" + str(let)+"/"
# initialize weight for running average
aWeight = 0.5
# get the reference to the webcam
camera = cv2.VideoCapture(0)
# region of interest (ROI) coordinates
top, right, bottom, left = 10, 350, 315, 590
# initialize num of frames
num_frames = 0
start_recording = False
# keep looping, until interrupted
i=0
while(True):
# get the current frame
(grabbed, frame) = camera.read()
# resize the frame
frame = imutils.resize(frame, width = 700)
# flip the frame so that it is not the mirror view
frame = cv2.flip(frame, 1)
# clone the frame
clone = frame.copy()
# get the height and width of the frame
(height, width) = frame.shape[:2]
# get the ROI
roi = frame[top:bottom, right:left]
# convert the roi to grayscale and blur it
gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
# to get the background, keep looking till a threshold is reached
# so that our running average model gets calibrated
if num_frames < 30:
run_avg(gray, aWeight)
else:
# segment the hand region
hand = segment(gray)
# check whether hand region is segmented
if hand is not None:
# if yes, unpack the thresholded image and
# segmented region
(thresholded, segmented) = hand
img = removeBG(roi)
#img = img[0:int(y * roi.shape[0]),
# int(x * roi.shape[1]):roi.shape[1]] # clip the ROI
cv2.imshow('mask', img)
# draw the segmented region and display the frame
print(segmented.shape)
cv2.drawContours(clone, [segmented + (right, top)], -1, (0, 0, 255))
if start_recording:
cv2.imwrite('Temp.png', img)
cv2.imwrite(data+"template {0}.jpg".format(i), img)
i=i+1
time.sleep(.8)
resizeImage('Temp.png')
#predictedClass, confidence = getPredictedClass()
#showStatistics(predictedClass, confidence)
res = cv2.bitwise_and(roi, roi, mask=thresholded)
#cv2.imshow("Thesholded", res)
# draw the segmented hand
cv2.rectangle(clone, (left, top), (right, bottom), (0,255,0), 2)
# increment the number of frames
num_frames += 1
# display the frame with segmented hand
cv2.imshow("Video Feed", clone)
# observe the keypress by the user
keypress = cv2.waitKey(1) & 0xFF
# if the user pressed "q", then stop looping
if keypress == ord("q"):
break
if keypress == ord("s"):
start_recording = True
capture()
| SoniyaN/LookBasedMediaPlayer | create_data.py | create_data.py | py | 5,684 | python | en | code | 0 | github-code | 13 |
16816284964 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# NASA Jet Propulsion Laboratory
# California Institute of Technology
# (C) 2008-2011 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import str
from future import standard_library
standard_library.install_aliases()
import time
from threading import Thread
import logging
logger = logging.getLogger()
class AbstractInterruptableThread(Thread):
"""
A thread class that defines interruptable methods.
"""
def __init__(self):
"""
Initializer.
"""
Thread.__init__(self)
self._isRunnable = True
# end def
def __del__(self):
"""
Finalizer.
"""
pass
# Thread.__del__(self) does not exists
# end def
def stop(self):
"""
Stops this thread if it is running.
"""
# raise NotImplementedError('Subclasses should provide a concrete implementation.')
self._isRunnable = False
# end def
def run(self):
"""
Thread loop.
"""
raise NotImplementedError(
"Subclasses should provide a concrete implementation."
)
# # recommended essentials for implementation:
#
# # reset to true
# self._isRunnable = True
#
# # ----------------------------------------------------------------------
# # thread loop
#
# while self._isRunnable:
#
# # ------------------------------------------------------------------
# # sleep a little
# self.interruptableSleep(seconds)
#
# # end while
# end def
def interruptableJoin(self, timeout=3):
"""
Waits for the thread to end before returning. However, the current
version of Thread.join() does not respond to KeyboardInterrupt while
it is blocking. This version is interruptable to signals like the
KeyboardInterrupt Ctrl-C. It does so by calling Thread.join() in a
loop with a timeout.
@see: U{http://mail.python.org/pipermail/python-bugs-list/2007-July/039156.html}
"Threading.Thread.join() uses a lock to put itself on the "wait_queue".
It then calls thread.Lock.acquire(), which blocks indefinitely.
PyThread_acquire_lock() will not return until the lock is acquired, even
if a signal is sent. Effectively, Ctrl-C is "masked" until the lock is
released, (the joined thread is done), and the main thread comes back
to the interpreter and handles the signal, producing a KeyboardInterrupt Exception."
@param timeout: number of float seconds to wait in the join() before
checking again. The time in between waits IS interruptable.
@raise KeyboardInterrupt: raised when user presses CTRL-C to send interrupt signal.
"""
try:
while self.isAlive():
self.join(timeout) # seconds
# this area and the while check is interruptable.
# end while
except KeyboardInterrupt as e:
logger.debug("=> Thread interrupted. %s" % (str(e)))
# raised when user presses CTRL-C
self.stop()
# wait a little more for the thread to fully stop
while self.isAlive():
self.join(timeout) # seconds
# this area and the while check is interruptable.
# end while
# propagate exception
raise e
# end try-except
# end def
def interruptableSleep(self, seconds):
"""
Sleep for the given seconds, but can respond to stop().
"""
remainingSeconds = seconds
try:
while self._isRunnable and (remainingSeconds > 0):
time.sleep(1)
remainingSeconds -= 1
# this area and the while check is interruptable.
# end while
except KeyboardInterrupt as e:
logger.debug("=> Sleep interrupted. %s" % (str(e)))
# raised when user presses CTRL-C
self.stop()
# wait a little more for the thread to fully stop
while self.isAlive():
self.join(1) # seconds
# this area and the while check is interruptable.
# end while
# propagate exception
raise e
# end try-except
# end def
def sleep(self, seconds):
"""
Sleep for the given seconds.
"""
time.sleep(seconds)
# end def
# end class
| hysds/hysds | hysds/pymonitoredrunner/commons/thread/AbstractInterruptableThread.py | AbstractInterruptableThread.py | py | 4,981 | python | en | code | 15 | github-code | 13 |
5455497762 | from rest_framework import serializers
from rest_framework.fields import EmailField, CharField
from course.models import Course, Tutor
class TutorSerializer(serializers.ModelSerializer):
name = CharField(allow_blank=True, max_length=254, required=False)
email = EmailField(allow_blank=True, max_length=254, required=False)
class Meta:
model = Tutor
fields = ('id', 'name', 'room', 'school', 'email')
class CourseSerializer(serializers.ModelSerializer):
class Meta:
model = Course
fields = ('id', 'name', 'credits', 'duration', 'tutor', 'about')
tutor = TutorSerializer(many=True)
def create(self, validated_data):
tutors_data = validated_data.pop("tutor")
course = Course.objects.create(**validated_data)
for tutor_data in tutors_data:
try:
tutor = Tutor.objects.get(email=tutor_data['email'])
course.tutor.add(tutor)
except Tutor.DoesNotExist:
tutor = Tutor.objects.create(**tutor_data)
course.tutor.add(tutor)
return course
def update(self, instance, validated_data):
instance.name = validated_data.get("name", instance.name)
instance.credits = validated_data.get("credits", instance.credits)
instance.duration = validated_data.get("duration", instance.duration)
instance.about = validated_data.get("about", instance.about)
tutors_data = validated_data.pop("tutor")
print('updated')
for tutor_data in tutors_data:
try:
tutor = Tutor.objects.get(email=tutor_data['email'])
instance.tutor.add(tutor)
except Tutor.DoesNotExist:
tutor = Tutor.objects.create(**tutor_data)
instance.tutor.add(tutor)
instance.save()
return instance
| adds68/mmu-course-api | api/serializers.py | serializers.py | py | 1,895 | python | en | code | 0 | github-code | 13 |
6699081987 | import base64
import json
import re
import sys
def extract_vmess_info(vmess_address):
"""
Extract information from VMESS address
"""
"""
Regular expression pattern to match the base64-encoded JSON payload
"""
pattern = r"vmess://(.*)"
match = re.match(pattern, vmess_address)
if match:
base64_payload = match.group(1)
try:
decoded_payload = base64.b64decode(base64_payload).decode('utf-8')
"""
Parse the decoded payload JSON to extract the information you need
"""
vmess_info = json.loads(decoded_payload)
# server = vmess_info['add']
# port = vmess_info['port']
# user_id = vmess_info['id']
# security = vmess_info.get('tls', '')
print(vmess_info)
# return server, port, user_id, security
except ValueError as e:
print("Invalid VMESS address:", e)
else:
print("Invalid VMESS address")
def vmess_2_server_info(argv):
if len(argv) < 2:
print(f"Usage: {argv[0]} filename")
sys.exit(-1)
else:
with open(argv[1], "r") as f:
lines = f.readlines()
for vmess_address in lines:
# Extract information from VMESS address
extract_vmess_info(vmess_address)
if __name__ == "__main__":
argv = sys.argv
vmess_2_server_info(argv)
| jayxin/vmess_2_v2ray_config | py/vmess_2_server_info.py | vmess_2_server_info.py | py | 1,435 | python | en | code | 0 | github-code | 13 |
42318394189 | import tornado.web
import sqlite3
import json
import logging
from config import *
from webserver.utils import *
# the read-only sqlite connection for this server
# "?mode=ro" requires the newest sqlite python3.4 wrappers to be enabled
dbcon = sqlite3.connect(DBPREFIX + 'media.db?mode=ro', uri=True)
dbcur = dbcon.cursor()
# the main website
class MainHandler(BaseHandler):
def get(self):
self.render("media/main.tpl")
# the tag handler which deals with the tags
class TagViewHandler(BaseHandler):
def get(self, tag):
isAdmin = False
if tag == ADMIN_SECRET:
# list all files ordered by upload date
isAdmin = True
dbcur.execute('SELECT * FROM files order by date_created desc ')
else:
dbcur.execute('SELECT * FROM files where tag = :tag or batchtag = :tag order by date_created asc', {'tag':tag})
files = fetchResultList(dbcur)
if len(files) == 0:
raise tornado.web.HTTPError(404)
return
# management view
admintag = self.get_argument('admin', False)
if isAdmin or admintag:
firstFile = next(iter(files))
if isAdmin or firstFile['changetag'] == admintag:
for i in range(0,len(files)):
files[i]['videoinfostr'] = files[i]['videoinfo']
if len(files[i]['videoinfo']) == 0:
files[i]['videoinfo'] = {}
else:
try:
files[i]['videoinfo'] = json.loads(files[i]['videoinfo'])
except:
files[i]['videoinfo'] = {}
self.render("media/filesoverview.tpl", files=files, tag=tag, admintag=admintag, batchtag=firstFile['batchtag'], isAdmin=isAdmin)
else:
raise tornado.web.HTTPError(403)
return
# if one file, then view that file directly
if len(files) == 1:
file = next(iter(files))
fn = 'media_%010d.bin' % file['id']
return self.nginxDownload(UPLOADS_MEDIA, fn, file['filename'], file['filesize'], True, int(file['videosnapshottime']))
# if multiple files, use the gallery mode
self.render("media/gallery.tpl", files=files, tag=tag)
# the video viewer
class TagViewVideoHandler(BaseHandler):
def get(self, tag):
self.render("media/videoplayer.tpl", tag=tag)
handlers = [
(r"/", MainHandler),
(r"/v/(?P<tag>[^\/]+)", TagViewVideoHandler),
(r"/(?P<tag>[^\/]+)", TagViewHandler),
(r"/(?P<tag>[^\/]+)/.*", TagViewHandler)
] | BeamNG/mediashare | webserver/media_ro.py | media_ro.py | py | 2,645 | python | en | code | 0 | github-code | 13 |
20695012191 | # app.py
from flask import Flask, render_template, request, send_file
import cv2
import numpy as np
import io
from boundingBoxDrawer import boundingBoxDrawer
from ensembleModel import ensembleModel
from objDetectionRCNN import objDetectionRCNN
from objDetectionY8 import objDetectionY8
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/upload', methods=['POST'])
def upload():
if 'file' not in request.files:
return "No file part"
file = request.files['file']
if file.filename == '':
return 'No selected file'
if file:
#Read the image-----------------------------------------------
npimg = np.fromstring(file.read(), np.uint8)
img = cv2.imdecode(npimg, cv2.IMREAD_COLOR)
#Get a copy of the image--------------------------------------
image = img.copy()
#Perform object detection with YoloV8-------------------------
detection_list_Y8 = objDetectionY8(image)
#Perform object detection with FRCNN--------------------------
detection_list_RCNN = objDetectionRCNN(image)
#Ensemble two models------------------------------------------
detection_list = ensembleModel(detection_list_Y8, detection_list_RCNN)
print(detection_list)
#Draw bounding boxes------------------------------------------
objDetImg = boundingBoxDrawer(detection_list, image)
#Convert the image back to bytes for display------------------
_, img_encoded = cv2.imencode('.jpg', image) #objDetImg)
img_bytes = img_encoded.tobytes()
return send_file(io.BytesIO(img_bytes), mimetype='image/jpeg')
if __name__ == '__main__':
app.run(debug=True)
| AAchintha97/objDetection | app.py | app.py | py | 1,761 | python | en | code | 0 | github-code | 13 |
35871715038 | from typing import Union, cast
import kachery_p2p as kp
import numpy as np
class VectorField3D:
def __init__(self, arg: Union[dict, str]):
if isinstance(arg, str):
x = kp.load_json(arg)
if not x:
raise Exception(f'Unable to load: {arg}')
arg = cast(dict, x)
self._load(arg)
self._arg = arg
def serialize(self):
return self._arg
@property
def xgrid(self) -> np.ndarray:
return self._xgrid
@property
def ygrid(self) -> np.ndarray:
return self._ygrid
@property
def zgrid(self) -> np.ndarray:
return self._zgrid
@property
def dim(self) -> int:
return self._values.shape[0]
@property
def values(self) -> np.ndarray:
return self._values
def _load(self, arg: dict):
format = arg.get('vectorfield3d_format')
data = arg.get('data', {})
if format == 'pkl_v1':
pkl_uri = data['pkl_uri']
x = kp.load_pkl(pkl_uri)
if x is None:
raise Exception(f'Unable to load: {pkl_uri}')
self._xgrid = x['xgrid']
self._ygrid = x['ygrid']
self._zgrid = x['zgrid']
self._values = x['values']
else:
raise Exception(f'Unexpected vector3d format: {format}')
@staticmethod
def from_numpy(*, xgrid: np.ndarray, ygrid: np.ndarray, zgrid: np.ndarray, values: np.ndarray):
assert values.ndim == 4
assert values.shape[1] == len(xgrid)
assert values.shape[2] == len(ygrid)
assert values.shape[3] == len(zgrid)
return VectorField3D({
'vectorfield3d_format': 'pkl_v1',
'data': {
'pkl_uri': kp.store_pkl({
'xgrid': xgrid,
'ygrid': ygrid,
'zgrid': zgrid,
'values': values
})
}
}) | scratcharchive/surfaceview2 | src/python/surfaceview2/vectorfield3d/vectorfield3d.py | vectorfield3d.py | py | 1,962 | python | en | code | 0 | github-code | 13 |
35230357062 | #!/usr/bin/python3
"""
This is the "5-text_indentation" module
for the Holberton School Higher Level Programming track.
The 5-text_indentation module supplies one function, matrix_divided().
"""
def text_indentation(text):
""" Prints a text with 2 new lines after
each of these characters: ., ? and : """
if type(text) is not str:
raise TypeError("text must be a string")
text = text.replace('\n', ' ')
for c in ".:?":
text = text.replace(c, c+"\n\n")
lines = (ln.strip() for ln in text.split("\n"))
print(*lines, sep="\n", end='')
if __name__ == "__main__":
import doctest
doctest.testfile("tests/5-text_indentation.txt")
| fernandogmo/holbertonschool-higher_level_programming | 0x07-python-test_driven_development/5-text_indentation.py | 5-text_indentation.py | py | 686 | python | en | code | 1 | github-code | 13 |
16101328592 | from datetime import datetime
import json
import os
import re
import csv
from lxml import etree
import requests
BASE_URL = 'https://s.weibo.com'
TXT_DIR = './txt'
def getHTML(url, needPretty=False):
''' 获取网页 HTML 返回字符串
Args:
url: str, 网页网址
needPretty: bool, 是否需要美化(开发或测试时可用)
Returns:
HTML 字符串
'''
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36'
}
response = requests.get(url, headers=headers)
return response.text
def save(filename, content, time_now):
''' 写文件
Args:
filename: str, 文件路径
content: str/dict, 需要写入的内容
Returns:
None
'''
filename_txt = os.path.join(TXT_DIR, filename + '.txt')
with open(filename_txt, 'w', encoding = 'UTF-8') as wf:
for text in content:
wf.write(str(text) +','+ str(content[text]['hot']) +','+ str(time_now) + '\n')
# 使用 xpath 解析 HTML
def parseHTMLByXPath(content):
''' 使用 xpath 解析 HTML, 提取榜单信息
Args:
content: str, 待解析的 HTML 字符串
Returns:
榜单信息的字典 字典
'''
html = etree.HTML(content)
titles = html.xpath('//tr[position()>1]/td[@class="td-02"]/a[not(contains(@href, "javascript:void(0);"))]/text()')
hrefs = html.xpath('//tr[position()>1]/td[@class="td-02"]/a[not(contains(@href, "javascript:void(0);"))]/@href')
hots = html.xpath('//tr[position()>1]/td[@class="td-02"]/a[not(contains(@href, "javascript:void(0);"))]/../span/text()')
titles = [title.strip() for title in titles]
hrefs = [BASE_URL + href.strip() for href in hrefs]
hots = [int(hot.strip()) for hot in hots]
correntRank = {}
for i, title in enumerate(titles):
correntRank[title] = {'href': hrefs[i], 'hot': hots[i]}
return correntRank
# 更新本日榜单
def updateJSON(correntRank):
''' 更新当天对应小时的 csv 文件
Args:
correntRank: dict, 此时榜单信息
Returns:
排序后的榜单信息字典(小时计)
'''
time_now = datetime.now().strftime('%Y/%m/%d %H:%M:%S')
filename = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
# 文件不存在则创建
#if not os.path.exists(filename):
# os.mknod(filename)
nowRank = {}
for k, v in correntRank.items():
nowRank[k] = v
# 将榜单按 hot 值排序
rank = {k: v for k, v in sorted(nowRank.items(), key=lambda item: item[1]['hot'], reverse=True)}
# 更新当天榜单 csv 文件
save(filename, rank, time_now)
return rank
def main():
url = '/top/summary?cate=realtimehot'
content = getHTML(BASE_URL + url)
correntRank = parseHTMLByXPath(content)
res = updateJSON(correntRank)
if __name__ == '__main__':
main()
| Runner1027/Weibo_Hot_Search | weibo.py | weibo.py | py | 2,963 | python | en | code | 3 | github-code | 13 |
2625039415 | # -*- coding: utf-8 -*-
import pytz
import datetime
import json
from pyramid.view import view_config
from stalker import db, Project, Status, Budget, BudgetEntry, Good, Entity, \
Type, Studio, StatusList, Task
from stalker.db.session import DBSession
import transaction
from webob import Response
import stalker_pyramid
import logging
from stalker_pyramid.views import get_date
#logger = logging.getLogger(__name__)
#logger.setLevel(logging.DEBUG)
from stalker_pyramid import logger_name
logger = logging.getLogger(logger_name)
@view_config(
route_name='create_budget_dialog',
renderer='templates/budget/dialog/create_budget_dialog.jinja2',
permission='Create_Budget'
)
def create_budget_dialog(request):
"""called when creating budget
"""
came_from = request.params.get('came_from', '/')
# logger.debug('came_from %s: '% came_from)
# get logged in user
from stalker_pyramid.views import get_logged_in_user,\
milliseconds_since_epoch
logged_in_user = get_logged_in_user(request)
project_id = request.params.get('project_id', -1)
project = Project.query.filter(Project.id == project_id).first()
if not project:
return Response('No project found with id: %s' % project_id, 500)
from stalker_pyramid.views.auth import PermissionChecker
return {
'has_permission': PermissionChecker(request),
'logged_in_user': logged_in_user,
'project': project,
'came_from': came_from,
'mode': 'Create',
'milliseconds_since_epoch': milliseconds_since_epoch
}
@view_config(
route_name='create_budget',
permission='Create_Budget'
)
def create_budget(request):
"""runs when creating a budget
"""
from stalker_pyramid.views import get_logged_in_user, milliseconds_since_epoch
logged_in_user = get_logged_in_user(request)
utc_now = datetime.datetime.now(pytz.utc)
project_id = request.params.get('project_id', None)
project = Project.query.filter(Project.id == project_id).first()
if not project:
return Response('There is no project with id: %s' % project_id, 500)
name = request.params.get('name', None)
type_id = request.params.get('type_id', None)
type_ = Type.query.filter(Type.id == type_id).first()
description = request.params.get('description', "")
logger.debug("type_id : %s" % type_id)
logger.debug("name : %s" % name)
logger.debug("description : %s" % description)
if not name:
return Response('Please supply a name', 500)
if not type_:
return Response('There is no type with id: %s' % type_id, 500)
status = Status.query.filter(Status.name == 'Planning').first()
generic_data = {
'approved_total_price': 0,
'total_price': 0,
'total_msrp': 0,
'total_cost': 0,
'realized_total_price': 0,
'milestones': [],
'folders': [],
'links': [],
'calendar_editing': 'OFF',
'start_date': milliseconds_since_epoch(project.start),
'end_date': milliseconds_since_epoch(project.end),
'related_budgets': []
}
budget = Budget(
project=project,
name=name,
type=type_,
status=status,
description=description,
created_by=logged_in_user,
date_created=utc_now,
date_updated=utc_now,
generic_text=json.dumps(generic_data)
)
DBSession.add(budget)
transaction.commit()
budget = Budget.query.filter(Budget.name == name).first()
new_budget_id = budget.id
# related_budgets = budget.get_generic_text_attr('related_budgets')
# related_budgets.append(budget.id)
# budget.set_generic_text_attr('related_budgets', related_budgets)
return Response("/budgets/%s/view" % new_budget_id)
@view_config(
route_name='update_budget_dialog',
renderer='templates/budget/dialog/update_budget_dialog.jinja2',
permission='Update_Budget'
)
def update_budget_dialog(request):
"""called when updating dailies
"""
from stalker_pyramid.views import get_logged_in_user,\
milliseconds_since_epoch
logged_in_user = get_logged_in_user(request)
came_from = request.params.get('came_from', '/')
budget_id = request.matchdict.get('id', -1)
budget = Budget.query.filter(Budget.id == budget_id).first()
studio = Studio.query.first()
from stalker_pyramid.views.auth import PermissionChecker
return {
'mode': 'Update',
'has_permission': PermissionChecker(request),
'studio': studio,
'logged_in_user': logged_in_user,
'entity': budget,
'came_from': came_from,
'milliseconds_since_epoch': milliseconds_since_epoch,
}
@view_config(
route_name='update_budget',
permission='Update_Budget'
)
def update_budget(request):
"""runs when updating a budget
"""
logger.debug("update_budget starts")
from stalker_pyramid.views import get_logged_in_user
logged_in_user = get_logged_in_user(request)
utc_now = datetime.datetime.now(pytz.utc)
budget_id = request.matchdict.get('id', -1)
budget = Budget.query.filter(Budget.id == budget_id).first()
if not budget:
transaction.abort()
return Response('No budget with id : %s' % budget_id, 500)
name = request.params.get('name', None)
type_id = request.params.get('type_id', -1)
type_ = Type.query.filter(Type.id == type_id).first()
start_date = request.params.get('start_date', None)
end_date = request.params.get('end_date', None)
description = request.params.get('description', " ")
logger.debug("type_id : %s" % type_id)
logger.debug("name : %s" % name)
logger.debug("description : %s" % description)
logger.debug("start_date : %s" % start_date)
logger.debug("end_date : %s" % end_date)
if not name:
return Response('Please supply a name', 500)
if not type_:
return Response('There is no type with id: %s' % type_id, 500)
if not start_date:
return Response('Please supply a start_date', 500)
if not end_date:
return Response('Please supply a end_date', 500)
budget.name = name
budget.description = description
budget.type = type_
# related_budgets = budget.get_generic_text_attr('related_budgets')
#
# if not related_budgets:
# data = json.loads(budget.generic_text)
# data['related_budgets'] = []
# budget.generic_text = json.dumps(data)
#
# logger.debug("related : %s" % budget.get_generic_text_attr('related_budgets'))
time_delta = int(start_date) - budget.get_generic_text_attr('start_date')
budget.set_generic_text_attr('start_date', int(start_date))
budget.set_generic_text_attr('end_date', int(end_date))
check_project_start_end_date(budget.project)
from stalker_pyramid.views.budgetentry import update_budgetenties_startdate
update_budgetenties_startdate(budget, time_delta)
budget.date_updated = utc_now
budget.updated_by = logged_in_user
logger.debug("update_budget ends")
request.session.flash('success: Successfully updated budget')
return Response('Successfully updated budget')
@view_config(
route_name='inline_update_budget',
permission='Update_Budget'
)
def inline_update_budget(request):
"""Inline updates the given budget with the data coming from the request
"""
logger.debug('INLINE UPDATE BUDGET IS RUNNING')
from stalker_pyramid.views import get_logged_in_user, \
get_date_range, milliseconds_since_epoch
logged_in_user = get_logged_in_user(request)
utc_now = datetime.datetime.now(pytz.utc)
# *************************************************************************
# collect data
attr_name = request.params.get('attr_name', None)
attr_value = request.params.get('attr_value', None)
logger.debug('attr_name %s', attr_name)
logger.debug('attr_value %s', attr_value)
# get task
budget_id = request.matchdict.get('id', -1)
budget = Budget.query.filter(Budget.id == budget_id).first()
# update the task
if not budget:
transaction.abort()
return Response("No budget found with id : %s" % budget_id, 500)
if attr_name and attr_value:
logger.debug('attr_name %s', attr_name)
if attr_name == 'start_and_end_dates':
logger.debug('attr_name %s', attr_name)
start, end = attr_value.split(' - ')
budget.set_generic_text_attr('start_date', int(start))
budget.set_generic_text_attr('end_date', int(end))
logger.debug("int(start) : %s" % budget.get_generic_text_attr('start_date'))
logger.debug("int(end) : %s" % budget.get_generic_text_attr('end_date'))
check_project_start_end_date(budget.project)
budget.updated_by = logged_in_user
budget.date_updated = utc_now
else:
setattr(budget, 'attr_name', attr_value)
else:
logger.debug('not updating')
return Response("MISSING PARAMETERS", 500)
return Response(
'Budget updated successfully %s %s' % (attr_name, attr_value)
)
def check_project_start_end_date(project):
"""updates project start end date by checking budgets' start end dates
"""
budgets = project.budgets
logger.debug('check_project_start_end_date budgets : %s' % len(budgets))
start = 0
end = 0
for budget in budgets:
if budget.status.code not in ['RJD', 'CNCLD']:
start_asmilliseconds = budget.get_generic_text_attr('start_date')
end_asmilliseconds = budget.get_generic_text_attr('end_date')
if start == 0 or start_asmilliseconds < start:
start = start_asmilliseconds
if end == 0 or end_asmilliseconds > end:
end = end_asmilliseconds
if start != 0:
from stalker_pyramid.views import from_milliseconds
project.start = from_milliseconds(start)
project.end = from_milliseconds(end)
logger.debug('check_project_start_end_date ends')
@view_config(
route_name='get_project_budgets',
renderer='json',
permission='List_Budget'
)
def get_budgets(request):
"""returns budgets with the given id
"""
project_id = request.matchdict.get('id')
logger.debug(
'get_budgets is working for the project which id is: %s' % project_id
)
status_code = request.params.get('status_code', None)
status = Status.query.filter(Status.code == status_code).first()
sql_query = """
select
"Budgets".id,
"Budget_SimpleEntities".name,
"Created_By_SimpleEntities".created_by_id,
"Created_By_SimpleEntities".name,
"Type_SimpleEntities".name,
(extract(epoch from "Budget_SimpleEntities".date_created) * 1000)::bigint as date_created,
"Budget_SimpleEntities".description,
"Statuses_SimpleEntities".name,
"Statuses".code,
"Budget_SimpleEntities".generic_text
from "Budgets"
join "SimpleEntities" as "Budget_SimpleEntities" on "Budget_SimpleEntities".id = "Budgets".id
join "Statuses" on "Statuses".id = "Budgets".status_id
join "SimpleEntities" as "Statuses_SimpleEntities" on "Statuses_SimpleEntities".id = "Statuses".id
join "SimpleEntities" as "Created_By_SimpleEntities" on "Created_By_SimpleEntities".id = "Budget_SimpleEntities".created_by_id
left outer join "SimpleEntities" as "Type_SimpleEntities" on "Type_SimpleEntities".id = "Budget_SimpleEntities".type_id
join "Projects" on "Projects".id = "Budgets".project_id
where "Projects".id = %(project_id)s %(additional_condition)s
"""
additional_condition = ''
if status:
additional_condition = 'and "Budgets_Statuses".id=%s' % status.id
budgets = []
sql_query = sql_query % {
'project_id': project_id,
'additional_condition': additional_condition
}
from stalker_pyramid.views.auth import PermissionChecker
result = DBSession.connection().execute(sql_query)
update_budget_permission = \
PermissionChecker(request)('Update_Budget')
for r in result.fetchall():
budget = {
'id': r[0],
'name': r[1],
'created_by_id': r[2],
'created_by_name': r[3],
'item_view_link': '/budgets/%s/view' % r[0],
'type_name': r[4],
'date_created': r[5],
'description': r[6],
'status_name': r[7],
'status_code': r[8],
'generic_data': json.loads(r[9]) if r[9] else {},
}
if update_budget_permission:
budget['item_update_link'] = \
'/budgets/%s/update/dialog' % budget['id']
budget['item_remove_link'] =\
'/entities/%s/delete/dialog?came_from=%s' % (
budget['id'],
request.current_route_path()
)
budget['item_duplicate_link'] =\
'/budgets/%s/duplicate/dialog?came_from=%s' % (
budget['id'],
request.current_route_path()
)
budgets.append(budget)
resp = Response(
json_body=budgets
)
return resp
@view_config(
route_name='get_project_budgets_count',
renderer='json',
permission='List_Budget'
)
def get_budgets_count(request):
"""missing docstring
"""
project_id = request.matchdict.get('id')
logger.debug(
'get_budgets_count is working for the project which id is %s' %
project_id
)
sql_query = """
select count(1) from (
select
"Budgets".id
from "Budgets"
join "Projects" on "Projects".id = "Budgets".project_id
where "Projects".id = %(project_id)s
) as data
"""
sql_query = sql_query % {'project_id': project_id}
from sqlalchemy import text # to be able to use "%" sign use this function
result = DBSession.connection().execute(text(sql_query))
return result.fetchone()[0]
@view_config(
route_name='view_budget_calendar',
renderer='templates/budget/view/view_budget_calendar.jinja2',
permission='Read_Budget'
)
@view_config(
route_name='view_budget_table_summary',
renderer='templates/budget/view/view_budget_table.jinja2',
permission='Read_Budget'
)
@view_config(
route_name='view_budget_table_detail',
renderer='templates/budget/view/view_budget_table.jinja2',
permission='Read_Budget'
)
@view_config(
route_name='view_budget_report',
renderer='templates/budget/view/view_budget_report.jinja2',
permission='Read_Budget'
)
def view_budget(request):
"""view_budget
"""
logger.debug('view_budget')
from stalker_pyramid.views import get_logged_in_user
logged_in_user = get_logged_in_user(request)
studio = Studio.query.first()
budget_id = request.matchdict.get('id')
budget = Budget.query.filter_by(id=budget_id).first()
total_price = budget.get_generic_text_attr('total_price')
total_cost = budget.get_generic_text_attr('total_cost')
approved_total_price = budget.get_generic_text_attr('approved_total_price')
projects = Project.query.all()
mode = request.matchdict.get('mode', None)
logger.debug("mode %s " % mode)
came_from = request.params.get('came_from', request.url)
from stalker_pyramid.views import milliseconds_since_epoch
from stalker_pyramid.views.auth import PermissionChecker
return {
'mode': mode,
'entity': budget,
'has_permission': PermissionChecker(request),
'logged_in_user': logged_in_user,
'milliseconds_since_epoch': milliseconds_since_epoch,
'stalker_pyramid': stalker_pyramid,
'generic_data': json.loads(budget.generic_text),
'budget_calendar_editing': "ON",
'approved_total_price': approved_total_price,
'total_price': total_price,
'total_cost':total_cost,
'projects': projects,
'studio': studio,
'came_from': came_from
}
@view_config(
route_name='change_budget_status_dialog',
renderer='templates/budget/dialog/change_budget_status_dialog.jinja2',
permission='Update_Budget'
)
def change_budget_status_dialog(request):
"""change_budget_status_dialog
"""
logger.debug('change_budget_status_dialog is starts')
budget_id = request.matchdict.get('id')
budget = Budget.query.filter_by(id=budget_id).first()
status_code = request.matchdict.get('status_code')
came_from = request.params.get('came_from', '/')
budget_total_price = budget.get_generic_text_attr('total_price')
return {
'status_code': status_code,
'came_from': came_from,
'budget': budget,
'budget_total_price': budget_total_price
}
@view_config(
route_name='change_budget_status',
permission='Update_Budget'
)
def change_budget_status(request):
from stalker_pyramid.views import get_logged_in_user
logged_in_user = get_logged_in_user(request)
utc_now = datetime.datetime.now(pytz.utc)
budget_id = request.matchdict.get('id')
budget = Budget.query.filter_by(id=budget_id).first()
if not budget:
transaction.abort()
return Response('There is no budget with id %s' % budget_id, 500)
status_code = request.matchdict.get('status_code')
status = Status.query.filter(Status.code == status_code).first()
if not status:
transaction.abort()
return Response('There is no status with code %s' % status_code, 500)
approved_total_price = request.params.get('approved_total_price', None)
if approved_total_price:
budget.set_generic_text_attr("approved_total_price", approved_total_price)
description = request.params.get('description', '')
from stalker_pyramid.views.note import create_simple_note
note = create_simple_note(description,
status.name,
"status_%s" % status.code.lower(),
status.name,
logged_in_user,
utc_now)
budget.notes.append(note)
budget.status = status
budget.updated_by = logged_in_user
budget.date_updated = utc_now
return Response('Budget status is changed successfully')
@view_config(
route_name='duplicate_budget_dialog',
renderer='templates/budget/dialog/duplicate_budget_dialog.jinja2',
permission='Create_Budget'
)
def duplicate_budget_dialog(request):
"""duplicate_budget_dialog
"""
logger.debug('duplicate_budget_dialog is starts')
budget_id = request.matchdict.get('id')
budget = Budget.query.filter_by(id=budget_id).first()
action = '/budgets/%s/duplicate' % budget_id
came_from = request.params.get('came_from', '/')
message = 'Are you sure you want to <strong>change %s type</strong>?'% budget.name
logger.debug('action: %s' % action)
return {
'budget': budget,
'message': message,
'came_from': came_from,
'action': action
}
@view_config(
route_name='duplicate_budget',
permission='Create_Budget'
)
def duplicate_budget(request):
from stalker_pyramid.views import get_logged_in_user
logged_in_user = get_logged_in_user(request)
utc_now = datetime.datetime.now(pytz.utc)
budget_id = request.matchdict.get('id')
budget = Budget.query.filter_by(id=budget_id).first()
if not budget:
transaction.abort()
return Response('There is no budget with id %s' % budget_id, 500)
name = request.params.get('name', None)
description = request.params.get('description', '')
status_code = request.params.get('status_code', 'PLN')
logger.debug("status_code %s " % status_code)
from stalker_pyramid.views.type import query_type
budget_type = budget.type
project = budget.project
status = Status.query.filter(Status.code == status_code).first()
if not name:
return Response('Please supply a name', 500)
if not status:
return Response('Please supply a status', 500)
new_budget = Budget(
project=project,
name=name,
type=budget_type,
status=status,
description=description,
created_by=logged_in_user,
date_created=utc_now,
date_updated=utc_now,
generic_text=budget.generic_text
)
DBSession.add(new_budget)
# related_budgets = budget.get_generic_text_attr('related_budgets')
# related_budgets.append(new_budget.id)
# budget.set_generic_text_attr('related_budgets', related_budgets)
for budget_entry in budget.entries:
new_budget_entry = BudgetEntry(
budget=new_budget,
good=budget_entry.good,
name=budget_entry.name,
type=budget_entry.type,
amount=budget_entry.amount,
cost=budget_entry.cost,
msrp=budget_entry.msrp,
price=budget_entry.price,
unit=budget_entry.unit,
description=budget_entry.description,
created_by=logged_in_user,
date_created=utc_now,
date_updated=utc_now,
generic_text=budget_entry.generic_text
)
DBSession.add(new_budget_entry)
if status_code == 'ATV':
project.set_generic_text_attr('active_budget_id', new_budget.id)
logger.debug("active_budget_id %s " % project.get_generic_text_attr('active_budget_id'))
request.session.flash('success: Budget is duplicated successfully')
return Response('Budget is duplicated successfully')
class ReportExporter(object):
"""A base class for report exporters
"""
def __init__(self, name='', template=''):
self.name = name
self.template = template
def export(self):
"""virtual method that needs to be implemented on child classes
"""
raise NotImplementedError()
@view_config(
route_name='generate_report',
permission='Create_Budget'
)
def generate_report_view(request):
"""generates report and allows the user to download it
"""
from stalker_pyramid.views import get_logged_in_user
logged_in_user = get_logged_in_user(request)
utc_now = datetime.datetime.now(pytz.utc)
budget_id = request.matchdict['id']
from stalker import Budget
budget = Budget.query.filter(Budget.id == budget_id).first()
if budget:
# type = query_type('Budget', 'Pending')
# total_price = request.params.get('total_price', 0)
#
# logger.debug('total_price %s ' % total_price)
#
# budget.generic_text = update_generic_text(budget.generic_text,
# 'total_price',
# total_price,
# 'equal')
#
# budget.type = type
# budget.updated_by = logged_in_user
# budget.date_updated = utc_now
project = budget.project
# client = project.client
# if not client:
# raise Response('No client in the project')
status = Status.query.filter(Status.code == "PREV").first()
budget.status = status
budget.updated_by = logged_in_user
budget.date_updated = utc_now
logger.debug('generating report:')
from stalker_pyramid.views.client import generate_report
temp_report_path = generate_report(budget)
logger.debug('temp_report_path: %s' % temp_report_path)
from pyramid.response import FileResponse
response = FileResponse(
temp_report_path,
request=request,
content_type='application/force-download'
)
report_file_nice_name = '%s_%s.xlsx' % (
project.code, budget.name.replace(' ', '_')
)
response.headers['content-disposition'] = \
str('attachment; filename=%s' % report_file_nice_name)
return response
@view_config(
route_name='set_budget_totals',
permission='Update_Budget'
)
def set_budget_totals(request):
"""set_budget_totals
"""
logger.debug('set_budget_totals method starts')
from stalker_pyramid.views import get_logged_in_user
logged_in_user = get_logged_in_user(request)
utc_now = datetime.datetime.now(pytz.utc)
budget_id = request.matchdict.get('id')
budget = Budget.query.filter_by(id=budget_id).first()
if not budget:
transaction.abort()
return Response('There is no budget with id %s' % budget_id, 500)
total_cost = request.params.get('total_cost', 0)
total_price = request.params.get('total_price', 0)
total_msrp = request.params.get('total_msrp', 0)
budget.set_generic_text_attr("total_cost", total_cost)
budget.set_generic_text_attr("total_price", total_price)
budget.set_generic_text_attr("total_msrp", total_msrp)
budget.updated_by = logged_in_user
budget.date_updated = utc_now
return Response("Successfully, total cost is set to %s and total price is set to %s" %
(total_cost, total_price))
@view_config(
route_name='create_budget_tasks_into_project',
permission='Update_Project'
)
def create_budget_tasks_into_project(request):
"""create_budget_tasks_into_project
"""
logger.debug('create_budget_tasks_into_project method starts')
from stalker_pyramid.views import get_logged_in_user
logged_in_user = get_logged_in_user(request)
utc_now = datetime.datetime.now(pytz.utc)
budget_id = request.matchdict.get('id')
budget = Budget.query.filter_by(id=budget_id).first()
status_list = StatusList.query \
.filter_by(target_entity_type='Task') \
.first()
folders = budget.get_generic_text_attr("folders")
new_tasks_list = []
for folder in folders:
budget_id = request.matchdict.get('id')
budget = Budget.query.filter_by(id=budget_id).first()
status_list = StatusList.query \
.filter_by(target_entity_type='Task') \
.first()
kwargs = {}
kwargs['project'] = budget.project
kwargs['parent'] = None
kwargs['name'] = folder['name']
kwargs['code'] = folder['name']
kwargs['description'] = folder['description']
kwargs['created_by'] = logged_in_user
kwargs['date_created'] = utc_now
kwargs['type'] = None
kwargs['status_list'] = status_list
kwargs['schedule_model'] = 'effort'
kwargs['schedule_timing'] = 1
kwargs['schedule_unit'] = 'h'
kwargs['responsible'] = []
kwargs['resources'] = []
kwargs['depends'] = []
kwargs['priority'] = 500
new_tasks_list.append(kwargs)
create_task_to_project(kwargs)
DBSession.add(budget)
budgetentries = BudgetEntry.query.filter(BudgetEntry.budget == budget).all()
for budgetentry in budgetentries:
if budgetentry.get_generic_text_attr('dataSource') == 'Calendar':
secondaryFactors = budgetentry.get_generic_text_attr("secondaryFactor")
logger.debug('secondaryFactors: %s' % secondaryFactors)
if secondaryFactors:
for secondaryFactor in secondaryFactors:
kwargs = {}
kwargs['project'] = budget.project
filtered_folders = filter(lambda x: x['id'] == secondaryFactor['folder_id'], folders)
parent = None
if filtered_folders:
folder = filtered_folders[0]
with DBSession.no_autoflush:
parent = Task.query.filter(Task.name == folder['name']).first()
kwargs['parent'] = parent
kwargs['name'] = secondaryFactor['task_name']
kwargs['code'] = secondaryFactor['task_name']
# kwargs['description'] = secondaryFactor['description'] if secondaryFactor['description'] else ""
kwargs['created_by'] = logged_in_user
kwargs['date_created'] = utc_now
kwargs['type'] = None
kwargs['status_list'] = status_list
kwargs['schedule_model'] = 'effort'
kwargs['schedule_timing'] = budgetentry.amount
kwargs['schedule_unit'] = 'd'
kwargs['responsible'] = []
kwargs['resources'] = []
kwargs['depends'] = []
kwargs['priority'] = 500
new_tasks_list.append(kwargs)
create_task_to_project(kwargs)
DBSession.add(budget)
DBSession.add_all(budgetentries)
# for new_task_kwargs in new_tasks_list:
# create_task_to_project(new_task_kwargs)
return Response("successfully")
def create_task_to_project(kwargs):
from stalker.exceptions import CircularDependencyError, StatusError
from sqlalchemy.exc import IntegrityError
try:
new_entity = Task(**kwargs)
logger.debug('task %s' % new_entity.name)
DBSession.add(new_entity)
except (AttributeError, TypeError, CircularDependencyError) as e:
logger.debug('The Error Message: %s' % e)
response = Response('%s' % e, 500)
transaction.abort()
return response
else:
DBSession.add(new_entity)
try:
transaction.commit()
# DBSession.add_all(kwargs.values())
DBSession.add(kwargs['project'])
DBSession.add(kwargs['status_list'])
# DBSession.add(kwargs['parent'])
except IntegrityError as e:
logger.debug('The Error Message: %s' % str(e))
transaction.abort()
return Response(str(e), 500)
else:
logger.debug('flushing the DBSession, no problem here!')
DBSession.flush()
logger.debug('finished adding Task')
return Response('Task created successfully')
| eoyilmaz/stalker_pyramid | stalker_pyramid/views/budget.py | budget.py | py | 30,411 | python | en | code | 6 | github-code | 13 |
41567798191 | def solution(number, limit, power):
answer = []
for i in range(1, number+1):
count = 0
for j in range(1, int(i**0.5)+1):
if i % j == 0:
if j * j == i:
count +=1
else:
count += 2
answer.append(count)
for idx, value in enumerate(answer):
if value > limit:
answer[idx] = power
return sum(answer) | bnbbbb/Algotithm | 프로그래머스/unrated/136798. 기사단원의 무기/기사단원의 무기.py | 기사단원의 무기.py | py | 436 | python | en | code | 0 | github-code | 13 |
30534620434 | import PyPDF2
with open('dummy.pdf', 'rb') as file:
reader = PyPDF2.PdfFileReader(file) #PyPDF have method .pdfreader to read pdf but it can only read binary
page = reader.getPage(0) #Pypdf needs to know which pdf page to rotate
page.rotateCounterClockwise(180)
writer = PyPDF2.PdfFileWriter()
writer.addPage(page)
with open('changed.pdf', 'wb') as newfile:
writer.write(newfile)
| AmanVgit/PDFrotator | PDF_rotator.py | PDF_rotator.py | py | 410 | python | en | code | 0 | github-code | 13 |
40229622635 | #!/usr/bin/env python
# coding: utf8
import math
import numpy as np
import json
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('PangoCairo', '1.0')
from gi.repository import Gio, Gtk, Gdk, GLib
from gi.repository import Pango, PangoCairo
from gi.repository.GdkPixbuf import Pixbuf, PixbufRotation, InterpType
import cairo
from .datatypes import *
from .decorator import *
from .dataref import *
from .snake import Snake
from . import NAME, VERSION, AUTHOR, COPYRIGHT, LICENSE_TYPE
iprint = lambda msg: print(f'[INFO]: {msg}')
class Draw_Pack:
def __init__(self, cr=None):
self.cr = cr
def rect_round(self, x, y, lx, ly, r):
self.cr.move_to(x, y+r)
self.cr.arc(x + r, y + r, r, math.pi, -math.pi/2)
self.cr.rel_line_to(lx - 2*r, 0)
self.cr.arc(x + lx - r, y + r, r, -math.pi/2, 0)
self.cr.rel_line_to(0, ly - 2*r)
self.cr.arc(x + lx - r, y + ly - r, r, 0, math.pi/2)
self.cr.rel_line_to(-lx + 2*r, 0)
self.cr.arc(x + r, y + ly - r, r, math.pi/2, math.pi)
self.cr.close_path()
def circle_mark(self, x, y, lx, ly, r):
self.cr.arc(x+lx/2, y+ly/2, r, 0, 2*math.pi)
self.cr.stroke()
def cross_mark(self, x, y, lx, ly, r):
self.cr.move_to(x+r, y+r)
self.cr.line_to(x+lx-r, y+ly-r)
self.cr.move_to(x+lx-r, y+r)
self.cr.line_to(x+r, y+ly-r)
self.cr.stroke()
class Handler:
	"""Namespace of static Gtk signal callbacks.

	Every callback receives the SnakeApp instance as its last argument and
	reads/writes the app's state directly.
	"""
	@staticmethod
	def on_draw(widget, cr, app):
		"""Delegate the 'draw' signal to the app's draw pipeline."""
		app.update_draw(cr)
		# stop event pass on
		return True
	@staticmethod
	def on_toggled(widget, app):
		"""Get active state, sync to data; for tg_run also start/stop the move timer."""
		active_state = widget.get_active()
		widget_label = widget.get_child()
		label_text = widget_label.get_text()
		widget_label.set_markup(app.toggle_text(label_text, active_state))
		if widget is app.tg_auto:
			app.data['tg_auto'] = active_state
		elif widget is app.tg_run:
			app.data['tg_run'] = active_state
			if active_state:
				# disable combo once snake active
				app.area_combo.set_sensitive(False)
				app.inhibit_id = app.inhibit(app.window, Gtk.ApplicationInhibitFlags.IDLE, 'Snake is running')
				app.timeout_id = GLib.timeout_add(1000/app.data['speed'], app.timer_move, None)
			else:
				app.reset_timeout()
				app.reset_inhibit()
	@staticmethod
	def on_combo_changed(widget, app):
		"""Apply a newly selected area size from the combo's model."""
		t_iter = widget.get_active_iter()
		if t_iter:
			model = widget.get_model()
			app.sync_block_area_from_text(model[t_iter][0])
			app.req_draw_size_mini()
	@staticmethod
	def on_combo_entry_activate(widget, app):
		"""Validate a hand-typed area size; sync the combo model or revert the text."""
		entry_text = widget.get_text()
		# if text not changed
		if entry_text == app.get_block_area_text():
			return
		if app.sync_block_area_from_text(entry_text):
			# text valid and set
			model = app.area_combo.get_model()
			t_iter = model.get_iter_first()
			while t_iter:
				if model[t_iter][0] == entry_text:
					# if found, set active
					app.area_combo.set_active_iter(t_iter)
					break
				else:
					t_iter = model.iter_next(t_iter)
			else:
				# if not found, append data and set active
				t_iter = model.append([entry_text])
				app.area_combo.set_active_iter(t_iter)
			app.req_draw_size_mini()
		else:
			# invalid text, recovered from app.data
			widget.set_text(app.get_block_area_text())
	@staticmethod
	def on_spin_value_changed(widget, app):
		"""Sync the speed spin button into app.data."""
		app.data['speed'] = widget.get_value_as_int()
	@staticmethod
	def on_color_set(widget, app):
		"""Sync a color-chooser change into app.data and the draw pack."""
		app.sync_color(widget)
	@staticmethod
	def on_about(action, param, app):
		app.show_about_dialog()
	@staticmethod
	def on_about_exit(widget, response, app):
		# the dialog is destroyed and forgotten so it is re-created next time
		widget.destroy()
		app.about_dialog = None
	@staticmethod
	def on_keyshot(action, param, app):
		app.show_shortcuts()
	@staticmethod
	def on_keyshot_exit(widget, app):
		widget.destroy()
		app.shortcuts = None
	@staticmethod
	def on_reset(action, param, app):
		app.reset_game(reset_all=True)
	@staticmethod
	def on_save(action, param, app):
		app.save_or_load_game(is_save=True)
	@staticmethod
	def on_load(action, param, app):
		app.save_or_load_game(is_save=False)
	@staticmethod
	def on_keyboard_event(widget, event, app):
		"""
		keyshot:
			ui:
				esc: unfocus widget
				tab: switch focus
				h: hide/unhide panel
			game control:
				r: reset after gameover
				p: toggle pause/continue
				a: toggle auto/manual
				m: switch auto mode
				s: submode switch
				[]: speed down/up
				←→↑↓: direction control
			debug:
				t: toggle trace display
				g: toogle path and graph display
				G: force rescan, then display path and graph
				x: switch the display of (path, graph)
				R: snake reseed
		accel:
			<Ctrl>R	reset game
			<Ctrl>S	pause and save game
			<Ctrl>L	pause and load game
			<Ctrl>K	display the shortcuts
		"""
		# KeyName keeps case (distinguishes 'G'/'R' from 'g'/'r'), keyname is folded
		KeyName = Gdk.keyval_name(event.keyval)
		keyname = KeyName.lower()
		# if <Ctrl>, return and pass on
		if event.state & Gdk.ModifierType.CONTROL_MASK:
			return False
		KEY_PRESS = (event.type == Gdk.EventType.KEY_PRESS)
		KEY_RELEASE = (event.type == Gdk.EventType.KEY_RELEASE)
		# press 'ESC' to remove focus
		if KEY_PRESS and keyname == 'escape':
			app.window.set_focus(None)
			return True
		# if any widget focused, return False to pass on the event
		# switch focus by pass on the 'tab' event
		if app.window.get_focus() or (KEY_PRESS and keyname == 'tab'):
			return False
		# now handle all keyboard events here, without passing on
		# debug related keyshot
		if KEY_PRESS and keyname == 'h':
			app.panel.set_visible(not app.panel.get_visible())
		elif KEY_PRESS and keyname == 't':
			app.data['show_trace'] = not app.data['show_trace']
			app.draw.queue_draw()
		elif KEY_PRESS and keyname == 'x':
			"""
			display_md:
				True: regular: [path, graph]
				False: unsafe: [unsafe_path/col_path, graph_col]
			"""
			app.data['display_md'] = not app.data['display_md']
			iprint(f"display regular: {app.data['display_md']}")
			app.draw.queue_draw()
		elif KEY_PRESS and keyname == 'g':
			if KeyName == 'G':
				if not app.dpack.died:
					# scan only on alive
					app.snake.update_path_and_graph()
				app.data['show_graph'] = True
			else:
				app.data['show_graph'] = not app.data['show_graph']
			iprint(f"show map: {app.data['show_graph']}")
			app.draw.queue_draw()
		if app.dpack.died:
			if KEY_PRESS and keyname == 'r':
				app.reset_game()
				iprint('game reset')
			return True
		# forbid player mode related keyshot after died
		if keyname in [ 'up', 'down', 'left', 'right' ]:
			if KEY_PRESS:
				pixbuf = app.pix_arrow_key
				# may turn as long as the new aim is not the direct reverse
				if app.snake.aim != -app.map_arrow[keyname][1]:
					# save aim to buffer, apply only before the move
					app.snake_aim_buf = app.map_arrow[keyname][1]
			elif KEY_RELEASE:
				pixbuf = app.pix_arrow
			app.arrows[keyname].set_from_pixbuf(pixbuf.rotate_simple(app.map_arrow[keyname][0]))
		elif KEY_PRESS and keyname == 'p':
			state = app.tg_run.get_active()
			app.tg_run.set_active(not state)
		elif KEY_PRESS and keyname == 'a':
			state = app.tg_auto.get_active()
			app.tg_auto.set_active(not state)
		elif KEY_PRESS and keyname == 'bracketleft':
			app.bt_speed.spin(Gtk.SpinType.STEP_BACKWARD, 1)
		elif KEY_PRESS and keyname == 'bracketright':
			app.bt_speed.spin(Gtk.SpinType.STEP_FORWARD, 1)
		elif KEY_PRESS and KeyName == 'R':
			app.snake.reseed()
			iprint('snake random: reseed')
		elif KEY_PRESS and keyname == 's':
			app.data['sub_switch'] = not app.data['sub_switch']
			iprint(f"sub switch: {app.data['sub_switch']}")
		elif KEY_PRESS and keyname == 'm':
			# cycle through the AutoMode enum members
			automode_list = list(AutoMode)
			id_cur = automode_list.index(app.data['auto_mode'])
			id_next = (id_cur + 1) % len(automode_list)
			app.data['auto_mode'] = automode_list[id_next]
			iprint(f"auto_mode: {app.data['auto_mode'].name}")
		return True
class SnakeApp(Gtk.Application):
	"""Gtk application for the snake game.

	Owns the UI widgets, the Snake model, the settings dict (self.data),
	the move timer and the cairo draw pipeline.
	"""
	def __init__(self, *args, **kwargs):
		# allow multiple instance
		super().__init__(*args, application_id='rt.game.snake',
				flags=Gio.ApplicationFlags.NON_UNIQUE, **kwargs)
		self.window = None
		self.data = {
			'snake_width': 8,
			'block_size': 16,
			'block_area': {'width':40, 'height':28},
			'block_area_limit': {'min':10, 'max':999},
			'block_area_scale': 1,
			# NOTE(review): generator expression, single-use — consumed once in init_ui()
			'block_area_list': ( f'{i*20}x{i*20}' for i in range(1, 11) ),
			'bg_color': 'black',
			'fg_color': 'grey',
			'tg_auto': False,
			'tg_run': False,
			'speed': 8,
			'speed_adj': { 'value':1, 'lower':1, 'upper':99,
				'step_increment':1, 'page_increment':10, 'page_size':0 },
			'image_icon': './data/icon/snake.svg',
			'image_arrow': './data/pix/arrow.svg',
			'image_arrow_key': './data/pix/arrow-key.svg',
			'image_snake_food': './data/pix/bonus5.svg',
			'auto_mode': AutoMode.GRAPH,
			'sub_switch': True,
			'show_trace': False,
			'show_graph': False,
			'display_md': True,
		}
		# which state to save on game save
		self.state_to_dump = (
			'block_area', 'bg_color', 'fg_color', 'speed', 'tg_auto',
			'auto_mode', 'sub_switch', 'show_graph', 'show_trace'
		)
		# mind how the sign of the drawing coordinate system maps to window up/down/left/right
		self.map_arrow = {
			'up': (PixbufRotation.NONE, VECTORS.UP),
			'down': (PixbufRotation.UPSIDEDOWN, VECTORS.DOWN),
			'left': (PixbufRotation.COUNTERCLOCKWISE, VECTORS.LEFT),
			'right': (PixbufRotation.CLOCKWISE, VECTORS.RIGHT)
		}
		self.snake = Snake(self.data['block_area']['width'], self.data['block_area']['height'])
		# direction buffered from the keyboard, applied only on the next move
		self.snake_aim_buf = None
		# for share of draw parameters
		self.dpack = Draw_Pack()
		self.inhibit_id = None
		self.timeout_id = None
		self.about_dialog = None
		self.shortcuts = None
	## game op: reset, save/load ##
	def reset_timeout(self):
		"""Cancel the pending move timer, if any."""
		if self.timeout_id:
			GLib.source_remove(self.timeout_id)
			self.timeout_id = None
	def reset_inhibit(self):
		"""Release the session idle-inhibit, if held."""
		if self.inhibit_id:
			self.uninhibit(self.inhibit_id)
			self.inhibit_id = None
	def reset_game(self, reset_all=False):
		"""Reset the snake and the UI; with reset_all, also restore default settings."""
		# reset snake and app
		self.snake.snake_reset()
		self.snake_aim_buf = None
		# reset timeout && inihibit
		self.reset_timeout()
		self.reset_inhibit()
		# reset widgets
		self.tg_run.set_active(False)
		self.tg_auto.set_active(False)
		# re-activate widgets
		self.tg_run.set_sensitive(True)
		self.area_combo.set_sensitive(True)
		if reset_all:
			self.data['speed'] = 8
			self.data['fg_color'] = 'grey'
			self.data['bg_color'] = 'black'
			self.data['block_area'] = {'width':40, 'height':28}
			self.data['auto_mode'] = AutoMode.GRAPH
			self.data['sub_switch'] = True
			self.data['show_graph'] = False
			self.data['show_trace'] = False
			self.data['display_md'] = True
		self.init_state_from_data()
	def save_or_load_game(self, is_save=True):
		"""pause on save or load"""
		self.tg_run.set_active(False)
		filename = self.run_filechooser(is_save)
		if filename is None:
			return True
		try:
			if is_save:
				text = 'save'
				with open(filename, 'w') as fd:
					json.dump(self.dump_data_json(), fd)
				return True
			else:
				text = 'load'
				with open(filename, 'r') as fd:
					if self.load_data_json(json.load(fd)):
						return True
		except:
			# NOTE(review): bare except hides the original error — consider narrowing
			# pop dialog for failed operation
			self.show_warning_dialog(f"Failed to {text} game")
		return False
	def dump_data_json(self):
		"""Collect snake state plus the selected app settings into a JSON-able dict."""
		snake_data = self.snake.snake_dump()
		app_data = { x:self.data[x] for x in self.state_to_dump }
		# convert auto_mode to str
		app_data['auto_mode'] = app_data['auto_mode'].name
		return { 'snake': snake_data, 'app': app_data }
	def load_data_json(self, data):
		"""Restore snake and app state from a dict produced by dump_data_json()."""
		# reset current game
		self.reset_game(reset_all=True)
		# load snake first
		if not self.snake.snake_load(data['snake']):
			return False
		# load data for app, without verification
		# load only keys in data and state_to_dump
		for key in data['app'].keys():
			if key in self.state_to_dump:
				self.data[key] = data['app'][key]
		# convert auto_mode back to enum
		self.data['auto_mode'] = AutoMode[self.data['auto_mode']]
		# recover gui state, set snake length label
		self.init_state_from_data()
		# pause and de-sensitive combo_entry after restore
		self.tg_run.set_active(False)
		self.area_combo.set_sensitive(False)
		return True
	## the real snake ##
	#@count_func_time
	def timer_move(self, data):
		"""Move the snake one step and re-arm the timer; stop and mark death on failure."""
		if self.data['tg_auto'] and not self.snake_aim_buf:
			aim = self.snake.get_auto_aim(self.data['auto_mode'], self.data['sub_switch'])
		else:
			aim = self.snake_aim_buf
			self.snake_aim_buf = None
		if self.snake.move(aim):
			""" if current function not end in time, the timeout callback will
			be delayed, which can be checked with time.process_time_ns()
			"""
			self.timeout_id = GLib.timeout_add(1000/self.data['speed'], self.timer_move, None)
			self.lb_length.set_text(f"{self.snake.length}")
			self.check_and_update_after_move()
		else:
			self.dpack.died = True
			self.timeout_id = None
			self.reset_inhibit()
			self.tg_run.set_sensitive(False)
			iprint('game over, died')
		self.draw.queue_draw()
	#@count_func_time
	def check_and_update_after_move(self):
		"""
		when to update:
			. eat food
			. off-path: head not in path
			. end of path
			. force update
		"""
		# if in graph-auto mode
		if self.data['tg_auto'] and self.data['auto_mode'] == AutoMode.GRAPH:
			# if eat food on move, off-path, or at end of path
			if self.snake.head not in self.snake.path[:-1]:
				self.snake.update_path_and_graph()
	## dialog related ##
	def run_filechooser(self, is_save=True):
		"""Run a save/open file chooser; return the chosen filename or None on cancel."""
		if is_save:
			dialog_action = Gtk.FileChooserAction.SAVE
			dialog_button = Gtk.STOCK_SAVE
		else:
			dialog_action = Gtk.FileChooserAction.OPEN
			dialog_button = Gtk.STOCK_OPEN
		dialog = Gtk.FileChooserDialog(
			'Select File', self.window, dialog_action,
			(
				Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
				dialog_button, Gtk.ResponseType.OK,
			),
		)
		self.filechooser_filter(dialog)
		if dialog.run() == Gtk.ResponseType.OK:
			filename = dialog.get_filename()
		else:
			filename = None
		dialog.destroy()
		return filename
	def filechooser_filter(self, dialog):
		"""Attach a Json filter and a catch-all filter to the file chooser."""
		filter_json = Gtk.FileFilter()
		filter_json.set_name('Json')
		filter_json.add_mime_type('application/json')
		dialog.add_filter(filter_json)
		filter_any = Gtk.FileFilter()
		filter_any.set_name('Any files')
		filter_any.add_pattern('*')
		dialog.add_filter(filter_any)
	def show_shortcuts(self):
		"""Show (or re-present) the shortcuts window loaded from the ui file."""
		if self.shortcuts:
			self.shortcuts.present()
		else:
			builder = Gtk.Builder()
			builder.add_from_file('data/ui/shortcuts.ui')
			self.shortcuts = builder.get_object('SHORTCUTS')
			self.shortcuts.set_transient_for(self.get_active_window())
			# esc and close button issue 'destroy event'
			self.shortcuts.connect('destroy', Handler.on_keyshot_exit, self)
			self.shortcuts.show()
	def show_about_dialog(self):
		"""Show (or re-present) the about dialog."""
		if self.about_dialog:
			self.about_dialog.present()
		else:
			self.about_dialog = Gtk.AboutDialog()
			about_dia = self.about_dialog
			# the close button will not issue 'close' event
			about_dia.connect('response', Handler.on_about_exit, self)
			# about dialog
			about_dia.set_authors([AUTHOR])
			about_dia.set_program_name(NAME)
			about_dia.set_version(VERSION)
			about_dia.set_copyright(COPYRIGHT)
			about_dia.set_license_type(Gtk.License.__dict__[LICENSE_TYPE])
			about_dia.set_logo(self.pix_icon)
			about_dia.set_destroy_with_parent(True)
			about_dia.set_title(f"关于 {NAME}")
			about_dia.set_transient_for(self.get_active_window())
			about_dia.show()
	def show_warning_dialog(self, text):
		"""Pop a modal-less warning dialog with the given text."""
		dialog = Gtk.MessageDialog(
			self.window,
			Gtk.DialogFlags.DESTROY_WITH_PARENT,
			Gtk.MessageType.WARNING,
			Gtk.ButtonsType.CLOSE,
			text,
		)
		# lambda itself is the callback function
		dialog.connect('response', lambda *args: dialog.destroy())
		dialog.show()
	## ui related op ##
	def init_state_from_data(self):
		"""called on init_ui, reset, load game"""
		# reset bt_speed
		self.bt_speed.set_value(self.data['speed'])
		# reset color
		self.set_color(self.color_fg, self.color_bg)
		# the toggle button
		self.tg_auto.set_active(self.data['tg_auto'])
		self.tg_run.set_active(self.data['tg_run'])
		# update the combo_entry for area size only, no more operation
		combo_entry = self.area_combo.get_child()
		combo_entry.set_text(self.get_block_area_text())
		# area resize, queue redraw, may reset snake if resize
		self.req_draw_size_mini()
		# reset length label from snake
		self.lb_length.set_text(f"{self.snake.length}")
	def get_block_area_text(self):
		"""Format the current block area as 'WxH'."""
		area = self.data['block_area']
		return f"{area['width']}x{area['height']}"
	def sync_block_area_from_text(self, text):
		"""Parse 'WxH' text; update and return the area dict, or None if invalid."""
		try:
			width, height = ( int(x) for x in text.split('x') )
			if width < self.data['block_area_limit']['min'] or \
					width > self.data['block_area_limit']['max'] or \
					height < self.data['block_area_limit']['min'] or \
					height > self.data['block_area_limit']['max']:
				raise Exception()
		except:
			return None
		else:
			area = self.data['block_area']
			area['width'], area['height'] = ( width, height )
			return area
	def toggle_text(self, text, active):
		"""Markup an 'a/b' label so the active side renders big and bold."""
		try:
			lstr, rstr = text.split('/')
		except:
			return text
		if active:
			return f'<small>{lstr}</small>/<big><b>{rstr}</b></big>'
		else:
			return f'<big><b>{lstr}</b></big>/<small>{rstr}</small>'
	def init_draw_pack(self):
		"""Fill the shared Draw_Pack with sizes, colors and helper lambdas."""
		dp = self.dpack
		# dynamic value: the draw widget size
		dp.wd_w = lambda: self.draw.get_allocated_width()
		dp.wd_h = lambda: self.draw.get_allocated_height()
		# snake area size
		dp.area_w = self.data['block_area']['width']
		dp.area_h = self.data['block_area']['height']
		dp.scale = self.data['block_area_scale']
		dp.l = self.data['block_size']	# block grid side length, in pixel
		dp.s = 0.9		# body block side length, relative to dp.l
		dp.r = 0.2		# body block corner radius, relative to dp.l
		dp.fn = 'monospace'	# dist map text font name
		dp.fs = 0.7		# dist map text font size, relative to dp.l
		# snake alive cache
		dp.died = self.snake.is_died()
		# color
		rgba = Gdk.RGBA()	# free rgba
		rgba.parse(self.data['bg_color'])
		dp.rgba_bg = tuple(rgba)
		rgba.parse(self.data['fg_color'])
		dp.rgba_fg = tuple(rgba)
		dp.rgba_mark = (*dp.rgba_bg[:3], 0.8)	# mark: use bg color, but set alpha to 0.8
		dp.rgba_trace = (*dp.rgba_bg[:3], 0.4)	# trace: use bg color, but set alpha to 0.4
		dp.rgba_path = None
		dp.rgba_path_0 = (0, 1, 1, 0.6)		# path: regular mode
		dp.rgba_path_1 = (1, 0, 1, 0.6)		# path: unsafe mode
		dp.rgba_text = (0, 1, 0, 0.8)		# text for dist map
		dp.rgba_over = (1, 0, 1, 0.8)		# game over text
		dp.rgba_edge = (0, 0, 1, 1)		# edge: blue
		dp.rgba_black = (0, 0, 0, 1)		# black reference
		# fg == bg == black: colorful
		if (dp.rgba_bg == dp.rgba_fg == dp.rgba_black):
			# the color gradient rely on snake's dynamic length
			dp.body_color = lambda i: Color_Grad(self.snake.length)[i]
		else:
			dp.body_color = lambda i: dp.rgba_fg
	def req_draw_size_mini(self):
		"""call on window resized or init_state_from_data"""
		blk_sz = self.data['block_size']
		area_w = self.data['block_area']['width']
		area_h = self.data['block_area']['height']
		# get current monitor resolution
		display = Gdk.Display.get_default()
		monitor = Gdk.Display.get_monitor(display, 0)
		rect = Gdk.Monitor.get_geometry(monitor)
		# cap the drawing area at 90% of the monitor in each dimension
		area_lim = (int(rect.width * 0.9), int(rect.height * 0.9))
		area = [ blk_sz * (area_w + 2), blk_sz * (area_h + 2) ]
		scale_x = min(area[0], area_lim[0])/area[0]
		scale_y = min(area[1], area_lim[1])/area[1]
		# use the smaller scale
		scale = min(scale_x, scale_y)
		self.data['block_area_scale'] = scale
		if self.snake.area_w != area_w or self.snake.area_h != area_h:
			# snake resize && reset is not match
			self.snake.area_resize(area_w, area_h, True)
		# init/sync draw pack
		self.init_draw_pack()
		# request for mini size
		self.draw.set_size_request(area[0] * scale, area[1] * scale)
		# make sure redraw queued
		self.draw.queue_draw()
	def sync_color(self, *widgets):
		"""Pull color(s) from chooser widget(s) into app.data, then resync draw pack."""
		for widget in widgets:
			if widget is self.color_fg:
				self.data['fg_color'] = widget.get_rgba().to_string()
			elif widget is self.color_bg:
				self.data['bg_color'] = widget.get_rgba().to_string()
		self.init_draw_pack()
	def set_color(self, *widgets):
		"""Push color(s) from app.data into the chooser widget(s)."""
		rgba = Gdk.RGBA()
		for widget in widgets:
			if widget is self.color_fg:
				rgba.parse(self.data['fg_color'])
			elif widget is self.color_bg:
				rgba.parse(self.data['bg_color'])
			widget.set_rgba(rgba)
	def load_widgets(self):
		"""Load all named widgets from the glade/ui file into attributes."""
		self.builder = Gtk.Builder()
		self.builder.add_from_file('data/ui/snake.ui')
		self.window = self.builder.get_object('Snake')
		self.panel = self.builder.get_object('PANEL')
		self.header = self.builder.get_object('HEADER')
		self.draw = self.builder.get_object('DRAW')
		self.tg_auto = self.builder.get_object('TG_AUTO')
		self.tg_run = self.builder.get_object('TG_RUN')
		self.lb_length = self.builder.get_object('LB_LENGTH')
		self.bt_speed = self.builder.get_object('BT_SPEED')
		self.color_fg = self.builder.get_object('COLOR_FG')
		self.color_bg = self.builder.get_object('COLOR_BG')
		self.area_combo = self.builder.get_object('AREA_COMBO')
		self.img_logo = self.builder.get_object('IMG_SNAKE')
	def load_image(self):
		"""Load icon, food and arrow pixbufs from disk."""
		sz_food = self.data['block_size'] * 1.2
		self.pix_icon = Pixbuf.new_from_file(self.data['image_icon'])
		self.pix_food = Pixbuf.new_from_file_at_size(self.data['image_snake_food'], sz_food, sz_food)
		self.pix_arrow = Pixbuf.new_from_file_at_size(self.data['image_arrow'], 28, 28)
		self.pix_arrow_key = Pixbuf.new_from_file_at_size(self.data['image_arrow_key'], 28, 28)
		self.img_logo.set_from_pixbuf(self.pix_icon.scale_simple(24, 24, InterpType.BILINEAR))
	def init_menu(self):
		"""Build the app menu and register its actions with accelerators."""
		menu_items = {
			'sect_0': [
				('重置', 'reset', Handler.on_reset, ['<Ctrl>R']),
				('存档', 'save', Handler.on_save, ['<Ctrl>S']),
				('读档', 'load', Handler.on_load, ['<Ctrl>L']),
			],
			'sect_1': [
				('快捷键', 'keyshot', Handler.on_keyshot, ['<Ctrl>K']),
				('关于', 'about', Handler.on_about, [])
			] }
		main_menu = Gio.Menu()
		for sect in menu_items.keys():
			menu_sect = Gio.Menu()
			main_menu.append_section(None, menu_sect)
			for item in menu_items[sect]:
				action_label = item[0]
				action_name = item[1].lower()
				action_dname = 'app.' + action_name
				action_callback = item[2]
				action_accels = item[3]
				action = Gio.SimpleAction.new(action_name, None)
				action.connect('activate', action_callback, self)
				self.add_action(action)
				self.set_accels_for_action(action_dname, action_accels)
				menu_sect.append(action_label, action_dname)
		self.set_app_menu(main_menu)
	def init_ui(self):
		"""Assemble the whole UI: widgets, images, menu, signals and initial state."""
		self.load_widgets()	# load widgets
		self.load_image()	# load image resource
		self.init_menu()	# init app menu
		# attach the window to app
		self.window.set_application(self)
		# header bar
		self.header.set_decoration_layout('menu:minimize,close')
		# main window
		self.window.set_title(NAME)
		self.window.set_icon(self.pix_icon)
		# connect keyevent
		self.window.connect('key-press-event', Handler.on_keyboard_event, self)
		self.window.connect('key-release-event', Handler.on_keyboard_event, self)
		# draw area
		self.draw.connect('draw', Handler.on_draw, self)
		# toggle button
		self.tg_auto.connect('toggled', Handler.on_toggled, self)
		self.tg_run.connect('toggled', Handler.on_toggled, self)
		# init via toggle, set_active() only trigger if state changed
		self.tg_auto.toggled()
		self.tg_run.toggled()
		# spin of speed
		speed_adj = Gtk.Adjustment(**self.data['speed_adj'])
		self.bt_speed.set_adjustment(speed_adj)
		self.bt_speed.connect('value-changed', Handler.on_spin_value_changed, self)
		# color box
		self.color_fg.set_title('前景色')
		self.color_bg.set_title('背景色')
		self.color_fg.connect('color-set', Handler.on_color_set, self)
		self.color_bg.connect('color-set', Handler.on_color_set, self)
		# arrow image
		self.arrows = {}
		for x in [ 'up', 'down', 'left', 'right' ]:
			self.arrows[x] = self.builder.get_object(f'IMG_{x.upper()}')
			self.arrows[x].set_from_pixbuf(self.pix_arrow.rotate_simple(self.map_arrow[x][0]))
		# area: combo box
		area_size_store = Gtk.ListStore(str)
		for size in self.data['block_area_list']:
			area_size_store.append([size])
		self.area_combo.set_model(area_size_store)
		self.area_combo.set_entry_text_column(0)
		self.area_combo.connect('changed', Handler.on_combo_changed, self)
		combo_entry = self.area_combo.get_child()
		combo_entry.connect('activate', Handler.on_combo_entry_activate, self)
		# init gui state from data
		self.init_state_from_data()
		# to avoid highlight in the entry
		#self.bt_speed.grab_focus_without_selecting()
		# or just avoid focus on those with entry
		self.tg_run.grab_focus()
		self.window.show_all()
		# remove focus on init, must after show
		self.window.set_focus(None)
	## Draw related ##
	#@count_func_time
	def update_draw(self, cr):
		"""op pack for update draw"""
		dp = self.dpack
		dp.cr = cr
		self.draw_init(dp)
		self.draw_snake(dp)
	def draw_init(self, dp):
		"""Paint the background, center the play field and stroke its edge."""
		cr = dp.cr
		# draw background
		cr.set_source_rgba(*dp.rgba_bg)
		cr.rectangle(0,0, dp.wd_w(), dp.wd_h())
		cr.fill()
		# or use theme-provided background
		#context = self.draw.get_style_context()
		#Gtk.render_background(context, cr, 0, 0, dp.wd_w(), dp.wd_h())
		# make sure center is center
		translate = (
			(dp.wd_w() - dp.scale * dp.l * dp.area_w)/2,
			(dp.wd_h() - dp.scale * dp.l * dp.area_h)/2
		)
		cr.transform(cairo.Matrix(dp.scale, 0, 0, dp.scale, *translate))
		cr.set_line_join(cairo.LINE_JOIN_ROUND)
		cr.set_tolerance(0.2)
		cr.save()
		cr.scale(dp.l, dp.l)
		# draw the edge
		cr.move_to(0, 0)
		cr.rel_line_to(0, dp.area_h)
		cr.rel_line_to(dp.area_w, 0)
		cr.rel_line_to(0, -dp.area_h)
		cr.close_path()
		cr.set_source_rgba(*dp.rgba_edge)
		cr.set_line_width(0.1)
		cr.stroke()
		cr.restore()
	def draw_snake(self, dp):
		"""Draw food, body, head mark and the optional trace/graph/path overlays."""
		cr = dp.cr
		# food
		if self.snake.food:
			pix_sz = Vector(self.pix_food.get_width(), self.pix_food.get_height())
			food = self.snake.food * dp.l + (Vector(dp.l, dp.l) - pix_sz)/2
			Gdk.cairo_set_source_pixbuf(cr, self.pix_food, *food)
			cr.rectangle(*food, *pix_sz)
			cr.fill()
		cr.save()
		# aligned to grid center
		xy_offset = (1-dp.s)*dp.l/2
		cr.transform(cairo.Matrix(dp.l, 0, 0, dp.l, xy_offset, xy_offset))
		# snake body
		for i in range(self.snake.length):
			dp.rect_round(*self.snake.body[i], dp.s, dp.s, dp.r)
			cr.set_source_rgba(*dp.body_color(i))
			cr.fill()
		# head mark
		cr.set_source_rgba(*dp.rgba_mark)
		if dp.died:
			cr.set_line_width(0.2)
			dp.cross_mark(*self.snake.head, dp.s, dp.s, dp.r)
		else:
			cr.set_line_width(0.12)
			dp.circle_mark(*self.snake.head, dp.s, dp.s, dp.s/4)
		cr.restore()
		if self.data['show_trace']:
			self.draw_snake_trace(dp)
		if self.data['show_graph']:
			if self.data['display_md']:
				dp.rgba_path = dp.rgba_path_0
				path, graph = (self.snake.path, self.snake.graph)
			else:
				dp.rgba_path = dp.rgba_path_1
				path, graph = (self.snake.path_col, self.snake.graph_col)
				if len(path) == 0:
					path = self.snake.path_unsafe
			# drawing the digits directly with cairo is more efficient here
			self.draw_snake_graph_with_cairo(dp, graph)
			#self.draw_snake_graph_with_pango(dp, graph)
			self.draw_snake_path(dp, path)
		if dp.died:
			self.draw_gameover(dp)
	def draw_snake_trace(self, dp):
		"""Draw a faint polyline through every body segment."""
		cr = dp.cr
		cr.save()
		cr.transform(cairo.Matrix(dp.l, 0, 0, dp.l, dp.l/2, dp.l/2))
		cr.move_to(*self.snake.body[0])
		for pos in self.snake.body[1:]:
			cr.line_to(*pos)
		cr.set_source_rgba(*dp.rgba_trace)
		cr.set_line_width(0.1)
		cr.stroke()
		cr.restore()
	def draw_snake_path(self, dp, path):
		"""Draw the planned path polyline; no-op for a missing/empty path."""
		# graph path exist and not empty
		if path is None or len(path) == 0:
			return False
		cr = dp.cr
		cr.save()
		cr.transform(cairo.Matrix(dp.l, 0, 0, dp.l, dp.l/2, dp.l/2))
		cr.move_to(*path[0])
		for pos in path[1:]:
			cr.line_to(*pos)
		cr.set_source_rgba(*dp.rgba_path)
		cr.set_line_width(0.2)
		cr.stroke()
		cr.restore()
	def draw_snake_graph_with_pango(self, dp, graph):
		"""Render the distance map digits with pango (unused alternative)."""
		if graph is None:
			return False
		cr = dp.cr
		cr.save()
		cr.translate(dp.l/2, dp.l/2)
		font_desc = Pango.FontDescription.from_string(dp.fn)
		# set obsolute size in pixel and scaled to pango size
		font_desc.set_absolute_size(dp.fs * dp.l * Pango.SCALE)
		font_desc.set_weight(Pango.Weight.NORMAL)
		pg_layout = PangoCairo.create_layout(cr)
		pg_layout.set_font_description(font_desc)
		cr.set_source_rgba(*dp.rgba_text)
		for x,y in np.transpose(graph.nonzero()):
			dist = graph[x, y]
			pg_layout.set_text(str(dist), -1)
			# without scale, use pixel size directly
			width, height = pg_layout.get_pixel_size()
			cr.move_to(x * dp.l - width/2, y * dp.l - height/2)
			PangoCairo.show_layout(cr, pg_layout)
		cr.restore()
	def draw_snake_graph_with_cairo(self, dp, graph):
		"""Render the distance map digits with cairo's toy text API."""
		if graph is None:
			return False
		cr = dp.cr
		cr.save()
		cr.transform(cairo.Matrix(dp.l, 0, 0, dp.l, dp.l/2, dp.l/2))
		cr.set_source_rgba(*dp.rgba_text)
		cr.select_font_face(dp.fn, cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
		cr.set_font_size(dp.fs)
		for x,y in np.transpose(graph.nonzero()):
			dist = graph[x, y]
			extent = cr.text_extents(str(dist))
			cr.move_to(x - extent.width/2, y + extent.height/2)
			cr.show_text(str(dist))
		cr.restore()
	def draw_gameover(self, dp):
		"""Draw the centered GAME OVER banner and reset hint."""
		cr = dp.cr
		text_go = 'GAME OVER'
		text_reset = 'Press "r" to reset'
		cr.set_source_rgba(*dp.rgba_over)
		cr.select_font_face('Serif', cairo.FONT_SLANT_OBLIQUE, cairo.FONT_WEIGHT_BOLD)
		cr.set_font_size(48)
		extent_go = cr.text_extents(text_go)
		# litte above center
		cr.move_to((dp.l * dp.area_w - extent_go.width)/2,
				(dp.l * dp.area_h - extent_go.height)/2)
		cr.show_text(text_go)
		cr.select_font_face(dp.fn, cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
		cr.set_font_size(20)
		extent_reset = cr.text_extents(text_reset)
		cr.move_to((dp.l * dp.area_w - extent_reset.width)/2,
				(dp.l * dp.area_h - extent_reset.height + extent_go.height)/2)
		cr.show_text(text_reset)
	## App actions ##
	def do_activate(self):
		"""Gtk activate hook: build the UI on first run, re-present after."""
		if not self.window:
			self.init_ui()
		else:
			self.window.present()
# vi: set ts=4 noexpandtab foldmethod=indent foldignore= :
| rtgiskard/snake | lib/snakeapp.py | snakeapp.py | py | 30,136 | python | en | code | 0 | github-code | 13 |
31512454451 | import tkinter as tk
from PIL import ImageTk
from camera import Camera
from imageTransformers import IImageTransformer, ImageTransformerBuilder
from imageSavers import IImageSaver
class App:
    """Tkinter front-end showing a live webcam feed with optional filters.

    The constructor builds the UI, starts the periodic frame update and then
    blocks in the Tk main loop.
    """
    # Fix: the saver parameter was annotated IImageTransformer by mistake;
    # the imported IImageSaver is the type actually expected here.
    def __init__(self, title: str, webcam: Camera, default_image_transformer: IImageTransformer, image_saver: IImageSaver):
        self.webcam = webcam
        self.img_transformer = default_image_transformer
        self.image_saver = image_saver
        self.root = tk.Tk()
        self.root.resizable(False, False)
        self.root.title(title)
        self.setupUI()
        self.update_img()
        self.root.mainloop()
    def apply_transformations(self):
        """Rebuild the transformer pipeline from the current checkbox states."""
        new_transformation_builder = ImageTransformerBuilder()
        # NOTE(review): the vertical-flip checkbox maps to axis 'y' and the
        # horizontal one to 'x' — confirm against the transformer's convention.
        if self.flipXChBox.get() == 1:
            new_transformation_builder.flip_on_axis('y')
        if self.flipYChBox.get() == 1:
            new_transformation_builder.flip_on_axis('x')
        if self.showEdgesChBox.get() == 1:
            new_transformation_builder.detect_edges()
        if self.makeNegativeChBox.get() == 1:
            new_transformation_builder.negate_image()
        self.img_transformer = new_transformation_builder.build()
    def save_image(self):
        """Save the current (transformed) frame under the first free filename."""
        counter = 0
        frame_to_save = self.webcam.get_camera_frame(self.img_transformer)
        while not self.image_saver.save_image(f'saved_images/camera_saved_{counter}.jpg', frame_to_save):
            counter += 1
    def update_img(self):
        """Grab a fresh frame, show it, and re-schedule at the camera framerate."""
        new_cam_img = ImageTk.PhotoImage(self.webcam.get_camera_frame(self.img_transformer))
        self.cam_img_label.configure(image=new_cam_img)
        # keep a reference so Tk does not garbage-collect the PhotoImage
        self.cam_img_label.image = new_cam_img
        self.root.after(1000 // self.webcam.get_framerate(), self.update_img)
    def setupUI(self):
        """Lay out the image label, filter checkboxes and action buttons."""
        self.canvas = tk.Canvas(self.root)
        self.canvas.grid(columnspan=2, padx = 10, pady = 10)
        cam_img = ImageTk.PhotoImage(self.webcam.get_default_img(self.img_transformer))
        self.cam_img_label = tk.Label(self.root, image=cam_img)
        self.cam_img_label.image = cam_img
        self.cam_img_label.grid(columnspan=2, column = 0, row = 0)
        self.flipXChBox = tk.IntVar()
        self.flipYChBox = tk.IntVar()
        self.showEdgesChBox = tk.IntVar()
        self.makeNegativeChBox = tk.IntVar()
        # Fix: corrected user-facing label typos ("Filp", "horizontaly")
        self.transformBox1 = tk.Checkbutton(self.root, text='Flip vertically', variable=self.flipXChBox, onvalue=1, offvalue=0)
        self.transformBox1.grid(column=0, row = 1, sticky = 'w')
        self.transformBox2 = tk.Checkbutton(self.root, text='Flip horizontally', variable=self.flipYChBox, onvalue=1, offvalue=0)
        self.transformBox2.grid(column=1, row = 1, sticky = 'w')
        self.transformBox3 = tk.Checkbutton(self.root, text='Show edges', variable=self.showEdgesChBox, onvalue=1, offvalue=0)
        self.transformBox3.grid(column=0, row = 2, sticky = 'w')
        self.transformBox4 = tk.Checkbutton(self.root, text='Invert colors', variable=self.makeNegativeChBox, onvalue=1, offvalue=0)
        self.transformBox4.grid(column=1, row = 2, sticky = 'w')
        self.saveBtn = tk.Button(self.root, text='Save image', command=self.save_image)
        self.saveBtn.grid(column=0, row=3, sticky = 'nesw')
        self.applyBtn = tk.Button(self.root, text='Apply filters', command=self.apply_transformations)
        self.applyBtn.grid(column=1, row=3, sticky = 'nesw')
14549949296 | import re
import pandas as pd
import collections
import nltk
import string
import pickle
import numpy as np
import sys
# Apply the function LIWC_detect to a text. It removes punctuation, tokenizes and matches
# the tokens to LIWC with the help of the star_check function and the nstar_liwc_dict and
# star_liwc_dict. The result is a vector of counts for the 73 LIWC categories (see appended
# list), as well as a counter of matched LIWC tokens, and a counter of tokens in general.
# To a list of these vectors, you can apply the veclist_to_df function, which will transform
# them into a nice pandas dataframe with the LIWC categories as column names. You can divide
# the categories by either of the two counters to get percentage values.
# Build the two lookup dictionaries from the LIWC dictionary file:
#  - nstar_liwc_dict: exact lowercase words -> category vector
#  - star_liwc_dict: stem length -> {lowercase stem -> category vector},
#    for entries ending in '*' (wildcard stems), lengths 3..13
nstar_liwc_dict = dict()
star_liwc_dict = dict()
for i in range(3,14):
    star_liwc_dict[i] = dict()
with open('Prosocial_French.dic','rt') as f:
    for line in f:
        word = line.strip()
        if "*" in word:
            # wildcard entry: strip the '*' and index by stem length
            w = word.replace("*","").lower()
            star_liwc_dict[len(w)][w] = np.array([1.])
        else:
            nstar_liwc_dict[word.lower()] = np.array([1.])
def star_check(word, vec_length=3):
    """Match *word* against the stem dictionary (star_liwc_dict), longest stem first.

    Returns the stored category vector on a hit; otherwise a zero vector of
    length vec_length whose last entry is 1, marking an unmatched token.
    """
    for stem_len in range(13, 2, -1):
        bucket = star_liwc_dict.get(stem_len)
        if bucket is None:
            continue
        hit = bucket.get(word[:stem_len])
        if hit is not None:
            return hit
    unmatched = np.zeros(vec_length)
    unmatched[-1] = 1
    return unmatched
def LIWC_detect(text):
    """Count LIWC category matches in *text*.

    Strips punctuation, lowercases and tokenizes, then looks each token up
    first in the exact-word dictionary (nstar_liwc_dict) and falls back to
    the stem dictionary via star_check. Returns a length-3 count vector;
    non-string input yields all zeros.
    """
    counts = np.zeros([3])
    if not isinstance(text, str):
        return counts
    # puncts = string.punctuation
    puncts = '!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\''
    cleaned = re.sub('\>|['+puncts+']', ' ', text).lower()
    for token in nltk.word_tokenize(cleaned):
        match = nstar_liwc_dict.get(token, 0)
        if isinstance(match, int):
            # miss in the exact dictionary: try the wildcard stems
            match = star_check(token)
        counts[:] += match
    return counts
def veclist_to_df(vec_list):
    """Wrap a list of count vectors in a DataFrame with the LIWC column names."""
    return pd.DataFrame(data=vec_list, columns=['prosoc'])
# Main: read the tab-separated input file named on the command line, score
# column 7 with LIWC_detect, and write the first six columns plus the scores
# to '<input>.out'. Assumes every line has at least seven tab-separated fields.
with open(sys.argv[1], "rt") as fin:
    with open(sys.argv[1]+".out", "wt") as fout:
        for line in fin:
            fields = line.split("\t")
            liwcs = LIWC_detect(fields[6].lower())
            fout.write(fields[0] + "\t" + fields[1] + "\t" + fields[2] + "\t" + fields[3] + "\t" + fields[4] + "\t" + fields[5])
            # append one tab-separated value per LIWC category
            l = ""
            for v in liwcs:
                l = l + "\t" + str(v)
            fout.write(l + "\n")
| dgarcia-eu/DavidsUsefulScripts | Prosocial_French_Script.py | Prosocial_French_Script.py | py | 2,592 | python | en | code | 3 | github-code | 13 |
16316698257 | # Python
## Part 1
# Puzzle input: one integer per line (the encrypted coordinate list).
with open("data.txt") as f:
    moves = list(map(int, f.read().splitlines()))
def move_numbers(moves: list[int], rounds: int = 1, decryption_key: int = 1) -> int:
    """Mix the list per AoC 2022 day 20 and return the grove-coordinate sum.

    Each value (scaled by decryption_key) is shifted by its own amount, in the
    original order, for the given number of rounds; the answer is the sum of
    the values 1000, 2000 and 3000 positions after the 0.
    """
    values = [decryption_key * v for v in moves]
    # 'order' holds original indices; moving indices sidesteps duplicate values
    order = list(range(len(values)))
    for _ in range(rounds):
        for original_index in range(len(values)):
            position = order.index(original_index)
            order.pop(position)
            order.insert((position + values[original_index]) % len(order), original_index)
    mixed = [values[i] for i in order]
    zero_at = mixed.index(0)
    return sum(mixed[(zero_at + offset) % len(mixed)] for offset in (1000, 2000, 3000))
# Part 1: single mixing round, no key
print(move_numbers(moves))
## Part 2
# Part 2: apply the decryption key and mix ten rounds
print(move_numbers(moves, 10, 811589153))
| moritzkoerber/adventofcode | 2022/day20/day20.py | day20.py | py | 667 | python | en | code | 0 | github-code | 13 |
21092013063 | import socket
import pickle
import time
import random
from Send.send_api import *
from Encryption import encryption
from Compression import compression
from Data.data import get_myname, get_myaddr
from Receive import recv
# State graph
# Keys used in every wire-packet dictionary.
DATAKEY = 'data'        # payload
CODEKEY = 'code'        # presumably names the receive handler (set from the sending function's name) — confirm in Receive module
METADATA = 'metadata'   # sender identity: return address and node name
def set_data(packet, data):
    """Store the payload in the packet dict and return the packet."""
    packet[DATAKEY] = data
    return packet
def set_code(packet, code):
    """Store the handler code in the packet dict and return the packet."""
    packet[CODEKEY] = code
    return packet
def set_metadata(packet, metadata):
    """Store the sender metadata in the packet dict and return the packet."""
    packet[METADATA] = metadata
    return packet
def get_data(packet):
    """Return the packet's payload."""
    return packet[DATAKEY]
def get_code(packet):
    """Return the packet's handler code."""
    return packet[CODEKEY]
def get_metadata(packet):
    """Return the packet's sender metadata."""
    return packet[METADATA]
def gen_request_id():
    """Build a (weakly) unique request id from the wall clock and a random draw."""
    timestamp = str(time.time())
    nonce = str(random.random())
    return timestamp + nonce
def construct_packet(packet, encrypt_lock=None):
    """Serialize a packet dict for the wire.

    Stamps the packet with this node's return address and name, pickles it,
    and, when encrypt_lock is given, encrypts the pickled bytes.

    Raises ValueError if the packet lacks its data or code field.
    """
    # Fix: removed the dead 'global MYADDR, MYNAME' declaration — those names
    # were never defined or read; the values come from get_myaddr()/get_myname().
    if DATAKEY not in packet or CODEKEY not in packet:
        raise ValueError('No data or recv handler')
    packet[METADATA] = { 'ra': get_myaddr(), 'name': get_myname() }
    byte_data = pickle.dumps(packet) # Second pickle... pickled onion?
    if encrypt_lock:
        byte_data = encryption.encrypt(byte_data, encrypt_lock)
    return byte_data
def send_lock_request(host, port):
    """Build (not send) the serialized packet that requests an encryption key.

    NOTE(review): *host* and *port* are currently unused -- the caller
    (`send`) performs the actual transmission; the parameters are kept for
    interface stability.
    """
    packet = {}
    set_data(packet, None)
    set_code(packet, send_lock_request.__name__)
    lock_request_packet = construct_packet(packet)
    return lock_request_packet
def send(dict_packet, host, port):
    """Send *dict_packet* (a dict) to host:port.

    Protocol: open a TCP connection, request the peer's encryption key,
    wait for the key reply, then send the compressed + encrypted payload
    once and close the connection.
    """
    # Open the connection
    sender = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sender.connect((host, int(port)))
    # Send the request for the encryption lock
    lock_request_packet = send_lock_request(host, port)
    sender.sendall(lock_request_packet)
    # Wait for a lock response
    encrypt_lock = None
    while True:
        data = sender.recv(4096)
        # NOTE(review): recv() returning b'' means the peer closed the
        # connection; `continue` then busy-loops forever.  Consider treating
        # an empty read as an error instead.
        if not len(data): continue
        # Parse the lock reply
        _, reply_data, metadata = recv.receive_parse(data)
        encrypt_lock = reply_data['lock']
        # Encrypt and send the actual message
        tosend = construct_packet(dict_packet, encrypt_lock=encrypt_lock)
        tosend = compression.compress(tosend)
        sender.sendall(tosend)
        break
    # Close the connection
    sender.shutdown(socket.SHUT_WR)
    sender.close()
33754693001 | ## NAME:
# THREEDtoTWOD.py
## PURPOSE:
# Takes a 3D healpix map and plots the 2D projection centered on a particular RA and dec
# Interactive mode: 2D image rotates (RA changes)
#------------------------------------------------------------------------------------------------
import aipy
import numpy
import pylab
import pyfits
pylab.ion() #interactive mode on
plt = None #just a variable
for ii in numpy.arange(0,2*numpy.pi,0.1): #for loop for ra from 0 to 2pi with steps of 0.1
    #get 3D coordinates for every pixel in a 2D image
    size = 300 #number of wavelengths when starting in uv domain
    img2d = aipy.img.Img(size=size, res = .5)
    crd = img2d.get_eq(ra=ii,dec=-(numpy.pi)/12.0) #equatorial coordinates (bug at ra=pi!)
    x,y,z = crd
    sh = x.shape #remember dimensions of x
    mask = x.mask #remember where the masked values are
    #x,y,z = x.filled(0), y.filled(0), z.filled(1) #need to put good coordinates in, but these get masked out later anyways with fluxes
    x,y,z = x.flatten(), y.flatten(), z.flatten()
    #get array of values for image
    # NOTE(review): the 3D map is re-read from FITS on every loop iteration;
    # hoisting this load above the loop would avoid the repeated disk read.
    img3d = aipy.map.Map(fromfits = '/Users/carinacheng/Desktop/Carina/UCBResearch/images/lambda_haslam408_dsds_eq.fits') #reads in 3D image
    fluxes = img3d[x,y,z] #gets fluxes already weighted (1D array)
    #fluxes = img3d.get(crd)[1] #gets fluxes at the coordinates but they aren't weighted
    #plot 2D image: restore the 2D shape and re-apply the off-sky mask
    fluxes.shape = sh
    fluxes = numpy.ma.array(fluxes, mask=mask)
    if plt == None: #for the first time, plot as specified below
        plt = pylab.imshow(numpy.fft.fftshift(numpy.log10(fluxes)),interpolation='nearest',origin='lower',extent=(-1,1,-1,1))
        pylab.show()
    else: #update the plot (redraw it) at each step
        plt.set_data(numpy.fft.fftshift(numpy.log10(fluxes)))
    pylab.draw()
28609522123 | #Question Link: https://takeuforward.org/data-structure/remove-n-th-node-from-the-end-of-a-linked-list/
#Solution Link (Python3): https://leetcode.com/submissions/detail/656748126/
class Node:
    """Singly linked list node holding a payload value and a next pointer."""
    def __init__ (self, data):
        self.data = data  # payload value
        self.next = None  # successor node, or None at the tail
def removeKthNode(head, k):
    """Remove the k-th node from the end of the list; return the new head.

    Classic two-pointer technique: advance a fast pointer k steps, then move
    both pointers until the fast one reaches the tail, leaving the slow
    pointer just before the node to delete.

    If k is 0 or exceeds the list length the list is returned unchanged.
    (Previously a too-large k fell through to an implicit ``return None``,
    silently discarding the entire list.)
    """
    if k == 0:
        return head
    dummy = Node(-1)
    dummy.next = head
    fast = dummy
    slow = dummy
    for _ in range(k):
        if fast.next is None:
            # Fewer than k nodes -- nothing to remove.
            return head
        fast = fast.next
    while fast.next is not None:
        slow = slow.next
        fast = fast.next
    if slow is dummy:
        # Removing the current head node.
        return head.next
    slow.next = slow.next.next
    return head
def build_linkedList(arr):
    """Create a singly linked list from *arr*, stopping at a -1 sentinel.

    Returns the head node, or None if the input is empty or begins with -1.
    """
    head = None
    tail = None
    for value in arr:
        if value == -1:
            break
        node = Node(value)
        if head is None:
            head = node
        else:
            tail.next = node
        tail = node
    return head
# Stdin driver: t test cases; each gives k and a -1-terminated value list,
# and prints the list after removal followed by -1.
t = int(input().strip())
for i in range(t):
    k = int(input().strip())
    arr = [int(i) for i in input().strip().split()]
    head = build_linkedList(arr)
    res_head = removeKthNode(head, k)
    while res_head is not None:
        print(res_head.data, end= " ")
        res_head = res_head.next
    print(-1)
| AbhiWorkswithFlutter/StriverSDESheet-Python3-Solutions | Striver SDE Sheet/Day 5/Remove N-th node from the end of a Linked List.py | Remove N-th node from the end of a Linked List.py | py | 1,350 | python | en | code | 3 | github-code | 13 |
2250518699 | """Soft Q Imitation Learning (SQIL) (https://arxiv.org/abs/1905.11108).
Trains a policy via DQN-style Q-learning,
replacing half the buffer with expert demonstrations and adjusting the rewards.
"""
from typing import Any, Dict, List, Optional, Type, Union
import numpy as np
import torch as th
from gymnasium import spaces
from stable_baselines3 import dqn
from stable_baselines3.common import (
buffers,
off_policy_algorithm,
policies,
type_aliases,
vec_env,
)
from imitation.algorithms import base as algo_base
from imitation.data import rollout, types
from imitation.util import logger, util
class SQIL(algo_base.DemonstrationAlgorithm[types.Transitions]):
    """Soft Q Imitation Learning (SQIL).
    Trains a policy via DQN-style Q-learning,
    replacing half the buffer with expert demonstrations and adjusting the rewards.
    """
    # NOTE(review): annotated but never assigned in this class -- presumably
    # populated via the SQILReplayBuffer machinery; confirm before relying on it.
    expert_buffer: buffers.ReplayBuffer
    def __init__(
        self,
        *,
        venv: vec_env.VecEnv,
        demonstrations: Optional[algo_base.AnyTransitions],
        policy: Union[str, Type[policies.BasePolicy]],
        custom_logger: Optional[logger.HierarchicalLogger] = None,
        rl_algo_class: Type[off_policy_algorithm.OffPolicyAlgorithm] = dqn.DQN,
        rl_kwargs: Optional[Dict[str, Any]] = None,
    ):
        """Builds SQIL.
        Args:
            venv: The vectorized environment to train on.
            demonstrations: Demonstrations to use for training.
            policy: The policy model to use (SB3).
            custom_logger: Where to log to; if None (default), creates a new logger.
            rl_algo_class: Off-policy RL algorithm to use.
            rl_kwargs: Keyword arguments to pass to the RL algorithm constructor.
        Raises:
            ValueError: if `dqn_kwargs` includes a key
            `replay_buffer_class` or `replay_buffer_kwargs`.
        """
        self.venv = venv
        if rl_kwargs is None:
            rl_kwargs = {}
        # SOMEDAY(adam): we could support users specifying their own replay buffer
        # if we made SQILReplayBuffer a more flexible wrapper. Does not seem worth
        # the added complexity until we have a concrete use case, however.
        if "replay_buffer_class" in rl_kwargs:
            raise ValueError(
                "SQIL uses a custom replay buffer: "
                "'replay_buffer_class' not allowed.",
            )
        if "replay_buffer_kwargs" in rl_kwargs:
            raise ValueError(
                "SQIL uses a custom replay buffer: "
                "'replay_buffer_kwargs' not allowed.",
            )
        # The custom buffer injects the demonstrations and performs SQIL's
        # reward relabeling -- hence the two reserved kwargs rejected above.
        self.rl_algo = rl_algo_class(
            policy=policy,
            env=venv,
            replay_buffer_class=SQILReplayBuffer,
            replay_buffer_kwargs={"demonstrations": demonstrations},
            **rl_kwargs,
        )
        super().__init__(demonstrations=demonstrations, custom_logger=custom_logger)
    def set_demonstrations(self, demonstrations: algo_base.AnyTransitions) -> None:
        """Replace the expert demonstrations held by the SQIL replay buffer."""
        assert isinstance(self.rl_algo.replay_buffer, SQILReplayBuffer)
        self.rl_algo.replay_buffer.set_demonstrations(demonstrations)
    def train(self, *, total_timesteps: int, tb_log_name: str = "SQIL", **kwargs: Any):
        """Run the wrapped RL algorithm's learn() for `total_timesteps` steps."""
        self.rl_algo.learn(
            total_timesteps=total_timesteps,
            tb_log_name=tb_log_name,
            **kwargs,
        )
    @property
    def policy(self) -> policies.BasePolicy:
        """The policy being learned by the wrapped RL algorithm."""
        assert isinstance(self.rl_algo.policy, policies.BasePolicy)
        return self.rl_algo.policy
class SQILReplayBuffer(buffers.ReplayBuffer):
    """A replay buffer that injects 50% expert demonstrations when sampling.
    This buffer is fundamentally the same as ReplayBuffer,
    but it includes an expert demonstration internal buffer.
    When sampling a batch of data, it will be 50/50 expert and collected data.
    It can be used in off-policy algorithms like DQN/SAC/TD3.
    Here it is used as part of SQIL, where it is used to train a DQN.
    """
    def __init__(
        self,
        buffer_size: int,
        observation_space: spaces.Space,
        action_space: spaces.Space,
        demonstrations: algo_base.AnyTransitions,
        device: Union[th.device, str] = "auto",
        n_envs: int = 1,
        optimize_memory_usage: bool = False,
    ):
        """Create a SQILReplayBuffer instance.
        Args:
            buffer_size: Max number of elements in the buffer
            observation_space: Observation space
            action_space: Action space
            demonstrations: Expert demonstrations.
            device: PyTorch device.
            n_envs: Number of parallel environments. Defaults to 1.
            optimize_memory_usage: Enable a memory efficient variant
                of the replay buffer which reduces by almost a factor two
                the memory used, at a cost of more complexity.
        """
        super().__init__(
            buffer_size=buffer_size,
            observation_space=observation_space,
            action_space=action_space,
            device=device,
            n_envs=n_envs,
            optimize_memory_usage=optimize_memory_usage,
            handle_timeout_termination=False,
        )
        # Placeholder only: set_demonstrations() below replaces this with a
        # buffer sized to the actual number of expert transitions.
        self.expert_buffer = buffers.ReplayBuffer(
            buffer_size=0,
            observation_space=observation_space,
            action_space=action_space,
        )
        self.set_demonstrations(demonstrations)
    def set_demonstrations(
        self,
        demonstrations: algo_base.AnyTransitions,
    ) -> None:
        """Set the expert demonstrations to be injected when sampling from the buffer.
        Args:
            demonstrations (algo_base.AnyTransitions): Expert demonstrations.
        Raises:
            NotImplementedError: If `demonstrations` is not a transitions object
                or a list of trajectories.
        """
        # If demonstrations is a list of trajectories,
        # flatten it into a list of transitions
        if not isinstance(demonstrations, types.Transitions):
            (
                item,
                demonstrations,
            ) = util.get_first_iter_element(  # type: ignore[assignment]
                demonstrations,  # type: ignore[assignment]
            )
            if isinstance(item, types.Trajectory):
                demonstrations = rollout.flatten_trajectories(
                    demonstrations,  # type: ignore[arg-type]
                )
            if not isinstance(demonstrations, types.Transitions):
                raise NotImplementedError(
                    f"Unsupported demonstrations type: {demonstrations}",
                )
        n_samples = len(demonstrations)
        self.expert_buffer = buffers.ReplayBuffer(
            buffer_size=n_samples,
            observation_space=self.observation_space,
            action_space=self.action_space,
            handle_timeout_termination=False,
        )
        for transition in demonstrations:
            self.expert_buffer.add(
                obs=transition["obs"],
                next_obs=transition["next_obs"],
                action=transition["acts"],
                done=transition["dones"],
                # SQIL reward relabeling: every expert transition gets reward 1.
                reward=np.array(1.0),
                infos=[{}],
            )
    def add(
        self,
        obs: np.ndarray,
        next_obs: np.ndarray,
        action: np.ndarray,
        reward: np.ndarray,
        done: np.ndarray,
        infos: List[Dict[str, Any]],
    ) -> None:
        """Store a learner transition with its reward relabeled to 0.
        The environment's true `reward` argument is deliberately discarded:
        SQIL assigns 0 to learner transitions and 1 to expert transitions.
        """
        super().add(
            obs=obs,
            next_obs=next_obs,
            action=action,
            reward=np.array(0.0),
            done=done,
            infos=infos,
        )
    def sample(
        self,
        batch_size: int,
        env: Optional[vec_env.VecNormalize] = None,
    ) -> buffers.ReplayBufferSamples:
        """Sample a batch of data.
        Half of the batch will be from expert transitions,
        and the other half will be from the learner transitions.
        Args:
            batch_size: Number of elements to sample in total
            env: associated gym VecEnv to normalize the observations/rewards
                when sampling
        Returns:
            A mix of transitions from the expert and from the learner.
        """
        new_sample_size, expert_sample_size = util.split_in_half(batch_size)
        new_sample = super().sample(new_sample_size, env)
        expert_sample = self.expert_buffer.sample(expert_sample_size, env)
        # Concatenate field-by-field (obs, actions, rewards, ...) so the
        # returned namedtuple mixes learner and expert data in one batch.
        return type_aliases.ReplayBufferSamples(
            *(
                th.cat((getattr(new_sample, name), getattr(expert_sample, name)))
                for name in new_sample._fields
            ),
        )
| HumanCompatibleAI/imitation | src/imitation/algorithms/sqil.py | sqil.py | py | 8,709 | python | en | code | 1,004 | github-code | 13 |
32227612386 | #! /usr/bin/python3
# coding: utf8
#
# -----------------------------------------------------------
# | "Pysenhower" |
# -----------------------------------------------------------
#
# Python3.2-Programm, mit dem Aufgaben entsprechend ihrer
# Prioritäten organisiert werden können.
#
#
#
# 08/01/15: Version 0.1 (Alpha)
#
import os
import pickle
from prettytable import PrettyTable
# Path where the task lists are stored
PATH = "/home/paul/.eisenhower"
# Create the directory if necessary
if not os.path.exists(PATH):
    os.makedirs(PATH)
# Define the table header titles (German: A..D Eisenhower quadrants)
column_name_prio_a = "A: Wichtig und dringend"
column_name_prio_b = "B: Wichtig, aber nicht dringend"
column_name_prio_c = "C: Nicht wichtig und dringend"
column_name_prio_d = "D: Nicht wichtig und nicht dringend"
try:
    # Test whether the lists can be loaded from disk
    list_prio_a = pickle.load(open(PATH + "/save.a", "rb"))
    list_prio_b = pickle.load(open(PATH + "/save.b", "rb"))
    list_prio_c = pickle.load(open(PATH + "/save.c", "rb"))
    list_prio_d = pickle.load(open(PATH + "/save.d", "rb"))
except IOError:
    # First run: create placeholder lists, persist them, and ask for restart.
    list_prio_a = ["--"]
    list_prio_b = ["--"]
    list_prio_c = ["--"]
    list_prio_d = ["--"]
    pickle.dump( list_prio_a, open(PATH + "/save.a", "wb" ) )
    pickle.dump( list_prio_b, open(PATH + "/save.b", "wb" ) )
    pickle.dump( list_prio_c, open(PATH + "/save.c", "wb" ) )
    pickle.dump( list_prio_d, open(PATH + "/save.d", "wb" ) )
    print("Noch keine Datenbank vorhanden. Wurde angelegt. Bitte neustarten!")
    exit(0)
else:
# Backup: Aufgabenlisten definieren
#list_prio_a = ["Zeug", "Dinge", "Sachen"]
#list_prio_b = ["Krams", "Stuff"]
#list_prio_c = ["Zeug", "Dinge", "Sachen"]
#list_prio_d = ["Krams", "Stuff", "Zeug", "Dinge", "Sachen"]
# Determine the header lengths and find the maximum
len_head_a = len(column_name_prio_a)
len_head_b = len(column_name_prio_b)
len_head_c = len(column_name_prio_c)
len_head_d = len(column_name_prio_d)
max_head_length = max(len_head_a, len_head_b, len_head_c, len_head_d)
# Center the headers, padding with spaces, so all four columns render equally wide
column_name_prio_a = column_name_prio_a.center(max_head_length, " ")
column_name_prio_b = column_name_prio_b.center(max_head_length, " ")
column_name_prio_c = column_name_prio_c.center(max_head_length, " ")
column_name_prio_d = column_name_prio_d.center(max_head_length, " ")
def exitProgram():
    """Quit without saving."""
    # Leave the program immediately.
    print("Beende jetzt.")
    exit(0)
def errorMessage():
    """Print the invalid-option message (fallback for unknown menu input)."""
    # Error message
    print("Bitte eine gültige Option wählen!")
def saveChanges():
    """Persist all four priority lists to disk, then clear the screen."""
    # Save the lists
    # NOTE(review): the open() handles are never closed explicitly; a
    # `with` block would be safer.
    pickle.dump( list_prio_a, open(PATH + "/save.a", "wb" ) )
    pickle.dump( list_prio_b, open(PATH + "/save.b", "wb" ) )
    pickle.dump( list_prio_c, open(PATH + "/save.c", "wb" ) )
    pickle.dump( list_prio_d, open(PATH + "/save.d", "wb" ) )
    os.system('clear')
    print("Änderungen gespeichert.")
def saveQuit():
    """Save all lists, then exit the program."""
    # Save and quit
    saveChanges()
    print("Gespeichert. Beende jetzt.")
    exit(0)
def equalize_lists():
    """Pad the lists so A/B and C/D have equal lengths, and rebuild the
    global row-number ranges used by showAll().

    NOTE(review): the leading while loops index [-1] and would raise
    IndexError if a list pair ever became completely empty.
    """
    # Make the lists equally long based on their lengths and set the
    # numbers lists.  First: drop empty padding cells at the end.
    while list_prio_a[-1] == "" and list_prio_b[-1] == "":
        del list_prio_a[-1]
        del list_prio_b[-1]
    while list_prio_c[-1] == "" and list_prio_d[-1] == "":
        del list_prio_c[-1]
        del list_prio_d[-1]
    len_a = len(list_prio_a)
    len_b = len(list_prio_b)
    len_c = len(list_prio_c)
    len_d = len(list_prio_d)
    # Pad the shorter list of each pair with empty cells.
    while len_b < len_a:
        list_prio_b.append("")
        len_a = len(list_prio_a)
        len_b = len(list_prio_b)
    while len_a < len_b:
        list_prio_a.append("")
        len_a = len(list_prio_a)
        len_b = len(list_prio_b)
    while len_d < len_c:
        list_prio_d.append("")
        len_c = len(list_prio_c)
        len_d = len(list_prio_d)
    while len_c < len_d:
        list_prio_c.append("")
        len_c = len(list_prio_c)
        len_d = len(list_prio_d)
    # Make the row numbers globally visible.  Checking one side per table
    # pair suffices since the lengths are now equal; numbering starts at 1.
    global numbers_a
    numbers_a = range(1, len_a + 1)
    global numbers_c
    numbers_c = range(1, len_c + 1)
def showAll():
    """Print both Eisenhower tables (A/B and C/D) to the terminal."""
    # Display the lists
    #
    # First: bring the lists to equal length
    equalize_lists()
    # Render the lists as two PrettyTables
    x = PrettyTable(print_empty=False)
    x.add_column("#", numbers_a)
    x.add_column(column_name_prio_a, list_prio_a)
    x.add_column(column_name_prio_b, list_prio_b)
    x.align[column_name_prio_a] = "l"
    x.align[column_name_prio_b] = "l"
    print(x)
    y = PrettyTable(print_empty=False)
    y.add_column("#", numbers_c)
    y.add_column(column_name_prio_c, list_prio_c)
    y.add_column(column_name_prio_d, list_prio_d)
    y.align[column_name_prio_c] = "l"
    y.align[column_name_prio_d] = "l"
    print(y)
def addItem():
    """Interactively insert a new task at a chosen position in one list.

    Removed a stray debug ``print(LIST)`` and a redundant second
    ``LIST.lower()`` (the letter is already lower-cased before the first
    loop breaks).
    """
    list_names = ["A", "B", "C", "D"]
    list_dict = {"a" : list_prio_a, "b" : list_prio_b, "c" : list_prio_c, "d" : list_prio_d}
    # Ask until a valid list letter (A-D) is entered.
    while True:
        LIST = input("\nIn welcher Liste (A, B, C, D) einfügen?\n").upper()
        if LIST in list_names:
            LIST = LIST.lower()
            break
        else:
            print("\nBitte eine gültige Liste (A, B, C, D) auswählen!\n")
            continue
    # Ask until a valid integer position is entered, then insert the task.
    while True:
        try:
            USERINPUT = int(input("\nAn welcher Position einfügen?\n"))
        except ValueError:
            print("\nBitte eine Zahl eingeben!\n")
            continue
        else:
            POSITION = USERINPUT - 1
            TASK = (input("\nWelche Aufgabe soll hinzugefügt werden?\n"))
            list_dict[LIST].insert(POSITION, TASK)
            equalize_lists()
            break
def delItem():
    """Interactively delete the task at a chosen position from one list.

    Fixes: positions 0 and below previously slipped past the range check
    and deleted from the *end* of the list via negative indexing, and the
    out-of-range case wrongly reprinted the "enter a number" message.
    """
    list_names = ["A", "B", "C", "D"]
    list_dict = {"a" : list_prio_a, "b" : list_prio_b, "c" : list_prio_c, "d" : list_prio_d}
    # Ask until a valid list letter (A-D) is entered.
    while True:
        LIST = input("\nIn welcher Liste (A, B, C, D) löschen?\n").upper()
        if LIST in list_names:
            LIST = LIST.lower()
            break
        else:
            print("\nBitte eine gültige Liste (A, B, C, D) auswählen!\n")
            continue
    # Ask until an in-range position (1..len) is entered, then delete it.
    while True:
        try:
            USERINPUT = int(input("\nWelche Position löschen?\n"))
        except ValueError:
            print("\nBitte eine Zahl eingeben!\n")
            continue
        if not 1 <= USERINPUT <= len(list_dict[LIST]):
            print("\nBitte eine gültige Position eingeben!\n")
            continue
        else:
            POSITION = USERINPUT - 1
            del list_dict[LIST][POSITION]
            equalize_lists()
            break
#
# Control flow
#
# Implemented functions: menu option -> handler
fncDict = {'Q': exitProgram, 'A': addItem, 'L': delItem, 'W': saveChanges, 'WQ': saveQuit}
RUNNING = 'yes'
while RUNNING == 'yes':
    showAll()
    print('''\nBitte eine Option wählen:\n
    (A): Neue Aufgabe an einer bestimmten Position eintragen
    (L): Aufgabe löschen
    (V): Aufgabe verschieben
    (W): Speichern
    (WQ): Speichern und Beenden
    (Q): Beenden''')
    OPTION = input("\nOption:\n").upper()
    try:
        NUMBER = int(OPTION) -1
    except ValueError:
        fncDict.get(OPTION, errorMessage)()
    # NOTE(review): menu option (V) has no handler in fncDict (selecting it
    # shows the error message), and purely numeric input only computes
    # NUMBER without doing anything -- both silently no-op.
| P9k/pysenhower_ger | pysenhower.py | pysenhower.py | py | 7,364 | python | de | code | 0 | github-code | 13 |
25345562203 | from flask_restplus import Namespace, Resource, fields
from app.main.models.user import User
# Flask-RESTPlus namespace grouping all /users endpoints.
api = Namespace('users')
# Serialization (marshalling) schema for a user resource.
user = api.model('User', {
    'id': fields.Integer,
    'first_name': fields.String,
    'last_name': fields.String,
    'pseudo': fields.String
})
@api.route('/<int:identifiant>')
# Fixed copy-pasted 'Tweet not found' -- this namespace serves users.
@api.response(404, 'User not found')
@api.param('identifiant', 'The user unique identifier')
class UserResource(Resource):
    """Read endpoint for a single user."""
    @api.marshal_with(user)
    def get(self, identifiant):
        """Fetch the user with the given identifier.

        NOTE(review): placeholder implementation -- returns the literal
        string 'user' instead of looking up a record by `identifiant`.
        """
        return 'user'
29237072036 | import dash_bootstrap_components as dbc
import dash_html_components as html
from views.layout.controls import indicators_controls
# Top navigation bar: brand title plus the four section links.
top_menu = dbc.Navbar([
    html.H1('Furni', className='flex-field'),
    dbc.Nav([
        dbc.NavItem(dbc.NavLink('Indicadores generales', className='btn btn-primary round',href='/indicadores')),
        dbc.NavItem(dbc.NavLink('Características', className='btn btn-primary round',href="/indicadores/caracteristicas")),
        dbc.NavItem(dbc.NavLink('Clasificador demanda', className='btn btn-secondary round',href='/demanda')),
        dbc.NavItem(dbc.NavLink('Predicción futura', className='btn btn-third round',href='/demanda/prediccion')),
    ],
        className='top-menu'
    ),
],
    className='menu flexy-row between')
# Sidebar: the indicator controls on top, team credits underneath.
sidebar_menu = html.Div([
    html.Div(
        indicators_controls,
        id='sidebar_container',
        className='wrap flexy-col'
    ),
    html.Div([
        html.H6('Developed by DS4A - Grupo 65'),
        html.Br(),
        html.H6('Juan Manuel Velez Parra'),
        html.H6('Nicholas Gooding Rios'),
        html.H6('David Felipe Rubio Mendez'),
        html.H6('Johann Sebastian Roa Amorocho'),
        html.H6('Andrés Manrique Ardila'),
        html.H6('---'),
        html.H6('Esteban Betancur | TA'),
        html.H6('Luis Rojas | TA'),
    ],
        className='team-info'
    )
],
    id='sidebar',
    className='sidebar bg-light'
)
14619679476 | # Preprocesses NVD data
import re

# Yearly NVD feed files are named like "nvdcve-1.1-2019"; kept for filtering.
name_pattern = r'(nvdcve-1.1-){1}(\d){4}'

# Listing JSON files
from os import listdir
from os.path import isfile, join

path = join('data', 'extracted')
data_files = [f for f in listdir(path) if isfile(join(path, f))]
print('{} files identified'.format(len(data_files)))
for item in data_files:
    print(item)

import json

# Accumulate every CVE entry across all feed files.
full_list = []
total_cves = 0
for data_file in data_files:
    file_path = join(path, data_file)
    print('Processing {}'.format(file_path))
    with open(file_path, encoding='utf-8') as fh:
        contents = json.load(fh)
        contents = contents.get("CVE_Items", None)
        if contents is None:
            raise ValueError('CVE_Items not found in {}'.format(file_path))
        cve_count = len(contents)
        print('Found {} CVEs in {}'.format(cve_count, data_file))
        total_cves += cve_count
        full_list += contents
print('Total CVEs documented: {}'.format(total_cves))
# Bug fix: total_cves is an int, so len(total_cves) raised TypeError here.
# The intended cross-check is the length of the accumulated entry list.
print('Total CVEs accumulated: {}'.format(len(full_list)))
36121390161 | from typing import Optional, Dict, Tuple
from itertools import accumulate
import numpy as np
from gym.wrappers.time_limit import TimeLimit
import matplotlib.pyplot as plt
import seaborn as sns
import torch
from tqdm import tqdm
from .agents.utils import Agent
from .agents.DQN import DQNAgent
from .agents.VPG import VPGAgent
# from .models.policy import PolicyFFN
sns.set()
def train_dqn_agent(env: TimeLimit, num_episodes: int = 5000, config: Optional[Dict] = None, device: str = 'cpu',
                    show: bool = True) -> Agent:
    """
    Trains a DQN agent on an environment for a specified number of episodes.
    Args:
        env: gym-compliant environment for the agent to operate in
        num_episodes: number of episodes the agent should learn on
        config: config dictionary for the agent
        device: device that is supposed to be used for model optimization
        show: whether plots should be displayed
    Returns:
        agent: a trained DQN agent
    """
    if config is None:
        config = dict()
    agent = DQNAgent(env, config, device)
    episode_scores = []
    episode_successes = []
    for i_episode in tqdm(range(num_episodes)):
        agent.reset()
        ep_score = 0
        ep_success = False
        while True:
            next_state, reward, done, info = agent.take_action()
            ep_score += reward
            if not ep_success:
                ep_success = agent.is_success()
            # DQN optimizes after every environment step.
            agent.optimize_model()
            if done:
                episode_scores.append(ep_score)
                episode_successes.append(int(ep_success))
                break
        # Periodically sync the target network with the online network.
        if i_episode % agent.config['TARGET_UPDATE'] == 0:
            agent.update_target()
    # torch.cat implies the per-step rewards are 0-d tensors -- TODO confirm
    # against Agent.take_action().
    episode_scores = torch.cat(episode_scores).cpu().numpy()
    if show:
        x_score = np.arange(len(episode_scores))
        y_score = episode_scores
        sns.regplot(x_score, y_score, lowess=True, marker='.')
        plt.title("Train DQN score")
        plt.show()
        # Running success rate: cumulative successes / episode index.
        x_success = np.arange(len(episode_successes))
        y_success = np.array(list(accumulate(episode_successes))) / (x_success + 1)
        sns.regplot(x_success, y_success, marker='.')
        plt.title("Train DQN success rate")
        plt.show()
    return agent
def train_vpg_agent(env: TimeLimit, num_episodes: int = 5000, config: Optional[Dict] = None, device: str = 'cpu',
                    show: bool = True) -> Agent:
    # TODO: make it possible to use larger trajectory batches
    """
    Trains a VPG agent on an environment for a specified number of episodes.
    Args:
        env: gym-compliant environment for the agent to operate in
        num_episodes: number of episodes the agent should learn on
        config: config dictionary for the agent
        device: device that is supposed to be used for model optimization
        show: whether plots should be displayed
    Returns:
        agent: a trained VPG agent
    """
    if config is None:
        config = dict()
    agent = VPGAgent(env, config, device)
    episode_scores = []
    episode_successes = []
    for i_episode in tqdm(range(num_episodes)):
        agent.reset()
        ep_score = 0
        episode_success = False
        while True:
            next_state, reward, done, info = agent.take_action()
            ep_score += reward
            if not episode_success:
                episode_success = agent.is_success()
            if done:
                episode_scores.append(ep_score)
                episode_successes.append(int(episode_success))
                break
        # Unlike DQN, VPG updates once per completed episode trajectory.
        agent.optimize_model()
    episode_scores = torch.cat(episode_scores).cpu().numpy()
    if show:
        x_score = np.arange(len(episode_scores))
        y_score = episode_scores
        sns.regplot(x_score, y_score, lowess=True, marker='.')
        plt.title("Train VPG score")
        plt.show()
        x_success = np.arange(len(episode_successes))
        y_success = np.array(list(accumulate(episode_successes))) / (x_success + 1)
        sns.regplot(x_success, y_success, marker='.')
        plt.title("Train VPG success rate")
        plt.show()
    return agent
def evaluate_model(agent: Agent, num_episodes: int = 1000, show: bool = True) -> Tuple[float, float]:
    """
    Evaluates the agent greedily on its environment (with rendering).

    Args:
        agent: trained agent to evaluate
        num_episodes: number of evaluation episodes to run
        show: whether score / success-rate plots should be displayed

    Returns:
        mean_score: average episodic score over the evaluation run
        success_rate: fraction of episodes in which the agent succeeded
    """
    episode_scores = []
    episode_successes = []
    for _ in tqdm(range(num_episodes)):
        agent.reset()
        ep_score = 0
        ep_success = False
        while True:
            agent.env.render()
            # Greedy, non-remembered actions: pure evaluation, no learning.
            next_state, reward, done, info = agent.take_action(remember=False, greedy=True)
            ep_score += reward
            if not ep_success:
                ep_success = agent.is_success()
            if done:
                episode_scores.append(ep_score)
                episode_successes.append(int(ep_success))
                break
    test_episode_scores = torch.cat(episode_scores).cpu().numpy()
    if show:
        # Plot the converted numpy scores; previously the raw list of 0-d
        # tensors was plotted, inconsistent with the train_* functions.
        x_score = np.arange(len(test_episode_scores))
        y_score = test_episode_scores
        sns.regplot(x_score, y_score, lowess=True, marker='.')
        plt.title("Test score")
        plt.show()
        x_success = np.arange(len(episode_successes))
        y_success = np.array(list(accumulate(episode_successes))) / (x_success + 1)
        sns.regplot(x_success, y_success, marker='.')
        plt.title("Test success rate")
        plt.show()
    mean_score: float = test_episode_scores.mean()
    success_rate: float = np.mean(episode_successes)
    return mean_score, success_rate
| RedTachyon/rl_robotics | train_eval.py | train_eval.py | py | 5,250 | python | en | code | 0 | github-code | 13 |
370937772 | '''
手写数字识别
'''
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
#1.加载数据
data = datasets.load_digits()
print(type(data))
print(data)
#总的图像数目是1797个图像,每个图像是8*8的图
print(data.images.shape)
print(data.data.shape)
print(data.target.shape)
#2.获取X和Y
n_samples = data.images.shape[0]
X = data.images
X = X.reshape((X.shape[0], -1))
Y = data.target
print('特征矩阵信息:样本数目:{},每个样本的特征数目:{}'.format(X.shape[0], X.shape[1]))
print('待预测的目标属性数目:{}'.format(Y.shape))
#划分训练集合测试集
x_train, x_test = X[:int(n_samples/2)], X[int(n_samples/2):]
y_train, y_test = Y[:int(n_samples/2)], Y[int(n_samples/2):]
#3.构建分类模型
algo = svm.SVC(gamma=0.001)
# algo = KNeighborsClassifier(n_neighbors=10)
algo.fit(x_train,y_train)
#4.输出模型效果
print('训练集上的准确率:{}'.format(algo.score(x_train, y_train)))
print('测试集上的准确率:{}'.format(algo.score(x_test, y_test)))
#5.画图看数据
index = 100
image = X[index]
target = Y[index]
image = image.reshape((8, 8))
plt.imshow(image, cmap=plt.cm.gray_r) #灰度图像
plt.title(target)
plt.show() | yyqAlisa/python36 | 自学/sklearn self-study/SVM/手写数字识别.py | 手写数字识别.py | py | 1,281 | python | en | code | 0 | github-code | 13 |
def findRestaurant(list1, list2):
    """Return the common restaurant(s) of both lists with the least index sum.

    LeetCode 599.  A dict of list2 positions replaces the original
    O(n*m) `in list2` / `list2.index()` scans, giving O(n + m) time.
    Ties are returned in list1 order, matching the original dict-based
    insertion order.
    """
    pos2 = {name: j for j, name in enumerate(list2)}
    best = None
    answer = []
    for i, name in enumerate(list1):
        j = pos2.get(name)
        if j is None:
            continue
        total = i + j
        if best is None or total < best:
            best = total
            answer = [name]
        elif total == best:
            answer.append(name)
    return answer
def med(arr):
    """Return the middle element of the sorted values (upper median for even n).

    Uses sorted() instead of the previous in-place arr.sort(), so the
    caller's list is no longer mutated as a side effect.
    """
    return sorted(arr)[len(arr) // 2]
def mod(arr):
    """Return the most frequent element; ties go to the value seen first.

    Counter makes this O(n) instead of the previous O(n^2)
    max(arr, key=arr.count); the tie-break (first occurrence wins) is
    preserved because Counter keeps insertion order and most_common's
    sort is stable.
    """
    from collections import Counter
    return Counter(arr).most_common(1)[0][0]
def fun():
    """Read n rows of integers; print per-row medians/modes, then overall stats."""
    n = int(input())
    medi = []  # median of each row
    modi = []  # mode of each row
    nums = []  # all numbers, flattened
    for _ in range(n):
        num = list(map(int, input().split()))
        nums.extend(num)
        medi.append(med(num))
        modi.append(mod(num))
    print(*medi)
    print(*modi)
    print(med(medi))  # median of the row medians
    print(mod(modi))  # mode of the row modes
    print(med(nums))  # median over all values
    print(mod(nums))  # mode over all values
# Entry point
fun()
| mishutka200101/Python-Practice-2 | task_15.4.py | task_15.4.py | py | 466 | python | en | code | 0 | github-code | 13 |
73837308498 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Renames EducationalActivityResult.planned to planned_result and adds
    two nullable fields (auto-generated; do not edit applied migrations)."""
    dependencies = [
        ('WeekPlanner', '0005_auto_20150915_1900'),
    ]
    operations = [
        migrations.RenameField(
            model_name='educationalactivityresult',
            old_name='planned',
            new_name='planned_result',
        ),
        migrations.AddField(
            model_name='educationalactivityresult',
            name='planned_activity',
            field=models.ForeignKey(to='WeekPlanner.EducationalActivity', null=True),
        ),
        migrations.AddField(
            model_name='physicalactivityresult',
            name='planned_result',
            field=models.TextField(null=True, blank=True),
        ),
    ]
| yarkinsv/PYW | WeekPlanner/migrations/0006_auto_20150918_1735.py | 0006_auto_20150918_1735.py | py | 817 | python | en | code | 0 | github-code | 13 |
15647544617 | import numpy
from matplotlib import pyplot
from scipy import interpolate
# Grid and time-stepping parameters
nx = 41
dx = 2./(nx-1)
nt = 50
dt = .01
c = 1          # advection speed
x = numpy.linspace(0,2,nx)
xp = x.copy()  # particle positions for the particle method (PM)
u = numpy.ones(nx)
u[10:20] = 2   # square-wave initial condition
up = u.copy()
for n in range(nt):
    un = u.copy()
    for i in range(1,nx-1):
        # First-order upwind finite difference for u_t + c*u_x = 0
        u[i] = un[i]-c*dt/dx*(un[i]-un[i-1])
        # Advect the particle; wrap it back into the periodic domain [0, 2)
        xp[i] += c*dt
        if(xp[i] > 2): xp[i] -= 2
    # Interpolate the (unchanged) particle values onto the fixed grid
    ui = interpolate.interp1d(xp,up)
    pyplot.plot(x, u, 'o-', label='FDM')
    pyplot.plot(x, ui(x), 'o-', label='PM')
    pyplot.axis([0, 2, .5, 2.5])
    pyplot.legend(loc='upper right')
    pyplot.pause(.05)
    pyplot.cla()
pyplot.show()
| gear/HPSC | lec_code/pm/step01.py | step01.py | py | 620 | python | en | code | 0 | github-code | 13 |
1680792736 | #!/usr/bin/env python3
#encoding=utf-8
#-----------------------------------------
# Usage: python3 first_example.py
# Description: Function definition and call
#-----------------------------------------
def intersect(seq1, seq2):
    """Collect every item of seq1 that also occurs in seq2.

    Order and duplicates follow seq1.  This is the comprehension
    equivalent of accumulating matches with a loop and append().
    """
    return [item for item in seq1 if item in seq2]
# Demo: the function call and a list comprehension give the same result.
s1 = 'SPAM'
s2 = 'SCAM'
call_result = intersect(s1, s2)
print('The result of s1 intersect s2 in function call is %s' % call_result)
# The same effect, but more simpler
comp_result = [x for x in s1 if x in s2]
# NOTE(review): "reuslt" typo in the message below (runtime string left as-is).
print('The reuslt of s1 intersect s2 in list comprehension is %s' % comp_result)
| mindnhand/Learning-Python-5th | Chapter16.FunctionBasics/first_example.py | first_example.py | py | 626 | python | en | code | 0 | github-code | 13 |
28494556710 | import boto3
import json
import logging
from botocore.client import Config
from botocore.vendored.requests.exceptions import ReadTimeout
from traceback import format_exc
logger = logging.getLogger(__name__)
class stepfunctions(object):
    """Thin helper implementing the AWS Step Functions activity-worker loop."""

    def __init__(self, session=None):
        """Create a Step Functions client.

        Args:
            session: optional boto3.Session; falls back to the default session.
        """
        # get_activity_task long-polls for up to 60s; keep the read timeout
        # above that so the poll does not time out spuriously.
        config = Config(read_timeout=70)
        if session is None:
            self.sfn = boto3.client('stepfunctions', config=config)
        else:
            self.sfn = session.client('stepfunctions', config=config)

    def run_activity(self, process, arn, **kwargs):
        """Poll *arn* forever and run ``process(payload_dict)`` for each task.

        Successful results are JSON-serialized into send_task_success;
        exceptions are reported via send_task_failure with the traceback.
        """
        while True:
            logger.info('Querying for task')
            try:
                task = self.sfn.get_activity_task(activityArn=arn)
            except ReadTimeout:
                logger.warning('Activity read timed out')
                continue
            token = task.get('taskToken', None)
            if token is None:
                # Long poll expired with no work available; poll again.
                continue
            logger.debug('taskToken: %s' % token)
            try:
                payload = task.get('input', '{}')
                logger.info('Payload: %s' % payload)
                # run process function with payload as kwargs
                output = process(json.loads(payload))
                # Send task success
                self.sfn.send_task_success(taskToken=token,
                                           output=json.dumps(output))
            except Exception as e:
                err = str(e)
                tb = format_exc()
                logger.error("Exception when running task: %s - %s" %
                             (err, json.dumps(tb)))
                # Truncate long messages (presumably to fit Step Functions'
                # 256-char `error` field).  Bug fix: err[252] took a single
                # character instead of the err[:252] prefix slice.
                err = (err[:252] + ' ...') if len(err) > 252 else err
                self.sfn.send_task_failure(taskToken=token,
                                           error=str(err),
                                           cause=tb)
| matthewhanson/boto3-utils | boto3utils/stepfunctions.py | stepfunctions.py | py | 1,901 | python | en | code | 5 | github-code | 13 |
42663822361 | # import uwebsockets.client
import urequests
import ujson
import time
import machine
import dht
# Read the device's API secret from the local 'secret' file at import time.
with open('secret') as f:
    secret_key = f.read().strip()
def main():
    """Report DHT11 temperature/humidity readings to the IoT server forever.

    Loop: measure the sensor, POST the reading as JSON, then sleep for the
    interval the server returned on the first response.  The LED on pin 16 is
    used as a status indicator (off on errors, blinking on POST failures).
    """
    uri = 'http://35.244.13.244/iot/post'
    # uri = 'http://192.168.225.201:8000/iot/post' # for testing
    # uri = 'ws://echo.websocket.org/' # for websocket testing
    # websocket = uwebsockets.client.connect(uri) # for websocket use
    led = machine.Pin(16, machine.Pin.OUT)
    print("Connecting to {}:".format(uri))
    # DHT11 sensor wired to GPIO12 (board pin D6).
    out = dht.DHT11(machine.Pin(12))
    # d6 p
    FIRST = True
    while True:
        try:
            # Fallback sleep interval (~30 min) used until/unless the server
            # supplies one; NOTE(review): it is reset on every successful
            # measurement, so the server-provided interval only survives
            # within one iteration -- confirm this is intended.
            t = 1799.3
            out.measure()
            ERROR_IN_MEASUREMENT = False
            mesg = ujson.dumps({'secretkey': secret_key,
                                'temp': out.temperature(),
                                'humidity': out.humidity(),
                                'ERROR_IN_MEASUREMENT': ERROR_IN_MEASUREMENT,
                                'FIRST': FIRST})
            print(mesg)
        except:
            # Measurement failed; still report, flagged as an error.
            # NOTE(review): temperature()/humidity() are called again here and
            # presumably return the last buffered reading -- confirm they
            # cannot re-raise on this MicroPython port.
            ERROR_IN_MEASUREMENT = True
            mesg = ujson.dumps({'secretkey': secret_key,
                                'temp': out.temperature(),
                                'humidity': out.humidity(),
                                'ERROR_IN_MEASUREMENT': ERROR_IN_MEASUREMENT,
                                'FIRST': FIRST})
            print("error", mesg)
            led.off()
        try:
            req = urequests.post(uri, data=mesg)
            req = ujson.loads(req.content)
            resp = req['message']
            print("response : {}".format(resp))
            # LED on = healthy; re-light it after an earlier error turned it off.
            if not led.value():
                led.on()
            if FIRST:
                # Server dictates the reporting interval on the first response.
                print("next reading will be taken in {} seconds".format(req['time']))
                t = req['time']
                if t > 100000:
                    led.off()
                FIRST = False
        except Exception as e:
            # POST or decode failed: blink the LED 3 times and retry sooner.
            print(e)
            for x in range(3):
                led.on()
                time.sleep(2)
                led.off()
                time.sleep(2)
            t -= 6
            # websocket = uwebsockets.client.connect(uri)
            # for reconnecting in case it is closed
        finally:
            print('-'*20)
            time.sleep(t)
            # websocket.close() #finally closing the websocket connection
# websocket.close() #finally closing the websocket connection
| sajankp/iot_project_esp8266 | server.py | server.py | py | 2,355 | python | en | code | 0 | github-code | 13 |
73817130577 | from flask import *
app = Flask(__name__)
@app.route('/')
def homepage():
    """Render the landing-page greeting."""
    greeting = "Hello me!"
    return greeting
# In-memory "database" of demo users, keyed by username.  Each value holds the
# profile fields rendered by the /about-me template.
users = {
    'thuhuongvan98': {
        "Name": "Van Nguyet Thu Huong",
        "Age": 21,
        "Address": "Hahaha"
    },
    'baohoa96' : {
        "Name": "Hoa Hoang Bao Hoa",
        "Age": 23,
        "Address": "Complicated"
    },
    'htkl172' : {
        "Name": "Hoang Tran Khanh Linh",
        "Age": 21,
        "Address": "Huhuhu"
    }
}
@app.route('/about-me')
def about_me():
    """Render the about-me page listing all demo user profiles."""
    return render_template('about_me.html', users=users)
# The variable name used inside the HTML template may differ from the Python name.
# render_template does not serve images by itself (use static files for that).
@app.route('/school')
def school():
    """Redirect visitors to the MindX homepage (temporary, 302)."""
    destination = "https://mindx.edu.vn/"
    return redirect(destination, code=302)
@app.route('/bmi/<weight>/<height>')
def bmi(weight, height):
    """Compute BMI from URL segments and render a verdict page.

    Args:
        weight: body weight in kilograms (URL path segment).
        height: body height in centimetres (URL path segment).

    Generalized to accept decimal values (e.g. /bmi/70.5/172); integer
    inputs produce the same result as before.
    """
    bmi_calculation = float(weight) / ((float(height) / 100) ** 2)
    # WHO-style categories, checked from the lowest threshold upward; the
    # final branch is a plain else (the old `elif >= 30` was redundant).
    if bmi_calculation < 16:
        evaluate = "Severely underweight"
    elif bmi_calculation < 18.5:
        evaluate = "Underweight"
    elif bmi_calculation < 25:
        evaluate = "Normal"
    elif bmi_calculation < 30:
        evaluate = "Overweight"
    else:
        evaluate = "Obese"  # fixed user-facing typo "Obsese"
    return render_template('bmi.html', bmi_calculation=bmi_calculation, evaluate=evaluate)
# @app.route('/user/<username>')
# def user(username):
# users = {
# 'thuhuongvan98': {
# "Name": "Van Nguyet Thu Huong",
# "Age": 21,
# "Relationship_Status": "Married"
# },
# 'baohoa96' : {
# "Name": "Hoa Hoang Bao Hoa",
# "Age": 23,
# "Relationship_Status": "Complicated"
# },
# 'htkl172' : {
# "Name": "Hoang Tran Khanh Linh",
# "Age": 21,
# "Relationship_Status": "Married"
# }
# }
# if username in users:
# return f'{users[username]["Name"]}, {users[username]["Age"]}'
# else:
# return "User not found"
# Start the development server on port 3000.
# NOTE(review): debug=True enables the interactive debugger -- never use in production.
app.run(debug=True, port=3000)
# In HTML you can read data from a dict & list, e.g. {{users.htkl172.Age}}
70196260818 | import os
from datetime import datetime
from flask import Flask, flash, json, render_template, redirect, request, url_for
# ----------------------------#
# Split answer file function  #
# ----------------------------#
def split_answer_file(filename):
    """Parse a "key: value" answer file into a dict.

    Each non-blank line looks like "question_id: answer".  Only the FIRST
    ':' is treated as the separator, so answers may themselves contain
    colons (the original split(':') silently dropped everything after a
    second colon).  Blank or separator-less lines are skipped instead of
    raising IndexError.
    """
    answers = {}
    with open(filename, "r") as file:
        for line in file:
            key, sep, value = line.partition(":")
            if not sep:
                continue  # blank/malformed line: no ':' present
            answers[key] = value.strip()
    return answers
# Build the answer lookup tables once at import time from the bundled data files.
# NOTE(review): paths are relative to the working directory -- running the script
# from elsewhere raises FileNotFoundError.
correct_answers = split_answer_file('./data/geography1/geography1_correct_answer.txt')
print('Dict Correct\t',correct_answers)
incorrect_answers = split_answer_file('./data/geography1/geography1_incorrect_answer.txt')
print('Dict Incorrect\t',incorrect_answers)
25837813290 |
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.metrics import mean_absolute_error
from scipy.optimize import curve_fit
class LinearInterpolator:
    """Fill gaps in per-row engagement time series by linear interpolation.

    Operates on DataFrames whose rows hold two aligned numpy arrays: the
    observation times (column "times") and the engagement counts (column
    named by `engagement_col`).
    """

    def __init__(self, engagement_col: str='engagements'):
        self.engagement_col = engagement_col
        self.time_col = "times"

    def interpolate(self, df: pd.DataFrame):
        """Interpolate every row of df.

        Returns:
            (time_list, engagement_list): per-row arrays of completed time
            steps and the interpolated engagement values at those steps.
        """
        time_list = []
        engagement_list = []
        for i in tqdm(range(len(df))):
            time_i = df.loc[i, self.time_col].copy()
            engagement_i = df.loc[i, self.engagement_col].copy()
            times, new_engagements = self._interpolate(time_i, engagement_i)
            time_list.append(times)
            engagement_list.append(new_engagements)
        return time_list, engagement_list

    def _interpolate(self, times: np.array, engagements: np.array):
        """Insert any missing integer time steps and linearly fill their values.

        Missing steps are inserted with value None (NaN in a float array);
        time 0 defaults to engagement 0.  pandas' index-based interpolation
        then fills the gaps, and results are rounded to ints.
        """
        for i in range(int(max(times))):
            if i not in times:
                insert_val = 0 if i==0 else None
                engagements = np.insert(engagements, i, insert_val, axis=0)
                times = np.insert(times, i, i, axis=0)
        new_engagements = pd.Series(engagements, index=times) \
            .interpolate(method='index') \
            .round() \
            .astype(int) \
            .to_numpy()
        return times, new_engagements

    def predict(self, engagements: np.array, indices: np.array) -> np.array:
        # A "prediction" is just a lookup into the interpolated series.
        return engagements[indices]

    def score(self, y: np.array, preds: np.array) -> float:
        # Mean absolute error between observed and interpolated engagements.
        return mean_absolute_error(y, preds)

    def evaluate(self, train: pd.DataFrame, test: pd.DataFrame) -> float:
        """Average MAE-per-prediction of the interpolated series on held-out points.

        NOTE(review): `scores` sums per-row MAE values but is divided by the
        total number of predictions -- confirm this weighting is intended.
        """
        prediction_count = 0
        scores = 0
        for i in tqdm(range(len(test))):
            engagements = train.loc[i, "interpolated_engagements"]
            times = test.loc[i, self.time_col].astype(int)
            times = times[np.abs(times) < len(engagements)] # Do this because sometimes randomly split test data is outside the interpolation window
            y = test.loc[i, self.engagement_col][:len(times)]
            preds = self.predict(engagements, times)
            if preds.size != 0: # Sometimes there are no predictions made due to filtering
                score = self.score(y, preds)
                prediction_count += len(preds)
                scores += score
        return scores / prediction_count

    def plot_predictions(self, train_row, test_row):
        """Scatter the held-out observations over the interpolated series."""
        x = test_row[self.time_col].astype(int)
        x = x[np.abs(x) < len(train_row["interpolated_engagements"])]
        y = test_row[self.engagement_col][:len(x)]
        ax = plt.axes()
        ax.scatter(x, y, color='red', label='Actual Observations')
        ax.plot(train_row["complete_time"], train_row["interpolated_engagements"], 'k', label='Interpolated Time Series')
        ax.set_ylabel('Engagement')
        ax.set_xlabel(self.time_col)
        ax.set_ylim(0)
        ax.set_xlim(0)
        ax.legend()
        plt.show()
class CurveFitter:
    """Fit several parametric curve families to an engagement series and keep
    the best-scoring one (lowest mean absolute error).

    Candidate families: linear, sigmoid, logarithmic, exponential.
    """

    def __init__(self,):
        # One entry per candidate family; "params" is filled in by fit().
        self.fits = {
            "linear": {"function": self.linear, "params": None},
            "sigmoid": {"function": self.sigmoid, "params": None},
            "logarithmic": {"function": self.logarithmic, "params": None},
            "exponential": {"function": self.exponential, "params": None},
        }
        self.time_col = "times"

    def fit(self, x, y):
        """Try every candidate family on (x, y); record the best in self.best_fit."""
        best_score = None
        self.best_fit = None
        # Keep pristine copies of the inputs.  BUG FIX: the original "reset"
        # (`x = x; y = y`) was a no-op, so the logarithmic branch's removal of
        # the first sample leaked into the exponential fit tried afterwards.
        x_orig, y_orig = x, y
        for i in self.fits:
            x, y = x_orig, y_orig
            try:
                if i == "logarithmic":
                    # log(x - c) is undefined at the origin; drop the first point.
                    x = np.delete(x, 0)
                    y = np.delete(y, 0)
                params, _ = curve_fit(
                    f=self.fits[i]["function"],
                    xdata=x,
                    ydata=y,
                    p0=self._initialize(x, y, i),
                    #bounds = self._bound(y),
                    method='dogbox',
                )
                score = self.score(fit=i, params=params, x=x, y=y)
                if best_score is None or score < best_score:
                    best_score = score
                    self.best_fit = i
                self.fits[i]["params"] = params
            except RuntimeError:  # Optimal parameters not found: the maximum number of function evaluations is exceeded.
                pass
            except ValueError:  # ValueError: array must not contain infs or NaNs - get thrown incorrectly
                pass
            except np.linalg.LinAlgError:  # SVD did not converge in Linear Least Squares
                pass

    def _initialize(self, x, y, fit_type: str):
        """Initial parameter guesses (p0) for each curve family."""
        if fit_type == "sigmoid":
            return [max(y), np.median(x), 1, min(y)]
        elif fit_type == "logarithmic":
            return [6, 1.5, 0.2]
        elif fit_type == "exponential":
            return [5, 0.6, 40]
        else:
            return None

    def _bound(self, y,):
        # Optional parameter bounds (currently unused by fit()).
        return (0, max(y))

    def sigmoid(self, x, L, x0, k, b):
        # L: curve height, x0: midpoint, k: steepness, b: vertical offset.
        y = L / (1 + np.exp(-k*(x-x0))) + b
        return (y)

    def linear(self, x, a, b):
        return a * x + b

    def logarithmic(self, x, a, b, c):
        return a * np.log(x - c) + b

    def exponential(self, x, a, b, c):
        return a * np.exp(-b * x) + c

    def predict(self, fit: str, params: list, x: list) -> list:
        """Evaluate the named curve at x; NaNs/infs are coerced to finite numbers."""
        return np.nan_to_num(self.fits[fit]["function"](*[x]+list(params)))

    def score(self, fit: str, params: list, x: list, y: list) -> float:
        """Mean absolute error of the named curve against the observations."""
        return mean_absolute_error(y, self.predict(fit, params, x))

    def plot_predictions(self, train_row, test_row):
        """Plot train/test observations together with the fitted curve."""
        x = np.concatenate((train_row["times"], test_row["times"]), axis=0)
        y = np.concatenate((train_row["engagements"], test_row["engagements"]), axis=0)
        params = train_row["parameters"]
        fit = train_row["fit_type"]
        x_fitted = np.linspace(0, np.max(x), 100)
        y_fitted = self.predict(fit=fit, params=params, x=x_fitted)
        ax = plt.axes()
        ax.scatter(train_row["times"], train_row["engagements"], color='blue', label='Train Engagement')
        ax.scatter(test_row["times"], test_row["engagements"], color='red', label='Test Engagement')
        ax.plot(x_fitted, y_fitted, 'k', label='Fitted curve')
        ax.set_ylabel('Engagement')
        ax.set_xlabel('Time')
        ax.set_ylim(0)
        ax.set_xlim(0)
        ax.legend()
        plt.show()

    def train(self, df: pd.DataFrame):
        """Fit every row of df.

        Returns:
            (fit_types, parameter_list): per-row best family name and its
            fitted parameters.  Rows where no family converged get
            (None, None) instead of raising KeyError as the original did.
        """
        fit_types = []
        parameter_list = []
        for i in tqdm(range(len(df))):
            x = df.loc[i, self.time_col]
            y = df.loc[i, "engagements"]
            self.fit(x=x, y=y)
            fit_types.append(self.best_fit)
            if self.best_fit is None:
                parameter_list.append(None)
            else:
                parameter_list.append(self.fits[self.best_fit]["params"])
        return fit_types, parameter_list

    def evaluate(self, train: pd.DataFrame, test: pd.DataFrame) -> float:
        """Average MAE-per-observation of the fitted curves on held-out points."""
        prediction_count = 0
        score = 0
        for i in tqdm(range(len(train))):
            fit = train.loc[i, "fit_type"]
            params = train.loc[i, "parameters"]
            x = test.loc[i, self.time_col]
            y = test.loc[i, "engagements"]
            # Some elements in test have no values because of the way the random mask was generated
            if x.size != 0:
                prediction_count += len(y)
                score += self.score(fit=fit, params=params, x=x, y=y)
        return score / prediction_count
29102173909 | # from scopus import ScopusSearch
import os
import pandas as pd
import numpy as np
import logging
from urllib import parse, request
import urllib.error
import json
from pprint import pprint as pp
# Resolve the repository root (two levels above this file) and chdir into it so
# the relative data paths below work regardless of the invocation directory.
BASE_DIR = os.path.abspath(os.path.realpath(__file__))
BASE_DIR = os.path.join(os.path.dirname(BASE_DIR), '..', '..')
os.chdir(BASE_DIR)
# Output folders for the raw API responses.
FOLNAME_AFF_SEARCH = os.path.join(BASE_DIR, 'data', 'aff_search')
FOLNAME_METRIC_RESPONSE = os.path.join(BASE_DIR, 'data', 'metric_response')
# Column/field names used throughout this script.
key_aff = 'Institution'
key_acc = 'id_downloaded'
key_id = 'id'
key_met = 'metrics'
# Configure the module logger to emit DEBUG to both logs.txt and the console.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# create file handler (note: level is DEBUG, despite the original "info" comment)
fh = logging.FileHandler(os.path.join(BASE_DIR, 'logs.txt'), 'w', 'utf-8')
fh.setLevel(logging.DEBUG)
# create console handler with a debug log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# creating a formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)-8s: %(message)s')
# setting handler format
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
class my_scopus_search():
    """Build, send and decode a single Scopus Search API request."""

    def __init__(self, query=None, apiKey=None, httpAccept=None, logger=None):
        httpAccept = httpAccept or "application/json"
        apiKey = apiKey or "7f59af901d2d86f78a1fd60c1bf9426a"
        if query is None:
            # BUG FIX: the original raised a bare string, which is itself a
            # TypeError; raise a proper exception instead.
            raise ValueError("query is not given, please, give it")
        self.query = query
        self.httpAccept = httpAccept
        self.apiKey = apiKey
        self.logger = logger or logging.getLogger(__name__)
        self.response = None  # raw HTTP response object (set by send_request)
        self.jres = None      # decoded JSON payload (set by read_json)
        self.logger.debug("my_scopus_search class was initialized")
        self.__url = "https://api.elsevier.com/content/search/scopus"
        self.http_error = None  # last HTTP status code on failure

    def encode(self):
        """encode query into self.parsed_url (query string + key + accept header)"""
        self.parsed_url = self.__url + "?query=" + parse.quote_plus(self.query, safe='()')
        self.parsed_url = "&".join([self.parsed_url, "apiKey={}".format(self.apiKey), "httpAccept={}".format(parse.quote(self.httpAccept, safe=""))])
        self.logger.debug("encoding query to {}".format(self.parsed_url))
        return self

    def send_request(self):
        """send request; on HTTP errors, log and record the status code."""
        response = None
        try:
            self.logger.debug("sending the request")
            response = request.urlopen(self.parsed_url)
            self.logger.debug("request retrieved sucessully")
        except urllib.error.HTTPError as e:
            self.http_error = e.code
            if e.code == 404:
                self.logger.warning("NOT FOUND")
                self.logger.warning("error is {}".format(e))
            elif e.code == 400:
                self.logger.warning("invalid request, try again")
                self.logger.warning("error is {}".format(e))
            elif e.code == 401:
                self.logger.warning("cannot be authentified due to missing/invalid credentials")
                self.logger.warning("error is {}".format(e))
            elif e.code == 429:
                self.logger.warning("quota exceeded")
                self.logger.warning("error is {}".format(e))
            else:
                self.logger.warning("unkown error")
                self.logger.warning("error code is {}".format(e))
        except Exception as e:
            response = None
            # BUG FIX: the original passed `e` as a stray logging argument
            # with no %s placeholder, so the exception never reached the log.
            self.logger.warning("request retrieved with error, the error code is %s", e)
        self.response = response
        return self

    def read_json(self):
        """Decode the HTTP response body into self.jres (no-op on failure)."""
        if self.response is not None:
            output = json.loads(self.response.read())
            self.jres = output

    def get_search_object(self):
        """Convenience wrapper: encode, send, decode; returns the JSON dict (or None)."""
        self.logger.debug('start encoding')
        self.encode()
        self.logger.debug('encoding finished')
        self.send_request()
        self.read_json()
        return self.jres
class my_df():
    """Cursor-style iterator over the rows of a DataFrame (copies the input)."""

    def __init__(self, df, logger=None):
        self.logger = logger or logging.getLogger(__name__)
        self.df = df.copy()
        self.i = 0                      # cursor position
        self.n_rows = df.shape[0]
        self.inds = df.index.tolist()   # index labels, in order
        self.logger.debug("class was initialized")

    def next_row(self):
        """Return the next row; raise IndexError when the DataFrame is exhausted."""
        if self.i < self.n_rows:
            row = self.df.loc[self.inds[self.i], :]
            self.i = self.i + 1
            return row
        # BUG FIX: the original did `raise("...")` with a plain string, which
        # is a TypeError; raise a real exception (and use warning(), since
        # Logger.warn is deprecated).
        self.logger.warning("reached the end of the file")
        raise IndexError("the end of df reached")

    def next_aff(self, col_name):
        """Advance to the first remaining row whose `col_name` is NaN
        (i.e. not yet processed) and return its index label."""
        while True:
            row = self.next_row()
            if np.isnan(row[col_name]):
                return row.name
class scopus_query:
    """Build a Scopus search query string for counting books per university.

    Only the keys needed here (affiliation, doctype, publication year) are
    supported, so this class is applicable only in that narrow case.
    For the full query syntax see:
    https://dev.elsevier.com/tips/ScopusSearchTips.htm
    """

    def __init__(self, year=None, affil=None, doctype=None):
        self.q = dict()    # clause name -> rendered clause text
        self.query = None  # final query string, built by set_query()
        self.set_values(year=year, affil=affil, doctype=doctype)

    def set_values(self, year=None, affil=None, doctype=None):
        """Store the search parameters (with defaults) and reset any built query."""
        if year is None:
            year = 2012
        if affil is None:
            affil = 'Harvard University'
        if doctype is None:
            doctype = "bk"
        self.affil = affil
        self.doctype = doctype
        self.year = year
        self.q = dict()
        self.query = None
        return self

    def encode(self):
        """Render each parameter into its Scopus clause."""
        self.q['affil'] = 'affilorg("{}")'.format(self.affil)
        self.q['doctype'] = "doctype({})".format(self.doctype)
        self.q['year'] = "pubyear = {}".format(self.year)
        return self

    def set_query(self):
        """Join the rendered clauses with AND into the final query string."""
        self.query = " AND ".join([self.q[key] for key in self.q.keys()])
        # (removed the original's `self.query.encode()` call: its bytes result
        # was discarded, making it a confusing no-op)
        return self

    def get_query(self):
        """Return the query string built by set_query() (None if not built)."""
        return self.query
if __name__ == "__main__":
aff_fname = "universities_table.csv"
fname_aff_book_count = os.path.join(BASE_DIR, "data", aff_fname)
df_af = pd.read_csv(fname_aff_book_count).set_index("Institution")
df_aff = my_df(df_af, logger=logger)
MY_API_KEY = "e53785aedfc1c54942ba237f8ec0f891"
n = 50
year = 2016
doctype = "bk"
# where to save the results
dname_book_count = os.path.join(BASE_DIR, "data", "aff_book_{}".format(year))
col_dict = {2012: "book_downloaded_2012",
2013: "book_downloaded_2013",
2014: "book_downloaded_2014",
2015: "book_downloaded_2015",
2016: "book_downloaded_2016"}
fname_long_book = 'data/long_book_count.csv'
try:
df_old = pd.read_csv(fname_long_book)
except:
df_old = pd.DataFrame(columns=['name', 'metricType', 'year', 'valueByYear'])
for i in range(n):
logger.debug("getting univerity name")
aff_name = df_aff.next_aff(col_dict[year])
logger.debug("university name is {}".format(aff_name))
logger.debug("creating query")
query = scopus_query(year=year, doctype=doctype, affil='{}'.format(aff_name)).encode().set_query().get_query()
logger.debug("query is {}".format(query))
logger.debug("sending the request")
fname_jres = os.path.join(dname_book_count, "{}_year_{}.json".format(aff_name.title().replace(" ", ""), year))
try:
res = my_scopus_search(query=query, apiKey=MY_API_KEY)
s = res.get_search_object()
q = {'name': aff_name,
'metricType': 'BookCount',
'year': year,
'valueByYear': s['search-results']['opensearch:totalResults']}
df_old = df_old.append(q, ignore_index=True)
logger.debug("respond received sucessfuly")
logger.debug('saving the response to {}'.format(fname_jres))
with open(fname_jres, "w") as f:
json.dump(s, f)
logger.debug("json file saved successfully")
logger.debug("update the file")
df_af.at[aff_name, col_dict[year]] = 1
except Exception as e:
if res.http_error == 401:
logger.debug("error retrieved, error is {}".format(res.http_error))
elif res.http_error == 429:
logger.info("quota is exceeded, terminating the program")
break
else:
logger.debug("error retrieved, error is {}".format(res.http_error))
df_af.at[aff_name, col_dict[year]] = -1
logger.warn('respond has failed, error is ', e)
try:
logger.debug("saving the updated table")
df_af.to_csv(fname_aff_book_count)
logger.debug("table was updated successfully")
except Exception as e:
logger.warn("saving updated talbe has failed, please, save the table manually")
df_old.to_csv(fname_long_book, index=False)
| gnukinad/scival | src/book_count/get_aff_book_count.py | get_aff_book_count.py | py | 9,102 | python | en | code | 1 | github-code | 13 |
16508174953 | import json
from PyQt5.QtCore import pyqtSlot, QTime
from PyQt5.QtGui import QColor
from PyQt5.QtWidgets import (
QMainWindow,
QColorDialog,
QTableWidgetItem,
QApplication, QHBoxLayout,
QLabel,
QPushButton,
QTableWidget,
QVBoxLayout,
QWidget,
QFileDialog,
QTableWidgetSelectionRange,
QTimeEdit,
QTabWidget
)
from gradient_window_controller import GradientWindowController
class SettingsWindow(QMainWindow):
    """Settings window for the gradient screensaver.

    Lets the user build an ordered list of colors, set the delay and repeat
    intervals, save/load everything as JSON, and launch the gradient windows.
    """

    def __init__(self, screens):
        super().__init__()
        self._init_ui()
        # Wire every button to its handler.
        self.addButton.clicked.connect(self.on_add)
        self.deleteButton.clicked.connect(self.on_delete)
        self.upButton.clicked.connect(self.on_up)
        self.downButton.clicked.connect(self.on_down)
        self.startButton.clicked.connect(self.on_start)
        self.saveButton.clicked.connect(self.on_save)
        self.loadButton.clicked.connect(self.on_load)
        self.gradientWindowController = GradientWindowController(screens)
        # Restore previously saved settings, if any.
        self._load_settings()

    def _create_color_tab(self):
        """Build the "Colors" tab: a one-column color table plus ordering buttons."""
        self.colorWidget = QWidget()
        self.colorWidget.setAutoFillBackground(True)
        self.colorsLayout = QHBoxLayout()
        self.colorTable = QTableWidget()
        self.colorTable.setRowCount(0)
        self.colorTable.setColumnCount(1)
        self.colorTable.verticalHeader().hide()
        self.colorTable.horizontalHeader().hide()
        self.colorTable.setMaximumWidth(40)
        self.colorTable.setColumnWidth(20, 30)
        self.colorButtonsLayout = QVBoxLayout()
        self.addButton = QPushButton('Добавить')
        self.upButton = QPushButton('Вверх')
        self.downButton = QPushButton('Вниз')
        self.deleteButton = QPushButton('Удалить')
        self.colorButtonsLayout.addWidget(self.addButton)
        self.colorButtonsLayout.addWidget(self.upButton)
        self.colorButtonsLayout.addWidget(self.downButton)
        self.colorButtonsLayout.addWidget(self.deleteButton)
        self.colorButtonsLayout.addStretch()
        self.colorsLayout.addWidget(self.colorTable)
        self.colorsLayout.addStretch()
        self.colorsLayout.addLayout(self.colorButtonsLayout)
        self.colorWidget.setLayout(self.colorsLayout)
        return self.colorWidget

    def _create_intervals_tab(self):
        """Build the "Intervals" tab with the first-color delay and repeat inputs."""
        self.intervalsWidget = QWidget()
        self.intervalsWidget.setAutoFillBackground(True)
        self.intervalsLayout = QVBoxLayout()
        self.delayInput = QTimeEdit()
        self.delayInput.setDisplayFormat('hh:mm:ss')
        self.delayInput.setMinimumTime(QTime(0, 0, 1))  # at least one second
        self.repeatInput = QTimeEdit()
        self.repeatInput.setDisplayFormat('hh:mm:ss')
        self.repeatInput.setMinimumTime(QTime(0, 0, 1))
        self.intervalsLayout.addWidget(QLabel('Первый цвет'))
        self.intervalsLayout.addWidget(self.delayInput)
        self.intervalsLayout.addWidget(QLabel('Интервал'))
        self.intervalsLayout.addWidget(self.repeatInput)
        self.intervalsLayout.addStretch()
        self.intervalsWidget.setLayout(self.intervalsLayout)
        return self.intervalsWidget

    def _init_ui(self):
        """Assemble the central widget: tabs, status label and action buttons."""
        self.central = QWidget()
        self.centralLayout = QVBoxLayout()
        self.tabWidget = QTabWidget()
        self.tabWidget.addTab(self._create_color_tab(), 'Цвета')
        self.tabWidget.addTab(self._create_intervals_tab(), 'Интервалы')
        self.centralLayout.addWidget(self.tabWidget)
        self.errorLabel = QLabel()  # status / validation messages
        self.centralLayout.addWidget(self.errorLabel)
        self.settingsButtonsLayout = QHBoxLayout()
        self.saveButton = QPushButton('Сохранить')
        self.loadButton = QPushButton('Загрузить')
        self.startButton = QPushButton('Запустить')
        self.settingsButtonsLayout.addWidget(self.saveButton)
        self.settingsButtonsLayout.addWidget(self.loadButton)
        self.settingsButtonsLayout.addWidget(self.startButton)
        self.centralLayout.addLayout(self.settingsButtonsLayout)
        self.central.setLayout(self.centralLayout)
        self.setCentralWidget(self.central)

    @pyqtSlot()
    def on_add(self):
        """Ask the user for a color and append it as a new table row."""
        color = QColorDialog.getColor()
        item = QTableWidgetItem()
        item.setBackground(color)
        table: QTableWidget = self.colorTable
        table.insertRow(table.rowCount())
        table.setItem(table.rowCount() - 1, 0, item)

    @pyqtSlot()
    def on_delete(self):
        """Remove all selected rows from the color table."""
        table: QTableWidget = self.colorTable
        rows = {index.row() for index in table.selectedIndexes()}
        # BUG FIX: delete bottom-up.  Removing a row shifts the indices of
        # the rows below it, so the original top-down loop removed the wrong
        # rows whenever more than one was selected.
        for row in sorted(rows, reverse=True):
            table.removeRow(row)

    @staticmethod
    def _swap_items(table: QTableWidget, row1, col1, row2, col2):
        """Swap the items in two table cells."""
        item1 = table.takeItem(row1, col1)
        item2 = table.takeItem(row2, col2)
        table.setItem(row1, col1, item2)
        table.setItem(row2, col2, item1)

    @pyqtSlot()
    def on_up(self):
        """Move the selected rows one position up, keeping the selection."""
        table: QTableWidget = self.colorTable
        selected = table.selectedIndexes()
        if selected:
            rows = [index.row() for index in selected]
            # prev_row stops rows already at the top from moving further.
            prev_row = 0
            for row in sorted(rows):
                if row > prev_row:
                    SettingsWindow._swap_items(table, row, 0, row - 1, 0)
                else:
                    prev_row += 1
            table.clearSelection()
            table.setRangeSelected(QTableWidgetSelectionRange(min(rows) - 1, 0, max(rows) - 1, 0), True)

    @pyqtSlot()
    def on_down(self):
        """Move the selected rows one position down, keeping the selection."""
        table: QTableWidget = self.colorTable
        selected = table.selectedIndexes()
        if selected:
            rows = [index.row() for index in selected]
            # next_row stops rows already at the bottom from moving further.
            next_row = table.rowCount() - 1
            for row in sorted(rows, reverse=True):
                if row < next_row:
                    SettingsWindow._swap_items(table, row, 0, row + 1, 0)
                else:
                    next_row -= 1
            table.clearSelection()
            table.setRangeSelected(QTableWidgetSelectionRange(min(rows) + 1, 0, max(rows) + 1, 0), True)

    @pyqtSlot()
    def on_start(self):
        """Validate the color list and start the gradient screensaver."""
        table: QTableWidget = self.colorTable
        items = [table.item(row, 0) for row in range(table.rowCount())]
        colors = [item.background().color() for item in items]
        if not colors:
            self.errorLabel.setText('цвета добавь')
            return
        if len(colors) < 2:
            self.errorLabel.setText('давай побольше цветов')
            return
        if self.errorLabel.text():
            self.errorLabel.setText('молодец')
        # QTime -> whole seconds since midnight.
        delay = QTime(0, 0).secsTo(self.delayInput.time())
        repeat_interval = QTime(0, 0).secsTo(self.repeatInput.time())
        self.gradientWindowController.set_colors(colors)
        self.gradientWindowController.set_timers(delay, repeat_interval)
        self.gradientWindowController.run()

    @pyqtSlot()
    def on_save(self):
        """Serialize the colors and intervals to a user-chosen JSON file."""
        table: QTableWidget = self.colorTable
        settings = dict()
        items = [table.item(row, 0) for row in range(table.rowCount())]
        colors = [item.background().color() for item in items]
        rgb_colors = [color.getRgb() for color in colors]  # JSON-friendly tuples
        settings['colors'] = rgb_colors
        settings['delay'] = QTime(0, 0).secsTo(self.delayInput.time())
        settings['repeat_interval'] = QTime(0, 0).secsTo(self.repeatInput.time())
        filename = QFileDialog.getSaveFileName(directory='settings.json')[0]
        if filename:
            with open(filename, 'w') as f:
                json.dump(settings, f)
            # NOTE(review): this f-string has no placeholder -- "(unknown)" looks
            # like a lost {filename} substitution; confirm against project history.
            self.errorLabel.setText(f'настройки сохранены в (unknown)')

    def _load_settings(self, filename='settings.json'):
        """Load colors and intervals from a JSON file; ignore a missing file."""
        try:
            with open(filename) as f:
                settings = json.load(f)
            table: QTableWidget = self.colorTable
            table.setRowCount(0)
            for rgb_color in settings['colors']:
                color = QColor.fromRgb(*rgb_color)
                item = QTableWidgetItem()
                item.setBackground(color)
                table.insertRow(table.rowCount())
                table.setItem(table.rowCount() - 1, 0, item)
            self.delayInput.setTime(QTime(0, 0).addSecs(settings['delay']))
            self.repeatInput.setTime(QTime(0, 0).addSecs(settings['repeat_interval']))
            self.errorLabel.setText(f'настройки загружены из (unknown)')
        except FileNotFoundError:
            pass
        except (json.JSONDecodeError, KeyError, TypeError):
            # BUG FIX: the original `except A or B or C` evaluated to just
            # JSONDecodeError; a tuple catches all three malformed cases.
            self.errorLabel.setText(f'в (unknown) ошибка')

    @pyqtSlot()
    def on_load(self):
        """Ask the user for a settings file and load it."""
        filename = QFileDialog.getOpenFileName()[0]
        if filename:
            self._load_settings(filename)
if __name__ == "__main__":
import sys
app = QApplication(sys.argv)
w = SettingsWindow(app.screens())
w.show()
sys.exit(app.exec_())
| pmineev/GradientScreensaver | settings_window.py | settings_window.py | py | 9,097 | python | en | code | 0 | github-code | 13 |
# Project Euler 30: sum of all numbers that can be written as the sum of the
# fifth powers of their digits (1 is excluded by starting at 2).
#
# Upper bound: a d-digit number is at least 10**(d-1), while its digit power
# sum is at most d * 9**5.  For d >= 7, d * 9**5 < 10**(d-1), so no candidate
# has more than six digits; 6 * 9**5 = 354294 is a tight bound.  The original
# scanned up to 2480058 -- ~7x the work for the same printed answer.
POWER = 5
total = 0  # renamed from 'sum', which shadowed the builtin used below
for num in range(2, 6 * 9 ** POWER + 1):
    if sum(int(digit) ** POWER for digit in str(num)) == num:
        total += num
print(total)
| LeStarch/euler-solutions | euler30.py | euler30.py | py | 189 | python | en | code | 0 | github-code | 13 |
29214874341 | import argparse
import json
import logging
import os
import sys
import boto3
from botocore.config import Config
from botocore import UNSIGNED
from e2e_common.util import (
xrun,
atexitrun,
firstFromS3Prefix,
hassuffix,
)
logger = logging.getLogger(__name__)
def main():
    """Fetch an e2e test-network tarball from S3 and prepare the follower node.

    Downloads "<s3-source-net>.tar.bz2" from the public algorand-testdata
    bucket into --algod-dir, unpacks it, wipes the secondary node's sqlite
    state, and enables follow mode in its config so conduit can attach to it
    for data access.  Returns 0 on success; raises on missing args or a
    missing S3 object.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument(
        "--s3-source-net",
        help="AWS S3 key suffix to test network tarball containing Primary and other nodes. Must be a tar bz2 file.",
    )
    ap.add_argument(
        "--algod-dir",
        help="Directory to run algod network in.",
    )
    ap.add_argument("--verbose", default=False, action="store_true")
    args = ap.parse_args()

    if not args.s3_source_net:
        raise Exception("Must provide --s3-source-net")
    if not args.algod_dir:
        raise Exception("Must provide --algod-dir")

    tarname = f"{args.s3_source_net}.tar.bz2"

    # fetch test data from S3 (anonymous, unsigned access)
    bucket = "algorand-testdata"
    s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED))
    prefix = "indexer/e2e4"
    if "/" in tarname:
        # "<commit-hash>/<tarball>" keys: the hash extends the S3 prefix.
        # (split with maxsplit=1 so any further slashes stay in the key;
        # the original's plain split silently dropped them)
        cmhash, tarname = tarname.split("/", 1)
        prefix += "/" + cmhash
    # The original duplicated this assignment in both branches; hoisted.
    tarpath = os.path.join(args.algod_dir, tarname)

    success = firstFromS3Prefix(s3, bucket, prefix, tarname, outpath=tarpath)
    if not success:
        raise Exception(f"failed to locate tarname={tarname} from AWS S3 path {bucket}/{prefix}")

    tempnet = os.path.join(args.algod_dir, "net")
    xrun(["tar", "-C", args.algod_dir, "-x", "-f", tarpath])

    # Reset the secondary node, and enable follow mode.
    # This is what conduit will connect to for data access.
    for root, dirs, files in os.walk(os.path.join(tempnet, 'Node', 'tbd-v1')):
        for f in files:
            if ".sqlite" in f:
                os.remove(os.path.join(root, f))

    config_path = os.path.join(tempnet, "Node", "config.json")
    with open(config_path, "r") as config_file:
        cf = json.load(config_file)
    cf['EnableFollowMode'] = True
    with open(config_path, "w") as config_file:
        config_file.write(json.dumps(cf))

    return 0
if __name__ == "__main__":
sys.exit(main())
| algorand/indexer | e2e_tests/src/e2e_common/get_test_data.py | get_test_data.py | py | 2,340 | python | en | code | 111 | github-code | 13 |
33700228760 | import bge
bge.render.showMouse(True)
def fix_text():
    """Raise the text resolution of every scene object that supports it."""
    scene_objects = bge.logic.getCurrentScene().objects
    for obj in scene_objects:
        try:
            obj.resolution = 1.25
        except AttributeError:
            # Not a text object -- nothing to adjust.
            pass
def update_resource_meters():
    """Refresh the HUD meter bars and counters from the global resource state."""
    scene_objects = bge.logic.getCurrentScene().objects
    state = bge.logic.globalDict

    # Each meter bar is scaled horizontally by its fill fraction, capped at 1.
    meters = (('Food_meter', 'food', 'max_food'),
              ('Materials_meter', 'material', 'max_material'),
              ('Science_meter', 'science', 'max_science'))
    for meter_name, resource, cap in meters:
        fraction = state[resource] / state[cap]
        scene_objects[meter_name].localScale.x = min(fraction, 1)

    # Numeric "current/max" read-outs next to the bars.
    scene_objects['Food_amount'].text = "{}/{}".format(int(state["food"]), int(state["max_food"]))
    scene_objects['Material_amount'].text = "{}/{}".format(int(state["material"]), int(state["max_material"]))
    scene_objects['Science_amount'].text = "{}/{}".format(int(state["science"]), int(state["max_science"]))

    # Worker headcounts per resource.
    scene_objects['Food_worker_count'].text = "{}".format(int(state["foodworkers"]))
    scene_objects['Material_worker_count'].text = "{}".format(int(state["materialworkers"]))
    scene_objects['Science_worker_count'].text = "{}".format(int(state["scienceworkers"]))
def update(cont):
    """Controller callback: refresh the ant counter and, on message, the meters."""
    message_sensor = cont.owner.sensors["Message"]
    # We should potentially check if we have actually added an ant; however,
    # it isn't critical.  A quick check could compare Ant_count.text against
    # the current property.
    state = bge.logic.globalDict
    counter = bge.logic.getCurrentScene().objects['Ant_count']
    counter.text = "{}/{}".format(state["pop"], state["max_pop"])
    if message_sensor.positive:
        update_resource_meters()
def datestring(cont):
    """Write the in-game date ("Day D Year Y - Season") into the owner's text."""
    state = bge.logic.globalDict
    total_days = state["day"]
    day_of_year = int(total_days % 365)
    year = int(total_days / 365)
    cont.owner.text = "Day {} Year {} - {}".format(day_of_year, year, state["season"])
9844660475 | # -*- coding: utf-8 -*-
from secp256k1 import PublicKey, ALL_FLAGS
from raiden.utils import sha3, GLOBAL_CTX
def recover_publickey(messagedata, signature):
    """Recover the uncompressed secp256k1 public key that signed `messagedata`.

    `signature` must be 65 bytes: a 64-byte recoverable ECDSA signature
    followed by a one-byte recovery id.  The message is hashed with sha3
    before recovery.

    Raises:
        ValueError: if the signature is not exactly 65 bytes long.
    """
    if len(signature) != 65:
        raise ValueError('invalid signature')

    # NOTE(review): ord(signature[64]) assumes a str signature (Python 2);
    # on Python 3, indexing bytes already yields an int -- confirm runtime.
    key = PublicKey(
        ctx=GLOBAL_CTX,
        flags=ALL_FLAGS,  # FLAG_SIGN is required to recover publickeys
    )
    signature_data = key.ecdsa_recoverable_deserialize(
        signature[:64],
        ord(signature[64]),
    )
    message_hash = sha3(messagedata)
    publickey_data = key.ecdsa_recover(message_hash, signature_data, raw=True)
    publickey = PublicKey(
        publickey_data,
        ctx=GLOBAL_CTX
    )
    return publickey.serialize(compressed=False)
def sign(messagedata, private_key):
    """Produce a 65-byte recoverable signature over sha3(messagedata).

    Output layout matches recover_publickey(): 64 signature bytes followed by
    the one-byte recovery id.

    Raises:
        ValueError: if the assembled signature is not 65 bytes long.
    """
    message_hash = sha3(messagedata)
    secp_signature = private_key.ecdsa_sign_recoverable(message_hash, raw=True)
    signature_data = private_key.ecdsa_recoverable_serialize(secp_signature)
    # NOTE(review): `bytes + chr(int)` only works on Python 2 (str);
    # Python 3 would need bytes([signature_data[1]]) -- confirm runtime.
    signature = signature_data[0] + chr(signature_data[1])

    if len(signature) != 65:
        raise ValueError('invalid signature')

    return signature
def address_from_key(key):
    """Derive an Ethereum-style address: last 20 bytes of sha3 of the key body.

    NOTE(review): key[1:] presumably strips the 0x04 uncompressed-key prefix
    byte -- confirm the expected key format.
    """
    return sha3(key[1:])[-20:]
17659538181 | from collections import Counter
# BOJ 25401: minimum number of cards to change so the sequence (arr) becomes
# an arithmetic progression.  For a fixed pivot i, each other card j implies
# a common difference d = (arr[j] - arr[i]) / (j - i); the most frequent d
# is the progression through i that the most cards already satisfy.
n = int(input())
arr = list(map(int, input().split()))
# NOTE(review): `gap` is never used below -- leftover scratch table.
gap = [[0 for j in range(n)] for i in range(n)]

MAX = -1  # best count of other-cards agreeing with one progression
# For the i-th element as pivot, compute the common difference d to every
# other card, then record the occurrence count of the mode.
for i in range(n):
    d = []
    for j in range(n):
        if i!=j :
            d.append((arr[j] - arr[i]) / (j-i))
        # else :
        #     d.append(0)
    #print(d)
    MAX = max(MAX, Counter(d).most_common(1)[0][1])
    #print(MAX)
# Cards to change = total minus the pivot minus the cards already agreeing.
print(n - MAX - 1)
| ryuwldnjs/BOJ | 백준/Silver/25401. 카드 바꾸기/카드 바꾸기.py | 카드 바꾸기.py | py | 516 | python | ko | code | 0 | github-code | 13 |
37154207403 | """Custom template tags for metabase embedding."""
import logging
import time
from datetime import date
from datetime import datetime
import jwt
from django import template
from django.conf import settings
from django.template.loader import render_to_string
log = logging.getLogger(__name__) # noqa
# Django template tag registry; @register.simple_tag below attaches the tag.
register = template.Library()
@register.simple_tag
def metabase_question_embed(question_id, **kwargs):
    """
    Embed a question (a graph in an iframe) from metabase.

    https://www.metabase.com/learn/embedding/embedding-charts-and-dashboards#an-example-using-django
    """
    if not settings.METABASE_SECRET_KEY:
        log.warning("Metabase Secret Key is not set - Graphs won't render")
        return None

    # Signed parameters must be JSON serializable; date/datetime values are
    # stringified since they aren't serializable by default.
    params = {
        key: str(value) if isinstance(value, (date, datetime)) else value
        for key, value in kwargs.items()
    }

    payload = {
        "resource": {"question": question_id},
        "params": params,
        # Token stays valid for ten minutes.
        "exp": round(time.time()) + (60 * 10),
    }
    log.debug(payload)
    token = jwt.encode(payload, settings.METABASE_SECRET_KEY, algorithm="HS256")

    iframe_url = "".join([
        settings.METABASE_SITE_URL,
        "/embed/question/",
        token,
        "#bordered=true&titled=false",
    ])
    return render_to_string(
        "adserver/metabase/question-iframe.html", {"iframe_url": iframe_url}
    )
| abhay340/ethical-ads | adserver/templatetags/metabase.py | metabase.py | py | 1,516 | python | en | code | 0 | github-code | 13 |
44040860266 | #val=int(input("enter a number"))
#if val >100:
# val = val/2
#else:
# val=val*2
#print("the result is", val)
#to print in online
val=int(input("enter a number"))
val=val/2 if val>100 else val * 2
print("the result is", val)
name= input("enter ur name")
print("very good") if name.isalpha() else print("not good") | itzzyashpandey/python-data-science | basics/onliner.py | onliner.py | py | 322 | python | en | code | 0 | github-code | 13 |
14769143752 | #Simple run this file by call function crawl_r()
import requests
from data import *
def _fetch_all(urls):
    """Download every URL in *urls* and return the list of response bodies."""
    return [requests.get(url).text for url in urls]


def _dump_bodies(path, bodies):
    """Concatenate *bodies* and write them to *path* as UTF-8 text.

    NOTE(review): bodies are joined with '' as in the original code, so the
    last line of one response may run into the first line of the next --
    confirm whether a '\\n' separator was intended.
    """
    with open(path, 'w', encoding='utf8') as f:
        f.write(''.join(bodies))


def crawl_api_main():
    """Fetch every proxy-list API endpoint and dump each protocol's responses
    into its own file under crawl_data/.

    The three identical fetch/write loops of the original were collapsed into
    the `_fetch_all` / `_dump_bodies` helpers.
    """
    _dump_bodies("crawl_data/http_api.txt", _fetch_all(http_api))
    _dump_bodies('crawl_data/socks4_api.txt', _fetch_all(socks4_api))
    _dump_bodies('crawl_data/socks5_api.txt', _fetch_all(socks5_api))
# crawl_api_main() | pEvk/proxy-crawl | crawl_api.py | crawl_api.py | py | 690 | python | en | code | 0 | github-code | 13 |
22191448242 | import inspect
import json
from copy import deepcopy
from dataclasses import dataclass
from functools import lru_cache
from pathlib import Path
from typing import Any, Dict, Iterable, Union
import jsonschema
import yaml
from jsonschema.exceptions import best_match
from linkml_runtime import SchemaView
from linkml_runtime.dumpers import yaml_dumper
from linkml_runtime.linkml_model import SchemaDefinition
from linkml.generators.jsonschemagen import JsonSchemaGenerator
from .. import LOCAL_METAMODEL_YAML_FILE
from .config.datamodel.config import Config, ExtendableConfigs, RuleLevel
@dataclass
class LinterProblem:
    """A single issue reported by the linter against one schema."""

    # Human-readable description of the problem.
    message: str
    # Severity assigned by the rule's configuration (error/warning/...).
    level: Union[RuleLevel, None] = None
    # Name of the schema the problem was found in.
    schema_name: Union[str, None] = None
    # Path (or raw content) of the schema source, when known.
    schema_source: Union[str, None] = None
    # Identifier of the rule that produced this problem.
    rule_name: Union[str, None] = None
@lru_cache
def get_named_config(name: str) -> Dict[str, Any]:
    """Load (and cache) the packaged linter configuration called *name*.

    Configurations live as YAML files next to this module under config/.
    """
    config_path = str(Path(__file__).parent / f"config/{name}.yaml")
    with open(config_path) as config_file:
        loaded = yaml.safe_load(config_file)
    return loaded
@lru_cache
def get_metamodel_validator() -> jsonschema.Validator:
    """Build (once) a Draft-7 JSON Schema validator for the LinkML metamodel."""
    generator = JsonSchemaGenerator(LOCAL_METAMODEL_YAML_FILE, not_closed=False)
    meta_schema = json.loads(generator.serialize())
    return jsonschema.Draft7Validator(meta_schema)
def merge_configs(original: dict, other: dict):
    """Recursively merge *other* onto a deep copy of *original*.

    Nested dicts are merged key by key; any non-dict value in *other*
    overwrites the corresponding entry. Neither input is mutated.
    """
    merged = deepcopy(original)
    for key, incoming in other.items():
        merged[key] = (
            merge_configs(merged.get(key, {}), incoming)
            if isinstance(incoming, dict)
            else incoming
        )
    return merged
def _format_path_component(value):
if isinstance(value, int):
return f"[{value}]"
return value
def _format_path(path):
if not path:
return "<root>"
return " > ".join(_format_path_component(p) for p in path)
class Linter:
    """Runs the configured lint rules (and, optionally, metamodel validation)
    against a LinkML schema."""

    def __init__(self, config: Union[Dict[str, Any], None] = None) -> None:
        # Fix: the original used a mutable dict as the default argument,
        # which is shared across all calls; None is the proper sentinel.
        if config is None:
            config = {}
        default_config = deepcopy(get_named_config("default"))
        merged_config = config
        # A config may "extend" the packaged recommended config, in which
        # case the user's entries are layered on top of it.
        if config.get("extends") == ExtendableConfigs.recommended.text:
            recommended_config = deepcopy(get_named_config(ExtendableConfigs.recommended.text))
            merged_config = merge_configs(recommended_config, merged_config)
        merged_config = merge_configs(default_config, merged_config)
        self.config = Config(**merged_config)

        from . import rules

        # Map rule id -> rule class for every LinterRule subclass in `rules`.
        self._rules_map = dict(
            [
                (cls.id, cls)
                for _, cls in inspect.getmembers(rules, inspect.isclass)
                if issubclass(cls, rules.LinterRule)
            ]
        )

    def validate_schema(self, schema_path: str):
        """Yield a LinterProblem for every metamodel violation in the file."""
        with open(schema_path) as schema_file:
            schema = yaml.safe_load(schema_file)

        validator = get_metamodel_validator()
        for err in validator.iter_errors(schema):
            # best_match picks the most relevant sub-error to report.
            best_err = best_match([err])
            message = f"In {_format_path(best_err.absolute_path)}: {best_err.message}"
            if best_err.context:
                message += f" ({', '.join(e.message for e in best_err.context)})"
            yield LinterProblem(
                rule_name="valid-schema",
                message=message,
                level=RuleLevel(RuleLevel.error),
                schema_source=schema,
            )

    def lint(
        self,
        schema: Union[str, SchemaDefinition],
        fix: bool = False,
        validate_schema: bool = False,
        validate_only: bool = False,
    ) -> Iterable[LinterProblem]:
        """Run metamodel validation and/or the configured rules over *schema*,
        yielding LinterProblem objects.

        :param schema: path to a schema file or an in-memory SchemaDefinition
        :param fix: let rules rewrite the schema where they can, then persist it
        :param validate_schema: also run metamodel validation (path input only)
        :param validate_only: run metamodel validation and skip the lint rules
        """
        if (validate_schema or validate_only) and isinstance(schema, str):
            yield from self.validate_schema(schema)

        if validate_only:
            return

        try:
            schema_view = SchemaView(schema)
        except Exception:
            if not validate_schema:
                yield LinterProblem(
                    message="File is not a valid LinkML schema. Use --validate for more details.",
                    level=RuleLevel(RuleLevel.error),
                    schema_source=(schema if isinstance(schema, str) else None),
                )
            return

        for rule_id, rule_config in self.config.rules.__dict__.items():
            rule_cls = self._rules_map.get(rule_id, None)
            if rule_cls is None:
                raise ValueError("Unknown rule id: " + rule_id)

            # Fix: string equality must use ==; the original compared with
            # `is`, which relies on accidental string interning and could
            # silently fail to skip disabled rules.
            if str(rule_config.level) == RuleLevel.disabled.text:
                continue

            rule = rule_cls(rule_config)
            for problem in rule.check(schema_view, fix=fix):
                problem.level = rule.config.level
                problem.rule_name = rule.id
                problem.schema_name = schema_view.schema.name
                if isinstance(schema, str):
                    problem.schema_source = schema
                yield problem

        # Persist any fixes the rules applied in place.
        if fix and schema_view.schema.source_file:
            yaml_dumper.dump(schema_view.schema, schema_view.schema.source_file)
| linkml/linkml | linkml/linter/linter.py | linter.py | py | 4,984 | python | en | code | 228 | github-code | 13 |
1044279742 | """
如果保存的是模型参数
"""
import torch
import torchvision.models as models
# Rebuild a 4-class ResNet-50, load the saved parameters, and export to ONNX.
torch_model = torch.load("test.pth") # load the saved state_dict (file holds parameters, not the full model)
model = models.resnet50()
model.fc = torch.nn.Linear(2048, 4)  # replace the classifier head with a 4-class layer
model.load_state_dict(torch_model)
batch_size = 1 # batch size
input_shape = (3, 244, 384) # input shape -- change this to match your own model's input
# #set the model to inference mode
model.eval()
x = torch.randn(batch_size, *input_shape) # dummy input tensor used to trace the graph
export_onnx_file = "test.onnx" # target ONNX file name
torch.onnx.export(model,
                    (x),
                    export_onnx_file,
                    opset_version=10,
                    do_constant_folding=True, # whether to run constant-folding optimization
                    input_names=["input"], # input name
                    output_names=["output"], # output name
                    dynamic_axes={"input":{0:"batch_size"}, # the batch dimension stays dynamic
"output":{0:"batch_size"}}) | songjiahao-wq/untitled | Work/convert onnx/two.py | two.py | py | 970 | python | en | code | 1 | github-code | 13 |
6660684922 | ###################################################################
## Written by Eli Pugh and Ethan Shen ##
## {epugh}, {ezshen} @stanford.edu ##
## Translated from Matlab written by Jiantao Jiao ##
## https://github.com/EEthinker/Universal_directed_information ##
## Based off of: ##
## F. Willems, Y. Shtarkov and T. Tjalkens ##
## 'The context-tree weighting method: basic properties' ##
## https://ieeexplore.ieee.org/document/382012 ##
###################################################################
import numpy as np
from tqdm import tqdm
#==============================================================================
def ctwupdate(countTree, betaTree, eta, index, xt, alpha):
    """One Context-Tree-Weighting update step at a single tree node.

    Args:
        countTree: countTree[a, :] is the tree of counts for symbol a, a = 0..M.
        betaTree: betaTree[i(s)] = Pe^s / prod_{b=0}^{M} Pw^{bs}(x^t).
        eta: [p(X_t = 0|.) / p(X_t = M|.), ..., p(X_t = M-1|.) / p(X_t = M|.)].
        index: 1-based node index into the (flattened) trees.
        xt: the current symbol.
        alpha: mixing weight between this node's estimator and its children.

    Returns:
        The (countTree, betaTree, eta) triple; all three are also updated
        in place.
    """
    alphabet_size = len(eta)
    node = int(index)

    # Normalised weighted probabilities pw(1) .. pw(M+1) at this node.
    pw = eta / np.sum(eta)

    # KT (Krichevsky-Trofimov) estimator with 1/2 pseudo-counts.
    pe = (countTree[:, node - 1] + 0.5) / (np.sum(countTree[:, node - 1]) + alphabet_size / 2)

    beta = betaTree[node - 1]
    if beta < 1000:
        numerator = alpha * beta * pe[0:alphabet_size - 1] + (1 - alpha) * pw[0:alphabet_size - 1]
        denominator = alpha * beta * pe[alphabet_size - 1] + (1 - alpha) * pw[alphabet_size - 1]
    else:
        # Rescale both terms by beta to avoid overflow once beta grows large.
        numerator = alpha * pe[0:alphabet_size - 1] + (1 - alpha) * pw[0:alphabet_size - 1] / beta
        denominator = alpha * pe[alphabet_size - 1] + (1 - alpha) * pw[alphabet_size - 1] / beta
    eta[:-1] = numerator / denominator

    countTree[xt, node - 1] += 1
    betaTree[node - 1] *= pe[xt] / pw[xt]
    return countTree, betaTree, eta
#==============================================================================
# Function 'ctwalgorithm' outputs the universal sequential probability
# assignments given by the Context Tree Weighting Algorithm
# Inputs:
#   X: Input sequence
#   Nx: Alphabet size of X
#   D: depth of the tree
def ctwalgorithm(x, Nx, D):
    """Return Px_record, an (Nx, len(x)-D) array whose column i holds the
    CTW sequential probability assignment for symbol x[i+D] given the
    D-symbol context x[i:i+D]."""
    n = len(x)
    # Flattened complete Nx-ary tree of depth D: (Nx**(D+1)-1)//(Nx-1) nodes.
    countTree = np.zeros( ( Nx, (Nx**(D+1) - 1) // (Nx-1) ))
    betaTree = np.ones( (Nx**(D+1) - 1 ) // (Nx-1) )
    Px_record = np.zeros((Nx,n-D))
    # Weights to turn a length-D context into a leaf offset (base-Nx digits).
    indexweight = Nx**np.arange(D)
    # 1-based index of the first leaf in the flattened tree.
    offset = (Nx**D - 1) // (Nx-1) + 1
    for i in range(n-D):
        context = x[i:i+D]
        leafindex = np.dot(context,indexweight)+offset
        xt = x[i+D]
        # Leaf-level probability ratios (KT estimator), last entry fixed to 1.
        eta = (countTree[0:Nx,leafindex-1]+0.5)/(countTree[Nx-1,leafindex-1]+0.5)
        eta[-1] = 1
        # update the leaf
        countTree[xt,leafindex-1] = countTree[xt,leafindex-1] + 1
        # Walk from the leaf's parent up to the root, mixing at each node.
        node = np.floor((leafindex+Nx-2)/Nx)
        while node != 0:
            countTree, betaTree, eta = ctwupdate(countTree, betaTree, eta, node, xt, 1/2)
            node = np.floor((node+Nx-2)/Nx)
        # Normalise the ratios in eta into a probability vector.
        eta_sum = np.sum(eta[:-1])+1
        Px_record[:,i] = eta / eta_sum
    return Px_record
| elipugh/directed_information | directed_information/ctwalgorithm.py | ctwalgorithm.py | py | 2,998 | python | en | code | 2 | github-code | 13 |
4970009163 | n = int(input())
a = list(map(int, input().split()))

# Two-pointer sliding window: extend the right edge one element at a time and
# shrink from the left until the window holds at most two distinct values.
freq = [0] * (10 ** 5 + 1)   # freq[v] = occurrences of value v inside the window
distinct = 0                 # number of distinct values currently in the window
best = 0                     # length of the longest valid window seen so far
lo = 0                       # left edge of the window
for hi in range(n):
    freq[a[hi]] += 1
    if freq[a[hi]] == 1:
        distinct += 1
    while lo < hi and distinct > 2:
        freq[a[lo]] -= 1
        if freq[a[lo]] == 0:
            distinct -= 1
        lo += 1
    best = max(best, hi - lo + 1)
print(best)
| truclycs/code_for_fun | algorithms/python/intermediate/algorithmic_complexity/Approximating a Constant Range.py | Approximating a Constant Range.py | py | 488 | python | en | code | 7 | github-code | 13 |
20169845997 | #Jose Tomas Martinez Lavin
from matplotlib.pylab import *
from scipy.integrate import odeint
# Single-degree-of-freedom damped oscillator parameters.
m= 1.   # mass
f= 1.   # natural frequency in Hz
chi= 0.2   # damping ratio (underdamped since chi < 1)
w= 2.*pi*f   # angular natural frequency, w = 2*pi*f
wd= w * sqrt(1.-chi**2)   # damped angular frequency
k= m*w**2   # stiffness, from w = sqrt(k/m)
c= 2.*chi*w*m   # viscous damping coefficient
def eulerint(zp, z0, t, Nsubdivisiones=1):
    """Integrate z' = zp(z, t) with the explicit (forward) Euler method.

    Parameters
    ----------
    zp : callable
        Right-hand side; zp(z, t) returns dz/dt as a sequence.
    z0 : sequence
        Initial state vector (reported at t[0]).
    t : sequence
        Times at which the solution is reported.
    Nsubdivisiones : int
        Number of Euler sub-steps between consecutive entries of t.

    Returns
    -------
    Array of shape (len(t), len(z0)); row i is the state at t[i].
    """
    Nt = len(t)
    Ndim = len(z0)

    z = zeros((Nt, Ndim))
    # Bug fix: the initial condition is the whole vector z0. The original
    # did z[0,:] = z0[0] and z[1,:] = z0[1], broadcasting single components
    # across entire rows (only accidentally correct when z0 == [c, c]).
    z[0, :] = z0

    # z_{i+1} = z_i + dt * zp(z_i, t_i), taking Nsubdivisiones sub-steps
    # between each pair of reported times.
    for i in range(1, Nt):
        t_anterior = t[i - 1]
        dt = (t[i] - t[i - 1]) / Nsubdivisiones
        z_temp = z[i - 1, :].copy()
        # Loop variable renamed from `k` to avoid shadowing the module-level
        # stiffness constant `k`.
        for sub in range(Nsubdivisiones):
            z_temp += dt * array(zp(z_temp, t_anterior + sub * dt))
        z[i, :] = z_temp
    return z
def zp(z, t):
    """State derivative of the damped oscillator, z = (x, p) -> (x', p').

    Uses the module-level damping ratio `chi` and angular frequency `w`:
    x' = p and p' = -2*chi*w*p - w**2 * x.
    """
    x, p = z[0], z[1]
    return p, -2 * chi * w * p - w ** 2 * x
# Initial condition (x0, p0) and reporting times.
z0= [1., 1.]
t= linspace(0, 4., 100)

# Reference solution from scipy's odeint.
sol= odeint (zp,z0,t)
z_odeint= sol[:,0]

# Closed-form solution of the underdamped oscillator for these initial values.
z_analitica = exp(-chi*w*t) * (1.*cos(wd*t) + ((1. + w*chi*1.)/wd) * sin(wd*t) )

# Forward-Euler solutions with 1, 10 and 100 sub-steps per interval.
sol= eulerint (zp,z0,t)
z_euler1= sol[:,0]
sol= eulerint (zp,z0,t, Nsubdivisiones=10)
z_euler10= sol[:,0]
sol= eulerint (zp,z0,t, Nsubdivisiones=100)
z_euler100= sol[:,0]

# Compare all integrators against the analytic curve.
plot(t,z_odeint, label="odeint")
plot(t,z_euler1,"g--", label="eulerint1")
plot(t,z_euler10,"r--", label="eulerint10")
plot(t,z_euler100,"--", label="eulerint100")
plot(t,z_analitica, "k",label="analitica", linewidth=2)
legend()
savefig("Entrega4.png")
show()
| JoseTomasMartinez/MCOC2020-P1 | entrega4.py | entrega4.py | py | 1,314 | python | en | code | 0 | github-code | 13 |
38342907645 | import argparse
import sys
# Module-level parser; rgbif_args.__init__ reads its results from sys.argv.
parse = argparse.ArgumentParser()
parse.add_argument("-o",default="color")  # operator name
parse.add_argument("-emoji",nargs='?',const=True,default=False)  # becomes True when given without a value
parse.add_argument("-i",type=float,default=1.0)  # intensity
parse.add_argument("-minframes",type=int,default=24)  # minimum number of frames
parse.add_argument("-output",default="\\Desktop\\output_{0}.gif")  # {0} is filled with the operator name
parse.add_argument("-input",default=r"\Desktop\input.gif")
class rgbif_args():
    """Parsed command-line options for rgbif.

    The class attributes hold the documented defaults; constructing an
    instance parses sys.argv via the module-level `parse` ArgumentParser and
    shadows them with per-instance values.
    """

    input_path = r"\Desktop\input.gif"
    operator = "color"
    output_path = "\\Desktop\\output_{0}.gif"
    emoji = False
    intensity = 1.0
    minimum_frames = 24

    def __init__(self):
        # Fix: the original declared `global parse` inside the class body,
        # which is a no-op -- reads of `parse` here resolve to the module
        # global anyway -- so the declaration was removed.
        args = parse.parse_args()
        self.operator = args.o
        self.emoji = args.emoji
        self.intensity = args.i
        self.minimum_frames = args.minframes
        self.input_path = args.input
        # The operator name is baked into the output file name.
        self.output_path = args.output.format(self.operator)
| nlcsdev/rgbif.py | arg_handler.py | arg_handler.py | py | 910 | python | en | code | 1 | github-code | 13 |
72221887377 | import os
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponse, JsonResponse
from django.urls import reverse, reverse_lazy
from django.contrib.auth.decorators import login_required
from books.models import Book, Category
def home(request):
    """Render the landing page with every book (newest first) and the first
    24 categories ordered by id."""
    context = {
        'categories': Category.objects.all().order_by('id')[:24],
        'books': Book.objects.all().order_by('-created_at'),
    }
    return render(request, 'books/index.html', context)
@login_required
def book_details(request, slug):
    """Show the detail page for the book with the given slug (404 if absent)."""
    return render(
        request,
        'books/book_detail.html',
        {'book': get_object_or_404(Book, slug=slug)},
    )
@login_required
def download_book(request, slug):
    """Serve a book's PDF; POSTing with "download" counts the download and
    forces a save dialog via the attachment disposition."""
    book = get_object_or_404(Book, slug=slug)
    filename = os.path.basename(book.file.name)
    file = book.file
    if file:
        response = HttpResponse(file, content_type='application/pdf')
        content = "filename=%s" % (filename)
        if request.POST.get("download"):
            book.downloads += 1
            book.save()
            # Fix: the original's `content = content` was a no-op; an explicit
            # download should mark the response as an attachment so the
            # browser saves it instead of rendering inline.
            content = "attachment; " + content
        response['Content-Disposition'] = content
        return response
return HttpResponse("Not found") | devmedtz/sogea | books/views/books.py | books.py | py | 1,370 | python | en | code | 2 | github-code | 13 |
23360183298 | '''
Feb-04-2021
594. Longest Harmonious Subsequence
Difficulty: Easy
Link: https://leetcode.com/problems/longest-harmonious-subsequence/
'''
class Solution:
    def findLHS(self, nums: List[int]) -> int:
        """Return the length of the longest harmonious subsequence of nums,
        i.e. the largest count of elements using exactly two adjacent values
        (max - min == 1). Returns 0 when no such pair exists.

        Single O(n) pass over the value counts; the original built a second
        dict of candidate lengths and raised ValueError on empty input.
        """
        counts = collections.Counter(nums)
        best = 0
        for value, freq in counts.items():
            # A harmonious subsequence must contain both `value` and `value+1`.
            if value + 1 in counts:
                best = max(best, freq + counts[value + 1])
        return best
| iwajef/leetcode-daily-challenge | Feb-2021/02.04.py | 02.04.py | py | 497 | python | en | code | 0 | github-code | 13 |
2553654886 | from .dataset import Dataset
import artm
import os
import re
import sys
import shutil
import subprocess
import numpy as np
import pandas as pd
from tqdm import tqdm
class DatasetCooc(Dataset):
    """
    Prepares a dataset in vowpal wabbit format for the WNTM model: documents
    are replaced by per-token co-occurrence "pseudo-documents".
    """
    def __init__(
            self,
            data_path: str,  # same name as the ordinary Dataset's parameter
            internals_folder_path: str = None,
            cooc_window: int = 10,
            min_tf: int = 5,
            min_df: int = 5,
            threshold: int = 2,
            **kwargs
    ):
        """
        Parameters
        ----------
        data_path : str
            path to a file with input data for training models
            in vowpal wabbit format;
        internals_folder_path : str
            path to the directory with dataset internals, which includes:

            * vowpal wabbit file
            * dictionary file
            * batches directory

            The parameter is optional:
            the folder will be created by the dataset if not specified.
            This is a part of Dataset internal functioning.
            When working with any text collection `data_path` for the first time,
            there is no such folder: it will be created by
            topicnet.cooking_machines.Dataset class.
        cooc_window : int
            number of tokens around a specific token which are used in the
            calculation of co-occurrences
        min_tf : int
            minimal value of co-occurrences of a pair of tokens that is
            saved in the dictionary of co-occurrences.
            Optional parameter, default min_tf = 5.
            More info http://docs.bigartm.org/en/stable/tutorials/python_userguide/coherence.html
        min_df : int
            minimal number of documents in which a specific pair of tokens
            must occur closely together.
            Optional parameter, default min_df = 5.
            More info http://docs.bigartm.org/en/stable/tutorials/python_userguide/coherence.html
        threshold : int
            frequency threshold above which received pairs are selected
            to form the dataset
        """
        self._ordinary_dataset = Dataset(
            data_path,  # just in case
            internals_folder_path=internals_folder_path,
            **kwargs
        )

        _ = self._ordinary_dataset.get_dictionary()
        _ = self._ordinary_dataset.get_batch_vectorizer()

        # At this point the internals folder, the batches and the dictionary
        # of the ordinary dataset have all been created.

        self.dataset_dir = os.path.join(
            self._ordinary_dataset._internals_folder_path,
            'coocs_dataset',  # everything co-occurrence-related lives here
        )

        if not os.path.isdir(self.dataset_dir):
            os.mkdir(self.dataset_dir)

        self.dataset_name = os.path.basename(data_path)
        self.dataset_path = data_path
        self.cooc_window = cooc_window
        self.min_tf = min_tf
        self.min_df = min_df

        self._get_vocab()
        self._get_cooc_scores(cooc_window, min_tf, min_df)
        self._get_vw_cooc(threshold)

        super().__init__(self.wntm_dataset_path)

    def _get_vocab(self):
        """Gather an ARTM dictionary from the batches and dump a token/modality
        vocabulary file (vocab.txt) used by the bigartm co-occurrence tool."""
        batch_vectorizer_path = os.path.join(self.dataset_dir, 'batches')
        artm.BatchVectorizer(data_path=self.dataset_path,
                             data_format='vowpal_wabbit',
                             target_folder=batch_vectorizer_path)
        dictionary = artm.Dictionary()
        dictionary.gather(data_path=batch_vectorizer_path)
        dictionary_path = batch_vectorizer_path + '/dictionary.txt'
        dictionary.save_text(dictionary_path=dictionary_path)

        self.vocab_path = os.path.join(self.dataset_dir, 'vocab.txt')
        with open(dictionary_path, 'r') as dictionary_file:
            with open(self.vocab_path, 'w') as vocab_file:
                # The first two lines of dictionary_file do not contain data.
                dictionary_file.readline()
                dictionary_file.readline()
                for line in dictionary_file:
                    elems = re.split(', ', line)
                    vocab_file.write(' '.join(elems[:2]) + '\n')

    def _get_cooc_scores(self, cooc_window, min_tf, min_df):
        """Run the external `bigartm` tool to write the cooc_tf_/cooc_df_ and
        ppmi_tf_/ppmi_df_ co-occurrence files into the dataset folder."""
        try:
            bigartm_tool_path = subprocess.check_output(["which", "bigartm"]).strip()
        except FileNotFoundError:
            sys.exit(
                """
                For use dataset_cooc.py please build bigartm tool
                https://bigartm.readthedocs.io/en/stable/installation/linux.html#step-3-build-and-install-bigartm-library
                """
            )
        cooc_tf_path = os.path.join(self.dataset_dir, 'cooc_tf_')
        cooc_df_path = os.path.join(self.dataset_dir, 'cooc_df_')
        ppmi_tf_path = os.path.join(self.dataset_dir, 'ppmi_tf_')
        ppmi_df_path = os.path.join(self.dataset_dir, 'ppmi_df_')
        subprocess.check_output([bigartm_tool_path, '-c', self.dataset_path, '-v',
                                 self.vocab_path, '--cooc-window', str(cooc_window),
                                 '--cooc-min-tf', str(min_tf), '--write-cooc-tf',
                                 cooc_tf_path, '--cooc-min-df', str(min_df),
                                 '--write-cooc-df', cooc_df_path, '--write-ppmi-tf',
                                 ppmi_tf_path, '--write-ppmi-df', ppmi_df_path])

    def _transform_coocs_file(
            self,
            source_file_path: str,
            target_file_path: str
    ):
        """Rewrite a bigartm PPMI file as symmetric "index index value" triples
        and return the {(word, word): value} mapping.

        source_file is assumed to be either ppmi_tf_ or ppmi_df_.
        """
        vocab = open(self.vocab_path, 'r').readlines()
        vocab = [line.strip().split()[0] for line in vocab]

        cooc_values = dict()
        word_word_value_triples = set()

        lines = open(source_file_path, 'r').readlines()
        pbar = tqdm(total=len(lines))
        for i, l in enumerate(lines):
            # NOTE(review): advances the bar by 10 per line even though
            # total=len(lines) -- the bar overshoots; confirm intent.
            pbar.update(10)
            l_i = l.strip()
            words = l_i.split()
            words = words[1:]  # exclude modality
            anchor_word = words[0]
            other_word_values = words[1:]
            for word_and_value in other_word_values:
                other_word, value = word_and_value.split(':')
                value = float(value)
                # Store the pair symmetrically, keeping the first value seen.
                cooc_values[(anchor_word, other_word)] = value
                if (other_word, anchor_word) not in cooc_values:
                    cooc_values[(other_word, anchor_word)] = value
                word_word_value_triples.add(
                    tuple([
                        tuple(sorted([
                            vocab.index(anchor_word),
                            vocab.index(other_word)
                        ])),
                        value
                    ])
                )
        pbar.close()

        new_text = ''
        for (w1, w2), v in word_word_value_triples:
            new_text += f'{w1} {w2} {v}\n'
        with open(target_file_path, 'w') as f:
            f.write(''.join(new_text))
        return cooc_values

    def _get_vw_cooc(self, threshold):
        """Build the WNTM vowpal wabbit file: one pseudo-document per token,
        containing the tokens that co-occur with it at least *threshold* times."""
        with open(self.vocab_path, 'r') as f:
            data = f.readlines()
        cooc_values = self._transform_coocs_file(
            os.path.join(self.dataset_dir, 'ppmi_tf_'),
            os.path.join(self.dataset_dir, 'new_ppmi_tf_')
        )
        vw_lines = {}
        for line in data:
            token, modality = line.strip().split()
            vw_lines[token] = '{} |{}'.format(token, modality)
        for coocs_pair, frequency in cooc_values.items():
            (token_doc, token_word) = coocs_pair
            if frequency >= threshold:
                vw_lines[token_doc] = vw_lines[token_doc] + ' ' + '{}:{}'.format(
                    token_word, frequency
                )
        self.wntm_dataset_path = os.path.join(self.dataset_dir, f'new_{self.dataset_name}')
        with open(self.wntm_dataset_path, 'w') as f:
            f.write('\n'.join(list(vw_lines.values())))

    def transform_theta(self, model):
        """Transform the token-level theta matrix back to document level by
        multiplying with the token-in-document count matrix n_wd."""
        with open(self.dataset_path, 'r') as f:
            data = f.readlines()
        # doc_token: document id -> list of its tokens (vw line: id |modality t:...).
        doc_token = {}
        for doc in data:
            doc = doc.split()
            doc_token[doc[0]] = [token.split(':')[0] for token in doc[2:]]
        # token_doc: inverse mapping, token -> documents containing it.
        token_doc = {}
        for doc in doc_token:
            for token in doc_token[doc]:
                if token not in token_doc:
                    token_doc[token] = [doc]
                else:
                    token_doc[token] += [doc]
        doc_inds = {doc: ind for ind, doc in enumerate(doc_token.keys())}
        # nwd[token][d] = number of occurrences of token in document d.
        nwd = {token: [0]*len(doc_inds) for token in token_doc}
        for token in token_doc:
            for doc in token_doc[token]:
                nwd[token][doc_inds[doc]] += 1
        theta = model.get_theta(dataset=self)
        cols = theta.columns
        inds = theta.index.values
        nwd_matrix = np.array([nwd[token] for token in cols])
        new_theta = np.dot(theta.values, nwd_matrix)
        return pd.DataFrame(data=new_theta, columns=doc_inds.keys(), index=inds)

    def clear_all_cooc_files(self):
        """Remove every intermediate co-occurrence file and the dataset folder."""
        shutil.rmtree(os.path.join(self.dataset_dir, 'batches'))
        os.remove(self.vocab_path)
        os.remove(os.path.join(self.dataset_dir, 'cooc_tf_'))
        os.remove(os.path.join(self.dataset_dir, 'cooc_df_'))
        os.remove(os.path.join(self.dataset_dir, 'ppmi_tf_'))
        os.remove(os.path.join(self.dataset_dir, 'ppmi_df_'))
        os.remove(os.path.join(self.dataset_dir, 'new_ppmi_tf_'))
        # Fix: the attribute is `wntm_dataset_path` (set in _get_vw_cooc);
        # the original referenced `self.WNTM_dataset_path`, which raised
        # AttributeError at runtime.
        os.remove(self.wntm_dataset_path)
        shutil.rmtree(self.dataset_dir)
| machine-intelligence-laboratory/TopicNet | topicnet/cooking_machine/dataset_cooc.py | dataset_cooc.py | py | 9,968 | python | en | code | 138 | github-code | 13 |
12527631313 |
def exercise_2():
    """Print the line, word and character counts of Hamlet.txt.

    Fixes over the original: the discarded `line.strip()` no-op is gone,
    words are counted with `str.split()` (which ignores runs of whitespace,
    so the empty-token filtering loop is unnecessary, and the final line is
    no longer truncated by `line[:-1]` when the file lacks a trailing
    newline), and the stray duplicate `print(characters)` was removed.
    """
    lines = 0
    words = 0
    characters = 0
    with open('Hamlet.txt', 'r') as f:
        for line in f:
            lines += 1
            # len() counts characters; this equals bytes only for ASCII text.
            characters += len(line)
            words += len(line.split())
    print(f'There are {lines} lines')
    print(f'The file has {words} words')
    print(f'The file is {characters} bytes/characters')
def exercise_3():
    """Print the number of distinct words in Hamlet.txt, after lowercasing
    and stripping the listed punctuation characters."""
    # str.translate drops every mapped character in a single pass.
    strip_table = {ord(c): None for c in '()!.;,?:'}
    unique_words = set()
    with open('Hamlet.txt', 'r') as f:
        for raw_line in f.readlines():
            text = raw_line[:-1] if raw_line[-1] == '\n' else raw_line
            for token in text.split():
                unique_words.add(token.lower().translate(strip_table))
        print('There are', len(unique_words), 'words')
def exercise_4():
    """Print the 20 most frequent words in Hamlet.txt with their counts."""
    special_chars = ['(', ')', '!', '.', ';', ',', '?', ':']
    with open('Hamlet.txt', 'r') as f:
        words = {}
        for line in f.readlines():
            # NOTE(review): str.strip() returns a new string; this call is a
            # no-op because the result is discarded.
            line.strip()
            if line[-1] == '\n':
                line = line[:-1]
            words_unfiltered = line.split()
            for word in words_unfiltered:
                word = word.lower()
                # Remove every listed punctuation character from the word.
                for special_char in special_chars:
                    if special_char in word:
                        word = word.replace(special_char, '')
                if word not in words:
                    words[word] = 1
                else:
                    words[word] += 1
        # Repeated selection of the current maximum: index() and remove()
        # both target the first occurrence, so the parallel keys/values
        # lists stay aligned on ties.
        keys = []
        values = []
        for key, value in words.items():
            keys.append(key)
            values.append(value)
        # NOTE(review): raises ValueError if there are fewer than 20
        # distinct words -- fine for Hamlet, fragile for small inputs.
        for i in range(20):
            common_word = keys[values.index(max(values))]
            common_count = max(values)
            print(f"{i + 1}: Word '{common_word}' used {common_count} times")
            values.remove(common_count)
            keys.remove(common_word)
def exercise_5():
    """Print the set of distinct characters in Hamlet.txt (lowercased),
    excluding the listed punctuation, whitespace and digit characters."""
    # Set membership instead of the original list scan; same exclusions.
    # (The original comment suggested a regex replace would be cleaner.)
    excluded = {'(', ')', '!', '.', ';', ',', '?', ':', '\n',
                '[', ']', ' ', '', "'", '"', '-', '&', '1'}
    seen = set()
    with open('Hamlet.txt', 'r') as f:
        for line in f.readlines():
            for ch in line:
                if ch not in excluded:
                    seen.add(ch.lower())
        print(seen)
| RohanBKhatwani/AI-PythonDocstring-Generator | test-files/wordCount.py | wordCount.py | py | 2,972 | python | en | code | 0 | github-code | 13 |
30812696493 | import pytest
from lagom import Container, bind_to_container, injectable
class MyDep:
    """Trivial dependency resolved by the lagom container in these tests."""

    value: str

    def __init__(self, value="testing"):
        self.value = value
# Shared container the functions below are bound to.
container = Container()


@bind_to_container(container)
def example_function(message: str, resolved: MyDep = injectable) -> str:
    # `resolved` is supplied by the lagom container unless the caller
    # passes an explicit value.
    return resolved.value + message
@bind_to_container(container)
async def async_example_function(message: str, resolved: MyDep = injectable) -> str:
    # Async variant: injection behaves the same as for example_function.
    return resolved.value + message
def test_functions_decorated_get_the_correct_argument():
    # The container constructs MyDep with its default value "testing".
    assert example_function(message=" world") == "testing world"
def test_injected_arguments_can_over_overridden():
    # An explicitly passed dependency wins over the container-resolved one.
    assert example_function(message=" world", resolved=MyDep("set")) == "set world"
@pytest.mark.asyncio
async def test_async_functions_decorated_get_the_correct_argument():
    # Injection also works for coroutine functions.
    assert await async_example_function(message=" world") == "testing world"
| meadsteve/lagom | tests/test_explicit_partial_functions.py | test_explicit_partial_functions.py | py | 923 | python | en | code | 216 | github-code | 13 |
23154261305 | import sys
from PyQt5 import QtGui, QtWidgets
from matplotlib import image
import run_graphsage_cora as rg
from PyQt5.QtCore import pyqtSignal, QThread
from PyQt5.QtWidgets import QApplication, QMainWindow, QGraphicsPixmapItem, QGraphicsScene
from demo import Ui_MainWindow
import cv2
class MyThread(QThread):
    """Worker thread that runs GraphSAGE training off the UI thread and
    reports progress/results through Qt signals."""

    signal = pyqtSignal(str)  # the argument in parentheses is the type the signal carries
    signal0 = pyqtSignal(str)

    def __init__(self, a, b, c, d):
        # a/b/c/d come from MainWindow.handle_click1 in this order:
        # aggregator name, layer sizes, epoch count, train size -- confirm
        # against run_graphsage_cora.run's signature.
        super(MyThread, self).__init__()
        self.a = a
        self.b = b
        self.c = c
        self.d = d

    def run(self):
        try:
            self.signal0.emit("正在训练。。。")
            eval_res = rg.run(self.a, self.b, self.c, self.d)
            self.signal.emit("训练完成!")
            self.signal.emit('Test loss: ' + str(eval_res[0]))
            self.signal.emit('Test weighted_loss: ' + str(eval_res[1]))
            self.signal.emit('Test accuracy: ' + str(eval_res[2]))
        except Exception as e:
            # NOTE(review): the second assignment overwrites the first, so
            # `exitcode` ends up holding the exception object rather than 1
            # (likely `self.exception = e` was intended); also no signal is
            # emitted here, so the UI never learns that training failed.
            self.exitcode = 1
            self.exitcode = e
class MyThread2(QThread):
    """Worker thread that runs classification and reports the resulting
    image path through a Qt signal."""

    signal1 = pyqtSignal(str)  # the argument in parentheses is the type the signal carries
    signal2 = pyqtSignal(str)

    def __init__(self):
        super(MyThread2, self).__init__()

    def run(self):
        self.signal2.emit('开始分类。。。')
        rg.classify()
        # Path of the image written by rg.classify(); picked up by
        # MainWindow.showimage.
        self.signal1.emit('.\\pic.png')
class MainWindow(QMainWindow):
    """Main application window: wires the generated Qt UI to the training
    and classification worker threads."""

    def __init__(self):
        super(MainWindow, self).__init__()
        self.ui = Ui_MainWindow()  # instantiate the generated UI class
        self.ui.setupUi(self)
        self.band()

    def band(self):
        """Connect the UI widgets to their handlers."""
        self.ui.pushButton.clicked.connect(self.handle_click1)
        self.ui.pushButton_2.clicked.connect(self.handle_click2)
        self.ui.radioButton.toggled.connect(self.handle_radio)

    def handle_click1(self):
        """Read the training parameters from the form and start training."""
        # collect the parameters
        func = self.ui.lineEdit.text()
        sz = self.ui.lineEdit_2.text()
        train = self.ui.lineEdit_3.text()
        epoch = self.ui.lineEdit_4.text()
        # run training in a worker thread; keep a reference so the thread
        # is not garbage-collected while running
        self.thread = MyThread(str(func), sz.split(), int(epoch), int(train))
        self.thread.signal.connect(self.callback)
        self.thread.signal0.connect(self.begin)
        self.thread.start()  # start the thread

    def handle_click2(self):
        """Start classification in a worker thread."""
        self.thread1 = MyThread2()
        self.thread1.signal1.connect(self.showimage)
        self.thread1.signal2.connect(self.begin)
        self.thread1.start()  # start the thread

    def handle_radio(self):
        """Forward the radio-button state to the training module."""
        if self.ui.radioButton.isChecked():
            rg.isselected(1)
        else:
            rg.isselected(0)

    def callback(self, string):
        """Append a progress/result line to the text browser."""
        self.ui.textBrowser.append(string)

    def begin(self, str):
        # NOTE(review): the parameter shadows the builtin `str`; renaming it
        # (e.g. to `message`) would be safer.
        self.ui.textBrowser.clear()
        self.ui.textBrowser.append(str)

    def showimage(self, p_str):
        """Load the classification image at path p_str and display it."""
        self.ui.textBrowser.append('分类完成!')
        path = p_str
        img = cv2.imread(path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        x = img.shape[1]
        y = img.shape[0]
        # NOTE(review): QImage is built without an explicit bytesPerLine;
        # images whose width*3 is not 4-byte aligned may render skewed.
        frame = QtGui.QImage(img.data, x, y, QtGui.QImage.Format_RGB888)
        pix = QtGui.QPixmap.fromImage(frame)
        self.item = QGraphicsPixmapItem(pix)  # create the pixmap item
        self.scene = QGraphicsScene()  # create the scene
        self.scene.addItem(self.item)
        self.ui.graphicsView.setScene(self.scene)
        self.ui.graphicsView.fitInView(QGraphicsPixmapItem(QtGui.QPixmap(pix)))  # scale the image to fit the view
        self.ui.graphicsView.show()
self.ui.graphicsView.show()
if __name__ == '__main__':
    app = QApplication([])  # start a Qt application
    window = MainWindow()  # instantiate the main window
    window.show()  # show the main window
app.exec() # 避免程序执行到这一行后直接退出 | Boomerl/Graduation-project | src/run_ui.py | run_ui.py | py | 3,746 | python | en | code | 0 | github-code | 13 |
4359561978 | from bs4 import BeautifulSoup
import datetime
DATE_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
def extract_stops(bs_data):
    '''
    Receives the BeautifulSoup object and extracts the stops present in the KML.
    Returns a list of dicts with coordinates, times (arrival == departure),
    sequence number and the time delta from the first stop.
    '''
    # Extracting the stops
    stops_kml = bs_data.find_all('Placemark')
    del stops_kml[-1] # Remove the last Placemark, which refers to the shapes
    # print('Quantidade de Paradas Encontradas:', len(stops_kml))
    # print(stops_kml[0].prettify())

    # Timestamp of the first stop; every stop's tdelta is measured from it.
    start_time = datetime.datetime.strptime(stops_kml[0]('TimeStamp')[0]('when')[0].contents[0], DATE_TIME_FORMAT)
    seq = 1
    stops = []
    for stop in stops_kml:
        '''
        print(stop.prettify()
        # Dados da parada (X, Y e Z)
        print(stop('Point')[0]('coordinates')[0].contents)
        # float(stop('Point')[0]('coordinates')[0].contents[0].split(',')[1]) -> Latitude
        # float(stop('Point')[0]('coordinates')[0].contents[0].split(',')[0]) -> Longitude
        '''
        timestamp = datetime.datetime.strptime(stop('TimeStamp')[0]('when')[0].contents[0], DATE_TIME_FORMAT)
        tdelta = timestamp - start_time
        # KML coordinates are "lon,lat,alt", hence index 1 = lat, 0 = lon.
        stops.append(
            {
                'stop':
                    {
                        'stop_lat': float(stop('Point')[0]('coordinates')[0].contents[0].split(',')[1]),
                        'stop_lon': float(stop('Point')[0]('coordinates')[0].contents[0].split(',')[0])
                    },
                'arrival_time': timestamp.time(),
                'departure_time': timestamp.time(),
                'stop_sequence': seq,
                'tdelta': tdelta
            }
        )
        seq += 1

    return stops
def extract_shapes(bs_data):
    '''
    Receives the BeautifulSoup object and extracts the shape points present
    in the KML. Returns a list of dicts with latitude, longitude and a
    1-based sequence number.
    '''
    shapes_kml = bs_data.find_all('Placemark', {'id': 'tour'})
    shapes_kml = shapes_kml[0]('gx:MultiTrack')[0]('gx:Track')[0].find_all('gx:coord')
    # print('Quantidade de Pontos de Shape encontrados:', len(shapes_kml))

    shapes = []
    seq = 1
    for shape in shapes_kml:
        '''
        # float(shape.contents[0].split()[1]) -> Latitude
        # float(shape.contents[0].split()[0]) -> Longitude
        '''
        # gx:coord content is "lon lat alt", hence index 1 = lat, 0 = lon.
        shapes.append(
            {
                'shape_pt_lat': float(shape.contents[0].split()[1]),
                'shape_pt_lon': float(shape.contents[0].split()[0]),
                'shape_pt_sequence': seq
            }
        )
        seq += 1

    # NOTE(review): seq follows insertion order, so this sort is effectively
    # a no-op; kept for safety.
    shapes.sort(key= lambda e: e['shape_pt_sequence'])

    return shapes
def KML(arquivo_entrada):
    '''
    Processes the KML file and returns the data in Python format.
    - Requires as a parameter the name of the KML file on the server
    - Returns a tuple with (1) the stops in the file and (2) the shape of the file.
    '''
    with open(arquivo_entrada, 'r') as f:
        data = f.read()

    # Parse the KML (an XML dialect) with the lxml-backed XML parser.
    bs_data = BeautifulSoup(data, "xml")
shapes = extract_shapes(bs_data)
return stops, shapes | mateusolorenzatti/gtfs-farroupilha-manager | apps/gtfs/helpers/gps2gtfs/KML_helper.py | KML_helper.py | py | 3,005 | python | en | code | 0 | github-code | 13 |
33596617943 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 6 09:55:58 2019
@author: ajseshad
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from ClassificationLib import FeatureScaling, LogReg, KNN, SVM, KernelSVM, NaiveBayes, DecisionTree, RandomForest
# The raw review text can itself contain commas and quotes, so the data is
# stored tab-separated; quoting=3 (csv.QUOTE_NONE) disables quote handling.
def ImportData(filename):
    """Load the tab-separated review dataset into a pandas DataFrame."""
    return pd.read_csv(filename, sep='\t', quoting=3)
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
def InitClean():
    """One-time setup: download the NLTK stopword corpus used by CleanText."""
    nltk.download('stopwords')
def CleanText(reviewText):
    """Normalise one review: keep letters only, lowercase, drop English
    stopwords and reduce each remaining word to its Porter stem."""
    import re
    # Replace everything that is not a letter with a space.
    letters_only = re.sub('[^a-zA-Z]', ' ', reviewText)
    tokens = letters_only.lower().split()
    # Stemming maps inflected forms onto one root (loved -> love) so the
    # bag-of-words matrix does not explode into a huge sparse matrix.
    stemmer = PorterStemmer()
    stop_set = set(stopwords.words('english'))
    stems = [stemmer.stem(token) for token in tokens if token not in stop_set]
    return ' '.join(stems)
def CleanReview(dataset):
    """Clean every review in *dataset* in place and return the DataFrame.

    Assumes a default RangeIndex (0..len-1) — TODO confirm for other inputs.
    """
    InitClean()
    for i in range(0, len(dataset)):
        # .loc avoids pandas chained-assignment: dataset['Review'][i] = ...
        # may silently write to a temporary copy (SettingWithCopyWarning).
        dataset.loc[i, 'Review'] = CleanText(dataset.loc[i, 'Review'])
    return dataset
# Create a bag of words model
def Vectorize(cleanDataset):
    """Encode cleaned reviews as a dense bag-of-words count matrix,
    keeping only the 1500 most frequent terms."""
    from sklearn.feature_extraction.text import CountVectorizer
    vectorizer = CountVectorizer(max_features=1500)
    counts = vectorizer.fit_transform(cleanDataset)
    return counts.toarray()
# -- Classification - Naive Bayes
def SplitDataSet(X, y):
    """Split features/labels into an 80/20 train/test partition.

    Returns (X_train, X_test, y_train, y_test); random_state=0 keeps the
    split reproducible across runs.
    """
    # sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;
    # train_test_split now lives in sklearn.model_selection.
    from sklearn.model_selection import train_test_split
    return train_test_split(X, y, test_size=0.2, random_state=0)
def ConfusionEval(cm):
    """Derive (accuracy, precision, recall, F1) from a 2x2 confusion matrix.

    Convention: cm[0,0]=TN, cm[0,1]=FP, cm[1,0]=FN, cm[1,1]=TP.
    """
    tn, fp, fn, tp = cm[0, 0], cm[0, 1], cm[1, 0], cm[1, 1]
    accuracy = (tn + tp) / (tn + fp + fn + tp)
    precision = tp / (fp + tp)
    recall = tp / (fn + tp)
    f1 = 2 * precision * recall / (precision + recall)
    return accuracy, precision, recall, f1
# --- Script: load the reviews, clean and vectorise them, then benchmark
# --- seven classifiers on identical train/test splits.
dataset = ImportData('Restaurant_Reviews.tsv')
cleanDataset = CleanReview(dataset)
bagOfWords = Vectorize(dataset['Review'])
X = bagOfWords                   # independent variables: term-count matrix
y = dataset.iloc[:, 1].values    # target labels (liked / not liked)
X_train, X_test, y_train, y_test = SplitDataSet(X, y)
fs = FeatureScaling()
fs.scale(X_train, y_train, X_test, y_test)
# One confusion matrix + derived metrics per classifier.
cm1, fs1, _ = NaiveBayes(fs)
accuracy1, precision1, recall1, f11 = ConfusionEval(cm1)
cm2, fs2, _ = LogReg(fs)
accuracy2, precision2, recall2, f12 = ConfusionEval(cm2)
cm3, fs3, _ = KNN(fs)
accuracy3, precision3, recall3, f13 = ConfusionEval(cm3)
cm4, fs4, _ = SVM(fs)
accuracy4, precision4, recall4, f14 = ConfusionEval(cm4)
cm5, fs5, _ = KernelSVM(fs)
accuracy5, precision5, recall5, f15 = ConfusionEval(cm5)
cm6, fs6, _ = DecisionTree(fs)
accuracy6, precision6, recall6, f16 = ConfusionEval(cm6)
cm7, fs7, _ = RandomForest(fs)
accuracy7, precision7, recall7, f17 = ConfusionEval(cm7)
12305066287 | import pygtk
import gtk, gobject, cairo
class Screen(gtk.DrawingArea):
    """GTK drawing area that repaints itself solid dark blue on expose."""
    __gsignals__ = {"expose-event": "override"}
    def do_expose_event(self, event):
        """Clip the cairo context to the damaged region, then redraw."""
        cr = self.window.cairo_create()
        cr.rectangle(event.area.x, event.area.y,
                     event.area.width, event.area.height)
        # BUG FIX: the original had 'cr.clip' (attribute access only),
        # so the clip region was never actually applied.
        cr.clip()
        self.draw(cr, *self.window.get_size())
    def draw(self, cr, width, height):
        """Fill the full width x height area with dark blue."""
        cr.set_source_rgb(0, 0, 0.5)
        cr.rectangle(0, 0, width, height)
        cr.fill()
def run(Widget):
    """Show one instance of *Widget* in a toplevel window and enter the
    GTK main loop until the window is closed."""
    window = gtk.Window()
    window.connect("delete-event", gtk.main_quit)
    instance = Widget()
    instance.show()
    window.add(instance)
    window.present()
    gtk.main()
if __name__=="__main__":
run(Screen) | malaania/kurs_pythona | try_gtk.py | try_gtk.py | py | 733 | python | en | code | 0 | github-code | 13 |
15976991416 | import pytest
from cityjson2ifc_cli.convert import cityjson2ifc
@pytest.mark.parametrize("input_model", ["input_model_5907", "input_model_68dn2"])
def test_lod_select(request, input_model, tmp_dir):
    """Can we extract a specific LoD?"""
    # Resolve the fixture by name so one test body covers both models.
    cm = request.getfixturevalue(input_model)
    outfile = tmp_dir / "outfile.ifc"
    cityjson2ifc(cm=cm, file_destination=str(outfile),
                 lod_select="1.2")
def test_lod_split(input_model_5907, tmp_dir):
    """Can we export one IFC per LoD?"""
    cm = input_model_5907
    outfile = tmp_dir / "outfile.ifc"
    # lod_split=True should fan out into one file per level of detail.
    cityjson2ifc(cm=cm, file_destination=str(outfile), lod_split=True)
def test_semantic_surfaces_building(input_model_5907, tmp_dir):
    """Can convert the Building's semantic surfaces?"""
    cm = input_model_5907
    outfile = tmp_dir / "outfile.ifc"
    # LoD 2.2 is the level that carries semantic surfaces in this model.
    cityjson2ifc(cm=cm, file_destination=str(outfile), lod_select="2.2")
@pytest.mark.parametrize("input_model,name_entity", [
    ("input_model_5907", "identificatie"),
    ("input_model_68dn2", "3df_id")]
)
def test_names(request, input_model, name_entity, tmp_dir):
    """Can we assign the various names?"""
    cm = request.getfixturevalue(input_model)
    outfile = tmp_dir / "outfile.ifc"
    # Exercise every naming knob at once: entity attribute, site,
    # project, and person (given/family) names.
    cityjson2ifc(cm=cm, file_destination=str(outfile),
                 name_entity=name_entity, name_site="site-1234",
                 name_project="project-1234", name_person_given="me",
                 name_person_family="me me")
| 3DGI/cityjson2ifc | tests/test_convert.py | test_convert.py | py | 1,461 | python | en | code | 2 | github-code | 13 |
6731703617 | import numpy as np
import pandas as pd
from PIL import Image
from typing import Tuple
import torch
from torch.utils.data import Dataset
from transformers.models.bert_japanese.tokenization_bert_japanese import BertJapaneseTokenizer
class MMBTClipDsataset(Dataset):
    """Torch Dataset pairing tokenized Japanese text with image crops.

    Each item yields BERT token ids (start/end tokens separated out for the
    MMBT model), a stack of image tensors (resized-padded full image plus
    three square crops), and a binary label from the 'is_laugh' column.
    Expects df columns: 'text', 'img_path', 'is_laugh'.
    """
    def __init__(
            self,
            df: pd.DataFrame,
            tokenizer: BertJapaneseTokenizer,
            image_transform,
            desired_img_size=224,
            max_seq_len=48):
        super().__init__()
        self.df = df
        self.tokenizer = tokenizer
        self.image_transform = image_transform
        self.desired_img_size = desired_img_size
        self.max_seq_len = max_seq_len
    def __len__(self):
        return len(self.df)
    def __getitem__(self, index):
        row = self.df.iloc[index]
        # Tokenize the text, padded/truncated to max_seq_len.
        sentence = torch.tensor(
            self.tokenizer.encode(
                row['text'],
                max_length=self.max_seq_len,
                padding='max_length',
                truncation=True
            )
        )
        # Split off [CLS]/[SEP]-style boundary tokens for MMBT.
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        # Prepare the image: full padded image + three square crops.
        img = Image.open(row['img_path']).convert("RGB")
        sliced_imgs = slice_img(img, self.desired_img_size)
        sliced_imgs = [
            np.array(self.image_transform(sliced_img))
            for sliced_img in sliced_imgs
        ]
        img = resize_pad_img(img, self.desired_img_size)
        img = np.array(self.image_transform(img))
        sliced_imgs = [img] + sliced_imgs
        sliced_imgs = torch.from_numpy(np.array(sliced_imgs))
        # Ground-truth label.
        label = torch.tensor(row['is_laugh'])
        return {
            'image_start_token': start_token,
            'image_end_token': end_token,
            'sentence': sentence,
            'image': sliced_imgs,
            'label': label
        }
def slice_img(img: Image, desired_size: int):
    """Scale *img* so its shorter side equals desired_size, then cut three
    desired_size x desired_size square crops along the longer axis:
    leading edge, centre, and trailing edge.

    Returns a list of three PIL Images.
    """
    old_img_size: Tuple[int, int] = img.size
    ratio = float(desired_size) / min(old_img_size)
    new_size = tuple([int(x * ratio) for x in old_img_size])
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    new_img = img.resize(new_size, Image.LANCZOS)
    img_arr = np.array(new_img)
    imgs = []
    height, width = img_arr.shape[0], img_arr.shape[1]
    # Split along whichever dimension is longer.
    if height < width:
        middle = width // 2
        half = desired_size // 2
        # Left part (w = 0..desired_size)
        imgs.append(Image.fromarray(img_arr[:, :desired_size]))
        # Centre crop, symmetric around the horizontal midpoint
        imgs.append(Image.fromarray(img_arr[:, middle-half:middle+half]))
        # Right part (w = width-desired_size..width)
        imgs.append(Image.fromarray(img_arr[:, width-desired_size:width]))
    else:
        middle = height // 2
        half = desired_size // 2
        # Top part (h = 0..desired_size)
        imgs.append(Image.fromarray(img_arr[:desired_size, :]))
        # Centre crop, symmetric around the vertical midpoint
        imgs.append(Image.fromarray(img_arr[middle-half:middle+half, :]))
        # Bottom part (h = height-desired_size..height)
        imgs.append(Image.fromarray(img_arr[height-desired_size:height, :]))
    return imgs
def resize_pad_img(img: Image, desired_size: int):
    """Scale *img* so its longer side equals desired_size and paste it,
    centred, onto a black desired_size x desired_size RGB canvas."""
    old_img_size: Tuple[int, int] = img.size
    ratio = float(desired_size) / max(old_img_size)
    new_size = tuple([int(x * ratio) for x in old_img_size])
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    img = img.resize(new_size, Image.LANCZOS)
    new_img = Image.new('RGB', (desired_size, desired_size))
    new_img.paste(img, (
        (desired_size - new_size[0]) // 2,
        (desired_size - new_size[1]) // 2
    ))
    return new_img
| ryota0051/boke-ai | src/models/MMBT_with_CLIP_encoder/dataset.py | dataset.py | py | 3,911 | python | en | code | 0 | github-code | 13 |
4825967632 | # -*- mode: python ; coding: utf-8 -*-
# PyInstaller build spec for the Armada launcher (macOS).
block_cipher = None
# Data files bundled next to the executable (source, destination) pairs.
added_files = [
    ('./core', 'core'),
    ('./resources', 'resources'),
    # ('./config_user/config.json', 'config_user'),
    ('./packages/marina', 'packages/marina'),
    ('./packages/launcher', 'packages/launcher'),
    ('./packages/atlantis', 'packages/atlantis'),
    # ('./packages/mb_tools', 'packages/mb_tools'),
    ('./utilsa', 'utilsa'),
    ('./vendor', 'vendor'),
    # ('c:/hostedtoolcache/windows/python/3.7.7/x64/lib/site-packages/PySide2', 'PySide2'),
    # ('c:/hostedtoolcache/windows/python/3.7.7/x64/lib/site-packages/shiboken2', 'shiboken2')
    ('./venv/lib/python3.7/site-packages/PySide2', 'PySide2'),
    ('./venv/lib/python3.7/site-packages/shiboken2', 'shiboken2'),
]
# Dependency analysis: entry script, search paths and hidden imports that
# PyInstaller cannot discover statically.
a = Analysis(['packages/launcher/main.py'],
             pathex=[
                 './',
                 './core'
             ],
             binaries=[],
             datas=added_files,
             hiddenimports=[
                 'logging.config',
                 'Qt',
                 'PySide2.QtSvg'
             ],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
          cipher=block_cipher)
# One-folder build: EXE holds the bootloader + bytecode archive ...
exe = EXE(pyz,
          a.scripts,
          [],
          exclude_binaries=True,
          name='armada_pipeline',
          debug=True,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          console=True,
          icon='armada_logo.ico' )
# ... and COLLECT gathers binaries/data into the dist directory.
coll = COLLECT(exe,
               a.binaries,
               a.zipfiles,
               a.datas,
               strip=False,
               upx=True,
               upx_exclude=[],
               name='armada_pipeline')
| Knufflebeast/armada-pipeline | pyinst_macos.spec | pyinst_macos.spec | spec | 1,911 | python | en | code | 27 | github-code | 13 |
27556101794 | import os.path
from loguru import logger
from providers import provider as provider_module
from modules.worker import Worker, handle_tasks
from modules.utils import zipdir
from config import MAX_THREAD, MANGA_STORAGE_PATH, CBZ_STORAGE_PATH
class Fetcher:
    """Drive a manga download: pick a provider module, scrape all chapter
    page URLs, download them with a worker pool, then pack the result
    into a single .cbz archive."""
    manga_name = None  # set by all_chapters(); used to name the archive
    def __init__(self, provider, url):
        _module = Fetcher._init_provider_module(provider)
        self.generator = _module.Generator
        self.url = url
    @staticmethod
    def _init_provider_module(provider):
        """Dynamically import providers.<provider> and return the module."""
        provider_module.provider_name = provider
        _module = __import__(f'providers.{provider}', globals(), locals(), ['Generator'])
        return _module
    def all_chapters(self):
        """Parse the manga page and return the provider's chapter tasks.

        Side effect: records the manga name for later archive naming.
        """
        generator = self.generator(self.url, folder_path=MANGA_STORAGE_PATH)
        logger.info("Parsing {}".format(self.url))
        chapters = generator.run()
        self.manga_name = generator.get_manga_name()
        return chapters
    def create_cbz(self):
        """Zip the downloaded manga folder into <CBZ_STORAGE_PATH>/<name>.cbz."""
        manga = "{}/{}".format(MANGA_STORAGE_PATH, self.manga_name)
        if os.path.isdir(CBZ_STORAGE_PATH) is False:
            os.mkdir(CBZ_STORAGE_PATH)
        zipdir(basedir=manga, archive_name="{}/{}.cbz".format(CBZ_STORAGE_PATH, self.manga_name))
        logger.info("Creating CBZ file success.")
    def run(self):
        """Download every chapter with MAX_THREAD workers, then archive."""
        worker = Worker(self.all_chapters(), handle_tasks, MAX_THREAD)
        worker.run()
        self.create_cbz()
| wongpinter/manga-dl | app/fetcher.py | fetcher.py | py | 1,417 | python | en | code | 0 | github-code | 13 |
38630974551 | from rest_framework import serializers
from .models import Project
from authentication.models import User
class ProjectSerializer(serializers.ModelSerializer):
    """Serializer for Project; author and created_time are server-assigned."""
    class Meta:
        model = Project
        fields = ['id', 'name', 'description', 'type', 'created_time', 'author']
        read_only_fields = ('author', 'created_time')
    def create(self, validated_data):
        # Force the author to be the authenticated request user,
        # regardless of any client-supplied value.
        validated_data["author"] = self.context["request"].user
        return super().create(validated_data)
class ContributorSerializer(serializers.Serializer):
    """Add/remove a user as contributor on the project passed via
    self.context['project']. Input is a single user_id."""
    user_id = serializers.IntegerField()
    def validate_user_id(self, value):
        # Reject ids that do not correspond to an existing user.
        try:
            User.objects.get(id=value)
        except User.DoesNotExist:
            raise serializers.ValidationError("User not found")
        return value
    def create(self, validated_data):
        """Attach the user to the project; error if already a contributor."""
        user_id = validated_data.get('user_id')
        user = User.objects.get(id=user_id)
        project = self.context['project']
        if project.contributors.filter(id=user_id).exists():
            raise serializers.ValidationError("User is already a contributor")
        project.contributors.add(user)
        return project
    def delete(self, instance, validated_data):
        """Detach the user from *instance* (a Project) and return it.

        NOTE(review): 'delete' is not a standard DRF serializer hook —
        presumably called explicitly from a view; verify against callers.
        """
        user_id = validated_data.get('user_id')
        user = User.objects.get(id=user_id)
        instance.contributors.remove(user)
        return instance
| DomninBenoit/SoftDesk_Support | SoftDesk/projects/serializers.py | serializers.py | py | 1,377 | python | en | code | 0 | github-code | 13 |
37619133742 | #!/usr/bin/env python2
import rospy
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Point
from msgs.msg import LaneEvent
class Node:
    """ROS node that visualises LaneEvent flags as RViz markers.

    Subscribes to a LaneEvent topic and publishes: a line grid outlining
    three longitudinal zones, plus a translucent red cube over whichever
    zone currently reports a warning. Coordinates are metres in the
    message's frame; z is fixed at ground level (ground_z).
    """
    ground_z = -3.1
    # Zone 1: ahead of the vehicle (x 1..71 m).
    x1_min_ = 1.0
    x1_max_ = 71.0
    x1_center_ = (x1_min_ + x1_max_) / 2
    x1_scale_ = x1_max_ - x1_min_
    y1_min_ = 1.5  # right boundary of left lane (m)
    y1_max_ = 5.0  # left boundary of left lane (m)
    y1_center_ = (y1_min_ + y1_max_) / 2
    y1_scale_ = y1_max_ - y1_min_
    # Zone 2: alongside the vehicle (x -9..1 m).
    x2_min_ = -9.0
    x2_max_ = 1.0
    x2_center_ = (x2_min_ + x2_max_) / 2
    x2_scale_ = x2_max_ - x2_min_
    y2_min_ = -1.5  # right boundary of left lane (m)
    y2_max_ = 5.0  # left boundary of left lane (m)
    y2_center_ = (y2_min_ + y2_max_) / 2
    y2_scale_ = y2_max_ - y2_min_
    # Zone 3: behind the vehicle (x -39..-9 m).
    x3_min_ = -39.0
    x3_max_ = -9.0
    x3_center_ = (x3_min_ + x3_max_) / 2
    x3_scale_ = x3_max_ - x3_min_
    y3_min_ = -1.5  # right boundary of left lane (m)
    y3_max_ = 5.0  # left boundary of left lane (m)
    y3_center_ = (y3_min_ + y3_max_) / 2
    y3_scale_ = y3_max_ - y3_min_
    def __init__(self):
        """Set up the subscriber and the four marker publishers."""
        rospy.init_node("lane_event_grid")
        self.in_topic_ = rospy.get_param("~in_topic")
        self.sub_grid_ = rospy.Subscriber(
            self.in_topic_, LaneEvent, self.callback_lane_event)
        self.pub_grid_ = rospy.Publisher(
            self.in_topic_ + "/grid", Marker, queue_size=1)
        self.pub_c1_ = rospy.Publisher(
            self.in_topic_ + "/c1", Marker, queue_size=1)
        self.pub_c2_ = rospy.Publisher(
            self.in_topic_ + "/c2", Marker, queue_size=1)
        self.pub_c3_ = rospy.Publisher(
            self.in_topic_ + "/c3", Marker, queue_size=1)
    def assign_grid_corners(self, x1, x2, y1, y2):
        """Build the four ground-plane corner Points of an axis-aligned box."""
        p11 = Point()
        p11.x = x1
        p11.y = y1
        p11.z = self.ground_z
        p12 = Point()
        p12.x = x1
        p12.y = y2
        p12.z = self.ground_z
        p21 = Point()
        p21.x = x2
        p21.y = y1
        p21.z = self.ground_z
        p22 = Point()
        p22.x = x2
        p22.y = y2
        p22.z = self.ground_z
        return p11, p12, p21, p22
    def create_lane_event_grid(self, p11, p12, p21, p22, is_warning):
        """Return the 8 points (4 segment pairs) outlining the rectangle.

        NOTE(review): is_warning is currently unused here — confirm intent.
        """
        points = []
        points.append(p11)
        points.append(p12)
        points.append(p12)
        points.append(p22)
        points.append(p22)
        points.append(p21)
        points.append(p21)
        points.append(p11)
        return points
    def create_lane_event_grid_main(self, x1, x2, y1, y2, is_warning):
        """Convenience wrapper: corners from extents, then outline points."""
        p11, p12, p21, p22 = self.assign_grid_corners(x1, x2, y1, y2)
        return self.create_lane_event_grid(p11, p12, p21, p22, is_warning)
    def create_lane_event_grid_list(
            self,
            header,
            idx,
            is_warning_c1,
            is_warning_c2,
            is_warning_c3):
        """Build one LINE_LIST Marker outlining zones 2 and 3.

        NOTE(review): zone 1's points are iterated but never appended
        (append is commented out) — the c1 outline is intentionally(?)
        omitted; confirm.
        """
        marker = Marker()
        marker.header.frame_id = header.frame_id
        marker.header.stamp = header.stamp
        marker.ns = self.in_topic_
        marker.id = idx
        marker.type = Marker.LINE_LIST
        marker.action = Marker.ADD
        marker.scale.x = 0.4
        marker.lifetime = rospy.Duration(1.0)
        marker.color.r = 1.0
        marker.color.g = 0.0
        marker.color.b = 0.0
        marker.color.a = 1.0
        marker.points = []
        for p in self.create_lane_event_grid_main(
                self.x1_max_,
                self.x1_min_,
                self.y1_max_,
                self.y1_min_,
                is_warning_c1):
            pass
            # marker.points.append(p)
        for p in self.create_lane_event_grid_main(
                self.x2_max_,
                self.x2_min_,
                self.y2_max_,
                self.y2_min_,
                is_warning_c2):
            marker.points.append(p)
        for p in self.create_lane_event_grid_main(
                self.x3_max_,
                self.x3_min_,
                self.y3_max_,
                self.y3_min_,
                is_warning_c3):
            marker.points.append(p)
        return marker
    def create_lane_event_grid_warning_list(
            self,
            header,
            idx,
            x,
            y,
            z,
            scale_x,
            scale_y):
        """Build a flat translucent red CUBE marker centred at (x, y, z)."""
        marker = Marker()
        marker.header.frame_id = header.frame_id
        marker.header.stamp = header.stamp
        marker.ns = self.in_topic_
        marker.id = idx
        marker.type = Marker.CUBE
        marker.action = Marker.ADD
        marker.pose.position.x = x
        marker.pose.position.y = y
        marker.pose.position.z = z
        marker.pose.orientation.x = 0.0
        marker.pose.orientation.y = 0.0
        marker.pose.orientation.z = 0.0
        marker.pose.orientation.w = 1.0
        marker.scale.x = scale_x
        marker.scale.y = scale_y
        marker.scale.z = 0.1
        marker.lifetime = rospy.Duration(1.0)
        marker.color.r = 1.0
        marker.color.g = 0.0
        marker.color.b = 0.0
        marker.color.a = 0.3
        return marker
    def callback_lane_event(self, msg):
        """Publish the zone outlines, plus one warning cube per active flag."""
        idx = 0
        self.pub_grid_.publish(
            self.create_lane_event_grid_list(
                msg.header,
                idx,
                msg.is_in_0_70_incoming,
                msg.is_in_n10_0,
                msg.is_in_n40_n10_incoming))
        idx += 1
        if msg.is_in_0_70_incoming:
            self.pub_c1_.publish(
                self.create_lane_event_grid_warning_list(
                    msg.header,
                    idx,
                    self.x1_center_,
                    self.y1_center_,
                    self.ground_z,
                    self.x1_scale_,
                    self.y1_scale_,
                ))
        idx += 1
        if msg.is_in_n10_0:
            self.pub_c2_.publish(
                self.create_lane_event_grid_warning_list(
                    msg.header,
                    idx,
                    self.x2_center_,
                    self.y2_center_,
                    self.ground_z,
                    self.x2_scale_,
                    self.y2_scale_,
                ))
        idx += 1
        if msg.is_in_n40_n10_incoming:
            self.pub_c3_.publish(
                self.create_lane_event_grid_warning_list(
                    msg.header,
                    idx,
                    self.x3_center_,
                    self.y3_center_,
                    self.ground_z,
                    self.x3_scale_,
                    self.y3_scale_,
                ))
    def run(self):
        """Block until ROS shutdown; all work happens in the subscriber callback."""
        rospy.spin()
if __name__ == "__main__":
node = Node()
node.run()
| wasn-lab/Taillight_Recognition_with_VGG16-WaveNet | src/detection_viz/scripts/gen_lane_event_grid.py | gen_lane_event_grid.py | py | 6,691 | python | en | code | 2 | github-code | 13 |
26893452765 | import os
import subprocess
from make_enc import*
from testGUI import first_GUI
# ----- GATHERING PATH INFO -----
# NOTE(review): parses the output of the Windows 'cd' command to discover
# the current user's home path — Windows-only and locale/shell dependent;
# consider os.path.expanduser('~') instead. TODO confirm target platforms.
output = subprocess.getoutput( 'cd' )
full_path = '' + output
# ----- GETTING USERNAME -----
LHS_start = output.find('Users') + len('Users') + 1
user = output[LHS_start:]
RHS_end = user.find('\\')
user = user[:RHS_end]
# ----- GETTING PRE-USERNAME ------
n = full_path.find(user + '\\')
pre_user = full_path[:n]
# ----- BASIC DIR EST ON DESKTOP ------
# All files live in <home>\Desktop\SecureData.
target = pre_user + user + '\\Desktop'
target = os.path.join( target, 'SecureData' )
# ----------- CREATING OS PATHS -----------
notes = 'NOTES.txt'          # plaintext notes file
idk = 'secretNOTES.txt'      # encrypted notes file
to_NOTES = os.path.join(target, notes)
to_IDK = os.path.join(target, idk)
def create_folder():
    """Create the SecureData folder on the desktop if it is missing."""
    if not os.path.exists(target):
        os.mkdir(target)
def create_notes():
    """Create an empty NOTES.txt if it does not exist yet."""
    if not os.path.exists(to_NOTES):
        with open(to_NOTES, 'a+', encoding='utf_8'):
            pass
def create_safe_note():
    """Create an empty secretNOTES.txt if it does not exist yet."""
    if not os.path.exists(to_IDK):
        with open(to_IDK, 'a+', encoding='utf_8'):
            pass
def clean_start():
    """First-run flow: if neither the plaintext nor the encrypted notes
    file exists, create NOTES.txt, show the intro GUI, and exit."""
    if( os.path.exists(to_IDK)==False and os.path.exists(to_NOTES)==False ):
        create_notes()
        first_GUI()
        exit()
# ------------------ ENCRYPT ... to_NOTES -> to_IDK ------------------
def do_encrypt():
    """Encrypt NOTES.txt line by line into secretNOTES.txt, then delete
    the plaintext file."""
    # 'with' guarantees both handles close even if enc_motor raises;
    # the original leaked both file objects on error.
    with open(to_NOTES, 'r', encoding='utf_8') as src, \
            open(to_IDK, 'a+', encoding='utf_8') as dst:
        for line in src:
            dst.write(enc_motor(line))
    os.remove(to_NOTES)
# ------------------ DE-CRYPT ... to_IDK -> to_NOTES ------------------
def do_decrypt():
    """Decrypt secretNOTES.txt line by line into NOTES.txt, then delete
    the encrypted file."""
    # 'with' guarantees both handles close even if dec_motor raises;
    # the original leaked both file objects on error.
    with open(to_IDK, 'r', encoding='utf_8') as src, \
            open(to_NOTES, 'a+', encoding='utf_8') as dst:
        for line in src:
            dst.write(dec_motor(line))
    os.remove(to_IDK)
| mwmorale/StoringEncryptionData | OS_manip.py | OS_manip.py | py | 1,900 | python | en | code | 2 | github-code | 13 |
18274948226 |
from assemblyline.al.common.result import ResultSection, Tag, TAG_WEIGHT, Classification, TAG_USAGE, TAG_TYPE
class VirusHitSection(ResultSection):
    """Result section describing a single AV detection.

    Title varies with whether the hit was on an embedded file and whether
    a detection type (e.g. heuristic vs signature) is known.
    """
    def __init__(self, virus_name, score, embedded_filename='', detection_type=''):
        if embedded_filename:
            title = 'Embedded file: %s was identified as %s' % (
                embedded_filename, virus_name)
        else:
            title = 'File was identified as %s' % virus_name
        if detection_type:
            title += ' (%s)' % detection_type
        super(VirusHitSection, self).__init__(
            title_text=title,
            score=score,
            classification=Classification.UNRESTRICTED)
class VirusHitTag(Tag):
    """AV_VIRUS_NAME identification tag with fixed medium weight and
    unrestricted classification."""
    def __init__(self, virus_name, context=None):
        super(VirusHitTag, self).__init__(
            tag_type=TAG_TYPE.AV_VIRUS_NAME,
            value=virus_name,
            weight=TAG_WEIGHT.MED,
            usage=TAG_USAGE.IDENTIFICATION,
            classification=Classification.UNRESTRICTED,
            context=context)
class AvScanResult(object):
    """Aggregated outcome of one AV engine scan.

    self.results maps file_path -> {embedded_file_name: (is_virus,
    detection_type, virus_name, '')}; the empty-string key denotes the
    original (non-embedded) file itself.
    """
    RESULT_OK = 'ok'

    def __init__(self):
        self.application_name = ''
        self.version_application = ''
        self.version_dats = ''
        self.version_engine = ''
        self.results = {}

    def add_result(self, file_path, is_virus, virus_name, detection_type='', embedded_file=''):
        """Record one detection. Empty embedded_file = the original file itself."""
        if file_path not in self.results:
            self.results[file_path] = {}
        self.results[file_path][embedded_file] = (is_virus, detection_type, virus_name, '')

    def get_result(self, file_path):
        """Return the per-embedded-file dict for file_path, or None.

        BUG FIX: the original evaluated the lookup but never returned it,
        so every call yielded None.
        """
        return self.results.get(file_path, None)

    def __str__(self):
        from cStringIO import StringIO
        from pprint import pformat
        output = StringIO()
        output.write('result:%s - %s - %s\n' % (
            self.version_application, self.version_dats, self.version_engine))
        output.write('\n%s' % pformat(self.results))
        result = output.getvalue()
        output.close()
        return result
| deeptechlabs/cyberweapons | assemblyline/assemblyline/al/common/av_result.py | av_result.py | py | 2,116 | python | en | code | 78 | github-code | 13 |
37132413130 | age = int(input('Введите ваш возраст: ')) # простой вариант преобразования строки (str) в целое число (int)
#int_age = int(age)  # alternative: convert in a separate step (more verbose)
# Branch on the age read above.
if age < 28:
    print('Поздравляю, Вы - Молодой козлик!')
else:
    print('Увы, но вы - Старикашка!')
# while loop: count down from 10 to 1, then greet.
x = 10
while x > 0:
    print('{}'.format(x))
    x -= 1
print('С Новым годом!')
# 'continue' in a for loop: print 1..6 skipping 3.
for i in range(1, 7):
    if i == 3:
        continue
    print(i)
# 'continue' in a while loop: print 1..5 skipping 3.
i = 1
while i <= 5:
    if i == 3:
        i += 1
        continue
    print(i)
    i += 1
# Exercise: all pairwise products of two lists.
list_1 = [8, 19, 148, 4]
list_2 = [9, 1, 33, 83]
added = []
for i in list_1:
    for j in list_2:
        added.append(i * j)
print(added)
| lis5662/Hangman- | Test.py | Test.py | py | 934 | python | ru | code | 0 | github-code | 13 |
1434394303 | print('='*20,'欢迎使用好友系统','='*20)
print()
hy=[]
while True :
print('''\
1:\t添加好友
2:\t删除好友
3:\t修改备注
4:\t展示好友
5:\t退出
''')
print('-' * 58)
print('请选择要做得操作')
xz=input()
if xz =='1' :
print('输入要添加的好友')
name_hy=input()
if name_hy in hy:
print(f'{name_hy}已存在')
else :
print(f'{name_hy}'"是否添加好友,y/yse/YSE,取消操作输入任意内容")
xz1 = input('请输入 ')
if xz1 == 'y' or xz1 == 'Y' or xz1 == 'yse' or xz1 == 'YSE':
hy.append(name_hy)
print('添加成功')
else:
print('取消成功')
elif xz =='2' :
if hy == []:
print('好友列表为空')
else :
print('输入要删的序号')
cd = len(hy)
xh = int(input())
if 0 <= xh <= cd:
idnex = xh - 1
print(hy[idnex], '是否删除,删除不可逆,y/yse/YSE,取消操作输入任意内容')
xz2 = input('请输入 ')
if xz2 == 'y' or xz2 == 'Y' or xz2 == 'yse' or xz2 == 'YSE':
hy.pop(idnex)
print('删除成功')
else:
print('删除取消')
elif xz =='3' :
if hy==[] :
print('好友列表为空')
else :
cd1 = len(hy) - 1
name_hy1 = input('请输入要修改的好友名字 ')
if name_hy1 not in hy:
print(f'{name_hy1}不是好友')
elif name_hy1 in hy:
s = hy.index(name_hy1)
if 0 <= cd1 and cd1 <= cd1:
print(hy[s], '是否修改备注,y/yse/YSE,取消操作输入任意内容')
xz3 = input('请输入 ')
if xz3 == 'y' or xz3 == 'Y' or xz3 == 'yse' or xz3 == 'YSE':
hy[s] = input('请输入要修改的备注')
print('备注成功')
else:
print('取消操作')
else:
print('输入有误,没有改好友 请输入4查看')
elif xz =='4' :
if hy==[] :
print('好友列表为空')
else :
i=0
print('序号\t名字')
for hy2 in hy :
i+=1
print(f'{i}\t{hy2}')
elif xz =='5' :
print('退出成功')
break
else:
print('输入有误,重新输入')
| sk0606-sk/python | python1/python3/练习/1/好友管理系统.py | 好友管理系统.py | py | 2,658 | python | zh | code | 0 | github-code | 13 |
23836488255 | from datetime import time, date
from random import choice
from time import sleep
import os
from discord.ext import tasks, commands
import discord
from poll import send_poll_req, create_poll
import utils
TOKEN = os.environ["TOKEN"]
CHAN_ID = int(os.environ["CHAN_ID"])
ADMIN = int(os.environ["ADMIN"])
JOB_TIME = time(9,0)
CONFIG_FILENAME = 'config.json'
JOKES_FILENAME = 'jokes.txt'
class MyBot(commands.Bot):
    """Sets up bot object (mainly for defining recurrent tasks)"""
    def __init__(self, *args, **kwargs):
        # config: poll weekdays + [start, end] hour window; jokes: one per line.
        self.config = utils.load_config(CONFIG_FILENAME)
        self.jokes = utils.load_jokes(JOKES_FILENAME)
        super().__init__(*args, **kwargs)
    async def setup_hook(self) -> None:
        # start the task to run in the background
        self.send_poll.start()
    async def on_ready(self):
        print(f'Logged in as {self.user} (ID: {self.user.id})')
        print('------')
    @tasks.loop(time=JOB_TIME)
    async def send_poll(self, force=False):
        """Build next week's availability poll and post its URL (Fridays only,
        unless force=True; skipped entirely when no days are scheduled)."""
        if (utils.is_friday() or force is True) and self.config['days'] != []: # Send the poll only on Fridays
            channel = self.get_channel(CHAN_ID)
            active_day = lambda day: day.weekday() in self.config['days']
            week_num, days = utils.next_week(date.today())
            active_days = filter(active_day, days)
            title = f'Tollas (hét #{week_num})'
            start, end = self.config['time']
            hours = range(start, end)
            poll = create_poll(title, active_days, hours)
            url = send_poll_req(lambda resp : resp.json()['url'], poll)
            await channel.send(url)
    @send_poll.before_loop
    async def before_poll(self):
        await self.wait_until_ready() # wait until the bot logs in
# message_content is a privileged intent, required to read command text.
intents = discord.Intents.default()
intents.message_content = True
bot = MyBot(command_prefix='!', intents=intents)
@bot.listen()
async def on_message(message):
    """Pin message containing the poll url"""
    # Only pin the bot's own messages that carry a strawpoll link.
    if message.author.id == bot.user.id and 'strawpoll.com' in message.content:
        await message.pin()
@bot.command()
async def joke(ctx):
    """Tells a very funny joke"""
    await ctx.send(choice(bot.jokes))
@bot.command()
async def poll(ctx):
    """Send the poll now"""
    # Admin-only: bypass the Friday schedule and post immediately.
    if ctx.message.author.id == ADMIN:
        await bot.send_poll(force=True)
@bot.command()
async def get_schedule(ctx):
    """Shows which days will be in the next poll"""
    if ctx.message.author.id == ADMIN:
        await ctx.send(f'Schedule for poll is: {list(map(utils.show_day, bot.config["days"]))}')
@bot.command()
async def schedule(ctx, *days: int):
    """Modify next week's poll by specifying the days using numbers 0 to 6"""
    if ctx.message.author.id == ADMIN:
        bot.config['days'] = days # TODO Handle wrong input
        # Persist so the schedule survives restarts.
        utils.modify_config(bot.config, CONFIG_FILENAME)
        print(f'Schedule modified: {days}')
        await ctx.send(f'Schedule was modified to: {list(map(utils.show_day, days))}')
@bot.command()
async def shutup(ctx):
    """Temporarily turns off the polls"""
    # Empty day list makes send_poll a no-op until re-scheduled.
    if ctx.message.author.id == ADMIN:
        bot.config['days'] = []
        utils.modify_config(bot.config, CONFIG_FILENAME)
        print(f'Polls turned off')
        await ctx.send(f'All right, turning off polls.')
@bot.command()
async def schedule_hours(ctx, *hours: int):
    """Modify next week's poll by specifying time slots"""
    # Expects exactly two values: start and end hour. TODO validate.
    if ctx.message.author.id == ADMIN:
        bot.config['time'] = hours # TODO Handle wrong input
        utils.modify_config(bot.config, CONFIG_FILENAME)
        print(f'Timeslots modified: {hours[0]} - {hours[1]}')
        await ctx.send(f'Timeslots were modified to: {hours[0]} - {hours[1]}')
@bot.command()
async def get_timeslots(ctx):
    """Show timeslots for next poll"""
    if ctx.message.author.id == ADMIN:
        start, end = bot.config['time']
        await ctx.send(f'Timeslots for poll: {start} - {end}')
@bot.command()
async def call(ctx, target):
    """Call's the reception of target"""
    # Joke command: fake a phone call with a 5 s pause before the punchline.
    await ctx.send(f'Calling {target}...')
    sleep(5)
    if (target == "Tüskecsarnok" or target == "Tüske") and date.today().weekday() < 5:
        await ctx.send(f'They said they are full and then condescendingly reprimanded me for calling them in the first place during the working days of the week.')
    else:
        await ctx.send(f'Unfortunately, no response...')
# Blocks forever, running the event loop with the token from the environment.
bot.run(TOKEN)
| EarlPitts/badminbot-v2 | bot.py | bot.py | py | 4,409 | python | en | code | 0 | github-code | 13 |
23243413194 | import numpy as np
import torch
import torch.nn.functional as F
from item_to_genre import item_to_genre
import pandas as pd
from sklearn.preprocessing import normalize
from topK import topK, hrK
class XEval(object):
"""
evaluate the explainability
"""
def __init__(self, dataset='100k'):
"""
size: can be either '100k' or '1m', or 'ciao'
"""
if dataset=='100k' or dataset=='1m':
path = "movielens/" + 'ml-' + dataset + '.ratings'
path_i ="movielens/" + 'ml-' + dataset + '.iave'
path_u ="movielens/" + 'ml-' + dataset + '.uave'
elif dataset=='ymovie':
path = 'ymovies/clean_data.txt'
path_i = "ymovies/ymovie.iave"
path_u = "ymovies/ymovie.uave"
else:
raise Exception('not supported dataset!')
self.dataset = dataset # the dataset name
# load rating data
self.data_df = pd.read_csv(path) # dataframe
self.data = self.data_df.values[:, 0:3] # numpy, note here still float type
# load averages
self.i_ave_df = pd.read_csv(path_i, index_col=0) # item id as index
self.u_ave_df = pd.read_csv(path_u, index_col=0)
ave_dict = {'100k': 3.530, '1m': 3.620, 'ymovie': 4.1}
self.ave = ave_dict[self.dataset] # the global average
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def get_general_pref(self, uid):
"""
given uids, output the preference vectors
uid is a tensor [batch_size, 1]
"""
# u_rated_item = self.data[np.isin(self.data[:, 0], uid)]
# u_rated_asp = item_to_genre(u_rated_item[:, 1], data_size=self.dataset)
# u_concat = np.concatenate((u_rated_item, u_rated_asp), axis=1)
# u_pref = np.sum(u_concat, axis=0, where=(u_concat[:,0]==0))
asp_list = []
uid = uid.cpu().numpy()
for u in uid:
u_rated_item = self.data[self.data[:, 0]==u]
# aspects for movies rated by a user
u_rated_asp = item_to_genre(u_rated_item[:, 1], data_size=self.dataset)
u_rated_asp = np.nan_to_num(u_rated_asp) # important, avoid nan
u_rated_asp = u_rated_asp.astype(float) # for later calculation
# ratings given by a user
u_rating = u_rated_item[:, 2].astype(float)
ave_rating = u_rating.mean()
weights = (u_rating - ave_rating) / 5.0 # minus global average, 5 scale
# weights = weights / weights.sum()
# weighted sum over movies
u_pref = np.dot(u_rated_asp.T, weights).T
# u_pref_norm = np.linalg.norm(u_pref, ord=2)
# u_pref = u_pref / u_pref_norm
u_pref = normalize(u_pref[:,np.newaxis], axis=0).ravel()
u_pref = u_pref.reshape([1, -1])
asp_list.append(u_pref)
pref = np.concatenate(asp_list, axis=0)
return pref
def get_u_ave(self):
user = self.data[:, 0].astype(int)
user = np.unique(user)
u_ave_list = []
for u in user:
u_rated_item = self.data[self.data[:, 0]==u]
u_rating = u_rated_item[:, 2].astype(float)
u_ave_rating = u_rating.mean()
u_ave_list.append(u_ave_rating)
u_ave = np.array(u_ave_list)
# name = ['uid', 'ave']
data = {'uid': user, 'ave': u_ave}
df = pd.DataFrame(data)
if self.dataset == '100k':
path = 'movielens/ml-100k.uave'
elif self.dataset == '1m':
path = 'movielens/ml-1m.uave'
elif self.dataset == 'ymovie':
path = 'ymovies/ymovie.uave'
df.to_csv(path, index=False, float_format='%.3f')
def get_i_ave(self):
item = self.data[:, 1].astype(int)
item = np.unique(item)
i_ave_list = []
for i in item:
i_rated_user = self.data[self.data[:, 1]==i]
i_rating = i_rated_user[:, 2].astype(float)
i_ave_rating = i_rating.mean()
i_ave_list.append(i_ave_rating)
i_ave = np.array(i_ave_list)
# name = ['uid', 'ave']
data = {'mid': item, 'ave': i_ave}
df = pd.DataFrame(data)
if self.dataset == '100k':
path = 'movielens/ml-100k.iave'
elif self.dataset == '1m':
path = 'movielens/ml-1m.iave'
elif self.dataset == 'ymovie':
path = 'ymovies/ymovie.iave'
df.to_csv(path, index=False, float_format='%.3f')
def get_all_ave(self):
# this function is for average calculation purpose
ratings = self.data[:, 2].astype(float)
ave = ratings.sum()
print(ave)
# '100k': 3.530
def get_u_pref(self, uid):
ave = self.ave # global average
df = self.data_df
u_rated = df.loc[df['uid'].isin(uid)]
item_rated = u_rated['mid']
item_ave = self.i_ave_df.loc[item_rated.values].values
item_bias = item_ave - self.ave
user_ave = self.u_ave_df.loc[u_rated['uid'].values].values
user_bias = user_ave - self.ave
weight = u_rated[['rating']].values - (self.ave + item_bias + user_bias)
weight = weight.flatten()
# u_rated['weight'] = weight
u_rated_asp = item_to_genre(item_rated, data_size=self.dataset).values
# calculate the weighted rating
u_pref = np.multiply(u_rated_asp.T, weight).T / 5.0
u_pref_list = u_pref.tolist()
u_rated['asp'] = u_pref_list
u_rated['asp'] = u_rated['asp'].apply(lambda x: np.array(x)) # convert to array
# u_rated['asp'] = u_rated['asp'].multiply(weight)
u_rated = u_rated[['uid', 'asp']]
pref = u_rated.groupby(['uid']).sum()
# if self.dataset == '100k':
# path = 'movielens/ml-100k.upref'
# pref.to_csv(path, index=False)
pref_list = pref['asp'].tolist()
pref_ary = np.array(pref_list)
return pref_ary
def get_cos_sim(self, uid, predicted):
"""
predicted: a torch tensor [batch, num_asp]
uid: a torch tensor [batch]
"""
pref = self.get_u_pref(uid)
# convert to tensor.cuda
pref = torch.tensor(pref, dtype=torch.float).to(self.device)
pref = F.normalize(pref, p=1, dim=-1)
sim = F.cosine_similarity(pref, predicted, dim=-1)
return sim
def get_specific_cos_sim(self, uid, asp, predicted):
"""
predicted: a torch tensor [batch, num_asp]
uid: a torch tensor [batch]
"""
pref = self.get_u_pref(uid)
# convert to tensor.cuda
pref = torch.tensor(pref, dtype=torch.float).to(self.device)
pref = F.normalize(pref, p=1, dim=-1)
spec_pref = pref * asp
spec_pred = predicted * asp
sim = F.cosine_similarity(spec_pred, spec_pref, dim=-1)
return sim
def get_rank(self, pref):  # pref is a (batch, num_asp) array
    """Rank aspects by absolute magnitude: ascending argsort along the last axis."""
    return np.argsort(np.abs(pref), axis=-1)
def get_top_K_pos_(self, pref, pred, K=3):  # top K aspects thats are positive
    """Fraction of the K highest-magnitude ground-truth aspects that also
    appear in the predicted top K.

    pref: given preference
    pred: predicted preference
    """
    in_top_given = (self.get_rank(pref) < K).astype(float)
    in_top_pred = (self.get_rank(pred) < K).astype(float)
    overlap = (in_top_given * in_top_pred).sum(axis=-1)
    return overlap / float(K)
def get_top_K_pos(self, uid, pred, K=5, M=3):
    """Top-K aspect accuracy for the users in `uid`.

    pred is a torch tensor of predicted aspect preferences; it is moved to
    numpy before scoring.  Delegates to the project-level topK() helper.

    Raises:
        ValueError: if self.dataset is not one of the supported datasets
        (previously this fell through and crashed with a NameError on
        `num_user`).
    """
    pref = self.get_u_pref(uid)
    pred = pred.cpu().data.numpy()
    # return self.get_top_K_pos_(pref, pred, K)
    user_counts = {'100k': 643, '1m': 6040, 'ymovie': 7642}
    if self.dataset not in user_counts:
        raise ValueError('unsupported dataset: {}'.format(self.dataset))
    num_user = user_counts[self.dataset]
    return topK(pref, pred, K, M, num_user)
def get_hr_K(self, uid, pred, K=3):
    """Hit-ratio@K between the users' true and predicted aspect preferences
    (delegates to the project-level hrK helper)."""
    given = self.get_u_pref(uid)
    predicted = pred.cpu().data.numpy()
    return hrK(given, predicted, K)
# Smoke test: build an evaluator for MovieLens-1M and exercise the
# per-user / per-item average helpers (results are unused here).
if __name__ == '__main__':
    xeval = XEval(dataset='1m')
    # xeval.get_general_pref(torch.tensor([0, 1]))
    a = xeval.get_u_ave()
    b = xeval.get_i_ave()
# c = xeval.get_top_K_pos(a, b) | pd90506/AMCF | evaluate.py | evaluate.py | py | 8,358 | python | en | code | 15 | github-code | 13 |
11836791086 | # main -> controller -> db_controller
import json
from controller import controller
from controller.db_controller import config
from controller import json_writer
# Interactive CLI state: the selected database system and whether the
# DBS menu should be shown on the next loop iteration.
ask_for_dbs = True
crud_choice = ""
dbs_choice = ""
# Open connections at startup.
# NOTE(review): json_writer.connect() is called again inside the main
# loop's init_db branch — presumably redundant; verify.
json_writer.connect()
def getOverview():
    """Load the schema overview and return its (table_name, table_def) items.

    Fix: the file handle was previously opened and never closed; use a
    context manager so it is released deterministically.
    """
    with open('Schema/main/main.json') as jsonFile:
        data = json.load(jsonFile)
    return data.items()
def getTable(tablename):
    """Return the .items() of the named table's schema definition,
    or None (implicitly) when the table is unknown."""
    for name, table_def in getOverview():
        if name == tablename:
            return table_def.items()
init_db = True
# Main interactive loop: pick a DBS once, then repeatedly run one CRUD
# operation.  Each operation builds a request dict
# {"dbs": ..., "params": {"mode": c|r|u|d, "values": ...}} and hands it
# to the controller as JSON.
while (True):
    if init_db:
        # NOTE(review): json_writer.connect() already ran at import time
        # above — this second call looks redundant; verify.
        json_writer.connect()
        init_db = False
    if(ask_for_dbs):
        # DBS selection menu (shown once, and again after choice "5").
        print("Zur Auswahl stehen:")
        print("1: Relationales Datenbanksystem (PostgreSQL)")
        print("2: Key-Value-System (Redis)")
        print("3: Dokumentenspeicher (MongoDB)")
        dbs_choice = input("Gewähltes DBS: ")
        ask_for_dbs = False
    # CRUD menu.
    print("Zur Auswahl stehen:")
    print("1: Create")
    print("2: Read")
    print("3: Update")
    print("4: Delete")
    print("5: Anderes DBS")
    crud_choice = input("Auswahl: ")
    if crud_choice == "1":
        # CREATE: ask a value for every non-id column of the chosen table.
        returnOverview = getOverview()
        for key, value in returnOverview:
            print(key)
        c_table = input("Welche Tabelle (name): ")
        returnTable = getTable(c_table)
        print("Welche Werte sollen hinzugefügt werden?: ")
        values = []
        for key, value in returnTable:
            # Skip columns whose schema marks them as the id key.
            id = False
            for subkey, subvalue in value.items():
                if subkey == "key" and subvalue == "id":
                    id = True
            if not id:
                value = input("Für " + key + ": ")
                values.append({ key: value })
        valueStack = {
            "table": c_table,
            "query": values
        }
        string = {
            "mode": "c",
            "values": valueStack
        }
        dbs = {
            "dbs": dbs_choice,
            "params": string
        }
        controller.recieveInput(json.dumps(dbs))
    elif crud_choice == "2":
        # READ: select columns plus optional per-column filters.
        returnOverview = getOverview()
        print("+++++++++++++++++++")
        print("Alle Tabellen:")
        print()
        for key, value in returnOverview:
            print(key)
        print()
        r_table = input("Welche Tabelle (name): ")
        returnTable = getTable(r_table)
        print("Alle Spalten in " + r_table + ": ")
        print()
        for key, value in returnTable:
            print(key)
        print()
        print("[Bitte mit ',' trennen. z.B job, titel, name oder *]")
        values = input("Was soll selektiert werden?: ")
        print()
        print("Sollen Filter hinzugefügt werden?")
        print()
        where = []
        for key, value in returnTable:
            choice = input("Soll nach " + key + " gefiltert werden? [j/n]")
            if choice == "j" or choice == "y":
                operator = input("Welcher Operator soll genutzt werden? (<,>,=...): ")
                filter = input("Mit was soll " + key + " verglichen werden?: ")
                where.append({ key: [operator, filter] })
        print()
        valueStack = {
            "table": r_table,
            "query": values,
            "where": where
        }
        string = {
            "mode": "r",
            "values": valueStack
        }
        dbs = {
            "dbs": dbs_choice,
            "params": string
        }
        # READ is the only mode whose controller result is printed.
        print(controller.recieveInput(json.dumps(dbs)))
    elif crud_choice == "3":
        # UPDATE: new values for chosen columns plus optional filters.
        returnOverview = getOverview()
        print("+++++++++++++++++++")
        print("Alle Tabellen:")
        print()
        for key, value in returnOverview:
            print(key)
        print()
        u_table = input("Welche Tabelle (name): ")
        returnTable = getTable(u_table)
        print("Welche Spalten in " + u_table + " sollen geändert werden? ")
        print()
        values = []
        for key, value in returnTable:
            choice = input("Soll " + key + " geändert werden? [j/n]")
            if choice == "j" or choice == "y":
                change = input("Zu was soll " + key + " geändert werden?: ")
                values.append({ key: change })
            print()
        print()
        print("Sollen Filter hinzugefügt werden?")
        print()
        where = []
        for key, value in returnTable:
            choice = input("Soll nach " + key + " gefiltert werden? [j/n]")
            if choice == "j" or choice == "y":
                operator = input("Welcher Operator soll genutzt werden? (<,>,=...): ")
                filter = input("Mit was soll " + key + " verglichen werden?: ")
                where.append({ key: [operator, filter] })
        print()
        valueStack = {
            "table": u_table,
            "query": values,
            "where": where
        }
        string = {
            "mode": "u",
            "values": valueStack
        }
        dbs = {
            "dbs": dbs_choice,
            "params": string
        }
        print(controller.recieveInput(json.dumps(dbs)))
    elif crud_choice == "4":
        # DELETE: optional filters only (no column selection).
        returnOverview = getOverview()
        print("+++++++++++++++++++")
        print("Alle Tabellen:")
        print()
        for key, value in returnOverview:
            print(key)
        print()
        d_table = input("Welche Tabelle (name): ")
        returnTable = getTable(d_table)
        print()
        print("Sollen Filter hinzugefügt werden?")
        print()
        where = []
        for key, value in returnTable:
            choice = input("Soll nach " + key + " gefiltert werden? [j/n]")
            if choice == "j" or choice == "y":
                operator = input("Welcher Operator soll genutzt werden? (<,>,=...): ")
                filter = input("Mit was soll " + key + " verglichen werden?: ")
                where.append({ key: [operator, filter] })
        print()
        valueStack = {
            "table": d_table,
            "where": where
        }
        string = {
            "mode": "d",
            "values": valueStack
        }
        dbs = {
            "dbs": dbs_choice,
            "params": string
        }
        controller.recieveInput(json.dumps(dbs))
    elif crud_choice == "5":
        # Re-show the DBS menu on the next iteration.
        ask_for_dbs = True
| joshelboy/db2_a1 | main.py | main.py | py | 6,350 | python | en | code | 0 | github-code | 13 |
3900675070 | import sys
from flask_restful import Resource, reqparse
from flask import jsonify
from flask_jwt_simple import create_jwt, jwt_required, get_jwt_identity
from server import bcrypt
from server.models.GradeDistribution import GradeDistribution
from server.models.User import User
from server.models.Lecturer import Lecturer
from server.models.Note import Note
from server.models.Course import Course
from server.models.Term import Term
from server.models.Comment import Comment
from server.models.Event import Event
from server.helpers import response
# Shared request parser for every admin endpoint below.  Each endpoint only
# reads the arguments relevant to it; unknown arguments parse to None.
# Fix: 'password' and 'email' were each registered twice with identical
# settings — the duplicate add_argument calls are removed.
parser = reqparse.RequestParser()
parser.add_argument('name', type=str, help='Name must be a string')
parser.add_argument('email', type=str, help='Email must be a string')
parser.add_argument('password', type=str, help='Password must be a string')
parser.add_argument('confirm_password', type=str, help='Confirm Password must be a string')
parser.add_argument('comment', type=str, help='Comment must be a string')
parser.add_argument('title', type=str, help='Title must be a string')
parser.add_argument('description', type=str, help='Description must be a string')
parser.add_argument('started_at', type=str, help='Started at must be a string')
parser.add_argument('max_participant', type=int, help='Max participant must be a number')
parser.add_argument('passwordConfirm', type=str, help='Password must be a string')
parser.add_argument('slug', type=str, help='Slug must be a string')
parser.add_argument('season', type=str, help='Season must be a string')
parser.add_argument('term_year', type=str, help='Term year must be a string')
parser.add_argument('user_id', type=int, help='User Id must be a number')
parser.add_argument('course_id', type=int, help='Course Id must be a number')
parser.add_argument('term_id', type=int, help='Term Id must be a number')
parser.add_argument('english', type=bool, help='English must be a boolean')
parser.add_argument('course_code', type=int, help='Course Code must be an int')
parser.add_argument('content', type=str, help='Content must be a string')
parser.add_argument('lecturer', type=str, help='Lecturer must be a string')
parser.add_argument('link', type=str, help='Link must be a string')
class GetAllUsers(Resource):
    """Admin endpoint: list every user."""

    def get(self):
        """Return all users, unfiltered, in default order."""
        all_users = User().where().orderBy().get()
        payload = {'users': all_users.data()}
        return response(payload)
class UserUpdateAdmin(Resource):
    """Admin endpoint: update a user's name and email (slug is regenerated)."""

    # Security/consistency fix: every other admin mutation in this module is
    # guarded by @jwt_required; this endpoint was left unauthenticated.
    @jwt_required
    def post(self, user_id):
        args = parser.parse_args()
        # Check if the email is already taken or not (by a *different* user).
        email = args['email']
        user = User().where('email', email).first()
        if user.exists() and user.ATTRIBUTES['id'] != user_id:
            return response({
                'errors': 'This email is already taken'
            }, 400)
        # Update user
        user = User().where('id', '=', user_id).first()
        if user.exists() is True:
            user.update({
                'name': args['name'],
                'email': args['email'],
                'slug': user.generateSlug(name=args['name']),
            })
            return response({
                'user': user.data()
            })
        return response({
            'errors': [
                'User could not found'
            ]
        }, 404)
class UserDeleteAdmin(Resource):
    """Admin endpoint: delete a user and cascade-delete everything they own
    (comments, events, grade distributions, lecturers, notes)."""

    # Security/consistency fix: the other admin delete endpoints require a
    # JWT; this one was unauthenticated.  A stray debug print to stderr was
    # also removed.
    @jwt_required
    def post(self, user_id):
        user = User().where('id', '=', user_id).first()
        if user.exists():
            # Direct children of the user.
            Comment().where('user_id', user_id).get().delete()
            Event().where('user_id', user_id).get().delete()
            GradeDistribution().where('user_id', user_id).get().delete()
            # Lecturers created by this user, plus data hanging off each one.
            lecturers = Lecturer().where('user_id', user_id).get()
            for lecturer in lecturers.data():
                Comment().where([['type', '=', 'lecturers'], ['type_id', '=', lecturer['id']]]).get().delete()
                GradeDistribution().where('lecturer_id', '=', lecturer['id']).get().delete()
            lecturers.delete()
            # Notes uploaded by this user, plus their comments.
            notes = Note().where('user_id', user_id).get()
            for note in notes.data():
                Comment().where([['type', '=', 'notes'], ['type_id', '=', note['id']]]).get().delete()
            notes.delete()
            user.delete()
            return response({
                'message': 'User deleted successfully'
            }, 202)
        return response({
            'message': 'User does not exist'
        }, 404)
class LecturerDeleteAdmin(Resource):
    """Admin endpoint: delete a lecturer and everything attached to them."""

    @jwt_required
    def post(self, lecturer_id):
        lecturer = Lecturer().where('id', lecturer_id).first()
        if not lecturer.exists():
            # NOTE(review): 401 for "not found" is unusual (siblings use 404);
            # preserved as-is.
            return response({
                'errors': [
                    'Lecturer could not found'
                ]
            }, 401)
        # Cascade: comments on this lecturer and their grade distributions.
        Comment().where([['type', '=', 'lecturers'], ['type_id', '=', lecturer_id]]).get().delete()
        GradeDistribution().where('lecturer_id', '=', lecturer_id).get().delete()
        lecturer.delete()
        return response({
            'message': 'Lecturer deleted'
        })
class LecturerUpdateAdmin(Resource):
    """Admin endpoint: update a lecturer's name and email (slug regenerated)."""

    # Security/consistency fix: LecturerDeleteAdmin and the other admin
    # mutations require a JWT; this endpoint was unauthenticated.
    @jwt_required
    def post(self, lecturer_id):
        args = parser.parse_args()
        # Check if the email is already taken or not (by another lecturer).
        email = args['email']
        lecturer = Lecturer().where('email', email).first()
        if lecturer.exists() and lecturer.ATTRIBUTES['id'] != lecturer_id:
            return response({
                'errors': 'This email is already taken'
            }, 400)
        lecturer = Lecturer().where('id', '=', lecturer_id).first()
        if lecturer.exists() is True:
            lecturer.update({
                'name': args['name'],
                'email': args['email'],
                'slug': lecturer.generateSlug(name=args['name'])
            })
            return response({
                'lecturer': lecturer.data()
            })
        return response({
            'errors': [
                'Lecturer could not found'
            ]
        }, 404)
class NoteDeleteAdmin(Resource):
    """Admin endpoint: delete a single note by id."""

    @jwt_required
    def post(self, note_id):
        note = Note().where('id', '=', note_id).first()
        if not note.exists():
            return response({
                'message': 'Note does not exist'
            }, 404)
        note.delete()
        return response({
            'message': 'Note deleted successfully'
        }, 202)
class NoteUpdateAdmin(Resource):
    """Admin endpoint: overwrite a note's editable fields (slug regenerated
    from the new title)."""

    @jwt_required
    def post(self, note_id):
        args = parser.parse_args()
        note = Note().where('id', '=', note_id).first()
        if note.exists() is False:
            return response({
                'message': 'That note does not exist'
            }, 401)
        note.update({
            'title': args['title'],
            'content': args['content'],
            'lecturer': args['lecturer'],
            'link': args['link'],
            'course_id': args['course_id'],
            'course_code': args['course_code'],
            'english': args['english'],
            'term_id': args['term_id'],
            'slug': note.generateSlug(name=args['title']),
        })
        return response({
            'message': 'Note successfully updated!'
        }, 200)
class CourseAddAdmin(Resource):
    """Admin endpoint: create a new course."""

    @jwt_required
    def post(self):
        args = parser.parse_args()
        course = Course()
        course.create({
            'name': args['name'],
        })
        if course.validate() is not True:
            return response({
                'errors': course.getErrors()
            }, 400)
        course.save()
        return response({
            'course': course.data()
        }, 200)
class CourseDeleteAdmin(Resource):
    """Admin endpoint: delete a course plus its notes and grade
    distributions."""

    @jwt_required
    def post(self, course_id):
        course = Course().where('id', '=', course_id).first()
        # NOTE(review): the cascade below runs even when the course itself
        # does not exist, and it iterates the query object directly
        # (UserDeleteAdmin iterates .data() instead) — verify against the
        # model API before relying on either behavior.
        notes = Note().where('course_id', course_id).get()
        for note in notes:
            note.delete()
        grade_dists = GradeDistribution().where('course_id', course_id).get()
        for grade_dist in grade_dists:
            grade_dist.delete()
        if course.exists():
            course.delete()
            return response({
                'message': 'Course deleted successfully'
            }, 202)
        return response({
            'message': 'Course does not exist'
        }, 404)
class CourseUpdateAdmin(Resource):
    """Admin endpoint: rename a course."""

    @jwt_required
    def post(self, course_id):
        new_name = parser.parse_args()['name']
        course = Course().where('id', '=', course_id).first()
        if not course.exists():
            return response({
                'message': 'That course does not exist'
            }, 401)
        course.update({
            'name': new_name,
        })
        return response({
            'message': 'Course successfully updated!'
        }, 200)
class TermAddAdmin(Resource):
    """Admin endpoint: create a new term (season + year)."""

    @jwt_required
    def post(self):
        args = parser.parse_args()
        term = Term()
        term.create({
            'season': args['season'],
            'term_year': args['term_year']
        })
        if term.validate() is not True:
            return response({
                'errors': term.getErrors()
            }, 400)
        term.save()
        # NOTE(review): response key is 'course' although this is a term —
        # preserved as-is for API compatibility.
        return response({
            'course': term.data()
        }, 200)
class TermDeleteAdmin(Resource):
    """Admin endpoint: delete a term plus its notes and grade
    distributions."""

    @jwt_required
    def post(self, term_id):
        term = Term().where('id', '=', term_id).first()
        # NOTE(review): as in CourseDeleteAdmin, the cascade runs even when
        # the term does not exist, and the query object is iterated directly
        # rather than via .data() — confirm the model API supports this.
        notes = Note().where('term_id', term_id).get()
        for note in notes:
            note.delete()
        grade_dists = GradeDistribution().where('term_id', term_id).get()
        for grade_dist in grade_dists:
            grade_dist.delete()
        if term.exists():
            term.delete()
            return response({
                'message': 'Term deleted successfully'
            }, 202)
        return response({
            'message': 'Term does not exist'
        }, 404)
class TermUpdateAdmin(Resource):
    """Admin endpoint: update a term's season and year."""

    @jwt_required
    def post(self, term_id):
        args = parser.parse_args()
        term = Term().where('id', '=', term_id).first()
        if not term.exists():
            return response({
                'message': 'That term does not exist'
            }, 401)
        term.update({
            'season': args['season'],
            'term_year': args['term_year'],
        })
        return response({
            'message': 'Term successfully updated!'
        }, 200)
class CommentDeleteAdmin(Resource):
    """Admin endpoint: delete a comment by id."""

    @jwt_required
    def post(self, comment_id):
        comment = Comment().where('id', '=', comment_id).first()
        if not comment.exists():
            return response({
                'message': 'Comment not found in the database'
            }, 404)
        comment.delete()
        return response({
            'message': 'Comment deleted successfully'
        }, 202)
class CommentUpdateAdmin(Resource):
    """Admin endpoint: replace a comment's text."""

    @jwt_required
    def post(self, comment_id):
        new_text = parser.parse_args()['comment']
        comment = Comment().where('id', '=', comment_id).first()
        if not comment.exists():
            return response({
                'message': 'That comment does not exist'
            }, 401)
        comment.update({
            'comment': new_text,
        })
        return response({
            'message': 'Comment successfully updated!'
        }, 200)
class EventDeleteAdmin(Resource):
    """Admin endpoint: delete an event by id."""

    @jwt_required
    def post(self, event_id):
        event = Event().where('id', '=', event_id).first()
        if not event.exists():
            return response({
                'message': 'Event does not exist'
            }, 404)
        event.delete()
        return response({
            'message': 'Event deleted successfully'
        }, 202)
class EventUpdateAdmin(Resource):
    """Admin endpoint: overwrite an event's editable fields."""

    @jwt_required
    def post(self, event_id):
        args = parser.parse_args()
        event = Event().where('id', '=', event_id).first()
        if not event.exists():
            return response({
                'message': 'That event does not exist'
            }, 401)
        event.update({
            'title': args['title'],
            'description': args['description'],
            'max_participant': args['max_participant'],
            'started_at': args['started_at']
        })
        return response({
            'message': 'Event successfully updated!'
        }, 200)
| alitolga/ITU-CS-Database-Project | server/resources/admin.py | admin.py | py | 13,050 | python | en | code | 0 | github-code | 13 |
42116011928 | import sys
lines=sys.stdin.readlines()
N=int(lines[0].split()[0])
e=[[] for i in range(N)]
for line in lines[1:]:
[a,b,w]=[int(x) for x in line.split()]
e[a-1].append((b-1,w))
mx=[{} for i in range(N)]
mx[0][0]=0
for k in range(N):
for (t,w) in e[k]:
for (n,s) in mx[k].items():
if (not n+1 in mx[t]) or (s+w>mx[t][n+1]):
mx[t][n+1] = s+w
print("IMPOSSIBLE" if not mx[N-1] else max([float(s)/n for (n,s) in mx[N-1].items()]))
| Kodsport/swedish-olympiad-2018 | final/trevligvag/submissions/partially_accepted/par_slow.py | par_slow.py | py | 474 | python | en | code | 0 | github-code | 13 |
12458347774 | from rest_framework import serializers
from veterinarian_information.models import AcademicInformation
class AcademicInformationSerializer(serializers.ModelSerializer):
    """Serializer for AcademicInformation records (a veterinarian's degrees).

    All validation error messages are user-facing Spanish strings and must
    stay in Spanish.  The max_length limits presumably mirror the model's
    column sizes — TODO confirm against the AcademicInformation model.
    """

    # Degree/thesis title (required, <= 256 chars).
    title = serializers.CharField(
        max_length=256, required=True, error_messages={
            'required': 'Por favor, ingrese un título',
            'max_length': 'El título no puede tener más de 256 caracteres'
        })
    # Issuing university (required, <= 100 chars).
    university = serializers.CharField(
        max_length=100, required=True, error_messages={
            'required': 'Por favor, ingrese una universidad',
            'max_length': 'La universidad no puede tener más de 100 caracteres'
        })
    # Completion date in ISO format (YYYY-MM-DD).
    year = serializers.DateField(
        required=True, error_messages={
            'invalid': 'La fecha de finalización debe tener el formato YYYY-MM-DD',
            'required': 'Por favor, ingrese una fecha'
        })
    # Country where the degree was obtained (required, <= 32 chars).
    country = serializers.CharField(max_length=32, required=True, error_messages={
        'required': 'Por favor, ingrese un país',
        'max_length': 'El país no puede tener más de 32 caracteres'
    })
    # Name of the academic degree (required, <= 100 chars).
    academic_degree = serializers.CharField(max_length=100, required=True, error_messages={
        'required': 'Por favor, ingrese un título académico',
        'max_length': 'El título académico no puede tener más de 100 caracteres'
    })
    # Whether the program is still in progress (optional).
    currently = serializers.BooleanField(required=False)

    class Meta:
        model = AcademicInformation
        # id and the audit timestamps are server-managed, never client-supplied.
        exclude = ['id', 'added_time', 'update_time']
| Eliana-Janneth/vetapp-backend | veterinarian_information/serializers/academic_information.py | academic_information.py | py | 1,517 | python | es | code | 0 | github-code | 13 |
7729389650 | from bokeh.plotting import figure
from bokeh.embed import components
from bokeh.io import output_file, show
from bokeh.layouts import row
import datetime
import sqlite3
# SQLite database of plant sensor readings (path inside the server jail).
plant_data_db = '/var/jail/home/team07/plant_data.db'
def request_handler(request):
    """Serve plant sensor data for the last 24 hours.

    `request` is assumed to be a dict with 'method' and a 'values' dict
    holding 'id' (plant id), 'esp' ('true' for raw-data mode) and, in that
    mode, 'data_type' — TODO confirm against the calling framework.

    GET with esp == "true": return up to 48 raw readings (oldest first) of
    one measurement type.  GET otherwise: return an HTML page with four
    embedded Bokeh plots (light, humidity, moisture, temperature).
    """
    if request['method'] == "GET":
        id = request["values"]["id"]
        one_day_ago = datetime.datetime.now() - datetime.timedelta(seconds = 60*60*24)
        if request["values"]["esp"] == "true":
            # Raw-data mode for the ESP device: a plain list of values.
            kind = request["values"]["data_type"]
            with sqlite3.connect(plant_data_db) as c:
                result = []
                # Rows are newest-first; columns 2..5 are light, humidity,
                # moisture, temperature (matching the indices below).
                data = c.execute("""SELECT * FROM plant_data WHERE plant_id = ? AND time_ > ? ORDER BY time_ DESC;""", (id, one_day_ago)).fetchall()
                if (kind == 'light'):
                    index = 2
                elif (kind == 'humidity'):
                    index = 3
                elif (kind == 'water'):
                    index = 4
                elif (kind == 'temperature'):
                    index = 5
                else:
                    return "invalid type of measurement :("
                # Take up to 48 most-recent readings; the bare except
                # swallows the IndexError when fewer rows exist.
                # NOTE(review): bare except also hides unrelated errors.
                for i in range(48):
                    try:
                        result.append(data[i][index])
                    except:
                        continue
                # Reverse so the list is oldest-first.
                result.reverse()
                return result
        # Dashboard mode: fetch the day's readings oldest-first and plot them.
        with sqlite3.connect(plant_data_db) as c:
            data = c.execute("""SELECT * FROM plant_data WHERE plant_id = ? AND time_ > ? ORDER BY time_ ASC;""", (id, one_day_ago)).fetchall()
        light = [elem[2] for elem in data]
        times = [datetime.datetime.strptime(elem[6],'%Y-%m-%d %H:%M:%S.%f') for elem in data]
        light_figure = figure(x_axis_type="datetime", plot_height = 300)
        light_figure.line(times, light, legend_label = id + " light", line_width = 4, color = 'yellow')
        light_figure.xaxis.axis_label = "Time"
        light_figure.yaxis.axis_label = "Light (Lux)"
        humidity = [elem[3] for elem in data]
        humidity_figure = figure(x_axis_type="datetime", plot_height = 300)
        humidity_figure.line(times, humidity, legend_label = id + " humidity", line_width=4)
        humidity_figure.xaxis.axis_label = "Time"
        humidity_figure.yaxis.axis_label = "Humidity (percent)"
        # Bokeh components: (script, div) pairs embedded into the HTML below.
        script1, div1 = components(row(light_figure, humidity_figure))
        water = [elem[4] for elem in data]
        water_figure = figure(x_axis_type="datetime", plot_height = 300)
        water_figure.line(times, water, legend_label = id + " water", line_width = 4, color = 'green')
        water_figure.xaxis.axis_label = "Time"
        water_figure.yaxis.axis_label = "Moisture"
        temperature = [elem[5] for elem in data]
        temp_fig = figure(x_axis_type="datetime", plot_height = 300)
        temp_fig.line(times, temperature, legend_label = id + " temperature", line_width = 4, color='red')
        temp_fig.xaxis.axis_label = "Time"
        temp_fig.yaxis.axis_label = "Temperature (farenheit)"
        script2, div2 = components(row(temp_fig, water_figure))
        return f'''<!DOCTYPE html>
<html> <script src="https://cdn.bokeh.org/bokeh/release/bokeh-2.3.0.min.js"></script>
<body>
{div1}
{div2}
</body>
{script1}
{script2}
</html>
'''
    else:
        return "Invalid request"
| zjohhson/Projects | You Grow Girl!/plant_data_grapher.py | plant_data_grapher.py | py | 3,716 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.