seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
14806245075 | import timeit
def t1():
    """Benchmark body: build a 10k-element list with repeated append()."""
    items = []
    for value in range(10000):
        items.append(value)
def t2():
    """Benchmark body: build a 10k-element list by list concatenation.

    Deliberately quadratic — each `+` copies the whole accumulated list.
    """
    items = []
    for value in range(10000):
        items = items + [value]
def t3():
    """Benchmark body: build a 10k-element list with a list comprehension."""
    items = [value for value in range(10000)]
def t4():
    """Benchmark body: build a 10k-element list via the list() constructor."""
    items = list(range(10000))
def t5():
    """Benchmark body: build a 10k-element list with insert(0, ...).

    Deliberately quadratic — every front insert shifts the whole list.
    """
    items = []
    for value in range(10000):
        items.insert(0, value)
# One Timer per builder; the setup statement imports it from the running script.
time1 = timeit.Timer('t1()', 'from __main__ import t1')
time2 = timeit.Timer('t2()', 'from __main__ import t2')
time3 = timeit.Timer('t3()', 'from __main__ import t3')
time4 = timeit.Timer('t4()', 'from __main__ import t4')
time5 = timeit.Timer('t5()', 'from __main__ import t5')
# Time each builder over 100 runs; the trailing comments record one observed
# result in seconds.  ('列表推导式' = "list comprehension" in Chinese.)
print('append:%s' % time1.timeit(number=100)) # 0.127835
print('[]+[]:%s' % time2.timeit(number=100)) # 19.7184115
print('列表推导式:%s' % time3.timeit(number=100)) # 0.04835290000000114
print('list():%s' % time4.timeit(number=100)) # 0.026356299999999777
print('insert:%s' % time5.timeit(number=100)) # 2.6034982000000007
| penguinsss/Project | 基础语法/列表类型性能测试.py | 列表类型性能测试.py | py | 978 | python | en | code | 0 | github-code | 36 |
34212063715 | # https://www.acmicpc.net/problem/14500
# solution
# 1) 초기 좌표 (i,j)를 정한다
# 2) 인접한 좌표에 대해 dfs 하며 4개 블럭으로 가능한 합의 최대값을 갱신한다
# 3) dfs 불가능한 'ㅗ' 모양 블럭으로 가능한 값을 계산해 최대값을 갱신한다
# 4) 1)로 돌아가 새로운 초기 좌표(i, j) 정한다. brute-forcely 순회한다
# 5) 순회를 마치고 최대값을 출력한다
# TIL
# O(N)인 초기화 함수(reset_visited)를 brute-force에 이용하는 것은 가볍지 않은 코드이다. 결과적으로 O(N^2)가 된다
# 테트로미노의 길이인 4 level 동안 현재 노드에서 가능한 방향을 모두 탐색하는 dfs 함수
def dfs(i,j,cnt,this_sum):
    """Grow a tetromino cell by cell via DFS; record the best sum in max_sum.

    `cnt` is the number of cells placed so far, `this_sum` their total.
    """
    global nums, visited, max_sum
    # Neighbour offsets: down, right, up, left (same probe order as before).
    moves = ((1, 0), (0, 1), (-1, 0), (0, -1))
    if cnt == 4:
        # A complete 4-cell shape — keep the best total seen so far.
        max_sum = max(this_sum, max_sum)
        return
    for step_i, step_j in moves:
        ni = i + step_i
        nj = j + step_j
        if 0 <= ni < N and 0 <= nj < M and not visited[ni][nj]:
            visited[ni][nj] = True
            dfs(ni, nj, cnt + 1, this_sum + nums[ni][nj])
            visited[ni][nj] = False
# 'ㅗ' 모양 블럭의 경우 dfs 탐색이 불가(revisit 필요)하기에 따로 계산 해주자
def sum_directly(locations:list):
    """Return the sum of nums[r][c] over every (r, c) pair in *locations*."""
    global nums
    return sum(nums[r][c] for r, c in locations)
def fucking_block(i,j):
    """Score the four rotations of the T ('ㅗ') tetromino anchored at (i, j).

    The T shape revisits a cell relative to the DFS walk, so it is handled
    separately from dfs().  Updates the global max_sum in place.
    """
    global nums, max_sum
    # The 4 cells of each rotation, in the same order as before: ㅗ, ㅓ, ㅜ, ㅏ.
    rotations = (
        [(i, j - 1), (i, j), (i - 1, j), (i, j + 1)],
        [(i, j - 1), (i, j), (i - 1, j), (i + 1, j)],
        [(i, j - 1), (i, j), (i + 1, j), (i, j + 1)],
        [(i - 1, j), (i, j), (i, j + 1), (i + 1, j)],
    )
    for cells in rotations:
        # Only score a rotation whose cells all lie inside the N x M grid.
        if all(0 <= r < N and 0 <= c < M for r, c in cells):
            max_sum = max(max_sum, sum_directly(cells))
# nums의 방문 여부를 표시한 리스트 visited를 reset하는 함수
## -> 사용하지 말자(시간초과)
# def reset_visited():
# visited =[]
# for _ in range(N):
# visited.append([False for _ in range(M)])
# return visited
if __name__ == "__main__":
    # Grid size, then N rows of M integers.
    N, M = map(int, input().split())
    nums = [list(map(int, input().split())) for _ in range(N)]
    # One shared visited mask, toggled around each start cell (resetting a
    # fresh mask per cell would add an O(N*M) cost to every iteration).
    visited = [[False] * M for _ in range(N)]
    max_sum = 0
    # Brute-force every starting cell.
    for i in range(N):
        for j in range(M):
            visited[i][j] = True        # mark the start cell as used
            dfs(i, j, 1, nums[i][j])    # grow all DFS-reachable tetrominoes
            fucking_block(i, j)         # plus the T shape dfs cannot build
            visited[i][j] = False
    print(max_sum)
| chankoo/problem-solving | graph/14500-테트로미노.py | 14500-테트로미노.py | py | 3,502 | python | ko | code | 1 | github-code | 36 |
20591380597 | from django.contrib.auth.models import AbstractUser
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from api_yamdb.settings import ADMIN, MODERATOR, ROLE_CHOICES, USER
from .validators import validate_year
class User(AbstractUser):
"""Custom user model.
Adds ``bio`` and ``role`` on top of AbstractUser; ``email`` is made
unique and required (blank=False)."""
# Optional free-form profile text.
bio = models.TextField(max_length=500, blank=True)
# Access role; choices come from ROLE_CHOICES, plain USER by default.
role = models.CharField(
choices=ROLE_CHOICES,
blank=True, max_length=50,
default=USER)
email = models.EmailField(
unique=True, blank=False, max_length=254, verbose_name='email address')
# One-time code used to confirm the account, and when it was issued.
confirmation_code = models.CharField(max_length=50, blank=True)
data_confirmation_code = models.DateTimeField(
auto_now_add=True,)
class Meta:
ordering = ['role']
verbose_name = 'Пользователь'
verbose_name_plural = 'Пользователи'
# Role predicates used by permission checks.
@property
def is_admin(self):
return self.role == ADMIN
@property
def is_user(self):
return self.role == USER
@property
def is_moderator(self):
return self.role == MODERATOR
# Categories, genres, titles
class Category(models.Model):
"""A work's category (e.g. film, book), identified by a unique slug."""
name = models.CharField(
max_length=256,
verbose_name="Category name",
)
# URL-safe unique identifier; also the default ordering key.
slug = models.SlugField(
max_length=50,
unique=True,
verbose_name="Category slug",
)
class Meta:
verbose_name = 'Категория'
verbose_name_plural = 'Категории'
ordering = ['slug']
def __str__(self):
# The slug is the stable public identifier, so display it.
return self.slug
class Genre(models.Model):
"""A work's genre, identified by a unique slug."""
name = models.CharField(
max_length=256,
verbose_name="Genre name",
)
# URL-safe unique identifier; also the default ordering key.
slug = models.SlugField(
max_length=50,
unique=True,
verbose_name="Genre slug",
)
class Meta:
verbose_name = 'Жанр'
verbose_name_plural = 'Жанры'
ordering = ['slug']
def __str__(self):
# The slug is the stable public identifier, so display it.
return self.slug
class Title(models.Model):
"""A reviewable work with a year, one optional category and many genres."""
name = models.CharField(
max_length=100,
verbose_name="Product name",
)
# Publication year, validated by the project-level validate_year.
year = models.PositiveSmallIntegerField(
verbose_name="The year of publishing",
validators=[validate_year],
)
# Single optional category; the title survives category deletion (SET_NULL).
category = models.ForeignKey(
Category,
blank=True,
null=True,
on_delete=models.SET_NULL,
related_name="titles",
verbose_name="Product category",
)
# A title may belong to any number of genres.
genre = models.ManyToManyField(
Genre,
blank=True,
related_name="titles",
verbose_name="Product genre",
)
description = models.CharField(
max_length=100,
blank=True,
null=True,
verbose_name="Product Description",
)
class Meta:
verbose_name = 'Произведение'
verbose_name_plural = 'Произведения'
ordering = ['name']
def __str__(self):
return self.name
class Review(models.Model):
"""A user's review of a Title: text plus a 1-10 score.
At most one review per (title, author), enforced by a unique constraint."""
title = models.ForeignKey(
Title,
verbose_name='Произведение',
on_delete=models.CASCADE,
related_name='reviews'
)
text = models.TextField(
verbose_name='Текст',
)
author = models.ForeignKey(
User,
verbose_name='Автор',
on_delete=models.CASCADE,
related_name='reviews',
)
# Score restricted to the inclusive range 1..10.
score = models.PositiveSmallIntegerField(
verbose_name='Рейтинг',
validators=[MinValueValidator(1), MaxValueValidator(10)]
)
# Set once at creation; indexed for date-ordered queries.
pub_date = models.DateTimeField(
verbose_name='Дата публикации',
auto_now_add=True,
db_index=True
)
class Meta:
verbose_name = 'Отзыв'
verbose_name_plural = 'Отзывы'
ordering = ['pub_date']
# One review per author per title.
constraints = [
models.UniqueConstraint(
fields=['title', 'author'],
name='unique_review'
),
]
class Comment(models.Model):
"""A user's comment attached to a Review."""
review = models.ForeignKey(
Review,
verbose_name='Отзыв',
on_delete=models.CASCADE,
related_name='comments'
)
text = models.TextField(
verbose_name='Текст',
)
author = models.ForeignKey(
User,
verbose_name='Пользователь',
on_delete=models.CASCADE,
related_name='comments'
)
# Set once at creation; indexed for date-ordered queries.
pub_date = models.DateTimeField(
verbose_name='Дата публикации',
auto_now_add=True,
db_index=True
)
class Meta:
verbose_name = 'Комментарий'
verbose_name_plural = 'Комментарии'
ordering = ['pub_date']
| QBC1/api_yamdb | api_yamdb/reviews/models.py | models.py | py | 4,957 | python | en | code | 2 | github-code | 36 |
13081546133 | from graphics import *;
from random import *
# A small "catch the cyan square" game built on graphics.py (Zelle).
window = GraphWin("Window", 500,500);
window.setBackground("white")
# Scatter 588 randomly placed 20x20 squares in random greyscale shades.
square = []
for x in range(0,588):
rx = randint(0,500)
ry = randint(0,500)
orx = rx+20
ory = ry+20
y = Rectangle(Point(rx, ry), Point(orx, ory))
y.draw(window)
rgb= randint(0,255)
y.setFill(color_rgb(rgb,rgb,rgb))
square.append(y)
# The target: one cyan 20x20 square somewhere near the middle.
brx = randint(150,300)
bry = randint(150,300)
obrx = brx+20
obry = bry+20
blue = Rectangle(Point(brx, bry), Point(obrx, obry))
blue.draw(window)
blue.setOutline("cyan")
blue.setFill("cyan")
# Click loop: each round consumes clicks; if a click lands inside the cyan
# square it is "caught" and the loop ends, otherwise the square jitters away.
# NOTE(review): getMouse() is called three times per round (once discarded,
# once for x, once for y) — likely each coordinate comes from a *different*
# click; confirm whether a single getMouse() result was intended.
while True:
window.getMouse()
lmx = blue.getP1()
lmy = blue.getP2()
olmx = lmx.getX()
olmy = lmy.getY()
m = window.getMouse().getX()
om = window.getMouse().getY()
print(m)
print(olmx)
print(om)
print(olmy)
if(m >= olmx and m <= olmx+20 and om <= olmy and om >= olmy-20):
break;
else:
rx = randint(-5,5)
ry = randint(-5,5)
blue.move(rx,ry)
#ra = randint(0,589)
# Endless jitter animation of the background squares after the catch.
# NOTE(review): indentation was lost in this copy — it is ambiguous whether
# the trailing getMouse()/close() calls sit inside or after this loop; as
# written after the loop they would be unreachable. Confirm against the
# original layout.
while True:
for ra in square:
orx = randint(-2,2)
ra.move(orx,orx)
ra.undraw()
ra.draw(window)
#print(ra)
window.getMouse();
window.close();
| Kevinloritsch/Buffet-Dr.-Neato | Python Warmup/Warmup #7/run7.py | run7.py | py | 1,194 | python | en | code | 1 | github-code | 36 |
12484573322 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
from urllib.parse import urlparse
from urllib.request import urlretrieve
from sklearn.metrics import roc_auc_score
def download(url):
    """Downloads a file if it doesn't already exist.

    Args:
        url: string or Path; the file is saved locally under the last
            path component of the URL.

    Returns: string filename
    """
    pr = urlparse(url)
    path = Path(pr.path)
    filename = path.name
    if not Path(filename).exists():
        local_filename, headers = urlretrieve(url, filename)
        assert local_filename == filename
        # Bug fix: the message previously printed a literal placeholder
        # instead of interpolating the downloaded file's name.
        print(f"Downloaded {filename}")
    return filename
def download_data_files():
    """Download the four competition CSV files (skipping any already present)."""
    path = "https://raw.githubusercontent.com/drivendataorg/tutorial-flu-shot-learning/main/data/"
    filenames = [
        "training_set_features.csv",
        "training_set_labels.csv",
        "test_set_features.csv",
        "submission_format.csv",
    ]
    for filename in filenames:
        # Bug fix: the URL previously interpolated a literal placeholder
        # instead of the per-file name, so every request hit the same
        # nonexistent URL.  `path` already ends with '/'.
        url = f"{path}{filename}"
        download(url)
def decorate(**options):
    """Decorate the current axes.

    Call decorate with keyword arguments like

    decorate(title='Title',
             xlabel='x',
             ylabel='y')

    The keyword arguments can be any of the axis properties
    https://matplotlib.org/api/axes_api.html
    """
    axes = plt.gca()
    axes.set(**options)
    # Only draw a legend when at least one artist carries a label.
    handles, labels = axes.get_legend_handles_labels()
    if handles:
        axes.legend(handles, labels)
    plt.tight_layout()
def crosstab(x, y):
    """Make a cross tabulation and normalize the columns as percentages.

    Args:
        x: sequence of values that go in the index
        y: sequence of values that go in the columns

    returns: DataFrame
    """
    fractions = pd.crosstab(x, y, normalize="columns")
    return fractions * 100
def value_counts(seq, **options):
    """Version of value_counts that works with any sequence type.

    Args:
        seq: sequence
        options: passed to pd.Series.value_counts

    Returns: pd.Series
    """
    series = pd.Series(seq)
    return series.value_counts(**options)
def score_model(model, features_df, labels_df):
    """Compute the average AUC score for the two labels.

    Args:
        model: fitted Scikit-learn model (multi-output classifier)
        features_df: DataFrame of features
        labels_df: DataFrame of labels with 'h1n1_vaccine' and
            'seasonal_vaccine' columns

    Returns: float AUC score
    """
    pred_h1n1, pred_seasonal = model.predict_proba(features_df)
    # Column 1 of each probability array is P(positive class).
    auc_h1n1 = roc_auc_score(labels_df["h1n1_vaccine"], pred_h1n1.T[1])
    auc_seasonal = roc_auc_score(labels_df["seasonal_vaccine"], pred_seasonal.T[1])
    return (auc_h1n1 + auc_seasonal) / 2
def make_submission(model, test_features_df):
    """Make a DataFrame ready for submission to the competition.

    Args:
        model: fitted Scikit-learn model (multi-output classifier)
        test_features_df: DataFrame of features

    Returns: DataFrame of predicted probabilities, indexed like the input
    """
    pred_h1n1, pred_seasonal = model.predict_proba(test_features_df)
    # Column 1 of each probability array is P(positive class).
    columns = {
        "h1n1_vaccine": pred_h1n1.T[1],
        "seasonal_vaccine": pred_seasonal.T[1],
    }
    return pd.DataFrame(columns, index=test_features_df.index)
| drivendataorg/tutorial-flu-shot-learning | utils.py | utils.py | py | 3,053 | python | en | code | 2 | github-code | 36 |
1729374906 | #
# @lc app=leetcode.cn id=26 lang=python3
#
# [26] 删除有序数组中的重复项
#
# @lc code=start
class Solution:
    def removeDuplicates(self, nums: "List[int]") -> int:
        """Remove duplicates in-place from a sorted list; return the new length.

        Two-pointer sweep: ``write`` marks the end of the deduplicated
        prefix, ``read`` scans ahead; each new value is copied just past
        the prefix.  (Annotation is quoted so the class defines without a
        ``typing`` import; dead commented-out variants removed.)
        """
        if not nums:
            return 0
        write = 0
        for read in range(1, len(nums)):
            if nums[read] != nums[write]:
                write += 1
                # Skip the redundant self-assignment while no duplicate
                # has been seen yet (write + 1 == read).
                if write != read:
                    nums[write] = nums[read]
        return write + 1
# @lc code=end
| mckaymckay/shuati | 26.删除有序数组中的重复项.py | 26.删除有序数组中的重复项.py | py | 641 | python | en | code | 0 | github-code | 36 |
36953625709 | __all__ = [
'MatchesException',
'Raises',
'raises',
]
import sys
from testtools.compat import (
classtypes,
_error_repr,
isbaseexception,
istext,
)
from ._basic import MatchesRegex
from ._higherorder import AfterPreproccessing
from ._impl import (
Matcher,
Mismatch,
)
class MatchesException(Matcher):
"""Match an exc_info tuple against an exception instance or type."""
def __init__(self, exception, value_re=None):
"""Create a MatchesException that will match exc_info's for exception.
:param exception: Either an exception instance or type.
If an instance is given, the type and arguments of the exception
are checked. If a type is given only the type of the exception is
checked. If a tuple is given, then as with isinstance, any of the
types in the tuple matching is sufficient to match.
:param value_re: If 'exception' is a type, and the matchee exception
is of the right type, then match against this. If value_re is a
string, then assume value_re is a regular expression and match
the str() of the exception against it. Otherwise, assume value_re
is a matcher, and match the exception against it.
"""
Matcher.__init__(self)
self.expected = exception
# A plain string value_re is shorthand for "regex over str(exception)".
if istext(value_re):
value_re = AfterPreproccessing(str, MatchesRegex(value_re), False)
self.value_re = value_re
# _is_instance is True when `exception` is an exception *instance*:
# its type is neither a class type nor a tuple of classes.
expected_type = type(self.expected)
self._is_instance = not any(issubclass(expected_type, class_type)
for class_type in classtypes() + (tuple,))
def match(self, other):
# `other` must be a (type, value, traceback) exc_info tuple.
if type(other) != tuple:
return Mismatch('%r is not an exc_info tuple' % other)
expected_class = self.expected
if self._is_instance:
expected_class = expected_class.__class__
if not issubclass(other[0], expected_class):
return Mismatch('%r is not a %r' % (other[0], expected_class))
# For an instance, also require identical constructor arguments; for
# a type, optionally delegate value checking to value_re.
if self._is_instance:
if other[1].args != self.expected.args:
return Mismatch('%s has different arguments to %s.' % (
_error_repr(other[1]), _error_repr(self.expected)))
elif self.value_re is not None:
return self.value_re.match(other[1])
def __str__(self):
if self._is_instance:
return "MatchesException(%s)" % _error_repr(self.expected)
return "MatchesException(%s)" % repr(self.expected)
class Raises(Matcher):
"""Match if the matchee raises an exception when called.
Exceptions which are not subclasses of Exception propogate out of the
Raises.match call unless they are explicitly matched.
"""
def __init__(self, exception_matcher=None):
"""Create a Raises matcher.
:param exception_matcher: Optional validator for the exception raised
by matchee. If supplied the exc_info tuple for the exception raised
is passed into that matcher. If no exception_matcher is supplied
then the simple fact of raising an exception is considered enough
to match on.
"""
self.exception_matcher = exception_matcher
def match(self, matchee):
try:
result = matchee()
# No exception at all is a mismatch.
return Mismatch('%r returned %r' % (matchee, result))
# Catch all exceptions: Raises() should be able to match a
# KeyboardInterrupt or SystemExit.
except:
exc_info = sys.exc_info()
if self.exception_matcher:
mismatch = self.exception_matcher.match(exc_info)
if not mismatch:
# Matched: drop exc_info promptly (breaks the frame/traceback
# reference cycle) and report success by returning None.
del exc_info
return
else:
mismatch = None
# The exception did not match, or no explicit matching logic was
# performed. If the exception is a non-user exception (that is, not
# a subclass of Exception on Python 2.5+) then propogate it.
if isbaseexception(exc_info[1]):
del exc_info
raise
return mismatch
def __str__(self):
return 'Raises()'
def raises(exception):
    """Make a matcher that checks that a callable raises an exception.

    This is a convenience function, exactly equivalent to::

        return Raises(MatchesException(exception))

    See `Raises` and `MatchesException` for more information.
    """
    exception_matcher = MatchesException(exception)
    return Raises(exception_matcher)
| mongodb/mongo | src/third_party/wiredtiger/test/3rdparty/testtools-0.9.34/testtools/matchers/_exception.py | _exception.py | py | 4,567 | python | en | code | 24,670 | github-code | 36 |
16253007713 | #!/usr/bin/env python3
"""
Database Aggregator from a Kafka Consumer.
Author: Santhosh Balasa
Email: santhosh.kbr@gmail.com
Date: 18/May/2021
"""
import sys
import logging
import psycopg2
from kafka import KafkaConsumer
# Module-level setup: logging, connection constants, the Kafka consumer and
# the PostgreSQL connection are all created at import time.
logging.basicConfig(
format=f"%(asctime)s %(name)s %(levelname)-8s %(message)s",
level=logging.INFO,
datefmt="%Y-%m-%d %H:%M:%S",
)
logger = logging.getLogger(__name__)
# Global
# NOTE(review): broker address and the password embedded in SERVICE_URI are
# hard-coded credentials — move to environment variables / a secrets store.
BOOTSRAP_SERVER = "kafka-48ac8c2-santee-fabb.aivencloud.com:12059"
KAFKA_TOPIC = "website_checker"
DATABASE_NAME = "metrics_aggregator"
SERVICE_URI = f"postgres://avnadmin:caerdfvhm59zfn7b@pg-1f19cc97-santee-fabb.aivencloud.com:12057/{DATABASE_NAME}?sslmode=require"
# Kafka Consumer
# SSL client auth using the certificate files shipped under kafkaCerts/.
consumer = KafkaConsumer(
KAFKA_TOPIC,
bootstrap_servers=BOOTSRAP_SERVER,
security_protocol="SSL",
ssl_cafile="kafkaCerts/ca.pem",
ssl_certfile="kafkaCerts/service.cert",
ssl_keyfile="kafkaCerts/service.key",
)
# PostgreSQL
# Connect and run a sanity query; abort the process on any failure.
# NOTE(review): the bare `except:` hides the real connection error — catch
# psycopg2.Error and log the exception.
try:
db_conn = psycopg2.connect(SERVICE_URI)
cursor = db_conn.cursor()
cursor.execute("SELECT current_database()")
result = cursor.fetchone()
logger.info(f"Successfully connected to Database: {result[0]}")
except:
logger.error(f"Failed to connect Database: {DATABASE_NAME}")
sys.exit(-1)
# SQL Tables
# NOTE(review): CREATE TABLE without IF NOT EXISTS fails on a second run
# against the same database — confirm the tables are expected to be fresh.
cursor.execute(
"""CREATE TABLE KEYS(
ID INT PRIMARY KEY NOT NULL,
DATETIME TEXT NOT NULL
);"""
)
cursor.execute(
"""CREATE TABLE VALUES(
ID INT PRIMARY KEY NOT NULL,
URL TEXT NOT NULL,
STATUS TEXT NOT NULL,
ELAPSED_TIME DOUBLE PRECISION NOT NULL
);"""
)
def main():
"""
Consume website-check records from the Kafka topic and insert each one
into the KEYS (timestamp) and VALUES (url/status/elapsed_time) tables,
using a sequential integer ID to join the two rows.
"""
logger.info("Connecting to Aiven PostgreSQL...")
logger.info("Kafka Consumption Begins...")
key_id = 1
for c in consumer:
print(
c.key.decode("utf-8"),
"->",
c.value.decode("utf-8"),
)
# SECURITY NOTE(review): eval() on message payloads executes arbitrary
# code from the topic — use json.loads / ast.literal_eval instead.
key = eval(c.key.decode("utf-8"))["time"] # Evaluate str to a dict
values = eval(c.value.decode("utf-8"))
value = values.get if False else None  # (placeholder removed)
| sbalasa/WebMonitor | db_aggregator.py | db_aggregator.py | py | 2,697 | python | en | code | 1 | github-code | 36 |
37597891395 | # -*- coding: utf-8 -*-
"""
Created on Thu May 23 20:49:32 2019
@author: 18443
"""
import os
import time
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as Data
from torch import optim
from torch.utils.data import DataLoader
import numpy as np
import argparse
#from convattcomb_dataset import MyDataset,PadCollate
from convattcomb_dataset import MyDataset,PadCollate
from dictionary import char_index_dictionary,index_char_dictionary
from Models.model_3single_1combineselfatt import FConvEncoder,CNN_ATT_decoder
# Pick GPU when available; all tensors are moved to `device` during eval.
use_cuda = torch.cuda.is_available() # pylint: disable=no-member
device = torch.device("cuda" if use_cuda else "cpu") # pylint: disable=no-member
# Command-line configuration: recognizer-output paths for the three input
# streams (CRNN, over-segmentation, attention model), plus training knobs.
parser = argparse.ArgumentParser()
parser.add_argument('--layer',
type=int, default=5,
help='layer of attention')
parser.add_argument('--PATH1',
default="/lustre/home/zyzhu/experiment2/traindata/CRNN/train108wtestin108w_88accinICDAR13.txt",
help='CRNN output txt')
parser.add_argument('--PATH2',
default="/lustre/home/zyzhu/experiment/traindata/overseg/all_result_100W_no_lm.txt",
help='overseg output txt')
parser.add_argument('--PATH3',
default="/lustre/home/zyzhu/experiment2/traindata/att/seed1006/train108wtestin108w_84accinICDAR13_seed1006.txt",
help='overseg output txt')
parser.add_argument('--testpath1',
default="/lustre/home/zyzhu/experiment2/traindata/CRNN/train108wtestincompetition_88accinICDAR13.txt",
help='CRNN testdataset output txt')
parser.add_argument('--testpath2',
default="/lustre/home/zyzhu/experiment/traindata/overseg/oversegment_testoutput_no_lm.txt",
help='overseg testdataset output txt')
parser.add_argument('--testpath3',
default="/lustre/home/zyzhu/experiment2/traindata/att/seed1006/train108wtestincompetition_84accinICDAR13_seed1006.txt",
help='overseg testdataset output txt')
parser.add_argument('--adam_lr', type=np.float32, default=0.0002,
help='learning rate')
parser.add_argument('--output_dir', default='./model_5layer_CNN64',
help='path to save model')
parser.add_argument('--batch_size', type=int, default=256,
help='size of one training batch')
# NOTE(review): type=list would split a command-line string into characters;
# only the default [0,1] behaves as intended — confirm before passing this flag.
parser.add_argument('--deviceID', type=list, default=[0,1],
help='deviceID')
parser.add_argument('--weight_decay', type=np.float32, default=0,
help='weight_decay')
# NOTE(review): help text is a copy-paste of weight_decay; this is the
# gradient-clipping norm used in train().
parser.add_argument('--weight_clip', type=np.float32, default=0.1,
help='weight_decay')
opt = parser.parse_args()
# Checkpoint paths for warm-starting trainIters(); empty means "fill in".
encoder_a_path=""
encoder_b_path=""
encoder_c_path=""
decoder_path=""
def tensor2list(tensor):
    """Collect token indices from a tensor, dropping the special tokens 0-3.

    The tensor is squeezed first, so it must hold more than one element
    (iterating a 0-d tensor is an error).
    """
    special = (0, 1, 2, 3)
    indices = []
    for element in tensor.squeeze():
        idx = int(element)
        if idx not in special:
            indices.append(idx)
    return indices
def tensor2string(tensor,index2word):
    """Decode a tensor of indices into a string via *index2word*, skipping
    the special tokens 0-3 (pad / start / end / unk slots)."""
    pieces = []
    for element in tensor.squeeze():
        idx = int(element)
        if idx in (0, 1, 2, 3):
            continue
        pieces.append(index2word[idx])
    return ''.join(pieces)
def editDistance(r, h):
    """Levenshtein (edit) distance DP table between sequences r and h.

    Returns the full (len(r)+1) x (len(h)+1) matrix; the distance itself
    is d[len(r)][len(h)].

    Bug fix: the table used dtype=np.uint8, which silently wraps modulo 256
    once any cell exceeds 255 (e.g. comparing sequences longer than 255
    characters against an empty prediction), corrupting WER statistics.
    """
    d = np.zeros((len(r) + 1, len(h) + 1), dtype=np.int64)
    # Base cases: distance from/to the empty prefix.
    d[0, :] = np.arange(len(h) + 1)
    d[:, 0] = np.arange(len(r) + 1)
    for i in range(1, len(r) + 1):
        for j in range(1, len(h) + 1):
            if r[i - 1] == h[j - 1]:
                d[i][j] = d[i - 1][j - 1]
            else:
                substitute = d[i - 1][j - 1] + 1
                insert = d[i][j - 1] + 1
                delete = d[i - 1][j] + 1
                d[i][j] = min(substitute, insert, delete)
    return d
def evaluate(encoder_a,encoder_b,encoder_c, decoder, eval_data, index2word,savepath,batch_size,epoch,printiter):
"""Greedy-decode eval_data, print running WER, and checkpoint all modules.
Decoding is autoregressive up to 50 tokens: at each step the decoder sees
the prefix generated so far plus the three encoder outputs; decoding stops
early once every sequence in the batch has emitted </s>."""
data = DataLoader(dataset=eval_data, batch_size=batch_size, collate_fn=PadCollate(dim=0))
# Running totals of edit-distance errors and reference lengths for WER.
counter_correct=0
counter_number=0
for j, (batch_x, batch_y,batch_z, label) in enumerate(data):
batch_x=batch_x.to(device).long()
batch_y=batch_y.to(device).long()
batch_z=batch_z.to(device).long()
label=label.to(device).long()
current_time=time.time()
batch_size=batch_x.size()[0]
# pre_buffer holds the generated prefix: <s> then up to 49 predicted tokens.
pre_buffer=torch.zeros(batch_size,50).fill_(char_index_dictionary['<pad>'])
pre_buffer[:,0]=char_index_dictionary['<s>']
# preoutput_list=[char_index_dictionary['<s>']]
# Encoders run once per batch; only the decoder loops per time step.
encoder_a_output=encoder_a(batch_x)
encoder_b_output=encoder_b(batch_y)
encoder_c_output=encoder_c(batch_z)
for i in range(1,50):
# preoutput=torch.LongTensor(preoutput_list).unsqueeze(0).to(device)#list to tensor 1*length
preoutput=pre_buffer[:,:i].long()
output,_ =decoder(preoutput,encoder_out1=encoder_a_output,encoder_out2=encoder_b_output,encoder_out3=encoder_c_output)#B*T*7356
# output,_ =decoder(preoutput,combined_output)
# Greedy choice: top-1 token at every position.
_,prediction=torch.topk(output, 1)#B*T*1
# print(prediction.size())
prediction=prediction.squeeze(2)#B*T
# preoutput_list.append(int(prediction.squeeze(0)[-1]))
# Stop once every sequence in the batch predicts </s>.
if all(prediction[:,-1]==char_index_dictionary['</s>']):
break
pre_buffer[:,i]=prediction[:,-1]
# Accumulate per-sample edit distance against the reference labels.
for one_predict_index in range(batch_size):
l_target=tensor2list(label[one_predict_index])
l_predict=tensor2list(pre_buffer[one_predict_index])
d=editDistance(l_target, l_predict)
counter_correct=counter_correct+d[len(l_target)][len(l_predict)]
counter_number=counter_number+len(l_target)
# Periodically dump one sample's inputs/target/prediction for inspection.
if j %printiter==0:
print(i)
print(j)
print('time used:%s'%(time.time()- current_time))
print(tensor2string(batch_x[one_predict_index],index_char_dictionary))
print(tensor2string(batch_y[one_predict_index],index_char_dictionary))
print(tensor2string(batch_z[one_predict_index],index_char_dictionary))
print(tensor2string(label[one_predict_index],index_char_dictionary))
print(tensor2string(prediction[one_predict_index],index_char_dictionary))
# print(l_target)
# print(l_predict)
result = float(d[len(l_target)][len(l_predict)]) / len(l_target) * 100
result = str("%.2f" % result) + "%"
print('WER:%s'%(result))
total_result=float(counter_correct) / counter_number * 100
total_result=str("%.2f" % total_result) + "%"
print(counter_correct)
print(counter_number)
print(' test WER of current time:%s'%(total_result))
print(counter_correct)
print(counter_number)
total_result=float(counter_correct) / counter_number * 100
total_result=str("%.2f" % total_result) + "%"
print('test WER:%s'%(total_result))
# Checkpoint every module, tagging filenames with epoch and WER.
torch.save(encoder_a.state_dict(), savepath+'/encoder_a'+str(epoch)+'_acc'+str(total_result)+'.pth')
torch.save(encoder_b.state_dict(), savepath+'/encoder_b'+str(epoch)+'_acc'+str(total_result)+'.pth')
torch.save(encoder_c.state_dict(), savepath+'/encoder_c'+str(epoch)+'_acc'+str(total_result)+'.pth')
torch.save(decoder.state_dict(), savepath+'/decoder'+str(epoch)+'_acc'+str(total_result)+'.pth')
# return eval_loss.item()
def train(encoder_a,
encoder_b,
encoder_c,
decoder,
input_a,
input_b,
input_c,
preout_tensor,
target_tensor,
encoder_a_optimizer,
encoder_b_optimizer,
encoder_c_optimizer,
decoder_optimizer,
criterion,
weightclip
):
"""One teacher-forced training step over a batch.
Runs the three encoders, decodes with the shifted target (preout_tensor),
backpropagates criterion loss, clips gradient norms to weightclip and
steps all four optimizers. Returns the scalar loss value."""
encoder_a_optimizer.zero_grad()
encoder_b_optimizer.zero_grad()
encoder_c_optimizer.zero_grad()
decoder_optimizer.zero_grad()
encoder_a_output=encoder_a(input_a)
encoder_b_output=encoder_b(input_b)
encoder_c_output=encoder_c(input_c)
output,_ =decoder(preout_tensor,encoder_out1=encoder_a_output,encoder_out2=encoder_b_output,encoder_out3=encoder_c_output)
# CrossEntropyLoss expects (batch, classes, time), so move the vocab axis.
output=output.transpose(1, 2).contiguous()
# print(output.size())
# print(target_tensor.size())
loss = criterion(output, target_tensor)
loss.backward()
# Clip each module's gradient norm before stepping.
torch.nn.utils.clip_grad_norm_(encoder_a.parameters(), weightclip)
torch.nn.utils.clip_grad_norm_(encoder_b.parameters(), weightclip)
torch.nn.utils.clip_grad_norm_(encoder_c.parameters(), weightclip)
torch.nn.utils.clip_grad_norm_(decoder.parameters(), weightclip)
encoder_a_optimizer.step()
encoder_b_optimizer.step()
encoder_c_optimizer.step()
decoder_optimizer.step()
return loss.item()
#PATH1="/lustre/home/zyzhu/CRNN64/sementic_85acc.txt"
#PATH2="/lustre/home/zyzhu/conv_att_combine/train_data/all_result_100W_no_lm.txt"
#
#testpath1="/lustre/home/zyzhu/CRNN64/competition_testoutput_85acc.txt"
#testpath2="/lustre/home/zyzhu/conv_att_combine/train_data/text_index_result_no_lm.txt"
##
def trainIters(encoder_a,encoder_b,encoder_c, decoder, n_iters, opt):
"""Full training driver: wrap modules in DataParallel, warm-start from the
module-level checkpoint paths, then alternate an epoch of teacher-forced
training with an evaluation/checkpoint pass, for n_iters epochs."""
if not os.path.exists(opt.output_dir):
os.mkdir(opt.output_dir)
print('making folder')
# Tell each encoder how many attention layers will attend to it (its own
# per-stream attention plus the shared combine attention).
encoder_a.num_attention_layers = sum(layer is not None for layer in decoder.attention1)+sum(layer is not None for layer in decoder.combine_attention)
encoder_b.num_attention_layers = sum(layer is not None for layer in decoder.attention2)+sum(layer is not None for layer in decoder.combine_attention)
encoder_c.num_attention_layers = sum(layer is not None for layer in decoder.attention3)+sum(layer is not None for layer in decoder.combine_attention)
encoder_a=torch.nn.DataParallel(encoder_a, device_ids=opt.deviceID).cuda()
encoder_b=torch.nn.DataParallel(encoder_b, device_ids=opt.deviceID).cuda()
encoder_c=torch.nn.DataParallel(encoder_c, device_ids=opt.deviceID).cuda()
decoder=torch.nn.DataParallel(decoder, device_ids=opt.deviceID).cuda()
#
# Warm start: the *_path globals must point at existing checkpoints
# (they are empty strings by default, which makes torch.load fail).
encoder_a.load_state_dict(torch.load(encoder_a_path))
encoder_b.load_state_dict(torch.load(encoder_b_path))
encoder_c.load_state_dict(torch.load(encoder_c_path))
decoder.load_state_dict(torch.load(decoder_path))
encoder1_optimizer = optim.Adam(encoder_a.parameters(), lr=opt.adam_lr,betas=(0.5, 0.99),weight_decay=opt.weight_decay)
encoder2_optimizer = optim.Adam(encoder_b.parameters(), lr=opt.adam_lr,betas=(0.5, 0.99),weight_decay=opt.weight_decay)
encoder3_optimizer = optim.Adam(encoder_c.parameters(), lr=opt.adam_lr,betas=(0.5, 0.99),weight_decay=opt.weight_decay)
decoder_optimizer = optim.Adam(decoder.parameters(), lr=opt.adam_lr,betas=(0.5, 0.99),weight_decay=opt.weight_decay)
criterion = nn.CrossEntropyLoss().to(device)
dataset=MyDataset(opt.PATH1,opt.PATH3,opt.PATH2)
test_dataset=MyDataset(opt.testpath1,opt.testpath3,opt.testpath2)
print(len(test_dataset))
train_loader = DataLoader(dataset,shuffle=True,batch_size =opt.batch_size, collate_fn=PadCollate(dim=0))
# encoder_a.eval()
# encoder_b.eval()
# encoder_c.eval()
# decoder.eval()
# with torch.no_grad():
# evaluate(encoder_a,encoder_b,encoder_c, decoder, test_dataset, index_char_dictionary,savepath=opt.output_dir,batch_size=16,epoch=0,printiter=5)
#
# encoder_a.train()
# encoder_b.train()
# encoder_c.train()
# decoder.train()
#
print("start!")
for epoch in range( n_iters ):
#evaluate(encoder=encoder, decoder=decoder, train_data=train_data, max_length=50,index2word=index2word)
for i, (batch_x, batch_y, batch_z, label) in enumerate(train_loader):
batch_x=batch_x.cuda().long()
batch_y=batch_y.cuda().long()
batch_z=batch_z.cuda().long()
label=label.cuda().long()
# print(batch_x)
# print(batch_y.size())
# Teacher forcing: target is the label shifted left, the decoder
# input (preoutput) is the label shifted right.
target=label[:,1:]
preoutput=label[:,:-1]
# print(target)
# print(preoutput)
loss = train(encoder_a=encoder_a,encoder_b=encoder_b,encoder_c=encoder_c,
decoder=decoder,
input_a=batch_x,input_b=batch_y, input_c=batch_z,
preout_tensor=preoutput,target_tensor=target,
encoder_a_optimizer=encoder1_optimizer,encoder_b_optimizer=encoder2_optimizer,encoder_c_optimizer=encoder3_optimizer,
decoder_optimizer=decoder_optimizer,
criterion=criterion,weightclip=opt.weight_clip)
if i%20==0:
print('epoch:%d,iter:%d,train_loss:%f'% (epoch,i,loss))
# if (i%2000==0)and(i!=0):
# End of epoch: evaluate and checkpoint, then restore train mode.
encoder_a.eval()
encoder_b.eval()
encoder_c.eval()
decoder.eval()
with torch.no_grad():
evaluate(encoder_a,encoder_b,encoder_c, decoder, test_dataset, index_char_dictionary,savepath=opt.output_dir,batch_size=64,epoch=epoch,printiter=10)
encoder_a.train()
encoder_b.train()
encoder_c.train()
decoder.train()
# Build three parallel convolutional encoders (one per upstream recognizer
# output stream) and the fusion attention decoder, then train for 100 epochs.
encoder_a = FConvEncoder(dictionary=char_index_dictionary,attention_layer=opt.layer)
encoder_b = FConvEncoder(dictionary=char_index_dictionary,attention_layer=opt.layer)
encoder_c = FConvEncoder(dictionary=char_index_dictionary,attention_layer=opt.layer)
decoder = CNN_ATT_decoder(dictionary=char_index_dictionary,attention_layer=opt.layer)
trainIters(encoder_a,encoder_b,encoder_c, decoder, 100,opt)
| yudmoe/neural-combination-of-HCTR | threeinput_training.py | threeinput_training.py | py | 14,833 | python | en | code | 4 | github-code | 36 |
3715520265 | import cv2
import numpy as np
def get_crops(img, annotations, padding=0):
crops = []
new_img = img.copy() # Prevent drawing on original image
for a in annotations:
c = a['coordinates']
y1, y2 = int(c['y'] - c['height'] / 2 - padding), int(c['y'] + c['height'] / 2 + padding)
x1, x2 = int(c['x'] - c['width'] / 2 - padding), int(c['x'] + c['width'] / 2 + padding)
crop = new_img[y1: y2, x1:x2]
crops.append(crop)
return crops
def segment(crops):
segs = []
for c in crops:
gray = cv2.cvtColor(c, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# noise removal
kernel = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN,kernel, iterations = 4)
# sure background area
sure_bg = cv2.dilate(opening,kernel, iterations=3)
# Finding sure foreground area
dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg,sure_fg)
# Marker labelling
ret, markers = cv2.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers+1
# Now, mark the region of unknown with zero
markers[unknown==255] = 0
markers = cv2.watershed(c, markers)
markers[:,[0,-1]] = markers[[0,-1]] = 1
c[markers != 1] = [255,191,0]
segs.append(c)
return segs
def draw(img, annotations, segs, padding=0):
    """Alpha-blend each segmented crop back onto img at its annotation's box.

    annotations and segs are parallel sequences: segs[i] is the segmented
    version of the crop described by annotations[i]. Box coordinates are
    computed exactly as in get_crops. Mutates img in place and returns it.
    """
    overlay = img.copy()
    for i, a in enumerate(annotations):
        c = a['coordinates']
        y1, y2 = int(c['y'] - c['height'] / 2 - padding), int(c['y'] + c['height'] / 2 + padding)
        x1, x2 = int(c['x'] - c['width'] / 2 - padding), int(c['x'] + c['width'] / 2 + padding)
        overlay[y1: y2, x1:x2] = segs[i]
    # Blend: img <- 0.5 * overlay + 0.5 * img (written back into img).
    blend = 0.5
    cv2.addWeighted(overlay, blend, img, 1 - blend, 0, img)
    return img
| mattzh72/sframe-visualizer | tools/utils/segment.py | segment.py | py | 1,936 | python | en | code | 0 | github-code | 36 |
def solution(n):
    """Write n in base 3, reverse the digit string, and return its value."""
    digits = []
    while n >= 3:
        n, remainder = divmod(n, 3)
        digits.append(str(remainder))
    digits.append(str(n))
    # 'digits' holds the base-3 digits least-significant first, i.e. the
    # reversed representation -- interpret it directly as a base-3 number.
    return int(''.join(digits), 3)
print(solution(3))
# int(x, radix) : radix 진수로 표현된 문자열 x를 10진수로 변환 후 반환
# int('1332', 4) : 126 (4진수인 1332를 10진수로 변환)
# pythonic한 코드 답안
# def solution(n):
# tmp = ''
# while n:
# tmp += str(n % 3)
# n = n // 3
# answer = int(tmp, 3) ################### int 내장 함수 사용
# return answer
#!/usr/bin/env python3
import argparse
import configparser
import json
import os
import tempfile
import shutil
import subprocess
import stat
import time
import dateutil
import dateutil.parser
import urllib.parse
from submitty_utils import dateutils, glob
import grade_items_logging
import write_grade_history
import insert_database_version_data
# these variables will be replaced by INSTALL_SUBMITTY.sh
SUBMITTY_INSTALL_DIR = "__INSTALL__FILLIN__SUBMITTY_INSTALL_DIR__"
SUBMITTY_DATA_DIR = "__INSTALL__FILLIN__SUBMITTY_DATA_DIR__"
HWCRON_UID = "__INSTALL__FILLIN__HWCRON_UID__"
INTERACTIVE_QUEUE = os.path.join(SUBMITTY_DATA_DIR, "to_be_graded_interactive")
BATCH_QUEUE = os.path.join(SUBMITTY_DATA_DIR, "to_be_graded_batch")
USE_DOCKER = False
WRITE_DATABASE = True
# ==================================================================================
def parse_args():
    """Parse the three required positional command line arguments.

    Returns an argparse.Namespace with next_directory (the queue directory),
    next_to_grade (the queue file name) and which_untrusted (untrusted user).
    """
    parser = argparse.ArgumentParser()
    for positional in ("next_directory", "next_to_grade", "which_untrusted"):
        parser.add_argument(positional)
    return parser.parse_args()
def get_queue_time(next_directory, next_to_grade):
    """Return the queue file's creation time as a timezone-aware datetime."""
    queue_file = os.path.join(next_directory, next_to_grade)
    ctime_text = time.ctime(os.path.getctime(queue_file))
    parsed = dateutil.parser.parse(ctime_text)
    # Localize to the Submitty-configured timezone.
    return dateutils.get_timezone().localize(parsed)
def get_submission_path(next_directory, next_to_grade):
    """Load and return the JSON queue entry describing what to grade.

    Logs and exits via SystemExit when the queue file does not exist.
    """
    queue_file = os.path.join(next_directory, next_to_grade)
    if not os.path.isfile(queue_file):
        grade_items_logging.log_message("ERROR: the file does not exist " + queue_file)
        raise SystemExit("ERROR: the file does not exist", queue_file)
    with open(queue_file, 'r') as infile:
        return json.load(infile)
def add_permissions(item, perms):
    """OR the given permission bits into item's mode, if we own the file.

    Non-owners cannot chmod, so the call is skipped silently in that case.
    """
    stat_info = os.stat(item)
    if stat_info.st_uid == os.getuid():
        os.chmod(item, stat_info.st_mode | perms)
def touch(my_file):
    """Create my_file if missing and update its modification time to now."""
    with open(my_file, 'a'):
        os.utime(my_file, None)
def add_permissions_recursive(top_dir, root_perms, dir_perms, file_perms):
    """Walk top_dir, adding root_perms to each visited directory root,
    dir_perms to its subdirectories and file_perms to its files."""
    for root, dirs, files in os.walk(top_dir):
        add_permissions(root, root_perms)
        entries = [(name, dir_perms) for name in dirs]
        entries += [(name, file_perms) for name in files]
        for name, perms in entries:
            add_permissions(os.path.join(root, name), perms)
def get_vcs_info(top_dir, semester, course, gradeable, userid, teamid):
    """Read the gradeable's form JSON and the course config.ini to determine
    its version-control settings.

    Returns (is_vcs, vcs_type, vcs_base_url, vcs_subdirectory):
      is_vcs           -- True when the gradeable's upload_type is "repository"
      vcs_type         -- value from config.ini with surrounding quotes stripped
      vcs_base_url     -- base checkout URL, quotes stripped
      vcs_subdirectory -- per-user/team repo path with the {$gradeable_id},
                          {$user_id} and {$team_id} placeholders substituted
                          (empty string for non-VCS gradeables)
    """
    form_json_file = os.path.join(top_dir, 'courses', semester, course, 'config', 'form', 'form_'+gradeable+'.json')
    with open(form_json_file, 'r') as fj:
        form_json = json.load(fj)
    course_ini_file = os.path.join(top_dir, 'courses', semester, course, 'config', 'config.ini')
    with open(course_ini_file, 'r') as open_file:
        course_ini = configparser.ConfigParser()
        course_ini.read_file(open_file)
    is_vcs = form_json["upload_type"] == "repository"
    # PHP reads " as a character around the string, while Python reads it as part of the string
    # so we have to strip out the " in python
    vcs_type = course_ini['course_details']['vcs_type'].strip('"')
    vcs_base_url = course_ini['course_details']['vcs_base_url'].strip('"')
    vcs_subdirectory = form_json["subdirectory"] if is_vcs else ''
    vcs_subdirectory = vcs_subdirectory.replace("{$gradeable_id}", gradeable)
    vcs_subdirectory = vcs_subdirectory.replace("{$user_id}", userid)
    vcs_subdirectory = vcs_subdirectory.replace("{$team_id}", teamid)
    return is_vcs, vcs_type, vcs_base_url, vcs_subdirectory
# copy the files & directories from source to target
# it will create directories as needed
# it's ok if the target directory or subdirectories already exist
# it will overwrite files with the same name if they exist
def copy_contents_into(source,target,tmp_logs):
    """Recursively merge the contents of source into target.

    Existing target subdirectories are merged; an existing destination file is
    removed (with a WARNING appended to tmp_logs/overall.txt) and overwritten.
    Exits via SystemExit when target does not exist or when a destination
    subpath is a file where a directory is expected. A source that is missing
    or not a directory is silently ignored.
    """
    if not os.path.isdir(target):
        grade_items_logging.log_message("ERROR: the target directory does not exist " + target)
        raise SystemExit("ERROR: the target directory does not exist '", target, "'")
    if os.path.isdir(source):
        for item in os.listdir(source):
            if os.path.isdir(os.path.join(source,item)):
                if os.path.isdir(os.path.join(target,item)):
                    # recurse
                    copy_contents_into(os.path.join(source,item),os.path.join(target,item),tmp_logs)
                elif os.path.isfile(os.path.join(target,item)):
                    grade_items_logging.log_message("ERROR: the target subpath is a file not a directory '" + os.path.join(target,item) + "'")
                    raise SystemExit("ERROR: the target subpath is a file not a directory '", os.path.join(target,item), "'")
                else:
                    # copy entire subtree
                    shutil.copytree(os.path.join(source,item),os.path.join(target,item))
            else:
                if os.path.exists(os.path.join(target,item)):
                    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
                        print ("\nWARNING: REMOVING DESTINATION FILE" , os.path.join(target,item),
                               " THEN OVERWRITING: ", os.path.join(source,item), "\n", file=f)
                    os.remove(os.path.join(target,item))
                try:
                    shutil.copy(os.path.join(source,item),target)
                # NOTE(review): bare except hides the underlying error (and
                # even KeyboardInterrupt); consider catching OSError instead.
                except:
                    raise SystemExit("ERROR COPYING FILE: " + os.path.join(source,item) + " -> " + os.path.join(target,item))
# copy files that match one of the patterns from the source directory
# to the target directory.
def pattern_copy(what, patterns, source, target, tmp_logs):
    """Copy every file under source matching one of the glob patterns into
    target, preserving the relative directory structure.

    what     -- label used in the log messages (e.g. "submission_to_runner")
    patterns -- list of glob patterns, matched recursively against source
    source   -- directory to copy from
    target   -- directory to copy to (subdirectories created as needed)
    tmp_logs -- directory containing the overall.txt log file
    """
    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        print(what, " pattern copy ", patterns, " from ", source, " -> ", target, file=f)
        # BUG FIX: the copy loop must run inside the 'with' block. The
        # original code dedented the loop, so the per-file log line wrote to
        # the already-closed file object f, raising "I/O operation on closed
        # file" whenever a pattern matched anything.
        for pattern in patterns:
            for my_file in glob.glob(os.path.join(source, pattern), recursive=True):
                # grab the matched name
                relpath = os.path.relpath(my_file, source)
                # make the necessary directories leading to the file
                os.makedirs(os.path.join(target, os.path.dirname(relpath)), exist_ok=True)
                # copy the file
                shutil.copy(my_file, os.path.join(target, relpath))
                print("  COPY ", my_file,
                      " -> ", os.path.join(target, relpath), file=f)
# give permissions to all created files to the hwcron user
def untrusted_grant_rwx_access(which_untrusted, my_dir):
    """Give all users read/write/execute on every file under my_dir owned by
    the untrusted user (executed through the untrusted_execute helper)."""
    untrusted_execute = os.path.join(SUBMITTY_INSTALL_DIR, "bin", "untrusted_execute")
    find_command = [untrusted_execute, which_untrusted,
                    "/usr/bin/find", my_dir,
                    "-user", which_untrusted,
                    "-exec", "/bin/chmod", "o+rwx", "{}", ";"]
    subprocess.call(find_command)
# ==================================================================================
# ==================================================================================
def just_grade_item(next_directory,next_to_grade,which_untrusted):
    """Grade a single queued submission end to end.

    Pipeline: read the queue JSON entry -> (optionally) check out the
    student's VCS repository -> compile -> run -> validate, each stage
    executed as the given untrusted user in a per-user temp directory ->
    archive results, grade history and logs into the results directory.

    next_directory   -- queue directory (BATCH_QUEUE or INTERACTIVE_QUEUE)
    next_to_grade    -- name of the queue file describing what to grade
    which_untrusted  -- untrusted user account the student code runs as
    """
    my_pid = os.getpid()
    # verify the hwcron user is running this script
    if not int(os.getuid()) == int(HWCRON_UID):
        grade_items_logging.log_message("ERROR: must be run by hwcron")
        raise SystemExit("ERROR: the grade_item.py script must be run by the hwcron user")
    # --------------------------------------------------------
    # figure out what we're supposed to grade & error checking
    # obj describes the queue item (semester/course/gradeable/who/version).
    obj = get_submission_path(next_directory,next_to_grade)
    submission_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],
                                   "submissions",obj["gradeable"],obj["who"],str(obj["version"]))
    if not os.path.isdir(submission_path):
        grade_items_logging.log_message("ERROR: the submission directory does not exist" + submission_path)
        raise SystemExit("ERROR: the submission directory does not exist",submission_path)
    print("pid", my_pid, "GRADE THIS", submission_path)
    is_vcs, vcs_type, vcs_base_url, vcs_subdirectory = get_vcs_info(SUBMITTY_DATA_DIR,
                                                                    obj["semester"],
                                                                    obj["course"],
                                                                    obj["gradeable"],
                                                                    obj["who"],
                                                                    obj["team"])
    is_batch_job = next_directory == BATCH_QUEUE
    is_batch_job_string = "BATCH" if is_batch_job else "INTERACTIVE"
    queue_time = get_queue_time(next_directory,next_to_grade)
    queue_time_longstring = dateutils.write_submitty_date(queue_time)
    grading_began = dateutils.get_current_time()
    waittime = int((grading_began-queue_time).total_seconds())
    grade_items_logging.log_message(is_batch_job,which_untrusted,submission_path,"wait:",waittime,"")
    # --------------------------------------------------------
    # various paths
    provided_code_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"provided_code",obj["gradeable"])
    test_input_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"test_input",obj["gradeable"])
    test_output_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"test_output",obj["gradeable"])
    custom_validation_code_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"custom_validation_code",obj["gradeable"])
    bin_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"bin")
    checkout_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"checkout",obj["gradeable"],obj["who"],str(obj["version"]))
    results_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"results",obj["gradeable"],obj["who"],str(obj["version"]))
    # grab a copy of the current history.json file (if it exists)
    history_file = os.path.join(results_path,"history.json")
    history_file_tmp = ""
    if os.path.isfile(history_file):
        filehandle,history_file_tmp = tempfile.mkstemp()
        shutil.copy(history_file,history_file_tmp)
    # get info from the gradeable config file
    json_config = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"config","form","form_"+obj["gradeable"]+".json")
    with open(json_config, 'r') as infile:
        gradeable_config_obj = json.load(infile)
    # get info from the gradeable config file
    complete_config = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"config","complete_config","complete_config_"+obj["gradeable"]+".json")
    with open(complete_config, 'r') as infile:
        complete_config_obj = json.load(infile)
    checkout_subdirectory = complete_config_obj["autograding"].get("use_checkout_subdirectory","")
    checkout_subdir_path = os.path.join(checkout_path,checkout_subdirectory)
    # --------------------------------------------------------------------
    # MAKE TEMPORARY DIRECTORY & COPY THE NECESSARY FILES THERE
    tmp = os.path.join("/var/local/submitty/autograding_tmp/",which_untrusted,"tmp")
    shutil.rmtree(tmp,ignore_errors=True)
    os.makedirs(tmp)
    # switch to tmp directory
    os.chdir(tmp)
    # make the logs directory
    tmp_logs = os.path.join(tmp,"tmp_logs")
    os.makedirs(tmp_logs)
    # grab the submission time
    with open (os.path.join(submission_path,".submit.timestamp")) as submission_time_file:
        submission_string = submission_time_file.read().rstrip()
    submission_datetime = dateutils.read_submitty_date(submission_string)
    # --------------------------------------------------------------------
    # CHECKOUT THE STUDENT's REPO
    if is_vcs:
        # is vcs_subdirectory standalone or should it be combined with base_url?
        if vcs_subdirectory[0] == '/' or '://' in vcs_subdirectory:
            vcs_path = vcs_subdirectory
        else:
            if '://' in vcs_base_url:
                vcs_path = urllib.parse.urljoin(vcs_base_url, vcs_subdirectory)
            else:
                vcs_path = os.path.join(vcs_base_url, vcs_subdirectory)
        with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
            print("====================================\nVCS CHECKOUT", file=f)
            print('vcs_base_url', vcs_base_url, file=f)
            print('vcs_subdirectory', vcs_subdirectory, file=f)
            print('vcs_path', vcs_path, file=f)
            print(['/usr/bin/git', 'clone', vcs_path, checkout_path], file=f)
        # cleanup the previous checkout (if it exists)
        shutil.rmtree(checkout_path,ignore_errors=True)
        os.makedirs(checkout_path, exist_ok=True)
        subprocess.call(['/usr/bin/git', 'clone', vcs_path, checkout_path])
        os.chdir(checkout_path)
        # determine which version we need to checkout
        # NOTE(review): the --before value embeds literal double quotes in a
        # single argv element -- git seems to tolerate this, but confirm.
        what_version = subprocess.check_output(['git', 'rev-list', '-n', '1', '--before="'+submission_string+'"', 'master'])
        what_version = str(what_version.decode('utf-8')).rstrip()
        if what_version == "":
            # oops, pressed the grade button before a valid commit
            shutil.rmtree(checkout_path, ignore_errors=True)
        else:
            # and check out the right version
            subprocess.call(['git', 'checkout', '-b', 'grade', what_version])
        os.chdir(tmp)
        subprocess.call(['ls', '-lR', checkout_path], stdout=open(tmp_logs + "/overall.txt", 'a'))
    # --------------------------------------------------------------------
    # START DOCKER
    container = None
    if USE_DOCKER:
        container = subprocess.check_output(['docker', 'run', '-t', '-d',
                                             '-v', tmp + ':' + tmp,
                                             'ubuntu:custom']).decode('utf8').strip()
    # --------------------------------------------------------------------
    # COMPILE THE SUBMITTED CODE
    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        print("====================================\nCOMPILATION STARTS", file=f)
    # copy submitted files to the tmp compilation directory
    tmp_compilation = os.path.join(tmp,"TMP_COMPILATION")
    os.mkdir(tmp_compilation)
    os.chdir(tmp_compilation)
    gradeable_deadline_string = gradeable_config_obj["date_due"]
    patterns_submission_to_compilation = complete_config_obj["autograding"]["submission_to_compilation"]
    pattern_copy("submission_to_compilation",patterns_submission_to_compilation,submission_path,tmp_compilation,tmp_logs)
    if is_vcs:
        pattern_copy("checkout_to_compilation",patterns_submission_to_compilation,checkout_subdir_path,tmp_compilation,tmp_logs)
    # copy any instructor provided code files to tmp compilation directory
    copy_contents_into(provided_code_path,tmp_compilation,tmp_logs)
    subprocess.call(['ls', '-lR', '.'], stdout=open(tmp_logs + "/overall.txt", 'a'))
    # copy compile.out to the current directory
    shutil.copy (os.path.join(bin_path,obj["gradeable"],"compile.out"),os.path.join(tmp_compilation,"my_compile.out"))
    # give the untrusted user read/write/execute permissions on the tmp directory & files
    add_permissions_recursive(tmp_compilation,
                              stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP,
                              stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP,
                              stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP)
    add_permissions(tmp,stat.S_IROTH | stat.S_IXOTH)
    add_permissions(tmp_logs,stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
    with open(os.path.join(tmp_logs,"compilation_log.txt"), 'w') as logfile:
        if USE_DOCKER:
            compile_success = subprocess.call(['docker', 'exec', '-w', tmp_compilation, container,
                                               os.path.join(tmp_compilation, 'my_compile.out'), obj['gradeable'],
                                               obj['who'], str(obj['version']), submission_string], stdout=logfile)
        else:
            compile_success = subprocess.call([os.path.join(SUBMITTY_INSTALL_DIR,"bin","untrusted_execute"),
                                               which_untrusted,
                                               os.path.join(tmp_compilation,"my_compile.out"),
                                               obj["gradeable"],
                                               obj["who"],
                                               str(obj["version"]),
                                               submission_string],
                                              stdout=logfile)
    if compile_success == 0:
        print ("pid",my_pid,"COMPILATION OK")
    else:
        # Compilation failure is logged but grading continues (run/validate
        # stages still execute so the student gets feedback).
        print ("pid",my_pid,"COMPILATION FAILURE")
        grade_items_logging.log_message(is_batch_job,which_untrusted,submission_path,"","","COMPILATION FAILURE")
        #raise SystemExit()
    untrusted_grant_rwx_access(which_untrusted,tmp_compilation)
    # remove the compilation program
    os.remove(os.path.join(tmp_compilation,"my_compile.out"))
    # return to the main tmp directory
    os.chdir(tmp)
    # --------------------------------------------------------------------
    # make the runner directory
    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        print ("====================================\nRUNNER STARTS", file=f)
    tmp_work = os.path.join(tmp,"TMP_WORK")
    os.makedirs(tmp_work)
    os.chdir(tmp_work)
    # move all executable files from the compilation directory to the main tmp directory
    # Note: Must preserve the directory structure of compiled files (esp for Java)
    patterns_submission_to_runner = complete_config_obj["autograding"]["submission_to_runner"]
    pattern_copy("submission_to_runner",patterns_submission_to_runner,submission_path,tmp_work,tmp_logs)
    if is_vcs:
        pattern_copy("checkout_to_runner",patterns_submission_to_runner,checkout_subdir_path,tmp_work,tmp_logs)
    patterns_compilation_to_runner = complete_config_obj["autograding"]["compilation_to_runner"]
    pattern_copy("compilation_to_runner",patterns_compilation_to_runner,tmp_compilation,tmp_work,tmp_logs)
    # copy input files to tmp_work directory
    copy_contents_into(test_input_path,tmp_work,tmp_logs)
    subprocess.call(['ls', '-lR', '.'], stdout=open(tmp_logs + "/overall.txt", 'a'))
    # copy runner.out to the current directory
    shutil.copy (os.path.join(bin_path,obj["gradeable"],"run.out"),os.path.join(tmp_work,"my_runner.out"))
    # give the untrusted user read/write/execute permissions on the tmp directory & files
    add_permissions_recursive(tmp_work,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
    # raise SystemExit()
    # run the run.out as the untrusted user
    with open(os.path.join(tmp_logs,"runner_log.txt"), 'w') as logfile:
        print ("LOGGING BEGIN my_runner.out",file=logfile)
        logfile.flush()
        try:
            if USE_DOCKER:
                runner_success = subprocess.call(['docker', 'exec', '-w', tmp_work, container,
                                                  os.path.join(tmp_work, 'my_runner.out'), obj['gradeable'],
                                                  obj['who'], str(obj['version']), submission_string], stdout=logfile)
            else:
                runner_success = subprocess.call([os.path.join(SUBMITTY_INSTALL_DIR,"bin","untrusted_execute"),
                                                  which_untrusted,
                                                  os.path.join(tmp_work,"my_runner.out"),
                                                  obj["gradeable"],
                                                  obj["who"],
                                                  str(obj["version"]),
                                                  submission_string],
                                                 stdout=logfile)
            logfile.flush()
        except Exception as e:
            print ("ERROR caught runner.out exception={0}".format(str(e.args[0])).encode("utf-8"),file=logfile)
            logfile.flush()
        print ("LOGGING END my_runner.out",file=logfile)
        logfile.flush()
        # kill any stray processes left behind by the student's program
        killall_success = subprocess.call([os.path.join(SUBMITTY_INSTALL_DIR,"bin","untrusted_execute"),
                                           which_untrusted,
                                           os.path.join(SUBMITTY_INSTALL_DIR,"bin","killall.py")],
                                          stdout=logfile)
        print ("KILLALL COMPLETE my_runner.out",file=logfile)
        logfile.flush()
        if killall_success != 0:
            msg='RUNNER ERROR: had to kill {} process(es)'.format(killall_success)
            print ("pid",my_pid,msg)
            grade_items_logging.log_message(is_batch_job,which_untrusted,submission_path,"","",msg)
    # NOTE(review): if subprocess.call raised before assigning runner_success
    # in the try block above, runner_success is unbound here (NameError).
    if runner_success == 0:
        print ("pid",my_pid,"RUNNER OK")
    else:
        print ("pid",my_pid,"RUNNER FAILURE")
        grade_items_logging.log_message(is_batch_job,which_untrusted,submission_path,"","","RUNNER FAILURE")
    untrusted_grant_rwx_access(which_untrusted,tmp_work)
    untrusted_grant_rwx_access(which_untrusted,tmp_compilation)
    # --------------------------------------------------------------------
    # RUN VALIDATOR
    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        print ("====================================\nVALIDATION STARTS", file=f)
    # copy results files from compilation...
    patterns_submission_to_validation = complete_config_obj["autograding"]["submission_to_validation"]
    pattern_copy("submission_to_validation",patterns_submission_to_validation,submission_path,tmp_work,tmp_logs)
    if is_vcs:
        pattern_copy("checkout_to_validation",patterns_submission_to_validation,checkout_subdir_path,tmp_work,tmp_logs)
    patterns_compilation_to_validation = complete_config_obj["autograding"]["compilation_to_validation"]
    pattern_copy("compilation_to_validation",patterns_compilation_to_validation,tmp_compilation,tmp_work,tmp_logs)
    # remove the compilation directory
    shutil.rmtree(tmp_compilation)
    # copy output files to tmp_work directory
    copy_contents_into(test_output_path,tmp_work,tmp_logs)
    # copy any instructor custom validation code into the tmp work directory
    copy_contents_into(custom_validation_code_path,tmp_work,tmp_logs)
    subprocess.call(['ls', '-lR', '.'], stdout=open(tmp_logs + "/overall.txt", 'a'))
    # copy validator.out to the current directory
    shutil.copy (os.path.join(bin_path,obj["gradeable"],"validate.out"),os.path.join(tmp_work,"my_validator.out"))
    # give the untrusted user read/write/execute permissions on the tmp directory & files
    add_permissions_recursive(tmp_work,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
    add_permissions(os.path.join(tmp_work,"my_validator.out"),stat.S_IROTH | stat.S_IXOTH)
    # validator the validator.out as the untrusted user
    with open(os.path.join(tmp_logs,"validator_log.txt"), 'w') as logfile:
        if USE_DOCKER:
            validator_success = subprocess.call(['docker', 'exec', '-w', tmp_work, container,
                                                 os.path.join(tmp_work, 'my_validator.out'), obj['gradeable'],
                                                 obj['who'], str(obj['version']), submission_string], stdout=logfile)
        else:
            validator_success = subprocess.call([os.path.join(SUBMITTY_INSTALL_DIR,"bin","untrusted_execute"),
                                                 which_untrusted,
                                                 os.path.join(tmp_work,"my_validator.out"),
                                                 obj["gradeable"],
                                                 obj["who"],
                                                 str(obj["version"]),
                                                 submission_string],
                                                stdout=logfile)
    if validator_success == 0:
        print ("pid",my_pid,"VALIDATOR OK")
    else:
        print ("pid",my_pid,"VALIDATOR FAILURE")
        grade_items_logging.log_message(is_batch_job,which_untrusted,submission_path,"","","VALIDATION FAILURE")
    untrusted_grant_rwx_access(which_untrusted,tmp_work)
    # grab the result of autograding
    # grade_result stays "" when no "Automatic grading total:" line is found.
    grade_result = ""
    with open(os.path.join(tmp_work,"grade.txt")) as f:
        lines = f.readlines()
        for line in lines:
            line = line.rstrip('\n')
            if line.startswith("Automatic grading total:"):
                grade_result = line
    # --------------------------------------------------------------------
    # MAKE RESULTS DIRECTORY & COPY ALL THE FILES THERE
    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        print ("====================================\nARCHIVING STARTS", file=f)
    subprocess.call(['ls', '-lR', '.'], stdout=open(tmp_logs + "/overall.txt", 'a'))
    os.chdir(bin_path)
    # save the old results path!
    if os.path.isdir(os.path.join(results_path,"OLD")):
        shutil.move(os.path.join(results_path,"OLD"),
                    os.path.join(tmp,"OLD_RESULTS"))
    # clean out all of the old files if this is a re-run
    shutil.rmtree(results_path,ignore_errors=True)
    # create the directory (and the full path if it doesn't already exist)
    os.makedirs(results_path)
    # bring back the old results!
    if os.path.isdir(os.path.join(tmp,"OLD_RESULTS")):
        shutil.move(os.path.join(tmp,"OLD_RESULTS"),
                    os.path.join(results_path,"OLD"))
    os.makedirs(os.path.join(results_path,"details"))
    patterns_work_to_details = complete_config_obj["autograding"]["work_to_details"]
    pattern_copy("work_to_details",patterns_work_to_details,tmp_work,os.path.join(results_path,"details"),tmp_logs)
    if not history_file_tmp == "":
        shutil.move(history_file_tmp,history_file)
        # fix permissions
        ta_group_id = os.stat(results_path).st_gid
        os.chown(history_file,int(HWCRON_UID),ta_group_id)
        add_permissions(history_file,stat.S_IRGRP)
    grading_finished = dateutils.get_current_time()
    shutil.copy(os.path.join(tmp_work,"results.json"),results_path)
    shutil.copy(os.path.join(tmp_work,"grade.txt"),results_path)
    # -------------------------------------------------------------
    # create/append to the results history
    gradeable_deadline_datetime = dateutils.read_submitty_date(gradeable_deadline_string)
    gradeable_deadline_longstring = dateutils.write_submitty_date(gradeable_deadline_datetime)
    submission_longstring = dateutils.write_submitty_date(submission_datetime)
    seconds_late = int((submission_datetime-gradeable_deadline_datetime).total_seconds())
    # note: negative = not late
    grading_began_longstring = dateutils.write_submitty_date(grading_began)
    grading_finished_longstring = dateutils.write_submitty_date(grading_finished)
    gradingtime = int((grading_finished-grading_began).total_seconds())
    write_grade_history.just_write_grade_history(history_file,
                                                 gradeable_deadline_longstring,
                                                 submission_longstring,
                                                 seconds_late,
                                                 queue_time_longstring,
                                                 is_batch_job_string,
                                                 grading_began_longstring,
                                                 waittime,
                                                 grading_finished_longstring,
                                                 gradingtime,
                                                 grade_result)
    #---------------------------------------------------------------------
    # WRITE OUT VERSION DETAILS
    if WRITE_DATABASE:
        insert_database_version_data.insert_to_database(
            obj["semester"],
            obj["course"],
            obj["gradeable"],
            obj["user"],
            obj["team"],
            obj["who"],
            True if obj["is_team"] else False,
            str(obj["version"]))
    print ("pid",my_pid,"finished grading ", next_to_grade, " in ", gradingtime, " seconds")
    grade_items_logging.log_message(is_batch_job,which_untrusted,submission_path,"grade:",gradingtime,grade_result)
    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        f.write("FINISHED GRADING!")
    # save the logs!
    shutil.copytree(tmp_logs,os.path.join(results_path,"logs"))
    # --------------------------------------------------------------------
    # REMOVE TEMP DIRECTORY
    shutil.rmtree(tmp)
    # --------------------------------------------------------------------
    # CLEAN UP DOCKER
    if USE_DOCKER:
        subprocess.call(['docker', 'rm', '-f', container])
# ==================================================================================
# ==================================================================================
if __name__ == "__main__":
    args = parse_args()
    # Grade exactly one queue item described by the command-line arguments.
    just_grade_item(args.next_directory,args.next_to_grade,args.which_untrusted)
| alirizwi/Submitty | bin/grade_item.py | grade_item.py | py | 29,887 | python | en | code | null | github-code | 36 |
#!/usr/bin/env python3 -tt
"""
Task:
https://adventofcode.com/2019/day/9
"""
# Imports
import sys
import os
import re
import math
import time
import itertools
# Global variables
#task="d-9.test"
task="d-9"
infile=task + ".input"
def readInput():
    """Return the full contents of the puzzle input file (input/<infile>)."""
    with open('input/' + infile) as handle:
        return handle.read()
class AmpState:
    """Mutable state of one Intcode computer (amplifier).

    Attributes set by the constructor:
      id          -- label used when dumping state
      pos         -- instruction pointer to resume from
      phase       -- amplifier phase setting
      instruction -- the Intcode program as a list of ints
      input       -- next input value fed to opcode 3
      output      -- last value emitted by opcode 4
    Plus runtime bookkeeping:
      done        -- True once opcode 99 has executed
      visits      -- number of times amp() processed this state
      rel_base    -- Intcode relative base (adjusted by opcode 9)
    """
    def __init__(self, id = "X", pos = 0, phase = 0, instruction = None, input = 0, output = 0):
        # BUG FIX: the original default 'instruction=[]' was a mutable default
        # argument shared across every instance; use None and build a fresh
        # list per instance instead.
        self.id = id
        self.pos = pos
        self.phase = phase
        self.instruction = [] if instruction is None else instruction
        self.input = input
        self.output = output
        self.done = False
        self.visits = 0
        self.rel_base = 0

    def printInstruction(self):
        """Print the raw Intcode program list."""
        print('AmpState instruction: ', self.instruction)

    def print(self):
        """Print a one-line dump of all state fields (debug helper)."""
        print('AmpState: ', self.id, self.pos, self.rel_base, self.phase, self.instruction, self.input, self.output, self.done, self.visits)
def getValue(mode, amplifier_state, pos, instruction):
    """Fetch an Intcode operand according to its parameter mode.

    mode 0 -- position:  instruction[pos] is an address
    mode 1 -- immediate: instruction[pos] is the value itself
    mode 2 -- relative:  instruction[pos] is an offset from the relative base
    """
    raw = instruction[pos]
    if mode == 1:
        return raw
    if mode == 0:
        return instruction[raw]
    if mode == 2:
        return instruction[amplifier_state.rel_base + raw]
def getPos(mode, amplifier_state, pos, instruction):
    """Resolve a write address: relative-base offset in mode 2, direct otherwise."""
    offset = instruction[pos]
    return amplifier_state.rel_base + offset if mode == 2 else offset
def amp(amplifier_state):
    """Run the Intcode program held in amplifier_state until opcode 99.

    The program is copied into a fixed 10000-cell memory (zero filled) so
    writes beyond the program length are allowed; execution resumes at
    amplifier_state.pos. Opcode 4 output is stored in amplifier_state.output
    and also fed back as the next input. Marks the state done and returns it.

    NOTE(review): opcode 3 always writes to rel_base + instruction[pos+1],
    ignoring the parameter's own mode digit -- this matches how the rest of
    this script uses it, but confirm against the full Intcode spec.
    NOTE(review): memory is capped at 10000 cells; larger programs/addresses
    would raise IndexError.
    """
    input = amplifier_state.input
    amplifier_state.visits += 1
    instruction = [0 for i in range(10000)]
    for i in range(len(amplifier_state.instruction)):
        instruction[i] = amplifier_state.instruction[i]
    pos = amplifier_state.pos
    while instruction[pos] != 99:
        # Decode the opcode: last two digits are OP, preceding digits are the
        # parameter modes C (1st param), B (2nd), A (3rd).
        cmd_tmp = [int(d) for d in str(instruction[pos])][::-1]
        cmd = [0,0,0,0,0]
        for i in range(len(cmd_tmp)):
            cmd[i] = cmd_tmp[i]
        cmd = cmd[::-1]
        OP, C, B, A = 10*cmd[3] + cmd[4], cmd[2], cmd[1], cmd[0]
        #print("CMD", cmd, pos)
        # OP 1: add, OP 2: multiply
        if (OP == 1 or OP == 2):
            a = getValue(C, amplifier_state, pos+1, instruction)
            b = getValue(B, amplifier_state, pos+2, instruction)
            c = getPos(A, amplifier_state, pos+3, instruction)
            if OP == 1:
                instruction[c] = a + b
            if OP == 2:
                instruction[c] = (a*b)
            pos += 4
        # INPUT/OUTPUT
        if (OP == 3 or OP == 4):
            if OP == 4:
                output = getValue(C, amplifier_state, pos+1, instruction)
                amplifier_state.output = output
                input = output
                #print("##########\nOutput: ", output)
                #print("##########")
            if OP == 3:
                # print("OP 3 pos:{} inp:{}".format(pos, input))
                instruction[amplifier_state.rel_base + instruction[pos+1]] = input
            pos += 2
        # OP 9: adjust the relative base
        if OP == 9:
            value = getValue(C, amplifier_state, pos+1, instruction)
            rb = amplifier_state.rel_base
            # print("OP 9 RB old: {} new: {} pos: {} val: {}".format(rb, rb + value, pos+1, value))
            amplifier_state.rel_base += value
            pos += 2
        # OP 5: jump-if-true, OP 6: jump-if-false, OP 7: less-than, OP 8: equals
        if (OP > 4 and OP < 9):
            first_param = getValue(C, amplifier_state, pos+1, instruction)
            second_param = getValue(B, amplifier_state, pos+2, instruction)
            third_param = getPos(A, amplifier_state, pos+3, instruction)
            if OP == 5:
                if first_param != 0:
                    pos = second_param
                else:
                    pos += 3
            if OP == 6:
                if first_param == 0:
                    pos = second_param
                else:
                    pos += 3
            if OP == 7:
                val_store = 0
                if first_param < second_param:
                    val_store = 1
                instruction[third_param] = val_store
                pos +=4
            if OP == 8:
                val_store = 0
                if first_param == second_param:
                    val_store = 1
                instruction[third_param] = val_store
                pos +=4
    amplifier_state.done = True
    return amplifier_state
def test():
    """Ad-hoc smoke test: run one sample Intcode program and dump the state."""
    # Alternate sample programs from the puzzle description:
    #   "1102,34915192,34915192,7,4,7,99,0"
    #   "104,1125899906842624,99"
    instructions = ["109,1,204,-1,1001,100,1,100,1008,100,16,101,1006,101,0,99"]
    print("wdawd")
    for ins in instructions:
        program = [int(token) for token in ins.split(',')]
        state = AmpState("A", 0, 1, program.copy(), 0, 0)
        amp(state)
        state.print()
def a():
    """Part a: run the BOOST program in test mode (input 1), print the keycode."""
    program = [int(token) for token in readInput().split(',')]
    state = AmpState("A", 0, 1, program.copy(), 1, 0)
    amp(state)
    print("a): BOOST keycode", state.output)
def b():
    """Part b: run the BOOST program in sensor mode (input 2), print the signal."""
    program = [int(token) for token in readInput().split(',')]
    state = AmpState("A", 0, 1, program.copy(), 2, 0)
    amp(state)
    print("b): Distress signal", state.output)
# Main body
if __name__ == '__main__':
    # test()
    a()
    b()
    # NOTE(review): exits with status 1 even on success -- confirm this is
    # intentional before relying on the exit code in scripts.
    sys.exit(1)
| peter-steiner/adventofcode-2019 | d-9.py | d-9.py | py | 5,123 | python | en | code | 0 | github-code | 36 |
def solve():
    """Read "n k" from stdin and return the k-th smallest divisor of n, or 0."""
    n, k = map(int, input().split())
    divisors = [d for d in range(1, n + 1) if n % d == 0]
    return divisors[k - 1] if len(divisors) >= k else 0
if __name__ == '__main__':
    # Entry point: print the k-th divisor (or 0) for the input line "n k".
    print(solve())
| bangalcat/Algorithms | algorithm-python/boj/boj-2501.py | boj-2501.py | py | 257 | python | en | code | 1 | github-code | 36 |
# Image Credits
# Bullet and Spaceship sprite: https://q.utoronto.ca/courses/288975/files/24417060?module_item_id=4444455
# Dinosaur sprite: https://arks.itch.io/dino-characters
# Block sprite: https://replit.com/talk/ask/Pygame-Sprite-Graphics/38044
# Gem, Box, Half platform: https://opengameart.org/content/platformer-art-deluxe
# imports
import pygame
import numpy
import spritesheet
import random
from pygame.locals import *
pygame.init()
#width and height for screen
width = 1500
height = 400
screen = pygame.display.set_mode((width, height))
bullets = pygame.sprite.Group()
# colour constants
BLACK = (0, 0, 0)
clear = (0, 0, 0, 0)
class Sprite(pygame.sprite.Sprite):
    """Base sprite: loads an image file and centres its rect at (startx, starty)."""
    def __init__(self, image, startx, starty):
        # image is a file path passed to pygame.image.load.
        super().__init__()
        self.image = pygame.image.load(image)
        self.rect = self.image.get_rect()
        self.rect.center = [startx, starty]
    def update(self):
        """Per-frame hook; this base sprite is static, subclasses override."""
        pass
    def draw(self, screen):
        """Blit this sprite onto the given surface at its current rect."""
        screen.blit(self.image, self.rect)
class Player(Sprite):
    """Animated dinosaur player: walking animation, gravity/jumping, platform
    collision, and background swapping when crossing the screen edge.

    Note: __init__ deliberately calls pygame.sprite.Sprite.__init__ directly,
    bypassing Sprite.__init__'s single-image loading in favour of a sprite sheet.
    """
    change_y = 0  # vertical velocity (pixels/frame); class-level default
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        # loading images
        sprite_sheet_image = pygame.image.load('dino.png').convert_alpha()
        sprite_sheet = spritesheet.SpriteSheet(sprite_sheet_image)
        self.dinos = []           # dinos[action] -> list of frames for that action
        self.dinosteps = [4, 6, 3, 4]  # frame count per action row in the sheet
        self.action = 0           # 0 = idle, 1 = walking (set in update())
        self.t = pygame.time.get_ticks()
        self.cooldown = 100       # ms between animation frames
        self.frame = 1
        self.count = 0
        self.direction = True     # True = facing right
        self.bg = True            # which of the two backgrounds is active
        self.bullets = 0          # grows as the player crosses the screen; read by Enemy.shoot
        #set up the background image
        self.background = pygame.image.load('background.png')
        self.background = pygame.transform.scale(self.background,(width,height))
        # adding the frames of the player sprite to the dinos list
        for x in self.dinosteps:
            temp = []
            for i in range(x):
                temp.append(sprite_sheet.get_image(self.count, 24, 24, 3, BLACK))
                self.count += 1
            self.dinos.append(temp)
        # setting the initial player display
        self.image = self.dinos[0][0]
        self.rect = self.image.get_rect()
        self.rect.y = 330
    def walk_animation(self):
        """Advance the walking animation every self.cooldown milliseconds."""
        # updating the player's walking frames
        curr = pygame.time.get_ticks()
        if curr - self.t >= self.cooldown:
            self.frame += 1
            self.t = curr
        # NOTE(review): wraps on len(self.dinos) (number of actions, 4), not
        # len(self.dinos[self.action]); actions with more than 4 frames never
        # show frames 4+ — confirm this is intended.
        if self.frame >= len(self.dinos):
            self.frame = 0
        # switching images based on direction
        if self.direction:
            self.image = self.dinos[self.action][self.frame]
        else:
            self.image = pygame.transform.flip(self.dinos[self.action][self.frame], True, False)
    def jump(self):
        """Start a jump by giving the player an upward velocity."""
        self.change_y = -10
    # citation: https://q.utoronto.ca/courses/288975/files/24582167?module_item_id=4467158
    def calc_grav(self):
        """Apply gravity and clamp the player to the bottom of the screen."""
        if self.change_y == 0:
            self.change_y = 1
        else:
            self.change_y += .35
        # See if we are on the ground
        if self.rect.y >= height - self.rect.height and self.change_y >= 0:
            self.change_y = 0
            self.rect.y = height - self.rect.height
    def check_collision(self, boxes):
        """Resolve horizontal collisions by snapping against the hit block."""
        block_hit_list = pygame.sprite.spritecollide(self, boxes, False)
        for block in block_hit_list:
            if self.direction:
                self.rect.right = block.rect.left
            elif not self.direction:
                # Otherwise if we are moving left, do the opposite
                self.rect.left = block.rect.right
    def check_under(self, boxes):
        """Resolve vertical collisions (landing on top / bumping underside)."""
        block_hit_list = pygame.sprite.spritecollide(self, boxes, False)
        for block in block_hit_list:
            # Reset our position based on the top/bottom of the object
            if self.change_y > 0:
                self.rect.bottom = block.rect.top
            elif self.change_y < 0:
                self.rect.top = block.rect.bottom
            self.change_y = 0
    def update(self, boxes):
        """Per-frame step: gravity, input-driven movement, collisions, and
        background/level swap when the player walks off the right edge."""
        self.calc_grav()
        if self.change_y > 0:
            self.check_under(boxes)
        # moving the player in the direction they press
        key = pygame.key.get_pressed()
        if key[pygame.K_LEFT]:
            self.rect.x -= 5
            self.action = 1
            self.direction = False
            self.walk_animation()
            self.check_collision(boxes)
        elif key[pygame.K_RIGHT]:
            self.rect.x += 5
            self.action = 1
            self.direction = True
            self.walk_animation()
            self.check_collision(boxes)
        else:
            self.action = 0
            self.walk_animation()
        self.rect.y += self.change_y
        # change background and increasing bullets once the player crosses the end
        if self.rect.x > 1400:
            if self.bg:
                self.bg = False
                self.background = pygame.image.load('background_01.png')
                self.background = pygame.transform.scale(self.background,(width,height))
                self.rect.x = 0
                self.bullets += 2
            else:
                self.bg = True
                self.background = pygame.image.load('background.png')
                self.background = pygame.transform.scale(self.background,(width,height))
                self.rect.x = 0
                self.bullets += 2
class Enemy(Sprite):
    """Spaceship enemy that patrols vertically on the right edge and keeps the
    shared `bullets` group topped up based on the player's progress."""
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        # loading images
        player_img = pygame.image.load("enemy.png").convert_alpha()
        self.image = pygame.transform.scale(player_img, (100, 100))
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        self.radius = 20
        self.rect.x = 1400
        self.rect.y = 100
        self.speedy = 3  # vertical patrol speed (pixels/frame)
    def update(self, player):
        """Patrol between y=50 and y=350, fire, and advance all bullets."""
        # moving the enemy from the bottom to the top of the screen
        self.rect.y += self.speedy
        if self.rect.y >= 350 or self.rect.y < 50:
            self.speedy = -self.speedy
        self.shoot(player)
        bullets.update()
    def shoot(self, player):
        """Top up the module-level `bullets` group so its size tracks
        player.bullets (which grows each time the player crosses the screen)."""
        # creating more bullets based on how many times the player crossed the screen
        while player.bullets >= len(bullets):
            b = Bullet(self.rect.x, random.randint(self.rect.top, self.rect.bottom))
            bullets.add(b)
class Bullet(Sprite):
    """Laser projectile that travels left and self-destructs off-screen."""
    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        # loading images and setting start position
        self.image = pygame.image.load("laser.png").convert_alpha()
        self.rect = self.image.get_rect()
        self.rect.y = y
        self.rect.x = x
    def update(self):
        """Move 3px left per frame; kill the sprite once past the left edge."""
        # moving the bullet towards the player, killing it if it goes off screen
        self.rect.x -= 3
        if self.rect.x < 0:
            self.kill()
class Gem(Sprite):
    """Collectible gem; blanked (filled transparent) when the player touches it."""
    def __init__(self, startx, starty):
        super().__init__("gemBlue.png", startx, starty)
class Ledge (Sprite):
    """Half-height grass platform the player can stand on."""
    def __init__(self, startx, starty):
        super().__init__("grassHalf.png", startx, starty)
class Lava (Sprite):
    """Hazard tile: touching it ends the game (checked in main())."""
    def __init__(self, startx, starty):
        super().__init__("liquidLavaTop_mid.png", startx, starty)
class Platform(Sprite):
    """Solid box tile used for ground, walls, and level geometry."""
    def __init__(self, startx, starty):
        super().__init__("boxAlt.png", startx, starty)
class MovablePlatform(Platform):
    """A Platform that patrols horizontally between x positions `start` and `end`.

    Improvement: the original used numpy.sign solely to compute an integer
    sign; a plain comparison expression produces the same -1/0/+1 without
    depending on numpy for this class.
    """
    def __init__(self, startx, starty, start, end, speed):
        super().__init__(startx, starty)
        self.start = start
        self.end = end
        self.speed = speed
        # +1 when moving toward `end`, -1 toward `start`, 0 if start == end.
        self.direction = (end > start) - (end < start)
    def update(self):
        """Advance one step and reverse direction at either endpoint."""
        self.rect.x += self.speed * self.direction
        if self.rect.x <= self.start:
            self.direction = (self.end > self.start) - (self.end < self.start)
        elif self.rect.x >= self.end:
            self.direction = (self.start > self.end) - (self.start < self.end)
def main():
    """Game entry point: build the level, then run the update/draw loop at 60 FPS.

    Bug fixes vs. original:
    - Gem, enemy and bullet collision checks used a second `for i in
      range(len(group))` loop AFTER the draw loop, so they only ever tested
      the LAST sprite bound by the loop variable (missed collisions with
      every other bullet/gem). Each group now draws and collision-tests
      every sprite in a single pass.
    - The dangerZone group was redrawn len(dangerZone) times per frame.
    """
    pygame.init()
    screen = pygame.display.set_mode((width,height))
    clock = pygame.time.Clock()
    # all sprites will be added here
    player = Player()
    players = pygame.sprite.Group()
    players.add(player)
    enemies = pygame.sprite.Group()
    enemy = Enemy()
    enemies.add(enemy)
    platforms = pygame.sprite.Group()
    dangerZone = pygame.sprite.Group()
    gems = pygame.sprite.Group()
    # platform coordinates
    platforms.add(Platform(225, 365))
    platforms.add(Platform(295, 365))
    platforms.add(Platform(365, 365))
    platforms.add(Platform(365, 295))
    platforms.add(Ledge(580, 170))
    platforms.add(Platform(755,295))
    # Left wall border
    platforms.add(Platform(-50, 365))
    platforms.add(Platform(-50, 295))
    platforms.add(Platform(-50, 225))
    platforms.add(Platform(-50, 155))
    platforms.add(Platform(-50, 85))
    platforms.add(Platform(-50, 15))
    # Right wall border
    platforms.add(Platform(1535,0))
    platforms.add(Platform(1535,70))
    platforms.add(Platform(1535,140))
    platforms.add(Platform(1535,210))
    platforms.add(Platform(1535,280))
    platforms.add(Platform(1535,350))
    platforms.add(Platform(1535,420))
    platforms.add(Platform(755,365))
    platforms.add(MovablePlatform(485, 295, 400, 650, 1))
    # add danger zones
    dangerZone.add(Lava(435, 365))
    dangerZone.add(Lava(505, 365))
    dangerZone.add(Lava(575, 365))
    dangerZone.add(Lava(645, 365))
    dangerZone.add(Lava(715, 365))
    # add gem placement
    gems.add(Gem(585, 115))
    # done stays True until the window is closed or the player dies
    done = True
    while done:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = False
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_UP:
                    player.jump()
            pygame.event.pump()
        # Draw loop
        screen.fill((0,0,0))
        screen.blit(player.background,(0,-1))
        # Draw and collision-test EVERY sprite in each group (bug fix).
        for gem in gems:
            gem.draw(screen)
            if player.rect.colliderect(gem.rect):
                gem.image.fill(clear)  # blank the gem once collected
        dangerZone.draw(screen)
        for lava in dangerZone:
            if player.rect.colliderect(lava.rect):
                done = False
        for foe in enemies:
            foe.draw(screen)
            if player.rect.colliderect(foe.rect):
                done = False
        for b in bullets:
            b.draw(screen)
            if player.rect.colliderect(b.rect):
                done = False
        platforms.draw(screen)
        player.draw(screen)
        player.update(platforms)
        pygame.display.flip()
        platforms.update()
        dangerZone.update()
        gems.update()
        enemies.update(player)
        clock.tick(60)
    pygame.quit()
if __name__ == "__main__":
main()
| mashalll/cct211 | main.py | main.py | py | 10,968 | python | en | code | 0 | github-code | 36 |
3494867644 | #!/usr/bin/env python3
# Caoimhe De Buitlear: 19378783
# I acknowledge the DCU Academic Integrity Policy: https://www.dcu.ie/sites/default/files/policy/1_-_integrity_and_plagiarism_policy_ovpaa-v4.pdf
from queue import Queue
from format import format_rr
def round_r(arr):
    """Simulate round-robin CPU scheduling and print a per-task summary table.

    arr: list of "name,priority,burst" strings (burst time in milliseconds).
    Returns [average wait time, average turnaround time].
    NOTE(review): Queue here is the project-local FIFO (enqueue / dequeue /
    isEmpty), not stdlib queue.Queue — confirm the local module wins the
    import.
    """
    #time quantum is 10 milliseconds
    q = 10
    time = 0
    burst_times = []
    #make a burst times queue
    bt = Queue()
    # make a process name queue
    pq = Queue()
    #make a dictionary to store task names and their finish times
    d = {}
    for v in arr:
        task, p, bur = v.split(",")
        burst_times.append(int(bur))
        # adding the values to the queue
        bt.enqueue(int(bur))
        pq.enqueue(task)
        # initalizing each value in the dictionary with 0
        d[task] = 0
    # Run the simulation: each task gets at most one quantum per turn.
    while not bt.isEmpty():
        v = bt.dequeue()
        pnum = pq.dequeue()
        #if the value removed from the queue is bigger
        #than the quantam time
        if v > q:
            # decrease the value by the quantam time
            v -= q
            # increase the time
            time += q
            # add the remainder of the value to the queue to be executed
            bt.enqueue(v)
            pq.enqueue(pnum)
        elif v <= q:
            #if the value is less, then increase the time by how long it takes
            time += v
            d[pnum] = time
    t = ta_time_rr(d)
    w = wait_time_rr(d, burst_times)
    # avg turnaround time and the individual turnaround times
    ta, arrt = t[0], t[1]
    # avg wait time and the individual wait times
    wt, arrw = w[0], w[1]
    format_rr()
    i = 0
    #loop through and print out
    for v in arr:
        task, p, bur = v.split(",")
        print("{:<8} {:<10} {:<10} {:<10} {:<10}".format(task, p, bur, arrw[i], arrt[i]))
        i += 1
    return [wt, ta]
def ta_time_rr(d):
    """Return [average turnaround time, per-task turnaround times].

    d maps task name -> finish time; dict insertion order (Python 3.7+)
    keeps the per-task list aligned with the input order.
    """
    turnaround_times = list(d.values())
    average = sum(turnaround_times) / len(turnaround_times)
    return [average, turnaround_times]
def wait_time_rr(d, burst):
    """Return [average wait time, per-task wait times].

    d maps task name -> finish (turnaround) time; burst holds the burst
    times in the same insertion order, so wait = finish - burst for each.
    """
    waits = [finish - burst[i] for i, finish in enumerate(d.values())]
    return [sum(waits) / len(waits), waits]
| debuitc4/scheduling_ | round_robin.py | round_robin.py | py | 2,455 | python | en | code | 0 | github-code | 36 |
71607160424 | import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
import pyximport
pyximport.install()
import heat_solver
def coeff_dis(x):
    """Piecewise-constant diffusion coefficient with two discontinuities.

    alpha(x) = 10 for x > 0.5, 5 for x < 0.3, and 1 on [0.3, 0.5].

    Improvement: the original looped over indices in Python; np.select
    evaluates all three branches in one vectorized pass. Also accepts any
    array-like (list or ndarray) via np.asarray.
    """
    x = np.asarray(x)
    return np.select([x > 0.5, x < 0.3], [10.0, 5.0], default=1.0)
def coeff_step(x):
    """Step diffusion coefficient: alpha(x) = 10 for x < 0.5, else 1.

    Improvement: vectorized with np.where instead of a Python index loop
    (float branch values keep the original float dtype); accepts any
    array-like via np.asarray.
    """
    x = np.asarray(x)
    return np.where(x < 0.5, 10.0, 1.0)
# Convergence study: solve the nonlinear heat equation on successively refined
# grids (h = 1/10 ... 1/80, with k refined 4x per level so k ~ h^2), then
# compare solutions at t = 1 via interpolation onto the coarsest grid.
hinv = 10
kinv = 600
time600 = np.linspace(0,1,num=kinv+1)
x = np.linspace(0,1,num = hinv+1 )
alpha = coeff_dis(x)
u_600 = heat_solver.heat_solver_nl(hinv,kinv, alpha)
hinv = 20
kinv = 2400
x = np.linspace(0,1,num = hinv+1 )
time2400 = np.linspace(0,1,num=kinv+1)
alpha = coeff_dis(x)
u_2400 = heat_solver.heat_solver_nl(hinv,kinv, alpha)
hinv = 40
kinv = 9600
x = np.linspace(0,1,num = hinv+1 )
alpha = coeff_dis(x)
time9600 = np.linspace(0,1,num=kinv+1)
u_9600 = heat_solver.heat_solver_nl(hinv,kinv, alpha)
hinv = 80
kinv = 9600*4
x = np.linspace(0,1,num = hinv+1 )
alpha = coeff_dis(x)
time1000 = np.linspace(0,1,num=kinv+1)
u_1000 = heat_solver.heat_solver_nl(hinv,kinv, alpha)
# Interpolate each fine-grid solution (row 1 = solution at t = 1) so all can
# be evaluated on the coarse 11-point grid.
x = np.linspace(0,1,num=11)
x21 = np.linspace(0,1,num = 21)
u_24 = interpolate.interp1d(x21,u_2400[1,:])
x41 = np.linspace(0,1,num=41)
u_96 = interpolate.interp1d(x41,u_9600[1,:])
x81 = np.linspace(0,1,num = 81 )
u_10 = interpolate.interp1d(x81,u_1000[1,:])
# Figure 1: the t = 1 profiles at every resolution.
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
ax1.plot(x,u_600[1,:], label = 'h=1/10')
ax1.plot(x21,u_2400[1,:], label = 'h=1/20')
ax1.plot(x41,u_9600[1,:], label ='h=1/40')
ax1.plot(x81,u_1000[1,:], label ='h=1/80')
ax1.set_title('u at t=1 with $\\alpha$ discontinuous')
ax1.set_xlabel('x')
ax1.set_ylabel('u')
ax1.legend()
plt.savefig('unl_convdis2.pdf', bbox_inches=0)
# Estimated order of convergence from three successive refinements.
# NOTE(review): p_time reuses exactly the p_space expression, only dividing by
# log(4) instead of log(2) — confirm this is the intended temporal estimate and
# not a copy-paste slip.
p_space = np.log((u_600[1,1:10]-u_24(x[1:10]))/(u_24(x[1:10])-u_96(x[1:10])))/np.log(2.)
#print 'spatial convergence order for lambda = 1/6 is ', p_space
p_time = np.log((u_600[1,1:10]-u_24(x[1:10]))/(u_24(x[1:10])-u_96(x[1:10])))/np.log(4.)
#print 'temporal convergence order for lambda = 1/6 is ', p_time
# Figure 2: convergence orders plus the stability-relevant alpha*lambda profile.
f3, ((ax3,ax4,ax5)) = plt.subplots(3, sharex=True)
ax3.plot(x[1:10],p_space, label = 'space')
ax4.plot(x[1:10],p_time, label = 'time')
ax3.set_title('order of convergence for discontinuous $\\alpha$')
ax3.set_ylabel('p')
ax4.set_ylabel('p')
ax3.legend()
ax4.legend()
ax5.plot(x,1./20*coeff_dis(x), label='$\\alpha\lambda$', marker = 'o')
ax5.set_title('$\\alpha\lambda$ for constant k nonlinear case, $\\alpha$ discontinuous')
ax5.set_xlabel('x')
ax5.set_ylabel('$\\alpha\lambda$')
ax5.set_ylim(bottom=0, top= 0.6)
ax5.legend()
plt.savefig('unl_orderconvdis2.pdf', bbox_inches=0)
36491984140 | """
Module: Classes which are "services" that encapsulate domain logic.
Utilizing Strategy pattern for course registration-related functionality.
"""
from abc import ABC, abstractmethod
import db_utils
class RegistrationContext:
    """Strategy-pattern context: holds the shared registration inputs and
    delegates the actual work to an interchangeable strategy object."""

    def __init__(self, student, course, registerable_num, sql_conn,
                 mongo_conn):
        self.student = student
        self.course = course
        self.registerable_num = registerable_num
        self.sql_conn = sql_conn
        self.mongo_conn = mongo_conn
        self.strategy = None

    def set_strategy(self, strategy):
        """Select the concrete IRegistrationStrategy to run."""
        self.strategy = strategy

    def register(self):
        """Validate the course exists, then hand off to the strategy."""
        if not self.course:
            return 'Course not found'
        # Remaining validation/persistence lives in the strategy itself.
        return self.strategy.execute(self.student, self.course,
                                     self.registerable_num,
                                     self.sql_conn, self.mongo_conn)
class IRegistrationStrategy(ABC):
    """Interface for concrete registration strategies"""
    @abstractmethod
    def execute(self, student, course, registerable_num, sql_conn, mongo_conn):
        """Perform the registration action and return a display string."""
        pass
class SectionRegistration(IRegistrationStrategy):
    """Strategy: register a student into a course section.

    Bug fix: the capacity-full message was missing its ' / ' separator and
    printed e.g. "30 30 students registered"; it now matches the
    "max / max" format used by the lab strategies.
    """
    def execute(self, student, course, section_number, sql_conn, mongo_conn):
        """Validate, pick a status (Approved/Tentative/Pending), persist to
        SQL, and log the change to Mongo. Returns a user-facing message."""
        # Check if student already registered in course section
        if course.find_student_section(student.username):
            return f'Student already registered for section in ' \
                   f'{course.name}'
        # Check if requested section exists
        section = course.get_section(section_number)
        if not section:
            return 'Section not found'
        # Check if section has space remaining
        elif not section.space_remaining:
            return f'Registration denied. Section is full: ' \
                   f'{str(section.max_registration)} / ' \
                   f'{str(section.max_registration)} students registered'
        # Check if student is overloading
        elif student.is_fully_registered:
            status = 'Pending'
            section.add_student(student, 'Pending')
            display_str = "Student is overloading on registered " \
                          "classes, and has been added to section as "\
                          "'Pending' before department chair "\
                          "approval."
        # Check if course requires instructor approval
        elif course.approval_required:
            status = 'Tentative'
            section.add_student(student, 'Tentative')
            display_str = f"{course.name} course requires " \
                          f"approval from instructor. Student has " \
                          f"been added to section as 'Tentative' " \
                          f"before instructor approval."
        else:  # No restrictions - student is approved
            status = 'Approved'
            section.add_student(student, 'Approved')
            display_str = f"Student successfully registered for " \
                          f"{course.name} Section " \
                          f"{str(section_number)}"
        # Check if lab required. If so add reminder
        if course.lab_required:
            display_str += '\n**Reminder: Student required to ' \
                           'register for a lab for this course.**'
        # Add section to student's schedule
        student.add_section(section)
        # Add section registration to sql db
        db_utils.db_add_section_reg(status, sql_conn, student.username,
                                    section_number, course.name)
        # Insert log of change in mongo db
        log = f"Student '{student.username}' registered in " \
              f"{course.name} section {str(section.number)} with " \
              f"status '{status}'"
        mongo_conn.insert_log(log)
        return display_str
class LabRegistration(IRegistrationStrategy):
    """Strategy: register a student into a lab of a course.

    Bug fix: the "Lab is full" return statement had a trailing comma, so it
    returned a 1-tuple instead of a string; every other branch returns str.
    """
    def execute(self, student, course, lab_number, sql_conn, mongo_conn):
        """Validate, pick a status, persist to SQL, and log to Mongo.
        Returns a user-facing message string."""
        # Check if student is already registered in course section
        if not course.find_student_section(student.username):
            return 'Student must first register in a section in ' \
                   f'{course.name} before registering in a lab'
        # Check if student already registered in lab
        if course.find_student_lab(student.username):
            return f'Student already registered for lab in {course.name}. '\
                   f'Please use reschedule lab menu option if you would like '\
                   f'to change into a different lab'
        # Check if requested lab exists
        lab = course.get_lab(lab_number)
        if not lab:
            return 'Lab not found'
        # Check if lab has space remaining
        elif not lab.space_remaining:
            return f'Registration denied. Lab is full: ' \
                   f'{str(lab.max_registration)} / ' \
                   f'{str(lab.max_registration)} students registered'
        # Check if student is overloading
        elif student.is_fully_registered:
            status = 'Pending'
            lab.add_student(student, 'Pending')
            display_str = "Student is overloading on registered classes," \
                          " and has been added to lab as 'Pending' " \
                          "before department chair approval."
        # Check if course requires instructor approval
        elif course.approval_required:
            status = 'Tentative'
            lab.add_student(student, 'Tentative')
            display_str = f"{course.name} course requires approval " \
                          f"from instructor. Student has been added " \
                          f"to lab as 'Tentative' before instructor approval."
        else:  # No restrictions - student is approved
            status = 'Approved'
            lab.add_student(student, 'Approved')
            display_str = f"Student successfully registered for " \
                          f"{course.name} Lab {str(lab_number)}"
        # Add lab to student's schedule
        student.add_lab(lab)
        # Add lab registration to sql db
        db_utils.db_add_lab_reg(status, sql_conn, student.username, lab_number,
                                course.name)
        # Insert log of change in mongo db
        log = f"Student '{student.username}' registered in " \
              f"{course.name} lab {str(lab.number)} with " \
              f"status '{status}'"
        mongo_conn.insert_log(log)
        return display_str
class LabReschedule(IRegistrationStrategy):
    """Strategy: move a student from their current lab into a different one.

    Bug fix: the "Lab is full" return statement had a trailing comma, so it
    returned a 1-tuple instead of a string; every other branch returns str.
    """
    def execute(self, student, course, lab_number, sql_conn, mongo_conn):
        """Validate, pick a status, swap the SQL lab registration, and log
        to Mongo. Returns a user-facing message string."""
        # Check if student is already registered in course section
        if not course.find_student_section(student.username):
            return 'Student must first register in a section in ' \
                   f'{course.name} before registering in a lab'
        # Check if student not already registered in lab
        if not course.find_student_lab(student.username):
            return f'Student is not already registered for lab in ' \
                   f'{course.name}. Please use register in lab menu ' \
                   f'option to register for a lab in this course'
        # Check if requested lab exists
        lab = course.get_lab(lab_number)
        if not lab:
            return 'Lab not found'
        # Check if lab has space remaining
        elif not lab.space_remaining:
            return f'Registration denied. Lab is full: ' \
                   f'{str(lab.max_registration)} / ' \
                   f'{str(lab.max_registration)} students registered'
        # Check if student is overloading
        elif student.is_fully_registered:
            status = 'Pending'
            lab.add_student(student, 'Pending')
            display_str = "Student is overloading on registered classes," \
                          " and has been added to rescheduled lab as " \
                          "'Pending' before department chair approval."
        # Check if course requires instructor approval
        elif course.approval_required:
            status = 'Tentative'
            lab.add_student(student, 'Tentative')
            display_str = f"{course.name} course requires approval " \
                          f"from instructor. Student has been added " \
                          f"to rescheduled lab as 'Tentative' before " \
                          f"instructor approval."
        else:  # No restrictions - student is approved
            status = 'Approved'
            lab.add_student(student, 'Approved')
            display_str = f"Student successfully rescheduled into " \
                          f"{course.name} lab {str(lab_number)}"
        # Add lab to student's schedule
        student.add_lab(lab)
        # Remove old lab registration from sql db
        db_utils.db_delete_lab_reg(sql_conn, course.name, student.username)
        # Add new lab registration to sql db
        db_utils.db_add_lab_reg(status, sql_conn, student.username,
                                lab_number, course.name)
        # Insert log of change in mongo db
        log = f"Student '{student.username}' rescheduled into " \
              f"{course.name} lab {str(lab.number)} with " \
              f"status '{status}'"
        mongo_conn.insert_log(log)
        return display_str
class CourseDropper:
    """Class for student dropping specific course"""
    def __init__(self, student, course_name, sql_conn, mongo_conn):
        self.student = student
        self.course_name = course_name
        self.sql_conn = sql_conn
        self.mongo_conn = mongo_conn
    def drop_course(self):
        """Remove the student from the course's section and/or lab, delete
        the SQL registration, and log to Mongo. Returns a display string."""
        schedule = self.student.get_schedule()
        section = schedule.get_section(self.course_name)
        lab = schedule.get_lab(self.course_name)
        # Check if student is registered in course
        if not section and not lab:
            return f'Student is not currently registered in ' \
                   f'{self.course_name}'
        # Check if student registered in section
        if section:
            section.remove_student(self.student.username)
            schedule.remove_section(self.course_name)
        # Check if student registered in lab
        if lab:
            lab.remove_student(self.student.username)
            schedule.remove_lab(self.course_name)
        # Delete course registration from sql db
        db_utils.db_delete_course_reg(self.sql_conn, self.course_name,
                                      self.student.username)
        # Insert log of change in mongo db
        log = f"Student '{self.student.username}' has dropped " \
              f"{self.course_name}"
        self.mongo_conn.insert_log(log)
        return f'Student has successfully dropped {self.course_name}'
class AllCourseDropper:
    """Class for student dropping all courses"""
    def __init__(self, student, sql_conn, mongo_conn):
        self.student = student
        self.sql_conn = sql_conn
        self.mongo_conn = mongo_conn
    def drop_all_courses(self):
        """Remove the student from every registered section and lab, clear
        the schedule, delete all SQL registrations, and log to Mongo."""
        schedule = self.student.get_schedule()
        sections = schedule.sections.values()
        labs = schedule.labs.values()
        # Check that student is registered in a course
        if not sections and not labs:
            return 'Student is not currently registered in any course'
        else:
            for section in sections:
                section.remove_student(self.student.username)
            for lab in labs:
                lab.remove_student(self.student.username)
            # Reset the schedule's containers wholesale.
            schedule.sections = {}
            schedule.labs = {}
            # Delete all student's registrations from sql db
            db_utils.db_delete_all_reg(self.sql_conn, self.student.username)
            # Insert log of change in mongo db
            log = f"Student '{self.student.username}' has dropped all courses"
            self.mongo_conn.insert_log(log)
            return 'Student has successfully dropped all courses from ' \
                   'schedule'
class ApproveDenyRegistration:
    """Instructor approves/denies a student's registration in a course.

    Fix: removed a dead early `lab = course.find_student_lab(...)` lookup
    whose result was never read before being recomputed in the approval
    branch (redundant work, no behavioural effect).
    """
    def __init__(self, instructor, student_username, course_name, is_approved,
                 sql_conn, mongo_conn):
        self.instructor = instructor
        self.student_username = student_username
        self.course_name = course_name
        self.is_approved = is_approved
        self.sql_conn = sql_conn
        self.mongo_conn = mongo_conn
    def approve_deny_reg(self):
        """Validate teacher/course/student, apply the new status to the
        section (and lab, if any), persist to SQL, and log to Mongo."""
        # Check if instructor teaches course
        course = self.instructor.get_course(self.course_name)
        if not course:
            return f'Instructor does not teach {self.course_name}. ' \
                   f'Approve/deny not performed'
        # Check if student is registered in the course
        section = course.find_student_section(self.student_username)
        if not section:  # Student is not registered in the course
            return f'Student {self.student_username} not registered in ' \
                   f'{self.course_name}'
        # Get student's current registration status
        student_status = section.get_student(self.student_username)[0]
        # Only department chair can approve/deny students who are overloading
        if student_status == 'Pending' and not \
                self.instructor.is_department_chair:
            return f"Only department chair can approve / deny 'Pending' " \
                   f"student registrations. No action taken."
        else:  # Otherwise, instructor can approve/deny student
            if self.is_approved:
                section.set_student_status(self.student_username, 'Approved')
                display_str = f"Student '{self.student_username}' is now " \
                              f"approved for {self.course_name} section " \
                              f"{str(section.number)}"
            else:
                section.set_student_status(self.student_username, 'Denied')
                display_str = f"Student '{self.student_username}' has been" \
                              f" denied for {self.course_name} section " \
                              f"{str(section.number)}"
            lab = course.find_student_lab(self.student_username)
            if lab:  # If student registered in lab, approve/deny in lab
                if self.is_approved:
                    lab.set_student_status(self.student_username, 'Approved')
                    display_str += f"\nStudent '{self.student_username}' is " \
                                   f"now approved for {self.course_name} lab "\
                                   f"{str(lab.number)}"
                else:
                    lab.set_student_status(self.student_username, 'Denied')
                    display_str += f"\nStudent '{self.student_username}' has "\
                                   f"been denied for {self.course_name} lab " \
                                   f"{str(lab.number)}"
            # Update status in sql db
            status = 'Approved' if self.is_approved else 'Denied'
            db_utils.db_update_reg_status(status, self.sql_conn,
                                          self.student_username, self.course_name)
            # Insert log of change in mongo db
            log = f"Instructor '{self.instructor.username}' has {status} " \
                  f"student {self.student_username} for {self.course_name}"
            self.mongo_conn.insert_log(log)
            return display_str
class ApprovalRequiredModifier:
    """Class for modifying whether course requires instructor approval"""
    def __init__(self, instructor, course_name, approval_required, sql_conn,
                 mongo_conn):
        self.instructor = instructor
        self.course_name = course_name
        self.approval_required = approval_required
        self.sql_conn = sql_conn
        self.mongo_conn = mongo_conn
    def modify_approval_required(self):
        """Toggle the course's approval-required flag, persist to SQL, and
        log the change to Mongo. Returns a user-facing display string."""
        # Check if instructor teaches course
        course = self.instructor.get_course(self.course_name)
        if not course:
            return f'Instructor does not teach {self.course_name}. Approval ' \
                   f'not modified'
        else:
            course.set_approval_required(self.approval_required)
            if self.approval_required:
                display_str = f'{self.course_name} has been set to instructor'\
                              f' approval required'
            else:
                display_str = f'{self.course_name} has been set to instructor'\
                              f' approval not required'
            # Update approval required in sql db
            # (stored as an int flag: 1 = required, 0 = not required)
            app_req = 1 if self.approval_required else 0
            db_utils.db_update_approval_required(self.sql_conn, app_req,
                                                 self.course_name)
            # Insert log of change in mongo db
            app_str = "approval required" if self.approval_required else \
                "approval not required"
            log = f"Instructor '{self.instructor.username}' has set " \
                  f"course {self.course_name} to {app_str}"
            self.mongo_conn.insert_log(log)
            return display_str
class Grader:
    """Class for instructor adding a grade to a student's registration in a
    section"""
    def __init__(self, instructor, student_username, course_name, grade,
                 sql_conn, mongo_conn):
        self.instructor = instructor
        self.student_username = student_username
        self.course_name = course_name
        self.grade = grade
        self.sql_conn = sql_conn
        self.mongo_conn = mongo_conn
    def add_grade(self):
        """Validate teacher/course/student, record the grade on the section,
        persist to SQL, and log to Mongo. Returns a display string."""
        # Check if instructor teaches course
        course = self.instructor.get_course(self.course_name)
        if not course:
            return f'Instructor does not teach {self.course_name}. Grade not '\
                   f'added'
        # Check if student is registered in course
        section = course.find_student_section(self.student_username)
        if not section:
            return f"Student '{self.student_username}' is not registered "\
                   f"in {self.course_name}. Grade not added"
        else:
            section.add_grade(self.student_username, self.grade)
            display_str = f"Grade successfully added to Student " \
                f"'{self.student_username}' in {self.course_name} section "\
                f"{str(section.number)}"
        # Add grade to sql db
        db_utils.db_add_grade(self.sql_conn, self.course_name, section.number,
                              self.student_username, self.grade)
        # Insert log of change in mongo db
        log = f"Instructor '{self.instructor.username}' has added " \
              f"grade: {str(self.grade)} to student " \
              f"'{self.student_username}' for {self.course_name}"
        self.mongo_conn.insert_log(log)
        return display_str
| colebryant/course-registration-system | src/services.py | services.py | py | 19,419 | python | en | code | 0 | github-code | 36 |
12507519121 | # BFS
# Emoticon (BOJ 14226): minimum seconds to reach exactly S emoticons via copy-all / paste / delete-one
from collections import deque
s = int(input())
q = deque([(1, 0, 0)])  # state: (emoticons on screen, clipboard count, elapsed seconds)
visited = [[False] * 1001 for _ in range(1001)]
visited[1][0] = True
# BFS over (screen, clipboard) states; first time we reach `s` is optimal.
while q:
    now, copy, sec = q.popleft()
    if now == s:
        print(sec)
        break
    # Three moves: copy-all (clipboard := screen), paste, delete one.
    for i in ((now, now), (now+copy, copy), (now-1, copy)):
        now2, copy2 = i
        # NOTE(review): copy2 > 0 prunes delete moves made before the first
        # copy; copy-all always yields copy2 == now > 0 — confirm harmless
        # for the problem's input range.
        if 0 < now2 <= 1000 and 0 < copy2 <= 1000:
            if not visited[now2][copy2]:
                q.append((now2, copy2, sec+1))
                visited[now2][copy2] = True
| Hong-Jinseo/Algorithm | baekjoon/14226.py | 14226.py | py | 565 | python | en | code | 0 | github-code | 36 |
72404306664 | import re
def react(s):
    """Fully react a polymer: adjacent units of the same letter but opposite
    case annihilate (AoC 2018 day 5). Returns the collapsed string.

    Idiom cleanup: str.swapcase() replaces the manual upper/lower flip
    (identical for letters, and non-letters stay unchanged either way), and
    list.pop() replaces `del result[-1]`. Single O(n) stack pass.
    """
    stack = []
    for unit in s:
        if stack and stack[-1] == unit.swapcase():
            stack.pop()
        else:
            stack.append(unit)
    return ''.join(stack)
# Unit tests for react(): fully annihilating, partially inert, and stable inputs.
assert react('aA') == ''
assert react('abBA') == ''
assert react('abAB') == 'abAB'
assert react('aabAAB') == 'aabAAB'
def shortest(source):
    """Return the length of the shortest fully-reacted polymer obtainable by
    removing every occurrence of a single unit type (both cases) first."""
    reacted_lengths = (
        len(react(source.replace(chr(code), '').replace(chr(code).upper(), '')))
        for code in range(ord('a'), ord('z') + 1)
    )
    return min(reacted_lengths)
# Sanity checks against the Advent of Code 2018 day 5 part 2 examples.
assert shortest('dabAcCaCBAcCcaDA') == 4
assert shortest('baddacabbaUABBACADDAB') == 0
if __name__ == '__main__':
    with open('puzzle-input.txt') as f:
        polymer = f.read().strip()
    # NOTE(review): on malformed input this only prints a warning and still
    # runs shortest() on the bad string — confirm that's intended.
    if re.match(r'^[a-zA-Z]*$', polymer) is None:
        print("bad input")
    print(shortest(polymer))
| jorendorff/advent-of-code | 2018/05/polymer.py | polymer.py | py | 944 | python | en | code | 3 | github-code | 36 |
31757113296 | from django.db import models, transaction
from django.contrib.auth.models import AbstractUser
from django.core.exceptions import ValidationError
from django.db.models import JSONField
from django.db.models.signals import post_save
from django.dispatch import receiver
# Allowed values for Custom_User.userType: (stored value, human-readable label).
USER_TYPE_CHOICES = (
    ("customer", "Customer"),
    ("admin", "Admin"),
    ("shop_owner", "Shop Owner"),
)
# extend the user model
class Custom_User(AbstractUser):
    """Extended Django user with a role (userType) and an optional owning Shop."""
    userType = models.CharField(
        max_length=20,
        default="customer",
        choices=USER_TYPE_CHOICES,
        verbose_name="User Type",
    )
    # FK to the shop this user owns/works for; null for customers and admins.
    shopId = models.ForeignKey(
        "shop.Shop",
        verbose_name="Shop ID",
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    def save(self, *args, **kwargs):
        """Admins never belong to a shop: force shopId to None before saving."""
        if self.userType == "admin":
            self.shopId = None
        super().save(*args, **kwargs)
class Shop(models.Model):
    """A shop owned by a Custom_User; shop names are globally unique."""
    shopId = models.AutoField(primary_key=True)
    shopName = models.CharField(
        max_length=100,
        unique=True,
        verbose_name=("Shop Name"),
        error_messages={
            "unique": "This shop name is already taken.",
            "required": "This field is required.",
        },
    )
    description = models.CharField(
        max_length=100,
        verbose_name=("Description"),
        error_messages={"required": "This field is required."},
    )
    shopOwner = models.ForeignKey(
        "shop.Custom_User", verbose_name=("Shop Owner"), on_delete=models.CASCADE
    )
    def __str__(self):
        return self.shopName
    class Meta:
        verbose_name = "Shop"
        verbose_name_plural = "Shops"
class ShopProps(models.Model):
    """Free-form JSON property bag attached to a Shop."""
    shopPropsId = models.AutoField(primary_key=True)
    shopId = models.ForeignKey(
        "shop.Shop",
        verbose_name=("Shop ID"),
        on_delete=models.CASCADE,
        error_messages={"required": "This field is required."},
    )
    # Arbitrary key/value settings for the shop; schema not enforced here.
    props = models.JSONField(
        default=dict,
        verbose_name=("Shop Properties"),
        error_messages={"required": "This field is required."},
        blank=True,
        null=True,
    )
    class Meta:
        verbose_name = "Shop Property"
        verbose_name_plural = "Shop Properties"
class Category(models.Model):
    """Product category scoped to a Shop; category names are globally unique."""
    categoryId = models.AutoField(primary_key=True)
    name = models.CharField(
        max_length=100,
        unique=True,
        verbose_name=("Category Name"),
        error_messages={
            "unique": "This category name is already taken.",
            "required": "This field is required.",
        },
    )
    description = models.CharField(
        max_length=100,
        verbose_name=("Description"),
        error_messages={"required": "This field is required."},
    )
    shopId = models.ForeignKey(
        "shop.Shop", verbose_name=("Shop ID"), on_delete=models.CASCADE
    )
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = "Category"
        verbose_name_plural = "Categories"
class Product(models.Model):
    """A product sold by a Shop, optionally categorized, with image URLs."""
    productId = models.AutoField(primary_key=True, verbose_name=("Product ID"))
    name = models.CharField(
        max_length=100,
        verbose_name=("Product Name"),
        error_messages={"required": "name field is required."},
    )
    description = models.CharField(
        max_length=100,
        verbose_name=("Description"),
        error_messages={"required": "description field is required."},
    )
    price = models.DecimalField(
        max_digits=10,
        decimal_places=2,
        verbose_name=("Price"),
        error_messages={"required": "price field is required."},
    )
    poster_image_url = models.URLField(
        max_length=200,
        verbose_name=("Poster Image URL"),
        error_messages={"required": "poster_image_url field is required."},
        blank=True,
        null=True,
    )
    # Additional gallery image URLs stored as a JSON list.
    image_urls = models.JSONField(
        default=list, verbose_name=("Image URLs"), blank=True, null=True
    )
    shopId = models.ForeignKey(
        "shop.Shop", verbose_name=("Shop ID"), on_delete=models.CASCADE
    )
    categoryId = models.ForeignKey(
        "shop.Category",
        verbose_name=("Category ID"),
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = "Product"
        verbose_name_plural = "Products"
    def clean(self):
        """Model validation: reject non-positive prices."""
        if self.price <= 0:
            raise ValidationError("Price must be greater than zero.")
class Cart(models.Model):
    """A per-user shopping cart; line items live in the `products` JSON list."""
    # List of line-item dicts; clean() expects each to carry a "quantity" key.
    products = JSONField(default=list, blank=True)
    userId = models.ForeignKey(
        "shop.Custom_User", verbose_name=("User ID"), on_delete=models.CASCADE
    )
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    def __str__(self):
        return str(self.userId) + " Cart"
    class Meta:
        verbose_name = "Cart"
        verbose_name_plural = "Carts"
    def clean(self):
        # Reject any line item with a non-positive quantity.
        for product in self.products:
            if product["quantity"] <= 0:
                raise ValidationError("Quantity must be greater than zero.")
# Signal to create a new cart for a new customer user
@receiver(post_save, sender=Custom_User)
def create_cart_for_new_customer(sender, instance, created, **kwargs):
    """Create an empty Cart for every newly inserted customer user.

    Fires on every Custom_User save, but only acts on the initial insert
    (``created`` is True) of users whose userType is "customer".
    """
    if created and instance.userType == "customer":
        # objects.create() already persists the row; no extra save() needed.
        Cart.objects.create(userId=instance)
# # Signal to create a new shop for a new shop owner user
# @receiver(post_save, sender=Custom_User)
# def create_shop_for_new_shop_owner(sender, instance, created, **kwargs):
# if created and instance.userType == 'shop_owner':
# with transaction.atomic():
# shop = Shop.objects.create(shopOwner=instance)
# instance.shopId = shop.shopId
# instance.save()
# signal to update the shopId of the shop owner user when a new shop is created
@receiver(post_save, sender=Shop)
def update_shopId_for_shop_owner(sender, instance, created, **kwargs):
    """Link a freshly created Shop back to its owner's ``shopId`` field."""
    if created:
        # Re-fetch the owner by id so we save a fresh copy of the user row
        # rather than a possibly stale in-memory instance.
        user = Custom_User.objects.get(id=instance.shopOwner.id)
        user.shopId = instance
        user.save()
| A7med3365/Project4-Backend | shop/models.py | models.py | py | 6,379 | python | en | code | 0 | github-code | 36 |
def surroundedRegions(board):
    """Flip every 'O' region fully surrounded by 'X' to 'X', in place.

    An 'O' cell survives only if it is 4-directionally connected to an 'O'
    on the border of the board.  Returns the mutated board.
    Runs in O(rows * cols) using a BFS from the border cells.
    """
    from collections import deque
    if len(board) <= 2:
        # With at most two rows every cell touches the border; nothing to flip.
        return board
    rows, cols = len(board), len(board[0])
    directions = ((-1, 0), (1, 0), (0, -1), (0, 1))
    # Seed the BFS with every 'O' that sits on the border; those are safe.
    safe = set()
    queue = deque()
    for r in range(rows):
        for c in range(cols):
            if board[r][c] == 'O' and (r in (0, rows - 1) or c in (0, cols - 1)):
                safe.add((r, c))
                queue.append((r, c))
    # Expand border-connected 'O' regions inward (interior cells only).
    while queue:
        r, c = queue.popleft()
        for dr, dc in directions:
            nr, nc = r + dr, c + dc
            if (0 < nr < rows - 1 and 0 < nc < cols - 1
                    and board[nr][nc] == 'O' and (nr, nc) not in safe):
                safe.add((nr, nc))
                queue.append((nr, nc))
    # Any interior 'O' not reached from the border is surrounded: flip it.
    for r in range(1, rows - 1):
        for c in range(1, cols - 1):
            if board[r][c] == 'O' and (r, c) not in safe:
                board[r][c] = 'X'
    return board
# Demo: print the board before and after capturing surrounded regions.
board = [["X","O","X","O","X","O"],["O","X","O","X","O","X"],["X","O","X","O","X","O"],["O","X","O","X","O","X"]]
for j in board:
    print(j)
# surroundedRegions mutates `board` in place and also returns it.
for i in (surroundedRegions(board)):
    print(i)
36777613557 | from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
# Open the dummy-ticket form and select a country in the select2 dropdown.
driver = webdriver.Chrome()
driver.get('https://www.dummyticket.com/dummy-ticket-for-visa-application/')
driver.maximize_window()
# Click the billing-country widget to expand the option list.
driver.find_element(By.XPATH,"//span[@id='select2-billing_country-container']").click()
country_list = driver.find_elements(By.XPATH,"//span[@class='select2-results']/ul/li")
print(len(country_list))
# Scan the options and click the first one labelled 'Australia'.
for country in country_list:
    if country.text == 'Australia' :
        print(country.text)
        country.click()
        break
3855782251 | # %%
from sklearn.datasets import load_sample_image
import matplotlib.pyplot as plt
import seaborn as sns
# Load and display the sample photo.
with sns.axes_style('dark'):
    img = load_sample_image('china.jpg')
    plt.imshow(img)
# %%
print (img.shape)
# Rescale the colors so that they lie btw 0 and 1, then reshape the array to be
# a typical scikit-learn input (one row per pixel, one column per RGB channel)
img_r = (img / 255).reshape(-1,3)
print (img_r.shape)
# %%
from sklearn.cluster import KMeans
import numpy as np
# Cluster all pixels into 3 colors, then replace each pixel with its centroid.
k_colors = KMeans(n_clusters=3).fit(img_r)
y_pred = k_colors.predict(img_r)
centers = k_colors.cluster_centers_
labels = k_colors.labels_
new_img = k_colors.cluster_centers_[k_colors.labels_]
new_img = np.reshape(new_img, (img.shape))
# %%
# Show the original and the color-compressed image side by side.
fig = plt.figure(figsize=(10,10))
ax=fig.add_subplot(1,2,1,xticks=[],yticks=[],title='Original Image')
ax.imshow(img)
ax=fig.add_subplot(1,2,2,xticks=[],yticks=[],
    title='Color Compressed Image using K-Means')
ax.imshow(new_img)
plt.show()
# %%
# %%
# %%
# %%
35864159569 | # Import dependencies
import numpy as np
from keras.models import Sequential
from keras.layers import Activation, Dropout, UpSampling2D, Conv2D, Conv2DTranspose, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
import pickle, cv2, sys
# Set system settings
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Constants
train_data_filename = "train_images_hq.p"
train_labels_filename = "train_labels_hq.p"
INDEX_RANGE_RATE = 1
# NOTE(review): an int test_size means an ABSOLUTE sample count to sklearn,
# so TEST_SIZE = 1 puts exactly one sample into the validation split, not 1%
# -- confirm this is intended (the comment below says "percentage").
TEST_SIZE = 1
BATCH_SIZE = 32
EPOCHS = 8
# Load training images and labels from pickle file, return as NumPy array
print("Loading training data/images...")
train_images = np.array(pickle.load(open(train_data_filename, 'rb')))
print("Loading training labels...")
train_labels = np.array(pickle.load(open(train_labels_filename, 'rb')))
# Shuffle data
print("Shuffling training data...")
train_images, train_labels = shuffle(train_images, train_labels)
# Log
print(train_images[0].shape, "->", train_labels[0].shape)
# Show example
blank = np.zeros_like(train_labels[0])
ex = np.dstack((train_labels[0], blank, blank)).astype(np.uint8)
img_ex = cv2.addWeighted(train_images[0], 1, ex, 1, 0)
cv2.imshow("", img_ex)
# Blocks until a key is pressed in the preview window.
cv2.waitKey(0)
# Only use limited amount of training data samples
print("Limiting data range to", int(train_images.shape[0] * INDEX_RANGE_RATE), "out of", train_images.shape[0], "samples...")
train_images = train_images[0:int(train_images.shape[0] * INDEX_RANGE_RATE)]
train_labels = train_labels[0:int(train_labels.shape[0] * INDEX_RANGE_RATE)]
# Normalize labels
print("Normalizing training data labels...")
train_labels = train_labels / 255
# Split training data into training and test data (test_size is amount as percentage)
print("Splitting training data into training and testing data...")
X_train, X_val, y_train, y_val = train_test_split(train_images, train_labels, test_size=TEST_SIZE)
input_shape = X_train.shape[1:]
# Define neural network architecture
print("Defining model structure...")
# Use sequential architecture
model = Sequential()
# Add layers: an encoder (Conv2D + MaxPooling) followed by a mirrored
# decoder (UpSampling + Conv2DTranspose) producing a single-channel mask.
model.add(BatchNormalization(input_shape=input_shape))
model.add(Conv2D(1, (3, 3), padding='valid', strides=(1, 1), activation='relu'))
model.add(Dropout(0.25))
model.add(Conv2D(1, (3, 3), padding='valid', strides=(1, 1), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(8, (3, 3), padding='valid', strides=(1, 1), activation='relu'))
model.add(Dropout(0.25))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(16, (3, 3), padding='valid', strides=(1, 1), activation='relu'))
model.add(Dropout(0.25))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3), padding='valid', strides=(1, 1), activation='relu'))
model.add(Dropout(0.25))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(UpSampling2D(size=(2, 2)))
model.add(Conv2DTranspose(32, (3, 3), padding='valid', strides=(1, 1), activation='relu'))
model.add(Dropout(0.25))
model.add(UpSampling2D(size=(2, 2)))
model.add(Conv2DTranspose(16, (3, 3), padding='valid', strides=(1, 1), activation='relu'))
model.add(Dropout(0.25))
model.add(UpSampling2D(size=(2, 2)))
model.add(Conv2DTranspose(8, (3, 3), padding='valid', strides=(1, 1), activation='relu'))
model.add(Dropout(0.25))
model.add(UpSampling2D(size=(2, 2)))
model.add(Conv2DTranspose(1, (3, 3), padding='valid', strides=(1, 1), activation='relu'))
model.add(Dropout(0.25))
model.add(Conv2DTranspose(1, (3, 3), padding='valid', strides=(1, 1), activation='relu'))
# Compile the model
model.compile(optimizer='adam', loss='mean_squared_error')
# Train model
model.fit(
    X_train, y_train,
    batch_size=BATCH_SIZE,
    epochs=EPOCHS,
    verbose=1,
    validation_data=(X_val, y_val)
)
# Store model
model.save('model.h5')
# Show summary of model
model.summary()
# Evaluate model
print(model.evaluate(X_val, y_val, batch_size=BATCH_SIZE))
71760950503 | """module for containing the code that produces charts"""
import os
from bokeh.charts import Bar, output_file, show, Line
from bokeh.models import HoverTool
# bar chart showing total response by HH group split by digital/paper
def bar_response(results_list, output_path):
    """Render one stacked bar chart per dataframe in *results_list*.

    Each dataframe is expected to carry 'hh_type', 'perc_res' and 'digital'
    columns.  Charts are written under ``<output_path>/charts`` and opened in
    the browser via bokeh's ``show``.
    """
    output_dir = os.path.join(output_path, "charts")
    # makedirs(exist_ok=True) avoids the isdir-then-mkdir race and also
    # creates any missing parent directories.
    os.makedirs(output_dir, exist_ok=True)
    tools = "pan,wheel_zoom,box_zoom,reset,hover,save"
    # NOTE(review): every chart is written to the same file name, so only the
    # last dataframe's chart survives on disk -- confirm whether intended.
    output_file_path = os.path.join(output_dir, 'test bar.html')
    for df in results_list:
        p = Bar(df, label='hh_type', values='perc_res', stack='digital', title="a_title",
                legend='top_right', tools=tools)
        hover = p.select_one(HoverTool)
        hover.point_policy = "follow_mouse"
        hover.tooltips = [
            ("count", "@height"),
        ]
        output_file(output_file_path)
        show(p)
def line_response(results_list, output_path):
    """Placeholder for a line chart of responses over time (not implemented)."""
    # as http://bokeh.pydata.org/en/0.10.0/docs/gallery/line_chart.html
    # create df in correct format...
    pass
42095536498 | from subprocess import call
import win32api
import win32gui
import win32con
import win32com.client
from enum import Enum
import sounddevice as sd
from scipy.io.wavfile import read
import requests
import json
import numpy as np
from settings import Settings
from logging import debug, warning, error
class MixerCommand(Enum):
    """Command identifiers accepted by SoundMixer.execCommand (action['command'])."""
    MIC_MUTE = 0
    SOUND_MUTE = 1
    PLAY_FILE = 2
    MUSIC_TOGGLE_PLAY = 3
    MUSIC_NEXT_TRACK = 4
    MUSIC_PREV_TRACK = 5
    MUSIC_TOGGLE_MUTE = 6
class MusicService(Enum):
    """Music backends; only VOLUMIO_LOCAL is implemented in SoundMixer below."""
    VOLUMIO_LOCAL = 0
    SPOTIFY = 1
class SoundMixer():
    """Windows audio helper: mutes mic/system sound via WM_APPCOMMAND, plays
    wav files through sounddevice, and drives a Volumio player over HTTP."""
    def __init__(self, settings: Settings):
        # NOTE(review): `settings` is accepted but never used here -- confirm.
        # WM_APPCOMMAND message id plus the two APPCOMMAND payloads used below.
        self.WM_APPCOMMAND = 0x319
        self.APPCOMMAND_MICROPHONE_VOLUME_MUTE = 0x180000
        self.APPCOMMAND_SYSTEM_VOLUME_MUTE = 0x80000
        self.IsMuted = False
        self.IsSoundMuted = False
        self.prev_volume = 20 # default 'not-muted' volume
        # Scale factor (0..1) applied to wav samples before playback.
        self.output_volume = 0.1
    def setup_sound_device(self, playbackDeviceName: str) -> None:
        """Select the output device whose name contains *playbackDeviceName*."""
        debug(sd.query_devices())
        if playbackDeviceName != "default":
            for idx, elem in enumerate(sd.query_devices()):
                if playbackDeviceName.lower() in elem['name'].lower():
                    sd.default.device = idx
                    break
    def send_input_hax(self, hwnd, msg):
        """Send *msg* to window *hwnd* character by character via WM_CHAR."""
        for c in msg:
            if c == "\n":
                win32api.SendMessage(hwnd, win32con.WM_KEYDOWN, win32con.VK_RETURN, 0)
                win32api.SendMessage(hwnd, win32con.WM_KEYUP, win32con.VK_RETURN, 0)
            else:
                win32api.SendMessage(hwnd, win32con.WM_CHAR, ord(c), 0)
    def toggleMic(self):
        """
        https://stackoverflow.com/questions/50025927/how-mute-microphone-by-python
        """
        # Send Ctrl+M to Discord (its mute hotkey), then toggle the OS-level
        # microphone mute on the foreground window.
        shell = win32com.client.Dispatch("WScript.Shell")
        shell.AppActivate("Discord")
        shell.SendKeys("^m", 0)
        hwnd_active = win32gui.GetForegroundWindow()
        win32api.SendMessage(hwnd_active, self.WM_APPCOMMAND, None, self.APPCOMMAND_MICROPHONE_VOLUME_MUTE)
    def toggleSystemSound(self):
        """Toggle the system-wide output mute via WM_APPCOMMAND."""
        hwnd_active = win32gui.GetForegroundWindow()
        win32api.SendMessage(hwnd_active, self.WM_APPCOMMAND, None, self.APPCOMMAND_SYSTEM_VOLUME_MUTE)
        pass
    def playFile(self, filepath):
        """Play a wav file at self.output_volume; returns False on any error."""
        if filepath is not None:
            try:
                a = read(filepath)
            except Exception as e:
                warning(f"Exception occured while reading file {filepath}, {e}")
                return False
            # Normalize to full scale, then attenuate by output_volume.
            array = np.array(a[1], dtype=int)
            scaled =np.int16(array/np.max(np.abs(array)) * int(32767 * self.output_volume))
            try:
                sd.play(scaled, a[0])
                sd.wait()
                sd.stop()
            except Exception as e:
                error(f"Exception occured while playing file {filepath}, {e}")
                return False
        # Returns True when filepath is None as well (nothing to do).
        return True
    def togglePlayMusic(self, service):
        """Toggle play/pause on the configured music service."""
        if service == MusicService.VOLUMIO_LOCAL.name:
            r = requests.get("http://volumio.local/api/v1/commands/?cmd=toggle")
            if r.status_code != 200:
                warning(f"failed to toggle music, reason: {r.reason}")
        else:
            warning("Service not implemented")
    def playNextTrack(self, service):
        """Skip to the next track on the configured music service."""
        if service == MusicService.VOLUMIO_LOCAL.name:
            r = requests.get("http://volumio.local/api/v1/commands/?cmd=next")
            if r.status_code != 200:
                warning(f"failed to skip to next track, reason: {r.reason}")
        else:
            warning("Service not implemented")
    def playPreviousTrack(self, service):
        """Skip to the previous track on the configured music service."""
        if service == MusicService.VOLUMIO_LOCAL.name:
            # NOTE(review): 'prev' is requested twice. This may be deliberate
            # (a single 'prev' often restarts the current track) or a leftover
            # duplicate line -- confirm against Volumio's behavior.
            requests.get("http://volumio.local/api/v1/commands/?cmd=prev")
            r = requests.get("http://volumio.local/api/v1/commands/?cmd=prev")
            if r.status_code != 200:
                warning(f"failed to skip to previous track, reason: {r.reason}")
        else:
            warning("Service not implemented")
    def toggleMuteMusic(self, service):
        """Mute (volume 0) or restore the previous volume on the service."""
        if service == MusicService.VOLUMIO_LOCAL.name:
            newVol = self.prev_volume
            currVol = self.getMusicServiceVolume(service)
            if currVol > 0:
                # Currently audible: remember the volume, then mute.
                newVol = 0
                self.prev_volume = currVol
            r = requests.get(f"http://volumio.local/api/v1/commands/?cmd=volume&volume={newVol}")
            if r.status_code != 200:
                warning(f"failed to toggle mute music, reason: {r.reason}")
        else:
            warning("Service not implemented")
    def getMusicServiceVolume(self, service=MusicService.VOLUMIO_LOCAL.name):
        """Return the current volume reported by the service."""
        if service == MusicService.VOLUMIO_LOCAL.name:
            r = requests.get("http://volumio.local/api/v1/getState")
            j_response = json.loads(r.content.decode())
            return j_response["volume"]
    def isMusicMuted(self):
        # Muted is defined as volume == 0.
        return False if self.getMusicServiceVolume() > 0 else True
    def isMusicPlaying(self, service=MusicService.VOLUMIO_LOCAL.name):
        """Return True when the service reports status 'play'."""
        if service == MusicService.VOLUMIO_LOCAL.name:
            r = requests.get("http://volumio.local/api/v1/getState")
            j_response = json.loads(r.content.decode())
            return True if j_response["status"] == "play" else False
    def execCommand(self, action, callback=None):
        """Dispatch an action dict ({'command': MixerCommand name, ...}).

        *callback* is only invoked after a PLAY_FILE command completes.
        """
        command = action['command']
        if command == MixerCommand.MIC_MUTE.name:
            self.toggleMic()
            self.IsMuted = not self.IsMuted
        elif command == MixerCommand.SOUND_MUTE.name:
            self.toggleSystemSound()
            self.IsSoundMuted = not self.IsSoundMuted
        elif command == MixerCommand.MUSIC_TOGGLE_PLAY.name:
            self.togglePlayMusic(action['service'])
        elif command == MixerCommand.MUSIC_TOGGLE_MUTE.name:
            self.toggleMuteMusic(action['service'])
        elif command == MixerCommand.MUSIC_NEXT_TRACK.name:
            self.playNextTrack(action['service'])
        elif command == MixerCommand.MUSIC_PREV_TRACK.name:
            self.playPreviousTrack(action['service'])
        elif command == MixerCommand.PLAY_FILE.name:
            filepath = action['filepath']
            debug(f"Started to play file '{filepath}'")
            successful = self.playFile(filepath)
            debug("Played file '{0}' successfully: {1}".format(filepath, successful))
            if callback is not None:
                callback()
def double_char(string):
    """Print *string* with every character doubled (e.g. "ab" -> "aabb")."""
    print("".join(ch + ch for ch in string))
# Read commands until "End"; echo doubled characters for every line
# except the ignored keyword "SoftUni".
while True:
    line = input()
    if line == "End":
        break
    if line != "SoftUni":
        double_char(line)
40513708895 | import sys
from textblob import TextBlob
import redis
import json
from multiprocessing import Pool
import signal
import logging
import cPickle
import sys
sys.path.insert(0, '../NLP/Wrapper/')
sys.path.insert(0, '../NLP/')
sys.path.insert(0, '../NLP/NaiveBayes')
sys.path.insert(0, '../NLP/MaximumEntropy')
sys.path.insert(0, '../NLP/StochasticGradientDescent')
sys.path.insert(0, '../NLP/SupportVectorMachine')
from wrapper import classifier_wrapper, tweetclass
from trend_utils import getTrends, classifyTrending
import time
from dateutil import parser
import urllib
# Log everything, and send it to stderr.
logging.basicConfig(level=logging.DEBUG)
# Redis keys used by the consumer loop in main().
TWEET_QUEUE_KEY = 'tweet_queue'
TRENDING_TOPICS_KEY = 'trending_keys'
ALL_SENTIMENTS_KEY = 'sentiment_stream'
PERMANENT_TOPICS_KEY = 'permanent_topics'
TOPIC_SENTIMENTS_KEY_PREFIX = 'topic_sentiment_stream:'
# Cap on the size of the general sentiment stream (oldest entries evicted).
MAX_SENTIMENTS = 10000
UPDATE_INT = 40 # seconds. Update interval for trending topics
def signal_handler(signum=None, frame=None):
    """Log the received signal and terminate the consumer cleanly."""
    logging.debug("Received signal %s", signum)
    logging.debug("Stopping tweet consumer.")
    # sys.exit raises SystemExit; the bare exit() builtin comes from the
    # site module and is not guaranteed outside interactive sessions.
    sys.exit(0)
def main():
    """Consume tweets from Redis, classify sentiment, and fan results out to
    a global stream plus per-topic streams for trending/permanent topics.

    NOTE(review): this module is Python 2 era (cPickle, urllib.unquote,
    str.decode) -- it will not run unmodified on Python 3.
    """
    logging.debug("Starting tweet consumer.")
    #for sig in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP, signal.SIGQUIT]:
    # On Windows, signal() can only be called with SIGABRT, SIGFPE, SIGILL, SIGINT, SIGSEGV, or SIGTERM.
    # A ValueError will be raised in any other case.
    for sig in [signal.SIGTERM, signal.SIGINT]:
        signal.signal(sig, signal_handler)
    r = redis.Redis('localhost')
    # Pickled sentiment classifier wrapper (see ../NLP/Wrapper).
    f = open("../NLP/Wrapper/test.txt", 'rb')
    p = cPickle.load(f)
    f.close()
    last_updated = None
    sentiment_queue_size = r.zcard(ALL_SENTIMENTS_KEY)
    while True:
        try:
            # Update topics and trends every UPDATE_INT seconds
            # (also runs on the very first iteration since last_updated is None).
            if last_updated is None or time.time() - last_updated > UPDATE_INT:
                permanent_topics_json = r.get(PERMANENT_TOPICS_KEY)
                if permanent_topics_json:
                    permanent_topics = json.loads(permanent_topics_json)
                else:
                    permanent_topics = []
                # Keep only the 12 newest trending keywords; drop the streams
                # of everything older.
                all_trending_keywords = r.zrange(TRENDING_TOPICS_KEY, 0, -1)
                trending_keywords = all_trending_keywords[-12:]
                removing_trending_keywords = all_trending_keywords[:-12]
                r.delete(*[TOPIC_SENTIMENTS_KEY_PREFIX + topic for topic in removing_trending_keywords])
                last_updated = time.time()
                # Expire per-topic sentiment points older than 24h.
                for topic in permanent_topics:
                    r.zremrangebyscore(TOPIC_SENTIMENTS_KEY_PREFIX + topic, "-inf", last_updated - 86400)
                for topic in trending_keywords:
                    r.zremrangebyscore(TOPIC_SENTIMENTS_KEY_PREFIX + topic, "-inf", last_updated - 86400)
            # Get tweet
            tweet_json = r.rpop(TWEET_QUEUE_KEY)
            if not tweet_json:
                time.sleep(1)
                continue
            tweet = json.loads(tweet_json)
            # Get Sentiment
            sentiment_classification = p.classify(tweet['text'], "naive_bayes", 0.5)
            if sentiment_classification == "positive":
                sentiment = 1
            elif sentiment_classification == "negative":
                sentiment = -1
            else:
                sentiment = 0
            # Format sentiment point correctly and put into correct queue
            # (neutral tweets are dropped entirely).
            if sentiment != 0:
                # Get coordinates
                if tweet['geo'] is not None:
                    latitude, longitude = tweet['geo']['coordinates']
                else:
                    latitude, longitude = None, None
                # Get topic: match the tweet text against trending keywords
                # and the keyword lists of each permanent topic.
                topics = None
                for trend in trending_keywords:
                    trend_decoded = urllib.unquote(trend).decode('utf8')
                    if (trend in tweet['text']) or (trend_decoded in tweet['text']):
                        if topics is None:
                            topics = []
                        topics.append(trend_decoded)
                for topic in permanent_topics:
                    for topic_keyword in permanent_topics[topic]:
                        topic_keyword_decoded = urllib.unquote(topic_keyword).decode('utf8')
                        if (topic_keyword in tweet['text']) or (topic_keyword_decoded in tweet['text']):
                            if topics is None:
                                topics = []
                            topics.append(topic)
                            break
                # Format sentiment point
                sentiment_point_timestamp = time.time()
                sentiment_point = {'topic': None, 'latitude': latitude, 'longitude': longitude, 'sentiment': sentiment, 'timestamp': sentiment_point_timestamp}
                # Put into general sentiment queue, evicting the oldest entry
                # once MAX_SENTIMENTS is reached.
                if sentiment_queue_size >= MAX_SENTIMENTS:
                    r.zremrangebyrank(ALL_SENTIMENTS_KEY, 0, 0)
                    sentiment_queue_size -= 1
                r.zadd(ALL_SENTIMENTS_KEY, json.dumps(sentiment_point), sentiment_point_timestamp)
                sentiment_queue_size += 1
                # Belongs to topics? Put into correct queue
                if topics is not None:
                    for topic in topics:
                        sentiment_point['topic'] = topic
                        r.zadd(TOPIC_SENTIMENTS_KEY_PREFIX + topic, json.dumps(sentiment_point), sentiment_point_timestamp)
        except Exception as e:
            # Keep the consumer alive on any per-tweet failure; full traceback
            # goes to the log.
            logging.exception("Something awful happened!")
if __name__ == '__main__':
main()
| archanl/thetweetrises | backend/tweet_categorize.py | tweet_categorize.py | py | 5,629 | python | en | code | 1 | github-code | 36 |
def count_inversion(nums):
    """Count inversions (pairs i < j with nums[i] > nums[j]) via merge sort.

    Sorts ``nums`` in place as a side effect and returns the inversion count.
    Runs in O(n log n).  Equal elements are NOT inversions: the original
    merge used ``>=`` and miscounted duplicates (e.g. [2, 2] gave 1).
    """
    def count_inversion_subarray(l, r):
        def merge_sorted_count_inversions(l, m, r):
            # Merge sorted halves nums[l:m] and nums[m:r]; every time a
            # right-half element wins, it jumps over all m - i remaining
            # left-half elements, each of which is one inversion.
            merged = []
            i, j, inversions = l, m, 0
            while i < m and j < r:
                if nums[i] > nums[j]:
                    inversions += m - i
                    merged.append(nums[j])
                    j += 1
                else:
                    merged.append(nums[i])
                    i += 1
            nums[l:r] = merged + nums[i:m] + nums[j:r]
            return inversions
        if r - l <= 1:
            return 0
        m = l + (r - l) // 2
        return (count_inversion_subarray(l, m)
                + count_inversion_subarray(m, r)
                + merge_sorted_count_inversions(l, m, r))
    return count_inversion_subarray(0, len(nums))
if __name__ == "__main__":
print(count_inversion([1, 7, 3, 23, 6, 2, 8, 4]))
| kashyapa/coding-problems | epi/revise-daily/11_honors_class/inversion_count.py | inversion_count.py | py | 1,017 | python | en | code | 0 | github-code | 36 |
2465805518 | import glob
import os
import statistics
from .pid_data_evaluator import PidDataEvaluator
class OcrEvaluator:
    """Aggregates OCR and line-order edit-distance metrics across many PIDs.

    Each PID (document id) is handled by a PidDataEvaluator built either from
    a single prediction/ground-truth XML pair or by scanning paired directory
    trees under pred_data_root_dir / gt_data_root_dir.
    """
    def __init__(self, options):
        # set properties
        self.correct_line_ocr_log = options.correct_line_ocr_log
        self.eval_main_text_only = options.eval_main_text_only
        self.eval_annotation_line_order = options.eval_annotation_line_order
        self.ocr_edit_distance_list = []
        self.line_order_edit_distance_list = []
        self.output_root_dir = options.output_root_dir
        # create list of PidDataEvaluator
        self.pid_data_evaluator_list = []
        if (options.pred_single_xml is not None) and (options.gt_single_xml is not None):
            # Single-pair mode: the PID string is derived from the GT filename.
            pid_string, _ = os.path.splitext(os.path.basename(options.gt_single_xml))
            single_pid_evaluator = PidDataEvaluator(self.output_root_dir, pid_string, options.pred_single_xml, options.gt_single_xml, options)
            self.pid_data_evaluator_list.append(single_pid_evaluator)
        else:
            self.pid_data_evaluator_list = self._create_pid_evaluator_list(options)
    def do_evaluation(self):
        """Run every PID evaluation and collect per-PID average distances."""
        # create PID dir pair list
        for pid_data_evaluator in self.pid_data_evaluator_list:
            pid_data_evaluator.load_page_evaluators()
            pid_data_evaluator.do_evaluation()
            self.ocr_edit_distance_list.append(pid_data_evaluator.get_line_ocr_edit_distance_average())
            self.line_order_edit_distance_list.append(pid_data_evaluator.get_line_order_edit_distance_average())
    def get_ocr_edit_distance_average(self):
        """Mean of the per-PID OCR distance averages; -1 if nothing evaluated."""
        if len(self.ocr_edit_distance_list) <= 0:
            print('ocr_edit_distance_list is empty')
            return -1
        return sum(self.ocr_edit_distance_list) / len(self.ocr_edit_distance_list)
    def get_ocr_edit_distance_median(self):
        """Return (pids holding the median value(s), median line-OCR distance).

        The median is taken over ALL lines of all PIDs (not per-PID averages);
        the returned PID list names the first PID containing the low-median
        value and the first containing the high-median value (deduplicated).
        """
        line_ocr_edit_distance_list = []
        line_ocr_edit_distance_dict = {}
        for pid_data_evaluator in self.pid_data_evaluator_list:
            line_ocr_edit_distance_dict[pid_data_evaluator.pid_string] = pid_data_evaluator.get_line_ocr_edit_distance_list()
            line_ocr_edit_distance_list.extend(pid_data_evaluator.get_line_ocr_edit_distance_list())
        ocr_edit_distance_median_low = statistics.median_low(line_ocr_edit_distance_list)
        ocr_edit_distance_median_high = statistics.median_high(line_ocr_edit_distance_list)
        ocr_edit_distance_median = (ocr_edit_distance_median_low + ocr_edit_distance_median_high) / 2
        median_pid_list = []
        # First PID whose line list contains the low-median value.
        for pid, single_edit_distance_list in line_ocr_edit_distance_dict.items():
            if ocr_edit_distance_median_low in single_edit_distance_list:
                median_pid_list.append(pid)
                break
        # First PID whose line list contains the high-median value.
        for pid, single_edit_distance_list in line_ocr_edit_distance_dict.items():
            if ocr_edit_distance_median_high in single_edit_distance_list:
                median_pid_list.append(pid)
                break
        if median_pid_list[0] == median_pid_list[1]:
            median_pid_list.pop()
        return median_pid_list, ocr_edit_distance_median
    def get_line_order_edit_distance_average(self):
        """Mean of the per-PID line-order averages; -1 if nothing evaluated."""
        if len(self.line_order_edit_distance_list) <= 0:
            print('line_order_edit_distance_list is empty')
            return -1
        return sum(self.line_order_edit_distance_list) / len(self.line_order_edit_distance_list)
    def get_line_order_edit_distance_median(self):
        """Same scheme as get_ocr_edit_distance_median, for line-order distances."""
        line_order_edit_distance_list = []
        line_order_edit_distance_dict = {}
        for pid_data_evaluator in self.pid_data_evaluator_list:
            line_order_edit_distance_dict[pid_data_evaluator.pid_string] = pid_data_evaluator.get_line_order_edit_distance_list()
            line_order_edit_distance_list.extend(pid_data_evaluator.get_line_order_edit_distance_list())
        line_order_edit_distance_median_low = statistics.median_low(line_order_edit_distance_list)
        line_order_edit_distance_median_high = statistics.median_high(line_order_edit_distance_list)
        line_order_edit_distance_median = (line_order_edit_distance_median_low + line_order_edit_distance_median_high) / 2
        median_pid_list = []
        for pid, single_edit_distance_list in line_order_edit_distance_dict.items():
            if line_order_edit_distance_median_low in single_edit_distance_list:
                median_pid_list.append(pid)
                break
        for pid, single_edit_distance_list in line_order_edit_distance_dict.items():
            if line_order_edit_distance_median_high in single_edit_distance_list:
                median_pid_list.append(pid)
                break
        if median_pid_list[0] == median_pid_list[1]:
            median_pid_list.pop()
        return median_pid_list, line_order_edit_distance_median
    def _create_pid_evaluator_list(self, options):
        """Scan paired PID directory trees and build one evaluator per PID.

        Expects <pid>/xml/ to hold exactly one *.sorted.xml (prediction side)
        or one *.xml (ground-truth side); PIDs failing validation are skipped
        with a message rather than aborting the run.
        """
        pid_evaluator_list = []
        # get full PID directory list
        pred_pid_data_dir_list = [pid_dir for pid_dir in glob.glob(os.path.join(options.pred_data_root_dir, '*')) if os.path.isdir(pid_dir)]
        # check if there is xml directory in PID directory, and there is only 1 xml file inside
        for pred_pid_data_dir in pred_pid_data_dir_list:
            pid_string = os.path.basename(pred_pid_data_dir)
            gt_pid_data_dir = os.path.join(options.gt_data_root_dir, pid_string)
            try:
                # input data validation check
                for id, pid_dir in enumerate([pred_pid_data_dir, gt_pid_data_dir]):
                    # input directory check
                    if not os.path.isdir(pid_dir):
                        raise FileNotFoundError('pid directory {0} not found.'.format(pid_dir))
                    # xml file check
                    xml_dir = os.path.join(pid_dir, 'xml')
                    if not os.path.isdir(xml_dir):
                        raise FileNotFoundError('xml directory not found in {0}.'.format(pid_dir))
                    # id 0 is the prediction side (sorted xml), id 1 the GT side.
                    if id == 0:
                        xml_file_list = glob.glob(os.path.join(xml_dir, '*.sorted.xml'))
                    else:
                        xml_file_list = glob.glob(os.path.join(xml_dir, '*.xml'))
                    if len(xml_file_list) != 1:
                        raise FileNotFoundError('xml file must be only one in each xml directory. : {0}'.format(xml_file_list))
                # set instance properties
                pred_xml_dir = os.path.join(pred_pid_data_dir, 'xml')
                pred_xml_file_list = glob.glob(os.path.join(pred_xml_dir, '*.sorted.xml'))
                pred_xml_file_path = pred_xml_file_list[0]
                gt_xml_dir = os.path.join(gt_pid_data_dir, 'xml')
                gt_xml_file_list = glob.glob(os.path.join(gt_xml_dir, '*.xml'))
                gt_xml_file_path = gt_xml_file_list[0]
                pid_data_evaluator = PidDataEvaluator(self.output_root_dir, pid_string, pred_xml_file_path, gt_xml_file_path, options)
            except FileNotFoundError as err:
                print(err)
                continue
            pid_evaluator_list.append(pid_data_evaluator)
        return pid_evaluator_list
| ndl-lab/ndlocr_cli | submodules/ocr_line_eval_script/ocr_evaluator/ocr_evaluator.py | ocr_evaluator.py | py | 7,152 | python | en | code | 325 | github-code | 36 |
# Candidates qualify for an interview only when their skill set is a STRICT
# superset of the required skills (all required skills plus at least one more).
required_skills = ['python', 'github', 'linux']
candidates = {
    'kannu': {'java', 'linux', 'python'},
    'mustaf': {'github', 'java', 'html', 'css', 'python', 'linux'},
}
needed = set(required_skills)
interviewees = {name for name, skills in candidates.items() if skills > needed}
print(interviewees)
4109177627 | import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import pandas as pd
import pickle
import json
import dash_table
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# NOTE(review): the absolute /Users/... paths below only work on one machine.
"""load model"""
with open("/Users/Arsal/examples/raltime_anomaly/model_svm.pkl", 'rb+') as f:
    model = pickle.load(f)
"""read_test_data"""
with open("/Users/Arsal/examples/raltime_anomaly/test_df.json", 'r') as myfile:
    data = json.load(myfile)
# One-row frame from the first record's values, transposed to model shape.
to= pd.DataFrame.from_dict(data[0].values()).T
prediction = model.predict(to)
"""read_columns"""
with open("/Users/Arsal/examples/raltime_anomaly/model_columns.pkl", 'rb+') as col:
    cols= pickle.load(col)
# assume you have a "long-form" data frame
# see https://plotly.com/python/px-arguments/ for more options
df = pd.DataFrame({
    "Fruit": ["Apples", "Oranges", "Bananas", "Apples", "Oranges", "Bananas"],
    "Amount": [4, 1, 2, 2, 4, 5],
    "City": ["SF", "SF", "SF", "Montreal", "Montreal", "Montreal"]
})
fig = px.bar(df, x="Fruit", y="Amount", color="City", barmode="group")
# NOTE(review): app.layout is assigned twice -- this Div layout is discarded
# by the DataTable assignment below. Confirm which layout is intended.
app.layout = html.Div(children=[
    html.H1(children='Hello Dash'),
    html.Div(children='''
        Dash: A web application framework for Python.
    '''),
    # dcc.Graph(
    #     id='example-graph',
    #     figure=fig
    # ),
    dcc.ConfirmDialog(id="table_anomaly")
])
app.layout = dash_table.DataTable(
    id='table',
    columns=[{"name": i, "id": i} for i in df.columns],
    data=df.to_dict('records'),
)
if __name__ == '__main__':
    app.run_server(debug=True)
9824574989 | """
Classes related to OpenAPI-defined operations and their arguments and parameters.
"""
from __future__ import print_function
import argparse
import json
def parse_boolean(value):
    """
    A helper to allow accepting booleans in from argparse. This is intended to
    be passed to the `type=` kwarg for ArgumentParser.add_argument.
    """
    lookup = {'yes': True, 'true': True, 'y': True, '1': True,
              'no': False, 'false': False, 'n': False, '0': False}
    normalized = value.lower()
    if normalized in lookup:
        return lookup[normalized]
    raise argparse.ArgumentTypeError('Expected a boolean value')
def parse_dict(value):
    """
    A helper function to decode incoming JSON data as python dicts. This is
    intended to be passed to the `type=` kwarg for ArgumentParser.add_argument.

    Raises argparse.ArgumentTypeError for non-string input or invalid JSON
    (argparse always passes strings, so non-str input is a programming error).
    """
    if not isinstance(value, str):
        raise argparse.ArgumentTypeError('Expected a JSON string')
    try:
        return json.loads(value)
    # ValueError covers json.JSONDecodeError; a bare except would also
    # swallow KeyboardInterrupt/SystemExit.
    except ValueError:
        raise argparse.ArgumentTypeError('Expected a JSON string')
# Map OpenAPI schema type names to the callables argparse uses to parse values
# (passed as the `type=` kwarg of add_argument).
TYPES = {
    "string": str,
    "integer": int,
    "boolean": parse_boolean,
    "array": list,
    "object": parse_dict,
    "number": float,
}
class CLIArg:
    """
    An argument passed to the CLI with a flag, such as `--example value`. These
    are defined in a requestBody in the api spec.
    """
    def __init__(self, name, arg_type, description, path):
        self.name = name
        self.arg_type = arg_type
        # Flatten the description to one line so argparse help renders cleanly.
        self.description = description.replace('\n', '').replace('\r', '')
        self.path = path
        self.arg_item_type = None # populated during baking for arrays
        self.required = False # this is set during baking
class URLParam:
    """
    An argument passed to the CLI positionally. These are defined in a path in
    the OpenAPI spec, in a "parameters" block
    """
    def __init__(self, name, param_type):
        self.name = name
        # OpenAPI type name; must be a key of TYPES when used for parsing.
        self.param_type = param_type
class CLIOperation:
    """
    A single operation described by the OpenAPI spec. An operation is a method
    on a path, and should have a unique operationId to identify it. Operations
    are responsible for parsing their own arguments and processing their
    responses with the help of their ResponseModel
    """
    def __init__(self, method, url, summary, args, response_model,
                 params):
        self.method = method
        self.url = url
        self.summary = summary
        self.args = args
        self.response_model = response_model
        self.params = params
    def parse_args(self, args):
        """
        Given sys.argv after the operation name, parse args based on the params
        and args of this operation
        """
        # build an argparse
        parser = argparse.ArgumentParser(description=self.summary)
        # Positional URL parameters come first, typed via the TYPES map.
        for param in self.params:
            parser.add_argument(param.name, metavar=param.name,
                                type=TYPES[param.param_type])
        if self.method == "get":
            # build args for filtering (only filterable response attributes
            # become flags; values are left as raw strings here)
            for attr in self.response_model.attrs:
                if attr.filterable:
                    parser.add_argument('--'+attr.name, metavar=attr.name)
        elif self.method in ("post", "put"):
            # build args for body JSON
            for arg in self.args:
                if arg.arg_type == 'array':
                    # special handling for input arrays: repeatable flag,
                    # each occurrence parsed as the array's item type
                    parser.add_argument('--'+arg.path, metavar=arg.name,
                                        action='append', type=TYPES[arg.arg_item_type])
                else:
                    parser.add_argument('--'+arg.path, metavar=arg.name,
                                        type=TYPES[arg.arg_type])
        parsed = parser.parse_args(args)
        return parsed
    def process_response_json(self, json, handler):
        """Normalize a response payload to a list of records and print it.

        NOTE(review): the `json` parameter shadows the stdlib json module
        inside this method -- renaming the local would be safer.
        """
        if self.response_model is None:
            return
        # Paginated responses wrap records under 'data'; single objects are
        # wrapped in a one-element list so the handler sees a uniform shape.
        if 'pages' in json:
            json = json['data']
        else:
            json = [json]
        handler.print(self.response_model, json)
15936612595 | import os
import atexit
import asyncio
import aiohttp
import requests
from scraper import scrape
from models import db, Movie
from flask import Flask, jsonify, request, abort
from apscheduler.schedulers.background import BackgroundScheduler
app = Flask(__name__)
# SQLAlchemy configurations
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('SQLALCHEMY_DATABASE_URI', '')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)
TMDB_API_KEY = os.environ.get('TMDB_API_KEY', '')
@app.route('/', methods=['GET'])
def index():
# return app.send_static_file('index.html')
return jsonify({'success': True, 'message': 'Connected to server'}), 200
@app.route('/api/all-releases', methods=['GET'])
def all_releases():
if request.method != "GET":
abort(404)
try:
movies = [dict(movie) for movie in db.session.query(Movie).all()]
return jsonify({'success': True, 'message': 'Query processed', 'query_results': movies}), 200
except Exception as e:
print(f"Error: {e}", flush=True)
return jsonify({'success': False, 'message': 'Error processing query'}), 400
finally:
db.session.close()
@app.route('/api/this-weeks-releases', methods=['GET'])
def this_week():
if request.method != "GET":
abort(404)
return jsonify({ 'success': True, 'message': 'Query processed', 'query_results': get_by_week('this week') }), 200
@app.route('/api/last-weeks-releases', methods=['GET'])
def last_week():
if request.method != "GET":
abort(404)
return jsonify({ 'success': True, 'message': 'Query processed', 'query_results': get_by_week('last week') }), 200
@app.route('/api/next-weeks-releases', methods=['GET'])
def next_week():
if request.method != "GET":
abort(404)
return jsonify({ 'success': True, 'message': 'Query processed', 'query_results': get_by_week('next week') }), 200
"""
Get all movies in the database whose release week matches the given query.
"""
def get_by_week(week):
with app.app_context():
try:
movies = Movie.query.filter(Movie.release_week.like(f"%{week}%")).all()
return [dict(movie) for movie in movies]
except Exception as e:
print(f"Error: {e}", flush=True)
return []
finally:
db.session.close()
"""
An application factory for tethering a database to SQLAlchemy models.
For use in initialization or updates.
In practice:
Load in environment variables
Navigate to the backend directory
Import this function and run through a Python interactive session
1. >>> from app import create_app
2. >>> from models import db
3. >>> db.create_all(app=create_app())
"""
def create_app():
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('SQLALCHEMY_DATABASE_URI', '')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)
return app
"""
Perform a fetch request to the TMDb API, gathering information for a film by its IMDb ID.
Then place this film, along with its release week and IMDb ID, in the database.
"""
async def fetch(session, release_week, imdb_id):
url = f"https://api.themoviedb.org/3/find/{imdb_id}?api_key={TMDB_API_KEY}&language=en-US&external_source=imdb_id"
async with session.get(url) as response:
data = await response.json()
movie_results = data['movie_results']
tv_results = data['tv_results']
if len(movie_results) != 0:
movie = movie_results[0]
with app.app_context():
try:
db.session.add(Movie(imdb_id,
movie['id'],
movie['title'],
movie['poster_path'],
movie['overview'],
movie['vote_average'],
release_week))
db.session.commit()
except Exception as e:
print(f"Error: {e}", flush=True)
finally:
db.session.close()
elif len(tv_results) !=0:
show = tv_results[0]
with app.app_context():
try:
db.session.add(Movie(imdb_id,
show['id'],
show['name'],
show['poster_path'],
show['overview'],
show['vote_average'],
release_week))
db.session.commit()
except Exception as e:
print(f"Error: {e}", flush=True)
finally:
db.session.close()
else:
pass
"""
Gather all fetch requests to the TMDb API as tasks to be performed at once.
Then perform tasks.
"""
async def get_tmdb_data(movies):
async with aiohttp.ClientSession() as session:
with app.app_context():
db.session.query(Movie).delete()
db.session.commit()
tasks = [fetch(session, release_week, imdb_id) for release_week, imdb_id in movies]
await asyncio.gather(*tasks)
"""
Perform a webscrape and organize data into a list of tuples containing the release week and IMDb ID for each movie.
Then for each tuple, using asyncio, retrieve all film's TMDb information at once.
"""
def scrape_n_save():
movies = [(week['release_week'], movie['imdb_id']) for week in scrape() for movie in week['movies']]
asyncio.get_event_loop().run_until_complete(get_tmdb_data(movies))
# Create schedule for mailing status report
scheduler = BackgroundScheduler()
scheduler.start()
scheduler.add_job(func=scrape_n_save, id='cron_scrape_n_save', name='Update DB with new releases every hour', trigger='cron', hour='*')
# Shut down the scheduler when exiting the app
atexit.register(lambda: scheduler.shutdown())
if __name__ == "__main__":
scrape_n_save()
app.run(debug=True, host='0.0.0.0')
| Joseph-Villegas/JS-New-DVD-Releases | backend/app.py | app.py | py | 6,279 | python | en | code | 0 | github-code | 36 |
40294597567 | import requests
import json
from urllib.parse import urlencode, quote_plus
def getBusInterval() :
api_key = 'g2B7EooEAgEwa++yErKYAhIk93i7tdYXP/3i5nOrRMN0Fmt78AnTzkaJUGqdsIUcqd7ITge5nUX0dAK/luCmFg=='
serviceKey = requests.utils.unquote(api_key)
api_url = 'http://ws.bus.go.kr/api/rest/busRouteInfo/getRouteInfo'
params ={'serviceKey' : api_key, 'busRouteId' : '100100124' }
response = requests.get(api_url, params=params)
print(response.content)
def countRouteId():
with open('bus_router_edge_with_transfer.json', 'r', encoding='utf-8') as f:
bus_route_list = json.load(f)
unique_route_ids = set(edge['route_id'] for edge in bus_route_list)
print("고유한 route_id의 개수:", len(unique_route_ids))
countRouteId() | CSID-DGU/2023-2-OSSP1-Idle-3 | data/graphDataProcessing/bus_data_processing/intervalTime/getBusInterval.py | getBusInterval.py | py | 771 | python | en | code | 0 | github-code | 36 |
11998084066 | import htcondor
import classad
import time
def get_existing_resources(self, group):
"""
Get list of worker nodes
"""
try:
coll = htcondor.Collector()
results = coll.query(htcondor.AdTypes.Startd,
'PartitionableSlot=?=True',
["TotalCpus", "Cpus", "TotalMemory", "Memory", "TotalDisk", "ProminenceCloud", "Start"])
except:
return None
workers = []
for result in results:
if group in str(result['Start']) or 'ProminenceGroup' not in str(result['Start']):
capacity = {'cpus': int(result["TotalCpus"]), 'memory': int(result["TotalMemory"]/1024.0)}
free = {'cpus': int(result["Cpus"]), 'memory': int(result["Memory"]/1024.0)}
worker = {'capacity': capacity, 'free': free, 'site': result["ProminenceCloud"]}
workers.append(worker)
# Sort by free CPUs descending
workers = sorted(workers, key=lambda x: x['free']['cpus'], reverse=True)
data = {'existing': workers}
return data
| prominence-eosc/prominence | prominence/backend/resources.py | resources.py | py | 1,061 | python | en | code | 2 | github-code | 36 |
7821929073 | from sys import stdin
n = int(input())
ary = [""]*n
for i in range(n): ary[i] = stdin.readline().strip()
answer_record = [0]*len(ary[0])
answer = ""
# 3번 확인 돌림
for i in range(1, n):
# 글자수 만큼 또 비교해봐
for j in range(len(ary[0])):
# ary[0]번에 들어간 문자열이랑 2,3번째꺼랑 다르면 기록
if(ary[0][j] != ary[i][j]):
answer_record[j] += 1
for i in range(len(ary[0])):
if(answer_record[i]>0):
answer+="?"
else:
answer+=ary[0][i]
print(answer)
| Drizzle03/baekjoon_coding | 20230116/1032.py | 1032.py | py | 559 | python | en | code | 0 | github-code | 36 |
25553186 | from random import *
from time import sleep
#튜플로 랜덤하게 리스트 배치해서 덱 짜기
magic = (["smite", 80, 40], ["ignite", 30, 20], ["orb shield", 0, 10], ["meteor rock", 150, 70], ["originium arts", 100, 45], ["subjective time dilation", 125, 67])
deck = []
mp = 500
def add_magic():
for i in range(0, 3):
temp_magic = []
temp_magic += magic[i]
temp_magic.append(True)
deck.append(temp_magic)
print(f"덱에 마법 추가, {temp_magic[0]}")
def magic_start():
roof = True
while roof:
add_magic()
for value in deck:
print(f"받아라! {value[0]}")
print(f"{value[1]}의 피해를 입혔다! {value[2]}의 마나 소모!")
mp -= value[2]
if mp <= 0:
print(f"현재 마나는 {mp}, 마법을 사용할 수 없다.")
roof = False
pass
else:
print("덱에 있는 마법을 전부 소모했다. 재정렬할까?")
switch = input("0을 입력시 종료합니다: ")
if switch == "0":
roof = False
print(f"현재 마나는 {mp}")
sleep(1)
# 최댓값과 최솟값 제외하여 출력하기
scores = (1, 2, 3, 4, 5)
high, *others, low = scores
print(scores)
#함수에 다수의 값 입력시 튜플로 패킹되어 출력됨
def foo():
return 1, 2, 3, 4, 5
print(foo())
#함수에 다수의 값 입력시 튜플 이용가능
def pee(a, b, c, d, e):
alisa = [a, b, c, d, e]
for value in alisa:
print(value)
def sum(a, b, c, d, e):
return a+b+c+d+e #이런것도 굳이 for문이나 직접 입력하기 안해도됨.
pee(*foo()) # 언패킹은 * 쓰면 됨
sum(*foo()) | kmgyu/baekJoonPractice | some tips/tuple_packing.py | tuple_packing.py | py | 1,742 | python | ko | code | 0 | github-code | 36 |
25464205303 | from django import forms
from .models import Event
from django.core.exceptions import ValidationError
from django.utils import timezone
tz = timezone.get_default_timezone()
class EventForm(forms.ModelForm):
date_date = forms.CharField(max_length=40, required=True, widget=forms.TextInput(attrs={'class': 'form-control'}))
date_time = forms.CharField(max_length=40, required=True, widget=forms.TextInput(attrs={'class': 'form-control'}))
class Meta:
model = Event
fields = ['title', 'abstract', 'description', 'date_date', 'date_time', 'duration', 'language', 'persons', 'room', 'track', 'url', 'remotevideofile', 'videofile']
def __init__(self, *args, initial={}, **kwargs):
if 'instance' in kwargs:
initial["date_date"] = kwargs['instance'].date.astimezone(tz).strftime("%Y-%m-%d")
initial["date_time"] = kwargs['instance'].date.astimezone(tz).strftime("%H:%M")
self.new = False
self.video_url = kwargs['instance'].video_url()
else:
self.new = True
forms.ModelForm.__init__(self, *args, **kwargs, initial=initial)
| voc/voctoimport | event/forms.py | forms.py | py | 1,135 | python | en | code | 0 | github-code | 36 |
30280424346 | import requests
def get_random_wiki_article_link():
WIKI_RANDOM_LINK_API_URL = "https://en.wikipedia.org/w/api.php?action=query&list=random&rnnamespace=0&rnlimit=1&format=json"
response = requests.get(WIKI_RANDOM_LINK_API_URL)
if response.status_code == 200:
random_article_data = response.json()['query']['random']
random_article_title = random_article_data[0]['title']
return random_article_title
else:
print("Something went wrong! Please try again!")
def main():
article_base_url = "https://en.wikipedia.org/wiki/"
while True:
random_article = get_random_wiki_article_link()
user_response = input(f"Would you like to read `{random_article}` (Y/N): ")
if user_response.lower() == 'y':
print(f"{article_base_url}{'_'.join(random_article.split())}")
break
if __name__ == '__main__':
main() | hafeezulkareem/python_scripts | get_random_wiki_article_link.py | get_random_wiki_article_link.py | py | 905 | python | en | code | 0 | github-code | 36 |
4062334058 | def main():
## Sort numbers by the sum of their odd digits in descending order.
numbers = [865, 1169, 1208, 1243, 290]
numbers.sort(key=sumOfOddDigits, reverse=True)
print("Sorted by sum of odd digits:")
print(numbers)
def sumOfOddDigits(num):
listNums = list(str(num))
total = 0
for i in range(len(listNums)):
if int(listNums[i]) % 2 == 1:
total += int(listNums[i])
return total
main()
| guoweifeng216/python | python_design/pythonprogram_design/Ch4/4-2-E61.py | 4-2-E61.py | py | 456 | python | en | code | 0 | github-code | 36 |
27893627629 | open_file = open("mapper_gopi.txt", "r")
sort_output = open("sort_data.txt", "w")
lines = open_file.readlines()
lines.sort()
for line in lines:
sort_output.write(line)
open_file.close()
sort_output.close() | chvnaveenkumar/Crypto-Markets | Problem4/sort.py | sort.py | py | 210 | python | en | code | 0 | github-code | 36 |
3640835274 | import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.legacy.datasets import Multi30k
from torchtext.legacy.data import Field, BucketIterator
import spacy
import numpy as np
import random
import math
import time
from model import Seq2Seq, Encoder, Decoder
def train(model, iterator, optimizer, criterion, clip):
model.train()
epoch_loss = 0
for i, batch in enumerate(iterator):
src = batch.src
trg = batch.trg
optimizer.zero_grad()
output = model(src, trg)
# trg = [trg len, batch size]
# output = [trg len, batch size, output dim]
output_dim = output.shape[-1]
output = output[1:].view(-1, output_dim)
trg = trg[1:].view(-1)
# trg = [(trg len - 1) * batch size]
# output = [(trg len - 1) * batch size, output dim]
loss = criterion(output, trg)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
optimizer.step()
epoch_loss += loss.item()
return epoch_loss / len(iterator)
def evaluate(model, iterator, criterion):
model.eval()
epoch_loss = 0
with torch.no_grad():
for i, batch in enumerate(iterator):
src = batch.src
trg = batch.trg
output = model(src, trg, 0) # turn off teacher forcing
# trg = [trg len, batch size]
# output = [trg len, batch size, output dim]
output_dim = output.shape[-1]
output = output[1:].view(-1, output_dim)
trg = trg[1:].view(-1)
# trg = [(trg len - 1) * batch size]
# output = [(trg len - 1) * batch size, output dim]
loss = criterion(output, trg)
epoch_loss += loss.item()
return epoch_loss / len(iterator)
def init_weights(m):
for name, param in m.named_parameters():
nn.init.uniform_(param.data, -0.08, 0.08)
spacy_de = spacy.load("de_core_news_sm")
spacy_en = spacy.load("en_core_web_sm")
def tokenize_de(text):
"""
Tokenizes German text from a string into a list of strings (tokens) and reverses it
"""
return [tok.text for tok in spacy_de.tokenizer(text)][::-1]
def tokenize_en(text):
"""
Tokenizes English text from a string into a list of strings (tokens)
"""
return [tok.text for tok in spacy_en.tokenizer(text)]
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
SEED = 1234
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
N_EPOCHS = 10
CLIP = 1
SRC = Field(tokenize=tokenize_de, init_token="<sos>", eos_token="<eos>", lower=True)
TRG = Field(tokenize=tokenize_en, init_token="<sos>", eos_token="<eos>", lower=True)
train_data, valid_data, test_data = Multi30k.splits(
exts=(".de", ".en"), fields=(SRC, TRG)
)
SRC.build_vocab(train_data, min_freq=2)
TRG.build_vocab(train_data, min_freq=2)
INPUT_DIM = len(SRC.vocab)
OUTPUT_DIM = len(TRG.vocab)
ENC_EMB_DIM = 256
DEC_EMB_DIM = 256
HID_DIM = 512
N_LAYERS = 2
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5
BATCH_SIZE = 128
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
train_iterator, valid_iterator, test_iterator = BucketIterator.splits(
(train_data, valid_data, test_data), batch_size=BATCH_SIZE, device=device
)
enc = Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)
dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)
model = Seq2Seq(enc, dec, device).to(device)
model.apply(init_weights)
optimizer = optim.Adam(model.parameters())
TRG_PAD_IDX = TRG.vocab.stoi[TRG.pad_token]
criterion = nn.CrossEntropyLoss(ignore_index=TRG_PAD_IDX)
best_valid_loss = float("inf")
for epoch in range(N_EPOCHS):
start_time = time.time()
train_loss = train(model, train_iterator, optimizer, criterion, CLIP)
valid_loss = evaluate(model, valid_iterator, criterion)
end_time = time.time()
epoch_mins, epoch_secs = epoch_time(start_time, end_time)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), "tut1-model.pt")
print(f"Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s")
print(f"\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}")
print(f"\t Val. Loss: {valid_loss:.3f} | Val. PPL: {math.exp(valid_loss):7.3f}")
model.load_state_dict(torch.load("tut1-model.pt"))
test_loss = evaluate(model, test_iterator, criterion)
print(f"| Test Loss: {test_loss:.3f} | Test PPL: {math.exp(test_loss):7.3f} |")
| HallerPatrick/two_hot_encoding | multihot/seq2seq/train.py | train.py | py | 4,787 | python | en | code | 6 | github-code | 36 |
16764769514 | import dns.resolver
import sys
'''
Returns the dns records specified in rtypes, if you want to change this script feel free to do it. :)
To run this script just type --> python3 dnsenum.py <domain name> e.g domain name <example.com>
For the first import install dnspython using pip3 install dnspython
'''
def main():
try:
domain = sys.argv[1]
except:
print('SYNTAX ERROR ---- python3 dnsenum.py <domain name>')
exit()
rtypes = ['A','AAAA', 'NS','MX', 'TXT', 'SOA', 'PTR','CNAME']
for records in rtypes:
try:
target = dns.resolver.resolve(qname=domain,rdtype=records)
print('/' + '*'*10 + '/')
print(f'{records} records')
print('-'*100)
for e in target:
print(e.to_text() + '\n')
except dns.resolver.NoAnswer:
print('No records found for ' + f'{records}')
except dns.resolver.NXDOMAIN:
print('ERROR ---- The DNS query name does not exist')
exit()
except dns.resolver.NoNameservers:
print('ERROR ---- All nameservers failed to answer the query or you mistyped the domain name')
exit()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
exit() | Gl4uc0m4/InformationGatheringTools | dnsenum.py | dnsenum.py | py | 1,299 | python | en | code | 0 | github-code | 36 |
5834016480 | import pygame, time
from math import pi, cos, sin
from random import randrange, random
WIDTH = 900
HEIGHT = 900
pygame.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
class Branch:
tree = []
random_seed = []
def __init__(self, startPoint, angle, size, width):
self.width = width
self.size = size
self.start = startPoint
self.angle = angle
self.end = self.findEndPoint()
Branch.tree.append(self)
def findEndPoint(self):
x = self.size*cos(pi/2-self.angle)
y = self.size*sin(pi/2-self.angle)
endpoint = (self.start[0] + x, self.start[1] - y)
return endpoint
def show(self):
if self.width<=0:
self.width = 1
pygame.draw.line(screen, (200, 200, 200), (self.start[0], self.start[1]), (self.end[0], self.end[1]), self.width)
def grow_branch(branch, angle):
if branch.size<5:
return "LOL"
if random()>0.1:
B_1 = Branch(branch.end, branch.angle + (angle+ 0.2*angle*randrange(-1,2)), branch.size*(randrange(45,101)/100), branch.width-1)
grow_branch(B_1, angle)
if random()>0.1:
B_2 = Branch(branch.end, branch.angle - (angle+ 0.4*angle*randrange(-1,2)), branch.size*(randrange(45,101)/100), branch.width-1)
grow_branch(B_2, angle)
if random()>0.5:
B_3 = Branch(branch.end, branch.angle - (angle+ 0.6*angle*randrange(-1,2)), branch.size*(randrange(50,101)/100), branch.width-1)
grow_branch(B_3, angle)
B = Branch((WIDTH/2, HEIGHT), 0, 100, 10)
grow_branch(B, pi/9)
screen.fill((30, 30, 30))
for branche in Branch.tree:
branche.show()
pygame.display.flip()
done = False
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
if event.type == pygame.KEYDOWN:
if event.key == 32:
screen.fill((30, 30, 30))
Branch.tree = []
B = Branch((WIDTH/2, HEIGHT), 0, 100, 10)
grow_branch(B, pi/9)
for branche in Branch.tree:
branche.show()
pygame.display.flip() | YohannPardes/Fractal-tree | Versions/Tree_generator.py | Tree_generator.py | py | 2,195 | python | en | code | 0 | github-code | 36 |
17076521686 | from fastapi import FastAPI, HTTPException, status
import uvicorn
import requests
app = FastAPI(debug=True)
BTCUSD=[]
@app.get('/')
def index():
return {'msg': 'VSETKO JE OK'}
@app.get('/usd2btc')
def USD_current_price():
re = requests.get('https://api.coindesk.com/v1/bpi/currentprice.json')
if re.status_code == 200:
data = re.json()
USDATA = data['bpi']['USD']
BTCUSD.append({'key':USDATA['rate']})
print({'key':USDATA['rate']})
return {'key':USDATA['rate']}
else:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='DATA NOT FOUND')
@app.get('/gbp2btc')
def GBP_current_price():
re = requests.get('https://api.coindesk.com/v1/bpi/currentprice.json')
if re.status_code == 200:
data = re.json()
GBDATA = data['bpi']['GBP']
# BTCUSD.append({'key':USDATA['rate']})
print({'key':GBDATA['rate']})
return {'key':GBDATA['rate']}
else:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='DATA NOT FOUND')
@app.get('/eur2btc')
def EUR_current_price():
re = requests.get('https://api.coindesk.com/v1/bpi/currentprice.json')
if re.status_code == 200:
data = re.json()
EUDATA = data['bpi']['EUR']
# BTCUSD.append({'key':EUDATA['rate']})
print({'key':EUDATA['rate']})
return {'key':EUDATA['rate']}
else:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='DATA NOT FOUND') | fortisauris/PyDevJR_Course | FA02_FASTAPI_BTC/main.py | main.py | py | 1,498 | python | en | code | 2 | github-code | 36 |
42443816173 | abc = str(input(f'Digite uma frase: ')).strip().upper().split()
abc = ''.join(abc)
inv = ''
for letra in range(len(abc)-1, -1, -1):
inv += abc[letra]
print(f'O inverso de {abc} é {inv}.')
if abc == inv:
print(f'É PALÍNDROMO')
else:
print(f'NÃO É PALÍNDROMO')
| JosueFS/Python | Exercicios/Ex053.py | Ex053.py | py | 277 | python | pt | code | 0 | github-code | 36 |
42578251551 | from tkinter import StringVar, Tk
from tkinter.ttk import Frame
import pytest
from pyDEA.core.gui_modules.data_frame_gui import DataFrame
from tests.test_gui_data_tab_frame import ParamsFrameMock
class ParentMock(Frame):
def __init__(self, parent):
super().__init__(parent)
self.progress_bar = {'value': 100}
@pytest.fixture
def data_book(request):
parent = Tk()
current_categories = []
data_book = DataFrame(ParentMock(parent), ParamsFrameMock(parent),
current_categories,
StringVar(master=parent), StringVar(master=parent))
request.addfinalizer(parent.destroy)
return data_book
def test_change_solution_tab_name(data_book):
new_name = 'New solution name'
data_book.change_solution_tab_name(new_name)
assert data_book.tab(1, option='text') == new_name
def test_reset_progress_bar(data_book):
data_book.reset_progress_bar()
assert data_book.parent.progress_bar['value'] == 0
| araith/pyDEA | tests/test_gui_data_frame.py | test_gui_data_frame.py | py | 998 | python | en | code | 38 | github-code | 36 |
2735470039 | # 3rdpartyimports
import math
from sklearn.model_selection import (
cross_val_score, KFold, train_test_split, GridSearchCV, RepeatedKFold)
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import (OneHotEncoder, StandardScaler,
PolynomialFeatures)
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import (accuracy_score, confusion_matrix,
classification_report, mean_squared_error,
mean_absolute_error)
from sklearn.linear_model import (LinearRegression, LassoCV, Lasso, RidgeCV, Ridge,
ElasticNetCV, ElasticNet, BayesianRidge,
LogisticRegression, SGDRegressor)
from numpy import absolute, mean, std
from scipy.stats import poisson
import numpy as np
import pandas as pd
def create_model(X, y): # generate opposition variables
"""a function that takes in our player averages, last ten averages, opponent and other predictors to generate
a model to predict FTA per 36 value for player. y=historical fta/36"""
dummies = pd.get_dummies(X['MATCHUP'])
X = X.drop('MATCHUP', axis=1)
X = pd.concat([X, dummies], axis=1)
X = X.drop(['FT_PCTlastxgames', 'FG_PCTlastxgames', 'FG3_PCTlastxgames', 'FG_PCT', 'FG3_PCT', 'FT_PCT'], axis=1)
X = X.fillna(0)
X=X.values
y = [0 if math.isnan(x) else x for x in y]
y=y.values
model = Lasso(alpha=1.0)
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1) # pretty typical numbers,can mess around later
scores = cross_val_score(model, X, y, cv=cv, n_jobs=1)
scores = absolute(scores)
print('Mean MAE: %.3f (%.3f)' % (mean(scores), std(scores)))
model.fit(X, y)
return model
def propbet(X, y):
scaler = StandardScaler()
dummies = pd.get_dummies(X['MATCHUP'])
X = X.drop('MATCHUP', axis=1)
X = pd.concat([X, dummies], axis=1)
X = X.drop(['FT_PCTlastxgames', 'FG_PCTlastxgames', 'FG3_PCTlastxgames', 'FG_PCT', 'FG3_PCT', 'FT_PCT'], axis=1)
X = X.fillna(0)
print(X)
y = [0 if math.isnan(x) else x for x in y]
X_train_val, X_test, y_train_val, y_test = train_test_split(
X, y, test_size=.2, random_state=1)
X_train, X_val, y_train, y_val = train_test_split(
X_train_val, y_train_val, test_size=.25, random_state=2)
X_train_scaled = scaler.fit_transform(X_train.values)
X_val_scaled = scaler.fit_transform(X_val.values)
alphavec = 10 ** np.linspace(-2, 2, 200)
lasso_cv = LassoCV(alphas=alphavec, cv=5)
lasso_cv.fit(X_train_scaled, y_train)
lasso_cv.alpha_
for col, coef in zip(X_train.columns, lasso_cv.coef_):
print(f"{col:<16}: {coef:>12,.7f}")
print(
f'R2 for LassoCV Model on train set: {lasso_cv.score(X_train_scaled, y_train)}')
val_set_preds = lasso_cv.predict(X_val_scaled)
print(
f'R2 for LassoCV Model on validation set: {lasso_cv.score(X_val_scaled, y_val)}')
mae = mean_absolute_error(y_val, val_set_preds)
print(f'Mean absolute error for LassoCV model on validation set: {mae}')
alpha = np.logspace(-4, 2, 100) # np.logspace(-4, -.1, 20)
param_grid = dict(alpha=alpha)
grid_en = GridSearchCV(ElasticNet(), param_grid=param_grid,
scoring='neg_mean_absolute_error', cv=5)
grid_result_en = grid_en.fit(X_train, y_train)
print(f'Best Score: {grid_result_en.best_score_}')
print(f'Best Param: {grid_result_en.best_params_}')
elastic_cv = ElasticNetCV(
alphas=[0.0021544346900318843], cv=5, random_state=0)
elastic_cv.fit(X_train, y_train)
print(
f'ElasticNet Mean R Squared Score on training data: {elastic_cv.score(X_train, y_train)}')
print(
f'ElasticNet Mean R Squared Score on validation data: {elastic_cv.score(X_val, y_val)}')
val_set_preds = elastic_cv.predict(X_val)
mae = mean_absolute_error(y_val, val_set_preds)
print(f'Mean absolute error for ElasticNet model on validation set: {mae}')
rmse = mean_squared_error(y_val, val_set_preds, squared=False)
print(
f'Root mean squared error for ElasticNet model on validation set: {rmse}')
for col, coef in zip(X_test.columns, elastic_cv.coef_):
print(f"{col:<16}: {coef:>12,.7f}")
elastic_preds = elastic_cv.predict(X)
X['Model Predictions'] = elastic_preds
return elastic_cv
def predictandpoisson(X, ftpercent, model, line):
"""taking our created model and x values for upcoming games output our projected FTA/36 and use
last ten games minutes average to get a final FTA number for the game, then use poisson to create distribution"""
yhat = model.predict(X)
yhat = yhat * X[0][0]/36 #convert out of per36
yhat = float(yhat * ftpercent)
print("projected makes", yhat)
line=float(line)
drawodds= poisson.pmf(line,yhat)
overodds = 1 - poisson.cdf(line, yhat)
underodds = poisson.cdf(line, yhat)
print("On a line of ",line, " Over odds are: ", overodds, "Draw odds are: ",drawodds, " and Under odds are ", underodds-drawodds)
return [line,overodds,drawodds,underodds-drawodds,yhat]
| chadk94/FreeThrowProjections | model.py | model.py | py | 5,208 | python | en | code | 0 | github-code | 36 |
11909593894 | from pprint import pprint
import boto3
import openpyxl
import time
import csv
def put_object(fileHash, request='', today = int(time.time()), dynamodb=None):
if not dynamodb:
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('image-reuse-image-hash-dev')
response = table.put_item(
Item={
'fileHash': fileHash,
'createdOn': today,
'requests': request,
'updatedOn': today
}
)
return response
def addtocsv(data):
file = open('final_test.csv', 'a+', newline ='')
# with file:
# write = csv.writer(file)
# write.writerows(data)
writer = csv.writer(file)
for key, value in data.items():
writer.writerow([key, value])
file.close()
dict1 = {}
def append_to_dict(fileHash, request):
if fileHash in dict1:
a = dict1[fileHash]
a = a + request
dict1[fileHash]= a
else:
dict1[fileHash]= request
if __name__ == '__main__':
today = int(time.time())
wb= openpyxl.load_workbook('final_db_data.xlsx')
print('Workbook loaded!')
sh1 = wb['Sheet1']
for i in range (2,640901):
fileHash = sh1.cell(i,1).value
request= [
{
"sourceElementId": sh1.cell(i,2).value,
"clientId": "BACKFILL",
"subLob": sh1.cell(i,4).value,
"sourceSystem": "ICAN",
"createdOn": today,
"lob": "motor"
}
]
append_to_dict(fileHash,request)
#output = put_object(fileHash, request, today)
print("Put object succeeded for item",i, fileHash)
#pprint(output, sort_dicts=False)
#print(dict1)
addtocsv(dict1)
| shakazi/aws_essential_scripts | upload_to_db.py | upload_to_db.py | py | 1,821 | python | en | code | 0 | github-code | 36 |
12433334302 | """This module contains definition of the Cell class."""
class Cell:
"""This class stores information about a single cell."""
def __init__(self, position_init, max_velocity_init, mass_init):
self.position = position_init
self.max_velocity = max_velocity_init
self.mass = mass_init
self.ready = False
def attack(self, target):
"""This method moves this cell as close to another cell as possible
and eats the target cell if its within this cell's reach."""
# Calculate position difference between the cells
pos_diff = target.position - self.position
self.ready = False
# The distance is lower than the Cell's velocity
if abs(pos_diff) < self.max_velocity:
# Move to the position of the target cell
self.position += pos_diff
# Consume the target cell's mass
self.mass += target.mass
return True
# The distance is higher and the position is higher
elif pos_diff > 0.0:
# Move as close to the target as the cell's velocity allows
self.position += self.max_velocity
return False
# Same as above, but the position is lower instead
else:
self.position -= self.max_velocity
return False
| natiiix/Cells | Cells/Cell.py | Cell.py | py | 1,335 | python | en | code | 0 | github-code | 36 |
27283966186 | import copy
import math
from pypaq.lipytools.printout import stamp, progress_
from pypaq.lipytools.pylogger import get_pylogger, get_child
from pypaq.mpython.mptools import Que, QMessage
from torchness.tbwr import TBwr
import random
import statistics
import time
from tqdm import tqdm
from typing import Dict, List, Tuple, Optional, Union
from envy import MODELS_FD, DMK_MODELS_FD, N_TABLE_PLAYERS, PyPoksException
from pologic.potable import QPTable
from podecide.dmk import FolDMK, HuDMK
from gui.gui_hdmk import GUI_HDMK
def stdev_with_none(values) -> Optional[float]:
    """Sample standard deviation of *values*, or None when fewer than two
    samples are given (stdev is undefined for 0 or 1 samples)."""
    return statistics.stdev(values) if len(values) > 1 else None
# separation factor of two wonH results
def separated_factor(
    a_wonH: Optional[float],
    a_wonH_mean_stdev: Optional[float],
    b_wonH: Optional[float],
    b_wonH_mean_stdev: Optional[float],
    n_stdev: float) -> float:
    """Measures how far apart two wonH results are, in units of their joint
    (n_stdev-scaled) standard error. Returns 0.0 when either stdev is still
    unknown, and a large sentinel (1000) when both stdevs are exactly zero."""
    # cannot judge separation without both standard errors
    if a_wonH_mean_stdev is None or b_wonH_mean_stdev is None:
        return 0.0
    joint_spread = a_wonH_mean_stdev + b_wonH_mean_stdev
    if joint_spread == 0:
        return 1000
    return abs(a_wonH - b_wonH) / (n_stdev * joint_spread)
# prepares separation report
def separation_report(
        dmk_results: Dict,
        n_stdev: float,
        sep_pairs: Optional[List[Tuple[str,str]]]=    None,
        max_nf: float=                                1.1,
) -> Dict:
    """Builds a report describing how well DMK results are separated.

    For every DMK the stdev of its interval wonH and the stdev of that
    mean are computed; pairs of DMKs are then compared with
    separated_factor(). Counts (nc) are normalized to <0;1>, factors (nf)
    are summed with each contribution clipped at max_nf.
    """
    n_dmk = len(dmk_results)

    # per-DMK separation data
    stats = {}
    for dname, res in dmk_results.items():
        iv_stdev = stdev_with_none(res['wonH_IV'])
        stats[dname] = {
            'wonH_IV_stdev':        iv_stdev,
            'wonH_IV_mean_stdev':   iv_stdev / math.sqrt(len(res['wonH_IV'])) if iv_stdev is not None else None,
            'last_wonH_afterIV':    res['wonH_afterIV'][-1] if res['wonH_afterIV'] else None}

    def _factor(na:str, nb:str) -> float:
        # separation factor for the (na, nb) pair of DMKs
        return separated_factor(
            a_wonH=             stats[na]['last_wonH_afterIV'],
            a_wonH_mean_stdev=  stats[na]['wonH_IV_mean_stdev'],
            b_wonH=             stats[nb]['last_wonH_afterIV'],
            b_wonH_mean_stdev=  stats[nb]['wonH_IV_mean_stdev'],
            n_stdev=            n_stdev)

    # all-vs-all: normalized count & normalized factor of separation
    sep_nc = 0.0
    sep_nf = 0.0
    for name_a in stats:
        stats[name_a]['separated'] = n_dmk - 1
        for name_b in stats:
            if name_a == name_b:
                continue
            sf = _factor(name_a, name_b)
            if sf < 1:
                stats[name_a]['separated'] -= 1
            sep_nf += min(sf, max_nf)
        sep_nc += stats[name_a]['separated']
    n_max = (n_dmk - 1) * n_dmk
    sep_nc /= n_max
    sep_nf /= n_max

    # same statistics restricted to explicitly given pairs
    sep_pairs_nc = 0.0
    sep_pairs_nf = 0.0
    sep_pairs_stat = []
    if sep_pairs:
        for name_a, name_b in sep_pairs:
            sf = _factor(name_a, name_b)
            sep_pairs_stat.append(1 if sf >= 1 else 0)
            if sf >= 1:
                sep_pairs_nc += 1
            sep_pairs_nf += min(sf, max_nf)
        sep_pairs_nc /= len(sep_pairs)
        sep_pairs_nf /= len(sep_pairs)

    return {
        'sep_nc':           sep_nc,         # <0.0;1.0> normalized count of separated
        'sep_nf':           sep_nf,         # <0.0;1.1> normalized factor of separation
        'sep_pairs_nc':     sep_pairs_nc,   # <0.0;1.0> normalized count of separated pairs
        'sep_pairs_nf':     sep_pairs_nf,   # <0.0;1.1> normalized factor of pairs separation
        'sep_pairs_stat':   sep_pairs_stat} # [0,1, ..] each pair marked as separated or not
# manages games of DMKs (at least QueDMKs)
class GamesManager:
    """Orchestrates poker games between DMK subprocesses.

    Builds DMK objects from the given points, seats their players at
    QPTable subprocesses, runs the game loop while collecting interval
    results, and finally stops & saves everything. All communication with
    DMKs and tables goes through Que queues carrying QMessage objects.
    """

    def __init__(
            self,
            dmk_pointL: List[Dict],     # points with eventually added 'dmk_type'
            name: Optional[str]=        None,
            logger=                     None,
            loglevel=                   20,
            debug_dmks=                 False,
            debug_tables=               False):
        """Builds DMKs from points; each point may carry an optional 'dmk_type' (default FolDMK)."""
        self.name = name or f'GM_{stamp()}'
        if not logger:
            logger = get_pylogger(
                name=   self.name,
                folder= MODELS_FD,
                level=  loglevel)
        self.logger = logger
        self.debug_tables = debug_tables
        self.logger.info(f'*** GamesManager : {self.name} *** starts..')
        self.que_to_gm = Que()  # here GM receives data from DMKs and Tables
        dmk_pointL = copy.deepcopy(dmk_pointL)  # copy to not modify original list
        dmk_types = [point.pop('dmk_type',FolDMK) for point in dmk_pointL]
        dmk_logger = get_child(self.logger, name='dmks_logger', change_level=-10 if debug_dmks else 10)
        dmks = [dmk_type(logger=dmk_logger, **point) for dmk_type,point in zip(dmk_types, dmk_pointL)]
        self.dmkD = {dmk.name: dmk for dmk in dmks}  # Dict[str, dmk_type] INFO:is not typed because DMK may have diff types
        for dmk in self.dmkD.values(): dmk.que_to_gm = self.que_to_gm  # DMKs are build from folders, they need que to be updated then
        self.families = set([dmk.family for dmk in self.dmkD.values()])
        self.tbwr = TBwr(logdir=f'{DMK_MODELS_FD}/{self.name}')  # TensorBoard writer for GM-level stats
        self.tables = None  # List[QPTable], created by _put_players_on_tables()

    # starts DMKs (starts loops)
    def _start_dmks(self):
        """Starts DMK processes, waits for their init confirmations, then starts their loops."""
        self.logger.debug('> starts DMKs..')
        idmk = tqdm(self.dmkD.values()) if self.logger.level<20 else self.dmkD.values()
        for dmk in idmk: dmk.start()
        self.logger.debug('> initializing..')
        idmk = tqdm(self.dmkD) if self.logger.level < 20 else self.dmkD
        # one confirmation message is expected from every DMK after process start
        for _ in idmk:
            message = self.que_to_gm.get()
            self.logger.debug(f'>> {message}')
        self.logger.debug(f'> initialized {len(self.dmkD)} DMKs!')
        message = QMessage(type='start_dmk_loop', data=None)
        for dmk in self.dmkD.values(): dmk.que_from_gm.put(message)  # synchronizes DMKs a bit..
        for _ in self.dmkD:
            message = self.que_to_gm.get()
            self.logger.debug(f'>> {message}')
        self.logger.debug(f'> started {len(self.dmkD)} DMKs!')

    def _save_dmks(self):
        """Asks every DMK to save itself and waits for all confirmations."""
        self.logger.debug('> saves DMKs')
        n_saved = 0
        message = QMessage(type='save_dmk', data=None)
        for dmk in self.dmkD.values():
            dmk.que_from_gm.put(message)
            n_saved += 1
        for _ in range(n_saved):
            self.que_to_gm.get()
        self.logger.debug('> all DMKs saved!')

    # stops DMKs loops
    def _stop_dmks_loops(self):
        """Stops DMK decision loops (processes keep running)."""
        self.logger.debug('Stopping DMKs loops..')
        message = QMessage(type='stop_dmk_loop', data=None)
        for dmk in self.dmkD.values(): dmk.que_from_gm.put(message)
        idmk = tqdm(self.dmkD) if self.logger.level < 20 else self.dmkD
        for _ in idmk:
            self.que_to_gm.get()
        self.logger.debug('> all DMKs loops stopped!')

    # stops DMKs processes
    def _stop_dmks_processes(self):
        """Stops DMK processes (final shutdown; processes cannot be restarted)."""
        self.logger.debug('Stopping DMKs processes..')
        message = QMessage(type='stop_dmk_process', data=None)
        for dmk in self.dmkD.values(): dmk.que_from_gm.put(message)
        idmk = tqdm(self.dmkD) if self.logger.level < 20 else self.dmkD
        for _ in idmk:
            self.que_to_gm.get()
        self.logger.debug('> all DMKs exited!')

    # creates new tables & puts players with random policy
    def _put_players_on_tables(self):
        """Distributes all DMK players over new QPTable objects, mixing families randomly.

        Players are grouped per DMK family, the family lists are cut into
        roughly equal pieces and then interleaved so that each table gets a
        mix of players from different families.
        """
        self.logger.info('> puts players on tables..')
        # build dict of lists of players (per family): {family: [(pid, que_to_pl, que_from_pl)]}
        fam_ques: Dict[str, List[Tuple[str,Que,Que]]] = {fam: [] for fam in self.families}
        for dmk in self.dmkD.values():
            for k in dmk.queD_to_player:  # {pid: que_to_pl}
                fam_ques[dmk.family].append((k, dmk.queD_to_player[k], dmk.que_from_player))
        # shuffle players in families
        for fam in fam_ques:
            random.shuffle(fam_ques[fam])
            random.shuffle(fam_ques[fam])
        quesLL = [fam_ques[fam] for fam in fam_ques]  # convert to list of lists
        ### convert to flat list
        # cut in equal pieces
        min_len = min([len(l) for l in quesLL])
        cut_quesLL = []
        for l in quesLL:
            while len(l) > 1.66*min_len:
                cut_quesLL.append(l[:min_len])
                l = l[min_len:]
            cut_quesLL.append(l)
        quesLL = cut_quesLL
        random.shuffle(quesLL)
        random.shuffle(quesLL)
        quesL = []  # flat list
        qLL_IXL = []
        # interleave: repeatedly pick one player from a randomly chosen piece
        while quesLL:
            if not qLL_IXL:
                qLL_IXL = list(range(len(quesLL)))  # fill indexes
                random.shuffle(qLL_IXL)             # shuffle them
            qLL_IX = qLL_IXL.pop()                  # now take last index
            quesL.append(quesLL[qLL_IX].pop())      # add last from list
            if not quesLL[qLL_IX]:
                quesLL.pop(qLL_IX)                  # remove empty list
                qLL_IXL = list(range(len(quesLL)))  # new indexes then
                random.shuffle(qLL_IXL)             # shuffle them
        num_players = len(quesL)
        if num_players % N_TABLE_PLAYERS != 0:
            raise PyPoksException(f'num_players ({num_players}) has to be a multiple of N_TABLE_PLAYERS ({N_TABLE_PLAYERS})')
        # put on tables
        self.tables = []
        table_ques = []
        table_logger = get_child(self.logger, name='table_logger', change_level=-10) if self.debug_tables else None
        while quesL:
            table_ques.append(quesL.pop())
            if len(table_ques) == N_TABLE_PLAYERS:
                self.tables.append(QPTable(
                    name=       f'tbl{len(self.tables)}',
                    que_to_gm=  self.que_to_gm,
                    pl_ques=    {t[0]: (t[1], t[2]) for t in table_ques},
                    logger=     table_logger))
                table_ques = []

    # starts all tables
    def _start_tables(self):
        """Starts table processes and waits for one confirmation per table."""
        self.logger.debug('> starts tables..')
        itbl = tqdm(self.tables) if self.logger.level < 20 else self.tables
        for tbl in itbl: tbl.start()
        for _ in itbl:
            self.que_to_gm.get()
        self.logger.debug(f'> tables ({len(self.tables)}) processes started!')

    # stops tables
    def _stop_tables(self):
        """Stops table loops; processes remain but no longer deal hands."""
        self.logger.debug('> stops tables loops..')
        message = QMessage(type='stop_table', data=None)
        for table in self.tables: table.que_from_gm.put(message)
        itbl = tqdm(self.tables) if self.logger.level < 20 else self.tables
        for _ in itbl:
            self.que_to_gm.get()
        # INFO: tables now are just Process objects with target loop stopped
        self.logger.debug('> tables loops stopped!')

    # runs game, returns DMK results dictionary
    def run_game(
            self,
            game_size=                  10000,  # number of hands for a game (per DMK)
            sleep=                      10,     # loop sleep (seconds)
            progress_report=            True,
            publish_GM=                 False,
            sep_all_break: bool=        False,  # breaks game when all DMKs are separated
            sep_pairs: Optional[List[Tuple[str,str]]]=  None,   # pairs of DMK names for separation condition
            sep_pairs_factor: float=    0.9,    # factor of separated pairs needed to break the game
            sep_n_stdev: float=         2.0,
    ) -> Dict[str, Dict]:
        """
        By now, by design run_game() may be called only once,
        cause DMK processes are started and then stopped and process cannot be started twice,
        there is no real need to change this design.
        """
        # save of DMK results + additional DMK info
        dmk_results = {
            dn: {
                'wonH_IV':      [], # wonH (won $ / hand) of interval
                'wonH_afterIV': [], # wonH (won $ / hand) after interval
                'family':       self.dmkD[dn].family,
                'trainable':    self.dmkD[dn].trainable,
                'global_stats': None, # SM.global_stats, will be updated by DMK at the end of the game
            } for dn in self._get_dmk_focus_names()}
        # starts all subprocesses
        self._put_players_on_tables()
        self._start_tables()
        self._start_dmks()
        stime = time.time()
        time_last_report = stime
        n_hands_last_report = 0
        self.logger.info(f'{self.name} starts a game..')
        loop_ix = 0
        # main polling loop: sleep, pull fresh DMK reports, check break conditions
        while True:
            time.sleep(sleep)
            reports = self._get_reports({dn: len(dmk_results[dn]['wonH_IV']) for dn in dmk_results})  # actual DMK reports
            for dn in reports:
                dmk_results[dn]['wonH_IV'] += reports[dn]['wonH_IV']
                dmk_results[dn]['wonH_afterIV'] += reports[dn]['wonH_afterIV']
            # calculate game factor (fraction of game played, capped at 1)
            n_hands = sum([reports[dn]['n_hands'] for dn in reports])
            game_factor = n_hands / len(reports) / game_size
            if game_factor >= 1: game_factor = 1
            sr = separation_report(
                dmk_results=    dmk_results,
                n_stdev=        sep_n_stdev,
                sep_pairs=      sep_pairs)
            sep_nc = sr['sep_nc']
            sep_nf = sr['sep_nf']
            sep_pairs_nc = sr['sep_pairs_nc']
            sep_pairs_nf = sr['sep_pairs_nf']
            if publish_GM:
                self.tbwr.add(value=sep_nc, tag=f'GM/sep_nc', step=loop_ix)
                self.tbwr.add(value=sep_nf, tag=f'GM/sep_nf', step=loop_ix)
                if sep_pairs:
                    self.tbwr.add(value=sep_pairs_nc, tag=f'GM/sep_pairs_nc', step=loop_ix)
                    self.tbwr.add(value=sep_pairs_nf, tag=f'GM/sep_pairs_nf', step=loop_ix)
            # INFO: progress relies on reports, and reports may be prepared in custom way (overridden) by diff GMs
            if progress_report:
                # progress
                passed = (time.time()-stime)/60
                left_nfo = ' - '
                if game_factor > 0:
                    full_time = passed / game_factor
                    left = (1-game_factor) * full_time
                    left_nfo = f'{left:.1f}'
                # speed
                hdiff = n_hands-n_hands_last_report
                hd_pp = int(hdiff / len(reports))
                spd_report = f'{int(hdiff / (time.time()-time_last_report))}H/s (+{hd_pp}Hpp)'
                n_hands_last_report = n_hands
                time_last_report = time.time()
                sep_report_pairs = f'::{sep_pairs_nc:.2f}[{sep_pairs_nf:.2f}]' if sep_pairs else ''
                progress_(
                    current=    game_factor,
                    total=      1.0,
                    prefix=     f'GM: {passed:.1f}min left:{left_nfo}min',
                    suffix=     f'{spd_report} -- SEP:{sep_nc:.2f}[{sep_nf:.2f}]{sep_report_pairs}',
                    length=     20)
            # games break - factor condition
            if game_factor == 1:
                self.logger.info('> finished game (game factor condition)')
                break
            # games break - all DMKs separation condition
            if sep_all_break and sep_nc == 1.0:
                self.logger.info(f'> finished game (all DMKs separation condition), game factor: {game_factor:.2f})')
                break
            # games break - pairs separation breaking value condition
            if sep_pairs and sep_pairs_nc >= sep_pairs_factor:
                self.logger.info(f'> finished game (pairs separation factor: {sep_pairs_factor:.2f}, game factor: {game_factor:.2f})')
                break
            loop_ix += 1
        self.tbwr.flush()
        self._stop_tables()
        self._stop_dmks_loops()
        # collect final global stats from every focused DMK
        message = QMessage(type='send_global_stats', data=None)
        for dn in dmk_results:
            self.dmkD[dn].que_from_gm.put(message)
        for _ in dmk_results:
            message = self.que_to_gm.get()
            data = message.data
            dmk_name = data.pop('dmk_name')
            dmk_results[dmk_name]['global_stats'] = data['global_stats']
        self._save_dmks()
        self._stop_dmks_processes()
        taken_sec = time.time() - stime
        taken_nfo = f'{taken_sec / 60:.1f}min' if taken_sec > 100 else f'{taken_sec:.1f}sec'
        speed = n_hands / taken_sec
        self.logger.info(f'{self.name} finished run_game, avg speed: {speed:.1f}H/s, time taken: {taken_nfo}')
        loop_stats = {'speed': speed}
        return {
            'dmk_results':  dmk_results,
            'loop_stats':   loop_stats}

    # prepares list of DMK names GM is focused on while preparing dmk_results
    def _get_dmk_focus_names(self) -> List[str]:
        """Returns names of DMKs whose results are tracked; here: all DMKs."""
        return list(self.dmkD.keys())

    # asks DMKs to send reports, but only form given IV
    def _get_reports(
            self,
            dmk_report_IV:Dict[str,int] # {dn: from_IV}
    ) -> Dict[str, Dict]:
        """Requests an incremental report from every DMK, starting at its given interval index."""
        reports: Dict[str, Dict] = {}  # {dn: {n_hands, wonH_IV, wonH_afterIV}}
        for dn,from_IV in dmk_report_IV.items():
            message = QMessage(type='send_dmk_report', data=from_IV)
            self.dmkD[dn].que_from_gm.put(message)
        for _ in dmk_report_IV:
            message = self.que_to_gm.get()
            report = message.data
            dmk_name = report.pop('dmk_name')
            reports[dmk_name] = report
        return reports
# GamesManager for Play & TRain concept for FolDMKs (some DMKs may play, some DMKs may train)
class GamesManager_PTR(GamesManager):
    """GamesManager variant where some DMKs play (fixed) and some train."""

    def __init__(
            self,
            dmk_point_PLL: Optional[List[Dict]]=    None,   # playable DMK list
            dmk_point_TRL: Optional[List[Dict]]=    None,   # trainable DMK list
            dmk_n_players: int=                     60,
            name: Optional[str]=                    None,
            **kwargs):
        """Sets 'n_players' and 'trainable' for all DMK points, then inits base GamesManager.

        Three scenarios are supported:
        1. playable & trainable given:
           dmk_n_players sets the number of players of one trainable DMK;
           the remaining seats of the trainable tables
           (n_tables * (N_TABLE_PLAYERS-1)) are filled with playable
           players sampled at random, so playable DMKs may get uneven
           player counts.
        2. only trainable given:
           dmk_n_players sets the number of players of one trainable DMK;
           number of tables = len(dmks) * dmk_n_players / N_TABLE_PLAYERS
        3. only playable given:
           dmk_n_players sets the number of players of one playable DMK;
           number of tables = len(dmks) * dmk_n_players / N_TABLE_PLAYERS
        """
        if not dmk_point_PLL: dmk_point_PLL = []
        if not dmk_point_TRL: dmk_point_TRL = []
        if not (dmk_point_PLL or dmk_point_TRL):
            raise PyPoksException('playing OR training DMKs must be given')
        n_tables = len(dmk_point_TRL) * dmk_n_players  # default when there are both playable & trainable
        # only playable OR only trainable: tables hold one kind of player
        if not dmk_point_PLL or not dmk_point_TRL:
            dmk_dnaL = dmk_point_PLL or dmk_point_TRL
            if (len(dmk_dnaL) * dmk_n_players) % N_TABLE_PLAYERS != 0:
                raise PyPoksException('Please correct number of DMK players: n DMKs * n players must be multiplication of N_TABLE_PLAYERS')
            n_tables = int((len(dmk_dnaL) * dmk_n_players) / N_TABLE_PLAYERS)
        # override to train (each DMK by default is saved as a trainable - we set also trainable to have this info here for later usage, it needs n_players to be set)
        for dmk in dmk_point_TRL:
            dmk.update({
                'n_players': dmk_n_players,
                'trainable': True})
        if dmk_point_PLL:
            # both: playable players fill the seats left at trainable tables
            if dmk_point_TRL:
                n_rest_players = n_tables * (N_TABLE_PLAYERS-1)
                rest_names = [dna['name'] for dna in dmk_point_PLL]
                rest_names = random.choices(rest_names, k=n_rest_players)
                for point in dmk_point_PLL:
                    point.update({
                        'n_players': len([nm for nm in rest_names if nm == point['name']]),
                        'trainable': False})
            # only playable
            else:
                play_dna = {
                    'n_players': dmk_n_players,
                    'trainable': False}
                for dmk in dmk_point_PLL:
                    dmk.update(play_dna)
        self.dmk_name_PLL = [dna['name'] for dna in dmk_point_PLL]
        self.dmk_name_TRL = [dna['name'] for dna in dmk_point_TRL]
        # name suffix encodes which kinds of DMKs take part
        nm = 'PL' if self.dmk_name_PLL else 'TR'
        if self.dmk_name_PLL and self.dmk_name_TRL:
            nm = 'TR+PL'
        GamesManager.__init__(
            self,
            dmk_pointL= dmk_point_PLL + dmk_point_TRL,
            name=       name or f'GM_{nm}_{stamp()}',
            **kwargs)
        self.logger.info(f'*** GamesManager_PTR started with (PL:{len(dmk_point_PLL)} TR:{len(dmk_point_TRL)}) DMKs on {n_tables} tables')
        for dna in dmk_point_PLL + dmk_point_TRL:
            self.logger.debug(f'> {dna["name"]} with {dna["n_players"]} players, trainable: {dna["trainable"]}')

    # creates new tables & puts players with PTR policy
    def _put_players_on_tables(self):
        """Seats one trainable player per table, filling the rest with playable players."""
        # use previous policy
        if not (self.dmk_name_PLL and self.dmk_name_TRL):
            return GamesManager._put_players_on_tables(self)
        self.logger.info('> puts players on tables with PTR policy..')
        ques_PL = []
        ques_TR = []
        for dmk in self.dmkD.values():
            ques = ques_TR if dmk.trainable else ques_PL
            for k in dmk.queD_to_player:  # {pid: que_to_pl}
                ques.append((k, dmk.queD_to_player[k], dmk.que_from_player))
        # shuffle players
        random.shuffle(ques_PL)
        random.shuffle(ques_TR)
        # put on tables
        self.tables = []
        table_ques = []
        table_logger = get_child(self.logger, name='table_logger', change_level=-10) if self.debug_tables else None
        while ques_TR:
            table_ques.append(ques_TR.pop())
            while len(table_ques) < N_TABLE_PLAYERS: table_ques.append(ques_PL.pop())
            random.shuffle(table_ques)
            self.tables.append(QPTable(
                name=       f'tbl{len(self.tables)}',
                que_to_gm=  self.que_to_gm,
                pl_ques=    {t[0]: (t[1], t[2]) for t in table_ques},
                logger=     table_logger))
            table_ques = []
        assert not ques_PL and not ques_TR

    # adds age update to dmk_results
    def run_game(self, **kwargs) -> Dict:
        """Runs the base game, bumping the age of trainable DMKs and adding it to results."""
        # update trainable age - needs to be done before game, cause after game DMKs are saved
        for dmk in self.dmkD.values():
            if dmk.trainable: dmk.age += 1
        rgd = GamesManager.run_game(self, **kwargs)
        for dn in rgd['dmk_results']:
            rgd['dmk_results'][dn]['age'] = self.dmkD[dn].age
        return rgd

    # at GamesManager_PTR we are focused on TRL (or PLL if not)
    def _get_dmk_focus_names(self) -> List[str]:
        """Returns trainable DMK names, or playable ones when no trainable are present."""
        return self.dmk_name_TRL or self.dmk_name_PLL
# manages DMKs for human games
class HuGamesManager(GamesManager):
    """GamesManager that seats a human player (via Tk GUI) against 1 or 2 DMKs."""

    def __init__(
            self,
            dmk_names: Union[List[str],str],
            logger=     None,
            loglevel=   20):
        """Builds given DMKs plus one HuDMK ('hm0') driven by a GUI_HDMK window."""
        if not logger:
            logger = get_pylogger(level=loglevel)
        if N_TABLE_PLAYERS != 3:
            raise PyPoksException('HuGamesManage supports now only 3-handed tables')
        logger.info(f'HuGamesManager starts with given dmk_names: {dmk_names}')
        h_name = 'hm0'
        # point of the human-driven DMK (single player)
        hdna = {
            'name':             h_name,
            'family':           'h',
            'trainable':        False,
            'n_players':        1,
            #'publish':          False,
            'fwd_stats_step':   10}
        if type(dmk_names) is str: dmk_names = [dmk_names]
        self.tk_gui = GUI_HDMK(players=[h_name]+dmk_names, imgs_FD='gui/imgs')
        hdmk = HuDMK(tk_gui=self.tk_gui, **hdna)
        if len(dmk_names) not in [1,2]:
            raise PyPoksException('Number of given DMK names must be equal 1 or 2')
        # points of the opponent DMKs; they fill the remaining table seats
        ddL = [{
            'name':             nm,
            'trainable':        False,
            'n_players':        N_TABLE_PLAYERS - len(dmk_names),
            #'publish':          False,
            'fwd_stats_step':   10} for nm in dmk_names]
        GamesManager.__init__(self, dmk_pointL=ddL, logger=logger)
        # update/override with HuDMK
        self.dmkD[hdna['name']] = hdmk
        self.families.add(hdna['family'])
        hdmk.que_to_gm = self.que_to_gm

    # starts all subprocesses
    def start_games(self):
        """Seats players, starts tables and DMKs (no game loop - GUI drives the game)."""
        self._put_players_on_tables()
        self._start_tables()
        self._start_dmks()

    # an alternative way of stopping all subprocesses (dmks & tables)
    def kill_games(self):
        """Hard-kills DMK and table processes (no graceful stop messages)."""
        self.logger.info('HuGamesManager is killing games..')
        for dmk in self.dmkD.values(): dmk.kill()
        for table in self.tables: table.kill()

    def run_tk(self): self.tk_gui.run_tk()
22683092245 | from tensorforce.core.layers.layer import Layer
import tensorflow as tf
import numpy as np
from tensorforce.core import Module, parameter_modules
from tensorforce import TensorforceError, util
class Transformer(Layer):
    """Entity-wise multi-head self-attention (Transformer) layer.

    Attends over ``num_entities`` entities per step, optionally masking
    invalid entities (mask retrieved by ``mask_name``), and either pools
    the attended entities ('avg'/'max') into a single vector or returns
    them unpooled ('none').
    """

    def __init__(self, name, n_head, hidden_size, num_entities, mlp_layer=1, mask_name='', pooling='average', residual=True,
                 masking=True, with_embeddings=False, with_ffn=True, post_norm=True, input_spec=None, pre_norm = True,
                 num_block=1,
                 summary_labels=()):
        """Transformer Layer.

        pooling: 'avg', 'max' or 'none'; any other value is coerced to 'none'.
        NOTE(review): the default 'average' is NOT a recognized mode and is
        therefore coerced to 'none' - confirm this is intended by callers.
        """
        self.n_head = n_head
        self.hidden_size = hidden_size
        self.num_entities = num_entities
        self.mlp_layer = mlp_layer
        self.pooling = pooling
        # fix: this was a `while` loop used as an `if`; normalize unknown modes to 'none'
        if self.pooling not in ['avg', 'max', 'none']:
            self.pooling = 'none'
        self.residual = residual
        self.masking = masking
        self.with_embeddings = with_embeddings
        self.with_ffn = with_ffn
        self.pre_norm = pre_norm
        self.post_norm = post_norm
        self.name = name
        self.num_block=num_block
        self.mask_name = mask_name
        super(Transformer, self).__init__(name=name, input_spec=input_spec, summary_labels=summary_labels)

    def default_input_spec(self):
        return dict(type='float', shape=None)

    def get_output_spec(self, input_spec):
        # fix: was `is not 'none'` (string identity comparison) - use equality
        if self.pooling != 'none':
            # NOTE(review): (self.hidden_size) is an int, not a 1-tuple - confirm
            # downstream spec handling accepts a bare int shape
            return dict(type='float', shape=(self.hidden_size))
        else:
            return dict(type='float', shape=(self.num_entities, self.hidden_size))

    def linear(self, a, b, bias):
        """Affine transform: a @ b + bias."""
        return tf.nn.bias_add(tf.matmul(a,b), bias)

    def tf_initialize(self):
        """Creates the layer variables: QKV projections, FFN weights and norm layers."""
        super().tf_initialize()
        # query/key projection (packed into one matrix) and value projection
        self.qk_weights = self.add_variable(
            name='qk_weights', dtype='float', shape=(self.input_spec['shape'][1], self.hidden_size*2),
            is_trainable=True, initializer='orthogonal'
        )
        self.qk_bias = self.add_variable(
            name='qk_bias', dtype='float', shape=(self.hidden_size*2,),
            is_trainable=True, initializer='zeros'
        )
        self.v_weights = self.add_variable(
            name='v_weights', dtype='float', shape=(self.input_spec['shape'][1], self.hidden_size),
            is_trainable=True, initializer='orthogonal'
        )
        self.v_bias = self.add_variable(
            name='v_bias', dtype='float', shape=(self.hidden_size,),
            is_trainable=True, initializer='zeros'
        )
        # FFN (post-attention MLP)
        self.mlp_layers_weights = []
        self.mlp_layers_bias = []
        for i in range(self.mlp_layer):
            self.mlp_layers_weights.append(self.add_variable(
                name='mlp' + str(i) + '_weights', dtype='float', shape=(self.input_spec['shape'][1], self.hidden_size),
                is_trainable=True, initializer='orthogonal'
            ))
            self.mlp_layers_bias.append(self.add_variable(
                name='mlp' + str(i) + '_bias', dtype='float', shape=(self.hidden_size,),
                is_trainable=True, initializer='zeros'
            ))
        # optional initial per-entity embedding
        if self.with_embeddings:
            self.init_emb_weights = self.add_variable(
                name='init_emb_weights', dtype='float', shape=(self.input_spec['shape'][1], self.hidden_size),
                is_trainable=True, initializer='orthogonal'
            )
            self.init_emb_bias = self.add_variable(
                name='init_emb_bias', dtype='float', shape=(self.hidden_size,),
                is_trainable=True, initializer='zeros'
            )
        # layer norms; their Keras variables are registered with the tensorforce module
        if self.post_norm:
            self.post_norm_layer = tf.keras.layers.LayerNormalization(axis=3)
            self.post_norm_layer.build(input_shape=((None,1) + self.input_spec['shape']))
            for variable in self.post_norm_layer.trainable_weights:
                name = variable.name[variable.name.rindex(self.name + '/') + len(self.name) + 1: -2]
                self.variables[name] = variable
                self.trainable_variables[name] = variable
        if self.pre_norm:
            self.pre_norm_layer = tf.keras.layers.LayerNormalization(axis=3)
            self.pre_norm_layer.build(input_shape=((None,1) + self.input_spec['shape']))
            for variable in self.pre_norm_layer.trainable_weights:
                name = variable.name[variable.name.rindex(self.name + '/') + len(self.name) + 1: -2]
                self.variables[name] = variable
                self.trainable_variables[name] = variable

    def tf_apply(self, x):
        """Applies self-attention (and optional pooling) to x of shape (bs, NE, features)."""
        # add a singleton time axis: (bs, 1, NE, features)
        x = x[:, tf.newaxis, :, :]
        bs, t, NE, feature = self.shape_list(x)
        mask = None
        if self.masking:
            mask = Module.retrieve_tensor(name=self.mask_name)
        x, mask = self.apply_attention(x, mask)
        # fix: was `is not 'none'` (string identity comparison) - use equality
        if self.pooling != 'none':
            if self.pooling == 'avg':
                x = self.entity_avg_pooling_masked(x, mask)
            elif self.pooling == 'max':
                x = self.entity_max_pooling_masked(x, mask)
            # NOTE(review): assumes the attended feature size equals the input
            # feature size (hidden_size) - confirm
            x = tf.reshape(x, (bs, feature))
        else:
            # NOTE(review): this branch dereferences mask, so it assumes masking=True
            mask = tf.expand_dims(mask, -1)
            x = x * mask
            x = tf.reshape(x, [bs, self.num_entities, self.hidden_size])
        return super().tf_apply(x=x)

    def apply_attention(self, x, mask):
        """One attention block: (embed) -> self-attention -> (FFN) -> (residual) -> (norm)."""
        # Create a first embedding for each object
        if self.with_embeddings:
            x = self.linear(x, self.init_emb_weights, self.init_emb_bias)
        a = self.self_attention(x, mask, self.n_head, self.hidden_size)
        if self.with_ffn:
            for i in range(self.mlp_layer):
                a = self.linear(a, self.mlp_layers_weights[i], self.mlp_layers_bias[i])
        if self.residual:
            x = x + a
        else:
            x = a
        if self.post_norm:
            x = self.post_norm_layer(x)
        return x, mask

    def self_attention(self, inp, mask, heads, n_embd):
        """Multi-head scaled dot-product attention over the entity axis."""
        bs, T, NE, features = self.shape_list(inp)
        # Put mask in format correct for logit matrix
        entity_mask = None
        if mask is not None:
            assert np.all(np.array(mask.get_shape().as_list()) == np.array(inp.get_shape().as_list()[:3])), \
                f"Mask and input should have the same first 3 dimensions. {self.shape_list(mask)} -- {self.shape_list(inp)}"
            entity_mask = mask
            mask = tf.expand_dims(mask, -2)  # (BS, T, 1, NE)
        query, key, value = self.qkv_embed(inp, heads, n_embd)
        logits = tf.matmul(query, key, name="matmul_qk_parallel")  # (bs, T, heads, NE, NE)
        logits /= np.sqrt(n_embd / heads)
        softmax = self.stable_masked_softmax(logits, mask)
        att_sum = tf.matmul(softmax, value, name="matmul_softmax_value")  # (bs, T, heads, NE, features)
        out = tf.transpose(att_sum, (0, 1, 3, 2, 4))  # (bs, T, n_output_entities, heads, features)
        n_output_entities = self.shape_list(out)[2]
        out = tf.reshape(out, (bs, T, n_output_entities, n_embd))  # (bs, T, n_output_entities, n_embd)
        return out

    def stable_masked_softmax(self, logits, mask):
        """Numerically-stable softmax over the last axis, honoring an optional mask."""
        # Subtract a big number from the masked logits so they don't interfere with computing the max value
        if mask is not None:
            mask = tf.expand_dims(mask, 2)
            logits -= (1.0 - mask) * 1e10
        # Subtract the max logit from everything so we don't overflow
        logits -= tf.reduce_max(logits, axis=-1, keepdims=True)
        unnormalized_p = tf.exp(logits)
        # Mask the unnormalized probabilities and then normalize and remask
        if mask is not None:
            unnormalized_p *= mask
        normalized_p = unnormalized_p / (tf.reduce_sum(unnormalized_p, axis=-1, keepdims=True) + 1e-10)
        if mask is not None:
            normalized_p *= mask
        return normalized_p

    def qkv_embed(self, inp, heads, n_embd):
        """Projects the input into per-head query, key and value tensors."""
        bs, T, NE, features = self.shape_list(inp)
        if self.pre_norm:
            inp = self.pre_norm_layer(inp)
        qk = self.linear(inp, self.qk_weights, self.qk_bias)
        qk = tf.reshape(qk, (bs, T, NE, heads, n_embd // heads, 2))
        # (bs, T, NE, heads, features)
        query, key = [tf.squeeze(x, -1) for x in tf.split(qk, 2, -1)]
        value = self.linear(inp, self.v_weights, self.v_bias)
        value = tf.reshape(value, (bs, T, NE, heads, n_embd // heads))
        query = tf.transpose(query, (0, 1, 3, 2, 4),
                             name="transpose_query")  # (bs, T, heads, NE, n_embd / heads)
        key = tf.transpose(key, (0, 1, 3, 4, 2),
                           name="transpose_key")  # (bs, T, heads, n_embd / heads, NE)
        value = tf.transpose(value, (0, 1, 3, 2, 4),
                             name="transpose_value")  # (bs, T, heads, NE, n_embd / heads)
        return query, key, value

    def shape_list(self, x):
        '''
        deal with dynamic shape in tensorflow cleanly
        '''
        ps = x.get_shape().as_list()
        ts = tf.shape(x)
        return [ts[i] if ps[i] is None else ps[i] for i in range(len(ps))]

    def create_mask(self, x):
        '''
        Create mask from the input. If the first element is 99999999.0, then mask it.
        The mask is 1 for valid entities and 0 for masked ones.
        '''
        # x = bs, NE, feature
        mask = 1 - tf.cast(tf.equal(x[:,:,:,0], 99999999.0), tf.float32)
        return mask

    def entity_avg_pooling_masked(self, x, mask):
        '''
        Masks and pools x along the second to last dimension. Arguments have dimensions:
            x: batch x time x n_entities x n_features
            mask: batch x time x n_entities
        '''
        mask = tf.expand_dims(mask, -1)
        masked = x * mask
        summed = tf.reduce_sum(masked, -2)
        denom = tf.reduce_sum(mask, -2) + 1e-5
        return summed / denom

    def entity_max_pooling_masked(self, x, mask):
        '''
        Masks and pools x along the second to last dimension. Arguments have dimensions:
            x: batch x time x n_entities x n_features
            mask: batch x time x n_entities
        '''
        mask = tf.expand_dims(mask, -1)
        has_unmasked_entities = tf.sign(tf.reduce_sum(mask, axis=-2, keepdims=True))
        offset = (mask - 1) * 1e9
        masked = (x + offset) * has_unmasked_entities
        return tf.reduce_max(masked, -2)
class Mask(Layer):
    """Layer that builds an entity mask from one or more named input tensors.

    An entity is masked (0) when the first feature of its vector equals
    ``value``; otherwise the mask is 1. Masks from all listed tensors are
    concatenated along the entity axis.
    """

    def __init__(self, name, num_entities, tensors, value=99.0, input_spec=None, summary_labels=()):
        """
        Mask layer.

        tensors: a tensor name or list of names ('*' means this layer's input);
        value: first-feature value that marks an entity as invalid.
        """
        self.value = value
        self.num_entities = num_entities
        # normalize to a tuple of tensor names
        self.tensors = (tensors,) if isinstance(tensors, str) else tuple(tensors)
        super(Mask, self).__init__(name=name, input_spec=input_spec, summary_labels=summary_labels)

    def tf_apply(self, x):
        # collect the referenced tensors ('*' is this layer's own input)
        tensors = list()
        for tensor in self.tensors:
            if tensor == '*':
                tensors.append(x)
            else:
                # NOTE(review): scope is popped so retrieve_tensor resolves the
                # name in the parent scope, then restored - confirm against
                # tensorforce Module internals
                last_scope = Module.global_scope.pop()
                tensors.append(Module.retrieve_tensor(name=tensor))
                Module.global_scope.append(last_scope)
        # pad each tensor with trailing singleton axes up to the output rank
        shape = self.output_spec['shape']
        for n, tensor in enumerate(tensors):
            for axis in range(util.rank(x=tensor), len(shape)):
                tensor = tf.expand_dims(input=tensor, axis=axis)
            tensors[n] = tensor
        # 1 where the first feature differs from self.value, else 0
        masks = []
        for tensor in tensors:
            tensor = tensor[:, tf.newaxis, :, :]
            tensor = tf.cast(tensor, tf.float32)
            mask = 1 - tf.cast(tf.equal(tensor[:, :, :, 0], self.value), tf.float32)
            masks.append(mask)
        mask = tf.concat(values=masks, axis=2)
        return mask

    def default_input_spec(self):
        return dict(type=None, shape=None)

    def get_output_spec(self, input_spec):
        # mask: batch x time x n_entities
        return dict(type='float', shape=(1, self.num_entities))
class OutputPositionItem(Layer):
    """Layer that keeps only the first two features (position pair) of each entity."""

    def __init__(self, name, t, input_spec=None, summary_labels=()):
        # 't' selects the output entity count: 'items' -> 20 entities, anything else -> 1
        self.t = t
        super(OutputPositionItem, self).__init__(name=name, input_spec=input_spec, summary_labels=summary_labels)

    def tf_apply(self, x):
        # slice out the leading two features of every entity
        return x[:, :, :2]

    def default_input_spec(self):
        return dict(type=None, shape=None)

    def get_output_spec(self, input_spec):
        # mask: batch x time x n_entities
        out_shape = (20, 2) if self.t == 'items' else (1, 2)
        return dict(type='float', shape=out_shape)
class ScatterEmbedding(Layer):
    """Layer that scatters per-entity embeddings onto a 2D (size x size) map.

    Takes the item embeddings plus a tensor of flat cell indices (retrieved
    by ``indices_name``) and produces a (batch, size, size, features) map
    with each embedding written at its index.
    """

    def __init__(self, name, indices_name = 'global', size = 10, hidden_size = 64,
                 base = False, input_spec = None, summary_labels=()):
        """
        This layer will create the scattered map. It takes as input the items embedding and global/local indices.
        It returns a map (batch_size, w, h, features).
        """
        # fix: self.indices_name and self.size were each assigned twice; keep one of each
        self.indices_name = indices_name
        self.size = size
        self.hidden_size = hidden_size
        self.base = base
        super(ScatterEmbedding, self).__init__(name=name, input_spec=input_spec, summary_labels=summary_labels)

    def tf_apply(self, x):
        """Scatters x (BS, entities, features) to a (BS, size, size, features) map."""
        BS, entities, features = self.shape_list(x)
        self.features = features
        size = self.size
        indices = Module.retrieve_tensor(name=self.indices_name)
        indices = tf.reshape(indices, (BS, entities))
        indices = tf.cast(indices, tf.int32)
        # fix: was `is not 'global_indices'` (string identity comparison) - use equality.
        # NOTE(review): the constructor default is 'global', which never equals
        # 'global_indices' - confirm which name callers actually pass.
        if self.indices_name != 'global_indices':
            # redirect out-of-range indices far outside the valid scatter area
            indices = tf.where(tf.greater_equal(indices, 0), indices, -(size*size*BS - 1))
            indices = tf.where(tf.less_equal(indices, size*size - 1), indices, -(size*size*BS - 1))
        # flatten the batch so one scatter_nd writes all batches at once:
        # offset each batch's indices by its own size*size slab
        x = tf.reshape(x, (BS*entities, features))
        a_rows = tf.expand_dims(tf.range(BS, dtype=tf.int32), 1)
        a_rows *= (size*size)
        indices = indices + a_rows
        indices = tf.reshape(indices, (BS*entities, 1))
        scattered_map = tf.scatter_nd(indices, x, [BS*size*size, features])
        scattered_map = tf.reshape(scattered_map, (BS, size, size, features))
        return scattered_map

    def shape_list(self, x):
        """Returns the shape of x, using static dims where known and dynamic ones otherwise."""
        ps = x.get_shape().as_list()
        ts = tf.shape(x)
        return [ts[i] if ps[i] is None else ps[i] for i in range(len(ps))]

    def default_input_spec(self):
        return dict(type=None, shape=None)

    def get_output_spec(self, input_spec):
        return dict(type='float', shape=(self.size, self.size, self.hidden_size))
| SestoAle/Adaptive-NPCs-with-procedural-entities | new_layers/Transformer.py | Transformer.py | py | 15,414 | python | en | code | 2 | github-code | 36 |
43296309454 | from pypy.interpreter import gateway
from rpython.rlib.objectmodel import dict_to_switch
from rpython.rlib.unroll import unrolling_iterable
app = gateway.applevel("""
def syntax_warning(msg, fn, lineno, offset):
import warnings
try:
warnings.warn_explicit(msg, SyntaxWarning, fn, lineno)
except SyntaxWarning:
raise SyntaxError(msg, (fn, lineno, offset, msg))
""", filename=__file__)
_emit_syntax_warning = app.interphook("syntax_warning")
del app
def syntax_warning(space, msg, fn, lineno, offset):
    """Raise an applevel SyntaxWarning.

    If the user has set this warning to raise an error, a SyntaxError will be
    raised."""
    _emit_syntax_warning(
        space,
        space.newtext(msg),
        space.newtext(fn),
        space.newint(lineno),
        space.newint(offset))
def parse_future(tree, feature_flags):
    """Collect __future__ feature flags from the leading statements of *tree*.

    Returns a (flags, lineno, column) triple; lineno/column point at the last
    ``from __future__ import`` seen, or (0, 0) when there is none or the tree
    has no usable body.
    """
    from pypy.interpreter.astcompiler import ast
    body = None
    if isinstance(tree, ast.Module):
        body = tree.body
    elif isinstance(tree, ast.Interactive):
        body = tree.body
    if body is None:
        return 0, 0, 0
    flags = 0
    future_lineno = 0
    future_column = 0
    seen_docstring = False
    for stmt in body:
        if isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Str):
            # A single leading string (the docstring) may precede the imports.
            if seen_docstring:
                break
            seen_docstring = True
        elif isinstance(stmt, ast.ImportFrom) and stmt.module == "__future__":
            future_lineno = stmt.lineno
            future_column = stmt.col_offset
            for alias in stmt.names:
                assert isinstance(alias, ast.alias)
                # Unknown names contribute 0 here; invalid flags are caught
                # later in codegen.py.
                flags |= feature_flags.get(alias.name, 0)
        else:
            # Any other statement (including a non-__future__ import) ends
            # the scan.
            break
    return flags, future_lineno, future_column
class ForbiddenNameAssignment(Exception):
    """Raised when code tries to bind a name that may never be assigned to."""

    def __init__(self, name, node):
        # Remember both the offending name and the AST node it appeared in.
        self.name, self.node = name, node
def check_forbidden_name(name, node=None):
    """Raise an error if the name cannot be assigned to."""
    # XXX Warn about using True and False
    if name == "None" or name == "__debug__":
        raise ForbiddenNameAssignment(name, node)
def mangle(name, klass):
    """Apply private-name mangling: ``__x`` inside class ``Foo`` -> ``_Foo__x``.

    Names without a double-underscore prefix, dunder names, and dotted names
    (the only dotted case is a package name inside an import statement) are
    returned untouched, as is any name when the class name consists solely of
    underscores.
    """
    if not name.startswith('__'):
        return name
    # Don't mangle __id__ or names with dots.
    if name.endswith('__') or '.' in name:
        return name
    # Skip the class name's leading underscores; if nothing remains (empty or
    # all-underscore class name), mangling is disabled.
    n = len(klass)
    i = 0
    while i < n and klass[i] == '_':
        i += 1
    if i == n:
        return name
    return "_%s%s" % (klass[i:], name)
def intern_if_common_string(space, w_const):
    """Return an interned version of *w_const* when it looks like an identifier.

    Non-string wrapped objects, and strings containing anything besides
    alphanumerics and underscores, are passed through unchanged.
    """
    if not space.is_w(space.type(w_const), space.w_text):
        return w_const
    for ch in space.text_w(w_const):
        if not (ch.isalnum() or ch == '_'):
            return w_const
    return space.new_interned_w_str(w_const)
| mozillazg/pypy | pypy/interpreter/astcompiler/misc.py | misc.py | py | 3,176 | python | en | code | 430 | github-code | 36 |
751441711 | import torch
from torch import nn
import torch.nn.functional as F
#Useful for nn.Sequential
class Flatten(nn.Module):
    """Collapse every dimension after the batch axis (for use in nn.Sequential)."""

    def forward(self, input):
        batch = input.size(0)
        return input.view(batch, -1)
#Picked from Udacity's PyTorch course
class CIFARNet(nn.Module):
    """Small CNN for 32x32 RGB images with a final linear projection to z_dim.

    Three conv+pool stages reduce the image to 64x4x4, then two fully
    connected layers and the ``g`` head produce the z_dim-sized output.
    """

    def __init__(self, z_dim):
        super(CIFARNet, self).__init__()
        # Three conv stages (registration order kept stable for state dicts).
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(64 * 4 * 4, 500)
        self.fc2 = nn.Linear(500, 10)
        self.g = nn.Linear(10, z_dim)
        # Defined but currently unused in forward().
        self.dropout = nn.Dropout(0.25)

    def forward(self, x):
        # conv -> relu -> pool three times: 32x32 -> 16x16 -> 8x8 -> 4x4
        for conv in (self.conv1, self.conv2, self.conv3):
            x = self.pool(F.relu(conv(x)))
        x = x.view(-1, 64 * 4 * 4)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.g(x)
class CIFARNet2(nn.Module):
    """Variant of CIFARNet whose projection head ``g`` is a small MLP
    (500 -> 100 -> z_dim) instead of fc2 + linear."""

    def __init__(self, z_dim):
        super(CIFARNet2, self).__init__()
        # Same convolutional trunk as CIFARNet (registration order preserved).
        self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
        self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(64 * 4 * 4, 500)
        self.g = nn.Sequential(nn.Linear(500, 100),
                               nn.ReLU(),
                               nn.Linear(100, z_dim))

    def forward(self, x):
        # conv -> relu -> pool three times: 32x32 -> 16x16 -> 8x8 -> 4x4
        for conv in (self.conv1, self.conv2, self.conv3):
            x = self.pool(F.relu(conv(x)))
        x = x.view(-1, 64 * 4 * 4)
        x = F.relu(self.fc1(x))
        return self.g(x)
4714548611 | """
Write simple languoid stats to build/languoids.json.
This is to allow comparison between two branches of the repos.
Intended usage:
```
git checkout master
glottolog-admin writelanguoidstats
git checkout <OTHER_BRANCH>
glottolog-admin check --old-languoids
```
"""
try:
from git import Repo
except ImportError: # pragma: no cover
Repo = None
from clldutils import jsonlib
def run(args):  # pragma: no cover
    """Dump languoid ids grouped by level to build/languoids.json.

    Refuses to run unless the repos clone is on the master branch (checked
    only when gitpython is importable), so the snapshot always describes
    master.
    """
    if Repo:
        assert str(Repo(str(args.repos.repos)).active_branch) == 'master', \
            'Command should be run on master branch'
    by_level = {'language': [], 'family': [], 'dialect': []}
    for languoid in args.repos.languoids():
        by_level[languoid.level.name].append(languoid.id)
    jsonlib.dump(by_level, args.repos.build_path('languoids.json'))
| glottolog/pyglottolog | src/pyglottolog/admin_commands/writelanguoidstats.py | writelanguoidstats.py | py | 772 | python | en | code | 20 | github-code | 36 |
# Read a point "x,y" and report which region of the plane it falls in:
# A and B are two axis-aligned rectangles (open bounds), C is their overlap,
# D is everything outside both.
x, y = (float(v) for v in input("x,y : ").split(","))
# Rectangle A: (0,0) to (40,40)
a_left, a_bottom, a_right, a_top = 0, 0, 40, 40
# Rectangle B: (-40,-20) to (10,20)
b_left, b_bottom, b_right, b_top = -40, -20, 10, 20
in_a = a_left < x < a_right and a_bottom < y < a_top
in_b = b_left < x < b_right and b_bottom < y < b_top
if in_a and in_b:
    region = "C"
elif in_a:
    region = "A"
elif in_b:
    region = "B"
else:
    region = "D"
print(f"( {x} , {y} ) is in {region}")
| ratchanonp/comproglab | 64-1LAB3/6434480323Lab3P3.py | 6434480323Lab3P3.py | py | 488 | python | en | code | 0 | github-code | 36 |
2114660989 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
class MyUser(models.Model):
    """Demo model showing manual horizontal sharding across two tables.

    Rows live in ``user_1`` or ``user_2`` depending on ``id % 2``; normal
    QuerySet methods bypass the sharding and hit the base table name.
    """
    # Primary key supplied by the caller; also selects the shard.
    id = models.IntegerField(primary_key=True, verbose_name='ID')
    username = models.CharField(max_length=255)
    @classmethod
    def get_sharding_table(cls, id=None):
        """Map an id to its shard table name: user_1 for even ids, user_2 for odd."""
        piece = id % 2 + 1
        return cls._meta.db_table + str(piece)
    @classmethod
    def sharding_get(cls, id=None, **kwargs):
        """Fetch one row by id (plus optional extra equality filters) from its shard.

        Returns None when no row matches.
        NOTE(review): the table name is interpolated into the SQL string; it is
        derived from the model's db_table, not user input, so this looks safe,
        but the filter *values* rely on the parameterized ``params`` list.
        """
        assert isinstance(id, int), 'id must be integer!'
        table = cls.get_sharding_table(id)
        sql = "SELECT * FROM %s" % table
        kwargs['id'] = id
        # Condition keys and params iterate the same dict, so they line up.
        condition = ' AND '.join([k + '=%s' for k in kwargs])
        params = [str(v) for v in kwargs.values()]
        where = " WHERE " + condition
        try:
            return cls.objects.raw(sql + where, params=params)[0] # should mimic how QuerySet.get handles this
        except IndexError:
            # really ought to raise Django's DoesNotExist exception instead
            return None
    class Meta:
        # Base name; the shard suffix (1 or 2) is appended at query time.
        db_table = 'user_'
# class User1(MyUser):
# class Meta:
# db_table = 'user_1'
# class User2(MyUser):
# class Meta:
# db_table = 'user_2'
| the5fire/django-sharding-demo | sharding_demo/app/models.py | models.py | py | 1,189 | python | en | code | 0 | github-code | 36 |
15151358717 | # Imports
load("@npm//@bazel/typescript:index.bzl", "ts_library")
load("@build_bazel_rules_nodejs//:index.bzl", "pkg_npm")
load("@npm//@bazel/jasmine:index.bzl", "jasmine_node_test")
load("//tools:defs.bzl",
"SOLUTION_PACKAGE_NAME",
"TYPESCRIPT_PRODMODE_TARGET",
"TYPESCRIPT_DEVMODE_TARGET",
"TYPESCRIPT_PRODMODE_MODULE",
"TYPESCRIPT_DEVMODE_MODULE"
)
def sn_package(name, deps = [], srcs = None, test_srcs = None):
    """Declares a solution package: a ts_library, a spec ts_library and a jasmine test.

    Args:
        name: package name; also used to derive the internal target names.
        deps: extra dependencies shared by the library and its spec targets.
        srcs: explicit sources; defaults to every *.ts file except *.spec.ts.
        test_srcs: explicit spec sources; defaults to every *.ts file.
    """
    # Derived module and target names.
    module_name = SOLUTION_PACKAGE_NAME + "/" + name
    module_source = name + "_source"
    # NOTE(review): missing underscore — likely meant name + "_spec_source";
    # harmless as long as the name stays internally consistent.
    module_spec_source = name + "spec_source"
    module_spec = name + "_spec"
    module_spec_bin = module_spec + "_bin"
    # tslib is always required by the compiled TypeScript output.
    deps = deps + [
        "@npm//tslib"
    ]
    # Specs additionally need the jasmine typings.
    spec_deps = deps + [
        "@npm//@types/jasmine",
    ]
    # Production sources: all .ts files minus specs, unless overridden.
    native.filegroup(
        name = module_source,
        srcs = native.glob(
            include = ["**/*.ts"],
            exclude = ["**/*.spec.ts"]
        ) if not srcs else srcs
    )
    # Spec sources: every .ts file (specs compile against the sources too).
    native.filegroup(
        name = module_spec_source,
        srcs = native.glob(
            include = ["**/*.ts"],
        ) if not test_srcs else test_srcs
    )
    # Main library target, named after the package itself.
    ts_library(
        name = name,
        module_name = module_name,
        package_name = module_name,
        srcs = [module_source],
        deps = deps,
        prodmode_target = TYPESCRIPT_PRODMODE_TARGET,
        devmode_target = TYPESCRIPT_DEVMODE_TARGET,
        prodmode_module = TYPESCRIPT_PRODMODE_MODULE,
        devmode_module = TYPESCRIPT_DEVMODE_MODULE,
    )
    # Spec library compiled with the test tsconfig.
    ts_library(
        name = module_spec,
        module_name = module_name,
        package_name = module_name,
        srcs = [module_spec_source],
        deps = spec_deps,
        tsconfig = "//:tsconfig.spec.json",
        prodmode_target = TYPESCRIPT_PRODMODE_TARGET,
        devmode_target = TYPESCRIPT_DEVMODE_TARGET,
        prodmode_module = TYPESCRIPT_PRODMODE_MODULE,
        devmode_module = TYPESCRIPT_DEVMODE_MODULE,
    )
    # Jasmine runner executing the compiled specs.
    jasmine_node_test(
        name = module_spec_bin,
        srcs = [module_spec],
    )
| sqlProvider/solution | tools/package.bzl | package.bzl | bzl | 2,038 | python | en | code | 0 | github-code | 36 |
# BOJ 1931: maximum number of non-overlapping meetings (greedy by end time).
import sys
input = sys.stdin.readline

n = int(input())
meetings = [tuple(map(int, input().split())) for _ in range(n)]
# Earliest-finishing meeting first; ties broken by earlier start.
meetings.sort(key=lambda m: (m[1], m[0]))

count = 0
last_end = 0
for begin, finish in meetings:
    # A meeting fits if it starts no earlier than the last chosen one ended.
    if begin >= last_end:
        count += 1
        last_end = finish
print(count)
| zsmalla/algorithm-jistudy-season1 | src/chapter4/1_그리디알고리즘(1)/임지수/1931_python_임지수.py | 1931_python_임지수.py | py | 291 | python | en | code | 0 | github-code | 36 |
72242760743 | from django.shortcuts import render
from django.http.request import QueryDict
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.views.generic.base import TemplateView
from six.moves.urllib.parse import urlparse
from rest_framework.renderers import JSONRenderer
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.viewsets import GenericViewSet as DRFGenericViewset
from rest_framework.mixins import CreateModelMixin, ListModelMixin, RetrieveModelMixin, UpdateModelMixin, \
DestroyModelMixin
from .renderers import PrepairAPIRenderer
from api.flightplan_client import FlightPlanAPIClient
from accounts.models import Member
def index(request):
    """Landing page.

    POST: look up the submitted ICAO code via the flight-plan API and redirect
    to the dashboard for the matching airport; render an error page otherwise.
    GET: render the page, including the member's home airport when available.
    """
    user = request.user
    data = {}
    if request.POST:
        # NOTE(review): if the form ever omits 'icao', this is None and
        # icao.lower() below raises AttributeError — confirm the form always
        # submits the field.
        icao = request.POST.get('icao', None)
        client = FlightPlanAPIClient()
        response = client.get(icao=icao.lower())
        if response.get('pk'):
            pk = response.get('pk')
            return HttpResponseRedirect(reverse('dashboard') + '/?airportpk={}'.format(pk))
        else:
            error_code = response.get('error')
            # presumably HTTP 429 Too Many Requests from the upstream API
            if error_code == 429:
                return render(request, 'index.html', {'over_limit': True})
            elif not error_code:
                # No pk and no error code: treat as an unknown failure.
                general_error = 'An Unknown error has occurred. Contact site Admin.'
                return render(request, 'index.html', {'error_code': general_error, 'icao': icao})
            else:
                return render(request, 'index.html', {'error_code': error_code, 'icao': icao})
    if user.id:
        try:
            member = Member.objects.get(user=user)
            if member.home_airport:
                home_airport_pk = member.home_airport.pk
                home_airport_icao = member.home_airport.icao
                data = {'home_airport_pk': home_airport_pk, 'home_airport_icao': home_airport_icao}
        except Member.DoesNotExist:
            pass # Data error, do not return empty dictionary
        except Member.MultipleObjectsReturned:
            pass # Data error, do not return empty dictionary
    return render(request, 'index.html', data)
class DashboardTemplateView(TemplateView):
    """Renders the dashboard, exposing the requested airport pk, the current
    user id and a flag for whether the bare /dashboard/ URL was hit."""

    template_name = 'dashboard.html'

    def get_context_data(self, **kwargs):
        context = super(DashboardTemplateView, self).get_context_data(**kwargs)
        context['airport_pk'] = self.request.GET.get('airportpk', 0)
        context['user_id'] = self.request.user.id if self.request.user.id else 0
        # 1 when the request path is exactly /dashboard/, 0 otherwise.
        is_base = urlparse(self.request.path).path == '/dashboard/'
        context['base_redirect'] = 1 if is_base else 0
        return context
class PrepairViewSet(CreateModelMixin,
                     ListModelMixin,
                     RetrieveModelMixin,
                     UpdateModelMixin,
                     DestroyModelMixin,
                     DRFGenericViewset):
    """
    Base DRF Viewset for all objects.
    Default CRUD Methods are all inherited through DRF Mixins; subclasses
    supply the model/queryset/serializer and the filterable fields below.
    """
    # HTTP verbs exposed to the browsable renderer.
    prepair_browsable = ['get', 'head', 'options']
    renderer_classes = (JSONRenderer, PrepairAPIRenderer)
    permission_classes = (IsAuthenticatedOrReadOnly,)
    # These values are set within the subclass Model Viewsets
    prepair_model_class = None
    queryset = None
    serializer_class = None
    # Query params accepted as filters; iexact_filter_fields get
    # case-insensitive matching when a single value is supplied.
    filter_fields = tuple()
    iexact_filter_fields = tuple()
    def filter_queryset(self, queryset=None, is_list_call=False):
        """Build a queryset from the request's query params.

        Multi-valued params become ``field__in`` filters; single values use
        ``field__iexact`` when listed in iexact_filter_fields, plain equality
        otherwise.
        NOTE(review): the ``queryset`` and ``is_list_call`` arguments are
        accepted but ignored — filtering always starts from
        prepair_model_class.objects.
        """
        request_params = self.request.query_params
        filter_kwargs = {}
        for filter_field in self.filter_fields:
            if filter_field in request_params:
                # Keep the original name: filter_field may be rewritten below.
                initial_filter_field = filter_field
                if isinstance(request_params, QueryDict):
                    values_list = request_params.getlist(filter_field)
                else:
                    values_list = request_params.get(filter_field)
                # Django ORM does not support iexact__in, so must choose one or the other
                if isinstance(values_list, list) and len(values_list) > 1:
                    filter_kwargs[filter_field + '__in'] = values_list
                else:
                    if filter_field in self.iexact_filter_fields:
                        filter_field += '__iexact'
                    filter_kwargs[filter_field] = request_params[initial_filter_field]
        return self.prepair_model_class.objects.filter(**filter_kwargs)
| bfolks2/django-aviation | prepair/views.py | views.py | py | 4,566 | python | en | code | 2 | github-code | 36 |
10841211976 | import logging
import pathlib
from flask import Blueprint, g, request, make_response
from flask_restplus import Resource, Namespace, fields, abort
from photos.model import SourceFolder
from photos.scanner import scan_source_folder
log = logging.getLogger(__name__)
sources_blueprint = Blueprint("sources", __name__)
ns = Namespace("sources")
folder_fields = ns.model("SourceFolder", {"folder": fields.String, "stats": fields.Raw})
@ns.route("/_scan")
class Scan(Resource):
    """POST /_scan: rescan every registered source folder."""

    def post(self):
        # Map each folder path to the number of photos its scan reported.
        photo_counts = {}
        for source in g.session.query(SourceFolder):
            photo_counts[source.folder] = scan_source_folder(g.session, source)
        return photo_counts
def normalize_folder(f):
    """Return the pathlib-normalized string form of folder path *f*."""
    normalized = pathlib.Path(f)
    return str(normalized)
@ns.route("/", defaults={"folder": None})
@ns.route("/<string:folder>")
class SourceFolders(Resource):
    """CRUD endpoints for source folders."""

    @ns.expect(folder_fields, validate=True)
    def post(self, folder):
        """Register a new source folder (path from the URL or the JSON body)."""
        folder = normalize_folder(folder or request.get_json()["folder"])
        g.session.add(SourceFolder(folder=folder))
        response = make_response("", 201)
        return response

    @ns.marshal_with(folder_fields)
    def get(self, folder):
        """Return one folder (404 if unknown) or, without a path, all folders."""
        if folder:
            f = g.session.query(SourceFolder).get(folder)
            if f is None:
                abort(404, "Folder not found.")
            # BUG FIX: the looked-up folder was never returned, so a valid
            # GET /<folder> always produced an empty (None) response body.
            return f
        else:
            return g.session.query(SourceFolder).all()
| sebbegg/photos | photos/web/resources/scanner.py | scanner.py | py | 1,419 | python | en | code | 0 | github-code | 36 |
33586861988 | #!/usr/bin/env python3
import sys
from testflows.core import *
append_path(sys.path, "..")
from helpers.common import Pool, join, run_scenario
from helpers.argparser import argparser
@TestModule
@Name("ldap")
@ArgumentParser(argparser)
def regression(self, local, clickhouse_binary_path, parallel=None, stress=None):
    """ClickHouse LDAP integration regression module.
    Runs the authentication, external user directory and role mapping
    sub-regressions in parallel on a pool of 3 workers.
    """
    top().terminating = False
    # Arguments forwarded to every sub-regression feature.
    args = {"local": local, "clickhouse_binary_path": clickhouse_binary_path}
    # Only override context settings when explicitly provided.
    if stress is not None:
        self.context.stress = stress
    if parallel is not None:
        self.context.parallel = parallel
    tasks = []
    with Pool(3) as pool:
        try:
            run_scenario(pool, tasks, Feature(test=load("ldap.authentication.regression", "regression")), args)
            run_scenario(pool, tasks, Feature(test=load("ldap.external_user_directory.regression", "regression")), args)
            run_scenario(pool, tasks, Feature(test=load("ldap.role_mapping.regression", "regression")), args)
        finally:
            # Always wait for submitted tasks, even if scheduling failed.
            join(tasks)
if main():
    regression()
| ByConity/ByConity | tests/testflows/ldap/regression.py | regression.py | py | 1,093 | python | en | code | 1,352 | github-code | 36 |
37130243038 | from tkinter import messagebox
import tkinter as tk
import tkinter.ttk as ttk
from PIL import Image,ImageTk
from pathlib import Path
import random
from minigames.game_components import GamePlay, Player
from minigames.playerdatabase import UserDataBase
class GuessTheNumber(tk.Frame, GamePlay):
    '''
    Game used to guess a number (or several numbers) from a given set of
    numbers, e.g. guess one number between 1..9, or bet whether the sum of
    two dice is less than, equal to, or bigger than 7.
    Generated numbers can be unique or not. Scores and the session state are
    persisted through UserDataBase.
    '''
    def __init__(self, parent, player_info):
        '''
        Build the game window and restore or start a session.

        parameters:
            parent : tk widget hosting this frame; also used for sizing/close
            player_info : identifier of the current player, used for all
                database lookups and score updates
        Attributes configured here:
            title : str, the name of the game
            numrange : range e.g. range(0, 10)
            numofnums : int, how many numbers are generated
            unique: bool, are the generated numbers unique
        '''
        '''initializes Window's attributes'''
        self.close_screen = parent
        parent.update()
        self.width = parent.winfo_width()
        self.height = parent.winfo_height()
        super().__init__(master=parent)
        self.parent = parent
        self.title = "Guess the Number"
        game_rules = messagebox.showinfo('Rules', message="Guess a number between 0 and 100")
        self.numrange = range(0,101)
        self.unique = True
        self.numofnums = 1
        self.__magic = random.choices(self.numrange, k=self.numofnums) #@ver3 sample changed to choice allowing generation eith replacement
        # NOTE(review): duplicate of the previous line — the number is drawn
        # twice and only the second draw is kept; likely unintended.
        self.__magic = random.choices(self.numrange, k=self.numofnums)
        self.guesses = {'correct':set(), 'wrong':set()} # all the guesses, wrong and correct
        # current user playing the game
        self.player_info = player_info
        # database object
        self.database = UserDataBase()
        # player information of current user
        self.player_information = self.database.find_player(self.player_info)
        # function to determine does game open new session or run old if it was not finished
        self.check_game_state()
        self.rules = """
        Guess the Number Game Rules:
        - Guess the Number, is a game that is played with a number between 0 and 100.
        At the beginning of the game, a number is generated but is not shown to the player.
        After the number is generated, the player needs to enter a number to guess the generated number.
        - If the player's guess is greater than the generated number, s/he will get a hint as Too high!,
        and the player needs to enter another number to guess the generated number based on the information given.
        - If the player's guess is less than the generated number, s/he will get a hint as Too low!,
        and the player needs to enter another number to guess the generated number based on the information given.
        - If the player's guess is equal to the generated number, s/he will get a message stating that:
        Player's guess (the number s/he entered) is correct, and the player guessed it with only n amount of guesses.
        The game ends.
        - If the player wants to continue to play the game, s/he can click on the Restart button to restart the game.
        The game will generate a new number and the player can start to guess the new number.
        """
        # Create text label for the game
        game_label = ttk.Label(self, text=self.title, font=("Helvetica", 40))
        game_label.grid(row=0, column=0, columnspan=5, sticky=tk.NSEW)
        # Closing the game button
        close_button = ttk.Button(self, text='Quit', command=self.__close)
        close_button.grid(row=5, column=4, sticky=tk.NSEW)
        # Text box for guessing the number
        guess_label = ttk.Label(self, text='Your Guess:')
        guess_label.grid(row=2, column=0, sticky=tk.NSEW)
        self.guess_entry = ttk.Spinbox(self, from_=1, to=100) #ttk.Entry(self, width=10)
        self.guess_entry.grid(row=2, column=1, sticky=tk.NSEW)
        # Pop up window for the rules
        rules_button = ttk.Button(self, text='Rules', command=self.show_rules)
        rules_button.grid(row=4, column=4, sticky=tk.NSEW)
        # Create enter button to check the input of the guess textbox
        enter_button = ttk.Button(self, text="Enter", command=self.game_play)
        enter_button.grid(row=2, column=2, sticky=tk.NSEW)
    @property
    def magic(self):
        # Read-only view of the secret number(s); direct assignment is blocked
        # by the setter below (the list itself is still mutable).
        return self.__magic
    @magic.setter
    def magic(self, value):
        raise ValueError('magic can not be set')
    @property
    def numofnums(self)->int:
        return self.__numofnums
    @numofnums.setter
    def numofnums(self, num:int)->None:
        # NOTE(review): leftover debug print.
        print(num, len(self.numrange))
        # With unique draws, the count must fit inside the range.
        if self.unique and 1 <= num < len(self.numrange):
            self.__numofnums = num
        elif not self.unique:
            self.__numofnums = num
        else:
            raise ValueError('the number of numbers to generate < range and > 0')
    def check_game_state(self):
        '''
        Check whether to start a new game session or continue an old one,
        based on the saved game-state flag (index 3 of the player record).
        '''
        # NOTE(review): leftover debug print.
        print(self.player_information)
        # in case player did not finish the game
        if self.player_information[3] == 0:
            self.magic[0] = self.player_information[4] # original number to be guessed
            self.guesses = {'correct': set(), 'wrong': set(range(self.player_information[5]-1))} # original amounts of attempts
        # in case player did finish the game
        else:
            self.restart()
    def check(self, num:int)->bool:
        '''
        Check whether *num* is one of the secret numbers and record the guess.
        parameters:
            num: int, integer user guessed
        return:
            bool, True when the guess is in magic (recorded as correct),
            False otherwise (recorded as wrong)
        '''
        if num in self.__magic:
            self.guesses['correct'].add(num)
            return True
        else:
            self.guesses['wrong'].add(num)
            return False
    def checksum(self, num):
        '''
        returns the result of subtracting the given number
        from the sum of the magic numbers
        '''
        return sum(self.__magic) - num
    def isover(self):
        '''
        Checks if the game is over
        return:
            True if all the numbers are correctly guessed
        '''
        return self.guesses['correct'] == set(self.__magic)
    def restart(self) -> None:
        '''
        Re-initialize the game to start anew: draw fresh magic number(s) and
        clear the guess history.
        NOTE(review): uses random.sample here, while __init__ uses
        random.choices — sampling without vs. with replacement; confirm which
        is intended.
        '''
        self.__magic = random.sample(self.numrange, self.numofnums)
        self.guesses = {'correct':set(), 'wrong':set()} # all the guesses, wrong and correct
    def __close(self):
        '''Ask for confirmation, persist the unfinished session, and close.'''
        if messagebox.askyesno("Close", "Do you want to close the Guess The Number game?"):
            # add the number to be guessed
            self.database.add_guess_the_number_score(self.magic[0],self.player_info)
            # add the number of attempts user has made
            self.database.add_guess_the_number_player_attempts(len(self.guesses['correct'])+len(self.guesses['wrong']), self.player_info)
            # store 0 (False) for the game state: the session is unfinished
            self.database.add_guess_the_number_game_state(0,self.player_info)
            # see database immediately after to confirm correct save
            self.database.see_database()
            # destroy game window
            self.parent.destroy()
    def show_rules(self):
        '''Pop up an info box with the game rules.'''
        rules = ttk.tkinter.messagebox.showinfo(title="Rules", message=self.rules)
        return rules
    """ User guesses random generated number """
    def game_play(self):
        '''Handle one guess from the spinbox: record it, persist the session,
        and show the matching feedback (correct / too high / too low).'''
        self.player_guess = int(self.guess_entry.get())
        self.game_result = self.check(self.player_guess)
        if ( self.game_result == True ):
            # add the number to be guessed
            self.database.add_guess_the_number_score(self.magic[0],self.player_info)
            # add the number of attempts user has made
            self.database.add_guess_the_number_player_attempts(len(self.guesses['correct']) + len(self.guesses['wrong']), self.player_info)
            # store 1 ( True ) for current game state because player guess was correct
            self.database.add_guess_the_number_game_state(1,self.player_info)
            # see database immediately after to confirm correct save
            self.database.see_database()
            msg_box = messagebox.showinfo('Info screen', message=f"You guess it right! ({self.player_guess}) with only {len(self.guesses['correct']) + len(self.guesses['wrong'])} guesses!")
            exit_box = tk.messagebox.askquestion('Exit Application', 'Would you like to play again?')
            if exit_box == 'yes':
                tk.messagebox.showinfo('Info screen', 'You will now return to the game and you can guess new number. Good luck!')
                self.restart()
            else:
                self.close_screen.destroy()
        elif ( self.game_result != True and int(self.player_guess) > self.magic[0] ):
            # add the number to be guessed
            self.database.add_guess_the_number_score(self.magic[0],self.player_info)
            # add the number of attempts user has made
            self.database.add_guess_the_number_player_attempts(len(self.guesses['correct']) + len(self.guesses['wrong']), self.player_info)
            # store 0 (False) for the game state: the guess was wrong, session continues
            self.database.add_guess_the_number_game_state(0,self.player_info)
            # see database immediately after to confirm correct save
            self.database.see_database()
            messagebox.showinfo('Info Screen', message="Too high!")
        elif ( self.game_result != True and int(self.player_guess) < self.magic[0] ):
            # add the number to be guessed
            self.database.add_guess_the_number_score(self.magic[0],self.player_info)
            # add the number of attempts user has made
            self.database.add_guess_the_number_player_attempts(len(self.guesses['correct']) + len(self.guesses['wrong']), self.player_info)
            # store 0 (False) for the game state: the guess was wrong, session continues
            self.database.add_guess_the_number_game_state(0,self.player_info)
            # see database immediately after to confirm correct save
            self.database.see_database()
            messagebox.showinfo('Info Screen', message="Too low!")
if __name__ == "__main__":
    # NOTE(review): GuessTheNumber.__init__ requires (parent, player_info);
    # calling it with no arguments raises TypeError, so this standalone entry
    # point cannot work as written — confirm intended usage.
    app = GuessTheNumber()
    app.mainloop()
41629760399 | from django.shortcuts import render, redirect
import smtplib
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserChangeForm
from django.views import generic
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from .forms import editForm
def home(request):
    """Landing page; the title shown depends on whether the user is logged in."""
    title = 'Account' if request.user.is_authenticated else 'Login'
    return render(request, 'votesite/home.html', {'title' : title})
def handler404(request, exception):
    """Custom 404 handler: render the project's error page with a 404 status."""
    return render(request, 'votesite/404.html', status=404)
def contact(request):
    """Contact page.

    GET: render the contact form.
    POST: assemble the submitted fields into a plain-text message, e-mail it
    to the site owner via Gmail SMTP, then show a confirmation page.
    """
    if request.method == "POST":
        firstname = request.POST['firstname']
        lastname = request.POST['lastname']
        email = request.POST['email']
        subject = request.POST['subject']
        message = request.POST['message']
        uid = request.POST['uid']
        msg = firstname + ' ' + lastname + '\n' + 'ID: ' + uid + '\n' + email + '\n' + subject + ': ' + message
        # Use the SMTP connection as a context manager so it is always closed
        # (QUIT) even when login/sendmail raises; the previous code leaked
        # the connection on every request.
        with smtplib.SMTP('smtp.gmail.com', 587) as connection:
            connection.ehlo()
            connection.starttls()
            connection.ehlo()
            username = settings.EMAIL_HOST_USER
            passw = settings.EMAIL_HOST_PASSWORD
            connection.login(username, passw)
            connection.sendmail(
                email,
                [settings.EMAIL_HOST_USER],
                msg
            )
        return render(request, 'votesite/messagesent.html', {'firstname': firstname})
    else:
        return render(request, 'votesite/contact.html', {})
@login_required
def profile(request):
    """Show the profile page, greeting the logged-in user by first name."""
    first_name = request.user.first_name
    return render(request, 'votesite/profile.html', {'username': first_name})
@login_required
def update(request):
    """Edit the current user's account.

    POST with a valid form saves the changes and shows the profile page;
    POST with an invalid form re-renders the edit page with the bound form's
    errors; GET shows the form pre-filled from the current user.
    """
    if request.method == 'POST':
        form = editForm(request.POST, instance=request.user)
        if form.is_valid():
            form.save()
            username = request.user.first_name
            return render(request, 'votesite/profile.html', {'message' : "Form Submitted Successfully!", 'username': username})
    else:
        form = editForm(instance=request.user)
    # Reached on GET, or on POST when validation failed (form carries errors).
    return render(request, 'votesite/update.html', {'form' : form})
# Manual smoke-test script for the dvach API wrapper: exercises board
# switching, thread fetch and the captcha round-trip interactively.
import sys
sys.path.append("..") # for dev
import api2ch
# Start on board 'b', then switch to 'vg' and print its metadata.
api = api2ch.Api('b')
print(api.board.name)
api.board = 'vg'
print(api.board.name)
print(api.board.category)
thread = api.get_thread(24536772)
# Fetch a captcha, show its image URL and submit the user's answer.
captcha = api.get_captcha()
print(api.get_captcha_img(captcha))
value = input('Captcha answer: ')
api.set_captcha_answer(captcha, value)
comment = '''Newfags cannot\n T\nR E'''
# print(api.send_post(board='b', comment=comment, email='', thread=165433076, captcha=captcha))
| slowpojkee/dvach.api | examples/test.py | test.py | py | 479 | python | en | code | null | github-code | 36 |
23013402799 | import socketserver
import os
import re
# Module-level strings and state.
# NOTE(review): `help` shadows the builtin and is never displayed anywhere in
# this file; `socketstate` is declared global in several places but never
# read or written after this initialization.
help = 'commands:\n'
help += 'start\n'
help += 'exit\n'
#not sure if I still need this:
socketstate = 0
tricksite = ''
#socket server class
class tcpsocket(socketserver.BaseRequestHandler):
    """Per-connection handler: serves a canned response to Discord's link
    crawler and 302-redirects every other client to `tricksite`."""
    def handle(self):
        global socketstate
        global tricksite
        # Read at most one TCP segment of the request (first 1024 bytes).
        self.data = self.request.recv(1024).strip()
        dat = ''
        try:
            dat = self.data.decode()
        except Exception as e:
            # Undecodable bytes: log and fall through with dat == ''.
            print(e)
        ##discord detection:
        #if dat.find('Discordbot') > -1:
        #    a = open(os.getcwd() + '')
        if dat.find('Discordbot') > -1:
            #load image response with header
            # NOTE(review): netcatresponse.txt must exist in the working
            # directory or this raises FileNotFoundError.
            a = open(os.getcwd() + '/netcatresponse.txt')
            b = a.read()
            a.close()
            self.request.sendall(b.encode())
            #watch as packets come in:
            print('\n' + dat)
        if dat.find('Discordbot') == -1:
            # NOTE(review): HTTP lines use bare '\n'; the spec calls for
            # '\r\n' and a terminating blank line — most clients tolerate it.
            responsepacket = "HTTP/1.1 302 Found\nLocation: " + tricksite
            respsend = responsepacket.encode()
            self.request.sendall(respsend)
            print('\n' + dat)
#program go
def main():
    """Ask for the redirect target, then serve forever on port 80.

    Binding port 80 typically requires elevated privileges.
    """
    global help
    global socketstate
    global tricksite
    HOST, PORT = ("", 80)
    # The URL every non-Discordbot client will be 302-redirected to.
    a = input('Direct users to where?\n').rstrip()
    tricksite = a
    with socketserver.TCPServer((HOST, PORT), tcpsocket) as server:
        server.serve_forever()
if __name__ == "__main__":
main() | thcsparky/bigclickskid | phish.py | phish.py | py | 1,571 | python | en | code | 0 | github-code | 36 |
7235498245 | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the hackerlandRadioTransmitters function below.
def hackerlandRadioTransmitters(arr, k):
count = 0
i = 0
arr.sort()
while i < len(arr) :
mRange = k
while i < len(arr) - 1 :
diff = abs(arr[i+1] - arr[i])
mRange -= diff
if mRange < 0 :
break
i += 1
mRange = k
while i < len(arr)- 1 :
diff = abs(arr[i+1] -arr [i])
mRange -= diff
if mRange < 0 :
break
i += 1
count += 1
i += 1
return count
if __name__ == '__main__':
    # HackerRank I/O harness: read n and k, then the n house positions,
    # and write the answer to the path given in the OUTPUT_PATH env var.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    nk = input().split()
    n = int(nk[0])
    k = int(nk[1])
    x = list(map(int, input().rstrip().split()))
    result = hackerlandRadioTransmitters(x, k)
    fptr.write(str(result) + '\n')
    fptr.close()
| Suraj-Upadhyay/ProblemSolving | hackerrank/Search/04-HackerlandRadioTransmitters.py | 04-HackerlandRadioTransmitters.py | py | 963 | python | en | code | 1 | github-code | 36 |
18694457154 | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 21 11:14:59 2023
@author: giamp
"""
import scipy
import logging
import numpy as np
import hcd
import matplotlib.pyplot as plt
from hppdWC import utils
from hppdWC import plots
def cutInTime(x, y, interval):
    '''
    Given x (time array), y (values), and interval, erases all the x,y pairs
    whose x is before interval[0] or after interval[1], then resets the
    retained time axis so it starts from 0.

    Parameters
    ----------
    x : np.array or list
        time array.
    y : np.array or list
        corresponding values.
    interval : list of 2 values [start finish]
        samples with start <= x <= finish are kept; if finish <= start the
        inputs are returned unchanged (with a warning).

    Returns
    -------
    x : np.array
        only the values after start and before stop, shifted to start at 0.
    y : np.array
        only the corresponding values to x.
    '''
    start = interval[0]
    stop = interval[1]
    if start < stop:
        # Accept plain lists, as the docstring promises: boolean masking
        # below requires numpy arrays.
        x = np.asarray(x)
        y = np.asarray(y)
        # Keep only samples inside [start, stop] with one combined mask.
        mask = (x >= start) & (x <= stop)
        x = x[mask]
        y = y[mask]
        # Reset the time axis; guard the empty selection, which previously
        # raised IndexError on x[0].
        if x.size > 0:
            x = x - x[0]
    else:
        logging.warning('not cutting the arrays since stop is before start')
    return x, y
def syncXcorr(signal1, signal2, time1, time2, step = 0.01, \
              interval1 = [0, 0], interval2 = [0, 0]):
    '''
    Computes the delay of signal2 with respect to signal1 using cross correlation.
    To do so, a similar pattern should be present in both signals.
    "time1" and "time2" contain the relative time of the recording and should:
        - be in the same measurement unit (eg: seconds)
        - start both from 0
    The returned value "delay" will be in the same measurement unit.
    "signal1" is the one that gives the t=0, while the advance/delay in the
    starting of the recording of "signal2" is computed.
    The returned value is "delay", which expresses:
        - the timing delay of signal2 wrt to signal1,
        - the OPPOSITE (minus sign) of the timing delay in the recording
    If the recording of 2 starts before 1, when plotting the two signals,
    you see the event happening in 1 first and then in 2.
    To graphically synchronize them, it's necessary to move 2 towards right
    To timewise synchronize them, it's necessary to cut the first frames of 2
    (the ones when 2 was already recording and 1 wasn't) and to reset the timer of 2
    If "delay" is *POSITIVE*, then signal2 started to be recorded AFTER "delay" time.
    To synchronize the two signals, it's necessary to add values in the head of
    signal2
    NOT SYNC SIGNALS
    -----------****------- signal1
    --------****------- signal2
    delay = 3 -> signal2 started to be recorded 3 after
    SYNC SIGNALS
    -----------****------- signal1
    add--------****------- signal2
    If "delay" is *NEGATIVE*, then signal2 started to be recorded BEFORE "delay" time.
    To synchronize the two signals, it's necessary to cut values from the head of
    signal2
    NOT SYNC SIGNALS
    -----------****------- signal1
    --------------****------- signal2
    delay = -3 -> signal2 started to be recorded 3 before
    SYNC SIGNALS
    -----------****------- signal1
    -----------****------- signal2
    Parameters
    ----------
    signal1 : array
        Contains the y value of signal 1
    signal2 : array
        Contains the y value of signal 2
    time1 : array
        Contains the x value of signal 1
    time2 : array
        Contains the x value of signal 2
    step : int, optional
        To perform cross correlation, both signals should be at the same
        frequency, it's necessary to resample them. The step should be in the
        same measurement units of time1 and time2
        The default is 0.01.
    interval1 : list of 2 values: [startTime endTime], optional
        Part of the signal1 that should be considered when executing the xcorr.
        The default is [0, 0], which means the whole signal.
    interval2 : list of 2 values: [startTime endTime], optional
        Part of the signal2 that should be considered when executing the xcorr.
        The default is [0, 0], which means the whole signal.
    showPlot : bool, optional
        If the function should display a plot regarding the execution.
        The default is False.
    device1 : string, optional
        Name of device 1 in the plot.
        The default is 'device 1'.
    device2 : string, optional
        Name of device 2 in the plot.
        The default is 'device 2'.
    userTitle : string, optional
        To be added in the title
        The default is ''.
    Returns
    -------
    delay : float
        Delay in the same temporal measurement unit of the two signals
        If POSITIVE, signal2 started to be recorded AFTER signal1
        If NEGATIVE, signal2 started to be recorded BEFORE signal1
    maxError : float
        maxError = step / 2
    '''
    # NOTE(review): showPlot/device1/device2/userTitle above are not
    # parameters of this function any more (plotting moved to
    # plot_syncXcorr); the docstring entries are stale.
    # NOTE(review): the [0, 0] defaults are mutable default arguments; they
    # are only read here (never mutated), so behaviour is safe, but an
    # immutable sentinel would be more robust.
    # keeping sure that the variables are numpy.arrays
    signal1, _ = utils.toFloatNumpyArray(signal1)
    signal2, _ = utils.toFloatNumpyArray(signal2)
    time1, _ = utils.toFloatNumpyArray(time1)
    time2, _ = utils.toFloatNumpyArray(time2)
    # fill any NaN so the correlation is computed on continuous data
    signal1 = fillNanWithInterp(signal1, time1)
    signal2 = fillNanWithInterp(signal2, time2)
    # # eventually cutting the signal1
    # if interval1 != [0, 0]:
    #     time1, signal1 = cutInTime(time1, signal1, interval1)
    # # eventually cutting the signal2
    # if interval2 != [0, 0]:
    #     time2, signal2 = cutInTime(time2, signal2, interval2)
    # user delay
    # since the xcorrelation works on the y values only, the cutting of the
    # signals should be taken into account as an additional delay
    userDelay = interval1[0] - interval2[0]
    # resampling both signals on the same frequency
    y1, x1, _ = resampleWithInterp(signal1, time1, step, 'time step')
    y2, x2, _ = resampleWithInterp(signal2, time2, step, 'time step')
    # eventually cutting the signal1
    if interval1 != [0, 0]:
        x1, y1 = cutInTime(x1, y1, interval1)
    # eventually cutting the signal2
    if interval2 != [0, 0]:
        x2, y2 = cutInTime(x2, y2, interval2)
    # eventually remove last element from signal with more value
    # (scipy's correlate/correlation_lags accept unequal lengths, but the
    # rest of the pipeline assumes equally long arrays)
    if len(x2)!=len(x1):
        if len(x2)>len(x1):
            x2=x2[0:-1]
            y2=y2[0:-1]
        else:
            x1=x1[0:-1]
            y1=y1[0:-1]
    # putting the values around 0
    y1 = y1 - np.mean(y1)
    y2 = y2 - np.mean(y2)
    # normalizing from -1 to 1
    y1 = y1 / np.max(np.abs(y1))
    y2 = y2 / np.max(np.abs(y2))
    # compute correlation
    corr = scipy.signal.correlate(y1, y2)
    lags = scipy.signal.correlation_lags(len(y1), len(y2))
    # where there is max correlation
    index = np.argmax(corr)
    # lag (in samples) times the step gives the delay in time units
    delay = lags[index]*step
    # adding the userDelay to the one computed on the signals
    delay = delay + userDelay
    # half a resampling step is the best resolution we can claim
    maxError = step/2
    results=[x1, y1, interval1, x2, y2, interval2, delay, lags, step, userDelay, maxError, corr, index]
    return results
def plot_syncXcorr(results, device1, device2, userTitle = '', col1 = 'C0', col2 = 'C1'):
    """
    Plot the outcome of syncXcorr() in three stacked axes: the raw signals,
    the correlation-vs-lag curve, and the shifted (synchronized) signals.

    Parameters
    ----------
    results : list
        The list returned by syncXcorr():
        [x1, y1, interval1, x2, y2, interval2, delay, lags, step,
         userDelay, maxError, corr, index].
    device1, device2 : string
        Device names used in the title, legends and axis labels.
    userTitle : string, optional
        Extra text appended to the main title. The default is ''.
    col1, col2 : string, optional
        Matplotlib colors for device1 and device2.

    Returns
    -------
    fig, ax
        Figure and axes produced by plots.drawInSubPlots().
    """
    [x1, y1, interval1, x2, y2, interval2, delay, lags, step, userDelay, maxError, corr, index] = results
    # Main title states which device started later/earlier and by how much.
    # (A variant of each title including "± maxError" used to be assigned and
    # then immediately overwritten; the dead assignments were removed.)
    if delay > 0:
        mainTitle = r"{} ({:.2f}-{:.2f}) started {:.3f} after {} ({:.2f}-{:.2f})".format(device2, interval2[0], interval2[1], np.absolute(delay), device1, interval1[0], interval1[1])
    elif delay < 0:
        mainTitle = r"{} ({:.2f}-{:.2f}) started {:.3f} before {} ({:.2f}-{:.2f})".format(device2, interval2[0], interval2[1], np.absolute(delay), device1, interval1[0], interval1[1])
    else:
        mainTitle = r"{} started at the same time of {}".format(device2, device1)
    if userTitle != '':
        mainTitle = mainTitle + ' - ' + userTitle
    # Three rows: raw signals, correlation, shifted signals (x2 moved by delay).
    fig, ax = plots.drawInSubPlots(\
        listXarrays = \
        [[(x1 + interval1[0]).tolist(),(x2 + interval2[0]).tolist()],\
        (lags*step + userDelay).tolist(), \
        [(x1 + interval1[0]).tolist(),(x2 + interval2[0] +delay).tolist()]],\
        listYarrays = \
        [[y1.tolist(), y2.tolist()], \
        corr,\
        [y1.tolist(), y2.tolist()]], \
        listOfTitles = \
        ['not synchronized signals', \
        'correlation according to shift',\
        'synchronized signals'], \
        sharex = False, nrows = 3, mainTitle = mainTitle, listOfkwargs=[[{'color': col1},{'color': col2}],{'marker':''}], listOfLegends = [[device1, device2], ['']])
    # Twin y-axes so each device gets its own label color on rows 1 and 3.
    for this_ax in [ax[0], ax[2]]:
        this_ax2 = this_ax.twinx()
        this_ax.set_xlabel('time [s]')
        this_ax.set_ylabel(device1, color = col1)
        this_ax2.set_ylabel(device2, color = col2)
        this_ax.set_xlim(np.min([np.min(x1 + interval1[0]), np.min(x2 + interval2[0]), np.min(x2 + interval2[0] + delay)]), np.max([np.max(x1 + interval1[0]), np.max(x2 + interval2[0]), np.max(x2 + interval2[0] + delay)]))
    # Middle row: mark the lag of maximum correlation with a red line.
    this_ax = ax[1]
    this_ax.axvline(lags[index]*step + userDelay, color = 'r')
    this_ax.set_xlabel('lag (time [s])')
    this_ax.set_ylabel('correlation')
    this_ax.set_xlim(np.min(lags*step + userDelay), np.max(lags*step + userDelay))
    return fig, ax
#plots.syncXcorr(x1, y1, interval1, device1, x2, y2, interval2, device2, delay, lags, step, userDelay, maxError, corr, index, userTitle, col1 = col1, col2 = col2)
# plots.syncXcorrOld(x1, y1, interval1, device1, x2, y2, interval2, device2, delay, lags, step, userDelay, maxError, corr, index, userTitle = '', col1 = 'C0', col2 = 'C1')
def fillNanWithInterp(y, x = 0, mode = 'linear'):
    '''
    Replace the NaN entries of an array by interpolated values.

    If x is given, the returned values correspond to the given x; if x is
    not given, y is assumed to be sampled at a fixed frequency.  A NaN at
    either end of the array is first replaced by the closest valid value,
    so the interpolation never has to extrapolate.

    Parameters
    ----------
    y : np.array
        original array of values containing nans to be corrected
    x : np.array, optional
        time array of acquisition of signal y.
        The default is 0, which assumes that y is sampled at a fixed frequency
    mode : string, optional
        kind of interpolation, passed to scipy.interpolate.interp1d(kind = )
        https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html
        The default is 'linear'.

    Returns
    -------
    yinterp : np.array
        data with every nan replaced by an interpolated value
    '''
    # force numpy arrays
    x, _ = utils.toFloatNumpyArray(x)
    y, _ = utils.toFloatNumpyArray(y)
    # no time array given: assume equally spaced samples
    if np.array_equal(0, x):
        x = np.arange(0, len(y), 1)
    valid = ~np.isnan(y)
    # pin the extremities: copy the nearest valid value so the whole range
    # lies between interpolation nodes
    if not valid[0]:
        y[0] = y[valid][0]
    if not valid[-1]:
        y[-1] = y[valid][-1]
    valid = ~np.isnan(y)
    # build the interpolator from the valid samples only, then evaluate it
    # on the full time base
    finterp = scipy.interpolate.interp1d(x[valid], y[valid], mode)
    return finterp(x)
def resampleWithInterp(y, x = 0, xparam = 0.01, param = 'time step', mode = 'linear'):
    '''
    Given a signal y and his time array x, resamples it using interpolation
    the three modes to use this function are:
        - specifying the time *step*:
            the output is resampled with the given step
        - specifying the *frequency*:
            the output is resampled with the given frequency
        - specifying the *time array*:
            the output is resampled on the given time array
    If signal y has contains nan, they are filled with the function fillNanWithInterp()
    Parameters
    ----------
    y : np.array
        original array of values
    x : np.array, optional
        time array of acquisition of signal y.
        The default is 0, which assumes that y is sampled at a fixed frequency
    xparam : float, integer or array, optional
        if param == 'time step'
        specifies the time step
        if param == 'frequency'
        specifies the frequency
        if param == 'time array'
        is equal to the time array where the resampling should be done.
        The default is 0.01 and goes with 'time step' specified in param
    param : string, optional
        To specify if the resampling should be done on a signal computed on the
        given time step, frequency or on the given time array.
        The default is 'time step' and goes with '0.001' specified in xparam
    mode : string, optional
        kind of interpolation to be performed, passed to scipy.interpolate.interp1d(kind = )
        Please refer to documentation
        https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html
        The default is 'linear'.
    Returns
    -------
    yinterp : np.array
        Values of the resampled signal
    xinterp : np.array
        Time array of the resampled signal
    finterp : function
        Interpolator function, only works between the extremities of x
    '''
    # keeping sure that the variables are numpy.arrays
    x, _ = utils.toFloatNumpyArray(x)
    y, _ = utils.toFloatNumpyArray(y)
    xparam, _ = utils.toFloatNumpyArray(xparam)
    # if x is not given, it's assumed that the y array is equally spaced
    if np.array_equal(0, x):
        # NOTE(review): this compares `mode` (interpolation kind, e.g.
        # 'linear') with 'time array', which is a value of `param` -- it
        # looks like the guard was meant to be `param != 'time array'`;
        # as written the error branch is unreachable for normal inputs.
        if mode != 'time array':
            x = np.arange(0, len(y), 1)
        else:
            logging.error('asking to resample on a given time array but not \
                          specifiying the input time array')
            return None
    # if y contains at least one nan, fill the space
    if np.isnan(y).any():
        logging.warning('nan values detected, filling them with ' + mode + ' method')
        y = fillNanWithInterp(y, x, mode)
    # the three modes to use this function are:
    # - specifying the time *step*
    # - specifying the *frequency*
    # - specifying the *time array*
    validParams = ['time step', 'frequency', 'time array']
    if param == validParams[0]: # given step
        step = xparam
        xinterp = np.arange(np.min(x), np.max(x), step)
    elif param == validParams[1]: # given freq
        freq = xparam
        step = 1/freq
        xinterp = np.arange(np.min(x), np.max(x), step)
    elif param == validParams[2]: # given time array
        xinterp = xparam
        # # eventually cutting the time array
        # xinterp = xinterp[xinterp<=np.max(x)]
        # xinterp = xinterp[xinterp>=np.min(x)]
        # warning the user if the time array specified exceeds the limits
        if (xinterp[0] < np.min(x) or xinterp[-1] > np.max(x)):
            logging.warning('Using extrapolation: ' + \
                            '\nInterpolator has values between {:.2f} and {:.2f}'\
                            .format(np.min(x), np.max(x)) + \
                            ' and computation between {:.2f} and {:.2f} is asked.'\
                            .format(xparam[0], xparam[-1]))
    else:
        logging.error('not valid param. Valid params are: ' + str(validParams))
        return None
    # feeding the interpolator with the input values and obtaining a function
    # (fill_value='extrapolate' so evaluations just outside x do not raise)
    finterp = scipy.interpolate.interp1d(x, y, kind = mode, fill_value = 'extrapolate')
    # computing the values of the function on the xinterp
    yinterp = finterp(xinterp)
    return yinterp, xinterp, finterp
def syncCameraCapsleeve(led_data, cap_data):
    """
    Estimate the start-time offset between the capsleeve and camera streams.

    The common event is located twice: in the capsleeve data through
    hcd.capsleeve.find_first_bump(), and in the camera data as the first
    frame (index 60 onwards) whose 'Red' level exceeds the baseline mean of
    the first 60 frames by four standard deviations.  The returned offset is
    the difference between the two event times.
    """
    capbump = hcd.capsleeve.find_first_bump(cap_data)
    baseline = led_data['Red'].iloc[0:60]
    threshold = baseline.mean()
    dev = baseline.std()
    # scan past the baseline window for the first significant red spike
    for i in range(60, len(led_data['Time(s)'])):
        if led_data.at[i, "Red"] > threshold + 4 * dev:
            leddelay = led_data.at[i, "Time(s)"]
            break
    return capbump - leddelay
def plotSyncedCameraCapsleeve(cap_data,led_data,csini):
    """Overlay the normalised capsleeve accelerometer trace and the camera
    red-LED trace after applying the csini offset computed by
    syncCameraCapsleeve()."""
    acceldata=cap_data["Accelerometer Y (g)"].to_numpy()
    time=cap_data["Time (s)"].to_numpy()
    reddata=led_data['Red'].to_numpy()
    timeled=led_data['Time(s)'].to_numpy()
    # centre both traces around zero
    acceldata = acceldata - np.mean(acceldata)
    reddata = reddata - np.mean(reddata)
    # normalizing from -1 to 1
    acceldata = acceldata / np.max(np.abs(acceldata))
    reddata = reddata / np.max(np.abs(reddata))
    if csini>0:
        # drop the capsleeve samples recorded before the camera started,
        # then trim the time axis to the same length
        acceldata=acceldata[time>csini]
        time=time[0:-(len(time)-len(acceldata))]
    if csini<0:
        # NOTE(review): if timeled is non-negative (it usually starts at 0),
        # `timeled > csini` with csini < 0 keeps every sample, making this
        # branch a no-op and leaving reddata/timeled lengths unchanged --
        # confirm the intended trimming here.
        reddata=reddata[timeled>csini]
    plt.figure()
    # NOTE(review): plt.plot returns a list of Line2D artists, not a Figure,
    # despite the variable name.
    fig=plt.plot(time,acceldata)
    plt.plot(timeled,reddata)
    return fig
| mmtlab/wheelchair_contact_detection | hcd/xcorrelation.py | xcorrelation.py | py | 17,731 | python | en | code | 0 | github-code | 36 |
class Board:
    """
    One bingo board for AoC 2021 day 4.

    rows / columns hold, per row and per column, the set of numbers not yet
    marked; position maps each number to its (row, column) coordinates;
    unmarked is the set of all still-unmarked numbers on the board.
    (The original class header line was corrupted by fused metadata; it has
    been restored.)
    """
    def __init__(self, rows, columns, position):
        self.rows = rows
        self.columns = columns
        self.position = position
        # every number present on the board starts unmarked
        self.unmarked = set(position)

    def mark(self, number) -> bool:
        """Mark number if present; return True when that completes a row or
        a column, None when the number is not on this board."""
        if number not in self.unmarked:
            return None
        self.unmarked.remove(number)
        row_idx, col_idx = self.position[number]
        self.rows[row_idx].remove(number)
        self.columns[col_idx].remove(number)
        # a win empties either the row or the column of the marked number
        return not self.rows[row_idx] or not self.columns[col_idx]
numbers = []  # drawn bingo numbers, in draw order
boards = []  # parsed Board instances
with open("testinput.txt") as f:
    lines = f.readlines()
    # first line: comma-separated draw sequence
    numbers = [int(x) for x in lines[0].strip().split(",")]
    # boards start at line 2 and occupy 5 lines each, separated by one blank line
    for i in range(2, len(lines), 6):
        rows = [[int(x) for x in lines[j].split()] for j in range(i, i + 5)]
        columns = [[rows[j][i] for j in range(5)] for i in range(5)]
        position = dict()
        # NOTE(review): this loop reuses `i`, clobbering the outer line
        # index; it works only because the outer `for` re-binds `i` on the
        # next iteration -- fragile, but behaviourally fine.
        for i in range(5):
            for j in range(5):
                position[rows[i][j]] = (i, j)
        # Board expects sets (numbers remaining per row/column)
        rows = [set(x) for x in rows]
        columns = [set(x) for x in columns]
        boards.append(Board(rows, columns, position))
def question1():
    """Part 1: score of the first board to win -- the winning draw times the
    sum of that board's unmarked numbers."""
    for drawn in numbers:
        for board in boards:
            if board.mark(drawn):
                return drawn * sum(board.unmarked)
def question2():
    """Part 2: score of the board that wins last.  Each board replays the
    whole draw sequence; the board needing the most draws wins the prize."""
    best_score = 0
    most_draws = 0
    for board in boards:
        draws_used = 0
        for drawn in numbers:
            draws_used += 1
            if board.mark(drawn):
                if draws_used > most_draws:
                    most_draws = draws_used
                    best_score = drawn * sum(board.unmarked)
                break
    return best_score
print(question2())  # prints the part-2 answer
| hieu-lee/AoC2021 | Day4/solution.py | solution.py | py | 1,539 | python | en | code | 1 | github-code | 36 |
216104646 |
from ast import Add
from flask import render_template, session, request, url_for, flash, redirect
from loja.produtos.models import Addproduto, Marca, Categoria
from loja import app, db, bcrypt
from .formulario import LoginFormulario, RegistrationForm
from .models import User
import os
@app.route('/admin')
def admin():
    """Admin dashboard: requires an active session, lists every product."""
    if "email" not in session:
        flash('Faça seu login!', 'danger')
        return redirect(url_for('login'))
    return render_template('admin/index.html', tittle='Pagina Ferronorte',
                           produtos=Addproduto.query.all())
@app.route('/marcas')
def marcas():
    """Brand listing page, newest first; requires an active session."""
    if "email" not in session:
        flash('Faça seu login!', 'danger')
        return redirect(url_for('login'))
    todas = Marca.query.order_by(Marca.id.desc()).all()
    return render_template('admin/marca.html', tittle='Pagina Marcas', marcas=todas)
@app.route('/categorias')
def categorias():
    # Category listing page; only reachable with an active session.
    if "email" not in session:
        flash('Faça seu login!', 'danger')
        return redirect(url_for('login'))
    categorias = Categoria.query.order_by(Categoria.id.desc()).all()
    # NOTE(review): this renders 'admin/marca.html' with `categorias=`,
    # while marcas() renders the same template with `marcas=` -- looks like
    # a copy/paste slip; confirm the template handles both context names.
    return render_template('admin/marca.html', tittle='Pagina Categoria', categorias=categorias)
@app.route('/registrar', methods=['GET', 'POST'])
def registrar():
    # Sign-up page: validate the submitted form and persist a new User.
    form = RegistrationForm(request.form)
    if request.method == 'POST' and form.validate():
        # NOTE(review): security -- the password is stored as-is; `bcrypt`
        # is imported at module level but never used here.  The password
        # should be hashed before persisting (and login() adjusted to
        # verify the hash).
        user = User(name=form.name.data, username=form.username.data, email=form.email.data,
                    password=form.password.data)
        db.session.add(user)
        db.session.commit()
        flash(f'Obrigado {form.name.data} por registrar!', 'success')
        return redirect(url_for('login'))
    return render_template('admin/registrar.html', form=form, tittle="Pagina de Registros")
@app.route('/login', methods=['GET', 'POST'])
def login():
    # Login page: on success stores the e-mail in the session and redirects
    # either to the `next` query parameter or to the admin dashboard.
    form = LoginFormulario(request.form)
    if request.method == "POST" and form.validate():
        user = User.query.filter_by(email=form.email.data).first()
        # NOTE(review): security -- only the existence of the e-mail is
        # checked; the submitted password is never compared against the
        # stored one, so any password logs in a known e-mail.
        if user:
            session['email'] = form.email.data
            flash(f'Olá {form.email.data} !', 'success')
            return redirect(request.args.get('next') or url_for('admin'))
        else:
            flash('Nao foi possivel entrar no sistema!', 'danger')
    return render_template('admin/login.html', form=form, tittle='Pagina Login')
| ReinierSoares/SiteFlask | loja/admin/rotas.py | rotas.py | py | 2,351 | python | en | code | 0 | github-code | 36 |
23413088374 | # -*- coding: utf-8 -*-
"""
A disk cache layer to store url and its html.
"""
from __future__ import print_function
import os
import zlib
import diskcache
class CompressedDisk(diskcache.Disk):  # pragma: no cover
    """
    Serialization Layer. Value has to be bytes or string type, and will be
    compressed using zlib before stored to disk.
    - Key: str, url.
    - Value: str or bytes, html or binary content.
    """
    def __init__(self,
                 directory,
                 compress_level=6,
                 value_type_is_binary=False,
                 **kwargs):
        # compress_level: zlib level 0-9 (9 = slowest, smallest output).
        self.compress_level = compress_level
        self.value_type_is_binary = value_type_is_binary
        # Bind the (de)compression strategy once, so the hot get/store paths
        # never branch on the value type.  Identity checks (`is True`)
        # deliberately reject truthy non-bools.
        if value_type_is_binary is True:
            self._decompress = self._decompress_return_bytes
            self._compress = self._compress_bytes
        elif value_type_is_binary is False:
            self._decompress = self._decompress_return_str
            self._compress = self._compress_str
        else:
            msg = "`value_type_is_binary` arg has to be a boolean value!"
            raise ValueError(msg)
        super(CompressedDisk, self).__init__(directory, **kwargs)
    def _decompress_return_str(self, data):
        """Decompress and decode back to str (values were utf-8 encoded)."""
        return zlib.decompress(data).decode("utf-8")
    def _decompress_return_bytes(self, data):
        """Decompress, returning raw bytes."""
        return zlib.decompress(data)
    def _compress_str(self, data):
        """Encode str to utf-8 and compress."""
        return zlib.compress(data.encode("utf-8"), self.compress_level)
    def _compress_bytes(self, data):
        """Compress raw bytes."""
        return zlib.compress(data, self.compress_level)
    def get(self, key, raw):
        # Decompress whatever diskcache.Disk read back from storage.
        # NOTE(review): `raw` semantics come from the diskcache.Disk API --
        # confirm against the installed diskcache version.
        data = super(CompressedDisk, self).get(key, raw)
        return self._decompress(data)
    def store(self, value, read, **kwargs):
        # `read` True means diskcache streams a file-like object; only
        # in-memory values (read == False) are compressed here, mirroring
        # the `not read` branch in fetch().
        if not read:
            value = self._compress(value)
        return super(CompressedDisk, self).store(value, read, **kwargs)
    def fetch(self, mode, filename, value, read):
        data = super(CompressedDisk, self). \
            fetch(mode, filename, value, read)
        if not read:
            data = self._decompress(data)
        return data
def create_cache(directory,
                 compress_level=6,
                 value_type_is_binary=False,
                 **kwargs):
    """
    Build a diskcache.Cache whose values are transparently zlib-compressed.

    :type directory: str
    :param directory: path for the cache directory.

    :type compress_level: int
    :param compress_level: zlib level, 0 ~ 9; 9 is slowest and smallest.

    :type value_type_is_binary: bool
    :param value_type_is_binary: True when values are bytes, False for str.
        Default False.

    :param kwargs: forwarded to diskcache.Cache.

    :rtype: diskcache.Cache
    :return: the configured ``diskcache.Cache()``
    """
    return diskcache.Cache(
        directory,
        disk=CompressedDisk,
        disk_compress_level=compress_level,
        disk_value_type_is_binary=value_type_is_binary,
        **kwargs
    )
def create_cache_here(this_file: str,
                      compress_level: int = 6,
                      value_type_is_binary: bool = False,
                      **kwargs) -> diskcache.Cache:
    """
    Create a disk cache next to the calling module, under ``here/.cache``.

    :param this_file: always __file__.
    :param compress_level: zlib level; 1 is minimal, 9 is maximum compression.
    :param value_type_is_binary: if True values are expected to be binary,
        otherwise string.
    :param kwargs: additional keyword arguments forwarded to create_cache.

    :return: a ``diskcache.Cache`` object
    """
    here = os.path.dirname(this_file)
    return create_cache(
        directory=os.path.join(here, ".cache"),
        compress_level=compress_level,
        value_type_is_binary=value_type_is_binary,
        **kwargs
    )
| MacHu-GWU/crawlib-project | crawlib/cache.py | cache.py | py | 3,738 | python | en | code | 1 | github-code | 36 |
17211879792 | from datetime import date
atual = date.today().year  # current year, used to derive each person's age
totmaior = 0  # count of adults (age >= 18)
totmenor = 0  # count of minors
# Ask the birth year of 7 people and classify each one by age.
for c in range(1, 8):
    nasc = int(input(f'Em que ano a {c}° pessoa nasceu? '))
    idade = atual - nasc
    if idade >= 18:
        totmaior += 1
    else:
        totmenor += 1
print(f'No total contamos {totmaior} maior de idade e {totmenor} menor de idade.')
| GRSFFE/PythonExercicios | ex054.py | ex054.py | py | 344 | python | pt | code | 0 | github-code | 36 |
19735062070 | #!/usr/bin/python3
from pyrob.api import *
@task
def task_8_28():
direction = -1
while wall_is_above() == True:
if wall_is_on_the_left() == True:
direction = 1
if direction == -1:
move_left()
elif direction == 1:
move_right()
while wall_is_above() == False:
move_up()
while wall_is_on_the_left() ==False:
move_left()
if __name__ == '__main__':
    run_tasks()  # pyrob entry point: executes every @task-decorated function
| miketoreno88/robot-tasks-master-Python | task_18.py | task_18.py | py | 468 | python | en | code | 0 | github-code | 36 |
30024110320 | from itertools import combinations
from scipy.optimize import fsolve
from copy import copy
from pdb import set_trace
INFT = float(10**10)
class Bound(object):
    """
    A circular obstacle with centre (x, y) and radius r.

    The four walls of the unit square are encoded as pseudo-circles whose
    centre coordinate is +/-INFT; fit() special-cases those values.
    """
    def __init__(self, x, y, r):
        self.x = x
        self.y = y
        self.r = r

    def fit(self, another_bound):
        """True when self does not overlap another_bound (or stays inside
        the unit square, for the wall sentinels)."""
        if another_bound.x == INFT:
            return self.x + self.r <= 1.0
        if another_bound.x == -INFT:
            return self.x - self.r >= 0.0
        if another_bound.y == INFT:
            return self.y + self.r <= 1.0
        if another_bound.y == -INFT:
            return self.y - self.r >= 0.0
        # two real circles: no overlap iff centre distance >= sum of radii
        return (self.r + another_bound.r) ** 2 <= \
            (self.x - another_bound.x) ** 2 + (self.y - another_bound.y) ** 2

    def fit_all(self, bounds):
        """True when self fits every bound in the given iterable."""
        return all(self.fit(b) for b in bounds)
# bound( x , y , r )
bound_set0 = [
Bound( -INFT , 0.0 , INFT ),
Bound( INFT , 0.0 , INFT ),
Bound( 0.0, -INFT, INFT ),
Bound( 0.0, INFT, INFT ),
Bound( 0.5, 0.5, 0)
]
def find(bound_set):
    """
    Grow bound_set by one circle: the largest circle tangent to some triple
    of existing bounds that does not overlap any existing bound.

    Every 3-combination of the current bounds is handed to solve(); the
    candidate circle is kept when it fits all bounds and beats the best
    radius found so far.  The winner is appended to a copy of bound_set,
    which is returned (the input list is not modified).

    Raises NameError when no candidate fits at all (behaviour unchanged).
    """
    new_bound_set = copy(bound_set)
    max_r = 0
    for selected_3_bound in combinations(bound_set, 3):
        # Solve the tangency system once per triple -- previously solve()
        # was invoked three times (once per coordinate) per combination.
        x, y, r = solve(selected_3_bound)
        candidate = Bound(x, y, r)
        if candidate.fit_all(new_bound_set) and candidate.r > max_r:
            max_r = candidate.r
            max_bound = candidate
    new_bound_set.append(max_bound)
    return new_bound_set
def solve(three_bounds):
    """
    Solve for (x, y, r) of a circle simultaneously tangent to the three
    given bounds, starting fsolve from the centre of the unit square.

    Wall sentinels (coordinate == +/-INFT) contribute linear residuals
    (distance to the wall minus r); real circles contribute the tangency
    residual |centre distance|^2 - (r + r_other)^2.
    """
    def residual(sol, bound):
        cx, cy, cr = sol
        if bound.x == INFT:
            return cx + cr - 1.0
        if bound.x == -INFT:
            return cx - cr - 0.0
        if bound.y == INFT:
            return cy + cr - 1.0
        if bound.y == -INFT:
            return cy - cr - 0.0
        return (cx - bound.x) ** 2 + (cy - bound.y) ** 2 - (cr + bound.r) ** 2

    def system(sol):
        return [residual(sol, b) for b in three_bounds]

    return fsolve(system, [0.5, 0.5, 0.0])
# test:
# Pack six circles one after another and print each new centre and radius
# (the slice skips the initial walls/seed in bound_set0).
for x in find(find(find(find(find(find(bound_set0))))))[len(bound_set0):]:
    print(x.x)
    print(x.y)
    print(x.r)
    print('---')
| ElderTrump/ball_in_box | ball_in_box/key_function.py | key_function.py | py | 2,583 | python | en | code | null | github-code | 36 |
39521526537 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 29 02:35:41 2017
@author: samara
"""
from tkinter import *
import InterfaceEstacionamentoV
import Estacionamento
class InterfaceA():
    """
    "Alterar Dados do Estacionamento" window: wraps the generic InterfaceV
    form, locks the name field, and adds Alterar/Sair buttons plus a status
    label.
    """
    def __init__(self):
        self.interface = InterfaceEstacionamentoV.InterfaceV()
        self.interface.janela.title("Alterar Dados do Estacionamento")
        self.interface.lblNome["text"] = "Nome: (Não pode ser alterado)"
        # The name identifies the record, so capture it once for later use.
        self.nomeAnterior = self.interface.txtNome.get()
        # BUG FIX: Button(...).pack(...) returns None, so the widget
        # references were lost; create the widget first, then pack it.
        self.bntAlt = Button(self.interface.cont5, text="Alterar", bg="navy",
                             command=self.alterarDadosEstacionamento)
        self.bntAlt.pack(side=LEFT, padx=20)
        self.lblmsg = Label(self.interface.cont6, text="", bg="navy", fg="white")
        self.lblmsg.pack()
        # BUG FIX: cont5 lives on the wrapped interface (self.interface.cont5,
        # as used above), not on self -- the old code raised AttributeError.
        self.bntSair = Button(self.interface.cont5, text="Sair", bg="navy",
                              command=self.sairDaInterface)
        self.bntSair.pack(side=RIGHT)

    def alterarDadosEstacionamento(self):
        """Collect the form values, persist them through Estacionamento, and
        show the returned status message in the label."""
        qtdeVagasDispCarro = self.interface.txtQtdeVagasDispCarro.get()
        qtdeVagasDispMoto = self.interface.txtQtdeVagasDispMoto.get()
        taxa15minC = self.interface.txtTaxa15minC.get()
        taxaDiariaC = self.interface.txtTaxaDiariaC.get()
        taxaMensalC = self.interface.txtTaxaMensalC.get()
        taxa15minM = self.interface.txtTaxa15minM.get()
        taxaDiariaM = self.interface.txtTaxaDiariaM.get()
        taxaMensalM = self.interface.txtTaxaMensalM.get()
        estacionamento = Estacionamento.Estacionamento(self.nomeAnterior, qtdeVagasDispCarro, qtdeVagasDispMoto)
        retorno = estacionamento.alterarDadosEstacionamento()
        estacionamento.alterarTaxasEstacionamento(self.nomeAnterior, taxa15minC, taxaDiariaC, taxaMensalC, taxa15minM, taxaDiariaM, taxaMensalM)
        self.lblmsg["text"] = retorno

    def sairDaInterface(self):
        # BUG FIX: the window handle is self.interface.janela (see __init__);
        # self.janela never existed and raised AttributeError.
        self.interface.janela.destroy()
| samarasleal/Python-ParkingSystem | InterfaceEstacionamentoA.py | InterfaceEstacionamentoA.py | py | 1,944 | python | pt | code | 0 | github-code | 36 |
8247731797 | import cvxopt
import cvxopt.solvers
from cvxopt.solvers import lp
from numpy import array
cvxopt.solvers.options['show_progress'] = False  # disable cvxopt output
# Probe for the optional GLPK backend; silence its logging under every
# cvxopt version (the option key moved between releases).
try:
    import cvxopt.glpk
    GLPK_IF_AVAILABLE = 'glpk'
    # GLPK is the fastest LP solver I could find so far:
    # <https://scaron.info/blog/linear-programming-in-python-with-cvxopt.html>
    # ... however, it's verbose by default, so tell it to STFU:
    cvxopt.solvers.options['glpk'] = {'msg_lev': 'GLP_MSG_OFF'}  # cvxopt 1.1.8
    cvxopt.solvers.options['msg_lev'] = 'GLP_MSG_OFF'  # cvxopt 1.1.7
    cvxopt.solvers.options['LPX_K_MSGLEV'] = 0  # previous versions
except ImportError:
    # issue a warning as GLPK is the best LP solver in practice
    print("CVXOPT import: GLPK solver not found")
    GLPK_IF_AVAILABLE = None
def cvxopt_matrix(M):
    """Coerce M into a cvxopt.matrix; values that already are one pass through."""
    return M if isinstance(M, cvxopt.matrix) else cvxopt.matrix(M)
def cvxopt_solve_lp(c, G, h, A=None, b=None, solver=GLPK_IF_AVAILABLE):
    """
    Solve the linear program

        minimize    c.T * x
        subject to  G * x <= h
                    A * x == b

    with the LP solver from `CVXOPT <http://cvxopt.org/>`_.

    Parameters
    ----------
    c : array, shape=(n,)
        Linear-cost vector.
    G : array, shape=(m, n)
        Linear inequality constraint matrix.
    h : array, shape=(m,)
        Linear inequality constraint vector.
    A : array, shape=(meq, n), optional
        Linear equality constraint matrix.
    b : array, shape=(meq,), optional
        Linear equality constraint vector.
    solver : string, optional
        Backend passed to cvxopt; defaults to GLPK when available.

    Returns
    -------
    x : array, shape=(n,)
        Optimal (primal) solution of the LP, if one exists.

    Raises
    ------
    ValueError
        If the LP is not feasible.
    """
    args = [cvxopt_matrix(c), cvxopt_matrix(G), cvxopt_matrix(h)]
    if A is not None:
        args.append(cvxopt_matrix(A))
        args.append(cvxopt_matrix(b))
    sol = lp(*args, solver=solver)
    if 'optimal' not in sol['status']:
        raise ValueError("LP optimum not found: %s" % sol['status'])
    n = array(c).shape[0]
    return array(sol['x']).reshape((n,))
| furiiibond/Tinder | venv/Lib/site-packages/lpsolvers/cvxopt_.py | cvxopt_.py | py | 2,213 | python | en | code | 0 | github-code | 36 |
74157473702 | from django.urls import path
from . import views
app_name = "core"
urlpatterns = [
path('author/', views.AuthorList.as_view(), name='list-author'),
path('author/<int:pk>/', views.AuthorDetail.as_view(), name='detail-author'),
path('book/', views.BookList.as_view(), name='list-book'),
path('book/<int:pk>/', views.BookDetail.as_view(), name='detail-book'),
]
| PauloGuillen/library | libraryapi/core/urls.py | urls.py | py | 377 | python | en | code | 0 | github-code | 36 |
25348981844 | import pandas as pd
def build(gps, game_id):
    """
    Count "intense events" per player for one game.

    Frames are walked in order (first half, then second half).  For each
    frame the squared acceleration magnitude
    a = AccelX**2 + AccelY**2 + AccelZ**2 is compared against the fixed
    threshold 5.0; every crossing of that threshold, in either direction,
    counts as one event.  The previous value carries across halves, exactly
    as before.

    Parameters
    ----------
    gps : pandas.DataFrame
        Must provide the columns PlayerID, Half, FrameID, AccelX, AccelY,
        AccelZ.
    game_id :
        Value stored under 'GameID' in every result row.

    Returns
    -------
    list of dict
        One dict per player: {'GameID', 'PlayerID', 'IntenseEvents'}.
    """
    players = []
    for player_id in gps.PlayerID.unique():
        counter = 0
        prev_a = 0.0
        # Filter once per player (and once per half below) instead of
        # re-scanning the full DataFrame for every single FrameID, which
        # made the old loop quadratic in the number of rows.
        player_rows = gps[gps.PlayerID == player_id]
        for half in (1, 2):  # first half, second half
            half_rows = player_rows[player_rows.Half == half]
            for frame_id in half_rows.FrameID.values:
                frame = half_rows[half_rows.FrameID == frame_id]
                ax = list(frame.AccelX)[0]
                ay = list(frame.AccelY)[0]
                az = list(frame.AccelZ)[0]
                # squared magnitude (no sqrt), matching the 5.0 threshold
                a = ax ** 2 + ay ** 2 + az ** 2
                if (prev_a > 5.0 and a < 5.0) or (prev_a < 5.0 and a > 5.0):
                    counter += 1
                prev_a = a
        players.append({'GameID': game_id, 'PlayerID': player_id, 'IntenseEvents': counter})
    return players
if __name__ == '__main__':
    # Read the combined GPS export and emit one per-game CSV of intense
    # events; games 1-3 are skipped (presumably already processed).
    df = pd.read_csv('gps.csv')
    for i in df.GameID.unique():
        if i > 3:
            print('game{0}.csv'.format(i))
            game_df = df[df.GameID == i]
            count_df = pd.DataFrame(build(game_df, i))
            count_df.to_csv('game{0}.csv'.format(i))
| magickaiyang/archive | datafest/intense_event.py | intense_event.py | py | 1,178 | python | en | code | 0 | github-code | 36 |
42149656698 | from PIL import Image
import numpy as np
import cv2
img = Image.open('back_img.jpg')
size = img.size
x_length = size[0]  # image width in pixels
print('x_length:', x_length)
y_length = size[1]  # image height in pixels
print('y_length:', y_length)
im_num = np.array(img)
# Gaussian blur before edge detection to suppress noise.
img_blur = cv2.GaussianBlur(im_num, (5, 5), 0)
# NOTE(review): PIL delivers RGB channel order, but COLOR_BGR2GRAY assumes
# BGR, so the grayscale weights land on swapped channels -- consider
# COLOR_RGB2GRAY.
img_gray = cv2.cvtColor(img_blur, cv2.COLOR_BGR2GRAY)
img_canny = cv2.Canny(img_gray, 100, 200)  # Canny edges, thresholds 100/200
cv2.imwrite('back_img_func.jpg', img_canny)
cv2.imshow('image', img_canny)
cv2.waitKey(0)  # block until a key press so the window stays open
| magicnian/neteasy | myTest.py | myTest.py | py | 463 | python | en | code | 0 | github-code | 36 |
34788181082 | """First migration
Revision ID: 1e99703f8998
Revises:
Create Date: 2022-03-30 17:34:52.872031
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1e99703f8998'  # this migration's id
down_revision = None  # first migration: no parent revision
branch_labels = None
depends_on = None
def upgrade():
    """Create the resources and entries tables; entries holds a foreign key
    back to resources."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('resources',
    sa.Column('resource_uuid', sa.String(), nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.Column('description', sa.String(), nullable=True),
    sa.Column('created', sa.DateTime(), nullable=True),
    sa.Column('updated', sa.DateTime(), nullable=True),
    sa.Column('state', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('resource_uuid')
    )
    op.create_index(op.f('ix_resources_resource_uuid'), 'resources', ['resource_uuid'], unique=False)
    op.create_table('entries',
    sa.Column('resource_uuid', sa.String(), nullable=True),
    sa.Column('entry_uuid', sa.String(), nullable=False),
    sa.Column('private_body', sa.String(), nullable=True),
    sa.Column('public_body', sa.String(), nullable=True),
    sa.Column('created', sa.DateTime(), nullable=True),
    sa.Column('updated', sa.DateTime(), nullable=True),
    sa.Column('state', sa.String(), nullable=True),
    sa.ForeignKeyConstraint(['resource_uuid'], ['resources.resource_uuid'], ),
    sa.PrimaryKeyConstraint('entry_uuid')
    )
    op.create_index(op.f('ix_entries_entry_uuid'), 'entries', ['entry_uuid'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop entries before resources (reverse dependency order)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_entries_entry_uuid'), table_name='entries')
    op.drop_table('entries')
    op.drop_index(op.f('ix_resources_resource_uuid'), table_name='resources')
    op.drop_table('resources')
    # ### end Alembic commands ###
| gilde-der-nacht/website | olymp/app/storage/migrations/versions/1e99703f8998_first_migration.py | 1e99703f8998_first_migration.py | py | 1,893 | python | en | code | 3 | github-code | 36 |
32191704409 | import time
from threading import Timer
import xmlrpc.client
from .edit import Edit
from .utils import firmwareWarning
import json
import os
import base64
class Session(object):
    """
    Session object

    Wraps one XML-RPC session on the sensor device. The session is kept
    alive by periodic heartbeats (optionally automated via a
    threading.Timer), can switch the device between run mode (0) and edit
    mode (1), and drives import/export of device and application
    configurations.
    """
    def __init__(self, sessionURL, mainAPI, autoHeartbeat=True, autoHeartbeatInterval=10):
        self.url = sessionURL
        self.mainAPI = mainAPI
        self.defaultAutoHeartbeatInterval = autoHeartbeatInterval
        self.rpc = xmlrpc.client.ServerProxy(self.url)
        self.connected = True
        self._edit = None
        if autoHeartbeat:
            self.rpc.heartbeat(autoHeartbeatInterval)
            self.autoHeartbeatInterval = autoHeartbeatInterval
            # Fire one second early so the session never times out between beats.
            self.autoHeartbeatTimer = Timer(autoHeartbeatInterval - 1, self.doAutoHeartbeat)
            self.autoHeartbeatTimer.start()
        else:
            # NOTE(review): in this branch self.autoHeartbeatTimer is never
            # assigned, but cancelSession() reads it — AttributeError risk
            # when autoHeartbeat=False. Confirm and initialize to None.
            self.rpc.heartbeat(300)
    def __del__(self):
        # Best-effort cleanup on garbage collection; cancelSession() is
        # guarded by self.connected so double calls are harmless.
        self.cancelSession()
    @property
    def OperatingMode(self):
        """
        Get the current operation mode for the session.
        :return: (int)
                 0: run mode
                 1: edit mode
        """
        result = int(self.mainAPI.getParameter("OperatingMode"))
        return result
    @property
    def edit(self) -> Edit:
        """
        Requesting an Edit object with this property. If the edit mode is False at the moment,
        the edit mode will be activated with this request with the function setOperationMode(1).
        :return: Edit object
        """
        if not self.OperatingMode:
            return self.setOperatingMode(mode=1)
        else:
            self._edit = Edit(editURL=self.url + 'edit/',
                              sessionAPI=self,
                              mainAPI=self.mainAPI)
            return self._edit
    def startEdit(self) -> Edit:
        """
        Starting the edit mode and requesting an Edit object.
        :return:
        """
        self.rpc.setOperatingMode(1)
        self._edit = Edit(editURL=self.url + 'edit/',
                          sessionAPI=self,
                          mainAPI=self.mainAPI)
        return self._edit
    def stopEdit(self) -> None:
        """
        Stopping the edit mode.
        :return: None
        """
        self.rpc.setOperatingMode(0)
        self._edit = None
    def heartbeat(self, heartbeatInterval: int) -> int:
        """
        Extend the live time of edit-session If the given value is outside the range of "SessionTimeout",
        the saved default timeout will be used.
        :param heartbeatInterval: (int) requested timeout-interval till next heartbeat, in seconds
        :return: (int) the used timeout-interval, in seconds
        """
        result = self.rpc.heartbeat(heartbeatInterval)
        return result
    def doAutoHeartbeat(self) -> None:
        """
        Auto Heartbeat Timer for automatic extending the live time of edit-session.
        If the given value is outside the range of "SessionTimeout", the saved default timeout will be used.
        :return: None
        """
        # The device may clamp the requested interval; keep using whatever it granted.
        newHeartbeatInterval = self.heartbeat(self.autoHeartbeatInterval)
        self.autoHeartbeatInterval = newHeartbeatInterval
        # schedule event a little ahead of time
        self.autoHeartbeatTimer = Timer(self.autoHeartbeatInterval - 1, self.doAutoHeartbeat)
        self.autoHeartbeatTimer.start()
    def cancelSession(self) -> None:
        """
        Explicit stopping this session If an application is still in edit-mode, it will implicit do the same
        as "stopEditingApplication". If an import or export is still being processed, the session is kept alive
        until the import/export has finished, although the method returns immediately.
        :return: None
        """
        if self.autoHeartbeatTimer:
            self.autoHeartbeatTimer.cancel()
            self.autoHeartbeatTimer.join()
            self.autoHeartbeatTimer = None
        if self.connected:
            self.rpc.cancelSession()
            self.connected = False
    def exportConfig(self) -> bytearray:
        """
        Exports the whole configuration of the sensor-device and stores it at the desired path.
        :return: (bytearray) configuration as one data-blob :binary/base64
        """
        # increase heartbeat interval which will prevent a closed session after the "long" export progress
        self.heartbeat(heartbeatInterval=30)
        config = self.rpc.exportConfig()
        config_bytes = bytearray()
        config_bytes.extend(map(ord, str(config)))
        # Poll until the device reports the export as finished.
        while self.getExportProgress() < 1.0:
            time.sleep(1)
        self.cleanupExport()
        self.mainAPI.waitForConfigurationDone()
        return config_bytes
    def importConfig(self, config: str, global_settings=True, network_settings=False, applications=True) -> None:
        """
        Import whole configuration, with the option to skip specific parts.
        :param config: (str) The config file (*.o2d5xxcfg) as a Binary/base64 data
        :param global_settings: (bool) Include Globale-Configuration (Name, Description, Location, ...)
        :param network_settings: (bool) Include Network-Configuration (IP, DHCP, ...)
        :param applications: (bool) Include All Application-Configurations
        :return: None
        """
        # This is required due to the long import progress which may take longer than 10 seconds (default)
        self.heartbeat(heartbeatInterval=30)
        # Each flag maps to a bitmask understood by the device's importConfig RPC.
        if global_settings:
            self.rpc.importConfig(config, 0x0001)
        if network_settings:
            self.rpc.importConfig(config, 0x0002)
        if applications:
            self.rpc.importConfig(config, 0x0010)
        while self.getImportProgress() < 1.0:
            time.sleep(1)
        self.mainAPI.waitForConfigurationDone()
    def exportApplication(self, applicationIndex: int) -> bytearray:
        """
        Exports one application-config.
        :param applicationIndex: (int) application index
        :return: None
        """
        config = self.rpc.exportApplication(applicationIndex)
        application_bytes = bytearray()
        application_bytes.extend(map(ord, str(config)))
        # while/else: cleanupExport() runs once after the export completes.
        while self.getExportProgress() < 1.0:
            time.sleep(1)
        else:
            self.cleanupExport()
        return application_bytes
    def importApplication(self, application: str) -> int:
        """
        Imports an application-config and creates a new application with it.
        :param application: (str) application-config as one-data-blob: binary/base64
        :return: (int) index of new application in list
        """
        if not self.OperatingMode:
            # Device is in run mode: switch to edit mode for the import, then back.
            self.setOperatingMode(mode=1)
            index = int(self.rpc.importApplication(application))
            while self.getImportProgress() < 1.0:
                time.sleep(1)
            self.setOperatingMode(mode=0)
        else:
            # Already in edit mode: import directly and stay in edit mode.
            index = int(self.rpc.importApplication(application))
            while self.getImportProgress() < 1.0:
                time.sleep(1)
        self.mainAPI.waitForConfigurationDone()
        return index
    def getImportProgress(self) -> float:
        """
        Get the progress of the asynchronous configuration import (yields 1.0 when the last import has finished).
        Returns xmlrpc errors occurring during import.
        :return: (float) progress (0.0 to 1.0)
        """
        try:
            result = self.rpc.getImportProgress()
            return result
        except xmlrpc.client.Fault as fault:
            # Fault 101107 means "no import in progress", i.e. it finished.
            if fault.faultCode == 101107:
                return 1.0
            # NOTE(review): any other fault code falls through and returns
            # None implicitly, while callers compare the result with floats —
            # consider re-raising here.
    def getExportProgress(self) -> float:
        """
        Returns the progress of the ongoing export (configuration or application). After the export is done
        this method returns 1.0 until the cleanupExport() is called.
        :return: (float) progress (0.0 to 1.0)
        """
        try:
            result = self.rpc.getExportProgress()
            return result
        except xmlrpc.client.Fault as fault:
            # Fault 101110 means "no export in progress", i.e. it finished.
            if fault.faultCode == 101110:
                return 1.0
        finally:
            # NOTE(review): this finally runs on *every* call, so
            # cleanupExport() is invoked even while an export is still in
            # progress, and again by exportConfig()/exportApplication() after
            # their polling loops — confirm the device tolerates this.
            self.cleanupExport()
    def cleanupExport(self) -> None:
        """
        Removes the exported configuration/application binary archive file from the device tmpfs.
        Shall be called after the file is fully downloaded by the user with HTTP GET request.
        :return: None
        """
        self.rpc.cleanupExport()
    def getApplicationDetails(self, applicationIndex: [int, str]) -> dict:
        """
        The method returns details about the application line ApplicationType,
        TemplateInfo and Models with Type and Name.
        :param applicationIndex: (int) application Index
        :return: (dict) json-string containing application parameters, models and image settings
        """
        result = json.loads(self.rpc.getApplicationDetails(applicationIndex))
        return result
    def resetStatistics(self) -> None:
        """
        Resets the statistic data of current active application.
        :return: None
        """
        self.rpc.resetStatistics()
        self.mainAPI.waitForConfigurationDone()
    @staticmethod
    def writeApplicationConfigFile(applicationName: str, data: bytearray) -> None:
        """
        Stores the application data as an o2d5xxapp-file in the desired path.
        :param applicationName: (str) application name as str
        :param data: (bytearray) application data
        :return: None
        """
        extension = ".o2d5xxapp"
        # Force the canonical extension regardless of what the caller supplied.
        filename, file_extension = os.path.splitext(applicationName)
        if not file_extension == extension:
            applicationName = filename + extension
        with open(applicationName, "wb") as f:
            f.write(data)
    @staticmethod
    def writeConfigFile(configName: str, data: bytearray) -> None:
        """
        Stores the config data as an o2d5xxcfg-file in the desired path.
        :param configName: (str) application file path as str
        :param data: (bytearray) application data
        :return: None
        """
        extension = ".o2d5xxcfg"
        # Force the canonical extension regardless of what the caller supplied.
        filename, file_extension = os.path.splitext(configName)
        if not file_extension == extension:
            configName = filename + extension
        with open(configName, "wb") as f:
            f.write(data)
    def readApplicationConfigFile(self, applicationFile: str) -> str:
        """
        Read and decode an application-config file.
        :param applicationFile: (str) application config file path
        :return: (str) application data
        """
        result = self.readConfigFile(configFile=applicationFile)
        return result
    @firmwareWarning
    def readConfigFile(self, configFile: str) -> str:
        """
        Read and decode a device-config file.
        :param configFile: (str) config file path
        :return: (str) config data
        """
        if isinstance(configFile, str):
            if os.path.exists(os.path.dirname(configFile)):
                # Device RPC expects the file content as a base64 string.
                with open(configFile, "rb") as f:
                    encodedZip = base64.b64encode(f.read())
                    decoded = encodedZip.decode()
                    return decoded
            else:
                raise FileExistsError("File {} does not exist!".format(configFile))
    def setOperatingMode(self, mode) -> [None, Edit]:
        """
        Changes the operation mode of the device. Setting this to "edit" will enable the "EditMode"-object on RPC.
        :param mode: 1 digit
                     0: run mode
                     1: edit mode
                     2: simulation mode (Not implemented!)
        :return: None or Edit object
        """
        if mode == 0:  # stop edit mode
            self.stopEdit()
        elif mode == 1:  # start edit mode
            return self.startEdit()
        else:
            raise ValueError("Invalid operating mode")
    def __getattr__(self, name):
        # Forward otherwise undefined method calls to XMLRPC proxy
        return getattr(self.rpc, name)
| ifm/o2x5xx-python | source/rpc/session.py | session.py | py | 12,153 | python | en | code | 3 | github-code | 36 |
# Chimera Linux cports build template metadata for tig (text-mode git UI).
pkgname = "tig"
pkgver = "2.5.8"
pkgrel = 1
build_style = "gnu_configure"
make_cmd = "gmake"
make_dir = "."
make_install_args = ["install-doc-man"]
hostmakedepends = ["gmake", "automake", "asciidoc", "xmlto", "pkgconf"]
makedepends = ["ncurses-devel"]
depends = ["git"]
pkgdesc = "Text-mode interface for git"
maintainer = "Wesley Moore <wes@wezm.net>"
license = "GPL-2.0-or-later"
url = "https://github.com/jonas/tig"
source = f"{url}/releases/download/{pkgname}-{pkgver}/{pkgname}-{pkgver}.tar.gz"
sha256 = "b70e0a42aed74a4a3990ccfe35262305917175e3164330c0889bd70580406391"
# test suite tries to access /dev/tty which fails
options = ["!check"]
def post_install(self):
    # Shell completions live in contrib/ and are not installed by the
    # default install target, so install them explicitly for bash and zsh.
    self.install_completion("contrib/tig-completion.bash", "bash")
    self.install_completion("contrib/tig-completion.zsh", "zsh")
| chimera-linux/cports | contrib/tig/template.py | template.py | py | 805 | python | en | code | 119 | github-code | 36 |
23741088080 | #! /Users/tianranmao/Projects/so1.0/venv/bin/python
import requests
from bs4 import BeautifulSoup
import datetime
import pytz
import time
import re
import os
# --------------------------------------------------------------------
# Main Function
# --------------------------------------------------------------------
def scrape_web(url):
    """Fetch *url* and return the text of its first <p> element.

    Prints the paragraph (followed by a blank line) on success; prints the
    error and returns None when the HTTP request fails.
    """
    try:
        page_source = requests.get(url, timeout=20).text
    except Exception as err:
        print(err)
        return None
    first_paragraph = BeautifulSoup(page_source, 'lxml').find('p').text
    print(first_paragraph)
    print()
    return first_paragraph
if __name__ == "__main__":
    # Map datetime.weekday() values to names for log messages.
    week_day_dict = {
        0 : 'Monday',
        1 : 'Tuesday',
        2 : 'Wednesday',
        3 : 'Thursday',
        4 : 'Friday',
        5 : 'Saturday',
        6 : 'Sunday'
    }
    # SSE market-data endpoints: per-expiry option chains for the 510300 and
    # 510050 ETFs plus the underlying ETF price lines. Expired months are
    # kept commented out for reference.
    #url_510300_jan = "http://yunhq.sse.com.cn:32041//v1/sho/list/tstyle/510300_01?callback=jQuery112402078220234177265_1577088059316&select=contractid%2Clast%2Cchg_rate%2Cpresetpx%2Cexepx&order=contractid%2Cexepx%2Case&_=1577088059323"
    #url_510300_feb = "http://yunhq.sse.com.cn:32041//v1/sho/list/tstyle/510300_02?callback=jQuery112402078220234177265_1577088059316&select=contractid%2Clast%2Cchg_rate%2Cpresetpx%2Cexepx&order=contractid%2Cexepx%2Case&_=1577088059351"
    url_510300_mar = "http://yunhq.sse.com.cn:32041//v1/sho/list/tstyle/510300_03?callback=jQuery112402078220234177265_1577088059318&select=contractid%2Clast%2Cchg_rate%2Cpresetpx%2Cexepx&order=contractid%2Cexepx%2Case&_=1577088059356"
    url_510300_apr = "http://yunhq.sse.com.cn:32041//v1/sho/list/tstyle/510300_04?callback=jQuery112409417454011549969_1582766597079&select=contractid%2Clast%2Cchg_rate%2Cpresetpx%2Cexepx&order=contractid%2Cexepx%2Case&_=1582766597086"
    url_510300_jun = "http://yunhq.sse.com.cn:32041//v1/sho/list/tstyle/510300_06?callback=jQuery112402078220234177265_1577088059336&select=contractid%2Clast%2Cchg_rate%2Cpresetpx%2Cexepx&order=contractid%2Cexepx%2Case&_=1577088059360"
    url_510300_sep = "http://yunhq.sse.com.cn:32041//v1/sho/list/tstyle/510300_09?callback=jQuery11240028350739831281335_1579742947846&select=contractid%2Clast%2Cchg_rate%2Cpresetpx%2Cexepx&order=contractid%2Cexepx%2Case&_=1579742947854"
    url_510300 = "http://yunhq.sse.com.cn:32041//v1/sh1/line/510300?callback=jQuery1124083017185515941_1577089469213&begin=0&end=-1&select=time%2Cprice%2Cvolume&_=1577089469215"
    #url_510050_jan = "http://yunhq.sse.com.cn:32041/v1/sho/list/tstyle/510050_01?callback=jQuery112408090383939976182_1574904018122&select=contractid%2Clast%2Cchg_rate%2Cpresetpx%2Cexepx&_=1574904018127"
    #url_510050_feb = "http://yunhq.sse.com.cn:32041//v1/sho/list/tstyle/510050_02?callback=jQuery112407089919710187241_1577321533000&select=contractid%2Clast%2Cchg_rate%2Cpresetpx%2Cexepx&order=contractid%2Cexepx%2Case&_=1577321533005"
    url_510050_mar = "http://yunhq.sse.com.cn:32041/v1/sho/list/tstyle/510050_03?callback=jQuery111206287606767948288_1564018683263&select=contractid%2Clast%2Cchg_rate%2Cpresetpx%2Cexepx&_=1564018683268"
    url_510050_apr = "http://yunhq.sse.com.cn:32041//v1/sho/list/tstyle/510050_04?callback=jQuery112409417454011549969_1582766597079&select=contractid%2Clast%2Cchg_rate%2Cpresetpx%2Cexepx&order=contractid%2Cexepx%2Case&_=1582766597082"
    url_510050_jun = "http://yunhq.sse.com.cn:32041/v1/sho/list/tstyle/510050_06?callback=jQuery111209494863322515489_1571879875297&select=contractid%2Clast%2Cchg_rate%2Cpresetpx%2Cexepx&_=1571879875304"
    url_510050_sep = "http://yunhq.sse.com.cn:32041//v1/sho/list/tstyle/510050_09?callback=jQuery11240028350739831281335_1579742947844&select=contractid%2Clast%2Cchg_rate%2Cpresetpx%2Cexepx&order=contractid%2Cexepx%2Case&_=1579742947849"
    url_510050 = "http://yunhq.sse.com.cn:32041/v1/sh1/line/510050?callback=jQuery111208396578891098054_1563195335181&begin=0&end=-1&select=time%2Cprice%2Cvolume & _ =1563195335188"
    url_list = [url_510300, url_510300_mar, url_510300_apr, url_510300_jun, url_510300_sep, url_510050, url_510050_mar, url_510050_apr, url_510050_jun, url_510050_sep]
    while True:
        now_shanghai = datetime.datetime.now(tz=pytz.timezone('Asia/Shanghai'))
        # One output file per Shanghai trading day; create it if missing.
        file_name = f"./txt/{now_shanghai.strftime('%Y-%m-%d')}.txt"
        if not os.path.exists(file_name):
            with open(file_name, 'w') as f:
                pass
        for url in url_list:
            paragraph = scrape_web(url)
            if paragraph!=None:
                # The payload is JSONP; pull the embedded date/time fields by regex.
                pattern_date = re.compile('"date":(\d+),')
                match_date = re.search(pattern_date, paragraph)
                webdate = int(match_date.group(1))
                realdate = int(now_shanghai.strftime('%Y%m%d'))
                # print("web date is: {}".format(webdate))
                # print("real date is: {}".format(realdate))
                pattern_time = re.compile('"time":(\d+),')
                match_time = re.search(pattern_time, paragraph)
                webtime = int(match_time.group(1))
                realTimeString = now_shanghai.strftime('%H%M%S')
                realTime = int(realTimeString)
                # print("web time is: {}".format(webtime))
                # print("real time is: {}".format(realTime))
                weekday = now_shanghai.weekday()
                # Trading day: Mon-Fri and the server's date matches local date.
                workday = weekday != 5 and weekday != 6 and webdate==realdate
                # SSE trading hours encoded as HHMMSS integers:
                # 09:30-11:30 morning session, 13:00-15:00 afternoon session.
                time_start = 93000
                time_break = 113000
                time_restart = 130000
                time_stop = 150000
                time_near = 91500
                market_open = workday and ((webtime >= time_start and realTime < time_break) or (webtime >= time_restart and realTime <= time_stop))
                nearly_open = workday and ((time_break <= realTime and webtime < time_restart) or (time_near < webtime < time_start))
                if market_open:
                    # Append the raw JSONP payload, one line per snapshot.
                    with open(file_name, 'a') as f:
                        try:
                            f.write(paragraph)
                            f.write('\n')
                            print('writing to file...')
                        except Exception as e:
                            print(e)
        # Polling cadence depends on the market state determined by the last URL.
        if market_open:
            print('{} {}{}:{}{}:{}{} markets open'.format(week_day_dict[weekday], realTimeString[0],realTimeString[1],
                                                          realTimeString[2],realTimeString[3],
                                                          realTimeString[4],realTimeString[5]))
            #print('waiting for 5 seconds')
            #time.sleep(5)
        elif nearly_open:
            print('{} {}{}:{}{}:{}{} markets opening soon'.format(week_day_dict[weekday], realTimeString[0],realTimeString[1],
                                                                  realTimeString[2],realTimeString[3],
                                                                  realTimeString[4],realTimeString[5]))
            print('waiting for 10 seconds')
            time.sleep(10)
        else:
            print('{} {}{}:{}{}:{}{} markets closed'.format(week_day_dict[weekday], realTimeString[0],realTimeString[1],
                                                            realTimeString[2],realTimeString[3],
                                                            realTimeString[4],realTimeString[5]))
            print('waiting for 10 minutes')
            time.sleep(600)
| timmao78/so1.0 | get_txt.py | get_txt.py | py | 7,564 | python | en | code | 0 | github-code | 36 |
1437749316 | from django.urls import path, include
from . import views
from django.contrib.auth.views import auth_login
# Route table for the main app; every route gets a 'main_*' name for reverse().
# NOTE(review): '' and 'login' are both served by views.index — confirm the
# login route should not point at a dedicated login view.
urlpatterns = [
    path('', views.index, name='main_home'),
    path('login', views.index, name='main_login'),
    path('account/', views.account, name='main_account'),
    path('feed/', views.feed, name='main_feed'),
    path('search/', views.search, name='main_search'),
    path('message/', views.message, name='main_message'),
    path('master/', views.master, name='main_master'),
    path('organization/', views.organization, name='main_organization'),
]
| chavkin94/YouDeo | main/urls.py | urls.py | py | 570 | python | en | code | 0 | github-code | 36 |
7003663358 | # any all function practice
def all_sum(*args):
    """Return the sum of all positional arguments (0 when called with none)."""
    return sum(args)
# print(all_sum(1,2,3,4)) # correct input
# print(all_sum(1,2,3,4, "salman", ["salman"])) # wrong input
## here we solve the problem of wrong input using all function
def all_add(*args):
    """Sum the arguments if every one is an int or float.

    :param args: values to add
    :return: the numeric sum (0 for no arguments), or the string
             "wrong input" when any argument is not an int/float.
    """
    # Generator instead of a materialized list inside all(); type() (not
    # isinstance) is kept deliberately so bool arguments are still rejected.
    if all(type(arg) in (int, float) for arg in args):
        return sum(args)
    return "wrong input"
# Demo: the first call prints 10, the second prints "wrong input".
print(all_add(1,2,3,4))
print(all_add(1,2,3,4, "salman", ["salman"]))
| salmansaifi04/python | chapter11(enumurator-function_type)/08_any_all_function_practice.py | 08_any_all_function_practice.py | py | 590 | python | en | code | 0 | github-code | 36 |
12198741928 | import pandas as pd
import streamlit as st
import fitz
from PIL import Image
from dataExtractor import DataExtractor
from image2 import Canvas
from firebase import FirebaseDB
import json
from st_keyup import st_keyup
# Report type -> list of section names whose table areas the user will outline.
json_data = {'Tear Down': ['cable', 'bomba', 'intake'],
             'Production': ['simulacion', 'equipo'],
             'Artificial Lift': ['cable', 'bomba', 'intake', 'motor', 'sensor', 'protector'],
             'Efficiency': ['general', 'bomba', 'motor']}
st.write(f"Define a new extraction {st.session_state.user['nombre']}")
def read_pdf(uploaded):
    """Open an uploaded file-like object as a fitz (PyMuPDF) document."""
    return fitz.open(stream=uploaded.read())
def create_image_list_from_pdf(file):
    """Render every page of *file* at 2x zoom and return 700x500 RGB PIL images."""
    rendered_pages = []
    for page_index in range(len(file)):
        pixmap = file.load_page(page_index).get_pixmap(matrix=fitz.Matrix(2, 2))
        page_image = Image.frombytes("RGB", (pixmap.width, pixmap.height), pixmap.samples)
        rendered_pages.append(page_image.resize((700, 500)))
    return rendered_pages
def replace_image_in_canvas(canvas, image, key):
    """Swap the canvas background image and widget key by resetting its state."""
    canvas.reset_canvas(image, key)
def load_canvas(image, page_number, draw_mode, update):
    """Build a Canvas for *image*, render it for *page_number*, process drawings,
    and return the Canvas object."""
    page_canvas = Canvas(image, draw_mode, update_streamlit=update)
    page_canvas.create_canvas(page_number)
    page_canvas.process_drawing()
    return page_canvas
def store_scaled_coordinates(page_number, coordinates, delete_columns):
    """Tag a page's scaled coordinates with the page number and strip helper columns.

    Fix: the previous version assigned the 'page' column directly on the
    caller's DataFrame, mutating it as a side effect; the column is now
    added to a copy.

    :param page_number: page the coordinates belong to (stored in a 'page' column)
    :param coordinates: DataFrame of scaled coordinates, or None
    :param delete_columns: column labels to drop from the result
    :return: a new DataFrame (empty when coordinates is None, matching the
             original pd.DataFrame(None) behaviour)
    """
    if coordinates is None:
        return pd.DataFrame()
    result = coordinates.copy()
    # Same ordering as before: add 'page' first, then drop the helper columns.
    result['page'] = page_number
    return pd.DataFrame(result.drop(delete_columns, axis=1))
def present_dataframe(dataframe, markdown):
    """Render *dataframe* under the *markdown* subheader, or a notice when the
    argument is not a pandas DataFrame."""
    if not isinstance(dataframe, pd.DataFrame):
        st.write("No DataFrame was provided.")
        return
    st.subheader(markdown)
    st.dataframe(dataframe)
def first_page():
    """Render the upload page and seed session-state defaults once a PDF arrives."""
    st.subheader("PDF Document")
    st.write("Upload and define attributes.")
    uploaded = st.file_uploader("Upload PDF", type=['pdf'])
    if uploaded is None:
        return
    # Only initialize keys that are not already present in the session.
    for key, default in (("file", uploaded), ("regex", pd.DataFrame()), ("subject", None)):
        if key not in st.session_state:
            st.session_state[key] = default
def get_report_main_topics(report):
    # Publish the section list for the chosen report type to session state,
    # where selection() reads it as the table titles to be outlined.
    st.session_state.atributos_reporte = json_data[report]
def add_coordinates_to_firebase(dataframe, collection_name, subject):
    """Persist the coordinates dataframe under *collection_name*/*subject* in
    Firebase and return the identifier of the new record."""
    return FirebaseDB().add_coordinates(dataframe, collection_name, subject)
def selection():
    """Render the extraction-definition workflow: upload a sample PDF, outline
    one rectangle per report attribute on each page, preview the extracted
    tables, then save the scaled coordinates to Firebase keyed by the mail
    subject the user types."""
    # Default subject (overwritten below once the user defines one).
    subject_name = "testmail/test2"
    #To upload a report
    st.subheader("PDF Document Extraction")
    uploaded_file = st.file_uploader("Upload PDF sample", type=['pdf'])
    realtime_update = st.checkbox("Update in realtime", True)
    st.write("Select between defining the area of the table (rect),"
             "or modify a predefined area (transform)")
    drawing_mode = st.selectbox("Drawing tool:", ["rect", "transform"])
    if uploaded_file is not None:
        if "compiled_scales" not in st.session_state:
            st.session_state["compiled_scales"] = pd.DataFrame()
        if "page_number" not in st.session_state:
            st.session_state["page_number"] = 0
        pdf = read_pdf(uploaded_file)
        image_list = create_image_list_from_pdf(pdf)
        canvas_obj = load_canvas(image_list[st.session_state["page_number"]],
                                 st.session_state["page_number"],
                                 drawing_mode, realtime_update)
        st.caption("Note: This canvas version could define columns or cells with None values,"
                   " consider to select a table or area of it in order that the table extraction preview"
                   " contains the elements you want.")
        present_dataframe(st.session_state["compiled_scales"], "All Scaled Coordinates")
        objects_df = canvas_obj.get_objects_dataframe()
        all_scaled_coordinates = None
        if objects_df is not None and 'type' in objects_df.columns:
            table_objects = objects_df.loc[objects_df['type'] == 'rect']
            if len(table_objects) > 0:
                # Titles still unassigned: the tail of the attribute list not
                # yet covered by previously compiled coordinates.
                difference = (len(st.session_state["atributos_reporte"])-len(st.session_state["compiled_scales"]))
                #st.write(difference)
                data = st.session_state["atributos_reporte"][-difference:] if difference > 0 else []
                #st.write(data)
                all_scaled_coordinates = canvas_obj.process_tables(table_objects,
                                                                   pdf.load_page(st.session_state["page_number"]),
                                                                   data)
                if all_scaled_coordinates is not None:
                    st.markdown("### Scaled Page Coordinates")
                    st.table(all_scaled_coordinates)
                    st.markdown("### Extracted Page Tables")
                    table_count = 0
                    # Preview-extract each outlined rectangle on the current page.
                    for _, row in all_scaled_coordinates.iterrows():
                        top = row['Top']
                        left = row['Left']
                        height = row['Final height']
                        width = row['Final width']
                        titles = row['Title']
                        data_extractor = DataExtractor(uploaded_file, st.session_state["page_number"] + 1, top, left,
                                                       width, height)
                        tables, title = data_extractor.extract_tables(titles)
                        if tables:
                            st.subheader(f"Table {titles}")
                            table_count += 1
                            for i in range(len(tables)):
                                st.dataframe(tables[i])
                        else:
                            st.write("No tables were extracted.")
                else:
                    st.write("No rectangle selections found on the canvas.")
            else:
                st.write("No rectangle selections found on the canvas.")
        canvas_element = st.empty() # Create an empty element to display the canvas
        if "disabled" not in st.session_state:
            st.session_state["disabled"] = False
        # "Next" advances pages; "Save" only enables after the last page.
        next_button = st.button("Next", disabled=st.session_state["disabled"])
        save_button = st.button("Save", disabled=not st.session_state["disabled"])
        if next_button:
            canvas_element.empty() # Clear the canvas element
            st.session_state["page_number"] += 1
            new_scaled_coordinates = store_scaled_coordinates(st.session_state["page_number"], all_scaled_coordinates,
                                                              ["scaleX", "scaleY", "Width", "Height"])
            if new_scaled_coordinates is not None:
                st.session_state["compiled_scales"] = pd.concat([st.session_state["compiled_scales"],
                                                                 new_scaled_coordinates], ignore_index=True)
            if st.session_state["page_number"] >= len(image_list) - 1:
                # st.session_state["page_number"] = 0
                st.session_state["disabled"] = True
            canvas_obj.reset_canvas(image_list[st.session_state["page_number"]], st.session_state["page_number"])
        if st.session_state["disabled"]:
            st.write("Before you save, define the mail subject for extraction (this implies how will be the subject"
                     " text when an email arrives to your inbox):")
            subject_value = st_keyup("", value="Report_sample", key="subject")
            st.write(f"Subject: {subject_value}")
            # Firebase key: "<user email>/<subject>" with spaces normalized.
            subject = st.session_state.user['email'] + "/" + subject_value
            subject_name = subject.replace(" ", "_")
            st.write(f"Final parameters: {subject}")
        if save_button:
            st.session_state["page_number"] += 1
            new_scaled_coordinates = store_scaled_coordinates(st.session_state["page_number"], all_scaled_coordinates,
                                                              ["scaleX", "scaleY", "Width", "Height"])
            if new_scaled_coordinates is not None:
                st.session_state["compiled_scales"] = pd.concat([st.session_state["compiled_scales"],
                                                                 new_scaled_coordinates], ignore_index=True)
            present_dataframe(st.session_state["compiled_scales"], "Final Scaled Coordinates")
            id_num = add_coordinates_to_firebase(st.session_state["compiled_scales"], "db_coord", subject_name)
            st.markdown("Data saved with id " + str(id_num))
            st.button("Finish", on_click= lambda : reset_all(uploaded_file))
        canvas_obj.display_canvas()
    else:
        # No upload (or it was removed): reset paging/compiled state so a
        # fresh file starts from page 0.
        st.session_state["page_number"] = 0
        st.session_state["disabled"] = False
        st.session_state["compiled_scales"] = pd.DataFrame()
# Render the screen depending on which report-type button was selected.
def mostrar_pantalla():
    """Top-level page dispatcher: collect the user's IMAP app password on
    first run, then show the report-type buttons and, once one is chosen,
    the selection() workflow for that report type."""
    # Initialize the session state
    if 'boton_seleccionado' not in st.session_state:
        st.session_state.boton_seleccionado = None
    if 'input_text' not in st.session_state:
        st.session_state.input_text = False
    if not st.session_state.user['imap']:
        # The user has not stored an email (IMAP) password yet — ask for it.
        st.header("Additional process")
        st.subheader("As this app works with email (IMAP), it is important to get access to your email account.")
        input_text = st.text_input("Input you mail password", key='input_text_value')
        if st.button("Save"):
            firebasedb = FirebaseDB()
            firebasedb.set_user_data(st.session_state.user['uid'], 'ek', input_text)
            # Switch the flag to True so the report buttons are shown.
            st.session_state.user['imap'] = True
        st.caption(":red[Gmail:] _For Gmail accounts, it is important to enable IMAP and input an app password, "
                   "for this you can look at the next link:_ https://support.google.com/mail/answer/185833?hl=es-419")
    else:
        # Show the header for the selected button
        if st.session_state.boton_seleccionado is not None:
            if 'atributos_reporte' not in st.session_state:
                st.session_state.atributos_reporte = []
            st.header(f"Report type: {st.session_state.boton_seleccionado}")
            print(st.session_state.boton_seleccionado)
            get_report_main_topics(st.session_state.boton_seleccionado)
            print(st.session_state.atributos_reporte)
            st.write(st.session_state.atributos_reporte)
            selection()
        # Buttons to choose the report type (hidden once one is selected).
        if st.session_state.boton_seleccionado is None:
            if st.button('Tear Down', key='button1',
                         on_click=lambda: st.session_state.update(boton_seleccionado="Tear Down")):
                pass
            if st.button('Production', key='button2',
                         on_click=lambda: st.session_state.update(boton_seleccionado="Production")):
                pass
            if st.button('Artificial Lift', key='button3',
                         on_click=lambda: st.session_state.update(boton_seleccionado="Artificial Lift")):
                pass
            if st.button('Efficiency', key='button4',
                         on_click=lambda: st.session_state.update(boton_seleccionado="Efficiency")):
                pass
        if st.session_state.boton_seleccionado is None:
            st.write("Please, select a report type")
# Render the screen.
mostrar_pantalla()
def reset_all(file):
st.session_state.boton_seleccionado = None
st.session_state["page_number"] = 0
st.session_state["disabled"] = False
st.session_state["compiled_scales"] = pd.DataFrame()
file = None | gapastorv/st_rca_project | v2-incomplete/pages/Parsing.py | Parsing.py | py | 12,399 | python | en | code | 0 | github-code | 36 |
15197021379 | import argparse
import socket
import sys
import json
import urllib.request
import redis
import base64
import re
import boto3
import os
import subprocess
from faker import Faker
import logging
# Module-level setup: debug logging, deterministic fake data, AWS KMS client,
# and a Redis connection over the local unix socket.
logging.basicConfig(level=logging.DEBUG)
fake = Faker('en_US')
Faker.seed(1337)  # fixed seed so every start seeds Redis with the same names
kms_client = boto3.client('kms')
kms_key_id = os.environ.get('KMS_KEY_ID')
r = redis.Redis(unix_socket_path='/run/redis.sock')
# Pre-populate Redis with 99 fake-name keys for demo lookups.
for i in range(1,100):
    name = fake.name()
    r.set(name, 'bar{}'.format(i))
# To run the server, pass the port it will listen to. For example:
# $ python3 /app/server.py server 5005
class VsockListener:
    # Server side of the enclave: accepts AF_VSOCK connections and serves
    # base64-encoded JSON queries against Redis.
    def __init__(self, conn_backlog=128):
        self.conn_backlog = conn_backlog
    def bind(self, port):
        # Bind and listen for connections on the specified port
        self.sock = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
        self.sock.bind((socket.VMADDR_CID_ANY, port))
        self.sock.listen(self.conn_backlog)
    def recv_data(self):
        # Receive data from a remote endpoint
        while True:
            # Broad try/except keeps the accept loop alive: a bad client
            # request is logged and the server waits for the next connection.
            try:
                logging.info("Let's accept stuff")
                (from_client, (remote_cid, remote_port)) = self.sock.accept()
                logging.info("Connection from " + str(from_client) + str(remote_cid) + str(remote_port))
                # NOTE(review): a single recv(4096) assumes the whole
                # base64 payload arrives in one read — confirm for large
                # ciphertexts. Payload is base64(JSON: {"get"|"set": {...}}).
                query = json.loads(base64.b64decode(from_client.recv(4096).decode()).decode())
                logging.info("Message received: {}".format(query))
                # The single top-level key selects the operation.
                query_type = list(query.keys())[0]
                query = query[query_type]
                logging.info("{} {}".format(query_type, query))
                if query_type == 'get':
                    response = query_redis(query)
                elif query_type == 'set':
                    response = put_in_redis(query)
                else:
                    response = "Bad query type\n"
                # Send back the response
                from_client.send(str(response).encode())
                from_client.close()
                logging.info("Client call closed")
            except Exception as ex:
                logging.info(ex)
KMS_PROXY_PORT="8000"
def get_plaintext(credentials):
    """Unpack the caller-supplied AWS credentials and ciphertext from the
    *credentials* dict and return the decryption result tuple (status, text)."""
    aws_access = credentials['access_key_id']
    aws_secret = credentials['secret_access_key']
    aws_token = credentials['token']
    blob = credentials['ciphertext']
    aws_region = credentials['region']
    logging.info('ciphertext: {}'.format(blob))
    return decrypt_cipher(aws_access, aws_secret, aws_token, blob, aws_region)
def decrypt_cipher(access, secret, token, ciphertext, region):
    """
    Use KMS Tool Enclave Cli to decrypt cipher text.

    :return: tuple (status, text) — (0, plaintext) on success,
             (1, error message) on failure.
    """
    logging.info('in decrypt_cypher')
    proc_params = [
        "/app/kmstool_enclave_cli",
        "decrypt",
        "--region", region,
        "--proxy-port", KMS_PROXY_PORT,
        "--aws-access-key-id", access,
        "--aws-secret-access-key", secret,
        "--aws-session-token", token,
        "--ciphertext", ciphertext,
    ]
    logging.debug('proc_params: %s', proc_params)
    proc = subprocess.Popen(
        proc_params,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    # Fix: Popen.communicate() may only be called once — it drains and closes
    # the pipes. The previous code called it three times (for the check, the
    # debug log, and the parse), so the later calls saw no data.
    stdout, stderr = proc.communicate()
    logging.debug('stdout: %s stderr: %s', stdout, stderr)
    if stdout:
        logging.info('No KMS error')
        # The CLI's second whitespace-separated stdout token is the base64
        # plaintext (matches the original parsing) — TODO confirm CLI format.
        b64text = stdout.decode().split()[1]
        logging.debug('b64text: %s', b64text)
        plaintext = base64.b64decode(b64text).decode()
        return (0, plaintext)
    else:
        logging.info('KMS error')
        return (1, "KMS Error. Decryption Failed.\n")
def server_handler(args):
    """Entry point for the 'server' sub-command: bind to args.port and serve forever."""
    listener = VsockListener()
    listener.bind(args.port)
    logging.info("Started listening to port : {}".format(args.port))
    listener.recv_data()
def put_in_redis(query):
    """Decrypt the query payload and store each key/value pair in Redis.

    :param query: dict with AWS credentials, region and the ciphertext of a
                  JSON object of key/value pairs.
    :return: (str) human-readable status line, or the KMS error message when
             decryption fails.
    """
    status, payload = get_plaintext(query)
    if status:
        # Decryption failed; propagate the KMS error text to the client.
        logging.info(payload)
        return payload
    try:
        data = json.loads(payload)
    except ValueError:
        # Fix: error message previously read "Mot valid JSON".
        return 'Failed to put in data: Not valid JSON\n'
    # items() avoids a second lookup per key.
    for key, value in data.items():
        r.set(key, value)
    return "Put the data in\n"
# Check whether a decrypted key exists in Redis.
def query_redis(query):
    """Decrypt the query payload and report whether the resulting key exists
    in Redis.

    :param query: dict with AWS credentials, region and the ciphertext of the
                  key to look up.
    :return: (str) human-readable status line, or the KMS error message when
             decryption fails.
    """
    # Fix: the comment previously here described S3 IP ranges (copy-paste
    # leftover); the function queries Redis. Also replaced != None with an
    # identity check, removed the unreachable else branch, and fixed the
    # "They key" typo in the not-found message.
    status, key = get_plaintext(query)
    if status:
        # Decryption failed; propagate the KMS error text to the client.
        logging.info(key)
        return key
    if r.get(key) is not None:
        logging.info("Key exists")
        return "The key exists\n"
    logging.info("Key doesn't exist")
    return "The key does not exist\n"
def main():
    """Parse command-line arguments and dispatch to the chosen handler."""
    parser = argparse.ArgumentParser(prog='vsock-sample')
    parser.add_argument("--version", action="version",
                        help="Prints version information.",
                        version='%(prog)s 0.1.0')
    subparsers = parser.add_subparsers(title="options")

    server_parser = subparsers.add_parser("server", description="Server",
                                          help="Listen on a given port.")
    server_parser.add_argument("port", type=int, help="The local port to listen on.")
    server_parser.set_defaults(func=server_handler)

    # With no subcommand there is nothing to dispatch; show usage and bail.
    if len(sys.argv) < 2:
        parser.print_usage()
        sys.exit(1)

    parsed = parser.parse_args()
    parsed.func(parsed)
# Standard script entry point.
if __name__ == "__main__":
    main()
| SMonaghan/nitro-enclave-with-redis | files/server.py | server.py | py | 5,032 | python | en | code | 0 | github-code | 36 |
import os
import config
from dotenv import load_dotenv
import neuronet
import markups as nav
import actions
import constants
import paths
import user_settings as settings
from utils import set_default_commands
import markovify
import logging
from gtts import gTTS
import asyncio
from aiogram import Bot, types, Dispatcher, executor
"""ENV"""
dotenv_path = os.path.join(os.path.dirname(__file__), ".env")
bot_token = ''
if os.path.exists(dotenv_path):
load_dotenv(dotenv_path)
bot_token = os.getenv("API_TOKEN")
if bot_token == '': bot_token = config.API_TOKEN
"""Log level"""
logging.basicConfig(format = "%(asctime)s - %(levelname)s - %(message)s", level = logging.INFO)
logger = logging.getLogger(__name__)
"""Bot init"""
bot = Bot(token = bot_token)
dp = Dispatcher(bot)
"""Startup function"""
async def on_startup(dp):
await set_default_commands(dp)
"""Voice answer generation"""
def generate(text, out_file):
tts = gTTS(text, lang = "ru")
tts.save(out_file)
"""Get text model"""
def get_model(filename):
with open(filename, encoding = "utf-8") as f: text = f.read()
return markovify.Text(text)
"""Get compliment"""
async def get_compliment():
generator = get_model(paths.PATH_FEMALE_TEXT_MODEL_ANSWER)
statement = True
while statement:
text = generator.make_sentence()
if text is not None: statement = False
return text
"""Start function"""
@dp.message_handler(commands = ["start", "hi", "hello"])
async def start(message: types.Message, commands = "start"):
await bot.send_chat_action(message.from_user.id, types.chat.ChatActions.TYPING)
await asyncio.sleep(1)
await message.answer(f"{actions.ANSWER_HI} {message.from_user.full_name}!",
reply_markup = nav.greet_markup)
"""Error function"""
@dp.errors_handler()
async def error(self):
await logger.warning('update "%s" casused error "%s"', self.exception_name, self.exception_message)
"""On photo"""
@dp.message_handler(content_types = ["photo"])
async def photo(message: types.Message):
filename = "settings_" + str(message.from_user.id) + ".txt"
settings_path = paths.PATH_USER_DATA + filename
is_text = await settings.get_user_settings_text(settings_path)
tmp_pic_file = paths.PATH_USER_DATA + str(message.from_user.id) + ".jpg"
await message.photo[-1].download(destination_file=tmp_pic_file)
result = neuronet.resolve(tmp_pic_file)
os.remove(tmp_pic_file)
if is_text == False:
tmp_audio_file = paths.PATH_USER_DATA + str(message.from_user.id) + ".mp3"
if len(result[0]) == 0:
text = actions.ANSWER_UNDEFINED
if is_text == False:
generate(text, tmp_audio_file)
await bot.send_chat_action(message.chat.id, types.chat.ChatActions.TYPING)
await asyncio.sleep(1)
if is_text == False:
await message.answer_audio(audio = open(tmp_audio_file, "rb"))
os.remove(tmp_audio_file)
return
else:
await message.answer(text)
return
text = result[1][0] + ", на мой скромный взгляд."
if result[0][0] == constants.IS_FEMALE: text = f'{actions.ANSWER_FEMALE} {text}'
elif result[0][0] == constants.IS_MALE: text = f'{actions.ANSWER_MALE} {text}'
print(text)
await bot.send_chat_action(message.from_user.id, types.chat.ChatActions.TYPING)
await asyncio.sleep(1)
if is_text == False:
generate(text, tmp_audio_file)
await message.answer_audio(audio = open(tmp_audio_file, "rb"))
os.remove(tmp_audio_file)
else: await message.answer(text)
text = ""
if result[0][0] == constants.IS_FEMALE: text = await get_compliment()
elif result[0][0] == constants.IS_MALE: text = actions.ANSWER_MALE_WITHOUT_MODEL
print(text)
await bot.send_chat_action(message.from_user.id, types.chat.ChatActions.TYPING)
await asyncio.sleep(1)
if is_text == False:
generate(text, tmp_audio_file)
await message.answer_audio(audio = open(tmp_audio_file, "rb"))
os.remove(tmp_audio_file)
else:
await message.answer(text)
@dp.message_handler()
async def answers(message: types.Message):
    # Catch-all text handler: dispatches on the button captions defined in
    # the actions module.  The per-user settings file stores the text/voice
    # preference (and doubles as the auto-compliment run flag below).
    filename = "settings_" + str(message.from_user.id) + ".txt"
    settings_path = paths.PATH_USER_DATA + filename
    if message.text == actions.QUERY_GREETING:
        await message.answer(actions.ANSWER_GREETING, reply_markup = nav.main_markup)
    elif message.text == actions.QUERY_SETTINGS:
        await message.answer(actions.ANSWER_SETTINGS, reply_markup = nav.settings_markup)
    elif message.text == actions.QUERY_TEXT_ANSWER:
        # Persist "answer with text" preference.
        is_text = True
        await settings.set_user_settings_text(settings_path, is_text)
        await message.answer(actions.ANSWER_TEXT_ANSWER)
    elif message.text == actions.QUERY_VOICE_ANSWER:
        # Persist "answer with voice" preference.
        is_text = False
        await settings.set_user_settings_text(settings_path, is_text)
        await message.answer(actions.ANSWER_VOICE_ANSWER)
    elif message.text == actions.QUERY_MAIN_MENU:
        await message.answer(actions.ANSWER_MAIN_MENU, reply_markup = nav.main_markup)
    elif message.text == actions.QUERY_GET_COMPLIMENT:
        # Send one compliment, as text or synthesized speech per settings.
        is_text = await settings.get_user_settings_text(settings_path)
        if is_text:
            text = await get_compliment()
            print(text)
            await bot.send_chat_action(message.from_user.id, types.chat.ChatActions.TYPING)
            await asyncio.sleep(1)
            await message.answer(text)
        else:
            tmp_audio_file = paths.PATH_USER_DATA + str(message.from_user.id) + ".mp3"
            text = await get_compliment()
            print(text)
            await bot.send_chat_action(message.from_user.id, types.chat.ChatActions.TYPING)
            await asyncio.sleep(1)
            generate(text, tmp_audio_file)
            await message.answer_audio(audio = open(tmp_audio_file, "rb"))
            os.remove(tmp_audio_file)
    elif message.text == actions.QUERY_START_AUTO_COMPLIMENTS:
        # NOTE(review): the auto-compliment loop reuses the text/voice
        # settings slot as its run flag, so it clobbers that preference.
        is_run = True
        await settings.set_user_settings_text(settings_path, is_run)
        await asyncio.sleep(1)
        await message.answer(actions.ANSWER_START_AUTO_COMPLIMENTS,
            reply_markup = nav.auto_compliments_markup)
        # Loop until the settings flag is flipped off by the stop handler.
        while is_run == True:
            is_run = await settings.get_user_settings_text(settings_path)
            text = await get_compliment()
            print(text)
            await bot.send_chat_action(message.from_user.id, types.chat.ChatActions.TYPING)
            await asyncio.sleep(3)
            await message.answer(text)
    elif message.text == actions.QUERY_STOP_AUTO_COMPLIMENTS:
        # Flip the run flag off so the loop above terminates.
        is_run = False
        await settings.set_user_settings_text(settings_path, is_run)
        await bot.send_chat_action(message.from_user.id, types.chat.ChatActions.TYPING)
        await asyncio.sleep(1)
        await message.answer(actions.ANSWER_STOP_AUTO_COMPLIMENTS,
            reply_markup = nav.main_markup)
"""Exit function"""
@dp.message_handler(commands = ["exit", "cancel", "bye"])
async def exit(message: types.Message, commands = "exit"):
await bot.send_chat_action(message.from_user.id, types.chat.ChatActions.TYPING)
await asyncio.sleep(1)
await message.answer(f"{actions.ANSWER_BYE} {message.from_user.full_name}!")
"""Run long-polling"""
def main():
executor.start_polling(dp, on_startup=on_startup, skip_updates = True)
if __name__ == "__main__": main()
| Lucifer13Freeman/Sunny-Telegram-Bot | bot.py | bot.py | py | 7,710 | python | en | code | 0 | github-code | 36 |
def load_dataset(key_values):
    """Load the dataset named by ``key_values['dataset']``.

    Imports the matching preprocessing module lazily and returns a
    (name, table, pairs) triple.  Raises ValueError for an unknown
    dataset name (previously this fell through the elif chain and
    crashed with a NameError on ``table``).
    """
    name = key_values['dataset']
    if name == 'cora':
        from .preprocessing_cora import clean_cora
        table, pairs = clean_cora()
    elif name == 'restaurant':
        from .preprocessing_restaurant import clean_restaurant
        table, pairs = clean_restaurant()
    elif name == 'abt_buy':
        from .preprocessing_abt_buy import clean_abt_buy
        table, pairs = clean_abt_buy()
    elif name == 'amzn_gp':
        from .preprocessing_amzn_gp import clean_amzn_gp
        table, pairs = clean_amzn_gp()
    elif name == 'census':
        from .preprocessing_census import clean_census
        table, pairs = clean_census()
    elif name == 'dblp_acm':
        from .preprocessing_dblp_acm import clean_dblp_acm
        table, pairs = clean_dblp_acm()
    elif name == 'febrl_dirty':
        from .preprocessing_febrl_dirty import clean_febrl_dirty
        table, pairs = clean_febrl_dirty()
    else:
        raise ValueError('unknown dataset: {}'.format(name))
    if key_values['verbose'] > 0:
        print("#####################################################################")
        print("CURRENT dataset: "+key_values['dataset'])
        print("CURRENT cluster_method: "+key_values['cluster_method'])
        print("CURRENT embedding_type: "+key_values['embedding_type'])
        print("#####################################################################")
    return key_values['dataset'], table, pairs
| JSLKM/thesis_blocking | blocking/preprocessing_datasets/__init__.py | __init__.py | py | 1,521 | python | en | code | 1 | github-code | 36 |
# Code you have previously used to load data
import pandas as pd
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
# Path of the file to read
file_path = './home-data-for-ml-course/train.csv'
data = pd.read_csv(file_path)
# Create target object and call it y
y = data.SalePrice
# Create X: the feature columns used for both training and prediction.
features = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
X = data[features]
# Split into validation and training data
# train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
# Specify Model
model = RandomForestRegressor(random_state = 1)
# Fit Model on the full training set (no hold-out split, see above).
model.fit(X, y)
# Make validation predictions and calculate mean absolute error
# NOTE(review): with the split commented out, this MAE is computed on the
# TRAINING data, so the printed "Validation MAE" is optimistic.
val_predictions = model.predict(X)
val_mae = mean_absolute_error(val_predictions, y)
print("Validation MAE: {:,.0f}".format(val_mae))
# print(len(val_predictions))
# print(val_y.columns)
# print("******\n", val_X.columns)
# print(type(val_y))
# # Appying Test Datas
# Score the competition test set with the fitted model.
test_data_path = "./home-data-for-ml-course/test.csv"
test_data = pd.read_csv(test_data_path)
test_X = test_data[features]
val_test_predictions = model.predict(test_X)
# val_test_mae = mean_absolute_error(val_test_predictions, test_y)
# print("Validation MAE: {:,.0f}".format(val_test_mae))
# # Run the code to save predictions in the format used for competition scoring
output = pd.DataFrame({'Id': test_data.Id, 'SalePrice': val_test_predictions})
output.to_csv('submission.csv', index=False)
36810437100 |
MPG = 20

def find_ample_city(gallons, distances, mpg=MPG):
    """Return the index of a city from which the full circuit can be driven.

    gallons[i] is the fuel available at city i and distances[i] the miles
    to the next city.  *mpg* generalizes the previously hard-coded fuel
    economy (default keeps the original behavior).  Returns -1 when the
    total fuel cannot cover the total distance.

    Single-pass O(n) greedy: whenever the running tank goes negative, no
    city seen so far can be a valid start, so the candidate restarts at
    the current city.
    """
    total_surplus = 0    # net fuel over the whole loop (feasibility check)
    remaining_gas = 0    # fuel in the tank since the current candidate start
    start_city = 0
    for i in range(len(gallons)):
        curr_gas = gallons[i] * mpg - distances[i]
        if remaining_gas >= 0:
            remaining_gas += curr_gas
        else:
            # Tank went dry before reaching city i: restart the candidate here.
            remaining_gas = curr_gas
            start_city = i
        total_surplus += curr_gas
    return start_city if total_surplus >= 0 else -1
# gallons = (20, 15, 15, 15, 35, 25, 30, 15, 65, 45, 10, 45, 25)
# distances = (15, 20, 50, 15, 15, 30, 20, 55, 20, 50, 10, 15, 15)
# Smoke test: city 3 is the valid starting point for this circuit.
gallons = [50,20, 5, 30, 25, 10, 10]
distances = [900, 600, 200, 400, 600, 200, 100]
assert find_ample_city(gallons, distances) == 3
| oc0de/pyEPI | 17/6.py | 6.py | py | 693 | python | en | code | 0 | github-code | 36 |
import subprocess
from pathlib import Path
from typing import List
RESOURCE_PATH = Path("tests/resources")
def call_main(args: List[str]) -> List[str]:
    """Run rmsd/calculate_rmsd.py with *args* and return its output lines.

    The child merges stderr into stdout (stderr=STDOUT), so the second
    element returned by communicate() is always None here.
    """
    root_path = Path("./")
    filename = root_path / "rmsd/calculate_rmsd.py"
    # Bug fix: the command previously used the literal string "(unknown)"
    # instead of the computed *filename* (which was left unused), so the
    # CLI under test was never actually executed.
    cmd = ["python", str(filename), *args]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    stdout, stderr = proc.communicate()
    if stderr is not None:
        print(stderr.decode())
    return stdout.decode().strip().split("\n")
| charnley/rmsd | tests/context.py | context.py | py | 510 | python | en | code | 431 | github-code | 36 |
import sys
from heapq import heappop, heappush, heapify
class edge():
    """A weighted edge from *src* to its neighbor *nbr*."""

    def __init__(self, src, nbr, weigh):
        # Store the endpoints and the weight verbatim.
        self.src, self.nbr, self.weigh = src, nbr, weigh
# Read the undirected weighted graph from stdin: v vertices, e edges,
# each edge given as "a b weight" and stored in both adjacency lists.
v = int(input())
e = int(input())
graph = {}
for i in range(v):
    graph[i] = []
for i in range(e):
    a, b, c = map(int, input().split())
    graph[a].append(edge(a, b, c))
    graph[b].append(edge(b, a, c))
# Query parameters: source s, destination d, pivot weight w, rank k.
s = int(input())
d = int(input())
w = int(input())
k = int(input())
visited = [0]*v
# Global trackers updated by multisolver: overall max/min path weight and
# their path strings, the tightest paths just above/below w, and a
# min-heap t of the k heaviest complete paths.
mxm = -1*sys.maxsize
smxm = ""
mim = sys.maxsize
smim = ""
cmax = sys.maxsize
cpath = ""
fmin = -1*sys.maxsize
fpath = ""
t = []
heapify(t)
def multisolver(graph, s, d, w, k, wsf, psf):
    """DFS over every simple path from s to d, updating the global trackers.

    wsf/psf are the weight-so-far and path-string-so-far of the current
    partial path.  When the destination is reached the finished path is
    folded into: overall max/min, the smallest path strictly heavier than
    w, the largest path strictly lighter than w, and a size-k min-heap
    whose root ends up being the k-th largest path.
    """
    global mxm, smxm, mim, smim, cmax, cpath, fmin, fpath, t
    if(s==d):
        if(wsf > mxm):
            mxm = wsf
            smxm = psf
        if(wsf < mim):
            mim = wsf
            smim = psf
        if(wsf > w and wsf < cmax):
            cmax = wsf
            cpath = psf
        if(wsf < w and wsf > fmin):
            fmin = wsf
            fpath = psf
        # Keep only the k heaviest paths: push, then drop the lightest.
        heappush(t, [wsf, psf])
        if(len(t)>k):
            heappop(t)
        return
    # Mark s as on the current path so cycles are not revisited.
    visited[s] = 1
    for i in graph[s]:
        if(visited[i.nbr] ==0):
            npsf = psf + str(i.nbr)
            nwsf = wsf + i.weigh
            multisolver(graph, i.nbr, d, w, k, nwsf, npsf)
    # Unmark on backtrack so other paths may still pass through s.
    visited[s] = 0
    return
# Explore all simple paths starting from s and print the collected stats.
multisolver(graph, s, d, w, k,0,"0")
print("Smallest Path = ", smim,"@", mim, sep ="")
print("Largest Path = ",smxm,"@", mxm, sep ="")
print("Just Larger Path than ", w," = ", cpath, "@",cmax, sep ="")
print("Just Smaller Path than ",w," = ", fpath, "@",fmin, sep ="")
# The heap root is the smallest of the k heaviest paths = the k-th largest.
print(k, "th largest path = ", t[0][1],"@", t[0][0],sep ="")
| nishu959/graphpepcoding | graphmuktisolverpep.py | graphmuktisolverpep.py | py | 1,682 | python | en | code | 0 | github-code | 36 |
def animal_cracker(string):
    """
    Report whether both words of a two-word string begin with the same
    letter (case-insensitive).

    The original docstring promised a True/False return but the function
    only printed; it now keeps the same print side effects AND returns
    the boolean result.  Assumes *string* has at least two
    space-separated words.
    """
    mystring = string.lower().split(' ')
    same_letter = mystring[0][0] == mystring[1][0]
    if same_letter:
        print(f'{mystring} both have the same beginning letter')
    else:
        print(f'{mystring} does not have the same beginning letter')
    return same_letter
animal_cracker('Levelhead llama')
| Aifedayo/Logic | animal_cracker2.py | animal_cracker2.py | py | 411 | python | en | code | 1 | github-code | 36 |
# All possible sums of two different elements taken from the numbers list
def solution(numbers):
    """Return the sorted, de-duplicated sums of every pair of elements."""
    pair_sums = set()
    for idx, first in enumerate(numbers):
        for second in numbers[idx + 1:]:
            pair_sums.add(first + second)
    return sorted(pair_sums)
print(solution([2,1,3,4,1]))
| hi-rev/TIL | Programmers/level_1/two_plus.py | two_plus.py | py | 323 | python | ko | code | 0 | github-code | 36 |
37939233633 | """!@namespace httpproxy Transport Layer fuer XMLRPClib"""
import xmlrpclib
import urllib2
class Urllib2Transport(xmlrpclib.Transport):
    """!Transport layer for the XMLRPC module, implemented on urllib2."""
    def __init__(self, opener=None, https=False, use_datetime=0):
        xmlrpclib.Transport.__init__(self, use_datetime)
        # Fall back to a plain opener when the caller supplies none.
        self.opener = opener or urllib2.build_opener()
        self.https = https
        self.verbose = 0
    def request(self, host, handler, request_body, verbose=0):
        """!Issue one HTTP(S) POST request and parse the XMLRPC response."""
        proto = ('http', 'https')[bool(self.https)]
        req = urllib2.Request('%s://%s%s' % (proto, host, handler), request_body)
        req.add_header('User-agent', self.user_agent)
        self.verbose = verbose
        # parse_response (inherited) decodes the XMLRPC payload from the socket.
        return self.parse_response(self.opener.open(req))
class HTTPProxyTransport(Urllib2Transport):
    """!HTTP proxy transport for the XMLRPC module."""

    def __init__(self, proxies, use_datetime=0):
        self._proxies = proxies
        proxy_handler = urllib2.ProxyHandler(proxies)
        Urllib2Transport.__init__(self, urllib2.build_opener(proxy_handler), use_datetime)

    def get_proxy_name(self):
        """!Return the proxy connection parameters.

        @return dictionary with the proxy parameters
        """
        return self._proxies
| spectal/cobbler_tornado | modules/httpproxy.py | httpproxy.py | py | 1,301 | python | en | code | 0 | github-code | 36 |
35658678468 | """The filtersets tests module."""
import pytest
from django.db.models.query import QuerySet
from django.http.request import HttpRequest
from communication.filtersets import (_get_interlocutors, _get_recipients,
_get_reviews, _get_senders)
pytestmark = pytest.mark.django_db
def test_get_reviews(reviews: QuerySet):
    """Should return the filtered list of reviews."""
    # No request at all -> nothing to filter on, empty queryset.
    assert not _get_reviews(None).count()

    request = HttpRequest()
    request.user = reviews[0].professional.user
    filtered = _get_reviews(request)
    assert filtered.count() == 1
    assert filtered[0].professional.user == reviews[0].professional.user
def test_get_recipients(messages: QuerySet):
    """Should return recipients."""
    # No request at all -> empty queryset.
    assert not _get_recipients(None).count()

    request = HttpRequest()
    request.user = messages[0].sender
    recipients = _get_recipients(request)
    assert recipients.count() == 1
    assert recipients[0] == messages[0].recipient
def test_get_senders(messages: QuerySet):
    """Should return senders."""
    # No request at all -> empty queryset.
    assert not _get_senders(None).count()

    request = HttpRequest()
    request.user = messages[0].recipient
    senders = _get_senders(request)
    assert senders.count() == 1
    assert senders[0] == messages[0].sender
def test_get_interlocutors(messages: QuerySet):
    """Should return interlocutors."""
    # No request at all -> empty queryset.
    assert not _get_interlocutors(None).count()

    request = HttpRequest()
    request.user = messages[0].recipient
    interlocutors = _get_interlocutors(request)
    assert interlocutors.count() == 1
    assert interlocutors[0] == messages[0].sender
| webmalc/d8base-backend | communication/tests/filtersets_tests.py | filtersets_tests.py | py | 1,658 | python | en | code | 0 | github-code | 36 |
from django.core.exceptions import ValidationError
from django.db import models
from django.urls import reverse
from django.utils.text import slugify
from django_resized import ResizedImageField
from .base import BaseModel
from .images import Images
def file_size(value):
    """Upload validator: reject files larger than 6 MB."""
    max_bytes = 6 * 1024 * 1024
    if value.size > max_bytes:
        raise ValidationError("Plik który chcesz wrzucić jest większy niż 6MB.")
class Articles(BaseModel):
    """Blog article with SEO metadata and an optional resized main image."""

    id = models.AutoField(primary_key=True)
    # Deleting a category cascades to its articles.
    category = models.ForeignKey(
        "category", on_delete=models.CASCADE, verbose_name="Kategoria artykułu"
    )
    title = models.CharField(verbose_name="Tytyuł artykułu", max_length=256)
    # Regenerated from the title on every save (see save() below).
    slug = models.SlugField(verbose_name="Slug", blank=True, null=True, max_length=256)
    body = models.TextField(verbose_name="Treść artukułu")
    image = ResizedImageField(
        verbose_name="Zdjęcie główne",
        size=[1280, 960],
        upload_to="images/articles/",
        validators=[file_size],
        null=True,
        blank=True,
    )
    image_alt = models.CharField(
        verbose_name="Alternatywny text dla obrazka",
        max_length=125,
        blank=True,
        null=True,
    )
    image_title = models.CharField(
        verbose_name="Title dla obrazka", blank=True, null=True, max_length=70
    )
    meta_description = models.CharField(
        verbose_name="Meta description dla artykułu", blank=True, null=True, max_length=160
    )
    meta_title = models.CharField(
        verbose_name="Meta title dla artykułu", blank=True, null=True, max_length=60
    )

    def save(self, *args, **kwargs):
        # Keep the slug in sync with the title on every save.
        self.slug = slugify(self.title)
        # Bug fix: forward *args/**kwargs to super().save(); the previous
        # call dropped them, silently breaking save(update_fields=...),
        # using=..., etc.
        super(Articles, self).save(*args, **kwargs)

    def get_absolute_url(self):
        """Return the canonical detail URL for this article."""
        return reverse(
            "article_details",
            kwargs={
                "category": self.category.slug,
                "title": self.slug,
                "pk": self.id,
            },
        )

    class Meta:
        ordering = ("-created_time",)
        verbose_name_plural = "Artykuły"

    def images(self):
        """Return the gallery images attached to this article."""
        return Images.objects.filter(article_id=self)

    def __str__(self):
        return self.category.name + ", " + self.title
| KennyDaktyl/miktel_shop | web/models/articles.py | articles.py | py | 2,215 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.