seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
74761603857 | from rest_framework import serializers
from . import models
from yggram.users import models as user_models
class SmallImageSerializer(serializers.ModelSerializer):
    """ Used for the notifications """

    class Meta:
        model = models.Image
        fields = (
            'file',
        )
class CountImageSerializer(serializers.ModelSerializer):
    """Image plus its like/comment counters."""

    class Meta:
        model = models.Image
        fields = (
            'file',
            'like_count',
            'comment_count'
        )
class FeedUserSerializer(serializers.ModelSerializer):
    """Minimal user representation shown in feeds (username + avatar)."""

    class Meta:
        model = user_models.User
        fields = (
            'username',
            'profile_image'
        )
class CommentSerializer(serializers.ModelSerializer):
    """Comment with its creator nested read-only."""

    creator = FeedUserSerializer(read_only=True)

    class Meta:
        model = models.Comment
        fields = (
            'id',
            'message',
            'creator'
        )
class LikeSerializer(serializers.ModelSerializer):
    """Exposes every field of the Like model."""

    class Meta:
        model = models.Like
        fields = '__all__'
class ImageSerializer(serializers.ModelSerializer):
    """Full image representation with nested comments and creator.

    NOTE(review): `comments` and `creator` are not marked read_only here,
    unlike CommentSerializer.creator — presumably writes are never attempted;
    confirm against the views using this serializer.
    """

    comments = CommentSerializer(many=True)
    creator = FeedUserSerializer()

    class Meta:
        model = models.Image
        fields = (
            'id',
            'file',
            'location',
            'caption',
            'comments',
            'like_count',
            'creator',
            'created_at',
        )
class InputImageSerializer(serializers.ModelSerializer):
    """Serializer for image-upload input (file, location, caption)."""

    class Meta:
        # Option 1 for avoiding required-field errors such as
        # "file": ["No file was submitted."], "caption": ["This field is required."]:
        # file = serializers.FileField(required=False) => but that changes the serializer itself.
        model = models.Image
        fields = (
            'file',
            'location',
            'caption',
        )
| zedonora/yggram | yggram/images/serializers.py | serializers.py | py | 1,965 | python | en | code | 0 | github-code | 13 |
12904532595 | import numpy as np
debug = False
# This class runs games
# it takes in the following:
# -> agent_a
# -> agent_b
# -> A payoff table in the following format:
# [[1,1], [1,0]
# [0,1], [0,0]]
# where 0 and 1 are the moves available to
# agent_a and agent_b in symmetric 2-player games
#
# it then runs the game and stores the following bits of data:
# -> (agent_a, agent_b) actions and payoffs
# -> (agent_a, agent_b) whether actions made at random or greedily
class GameRunner:
    """
    Runs repeated rounds of a symmetric 2-player game between two learning
    agents and records per-round actions, payoffs and action probabilities.

    The payoff table is four [payoff_a, payoff_b] pairs ordered by the action
    pair (a=1, b=1), (a=1, b=0), (a=0, b=1), (a=0, b=0) — e.g. for the
    prisoner's dilemma: [[3, 3], [0, 5], [5, 0], [0, 0]].

    Agents must expose select_action(sm=...), update_q_value(action, payoff),
    decay_alpha(), and an ``rg`` attribute (was the action random or greedy?).
    """

    # Set to True to print per-round diagnostics (replaces the old module-level
    # ``debug`` global, so the class is self-contained).
    DEBUG = False

    # Kept for backward compatibility with code that read this class attribute.
    num_episodes = 10000

    def __init__(self, agent_a, agent_b, payoff_table):
        self.agent_a = agent_a
        self.agent_b = agent_b
        self.payoff_table = np.array(payoff_table)
        # All history/counter state is per-instance now.  It used to live in
        # class attributes, so every GameRunner shared (and appended to) the
        # same lists — a bug as soon as a second runner was created.
        self.agent_a_payoffs = []   # payoff at the end of each round
        self.agent_b_payoffs = []
        self.agent_a_action = []    # action selected by each agent
        self.agent_b_action = []
        self.agent_a_rg = []        # greedy or at random?
        self.agent_b_rg = []
        self.round_count = 0        # incremented at the end of each round
        # Number of times 0 or 1 was chosen by agent a or b.
        self.a_one_count = 0
        self.b_one_count = 0
        self.a_zero_count = 0
        self.b_zero_count = 0
        # Empirical probabilities of each action over time.
        self.a_zero_prob = []
        self.b_zero_prob = []
        self.a_one_prob = []
        self.b_one_prob = []

    def play_episodes(self, num_episodes, decay_alpha_a=False, decay_alpha_b=False,
                      decay_point_a=None, decay_point_b=None):
        """
        Play ``num_episodes`` rounds; optionally decay each agent's learning
        rate once the episode index passes its decay point.

        decay_point_a/decay_point_b default to num_episodes / 2.  (The old
        defaults were evaluated once at class-definition time, so they were
        always 5000 regardless of the num_episodes actually passed in.)
        """
        if decay_point_a is None:
            decay_point_a = num_episodes / 2
        if decay_point_b is None:
            decay_point_b = num_episodes / 2
        for i in range(num_episodes):
            self.pd_play_round()
            if decay_alpha_a and i >= decay_point_a:
                self.agent_a.decay_alpha()
            if decay_alpha_b and i >= decay_point_b:
                self.agent_b.decay_alpha()

    def pd_play_round(self):
        """Play one round: select actions, look up payoffs, update both
        agents' Q-values, and record the round."""
        action_a = self.agent_a.select_action(sm=True)
        action_b = self.agent_b.select_action(sm=True)
        if self.DEBUG:
            print("ACTION SELECTED IN GAME BY A: ", action_a)
            print("ACTION SELECTED IN GAME BY B: ", action_b)
        # Payoff-table row for each (action_a, action_b) pair; replaces the
        # if/elif chain.  Unknown action pairs fall back to zero payoffs,
        # matching the old initialisation.
        row_for_actions = {(1, 1): 0, (1, 0): 1, (0, 1): 2, (0, 0): 3}
        row = row_for_actions.get((action_a, action_b))
        if row is None:
            payoff_a = 0
            payoff_b = 0
        else:
            payoff_a = self.payoff_table[row, 0]
            payoff_b = self.payoff_table[row, 1]
        self.agent_a.update_q_value(action_a, payoff_a)
        self.agent_b.update_q_value(action_b, payoff_b)
        if self.DEBUG:
            print("Payoff A: ", payoff_a)
            print(self.agent_a.q_table)
            print("Payoff B: ", payoff_b)
            print(self.agent_b.q_table)
            print("ROUND CONT ", self.round_count)
        # Increment before store_round so the probability series divide by >= 1.
        self.round_count += 1
        self.store_round(action_a=action_a, payoff_a=payoff_a, action_b=action_b, payoff_b=payoff_b,
                         rg_a=self.agent_a.rg, rg_b=self.agent_b.rg)

    def store_round(self, action_a, payoff_a, action_b, payoff_b, rg_a, rg_b):
        """Append this round's actions/payoffs and refresh the running
        action-probability series (rounded to 2 decimals)."""
        self.agent_a_action.append(action_a)
        self.agent_b_action.append(action_b)
        self.agent_a_payoffs.append(payoff_a)
        self.agent_b_payoffs.append(payoff_b)
        if action_a == 0:
            self.a_zero_count += 1
        elif action_a == 1:
            self.a_one_count += 1
        self.a_one_prob.append(round(self.a_one_count / self.round_count, 2))
        self.a_zero_prob.append(round((self.round_count - self.a_one_count) / self.round_count, 2))
        if action_b == 0:
            self.b_zero_count += 1
        else:
            self.b_one_count += 1
        self.b_one_prob.append(round((self.round_count - self.b_zero_count) / self.round_count, 2))
        self.b_zero_prob.append(round(self.b_zero_count / self.round_count, 2))
        self.agent_a_rg.append(rg_a)
        self.agent_b_rg.append(rg_b)
| yaserBK/QLearning | Game_Runner.py | Game_Runner.py | py | 5,732 | python | en | code | 1 | github-code | 13 |
40924869271 | import xml.etree.ElementTree as ET
import sys
def main(filename):
    """Parse an ER-model XML file and print its logical (relational) transformation.

    Prints the parsed entities and relationships, then the resulting
    relations via logical_transform().
    """
    tree = ET.parse(filename)
    root = tree.getroot()
    # Parse once and reuse; the old code called get_entities()/
    # get_relationships() twice each, re-walking the tree for no reason.
    entities = get_entities(root)
    relationships = get_relationships(root)
    print(entities)
    print(relationships)
    logical_transform(entities, relationships)
def get_entities(root):
    """Return a list of entity dicts from the <entities> element.

    Each dict carries the entity element's XML attributes plus an
    'attributes' key holding the attrib dicts of its <attribute> children.
    """
    result = []
    for node in root.find('entities'):
        record = node.attrib
        record['attributes'] = [attr.attrib for attr in node.findall('attribute')]
        result.append(record)
    return result
def get_entity_with_id(entities, id):
    """Return the entity whose 'id' matches, or None if no entity does."""
    matches = (candidate for candidate in entities if candidate['id'] == id)
    return next(matches, None)
def get_relationships(root):
    """Return the attribute dict of every child of the <relationships> element."""
    rels = []
    for node in root.find('relationships'):
        rels.append(node.attrib)
    return rels
def get_primary_key(entity):
    """Return the value of the entity's primary-identifier attribute, or None.

    An attribute is the primary identifier when its 'pi' flag is 'true'.
    """
    for attribute in entity['attributes']:
        # .get() tolerates attributes that omit the 'pi' flag entirely;
        # attribute['pi'] raised KeyError for those.
        if attribute.get('pi') == 'true':
            return attribute['value']
    return None
def get_relation_from_entity(relations, entity):
    """Return the first relation keyed by this entity's name, or None."""
    target = entity['name']
    for relation in relations:
        if target in relation:
            return relation
    return None
"""
Performs steps in the logical transformation
"""
def logical_transform(entities, relationships):
relations = step_1(entities)
pretty_print_relations(step_4(entities, relationships, relations))
"""
Performs the first step in the logical transformation, which is to create a relation
for each *strong* entity, where composite attributes are expanded and multi-value
attributes are excluded.
Paramters:
entities (list(entity)): A list of dictionaries representing entities.
Returns:
relations (list(relation)): A list of relations (tables)
"""
def step_1(entities):
relations = []
for entity in entities:
simple_attributes = list(map(lambda a : a['value'], entity['attributes']))
relations.append({ entity['name'] : simple_attributes })
return relations
"""
For each binary 1 TO N Relationship identify the relations that represent the participating entity at the N (i.e many) side of the relationship. Include as foreign key in the relation that holds the N side, the primary key of the other entity (that holds the 1 side).
"""
def step_4(entities, relationships, relations):
for relationship in relationships:
if relationship['value'] == 'one-many':
# Put customer (from) pk as fk in order (to)
foreign_key = get_primary_key(get_entity_with_id(entities, relationship['fromEnt']))
relation = get_relation_from_entity(relations, get_entity_with_id(entities, relationship['toEnt']))
relation[list(relation.keys())[0]].append(foreign_key)
return relations
def pretty_print_relations(relations):
    """Print each relation on its own line as NAME(attr1, attr2, ...)."""
    for relation in relations:
        for name, attributes in relation.items():
            print("{}({})".format(name.upper(), ", ".join(attributes)))
if __name__ == '__main__':
    # CLI entry point: expects the ER-model XML file path as the first argument.
    if len(sys.argv) < 2:
        print("You have not provided a file.")
    else:
        main(sys.argv[1])
37284623980 | from fonctions.fonctions_affichage import afficher_pile, afficher_file, afficher_silos
from fonctions.algo_repartition.algorithme import algorithme_tri_silos
from fonctions.algo_repartition.vidange_silos import vidange_silos
from fonctions.fonctions_piles_files import empiler, defiler
from fonctions.fonction_generation import generation_tube
from acsii_art.ascii_art import texte_debut_programme, texte_au_revoir
from time import time
if __name__ == '__main__':
    # Interactive "emergency evacuation" demo: drain a queue (tube) into a
    # stack (temporary silo), sort across three silos, then merge them back.
    print(texte_debut_programme, end='')
    input('• [✔] Appuyez sur entrée pour déclancher l\'évacuation d\'urgence...')

    # Create the main tube (queue of 5 containers) and every silo needed.
    tube = generation_tube(5)
    silo_temporaire = []
    silos = [[], [], []]
    silo_final = []

    print('\n• [~] Vidange du Tube principal en cours...', end='\n\n')
    afficher_file(tube, ' Tube ')
    afficher_pile(silo_temporaire, 'silo_temp.')

    # Dequeue the whole tube into the temporary silo (queue -> stack).
    for _ in range(len(tube)):
        empiler(silo_temporaire, defiler(tube))

    print('\n• [✔] Vidange du Tube principal terminé !', end='\n\n')
    afficher_file(tube, ' Tube ')
    afficher_pile(silo_temporaire, 'silo_temp.')

    print('\n• [~] Répartition du silo temporaire en cours...')
    # Time the sorting/partition step.
    start_timer = time()
    algorithme_tri_silos(silos, silo_temporaire)
    afficher_silos(silos)
    print('• [✔] Répartition du silo temporaire terminé en {:e} secondes ! '.format(time() - start_timer), end='\n\n')

    print('• [~] Regroupement des conteneurs en cours...', end='\n\n')
    # Merge the three silos back into the final silo.
    vidange_silos(silos, silo_final)
    afficher_pile(silo_final, 'silo_final')

    print('\n• [✔] Regroupement des conteneurs terminé ! À bientôt :)', end='\n\n')
    print(texte_au_revoir, end='\n\n')
| romainflcht/APP1 | main.py | main.py | py | 1,790 | python | fr | code | 0 | github-code | 13 |
26297117424 | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 11 19:41:21 2018
@author: DELL
"""
#https://www.analyticsvidhya.com/blog/2017/09/naive-bayes-explained/
# =============================================================================
# How to build a basic model using Naive Bayes in Python?
# Again, scikit learn (python library) will help here to build a Naive Bayes model in Python. There are three types of Naive Bayes model under scikit learn library:
#
# Gaussian: It is used in classification and it assumes that features follow a normal distribution.
#
# Multinomial: It is used for discrete counts. For example, let’s say, we have a text classification problem. Here we can consider bernoulli trials which is one step further and instead of “word occurring in the document”, we have “count how often word occurs in the document”, you can think of it as “number of times outcome number x_i is observed over the n trials”.
#
# Bernoulli: The binomial model is useful if your feature vectors are binary (i.e. zeros and ones). One application would be text classification with ‘bag of words’ model where the 1s & 0s are “word occurs in the document” and “word does not occur in the document” respectively.
#
# Based on your data set, you can choose any of above discussed model. Below is the example of Gaussian model.
#
# Python Code
# =============================================================================
# Import Library of Gaussian Naive Bayes model
from sklearn.naive_bayes import GaussianNB
import numpy as np

# assigning predictor and target variables:
# x holds 12 samples with 2 features each; Y holds the class label (3 or 4)
# for each sample.
x = np.array([[-3,7],[1,5], [1,2], [-2,0], [2,3], [-4,0], [-1,1], [1,1], [-2,2], [2,7], [-4,1], [-2,7]])
Y = np.array([3, 3, 3, 3, 4, 3, 3, 4, 3, 4, 4, 4])

# Create a Gaussian Classifier
model = GaussianNB()

# Train the model using the training sets
model.fit(x, Y)

# Predict Output for two unseen feature vectors and print the labels.
predicted= model.predict([[1,2],[3,4]])
print(predicted)
| khanmbjob/python | TextClassification_NB.py | TextClassification_NB.py | py | 1,980 | python | en | code | 0 | github-code | 13 |
43397815861 | from Functions import PrimeSieve
def Solve(p=600851475143):
    """Return the largest prime factor of p (Project Euler problem 3).

    Repeatedly divides p by successive primes from PrimeSieve; when p is
    reduced to 1, the prime currently held is the largest factor.
    """
    prime = PrimeSieve()
    f = next(prime)
    while p != 1:
        if p % f == 0:
            # Integer division keeps p an exact int.  The original used
            # `p = p / f`, which turns p into a float and can lose precision
            # for inputs above 2**53.
            p //= f
        else:
            f = next(prime)
    return f


if __name__ == '__main__':
    print(Solve())
| Adam-Hoelscher/ProjectEuler.py | Problem3.py | Problem3.py | py | 276 | python | en | code | 0 | github-code | 13 |
20742770454 | import sys
def isP(n: int):
    """Return True if n is prime, False otherwise.

    Trial division by odd candidates up to sqrt(n) — O(sqrt n) instead of
    the original O(n) loop over every integer below n.
    """
    if n < 2:
        return False
    if n < 4:
        return True  # 2 and 3 are prime
    if n % 2 == 0:
        return False
    i = 3
    while i * i <= n:
        if n % i == 0:
            return False
        i += 2
    return True
if __name__ == "__main__":
min, max = int(sys.argv[1]), int(sys.argv[2])
r = [x for x in range(max, min - 1, -1) if isP(x) and x != 2]
rs = []
for n in r:
if(isP(sum(rs) + n)):
rs.append(n)
print(sum(rs[0:3])) | mgirard772/python_month_of_code | day14/mary_prime_get.py | mary_prime_get.py | py | 432 | python | en | code | 0 | github-code | 13 |
71088759058 | # @Time : 2018/7/21 10:05
# @Author : cap
# @FileName: find_jpg.py
# @Software: PyCharm Community Edition
# @introduction: # 查找所有jpg文件并分类,以dic和list的形式
import os
import pickle
import xml.etree.ElementTree as ET
# Maps integer class ids to the Chinese fabric-defect label names used as
# training subdirectory names (0 is "normal", the non-defective class).
dict_map = {0: '正常', 1: '吊经', 2: '擦洞', 3: '跳花', 4: '毛洞', 5: '织稀', 6: '扎洞',
            7: '缺经', 8: '毛斑', 9: '边扎洞', 10: '缺纬', 11: '油渍', 12: '污渍',
            13: '嵌结', 14: '弓纱', 15: '破边', 16: '边针眼', 17: '吊纬', 18: '回边',
            19: '剪洞', 20: '黄渍', 21: '楞断', 22: '破洞', 23: '粗纱', 24: '织入', 25: '吊弓',
            26: '扎梳', 27: '愣断', 28: '擦伤', 29: '擦毛', 30: '线印', 31: '经粗纱', 32: '经跳花',
            33: '蒸呢印', 34: '边缺纬', 35: '修印', 36: '厚薄段', 37: '扎纱', 38: '毛粒',
            39: '紧纱', 40: '纬粗纱', 41: '结洞', 42: '耳朵', 43: '边白印', 44: '厚段',
            45: '夹码', 46: '明嵌线', 47: '边缺经'}
def get_file_names(path):
    """Walk `path` and collect test jpgs, training jpgs grouped by label, and XML annotations.

    Returns:
        test_list_files (list): jpg paths found under xuelang_round1_test_* dirs.
        train_dict_file (dict): label id -> list of training jpg paths.
        xmls (dict): jpg file stem -> flattened annotation dict (tag -> text),
            or None when no .xml sits next to the jpg.
    """
    # Collect every training/test jpg under the given path.
    test_list_files = []
    train_dict_file = {}  # key: label, value: list of file paths
    for cur_dir, sub_dir, sub_files in os.walk(path):
        # Test-set directories: .../xuelang_round1_test_*/
        if cur_dir.split(os.sep)[-1].startswith('xuelang_round1_test_'):
            for test_file in sub_files:
                if test_file.endswith('.jpg'):
                    test_file_path = os.path.join(cur_dir, test_file)
                    test_list_files.append(test_file_path)
        # Training directories: .../xuelang_round1_train_*/<defect name>/
        # NOTE(review): assumes cur_dir always has >= 2 path components;
        # [-2] would raise IndexError on a separator-free top-level path.
        elif cur_dir.split(os.sep)[-2].startswith('xuelang_round1_train_'):
            # The directory name is the (Chinese) defect label...
            label = cur_dir.split(os.sep)[-1]
            # ...mapped back to its integer id via dict_map (name left
            # unchanged when no mapping exists).
            for key, value in dict_map.items():
                if value == label:
                    label = key
                    break
            if label not in train_dict_file.keys():
                train_dict_file[label] = []
            for train_file in sub_files:
                if train_file.endswith('.jpg'):
                    train_file_path = os.path.join(cur_dir, train_file)
                    train_dict_file[label].append(train_file_path)
    # train nums = 2022
    # Gather XML annotation info as {filename stem: {tag: text}}.
    xmls = {}
    for label, filepaths in train_dict_file.items():
        for file_path in filepaths:
            xml = {}
            file_path = file_path.replace('.jpg', '.xml')
            if not os.path.isfile(file_path):
                # No annotation file next to this jpg.
                xmls[file_path.split(os.sep)[-1][:-4]] = None
            else:
                root = ET.parse(file_path).getroot()
                # Flatten every element in the tree into tag -> text.
                for ele in root.iter():
                    xml[ele.tag] = ele.text
                xmls[xml['filename'][:-4]] = xml
    return test_list_files, train_dict_file, xmls
if __name__ == '__main__':
    # Smoke test against a local copy of the dataset.
    test_list_files, train_dict_file, xmls = get_file_names('D:\\softfiles\\workspace\\games\\xuelang')
    print(test_list_files)
    # print(test_list_files, train_dict_file, xmls)
    # TODO (original note): sort/inspect the number of samples per label.
| zhnin/competitions | tianchi/xuelang/test/find_jpg.py | find_jpg.py | py | 3,137 | python | en | code | 1 | github-code | 13 |
41568419393 | import logging
import requests
from binstar_client import errors
from binstar_client.utils import jencode
import binstar_client
import binstar_build_client
from binstar_build_client.utils.worker_stats import worker_stats
log = logging.getLogger('binstar.build')
class BuildQueueMixin(object):
    """REST calls for managing build queues, workers and build jobs.

    Mixed into the binstar HTTP client; relies on the host class providing
    ``self.domain``, ``self.session`` and ``self._check_response()``.
    """

    def register_worker(self, username, queue_name, platform, hostname, dist, name):
        """Register this machine as a worker on the queue; return its worker id."""
        url = '%s/build-worker/%s/%s' % (self.domain, username, queue_name)
        data, headers = jencode(platform=platform, hostname=hostname, dist=dist,
                                binstar_version=binstar_client.__version__,
                                binstar_build_version=binstar_build_client.__version__,
                                name=name)
        res = self.session.post(url, data=data, headers=headers)
        self._check_response(res, [200])
        return res.json()['worker_id']

    def remove_worker(self, username, queue_name, worker_id):
        '''Un-register a worker

        returns true if worker existed and was removed
        '''
        url = '%s/build-worker/%s/%s/%s' % (self.domain, username, queue_name, worker_id)
        res = self.session.delete(url)
        # 404 is tolerated: the worker may already be gone.
        self._check_response(res, [200, 404])
        return res.status_code == 200

    def pop_build_job(self, username, queue_name, worker_id):
        '''Pop the next build job off the queue for this worker.

        Returns the job document as a dict.  (Old docstring was a copy-paste
        of remove_worker's.)
        '''
        url = '%s/build-worker/%s/%s/%s/jobs' % (self.domain, username, queue_name, worker_id)
        res = self.session.post(url)
        self._check_response(res, [200])
        return res.json()

    def log_build_output(self, username, queue_name, worker_id, job_id, msg):
        '''Fallback log handler if /tagged-log endpoint does not exist.

        Returns True when the server asks the worker to terminate the build.
        '''
        url = '%s/build-worker/%s/%s/%s/jobs/%s/log' % (self.domain, username, queue_name, worker_id, job_id)
        res = self.session.post(url, data=msg)
        self._check_response(res, [201, 200])
        try:
            result = res.json().get('terminate_build', False)
        except ValueError:
            # Body was not JSON; assume the build should continue.
            result = False
        return result

    def log_build_output_structured(self,
                                    username,
                                    queue_name,
                                    worker_id,
                                    job_id,
                                    msg,
                                    metadata):
        '''Call /tagged-log endpoint or fallback to plain log '''
        # Once the structured endpoint has 404'd, keep using the plain log.
        if getattr(self, 'log_build_output_structured_failed', False):
            return self.log_build_output(username, queue_name, worker_id,
                                         job_id, msg)
        url = '%s/build-worker/%s/%s/%s/jobs/%s/tagged-log' % (self.domain, username, queue_name, worker_id, job_id)
        content = metadata.copy()
        content['msg'] = msg
        res = self.session.post(url, data=content)
        try:
            self._check_response(res, [201, 200])
        except errors.NotFound:
            if hasattr(self, 'log_build_output_structured_failed'):
                # it might be a missing build or job that's not found.
                # structured log has succeeded at least once, so don't give up!
                raise
            log.info('Will not attempt structured '
                     'logging with tags, falling back '
                     'to plain build log. There is no '
                     'Repository endpoint %s', url, exc_info=True)
            self.log_build_output_structured_failed = True
            return self.log_build_output(username, queue_name,
                                         worker_id, job_id,
                                         msg)
        else:
            # Structured logging works: remember that and parse the reply.
            self.log_build_output_structured_failed = False
            try:
                result = res.json().get('terminate_build', False)
            except ValueError:
                result = False
            return result

    def finish_build(self, username, queue_name, worker_id, job_id, status='success', failed=False):
        """Mark the job finished with the given status; return the server response."""
        url = '%s/build-worker/%s/%s/%s/jobs/%s/finish' % (self.domain, username, queue_name, worker_id, job_id)
        data, headers = jencode(status=status, failed=failed)
        res = self.session.post(url, data=data, headers=headers)
        self._check_response(res, [200])
        return res.json()

    def push_build_job(self, username, queue_name, worker_id, job_id):
        """Push the job back onto the queue (so another worker can take it)."""
        url = '%s/build-worker/%s/%s/%s/jobs/%s/push' % (self.domain, username, queue_name, worker_id, job_id)
        res = self.session.post(url)
        self._check_response(res, [201])
        return

    def fetch_build_source(self, username, queue_name, worker_id, job_id):
        """Download the job's build source as a raw stream.

        Returns None on 304 (not modified); follows a 302 redirect to the
        storage backend manually.
        """
        url = '%s/build-worker/%s/%s/%s/jobs/%s/build-source' % (self.domain, username, queue_name, worker_id, job_id)
        res = self.session.get(url, allow_redirects=False, stream=True)
        self._check_response(res, allowed=[302, 304, 200])
        if res.status_code == 304:
            return None
        elif res.status_code == 302:
            res = requests.get(res.headers['location'], stream=True, verify=True)
        return res.raw

    def build_queues(self, username=None):
        """List build queues, optionally limited to one user."""
        if username:
            url = '%s/build-queues/%s' % (self.domain, username)
        else:
            url = '%s/build-queues' % (self.domain)
        res = self.session.get(url)
        self._check_response(res)
        return res.json()

    def build_queue(self, username, queuename):
        """Fetch a single build-queue document."""
        url = '%s/build-queues/%s/%s' % (self.domain, username, queuename)
        res = self.session.get(url)
        self._check_response(res)
        return res.json()

    def remove_build_queue(self, username, queuename):
        """Delete a build queue."""
        url = '%s/build-queues/%s/%s' % (self.domain, username, queuename)
        res = self.session.delete(url)
        self._check_response(res, [201])
        return

    def add_build_queue(self, username, queuename):
        """Create a new build queue."""
        url = '%s/build-queues/%s/%s' % (self.domain, username, queuename)
        data, headers = jencode()
        res = self.session.post(url, data=data, headers=headers)
        self._check_response(res, [201])
        return

    def build_backlog(self, username, queuename):
        """Return the list of jobs waiting on the queue (empty when none)."""
        url = '%s/build-queues/%s/%s/jobs' % (self.domain, username, queuename)
        res = self.session.get(url)
        self._check_response(res, [200])
        return res.json().get('jobs', [])

    def upload_worker_stats(self, username, queue_name, worker_id):
        """Upload current machine statistics for this worker; return the response."""
        url = '%s/build-worker/%s/%s/%s/worker-stats' % (self.domain, username, queue_name, worker_id)
        data, headers = jencode(worker_stats=worker_stats())
        res = self.session.post(url, data=data, headers=headers)
        self._check_response(res, [201])
        return res.json()
| anaconda-graveyard/anaconda-build | binstar_build_client/mixins/build_queue.py | build_queue.py | py | 6,823 | python | en | code | 2 | github-code | 13 |
27345176390 | print("uno")
from tkinter import *
raiz= Tk()
raiz.title("primera ventana")
raiz.config(width=300, height=300)
raiz.resizable(0,0)
raiz.iconbitmap("imagen.ico")
raiz.config(bg="red")
raiz.mainloop() | Williams5656/EJE | miprimer.py | miprimer.py | py | 199 | python | es | code | 1 | github-code | 13 |
71497022419 | # 언어 : Python
# 날짜 : 2021.08.23
# 문제 : BOJ > 음식물 피하기 (https://www.acmicpc.net/problem/1743)
# 티어 : 실버 1
# ======================================================================
import sys
sys.setrecursionlimit(100000)
def dfs(r, c):
    """Flood-fill from (r, c): visit every 4-connected '#' cell, growing the
    module-global cur_ans by one per newly visited cell.

    Reads module globals R, C (grid size), arr (grid), visited.
    """
    global cur_ans
    # Row/column offsets of the four neighbours.
    dx, dy = [0, 0, 1, -1], [1, -1, 0, 0]
    for i in range(4):
        nr, nc = dx[i] + r, dy[i] + c
        if 0 <= nr < R and 0 <= nc < C:
            if not visited[nr][nc] and arr[nr][nc] == "#":
                cur_ans += 1
                visited[nr][nc] = True
                dfs(nr, nc)
# Input and run
R, C, K = map(int, input().split())
arr = [["." for _ in range(C)] for _ in range(R)]
for _ in range(K):
    r, c = map(int, input().split())
    arr[r - 1][c - 1] = "#"  # 1-indexed input -> 0-indexed grid
visited = [[False for _ in range(C)] for _ in range(R)]
answer = 0
# Run DFS from every unvisited food cell; track the largest blob size.
for r in range(R):
    for c in range(C):
        if not visited[r][c] and arr[r][c] == "#":
            visited[r][c] = True
            cur_ans = 1
            dfs(r, c)
            answer = max(answer, cur_ans)
# Answer: size of the largest 4-connected group of food cells
print(answer)
print(answer) | eunseo-kim/Algorithm | BOJ/최고빈출 DFS, BFS 기본문제/04_음식물피하기.py | 04_음식물피하기.py | py | 1,084 | python | en | code | 1 | github-code | 13 |
71763755858 | import numpy as np
from matplotlib import pyplot as plt
from ANN_lab_3.hopfield_net import HopfieldNet, find_two_largest_factors
def display_image(pict, shape, title=None, show=True, fig=None, ax=None):
    """
    Render a flat pattern vector as a grayscale image.

    :param pict: flat array, reshaped to `shape` for display
    :param shape: (rows, cols) target image shape
    :param title: optional axes title
    :param show: when True, display immediately; when False, return (fig, ax)
    :param fig: existing figure to draw on (created when either is None)
    :param ax: existing axes to draw on (created when either is None)
    :return: (fig, ax) when show is False, otherwise None
    """
    if fig is None or ax is None:
        fig, ax = plt.subplots()
    ax.imshow(pict.reshape(shape), cmap='gray')
    ax.axis('off')
    if title is not None:
        ax.set_title(title)
    if not show:
        return fig, ax
    plt.show()
if __name__ == '__main__':
    pict_shape = (32, 32)
    pict_size = pict_shape[0] * pict_shape[1]
    test_n = 3
    # import data from .dat file: 11 flattened 32x32 patterns
    train_data = np.loadtxt("data/pict.dat", delimiter=",", dtype=int).reshape(11, pict_size)
    # Display all 11 patterns on a 3x4 grid (last cell left blank).
    fig, axs = plt.subplots(3,4)
    for i in range(11):
        display_image(train_data[i], pict_shape, title=f"Pattern {i+1}", show=False, fig=fig, ax=axs[i//4, i%4])
    axs[2, 3].axis('off')
    plt.show()
    plt.close(fig)
    hopfield = HopfieldNet(pict_size, pict_shape)
    # Train on the first test_n patterns only.
    test = np.zeros((test_n, pict_size))
    for i in range(test_n):
        test[i] = train_data[i]
    hopfield.train(test, sequential=True)
    # Test the network: lay recalled patterns out on a near-square grid.
    test_dim_1, test_dim_2 = find_two_largest_factors(test_n)
    one_dim = False
    if test_dim_1 == 1:
        # subplots() returns a 1-D axes array in this case.
        one_dim = True
    fig, axs = plt.subplots(test_dim_1, test_dim_2)
    for i in range(test_n):
        output = hopfield.run(train_data[i], sequential=True, update_order="random")
        if one_dim:
            display_image(output, pict_shape, title=f"Pattern {i}", show=False, fig=fig, ax=axs[i])
        else:
            display_image(output, pict_shape, title=f"Pattern {i}", show=False, fig=fig, ax=axs[i//test_dim_1, i%test_dim_2])
    if not one_dim:
        for i in range(test_dim_1):
            for j in range(test_dim_2):
                axs[i, j].axis('off')
    plt.show()
    plt.close(fig)
    # Test the network on distorted/unseen inputs, 3x3 grid:
    # top row = inputs, middle row = recalled outputs, bottom row = references.
    fig, axs = plt.subplots(3, 3)
    # make plot square
    fig.set_size_inches(8, 8)
    # bottom row is three first patterns
    for i in range(3):
        output = hopfield.run(train_data[i], sequential=True, update_order="random")
        display_image(output, pict_shape, title=f"Pattern {i}", show=False, fig=fig, ax=axs[2, i])
    # middle row is pattern 10, 11 and 1 with 10% noise
    pattern_10 = train_data[9]
    pattern_11 = train_data[10]
    pattern_1 = train_data[0]
    # add noise to pattern 1: flip the sign of ~10% of randomly chosen pixels
    pattern_1_noisy = pattern_1.copy()
    pattern_1_noisy[np.random.randint(0, pict_size, int(0.1*pict_size))] *= -1
    # top row is pattern 10, 11 and 1 with 30% noise
    display_image(pattern_10, pict_shape, title=f"input p10", show=False, fig=fig, ax=axs[0, 0])
    display_image(pattern_11, pict_shape, title=f"input p11", show=False, fig=fig, ax=axs[0, 1])
    display_image(pattern_1_noisy, pict_shape, title=f"input p1 with noise", show=False, fig=fig, ax=axs[0, 2])
    # show results (recall each input; p10 also records an animated gif)
    output_10 = hopfield.run(pattern_10, sequential=True, show_gif=True, update_order="random", gif_name="p10")
    output_11 = hopfield.run(pattern_11, sequential=True, show_gif=False, update_order="random", gif_name="p11")
    output_1 = hopfield.run(pattern_1_noisy, sequential=True, show_gif=False, update_order="random", gif_name="p1")
    display_image(output_10, pict_shape, title=f"output p10", show=False, fig=fig, ax=axs[1, 0])
    display_image(output_11, pict_shape, title=f"output p11", show=False, fig=fig, ax=axs[1, 1])
    display_image(output_1, pict_shape, title=f"output p1 with noise", show=False, fig=fig, ax=axs[1, 2])
    plt.show()
| tommasopiehl/ANN_lab1 | ANN_lab_3/pict_memory.py | pict_memory.py | py | 3,768 | python | en | code | 0 | github-code | 13 |
3835256775 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 8 17:30:59 2018
@author: 606C
"""
import cv2
#import matplotlib.pyplot as plt
mode = True
def get_xy_point(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
print('x= ',x)
print('y= ',y)
img1 = cv2.imread('Fig1.jpg')
cv2.namedWindow('img1')
cv2.setMouseCallback('img1',get_xy_point)
while(1):
cv2.imshow('img1',img1)
k = cv2.waitKey(1) & 0xFF
if k == ord('m'):
mode = not mode
elif k == 27:
break
cv2.destroyAllWindows() | dennis2110/Computer_Vision | homography/find_xy_point.py | find_xy_point.py | py | 540 | python | en | code | 0 | github-code | 13 |
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render

from task_manager.statuses.forms import CreateStatusForm
from task_manager.statuses.models import Status
def index(request):
    """Render the list of all statuses."""
    statuses = Status.objects.all()
    return render( request, 'statuses/index.html', {'statuses': statuses})
def createStatus(request):
    """Show the creation form (GET) or create a new Status (POST).

    On an invalid POST the bound form (with errors) is re-rendered.
    """
    if request.method != 'POST':
        return render(request, 'statuses/create.html', {'form': CreateStatusForm()})
    form = CreateStatusForm(request.POST)
    if form.is_valid():
        form.save()
        return HttpResponseRedirect('/statuses')
    return render(request, 'statuses/create.html', {'form': form})
def deleteStatus(request, pk):
    """Confirm (GET) or perform (POST) deletion of the Status with primary key pk."""
    # get_object_or_404 turns an unknown pk into a 404 response instead of
    # the unhandled Status.DoesNotExist (HTTP 500) the old .get() raised.
    status = get_object_or_404(Status, id=pk)
    form = CreateStatusForm(instance=status)
    if request.method == 'POST':
        status.delete()
        return HttpResponseRedirect('/statuses')
    return render(request, 'statuses/delete.html', {'status': status, 'form': form})
def editStatus(request, pk):
    """Show the edit form (GET) or update the Status (POST).

    On an invalid POST the bound form (with errors) is re-rendered.
    """
    # 404 instead of an unhandled DoesNotExist on a bad pk.
    status = get_object_or_404(Status, id=pk)
    if request.method == 'POST':
        # Validate through the ModelForm instead of writing request.POST
        # straight onto the model — the old code skipped validation entirely.
        form = CreateStatusForm(request.POST, instance=status)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect('/statuses')
    else:
        form = CreateStatusForm(instance=status)
    return render(request, 'statuses/edit.html', {'status': status, 'form': form})
| zhabinka/python-web-development-project-lvl4 | task_manager/statuses/views.py | views.py | py | 1,319 | python | en | code | 0 | github-code | 13 |
3672501012 |
"""
Build a heirarchical bayesian model
Here we to estimate the positive predictive and sensitivity of different gene targets available
for pubchem.
This seems to be a good resource....
https://docs.pymc.io/projects/examples/en/latest/case_studies/hierarchical_partial_pooling.html
Heres another maybe better resource:
https://blog.dominodatalab.com/ab-testing-with-hierarchical-models-in-python
"""
import pandas as pd
import pymc3 as pm
import numpy as np
def do_bayes(data: pd.DataFrame):
    """Build a hierarchical Bayesian model for every gene in `data`.

    Columns should be GeneSymbol, TP, FP, FN, and TN counts.  Posterior
    samples for per-gene sensitivity and PPV are written to
    data/p_sens.csv and data/p_ppv.csv.
    """
    ## using a uniform prior for the probabilities
    # was converging around 0.5. Not sure why.
    # following this: https://blog.dominodatalab.com/ab-testing-with-hierarchical-models-in-python
    # it suggests using a beta distribution
    with pm.Model() as heir_model:
        # Hyper-priors shared by every gene (partial pooling).
        a = pm.Exponential("a", 1)
        b = pm.Exponential("b", 1)
        # priors: one sensitivity and one PPV probability per gene (row)
        p_sens = pm.Beta("p_sens", a, b, shape=data.shape[0])
        p_ppv = pm.Beta("p_ppv", a, b, shape=data.shape[0])
        # Set of observations, in this case we have two observation datasets.
        # likelihoods: TP successes out of TP+FN trials (sensitivity) and
        # out of TP+FP trials (PPV)
        sens = pm.Binomial("obs_sens", p=p_sens, n=data.TP+data.FN, observed=data.TP)
        ppv = pm.Binomial("obs_ppv", p=p_ppv, n=data.TP+data.FP, observed=data.TP)
    with heir_model:
        # NOTE(review): `step` is created but never passed to pm.sample, so
        # the sampler is chosen automatically — confirm whether Metropolis
        # was intended.
        step = pm.Metropolis()
        trace = pm.sample(draws=1000, tune=2000, target_accept=0.95, return_inferencedata=False, cores=1)
    pd.DataFrame(trace["p_sens"], columns=data.GeneSymbol).to_csv('data/p_sens.csv')
    pd.DataFrame(trace["p_ppv"], columns=data.GeneSymbol).to_csv('data/p_ppv.csv')
if __name__ == '__main__':
    # Expects a CSV with GeneSymbol, TP, FP, FN, TN columns.
    data = pd.read_csv('data/for_bayes.csv')
    print(data)
    do_bayes(data)
70331393298 | import os
import json
import requests
from config import validator, COURSE_DIR, COURSE_YAML_DIR, INDEX_YAML
from course import Course
from parser import yaml
from utils import aplus_json, PrintColor
# os.environ['PLUGIN_API'] = 'http://0.0.0.0:8080/api/v1/'
# os.environ['PLUGIN_TOKEN'] = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJzdWIiOiJkZWZfY291cnNlIiwiaWF0IjoxNTYyODI4MzA0L' \
# 'CJpc3MiOiJzaGVwaGVyZCJ9.MUkoD27P6qZKKMM5juL0e0pZl8OVH6S17N_ZFzC7D0cwOgbcDaAO3S1BauXzhQ' \
# 'OneChPs1KEzUxI2dVF-Od_gpN8_IJEnQnk25XmZYecfdoJ5ST-6YonVmUMzKP7UAcvzCFye7mkX7zJ1ADYtda5' \
# '7IUdyaLSPOWnFBSHX5B4XTzzPdVZu1xkRtb17nhA20SUg9gwCOPD6uLU4ml1aOPHBdiMLKz66inI8txPrRK57G' \
# 'n33m8lVp0WTOOgLV5MkCIpkgVHBl50EHcQFA5KfPet3FBLjpp2I1yThQe_n1Zc6GdnR0v_nqX0JhmmDMOvJ5rh' \
# 'IHZ7B0hEtFy9rKUWOWfcug'
# os.environ['PLUGIN_COURSE'] = "def_course"
if __name__ == "__main__":
# Load and parse course index.yaml
PrintColor.header("Start generating {} A+ json".format(os.environ['PLUGIN_COURSE']))
course = Course(COURSE_YAML_DIR)
course.load(INDEX_YAML)
course_data = course.get_data()
# with open("course.json", "w") as course_json:
# json.dump(course_data, course_json, indent=4, sort_keys=True)
# Validate index.yaml
if not validator.validate(course_data, 'index', major=1):
PrintColor.err("Failed: Invalid index.yaml")
raise ValueError('Failed: Invalid index.yaml')
update_url = os.environ['PLUGIN_API'] + os.environ['PLUGIN_COURSE'] + '/update-index-file'
headers = {
'Authorization': 'Bearer {}'.format(os.environ['PLUGIN_TOKEN'])
}
# Send a request to mooc-grader to update index.yaml
r = requests.post(update_url, headers=headers)
if r.status_code != 200:
PrintColor.err("Failed: Update index.yaml failed")
raise Exception('Failed: Invalid index.yaml: ', r.text)
updated_index = r.json()['updated_index']
with open(os.path.join(COURSE_YAML_DIR, 'updated_index.yaml'), 'w', encoding='utf8') as updated_yaml:
yaml.dump(updated_index, updated_yaml)
aplus_json = aplus_json(course, updated_index)
aplus_json_file = os.path.join(COURSE_DIR,
os.environ['PLUGIN_COURSE'] + "_aplus.json")
with open(aplus_json_file, "w") as f:
json.dump(aplus_json, f, indent=4, sort_keys=True)
PrintColor.success("Success: A+ json file is generated as {}".
format(os.path.relpath(aplus_json_file, start=COURSE_DIR)))
| apluslms/deploy-aplus | main.py | main.py | py | 2,648 | python | en | code | 0 | github-code | 13 |
2514351107 | import logging
from datetime import date
import geocoder
from django import forms
from django.conf import settings
from events.models import ExternalEvent
class ExternalEventForm(forms.ModelForm):
    """ModelForm for :class:`ExternalEvent` with date and town validation.

    ``clean`` rejects events starting in the past and, when a town is
    supplied, geocodes it (restricted to Poland) to populate ``lat``/``lon``.
    """

    class Meta:
        model = ExternalEvent
        exclude = []
        widgets = {
            # Render the start date as a native HTML5 date picker.
            "starts_on": forms.widgets.DateInput(
                format="%Y-%m-%d", attrs={"type": "date"}
            ),
        }

    def clean(self):
        cleaned_data = super().clean()
        if "starts_on" in cleaned_data:
            # Events may not be created in the past.
            if cleaned_data["starts_on"] < date.today():
                raise forms.ValidationError("Nie można dodać wydarzenia w przeszłości")
        if "town" in cleaned_data and cleaned_data["town"]:
            try:
                if not settings.PIOSENKA_GOOGLE_API_GEOCODING_SERVER_KEY:
                    logging.warning("PIOSENKA_GOOGLE_API_GEOCODING_SERVER_KEY not set")
                # Look the town up with Google geocoding, limited to Poland.
                g = geocoder.google(
                    cleaned_data["town"],
                    components="country:PL",
                    key=settings.PIOSENKA_GOOGLE_API_GEOCODING_SERVER_KEY,
                )
                cleaned_data["lat"] = g.latlng[0]
                cleaned_data["lon"] = g.latlng[1]
            except Exception as err:
                # ``except Exception`` replaces the original bare ``except:``;
                # KeyboardInterrupt/SystemExit now propagate naturally, so the
                # explicit re-raise clause is no longer needed.
                logging.exception("Geocoder lookup failed")
                raise forms.ValidationError(
                    "Nie udało się nam odnaleźć tej miejscowości na mapie."
                ) from err
        return cleaned_data
| przem8k/piosenka | events/forms.py | forms.py | py | 1,556 | python | en | code | 7 | github-code | 13 |
28620801106 | # This script created as part of the application process for
# the Star AI course.
# python 3.7.0
# Prompt shown to the user; the quoted phrase is also the text we echo below.
message = (
    "How many times do you want to display the message "
    "\"Reinforcement Learning\" on screen? "
)

user_input = input(message)
try:
    times = int(user_input)
except ValueError:
    # Non-integer input: report what was typed and what is expected.
    print(f"You inputted \"{user_input}\", but you need to provide an integer.")
else:
    # Valid count: print the message that many times
    # (zero or a negative count prints nothing).
    for _ in range(times):
        print("Reinforcement Learning")
| paulfioravanti/starai | reinforcement_learning.py | reinforcement_learning.py | py | 446 | python | en | code | 0 | github-code | 13 |
4748533295 | # Hjólaleiga Skilaverkefni
# Jón og Sesselja
from time import sleep as wait
from random import randint as rand
class Bike:
    """A rentable bike type.

    Holds the rental price for each supported period (hour, day, week)
    and ``nr``, the number of bikes of this type currently in stock.
    """

    def __init__(self, hourly_price, daily_price, weekly_price, nr):
        # Store the three rental rates and the available stock unchanged.
        (self.hourly_price,
         self.daily_price,
         self.weekly_price,
         self.nr) = hourly_price, daily_price, weekly_price, nr
# Inventory of rentable bikes, keyed by lower-case bike type.
# Bike(hourly_price, daily_price, weekly_price, nr) -- ``nr`` is the stock.
# (Dead commented-out class variants that used to live here were removed.)
bikes = {
    "mountain bike": Bike(6, 20, 60, 4),
    "road bike": Bike(3, 15, 50, 2),
    "bmx bike": Bike(4, 12, 40, 1),
}
class Customer:
    """A rental customer who can hold at most one rented bike at a time."""

    def __init__(self, name, email, cardnumber):
        self.name = name
        self.email = email
        self.cardnumber = int(cardnumber)
        # Currently rented bike type (a key of the global ``bikes`` dict) and
        # the human-readable rental duration, e.g. "2 Hours"; None when idle.
        self.bikeRented = None
        self.rentTime = None

    def rentBike(self, time, bike):
        """Try to rent *bike* for *time*; return True on success.

        Decrements the stock of the chosen bike type.  Fails (returns
        False) when the customer already holds a bike, or when the
        requested type is unknown or out of stock.
        """
        if self.bikeRented is not None:
            print("Customer already rented a bike")
            return False
        key = bike.lower()
        # Check membership before indexing: the original raised KeyError on
        # an unknown bike type instead of reaching its own error message.
        if key in bikes and bikes[key].nr > 0:
            self.bikeRented = bike
            self.rentTime = time
            bikes[key].nr -= 1
            return True
        print("This bike does not exist or is sold out")
        return False

    def returnBike(self):
        """Return the currently rented bike to the inventory."""
        if self.bikeRented is None:
            print("Customer does not have a bike rented")
        else:
            # BUG FIX: restock the bike this customer actually rented instead
            # of reading the module-level ``bike`` variable (which held
            # whatever type was last typed at the menu, rented or not).
            bikes[self.bikeRented.lower()].nr += 1
            self.bikeRented = None
            self.rentTime = None
            print("Bike returned, thank you", self.name + "!")
'''mtbikes = [MountainBike(rand(3,20), rand(15,40), rand(30,55), bool(rand(0,1))) for i in range(4)]
bmxbikes = [BMXBike(rand(3,20), rand(15,40), rand(30,55), rand(20, 30), bool(rand(0,1))) for i in range(1)]
rdbikes = [RoadBike(rand(3,20), rand(15,40), rand(30,55), rand(10,20), rand(1,10)) for i in range(2)]'''
#bikes = {"Mountain Bike":len(mtbikes),"BMX Bike": len(bmxbikes),"Road Bike": len(rdbikes)}
def _rent_flow(price_attr, unit):
    """Interactive rental flow shared by the hourly/daily/weekly menu items.

    ``price_attr`` is the Bike attribute holding the price ("hourly_price",
    "daily_price" or "weekly_price"); ``unit`` is the singular time-unit
    name ("hour", "day" or "week").
    """
    # ``bike`` is kept module-level for compatibility with code elsewhere in
    # this file that reads the last requested bike type.
    global bike
    for kind, info in bikes.items():
        print("%s: $%s per %s" % (kind, getattr(info, price_attr), unit))
    bike = input("What type of bike do you want to rent?: ")
    time = input("How many %ss do you want it for?" % unit)
    # Validate the duration before doing any arithmetic with it
    # (the original crashed with ValueError on non-numeric input).
    if not time.isnumeric():
        print("That is not a valid number")
        return
    rented = customer.rentBike("%s %ss" % (time, unit.capitalize()), bike)
    if rented:
        print("Total price: $%s" % (int(getattr(bikes[bike.lower()], price_attr)) * int(time)))
        print("Bike has been rented")


customer = Customer(input("Your name:"), input("Your Email: "), input("Your Card Number: "))

while True:
    print("""
    ====== Bike Rental Shop =======
    1. Display available bikes
    2. Request a bike on hourly basis
    3. Request a bike on daily basis
    4. Request a bike on weekly basis
    5. Return a bike
    6. Rented bike information
    7. Exit """)
    choice = input("Choose an action: ")
    if choice == "1":
        # Show current stock per bike type.
        for kind, info in bikes.items():
            print(info.nr, kind + "s")
    elif choice == "2":
        _rent_flow("hourly_price", "hour")
    elif choice == "3":
        _rent_flow("daily_price", "day")
    elif choice == "4":
        _rent_flow("weekly_price", "week")
    elif choice == "5":
        customer.returnBike()
    elif choice == "6":
        if customer.bikeRented is not None:
            print(f"{customer.name} has a {customer.bikeRented} with {customer.rentTime} left ")
        else:
            print("Customer does not have a bike rented")
    elif choice == "7":
        print("Takk fyrir að versla hjá okkur!")
        break
    else:
        print("Thats not a valid action")
    # Small pause so the user can read the output before the menu reprints.
    wait(1.5)
| nonni1234/hjolaleiga-skilaverk | hjolaleiga.py | hjolaleiga.py | py | 5,081 | python | en | code | 0 | github-code | 13 |
10314493180 | import csv
import json
def csv_to_json(csv_file, json_file):
    """Convert a CSV file (with a header row) into a JSON array of objects.

    Each CSV row becomes one JSON object whose keys are the column names
    taken from the header row.

    Args:
        csv_file: path of the source CSV file.
        json_file: path of the JSON file to write (overwritten if present).
    """
    # newline='' is required by the csv module so that quoted fields
    # containing newlines are parsed correctly.
    with open(csv_file, 'r', newline='', encoding='utf-8') as file:
        data_list = list(csv.DictReader(file))
    # ensure_ascii=False keeps non-ASCII text (e.g. accented characters)
    # readable in the output instead of \uXXXX escapes.
    with open(json_file, 'w', encoding='utf-8') as file:
        json.dump(data_list, file, indent=4, ensure_ascii=False)
# Example usage; guarded so importing this module does not immediately try
# to convert a hard-coded file (which crashed at import time if missing).
if __name__ == "__main__":
    csv_to_json('dados.csv', 'dados.json')
| Beguiny/python_projects | document_converter/CSV/csv_para_json.py | csv_para_json.py | py | 543 | python | pt | code | 0 | github-code | 13 |
36375154005 | from tkinter import *
import customtkinter
from threading import Thread
from PIL import ImageTk, Image
import GUIHandler
import logging
import sys
import time
import math
import WeatherStationProperties
# Station properties object; supplies the logging folder path used below.
properties = WeatherStationProperties.WeatherStationProperties()

customtkinter.set_appearance_mode("dark")

# Log to a file inside the configured logging folder; filemode='w' truncates
# the previous log on every start.
# NOTE(review): assumes getLoggingFolder() ends with a path separator -- confirm.
logging.basicConfig(filename=str(properties.getLoggingFolder()) + "WeatherStation.log",
                    format='%(asctime)s %(module)s %(levelname)s - %(message)s',
                    filemode='w')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Mirror all log records to stdout with the same format as the file handler.
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter('%(asctime)s %(module)s %(levelname)s - %(message)s'))
logger.addHandler(handler)
# Create the fullscreen root window.
root = customtkinter.CTk()
root.attributes('-fullscreen',True)
logger.info("Creating Tkinter Object...")

# Configure grid weights: weight controls how extra space is shared
# between columns/rows when the window is resized.
# NOTE(review): widgets below are gridded in columns 0-2, but column 2 is
# never configured while column 3 is -- possibly a typo (3 vs 2); confirm.
Grid.columnconfigure(root,0,weight=2)
Grid.columnconfigure(root,1,weight=1)
Grid.columnconfigure(root,3,weight=2)
Grid.rowconfigure(root,0,weight=10)
Grid.rowconfigure(root,1,weight=1)
Grid.rowconfigure(root,2,weight=5)
Grid.rowconfigure(root,3,weight=1)
Grid.rowconfigure(root,4,weight=1)
Grid.rowconfigure(root,5,weight=1)
logger.info("Specifying Grid...")
# Create the control buttons.  The lambdas defer the ``guiHandler`` lookup
# until click time, so it is fine that guiHandler is only created further
# down in this file.
startButton = customtkinter.CTkButton(root,text="Start Data Collection", fg_color='green',
                                      command = lambda: guiHandler.startDataCollection(), text_color="black",
                                      font=("Inter", 15))
endButton = customtkinter.CTkButton(root,text="End Data Collection", fg_color='yellow',
                                    command = lambda: guiHandler.endDataCollection(), text_color="black",
                                    font=("Inter", 15))
exitButton = customtkinter.CTkButton(root,text="Exit", fg_color='red',
                                     command = lambda: guiHandler.shutdown(), text_color="black",
                                     font=("Inter", 15))
# Toggles ground-station rotation; guiHandler also updates this button's
# state (it is passed to GUIHandler below).
groundStationButton = customtkinter.CTkButton(root,text="Start Rotation",
                                              fg_color='orange', command = lambda: guiHandler.toggleGroundStationRotation(),
                                              text_color="black", font=("Inter", 15))
logger.info("Creating Buttons...")
# Create the sensor read-out labels; their text is refreshed by GUIHandler,
# which receives all six labels below.
temperatureLabel = customtkinter.CTkLabel(root,
                            text="Temperature: ", font=("Inter", 15), anchor="w")
humidityLabel = customtkinter.CTkLabel(root,
                            text="Humidity: ", font=("Inter", 15), anchor="w")
pressureLabel = customtkinter.CTkLabel(root,
                            text="Pressure: ", font=("Inter", 15), anchor="w")
uvLabel = customtkinter.CTkLabel(root,
                            text="UV: ", font=("Inter", 15), anchor="w")
windDirectionLabel = customtkinter.CTkLabel(root,
                            text="Wind Direction: ", font=("Inter", 15), anchor="w")
windSpeedLabel = customtkinter.CTkLabel(root,
                            text="Wind Speed: ", font=("Inter", 15), anchor="w")
logger.info("Creating Labels...")
# Create the logo image and title label.
logo = customtkinter.CTkImage(dark_image=Image.open("assets/logo.png"), size=(400, 300))
logoLabel = customtkinter.CTkLabel(root, text="", image=logo)
# Presumably keeps an extra reference so the image is not garbage
# collected -- the common Tk image idiom; confirm it is still needed.
logoLabel.image = logo
weatherStationLabel = customtkinter.CTkLabel(root, text="Weather Station GUI", font=("Inter", 25))
# Use the module ``logger`` like the rest of the file (the original called
# ``logging.info`` directly here, inconsistent with every other log call).
logger.info("Creating Image...")
# Place the logo and title in column 1, rows 0-1.
logoLabel.grid(row=0,column=1,sticky="NSEW", padx=(20, 20), pady=(20, 1))
weatherStationLabel.grid(row=1,column=1,sticky="NSEW", padx=(20, 20), pady=(1, 20))
# Place the buttons: start/end/rotation in row 2, exit at bottom-right.
startButton.grid(row=2,column=0,sticky="NSEW", padx=(20, 20), pady=(20, 20))
endButton.grid(row=2,column=1,sticky="NSEW", padx=(20, 20), pady=(20, 20))
exitButton.grid(row=5,column=2,sticky="NSEW", padx=(20, 20), pady=(20, 20))
groundStationButton.grid(row=2,column=2,sticky="NSEW", padx=(20, 20), pady=(20, 20))
# Place the sensor labels in rows 3-5 of columns 0-1.
temperatureLabel.grid(row=3,column=0,sticky="NSEW", padx=(20, 20), pady=(20, 20))
humidityLabel.grid(row=4,column=0,sticky="NSEW", padx=(20, 20), pady=(20, 20))
pressureLabel.grid(row=5,column=0,sticky="NSEW", padx=(20, 20), pady=(20, 20))
uvLabel.grid(row=3,column=1,sticky="NSEW", padx=(20, 20), pady=(20, 20))
windDirectionLabel .grid(row=4,column=1,sticky="NSEW", padx=(20, 20), pady=(20, 20))
windSpeedLabel.grid(row=5,column=1,sticky="NSEW", padx=(20, 20), pady=(20, 20))
logger.info("Setting grids...")
# Hand all widgets that need live updates over to the GUI handler.
guiHandler = GUIHandler.GUIHandler(root, temperatureLabel, humidityLabel, pressureLabel,uvLabel,windDirectionLabel,windSpeedLabel,groundStationButton)
logger.info("Enabling GUIHandler...")
# Kick off the first update; presumably GUIHandler.update() reschedules
# itself via the Tk event loop -- TODO confirm in GUIHandler.
guiHandler.update()

# Enter the Tk event loop; blocks until the window is closed.
root.mainloop()
logger.info("Ending TKinter Mainloop...")
41313477434 | #
# @lc app=leetcode id=274 lang=python3
#
# [274] H-Index
#
# @lc code=start
class Solution:
    def hIndex(self, citations: List[int]) -> int:
        """Return the h-index: the largest h such that at least h papers
        have h or more citations each."""
        best = 0
        # Walk papers from most- to least-cited; position i+1 is how many
        # papers so far have at least ``cites`` citations.
        for rank, cites in enumerate(sorted(citations, reverse=True), start=1):
            if cites >= rank:
                best = rank
        return best
# @lc code=end
| WrathOP/LeetCode | 274.h-index.py | 274.h-index.py | py | 323 | python | en | code | 0 | github-code | 13 |
23810954676 | # -*- coding: utf-8 -*-
"""This module defines the necessary things to load an ASS file,
and the objects that allow to draw text with cairo"""
import codecs, math
from draw import extra
import common
#Constants, don't change anything if you don't want the program to crash
#From Style
S_NAME = 'name'
S_FONT = 'fontname'
S_SIZE = 'fontsize'
S_FORMAT ='format'
S_PCOLOR = 'primarycolour'
S_SCOLOR = 'secondarycolour'
S_OCOLOR = 'outlinecolour'
S_BCOLOR = 'backcolour'
S_MARGINV = 'marginv'
S_MARGINR = 'marginr'
S_MARGINL = 'marginl'
S_OUTLINE = 'outline'
S_ALIGN = 'alignment'
S_SHADOW = 'shadow'
S_BOLD ='bold'
S_ITALIC = 'italic'
S_ANGLE = 'angle'
S_SCALE_X = 'scalex'
S_SCALE_Y = 'scaley'
#From events (Dialogues)
E_FORMAT ='format'
E_DIALOG = 'dialogue'
E_START = 'start'
E_END = 'end'
E_LAYER = 'layer'
E_STYLE = 'style'
E_TEXT = 'text'
E_EFFECT = 'effect'
#From script ([F]ile)
F_EVENTS = '[events]'
F_SINFO = '[script info]'
F_STYLE4P = '[v4+ styles]' #p for PLUS (like "+")
F_STYLE4 = '[v4 styles]'
#This functions are defined out so they can be used for everyone who uses the module
#TODO check if this can be moved to common
def TimeToMS(time):
"""Converts a string from type '0:00:00.00' from ASS to miliseconds in integer"""
h, m, s = time.split(':')
s, ms = s.split('.')
result = int(h) *60 #We assign hours and convert it to minutes
result += int(m) #Add the minutes
result *= 60 #Convert it to seconds
result += int(s) #Add the seconds
result *= 1000 #Converting to ms
result += int(ms)*10
return result #and if there's no strange character around it's all OK
class cProperties():
def __init__(self, other=None, dicc=None):
"""There are 3 ways to create a style:
with nothing, it's done with default values.
cProperties(other=othercopy) or cProperties(othercopy) copies the values of other
cProperties(dicc=dictionary) initialize with the values of a ass dictionary"""
#This ones need to be outside, because if they aren't initialized, in case an instance cProperties is created directly, it will raise an error!!
self.color1 = extra.cCairoColor(number=0xFFFF2020) #Note that the 0x makes them a real number and not a string. primary color
self.color2 = extra.cCairoColor(number=0xFF808080) #secondary color
self.color3 = extra.cCairoColor(number=0xFF101010) #border
self.color4 = extra.cCairoColor(number=0xFF808080) #shadow
#colors: primary Secondary Outline Back
self._layer = 0
if other:
self.CopyAllFrom(other)
else:
#default values
#animatable
#scaling, x and y respectively
self.scale_x = 1.0
self.scale_y = 1.0
#size of border in pixels
self.border = 3
#size of shadow in pixels
self.shadow = 0
self.angle = 0
#position of vector (beginning point of vector)
self.pos_x = 30
self.pos_y = 30
#transformation's origin (and some draws)
self.org_x = 0
self.org_y = 0
#displacement of shadow in pixels, in x and y repsectively
self.shad_x = 0
self.shad_y = 0
#drawing modes
self.mode_fill = 0
self.mode_border = 0
self.mode_shadow = 0
self.mode_particle = 0
#not animatable
#style's name
self._name ='EstiloManualmenteCreado'
#font size
self._size = 12
#font name
self._font = "Verdana"
#Bold
self._bold = False
#Italic
self._italic = False
#margins in pixels, vertical, right and left respectively
self._marginv = 30
self._marginr = 30
self._marginl = 30
#alignment according to ass (an creo)
self._align = 2
#path info #loading a figure, this doesn't have much effect so variables aren't needed
self._x_bearing = 0
self._y_bearing = 0
self._x_advance = 0
self._y_advance = 0
self._ascent = 0
self._descent = 0
self._max_x_advance = 0
self._max_y_advance = 0
#unneeded, I don't put them because if there's an error something should be wrong in coding
#self._line_height = 0
#self._width = 0
if dicc:
self.FromDict(dicc)#bacause the dict could lack values
#this aren't needed, loaded when the vector is created
#there are just there because the ide catch them
self._height = None
self._width = None
self._line_height = None
def CopyAllFrom(self, other):
#This is important because the original style from the dialogues intializes giving a style the moment it's created
self.CopyFrom(other)
#Not animatable
self._name = other._name
self._font = other._font
self._size = other._size
self._bold = other._bold
self._italic = other._italic
self._marginv = other._marginv
self._marginr = other._marginr
self._marginl = other._marginl
self._align = other._align
#self._layer = other._layer # not necessary
def CopyFrom(self, other):
"""Copies the data of another object from the same type
@other it's an object of cProperties type
only the animatable data is copied, it makes them faster that way.
"""
self.pos_x = other.pos_x
self.pos_y = other.pos_y
self.org_x = other.org_x
self.org_y = other.org_y
self.shad_x = other.shad_x
self.shad_y = other.shad_y
self.scale_x = other.scale_x
self.scale_y = other.scale_y
self.angle = other.angle
self.color1.CopyFrom(other.color1)
self.color4.CopyFrom(other.color4)
self.color3.CopyFrom(other.color3)
self.color2.CopyFrom(other.color2)
self.border = other.border
self.shadow = other.shadow
self.mode_fill = other.mode_fill
self.mode_border = other.mode_border
self.mode_shadow = other.mode_shadow
self.mode_particle = other.mode_particle
def FromDict(self, style):
"""Creates the values from a dictionary for internal use"""
#animatable
self.angle = math.radians(common.SafeGetFloat(style, S_ANGLE))
self.color1 = extra.cCairoColor(text=style.get(S_PCOLOR, 0))
self.color3 = extra.cCairoColor(text=style.get(S_OCOLOR, 0))
self.color4 = extra.cCairoColor(text=style.get(S_BCOLOR, 0))
self.color2 = extra.cCairoColor(text=style.get(S_SCOLOR, 0))
self.border = common.SafeGetFloat(style, S_OUTLINE)
self.shadow = int(common.SafeGetFloat(style, S_SHADOW)) #zheo told me that it could be float but it doesn't make sense here
self.scale_x = common.SafeGetFloat(style, S_SCALE_X, 100)/100.0
self.scale_y = common.SafeGetFloat(style, S_SCALE_Y, 100)/100.0
#Not animatable
self._name = style.get(S_NAME, '')
self._font = style.get(S_FONT, '')
self._size = common.SafeGetFloat(style, S_SIZE)
self._bold = not (style.get(S_BOLD, '0') == '0')
self._italic = not (style.get(S_ITALIC, '0') == '0')
self._marginv = int(common.SafeGetFloat(style, S_MARGINV))
self._marginr = int(common.SafeGetFloat(style, S_MARGINR))
self._marginl = int(common.SafeGetFloat(style, S_MARGINL))
self._align = int(common.SafeGetFloat(style, S_ALIGN))
class cSyllable(extra.cVector):
def __init__(self, text='', style=None, parent=None, last_pos=None):
"""
A syllable. It's better that the dialogue creates them because they need a special initialization.
To use the syllable later we must call changeText(text, preposition)
:param text: syllable's text
:param style: Style of the syllable
:param parent: Parent object
:type text: string
:type style: :class:`cProperties`
:type parent: :class:`cDialogue`
"""
extra.cVector.__init__(
self, text=text, style=style, parent=parent, last_pos=last_pos)
#self._text = text
#defaults to [] if its iterable, this is only created if the
#parameter FxsGroup.split_letters is True
#or if you call self.SplitLetters
self._letters = []
def SplitLetters(self):
"""Computes the characters of the syllable...
Use if you change __text
it's slow and eats more ram
to access the Syllables use _letters later
and activate the option in FxsGroup, too.
"""
#we create the array and get common values
self._letters = []
time = self._start
last = (self.original.pos_x, self.original.pos_y)
#If there are chars
if not self._text:#catchs '' and None
self._text = ''
#to avoide duplicated code, even though you shouldn't call this function without text ò_ó
cdur = 0.0
else:
#calculation of each character's duration
cdur = float(self._dur) / len(self._text)
#adding the characters
for (i, tchar) in enumerate(self._text):
char = extra.cVector(
text = tchar, style=self.original, parent=self, last_pos=last)
char._indice = i
char._start = time
char._dur = cdur
char._end = time = (time + cdur)
char.effect = self.effect
last = (char._next_x, char._next_y)
self._letters.append(char)
def Chain(self, function, duration=None):
"""Allows to Chain the characters to a animation.
Before calling this function call DivideLetters
or enable the option in FxsGroup
@function function that will be called for each syllable and the progress
@duration=None duration of the animatio of each character
If not specified, a duration that animates one character per time will be used.
(Note: do not change _text if you don't want inconsistencies)
"""
common.Chain(self._dur, self.progress, self._letters, function, duration)
def FullWiggle(self, amplitude=4, frequency=2, dx=None, dy=None):
"""the wiggle that AbelKM wanted, part 2
"""
#(btw) abelkm expand the doc explaingin this
if dx is None:
dx, dy = self.Wiggle(amplitude, frequency)
o = self.original
if not hasattr(o, 'old_x'):
o.old_x = o.pos_x
o.old_y = o.pos_y
o.pos_x = o.old_x + dx
o.pos_y = o.old_y + dy
for let in self._letters:
o = let.original
if not hasattr(o, 'old_x'):
o.old_x = o.pos_x
o.old_y = o.pos_y
o.pos_x = o.old_x + dx
o.pos_y = o.old_y + dy
class cDialogue(extra.cVector):
"""A Dialogue represents a text line,
This takes the text, each Syllable of the text and it's karaoke times.
This object is the most complex one, almost imposible for you to create, better use cSyllable or directly extra.cVector
"""
def __init__(self, dialogue, styles, max_effect = 0):
"""
@dialogue the dialogue line in ASS form (inner)
@styles array with styles
optionals:
@max_effect maximum number that can take as effect
"""
t_estilo = dialogue[E_STYLE]
est = styles[0]
for i in styles:
if t_estilo == i._name:
est = i
break
estilo = cProperties(est)
#I hate how aweful can become ass
#the "or" is because the scumbag ass indicates the margin for each line. BUUUUUT if it's 0 it takes the style ~_~
estilo._layer = common.SafeGetFloat(dialogue, E_LAYER) or estilo._layer
estilo._marginv = common.SafeGetFloat(dialogue, S_MARGINV) or estilo._marginv
estilo._marginr = common.SafeGetFloat(dialogue, S_MARGINR) or estilo._marginr
estilo._marginl = common.SafeGetFloat(dialogue, S_MARGINL) or estilo._marginl
#note that we haven't given the text yet
extra.cVector.__init__(self, text=None, style=estilo)
#text=None this way the path won't be created, attention! if we don't call changeText the path won't be created and it will raise an error!
#Setting times, translating all to frames
#we sabe the times as ms for better accuracy
self._start = TimeToMS(dialogue[E_START])
self._end = TimeToMS(dialogue[E_END])
self._dur = self._end - self._start
#Setting the effect to use
self.effect = min(max_effect, int(common.SafeGetFloat(dialogue, E_EFFECT)))
#Loading Syllables (this function sets the _text)
self.__SetSyllables( dialogue[E_TEXT] )
#We know the text after parsing the Syllables
#self.SetText(self._text)
def __SetSyllables(self, text):
"""Creats the objects Syllables from a dialogue,
code was taken from ZheoFX (C) brother project
Zheo y Alchemist, thanks guys, you're awesome! :D"""
import re
"""
{(?:\\.)* = takes anything, this was made so if someone put some effect and \k after it, it takes the \k and drops the rest
\\(?:[kK]?[ko]?[kf]) = takes \k, \kf, \ko and \K
(\d+) = any digit, in this case, \k timming
([\\\-a-zA-Z_0-9]*)} = for inline_fx ({\k20\-0} karaoke)
(\s*)([^{]*)(\s*) = space - any alphanumeric character and signs-space"""
#TODO probar con el nuevo regex de alch
#TODO pensar si conviene que cree un cVector en vez de una silaba (si no trae problemas en los events)
#si el anterior es cierto : TODO cuando encuentre la sintaxis de una forma en el dialogo que en vez de crear un dialogo lo cree usando la forma
"""KARA = re.compile(
r'''
(?:\\[\w\d]+)* # ignore tags before k
\\[k|ko|kf](\d+) # k duration in centiseconds(k, K, kf, ko)
(?:\\[\w\d]+)* # ignore tags before inlinefx
(\\-[\w\d]+)* # inlinefx
(?:\\[\w\d]+)* # ignore tags after inlinefx
} # start of text
(?:{\\[\w\d]+})* # ignore tags before k
(\s+)* # prespace
([^\s\{\}]+)* # stripped text
(\s+)* # postspace
''',
re.IGNORECASE | re.UNICODE | re.VERBOSE)"""
texto = re.sub(r'({[\s\w\d]+})*', '', text) #removes comments and tags that don't begin with \
pattern = r'(?:\\[k]?[K|ko|kf])(\d+)(?:\\[\w\d]+)*(\\-[\w\d]+)*(?:\\[\w\d]+)*}([^\{\}]+)*'
#pattern = r"{(?:\\.)*\\(?:[kK]?[ko]?[kf])(\d+)([\\\-a-zA-Z_0-9]*)}([^{]*)"#previous
info = list(re.findall(pattern, texto))
plain_text = ''.join([tx for ti, ifx, tx in info])
if not plain_text:#I don't know why it does this, maybe if there isn't {\k}, re doesn't return anything.
plain_text = re.sub(r'{.*}', '', texto) # lines (removing tags)
self.SetText(plain_text)
#Since pos depends of the alignment and hence of the text's size, we can only
#do it after parsing the Syllables
if self.original.angle:
#this should be used in case of the angle, but it doesn't work good (yet)
#calculating point 0,0 of dialogue, the text's beginning point
pre = self.matrix.transform_point(0, 0)
else:
pre = self.original.pos_x, self.original.pos_y
self._syllables = []
tiempo = self._start
i = 0
for ti, ifx, tx in info:
syl = cSyllable(tx, self.original, parent=self, last_pos=pre)
syl._indice = i
syl._start = tiempo
dur = int(ti)*10.0
syl._dur = dur
syl._end = tiempo = syl._start + syl._dur
if len(ifx)>2 :
try:
ifx = int(ifx[2:])
except:
ifx = None
else:
ifx = None
#ifx=None to allow effects = 0
syl.effect = ifx or self.effect
self._syllables.append(syl)
i += 1
pre = syl._next_x, syl._next_y
def Chain(self, function, duration=None):
"""
Allows to chain the Syllables to a animationn
:param function: function to call for each syllable and the progress
:param duraton:
duration of the animation of each syllable
If not specified, a duration that animates
one syllable per time will be used.
:type function: `method`
:type duration: int milliseconds
"""
common.Chain(self._dur, self.progress, self._syllables, function, duration)
def FullWiggle(self, amplitude=4, frequency=2):
"""el wiggle que queria AbelKM"""
#TODO doc
dx, dy = self.Wiggle(amplitude, frequency)
for sil in self._syllables:
sil.FullWiggle(amplitude, frequency , dx, dy)
class Ass():
"""This class parses the .ass file, luckily you won't need to use it"""
def __init__(self, file, max):
"""When initialized, the file to load can be specified
@file .ass file to load
@max maximum number of effects"""
if file:
self.LoadFromFile(file, max)
def __pHeader(self, text):
"""Function to parse a header"""
titulo, valor = text.split(':', 1)
self.info[titulo.strip().lower()] = valor.strip() #the title in lower case makes it more compatible
def __none(self, text):
"""null parseator"""
pass#pass rules
def __v4PStyle(self, text):
"""style parseator"""
titulo, valor = text.split(':', 1) #the 1 is just in case, one never knows
titulo = titulo.strip().lower()
if titulo == S_FORMAT:
self.formato = [v.strip().lower() for v in valor.split(',')] # si que me gusta hacer codigo complicado, no?
else: # this shouldn't be done, we asume that it's not format and it's style, but one never knows
valores = [v.strip().lower() for v in valor.split(',')]
self.styles.append( cProperties(dicc=dict(zip(self.formato, valores))))
def __Events(self, text):
"""event parseator"""
titulo, valor = text.split(':', 1)
titulo = titulo.strip().lower()
if titulo == E_FORMAT :
self.eformato = [v.strip().lower() for v in valor.split(',')]
elif titulo == E_DIALOG:
valores_raw = [v.strip() for v in valor.split(',', len(self.eformato)-1)]
valores = [v.lower() for v in valores_raw[:-1]]
valores.append(valores_raw[-1]) #The text won't be given in lower case this way
nuevo_d = dict(zip(self.eformato, valores))
nuevo_d[E_EFFECT] = int(common.SafeGetFloat(nuevo_d, E_EFFECT))
d = cDialogue(nuevo_d, self.styles, self.max_effect)
d._indice = self.index
self.dialogues.append(d)
self.index += 1
def LoadFromFile(self, file, max):
"""Loading a ass file"""
self.info = {}
self.styles = []
self.dialogues = []
self.index = 0
self.max_effect = max
f = codecs.open(file, mode='r', encoding='utf-8')
parser = self.__none
for line in f:
s = line.strip().lower()
if (s=="") or (s[0] ==";"): #with s="" we avoid errors if it's an empty line
pass # me la soban los comentarios y las lineas en blanco ^_^
elif s==F_EVENTS: #The events go here because it's more efficient, generally there will be lot of them
parser = self.__Events
elif s == F_SINFO:
parser = self.__pHeader
elif (s== F_STYLE4P) or (s==F_STYLE4):
parser = self.__v4PStyle
else:
parser(line)
| jerobarraco/kafx | branches/kafx/libs/asslib.py | asslib.py | py | 17,575 | python | en | code | 1 | github-code | 13 |
19351873486 | MAX_EXP = 200
GRID_DEPTH = 3
INF = float('inf')
VERBOSE = True
PLOT_LOG_SCALE = True
USE_BIGFLOAT = False
STEP = 1.1
OPTIMIZATION_METHOD = 'L-BFGS-B'
# OPTIMIZATION_METHOD = 'TNC'
INITIAL_GRID_COUNT = 20
INITIAL_GRID_STEP = 3
DEFAULT_ERR_SCALE = 1
DEFAULT_K = 21
DEFAULT_READ_LENGTH = 100
DEFAULT_REPEAT_MODEL = 0
DEFAULT_MIN_SINGLECOPY_RATIO = 0.3
AUTO_SAMPLE_TARGET_COVERAGE = 12
AUTO_TRIM_PRECISION = 6
NOISE_THRESHOLD = 10**-6
MAX_ERRORS = 8
MAX_NOTRIM = 25
try:
from multiprocessing import cpu_count
DEFAULT_THREAD_COUNT = cpu_count()
except NotImplementedError:
DEFAULT_THREAD_COUNT = 2
| mhozza/covest | covest/constants.py | constants.py | py | 608 | python | en | code | 5 | github-code | 13 |
14312045609 | from bs4 import BeautifulSoup
import requests
source = requests.get('http://tibia.pl/exp-table').text
soup = BeautifulSoup(source, 'html5lib')
for search in soup.find_all('tr'):
info = search.text.split()
lvl = info[0]
exp = info[1]
if lvl.isnumeric() and exp.isnumeric():
print("Poziom: ",lvl,", Doświadczenie: ", exp) | Pablit4o/aplikacje-internetowe-21716-185ic | Lab5/scrape-web-2.py | scrape-web-2.py | py | 347 | python | en | code | 0 | github-code | 13 |
25392061458 | from django.conf.urls import include, url
from django.contrib import admin
from account import views as account_views
# from reports import views as reports_views
from groups import views as group_views
from messaging.views import *
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^home/', account_views.home, name='home'),
url(r'^investor_info/', account_views.investor_info, name='investor_info'),
url(r'^company_info/', account_views.company_info, name='company_info'),
url(r'^signup/', account_views.signupform, name='signup'),
url(r'^login/', account_views.log, name='login'),
url(r'^auth/', account_views.auth),
url(r'^showdata/', account_views.showdata),
url(r'^logout/', account_views.loggedout, name = 'logout'),
url(r'^loggedin/', account_views.loggedin, name='loggedin'),
url(r'^invalid/', account_views.invalid),
url(r'^reports/', include('reports.urls', namespace='reports')),
url(r'groups/$', group_views.groupHome, name = 'groups'),
url(r'makeGroups/$', group_views.submit_groups, name = 'makeGroups'),
url(r'viewGroups/$', group_views.view_groups, name = 'viewGroups'),
url(r'leaveGroups/$', group_views.leave_groups),
url(r'selectGroupToChange/$', group_views.view_groups_for_adding, name= 'selectGroup'),
url(r'selectMembersToAdd/$', group_views.select_members_to_add, name = 'selectMembers'),
url(r'addMembers/$', group_views.add_members, name = 'addMembers'),
url(r'invalidSubmitGroup/$', group_views.invalid_submit_group),
url(r'groupSuccessPage/$', group_views.success),
#SITE MANAGER GROUP URLS
url(r'groupHomeSiteManager/$', group_views.groupHomeSiteManager, name = 'groupHomeSiteManager'),
url(r'deleteGroupsSiteManager/$', group_views.delete_groups_site_manager),
url(r'viewGroupsSiteManager/$', group_views.view_groups_site_manager, name = 'viewGroupsSiteManager'),
url(r'selectGroupToAddMembersSiteManager/$', group_views.view_groups_for_adding_site_manager, name = 'selectGroupToAddMembersSiteManager'),
url(r'selectMembersToAddSiteManager/$', group_views.select_members_to_add_site_manager),
url(r'addMembersSiteManager/$', group_views.add_members_site_manager, name = 'addMembersSiteManager'),
url(r'selectGroupToDeleteMembersSiteManager/$', group_views.view_groups_for_deleting_site_manager, name = 'selectGroupToDeleteMembersSiteManager'),
url(r'selectMembersToDeleteSiteManager/$', group_views.select_members_to_delete_site_manager),
url(r'deleteMembersSiteManager/$', group_views.delete_members_site_manager),
url(r'revokeSm/$', account_views.revoke_sm),
url(r'suspendUser/$', account_views.sus_user),
url(r'makeSiteManager/$', account_views.make_sm),
url(r'messageHome/$', messageHome, name= 'messaging'),
url(r'makeMessages/$', new_messages, name = 'makeMessages'),
url(r'makeGroupMessages/$', message_groups, name = 'makeGroupMessages'),
url(r'viewMessages/$', view_messages, name = 'viewMessages'),
url(r'deleteMessages/$', delete_messages),
url(r'invalidSubmitMessage/$', invalid_submit_message),
url(r'messageSuccessPage/$', success),
# url(r'enterPrivateKey/$', enter_password),
url(r'decryptMessages/$', decrypt_messages),
url(r'^fdaLogin/$', account_views.fdalogin),
url(r'^viewReports/$', account_views.view_reports),
url(r'^viewOne/$', account_views.view_one),
url(r'^getEncrypt/$', account_views.get_encrypt),
url(r'^$', account_views.index)
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | jyou543/cs3240-s17-team22 | Fintech/urls.py | urls.py | py | 3,762 | python | en | code | 0 | github-code | 13 |
19870715404 | import tweepy #https://github.com/tweepy/tweepy
from datetime import datetime
from datetime import timedelta
class KEY:
def __init__(self, _consumer_key, _consumer_secret, _access_key, _access_secret):
self.consumer_key = _consumer_key
self.consumer_secret = _consumer_secret
self.access_key = _access_key
self.access_secret = _access_secret
self.auth = tweepy.OAuthHandler(_consumer_key, _consumer_secret)
self.auth.set_access_token(_access_key, _access_secret)
self.api = tweepy.API(self.auth, wait_on_rate_limit=True)
class Endpoint:
def __init__(self, name, limit, timeout):
self.name = name
self.count = 0
self.limit = limit
self.start = datetime.now()
self.timeout = timeout
def reset(self):
self.count = 0
self.start = datetime.now()
def increment(self):
self.count += 1
if self.count > self.limit:
if self.calcTimeout() > 0:
return False
else:
return True
else:
return True
def printf(self):
print("Endpoint", self.name, self.count, self.limit, self.start, self.timeout)
def calcTimeout(self):
# print("HERE TIMEOUT")
timeoutDelta = (timedelta(seconds = self.timeout) - (datetime.now() - self.start)).total_seconds()
# print('timeoutDelta', timeoutDelta)
return timeoutDelta
class API:
def __init__(self, api):
self.api = api
self.original = Endpoint('original', 150, 60*15)
self.retweets = Endpoint('retweets', 150, 60*15)
class apiObject:
def __init__(self, apis):
self.apis = apis
self.curr = 0
self.count = len(apis)
def currAPI(self):
return self.apis[self.curr]
def validOriginalAPI(self):
print("HERE VALID")
for i in range(0, self.count):
endpoint = self.apis[i].original
# print('___', i, self.apis[i].original)
# endpoint.printf()
if endpoint.count < endpoint.limit:
self.curr = i
return True
else:
timeout = endpoint.calcTimeout()
# print('*___', i, timeout )
if timeout < 0:
self.curr = i
return True
return False
def originalTimeout(self):
minTimeout = 15*60
for i in range(0, self.count):
timeout = self.apis[i].original.calcTimeout()
if timeout < minTimeout:
minTimeout = timeout
return minTimeout
def validRetweetsAPI(self):
# print("HERE VALID")
for i in range(0, self.count):
endpoint = self.apis[i].retweets
# print('___', i, self.apis[i].original)
# endpoint.printf()
if endpoint.count < endpoint.limit:
self.curr = i
return True
else:
timeout = endpoint.calcTimeout()
# print('*___', i, timeout )
if timeout < 0:
self.curr = i
return True
return False
def retweetTimeout(self):
minTimeout = 900
for i in range(0, self.count):
timeout = self.apis[i].retweets.calcTimeout()
if timeout < minTimeout:
minTimeout = timeout
return minTimeout
def printAPI(self):
for i in range(0, self.count):
print("API", i, self.apis[i].original.count, self.apis[i].retweets.count)
def reset(self):
for i in range(0, self.count):
timeout = self.apis[i].retweets.calcTimeout()
if timeout < 0:
self.apis[i].retweets.reset()
for i in range(0, self.count):
timeout = self.apis[i].original.calcTimeout()
if timeout < 0:
self.apis[i].original.reset()
| Nealsoni00/cs376-server | api.py | api.py | py | 3,236 | python | en | code | 0 | github-code | 13 |
72605103699 | import sys
sys.setrecursionlimit(1000000)
def melt(l, y, x):
moves = [[0, 1], [1, 0], [0, -1], [-1, 0]]
for move in moves:
dy, dx = move
if not (len(l) > y+dy >= 0 and len(l[0]) > x+dx >= 0):
continue
if l[y+dy][x+dx] == '.' or l[y+dy][x+dx] == 'L':
l[y][x] = '#'
def find(l, y, x, v):
if l[y][x] == 'X' or v[y][x] == True:
return False
if l[y][x] == 'L':
return True
v[y][x] = True
moves = [[0, 1], [1, 0], [0, -1], [-1, 0]]
for move in moves:
dy, dx = move
if not (len(l) > y+dy >= 0 and len(l[0]) > x+dx >= 0) and l[y+dy][x+dx] != 'X' and v[y+dy][x+dx] != True:
continue
if find(l, y+dy, x+dx, v):
return True
return False
R, C = map(int, sys.stdin.readline().replace("\n", "").split(" "))
lake = []
for _ in range(R):
lake.append(list(sys.stdin.readline().replace("\n", "")))
day = 0
onetime = False
for y in range(R):
for x in range(C):
if lake[y][x] == 'L':
lake[y][x] = 'S'
onetime = True
break
if onetime:
break
while day < 5:
breaker = False
for y in range(R):
for x in range(C):
if lake[y][x] == 'S':
visited = [[False for _ in range(C)] for _ in range(R)]
if find(lake, y, x, visited):
print(day)
sys.exit()
else:
breaker = True
break
if breaker == True:
break
for y in range(R):
for x in range(C):
if lake[y][x] == 'X':
melt(lake, y, x)
for y in range(R):
for x in range(C):
if lake[y][x] == '#':
lake[y][x] = '.'
day += 1
| gitdog01/AlgoPratice | random/no_category/3197/main.py | main.py | py | 1,820 | python | en | code | 0 | github-code | 13 |
21583053826 | import debug # pyflakes:ignore
import io
import json
import os
from django.conf import settings
from django.urls import reverse
from ietf.doc.models import Document
from ietf.group.factories import RoleFactory
from ietf.meeting.models import SchedTimeSessAssignment, SchedulingEvent
from ietf.meeting.factories import MeetingFactory, SessionFactory
from ietf.person.models import Person
from ietf.name.models import SessionStatusName
from ietf.utils.test_utils import TestCase
from ietf.utils.mail import outbox
from ietf.secr.proceedings.proc_utils import (import_audio_files,
get_timeslot_for_filename, normalize_room_name, send_audio_import_warning,
get_or_create_recording_document, create_recording, get_next_sequence,
_get_session, _get_urls_from_json)
SECR_USER='secretary'
class ProceedingsTestCase(TestCase):
def test_main(self):
"Main Test"
MeetingFactory(type_id='ietf')
RoleFactory(name_id='chair',person__user__username='marschairman')
url = reverse('ietf.secr.proceedings.views.main')
self.client.login(username="secretary", password="secretary+password")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# test chair access
self.client.logout()
self.client.login(username="marschairman", password="marschairman+password")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
class VideoRecordingTestCase(TestCase):
def test_get_session(self):
session = SessionFactory()
meeting = session.meeting
number = meeting.number
name = session.group.acronym
date = session.official_timeslotassignment().timeslot.time.strftime('%Y%m%d')
time = session.official_timeslotassignment().timeslot.time.strftime('%H%M')
self.assertEqual(_get_session(number,name,date,time),session)
def test_get_urls_from_json(self):
path = os.path.join(settings.BASE_DIR, "../test/data/youtube-playlistitems.json")
with io.open(path) as f:
doc = json.load(f)
urls = _get_urls_from_json(doc)
self.assertEqual(len(urls),2)
self.assertEqual(urls[0]['title'],'IETF98 Wrap Up')
self.assertEqual(urls[0]['url'],'https://www.youtube.com/watch?v=lhYWB5FFkg4&list=PLC86T-6ZTP5jo6kIuqdyeYYhsKv9sUwG1')
class RecordingTestCase(TestCase):
settings_temp_path_overrides = TestCase.settings_temp_path_overrides + ['MEETING_RECORDINGS_DIR']
def test_page(self):
meeting = MeetingFactory(type_id='ietf')
url = reverse('ietf.secr.proceedings.views.recording', kwargs={'meeting_num':meeting.number})
self.client.login(username="secretary", password="secretary+password")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_post(self):
session = SessionFactory(status_id='sched',meeting__type_id='ietf')
meeting = session.meeting
group = session.group
url = reverse('ietf.secr.proceedings.views.recording', kwargs={'meeting_num':meeting.number})
data = dict(group=group.acronym,external_url='http://youtube.com/xyz',session=session.pk)
self.client.login(username="secretary", password="secretary+password")
response = self.client.post(url,data,follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, group.acronym)
# now test edit
doc = session.materials.filter(type='recording').first()
external_url = 'http://youtube.com/aaa'
url = reverse('ietf.secr.proceedings.views.recording_edit', kwargs={'meeting_num':meeting.number,'name':doc.name})
response = self.client.post(url,dict(external_url=external_url),follow=True)
self.assertEqual(response.status_code, 200)
self.assertContains(response, external_url)
def test_import_audio_files(self):
session = SessionFactory(status_id='sched',meeting__type_id='ietf')
meeting = session.meeting
timeslot = session.official_timeslotassignment().timeslot
self.create_audio_file_for_timeslot(timeslot)
import_audio_files(meeting)
self.assertEqual(session.materials.filter(type='recording').count(),1)
def create_audio_file_for_timeslot(self, timeslot):
filename = self.get_filename_for_timeslot(timeslot)
path = os.path.join(settings.MEETING_RECORDINGS_DIR,'ietf' + timeslot.meeting.number,filename)
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
with io.open(path, "w") as f:
f.write('dummy')
def get_filename_for_timeslot(self, timeslot):
'''Returns the filename of a session recording given timeslot'''
return "{prefix}-{room}-{date}.mp3".format(
prefix=timeslot.meeting.type.slug + timeslot.meeting.number,
room=normalize_room_name(timeslot.location.name),
date=timeslot.time.strftime('%Y%m%d-%H%M'))
def test_import_audio_files_shared_timeslot(self):
meeting = MeetingFactory(type_id='ietf',number='72')
mars_session = SessionFactory(meeting=meeting,status_id='sched',group__acronym='mars')
ames_session = SessionFactory(meeting=meeting,status_id='sched',group__acronym='ames')
scheduled = SessionStatusName.objects.get(slug='sched')
SchedulingEvent.objects.create(
session=mars_session,
status=scheduled,
by=Person.objects.get(name='(System)')
)
SchedulingEvent.objects.create(
session=ames_session,
status=scheduled,
by=Person.objects.get(name='(System)')
)
timeslot = mars_session.official_timeslotassignment().timeslot
SchedTimeSessAssignment.objects.create(timeslot=timeslot,session=ames_session,schedule=meeting.schedule)
self.create_audio_file_for_timeslot(timeslot)
import_audio_files(meeting)
doc = mars_session.materials.filter(type='recording').first()
self.assertTrue(doc in ames_session.materials.all())
self.assertTrue(doc.docalias.filter(name='recording-72-mars-1'))
self.assertTrue(doc.docalias.filter(name='recording-72-ames-1'))
def test_normalize_room_name(self):
self.assertEqual(normalize_room_name('Test Room'),'testroom')
self.assertEqual(normalize_room_name('Rome/Venice'), 'rome_venice')
def test_get_timeslot_for_filename(self):
session = SessionFactory(meeting__type_id='ietf')
timeslot = session.timeslotassignments.first().timeslot
name = self.get_filename_for_timeslot(timeslot)
self.assertEqual(get_timeslot_for_filename(name),timeslot)
def test_get_or_create_recording_document(self):
session = SessionFactory(meeting__type_id='ietf', meeting__number=72, group__acronym='mars')
# test create
filename = 'ietf42-testroom-20000101-0800.mp3'
docs_before = Document.objects.filter(type='recording').count()
doc = get_or_create_recording_document(filename,session)
docs_after = Document.objects.filter(type='recording').count()
self.assertEqual(docs_after,docs_before + 1)
self.assertTrue(doc.external_url.endswith(filename))
# test get
docs_before = docs_after
doc2 = get_or_create_recording_document(filename,session)
docs_after = Document.objects.filter(type='recording').count()
self.assertEqual(docs_after,docs_before)
self.assertEqual(doc,doc2)
def test_create_recording(self):
session = SessionFactory(meeting__type_id='ietf', meeting__number=72, group__acronym='mars')
filename = 'ietf42-testroomt-20000101-0800.mp3'
url = settings.IETF_AUDIO_URL + 'ietf{}/{}'.format(session.meeting.number, filename)
doc = create_recording(session, url)
self.assertEqual(doc.name,'recording-72-mars-1')
self.assertEqual(doc.group,session.group)
self.assertEqual(doc.external_url,url)
self.assertTrue(doc in session.materials.all())
def test_get_next_sequence(self):
session = SessionFactory(meeting__type_id='ietf', meeting__number=72, group__acronym='mars')
meeting = session.meeting
group = session.group
sequence = get_next_sequence(group,meeting,'recording')
self.assertEqual(sequence,1)
def test_send_audio_import_warning(self):
length_before = len(outbox)
send_audio_import_warning(['recording-43-badroom-20000101-0800.mp3'])
self.assertEqual(len(outbox), length_before + 1)
self.assertTrue('Audio file import' in outbox[-1]['Subject'])
| ietf-tools/old-datatracker-branches | ietf/secr/proceedings/tests.py | tests.py | py | 8,814 | python | en | code | 5 | github-code | 13 |
42053692807 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
from democritus.elements import Elements
from democritus.molecules import Molecules
OWNER = 'Research Labs'
def test_create_group_from_tcex_json():
e = Elements(OWNER)
data = {
"attribute": [
{
"type": "Description",
"value": "Test Description"
}
],
"name": "Robin Sparkles",
"tag": [
{
"name": "APT"
},{
"name": "CrimeWare"
}
],
"type": "Adversary"
}
e.create_group_from_tcex_json(data)
e.process()
def _create_indicator():
e = Elements(OWNER)
e.add_default_metadata('File', {
'attributes': [{
'type': 'Description',
'value': 'Test'
}, {
'type': 'Source',
'value': 'Test'
}]
})
e.create_indicator('File', 'D69AA87FC248F7FAAF5C3BD0B1B1359C')
e.tcex.jobs.file_occurrence({
"date" : "2014-11-03T00:00:00-05:00",
"fileName" : "win999301.dll",
"hash": "D69AA87FC248F7FAAF5C3BD0B1B1359C",
"path" : "C:\\Windows\\System"
})
e.process()
def test_deduplication_1():
_create_indicator()
time.sleep(1)
_create_indicator()
e = Elements(OWNER)
ind_json = e.get_item('File', 'D69AA87FC248F7FAAF5C3BD0B1B1359C', include_attributes=True, include_file_occurrences=True)
assert len(ind_json['attribute']) == 2
assert len(ind_json['fileOccurrences']) == 1
def test_file_attribute_deduplication():
e = Elements(OWNER)
file_summary = '{} : {} : {}'.format('a'*32, 'b'*40, 'c'*64)
indicator_data = {
'indicators': [{
'summary': file_summary,
'attribute': [{
'type': 'Description',
'value': 'Test'
}, {
'type': 'Source',
'value': 'Test'
}],
'type': 'File'
}]
}
e.create_from_tcex_json(indicator_data)
e.create_from_tcex_json(indicator_data)
ind_json = e.get_item('File', 'a'*32, include_attributes=True, include_file_occurrences=True)
assert len(ind_json['attribute']) == 2
def test_type():
"""Make sure the json returned for a specific item includes the type of the item."""
_create_indicator()
e = Elements(OWNER)
items = e.get_items_by_type('Address', include_attributes=True, include_tags=True)
for item in items:
assert item.get('type')
def test_attribute_deduplication():
old_attributes = [{
'type': 'Description',
'value': '1'
}, {
'type': 'Description',
'value': '2'
}]
new_attributes = [{
'type': 'Description',
'value': '1'
}, {
'type': 'Description',
'value': '3'
}]
e = Elements()
deduplicated_attributes = e._deduplicate_attributes(old_attributes, new_attributes)
assert len(deduplicated_attributes) == 1
old_attributes = [{
'type': 'Description',
'value': '1'
}]
new_attributes = [{
'type': 'Source',
'value': '1'
}]
e = Elements()
deduplicated_attributes = e._deduplicate_attributes(old_attributes, new_attributes)
assert len(deduplicated_attributes) == 1
old_attributes = [{
'type': 'Description',
'value': '1'
}]
new_attributes = [{
'type': 'Description',
'value': '1'
}]
e = Elements()
deduplicated_attributes = e._deduplicate_attributes(old_attributes, new_attributes)
assert len(deduplicated_attributes) == 0
def test_file_occurrence_deduplication():
old_file_occurrences = [{
'fileName': 'a',
'path': 'b',
'date': 'c'
}, {
'fileName': 'a',
'path': 'b',
'date': 'd'
}]
new_file_occurrences = [{
'fileName': 'a',
'path': 'b',
'date': 'c',
'hash': 'hashValue'
}, {
'fileName': 'aa',
'path': 'b',
'date': 'c',
'hash': 'hashValue'
}]
e = Elements()
deduplicated_file_occurrences = e._deduplicate_file_occurrences(old_file_occurrences, new_file_occurrences, 'hashValue : hashValue2 : hashValue3')
assert len(deduplicated_file_occurrences) == 1
old_file_occurrences = [{
'fileName': 'a',
'path': 'b',
'date': 'c'
}]
new_file_occurrences = [{
'fileName': 'aa',
'path': 'b',
'date': 'c',
'hash': 'hashValue'
}]
e = Elements()
deduplicated_file_occurrences = e._deduplicate_file_occurrences(old_file_occurrences, new_file_occurrences, 'hashValue : hashValue2 : hashValue3')
assert len(deduplicated_file_occurrences) == 1
old_file_occurrences = [{
'fileName': 'a',
'path': 'b',
'date': 'c'
}]
new_file_occurrences = [{
'fileName': 'a',
'path': 'b',
'date': 'c',
'hash': 'hashValue'
}]
e = Elements()
deduplicated_file_occurrences = e._deduplicate_file_occurrences(old_file_occurrences, new_file_occurrences, 'hashValue : hashValue2 : hashValue3')
assert len(deduplicated_file_occurrences) == 0
def test_log_processing_invalid_indicator():
e = Elements(owner=OWNER, process_logs=True)
e.create_indicator('URL', 'https://HIGHTOWER.space')
errors = e.process()
assert len(errors['exclusion_list_failure']) == 0
assert len(errors['invalid_indicator']) == 1
assert ' - tcex - ERROR - Failed adding indicator https://HIGHTOWER.space type URL ({"status":"Failure","message":"Please enter a valid Url"}).' in errors['invalid_indicator'][0]
def test_log_processing_excluded_indicator():
e = Elements(owner=OWNER, process_logs=True)
e.create_indicator('URL', 'https://google.com')
errors = e.process()
assert len(errors['exclusion_list_failure']) >= 1
assert len(errors['invalid_indicator']) == 0
assert 'Failed adding indicator https://google.com type URL ({"status":"Failure","message":"This indicator is contained on a system-wide exclusion list."}).' in errors['exclusion_list_failure'][0]
def test_logging():
e = Elements(owner=OWNER, process_logs=True)
e.create_indicator('URL', 'https://HIGHTOWER.space')
e.process()
# read the log file to make sure errors where logged
with open(e.tcex.log.handlers[-1].baseFilename, 'r') as f:
text = f.read()
assert 'Failed adding indicator https://HIGHTOWER.space type URL' in text
# TODO: the test below is not working yet b/c the ability to retrieve associations when using `get_items_by_type` has not been implemented yet
def test_get_associations():
m = Molecules(owner=OWNER)
m.create_from_symbolic_pattern('inc-file', 2)
m.process()
incidents = m.get_items_by_type('incidents', include_associations=True)
assert len(incidents[-1]['associations']) == 1
assert len(incidents[-2]['associations']) == 1
def test_group_deduplication():
"""Make sure groups are being deduplicated based on group name."""
e = Elements(owner=OWNER)
e.create_group('Threat', 'Test threat')
e.process()
original_threat_count = len(e.get_items_by_type('threat'))
# try to create an threat with the same name and make sure it is not created
e.create_group('Threat', 'Test threat')
e.process(dont_create_duplicate_groups=True)
new_threat_count = len(e.get_items_by_type('threat'))
assert new_threat_count == original_threat_count
def test_indicator_deletion():
e = Elements(owner=OWNER)
e.create_indicator('Address', '5.4.33.2')
e.process()
assert len(e.get_item('Address', '5.4.33.2')) > 0
print("e.get_item('Address', '5.4.33.2') {}".format(e.get_item('Address', '5.4.33.2')))
e.delete_indicator('Address', '5.4.33.2')
assert len(e.get_item('Address', '5.4.33.2')) == 0
def test_attribute_deduplication_cidr():
# this function assumes there is a cidr range indicator ('1.2.3.4/20') with a description attribute with a value of 'Test'
e = Elements(owner=OWNER)
e.create_from_tcex_json({
'indicators': [{
"attribute": [
{
"type": "Description",
"value": "Test"
}
],
"confidence": 5,
"rating": "3",
"summary": "1.2.3.4/20",
"type": "CIDR"
}]
})
e.process()
e = Elements(OWNER)
ind_json = e.get_item('CIDR', '1.2.3.4/20', include_attributes=True, include_file_occurrences=True)
print("ind_json {}".format(ind_json))
assert len(ind_json['attribute']) == 1
| fhightower-tc/old-tcex-utility | tests/test_elements.py | test_elements.py | py | 8,622 | python | en | code | 0 | github-code | 13 |
31392612179 | from pathlib import Path
class Solution:
def __init__(self):
with open(Path(__file__).parent / "input", "r") as f:
self.input = f.readlines()
def solve_part_1(self):
stack_len = 10007
stack = list(range(stack_len))
for line in self.input:
if "new" in line:
stack = list(reversed(stack))
if "cut" in line:
cut_num = int(line.split(" ")[1])
if cut_num < 0:
cut_num = len(stack) + cut_num
stack = stack[cut_num:] + stack[:cut_num]
if "increment" in line:
increment = int(line.split(" ")[-1])
indices = [x * increment % len(stack) for x in range(len(stack))]
new_list = [-1] * len(stack)
for ind, card in zip(indices, stack):
new_list[ind] = card
stack = new_list
answer = stack.index(2019)
print(answer)
return answer
def solve_part_2(self):
stack_len = 119315717514047
# stack_len = 10
num_shuffle = 101741582076661
# Model each shuffle as an affine function mod n
a = 1
b = 0
for line in self.input:
if "new" in line:
a *= -1
b += a
if "cut" in line:
cut_num = int(line.split(" ")[1])
b += (cut_num * a) % stack_len
if "increment" in line:
increment = int(line.split(" ")[-1])
a *= pow(increment, -1, stack_len)
a = a % stack_len
b = b % stack_len
print(f"{a} * x + {b}")
# y_n = a^n * x + sum_0_n-1(a^k * b)
a_n = pow(a, num_shuffle, stack_len)
bsum_ak = b * (1 - a_n) * pow(1 - a, -1, stack_len)
answer = a_n * 2020 + bsum_ak
print(a_n)
print(bsum_ak)
answer = answer % stack_len
print(answer)
return answer
def save_results(self):
with open(Path(__file__).parent / "part1", "w") as opened_file:
opened_file.write(str(self.solve_part_1()))
with open(Path(__file__).parent / "part2", "w") as opened_file:
opened_file.write(str(self.solve_part_2()))
if __name__ == "__main__":
solution = Solution()
solution.save_results()
| Gramet/adventofcode | 2019/day22/solution.py | solution.py | py | 2,376 | python | en | code | 0 | github-code | 13 |
3745999878 | #!/usr/bin/env python
import os
import sys
module_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(module_dir, "..", ".."))
from data.base import get_version
from data.base import setup
_V = get_version()
_D = [
"omim-data-borders",
"omim-data-essential",
"omim-data-files",
"omim-data-fonts",
"omim-data-styles",
]
setup(__file__, "all", [], install_requires=["{}=={}".format(d, _V) for d in _D])
| organicmaps/organicmaps | tools/python/data/all/setup.py | setup.py | py | 455 | python | en | code | 7,565 | github-code | 13 |
37944007538 |
__doc__ = """Run pythia 6 validation monitoring"""
from PyJobTransformsCore.trf import *
from PyJobTransformsCore.full_trfarg import *
from PyJobTransformsCore.trfutil import *
from HepMCAnalysis_i.Pythia6TrfConfig import pythia6config
class Pythia6valid_trf( JobTransform ):
def __init__(self):
JobTransform.__init__(self,authors = [ Author('Some author', 'someauthor@cern.ch') ] ,skeleton='HepMCAnalysis_i/Pythia6skeleton.py' ,help = __doc__ ,config = pythia6config )
#add arguments
self.add( HistogramFileArg() )
self.add( MaxEventsArg() )
def doPreRunActions(self):
JobTransform.doPreRunActions(self)
# execute it if not imported
if __name__ == '__main__':
trf = Pythia6valid_trf()
sys.exit(trf.exeSysArgs().exitCode())
| rushioda/PIXELVALID_athena | athena/Generators/HepMCAnalysis_i/scripts/Pythia6valid_trf.py | Pythia6valid_trf.py | py | 826 | python | en | code | 1 | github-code | 13 |
72088373777 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 3 20:55:13 2019
@author: ZQZ
"""
# import smtplib
import re, os, sys
import pandas as pd
from collections import Counter
# from email.header import Header
# from email.utils import formataddr
# from email.mime.text import MIMEText
from sklearn.externals import joblib
# from email.mime.multipart import MIMEMultipart
from Bio.SeqUtils.ProtParam import ProteinAnalysis
args = sys.argv
def readFasta(file):
if os.path.exists(file) == False:
print('Error: "' + file + '" does not exist.')
sys.exit(1)
with open(file) as f:
records = f.read()
if re.search('>', records) == None:
print('The input file seems not in fasta format.')
sys.exit(1)
records = records.split('>')[1:]
myFasta = []
for fasta in records:
array = fasta.split('\n')
name, sequence = array[0].split()[0], re.sub('[^ARNDCQEGHILKMFPSTWYV-]', '-', ''.join(array[1:]).upper())
myFasta.append([name, sequence])
return myFasta
fastas = readFasta(args[1])
def AAC(fastas):
# AA = kw['order'] if kw['order'] != None else 'ACDEFGHIKLMNPQRSTVWY'
#AA = 'ARNDCQEGHILKMFPSTWYV'
BBBAAC="HKPQR"
encodings = []
header = ['#Name']
for i in BBBAAC:
header.append(i)
encodings.append(header)
for i in fastas:
name, sequence = i[0], re.sub('-', '', i[1])
count = Counter(sequence)
for key in BBBAAC:
count[key] = count[key]/len(sequence)
code = [name]
for aa in BBBAAC:
code.append(count[aa])
encodings.append(code)
return encodings
def GAAC(fastas):
group = {
'alphatic': 'GAVLMI',
'aromatic': 'FYW',
'postivecharge': 'KRH',
'negativecharge': 'DE',
'uncharge': 'STCPNQ'
}
groupKey = group.keys()
encodings1 = []
header = []
for key in groupKey:
header.append(key)
encodings1.append(header)
for i in fastas:
name, sequence = i[0], re.sub('-', '', i[1])
code = []
count = Counter(sequence)
myDict = {}
for key in groupKey:
for aa in group[key]:
myDict[key] = myDict.get(key, 0) + count[aa]
for key in groupKey:
code.append(myDict[key]/len(sequence))
encodings1.append(code)
return encodings1
def protein_length(fastas):
encodings2 = []
header = ["length"]
encodings2.append(header)
for i in fastas:
name, sequence = i[0], re.sub('-', '', i[1])
code = []
length=len(sequence)
Norlen=(length-5)/(82-5)
code.append(Norlen)
encodings2.append(code)
return encodings2
def molecular_weight(fastas):
#seq_new=seq.replace('X','').replace('B','')
encodings3 = []
header = ["Weight"]
encodings3.append(header)
for i in fastas:
name, sequence = i[0], re.sub('-', '', i[1])
code = []
analysed_seq = ProteinAnalysis(sequence)
analysed_seq.monoisotopic = True
mw = analysed_seq.molecular_weight()
Normw=(mw-513.222346)/(9577.017286-513.222346)
code.append(Normw)
encodings3.append(code)
return(encodings3)
def savetsv(encodings, file = 'encoding.tsv'):
with open(file, 'w') as f:
if encodings == 0:
f.write('Descriptor calculation failed.')
else:
for i in range(len(encodings[0])-1):
f.write(encodings[0][i] + '\t')
f.write(encodings[0][-1] + '\n')
for i in encodings[1:]:
f.write(i[0] + '\t')
for j in range(1, len(i) - 1):
f.write(str(float(i[j])) + '\t')
f.write(str(float(i[len(i)-1])) + '\n')
return None
myFun = "AAC(fastas)"
myFun1 = "GAAC(fastas)"
myFun2 = "protein_length(fastas)"
myFun3 = "molecular_weight(fastas)"
encodings,encodings1,encodings2,encodings3= eval(myFun),eval(myFun1),eval(myFun2),eval(myFun3)
encodings=ziped =list(map(lambda x:x[0]+x[1]+x[2]+x[3],zip(encodings,encodings1,encodings2,encodings3)))
clf=joblib.load('BBPmodeling')
PreData=pd.DataFrame(encodings[1:])
PreDataX=PreData.drop([0],axis=1)
pred=clf.predict_proba(PreDataX)[:,1]
result=pd.DataFrame(list(zip(list(PreData[0]),pred)))
result.columns = ['Name', 'predict']
result.to_csv(args[2],index=False,header=True)
| loneMT/BBPpred | codes/predict.py | predict.py | py | 4,354 | python | en | code | 0 | github-code | 13 |
72764868499 | import json
import discord
import requests
import asyncio
from threading import Timer
from discord.utils import get
from discord.ext import commands
from core.cog_core import Cog_Extension
with open('setting.json', 'r', encoding='utf8') as jfile:
jdata = json.load(jfile)
# detecting price alerts
async def detectPriceAlert(bot):
for item in bot.watchlist:
if not item['reached']:
floorUSD = getCollectionFloor(
item['collection_slug']) * getEthPrice()
targetUSD = int(item['price'])
if item['operator'] == '>=':
condition = (floorUSD >= targetUSD)
elif item['operator'] == '>':
condition = (floorUSD > targetUSD)
elif item['operator'] == '=':
condition = (floorUSD == targetUSD)
elif item['operator'] == '<=':
condition = (floorUSD <= targetUSD)
else:
condition = (floorUSD < targetUSD)
if condition:
item['reached'] = True
title = (f'Price Reached: {item["collection_slug"]}')
url = (
f'https://opensea.io/collection/{item["collection_slug"]}')
msg = (
f'The floor price of {item["collection_slug"]} has just reached the target price ({item["operator"]} {item["price"]} USD).\n'
)
msg += (
f'The current floor price is {getCollectionFloor(item["collection_slug"])} ETH / {format(floorUSD, ".2f")} USD.\n'
)
embed = discord.Embed(title=title,
url=url,
description=msg,
color=discord.Color.blue())
await sendMessage(bot, embed)
# set a thread that runs detectPriceAlert every 60 second
await asyncio.sleep(60)
t = Timer(60.0, await detectPriceAlert(bot))
t.start()
# send discord notificaiton to a channel
async def sendMessage(bot, embed):
await discord.utils.get(bot.get_all_channels(),
name='watchlist-alert').send(embed=embed)
def getCollectionFloor(collection_slug):
url = f'https://api.opensea.io/api/v1/collection/{collection_slug}'
response = requests.get(url, headers={"Accept": "application/json"})
data = response.json()
return data["collection"]["stats"]["floor_price"]
def getEthPrice():
URL = 'https://api.coingecko.com/api/v3/coins/markets?vs_currency=usd'
r = requests.get(url=URL)
data = r.json()
return data[1]['current_price']
def invalidCollectionSlug(collection_slug):
url = f'https://api.opensea.io/api/v1/collection/{collection_slug}'
response = requests.get(url, headers={"Accept": "application/json"})
data = response.json()
if 'collection' in data:
return False
return True
def invalidOperator(operator):
if operator in ['>=', '>', '=', '<=', '<']:
return False
return True
def getWatchlistMsg(bot):
msg = ''
counter = 1
for item in bot.watchlist:
reach_msg = '' if not item[
'reached'] else ' :white_check_mark: Reached!'
msg += (
f'#{counter} {item["collection_slug"]} {item["operator"]} {item["price"]} USD{reach_msg}\n'
)
counter += 1
return msg
def getCollectionFloor(collection_slug):
url = f'https://api.opensea.io/api/v1/collection/{collection_slug}'
response = requests.get(url, headers={"Accept": "application/json"})
data = response.json()
return data["collection"]["stats"]["floor_price"]
class Watchlist(Cog_Extension):
    """
    NFT price alerts for opensea
    """

    @commands.command()
    async def wl(self, ctx):
        """
        Show NFT watchlist
        """
        # Show the numbered watchlist, or usage help when it is empty.
        if self.bot.watchlist:
            msg = getWatchlistMsg(self.bot)
            embed = discord.Embed(description=msg, color=discord.Color.blue())
        else:
            msg = "You haven't added any collection to watchlist.\n"
            msg += "Please use `\wl_add <colletion_slug> <operator> <price> USD` to set the watchlist."
            embed = discord.Embed(description=msg, color=discord.Color.red())
        await ctx.send(embed=embed)

    @commands.command()
    async def wl_add(self, ctx, collection_slug: str, operator: str, price: str):
        """
        Set price alerts for NFT collections
        """
        # Validate each argument in turn; reply with a red embed on the first problem.
        if invalidCollectionSlug(collection_slug):
            msg = 'Please check the collection slug.'
            embed = discord.Embed(description=msg, color=discord.Color.red())
        elif invalidOperator(operator):
            msg = 'Please check the operator. Only accept `<`, `<=`, `=`, `>=`, `>`.'
            embed = discord.Embed(description=msg, color=discord.Color.red())
        elif not price.isnumeric():
            msg = 'Please check the input format: <price> must be an integer.'
            embed = discord.Embed(description=msg, color=discord.Color.red())
        else:
            # 'reached' marks alerts that have already fired so they are not re-sent.
            obj = {
                'collection_slug': collection_slug,
                'operator': operator,
                'price': price,
                'reached': False
            }
            self.bot.watchlist.append(obj)
            msg = (
                f'Successfully set price alert for {collection_slug} at {price} USD.\n'
            )
            embed = discord.Embed(description=msg, color=discord.Color.blue())
        await ctx.send(embed=embed)

    @commands.command()
    async def wl_rm(self, ctx, number: str):
        """
        Reomove price alert for NFT collection
        """
        # Convert the 1-based display number to a 0-based list index; a
        # non-numeric argument falls through to the error reply below instead
        # of raising an uncaught ValueError.
        try:
            index = int(number) - 1
        except ValueError:
            index = -1
        # BUG FIX: the original guard was the chained comparison
        # `0 > index >= len(...)`, which can never be true, so out-of-range
        # numbers crashed pop() and "0" silently removed the *last* entry via
        # negative indexing.
        if not self.bot.watchlist or index < 0 or index >= len(self.bot.watchlist):
            msg = 'Please check the number you want to remove.'
            embed = discord.Embed(description=msg, color=discord.Color.red())
        else:
            self.bot.watchlist.pop(index)
            msg = (f'Removed successfully.\n')
            if len(self.bot.watchlist) > 0:
                msg += (f'Watchlist now:\n{getWatchlistMsg(self.bot)}')
            embed = discord.Embed(description=msg, color=discord.Color.blue())
        await ctx.send(embed=embed)

    @commands.command()
    async def wl_start(self, ctx):
        """
        Start detecting price for NFT collections
        """
        if self.bot.watchlist:
            msg = (f'Started detecting price alert for:\n{getWatchlistMsg(self.bot)}')
            msg += (f'Price notifications will be sent to #watchlist-alert.')
            embed = discord.Embed(description=msg, color=discord.Color.blue())
            await ctx.send(embed=embed)
            # Create the alert channel on demand, then start the polling loop.
            guild = ctx.guild
            category = get(guild.categories, id=jdata['subscribed_category_ID'])
            channel = get(guild.text_channels, name='watchlist-alert')
            if channel is None:
                channel = await guild.create_text_channel('watchlist-alert', category=category)
            await detectPriceAlert(self.bot)
        else:
            msg = "You haven't added any collection to watchlist.\n"
            embed = discord.Embed(description=msg, color=discord.Color.red())
            await ctx.send(embed=embed)

    @commands.command()
    async def wl_clear(self, ctx):
        """
        Clear NFT watchlist
        """
        self.bot.watchlist = []
        msg = 'Watchlist cleared successfully.'
        embed = discord.Embed(description=msg, color=discord.Color.blue())
        await ctx.send(embed=embed)
def setup(bot):
    """discord.py extension entry point: initialise shared state and register the cog."""
    # The watchlist lives on the bot object so every command (and the polling
    # coroutine detectPriceAlert) operates on the same list.
    bot.watchlist = []
    bot.add_cog(Watchlist(bot))
| freewayfuh/dApp-discord-bot | cmds/watchlist.py | watchlist.py | py | 7,882 | python | en | code | 1 | github-code | 13 |
30241203403 | "============================JSON============================"
#JavaScript Object Notation — a single interchange format that can only hold
# the data types available in every JSON-capable programming language:
#   numbers       int, float
#   strings       str
#   dictionaries  dict
#   booleans      True, False
#   lists         list
#   empty value   None
import json
# serialization — converting from Python to JSON
#   dump  - writes a Python object to a file as JSON
#   dumps - a function that converts a Python object into a JSON string
# deserialization — converting from JSON back to Python
#   load  - reads a Python object back from a JSON file
#   loads - a function that converts a JSON string into a Python object
print(dir(json)) # shows the names exposed by the json module
python_list = [1,2,3]
json_list = json.dumps(python_list)
print(type(python_list)) #list
print(type(json_list)) #str — JSON text is just a Python string
print(python_list) #[1, 2, 3]
print(json_list) #"[1,2,3]"
json_dict = '{"a":1, "b":2}'
python_dict = json.loads(json_dict)
print(type(json_dict)) #<class 'str'>
print(type(python_dict)) #<class 'dict'>
list_ = [
    1,2,3,
    4.5,
    (1,2,3), # tuples are serialized as JSON arrays and come back as lists
    {"A":1},
    'beka',
    True, False, None
]
with open("test.json", "w") as file:
    json.dump(list_, file)
with open("test.json", "r") as file:
    res = json.load(file)
print(res)
#[1, 2, 3, 4.5, [1, 2, 3], {'A': 1}, 'beka', True, False, None]
# with open("test.txt", "w") as file:
#     json.dump(list_, file)
70587946258 | import numpy as np
import cv2
from keras.models import Sequential
from keras.layers import Dense, Activation
# Overfit a small MLP to memorise an image: pixel index (as an 18-bit binary
# vector) in, RGB value out.
num_inputs=18
depth=3
image=cv2.imread('lena.png')
# Flatten to one row per pixel. NOTE(review): len(image) is the image height,
# so this reshape only works for a square image, and the final reshape below
# hard-codes 512x512 — confirm lena.png is 512x512.
out = image.reshape(len(image)**2, 3)
# Encode each pixel index as a fixed-width binary string, split into a list of
# '0'/'1' characters; 18 bits cover 2**18 = 262144 >= 512*512 indices. The
# width-padded spaces are turned into leading zeros.
inp = np.array(list(map(lambda x: list(("{0:"+str(num_inputs)+"b}").format(x).replace(' ', '0')), range(len(out)))))
model = Sequential()
model.add(Dense(128, input_dim=num_inputs, activation='relu'))
for i in range(depth-1):
    model.add(Dense(128, activation='relu'))
model.add(Dense(3)) # one linear output per colour channel
model.compile(optimizer='adam',
              loss='mse')
# NOTE(review): resumes training from a previous run; this raises if
# weights.hdf5 does not exist yet — confirm a first-run path is intended.
model.load_weights("weights.hdf5", by_name=False)
model.fit(inp, out, epochs=4200, batch_size=len(inp))
model.save_weights("weights.hdf5")
predicted_image=model.predict(inp).reshape(512,512,3)
cv2.imwrite('a.png', predicted_image)
| corollari/overfitted | main.py | main.py | py | 788 | python | en | code | 1 | github-code | 13 |
22726911764 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
plt.rcParams.update({'figure.max_open_warning': 0})
import seaborn as sns
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# In[2]:
# modeling
from scipy import stats
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV, RepeatedKFold, cross_val_score,cross_val_predict,KFold
from sklearn.metrics import make_scorer,mean_squared_error
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet
from sklearn.svm import LinearSVR, SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor,AdaBoostRegressor
from xgboost import XGBRegressor
from sklearn.preprocessing import PolynomialFeatures,MinMaxScaler,StandardScaler
# In[3]:
data_train = pd.read_table('../input/zhengqi/zhengqi_train.txt',sep='\t')
data_test = pd.read_table('../input/zhengqi/zhengqi_test.txt',sep='\t')
# In[4]:
data_train.head(2)
data_test.head(2)
# In[5]:
# 将train和test相融合
data_train['origin'] = 'train'
data_test['origin'] = 'test'
data_all = pd.concat([data_train,data_test],axis=0,ignore_index=True)
data_all.head()
# In[6]:
# explore feature distribution
fig = plt.figure(figsize=(6,6))
for column in data_all.columns[0:-2]:
g = sns.kdeplot(data_all[column][(data_all["origin"] == "train")], color="Red", shade = True)
g = sns.kdeplot(data_all[column][(data_all["origin"] == "test")], ax =g, color="Blue", shade= True)
g.set_xlabel(column)
g.set_ylabel('Frequency')
g = g.legend(['train','test'])
plt.show()
# In[7]:
fig = plt.figure(figsize=(10, 10))
for i in range(len(data_all.columns)-2):
g = sns.FacetGrid(data_all, col='origin')
g = g.map(sns.distplot, data_all.columns[i])
# In[8]:
#删除特征"V5","V9","V11","V17","V22","V28",训练集和测试集分布不均
for column in ["V5","V9","V11","V17","V22","V28"]:
g = sns.kdeplot(data_all[column][(data_all["origin"] == "train")], color="Red", shade = True)
g = sns.kdeplot(data_all[column][(data_all["origin"] == "test")], ax =g, color="Blue", shade= True)
g.set_xlabel(column)
g.set_ylabel("Frequency")
g = g.legend(["train","test"])
plt.show()
data_all.drop(["V5","V9","V11","V17","V22","V28"],axis=1,inplace=True)
# In[9]:
# figure parameters
# 观察每个特征数据的离散程度,以及其分布与正态分布的区别
data_train1=data_all[data_all["origin"]=="train"].drop("origin",axis=1)
fcols = 2
frows = len(data_train.columns)
plt.figure(figsize=(5*fcols,4*frows))
i=0
for col in data_train1.columns:
i+=1
ax=plt.subplot(frows,fcols,i)
sns.regplot(x=col, y='target', data=data_train, ax=ax,
scatter_kws={'marker':'.','s':3,'alpha':0.3},
line_kws={'color':'k'});
plt.xlabel(col)
plt.ylabel('target')
i+=1
ax=plt.subplot(frows,fcols,i)
sns.distplot(data_train[col].dropna() , fit=stats.norm)
plt.xlabel(col)
# In[10]:
# 找出相关程度
plt.figure(figsize=(20, 16)) # 指定绘图对象宽度和高度
colmn = data_train1.columns.tolist() # 列表头
matrix_corr = data_train1[colmn].corr(method="spearman") # 相关系数矩阵,即给出了任意两个变量之间的相关系数
mask = np.zeros_like(matrix_corr, dtype=np.bool) # 构造与mcorr同维数矩阵 为bool型
mask[np.triu_indices_from(mask)] = True # 角分线右侧为True
cmap = sns.diverging_palette(220, 10, as_cmap=True) # 返回matplotlib colormap对象
g = sns.heatmap(matrix_corr, mask=mask, cmap=cmap, square=True, annot=True, fmt='0.2f') # 热力图(看两两相似度)
plt.show()
# In[11]:
print(data_train1.corr().abs())
# In[12]:
print(data_all.columns)
# In[13]:
# Threshold for removing correlated variables
threshold = 0.1
# Absolute value correlation matrix
corr_matrix = data_train1.corr().abs()
drop_col=corr_matrix[corr_matrix["target"]<threshold].index
data_all.drop(drop_col,axis=1,inplace=True)
# In[14]:
print(drop_col)
# In[15]:
print(data_all.columns)
# In[16]:
# normalise numeric columns 将剩下的数字数据正则化
cols_numeric=list(data_all.columns)
cols_numeric.remove("origin")
def scale_minmax(col):
    """Linearly rescale a Series/array onto [0, 1] using its own min and max."""
    lo, hi = col.min(), col.max()
    return (col - lo) / (hi - lo)
scale_cols = [col for col in cols_numeric if col!='target']
data_all[scale_cols] = data_all[scale_cols].apply(scale_minmax,axis=0) # axis=0说明这个apply函数是以每一列数据作为对象
data_all[scale_cols].describe()
# In[17]:
# 对每一个特征进行boxcox变换, 并且输出图像,将变换前与变换后的图像进行对比
fcols = 6
frows = len(cols_numeric)-1
plt.figure(figsize=(4*fcols,4*frows))
i=0
for var in cols_numeric:
if var!='target':
dat = data_all[[var, 'target']].dropna() # dat是data_all中将选取var列以及‘target’列后除去空值后的数据
i+=1
plt.subplot(frows,fcols,i)
sns.distplot(dat[var] , fit=stats.norm); # 画出var这一列的dist函数以及拟合正态分布曲线
plt.title(var+' Original')
plt.xlabel('')
i+=1
plt.subplot(frows,fcols,i)
_=stats.probplot(dat[var], plot=plt) # probplot是画出分位数的图形
plt.title('skew='+'{:.4f}'.format(stats.skew(dat[var])))
plt.xlabel('')
plt.ylabel('')
i+=1
plt.subplot(frows,fcols,i)
plt.plot(dat[var], dat['target'],'.',alpha=0.5)
plt.title('corr='+'{:.2f}'.format(np.corrcoef(dat[var], dat['target'])[0][1]))
i+=1
plt.subplot(frows,fcols,i)
trans_var, lambda_var = stats.boxcox(dat[var].dropna()+1)
trans_var = scale_minmax(trans_var)
sns.distplot(trans_var , fit=stats.norm);
plt.title(var+' Tramsformed')
plt.xlabel('')
i+=1
plt.subplot(frows,fcols,i)
_=stats.probplot(trans_var, plot=plt)
plt.title('skew='+'{:.4f}'.format(stats.skew(trans_var)))
plt.xlabel('')
plt.ylabel('')
i+=1
plt.subplot(frows,fcols,i)
plt.plot(trans_var, dat['target'],'.',alpha=0.5)
plt.title('corr='+'{:.2f}'.format(np.corrcoef(trans_var,dat['target'])[0][1]))
# In[18]:
cols_transform=data_all.columns[0:-2]
for col in cols_transform:
# transform column
data_all.loc[:,col], _ = stats.boxcox(data_all.loc[:,col]+1) # 对col列的所有行组成的一维数组进行box-cox转换,并且对所有特征列都进行这个变换
# In[19]:
print(data_all.target.describe())
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
sns.distplot(data_all.target.dropna() , fit=stats.norm);
plt.subplot(1,2,2)
_=stats.probplot(data_all.target.dropna(), plot=plt)
# In[20]:
# 将traget进行对数转换,以提高正则性
sp = data_train.target
data_train.target1 =np.power(1.5,sp)
print(data_train.target1.describe())
plt.figure(figsize=(12,4))
plt.subplot(1,2,1)
sns.distplot(data_train.target1.dropna(),fit=stats.norm);
plt.subplot(1,2,2)
_=stats.probplot(data_train.target1.dropna(), plot=plt)
# In[21]:
# 对于所有的特征列,在同一张图上画出train和test的ked曲线(这一段的用处是啥)
for column in data_all.columns[0:-2]:
g = sns.kdeplot(data_all[column][(data_all["origin"] == "train")], color="Red", shade = True)
g = sns.kdeplot(data_all[column][(data_all["origin"] == "test")], ax =g, color="Blue", shade= True)
g.set_xlabel(column)
g.set_ylabel("Frequency")
g = g.legend(["train","test"])
plt.show()
# In[22]:
#function to get training samples
def get_training_data():
    """Split the training partition of `data_all` into train/validation sets.

    Returns (X_train, X_valid, y_train, y_valid) from a 70/30 split with a
    fixed random_state for reproducibility.
    """
    # extract training samples
    from sklearn.model_selection import train_test_split
    df_train = data_all[data_all["origin"]=="train"]
    # NOTE(review): data_train.target1 was assigned as a DataFrame *attribute*
    # (not a column) earlier in the script; pandas warns about that pattern —
    # confirm it still carries the 1.5**target values as intended.
    df_train["label"]=data_train.target1
    # split target and features ("SalePrice" in the original comment was a
    # leftover from a house-prices template)
    y = df_train.target
    X = df_train.drop(["origin","target","label"],axis=1)
    X_train,X_valid,y_train,y_valid=train_test_split(X,y,test_size=0.3,random_state=100)
    return X_train,X_valid,y_train,y_valid
# xtract test data
# xtract test data
def get_test_data():
    """Return the test-partition feature frame with bookkeeping columns removed."""
    test_rows = data_all[data_all["origin"] == "test"].reset_index(drop=True)
    return test_rows.drop(["origin", "target"], axis=1)
# In[23]:
from sklearn.metrics import make_scorer
# metric for evaluation
def rmse(y_true, y_pred):
    """Root-mean-square error between two equal-length numeric arrays."""
    residuals = y_pred - y_true
    return np.sqrt(np.mean(residuals ** 2))
def mse(y_ture,y_pred):
    # Mean squared error — thin wrapper around sklearn's implementation so it
    # can be wrapped by make_scorer below.
    # NOTE(review): the first parameter name is misspelled ("y_ture"); callers
    # in this file pass it positionally, so renaming would be safe but is left
    # for a separate change to avoid breaking any keyword callers elsewhere.
    return mean_squared_error(y_ture,y_pred)
# scorer to be used in sklearn model fitting
rmse_scorer = make_scorer(rmse, greater_is_better=False)
mse_scorer = make_scorer(mse, greater_is_better=False)
# In[24]:
# function to detect outliers based on the predictions of a model
# function to detect outliers based on the predictions of a model
def find_outliers(model, X, y, sigma=3):
    """Flag samples whose standardised residual |z| exceeds *sigma*.

    Fits *model* on (X, y) if it is not already fitted, prints fit statistics,
    draws three diagnostic plots (saved to outliers.png) and returns the index
    of the outlying rows.
    """
    # predict y values using model
    try:
        y_pred = pd.Series(model.predict(X), index=y.index)
    # if predicting fails, try fitting the model first
    # NOTE(review): bare `except:` also swallows KeyboardInterrupt etc.;
    # narrowing it to Exception (or sklearn's NotFittedError) would be safer.
    except:
        model.fit(X,y)
        y_pred = pd.Series(model.predict(X), index=y.index)
    # calculate residuals between the model prediction and true y values
    resid = y - y_pred
    mean_resid = resid.mean()
    std_resid = resid.std()
    # calculate z statistic, define outliers to be where |z|>sigma
    z = (resid - mean_resid)/std_resid
    outliers = z[abs(z)>sigma].index
    # print fit statistics and the list of outlying indices
    print('R2=',model.score(X,y))
    print('rmse=',rmse(y, y_pred))
    print("mse=",mean_squared_error(y,y_pred))
    print('---------------------------------------')
    print('mean of residuals:',mean_resid)
    print('std of residuals:',std_resid)
    print('---------------------------------------')
    print(len(outliers),'outliers:')
    print(outliers.tolist())
    # plot 1: y vs y_pred, outliers highlighted in red
    plt.figure(figsize=(15,5))
    ax_131 = plt.subplot(1,3,1)
    plt.plot(y,y_pred,'.')
    plt.plot(y.loc[outliers],y_pred.loc[outliers],'ro')
    plt.legend(['Accepted','Outlier'])
    plt.xlabel('y')
    plt.ylabel('y_pred');
    # plot 2: y vs residual (y - y_pred)
    ax_132=plt.subplot(1,3,2)
    plt.plot(y,y-y_pred,'.')
    plt.plot(y.loc[outliers],y.loc[outliers]-y_pred.loc[outliers],'ro')
    plt.legend(['Accepted','Outlier'])
    plt.xlabel('y')
    plt.ylabel('y - y_pred');
    # plot 3: histogram of z with the outliers overlaid in red
    ax_133=plt.subplot(1,3,3)
    z.plot.hist(bins=50,ax=ax_133)
    z.loc[outliers].plot.hist(color='r',bins=50,ax=ax_133)
    plt.legend(['Accepted','Outlier'])
    plt.xlabel('z')
    plt.savefig('outliers.png')
    return outliers
# In[25]:
# get training data
from sklearn.linear_model import Ridge
X_train, X_valid,y_train,y_valid = get_training_data()
test=get_test_data()
# find and remove outliers using a Ridge model
outliers = find_outliers(Ridge(), X_train, y_train)
# permanently remove these outliers from the data
#df_train = data_all[data_all["origin"]=="train"]
#df_train["label"]=data_train.target1
#df_train=df_train.drop(outliers)
X_outliers=X_train.loc[outliers]
y_outliers=y_train.loc[outliers]
X_t=X_train.drop(outliers)
y_t=y_train.drop(outliers)
# In[26]:
def get_trainning_data_omitoutliers():
    # Return copies of the training features/targets with the Ridge-flagged
    # outliers already removed (X_t / y_t are module-level globals).
    # NOTE: the misspelling "trainning" is kept — later cells call the
    # function by this exact name.
    y1=y_t.copy()
    X1=X_t.copy()
    return X1,y1
# In[27]:
from sklearn.preprocessing import StandardScaler
def train_model(model, param_grid=[], X=[], y=[],
                splits=5, repeats=5):
    """Fit *model* (optionally via grid search) and report cross-val scores.

    When X/y are not given, the outlier-free training data is used. With a
    non-empty *param_grid*, a GridSearchCV over a RepeatedKFold picks the best
    estimator; otherwise cross_val_score evaluates the model as-is. Prints fit
    statistics, draws residual diagnostics, and returns
    (model, cv_score, grid_results).

    NOTE(review): the mutable defaults (param_grid=[], X=[], y=[]) are only
    len()-checked, never mutated, so they are harmless here — but None
    sentinels would be the safer idiom. Also, in the no-grid branch the model
    is never fitted before `model.predict(X)` below; this works only when the
    caller passes an already-fitted model — confirm.
    """
    # get unmodified training data, unless data to use already specified
    if len(y)==0:
        X,y = get_trainning_data_omitoutliers()
        #poly_trans=PolynomialFeatures(degree=2)
        #X=poly_trans.fit_transform(X)
        #X=MinMaxScaler().fit_transform(X)
    # create cross-validation method
    rkfold = RepeatedKFold(n_splits=splits, n_repeats=repeats)
    # perform a grid search if param_grid given
    if len(param_grid)>0:
        # setup grid search parameters
        gsearch = GridSearchCV(model, param_grid, cv=rkfold,
                               scoring="neg_mean_squared_error",
                               verbose=1, return_train_score=True)
        # search the grid
        gsearch.fit(X,y)
        # extract best model from the grid
        model = gsearch.best_estimator_
        best_idx = gsearch.best_index_
        # get cv-scores for best model
        grid_results = pd.DataFrame(gsearch.cv_results_)
        cv_mean = abs(grid_results.loc[best_idx,'mean_test_score'])
        cv_std = grid_results.loc[best_idx,'std_test_score']
    # no grid search, just cross-val score for given model
    else:
        grid_results = []
        cv_results = cross_val_score(model, X, y, scoring="neg_mean_squared_error", cv=rkfold)
        cv_mean = abs(np.mean(cv_results))
        cv_std = np.std(cv_results)
    # combine mean and std cv-score in to a pandas series
    cv_score = pd.Series({'mean':cv_mean,'std':cv_std})
    # predict y using the fitted model
    y_pred = model.predict(X)
    # print stats on model performance
    print('----------------------')
    print(model)
    print('----------------------')
    print('score=',model.score(X,y))
    print('rmse=',rmse(y, y_pred))
    print('mse=',mse(y, y_pred))
    print('cross_val: mean=',cv_mean,', std=',cv_std)
    # residual plots: standardise residuals and count |z|>3 samples
    y_pred = pd.Series(y_pred,index=y.index)
    resid = y - y_pred
    mean_resid = resid.mean()
    std_resid = resid.std()
    z = (resid - mean_resid)/std_resid
    n_outliers = sum(abs(z)>3)
    plt.figure(figsize=(15,5))
    ax_131 = plt.subplot(1,3,1)
    plt.plot(y,y_pred,'.')
    plt.xlabel('y')
    plt.ylabel('y_pred');
    plt.title('corr = {:.3f}'.format(np.corrcoef(y,y_pred)[0][1]))
    ax_132=plt.subplot(1,3,2)
    plt.plot(y,y-y_pred,'.')
    plt.xlabel('y')
    plt.ylabel('y - y_pred');
    plt.title('std resid = {:.3f}'.format(std_resid))
    ax_133=plt.subplot(1,3,3)
    z.plot.hist(bins=50,ax=ax_133)
    plt.xlabel('z')
    plt.title('{:.0f} samples with z>3'.format(n_outliers))
    return model, cv_score, grid_results
# In[28]:
# places to store optimal models and scores
opt_models = dict()
score_models = pd.DataFrame(columns=['mean','std'])
# no. k-fold splits
splits=5
# no. k-fold iterations
repeats=5
# In[29]:
# different model to fit and predict
model = 'Ridge'
opt_models[model] = Ridge()
alph_range = np.arange(0.25,6,0.25)
param_grid = {'alpha': alph_range}
opt_models[model],cv_score,grid_results = train_model(opt_models[model], param_grid=param_grid,
splits=splits, repeats=repeats)
cv_score.name = model
score_models = score_models.append(cv_score)
plt.figure()
plt.errorbar(alph_range, abs(grid_results['mean_test_score']),
abs(grid_results['std_test_score'])/np.sqrt(splits*repeats))
plt.xlabel('alpha')
plt.ylabel('score')
model = 'Lasso'
opt_models[model] = Lasso()
alph_range = np.arange(1e-4,1e-3,4e-5)
param_grid = {'alpha': alph_range}
opt_models[model], cv_score, grid_results = train_model(opt_models[model], param_grid=param_grid,
splits=splits, repeats=repeats)
cv_score.name = model
score_models = score_models.append(cv_score)
plt.figure()
plt.errorbar(alph_range, abs(grid_results['mean_test_score']),abs(grid_results['std_test_score'])/np.sqrt(splits*repeats))
plt.xlabel('alpha')
plt.ylabel('score')
model ='ElasticNet'
opt_models[model] = ElasticNet()
param_grid = {'alpha': np.arange(1e-4,1e-3,1e-4),
'l1_ratio': np.arange(0.1,1.0,0.1),
'max_iter':[100000]}
opt_models[model], cv_score, grid_results = train_model(opt_models[model], param_grid=param_grid,
splits=splits, repeats=1)
cv_score.name = model
score_models = score_models.append(cv_score)
# linear svm regression
model='LinearSVR'
opt_models[model] = LinearSVR()
crange = np.arange(0.1,1.0,0.1)
param_grid = {'C':crange,
'max_iter':[1000]}
opt_models[model], cv_score, grid_results = train_model(opt_models[model], param_grid=param_grid,
splits=splits, repeats=repeats)
cv_score.name = model
score_models = score_models.append(cv_score)
plt.figure()
plt.errorbar(crange, abs(grid_results['mean_test_score']),abs(grid_results['std_test_score'])/np.sqrt(splits*repeats))
plt.xlabel('C')
plt.ylabel('score')
# knn
model = 'KNeighbors'
opt_models[model] = KNeighborsRegressor()
param_grid = {'n_neighbors':np.arange(3,11,1)}
opt_models[model], cv_score, grid_results = train_model(opt_models[model], param_grid=param_grid,
splits=splits, repeats=1)
cv_score.name = model
score_models = score_models.append(cv_score)
plt.figure()
plt.errorbar(np.arange(3,11,1), abs(grid_results['mean_test_score']),abs(grid_results['std_test_score'])/np.sqrt(splits*1))
plt.xlabel('n_neighbors')
plt.ylabel('score')
# gradient boosting
model = 'GradientBoosting'
opt_models[model] = GradientBoostingRegressor()
param_grid = {'n_estimators':[150,250,350],
'max_depth':[1,2,3],
'min_samples_split':[5,6,7]}
opt_models[model], cv_score, grid_results = train_model(opt_models[model], param_grid=param_grid,
splits=splits, repeats=1)
cv_score.name = model
score_models = score_models.append(cv_score)
# randomforest
model = 'RandomForest'
opt_models[model] = RandomForestRegressor()
param_grid = {'n_estimators':[100,150,200],
'max_features':[8,12,16,20,24],
'min_samples_split':[2,4,6]}
opt_models[model], cv_score, grid_results = train_model(opt_models[model], param_grid=param_grid,
splits=5, repeats=1)
cv_score.name = model
score_models = score_models.append(cv_score)
# xgboost
model = 'XGB'
opt_models[model] = XGBRegressor()
param_grid = {'n_estimators':[100,200,300,400,500],
'max_depth':[1,2,3],
}
opt_models[model], cv_score,grid_results = train_model(opt_models[model], param_grid=param_grid,
splits=splits, repeats=1)
cv_score.name = model
score_models = score_models.append(cv_score)
# In[30]:
#
def model_predict(test_data, test_y=[], stack=False):
    """Average the predictions of the trained base (or stacked) models.

    test_data : feature matrix to predict on.
    test_y    : optional ground truth; when given, per-model and averaged
                MSEs are printed and nothing is returned (evaluation mode,
                matching the original behaviour).
    stack     : use the second-level `mix_models` pool instead of
                `opt_models`.

    Returns a pd.Series of the averaged predictions when no labels are given.

    The original function duplicated the whole loop for the stack / non-stack
    cases; this version shares one loop and only varies the model pool.
    """
    # Pick the model pool; the first-level pool skips the two weak learners.
    models = mix_models if stack else opt_models
    contributing = 0
    y_predict_total = np.zeros((test_data.shape[0],))
    for name in models.keys():
        if not stack and name in ("LinearSVR", "KNeighbors"):
            continue
        y_predict = models[name].predict(test_data)
        y_predict_total += y_predict
        contributing += 1
        if len(test_y) > 0:
            print("{}_mse:".format(name), mean_squared_error(y_predict, test_y))
    y_predict_mean = np.round(y_predict_total / contributing, 3)
    if len(test_y) > 0:
        print("mean_mse:", mean_squared_error(y_predict_mean, test_y))
    else:
        return pd.Series(y_predict_mean)
# In[31]:
model_predict(X_valid,y_valid)
# In[32]:
# to create stack features
def create_stack_features(test_data):
    """Build a meta-feature frame: one column of predictions per base model."""
    preds = {name: estimator.predict(test_data)
             for name, estimator in opt_models.items()}
    # Preserve the model insertion order as the column order.
    return pd.DataFrame(preds, columns=list(opt_models.keys()))
# In[33]:
mix_x_train=create_stack_features(X_t)
mix_y_train=pd.Series(y_t.values)
mix_x_valid=create_stack_features(X_valid)
mix_y_valid=pd.Series(y_valid.values)
mix_x_test=create_stack_features(test)
# In[34]:
mix_models = dict()
score_models = pd.DataFrame(columns=['mean','std'])
# no. k-fold splits
splits=5
# no. k-fold iterations
repeats=5
# In[35]:
model = 'Ridge'
mix_models[model] = Ridge()
alph_range = np.arange(0.25,6,0.25)
param_grid = {'alpha': alph_range}
mix_models[model],cv_score,grid_results = train_model(mix_models[model], param_grid=param_grid, X=mix_x_train,y=mix_y_train,
splits=splits, repeats=repeats)
cv_score.name = model
score_models = score_models.append(cv_score)
plt.figure()
plt.errorbar(alph_range, abs(grid_results['mean_test_score']),
abs(grid_results['std_test_score'])/np.sqrt(splits*repeats))
plt.xlabel('alpha')
plt.ylabel('score')
# In[36]:
model = 'Lasso'
mix_models[model] = Lasso()
alph_range = np.arange(1e-4,1e-3,4e-5)
param_grid = {'alpha': alph_range}
mix_models[model], cv_score, grid_results = train_model(mix_models[model], param_grid=param_grid, X=mix_x_train,y=mix_y_train,
splits=splits, repeats=repeats)
cv_score.name = model
score_models = score_models.append(cv_score)
plt.figure()
plt.errorbar(alph_range, abs(grid_results['mean_test_score']),abs(grid_results['std_test_score'])/np.sqrt(splits*repeats))
plt.xlabel('alpha')
plt.ylabel('score')
# In[38]:
model ='ElasticNet'
mix_models[model] = ElasticNet()
param_grid = {'alpha': np.arange(1e-4,1e-3,1e-4),
'l1_ratio': np.arange(0.1,1.0,0.1),
'max_iter':[100000]}
mix_models[model], cv_score, grid_results = train_model(mix_models[model], param_grid=param_grid, X=mix_x_train,y=mix_y_train,
splits=splits, repeats=1)
cv_score.name = model
score_models = score_models.append(cv_score)
# In[40]:
model = 'XGB'
mix_models[model] = XGBRegressor()
param_grid = {'n_estimators':[100,200,300,400,500],
'max_depth':[1,2,3],
}
mix_models[model], cv_score,grid_results = train_model(mix_models[model], param_grid=param_grid, X=mix_x_train,y=mix_y_train,
splits=splits, repeats=1)
cv_score.name = model
score_models = score_models.append(cv_score)
# In[41]:
model = 'GradientBoosting'
mix_models[model] = GradientBoostingRegressor()
param_grid = {'n_estimators':[150,250,350],
'max_depth':[1,2,3],
'min_samples_split':[5,6,7]}
mix_models[model], cv_score, grid_results = train_model(mix_models[model], param_grid=param_grid, X=mix_x_train,y=mix_y_train,
splits=splits, repeats=1)
cv_score.name = model
score_models = score_models.append(cv_score)
# In[42]:
model = 'RandomForest'
mix_models[model] = RandomForestRegressor()
param_grid = {'n_estimators':[100,150,200],
'max_features':[2,3,4,5],
'min_samples_split':[2,4,6]}
mix_models[model], cv_score, grid_results = train_model(mix_models[model], param_grid=param_grid, X=mix_x_train,y=mix_y_train,
splits=5, repeats=1)
cv_score.name = model
score_models = score_models.append(cv_score)
# In[43]:
model_predict(mix_x_valid,mix_y_valid,stack=True)
# In[ ]:
| kksignal/industry-stream-predict | industry stream predict.py | industry stream predict.py | py | 24,624 | python | en | code | 0 | github-code | 13 |
9520397117 | import json
from clip.simple_tokenizer import SimpleTokenizer
if __name__ == "__main__":
tok = SimpleTokenizer()
encoder = tok.encoder
ranks = dict(("`".join(k), v) for k, v in tok.bpe_ranks.items())
with open("models/tokenizer.json", "w") as f:
json.dump({
"bpe_ranks": ranks,
"encoder": encoder
}, f)
| simon987/sist2-models | clip/create_tokenizer_data.py | create_tokenizer_data.py | py | 363 | python | en | code | 1 | github-code | 13 |
17250031221 | from itertools import product
from collections import defaultdict
def count_up(p, q, r):
    """Yield every length-r tuple drawn from the inclusive range p..q."""
    return product(range(p, q + 1), repeat=r)
def force_solve(lhs, rhs):
    """Brute-force integer coefficients (1..10) that balance the equation.

    lhs/rhs are lists of `process`-formatted compound strings (e.g. "H_2 O_1").
    Every coefficient assignment is tried; for each, the per-element atom
    counts of the two sides are tallied and compared. Returns the first
    coefficient tuple (left side first, then right) that balances, or False
    when no combination of 1..10 coefficients works.
    """
    l = len(lhs) + len(rhs)
    for mults in count_up(1, 10, l):
        nl, nr = lhs.copy(), rhs.copy()
        # Split the coefficient tuple between the two sides.
        lm, rm = mults[:len(lhs)], mults[len(lhs):]
        le, re = defaultdict(int), defaultdict(int)
        # Tally element -> atom count for the left side...
        for el, ml in zip(nl, lm):
            for x in el.split():
                en, n = x.split('_')
                le[en] += int(n) * ml
        # ...and for the right side.
        for er, mr in zip(nr, rm):
            for x in er.split():
                en, n = x.split('_')
                re[en] += int(n) * mr
        if le == re:
            return mults
    return False
def process(s):
    """Convert a compound formula into space-separated "Element_count" tokens.

    "H2O"       -> "H_2 O_1"
    "C12H22O11" -> "C_12 H_22 O_11"   (multi-digit counts supported)

    An uppercase letter starts a new element, lowercase letters extend its
    symbol, and digits form its count; an element without an explicit count
    gets "_1" so force_solve can always split each token on '_'.
    """
    r = []
    for c in s:
        if c.isupper():
            # Close off the previous element with an implicit count of 1.
            if r and '_' not in r[-1]:
                r[-1] += '_1'
            r.append(c)
        elif c.islower():
            r[-1] += c
        else:
            if c.isdigit() and '_' in r[-1]:
                # BUG FIX: extend a multi-digit count ("C_1" + "2" -> "C_12")
                # instead of appending a second underscore ("C_1_2"), which
                # broke force_solve's two-way split on '_'.
                r[-1] += c
            else:
                r[-1] += '_' + c
    if r and '_' not in r[-1]:
        r[-1] += '_1'
    return ' '.join(r)
def interface(l, r):
    """Balance the equation `l` = `r` (e.g. "H2 + O2" and "H2O").

    Returns a (left, right) pair of '+'-joined compound strings with the
    found coefficients prepended, or False when no 1..10 coefficients
    balance the equation.
    """
    ls, rs = l.replace(' ', '').split('+'), r.replace(' ', '').split('+')
    sol = force_solve([process(x) for x in ls], [process(y) for y in rs])
    if not sol:
        return False
    lm, rm = sol[:len(ls)], sol[len(ls):]
    # A coefficient of 1 stays implicit: str(i)*(i>1) is '' when i == 1.
    return ('+'.join(str(i)*(i>1)+j for i,j in zip(lm,ls)),
            '+'.join(str(i)*(i>1)+j for i,j in zip(rm,rs)))
| nayakrujul/balance-equation | balance/balance.py | balance.py | py | 1,413 | python | en | code | 1 | github-code | 13 |
42075608238 | """
Base module for processing all timeseries based data.
Use this as the base module for processing illfiles,bf.txt,weafiles.
Dependencies: none
Python version : 2.7
"""
from __future__ import print_function
from __future__ import division
import logging
logger = logging.getLogger("__main__")
logging.basicConfig(format='%(asctime)s -%(levelname)s module:%(module)s function:%(funcName)s message--%(message)s')
import sys
import datetime as _dt
def timeStamp(mval, dval, tval, yearval=2015):
    """Return a datetime for month *mval*, day *dval* and fractional hour *tval*.

    *tval* is an hour-of-day float (e.g. 14.5 -> 14:30). The fractional part
    is converted to minutes with rounding: the original int() truncation lost
    a minute whenever the float was stored slightly low (e.g. 2.9 is stored
    as 2.8999999..., which truncated to 2:53 instead of 2:54).
    """
    hour = int(tval)
    minute = int(round((tval - hour) * 60))
    if minute == 60:
        # Rounding pushed us into the next full hour (e.g. tval = 3.99999...).
        hour += 1
        minute = 0
    return _dt.datetime(yearval, int(mval), int(dval), hour, minute)
def readTextFile(filename, delimiter=False):
    """Yield each line of *filename* split into a list of tokens.

    With a falsy *delimiter* the line is split on runs of whitespace (which
    also drops the trailing newline); otherwise it is split on the delimiter
    verbatim, newline included.
    """
    with open(filename, 'r') as handle:
        for raw_line in handle:
            yield raw_line.split(delimiter) if delimiter else raw_line.split()
class TimeArray(object):
    """Base class for all the time series readStadicData. Use this to process illfiles, wea files etc."""
    def __init__(self,filename,timeoffset=False,delimiter=False):
        # NOTE(review): `timeoffset` is accepted but never used — confirm intent.
        self.filename=filename
        self.timedata,self.extradata = self.readfile(filename,delimiter) #Run readfile and extract readStadicData.
    def readfile(self,filenm,delimiter):
        """Parse a time-series file into a (timedata, extradata) tuple.

        `timedata` is a list of dicts with keys "m", "d", "h",
        "readStadicData" (the remaining numeric values of the line) and
        "tstamp" (a datetime built by timeStamp). `extradata` maps token[0]
        to token[1] for every line that is not fully numeric (e.g. wea-file
        header lines). Despite the original docstring, this is a plain
        function, not a generator. `filenm` may be a path or an
        already-split iterable of token lists.
        """
        timefiledata = []
        timefileextra = {}
        fixtimeflag = False
        hour0=hour1=False
        # Accept either a filename (str/unicode — this module targets
        # Python 2.7) or any iterable of pre-split token lists.
        if isinstance(filenm,(str,unicode)):
            timeobject = readTextFile(filenm,delimiter)
        else:
            timeobject = filenm
        for idx,lines in enumerate(timeobject):
            try:
                floatlines = map(float,lines)
                month,date,hour = floatlines[:3]
                dataval = floatlines[3:]
                # If the file starts at hour 1.0 with 1.0-hour steps, the data
                # is treated as hour-ending: every stamp is shifted back by
                # half an hour (row 0 is retro-fixed to 0.5 below).
                # NOTE(review): once hour0 is truthy, `not hour1 or hour0` is
                # always true, which is what keeps the -0.5 shift applied to
                # every subsequent row — confirm that is the intended reading.
                if not hour1 or hour0:
                    hour1 = hour
                    if hour0==1.0 and hour1==2.0:
                        #Go back to the array that stores timevalues and fix the hour and time stamp
                        timefiledata[0]['h']=0.5
                        timefiledata[0]['tstamp']=timeStamp(1,1,0.5)
                        fixtimeflag=True
                    if not hour0:
                        hour0 = hour
                    if fixtimeflag:
                        hour = hour-0.5
                timestamp = timeStamp(month,date,hour)
                timefiledata.append({"m":month,"d":date,"h":hour,"readStadicData":dataval,"tstamp":timestamp})
            except ValueError:
                # Non-numeric line (e.g. a header): stash it as a key/value pair.
                print(sys.exc_info())
                timefileextra[lines[0]]=lines[1]
        return (timefiledata,timefileextra)
#Test the module.
if __name__ == '__main__':
    # NOTE(review): __housekeeping__ is not defined anywhere in this module,
    # so running the file directly raises NameError on the next line —
    # confirm whether a setup helper was removed or should be imported.
    __housekeeping__()
    illfile = TimeArray(r'examples\test_BF.txt')
    print(illfile.timedata[:5])
    print(illfile.extradata)
    epwFile = TimeArray(r'examples\statecollege.epw',delimiter=',')
2250421859 | """This is a simple example demonstrating how to clone the behavior of an expert.
Refer to the jupyter notebooks for more detailed examples of how to use the algorithms.
"""
import numpy as np
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.ppo import MlpPolicy
from imitation.algorithms import bc
from imitation.data import rollout
from imitation.data.wrappers import RolloutInfoWrapper
from imitation.policies.serialize import load_policy
from imitation.util.util import make_vec_env
rng = np.random.default_rng(0)
env = make_vec_env(
"seals:seals/CartPole-v0",
rng=rng,
post_wrappers=[lambda env, _: RolloutInfoWrapper(env)], # for computing rollouts
)
def train_expert():
# note: use `download_expert` instead to download a pretrained, competent expert
print("Training a expert.")
expert = PPO(
policy=MlpPolicy,
env=env,
seed=0,
batch_size=64,
ent_coef=0.0,
learning_rate=0.0003,
n_epochs=10,
n_steps=64,
)
expert.learn(1_000) # Note: change this to 100_000 to train a decent expert.
return expert
def download_expert():
print("Downloading a pretrained expert.")
expert = load_policy(
"ppo-huggingface",
organization="HumanCompatibleAI",
env_name="seals-CartPole-v0",
venv=env,
)
return expert
def sample_expert_transitions():
# expert = train_expert() # uncomment to train your own expert
expert = download_expert()
print("Sampling expert transitions.")
rollouts = rollout.rollout(
expert,
env,
rollout.make_sample_until(min_timesteps=None, min_episodes=50),
rng=rng,
)
return rollout.flatten_trajectories(rollouts)
transitions = sample_expert_transitions()
bc_trainer = bc.BC(
observation_space=env.observation_space,
action_space=env.action_space,
demonstrations=transitions,
rng=rng,
)
evaluation_env = make_vec_env(
"seals:seals/CartPole-v0",
rng=rng,
env_make_kwargs={"render_mode": "human"}, # for rendering
)
print("Evaluating the untrained policy.")
reward, _ = evaluate_policy(
bc_trainer.policy, # type: ignore[arg-type]
evaluation_env,
n_eval_episodes=3,
render=True, # comment out to speed up
)
print(f"Reward before training: {reward}")
print("Training a policy using Behavior Cloning")
bc_trainer.train(n_epochs=1)
print("Evaluating the trained policy.")
reward, _ = evaluate_policy(
bc_trainer.policy, # type: ignore[arg-type]
evaluation_env,
n_eval_episodes=3,
render=True, # comment out to speed up
)
print(f"Reward after training: {reward}")
| HumanCompatibleAI/imitation | examples/quickstart.py | quickstart.py | py | 2,727 | python | en | code | 1,004 | github-code | 13 |
7438277931 | #!/usr/bin/env python
"""
Basic web server thing
"""
from bottle import request, response, debug, run, error, route, static_file
import bottle
import logging
import json
import os
FRONT_END_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'front_end')
@error(400)
def error400(error):
logging.info('AN ERROR HAS OCCURED')
return bottle.HTTPResponse(json.dumps({
'error': error.body
}), error.status)
@error(404)
def error404(error):
return error400(error)
# To aid local development:
@route('/')
@route('/<filepath:path>')
def server_static(filepath="index.html"):
return static_file(filepath, root=FRONT_END_DIR)
def main():
# It will just run on 8080 by default
debug(True)
run(reloader=True)
if __name__ == "__main__":
sys.exit(main())
| rdooley/et_al | server.py | server.py | py | 809 | python | en | code | 0 | github-code | 13 |
11602040162 | # Question: Given a string and a pattern, find the smallest substring in the given string which has all the characters of the given pattern.
# Example 1:
# Input: String="aabdec", Pattern="abc"
# Output: "abdec"
# Explanation: The smallest substring having all characters of the pattern is "abdec"
# Example 2:
# Input: String="abdabca", Pattern="abc"
# Output: "abc"
# Explanation: The smallest substring having all characters of the pattern is "abc".
# Example 3:
# Input: String="adcad", Pattern="abc"
# Output: ""
# Explanation: No substring in the given string has all characters of the pattern.
def find_substring(str, pattern):
matched = 0
window_start = 0
substr_start = 0
min_length = len(str) + 1
char_freq = {}
for char in pattern:
if char not in char_freq:
char_freq[char] = 0
char_freq[char] += 1
for window_end in range(len(str)):
right_char = str[window_end]
if right_char in char_freq:
char_freq[right_char] -= 1
if char_freq[right_char] == 0:
matched += 1
while matched == len(char_freq):
subStringLength = window_end - window_start + 1
if subStringLength < min_length:
min_length = subStringLength
substr_start = window_start
left_char = str[window_start]
window_start += 1
if left_char in char_freq:
if char_freq[left_char] == 0:
matched -= 1
char_freq[left_char] += 1
if min_length > len(str):
return ""
return str[substr_start : substr_start + min_length]
print(find_substring("aabdec", "abc"))
print(find_substring("abdabca", "abc"))
print(find_substring("adcad", "abc")) | webdevlex/algorithms-in-python | grokking/1 - Sliding Window/1.10 - Smallest Window containing Substring (hard).py | 1.10 - Smallest Window containing Substring (hard).py | py | 1,784 | python | en | code | 0 | github-code | 13 |
41922270163 | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import scipy.misc
import shutil
import os
import conv_model
max_iter = 20000
batch_size = 256
output_path = 'output'
log_path = 'log'
freq_print = 20
freq_save = 1000
KEEP_RATE = 0.7
IM_HEIGHT = 28
IM_WIDTH = 28
IM_SIZE = IM_HEIGHT * IM_WIDTH
def store_result(batch_res, fname, grid_size=(8, 8), grid_pad=5):
# display function
batch_res = 0.5 * batch_res.reshape((batch_res.shape[0], IM_HEIGHT, IM_WIDTH)) + 0.5
img_h, img_w = batch_res.shape[1], batch_res.shape[2]
grid_h = img_h * grid_size[0] + grid_pad * (grid_size[0] - 1)
grid_w = img_w * grid_size[1] + grid_pad * (grid_size[1] - 1)
img_grid = np.zeros((grid_h, grid_w), dtype=np.uint8)
for i, res in enumerate(batch_res):
if i >= grid_size[0] * grid_size[1]:
break
img = (res) * 255
img = img.astype(np.uint8)
row = (i // grid_size[0]) * (img_h + grid_pad)
col = (i % grid_size[1]) * (img_w + grid_pad)
img_grid[row:row + img_h, col:col + img_w] = img
scipy.misc.imsave(fname, img_grid)
def train_gan():
# collecting data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# placeholders
x_data = tf.placeholder(dtype=tf.float32, shape=[batch_size, IM_SIZE], name="x_data")
keep_pl = tf.placeholder(dtype=tf.float32, name="dropout_keep_rate")
# build model
with tf.variable_scope("generator_model"):
x_generated = conv_model.build_generator(batch_size)
with tf.variable_scope("discriminator_model") as scope: # we use only one model for discriminator with 2 inputs
dis_gen = conv_model.build_discriminator(x_generated, keep_pl)
scope.reuse_variables()
dis_data = conv_model.build_discriminator(x_data, keep_pl)
with tf.name_scope('generator_loss'):
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=dis_gen, labels=tf.ones_like(dis_gen)))
dis_logits_on_generated = tf.reduce_mean(tf.sigmoid(dis_gen))
with tf.name_scope('discriminator_loss'):
d_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=dis_data, labels=tf.fill([batch_size,1],0.9)))
d_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=dis_gen, labels=tf.zeros_like(dis_gen)))
d_loss = d_loss_fake + d_loss_real
dis_logits_on_real = tf.reduce_mean(tf.sigmoid(dis_data))
optimzer = tf.train.AdamOptimizer(0.0001)
# collecting 2 list of training variables corresponding to discriminator and generator
tvars = tf.trainable_variables() # return list trainable variables
d_vars = [var for var in tvars if 'd_' in var.name]
g_vars = [var for var in tvars if 'g_' in var.name]
for var in d_vars: # display trainable vars for sanity check
print(var.name)
for var in g_vars:
print(var.name)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
g_trainer = optimzer.minimize(g_loss, var_list=g_vars, name='generator_trainer')
d_trainer = optimzer.minimize(d_loss, var_list=d_vars, name='discriminator_trainer')
# summary
tf.summary.scalar('Generator_loss', g_loss)
tf.summary.scalar('Discriminator_real_loss', d_loss_real)
tf.summary.scalar('Discriminator_fake_loss', d_loss_fake)
tf.summary.scalar('Discriminator_total_loss', d_loss)
tf.summary.scalar('logits_discriminator_on_generated', dis_logits_on_generated)
tf.summary.scalar('logits_discriminator_on_real', dis_logits_on_real)
tf.summary.image('Generated_images', x_generated, 10) # add 10 generated images to summary
x_data_reshaped = tf.reshape(x_data, shape=[-1, 28, 28, 1])
tf.summary.image('data_images', x_data_reshaped, 10)
merged = tf.summary.merge_all()
# train
with tf.Session() as sess:
# write tensorflow summary for monitoring on tensorboard
writer = tf.summary.FileWriter(log_path, sess.graph)
sess.run(tf.global_variables_initializer())
for i in range(max_iter):
x_batch, _ = mnist.train.next_batch(batch_size)
x_batch = 2 * x_batch.astype(np.float32) - 1 # set image dynamic to [-1 1]
sess.run(d_trainer, feed_dict={x_data: x_batch, keep_pl: KEEP_RATE})
sess.run(g_trainer, feed_dict={x_data: x_batch, keep_pl: KEEP_RATE})
if i % 1 == 0:
print("step %d" % (i))
if i % freq_print == 0:
summary = sess.run(merged, feed_dict={x_data: x_batch, keep_pl: KEEP_RATE})
writer.add_summary(summary, i)
if i % freq_save == 0:
sample_images = sess.run(x_generated, feed_dict={x_data: x_batch, keep_pl: KEEP_RATE})
store_result(sample_images, os.path.join(output_path, "sample%s.jpg" % i))
# saver.save(sess, os.path.join(output_path, "model"), global_step=global_step)
im_g_sample = sess.run(x_generated)
store_result(im_g_sample, os.path.join(output_path, "final_samples.jpg"))
if __name__ == '__main__':
if os.path.exists(output_path):
shutil.rmtree(output_path)
os.mkdir(output_path)
if os.path.exists(log_path):
shutil.rmtree(log_path)
os.mkdir(log_path)
print("start training")
train_gan()
| bruno-31/toy-gan | gan.py | gan.py | py | 5,449 | python | en | code | 0 | github-code | 13 |
71900046737 | import matplotlib.pyplot as plt
import networkx as nx
import random
import tkinter as tk
from tkinter import simpledialog
# This function checks that there are undefended provinces with respect to the problem or not.
def IsFeasible(G):
x = 0
y = 0
z = 0
for node in G.nodes:
if G.nodes[node]["Number"] == 0:
y = 0
x = x+1
neighbor_list = [n for n in G.neighbors(node)]
for neighbor in neighbor_list:
z+=1
num = G.nodes[neighbor]["Number"]
if num == 2:
y = y+1
if y == 0 and z > 0:
break
z = 0
if x == 0:
return True
else:
if y == 0 and z > 0:
return False
else:
return True
# This function calculates the weight of the graph.
def weight(G):
index = 0
sum = 0
for node in G.nodes:
sum += G.nodes[chr(ord("A") + index)]["Number"]
index+=1
return sum
# This function solves a problem through exhaustion: it goes through all possible choices until a solution is found.
def BruteForce(G):
G2=nx.Graph()
sum = 1000
node_num = G.number_of_nodes()
comb_num = 1
count = 0
same = 0
rows, cols = (100000, node_num)
arr = [[0]*cols]*rows
rows2, cols2 = (1, node_num)
arr2 = [0]*node_num
control = True
t = 0
finish = 0
for i in range(node_num):
comb_num *= 3
while finish < comb_num:
control = True
for i in range(G.number_of_nodes()):
x = random.randint(0,2)
arr2[i] = x
for j in range (count):
same = 0
for k in range (G.number_of_nodes()):
if arr2[k] == arr[j][k]:
same+=1
if same == G.number_of_nodes():
control = False
break
if control == True:
finish += 1
for k in range (G.number_of_nodes()):
arr[count][k] = arr2[k]
l = 0
for node in G.nodes:
G.nodes[node]["Number"] = arr[count][l]
l+=1
check = IsFeasible(G)
if check == True:
sum2 = weight(G)
if sum2 < sum:
sum = sum2
for node in G.nodes:
G2.add_node(node,Number = G.nodes[node]["Number"])
for edge in G.edges:
G2.add_edge(edge[0],edge[1])
count+=1
return G2
ROOT = tk.Tk()
ROOT.withdraw()
while(True):
nodeNum = simpledialog.askstring(title="Test",
prompt="Enter vertex number:")
if nodeNum.isdigit():
nodeNum = int(nodeNum)
maxEdgeNum = (nodeNum * nodeNum-1)/2
break
while(True):
edgeNum = simpledialog.askstring(title="Test",
prompt="Enter edge number:")
if edgeNum.isdigit():
edgeNum = int(edgeNum)
if edgeNum <= maxEdgeNum:
break
G = nx.Graph()
G2 = nx.Graph()
Rdn = [None] * 100
count = 0
counter = 0
check = True
check2 = True
for i in range(nodeNum):
G.add_node(chr(ord("A") + i))
for j in range(edgeNum):
while True:
num1 = random.randint(0,nodeNum-1)
ch1 = chr(ord("A") + num1)
num2 = random.randint(0,nodeNum-1)
ch2 = chr(ord("A") + num2)
if ch1 != ch2:
for edge in G.edges:
if ch1 != edge[0] or ch2 != edge[1]:
if ch1 != edge[1] or ch2 != edge[0]:
check2 = True
else:
check2 = False
break
else:
check2 = False
break
if check2 == True:
G.add_edge(ch1,ch2)
break
check2 = True
G2 = BruteForce(G)
color_map = []
for node in G2.nodes:
if G2.nodes[node]["Number"] == 0:
color_map.append('blue')
elif G2.nodes[node]["Number"] == 1:
color_map.append('green')
else:
color_map.append('red')
nx.draw(G2,with_labels=True,
node_color=color_map,node_size=1000,
font_color="white",font_size=10,font_family="Times New Roman", font_weight="bold",
edge_color="lightgray",
width=5)
plt.show()
| mhmtacar/Roman-Domination-Number | BruteForce.py | BruteForce.py | py | 4,369 | python | en | code | 0 | github-code | 13 |
2061037791 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from dynsettings.decorators import override_dynsettings
from dynsettings.models import SettingCache
from dynsettings.tests import dyn_settings
class OverrideDynsettingsTestCase(TestCase):
"""
Verify the override dynsettings decorator changes values for tests
"""
def setUp(self):
self.value_instance = SettingCache.get_value_object('TEST_THREE')
def test_decorator(self):
# first value set for TEST_THREE is 'Start Value'
self.value_instance.set_test_value('Start Value')
self.assertEqual(
SettingCache._test_values['TEST_THREE'],
'Start Value'
)
@override_dynsettings((dyn_settings.TEST_THREE, 'override',))
def test_decorator_changed_value(self):
# value changes to 'override'
self.assertEqual(SettingCache._test_values['TEST_THREE'], 'override')
| infoscout/dynsettings | dynsettings/tests/test_decorators.py | test_decorators.py | py | 961 | python | en | code | 0 | github-code | 13 |
1346718941 | import hashlib
import os
from datetime import datetime
from typing import List, Tuple, Optional
import pickle
from dateutil.parser import parse as dateutil_parse
from torch.utils.data import DataLoader
from pyhealth import BASE_CACHE_PATH
from pyhealth.utils import create_directory
MODULE_CACHE_PATH = os.path.join(BASE_CACHE_PATH, "datasets")
create_directory(MODULE_CACHE_PATH)
# basic tables which are a part of the defined datasets
DATASET_BASIC_TABLES = {
"MIMIC3Dataset": {"PATIENTS", "ADMISSIONS"},
"MIMIC4Dataset": {"patients", "admission"},
}
def hash_str(s):
return hashlib.md5(s.encode()).hexdigest()
def strptime(s: str) -> Optional[datetime]:
"""Helper function which parses a string to datetime object.
Args:
s: str, string to be parsed.
Returns:
Optional[datetime], parsed datetime object. If s is nan, return None.
"""
# return None if s is nan
if s != s:
return None
return dateutil_parse(s)
def padyear(year: str, month='1', day='1') -> str:
"""Pad a date time year of format 'YYYY' to format 'YYYY-MM-DD'
Args:
year: str, year to be padded. Must be non-zero value.
month: str, month string to be used as padding. Must be in [1, 12]
day: str, day string to be used as padding. Must be in [1, 31]
Returns:
padded_date: str, padded year.
"""
return f"{year}-{month}-{day}"
def flatten_list(l: List) -> List:
"""Flattens a list of list.
Args:
l: List, the list of list to be flattened.
Returns:
List, the flattened list.
Examples:
>>> flatten_list([[1], [2, 3], [4]])
[1, 2, 3, 4]R
>>> flatten_list([[1], [[2], 3], [4]])
[1, [2], 3, 4]
"""
assert isinstance(l, list), "l must be a list."
return sum(l, [])
def list_nested_levels(l: List) -> Tuple[int]:
"""Gets all the different nested levels of a list.
Args:
l: the list to be checked.
Returns:
All the different nested levels of the list.
Examples:
>>> list_nested_levels([])
(1,)
>>> list_nested_levels([1, 2, 3])
(1,)
>>> list_nested_levels([[]])
(2,)
>>> list_nested_levels([[1, 2, 3], [4, 5, 6]])
(2,)
>>> list_nested_levels([1, [2, 3], 4])
(1, 2)
>>> list_nested_levels([[1, [2, 3], 4]])
(2, 3)
"""
if not isinstance(l, list):
return tuple([0])
if not l:
return tuple([1])
levels = []
for i in l:
levels.extend(list_nested_levels(i))
levels = [i + 1 for i in levels]
return tuple(set(levels))
def is_homo_list(l: List) -> bool:
"""Checks if a list is homogeneous.
Args:
l: the list to be checked.
Returns:
bool, True if the list is homogeneous, False otherwise.
Examples:
>>> is_homo_list([1, 2, 3])
True
>>> is_homo_list([])
True
>>> is_homo_list([1, 2, "3"])
False
>>> is_homo_list([1, 2, 3, [4, 5, 6]])
False
"""
if not l:
return True
# if the value vector is a mix of float and int, convert all to float
l = [float(i) if type(i) == int else i for i in l]
return all(isinstance(i, type(l[0])) for i in l)
def collate_fn_dict(batch):
return {key: [d[key] for d in batch] for key in batch[0]}
def get_dataloader(dataset, batch_size, shuffle=False):
dataloader = DataLoader(
dataset,
batch_size=batch_size,
shuffle=shuffle,
collate_fn=collate_fn_dict,
)
return dataloader
if __name__ == "__main__":
print(list_nested_levels([1, 2, 3]))
print(list_nested_levels([1, [2], 3]))
print(list_nested_levels([[1, [2], [[3]]]]))
print(is_homo_list([1, 2, 3]))
print(is_homo_list([1, 2, [3]]))
print(is_homo_list([1, 2.0]))
| sunlabuiuc/PyHealth | pyhealth/datasets/utils.py | utils.py | py | 3,910 | python | en | code | 778 | github-code | 13 |
26512233840 | '''Desenvolva um programa que leia nome, idade e sexo de quatro pessoas. No final do programa,
mostre:
A média de idade do grupo;
Qual é o nome do homem mais velho;
Quantas mulheres têm menos de 20 anos'''
count_wo=0
count_ma=0
nom_hom=""
med=0
for i in range(1,5):
print("{:=^20}".format("{}ª pessoa").format(i))
name = str(input("Nome:")).strip()
age = int(input("Idade:"))
sex=str(input("Sexo: [M/F]")).strip().lower()
if sex=="m":
if age>count_ma:
count_ma=age
nom_hom=name #como o nome vem junto da idade, sempre que uma idade for maior do que a
#idade anterior, o nome se tornará a variável "nom_hom"
med+=age/4
if sex=="f":
if age<20:
count_wo+=1
nom_hom=nom_hom.capitalize()
print("A média das idades é {:.1f}".format(med))
print("O número de mulheres com menos é 20 anos é {}".format(count_wo))
print("O nome do homem mais velho é {}, com {} anos".format(nom_hom,count_ma))
| MLucasf/PythonExercises | ex056.py | ex056.py | py | 1,002 | python | pt | code | 0 | github-code | 13 |
10280125747 | #!/usr/bin/env python
import subprocess
import argparse
import os
exe_path = "/proj/kweeks/bin/"
smo = "shapemapper_out"
rmo = "ringmapper_out"
pmo = "pairmapper_out"
apo = "arcplot_out"
dmo = "dancemapper_out"
fco = "foldclusters_out"
def sbatch(command, params, dep=None):
if dep is not None:
params["dependency"] = f"afterok:{dep}"
params["kill-on-invalid-dep"] = "yes"
sb_prefix = "sbatch"
sb_command = f"--wrap='source activate py2-MaP; {command}'"
sb_params = " ".join([f"--{k}={v}" for k, v in params.items()])
process = f"{sb_prefix} {sb_params} {sb_command}"
print(f"{process}\n")
sbatch_response = subprocess.getoutput(process)
print(sbatch_response)
job_id = sbatch_response.split(' ')[-1].strip()
return job_id
def stringify_params(params):
return " ".join([f"--{k} {params[k]}".strip() for k in params])
def shapemapper(s, m, u, fas, input_type="folders", dep=None, amplicon=False,
sm_params={}):
command = exe_path+"shapemapper-2.1.5/shapemapper "
command += f"{stringify_params(sm_params)} "
command += f"--target {' '.join(fas)} "
command += f"--name {s} "
input_types = ["folders", "flashed", "deduped"]
valid_input_type = (input_type in input_types)
assert valid_input_type, f"input_type not in accepted list: {input_types}"
if input_type == "folders":
command += f"--modified --folder Sample_{m} "
command += f"--untreated --folder Sample_{u} "
elif input_type == "flashed":
command += f"--modified --U Sample_{m}/out.extendedFrags.fastq "
command += f"--untreated --U Sample_{u}/out.extendedFrags.fastq "
elif input_type == "deduped":
command += f"--modified --U Sample_{m}/combined_trimmed_deduped.fastq "
command += f"--untreated --U Sample_{u}/out.extendedFrags.fastq "
if amplicon:
command += "--amplicon "
else:
command += "--random-primer-len 9 "
command += "--output-parsed-mutations "
command += "--per-read-histograms "
command += "--overwrite"
params = {"mem": "4g",
"time": "10:00:00",
"job-name": "shapemapper",
"output": f"sbatch_out/{s}/sm_%A.out",
"ntasks": "6",
"nodes": "1"}
return sbatch(command, params, dep)
def ringmapper(s, fa, t, dep=None, rm_params={}):
command = exe_path+"RingMapper/ringmapper.py "
command += f"{stringify_params(rm_params)} "
command += f"--fasta {fa} "
command += f"--untreated {smo}/{s}_Untreated_{t}_parsed.mut "
command += f"{smo}/{s}_Modified_{t}_parsed.mut "
command += f"{rmo}/{s}_{t}_rings.txt"
params = {"mem": "4g",
"time": "3:00:00",
"job-name": f"ringmapper-{s}",
"output": f"sbatch_out/{s}/{t}/rm_%A.out"}
return sbatch(command, params, dep)
def pairmapper(s, t, dms=True, dep=None, pm_params={}):
command = exe_path+"pairmapper.py "
command += f"{stringify_params(pm_params)} "
command += f"--profile {smo}/{s}_{t}_profile.txt "
command += f"--untreated_parsed {smo}/{s}_Untreated_{t}_parsed.mut "
command += f"--modified_parsed {smo}/{s}_Modified_{t}_parsed.mut "
command += f"--out {pmo}/{s}_{t} --override_qualcheck "
if not dms:
command += "--notDMS"
params = {"job-name": f"pairmapper-{s}",
"output": f"sbatch_out/{s}/{t}/pm_%A.out",
"mem": "4g",
"time": "3:00:00"}
return sbatch(command, params, dep)
def arcplot(s, t, ct, data, dms=True, dep=None):
command = "arcPlot.py "
command += f"--ct {ct} "
if dms:
command += f"--dmsprofile {smo}/{s}_{t}.shape "
else:
command += f"--profile {smo}/{s}_{t}.shape "
if data == "rings":
command += f"--ringsig {rmo}/{s}_{t}_rings.txt "
command += f"{apo}/{s}_{t}_rings.pdf"
if data == "pairs":
command += f"--pairmap {pmo}/{s}_{t}-pairmap.txt,all "
command += f"{apo}/{s}_{t}_pairmap.pdf"
elif data == "allcorrs":
command += f"--ringsig {pmo}/{s}_{t}-allcorrs.txt "
command += f"{apo}/{s}_{t}_allcorrs.pdf"
params = {"job-name": f"arcplot-{s}-{data}",
"output": f"sbatch_out/{s}/{t}/ap_{data}_%A.out"}
return sbatch(command, params, dep)
def dancemapper_sub1M_fit(s, t, dep=None, dm1_params={}):
command = f"python {exe_path}DanceMapper/DanceMapper.py "
command += f"{stringify_params(dm1_params)} "
command += f"--profile {smo}/{s}_{t}_profile.txt "
command += f"--modified_parsed {smo}/{s}_Modified_{t}_parsed.mut "
command += "--undersample 1000000 --fit --maxcomponents 3 "
command += f"--outputprefix {dmo}/{s}_{t}"
params = {"job-name": "dancemapper",
"output": f"sbatch_out/{s}/{t}/dm_fit_%A.out",
"time": "7-00:00:00",
"mem": "10g"}
return sbatch(command, params, dep)
def dancemapper_read_rings_pairs(s, t, dms=True, dep=None, dm2_params={}):
command = f"python {exe_path}DanceMapper/DanceMapper.py "
command += f"{stringify_params(dm2_params)} "
command += f"--profile {smo}/{s}_{t}_profile.txt "
command += f"--modified_parsed {smo}/{s}_Modified_{t}_parsed.mut "
command += f"--untreated_parsed {smo}/{s}_Untreated_{t}_parsed.mut "
command += f"--outputprefix {dmo}/{s}_{t} "
if not dms:
command += "--notDMS "
command += f"--readfromfile {dmo}/{s}_{t}.bm "
command += "--ring --pairmap"
params = {"job-name": f"dancemapper_{s}",
"output": f"sbatch_out/{s}/{t}/dm_corrs_%A.out",
"time": "3-00:00:00",
"mem": "30g"}
return sbatch(command, params, dep)
def foldclusters(s, t, dms=True, dep=None, fc_params={}):
command = f"python {exe_path}DanceMapper/foldClusters.py "
command += f"{stringify_params(fc_params)} "
command += f"--bp {dmo}/{s}_{t} "
command += "--prob --pk "
if not dms:
command += "--notDMS "
command += f"{dmo}/{s}_{t}-reactivities.txt {fco}/{s}-{t}"
params = {"job-name": f"foldclusters_{s}",
"output": f"sbatch_out/{s}/{t}/fc_%A.out",
"time": "1:00:00"}
return sbatch(command, params, dep)
def parse_args():
prs = argparse.ArgumentParser()
prs.add_argument("s", type=str, help="Name for outputs")
prs.add_argument("m", type=str, help="Sample # for fastqs")
prs.add_argument("u", type=str, help="Sample # for fastqs")
prs.add_argument("--fas", type=str, nargs='+',
help="location of fasta file")
prs.add_argument("--cts", type=str, nargs='+', help="location of ct file")
prs.add_argument("--dms", action="store_true", help="Is this DMS?")
prs.add_argument("--input", type=str, help="folders, flashed, or deduped")
prs.add_argument("--steps", type=int, nargs="+",
default=[1, 2, 3, 4, 5, 6],
help=("1=Shapemapper, 2=RingMapper, 3=PairMapper, "
"4=Dance-fit, 5=Dance-corrs, 6=foldClusters"))
prs.add_argument("--amplicon", action="store_true", default=False,
help="use amplicon flag with Shapemapper2")
prs.add_argument("--sm_params", type=str, nargs="+",
help="custom parameters for Shapemapper")
prs.add_argument("--rm_params", type=str, nargs="+",
help="custom parameters for Ringmapper")
prs.add_argument("--pm_params", type=str, nargs="+",
help="custom parameters for Pairmapper")
prs.add_argument("--dm1_params", type=str, nargs="+",
help="custom parameters for Dancemapper1")
prs.add_argument("--dm2_params", type=str, nargs="+",
help="custom parameters for Dancemapper2")
prs.add_argument("--fc_params", type=str, nargs="+",
help="custom parameters for FoldClusters")
args = prs.parse_args()
for arg in ["sm", "rm", "pm", "dm1", "dm2", "fc"]:
arg = f"{arg}_params"
if getattr(args, arg) is not None:
params = getattr(args, arg)
k_v_pairs = [pair.split(":") for pair in params]
setattr(args, arg, {k: v for k, v in k_v_pairs})
else:
setattr(args, arg, {})
return args
def main(s, m, u, fas, input="folders", cts=None, dms=False, amplicon=False,
steps=[1, 2, 3, 4, 5, 6], sm_params={}, rm_params={}, pm_params={},
dm1_params={}, dm2_params={}, fc_params={}):
for dir in ["sbatch_out", f"sbatch_out/{s}", smo, rmo, pmo, apo, dmo, fco]:
try:
os.mkdir(dir)
except FileExistsError:
pass
smid, rmid, pmid, dmid, dm2id = None, None, None, None, None
if 1 in steps:
smid = shapemapper(s, m, u, fas, input, None, amplicon, sm_params)
for fa, ct in zip(fas, cts):
t = fa[:-3]
if 2 in steps:
rmid = ringmapper(s, fa, t, smid, rm_params)
_ = arcplot(s, t, ct, "rings", dms, rmid)
if 3 in steps:
pmid = pairmapper(s, t, dms, smid, pm_params)
_ = arcplot(s, t, ct, "pairs", dms, pmid)
_ = arcplot(s, t, ct, "allcorrs", dms, pmid)
if 4 in steps:
dmid = dancemapper_sub1M_fit(s, t, smid, dm1_params)
if 5 in steps:
dm2id = dancemapper_read_rings_pairs(s, t, dms, dmid, dm2_params)
if 6 in steps:
foldclusters(s, t, dms, dm2id, fc_params)
if __name__ == "__main__":
main(**vars(parse_args()))
| Weeks-UNC/longleaf-dotfiles | pipelines/map-pipeline.py | map-pipeline.py | py | 9,561 | python | en | code | 2 | github-code | 13 |
39115456920 | def main():
entrada = input()
qtd_vertices = int(entrada)
estudantes = list(range(qtd_vertices))
vertices = dict()
for i in range(len(estudantes)):
entrada = input()
entrada = list(map(int, entrada.split()))
entrada = entrada[1:]
vertices[i] = {'color':'null','vizinhos':entrada}
vertices[0]['color'] = 'branco'
fila = [0]
while len(fila) > 0:
atual = fila.pop(0)
for vizinho in vertices[atual]['vizinhos']:
if vertices[vizinho]['color'] == 'null':
if vertices[atual]['color'] == 'branco':
vertices[vizinho]['color'] = 'preto'
else:
vertices[vizinho]['color'] = 'branco'
fila.append(vizinho)
elif vertices[atual]['color'] == vertices[vizinho]['color'] and atual != vizinho:
print('Impossivel')
return
print('Vai la, tio Willian!')
if __name__ == '__main__':
main()
| LorhanSohaky/UFSCar | 2018/PAA/T2/Debate.py | Debate.py | py | 1,019 | python | pt | code | 1 | github-code | 13 |
37595690391 | ###########################################################
#### Initialization --- do not change #####################
###########################################################
print('hallo')
import sys
sys.path.append('D:\BeamlineControllPython\programming_python')
import p05.devices, p05.nano, p05.tools ################
import numpy, time, os, PyTango ################
pmac = p05.devices.PMACdict() ################
currScript = os.path.abspath(__file__) ################
from sys import exit ################
from sys import argv
import p05.tools.misc as misc
###########################################################
#### end initialization ###################################
###########################################################
scriptname, beamtime, prefix, rotCenter,sampleOut, exptime, speed, smearing = argv
rotCenter = float(rotCenter)
sampleOut = float (sampleOut)
smearing = int(smearing)
if exptime == 'None':
exptime = None
else:
exptime = float (exptime)
if speed == 'None':
speed = None
else:
speed = float (speed)
######Check parameters from here ########################
#beamtime = '11008942'
#prefix = '20200616_02_NEWHAMA_50'
#
#rotCenter = -10.9250
#sampleOut = 5
#
#smearing = 5
#exptime = 0.05
#speed = None
######Check parameters until here ########################
det_size = 2048
overhead = 0.01
if speed == None:
speed = smearing * 180./ (numpy.pi *det_size/2*exptime) # maximal speed of rotation axis for given exptime, maximum 1 pixel smearing
elif exptime == None:
exptime = smearing * 180./ (numpy.pi *det_size/2*speed) # exptime for maximal 1 pixel smearing, caculated from speed
num_images = int(180/speed /(exptime+ overhead)) -1
print('speed: ' + str(speed))
print('total scan time (s): ' + str(180/speed))
print('total scan time (min): ' + str(180/speed/60))
print('overhead: ' + str(overhead))
print('detector size: ' + str(det_size))
print('exposure time: ' + str(exptime))
print('expected number of images: ' + str(num_images))
print('Efficency: ' + str(num_images*exptime/(180/speed )))
num_flat = 20
startangle = -11
target_pos = 171
scriptname = str(prefix) + '.py'
nanoScript = p05.nano.NanoScriptHelper(pmac, currScript, 'hzg', str(beamtime), str(prefix), exptime, \
closeShutter=False, \
useSmarAct=False, \
useStatusServer=False, \
usePCO=False, \
useASAP=False, \
useASAPcomm=False, \
useHamamatsu=True, \
disableSideBunchReacquisition = True,\
logRotPos = True,\
useHamaTrigger =False)
# Move to start position
pmac.SetRotSpeed(30)
time.sleep(0.1)
pmac.Move('Sample_Rot',startangle, WaitForMove=True)
time.sleep(0.5)
# Take reference images
pmac.Move('SampleStage_x', rotCenter+sampleOut)
nanoScript.SetCurrentName('ref',iNumber2 =0, imgNumber=0)
time.sleep(1)
nanoScript.HamaTakeRef(num_img=num_flat)
pmac.Move('SampleStage_x', rotCenter)
#time.sleep(60)
# Start Tomo
pmac.SetRotSpeed(speed)
time.sleep(0.1)
nanoScript.SetCurrentName('img',iNumber = 0,imgNumber=0)
time.sleep(1)
nanoScript.HamaTakeTomo(target_pos)
time.sleep(0.5)
# Take reference images at the end
pmac.Move('SampleStage_x', rotCenter+sampleOut)
nanoScript.SetCurrentName('ref',iNumber2 =0, imgNumber=0)
time.sleep(1)
nanoScript.HamaTakeRef(num_img=num_flat)
pmac.Move('SampleStage_x', rotCenter)
nanoScript.FinishScan() | hereon-wpi/p05nano | 01_standard_flyScan.py | 01_standard_flyScan.py | py | 3,672 | python | en | code | 0 | github-code | 13 |
34737417249 | import math
import numpy as np
import torch
from torch import nn
import test_softmax
# The code below generates synthetic ("fake") data.
# The generating formula is:
# y = 5 + 1.2 * x / (1!) + (-3.4) * x^2 / (2!)
# + 5.6 * x^3 / (3!) + normal()
max_degree = 20 # maximum degree of the polynomial features
n_train, n_test = 100, 100 # sizes of the training and test sets
true_w = np.zeros(max_degree) # allocate space for all coefficients
# True coefficients used to generate the synthetic data; degrees >= 4 stay 0.
true_w[0:4] = np.array([5, 1.2, -3.4, 5.6])
# Draw normally distributed random inputs, one scalar feature per sample.
features = np.random.normal(size=(n_train + n_test, 1))
# Shuffle the sample order.
np.random.shuffle(features)
# Build the polynomial features.
# np.arange(max_degree) is [0, ..., 19]; raising each input to these powers
# yields one column per degree.
# Example: np.power([2,3], [3,4]) computes 2**3 and 3**4 elementwise.
poly_features = np.power(features, np.arange(max_degree).reshape(1, -1))
# Rescale each column by the factorial of its degree (via the gamma function).
# This avoids very large gradient or loss values: feature x^i becomes
# x^i / i!, so a large degree i does not produce huge magnitudes.
for i in range(max_degree):
    poly_features[:, i] /= math.gamma(i + 1) # gamma(n)=(n-1)!
# Shape of labels: (n_train+n_test,)
# Multiply the polynomial features by the true coefficients to get the labels.
labels = np.dot(poly_features, true_w)
# Add Gaussian noise to the labels.
labels += np.random.normal(scale=0.1, size=labels.shape)
# At this point we have:
# - the true coefficients,
# - the raw normally distributed inputs,
# - the factorial-scaled polynomial features,
# - the noisy labels.
# Convert the NumPy ndarrays to tensors.
true_w, features, poly_features, labels = [torch.tensor(x, dtype=
    torch.float32) for x in [true_w, features, poly_features, labels]]
print("features[:2], poly_features[:2, :], labels[:2] : \n",
      features[:2], poly_features[:2, :], labels[:2])
# A helper that evaluates the loss of a model on a given dataset.
def evaluate_loss(net, data_iter, loss):  #@save
    """Return the average per-element loss of ``net`` over ``data_iter``.

    Args:
        net: callable model mapping a feature batch ``X`` to predictions.
        data_iter: iterable yielding ``(X, y)`` minibatches.
        loss: elementwise loss function, e.g. ``nn.MSELoss(reduction='none')``.

    Returns:
        float: total loss divided by the total number of loss elements;
        ``nan`` when ``data_iter`` yields no data (the original code raised
        ``ZeroDivisionError`` in that case).
    """
    total_loss = 0.0   # running sum of the loss over all elements
    total_count = 0    # running count of loss elements
    # Evaluation only: skip autograd bookkeeping to save memory/time.
    with torch.no_grad():
        for X, y in data_iter:
            out = net(X)
            # Labels arrive as a flat vector; align them with the
            # prediction shape before computing the elementwise loss.
            l = loss(out, y.reshape(out.shape))
            total_loss += float(l.sum())
            total_count += l.numel()
    if total_count == 0:
        # Empty dataset guard: avoid dividing by zero.
        return float('nan')
    return total_loss / total_count
from torch.utils import data
# 2. 调⽤框架中现有的API来读取数据。
def load_array(data_arrays, batch_size, is_train=True): #@save
"""构造⼀个PyTorch数据迭代器"""
dataset = data.TensorDataset(*data_arrays)
return data.DataLoader(dataset, batch_size, shuffle=is_train)
# 定义训练函数。
def train(train_features, test_features, train_labels, test_labels,
num_epochs=400):
# 指定损失函数。
loss = nn.MSELoss(reduction='none')
# 获得输入数据的形状。
input_shape = train_features.shape[-1]
# 不设置偏置,因为我们已经在多项式中实现了它
net = nn.Sequential(nn.Linear(input_shape, 1, bias=False))
# 批量大小,不少于10个。
batch_size = min(10, train_labels.shape[0])
# 加载数据。
train_iter = load_array((train_features, train_labels.reshape(-1,1)),
batch_size)
test_iter = load_array((test_features, test_labels.reshape(-1,1)),
batch_size, is_train=False)
# 指定梯度下降操作。
trainer = torch.optim.SGD(net.parameters(), lr=0.01)
# 初始化动画对象。
animator = test_softmax.Animator(xlabel='epoch', ylabel='loss', yscale='log',
xlim=[1, num_epochs], ylim=[1e-3, 1e2],
legend=['train', 'test'])
# 开始循环。默认是400次。
for epoch in range(num_epochs):
test_softmax.train_epoch_ch3(net, train_iter, loss, trainer)
# 每二十次绘制一次损失。
if epoch == 0 or (epoch + 1) % 20 == 0:
animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss),
evaluate_loss(net, test_iter, loss)))
print('weight:', net[0].weight.data.numpy())
# 这里有一个背景就是:数据是用poly_features表示的三阶多项式函数生成的。
# 下面我们也使用poly_features表示的三阶多项式函数。
# 展示三阶多项式函数拟合(正常)
# ⾸先使⽤三阶多项式函数,它与数据⽣成函数的阶数相同。
# 从多项式特征中选择前4个维度,即1,x,x^2/2!,x^3/3!
train(poly_features[:n_train, :4], poly_features[n_train:, :4],
labels[:n_train], labels[n_train:])
# 下面使用poly_features表示的一阶多项式函数。
# 线性函数拟合(⽋拟合)
# 从多项式特征中选择前2个维度,即1和x
train(poly_features[:n_train, :2], poly_features[n_train:, :2],
labels[:n_train], labels[n_train:])
# 展示⾼阶多项式函数拟合(过拟合)
# 从多项式特征中选取所有维度
train(poly_features[:n_train, :], poly_features[n_train:, :],
labels[:n_train], labels[n_train:], num_epochs=1500)
| lucelujiaming/luceluDiveIntoDeepLearning | ch04_multilayer-perceptrons/test_normal.py | test_normal.py | py | 4,819 | python | zh | code | 0 | github-code | 13 |
28401719431 | from __future__ import unicode_literals
import os, re, json
# Metadata
RAW_URL = r'https://www.pluralsight.com/courses/'
HTML_FILE = os.path.join("...", "data", "search_results.html")
JSON_OUTPUT_FILE = os.path.join("...", "data", "courses.json")
def lookaround_tags(start_tag, end_tag):
# Contruct regular expression
lookbehind = r'(?<=' + start_tag + r')'
lookahead = r'(?=' + end_tag + r')'
wildcard = r'.*?'
regex = "%s%s%s"%(lookbehind,wildcard,lookahead)
# Compile it and return
lookaround = re.compile(regex)
return lookaround
def store_dict_as_json(dictionary, filepath):
path = os.path.dirname(filepath)
if not os.path.exists(path):
os.mkdir(path)
with open(filepath, 'wt') as f:
json.dump(dictionary, f, sort_keys=True, indent=4)
def scrape_and_store_courses():
# Search results encapsulation
search_tag=r'<div class="search-result__title">'
div_tag=r'</div>'
result_lookaround = lookaround_tags(search_tag, div_tag)
# Encapsulation within search results
quote = r'"'; gt = r'>'; a_tag = r'</a>'
courseid_lookaround = lookaround_tags(RAW_URL, quote)
title_lookaround = lookaround_tags(gt, a_tag)
# Parse data/search_results.html and put course data in a dicionary
course_dict = {}
with open(HTML_FILE, 'rt') as f:
for line in f.readlines():
search_line = result_lookaround.search(line)
if search_line:
title_tag = search_line.group()
courseid = courseid_lookaround.search(title_tag).group()
title = title_lookaround.search(title_tag).group()
course_dict[courseid] = title
# Store dictionary as a json file
store_dict_as_json(course_dict, JSON_OUTPUT_FILE)
if __name__ == "__main__":
scrape_and_store_courses() | wenliangz/plura_py_private | scrapeutils/scrape_html_to_json.py | scrape_html_to_json.py | py | 1,856 | python | en | code | 0 | github-code | 13 |
26806301955 | from django.urls import path, include, re_path
from rest_framework import routers
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
from account.api.v1.viewsets import UserRegistrationView, LoginTokenObtainView, GoogleLogin, FacebookLogin, AppleLogin
router = routers.SimpleRouter()
urlpatterns = [
path('', include(router.urls)),
path('signup/', UserRegistrationView.as_view(), name="signup"),
path('login/', LoginTokenObtainView.as_view(), name='token_obtain_pair'),
path('login/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
re_path(r'^login/google/$', GoogleLogin.as_view(), name='google_login'),
re_path(r'^login/facebook/$', FacebookLogin.as_view(), name='facebook_login'),
re_path(r'^login/apple/$', AppleLogin.as_view(), name='apple_login'),
]
| sibtainDev/django_social_login | account/api/v1/urls.py | urls.py | py | 847 | python | en | code | 0 | github-code | 13 |
6921789676 | while True:
try:
x,y = input("Fraction: ").strip().split("/")
x = int(x)
y = int(y)
if y == 0:
raise
fuel = str(x) + "/" + str(y)
if fuel == "1/4":
print("25%")
elif fuel == "1/2":
print("50%")
elif fuel == "2/4":
print("50%")
elif fuel == "3/4":
print("75%")
elif fuel == "4/4":
print("F")
else:
print("E")
except:
print("There is a problem with your input")
else:
break
| mobile-desk/cs50p-projects | ps3/fuel_gauge.py | fuel_gauge.py | py | 603 | python | en | code | 0 | github-code | 13 |
10331692850 | from discord.ext import commands
from bot import Commands, run_timer
bot = commands.Bot(command_prefix=commands.when_mentioned_or('='), description='Techraptor Control Bot')
@bot.event
async def on_ready():
run_timer()
print('Logged in as: {0} (ID: {0.id})'.format(bot.user))
bot.add_cog(Commands(bot))
bot.run('')
| Techraptor/TechBot | main.py | main.py | py | 344 | python | en | code | 0 | github-code | 13 |
26062210399 | # -*- coding: utf-8 -*-
import numpy as np
class Network(object):
def __init__(self,sizes):
self.num_layers = len(sizes)
self.sizes = sizes
self.biases = [np.random.rand(y,1) for y in sizes[1:]] #随机从正态分布(均值0,方差1)中生成
self.weights = [np.random.rand(y,x)
for x,y in zip(sizes[:1],sizes[1:])]# 存储连接第二层和第三层的权重
| Rheasilvia/PythonLearning | Handwriting/mnielsen/Network.py | Network.py | py | 429 | python | en | code | 0 | github-code | 13 |
40614163600 | import os
import copy
import move
import board
import numpy as np
from model import *
from config import *
def cast(string, res=None):
pad = copy.deepcopy(move.initPad)
i = 0
while i < len(string):
x = ord(string[i]) - ord('0')
y = ord(string[i + 1]) - ord('0')
id = int(i / 2)
pad[id][1] = x
pad[id][2] = y
i += 2
move.board_show(pad)
vec = move.Matrix(pad)
ob = board.CBoard(pad)
item = ob.setMat(vec)
ret = sess.run(h_output, feed_dict={x_: [item]})
ret = ret[0]
cnt = 0
while True:
tmp = int(np.argmax(ret))
id = int(tmp / 90) + 16
x = int((tmp % 90) / 10)
y = (tmp % 90) % 10
if ret[tmp] < 0.000001:
return
if (x, y) in ob.mov[id]:
if type(res) == list:
if ret[tmp] > MOVE_THRESHOLD:
res.append((tmp, ret[tmp]))
# if len(res)>6 or ret[tmp]<0.000001:
if ret[tmp] < MOVE_THRESHOLD:
return
else:
return tmp
ret[tmp] = 0.0
def main():
print('initial complete')
for i in range(1):
lst = []
print(cast('8979695949392919097717866646260600102030405060708012720323436383', lst))
print(lst)
sum = 0.0
for item in lst:
sum += item[1]
print('len:%d sum:%g' % (len(lst), sum))
txtBoard = "d:/txtboard.txt"
tagBoard = "d:/tagboard.txt"
txtMove = "d:/txtmove.txt"
tagMove = "d:/tagmove.txt"
while True:
# 标识棋盘已写
if os.path.exists(tagBoard):
with open(txtBoard, 'r') as cin:
for line in cin:
pass
print(line)
lst = []
ret = cast(line, lst)
f = open(txtMove, 'w')
for ret in lst:
print(ret)
f.write(str(ret[0]) + '\n')
f.flush()
f.close()
os.remove(tagBoard)
ff = open(tagMove, 'w')
ff.close()
if __name__ == '__main__':
main()
| VGxiaozhao/ChineseChess | CNN/cnn_go.py | cnn_go.py | py | 2,114 | python | en | code | 0 | github-code | 13 |
69983077138 | #Fibonacci's sequence using recursion
def fib(n):
if n < 2:
return n
else:
# fn = fn-1 + fn-2
return fib(n-1) + fib(n-2)
for x in range(10):
print(fib(x)) | cabrera-evil/python | MD2/Partial-02/fibonacci.py | fibonacci.py | py | 195 | python | en | code | 2 | github-code | 13 |
21800478452 | # Time Limit per Test: 3 seconds
# Memory Limit per Test: 512 megabytes
# Using: PyPy 3-64
# Solution Link: https://codeforces.com/contest/1795/submission/195399945
'''
Question Link: https://codeforces.com/contest/1795/problem/D
You are given an undirected graph consisting of 𝑛
vertices and 𝑛
edges, where 𝑛
is divisible by 6
. Each edge has a weight, which is a positive (greater than zero) integer.
The graph has the following structure: it is split into 𝑛3
triples of vertices, the first triple consisting of vertices 1,2,3
, the second triple consisting of vertices 4,5,6
, and so on. Every pair of vertices from the same triple is connected by an edge. There are no edges between vertices from different triples.
You have to paint the vertices of this graph into two colors, red and blue. Each vertex should have exactly one color, there should be exactly 𝑛2
red vertices and 𝑛2
blue vertices. The coloring is called valid if it meets these constraints.
The weight of the coloring is the sum of weights of edges connecting two vertices with different colors.
Let 𝑊
be the maximum possible weight of a valid coloring. Calculate the number of valid colorings with weight 𝑊
, and print it modulo 998244353
.
Input
The first line contains one integer 𝑛
(6≤𝑛≤3⋅105
, 𝑛
is divisible by 6
).
The second line contains 𝑛
integers 𝑤1,𝑤2,…,𝑤𝑛
(1≤𝑤𝑖≤1000
) — the weights of the edges. Edge 1
connects vertices 1
and 2
, edge 2
connects vertices 1
and 3
, edge 3
connects vertices 2
and 3
, edge 4
connects vertices 4
and 5
, edge 5
connects vertices 4
and 6
, edge 6
connects vertices 5
and 6
, and so on.
Output
Print one integer — the number of valid colorings with maximum possible weight, taken modulo 998244353
.
'''
'''
Sample Input:
12
1 3 3 7 8 5 2 2 2 2 4 2
Sample Output:
36
'''
import sys
input = sys.stdin.readline
#rounds = int(input())
MOD = 998244353 #look out
nmax = 10**5+1 #look out
fact = [1] * (nmax+1)
for i in range(2, nmax+1):
fact[i] = fact[i-1] * i % MOD
inv = [1] * (nmax+1)
inv[nmax] = pow(fact[nmax], MOD-2, MOD)
for i in range(nmax-1, 0, -1):
inv[i] = inv[i+1] * (i+1) % MOD
def C(n, m):
return fact[n] * inv[m] % MOD * inv[n-m] % MOD if 0 <= m <= n else 0
for ii in range(1):
out=0
mod=998244353
length=int(input())
edge=list(map(int,input().split())) #1 to 2, 1 to 3, 2 to 3...
cur=(C(length//3,length//6))
cur%=mod
two=0
three=0
for g in range(0,length,3):
v1=edge[g]
v2=edge[g+1]
v3=edge[g+2]
if v1!=v2 and v1!=v3 and v2!=v3:
continue
elif v1==v2==v3:
three+=1
else:
small=min(v1,v2,v3)
if small==v1 and small==v2:
two+=1
elif small==v1 and small==v3:
two+=1
elif small==v2 and small==v3:
two+=1
time1=pow(2,two,mod)
time2=pow(3,three,mod)
cur=cur*time1*time2%mod
print(cur)
| Squirtleee/AlgoPractice | Solutions/Triangle Coloring.py | Triangle Coloring.py | py | 2,934 | python | en | code | 0 | github-code | 13 |
2792179790 | # Write a Python program to create a person class.
# Include attributes like name, country and date of birth.
# Implement a method to determine the person's age.
months = ["january", "februry", "march", "april", "may", "june", "july", "august", "september", "october", "november", "december"]
class Person:
def __init__(self, name, country, birthday, age):
self.name = name
self.country = country
self.birthday = birthday
self.age = age
date = self.birthday.split(" ")
if int(date[0]) == 1:
date[0].replace("1st")
elif int(date[0]) == 2:
date[0].replace("2nd")
elif int(date[0]) == 3:
date[0].replace("3rd")
else:
date[0].replace(f"{date[0]}th")
print(f"{name} will turn {int(age) + 1} on the {date[0]} of {months[int(date[1])]}, 2024")
person1 = Person("Markus Petterson", "Afghanistan", "22 2 1945", "78")
person1.__init__() | Nebrocebro/Programmering-2 | Prog2-NeoMalmros/obj-ori-ex-2.py | obj-ori-ex-2.py | py | 961 | python | en | code | 0 | github-code | 13 |
42110537778 | import sys
sys.setrecursionlimit(10 ** 8)
ini = lambda: int(sys.stdin.readline())
inl = lambda: [int(x) for x in sys.stdin.readline().split()]
ins = lambda: sys.stdin.readline().rstrip()
debug = lambda *a, **kw: print("\033[33m", *a, "\033[0m", **dict(file=sys.stderr, **kw))
A, B = inl()
def solve():
xt = []
for h in range(25):
if h in [11, 23]:
continue
if h in [0, 12, 24]:
xt.append((h, 0))
continue
m = 60 * (h % 12) / 11
xt.append((h, m))
s = 60 * A + B
ans = 10 ** 9
for (h, m) in xt:
t = h * 60 + m
if t < s:
continue
ans = min(ans, int((t - s) * 60))
return ans
print(solve())
| keijak/comp-pub | yukicoder/1236/main.py | main.py | py | 723 | python | en | code | 0 | github-code | 13 |
2568603214 | import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
"""
Класс где мы реализовываем работу корабля.
"""
def __init__(self, ai_settings, screen):
"""
Функция иницирования работы корабля, его изображения, настроек.
"""
super(Ship, self).__init__()
self.screen = screen
self.ai_settings = ai_settings
self.image = pygame.image.load('images/ship.bmp')
self.rect = self.image.get_rect()
self.screen_rect = screen.get_rect()
self.rect.centerx = self.screen_rect.centerx
self.rect.bottom = self.screen_rect.bottom
self.center = float(self.rect.centerx)
self.moving_right = False
self.moving_left = False
def center_ship(self):
"""
Находим центр, для поставления корабля на центр экрана
"""
self.center = self.screen_rect.centerx
def update(self):
"""
Реализация движения
"""
if self.moving_right and self.rect.right < self.screen_rect.right:
self.center += self.ai_settings.ship_speed_factor
if self.moving_left and self.rect.left > 0:
self.center -= self.ai_settings.ship_speed_factor
self.rect.centerx = self.center
def blitme(self):
"""
Отрисовка корабля
"""
self.screen.blit(self.image, self.rect)
| MaxKuznetsovGHST/alien_invasion | ship.py | ship.py | py | 1,583 | python | ru | code | 0 | github-code | 13 |
32029130635 | from dataclasses import dataclass
from typing import Optional
from langchain import FewShotPromptTemplate, PromptTemplate
from langchain.chains import LLMChain
from langchain.docstore.document import Document
from summ.classify.classes import Classes
from summ.shared.chain import Chain
from summ.shared.utils import dedent
@dataclass
class Source:
"""A chunk from a source data file."""
file: str
classes: list[Classes]
chunk: str
@dataclass
class Fact:
"""An individual fact from some interview."""
fact: str
source: str
class Factifier(Chain):
"""Factifiers are responsible for taking a Document with a chunk,
and extracting a list of facts."""
DEFAULT_CONTEXT = "This is the start of the conversation."
EXAMPLES = [
{
"context": "The conversation so far has covered the backround of the speaker. He is in sales at UiPath.",
"chunk": "We had a client where they would, they had like a huge database legacy database of like their inventory in the store. Whenever they would whenever they would do any type of like inventory accounts, they would shut down for like eight hours but they wouldn't go in there and see the differences between like the database and it will take them 16 hours to do. Yes, insane. We built a bot that will go in there and do like we like to call it, auditing and reconciliation of all the inventories, as long as they gave us like a spreadsheet, and you could do it in an hour.",
"facts": [
"A client had a large legacy database for inventory in their store.",
"The inventory reconciliation process would shut down the store for 8 hours.",
"The process of reconciling the database would take 16 hours to complete.",
"A bot was built to perform inventory auditing and reconciliation.",
"The bot can complete the process in an hour as long as a spreadsheet is provided.",
],
"new_context": " An RPA developer talks about a bot he made. The bot was created to reconcile a client's inventory database which used to take 16 hours to complete and shut down the store for 8 hours, and can now be done in an hour.",
}
]
EXAMPLE_TEMPLATE = PromptTemplate(
template=dedent(
"""
---
Context:
{{ context }}
Paragraph:
{{ chunk }}
Facts:
- {{ facts | join("\n- ") }}
Context:
{{ new_context }}
---
"""
),
input_variables=["context", "chunk", "facts", "new_context"],
template_format="jinja2",
)
PROMPT_TEMPLATE = FewShotPromptTemplate(
example_prompt=EXAMPLE_TEMPLATE,
examples=EXAMPLES,
input_variables=["context", "chunk"],
prefix=dedent(
"""
Your task is to take the context of a conversation, and a paragraph, and extract any pertinent facts from it.
The facts should only cover new information introduced in the paragraph. The context is only for background; do not use it to generate facts.
You will also generate a new context, by taking the old context and modifying it if needed to account for the additional paragraph. You do not need to change the old context if it is suitable; simply return it again.
Here is an example:
"""
),
suffix=dedent(
"""
Now the real one:
---
Context:
{context}
Paragraph:
{chunk}
Facts:
-
"""
),
)
def __init__(self, *args, context: Optional[str] = None, **kwargs):
super().__init__(*args, **kwargs)
self.context = context or self.DEFAULT_CONTEXT
def parse(self, results: str) -> tuple[list[str], str]:
try:
idx = results.lower().index("context")
facts_raw, context_raw = results[:idx], results[idx:]
context = "\n".join(context_raw.splitlines()[1:])
except ValueError:
facts_raw, context = results, self.context
facts = self._parse(facts_raw.splitlines(), prefix=r"-+")
return facts, context
def factify(self, doc: Document) -> list[str]:
"""Returns a list of facts from the given document."""
chain = LLMChain(llm=self.llm, prompt=self.PROMPT_TEMPLATE)
results = "- " + self.cached(
"factify",
chain,
doc,
lambda d: {"chunk": d.page_content, "context": self.context},
)
facts, self.context = self.parse(results)
return facts
| yasyf/summ | summ/factify/factifier.py | factifier.py | py | 4,757 | python | en | code | 141 | github-code | 13 |
24016051138 | from params_hom import *
from processingNetwork import ProcessingNetwork
from matplotlib import pyplot as plt
import pickle
from utils import *
# Load pretraining policy
with open('qtable_policy.pickle','rb') as file:
custom_policy = pickle.load(file)
print('Q-table policy:', custom_policy)
dir_path = path()
with open(os.path.join(dir_path, 'uniform_discretization.pickle'), 'rb') as file:
bins = pickle.load(file)
# Instantiate processing network
net = ProcessingNetwork(sensors, A, Q, window_length, window_num)
net.set_quantization(bins)
state_space_dim = net.state_dim
action_space_dim = net.action_dim
# Run one episode (horizon K)
obs = net.reset() # reset env
epi_reward = 0 # set initial reward to 0
done = False # episode not done
state_epi = []
action_epi = []
index = 0
discount_factor = .99
while not done:
# Get action
action = custom_policy[obs]
# Append current state
state_epi.append(obs)
# Append current action
action_epi.append(action)
# Apply environment transition
new_obs, reward, done = net.step(action)
# Compute return for this episode
epi_reward += (discount_factor**index)*reward
# Update state observation
obs = new_obs
index += 1
state_epi.append(obs)
# Visualize return
print(f'Return for this episode: {epi_reward}')
for i in range(10):
print(f'Mean of the {i+1} window: {np.mean(-net.get_Ps()[(int(window_length)*i):(i+1)*int(window_length)])}')
# Visualize visited states
print(f'States at custom episode: {state_epi}')
# Visualize applied actions
print(f'Actions at custom episode: {action_epi}')
# Plot tr(P) along episode
plt.plot([l for l in range(int(time_horizon))], net.get_Ps())
for k in range(window_num):
plt.axvline(x=int(window_length)*k, color='k', label='axvline - full height')
for kk in bins:
plt.axhline(y=kk, color='r', linestyle='-')
plt.show()
plt.close() | lucaballotta/ProcessingNetworks-RL | CDC paper/test_custom_policy.py | test_custom_policy.py | py | 1,904 | python | en | code | 1 | github-code | 13 |
3763837274 | import cv2
import numpy as np
from skimage.exposure import match_histograms
from watchdog.events import *
reference = cv2.imread('D:/aiImg/img/1.png') # 目标图像
def piliangzhifanghu(image_reade,img_output):
data_base_dir = image_reade # 输入文件夹的路径
outfile_dir = img_output # 输出文件夹的路径
processed_number = 0 # 统计处理图片的数量
for file in os.listdir(data_base_dir): # 遍历目标文件夹图片
read_img_name = data_base_dir + '//' + file.strip() # 取图片完整路径
image = cv2.imread(read_img_name) # 读入图片
while (1):
matched = match_histograms(image, reference, channel_axis=-1)
# cv2.imshow("demo", matched)
# k = cv2.waitKey(1)
# if k == 13: # 按回车键确认处理、保存图片到输出文件夹和读取下一张图片
processed_number += 1
out_img_name = outfile_dir + '//' + file.strip()
cv2.imwrite(out_img_name, matched)
print("已处理的照片数为",processed_number)
print("按enter键以确保您的操作并处理下一张图片")
break
def quhongzhang(img_output1):
processed_number = 0 # 统计处理图片的数量
piliangzhifanghu('D:/aiImg/input','D:/aiImg/quchuzhang')
for file in os.listdir('D:/aiImg/quchuzhang'):
data_base_dir = 'D:/aiImg/quchuzhang'
read_img_name = data_base_dir + '//' + file.strip() # 取图片完整路径
image = cv2.imread(read_img_name) # 读入图片
B_channel, G_channel, R_channel = cv2.split(image)
# 多传入一个参数cv2.THRESH_OTSU,并且把阈值thresh设为0,算法会找到最优阈值
thresh, ret = cv2.threshold(R_channel, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# 实测调整为95%效果好一些
filter_condition = int(thresh * 0.95)
_, red_thresh = cv2.threshold(R_channel, filter_condition, 255, cv2.THRESH_BINARY)
# 把图片转回 3 通道
result_img = np.expand_dims(red_thresh, axis=2)
result_img = np.concatenate((result_img, result_img, result_img), axis=-1)
out_img_name1 = 'D:/aiImg/quchuzhang' + '//' + file.strip()
cv2.imwrite(out_img_name1, result_img)
print("已处理的照片数为", processed_number)
break
| sdy555/pythonTest | function/批量去除章印.py | 批量去除章印.py | py | 2,386 | python | en | code | 0 | github-code | 13 |
74880905618 | from seismic_zfp.read import SgzReader
import segyio
import time
import os
import sys
from PIL import Image
import numpy as np
from matplotlib import cm
base_path = sys.argv[1]
LINE_NO = int(sys.argv[2])
CLIP = 200
SCALE = 1.0/(2.0*CLIP)
with segyio.open(os.path.join(base_path, '0.sgy'), strict=False) as segyfile:
t0 = time.time()
il_ids = [h[189] for h in segyfile.header if h[193] == LINE_NO]
trace_ids = [i for i, h in enumerate(segyfile.header) if h[193] == LINE_NO]
slice_segy = np.zeros((max(il_ids) - min(il_ids) + 1, len(segyfile.samples)))
for i, trace_id in enumerate(trace_ids):
slice_segy[il_ids[i] - min(il_ids), :] = segyfile.trace[trace_id]
print("segyio took", time.time() - t0)
with SgzReader(os.path.join(base_path, '0.sgz')) as reader:
t0 = time.time()
slice_sgz = reader.read_crossline(LINE_NO-reader.xlines[0])
print("SgzReader took", time.time() - t0)
slice_sgz = slice_sgz[min(il_ids) - reader.xlines[0]: max(il_ids) - reader.ilines[0] + 1]
im = Image.fromarray(np.uint8(cm.seismic((slice_sgz.T.clip(-CLIP, CLIP) + CLIP) * SCALE)*255))
im.save(os.path.join(base_path, 'out_crossline-sgz.png'))
im = Image.fromarray(np.uint8(cm.seismic((slice_segy.T.clip(-CLIP, CLIP) + CLIP) * SCALE)*255))
im.save(os.path.join(base_path, 'out_crossline-sgy.png'))
im = Image.fromarray(np.uint8(cm.seismic(((slice_segy-slice_sgz).T.clip(-CLIP, CLIP) + CLIP) * SCALE)*255))
im.save(os.path.join(base_path, 'out_crossline-dif.png'))
| equinor/seismic-zfp | examples/sgz_reading/read-crossline-unstructured.py | read-crossline-unstructured.py | py | 1,496 | python | en | code | 57 | github-code | 13 |
19933241667 | # 给定一个 m x n 的矩阵,如果一个元素为 0,则将其所在行和列的所有元素都设为 0。请使用原地算法。
# 示例 1:
# 输入:
# [
# [1,1,1],
# [1,0,1],
# [1,1,1]
# ]
# 输出:
# [
# [1,0,1],
# [0,0,0],
# [1,0,1]
# ]
class Solution:
def setZeroes(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
row_num, col_num = len(matrix), len(matrix[0])
# 创建集合set()用于存放需要置零的行和列
row_set, col_set = set(), set()
for row in range(row_num):
for col in range(col_num):
if matrix[row][col]==0:
row_set.add(row)
col_set.add(col)
# 将记录的行、列中的元素赋值为0
# 再次遍历赋值
for row in range(row_num):
for col in range(col_num):
if row in row_set or col in col_set:
matrix[row][col] = 0
# # 或者行列单独赋值均可
# for row in row_set:
# for col in range(col_num):
# matrix[row][col] = 0
# for col in col_set:
# for row in range(row_num):
# matrix[row][col] = 0
| Vivhchj/LeeeCode_Notes | 73.矩阵置零_mid.py | 73.矩阵置零_mid.py | py | 1,296 | python | en | code | 0 | github-code | 13 |
16324784397 | # django-salesforce
#
# by Phil Christensen
# (c) 2012-2013 Freelancers Union (http://www.freelancersunion.org)
# See LICENSE.md for details
#
"""
A database backend for the Django ORM.
Allows access to all Salesforce objects accessible via the SOQL API.
"""
import logging
import warnings
import django
__version__ = "0.7.2"
DJANGO_18_PLUS = True # unused by us now - backward compatibility
DJANGO_19_PLUS = django.VERSION[:2] >= (1, 9)
DJANGO_110_PLUS = django.VERSION[:2] >= (1, 10)
DJANGO_111_PLUS = django.VERSION[:2] >= (1, 11)
if django.VERSION[:3] < (1, 8, 4) or django.VERSION[:2] > (1, 11):
# Statistically three or more blocking issues can be expected by every
# new major Django version. Strict check before support is better.
raise ImportError("Django version between 1.8.4 and 1.11.x is required "
"for this django-salesforce.")
if DJANGO_111_PLUS:
warnings.warn("Support for Django 1.11 is still in pre-release quality. "
"Test your app properly after upgrading.")
log = logging.getLogger(__name__)
# Default version of Force.com API.
# It can be set by setattr() to any lower or higher supported value.
# (The highest version can be set by "salesforce.utils.set_highest_api_version()".
# It is useful for development, a constant version is for production.)
# Example for settings.py:
# >>> import salesforce
# >>> setattr(salesforce, 'API_VERSION', '37.0')
API_VERSION = '39.0' # Spring '17
| jhflorey/djangoSalesforceMaster | salesforce/__init__.py | __init__.py | py | 1,475 | python | en | code | 1 | github-code | 13 |
30711886159 | # pLADMPSAP
# By ZincCat
from scipy.optimize import minimize
import numpy as np
from matplotlib import pyplot as plt
# from joblib import Parallel, delayed
np.random.seed(19890817)
n = 70
s = 30
x = np.random.normal(0, 1, (n, s))
y = np.random.choice([0, 1], s)
w0 = np.random.normal(0, 1, n)
def f(w):
return np.sum(np.log(1+np.exp(-y*np.dot(w, x))))/s
def gradient_f(w):
temp = np.exp(-y*np.dot(w, x))
return np.sum(-temp*y/(1+temp)*x, axis=1)/s
def descent(w, grad, value, mode='2', alpha=0.4, beta=0.8, eta=1e-7):
# 梯度下降函数
# 输入目前x取值, 梯度, 梯度的范数, 下降模式
# 输出下降后x取值, 步长t
# 下降模式为'2'时采用2范数, 为'inf'时采用无穷范数
g = grad(w)
grad_norm = np.linalg.norm(g)
if grad_norm <= eta:
return w, True
normalized_grad = g/grad_norm
t = 1.0
if mode == '2':
# l_2 norm
while f(w - t*normalized_grad) > value - alpha*t*np.dot(g, normalized_grad):
t *= beta
w -= t*normalized_grad
elif mode == 'inf':
# l_infty norm
while f(w - t*np.sign(normalized_grad)) > value - alpha*t*np.dot(g, np.sign(normalized_grad)):
t *= beta
w -= t*np.sign(normalized_grad)
return w, False
def gd(w0, eta=1e-5, maxIter=1000):
w = w0.copy()
timestep = 0
while timestep <= maxIter:
value = f(w)
print("Iteration:", timestep, "Error", value)
w, finished = descent(w, gradient_f, value,
mode='2', eta=eta) # 此时使用2范数
if finished:
break
timestep += 1
return w
def grad(i, w):
temp = np.exp(-y[i]*np.dot(w, x[:, i]))
return -temp*y[i]/(1+temp)*x[:, i]/s
def pLADMAPSAP(w0, beta, eps1=1e-7, eps2=1e-5, maxBeta=1e1, maxIter=1e7, rho0=1.9):
w = w0.copy()
W = np.random.rand(s, n)
newW = np.zeros_like(W)
L = np.zeros_like(W)
dL = np.zeros_like(W)
Li = np.linalg.norm(x, axis=1)/4/s
eta = s*np.ones_like(Li)
tau = Li + beta*eta
timestep = 0
values = []
while timestep <= maxIter:
if timestep % 1000 == 0:
print(timestep, f(w))
values.append(f(w))
# naive multithreading, however, too slow when matrices are small
# Parallel(n_jobs=-1, backend='threading')(delayed(update)(i, W, w, Lam, beta)
# for i in range(s))
# sequential update
for i in range(s):
newW[i] = w - L[i]/tau - grad(i, W[i])/tau
dL[i] = W[i]-w
w = (np.sum(W, axis=0)+np.sum(L, axis=0)/tau)/s
L += tau*dL
crit = np.linalg.norm(dL) < eps1
W = newW
# if beta*np.max(np.sqrt(n)*dW/np.linalg.norm(w)) < eps2:
# rho = rho0
# crit2 = True
# else:
# rho = 1
# crit2 = False
# beta = min(maxBeta, beta*rho)
# tau = Li + beta*eta
if crit: # and crit2:
print("Finished!!!")
print(timestep, f(w))
break
timestep += 1
return w, values
w3, values = pLADMAPSAP(w0, 0.001)
print(f(w3))
plt.plot(values)
plt.xlabel("Value")
plt.ylabel("Steps")
plt.savefig("pLADMPSAP")
w = gd(w0)
print(f(w))
"""
w2 = minimize(f, w0, jac=gradient_f)
print(w2.fun)
"""
| zinccat/Convex-Analysis-homework | 20/2.py | 2.py | py | 3,365 | python | en | code | 15 | github-code | 13 |
43263227092 | def main():
def nearlist(n0, lst):
res = [[] for _ in range(n0)]
for a, b in lst:
res[a - 1].append(b - 1)
res[b - 1].append(a - 1)
return res
def bfs(s0, n0):
dist = [-1] * n0
dist[s0] = 0
que = [s0]
for q in que:
for i in near[q]:
if dist[i] > -1:
continue
dist[i] = dist[q] + 1
que.append(i)
res = q
return res, dist[res]
near = nearlist(N, AB)
dp = [0] * N
dp[0:2] = [1, 0]
for i in range(2, N):
dp[i] = (0 in dp[i-2:i])
_, l = bfs(bfs(0, N)[0], N)
return print('First' if dp[l] else 'Second')
if __name__ == '__main__':
N = int(input())
AB = [list(map(int, input().split())) for _ in range(N-1)]
main()
| Shirohi-git/AtCoder | agc/agc033_c.py | agc033_c.py | py | 846 | python | en | code | 2 | github-code | 13 |
4060511988 | import pandas as pd
df_prices=pd.read_csv("prices.csv")
df_prices_adjusted=pd.read_csv("prices-split-adjusted.csv")
df_securities=pd.read_csv("securities.csv")
#print(df_prices)
def process_prices(df):
df['date']=df['date'].apply(lambda x:x[:10])
df_filtered=df[['date', 'symbol','close']]
df_pivot=pd.pivot_table(df_filtered,index=['date'], columns=['symbol'], values=['close'])
df_pivot.columns=df_pivot.columns.droplevel()
return df_pivot
df_prices_processed=process_prices(df_prices)
df_prices_adjusted_processed=process_prices(df_prices_adjusted)
#print(df_prices_processed)
def add_effective_return(df):
df_out=pd.DataFrame()
for col in df.columns:
df_out[col+'_effective_return']=df[col]/df[col].shift(1)-1
return df_out
df_prices_processed_with_returns = add_effective_return(df_prices_processed)
df_prices_adjusted_processed_with_returns=add_effective_return(df_prices_adjusted_processed)
df_merged=df_prices_processed_with_returns.merge(df_prices_adjusted_processed_with_returns, on='date', suffixes=('_normal', '_adjusted'))
symbols=df_prices_processed.columns
splits=[]
for symbol in symbols:
df_symbol=df_merged[[symbol+'_effective_return_normal',symbol+'_effective_return_adjusted']]
difference_array=df_symbol[symbol+'_effective_return_normal']-df_symbol[symbol+'_effective_return_adjusted'] > 0.001
if len(df_symbol.loc[difference_array])>0:
splits.append(symbol)
print(splits,len(splits))
import numpy as np
def add_log_return(df):
df_out = pd.DataFrame()
for col in df.columns:
df_out[col]=df[col]/df[col].shift(1)-1
return df_out
df_prices_adjusted_log_returns = add_log_return(df_prices_processed)
return_dict={}
for col in df_prices_adjusted_log_returns:
col_notnull=df_prices_adjusted_log_returns.loc[df_prices_adjusted_log_returns[col].notnull(),col]
yearly_return=255*col_notnull.mean()
return_dict[col]=yearly_return
print(return_dict)
df_securities['yearly_return']=df_securities['Ticker symbol'].map(return_dict)
sector_average_returns=df_securities.groupby('GICS Sector').mean()
print(sector_average_returns)
df_filtered=df_prices_adjusted_log_returns[['A']]
df_filtered['cumsum']=df_filtered.cumsum()
import matplotlib.pyplot as plt
df_filtered['cumsum'].plot()
plt.savefig('A stock price.pdf')
plt.show | Vilmos97/NumFin4 | eset.py | eset.py | py | 2,333 | python | en | code | 0 | github-code | 13 |
5640908700 | # importing packages
from CharacterProfiles import *
from Statics import *
import numpy as np
import itertools as it
from time import perf_counter
def powerset(iterable):
    """Yield every subset of *iterable* as a tuple, ordered by subset size."""
    items = list(iterable)
    by_size = (it.combinations(items, size) for size in range(len(items) + 1))
    return it.chain.from_iterable(by_size)
def SItoScalar(level, multiplier):
    """Convert a superimposition level (1-5) to its effect scalar.

    Each level above 1 shifts the base value by 0.25 steps, scaled by
    *multiplier*.  Levels outside 1-5 return the neutral scalar 1.
    """
    # Plain range membership replaces the original np.arange(1, 6) check:
    # no array allocation per call, identical results for integral levels.
    if level in range(1, 6):
        return 1 - 0.25 * (level - 1) + multiplier * 0.25 * (level - 1)
    return 1
# update energy gains from basic, skill, and ult
def updateEnergy(options, char):
    """Compute per-action energy gains (basic, skill, ult) for a character
    with the given set of option codes enabled.

    Constants (ENERGY_ROPE_5S, VONWACQ, POST_OP, ...) come from Statics;
    postOpSuperimpose / sharedFeelingSuperimpose are module-level globals
    set in the __main__ block — TODO confirm they are set before any call.
    Option codes: 1/6 energy ropes, 2 Vonwacq, 3 Post-Op, 4 Shared Feeling,
    5 Asta E4, 7 Thief, 8 Before Tutorial, 9 Meshing Cogs (per OPTION_DICT).
    """
    skillAttacks, ultAttacks = char.attacksWith
    # ER: total energy-regen multiplier from all enabled regen options.
    ER = (1 +
          int(1 in options) * ENERGY_ROPE_5S + int(2 in options) * VONWACQ +
          int(3 in options) * POST_OP * SItoScalar(postOpSuperimpose, 2) +
          int(5 in options) * ASTA_E4 +
          int(6 in options) * ENERGY_ROPE_4S)
    basic = (char.basicEnergy +
             int(8 in options) * BEFORE_TUTORIAL +
             int(9 in options) * MESHING_COGS) * ER
    # Skill also gains per-hit energy for each attack the skill performs.
    skill = (char.skillEnergy +
             int(4 in options) * int(SHARED_FEELING * SItoScalar(sharedFeelingSuperimpose, 2)) +
             skillAttacks * (int(8 in options) * BEFORE_TUTORIAL +
                             int(9 in options) * MESHING_COGS)) * ER
    ult = (char.ultEnergy +
           int(7 in options) * THIEF +
           ultAttacks * (int(8 in options) * BEFORE_TUTORIAL +
                         int(9 in options) * MESHING_COGS)) * ER
    return basic, skill, ult
def cheapestCombo(basic, skill, ult, rotationTurns, maxEnergy):
    """Find the minimum number of skill casts (remaining turns are basics)
    needed to reach max energy within one rotation.

    Returns (skill_casts, energy_reached).  The game rounds up when within
    0.2 of max energy, hence the 0.2 tolerance on the threshold.
    """
    casts = 0
    gained = ult + rotationTurns * basic
    while casts < rotationTurns and gained < maxEnergy - 0.2:
        casts += 1
        gained = ult + casts * skill + (rotationTurns - casts) * basic
    return casts, gained
def cullIllegalResults(results):
    """Drop every option subset that is a superset of an illegal combo or of
    a smaller subset already kept, leaving only minimal legal results.

    Returns an object-dtype numpy array of [options, skillCasts, comboEnergy]
    rows, preserving input order.
    """
    kept = []
    minimal_subsets = list(ILLEGAL_OPTION_COMBOS)
    for options, casts, energy in results:
        if any(set(small) <= set(options) for small in minimal_subsets):
            continue  # dominated by an illegal combo or a smaller kept subset
        if len(options) > 0:
            minimal_subsets.append(options)
        kept.append([options, casts, energy])
    return np.asarray(kept, dtype=object)
def readableOptions(options):
    """Render a collection of option codes as a comma-separated name list,
    or 'none' for an empty collection (names come from OPTION_DICT)."""
    if len(options) == 0:
        return 'none'
    return ', '.join(OPTION_DICT[option] for option in options)
# get un-culled results given a character and turns per rotation
def rawResults(char, rotationTurns):
    """Enumerate every subset of the character's options and keep those that
    reach max energy within the rotation.

    Returns a list of [optionsSubset, skillCasts, comboEnergy] triples
    (not yet culled for redundant/illegal subsets).
    """
    maxEnergy = char.maxEnergy
    results = []
    for optionsSubset in powerset(char.options):
        basic, skill, ult = updateEnergy(optionsSubset, char)
        skillCasts, comboEnergy = cheapestCombo(basic, skill, ult, rotationTurns, maxEnergy)
        # Same 0.2 rounding tolerance used by cheapestCombo.
        if comboEnergy >= maxEnergy - 0.2:
            results.append([optionsSubset, skillCasts, comboEnergy])
    return results
# cull and print results given results, character, and turns per rotation
def cullAndPrint(results, char, rotationTurns):
    """Group results by number of skill casts (most to fewest), cull
    redundant subsets in each group, and print a readable report.

    SPT = skill-point delta per turn: basics generate, skills spend, so
    2 * basics / turns - 1 gives the net rate.
    """
    results = np.asarray(results, dtype=object)
    print(f'{char.name}: {char.maxEnergy} ENERGY IN {rotationTurns} TURNS')
    if len(results) == 0:
        print('No results found!')
    else:
        for i in range(rotationTurns + 1):
            # Column 1 holds the skill-cast count for each result row.
            currentResults = results[np.where(results[:, 1] == rotationTurns - i)]
            currentResults = cullIllegalResults(currentResults)
            if len(currentResults) > 0:
                print(
                    f' - {rotationTurns - i} SKILL {i} BASIC ({"%.2f" % (2 * i / rotationTurns - 1)} SPT)')
                for optionsSubset, skillCasts, comboEnergy in currentResults:
                    print(f'{readableOptions(optionsSubset)}: {"%.2f" % comboEnergy} energy')
    return
if __name__ == '__main__':
    # start timer
    t0 = perf_counter()
    # parameters
    char = Natasha
    rotationTurns = 4
    # These two module-level globals are read by updateEnergy via SItoScalar.
    postOpSuperimpose = 1
    sharedFeelingSuperimpose = 3
    # get un-culled results
    results = rawResults(char, rotationTurns)
    # cull and print results
    cullAndPrint(results, char, rotationTurns)
    # timing code
    t1 = perf_counter()
    print()
    print(f'Got results in {"%.5f" % (t1 - t0)} s')
print(f'Got results in {"%.5f" % (t1 - t0)} s')
| prodbywinter/StarRail | EnergyCalcs.py | EnergyCalcs.py | py | 4,736 | python | en | code | 0 | github-code | 13 |
19905712207 | class Solution:
def combine(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""
#recursive
if k == 1:
return [[x,] for x in range(1, n+1)]
if n == k:
return [[i for i in range(1, n+1)]]
return [i + [n] for i in self.combine(n-1,k-1)] + [i for i in self.combine(n-1, k)]
#backstring
self.res = []
L = [i for i in range(1,n+1)]
self.dfs(L, k, 0, [])
return self.res
def dfs(self, n, k, index, path):
if len(path) == k:
self.res.append(path)
if len(path) > k:
return
for i,num in enumerate(n[index:]):
self.dfs(n, k, index+i+1, path + [num])
| littleliona/leetcode | medium/77.combinations.py | 77.combinations.py | py | 769 | python | en | code | 0 | github-code | 13 |
28911961430 | import h5py
import numpy as np
import os
import argparse
import math
batch_size = 5000  # number of source rows converted per chunked read/write pass
def hdf5_process(in_file, out_file):
    """Convert a combined 4096-way move-probability dataset into separate
    64-way 'from-square' and 'to-square' distributions.

    Reads 'features', 'probs' (size 4096 = 64*64 per row) and 'rewards'
    from *in_file*, writes 'features', 'pi_from', 'pi_to', 'rewards' to
    *out_file*.  Output is written to a hidden ".tmp." file first and
    renamed on success, so a crash never leaves a half-written file.
    """
    # initialise the output
    tmp_file = os.path.join(os.path.dirname(out_file), ".tmp." + os.path.basename(out_file))
    combined = h5py.File(tmp_file, 'w')
    try:
        fileread = h5py.File(in_file, 'r')
        features_data = fileread['features']
        actions_data = fileread['probs']
        rewards_data = fileread['rewards']
        size = len(features_data)
        combined.require_dataset(
            name='features',
            dtype=np.uint8,
            shape=(size, 8, 8, 18),
            maxshape=(None, 8, 8, 18),
            chunks=True,
            compression="lzf")
        # np.float64 replaces the deprecated np.float alias (removed in
        # NumPy >= 1.24); it is the exact type np.float resolved to.
        combined.require_dataset(
            name='pi_from',
            dtype=np.float64,
            shape=(size, 64),
            maxshape=(None, 64),
            chunks=True,
            compression="lzf")
        combined.require_dataset(
            name='pi_to',
            dtype=np.float64,
            shape=(size, 64),
            maxshape=(None, 64),
            chunks=True,
            compression="lzf")
        combined.require_dataset(
            name='rewards',
            dtype=np.int8,
            shape=(size, 1),
            maxshape=(None, 1),
            chunks=True,
            compression="lzf")
        features = combined["features"]
        actions_from = combined["pi_from"]
        actions_to = combined["pi_to"]
        rates = combined["rewards"]
        offset = 0
        while offset < size:
            # Last chunk may be smaller than batch_size.
            if size - offset >= batch_size:
                read_size = batch_size
            else:
                read_size = size - offset
            feature_batch = np.zeros((read_size, 8, 8, 18))
            from_batch = np.zeros((read_size, 64))
            to_batch = np.zeros((read_size, 64))
            rate_batch = np.zeros((read_size, 1))
            for i in range(read_size):
                feature_batch[i] = features_data[offset + i]
                rate_batch[i] = rewards_data[offset + i]
                probs = actions_data[offset + i]  # size is 4096
                for j in range(len(probs)):
                    if probs[j] > 0:
                        # sqrt flattens the joint probability before the
                        # marginals are re-normalised below — presumably to
                        # soften the distribution; TODO confirm intent.
                        value = math.sqrt(probs[j])
                        from_square = j // 64
                        to_square = j % 64
                        from_batch[i, from_square] += value
                        to_batch[i, to_square] += value
                # Re-normalise each marginal to a proper distribution.
                from_batch[i] = from_batch[i]/np.sum(from_batch[i])
                to_batch[i] = to_batch[i]/np.sum(to_batch[i])
            features[offset:offset+read_size] = feature_batch
            actions_from[offset:offset+read_size] = from_batch
            actions_to[offset:offset+read_size] = to_batch
            rates[offset:offset+read_size] = rate_batch
            offset += read_size
            print("percentage:", offset/size)
        fileread.close()
        combined.close()
        # Atomic-ish publish: only a fully written file gets the real name.
        os.rename(tmp_file, out_file)
    except Exception as e:
        os.remove(tmp_file)
        raise e
def run_convert(cmd_line_args=None):
    """Parse command-line arguments and run the HDF5 feature conversion.

    cmd_line_args may be passed in as a list; when None, argparse falls
    back to sys.argv (parse_args(None) is equivalent to parse_args()).
    """
    parser = argparse.ArgumentParser(
        description='Convert the features in hdf5 files',
        epilog="A hdf5 files is needed")
    parser.add_argument("--outfile", "-o", help="Destination to write data (hdf5 file)", required=True)
    parser.add_argument("--infile", "-i", help="Source HDF5 files to process", required=True)
    args = parser.parse_args(cmd_line_args)
    hdf5_process(args.infile, args.outfile)


if __name__ == '__main__':
    run_convert()
| xiaoyaohu0325/chess_deeplearning | preprocessing/features_converter.py | features_converter.py | py | 3,761 | python | en | code | 1 | github-code | 13 |
25329032064 | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import mannwhitneyu
class Detector:
    """Base class for change-point/anomaly detectors over a 1-D series.

    Subclasses override detect(); the base implementation flags nothing.
    """

    def __init__(self):
        pass

    def detect(self, ts):
        """Store the series and return the (empty) list of flagged indices."""
        flagged = []
        self.ts = ts
        self.err_indices = flagged
        return flagged

    def plot_result(self):
        """Plot the series with every detected index marked as a red dot."""
        plt.plot(self.ts)
        plt.plot(self.err_indices, self.ts[self.err_indices], ".r")
class ChowTest(Detector):
    """Chow structural-break test — placeholder, not implemented yet."""
    def __init__(self, n):
        # n: intended window length; construction always fails until implemented.
        self.n = n
        raise NotImplementedError
class WilcoxonTest(Detector):
    """Sliding-window Mann-Whitney U (rank-sum) change detector.

    In each length-n window the first m samples are compared against the
    remaining n-m; a p-value below alpha flags the window split point.
    """

    def __init__(self, n=45, m=20, alpha=0.01):
        self.n = n          # total window length
        self.m = m          # length of the leading sub-window
        self.alpha = alpha  # significance threshold

    def detect(self, ts):
        """Return indices (split positions) where the two sub-windows differ
        significantly; also stored in self.err_indices."""
        self.ts = ts
        self.N = ts.shape[0]
        self.err_indices = []
        for start in range(self.N - self.n):
            left = ts[start:start + self.m]
            right = ts[start + self.m:start + self.n]
            if mannwhitneyu(left, right).pvalue < self.alpha:
                self.err_indices.append(start + self.m)
        return self.err_indices
class CusumTest(Detector):
    """Two-sided tabular CUSUM change detector.

    climit: decision threshold in units of the estimated std deviation.
    mshift: magnitude of the mean shift to detect, in std deviations.
    """
    def __init__(self, climit=5, mshift=1):
        self.climit = climit
        self.mshift = mshift
        self.ilower = []
        self.iupper = []
    def detect(self, ts, tmean=25, tdev=25):
        # tmean/tdev: number of leading samples used to estimate the
        # in-control mean and standard deviation (assumes the series starts
        # in-control — TODO confirm for each use).
        self.ts = ts
        self.N = ts.shape[0]
        self.mean = np.mean(ts[:tmean])
        self.dev = np.std(ts[:tdev])
        self.lowersum = np.zeros(self.N)
        self.uppersum = np.zeros(self.N)
        for i, x in enumerate(self.ts[1:], start=1):
            # Standard CUSUM recursions with slack mshift*dev/2 on each side.
            L = self.lowersum[i - 1] + x - self.mean + self.dev * self.mshift / 2
            U = self.uppersum[i - 1] + x - self.mean - self.dev * self.mshift / 2
            self.lowersum[i] = min(0, L)
            self.uppersum[i] = max(0, U)
        # Indices where either one-sided statistic crosses its threshold.
        self.ilower = np.where(self.lowersum < -self.climit * self.dev)
        self.iupper = np.where(self.uppersum > self.climit * self.dev)
        self.err_indices = np.union1d(self.ilower, self.iupper)
        return self.err_indices
    def plot_statistics(self):
        # Plot both CUSUM statistics against their decision thresholds.
        plt.plot(self.lowersum, "r")
        plt.plot(self.uppersum, "b")
        plt.plot([-self.climit * self.dev] * self.N, "--r", linewidth=0.5)
        plt.plot([self.climit * self.dev] * self.N, "--b", linewidth=0.5)
        plt.xlim([0, self.N])
| maksimchup/Error_tracking_GTS | detectors.py | detectors.py | py | 2,285 | python | en | code | 0 | github-code | 13 |
26570577445 | """Dad joke plugin."""
import server_utils
from plugins._base_plugins import BaseCommandPlugin
class DadJokeCommand(BaseCommandPlugin):
    """Dad joke plugin."""
    COMMAND = 'dadjoke'
    async def run(self) -> None:
        """Dad jokes provided by icanhazdadjoke.com."""
        url = 'https://icanhazdadjoke.com/'
        headers = {
            'User-Agent': 'https://github.com/amorphousWaste/twitch_bot',
            'Accept': 'application/json',
        }
        # Ask the API for JSON; the payload is expected to hold a 'joke' key.
        _, response = await server_utils.get_request(url, headers)
        if not response:
            # NOTE(review): returns -1 on failure although annotated -> None;
            # presumably a failure sentinel for the plugin runner — confirm.
            return -1
        await self.send_message(response['joke'])
| amorphousWaste/twitch_bot_public | twitch_bot/plugins/dad_joke_command.py | dad_joke_command.py | py | 638 | python | en | code | 0 | github-code | 13 |
5976050231 | import string
import typing
import unittest
import tempfile
import os
import random
import file_helper
from random_test_case_helper import get_random_catalog_page_url, get_random_detail_page_url, get_random_filename
class ImageExistsTestCase (unittest.TestCase):
    """Tests for file_helper.image_exists: lookup of an image file by ID,
    searched in a directory and (recursively) in its sub-directories."""
    # region Helpers
    @staticmethod
    def get_random_image_id_and_filename() -> typing.Tuple[str, str]:
        # Returns (id, "<id>.<ext>") with a random numeric ID and extension.
        image_id = str(random.randint(100000, 300000))
        extension = random.choice([".jpg", ".png", ".webp"])
        return (image_id, f"{image_id}{extension}")
    # endregion
    def test_existing_images_in_root_dir(self):
        with tempfile.TemporaryDirectory() as dir_name:
            for _ in range(10):
                image_id, filename = ImageExistsTestCase.get_random_image_id_and_filename()
                filename = os.path.join(dir_name, filename)
                # Create the test file.
                with open(filename, "a"):
                    pass
                self.assertTrue(file_helper.image_exists(image_id, dir_name))
    def test_existing_images_in_sub_dirs(self):
        with tempfile.TemporaryDirectory() as root_dir_name:
            for sub_dir_len in range(3, 10):
                dir_name = "".join(random.choices(string.ascii_lowercase + string.digits, k=sub_dir_len))
                dir_name = os.path.join(root_dir_name, dir_name)
                os.mkdir(dir_name)
                for _ in range(5):
                    image_id, filename = ImageExistsTestCase.get_random_image_id_and_filename()
                    filename = os.path.join(dir_name, filename)
                    # Create the test file.
                    with open(filename, "a"):
                        pass
                    # Search from the root: the file sits one level deeper.
                    self.assertTrue(file_helper.image_exists(image_id, root_dir_name))
    def test_absent_images_in_root_dir(self):
        test_ids = [str(random.randint(100000, 300000)) for _ in range(10)]
        with tempfile.TemporaryDirectory() as dir_name:
            for test_id in test_ids:
                self.assertFalse(file_helper.image_exists(test_id, dir_name))
class LoadFileTestCase (unittest.TestCase):
    """Tests for file_helper.get_urls_from_file (ordered list, comments and
    blank lines skipped) and get_ids_from_file (set of ID strings)."""
    # region Helpers
    @staticmethod
    def get_random_comment() -> str:
        """
        Gets a random comment string starting with a random comment prefix from `file_helper.COMMENT_PREFIXES`.
        """
        comment_start_seq = random.choice(file_helper.COMMENT_PREFIXES)
        line_length = random.randint(0, 50)
        return_value = "".join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=line_length))
        return_value = comment_start_seq + return_value
        return return_value
    @staticmethod
    def get_random_comments() -> typing.List[str]:
        # Generate comments and blank lines.
        comments_and_empty_lines = []
        for _ in range(random.randint(5, 100)):
            comments_and_empty_lines.append(LoadFileTestCase.get_random_comment())
        for _ in range(random.randint(5, 100)):
            comments_and_empty_lines.append("")
        return comments_and_empty_lines
    @staticmethod
    def get_random_urls() -> typing.List[str]:
        # Random mix of catalog-page and detail-page URLs, shuffled.
        urls = []
        for _ in range(random.randint(5, 100)):
            urls.append(get_random_catalog_page_url())
        for _ in range(random.randint(5, 100)):
            urls.append(get_random_detail_page_url())
        random.shuffle(urls)
        return urls
    @staticmethod
    def get_random_ids() -> typing.List[str]:
        # Note: may contain duplicates; callers compare via set(ids).
        ids = []
        for _ in range(random.randint(10, 200)):
            ids.append(str(random.randint(1, 9999999)))
        return ids
    # endregion
    # region Empty file
    def test_non_existent_file(self):
        with tempfile.TemporaryDirectory() as root_dir_name:  # Use a temporary dir to prevent filename collisions.
            filename = get_random_filename()
            filename = os.path.join(root_dir_name, filename)
            with self.assertRaises(FileNotFoundError):
                file_helper.get_urls_from_file(filename)
            with self.assertRaises(FileNotFoundError):
                file_helper.get_ids_from_file(filename)
    def test_empty_file(self):
        with tempfile.TemporaryDirectory() as root_dir_name:
            # Create an empty file.
            filename = get_random_filename()
            filename = os.path.join(root_dir_name, filename)
            with open(filename, "w"):
                pass
            # get_urls_from_file
            urls_from_file = file_helper.get_urls_from_file(filename)
            self.assertIsInstance(urls_from_file, list)
            self.assertEqual(len(urls_from_file), 0)
            self.assertFalse(urls_from_file)
            # get_ids_from_file
            ids_from_file = file_helper.get_ids_from_file(filename)
            self.assertIsInstance(ids_from_file, set)
            self.assertEqual(len(ids_from_file), 0)
            self.assertFalse(ids_from_file)
    def test_comment_only_file(self):
        file_content = ""
        for _ in range(random.randint(5, 100)):
            file_content += LoadFileTestCase.get_random_comment()
            file_content += "\n"
        with tempfile.TemporaryDirectory() as root_dir_name:
            # Create a comment-only file.
            filename = get_random_filename()
            filename = os.path.join(root_dir_name, filename)
            with open(filename, "w") as f:
                f.write(file_content)
            # get_urls_from_file
            urls_from_file = file_helper.get_urls_from_file(filename)
            self.assertIsInstance(urls_from_file, list)
            self.assertEqual(len(urls_from_file), 0)
            self.assertFalse(urls_from_file)
            # get_ids_from_file
            ids_from_file = file_helper.get_ids_from_file(filename)
            self.assertIsInstance(ids_from_file, set)
            self.assertEqual(len(ids_from_file), 0)
            self.assertFalse(ids_from_file)
    # endregion
    # region URLs file
    def test_url_only_file(self):
        # Generate URLs.
        urls = self.get_random_urls()
        # Generate file content from URLs.
        file_content = "\n".join(urls)
        with tempfile.TemporaryDirectory() as root_dir_name:
            # Create a URL-only file.
            filename = get_random_filename()
            filename = os.path.join(root_dir_name, filename)
            with open(filename, "w") as f:
                f.write(file_content)
            # Should return all URLs.
            urls_from_file = file_helper.get_urls_from_file(filename)
            self.assertIsInstance(urls_from_file, list)
            self.assertEqual(urls_from_file, urls)
    def test_url_comment_blank_mixture(self):
        urls = self.get_random_urls()
        comments_and_empty_lines = self.get_random_comments()
        # Mix URLs and comments.
        # None entries are positional placeholders so that shuffling decides
        # WHERE the URLs go while their relative order is preserved.
        urls_and_comments = comments_and_empty_lines + [None for _ in urls]
        random.shuffle(urls_and_comments)
        url_i = 0
        for i in range(len(urls_and_comments)):
            if urls_and_comments[i] is None:
                # Replace placeholders with true URLs.
                urls_and_comments[i] = urls[url_i]
                url_i += 1
        # Generate file content.
        file_content = "\n".join(urls_and_comments)
        with tempfile.TemporaryDirectory() as root_dir_name:
            # Create the file.
            filename = get_random_filename()
            filename = os.path.join(root_dir_name, filename)
            with open(filename, "w") as f:
                f.write(file_content)
            # Should return URLs in the correct order.
            urls_from_file = file_helper.get_urls_from_file(filename)
            self.assertIsInstance(urls_from_file, list)
            self.assertEqual(urls_from_file, urls)
    # endregion
    # region IDs file
    def test_id_only_file(self):
        ids = self.get_random_ids()
        # Generate file content from IDs.
        file_content = "\n".join(ids)
        with tempfile.TemporaryDirectory() as root_dir_name:
            # Create the file.
            filename = get_random_filename()
            filename = os.path.join(root_dir_name, filename)
            with open(filename, "w") as f:
                f.write(file_content)
            # Should return all IDs.
            ids_from_file = file_helper.get_ids_from_file(filename)
            self.assertIsInstance(ids_from_file, set)
            self.assertEqual(ids_from_file, set(ids))
    def test_id_comment_blank_mixture(self):
        ids = self.get_random_ids()
        comments_and_empty_lines = self.get_random_comments()
        # Mix IDs and comments.
        ids_and_comments = ids + comments_and_empty_lines
        random.shuffle(ids_and_comments)  # Order doesn't matter: Will convert to `set`
        # Generate file content.
        file_content = "\n".join(ids_and_comments)
        with tempfile.TemporaryDirectory() as root_dir_name:
            # Create the file.
            filename = get_random_filename()
            filename = os.path.join(root_dir_name, filename)
            with open(filename, "w") as f:
                f.write(file_content)
            # Should return all IDs.
            ids_from_file = file_helper.get_ids_from_file(filename)
            self.assertIsInstance(ids_from_file, set)
            self.assertEqual(ids_from_file, set(ids))
# endregion
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
| MacJim/Safebooru-Downloader | test_file_helper.py | test_file_helper.py | py | 9,514 | python | en | code | 0 | github-code | 13 |
70073798739 | # -*- coding: utf-8 -*-
#Integrantes: Carolina Hong y Andrés Pirela
import ply.lex as lex
import ply.yacc as yacc
# Keyword lexemes mapped to their ply token names.
# NOTE(review): the '=' entry is odd — t_ID only matches letters, so
# reserved.get never sees '='; the t_ASIGN regex below handles it instead.
reserved={
    'kcal' : 'KCAL',
    'ate' : 'ATE',
    'limit' : 'LIMIT',
    'average' : 'AVERAGE',
    'intake': 'INTAKE',
    '=' : 'ASIGN',
    'sum': 'SUM',
    'day': 'DAY',
    'food': 'FOOD',
    'with': 'WITH'
}
tokens = [
    'NUMBER', # grams consumed of some food
    'ID', # kind of food
    'DATE'
]+list(reserved.values())
# Literal regexes for each keyword token (ply reads t_<TOKEN> names).
t_KCAL = r'kcal'
t_ATE = r'ate'
t_LIMIT = r'limit'
t_AVERAGE = r'average'
t_INTAKE = r'intake'
t_ASIGN = r'='
t_SUM = r'sum'
t_DAY = r'day'
t_FOOD = r'food'
t_WITH = r'with'
# Regular expression for a date (yyyy-mm-dd).
# ply uses each function's docstring as that token's regex — do not edit them.
def t_DATE(t):
    r'\d{4}-\d{2}-\d{2}'
    # \d matches digits
    # {n}: exact number of digits
    return t
def t_NUMBER(t):
    r'[0-9]+' # how to recognise a number: digits 0-9 repeated one or more times
    t.value = int(t.value)
    return t
def t_ID(t):
    r'[a-zA-Z_]+' # recognise letters (identifiers / food names)
    t.type = reserved.get(t.value,'ID') # map reserved words to their token type
    return t
t_ignore = ' \t' # ignore spaces and tabs
# lexer error handler
def t_error(t):
    print(f"Error lexico: Carácter no valido {t.value[0]}")
    t.lexer.skip(1) # skip the bad character instead of aborting the program
calorias = {
'verduras': {
'ajo': 169,
'apio': 20,
'cebolla': 47,
'brocoli': 31,
'berenjena': 29,
'lechuga': 18,
'esparragos': 26,
'espinaca': 32,
'zanahoria': 42,
'tomate': 22,
'pepino': 12,
'repollo': 19
},
'frutas': {
'arandanos': 41,
'cereza': 47,
'ciruela': 44,
'coco': 646,
'frambuesa': 40,
'frutilla': 36,
'granada': 65,
'kiwi': 51,
'limon': 39,
'mandarina': 40,
'mango': 57,
'manzana': 52,
'melon': 31,
'naranja': 44,
'pera': 61,
'pina': 51,
'platano': 90,
'pomelo': 30,
'sandia': 30,
'uva': 81
},
'lacteos': {
'helado': 167,
'leche_condensada': 350,
'leche_descremada': 36,
'crema': 298,
'queso_blanco': 70,
'queso_cheddar': 381,
'queso_mozzarella': 245,
'queso_parmesano': 393,
'yogur': 62
},
'carnes': {
'bacon': 665,
'lomo_cerdo': 208,
'chorizo': 468,
'gallina': 369,
'hamburguesa': 230,
'jamon': 380,
'pollo': 134,
'salami': 325,
'salchicha': 315,
'vacuno': 129,
'asado': 401
},
'productos del mar': {
'almejas': 50,
'atun': 280,
'calamar': 82,
'cangrejo': 85,
'caviar': 233,
'langosta': 67,
'mejillon': 74,
'ostras': 80,
'pulpo': 57,
'salmon': 172,
'sardina': 151,
'sardinas': 151,
'salmon_ahumado': 154,
'trucha': 94
},
'azucares': {
'azucar': 380,
'chocolate': 550,
'cacao': 366,
'miel': 300,
'mermelada': 280,
'nutella': 549,
'helado_de_agua': 139
},
'cereales': {
'arroz_blanco': 354,
'arroz_integral': 350,
'avena': 367,
'cereal': 360,
'harina_maiz': 349,
'harina_trigo': 340,
'pan': 240
},
'legumbres': {
'garbanzos': 361,
'lentejas': 336
},
'huevos': {
'clara': 48,
'yema': 368,
'huevo_duro': 147,
'huevo_entero': 162
},
'refrescos': {
'agua_tonica': 34,
'cafe': 1,
'cerveza': 45,
'champaña': 100,
'leche_almendra': 335,
'pisco': 210,
'te': 1,
'ron': 244,
'vodka': 315,
'whisky': 244,
'vino': 160,
},
'aceites': {
'aceite_girasol': 900,
'aceite_oliva': 900,
'mantequilla': 752,
'margarina': 752
},
'otros': {}
}
# grammar rules — each p_* function's docstring is its yacc production.
import datetime
from numpy import random
# Per-day consumption log: {date: {food_name: kcal_total}}.
intakes = {}
def p_expresion(t):
    '''
    expresion : kcal_expr
              | ate_expr
              | average_intake
              | limit
              | sum_day
              | food_with
    '''
def p_expr_numero(t):
    'expresion : NUMBER'
    # A bare number: list the foods whose kcal/100g is closest to it
    # (ties all kept).
    target_kcal=t[1]
    foods = {}
    min_diff = float('inf')
    for tipo, alimentos in calorias.items():
        for alimento, kcal in alimentos.items():
            diff = abs(kcal - target_kcal)
            if diff < min_diff:
                foods = {alimento: kcal}
                min_diff = diff
            elif diff == min_diff:
                foods[alimento] = kcal
    print(f"ALimentos con alrededor de {target_kcal} kcal:")
    for alimento, kcal in foods.items():
        print(f"{alimento}: {kcal} kcal")
def p_expr_id(t):
    'expresion : ID'
    # A bare food name: print its kcal per 100 g, or a not-found message.
    global calorias
    alimento = t[1]
    for tipo, alimentos in calorias.items():
        if alimento in alimentos:
            t[0] = alimentos[alimento]
            print(f"{alimento}: {alimentos[alimento]} kcal")
            return
    print(f"Alimento {alimento} no disponible")
    t[0] = 0
# Sentinel default meaning "no daily limit set" (see p_expr_ate).
limit = 1000000
# e.g. "limit 1050" is interpreted as a daily limit of 1050 kcal
def p_limit(t):
    'limit : LIMIT NUMBER'
    global limit
    cal_max = t[2]
    limit = t[2]
    print(f"Limite de calorias diarias establecido a {cal_max} kcal")
# expresion -> ate number id
# e.g. "ate 50 tomate" is interpreted as 50 grams of tomato eaten today
def p_expr_ate(t):
    'ate_expr : ATE NUMBER ID'
    global calorias
    gramos = t[2]
    alimento = t[3]
    fecha = datetime.date.today()
    for tipo, alimentos in calorias.items():
        if alimento in alimentos:
            # Table stores kcal per 100 g; scale to the grams eaten.
            cal_100g = alimentos[alimento]
            cal_total = (cal_100g/100) * int(gramos)
            suma_cal = sum(intakes.get(fecha, {}).values())
            # Reject the entry if it would exceed today's calorie limit.
            if (suma_cal + cal_total) > limit:
                print(f"Se ha superado el limite de calorias diarias {limit} kcal")
                print(f"Quedan solo {round(limit - suma_cal,3)} kcal disponibles")
                t[0] = 0
                return
            t[0] = cal_total
            if fecha in intakes:
                if alimento in intakes[fecha].keys():
                    intakes[fecha][alimento] += cal_total
                else:
                    intakes[fecha][alimento] = cal_total
            else:
                intakes[fecha] = {alimento: cal_total}
            print(f"Registro de consumo exitoso para el dia {fecha}: {alimento} ({round(cal_total, 3)} kcal)")
            # Only report remaining budget when a real limit was set.
            if limit != 1000000:
                print(f"Quedan {round(limit - suma_cal - cal_total, 3)} kcal disponibles")
            return
    print(f"Alimento {alimento} no disponible")
    t[0] = 0
# e.g. "kcal pera" — look up a food's calories per 100 g
def p_kcal_expr(t):
    'kcal_expr : KCAL ID '
    global calorias
    food = ""
    alimento = t[2]
    cal = -1  # sentinel: stays -1 when the food is not in the table
    for tipo, alimentos in calorias.items():
        if alimento in alimentos:
            food = tipo
            cal = alimentos[alimento]
            break
    if cal != -1:
        print(f"{alimento} ({food}): {cal} kcal en 100 gramos")
    else:
        print(f"No se encontraron calorias para el alimento {alimento}")
# e.g. "kcal carne = 200" — register a new food under the 'otros' category
def p_registro_consumo(t):
    'kcal_expr : KCAL ID ASIGN NUMBER'
    alimento = t[2]
    cal = t[4]
    global calorias
    if alimento not in calorias['otros']:
        calorias['otros'][alimento] = cal
        print(f"Registro de {alimento} exitoso")
    else:
        print(f"El alimento {alimento} ya existe")
# average_intake -> average intake
def p_average_intake(t):
    'average_intake : AVERAGE INTAKE'
    # Average the total calories over every day with at least one record.
    cal_total = 0
    dias = 0
    for consumidos in intakes.values():
        cal_total += sum(consumidos.values())
        dias +=1
    if dias > 0:
        average = cal_total/dias
        print(f"Promedio de calorías diario: {average} kcal")
    else:
        print("No hay registros")
from datetime import date
# sum_day -> sum day yyyy-mm-dd
# e.g. "sum day 2023-06-14"
def p_sum_day(t):
    'sum_day : SUM DAY DATE'
    fecha_str = t[3]
    year, month, day = fecha_str.split('-')
    fecha = date(int(year), int(month), int(day))
    cal_total = 0
    if fecha in intakes:
        daily_intake = intakes[fecha]
        # NOTE(review): the loop variable shadows the global 'calorias' table
        # locally; harmless here but worth renaming.
        for calorias in daily_intake.values():
            cal_total += calorias
        print(f"Calorias totales del dia {fecha}: {round(cal_total, 3)} kcal")
    else:
        print(f"No hay registros del dia {fecha}");
# expression -> food with number
# e.g. "food with 400" — propose a random plate totalling 400 kcal
def p_expr_food_with(t):
    'food_with : FOOD WITH NUMBER'
    global calorias
    comida = {}
    cal_disp = float(t[3])
    # An average plate: of the total calories, 55% protein, 40% cereal and
    # 5% vegetables.  NOTE(review): the original comment said 44% cereal but
    # the code uses 0.4 — confirm which is intended.
    p = cal_disp*0.55
    c = cal_disp*0.4
    v = cal_disp*0.05
    proteina = random.randint(0,4) # meat if 0, seafood if 1, legumes if 2, egg if 3
    if proteina == 0:
        d = calorias['carnes']
        alimento = random.choice(list(d.keys()))
        cal = d[alimento]
        # grams needed so that this food contributes exactly p kcal.
        gramos = p*100/(cal)
        comida[alimento] = [gramos, p]
    elif proteina == 1:
        d = calorias['productos del mar']
        alimento = random.choice(list(d.keys()))
        cal = d[alimento]
        gramos = p*100/(cal)
        comida[alimento] = [gramos, p]
    elif proteina == 2:
        d = calorias['legumbres']
        alimento = random.choice(list(d.keys()))
        cal = d[alimento]
        gramos = p*100/(cal)
        comida[alimento] = [gramos, p]
    else:
        d = calorias['huevos']
        alimento = random.choice(list(d.keys()))
        cal = d[alimento]
        gramos = p*100/(cal)
        comida[alimento] = [gramos, p]
    cereal = random.choice(list(calorias['cereales'].keys()))
    cal_c = calorias['cereales'][cereal]
    gramos_c = c*100/(cal_c)
    comida[cereal] = [gramos_c, c]
    verdura = random.choice(list(calorias['verduras'].keys()))
    cal_v = calorias['verduras'][verdura]
    gramos_v = v*100/(cal_v)
    comida[verdura] = [gramos_v, v]
    print(f"A continuacion se muestra un ejemplo de un plato que tiene un total de {cal_disp} kcal")
    for alimento in comida:
        print(f"{alimento} : {round(comida[alimento][0], 3)} gramos ({round(comida[alimento][1], 3)} kcal)")
def p_error(t):
    # yacc error handler: report and keep parsing the next input line.
    print("Error de sintaxis")
# Build the lexer and parser from the t_*/p_* definitions above, then run a
# read-parse loop until EOF (Ctrl-D / Ctrl-Z).
lexer=lex.lex()
parser=yacc.yacc()
while True:
    try:
        data = input()
    except EOFError:
        break
    parser.parse(data)
| caroohong/CalorEase | calorease.py | calorease.py | py | 10,636 | python | es | code | 0 | github-code | 13 |
# Exercise 863: extract every word enclosed in double quotes from a string.
import re

texto = '"Python", "JavaScript", "C++", "Java"'
# Non-greedy capture group grabs each quoted token separately.
patron = r'"(.*?)"'
lenguajes = re.findall(patron, texto)

print(lenguajes)
print()
for lenguaje in lenguajes:
    print(lenguaje)
| Fhernd/PythonEjercicios | Parte001/ex863_extraer_palabras_entre_comillas_dobles.py | ex863_extraer_palabras_entre_comillas_dobles.py | py | 293 | python | es | code | 126 | github-code | 13 |
7899241629 | from tree import Node
def retr_subgraph(val, g, starting_cands, degree, size_of_sub_graph, list_of_elements_found):
    """Depth-first search for a subgraph rooted at *val* whose vertices all
    have the given degree, built as a Node tree.

    list_of_elements_found is a shared visited-list, mutated across the
    recursion; size_of_sub_graph is currently unused except by the
    commented-out early-exit below — TODO confirm intended use.
    """
    root_node = Node(val)
    for i in range (0,len(starting_cands)):
        # if len(sub_graph_found) == size_of_sub_graph:
        #     print("Returned")
        #     return sub_graph_found
        list_of_elements_found.append(starting_cands[i])
        root_node.add_child(Node(starting_cands[i]))
        # print(root_node.children)
        print("Exploring node : " + str(starting_cands[i]))
        child_of_cand = list(g.neighbors(starting_cands[i]))
        # print("Neighbors found are " + str(child_of_cand))
        for x in range(0, len(child_of_cand)):
            # Recurse only into unvisited neighbours of the required degree.
            if (g.degree[child_of_cand[x]] == degree):
                if child_of_cand[x] not in list_of_elements_found:
                    print("Degree Matched at neighbour " + str(child_of_cand[x]) + " and recursing on it")
                    element = []
                    element.append(child_of_cand[x])
                    ret_node = retr_subgraph(child_of_cand[x],g, element, degree, size_of_sub_graph, list_of_elements_found)
                    root_node.add_child(ret_node)
                    # Backtrack: undo the most recent visited-list entry.
                    list_of_elements_found.pop(len(list_of_elements_found)-1)
return root_node | 4m4npr33t/De-Anonymising_Social_Networks | codes/retr.py | retr.py | py | 1,277 | python | en | code | 0 | github-code | 13 |
73783070098 | import numpy as np
import matplotlib.pyplot as plt
# ----------- 2
# Model setup: constant-velocity target in 2-D, state x = [px, vx, py, vy]^T.
T_e = 1            # sampling period
T = 100            # number of time steps
sigma_Q = 1        # process-noise intensity
sigma_px = 1       # observation noise std on x position
sigma_py = 30      # observation noise std on y position
x_init = np.array([[3, 40, -4, 20]]).transpose()
x_kalm = x_init        # initial filter state
P_kalm = np.eye(4)     # initial filter covariance
# State-transition matrix of the constant-velocity model.
F = np.array([[1, T_e, 0, 0],
              [0, 1, 0, 0],
              [0, 0, 1, T_e],
              [0, 0, 0, 1]])
# Discretised white-acceleration process-noise covariance.
Q = sigma_Q ** 2 * np.array([[T_e ** 3 / 3, T_e ** 2 / 2, 0, 0],
                             [T_e ** 2 / 2, T_e, 0, 0],
                             [0, 0, T_e ** 3 / 3, T_e ** 2 / 2],
                             [0, 0, T_e ** 2 / 2, T_e]])
# Observation: positions only.
H = np.array([[1, 0, 0, 0],
              [0, 0, 1, 0]])
R = np.array([[sigma_px ** 2, 0],
              [0, sigma_py ** 2]])
# ----------- 3
def creer_trajectoire(F, Q, x_init, T):
traj = []
x_previous = x_init.copy()
for i in range(0, T):
U = np.random.multivariate_normal([0, 0, 0, 0], Q).reshape((4, 1))
real = np.matmul(F, x_previous) + U
x_previous = real.copy()
traj.append(real)
return np.array(traj).transpose().reshape(4, T)
vecteur_x = creer_trajectoire(F, Q, x_init, T)  # simulated ground-truth states, shape (4, T)
# ----------- 4
def creer_observations(H, R, vecteur_x, T):
obs = []
X_t = vecteur_x.copy().transpose()
for i in range(0, T):
X = np.array(X_t[i]).reshape(4, 1)
V = np.random.multivariate_normal([0, 0], R).reshape((2, 1))
Y = np.matmul(H, X) + V
obs.append(Y)
return np.array(obs).transpose().reshape(2, T)
vecteur_y = creer_observations(H, R, vecteur_x, T)  # noisy position observations, shape (2, T)
# ----------- 5
def tracer(vecteur_x, vecteur_y):
    """Plot the true (x, y) trajectory against the observed positions."""
    # Rows 0/2 of the state are the x/y positions; observations are (2, T).
    x_x = vecteur_x[0]
    y_x = vecteur_x[2]
    x_y = vecteur_y[0]
    y_y = vecteur_y[1]
    fig, ax = plt.subplots()
    ax.plot(x_x, y_x, 'r--', label="Trajectoire réelle")
    ax.plot(x_y, y_y, 'b+', label="Trajectoire observée")
    ax.legend()
    plt.title("Trajectoire réelle contre trajectoire observée")
    plt.show()
tracer(vecteur_x, vecteur_y)
# ----------- 6
def filtre_de_kalman(F, Q, H, R, y_k, x_kalm_prec, P_kalm_prec):
    """One predict/update step of the linear Kalman filter.

    Returns [x_filt, P_filt]: the filtered state (4x1) and covariance (4x4)
    after assimilating the 2-element observation y_k.
    """
    # Prediction
    x_pred = F @ x_kalm_prec
    P_pred = F @ P_kalm_prec @ F.transpose() + Q
    # Update
    innovation = y_k.reshape(2, 1) - H @ x_pred
    S = H @ P_pred @ H.transpose() + R          # innovation covariance
    gain = P_pred @ H.transpose() @ np.linalg.inv(S)
    x_filt = x_pred + gain @ innovation
    P_filt = (np.eye(4) - gain @ H) @ P_pred
    return [x_filt, P_filt]
# ----------- 7
# Run the filter over every observation column.
x_est = []
x_kalm_prec = x_kalm
P_kalm_prec = P_kalm
for y_k in vecteur_y.transpose():
    [X_kalm_k, P_kalm_k] = filtre_de_kalman(F, Q, H, R, y_k, x_kalm_prec, P_kalm_prec)
    x_kalm_prec = X_kalm_k
    P_kalm_prec = P_kalm_k
    # NOTE(review): each estimate is SAMPLED from the posterior
    # N(x_k, P_k) rather than using the mean X_kalm_k — confirm this is
    # intentional (it adds noise to the reported estimates).
    x_est.append(np.random.multivariate_normal(X_kalm_k.reshape(1, 4).tolist()[0], P_kalm_k))
x_est = np.array(x_est)
# ----------- 8
def err_quadra(k, vecteur_x, x_est):
    """Squared Euclidean error between true and estimated state at step k.

    Both inputs are reshaped to (n_steps, 4); reshape(-1, 4) removes the
    original hidden dependency on the module-level global T while producing
    the same rows whenever the arrays hold n_steps 4-vectors.
    """
    states = np.asarray(vecteur_x).reshape(-1, 4)
    estimates = np.asarray(x_est).reshape(-1, 4)
    diff = states[k] - estimates[k]
    return np.dot(diff, diff)
def erreur_moyenne(vecteur_x, x_est, T):
    """Mean Euclidean estimation error over the T time steps.

    Fix: the original overwrote the accumulator each iteration
    (``sum = ...`` instead of ``+=``), so only the last step's error was
    counted; it also shadowed the builtin ``sum``.
    """
    total = 0.0
    for k in range(T):
        total += err_quadra(k, vecteur_x, x_est) ** .5
    return total / T
def erreur_moyenne_propose(vecteur_x, x_est, T):
    """Mean estimation error normalised by the magnitude of the y trajectory
    (relative error variant of erreur_moyenne).

    Fix: same accumulator bug as erreur_moyenne — the original replaced the
    running total each iteration instead of adding to it.
    """
    total = 0.0
    for k in range(T):
        total += err_quadra(k, vecteur_x, x_est) ** .5
    return total / (T * max_y_value(vecteur_x))
def max_y_value(vecteur_x):
    """Total absolute y-position magnitude (row 2 of the state array).

    NOTE(review): despite the name this is a SUM of |y| values, not a max —
    kept as-is to preserve the normalisation used by erreur_moyenne_propose.
    """
    magnitudes = [abs(value) for value in vecteur_x[2]]
    return abs(sum(magnitudes))
# ----------- 9 & 10
def tracer_estime(vecteur_x, vecteur_y, x_est, T):
    """Plot true, observed and estimated trajectories, with the absolute and
    relative mean errors shown in the figure title."""
    x_x = vecteur_x[0]
    y_x = vecteur_x[2]
    x_y = vecteur_y[0]
    y_y = vecteur_y[1]
    fig, ax = plt.subplots()
    ax.plot(x_x, y_x, 'r--', label="Trajectoire réelle")
    ax.plot(x_y, y_y, 'b+', label="Trajectoire observée")
    ax.plot(x_est.transpose()[0], x_est.transpose()[2], "k.", label="Trajectoire estimée")
    ax.legend()
    plt.title("Trajectoire réelle, observée et estimée - Erreur moyenne absolue : " + str(erreur_moyenne(vecteur_x, x_est, T)) + " ; Erreur moyenne relative : " + str(erreur_moyenne_propose(vecteur_x, x_est, T)))
    plt.show()
    # The blocks below (per-axis X/Y plots) are intentionally disabled.
    #abscisse = []
    #for i in range(0, len(x_x)):
    #    abscisse.append(i)
    #fig, ax = plt.subplots()
    #ax.plot(abscisse, x_x, 'r--', label="X réelle")
    #ax.plot(abscisse, x_y, 'b+', label="X observée")
    #ax.plot(abscisse, x_est.transpose()[0], "k.", label="X estimée")
    #ax.legend()
    #plt.title("Trajectoire en X réelle, observée et estimée - Erreur moyenne absolue : " + str(
    #    erreur_moyenne(vecteur_x, x_est, T)) + " ; Erreur moyenne relative : " + str(
    #    erreur_moyenne_propose(vecteur_x, x_est, T)))
    #plt.show()
    #abscisse = []
    #for i in range(0, len(x_y)):
    #    abscisse.append(i)
    #fig, ax = plt.subplots()
    #ax.plot(abscisse, y_x, 'r--', label="Y réelle")
    #ax.plot(abscisse, y_y, 'b+', label="Y observée")
    #ax.plot(abscisse, x_est.transpose()[2], "k.", label="Y estimée")
    #ax.legend()
    #plt.title("Trajectoire en Y réelle, observée et estimée - Erreur moyenne absolue : " + str(
    #    erreur_moyenne(vecteur_x, x_est, T)) + " ; Erreur moyenne relative : " + str(
    #    erreur_moyenne_propose(vecteur_x, x_est, T)))
    #plt.show()
tracer_estime(vecteur_x, vecteur_y, x_est, T)
######---------- | Application | ----------######
# ----------- 1
# On peut faire une moyenne entre la dernière valeur captée et la précédante
# ----------- 2
import scipy.io as scipyio
def filtre_de_kalman_avion(F, Q, H, R, y_k, x_kalm_prec, P_kalm_prec):
    """One linear Kalman step.

    Returns [filtered_state, filtered_cov, predicted_state, predicted_cov];
    the prediction pair is exposed so callers can fall back on it when an
    observation is missing.
    """
    # Prediction step
    x_pred = F @ x_kalm_prec
    P_pred = Q + F @ P_kalm_prec @ F.transpose()
    # Update step
    S = H @ P_pred @ H.transpose() + R
    K = P_pred @ H.transpose() @ np.linalg.inv(S)
    innovation = y_k.reshape(2, 1) - H @ x_pred
    x_filt = x_pred + K @ innovation
    P_filt = (np.eye(4) - K @ H) @ P_pred
    return [x_filt, P_filt, x_pred, P_pred]
def _flatten_mat_rows(mat_dict):
    """Collect every ndarray value of a scipy.io.loadmat dict into one array.

    loadmat dicts also contain metadata entries ('__header__', ...) that are
    not ndarrays; those are skipped, exactly like the original four copies
    of this loop did.
    """
    rows = []
    for value in mat_dict.values():
        if not isinstance(value, np.ndarray):
            continue
        for row in value:
            rows.append(row)
    return np.array(rows)


vecteur_x_avion_ligne_dic = scipyio.loadmat("vecteur_x_avion_ligne.mat")
vecteur_x_avion_voltige_dic = scipyio.loadmat("vecteur_x_avion_voltige.mat")
vecteur_y_avion_ligne_dic = scipyio.loadmat("vecteur_y_avion_ligne.mat")
vecteur_y_avion_voltige_dic = scipyio.loadmat("vecteur_y_avion_voltige.mat")

# Same behaviour as the four duplicated loops, factored through one helper.
vecteur_x_avion_ligne = _flatten_mat_rows(vecteur_x_avion_ligne_dic)
vecteur_x_avion_voltige = _flatten_mat_rows(vecteur_x_avion_voltige_dic)
vecteur_y_avion_ligne = _flatten_mat_rows(vecteur_y_avion_ligne_dic)
vecteur_y_avion_voltige = _flatten_mat_rows(vecteur_y_avion_voltige_dic)
if True:
    def tracer_trajectoire(vecteur_x, vecteur_y):
        """Filter observations that may contain NaNs (missed radar returns)
        and plot the result via tracer_estime.

        NOTE(review): y_last starts as [0, 0] (plain ints); if the very
        first observation is NaN, y_last[0].reshape(...) will raise.
        NOTE(review): on a missing observation the filter state
        (x_kalm_prec / P_kalm_prec) is not advanced, so the next update
        reuses the last filtered state — confirm this is intended.
        """
        x_est = []
        x_kalm_prec = x_kalm
        P_kalm_prec = P_kalm
        y_last = [0, 0]
        for y_k in vecteur_y.transpose():
            # Missing measurement: sample from the last prediction instead.
            if str(y_k[0]) == "nan" or str(y_k[1]) == "nan":
                x_k = np.random.multivariate_normal(y_last[0].reshape(1, 4).tolist()[0], y_last[1])
                x_est.append(x_k)
                continue
            [X_kalm_k, P_kalm_k, m, P] = filtre_de_kalman_avion(F, Q, H, R, y_k, x_kalm_prec, P_kalm_prec)
            x_kalm_prec = X_kalm_k
            P_kalm_prec = P_kalm_k
            # Remember the prediction pair for the NaN branch above.
            y_last = [m, P]
            x_est.append(np.random.multivariate_normal(X_kalm_k.reshape(1, 4).tolist()[0], P_kalm_k))
        x_est = np.array(x_est)
        tracer_estime(vecteur_x, vecteur_y, x_est, T)
    tracer_trajectoire(vecteur_x_avion_ligne, vecteur_y_avion_ligne)
    tracer_trajectoire(vecteur_x_avion_voltige, vecteur_y_avion_voltige)
######---------- | Partie 2 | ----------######
# ----------- 1
def cylindric(p_x, p_y):
    """Convert Cartesian (p_x, p_y) to polar coordinates, as [theta, r]."""
    radius = (p_x ** 2 + p_y ** 2) ** 0.5
    angle = np.arctan(p_y / p_x)
    return [angle, radius]
# ----------- 2
def H(X):
    """Observation function: 4x1 state [px, vx, py, vy]^T -> polar [[theta], [r]]."""
    theta, r = cylindric(X[0][0], X[2][0])
    return np.array([[theta],
                     [r]])
# Que devient la loi g_k(y_k|x_k) ?
# ----------- 3
# Radar measurement noise: 1 degree (in radians) on the bearing,
# 10 distance units on the range; R is the diagonal covariance.
sigma_angle = np.pi / 180
sigma_dist = 10
R = np.array([[sigma_angle ** 2, 0],
              [0, sigma_dist ** 2]])
def creer_observation_radar(R, vecteur_x, T):
    """Generate a noisy polar observation [theta; r] for each of the T states."""
    obs = []
    X_t = vecteur_x.copy().transpose()
    for i in range(0, T):
        X = np.array(X_t[i]).reshape(4, 1)
        # Zero-mean Gaussian measurement noise with covariance R.
        V = np.random.multivariate_normal([0, 0], R).reshape((2, 1))
        Y = H(X) + V
        obs.append(Y)
    return np.array(obs).transpose().reshape(2, T)
# ----------- 4
# Non car non linéaire
# ----------- 5
def f(X):
    """Bearing of the state's position: arctan(py / px)."""
    px, py = X[0][0], X[2][0]
    return np.arctan(py / px)
def g(X):
    """Range (distance from the origin) of the state's position."""
    px, py = X[0][0], X[2][0]
    return (px ** 2 + py ** 2) ** 0.5
def y_k(x_predic, x_k):
    """Linearised radar observation around x_predic (first-order expansion).

    NOTE(review): this helper appears unused — the module later rebinds the
    name `y_k` as a loop variable.  It also multiplies H(x_predic), a 2x1
    measurement vector, with the 4x1 state difference; the Jacobian
    H_tilde(x_predic) looks like the intended factor — TODO confirm before
    calling this function.
    """
    vector = np.array([[f(x_predic)],
                       [g(x_predic)]])
    # Measurement noise drawn from the module-level covariance R.
    v_k = np.random.multivariate_normal([0, 0], R).reshape((2, 1))
    return vector + np.matmul(H(x_predic), (x_k - x_predic)) + v_k
# ----------- 6
def H_tilde(X):
    """Jacobian of the polar observation [arctan(y/x), sqrt(x^2 + y^2)]
    with respect to the state [x, vx, y, vy]."""
    x = X[0][0]
    y = X[2][0]
    ratio_term = 1 + (y / x) ** 2
    r = (x ** 2 + y ** 2) ** 0.5
    return np.array([[-y / (x ** 2 * ratio_term), 0, 1 / (x * ratio_term), 0],
                     [x / r, 0, y / r, 0]])
def filtre_de_kalman_radar(F, Q, R, y_k, x_kalm_prec, P_kalm_prec):
    """One step of the extended Kalman filter with polar (radar) measurements.

    Uses the module-level observation function H and its Jacobian H_tilde;
    the Jacobian is evaluated at the predicted state, which is the standard
    EKF linearisation point.
    """
    # Prediction step
    x_kalm_prediction = np.matmul(F, x_kalm_prec)
    P_kalm_prediction = Q + np.matmul(np.matmul(F, P_kalm_prec), F.transpose())
    # Linearise the observation model at the predicted state.
    H_tild = H_tilde(x_kalm_prediction)
    # Raw observation as a 2x1 column vector.
    y_k_prime = y_k.reshape(2, 1)
    # Update step: gain uses the linearised H_tild, innovation the exact H.
    S = np.matmul(np.matmul(H_tild, P_kalm_prediction), H_tild.transpose()) + R
    K = np.matmul(np.matmul(P_kalm_prediction, H_tild.transpose()), np.linalg.inv(S))
    X_kalm_k = x_kalm_prediction.reshape(4, 1) + np.matmul(K, (y_k_prime - H(x_kalm_prediction)).reshape(2, 1)).reshape(4, 1)
    P_kalm_k = np.matmul(np.eye(4) - np.matmul(K, H_tild), P_kalm_prediction)
    return [X_kalm_k, P_kalm_k]
# EKF run over the full observation sequence, then back to Cartesian for plotting.
vecteur_y = creer_observation_radar(R, vecteur_x, T)
x_est = []
x_kalm_prec = x_kalm
P_kalm_prec = P_kalm
for y_k in vecteur_y.transpose():
    [X_kalm_k, P_kalm_k] = filtre_de_kalman_radar(F, Q, R, y_k, x_kalm_prec, P_kalm_prec)
    x_kalm_prec = X_kalm_k
    P_kalm_prec = P_kalm_k
    # Draw one sample from the filtered posterior as this step's estimate.
    x_est.append(np.random.multivariate_normal(X_kalm_k.reshape(1, 4).tolist()[0], P_kalm_k))
vecteur_y_car = []
for i in range(0, T):
    # Observations are [theta, r]; convert back to Cartesian (x, y).
    r = vecteur_y[1][i]
    theta = vecteur_y[0][i]
    vecteur_y_car.append([r*np.cos(theta), r*np.sin(theta)])
x_est = np.array(x_est)
vecteur_y_car = np.array(vecteur_y_car).transpose()
tracer_estime(vecteur_x, vecteur_y_car, x_est, T)
def tracer_estime_polaire(vecteur_x, vecteur_y, x_est, T):
    """Plot the true, observed and estimated trajectories in polar coordinates.

    Bug fix: the observed radius was computed from vecteur_x (the true
    trajectory, and with the wrong row index) instead of vecteur_y (the
    observations); both polar components now come from vecteur_y.
    """
    def angle(px, py):
        return np.arctan(py / px)

    def radius(px, py):
        return (px ** 2 + py ** 2) ** .5

    X_pol = []
    Y_pol = []
    X_est_pol = []
    for i in range(0, len(vecteur_x[0])):
        X_pol.append([angle(vecteur_x[0][i], vecteur_x[2][i]),
                      radius(vecteur_x[0][i], vecteur_x[2][i])])
        # Fixed: use the observation vector for both polar components.
        Y_pol.append([angle(vecteur_y[0][i], vecteur_y[1][i]),
                      radius(vecteur_y[0][i], vecteur_y[1][i])])
        X_est_pol.append([angle(x_est.transpose()[0][i], x_est.transpose()[2][i]),
                          radius(x_est.transpose()[0][i], x_est.transpose()[2][i])])
    X_pol = np.array(X_pol).transpose()
    Y_pol = np.array(Y_pol).transpose()
    X_est_pol = np.array(X_est_pol).transpose()
    # Row 1 holds the radii, row 0 the angles.
    x_x = X_pol[1]
    y_x = X_pol[0]
    x_y = Y_pol[1]
    y_y = Y_pol[0]
    fig, ax = plt.subplots()
    ax.plot(x_x, y_x, 'r--', label="Trajectoire réelle")
    ax.plot(x_y, y_y, 'b+', label="Trajectoire observée")
    ax.plot(X_est_pol[1], X_est_pol[0], "k.", label="Trajectoire estimée")
    ax.legend()
    plt.title("Trajectoire réelle, observée et estimée en polaire - Erreur moyenne : " + str(erreur_moyenne(vecteur_x, x_est, T)) + " ; Erreur moyenne relative : " + str(erreur_moyenne_propose(vecteur_x, x_est, T)))
    plt.show()
tracer_estime_polaire(vecteur_x, vecteur_y_car, x_est, T)
# Rapport faire des points +++ plot avec coordonnées polaires : bruit homoskedastique (c en anglais) | AlexandreChaussard/Cassiopee_Chirurgie | Anciens Programmes/TP - Inférences bayésiennes/TP 2 - Filtrage de Kalman/principal.py | principal.py | py | 13,063 | python | fr | code | 1 | github-code | 13 |
17691952682 | import re
import unicodedata
def convert_lower_case(doc):
    """Return the document with every character lower-cased."""
    lowered = doc.lower()
    return lowered
def remove_control_char(doc):
    """Replace every Unicode control character (category C*) with a space.

    Idiom fix: rewritten from a list/index-mutation loop to a single
    generator expression; the per-character behaviour is unchanged.
    """
    return "".join(" " if unicodedata.category(ch)[0] == "C" else ch
                   for ch in doc)
def remove_non_word_char(doc):
    """Keep only alphanumerics and the characters ',', '.', ' ' and '-'."""
    allowed = {',', '.', ' ', '-'}
    return "".join(ch for ch in doc if ch.isalnum() or ch in allowed)
def remove_redundant_space(doc):
    """Collapse whitespace runs to single spaces and trim both ends."""
    collapsed = re.sub(r"\s+", " ", doc)
    return collapsed.strip()
def add_dot(doc):
    """Ensure the stripped document both starts and ends with a '.'.

    Robustness fix: the original indexed doc[0] and therefore raised
    IndexError on an empty (or whitespace-only) input; such input now
    yields a single ".".
    """
    doc = doc.strip()
    if not doc:
        return "."
    if doc[0] != '.':
        doc = '.' + doc
    if doc[len(doc) - 1] != '.':
        doc += '.'
    return doc
def post_process(docs):
    """Run every document through the cleaning pipeline, in order:
    lower-case -> strip control chars -> strip punctuation -> collapse
    whitespace -> surround with dots.  Returns a new list.
    """
    res = []
    for doc in docs:
        doc = convert_lower_case(doc)
        doc = remove_control_char(doc)
        doc = remove_non_word_char(doc)
        doc = remove_redundant_space(doc)
        doc = add_dot(doc)
        res.append(doc)
return res | vietnguyen012/QA1 | botchat-api/search-engine/postprocess.py | postprocess.py | py | 924 | python | en | code | 0 | github-code | 13 |
4699104737 | #prompting user to enter value
x = input("Enter the value :") #input() returns a string, so typecast to int before doing math
print(x+" is what you entered")
#three ways of string formatting
user_input = input("Enter you name: ")
user_surname = input("Enter surname: ")
message_1 = "Hello %s %s!" %(user_input,user_surname) # old-style %-formatting: each %s is replaced in order
message_2 = f"Hello {user_input}!" #f-string: interpolates directly; the most readable option
message_3 = "this is another way Mr.{}".format(user_input) #str.format(): a third formatting style
print(message_1)
print(message_2)
print(message_3)
# Section 7 For Loops
listval = [9.3,7.5,9.9]
for items in listval:
    print(round(items))
# You can also call functions within loops to execute a function over the list items
# Looping through dictionaries: iterate through either the keys or the values
student_grades = {'Ash':100,'Dave':34,'Mary':45}
for grades in student_grades.values():
    print(grades)
for key,value in student_grades.items():
    print("{} has a grade = {}".format(key,value)) #another way of formatting strings
# While loop: runs as long as the condition is true
# while 1<2:
#     print(1) keeps printing 1. Use ctrl+C to break out of it
i=0
while i<3:
    print(i)
    i+=1 #prints 0, 1 and 2
#Use break/continue to exit or restart the loop; `while True` loops until break
while True:
    username=input("enter your name:")
    if username=='pypy':
        break
    else:
continue #This will keep running the loop until the user enters pypy | Soundwavepilot1969/Udemy_Python_Mega_Course | Section2_to_12/basics_6_7.py | basics_6_7.py | py | 1,608 | python | en | code | 0 | github-code | 13 |
475336591 | from pyspark.sql import SparkSession
from pyspark.sql.functions import sum,count
if __name__ == '__main__':
    spark = SparkSession.builder.appName("NewAssign").master("local[*]").getOrCreate()
    sc = spark.sparkContext
    #RDD
    # Compute the average rating per movieId with the low-level RDD API.
    textf = sc.textFile("/home/saif/LFS/cohort_c9/datasets/ratings.csv")
    header = textf.first()
    rddf = textf.filter(lambda x: x != header)
    # (movieId, rating) pairs extracted from the CSV columns.
    splitRDD = rddf.map(lambda x: (int(x.split(",")[1]), float(x.split(",")[2])))
    # Per key: accumulate (sum_of_ratings, count) within and across partitions.
    resultRdd = splitRDD.aggregateByKey((0.0, 0), lambda x, y: (x[0] + y, x[1] + 1),
                                        lambda x, y: (x[0] + y[0], x[1] + y[1]))
    res = resultRdd.map(lambda x: (x[0], x[1][0] / x[1][1]))
    for x in res.take(5):
        print(x)
    # for x in textf.take(5):
    #     print(x)
    #DF
    # Same aggregation using the DataFrame API (sum/count from pyspark.sql.functions).
    df = spark.read.format("csv").options(header=True, inferSchema=True).load(
        '/home/saif/LFS/cohort_c9/datasets/ratings.csv')
    df1 = df.groupBy(df.movieId).agg(sum(df.rating).alias("sums"),count(df.rating).alias("counts"))
    df2 = df1.select(df.movieId,(df1.sums/df1.counts).alias("AVG"))
    df2.show(5)
    df.write.format("csv").mode("overwrite").save("hdfs://localhost:9000/user/saif/HFS/Input/python")
| divya-anand21/PySpark_Lab | c9/rddQ3.py | rddQ3.py | py | 1,212 | python | en | code | 0 | github-code | 13 |
36294187691 | from django.forms import *
from models import Roast, Alert
from django.utils.translation import ugettext as _
class RoastForm(ModelForm):
    """ModelForm for creating a Roast; exposes only the body text and keys."""
    class Meta:
        model = Roast
        fields = ['body', 'keys']
        widgets = {
            'keys': SelectMultiple(attrs={'class': 'js-data-example-ajax-multiple', 'placeholder': 'Roast Description'}),
            'body': Textarea(attrs={'class': 'form-control', 'placeholder': 'Roast Description'}),
        }
    def clean_keys(self):
        # Key validation errors appear in the bound form data under
        # 'key_errors' — presumably injected by the view before binding;
        # TODO confirm against the caller.
        keys = self.cleaned_data['keys']
        if len(self.data['key_errors']) > 0:
            errors = []
            for error in self.data.getlist('key_errors'):
                errors.append(ValidationError(_(error), code='invalid key'))
            raise ValidationError(errors)
        return self.cleaned_data['keys']
    def clean_body(self):
        # 'roast_exists' == 't' in the form data marks a duplicate roast.
        if self.data.getlist('roast_exists')[0] == 't':
            raise ValidationError(_('Roast already exists'), code='roast exists')
return self.cleaned_data['body'] | Shoop123/Roast-Dictionary-Django | roasts/forms.py | forms.py | py | 898 | python | en | code | 0 | github-code | 13 |
23248077746 | # encoding: utf-8
"""
Created by misaka-10032 (longqic@andrew.cmu.edu).
"""
class Solution(object):
    def strStr(self, haystack, needle):
        """
        Naive matching: return the index of the first occurrence of
        needle in haystack, or -1 if needle is not a substring.
        An empty needle matches at index 0 (C strstr convention).
        :type haystack: str
        :type needle: str
        :rtype: int
        """
        n = len(haystack)
        m = len(needle)
        if m == 0:
            return 0
        if n < m:
            return -1
        # Bug/portability fix: xrange is Python-2-only (NameError on
        # Python 3); range works on both.  Slice comparison replaces the
        # hand-rolled inner character loop with the same semantics.
        for i in range(n - m + 1):
            if haystack[i:i + m] == needle:
                return i
        return -1
| misaka-10032/leetcode | coding/00028-strstr/solution.py | solution.py | py | 781 | python | en | code | 1 | github-code | 13 |
11531831074 | #!/usr/bin/python
import collections
import logging
import sys
import termios
import tty
from typing import Dict
def getch() -> str:
    """
    Utility function to get a single character from the user.
    Unix-only: uses termios/tty to put stdin in raw mode (no echo, no
    line buffering) for a single read.
    Returns:
        str: A single character
    """
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setraw(fd)
        ch = sys.stdin.read(1)
    finally:
        # Always restore the original terminal state, even on error.
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
class Array:
    """
    Implements the state machine (tape) model used by Brainfuck.

    Cells hold unsigned 8-bit values (0-255) and wrap on overflow and
    underflow; the tape grows automatically to the right.
    """

    def __init__(self) -> None:
        self.array = [0] * 30000
        self.ptr = 0

    def get_data(self) -> int:
        """Return the value of the current cell."""
        return self.array[self.ptr]

    def set_data(self, n: int) -> None:
        """Set the value of the current cell to n."""
        self.array[self.ptr] = n

    def increment(self) -> None:
        """Increment the current cell, wrapping 255 -> 0.

        Bug fix: the original tested `> 256`, so a cell incremented from
        255 reached 256 and was never wrapped back into the 0-255 range.
        """
        self.array[self.ptr] += 1
        if self.array[self.ptr] >= 256:
            logging.error("Buffer overflow at position %d", self.ptr)
            self.array[self.ptr] %= 256

    def decrement(self) -> None:
        """Decrement the current cell, wrapping 0 -> 255."""
        self.array[self.ptr] -= 1
        if self.array[self.ptr] < 0:
            logging.error("Buffer underflow at position %d", self.ptr)
            self.array[self.ptr] %= 256

    def right(self) -> None:
        """Move the data pointer right, growing the tape if needed."""
        self.ptr += 1
        if self.ptr >= len(self.array):
            self.array.append(0)

    def left(self) -> None:
        """Move the data pointer left by one.

        Raises:
            ValueError: if the pointer would move below cell 0.
        """
        if self.ptr > 0:
            self.ptr -= 1
        else:
            raise ValueError("Segmentation Fault")
def make_brace_map(program: str) -> Dict:
    """Create a map pairing each '[' position with its matching ']'.

    Bug fix: the original popped with deque.popleft() (FIFO), which pairs
    an OUTER '[' with the first INNER ']' whenever loops are nested, e.g.
    "[[]]" mapped 0<->2 instead of 1<->2.  Bracket matching needs LIFO
    order, i.e. pop() from the same end append() pushes to.

    Args:
        program (str): The code to be run
    Returns:
        Dict: position -> matching position, stored in both directions
    """
    temp_stack = collections.deque()
    brace_map = {}
    for position, command in enumerate(program):
        if command == '[':
            temp_stack.append(position)
        if command == ']':
            start = temp_stack.pop()
            brace_map[start] = position
            brace_map[position] = start
    return brace_map
def interpret(program: str) -> None:
    """Interprets the Brainfuck code
    Args:
        program (str): The code to be interpreted
    """
    brace_map = make_brace_map(program)
    machine = Array()
    position = 0
    while position < len(program):
        #time.sleep(0.5)
        current_char = program[position]
        if current_char == '>':
            machine.right()
        elif current_char == '<':
            machine.left()
        elif current_char == '+':
            machine.increment()
        elif current_char == '-':
            machine.decrement()
        elif current_char == '.':
            sys.stdout.write(chr(machine.get_data()))
        elif current_char == ',':
            # Blocks until the user presses a key (raw single-char read).
            machine.set_data(ord(getch()))
        elif (current_char == '[' and machine.get_data() == 0):
            # Zero cell: jump forward to the matching ']'.
            position = brace_map[position]
        elif (current_char == ']' and machine.get_data() != 0):
            # Non-zero cell: jump back to the matching '['.
            position = brace_map[position]
        else:
            pass
        position += 1
def cleanup(code: str) -> str:
    """Strip every character that is not one of the 8 Brainfuck commands."""
    commands = {'.', ',', '[', ']', '<', '>', '+', '-'}
    return "".join(ch for ch in code if ch in commands)
def execute(filename: str) -> None:
    """Reads in the Brainfuck code and passes it to the interpreter
    Args:
        filename (str): The file to read the code from
    """
    with open(filename) as f:
        # Strip comments/whitespace before interpreting.
        interpret(cleanup(f.read()))
def main() -> None:
    """CLI entry point: expects exactly one argument, the file to run."""
    if len(sys.argv) == 2:
        execute(sys.argv[1])
    else:
        print(f"Usage: {sys.argv[0]} filename")
if __name__ == "__main__":
main() | joshuagawley/bf.py | bf.py | bf.py | py | 4,363 | python | en | code | 0 | github-code | 13 |
33256521247 | # -*- coding: utf-8 -*-
from django.urls import path
from django.contrib.sitemaps.views import sitemap
from django.utils.translation import gettext_lazy as _
from . import views
from marketing.views import email_list_signup
from shop.sitemaps import CatSitemaps
# Sitemap registry handed to django.contrib.sitemaps' view below.
sitemaps = {
    'cat': CatSitemaps,
}
# URL namespace used for reversing, e.g. reverse('shop:all_produit').
app_name = 'shop'
urlpatterns = (
    # ex: /
    path('', views.all_produit, name='all_produit'),
    # ex: subscribe/
    path(_('subscribe/'), email_list_signup, name='marketing'),
    # ex: search/
    path(_('search/'), views.search, name='search'),
    # ex: shop/product/
    path(_('shop/product/<slug:slug>/<int:id>/'), views.detail_produit, name='detail_produit'),
    # ex: shop/marque/
    path(_('shop/marque/'), views.all_marques, name='all_marque'),
    path(_('shop/marque/<slug:marque_slug>/'), views.liste_marques, name='listing_marques'),
    # ex: shop/category/
    path(_('shop/category/'), views.all_categorie, name='all_categorie'),
    path(_('shop/category/<slug:category_slug>/'), views.liste_categorie, name='listing_categorie'),
    path(_('shop/category/<slug:category_slug>/'), views.all_produit, name='all_produit'),
    # ex: shop/review/
    path(_('shop/review/'), views.review_list, name='review_list'),
    path(_('shop/review/<slug:review_id>/'), views.review_detail, name='review_detail'),
    path(_('shop/add_review/<int:produit_id>/'), views.add_review, name='add_review'),
    # ex: shop/profile/
    path('shop/profile/<slug:username>/', views.user_review_list, name='user_review_list'),
    path('shop/profile/', views.user_review_list, name='user_review_list'),
    # ex: shop/recommendation/ - get wine recommendations for the logged user
    path('shop/recommendation/', views.user_recommendation_list, name='user_recommendation_list'),
    path('sitemap.xml', sitemap, {'sitemaps': sitemaps}, name='django.contrib.sitemaps.views.sitemap')
)
| sujayshekhar/django-ecommerce | shop/urls.py | urls.py | py | 1,905 | python | en | code | 5 | github-code | 13 |
20349629640 | import sys
sys.path.append('/home/silentknight/School/CS434/434proj/cmpt-434-proj')
import asyncio
from rpcudp.protocol import RPCProtocol
@asyncio.coroutine
def sayhi(protocol, address):
    """Send a 'sayhi' RPC to *address* and print the reply (or a fallback).

    NOTE(review): generator-based coroutines (@asyncio.coroutine /
    yield from) were removed in Python 3.11; migrate to async/await.
    """
    # result will be a tuple - first arg is a boolean indicating whether a
    # response was received, and the second argument is the response if one
    # was received.
    result = yield from protocol.sayhi(address, "Snake Plissken")
    print(result[1] if result[0] else "No response received.")
# Start local UDP server to be able to handle responses
port = 4567 if len(sys.argv) == 2 else input("Which port? ")
loop = asyncio.get_event_loop()
listen = loop.create_datagram_endpoint(\
RPCProtocol, local_addr=('127.0.0.1', port))
transport, protocol = loop.run_until_complete(listen)
# Call remote UDP server to say hi
func = sayhi(protocol, ('127.0.0.1', 1234))
loop.run_until_complete(func)
print("leaving...")
| rowan-maclachlan/cmpt-434-proj | async_test/client.py | client.py | py | 931 | python | en | code | 0 | github-code | 13 |
24887920489 | from data import dataset # Input data in datastr
from data import dataset_sorted
import collections
import math
n = len(dataset)
print("Data Statistics: \n")
# Calculate Mean:
def mean(dataset, n):
    """Arithmetic mean of the first n values of dataset.

    Bug fix: the original summed the module-level `dataset_sorted` instead
    of its own `dataset` parameter, silently ignoring the argument (the
    result happened to match only because both hold the same values).
    """
    return sum(dataset[:n]) / n
print("Mean: " + str(mean(dataset, n)) + "\n")
mean = mean(dataset, n)
#Calculate Median:
def median(dataset, n):
    """Median of the first n values of dataset.

    Bug fix: the original indexed the module-level `dataset_sorted`;
    sorting the parameter makes the function self-contained.
    """
    ordered = sorted(dataset[:n])
    if n % 2 == 0:
        return 0.5 * (ordered[n // 2 - 1] + ordered[n // 2])
    return ordered[(n - 1) // 2]
print("Median: " + str(median(dataset, len(dataset))) + "\n")
#Calculate Mode:
def mode(dataset, n):
    """Print the mode(s) of dataset, or 'Mode: None' when all values are unique.

    Fixes: multiple modes were joined with no separator (modes 1 and 2
    printed as "12"); they are now comma-separated.  The maximum frequency
    comes from Counter.most_common, which also avoids the builtin max()
    that this module later shadows with its own max(dataset, n).
    """
    counts = collections.Counter(dataset)
    max_freq = counts.most_common(1)[0][1]
    mode_val = [num for num, freq in counts.items() if freq == max_freq]
    if len(mode_val) == len(dataset):
        print("Mode: None\n")
    else:
        print("Mode: " + ", ".join(map(str, mode_val)) + "\n")
mode(dataset, n)
#Calculate standard deviation
def stddev(dataset, n):
    """Population standard deviation of the first n values of dataset.

    Bug fix: the original read the module-level variable `mean` (rebound to
    a float only after the function definitions run); the mean is now
    computed locally so the function no longer depends on hidden globals.
    """
    m = sum(dataset[:n]) / n
    total = 0
    for value in dataset[:n]:
        total += (value - m) ** 2
    return math.sqrt(total / n)
print("Standard deviation: " + str(stddev(dataset, n)) + "\n")
#Calculate variance:
# Bug fix: `a` was never defined anywhere in this file (NameError at
# runtime); the variance is the square of the standard deviation.
print("Variance: " + str(stddev(dataset, n) ** 2) + "\n")
#Calculate max:
def max(dataset, n):
    """Largest of the first n values of dataset.

    Bug fix: used the module-level `dataset_sorted` instead of the
    parameter.  NOTE: the name shadows the builtin max(); kept only for
    interface compatibility with the existing callers.
    """
    return sorted(dataset[:n])[n - 1]
print("Maximum value: " + str(max(dataset, n)) + "\n")
#Calculate min:
def min(dataset, n):
    """Smallest of the first n values of dataset.

    Bug fix: used the module-level `dataset_sorted` instead of the
    parameter.  NOTE: the name shadows the builtin min(); kept only for
    interface compatibility with the existing callers.
    """
    return sorted(dataset[:n])[0]
print("Minimum value: " + str(min(dataset, n)) + "\n")
#Calculate range:
def range(dataset, n):
    """Difference between the largest and smallest of the first n values.

    Bug fix: used the module-level `dataset_sorted` instead of the
    parameter.  NOTE: the name shadows the builtin range(); kept only for
    interface compatibility with the existing callers.
    """
    ordered = sorted(dataset[:n])
    return ordered[n - 1] - ordered[0]
print("Range: " + str(range(dataset, n)) + "\n")
#Number of terms:
print("Number of terms (n) : " + str(n) + "\n")
| RT5x/Python-Data-Sorter | Python Statistics/main.py | main.py | py | 1,710 | python | en | code | 0 | github-code | 13 |
7593404667 |
def kanpsack_0_1(vals, weight, W, curr):
    """
    Returns the maximum price that we can fill in the knapsack
    vals = array of prices
    weight = array of weights
    W = limit of max weight in knapsack
    curr = current index in consideration
    """
    # Base case: only item 0 remains to consider.
    if curr == 0:
        return vals[0] if weight[0] <= W else 0
    # Option 1: take the current item (only possible when it fits).
    best_with = -1
    if weight[curr] <= W:
        best_with = vals[curr] + kanpsack_0_1(vals, weight, W - weight[curr], curr - 1)
    # Option 2: skip the current item.
    best_without = kanpsack_0_1(vals, weight, W, curr - 1)
    return max(best_with, best_without)
vals = [20, 5, 10, 40, 15, 25]
weight = [1, 2, 3, 8, 7, 4]
W = 10
last = len(vals) - 1
ans = kanpsack_0_1(vals, weight, W, last)
print(ans)
| shashank231/practice-design | raman1.py | raman1.py | py | 787 | python | en | code | 0 | github-code | 13 |
39157886290 | from machine import Pin, ADC
from time import sleep
lm35_pin = 34
lm35 = ADC(Pin(lm35_pin))
lm35.width(ADC.WIDTH_12BIT)
lm35.atten(ADC.ATTN_11DB)
def read_tempc():
    """Read the LM35 once and convert the raw ADC count to degrees Celsius."""
    lm35_value = lm35.read()
    # 12-bit ADC (0-4095); with 11 dB attenuation full scale is ~3.3 V (3300 mV).
    voltage = (lm35_value / 4096.0) * 3300
    # The LM35 outputs 10 mV per degree C, so mV * 0.1 gives degrees.
    tempc = voltage * 0.1
    return tempc
def read_tempf():
    """Read the LM35 once and return the temperature in degrees Fahrenheit.

    DRY fix: reuses read_tempc() instead of duplicating the ADC/voltage
    conversion; exactly one sensor read per call, like the original.
    """
    return (read_tempc() * 1.8) + 32
while True:
avg_c = 0.0
avg_f = 0.0
for i in range(100):
c = read_tempc()
avg_c += c
f = read_tempf()
avg_f += f
sleep(0.2)
avg_c /= 100
avg_f /= 100
print('Temperature in C = {}'.format(avg_c))
print('Temperature in F = {}'.format(avg_f))
print("\n")
| akmyat/electronics | esp32_LM35/esp32-LM35-micropython/main.py | main.py | py | 776 | python | en | code | 0 | github-code | 13 |
38287690009 | # no need to import smtplib for this code
# no need to import time for this code
import imaplib
import email
import pickle
import pandas as pd
import keras
from keras import *
from keras import layers, optimizers
from keras.layers import Embedding, Conv1D, GlobalMaxPooling1D, Dense, Dropout
from keras.models import Model, model_from_json
from keras.preprocessing import *
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sa import HybridMlp
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
text = []
category = []
def read_email_from_gmail():
    """Fetch recent inbox mail and classify each plain-text body as spam/ham.

    SECURITY(review): Gmail credentials are hard-coded below — move them to
    environment variables or a secrets store and rotate the password.
    PERF(review): the classifier is re-trained from the full CSV inside the
    message loop; training once before the loop would behave the same and
    be far faster.
    NOTE(review): the two bare `break`s mean only the first part of the
    newest fetched message is actually processed.
    """
    mail = imaplib.IMAP4_SSL('imap.gmail.com')
    mail.login('akhilpurushothaman1996@gmail.com','akhil@1996')
    mail.select('inbox')
    result, data = mail.search(None, 'ALL')
    mail_ids = data[0]
    id_list = mail_ids.split()
    first_email_id = int(id_list[0])
    latest_email_id = int(id_list[-1])
    # Walk the five most recent message ids, newest first.
    for i in range(latest_email_id, latest_email_id - 5, -1):
        result, data = mail.fetch(str(i), '(RFC822)' )
        for response_part in data:
            if isinstance(response_part, tuple):
                # from_bytes, not from_string
                msg = email.message_from_bytes(response_part[1])
                k=msg
                mystring=''
                # The k inside the comprehension is local to it (Python 3)
                # and does not rebind the outer k.
                for a in [k.get_payload() for k in msg.walk() if k.get_content_type() == 'text/plain']:
                    mystring+=a+' '
                keras.backend.clear_session()
                n_hidden_nodes = [10, 5]
                epoch = 50
                pop_size = 100
                # Train the hybrid MLP spam classifier on the labelled CSV
                # (ham -> 1, spam -> 0).
                dt = pd.read_csv("D:\\Research\\spam_email_detection\\static\\spamham.csv")
                dt['Category'] = dt['Category'].replace("ham", 1)
                dt['Category'] = dt['Category'].replace("spam", 0)
                msgs = dt.values[:, 1]
                labels = dt.values[:, 0]
                xtrain, xtest, ytrain, ytest = train_test_split(msgs, labels, test_size=0.3, random_state=0)
                vector = TfidfVectorizer(stop_words='english')
                a = vector.fit_transform(xtrain)
                b = vector.transform(xtest)
                dataset = [a, ytrain, b, ytest]
                model = HybridMlp(dataset, n_hidden_nodes, epoch, pop_size)
                model.training()
                lst = mystring
                yhat = model.prediction(solution=model.solution, x_data=lst)
                # Threshold the prediction: rounded 1 -> ham, 0 -> spam.
                if round(yhat) > 0.5:
                    res = 'ham'
                else:
                    res = 'spam'
                # path1 = "D:\\Research\\spam_email_detection\\model.h5"
                # path2 = "D:\\Research\\spam_email_detection\\model.json"
                # path3 = "D:\\Research\\spam_email_detection\\tokenizer.pickle"
                #
                # with open(path3, "rb") as h:
                #     tokenizer = pickle.load(h)
                #
                # jhandle = open(path2, 'r')
                #
                # jsoncontent = jhandle.read()
                #
                # jhandle.close()
                #
                # loadedmodel = model_from_json(jsoncontent)
                #
                # loadedmodel.load_weights(path1)
                #
                # lst = [mystring]
                #
                # f = tokenizer.texts_to_sequences(lst)
                #
                # trainFeatures = pad_sequences(f, 100, padding='post')
                #
                # loadedmodel.compile(optimizer='Adam', loss='binary_crossentropy', metrics=['accuracy'])
                #
                # p = loadedmodel.predict(trainFeatures)
                #
                # if p[0][0] > 0.5:
                #     res = 'ham'
                #
                # else:
                #     res = 'spam'
                email_subject = msg['subject']
                email_from = msg['from']
                print('From : ' + email_from + '\n')
                print('Subject : ' + email_subject + '\n')
                print('Message : ' + mystring + '\n')
                print("The message is : "+res)
                # Accumulate into the module-level result lists.
                text.append(mystring)
                category.append(res)
            break
        break
    return [text, category]
#read_email_from_gmail() | codermedia/spam_email_recognition | detection_cnn.py | detection_cnn.py | py | 4,626 | python | en | code | 0 | github-code | 13 |
9477069285 | # ---------------------------------------------------------- #
# Title: TestHarness
# Description: A main module for testing
# ChangeLog (Who,When,What):
# RRoot,1.1.2030,Created script
# MClark, 3.13.2021, Created Script
# ---------------------------------------------------------- #
if __name__ == "__main__":
    import DataClasses as DC
    import ProcessingClasses as PC
    import IOClasses as IC
else:
    raise Exception("This file was not created to be imported")
# Test data module
objP1 = DC.Employee(1, "Don", "Johnson")
objP2 = DC.Employee(2, "Carl", "Sagan")
lstTable = [objP1, objP2]
for row in lstTable:
    print(row.to_string(), type(row))
# Test processing module: save the objects, read them back as raw rows,
# and rebuild Employee objects from those rows.
PC.FileProcessor.save_data_to_file("EmployeeData.txt", lstTable)
lstFileData = PC.FileProcessor.read_data_from_file("EmployeeData.txt")
for row in lstFileData:
    nim = DC.Employee(row[0], row[1], row[2])
    print(nim.to_string().strip(), type(nim))
# Test IO classes (menu display, list display, interactive input)
IC.EmployeeIO.print_menu_items()
IC.EmployeeIO.print_current_list_items(lstTable)
m1 = IC.EmployeeIO.input_employee_data()
print(m1) | MClark89/Assignment09 | TestHarness.py | TestHarness.py | py | 1,120 | python | en | code | 0 | github-code | 13 |
31046436159 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
simple module to translate English to Chinse, Chinese to English
'''
# API key:701380394
# keyfrom:youdao-python
from __future__ import print_function, unicode_literals
import json
import sys
try:
# compatible for python2
from urllib import urlencode
from urllib2 import urlopen
except ImportError:
# compatible for python3
from urllib.parse import urlencode
from urllib.request import urlopen
URL = "http://fanyi.youdao.com/openapi.do?" + \
"keyfrom=youdao-python&key=701380394" + \
"&type=data&doctype=json&version=1.1&"
class Chinese_Translator:
    """Thin client for the Youdao translation HTTP API.

    NOTE(review): the API key is embedded in the module-level URL constant;
    consider moving it out of source control.
    """
    def fetch(self, query_str):
        '''
        use youdao api to get json result of translation

        Returns the raw JSON response body as a UTF-8 decoded str.
        '''
        #print("查询单词:", query_str.strip())
        query = {
            'q': query_str.strip()
        }
        url = URL + urlencode(query)
        # 3-second timeout so a slow API call cannot hang the caller.
        response = urlopen(url, timeout=3)
        html = response.read().decode("utf-8")
        return html
    def print_translate(self, translate):
        '''
        print translation

        Writes all candidate translations on a single line.
        '''
        #print("有道翻译:", end='')
        for trans in translate:
            print(trans, end='')
        print("")
    def parse(self, html):
        '''
        parse the json result to what user could read

        Returns the first translation string, or None (implicitly) when
        the API reports an error or the payload lacks 'translation'.
        '''
        translation = json.loads(html)
        if translation.get('errorCode') == 0:
            if 'translation' in translation:
                #print_translate(translation.get('translation'))
                return translation.get('translation')[0]
    def sanitize_arg(self, query_str):
        '''
        sanitize the argument first

        Strips surrounding quotes; Python 2 byte strings are round-tripped
        through UTF-8 so the result is bytes again.
        '''
        if hasattr(query_str, "decode"):
            result = query_str.decode("utf8")
            result = result.strip("'")
            result = result.strip('"')
            result = result.encode("utf-8")
        else:
            result = query_str.strip("'").strip('"')
        return result
    def translate(self, text=''):
        # Full pipeline: sanitize -> fetch JSON -> extract first translation.
        youdao_json = self.fetch(self.sanitize_arg(text))
        return self.parse(youdao_json)
#print(Chinese_Translator().translate(text='旁边的阿班突然搜住她的手'))
| khaman1/Video_Translator | library/image/chinese.py | chinese.py | py | 2,221 | python | en | code | 1 | github-code | 13 |
71454096337 | print()
# Read the data_kr.xlsx file
import re
from openpyxl import load_workbook
# Source string: "python VS java"
# Split the string on " VS " => ['python', 'java']
pattern = re.compile(" VS ") # the pattern includes the surrounding spaces
print(pattern.split("python VS java"))
print()
# Read the resident-registration-number column and print it; the trailing
# digits of each number are masked with '*'
wb = load_workbook("./RPAbasic/crawl/download/data_kr.xlsx")
ws = wb.active
for each_row in ws.rows:
    print(each_row[1].value) # print the raw values to verify
# Pattern to find: the 7 trailing digits of the registration number
pattern = re.compile("[0-9]{7}") # exactly 7 characters in the 0-9 range
for each_row in ws.rows:
    print(re.sub(pattern, "*******", each_row[1].value))
wb.close() # close the workbook
| HwangJuu/pythonsource | RPAbasic/regex/regex5.py | regex5.py | py | 777 | python | ko | code | 0 | github-code | 13 |
14494142463 | import pandas as pd
from src.utils import get_root_folder, insert_to_table
from codetiming import Timer
def insert_from_csv(root_path, path, table_name):
    """Read the CSV at root_path/path and bulk-insert its rows into table_name.

    The insert is timed and logged via codetiming.Timer; always returns True.
    """
    path = root_path.joinpath(path).as_posix()
    bookmarks_df = pd.read_csv(path)
    n_elem = bookmarks_df.shape[0]
    with Timer(text=f"Inserted to '{table_name}' {n_elem} items in {{:.4f}} seconds"):
        insert_to_table(table_name, bookmarks_df.columns, bookmarks_df.values)
    return True
def load_data():
    """Load the catalogue JSON plus the three CSV datasets into the database."""
    project_path = get_root_folder()
    cat_path = project_path.joinpath('data/catalogue.json').as_posix()
    catalogue = pd.read_json(cat_path, orient='index')
    # The JSON index carries the element ids; expose it as a regular column.
    catalogue['element_uid'] = catalogue.index
    n_elem = catalogue.shape[0]
    with Timer(text=f"Inserted to 'catalogue' {n_elem} items in {{:.4f}} seconds"):
        insert_to_table('catalogue', catalogue.columns, catalogue.values)
    insert_from_csv(project_path, 'data/ratings.csv', 'ratings')
    insert_from_csv(project_path, 'data/bookmarks.csv', 'bookmarks')
    insert_from_csv(project_path, 'data/transactions.csv', 'transactions')
| StepDan23/okko-postgres | src/database/init_db.py | init_db.py | py | 1,085 | python | en | code | 0 | github-code | 13 |
42746230615 | from sys import stdin
# n: number of lines (int)
# k: length of a word (int)
# d: dictionary <key, val> <int, int>
# w: word (string)
# c: character occurence counter array (int list size: 26)
# a: ascii value of 'a' (int)
# j: ascii mapping to c index 'a' -> 0 (int)
def f(w):
    """
    Build a letter-frequency signature for the first k characters of w.

    'w' must be a word with all lower case letters and no spaces.
    Counts the occurrences of each letter of the alphabet among the
    first k characters (k is the module-level word length read from the
    input) and returns the 26 counts concatenated as a string, e.g.
    'aab' -> '21000...0'.

    Doc fix: the old docstring claimed the counts were converted into a
    single int; the function has always returned the joined string.
    """
    a = 97  # ord('a')
    l = 26  # alphabet size
    c = [0] * l
    for i in w[:k]:
        j = ord(i) - a  # map 'a'..'z' -> 0..25
        c[j] += 1
    return ''.join([str(x) for x in c])
# First input line: n = number of words, k = length of each word
# (n itself is not used below; readlines() consumes all remaining words).
n, k = [int(x) for x in stdin.readline().split()]
d = {}
for i in stdin.readlines():
    w = f(i)
    # A signature seen once scores 1; seeing it again downgrades it to 0,
    # so only words with a unique letter multiset keep a score of 1.
    if w not in d.keys():
        d[w] = 1
    else:
        d[w] = 0
s = 0
for i in d.values():
    s += i
print(s) | gurpartb/kattis | mr_anaga2.py | mr_anaga2.py | py | 963 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.