blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1e738f57abaf2f4bade3d418917aad39cbae070f | 7649278f4bda14aaf4ec02b7ae58094e16d98618 | /Project/scripts/cartpole_eval.py | 61018275f50b4f2d739e06cf8596805d284be6f9 | [] | no_license | peng00bo00/optlearningcontrol | 1877381ca749f17caf75ede02a5cb263cbddaa79 | 44eff6d17e4da0b0adc85e5e84cf4b8edb8a1bb8 | refs/heads/master | 2021-01-06T18:44:58.981575 | 2020-05-19T17:44:34 | 2020-05-19T17:44:34 | 241,445,231 | 0 | 0 | null | 2020-02-18T19:11:08 | 2020-02-18T19:11:08 | null | UTF-8 | Python | false | false | 1,121 | py | import numpy as np
import tensorflow as tf
import gym
from gym import wrappers
import os
import matplotlib.pyplot as plt
## environment
# CartPole-v0 classic-control task; Monitor records episode videos/stats
# into ../animations (force=True overwrites results from previous runs).
env = gym.make('CartPole-v0')
env = wrappers.Monitor(env, '../animations/', force=True)
env.reset()
## GPU configuration
# Enable memory growth so TensorFlow allocates GPU memory on demand
# instead of grabbing the whole device up front.
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
def play(env, policy):
    """Roll out one greedy episode and return the total reward.

    :param env: gym environment using the old 4-tuple step API
                (obs, reward, done, info)
    :param policy: Keras model mapping a state row vector to Q-values
    :returns: sum of the (undiscounted) rewards collected in the episode
    """
    x = env.reset()
    terminal = False
    rewards = []
    while not terminal:
        env.render()
        # Q-values for the current state; argmax picks the greedy action.
        u = policy.predict(x.reshape([1, -1]))
        u = np.argmax(u)
        x, r, terminal, _ = env.step(u)
        rewards.append(r)
    return np.sum(rewards)
# DQN
# Evaluate each trained variant by loading its saved Q-network and
# rolling out one greedy (rendered + recorded) episode.
policy = tf.keras.models.load_model("../models/DQN_q_network.h5")
play(env, policy)
## Double DQN
policy = tf.keras.models.load_model("../models/DoubleDQN_q_network.h5")
play(env, policy)
## Prioritized Experience Replay
policy = tf.keras.models.load_model("../models/PrioritizedDQN_q_network.h5")
play(env, policy)
## Deuling DQN
policy = tf.keras.models.load_model("../models/DeulDQN_q_network.h5")
play(env, policy) | [
"pengbo_tongji@126.com"
] | pengbo_tongji@126.com |
c5cff275de54420bcb38baed08c685af8327ebac | a11f247da185f02576f1c601044652a41ea8fb04 | /utils/pd_utils.py | d8b7b3ea8068867b26e8b46e5175d60c16d27788 | [] | no_license | JinchaoCai/sportsKG | 341397a13e199d28af3e807447a27eadd260048d | ea6f72fbee9a4fd780d00b7189fbb577cd448af4 | refs/heads/master | 2022-12-15T14:02:25.315709 | 2020-09-11T08:36:26 | 2020-09-11T08:36:26 | 285,804,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | import pandas as pd
def to_dict_dropna(df):
    """Convert a DataFrame into a list of per-row dicts without NaN cells.

    Keys are whitespace-stripped; string values are stripped too, while
    non-string values are kept as-is. Null/NaN cells are omitted entirely.
    """
    records = []
    for row in df.to_dict(orient='records'):
        cleaned = {}
        for key, value in row.items():
            if pd.isnull(value):
                continue
            cleaned[key.strip()] = value.strip() if isinstance(value, str) else value
        records.append(cleaned)
    return records
def read_tsv(filename, dropna=True):
    """Load a tab-separated file as a list of row dicts.

    :param filename: path to the .tsv file
    :param dropna: when True (default), NaN cells are removed and
        keys/strings stripped via to_dict_dropna; otherwise the raw
        records from pandas are returned.
    """
    frame = pd.read_csv(filename, sep='\t')
    if not dropna:
        return frame.to_dict(orient='records')
    return to_dict_dropna(frame)
if __name__ == '__main__':
    # Ad-hoc smoke test against a dataset path on the developer's machine.
    filename = '/Users/caijinchao/projects/sportsKG/data/basketball/league.tsv'
    df = read_tsv(filename)
    print(df)
"jinchao_cai@apple.com"
] | jinchao_cai@apple.com |
def celsius_para_fahrenheit(x):
    """Convert a temperature from degrees Celsius to degrees Fahrenheit."""
    y = 1.8 * x + 32
    return y


# Fixes two bugs in the original script: it called a misspelled name
# ("celsius_para_farenheit", a NameError) and then printed the undefined
# global `x` instead of the conversion result.
resultado = celsius_para_fahrenheit(7)
print(resultado)
"you@example.com"
] | you@example.com |
d8ec9fe884c663a6f32ba053dea2d5ec8a046bf8 | c417eca3685bbbe576d34f03d6d25c790eba723c | /setup.py | 3af7050cea9f208b11a1da34ec8ef1c1f5691e8f | [
"BSD-2-Clause"
] | permissive | flexiblecloud/cloud_ui | 6e48e14df23913f6bcaf23f13e3e772363acd6ff | 99b3db4f5991afe183c69d4b0e5ed3112b5ab8e1 | refs/heads/master | 2020-07-16T16:17:52.696196 | 2019-09-13T14:06:26 | 2019-09-13T14:06:26 | 205,822,689 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,291 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import setuptools
from setuptools import setup
# Long description for PyPI comes straight from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()
# requirements.txt holds one spec per line; git+ URLs are rewritten below.
with open('requirements.txt') as f:
    requirements = f.read().splitlines()
def process_requirement(requirement):
    """Normalise one requirements.txt entry for setuptools.

    Plain specs pass through unchanged. "git+..." URLs are rewritten to
    the PEP 508 direct-reference form "<name>@git+<url>", where <name> is
    the last path segment before the trailing "@<ref>".
    """
    if not requirement.startswith("git+"):
        return requirement
    url_without_ref = requirement.rpartition("@")[0]
    dep_name = url_without_ref.rpartition("/")[2]
    return f"{dep_name}@{requirement}"
print("raw requirements:", requirements)
# Normalise every entry so pip can resolve git dependencies by name.
requirements = [process_requirement(requirement) for requirement in requirements]
print("processed requirements:", requirements)
# Standard setuptools metadata; packages are discovered automatically.
setup(
    name='cloudui',
    version='0.0.1',
    description='Python trio and remi based library for making web-apps ...',
    long_description=long_description,
    long_description_content_type="text/markdown",
    url='https://github.com/flexiblecloud/cloud_ui',
    download_url='https://github.com/flexiblecloud/cloud_ui/archive/master.zip',
    keywords=['gui-library', 'remi', 'trio', 'service', 'application', 'platform-independent', 'ui', 'gui'],
    author='Andriy Vasyltsiv',
    author_email='andriyvasyltsiv@gmail.com',
    license='BSD',
    packages=setuptools.find_packages(),
    include_package_data=True,
    install_requires=requirements
)
| [
"andriyvasyltsiv@gmail.com"
] | andriyvasyltsiv@gmail.com |
062479f2d87586d0f4b84baeb3ee922ad34c467f | ddf8fca52a641bf5b98cc626f5f5c1e86c58114c | /OldPythonCode/reverse.py | cb6db963c1c875b4ff7ba29c15daaf81dd9af77f | [] | no_license | mathdoll/python | 5a7a4ce3858d7dfe425deb511c5975fd5f56dd50 | 3105fb88c41092e1312d96a232001fec10de74d2 | refs/heads/master | 2016-08-09T20:37:22.579744 | 2016-03-28T04:21:11 | 2016-03-28T04:21:11 | 54,798,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | '''
a=["a","b","c","d","100","jjj"]
print(a)
b[0]=a[4]
b[1]=a[3]
b[2]=a[2]
b[3]=a[1]
b[4]=a[0]
'''
def rinurev(a):
size = len(a)
b=[None] *size
for i in range(size):
idx = size-(i+1)
#print (i, idx)
b[i] = a[idx]
return b
'''
c=list(reversed(a))
c.append("rinu")
c.append(a)
print(c)
'''
| [
"Rinisha"
] | Rinisha |
24a02e65a4d81428a3f0677c6a9f0c3de85388a3 | f8f15d6366268bba22dc8aa6e938106efd455689 | /first/teacher/serializer.py | 4cb9b9aa06c2493cf8979c99f2e16db81d6a967a | [] | no_license | 13233039146/first_drf | 2e473d25ae81429f95a4cfa607de1b98e95355a3 | 091fe6a9d979a8930addede96bfcd30f00461a6b | refs/heads/master | 2023-01-06T22:28:09.460407 | 2020-11-04T13:13:39 | 2020-11-04T13:13:39 | 307,693,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,429 | py | from rest_framework import serializers
from first.settings import MEDIA_URL
from teacher.models import Teacher
class TeacherSerializer(serializers.Serializer):
    """Serializes Teacher instances for API output.

    gender and pic are computed fields: gender is rendered as its
    human-readable choice label and pic as an absolute media URL.
    """
    name = serializers.CharField()
    age = serializers.IntegerField()
    # gender = serializers.IntegerField()
    job = serializers.CharField()
    phone = serializers.CharField()
    # pic = serializers.ImageField()
    # Custom computed field, resolved via get_gender() below.
    gender = serializers.SerializerMethodField()
    def get_gender(self, obj):
        # gender is a choices field; get_<field>_display returns its label.
        return obj.get_gender_display()
    pic = serializers.SerializerMethodField()
    def get_pic(self, obj):
        # Builds an absolute URL from a hard-coded dev host.
        # NOTE(review): if MEDIA_URL starts with '/', the result contains
        # "//" after the host -- confirm against actual settings.
        url ='%s%s%s' % ('http://127.0.0.1:8000/',MEDIA_URL,str(obj.pic))
        return url
class TeacherDeserialezer(serializers.Serializer):
    """Validates inbound Teacher data and creates Teacher rows.

    NOTE(review): the class name misspells "Deserializer"; renaming would
    break importers, so it is documented rather than changed.
    """
    # Name limited to 1-4 characters with Chinese error messages
    # (the error_messages strings are user-facing and kept verbatim).
    name = serializers.CharField(min_length=1,
                                 max_length=4,error_messages={
            'min_length':'长度太短,请输入正确的名字',
            'max_length':'长度太长,请输入正确的名字',
        })
    age = serializers.IntegerField(max_value=90,min_value=18)
    # gender: 0/1/2 choice codes (see the model's choices definition).
    gender = serializers.IntegerField(max_value=2,min_value=0)
    job = serializers.CharField()
    # phone must be exactly 11 characters.
    phone = serializers.CharField(max_length=11,min_length=11)
    # pic = serializers.ImageField()
    def create(self, validated_data):
        # Persist a new Teacher from the validated payload.
        return Teacher.objects.create(**validated_data)
"690321437@qq.com"
] | 690321437@qq.com |
dca433c6328f30c8a5f0e91f879f6bdff3707c9d | affc5b3dc08765126581399e1b8c76ff37b4515c | /protobuf_compiler/args_validation.py | c95638aed8bebc62c08501365eb8fa2278e26a10 | [
"MIT"
] | permissive | netsaj/python-protobuf-compiler | 19d77add8063574ccd3a6d3fe05cf33a3a6e0932 | e4337641c5aa6970d179dd7ea3559b4e29f4361f | refs/heads/master | 2023-07-22T16:29:29.818681 | 2021-07-09T17:07:01 | 2021-07-09T17:07:01 | 175,302,827 | 9 | 7 | MIT | 2023-07-05T21:14:02 | 2019-03-12T22:02:15 | Python | UTF-8 | Python | false | false | 1,913 | py | import os
from protobuf_compiler.input_folder import define_input
def args_validation(
        git_repository='',
        proto_input_dir='.',
        proto_output_dir='.',
        proto_package_version='',
        git_repository_token=''
):
    """Validate the CLI arguments for the protobuf compiler.

    Raises ValueError on the first invalid argument; returns None when
    everything checks out.
    """
    if git_repository != '':
        # Only http(s) clone URLs of GitHub/GitLab repositories are supported.
        uses_http = str(git_repository).startswith("https://") or str(git_repository).startswith("http://")
        if not uses_http:
            raise ValueError("-g , --git : copy clone with https. example: https://gitlab.com/netsaj/test-repo.git")
        known_host = str(git_repository).find("gitlab") != -1 or str(git_repository).find("github") != -1
        if not known_host:
            raise ValueError("-g , --git : git repository support for gitlab and github")
    if proto_input_dir == proto_output_dir:
        raise ValueError("src(-d) proto path need be different to output(-o) path ")
    if proto_package_version == '':
        raise ValueError("package name can't empty")
    if git_repository == '':
        # Local mode: the input folder must contain at least one *.proto file.
        source_dir = define_input(proto_input_dir, git_repository, git_repository_token)
        has_proto = any(
            file_path.lower().endswith(".proto")
            for file_path in get_list_of_files(source_dir)
        )
        if not has_proto:
            raise ValueError("src(-d) proto path not contain a proto files (*.proto)")
def get_list_of_files(dirName):
    """Recursively collect the paths of all files below dirName.

    Directories themselves are not included in the result; their
    contents are gathered by recursing into them.
    """
    collected = []
    for entry in os.listdir(dirName):
        full_path = os.path.join(dirName, entry)
        if os.path.isdir(full_path):
            collected.extend(get_list_of_files(full_path))
        else:
            collected.append(full_path)
    return collected
| [
"Fabio.moreno@cuemby.com"
] | Fabio.moreno@cuemby.com |
652b202ca5b1f1106deeae560e600105807b8316 | c2d636b401ebf4087e983ebff20c94c0337aa838 | /build/lib/hipster/max_heap.py | 90654c240f5a2d48419fc48b8e62841bbea0dc44 | [
"MIT"
] | permissive | soumasish/hipster | cc4c5084efc8bbad81e106a74c4324bce81a0e9c | 1fe3f0526839a2a418a7e9e6a0557bfc4a5ef061 | refs/heads/master | 2020-09-16T13:29:24.853529 | 2019-11-29T20:38:02 | 2019-11-29T20:38:02 | 223,784,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | import heapq
from hipster.error import HeapError
from hipster.heap import Heap
class MaxHeap(Heap):
    """Max-heap facade over the Heap base class.

    Assumes the base class provides `self.heap` (a list) plus
    `self.read_lock` / `self.write_lock` -- confirm in hipster.heap.
    """
    def __init__(self):
        super().__init__()
    def peek(self):
        """Return (without removing) the largest element.

        :raises HeapError: if the heap is empty.
        """
        if len(self.heap) == 0:
            raise HeapError("Peeking into an empty heap")
        with self.read_lock:
            return heapq.nlargest(1, self.heap)[0]
    def pop(self):
        """Remove and return the element at the end of the backing list.

        NOTE(review): this pops the *last* list element while peek()
        reports the largest via nlargest; unless the base class keeps
        the list sorted, pop() and peek() can disagree -- verify intent.

        :raises HeapError: if the heap is empty.
        """
        if len(self.heap) == 0:
            raise HeapError("Popping off an empty heap")
        with self.write_lock:
            return self.heap.pop()
| [
"soumasish@gmail.com"
] | soumasish@gmail.com |
17daf2465104d5bf642b1582718ab3d4a61fd2ec | b93ed9abb036d0ac9ee081f3c2f46be7906e2869 | /watchw/models.py | d6896c2e05c5ff068bc9a1a90ab8f1f6d8605ca7 | [] | no_license | mohamadRaafat/WatchW-CS50W-FP | fe2e88d5879961bc78d2ecb849d132a0b7fbb4ca | 8884d64c893d2c3b22eda7658ae49694d4415cd0 | refs/heads/master | 2022-11-05T06:05:57.930725 | 2020-06-26T17:47:21 | 2020-06-26T17:47:21 | 273,964,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,772 | py | import string
import random
from django.db import models
# (db value, human-readable label) pairs for Item.type.
WATCH_TYPES = (
    ("Casual", "Casual"),
    ("Dress", "Dress"),
    ("Sports", "Sports"),
    ("Smart", "Smart"),
)
# Create your models here.
class Item(models.Model):
    """A watch available for purchase in the store."""
    # One of WATCH_TYPES (Casual/Dress/Sports/Smart).
    type = models.CharField(max_length=25, choices=WATCH_TYPES)
    name = models.CharField(max_length=128)
    product_description = models.TextField()
    # NOTE(review): price is stored as free-form text; a DecimalField
    # would be safer for arithmetic -- confirm before changing.
    price = models.CharField(max_length=12)
    # Image URLs: one main picture and two alternates.
    main_image_url = models.TextField()
    sub_image_url1 = models.TextField()
    sub_image_url2 = models.TextField()
    def __str__(self):
        return f"{self.type} - {self.name}"
class Order(models.Model):
    """An in-progress (cart) order for one user."""
    # NOTE(review): user is referenced by a raw integer id rather than a
    # ForeignKey to the user model -- confirm this is intentional.
    user_id = models.IntegerField()
    items = models.ManyToManyField(Item, blank=True, related_name='order')
    total_price = models.DecimalField(max_digits=8, decimal_places=2)
    def __str__(self):
        # NOTE(review): self.items is a related manager, so this renders
        # the manager's repr, not the item names -- verify intent.
        return f"{self.items} - {self.user_id}"
class PastOrder(models.Model):
    """A historical order snapshot kept after checkout."""
    # NOTE(review): raw integer user id, mirroring Order.user_id.
    user_id = models.IntegerField()
    items = models.ManyToManyField(Item, blank=True, related_name='past_orders')
    date = models.DateTimeField()
    # True once the order has been confirmed/processed.
    order_confirmed = models.BooleanField(default=False)
    total_price = models.DecimalField(max_digits=8, decimal_places=2)
    def __str__(self):
        return f"{self.user_id} - {self.order_confirmed} - {self.date}"
# Generate a random uppercase/digit string for use as a coupon code.
def id_generator(size=8, chars=string.ascii_uppercase + string.digits):
    """Return a random code of `size` characters drawn from `chars`.

    Uses the `secrets` module instead of `random`: coupon codes are
    security-sensitive values and must not be predictable from the
    PRNG state.
    """
    import secrets  # local import keeps the module's import block untouched
    return ''.join(secrets.choice(chars) for _ in range(size))
# Create new coupons from the Django shell by calling id_generator().
class CouponCode(models.Model):
    """A discount coupon redeemable at checkout."""
    coupon = models.CharField(max_length=10)
    # Percentage off the order total, e.g. 10.50 for 10.5%.
    discount_percentage = models.DecimalField(max_digits=4, decimal_places=2)
    def __str__(self):
        return self.coupon
| [
"mohamadrafat666@yahoo.com"
] | mohamadrafat666@yahoo.com |
c1a05f90f206938cb1f01ac0bcef73994b5d9bc1 | d08d7c0fcc6a1ef1bcce6e3be02eafe5b856ede8 | /Formant-master/get_fp.py | 62149bbe49fb207311c11db82659c5730c339c58 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | skyto0927/kk_sotsuronData | c2fbaf6585386a4150ead58d73902872d1488503 | 158876251309aff04dfb4b9e58b80da1cd866c29 | refs/heads/master | 2020-12-28T08:22:37.102769 | 2020-02-06T21:18:39 | 2020-02-06T21:18:39 | 238,244,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,459 | py | #coding: utf-8
import numpy as np
import scipy.signal
import wave
from LPC import *
#Check version
# Python 3.6.4, 64bit on Win32 (Windows 10)
# numpy (1.14.0)
# scipy (1.0.0)
class Class_get_fp(object):
    """Frame-wise LPC analysis of a mono 16-bit wav file.

    Produces, per analysis frame: the LPC log-power spectrum, candidate
    formant frequencies (spectral peaks), and a pitch candidate taken
    from the autocorrelation of the LPC residual.
    """
    def __init__(self,NFRAME=640, NSHIFT=320, lpcOrder=32, FreqPoints=1024, max_num_formants=5):
        # Analysis frame length and hop size, in samples.
        self.NFRAME=NFRAME # 640 sr=16Khz 40mS # 400 sr=16Khz 25mS
        self.NSHIFT=NSHIFT # 320 sr=16Khz 20mS # 160 sr=16khz 10mS
        self.lpcOrder=lpcOrder
        self.FreqPoints = FreqPoints # need many number for precise analysis especially pitch detect
        self.window = np.hamming(self.NFRAME) # Windows is Hamming
        self.preemph=0.97 # pre-emphasis
        self.max_num_formants =max_num_formants # maximum number of formant candidate to detect
    def get_fp(self,file_name ):
        # Input:  wave file, mono, 16 bit
        #
        # Output: matrix of per-frame LPC log spectra
        #         indices of the formant frequency candidates
        #         pitch frequency candidates
        #
        # read wave file
        waveFile= wave.open( file_name, 'r')
        nchannles= waveFile.getnchannels()
        samplewidth = waveFile.getsampwidth()
        sampling_rate = waveFile.getframerate()
        nframes = waveFile.getnframes()
        # Frequency resolution per spectrum bin and the sample period.
        self.df0 = (sampling_rate /2.) / self.FreqPoints
        self.dt0 = 1.0 / sampling_rate
        # check input wave file condition
        assert nchannles == 1, ' channel is not MONO '
        assert samplewidth==2, ' sample width is not 16bit '
        buf = waveFile.readframes(-1) # read all at once
        waveFile.close()
        # 16bit integer to float32
        data = np.frombuffer(buf, dtype='int16')
        fdata = data.astype(np.float32)
        # Number of complete frames that fit with hop size NSHIFT.
        count= int(((nframes - ( self.NFRAME - self.NSHIFT)) / self.NSHIFT))
        # prepare output
        spec_out= np.zeros([count,self.FreqPoints])
        fout = np.zeros([count,self.max_num_formants])
        fout_index = np.ones([count,self.max_num_formants]) * -1
        pout = np.zeros(count)
        pout_index = np.ones(count) * -1
        pos = 0 # position
        countr=0
        for loop in range(count):
            ## copy to avoid original over-change
            frame = fdata[pos:pos + self.NFRAME].copy()
            ## pre-emphasis
            frame -= np.hstack((frame[0], frame[:-1])) * self.preemph
            ## do window
            windowed = self.window * frame
            ## get lpc coefficients
            a,e=lpc(windowed, self.lpcOrder)
            ## get lpc spectrum
            w, h = scipy.signal.freqz(np.sqrt(e), a, self.FreqPoints) # from 0 to the Nyquist frequency
            lpcspec = np.abs(h)
            lpcspec[lpcspec < 1.0] = 1.0 # to avoid log(0) error
            loglpcspec = 20 * np.log10(lpcspec)
            spec_out[loop]=loglpcspec # store to output
            ## get formant candidate
            f_result, i_result=self.formant_detect(loglpcspec, self.df0)
            # NOTE(review): when fewer than max_num_formants peaks are
            # found, assigning the shorter list to a fixed-width row below
            # raises a broadcast ValueError -- confirm inputs always yield
            # at least max_num_formants candidates.
            if len(f_result) > self.max_num_formants:
                fout[loop]=f_result[0:self.max_num_formants]
                fout_index[loop]=i_result[0:self.max_num_formants]
            else:
                fout[loop]=f_result[0:len(f_result)]
                fout_index[loop]=i_result[0:len(f_result)]
            ## calculate lpc residual error (= input source)
            r_err=residual_error(a, windowed)
            ## autocorrelation of lpc residual error (= input source)
            a_r_err=autocorr(r_err)
            a_f_result, a_i_result = self.pitch_detect(a_r_err, self.dt0)
            if len(a_f_result) > 0: # if candidate exist,
                pout[loop]=a_f_result[0]
                pout_index[loop]=a_i_result[0]
            ## print output of candidates of [formants] pitch, frequency[Hz]
            if countr == 0:
                print ('candidates of [formants] pitch, frequency[Hz] ')
            #print (fout[loop], pout[loop])#####################################
            print(loop)
            ####################################################################
            # index count up
            countr +=1
            # next
            pos += self.NSHIFT
        return spec_out, fout_index, pout
    def formant_detect(self,input0, df0, f_min=250):
        # Find convex (mountain-shaped) peak points in the log spectrum.
        #
        # Input:  log spectrum
        #         frequency step per bin
        #         (optional) minimum frequency to consider
        #
        # Output: peak frequencies
        #         peak indices
        is_find_first= False
        f_result=[]
        i_result=[]
        for i in range (1,len(input0)-1):
            if f_min is not None and df0 * i <= f_min :
                continue
            # Local maximum: strictly above both neighbours.
            if input0[i] > input0[i-1] and input0[i] > input0[i+1] :
                if not is_find_first :
                    f_result.append( df0 * i)
                    i_result.append(i)
                    is_find_first =True
                else:
                    f_result.append( df0 * i)
                    i_result.append(i)
        return f_result, i_result
    def pitch_detect(self, input0, dt0, ratio0=0.2, f_min=100, f_max=500):
        # Find both the peaks and the troughs of the autocorrelation of
        # the LPC residual (the excitation/source signal).
        #
        # Input:  autocorrelation of the LPC residual
        #         time step per lag
        #         (optional) ratio to the zero-lag energy; only lags whose
        #                    magnitude reaches this fraction are considered
        #         (optional) minimum frequency
        #         (optional) maximum frequency
        #
        # Output: frequency of the largest extremum (1-element list)
        #         index of the largest extremum (1-element list)
        #
        #
        is_find_first= False
        f_result=[]
        i_result=[]
        v_result=[]
        for i in range (1,len(input0)-1):
            if np.abs(input0[i]) < np.abs(input0[0] * ratio0):
                continue
            # Frequency corresponding to this autocorrelation lag.
            fp= 1.0 / (dt0 * i)
            if f_max is not None and fp >= f_max :
                continue
            if f_min is not None and fp <= f_min :
                continue
            # Local maximum (peak).
            if input0[i] > input0[i-1] and input0[i] > input0[i+1] :
                if not is_find_first :
                    f_result.append( fp)
                    i_result.append(i)
                    v_result.append( input0[i])
                    is_find_first =True
                else:
                    f_result.append( fp)
                    i_result.append(i)
                    v_result.append( input0[i])
            # Local minimum (trough).
            elif input0[i] < input0[i-1] and input0[i] < input0[i+1] :
                if not is_find_first :
                    f_result.append( fp)
                    i_result.append(i)
                    v_result.append( input0[i] )
                    is_find_first =True
                else:
                    f_result.append( fp)
                    i_result.append(i)
                    v_result.append( input0[i])
        if is_find_first: # pick the largest peak
            a=np.argmax( np.array(v_result))
            f_result2= [ f_result[np.argmax( np.array(v_result))] ]
            i_result2= [ i_result[np.argmax( np.array(v_result))] ]
        else: # no candidate found
            f_result2=[]
            i_result2=[]
        return f_result2, i_result2
#This file uses TAB
| [
"skyto0927@gmail.com"
] | skyto0927@gmail.com |
7afe29bbc034fa253e18ba7082d4d3d90b31ac20 | d12c39dc10b03a8bff48951d73c981c7fe99d84d | /archive/id3/ID3.py | eb8fc355c559844d87ed2b8a13be77a773852851 | [] | no_license | bill-mao/python | 1c57c3ef667aabc73ab7e390d27f484766aad6c4 | 261f83eba66f49f1d26f49a6ec8de59d5fec277b | refs/heads/master | 2020-04-12T23:40:29.842128 | 2019-04-17T02:54:36 | 2019-04-17T02:54:36 | 92,309,837 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,271 | py | import time
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
class Tree(object):
    """A node of an ID3 decision tree.

    Internal nodes split on one feature and route samples to child
    subtrees; leaf nodes carry a predicted class. On an internal node
    `category` is None and doubles as the fallback prediction when a
    sample has a feature value never seen during training.
    """
    def __init__(self, node_type, category=None, feature=None):
        # Either 'internal' or 'leaf'.
        self.node_type = node_type
        # Maps a value of the split feature to the corresponding subtree.
        self.dict = {}
        # Predicted class for leaves; None on internal nodes.
        self.category = category
        # Index of the feature this node splits on (internal nodes only).
        self.feature = feature

    def add_tree(self, key, tree):
        """Attach `tree` as the child followed when the feature equals `key`."""
        self.dict[key] = tree

    def predict(self, features):
        """Classify one sample by walking down the tree."""
        if self.node_type == 'leaf':
            return self.category
        subtree = self.dict.get(features[self.feature])
        if subtree is None:
            # Unseen feature value: fall back to this node's category.
            return self.category
        return subtree.predict(features)
# Empirical entropy H(x) of the label/value array x.
def calc_ent(x):
    """Return the empirical (Shannon) entropy of array x, in bits."""
    values, counts = np.unique(x, return_counts=True)
    ent = 0.0
    for freq in counts:
        p = float(freq) / x.shape[0]
        ent -= p * np.log2(p)
    return ent
# Conditional empirical entropy H(y|x).
def calc_condition_ent(x, y):
    """Return H(y|x): entropy of y averaged over the partitions induced by x."""
    total = y.shape[0]
    cond_ent = 0.0
    for value in set(x[i] for i in range(x.shape[0])):
        subset = y[x == value]
        weight = float(subset.shape[0]) / total
        cond_ent += weight * calc_ent(subset)
    return cond_ent
# Information gain of feature x with respect to labels y.
def calc_ent_grap(x,y):
    """Return g(D, A) = H(D) - H(D|A), the information gain of x for y."""
    return calc_ent(y) - calc_condition_ent(x, y)
# ID3 algorithm
def recurse_train(train_set,train_label,features):
    """Recursively build an ID3 decision tree.

    Relies on the module-level globals class_num (number of classes)
    and epsilon (information-gain stopping threshold).
    """
    LEAF = 'leaf'
    INTERNAL = 'internal'
    # Step 1 -- every sample in train_set belongs to the same class Ck:
    # return a single leaf labelled Ck.
    label_set = set(train_label)
    if len(label_set) == 1:
        return Tree(LEAF,category = label_set.pop())
    # Step 2 -- no features left: return a leaf with the majority class.
    class_len = [(i,len(list(filter(lambda x:x==i,train_label)))) for i in range(class_num)] # occurrences of each class
    (max_class,max_len) = max(class_len,key = lambda x:x[1])
    if len(features) == 0:
        return Tree(LEAF,category = max_class)
    # Step 3 -- compute the information gain of every remaining feature
    # and select the feature with the largest gain.
    max_feature = 0
    max_gda = 0
    D = train_label
    for feature in features:
        # print(type(train_set))
        A = np.array(train_set[:,feature].flat) # column `feature` of the training set
        gda=calc_ent_grap(A,D)
        if gda > max_gda:
            max_gda,max_feature = gda,feature
    # Step 4 -- best gain below the threshold: stop splitting here.
    if max_gda < epsilon:
        return Tree(LEAF,category = max_class)
    # Step 5 -- build the non-empty subsets, one per value of the chosen
    # feature, and recurse on each of them.
    sub_features = list(filter(lambda x:x!=max_feature,features))
    tree = Tree(INTERNAL,feature=max_feature)
    max_feature_col = np.array(train_set[:,max_feature].flat)
    feature_value_list = set([max_feature_col[i] for i in range(max_feature_col.shape[0])]) # distinct values taken by the chosen feature (shape[0] is the row count)
    for feature_value in feature_value_list:
        index = []
        for i in range(len(train_label)):
            if train_set[i][max_feature] == feature_value:
                index.append(i)
        sub_train_set = train_set[index]
        sub_train_label = train_label[index]
        sub_tree = recurse_train(sub_train_set,sub_train_label,sub_features)
        tree.add_tree(feature_value,sub_tree)
    return tree
def train(train_set, train_label, features):
    """Fit an ID3 decision tree over the given training data.

    Thin wrapper around recurse_train, kept for a stable public API.
    """
    fitted_tree = recurse_train(train_set, train_label, features)
    return fitted_tree
def predict(test_set, tree):
    """Classify every sample in test_set with the fitted tree.

    Returns the predictions as a numpy array, in input order.
    """
    return np.array([tree.predict(features) for features in test_set])
print("Start read data...")
time_1 = time.time()
# df = pd.read_csv("D:/DATA/新闻分词结果.csv" )
'''
数据来源:
Creator:
Marko
Bohanec
本次数据一共有7个变量,变量介绍如下:
Buying :buying price(购买价格)
maint : price of the maintenance(保养价格)
. doors :number of doors(门的个数)
Persons :capacity in terms of persons to carry(载人个数)
lug_boot :the size of luggage boot(车身的大小,分为small,med,big)
safety :estimated safety of the car(安全程度,分为low, med, high)
CAR :car acceptability(被接受程度,四个等级:unacc, acc, good, vgood)
我们要根据汽车的Buying,maint,doors,Persons,lug_boot,safety,CAR建立模型来预测汽车的受欢迎程度。
'''
# Load the car-evaluation dataset from a developer-machine path:
# columns 0-5 are the features, column 6 is the label.
df = pd.read_csv("D:/DATA/汽车满意度2.csv" , engine='python')
features = df.iloc[:,:6].values
labels = df.iloc[:,6].values
# To avoid overfitting, hold out a random 33% of the data as a test set;
# the remaining 67% is used for training.
train_features, test_features, train_labels, test_labels = \
    train_test_split(features, labels, test_size=0.33, random_state=0)
time_2 = time.time()
print('read data cost %f seconds' % (time_2 - time_1))
class_num = len(set(labels)) # number of distinct label classes
feature_len = len(features[0]) # number of features per sample
# for i in np.linspace(0.001, 0.7, 100):
epsilon = 0.001 # information-gain threshold for stopping splits
# epsilon = i
# Build the decision tree with the ID3 algorithm.
print('Start training...')
tree = train(train_features,train_labels,list(range(feature_len)))
time_3 = time.time()
print('training cost %f seconds' % (time_3 - time_2))
print('Start predicting...')
test_predict = predict(test_features,tree)
time_4 = time.time()
print('predicting cost %f seconds' % (time_4 - time_3))
print("预测的结果为:")
print(test_predict)
# Replace missing/non-string predictions so accuracy_score can compare.
for i in range(len(test_predict)):
    if test_predict[i] == None or type(test_predict[i]) != str:
        test_predict[i] = ''
        # print('no prediction %d' %i)
score = accuracy_score(test_labels, test_predict)
print("The accruacy score is %f" % score)
"mao_bill@163.com"
] | mao_bill@163.com |
20da8a1571be3297fdc2a8720ab6d9c6f804eede | a0801d0e7325b31f0383fc68517e208680bb36d6 | /Kattis/anagramcounting.py | 362adbaa6dfaa8948b5b2fb3c59253bb2a0f31b6 | [] | no_license | conormccauley1999/CompetitiveProgramming | bd649bf04438817c7fa4755df2c2c7727273b073 | a7e188767364be40f625612af3d16182f2d8d4de | refs/heads/master | 2023-05-14T13:19:32.678134 | 2023-05-11T16:07:33 | 2023-05-11T16:07:33 | 179,089,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | from collections import Counter
_f = { 0: 1 }
def f(n):
    """Return n! using the module-level memo dict _f.

    Iterative: the original recursive memoisation recursed once per
    uncached value and could exceed Python's recursion limit for the
    long words this Kattis problem allows.
    """
    if n not in _f:
        # _f always holds a contiguous run 0..top, so extend it upward.
        top = max(_f)
        acc = _f[top]
        for k in range(top + 1, n + 1):
            acc *= k
            _f[k] = acc
    return _f[n]
def g(s):
    """Count the distinct anagrams of s.

    Computed as len(s)! divided by the product of the factorials of
    each character's multiplicity.
    """
    total = f(len(s))
    for count in Counter(s).values():
        total //= f(count)
    return total
# Kattis "anagramcounting": one word per input line; print the number of
# distinct anagrams of each word until EOF.
while True:
    try:
        i = input()
        print(g(i))
    except:
        # EOFError ends the loop; the bare except also swallows any other
        # error, which is acceptable for this throwaway contest script.
        break
"conormccauley1999@gmail.com"
] | conormccauley1999@gmail.com |
d1360b75dcf43a463f259be1a9df57513d15d448 | 36f6351333d62b11bb42b73be6c9131f0cf03f26 | /smshandler/sms.py | b93ad798c79685b7263d9f8a5154c94f2ec6decb | [] | no_license | antoinevg/phanta | ab30f8ea66b7362db4440b57e15e2e63dee90bc6 | c05cdfb8b78954adb8792b98a507d0a54cb8e3b3 | refs/heads/master | 2020-12-25T14:06:18.520801 | 2011-06-23T08:20:27 | 2011-06-23T08:20:27 | 1,656,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,974 | py | #!/usr/bin/env python
"""*******************************************************
* Schalk-Willem Kruger *
* Phanta SMS script *
*******************************************************"""
import httplib
import os
import subprocess
import sys
import urllib2
from sys import argv, stderr, exit
from urllib import urlencode

from mechanize import Browser
# Vodacom web-SMS endpoints (legacy fallback; direct sending is preferred).
LOGIN_URL = "https://www.vodacom.co.za/portal/site/myaccount/template.VCOMLOGIN/"
SMS_URL = "https://www.vodacom.co.za/portal/site/myaccount/sms/"
# Phanta HTTP API host, reached over the VPN.
PHANTA_HOST = "10.8.0.6"
PHANTA_PORT = "80"
PHANTA_URL = "http://"+PHANTA_HOST+":"+PHANTA_PORT
def send_sms_direct(cellnr, mesg):
try:
os.system(r'/opt/phanta/sendsms '+cellnr+' "'+mesg+'"')
return 0
except:
print "BIG ERROR - CANNOT SEND SMS!!!"
fail(11, "Couldn't send SMS", cellnr+" "+mesg)
return -1
def send_sms(cellnr, mesg):
    """Truncate mesg to 128 characters and send it to cellnr."""
    if (len(mesg)>128): mesg=mesg[:128]
    print "Sending SMS to", cellnr, "Message:", mesg
    send_sms_direct(cellnr, mesg)
    #send_sms_vodacom(cellnr, mesg)
def doreq(method, url, postdata="", headers={}):
    """Issue an HTTP request against the Phanta API host.

    NOTE(review): the mutable default for headers is shared between
    calls; harmless while no caller mutates it, but worth fixing.

    :returns: (status code, response body)
    """
    #f = urllib2.urlopen(PHANTA_URL+"/pubsub/publish?smsd=true", "message="+msg)
    print "Request:", method, url, postdata, headers
    conn = httplib.HTTPConnection(PHANTA_HOST+":"+PHANTA_PORT)
    conn.request(method, url, postdata, headers)
    response = conn.getresponse()
    print response.status, response.reason
    data = response.read()
    print data
    conn.close()
    return response.status, data
def pubsub_post(cellnr, msg):
    """Publish an SMS message to the pubsub channel; SMS an error back on failure."""
    print "Pubsub post"
    status, data = doreq("POST", "/pubsub/publish?smsnr="+cellnr, "message="+msg)
    if (status==302 or status==200):
        # Success is deliberately silent (no confirmation SMS).
        #send_sms(cellnr, "Your message has been published")
        pass
    else: send_sms(cellnr, "Error: "+data)
def auth_register(cellnr):
    """Register cellnr with the auth service and SMS the outcome back."""
    print "Register"
    status, data = doreq("POST", "/auth/register", urlencode({"cellphone": cellnr, "hash": "SMSHANDLER"}))
    if (status==302 or status==200):
        send_sms(cellnr, "Your cellphone number "+cellnr+" has been registered successfully")
    else: send_sms(cellnr, "Error: "+data)
def profiles_follow(cellnr, user):
    """Make the sender (cellnr) follow `user` and SMS the outcome back."""
    print "Follow"
    status, data = doreq("POST", "/profiles/following?smsnr="+cellnr, "username="+user)
    # TODO: Reply
    if (status==302 or status==200):
        send_sms(cellnr, "Your are now following "+user)
    else: send_sms(cellnr, "Error: "+data)
def profiles_unfollow(cellnr, user):
    """Make the sender (cellnr) unfollow `user` and SMS the outcome back."""
    # NOTE(review): this log line says "Follow" (copy-paste); the printed
    # text is runtime output, so it is flagged here rather than changed.
    print "Follow"
    status, data = doreq("DELETE", "/profiles/following?smsnr="+cellnr+"&username="+user)
    if (status==302 or status==200):
        send_sms(cellnr, "Your are not following "+user+" anymore")
    else: send_sms(cellnr, "Error: "+data)
    # TODO: Reply
def main():
    """Parse an inbound SMS file (path in argv[1]) and dispatch its command."""
    if len(sys.argv)<2:
        print "Error with parameters!"
        # NOTE(review): fail() is not defined in this module; confirm it
        # exists elsewhere, otherwise this branch raises NameError.
        fail(7, "Error with SMS script parameters", "==SMS==ERROR==")
    f = open(sys.argv[1], "r")
    global g_cellnr
    # Header section: scan until the blank line, capturing the "From:" field.
    s = f.readline()
    while (len(s)!=1):
        if s.split()[0]=="From:": cellnr = s.split()[1]
        s = f.readline()
    # Normalise international prefix 27... to the local 0... form.
    if (cellnr[:2]=="27"): cellnr="0"+cellnr[2:]
    g_cellnr = cellnr
    print "New message from", cellnr
    # Body section: concatenate the remaining (stripped) lines.
    mes = ""
    while (s!=''):
        mes+=s.strip()
        s = f.readline()
    print "Message:", mes
    params = mes.split()
    # Drop the carrier-appended footer if present.
    if (params[-1]=="www.vodacom.co.za"): params.pop() # FIXME
    print "Message [split]:", params
    # First word is the command; the rest are its arguments.
    cmd = params.pop(0).lower()
    # TODO: Proper dispatcher
    if (cmd=="post" or cmd=="p"):
        pubsub_post(cellnr, " ".join(params))
    elif (cmd=="register" or cmd=="r"):
        auth_register(cellnr)
    elif (cmd=="follow" or cmd=="f"):
        profiles_follow(cellnr, params.pop(0).lower())
    elif (cmd=="unfollow" or cmd =="u"):
        profiles_unfollow(cellnr, params.pop(0).lower())
    else:
        # Unknown commands are silently ignored.
        #sendsms(cellnr, "Unknown command")
        pass
if __name__ == "__main__":
    main()
| [
"swk@swk.za.net"
] | swk@swk.za.net |
9776c782f39f617a35f28dcaa298d25f01933375 | 1fca3bb176ce42ca11af59222ee5c520f6a44486 | /merge_kaldi_features.py | 53c2d19399ff9da1032fa52122f6c76503552975 | [] | no_license | Caliope-SpeechProcessingLab/ASICAKaldiGMMRecipe | 07c6beca519a849b058b87a816b54b5a6bf5c8e1 | e5e07a7451c2cca387b3e0091b122d0722c10250 | refs/heads/master | 2021-08-15T20:19:51.487126 | 2021-01-03T10:11:08 | 2021-01-03T10:11:08 | 237,428,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,865 | py | #!/usr/bin/env python3
#
# This script takes the folders with the features calculated by kaldi or others (like openSmile) and merge the .ark and .scp
# files in the /data/ folder.
#------------------------------------------------------------------------------------------------------------------
# Variables:
# Inputs:
# * folder_in: name of folders with features calculated.
# Outputs:
# * folder_out: /data/ folder where combined data is saved.
#------------------------------------------------------------------------------------------------------------------
# Authors:
# - Main programmer: Andres Lozano Durán
# - Main Supervisor: Ignacio Moreno Torres
# - Second Supervisor: Enrique Nava Baro
#
# EXAMPLE OF USE
# You need to generate the features in different folder in kaldi. This is used after all features are calculated, but before normalization.
# -----------------------------------------------------------------------------
# cd custom_kaldi
# python3 merge_kaldi_features.py plp mfcc combined_features
# cd ..
# -----------------------------------------------------------------------------
# Eg:
# python3 merge_kaldi_features.py folderIn1 folderIn2 ... folderOut
# python3 merge_kaldi_features.py plp mfcc combined_features
#
import os
import re
import sys
import shutil
import kaldiio
# import subprocess
import numpy as np
def copy_and_overwrite(from_path, to_path):
    """Recursively copy from_path to to_path, replacing any existing tree.

    :param from_path: source directory
    :param to_path: destination directory (deleted first if present)
    :returns: None
    """
    destination_exists = os.path.exists(to_path)
    if destination_exists:
        shutil.rmtree(to_path)
    shutil.copytree(from_path, to_path)
# end copy_and_overwrite
def create_data_folder(folder_name):
    """Create an empty folder, removing any previous contents first.

    :param folder_name: path of the folder to (re)create
    :returns: None
    """
    already_exists = os.path.exists(folder_name)
    if already_exists:
        shutil.rmtree(folder_name)
    os.makedirs(folder_name)
# end create_data_folder
def folder_empty(folder_name):
    """Collect every empty directory found under the given folders.

    :param folder_name: iterable of folder paths to inspect
    :returns listOfEmptyDirs: list of empty directories found
    """
    empty_dirs = []
    for root in folder_name:
        # Walk the whole tree; a directory with no subdirs and no files
        # counts as empty.
        for dirpath, dirnames, filenames in os.walk(root):
            if not dirnames and not filenames:
                empty_dirs.append(dirpath)
    return empty_dirs
# end folder_empty
def load_ark_matrix(folder_name,file_name):
    """Load a kaldi .ark file into a dict keyed by utterance id.

    Examples:
        matrix1 = load_ark_matrix('plp','raw_plp_test.1.ark')
        matrix2 = load_ark_matrix('mfcc','raw_mfcc_test.1.ark')

    :param folder_name: folder holding the .ark data, e.g. 'plp', 'mfcc'
    :param file_name: name of the .ark file to load
    :returns write_dict: dict mapping each kaldi key to its feature array
    """
    # The feature folders live next to (one level above) the current dir.
    parent_path = os.path.dirname(os.getcwd())
    ark_path = os.path.join(parent_path, folder_name, file_name)
    # kaldiio yields (key, array) pairs; fold them straight into a dict.
    return dict(kaldiio.load_ark(ark_path))
# end load_ark_matrix
def calculate_num_cep(feature_name):
    """Return the number of cepstral coefficients for a feature type.

    Reads ``conf/<feature_name>.conf`` and extracts the value of the
    ``--num-ceps`` option.  For 'mfcc' one coefficient is discounted
    (the original code subtracts 1 only for mfcc).

    :param feature_name: name of features to check, e.g. 'mfcc', 'plp'
    :returns numCeps: number of ceps used in the .conf file
    """
    # feature_name = 'mfcc' 'plp' 'plp_pitch'
    config_folder = 'conf'
    # BUGFIX: the original opened the file and immediately rebound the
    # variable to its contents, leaking the handle; a context manager
    # guarantees the file is closed.
    with open(os.path.join(config_folder, feature_name + '.conf')) as config_handle:
        configFile = config_handle.read()
    # The config is tokenised on spaces, tabs, newlines and '=' so that
    # both '--num-ceps=13' and '--num-ceps 13' forms parse.
    configList = re.split(' |\t|\n|=', configFile)
    posNumceps = configList.index('--num-ceps')
    if feature_name == 'mfcc':
        numCeps = int(configList[posNumceps + 1]) - 1
    else:
        numCeps = int(configList[posNumceps + 1])
    return numCeps
# end calculate_num_cep
def args_function(args):
    """Validate the argv list and strip the script name.

    :param args: arguments for script (sys.argv style list)
    :returns: the arguments without the leading script name
    :raises ValueError: when the list is empty
    """
    if not args:
        raise ValueError('You need to specify folder for features')
    return args[1:]
# end args_function
#---------------------------------MAIN-----------------------------------------
if __name__ == "__main__":
    # Example invocations (kept for reference):
    # sys.argv = ['merge_kaldi_features.py','mfcc','energy','combined_features']
    # sys.argv = ['merge_kaldi_features.py','plp','mfcc','energy','combined_features']
    args = args_function(sys.argv)
    print(sys.argv)

    # All arguments but the last are input feature folders; the last is
    # the output folder where merged .ark/.scp pairs are written.
    folder_in = args[:-1]
    folder_out = args[-1]
    data_name = 'data'

    # Abort early if any input folder tree contains an empty directory.
    listEmpty = folder_empty(folder_in)
    if listEmpty != []:
        # BUGFIX: the original concatenated a list with a str
        # (``listEmpty + '...'`` raises TypeError); render the list via
        # str() inside the message instead.
        raise ValueError('folder contains EMPTY FOLDERS: ' + str(listEmpty))

    # Access folders and merge the .ark and .scp files.  File names are
    # assumed to follow the raw_<feature>_<set>.<n>.ark convention, so the
    # folder name can be substituted to find the matching file elsewhere.
    list_file = os.listdir(folder_in[0])
    for file_ark in list_file:
        if file_ark.endswith('.ark'):
            write_dict = {}  # kaldiio uses features in the form of a dict
            for folder in folder_in:
                # e.g. 'raw_mfcc_train.1.ark' -> 'raw_plp_train.1.ark'
                name_file_ark = file_ark.replace(folder_in[0], folder)
                d = kaldiio.load_ark(os.path.join(folder, name_file_ark))
                for key_kaldi, array_kaldi in d:
                    # Some features like energy can have dim (xx,) which
                    # makes a dimensional error; promote to a column vector.
                    if array_kaldi.ndim == 1:
                        array_kaldi = np.atleast_2d(array_kaldi).T
                    # end if
                    try:
                        write_dict[key_kaldi] = np.concatenate((write_dict[key_kaldi], array_kaldi), axis=1)
                    except (KeyError, ValueError):
                        # BUGFIX: narrowed from a bare except.  KeyError:
                        # first feature set seen for this utterance.
                        if key_kaldi not in write_dict:
                            write_dict[key_kaldi] = array_kaldi
                        else:
                            # ValueError: frame counts differ between the
                            # feature sets; trim or zero-pad the new array
                            # to match the stored length before merging.
                            diff = len(array_kaldi) - len(write_dict[key_kaldi])
                            if diff > 0:
                                array_kaldi = array_kaldi[:-diff]
                            else:
                                array_kaldi = np.concatenate((array_kaldi, np.zeros((-diff, array_kaldi.shape[1]))), axis=0)
                            # end if
                            write_dict[key_kaldi] = np.concatenate((write_dict[key_kaldi], array_kaldi), axis=1)
                        # end if
                    # end try
                # end for
            # end for
            # Write the merged features into the /data/ naming scheme.
            destark_filename = name_file_ark.replace(folder_in[-1], data_name)
            destark_filename = os.path.join(os.getcwd(), folder_out, destark_filename)
            srcscp_filename = destark_filename.replace('ark', 'scp')
            print("Writing to " + destark_filename)
            kaldiio.save_ark(destark_filename, write_dict, scp=srcscp_filename)
        # end if
    # end for
# end if
| [
"noreply@github.com"
] | noreply@github.com |
812ecf3bbe0ca367c19619ac57273f03fa01b103 | 20bace39d601159ed2de73addb2d0fb3a9b0e41b | /client/asgi.py | e0c55fc251bfb11118b6b32421c7ab95f957289d | [] | no_license | tomjuran/django-flooring-website | 8ff0df562c8f70b8d8f168248279d3be37f816a8 | f90ff1ff81dddf7072071b237ed86100259cad9c | refs/heads/main | 2023-01-11T03:03:13.782042 | 2020-11-10T17:48:50 | 2020-11-10T17:48:50 | 311,739,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
ASGI config for client project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# Point Django at the project's settings module before the app is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'client.settings')

# Module-level ASGI callable picked up by ASGI servers.
application = get_asgi_application()
| [
"noreply@github.com"
] | noreply@github.com |
6fa0fb024936ca115fdd701a9bff446d61c30b9d | 12d1f91023b82779402c622ed8e6619da4945442 | /movies/models.py | 12c27729caaeb3c3e2b9b29ad46f272c06a4d2b4 | [] | no_license | daudulislamsumon/Vidly | cb4c3c1f3364e88d656a5393c0580295c8abd85f | da19afc165563785a04a106bc171dc4a3aa1cb78 | refs/heads/master | 2023-01-22T16:19:39.386561 | 2020-05-19T17:04:20 | 2020-05-19T17:04:20 | 265,310,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | from django.db import models
from django.utils import timezone
class Genre(models.Model):
    """A movie genre label (e.g. used as a foreign key by Movie)."""
    name = models.CharField(max_length=255)

    def __str__(self):
        # Human-readable representation (shown in the Django admin).
        return self.name
class Movie(models.Model):
    """A rental movie with stock and pricing information."""
    title = models.CharField(max_length=255)
    # NOTE(review): field name is misspelled ('relese' vs 'release');
    # renaming would require a DB migration, so it is only flagged here.
    relese_year = models.IntegerField()
    number_in_stock = models.IntegerField()
    daily_rate = models.FloatField()
    genre = models.ForeignKey(Genre, on_delete=models.CASCADE)
    date_created = models.DateTimeField(default=timezone.now)

    def __str__(self):
        # Human-readable representation (shown in the Django admin).
        return self.title
| [
"md.daudulislamsumon@gmail.com"
] | md.daudulislamsumon@gmail.com |
653479ee8c550b8e45c9081a2f9f514c2251ce72 | baf65c626904fa4877510556c5ad5cf86ee8a463 | /day1/18. DecimalToOBH.py | c4b18247d8555ccd2881e537b6a7a25d25b06ce6 | [] | no_license | nihagopala/PythonAssignments | c2e85299cd8c1648e56bd497b198336dbfe9ea8a | 1501d147d037fd7f68caceec1666fd0a49239486 | refs/heads/master | 2021-08-24T01:20:14.538785 | 2017-12-07T12:15:20 | 2017-12-07T12:15:20 | 112,322,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | #-----------------------------------------------------------------------#
#Program to convert to Decimal into Binary, Octal and hexaDecimal values
#-----------------------------------------------------------------------#
dec = input("Enter number in Decimal Format: ")
decimal=int(dec)
print("The decimal value of",decimal,"is:")
print(bin(decimal),"in binary.")
print(oct(decimal),"in octal.")
print(hex(decimal),"in hexadecimal.") | [
"noreply@github.com"
] | noreply@github.com |
4fdf1bea747a8d67c7f40453558e77f806ca8731 | 49bfc292c42068b6c278cb98c7a55bc46b9902be | /combine_1D_KDE.py | 911159b83f625ffdb6f7a46bc84efe82616c51f4 | [
"MIT"
] | permissive | adamgonzalez/analysis | 003d03248f71092d538a97e04621d16b250c9bb8 | ee6c1025e4d7ea480bdbbacf13f3fb86245a87da | refs/heads/master | 2021-01-01T20:36:57.842731 | 2018-05-10T17:00:04 | 2018-05-10T17:00:04 | 98,895,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,517 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: adamg
"""
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
import scipy.stats as st
import operator
import time
import os
from matplotlib import gridspec
from matplotlib.colors import LinearSegmentedColormap
from scipy.stats import gaussian_kde
# Global plot styling: 18 pt fonts, thin axes, serif family, LaTeX text.
matplotlib.rcParams.update({'font.size': 18})
matplotlib.rcParams['axes.linewidth'] = 1 #set the value globally
plt.rc('font',family='serif')
plt.rc('text',usetex=True)
# Work from the I Zw 1 data directory (hard-coded local path).
os.chdir("/Users/agonzalez/Documents/Research/Data/IZw1")
## data = np.genfromtxt("single_sim.txt")
data = np.genfromtxt("multi_sim.txt")
## data = np.genfromtxt("big_sim.txt")
# data = np.genfromtxt("big_sim_aug16.txt")
#
## os.chdir("/Users/agonzalez/Documents/Research/Data/Mrk1501")
## data = np.genfromtxt("suz.txt")
## data = np.genfromtxt("xmm.txt")

# Column 0 is plotted as source height, column 1 as source velocity
# (see the axis labels set up below).
x, y = data[:,0], data[:,1]
xmin, xmax = 2.0, 30.0
ymin, ymax = 0.25, 0.75

# ## 2-D Kernel Density Estimation -------------------------------------
t0 = time.time()
# Evaluate a Gaussian KDE of (x, y) on a 100x100 grid spanning the plot
# limits; f holds the density surface used for the contour plots.
xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
positions = np.vstack([xx.ravel(), yy.ravel()])
values = np.vstack([x, y])
kernel = st.gaussian_kde(values)
f = np.reshape(kernel(positions).T, xx.shape)
t1 = time.time()
# Custom white-to-blue colormap for the filled density contours; each
# channel is an (anchor, value-below, value-above) ramp.
cdict1 = {'blue': ((0.00, 1.0, 1.0),
                   (0.10, 1.0, 1.0),
                   (0.20, 1.0, 1.0),
                   (0.40, 1.0, 1.0),
                   (0.60, 1.0, 1.0),
                   (0.80, 1.0, 1.0),
                   (1.00, 0.1, 0.1)),
          'green': ((0.00, 1.0, 1.0),
                    (0.10, 1.0, 1.0),
                    (0.20, 0.8, 0.8),
                    (0.40, 0.6, 0.6),
                    (0.60, 0.4, 0.4),
                    (0.80, 0.2, 0.2),
                    (1.00, 0.0, 0.0)),
          'red': ((0.00, 1.0, 1.0),
                  (0.10, 1.0, 1.0),
                  (0.20, 0.0, 0.0),
                  (0.40, 0.0, 0.0),
                  (0.60, 0.0, 0.0),
                  (0.80, 0.0, 0.0),
                  (1.00, 0.0, 0.0)),
          }
cmcust = LinearSegmentedColormap('customcmap', cdict1)
# ## -------------------------------------------------------------------
## 1-D Kernel Density Estimations ------------------------------------
t2 = time.time()
# Sort both axes so the 1-D density curves can be drawn as lines.
x, y = np.sort(x, kind='mergesort'), np.sort(y, kind='mergesort')
t3 = time.time()

t4 = time.time()
# Marginal KDE along x (source height), evaluated at the samples.
xpdf = st.gaussian_kde(x) ; kx = xpdf(x)
t5 = time.time()

t6 = time.time()
# Marginal KDE along y (source velocity), evaluated at the samples.
ypdf = st.gaussian_kde(y) ; ky = ypdf(y)
t7 = time.time()

# kx = np.genfromtxt('1d_xkde.txt')
# ky = np.genfromtxt('1d_ykde.txt')
## -------------------------------------------------------------------
## Plot up all the results -------------------------------------------
# Layout: 3x3 grid with the joint density in the lower-right 2x2 block
# (ax3), the y-marginal on the left (ax1) and the x-marginal on top (ax2).
plt.figure(figsize=[12.8,9.6])
gs = gridspec.GridSpec(3,3) ; gs.update(wspace=0, hspace=0)
ax1 = plt.subplot(gs[1:, 0])
ax2 = plt.subplot(gs[0, 1:])
ax3 = plt.subplot(gs[1:, 1:])

# Left panel: velocity marginal, density on the (inverted) x-axis.
ax1.plot(ky, y, 'k', linewidth=1.0)
ax1.set_ylim(ymin,ymax)
ax1.set_ylabel(r'Source Velocity /$c$')
ax1.set_xlabel('Density')
ax1.invert_xaxis()
# ax1.xaxis.set_label_position('top')
# ax1.xaxis.set_ticks_position('top')
ax1.tick_params(axis='both', which='both', direction='in', bottom='on', right='on')

# Top panel: height marginal.
ax2.plot(x, kx, 'k', linewidth=1.0)
ax2.set_xlim(xmin,xmax)
ax2.set_xlabel(r'Source Height /$r_g$')
ax2.set_ylabel('Density', rotation='270', labelpad=20.0)
ax2.yaxis.set_label_position('right')
ax2.yaxis.set_ticks_position('right')
ax2.xaxis.set_label_position('top')
ax2.xaxis.set_ticks_position('top')
ax2.tick_params(axis='both', which='both', direction='in', bottom='on', left='on')

# # ax3.set_xticklabels([]) ; ax3.set_yticklabels([])
# # Contourf plot
cfset = ax3.contourf(xx, yy, f, cmap=cmcust) #cmap=plt.cm.get_cmap(scheme))
# # cbar4 = plt.colorbar(cfset, pad=0.05)#, ticks=[-0.02, 0.0, 0.02, 0.04, 0.06, 0.08, 0.10])
# # cbar4.ax.set_ylabel('Density', rotation='270', labelpad=25.0)
# # Contour plot
cset = ax3.contour(xx, yy, f, colors='k', linewidths=0.5)
# # Hist2d plot
# cfset = ax3.hist2d(x,y[::-1],bins=[56,50], cmap=cmcust)
# Label plot
ax3.invert_xaxis()
ax3.set_xlim(xmin, xmax)
ax3.set_ylim(ymin, ymax)
ax3.set_xlabel(r'Source Height /$r_g$')
ax3.set_ylabel(r'Source Velocity /$c$', rotation='270', labelpad=20.0)
ax3.yaxis.set_label_position('right')
ax3.yaxis.set_ticks_position('right')
ax3.tick_params(axis='both', which='both', direction='in', top='on', left='on')
## ------------------------------------------------------------------
# ####################################################################################################
# Compute the escape velocity for a black hole of mass M at a height R above the black hole
def vesc_calc(G,M,R,c):
    """Escape velocity at radius R from a mass M, as a fraction of c.

    Implements v_esc = sqrt(2*G*M / R) / c.
    """
    escape_speed = np.sqrt((2.0 * G * M) / R)
    return escape_speed / c
# Physical constants in SI units.
G = 6.674e-11      # gravitational constant [m^3 kg^-1 s^-2]
c = 2.998e8        # speed of light [m s^-1]
M_sun = 1.989e30   # solar mass [kg]
# plt.figure()
# ax = plt.subplot(111)

# Overlay escape-velocity curves for the nominal black-hole mass (j==0)
# and its +/- error bounds (j==1, j==2) on the joint-density panel.
col = ['r','r','r']
res = 50
Vesc = np.zeros([5,res])
R = np.zeros([5,res])
for j in range (0,3):
    ## I Zw 1
    if (j==0):
        M_bh = pow(10.0, 7.30)*M_sun ; name = 'Negrete et al. (2012)'
        # r_g0: gravitational radius of the nominal mass, used to put all
        # three curves on the same height scale.
        r_g0 = (G*M_bh)/(c**2.0)
    if (j==1):
        M_bh = pow(10.0, 7.30+0.23)*M_sun ; name = 'Mass + error'
    if (j==2):
        M_bh = pow(10.0, 7.30-0.19)*M_sun ; name = 'Mass -- error'
    ## III Zw 2
    # if (j==0):
    #     M_bh = pow(10.0, 8.03)*M_sun ; name = 'van den Bosch (2016)'
    #     r_g0 = (G*M_bh)/(c**2.0)
    # if (j==1):
    #     M_bh = pow(10.0, 8.03+0.26)*M_sun ; name = '+'
    # if (j==2):
    #     M_bh = pow(10.0, 8.03-0.26)*M_sun ; name = '--'

    # Sample radii logarithmically from just outside the Schwarzschild
    # radius out to 1000 gravitational radii.
    R_s = (2.0*G*M_bh)/(c**2.0)
    r_g = (G*M_bh)/(c**2.0)
    R[j][:] = np.logspace(start=np.log10(1.01*R_s), stop=np.log10(1000.0*r_g), num=res)
    for i in range (0,res):
        Vesc[j][i] = vesc_calc(G,M_bh,R[j][i],c)
    # print "Mass of I Zw 1 BH [kg] = ", M_bh
    # print "Schwarzschild radius [m] = ", R_s
    # print "Gravitationl radius [m] = ", r_g

    # Convert radii to units of the nominal gravitational radius.
    R[j][:] = R[j][:]/r_g0
    if (j!=0):
        # Error-bound curves are dashed; nominal curve is solid.
        ax3.plot(R[j][:],Vesc[j][:], color=col[j], dashes=[5,3], alpha=0.75, label=name)
    elif (j==0):
        ax3.plot(R[j][:],Vesc[j][:], color=col[j], alpha=0.75, label=name)

# Rows 3 and 4 hold the absolute spreads between the nominal curve and
# the two error-bound curves.
for i in range (0,res):
    R[3][i] = abs(R[0][i]-R[1][i])
    R[4][i] = abs(R[0][i]-R[2][i])
# Locate the peak of each 1-D marginal density (mode of height/velocity).
kxmax_index, kxmax_value = max(enumerate(kx), key=operator.itemgetter(1))
kymax_index, kymax_value = max(enumerate(ky), key=operator.itemgetter(1))

# Mark the marginal modes on all three panels and their intersection.
ax1.axhline(y=y[kymax_index], color='k', dashes=[5,3], linewidth=1.0)
ax2.axvline(x=x[kxmax_index], color='k', dashes=[5,3], linewidth=1.0)
ax3.axhline(y=y[kymax_index], color='k', dashes=[5,3], linewidth=1.0)
ax3.axvline(x=x[kxmax_index], color='k', dashes=[5,3], linewidth=1.0)
ax3.scatter(x[kxmax_index], y[kymax_index], c='r', s=15.0)

print "Plotting is done!"
print "1D velocity: beta = ", y[kymax_index]
print "1D height: z = ", x[kxmax_index]
print ""

# print "2D KDE: t = ", t1-t0
# print "1D sorting: t = ", t3-t2
# print "1D x KDE: t = ", t5-t4
# print "1D y KDE: t = ", t7-t6

# np.savetxt('2d_kde.txt', f)
# np.savetxt('multi_1d_xkde.txt', kx)
# np.savetxt('multi_1d_ykde.txt', ky)

# plt.savefig('/Users/agonzalez/Desktop/IZw1_kde_separate_gallifrey.png', bbox_inches='tight', dpi=300)
# plt.savefig('/Users/agonzalez/Desktop/IIIZw2_kde_separate_xmm.png', bbox_inches='tight', dpi=300)
# plt.savefig('/Users/agonzalez/Desktop/contour_place_holder_colorbar.ps', format='ps', bbox_inches='tight', dpi=300)

plt.show()
| [
"adamgonzalez@users.noreply.github.com"
] | adamgonzalez@users.noreply.github.com |
2591109258fee0a132b9ad6d0527a3f907c574e5 | 3db9cea3fc39c181887fd7c4420bfdff653f63a2 | /mostcode/src/codegen.py | f2c9903db40899a66dc5a9c2a233107597b326d3 | [] | no_license | aaks123/minicompiler | 688d2d04c50009b40e997ab971b4a5b80d0091b1 | 9bddc15e5fa9de2a3a1712de0469a4b697226b44 | refs/heads/master | 2020-05-02T02:46:49.432908 | 2019-03-26T04:22:22 | 2019-03-26T04:22:22 | 177,711,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,597 | py | #!/usr/bin/python
import parser
import sys
import runTimeCode
# Module-level state: RTC is rebound to a runTimeCode.RunTimeCode instance
# inside generateMIPSCode; outputFile is the basename of the emitted file
# (overwritten from the input filename in __main__).
RTC = {}
outputFile = 'a'
def registerAction(action):
    """Emit one MIPS `action` (e.g. 'sw' or 'lw') per saved register.

    Registers $t0-$t9 and $s0-$s4 are mapped to consecutive 4-byte stack
    slots starting at offset 12($sp).
    """
    global RTC
    regs = ['$t0', '$t1', '$t2', '$t3', '$t4', '$t5', '$t6', '$t7', '$t8',
            '$t9', '$s0', '$s1', '$s2', '$s3', '$s4']
    for slot, reg in enumerate(regs):
        RTC.addLineToCode([action, reg, str(12 + 4 * slot) + '($sp)', ''])
def generateMIPSCode(code):
    """Translate high-level source *code* into MIPS assembly.

    The source is parsed into a symbol table (ST) and three-address code
    (TAC); every TAC function is then walked and lowered into MIPS
    instructions collected in the module-level RunTimeCode object (RTC),
    which finally writes the assembly to *outputFile*.
    """
    global outputFile
    global RTC
    # Parser debug chatter goes to a scratch file instead of the console.
    sys.stderr = open('dump','w')
    ST, TAC = z.parse(code)
    # sys.stderr.close()
    # TAC.printCode()
    ST.printSymbolTableHistory()
    print("trying new version\n")
    ST.printST()
    RTC = runTimeCode.RunTimeCode(ST, TAC)
    RTC.fixLabels()
    # counter tracks PARAM instructions between calls (max 4 -> $a0-$a3).
    counter = 0
    for function in TAC.code:
        RTC.addFunction(function)
        if (function == 'main'):
            # Prologue for main: reserve a 200-byte activation record and
            # initialise the '__myspace__' display bookkeeping.
            RTC.addLineToCode(['sub', '$sp', '$sp', '200'])
            RTC.addLineToCode(['la', '$fp', '200($sp)', ''])
            RTC.addLineToCode(['la', '$s5', '__myspace__', ''])
            RTC.addLineToCode(['lw', '$s7', '0($s5)', ''])
            RTC.addLineToCode(['la', '$v0', '-' + str(ST.getAttributeFromFunctionList(function, 'width')) + '($sp)', ''])
            RTC.addLineToCode(['sw','$v0', '0($s5)', ''])
            RTC.addLineToCode(['li', '$v0', ST.getAttributeFromFunctionList(function, 'width'), ''])
            RTC.addLineToCode(['sub', '$sp', '$sp', '$v0'])
        else:
            # Prologue for ordinary functions: save $ra/$fp, update the
            # display slot for this scope level, and spill all registers.
            RTC.addLineToCode(['sub', '$sp','$sp','72'])
            RTC.addLineToCode(['sw','$ra','0($sp)',''])
            RTC.addLineToCode(['sw','$fp','4($sp)',''])
            RTC.addLineToCode(['la','$fp','72($sp)',''])
            RTC.addLineToCode(['li','$v0',ST.getAttributeFromFunctionList(function, 'scopeLevel'),''])
            RTC.addLineToCode(['la', '$s5', '__myspace__', ''])
            # scopeLevel * 4 computed as two doublings (word offset).
            RTC.addLineToCode(['add', '$v0', '$v0', '$v0'])
            RTC.addLineToCode(['add', '$v0', '$v0', '$v0'])
            RTC.addLineToCode(['add', '$s6', '$v0', '$s5'])
            RTC.addLineToCode(['lw','$s7','0($s6)',''])
            RTC.addLineToCode(['sw','$s7','8($sp)',''])
            RTC.addLineToCode(['la', '$v0', '-' + str(ST.getAttributeFromFunctionList(function, 'width'))+'($sp)' , ''])
            RTC.addLineToCode(['sw','$v0','0($s6)',''])
            registerAction('sw')
            RTC.addLineToCode(['li','$v0',ST.getAttributeFromFunctionList(function, 'width'),''])
            RTC.addLineToCode(['sub','$sp','$sp','$v0'])
        # Copy the parameters
        numParam = ST.getAttributeFromFunctionList(function, 'numParam')
        if numParam >4:
            parser.error("Too many parameters (max: 4)", None)
        for x in range(numParam):
            RTC.addLineToCode(['sw','$a' + str(x), str(4*x) + '($sp)', ''])
        # Lower every TAC instruction; a line is [arg1, arg2, result, op].
        for line in TAC.code[function]:
            if line[3] == 'JUMPLABEL':
                # Function call: reset the PARAM counter and restore the
                # caller's registers after the call returns.
                counter = 0 ;
                RTC.addLineToCode(['jal', RTC.getRegister(line[2]), '', ''])
                RTC.reloadParentRegisters(ST.getAttributeFromFunctionList(function, 'scopeLevel'), function)
            elif line[3] == 'JUMP_RETURN':
                RTC.addLineToCode(['b', function + 'end', '', ''])
            elif line[3] == 'PARAM':
                # Pass arguments through $a0-$a3.
                RTC.addLineToCode(['move', '$a'+str(counter), RTC.getRegister(line[0]),''])
                counter = counter +1 ;
                if counter == 5:
                    parser.error("Too many parameters (max: 4)", None)
            elif line[3] == '=':
                RTC.addLineToCode(['move', RTC.getRegister(line[0]), RTC.getRegister(line[1]), ''])
            elif line[3] == '=i':
                # Immediate assignment.
                RTC.addLineToCode(['li', RTC.getRegister(line[0]), line[1], ''])
            elif line[3] == '=REF':
                # Address-of assignment.
                RTC.addLineToCode(['la', RTC.getRegister(line[0]), line[1], ''])
            elif line[3] == '+':
                RTC.addLineToCode(['add', RTC.getRegister(line[0]), RTC.getRegister(line[1]), RTC.getRegister(line[2])])
            elif line[3] == '-':
                RTC.addLineToCode(['sub', RTC.getRegister(line[0]), RTC.getRegister(line[1]), RTC.getRegister(line[2])])
            elif line[3] == '*':
                # mult leaves the low word of the product in LO.
                RTC.addLineToCode(['mult', RTC.getRegister(line[1]), RTC.getRegister(line[2]),''])
                RTC.addLineToCode(['mflo', RTC.getRegister(line[0]),'',''])
            elif line[3] == '/':
                # div leaves the quotient in LO and the remainder in HI.
                RTC.addLineToCode(['div', RTC.getRegister(line[1]), RTC.getRegister(line[2]), ''])
                RTC.addLineToCode(['mflo', RTC.getRegister(line[0]), '', ''])
            elif line[3] == '%':
                RTC.addLineToCode(['div', RTC.getRegister(line[1]), RTC.getRegister(line[2]), ''])
                RTC.addLineToCode(['mfhi', RTC.getRegister(line[0]), '', ''])
            elif line[3] == '<':
                RTC.addLineToCode(['slt', RTC.getRegister(line[0]), RTC.getRegister(line[1]), RTC.getRegister(line[2])])
            elif line[3] == '>':
                RTC.addLineToCode(['sgt', RTC.getRegister(line[0]), RTC.getRegister(line[1]), RTC.getRegister(line[2])])
            elif line[3] == '<=':
                RTC.addLineToCode(['sle', RTC.getRegister(line[0]), RTC.getRegister(line[1]), RTC.getRegister(line[2])])
            elif line[3] == '>=':
                RTC.addLineToCode(['sge', RTC.getRegister(line[0]), RTC.getRegister(line[1]), RTC.getRegister(line[2])])
            elif line[3] == '==':
                RTC.addLineToCode(['seq', RTC.getRegister(line[0]), RTC.getRegister(line[1]), RTC.getRegister(line[2])])
            elif line[3] == '!=':
                RTC.addLineToCode(['sne', RTC.getRegister(line[0]), RTC.getRegister(line[1]), RTC.getRegister(line[2])])
            elif line[3] == 'or':
                RTC.addLineToCode(['or', RTC.getRegister(line[0]), RTC.getRegister(line[1]), RTC.getRegister(line[2])])
            elif line[3] == 'and':
                RTC.addLineToCode(['and', RTC.getRegister(line[0]), RTC.getRegister(line[1]), RTC.getRegister(line[2])])
            elif line[3] == '>>':
                RTC.addLineToCode(['srl', RTC.getRegister(line[0]), RTC.getRegister(line[1]), RTC.getRegister(line[2])])
            elif line[3] == '<<':
                RTC.addLineToCode(['sll', RTC.getRegister(line[0]), RTC.getRegister(line[1]), RTC.getRegister(line[2])])
            elif line[3] == 'COND_GOTO':
                RTC.addLineToCode(['beq', RTC.getRegister(line[0]), line[1], line[2]])
            elif line[3] == 'GOTO':
                RTC.addLineToCode(['b', line[2], '', ''])
            elif line[3] == 'FUNCTION_RETURN':
                # Capture the callee's return value from $v0.
                RTC.addLineToCode(['move', RTC.getRegister(line[0]), '$v0', ''])
            elif line[3] == 'RETURN':
                RTC.addLineToCode(['move', '$v0', RTC.getRegister(line[0]), ''])
                RTC.addLineToCode(['b', function + 'end', '', ''])
            elif line[3] == 'HALT':
                RTC.addLineToCode(['jal', 'exit', '', ''])
            elif line[3] == 'PRINT' and line[0] == '':
                RTC.addLineToCode(['jal', 'print_newline', '', ''])
            elif line[3] == 'PRINT':
                # Dispatch to the runtime print routine matching the type.
                RTC.addLineToCode(['move', '$a0', RTC.getRegister(line[0]), ''])
                if line[2] == 'NUMBER':
                    RTC.addLineToCode(['jal', 'print_integer', '', ''])
                elif line[2] == 'STRING':
                    RTC.addLineToCode(['jal', 'print_string', '', ''])
                else:
                    RTC.addLineToCode(['jal', 'print_boolean', '', ''])
            else:
                # Unknown op: pass the raw TAC line straight through.
                RTC.addLineToCode(line)
        if function != 'main':
            # Epilogue: restore $ra/$fp and the display slot, reload the
            # spilled registers and return to the caller.
            RTC.addLineToCode(['LABEL', function + 'end', '', ''])
            RTC.addLineToCode(['addi','$sp','$sp',ST.getAttributeFromFunctionList(function,'width')])
            RTC.addLineToCode(['lw','$ra','0($sp)',''])
            RTC.addLineToCode(['lw','$fp','4($sp)',''])
            RTC.addLineToCode(['lw','$a0','8($sp)',''])
            RTC.addLineToCode(['li','$a1',ST.getAttributeFromFunctionList(function, 'scopeLevel'),''])
            RTC.addLineToCode(['la', '$s5', '__myspace__', ''])
            RTC.addLineToCode(['add', '$a1', '$a1', '$a1'])
            RTC.addLineToCode(['add', '$a1', '$a1', '$a1'])
            RTC.addLineToCode(['add', '$s6', '$a1', '$s5'])
            RTC.addLineToCode(['sw','$a0','0($s6)',''])
            registerAction('lw')
            RTC.addLineToCode(['addi','$sp','$sp','72'])
            RTC.addLineToCode(['jr','$ra','',''])
    RTC.printCode(outputFile)
if __name__=="__main__":
    z = parser.G1Parser()
    # NOTE(review): these file handles are never closed; harmless for a
    # one-shot script but worth tidying with context managers.
    library = open('lib/library.py')
    libraryCode = library.read()
    filename = sys.argv[1]
    # Output basename: input filename without directories or extension.
    outputFile = (filename.split('/')[-1]).split('.')[0]
    sourcefile = open(filename)
    code = sourcefile.read()
    # code = libraryCode + code
    generateMIPSCode(code)
| [
"noreply@github.com"
] | noreply@github.com |
7f6dff4693647d2194db96d3906772cd9d143a1f | b91e7cf24508ed439bedab13b5bdef2af2151055 | /libs/ple/ple/games/puckworld.py | a74233eb66a121adcf07035118ea371e817316d0 | [
"MIT"
] | permissive | nishithbsk/I-PODRL | 6ae50bcaf7da9f11638eeef52667bac05d916276 | 78f861d0e98b2b0407c078eda56bcfeab102474f | refs/heads/master | 2021-01-11T03:33:25.949824 | 2016-10-25T10:45:28 | 2016-10-25T10:45:28 | 71,019,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,608 | py | import pygame
import sys
import math
import base
from pygame.constants import K_w, K_a, K_s, K_d
from primitives import Player, Creep
from utils.vec2d import vec2d
from utils import percent_round_int
class PuckCreep(pygame.sprite.Sprite):
    """The 'bad' creep: a large two-tone disc that drifts toward the agent.

    *attr* supplies radii, colours and speed (see PuckWorld.CREEP_BAD).
    """

    def __init__(self, pos_init, attr, SCREEN_WIDTH, SCREEN_HEIGHT):
        pygame.sprite.Sprite.__init__(self)

        self.pos = vec2d(pos_init)
        self.attr = attr
        self.SCREEN_WIDTH = SCREEN_WIDTH
        self.SCREEN_HEIGHT = SCREEN_HEIGHT

        # Square surface sized to the outer disc; black is the colorkey so
        # everything outside the circles is transparent.
        image = pygame.Surface((self.attr["radius_outer"]*2, self.attr["radius_outer"]*2))
        image.fill((0, 0, 0, 0))
        image.set_colorkey((0,0,0))

        # Outer halo, drawn at 75% opacity below.
        pygame.draw.circle(
            image,
            self.attr["color_outer"],
            (self.attr["radius_outer"], self.attr["radius_outer"]),
            self.attr["radius_outer"],
            0
        )
        image.set_alpha(int(255*0.75))

        # Solid centre disc.
        pygame.draw.circle(
            image,
            self.attr["color_center"],
            (self.attr["radius_outer"],self.attr["radius_outer"]),
            self.attr["radius_center"],
            0
        )

        self.image = image
        self.rect = self.image.get_rect()
        self.rect.center = pos_init

    def update(self, ndx, ndy, dt):
        # Move along the (ndx, ndy) direction scaled by speed and dt, then
        # keep the sprite rect in sync with the position.
        self.pos.x += ndx * self.attr['speed'] * dt
        self.pos.y += ndy * self.attr['speed'] * dt

        self.rect.center = (self.pos.x, self.pos.y)
class PuckWorld(base.Game):
    """
    Based Karpthy's PuckWorld in `REINFORCEjs`_.

    .. _REINFORCEjs: https://github.com/karpathy/reinforcejs

    Parameters
    ----------
    width : int
        Screen width.

    height : int
        Screen height, recommended to be same dimension as width.

    """

    def __init__(self,
                 width=64,
                 height=64):

        actions = {
            "up": K_w,
            "left": K_a,
            "right": K_d,
            "down": K_s
        }

        base.Game.__init__(self, width, height, actions=actions)

        # Chasing creep: sizes, colours and speed scale with screen width.
        self.CREEP_BAD = {
            "radius_center": percent_round_int(width, 0.047),
            "radius_outer": percent_round_int(width, 0.265),
            "color_center": (110, 45, 45),
            "color_outer": (150, 95, 95),
            "speed": 0.05*width
        }

        # Target creep the agent is rewarded for approaching.
        self.CREEP_GOOD = {
            "radius": percent_round_int(width, 0.047),
            "color": (40, 140, 40)
        }

        self.AGENT_COLOR = (60, 60, 140)
        self.AGENT_SPEED = 0.2*width
        self.AGENT_RADIUS = percent_round_int(width, 0.047)
        self.AGENT_INIT_POS = (self.AGENT_RADIUS, self.AGENT_RADIUS)

        self.BG_COLOR = (255, 255, 255)
        # Per-frame acceleration components and the tick counter.
        self.dx = 0
        self.dy = 0
        self.ticks = 0

    def _handle_player_events(self):
        # Rebuild this frame's acceleration from the pressed keys.
        self.dx = 0.0
        self.dy = 0.0
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()

            if event.type == pygame.KEYDOWN:
                key = event.key
                if key == self.actions["left"]:
                    self.dx -= self.AGENT_SPEED

                if key == self.actions["right"]:
                    self.dx += self.AGENT_SPEED

                if key == self.actions["up"]:
                    self.dy -= self.AGENT_SPEED

                if key == self.actions["down"]:
                    self.dy += self.AGENT_SPEED

    def getGameState(self):
        """
        Gets a non-visual state representation of the game.

        Returns
        -------

        dict
            * player x position.
            * player y position.
            * players x velocity.
            * players y velocity.
            * good creep x position.
            * good creep y position.
            * bad creep x position.
            * bad creep y position.

            See code for structure.

        """
        state = {
            "player": {
                "x": self.player.pos.x,
                "y": self.player.pos.y,
                "velocity": {
                    "x": self.player.vel.x,
                    "y": self.player.vel.y
                }
            },
            "good_creep": {
                "x": self.good_creep.pos.x,
                "y": self.good_creep.pos.y
            },
            "bad_creep": {
                "x": self.bad_creep.pos.x,
                "y": self.bad_creep.pos.y
            },
        }

        return state

    def getScore(self):
        # Cumulative shaped reward accrued so far.
        return self.score

    def game_over(self):
        """
        Return bool if the game has 'finished'
        """
        # PuckWorld is a continuing task: it never terminates on its own.
        return False

    def _rngCreepPos(self):
        # Uniform random position kept a few radii away from the edges.
        r = self.CREEP_GOOD['radius']
        x = self.rng.uniform(r*3, self.width-r*2.5)
        y = self.rng.uniform(r*3, self.height-r*2.5)
        return ( x, y )

    def init(self):
        """
        Starts/Resets the game to its inital state
        """
        self.player = Player(self.AGENT_RADIUS, self.AGENT_COLOR, self.AGENT_SPEED, self.AGENT_INIT_POS, self.width, self.height)
        self.player_group = pygame.sprite.Group()
        self.player_group.add( self.player )

        self.good_creep = Creep(
            self.CREEP_GOOD['color'],
            self.CREEP_GOOD['radius'],
            self._rngCreepPos(),
            (1,1),
            0.0,
            1.0,
            "GOOD",
            self.width,
            self.height
        )

        self.bad_creep = PuckCreep((self.width, self.height), self.CREEP_BAD, self.screen_dim[0]*0.75, self.screen_dim[1]*0.75)

        self.creeps = pygame.sprite.Group()
        self.creeps.add(self.good_creep)
        self.creeps.add(self.bad_creep)

        self.score = 0
        self.ticks = 0
        self.lives = -1

    def step(self, dt):
        """
        Perform one step of game emulation.
        """
        dt /= 1000.0
        self.ticks += 1
        self.screen.fill(self.BG_COLOR)

        self._handle_player_events()
        self.player_group.update(self.dx, self.dy, dt)

        # Euclidean distances from the agent to both creeps.
        dx = self.player.pos.x-self.good_creep.pos.x
        dy = self.player.pos.y-self.good_creep.pos.y
        dist_to_good = math.sqrt(dx*dx + dy*dy)

        dx = self.player.pos.x-self.bad_creep.pos.x
        dy = self.player.pos.y-self.bad_creep.pos.y
        dist_to_bad = math.sqrt(dx*dx + dy*dy)

        # Reward shaping: closer to the good creep is better; an extra
        # penalty applies while inside the bad creep's outer radius.
        reward = -dist_to_good

        if dist_to_bad < self.CREEP_BAD['radius_outer']:
            reward += 2.0*(dist_to_bad - self.CREEP_BAD['radius_outer']) / float(self.CREEP_BAD['radius_outer'])

        self.score += reward

        # Teleport the good creep to a fresh spot every 500 ticks.
        if self.ticks % 500 == 0:
            x,y = self._rngCreepPos()
            self.good_creep.pos.x = x
            self.good_creep.pos.y = y

        # Unit direction from the bad creep toward the player (guarding
        # against division by zero when they coincide).
        ndx = 0.0 if dist_to_bad == 0.0 else dx/dist_to_bad
        ndy = 0.0 if dist_to_bad == 0.0 else dy/dist_to_bad

        self.bad_creep.update(ndx, ndy, dt)
        self.good_creep.update(dt)

        self.player_group.draw(self.screen)
        self.creeps.draw(self.screen)
if __name__ == "__main__":
    import numpy as np

    # Manual demo: play the environment at 60 FPS with a fixed RNG seed.
    pygame.init()
    game = PuckWorld(width=256, height=256)
    game.screen = pygame.display.set_mode( game.getScreenDims(), 0, 32)
    game.clock = pygame.time.Clock()
    game.rng = np.random.RandomState(24)
    game.init()

    while True:
        dt = game.clock.tick_busy_loop(60)
        game.step(dt)
        pygame.display.update()
| [
"nishith@stanford.edu"
] | nishith@stanford.edu |
e01af6a35d35f8f083ade9a162ab86a061691cfe | 777a7527317e5d265536701a7a8f24f3cd6b5ada | /practice/leetcode/1091_shortest-path-in-binary-matrix.py | 0d16cd348073d0535762056b92ebd68ae235ae6a | [] | no_license | ThanhChinhBK/interview-programing-questions | 22d0906ae2e315af4e48f56fe2296a4ac26ff62e | b58d716348301e9b62c57e98ed34a2fb9c34d033 | refs/heads/master | 2022-05-14T02:29:03.879809 | 2022-04-09T14:32:26 | 2022-04-09T14:32:26 | 116,651,413 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | class Solution:
def shortestPathBinaryMatrix(self, grid: List[List[int]]) -> int:
n = len(grid)
visited = [[0] * n for _ in range(n)]
points = [((0,0), 1)]
length = 0
while points:
(cur_x, cur_y), distance = points.pop(0)
if grid[cur_x][cur_y] or visited[cur_x][cur_y]:
continue
if cur_x == n-1 and cur_y == n - 1:
return distance
visited[cur_x][cur_y] = 1
next_points = [
(cur_x-1, cur_y-1),
(cur_x-1, cur_y),
(cur_x-1, cur_y + 1),
(cur_x, cur_y + 1),
(cur_x + 1, cur_y + 1),
(cur_x + 1, cur_y),
(cur_x + 1, cur_y - 1),
(cur_x, cur_y - 1)
]
for (next_x, next_y) in next_points:
if 0 <= next_x < n and 0 <= next_y < n:
points.append(((next_x, next_y), distance + 1))
return -1
| [
"nguyenthanhchinh96@gmail.com"
] | nguyenthanhchinh96@gmail.com |
b0c4e029c47e2ff361892917e87bcfd46c70d0af | 0629a1b32919a6f7e258982dd511eccdb957129c | /scripts/debug_lexer.py | 4b7db41aa4f3caa251713f94ebaf8edde88fc48f | [
"BSD-2-Clause"
] | permissive | mhaberler/pygments | b2729029d2b37d816cea5fb607e7ac075aa2d5a6 | f856d6ee7fa40e563ae0083ad378204f2ec29040 | refs/heads/master | 2020-12-24T11:36:40.040419 | 2016-06-16T17:10:37 | 2016-06-16T17:10:37 | 61,503,624 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,765 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Lexing error finder
~~~~~~~~~~~~~~~~~~~
For the source files given on the command line, display
the text where Error tokens are being generated, along
with some context.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import os
import sys
# always prefer Pygments from source if exists
srcpath = os.path.join(os.path.dirname(__file__), '..')
if os.path.isdir(os.path.join(srcpath, 'pygments')):
sys.path.insert(0, srcpath)
from pygments.lexer import RegexLexer, ExtendedRegexLexer, LexerContext, \
ProfilingRegexLexer, ProfilingRegexLexerMeta
from pygments.lexers import get_lexer_by_name, find_lexer_class, \
find_lexer_class_for_filename
from pygments.token import Error, Text, _TokenType
from pygments.cmdline import _parse_options
class DebuggingRegexLexer(ExtendedRegexLexer):
    """Make the state stack, position and current match instance attributes.

    Exposing ``self.ctx`` (lexer context) and ``self.m`` (last regex match)
    lets the surrounding debug tooling inspect lexer state between tokens.
    """

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the inital stack (default: ``['root']``)
        """
        tokendefs = self._tokens
        self.ctx = ctx = LexerContext(text, 0)
        ctx.stack = list(stack)
        statetokens = tokendefs[ctx.stack[-1]]
        while 1:
            # Try every rule of the current state in order; first match wins.
            for rexmatch, action, new_state in statetokens:
                self.m = m = rexmatch(text, ctx.pos, ctx.end)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            # Plain token type: emit the matched text directly.
                            yield ctx.pos, action, m.group()
                            ctx.pos = m.end()
                        else:
                            # Callback action: signature differs between plain
                            # and extended regex lexers.
                            if not isinstance(self, ExtendedRegexLexer):
                                for item in action(self, m):
                                    yield item
                                ctx.pos = m.end()
                            else:
                                for item in action(self, m, ctx):
                                    yield item
                                if not new_state:
                                    # altered the state stack?
                                    statetokens = tokendefs[ctx.stack[-1]]
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    ctx.stack.pop()
                                elif state == '#push':
                                    ctx.stack.append(ctx.stack[-1])
                                else:
                                    ctx.stack.append(state)
                        elif isinstance(new_state, int):
                            # pop
                            del ctx.stack[new_state:]
                        elif new_state == '#push':
                            ctx.stack.append(ctx.stack[-1])
                        else:
                            assert False, 'wrong state def: %r' % new_state
                        statetokens = tokendefs[ctx.stack[-1]]
                    break
            else:
                # No rule matched: emit an Error token for one character,
                # resetting to 'root' at end of line.
                try:
                    if ctx.pos >= ctx.end:
                        break
                    if text[ctx.pos] == '\n':
                        # at EOL, reset state to 'root'
                        ctx.stack = ['root']
                        statetokens = tokendefs['root']
                        yield ctx.pos, Text, u'\n'
                        ctx.pos += 1
                        continue
                    yield ctx.pos, Error, text[ctx.pos]
                    ctx.pos += 1
                except IndexError:
                    break
def main(fn, lexer=None, options=None):
    """Lex file ``fn`` ('-' for stdin) and report Error tokens with context.

    ``lexer`` is an explicit lexer name; otherwise the lexer is guessed from
    the filename.  ``options`` is a dict of lexer options.  Returns 0 when
    the file lexed cleanly (or ``ignerror`` is set), 1 on the first Error
    token, so the caller can accumulate an exit status.
    """
    # Fixed: the original signature used the mutable default ``options={}``;
    # ``None`` + fallback is the safe idiom and is behaviorally identical.
    if options is None:
        options = {}
    if lexer is not None:
        lxcls = get_lexer_by_name(lexer).__class__
    else:
        # Guess from the filename, falling back to a "<lexername>_..." prefix.
        lxcls = find_lexer_class_for_filename(os.path.basename(fn))
        if lxcls is None:
            name, rest = fn.split('_', 1)
            lxcls = find_lexer_class(name)
            if lxcls is None:
                raise AssertionError('no lexer found for file %r' % fn)
    print('Using lexer: %s (%s.%s)' % (lxcls.name, lxcls.__module__,
                                       lxcls.__name__))
    debug_lexer = False
    # The profiling / debugging base-class swap below is currently disabled;
    # kept for reference.
    # if profile:
    #     # does not work for e.g. ExtendedRegexLexers
    #     if lxcls.__bases__ == (RegexLexer,):
    #         # yes we can! (change the metaclass)
    #         lxcls.__class__ = ProfilingRegexLexerMeta
    #         lxcls.__bases__ = (ProfilingRegexLexer,)
    #         lxcls._prof_sort_index = profsort
    # else:
    #     if lxcls.__bases__ == (RegexLexer,):
    #         lxcls.__bases__ = (DebuggingRegexLexer,)
    #         debug_lexer = True
    #     elif lxcls.__bases__ == (DebuggingRegexLexer,):
    #         # already debugged before
    #         debug_lexer = True
    #     else:
    #         # HACK: ExtendedRegexLexer subclasses will only partially work here.
    #         lxcls.__bases__ = (DebuggingRegexLexer,)
    #         debug_lexer = True
    lx = lxcls(**options)
    lno = 1
    if fn == '-':
        text = sys.stdin.read()
    else:
        with open(fn, 'rb') as fp:
            text = fp.read().decode('utf-8')
    text = text.strip('\n') + '\n'
    tokens = []
    states = []

    def show_token(tok, state):
        # Columnar dump of one (tokentype, value) pair plus the state stack.
        reprs = list(map(repr, tok))
        print(' ' + reprs[1] + ' ' + ' ' * (29-len(reprs[1])) + reprs[0], end=' ')
        if debug_lexer:
            print(' ' + ' ' * (29-len(reprs[0])) + ' : '.join(state) if state else '', end=' ')
        print()

    for ttype, val in lx.get_tokens(text):
        # Renamed from ``type`` to avoid shadowing the builtin.
        lno += val.count('\n')
        if ttype == Error and not ignerror:
            print('Error parsing', fn, 'on line', lno)
            if not showall:
                print('Previous tokens' + (debug_lexer and ' and states' or '') + ':')
                for i in range(max(len(tokens) - num, 0), len(tokens)):
                    if debug_lexer:
                        show_token(tokens[i], states[i])
                    else:
                        show_token(tokens[i], None)
            print('Error token:')
            vallen = len(repr(val))
            print(' ' + repr(val), end=' ')
            if debug_lexer and hasattr(lx, 'ctx'):
                print(' ' * (60-vallen) + ' : '.join(lx.ctx.stack), end=' ')
            print()
            print()
            return 1
        tokens.append((ttype, val))
        if debug_lexer:
            if hasattr(lx, 'ctx'):
                states.append(lx.ctx.stack[:])
            else:
                states.append(None)
        if showall:
            show_token((ttype, val), states[-1] if debug_lexer else None)
    return 0
def print_help():
    """Print command-line usage for this developer helper script."""
    print('''\
Pygments development helper to quickly debug lexers.
scripts/debug_lexer.py [options] file ...
Give one or more filenames to lex them and display possible error tokens
and/or profiling info. Files are assumed to be encoded in UTF-8.
Selecting lexer and options:
-l NAME use lexer named NAME (default is to guess from
the given filenames)
-O OPTIONSTR use lexer options parsed from OPTIONSTR
Debugging lexing errors:
-n N show the last N tokens on error
-a always show all lexed tokens (default is only
to show them when an error occurs)
-e do not stop on error tokens
Profiling:
-p use the ProfilingRegexLexer to profile regexes
instead of the debugging lexer
-s N sort profiling output by column N (default is
column 4, the time per call)
''')
# Defaults for the command-line options; main() reads these module globals.
num = 10          # tokens of context to show before an error
showall = False   # dump every token, not only the context around errors
ignerror = False  # keep lexing past Error tokens
lexer = None      # explicit lexer name (-l); otherwise guessed from filename
options = {}      # lexer options parsed from -O
profile = False   # use the profiling lexer instead of the debugging one
profsort = 4      # profiling output sort column

if __name__ == '__main__':
    import getopt
    opts, args = getopt.getopt(sys.argv[1:], 'n:l:aepO:s:h')
    for opt, val in opts:
        if opt == '-n':
            num = int(val)
        elif opt == '-a':
            showall = True
        elif opt == '-e':
            ignerror = True
        elif opt == '-l':
            lexer = val
        elif opt == '-p':
            profile = True
        elif opt == '-s':
            profsort = int(val)
        elif opt == '-O':
            options = _parse_options([val])
        elif opt == '-h':
            print_help()
            sys.exit(0)
    # Accumulate failures: exit status is truthy if any file had an error.
    ret = 0
    if not args:
        print_help()
    for f in args:
        ret += main(f, lexer, options)
    sys.exit(bool(ret))
| [
"corey@octayn.net"
] | corey@octayn.net |
5868dc3a8a57d076069185449c7b597e72d375ca | 16df490b514a74031072173bdc4f4a2cfa37e8a8 | /LeetCode203.py | 414c798d1dcd9ec1ab055ebb99d426a9c359759a | [] | no_license | hzyhzzh/LeetCode | d7ad522be5eef6919c6c41ec9c2ecaa92b8fa156 | a9c982207d3fc4bcb0513f88b6b5aeaaeb09f554 | refs/heads/master | 2021-01-20T16:35:28.625019 | 2017-09-25T09:08:37 | 2017-09-25T09:08:39 | 68,596,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def removeElements(self, head, val):
        """
        Remove every node whose value equals ``val`` from a singly linked list.

        :type head: ListNode
        :type val: int
        :rtype: ListNode -- the new head, or None if every node was removed

        Fixed: the original re-tested ``head.val`` on every iteration and
        spread the unlink logic over three duplicated branches (using
        ``== None`` instead of ``is None``).  This version settles the head
        first, then unlinks matches in a single pass.
        """
        # Drop matching nodes from the front so the returned head is final.
        while head is not None and head.val == val:
            head = head.next
        # Walk the remainder, splicing out any successor that matches.
        node = head
        while node is not None and node.next is not None:
            if node.next.val == val:
                node.next = node.next.next
            else:
                node = node.next
        return head
| [
"576591256@qq.com"
] | 576591256@qq.com |
9f840be027ab8aea56e57ba7bb048620ae22d8b9 | 4d514eebd2e9913a5d81c25bf3f82d765cc80d00 | /simple_rnn.py | 808486c9b7bf5f57868f3efab6b17b709b2670f3 | [
"Apache-2.0"
] | permissive | EliasPapachristos/NLP-NLU | 55cc890a86a437ef5571d28e5dacd8f76354ead3 | 8c9c179842c64189c2bbe6648b2d82815c498a05 | refs/heads/master | 2020-08-05T23:37:34.632226 | 2019-10-17T14:09:09 | 2019-10-17T14:09:09 | 212,757,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,636 | py | from __future__ import print_function, division
from builtins import range, input
# I may need to update this version in the future
# sudo pip install -U future
from keras.models import Model
from keras.layers import Input, LSTM, GRU
import numpy as np
import matplotlib.pyplot as plt
import keras.backend as K
# Prefer the CuDNN-fused layer implementations when a GPU is available.
if len(K.tensorflow_backend._get_available_gpus()) > 0:
    from keras.layers import CuDNNLSTM as LSTM
    from keras.layers import CuDNNGRU as GRU

# Dummy data: one random sequence of length T with D features; the RNN
# layers below all use M hidden units.
T = 8
D = 2
M = 3
X = np.random.randn(1, T, D)
def lstm_1():
    """LSTM with return_state=True: prints the output and both final states."""
    inp = Input(shape=(T, D))
    layer = LSTM(M, return_state=True)
    net = Model(inputs=inp, outputs=layer(inp))
    o, h, c = net.predict(X)
    for label, value in (("o:", o), ("h:", h), ("c:", c)):
        print(label, value)
def lstm_2():
    """LSTM with return_sequences=True: the output covers every timestep."""
    inp = Input(shape=(T, D))
    layer = LSTM(M, return_state=True, return_sequences=True)
    net = Model(inputs=inp, outputs=layer(inp))
    o, h, c = net.predict(X)
    for label, value in (("o:", o), ("h:", h), ("c:", c)):
        print(label, value)
def gru_1():
    """GRU with return_state=True: only one state tensor (no cell state)."""
    inp = Input(shape=(T, D))
    layer = GRU(M, return_state=True)
    net = Model(inputs=inp, outputs=layer(inp))
    o, h = net.predict(X)
    for label, value in (("o:", o), ("h:", h)):
        print(label, value)
def gru_2():
    """GRU with return_sequences=True: per-timestep outputs plus final state."""
    inp = Input(shape=(T, D))
    layer = GRU(M, return_state=True, return_sequences=True)
    net = Model(inputs=inp, outputs=layer(inp))
    o, h = net.predict(X)
    for label, value in (("o:", o), ("h:", h)):
        print(label, value)
# Demo driver: run each RNN variant so the output/state shapes can be compared.
print("lstm_1:")
lstm_1()
print("lstm_2:")
lstm_2()
print("gru_1:")
gru_1()
print("gru_2:")
gru_2()
| [
"noreply@github.com"
] | noreply@github.com |
858a91e070f27a2a3c26765e7097ef3775f3e2d2 | d1c28fc0af86e47635365c0fce2893634872e394 | /sort_search_medium/94_binary_tree_inorder.py | 12f9ca2fb2b34069ad8ac81cbe88f53da2497a4c | [] | no_license | doyleju/LC | 5a95e07636959477ce27f0374d730c2dfa33f19d | 040182d0ac2461b5f4a1a08d3f6fd0c2f5551c7c | refs/heads/master | 2020-03-22T01:41:15.886629 | 2018-08-21T21:45:39 | 2018-08-21T21:45:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | class Solution:
def inorderTraversal(self, root):
    """
    Iterative in-order (left, node, right) traversal of a binary tree.

    :type root: TreeNode
    :rtype: List[int]

    Fixed: the old recursive draft was kept inside the method as a bare
    triple-quoted string literal, which was evaluated and thrown away on
    every call; the dead code has been removed.
    """
    result = []
    stack = []
    node = root
    # Descend left as far as possible, then visit, then turn right.
    while stack or node:
        if node:
            stack.append(node)
            node = node.left
        else:
            node = stack.pop()
            result.append(node.val)
            node = node.right
    return result
| [
"noreply@github.com"
] | noreply@github.com |
aab5320af9b48f92a2e321db7cb26674e6d0a401 | 24f2696aab87f1632705a7c8b2d3b866e26aa3ee | /LCA_236.py | 281941167ef5ab53585044747e11fcdfbd20eb5e | [] | no_license | adiggo/leetcode_py | 44a77a0b029f4d92bd0d8e24cad21ceea52e7794 | 4aa3a3a0da8b911e140446352debb9b567b6d78b | refs/heads/master | 2020-04-06T07:05:21.770518 | 2016-07-01T16:00:40 | 2016-07-01T16:00:40 | 30,397,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def lowestCommonAncestor(self, root, p, q):
        """
        :type root: TreeNode
        :type p: TreeNode
        :type q: TreeNode
        :rtype: TreeNode

        Classic recursive LCA: a node is the answer when p and q are found
        in different subtrees; otherwise the side that found anything wins.
        """
        # An empty subtree yields nothing; reaching p or q yields that node.
        if not root or root == p or root == q:
            return root
        found_left = self.lowestCommonAncestor(root.left, p, q)
        found_right = self.lowestCommonAncestor(root.right, p, q)
        if found_left and found_right:
            # p and q straddle this node, so it is the lowest ancestor.
            return root
        # At most one side found something (possibly nothing at all).
        return found_left or found_right
| [
"adiggo@gmail.com"
] | adiggo@gmail.com |
e966a809733c647ed153d31bbebf7df6fc19afa7 | b66304878239ecea3e38593112bcb861fe9815db | /project_template/project_template/urls.py | ff276fbbe3737f276113407e1c2fa281c94dbdfe | [] | no_license | cowhite/django_pymongo_admin | e8ecd9fd193cf43489b9ac19d6a0444c719c7e42 | 8d814b248d82d7572e167be5ed2a2418d5eddd42 | refs/heads/master | 2020-07-03T02:12:26.559308 | 2016-11-19T15:18:04 | 2016-11-19T15:18:04 | 74,205,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | """project_template URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^admin/pymongo/',
include("django_pymongo_admin.urls",
namespace="django-pymongo-admin")),
]
| [
"bhaskar@cowhite.com"
] | bhaskar@cowhite.com |
551bb8ed54a7c9b9c9cf20d5acef50c69f3aa0a7 | 91722e19226e2ac193891c6baad0444fbc2c0659 | /engine/models/distance.py | 9faf0daf71d57780f153ef4d8a2bd67dccc42030 | [] | no_license | Clacket/clacket | 79bf740a837e44d6d82d2d18c8098276b33fc82b | e9aacd967f447cf5d1fb625e57c6e64e0cfcfc78 | refs/heads/master | 2021-01-19T03:34:40.698854 | 2018-01-22T01:22:55 | 2018-01-22T01:22:55 | 83,339,453 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | import pickle
class DistanceMatrix(object):
    """Symmetric pairwise-distance store pickled to ``<folder>/distances.pyc``.

    Each unordered id pair is stored once, keyed smaller-id -> bigger-id.
    """

    def __init__(self, folder):
        # matrix layout: {smaller_id: {bigger_id: distance}}
        self.matrix = {}
        self.filename = '{0}/distances.pyc'.format(folder)

    def save(self):
        # NOTE: pickle is only safe for trusted, locally produced files.
        with open(self.filename, 'wb') as file:
            pickle.dump(self.matrix, file)

    def load(self):
        with open(self.filename, 'rb') as file:
            self.matrix = pickle.load(file)

    def get(self, id1, id2):
        """Return the stored distance between two ids (0 for identical ids)."""
        id1, id2 = int(id1), int(id2)
        if id1 == id2:
            return 0
        else:
            # NOTE(review): keys are stringified here, but update() stores
            # keys with whatever type it receives -- confirm callers pass
            # the same key type to both methods.
            smaller = str(min(id1, id2))
            bigger = str(max(id1, id2))
            return self.matrix[smaller][bigger]

    def get_all(self, id):
        """Distances from ``id`` to every known id.

        NOTE(review): assumes the row of the smallest id has a column for
        every other id, i.e. the matrix is complete.
        """
        min_id = min(self.matrix.keys())
        all_ids = [min_id] + list(self.matrix[min_id].keys())
        return self.get_some(id, all_ids)

    def get_some(self, id, ids):
        """Return ``[(other_id, distance), ...]`` for the requested ids."""
        distances = []
        for id2 in ids:
            distances.append((id2, self.get(id, id2)))
        return distances

    def update(self, id1, id2, distance):
        """Insert or overwrite the distance for the unordered pair (id1, id2)."""
        smaller = min(id1, id2)
        bigger = max(id1, id2)
        if smaller not in self.matrix:
            self.matrix[smaller] = {}
        self.matrix[smaller][bigger] = distance
"mrf.mariam@gmail.com"
] | mrf.mariam@gmail.com |
a285770553a45f3b2ad61fd5d089ea633742b9a7 | ef835d16ae3b848120424ee231912bfb2ff81ae8 | /stopword/admin.py | 120f6660526d3877058d98d20ddca5a9617aeaa8 | [] | no_license | nelsondressler/SistBiblioteca | 013b930d9d671997a4aa3948fd4dfabaf87fd67b | cff2a6c83b57b51d1a4d7c232eccf7b48728e600 | refs/heads/master | 2021-08-17T07:22:38.619693 | 2017-11-20T22:24:04 | 2017-11-20T22:24:04 | 104,620,636 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | from django.contrib import admin
from .models import Stopword
# Register your models here.
class StopwordAdmin(admin.ModelAdmin):
    """Admin configuration for the Stopword model."""
    list_display = ['id', 'nome']        # columns shown in the change list
    list_display_links = ['id', 'nome']  # columns linked to the edit page
    search_fields = ['nome']             # fields covered by the search box
    raw_id_fields = []                   # foreign keys shown as raw-id widgets

    def get_ordering(self, request):
        # Always order the change list by primary key.
        return ['id']

admin.site.register(Stopword, StopwordAdmin)
| [
"nelsondr58@gmail.com"
] | nelsondr58@gmail.com |
70cf5c7a6c7e3ea0437ed9788e57a2bea5bc6895 | c18b7691e050ebb18e2bced28ef05c1e5e6d4e69 | /convert_csv.py | ac6c10ad22648d0a94c8828f8a759a210549988e | [] | no_license | tejaalle/Key_Performance_Indicator | d0aa12399c6627cdad87a512c3ad03aab0df820d | da3e5aec05ad937a3f3a82d9107101853bf03e2f | refs/heads/master | 2022-12-13T16:02:08.336709 | 2020-08-22T03:15:56 | 2020-08-22T03:15:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,353 | py | import csv
# Pass 1: copy the header plus every row containing the cell 'CANADA '
# into file1.csv.
# Fixed: the output files used to be re-opened once per row; each pass now
# opens its files exactly once, producing byte-identical per-run output.
# NOTE(review): file1.csv is still opened with mode "a", so re-running the
# script appends a second header and data set (pre-existing behavior).
with open("DNR_Camping_Parks_Reservation_Data_2016.csv", 'r', encoding='utf-8-sig') as csv_file1, \
        open("file1.csv", "a", newline='') as csv_file2:
    csv_listen = csv.DictReader(csv_file1)
    csv_writer = csv.writer(csv_file2)
    # Accessing .fieldnames consumes the header row from the shared handle,
    # so the plain reader below starts at the first data row.
    csv_writer.writerow(csv_listen.fieldnames)
    for line in csv.reader(csv_file1):
        if 'CANADA ' in line:
            csv_writer.writerow(line)

# Columns kept in the reduced files.
parameters = ["ParkName", "State", "partySize", "BookingType", "RateType", "Equipment"]

# Pass 2: project the filtered rows down to the columns of interest.
with open("file1.csv", "r", encoding='utf-8-sig') as csv_file3, \
        open("file2.csv", "w", newline='') as csv_file4:
    csv_writer = csv.writer(csv_file4)
    csv_writer.writerow(parameters)
    for line in csv.DictReader(csv_file3):
        csv_writer.writerow([line[column] for column in parameters])

# Pass 3: abbreviate the Equipment descriptions
# ("Less than" -> "LT", "Single Tent" -> "ST").
with open("file2.csv", "r", encoding='utf-8-sig') as csv_file5, \
        open("file3.csv", "w", newline='') as csv_file6:
    csv_writer = csv.writer(csv_file6)
    csv_writer.writerow(parameters)
    for line in csv.DictReader(csv_file5):
        line["Equipment"] = line["Equipment"].replace("Less than", "LT").replace("Single Tent", "ST")
        csv_writer.writerow([line[column] for column in parameters])
| [
"tj680478@dal.ca"
] | tj680478@dal.ca |
4af9fff107581efba17158a157bc33c7f8d43be6 | f5390652068c736aea061a0979f27ba32b51784f | /Web/Web/views.py | dfd74dd32a7e1fe8b04516a9a481ccbc516c7484 | [] | no_license | kho903/Project_Reflux | 172c9bd7062f4cc9f84c576412557435f63906b5 | 0f1cdab08bc71d4c219b34839f63cc96f7c90d47 | refs/heads/master | 2022-12-14T03:29:21.008229 | 2020-09-02T09:24:16 | 2020-09-02T09:24:16 | 286,716,990 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django.views.generic.base import TemplateView
from django.views.generic import CreateView
class HomeView(TemplateView):
    """Static landing page."""
    template_name = 'home.html'
class UserCreateView(CreateView):
    """Sign-up form backed by Django's stock UserCreationForm."""
    template_name = 'registration/register.html'
    form_class = UserCreationForm
    success_url = reverse_lazy('register_done')
class UserCreateDoneTV(TemplateView):
    """Static "registration complete" page."""
    template_name = 'registration/register_done.html'
| [
"gmldnr2222@naver.com"
] | gmldnr2222@naver.com |
eb3fc2cf03ec041b81a54d5412a9ec0ab7b0ec31 | f293b0f393de3f6db4aa6439990bbbe25b932ccc | /apps/travel_buddy/views.py | 0263928a3c1922aa35e3ac14fbef0836479d659d | [] | no_license | jessahl/travel | 55821e0300d66f09a0e5738135758a2fcedae4f7 | afe71b8a7be9b3908f154919ae32a3f8b135bca7 | refs/heads/master | 2021-08-19T06:24:18.171234 | 2017-11-24T23:36:49 | 2017-11-24T23:36:49 | 111,962,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,608 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, HttpResponse, redirect
from .models import *
from django.contrib import messages
import bcrypt
from django.db.models import Q
def index(request):
    """Render the landing page with the login/registration forms."""
    return render(request, 'travel_buddy/index.html')
def process(request):
    """Register a new user from the posted form and log them in."""
    errors = User.objects.validator(request.POST)
    if errors:
        # Surface every validation error as a flash message.
        for error in errors:
            print errors[error]
            messages.error(request, errors[error])
        return redirect('/')
    else:
        # Hash the password before storing; bcrypt embeds the salt in the hash.
        hashed_pw = bcrypt.hashpw(request.POST['password'].encode(), bcrypt.gensalt())
        user = User.objects.create(name = request.POST['name'], username = request.POST['username'], email = request.POST['email'], birthday = request.POST['birthday'], password = hashed_pw)
        request.session['id'] = user.id
        messages.success(request, "You have successfully registered")
        return redirect('/travels')
def login(request):
    """Authenticate posted credentials and start a session on success."""
    login_return = User.objects.login(request.POST)
    if 'user' in login_return:
        request.session['id'] = login_return['user'].id
        messages.success(request, "You have successfully logged in")
        return redirect('/travels')
    else:
        messages.error(request, login_return['error'])
        return redirect('/')
def users(request, user_id):
    """Render the travels page for the user with the given id."""
    context={"user": User.objects.get(id=user_id)}
    return render(request, 'travel_buddy/travels.html', context)
def travels(request):
    """Render the travels dashboard template (no extra context)."""
    return render (request, 'travel_buddy/travels.html')
def destination_list(request):
    """Build the travels dashboard: own, joined and other users' trips."""
    user = User.objects.get(id =request.session['id'])
    # NOTE(review): 'going' duplicates 'joined_by_user', and
    # 'joined_by_others' vs 'created_by_others' use different filters --
    # confirm which keys the template actually consumes.
    context={
        'user':user,
        'destination': Destination.objects.filter(created_by=user),
        'joined_by_user': Destination.objects.filter(joined = user),
        'joined_by_others': Destination.objects.exclude(joined = user),
        'going': Destination.objects.filter(joined=user),
        'created_by_others': Destination.objects.all().exclude(joined__id=request.session['id']),
        'created_by': User.objects.get(id =request.session['id'])
    }
    return render(request, 'travel_buddy/travels.html', context)
def destination(request, destination_id):
    """Show the detail page for a single destination."""
    context = {
        'destination': Destination.objects.get(id = destination_id)
    }
    return render(request, 'travel_buddy/destination.html', context)
def add(request):
    """Render the form for creating a new destination."""
    return render (request, 'travel_buddy/add.html')
def logout(request):
    """Clear the entire session and return to the landing page."""
    # Fixed: iterate over a snapshot of the keys -- deleting entries while
    # iterating the live keys view raises RuntimeError on Python 3.
    for key in list(request.session.keys()):
        del request.session[key]
    messages.success(request, "You have logged out")
    return redirect('/')
def create(request):
print
error_2 = Destination.objects.destination(request.POST)
if 'errors' in error_2:
for error in error_2:
print error_2[error]
messages.error(request, error_2[error])
return redirect('travel_buddy/add')
else:
user = User.objects.get(id =request.session['id'])
destination1 = Destination.objects.create(destination = request.POST['destination'], description = request.POST['description'], date_from =request.POST['date_from'], date_to = request.POST['date_to'], created_by = user)
destination1.joined.add(user)
destination1.save()
messages.error(request, "You have successfully added an destination")
return redirect('/travels')
def join(request, destination_id):
    """Add the logged-in user to a destination's participant list."""
    destination = Destination.objects.get(id = destination_id)
    user = User.objects.get(id =request.session['id'])
    destination.joined.add(user)
    return redirect('/travels')
| [
"jessahl@users.noreply.github.com"
] | jessahl@users.noreply.github.com |
dd78f0e4bfba84313de0b82e0d4439015e8332d8 | a07cbfbe6189cb0a59f0b261506850c5cd2b6828 | /sudoku/views.py | e9dc5a15ec12fc01bbe0149ae2436fe37c958d27 | [] | no_license | jack-x/sudoku_django_webapp | e04d15e5d0ccfa4bf0fc059547c51097c1e8233c | 12d688aa43d5a04e48ab2b5ae19b4dffd2290cdb | refs/heads/master | 2021-09-29T12:15:39.056972 | 2020-04-05T22:20:34 | 2020-04-05T22:20:34 | 253,241,287 | 0 | 0 | null | 2021-09-22T18:50:47 | 2020-04-05T13:27:37 | JavaScript | UTF-8 | Python | false | false | 11,426 | py | from django.shortcuts import render
from sudoku import Solver
from sudoku import puzzler as Puzzler
from sudoku import builder as Builder
import random
from django.http import HttpResponse
# Create your views here.
# Module-level game state shared across requests.
# NOTE(review): module globals mean only one concurrent game is supported.
solutionGlobal = []                # full 9x9 solution of the current puzzle
differenceDict = dict()            # still-hidden cells: 'cellXY' -> value
sudokuDictionaryResponse = dict()  # template context describing the board
def _render_board(request):
    """Render the current board state kept in the module-level context."""
    return render(request, 'sudoku/home.html', context=sudokuDictionaryResponse)


def _load_new_puzzle(request, level_choices, level_name):
    """Reset the module state, generate a puzzle with a blank count drawn
    from ``level_choices``, and render it labelled with ``level_name``.

    Extracted from index(): the four difficulty buttons and the default
    page load previously carried five copy-pasted versions of this code.
    """
    global solutionGlobal
    global differenceDict
    global sudokuDictionaryResponse
    solutionGlobal = []
    differenceDict = dict()
    sudokuDictionaryResponse = dict()
    print("Page has been reloaded")
    print("Generating new Puzzle")
    puzzle, solution = Puzzler.createSudokuPuzzleWithSolution(random.choice(level_choices))
    solutionGlobal = solution
    print("Loading Differences")
    # Remember the value of every blank cell so hints can reveal them later.
    for x in range(0, 9):
        for y in range(0, 9):
            if puzzle[x][y] == 0:
                differenceDict['cell' + str(x) + str(y)] = solution[x][y]
    # Build the template context: blank cells render as a space.
    for x in range(0, 9):
        for y in range(0, 9):
            newKey = 'cell' + str(x) + str(y)
            sudokuDictionaryResponse[newKey] = ' ' if puzzle[x][y] == 0 else puzzle[x][y]
    sudokuDictionaryResponse['LevelName'] = level_name
    sudokuDictionaryResponse["UserSolutionMessage"] = "Sudoku Load Complete!!"
    return _render_board(request)


def index(request):
    """Main sudoku view.

    Dispatches the board buttons (hint, solve, submit, four difficulty
    levels); a GET -- or a POST matching no button -- loads a fresh easy
    puzzle.  Game state lives in module globals (pre-existing single-player
    design, unchanged here).
    """
    global solutionGlobal
    global differenceDict
    global sudokuDictionaryResponse
    if request.method == 'POST':
        if request.POST.get('hintButton') == 'Pressed':
            # Reveal one random still-hidden cell.
            remaining = list(differenceDict.keys())
            if len(remaining) != 0:
                choice = random.choice(remaining)
                print("replacing this choice {}".format(choice))
                sudokuDictionaryResponse[choice] = differenceDict[choice]
                del differenceDict[choice]
            return _render_board(request)
        if request.POST.get('solveButton') == '100GO':
            # Replace the whole board with the stored solution.
            sudokuDictionaryResponse = dict()
            for x in range(0, 9):
                for y in range(0, 9):
                    newKey = 'cell' + str(x) + str(y)
                    sudokuDictionaryResponse[newKey] = ' ' if solutionGlobal[x][y] == 0 else solutionGlobal[x][y]
            sudokuDictionaryResponse["UserSolutionMessage"] = "Full Sudoku Solution Displayed!!"
            sudokuDictionaryResponse['LevelName'] = 'Sudoku Solved'
            return _render_board(request)
        if request.POST.get('Submit') == 'submitSudoku':
            print("User has submitted")
            # Collect the user's 81 cell values from the form.
            userSolution = dict()
            for x in range(0, 9):
                for y in range(0, 9):
                    userSolution['cell' + str(x) + str(y)] = request.POST.get('cell' + str(x) + str(y))
            print(userSolution)
            # Compare as strings because the form posts text values.
            # (Also fixes the old comparison loop, whose ``break`` only left
            # the inner loop and kept comparing the remaining rows.)
            correct = all(
                str(solutionGlobal[x][y]) == str(userSolution['cell' + str(x) + str(y)])
                for x in range(0, 9) for y in range(0, 9)
            )
            if correct:
                print("User solution is correct")
                sudokuDictionaryResponse["UserSolutionMessage"] = "Great Job you did it!!"
                for x in range(0, 9):
                    for y in range(0, 9):
                        newKey = 'cell' + str(x) + str(y)
                        sudokuDictionaryResponse[newKey] = ' ' if solutionGlobal[x][y] == 0 else solutionGlobal[x][y]
                return _render_board(request)
            else:
                print("User solution is false")
                sudokuDictionaryResponse["UserSolutionMessage"] = "Wrong Input! Please try again!!"
                return _render_board(request)
        if request.POST.get('veryEasyButton') == 'Pressed':
            return _load_new_puzzle(request, [10, 11, 12, 13, 14, 15], 'Level: Very Easy')
        if request.POST.get('easyButton') == 'Pressed':
            return _load_new_puzzle(request, [14, 15, 16, 17, 18, 19, 20, 21, 22], 'Level: Easy')
        if request.POST.get('mediumButton') == 'Pressed':
            return _load_new_puzzle(request, [21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32], 'Level: Medium')
        if request.POST.get('hardButton') == 'Pressed':
            return _load_new_puzzle(request, list(range(27, 49)), 'Level: Hard')
    # GET, or a POST that matched no button: start a fresh easy puzzle.
    return _load_new_puzzle(request, [10, 11, 12, 13], 'Level: Easy')
def newHome(request):
    """Trivial plain-text endpoint (placeholder / liveness check)."""
    return HttpResponse('Hello Boyz!!')
| [
"namanvrm7@gmail.com"
] | namanvrm7@gmail.com |
6e01c4a06d08fadfa6604f0bbbf5978b02fb3065 | f55e6bd5395444a9173a86c9b738ae6d58ced982 | /quiz/tests.py | 8998e8f93d32a63015e281e85e6c70eb910b6895 | [] | no_license | kiwi137831/ELEC3609 | 3be4a8552ef1cdb6caed763fe7d342a9e7a45bb7 | 64eb00f580ff4ac18a60cbcdaf0b9c475853c375 | refs/heads/master | 2020-03-13T13:28:45.242014 | 2018-04-26T12:25:20 | 2018-04-26T12:25:20 | 131,139,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,499 | py | from django.test import TestCase
from quiz.views import *
from django.test import Client
from quiz.view_models import *
class SimpleTest(TestCase):
    """Smoke tests for the quiz views."""

    def setUp(self):
        # Bug fix: this method was named ``setup``, which unittest never
        # calls, so the fixture Quiz row was never created before the tests.
        self.client = Client()
        quiz_tosave = Quiz.objects.create(quizNo=1, courseNo=1)
        quiz_tosave.save()  # create() already saves; kept for parity

    def test_choose_to_add(self):
        """The add-question chooser lists every quiz of the course."""
        response = self.client.post('/choose_to_add/1/')
        quiz_list = Quiz.objects.all().filter(courseNo=1).order_by('quizNo')
        self.assertQuerysetEqual(response.context["all_quiz"], quiz_list)
        self.assertEqual(response.context["courseNo"], '1')

    def test_insert(self):
        """Inserting a question advances the order and echoes the context."""
        response = self.client.post('/insert', {'courseNo': 1, 'id': 1, 'quizNo': 1, 'order': 2,
                                                'description': 'a test question', 'firstAnswer': 'a',
                                                'secondAnswer': 'a', 'thirdAnswer': 'รง', 'fourthAnswer': 'd',
                                                'correctAnswer': 2, 'submit': 'add and continue'})
        self.assertEqual(response.context['quizNo'], '1')
        self.assertEqual(response.context['new_order'], 3)
        self.assertEqual(response.context['new_id'], 1)
        self.assertEqual(response.context['courseNo'], '1')

    def test_get_all_quiz(self):
        """The quiz selector lists every quiz of the course."""
        response = self.client.post('/selectquiz/1/')
        quiz_list = Quiz.objects.all().filter(courseNo=1).order_by('quizNo')
        self.assertQuerysetEqual(response.context["all_quiz"], quiz_list)
"kiwi137831@gmail.com"
] | kiwi137831@gmail.com |
57e2ee283d3febe993b10065b968ba9f581b5a55 | 6a52db9b913c3677dfbcd55776e1a14cddde359d | /parceiros/migrations/0006_auto_20181117_0309.py | 3ab094723e58de570f6ab1ca3fb06592a7e4d342 | [] | no_license | tiagocordeiro/casaconceito-sie | 47a2922f328fa7c9e13e84dae1b6a9135edd6236 | 892e42a655bb4ef08952c5be167e281720f40b49 | refs/heads/master | 2023-08-31T14:48:21.396973 | 2021-05-11T18:18:07 | 2021-05-11T18:18:07 | 140,175,770 | 0 | 0 | null | 2023-09-13T15:14:42 | 2018-07-08T14:38:35 | HTML | UTF-8 | Python | false | false | 497 | py | # Generated by Django 2.1.3 on 2018-11-17 05:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('parceiros', '0005_auto_20181117_0251'),
]
operations = [
migrations.AlterField(
model_name='indicacaopagamentos',
name='indicacao',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='parceiros.Indicacao'),
),
]
| [
"tiago@mulhergorila.com"
] | tiago@mulhergorila.com |
ddcf005a9089d92a72a6e6744948b752ea3b244f | b90f745e15ab0687a6dface769be88f4b4fee866 | /str_repr.py | c3bf46037a7825fbd9b5aca1c685fcbe5b814aeb | [] | no_license | tkachoff/sandbox | 6a1ec80acd174fb2324f514a7b225c2d41bad697 | b4980d6d52f89fb546cf0eb7fc24967624efefd8 | refs/heads/master | 2020-12-03T13:05:06.531504 | 2016-08-18T11:10:10 | 2016-08-18T11:10:10 | 65,983,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,129 | py | import copy
d1 = {
1: "one",
2: "two",
3: "three",
4: "four",
5: "five",
6: "six",
7: "seven",
8: "eight",
9: "nine",
11: "eleven",
12: "twelve",
13: "thirteen",
14: "fourteen",
15: "fifteen",
16: "sixteen",
17: "seventeen",
18: "eighteen",
19: "nineteen",
}
d2 = {
10: "ten",
20: "twenty",
30: "thirty",
40: "forty",
50: "fifty",
60: "sixty",
70: "seventy",
80: "eighty",
90: "ninety"
}
def get_dict_count(d):
result = copy.deepcopy(d)
for key in result:
result[key] = len(result[key])
return result
d11 = get_dict_count(d1)
d22 = get_dict_count(d2)
def get_first_part_count(n):
result = []
h = n / 100
if h > 0:
result.append(d11[h])
result.append(len("hundred"))
return result
def get_first_part(n):
result = []
h = n / 100
if h > 0:
result.append(d1[h])
result.append("hundred")
return result
def get_second_part(n):
# first_part.append("and")
result = []
h = n % 100
if h in d1.keys():
result.append(d1[h])
elif h in d2.keys():
result.append(d2[h])
elif h != 0:
first = h / 10
second = h % 10
result.append("{0}-{1}".format(d2[first * 10], d1[second]))
return result
def get_second_part_count(n):
# first_part.append("and")
result = []
h = n % 100
if h in d1.keys():
result.append(d11[h])
elif h in d2.keys():
result.append(d22[h])
elif h != 0:
first = h / 10
second = h % 10
result.append(d22[first * 10])
result.append(d11[second])
return result
def get_str_repr(n):
res = []
fp = get_first_part(n)
sp = get_second_part(n)
res.extend(fp)
if len(fp) != 0 and len(sp) != 0:
res.extend(["and"])
res.extend(sp)
return res
def get_total_count(n):
return reduce(lambda a, x: a + x, get_first_part_count(n)) \
+ reduce(lambda a, x: a + x, get_second_part_count(n))
n = 165
print(get_str_repr(n))
print(get_total_count(n))
| [
"atkachou@ddn.com"
] | atkachou@ddn.com |
8ad3db0ec4061062900fc2e03cbbae10b8f45f56 | 498d889585187ca56018b15f38880b8a671442b8 | /utils.py | 5c6fc73da2244ffe9d611253c389cb6fc386f278 | [] | no_license | mandasdasdasd/excel-fe | b89b06681bd7c91000f491a5f85f0c8577ac0fc3 | a81eb0085192c0932992745284c24efda9859241 | refs/heads/master | 2022-12-24T01:53:03.351947 | 2019-12-04T10:09:14 | 2019-12-04T10:09:14 | 205,658,439 | 0 | 0 | null | 2022-12-11T05:10:56 | 2019-09-01T10:01:07 | Vue | UTF-8 | Python | false | false | 218 | py | import hmac, random
class Encryption(object):
def __init__(self):
self.key = "bigdata"
def hmac_md5(self, s):
return hmac.new(self.key.encode('utf-8'), s.encode('utf-8'), 'MD5').hexdigest()
| [
"you@example.com"
] | you@example.com |
2c5f81b3a18e88988cf948967cd25c3d258a6fac | 625851eb6d87f536c33f89505567a6b15ff5176c | /scout.py | 1afe0f2a9c5d2f5da71a988cb50f1d8514a5e8bf | [] | no_license | SnacksOnAPlane/freefood | 5ed34e968e56c020752e174a2644d5a6e33357c3 | 3ab52352ae188779c0392aa35fbc97204e83ea50 | refs/heads/master | 2021-01-23T01:08:26.035781 | 2017-03-22T22:35:24 | 2017-03-22T22:35:24 | 85,879,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | from bs4 import BeautifulSoup
import requests
import json
import boto
url = 'https://postmates.com/atlanta'
webpage = requests.get(url)
soup = BeautifulSoup(webpage.text, 'html.parser')
free_food = [s for s in soup.body.stripped_strings if 'free' in s.lower()]
if free_food or True:
conn = boto.connect_ses()
body = 'Free Postmates!\n\n' + '\n'.join(free_food)
conn.send_email('freefood@hotdonuts.info', 'Free Food Notice', body, 'rsmiley@gmail.com')
| [
"smiley@callrail.com"
] | smiley@callrail.com |
a4b96917de1814ab6a7a8134aa8dbeac8ecf673c | b98dfa32e47449e873e8e2ec6b93b1550e2fc95b | /prescriptions/views.py | 3ec0775f6b851c8b80b329ddc528c76692e94f20 | [] | no_license | flribeiro/iclinic_prescriptions_api | 7df1557ea4f67ea437bb8af5009b554078b44570 | 98903e7df56d300d525128841f9c61f29e35d20e | refs/heads/master | 2023-03-04T00:41:18.290079 | 2021-02-18T12:30:45 | 2021-02-18T12:30:45 | 337,925,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,865 | py | import logging
from typing import OrderedDict
from rest_framework import viewsets
from prescriptions.models import Prescription, Clinic, Physician, Patient
from prescriptions.serializers import PrescriptionSerializer
from prescriptions.services.clinics_api import ClinicsApiService
from prescriptions.services.physicians_api import PhysiciansApiService
from prescriptions.services.patients_api import PatientsApiService
from prescriptions.services.metrics_api import MetricsApiService
from prescriptions.errors.error_catalog import errors
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from collections import OrderedDict
# Create a logger instance
logger = logging.getLogger(__name__)
@api_view(['POST'])
def post_prescription(request):
serializer = PrescriptionSerializer(data=request.data)
if serializer.is_valid():
pre_data = serializer.initial_data
logger.info(f"API called. Payload: {pre_data}")
api_clinics = ClinicsApiService()
api_physicians = PhysiciansApiService()
api_patients = PatientsApiService()
api_metrics = MetricsApiService()
metrics_body_request = OrderedDict()
clinics_response = api_clinics.get_clinic_by_id(
pre_data['clinic']['id'])
metrics_body_request['clinic_id'] = pre_data['clinic']['id']
metrics_body_request['clinic_name'] = clinics_response.get('name', '')
physicians_response = api_physicians.get_physician_by_id(
pre_data['physician']['id'])
if 'error' in physicians_response:
logger.warning(
f'Error calling Physicians API: {physicians_response}')
return Response(physicians_response, status=status.HTTP_404_NOT_FOUND) if physicians_response['error']['code'] == "02" else Response(physicians_response, status=status.HTTP_503_SERVICE_UNAVAILABLE)
metrics_body_request['physician_id'] = physicians_response.get(
'id', '')
metrics_body_request['physician_name'] = physicians_response.get(
'name', '')
metrics_body_request['physician_crm'] = physicians_response.get(
'crm', '')
patients_response = api_patients.get_patient_by_id(
pre_data['patient']['id'])
if 'error' in patients_response:
logger.warning(f'Error calling Patients API: {patients_response}')
return Response(patients_response, status=status.HTTP_404_NOT_FOUND) if patients_response['error']['code'] == "03" else Response(patients_response, status=status.HTTP_503_SERVICE_UNAVAILABLE)
metrics_body_request['patient_id'] = patients_response.get('id', '')
metrics_body_request['patient_name'] = patients_response.get(
'name', '')
metrics_body_request['patient_email'] = patients_response.get(
'email', '')
metrics_body_request['patient_phone'] = patients_response.get(
'phone', '')
metrics_response = api_metrics.post_metrics(metrics_body_request)
if 'error' in metrics_response:
logger.warning(f'Error calling Metrics API: {metrics_response}')
return Response(metrics_response, status=status.HTTP_503_SERVICE_UNAVAILABLE)
saved_prescription = serializer.save()
prescription = Prescription.objects.get(pk=saved_prescription.pk)
serializer = PrescriptionSerializer(prescription)
response = build_response(serializer.data)
return Response(response)
else:
error = {'error': errors[1]}
logger.warning(f'Invalid request. Error: {error}')
return Response(error, status=status.HTTP_400_BAD_REQUEST)
def build_response(data):
response = OrderedDict()
response['data'] = data if data else 'Not found'
return response
| [
"fabricio.ribeiro@luizalabs.com"
] | fabricio.ribeiro@luizalabs.com |
d70011c8b28df47afcc58eddc65bb73aa7c2a30e | 8de2a5c432fe44bbb2d549537a441f10f61795fc | /graph/graph.py | a85bd3188171f795e8c281d3b579d21b2545588d | [] | no_license | AMIRmh/beyond_classical_AI_search | 3ccdba27b2613ca5c758404bb01d8daf3546f40d | c439ae649432a3639100b8c297f6136fb2737418 | refs/heads/master | 2021-09-04T19:46:57.749625 | 2018-01-21T20:58:51 | 2018-01-21T20:58:51 | 115,197,247 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,097 | py | import copy
import math
class state:
def __init__(self, graph, graph_1, graph_2):
self.grand_graph = graph
self.graph_1 = graph_1
self.graph_2 = graph_2
self.current_graph, self.disjoinments = self.calculate_graph()
def calculate_graph(self):
current_graph = copy.deepcopy(self.grand_graph)
disjoinments = 0
for node_1 in self.graph_1:
for node_2 in self.graph_2:
if current_graph[node_1][node_2] == 1:
current_graph[node_1][node_2] = -1
disjoinments += 1
for node_1 in self.graph_2:
for node_2 in self.graph_1:
if current_graph[node_1][node_2] == 1:
current_graph[node_1][node_2] = -1
return current_graph, disjoinments
def evaluate(self):
return self.disjoinments + abs(len(self.graph_2) - len(self.graph_1))
def __lt__(self, other):
return self.evaluate() > other.evaluate()
def initialize():
arr = []
with open("graph/input_graph", "r") as f:
for line in f:
line = line.replace('\n', '')
arr.append(line.split())
arr = [[int(y) for y in x] for x in arr]
graph_1 = []
for i in range(len(arr[0])):
graph_1.append(i)
graph_2 = [graph_1.pop(0)]
init_state = state(arr, graph_1, graph_2)
return init_state
def neighbor(input_state):
neighbors = []
grand_graph = input_state.grand_graph
graph_1 = input_state.graph_1
graph_2 = input_state.graph_2
i = 0
while i < len(graph_1) and len(graph_1) > 1:
cp_graph_1 = copy.deepcopy(graph_1)
cp_graph_2 = copy.deepcopy(graph_2)
node = cp_graph_1.pop(i)
cp_graph_2.append(node)
new_state = state(grand_graph, cp_graph_1, cp_graph_2)
if is_valid_state(new_state):
neighbors.append(new_state)
i += 1
i = 0
while i < len(graph_2) and len(graph_2) > 1:
cp_graph_1 = copy.deepcopy(graph_1)
cp_graph_2 = copy.deepcopy(graph_2)
node = cp_graph_2.pop(i)
cp_graph_1.append(node)
new_state = state(grand_graph, cp_graph_1, cp_graph_2)
if is_valid_state(new_state):
neighbors.append(new_state)
i += 1
return neighbors
def is_valid_state(new_state):
seen = [False for i in range(len(new_state.graph_1))]
seen[0] = True
fill_valid_path(new_state, new_state.graph_1, new_state.graph_1[0], seen)
for true in seen:
if not true:
return False
seen = [False for i in range(len(new_state.graph_2))]
seen[0] = True
fill_valid_path(new_state, new_state.graph_2, new_state.graph_2[0], seen)
for true in seen:
if not true:
return False
return True
def fill_valid_path(new_state, graph, node, seen):
for graph_node in graph:
if not seen[graph.index(graph_node)]:
if new_state.current_graph[node][graph_node] == 1:
seen[graph.index(graph_node)] = True
fill_valid_path(new_state, graph, graph_node, seen)
def diff(cur, choosed_node):
if len(cur.graph_1) > len(choosed_node.graph_1) :
cp_choosed_graph_1 = copy.deepcopy(choosed_node.graph_1)
cp_cur_graph_1 = copy.deepcopy(cur.graph_1)
for node in cp_choosed_graph_1:
cp_cur_graph_1.remove(node)
return str(cp_cur_graph_1.pop(0)) + ' graph_1 -> graph_2'
else:
cp_choosed_graph_2 = copy.deepcopy(choosed_node.graph_2)
cp_cur_graph_2 = copy.deepcopy(cur.graph_2)
for node in cp_choosed_graph_2:
cp_cur_graph_2.remove(node)
return str(cp_cur_graph_2.pop(0)) + ' graph_2 -> graph_1'
def print_state(init_state):
print("current graph: ")
for i in init_state.current_graph:
print(i)
print("graph1: " + str(init_state.graph_1))
print("graph2: " + str(init_state.graph_2))
print("\n")
def goal(input_state):
return False
| [
"amir.haghollahi@gmail.com"
] | amir.haghollahi@gmail.com |
64217917d491b00b960f878302f437c65851a685 | 010533d6b6901073dfa966ae50420dc4c7b188d0 | /lambda.py | 55cabcb1c26057f5e5c2be0b8bfa7f42a467f775 | [] | no_license | gluemchen/weather | ce1396dd6e49722a04d5f2915a244f6aba4f3673 | b369fb0283cfe9aadd13009bdae1e009fc1ffbfd | refs/heads/master | 2023-06-21T06:16:42.331627 | 2021-07-29T05:37:16 | 2021-07-29T05:37:16 | 384,138,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | # Python class example
# Defining Vehicle class
class Vehicle:
def __init__(self, brand, model, type):
self.brand = brand
self.model = model
self.type = type
self.gas_tank_size = 14
self.fuel_level = 0
def fuel_up(self):
self.fuel_level = self.gas_tank_size
print("Gas tank is now full.")
def drive(self):
print(f"The {self.model} is now driving!")
# This is an instance of a Class. Also called an object.
# Multiple Objects can be created
toyota_ridgeline_truck = Vehicle("Toyoto", "Ridgeline", "Truck")
ford_escort_compact = Vehicle("Ford", "Escort", "Compact")
jeep_renegade_small_suv = Vehicle("Jeep", "Renegade", "small SUV")
# Access attribute values
print(toyota_ridgeline_truck.brand)
print(ford_escort_compact.model)
print(jeep_renegade_small_suv.type)
# Calling methods
toyota_ridgeline_truck.fuel_up()
toyota_ridgeline_truck.drive()
| [
"gluemchen81@googlemail.com"
] | gluemchen81@googlemail.com |
e7c1985f62c49d3421a63a89ddab7284277b12d7 | 7a874e0803911516c1785fbd6b954640a6f6d69f | /classTermLists/School_data/extract_clean.py | f5fb6f5c7ecd4565666a9cfb45415fed08b5d902 | [] | no_license | nrhawkins/kbp2015-slotfilling-implie | 15dc28d4496c13a0b5a823e889f17154a2c94d24 | ca3d362c23b7b3f40a632b62c8c71f2b7938c4d9 | refs/heads/master | 2021-01-19T20:18:30.165496 | 2016-11-28T21:30:39 | 2016-11-28T21:49:06 | 37,744,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py |
lines = file('school_data.csv','r').read().splitlines()[1:] # first line is the header.
lines = [line.replace('\"', '') for line in lines]
temp = lines
lines = []
for line in temp:
tokens = line.split(',')
lines.append(tokens[1])
lines.append(tokens[4])
temp = lines
lines = []
for line in temp:
tokens = line.split('|')
lines.extend(tokens)
temp = lines
lines = []
for line in temp:
tokens = line.split(';')
lines.extend(tokens)
temp = lines
lines = []
for line in temp:
tokens = line.split('/')
lines.extend(tokens)
lines = [line.strip() for line in lines if len(line.strip()) > 2]
# remove entries that are first or last names in us census data.
firstnames = set(file('../freebase/city/first_names.txt', 'r').read().splitlines())
lastnames = set(file('../freebase/city/last_names.txt', 'r').read().splitlines())
lines = [line for line in lines if (line.lower() not in firstnames and line.lower() not in lastnames)]
# remove entries that are normal english words.
words = set(file('../freebase/city/enable1.txt', 'r').read().splitlines())
lines = [line for line in lines if line.replace("the","").replace("The","").strip().lower() not in words]
# remove if completely lowercase.
lines = [line for line in lines if line.lower() != line]
lines = sorted(list(set(lines))) # make unique and sort.
out = file('schools','w')
out.write('\n'.join(lines))
out.close()
| [
"genelkim@cs.washington.edu"
] | genelkim@cs.washington.edu |
2c12a85637d4448821f4e08fab01976870d8fdca | b3330bd3365767b89afb9c432f4deb722b39ac1c | /python/sort/selection_sort/selection_sort_10.py | d0142c054e2de0f2b0945ab15e296cef179f94f5 | [] | no_license | hguochen/algorithms | 944df332d5b39220bd59cbd62dc74b12e335fb9e | 703e71a5cd9e002d800340df879ed475a404d092 | refs/heads/master | 2022-02-27T12:11:10.607042 | 2022-02-18T21:04:00 | 2022-02-18T21:04:00 | 13,767,503 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | # selection sort
def selection_sort(array):
"""
Divides the array into unsorted and sorted sublist. Left sublist contains
list of sorted elements, right sublist contains list of unsorted elements.
Find the least element in unsorted list and put in sorted list.
"""
# traverse the array
for i in xrange(len(array)):
# initialize min index
min_index = i
# find the least element in unsorted list and update min index
for j in xrange(i+1, len(array)):
if array[j] < array[min_index]:
min_index = j
# swap current element with min index value
array[i], array[min_index] = array[min_index], array[i]
# return array
return array
| [
"hguochen@gmail.com"
] | hguochen@gmail.com |
a01b71e2dae640d49f54d02cf08acedbab149c70 | 961931333838aebe8bd17c30c19f3994e32d76ce | /src/leetcode/bfs/279. Perfect Squares.py | 128380fcb8630cd5d95ab5e6415f0e7e36e9fcdd | [] | no_license | MTGTsunami/LeetPython | 5161f9e31dc2ab1855123c2a3a151eb6f4d889bc | f7f3839f631f08a9e5bf8a02398b940f82e43e67 | refs/heads/master | 2023-04-17T16:59:45.621291 | 2021-04-26T07:24:50 | 2021-04-26T07:24:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,624 | py | """
Given a positive integer n, find the least number of perfect square numbers (for example, 1, 4, 9, 16, ...) which sum to n.
Example 1:
Input: n = 12
Output: 3
Explanation: 12 = 4 + 4 + 4.
Example 2:
Input: n = 13
Output: 2
Explanation: 13 = 4 + 9.
"""
class MySolution(object): # A little bit larger than O(n) time
def numSquares(self, n):
"""
:type n: int
:rtype: int
"""
square = [float("inf")] * n
for i in range(1, n + 1):
sqrt = i ** 0.5
floor = int(sqrt)
if sqrt - floor == 0:
square[i - 1] = 1
nearest = floor
else:
while floor >= 1:
square[i - 1] = min(square[i - floor ** 2 - 1] + 1, square[i - 1])
floor -= 1
return square[-1]
class SolutionDP(object):
def numSquares(self, n):
"""
:type n: int
:rtype: int
"""
square = [float("inf")] * (n + 1)
square[0] = 0
for i in range(1, n + 1):
j = 1
while j * j <= i:
square[i] = min(square[i - j * j] + 1, square[i])
j += 1
return square[-1]
class SolutionMath(object):
def numSquares(self, n):
"""
:type n: int
:rtype: int
"""
def isSquare(n):
return (n ** 0.5 - int(n ** 0.5)) == 0
# Based on Lagrange's Four Square theorem, there
# are only 4 possible results: 1, 2, 3, 4.
# If n is a perfect square, return 1.
if isSquare(n):
return 1
# The result is 4 if and only if n can be written in the form of 4^k*(8*m + 7).
# Please refer to Legendre's four-square theorem.
while n % 4 == 0:
n /= 4
if n % 8 == 7:
return 4
for i in range(1, int(n ** 0.5) + 1):
if isSquare(n - i * i):
return 2
return 3
class SolutionBFS(object): # Important
def numSquares(self, n):
"""
:type n: int
:rtype: int
"""
depth = 0
nodes = set([n])
edges = [i * i for i in range(1, int(n ** 0.5) + 1)]
while True:
depth += 1
nextLevel = set()
for node in nodes:
for edge in edges:
if edge == node:
return depth
elif edge < node:
nextLevel.add(node - edge)
else:
break
nodes = nextLevel
| [
"mtgtsunami1219@gmail.com"
] | mtgtsunami1219@gmail.com |
e1de656ba854ec86201e55c1fead133e35c7c2ec | 38e5c33200e2fe87cc39a53a2fe19a807ebeca2c | /manage.py | 044b5ba9512a1c8ff405cf17fb472998e6494abf | [] | no_license | Hitmantejas/Todo-Django | 623157466b808881cc4cdba69ba73db899ca6550 | 8648be091abc0ddb9fd277e03a25550e360aaeea | refs/heads/master | 2023-03-03T19:41:57.187086 | 2021-02-07T07:49:16 | 2021-02-07T07:49:16 | 336,728,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'justdoit.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"tejasbhorde86198@gmail.com"
] | tejasbhorde86198@gmail.com |
ecb16f5a630ad95d5176e9cf813bbe2a8af4ba4d | ffd0f64525bc95b51b422b2067daee166adbea24 | /src/game.py | dadb665d014e49dd49e4a2300595a0f6ddd3802a | [] | no_license | yotam5/NiChaudNiFroid | 54ace05d53cdfecc6a98a803563127d687856ce4 | e125dfb12a2051f61a5cd162fce485119503f14a | refs/heads/main | 2023-07-05T03:19:39.206708 | 2021-08-12T16:46:57 | 2021-08-12T16:46:57 | 395,438,148 | 0 | 0 | null | 2021-08-12T20:37:16 | 2021-08-12T20:37:16 | null | UTF-8 | Python | false | false | 1,384 | py | import pygame
from src.scenes.room import RoomScene
from src.scenes.menu import MenuScene
from src.scenes.gameover import GameOverScene
class Game:
def __init__(self):
self.screen = pygame.display.set_mode((1280, 720), 8)
pygame.display.set_caption("Ni Chaud Ni Froid")
self.current_scene = None
self.is_running = False
self.clock = pygame.time.Clock()
self.font = pygame.font.Font("assets/font/Silver.ttf", 48)
def run(self):
self.is_running = True
self.current_scene = MenuScene(self)
while self.is_running:
self.loop()
def loop(self):
self.dt = self.clock.tick(60)
self.handle_event()
# Current_scene
self.current_scene.handle_event()
self.current_scene.update()
self.current_scene.draw()
pygame.display.flip()
def handle_event(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.kill()
def kill(self):
self.is_running = False
def change_scene(self, scene: str):
pygame.mixer.music.stop()
if scene == "menu":
self.current_scene = MenuScene(self)
elif scene == "room":
self.current_scene = RoomScene(self)
elif scene == "game_over":
self.current_scene = GameOverScene(self)
| [
"asakosan.pro@protonmail.com"
] | asakosan.pro@protonmail.com |
447014af8d175ebb2bf8ef23621961213678fa88 | 2baa8fe44836e3380fb986a7e796f1a70b467c3e | /call-propagate.py | e7fc8d0a289e52044c79dcfef8de1fcf457d0017 | [] | no_license | jancoufal/pyshards | d50c7349868403e1e6e0a07b4e71e7ecaa4a5bda | b1a3ad45fed47c7bd357cb39ae475562ac089796 | refs/heads/master | 2023-03-15T22:00:06.633852 | 2023-03-14T21:54:13 | 2023-03-14T21:54:13 | 179,240,741 | 0 | 0 | null | 2019-06-24T13:37:42 | 2019-04-03T08:04:19 | Python | UTF-8 | Python | false | false | 1,160 | py | #!/bin/env python3
class TestEvents(object):
def on_foo(self):
pass
def on_bar(self, bar_size):
pass
def on_baz(self, baz_size=None):
pass
class TestEventInvoker(object):
def __init__(self, event_handler):
self._events = event_handler
def invoke_events(self):
self._events.on_foo()
self._events.on_bar(42)
self._events.on_baz(baz_size=1337)
class EventPropagator(object):
def test(self):
ei = TestEventInvoker(self._create_test_event_proxy())
ei.invoke_events()
def _create_test_event_proxy(self):
class E(TestEvents):
def __init__(self, owner):
self._owner = owner
def __getattribute__(self, item):
print('__getattribute__', item)
cb_map = {
TestEvents.on_foo.__name__: self._owner._on_test_event,
TestEvents.on_bar.__name__: self._owner._on_test_event,
TestEvents.on_baz.__name__: self._owner._on_test_event,
}
return cb_map[item]
return E(self)
def _on_test_event(self, *args, **kwargs):
print('_on_test_event', *args, **kwargs)
def _private_method(self):
print('private_method')
def main():
ep = EventPropagator()
ep.test()
if __name__ == '__main__':
main()
| [
"jcoufal@netsuite.com"
] | jcoufal@netsuite.com |
cb37297816fccafca71a0535f7833e344f4d556f | ac878a24dad53864803a2b36c594c1513a0433dd | /Ch2/variables_start.py | 593cc56822203caa661fc2766e08af0b93b6704e | [] | no_license | anilsa1400/MyPythonBeginner | 5af426bd6a8aefe1c627cc3b2cc98bc60b53fdad | 95b6666e67495b1361167c56a72c4da5df85cce9 | refs/heads/main | 2023-04-29T13:45:35.942934 | 2021-05-19T14:23:54 | 2021-05-19T14:23:54 | 366,984,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | #
# Example file for variables
#
# Declare a variable and initialize it
f = 0
# print(f)
# re-declaring the variable works
# f = "abc"
# print(f)
# ERROR: variables of different types cannot be combined
# print("This is string " + str(123))
# Global vs. local variables in functions
def someFunction():
global f
f = "def"
print(f)
someFunction()
print(f)
del f
print(f)
| [
"anilsa1400@gmail.com"
] | anilsa1400@gmail.com |
bea22b6380e1f80d8d293727e249b8f9779d9fc8 | bf8ba111da414391c9bfc218579ec2d220865764 | /fiterp/fiterp/fiterp/doctype/leave_application_fiterp/test_leave_application_fiterp.py | 259ed56e02b1d6058f04b5c1f70f4da293992390 | [
"MIT"
] | permissive | AbrahamMan/fiterp | e0f76c59a3d31ebf38ce957612cec44d16fe6213 | a0ae1cc4f7b848dbbf51c870319e0aade4b92170 | refs/heads/master | 2021-04-27T19:39:08.494755 | 2018-01-28T11:10:13 | 2018-01-28T11:10:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2018, ITKMITL and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestLeaveApplicationfiterp(unittest.TestCase):
pass
| [
"nop_itkmitl@hotmail.com"
] | nop_itkmitl@hotmail.com |
19f208a28f2114ce1f37472b9eea2b4a1cf7f629 | 991f6bcf44d937b46843d33d78b4d0fa9c1c18f7 | /courses/urls.py | 3948a5b5310e22502c4fefb909c68aa2202ef956 | [] | no_license | Sajeerks/django-API | a15331d4e5f4d2baa0c28f84ffc17197ac1cb898 | cbc8338e659554ac368eb0f0f5ad727193ba76eb | refs/heads/main | 2023-01-13T09:23:26.700043 | 2020-11-10T05:59:11 | 2020-11-10T05:59:11 | 311,560,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
from django.urls import path,include
from .import views
from rest_framework import routers
router = routers.DefaultRouter()
router.register('courses', views.CourseView)
urlpatterns = [
path("",include(router.urls))
]
| [
"sajeersayed@gmai.com"
] | sajeersayed@gmai.com |
c9ec417f68e16aaa3a781bc04a7e47b8cffff73c | c8c0d3e83dbec83ccb89a751dc3e656bb482a2ce | /ZombieGame/modules/coordinates.py | 2e949bd8c3dcf4cac12328f9fe1025eaec8889dd | [] | no_license | Yamase31/python-zombie-game | 80658bcfcb05b819265dfc75c5563391f19b1861 | dfd931ecf5caac9348b652862fc0b018979491d9 | refs/heads/main | 2023-07-07T08:58:43.314898 | 2021-08-10T00:33:36 | 2021-08-10T00:33:36 | 394,479,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py |
# Quick coordinate class to contain both x and y
# Overrides == for easy comparison
class Coordinates(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __sub__(self, other):
if type(other) == int:
return Coordinates(self.x - other, self.y - other)
if type(other) == Coordinates:
return Coordinates(self.x - other.x, self.y - other.y)
if type(other) == tuple:
return Coordinates(self.x - other[0], self.y - other[1])
def __add__(self, other):
if type(other) == int:
return Coordinates(self.x + other, self.y + other)
if type(other) == Coordinates:
return Coordinates(self.x + other.x, self.y + other.y)
if type(other) == tuple:
return Coordinates(self.x + other[0], self.y + other[1])
def __len__(self):
return 2
def __iter__(self):
self.current = 0
return self
def __next__(self):
if self.current >= len(self):
raise StopIteration
else:
self.current += 1
if self.current == 1:
return self.x
else:
return self.y
if __name__ == '__main__':
c = Coordinates(5,6)
print(*c)
| [
"noreply@github.com"
] | noreply@github.com |
ab918db583fa2a00e03db1802c67bd5310401427 | 22e6e7636f0dd4bb0e575735e71a81bc345cdd7a | /plots_direct_compression.py | bf924b26539e3cf04bb4962cd98f711a604fc606 | [] | no_license | navjotk/error_propagation | dcab4e7fa079fdb050d02ad11dc3d9a93e54479a | 09285eb54cb31deeb95372b872890cfe1cccfa4b | refs/heads/master | 2023-04-16T23:15:00.022161 | 2022-03-23T14:10:36 | 2022-03-23T14:10:36 | 203,205,530 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,816 | py | import h5py
import pyzfp
from argparse import ArgumentParser
import matplotlib
import tikzplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt # noqa
from util import error_L0, error_L1, error_L2, error_Linf, error_psnr, write_results # noqa
error_metrics = {'L0': error_L0, 'L1': error_L1, 'L2': error_L2,
'Linf': error_Linf, 'psnr': error_psnr}
description = ("Script to calculate error on direct compression of wavefields")
parser = ArgumentParser(description=description)
parser.add_argument("--filename", type=str, required=False,
default="uncompressed.h5")
parser.add_argument("--plot", type=str, required=False, default="L0",
choices=error_metrics.keys())
args = parser.parse_args()
filename = args.filename
plot = args.plot
f = h5py.File(filename, 'r')
field = f['data'][()].astype(np.float64)
tolerances = [10**x for x in range(0, -17, -1)]
error_to_plot = []
for atol in tolerances:
print("Compressing at tolerance %s" % str(atol))
compressed = pyzfp.compress(field, tolerance=atol)
decompressed = pyzfp.decompress(compressed, shape=field.shape,
dtype=field.dtype, tolerance=atol)
computed_errors = {}
computed_errors['cf'] = len(field.tostring())/float(len(compressed))
for k, v in error_metrics.items():
computed_errors[k] = v(field, decompressed)
error_function = error_metrics[plot]
error_to_plot.append(computed_errors[plot])
computed_errors['tolerance'] = atol
write_results(computed_errors, 'direct_compression_results.csv')
plt.xscale('log')
plt.yscale('log')
plt.plot(tolerances, error_to_plot)
plt.xlabel("atol")
plt.ylabel(plot)
plt.savefig("direct_%s.pdf" % plot, bbox_inches='tight')
tikzplotlib.save("direct_%s.tex" % plot) | [
"navjotk@gmail.com"
] | navjotk@gmail.com |
83b9398ebef1b2841d29cff940e0595b3f5478ce | aa03bf381871d69fd93143c1697cdcd421cbe7e8 | /src/imageqa_visprior.py | 8c75929f9f66ec24a11fe8b9521fbd7954f5eb17 | [
"MIT"
] | permissive | standardgalactic/imageqa-public | 369073d2e3a9a454986533bb872445c8cafab95f | 4e3ceb092495fb8c1056e55b870631907bb31d46 | refs/heads/master | 2023-06-22T11:59:09.031307 | 2016-03-23T21:56:07 | 2016-03-23T21:56:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,898 | py | import sys
import os
import numpy as np
import nn
import imageqa_test as it
from nltk.corpus import wordnet
# Module-level memoization cache for lookupLexname:
# word -> WordNet lexicographer name (e.g. 'noun.artifact').
lexnameDict = {}
def lookupLexname(word):
    """Return the WordNet lexicographer name of *word*, or None.

    Uses the first (most common) synset only. Results, including None
    for words unknown to WordNet, are memoized in the module-level
    lexnameDict so repeated lookups never hit WordNet twice.
    """
    # dict.has_key was removed in Python 3; `in` works in both.
    if word in lexnameDict:
        return lexnameDict[word]
    synsets = wordnet.synsets(word)
    # Just pick the first definition
    if len(synsets) > 0:
        lexname = synsets[0].lexname()
    else:
        lexname = None
    # Cache misses too, so unknown words are not re-queried every call.
    lexnameDict[word] = lexname
    return lexname
def locateObjLocation(data, questionDict, questionIdict):
    """
    Locate the object of "where" questions.
    Very naive heuristic: take the noun immediately after "where".

    :param data: (T, 1) int array of word IDs for one question
                 (IDs are 1-based into questionIdict).
    :param questionDict: word -> ID mapping.
    :param questionIdict: list mapping ID-1 -> word.
    :return: word ID of the located object; falls back to the last
             token (printing a notice) when no match is found.
    """
    where = questionDict['where']
    for t in range(data.shape[0] - 1):
        if data[t, 0] == where:
            for u in range(t + 1, data.shape[0]):
                word = questionIdict[data[u, 0] - 1]
                lexname = lookupLexname(word)
                # Accept nouns; words unknown to WordNet are treated
                # as nouns too (likely rare object names).
                if lexname is None or lexname.startswith('noun'):
                    return data[u, 0]
    print('not found')
    return data[-1, 0]
def locateObjNumberNoun(data, questionDict, questionIdict):
    """
    Locate the object of "how many" questions.
    Very naive heuristic: take the noun immediately after "how many".

    :param data: (T, 1) int array of word IDs for one question
                 (IDs are 1-based into questionIdict).
    :param questionDict: word -> ID mapping.
    :param questionIdict: list mapping ID-1 -> word.
    :return: word ID of the located object; falls back to the last
             token (printing a notice) when no match is found.
    """
    how = questionDict['how']
    many = questionDict['many']
    for t in range(data.shape[0] - 2):
        if data[t, 0] == how and \
                data[t + 1, 0] == many:
            for u in range(t + 2, data.shape[0]):
                word = questionIdict[data[u, 0] - 1]
                lexname = lookupLexname(word)
                # Accept nouns; words unknown to WordNet are treated
                # as nouns too (likely rare object names).
                if lexname is None or lexname.startswith('noun'):
                    return data[u, 0]
    print('not found')
    return data[-1, 0]
def locateObjNumber(data, questionDict):
    """
    Locate the object of "how many" questions.
    Very naive heuristic: take the word immediately after "how many".

    :param data: (T, 1) int array of word IDs for one question.
    :param questionDict: word -> ID mapping.
    :return: word ID immediately following the "how many" bigram, or
             None (after printing a notice) if the bigram is absent.
    """
    how = questionDict['how']
    many = questionDict['many']
    for t in range(data.shape[0] - 2):
        if data[t, 0] == how and \
                data[t + 1, 0] == many:
            return data[t + 2, 0]
    print('not found')
def locateObjColor(data):
    """Return the word ID just before the first zero (padding) token.

    Heuristic for color questions: the object is assumed to be the
    last real word before the padded tail of the question.
    """
    last_word = 0
    for idx in range(data.shape[0]):
        current = data[idx, 0]
        if current == 0:
            # Hit padding: the previously seen word is the object.
            return last_word
        last_word = current
def extractObjId(
        data,
        questionType,
        questionDict,
        questionIdict):
    """
    Extract the object word ID of every question in a batch using the
    heuristic matching the question type.

    :param data: (N, T, 1) int array of word IDs, one row per question.
    :param questionType: one of 'color', 'number', 'location'.
    :param questionDict: word -> ID mapping.
    :param questionIdict: list mapping ID-1 -> word.
    :return: (N,) int array of object word IDs.
    :raises ValueError: on an unrecognized questionType.
    """
    objIds = []
    for n in range(data.shape[0]):
        if questionType == 'color':
            objId = locateObjColor(data[n])
        elif questionType == 'number':
            objId = locateObjNumberNoun(data[n], questionDict, questionIdict)
        elif questionType == 'location':
            objId = locateObjLocation(data[n], questionDict, questionIdict)
        else:
            # Fail fast; previously an unknown type crashed later with
            # an uninformative NameError on the append below.
            raise ValueError('Unknown question type: %s' % questionType)
        objIds.append(objId)
    return np.array(objIds, dtype='int')
def reindexObjId(
        inputData,
        objDict,
        questionDict,
        questionIdict,
        questionType):
    """Map each question's located object word to its ID in objDict.

    Words unseen during training fall back to the 'UNK' entry.

    :param inputData: (N, T, 1) int array of question word IDs.
    :param objDict: object word -> compact object ID (from buildObjDict).
    :returns: (N,) int array of object IDs in objDict's index space.
    """
    questionIdictArray = np.array(questionIdict, dtype='object')
    objIds = extractObjId(
        inputData,
        questionType,
        questionDict,
        questionIdict)
    # Word IDs are 1-based; shift to index into questionIdict.
    objIds = objIds - 1
    obj = questionIdictArray[objIds]
    objIds2 = np.zeros(objIds.shape, dtype='int')
    for i in range(obj.shape[0]):
        if objDict.has_key(obj[i]):
            objIds2[i] = objDict[obj[i]]
        else:
            objIds2[i] = objDict['UNK']
    return objIds2
def buildObjDict(
        trainData,
        questionType,
        questionDict,
        questionIdict):
    """Build a compact vocabulary of object words seen in the train set.

    :param trainData: (input, target) pair; only the input is used here.
    :returns: (objDict, objIdict) -- word -> ID mapping and its inverse
              list; the final entry is always 'UNK'.
    """
    objDict = {}
    objIdict = []
    objIds = extractObjId(
        trainData[0],
        questionType,
        questionDict,
        questionIdict)
    # Word IDs are 1-based; shift to index into questionIdict.
    objIds = objIds - 1
    questionIdictArray = np.array(questionIdict, dtype='object')
    objList = questionIdictArray[objIds]
    for obj in objList:
        if not objDict.has_key(obj):
            objDict[obj] = len(objIdict)
            objIdict.append(obj)
    # Reserve the last slot for unseen words.
    objDict['UNK'] = len(objIdict)
    objIdict.append('UNK')
    return objDict, objIdict
def trainCount(
        trainData,
        questionType,
        questionDict,
        questionIdict,
        objDict,
        objIdict,
        numAns):
    """
    Calculates count(w, a), count(a): co-occurrence counts of object
    words and answers over the training set, plus answer marginals.
    """
    freq_word_ans = np.zeros((len(objIdict), numAns))
    freq_ans = np.zeros((numAns))
    located = extractObjId(
        trainData[0],
        questionType,
        questionDict,
        questionIdict)
    answers = trainData[1]
    for row, word_id in enumerate(located):
        token = questionIdict[word_id - 1]
        ans_id = answers[row, 0]
        compact_id = objDict[token]
        freq_word_ans[compact_id, ans_id] += 1
        freq_ans[ans_id] += 1
    # Add UNK count
    freq_ans[-1] += 1
    return freq_word_ans, freq_ans
def runVisPriorOnce(
        objId,
        count_wa,
        count_a,
        modelOutput,
        delta):
    """Combine the language prior P(w|a) with the model's P(a|i).

    :param objId: (n,) int array of object IDs per example.
    :param count_wa: (num_obj, c) float co-occurrence counts.
    :param count_a: (c,) float answer counts.
    :param modelOutput: (n, c) model answer probabilities P(a|i).
    :param delta: additive smoothing constant.
    :returns: (n, c) normalized posterior P(a|w,i).
    """
    # Smoothed conditional P(w | a), one row per example.
    likelihood = count_wa[objId, :]
    likelihood /= count_a[:]
    likelihood += delta
    likelihood /= (modelOutput.shape[1] * delta + 1)
    # Unnormalized joint ~ P(w | a) * P(a | i), shape (n, c).
    joint = likelihood * modelOutput
    # Normalize each row to obtain the posterior.
    row_sums = np.sum(joint, axis=1).reshape(joint.shape[0], 1)
    return joint / row_sums
def calcRate(output, target):
    """Accuracy of argmax predictions against integer targets.

    :returns: (rate, flat_predictions, per_example_0_1_correctness).
    """
    predictions = np.argmax(output, axis=-1).reshape(-1)
    flat_target = target.reshape(-1)
    correct = (predictions == flat_target).astype('int')
    accuracy = np.sum(correct) / float(target.size)
    return accuracy, predictions, correct
def validDelta(
        trainData,
        validData,
        preVisModelOutput,
        questionDict,
        questionIdict,
        numAns,
        deltas,
        questionType):
    """Grid-search the smoothing delta on the validation set.

    NOTE(review): the parameter preVisModelOutput is never used; the body
    reads the global preVisModel (defined in __main__) instead -- confirm
    this is intentional before reusing this function elsewhere.
    """
    objDict, objIdict = buildObjDict(
        trainData,
        questionType,
        questionDict,
        questionIdict)
    count_wa, count_a = trainCount(
        trainData,
        questionType,
        questionDict,
        questionIdict,
        objDict,
        objIdict,
        numAns)
    print count_wa
    # Reindex valid set
    validInput = validData[0]
    validTarget = validData[1]
    # NOTE(review): validTargetReshape is computed but never used.
    validTargetReshape = validTarget.reshape(validTarget.size)
    validObjId = reindexObjId(
        validInput,
        objDict,
        questionDict,
        questionIdict,
        questionType)
    # Run vis model on valid set
    validOutput = nn.test(preVisModel, validInput)
    print 'Before Prior Valid Accuracy:',
    rate, _, __ = calcRate(validOutput, validTarget)
    print rate
    # Determine best delta
    bestRate = 0.0
    bestDelta = 0.0
    for delta in deltas:
        visPriorOutput = runVisPriorOnce(
            validObjId,
            count_wa,
            count_a,
            validOutput,
            delta)
        print 'delta=%f Valid Accuracy:' % delta,
        rate, _, __ = calcRate(visPriorOutput, validTarget)
        print rate
        if rate > bestRate:
            bestRate = rate
            bestDelta = delta
    print 'Best Delta:', bestDelta
    return bestDelta
def runVisPrior(
        trainData,
        testData,
        questionType,
        visModel,
        questionDict,
        questionIdict,
        numAns,
        delta):
    """Train the word/answer prior on trainData and apply it to the
    visModel's test-set outputs; returns the combined probabilities."""
    objDict, objIdict = buildObjDict(
        trainData,
        questionType,
        questionDict,
        questionIdict)
    count_wa, count_a = trainCount(
        trainData,
        questionType,
        questionDict,
        questionIdict,
        objDict,
        objIdict,
        numAns)
    print count_wa
    # Reindex test set
    testInput = testData[0]
    testTarget = testData[1]
    # NOTE(review): testTargetReshape is computed but never used.
    testTargetReshape = testTarget.reshape(testTarget.size)
    testObjId = reindexObjId(
        testInput,
        objDict,
        questionDict,
        questionIdict,
        questionType)
    # Run vis model on test set
    testOutput = nn.test(visModel, testInput)
    print 'Before Prior Test Accuracy:',
    rate, _, __ = calcRate(testOutput, testTarget)
    print rate
    # Run on test set
    visPriorOutput = runVisPriorOnce(
        testObjId,
        count_wa,
        count_a,
        testOutput,
        delta)
    print 'delta=%f Test Accuracy:' % delta,
    rate, _, __ = calcRate(visPriorOutput, testTarget)
    print rate
    return visPriorOutput
def combineTrainValid(trainData, validData):
    """Concatenate train and valid splits into one (input, target) pair."""
    merged_inputs = np.concatenate((trainData[0], validData[0]), axis=0)
    merged_targets = np.concatenate((trainData[1], validData[1]), axis=0)
    return (merged_inputs, merged_targets)
def calcAdaBoostAlpha(testOutput, testTarget):
    """Compute the SAMME (multi-class AdaBoost) model weight alpha from
    the model's accuracy on (testOutput, testTarget).

    NOTE(review): rate == 1.0 or 0.0 makes the log blow up
    (division by zero / log of zero) -- callers should avoid
    perfect/degenerate classifiers here.
    """
    print 'Calculating alpha for boosting...'
    rate, _, correct = calcRate(testOutput, testTarget)
    alpha = np.log(rate / (1 - rate)) + np.log(float(testOutput.shape[1] - 1))
    print 'alpha:', alpha
    return alpha
def calcAdaBoostWeights(trainOutput, trainTarget, alpha):
    """Compute per-example AdaBoost weights: misclassified examples get
    exp(+alpha), correct ones exp(-alpha); normalized to mean 1."""
    print 'Calculating weights for boosting...'
    rate, _, correct = calcRate(trainOutput, trainTarget)
    print correct
    print 'Train set rate:', rate
    # Map correctness {1, 0} to {-1, +1} so wrong answers are upweighted.
    correct2 = -(correct.astype('float32') - 0.5) * 2
    weights = np.exp(correct2 * alpha)
    weights /= np.sum(weights)
    weights *= weights.shape[0]
    print 'weights:', weights
    return weights
if __name__ == '__main__':
    """
    Usage:
    python imageqa_visprior.py
        -pvid {preVisModelId}
        -vid {visModelId}
        -mid {mainModelId}
        -bid {boostModelId}
        -vd[ata] {visDataFolder}
        -md[ata] {mainDataFolder}
        -r[esults] {resultsFolder}
        -qtype {color/number/location}
        -o[utweights] {outputFolder}
    """
    # Hand-rolled flag parsing over sys.argv; each flag consumes the
    # following argv entry as its value.
    questionType = 'color'
    visModelId = None
    mainModelId = None
    boostModelId = None
    outputWeightsFolder = None
    for i, flag in enumerate(sys.argv):
        if flag == '-pvid':
            preVisModelId = sys.argv[i + 1]
        elif flag == '-vid':
            visModelId = sys.argv[i + 1]
        elif flag == '-mid':
            mainModelId = sys.argv[i + 1]
        elif flag == '-bid':
            boostModelId = sys.argv[i + 1]
        elif flag == '-vd' or flag == '-vdata':
            visDataFolder = sys.argv[i + 1]
        elif flag == '-md' or flag == '-mdata':
            mainDataFolder = sys.argv[i + 1]
        elif flag == '-r' or flag == '-results':
            resultsFolder = sys.argv[i + 1]
        elif flag == '-qtype':
            questionType = sys.argv[i + 1]
        elif flag == '-o' or flag == '-outweights':
            outputWeightsFolder = sys.argv[i + 1]
    # Load the dataset and search the smoothing delta on the valid set.
    data = it.loadDataset(visDataFolder)
    testInput = data['testData'][0]
    testTarget = data['testData'][1]
    deltas = \
        [0.000001,
        0.000005,
        0.00001,
        0.00005,
        0.0001,
        0.0005,
        0.001,
        0.005,
        0.01,
        0.05,
        0.1,
        0.5,
        1.0]
    preVisModel = it.loadModel(preVisModelId, resultsFolder)
    print 'Num answer', len(data['ansIdict'])
    bestDelta = validDelta(
        data['trainData'],
        data['validData'],
        preVisModel,
        data['questionDict'],
        data['questionIdict'],
        len(data['ansIdict']),
        deltas,
        questionType)
trainDataAll = combineTrainValid(data['trainData'], data['validData'])
visModel = it.loadModel(visModelId, resultsFolder)
visTestOutput = runVisPrior(trainDataAll,
data['testData'],
questionType,
visModel,
data['questionDict'],
data['questionIdict'],
len(data['ansIdict']),
bestDelta)
visModelFolder = os.path.join(resultsFolder, visModelId)
answerFilename = os.path.join(visModelFolder,
visModelId + '_prior.test.o.txt')
truthFilename = os.path.join(visModelFolder,
visModelId + '_prior.test.t.txt')
it.outputTxt(
visTestOutput,
testTarget,
data['ansIdict'],
answerFilename,
truthFilename,
topK=1,
outputProb=False)
it.runWups(answerFilename, truthFilename)
    # Optional: ensemble with a main (BLSTM) model trained on a
    # different vocabulary -- remap test word IDs into its dictionary.
    if mainModelId is not None:
        data_m = it.loadDataset(mainDataFolder)
        ansDict_m = data_m['ansDict']
        ansIdict = data['ansIdict']
        questionDict_m = data_m['questionDict']
        questionIdict = data['questionIdict']
        newTestInput = np.zeros(testInput.shape, dtype='int')
        for n in range(testInput.shape[0]):
            newTestInput[n, 0, 0] = testInput[n, 0, 0]
            for t in range(1, testInput.shape[1]):
                if testInput[n, t, 0] != 0:
                    word = questionIdict[testInput[n, t, 0] - 1]
                    newTestInput[n, t, 0] = questionDict_m[word]
                else:
                    break
        mainModel = it.loadModel(mainModelId, resultsFolder)
        mainTestOutput = nn.test(mainModel, newTestInput)
        # Need to extract the class output from mainTestOutput
        classNewId = []
        for ans in ansIdict:
            classNewId.append(ansDict_m[ans])
        classNewId = np.array(classNewId, dtype='int')
        mainTestOutput = mainTestOutput[:, classNewId]
        # Sweep mixing ratios between the two models.
        # NOTE(review): the sweep range is len(ansIdict) but mixRatio is
        # i / 10.0, so ratios exceed 1.0 when there are > 10 answers --
        # confirm the intended range is range(11).
        for i in range(len(ansIdict)):
            mixRatio = i / 10.0
            ensTestOutput = mixRatio * visTestOutput + \
                            (1 - mixRatio) * mainTestOutput
            print '%.2f VIS+PRIOR & %.2f VIS+BLSTM Accuracy:' % \
                (mixRatio, 1 - mixRatio),
            rate, _, __ = calcRate(ensTestOutput, testTarget)
            print rate
    # Optional: AdaBoost-style weighted average with a boosted model.
    if boostModelId is not None:
        boostModel = it.loadModel(boostModelId, resultsFolder)
        boostTestOutput = nn.test(boostModel, testInput)
        alpha = calcAdaBoostAlpha(visTestOutput, testTarget)
        alphaBoost = calcAdaBoostAlpha(boostTestOutput, testTarget)
        finalTestOutput = (alpha * visTestOutput + \
                            alphaBoost * boostTestOutput) / \
                            (alpha + alphaBoost)
        rate, _, __ = calcRate(finalTestOutput, testTarget)
        answerFilename = os.path.join(visModelFolder,
                                    visModelId + '_boost.test.o.txt')
        truthFilename = os.path.join(visModelFolder,
                                    visModelId + '_boost.test.t.txt')
        it.outputTxt(
            finalTestOutput,
            testTarget,
            data['ansIdict'],
            answerFilename,
            truthFilename,
            topK=1,
            outputProb=False)
        it.runWups(answerFilename, truthFilename)
    # Optional: emit AdaBoost example weights for a later boosting round.
    if outputWeightsFolder is not None:
        if not os.path.exists(outputWeightsFolder):
            os.makedirs(outputWeightsFolder)
        alpha = calcAdaBoostAlpha(visTestOutput, testTarget)
        visTrainOutput = runVisPrior(trainDataAll,
                            trainDataAll,
                            questionType,
                            visModel,
                            data['questionDict'],
                            data['questionIdict'],
                            len(data['ansIdict']),
                            bestDelta)
        weights = calcAdaBoostWeights(visTrainOutput, trainDataAll[1], alpha)
        # Split the combined weights back into train / valid portions.
        trainWeights = weights[:data['trainData'][1].shape[0]]
        validWeights = weights[trainWeights.shape[0]:]
        np.save(os.path.join(outputWeightsFolder, 'adb-weights-train.npy'), trainWeights)
np.save(os.path.join(outputWeightsFolder, 'adb-weights-valid.npy'), validWeights) | [
"renmengye@gmail.com"
] | renmengye@gmail.com |
b259dc41599ee92dc043a282fa8235f280b9d968 | 6f0a01dde7bf2998cd09e404cc4e2633fbf45dee | /archive/Tensorflow-101/ep-6-broadcasting.py | cee70b546ae0796a84c3a2c40cb581e09e075af2 | [
"MIT"
] | permissive | IncredibleDevHQ/incredible-dev-videos | b8a26b6ac6efad33f0e8dc2903c2b441ab106a69 | 38d5b3d85fd21b8ec8043b90312b500da398d9f9 | refs/heads/main | 2023-06-01T21:05:00.010981 | 2021-06-21T14:57:41 | 2021-06-21T14:57:41 | 341,483,252 | 2 | 2 | MIT | 2021-06-08T11:56:08 | 2021-02-23T08:32:00 | Python | UTF-8 | Python | false | false | 576 | py | # Broadcasting
import tensorflow as tf
# Scalar and same-shape multiplication all produce the same result here.
x = tf.constant([1, 2, 3])
y = tf.constant(2)
z = tf.constant([2, 2, 2])
tf.print(tf.multiply(x, 2)) #[2 4 6]
tf.print(x * y) #[2 4 6]
tf.print(x * z) #[2 4 6]
# (3, 1) * (4,) broadcasts to a (3, 4) outer product.
x = tf.reshape(x,[3,1])
y = tf.range(1, 5)
print(tf.multiply(x, y))
# [[ 1 2 3 4]
# [ 2 4 6 8]
# [ 3 6 9 12]]
# The explicitly "stretched" tensors reproduce what broadcasting does
# implicitly above.
x_stretch = tf.constant(
    [[1, 1, 1, 1],
    [2, 2, 2, 2],
    [3, 3, 3, 3]])
y_stretch = tf.constant(
    [[1, 2, 3, 4],
    [1, 2, 3, 4],
    [1, 2, 3, 4]])
print(x_stretch * y_stretch)
print(tf.broadcast_to(
tf.constant([1, 2, 3,4]), [3, 4]
)) | [
"ashwin.mirskar@gmail.com"
] | ashwin.mirskar@gmail.com |
fb23d8cad946047b56dd79262c71561d4d0dbf1d | 465b778006c691d9553948b58119978604a2bf04 | /setup.py | ee0defde614c583d08699ff84b5dccc40d936c4c | [
"MIT"
] | permissive | nosarthur/coi | 2087d4ae4499ea5a46aadfa1ed4e21fed93632ab | 13efb27adbda111debdc51550bfde3e9fb56eb9b | refs/heads/main | 2023-07-14T15:36:18.519730 | 2021-08-27T17:22:35 | 2021-08-27T17:22:35 | 396,001,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,240 | py | from setuptools import setup
# Use the README as the PyPI long description.
long_description = None
with open('README.md', encoding='utf-8') as f:
    long_description = f.read()
# Package metadata for the 'coi' console tool.
setup(
    name='coi',
    packages=['coi'],
    version='0.0.8',
    license='MIT',
    description='See output/input status of all sub-folders',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/nosarthur/coi',
    platforms=['linux', 'osx', 'win32'],
    keywords=['bash'],
    author='Dong Zhou',
    author_email='zhou.dong@gmail.com',
    entry_points={'console_scripts': ['coi = coi.__main__:main']},
    python_requires='~=3.6',
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: POSIX",
        "Operating System :: MacOS :: MacOS X",
        "Topic :: Software Development :: Version Control :: Git",
        "Topic :: Terminals",
        "Topic :: Utilities",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    include_package_data=True,
)
| [
"zhou.dong@gmail.com"
] | zhou.dong@gmail.com |
2db11fc713334d1c4d17ecf444cf9726e26cc5dd | 055cf8aeec011f67580bf92a83d94ee6919648cd | /migrations/versions/ad28a44f93c4_initial_migration.py | 18999b6182f1570c2b30ca638cbdbed3b8a6a43e | [
"MIT",
"LicenseRef-scancode-other-permissive"
] | permissive | RisperAkinyi/BlogPost | df82c8fec558425ca1bbce65aa90464176aefb87 | f8ee4c887fceae8e70410b66a12bc5680cf26044 | refs/heads/master | 2022-09-30T19:09:27.969983 | 2019-08-13T07:36:26 | 2019-08-13T07:36:26 | 201,879,164 | 0 | 0 | MIT | 2022-09-16T18:07:44 | 2019-08-12T07:22:39 | Python | UTF-8 | Python | false | false | 2,128 | py | """Initial Migration
Revision ID: ad28a44f93c4
Revises:
Create Date: 2019-08-09 11:05:50.912878
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ad28a44f93c4'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the users, comments and posts tables plus user indexes."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=255), nullable=True),
    sa.Column('email', sa.String(length=255), nullable=True),
    sa.Column('bio', sa.String(length=255), nullable=True),
    sa.Column('profile_pic_path', sa.String(), nullable=True),
    sa.Column('password_hash', sa.String(length=255), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
    op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=False)
    op.create_table('comments',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('comments', sa.String(), nullable=True),
    sa.Column('post_id', sa.Integer(), nullable=True),
    sa.Column('posted', sa.DateTime(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('posts',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(), nullable=True),
    sa.Column('description', sa.String(), nullable=True),
    sa.Column('posted', sa.DateTime(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the tables and indexes it created."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('posts')
    op.drop_table('comments')
    op.drop_index(op.f('ix_users_username'), table_name='users')
    op.drop_index(op.f('ix_users_email'), table_name='users')
    op.drop_table('users')
    # ### end Alembic commands ###
| [
"email@example.com"
] | email@example.com |
12e0297f0e01b59d69ca42308b250e49c4b45112 | ec1b50134c1d8b7af7374dfdbec876a0c3e9d211 | /6.0001/string.py | 767f2d972b1344a10d00129904f916c95ced4f9a | [] | no_license | Leeboyd/learnPython | 1876d3a5126cb30938a044847a99093512dd88cc | 7fc8938a702ca32a6912490c5c8aa49561f8123e | refs/heads/master | 2023-02-20T03:12:35.529317 | 2022-06-16T23:41:35 | 2022-07-05T03:39:32 | 147,643,970 | 1 | 0 | null | 2023-02-10T22:46:18 | 2018-09-06T08:42:40 | Python | UTF-8 | Python | false | false | 196 | py | s = "abcdefgh"
# Report every occurrence of 'i' or 'u' in the string.
for ch in s:
    if ch in ('i', 'u'):
        print("There is an i or u")
for char in s:
if char == 'i' or char == 'h':
print("There is an i or h") | [
"jobboy19890101@gmail.com"
] | jobboy19890101@gmail.com |
07d1801bc21bc3ce1f36e57d68f11e9aea47ae54 | f85fbab0cffaa54a136e0938715414383a4eea1f | /Challenge_climate_analysis.py | 4dc0330bdef972cf47b3e48416d2d98207c99eea | [] | no_license | varshajha28/Surfs_Up | 54bd59b9ba867573e974ce74f1bfafa6632c0fd4 | 31ff636167031c20d6de69398d142f02c0a78c73 | refs/heads/master | 2021-03-03T07:22:43.393395 | 2020-03-14T22:02:21 | 2020-03-14T22:02:21 | 245,942,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,957 | py | # Add dependancies
# NOTE(review): '%matplotlib inline' is IPython/Jupyter magic and is a
# syntax error in a plain .py file -- this script appears to be a
# notebook export and only runs inside a notebook as-is.
%matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
#Create the connection between sqlite and pandas
engine = create_engine("sqlite:///hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# We can view all of the classes that automap found
Base.classes.keys()
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
# NOTE(review): the bare .describe() calls below only display output in a
# notebook; in a plain script their results are discarded.
#Design a query to get June temperature data for all station and allthe years.
#complete the query to extract all desired results and put them in a list.
juneallresults = []
juneallresults = session.query(Measurement.date, Measurement.tobs).filter(func.extract('month', Measurement.date) == 6).all()
june_all_df = pd.DataFrame(juneallresults, columns=['date','temperatures'])
june_all_df.describe()
#june_all_df
#Design a query to get June temperature data for all station and only for two years.
# Calculate the date one year from the last date in data set.
prev_year = dt.date(2017,8,23)- dt.timedelta(days=365)
#complete the query to extract all desired results and put them in a list.
junetwoyearresults = []
junetwoyearresults = session.query(Measurement.date, Measurement.tobs).filter(func.extract('month', Measurement.date) == 6).filter(Measurement.date >= prev_year).all()
june_twoyear_df = pd.DataFrame(junetwoyearresults, columns=['date','temperatures'])
june_twoyear_df.describe()
#june_twoyear_df
#Design a query to get December temperature data for all station and all the years.
#complete the query to extract all desired results and put them in a list.
decemberallresults=[]
decemberallresults = session.query(Measurement.date, Measurement.tobs).filter(func.extract('month', Measurement.date) == 12).all()
dec_all_df = pd.DataFrame(decemberallresults, columns=['date','temperatures'])
dec_all_df.describe()
#dec_all_df
#Design a query to get December temperature data for all station and only for two years.
# Calculate the date one year from the last date in data set.
prev_year = dt.date(2017,8,23)- dt.timedelta(days=365)
#complete the query to extract all desired results and put them in a list.
dectwoyearresults=[]
#complete the query to extract all desired results and put them in a list.
dectwoyearresults = session.query(Measurement.date, Measurement.tobs).filter(func.extract('month', Measurement.date) == 12).filter(Measurement.date >= prev_year).all()
dec_twoyr_df = pd.DataFrame(dectwoyearresults, columns=['date','temperatures'])
dec_twoyr_df.describe()
#dec_twoyr_df | [
"varsha.jha@gmail.com"
] | varsha.jha@gmail.com |
5e882ab6a9df428646946d956286a2353bf597e6 | 2d382086ead32a2de90855fb02662771d7d92c7c | /all_files/migrations/0001_initial.py | 81f3677616ce3d3523a8363974caabad63a4e006 | [] | no_license | accprojects/project | b92f9ebb1ce09ee3d7dc3b77e5c1e7376d6055b2 | 56f51c9581da2c3e8805123c7cdc1652fbfac78b | refs/heads/master | 2021-05-12T19:23:15.617975 | 2018-02-13T11:38:52 | 2018-02-13T11:38:52 | 117,091,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-11 12:12
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Post model with an
    # author FK to the project's user model.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(blank=True, null=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"lianatagirova598@gmail.com"
] | lianatagirova598@gmail.com |
8b6e1e356ae27b2dc78266d986e81f9a3b47123f | 98710e1b64d2eb527efbffa7f1b3d846cce6024c | /20180801-daguan-nlp-classification/model/lr_tfidf_traintest_feature.py | 89efb82eaf5ac37fbbc9e737c6c55157675b818c | [] | no_license | SheldonWong/competition | bf6f8904cfd89d4649249e453f46f45f0a988604 | 3894c7ec5f9fca35d37702b5402dac9b7b1e04c4 | refs/heads/master | 2020-03-30T07:13:32.702321 | 2018-09-30T09:22:25 | 2018-09-30T09:22:25 | 150,924,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,721 | py | import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn import svm
from sklearn.calibration import CalibratedClassifierCV
'''
# ~/Downloads/train_set.csv
# ~/workspace/sublime/daguan/train_sample.csv
print('读取数据')
df_train = pd.read_csv('~/Downloads/train_set.csv')
df_test = pd.read_csv('~/Downloads/train_set.csv')
df_train.drop(columns=['word_seg','id'], inplace=True)
df_test.drop(columns=['word_seg'], inplace=True)
print('特征TF-IDF:')
#vectorizer = CountVectorizer(ngram_range=(1,2), min_df=3, max_df=0.9, max_features=100000)
vectorizer = TfidfVectorizer(ngram_range=(1,2), min_df=5, max_df=0.9,
use_idf=True,smooth_idf=True, sublinear_tf=True,norm='l2')
vectorizer.fit(df_train['article'])
# 训练的时候只用到词
x_train = vectorizer.transform(df_train['article'])
y_train = df_train['class'] - 1
x_test = vectorizer.transform(df_test['article'])
'''
import pickle
# Load pre-computed TF-IDF features from disk (built elsewhere).
print('载入特征:')
with open('./feature/tfidf/x_train2.pickle', 'rb') as f:
    x_train = pickle.load(f)
df_train = pd.read_csv('~/Downloads/train_set.csv')
df_test = pd.read_csv('~/Downloads/test_set.csv')
# Shift labels to be 0-based.
y_train = df_train['class'] - 1
#y_train.to_csv('./feature/y_train.csv')
with open('./feature/tfidf/x_test2.pickle', 'rb') as f3:
    x_test = pickle.load(f3)
train_X,test_X, train_y, test_y = train_test_split(x_train,
                                                   y_train,
                                                   test_size = 0.2,
                                                   random_state = 0)
# test_X is a sparse matrix
print('开始用LR训练')
# Larger C means a smaller penalty (weaker regularization).
lg = LogisticRegression(C=5,dual=True,verbose=1)
lg.fit(train_X,train_y)
y_pred= lg.predict(test_X)
accuracy = accuracy_score(test_y, y_pred)
y_prob = lg.predict_proba(test_X)
print(accuracy)
print(classification_report(test_y, y_pred))
print('开始SVM训练')
svc = svm.LinearSVC(C=5,dual=True)
lin_svc = CalibratedClassifierCV(base_estimator=svc)
# NOTE(review): the SVM is fit on the full x_train but evaluated on
# test_X, which is a subset of x_train -- this evaluates on training
# data; confirm whether train_X was intended here.
lin_svc.fit(x_train,y_train)
y_pred2= lin_svc.predict(test_X)
accuracy = accuracy_score(test_y, y_pred2)
y_prob2 = lin_svc.predict_proba(test_X)
print(accuracy)
print(classification_report(test_y, y_pred2))
# bad case
# Collect the raw feature rows as single-line strings for inspection.
# FIX: freature_l was referenced without ever being initialized, which
# raised a NameError on the first append.
freature_l = []
for row in test_X:
    freature_l.append(str(row).replace('\t',' ').replace('\n',' '))
case_df = pd.DataFrame(columns=['feature','class','pred','pred2','prob','prob2'])
case_df['feature'] = freature_l
case_df['class'] = test_y.tolist()
case_df['pred'] = y_pred
case_df['pred2'] = y_pred2
case_df['prob'] = y_prob.tolist()
case_df['prob2'] = y_prob2.tolist()
case_df.to_csv('./result/bad_case3.csv')
## Collect the indices of misclassified examples (bad cases).
b = case_df['class'] == case_df['pred']
b_l = list(b)
index_l = [i for i in range(len(b_l)) if b_l[i] == False]
c = case_df['class'] == case_df['pred2']
c_l = list(c)
index2_l = [i for i in range(len(c_l)) if c_l[i] == False]
# Indices where the two models disagree with each other.
d = case_df['pred'] == case_df['pred2']
d_l = list(d)
index3_l = [i for i in range(len(d_l)) if d_l[i] == False]
# NOTE(review): these three index lists generally have different
# lengths, and assigning unequal-length lists as DataFrame columns
# raises a ValueError in pandas -- confirm the intended output format.
ana_df = pd.DataFrame(columns=['t-l','t-s','l-s'])
ana_df['t-l'] = index_l
ana_df['t-s'] = index2_l
ana_df['l-s'] = index3_l
ana_df.to_csv('./result/bad_case3_ana.csv')
'''
y_class = lg.predict(x_test)
y_prob = lg.predict_proba(x_test)
df_test['class'] = y_class.tolist()
df_test['class'] = df_test['class'] + 1
df_test['prob'] = y_prob.tolist()
df_result = df_test.loc[:, ['id','class','prob']]
df_result.to_csv('./result/result-tfidf-feature-prob.csv', index=False)
''' | [
"sheldonwong@sheldonwongdeMacBook-Pro.local"
] | sheldonwong@sheldonwongdeMacBook-Pro.local |
e6495c54678fa25589ed821affe20e0b079fbc0a | d5b3011dcc61ea661d395747a4ecac2fc850e667 | /checkout/migrations/0003_order_user_profile.py | f77fa66947ef7054025fbbe925f0b8c909849972 | [] | no_license | Code-Institute-Submissions/learning-management-system-ms4 | d4f544f357351d45fd9dd471774d160cfde7d9a9 | 172738a1a8e37113a40f93a8ed230dae83b1ca93 | refs/heads/master | 2022-11-09T08:56:05.253560 | 2020-06-22T01:08:32 | 2020-06-22T01:08:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | # Generated by Django 3.0.6 on 2020-06-12 09:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: adds Order.user_profile as a nullable FK to
    # profiles.UserProfile (SET_NULL keeps orders when a profile is
    # deleted).
    dependencies = [
        ('profiles', '0001_initial'),
        ('checkout', '0002_auto_20200612_0951'),
    ]
    operations = [
        migrations.AddField(
            model_name='order',
            name='user_profile',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='orders', to='profiles.UserProfile'),
        ),
    ]
| [
"bogdan@onis.studio"
] | bogdan@onis.studio |
a43e6873d5770d466c0143a8d8e3abdff3975ac4 | 4bc19f4dd098ebedcb6ee78af0ae12cb633671fe | /static/views.py | 608e8568b487fbee9eb1251fbf226fbe6d45ec5b | [] | no_license | StanislavKraev/rekvizitka | 958ab0e002335613a724fb14a8e4123f49954446 | ac1f30e7bb2e987b3b0bda4c2a8feda4d3f5497f | refs/heads/master | 2021-01-01T05:44:56.372748 | 2016-04-27T19:20:26 | 2016-04-27T19:20:26 | 57,240,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | from django.http import Http404
from django.shortcuts import render_to_response
from rek.static.models import StaticPage
from django.template.context import RequestContext
def render(request, page_alias=''):
    """Render an enabled static page, or raise Http404 when it is missing.

    :param request: the incoming HttpRequest.
    :param page_alias: alias of the StaticPage to display.
    """
    # FIX: .get() raises StaticPage.DoesNotExist (a 500) when no row
    # matches, so the `if not page` guard below could never fire.
    # .filter(...).first() returns None instead, making the 404 path work.
    page = StaticPage.objects.filter(alias=page_alias, enabled=True).first()
    if not page:
        raise Http404()
    return render_to_response('static_page_with_sidebar.html',
                              {'page' : page},
                              context_instance=RequestContext(request))
| [
"kraevst@yandex.ru"
] | kraevst@yandex.ru |
e2a4d4248d4f5b48e5c69c52e0dad41e541340ba | 33cfcb4561e7320ae0e893fbe774c7eb0a2effe8 | /eg15.01.py | c94d345080db1688fdbb1a237e7fd737f5e8db93 | [] | no_license | Jueee/aByteOfPython | 9c8bc01f0707daef29e52467db0c3f5a94747119 | ae1a4a4b181612463ccdcd0d89c961f22f7ece20 | refs/heads/master | 2021-05-31T14:26:00.790823 | 2016-02-17T05:41:20 | 2016-02-17T05:41:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | #!/usr/bin/python
# Filename: list_comprehension.py
# A list comprehension derives a new list from an existing one.
listone = [2, 3, 4]
# Keep only the elements greater than 2, doubled.
listtwo = [2*i for i in listone if i > 2]
print(listtwo)
# Receiving tuples and dicts in functions: a parameter prefixed with *
# collects all extra positional arguments into a tuple, while ** would
# collect extra keyword arguments into a dict. This is especially useful
# when a function must accept a variable number of arguments.
def powersum(power, *args):
    '''Return the sum of each argument raised to specified power.'''
    return sum(pow(value, power) for value in args)
print(powersum(2, 3, 4, 5))
print(powersum(2, 10, 100, 1000))
| [
"hellojue @foxmail.com"
] | hellojue @foxmail.com |
1a029ae138c31e24a7f6c8323cce780ea4cc4c45 | 209622dae7003dfec237123a8d4645f97e000df6 | /venv/bin/pyreverse | 22abef7cf37e49af31aee031b7d0ea9780fe04af | [] | no_license | vasughatole/promolta | f50d9ced2e07ff05beae1611cc6c52b49956ee17 | 947a91bc55d9dbd853fde011957df2fb9cc30705 | refs/heads/master | 2020-04-17T19:47:01.759322 | 2019-01-21T21:14:02 | 2019-01-21T21:14:02 | 166,878,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | #!/home/vasu/python/promolta/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pyreverse
if __name__ == '__main__':
    # Strip setuptools' "-script.py" / ".exe" suffix so argv[0] shows the
    # plain command name, then delegate to pylint's pyreverse entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_pyreverse())
| [
"vasu@moneytap.com"
] | vasu@moneytap.com | |
645fcfbac504304e88b27def48c4789f31873f48 | b56fb8740e74859f95d44854fd4fb4309e039e84 | /src/twenty_four/lib.py | 6dd6277a98e6e208d8c712d22c573fe3b50249fa | [] | no_license | qsweber/twenty-four-api | 03c20e06f725b94c9eab3643fc4dd60c0b464e0b | fb870a24a41b0bacf595d528b13f8f4bde1118af | refs/heads/master | 2022-11-17T02:34:25.585554 | 2020-07-08T03:34:48 | 2020-07-08T03:34:48 | 107,357,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,951 | py | import itertools
import typing
def plus(x: int, y: int) -> int:
    """Return the sum of x and y."""
    return y + x
def minus(x: int, y: int) -> int:
    """Return x minus y."""
    difference = x - y
    return difference
def dividedby(x: int, y: int) -> float:
    """Return x / y; yields 0 for division by zero (sentinel, no raise)."""
    return x / float(y) if y != 0 else 0
def times(x: int, y: int) -> int:
    """Return the product of x and y."""
    product = x * y
    return product
def get_solutions(numbers: typing.List[int]) -> typing.List[str]:
    """Enumerate all ways to reach 24 from the four given numbers.

    Tries every permutation of the numbers with every triple of the four
    basic operations, across the five possible parenthesizations of four
    operands, and returns the matching expressions as strings (operators
    rendered by function name, e.g. "plus").
    """
    functions = [plus, minus, dividedby, times]
    function_combinations = [f for f in itertools.product(functions, repeat=3)]
    combinations = set(i for i in itertools.permutations(numbers))
    answers = []
    for a, b, c, d in combinations:
        for f1, f2, f3 in function_combinations:
            # Pattern 1: ((a . b) . c) . d
            res1 = round(f3(f2(f1(a, b), c), d), 7)
            if res1 == 24.0:
                answers.append(
                    "((({a} {f1} {b}) {f2} {c}) {f3} {d})".format(
                        a=a,
                        b=b,
                        c=c,
                        d=d,
                        f1=f1.__name__,
                        f2=f2.__name__,
                        f3=f3.__name__,
                    )
                )
            # Pattern 2: (a . b) . (c . d)
            res2 = round(f2(f1(a, b), f3(c, d)), 7)
            if res2 == 24.0:
                answers.append(
                    "({a} {f1} {b}) {f2} ({c} {f3} {d})".format(
                        a=a,
                        b=b,
                        c=c,
                        d=d,
                        f1=f1.__name__,
                        f2=f2.__name__,
                        f3=f3.__name__,
                    )
                )
            # Pattern 3: a . ((b . c) . d)
            res3 = round(f1(a, f3(f2(b, c), d)), 7)
            if res3 == 24.0:
                answers.append(
                    "{a} {f1} (({b} {f2} {c}) {f3} {d})".format(
                        a=a,
                        b=b,
                        c=c,
                        d=d,
                        f1=f1.__name__,
                        f2=f2.__name__,
                        f3=f3.__name__,
                    )
                )
            # Pattern 4: (a . (b . c)) . d
            res4 = round(f3(f1(a, f2(b, c)), d), 7)
            if res4 == 24.0:
                answers.append(
                    "({a} {f1} ({b} {f2} {c})) {f3} {d}".format(
                        a=a,
                        b=b,
                        c=c,
                        d=d,
                        f1=f1.__name__,
                        f2=f2.__name__,
                        f3=f3.__name__,
                    )
                )
            # Pattern 5: a . (b . (c . d))
            res5 = round(f1(a, f2(b, f3(c, d))), 7)
            if res5 == 24.0:
                answers.append(
                    "{a} {f1} ({b} {f2} ({c} {f3} {d}))".format(
                        a=a,
                        b=b,
                        c=c,
                        d=d,
                        f1=f1.__name__,
                        f2=f2.__name__,
                        f3=f3.__name__,
                    )
                )
    return answers
| [
"quinn@quinnweber.com"
] | quinn@quinnweber.com |
284410ab3d19e3be36328ab6005a4c4dea22bbee | c43131586e14cb01bb1449a7318d8c349760f707 | /models/train_bleu.py | e8d50e3dcc9bf9cac0bceb70a7cce2cffb4460ba | [] | no_license | tfaod/arae-project | ef187ecdc711625cf1178e0538e4d200a7085b66 | f5cdb87a41bb3b7407144cc0829e694d57b2ea51 | refs/heads/master | 2022-07-12T01:55:42.790547 | 2020-05-12T17:19:18 | 2020-05-12T17:19:18 | 263,403,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,213 | py | #!/usr/bin/env python
# coding: utf-8
# In[7]:
import argparse
import os
import time
import math
import numpy as np
import random
import sys
import json
from nltk.translate.bleu_score import sentence_bleu
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from utils import to_gpu, Corpus, batchify, output_file_name, make_output_directory
from models1 import Seq2Seq2Decoder, Seq2Seq, MLP_G, MLP_Classify
from models import MLP_D
import shutil
import pdb
# In[ ]:
print(sys.version)
# In[ ]:
parser = argparse.ArgumentParser(description='ARAE for Yelp transfer')
# Path Arguments
parser.add_argument('--data_path', type=str, required=True,
                    help='location of the data corpus')
parser.add_argument('--outf', type=str, default="_output",
                    help='output directory name')
parser.add_argument('--load_vocab', type=str, default="",
                    help='path to load vocabulary from')
# Data Processing Arguments
parser.add_argument('--vocab_size', type=int, default=30000,
                    help='cut vocabulary down to this size '
                         '(most frequently seen words in train)')
parser.add_argument('--maxlen', type=int, default=25,
                    help='maximum sentence length')
parser.add_argument('--lowercase', dest='lowercase', action='store_true',
                    help='lowercase all text')
# BUGFIX: '--no-lowercase' previously used action='store_true', which made it
# behave identically to '--lowercase'; a negative flag must clear the dest.
parser.add_argument('--no-lowercase', dest='lowercase', action='store_false',
                    help='do not lowercase all text')
parser.set_defaults(lowercase=True)
# Model Arguments
parser.add_argument('--emsize', type=int, default=128,
                    help='size of word embeddings')
parser.add_argument('--nhidden', type=int, default=128,
                    help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=1,
                    help='number of layers')
parser.add_argument('--noise_r', type=float, default=0.1,
                    help='stdev of noise for autoencoder (regularizer)')
parser.add_argument('--noise_anneal', type=float, default=0.9995,
                    help='anneal noise_r exponentially by this'
                         'every 100 iterations')
parser.add_argument('--hidden_init', action='store_true',
                    help="initialize decoder hidden state with encoder's")
parser.add_argument('--arch_g', type=str, default='128-128',
                    help='generator architecture (MLP)')
parser.add_argument('--arch_d', type=str, default='128-128',
                    help='critic/discriminator architecture (MLP)')
parser.add_argument('--arch_classify', type=str, default='128-128',
                    help='classifier architecture')
parser.add_argument('--z_size', type=int, default=32,
                    help='dimension of random noise z to feed into generator')
parser.add_argument('--temp', type=float, default=1,
                    help='softmax temperature (lower --> more discrete)')
parser.add_argument('--dropout', type=float, default=0.0,
                    help='dropout applied to layers (0 = no dropout)')
# Training Arguments
parser.add_argument('--epochs', type=int, default=25,
                    help='maximum number of epochs')
parser.add_argument('--batch_size', type=int, default=64, metavar='N',
                    help='batch size')
parser.add_argument('--niters_ae', type=int, default=1,
                    help='number of autoencoder iterations in training')
parser.add_argument('--niters_gan_d', type=int, default=5,
                    help='number of discriminator iterations in training')
parser.add_argument('--niters_gan_g', type=int, default=1,
                    help='number of generator iterations in training')
parser.add_argument('--niters_gan_ae', type=int, default=1,
                    help='number of gan-into-ae iterations in training')
parser.add_argument('--niters_gan_schedule', type=str, default='',
                    help='epoch counts to increase number of GAN training '
                         ' iterations (increment by 1 each time)')
parser.add_argument('--lr_ae', type=float, default=1,
                    help='autoencoder learning rate')
parser.add_argument('--lr_gan_g', type=float, default=1e-04,
                    help='generator learning rate')
parser.add_argument('--lr_gan_d', type=float, default=1e-04,
                    help='critic/discriminator learning rate')
parser.add_argument('--lr_classify', type=float, default=1e-04,
                    help='classifier learning rate')
parser.add_argument('--beta1', type=float, default=0.5,
                    help='beta1 for adam. default=0.5')
parser.add_argument('--clip', type=float, default=1,
                    help='gradient clipping, max norm')
parser.add_argument('--gan_gp_lambda', type=float, default=0.1,
                    help='WGAN GP penalty lambda')
parser.add_argument('--grad_lambda', type=float, default=0.01,
                    help='WGAN into AE lambda')
parser.add_argument('--lambda_class', type=float, default=1,
                    help='lambda on classifier')
# Evaluation Arguments
parser.add_argument('--sample', action='store_true',
                    help='sample when decoding for generation')
parser.add_argument('--log_interval', type=int, default=1,
                    help='interval to log autoencoder training results')
# Other
parser.add_argument('--seed', type=int, default=1111,
                    help='random seed')
parser.add_argument('--cuda', dest='cuda', action='store_true',
                    help='use CUDA')
# BUGFIX: same inversion as '--no-lowercase' — must be store_false so the
# flag actually disables CUDA.
parser.add_argument('--no-cuda', dest='cuda', action='store_false',
                    help='do not use CUDA')
parser.set_defaults(cuda=True)
parser.add_argument('--device_id', type=str, default='0')

# BUGFIX: `args` was referenced below without ever being created; the call to
# parse_args() was missing entirely, so the script crashed with a NameError.
args = parser.parse_args()

# create default output file name
args.outf = output_file_name(args.outf, "bleu")
# make output directory if it doesn't already exist
make_output_directory(args.outf)

# Restrict visible GPUs before any CUDA context is created.
os.environ['CUDA_VISIBLE_DEVICES'] = args.device_id

# Set the random seed manually for reproducibility.
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        print("WARNING: You have a CUDA device, "
              "so you should probably run with --cuda")
    else:
        torch.cuda.manual_seed(args.seed)
# In[ ]:
label_ids = {"pos": 1, "neg": 0}
id2label = {1: "pos", 0: "neg"}

# (Path to textfile, Name, Use4Vocab)
datafiles = [(os.path.join(args.data_path, "valid1.txt"), "valid1", False),
             (os.path.join(args.data_path, "valid2.txt"), "valid2", False),
             (os.path.join(args.data_path, "train1.txt"), "train1", True),
             (os.path.join(args.data_path, "train2.txt"), "train2", True)]
referencefiles = [(os.path.join(args.data_path, "valid1_ref0.txt"), "valid1_ref0", False),
                  (os.path.join(args.data_path, "valid1_ref1.txt"), "valid1_ref1", False),
                  (os.path.join(args.data_path, "valid1_ref2.txt"), "valid1_ref2", False),
                  (os.path.join(args.data_path, "valid1_ref3.txt"), "valid1_ref3", False),
                  (os.path.join(args.data_path, "valid2_ref0.txt"), "valid2_ref0", False),
                  (os.path.join(args.data_path, "valid2_ref1.txt"), "valid2_ref1", False),
                  (os.path.join(args.data_path, "valid2_ref2.txt"), "valid2_ref2", False),
                  (os.path.join(args.data_path, "valid2_ref3.txt"), "valid2_ref3", False)]


def _load_reference_sets(ref_paths):
    """Read parallel human-reference files for BLEU scoring.

    `ref_paths` are files whose line i holds alternative references for
    source sentence i.  Returns a list where element i is the list of
    references for sentence i.  Each reference is re-joined with ", " to
    match the formatting the original code produced.
    NOTE(review): joining tokens with ", " means sentence_bleu later sees a
    single string rather than a token list — confirm intended tokenization.
    """
    per_file_lines = []
    for path in ref_paths:
        with open(path, 'r') as f:
            per_file_lines.append(f.readlines())
    # zip(*...) walks the files in lockstep, one sentence at a time.
    return [[", ".join(line.split(" ")) for line in group]
            for group in zip(*per_file_lines)]


# BUGFIX: the original iterated the open file objects while *indexing* them
# (e.g. v1_ref0[i]), which raises TypeError — file objects are not
# subscriptable.  Lines are now read with readlines() first.
valid1_refs = _load_reference_sets([t[0] for t in referencefiles[0:4]])
# BUGFIX: valid2 previously re-opened the valid1 reference files
# (referencefiles[0..3]); it must use indices 4..7.
valid2_refs = _load_reference_sets([t[0] for t in referencefiles[4:8]])

vocabdict = None
if args.load_vocab != "":
    # BUGFIX: json.load() requires a file object, and the CLI flag is
    # `load_vocab` (there is no `args.vocab`).
    with open(args.load_vocab) as vocab_file:
        vocabdict = json.load(vocab_file)
    vocabdict = {k: int(v) for k, v in vocabdict.items()}
corpus = Corpus(datafiles,
                maxlen=args.maxlen,
                vocab_size=args.vocab_size,
                lowercase=args.lowercase,
                vocab=vocabdict)

# dumping vocabulary
with open('{}/vocab.json'.format(args.outf), 'w') as f:
    json.dump(corpus.dictionary.word2idx, f)

# save arguments
ntokens = len(corpus.dictionary.word2idx)
print("Vocabulary Size: {}".format(ntokens))
args.ntokens = ntokens
with open('{}/args.json'.format(args.outf), 'w') as f:
    json.dump(vars(args), f)
with open("{}/log.txt".format(args.outf), 'w') as f:
    f.write(str(vars(args)))
    f.write("\n\n")

eval_batch_size = 100
test1_data = batchify(corpus.data['valid1'], eval_batch_size, shuffle=False)
test2_data = batchify(corpus.data['valid2'], eval_batch_size, shuffle=False)
train1_data = batchify(corpus.data['train1'], args.batch_size, shuffle=True)
train2_data = batchify(corpus.data['train2'], args.batch_size, shuffle=True)
print("Loaded data!")
# #Build model#
# Component to experiment with
# In[ ]:
class CNN_D(torch.nn.Module):
    """Experimental convolutional critic over latent codes.

    NOTE(review): forward() reshapes a (batch, ninput) code to
    (batch, ninput, 1, 1), convolves, then flattens the *entire* tensor —
    including the batch dimension — with view(-1) before fc1, whose input
    size is hard-coded to 256.  This only matches one specific
    batch_size/spatial-size combination; confirm before using this class
    in place of MLP_D.
    NOTE(review): the shared nn.LeakyReLU default instance is evaluated
    once at class-definition time and shared by all instances — harmless
    for a stateless activation, but worth knowing.
    """

    def __init__(self, ninput, noutput, layers, activation=nn.LeakyReLU(0.2), gpu=False):
        super(CNN_D, self).__init__()
        self.ninput = ninput    # dimensionality of the incoming latent code
        self.noutput = noutput  # number of output logits (1 for a WGAN critic)
        # Treat the code as an `ninput`-channel 1x1 "image"; padding=2 with a
        # 4x4 kernel expands the spatial dims to 2x2.
        self.conv1 = nn.Conv2d(self.ninput, 1, kernel_size=4, stride=1, padding=2, bias=True)
        self.activation = activation
        self.dropout = nn.Dropout(0.3)
        # flatten before feeding into fully connected
        self.fc1 = nn.Linear(256, noutput, bias=True)  # 256 is a placeholder size; must equal the flattened conv output

    def forward(self, x):
        # (batch, ninput) -> (batch, ninput, 1, 1) so Conv2d can consume it
        x = x.unsqueeze(-1)
        x = x.unsqueeze(-1)
        x=self.conv1(x)
        x=self.dropout(self.activation(x))
        # Flattens across the batch dimension too — see class NOTE above.
        x=x.view(-1)
        logits=self.fc1(x)
        return logits
# print("skip this pls")
# In[ ]:
# Build the models: a shared encoder with two decoders (one per style),
# a latent-code generator/critic pair for the GAN, and a style classifier.
ntokens = len(corpus.dictionary.word2idx)
autoencoder = Seq2Seq2Decoder(emsize=args.emsize, nhidden=args.nhidden, ntokens=ntokens, nlayers=args.nlayers, noise_r=args.noise_r, hidden_init=args.hidden_init, dropout=args.dropout, gpu=args.cuda)
gan_gen = MLP_G(ninput=args.z_size, noutput=args.nhidden, layers=args.arch_g)
gan_disc = MLP_D(ninput=args.nhidden, noutput=1, layers=args.arch_d)
print(gan_disc)
# Binary classifier over latent codes (style 1 vs style 2).
classifier = MLP_Classify(
    ninput=args.nhidden, noutput=1, layers=args.arch_classify)
g_factor = None

print(autoencoder)
print(gan_gen)
print(gan_disc)
print(classifier)

# SGD for the autoencoder, Adam for the adversarial components (standard
# ARAE setup).
optimizer_ae = optim.SGD(autoencoder.parameters(), lr=args.lr_ae)
optimizer_gan_g = optim.Adam(gan_gen.parameters(), lr=args.lr_gan_g, betas=(args.beta1, 0.999))
optimizer_gan_d = optim.Adam(gan_disc.parameters(), lr=args.lr_gan_d, betas=(args.beta1, 0.999))
# classify
optimizer_classify = optim.Adam(classifier.parameters(), lr=args.lr_classify, betas=(args.beta1, 0.999))

criterion_ce = nn.CrossEntropyLoss()

if args.cuda:
    autoencoder = autoencoder.cuda()
    gan_gen = gan_gen.cuda()
    gan_disc = gan_disc.cuda()
    classifier = classifier.cuda()
    criterion_ce = criterion_ce.cuda()
# In[ ]:
def save_model():
    """Persist the autoencoder, generator and critic state dicts to args.outf."""
    print("Saving models")
    checkpoints = (
        ('autoencoder_model.pt', autoencoder),
        ('gan_gen_model.pt', gan_gen),
        ('gan_disc_model.pt', gan_disc),
    )
    for filename, model in checkpoints:
        with open('{}/{}'.format(args.outf, filename), 'wb') as f:
            torch.save(model.state_dict(), f)
def train_classifier(whichclass, batch):
    """One training step of the style classifier on a batch of class
    `whichclass` (1 or 2; the binary label used is whichclass - 1).

    The encoder output is detached, so only the classifier is updated here.
    Returns (loss, accuracy) for the batch.
    """
    classifier.train()
    classifier.zero_grad()

    source, target, lengths = batch
    source = to_gpu(args.cuda, Variable(source))
    labels = to_gpu(args.cuda, Variable(
        torch.zeros(source.size(0)).fill_(whichclass - 1)))

    # Train
    # Encode only (decoder index 0 selects encode-only mode); detach so no
    # gradient reaches the autoencoder from this step.
    code = autoencoder(0, source, lengths, noise=False,
                       encode_only=True).detach()
    scores = classifier(code)
    classify_loss = F.binary_cross_entropy(scores.squeeze(1), labels)
    classify_loss.backward()
    optimizer_classify.step()
    # Legacy PyTorch (<=0.3) scalar extraction; matches the idioms
    # (Variable, volatile) used throughout this file.
    classify_loss = classify_loss.cpu().data[0]

    pred = scores.data.round().squeeze(1)
    accuracy = pred.eq(labels.data).float().mean()
    return classify_loss, accuracy
def grad_hook_cla(grad):
    """Backward hook: scale the classifier gradient flowing into the encoder
    by args.lambda_class."""
    scale = args.lambda_class
    return scale * grad
def classifier_regularize(whichclass, batch):
    """Adversarially regularize the encoder against the style classifier.

    The encoder is trained to make the classifier predict the *flipped*
    class label (abs(2 - whichclass): 1 -> 1, 2 -> 0), pushing the two
    styles toward a shared latent distribution.  Gradients through the code
    are scaled by args.lambda_class via grad_hook_cla.  Only the
    autoencoder's optimizer is stepped.
    """
    autoencoder.train()
    autoencoder.zero_grad()

    source, target, lengths = batch
    source = to_gpu(args.cuda, Variable(source))
    target = to_gpu(args.cuda, Variable(target))
    flippedclass = abs(2 - whichclass)
    labels = to_gpu(args.cuda, Variable(
        torch.zeros(source.size(0)).fill_(flippedclass)))

    # Train
    code = autoencoder(0, source, lengths, noise=False, encode_only=True)
    # Scale the gradient that flows back into the encoder.
    code.register_hook(grad_hook_cla)
    scores = classifier(code)
    classify_reg_loss = F.binary_cross_entropy(scores.squeeze(1), labels)
    classify_reg_loss.backward()

    # torch.nn.utils.clip_grad_norm_(autoencoder.parameters(), args.clip)
    # Legacy (pre-0.4) clip API, consistent with the rest of this file.
    torch.nn.utils.clip_grad_norm(autoencoder.parameters(), args.clip)
    optimizer_ae.step()

    return classify_reg_loss
def evaluate_autoencoder(whichdecoder, data_source, epoch):
    """Evaluate reconstruction with decoder `whichdecoder` and dump
    source/transfer sentences to text files.

    NOTE(review): this definition is DEAD CODE — it is shadowed by the
    4-argument `evaluate_autoencoder` defined immediately below (Python has
    no overloading; the later def wins).  Kept byte-identical for reference.

    Returns (mean cross-entropy over batches, mean token accuracy).
    """
    # Turn on evaluation mode which disables dropout.
    autoencoder.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary.word2idx)
    all_accuracies = 0
    bcnt = 0
    for i, batch in enumerate(data_source):
        source, target, lengths = batch
        # volatile=True: legacy (pre-0.4) no-grad inference mode.
        source = to_gpu(args.cuda, Variable(source, volatile=True))
        target = to_gpu(args.cuda, Variable(target, volatile=True))

        # Mask out padding (token id 0) positions.
        mask = target.gt(0)
        masked_target = target.masked_select(mask)
        # examples x ntokens
        output_mask = mask.unsqueeze(1).expand(mask.size(0), ntokens)
        hidden = autoencoder(0, source, lengths, noise=False, encode_only=True)

        # output: batch x seq_len x ntokens
        if whichdecoder == 1:
            # Reconstruct with decoder 1, style-transfer with decoder 2.
            output = autoencoder(1, source, lengths, noise=False)
            flattened_output = output.view(-1, ntokens)
            masked_output = flattened_output.masked_select(output_mask).view(-1, ntokens)
            # accuracy
            max_vals1, max_indices1 = torch.max(masked_output, 1)
            all_accuracies += torch.mean(max_indices1.eq(masked_target).float()).data[0]

            max_values1, max_indices1 = torch.max(output, 2)
            max_indices2 = autoencoder.generate(2, hidden, maxlen=50)
        else:
            # Reconstruct with decoder 2, style-transfer with decoder 1.
            output = autoencoder(2, source, lengths, noise=False)
            flattened_output = output.view(-1, ntokens)
            masked_output = flattened_output.masked_select(output_mask).view(-1, ntokens)
            # accuracy
            max_vals2, max_indices2 = torch.max(masked_output, 1)
            all_accuracies += torch.mean(max_indices2.eq(masked_target).float()).data[0]

            max_values2, max_indices2 = torch.max(output, 2)
            max_indices1 = autoencoder.generate(1, hidden, maxlen=50)

        total_loss += criterion_ce(masked_output /
                                   args.temp, masked_target).data
        bcnt += 1

    # NOTE: written after the loop, so only the LAST batch's sentences are
    # dumped to disk.
    aeoutf_from = "{}/{}_output_decoder_{}_from.txt".format(
        args.outf, epoch, whichdecoder)
    aeoutf_tran = "{}/{}_output_decoder_{}_tran.txt".format(
        args.outf, epoch, whichdecoder)
    with open(aeoutf_from, 'w') as f_from, open(aeoutf_tran, 'w') as f_trans:
        max_indices1 = max_indices1.view(output.size(0), -1).data.cpu().numpy()
        max_indices2 = max_indices2.view(output.size(0), -1).data.cpu().numpy()
        target = target.view(output.size(0), -1).data.cpu().numpy()
        tran_indices = max_indices2 if whichdecoder == 1 else max_indices1
        for t, tran_idx in zip(target, tran_indices):
            # real sentence
            chars = " ".join([corpus.dictionary.idx2word[x] for x in t])
            f_from.write(chars)
            f_from.write("\n")
            # transfer sentence
            chars = " ".join([corpus.dictionary.idx2word[x]
                              for x in tran_idx])
            f_trans.write(chars)
            f_trans.write("\n")

    return total_loss[0] / len(data_source), all_accuracies / bcnt
# overloading method to take input of refernece translations to calc bleu score
def evaluate_autoencoder(whichdecoder, data_source, references, epoch):
    """Evaluate reconstruction + style transfer and log per-sentence BLEU.

    Like the shadowed 3-argument version above, but additionally scores each
    transferred sentence of the LAST batch against `references[counter]`
    (a list of human references per source sentence) with NLTK's
    sentence_bleu, writing the scores to <epoch>_output_decoder_<d>_bleu.txt.

    NOTE(review): both `references` entries and `candidate` are ", "-joined
    *strings*, so sentence_bleu will iterate them character-by-character
    rather than token-by-token — confirm the intended tokenization upstream.

    Returns (mean cross-entropy over batches, mean token accuracy).
    """
    # Turn on evaluation mode which disables dropout.
    autoencoder.eval()
    total_loss = 0
    ntokens = len(corpus.dictionary.word2idx)
    all_accuracies = 0
    bcnt = 0
    for i, batch in enumerate(data_source):
        source, target, lengths = batch
        # volatile=True: legacy (pre-0.4) no-grad inference mode.
        source = to_gpu(args.cuda, Variable(source, volatile=True))
        target = to_gpu(args.cuda, Variable(target, volatile=True))

        # Mask out padding (token id 0) positions.
        mask = target.gt(0)
        masked_target = target.masked_select(mask)
        # examples x ntokens
        output_mask = mask.unsqueeze(1).expand(mask.size(0), ntokens)
        hidden = autoencoder(0, source, lengths, noise=False, encode_only=True)

        # output: batch x seq_len x ntokens
        if whichdecoder == 1:
            # Reconstruct with decoder 1, style-transfer with decoder 2.
            output = autoencoder(1, source, lengths, noise=False)
            flattened_output = output.view(-1, ntokens)
            masked_output = flattened_output.masked_select(output_mask).view(-1, ntokens)
            # accuracy
            max_vals1, max_indices1 = torch.max(masked_output, 1)
            all_accuracies += torch.mean(max_indices1.eq(masked_target).float()).data[0]

            max_values1, max_indices1 = torch.max(output, 2)
            max_indices2 = autoencoder.generate(2, hidden, maxlen=50)
        else:
            # Reconstruct with decoder 2, style-transfer with decoder 1.
            output = autoencoder(2, source, lengths, noise=False)
            flattened_output = output.view(-1, ntokens)
            masked_output = flattened_output.masked_select(output_mask).view(-1, ntokens)
            # accuracy
            max_vals2, max_indices2 = torch.max(masked_output, 1)
            all_accuracies += torch.mean(max_indices2.eq(masked_target).float()).data[0]

            max_values2, max_indices2 = torch.max(output, 2)
            max_indices1 = autoencoder.generate(1, hidden, maxlen=50)

        total_loss += criterion_ce(masked_output /
                                   args.temp, masked_target).data
        bcnt += 1

    # NOTE: written after the loop, so only the LAST batch's sentences are
    # dumped and BLEU-scored.
    aeoutf_from = "{}/{}_output_decoder_{}_from.txt".format(
        args.outf, epoch, whichdecoder)
    aeoutf_tran = "{}/{}_output_decoder_{}_tran.txt".format(
        args.outf, epoch, whichdecoder)
    aeoutf_bleu = "{}/{}_output_decoder_{}_bleu.txt".format(
        args.outf, epoch, whichdecoder)
    candidate = []
    counter = 0
    with open(aeoutf_from, 'w') as f_from, open(aeoutf_tran, 'w') as f_trans, open(aeoutf_bleu, 'w') as f_bleu:
        max_indices1 = max_indices1.view(output.size(0), -1).data.cpu().numpy()
        max_indices2 = max_indices2.view(output.size(0), -1).data.cpu().numpy()
        target = target.view(output.size(0), -1).data.cpu().numpy()
        tran_indices = max_indices2 if whichdecoder == 1 else max_indices1
        for t, tran_idx in zip(target, tran_indices):
            # real sentence
            chars = " ".join([corpus.dictionary.idx2word[x] for x in t])
            f_from.write(chars)
            f_from.write("\n")
            # transfer sentence
            chars = " ".join([corpus.dictionary.idx2word[x]
                              for x in tran_idx])
            candidate = ", ".join([corpus.dictionary.idx2word[x]
                                   for x in tran_idx])
            f_trans.write(chars)
            f_trans.write("\n")

            if counter < len(references):
                BLEU_score = sentence_bleu(references[counter], candidate)
                # BUGFIX: file.write() requires a str; sentence_bleu returns
                # a float, so the original raised TypeError here.
                f_bleu.write(str(BLEU_score))
                f_bleu.write("\n")
            counter = counter + 1

    return total_loss[0] / len(data_source), all_accuracies / bcnt
def evaluate_generator(whichdecoder, noise, epoch):
    """Decode sentences from fixed noise through decoder `whichdecoder` and
    write them to <outf>/<epoch>_generated<whichdecoder>.txt, truncating each
    sentence at its first '<eos>' token."""
    gan_gen.eval()
    autoencoder.eval()

    # noise -> latent code -> token index matrix (one row per sentence)
    fake_hidden = gan_gen(noise)
    max_indices = autoencoder.generate(whichdecoder, fake_hidden,
                                       maxlen=50, sample=args.sample)

    out_path = "%s/%s_generated%d.txt" % (args.outf, epoch, whichdecoder)
    with open(out_path, "w") as f:
        for idx_row in max_indices.data.cpu().numpy():
            sentence_words = []
            for token_id in idx_row:
                word = corpus.dictionary.idx2word[token_id]
                if word == '<eos>':
                    # stop at the first end-of-sentence marker
                    break
                sentence_words.append(word)
            f.write(" ".join(sentence_words))
            f.write("\n")
def train_ae(whichdecoder, batch, total_loss_ae, start_time, i):
    """One autoencoder reconstruction step with decoder `whichdecoder`.

    Accumulates the cross-entropy into `total_loss_ae`; every
    args.log_interval batches it logs loss/ppl/accuracy (reading the global
    `epoch`) and resets the accumulator and timer.
    Returns (total_loss_ae, start_time), both possibly reset.
    """
    autoencoder.train()
    optimizer_ae.zero_grad()

    source, target, lengths = batch
    source = to_gpu(args.cuda, Variable(source))
    target = to_gpu(args.cuda, Variable(target))

    # Ignore padding (token id 0) in the loss.
    mask = target.gt(0)
    masked_target = target.masked_select(mask)
    output_mask = mask.unsqueeze(1).expand(mask.size(0), ntokens)
    # noise=True: perturb the code during training (regularizer).
    output = autoencoder(whichdecoder, source, lengths, noise=True)
    flat_output = output.view(-1, ntokens)
    masked_output = flat_output.masked_select(output_mask).view(-1, ntokens)
    loss = criterion_ce(masked_output / args.temp, masked_target)
    loss.backward()

    # `clip_grad_norm` to prevent exploding gradient in RNNs / LSTMs
    #torch.nn.utils.clip_grad_norm_(autoencoder.parameters(), args.clip)
    torch.nn.utils.clip_grad_norm(autoencoder.parameters(), args.clip)
    optimizer_ae.step()

    total_loss_ae += loss.data

    accuracy = None
    if i % args.log_interval == 0 and i > 0:
        probs = F.softmax(masked_output, dim=-1)
        max_vals, max_indices = torch.max(probs, 1)
        # Legacy (<=0.3) scalar extraction, consistent with the file.
        accuracy = torch.mean(max_indices.eq(masked_target).float()).data[0]
        cur_loss = total_loss_ae[0] / args.log_interval
        elapsed = time.time() - start_time
        print('| epoch {:3d} | {:5d}/{:5d} batches | ms/batch {:5.2f} | '
              'loss {:5.2f} | ppl {:8.2f} | acc {:8.2f}'
              .format(epoch, i, len(train1_data),
                      elapsed * 1000 / args.log_interval,
                      cur_loss, math.exp(cur_loss), accuracy))

        with open("{}/log.txt".format(args.outf), 'a') as f:
            f.write('| epoch {:3d} | {:5d}/{:5d} batches | ms/batch {:5.2f} | '
                    'loss {:5.2f} | ppl {:8.2f} | acc {:8.2f}\n'.
                    format(epoch, i, len(train1_data),
                           elapsed * 1000 / args.log_interval,
                           cur_loss, math.exp(cur_loss), accuracy))

        total_loss_ae = 0
        start_time = time.time()

    return total_loss_ae, start_time
def train_gan_g():
    """One generator step: push generated codes toward higher critic scores."""
    gan_gen.train()
    gan_gen.zero_grad()

    # sample z ~ N(0, 1)
    z = to_gpu(args.cuda, Variable(torch.ones(args.batch_size, args.z_size)))
    z.data.normal_(0, 1)

    generated_code = gan_gen(z)
    errG = gan_disc(generated_code)
    # backward with grad +1: gradient *ascent* on the critic output
    errG.backward(one)
    optimizer_gan_g.step()

    return errG
def grad_hook(grad):
    """Backward hook: damp the GAN gradient entering the autoencoder by
    args.grad_lambda."""
    return args.grad_lambda * grad
''' Steal from https://github.com/caogang/wgan-gp/blob/master/gan_cifar10.py '''
def calc_gradient_penalty(netD, real_data, fake_data):
    """WGAN-GP gradient penalty on random interpolations between real and
    fake latent codes, scaled by args.gan_gp_lambda.

    NOTE(review): `.cuda()` is called unconditionally, so this path requires
    a GPU regardless of args.cuda — confirm before running CPU-only.
    """
    bsz = real_data.size(0)
    # One interpolation coefficient per example, broadcast across features.
    alpha = torch.rand(bsz, 1)
    alpha = alpha.expand(bsz, real_data.size(1))  # only works for 2D XXX
    alpha = alpha.cuda()
    interpolates = alpha * real_data + ((1 - alpha) * fake_data)
    interpolates = Variable(interpolates, requires_grad=True)
    disc_interpolates = netD(interpolates)

    # d(critic)/d(interpolates); keep the graph so the penalty itself is
    # differentiable w.r.t. the critic parameters.
    gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolates, grad_outputs=torch.ones( disc_interpolates.size()).cuda(), create_graph=True, retain_graph=True, only_inputs=True)[0]
    gradients = gradients.view(gradients.size(0), -1)

    # Penalize deviation of the gradient norm from 1 (Lipschitz constraint).
    gradient_penalty = ((gradients.norm(2, dim=1) - 1)
                        ** 2).mean() * args.gan_gp_lambda
    return gradient_penalty
def train_gan_d(whichdecoder, batch):
    """One critic (discriminator) step: raise the score of real encoded
    codes, lower the score of generated codes, and apply the WGAN-GP
    gradient penalty.  Returns (errD, errD_real, errD_fake).
    """
    gan_disc.train()
    optimizer_gan_d.zero_grad()

    # positive samples ----------------------------
    # generate real codes
    source, target, lengths = batch
    source = to_gpu(args.cuda, Variable(source))
    target = to_gpu(args.cuda, Variable(target))

    # batch_size x nhidden
    real_hidden = autoencoder(whichdecoder, source,lengths, noise=False, encode_only=True)

    # loss / backprop
    # backward(one): gradient ascent on the real-code score.
    errD_real = gan_disc(real_hidden)
    errD_real.backward(one)

    # negative samples ----------------------------
    # generate fake codes
    noise = to_gpu(args.cuda,
                   Variable(torch.ones(args.batch_size, args.z_size)))
    noise.data.normal_(0, 1)

    # loss / backprop
    # detach(): the generator is NOT updated during the critic step;
    # backward(mone): gradient descent on the fake-code score.
    fake_hidden = gan_gen(noise)
    errD_fake = gan_disc(fake_hidden.detach())
    errD_fake.backward(mone)

    # gradient penalty
    gradient_penalty = calc_gradient_penalty(gan_disc, real_hidden.data, fake_hidden.data)
    gradient_penalty.backward()

    optimizer_gan_d.step()
    errD = -(errD_real.unsqueeze(0) - errD_fake.unsqueeze(0))

    return errD, errD_real, errD_fake
def train_gan_d_into_ae(whichdecoder, batch):
    """Propagate the critic signal back into the encoder: update the
    autoencoder so its codes look *less* real to the critic is inverted here
    via backward(mone), i.e. the encoder moves its codes toward the critic's
    notion of real.  Gradients are damped by args.grad_lambda (grad_hook).
    Only optimizer_ae steps.  Returns the critic score on the real codes.
    """
    autoencoder.train()
    optimizer_ae.zero_grad()

    source, target, lengths = batch
    source = to_gpu(args.cuda, Variable(source))
    target = to_gpu(args.cuda, Variable(target))
    real_hidden = autoencoder(whichdecoder, source, lengths, noise=False, encode_only=True)
    # Damp the GAN gradient entering the encoder.
    real_hidden.register_hook(grad_hook)
    errD_real = gan_disc(real_hidden)
    errD_real.backward(mone)
    #torch.nn.utils.clip_grad_norm_(autoencoder.parameters(), args.clip)
    # Legacy (pre-0.4) clip API, consistent with the rest of this file.
    torch.nn.utils.clip_grad_norm(autoencoder.parameters(), args.clip)

    optimizer_ae.step()

    return errD_real
# In[6]:
print("Training...")
with open("{}/log.txt".format(args.outf), 'a') as f:
    f.write('Training...\n')

# schedule of increasing GAN training loops
if args.niters_gan_schedule != "":
    gan_schedule = [int(x) for x in args.niters_gan_schedule.split("-")]
else:
    gan_schedule = []
# NOTE(review): the reference ARAE code starts niter_gan at 1 and lets
# gan_schedule increment it; 25 GAN loops per batch from the start looks
# like a deliberate-but-undocumented local change — confirm.
niter_gan = 25

# Fixed noise reused every epoch so generated samples are comparable.
fixed_noise = to_gpu(args.cuda,Variable(torch.ones(args.batch_size, args.z_size)))
fixed_noise.data.normal_(0, 1)
# +1 / -1 gradient seeds for WGAN backward() calls (ascent / descent).
one = to_gpu(args.cuda, torch.FloatTensor([1]))
mone = one * -1
# Main training loop: per batch, (1) reconstruct with both decoders,
# (2) train the style classifier and adversarially regularize the encoder
# against it, (3) run `niter_gan` critic/generator/encoder GAN rounds.
# End of each epoch: evaluate both transfer directions and dump samples.
for epoch in range(1, args.epochs + 1):
    # update gan training schedule
    if epoch in gan_schedule:
        niter_gan += 1
        print("GAN training loop schedule increased to {}".format(niter_gan))
        with open("{}/log.txt".format(args.outf), 'a') as f:
            f.write("GAN training loop schedule increased to {}\n".
                    format(niter_gan))

    total_loss_ae1 = 0
    total_loss_ae2 = 0
    classify_loss = 0
    epoch_start_time = time.time()
    start_time = time.time()
    niter = 0
    niter_global = 1

    # loop through all batches in training data
    while niter < len(train1_data) and niter < len(train2_data):
        # train autoencoder ----------------------------
        for i in range(args.niters_ae):
            if niter == len(train1_data):
                break  # end of epoch
            total_loss_ae1, start_time = train_ae(1, train1_data[niter], total_loss_ae1, start_time, niter)
            total_loss_ae2, _ = train_ae(2, train2_data[niter], total_loss_ae2, start_time, niter)

            # train classifier ----------------------------
            classify_loss1, classify_acc1 = train_classifier(
                1, train1_data[niter])
            classify_loss2, classify_acc2 = train_classifier(
                2, train2_data[niter])
            classify_loss = (classify_loss1 + classify_loss2) / 2
            classify_acc = (classify_acc1 + classify_acc2) / 2
            # reverse to autoencoder
            classifier_regularize(1, train1_data[niter])
            classifier_regularize(2, train2_data[niter])

            niter += 1

        # train gan ----------------------------------
        for k in range(niter_gan):
            # train discriminator/critic
            for i in range(args.niters_gan_d):
                # feed a seen sample within this epoch; good for early training
                # Alternate the source style between critic iterations.
                if i % 2 == 0:
                    batch = train1_data[
                        random.randint(0, len(train1_data) - 1)]
                    whichdecoder = 1
                else:
                    batch = train2_data[
                        random.randint(0, len(train2_data) - 1)]
                    whichdecoder = 2
                errD, errD_real, errD_fake = train_gan_d(whichdecoder, batch)

            # train generator
            for i in range(args.niters_gan_g):
                errG = train_gan_g()

            # train autoencoder from d
            for i in range(args.niters_gan_ae):
                if i % 2 == 0:
                    batch = train1_data[
                        random.randint(0, len(train1_data) - 1)]
                    whichdecoder = 1
                else:
                    batch = train2_data[
                        random.randint(0, len(train2_data) - 1)]
                    whichdecoder = 2
                errD_ = train_gan_d_into_ae(whichdecoder, batch)

        niter_global += 1
        # BUGFIX: was `niter_global % 100 >= 0`, which is always true, so the
        # GAN stats were printed and the AE noise annealed on EVERY batch.
        # `== 0` restores logging every 100 iterations, matching the
        # `--noise_anneal` help text ("every 100 iterations").
        if niter_global % 100 == 0:
            print('[%d/%d][%d/%d] Loss_D: %.4f (Loss_D_real: %.4f '
                  'Loss_D_fake: %.4f) Loss_G: %.4f'
                  % (epoch, args.epochs, niter, len(train1_data),
                     errD.data[0], errD_real.data[0],
                     errD_fake.data[0], errG.data[0]))
            print("Classify loss: {:5.2f} | Classify accuracy: {:3.3f}\n".format(
                classify_loss, classify_acc))
            with open("{}/log.txt".format(args.outf), 'a') as f:
                f.write('[%d/%d][%d/%d] Loss_D: %.4f (Loss_D_real: %.4f '
                        'Loss_D_fake: %.4f) Loss_G: %.4f\n'
                        % (epoch, args.epochs, niter, len(train1_data),
                           errD.data[0], errD_real.data[0],
                           errD_fake.data[0], errG.data[0]))
                f.write("Classify loss: {:5.2f} | Classify accuracy: {:3.3f}\n".format(
                    classify_loss, classify_acc))

            # exponentially decaying noise on autoencoder
            autoencoder.noise_r = autoencoder.noise_r * args.noise_anneal

    # end of epoch ----------------------------
    # evaluation
    test_loss, accuracy = evaluate_autoencoder(1, test1_data[:1000], valid1_refs, epoch)
    print('-' * 89)
    print('| end of epoch {:3d} | time: {:5.2f}s | test loss {:5.2f} | '
          'test ppl {:5.2f} | acc {:3.3f}'.
          format(epoch, (time.time() - epoch_start_time),
                 test_loss, math.exp(test_loss), accuracy))
    print('-' * 89)
    with open("{}/log.txt".format(args.outf), 'a') as f:
        f.write('-' * 89)
        f.write('\n| end of epoch {:3d} | time: {:5.2f}s | test loss {:5.2f} |'
                ' test ppl {:5.2f} | acc {:3.3f}\n'.
                format(epoch, (time.time() - epoch_start_time),
                       test_loss, math.exp(test_loss), accuracy))
        f.write('-' * 89)
        f.write('\n')

    test_loss, accuracy = evaluate_autoencoder(2, test2_data[:1000], valid2_refs, epoch)
    print('-' * 89)
    print('| end of epoch {:3d} | time: {:5.2f}s | test loss {:5.2f} | '
          'test ppl {:5.2f} | acc {:3.3f}'.
          format(epoch, (time.time() - epoch_start_time),
                 test_loss, math.exp(test_loss), accuracy))
    print('-' * 89)
    with open("{}/log.txt".format(args.outf), 'a') as f:
        f.write('-' * 89)
        f.write('\n| end of epoch {:3d} | time: {:5.2f}s | test loss {:5.2f} |'
                ' test ppl {:5.2f} | acc {:3.3f}\n'.
                format(epoch, (time.time() - epoch_start_time),
                       test_loss, math.exp(test_loss), accuracy))
        f.write('-' * 89)
        f.write('\n')

    # Sample from the fixed noise through both decoders for qualitative
    # comparison across epochs.
    evaluate_generator(1, fixed_noise, "end_of_epoch_{}".format(epoch))
    evaluate_generator(2, fixed_noise, "end_of_epoch_{}".format(epoch))

    # shuffle between epochs
    train1_data = batchify(
        corpus.data['train1'], args.batch_size, shuffle=True)
    train2_data = batchify(
        corpus.data['train2'], args.batch_size, shuffle=True)
# Final evaluation after training completes, on the FULL validation sets
# (the per-epoch evaluation above used only the first 1000 batches) and
# labeled `epoch + 1` in the output filenames.
# test_loss, accuracy = evaluate_autoencoder(1, test1_data, epoch + 1)
# adding references as inputs to calculate bleu score
test_loss, accuracy = evaluate_autoencoder(1, test1_data, valid1_refs, epoch + 1)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s | test loss {:5.2f} | '
      'test ppl {:5.2f} | acc {:3.3f}'.
      format(epoch, (time.time() - epoch_start_time),
             test_loss, math.exp(test_loss), accuracy))
print('-' * 89)
with open("{}/log.txt".format(args.outf), 'a') as f:
    f.write('-' * 89)
    f.write('\n| end of epoch {:3d} | time: {:5.2f}s | test loss {:5.2f} |'
            ' test ppl {:5.2f} | acc {:3.3f}\n'.
            format(epoch, (time.time() - epoch_start_time),
                   test_loss, math.exp(test_loss), accuracy))
    f.write('-' * 89)
    f.write('\n')

# Same final pass for the opposite transfer direction.
# test_loss, accuracy = evaluate_autoencoder(2, test2_data, epoch + 1)
# adding references as inputs to calculate bleu score
test_loss, accuracy = evaluate_autoencoder(2, test2_data, valid2_refs, epoch + 1)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s | test loss {:5.2f} | '
      'test ppl {:5.2f} | acc {:3.3f}'.
      format(epoch, (time.time() - epoch_start_time),
             test_loss, math.exp(test_loss), accuracy))
print('-' * 89)
with open("{}/log.txt".format(args.outf), 'a') as f:
    f.write('-' * 89)
    f.write('\n| end of epoch {:3d} | time: {:5.2f}s | test loss {:5.2f} |'
            ' test ppl {:5.2f} | acc {:3.3f}\n'.
            format(epoch, (time.time() - epoch_start_time),
                   test_loss, math.exp(test_loss), accuracy))
    f.write('-' * 89)
    f.write('\n')
# In[ ]:
# In[ ]:
| [
"alice@Alices-MBP-8.T-mobile.com"
] | alice@Alices-MBP-8.T-mobile.com |
c2ab7ebb09d2e187c4dbf7afea60cfab0e18c38b | 72eb6f8dcfe34996e9c16769fd272d0d4383743f | /OS/MP-3/test.py | 63a530e8860c13151c7675b4a9c1d76e81a69305 | [] | no_license | abhishekkrm/Projects | a11daabc3a051b02f8b899d6058878d08b7613d8 | e7cd5a414ee330ac32671b4eab060949227fe3c7 | refs/heads/master | 2021-03-19T11:16:33.525390 | 2015-02-15T22:15:43 | 2015-02-15T22:15:43 | 30,843,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | import getopt
try :
#Keep Receiving the message unless self.cr_lf is found
time_val = 0
if time_val <= 0:
raise "timeout"
print ("DD")
except:
print ("HI")
print("TP")
| [
"am2633@cornell.edu"
] | am2633@cornell.edu |
55a78abf836afadcc6c928b21b04a242cf25d686 | 77266fcd99d4b4770a6e22bf669150b1576c4e73 | /Assignment4/venv/Scripts/pip3-script.py | 85df86302a9b7e83b935b7e11b27688aeb77fbbc | [] | no_license | NourAdel/GA | d67ad9b9ed3cd1dcc6cfe00968217f5ed7b61b33 | c407eb51e6c35b9369298bf96b409a9e4aaeb627 | refs/heads/master | 2020-05-18T00:06:59.353710 | 2019-04-29T10:42:14 | 2019-04-29T10:42:14 | 184,050,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | #!D:\College\Genetic\Assignment4\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"noura7305@gmail.com"
] | noura7305@gmail.com |
22f6c9a8e5f0d726c42869ef47714dc1722c3f56 | 1d230df0e7b96bdb9d0b56f6f14ac5379915ed4f | /tensorflow/python/keras/metrics.py | b18f12612a849d8d8b7e2465ff8075d35764000e | [
"Apache-2.0"
] | permissive | plddxr/tensorflow | afc5404ca9c089ca84700f9f055ef2bdc6c824f5 | 611edc8c515678c1d1b95ece09e6a374c9790716 | refs/heads/master | 2020-03-25T21:49:46.011109 | 2018-08-09T18:38:42 | 2018-08-09T18:43:28 | 144,191,426 | 1 | 0 | Apache-2.0 | 2018-08-09T18:48:44 | 2018-08-09T18:48:43 | null | UTF-8 | Python | false | false | 21,647 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
"""Built-in metrics.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABCMeta
from abc import abstractmethod
import types
import six
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.losses import binary_crossentropy
from tensorflow.python.keras.losses import categorical_crossentropy
from tensorflow.python.keras.losses import cosine_proximity
from tensorflow.python.keras.losses import hinge
from tensorflow.python.keras.losses import kullback_leibler_divergence
from tensorflow.python.keras.losses import logcosh
from tensorflow.python.keras.losses import mean_absolute_error
from tensorflow.python.keras.losses import mean_absolute_percentage_error
from tensorflow.python.keras.losses import mean_squared_error
from tensorflow.python.keras.losses import mean_squared_logarithmic_error
from tensorflow.python.keras.losses import poisson
from tensorflow.python.keras.losses import sparse_categorical_crossentropy
from tensorflow.python.keras.losses import squared_hinge
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import weights_broadcast_ops
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.tf_export import tf_export
def check_is_tensor_or_operation(x, name):
  """Raises TypeError if `x` is neither a `Tensor` nor an `Operation`.

  Args:
    x: object to check.
    name: human-readable name used in the error message.

  Raises:
    TypeError: if `x` is not a `Tensor` or `Operation`.
  """
  # isinstance accepts a tuple of types; one call instead of an `or` chain.
  if not isinstance(x, (ops.Tensor, ops.Operation)):
    raise TypeError('{0} must be a Tensor or Operation, given: {1}'.format(
        name, x))
def update_state_wrapper(update_state_fn):
  """Decorator to wrap metric `update_state()` with `defun()`, `add_update()`.

  Args:
    update_state_fn: function that accumulates metric statistics.

  Returns:
    If eager execution is enabled, returns None.
    If graph execution is enabled, returns an update op. This op should be
    executed to update the metric state with the given inputs.
  """
  def decorated(metric_obj, *args, **kwargs):
    """Decorated function with `defun()` and `add_update()`."""
    # Converting update_state_fn() into a graph function, so that
    # we can return a single op that performs all of the variable updates.
    # Assigning to a different method name to avoid reference cycle.
    defuned_update_state_fn = function.defun(update_state_fn)
    update_op = defuned_update_state_fn(*args, **kwargs)
    if update_op is not None: # update_op will be None in eager execution.
      # Graph mode only: register the op on the layer so Keras executes it,
      # and sanity-check that defun produced a real Tensor/Operation.
      metric_obj.add_update(update_op, inputs=True)
      check_is_tensor_or_operation(
          update_op, 'Metric {0}\'s update'.format(metric_obj.name))
    return update_op
  # make_decorator preserves update_state_fn's name/signature/docs.
  return tf_decorator.make_decorator(update_state_fn, decorated)
def result_wrapper(result_fn):
  """Decorator to wrap metric `result()` function in `merge_call()`.

  Result computation is an idempotent operation that simply calculates the
  metric value using the state variables.

  If metric state variables are distributed across towers/devices and
  `result()` is requested from the context of one device - This function wraps
  `result()` in a distribution strategy `merge_call()`. With this,
  the metric state variables will be aggregated across devices.

  Args:
    result_fn: function that computes the metric result.

  Returns:
    The metric result tensor.
  """
  def decorated(metric_obj, *args):
    """Decorated function with merge_call."""
    tower_context = distribute_lib.get_tower_context()
    if tower_context is None: # if in cross tower context already
      # Already in cross-tower context: compute directly, no merge needed.
      result_t = result_fn(*args)
    else:
      # TODO(psv): Test distribution of metrics using different distribution
      # strategies.
      # Creating a wrapper for merge_fn. merge_call invokes the given merge_fn
      # with distribution object as the first parameter. We create a wrapper
      # here so that the result function need not have that parameter.
      def merge_fn_wrapper(distribution, merge_fn, *args):
        # We will get `PerDevice` merge function. Taking the first one as all
        # are identical copies of the function that we had passed below.
        return distribution.unwrap(merge_fn)[0](*args)
      # Wrapping result in merge_call. merge_call is used when we want to leave
      # tower mode and compute a value in cross tower mode.
      result_t = tower_context.merge_call(merge_fn_wrapper, result_fn, *args)
    check_is_tensor_or_operation(result_t,
                                 'Metric {0}\'s result'.format(metric_obj.name))
    return result_t
  return tf_decorator.make_decorator(result_fn, decorated)
def safe_div(numerator, denominator):
  """Element-wise division that yields 0 wherever the denominator is <= 0.

  Args:
    numerator: A `Tensor`.
    denominator: A `Tensor`, with dtype matching `numerator`.

  Returns:
    `numerator / denominator` where `denominator > 0`, and 0 elsewhere.
  """
  quotient = math_ops.truediv(numerator, denominator)
  zeros = array_ops.zeros_like(quotient, dtype=denominator.dtype)
  denominator_is_positive = math_ops.greater(denominator, zeros)
  # Select the true quotient only where the division was well-defined.
  return array_ops.where(denominator_is_positive, quotient,
                         math_ops.cast(zeros, quotient.dtype))
def squeeze_or_expand_dimensions(y_pred, y_true, sample_weight):
  """Squeeze or expand last dimension if needed.

  1. Squeezes last dim of `y_pred` or `y_true` if their rank differs by 1
  (using `confusion_matrix.remove_squeezable_dimensions`).
  2. Squeezes or expands last dim of `sample_weight` if its rank differs by 1
  from the new rank of `y_pred`. If `sample_weight` is scalar, it is kept
  scalar.

  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.

  Args:
    y_pred: Predicted values, a `Tensor` of arbitrary dimensions.
    y_true: Optional label `Tensor` whose dimensions match `y_pred`.
    sample_weight: Optional weight scalar or `Tensor` whose dimensions match
      `y_pred`.

  Returns:
    Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them possibly has
    the last dimension squeezed, `sample_weight` could be extended by one
    dimension.
  """
  if y_true is not None:
    # squeeze last dim of `y_pred` or `y_true` if their rank differs by 1
    y_true, y_pred = confusion_matrix.remove_squeezable_dimensions(
        y_true, y_pred)
    y_pred.get_shape().assert_is_compatible_with(y_true.get_shape())
  if sample_weight is None:
    return y_pred, y_true, None
  sample_weight = ops.convert_to_tensor(sample_weight)
  weights_shape = sample_weight.get_shape()
  weights_rank = weights_shape.ndims
  if weights_rank == 0: # If weights is scalar, do nothing.
    return y_pred, y_true, sample_weight
  y_pred_shape = y_pred.get_shape()
  y_pred_rank = y_pred_shape.ndims
  if (y_pred_rank is not None) and (weights_rank is not None):
    # Use static rank: both ranks known at graph-build time, so the
    # adjustment (if any) can be decided in Python.
    if weights_rank - y_pred_rank == 1:
      sample_weight = array_ops.squeeze(sample_weight, [-1])
    elif y_pred_rank - weights_rank == 1:
      sample_weight = array_ops.expand_dims(sample_weight, [-1])
    return y_pred, y_true, sample_weight
  # Use dynamic rank: build a tf.cond cascade that picks squeeze / expand /
  # no-op at run time based on the actual rank difference.
  weights_rank_tensor = array_ops.rank(sample_weight)
  rank_diff = weights_rank_tensor - array_ops.rank(y_pred)
  maybe_squeeze_weights = lambda: array_ops.squeeze(sample_weight, [-1])
  def _maybe_expand_weights():
    return control_flow_ops.cond(
        math_ops.equal(rank_diff,
                       -1), lambda: array_ops.expand_dims(sample_weight, [-1]),
        lambda: sample_weight)
  def _maybe_adjust_weights():
    return control_flow_ops.cond(
        math_ops.equal(rank_diff, 1), maybe_squeeze_weights,
        _maybe_expand_weights)
  # squeeze or expand last dim of `sample_weight` if its rank differs by 1
  # from the new rank of `y_pred`.
  sample_weight = control_flow_ops.cond(
      math_ops.equal(weights_rank_tensor, 0), lambda: sample_weight,
      _maybe_adjust_weights)
  return y_pred, y_true, sample_weight
class Metric(Layer):
  """Encapsulates metric logic and state.

  Usage with eager execution:

  ```python
  m = SomeMetric(...)
  for input in ...:
    m.update_state(input)
  print('Final result: ', m.result().numpy())
  ```

  Usage with graph execution:

  ```python
  m = SomeMetric(...)
  init_op = tf.global_variables_initializer()  # Initialize variables
  with tf.Session() as sess:
    sess.run(init_op)
    for input in ...:
      update_op = m.update_state(input)
      sess.run(update_op)
    print('Final result: ', sess.run(m.result()))
  ```

  To be implemented by subclasses:
  * `__init__()`: All state variables should be created in this method by
    calling `self.add_weight()` like: `self.var = self.add_weight(...)`
  * `update_state()`: Has all updates to the state variables like:
    self.var.assign_add(...).
  * `result()`: Computes and returns a value for the metric
    from the state variables.

  Example subclass implementation:

  ```
  class BinaryTruePositives(Metric):

    def __init__(self, name='binary-true-positives', dtype=None):
      super(BinaryTruePositives, self).__init__(name=name, dtype=dtype)
      self.true_positives = self.add_weight(
          'true_positives', initializer=init_ops.zeros_initializer)

    def update_state(self, y_true, y_pred, sample_weight=None):
      y_true = math_ops.cast(y_true, dtypes.bool)
      y_pred = math_ops.cast(y_pred, dtypes.bool)
      y_pred, y_true, sample_weight = squeeze_or_expand_dimensions(
          y_pred, y_true, sample_weight)

      values = math_ops.logical_and(
          math_ops.equal(y_true, True), math_ops.equal(y_pred, True))
      values = math_ops.cast(values, self._dtype)
      if sample_weight is not None:
        sample_weight = math_ops.cast(sample_weight, self._dtype)
        values = math_ops.multiply(values, sample_weight)
      state_ops.assign_add(self.true_positives, math_ops.reduce_sum(values))

    def result(self):
      return array_ops.identity(self.true_positives)
  ```
  """
  __metaclass__ = ABCMeta

  def __init__(self, name=None, dtype=None):
    super(Metric, self).__init__(name=name, dtype=dtype)
    self.stateful = True  # All metric layers are stateful.
    self.built = True
    # Metric state is kept in Keras floatx unless an explicit dtype is given.
    self._dtype = K.floatx() if dtype is None else dtypes.as_dtype(dtype).name

  def __new__(cls, *args, **kwargs):
    obj = super(Metric, cls).__new__(cls, *args, **kwargs)
    # Wrap the subclass-provided methods once per instance:
    # `update_state` gains defun()/add_update() handling, and `result` gains
    # cross-tower merge_call() handling (see the module-level wrappers).
    obj.update_state = types.MethodType(
        update_state_wrapper(obj.update_state), obj)
    obj.result = types.MethodType(result_wrapper(obj.result), obj)
    return obj

  def __call__(self, *args, **kwargs):
    """Accumulates statistics and then computes metric result value.

    Args:
      *args:
      **kwargs: A mini-batch of inputs to the Metric,
        passed on to `update_state()`.

    Returns:
      The metric value tensor.
    """
    update_op = self.update_state(*args, **kwargs)  # pylint: disable=not-callable
    # Ensure the state update executes before the result is computed.
    with ops.control_dependencies([update_op]):
      return self.result()  # pylint: disable=not-callable

  def reset_states(self):
    """Resets all of the metric state variables.

    This function is called between epochs/steps,
    when a metric is evaluated during training.
    """
    for v in self.variables:
      K.set_value(v, 0)

  @abstractmethod
  def update_state(self, *args, **kwargs):
    """Accumulates statistics for the metric.

    Note: This function is executed as a graph function in graph mode.
    This means:
      a) Operations on the same resource are executed in textual order.
         This should make it easier to do things like add the updated
         value of a variable to another, for example.
      b) You don't need to worry about collecting the update ops to execute.
         All update ops added to the graph by this function will be executed.
    As a result, code should generally work the same way with graph or
    eager execution.

    Args:
      *args:
      **kwargs: A mini-batch of inputs to the Metric.
    """
    # BUG FIX: the exception was previously constructed but never raised,
    # so calling the base implementation silently returned None.
    raise NotImplementedError('Must be implemented in subclasses.')

  @abstractmethod
  def result(self):
    """Computes and returns the metric value tensor.

    Result computation is an idempotent operation that simply calculates the
    metric value using the state variables.
    """
    # BUG FIX: previously the exception was constructed but never raised.
    raise NotImplementedError('Must be implemented in subclasses.')

  ### For use by subclasses ###
  def add_weight(self,
                 name,
                 shape=(),
                 aggregation=vs.VariableAggregation.SUM,
                 synchronization=vs.VariableSynchronization.ON_READ,
                 initializer=None):
    """Adds state variable. Only for use by subclasses."""
    # Metric variables are non-trainable; the SUM/ON_READ combination lets
    # distribution strategies aggregate per-device state when it is read.
    return super(Metric, self).add_weight(
        name=name,
        shape=shape,
        dtype=self._dtype,
        trainable=False,
        initializer=initializer,
        synchronization=synchronization,
        aggregation=aggregation)
  ### End: For use by subclasses ###
class Mean(Metric):
  """Computes the (weighted) mean of the given values.

  This metric creates two variables, `total` and `count` that are used to
  compute the average of `values`. This average is ultimately returned as `mean`
  which is an idempotent operation that simply divides `total` by `count`.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.
  """
  def __init__(self, name='mean', dtype=None):
    """Creates a `Mean` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
    """
    super(Mean, self).__init__(name=name, dtype=dtype)
    # Create new state variables
    self.total = self.add_weight(
        'total', initializer=init_ops.zeros_initializer)
    self.count = self.add_weight(
        'count', initializer=init_ops.zeros_initializer)
  def update_state(self, values, sample_weight=None):
    """Accumulates statistics for computing the mean.

    For example, if `values` is [1, 3, 5, 7] then the mean is 4. If
    the `sample_weight` is specified as [1, 1, 0, 0] then the mean would be 2.

    Args:
      values: Per-example value.
      sample_weight: Optional weighting of each example. Defaults to 1.
    """
    values = math_ops.cast(values, self._dtype)
    if sample_weight is None:
      # Unweighted: every element counts as 1.
      num_values = math_ops.cast(array_ops.size(values), self._dtype)
    else:
      sample_weight = math_ops.cast(sample_weight, self._dtype)
      # Update dimensions of weights to match with values if possible.
      values, _, sample_weight = squeeze_or_expand_dimensions(
          values, None, sample_weight)
      try:
        # Broadcast weights if possible.
        sample_weight = weights_broadcast_ops.broadcast_weights(
            sample_weight, values)
      except ValueError:
        # Reduce values to same ndim as weight array
        ndim = K.ndim(values)
        weight_ndim = K.ndim(sample_weight)
        values = math_ops.reduce_mean(
            values, axis=list(range(weight_ndim, ndim)))
      # Weighted count and weighted values.
      num_values = math_ops.reduce_sum(sample_weight)
      values = math_ops.multiply(values, sample_weight)
    values = math_ops.reduce_sum(values)
    # Update state variables
    state_ops.assign_add(self.total, values)
    state_ops.assign_add(self.count, num_values)
  def result(self):
    # safe_div returns 0 when count <= 0 (i.e. no samples seen yet).
    return safe_div(self.total, self.count)
class MeanMetricWrapper(Mean):
  """Wraps a stateless metric function with the Mean metric."""
  def __init__(self, fn, name=None, dtype=None, **kwargs):
    """Creates a `MeanMetricWrapper` instance.

    Args:
      fn: The metric function to wrap, with signature
        `fn(y_true, y_pred, **kwargs)`.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
      **kwargs: The keyword arguments that are passed on to `fn`.
    """
    super(MeanMetricWrapper, self).__init__(name=name, dtype=dtype)
    self._fn = fn
    self._fn_kwargs = kwargs
  def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulates metric statistics.

    `y_true` and `y_pred` should have the same shape.

    Args:
      y_true: The ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional weighting of each example. Defaults to 1. Can be
        a `Tensor` whose rank is either 0, or the same rank as `y_true`,
        and must be broadcastable to `y_true`.
    """
    y_true = math_ops.cast(y_true, self._dtype)
    y_pred = math_ops.cast(y_pred, self._dtype)
    y_pred, y_true, sample_weight = squeeze_or_expand_dimensions(
        y_pred, y_true, sample_weight)
    # Per-sample values from the wrapped function; their (weighted) mean is
    # accumulated by the base Mean metric.
    matches = self._fn(y_true, y_pred, **self._fn_kwargs)
    super(MeanMetricWrapper, self).update_state(
        matches, sample_weight=sample_weight)
  def get_config(self):
    # NOTE(review): only the kwargs are serialized here -- `self._fn` itself
    # is not part of the config; reconstruction relies on subclasses
    # supplying their own fn (e.g. BinaryAccuracy).
    config = self._fn_kwargs
    base_config = super(MeanMetricWrapper, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class BinaryAccuracy(MeanMetricWrapper):
  """Calculates how often predictions matches labels.

  This metric creates two local variables, `total` and `count` that are used to
  compute the frequency with which `y_pred` matches `y_true`. This frequency is
  ultimately returned as `binary accuracy`: an idempotent operation that simply
  divides `total` by `count`.

  If `sample_weight` is `None`, weights default to 1.
  Use `sample_weight` of 0 to mask values.
  """
  def __init__(self, name='binary-accuracy', dtype=None, threshold=0.5):
    """Creates a `BinaryAccuracy` instance.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
      threshold: (Optional) Float representing the threshold for deciding
        whether prediction values are 1 or 0.
    """
    # The stateless `binary_accuracy` function (defined below) does the
    # per-batch computation; MeanMetricWrapper accumulates its mean.
    super(BinaryAccuracy, self).__init__(
        binary_accuracy, name, dtype=dtype, threshold=threshold)
@tf_export('keras.metrics.binary_accuracy')
def binary_accuracy(y_true, y_pred, threshold=0.5):
  """Mean binary accuracy along the last axis.

  Predictions are binarized by comparing against `threshold`; labels are
  assumed to already hold 0/1 values comparable to `y_pred`'s dtype.
  """
  threshold = math_ops.cast(threshold, y_pred.dtype)
  y_pred = math_ops.cast(y_pred > threshold, y_pred.dtype)
  return K.mean(math_ops.equal(y_true, y_pred), axis=-1)
@tf_export('keras.metrics.categorical_accuracy')
def categorical_accuracy(y_true, y_pred):
  """Per-sample accuracy for one-hot labels: 1.0 where the argmax of
  `y_true` matches the argmax of `y_pred`, else 0.0."""
  return math_ops.cast(
      math_ops.equal(
          math_ops.argmax(y_true, axis=-1), math_ops.argmax(y_pred, axis=-1)),
      K.floatx())
def sparse_categorical_accuracy(y_true, y_pred):
  """Per-sample accuracy for integer (sparse) labels.

  NOTE(review): `reduce_max(y_true, axis=-1)` collapses a trailing label
  dimension; this assumes each row of `y_true` holds a single class id
  (shape (..., 1) or (...)) -- confirm against callers.
  """
  return math_ops.cast(
      math_ops.equal(
          math_ops.reduce_max(y_true, axis=-1),
          math_ops.cast(math_ops.argmax(y_pred, axis=-1), K.floatx())),
      K.floatx())
@tf_export('keras.metrics.top_k_categorical_accuracy')
def top_k_categorical_accuracy(y_true, y_pred, k=5):
  """Fraction of samples whose true class (argmax of one-hot `y_true`)
  is among the `k` highest-scoring predictions."""
  return K.mean(
      nn.in_top_k(y_pred, math_ops.argmax(y_true, axis=-1), k), axis=-1)
@tf_export('keras.metrics.sparse_top_k_categorical_accuracy')
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
  """Top-k accuracy for integer (sparse) labels.

  NOTE(review): `reduce_max(y_true, axis=-1)` assumes each row of `y_true`
  carries a single class id (possibly with a trailing length-1 dim) --
  confirm against callers.
  """
  return K.mean(
      nn.in_top_k(y_pred,
                  math_ops.cast(math_ops.reduce_max(y_true, axis=-1), 'int32'),
                  k),
      axis=-1)
# Aliases
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
cosine = cosine_proximity
@tf_export('keras.metrics.serialize')
def serialize(metric):
  """Serialize a metric (function or Metric instance) to a Keras config."""
  return serialize_keras_object(metric)
@tf_export('keras.metrics.deserialize')
def deserialize(config, custom_objects=None):
  """Inverse of `serialize`: rebuild a metric from its config or name.

  Names are resolved against this module's globals plus `custom_objects`.
  """
  return deserialize_keras_object(
      config,
      module_objects=globals(),
      custom_objects=custom_objects,
      printable_module_name='metric function')
@tf_export('keras.metrics.get')
def get(identifier):
  """Retrieves a Keras metric from an identifier.

  Args:
    identifier: a serialized config dict, a string metric name, or an
      already-callable metric.

  Returns:
    A metric callable.

  Raises:
    ValueError: if the identifier cannot be interpreted.
  """
  if isinstance(identifier, dict):
    # BUG FIX: a dict is already a serialized metric config; pass it through
    # to `deserialize` directly instead of wrapping it in a bogus
    # {'class_name': str(dict), 'config': {}} envelope, which could never
    # resolve to a real metric.
    return deserialize(identifier)
  elif isinstance(identifier, six.string_types):
    return deserialize(str(identifier))
  elif callable(identifier):
    return identifier
  else:
    raise ValueError('Could not interpret '
                     'metric function identifier: %s' % identifier)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
585841a0ab7fec5d5e554df56e9525b8542746bf | a273c33036b697eaa90b01a22e5f01a31c61fda5 | /exercises/ListChaine.py | 5889a61caab9ac0da539cc442f6346bf568c7634 | [] | no_license | allaok/codestores | 1a55ed8798f6c99476fe24f27fda9a3c3fa03116 | f000bbb2518a8202875cbbcf6cc3a11e57db5792 | refs/heads/master | 2021-01-19T05:44:06.981591 | 2015-07-29T22:56:16 | 2015-07-29T22:56:16 | 39,902,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | __author__ = 'PWXG8293'
class Element:
    """A single node of a singly linked list: a payload plus a successor link."""

    def __init__(self):
        # Both the payload and the successor pointer start out unset.
        self.value = self.next = None
class Liste:
    """A minimal singly linked list supporting only appends."""

    def __init__(self):
        # Head of the chain; None while the list is empty.
        self.first = None

    def append(self, value):
        """Append `value` at the tail (O(n) walk from the head)."""
        node = Element()
        node.value = value
        node.next = None
        if self.first is None:
            # Empty list: the new node becomes the head.
            self.first = node
            return
        # Walk to the last node, then hook the new node after it.
        cursor = self.first
        while cursor.next is not None:
            cursor = cursor.next
        cursor.next = node
"alexis.koalla@orange.com"
] | alexis.koalla@orange.com |
08a41f586570d5ba0baa10410a977b1169ac947f | 4be9a5bdb8e051001b78c8f127ccc1a7f85c14e7 | /mapping/migrations/0033_auto_20170129_0939.py | 90fce4536a94b43eded5f95299f301669aa5c874 | [] | no_license | quentin-david/heimdall | f72a85606e7ab53683df2023ef5eaba762198211 | 84a429ee52e1891bc2ee4eb07a084dff209c789c | refs/heads/master | 2021-01-21T10:26:28.895663 | 2017-07-21T19:19:46 | 2017-07-21T19:19:46 | 83,432,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-29 09:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: makes ServiceReverseProxy.servername optional
    # (blank and NULL both allowed, max 30 chars).
    dependencies = [
        ('mapping', '0032_servicewebserver_reverse_proxy'),
    ]
    operations = [
        migrations.AlterField(
            model_name='servicereverseproxy',
            name='servername',
            field=models.CharField(blank=True, max_length=30, null=True),
        ),
    ]
| [
"david@hemdgsa01.local.lan"
] | david@hemdgsa01.local.lan |
ebf043a81c81d7202f4783736d677c16d360a834 | 828695b32588933b87b8a58c9f68a3d1ce23db17 | /jobs/migrations/0001_initial.py | 4b98fd0982e563eb9eed2e0d1246a174992521c9 | [] | no_license | jimpfred/portfolio | 6a82a632319c0628b54a1b68b13238d10be67458 | 3e62cacd52052844d6a8400fc142ba04d5bb0740 | refs/heads/main | 2023-07-12T08:00:09.698957 | 2021-08-17T21:42:33 | 2021-08-17T21:42:33 | 397,274,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | # Generated by Django 3.2.4 on 2021-08-16 19:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration of the jobs app: creates the Job table.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Job',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Uploaded portfolio image, stored under MEDIA_ROOT/images/.
                ('image', models.ImageField(upload_to='images/')),
                # Short description shown alongside the image.
                ('summary', models.CharField(max_length=200)),
            ],
        ),
    ]
| [
"jimpfred@yahoo.com"
] | jimpfred@yahoo.com |
71bac15afe4dcfaa41d8dd9a2894a3b9ff0f5e83 | 5503712ed14239e48b5dc2fb66e38250f1c14320 | /accounts/migrations/0002_auto_20200506_2119.py | 20372336adb7672ab88f189debfe85514eb19dfe | [] | no_license | Aexki/sajas-hotel_management | ee8238e8f3a13a0e061a9951df7244014d948902 | f0ac71378153ce97c1588ecef3857fcde3cd4035 | refs/heads/master | 2022-07-03T03:36:34.818041 | 2020-05-09T15:53:43 | 2020-05-09T15:53:43 | 262,008,143 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | # Generated by Django 3.0.5 on 2020-05-06 15:49
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds a `completed` flag (default False) to each of the
    # three hotel service models so requests can be marked done.
    dependencies = [
        ('accounts', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='cabservice',
            name='completed',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='complaintservice',
            name='completed',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='roomservice',
            name='completed',
            field=models.BooleanField(default=False),
        ),
    ]
| [
"50047387+Aexki@users.noreply.github.com"
] | 50047387+Aexki@users.noreply.github.com |
bb5ab0eba71a2dc209d64e6c93ae9e1d690a3bab | 7c1abd2ec952d022342098990d4ee2382bd18266 | /tests/test_01.py | 6a34cdac5b44ae9e2caa6699102c5839ed98f80d | [
"MIT"
] | permissive | evestidor/svc-stock-price-simulator | 7679177bc06a6e230478cf383b718afb108120cc | 2ddfb504933959c19f8bd2b7d295b117c10fe27a | refs/heads/master | 2022-05-07T19:48:36.449467 | 2019-07-03T15:27:11 | 2019-07-03T15:27:11 | 192,607,658 | 0 | 0 | MIT | 2022-04-22T21:46:32 | 2019-06-18T20:24:01 | Python | UTF-8 | Python | false | false | 71 | py |
class TestDummy:
    """Placeholder pytest suite proving the test harness runs."""
    def test_assert_true(self):
        # Trivially true; exists only so pytest collects at least one test.
        assert True
| [
"tiagoliradsantos@gmail.com"
] | tiagoliradsantos@gmail.com |
05d19c9a1a2febd779681ba4ce0cd85888d449d4 | 783a97b257ec086d6c7e2109840f1ad91f8e52bf | /scraper/src/config/config_validator.py | f432d0d299c047068b9d3b7fd3fbad2aa75bbfab | [
"MIT"
] | permissive | thefrenchmatt/docs-scraper | 949af243655201c865bc7f64cb906c7b597953a6 | 70665daafaea6dfefc48de3ca107c2a97cc9fc0d | refs/heads/master | 2022-07-05T03:34:20.627595 | 2020-05-14T12:05:59 | 2020-05-14T12:05:59 | 267,131,268 | 1 | 0 | NOASSERTION | 2020-05-26T19:17:39 | 2020-05-26T19:17:38 | null | UTF-8 | Python | false | false | 2,824 | py |
class ConfigValidator:
    """Validates a scraper config object, raising on malformed settings."""
    config = None

    def __init__(self, config):
        self.config = config

    def validate(self):
        """Check for all needed parameters in config.

        Raises:
            ValueError: if a mandatory setting (index_uid, start urls) is missing.
            Exception: if an optional setting has the wrong type or an
                inconsistent combination of settings was given.
        """
        if not self.config.index_uid:
            raise ValueError('index_uid is not defined')
        # Start_urls is mandatory unless a sitemap stands in for it
        if not self.config.start_urls and not self.config.sitemap_urls:
            raise ValueError('start_urls is not defined, nor sitemap urls')
        # Start urls must be an array
        if self.config.start_urls and not isinstance(self.config.start_urls,
                                                     list):
            raise Exception('start_urls should be list')
        # Stop urls must be an array
        if self.config.stop_urls and not isinstance(self.config.stop_urls,
                                                    list):
            raise Exception('stop_urls should be list')
        if self.config.js_render and not isinstance(self.config.js_render,
                                                    bool):
            raise Exception('js_render should be boolean')
        # `js_wait` is set to 0s by default unless it is specified
        if self.config.js_wait and not isinstance(self.config.js_wait, int):
            raise Exception('js_wait should be integer')
        if self.config.use_anchors and not isinstance(self.config.use_anchors,
                                                      bool):
            raise Exception('use_anchors should be boolean')
        if self.config.sitemap_alternate_links and not isinstance(
                self.config.sitemap_alternate_links, bool):
            raise Exception('sitemap_alternate_links should be boolean')
        if self.config.sitemap_urls_regexs and not self.config.sitemap_urls:
            raise Exception(
                'You gave an regex to parse sitemap but you didn\'t provide a sitemap url')
        # BUG FIX: this check previously repeated the condition above (making
        # it unreachable dead code) and read the misspelled attribute
        # `sitemap_urls_regex`; building the message by concatenating a
        # non-str regex would also have raised TypeError. Validate each
        # provided regex instead.
        if self.config.sitemap_urls_regexs:
            for regex in self.config.sitemap_urls_regexs:
                if not isinstance(regex, str):
                    raise Exception(
                        'You gave an bad regex: ' + str(regex) + ' must be a string')
        if self.config.force_sitemap_urls_crawling and not self.config.sitemap_urls:
            raise Exception(
                'You want to force the sitemap crawling but you didn\'t provide a sitemap url')
        # NOTE(review): rejects configs where both the new and the legacy
        # flag are falsy -- confirm this matches the intended deprecation path.
        if not self.config.scrape_start_urls and not self.config.scrap_start_urls:
            raise Exception(
                'Please use only the new variable name: scrape_start_urls')
        if self.config.nb_hits_max and not isinstance(self.config.nb_hits_max,
                                                      int):
            raise Exception('nb_hits_max should be integer')
| [
"clementine@meilisearch.com"
] | clementine@meilisearch.com |
da159e2f2ebf6d02cf5df84e0e70edd3ae7af159 | 08289088124d18029d0ad4388f49ac9c206738e2 | /etl_prefect_core.py | e4ebf3c138cb20b5c050e3ef489bbef516571689 | [] | no_license | AntonioNtV/prefect-pydata-denver-tutorial | e54660172ef484bf9d0610b84b0b9f47d7b2805c | cb6bfb1a236b85a26efdd8b144027351fbe49aa6 | refs/heads/master | 2023-08-10T21:45:38.654271 | 2021-10-07T19:04:31 | 2021-10-07T19:04:31 | 414,699,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,919 | py | import requests
import json
import sqlite3
import pathlib
import prefect
from collections import namedtuple
from contextlib import closing
from datetime import timedelta
from prefect import task, Flow
from prefect.tasks.database.sqlite import SQLiteScript
from prefect.schedules import IntervalSchedule
from prefect.engine.results import LocalResult
DATABASE_NAME='cfpbcomplaints.db'
## setup
create_table = SQLiteScript(
db=DATABASE_NAME,
script='CREATE TABLE IF NOT EXISTS complaint (timestamp TEXT, state TEXT, product TEXT, company TEXT, complaint_what_happened TEXT)'
)
def alert_failed(obj, old_state, new_state):
    """Prefect state handler: log whenever a task or flow enters a Failed state.

    Args:
        obj: the task or flow whose state changed.
        old_state: previous prefect state (unused).
        new_state: new prefect state; only Failed states are reported.
    """
    if new_state.is_failed():
        logger = prefect.context.get('logger')
        # BUG FIX: this handler previously logged "I actually requested this
        # time!", a message copy-pasted from get_complaint_data. Report the
        # failure instead so the log is actually useful.
        logger.info("%s transitioned to failed state: %s", obj, new_state)
## extract
@task(cache_for=timedelta(days=1), state_handlers=[alert_failed], result=LocalResult(dir="{current_path}/results".format(current_path=pathlib.Path(__file__).parent.resolve())))
def get_complaint_data():
    """Extract: fetch the 10 most recent complaints from the CFPB public API.

    Cached for one day (result persisted to ./results), so repeat flow runs
    within that window skip the HTTP request.

    Returns:
        list of Elasticsearch-style hit dicts (each with a '_source' key).
    """
    r = requests.get("https://www.consumerfinance.gov/data-research/consumer-complaints/search/api/v1/", params={'size':10})
    response_json = json.loads(r.text)
    logger = prefect.context.get('logger')
    logger.info("I actually requested this time!")
    return response_json['hits']['hits']
## transform
@task(state_handlers=[alert_failed])
def parse_complaint_data(raw_complaint_data):
    """Transform: flatten raw CFPB API hits into Complaint tuples.

    Args:
        raw_complaint_data: list of Elasticsearch-style hit dicts, each
            carrying the complaint fields under its '_source' key.

    Returns:
        list of Complaint namedtuples whose field order matches the
        `complaint` table columns used by store_complaints.
    """
    complaints = []
    Complaint = namedtuple('Complaint', ['data_received', 'state', 'product', 'company', 'complaint_what_happened'])
    for row in raw_complaint_data:
        source = row.get('_source')
        this_complaint = Complaint(
            # BUG FIX: the API field is 'date_received'; the previous
            # misspelling 'date_recieved' always yielded None timestamps.
            # (The namedtuple field name keeps its original spelling so the
            # returned objects stay interface-compatible.)
            data_received=source.get('date_received'),
            state=source.get('state'),
            product=source.get('product'),
            company=source.get('company'),
            complaint_what_happened=source.get('complaint_what_happened')
        )
        complaints.append(this_complaint)
    return complaints
## load
@task(state_handlers=[alert_failed])
def store_complaints(parsed_complaint_data):
    """Load: bulk-insert parsed complaint tuples into the sqlite `complaint` table.

    Args:
        parsed_complaint_data: sequence of 5-tuples ordered like the table
            columns (timestamp, state, product, company, complaint text).
    """
    insert_cmd = "INSERT INTO complaint VALUES (?, ?, ?, ?, ?)"
    # closing() guarantees connection and cursor are released even on error.
    with closing(sqlite3.connect(DATABASE_NAME)) as conn:
        with closing(conn.cursor()) as cursor:
            cursor.executemany(insert_cmd, parsed_complaint_data)
            conn.commit()
def build_flow(schedule=None):
    """Assemble the Prefect ETL flow: create table -> extract -> transform -> load.

    Args:
        schedule: optional prefect Schedule; None builds an unscheduled flow.

    Returns:
        The configured prefect Flow, ready to register or run.
    """
    with Flow("etl flow", schedule=schedule, state_handlers=[alert_failed]) as flow:
        db_table = create_table()
        raw_complaint_data = get_complaint_data()
        parsed_complaint_data = parse_complaint_data(raw_complaint_data)
        populated_table = store_complaints(parsed_complaint_data)
        populated_table.set_upstream(db_table) # ensure the table exists before inserting
    return flow
schedule = IntervalSchedule(interval=timedelta(minutes=1))
etl_flow = build_flow(schedule)
etl_flow.register(project_name='ETL FIRST PROJECT WITH PREFECT')
| [
"antonio.bertino.neto@ccc.ufcg.edu.br"
] | antonio.bertino.neto@ccc.ufcg.edu.br |
9a98b7f2a056ef505360b11f8b6ffb23274a9882 | d6d2773e7466b31da5ed9e7103d5eb9909db1223 | /web/sales_app/apps/stations/migrations/0002_auto_20170717_2015.py | 3bc58a2cad8a9bae6483af3e326e06df869aafeb | [
"MIT"
] | permissive | iabok/sales-tracker | bee3f0a18ca565a8da2ce356c6842f6af486367c | 7ef2e68f0b0393b983375d092b8469ca88f6b5ce | refs/heads/master | 2021-06-22T00:09:16.723616 | 2017-08-21T23:30:57 | 2017-08-21T23:30:57 | 92,953,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 783 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-07-17 20:15
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated: rewrites the created/modified date defaults.
    # NOTE(review): the defaults below are *fixed* timestamps captured when
    # makemigrations ran -- every new row will get 2017-07-17 unless the model
    # overrides them; consider django.utils.timezone.now instead.
    dependencies = [
        ('stations', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='station',
            name='created_date',
            field=models.DateTimeField(default=datetime.datetime(2017, 7, 17, 20, 15, 30, 246266, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='station',
            name='modified_date',
            field=models.DateTimeField(default=datetime.datetime(2017, 7, 17, 20, 15, 30, 246303, tzinfo=utc)),
        ),
    ]
| [
"abokisaac@gmail.com"
] | abokisaac@gmail.com |
88bc0b746f0606f86b7e67ef6a1772fa311c5961 | 3b4094f1161502a3d1dbc5712e6405009c3c4b8c | /wsgi/venv/lib/python2.7/site-packages/bokeh/models/map_plots.py | d178a4dc16cabf517ea3bb3b0b28d01d0b822b07 | [] | no_license | chriotte/FinalCourseworkCloudComputingChristopherOttesen | b604337c7e8064ee07e5a45a38e44ae52cb599ae | 08a0271f831e3f14bc836870c8a39b996b6d1d20 | refs/heads/master | 2021-01-18T19:54:02.090555 | 2017-04-01T15:14:03 | 2017-04-01T15:14:03 | 86,919,333 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,730 | py | """ Models for displaying maps in Bokeh plots.
"""
from __future__ import absolute_import
from ..core import validation
from ..core.validation.warnings import MISSING_RENDERERS, NO_DATA_RENDERERS
from ..core.validation.errors import REQUIRED_RANGE, MISSING_GOOGLE_API_KEY
from ..core.has_props import HasProps
from ..core.properties import abstract
from ..core.properties import Enum, Float, Instance, Int, JSON, Override, String
from ..core.enums import MapType
from .plots import Plot
@abstract
class MapOptions(HasProps):
    """ Abstract base class for map options' models.

    """
    # NOTE(review): lat/lng appear to be decimal degrees (Google Maps
    # convention, per GMapPlot's docstring) -- confirm for other backends.
    lat = Float(help="""
    The latitude where the map should be centered.
    """)
    lng = Float(help="""
    The longitude where the map should be centered.
    """)
    zoom = Int(12, help="""
    The initial zoom level to use when displaying the map.
    """)
@abstract
class MapPlot(Plot):
    """ Abstract base class for map plot models.

    Concrete subclasses (e.g. ``GMapPlot``) pair a ``Plot`` with a tiled
    map background; this base adds no properties of its own.
    """
class GMapOptions(MapOptions):
    """ Options for GMapPlot objects.

    Extends the shared ``MapOptions`` (lat/lng/zoom) with Google-Maps
    specific settings: the base map type and optional custom styling.
    """

    map_type = Enum(MapType, help="""
    The `map type`_ to use for the GMapPlot.
    .. _map type: https://developers.google.com/maps/documentation/javascript/reference#MapTypeId
    """)

    # Raw JSON is passed straight through to the Google Maps API.
    styles = JSON(help="""
    A JSON array of `map styles`_ to use for the GMapPlot. Many example styles can
    `be found here`_.
    .. _map styles: https://developers.google.com/maps/documentation/javascript/reference#MapTypeStyle
    .. _be found here: https://snazzymaps.com
    """)
class GMapPlot(MapPlot):
    """ A Bokeh Plot with a `Google Map`_ displayed underneath.

    Data placed on this plot should be specified in decimal lat long coordinates e.g. 37.123, -122.404.
    It will be automatically converted into the web mercator projection to display properly over
    google maps tiles.

    .. _Google Map: https://www.google.com/maps/
    """

    # TODO (bev) map plot might not have these
    # The inherited Plot validations below are overridden to no-ops: a map
    # plot renders the map itself, so a missing range or renderer is not a
    # problem the way it is for an ordinary Plot.
    @validation.error(REQUIRED_RANGE)
    def _check_required_range(self):
        pass

    @validation.warning(MISSING_RENDERERS)
    def _check_missing_renderers(self):
        pass

    @validation.warning(NO_DATA_RENDERERS)
    def _check_no_data_renderers(self):
        pass

    # Google Maps will not render without a key, so surface that as a hard
    # validation error instead of a silently blank map.
    @validation.error(MISSING_GOOGLE_API_KEY)
    def _check_missing_google_api_key(self):
        if self.api_key is None:
            return str(self)

    map_options = Instance(GMapOptions, help="""
    Options for displaying the plot.
    """)

    border_fill_color = Override(default="#ffffff")

    api_key = String(help="""
    Google Maps API requires an API key. See https://developers.google.com/maps/documentation/javascript/get-api-key
    for more information on how to obtain your own.
    """)
| [
"christopher.ottesen@yahoo.com"
] | christopher.ottesen@yahoo.com |
01b5228bafb4cd7e36afa383714ca0ce95b4d5dd | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/A/anlangner/cordis_v3.py | c960031a72d020159d2fc051da824933e00894a7 | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,592 | py | import scraperwiki
import scrapemark
import feedparser
import csv
import re
import urllib2,sys
import requests
import lxml.html
from BeautifulSoup import BeautifulSoup, NavigableString
# extract project page links from the result page "url"
def extract_links(url):
atom_feed = feedparser.parse(url)
link_list = []
for entry in atom_feed.entries:
print entry.title #+ " - " + entry.link
print entry.link
# experiment with data structure
data = {
'TITLE' : entry.title,
'LINK' : entry.link
}
print data
#scraperwiki.sqlite.save(unique_keys=['TITLE'], data=data)
link_list.append(entry.link)
#csvwriter.writerow([entry.title] + [entry.link])
return link_list
# open details page for "object" and parse the results
def parse_object(object):
html = urllib2.urlopen(object).read()
soup = BeautifulSoup(html)
project_id = soup.find('input', attrs={'name':"REF"}).get('value')
print "Project-ID: " + str(project_id)
detail_url = "http://cordis.europa.eu/newsearch/getDoc?doctype=PROJ&xslt-template=projects/xsl/projectdet_en.xslt&rcn=" + str(project_id)
print "***" + detail_url
details = requests.get(detail_url)
detail_page = details.content
content = BeautifulSoup(detail_page, convertEntities="html", smartQuotesTo="html", fromEncoding="utf-8")
# extract content
data_info = content.find(attrs={'class':'projdates'})
data_coordinator = content.find(attrs={'class': 'projcoord'})
data_details = content.find(attrs={'class': 'projdet'})
data_participants = content.find(attrs={'class': 'participants'})
data_footer = content.find(attrs={'id': 'recinfo'})
# data_tech = content.find(attrs={'class': 'tech'})
# trying to find project description: display all content
print ">>> " str(content)
data_info = lxml.html.fromstring(str(data_info))
data_info = data_info.text_content()
data_coordinator = lxml.html.fromstring(str(data_coordinator))
data_coordinator = data_coordinator.text_content()
data_details = lxml.html.fromstring(str(data_details))
data_details = data_details.text_content()
data_participants = lxml.html.fromstring(str(data_participants))
data_participants = data_participants.text_content()
data_footer = lxml.html.fromstring(str(data_footer))
data_footer = data_footer.text_content()
# REGEXP for fields
# Start date in YYYY-MM-DD: (?<=From\s).{1,}(?=\sto)
# End date in YYYY-MM-DD: (?<=to\s).{1,}(?=\s\|)
# Coordinator: (?<=Coordinator\s).{1,}(?=\s\(\+\))
# Coordinator contact: (?<=Administrative contact:\s).{1,}(?!\n)
# Project title in caps: (?<=\|\s).{1,}(?=\swebsite)
# Cost in EUR: (?<=EUR\s)\d{1,2}(\s\d{3}){1,2}
# EU Contribution: (?<=EU contribution: EUR\s)\d{1,2}(\s\d{3}){1,2}(?!Programme)
# Programme acronym: (?<=Programme acronym:\s)(\w{1,}.){2}
# Contract type: (?<=Contract type:\s).{1,}
# Subprogramme type: (?<=Subprogramme area:\s).{1,}(?=Contract)
# Participants: (?<=\n).{1,}?\n.{1,}?(?=\s\n)
# Participant contact: (?<=Administrative contact:\s).{1,}\n.{1,}(?=Email)
# Record number: (?<=Record number:\s)\d{1,}(?=\s\/)
field_regexp = {
'Title' : '(?<=\|\s).{1,}(?=\swebsite)',
'Start date' : '(?<=From\s).{1,}(?=\sto)',
'End date' : '(?<=to\s).{1,}(?=\s\|)',
'Coordinator' : '(?<=Coordinator\n\n).{1,}(?=\n)',
'Coordinator contact' : '(?<=Administrative contact:\s).{1,}\n.{1,}(?!Email)',
'Project cost' : '(?<=EUR\s)\d{1,2}(\s\d{3}){1,2}',
'EU contribution' : '(?<=EU contribution: EUR\s)\d{1,2}(\s\d{3}){1,2}(?!Programme)',
'Programme' : '(?<=Programme acronym:\s\n)(\w{1,}.){2}',
'Subprogramme' : '(?<=Subprogramme area:\s\n).{1,}(?=\n)',
'Contract' : '(?<=Contract type:\s\n).{1,}',
'Participants' : '(?<=\n).{1,}?\n.{1,}?(?=\s\n)',
'Participant contact' : '(?<=Administrative contact:\s).{1,}\n.{1,}(?=Email)',
'Record number' : '(?<=Record number:\s)\d{1,}(?=\s\/)'
}
# WAAAAH, das hier ist unsagbar hässlich!
project_title = re.search(field_regexp['Title'], data_info)
project_title = project_title.group()
project_start = re.search(field_regexp['Start date'], data_info)
project_start = project_start.group()
project_end = re.search(field_regexp['End date'], data_info)
project_end = project_end.group()
project_coordinator = re.search(field_regexp['Coordinator'], data_coordinator)
project_coordinator = project_coordinator.group()
project_coord_con = re.search(field_regexp['Coordinator contact'], data_coordinator)
project_coord_con = project_coord_con.group()
project_cost = re.search(field_regexp['Project cost'], data_details)
project_cost = project_cost.group()
project_cost = project_cost.replace(" ", "")
project_contribution = re.search(field_regexp['EU contribution'], data_details)
project_contribution = project_contribution.group()
project_contribution = project_contribution.replace(" ", "")
project_programme = re.search(field_regexp['Programme'], data_details)
project_programme = project_programme.group()
project_subprogramme = re.search(field_regexp['Subprogramme'], data_details)
project_subprogramme = project_subprogramme.group()
project_contract = re.search(field_regexp['Contract'], data_details)
project_contract = project_contract.group()
project_participants = re.findall(field_regexp['Participants'], data_participants)
#project_participants = project_participants.group()
project_part_con = re.findall(field_regexp['Participant contact'], data_participants)
#project_part_con = project_part_con.group()
project_reference = re.search(field_regexp['Record number'], data_footer)
project_reference = project_reference.group()
project_desc = {
'Title' : project_title,
'Start date' : project_start,
'End date' : project_end,
'Coordinator' : project_coordinator,
'Coordinator contact' : project_coord_con,
'Project cost' : project_cost,
'EU contribution' : project_contribution,
'Programme' : project_programme,
'Subprogramme' : project_subprogramme,
'Contract' : project_contract,
#'Participants' : project_participants[0],
#'Participant contact' : project_part_con[0],
'Reference' : project_reference
}
scraperwiki.sqlite.save(unique_keys=['Title'], data=project_desc)
# Entry point: search CORDIS for each applicant term and scrape every hit.
print ">>> CORDIS scraper <<<"
# Search terms; each one is spliced into the CORDIS query URL twice below
# (once as the query text, once as the result-page description).
applicants = ["rexroth"]
URL_1 = "http://cordis.europa.eu/newsearch/download.cfm?action=query&collection=EN_PROJ&text=%28"
URL_2="%29&sort=all&querySummary=quick&fieldText=%28MATCH%7BCORDIS%2CWEBPAGESEUROPA%7D%3ASOURCE%29&ENGINE_ID=CORDIS_ENGINE_ID&SEARCH_TYPE_ID=CORDIS_SEARCH_ID&descr="
URL_3 = ";%20Projects"
print "Number of searches: " + str(len(applicants))
# Open CSV file
# NOTE(review): 'output.csv' is opened twice — once as csvfile (unused) and
# once inside csv.writer; the second handle is never closed explicitly.
with open ('output.csv', 'w') as csvfile:
    csvwriter = csv.writer(open ('output.csv', 'a'))
    for applicant in applicants:
        list_url = URL_1 + applicant + URL_2 + applicant + URL_3
        result_links = extract_links(list_url)
        for link in result_links:
parse_object(link)import scraperwiki
import scrapemark
import feedparser
import csv
import re
import urllib2,sys
import requests
import lxml.html
from BeautifulSoup import BeautifulSoup, NavigableString
# extract project page links from the result page "url"
def extract_links(url):
atom_feed = feedparser.parse(url)
link_list = []
for entry in atom_feed.entries:
print entry.title #+ " - " + entry.link
print entry.link
# experiment with data structure
data = {
'TITLE' : entry.title,
'LINK' : entry.link
}
print data
#scraperwiki.sqlite.save(unique_keys=['TITLE'], data=data)
link_list.append(entry.link)
#csvwriter.writerow([entry.title] + [entry.link])
return link_list
# open details page for "object" and parse the results
def parse_object(object):
html = urllib2.urlopen(object).read()
soup = BeautifulSoup(html)
project_id = soup.find('input', attrs={'name':"REF"}).get('value')
print "Project-ID: " + str(project_id)
detail_url = "http://cordis.europa.eu/newsearch/getDoc?doctype=PROJ&xslt-template=projects/xsl/projectdet_en.xslt&rcn=" + str(project_id)
print "***" + detail_url
details = requests.get(detail_url)
detail_page = details.content
content = BeautifulSoup(detail_page, convertEntities="html", smartQuotesTo="html", fromEncoding="utf-8")
# extract content
data_info = content.find(attrs={'class':'projdates'})
data_coordinator = content.find(attrs={'class': 'projcoord'})
data_details = content.find(attrs={'class': 'projdet'})
data_participants = content.find(attrs={'class': 'participants'})
data_footer = content.find(attrs={'id': 'recinfo'})
# data_tech = content.find(attrs={'class': 'tech'})
# trying to find project description: display all content
print ">>> " str(content)
data_info = lxml.html.fromstring(str(data_info))
data_info = data_info.text_content()
data_coordinator = lxml.html.fromstring(str(data_coordinator))
data_coordinator = data_coordinator.text_content()
data_details = lxml.html.fromstring(str(data_details))
data_details = data_details.text_content()
data_participants = lxml.html.fromstring(str(data_participants))
data_participants = data_participants.text_content()
data_footer = lxml.html.fromstring(str(data_footer))
data_footer = data_footer.text_content()
# REGEXP for fields
# Start date in YYYY-MM-DD: (?<=From\s).{1,}(?=\sto)
# End date in YYYY-MM-DD: (?<=to\s).{1,}(?=\s\|)
# Coordinator: (?<=Coordinator\s).{1,}(?=\s\(\+\))
# Coordinator contact: (?<=Administrative contact:\s).{1,}(?!\n)
# Project title in caps: (?<=\|\s).{1,}(?=\swebsite)
# Cost in EUR: (?<=EUR\s)\d{1,2}(\s\d{3}){1,2}
# EU Contribution: (?<=EU contribution: EUR\s)\d{1,2}(\s\d{3}){1,2}(?!Programme)
# Programme acronym: (?<=Programme acronym:\s)(\w{1,}.){2}
# Contract type: (?<=Contract type:\s).{1,}
# Subprogramme type: (?<=Subprogramme area:\s).{1,}(?=Contract)
# Participants: (?<=\n).{1,}?\n.{1,}?(?=\s\n)
# Participant contact: (?<=Administrative contact:\s).{1,}\n.{1,}(?=Email)
# Record number: (?<=Record number:\s)\d{1,}(?=\s\/)
field_regexp = {
'Title' : '(?<=\|\s).{1,}(?=\swebsite)',
'Start date' : '(?<=From\s).{1,}(?=\sto)',
'End date' : '(?<=to\s).{1,}(?=\s\|)',
'Coordinator' : '(?<=Coordinator\n\n).{1,}(?=\n)',
'Coordinator contact' : '(?<=Administrative contact:\s).{1,}\n.{1,}(?!Email)',
'Project cost' : '(?<=EUR\s)\d{1,2}(\s\d{3}){1,2}',
'EU contribution' : '(?<=EU contribution: EUR\s)\d{1,2}(\s\d{3}){1,2}(?!Programme)',
'Programme' : '(?<=Programme acronym:\s\n)(\w{1,}.){2}',
'Subprogramme' : '(?<=Subprogramme area:\s\n).{1,}(?=\n)',
'Contract' : '(?<=Contract type:\s\n).{1,}',
'Participants' : '(?<=\n).{1,}?\n.{1,}?(?=\s\n)',
'Participant contact' : '(?<=Administrative contact:\s).{1,}\n.{1,}(?=Email)',
'Record number' : '(?<=Record number:\s)\d{1,}(?=\s\/)'
}
# WAAAAH, das hier ist unsagbar hässlich!
project_title = re.search(field_regexp['Title'], data_info)
project_title = project_title.group()
project_start = re.search(field_regexp['Start date'], data_info)
project_start = project_start.group()
project_end = re.search(field_regexp['End date'], data_info)
project_end = project_end.group()
project_coordinator = re.search(field_regexp['Coordinator'], data_coordinator)
project_coordinator = project_coordinator.group()
project_coord_con = re.search(field_regexp['Coordinator contact'], data_coordinator)
project_coord_con = project_coord_con.group()
project_cost = re.search(field_regexp['Project cost'], data_details)
project_cost = project_cost.group()
project_cost = project_cost.replace(" ", "")
project_contribution = re.search(field_regexp['EU contribution'], data_details)
project_contribution = project_contribution.group()
project_contribution = project_contribution.replace(" ", "")
project_programme = re.search(field_regexp['Programme'], data_details)
project_programme = project_programme.group()
project_subprogramme = re.search(field_regexp['Subprogramme'], data_details)
project_subprogramme = project_subprogramme.group()
project_contract = re.search(field_regexp['Contract'], data_details)
project_contract = project_contract.group()
project_participants = re.findall(field_regexp['Participants'], data_participants)
#project_participants = project_participants.group()
project_part_con = re.findall(field_regexp['Participant contact'], data_participants)
#project_part_con = project_part_con.group()
project_reference = re.search(field_regexp['Record number'], data_footer)
project_reference = project_reference.group()
project_desc = {
'Title' : project_title,
'Start date' : project_start,
'End date' : project_end,
'Coordinator' : project_coordinator,
'Coordinator contact' : project_coord_con,
'Project cost' : project_cost,
'EU contribution' : project_contribution,
'Programme' : project_programme,
'Subprogramme' : project_subprogramme,
'Contract' : project_contract,
#'Participants' : project_participants[0],
#'Participant contact' : project_part_con[0],
'Reference' : project_reference
}
scraperwiki.sqlite.save(unique_keys=['Title'], data=project_desc)
# Entry point: search CORDIS for each applicant term and scrape every hit.
print ">>> CORDIS scraper <<<"
# Search terms; each one is spliced into the CORDIS query URL twice below
# (once as the query text, once as the result-page description).
applicants = ["rexroth"]
URL_1 = "http://cordis.europa.eu/newsearch/download.cfm?action=query&collection=EN_PROJ&text=%28"
URL_2="%29&sort=all&querySummary=quick&fieldText=%28MATCH%7BCORDIS%2CWEBPAGESEUROPA%7D%3ASOURCE%29&ENGINE_ID=CORDIS_ENGINE_ID&SEARCH_TYPE_ID=CORDIS_SEARCH_ID&descr="
URL_3 = ";%20Projects"
print "Number of searches: " + str(len(applicants))
# Open CSV file
# NOTE(review): 'output.csv' is opened twice — once as csvfile (unused) and
# once inside csv.writer; the second handle is never closed explicitly.
with open ('output.csv', 'w') as csvfile:
    csvwriter = csv.writer(open ('output.csv', 'a'))
    for applicant in applicants:
        list_url = URL_1 + applicant + URL_2 + applicant + URL_3
        result_links = extract_links(list_url)
        for link in result_links:
parse_object(link) | [
"pallih@kaninka.net"
] | pallih@kaninka.net |
952177163a4b1437e1561a38db38fa4d951331ba | 9cb8a9f5172f9af17eda5ca8d3c6ff297f0bf120 | /setup.py | 42aed970f5f0d61d42ac271f5d64f44f3aa5bc6c | [] | no_license | tzyk777/twrapper | 91c35e0a572c533071bc02c75952fd69fd2b4a22 | b07c7c307f324a214c876b0d50ec18771b0012e1 | refs/heads/master | 2021-01-17T16:29:16.968949 | 2016-10-29T21:54:06 | 2016-10-29T21:54:06 | 62,597,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | from distutils.core import setup
# Package metadata for the twrapper distribution.
# NOTE(review): distutils is deprecated (removed from the stdlib in
# Python 3.12); consider migrating to setuptools.setup with the same
# arguments.
setup(name='twrapper',
      version='1.3.1',
      description='Python twitter wrapper',
      author='Zeyang Tao',
      author_email='zeyangtao1020@gmail.com',
      packages=['twrapper']
      )
"zeyangtaooptions@gmail.com"
] | zeyangtaooptions@gmail.com |
12991bf43618c242644e572cf61bc414b413c0b4 | 152ebb6f75ac0d79f824ea219ca095be59b23fd0 | /client-side-module/client-side.py | 62ab47a62a652cdf6bcc0f1acf2da0f6c105606b | [] | no_license | Automated-CAD-Scoring-Suite/Remote-Communication-Module-for-3D-Slicer | f296d1aca08cb90d1feb28e6ee8e1d7e45af70e0 | fc6a2166c3208997bb7ed0aa8cd3ee0c7b6dc794 | refs/heads/main | 2023-02-23T21:16:25.327063 | 2021-01-30T04:04:53 | 2021-01-30T04:04:53 | 333,851,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,210 | py | # Import Packages
import SimpleITK as sitk
import nibabel as nib
import matplotlib.pylab as plt
import numpy as np
def find_roi_2D(s):
    """Return (x1, x2, y1, y2) bounding the brightest pixels of a 2-D slice.

    x1/y1 are the row/column of the first maximum found by np.argmax in
    each scan direction; x2/y2 are far bounds derived from scanning the
    flipped slice (exclusive-style: shape minus first-hit index).  For an
    all-zero slice this degenerates to (0, width, 0, height).
    """
    rows, cols = s.shape[0], s.shape[1]
    # One clockwise rotation lets the same row-scan find column extents.
    clockwise = np.rot90(s, k=3)
    top = np.unravel_index(np.argmax(s, axis=None), s.shape)[0]
    bottom = np.unravel_index(np.argmax(np.flip(s, axis=0), axis=None), s.shape)[0]
    left = np.unravel_index(np.argmax(clockwise, axis=None), s.shape)[0]
    right = np.unravel_index(np.argmax(np.flip(clockwise, axis=0), axis=None), s.shape)[0]
    return left, cols - right, top, rows - bottom
def find_roi(sample):
    """Return the 3-D bounding box (X1, X2, Y1, Y2, Z1, Z2) of *sample*.

    Scans every axial (axis 2), sagittal (axis 1) and coronal (axis 0)
    slice with find_roi_2D and keeps the tightest min/max bounds.  Axis
    convention: axis 0 = Y, axis 1 = X, axis 2 = Z.  Lower bounds are
    inclusive; upper bounds follow find_roi_2D's exclusive-style form.

    NOTE(review): an all-zero slice makes find_roi_2D return full-extent
    bounds, which widens the box to the whole volume in that plane; that
    behaviour is kept from the original.
    """
    X1, X2 = sample.shape[1], 0
    Y1, Y2 = sample.shape[0], 0
    Z1, Z2 = sample.shape[2], 0

    # Axial slices: rows are Y, columns are X.
    for index in range(sample.shape[2]):
        x1, x2, y1, y2 = find_roi_2D(sample[:, :, index])
        X1, X2 = min(x1, X1), max(x2, X2)
        Y1, Y2 = min(y1, Y1), max(y2, Y2)

    # Sagittal slices: rows are Y, columns are Z.
    for index in range(sample.shape[1]):
        z1, z2, y1, y2 = find_roi_2D(sample[:, index, :])
        Z1, Z2 = min(z1, Z1), max(z2, Z2)
        Y1, Y2 = min(y1, Y1), max(y2, Y2)

    # Coronal slices: rows are X, columns are Z.
    # BUG FIX: find_roi_2D returns (column extents, row extents); the
    # original assigned the column extents (Z here) to X and the row
    # extents (X here) to Z, swapping the two axes in this plane.
    for index in range(sample.shape[0]):
        z1, z2, x1, x2 = find_roi_2D(sample[index, :, :])
        X1, X2 = min(x1, X1), max(x2, X2)
        Z1, Z2 = min(z1, Z1), max(z2, Z2)

    return X1, X2, Y1, Y2, Z1, Z2
def crop_roi(sample, x1, x2, y1, y2, z1, z2):
    """Return a copy of *sample* cropped to the given bounding box.

    The +1 makes each upper index inclusive in the slice.  Axis mapping
    matches find_roi: axis 0 = Y, axis 1 = X, axis 2 = Z.

    NOTE(review): combined with find_roi's exclusive-style upper bounds
    this includes one extra row/column/slice — confirm that is intended.

    The original pre-allocated a throwaway np.empty((y, x, z, 1)) buffer
    (with unused conditional size arithmetic) and then immediately
    rebound the name to the slice copy; that dead code is removed.
    """
    return sample[y1:y2 + 1, x1:x2 + 1, z1:z2 + 1].copy()
def load_itk(filename: str):
    """
    Read a medical image file (e.g. '.mhd', '.nii') using SimpleITK.

    :param filename: Path of the image file.
    :return: Tuple of (itk image object, voxel array, origin, spacing).
    """
    image = sitk.ReadImage(filename)
    # GetArrayFromImage shuffles the dimensions so the axes come out z,y,x.
    voxels = sitk.GetArrayFromImage(image)
    # Origin and spacing are reversed to match the z,y,x order above; the
    # origin is what converts coordinates between world and voxel space.
    origin = np.array(list(reversed(image.GetOrigin())))
    spacing = np.array(list(reversed(image.GetSpacing())))
    return image, voxels, origin, spacing
def plot_view(data):
    """Show slices of *data* as an 8x8 grayscale grid and call plt.show().

    NOTE(review): the loop bound is data.shape[1] but the indexing is
    data[i] (axis 0) — these only agree when the first two axes have equal
    length; confirm the intended axis.  Also assumes at most 64 slices,
    or the 8x8 subplot grid overflows.
    """
    plt.figure(figsize=(50, 50))
    plt.gray()
    plt.subplots_adjust(0,0,1,1,0.01,0.01)
    for i in range(data.shape[1]):
        plt.subplot(8, 8, i+1)
        plt.imshow(data[i])
        plt.axis('off')
    # use plt.savefig(...) here if you want to save the images as .jpg, e.g.,
    plt.show()
# --- Script body: load a CT scan plus its heart segmentation label and
# --- display one slice from each anatomical plane.
# itkimage, ct_scan_data, origin, spacing = load_itk('../data/trv1p1cti.mhd')
itkimage, ct_scan_data, origin, spacing = load_itk('../data/trv1p1cti.nii')
# print(itkimage)
# ct_scan_data = nib.load('../data/trv1p1cti.nii').get_fdata()
# ct_scan_data = np.swapaxes(ct_scan_data, 0, 2)
# itkimage2, ct_scan_label, origin2, spacing2 = load_itk('../data/trv1p1cti-heart.nii')
# itkimage2, ct_scan_label, origin2, spacing2 = load_itk('../data/trv1p1cti-heart_4.nii.gz')
# ct_scan_label = np.swapaxes(ct_scan_label, 0, 2)
# Label volume is loaded with nibabel (x,y,z order), unlike the scan above
# which comes from SimpleITK (z,y,x order) — NOTE(review): confirm the two
# volumes are actually axis-aligned before comparing them.
ct_scan_label = nib.load('../data/trv1p1cti-heart_4.nii').get_fdata()
print(ct_scan_label.shape)
# Hard-coded slice indices chosen for this particular dataset.
sagital_image = ct_scan_label[213, :, :]  # Axis 0
print(sagital_image.shape)
axial_image = ct_scan_label[:, :, 32]  # Axis 2
print(axial_image.shape)
coronal_image = ct_scan_label[:, 154, :]  # Axis 1
# Render the three planes side by side (rotated for display orientation).
plt.figure(figsize=(20, 10))
plt.style.use('grayscale')
plt.subplot(141)
plt.imshow(np.rot90(sagital_image))
plt.title('Sagital Plane')
plt.axis('off')
plt.subplot(142)
plt.imshow(np.rot90(axial_image))
plt.title('Axial Plane')
plt.axis('off')
plt.subplot(143)
plt.imshow(np.rot90(coronal_image))
plt.title('Coronal Plane')
plt.axis('off')
plt.show()
# x1,x2,y1,y2,z1,z2 = find_roi(ct_scan_label)
# print(x1, x2, y1, y2, z1, z2)
# print(ct_scan_label[32].shape)
# x1,x2,y1,y2 = find_roi_2D(ct_scan_label[32])
# print(x1, x2, y1, y2)
# croped = ct_scan_label[32][x1:x2+1, y1:y2+1]
# cropped_data = crop_roi(ct_scan_data, x1,x2,y1,y2,z1,z2)
# print("New Shape:", cropped_data.shape)
print("Original Shape:", ct_scan_data.shape)
print("Label Shape:", ct_scan_label.shape)
# plot_view(sagital_image)
# plot_view(axial_image)
# plot_view(coronal_image)
| [
"abdullah.m.alrefaey@gmail.com"
] | abdullah.m.alrefaey@gmail.com |
6be743b4b02d6eb6d7f62aab46ff57260ffa042b | f92dfdebb4bf6bc108f51783333520c35afa66da | /api-web/src/www/application/management/commands/publish_rabbitmq_genome_gene.py | 23f7465ee4e41b1adf971b243ae030a6a568b6ea | [] | no_license | duytran92-cse/nas-genodata | 4d8659a135913d226842ff6a013324714ead0458 | 80c88f42145f729c5862a5293012e71548182e1d | refs/heads/master | 2022-11-13T17:24:03.769605 | 2020-06-14T18:59:36 | 2020-06-14T18:59:36 | 272,264,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,305 | py | import json, pika, os
from application.models import *
from urad_api import registry
from urad_api_standard.commands import Command as BaseCommand
from django.conf import settings
import json
from application.modules.gene import components as gene_components
from django.db import connection
class Command(BaseCommand):
## PUBLISH
def publish_to_queue(self, iterator, genome_queue, rabbitmq_host, rabbitmq_port):
credentials = pika.PlainCredentials('guest', 'guest')
connection = pika.BlockingConnection(pika.ConnectionParameters(rabbitmq_host, rabbitmq_port, '/', credentials))
channel = connection.channel()
channel.queue_declare(queue=genome_queue)
for x in iterator:
channel.basic_publish(exchange='', routing_key=genome_queue, body=json.dumps(x))
connection.close()
def process(self, params = {}):
# DECLARE VARIABLE
GENOME_QUEUE = settings.GENOME_QUEUE
RABBITMQ_HOST = settings.RABBITMQ_HOST
RABBITMQ_PORT = int(settings.RABBITMQ_PORT)
# Starting
print "[x] Publish data to rabbitmq"
##########################
## Gene
print "[***] Publish GENE data to rabbitmq"
isDone = False
start = 0
gene_manager = gene_components.DataManager()
while not isDone:
end = start + 5000
print 'start: %s, end: %s' % (start, end)
gene = Gene.objects.all()[start:end]
start = end + 1
if gene.count() <= 0:
isDone = True
x = []
for var in gene:
y = ['gene', var.code]
try:
data = gene_manager.get(var.code)
values = {}
arr_disease = []
asso_disease = []
asso_pub = []
for field, value in data.items():
if field in ['synonyms', 'effects','start', 'end','num_exon','chromosome','protein_product','description'] and value['value'] != None:
values[field] = value['value']
# disease field
if field == 'disgenet-diseases' and value['value'] != None:
arr_disease.extend(value['value'])
rs = [ item['disease'] for item in value['value'] ]
asso_disease.extend(rs)
if field == 'gwas-diseases' and value['value'] != None:
try:
for k in value['value']:
arr_disease.append({
'disease': k.get('disease',''),
'pubmedid': k.get('pmid',''),
'sentence': k.get('sentence', '')
})
except Exception as e:
pass
rs = [ item['disease'] for item in value['value'] ]
asso_disease.extend(rs)
if field == 'ctdbase-diseases' and value['value'] != None:
try:
for k in value['value']:
arr_disease.append({
'disease': k.get('disease',''),
'pubmedid': k.get('pmid',''),
'sentence': k.get('evidence', '')
})
except Exception as e:
pass
rs = [ item['disease'] for item in value['value'] ]
asso_disease.extend(rs)
if len(arr_disease) > 0:
values['disgenet-diseases'] = arr_disease
if len(asso_disease) > 0:
values['associated_diseases'] = asso_disease
# publications
if field == 'publications' and value['value'] != None:
values[field] = value['value']
try:
for k in value['value']:
asso_pub.append({
'pmid': k['pmid'],
'title': k['title']
})
except Exception as e:
pass
if field == 'gwas-publications' and value['value'] != None:
asso_pub.extend(value['value'])
if len(asso_pub) > 0:
values['associated_publications'] = asso_pub
if values:
y.append(values)
x.append(y)
except Exception as e:
pass
# Publish rabbitMQ
self.publish_to_queue(x, GENOME_QUEUE, RABBITMQ_HOST, RABBITMQ_PORT)
print "[***] DONE gene"
print "[x] Sent data to RabbitMQ"
| [
"thanh.tran@etudiant.univ-lr.fr"
] | thanh.tran@etudiant.univ-lr.fr |
5da2bd8dc2830c9ae5ea68845892e133cd447295 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startCirq2210.py | 06f183066edd0d13b690b7e34154e944725a31e0 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,734 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=28
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Build the benchmark circuit from a hard-coded, generated gate list.

    n -- qubit count (unused here; the gate sequence is fixed for 4 qubits)
    input_qubit -- list of cirq qubits the gates act on

    Returns a cirq.Circuit ending with a measurement of all qubits under
    the key 'result'.  The "# number=N" tags are bookkeeping left by the
    generator that emitted this sequence.
    """
    c = cirq.Circuit()  # circuit begin
    c.append(cirq.H.on(input_qubit[0])) # number=9
    c.append(cirq.rx(-1.9069467407290044).on(input_qubit[2])) # number=20
    c.append(cirq.H.on(input_qubit[3])) # number=21
    c.append(cirq.H.on(input_qubit[1])) # number=2
    c.append(cirq.H.on(input_qubit[2])) # number=3
    c.append(cirq.H.on(input_qubit[3])) # number=4
    c.append(cirq.Y.on(input_qubit[2])) # number=13
    c.append(cirq.rx(0.13823007675795101).on(input_qubit[2])) # number=24
    c.append(cirq.H.on(input_qubit[0])) # number=5
    c.append(cirq.H.on(input_qubit[1])) # number=6
    c.append(cirq.H.on(input_qubit[2])) # number=7
    c.append(cirq.H.on(input_qubit[3])) # number=8
    c.append(cirq.X.on(input_qubit[3])) # number=1
    c.append(cirq.rx(-1.9352210746113125).on(input_qubit[3])) # number=14
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[2])) # number=22
    c.append(cirq.Y.on(input_qubit[2])) # number=10
    c.append(cirq.H.on(input_qubit[1])) # number=17
    c.append(cirq.CZ.on(input_qubit[3],input_qubit[1])) # number=18
    c.append(cirq.H.on(input_qubit[1])) # number=19
    c.append(cirq.Y.on(input_qubit[2])) # number=11
    c.append(cirq.H.on(input_qubit[0])) # number=25
    c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=26
    c.append(cirq.H.on(input_qubit[0])) # number=27
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=16
    c.append(cirq.Z.on(input_qubit[3])) # number=23
    # circuit end

    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Concatenate an iterable of measurement bits into a '0'/'1' string."""
    digits = [str(int(bit)) for bit in bits]
    return ''.join(digits)
if __name__ == '__main__':
    # Four-qubit benchmark: build the circuit, optimise it for the
    # Sycamore gate set, sample 2000 shots, then dump the measurement
    # histogram plus the circuit itself to a CSV-named text file.
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    circuit_sample_count =2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)

    # fold_func=bitstring collapses each shot's bits into a '0'/'1' key.
    frequencies = result.histogram(key='result', fold_func=bitstring)
    writefile = open("../data/startCirq2210.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)

    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)

    writefile.close()
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
3d0e56a951a0a89f5feb4223a746a737089a3ea2 | 6fc2feac7ec07870afc927983cb5b048af1f6566 | /src/monju_no_chie/admin.py | 2fa6e5f53d12c868dd20b772492651f03aa9d62e | [] | no_license | SpaceMagical/SpaceMagical | fd2f114e4df89eeb71c6fb5900010eca8746ebfc | e3b4e5eb1f91fd8145d397b9bd4018d5f99ca8df | refs/heads/master | 2021-06-11T02:42:43.704128 | 2016-12-03T16:13:33 | 2016-12-03T16:13:33 | 74,037,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | from django.contrib import admin
from .models import MonjuNoChie
class MonjuNoChieAdmin(admin.ModelAdmin):
    """Admin registration for the MonjuNoChie model (default options)."""
    # NOTE(review): ModelAdmin does not consume an inner Meta class the way
    # forms and models do; this Meta is effectively ignored by Django admin.
    class Meta:
        model = MonjuNoChie

# Expose the model in the Django admin site.
admin.site.register(MonjuNoChie, MonjuNoChieAdmin)
| [
"hirotoaoki1349@gmail.com"
] | hirotoaoki1349@gmail.com |
39f29b37f7444cf60b0b9e2cbd3307132c1c48c6 | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/pandas/tests/io/parser/test_skiprows.py | 1df2ca4fad4d87539cdcdee874cb25a6cd3ce18e | [
"Apache-2.0"
] | permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 6,948 | py | # -*- coding: utf-8 -*-
"""
Tests that skipped rows are properly handled during
parsing for all of the parsers defined in parsers.py
"""
from datetime import datetime
import numpy as np
import pytest
from pandas.compat import StringIO, lrange, range
from pandas.errors import EmptyDataError
from pandas import DataFrame, Index
import pandas.util.testing as tm
@pytest.mark.parametrize("skiprows", [lrange(6), 6])
def test_skip_rows_bug(all_parsers, skiprows):
    # see gh-505
    # skiprows given as a list of the first six row indices and as the
    # scalar 6 must behave identically: both drop the "#foo" junk lines.
    parser = all_parsers
    text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
    result = parser.read_csv(StringIO(text), skiprows=skiprows, header=None,
                             index_col=0, parse_dates=True)
    index = Index([datetime(2000, 1, 1), datetime(2000, 1, 2),
                   datetime(2000, 1, 3)], name=0)
    expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
                         columns=[1, 2, 3], index=index)
    tm.assert_frame_equal(result, expected)
def test_deep_skip_rows(all_parsers):
    # see gh-4382
    # Skipping rows 6 and 8 from the generated data must parse the same
    # as a frame built without those rows in the first place.
    parser = all_parsers
    data = "a,b,c\n" + "\n".join([",".join([str(i), str(i + 1), str(i + 2)])
                                  for i in range(10)])
    condensed_data = "a,b,c\n" + "\n".join([
        ",".join([str(i), str(i + 1), str(i + 2)])
        for i in [0, 1, 2, 3, 4, 6, 8, 9]])

    result = parser.read_csv(StringIO(data), skiprows=[6, 8])
    condensed_result = parser.read_csv(StringIO(condensed_data))
    tm.assert_frame_equal(result, condensed_result)
def test_skip_rows_blank(all_parsers):
    # see gh-9832
    # skiprows as an integer drops the leading junk lines before the data.
    parser = all_parsers
    text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
    data = parser.read_csv(StringIO(text), skiprows=6, header=None,
                           index_col=0, parse_dates=True)
    index = Index([datetime(2000, 1, 1), datetime(2000, 1, 2),
                   datetime(2000, 1, 3)], name=0)
    expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
                         columns=[1, 2, 3],
                         index=index)
    tm.assert_frame_equal(data, expected)
@pytest.mark.parametrize("data,kwargs,expected", [
    ("""id,text,num_lines
1,"line 11
line 12",2
2,"line 21
line 22",2
3,"line 31",1""",
     dict(skiprows=[1]),
     DataFrame([[2, "line 21\nline 22", 2],
                [3, "line 31", 1]], columns=["id", "text", "num_lines"])),
    ("a,b,c\n~a\n b~,~e\n d~,~f\n f~\n1,2,~12\n 13\n 14~",
     dict(quotechar="~", skiprows=[2]),
     DataFrame([["a\n b", "e\n d", "f\n f"]], columns=["a", "b", "c"])),
    (("Text,url\n~example\n "
      "sentence\n one~,url1\n~"
      "example\n sentence\n two~,url2\n~"
      "example\n sentence\n three~,url3"),
     dict(quotechar="~", skiprows=[1, 3]),
     DataFrame([['example\n sentence\n two', 'url2']],
               columns=["Text", "url"]))
])
def test_skip_row_with_newline(all_parsers, data, kwargs, expected):
    # see gh-12775 and gh-10911
    # Skipped row indices must be counted at the parsed-record level, not
    # the physical-line level, even when quoted fields span newlines.
    parser = all_parsers
    result = parser.read_csv(StringIO(data), **kwargs)
    tm.assert_frame_equal(result, expected)
def test_skip_row_with_quote(all_parsers):
    # see gh-12775 and gh-10911
    """Quotes inside a skipped row must not throw off later rows."""
    parser = all_parsers
    data = """id,text,num_lines
1,"line '11' line 12",2
2,"line '21' line 22",2
3,"line '31' line 32",1"""
    expected = DataFrame(
        [[2, "line '21' line 22", 2],
         [3, "line '31' line 32", 1]],
        columns=["id", "text", "num_lines"])

    result = parser.read_csv(StringIO(data), skiprows=[1])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("data,exp_data", [
    ("""id,text,num_lines
1,"line \n'11' line 12",2
2,"line \n'21' line 22",2
3,"line \n'31' line 32",1""",
     [[2, "line \n'21' line 22", 2],
      [3, "line \n'31' line 32", 1]]),
    ("""id,text,num_lines
1,"line '11\n' line 12",2
2,"line '21\n' line 22",2
3,"line '31\n' line 32",1""",
     [[2, "line '21\n' line 22", 2],
      [3, "line '31\n' line 32", 1]]),
    ("""id,text,num_lines
1,"line '11\n' \r\tline 12",2
2,"line '21\n' \r\tline 22",2
3,"line '31\n' \r\tline 32",1""",
     [[2, "line '21\n' \r\tline 22", 2],
      [3, "line '31\n' \r\tline 32", 1]]),
])
def test_skip_row_with_newline_and_quote(all_parsers, data, exp_data):
    # see gh-12775 and gh-10911
    """Quoted fields containing newlines still skip as whole records."""
    parser = all_parsers
    expected = DataFrame(exp_data, columns=["id", "text", "num_lines"])

    result = parser.read_csv(StringIO(data), skiprows=[1])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("line_terminator", [
    "\n",    # "LF"
    "\r\n",  # "CRLF"
    "\r"     # "CR"
])
def test_skiprows_lineterminator(all_parsers, line_terminator):
    # see gh-9079
    """``skiprows`` must honour every supported line terminator."""
    parser = all_parsers
    data = "\n".join(["SMOSMANIA ThetaProbe-ML2X ",
                      "2007/01/01 01:00 0.2140 U M ",
                      "2007/01/01 02:00 0.2141 M O ",
                      "2007/01/01 04:00 0.2142 D M "])
    expected = DataFrame(
        [["2007/01/01", "01:00", 0.2140, "U", "M"],
         ["2007/01/01", "02:00", 0.2141, "M", "O"],
         ["2007/01/01", "04:00", 0.2142, "D", "M"]],
        columns=["date", "time", "var", "flag", "oflag"])

    if parser.engine == "python" and line_terminator == "\r":
        pytest.skip("'CR' not respect with the Python parser yet")

    data = data.replace("\n", line_terminator)
    result = parser.read_csv(StringIO(data), skiprows=1, delim_whitespace=True,
                             names=["date", "time", "var", "flag", "oflag"])
    tm.assert_frame_equal(result, expected)
def test_skiprows_infield_quote(all_parsers):
    # see gh-14459
    """A quote character inside a skipped row must not confuse parsing."""
    parser = all_parsers
    data = 'a"\nb"\na\n1'
    expected = DataFrame({"a": [1]})

    result = parser.read_csv(StringIO(data), skiprows=2)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs,expected", [
    ({}, DataFrame({"1": [3, 5]})),
    ({"header": 0, "names": ["foo"]}, DataFrame({"foo": [3, 5]}))
])
def test_skip_rows_callable(all_parsers, kwargs, expected):
    """A callable ``skiprows`` receives the row index and may drop any row."""
    parser = all_parsers
    data = "a\n1\n2\n3\n4\n5"

    result = parser.read_csv(StringIO(data),
                             skiprows=lambda x: x % 2 == 0,
                             **kwargs)
    tm.assert_frame_equal(result, expected)
def test_skip_rows_skip_all(all_parsers):
    """Skipping every row leaves nothing to parse and must raise."""
    parser = all_parsers
    data = "a\n1\n2\n3\n4\n5"

    with pytest.raises(EmptyDataError, match="No columns to parse from file"):
        parser.read_csv(StringIO(data), skiprows=lambda x: True)
def test_skip_rows_bad_callable(all_parsers):
    """An exception raised inside a ``skiprows`` callable must propagate."""
    parser = all_parsers
    data = "a\n1\n2\n3\n4\n5"

    with pytest.raises(ZeroDivisionError, match="by zero"):
        parser.read_csv(StringIO(data), skiprows=lambda x: 1 / 0)
| [
"leibingye@outlook.com"
] | leibingye@outlook.com |
0ade196c2880c7c5454d81108adc3086b4ced438 | c7bd791903d36d5ee5e828cd90939e3358b5845a | /contacts/migrations/0001_initial.py | a972d5ff6aacc21aa3e2cf7fb92ed4c8be41ba86 | [] | no_license | Saxena611/bp_real_estate | b638ac477fcf8e44dccfb5d58473c83efa94e5cb | e2ce50678894f6f542864c525b9d8fcdb91f8669 | refs/heads/master | 2023-06-12T05:03:37.845739 | 2021-07-11T06:03:50 | 2021-07-11T06:03:50 | 330,562,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | # Generated by Django 3.0.3 on 2021-03-21 06:28
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Contact table."""

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True,
                                        serialize=False, verbose_name='ID')),
                ('request', models.CharField(max_length=100)),
                ('request_id', models.IntegerField()),
                ('name', models.CharField(max_length=200)),
                ('email', models.CharField(max_length=100)),
                ('phone', models.CharField(max_length=100)),
                ('message', models.TextField(blank=True)),
                # default captures submission time when not supplied
                ('contact_date', models.DateTimeField(
                    blank=True, default=datetime.datetime.now)),
                ('user_id', models.IntegerField(blank=True)),
            ],
        ),
    ]
| [
"Animesh.Saxena@amdocs.com"
] | Animesh.Saxena@amdocs.com |
b6e06bd57873d7cd596aa92ffcccf76eb8c487d1 | 6544fa558a6c08e4c67de393ed0d0ab554533839 | /DjangoProjects/DjangoProjects7EnquiryPageAndDeleteRoom/Rento/rento/rooms/models.py | 8ec356ad12ac7f769c1d4c1fecbeb6b79ef96526 | [] | no_license | tennnmani/bitproject7-DjangoPython- | 498878276ca0c847d0cf2ca73c1091074720d6e5 | fe13b4822c4cc5686a478dbfee915c108b6f9278 | refs/heads/main | 2023-02-21T16:56:10.842672 | 2021-02-25T04:13:52 | 2021-02-25T04:13:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,505 | py | from django.db import models
from django.urls import reverse
# Create your models here.
class City(models.Model):
    """A city that room listings can be grouped under."""

    name = models.CharField(max_length=120)  # required

    def __str__(self):
        return self.name
class Location(models.Model):
    """A named area within a city (exactly one location per city)."""

    city = models.OneToOneField(City, on_delete=models.CASCADE)
    location = models.CharField(max_length=120)  # required
    code = models.CharField(max_length=500)      # required

    def __str__(self):
        return self.location

    @staticmethod
    def get_all_locations():
        """Return every Location in the database."""
        return Location.objects.all()
class Room(models.Model):
    """A room listing with its location, amenities, pricing and moderation flags."""

    city = models.ForeignKey(City, on_delete=models.DO_NOTHING)
    location = models.ForeignKey(Location, on_delete=models.DO_NOTHING)
    house_number = models.CharField(max_length=120)  # required
    floor = models.IntegerField()
    price = models.PositiveIntegerField()
    image1 = models.ImageField(upload_to='')
    image2 = models.ImageField(upload_to='')
    image3 = models.ImageField(upload_to='')
    water = models.BooleanField(default=False)
    internet = models.BooleanField(default=False)
    parking = models.BooleanField(default=False)
    # NOTE: `description` was declared twice before; only this final
    # definition ever took effect, so the dead duplicate was removed.
    description = models.TextField(max_length=500, blank=False, null=False)
    date_posted = models.DateField(auto_now_add=True)
    views = models.IntegerField(default=0)
    blocked = models.BooleanField(default=False)
    public = models.BooleanField(default=True)
    private = models.BooleanField(default=False)

    def __str__(self):
        return str(self.pk)

    @staticmethod
    def get_all_rooms():
        """Return every Room in the database."""
        return Room.objects.all()

    @staticmethod
    def get_all_rooms_by_filter(location_id):
        """Return rooms at *location_id*, or all rooms when it is falsy."""
        if location_id:
            return Room.objects.filter(location=location_id)
        return Room.get_all_rooms()

    @staticmethod
    def get_all_rooms_by_waterinternetparkingfilter(water_id, internet_id, parking_id):
        """Filter rooms by the three amenity flags.

        BUG FIX: the original passed ``parking=internet_id`` and
        ``internet=parking_id`` (arguments swapped); they now map to the
        fields their names describe.
        """
        return Room.objects.filter(water=water_id, internet=internet_id,
                                   parking=parking_id)

    @staticmethod
    def get_all_rooms_by_allfilter(location_id, water_id, internet_id, parking_id):
        """Filter rooms by location plus the three amenity flags.

        BUG FIX: same swapped internet/parking keywords as above.
        """
        return Room.objects.filter(location=location_id, water=water_id,
                                   internet=internet_id, parking=parking_id)
"diwakartop10now@gmail.com"
] | diwakartop10now@gmail.com |
4f7296681cdfba9a427661da990e966abc246734 | ce8d7f8171da70b75b805f2ba5b2dfaeed651c9b | /geopy.py | 31f7ec0757edea1530872f3c706b0334a5752853 | [] | no_license | DesPenny/Everythingbackpacker | 5798ebd827c095f0aea48b1a5efa9e1191f8d0a0 | 8c45242de8aa41f00d77d95057d9daa0a61c41f3 | refs/heads/master | 2016-09-06T21:53:25.982409 | 2014-05-01T12:30:37 | 2014-05-01T12:30:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | from geopy import geocoders
# Module-level Google geocoder client.  NOTE(review): this file is
# Python 2 code (print statement, raw_input below).
g = geocoders.GoogleV3()
def find():
    # Prompt the user for a free-text place query on stdin.
    print 'What would you like to search?'
    query = raw_input()
    # geocode() yields (address, (latitude, longitude)); only the resolved
    # address string is returned, the coordinates are discarded.
    place, (lat, lng) = g.geocode(query)
    return place
"despenny@gmail.com"
] | despenny@gmail.com |
2ce3c8a48982b584a60b2a960d76c25d0d5a33c3 | f8965d7b16e3cf70370b3bd181ef855a2ab89768 | /services/student_service.py | 1008f3f8b34bb77616f5d65ad565592c882b9575 | [
"Apache-2.0"
] | permissive | tuannguyendang/montypython | 59cae6fc6069cf5356670132470cdd52bad00d67 | c0b8ff7a8130e811ba16bfab8d5e013eac37f432 | refs/heads/main | 2023-05-12T02:24:50.693432 | 2021-05-31T02:14:16 | 2021-05-31T02:14:16 | 325,188,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | from uuid import uuid4
from services import StudentAssignmentService
from services.abstract import Assignment
class StudentService:
    """Registry of assignment classes plus per-student grading sessions."""

    def __init__(self):
        self.student_graders = {}
        self.assignment_class = {}

    def register(self, assignment_class):
        """Register an Assignment subclass and return its lookup id."""
        if not issubclass(assignment_class, Assignment):
            raise RuntimeError("Your class does not have the right methods")
        assignment_id = uuid4()
        self.assignment_class[assignment_id] = assignment_class
        return assignment_id

    def start_assignment(self, student, id):
        """Open a grading session for *student* on the registered assignment *id*."""
        grader = StudentAssignmentService(student, self.assignment_class[id])
        self.student_graders[student] = grader

    def get_lesson(self, student):
        """Return the lesson for the student's active assignment."""
        return self.student_graders[student].lesson()

    def check_assignment(self, student, code):
        """Grade *code* against the student's active assignment."""
        return self.student_graders[student].check(code)

    def assignment_summary(self, student):
        """Return a human-readable attempt summary for *student*."""
        grader = self.student_graders[student]
        return f"""
        {student}'s attempts at {grader.assignment.__class__.__name__}:
        attempts: {grader.attempts}
        correct: {grader.correct_attempts}
        passed: {grader.correct_attempts > 0}
        """
"tuan193@gmail.com"
] | tuan193@gmail.com |
fffce95d0709e83632fe51584057dd7a2f48896d | 51e56d62ba688b5cc323a3ee3890b87934ae7682 | /5_Arg_Pr_Nac.py | 4c377c901ba044f8930cb6e0a34c4005c39cbf51 | [] | no_license | BogdansProgsCo/OLIMP_FREEDOM | b239df814af90e1dc5fd0aff15ee1c5e921a61f6 | 3e631a223b6215d136104eba70bc35203dfe47cf | refs/heads/main | 2023-05-14T13:19:14.760929 | 2021-06-14T18:21:40 | 2021-06-14T18:21:40 | 376,906,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319,195 | py | import requests
from bs4 import BeautifulSoup
import re
import datetime as dt
headers = {"User-Agent": 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0'}
url = 'https://nb-bet.com/Teams/2136-Atlanta-Buenos-Ayres-statistika-komandi'
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
# Streak / pattern thresholds: a pattern is printed and written to the
# report file only once its counted run reaches the value below.
draw = 6
odd_even = 10
under_15 = 5
over_25 = 10
under_25 = 10
both_scores = 10
drw_frst_tm = 8
no_goal_frst_tm = 6
drw_NOdrw = 7
NOdrw_drw = 8
und15_ovr15 = 7
ovr15_und15 = 7
und25_ovr25 = 7
ovr25_und25 = 7
both_noboth = 7
noboth_both = 7
drw_NOdrw_ft = 7
NOdrw_drw_ft = 7
goal_NOgoal_ft = 7
NOgoal_goal_ft = 7
od_ev = 8
ev_od = 8
# Report file name plus the championship / team labels used in every
# printed line below.
a = "Arg Prim Nac.txt"
champ = "Арг Пр Нац"
team = "Атланта Б-Айрес"
def adding_team():
    """Append the league and team header lines to the report file."""
    league_label = "Argentina Prim Nac"
    team_label = "Atlanta Bue Ares"
    with open(a, "a+") as report:
        report.write('\n _______ ' + league_label + ' _______')
        report.write('\n\n --------------------------------- ' + team_label)
def clean_file():
    """Truncate the report file (opening with 'w+' empties it)."""
    with open(a, 'w+') as report:
        report.seek(0)
def create_file():
    """Ensure the report file exists without modifying its contents."""
    with open(a, "a+"):
        pass
def draws(x):
    """Count the opening streak of drawn full-time scores and report it.

    x lists full-time score strings, newest first; the streak is printed
    and appended to the report file once it reaches the `draw` threshold.
    """
    drawn = {f'{n} : {n} ' for n in range(8)}  # '0 : 0 ' .. '7 : 7 '
    count = 0
    for score in x:
        if score not in drawn:
            break
        count += 1
    if count >= draw:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m ничей = {count} \033[0m')
        with open(a, "a+") as report:
            report.write('\n draws = ' + str(count))
def odd(x):
    """Count the opening streak of odd-goal-total scores and report it."""
    # Exact even-total scorelines the original enumerated; hitting one
    # ends the streak.
    even_totals = {
        '0 : 0 ', '1 : 1 ', '2 : 2 ', '3 : 3 ', '4 : 4 ', '5 : 5 ',
        '6 : 6 ', '7 : 7 ', '0 : 6 ', '6 : 0 ', '1 : 5 ', '5 : 1 ',
        '1 : 7 ', '7 : 1 ', '2 : 0 ', '0 : 2 ', '2 : 4 ', '4 : 2 ',
        '2 : 6 ', '6 : 2 ', '3 : 1 ', '1 : 3 ', '3 : 5 ', '5 : 3 ',
        '4 : 6 ', '6 : 4 ', '4 : 0 ', '0 : 4 ', '7 : 3 ', '3 : 7 ',
        '5 : 7 ', '7 : 5 ', '8 : 2 ', '2 : 8 ',
    }
    count = 0
    for score in x:
        if score in even_totals:
            break
        count += 1
    if count >= odd_even:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m не-чет = {count} \033[0m')
        with open(a, "a+") as report:
            report.write('\n odd = ' + str(count))
def even(x):
    """Count the opening streak of even-goal-total scores and report it."""
    # Exact even-total scorelines the original enumerated; anything else
    # ends the streak.
    even_totals = {
        '0 : 0 ', '1 : 1 ', '2 : 2 ', '3 : 3 ', '4 : 4 ', '5 : 5 ',
        '6 : 6 ', '7 : 7 ', '0 : 6 ', '6 : 0 ', '1 : 5 ', '5 : 1 ',
        '1 : 7 ', '7 : 1 ', '2 : 0 ', '0 : 2 ', '2 : 4 ', '4 : 2 ',
        '2 : 6 ', '6 : 2 ', '3 : 1 ', '1 : 3 ', '3 : 5 ', '5 : 3 ',
        '4 : 6 ', '6 : 4 ', '4 : 0 ', '0 : 4 ', '7 : 3 ', '3 : 7 ',
        '5 : 7 ', '7 : 5 ', '8 : 2 ', '2 : 8 ',
    }
    count = 0
    for score in x:
        if score not in even_totals:
            break
        count += 1
    if count >= odd_even:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m чет = {count} \033[0m')
        with open(a, "a+") as report:
            report.write('\n even = ' + str(count))
def under_1_5(x):
    """Count the opening streak of totals under 1.5 goals and report it."""
    low_scores = {'0 : 0 ', '1 : 0 ', '0 : 1 '}
    count = 0
    for score in x:
        if score not in low_scores:
            break
        count += 1
    if count >= under_15:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m мен 1.5 = {count} \033[0m')
        with open(a, "a+") as report:
            report.write('\n under_1.5 = ' + str(count))
def over_2_5(x):
    """Count the opening streak of totals over 2.5 goals and report it."""
    low_scores = {'0 : 0 ', '1 : 1 ', '1 : 0 ', '0 : 1 ', '2 : 0 ', '0 : 2 '}
    count = 0
    for score in x:
        if score in low_scores:
            break
        count += 1
    if count >= over_25:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m бол 2.5 = {count} \033[0m')
        with open(a, "a+") as report:
            report.write('\n over_2.5 = ' + str(count))
def under_2_5(x):
    """Count the opening streak of totals under 2.5 goals and report it."""
    low_scores = {'0 : 0 ', '1 : 1 ', '1 : 0 ', '0 : 1 ', '2 : 0 ', '0 : 2 '}
    count = 0
    for score in x:
        if score not in low_scores:
            break
        count += 1
    if count >= under_25:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m мен 2.5 = {count} \033[0m')
        with open(a, "a+") as report:
            report.write('\n under_2.5 = ' + str(count))
def both_score(x):
    """Count the opening streak of matches where both teams scored.

    Walks the full-time scores in x (newest first); the run is reported
    once it reaches the both_scores threshold.
    """
    # Scorelines with a clean sheet end the streak.  The original list
    # stopped at '6 : 0 '; '0 : 7 '/'7 : 0 ' are added so one-sided routs
    # are classified consistently with both_no_score().
    clean_sheet = {'0 : 0 ', '1 : 0 ', '0 : 1 ', '2 : 0 ', '0 : 2 ',
                   '0 : 3 ', '3 : 0 ', '4 : 0 ', '0 : 4 ', '0 : 5 ',
                   '5 : 0 ', '0 : 6 ', '6 : 0 ', '0 : 7 ', '7 : 0 '}
    count = 0
    for score in x:
        if score in clean_sheet:
            break
        count += 1
    if count >= both_scores:
        # "зибили" in the original output was a typo for "забили".
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m обе забили = {count} \033[0m')
        with open(a, "a+") as report:
            report.write('\n both_score = ' + str(count))
def both_no_score(x):
    """Count the opening streak of matches with at least one clean sheet."""
    clean_sheet = {'0 : 0 ', '1 : 0 ', '0 : 1 ', '2 : 0 ', '0 : 2 ',
                   '0 : 3 ', '3 : 0 ', '4 : 0 ', '0 : 4 ', '0 : 5 ',
                   '5 : 0 ', '0 : 6 ', '6 : 0 ', '0 : 7 ', '7 : 0 '}
    count = 0
    for score in x:
        if score not in clean_sheet:
            break
        count += 1
    if count >= both_scores:
        # "зибили" in the original output was a typo for "забили".
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m обе НЕ забили = {count} \033[0m')
        with open(a, "a+") as report:
            report.write('\n both_no_score = ' + str(count))
def draws_first_time(x):
    """Count the opening streak of drawn first halves and report it."""
    drawn_half = {f'({n} : {n})' for n in range(6)}  # '(0 : 0)' .. '(5 : 5)'
    count = 0
    for half_score in x:
        if half_score not in drawn_half:
            break
        count += 1
    if count >= drw_frst_tm:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m ничьи 1-й тайм = {count} \033[0m')
        with open(a, "a+") as report:
            report.write('\n draws_first_time = ' + str(count))
def no_goal_first_time(x):
    """Count the opening streak of goalless first halves and report it."""
    count = 0
    for half_score in x:
        if half_score != '(0 : 0)':
            break
        count += 1
    if count >= no_goal_frst_tm:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m 0:0 1-й тайм = {count} \033[0m')
        with open(a, "a+") as report:
            report.write('\n no_goal_first_time = ' + str(count))
def Odd_Even(x):
    """Score how well recent totals alternate odd, even, odd, ...

    olimp[i] is '+' when result i had an odd goal total, '-' otherwise;
    each of the first 11 entries matching the alternating '+-+-...'
    template adds one point.  Reported once count >= od_ev.
    """
    even_totals = {
        '0 : 0 ', '1 : 1 ', '2 : 2 ', '3 : 3 ', '4 : 4 ', '5 : 5 ',
        '2 : 0 ', '0 : 2 ', '1 : 3 ', '3 : 1 ', '4 : 2 ', '2 : 4 ',
        '3 : 5 ', '5 : 3 ', '4 : 6 ', '6 : 4 ', '4 : 0 ', '0 : 4 ',
        '1 : 5 ', '5 : 1 ', '2 : 6 ', '6 : 2 ', '3 : 7 ', '7 : 3 ',
        '0 : 6 ', '6 : 0 ', '1 : 7 ', '7 : 1 ', '2 : 8 ', '8 : 2 ',
    }
    olimp = ['-' if score in even_totals else '+' for score in x]
    count = 1 if olimp[0] == '+' else 0
    for idx in range(1, min(len(olimp), 11)):
        if olimp[idx] == ('+' if idx % 2 == 0 else '-'):
            count += 1
    if count >= od_ev:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m НЕчет_чет = {count} \033[0m')
        with open(a, "a+") as report:
            report.write('\n odd_even = ' + str(count))
def Even_Odd(x):
    """Score how well recent totals alternate even, odd, even, ...

    olimp[i] is '+' when result i had an even goal total, '-' otherwise;
    each of the first 11 entries matching the alternating '+-+-...'
    template adds one point.  Reported once count >= ev_od.
    """
    even_totals = {
        '0 : 0 ', '1 : 1 ', '2 : 2 ', '3 : 3 ', '4 : 4 ', '5 : 5 ',
        '2 : 0 ', '0 : 2 ', '1 : 3 ', '3 : 1 ', '4 : 2 ', '2 : 4 ',
        '3 : 5 ', '5 : 3 ', '4 : 6 ', '6 : 4 ', '4 : 0 ', '0 : 4 ',
        '1 : 5 ', '5 : 1 ', '2 : 6 ', '6 : 2 ', '3 : 7 ', '7 : 3 ',
        '0 : 6 ', '6 : 0 ', '1 : 7 ', '7 : 1 ', '2 : 8 ', '8 : 2 ',
    }
    olimp = ['+' if score in even_totals else '-' for score in x]
    count = 1 if olimp[0] == '+' else 0
    for idx in range(1, min(len(olimp), 11)):
        if olimp[idx] == ('+' if idx % 2 == 0 else '-'):
            count += 1
    if count >= ev_od:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m чет_НЕчет = {count} \033[0m')
        with open(a, "a+") as report:
            report.write('\n even_odd = ' + str(count))
def draws_NOdraws(x):
    """Score an alternating draw / non-draw pattern over recent results.

    olimp[i] is '+' for a draw, '-' otherwise; each of the first 11
    entries matching '+-+-...' adds one point.  Reported once
    count >= drw_NOdrw.
    """
    drawn = {f'{n} : {n} ' for n in range(9)}  # '0 : 0 ' .. '8 : 8 '
    olimp = ['+' if score in drawn else '-' for score in x]
    count = 1 if olimp[0] == '+' else 0
    for idx in range(1, min(len(olimp), 11)):
        if olimp[idx] == ('+' if idx % 2 == 0 else '-'):
            count += 1
    if count >= drw_NOdrw:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m ничья_НЕничья = {count} \033[0m')
        with open(a, "a+") as report:
            report.write('\n draws_NOdraws = ' + str(count))
def NOdraws_draws(x):
    """Score an alternating non-draw / draw pattern over recent results.

    olimp[i] is '+' for a non-draw, '-' for a draw; each of the first 11
    entries matching '+-+-...' adds one point.  Reported once
    count >= NOdrw_drw.
    """
    drawn = {f'{n} : {n} ' for n in range(8)}  # '0 : 0 ' .. '7 : 7 '
    olimp = ['-' if score in drawn else '+' for score in x]
    count = 1 if olimp[0] == '+' else 0
    for idx in range(1, min(len(olimp), 11)):
        if olimp[idx] == ('+' if idx % 2 == 0 else '-'):
            count += 1
    if count >= NOdrw_drw:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m НЕничья_ничья = {count} \033[0m')
        with open(a, "a+") as report:
            report.write('\n NOdraws_draws = ' + str(count))
def under15_over15(x):
    """Score an alternating under-1.5 / over-1.5 pattern over recent results.

    olimp[i] is '+' for a total under 1.5 goals, '-' otherwise; each of
    the first 11 entries matching '+-+-...' adds one point.  Reported
    once count >= und15_ovr15.
    """
    low_scores = {'0 : 0 ', '1 : 0 ', '0 : 1 '}
    olimp = ['+' if score in low_scores else '-' for score in x]
    count = 1 if olimp[0] == '+' else 0
    for idx in range(1, min(len(olimp), 11)):
        if olimp[idx] == ('+' if idx % 2 == 0 else '-'):
            count += 1
    if count >= und15_ovr15:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m мен_бол 1.5 = {count} \033[0m')
        with open(a, "a+") as report:
            report.write('\n under_over 1.5 = ' + str(count))
def over15_under15(x):
    """Score an alternating over-1.5 / under-1.5 pattern over recent results.

    olimp[i] is '+' for a total over 1.5 goals, '-' otherwise; each of
    the first 11 entries matching '+-+-...' adds one point.  Reported
    once count >= ovr15_und15.
    """
    low_scores = {'0 : 0 ', '1 : 0 ', '0 : 1 '}
    olimp = ['-' if score in low_scores else '+' for score in x]
    count = 1 if olimp[0] == '+' else 0
    for idx in range(1, min(len(olimp), 11)):
        if olimp[idx] == ('+' if idx % 2 == 0 else '-'):
            count += 1
    if count >= ovr15_und15:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m бол_мен 1.5 = {count} \033[0m')
        with open(a, "a+") as report:
            report.write('\n over_under 1.5 = ' + str(count))
def under25_over25(x):
    """Score an alternating under-2.5 / over-2.5 pattern over recent results.

    olimp[i] is '+' for a total under 2.5 goals, '-' otherwise; each of
    the first 11 entries matching '+-+-...' adds one point.  Reported
    once count >= und25_ovr25.
    """
    low_scores = {'0 : 0 ', '1 : 1 ', '1 : 0 ', '0 : 1 ', '2 : 0 ', '0 : 2 '}
    olimp = ['+' if score in low_scores else '-' for score in x]
    count = 1 if olimp[0] == '+' else 0
    for idx in range(1, min(len(olimp), 11)):
        if olimp[idx] == ('+' if idx % 2 == 0 else '-'):
            count += 1
    if count >= und25_ovr25:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m мен_бол 2.5 = {count} \033[0m')
        with open(a, "a+") as report:
            report.write('\n under_over 2.5 = ' + str(count))
def over25_under25(x):
    """Score an alternating over-2.5 / under-2.5 pattern over recent results.

    olimp[i] is '+' for a total over 2.5 goals, '-' otherwise; each of
    the first 11 entries matching '+-+-...' adds one point.  Reported
    once count >= ovr25_und25.
    """
    low_scores = {'0 : 0 ', '1 : 1 ', '1 : 0 ', '0 : 1 ', '2 : 0 ', '0 : 2 '}
    olimp = ['-' if score in low_scores else '+' for score in x]
    count = 1 if olimp[0] == '+' else 0
    for idx in range(1, min(len(olimp), 11)):
        if olimp[idx] == ('+' if idx % 2 == 0 else '-'):
            count += 1
    if count >= ovr25_und25:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m бол_мен 2.5 = {count} \033[0m')
        with open(a, "a+") as report:
            report.write('\n over_under 2.5 = ' + str(count))
def both_noboth_score(x):
    """Score an alternating both-scored / clean-sheet pattern.

    olimp[i] is '+' when both teams scored in result i, '-' otherwise;
    each of the first 11 entries matching '+-+-...' adds one point.
    Reported once count >= both_noboth.
    """
    clean_sheet = {'0 : 0 ', '1 : 0 ', '0 : 1 ', '2 : 0 ', '0 : 2 ',
                   '0 : 3 ', '3 : 0 ', '4 : 0 ', '0 : 4 ', '0 : 5 ',
                   '5 : 0 ', '0 : 6 ', '6 : 0 ', '0 : 7 ', '7 : 0 ',
                   '0 : 8 ', '8 : 0 '}
    olimp = ['-' if score in clean_sheet else '+' for score in x]
    count = 1 if olimp[0] == '+' else 0
    for idx in range(1, min(len(olimp), 11)):
        if olimp[idx] == ('+' if idx % 2 == 0 else '-'):
            count += 1
    if count >= both_noboth:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m обе_необе забили = {count} \033[0m')
        with open(a, "a+") as report:
            report.write('\n both_noboth score = ' + str(count))
def noboth_both_score(x):
    """Score an alternating clean-sheet / both-scored pattern.

    olimp[i] is '+' when at least one side kept a clean sheet in result
    i, '-' otherwise; each of the first 11 entries matching '+-+-...'
    adds one point.  Reported once count >= noboth_both.
    """
    clean_sheet = {'0 : 0 ', '1 : 0 ', '0 : 1 ', '2 : 0 ', '0 : 2 ',
                   '0 : 3 ', '3 : 0 ', '4 : 0 ', '0 : 4 ', '0 : 5 ',
                   '5 : 0 ', '0 : 6 ', '6 : 0 ', '0 : 7 ', '7 : 0 '}
    olimp = ['+' if score in clean_sheet else '-' for score in x]
    count = 1 if olimp[0] == '+' else 0
    for idx in range(1, min(len(olimp), 11)):
        if olimp[idx] == ('+' if idx % 2 == 0 else '-'):
            count += 1
    if count >= noboth_both:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m необе_обе забили = {count} \033[0m')
        print(' ')
        with open(a, "a+") as report:
            report.write('\n noboth_both score = ' + str(count))
def draws_NOdraws_first_time(x):
    """Count how well first-half scores fit an alternating
    'draw, no-draw, draw, ...' pattern over recent matches.

    x -- list of '(H : A)' first-half score strings, most recent first.
    Always appends the resulting count to the report file (global ``a``);
    additionally prints a highlighted summary line when the count reaches
    the module-level ``drw_NOdrw_ft`` threshold.
    """
    # First-half scores that are draws (0:0 .. 5:5 as in the original).
    draw_scores = ('(0 : 0)', '(1 : 1)', '(2 : 2)', '(3 : 3)',
                   '(4 : 4)', '(5 : 5)')
    olimp = ['+' if score in draw_scores else '-' for score in x]
    # Expected alternation starts with a draw: '+' at even indices, '-'
    # at odd ones; only the first 11 results are considered.  (Replaces
    # a hand-unrolled 11-branch chain; also tolerates an empty list,
    # which previously raised IndexError on olimp[0].)
    count = sum(
        1
        for i, mark in enumerate(olimp[:11])
        if mark == ('+' if i % 2 == 0 else '-')
    )
    if count >= drw_NOdrw_ft:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m ничья_НЕничья 1-й тайм = {count} \033[0m')
    b = str(count)
    new_file = open(a, "a+")
    new_file.write('\n draws_NOdraws 1st time = ' + b)
    new_file.close()
def NOdraws_draws_first_time(x):
    """Count how well first-half scores fit an alternating
    'no-draw, draw, no-draw, ...' pattern over recent matches.

    x -- list of '(H : A)' first-half score strings, most recent first.
    Always appends the resulting count to the report file (global ``a``);
    additionally prints a highlighted summary line when the count reaches
    the module-level ``NOdrw_drw_ft`` threshold.
    """
    # First-half draws (this variant also treats 6:6 as a draw, matching
    # the original branch list).
    draw_scores = ('(0 : 0)', '(1 : 1)', '(2 : 2)', '(3 : 3)',
                   '(4 : 4)', '(5 : 5)', '(6 : 6)')
    olimp = ['-' if score in draw_scores else '+' for score in x]
    # Expected alternation starts with a non-draw: '+' at even indices,
    # '-' at odd ones; only the first 11 results are considered.
    # (Replaces a hand-unrolled 11-branch chain; also tolerates an empty
    # list, which previously raised IndexError on olimp[0].)
    count = sum(
        1
        for i, mark in enumerate(olimp[:11])
        if mark == ('+' if i % 2 == 0 else '-')
    )
    if count >= NOdrw_drw_ft:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m НЕничья_ничья 1-й тайм = {count} \033[0m')
    b = str(count)
    new_file = open(a, "a+")
    new_file.write('\n NOdraws_draws 1st time = ' + b)
    new_file.close()
def goal_NOgoal_first_time(x):
    """Count how well first-half scores fit an alternating
    'goal, no-goal, goal, ...' pattern over recent matches.

    x -- list of '(H : A)' first-half score strings, most recent first.
    Always appends the resulting count to the report file (global ``a``);
    additionally prints a highlighted summary line when the count reaches
    the module-level ``goal_NOgoal_ft`` threshold.
    """
    # '+' marks a half with at least one goal; '-' marks a goalless half.
    olimp = ['-' if score == '(0 : 0)' else '+' for score in x]
    # Expected alternation starts with a goal: '+' at even indices, '-'
    # at odd ones; only the first 11 results are considered.  (Replaces
    # a hand-unrolled 11-branch chain; also tolerates an empty list,
    # which previously raised IndexError on olimp[0].)
    count = sum(
        1
        for i, mark in enumerate(olimp[:11])
        if mark == ('+' if i % 2 == 0 else '-')
    )
    if count >= goal_NOgoal_ft:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m гол-НЕгол 1-й тайм = {count} \033[0m')
    b = str(count)
    new_file = open(a, "a+")
    new_file.write('\n goal_NOgoal 1st time = ' + b)
    new_file.close()
def NOgoal_goal_first_time(x):
    """Count how well first-half scores fit an alternating
    'no-goal, goal, no-goal, ...' pattern over recent matches.

    x -- list of '(H : A)' first-half score strings, most recent first.
    Always appends the resulting count to the report file (global ``a``);
    additionally prints a highlighted summary line when the count reaches
    the module-level ``NOgoal_goal_ft`` threshold.
    """
    # '+' marks a goalless half; '-' marks a half with at least one goal.
    olimp = ['+' if score == '(0 : 0)' else '-' for score in x]
    # Expected alternation starts goalless: '+' at even indices, '-' at
    # odd ones; only the first 11 results are considered.  (Replaces a
    # hand-unrolled 11-branch chain; also tolerates an empty list, which
    # previously raised IndexError on olimp[0].)
    count = sum(
        1
        for i, mark in enumerate(olimp[:11])
        if mark == ('+' if i % 2 == 0 else '-')
    )
    if count >= NOgoal_goal_ft:
        print(f'\033[1;34m{next_game_3}\033[1;35m ({next_games_1}) \033[1;36m {champ:10} '
              f'\033[1;33m {team :15} \033[1;31m НЕгол_гол 1-й тайм = {count} \033[0m')
    b = str(count)
    new_file = open(a, "a+")
    new_file.write('\n NOgoal_goal 1st time = ' + b)
    new_file.close()
clean_file()
create_file()
adding_team()
# Run every pattern check in the original report order: the full-time
# checks interleaved with the two first-half checks, then the four
# remaining first-half checks at the end.
for check, results in (
        (draws, full_time),
        (odd, full_time),
        (even, full_time),
        (under_1_5, full_time),
        (over_2_5, full_time),
        (under_2_5, full_time),
        (both_score, full_time),
        (both_no_score, full_time),
        (draws_first_time, first_half_time),
        (no_goal_first_time, first_half_time),
        (Odd_Even, full_time),
        (Even_Odd, full_time),
        (draws_NOdraws, full_time),
        (NOdraws_draws, full_time),
        (under15_over15, full_time),
        (over15_under15, full_time),
        (under25_over25, full_time),
        (over25_under25, full_time),
        (both_noboth_score, full_time),
        (noboth_both_score, full_time),
        (draws_NOdraws_first_time, first_half_time),
        (NOdraws_draws_first_time, first_half_time),
        (goal_NOgoal_first_time, first_half_time),
        (NOgoal_goal_first_time, first_half_time),
):
    check(results)
# ---- Klub A. Germes: fetch match history and next-fixture date ----
url = 'https://nb-bet.com/Teams/4576-Klub-A-Germes-statistika-komandi'
r = requests.get(url, headers=headers)
# Keep a snapshot of the fetched page on disk for inspection.
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
# Extract 'H : A (h : a)' score strings from the results-table cells.
nbbet = soup.select(".a-dotted-hover")
one = "...".join(str(tag) for tag in nbbet)
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = " ".join(str(s) for s in two)
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
# Collect the dd.mm.yy dates shown in the fixtures/results column.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = "...".join(str(tag) for tag in next_game)
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
# Expand two-digit years for entries 5..20: 'dd.mm.yy' -> 'dd.mm.20yy'.
# (Replaces 16 copy-pasted guarded blocks.)
date = [s[:5] + '.20' + s[6:] for s in next_game_2[5:21]]
# The next fixture is the first date later than its predecessor: the
# chronologically descending results list flips to upcoming games there.
# This replaces a 16-deep nested compare chain that raised NameError
# when fewer than 16 dates were available (and could silently compare
# stale date_* values left over from the previous team's section);
# next_game_3 keeps its previous value when no such pair exists, which
# matches the original chain's final 'pass' branch.
parsed_dates = [dt.datetime.strptime(s, '%d.%m.%Y') for s in date]
for idx in range(len(parsed_dates) - 1):
    if parsed_dates[idx] < parsed_dates[idx + 1]:
        next_game_3 = date[idx + 1]
        break
team = "Клуб А. Гермес"
def adding_team():
    """Append a section header with this team's slug to the report file."""
    label = "Klub-A-Germes"
    with open(a, "a+") as report:
        report.write('\n\n --------------------------------- ' + label)
create_file()
adding_team()
# Run every pattern check in the original report order: the full-time
# checks interleaved with the two first-half checks, then the four
# remaining first-half checks at the end.
for check, results in (
        (draws, full_time),
        (odd, full_time),
        (even, full_time),
        (under_1_5, full_time),
        (over_2_5, full_time),
        (under_2_5, full_time),
        (both_score, full_time),
        (both_no_score, full_time),
        (draws_first_time, first_half_time),
        (no_goal_first_time, first_half_time),
        (Odd_Even, full_time),
        (Even_Odd, full_time),
        (draws_NOdraws, full_time),
        (NOdraws_draws, full_time),
        (under15_over15, full_time),
        (over15_under15, full_time),
        (under25_over25, full_time),
        (over25_under25, full_time),
        (both_noboth_score, full_time),
        (noboth_both_score, full_time),
        (draws_NOdraws_first_time, first_half_time),
        (NOdraws_draws_first_time, first_half_time),
        (goal_NOgoal_first_time, first_half_time),
        (NOgoal_goal_first_time, first_half_time),
):
    check(results)
# ---- Gimnasia Mendoza: fetch match history and next-fixture date ----
url = 'https://nb-bet.com/Teams/2891-Khimnasiya-Mendosa-statistika-komandi'
r = requests.get(url, headers=headers)
# Keep a snapshot of the fetched page on disk for inspection.
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
# Extract 'H : A (h : a)' score strings from the results-table cells.
nbbet = soup.select(".a-dotted-hover")
one = "...".join(str(tag) for tag in nbbet)
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = " ".join(str(s) for s in two)
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
# Collect the dd.mm.yy dates shown in the fixtures/results column.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = "...".join(str(tag) for tag in next_game)
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
# Expand two-digit years for entries 5..20: 'dd.mm.yy' -> 'dd.mm.20yy'.
# (Replaces 16 copy-pasted guarded blocks.)
date = [s[:5] + '.20' + s[6:] for s in next_game_2[5:21]]
# The next fixture is the first date later than its predecessor: the
# chronologically descending results list flips to upcoming games there.
# This replaces a 16-deep nested compare chain that raised NameError
# when fewer than 16 dates were available (and could silently compare
# stale date_* values left over from the previous team's section);
# next_game_3 keeps its previous value when no such pair exists, which
# matches the original chain's final 'pass' branch.
parsed_dates = [dt.datetime.strptime(s, '%d.%m.%Y') for s in date]
for idx in range(len(parsed_dates) - 1):
    if parsed_dates[idx] < parsed_dates[idx + 1]:
        next_game_3 = date[idx + 1]
        break
team = "Химнасия Мендоса"
def adding_team():
    """Append a section header with this team's slug to the report file."""
    label = "Khimnasiya-Mendosa"
    with open(a, "a+") as report:
        report.write('\n\n --------------------------------- ' + label)
create_file()
adding_team()
# Run every pattern check in the original report order: the full-time
# checks interleaved with the two first-half checks, then the four
# remaining first-half checks at the end.
for check, results in (
        (draws, full_time),
        (odd, full_time),
        (even, full_time),
        (under_1_5, full_time),
        (over_2_5, full_time),
        (under_2_5, full_time),
        (both_score, full_time),
        (both_no_score, full_time),
        (draws_first_time, first_half_time),
        (no_goal_first_time, first_half_time),
        (Odd_Even, full_time),
        (Even_Odd, full_time),
        (draws_NOdraws, full_time),
        (NOdraws_draws, full_time),
        (under15_over15, full_time),
        (over15_under15, full_time),
        (under25_over25, full_time),
        (over25_under25, full_time),
        (both_noboth_score, full_time),
        (noboth_both_score, full_time),
        (draws_NOdraws_first_time, first_half_time),
        (NOdraws_draws_first_time, first_half_time),
        (goal_NOgoal_first_time, first_half_time),
        (NOgoal_goal_first_time, first_half_time),
):
    check(results)
# ---- Independiente Rivadavia: fetch match history and next fixture ----
url = 'https://nb-bet.com/Teams/1548-Independente-Rivadavia-statistika-komandi'
r = requests.get(url, headers=headers)
# Keep a snapshot of the fetched page on disk for inspection.
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
# Extract 'H : A (h : a)' score strings from the results-table cells.
nbbet = soup.select(".a-dotted-hover")
one = "...".join(str(tag) for tag in nbbet)
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = " ".join(str(s) for s in two)
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
# Collect the dd.mm.yy dates shown in the fixtures/results column.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = "...".join(str(tag) for tag in next_game)
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
# Expand two-digit years for entries 5..20: 'dd.mm.yy' -> 'dd.mm.20yy'.
# (Replaces 16 copy-pasted guarded blocks.)
date = [s[:5] + '.20' + s[6:] for s in next_game_2[5:21]]
# The next fixture is the first date later than its predecessor: the
# chronologically descending results list flips to upcoming games there.
# This replaces a 16-deep nested compare chain that raised NameError
# when fewer than 16 dates were available (and could silently compare
# stale date_* values left over from the previous team's section);
# next_game_3 keeps its previous value when no such pair exists, which
# matches the original chain's final 'pass' branch.
parsed_dates = [dt.datetime.strptime(s, '%d.%m.%Y') for s in date]
for idx in range(len(parsed_dates) - 1):
    if parsed_dates[idx] < parsed_dates[idx + 1]:
        next_game_3 = date[idx + 1]
        break
team = "Индп Ривадавиа"
def adding_team():
    """Append a section header with this team's slug to the report file."""
    label = "Independente-Rivadavia"
    with open(a, "a+") as report:
        report.write('\n\n --------------------------------- ' + label)
create_file()
adding_team()
# Run every pattern check in the original report order: the full-time
# checks interleaved with the two first-half checks, then the four
# remaining first-half checks at the end.
for check, results in (
        (draws, full_time),
        (odd, full_time),
        (even, full_time),
        (under_1_5, full_time),
        (over_2_5, full_time),
        (under_2_5, full_time),
        (both_score, full_time),
        (both_no_score, full_time),
        (draws_first_time, first_half_time),
        (no_goal_first_time, first_half_time),
        (Odd_Even, full_time),
        (Even_Odd, full_time),
        (draws_NOdraws, full_time),
        (NOdraws_draws, full_time),
        (under15_over15, full_time),
        (over15_under15, full_time),
        (under25_over25, full_time),
        (over25_under25, full_time),
        (both_noboth_score, full_time),
        (noboth_both_score, full_time),
        (draws_NOdraws_first_time, first_half_time),
        (NOdraws_draws_first_time, first_half_time),
        (goal_NOgoal_first_time, first_half_time),
        (NOgoal_goal_first_time, first_half_time),
):
    check(results)
# ---- Tigre: fetch match history and next-fixture date ----
url = 'https://nb-bet.com/Teams/574-Tigre-statistika-komandi'
r = requests.get(url, headers=headers)
# Keep a snapshot of the fetched page on disk for inspection.
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
# Extract 'H : A (h : a)' score strings from the results-table cells.
nbbet = soup.select(".a-dotted-hover")
one = "...".join(str(tag) for tag in nbbet)
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = " ".join(str(s) for s in two)
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
# Collect the dd.mm.yy dates shown in the fixtures/results column.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = "...".join(str(tag) for tag in next_game)
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
# Expand two-digit years for entries 5..20: 'dd.mm.yy' -> 'dd.mm.20yy'.
# (Replaces 16 copy-pasted guarded blocks.)
date = [s[:5] + '.20' + s[6:] for s in next_game_2[5:21]]
# The next fixture is the first date later than its predecessor: the
# chronologically descending results list flips to upcoming games there.
# This replaces a 16-deep nested compare chain that raised NameError
# when fewer than 16 dates were available (and could silently compare
# stale date_* values left over from the previous team's section);
# next_game_3 keeps its previous value when no such pair exists, which
# matches the original chain's final 'pass' branch.
parsed_dates = [dt.datetime.strptime(s, '%d.%m.%Y') for s in date]
for idx in range(len(parsed_dates) - 1):
    if parsed_dates[idx] < parsed_dates[idx + 1]:
        next_game_3 = date[idx + 1]
        break
team = "Тигре"
def adding_team():
    """Append a section header with this team's slug to the report file."""
    label = "Tigre"
    with open(a, "a+") as report:
        report.write('\n\n --------------------------------- ' + label)
create_file()
adding_team()
# Run every pattern check in the original report order: the full-time
# checks interleaved with the two first-half checks, then the four
# remaining first-half checks at the end.
for check, results in (
        (draws, full_time),
        (odd, full_time),
        (even, full_time),
        (under_1_5, full_time),
        (over_2_5, full_time),
        (under_2_5, full_time),
        (both_score, full_time),
        (both_no_score, full_time),
        (draws_first_time, first_half_time),
        (no_goal_first_time, first_half_time),
        (Odd_Even, full_time),
        (Even_Odd, full_time),
        (draws_NOdraws, full_time),
        (NOdraws_draws, full_time),
        (under15_over15, full_time),
        (over15_under15, full_time),
        (under25_over25, full_time),
        (over25_under25, full_time),
        (both_noboth_score, full_time),
        (noboth_both_score, full_time),
        (draws_NOdraws_first_time, first_half_time),
        (NOdraws_draws_first_time, first_half_time),
        (goal_NOgoal_first_time, first_half_time),
        (NOgoal_goal_first_time, first_half_time),
):
    check(results)
# ---- All Boys: fetch match history and next-fixture date ----
url = 'https://nb-bet.com/Teams/1552-Oll-Boyz-statistika-komandi'
r = requests.get(url, headers=headers)
# Keep a snapshot of the fetched page on disk for inspection.
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
# Extract 'H : A (h : a)' score strings from the results-table cells.
nbbet = soup.select(".a-dotted-hover")
one = "...".join(str(tag) for tag in nbbet)
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = " ".join(str(s) for s in two)
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
# Collect the dd.mm.yy dates shown in the fixtures/results column.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = "...".join(str(tag) for tag in next_game)
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
# Expand two-digit years for entries 5..20: 'dd.mm.yy' -> 'dd.mm.20yy'.
# (Replaces 16 copy-pasted guarded blocks.)
date = [s[:5] + '.20' + s[6:] for s in next_game_2[5:21]]
# The next fixture is the first date later than its predecessor: the
# chronologically descending results list flips to upcoming games there.
# This replaces a 16-deep nested compare chain that raised NameError
# when fewer than 16 dates were available (and could silently compare
# stale date_* values left over from the previous team's section);
# next_game_3 keeps its previous value when no such pair exists, which
# matches the original chain's final 'pass' branch.
parsed_dates = [dt.datetime.strptime(s, '%d.%m.%Y') for s in date]
for idx in range(len(parsed_dates) - 1):
    if parsed_dates[idx] < parsed_dates[idx + 1]:
        next_game_3 = date[idx + 1]
        break
team = "Олл Бойз"
def adding_team():
b = "Oll-Boyz"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
# --- Team: Кильмес ---------------------------------------------------------
# Fetch the team's statistics page, extract full-time and first-half scores,
# determine the next scheduled game date, and append betting statistics.
url = 'https://nb-bet.com/Teams/561-Kilmes-statistika-komandi'
r = requests.get(url, headers=headers)
# Dump the raw HTML for debugging; file.write returns the character count.
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
# Scores appear as "F : F (H : H)"; split them into full-time and
# first-half lists via the regexes below.
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
# Collect match dates ('dd.mm.yy') from the results table.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
# Rebuild 4-digit years ('dd.mm.yy' -> 'dd.mm.20yy') for entries 5..20.
date = []
y = '.20'
for idx in range(5, 21):
    if len(next_game_2) > idx:
        s = next_game_2[idx]
        date.append(s[:5] + y + s[6:])
# Parse at most 16 dates and pick the first one that is earlier than its
# successor — that successor is taken as the next scheduled game.  The
# original unrolled if/else chain compared date_0..date_15 unconditionally
# and could raise NameError (or compare stale values from a previous team)
# when fewer than 16 dates were scraped; this loop handles any count.
parsed = [dt.datetime.strptime(d, '%d.%m.%Y') for d in date[:16]]
for i in range(len(parsed) - 1):
    if parsed[i] < parsed[i + 1]:
        next_game_3 = date[i + 1]
        break
team = "Кильмес"
def adding_team():
    # Append a section header with the team's Latin name to the report file
    # (path held in the module-level variable `a`).
    b = "Kilmes"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()
create_file()
adding_team()
# Compute and append every tracked betting pattern for this team.
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
# --- Team: Сантамарина -----------------------------------------------------
# Fetch the team's statistics page, extract full-time and first-half scores,
# determine the next scheduled game date, and append betting statistics.
url = 'https://nb-bet.com/Teams/1560-Santamarina-statistika-komandi'
r = requests.get(url, headers=headers)
# Dump the raw HTML for debugging; file.write returns the character count.
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
# Scores appear as "F : F (H : H)"; split them into full-time and
# first-half lists via the regexes below.
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
# Collect match dates ('dd.mm.yy') from the results table.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
# Rebuild 4-digit years ('dd.mm.yy' -> 'dd.mm.20yy') for entries 5..20.
date = []
y = '.20'
for idx in range(5, 21):
    if len(next_game_2) > idx:
        s = next_game_2[idx]
        date.append(s[:5] + y + s[6:])
# Parse at most 16 dates and pick the first one that is earlier than its
# successor — that successor is taken as the next scheduled game.  The
# original unrolled if/else chain compared date_0..date_15 unconditionally
# and could raise NameError (or compare stale values from a previous team)
# when fewer than 16 dates were scraped; this loop handles any count.
parsed = [dt.datetime.strptime(d, '%d.%m.%Y') for d in date[:16]]
for i in range(len(parsed) - 1):
    if parsed[i] < parsed[i + 1]:
        next_game_3 = date[i + 1]
        break
team = "Сантамарина"
def adding_team():
    # Append a section header with the team's Latin name to the report file
    # (path held in the module-level variable `a`).
    b = "Santamarina"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()
create_file()
adding_team()
# Compute and append every tracked betting pattern for this team.
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
# --- Team: Браун Адроге ----------------------------------------------------
# Fetch the team's statistics page, extract full-time and first-half scores,
# determine the next scheduled game date, and append betting statistics.
url = 'https://nb-bet.com/Teams/1553-Braun-Adroge-statistika-komandi'
r = requests.get(url, headers=headers)
# Dump the raw HTML for debugging; file.write returns the character count.
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
# Scores appear as "F : F (H : H)"; split them into full-time and
# first-half lists via the regexes below.
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
# Collect match dates ('dd.mm.yy') from the results table.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
# Rebuild 4-digit years ('dd.mm.yy' -> 'dd.mm.20yy') for entries 5..20.
date = []
y = '.20'
for idx in range(5, 21):
    if len(next_game_2) > idx:
        s = next_game_2[idx]
        date.append(s[:5] + y + s[6:])
# Parse at most 16 dates and pick the first one that is earlier than its
# successor — that successor is taken as the next scheduled game.  The
# original unrolled if/else chain compared date_0..date_15 unconditionally
# and could raise NameError (or compare stale values from a previous team)
# when fewer than 16 dates were scraped; this loop handles any count.
parsed = [dt.datetime.strptime(d, '%d.%m.%Y') for d in date[:16]]
for i in range(len(parsed) - 1):
    if parsed[i] < parsed[i + 1]:
        next_game_3 = date[i + 1]
        break
team = "Браун Адроге"
def adding_team():
    # Append a section header with the team's Latin name to the report file
    # (path held in the module-level variable `a`).
    b = "Braun-Adroge"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()
create_file()
adding_team()
# Compute and append every tracked betting pattern for this team.
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
# --- Team: Сан-Мартин Тукуман ----------------------------------------------
# Fetch the team's statistics page, extract full-time and first-half scores,
# determine the next scheduled game date, and append betting statistics.
url = 'https://nb-bet.com/Teams/1554-San-Martin-Tukuman-statistika-komandi'
r = requests.get(url, headers=headers)
# Dump the raw HTML for debugging; file.write returns the character count.
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
# Scores appear as "F : F (H : H)"; split them into full-time and
# first-half lists via the regexes below.
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
# Collect match dates ('dd.mm.yy') from the results table.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
# Rebuild 4-digit years ('dd.mm.yy' -> 'dd.mm.20yy') for entries 5..20.
date = []
y = '.20'
for idx in range(5, 21):
    if len(next_game_2) > idx:
        s = next_game_2[idx]
        date.append(s[:5] + y + s[6:])
# Parse at most 16 dates and pick the first one that is earlier than its
# successor — that successor is taken as the next scheduled game.  The
# original unrolled if/else chain compared date_0..date_15 unconditionally
# and could raise NameError (or compare stale values from a previous team)
# when fewer than 16 dates were scraped; this loop handles any count.
parsed = [dt.datetime.strptime(d, '%d.%m.%Y') for d in date[:16]]
for i in range(len(parsed) - 1):
    if parsed[i] < parsed[i + 1]:
        next_game_3 = date[i + 1]
        break
team = "Сан-М Тукуман"
def adding_team():
    # Append a section header with the team's Latin name to the report file
    # (path held in the module-level variable `a`).
    b = "San-Martin-Tukuman"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()
create_file()
adding_team()
# Compute and append every tracked betting pattern for this team.
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
# --- Team: Депортиво Риестра -----------------------------------------------
# Fetch the team's statistics page, extract full-time and first-half scores,
# determine the next scheduled game date, and append betting statistics.
url = 'https://nb-bet.com/Teams/2137-Deportivo-Riestra-statistika-komandi'
r = requests.get(url, headers=headers)
# Dump the raw HTML for debugging; file.write returns the character count.
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
# Scores appear as "F : F (H : H)"; split them into full-time and
# first-half lists via the regexes below.
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
# Collect match dates ('dd.mm.yy') from the results table.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
# Rebuild 4-digit years ('dd.mm.yy' -> 'dd.mm.20yy') for entries 5..20.
date = []
y = '.20'
for idx in range(5, 21):
    if len(next_game_2) > idx:
        s = next_game_2[idx]
        date.append(s[:5] + y + s[6:])
# Parse at most 16 dates and pick the first one that is earlier than its
# successor — that successor is taken as the next scheduled game.  The
# original unrolled if/else chain compared date_0..date_15 unconditionally
# and could raise NameError (or compare stale values from a previous team)
# when fewer than 16 dates were scraped; this loop handles any count.
parsed = [dt.datetime.strptime(d, '%d.%m.%Y') for d in date[:16]]
for i in range(len(parsed) - 1):
    if parsed[i] < parsed[i + 1]:
        next_game_3 = date[i + 1]
        break
team = "Депортиво Риестра"
def adding_team():
    # Append a section header with the team's Latin name to the report file
    # (path held in the module-level variable `a`).
    b = "Deportivo-Riestra"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()
create_file()
adding_team()
# Compute and append every tracked betting pattern for this team.
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
# --- Team: Атлетико Рафаэла ------------------------------------------------
# Fetch the team's statistics page, extract full-time and first-half scores,
# determine the next scheduled game date, and append betting statistics.
url = 'https://nb-bet.com/Teams/552-Atletiko-Rafaela-statistika-komandi'
r = requests.get(url, headers=headers)
# Dump the raw HTML for debugging; file.write returns the character count.
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
# Scores appear as "F : F (H : H)"; split them into full-time and
# first-half lists via the regexes below.
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
# Collect match dates ('dd.mm.yy') from the results table.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
# Rebuild 4-digit years ('dd.mm.yy' -> 'dd.mm.20yy') for entries 5..20.
date = []
y = '.20'
for idx in range(5, 21):
    if len(next_game_2) > idx:
        s = next_game_2[idx]
        date.append(s[:5] + y + s[6:])
# Parse at most 16 dates and pick the first one that is earlier than its
# successor — that successor is taken as the next scheduled game.  The
# original unrolled if/else chain compared date_0..date_15 unconditionally
# and could raise NameError (or compare stale values from a previous team)
# when fewer than 16 dates were scraped; this loop handles any count.
parsed = [dt.datetime.strptime(d, '%d.%m.%Y') for d in date[:16]]
for i in range(len(parsed) - 1):
    if parsed[i] < parsed[i + 1]:
        next_game_3 = date[i + 1]
        break
team = "Атлетико Рафаэла"
def adding_team():
    # Append a section header with the team's Latin name to the report file
    # (path held in the module-level variable `a`).
    b = "Atletiko-Rafaela"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()
create_file()
adding_team()
# Compute and append every tracked betting pattern for this team.
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
# One record per team page: (display name, transliterated report label, URL).
# Replaces six copy-pasted ~225-line scrape-and-report sections with a single
# data-driven top-level loop. All names assigned here remain module-global —
# the same state the original flat sections produced for the stats helpers.
_TEAM_PAGES = [
    ("Химнасия Хухуй", "Khimnasiya-Khukhuy",
     'https://nb-bet.com/Teams/1563-Khimnasiya-Khukhuy-statistika-komandi'),
    ("Эстудиантес Рио Куарто", "Estudiantes-Rio-Kuarto",
     'https://nb-bet.com/Teams/3915-Estudiantes-Rio-Kuarto-statistika-komandi'),
    ("Альмиранте Браун", "Almirante-Braun",
     'https://nb-bet.com/Teams/2140-Almirante-Braun-statistika-komandi'),
    ("Сан-Тельмо", "San-Telmo",
     'https://nb-bet.com/Teams/2145-San-Telmo-statistika-komandi'),
    ("Ферро", "Ferro",
     'https://nb-bet.com/Teams/1555-Ferro-statistika-komandi'),
    ("Атлетико Митре", "Atletiko-Mitre",
     'https://nb-bet.com/Teams/2390-Atletiko-Mitre-statistika-komandi'),
]

for team, _translit, url in _TEAM_PAGES:
    # Fetch the team's statistics page and keep a local HTML snapshot.
    r = requests.get(url, headers=headers)
    with open('main.html', 'w', encoding='utf-8-sig') as file:
        text = file.write(r.text)
    soup = BeautifulSoup(r.text, 'lxml')
    print(soup.title.text)
    # Score links: pull "X : Y (A : B)" result strings.
    nbbet = soup.select(".a-dotted-hover")
    one = ("...".join((str(i) for i in nbbet)))
    two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
    three = (" ".join((str(i) for i in two)))
    full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
    first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
    # Fixture dates ('dd.mm.yy') from the results column.
    next_game = soup.select(".first-td-content-results-auto")
    next_game_1 = ("...".join((str(i) for i in next_game)))
    next_game_1_1 = next_game_1.replace(' ', '')
    next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
    next_games = next_game_2[10:]
    next_games_1 = len(next_games)
    y = '.20'
    # Expand entries 5..20 from 'dd.mm.yy' to 'dd.mm.20yy' — the same slice
    # the originals built one variable at a time as bb, cc, ..., rr.
    date = [s[:5] + y + s[6:] for s in next_game_2[5:21]]
    # The first date strictly earlier than its successor marks the boundary
    # between past results and upcoming fixtures; keep the successor.
    # (Replaces the 16-level nested if/else cascade; unlike the cascade it
    # never compares stale date_N globals when fewer dates were scraped.)
    for _cur, _nxt in zip(date, date[1:]):
        if dt.datetime.strptime(_cur, '%d.%m.%Y') < dt.datetime.strptime(_nxt, '%d.%m.%Y'):
            next_game_3 = _nxt
            break
    def adding_team(label=_translit):
        """Append this team's section header to the report file `a`."""
        # `label` is bound at definition time to avoid the late-binding
        # closure pitfall inside the loop; `with` closes the file reliably.
        with open(a, "a+") as new_file:
            new_file.write('\n\n --------------------------------- ' + label)
    create_file()
    adding_team()
    # Full-time market statistics.
    draws(full_time)
    odd(full_time)
    even(full_time)
    under_1_5(full_time)
    over_2_5(full_time)
    under_2_5(full_time)
    both_score(full_time)
    both_no_score(full_time)
    # First-half market statistics.
    draws_first_time(first_half_time)
    no_goal_first_time(first_half_time)
    # Streak / alternation statistics.
    Odd_Even(full_time)
    Even_Odd(full_time)
    draws_NOdraws(full_time)
    NOdraws_draws(full_time)
    under15_over15(full_time)
    over15_under15(full_time)
    under25_over25(full_time)
    over25_under25(full_time)
    both_noboth_score(full_time)
    noboth_both_score(full_time)
    draws_NOdraws_first_time(first_half_time)
    NOdraws_draws_first_time(first_half_time)
    goal_NOgoal_first_time(first_half_time)
    NOgoal_goal_first_time(first_half_time)
# ---- "Дефенсорес де Бельграно": scrape match history from nb-bet and emit stats.
url = 'https://nb-bet.com/Teams/2142-Defensores-de-Belgrano-statistika-komandi'
r = requests.get(url, headers=headers)
# Keep a copy of the raw page for debugging; `text` holds the char count written.
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)

# Extract scores like "2 : 1 (1 : 0)": full-time and first-half parts.
nbbet = soup.select(".a-dotted-hover")
one = "...".join(str(i) for i in nbbet)
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = " ".join(str(i) for i in two)
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)

# Extract match dates in dd.mm.yy form.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = "...".join(str(i) for i in next_game)
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)

# Expand two-digit years ('dd.mm.yy' -> 'dd.mm.20yy') for entries 5..20.
# This replaces the original 16-way copy-paste cascade.
date = []
y = '.20'
_letters = ['b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r']
for _pos, _letter in enumerate(_letters):
    if len(next_game_2) >= _pos + 6:
        _raw = next_game_2[_pos + 5]
        _full = _raw[:5] + y + _raw[6:]
        # Keep the legacy single/double-letter aliases (b/bb, c/cc, ...)
        # because later stanzas of this script may still read them.
        globals()[_letter] = _raw
        globals()[_letter * 2] = _full
        date.append(_full)

# Parse up to 16 dates; keep the legacy date_N aliases in sync.
_parsed = [dt.datetime.strptime(_d, '%d.%m.%Y') for _d in date[:16]]
for _pos, _val in enumerate(_parsed):
    globals()['date_%d' % _pos] = _val

# next_game_3 = the first date followed by a strictly later one.
# Fix: bound the scan to this team's dates instead of reading
# date_N / cc..rr left over from the previous section (NameError risk).
for _pos in range(len(_parsed) - 1):
    if _parsed[_pos] < _parsed[_pos + 1]:
        next_game_3 = date[_pos + 1]
        break

team = "Дефенсорес де Бельграно"

def adding_team():
    """Append this team's header line to the report file `a`."""
    b = "Defensores-de-Belgrano"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()

create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
# ---- "Агропекуарио": scrape match history from nb-bet and emit stats.
url = 'https://nb-bet.com/Teams/2389-Agropekuario-statistika-komandi'
r = requests.get(url, headers=headers)
# Keep a copy of the raw page for debugging; `text` holds the char count written.
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)

# Extract scores like "2 : 1 (1 : 0)": full-time and first-half parts.
nbbet = soup.select(".a-dotted-hover")
one = "...".join(str(i) for i in nbbet)
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = " ".join(str(i) for i in two)
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)

# Extract match dates in dd.mm.yy form.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = "...".join(str(i) for i in next_game)
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)

# Expand two-digit years ('dd.mm.yy' -> 'dd.mm.20yy') for entries 5..20.
date = []
y = '.20'
_letters = ['b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r']
for _pos, _letter in enumerate(_letters):
    if len(next_game_2) >= _pos + 6:
        _raw = next_game_2[_pos + 5]
        _full = _raw[:5] + y + _raw[6:]
        # Keep the legacy b/bb, c/cc, ... aliases for later stanzas.
        globals()[_letter] = _raw
        globals()[_letter * 2] = _full
        date.append(_full)

# Parse up to 16 dates; keep the legacy date_N aliases in sync.
_parsed = [dt.datetime.strptime(_d, '%d.%m.%Y') for _d in date[:16]]
for _pos, _val in enumerate(_parsed):
    globals()['date_%d' % _pos] = _val

# next_game_3 = the first date followed by a strictly later one.
# Fix: bound the scan to this team's dates instead of reading stale
# date_N / cc..rr from the previous section (NameError risk).
for _pos in range(len(_parsed) - 1):
    if _parsed[_pos] < _parsed[_pos + 1]:
        next_game_3 = date[_pos + 1]
        break

team = "Агропекуарио"

def adding_team():
    """Append this team's header line to the report file `a`."""
    b = "Agropekuario"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()

create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
# ---- "Барракас Сентраль": scrape match history from nb-bet and emit stats.
url = 'https://nb-bet.com/Teams/2144-Barrakas-Sentral-statistika-komandi'
r = requests.get(url, headers=headers)
# Keep a copy of the raw page for debugging; `text` holds the char count written.
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)

# Extract scores like "2 : 1 (1 : 0)": full-time and first-half parts.
nbbet = soup.select(".a-dotted-hover")
one = "...".join(str(i) for i in nbbet)
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = " ".join(str(i) for i in two)
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)

# Extract match dates in dd.mm.yy form.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = "...".join(str(i) for i in next_game)
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)

# Expand two-digit years ('dd.mm.yy' -> 'dd.mm.20yy') for entries 5..20.
date = []
y = '.20'
_letters = ['b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r']
for _pos, _letter in enumerate(_letters):
    if len(next_game_2) >= _pos + 6:
        _raw = next_game_2[_pos + 5]
        _full = _raw[:5] + y + _raw[6:]
        # Keep the legacy b/bb, c/cc, ... aliases for later stanzas.
        globals()[_letter] = _raw
        globals()[_letter * 2] = _full
        date.append(_full)

# Parse up to 16 dates; keep the legacy date_N aliases in sync.
_parsed = [dt.datetime.strptime(_d, '%d.%m.%Y') for _d in date[:16]]
for _pos, _val in enumerate(_parsed):
    globals()['date_%d' % _pos] = _val

# next_game_3 = the first date followed by a strictly later one.
# Fix: bound the scan to this team's dates instead of reading stale
# date_N / cc..rr from the previous section (NameError risk).
for _pos in range(len(_parsed) - 1):
    if _parsed[_pos] < _parsed[_pos + 1]:
        next_game_3 = date[_pos + 1]
        break

team = "Барракас Сентраль"

def adding_team():
    """Append this team's header line to the report file `a`."""
    b = "Barrakas-Sentral"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()

create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
# ---- "Альварадо": scrape match history from nb-bet and emit stats.
url = 'https://nb-bet.com/Teams/3914-Alvarado-statistika-komandi'
r = requests.get(url, headers=headers)
# Keep a copy of the raw page for debugging; `text` holds the char count written.
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)

# Extract scores like "2 : 1 (1 : 0)": full-time and first-half parts.
nbbet = soup.select(".a-dotted-hover")
one = "...".join(str(i) for i in nbbet)
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = " ".join(str(i) for i in two)
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)

# Extract match dates in dd.mm.yy form.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = "...".join(str(i) for i in next_game)
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)

# Expand two-digit years ('dd.mm.yy' -> 'dd.mm.20yy') for entries 5..20.
date = []
y = '.20'
_letters = ['b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r']
for _pos, _letter in enumerate(_letters):
    if len(next_game_2) >= _pos + 6:
        _raw = next_game_2[_pos + 5]
        _full = _raw[:5] + y + _raw[6:]
        # Keep the legacy b/bb, c/cc, ... aliases for later stanzas.
        globals()[_letter] = _raw
        globals()[_letter * 2] = _full
        date.append(_full)

# Parse up to 16 dates; keep the legacy date_N aliases in sync.
_parsed = [dt.datetime.strptime(_d, '%d.%m.%Y') for _d in date[:16]]
for _pos, _val in enumerate(_parsed):
    globals()['date_%d' % _pos] = _val

# next_game_3 = the first date followed by a strictly later one.
# Fix: bound the scan to this team's dates instead of reading stale
# date_N / cc..rr from the previous section (NameError risk).
for _pos in range(len(_parsed) - 1):
    if _parsed[_pos] < _parsed[_pos + 1]:
        next_game_3 = date[_pos + 1]
        break

team = "Альварадо"

def adding_team():
    """Append this team's header line to the report file `a`."""
    b = "Alvarado"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()

create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
# ---- "Бельграно": scrape match history from nb-bet and emit stats.
url = 'https://nb-bet.com/Teams/555-Belgrano-statistika-komandi'
r = requests.get(url, headers=headers)
# Keep a copy of the raw page for debugging; `text` holds the char count written.
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)

# Extract scores like "2 : 1 (1 : 0)": full-time and first-half parts.
nbbet = soup.select(".a-dotted-hover")
one = "...".join(str(i) for i in nbbet)
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = " ".join(str(i) for i in two)
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)

# Extract match dates in dd.mm.yy form.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = "...".join(str(i) for i in next_game)
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)

# Expand two-digit years ('dd.mm.yy' -> 'dd.mm.20yy') for entries 5..20.
date = []
y = '.20'
_letters = ['b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r']
for _pos, _letter in enumerate(_letters):
    if len(next_game_2) >= _pos + 6:
        _raw = next_game_2[_pos + 5]
        _full = _raw[:5] + y + _raw[6:]
        # Keep the legacy b/bb, c/cc, ... aliases for later stanzas.
        globals()[_letter] = _raw
        globals()[_letter * 2] = _full
        date.append(_full)

# Parse up to 16 dates; keep the legacy date_N aliases in sync.
_parsed = [dt.datetime.strptime(_d, '%d.%m.%Y') for _d in date[:16]]
for _pos, _val in enumerate(_parsed):
    globals()['date_%d' % _pos] = _val

# next_game_3 = the first date followed by a strictly later one.
# Fix: bound the scan to this team's dates instead of reading stale
# date_N / cc..rr from the previous section (NameError risk).
for _pos in range(len(_parsed) - 1):
    if _parsed[_pos] < _parsed[_pos + 1]:
        next_game_3 = date[_pos + 1]
        break

team = "Бельграно"

def adding_team():
    """Append this team's header line to the report file `a`."""
    b = "Belgrano"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()

create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
# ---- "Сан-Мартин": scrape match history from nb-bet and emit stats.
# (In this stanza the team name and adding_team are declared before the request,
# mirroring the original ordering.)
url = 'https://nb-bet.com/Teams/572-San-Martin-statistika-komandi'
team = "Сан-Мартин"

def adding_team():
    """Append this team's header line to the report file `a`."""
    b = "San-Martin"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()

r = requests.get(url, headers=headers)
# Keep a copy of the raw page for debugging; `text` holds the char count written.
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)

# Extract scores like "2 : 1 (1 : 0)": full-time and first-half parts.
nbbet = soup.select(".a-dotted-hover")
one = "...".join(str(i) for i in nbbet)
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = " ".join(str(i) for i in two)
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)

# Extract match dates in dd.mm.yy form.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = "...".join(str(i) for i in next_game)
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)

# Expand two-digit years ('dd.mm.yy' -> 'dd.mm.20yy') for entries 5..20.
date = []
y = '.20'
_letters = ['b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r']
for _pos, _letter in enumerate(_letters):
    if len(next_game_2) >= _pos + 6:
        _raw = next_game_2[_pos + 5]
        _full = _raw[:5] + y + _raw[6:]
        # Keep the legacy b/bb, c/cc, ... aliases for later stanzas.
        globals()[_letter] = _raw
        globals()[_letter * 2] = _full
        date.append(_full)

# Parse up to 16 dates; keep the legacy date_N aliases in sync.
_parsed = [dt.datetime.strptime(_d, '%d.%m.%Y') for _d in date[:16]]
for _pos, _val in enumerate(_parsed):
    globals()['date_%d' % _pos] = _val

# next_game_3 = the first date followed by a strictly later one.
# Fix: bound the scan to this team's dates instead of reading stale
# date_N / cc..rr from the previous section (NameError risk).
for _pos in range(len(_parsed) - 1):
    if _parsed[_pos] < _parsed[_pos + 1]:
        next_game_3 = date[_pos + 1]
        break

create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
# ---- "Чакарита Хуниорс": fetch the team page and start building the date list.
# NOTE(review): this stanza continues beyond this span in the original file
# (entries 15..20 of the date list and the date comparison follow there),
# so only entries 5..14 are built here to avoid double-appending.
url = 'https://nb-bet.com/Teams/1547-Chakarita-Khuniors-statistika-komandi'
team = "Чакарита Хуниорс"

def adding_team():
    """Append this team's header line to the report file `a`."""
    b = "Chakarita-Khuniors"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()

r = requests.get(url, headers=headers)
# Keep a copy of the raw page for debugging; `text` holds the char count written.
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)

# Extract scores like "2 : 1 (1 : 0)": full-time and first-half parts.
nbbet = soup.select(".a-dotted-hover")
one = "...".join(str(i) for i in nbbet)
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = " ".join(str(i) for i in two)
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)

# Extract match dates in dd.mm.yy form.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = "...".join(str(i) for i in next_game)
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)

# Expand two-digit years ('dd.mm.yy' -> 'dd.mm.20yy') for entries 5..14 ONLY:
# the original continuation (outside this span) still appends entries 15..20
# one by one, so this loop must stop at index 14.
date = []
y = '.20'
_letters = ['b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'k', 'l']
for _pos, _letter in enumerate(_letters):
    if len(next_game_2) >= _pos + 6:
        _raw = next_game_2[_pos + 5]
        _full = _raw[:5] + y + _raw[6:]
        # Keep the legacy b/bb, c/cc, ... aliases read by the continuation.
        globals()[_letter] = _raw
        globals()[_letter * 2] = _full
        date.append(_full)
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
# ---------------------------------------------------------------------------
# Team block: Deportivo-Moron — scrape the nb-bet.com team page and run
# every stat helper.  Uses module-level names defined earlier in the file:
# `a` (report file path), `headers`, `create_file`, `draws`, etc.
# ---------------------------------------------------------------------------
url = 'https://nb-bet.com/Teams/2135-Deportivo-Moron-statistika-komandi'
team = "Депортиво Морон"
def adding_team():
    """Append this team's section header line to the report file `a`."""
    b = "Deportivo-Moron"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()
# Download the team page and keep a local copy for inspection.
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
# Pull "X : Y (A : B)" match results, then split them into full-time and
# first-half score lists consumed by the stat helpers below.
nbbet = soup.select(".a-dotted-hover")
one = "...".join(str(i) for i in nbbet)
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = " ".join(str(i) for i in two)
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
# Collect dd.mm.yy dates from the results table.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = "...".join(str(i) for i in next_game)
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
# Expand two-digit years of entries 5..20 to 'dd.mm.20yy'
# (replaces the hand-unrolled 16-branch if-ladder).
date = [s[:5] + '.20' + s[6:] for s in next_game_2[5:21]]
# The listing is newest-first; the first chronological increase marks the
# boundary to scheduled games, and the entry after it is the next game.
# Fix: the original unrolled comparison pyramid read date_10..date_15 even
# when fewer dates were scraped, silently comparing stale values left over
# from the previous team block; iterating only over parsed dates avoids it.
parsed = [dt.datetime.strptime(s, '%d.%m.%Y') for s in date[:16]]
for i in range(len(parsed) - 1):
    if parsed[i] < parsed[i + 1]:
        next_game_3 = date[i + 1]
        break
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
# ---------------------------------------------------------------------------
# Team block: Deportivo-Maypu — scrape the nb-bet.com team page and run
# every stat helper.  Uses module-level names defined earlier in the file:
# `a` (report file path), `headers`, `create_file`, `draws`, etc.
# ---------------------------------------------------------------------------
url = 'https://nb-bet.com/Teams/4577-Deportivo-Maypu-statistika-komandi'
team = "Депортиво Майпу"
def adding_team():
    """Append this team's section header line to the report file `a`."""
    b = "Deportivo-Maypu"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()
# Download the team page and keep a local copy for inspection.
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
# Pull "X : Y (A : B)" match results, then split them into full-time and
# first-half score lists consumed by the stat helpers below.
nbbet = soup.select(".a-dotted-hover")
one = "...".join(str(i) for i in nbbet)
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = " ".join(str(i) for i in two)
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
# Collect dd.mm.yy dates from the results table.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = "...".join(str(i) for i in next_game)
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
# Expand two-digit years of entries 5..20 to 'dd.mm.20yy'
# (replaces the hand-unrolled 16-branch if-ladder).
date = [s[:5] + '.20' + s[6:] for s in next_game_2[5:21]]
# The listing is newest-first; the first chronological increase marks the
# boundary to scheduled games, and the entry after it is the next game.
# Fix: the original unrolled comparison pyramid read date_10..date_15 even
# when fewer dates were scraped, silently comparing stale values left over
# from the previous team block; iterating only over parsed dates avoids it.
parsed = [dt.datetime.strptime(s, '%d.%m.%Y') for s in date[:16]]
for i in range(len(parsed) - 1):
    if parsed[i] < parsed[i + 1]:
        next_game_3 = date[i + 1]
        break
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
# ---------------------------------------------------------------------------
# Team block: Instituto — scrape the nb-bet.com team page and run
# every stat helper.  Uses module-level names defined earlier in the file:
# `a` (report file path), `headers`, `create_file`, `draws`, etc.
# ---------------------------------------------------------------------------
url = 'https://nb-bet.com/Teams/1549-Instituto-statistika-komandi'
team = "Институто"
def adding_team():
    """Append this team's section header line to the report file `a`."""
    b = "Instituto"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()
# Download the team page and keep a local copy for inspection.
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
# Pull "X : Y (A : B)" match results, then split them into full-time and
# first-half score lists consumed by the stat helpers below.
nbbet = soup.select(".a-dotted-hover")
one = "...".join(str(i) for i in nbbet)
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = " ".join(str(i) for i in two)
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
# Collect dd.mm.yy dates from the results table.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = "...".join(str(i) for i in next_game)
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
# Expand two-digit years of entries 5..20 to 'dd.mm.20yy'
# (replaces the hand-unrolled 16-branch if-ladder).
date = [s[:5] + '.20' + s[6:] for s in next_game_2[5:21]]
# The listing is newest-first; the first chronological increase marks the
# boundary to scheduled games, and the entry after it is the next game.
# Fix: the original unrolled comparison pyramid read date_10..date_15 even
# when fewer dates were scraped, silently comparing stale values left over
# from the previous team block; iterating only over parsed dates avoids it.
parsed = [dt.datetime.strptime(s, '%d.%m.%Y') for s in date[:16]]
for i in range(len(parsed) - 1):
    if parsed[i] < parsed[i + 1]:
        next_game_3 = date[i + 1]
        break
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
# ---------------------------------------------------------------------------
# Team block: Almagro — scrape the nb-bet.com team page and run
# every stat helper.  Uses module-level names defined earlier in the file:
# `a` (report file path), `headers`, `create_file`, `draws`, etc.
# ---------------------------------------------------------------------------
url = 'https://nb-bet.com/Teams/1551-Almagro-statistika-komandi'
team = "Альмагро"
def adding_team():
    """Append this team's section header line to the report file `a`."""
    b = "Almagro"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()
# Download the team page and keep a local copy for inspection.
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
# Pull "X : Y (A : B)" match results, then split them into full-time and
# first-half score lists consumed by the stat helpers below.
nbbet = soup.select(".a-dotted-hover")
one = "...".join(str(i) for i in nbbet)
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = " ".join(str(i) for i in two)
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
# Collect dd.mm.yy dates from the results table.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = "...".join(str(i) for i in next_game)
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
# Expand two-digit years of entries 5..20 to 'dd.mm.20yy'
# (replaces the hand-unrolled 16-branch if-ladder).
date = [s[:5] + '.20' + s[6:] for s in next_game_2[5:21]]
# The listing is newest-first; the first chronological increase marks the
# boundary to scheduled games, and the entry after it is the next game.
# Fix: the original unrolled comparison pyramid read date_10..date_15 even
# when fewer dates were scraped, silently comparing stale values left over
# from the previous team block; iterating only over parsed dates avoids it.
parsed = [dt.datetime.strptime(s, '%d.%m.%Y') for s in date[:16]]
for i in range(len(parsed) - 1):
    if parsed[i] < parsed[i + 1]:
        next_game_3 = date[i + 1]
        break
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
# ---------------------------------------------------------------------------
# Team block: Estudiantes-Kaseros — scrape the nb-bet.com team page and run
# every stat helper.  Uses module-level names defined earlier in the file:
# `a` (report file path), `headers`, `create_file`, `draws`, etc.
# ---------------------------------------------------------------------------
url = 'https://nb-bet.com/Teams/2139-Estudiantes-Kaseros-statistika-komandi'
team = "Эстудиантес Касерос"
def adding_team():
    """Append this team's section header line to the report file `a`."""
    b = "Estudiantes-Kaseros"
    new_file = open(a, "a+")
    new_file.write('\n\n --------------------------------- ' + b)
    new_file.close()
# Download the team page and keep a local copy for inspection.
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
    text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
# Pull "X : Y (A : B)" match results, then split them into full-time and
# first-half score lists consumed by the stat helpers below.
nbbet = soup.select(".a-dotted-hover")
one = "...".join(str(i) for i in nbbet)
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = " ".join(str(i) for i in two)
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
# Collect dd.mm.yy dates from the results table.
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = "...".join(str(i) for i in next_game)
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
# Expand two-digit years of entries 5..20 to 'dd.mm.20yy'
# (replaces the hand-unrolled 16-branch if-ladder).
date = [s[:5] + '.20' + s[6:] for s in next_game_2[5:21]]
# The listing is newest-first; the first chronological increase marks the
# boundary to scheduled games, and the entry after it is the next game.
# Fix: the original unrolled comparison pyramid read date_10..date_15 even
# when fewer dates were scraped, silently comparing stale values left over
# from the previous team block; iterating only over parsed dates avoids it.
parsed = [dt.datetime.strptime(s, '%d.%m.%Y') for s in date[:16]]
for i in range(len(parsed) - 1):
    if parsed[i] < parsed[i + 1]:
        next_game_3 = date[i + 1]
        break
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/573-Temperley-statistika-komandi'
team = "Темперлей"
def adding_team():
b = "Temperley"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/1546-Gilermo-Braun-statistika-komandi'
team = "Гильермо Браун"
def adding_team():
b = "Gilermo-Braun"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/2147-Tristan-Suares-statistika-komandi'
team = "Тристан Суарес"
def adding_team():
b = "Tristan-Suares"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/565-Nueva-Chikago-statistika-komandi'
team = "Нуэва Чикаго"
def adding_team():
b = "Nueva-Chikago"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
url = 'https://nb-bet.com/Teams/1556-Vilya-Dalmine-statistika-komandi'
team = "Вилья Дальмине"
def adding_team():
b = "Vilya-Dalmine"
new_file = open(a, "a+")
new_file.write('\n\n --------------------------------- ' + b)
new_file.close()
r = requests.get(url, headers=headers)
with open('main.html', 'w', encoding='utf-8-sig') as file:
text = file.write(r.text)
soup = BeautifulSoup(r.text, 'lxml')
print(soup.title.text)
nbbet = soup.select(".a-dotted-hover")
one = ("...".join((str(i) for i in nbbet)))
two = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s[(-][0-9]{1}\s[:-]\s[0-9]{1}[\)-]', one)
three = (" ".join((str(i) for i in two)))
full_time = re.findall(r'[0-9]{1}\s[:-]\s[0-9]{1}\s', three)
first_half_time = re.findall(r'[(][0-9]{1}\s[:]\s[0-9]{1}[)]', three)
next_game = soup.select(".first-td-content-results-auto")
next_game_1 = ("...".join((str(i) for i in next_game)))
next_game_1_1 = next_game_1.replace(' ', '')
next_game_2 = re.findall(r'\d{2}[.]\d{2}[.]\d{2}', next_game_1_1)
next_games = next_game_2[10:]
next_games_1 = len(next_games)
date = []
y = '.20'
if len(next_game_2) >= 6:
b = next_game_2[5]
bb = b[:6 - 1] + y + b[6:]
date.append(bb)
else:
pass
if len(next_game_2) >= 7:
c = next_game_2[6]
cc = c[:6 - 1] + y + c[6:]
date.append(cc)
else:
pass
if len(next_game_2) >= 8:
d = next_game_2[7]
dd = d[:6 - 1] + y + d[6:]
date.append(dd)
else:
pass
if len(next_game_2) >= 9:
e = next_game_2[8]
ee = e[:6 - 1] + y + e[6:]
date.append(ee)
else:
pass
if len(next_game_2) >= 10:
f = next_game_2[9]
ff = f[:6 - 1] + y + f[6:]
date.append(ff)
else:
pass
if len(next_game_2) >= 11:
g = next_game_2[10]
gg = g[:6 - 1] + y + g[6:]
date.append(gg)
else:
pass
if len(next_game_2) >= 12:
h = next_game_2[11]
hh = h[:6 - 1] + y + h[6:]
date.append(hh)
else:
pass
if len(next_game_2) >= 13:
j = next_game_2[12]
jj = j[:6 - 1] + y + j[6:]
date.append(jj)
else:
pass
if len(next_game_2) >= 14:
k = next_game_2[13]
kk = k[:6 - 1] + y + k[6:]
date.append(kk)
else:
pass
if len(next_game_2) >= 15:
l = next_game_2[14]
ll = l[:6 - 1] + y + l[6:]
date.append(ll)
else:
pass
if len(next_game_2) >= 16:
m = next_game_2[15]
mm = m[:6 - 1] + y + m[6:]
date.append(mm)
else:
pass
if len(next_game_2) >= 17:
n = next_game_2[16]
nn = n[:6 - 1] + y + n[6:]
date.append(nn)
else:
pass
if len(next_game_2) >= 18:
o = next_game_2[17]
oo = o[:6 - 1] + y + o[6:]
date.append(oo)
else:
pass
if len(next_game_2) >= 19:
p = next_game_2[18]
pp = p[:6 - 1] + y + p[6:]
date.append(pp)
else:
pass
if len(next_game_2) >= 20:
q = next_game_2[19]
qq = q[:6 - 1] + y + q[6:]
date.append(qq)
else:
pass
if len(next_game_2) >= 21:
r = next_game_2[20]
rr = r[:6 - 1] + y + r[6:]
date.append(rr)
else:
pass
if len(date) >= 1:
date_0 = dt.datetime.strptime(date[0], '%d.%m.%Y')
if len(date) >= 2:
date_1 = dt.datetime.strptime(date[1], '%d.%m.%Y')
if len(date) >= 3:
date_2 = dt.datetime.strptime(date[2], '%d.%m.%Y')
if len(date) >= 4:
date_3 = dt.datetime.strptime(date[3], '%d.%m.%Y')
if len(date) >= 5:
date_4 = dt.datetime.strptime(date[4], '%d.%m.%Y')
if len(date) >= 6:
date_5 = dt.datetime.strptime(date[5], '%d.%m.%Y')
if len(date) >= 7:
date_6 = dt.datetime.strptime(date[6], '%d.%m.%Y')
if len(date) >= 8:
date_7 = dt.datetime.strptime(date[7], '%d.%m.%Y')
if len(date) >= 9:
date_8 = dt.datetime.strptime(date[8], '%d.%m.%Y')
if len(date) >= 10:
date_9 = dt.datetime.strptime(date[9], '%d.%m.%Y')
if len(date) >= 11:
date_10 = dt.datetime.strptime(date[10], '%d.%m.%Y')
if len(date) >= 12:
date_11 = dt.datetime.strptime(date[11], '%d.%m.%Y')
if len(date) >= 13:
date_12 = dt.datetime.strptime(date[12], '%d.%m.%Y')
if len(date) >= 14:
date_13 = dt.datetime.strptime(date[13], '%d.%m.%Y')
if len(date) >= 15:
date_14 = dt.datetime.strptime(date[14], '%d.%m.%Y')
if len(date) >= 16:
date_15 = dt.datetime.strptime(date[15], '%d.%m.%Y')
if date_0 < date_1:
next_game_3 = cc
else:
if date_1 < date_2:
next_game_3 = dd
else:
if date_2 < date_3:
next_game_3 = ee
else:
if date_3 < date_4:
next_game_3 = ff
else:
if date_4 < date_5:
next_game_3 = gg
else:
if date_5 < date_6:
next_game_3 = hh
else:
if date_6 < date_7:
next_game_3 = jj
else:
if date_7 < date_8:
next_game_3 = kk
else:
if date_8 < date_9:
next_game_3 = ll
else:
if date_9 < date_10:
next_game_3 = mm
else:
if date_10 < date_11:
next_game_3 = nn
else:
if date_11 < date_12:
next_game_3 = oo
else:
if date_12 < date_13:
next_game_3 = pp
else:
if date_13 < date_14:
next_game_3 = qq
else:
if date_14 < date_15:
next_game_3 = rr
else:
pass
create_file()
adding_team()
draws(full_time)
odd(full_time)
even(full_time)
under_1_5(full_time)
over_2_5(full_time)
under_2_5(full_time)
both_score(full_time)
both_no_score(full_time)
draws_first_time(first_half_time)
no_goal_first_time(first_half_time)
Odd_Even(full_time)
Even_Odd(full_time)
draws_NOdraws(full_time)
NOdraws_draws(full_time)
under15_over15(full_time)
over15_under15(full_time)
under25_over25(full_time)
over25_under25(full_time)
both_noboth_score(full_time)
noboth_both_score(full_time)
draws_NOdraws_first_time(first_half_time)
NOdraws_draws_first_time(first_half_time)
goal_NOgoal_first_time(first_half_time)
NOgoal_goal_first_time(first_half_time)
| [
"noreply@github.com"
] | noreply@github.com |
bcd8ad1f09ff8608cd0e1873f337a00d768cbe32 | d7a05a935169e7b4d1c3cc834ff70633908fb525 | /test_model.py | a5e8711ccbf4e9a40a69659d8d35d06c897711b8 | [] | no_license | NinaWie/classify_satellite_images | 72d44b7cdc9c9c038daccc6e354ede04b0b786d8 | 5810580999e557e56fc09d0404f2faccc9690e9a | refs/heads/master | 2020-08-29T02:02:12.105432 | 2019-10-27T17:27:56 | 2019-10-27T17:27:56 | 217,888,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,264 | py | import numpy as np
import os
import cv2
import json
import keras
import argparse
import sys
from keras.applications.vgg16 import preprocess_input
from keras.models import model_from_json
def classify_tif_image(k, l):
"""
This loads the TIF file and creates the 256*256 tiles out of it. it then classifies each tile to one of the 9 classes.
:param k:
:param l:
:return:
"""
img = cv2.imread(inp_path + "/{}-{}.tif".format(k, l))
print(inp_path + "/{}-{}.tif".format(k, l))
print("Finished Loading Image")
shape = img.shape
imcount = 0
img_arr = []
filenames = []
for i in range(0, shape[0] - shape[0] % 256, 256):
for j in range(0, shape[1] - shape[1] % 256, 256):
tile = img[i:i + 256, j:j + 256, :]
assert (tile.shape == (256, 256, 3))
imcount += 1
img_arr.append(tile)
filenames.append("{}-{}_{}_{}".format(k, l, i, j))
assert (len(filenames) == len(img_arr))
img_arr = np.asarray(img_arr)
print(img_arr.shape)
# final data:
img_data = preprocess_input(img_arr.astype(np.float))
sizes = img_data.shape
print(img_data.shape)
# load json and create model
json_file = open(os.path.join(model_path, 'model.json'), 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights(os.path.join(model_path, "model.h5"))
print("Loaded model from disk")
# evaluate loaded model on test data
loaded_model.compile(loss=keras.losses.categorical_crossentropy, optimizer='adam', metrics=['accuracy'])
out = loaded_model.predict(img_data)
mapping = json.load(open(mapping_file, "r"))
res_dir = {filenames[i]: str(mapping[str(np.argmax(out[i]))]) for i in range(len(out))}
output_path = "{}-{}_pred_labels.json".format(k, l)
json.dump(res_dir, open(os.path.join(args.out_dir, output_path), "w"))
print("Saved predicted labels in a dictionary in ", output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Train/test neural network for recognizing pitch type from joint trajectories')
parser.add_argument('-path_to_data', help='path to data to predict labels for - e.g. tiles', required=True)
parser.add_argument('-path_to_model', help='path to model.h5 and model.json', required=True)
parser.add_argument('-mapping_file', help='path to mapping file', required=True)
parser.add_argument('-out_dir', default=".", help='path to output the predicted labels', required=False)
parser.add_argument('-start', default="1", help='number from which image it should start', required=False)
parser.add_argument('-end', default="2", help='number to which image it should process', required=False)
args = parser.parse_args()
inp_path = args.path_to_data
model_path = args.path_to_model
mapping_file = args.mapping_file
from_num = int(args.start)
to_num = int(args.end)
if not os.path.exists:
print("ERROR: PATH DOES NOT EXIST!")
sys.exit()
for k in range(from_num, to_num):
for l in range(17, 20, 2):
classify_tif_image(k, l)
| [
"ninawiedemann999@gmail.com"
] | ninawiedemann999@gmail.com |
f9a5490c2beeec964b97a7bd6462e457285bbb33 | 530c8697641092d9291514e919f52f79c4ff00d7 | /2d (1).py | 3c9f85943a5580834acc0df43d0b91cbcc24e107 | [] | no_license | haldanuj/EP219 | 59336fcb72ccf76df81f81b37a7c488c1f182d01 | 115633dac1d8f35a014467703672f73761baac7d | refs/heads/master | 2021-01-12T18:19:58.314348 | 2016-11-04T01:01:08 | 2016-11-04T01:01:08 | 69,415,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,933 | py |
import numpy as np
import matplotlib.pyplot as plt
import math
file=open('recoilenergydata_EP219.csv','r')
#defined list to store the values of log(L(s))
y=[]
#here defined log(L) function with s as parameter(sigma)
def likelihood(s):
file=open('recoilenergydata_EP219.csv','r')
i=0
sum1=0
sum2=0
sum3=0
sum4=0
while i<40:
if i<5:
[Er, Events]=file.readline().split(',')
m1= float(Events)
n1=float(Er)
n2=1000*(np.exp(-n1/10))
m2=np.log(n2)
sum1=sum1+(-n2 +m1*m2)
#sum1=summation of (-Bi + Di*log(Bi)) where Bi are backgound events and Di are
#observed events for 0<i<5
elif 5<=i<=15:
[Er, Events]=file.readline().split(',')
n1=float(Er)
m1= float(Events)
n2=1000*(np.exp(-n1/10))
t=s*20*(n1-5)
n3=t+n2
m2=np.log(n3)
sum2 =sum2 + (-n3 + m1*m2)
#sum2=summation of (-(Bi+Ti) + Di*log(Bi+Ti)) where Bi are backgound events,Di are
#observed events and Ti are observed events for 5<=i<15
elif 15<i<25:
[Er, Events]=file.readline().split(',')
n1=float(Er)
m1= float(Events)
n2=1000*(np.exp(-n1/10))
t=s*20*(25-n1)
n3=t+n2
m2=np.log(n3)
sum3 =sum3 + (-n3 + m1*m2)
#sum3=summation of (-(Bi+Ti) + Di*log(Bi+Ti)) where Bi are backgound events,Di are
#observed events and Ti are observed events for 15<=i<25
else :
[Er, Events]=file.readline().split(',')
m1= float(Events)
n1=float(Er)
n2=1000*(np.exp(-n1/10))
m2=np.log(n2)
sum4 =sum4 + (-n2 + m1*m2)
#sum4=summation of (-Bi + Di*log(Bi)) where Bi are backgound events and Di are
#observed events for 25<i<40
i=i+1
return (sum1 +sum2+sum3+sum4)
s=np.linspace(0, 1, 100)
y= likelihood(s)
fig, ax = plt.subplots()
ax.plot(s, y)
plt.title('Likelihood plot')
plt.xlabel('sigma')
plt.ylabel('log(sigma)')
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
dd2c0563158627b1fd4a1e16385be8b08316abe4 | a7d23974abd0d09681c17ca09038dc6dcd80a2ee | /extra/exporters.py | db0c766c8661667fb961183dbe55d8eadda1027e | [] | no_license | world9781/pydir | ccdf8aa560411957cf476324d49c7c1b4e0073c5 | 0c6c878a69bc5e0cabd12142d5fbb014fbade006 | refs/heads/master | 2023-03-15T20:27:57.409027 | 2018-10-22T19:09:11 | 2018-10-22T19:09:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 964 | py | """
Exporters main duty is to represent a directory structure as XML or Json
TO BE IMPLEMENTED
"""
import os
import json
class BaseExporter(object):
    """Base class for exporters that serialise a directory tree.

    Subclasses implement :meth:`dump` to write the structure produced by
    :meth:`repr_as_dict` in a concrete format (e.g. JSON, XML).
    """

    def __init__(self, path_name):
        # Root path whose tree will be exported by dump().
        self.pathname = path_name

    def repr_as_dict(self, path):
        """Recursively describe *path* as a plain dict.

        Directories get ``type: 'Directory'`` plus a ``children`` list of
        sub-trees; anything else is reported as a ``file`` leaf.
        """
        base = {'root-name': os.path.basename(path)}
        if os.path.isdir(path):
            base['type'] = 'Directory'
            base['children'] = [self.repr_as_dict(os.path.join(path, the_dir))
                                for the_dir in os.listdir(path)]
        else:
            base['type'] = "file"
        return base

    def dump(self, out_file):
        """Write the tree to *out_file*; must be overridden by subclasses."""
        # BUG FIX: raise the idiomatic NotImplementedError instead of a bare
        # Exception so callers can tell "abstract method" apart from real
        # runtime failures.
        raise NotImplementedError("dump() must be implemented by a subclass")
class JSONExporter(BaseExporter):
    """Serialise a directory tree to JSON via the :mod:`json` module."""

    def __init__(self, *args):
        # Nothing JSON-specific to initialise; defer to the base class.
        super(JSONExporter, self).__init__(*args)

    def dump(self, out_file):
        """Write the directory structure of ``self.pathname`` to *out_file*."""
        tree = self.repr_as_dict(self.pathname)
        json.dump(tree, out_file)
| [
"kituyiharry@gmail.com"
] | kituyiharry@gmail.com |
6a6d137d3c8dc70d14aa023a752ffba6f170d4fd | 91af1af67ed219e583b209b40ae5dd34d6f7f355 | /train_net.py | 90d770c1765c7f52a585ded8af49a5bf767545db | [] | no_license | jack20951948/Deep-Clustering | d6f5bfdd97be1f07f114371eafd9f8643ebb6e30 | 4dd8b4d3fef72e597cd142406d343450cf2dd517 | refs/heads/main | 2023-06-28T02:18:58.915727 | 2021-07-18T07:18:10 | 2021-07-18T07:18:10 | 387,109,398 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,555 | py | '''
Script to train the model
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import time
import numpy as np
import tensorflow as tf
import ipdb
from datagenerator2 import DataGenerator
from model import Model
from GlobalConstont import *
# the .pkl file lists of data set
pkl_list = ['deep-clustering-master/pkl_folder/train.pkl'] # ['../dcdata/' + str(i) + '.pkl' for i in range(1, 12)]
val_list = ['deep-clustering-master/pkl_folder/val.pkl']  # validation-set pickle list
sum_dir = 'deep-clustering-master/sum'  # TensorBoard summary output directory
train_dir = 'deep-clustering-master/model'  # checkpoint output directory
lr = 1e-3  # learning rate passed to Model.train
n_hidden = 300  # number of hidden units handed to Model
max_steps = 20000000  # upper bound on training steps
batch_size = 128  # samples per training batch
def train():
    """Build the deep-clustering network and run the training loop.

    Trains on batches drawn from ``pkl_list``, writes TensorBoard summaries
    to ``sum_dir``, checkpoints to ``train_dir`` every 4000 steps, and runs
    an averaged validation pass (over ``val_list``) once per training epoch.
    """
    with tf.Graph().as_default():
        # dropout keep probability
        p_keep_ff = tf.placeholder(tf.float32, shape=None)
        p_keep_rc = tf.placeholder(tf.float32, shape=None)
        # generator for training set and validation set
        data_generator = DataGenerator(pkl_list, batch_size)
        val_generator = DataGenerator(val_list, batch_size)
        # placeholder for input log spectrum, VAD info.,
        # and speaker indicator function
        in_data = tf.placeholder(
            tf.float32, shape=[batch_size, FRAMES_PER_SAMPLE, NEFF])
        VAD_data = tf.placeholder(
            tf.float32, shape=[batch_size, FRAMES_PER_SAMPLE, NEFF])
        Y_data = tf.placeholder(
            tf.float32, shape=[batch_size, FRAMES_PER_SAMPLE, NEFF, 2])
        # init the model
        BiModel = Model(n_hidden, batch_size, p_keep_ff, p_keep_rc)
        # build the net structure
        embedding = BiModel.inference(in_data)
        Y_data_reshaped = tf.reshape(Y_data, [-1, NEFF, 2])
        VAD_data_reshaped = tf.reshape(VAD_data, [-1, NEFF])
        # compute the loss
        loss = BiModel.loss(embedding, Y_data_reshaped, VAD_data_reshaped)
        # get the train operation
        train_op = BiModel.train(loss, lr)
        saver = tf.train.Saver(tf.all_variables())
        summary_op = tf.summary.merge_all()
        sess = tf.Session()
        # either train from scratch or a trained model
        # saver.restore(sess, 'train/model.ckpt-492000')
        # val_loss = np.fromfile('val_loss').tolist()
        # init_step = 56001
        init = tf.initialize_all_variables()
        sess.run(init)
        init_step = 0
        summary_writer = tf.summary.FileWriter(
            sum_dir, sess.graph)
        # BUG FIX: val_loss was left commented out, but it is appended to in
        # the validation branch below, which raised NameError on the first
        # epoch change when training from scratch.
        val_loss = []
        last_epoch = data_generator.epoch
        for step in range(init_step, init_step + max_steps):
            start_time = time.time()
            data_batch = data_generator.gen_batch()
            # concatenate the samples into batch data
            in_data_np = np.concatenate(
                [np.reshape(item['Sample'], [1, FRAMES_PER_SAMPLE, NEFF])
                 for item in data_batch])
            VAD_data_np = np.concatenate(
                [np.reshape(item['VAD'], [1, FRAMES_PER_SAMPLE, NEFF])
                 for item in data_batch])
            VAD_data_np = VAD_data_np.astype('int')
            Y_data_np = np.concatenate(
                [np.reshape(item['Target'], [1, FRAMES_PER_SAMPLE, NEFF, 2])
                 for item in data_batch])
            Y_data_np = Y_data_np.astype('int')
            # train the model
            loss_value, _, summary_str = sess.run(
                [loss, train_op, summary_op],
                feed_dict={in_data: in_data_np,
                           VAD_data: VAD_data_np,
                           Y_data: Y_data_np,
                           p_keep_ff: 1 - P_DROPOUT_FF,
                           p_keep_rc: 1 - P_DROPOUT_RC})
            summary_writer.add_summary(summary_str, step)
            duration = time.time() - start_time
            # if np.isnan(loss_value):
            #     import ipdb; ipdb.set_trace()
            assert not np.isnan(loss_value)
            if step % 100 == 0:
                # show training progress every 100 steps
                num_examples_per_step = batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)
                format_str = (
                    '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                    'sec/batch, epoch %d)')
                print (format_str % (datetime.now(), step, loss_value,
                                     examples_per_sec, sec_per_batch,
                                     data_generator.epoch))
            if step % 4000 == 0:
                # save model every 4000 steps
                checkpoint_path = os.path.join(train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
            if last_epoch != data_generator.epoch:
                # doing validation every training epoch
                print('Doing validation')
                val_epoch = val_generator.epoch
                count = 0
                loss_sum = 0
                # average the validation loss
                while(val_epoch == val_generator.epoch):
                    count += 1
                    data_batch = val_generator.gen_batch()
                    in_data_np = np.concatenate(
                        [np.reshape(item['Sample'],
                                    [1, FRAMES_PER_SAMPLE, NEFF])
                         for item in data_batch])
                    VAD_data_np = np.concatenate(
                        [np.reshape(item['VAD'], [1, FRAMES_PER_SAMPLE, NEFF])
                         for item in data_batch])
                    VAD_data_np = VAD_data_np.astype('int')
                    Y_data_np = np.concatenate(
                        [np.reshape(item['Target'],
                                    [1, FRAMES_PER_SAMPLE, NEFF, 2])
                         for item in data_batch])
                    Y_data_np = Y_data_np.astype('int')
                    loss_value, = sess.run(
                        [loss],
                        feed_dict={in_data: in_data_np,
                                   VAD_data: VAD_data_np,
                                   Y_data: Y_data_np,
                                   p_keep_ff: 1,
                                   p_keep_rc: 1})
                    loss_sum += loss_value
                val_loss.append(loss_sum / count)
                print ('validation loss: %.3f' % (loss_sum / count))
                np.array(val_loss).tofile('val_loss')
            last_epoch = data_generator.epoch
if __name__ == '__main__':
    # Guard the entry point so importing this module does not start training.
    print('%s start' % datetime.now())
    train()
| [
"j20951948@gmail.com"
] | j20951948@gmail.com |
1210ab54593eea5b9c24f896a0e2f0ffdb4dc99f | fb96a752515b20e5bb3548cc5eec39b81d463643 | /Advent/2016/day_08/eight.py | f95e8a67803a3b23d5abf78129a798f0349b137b | [] | no_license | kryptn/Challenges | 2de2675ad0a39e13fb983a728dc090af7113b443 | f1aba799fa28e542bf3782cdfa825ff9440bf66c | refs/heads/master | 2021-05-01T02:54:21.404383 | 2016-12-27T17:09:09 | 2016-12-27T17:09:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | with open('input.txt') as fd:
data = fd.read()
class Screen:
    """A 50x6 pixel grid driven by 'rect' and 'rotate' instructions
    (Advent of Code 2016, day 8)."""

    def __init__(self):
        # grid[y][x] is True when the pixel at column x, row y is lit.
        self.grid = [[False]*50 for x in range(6)]

    def shift_row(self, row, spaces):
        """Rotate *row* right by *spaces* pixels, wrapping around."""
        cells = self.grid[row]
        # BUG FIX: normalise the shift amount; the original slice arithmetic
        # duplicated the row when spaces was 0 or a multiple of the width
        # (cells[-0:] is the whole row, so the row doubled in length).
        spaces %= len(cells)
        if spaces:
            self.grid[row] = cells[-spaces:] + cells[:-spaces]

    def shift_col(self, col, spaces):
        """Rotate column *col* down by *spaces* pixels, wrapping around."""
        # Transpose, rotate the (now) row, then transpose back.
        # BUG FIX: materialise the transpose as lists of lists; on Python 3
        # the bare zip iterator is not subscriptable, so shift_row crashed.
        self.grid = [list(x) for x in zip(*self.grid)]
        self.shift_row(col, spaces)
        self.grid = [list(x) for x in zip(*self.grid)]

    def enable(self, length, height):
        """Light the length x height rectangle anchored at the top-left."""
        for x in range(length):
            for y in range(height):
                self.grid[y][x] = True

    def __str__(self):
        return '\n'.join(' '.join('#' if x else '.' for x in row) for row in self.grid)

    def parse(self, inp):
        """Apply one instruction, e.g. 'rect 3x2' or 'rotate row y=0 by 4'."""
        i = inp.split()
        if i[0] == 'rect':
            x, y = i[1].split('x')
            self.enable(int(x), int(y))
        else:
            shift = self.shift_row if i[1] == 'row' else self.shift_col
            col = int(i[2].split('=')[1])
            mag = int(i[4])
            shift(col, mag)
# Replay every puzzle instruction from the input file against a fresh screen.
s = Screen()
for d in data.splitlines():
    s.parse(d)
# Part 1: the number of lit pixels; Part 2: the rendered grid spells letters.
print('star one: {}\nstar two:\n'.format(sum(sum(x) for x in s.grid)))
print(s)
| [
"kryptn@gmail.com"
] | kryptn@gmail.com |
1398fd23f5db51415ada765ea40ff41dfd172980 | 1472d0b89d3c845f1f40552fcef889cd12ce367e | /classification/quiz.py | 2c461e7f0ef7cf0fe26cd2e9708dabc4dc97b1c1 | [] | no_license | AatmanTogadia/DataMining | 47e45cb26b8c5bfbb8fdda08044517765c7b3c96 | a3005e132dd823f13dd00dff9ad9f9bd7c1870d0 | refs/heads/master | 2020-12-25T10:34:10.452558 | 2016-07-07T04:02:07 | 2016-07-07T04:02:07 | 61,662,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,541 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 14 21:53:56 2016
@author: Aatman
"""
__author__ = 'Aatman'
from sklearn.linear_model import LogisticRegression
from sklearn import svm
import pylab as pl
import numpy as np
from sklearn import cross_validation
from sklearn.grid_search import GridSearchCV
import json
import nltk
from nltk.corpus import stopwords
import pickle
english_stopwords = ["a", "about", "above", "above", "across", "after", "afterwards", "again", "against", "all", "almost", "alone", "along", "already", "also","although","always","am","among", "amongst", "amoungst", "amount", "an", "and", "another", "any","anyhow","anyone","anything","anyway", "anywhere", "are", "around", "as", "at", "back","be","became", "because","become","becomes", "becoming", "been", "before", "beforehand", "behind", "being", "below", "beside", "besides", "between", "beyond", "bill", "both", "bottom","but", "by", "call", "can", "cannot", "cant", "co", "con", "could", "couldnt", "cry", "de", "describe", "detail", "do", "done", "down", "due", "during", "each", "eg", "eight", "either", "eleven","else", "elsewhere", "empty", "enough", "etc", "even", "ever", "every", "everyone", "everything", "everywhere", "except", "few", "fifteen", "fify", "fill", "find", "fire", "first", "five", "for", "former", "formerly", "forty", "found", "four", "from", "front", "full", "further", "get", "give", "go", "had", "has", "hasnt", "have", "he", "hence", "her", "here", "hereafter", "hereby", "herein", "hereupon", "hers", "herself", "him", "himself", "his", "how", "however", "hundred", "ie", "if", "in", "inc", "indeed", "interest", "into", "is", "it", "its", "itself", "keep", "last", "latter", "latterly", "least", "less", "ltd", "made", "many", "may", "me", "meanwhile", "might", "mill", "mine", "more", "moreover", "most", "mostly", "move", "much", "must", "my", "myself", "name", "namely", "neither", "never", "nevertheless", "next", "nine", "no", "nobody", "none", "noone", "nor", "not", "nothing", "now", "nowhere", "of", "off", "often", "on", "once", "one", "only", "onto", "or", "other", "others", "otherwise", "our", "ours", "ourselves", "out", "over", "own","part", "per", "perhaps", "please", "put", "rather", "re", "same", "see", "seem", "seemed", "seeming", "seems", "serious", "several", "she", "should", "show", "side", "since", "sincere", "six", "sixty", "so", 
"some", "somehow", "someone", "something", "sometime", "sometimes", "somewhere", "still", "such", "system", "take", "ten", "than", "that", "the", "their", "them", "themselves", "then", "thence", "there", "thereafter", "thereby", "therefore", "therein", "thereupon", "these", "they", "thickv", "thin", "third", "this", "those", "though", "three", "through", "throughout", "thru", "thus", "to", "together", "too", "top", "toward", "towards", "twelve", "twenty", "two", "un", "under", "until", "up", "upon", "us", "very", "via", "was", "we", "well", "were", "what", "whatever", "when", "whence", "whenever", "where", "whereafter", "whereas", "whereby", "wherein", "whereupon", "wherever", "whether", "which", "while", "whither", "who", "whoever", "whole", "whom", "whose", "why", "will", "with", "within", "without", "would", "yet", "you", "your", "yours", "yourself", "yourselves", "the"]
spanish_stopwords = set(stopwords.words("spanish"))  # Spanish stop-words from NLTK
# BUG FIX: the original append()ed the two collections, producing a
# two-element list whose elements are the containers themselves, so the
# later filter `term not in all_stopwords` never matched an individual
# stop-word. extend() flattens them into one list of words
# (both Spanish and English stop-words combined).
all_stopwords = []
all_stopwords.extend(english_stopwords)
all_stopwords.extend(spanish_stopwords)
# Load the negative training tweets; each entry is [class_label, text]
# with label 0 meaning "negative".
tweets_nega = []
for line in open('train_nega_tweets.txt').readlines():
    tweet=json.loads(line)
    temp=tweet['text']
    items=[0,temp]
    tweets_nega.append(items)
# Extract the vocabulary of keywords
# (terms are lower-cased; very short terms and stop-words are skipped)
vocab = dict()
for label,text in tweets_nega:
    for term in text.split():
        term = term.lower()
        if len(term) > 2 and term not in all_stopwords:
            # NOTE: dict.has_key() is Python 2 only.
            if vocab.has_key(term):
                vocab[term] = vocab[term] + 1
            else:
                vocab[term] = 1
# Remove terms whose frequencies are less than a threshold (e.g., 20)
vocab = {term: freq for term, freq in vocab.items() if freq > 20}
# Generate an id (starting from 0) for each term in vocab
vocab = {term: idx for idx, (term, freq) in enumerate(vocab.items())}
# Generate X and y
print vocab
X = []
y = []
# Bag-of-words term-count features for the negative tweets (label 0).
for class_label, tweet_text in tweets_nega:
    x = [0] * len(vocab)
    terms = [term1 for term1 in tweet_text.split() if len(term1) > 2]
    for term in terms:
        # NOTE(review): terms are NOT lower-cased here although the vocab
        # keys were lower-cased above, so capitalised occurrences are never
        # counted — confirm whether this is intended.
        if vocab.has_key(term):
            x[vocab[term]] += 1
    y.append(class_label)
    X.append(x)
# Load the positive training tweets; label 1 means "positive".
tweets_posi = []
for line in open('train_posi_tweets.txt').readlines():
    tweet=json.loads(line)
    lala=tweet['text']
    items=[1,lala]
    tweets_posi.append(items)
# Bag-of-words features for the positive tweets, appended to the same X/y.
for class_label, tweet_text in tweets_posi:
    x = [0] * len(vocab)
    terms = [term2 for term2 in tweet_text.split() if len(term2) > 2]
    for term in terms:
        if vocab.has_key(term):
            x[vocab[term]] += 1
    y.append(class_label)
    X.append(x)
# 10 folder cross validation to estimate the best w and b
svc = svm.SVC(kernel='linear')
Cs = range(1,5)
# NOTE(review): this GridSearchCV is dead code — clf is immediately rebound
# to a plain LogisticRegression on the next line and the grid search is
# never fitted.
clf = GridSearchCV(estimator=svc, param_grid=dict(C=Cs), cv = 10)
clf = LogisticRegression()
clf.fit(X, y)
print clf.predict(X)
# predict the class labels of new tweets
#print clf.predict(X)
# Load the raw text of the unlabeled test tweets.
tweets_test = []
for line in open('test_tweets.txt').readlines():
    tweet=json.loads(line)
    lala=tweet['text']
    #items=[lala]
    tweets_test.append(lala)
#print tweets
# Generate X for testing tweets
X=[]
for tweet_text in tweets_test:
    x = [0] * len(vocab)
    terms = [term3 for term3 in tweet_text.split() if len(term3) > 2]
    for term in terms:
        if vocab.has_key(term):
            x[vocab[term]] += 1
    X.append(x)
#print X
y = clf.predict(X)
# Re-read the test tweets, this time keeping [embersId, text] pairs so the
# predictions can be keyed by tweet id.
tweets1=[]
for line in open('test_tweets.txt').readlines():
    tweet=json.loads(line)
    e_id=tweet['embersId']
    text=tweet['text']
    items=[e_id,text]
    tweets1.append(items)
# Persist the fitted classifier with pickle.
f1 = open('trained_LR_classifier.pkl', 'w')
f1.write(pickle.dumps(clf))
f1.close()
# Map each tweet id to 'true' (predicted positive) or 'false', then write
# the whole mapping as one JSON object.
pred=dict()
t='true'
f='false'
f2=open('predictions.txt','w')
for idx, [tweet_id, tweet_text] in enumerate(tweets1):
    if y[idx]==1:
        pred.update({tweet_id:t})
    else:
        pred.update({tweet_id:f})
f2.write(json.dumps(pred))
f2.close()
print '\r\nAmong the total {1} tweets, {0} tweets are predicted as positive.'.format(sum(y), len(y))
| [
"Aatman Togadia"
] | Aatman Togadia |
e91ec979aaed5918fde76b0f5e9594aa88de1975 | 47836a0e9dd477b17a08f0f1fdc0dec284e119eb | /cqt/strats/strategy_long_short_average.py | 5ec4dc0aa62cccce396687634037d60420e694e2 | [] | no_license | Jwang-2007/ML-Crypto-Trading | efe5667c9953bbe6541a183e749a85268b8613d2 | c83bc9ad68efaea65671a2268f6890bfbfccb79e | refs/heads/master | 2021-04-05T20:37:30.585610 | 2020-04-22T19:09:52 | 2020-04-22T19:09:52 | 248,598,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,921 | py | from cqt.strats.strategy import Strategy
from cqt.analyze.signal_long_short_crossing import signal_long_short_crossing as slsc
from cqt.analyze.signal_long_short_crossing import signal_average_envelope as sae
import copy
class StrategySimpleMA(Strategy):
    """Moving-average crossover strategy for BTC: buy on an upward
    crossing signal (+1), sell one unit on a downward signal (-1)."""

    def apply_event_logic(self, time, ledger):
        """Act on the long/short crossing signal at *time* and return the ledger."""
        coin = 'btc'
        if not self.env.has_section(coin):
            return ledger
        section = self.env.get_section(coin)
        signal = slsc(self.env, coin, time, self.rules)
        close_price = section.get_price_close(time)
        if signal == 1:
            ledger.buy(coin, close_price)
        elif signal == -1:
            ledger.sell_unit(coin, close_price)
        return ledger
class StrategyInverseMA(Strategy):
    """Contrarian crossover strategy: trades AGAINST the long/short
    crossing signal (sells a unit on +1, buys on -1) and records the
    action taken at each time step in ``self.signal``."""

    def __init__(self, mdl, ini_prtf, rules):
        # mdl: asset model/environment; ini_prtf: starting portfolio;
        # rules: signal parameters forwarded to slsc().
        self.asset_model = mdl
        self.initial_portfolio = ini_prtf
        self.rules = rules
        # Short-name aliases kept alongside the long names.
        self.env = mdl
        self.initial = ini_prtf
        # Deep copy of the BTC price data so recorded state cannot mutate
        # the shared model data.
        self.prices = copy.deepcopy(self.asset_model.get_section('btc').data)
        # One slot per close price, initialised to 0 ("no action").
        self.signal=self.prices['price_close'].values*0

    def apply_event_logic(self, time, ledger):
        """Apply the inverse-signal trade for *time* and return the ledger."""
        coin = 'btc'
        if self.env.has_section(coin):
            section_coin = self.env.get_section(coin)
            ind_coin = slsc(self.env, coin, time, self.rules)
            price_coin = section_coin.get_price_close(time)
            time_step = self.prices.index.get_loc(time)
            if ind_coin == 1:
                # Inverse of the simple strategy: a +1 signal triggers a sell.
                ledger.sell_unit(coin, price_coin)
                self.signal[time_step]=1
            elif ind_coin == -1:
                ledger.buy(coin, price_coin)
                self.signal[time_step]=-1
            else:
                self.signal[time_step]=0
                pass
        return ledger
class StrategyBlendMA(Strategy):
    """Blend of two crossover strategies: act on buy signals from the
    long-window pair and on sell signals from the short-window pair."""

    def apply_event_logic(self, time, ledger):
        """Delegate to a short- or long-window StrategySimpleMA depending
        on which crossing signal fires at *time*."""
        coin = 'btc'
        if not self.env.has_section(coin):
            return ledger

        # rules['window_size'] holds four entries: [short_a, short_b,
        # long_a, long_b]; split them into two derived rule sets.
        windows = self.rules['window_size']
        rules_short = self.rules.copy()
        rules_short['window_size'] = [windows[0], windows[1]]
        rules_long = self.rules.copy()
        rules_long['window_size'] = [windows[2], windows[3]]

        ind_long = slsc(self.env, coin, time, rules_long)
        ind_short = slsc(self.env, coin, time, rules_short)
        strat_long = StrategySimpleMA(self.env, ledger, rules_long)
        strat_short = StrategySimpleMA(self.env, ledger, rules_short)

        if ind_long == 1:
            ledger = strat_long.apply_event_logic(time, ledger)
        elif ind_short == -1:
            ledger = strat_short.apply_event_logic(time, ledger)
        return ledger
| [
"yolandwjx@gmail.com"
] | yolandwjx@gmail.com |
4355e732fc8866cde71cd3a8929fb289585ea09a | cea30cf853c1ddbe517292e8bcaf2265ddfeaa00 | /directions/migrations/0001_initial.py | 60c40bd4b3be8941d7d8643f00deab5c72d5f44f | [] | no_license | mehranj73/london-routes | b80242ecf60fa16c19dd0017be421ed790fe7b30 | 0fa50faf6813fc704379d0e0e4f2ad891e4121b0 | refs/heads/main | 2023-01-20T22:43:56.963476 | 2020-11-26T13:23:08 | 2020-11-26T13:23:08 | 325,857,652 | 1 | 0 | null | 2020-12-31T18:56:12 | 2020-12-31T18:56:11 | null | UTF-8 | Python | false | false | 554 | py | # Generated by Django 3.1.2 on 2020-10-25 18:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Direction table with an
    # auto primary key plus two free-text columns (stop_offs, image URL/path).
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Direction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('stop_offs', models.CharField(max_length=50)),
                ('image', models.CharField(max_length=200)),
            ],
        ),
    ]
| [
"anouskaoleary@gmail.com"
] | anouskaoleary@gmail.com |
af071cda274d216298ffa43dad3dc91a802788fa | a97dab5a6d7fa9e65a61193001652198236d5814 | /ircpdb/bot.py | ce6b1bf265fbd7e97b178bd50deab4f663a664cb | [
"BSD-2-Clause"
] | permissive | scshepard/ircpdb | ac083b55fe94485e44859f7dca301361d6411616 | d6f197b02a77113d8e025e3026b64549eb748e2e | refs/heads/master | 2021-01-24T14:27:05.410262 | 2014-10-30T23:09:24 | 2014-10-30T23:09:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,071 | py | import fcntl
import logging
from multiprocessing import Queue
import os
import random
import socket
import textwrap
import time
from irc import strings
from irc.bot import SingleServerIRCBot, ServerSpec
import requests
import six
from .exceptions import DpasteError
logger = logging.getLogger(__name__)
class IrcpdbBot(SingleServerIRCBot):
    """IRC bot that bridges a pdb session to an IRC channel.

    Debugger output read from *inhandle* is relayed to the channel (long
    responses are posted to dpaste instead); commands typed in the channel
    are queued and written to *outhandle*, i.e. fed back into pdb.
    """

    def __init__(
        self, channel, nickname, server, port, password,
        limit_access_to, message_wait_seconds,
        dpaste_minimum_response_length,
        **connect_params
    ):
        # Channel the bot joins and relays the debugger session into.
        self.channel = channel
        # Commands received from IRC, consumed by process_forever().
        self.queue = Queue()
        self.joined = False
        # Messages requested before the channel join completed; flushed
        # from on_welcome().
        self.pre_join_queue = []
        self.message_wait_seconds = message_wait_seconds
        self.dpaste_minimum_response_length = dpaste_minimum_response_length
        self.limit_access_to = limit_access_to
        server = ServerSpec(server, port, password)
        super(IrcpdbBot, self).__init__(
            [server], nickname, nickname, **connect_params
        )

    def on_nicknameinuse(self, c, e):
        """Append a random suffix when the requested nickname is taken."""
        c.nick(
            u"%s-%s" % (
                c.get_nickname(),
                random.randrange(0, 9999)
            )
        )

    def on_welcome(self, c, e):
        """Join the channel, announce the debugger, flush queued messages."""
        logger.debug('Received welcome message, joining %s', self.channel)
        c.join(self.channel)
        self.joined = True
        hello_lines = [
            "Debugger ready (on host %s)" % socket.gethostname(),
            (
                "Please prefix debugger commands with either '!' or '%s:'. "
                "For pdb help, say '!help'; for a list of ircpdb-specific "
                "commands, say '!!help'."
            )
        ]
        for line in hello_lines:
            self.send_user_message(
                self.channel,
                line
            )
        for username, message in self.pre_join_queue:
            self.send_user_message(username, message)

    def on_privmsg(self, c, e):
        """Private messages are not supported; tell the sender so."""
        self.send_user_message(
            e.source.nick,
            "Ircdb currently supports sending/receiving messages "
            "using only the IRC channel."
        )

    def on_pubmsg(self, c, e):
        """Dispatch channel messages addressed to the bot ('nick:' or '!')."""
        # Check if this message is prefixed with the bot's username:
        a = e.arguments[0].split(":", 1)
        if (
            len(a) > 1
            and strings.lower(a[0]) == strings.lower(
                self.connection.get_nickname()
            )
        ):
            self.do_command(e, a[1].strip())
        # And, check if the argument was prefixed with a '!'.
        if e.arguments[0][0] == '!':
            self.do_command(e, e.arguments[0][1:].strip())
        return

    def do_command(self, e, cmd):
        """Handle one command: ircpdb '!'-commands are processed here, any
        other command is queued to be written into the pdb session."""
        logger.debug('Received command: %s', cmd)
        nickname = e.source.nick
        # When an access list is configured, refuse commands from anyone
        # not on it.
        if self.limit_access_to and nickname not in self.limit_access_to:
            self.send_channel_message(
                "I'm sorry, %s, you are not allowed to give commands "
                "to this debugger. Please ask one of the following "
                "users for permission to use the debugger: %s." % (
                    nickname,
                    ', '.join(self.limit_access_to)
                )
            )
            return
        if cmd.startswith("!allow"):
            allows = cmd.split(' ')
            self.limit_access_to.extend(allows[1:])
            self.send_channel_message(
                "The following users have been granted access to the debugger:"
                " %s." % (
                    ', '.join(allows[1:])
                )
            )
            return
        if cmd.startswith("!set_dpaste_minimum_response_length"):
            value = cmd.split(' ')
            try:
                self.dpaste_minimum_response_length = int(value[1])
                self.send_channel_message(
                    "Messages longer than %s lines will now be posted "
                    "to dpaste if possible." % (
                        self.dpaste_minimum_response_length
                    )
                )
            except (TypeError, IndexError, ValueError):
                self.send_channel_message(
                    "An error was encountered while setting the "
                    "dpaste_minimum_response_length setting. %s"
                )
            return
        if cmd.startswith("!set_message_wait_seconds"):
            value = cmd.split(' ')
            try:
                self.message_wait_seconds = float(value[1])
                self.send_channel_message(
                    "There will be a delay of %s seconds between "
                    "sending each message." % (
                        self.message_wait_seconds
                    )
                )
            except (TypeError, IndexError, ValueError):
                self.send_channel_message(
                    "An error was encountered while setting the "
                    "message_wait_seconds setting."
                )
            return
        if cmd.startswith("!help"):
            available_commands = textwrap.dedent("""
                Available Commands:
                * !!allow NICKNAME
                  Add NICKNAME to the list of users that are allowed to
                  interact with the debugger. Current value: {limit_access_to}.
                * !!set_dpaste_minimum_response_length INTEGER
                  Try to send messages this length or longer in lines
                  to dpaste rather than sending them to IRC directly.
                  Current value: {dpaste_minimum_response_length}.
                * !!set_message_wait_seconds FLOAT
                  Set the number of seconds to wait between sending messages
                  (this is a measure used to prevent being kicked from
                  Freenode and other IRC servers that enforce limits on the
                  number of messages a client an send in a given period of
                  time. Current value: {message_wait_seconds}.
            """.format(
                limit_access_to=self.limit_access_to,
                dpaste_minimum_response_length=(
                    self.dpaste_minimum_response_length
                ),
                message_wait_seconds=self.message_wait_seconds,
            ))
            self.send_channel_message(
                available_commands,
                dpaste=True,
            )
            return
        else:
            # Anything else is a pdb command destined for the debugger.
            self.queue.put(cmd.strip())

    def send_channel_message(self, message, dpaste=None):
        """Send *message* to the bot's channel (see send_user_message)."""
        return self.send_user_message(
            self.channel,
            message,
            dpaste=dpaste,
        )

    def send_user_message(self, username, message, dpaste=None):
        """Deliver *message* to *username*/channel, via dpaste when long.

        dpaste=None means "use dpaste if the chunked message reaches
        dpaste_minimum_response_length"; True forces dpaste; on dpaste
        failure the message falls back to plain IRC lines.
        """
        message_stripped = message.strip()
        if not self.joined:
            logger.warning(
                'Tried to send message %s, '
                'but was not yet joined to channel. Queueing...',
                message
            )
            self.pre_join_queue.append(
                (username, message, )
            )
            return
        lines = message_stripped.split('\n')
        chunked = self.get_chunked_lines(lines)
        try:
            long_response = len(chunked) >= self.dpaste_minimum_response_length
            if (long_response and dpaste is None) or dpaste is True:
                dpaste_url = self.send_lines_to_dpaste(lines)
                self.send_lines(
                    username, "%s (%s lines)" % (
                        dpaste_url,
                        len(lines)
                    )
                )
                return
        except DpasteError:
            pass
        self.send_lines(username, chunked)

    def get_chunked_lines(self, lines, chunk_size=450):
        """Split any line longer than *chunk_size* characters into pieces
        so each fits in a single IRC message."""
        chunked_lines = []
        for line in lines:
            if len(line) > chunk_size:
                chunked_lines.extend([
                    line[i:i+chunk_size]
                    for i in range(0, len(line), chunk_size)
                ])
            else:
                chunked_lines.append(line)
        return chunked_lines

    def send_lines_to_dpaste(self, lines):
        """Post *lines* to dpaste.com and return the paste URL; wraps any
        failure in DpasteError so callers can fall back to IRC."""
        try:
            response = requests.post(
                'http://dpaste.com/api/v2/',
                data={
                    'content': '\n'.join(lines)
                }
            )
            return response.url
        except Exception as e:
            raise DpasteError(str(e))

    def send_lines(self, target, lines):
        """Send each line as a raw PRIVMSG, pausing message_wait_seconds
        between lines to avoid server flood limits."""
        if isinstance(lines, six.string_types):
            lines = [lines]
        for part in lines:
            self.connection.send_raw(
                'PRIVMSG %s :%s' % (
                    target,
                    part
                )
            )
            if self.message_wait_seconds:
                time.sleep(self.message_wait_seconds)

    def process_forever(self, inhandle, outhandle, timeout=0.1):
        """Main loop: pump pdb output (*inhandle*) to IRC and queued IRC
        commands to pdb (*outhandle*). Never returns."""
        self._connect()
        # Let's mark out inhandle as non-blocking
        fcntl.fcntl(inhandle, fcntl.F_SETFL, os.O_NONBLOCK)
        while True:
            try:
                messages = inhandle.read()
            except IOError:
                # Non-blocking read with no data available.
                messages = None
            if messages:
                # Split on the pdb prompt so each response is one message.
                for message in messages.split('(Pdb)'):
                    stripped = message.strip()
                    if stripped:
                        logger.debug('>> %s', stripped)
                        self.send_channel_message(stripped)
            try:
                self.manifold.process_once(timeout)
            except UnicodeDecodeError:
                # This just *happens* -- I think these are coming from
                # maybe MOTD messages? It isn't clear.
                logger.warning(
                    'UnicodeDecodeError raised while processing messages.'
                )
            while True:
                if self.queue.empty():
                    break
                message = self.queue.get(block=False)
                logger.debug('<< %s', message)
                outhandle.write(u'%s\n' % message)
                outhandle.flush()
| [
"adam.coddington@coxinc.com"
] | adam.coddington@coxinc.com |
4c20c568fe6c4a47880f6ed1eb34cc337b469524 | 6404478cd856f018bddf4a047b23d658e29d94cb | /robot_position_estimation.py | c6b4c283acb14211540d045a0e9ed7c24a3fb5c8 | [] | no_license | RokonUZ/robotic-arm-pick-and-place-OpenCv-Python | c11eff3d70f46d47267ee7342ab16f96a90073af | f9ac7e759a93199d56d97b27efcc7c3d085c1e9e | refs/heads/main | 2023-09-03T10:13:18.792301 | 2021-10-27T17:53:24 | 2021-10-27T17:53:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,663 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 8 21:58:01 2020
@author: Tehseen
"""
# This code is used to Find the location of the Origin of the Robotic arm
# with respect to the image frame. We calculate the center point (origin) of the robotic arm
# as well as the rotation of the robotic arm with respect to the image frame.
# These values will then be used in the Camera coordinates to the Robotic arm Coordinates Homogenius Transformation
#First of all place the robotic arm base plate on the table below the camera where we will place the robotic arm afterwards
# Then execute the code. The code will detect the Rectangle in the base plate tool then fild the
# origin and rotation values.
# At the end we will use these values in our main program.
#[Resources]
# https://stackoverflow.com/questions/34237253/detect-centre-and-angle-of-rectangles-in-an-image-using-opencv
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.html#how-to-draw-the-contours
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.html#b-rotated-rectangle
# https://stackoverflow.com/questions/52247821/find-width-and-height-of-rotatedrect
import numpy as np
import cv2
import sys
import time
import yaml
import os
import warnings
warnings.filterwarnings("ignore")  # silence library warnings for cleaner console output
#Constants Declaration
webcam_Resolution_Width = 640.0
webcam_Resolution_Height = 480.0
rectangle_width_in_mm = 49.0 #size of the calibration rectangle (longer side) along x-axis in mm.
# Global Variables
cx = 0.0 #object location in mm
cy = 0.0 #object location in mm
angle = 0.0 #robotic arm rotation angle
# NOTE(review): the two comments below disagree with the main loop, which
# computes one_pixel_length in mm and only converts to cm afterwards — verify.
one_pixel_length = 0.0 #length of one pixel in cm units
number_of_cm_in_Resolution_width = 0.0 #total number of cm in the camera resolution width
# Reading Camera Matrix and Distortion Coefficients from YAML File
with open(r'Camera Calibration Algorithms/2. camera_calibration_tool-master/calibration.yaml') as file:
    documents = yaml.full_load(file)  # loading yaml file as Stream
    camera_matrix = np.array(documents['camera_matrix'])  # intrinsic matrix as a 2-D numpy array
    distortion_coeff = np.array(documents['dist_coeff'])
    extrinsic_matrix = np.array(documents['extrinsics_matrix'])
# print ("\nIntrinsic Matrix\n",camera_matrix)
# print ("\nExtrinsic Matrix\n",extrinsic_matrix)
# print ("\nDistortion Coefficients\n",distortion_coeff)
# BUG FIX: corrected the misspelled user-facing message ("Succeccfully").
print("\nCamera Matrices Loaded Successfully\n")
def undistortImage(img):
    """Undistort *img* using the loaded camera matrix and distortion
    coefficients.

    alpha=1 keeps the full frame (adding black border pixels) instead of
    cropping. Returns the undistorted image, or None if undistortion fails.
    """
    try:
        mtx = camera_matrix
        dist = distortion_coeff
        #Now undistort the taken Image https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html
        h, w = img.shape[:2]
        #alpha = 0 #use to crop the undistorted image
        alpha = 1 #use not to crop the undistorted image (adding black pixel)
        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), alpha, (w, h))
        #undistort
        undist_image = cv2.undistort(img, mtx, dist, None, newcameramtx)
        return (undist_image) #return undistorted image
    except Exception as e:
        # BUG FIX: a bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt and hid the failure cause; catch Exception and
        # report what went wrong (callers still receive None, as before).
        print("Error while Undistorting Image:", e)
        return None
def calculate_XYZ(u, v):
    """Back-project the pixel (u, v) to world coordinates in mm.

    Uses the global camera_matrix and extrinsic_matrix loaded from the
    calibration YAML. Returns a 3x1 column vector [X, Y, Z].
    """
    #https://github.com/pacogarcia3/hta0-horizontal-robot-arm/blob/9121082815e3e168e35346efa9c60bd6d9fdcef1/camera_realworldxyz.py#L105
    cam_mtx = camera_matrix
    Rt = extrinsic_matrix
    #Solve: From Image Pixels, find World Points
    scalingfactor = 40.0  # demo value; the real scaling factor (depth) must be calibrated first
    # BUG FIX: keep the translation vector as a 3x1 column so the
    # subtraction below stays element-wise. The original 1-D slice
    # Rt[:, 3] (shape (3,)) broadcast (3,1) - (3,) into a 3x3 matrix,
    # corrupting the back-projection result.
    tvec1 = Rt[:, 3].reshape(3, 1)  # translation vector (4th column of the extrinsic matrix)
    uv_1 = np.array([[u, v, 1]], dtype=np.float32)
    uv_1 = uv_1.T
    suv_1 = scalingfactor * uv_1
    inverse_cam_mtx = np.linalg.inv(cam_mtx)
    xyz_c = inverse_cam_mtx.dot(suv_1)
    xyz_c = xyz_c - tvec1
    R_mtx = Rt[:, [0, 1, 2]]  # rotation matrix (first 3 columns of the extrinsic matrix)
    inverse_R_mtx = np.linalg.inv(R_mtx)
    XYZ = inverse_R_mtx.dot(xyz_c)
    return XYZ
if __name__ == "__main__":
while(1):
try:
#Start reading camera feed (https://answers.opencv.org/question/227535/solvedassertion-error-in-video-capturing/))
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
#Now Place the base_plate_tool on the surface below the camera.
while(1):
_,frame = cap.read()
#frame = undistortImage(frame)
# cv2.imshow("Live" , frame)
k = cv2.waitKey(5)
if k == 27: #exit by pressing Esc key
cv2.destroyAllWindows()
sys.exit()
if k == 13: #Save the centroid and angle values of the rectangle in a file
result_file = r'Camera Calibration Algorithms/2. camera_calibration_tool-master/robot_position.yaml'
try:
os.remove(result_file) #Delete old file first
except:
pass
print("Saving Robot Position Matrices .. in ",result_file)
cx = (cx * one_pixel_length)/10.0 #pixel to cm conversion
cy = (cy * one_pixel_length)/10.0
data={"robot_position": [cx,cy,angle,number_of_cm_in_Resolution_width]}
with open(result_file, "w") as f:
yaml.dump(data, f, default_flow_style=False)
red = np.matrix(frame[:,:,2]) #extracting red layer (layer No 2) from RGB
green = np.matrix(frame[:,:,1]) #extracting green layer (layer No 1) from RGB
blue = np.matrix(frame[:,:,0]) #extracting blue layer (layer No 0) from RGB
#it will display only the Blue colored objects bright with black background
blue_only = np.int16(blue)-np.int16(red)-np.int16(green)
blue_only[blue_only<0] =0
blue_only[blue_only>255] =255
blue_only = np.uint8(blue_only)
# cv2.namedWindow('blue_only', cv2.WINDOW_AUTOSIZE)
# cv2.imshow("blue_only",blue_only)
# cv2.waitKey(1)
#https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_thresholding/py_thresholding.html#otsus-binarization
#Gaussian filtering
blur = cv2.GaussianBlur(blue_only,(5,5),cv2.BORDER_DEFAULT)
#Otsu's thresholding
ret3,thresh = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
cv2.namedWindow('Threshold', cv2.WINDOW_AUTOSIZE)
cv2.imshow("Threshold",thresh)
cv2.waitKey(1)
contours,hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
for contour in contours:
area = cv2.contourArea(contour)
if area>100000:
contours.remove(contour)
cnt = contours[0] #Conture of our rectangle
##############################################################
#https://stackoverflow.com/a/34285205/3661547
#fit bounding rectangle around contour
rotatedRect = cv2.minAreaRect(cnt)
#getting centroid, width, height and angle of the rectangle conture
(cx, cy), (width, height), angle = rotatedRect
#centetoid of the rectangle conture
cx=int(cx)
cy=int(cy)
# print (cx,cy) #centroid of conture of rectangle
#Location of Rectangle from origin of image frame in millimeters
x,y,z = calculate_XYZ(cx,cy)
#but we choose the Shorter edge of the rotated rect to compute the angle between Vertical
#https://stackoverflow.com/a/21427814/3661547
if(width > height):
angle = angle+180
else:
angle = angle+90
# print("Angle b/w shorter side with Image Vertical: \n", angle)
#cm-per-pixel calculation
if(width != 0.0):
one_pixel_length = rectangle_width_in_mm/width #length of one pixel in mm (rectangle_width_in_mm/rectangle_width_in_pixels)
number_of_cm_in_Resolution_width = (one_pixel_length*640)/10 #in cm
print(number_of_cm_in_Resolution_width)
##############################################################
#Draw rectangle around the detected object
#https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.html#how-to-draw-the-contours
im = cv2.drawContours(frame,[cnt],0,(0,0,255),2)
# cv2.namedWindow('Contours', cv2.WINDOW_AUTOSIZE)
# cv2.imshow("Contours",im)
# cv2.waitKey(1)
cv2.circle(im, (cx,cy), 2,(200, 255, 0),2) #draw center
cv2.putText(im, str("Angle: "+str(int(angle))), (int(cx)-40, int(cy)+60), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 1, cv2.LINE_AA)
cv2.putText(im, str("Center: "+str(cx)+","+str(cy)), (int(cx)-40, int(cy)-50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 1, cv2.LINE_AA)
cv2.namedWindow('Detected Rect', cv2.WINDOW_AUTOSIZE)
cv2.imshow('Detected Rect',im)
cv2.waitKey(1)
except Exception as e:
print("Error in Main Loop\n",e)
cv2.destroyAllWindows()
sys.exit()
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.