blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
59ec78c309a9c5a91c649d9873d4c4fb2c8c5b8a | 293f2222d6a72c0e29ad3ff705df32a0fc707ae4 | /main/migrations/0010_auto_20181213_1855.py | a56b31da6757e83d55cf1c9e2c3407ea138bf52b | [] | no_license | leocordero/historicoCasos | 60c6bc3a9cb253a1e65fe598a67280c13d253e4c | 838f3a7dc705f2f786db5b84f93c25bf2ebd2ae3 | refs/heads/main | 2023-03-22T11:30:00.914585 | 2021-03-20T00:16:39 | 2021-03-20T00:16:39 | 349,544,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-12-13 18:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0009_limpiezas'),
]
operations = [
migrations.AlterField(
model_name='limpiezas',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| [
"lchaconz@cisco.com"
] | lchaconz@cisco.com |
e7820778affd4261ddcccf87fbbcd2902e7d72cb | 80d40fe4a1f4a32e0b96cccf2f6850288265404c | /Code/labelStock.py | ca67297b6e9a5668fd0b1ec62417ff87b9422b1b | [] | no_license | wanlinxie/EMOD_finalproject | 9e23ce39b0c766f166ab7d616a860a0c5beaf2cc | 5f07a634dd3a676fb788b592d6b56ba695d60d4c | refs/heads/master | 2020-05-07T15:09:27.596791 | 2019-04-30T00:16:16 | 2019-04-30T00:16:16 | 180,625,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | import pandas as pd
import os
dir = 'StockData'
company = ['BA','FB','NFLX','TSLA']
for c in company:
file_path = os.path.join(dir, c + "_pred.csv")
df = pd.read_csv(file_path)
labels = []
for _,row in df.iterrows():
if row['Close'] >= row['Open']:
labels.append(1)
else:
labels.append(0)
df['Label'] = labels
df.to_csv(file_path)
| [
"gw2372@columbia.edu"
] | gw2372@columbia.edu |
0d3e0c450592368e94d4ab28c83d638508c5a9b4 | cb5f2d4943b65d53a7c36080a48057f79dcb4c4a | /core/models.py | a03207ed85358d0ac0e22aa556231499380fa721 | [] | no_license | radaevalex/Portal | 54be04808f52fdda0c7a12919f3d4d152d10d5db | 7134336289e0bf425124c20bcd5c85e33f938591 | refs/heads/master | 2022-05-01T19:23:46.800454 | 2019-10-31T17:46:12 | 2019-10-31T17:46:12 | 218,718,710 | 0 | 0 | null | 2022-04-22T22:34:38 | 2019-10-31T08:28:50 | JavaScript | UTF-8 | Python | false | false | 2,052 | py | from django.db import models
from django.urls import reverse
from django.utils.text import slugify
import unidecode
# Create your models here.
class Indicator(models.Model):
    """A reportable metric, identified by a group/category plus a name."""
    group = models.CharField(max_length=200, verbose_name='Группа')
    name = models.CharField(max_length=200, verbose_name='Показатель')
    class Meta:
        # Default queryset ordering: alphabetically by group.
        ordering = ('group',)
        verbose_name = 'Показатель'
        verbose_name_plural = 'Показатели'
    def __str__(self):
        """Human-readable label: "<group> <name>"."""
        return '{} {}'.format(self.group, self.name)
class Office(models.Model):
    """A sales office with an explicit integer primary key and a URL slug
    derived from the transliterated department name."""
    id = models.SmallIntegerField(primary_key=True, verbose_name='Офис')
    department = models.CharField(max_length=200, verbose_name='ТО')
    city = models.CharField(max_length=200, verbose_name='Город')
    slug = models.SlugField(max_length=200, verbose_name='URL', blank=True)
    class Meta:
        ordering = ('id',)
        verbose_name = 'Офис'
        verbose_name_plural = 'Офисы'
    def save(self, *args, **kwargs):
        """Regenerate the slug from the department name on every save."""
        url = unidecode.unidecode(self.department)
        self.slug = slugify(url)
        # Bug fix: keyword arguments must be forwarded with ** -- the
        # original used *kwargs, which unpacks only the dict keys as
        # positional arguments and drops their values.
        return super(Office, self).save(*args, **kwargs)
    def __str__(self):
        """Human-readable label: "<department> <city> <id>"."""
        return '{} {} {}'.format(self.department, self.city, self.id)
class Dynamic(models.Model):
    """One monthly data point of an Indicator's value for a given Office."""
    indicator = models.ForeignKey(Indicator,
                                  on_delete=models.CASCADE,
                                  related_name='dynamic',
                                  verbose_name='Показатель')
    office = models.ForeignKey(Office,
                               on_delete=models.CASCADE,
                               related_name='dynamic',
                               verbose_name='Офис')
    # Month the value applies to (stored as a full date).
    month = models.DateField(verbose_name='Месяц')
    value = models.BigIntegerField(verbose_name='Значение')
    class Meta:
        # Chronological ordering by default.
        ordering = ('month',)
        verbose_name = 'Динамика показателей'
        verbose_name_plural = 'Динамика показателей'
| [
"57213368+radaevalex@users.noreply.github.com"
] | 57213368+radaevalex@users.noreply.github.com |
cfbb540e6dfba1237f2ee80097afe65bc324da40 | 177df2b442866474377498a8b85f3d58410d0193 | /create_glidein_tarball.py | 45b070a821e819c0b6f139301c0d4fe04e8cab66 | [] | no_license | briedel/pyglidein | 6c19f2d310bd15a85df50eb384e8d2f186aaff50 | 835c458e4f7f0dc0dcf785120da31ffa9425f0bd | refs/heads/master | 2020-12-11T03:35:27.540075 | 2017-03-24T14:28:47 | 2017-03-24T14:28:47 | 49,531,789 | 0 | 0 | null | 2016-03-18T17:26:32 | 2016-01-12T22:02:49 | Python | UTF-8 | Python | false | false | 7,707 | py | """
Create a glidein tarball by downloading the source, building it, then
copying what is needed into the tarball.
"""
import sys
import os
import shutil
import subprocess
import tarfile
import tempfile
if sys.version_info[0] < 3 and sys.version_info[1] < 7:
raise Exception('requires python 2.7+')
def libuuid_download(version='1.0.3'):
    """Fetch and unpack the libuuid source tarball; return the source dir name."""
    name = 'libuuid-' + version
    tarball = name + '.tar.gz'
    url = 'http://downloads.sourceforge.net/project/libuuid/' + tarball
    for cmd in (['wget', url], ['tar', '-zxf', tarball]):
        subprocess.check_call(cmd)
    return name
def libuuid_build():
    """Build uuid statically.

    Downloads the source, configures a static-only build installed into a
    local 'release_dir', runs make/make install, and returns the absolute
    path of that release directory.
    """
    dirname = libuuid_download()
    initial_dir = os.getcwd()
    os.chdir(dirname)
    try:
        # Start from a clean install prefix each time.
        if os.path.exists('release_dir'):
            shutil.rmtree('release_dir')
        os.mkdir('release_dir')
        options = ['--enable-static',
                   '--disable-shared',
                   '--prefix',os.path.join(os.getcwd(),'release_dir'),
                   ]
        subprocess.check_call(['./configure']+options)
        subprocess.check_call(['make'])
        subprocess.check_call(['make','install'])
        return os.path.join(initial_dir,dirname,'release_dir')
    finally:
        # Always restore the caller's working directory.
        os.chdir(initial_dir)
def cvmfs_download():
    """Download and extract the stable libcvmfs source; return its directory name."""
    tarball = 'libcvmfs-stable.tar.gz'
    subprocess.check_call(['wget', 'https://github.com/cvmfs/cvmfs/archive/' + tarball])
    subprocess.check_call(['tar', '-zxf', tarball])
    return 'cvmfs-libcvmfs-stable'
def cvmfs_build():
    """Build libcvmfs (client library only), statically linked against the
    locally built libuuid, and return the path of its release directory."""
    libuuid = libuuid_build()
    dirname = cvmfs_download()
    initial_dir = os.getcwd()
    os.chdir(dirname)
    try:
        # Start from a clean install prefix each time.
        if os.path.exists('release_dir'):
            shutil.rmtree('release_dir')
        os.mkdir('release_dir')
        # Disable everything except the libcvmfs library itself and point
        # cmake at the static libuuid built above.
        options = ['-Wno-dev',
                   '-DINSTALL_MOUNT_SCRIPTS=OFF',
                   '-DBUILD_SERVER=OFF',
                   '-DBUILD_CVMFS=OFF',
                   '-DBUILD_LIBCVMFS=ON',
                   '-DINSTALL_BASH_COMPLETION=OFF',
                   '-DUUID_LIBRARY:FILE='+os.path.join(libuuid,'lib','libuuid.a'),
                   '-DUUID_INCLUDE_DIR:PATH='+os.path.join(libuuid,'include'),
                   '-DCMAKE_INSTALL_PREFIX='+os.path.join(os.getcwd(),'release_dir'),
                   ]
        subprocess.check_call(['cmake']+options)
        subprocess.check_call(['make','libpacparser'])
        # Only the cvmfs subdirectory is built and installed.
        os.chdir('cvmfs')
        subprocess.check_call(['make'])
        subprocess.check_call(['make','install'])
        return os.path.join(initial_dir,dirname,'release_dir')
    finally:
        # Always restore the caller's working directory.
        os.chdir(initial_dir)
def parrot_download(version):
    """Download and extract the cctools (parrot) source for *version*;
    return the extracted source directory name."""
    source_dir = 'cctools-' + version + '-source'
    tarball = source_dir + '.tar.gz'
    subprocess.check_call(['wget', 'http://ccl.cse.nd.edu/software/files/' + tarball])
    subprocess.check_call(['tar', '-zxf', tarball])
    return source_dir
def parrot_build(version='6.0.14'):
    """Build cctools/parrot against the locally built libcvmfs and return
    the path of the resulting release directory."""
    cvmfs = cvmfs_build()
    dirname = parrot_download(version)
    initial_dir = os.getcwd()
    os.chdir(dirname)
    try:
        # Start from a clean install prefix each time.
        if os.path.exists('release_dir'):
            shutil.rmtree('release_dir')
        os.mkdir('release_dir')
        # Strip out the cctools components a glidein does not need.
        options = ['--without-system-sand',
                   '--without-system-allpairs',
                   '--without-system-wavefront',
                   '--without-system-makeflow',
                   # '--without-system-ftp-lite',
                   # '--without-system-chirp',
                   '--without-system-umbrella',
                   '--without-system-resource_monitor',
                   '--without-system-doc',
                   '--with-cvmfs-path',cvmfs,
                   '--prefix',os.path.join(os.getcwd(),'release_dir'),
                   ]
        subprocess.check_call(['./configure']+options)
        subprocess.check_call(['make'])
        subprocess.check_call(['make','install'])
        return os.path.join(initial_dir,dirname,'release_dir')
    finally:
        # Always restore the caller's working directory.
        os.chdir(initial_dir)
def condor_download(version):
    """Download and extract the HTCondor source tagged V<version> (dots
    replaced with underscores); return the source directory name."""
    tag = version.replace('.', '_')
    tarball = 'V' + tag + '.tar.gz'
    subprocess.check_call(['wget', 'https://github.com/htcondor/htcondor/archive/' + tarball])
    subprocess.check_call(['tar', '-zxf', tarball])
    return 'htcondor-' + tag
def condor_build(version='8.6.1'):
    """Build a minimal HTCondor (no grid/cloud/GUI extras) and return the
    path of the resulting release directory."""
    dirname = condor_download(version)
    initial_dir = os.getcwd()
    os.chdir(dirname)
    try:
        # Start from a clean install prefix each time.
        if os.path.exists('release_dir'):
            shutil.rmtree('release_dir')
        os.mkdir('release_dir')
        # Disable every optional subsystem a glidein does not need.
        options = [
                   '-DHAVE_BACKFILL=OFF',
                   '-DHAVE_BOINC=OFF',
                   '-DHAVE_HIBERNATION=OFF',
                   '-DHAVE_KBDD=OFF',
                   '-DWANT_GLEXEC=OFF',
                   '-DWANT_FULL_DEPLOYMENT=OFF',
                   '-DWITH_BOINC=OFF',
                   '-DWITH_BOSCO=OFF',
                   '-DWITH_CAMPUSFACTORY=OFF',
                   '-DWITH_BLAHP=OFF',
                   '-DWITH_CURL=OFF',
                   '-DWITH_COREDUMPER=OFF',
                   '-DWITH_CREAM=OFF',
                   '-DWITH_GANGLIA=OFF',
                   '-DWITH_GLOBUS=OFF',
                   '-DWITH_GSOAP=OFF',
                   '-DWITH_LIBDELTACLOUD=OFF',
                   '-DWITH_LIBVIRT=OFF',
                   '-DWITH_PYTHON_BINDINGS=OFF',
                   '-DWITH_UNICOREGAHP=OFF',
                   '-DWITH_VOMS=OFF',
                   ]
        # Bug fix: compare versions numerically.  The original string
        # comparison (version > '8.5.2') mis-orders versions such as
        # '8.10.0', which sorts lexicographically *before* '8.5.2'.
        if tuple(int(p) for p in version.split('.')) > (8, 5, 2):
            options.append('-DWITH_KRB5=OFF')
        subprocess.check_call(['cmake','-DCMAKE_INSTALL_PREFIX:PATH='+os.getcwd()+'/release_dir']
                              +options+['.'])
        subprocess.check_call(['make'])
        subprocess.check_call(['make','install'])
        return os.path.join(initial_dir,dirname,'release_dir')
    finally:
        # Always restore the caller's working directory.
        os.chdir(initial_dir)
def main():
    """Command-line entry point: build parrot and HTCondor, then pack them
    together with the template directory into a single glidein tarball."""
    # NOTE(review): optparse is deprecated in favor of argparse; kept as-is.
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('--template-dir',dest='template',default='glidein_template',
                      help='Location of template directory')
    parser.add_option('--htcondor-version',dest='condor',default=None,
                      help='HTCondor version to use')
    parser.add_option('--parrot-version',dest='parrot',default=None,
                      help='Parrot (cctools) version to use')
    parser.add_option('-o','--output',dest='output',default='glidein.tar.gz',
                      help='output tarball name')
    (options, args) = parser.parse_args()
    if not options.template:
        raise Exception('need a template directory')
    options.template = os.path.abspath(options.template)
    curdir = os.getcwd()
    # All building happens inside a throwaway temp directory.
    d = tempfile.mkdtemp(dir=os.getcwd())
    tarfile_name = os.path.abspath(os.path.expandvars(os.path.expanduser(options.output)))
    try:
        os.chdir(d)
        # Only pass a version kwarg when one was given, so the builders'
        # own defaults apply otherwise.
        parrot_opts = {}
        if options.parrot:
            parrot_opts['version'] = options.parrot
        parrot_path = parrot_build(**parrot_opts)
        condor_opts = {}
        if options.condor:
            condor_opts['version'] = options.condor
        condor_path = condor_build(**condor_opts)
        # Tarball layout: template files at the root, HTCondor under
        # glideinExec/, parrot binaries under GLIDEIN_PARROT/.
        with tarfile.open(tarfile_name,'w:gz') as tar:
            for f in os.listdir(options.template):
                tar.add(os.path.join(options.template,f),arcname=f)
            tar.add('.',arcname='glideinExec',recursive=False)
            for f in os.listdir(condor_path):
                tar.add(os.path.join(condor_path,f),arcname=os.path.join('glideinExec',f))
            tar.add(os.path.join(parrot_path,'bin','parrot_run'),arcname=os.path.join('GLIDEIN_PARROT','parrot_run'))
            tar.add(os.path.join(parrot_path,'lib','libparrot_helper.so'),arcname=os.path.join('GLIDEIN_PARROT','libparrot_helper.so'))
    finally:
        # Restore the working directory and remove the build tree.
        os.chdir(curdir)
        shutil.rmtree(d)

if __name__ == '__main__':
    main()
| [
"davids24@gmail.com"
] | davids24@gmail.com |
f1fdec782a19b71a749c643458ec9d0408978d66 | 053221e1d90b365f68701dbd5b6466f30d1f6fd7 | /Day4/vd2.py | d2624b1ae91bd834e7c6b6d1c9a499d95af8c68b | [] | no_license | pytutorial/py2011E | eceb4d563cc807294b08b818edadd521ed8da488 | 306437369b0bfe55a2fa827b098283856242e731 | refs/heads/main | 2023-02-28T23:57:32.851536 | 2021-01-30T14:56:12 | 2021-01-30T14:56:12 | 318,186,117 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | # vd2.py
# Nhập vào họ tên đầy đủ của một người
# In ra Họ, tên đệm, tên của người đó
ho_ten = input('Họ và tên:')
#TODO :
items = ho_ten.split()
ho = items[0]
ten = items[-1]
ten_dem = ''
for i in range(1, len(items)-1):
ten_dem += items[i] + ' '
print('Họ: ', ho)
print('Tên đệm:', ten_dem)
print('Tên: ', ten)
| [
"duongthanhtungvn01@gmail.com"
] | duongthanhtungvn01@gmail.com |
6859b7420def17cbc91c49bd229e6028b100e87d | bf3a87fd7725ad4e7e85492509f3e5aa68709fd0 | /chat/.history/Cliente_20191106204840.py | 8b51d56c2ef6c7a8b2f56ce7b17b3a47b7f38cdd | [] | no_license | slalbertojesus/merixo-grpc | f468b4f6349b4367ad6064f175cef7c3e49d829f | 182569a89cad605fd81b095861fd58390729c720 | refs/heads/master | 2020-09-04T21:39:53.488701 | 2019-12-25T02:07:24 | 2019-12-25T02:07:24 | 219,899,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | import grpc
import uuid
import chat_pb2 as structure
import chat_pb2_grpc as grpc_chat
from Usuario import Usuario
class Cliente():
    """gRPC chat client that subscribes a hard-coded user to ChatAdmin."""

    def IniciarCliente(self):
        """Open a channel to the local server and subscribe one user."""
        id = uuid.uuid1()
        print(id)
        channel = grpc.insecure_channel('localhost:50051')
        conn = grpc_chat.ChatAdminStub(channel)
        # Bug fix: build a Usuario *message instance* with keyword args.
        # The original assigned to attributes of the generated message
        # class itself (structure.Usuario.id = ...), which does not
        # populate a request object and leaks state globally.
        request = structure.Usuario(id=id.hex, usuario="Choco", activo=True)
        confirmacion = conn.Subscribirse(request)
        print(confirmacion)

if __name__ == '__main__':
    cliente = Cliente()
    cliente.IniciarCliente()
"slalbertojesus@gmail.com"
] | slalbertojesus@gmail.com |
2399e7181e58755175157f3bd3bdcb7ce643b9a5 | 1dcac2d5ee9aa09ce6294ef7ad395235c55327f4 | /QR code.py | ccbe1934f51ec968a6c1c530a44dd2997a979b1e | [] | no_license | iris116/Python-simpe-code | b73a5e7f498c08182623f7b7005fdaf520b126c2 | 5f9fdada02c7af77e401909dfc593fb9a816fd9e | refs/heads/master | 2021-05-27T13:19:49.207019 | 2020-04-09T05:02:25 | 2020-04-09T05:02:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,315 | py | # encoding=utf8
import pandas as pd
import qrcode
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
# Source spreadsheet of sales rooms; each row produces one QR image.
url = r"D:\E Disk\Python\QR\20180126\QR new 20180126.csv" #CHANGE
tips= pd.read_csv(url,encoding="utf-8")
# Prevent pandas from truncating long cell values (e.g. full URLs).
pd.set_option('max_colwidth',10000)
df1=tips
def make_qr(url,pos,name):
    """Render *url* as a QR code, label it with *pos*, and save it as
    <pos>_<name>.png in the hard-coded output directory."""
    qr = qrcode.QRCode(
        version=4, # QR symbol size, 1-40; version 1 is 21x21 modules (21+(n-1)*4)
        error_correction=qrcode.constants.ERROR_CORRECT_M, # L:7% M:15% Q:25% H:30%
        box_size=10, # pixel size of each module square
        border=4, # quiet-zone border width, in modules
    )
    qr.add_data(url)
    qr.make(fit=True)
    img = qr.make_image()
    #img = qrcode.make(url)
    text=str(pos)+"_"+str(name)
    a="D:/E Disk/Python/QR/20180126/" #CHANGE
    b= '.png'
    fl= a+text +b
    img.save(fl, quality=100)
    # Select the font used for the label.
    font = ImageFont.truetype("C:\Windows\Fonts\Arial.ttf", 24)
    # Re-open the saved image.
    imageFile = fl
    im1 = Image.open(imageFile)
    # Draw the label text onto the image (position / content / font).
    draw = ImageDraw.Draw(im1)
    draw.text((180, 15), str(pos), font=font) # text position, content and font
    draw = ImageDraw.Draw(im1)
    im1.save(fl, quality=100)

# Generate one labelled QR image per row of the spreadsheet.
df1.apply(lambda x: make_qr(x['TEXT_URL'],x['CODE_SALESROOM'],x['NAME_SALESROOM']),axis=1)
| [
"Iris.LiuTJ@homecredit.cn"
] | Iris.LiuTJ@homecredit.cn |
b01b5262f42041385ca481940fe74cb546fa6d98 | 89a1673e2fd4c6ae1a2715abea491d55b08cf881 | /train.py | 7052f687eb32a9b0e46719651130e79fbcdd821e | [] | no_license | Fuyubai/Transfomer-with-TensorFlow2 | 01389da5be16000826c46fa092653e584a2ff067 | f5ad91b2f3af919e7ea2aedf2860ab1365516064 | refs/heads/master | 2022-11-13T18:35:56.938147 | 2020-07-04T09:26:30 | 2020-07-04T09:26:30 | 277,065,660 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,576 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 27 15:09:25 2020
@author: Morning
"""
import tensorflow as tf
import tensorflow_datasets as tfds
from model import Transformer, optimizer_adam, loss_function
from components import create_mask
# Load the TED pt->en translation dataset and the pre-built subword
# tokenizers (pickled SubwordTextEncoder objects built by the commented
# code below).
examples, metadata = tfds.load('ted_hrlr_translate/pt_to_en', with_info=True,
                               as_supervised=True)
train_examples, val_examples = examples['train'], examples['validation']
# tokenizer_en = tfds.features.text.SubwordTextEncoder.build_from_corpus(
#     (en.numpy() for pt, en in train_examples), target_vocab_size=2**13)
# tokenizer_pt = tfds.features.text.SubwordTextEncoder.build_from_corpus(
#     (pt.numpy() for pt, en in train_examples), target_vocab_size=2**13)
import pickle
with open('tokenizer_en.pickle', 'rb') as handle:
    tokenizer_en = pickle.load(handle)
# Bug fix: the Portuguese tokenizer was being loaded from the *English*
# pickle.  Assumes tokenizer_pt.pickle was saved alongside -- confirm.
with open('tokenizer_pt.pickle', 'rb') as handle:
    tokenizer_pt = pickle.load(handle)
BUFFER_SIZE = 20000
BATCH_SIZE = 64
MAX_LENGTH = 40
def encode(lang1, lang2):
    """Tokenize a (pt, en) sentence pair, wrapping each token list with
    start/end markers (vocab_size and vocab_size+1 respectively).

    Called eagerly through tf.py_function, so the inputs are eager
    tensors and .numpy() is available.
    """
    lang1 = [tokenizer_pt.vocab_size] + tokenizer_pt.encode(
        lang1.numpy()) + [tokenizer_pt.vocab_size+1]
    lang2 = [tokenizer_en.vocab_size] + tokenizer_en.encode(
        lang2.numpy()) + [tokenizer_en.vocab_size+1]
    return lang1, lang2
def filter_max_length(x, y, max_length=MAX_LENGTH):
    """Graph-mode predicate: keep only pairs where both sequences are at
    most *max_length* tokens long."""
    return tf.logical_and(tf.size(x) <= max_length,
                          tf.size(y) <= max_length)
def tf_encode(pt, en):
    """Wrap the eager encode() so it can run inside a tf.data pipeline;
    restores the (unknown-length 1-D) shape information lost by
    tf.py_function."""
    result_pt, result_en = tf.py_function(encode, [pt, en], [tf.int64, tf.int64])
    result_pt.set_shape([None])
    result_en.set_shape([None])
    return result_pt, result_en
# Build the input pipelines: tokenize, drop over-length pairs, cache,
# shuffle, pad each batch to its longest sequence, and prefetch.
train_dataset = train_examples.map(tf_encode)
train_dataset = train_dataset.filter(filter_max_length)
train_dataset = train_dataset.cache()
train_dataset = train_dataset.shuffle(BUFFER_SIZE).padded_batch(BATCH_SIZE, padded_shapes=([None],[None]))
train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
# Validation pipeline: same preprocessing, no shuffle/cache/prefetch.
val_dataset = val_examples.map(tf_encode)
val_dataset = val_dataset.filter(filter_max_length).padded_batch(BATCH_SIZE, padded_shapes=([None],[None]))
if __name__ == '__main__':
    # Model hyperparameters.
    num_layers = 4
    d_model = 128
    num_heads = 8
    dff = 512
    # +2 for the start/end tokens added by encode().
    input_vocab_size = tokenizer_pt.vocab_size + 2
    target_vocab_size = tokenizer_en.vocab_size + 2
    dropout_rate = 0.1
    # load model
    tf.keras.backend.clear_session()
    transfomer = Transformer(num_layers, d_model, num_heads, dff,
                             input_vocab_size, target_vocab_size,
                             pe_input=input_vocab_size,
                             pe_target=target_vocab_size,
                             rate=dropout_rate)
    # optimizer (custom Adam with the d_model-based learning-rate schedule)
    optimizer = optimizer_adam(d_model)
    # loss and accuracy metrics, averaged across each epoch
    train_loss = tf.keras.metrics.Mean(name='train_loss')
    train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
    # Fixed input signature so tf.function traces once for variable-length batches.
    train_step_signature = [tf.TensorSpec(shape=(None, None), dtype=tf.int64),
                            tf.TensorSpec(shape=(None, None), dtype=tf.int64)]
    @tf.function(input_signature=train_step_signature)
    def train_step(inp, tar):
        # Teacher forcing: decoder input is the target shifted right,
        # the expected output is the target shifted left.
        tar_real = tar[:, 1:]
        tar_inp = tar[:, :-1]
        enc_padding_mask, combined_mask, dec_padding_mask = create_mask(inp, tar_inp)
        with tf.GradientTape() as tape:
            predictions, _ = transfomer(inp, tar_inp, True,
                                        enc_padding_mask,
                                        combined_mask,
                                        dec_padding_mask)
            loss = loss_function(tar_real, predictions, n_classes=target_vocab_size, rate=0.1)
        gradients = tape.gradient(loss, transfomer.trainable_variables)
        optimizer.apply_gradients(zip(gradients, transfomer.trainable_variables))
        train_loss(loss)
        train_accuracy(tar_real, predictions)
    # Training loop: reset the metrics each epoch, log every 10 batches.
    Epochs = 3
    for epoch in range(Epochs):
        train_loss.reset_states()
        train_accuracy.reset_states()
        for (batch, (inp, tar)) in enumerate(train_dataset):
            train_step(inp, tar)
            if batch % 10 == 0:
                print ('Epoch {} Batch {} Loss {:.4f} Accuracy {:.4f}'.format(
                    epoch + 1, batch, train_loss.result(), train_accuracy.result()))
    # Persist the trained weights.
    transfomer.save_weights('./weights/v1')
| [
"noreply@github.com"
] | Fuyubai.noreply@github.com |
119a7f33d7c7097b708ebcb520fe969f0c87c133 | 6a5aea472c19453f6115f298d3386cc37b8059e0 | /07 고급정렬_핵심/7_2 Search Kth Number with Sort.py | 166c2b81cd2fd499afe2467c9787f9064828bb54 | [] | no_license | ohnena/algorithm_test | 3d7af16bcbf764bd2af7abc926e3e91e9b7c6ad6 | 308b502a57ac4b5efee9ef45a40d0d944499fca8 | refs/heads/master | 2022-11-08T10:24:24.712711 | 2020-06-29T14:21:12 | 2020-06-29T14:21:12 | 268,235,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,567 | py | # coding=utf-8
#
#
# 7.2 K번째 수
# medium, 정렬, 25분
# //7.1과 같이 많이보는 유형(빠르게 풀 수 있어야!)
# //데이터의 개수는 5백만개이고, 데이터의 범위또한 -10^9 ~ 10^9로 크다. 그래서 계수기반정렬(꼼수)를 쓸수도 없다. 그러므로 무조건 O(NlogN) 정렬알고리즘을 사용해야.
#
# 핵심아이디어
# -최대5백만개 데이터개수. 그러므로 O(NlogN)알고리즘을 사용해야...
# -(연산수를 러프하게 잡아도...5백만*로그2(백만)=1억. 다행히 시간제한이 2초. 1초에 5천만번이라는 마지노선에 부합해서 다행인것...
# -시간에 강한 PyPy3를 선택해서 제출해야.)
#
#
# 계획
# -7.1에서 구현한 merge sort를 가져와서 테스트 해본다.
def merge_sort(array):
    """Stable top-down merge sort; returns a sorted list, input untouched."""
    if len(array) <= 1:
        return array
    half = len(array) // 2
    first = merge_sort(array[:half])
    second = merge_sort(array[half:])
    # Merge the two sorted halves; <= keeps the sort stable.
    merged = []
    a = b = 0
    while a < len(first) and b < len(second):
        if first[a] <= second[b]:
            merged.append(first[a])
            a += 1
        else:
            merged.append(second[b])
            b += 1
    # One of these slices is empty; the other holds the leftovers.
    merged.extend(first[a:])
    merged.extend(second[b:])
    return merged
# Read N (count) and K, then the N numbers, sort them and print the
# K-th smallest (1-indexed).
N, K = map(int, input().split(' '))
array = list(map(int, input().split(' ')))
# array = merge_sort(array)
array = sorted(array) # In a real contest the built-in sort is the better choice!
print(array[K-1])
| [
"ohnenaohbaby@gmail.com"
] | ohnenaohbaby@gmail.com |
0571bf416874d62469823d66ae8f9fc52e0a28a8 | 1a09936a84d1d01cf105aad64ed97d78e23d0450 | /application/migrations/0004_auto_20201105_1108.py | 3b787af67355b66a06480b4208ce6051ec522dc1 | [] | no_license | vikram2208/dashboard | ce64927e6d259d0560ea13f66f35c134f9041be6 | 871b264d1831774bab65294577d87380a94977a9 | refs/heads/master | 2023-01-08T23:07:26.929218 | 2020-11-06T14:22:21 | 2020-11-06T14:22:21 | 310,594,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | # Generated by Django 3.1.3 on 2020-11-05 11:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: redeclare the auto-created `id` primary key on the
    `limpiezas` model (no schema change beyond the field definition)."""
    dependencies = [
        ('application', '0003_auto_20201104_1545'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='password',
            field=models.CharField(max_length=200),
        ),
    ]
| [
"vickyvpjtqwv@gmail.com"
] | vickyvpjtqwv@gmail.com |
e9689e0654946bbe442befd797dac771d63f7c28 | 4e30d990963870478ed248567e432795f519e1cc | /tests/models/validators/v3_1_1/jsd_a4d5b5da6a50bfaaecc180543fd952.py | 24d54b381f25a25dd8d542827e9372430e636dad | [
"MIT"
] | permissive | CiscoISE/ciscoisesdk | 84074a57bf1042a735e3fc6eb7876555150d2b51 | f468c54998ec1ad85435ea28988922f0573bfee8 | refs/heads/main | 2023-09-04T23:56:32.232035 | 2023-08-25T17:31:49 | 2023-08-25T17:31:49 | 365,359,531 | 48 | 9 | MIT | 2023-08-25T17:31:51 | 2021-05-07T21:43:52 | Python | UTF-8 | Python | false | false | 7,787 | py | # -*- coding: utf-8 -*-
"""Identity Services Engine createDeviceAdminTimeCondition data model.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from builtins import *
import fastjsonschema
from ciscoisesdk.exceptions import MalformedRequest
class JSONSchemaValidatorA4D5B5Da6A50BfAaecC180543Fd952(object):
    """createDeviceAdminTimeCondition request schema definition."""
    def __init__(self):
        # Compile the embedded JSON schema once at construction;
        # validate() reuses the compiled validator for every request.
        # The .replace() strips the 16-space literal indentation before
        # parsing.
        super(JSONSchemaValidatorA4D5B5Da6A50BfAaecC180543Fd952, self).__init__()
        self._validator = fastjsonschema.compile(json.loads(
            '''{
                "$schema": "http://json-schema.org/draft-04/schema#",
                "properties": {
                "response": {
                "allOf": [
                {
                "properties": {
                "conditionType": {
                "enum": [
                "ConditionAndBlock",
                "ConditionAttributes",
                "ConditionOrBlock",
                "ConditionReference",
                "LibraryConditionAndBlock",
                "LibraryConditionAttributes",
                "LibraryConditionOrBlock",
                "TimeAndDateCondition"
                ],
                "type": "string"
                },
                "isNegate": {
                "type": "boolean"
                },
                "link": {
                "properties": {
                "href": {
                "type": "string"
                },
                "rel": {
                "enum": [
                "next",
                "previous",
                "self",
                "status"
                ],
                "type": "string"
                },
                "type": {
                "type": "string"
                }
                },
                "required": [
                "href"
                ],
                "type": "object"
                }
                },
                "required": [
                "conditionType"
                ],
                "type": "object"
                },
                {
                "properties": {
                "conditionType": {
                "enum": [
                "ConditionAndBlock",
                "ConditionAttributes",
                "ConditionOrBlock",
                "ConditionReference",
                "LibraryConditionAndBlock",
                "LibraryConditionAttributes",
                "LibraryConditionOrBlock",
                "TimeAndDateCondition"
                ],
                "type": "string"
                },
                "datesRange": {
                "properties": {
                "endDate": {
                "type": "string"
                },
                "startDate": {
                "type": "string"
                }
                },
                "required": [
                "endDate",
                "startDate"
                ],
                "type": "object"
                },
                "datesRangeException": {
                "properties": {
                "endDate": {
                "type": "string"
                },
                "startDate": {
                "type": "string"
                }
                },
                "required": [
                "endDate",
                "startDate"
                ],
                "type": "object"
                },
                "description":
                 {
                "type": "string"
                },
                "hoursRange": {
                "properties": {
                "endTime": {
                "type": "string"
                },
                "startTime": {
                "type": "string"
                }
                },
                "required": [
                "endTime",
                "startTime"
                ],
                "type": "object"
                },
                "hoursRangeException": {
                "properties": {
                "endTime": {
                "type": "string"
                },
                "startTime": {
                "type": "string"
                }
                },
                "required": [
                "endTime",
                "startTime"
                ],
                "type": "object"
                },
                "id": {
                "type": "string"
                },
                "isNegate": {
                "type": "boolean"
                },
                "link": {
                "properties": {
                "href": {
                "type": "string"
                },
                "rel": {
                "enum": [
                "next",
                "previous",
                "self",
                "status"
                ],
                "type": "string"
                },
                "type": {
                "type": "string"
                }
                },
                "required": [
                "href"
                ],
                "type": "object"
                },
                "name": {
                "type": "string"
                },
                "weekDays": {
                "items": {
                "enum": [
                "Friday",
                "Monday",
                "Saturday",
                "Sunday",
                "Thursday",
                "Tuesday",
                "Wednesday"
                ],
                "type": "string"
                },
                "type": "array"
                },
                "weekDaysException": {
                "items": {
                "enum": [
                "Friday",
                "Monday",
                "Saturday",
                "Sunday",
                "Thursday",
                "Tuesday",
                "Wednesday"
                ],
                "type": "string"
                },
                "type": "array"
                }
                },
                "required": [
                "conditionType",
                "name"
                ],
                "type": "object"
                }
                ]
                },
                "version": {
                "type": "string"
                }
                },
                "required": [
                "response",
                "version"
                ],
                "type": "object"
                }'''.replace("\n" + ' ' * 16, '')
        ))
    def validate(self, request):
        """Validate *request* against the schema; raise MalformedRequest
        (wrapping the underlying schema error) when it does not conform."""
        try:
            self._validator(request)
        except fastjsonschema.exceptions.JsonSchemaException as e:
            raise MalformedRequest(
                '{} is invalid. Reason: {}'.format(request, e.message)
            )
| [
"wastorga@altus.co.cr"
] | wastorga@altus.co.cr |
1a5bc2e46222bc414ee28a87314ee76904b08399 | 9e8ec77e422fe84704988e5dd7ddee27eff0dac8 | /bookmarks/forms.py | 46cfe8cb4cf5aa4ed859874bdb4c4ac3fd1f778a | [] | no_license | cliffkang/djorg-cs7 | 268c3992d221d37298c71f968a8b7e9156fe671b | fab176e646e3450ea78c654102f6eb35592ef4ab | refs/heads/master | 2020-03-18T07:28:16.641237 | 2018-06-13T05:53:59 | 2018-06-13T05:53:59 | 134,454,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | from django import forms
from .models import Bookmark
class BookmarkForm(forms.ModelForm):
    """ModelForm exposing the user-editable fields of a Bookmark."""
    class Meta:
        model = Bookmark
        fields = ('url', 'name', 'notes')
"inlovewithhim@gmail.com"
] | inlovewithhim@gmail.com |
beb997c3172ba54a79ee5d13646eaee6bedb89df | a9228e444f4f7f71aa0fcf7cb752fcff46622c23 | /status-old.py | b116d4f4e769d7bd46366af3ad6ab70bc90a92af | [] | no_license | dje4321/LinuxStatus | 42860bbffc1fd2ae69ab4c21ce401452cb35478c | 17db0d7edd3d67a946473f3759bbc78a6c2abd31 | refs/heads/master | 2020-03-09T22:53:03.276629 | 2018-09-11T05:07:13 | 2018-09-11T05:07:13 | 129,045,626 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,280 | py | #!/usr/bin/python3
import subprocess, os, sys
argv = sys.argv
def displayError(argv, formattedOutput, errors=0, skipError=False):
    """Report *formattedOutput* on stdout and, unless --nogui was passed,
    in a zenity error dialog.

    Nothing is shown unless at least one error was counted (errors >= 1)
    or skipError forces the report.
    """
    try:
        if errors >= 1 or skipError:  # only report when something was found (or forced)
            print(formattedOutput)
            if not checkArgv(argv, ["--nogui"]):
                # Security/robustness fix: pass the message as an argument
                # list instead of interpolating it into a shell string via
                # os.system -- quotes in the message no longer break (or
                # inject into) the zenity command.  stderr is discarded,
                # matching the original 2>/dev/null.
                subprocess.call(
                    ["zenity", "--error", "--ellipsize",
                     "--text={}".format(formattedOutput)],
                    stderr=subprocess.DEVNULL)
    except Exception as e:
        print("displayError()\n{}".format(e))
        sys.exit()
def findArgv(argv, condidtion):
    """Return the index in *argv* of the first element matching any entry
    of *condidtion*; fall through (None) when nothing matches."""
    try:
        for position, arg in enumerate(argv):
            if arg in condidtion:
                return position
    except Exception as e:
        print("findArgv()\n{}".format(e))
        sys.exit()
def checkArgv(argv, condidition):
    """Return True when any element of *argv* appears in *condidition*,
    otherwise False."""
    try:
        for arg in argv:
            if arg in condidition:
                return True
        return False
    except Exception as e:
        print("checkArgv()\n{}".format(e))
        sys.exit()
def applyBlacklist(command, blacklist):
    """Append a `grep -v` filter to *command* that drops every line
    matching any blacklist entry; return *command* unchanged when the
    blacklist is empty."""
    try:
        if not blacklist:
            return command
        filters = "".join(" -e {}".format(entry) for entry in blacklist)
        return command + " | grep -v" + filters
    except Exception as e:
        print("applyBlacklist()\n{}".format(e))
        sys.exit()
def checkSystemd(argv): # Checks for any systemd errors and reports them
    """Scan `systemctl` output (with an optional blacklist) for units in
    the failed state and report them via displayError()."""
    try:
        formattedOutput = 'The following issues were found with systemd\n'
        systemctl,errors = [],0
        blacklist = []
        if checkArgv(argv,["--sysBlacklist","-sb"]) == True: # Check to see if a blacklist needs to be applied
            # The blacklist is the comma-separated value following the flag.
            for x in range(0,len(argv[findArgv(argv,["--sysBlacklist","-sb"]) + 1].split(","))):
                blacklist.append(argv[findArgv(argv,["--sysBlacklist","-sb"]) + 1].split(",")[x])
        systemctlOutput = subprocess.getoutput(applyBlacklist("systemctl",blacklist)).split('\n') # Runs systemctl with the optional blacklist and stores the output as a newline separted list
        for x in range(0,len(systemctlOutput)): # Interate over the lists in systemctl
            if systemctlOutput[x].count("failed") >= 1: #Check if anything has failed
                systemctl.append(systemctlOutput[x])
        for x in range(0,len(systemctl)):
            # Second whitespace-separated field of a systemctl line is the unit name.
            formattedOutput += systemctl[x].split(' ')[1] + ' ' + 'has failed' + '\n'
            errors += 1
        displayError(argv,formattedOutput,errors)
    except Exception as e:
        print("checkSystemd()\n{}".format(e))
        sys.exit()
def diskUsage(argv): # Checks if any mounted devices have exceeded a threshold
    """Parse `df -h` output (with optional blacklist) and report every
    filesystem whose usage percentage meets or exceeds the threshold
    (default 85, overridable with --diskThreshold/-dt)."""
    try:
        formattedOutput = 'Low diskspace detected\n'
        diskSpace,errors = [],0
        blacklist = []
        threshold = 85 # Default threshold level
        if checkArgv(argv,["--diskBlacklist","-db"]) == True: # Check if blacklist argument has been specified
            # The blacklist is the comma-separated value following the flag.
            for x in range(0,len(argv[findArgv(argv,["--diskBlacklist","-db"]) + 1].split(","))):
                blacklist.append(argv[findArgv(argv,["--diskBlacklist","-db"]) + 1].split(",")[x])
        if checkArgv(argv,["--diskThreshold","-dt"]) == True: # Check if we need to adjust the threshold
            threshold = int(argv[findArgv(argv,["--diskThreshold","-dt"]) + 1])
        dfOutput = subprocess.getoutput(applyBlacklist("df -h",blacklist)).split('\n') # Get the output of df and store as a newline separated list
        for x in range(1,len(dfOutput)): # Iterate over each line (skip the header)
            for i in range(0,len(dfOutput[x].split(' '))): # Interate over the line
                if dfOutput[x].split(' ')[i].count("%") >= 1: # Check if we are on a usage position
                    if int(dfOutput[x].split(' ')[i].strip("%")) >= threshold: # see if usage exceeds threshold
                        diskSpace.append(dfOutput[x])
        for x in range(0,len(diskSpace)): # Prettify output
            for i in range(0,len(diskSpace[x].split(' '))):
                if diskSpace[x].split(' ')[i].count("%") >= 1:
                    # The field after the usage percentage is the mount point.
                    formattedOutput += diskSpace[x].split(' ')[i+1] + " is at " + diskSpace[x].split(' ')[i] + " disk usage" + '\n'
                    errors += 1
        displayError(argv,formattedOutput,errors)
    except Exception as e:
        print("diskUsage()\n{}".format(e))
        sys.exit()
def checkTrash(argv):
try:
formattedOutput = 'Trash is not empty\n'
if subprocess.getoutput("$(which dir) $HOME/.local/share/Trash/files").split(' ') != ['']:
displayError(argv,formattedOutput,skipError=True)
except Exception as e:
print("checkTrash()\n{}".format(e))
sys.exit()
def checkEntropy(argv):
try:
threshold = 100
entropy = int(subprocess.getoutput("cat /proc/sys/kernel/random/entropy_avail"))
if checkArgv(argv,["--entropyThreshold","-et"]) == True:
threshold = int(argv[findArgv(argv,["--entropyThreshold","-et"]) + 1])
formattedOutput = "Entropy is below {}\n You should not do anything cryptographicly intensive".format(threshold)
if entropy <= threshold:
displayError(argv,formattedOutput,skipError=True)
except Exception as e:
print("displayEntropy()\n{}".format(e))
sys.exit()
'''
def checkDmesg(argv):
emerg = subprocess.getoutput("dmesg -l err").split('\n')
alert = subprocess.getoutput("dmesg -l alert").split('\n')
crit = subprocess.getoutput("dmesg -l crit").split('\n')
emerg_new = emerg
for x in range(0,len(emerg)):
for i in range(0,len(emerg[x])):
if emerg[x][i] == "]":
emerg[x] += ' '
emerg = emerg[x][i + 2:-1]
break
'''
########################################################################################
# Main Program
if checkArgv(argv,["-h","--help"]) == True: # Check if we need to display the help screen
print(
"""{}
-h --help Prints this help message
--nogui Disables the GUI output and only prints to STDOUT
--diskThreshold -dt Overrides the threshold value for disk space usage
--diskBlacklist -db Blacklists certain strings from disk usage checks. Values are comma separated
--sysBlacklist -sb Blacklists certain strings from systemd checks. Values are comma separated
--enableTrash -et Enables checking Trash to see if its empty
--enableEntropy -ee Checks to see if entropy is too low
--entropyThreshold -et Threshold for total entropy""".format(argv[0]))
sys.exit()
checkSystemd(argv) # Check systemd for errors
diskUsage(argv) # Check for high disk usage
if checkArgv(argv,["--enableTrash","-et"]) == True:
checkTrash(argv)
if checkArgv(argv,["--enableEntropy","-ee"]) == True:
checkEntropy(argv)
#checkDmesg(argv)
| [
"dje4321@gmail.com"
] | dje4321@gmail.com |
98ea6b2de13684cd3eeb60da77c947f887e9470a | e9e17e371e94fb3ec403573dd791edefd37f5b5e | /test/unit/chains/test_chain_ts_wrappers.py | f466cb3cc515e24e0f0d074832f21b60214e068b | [
"BSD-3-Clause"
] | permissive | Melhafiz/FEDOT | 7145b644c4b7a4352a2fadb1fbdec039302dc71d | ac6f028d37dd3cd6890253b4d8143059687712e7 | refs/heads/master | 2023-06-14T01:31:43.389669 | 2021-06-26T20:48:18 | 2021-06-26T20:48:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,182 | py | import numpy as np
from sklearn.metrics import mean_absolute_error
from fedot.core.chains.chain_ts_wrappers import out_of_sample_ts_forecast, in_sample_ts_forecast
from fedot.core.chains.chain import Chain
from fedot.core.chains.node import PrimaryNode, SecondaryNode
from fedot.core.data.data import InputData
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.core.repository.tasks import Task, TaskTypesEnum, TsForecastingParams
def prepare_input_data(forecast_length):
ts = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 101])
# Forecast for 2 elements ahead
task = Task(TaskTypesEnum.ts_forecasting,
TsForecastingParams(forecast_length=forecast_length))
train_input = InputData(idx=np.arange(0, len(ts)),
features=ts,
target=ts,
task=task,
data_type=DataTypesEnum.ts)
start_forecast = len(ts)
end_forecast = start_forecast + 2
predict_input = InputData(idx=np.arange(start_forecast, end_forecast),
features=ts,
target=None,
task=task,
data_type=DataTypesEnum.ts)
return train_input, predict_input
def get_simple_short_lagged_chain():
# Create simple chain for forecasting
node_lagged = PrimaryNode('lagged')
# Use 4 elements in time series as predictors
node_lagged.custom_params = {'window_size': 4}
node_final = SecondaryNode('linear', nodes_from=[node_lagged])
chain = Chain(node_final)
return chain
def test_out_of_sample_ts_forecast_correct():
simple_length = 2
multi_length = 10
train_input, predict_input = prepare_input_data(simple_length)
chain = get_simple_short_lagged_chain()
chain.fit(train_input)
# Make simple prediction
simple_predict = chain.predict(predict_input)
simple_predicted = np.ravel(np.array(simple_predict.predict))
# Make multi-step forecast for 10 elements (2 * 5 steps)
multi_predicted = out_of_sample_ts_forecast(chain=chain,
input_data=predict_input,
horizon=multi_length)
assert len(simple_predicted) == simple_length
assert len(multi_predicted) == multi_length
def test_in_sample_ts_forecast_correct():
simple_length = 2
multi_length = 10
train_input, predict_input = prepare_input_data(simple_length)
chain = get_simple_short_lagged_chain()
chain.fit(train_input)
multi_predicted = in_sample_ts_forecast(chain=chain,
input_data=predict_input,
horizon=multi_length)
# Take validation part of time series
time_series = np.array(train_input.features)
validation_part = time_series[-multi_length:]
metric = mean_absolute_error(validation_part, multi_predicted)
is_forecast_correct = True
assert is_forecast_correct
| [
"noreply@github.com"
] | Melhafiz.noreply@github.com |
1c61ff625dcda8f52c757ba95061d865d1187f97 | 0d68e0bda89c87acacd3bb6646e2bf311bfa401a | /main.py | b312f4981b750d45e7502635b9407c9453420abb | [] | no_license | sdail007/Twitch-Bot | bfb13b7f3027be3260df217a7b9329eead610e83 | 9f4ae55ce5cb53cf01d8287bd000c81dd77e876c | refs/heads/master | 2020-03-27T00:45:41.890913 | 2019-08-30T04:56:00 | 2019-08-30T04:56:00 | 145,653,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,755 | py | import os
import sys
import getopt
import json
from Twitch.AuthenticatedUser import AuthenticatedUser
from BotInterfaces.BotInstance import BotInstance
from Twitch.TwitchConnection import TwitchConnection
from Twitch.TestConnection import TestConnection
from ComponentLoader import ComponentLoader
import redis
r = redis.Redis()
bot = None
botuser = None
componentLoader = None
def Connect(channel):
connection = TwitchConnection(botuser, channel)
bot.set_connection(connection)
bot.start()
def load_components():
components = componentLoader.activate_all()
for component in components:
bot.add_component(component)
def main(argv):
global botuser, bot, componentLoader
#Get token from instance directory
tokenFile = os.path.join(os.path.dirname(__file__), "TwitchToken.json")
botuser = AuthenticatedUser(tokenFile)
bot = BotInstance()
settings_dir = os.path.join(os.path.dirname(__file__), "Settings")
componentLoader = ComponentLoader(settings_dir)
load_components()
PAUSE = True
while PAUSE:
kvp = r.blpop(['messages', 'connection', 'quit'], timeout=1)
if kvp:
print kvp
if kvp[0] == 'messages':
command = json.loads(kvp[1])
bot.send_message(command["args"]["content"])
elif kvp[0] == 'connection':
command = json.loads(kvp[1])
if command["command"] == 'connect':
Connect(command["args"]["channel"])
elif command["command"] == 'disconnect':
bot.stop()
elif kvp[0] == 'quit':
PAUSE = False
bot.stop()
return
if __name__ == "__main__":
main(sys.argv[1:])
| [
"sdail007@users.noreply.github.com"
] | sdail007@users.noreply.github.com |
214799d8203c345e1473fd09b5ae3f22c3fd1506 | 7765d50c7253af8c12917c68edb03ef1ab6961f0 | /imageTester.py | 604da477e66806a34c68f19f1286739e71224a52 | [] | no_license | ah3243/Image-tester | e22994ca4844747bdefbb637a2c3f4d14f5a2cea | 48506db4502b688066b99315142724859d706dd2 | refs/heads/master | 2021-01-22T22:16:09.125246 | 2017-05-30T01:48:51 | 2017-05-30T01:48:51 | 92,765,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,909 | py |
import numpy as np
import cv2
# confirm paths are for directories or files
from os import path
# find all directories
from os import listdir
import random
#########
## Get list of Dirs and images
#########
## get object directories
raw_objDirs = listdir('images/')
objDirs = []
for i in raw_objDirs:
if path.isdir('images/'+i):
objDirs.append(i)
## find all images in each objects dir
imgList = []
for x in objDirs:
objImgs = listdir('images/' + x)
row = []
for y in objImgs:
if path.isfile('images/'+ x + '/' + y):
row.append('images/'+ x + '/' + y)
imgList.append(row)
#########
## Get any saved scores
#########
saveFilePath = 'saved.txt'
# cached scores
scores = {}
if path.isfile(saveFilePath)== False or path.getsize(saveFilePath)==0:
print("no save file found generating new one")
f = open(saveFilePath, 'w')
tmpVals = ""
for i in objDirs:
tmpVals += str(i + "\n" + "0" + "\n")
tmpVals += str("NumOfTests" + "\n" + "0" + "\n")
f.write(tmpVals)
f.close()
f = open(saveFilePath, 'r')
tmp = f.read()
objScos = tmp.split('\n')
cnt =0
tmpList = []
for i in objScos:
#filter out the key
if cnt%2==False:
tmpList = []
tmpList.append(objScos[cnt])
# filter out the value
else:
tmpList.append(objScos[cnt])
scores[tmpList[0]] = tmpList[1]
cnt +=1
print("The previously saved scores: {}".format(scores))
f.close()
###########
### Vars for navigation/display
###########
dirCnt = 0 # current dir position
imgCnt = 0 # current image position
dirSize = len(objDirs) # number of objects
imgSize = [] # list with number of images in each dir
cnt = 0
for i in imgList:
imgSize.append(len(imgList[cnt]))
cnt+=1
cv2.namedWindow("main", cv2.WINDOW_AUTOSIZE)
while(True):
###########
## read in the file and display
###########
img = cv2.imread(imgList[dirCnt][imgCnt],1)
cv2.namedWindow("main", cv2.WINDOW_AUTOSIZE)
cv2.resizeWindow("main", 1000, 1000)
# roughly put window in the middle of the screen
cv2.moveWindow("main", 400, 200)
# input the resizing values
img2 = cv2.resize(img, (int(img.shape[1]/10),int(img.shape[0]/10)))
cv2.imshow("main",img2)
##########
## get keyboard input
##########
k = cv2.waitKey(0)
# if the uparrow is pressed then cycle through images for specific object
if k == 63232:
if imgCnt+1 < imgSize[dirCnt]:
imgCnt +=1
else:
imgCnt = 0
elif k == 13:
## show the answer
print("the answer is..: {} \nPress yes if you got the answer right".format(objDirs[dirCnt]))
nameReveal = np.zeros((500,500,3), np.uint8)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(nameReveal,objDirs[dirCnt],(2,150), font, 3,(255,255,255),2,cv2.LINE_AA)
cv2.putText(nameReveal,"Press 'y' if you got it right",(2,400), font, 1,(0,255,255),2,cv2.LINE_AA)
cv2.putText(nameReveal,"or any other to continue",(2,450), font, 1,(0,255,255),2,cv2.LINE_AA)
cv2.imshow("main", nameReveal)
k = cv2.waitKey(0)
# if 'y' is pressed(answered correctly) increment point for object
if k == 121:
tmp = int(scores[objDirs[dirCnt]])+1
scores[objDirs[dirCnt]]= tmp
if dirCnt+1 < dirSize:
dirCnt+=1
else:
dirCnt = 0
elif k == 27 or k == 113: #if q or esc is pressed then exit
print("Saving results..")
f = open(saveFilePath, 'w')
tmpStr = ""
for i in scores:
print("This is i: {}".format(i))
tmpStr +=str(str(i) + '\n' + str(scores[i]) + '\n')
print("This is the resutls: \n{}".format(tmpStr))
f.write(tmpStr)
f.close()
print("exiting")
cv2.destroyAllWindows()
break
| [
"alberthiggins@alberts-MacBook-Pro.local"
] | alberthiggins@alberts-MacBook-Pro.local |
2edca161bb09490c3049a1b1a92fcb77a1b3443e | 10d51973176c5cb4cda6b3180c45f296ff26ba85 | /app/modules/v1/extensions/fun/__init__.py | 68320c333019a7b2ff36792a5f2462b7e5687932 | [
"MIT"
] | permissive | robjporter/PYTHON-APIServer-1 | 013a544e216f993fc622000a0656aec5c3475aef | 57df8e8189834504b3f473993ae12586ec32d5c9 | refs/heads/master | 2023-05-12T14:03:27.253735 | 2016-09-15T08:07:26 | 2016-09-15T08:07:26 | 68,276,327 | 0 | 0 | MIT | 2023-05-01T19:25:41 | 2016-09-15T07:58:04 | HTML | UTF-8 | Python | false | false | 143 | py | from flask import Blueprint
fun = Blueprint( 'fun', __name__, template_folder = 'templates', static_folder = 'static' )
from . import views
| [
"robjporter@outlook.com"
] | robjporter@outlook.com |
c9e34f5b1de4b1321be3eb11022298143163fb5b | 53961f4b2763ad5a45fc2d28a7e165c8e65ad574 | /python-project/Variables_et_fonctions/Calcul_distance_vol_oiseau.py | f54b468a63f7af106b245930f0da47f2dfb965a3 | [] | no_license | matchre/playground-X1rXTswJ | d975bdc9820beca9067f18eb1da380d27d113a00 | 33ed1beec889503e30669b563d61993d014ef7ce | refs/heads/master | 2020-04-03T23:14:06.284377 | 2018-10-26T09:36:26 | 2018-10-26T09:36:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | from math import *
def mon_programme(lat_a,long_a,lat_b,long_b):
#Ne pas toucher ce qui précède.
#Les valeurs pour les variables en entrée seront automatiquement données.
#Ecrire ci-dessous en n'oubliant pas d'indenter.
| [
"noreply@github.com"
] | matchre.noreply@github.com |
54c5022a4c7e9f26bf90b81d66529dc3fa315caa | eddbf9518e7384f0e9a1d9e19cbe74855c3f24bd | /2017012343_ZhaoZiJun/matploylib_hw.py | c50b9e841063c65cc67ae896a3084894acb12dc5 | [] | no_license | wanghan79/2019_Python | 9d2391d799efd9545b2afb3565bc5c6d542d1d86 | f856409af92af3990773966d937d58d9d1cade04 | refs/heads/master | 2020-05-05T12:54:30.921361 | 2019-07-20T09:50:03 | 2019-07-20T09:50:03 | 180,050,522 | 11 | 14 | null | 2019-07-15T15:00:03 | 2019-04-08T01:59:24 | Python | UTF-8 | Python | false | false | 476 | py | import matplotlib.pyplot as plt
import numpy as np
"""
简单画了三个函数图像,分别是y=x y=1/x y=x^3
"""
x_values = np.arange(0.,3.,0.05)
plt.plot(x_values,x_values,'r',x_values,x_values**3,'g',
x_values,x_values**(-1),'y')
#plt.plot(input_values,squares,linewidth=5)
plt.title('y=x y=1/x y=x^3',fontsize=24)
plt.xlabel('value',fontsize=14)
plt.ylabel('square of value',fontsize=14)
plt.tick_params(axis='both',which='major',labelsize=14)
plt.show()
| [
"zhaozijun1998.com"
] | zhaozijun1998.com |
405a1959f9d4f85a7a2f446f5fc40e3adc4d2834 | f89cd667200844f019dbf2c93798e7fee96b89e2 | /dynamic-programming/exercises/ugly-numbers.py | ab24762e2184774dfc0008339825acefc4170efc | [] | no_license | radomirbrkovic/algorithms | 575f4540c7aab2daf3e55d0df99030e440ee2060 | 621d0f82e0e4cd253afc0e07772a201b019f7889 | refs/heads/master | 2023-07-15T23:59:29.725946 | 2021-09-01T19:47:08 | 2021-09-01T19:47:08 | 250,455,390 | 0 | 0 | null | 2021-09-01T19:47:09 | 2020-03-27T06:12:52 | Python | UTF-8 | Python | false | false | 596 | py | # Ugly Numbers https://www.geeksforgeeks.org/ugly-numbers/
def maxDivide(a, b):
while a % b == 0:
a = a / b
return a
def isUgly(no):
no = maxDivide(no, 2)
no = maxDivide(no, 3)
no = maxDivide(no, 5)
return 1 if no == 1 else 0
# Function to get the nth ugly number
def getNthUglyNo(n):
i = 1
# ugly number count
count = 1
# Check for all integers untill
# ugly count becomes n
while n > count:
i += 1
if isUgly(i):
count += 1
return i
print("150th ugly number is ", getNthUglyNo(150)) | [
"radomir.brkovic@pmf.edu.rs"
] | radomir.brkovic@pmf.edu.rs |
8f07a65424360d871df98c6d9b13d1a9da425f09 | e777988afd4e335213ac4a85f8c2a82829b54283 | /rdf_plotter2.py | 2b7fa868c0dd5526a89bcb12f8609f84ec3bebbc | [] | no_license | manishapar23/photons | 7ba261de106e85a85b893654ab86b6ef905ebe66 | 86df436067365dc01253102ceaf71a23b4bd15eb | refs/heads/master | 2023-05-29T03:15:45.002298 | 2021-06-07T20:49:20 | 2021-06-07T20:49:20 | 374,708,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,506 | py | import ROOT
RDF = ROOT.ROOT.RDataFrame
ROOT.ROOT.EnableImplicitMT()
import sys,os
Chain = ROOT.TChain("Events")
for path, subdirs, files in os.walk("/cms/xaastorage/NanoAOD/2017/APR20/XtoAAto4G_Signal_official/X1000A100/"):
for name in files:
File = os.path.join(path, name)
if (File.endswith(".root")):
print os.path.join(path, name)
Chain.Add(File)
Rdf = RDF(Chain)
ROOT.gInterpreter.Declare('#include "rdf_vars2.h"')
Rdf = Rdf.Define("total_weight", "9.59447/107003.")
Rdf = Rdf.Define("AM1", "get_AM(nPhoton, Photon_mass, Photon_pt, Photon_eta, Photon_phi, 30., 2.4, 0, 1)")
Rdf = Rdf.Define("AM2", "get_AM(nPhoton, Photon_mass, Photon_pt, Photon_eta, Photon_phi, 30., 2.4, 2, 3)")
Rdf = Rdf.Define("AM3", "get_AM(nPhoton, Photon_mass, Photon_pt, Photon_eta, Photon_phi, 30., 2.4, 0, 2)")
Rdf = Rdf.Define("AM4", "get_AM(nPhoton, Photon_mass, Photon_pt, Photon_eta, Photon_phi, 30., 2.4, 1, 3)")
Rdf = Rdf.Define("AM5", "get_AM(nPhoton, Photon_mass, Photon_pt, Photon_eta, Photon_phi, 30., 2.4, 0, 3)")
Rdf = Rdf.Define("AM6", "get_AM(nPhoton, Photon_mass, Photon_pt, Photon_eta, Photon_phi, 30., 2.4, 1, 2)")
Rdf = Rdf.Filter("nPhoton > 3")
H_AM_1_Lazy = Rdf.Histo1D(("H_AM_1", ";Photons 1 and 2 (GeV);events", 100, 0., 200), "AM1", "total_weight")
H_AM_2_Lazy = Rdf.Histo1D(("H_AM_2", ";Photons 3 and 4 (GeV);events", 100, 0., 200), "AM2", "total_weight")
H_AM_3_Lazy = Rdf.Histo1D(("H_AM_3", ";Photons 1 and 3 (GeV);events", 100, 0., 200), "AM3", "total_weight")
H_AM_4_Lazy = Rdf.Histo1D(("H_AM_4", ";Photons 2 and 4 (GeV);events", 100, 0., 200), "AM4", "total_weight")
H_AM_5_Lazy = Rdf.Histo1D(("H_AM_5", ";Photons 1 and 4 (GeV);events", 100, 0., 200), "AM5", "total_weight")
H_AM_6_Lazy = Rdf.Histo1D(("H_AM_6", ";Photons 2 and 3 (GeV);events", 100, 0., 200), "AM6", "total_weight")
H_AM_1 = H_AM_1_Lazy.GetValue()
H_AM_2 = H_AM_2_Lazy.GetValue()
H_AM_3 = H_AM_3_Lazy.GetValue()
H_AM_4 = H_AM_4_Lazy.GetValue()
H_AM_5 = H_AM_5_Lazy.GetValue()
H_AM_6 = H_AM_6_Lazy.GetValue()
C = ROOT.TCanvas()
C.cd()
H_AM_1.Draw("hist")
C.SaveAs("AM1.root")
C1 = ROOT.TCanvas()
C1.cd()
H_AM_2.Draw("hist")
C1.SaveAs("AM2.root")
C2 = ROOT.TCanvas()
C2.cd()
H_AM_3.Draw("hist")
C2.SaveAs("AM3.root")
C3 = ROOT.TCanvas()
C3.cd()
H_AM_4.Draw("hist")
C3.SaveAs("AM4.root")
C4 = ROOT.TCanvas()
C4.cd()
H_AM_5.Draw("hist")
C4.SaveAs("AM5.root")
C5 = ROOT.TCanvas()
C5.cd()
H_AM_6.Draw("hist")
C5.SaveAs("AM6.root")
| [
"mp1700@hexcms.hexfarm.rutgers.edu"
] | mp1700@hexcms.hexfarm.rutgers.edu |
e69dd206c04139d289ef732a712c97a0a3cab0e9 | 02ce189337754500e0801b39815252f47caa15cf | /terraform-modules/lambda/code/cname-cloudfront-s3/cname-cloudfront-s3.py | 9411c6452de5e25eabdd3b8e712d961f92fe1657 | [
"Apache-2.0"
] | permissive | Mellis3489/domain-protect | 9689f86e696ef6f2b73f10780fefe913cfc96061 | 70f3301815e6cf833705d7e7f9981ea75a245052 | refs/heads/main | 2023-06-23T11:07:04.208194 | 2021-07-12T13:38:44 | 2021-07-12T13:38:44 | 385,260,134 | 0 | 0 | NOASSERTION | 2021-07-12T13:37:53 | 2021-07-12T13:37:52 | null | UTF-8 | Python | false | false | 6,965 | py | #!/usr/bin/env python
import os, boto3
import logging
import json
import requests
from botocore.exceptions import ClientError
from datetime import datetime
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, datetime):
serial = obj.isoformat()
return serial
raise TypeError("Type not serializable")
def assume_role(account, security_audit_role_name, external_id, project, region):
security_audit_role_arn = "arn:aws:iam::" + account + ":role/" + security_audit_role_name
stsclient = boto3.client('sts')
try:
if external_id == "":
assumed_role_object = stsclient.assume_role(RoleArn = security_audit_role_arn, RoleSessionName = project)
print("Assumed " + security_audit_role_name + " role in account " + account)
else:
assumed_role_object = stsclient.assume_role(RoleArn = security_audit_role_arn, RoleSessionName = project, ExternalId = external_id)
print("Assumed " + security_audit_role_name + " role in account " + account)
except Exception:
logging.exception("ERROR: Failed to assume " + security_audit_role_name + " role in AWS account " + account)
credentials = assumed_role_object['Credentials']
aws_access_key_id = credentials["AccessKeyId"]
aws_secret_access_key = credentials["SecretAccessKey"]
aws_session_token = credentials["SessionToken"]
boto3_session = boto3.session.Session(aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, aws_session_token=aws_session_token, region_name=region)
return boto3_session
def vulnerable_cname_cloudfront_s3(domain_name):
try:
response = requests.get('http://' + domain_name)
if response.status_code == 404 and "Code: NoSuchBucket" in response.text:
return "True"
else:
return "False"
except:
pass
try:
response = requests.get('https://' + domain_name)
if response.status_code == 404 and "Code: NoSuchBucket" in response.text:
return "True"
else:
return "False"
except:
return "False"
def lambda_handler(event, context):
# set variables
region = os.environ['AWS_REGION']
org_primary_account = os.environ['ORG_PRIMARY_ACCOUNT']
security_audit_role_name = os.environ['SECURITY_AUDIT_ROLE_NAME']
external_id = os.environ['EXTERNAL_ID']
project = os.environ['PROJECT']
sns_topic_arn = os.environ['SNS_TOPIC_ARN']
vulnerable_domains = []
json_data = {"Findings": []}
boto3_session = assume_role(org_primary_account, security_audit_role_name, external_id, project, region)
client = boto3_session.client(service_name = "organizations")
try:
paginator_accounts = client.get_paginator('list_accounts')
pages_accounts = paginator_accounts.paginate()
for page_accounts in pages_accounts:
accounts = page_accounts['Accounts']
for account in accounts:
account_id = account['Id']
account_name = account['Name']
try:
boto3_session = assume_role(account_id, security_audit_role_name, external_id, project, region)
client = boto3_session.client('route53')
try:
paginator_zones = client.get_paginator('list_hosted_zones')
pages_zones = paginator_zones.paginate()
for page_zones in pages_zones:
hosted_zones = page_zones['HostedZones']
#print(json.dumps(hosted_zones, sort_keys=True, indent=2, default=json_serial))
for hosted_zone in hosted_zones:
if not hosted_zone['Config']['PrivateZone']:
print("Searching for CloudFront CNAME records in hosted zone %s" % (hosted_zone['Name']) )
try:
paginator_records = client.get_paginator('list_resource_record_sets')
pages_records = paginator_records.paginate(HostedZoneId=hosted_zone['Id'], StartRecordName='_', StartRecordType='NS')
for page_records in pages_records:
record_sets = page_records['ResourceRecordSets']
#print(json.dumps(record_sets, sort_keys=True, indent=2, default=json_serial))
for record in record_sets:
if record['Type'] in ['CNAME'] and "cloudfront.net" in record['ResourceRecords'][0]['Value']:
print("checking if " + record['Name'] + " is vulnerable to takeover")
domain_name = record['Name']
try:
result = vulnerable_cname_cloudfront_s3(domain_name)
if result == "True":
print(domain_name + "in " + account_name + " is vulnerable")
vulnerable_domains.append(domain_name)
json_data["Findings"].append({"Account": account_name, "AccountID" : str(account_id), "Domain": domain_name})
except:
pass
except:
pass
except:
pass
except:
print("ERROR: unable to assume role in " + account_name + " account " + account_id)
except Exception:
logging.exception("ERROR: Unable to list AWS accounts across organization with primary account " + org_primary_account)
try:
print(json.dumps(json_data, sort_keys=True, indent=2, default=json_serial))
#print(json_data)
client = boto3.client('sns')
if len(vulnerable_domains) > 0:
response = client.publish(
TargetArn=sns_topic_arn,
Subject="Amazon Route53 CNAME record for CloudFront distribution with missing S3 origin",
Message=json.dumps({'default': json.dumps(json_data)}),
MessageStructure='json'
)
print(response)
except:
logging.exception("ERROR: Unable to publish to SNS topic " + sns_topic_arn)
| [
"paul@celidor.net"
] | paul@celidor.net |
718a0b4008bc8444b0bd0d19273e6dd2e4ba893d | 30ec9b479250c47976f423c2384384f5b2c43bc5 | /libro/problemas_resueltos/capitulo3/problema3_11.py | 265a8932866f3518540893a270a0237b29002ced | [] | no_license | Erika001/CYPErikaGG | 99f6e771085b4bc606277f0e1764f5d394d2e1ab | e2cc3847aef96779f3a590e8a600c008651cf747 | refs/heads/master | 2022-03-28T22:29:59.752566 | 2019-11-26T04:57:38 | 2019-11-26T04:57:38 | 207,672,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | CAN1 = 0
CAN2 = 0
CAN3 = 0
CAN4 = 0
CAN = 0
VOTO = int(input("Determine el voto para un CAN del 1-4: "))
while(VOTO != 0):
if VOTO == 1:
CAN1 + 1
elif VOTO == 2:
CAN2 + 1
elif VOTO == 3:
CAN3 + 1
elif VOTO == 4:
CAN4 + 1
else:
print("Valor invalido")
VOTO = int(input("Determine el voto para un CAN del 1-4: "))
SUMV = CAN1 + CAN2 + CAN3 + CAN4
POR1 = (CAN1 / SUMV) *100
POR2 = (CAN2 / SUMV) *100
POR3 = (CAN3 / SUMV) *100
POR4 = (CAN4 / SUMV) *100
print(f"Votos candidato 1: {CAN1}, el porcentaje: {POR1}")
print(f"Votos candidato 2: {CAN2}, el porcentaje: {POR2}")
print(f"Votos candidato 3: {CAN3}, el porcentaje: {POR3}")
print(f"Votos candidato 4: {CAN4}, el porcentaje: {POR4}")
print(f"Las cantidades votantes son: {SUMV}")
print("Fin del programa")
| [
"montsedan65@gmail.com"
] | montsedan65@gmail.com |
ecdcad5e93b6e68606c0eae2f5a296d5a2b93568 | 10bb78a77e43ca912334bfe823a454ccab6440fa | /threads.py | 76d89ba56e5ed9815c2db65d68c212d54d8af0e8 | [] | no_license | aegor/pythoncourses | 59fdd660fc208e6da65789785c30117c7d30e6c1 | d77678fe30f4ce381c175493d2d053bfd9706758 | refs/heads/master | 2020-04-13T18:42:18.631788 | 2018-12-28T09:47:07 | 2018-12-28T09:47:07 | 163,382,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,342 | py | from time import sleep
import threading
import datetime
exitFlag = 0
class myThread(threading.Thread):
def __init__(self, name, counter):
threading.Thread.__init__(self)
self.threadID = counter
self.name = name
self.counter = counter
def run(self):
print
"Запуск " + self.name
# Получить lock для синхронизации потоков
threadLock.acquire()
sleep(1)
print_date(self.name, self.counter)
# Фиксатор для следующего потока
threadLock.release()
print
"Выход " + self.name
def print_date(threadName, counter):
datefields = []
today = datetime.date.today()
datefields.append(today)
print
"%s[%d]: %s" % (threadName, counter, datefields[0])
threadLock = threading.Lock()
threads = []
# Создание нового потока
thread1 = myThread("Нить", 1)
thread2 = myThread("Нить", 2)
# Запуск нового потока
thread1.start()
thread2.start()
# Добавлять потоки в список нитей
threads.append(thread1)
threads.append(thread2)
# Ждать для всех потоков, чтобы завершить
for t in threads:
t.join()
print
"Выход из программы!!!" | [
"egor@akulovs.com"
] | egor@akulovs.com |
e2b186c8118ed269e1721b6f0dae7ba702d84202 | a79a751541ae56c6bf7b706b6be5e0596412c538 | /challenges/repeated_word/repeated_word.py | d5103fc88196bd47b4091df4efbb3b79b3c118f6 | [] | no_license | surfwalker/data_structures_and_algorithms | fe8f8d23a816d1f4d146f23ee83d570836e1966a | c97f913b45639d5e0d233e9d6ddb3612f581653b | refs/heads/master | 2021-06-18T05:17:25.543665 | 2019-09-26T16:12:03 | 2019-09-26T16:12:03 | 196,094,604 | 0 | 0 | null | 2021-04-20T18:39:40 | 2019-07-09T22:51:30 | Python | UTF-8 | Python | false | false | 365 | py | from hashtable import HashTable
from linked_list import LinkedList, Node
import re
def repeated_word(str):
regex = r"\W+"
words = str.split(' ')
hashtable = HashTable()
for word in words:
word = re.sub(regex, '', word).upper()
if hashtable.contains(word):
return word
else:
hashtable.add(word, None) | [
"alexanderkreid@gmail.com"
] | alexanderkreid@gmail.com |
8408df8d2b02d6b5363aa6719b075516d1e11f4e | 7572879b1f784e1772fc0110929e63f5d16ae351 | /restorent/goodseq.py | 865d4c3da16ca04eb4697edd7d33ccd8b80af7de | [] | no_license | mukulbindal/TestSmartRef | a17445d28e0144df724ef1c221554bce25f26257 | 1d5323f33cee80e19351646d3ae76ebcddf63117 | refs/heads/master | 2022-12-12T18:58:21.990971 | 2020-03-01T15:56:50 | 2020-03-01T15:56:50 | 207,731,055 | 0 | 0 | null | 2022-12-08T06:08:59 | 2019-09-11T05:33:36 | Python | UTF-8 | Python | false | false | 171 | py | def isprime(n):
i=2
while(i*i<=n):
if n%i==0:
return False
i+=1
return True
l=[]
for i in range(2,8001):
if isprime(i):
l.append(i)
print(len(l)) | [
"noreply@github.com"
] | mukulbindal.noreply@github.com |
b686694b6387606811d73d0fb7cf0d8e7b350b03 | 2513419d2bec5c4b137cf6948af1a5bbff466c11 | /Week4/STAR_PE_Alignment.py | 18b56068aca1fefb447e5076e7727089e1863bd7 | [] | no_license | cwarden45/JHU_Coursera_GDS_Capstone | 5fff28d9852c4698559bb97807d7a90951af636d | 3dc38b46e7187dd6d6bb7fca40458a7e1735de8b | refs/heads/main | 2023-03-11T21:09:42.322689 | 2021-03-07T18:37:46 | 2021-03-07T18:37:46 | 336,924,398 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,350 | py | import sys
import re
import os
#NOTE: This code is based upon scripts in the following locations:
#https://storage.googleapis.com/rna_deg/bucket_file_list.txt (from https://sourceforge.net/projects/rnaseq-deg-methodlimit/files/)
#https://github.com/cwarden45/RNAseq_templates/tree/master/TopHat_Workflow
##required samples
unfinishedSamples = ("SRR1554534","SRR1554535","SRR1554536","SRR1554539","SRR1554556","SRR1554561","SRR1554537","SRR1554538","SRR1554541","SRR1554566","SRR1554567","SRR1554568")
##extra samples
#unfinishedSamples = ("SRR1554544","SRR1554546","SRR1554549","SRR1554551","SRR1554553","SRR1554554")
threads = "4"
ref = "/home/cwarden/Ref/STAR/hg19_Bioconductor_UCSC_GTF_50bp"
alignmentFolder = "hg19_STAR_Alignment"
readsFolder = "."
command = "mkdir " + alignmentFolder
os.system(command)
fileResults = os.listdir(readsFolder)
for file in fileResults:
result = re.search("(.*)_1.fastq.gz$",file)
if result:
sample = result.group(1)
if (sample in unfinishedSamples):
print sample
outputSubfolder = alignmentFolder +"/" + sample
command = "mkdir " + outputSubfolder
os.system(command)
read1 = re.sub(".gz$","",readsFolder + "/" + file)
command = "gunzip -c " + read1 + ".gz > " + read1
os.system(command)
read2 = re.sub("_1.fastq","_2.fastq",read1)
command = "gunzip -c " + read2 + ".gz > " + read2
os.system(command)
starPrefix = outputSubfolder +"/" + sample + "_"
command = "/opt/STAR-2.7.2d/bin/Linux_x86_64_static/STAR --genomeDir " + ref+ " --readFilesIn " + read1 + " " + read2 + " --runThreadN " +threads+ " --outFileNamePrefix " + starPrefix + " --twopassMode Basic --outSAMstrandField intronMotif"
os.system(command)
starSam = starPrefix + "Aligned.out.sam"
alnBam = outputSubfolder + "/aligned.bam"
command = "/opt/samtools/samtools view -bS " + starSam + " > " + alnBam
os.system(command)
userBam = alignmentFolder + "/" + sample + ".bam"
command = "/opt/samtools/samtools sort -o " + userBam + " " + alnBam
os.system(command)
command = "rm " + starSam
os.system(command)
command = "rm " + alnBam
os.system(command)
command = "/opt/samtools/samtools index " + userBam
os.system(command)
command = "rm " + read1
os.system(command)
command = "rm " + read2
os.system(command) | [
"noreply@github.com"
] | cwarden45.noreply@github.com |
a8ebacd5b5f22015dfa63f83ea085c2b3ecbeaba | ff64fff81ddc90a8226fda336e3289cdc857d2f7 | /Plant_KnowledgeGraph/src/com/gzu/demo/overview_view.py | 74b3d3ae4e8caff169745a5b528191872e4634a7 | [] | no_license | wvirtue/Plant-Knowledge-Graph | 8bfb2e4ea418e05d9de1300ae0268ed446b2cb4b | 3a4effcad173ea277890e8d3d555b4bf26c60f4d | refs/heads/master | 2023-01-09T18:53:45.394029 | 2020-11-13T14:31:35 | 2020-11-13T14:31:35 | 290,133,098 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,596 | py | # -*- coding: utf-8 -*-
import pinyin as pinyin
from django.shortcuts import render
from django.views.decorators import csrf
import sys
sys.path.append("..")
from toolkit.pre_load import tree
# Render the category-overview page for a taxonomy node.
def show_overview(request):
    """Build the overview page context for the node given in ?node=.

    The context contains:
      - 'node':       page title string
      - 'leaf':       HTML grid of leaf entries grouped by pinyin initial
      - 'father':     HTML list of parent categories
      - 'branch':     HTML list of child (non-leaf) categories
      - 'level_tree': tree widget HTML from the taxonomy helper
    """
    ctx ={}
    if 'node' in request.GET:
        node = request.GET['node']
        # Query the taxonomy helper for parents, sub-categories and leaves.
        fatherList = tree.get_father(node)
        branchList = tree.get_branch(node)
        leafList = tree.get_leaf(node)
        ctx['node'] = "分类专题:["+node+"]"
        rownum = 4 # number of entries per grid row
        leaf = ""
        # Bucket leaves by the first Latin letter of their pinyin initial.
        alpha_table = {}
        for alpha in range(ord('A'),ord('Z')+1):
            alpha_table[chr(alpha)] = []
        for p in leafList:
            py = pinyin.get_initial(p)
            # Default bucket is 'A' when no Latin letter is found.
            alpha = ord('A')
            for s in py:
                t = ord(s)
                # Upper-case a lower-case letter.
                if t>=ord('a') and t <= ord('z'):
                    t = t+ord('A')-ord('a')
                if t>=ord('A') and t <= ord('Z'):
                    alpha = t
                    break
            alpha_table[chr(alpha)].append(p)
        for kk in range(ord('A'),ord('Z')+1):
            k = chr(kk)
            v = alpha_table[k]
            if len(v)==0:
                continue
            add_num = rownum - len(v)%rownum # number of padding cells needed
            add_num %= rownum
            for i in range(add_num): # pad the last row with empty slots
                v.append('')
            leaf += '<div><span class="label label-warning"> '+k+' </span></div><br/>'
            for i in range(len(v)):
                if i%rownum == 0:
                    leaf += "<div class='row'>"
                leaf += '<div class="col-md-3">'
                leaf += '<p><a href="detail?title=' + v[i] + '">'
                # Truncate long titles to 10 characters for display.
                if len(v[i]) > 10:
                    leaf += v[i][:10] + '...'
                else:
                    leaf += v[i]
                leaf += '</a></p>'
                leaf += '</div>'
                if i%rownum == rownum-1:
                    leaf += "</div>"
            leaf += '<br/>'
        ctx['leaf'] = leaf
        # Parent-node navigation list.
        father = '<ul class="nav nav-pills nav-stacked">'
        for p in fatherList:
            father += '<li role="presentation"> <a href="overview?node='
            father += p + '">'
            father += '<i class="fa fa-hand-o-right" aria-hidden="true"></i> ' + p + '</a></li>'
        father += '</ul>'
        if len(fatherList) == 0:
            father = '<p>已是最高级分类</p>'
        ctx['father'] = father
        # Non-leaf (branch) category list.
        branch = '<ul class="nav nav-pills nav-stacked">'
        for p in branchList:
            branch += '<li role="presentation"> <a href="overview?node='
            branch += p + '">'
            branch += '<i class="fa fa-hand-o-right" aria-hidden="true"></i> ' + p + '</a></li>'
        branch += '</ul>'
        if len(branchList) == 0:
            branch = '<p>已是最低级分类</p>'
        ctx['branch'] = branch
        # Build the category-tree widget.
        level_tree = tree.create_UI(node)
        ctx['level_tree'] = level_tree
    return render(request, "overview.html", ctx)
| [
"1406436535@qq.com"
] | 1406436535@qq.com |
530506920d52d6a2efd16f35fe5e98d71aef13aa | c35b1d9dd99c7b0ad3e8bee3293df7042f9ae39a | /setup.py | 4cf99297b34f551fb2eff5aae9e9ebf4a35f0ce5 | [
"MIT"
] | permissive | grengojbo/django-flatpages-plus | 467b2e82d3f2d3c71629ddab5288e1416e5ddeda | 29af987565dd4c87fa3b0751105b5521e2690374 | refs/heads/master | 2020-12-24T20:42:23.064557 | 2014-03-02T17:29:22 | 2014-03-02T17:29:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | from setuptools import setup, find_packages
# Packaging metadata for the django-flatpages-plus distribution.
setup(
    name = 'django-flatpages-plus',
    version = '0.1',
    description = 'A more robust FlatPage app for Django.',
    author = 'Dana Woodman',
    author_email = 'dana@danawoodman.com',
    url = 'https://github.com/danawoodman/django-flatpages-plus',
    license = 'MIT',
    packages = find_packages(),
)
| [
"dana@danawoodman.com"
] | dana@danawoodman.com |
b97fb55d1d42347a1c75e55752aa6e6c1587cce1 | 342cd75882fbe61c97c8e6abe68baabac058f89b | /xalpha/misc.py | d134fe8d7993fd847215b4f942a940b9c6b4c474 | [
"MIT"
] | permissive | refraction-ray/xalpha | c8b787dd88810fa32e5e2e223854fd7dbe4e3060 | ad5c9d91942bbcba5f4e27af2b26abdb83056b5d | refs/heads/master | 2023-08-05T10:38:14.014019 | 2023-07-24T11:30:06 | 2023-07-24T11:30:06 | 143,284,193 | 1,851 | 384 | MIT | 2022-02-20T14:03:09 | 2018-08-02T11:12:10 | Python | UTF-8 | Python | false | false | 9,598 | py | # -*- coding: utf-8 -*-
"""
modules for misc crawler without unfied API
"""
import re
import pandas as pd
import datetime as dt
import logging
import numpy as np
from bs4 import BeautifulSoup
from functools import lru_cache
logger = logging.getLogger(__name__)
from xalpha.cons import (
rget,
rpost,
rget_json,
rpost_json,
today_obj,
region_trans,
holidays,
_float,
)
from xalpha.universal import lru_cache_time
from xalpha.exceptions import ParserFailure
# 该模块只是保存其他一些爬虫的函数,其接口很不稳定,不提供文档和测试,且随时增删,慎用!
@lru_cache_time(ttl=600, maxsize=64)
def get_ri_status(suburl=None):
    """Scrape a status table from ninwin.cn into a DataFrame.

    broken due to the website redesign

    :param suburl: query-string suffix; defaults to the convertible-bond list.
    :return: ``pd.DataFrame`` with one column per ``<th>`` header.
    """
    if not suburl:
        suburl = "m=cb&a=cb_all"  # convertible bonds
    # url = "http://www.richvest.com/index.php?"
    url = "http://www.ninwin.cn/index.php?"
    url += suburl
    r = rget(url, headers={"user-agent": "Mozilla/5.0"})
    b = BeautifulSoup(r.text, "lxml")
    # Collect column headers from <th> cells.
    cl = []
    for c in b.findAll("th"):
        cl.append(c.text)
    nocl = len(cl)
    # Chunk the flat <td> stream into rows of nocl cells each; a trailing
    # partial chunk (fewer than nocl cells) is dropped.
    rl = []
    for i, c in enumerate(b.findAll("td")):
        if i % nocl == 0:
            r = []
        r.append(c.text)
        if i % nocl == nocl - 1:
            rl.append(r)
    return pd.DataFrame(rl, columns=cl)
@lru_cache_time(ttl=120)
def get_jsl_cb_status():
    """Fetch the jisilu.cn convertible-bond list (one dict per bond row)."""
    # The timestamp query parameter defeats intermediate caches.
    url = "https://www.jisilu.cn/data/cbnew/cb_list/?___jsl=LST___t=%s" % (
        int(dt.datetime.now().timestamp() * 100)
    )
    r = rpost_json(url)
    return [item["cell"] for item in r["rows"]]
@lru_cache_time(ttl=7200, maxsize=512)
def get_sh_status(category="cb", date=None):
    """Query Shanghai Stock Exchange status data.

    :param category: "cb"/"kzz" for convertible bonds, "fund"/"fs" for LOF fund scale.
    :param date: only used for the fund query; defaults to today, accepts
        "YYYYMMDD" with optional "/" or "-" separators.
    :return: ``pd.DataFrame`` built from the JSON ``result`` list.
    :raises ParserFailure: on an unrecognized category.
    """
    url = "http://query.sse.com.cn/commonQuery.do?jsonCallBack=&"
    if category in ["cb", "kzz"]:
        url += "isPagination=false&sqlId=COMMON_BOND_KZZFLZ_ALL&KZZ=1"
    elif category in ["fund", "fs"]:
        if not date:
            date = today_obj().strftime("%Y%m%d")
        date = date.replace("/", "").replace("-", "")
        url += "&sqlId=COMMON_SSE_FUND_LOF_SCALE_CX_S&pageHelp.pageSize=10000&FILEDATE={date}".format(
            date=date
        )
    else:
        raise ParserFailure("unrecoginzed category %s" % category)
    # The Referer header is required by the SSE endpoint.
    r = rget_json(
        url,
        headers={
            "user-agent": "Mozilla/5.0",
            "Host": "query.sse.com.cn",
            "Referer": "http://www.sse.com.cn/market/bonddata/data/convertible/",
        },
    )
    return pd.DataFrame(r["result"])
@lru_cache_time(ttl=7200, maxsize=512)
def get_sz_status(category="cb", date=None):
    """Query Shenzhen Stock Exchange convertible-bond status for a date.

    :param category: only "cb"/"kzz" is implemented; other values return None.
    :param date: "YYYYMMDD" (optionally with "/" or "-"); defaults to today.
    :return: DataFrame with normalized Chinese column names, or None when
        the exchange returns no rows for the date.
    """
    if not date:
        date = today_obj().strftime("%Y%m%d")
    date = date.replace("/", "").replace("-", "")
    # Endpoint expects YYYY-MM-DD.
    date = date[:4] + "-" + date[4:6] + "-" + date[6:]
    url = "http://www.szse.cn/api/report/ShowReport/data?"
    if category in ["cb", "kzz"]:
        # Paginate until an empty page is returned.
        pageno = 1
        data = []
        while True:
            suburl = "SHOWTYPE=JSON&CATALOGID=1277&TABKEY=tab1&PAGENO={pageno}&txtDate={date}".format(
                date=date, pageno=pageno
            )
            r = rget_json(url + suburl)
            if r[0]["data"]:
                data.extend(r[0]["data"])
                pageno += 1
            else:
                break
        # df = pd.DataFrame(r[0]["data"])
        df = pd.DataFrame(data)
        if len(df) == 0:
            return
        # The bond code and short name are embedded in the kzjcurl field as
        # "name&DM=code&...": extract both with regexes.
        pcode = re.compile(r".*&DM=([\d]*)&.*")
        pname = re.compile(r"^([^&]*)&.*")
        df["证券代码"] = df["kzjcurl"].apply(lambda s: re.match(pcode, s).groups()[0])
        df["证券简称"] = df["kzjcurl"].apply(lambda s: re.match(pname, s).groups()[0])
        df["上市日期"] = pd.to_datetime(df["ssrq"])
        df["发行量"] = df["fxlnew"]
        df["换股价格"] = df["kzjg"]
        df["未转股数量"] = df["kzsl"]
        df["未转股比例"] = df["kzbl"]
        df["转股截止日期"] = pd.to_datetime(df["kzzzrq"])
        df = df[["证券代码", "证券简称", "上市日期", "发行量", "换股价格", "未转股数量", "未转股比例", "转股截止日期"]]
        return df
@lru_cache_time(ttl=7200, maxsize=512)
def get_sz_fs(code):
    """Return the latest fund size (in units, dqgm * 1e4) of a SZ-listed LOF.

    :param code: fund code string as accepted by the SZSE endpoint.
    """
    url = "http://www.szse.cn/api/report/ShowReport/data?SHOWTYPE=JSON&\
CATALOGID=1945_LOF&txtQueryKeyAndJC={code}".format(
        code=code
    )
    r = rget_json(url)
    # dqgm is reported in units of 10,000; scale to absolute value.
    return _float(r[0]["data"][0]["dqgm"]) * 1e4
def get_tdx_holidays(holidays=None, format="%Y-%m-%d"):
    """Scrape market holidays from tdx.com.cn and merge them into a dict.

    :param holidays: optional dict mapping region code -> list of date strings;
        updated in place when given, otherwise a fresh dict is created.
    :param format: strftime format used for the emitted date strings.
    :return: the (possibly newly created) holidays dict.
    """
    r = rget("https://www.tdx.com.cn/url/holiday/")
    # The page is GBK encoded.
    r.encoding = "gbk"
    b = BeautifulSoup(r.text, "lxml")
    # Holiday rows live in a <textarea>, one "date|name|region|..." per line.
    l = b.find("textarea").string.split("\n")
    if not holidays:
        holidays = {}
    for item in l:
        if item.strip():
            c = item.split("|")
            # Only keep regions known to the region_trans mapping.
            if c[2] in region_trans:
                rg = region_trans[c[2]]
                tobj = dt.datetime.strptime(c[0], "%Y%m%d")
                tstr = tobj.strftime(format)
                if rg not in holidays:
                    holidays[rg] = [tstr]
                elif tstr not in holidays[rg]:
                    holidays[rg].append(tstr)
    return holidays
def get_163_fundamentals(code, category="lrb"):
    """Download a fundamentals statement CSV from 163.com, indexed by report date.

    :param code: stock code accepted by the 163 endpoint.
    :param category: statement type: "lrb" (income), "xjllb" (cash flow),
        "zcfzb" (balance sheet).
    :return: transposed DataFrame (one row per report date).
    """
    # category xjllb zcfzb
    url = "http://quotes.money.163.com/service/{category}_{code}.html".format(
        category=category, code=code
    )
    logger.debug("Fetching from %s . in `get_163_fundamentals`" % url)
    df = pd.read_csv(url, encoding="gbk")
    df = df.set_index("报告日期")
    return df.T
@lru_cache()
def get_ttjj_suggestions(keyword):
    """Return fund search suggestions from eastmoney for the given keyword."""
    url = "http://fundsuggest.eastmoney.com/FundSearch/api/FundSearchAPI.ashx?callback=&m=1&key={key}".format(
        key=keyword
    )
    r = rget_json(url)
    return r["Datas"]
def get_cb_historical_from_ttjj(code):
    """Fetch the full price/value history of a convertible bond from eastmoney.

    :param code: bond code, optionally prefixed with "SH"/"SZ" (prefix stripped).
    :return: DataFrame with columns date, close, bond_value, swap_value.
    """
    if code.startswith("SH") or code.startswith("SZ"):
        code = code[2:]
    params = {
        "type": "RPTA_WEB_KZZ_LS",
        "sty": "ALL",
        "source": "WEB",
        "p": "1",
        "ps": "8000",
        "st": "date",
        "sr": "1",
        "filter": "(zcode={code})".format(code=code),
    }
    url = "http://datacenter.eastmoney.com/api/data/get"
    data = []
    r = rget_json(url, params=params)
    data.extend(r["result"]["data"])
    # Follow pagination when more than one page of history exists.
    if int(r["result"]["pages"]) > 1:
        for i in range(2, int(r["result"]["pages"]) + 1):
            params["p"] = str(i)
            r = rget_json(url, params=params)
            data.extend(r["result"]["data"])
    df = pd.DataFrame(data)
    # Normalize vendor column names to the library's conventions.
    df["date"] = pd.to_datetime(df["DATE"])
    df["bond_value"] = df["PUREBONDVALUE"]
    df["swap_value"] = df["SWAPVALUE"]
    df["close"] = df["FCLOSE"]
    return df[["date", "close", "bond_value", "swap_value"]]
@lru_cache()
def get_fund_list(ft):
    """Return the list of fund codes for a fund type from eastmoney.

    :param ft: fund type: hh, zq, zs, gp, qdii, fof.
    """
    # hh, zq, zs, gp, qdii, fof
    r = rget(
        "http://fund.eastmoney.com/data/FundGuideapi.aspx?\
dt=0&ft={ft}&sd=&ed=&sc=z&st=desc&pi=1&pn=10000&zf=diy&sh=list".format(
            ft=ft
        ),
        headers={
            "Host": "fund.eastmoney.com",
            "Referer": "http://fund.eastmoney.com/daogou/",
        },
    )
    # SECURITY: eval() on a remote response executes whatever the server sends;
    # a JSON parse after stripping the JS wrapper would be safer.
    d = eval(r.text.split("=")[1].replace("null", "None"))
    return [code.split(",")[0] for code in d["datas"] if code.strip()]
def update_caldate(path, year, path_out=None):
    """
    Update caldate.csv based on ``cons.holidays["CN"]``

    Appends one row per calendar day of *year* with is_open=0 on weekends
    and CN holidays, 1 otherwise.

    :param path: existing calendar CSV to extend.
    :param year: year to append; must be later than the file's last year.
    :param path_out: output path; defaults to overwriting *path*.
    :raises ValueError: if *year* is already covered by the file.
    """
    r = {"cal_date": [], "is_open": []}
    for d in pd.date_range(str(year) + "-01-01", str(year) + "-12-31"):
        r["cal_date"].append(d.strftime("%Y-%m-%d"))
        # weekday() 5/6 are Saturday/Sunday.
        if d.weekday() in [5, 6]:
            r["is_open"].append(0)
        elif d.strftime("%Y-%m-%d") in holidays["CN"]:
            r["is_open"].append(0)
        else:
            r["is_open"].append(1)
    ncal = pd.DataFrame(r)
    cal = pd.read_csv(path)
    # The last row's first four characters are the latest covered year.
    if int(year) <= int(cal.iloc[-1]["cal_date"][:4]):
        raise ValueError("We already have cal date for year %s" % year)
    tcal = pd.concat([cal, ncal], ignore_index=True)
    if path_out is None:
        path_out = path
    tcal.to_csv(path_out, index=False)
## Common ticker watchlists for joint analysis; contributions welcome :)
# Strategic-placement closed-end funds.
zlps = ["SZ160142", "SZ161131", "SZ161728", "SH501186", "SH501188", "SH501189"]
# STAR-market (sci-tech innovation board) closed-end funds.
kcfj = [
    "SH501073",
    "SH501075",
    "SH501076",
    "SH501078",
    "SH501079",
    "SH501080",
    "SH501081",
    "SH501082",
    "SH501082",
    "SH501085",
]
# Hybrid (mixed equity/bond) fund candidates.
hh_cand = [
    "001500",
    "001278",
    "001103",
    "519697",
    "001182",
    "001510",
    "001508",
    "519700",
    "519732",
    "519056",
    "213001",
    "161606",
    "519091",
    "000717",
    "000878",
    "000452",
]
def summary_cb(df, l=None, cutoff=5):
    """Summarize a convertible-bond table: medians/means of key columns.

    not functional since richinvest change

    :param df: DataFrame with the richinvest Chinese column layout; mutated
        in place (columns are stripped/converted).
    :param l: optional list of bond codes to restrict the summary to.
    :param cutoff: number of extreme values trimmed from each end before
        computing median/mean; 0 disables trimming.
    :return: dict of summary statistics keyed by Chinese labels.
    """
    for c in ["转债代码"]:
        df[c] = df[c].apply(lambda s: s.strip())
    for c in ["老式双低", "转债价格", "股票市值", "转债余额"]:
        df[c] = df[c].apply(_float)
    # Percentage columns arrive as "xx%" strings.
    for c in ["转股溢价率", "价值溢价", "税后收益率"]:
        df[c] = df[c].apply(lambda s: float(str(s).strip("%")))
    if l is not None:
        df = df[df["转债代码"].isin(l)]
    d = {}
    for c in ["老式双低", "转债价格", "转股溢价率", "价值溢价", "税后收益率", "股票市值"]:
        if cutoff == 0:
            yj = sorted(df[c])
        else:
            yj = sorted(df[c])[cutoff:-cutoff]
        d[c + "中位数"] = yj[int(len(yj) / 2)]
        d[c + "均值"] = round(np.mean(yj), 3)
    d["破面值转债数目"] = len([v for v in df["转债价格"] if v < 100])
    d["总转债余额"] = round(np.sum(df["转债余额"]), 0)
    return d
| [
"kcanamgal@foxmail.com"
] | kcanamgal@foxmail.com |
dd6a0aa71f41894a3ed984a0d468246fedccb363 | 7546c1f8df5845ba58c7ec4f710c4b334afaece4 | /exercise12/exe12.py | af92ee5a3cce446259a6df9b5b10b3c5fee71ab5 | [] | no_license | lagougou/lagougou.github.io | 09a9d40f5bd33b2d06482a9f73b4089bebcec303 | c5e7172da2c107d08b5a24ce999ee2cba7766e1e | refs/heads/master | 2021-01-10T08:03:20.763615 | 2017-04-07T08:55:41 | 2017-04-07T08:55:41 | 52,723,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,124 | py | #encoding=utf-8
"""
第 0012 题: 敏感词文本文件 filtered_words.txt,里面的内容 和 0011题一样,当用户输入敏感词语,
则用 星号 * 替换,例如当用户输入「北京是个好城市」,则变成「**是个好城市」。
"""
import sys,re
reload(sys)
sys.setdefaultencoding("utf-8")
def replace_sensitive_words(filename):
content=[]
enter=unicode(raw_input('Enter your words:'))
word1=re.compile(u'[\u4e00-\u9fa5]+')
word2=re.compile(r'[a-zA-Z]+')
with open(filename) as f:
for line in f:
line=line.decode('utf8')
words=word1.findall(line)+word2.findall(line)
for x in words:
content.append(x)
for x in content:
a=0
if x in enter:
while True:
index=enter.find(x,a)
if index!=-1:
enter=enter[:index]+'*'*len(x)+enter[index+len(x):]
a=a+1
else:
break
return enter
return enter
if __name__=='__main__':
print replace_sensitive_words('words.txt')
| [
"jiangruitc@163.com"
] | jiangruitc@163.com |
04f87145bb6614d235315423368be0d9d185d0d1 | ec8926c3815f1f6b64ab701c3c172a3c3abb2202 | /special_escape.py | 87c67b23bb794d4e5586b3024db4b52e518a02e1 | [] | no_license | matanki-saito/EU4JPModAppendixI | 787813f79da5d2337bbfbddb8b2ea0a835c351b4 | 6e4a4cafdf9b70a1df858a5795ef9e65cfa8acc0 | refs/heads/main | 2023-09-01T13:01:28.186557 | 2023-08-28T20:30:38 | 2023-08-28T20:30:38 | 128,426,091 | 3 | 1 | null | 2021-10-02T17:49:22 | 2018-04-06T17:35:24 | Python | UTF-8 | Python | false | false | 10,292 | py | import argparse
import os
import pathlib
# Code points whose appearance as a high or low byte must be escaped in
# EU4 localization files (each would otherwise be parsed as markup/control).
eu4_escape_targets = [
    ord("¤"),
    ord("£"),
    ord("§"),
    ord("$"),
    ord("["),
    ord("]"),
    0x00, # null character
    ord("\\"),
    ord(" "),
    0x0D, # carriage return
    0x0A, # line feed
    ord("\""),
    ord("/"),
    ord("{"),
    ord("}"),
    ord("@"),
    ord(";"),
    0x80,
    0x7E,
    ord("½"),
    ord("_"),
    ord("#") # yml comment marker
]
# Same idea for CK2, expressed directly as byte values.
ck2_escape_targets = [
    0xA4,
    0xA3,
    0xA7,
    0x24,
    0x5B,
    0x00,
    0x5C,
    0x20,
    0x0D,
    0x0A,
    0x22,
    0x7B,
    0x7D,
    0x40,
    0x3B,
    0x80,
    0x7E,
    0xBD,
    0x5F
]
def generate_encoder(game_type, ext):
    """Build the escape-encoder function for a game/file-extension pair.

    :param game_type: "eu4" or "ck2".
    :param ext: file extension ("yml"/"txt" for eu4, "csv"/"txt" for ck2).
    :return: a function mapping an iterable of code points to a list of
        escaped code points.
    :raises Exception: on an unknown game type or extension.
    """
    # Per-game escape configuration.
    if game_type == "eu4":
        escape_targets = eu4_escape_targets
        high_byte_shift = -9
        low_byte_shift = 14
    elif game_type == "ck2":
        escape_targets = ck2_escape_targets
        high_byte_shift = -9
        low_byte_shift = 15
    else:
        raise Exception("typeが不明")
    # Variant: map UCS-2 to CP1252-compatible points before escaping.
    def ___(src_array):
        return __(map(ucs_to_cp1252, src_array))
    # Variant: escape first, then map CP1252 points back to UCS-2.
    def ____(src_array):
        return map(cp1252_to_ucs2, __(src_array))
    def __(src_array):
        """Core converter: escape multi-byte code points.

        :param src_array: iterable of code points.
        :return: list of escaped code points.
        """
        result = []
        for code_point in src_array:
            # Characters outside the BMP are dropped.
            if code_point > 0xFFFF:
                print("not convert character")
                continue
            # Null characters are dropped.
            if code_point == 0:
                print("Found null character")
                continue
            high_byte = (code_point >> 8) & 0x000000FF
            low_byte = code_point & 0x000000FF
            # Single-byte code points pass through unescaped.
            if high_byte == 0:
                result.append(code_point)
                continue
            # Escape marker 0x10..0x13 encodes which bytes were shifted.
            escape_char = 0x10
            if high_byte in escape_targets:
                escape_char += 2
            if low_byte in escape_targets:
                escape_char += 1
            if escape_char == 0x11:
                low_byte = low_byte + low_byte_shift
            elif escape_char == 0x12:
                high_byte = high_byte + high_byte_shift
            elif escape_char == 0x13:
                low_byte = low_byte + low_byte_shift
                high_byte = high_byte + high_byte_shift
            else:
                pass
            result.append(escape_char)
            result.append(low_byte)
            result.append(high_byte)
        return result
    # Select the variant appropriate to the target file format.
    if game_type == "eu4":
        if ext == "yml":
            return ____
        elif ext == "txt":
            return ___
        else:
            raise Exception()
    elif game_type == "ck2":
        if ext in ["csv", "txt"]:
            return ___
        else:
            raise Exception()
    else:
        raise Exception()
# Mapping of UCS-2 code points to the CP1252 0x80-0x9F range (the Windows-1252
# "C1" area that differs from ISO-8859-1). Gaps (0x81, 0x8D, 0x8F, 0x90, 0x9D)
# are undefined in CP1252.
ucs2_to_cp1252_table = {
    0x20AC: 0x80,  # €
    # 0x81
    0x201A: 0x82,  # ‚
    0x0192: 0x83,  # ƒ
    0x201E: 0x84,  # „
    0x2026: 0x85,  # …
    0x2020: 0x86,  # †
    0x2021: 0x87,  # ‡
    0x02C6: 0x88,  # ˆ
    0x2030: 0x89,  # ‰
    0x0160: 0x8A,  # Š
    0x2039: 0x8B,  # ‹
    0x0152: 0x8C,  # Œ
    # 0x8D
    0x017D: 0x8E,  # Ž
    # 0x8F
    # 0x90
    0x2018: 0x91,  # ‘
    0x2019: 0x92,  # ’
    0x201C: 0x93,  # “
    0x201D: 0x94,  # ”
    0x2022: 0x95,  # •
    0x2013: 0x96,  # –
    0x2014: 0x97,  # —
    0x02DC: 0x98,  # ˜
    0x2122: 0x99,  # ™
    0x0161: 0x9A,  # š
    0x203A: 0x9B,  # ›
    0x0153: 0x9C,  # œ
    # 0x9D
    0x017E: 0x9E,  # ž
    0x0178: 0x9F   # Ÿ
}
# Inverse of the table above.
cp1252_to_ucs2_table = {
    0x80: 0x20AC,  # €
    # 0x81
    0x82: 0x201A,  # ‚
    0x83: 0x0192,  # ƒ
    0x84: 0x201E,  # „
    0x85: 0x2026,  # …
    0x86: 0x2020,  # †
    0x87: 0x2021,  # ‡
    0x88: 0x02C6,  # ˆ
    0x89: 0x2030,  # ‰
    0x8A: 0x0160,  # Š
    0x8B: 0x2039,  # ‹
    0x8C: 0x0152,  # Œ
    # 0x8D
    0x8E: 0x017D,  # Ž
    # 0x8F
    # 0x90
    0x91: 0x2018,  # ‘
    0x92: 0x2019,  # ’
    0x93: 0x201C,  # “
    0x94: 0x201D,  # ”
    0x95: 0x2022,  # •
    0x96: 0x2013,  # –
    0x97: 0x2014,  # —
    0x98: 0x02DC,  # ˜
    0x99: 0x2122,  # ™
    0x9A: 0x0161,  # š
    0x9B: 0x203A,  # ›
    0x9C: 0x0153,  # œ
    # 0x9D
    0x9E: 0x017E,  # ž
    0x9F: 0x0178   # Ÿ
}
def ucs_to_cp1252(code_point):
    """Map a UCS-2 code point to its CP1252 byte, or pass it through unchanged."""
    # dict.get avoids the double lookup of `in` + indexing.
    return ucs2_to_cp1252_table.get(code_point, code_point)
def cp1252_to_ucs2(code_point):
    """Map a CP1252 byte to its UCS-2 code point, or pass it through unchanged."""
    # dict.get avoids the double lookup of `in` + indexing.
    return cp1252_to_ucs2_table.get(code_point, code_point)
def generate_printer(game_type, ext):
    """Build the output-writer function for a game/file-extension pair.

    :param game_type: "eu4" or "ck2".
    :param ext: file extension ("yml"/"txt" for eu4, "csv"/"txt" for ck2).
    :return: a printer function taking (src_array, out_file_path).
    :raises Exception: on an unknown game type or extension.
    """
    # Writes code points as UTF-8 text with a BOM.
    def utf8_printer(src_array, out_file_path):
        with open(out_file_path, "wt", encoding="utf_8_sig") as fw:
            text = "".join(map(chr, src_array))
            fw.write(text)
    def cp1252_like_printer(src_array, out_file_path):
        """Write code points as raw bytes after CP1252-style mapping.

        Not strictly CP1252: unassigned code points such as 0x81 or 0x90 may
        also appear, hence binary-mode writing.
        """
        w = bytearray(map(ucs_to_cp1252, src_array))
        with open(out_file_path, "wb") as fw:
            fw.write(w)
    if game_type == "eu4":
        if ext == "yml":
            return utf8_printer
        elif ext == "txt":
            return cp1252_like_printer
        else:
            raise Exception()
    elif game_type == "ck2":
        if ext in ["csv", "txt"]:
            return cp1252_like_printer
        else:
            raise Exception()
    else:
        raise Exception()
def target_is_directory(params):
    """Recursively convert every .yml/.csv/.txt file under params.src.

    Output location: alongside each source file with a ``.encode`` suffix
    when no -out was given, otherwise mirrored under the -out directory.
    """
    is_out_dir = os.path.isdir(str(params.out))
    # Walk all files under the source directory.
    for file_path in pathlib.Path(params.src).glob('**/*.*'):
        if file_path.suffix not in ['.yml', '.csv', '.txt']:
            continue
        # No -out given: write next to the source file with a .encode suffix.
        if params.out is None:
            out_file_path = os.path.join(
                os.path.dirname(os.path.abspath(str(file_path))),
                os.path.basename(str(file_path)) + ".encode"
            )
        # -out is an existing directory: mirror the relative path under it.
        # NOTE(review): the "\\" separator assumes Windows paths — confirm.
        elif is_out_dir:
            out_file_path = os.path.join(
                str(params.out),
                str(file_path).replace(str(params.src) + "\\", "")
            )
            dir_path = os.path.dirname(str(out_file_path))
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)
        else:
            raise Exception("出力先が不正")
        do_file(in_file_path=file_path,
                out_file_path=out_file_path,
                encoder=generate_encoder(params.type, os.path.splitext(str(file_path))[1][1:]),
                printer=generate_printer(params.type, os.path.splitext(str(file_path))[1][1:]),
                is_bom=params.bom)
def target_is_file(params):
    """Convert the single file params.src.

    Output location: next to the source with a ``.encode`` suffix when no
    -out was given; inside -out (same basename) when -out is a directory;
    otherwise -out is treated as the full output path.
    """
    # No -out given: write next to the source file with a .encode suffix.
    if params.out is None:
        out_file_path = os.path.join(
            os.path.dirname(os.path.abspath(params.src)),
            os.path.basename(params.src) + ".encode"
        )
    # -out is an existing directory: keep the source basename inside it.
    elif os.path.isdir(params.out):
        out_file_path = os.path.join(
            params.out,
            os.path.basename(params.src)
        )
    # Otherwise treat -out as a full output file path.
    elif params.out != "":
        out_file_path = params.out
    else:
        raise Exception("出力先が不正")
    do_file(in_file_path=params.src,
            out_file_path=out_file_path,
            encoder=generate_encoder(params.type, os.path.splitext(str(params.src))[1][1:]),
            printer=generate_printer(params.type, os.path.splitext(str(params.src))[1][1:]),
            is_bom=params.bom)
def do_file(in_file_path, out_file_path, encoder, printer, is_bom):
    """Convert one file: read it as UTF-8 text, escape it, write the result.

    :param in_file_path: path of the file to read.
    :param out_file_path: path passed through to *printer*.
    :param encoder: callable mapping an iterable of code points to the
        escaped sequence.
    :param printer: callable writing the escaped sequence to disk.
    :param is_bom: True when the input is UTF-8 with a BOM.
    """
    encoding = 'utf_8_sig' if is_bom else 'utf_8'
    # Read the whole file and hand its code points through encoder to printer.
    with open(in_file_path, "rt", encoding=encoding) as reader:
        code_points = map(ord, reader.read())
        printer(src_array=encoder(src_array=code_points),
                out_file_path=out_file_path)
def generate_default_arg_parser():
    """Build the command-line parser: positional src, -out, -type, --bom.

    :return: configured ``argparse.ArgumentParser``.
    """
    parser = argparse.ArgumentParser(description='Process some integers.')
    # Source file or directory to convert (positional).
    parser.add_argument('src', metavar='src', type=str,
                        help='source file or directory.')
    # Optional output directory.
    parser.add_argument('-out', metavar='X', dest='out',
                        help='output directory')
    # Game type, defaulting to EU4.
    parser.add_argument('-type', metavar='X', dest='type', default="eu4",
                        help='eu4 or ck2')
    # Flag: treat input as UTF-8 with BOM.
    parser.add_argument('--bom', action='store_true', dest='bom',
                        help='utf8 with bom')
    return parser
def special_escape(arg_params):
    """Dispatch the conversion on whether src is a file or a directory.

    :param arg_params: parsed argparse namespace (src, out, type, bom).
    :raises Exception: when src is neither an existing file nor directory.
    """
    # File or directory?
    if os.path.isfile(arg_params.src):
        target_is_file(params=arg_params)
    elif os.path.isdir(arg_params.src):
        target_is_directory(params=arg_params)
    else:
        raise Exception("srcがみつからない")
# Command-line entry point: parse arguments and run the conversion.
if __name__ == '__main__':
    """
    entry-point
    """
    parser = generate_default_arg_parser()
    params = parser.parse_args()
    special_escape(arg_params=params)
| [
"matanki.saito@gmail.com"
] | matanki.saito@gmail.com |
7aa70c42f2ce2ea8e7f44a0a02b47e3e2ab3c449 | 2045db7b3778c8b38ad38e041c56ade542ebdc03 | /magento_bridge/models/magento_website.py | 0489617d4511bee5f4ecffbfc4143617b19e9dd0 | [] | no_license | kkluska/prolighting | 09410b1ee76844a68409afa9c69bfe325a1f3db3 | 61c4e30da80fe6fdcc63bce47781dd27c8d241ac | refs/heads/master | 2022-07-07T06:21:30.904405 | 2020-05-14T11:31:06 | 2020-05-14T13:06:49 | 263,452,038 | 0 | 1 | null | 2020-05-14T13:06:51 | 2020-05-12T21:04:56 | null | UTF-8 | Python | false | false | 2,042 | py | # -*- coding: utf-8 -*-
##########################################################################
#
# Copyright (c) 2015-Present Webkul Software Pvt. Ltd. (<https://webkul.com/>)
# See LICENSE file for full copyright and licensing details.
# License URL : <https://store.webkul.com/license.html/>
#
##########################################################################
from odoo import api, fields, models
class MagentoWebsite(models.Model):
    """Odoo model mirroring a Magento website record."""
    _name = "magento.website"
    _description = "Magento Website"
    name = fields.Char(string='Website Name', size=64, required=True)
    # NOTE(review): "Webiste" typo is in the user-visible label; left as-is.
    website_id = fields.Integer(string='Magento Webiste Id', readonly=True)
    instance_id = fields.Many2one(
        'connector.instance', string='Magento Instance')
    ecommerce_channel = fields.Selection(
        related="instance_id.ecomm_type",
        string="eCommerce Channel", store=True)
    code = fields.Char(string='Code', size=64, required=True)
    sort_order = fields.Char(string='Sort Order', size=64)
    default_group_id = fields.Integer(string='Default Store', readonly=True)
    is_default = fields.Boolean(string='Is Default', readonly=True)
    create_date = fields.Datetime(string='Created Date', readonly=True)
    @api.model
    def _get_website(self, website):
        """Return the record matching *website*, creating it when absent.

        :param website: dict with Magento website fields (website_id, name,
            code, is_default, sort_order, default_group_id).
        :return: the matching or newly created ``magento.website`` record.
        """
        # Initialized to 0 as a "not found" placeholder; always overwritten.
        websiteObj = 0
        instanceId = self._context.get('instance_id')
        # Look up by (Magento website id, connector instance).
        websiteObjs = self.search(
            [('website_id', '=', website['website_id']), ('instance_id', '=', instanceId)])
        if websiteObjs:
            websiteObj = websiteObjs[0]
        else:
            websiteDict = {
                'name' : website['name'],
                'code' : website['code'],
                'instance_id' : instanceId,
                'website_id' : website['website_id'],
                'is_default' : website['is_default'],
                'sort_order' : website['sort_order'],
                'default_group_id' : website['default_group_id']
            }
            websiteObj = self.create(websiteDict)
        return websiteObj
| [
"tkawai21@users.noreply.github.com"
] | tkawai21@users.noreply.github.com |
55fa64187eba903df8566213bae575bcd62e7c4a | 3793a834d9fc4df1295f637d56cda5cbb293c80c | /bs4_douban_movies/bs4_douban_movies.py | 7292748c981aa58a695c0d805e0ca9de1a17e33e | [] | no_license | Zoie1996/python_spider | 211b5c1a69350f37b5d3ef6d611541c738329467 | bbff442f516b4999f2e6465a143f5eb220241ec9 | refs/heads/master | 2020-03-19T14:09:22.453478 | 2019-01-30T02:28:35 | 2019-01-30T02:28:35 | 136,611,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,703 | py | import random
from urllib import parse
import requests
import user_agents
def get_response(url):
    """
    Step 1: fetch the URL and return its JSON payload.

    :param url: the query URL.
    """
    # Random user agent plus Douban referer to avoid trivial bot blocking.
    headers = {
        'User-Agent': random.choice(user_agents.agents),
        'Referer': 'https://movie.douban.com/'
    }
    resp = requests.get(url, headers=headers)
    # resp.json() returns the AJAX-loaded JSON data.
    return resp.json()
def get_tags(url):
    """
    Step 2: fetch the list of movie category tags.
    """
    resp = get_response(url)
    tags = resp['tags']
    return tags
def get_result(url, tag):
    """
    Step 3: fetch movie titles and ratings for one category tag.

    :param url: the query URL.
    :param tag: the category tag (used only for formatting the output lines).
    :return: one formatted line per movie, joined into a single string.
    """
    resp = get_response(url)
    result_list = resp['subjects']
    results = ''
    for result in result_list:
        results += '%s电影: %s 评分:%s \n' % (tag, result['title'], result['rate'])
    return results
def save_result(result):
    """Step 4: append one query result (plus a blank line) to the output file."""
    with open('电影.txt', 'a', encoding='utf-8') as out:
        out.write(result + '\n\n')
def main():
    """Fetch all category tags, then scrape and save each category's movies."""
    # URL returning the list of category tags.
    tag_url = 'https://movie.douban.com/j/search_tags?type=movie&tag=%E7%83%AD%E9%97%A8&source='
    tags = get_tags(tag_url)
    for tag in tags:
        # Assemble the per-tag query URL.
        search = parse.urlencode({'tag': tag})
        result_url = 'https://movie.douban.com/j/search_subjects?type=movie&%s&sort=recommend&page_limit=20&page_start=0' % search
        results = get_result(result_url, tag)
        save_result(results)
if __name__ == '__main__':
    main()
| [
"18086869080@163.com"
] | 18086869080@163.com |
ad5a6d84bb25521305d207a95027e4c5f19e11a4 | 113e2131bbe8012f0783cc5a9b10b40b2d7f8a15 | /config.py | 42000847e8a8bd31c192150ab9620f602dbcb85b | [] | no_license | DZWH123/mnist-ink | ecdb0372e6a9b503317eb413e90ab15ee76749c2 | a65570be3b7d15066c860f07553846be864d2e7e | refs/heads/master | 2020-05-28T03:34:54.997511 | 2019-06-08T10:00:17 | 2019-06-08T10:00:17 | 188,868,621 | 0 | 0 | null | 2019-05-27T15:31:27 | 2019-05-27T15:31:27 | null | UTF-8 | Python | false | false | 363 | py | SOURCE_URL = "http://yann.lecun.com/exdb/mnist/"
WORK_DIRECTORY = None
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
NUM_LABELS = 10
VALIDATION_SIZE = 5000 # Size of the validation set.
SEED = 66478 # Set to None for random seed.
BATCH_SIZE = 64
NUM_EPOCHS = 10
EVAL_BATCH_SIZE = 64
EVAL_FREQUENCY = 100 # Number of steps between evaluations.
FLAGS = None | [
"1657883517@qq.com"
] | 1657883517@qq.com |
d3f2510ba6cd3a4c0f43fd1ed1050df8944d9ffb | b65ddeec3ff06a990f034a21b96d487cdff68574 | /DefEval2020/logisticRegression.py | b73ad89c0a5c2b230d4d253bcba1dbf102dce22e | [] | no_license | NourEldinShobier/DeftEval | d4fe6e8dd6ff4ec321d48db124583b4f29ba9584 | 0f940d20c122525fc9508f49562f4b9401078f3c | refs/heads/master | 2022-09-11T17:46:44.175374 | 2020-06-01T19:23:06 | 2020-06-01T19:23:06 | 268,458,467 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | from sklearn.metrics import classification_report
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
import sklearn.linear_model as lm
model = lm.LogisticRegression()
class LogisticRegression:
    # NOTE(review): this class shadows sklearn's LogisticRegression imported
    # above; the module-level `model` is built via `lm.LogisticRegression()`,
    # so training still works, but renaming the class would avoid confusion.
    def start(self, train_df, test_df):
        """Vectorize sentences with uni/bi-gram counts, fit the module-level
        logistic-regression model, and print a classification report.

        :param train_df: DataFrame with 'sentence' and 'label' columns.
        :param test_df: DataFrame with 'sentence' and 'label' columns.
        """
        vectorizer = CountVectorizer(ngram_range=(1, 2))
        # Fit the vocabulary on the training set only.
        train_table = vectorizer.fit_transform(train_df['sentence'])
        test_table = vectorizer.transform(test_df['sentence'])
        model.fit(train_table, train_df['label'])
        predictions = model.predict(test_table)
        print(classification_report(test_df['label'], predictions.round()))
| [
"noreply@github.com"
] | NourEldinShobier.noreply@github.com |
31cdbe882af4808f510d60c5303fc71448bad50f | 28a462a28f443c285ca5efec181ebe36b147c167 | /tests/compile/basic/es2016/Symbol.keyFor.spec | 4405fe814db693ef8c40840d1d430431bc104824 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | kaist-plrg/jstar | 63e71f9156860dc21cccc33a9f6c638dfee448ea | 1282919127ea18a7e40c7a55e63a1ddaaf7d9db4 | refs/heads/main | 2022-07-22T08:12:34.947712 | 2022-02-27T04:19:33 | 2022-02-27T11:06:14 | 384,045,526 | 6 | 4 | NOASSERTION | 2022-02-27T11:05:26 | 2021-07-08T07:53:21 | Python | UTF-8 | Python | false | false | 396 | spec | 1. If Type(_sym_) is not Symbol, throw a *TypeError* exception.
1. For each element _e_ of the GlobalSymbolRegistry List (see <emu-xref href="#sec-symbol.for"></emu-xref>),
1. If SameValue(_e_.[[Symbol]], _sym_) is *true*, return _e_.[[Key]].
1. Assert: GlobalSymbolRegistry does not currently contain an entry for _sym_.
1. Return *undefined*. | [
"h2oche22@gmail.com"
] | h2oche22@gmail.com |
b27373bc38eff28a67ebaad6b5aa01a01e97f5e3 | a884039e1a8b0ab516b80c2186e0e3bad28d5147 | /Livros/Livro-Desenvolvimento web com Flask/Capitulo02/Nível 02/exemplo07a.py | 0129fa2065636c4e62560194d3ba20d2e016d1d8 | [
"MIT"
] | permissive | ramonvaleriano/python- | 6e744e8bcd58d07f05cd31d42a5092e58091e9f0 | ada70918e945e8f2d3b59555e9ccc35cf0178dbd | refs/heads/main | 2023-04-10T14:04:24.497256 | 2021-04-22T18:49:11 | 2021-04-22T18:49:11 | 340,360,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # Program: exemplo07a.py
# Author: Ramon R. Valeriano
# Description: Programa do Capítulo 2, para melhorar a fixação
# Developed: 02/03/2020 - 16:29
from flask import Flask, make_response
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the root page and set an 'answer' cookie on the response."""
    # Build the response object explicitly so a cookie can be attached.
    response = make_response('<h1>Este documento esta sendo carregado em um cookie.</h1>')
    response.set_cookie('answer', '42')
    return response
app.run()
app.run() | [
"rrvaleriano@gmail.com"
] | rrvaleriano@gmail.com |
3d0bd1a89b630aa32cc590b9eb2705a428b994aa | 6e03af1dfd04494abde860dc32f1b438f1e1913f | /05/wpe05_test.py | f1bd24696d91e7e0cde4d68aa434943c8a097e6e | [] | no_license | feoh/WeeklyPythonExercises | 77974636cbae6a36ee8f82af342b3b2403b3a07a | d3af536cd6f791fdd361d311aca6328d0f967533 | refs/heads/master | 2021-03-25T08:45:40.541053 | 2020-06-20T03:02:14 | 2020-06-20T03:02:14 | 247,602,393 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,221 | py | #!/usr/bin/env python3
import wpe05_solution
import csv
filename = 'cities.csv'
def test_writes_1000_cities():
    """The generated CSV should contain exactly 1000 rows."""
    wpe05_solution.cities_to_csv(wpe05_solution.gist_url, 'cities.csv')
    # Use a context manager so the file handle is closed deterministically
    # (the original leaked an open file object).
    with open(filename) as f:
        for index, one_row in enumerate(csv.reader(f)):
            pass
    assert index == 999
def test_each_city_has_four_fields():
    """Every row of the tab-delimited file should have exactly four fields."""
    wpe05_solution.cities_to_csv(wpe05_solution.gist_url, 'cities.csv')
    # Context manager fixes the leaked file handle; a generator expression
    # avoids materializing the per-line boolean list.
    with open(filename) as f:
        assert all(len(fields) == 4
                   for fields in csv.reader(f, delimiter='\t'))
def test_first_is_new_york():
    """The first row should be rank-1 New York, NY."""
    wpe05_solution.cities_to_csv(wpe05_solution.gist_url, 'cities.csv')
    # Use a context manager so the file handle is closed deterministically
    # (the original leaked an open file object).
    with open(filename) as f:
        city, state, rank, population = f.readline().strip().split('\t')
    assert city == 'New York'
    assert state == 'New York'
    assert rank == '1'
    assert population == '8405837'
def test_last_is_panama_city():
    """The final row should be rank-1000 Panama City, FL."""
    wpe05_solution.cities_to_csv(wpe05_solution.gist_url, 'cities.csv')
    # Use a context manager so the file handle is closed deterministically
    # (the original leaked an open file object).
    with open(filename) as f:
        for fields in csv.reader(f, delimiter='\t'):
            pass
    city, state, rank, population = fields
    assert city == 'Panama City'
    assert state == 'Florida'
    assert rank == '1000'
    assert population == '36877'
"feoh@feoh.org"
] | feoh@feoh.org |
a58b5f547fe03e29a5ac581bb0c8b2f83405bd75 | 39689b27f278ee090677884889221351333bbd4d | /Python/if_statements_loops/calculate_the_scores_of_class.py | 09a75ce95085018e19b17bcbf361c7fa79bbe897 | [] | no_license | emrekrt1655/Homeworks-Assigments | b3be9b511099229c8ab7b0b6cc4b2f231b8cf1c1 | 4e938107f43c452176837c65485a086e4abe8fd9 | refs/heads/master | 2022-12-24T13:20:43.492210 | 2020-09-30T15:11:46 | 2020-09-30T15:11:46 | 291,432,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | #!/usr/bin/env python
# coding: utf-8
# ### Write a Python code that calculates the average of scores that students took in a math class at below.
# scores = {"Mary" : 85, "Susan": 75, "Barry" : 65, "Alexis" : 88, "Jane" : 45, "Tina" : 100, "Tom" : 90, "Tim": 60}
# In[1]:
# Math-class scores keyed by student name.
scores = {"Mary" : 85, "Susan": 75, "Barry" : 65, "Alexis" : 88, "Jane" : 45, "Tina" : 100, "Tom" : 90, "Tim": 60}
# Built-in sum() over the dict's values replaces the manual accumulation loop.
sum_scores = sum(scores.values())
avr = sum_scores / len(scores)
print("the average of scores is", avr)
# In[ ]:
| [
"emrekurtt1655@gmail.com"
] | emrekurtt1655@gmail.com |
eb14ad9cc026342ecb88f0372c9d46218bb7bf1c | 584db1be8b6bdedaa56d186692ad72da5ee07164 | /patron/cells/weights/__init__.py | d83f31e1ab2fdb889a4e774c5b82817b6dad2c51 | [
"Apache-2.0"
] | permissive | casbin/openstack-patron | 66006f57725cf1c3d735cd5529d3459fd77384c8 | b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25 | refs/heads/master | 2023-05-31T05:23:37.721768 | 2015-12-31T12:18:17 | 2015-12-31T12:18:17 | 382,054,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,286 | py | # Copyright (c) 2012-2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cell Scheduler weights
"""
from patron import weights
class WeightedCell(weights.WeighedObject):
def __repr__(self):
return "WeightedCell [cell: %s, weight: %s]" % (
self.obj.name, self.weight)
class BaseCellWeigher(weights.BaseWeigher):
    """Base class for cell weights.

    Subclass this to implement a cell weigher; subclasses in this
    package are discovered via all_weighers() below.
    """
    pass
class CellWeightHandler(weights.BaseWeightHandler):
    """Weight handler that wraps weighed cells in WeightedCell objects."""
    object_class = WeightedCell
    def __init__(self):
        # Restrict discovery to BaseCellWeigher subclasses.
        super(CellWeightHandler, self).__init__(BaseCellWeigher)
def all_weighers():
    """Return a list of weight plugin classes found in this directory."""
    # Class discovery is delegated to the base weight handler.
    return CellWeightHandler().get_all_classes()
| [
"hsluoyz@qq.com"
] | hsluoyz@qq.com |
ffc68c7226a18249bc8e73f4e6ef26bd6e69bd74 | cdbe8744a319e03993777182808d1c1f76e8323d | /utils/cut_frames.py | d130a0da4cffdfc8957d04c4e33913fa61a21c5d | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] | permissive | Tabish-Amin/Gender-Detection-Python | d293ac73550b6e0e8fc0abc0ca2600b3f4b36476 | 2434fcbd77c4db3dacabbff83bcd88ce56bcf0ae | refs/heads/main | 2023-07-18T17:50:46.429246 | 2021-09-13T08:11:56 | 2021-09-13T08:11:56 | 405,644,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,540 | py | import cv2
import os
import shutil
from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
def convertMillis(millis):
    """Convert a duration in milliseconds to an "H:M:S" timestamp string.

    Hours count total hours since zero; minutes and seconds wrap at 60.
    Seconds keep their fractional part (e.g. "1:1:1.0").
    """
    hours = millis // (1000 * 60 * 60)
    # Wrap minutes at 60; the original omitted the modulo, so e.g.
    # 3,661,000 ms rendered as "1:61:1.0" instead of "1:1:1.0".
    minutes = (millis // (1000 * 60)) % 60
    seconds = (millis / 1000) % 60
    return "%d:%d:%s" % (hours, minutes, seconds)
def cut_video_frames(video_path, s_t, e_t):
    """Cut the [s_t, e_t] window (seconds) out of *video_path*, dump every
    frame of the clip to ./frames/<n>.jpg, and return a dict mapping each
    frame filename to its "H:M:S" timestamp in the original video.

    Side effects: writes/overwrites a temporary cut.mp4 in the CWD and
    recreates the ./frames directory from scratch.
    """
    dic = {}
    video = video_path
    s_time = s_t
    e_time = e_t
    # ffmpeg_extract_subclip("full.mp4", start_seconds, end_seconds, targetname="cut.mp4")
    print("Cutting the video according to the given time.")
    ffmpeg_extract_subclip(video, s_time, e_time, targetname="cut.mp4")
    print("Video cutting Done.")
    cap= cv2.VideoCapture('cut.mp4')
    i=0
    fps = cap.get(cv2.CAP_PROP_FPS)
    timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC)]
    calc_timestamps = [0.0]
    path = "./frames"
    # Recreate the output directory so stale frames never mix with new ones.
    if os.path.exists(path):
        shutil.rmtree(path)
        os.mkdir(path)
    else:
        os.mkdir(path)
    print('Extracting frames.')
    while(cap.isOpened()):
        ret, frame = cap.read()
        if ret == False:
            break
        timestamps.append(cap.get(cv2.CAP_PROP_POS_MSEC))
        # Accumulate an idealized per-frame timestamp from the nominal fps.
        calc_timestamps.append(calc_timestamps[-1] + 1000/fps)
        name = str(i+1)+'.jpg'
        cv2.imwrite(path+ "/" +name, frame)
        i+=1
        # NOTE(review): after i += 1, calc_timestamps[i] is the *next* frame
        # boundary, so each frame maps to the end of its interval rather
        # than its start — confirm this offset is intended.
        dic[name] = convertMillis(s_time*1000 + calc_timestamps[i])
    cap.release()
    cv2.destroyAllWindows()
    print('Frames Extraction Done.')
    # Best-effort cleanup of the temporary clip.
    try:
        if os.path.exists("cut.mp4"):
            os.remove("cut.mp4")
    except:
        pass
    return dic
"tabishameen8@gmail.com"
] | tabishameen8@gmail.com |
8f0007a69959fe0631b5b9f76f22c08f077de670 | cc5def2b06fd09492123d023fde906d949177353 | /utils/utils.py | 46744ac69c96a5b4f7e3a0e93f6955616b1f0da1 | [
"MIT"
] | permissive | bigvisionai/MRnet-Challenge | d92a480b972b43686a8174062c8a9f4f384d0a7e | ff3abcabc19cb4cfba6464fef1332b7a0456f839 | refs/heads/master | 2023-01-01T04:14:43.580318 | 2020-08-24T11:37:43 | 2020-08-24T11:37:43 | 267,511,472 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,743 | py | import torch
from sklearn import metrics
import numpy as np
def _get_trainable_params(model):
"""Get Parameters with `requires.grad` set to `True`"""
trainable_params = []
for x in model.parameters():
if x.requires_grad:
trainable_params.append(x)
return trainable_params
def _evaluate_model(model, val_loader, criterion, epoch, num_epochs, writer, current_lr, log_every=20):
    """Runs model over val dataset and returns auc and avg val loss.

    Logs per-batch loss/AUC to the TensorBoard *writer*; returns
    (val_loss_epoch, val_auc_epoch), both rounded to 4 decimals.
    NOTE(review): label.item()/probas.item() assume a batch size of 1.
    """
    # Set to eval mode
    model.eval()
    y_probs = []
    y_gt = []
    losses = []
    for i, (images, label) in enumerate(val_loader):
        if torch.cuda.is_available():
            images = [image.cuda() for image in images]
            label = label.cuda()
        output = model(images)
        loss = criterion(output, label)
        loss_value = loss.item()
        losses.append(loss_value)
        probas = torch.sigmoid(output)
        y_gt.append(int(label.item()))
        y_probs.append(probas.item())
        # AUC is undefined until both classes have been seen; fall back to 0.5.
        try:
            auc = metrics.roc_auc_score(y_gt, y_probs)
        except:
            auc = 0.5
        writer.add_scalar('Val/Loss', loss_value, epoch * len(val_loader) + i)
        writer.add_scalar('Val/AUC', auc, epoch * len(val_loader) + i)
        if (i % log_every == 0) & (i > 0):
            print('''[Epoch: {0} / {1} | Batch : {2} / {3} ]| Avg Val Loss {4} | Val AUC : {5} | lr : {6}'''.
                  format(
                      epoch + 1,
                      num_epochs,
                      i,
                      len(val_loader),
                      np.round(np.mean(losses), 4),
                      np.round(auc, 4),
                      current_lr
                  )
                  )
    # NOTE(review): `i` and `auc` leak from the loop, and the step here is
    # `epoch + i` (not epoch * len(...)) — confirm the intended x-axis.
    writer.add_scalar('Val/AUC_epoch', auc, epoch + i)
    val_loss_epoch = np.round(np.mean(losses), 4)
    val_auc_epoch = np.round(auc, 4)
    return val_loss_epoch, val_auc_epoch
def _train_model(model, train_loader, epoch, num_epochs, optimizer, criterion, writer, current_lr, log_every=100):
    """Run one training epoch and return (train_loss_epoch, train_auc_epoch).

    Mirrors _evaluate_model but performs backprop/optimizer steps and logs
    under the Train/ namespace.
    NOTE(review): label.item()/probas.item() assume a batch size of 1.
    """
    # Set to train mode
    model.train()
    y_probs = []
    y_gt = []
    losses = []
    for i, (images, label) in enumerate(train_loader):
        optimizer.zero_grad()
        if torch.cuda.is_available():
            images = [image.cuda() for image in images]
            label = label.cuda()
        output = model(images)
        loss = criterion(output, label)
        loss.backward()
        optimizer.step()
        loss_value = loss.item()
        losses.append(loss_value)
        probas = torch.sigmoid(output)
        y_gt.append(int(label.item()))
        y_probs.append(probas.item())
        # AUC is undefined until both classes have been seen; fall back to 0.5.
        try:
            auc = metrics.roc_auc_score(y_gt, y_probs)
        except:
            auc = 0.5
        writer.add_scalar('Train/Loss', loss_value,
                          epoch * len(train_loader) + i)
        writer.add_scalar('Train/AUC', auc, epoch * len(train_loader) + i)
        if (i % log_every == 0) & (i > 0):
            print('''[Epoch: {0} / {1} | Batch : {2} / {3} ]| Avg Train Loss {4} | Train AUC : {5} | lr : {6}'''.
                  format(
                      epoch + 1,
                      num_epochs,
                      i,
                      len(train_loader),
                      np.round(np.mean(losses), 4),
                      np.round(auc, 4),
                      current_lr
                  )
                  )
        # NOTE(review): unlike the val version, this AUC_epoch scalar is
        # emitted inside the batch loop — confirm whether that is intended.
        writer.add_scalar('Train/AUC_epoch', auc, epoch + i)
    train_loss_epoch = np.round(np.mean(losses), 4)
    train_auc_epoch = np.round(auc, 4)
    return train_loss_epoch, train_auc_epoch
def _get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
| [
"jatinprakash1511@gmail.com"
] | jatinprakash1511@gmail.com |
fa1870316e23e8074202fc4f5d1b189bcad90253 | 795c1af15c747a1210ea05d636f30d242cef2aac | /網站免費Python教學/6-10.輔助線.py | f6fa5fe9403452addbd4d29b512982d1d08453d6 | [] | no_license | jiajia0417/MarketDataScience | 701e512dd0d214cdecb0dab4d0ac2233f3ad2e55 | cb168f0fa87081786b5f30191760c21a756da954 | refs/heads/master | 2023-09-04T16:26:37.996762 | 2021-11-23T08:24:53 | 2021-11-23T08:24:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 13 15:02:51 2021
@author: Ivan
https://marketingliveincode.com/
版權屬於「行銷搬進大程式」所有,若有疑問,可聯絡ivanyang0606@gmail.com
Python免費基礎教學課程
第六章Matplotlib繪圖
輔助線
"""
import matplotlib.pyplot as plt
import numpy as np
# Reference-line demo: plot three curves, then add a horizontal guide line.
# NOTE(review): labels are set but plt.legend() is never called, so no
# legend is rendered.
x = np.linspace(0, 2, 30)
plt.plot(x, x, '-o',label='蘋果')
plt.plot(x, x**2, label='香蕉')
plt.plot(x, x**3, label='橘子')
plt.axhline( # draw a horizontal reference line (axvline is the vertical analogue)
    2, # y position
    color='#33CCFF', # line color
    linestyle='dashed', # line style
    linewidth=1 # line width
)
plt.show()
"noreply@github.com"
] | jiajia0417.noreply@github.com |
ae0d2c767824c01c8f24bc22888ffbe08eca40d1 | 85dc2da8f942927a3b4aa8a42b087042c131b2a7 | /src/search.py | 295c4c6199749f4ce9aefb77fa028e8ce8a78579 | [] | no_license | Wanghuaichen/auto-blocklist | 6c7cb2a5094c37e3066591c152644ba49837b8bc | ba12c33493934e22daa447192fa3216c91b1ff9c | refs/heads/master | 2020-03-28T00:56:53.230270 | 2018-08-17T19:59:06 | 2018-08-17T19:59:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,806 | py | import sys
import json
import requests
import sqlite3
import time
import dns.resolver
import threading
import csv
import time
import multiprocessing
import queue
from dns.exception import Timeout
from xml.etree import ElementTree
from urllib.parse import urlparse
from tag import *
from fetch import *
from results import *
#DB_NAME = 'db/bigrams.db'
# DB_NAME = 'db/trigrams.db'
DB_NAME = 'db/unigrams.db'
KEYS = 'KEYS'
THREADS = 4
PROCESSES = 4
url_queue = multiprocessing.Queue()
tag_queue = multiprocessing.Queue()
censored_queue = multiprocessing.Queue()
def read_keys(keys_filename):
    """Load the JSON API-keys file and return its contents.

    On an I/O failure the error is printed and the process exits with -1.
    """
    try:
        with open(keys_filename) as handle:
            return json.load(handle)
    except IOError as err:
        print(err)
        exit(-1)
def call_api(url, headers, params):
    """Perform a GET request against an API endpoint.

    Returns the `requests.Response` on success, or None when the request
    failed (the error is printed instead of raised).
    """
    # Pre-bind so the function cannot hit UnboundLocalError at `return r`
    # when requests.get itself raises (e.g. a connection error).
    r = None
    try:
        r = requests.get(url, headers=headers, params=params)
        r.raise_for_status()
    # The original caught a bare `HTTPError`, a name never imported in this
    # module, which itself raised NameError while matching the handler;
    # qualify it through the `requests` package instead.
    except requests.exceptions.HTTPError:
        print('Couldn\'t get the response from Microsoft Azure')
    except Exception:
        print('Error:', sys.exc_info()[0])
    return r
def translate_to_cn(term, translator_keys):
    """Translate an English term to Chinese via the translator endpoint."""
    query = {'from': 'en', 'to': 'zh', 'text': term}
    response = call_api(translator_keys["url"], translator_keys["headers"], query)
    # The service answers with a single XML element whose text is the translation.
    return ElementTree.fromstring(response.text).text
def translate_to_en(term, translator_keys):
    """Translate a Chinese term to English via the translator endpoint."""
    query = {'from': 'zh', 'to': 'en', 'text': term}
    response = call_api(translator_keys["url"], translator_keys["headers"], query)
    # The service answers with a single XML element whose text is the translation.
    return ElementTree.fromstring(response.text).text
def search(tag, search_keys):
    """Query the web-search API configured in *search_keys* for *tag*.

    Popular blog/social platforms are excluded inside the query string
    itself. Returns the result list under payload['webPages']['value'],
    or {} when the response carries no web results.
    """
    if tag is None:
        print('Tag is NoneType')
    webpages = {}
    params = {'safeSearch': 'off',
              'mkt': 'en-us', # Repeat experiments with Chinese market
              'count': '50'}
    params['q'] = (tag + ' NOT (site:blogspot.com) NOT (site:blogspot.tw)'
                 + ' NOT (site:blogspot.fr) NOT (site:blogspot.jp)'
                 + ' NOT (site:tumblr.com) NOT (site:youtube.com)'
                 + ' NOT (site:facebook.com) NOT (site:twitter.com)')
    payload = call_api(search_keys["url"], search_keys["headers"], params).json()
    if 'webPages' in payload and 'value' in payload['webPages']:
        webpages = payload['webPages']['value']
    else:
        webpages = {}
    return webpages
def is_censored(domain):
    """Heuristically test whether *domain* is DNS-censored in China.

    Queries a single Chinese resolver for an A record: any answer counts
    as "censored"; a timeout or any other error counts as "not censored".
    NOTE(review): this matches a DNS-injection probe only if the chosen
    nameserver is expected NOT to answer for uncensored names — confirm
    the measurement methodology.
    """
    resolver = dns.resolver.Resolver()
    # resolver.nameservers = ['220.181.57.217', '223.96.100.100', '1.24.10.10',
    #                         '202.143.16.100', '180.160.10.1', '180.77.100.200',
    #                         '144.0.111.90', '42.101.0.1']
    resolver.nameservers = ['220.181.57.218']
    resolver.lifetime = 3.0
    try:
        answer = resolver.query(domain, 'A')
        return True
    except dns.exception.Timeout:
        return False
    except:
        print('Error:', sys.exc_info()[0])
        return False
def url_to_domain(url):
    """Return the network-location (host[:port]) component of *url*."""
    return urlparse(url).netloc
def censor_producer(domains, itr):
    """Probe each (domain,) row for censorship and enqueue the hits.

    Thread-worker body for part_one; censored domains are pushed onto the
    module-level censored_queue. `itr` is accepted for signature parity
    with the other producers but is not used here.
    """
    for d in domains:
        try:
            domain = d[0]
            if is_censored(domain):
                print('CENSORED: %s' % domain)
                censored_queue.put(domain)
            else:
                print('Not censored: %s' % domain)
        except:
            print('Error:', sys.exc_info()[1])
            continue
def censor_consumer(itr):
    """Drain censored_queue and persist censorship flags for iteration *itr*.

    First marks every URL of the iteration as uncensored/used, then flips
    the rows whose domain was reported censored back to censored/unused.
    """
    conn = sqlite3.connect(DB_NAME)
    c = conn.cursor()
    c.execute('update urls set censored=0, used=1 where iteration=?', (itr,))
    while True:
        try:
            # Non-blocking get: an empty queue means the producers are done.
            domain = censored_queue.get(block=False)
            c.execute('update urls set censored=1, used=0 where domain=? and iteration=?', (domain,itr,))
            print(domain)
        except queue.Empty:
            print('Censored queue emptied')
            break
        except:
            print('Error:', sys.exc_info()[1])
            continue
    conn.commit()
    conn.close()
def tag_producer(censored_urls, itr):
    """Extract up to MAX_TAGS unigram tags per censored URL row.

    Process-pool worker body for part_two: fetches n-grams for each URL,
    ranks them with tf-idf (Chinese terms first, then English), and puts
    (url, tags, itr) tuples on tag_queue. The commented-out sections are
    the bigram/trigram variants of the same selection logic.
    """
    print(len(censored_urls))
    for row in censored_urls:
        url = row[0]
        try:
            unigrams = 0
            # bigrams = 0
            # trigrams = 0
            tags = []
            grams = fetch_grams(url)
            unigram_tags = tf_idf(grams[0], 1)
            # bigram_tags = tf_idf(grams[1], 2)
            # trigram_tags = tf_idf(grams[2], 3)
            # if trigram_tags[1] is not None and trigrams < MAX_TAGS: # Chinese trigrams
            #     for cn_tag in trigram_tags[1]:
            #         if trigrams < MAX_TAGS:
            #             tags.append(cn_tag)
            #             trigrams += 1
            # if trigram_tags[0] is not None and trigrams < MAX_TAGS: # English trigrams
            #     for en_tag in trigram_tags[0]:
            #         if trigrams < MAX_TAGS:
            #             tags.append(en_tag)
            #             trigrams += 1
            # if bigram_tags[1] is not None and bigrams < MAX_TAGS: # Chinese bigrams
            #     for cn_tag in bigram_tags[1]:
            #         if bigrams < MAX_TAGS:
            #             tags.append(cn_tag)
            #             bigrams += 1
            # if bigram_tags[0] is not None and bigrams < MAX_TAGS: # English bigrams
            #     for en_tag in bigram_tags[0]:
            #         if bigrams < MAX_TAGS:
            #             tags.append(en_tag)
            #             bigrams += 1
            if unigram_tags[1] is not None: # Chinese unigrams
                for cn_tag in unigram_tags[1]:
                    if unigrams < MAX_TAGS:
                        tags.append(cn_tag)
                        unigrams += 1
            if unigram_tags[0] is not None: # English unigrams
                for en_tag in unigram_tags[0]:
                    if unigrams < MAX_TAGS:
                        tags.append(en_tag)
                        unigrams += 1
            print(url, tags)
            vals = (url, tags, itr)
            tag_queue.put(vals)
        except:
            print('Error:', sys.exc_info()[1])
            continue
def tag_consumer():
    """Drain tag_queue into the tags table and mark source URLs as used.

    Duplicate tags (integrity errors) are reported and skipped.
    """
    conn = sqlite3.connect(DB_NAME)
    c = conn.cursor()
    while True:
        try:
            # NOTE(review): `tuple` shadows the builtin name.
            tuple = tag_queue.get(block=False)
            url = tuple[0]
            tags = tuple[1]
            itr = tuple[2]
            for tag in tags:
                try:
                    vals = (tag, 0, 1, itr)
                    c.execute('INSERT INTO tags VALUES (?,?,?,?)', vals)
                    print(tag)
                except sqlite3.IntegrityError:
                    print('Duplicate tag:', tag)
            c.execute('UPDATE urls SET used=1 WHERE url=?', (url,))
        except queue.Empty:
            print('Queue emptied')
            break
        except:
            print('Error:', sys.exc_info()[1])
            continue
    conn.commit()
    conn.close()
def url_producer(unused_tags, itr, keys):
    """For each tag row, search for candidate URLs and enqueue them.

    Process-pool worker body for part_three: English tags are translated
    to Chinese first, then searched; results (minus popular blog/social
    domains) are pushed onto url_queue as rows for the urls table,
    stamped with iteration itr+1.
    """
    for row in unused_tags:
        try:
            tag = row[0]
            print(tag)
            if isEnglish(tag):
                translated_tag = translate_to_cn(tag, keys["translator_info"])
                search_results = search(translated_tag, keys["search_info"])
            else:
                search_results = search(tag, keys["search_info"])
            for result in search_results:
                url = result['url']
                parsed_uri = urlparse(url)
                domain = '{uri.netloc}'.format(uri=parsed_uri)
                # Ignore popular domains
                if not ('youtube' in domain or
                        'tumblr' in domain or
                        'blogspot' in domain or
                        'facebook' in domain or
                        'twitter' in domain):
                    vals = (url, domain, 0, 0, itr+1, tag)
                    url_queue.put(vals)
        except:
            print('Error:', sys.exc_info()[1])
            continue
def url_consumer():
    """Drain url_queue into the urls table, then mark each source tag used.

    Duplicate URLs (integrity errors) are reported and skipped.
    """
    conn = sqlite3.connect(DB_NAME)
    c = conn.cursor()
    while True:
        try:
            # NOTE(review): `tuple` shadows the builtin name.
            tuple = url_queue.get(block=False)
            url = tuple[0]
            tag = tuple[5]
            try:
                c.execute('INSERT INTO urls VALUES (?,?,?,?,?,?)', tuple)
                print(url)
            except sqlite3.IntegrityError:
                print('Duplicate URL:', url)
            c.execute('UPDATE TAGS SET used=1 WHERE tag=?', (tag,))
        except queue.Empty:
            print('Queue emptied')
            break
        except:
            print('Error:', sys.exc_info()[1])
            continue
    conn.commit()
    conn.close()
def part_one(itr):
    """Censorship-test every distinct domain of iteration *itr*.

    Splits the domains across THREADS censor_producer threads, waits for
    them, then persists the results via censor_consumer.
    NOTE(review): with fewer domains than THREADS, block_size becomes 0
    (range step 0 raises) and _args[i] can IndexError — assumes a large
    domain set.
    """
    conn = sqlite3.connect(DB_NAME)
    c = conn.cursor()
    domains = c.execute(('select distinct domain from urls ' +
                         'where iteration=?'), (itr,)).fetchall()
    print('# of distinct domains to test:', len(domains))
    conn.close()
    block_size = int(len(domains)/THREADS)
    blocks = [domains[i:i + block_size] for i in range(0, len(domains), block_size)]
    _args = [(block, itr) for block in blocks]
    threads = []
    for i in range(THREADS):
        t = threading.Thread(target=censor_producer, args=_args[i])
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
    censor_consumer(itr)
def part_two(itr):
    """Tag up to 2500 censored URLs of iteration *itr*.

    Fans the rows out to a PROCESSES-sized pool running tag_producer,
    then drains tag_queue into the database via tag_consumer.
    NOTE(review): block_size is 0 when there are fewer rows than
    PROCESSES, which makes range(..., 0) raise — assumes enough rows.
    """
    conn = sqlite3.connect(DB_NAME)
    c = conn.cursor()
    censored_urls = c.execute(('select * from urls where ' +
                               'censored=1 and iteration=?'), (itr,)).fetchall()
    # Cap the workload per iteration.
    censored_urls = censored_urls[:2500]
    conn.close()
    block_size = int(len(censored_urls)/PROCESSES)
    blocks = [censored_urls[i:i + block_size] for i in range(0, len(censored_urls), block_size)]
    args = [(block, itr) for block in blocks]
    pool = multiprocessing.Pool(PROCESSES)
    pool.starmap(tag_producer, args)
    tag_consumer()
def part_three(itr, keys):
    """Turn the unused tags of iteration *itr* into candidate URLs.

    Fans the tags out to a PROCESSES-sized pool running url_producer,
    then drains url_queue into the database via url_consumer.
    """
    conn = sqlite3.connect(DB_NAME)
    c = conn.cursor()
    unused_tags = c.execute(('select * from tags where used=0 ' +
                             'and iteration=?'), (itr,)).fetchall()
    print('# of unused tags to search with', len(unused_tags))
    conn.close()
    block_size = int(len(unused_tags)/PROCESSES)
    blocks = [unused_tags[i:i + block_size] for i in range(0, len(unused_tags), block_size)]
    args = [(block, itr, keys) for block in blocks]
    pool = multiprocessing.Pool(PROCESSES)
    pool.starmap(url_producer, args)
    url_consumer()
def find_censored_urls(keys):
    """Drive the crawl loop: tag -> search -> censorship-test per iteration.

    NOTE(review): range(3, 4) resumes a previous run at iteration 3 —
    adjust the bounds when starting from scratch.
    """
    for i in range(3,4):
        itr = i
        print('Itr %d' % itr)
        part_two(itr)
        part_three(itr, keys)
        part_one(itr+1)
if __name__ == "__main__":
# keys = read_keys(KEYS)
# start = time.time()
# find_censored_urls(keys)
# end = time.time()
# print(end - start)
grams = fetch_grams('http://tiananmenmother.org')
trigram_tags = tf_idf(grams[2], 3)
print(trigram_tags)
| [
"austin.hounsel@gmail.com"
] | austin.hounsel@gmail.com |
0fa09516a8811b555a5e7bd54339adfbe1757600 | 9c80c7c526e48ab797b8bca43d854ce9a331c7e7 | /time_accounting/filters.py | 3f6d992e4a02b85ebef9eb518069c41392e88d20 | [] | no_license | Squaad99/core-manufacturing | 8afd40862a1d7b643345efe64dd7b8f0251624f4 | 290c5b424f111dccbad37e48a7cff9a1824248dc | refs/heads/master | 2023-03-17T09:30:17.751113 | 2020-08-18T22:23:21 | 2020-08-18T22:23:21 | 267,694,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,775 | py |
import django_filters
from django.db import models
from django.forms import DateInput
from django_filters import FilterSet
from time_accounting.models import TimeReport
from django.utils.translation import ugettext as _
class TimeReportFilter(FilterSet):
    """Filter set for TimeReport list views.

    Offers a relative date-range choice plus explicit start/end bounds;
    the explicit bounds are applied manually in filter_queryset as a
    single date__range lookup.
    """
    # Choice labels are Swedish UI strings (Today / Yesterday / 7 days /
    # This month / This year).
    choices = [
        ('today', _('Idag')),
        ('yesterday', _('Igår')),
        ('week', _('7 dagar')),
        ('month', _('Denna månad')),
        ('year', _('Detta år')),
    ]
    date = django_filters.DateRangeFilter(choices=choices, initial="Igår")
    # NOTE(review): the lookup_expr on these two is never used —
    # filter_queryset removes them from cleaned_data and applies a
    # date__range instead.
    start_date = django_filters.DateFilter(widget=DateInput(attrs={'type': 'date'}), field_name='start_date', lookup_expr='lt', label='Start datum')
    end_date = django_filters.DateFilter(widget=DateInput(attrs={'type': 'date'}), field_name='end_date', lookup_expr='gt', label='Slut datum')
    class Meta:
        model = TimeReport
        fields = ['employee']
    def filter_queryset(self, queryset):
        """Apply the declared filters, then an inclusive start/end range."""
        # Pull the manual bounds out of cleaned_data first ...
        custom_filters = {}
        for name, value in self.form.cleaned_data.items():
            if name == 'start_date' or name == 'end_date':
                custom_filters.update({name: value})
        del self.form.cleaned_data['start_date']
        del self.form.cleaned_data['end_date']
        # ... run the remaining declared filters normally ...
        for name, value in self.form.cleaned_data.items():
            queryset = self.filters[name].filter(queryset, value)
            assert isinstance(queryset, models.QuerySet), \
                "Expected '%s.%s' to return a QuerySet, but got a %s instead." \
                % (type(self).__name__, name, type(queryset).__name__)
        # ... and only apply the range when BOTH bounds were supplied.
        if custom_filters['start_date'] and custom_filters['end_date']:
            queryset = queryset.filter(date__range=[custom_filters['start_date'], custom_filters['end_date']])
        return queryset
| [
"squaaad1993@gmail.com"
] | squaaad1993@gmail.com |
c9e8bf4f04c4148b84a03ab5c6c4c8be69608fa2 | e32b8fc42b1fecfc843eee173680673fdeb42387 | /codility/03.lesson11to15/lesson12.py | bfd7989adcac2ab990afe5ba442ee53cf9183fd0 | [] | no_license | seunghy/algorithm_prac | 4c9c5cdae260a74a805b3ff207f6b7f7096c12fc | 9d0cab3d98967b2fec53946cf0a284339e6dfe87 | refs/heads/master | 2023-04-04T14:09:16.098363 | 2021-04-01T14:59:22 | 2021-04-01T14:59:22 | 307,037,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | # [Lesson12-1. ChocolatesByNumbers]
def solution(N, M):
    """Codility ChocolatesByNumbers: count chocolates eaten.

    Starting at 0 and stepping by M around a circle of N chocolates, the
    number of distinct positions visited before returning to the start
    is N / gcd(N, M).
    """
    from math import gcd  # local import keeps the module surface unchanged
    return N // gcd(N, M)
| [
"seunghbs@gmail.com"
] | seunghbs@gmail.com |
f23b6563f026a88014a0de5d7b7b0f417aca2874 | db64fe8a3084a2a584e804e08e4b4e0a03146454 | /textalerts.py | 1c906869654eb16cb0908e15066bbbd1d3a9a20c | [] | no_license | danielthepope/textalerts | 50b6d41055923034f4d5a9f5ffd79e88d6aa6c96 | eb36dafcf82f4a99fb306dbb20d713985439ada6 | refs/heads/master | 2016-08-07T12:25:07.132840 | 2013-09-25T13:20:19 | 2013-09-25T13:20:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | #!/usr/bin/env python
import datetime
import os
from time import sleep
# Location of the newline-separated alerts file on the device.
alertLocation = '/home/pi/apps/textalerts/alerts.txt'
# Read the whole file and split it into individual alert lines.
# NOTE(review): the file handle is never closed; acceptable for a short
# Python 2 script but worth tidying if this grows.
alertList = open(alertLocation, 'r').read().split('\n')
for s in alertList:
    print s
| [
"djpope2365@gmail.com"
] | djpope2365@gmail.com |
d999acb14a4258c765255569ad0349f26990ecdc | 38bf7e24a2150983f482a6749dc661ed4c4a4439 | /docs/source/conf.py | 914308cfb0a62a3b79401f3a79e53ff0e90b1f3c | [] | no_license | guoweikuang/flask_v2ex | 15b6247d979146ada57fe2e6dd7c93f7708297ff | d84c14b1d90be78e634677dee332a63bca69c7fc | refs/heads/master | 2022-12-17T19:36:57.945884 | 2019-10-23T13:25:44 | 2019-10-23T13:25:44 | 116,472,843 | 20 | 5 | null | 2022-11-22T02:08:35 | 2018-01-06T10:09:07 | JavaScript | UTF-8 | Python | false | false | 4,776 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'V2Ex'
copyright = '2018, guoweikuang'
author = 'guoweikuang'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = 'v1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'V2Exdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'V2Ex.tex', 'V2Ex Documentation',
'guoweikuang', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'v2ex', 'V2Ex Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'V2Ex', 'V2Ex Documentation',
author, 'V2Ex', 'One line description of project.',
'Miscellaneous'),
] | [
"673411814@qq.com"
] | 673411814@qq.com |
7d3188e91c16ce1529d0e2a8ab93409ff3f0698e | 2b8c84956a5f798dda7048b58f2fd71dfaee0f30 | /pseint-code/test/export/py3/00-programa.py3 | d30c2eddf65454b0effe3f040c4d8fc527258a4f | [
"GPL-1.0-or-later",
"GPL-2.0-only",
"MIT"
] | permissive | leandroniebles/code-compilation-component | 1b087de451c60dea087421f12a3573e216ae620c | 0c9b87fdae5cc21c3abb5dfcb8d52f92d582ae33 | refs/heads/master | 2023-01-22T19:18:04.631246 | 2019-09-30T23:24:57 | 2019-09-30T23:24:57 | 211,906,095 | 0 | 0 | MIT | 2023-01-12T07:38:18 | 2019-09-30T16:36:58 | C++ | UTF-8 | Python | false | false | 50 | py3 | if __name__ == '__main__':
print("Hola Mundo")
| [
"leandroniebles@gmail.com"
] | leandroniebles@gmail.com |
40bb22720b3817300aa35c223fae7cb293bcae29 | cb2bcccf56e7f9194e4d40ced7514b46416bbe4d | /Buddy/Buddy/asgi.py | 3e9fd8384df669c32da91fdafe750e5b187b1373 | [] | no_license | zijin1994/projects | 0a21a0af55b71dcd0b107807fb167c421870ab82 | 34b4602be9bcb26a82a4aed38e362215e805e71f | refs/heads/main | 2023-01-03T10:34:14.409365 | 2020-10-28T07:35:34 | 2020-10-28T07:35:34 | 307,937,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
ASGI config for Buddy project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Select the settings module before the application object is created.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Buddy.settings')
# Module-level ASGI callable that servers (uvicorn/daphne) import.
application = get_asgi_application()
| [
"zijin@ualberta.ca"
] | zijin@ualberta.ca |
35191925acafc83ea20ead8135b3732eb249d9f9 | e6132244015942c5ec75c8eff4f90cd0e9302470 | /src/wshop/apps/shipping/__init__.py | 15be34284e0e5d483fe1421db9999cd651d83f84 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | vituocgia/wshop-core | d3173f603861685b523f6b66af502b9e94b7b0c2 | 5f6d1ec9e9158f13aab136c5bd901c41e69a1dba | refs/heads/master | 2020-03-18T08:25:14.669538 | 2018-05-23T05:55:56 | 2018-05-23T05:55:56 | 134,508,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | default_app_config = 'wshop.apps.shipping.config.ShippingConfig'
| [
"dotiendiep@gmail.com"
] | dotiendiep@gmail.com |
8fbe346c6ff22621bddeaace1696325bcbb55dcc | 2d3c180a17cd3f4b2cc9f1edf3d06ffd82978d97 | /parameters.py | 67b83f25f977b3d711c16c87d1690fe2c4fc2e19 | [] | no_license | homerobse/nma-sdnc | 42825b398b75d1d017b272646d49fb9161bf75fb | aa4c29fe49c68e0c2c365699b64a0d72d9090980 | refs/heads/master | 2022-12-01T11:48:58.431377 | 2020-08-08T01:44:37 | 2020-08-08T01:44:37 | 282,267,932 | 0 | 3 | null | 2020-08-05T05:22:59 | 2020-07-24T16:28:32 | Jupyter Notebook | UTF-8 | Python | false | false | 1,579 | py | import os
from os.path import dirname, join, abspath
import numpy as np
# The download cells will store the data in nested directories starting here:
HCP_DIR_NAME = "hcp"
# i.e. <two levels above this file>/hcp
HCP_DIR = join(dirname(dirname(abspath(__file__))), HCP_DIR_NAME)
# The data shared for NMA projects is a subset of the full HCP dataset
N_SUBJECTS = 339
# The data have already been aggregated into ROIs from the Glasser parcellation
N_PARCELS = 360
# The acquisition parameters for all tasks were identical
TR = 0.72  # Time resolution, in sec
# The parcels are matched across hemispheres with the same order
HEMIS = ["Right", "Left"]
# Each experiment was repeated multiple times in each subject
N_RUNS_REST = 4
N_RUNS_TASK = 2
# Time series data are organized by experiment, with each experiment
# having an LR and RL (phase-encode direction) acquistion
BOLD_NAMES = [
  "rfMRI_REST1_LR", "rfMRI_REST1_RL",
  "rfMRI_REST2_LR", "rfMRI_REST2_RL",
  "tfMRI_MOTOR_RL", "tfMRI_MOTOR_LR",
  "tfMRI_WM_RL", "tfMRI_WM_LR",
  "tfMRI_EMOTION_RL", "tfMRI_EMOTION_LR",
  "tfMRI_GAMBLING_RL", "tfMRI_GAMBLING_LR",
  "tfMRI_LANGUAGE_RL", "tfMRI_LANGUAGE_LR",
  "tfMRI_RELATIONAL_RL", "tfMRI_RELATIONAL_LR",
  "tfMRI_SOCIAL_RL", "tfMRI_SOCIAL_LR"
]
# You may want to limit the subjects used during code development.
# This will use all subjects:
subjects = range(N_SUBJECTS)
# Loaded at import time. Assumes regions.npy transposes to 3 rows
# (name / network / myelin) over the 360 parcels — TODO confirm
# against the download notebook.
regions = np.load(f"{HCP_DIR}/regions.npy").T
# Parcel metadata keyed off the three rows of the transposed regions array.
region_info = dict(
    name=regions[0].tolist(),
    network=regions[1],
    # `np.float` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin `float` is the documented drop-in replacement (same dtype).
    myelin=regions[2].astype(float),
)
with np.load(f"{HCP_DIR}/hcp_atlas.npz") as dobj:
atlas = dict(**dobj) | [
"homero.esmeraldo@cncb.ox.ac.uk"
] | homero.esmeraldo@cncb.ox.ac.uk |
feaaf4a5d9b1524b0cbd47bed17244ae0098cba7 | b7423010e212287b2d822ee06b19ef27ed6aead2 | /setup.py | 5f8e493f91e70ae3e83344e7d22f05dc602712c7 | [
"Apache-2.0"
] | permissive | manish-sin/BetteRoads | 5c4dfd8454cb6063a5882dd80dab739846023e48 | 8bc37ca1ae40c530191cb7be5a0f49123d388356 | refs/heads/master | 2023-02-02T07:44:28.738059 | 2020-12-11T11:31:05 | 2020-12-11T11:31:05 | 317,178,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py | import os
import shutil
# Here we will do some initial setup
# We will remove all the folders and some cvs files which needs to be empty when starting a fresh project
# Currently the folders populated with data for reference
# commands below will remove these folders and create a new one with same name
#Here we define the path of all such folders
# Resolve all working directories relative to the current working directory.
Root_dir = os.path.abspath(".")
frames_path = os.path.join(Root_dir, r"frames")
wrong_coordinate_extacrtion_path = os.path.join(Root_dir, r"wrong_coordinate_extacrtion")
road_images_path = os.path.join(Root_dir, r"road_images")
pot_holes_detected_path = os.path.join(Root_dir, r"pot_holes_detected")
# Remove the existing folders (including any reference data inside them).
shutil.rmtree(frames_path)
shutil.rmtree(wrong_coordinate_extacrtion_path )
shutil.rmtree(road_images_path)
shutil.rmtree(pot_holes_detected_path)
# Recreate each folder empty, under the same name.
os.mkdir(frames_path)
os.mkdir(wrong_coordinate_extacrtion_path)
os.mkdir(road_images_path)
os.mkdir(pot_holes_detected_path)
# Remove the CSV files that must start empty for a fresh project.
core_data_path = os.path.join(Root_dir, r"core_data.csv")
os.remove(core_data_path)
super_core_data_path = os.path.join(Root_dir, r"super_core_data.csv")
os.remove(super_core_data_path )
final_csv_path = os.path.join(Root_dir, r"final_csv_data.csv")
os.remove(final_csv_path) | [
"manish.singh35200.com"
] | manish.singh35200.com |
a2363375026edabcb78c0ad54ba1930292c9b397 | 1bfacaa6985e3cd53acc343a71ce0aacbc43e1d4 | /src/parser.py | d134677dafad4330b4b42f01ed9952441855e9f1 | [
"Unlicense"
] | permissive | keesvv/vclfetch | cdb02e2f0a01c4f43068a514845420a5818c8ce0 | c5383a3ad82c9b73466d88fd69bf04de937b31a0 | refs/heads/master | 2020-04-14T03:28:20.433353 | 2018-12-31T14:49:34 | 2018-12-31T14:49:34 | 163,608,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,542 | py | import helper
from io import StringIO
from lxml import etree
from lxml.cssselect import CSSSelector
def parse(rawHtml):
    """Parse an HTML string into an lxml document tree."""
    html_parser = etree.HTMLParser()
    return etree.parse(StringIO(rawHtml), html_parser)
def parseNews(rawHtml):
    """Scrape the news list from the portal HTML.

    Returns a list of dicts with keys: thumbnail, date, category, summary,
    title, url.

    NOTE(review): the six field locals are only assigned when the matching
    CSS class is present on a child, so a missing field silently reuses the
    value from the previous news item (or raises NameError on the first
    item) -- confirm the markup always provides all six classes.
    """
    document = parse(rawHtml)
    # First .pubItems container holds the individual publication nodes.
    container = CSSSelector("div.pubItems")(document)[0]
    articleList = list()
    for newsItem in container:
        for prop in newsItem:
            if prop.get("class") == "pubThumbnail": thumbnail = helper.getThumbnail(prop)
            elif prop.get("class") == "pubDate": date = helper.getDateFormat(prop.text)
            elif prop.get("class") == "pubCategory": category = prop.text
            elif prop.get("class") == "pubTitle": title = prop.text
            elif prop.get("class") == "pubSummary": summary = prop.text
            elif prop.get("class") == "pubLink": url = prop.get("href")
        article = {
            "thumbnail": thumbnail,
            "date": date,
            "category": category,
            "summary": summary,
            "title": title,
            "url": url
        }
        articleList.append(article)
    return articleList
def parseMessages(rawHtml):
    """Scrape the messages list from the portal HTML.

    Returns a list of dicts with keys: thumbnail, date, category, summary,
    title, url.

    NOTE(review): this is a line-for-line duplicate of parseNews (only local
    names differ) -- a shared helper would remove the duplication. The same
    carry-over caveat applies: fields keep the previous item's value when a
    CSS class is absent.
    """
    document = parse(rawHtml)
    container = CSSSelector("div.pubItems")(document)[0]
    messageList = list()
    for messageItem in container:
        for prop in messageItem:
            if prop.get("class") == "pubThumbnail": thumbnail = helper.getThumbnail(prop)
            elif prop.get("class") == "pubDate": date = helper.getDateFormat(prop.text)
            elif prop.get("class") == "pubCategory": category = prop.text
            elif prop.get("class") == "pubTitle": title = prop.text
            elif prop.get("class") == "pubSummary": summary = prop.text
            elif prop.get("class") == "pubLink": url = prop.get("href")
        message = {
            "thumbnail": thumbnail,
            "date": date,
            "category": category,
            "summary": summary,
            "title": title,
            "url": url
        }
        messageList.append(message)
    return messageList
def parseAgenda(rawHtml):
    """Scrape agenda entries from the portal HTML.

    Returns a list of {"name": ..., "date": ...} dicts.
    NOTE(review): date is always None here -- date extraction appears
    unimplemented rather than intentional; confirm before relying on it.
    """
    document = parse(rawHtml)
    agendaItems = CSSSelector("div.EventDayScroll div a")(document)
    itemsList = list()
    for agendaItem in agendaItems:
        name = agendaItem.text
        date = None
        item = {
            "name": name,
            "date": date
        }
        itemsList.append(item)
    return itemsList
| [
"kees.van.voorthuizen@gmail.com"
] | kees.van.voorthuizen@gmail.com |
9bf3d2c051c29082aa33cfeceab377e3427f85ff | 05abb78c60a69422ae3e00a542bbd4573faf8174 | /python-para-zumbis/lista2/exercicio1.py | 9918795b5836c2bd55e4644ea40ede511eb2e42b | [] | no_license | xuting1108/Programas-de-estudo | 72b812d52f5b130a95103c38dbe9e471dc5aa6f9 | 01fe21097055d69c2115cff3da2199429e87dead | refs/heads/master | 2022-10-20T17:06:14.517643 | 2019-04-08T11:16:12 | 2019-04-08T11:16:12 | 179,678,721 | 0 | 1 | null | 2022-10-09T13:13:57 | 2019-04-05T12:38:23 | Python | UTF-8 | Python | false | false | 600 | py | # Faça um Programa que peça os três lados de um triângulo. O programa deverá informar se os valores podem ser um triângulo.
# Indique, caso os lados formem um triângulo, se o mesmo é: equilátero, isósceles ou escaleno.
lado_a = float(input('informe a medida do lado a: '))
lado_b = float(input('informe a medida do lado b: '))
lado_c = float(input('informe a medida do lado c: '))
if lado_a == lado_b == lado_c:
print('o triangulo é equilátero')
elif lado_a == lado_b or lado_a == lado_c or lado_b == lado_c:
print('o triangulo é isósceles')
else:
print('o triangulo é escaleno') | [
"xuting1108@hotmail.com"
] | xuting1108@hotmail.com |
6ae420169c10aaf07b5c33139af280acdab651e0 | 2857f90b0062dd068f811bcd8819151cea2a77bc | /Ejercicios_Web/Web Scraping/covid.py | bbcb842f258d9a0dd7bb1a947800b59fcee85950 | [] | no_license | efrainclg95/Sion | 7d702d52fbdd6133c56b6d3787e86adf75216151 | db7428b70d33189ba886955839a192e032b971b6 | refs/heads/master | 2021-06-26T06:54:24.458852 | 2021-06-17T16:30:41 | 2021-06-17T16:30:41 | 229,121,963 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | from bs4 import BeautifulSoup
import requests
import pandas as pd
# Scrape the Google News COVID-19 map page and print a country/case table.
url = 'https://news.google.com/covid19/map?hl=es-419&gl=PE&ceid=PE%3Aes-419'
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
# Country names and case-count cells (class names are Google-generated and
# may change without notice -- scraping is fragile by design here).
pais = soup.find_all('div', class_= 'pcAJd')
cantidad = soup.find_all('td', class_= 'l3HOY')
paises = []
total_casos = []
for p in pais:
    paises.append(p.text)
# Keep only every 5th cell (index 0, 5, 10, ...), i.e. the "total cases"
# column of each country row.
for j,k in enumerate(cantidad):
    contador = -5
    while contador < 300 :
        contador = contador + 5
        # print(j)
        if j == contador:
            # print(j,k.text)
            total_casos.append(k.text)
# Dictionary-based solution (kept for reference, disabled):
'''dic_covid = dict(zip(paises,total_casos))
v_in_pais = input(str('Ingrese país a consultar: '))
v_out_resultado = str('La cantidad actual de casos COVID19 en '+ v_in_pais + ' es de: ' + dic_covid.get(v_in_pais))
print(v_out_resultado)'''
# Pandas-based solution
df = pd.DataFrame({'Pais': paises,'Total de Casos': total_casos})
print(df)
| [
"efrainclg95@gmail.com"
] | efrainclg95@gmail.com |
d59fba167dfbf00e4351da026dd5f8908aa21840 | 7f88118c414d463568d9a7ab4b2d45ede318a71e | /tests/test_lattice.py | d568ba3620d4fa6b1ad644892c5b57208eae5fc0 | [] | no_license | Roger-luo/legacy-QMTK | 6bc85af10d02272455567db9b2662872295ed59b | fbf6b924e2ff030ebf50d20f6eba3ce8356313ed | refs/heads/master | 2020-03-28T00:21:04.319431 | 2018-09-04T20:04:56 | 2018-09-04T20:04:56 | 147,401,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | import unittest
from vmc.lattice import *
class TestLattice(unittest.TestCase):
    """Constructor-validation tests for LatticeBase."""

    def test_shape_assertions(self):
        """Malformed shape/length arguments must raise TypeError."""
        label = 'test lattice'
        self.assertRaises(TypeError, LatticeBase, label, (2, 3))
        self.assertRaises(TypeError, LatticeBase, label, length=(2, ))
        self.assertRaises(TypeError, LatticeBase, label, shape="2, 2")
        self.assertRaises(TypeError, LatticeBase, label)

    def test_pbc_assertions(self):
        """A non-boolean pbc flag must raise TypeError."""
        label = 'test lattice'
        self.assertRaises(TypeError, LatticeBase, label, shape=(2, 3), pbc=1)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| [
"hiroger@qq.com"
] | hiroger@qq.com |
b25580e8e69d4336c610b351a087267e1f0111af | 5e1ceb654da66bccbb06004efd7b68d3a1464ccb | /setup.py | 92fe2351a7ec8fa571ed1b759036d6449d8dcf2c | [
"MIT"
] | permissive | sotte/pelper | ad5efd430b4bb034003ea0ea4788883e64da77ac | a2af48a13099607e86672fbcd89ae879f69080ce | refs/heads/master | 2021-11-06T13:29:45.326836 | 2017-01-16T13:09:26 | 2017-01-16T13:09:26 | 33,563,010 | 3 | 0 | MIT | 2021-04-20T17:05:14 | 2015-04-07T19:13:43 | Python | UTF-8 | Python | false | false | 611 | py | #!/usr/bin/env python
# encoding: utf-8
# Packaging script for pelper; the long description is taken verbatim from
# the README so PyPI shows the same text as the repository.
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), "README.rst")) as f:
    long_description = f.read()
setup(
    name="pelper",
    version="0.1.0",
    description="pelper - python helper functions",
    long_description=long_description,
    author="Stefan Otte",
    author_email="stefan.otte@gmail.com",
    license="MIT",
    url="https://github.com/sotte/pelper",
    download_url="https://github.com/sotte/pelper",
    keywords="development heper functional decorator contextmanager",
    packages=["pelper"],
)
| [
"stefan.otte@gmail.com"
] | stefan.otte@gmail.com |
624a83a4fa1395d933e21974247bb7778e105cac | 6355485c857e1881b9769813f726a1ae489ca3a4 | /codigos/kuttaprimeiraordemedo.py | ac028829574ff2ff2aff45766663411c1d0b647a | [] | no_license | joaocassianox7x/Apostila | 95ab3dd56579888fe82d672285982c945bd71eae | 1af708ad0e3e4cc7ffe29dbedd6beb1e5ba9a699 | refs/heads/master | 2023-01-04T04:41:56.392649 | 2020-10-28T13:42:12 | 2020-10-28T13:42:12 | 269,219,126 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 721 | py | y0=5 #valor inicial
t0 = 0      # initial time (ponto inicial)
tf = 10     # final time (ponto final)
n = 10**5   # number of integration steps (numero de pontos)
dt = (tf - t0) / n

# y[i] holds the RK4 approximation at t = t0 + i*dt; y0 is set above.
y = []
y.append(y0)

def func(a, b):
    """Right-hand side of the ODE dy/dt = -y + t (a = y, b = t)."""
    return (-a + b)

# Classic 4th-order Runge-Kutta integration.
for i in range(n):
    t = i * dt
    k1 = func(y[i], t)
    # BUG FIX: k2 must be evaluated at the half step t + dt/2; the original
    # passed t, which silently degrades the scheme below 4th order.
    k2 = func(y[i] + k1 * dt / 2, t + dt / 2)
    k3 = func(y[i] + k2 * dt / 2, t + dt / 2)
    k4 = func(y[i] + k3 * dt, t + dt)
    y.append(y[i] + (dt / 6) * (k1 + k4 + 2 * (k2 + k3)))

import numpy as np
import matplotlib.pyplot as plt

t = np.linspace(t0, tf, n + 1)
# Analytic solution of y' = -y + t with y(t0) = y0 is
#   y(t) = t - 1 + C * exp(-(t - t0)),  C = y0 - t0 + 1.
# BUG FIX: the original hard-coded C = 3.5, which is wrong for y0 = 5
# (there C = 6); using the formula keeps the plot correct for any y0.
plt.plot(t, t - 1 + (y0 - t0 + 1) * np.exp(-(t - t0)), 'r')
plt.plot(t, y, 'b')
plt.legend(["Solução Analítica", "Solução por RK4"], loc="upper left")
plt.grid(True)
plt.xlim(t0, tf)
plt.xlabel("t (s)")
plt.savefig("rk4primeiraordem.png")
plt.savefig("rk4primeiraordem.png") | [
"joaoqt@DESKTOP-4MP42LT.localdomain"
] | joaoqt@DESKTOP-4MP42LT.localdomain |
c930cfcffa8ceb633d67d2224502bfb7a2b92ba9 | 6cfcf3f315573dbb0df774adaffbe33d33e1f783 | /hail/python/test/hail/fs/test_worker_driver_fs.py | 76e8d312ad855ba9d017611a29777960bfb75b4d | [
"MIT"
] | permissive | chrisvittal/hail | 2f96f2caa8686dfe5dfb0a4226023d80d111cfad | 20fc42f36bbd339f83c30b00081c88e238968ee7 | refs/heads/main | 2023-09-04T16:33:23.441342 | 2023-01-23T17:49:49 | 2023-01-23T17:49:49 | 132,908,677 | 0 | 0 | MIT | 2023-01-23T19:24:52 | 2018-05-10T14:03:56 | Python | UTF-8 | Python | false | false | 4,355 | py | import hail as hl
from hailtop.utils import secret_alnum_string
from hailtop.test_utils import skip_in_azure
@skip_in_azure
def test_requester_pays_no_settings():
    """Reading a requester-pays bucket with no billing project must fail."""
    try:
        hl.import_table('gs://hail-services-requester-pays/hello')
    except Exception as exc:
        assert "Bucket is a requester pays bucket but no user project provided" in exc.args[0]
    else:
        # Reaching here means the read unexpectedly succeeded.
        assert False
@skip_in_azure
def test_requester_pays_write_no_settings():
    """Writing to a requester-pays bucket with no billing project must fail."""
    # Random suffix so concurrent test runs cannot collide on the same path.
    random_filename = 'gs://hail-services-requester-pays/test_requester_pays_on_worker_driver_' + secret_alnum_string(10)
    try:
        hl.utils.range_table(4, n_partitions=4).write(random_filename, overwrite=True)
    except Exception as exc:
        assert "Bucket is a requester pays bucket but no user project provided" in exc.args[0]
    else:
        # Unexpected success: clean up the stray output before failing.
        hl.current_backend().fs.rmtree(random_filename)
        assert False
@skip_in_azure
def test_requester_pays_with_project():
    """Reads succeed when the billing project covers the bucket, fail otherwise.

    Each hl.stop()/hl.init() pair re-initializes Hail with a different
    gcs_requester_pays_configuration: project only, project plus matching
    bucket list(s), and finally a bucket list that excludes the test bucket.
    """
    hl.stop()
    hl.init(gcs_requester_pays_configuration='hail-vdc')
    assert hl.import_table('gs://hail-services-requester-pays/hello', no_header=True).collect() == [hl.Struct(f0='hello')]
    hl.stop()
    hl.init(gcs_requester_pays_configuration=('hail-vdc', ['hail-services-requester-pays']))
    assert hl.import_table('gs://hail-services-requester-pays/hello', no_header=True).collect() == [hl.Struct(f0='hello')]
    hl.stop()
    hl.init(gcs_requester_pays_configuration=('hail-vdc', ['hail-services-requester-pays', 'other-bucket']))
    assert hl.import_table('gs://hail-services-requester-pays/hello', no_header=True).collect() == [hl.Struct(f0='hello')]
    hl.stop()
    hl.init(gcs_requester_pays_configuration=('hail-vdc', ['other-bucket']))
    # Bucket list does not include the test bucket: the read must fail.
    try:
        hl.import_table('gs://hail-services-requester-pays/hello')
    except Exception as exc:
        assert "Bucket is a requester pays bucket but no user project provided" in exc.args[0]
    else:
        assert False
    hl.stop()
    hl.init(gcs_requester_pays_configuration='hail-vdc')
    assert hl.import_table('gs://hail-services-requester-pays/hello', no_header=True).collect() == [hl.Struct(f0='hello')]
@skip_in_azure
def test_requester_pays_with_project_more_than_one_partition():
    """Same configuration matrix as above, but with a multi-partition read."""
    # NB: this test uses a file with more rows than partitions because Hadoop's Seekable input
    # streams do not permit seeking past the end of the input (ref:
    # https://hadoop.apache.org/docs/stable/api/org/apache/hadoop/fs/Seekable.html#seek-long-).
    #
    # Hail assumes that seeking past the end of the input does not raise an EOFException (see, for
    # example `skip` in java.io.FileInputStream:
    # https://docs.oracle.com/javase/7/docs/api/java/io/FileInputStream.html)
    expected_file_contents = [
        hl.Struct(f0='idx'),
        hl.Struct(f0='0'),
        hl.Struct(f0='1'),
        hl.Struct(f0='2'),
        hl.Struct(f0='3'),
        hl.Struct(f0='4'),
        hl.Struct(f0='5'),
        hl.Struct(f0='6'),
        hl.Struct(f0='7'),
        hl.Struct(f0='8'),
        hl.Struct(f0='9'),
    ]
    hl.stop()
    hl.init(gcs_requester_pays_configuration='hail-vdc')
    assert hl.import_table('gs://hail-services-requester-pays/zero-to-nine', no_header=True, min_partitions=8).collect() == expected_file_contents
    hl.stop()
    hl.init(gcs_requester_pays_configuration=('hail-vdc', ['hail-services-requester-pays']))
    assert hl.import_table('gs://hail-services-requester-pays/zero-to-nine', no_header=True, min_partitions=8).collect() == expected_file_contents
    hl.stop()
    hl.init(gcs_requester_pays_configuration=('hail-vdc', ['hail-services-requester-pays', 'other-bucket']))
    assert hl.import_table('gs://hail-services-requester-pays/zero-to-nine', no_header=True, min_partitions=8).collect() == expected_file_contents
    hl.stop()
    hl.init(gcs_requester_pays_configuration=('hail-vdc', ['other-bucket']))
    # Bucket list does not include the test bucket: the read must fail.
    try:
        hl.import_table('gs://hail-services-requester-pays/zero-to-nine', min_partitions=8)
    except Exception as exc:
        assert "Bucket is a requester pays bucket but no user project provided" in exc.args[0]
    else:
        assert False
    hl.stop()
    hl.init(gcs_requester_pays_configuration='hail-vdc')
    assert hl.import_table('gs://hail-services-requester-pays/zero-to-nine', no_header=True, min_partitions=8).collect() == expected_file_contents
| [
"noreply@github.com"
] | chrisvittal.noreply@github.com |
1d33cf1d0cb93e7b9851d996ca9a16e73cfc0827 | e046c4e5c3a72700eae3e582b47b81ca41b34a85 | /TrackingSystem/studentAttendance/models.py | 8b7ecf271ffa4923a840b191460f728ead0759fb | [] | no_license | preru98/Tracking-API | 6749967f04cbe6f984606cd7a87b229df84fcb3c | c9e927089fb599396f1e0bf0c37137fa511c9220 | refs/heads/master | 2022-04-01T17:11:15.848018 | 2020-01-23T11:18:53 | 2020-01-23T11:18:53 | 233,945,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 898 | py | from django.db import models
from pygments.lexers import get_all_lexers
from pygments.styles import get_all_styles
from django.utils import timezone
# Create your models here.
class Student(models.Model):
    """A student identified by enrollment number (used as primary key)."""
    enrollmentNumber=models.CharField(primary_key=True,max_length=20)
    name=models.CharField(max_length=2000)
    course=models.CharField(max_length=2000)
    year=models.CharField(max_length=2000)
    # NOTE(review): stored as a plain CharField, i.e. apparently unhashed --
    # confirm whether hashing is applied elsewhere before writes.
    password=models.CharField(max_length=2000)
class Admin(models.Model):
    """An administrator account (id + password, no Django auth integration)."""
    adminID=models.CharField(max_length=2000)
    # NOTE(review): plain-text password field, same caveat as Student.
    password=models.CharField(max_length=2000)
class Tag(models.Model):
    """An RFID tag, keyed by its UID and bound one-to-one to a Student."""
    tagUID=models.CharField(max_length=200,primary_key=True)
    student=models.OneToOneField(Student,on_delete=models.CASCADE)
class TapTiming(models.Model):
    """One tap event: which tag was read and when (defaults to now)."""
    tapAt=models.DateTimeField(default=timezone.now)
    tag=models.ForeignKey(Tag, to_field='tagUID',on_delete=models.CASCADE)
| [
"prerna1998.ps@gmail.com"
] | prerna1998.ps@gmail.com |
b9bbeafefaafd8ff7661334198c1365cd73e36d1 | f73bcada5ab8432d2af07b5cb7fd7a38109d3e3a | /.history/parser_20201108170616.py | c0517d7feb1c60b713329f35cfcf547572ddba48 | [] | no_license | mariajbp/gedcomparser | 837bf4ae5628a81e535d233c7c35313c6d86d78c | 6fc55899e5a82c4071991ab94a344b64c014b84d | refs/heads/master | 2023-01-23T09:01:27.459597 | 2020-11-19T23:58:53 | 2020-11-19T23:58:53 | 310,900,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,247 | py | #!/usr/bin/python3
#python3 parser.py input/bible.gedcom > test.txt
import sys
from re import *
# Input file name (second path component of argv[1], e.g. "input/bible.gedcom"
# -> "bible.gedcom") and the output directory layout for the generated HTML.
filename = sys.argv[1].split('/')[1]
assetPath = "assets"
indPath = "individuals"
famPath = "families"
cssPath = "assets/gedcom.css"
def createFamily(fk,fi):
    """Write assets/families/<fk>.html for family `fk` with data dict `fi`.

    NOTE(review): the member loop prints keys/values to stdout instead of
    writing them into the HTML file, and the "return to index" link is
    emitted before the DOCTYPE -- both look unintended; confirm.
    """
    f = open('assets/families/'+fk+'.html', 'w')
    f.write('<h4> <a href=\"../index.html\"> return to index </a> </h4>')
    f.write('<!DOCTYPE html><html><head> <link rel="stylesheet" type="text/css" href="../index.css"></head>\n')
    f.write('<h1> Código da familia: ' + fk + '</h1>')
    for keys,values in fi.items():
        print(keys)
        print(values)
    f.close()
def createIndex(fam,indi):
    """Write assets/index.html linking every family and individual page.

    `fam` and `indi` are iterables of family/individual keys (e.g. "F1",
    "I12"); the page shows them as two link columns.
    """
    f = open("assets/index.html", 'w')
    f.write('<!DOCTYPE html><html><head> <link rel="stylesheet" type="text/css" href="index.css"></head>\n')
    f.write('<h1> Ficheiro: ' + filename + '</h1>')
    f.write('<div class="row"><div class="column"><h2>Familias</h2>')
    for keyf in fam:
        f.write('<li> <a href=\"'+famPath+'/'+keyf+'.html\">'+keyf+'</a></li>\n')
    f.write('</ul> </div>')
    f.write('<div class="column"><h2>Individuos</h2>')
    for keyi in indi:
        f.write('<li> <a href=\"'+indPath+'/'+keyi+'.html\">'+keyi+'</a></li>\n')
    f.write('</ul></div></div>')
    f.close()
# Global registry of parsed individuals, keyed by GEDCOM id (e.g. "I12").
BG = {}

def procIndi(s, i):
    """Parse one INDI record text `i` and register it in BG under key `s`."""
    person = {}
    name_match = search(r'\bNAME\s+(.*)', i)
    if name_match is not None:
        person['name'] = name_match.group(1)
    person['fams'] = findall(r'\bFAMS\s+@(.*)@', i)
    BG[s] = person
# Global registry of parsed families, keyed by GEDCOM id (e.g. "F3").
BF = {}

def procFam(f, i):
    """Parse one FAM record text `i` and register it in BF under key `f`."""
    family = {}
    for field, tag in (('husb', 'HUSB'), ('wife', 'WIFE')):
        match = search(r'\b' + tag + r'\s+@(.*)@', i)
        if match is not None:
            family[field] = match.group(1)
    family['child'] = findall(r'\bCHIL\s+@(.*)@', i)
    BF[f] = family
def process(t):
    """Split GEDCOM text into level-0 records and dispatch each parser."""
    items = split(r'\n0',t)
    for i in items:
        z = search(r'@(I\d+)@ *INDI', i) # find individual (INDI) records
        if z:
            procIndi(z.group(1),i)
        f = search(r'@(F\d+)@ *FAM', i) # find family (FAM) records
        if f:
            procFam(f.group(1),i)
# Driver: read the GEDCOM file given on the command line, parse it into the
# BG/BF registries, then emit the index page and one page per family.
with open(sys.argv[1], 'r') as f :
    gedcom = f.read()
process(gedcom)
createIndex(BF.keys(), BG.keys())
for k,v in BF.items():
    createFamily(k,v)
| [
"mariajbp00@gmail.com"
] | mariajbp00@gmail.com |
fea75ba699fe844ab645cadd70261594e509385c | 2d055d8d62c8fdc33cda8c0b154e2b1e81814c46 | /python/demo_everyday/JCP021.py | 027c3f47f88b0ed1859f4f2fce0a2b838edfc768 | [
"MIT"
] | permissive | harkhuang/harkcode | d9ff7d61c3f55ceeeac4124a2a6ba8a006cff8c9 | faab86571ad0fea04c873569a806d2d7bada2e61 | refs/heads/master | 2022-05-15T07:49:23.875775 | 2022-05-13T17:21:42 | 2022-05-13T17:21:53 | 20,355,721 | 3 | 2 | MIT | 2019-05-22T10:09:50 | 2014-05-31T12:56:19 | C | GB18030 | Python | false | false | 571 | py | '''
【程序21】
题目:猴子吃桃问题:猴子第一天摘下若干个桃子,当即吃了一半,还不瘾,又多吃了一个
第二天早上又将剩下的桃子吃掉一半,又多吃了一个。以后每天早上都吃了前一天剩下
的一半零一个。到第10天早上想再吃时,见只剩下一个桃子了。求第一天共摘了多少。
1.程序分析:采取逆向思维的方法,从后往前推断。
2.程序源代码:
'''
x2 = 1
for day in range(9,0,-1):
x1 = (x2 + 1) * 2
x2 = x1
print x1
| [
"shiyanhk@gmail.com"
] | shiyanhk@gmail.com |
1923ef1562239a13811ff066eaf416932277969a | 739e4b3a85a9fef326ac93dbf3869d0a26d3610b | /gym/atari/zaxxon.py | 4d4f0bf62050b8d0e0f00add4243562b6a8dfa23 | [] | no_license | seantzu/nodes | a56c721855947bc110fafc1568fcfd38122f9cfa | ef7680cea762eac9723f0fa5e0f24ba0aa8c76cd | refs/heads/master | 2022-11-14T01:52:00.909125 | 2020-07-04T20:46:23 | 2020-07-04T20:46:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,391 | py | from PyQt5.QtWidgets import QWidget, QAction
# Action Settings: operator/action symbols loaded from the [ACTION] section
# of the INI file; missing file or keys raise KeyError at import time.
import configparser
config = configparser.ConfigParser()
config.read('config/action_settings.ini')
DOT = config['ACTION']['DOT']
SEP = config['ACTION']['SEP']
ADD = config['ACTION']['ADD']
SUB = config['ACTION']['SUB']
DIV = config['ACTION']['DIV']
MUL = config['ACTION']['MUL']
SPL = config['ACTION']['SPL']
NPZ = config['ACTION']['NPZ']
MOD = config['ACTION']['MOD']
JSN = config['ACTION']['JSN']
# Whitespace shorthands used when generating code/text.
N = '\n'
T = '\t'
TT = '\t\t'
TTT = '\t\t\t'
class Zaxxon(QWidget):
    """Node widget exposing the Gym Zaxxon environment's I/O attributes.

    Registers the run parameters (Program/Episode/Length) and the per-step
    outputs (Observation/Reward/Done/Info) on node `n` via nodeflow_main.
    """
    def __init__(self, nodeflow_main, n, nn, config):
        super(Zaxxon, self).__init__()
        nodeflow_main.createAttribute(node=n, name='Program', preset='String', socket=True, plug=True, dataType='str', dataAttr='Zaxxon-v0')
        nodeflow_main.createAttribute(node=n, name='Episode', preset='Integer', socket=True, plug=True, dataType='int', dataAttr=20)
        nodeflow_main.createAttribute(node=n, name='Length', preset='Integer', socket=True, plug=True, dataType='int', dataAttr=1000)
        nodeflow_main.createAttribute(node=n, name='Observation', preset='String', socket=True, plug=True, dataType='str', dataAttr='')
        nodeflow_main.createAttribute(node=n, name='Reward', preset='Float', socket=True, plug=True, dataType='flt', dataAttr='')
        nodeflow_main.createAttribute(node=n, name='Done', preset='Bool', socket=True, plug=True, dataType='bool', dataAttr='')
        nodeflow_main.createAttribute(node=n, name='Info', preset='String', socket=True, plug=True, dataType='str', dataAttr='')
class ZaxxonAction(QAction):
"""Model Create"""
def __init__(self, attrData, config):
super(ZaxxonAction, self).__init__()
import gym
PROGRAM = attrData.get('Program')
EPISODE = int(attrData.get('Episode'))
LENGTH = int(attrData.get('Length'))
env = gym.make(PROGRAM)
for i_episode in range(EPISODE):
observation = env.reset()
for t in range(LENGTH):
env.render()
#print(observation)
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
if done:
print("Episode finished after {} timesteps".format(t + 1))
break
env.close() | [
"noreply@github.com"
] | seantzu.noreply@github.com |
1e396fbd1caf22ebd33c275b43dfae9d82db65e3 | c3cc6931a0e6e5026287062d92ec40967204677d | /python/rekurencja01.py | 2f8b4dbe83a28d841364ab05c92caa61202f8ba3 | [] | no_license | grabiarzgrabek/gitrepo | fb1b393e8c1cea2cbcdd024711a0c4a8f8b52c4f | 709f5f75bf3d419f8b474783200f8bb70823856d | refs/heads/master | 2021-06-27T23:06:48.773787 | 2020-10-08T10:38:02 | 2020-10-08T10:38:02 | 148,303,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# rekurencja01.py
import turtle
def rysuj_it(ile, bok, kat):
    """Iteratively draw `ile` segments of length `bok`, turning `kat` degrees
    right after each (draws a closed polygon when ile*kat == 360)."""
    for i in range(ile):
        turtle.forward(bok)
        turtle.right(kat)
def rysuj_rek(ile, bok, kat):
    """Recursive equivalent of rysuj_it: one segment per call, recursing
    with ile - 1 until the counter drops below 1."""
    if ile < 1:
        return
    turtle.forward(bok)
    turtle.right(kat)
    rysuj_rek(ile - 1, bok, kat)
def main(args):
    """Open an 800x600 turtle window, draw a 200px square recursively,
    and wait for the window to close. Returns 0 as the exit status."""
    turtle.setup(800, 600)
    rysuj_rek(4, 200, 90)
    turtle.done()
    return 0

# Script entry point: exit status comes from main().
if __name__ == '__main__':
    import sys
    sys.exit(main(sys.argv))
| [
"jakub.grabowski@lo1.sandomierz.pl"
] | jakub.grabowski@lo1.sandomierz.pl |
4a5d3fe945019ad4717eef5286af1768dc05b083 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_vicarage.py | 3cfe8615692b2c6a7f3f67bc930f9033fcdd2e06 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py |
#calss header
class _VICARAGE():
def __init__(self,):
self.name = "VICARAGE"
self.definitions = [u'the house in which a vicar lives']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
2c764f7c8e22340f36512e60342c9aeb7429c598 | 5ce39692ed812520cd8755c0b6f1188946bb5db2 | /list.py | e0b375d480737cafa370d90499dab39f62f2392c | [] | no_license | pavankumar102/Python-tasks | a77ee44059c751eb63eb88719af33f0077b1a021 | 944e684fc261cb62ca6b5dfd6abeb6dfd5aae3ce | refs/heads/master | 2022-06-17T07:33:55.471543 | 2020-05-08T11:49:44 | 2020-05-08T11:49:44 | 262,309,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | answer2 = []
# Append every even value to the (already created) answer2 list.
answer2.extend(value for value in [1, 2, 3, 4, 5, 6] if value % 2 == 0)
print(answer2) | [
"noreply@github.com"
] | pavankumar102.noreply@github.com |
24901d0ff6fc3eab5e87e460b6957d58801f6d0b | 2751b066fc1b664ac87c810624489e70ecef957c | /Library_Build/BEST_Ancient/6_2temp_modulation.py | d186c029b347086d5f4769dc256b4a122656d1c3 | [] | no_license | anttonalberdi/EvoGenOT | 32147222c90b7fb4f689a77632b2597eabb4b0eb | 855fec9d2e47ca12b95e14dae3f43260caee4d75 | refs/heads/master | 2023-03-16T09:48:17.381275 | 2023-03-10T09:03:23 | 2023-03-10T09:03:23 | 169,191,221 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | from opentrons import labware, instruments, modules, robot
from opentrons.legacy_api.modules import tempdeck
'''
ssh into robot and
then run ls /dev/tty*
you are looking for two values with the format /dev/ttyACM*
you will use those values for line 22 and 23.
If you need to know which tempdeck is hooked up to which port.
You will unplug one, then run ls /dev/tty*. There should only be one /dev/ttyACM*.
This will correlate to the tempdeck that is plugged in. Then you plug the other temp deck in and run ls /dev/tty* again.
There should now be two /dev/ttyACM*s, the one that was not there in the previous command will correlate to the
tempdeck you just plugged in.
'''
# Drive two temperature modules directly over their serial ports (the ACM
# device names come from the procedure described above).
temp_deck_1 = tempdeck.TempDeck()
temp_deck_2 = tempdeck.TempDeck()
temp_deck_1._port = '/dev/ttyACM3'
temp_deck_2._port = '/dev/ttyACM2'
# Only talk to real hardware; skip serial I/O during protocol simulation.
if not robot.is_simulating():
    temp_deck_1.connect()
    temp_deck_2.connect()
# Register the modules with the deck layout (slots 4 and 7).
temp_deck1 = modules.load('tempdeck', '4')
temp_deck2 = modules.load('tempdeck', '7')
# Cool both decks to 10 C and block until the setpoint is reached.
temp_deck_1.set_temperature(10)
temp_deck_2.set_temperature(10)
temp_deck_1.wait_for_temp()
temp_deck_2.wait_for_temp()
| [
"genomicsisawesome@gmail.com"
] | genomicsisawesome@gmail.com |
4c9ec16df1b6a85b34a767c4e8a4d46e53d950f7 | 82256eb259bf5fa75a8f15500a6b5a1306a07034 | /addintegers3.py | f6d586ed9a6c80d2002f3850a12e20180a03404d | [] | no_license | dennisnderitu254/Andela-Exercises | 1c0d2c309b6ea113a4d812e313ded867f6dea9a4 | edb17f0ed867a4436478a8d9bf5690a749155781 | refs/heads/master | 2021-05-05T13:38:06.658363 | 2017-10-31T14:35:38 | 2017-10-31T14:35:38 | 105,002,996 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | # Exhibiting functional composition
# Read two integers interactively and print their sum (Python 2: raw_input).
num1 = int(raw_input('Please enter an integer value:'))
num2 = int(raw_input('Please enter another integer value:'))
# NOTE(review): under Python 2 this prints a tuple, e.g. (1, '+', 2, '=', 3),
# because print is a statement -- confirm that output format is intended.
print(num1, '+', num2, '=', num1 + num2)
"dknderitu@gmail.com"
] | dknderitu@gmail.com |
9e5abd1461765252c1be5a3bab071d13a90377ef | 74010a13d5f6be7e3189057608d7a643ae9359ce | /final_report/libcsvanaly2/script_sample/committers_per_moth.py | c7c0ebc1fbcf1ddb6f26b9b319cae43e1df581e9 | [
"CC-BY-3.0"
] | permissive | ricardogarfe/mswl-project-evaluation | 3d993941ebc54763823e93b1ef1208c42a27fdac | cc51cda61af021f859e319369ae555ba4c2f76ea | refs/heads/master | 2016-09-06T11:35:56.216523 | 2013-06-25T08:33:34 | 2013-06-25T08:33:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,458 | py | # Copyright (C) 2007-2011 GSyC/LibreSoft, Universidad Rey Juan Carlos
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Authors:
# Daniel Izquierdo Cortazar <dizquierdo@libresoft.es>
import matplotlib as mpl
#this avoid the use of the $DISPLAY value for the charts
mpl.use('Agg')
import matplotlib.pyplot as plt
import MySQLdb
import sys
import datetime as dt
def connect(database):
    """Open a connection to the local MySQL server and return a cursor.

    Credentials are hard-coded for the local analysis environment.
    On failure an error message is printed and None is returned (implicitly),
    matching the original behavior.
    """
    user = 'root'
    password = 'admin'
    host = 'localhost'
    dbaux = database
    try:
        db = MySQLdb.connect(host,user,password,dbaux)
        return db.cursor()
    except Exception:
        # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed; connection errors still only
        # print and fall through to an implicit None as before.
        print("Database connection error")
def main(database):
    """Plot per-month 2012 commit counts for committer 166 from `database`
    and save the chart to basic_timeseries.eps (Python 2 script).

    NOTE(review): `dates`/`commits` are re-created for every matching row,
    so each plot() call draws a single point; `total_commits` and `i` are
    never used -- this looks like work-in-progress analysis code.
    """
    #Commits per committer limited to the 30 first with the highest accumulated activity
    # query = "select year(date), month(date), day(date), count(*) from scmlog where year(date) = 2012 group by year(date), month(date), day(date)"
    # query = "select committer_id, year(date), month(date), count(*) from scmlog where year(date) = 2012 group by committer_id, year(date), month(date) order by committer_id, year(date), month(date), count(*) desc"
    query = "select committer_id, year(date), month(date), count(*) from scmlog where year(date) = 2012 and committer_id = 166 group by committer_id, year(date), month(date) order by month(date), count(*) desc limit 24"
    # query "select committer_id, year(date), month(date), day(date), count(*) from scmlog where year(date) = 2012 and committer_id = 166 group by year(date), month(date), day(date) order by month(date), day(date), count(*)"
    #Connecting to the data base and retrieving data
    connector = connect(database)
    results = int(connector.execute(query))
    if results > 0:
        results_aux = connector.fetchall()
    else:
        print("Error when retrieving data")
        return
    total_commits = 0
    #Creating the final boxplot
    fig = plt.figure()
    plt.title('Evolution of Commits')
    committer_dict = {}
    for date_commit in results_aux:
        committer = int(date_commit[0])
        committer_dict[committer] = 0
    i = 0
    for key in committer_dict.keys():
        for date_commit in results_aux:
            committer = int(date_commit[0])
            dates = []
            commits = []
            if (key == committer):
                year = int(date_commit[1])
                month = int(date_commit[2])
                num_commits = int(date_commit[3])
                dates.append(dt.date(year, month, 1))
                commits.append(num_commits)
                ax = fig.add_subplot(111)
                ax.plot(dates, commits, label=str(committer))
                print dates, commits, committer
        i+=1
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    fig.autofmt_xdate()
    plt.savefig('basic_timeseries.eps')
# Entry point: first CLI argument is the database name.
if __name__ == "__main__":main(sys.argv[1])
| [
"ricardogarfe@gmail.com"
] | ricardogarfe@gmail.com |
b48b19e2be663479ec72196ec8100185ff0a4510 | 3f1b33d22ef0aadcc36d9871c9f52ca79b0f78a7 | /setup.py | cbd4973451ae093242b47c37545970dff1fe1f1d | [
"MIT"
] | permissive | ujjwalsh/geolinks | 2f3098eceba000be2fe8b006cc7be54ae730ef11 | 78c45413c2476d37500a04e628cc0219b83456c7 | refs/heads/master | 2022-12-20T06:58:32.943244 | 2020-09-30T21:30:45 | 2020-09-30T21:30:45 | 300,066,926 | 0 | 0 | MIT | 2020-09-30T21:25:33 | 2020-09-30T21:25:32 | null | UTF-8 | Python | false | false | 2,324 | py | # =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2019 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
from setuptools import find_packages, setup
from geolinks import __version__ as version
# Packaging metadata for geolinks; version is imported from the package so
# it is defined in exactly one place.
setup(
    name='geolinks',
    version=version,
    description='Utilities to deal with geospatial links',
    long_description=open('README.md').read(),
    long_description_content_type='text/markdown',
    license='MIT',
    platforms='all',
    keywords='links geo protocol url href',
    author='Tom Kralidis',
    author_email='tomkralidis@gmail.com',
    maintainer='Tom Kralidis',
    maintainer_email='tomkralidis@gmail.com',
    url='https://github.com/geopython/geolinks',
    packages=find_packages(),
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.5',
        'Topic :: Scientific/Engineering :: GIS',
    ]
)
| [
"tomkralidis@gmail.com"
] | tomkralidis@gmail.com |
782f966d82f9d59fdecaddc71681f7806a75fd57 | 85c856de997b16103d51731b3d528f7d0dd265bf | /PythonIntroduction/FileManager/Core/safeCast.py | 1c759eaf578781799c3ac8e8a0ba3772ed3637c4 | [] | no_license | NyarukouSAMA/py_geekbrains | ff3a5589b85273af9b398098ec30ce95eb452d58 | 7a216719e1cefc1ffbbb1c2bf65f1f185b43ca8f | refs/heads/master | 2021-07-04T11:19:21.850154 | 2020-12-27T22:36:29 | 2020-12-27T22:36:29 | 210,034,160 | 0 | 0 | null | 2019-10-05T06:33:36 | 2019-09-21T18:24:26 | Python | UTF-8 | Python | false | false | 138 | py | def safeCast(val, to_type, default=None):
    # Attempt the conversion; fall back to `default` on the two exception
    # types a failed cast normally raises (other exceptions propagate).
    try:
        return to_type(val)
    except (ValueError, TypeError):
        return default
| [
"afro.detray@gmail.com"
] | afro.detray@gmail.com |
a972d8916751e7929616031a929acb51c7a7b956 | 3e2447737acc8e6bef6728b1a8e5f1d5e6db2968 | /opennem/pipelines/wem/balancing_summary.py | fd431bcabfb48da0aacae2723bb3de03f7e58e17 | [
"MIT"
] | permissive | gaslitbytech/opennem | 5a5197003662725ccd2f82d790cdb1495a975a07 | deec3e2079db9d9d84171010fd0c239170d1e7ce | refs/heads/master | 2023-07-23T14:08:28.949054 | 2020-10-09T03:53:20 | 2020-10-09T03:53:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,148 | py | import csv
import logging
from sqlalchemy.dialects.postgresql import insert
from opennem.db.models.opennem import BalancingSummary
from opennem.pipelines import DatabaseStoreBase
from opennem.schema.network import NetworkWEM
from opennem.utils.dates import parse_date
from opennem.utils.pipelines import check_spider_pipeline
logger = logging.getLogger(__name__)
class WemStoreBalancingSummary(DatabaseStoreBase):
@check_spider_pipeline
def process_item(self, item, spider=None):
s = self.session()
csvreader = csv.DictReader(item["content"].split("\n"))
records_to_store = []
for record in csvreader:
trading_interval = parse_date(
record["Trading Interval"], dayfirst=True, network=NetworkWEM
)
if not trading_interval:
continue
records_to_store.append(
{
"network_id": "WEM",
"network_region": "WEM",
"trading_interval": trading_interval,
"forecast_load": record["Load Forecast (MW)"],
"generation_scheduled": record[
"Scheduled Generation (MW)"
],
"generation_non_scheduled": record[
"Non-Scheduled Generation (MW)"
],
"generation_total": record["Total Generation (MW)"],
"price": record["Final Price ($/MWh)"],
}
)
stmt = insert(BalancingSummary).values(records_to_store)
stmt.bind = self.engine
stmt = stmt.on_conflict_do_update(
constraint="balancing_summary_pkey",
set_={
"price": stmt.excluded.price,
"generation_total": stmt.excluded.generation_total,
},
)
try:
r = s.execute(stmt)
s.commit()
except Exception as e:
logger.error("Error inserting records")
logger.error(e)
finally:
s.close()
return len(records_to_store)
| [
"nc9@protonmail.com"
] | nc9@protonmail.com |
1ac6c6523534f0223ccc49ac14264ea5b5c24ed5 | 82050d2db2b696a96acbdd3c76d1f405cdde36f5 | /dubber.py | 6caa61c937868f16b14d32e5c548051ea4af7384 | [
"MIT"
] | permissive | ryan-hennings/meow-mix | d7a6bf3b52e2a7f18695ecfcbc74ac2f05e12edf | a2880064295f39415dd530e9aaf91ff54241258d | refs/heads/master | 2022-12-14T15:14:44.879080 | 2020-09-19T05:20:49 | 2020-09-19T05:20:49 | 295,260,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | from pydub import AudioSegment
from pydub.playback import play
from fx import pitch
from alg import mergeSort
cut = AudioSegment.from_file("sounds/perc-g.wav", format="wav")
cut = cut[:200]
res = cut
for ix in range(10):
res = res.append(pitch(cut, ix+1), crossfade=0)
for ix in range(10, 0, -1):
res = res.append(pitch(cut, ix / 4.0), crossfade=1)
for ix in range(18):
res = res.append(pitch(cut, ix / 4.0), crossfade=1)
for ix in range(16):
res = res.append(pitch(cut, (16 - ix) / 2.0), crossfade=1)
res = res.overlay(pitch(res, 1.2, .2), 100)
play(res)
#cut.export("cut.wav", format="wav")
| [
"ryanhennings@gmail.com"
] | ryanhennings@gmail.com |
7dc31a452216853385ec1664c59a02ea650ea8b5 | 0c35b2e6cc49e465c4c239552a81c0598231a15d | /first_test2.py | 2cc2ea644c54197b0331fa0f745987d54e758f61 | [] | no_license | DimBottom/KaggleTitanic | 7c84acd883c08e13d2f34c9a1bfda89a31f160c7 | 27cf5772522def5903f039a0c4041ef35d537c4e | refs/heads/master | 2020-05-22T18:10:23.132814 | 2019-05-13T17:53:31 | 2019-05-13T17:53:31 | 186,467,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,110 | py | # -*- coding: utf-8 -*-
"""
Created on Sun May 5 22:57:49 2019
@author: DimBottom
"""
import pandas as pd
import numpy as np
#采集数据
data_train = pd.read_csv(r"C:Users\87515\Desktop\train.csv")
#基本描述
#print(data_train.describe())
import os
if not (os.path.exists('DF')):
os.mkdir('DF')
#data_train.describe().to_csv(r'DF\describe.csv')
#可视化分析
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
fig = plt.figure(figsize = (12,6))
fig.set(alpha = 0)
fig.set(dpi = 72)
##生存情况
#data_train.Survived.value_counts().plot(kind = 'bar', width = .2)
#plt.title('生存情况')
#S = data_train.Survived.value_counts()
#for index, y in enumerate(np.array(data_train.Survived.value_counts())):
# plt.text(index, y+20, '%d' % y, ha='center', va= 'top', size = 14)
#plt.xticks(rotation = 0, size = 14)
#plt.savefig('DF\生存情况')
#plt.show()
##乘客等级
#S = data_train.Pclass
#S.value_counts().plot(kind = 'bar', width = .2)
#plt.title('乘客等级')
#for index, y in enumerate(np.array(S.value_counts())):
# plt.text(index, y+20, '%d' % y, ha='center', va= 'top', size = 14)
#plt.xticks(rotation = 0, size = 14)
#plt.savefig('DF\乘客等级')
#plt.show()
##乘客性别
#S = data_train.Sex
#S.value_counts().plot(kind = 'bar', width = .2)
#plt.title('乘客性别')
#for index, y in enumerate(np.array(S.value_counts())):
# plt.text(index, y+20, '%d' % y, ha='center', va= 'top', size = 14)
#plt.xticks(rotation = 0, size = 14)
##plt.savefig('DF\乘客性别')
#plt.show()
##Female&male与Survived关系
#S1 = data_train.Sex
#S2 = data_train.Survived
#
#plt.subplot(1,2,1)
#S1.value_counts().plot(kind='bar', color = 'g', width=.2, label='0')
#plt.xticks(rotation = 0, size = 14)
#plt.bar(range(0,2), S1[S2 == 1].value_counts(),color = 'b', width=.2,label='1')
#plt.legend()
#plt.title(r'Female&Male的Survived情况', size = 14)
#
#plt.subplot(1,2,2)
#S2.value_counts().plot(kind='bar', width=.2, color = 'g', label='Female')
#plt.xticks(rotation = 0, size = 14)
#plt.bar(range(0,2), S2[S1 == 'male'].value_counts(), color = 'b', width=.2, label='Male')
#plt.legend()
#plt.title('Survived的Female&Male情况')
#
#plt.savefig(r'DF\Female&male与Survived关系')
##年龄与生存的关系
#S1=data_train.Age
#S2=data_train.Surviveds
#s1=S1.value_counts().sort_index()
#s2=S1[S2 == 0].value_counts().sort_index()
#plt.bar(s1.index, s1, width = .6, label='获救')
#plt.bar(s2.index, s2, width = .6, label='未获救')
#plt.legend()
#plt.xticks(rotation = 0, size = 14)
#plt.xticks(range(0,85,5))
#plt.xlim(-1,81)
#plt.ylim(0,31)
#plt.title('年龄与生存的关系', size=14)
#plt.xlabel('年龄', size=14)
#plt.ylabel('')
#plt.savefig('DF\年龄与生存的关系')
#plt.show()
##不同等级船舱的年龄分布
#S1=data_train.Age
#S2=data_train.Pclass
#S1[S2==1].plot(kind='kde', label='头等舱')
#S1[S2==2].plot(kind='kde', label='二等舱')
#S1[S2==3].plot(kind='kde', label='三等舱')
#plt.xlabel('年龄',size=14)
#plt.ylabel('')
#plt.legend()
#plt.title('不同等级船舱的年龄分布', size=14)
#plt.savefig('DF\不同等级船舱的年龄分布')
#print('头等舱平均年龄:',S1[S2==1].mean())
#print('二等舱平均年龄:',S1[S2==2].mean())
#print('三等舱平均年龄:',S1[S2==3].mean())
##各乘客等级的获救情况
#S1=data_train.Pclass
#S2=data_train.Survived
#df=pd.DataFrame({u'获救':S1[S2 == 1].value_counts(), u'未获救':S1[S2 == 0].value_counts()})
#df.plot(kind='bar', stacked=True)
#plt.xticks(rotation=0)
#plt.title(u"各乘客等级的获救情况")
#plt.xlabel(u"乘客等级")
#plt.ylabel(u"人数")
#plt.savefig(r'DF\各乘客等级的获救情况')
#plt.show()
#S1 = data_train.Sex
#S2 = data_train.Age
#df = pd.DataFrame({u'女':S2[S1=='female'],u'男':S2[S1=='male']})
#print(S1[S1=='female'].count())
#print(S1[S1=='male'].count())
#print(df)
#df.plot(kind='bar')
##根据舱等级和性别的获救情况
#S1 = data_train.Sex
#S2 = data_train.Pclass
#S3 = data_train.Survived
#
#plt.subplot(141)
#plt.title(u"高级船舱女性的获救情况")
#plt.bar([0,1], S3[S1=='female'][S2!=3].value_counts().sort_index(), color='#FA2479', width=.5)
#plt.xticks([0,1],[u'未获救',u'获救'])
#plt.xlim([-.5,1.5])
#plt.yticks(range(0,350,100))
#plt.legend([u"女性/高级舱"], loc='best')
#
#plt.subplot(142)
#plt.title(u"低级船舱女性的获救情况")
#plt.bar([0,1], S3[S1=='female'][S2==3].value_counts().sort_index(), color='pink', width=.5)
#plt.xticks([0,1],[u'未获救',u'获救'])
#plt.xlim([-.5,1.5])
#plt.yticks(range(0,350,100))
#plt.legend([u"女性/低级舱"], loc='best')
#
#plt.subplot(143)
#plt.title(u"高级船舱男性的获救情况")
#plt.bar([0,1], S3[S1=='male'][S2!=3].value_counts().sort_index(), color='lightblue', width=.5)
#plt.xticks([0,1],[u'未获救',u'获救'])
#plt.xlim([-.5,1.5])
#plt.yticks(range(0,350,100))
#plt.legend([u"男性/高级舱"], loc='best')
#
#plt.subplot(144)
#plt.title(u"低级船舱男性的获救情况")
#plt.bar([0,1], S3[S1=='male'][S2==3].value_counts().sort_index(), color='steelblue', width=.5)
#plt.xticks([0,1],[u'未获救',u'获救'])
#plt.xlim([-.5,1.5])
#plt.yticks(range(0,350,100))
#plt.legend([u"男性/低级舱"], loc='best')
#
#plt.savefig(r'DF\根据舱等级和性别的获救情况')
#plt.show()
##各登录港口乘客的获救情况
#Survived_0 = data_train.Embarked[data_train.Survived == 0].value_counts()
#Survived_1 = data_train.Embarked[data_train.Survived == 1].value_counts()
#df=pd.DataFrame({u'获救':Survived_1, u'未获救':Survived_0})
#df.plot(kind='bar', stacked=True)
#plt.xticks(rotation=0)
#plt.title(u"各登录港口乘客的获救情况")
#plt.xlabel(u"登录港口")
#plt.ylabel(u"人数")
#plt.savefig(r"DF\各登录港口乘客的获救情况")
#plt.show()
##不同港口的船舱等级、性别情况
#S=data_train.Embarked
#S1=data_train.Pclass
#S2=data_train.Sex
#plt.subplot(131)
#plt.title(u'S港口')
#print(S1[S=='S'].value_counts().sort_index())
#plt.bar([0.5,0.6,0.7],S1[S=='S'].value_counts().sort_index(),width=0.1,color=['pink','lightgreen','lightblue'])
#plt.bar([1.5,1.6],S2[S=='S'].value_counts().sort_index(),width=0.1,color=['steelblue','#FA2479'])
#plt.xlim([0,2])
#plt.xticks([0.6,1.55],[u'船舱等级(1、2、3)',u'性别(女/男)'])
#plt.yticks(range(0,500,100))
#plt.ylabel(u'人数')
#
#plt.subplot(132)
#plt.title(u'C港口')
#plt.bar([0.5,0.6,0.7],S1[S=='C'].value_counts().sort_index(),width=0.1,color=['pink','lightgreen','lightblue'])
#plt.bar([1.5,1.6],S2[S=='C'].value_counts().sort_index(),width=0.1,color=['steelblue','#FA2479'])
#plt.xlim([0,2])
#plt.xticks([0.6,1.55],[u'船舱等级(1、2、3)',u'性别(女/男)'])
#plt.yticks(range(0,500,100))
#plt.ylabel(u'人数')
#
#plt.subplot(133)
#plt.title(u'Q港口')
#plt.bar([0.5,0.6,0.7],S1[S=='Q'].value_counts().sort_index(),width=0.1,color=['pink','lightgreen','lightblue'])
#plt.bar([1.5,1.6],S2[S=='Q'].value_counts().sort_index(),width=0.1,color=['steelblue','#FA2479'])
#plt.xlim([0,2])
#plt.xticks([0.6,1.55],[u'船舱等级(1、2、3)',u'性别(女/男)'])
#plt.yticks(range(0,500,100))
#plt.ylabel(u'人数')
#plt.savefig(r'DF\不同港口的船舱等级、性别情况')
#plt.show()
##不同港口的年龄分布
#S1=data_train.Age
#S2=data_train.Embarked
#S1[S2=='S'].plot(kind='kde', label='S')
#S1[S2=='C'].plot(kind='kde', label='C')
#S1[S2=='Q'].plot(kind='kde', label='Q')
#plt.xlabel('年龄',size=14)
#plt.ylabel('')
#plt.legend()
#plt.title('不同等级船舱的年龄分布', size=14)
#plt.savefig('DF\不同港口的年龄分布')
##船票费用和生存的关系
#S=data_train.sort_values('Fare')
#S1=S.Survived
#S2=S.Fare
#S1.loc[S1==0]=-1
#plt.scatter(S2,S1)
#plt.title('船票费用和生存的关系')
#plt.xlim([-5,515])
#plt.yticks([-1,1])
#ax=plt.gca()
#ax.spines['bottom'].set_position(('data',0))
#ax.spines['top'].set_color('none')
#plt.savefig(r'DF\船票费用和生存的关系')
##兄妹&配偶数和生存的关系
#S=data_train
#S1=S.Survived
#S2=S.SibSp[S1==1].value_counts().sort_index()
#S3=S.SibSp[S1==0].value_counts().sort_index()
##print(S2)
##print(S3)
#plt.bar(S2.index,S2,width=.4)
#plt.bar(S3.index,-S3,width=.4)
#plt.title('兄妹&配偶数和生存的关系')
#plt.xticks(range(0,9))
#ax=plt.gca()
#ax.spines['bottom'].set_position(('data',0))
#ax.spines['top'].set_color('none')
#plt.savefig(r'DF\兄妹&配偶数和生存的关系')
##子女&父母数和生存的关系
#S=data_train
#S1=S.Survived
#S2=S.Parch[S1==1].value_counts().sort_index()
#S3=S.Parch[S1==0].value_counts().sort_index()
##print(S2)
##print(S3)
#plt.bar(S2.index,S2,width=.4)
#plt.bar(S3.index,-S3,width=.4)
#plt.title('子女&父母数和生存的关系')
#plt.xticks(range(0,9))
#ax=plt.gca()
#ax.spines['bottom'].set_position(('data',0))
#ax.spines['top'].set_color('none')
#plt.savefig(r'DF\子女&父母数和生存的关系')
##按Cabin有无看获救情况
#Survived_cabin = data_train.Survived[pd.notnull(data_train.Cabin)].value_counts()
#Survived_nocabin = data_train.Survived[pd.isnull(data_train.Cabin)].value_counts()
#df=pd.DataFrame({u'有':Survived_cabin, u'无':Survived_nocabin}).transpose()
#df.plot(kind='bar', stacked=True)
#plt.title(u"按Cabin有无看获救情况")
#plt.xticks(rotation=0)
#plt.xlabel(u"Cabin有无")
#plt.ylabel(u"人数")
#plt.savefig(r'DF\按Cabin有无看获救情况')
#plt.show()
##年龄和船票费用的关系
#S=data_train.sort_values('Age')
#S1=data_train.Age
#S2=data_train.Fare
#plt.scatter(S1,S2)
#plt.title('年龄和船票费用的关系')
#plt.xlabel(u'年龄')
#plt.ylabel(u'费用')
#plt.ylim(-1,550)
#plt.savefig(u'DF\年龄和船票费用的关系')
#plt.show()
##年龄和SibSp的关系
#S=data_train.sort_values('Age')
#S1=data_train.Age
#S2=data_train.SibSp
#plt.scatter(S1,S2)
#plt.title('年龄和SibSp的关系')
#plt.xlabel(u'年龄')
#plt.ylabel(u'SibSp')
#plt.savefig(u'DF\年龄和SibSp的关系')
#plt.show()
##年龄和Parch的关系
#S=data_train.sort_values('Age')
#S1=data_train.Age
#S2=data_train.Parch
#plt.scatter(S1,S2)
#plt.title('年龄和Parch的关系')
#plt.xlabel(u'年龄')
#plt.ylabel(u'Parch')
#plt.savefig(u'DF\年龄和Parch的关系')
#plt.show()
##年龄和性别的关系
#S=data_train.sort_values('Age')
#S1=S.Age
#S2=S.Sex
#S3=S1[S2=='female'].value_counts()
#S4=S1[S2=='male'].value_counts()
#plt.subplot(211)
#plt.title('年龄和性别的关系')
#plt.bar(S3.index, S3)
#plt.xlabel('女')
#plt.yticks(range(0,25,5))
#plt.ylabel('人数')
#plt.subplot(212)
#plt.bar(S4.index, S4, color='orange')
#plt.xlabel('男')
#plt.yticks(range(0,25,5))
#plt.ylabel('人数')
#plt.savefig(u'DF\年龄和性别的关系')
#plt.show()
| [
"noreply@github.com"
] | DimBottom.noreply@github.com |
a37a3e5c06a94852a443efbc9763344b48274056 | e289dda22b7207f1cfd690373469634b0d905a0a | /tools/rotations.py | d6e9f9f7cc69ecbd15d4b182e3d499b01a0e5ed9 | [] | no_license | kavindukk/EN_EC_674_Python | 67fcac4402185c59251ec2e1a3dc97b93807b402 | 91f2718c10b387d5c59bdae531e4192546400dc4 | refs/heads/master | 2020-12-14T05:25:53.997758 | 2020-03-02T07:40:22 | 2020-03-02T07:40:22 | 234,654,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | import numpy as np
import math as m
pi = m.pi
s = m.sin
c = m.cos
root = m.sqrt
def Quaternion2Euler(e):
e0 = e[0]
e1 = e[1]
e2 = e[2]
e3 = e[3]
phi = np.arctan2(2*(e0*e1+e2*e3), (e0*e0 + e3*e3 - e1*e1 - e2*e2))
theta = np.arcsin(2*(e0*e2 -e1*e3))
psi = np.arctan2(2*(e0*e3 + e1*e2), (e0*e0 + e1*e1 - e2*e2 - e3*e3))
return [phi, theta, psi]
def Inertial2Body (phi, theta, psi, w_n, w_e, w_d):
w_nb = c(theta)*c(psi)*w_n + c(theta)*s(psi)*w_e - s(theta)*w_d
w_eb = (s(phi)*s(theta)*c(psi) - c(phi)*s(psi))*w_n + (s(phi)*s(theta)*s(psi)+c(phi)*c(psi))*w_e + s(phi)*c(theta)*w_d
w_db = (c(phi)*s(theta)*c(psi)+s(phi)*s(psi))*w_n + (c(phi)*s(theta)*s(psi)-s(phi)*c(psi))*w_e + c(theta)*c(phi)*w_d
wind_b = [w_nb, w_eb, w_db]
return wind_b | [
"z.kavindu@gmail.com"
] | z.kavindu@gmail.com |
dc218baa49f5d462a4566290324df6f6a134c21f | b103359cb3444658c1513bdbf5d283d74a5fdff4 | /storage/UserThread.py | 3ff45daad7938ad19789e2bc441371ecce3e03e1 | [] | no_license | vshrivas/NetworkFlow | 96731af27d88dc94944292a777dbc63e117df88a | d3b9687da1bfbf611f7d25902ca06228413eec67 | refs/heads/master | 2021-05-01T02:28:05.368676 | 2018-04-02T23:02:30 | 2018-04-02T23:02:30 | 121,181,024 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | import threading
class UserThread(threading.Thread):
waiting = None | [
"vshrivas@caltech.edu"
] | vshrivas@caltech.edu |
36b5b020d5f13d24e20a731a7f0a2e69f216aaaa | 17d5c779eb2ec6b2e5f3d81d9c18cec3898d5ead | /gestionpedidos/migrations/0002_auto_20200911_1809.py | 23c7ff73e0e2a2cc4d266c823a14c83d1af0f98f | [] | no_license | rithert/ejertiendaonline | e84a4780d54d83c40d46696b4931844607b75208 | ac1f4c1ac30fd3b86fb440bac8d3761dfc0ec765 | refs/heads/master | 2022-12-27T05:07:13.120182 | 2020-09-18T03:09:02 | 2020-09-18T03:09:02 | 294,548,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | # Generated by Django 3.0.7 on 2020-09-11 23:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gestionpedidos', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='articulos',
name='nombre',
field=models.CharField(max_length=35),
),
]
| [
"rither5711@gmail.com"
] | rither5711@gmail.com |
ee40aa2495c808133fa52cedf694ff25af5f2922 | f4f5b7db5f285f372902e9dcad3ca9a08767507c | /files_checksum_json.py | 822292525615fc5ac7a0e7dbe7f7805feac3e373 | [
"MIT"
] | permissive | pdulapalli/files-checksum-json | 9159e8eca2ae70299f745c8ba0277cd58486ba63 | c7483e1265646308de64714216fb521faeeaabf6 | refs/heads/master | 2020-06-08T18:24:11.755304 | 2019-06-25T02:30:45 | 2019-06-25T02:30:45 | 193,282,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | #!/usr/bin/env python3
import hashlib
import json
import os
import sys
import re
import path_validation
DEFAULT_BLOCK_SIZE = 65536 # 64kb chunks
def sha256sum(fname, blocksize):
hash = hashlib.sha256()
with open(fname, 'rb') as f:
while True:
block = f.read(blocksize)
if not block:
break
hash.update(block)
return hash.hexdigest()
def main():
stdin_contents = ""
for line in sys.stdin:
stdin_contents += str(line)
whitespace_regex_pattern = re.compile(r'\s+')
files_list = [filePath for filePath in re.sub(whitespace_regex_pattern, '', stdin_contents).split(',') if path_validation.is_pathname_valid(filePath)]
checksums_dict = {}
for i in range(0, len(files_list)):
current_file_path = files_list[i]
if os.path.exists(current_file_path):
current_file_sha256sum = sha256sum(current_file_path, DEFAULT_BLOCK_SIZE)
checksums_dict[current_file_path] = current_file_sha256sum
else:
checksums_dict[current_file_path] = None
# Print results to stdout
print(json.dumps(checksums_dict))
if __name__ == "__main__":
main() | [
"pdulapalli@gmail.com"
] | pdulapalli@gmail.com |
23d7537671b8f992acbe7a31a567ec9b68e14ae2 | 7fdd3ad6000c92efa52d64cfae1893f659a121cf | /Main.py | d8fe6ad7b702cbeb2a87081dcf0ff218909f365c | [] | no_license | vinipachecov/TFG | 3c51c8e63871598fedba85db572fad7cf69ac9fa | fd152bc9f7bdad90c66514ecb8059cdeda4132bd | refs/heads/master | 2021-01-23T21:22:10.789327 | 2017-11-09T20:56:19 | 2017-11-09T20:56:19 | 102,895,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,129 | py | # -*- coding: utf-8 -*-
"""
Spyder Editor
Este é um arquivo de script temporário.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
#
def classification_rate(Y, P):
prediction = predict(Y)
return np.mean(Y == P)
def predict(p_y):
return np.argmax(p_y, axis=1)
def cost(p_y, t):
tot = t * np.log(p_y)
return -tot.sum()
def error_rate(p_y, t):
prediction = predict(p_y)
return np.mean(prediction != t)
# MLP
def forward(X, W1, b1, W2, b2):
Z = 1 / (1 + np.exp(-( X.dot(W1) + b1 )))
# rectifier
# Z = X.dot(W1) + b1
# Z[Z < 0] = 0
# print "Z:", Z
A = Z.dot(W2) + b2
expA = np.exp(A)
Y = expA / expA.sum(axis=1, keepdims=True)
# print "Y:", Y, "are any 0?", np.any(Y == 0), "are any nan?", np.any(np.isnan(Y))
# exit()
return Y, Z
def derivative_w2(Z, T, Y):
return Z.T.dot(Y - T)
def derivative_b2(T, Y):
return (Y - T).sum(axis=0)
def derivative_w1(X, Z, T, Y, W2):
return X.T.dot( ( ( Y-T ).dot(W2.T) * ( Z*(1 - Z) ) ) ) # for sigmoid
#return X.T.dot( ( ( Y-T ).dot(W2.T) * (Z > 0) ) ) # for relu
def derivative_b1(Z, T, Y, W2):
return (( Y-T ).dot(W2.T) * ( Z*(1 - Z) )).sum(axis=0) # for sigmoid
#return (( Y-T ).dot(W2.T) * (Z > 0)).sum(axis=0) # for relu
############
def y2indicator2(y):
N = len(y)
y = y.astype(np.int32)
ind = np.zeros((N, 2))
for val in y:
ind[val, ] = 1
return ind
def y2indicator(y):
N = len(y)
y = y.astype(np.int32)
ind = np.zeros((N, 2))
for i in range(N):
ind[i, y[i]] = 1
return ind
#encoding the y-label
def M_B_0_1(label):
if label == 'M':
return 1
else:
return 0
# Criando setup
cancer_data = pd.read_csv('data.csv')
cancer_data['diagnosis'] = cancer_data['diagnosis'].apply(M_B_0_1)
cols_normalizar = ['radius_mean', 'texture_mean', 'perimeter_mean',
'area_mean', 'smoothness_mean', 'compactness_mean', 'concavity_mean',
'concave_points_mean', 'symmetry_mean', 'fractal_dimension_mean',
'radius_se', 'texture_se', 'perimeter_se', 'area_se', 'smoothness_se',
'compactness_se', 'concavity_se', 'concave_points_se', 'symmetry_se',
'fractal_dimension_se', 'radius_worst', 'texture_worst',
'perimeter_worst', 'area_worst', 'smoothness_worst',
'compactness_worst', 'concavity_worst', 'concave_points_worst',
'symmetry_worst', 'fractal_dimension_worst']
cancer_data[cols_normalizar] = cancer_data[cols_normalizar].apply(lambda x: (x - np.mean(x)) / (np.std(x) ) )
labels_to_drop = ['id','diagnosis']
x_data = cancer_data.drop(labels=labels_to_drop,axis=1)
y_data = cancer_data['diagnosis']
X_train, X_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.3, random_state=101)
#
Y = cancer_data.iloc[:, 1].values
Y = np.matrix(cancer_data['diagnosis'])
# Encoding categorical data
#Maligo = 1 | Benigno = 0
for i in range(Y.shape[1]):
if Y[0,i] == 'M':
Y[0,i] = 1
else:
Y[0,i] = 0
Y = np.float64(Y)
#################separando com sklearn train-test-split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=52)
Ytrain_ind = y2indicator2(y_train)
Ytest_ind = y2indicator(y_test)
# training setup
max_iter = 20 # make it 30 for sigmoid
print_period = 10
lr = 0.00004
reg = 0.01
N, D = X_train.shape
batch_sz = 40
n_batches = N // batch_sz
M = 100
K = 2
W1 = np.random.randn(D, M) / 28
b1 = np.zeros(M)
W2 = np.random.randn(M, K) / np.sqrt(M)
b2 = np.zeros(K)
# 1. batch
# cost = -16
LL_batch = []
CR_batch = []
for i in xrange(max_iter):
for j in xrange(n_batches):
Xbatch = X_train[j*batch_sz:(j*batch_sz + batch_sz),]
Ybatch = y_train_ind[j*batch_sz:(j*batch_sz + batch_sz),]
pYbatch, Z = forward(Xbatch, W1, b1, W2, b2)
# print "first batch cost:", cost(pYbatch, Ybatch)
# updates
W2 -= lr*(derivative_w2(Z, Ybatch, pYbatch) + reg*W2)
b2 -= lr*(derivative_b2(Ybatch, pYbatch) + reg*b2)
W1 -= lr*(derivative_w1(Xbatch, Z, Ybatch, pYbatch, W2) + reg*W1)
b1 -= lr*(derivative_b1(Z, Ybatch, pYbatch, W2) + reg*b1)
if j % print_period == 0:
# calculate just for LL
pY, _ = forward(X_test, W1, b1, W2, b2)
print "pY:", pY
ll = cost(pY, Ytest_ind)
LL_batch.append(ll)
print "Cost at iteration i=%d, j=%d: %.6f" % (i, j, ll)
err = error_rate(pY, y_test)
CR_batch.append(err)
print "Error rate:", err
pY, _ = forward(X_test, W1, b1, W2, b2)
print "Final error rate:", error_rate(pY, y_test)
print " accuraccy: ", classification_rate(y_test,predict(pY))
##----------------------------------------------------------------------
# USANDO NUMPY
cancer_data = pd.read_csv('data.csv')
X = cancer_data.iloc[:, 2:-1].values
#
Y = cancer_data.iloc[:, 1].values
Y = np.matrix(cancer_data.iloc[:, 1].values)
Y = cancer_data.iloc[:, 1].values
Y = np.matrix(cancer_data['diagnosis'])
# Encoding categorical data
#Maligo = 1 | Benigno = 0
for i in range(Y.shape[1]):
if Y[0,i] == 'M':
Y[0,i] = 1
else:
Y[0,i] = 0
Y = np.float64(Y)
# normalization
for i in range(X.shape[1]):
X[:,i] = (X[:,i] - X[:,i].mean()) / X[:,i].std()
Y = Y.reshape(Y.shape[1],Y.shape[0])
max_iter = 20 # make it 30 for sigmoid
print_period = 10
lr = 0.00004
reg = 0.01
Xtrain = X[:-150,]
Ytrain = Y[:-150]
Xtest = X[-100:,]
Ytest = Y[-100:]
Ytrain_ind = y2indicator(Ytrain)
Ytest_ind = y2indicator(Ytest)
N, D = Xtrain.shape
batch_sz = 40
n_batches = N // batch_sz
M = 100
K = 2
W1 = np.random.randn(D, M) / 28
b1 = np.zeros(M)
W2 = np.random.randn(M, K) / np.sqrt(M)
b2 = np.zeros(K)
# 1. batch
# cost = -16
LL_batch = []
CR_batch = []
for i in xrange(max_iter):
for j in xrange(n_batches):
Xbatch = Xtrain[j*batch_sz:(j*batch_sz + batch_sz),]
Ybatch = Ytrain_ind[j*batch_sz:(j*batch_sz + batch_sz),]
pYbatch, Z = forward(Xbatch, W1, b1, W2, b2)
# print "first batch cost:", cost(pYbatch, Ybatch)
# updates
W2 -= lr*(derivative_w2(Z, Ybatch, pYbatch) + reg*W2)
b2 -= lr*(derivative_b2(Ybatch, pYbatch) + reg*b2)
W1 -= lr*(derivative_w1(Xbatch, Z, Ybatch, pYbatch, W2) + reg*W1)
b1 -= lr*(derivative_b1(Z, Ybatch, pYbatch, W2) + reg*b1)
if j % print_period == 0:
# calculate just for LL
pY, _ = forward(Xtest, W1, b1, W2, b2)
print "pY:", pY
ll = cost(pY, Ytest_ind)
LL_batch.append(ll)
print "Cost at iteration i=%d, j=%d: %.6f" % (i, j, ll)
err = error_rate(pY, Ytest)
CR_batch.append(err)
print "Error rate:", err
pY, _ = forward(Xtest, W1, b1, W2, b2)
print "Final error rate:", error_rate(pY, Ytest)
print " accuraccy: ", classification_rate(Ytest,predict(pY))
| [
"vinipachecov@gmail.com"
] | vinipachecov@gmail.com |
2582df3bede1dd8bc5d6ba02582db7a5d8aa98d2 | 639fb7aecb24a168df2a146a800e2c1191ba5d3b | /mean.py | b3d6565c052486f6e32220425515568886e707c2 | [] | no_license | Polestar574/class104dhruv | a3178e5fce8629cca53d79e59e7a99c18bb7fae9 | c26ab13439da9612fb72fa6295f3e7784ddb029f | refs/heads/main | 2023-07-17T20:31:21.552744 | 2021-08-25T12:21:15 | 2021-08-25T12:21:15 | 399,807,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | import csv
with open('height-weight.csv',newline='')as f:
reader=csv.reader(f)
file_data=list(reader)
file_data.pop(0)
# print(file_data)
# sorting data to get the height of people.
new_data=[]
for i in range(len(file_data)):
n_num = file_data[i][1]
new_data.append(float(n_num))
# #getting the mean
n = len(new_data)
total =0
for x in new_data:
total += x
mean = total / n
#
print("Mean / Average is: " + str(mean))
| [
"noreply@github.com"
] | Polestar574.noreply@github.com |
c6e0b7d5beddcf6966817ea03fe82b352e749cfa | d5bde62db54285d935b99a7be6d139cb33a0c91a | /bot.py | 9c37838950e508bcaa9b649cf4b222475c57bc63 | [
"MIT"
] | permissive | Tsnm2/telegram-rss-reader | 71867578fc1ce7cbde8f998e05aa9b7828c0f76b | 0b2225f2bf3b1925d3fbe82f62cef9352426aa0e | refs/heads/master | 2023-07-14T18:32:13.694454 | 2021-08-27T11:51:19 | 2021-08-27T11:51:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,548 | py | import logging
import time
import os
from dotenv import load_dotenv
from telegram import ParseMode, Update
from telegram.ext import (CallbackContext, CommandHandler, Filters,
MessageHandler, Updater)
from db import (add_feed_source, get_all_sources, get_sources,
is_already_present, remove_feed_source,
update_source_timestamp)
from feed import format_feed_item, get_feed_info, read_feed
from archive import capture
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
def add_feed(update: Update, context: CallbackContext) -> None:
user = update.effective_chat.id
source = context.args[0]
# /add https://thegradient.pub/rss
if is_already_present(user, source):
context.bot.send_message(
chat_id=update.effective_chat.id, text=source + ' already exists.')
else:
add_feed_source(user, source)
feed_info = get_feed_info(source)
context.bot.send_message(
chat_id=update.effective_chat.id, text=source + ' added.')
context.bot.send_message(
chat_id=update.effective_chat.id, text=feed_info)
def remove_feed(update: Update, context: CallbackContext) -> None:
user = update.effective_chat.id
source = context.args[0]
# /remove https://thegradient.pub/rss
if is_already_present(user, source):
remove_feed_source(user, source)
context.bot.send_message(
chat_id=update.effective_chat.id, text=source + ' removed.')
else:
context.bot.send_message(
chat_id=update.effective_chat.id, text=source + ' does not exist.')
def list_feeds(update: Update, context: CallbackContext) -> None:
userId = update.effective_chat.id
sources = get_sources(userId)
if len(sources):
context.bot.send_message(
chat_id=userId, text="\n".join(sources))
else:
context.bot.send_message(
chat_id=userId, text="No sources added yet")
def archive_link(update: Update, context: CallbackContext):
user = update.effective_chat.id
source = context.args[0]
url, captured = capture(source)
context.bot.send_message(chat_id=user, text=url)
def text(update: Update, context: CallbackContext) -> None:
user = update.effective_chat.id
text_received = update.message.text
context.bot.send_message(
chat_id=user, text='To add a feed use /add feedurl')
def help(update: Update, context: CallbackContext) -> None:
user = update.effective_chat.id
context.bot.send_message(
chat_id=user, text='To add a feed use /add feedurl')
def hello(update: Update, context: CallbackContext) -> None:
update.message.reply_text(f'Hello {update.effective_user.first_name}')
def error(update, context):
update.message.reply_text('an error occured')
logger.error(msg="Exception while handling an update:",
exc_info=context.error)
def fetch_feeds(context: CallbackContext):
sources = get_all_sources()
for source in sources:
feeds = read_feed(source["url"])
logger.info(msg="Found " + str(len(feeds)) +
" feeds from " + source["url"])
entry_index = 0
last_post_updated_time= 0
for entry in feeds:
entry_index = entry_index+1
if entry_index > 10:
break
if entry.has_key('published_parsed'):
post_updated_time = int(time.strftime(
"%Y%m%d%H%M%S", entry.published_parsed))
elif entry.has_key('updated_parsed'):
post_updated_time = int(time.strftime(
"%Y%m%d%H%M%S", entry.updated_parsed))
else:
logger.error(msg=source["url"] + " has no time info")
break
last_updated_time = int(source["last_updated"])
if post_updated_time > last_post_updated_time:
last_post_updated_time = post_updated_time
if post_updated_time > last_updated_time:
context.bot.send_message(chat_id=source["userId"],
text=format_feed_item(entry),
parse_mode=ParseMode.HTML)
if os.getenv('ARCHIVE_POSTS') == 'true':
# Add the link to archive.org
capture(entry.link)
update_source_timestamp(source["userId"], source["url"], last_post_updated_time)
def main():
load_dotenv() # take environment variables from .env.
updater = Updater(os.getenv('TELEGRAM_BOT_TOKEN'))
dispatcher = updater.dispatcher
dispatcher.add_handler(CommandHandler('hello', hello))
dispatcher.add_handler(CommandHandler("help", help))
dispatcher.add_handler(CommandHandler('add', add_feed))
dispatcher.add_handler(CommandHandler('remove', remove_feed))
dispatcher.add_handler(CommandHandler('list', list_feeds))
dispatcher.add_handler(CommandHandler('archive', archive_link))
# add an handler for normal text (not commands)
dispatcher.add_handler(MessageHandler(Filters.text, text))
# add an handler for errors
# dispatcher.add_error_handler(error)
job_queue = updater.job_queue
job_queue.run_repeating(
fetch_feeds, interval=int(os.getenv('FEED_UPDATE_INTERVAL')), first=10)
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
| [
"santhosh.thottingal@gmail.com"
] | santhosh.thottingal@gmail.com |
ae2af4b91ec3e2ffb5472d99b2cbcf5c1c1584f4 | f337a606787e20510326c52417f814769abaca78 | /mini.py | 60e077ea85c5f848b89692b30eed49d2f4432be5 | [] | no_license | samescolas/reverse-game-of-life | f616ae51326df62d685e210050738ba1c6cd3032 | 579189f4c37d7e9d1d602bfa285f95b7ef6b355a | refs/heads/master | 2021-01-01T04:15:25.639194 | 2017-07-26T20:37:55 | 2017-07-26T20:37:55 | 97,152,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | import numpy as np
# sigmoid function
def nonlin(x,deriv=False):
if (deriv==True):
return x*(1-x)
return 1/(1+np.exp(-x))
# input
X = np.array([
[0,0,1],
[0,1,1],
[1,0,1],
[1,1,1]
])
# output
y = np.array([[0,0,1,1]]).T
np.random.seed(1)
syn0 = 2*np.random.random((3,1)) - 1
for iter in xrange(10000):
l0 = X
l1 = nonlin(np.dot(l0,syn0))
l1_error = y - l1
l1_delta = l1_error * nonlin(l1,True)
syn0 += np.dot(l0.T,l1_delta)
print l1
| [
"samescolas@gmail.com"
] | samescolas@gmail.com |
641ad4f3b344d4ed5e6fe4c291f7531da35f3984 | db081e2ce44f31a25bb3d05a4399f5d7bc13e792 | /gslib/commands/enablelogging.py | dea311075950fc7fd4b47c84926e1c393f322578 | [] | no_license | bopopescu/gamme-2.7 | 00a158fa95d2c730fae5d31868d1b332738027f5 | a291aa8d96444a7c1b3907b47c5786d4f5b97574 | refs/heads/master | 2022-11-24T20:34:47.625090 | 2012-02-06T11:49:35 | 2012-02-06T11:49:35 | 282,525,696 | 0 | 0 | null | 2020-07-25T21:12:19 | 2020-07-25T21:12:18 | null | UTF-8 | Python | false | false | 3,031 | py | # Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from gslib.command import Command
from gslib.command import COMMAND_NAME
from gslib.command import COMMAND_NAME_ALIASES
from gslib.command import CONFIG_REQUIRED
from gslib.command import FILE_URIS_OK
from gslib.command import MAX_ARGS
from gslib.command import MIN_ARGS
from gslib.command import PROVIDER_URIS_OK
from gslib.command import SUPPORTED_SUB_ARGS
from gslib.command import URIS_START_ARG
from gslib.exception import CommandException
from gslib.util import NO_MAX
class EnableLoggingCommand(Command):
"""Implementation of gsutil enablelogging command."""
# Command specification (processed by parent class).
command_spec = {
# Name of command.
COMMAND_NAME : 'enablelogging',
# List of command name aliases.
COMMAND_NAME_ALIASES : [],
# Min number of args required by this command.
MIN_ARGS : 1,
# Max number of args required by this command, or NO_MAX.
MAX_ARGS : NO_MAX,
# Getopt-style string specifying acceptable sub args.
SUPPORTED_SUB_ARGS : 'b:o:',
# True if file URIs acceptable for this command.
FILE_URIS_OK : False,
# True if provider-only URIs acceptable for this command.
PROVIDER_URIS_OK : False,
# Index in args of first URI arg.
URIS_START_ARG : 0,
# True if must configure gsutil before running command.
CONFIG_REQUIRED : True,
}
# Command entry point.
def RunCommand(self):
# Disallow multi-provider enablelogging calls, because the schemas
# differ.
storage_uri = self.UrisAreForSingleProvider(self.args)
if not storage_uri:
raise CommandException('enablelogging command spanning providers not '
'allowed.')
target_bucket = None
target_prefix = None
for opt, opt_arg in self.sub_opts:
if opt == '-b':
target_bucket = opt_arg
if opt == '-o':
target_prefix = opt_arg
if not target_bucket:
raise CommandException('enablelogging requires \'-b <log_bucket>\' '
'option')
for uri_str in self.args:
for uri in self.CmdWildcardIterator(uri_str):
if uri.object_name:
raise CommandException('enablelogging cannot be applied to objects')
print 'Enabling logging on %s...' % uri
self.proj_id_handler.FillInProjectHeaderIfNeeded(
'enablelogging', storage_uri, self.headers)
uri.enable_logging(target_bucket, target_prefix, False, self.headers)
| [
"shanky@shanky.local"
] | shanky@shanky.local |
2172f6eee92de4d83fb46ac989cf2e5efb33dc3a | 2815a09ee544004f6f6e3860a756b747f2c5e05d | /HBPSO_Kmeans_FINAL1.py | 31bfbaa7178272e4bb682c9451698cbfb2f6cc57 | [] | no_license | shoryavardhan/Gait-Pattern-Analysis | 9847ee01a410f160581b84c1d1d48b30345da2c3 | 0eeb894a2f1f6e27e586a540e1bf1fef90792c25 | refs/heads/master | 2022-03-31T17:52:42.921342 | 2019-12-25T19:12:19 | 2019-12-25T19:12:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,378 | py | import random
import numpy as np
import pandas as pd
import Validations_FINAL as validations
import math
#DATASET
dataset=pd.read_csv("dataset/norm.csv")
original_68=dataset.iloc[0:68,1:5].values
original_88=dataset.iloc[0:88,1:5].values
original=dataset.iloc[0:156, 1:5].values #case3
belong = dataset.iloc[0:156,9].values
# print("PSO with KMeans Algorithm")
W = 0.5
c1 = 0.8
c2 = 0.9
n_iterations = 50
target_error = 10e-5
n_particles = 50
#random
f1c = open('dataset/case1_cp88.csv')
f1i = open('dataset/case1_intact68.csv')
class Particle():
def __init__(self):
#case 2 - cp - 88
offset = random.randrange(156)
f1c.seek(offset)
f1c.readline()
random_line1 = f1c.readline()
arr= [0.0,0.0,0.0,0.0]
arr= random_line1.split(',')
cp0=float(arr[0])
cp1=float(arr[1])
cp2=float(arr[2])
cp3=float(arr[3])
#case3 - intact - 68
offset = random.randrange(156)
f1i.seek(offset)
f1i.readline()
random_line1 = f1i.readline()
arr= [0.0,0.0,0.0,0.0]
arr= random_line1.split(',')
in0=float(arr[0])
in1=float(arr[1])
in2=float(arr[2])
in3=float(arr[3])
#cp0=cp1=in0=in1=(-1) ** (bool(random.getrandbits(1))) * random.random()*50
self.position = np.array([cp0,cp1,cp2,cp3,in0,in1,in2,in3])
self.pbest_position = self.position
self.pbest_value = float('inf')
self.velocity = np.array([0,0,0,0,0,0,0,0])
def __str__(self):
print("I am at ", self.position, " my pbest is ", self.pbest_position)
def move(self):
self.position = self.position + self.velocity
class Space():
def __init__(self, target, target_error, n_particles):
self.target = target
self.target_error = target_error
self.n_particles = n_particles-1
self.particles = []
self.gbest_value = float('inf')
self.gbest_position = np.array([random.random()*50, random.random()*50, random.random()*50, random.random()*50,
random.random()*50, random.random()*50, random.random()*50, random.random()*50])
def printing(self):
for particle in self.particles:
particle.__str__()
def fitness(self, particle):
sq_sumcp=sq_sumintact=0
for row in original_88:
sq_sumcp = sq_sumcp+ math.sqrt((particle.position[0]-row[0])**2+(particle.position[1]-row[1])**2
+(particle.position[2]-row[2])**2+(particle.position[3]-row[3])**2)
for row in original_68:
sq_sumintact = sq_sumintact+ math.sqrt((particle.position[4]-row[0])**2+(particle.position[5]-row[1])**2+
(particle.position[6]-row[2])**2+(particle.position[7]-row[3])**2)
sum=((sq_sumcp/88)+(sq_sumintact/68))/2
return sum
def set_pbest(self):
for particle in self.particles:
fitness_cadidate = self.fitness(particle)
if(particle.pbest_value > fitness_cadidate):
particle.pbest_value = fitness_cadidate
particle.pbest_position = particle.position
def set_gbest(self):
for particle in self.particles:
best_fitness_cadidate = self.fitness(particle)
if(self.gbest_value > best_fitness_cadidate):
self.gbest_value = best_fitness_cadidate
self.gbest_position = particle.position
def move_particles(self):
for particle in self.particles:
global W
new_velocity = (W*particle.velocity) + (c1*random.random())*validations.hammingdistance2(particle.pbest_position[0],particle.pbest_position[1],particle.pbest_position[2],particle.pbest_position[3],particle.position[0],particle.position[1],particle.position[2],particle.position[3]) + \
(random.random()*c2) * validations.hammingdistance2(self.gbest_position[0],self.gbest_position[1],self.gbest_position[2],self.gbest_position[3],particle.position[0],particle.position[1],particle.position[3],particle.position[4])
particle.velocity = new_velocity
particle.move()
search_space = Space(0, target_error, n_particles)
particles_vector = [Particle() for p in range(search_space.n_particles)]
part = Particle()
part.position=np.array([59.56, 0.5475, 59.56, 67.5625,130.98013, 131.82135, 3.21945, 3.65202])
particles_vector.append(part)
search_space.particles = particles_vector
#search_space.printing()
iteration = 0
while(iteration < n_iterations):
search_space.set_pbest()
search_space.set_gbest()
if(abs(search_space.gbest_value - search_space.target) <= search_space.target_error):
break
search_space.move_particles()
iteration += 1
#print("\n The best solution is: \n", search_space.gbest_position, "\n in" ,iteration, " iterations")
c1 = np.array([0.0,0.0,0.0,0.0])
c2 = np.array([0.0,0.0,0.0,0.0])
c1[0]= search_space.gbest_position[0]
c1[1]= search_space.gbest_position[1]
c1[2]= search_space.gbest_position[2]
c1[3]= search_space.gbest_position[3]
c2[0]= search_space.gbest_position[4]
c2[1]= search_space.gbest_position[5]
c2[2]= search_space.gbest_position[6]
c2[3]= search_space.gbest_position[7]
'''
print("\nCentroid 1:")
print(c1)
print("\nCentroid 2:")
print(c2)
print("\n")
'''
################Kmeans start
#initialising centroids for the first time
count=0
centroids=[c1,c2]
n=0
while(n<100):
#calculating d first time
r=0
c=0
row,col=(156,2)
d=[[0 for p in range(col)] for q in range(row)]
for row in original:
c=0
for cent in centroids:
d[r][c]=validations.EuclideanDistanceFour(row,cent)
c=c+1
r=r+1
# Finding centroids
cluster0=0
cluster1=0
i=0
sumx1=0
sumy1=0
sumx2=0
sumy2=0
tp=0
c=[0 for p in range(156)]
for dis in d:
if(dis[1]>dis[0]):
if(belong[i]==0):
tp=tp+1
c[i]=0
cluster0=cluster0+1
sumw1=sumy1+original[i][1]
sumx1=sumx1+original[i][0]
sumy1=sumy1+original[i][1]
sumz1=sumy1+original[i][1]
i=i+1
else:
if(belong[i]==1):
tp=tp+1
c[i]=1
cluster1=cluster1+1
sumw2=sumx2+original[i][0]
sumx2=sumy2+original[i][1]
sumy2=sumx2+original[i][0]
sumz2=sumy2+original[i][1]
i=i+1
centroids=[[sumw1/cluster0,sumx1/cluster0,sumy1/cluster0,sumz1/cluster0],
[sumw2/cluster1,sumx2/cluster1,sumy1/cluster1,sumz1/cluster1]]
n=n+1
#making clusters
c0array=[[0 for p in range(2)] for q in range(cluster0)]
c1array=[[0 for p in range(2)] for q in range(cluster1)]
a=i=j=0
for row in original:
if(c[a]== 0):
c0array[i][0]=row[0]
c0array[i][1]=row[1]
i=i+1
if(c[a]== 1):
c1array[j][0]=row[0]
c1array[j][1]=row[1]
j=j+1
a=a+1
aaaa=pd.DataFrame(c0array)
cccc=pd.DataFrame(c1array)
#validity indexes
hcpi_1=validations.CPI(d,belong)
hintra_1=validations.Intra(original,c,cluster0,cluster1)
hinter_1=validations.Inter(original,c,cluster0,cluster1)
hsi_1 = validations.Silhouette(c0array,c1array,cluster0,cluster1)
hmse_1=validations.MSE(c,belong)
hdi_1= validations.DunnIndex(c0array,c1array,centroids,cluster0,cluster1)
#PRINTING
'''
print(" ")
print("CASE 1 ------------------------------")
print("CPI INTRA INTER SC MSE DUNN")
print("%.4f"%hcpi_1," ","%.4f"%hintra_1," ","%.4f"%hinter_1," ","%.4f"%hsi_1," ","%.4f"%hmse_1," ","%.4f"%hdi_1)
print("")
print("CASE 2 ------------------------------")
print("CPI INTRA INTER SC MSE DUNN")
print("%.4f"%hcpi_2," ","%.4f"%hintra_2," ","%.4f"%hinter_2," ","%.4f"%hsi_2," ","%.4f"%hmse_2," ","%.4f"%hdi_2)
print("")
print("CASE 3 ------------------------------")
print("CPI INTRA INTER SC MSE DUNN")
print("%.4f"%hcpi_3," ","%.4f"%hintra_3," ","%.4f"%hinter_3," ","%.4f"%hsi_3," ","%.4f"%hmse_3," ","%.4f"%hdi_3)
'''
| [
"shoryavardhankhanna@gmail.com"
] | shoryavardhankhanna@gmail.com |
3a854018a331ad79715a4b7feabb5f2f29a696db | edc5d93b426350bf39b914cad3050435c86f7511 | /packages/optimizers/__init__.py | 8c786e103ca9b0dcd82f3b5e39b66cb789d3a04d | [] | permissive | Harsh026/AND | 423ac99371b0e6240f0d575bb7276bd07ba46cd4 | 333e7d4f189eb3583e9f2cb0971ef69075716764 | refs/heads/master | 2020-08-28T11:41:20.946057 | 2019-10-26T09:45:30 | 2019-10-26T09:45:30 | 217,687,935 | 0 | 0 | MIT | 2019-10-26T09:45:31 | 2019-10-26T09:44:45 | null | UTF-8 | Python | false | false | 1,097 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019-01-24 23:26:56
# @Author : Raymond Wong (jiabo.huang@qmul.ac.uk)
# @Link : github.com/Raymond-sci
from . import sgd
from . import adam
from . import rmsprop
from ..register import REGISTER
from ..config import CONFIG as cfg
def require_args():
"""all args for optimizer objects
Arguments:
parser {argparse} -- current version of argparse object
"""
cfg.add_argument('--weight-decay', default=0, type=float,
help='weight decay (L2 penalty)')
known_args, _ = cfg.parse_known_args()
if (REGISTER.is_package_registered(__name__) and
REGISTER.is_class_registered(__name__, known_args.optimizer)):
optimizer = get(known_args.optimizer)
if hasattr(optimizer, 'require_args'):
return optimizer.require_args()
def get(name, instant=False, params=None):
cls = REGISTER.get_class(__name__, name)
if instant:
return cls.get(params)
return cls
def register(name, cls):
REGISTER.set_class(__name__, name, cls)
| [
"549425036@qq.com"
] | 549425036@qq.com |
7ebe27244c5cf13fec8e4034a5734e5974374444 | e8996ea8693231f5700c68ed6355f935a05d8ace | /main/migrations/0002_auto_20201115_1915.py | 360b2272af32e5928aee15af0c7ac0d6c1464102 | [] | no_license | Moxxi/test-task | bb256323d530ae3ef32f8a1ef21906805f7193c9 | ad5f45caabadb5462354fd4454035c37fcf834d9 | refs/heads/main | 2023-01-14T03:46:57.164418 | 2020-11-15T18:30:54 | 2020-11-15T18:30:54 | 313,086,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | # Generated by Django 3.1.3 on 2020-11-15 16:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='answers',
name='UserId',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='questions',
name='Type',
field=models.CharField(choices=[(models.CharField(max_length=200), 'Text'), ('OC', 'OneChoice'), ('MC', 'ManyChoice')], default=models.CharField(max_length=200), max_length=2),
),
]
| [
"defalt22@list.ru"
] | defalt22@list.ru |
e0018e7c69d08f6c25ca728a3ea0f606a1bcf0a3 | 6f8b8c602c85bb3a2875958bb14661b20270fc78 | /main.py | e277a6c01cd151a1502fc6f1317ab98f24f36327 | [] | no_license | DeN-T4WeR/Shishkarev_compVision | 8454a9f28cbca3de4b5f25bb212b18466f02f9c7 | 71ba93aa6ac34db4e67cc842dbcdeb8404332046 | refs/heads/master | 2023-08-28T16:30:12.162709 | 2021-10-23T16:03:55 | 2021-10-23T16:03:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | import cv2
import numpy
img: numpy.ndarray = cv2.imread('robot.jpeg', cv2.IMREAD_COLOR)
h, w, _ = img.shape
# cv2.imshow('color', img)
img_resized = cv2.resize(img, (w//2, h//2), cv2.INTER_NEAREST)
print(img.shape)
cv2.imshow('resized', img_resized)
img_blur1 = cv2.blur(img, (3,3))
img_blur2 = cv2.blur(img, (10,10))
img_blur3 = cv2.blur(img, (50,50))
cv2.imshow('img_blue1', img_blur1)
cv2.imshow('img_blue2', img_blur2)
cv2.imshow('img_blue3', img_blur3)
b, g, r =cv2.split(img)
# cv2.imshow('blue', b)
# cv2.imshow('green', g)
# cv2.imshow('red', r)
merge_img = cv2.merge([b,g,r])
# cv2.imshow('merge_ing', merge_img)
gray_img = cv2.imread('robot.jpeg', cv2.IMREAD_GRAYSCALE)
filled_img = img.copy()
for row in range(0,int (img.shape[0]/2)):
for column in range(int(img.shape[1] / 2), img.shape[1]):
filled_img[row, column]=(0,0,0)
# cv2.imshow('filled, img', filled_img)
# cv2.imshow('filled', filled_img
cropoed_img = img[120:200, 750:850]
cv2.imshow('color', img)
# cv2.imshow('cropoed_img', cropoed_img)
# cv2.imshow('gray_img', gray_img)
cv2.waitKey(0) | [
"user5Krymsk@gmail.com"
] | user5Krymsk@gmail.com |
f77c4b946aa041cf53339f4277a05831580dbf60 | 95174b5832bbb13a1a344379ff014f434cc42015 | /models/bus_route.py | e0d4c870b8f9f6dffdc4ce6f6b4e4b43373bc4d1 | [] | no_license | nardorb/Perfectly-Timed | f0ef1f2a55f24da42a07f82ec05a3b4c044d4c72 | 6b266c4c04e6dd0524c81a9b75e8a126fabfa9ec | refs/heads/master | 2021-01-17T22:29:31.097337 | 2015-03-25T00:44:44 | 2015-03-25T00:44:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 785 | py | from google.appengine.ext import db
class BusRoute(db.Model):
EXCEPTION_NO_PARENT = "`parent` property must be an `Account` object."
route_number = db.StringProperty(default=None)
stops = db.StringProperty(default=[])
def get_account(self):
return self.parent()
def stops_list(self):
return self.stops.split(',')
@classmethod
def get_by_route_number(cls, route_number):
return cls.all().filter('route_number =', route_number).get()
def put(self, *args, **kwargs):
# This is not at the top to prevent circular imports.
from models.account import Account
parent = self.parent()
if not parent or not isinstance(parent, Account):
raise ValueError(self.EXCEPTION_NO_PARENT)
return super(BusRoute, self).put(*args, **kwargs)
| [
"caryl.r.ford@gmail.com"
] | caryl.r.ford@gmail.com |
9fd7421347cdb18aaee26e441c8944adfb19c396 | 96bd8762dea852dcf3d520984de5b1459d528189 | /Lab1/vjezba1b.py | 8860521b3c339817569bd21833dc0de67e79a1de | [] | no_license | NikolaKraljevic/NapredniOperacijskiSustavi | 0f93ad499041dcd3377707c30317bd58983279ee | 403e50e6a6e396366cf06df0ef013822cabda02a | refs/heads/main | 2023-02-13T22:51:51.571551 | 2021-01-09T20:13:33 | 2021-01-09T20:13:33 | 328,237,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,830 | py | from multiprocessing import Process, Pipe
import sys
import time
import random
import os
def filozofi(i,slusatelj,pisatelj,N):
#Sudjelovanje na konferenciji
random.seed(i)
spavanjeprvo = random.randint(100,2000)
time.sleep(spavanjeprvo/1000)
vrijeme = time.localtime()
#Filozof salje zahtjev za sjesti
for k in range(N):
if k!=i:
pisatelj[k].send([i,vrijeme,'Zahtjev'])
sporiji = []
brzi = []
poruka = 0
#Pocetak protokola Ricarta i Agrawala
#Primanje svih zahtjeva za sjedanje i razvrstavanje u brze i sporije
for k in range((N-1)):
Primio = slusatelj[i].recv()
print('Filozof ' +str(i)+' je primio poruku: ' )
print(Primio[0],Primio[2])
salje = Primio[0]
if vrijeme>Primio[1] or (vrijeme == Primio[1] and i>salje):
brzi.append(salje)
else:
sporiji.append(salje)
#Pustanje tj slanje odgovora svim procesa sa manjim T
for z in brzi:
pisatelj[z].send([i,vrijeme,'Odgovor'])
#Cekanje odgovora svih procesa
for k in range(N-1):
Primio = slusatelj[i].recv()
print('Filozof ' +str(i)+' je primio poruku: ' )
print(Primio[0],Primio[2])
print('Filozof ' +str(i) + ' je za stolom----------------------')
time.sleep(3)
print('Filozof ' +str(i) + ' je ustao sa stola----------------------')
#Slanje poruka svim sporijim procesima
for z in sporiji:
pisatelj[z].send([i,vrijeme,'Odgovor'])
#Ponovno sudjelovanje na konferenciji
#Kraj Protokola
random.seed(2*i)
spavanjedrugo = random.randint(99,2001)
time.sleep(spavanjedrugo/2000)
os._exit(0)
N = int(sys.stdin.readline())
slusatelj = []
pisatelj = []
#Rad pipeline za svakog filozofa
for i in range(N):
listen,writer = Pipe()
slusatelj.append(listen)
pisatelj.append(writer)
#Inicijalizacija procesa i pozivanje funkcije filozovi
for i in range(N):
if(os.fork()==0):
filozofi(i,slusatelj,pisatelj,N)
| [
"noreply@github.com"
] | NikolaKraljevic.noreply@github.com |
02fcc3a18ac6b0037bd9b7e92c3eee7daf2a61f0 | e4277dd4d7b0b8d5ac4a62d67e1b11e5dc64ddb5 | /server.py | 68509b1b79cbc0c4674bc581d17585d9dbcab284 | [] | no_license | natashagraifman/SD | 3ac92e74e5058b8de3f3f41833313667e9791c87 | 89e164c6057d43bfd999da9654fba3883494c1ab | refs/heads/master | 2021-06-27T01:07:50.033836 | 2021-01-15T14:23:59 | 2021-01-15T14:23:59 | 186,491,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,276 | py | from socket import *
from threading import Thread
import os
connections = []
total_connections = 0
def mostraPublicacoes():
global diretorio
todasPub = ""
for root,dirs,files in os.walk(diretorio):
for publicacao in files:
publicacao = (publicacao.split(".")[0])
todasPub += publicacao + "\n"
if(todasPub == ""):
return "Nenhuma publicação."
else:
return todasPub
def novaPublicacao(titulo, usuario, texto):
global diretorio
try:
arq = open(diretorio + titulo + ".txt", "w")
arq.write(usuario + "\n" + texto)
arq.close()
return True
except:
print("Erro ao criar nova publicação.")
return False
def lerPublicacao(titulo):
global diretorio
if(os.path.isfile(diretorio + titulo + ".txt")):
arq = open(diretorio + titulo + ".txt", "r")
texto = arq.read()
arq.close()
return texto
else:
return "Publicação não existe."
def deletaPublicacao(titulo, usuario):
global diretorio
if(os.path.isfile(diretorio + titulo + ".txt")):
arq = open(diretorio + titulo + ".txt", "r")
autor = arq.readline()
if(autor == usuario + "\n"):
arq.close()
os.remove(diretorio + titulo + ".txt")
return "Publicação apagada."
else:
return "Você não tem permissões para apagar essa publicação."
else:
return "Publicação não encontrada."
def convByte(texto):
return bytes(texto, "utf-8")
def recebeConexoesThread(socket):
while True:
s, address = socket.accept()
Thread(target=opcoesUsuarioThread, args=(s, adress)).start()
def opcoesUsuarioThread(client, ip):
client.send(convByte("Escreva seu nome:"))
usuario = client.recv(4096).decode()
client.send(convByte("\nEscreva o número da opção desejada:\n\n1 - Ver todas as publicações\n2 - Escrever uma nova publicação\n3 - Ler publicação\n4 - Apagar publicação\n5 - Sair\n"))
if(True):
while(True):
entrada = client.recv(4096).decode()
opcao = entrada.split(" ")[0]
if(opcao == "1"):
client.send(convByte(mostraPublicacoes()))
elif(opcao == "2"):
client.send(convByte("Escreva o título da sua publicação:\n"))
titulo = client.recv(4096).decode()
client.send(convByte("Escreva o conteúdo:\n"))
texto = client.recv(4096).decode()
if(novaPublicacao(titulo, usuario, texto)):
client.send(convByte("Publicação salva!\n"))
else:
client.send(convByte("Erro ao concluir publicação.\n"))
continue
elif(opcao == "3"):
client.send(convByte("Escreva o título da publicação que deseja ler:\n"))
titulo = client.recv(4096).decode()
client.send(convByte(lerPublicacao(titulo)))
continue
elif(opcao == "4"):
client.send(convByte("Escreva o título da publicação que quer apagar:\n Obs.:Você só pode apagar uma publicação de sua autoria!\n"))
titulo = client.recv(4096).decode()
client.send(convByte(deletaPublicacao(titulo, usuario)))
continue
elif(opcao == "5"):
client.send(convByte("exit"))
print(str(ip) + " disconnected\n")
return
diretorio = os.getcwd() + "\\Publicações\\"
host = "127.0.0.1"
port = 33000
adress = (host, port)
exit = False
if(not os.path.exists(diretorio)):
os.system("mkdir Publicações")
s = socket(AF_INET, SOCK_STREAM)
s.bind((host, port))
s.listen(1)
recebeConexoesThread(s)
| [
"rebecca.fernands@gmail.com"
] | rebecca.fernands@gmail.com |
c54cbc847e347a11beaa33ad2bd3cb4e97c48277 | 28cd350c10e5fe3542f2913e1833f5725aa56fd5 | /prepare_VehicleID.py | 17adc3f1c349e6a19d4ae965ba534f591054547c | [
"MIT"
] | permissive | layumi/Person_reID_baseline_pytorch | dffeb79f25f2fe1b83646746bbb295f2df36bad4 | 4dae9cdf42f71c72a44a64fb23bfc470c501085f | refs/heads/master | 2023-09-03T14:34:04.082508 | 2023-08-17T04:12:26 | 2023-08-17T04:12:26 | 115,712,649 | 4,042 | 1,132 | MIT | 2023-06-19T08:29:17 | 2017-12-29T10:22:41 | Python | UTF-8 | Python | false | false | 2,992 | py | import os
from shutil import copyfile
def copy_file(s, t):
for root, dirs, files in os.walk(s):
for name in files:
copyfile(root+'/'+name,t+'/'+name)
# You only need to change this line to your dataset download path
download_path = './data/VehicleID_V1.0/'
if not os.path.isdir(download_path):
print('please change the download_path')
#---------------------------------------
#train_all
train_path = download_path + '/image'
train_save_path = download_path + '/pytorch/train_test'
if not os.path.isdir(train_save_path):
os.mkdir(train_save_path)
fname = './data/VehicleID_V1.0/attribute/img2vid.txt'
with open(fname) as fp:
for i, line in enumerate(fp):
name, label = line.split(' ')
name = name + '.jpg'
ID = int(label)
src_path = train_path + '/' + name
dst_path = train_save_path + '/p%d'%ID
if not os.path.isdir(dst_path):
os.mkdir(dst_path)
print(src_path, dst_path)
copyfile( src_path, dst_path+'/'+name)
#---------------------------------------
#train
train_list = []
train_only_save_path = download_path + '/pytorch/train'
if not os.path.isdir(train_only_save_path):
os.mkdir(train_only_save_path)
with open(download_path+'train_test_split/train_list.txt', 'r') as f:
for name in f:
name = name.replace('\n','')
train_ID = name.split(' ')
train_ID = int(train_ID[1])
if not train_ID in train_list:
train_list.append(train_ID)
print(len(train_list))
for ID in train_list:
os.system('rsync -r %s/p%d %s'%( train_save_path, ID, train_only_save_path))
#---------------------------------------
#val800
for num in [800,1600,2400]:
val_list = []
query_save_path = download_path + '/pytorch/query%d'%num
gallery_save_path = download_path + '/pytorch/gallery%d'%num
if not os.path.isdir(query_save_path):
os.mkdir(query_save_path)
os.mkdir(gallery_save_path)
with open(download_path+'train_test_split/test_list_%d.txt'%num, 'r') as f:
for name in f:
name = name.replace('\n','')
val_ID = name.split(' ')
val_name = val_ID[0] + '.jpg'
val_ID = int(val_ID[1])
src_path = train_path + '/' + val_name
if val_ID not in val_list:
val_list.append(val_ID)
dst_path = gallery_save_path + '/p%d'%val_ID #For VehicleID QueryNumber > Gallery
if not os.path.isdir(dst_path):
os.mkdir(dst_path)
copyfile( src_path, dst_path+'/'+val_name)
else:
dst_path = query_save_path + '/p%d'%val_ID
if not os.path.isdir(dst_path):
os.mkdir(dst_path)
copyfile( src_path, dst_path+'/'+val_name)
| [
"zdzheng12@gmail.com"
] | zdzheng12@gmail.com |
e1ca012d25d48b3b8953cd4746269e0b143be5ba | 9f39599215823c341589d51cad7305c83377bc4b | /core/migrations/0015_topic.py | 018470b12492e00c4906d36ada61fc6700ccf072 | [] | no_license | andrewmilas10/Math-Team-Website | 2a557595e436ffcd42f6e90a72be4e88ac48e9e7 | 5b3295beee40da19a4fb80b34a5714d38071b4a9 | refs/heads/master | 2020-05-03T23:21:31.626998 | 2019-09-03T02:48:20 | 2019-09-03T02:48:20 | 178,863,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,504 | py | # Generated by Django 2.1.7 on 2019-05-09 03:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('core', '0014_profile_topicorder'),
]
operations = [
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('grade', models.CharField(choices=[('Freshman', 'Freshman'), ('Sophomore', 'Sophomore'), ('Junior', 'Junior'), ('Senior', 'Senior')], default='Freshman', max_length=9)),
('topic', models.CharField(choices=[('Ratios, Proportions and Percents', 'Ratios, Proportions and Percents'), ('Number Theory and Divisibility', 'Number Theory and Divisibility'), ('Counting Basics and Probability', 'Counting Basics and Probability'), ('Quadratics', 'Quadratics'), ('Probability', 'Probability'), ('Advanced Geometrical Concepts', 'Advanced Geometrical Concepts'), ('Perimeter, Area and Surface Area', 'Perimeter, Area and Surface Area'), ('Logic, Sets and Venn Diagram', 'Logic, Sets and Venn Diagram'), ('Similarity', 'Similarity'), ('Coordinate Geometry', 'Coordinate Geometry'), ('Circles', 'Circles'), ('Trigonometry', 'Trigonometry'), ('Parametric Equations', 'Parametric Equations'), ('Theory of Equations', 'Theory of Equations'), ('Freshman Regionals', 'Freshman Regionals'), ('Freshman State', 'Freshman State'), ('Sophomore Regionals', 'Sophomore Regionals'), ('Sophomore State', 'Sophomore State'), ('Junior Regionals', 'Junior Regionals'), ('Junior State', 'Junior State'), ('Senior Regionals', 'Senior Regionals'), ('Senior State', 'Senior State')], default='Ratios, Proportions and Percents', max_length=100)),
('description', models.TextField(blank=True, default='')),
('firstFile', models.FileField(upload_to='')),
('secondFile', models.FileField(upload_to='')),
('thirdFile', models.FileField(upload_to='')),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('is_complete', models.BooleanField(default=False)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"andrewmilas10@gmail.com"
] | andrewmilas10@gmail.com |
f0a0fe3b9c739238ec415e98df6f13f0b12a1853 | ba3d065f05729f21145e1cc1125bb1dc90400f6c | /beginpgm88.py | 000fe9a16ddf930dec8bf1d82d4708d76120152c | [] | no_license | ikailash19/guvi | 30c2c7aa13b9b1ad0f4a5827df79c0210d9a5c53 | 47a98a8993683bb03daf094cd1f683d2f1372e44 | refs/heads/master | 2020-06-19T11:37:04.026830 | 2019-08-16T03:55:20 | 2019-08-16T03:55:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | n,m=list(map(int,input().split()))
high=n
low=m
if m>n:
high=m
low=n
j=1
for i in range(high**high):
if low*i==high*j:
print(n*i)
yeah=1
exit()
else:
if low*(i+1)<high*j:
pass
elif low*(i+1)>high*j:
j+=1
if n==m:
print(n)
| [
"noreply@github.com"
] | ikailash19.noreply@github.com |
672e5ce2ef9bcad476a3974ece13a3744e8ef81b | 0bb9fc5f5d91ae7a586905f3d28adb551b72fb3a | /demo_1.py | 99dc527226872cdc891c6384710e9d45816c7c94 | [] | no_license | Qinzheng7575/xbot-park | 7e80f31abebd51801b3247b1a7ccc8a58f5f2c4e | 3279580d233022eceb56a926d299ac7c360edd78 | refs/heads/master | 2023-03-03T15:20:17.840366 | 2021-02-06T03:25:24 | 2021-02-06T03:25:24 | 336,444,291 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,753 | py | import pygame
from pygame.locals import *
import time, os
from items_1 import API,BALL
import threading
from threading import Lock, Thread
import numpy as np
import copy
if __name__ == "__main__":
pygame.init()
circle_init=pygame.image.load(r'D:\\ForStudy\Desktop\\Xbot\\1.png')
MAX_r=200
MIN_r=100
r=MIN_r
r_last=MIN_r
flag=0
SUSPEND=0#暂停信号
small_ball=[]
small_ball_2=[]
color=[]
color_2=[]
small_ball_num=20
divider=0#分频器
divider_text=0
ttf_abs ='C:\Windows\Fonts\simhei.ttf'
myfront = pygame.font.Font(ttf_abs, 22)
screen = pygame.display.set_mode((1200, 700), 0, 32)
pygame.display.set_caption("demo_1")
api=API
server=Thread(target=api.pipe_begin,args=(api,))
server.start()
while True:
screen.fill((255,255,255))
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 3:
print("click 3")
SUSPEND=1
api.breathe=['222222']
if event.button == 1:
print("click 3")
SUSPEND=0
if SUSPEND==1:
api.breathe=['222222']
else:
pass
# screen.blit(circle_init,(500,350))
flag=int(api.breathe[0][5])
# print(flag,type(flag))
if flag==0:
r_last=r
r+=1
if r>MAX_r:
flag=1
elif flag==1:
r_last=r
r-=1
if r<MIN_r:
flag=0
if divider==0:
# small_ball=[]
# color=[]
# for i in range(np.random.randint(5,10)):
# small_ball.append(BALL(np.random.randint))
# b=BALL(10,0,0)
# print(b.Stride_x,b.Stride_y,b.distance)
for i in range(20):
color.append(api.get_color(api))
temp=BALL(np.random.randint(6,9),300,300)
small_ball.append(temp)
color_2.append(api.get_color(api))
temp_2=BALL(np.random.randint(6,9),900,400)
small_ball_2.append(temp_2)
for i in range(len(small_ball)):
if small_ball[i].if_move!=0:
small_ball[i].move(r)
pygame.draw.circle(screen,color[i], (small_ball[i].x , small_ball[i].y), small_ball[i].r)
# print(b.distance)
if small_ball_2[i].if_move!=0:
small_ball_2[i].move_2(r)
pygame.draw.circle(screen,color_2[i], (small_ball_2[i].x , small_ball_2[i].y), small_ball_2[i].r)
pygame.draw.circle(screen, (155,234,199), (600,350), r)
# pygame.draw.circle(screen, (160,102,211), (600,350), r+20,6)
if divider_text<100:
text_big1 = myfront.render("放松", 1, (160,102,211))
screen.blit(text_big1,(600+r+40,350))
elif 250<divider_text<300:
text_big1 = myfront.render("很好,就这样子", 1, (160,102,211))
screen.blit(text_big1,(600+r+40,350))
else:
if r_last<r:
text_big1 = myfront.render("请缓慢吸气", 1, (160,102,211))
screen.blit(text_big1,(600+r+40,350))
elif r_last>r:
text_big1 = myfront.render("请缓慢呼气", 1, (160,102,211))
screen.blit(text_big1,(600+r+40,350))
#分频
if divider<1:
divider+=0.01
else:
divider=0
if divider_text<300:
divider_text+=1
else:
divider_text=0
time.sleep(0.03)
pygame.display.update()
| [
"2018011214021@std.uestc.edu.cn"
] | 2018011214021@std.uestc.edu.cn |
99c95d96c731ede7c56af9bf5aadcf810551ef36 | ae325d6c0832e7d012d433ff525d3792aee89a7c | /dagger_contrib/serializer/pandas/dataframe/__init__.py | d4a643aed1f766861a0dffe3d3b1aa32c33e4caa | [
"Apache-2.0"
] | permissive | larribas/dagger-contrib | 3bf164cf64b00221be99a69fb2dd0c7ac8998079 | 1833614c82241a404b8e54c74052c5067b0ca104 | refs/heads/main | 2023-08-19T06:05:26.468330 | 2021-10-23T11:41:28 | 2021-10-23T11:41:28 | 410,013,199 | 1 | 1 | Apache-2.0 | 2021-09-29T07:24:36 | 2021-09-24T15:26:05 | Python | UTF-8 | Python | false | false | 285 | py | """Collection of serializers for Pandas DataFrames (https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html)."""
from dagger_contrib.serializer.pandas.dataframe.as_csv import AsCSV # noqa
from dagger_contrib.serializer.pandas.dataframe.as_parquet import AsParquet # noqa
| [
"noreply@github.com"
] | larribas.noreply@github.com |
16ee84d5d1b6441baaf6dbf58d95f65b16fd49cb | e1b3816615cce62ebe2b6c59b0eb3fbd3693d73b | /solutions/167-two-sum-ii-input-array-is-sorted/two-sum-ii-input-array-is-sorted.py | 60d0a04a154052849aad48a3e763a43ca3bebcba | [] | no_license | fagan2888/leetcode-6 | 1fb18979ffacb82d5db77988b38ecd7371b428b9 | 14176f1752e2bb94dec51bd90dfd412896ed84de | refs/heads/master | 2022-01-10T03:27:51.388066 | 2019-06-15T14:13:48 | 2019-06-15T14:13:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | # -*- coding:utf-8 -*-
# Given an array of integers that is already sorted in ascending order, find two numbers such that they add up to a specific target number.
#
# The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2.
#
# Note:
#
#
# Your returned answers (both index1 and index2) are not zero-based.
# You may assume that each input would have exactly one solution and you may not use the same element twice.
#
#
# Example:
#
#
# Input: numbers = [2,7,11,15], target = 9
# Output: [1,2]
# Explanation: The sum of 2 and 7 is 9. Therefore index1 = 1, index2 = 2.
#
class Solution(object):
    def twoSum(self, numbers, target):
        """Return the 1-based indices of the two entries of the sorted
        list ``numbers`` that sum to ``target``.

        The input is sorted ascending, so a two-pointer scan finds the
        unique pair in O(n) time with O(1) extra space (the previous
        dict-based approach needed O(n) space and never exploited the
        ordering).

        :type numbers: List[int]
        :type target: int
        :rtype: List[int]  (or None when no pair can exist / is found)
        """
        # A pair needs at least two numbers.
        if len(numbers) <= 1:
            return None
        lo, hi = 0, len(numbers) - 1
        while lo < hi:
            pair_sum = numbers[lo] + numbers[hi]
            if pair_sum == target:
                # The problem statement uses 1-based indices.
                return [lo + 1, hi + 1]
            if pair_sum < target:
                lo += 1  # sum too small: move the lower pointer up
            else:
                hi -= 1  # sum too large: move the upper pointer down
        # No pair found (the problem guarantees one exists; mirror the
        # original's implicit None for robustness).
        return None
| [
"rzhangpku@pku.edu.cn"
] | rzhangpku@pku.edu.cn |
0ff0d3751ba227d4bf70229f31936df8045ee7f1 | 7cdc028dfb1ee2412ee268038d9b44c77b96052c | /blocwatch_v1/bitcoin/list_transaction_outputs_response.py | 0493e33e064153df610eacb97fd20a539e8316bb | [] | no_license | ChainMonitor/blocwatch-python-sdk | 535e2ac08d30b442a51a4c965beee0ab175c92a3 | 6a09a02c40deda3191bb6a674f916a832f21b907 | refs/heads/master | 2020-03-27T16:24:49.165199 | 2018-09-18T16:13:27 | 2018-09-18T16:13:27 | 146,780,835 | 1 | 0 | null | 2018-09-18T16:13:28 | 2018-08-30T17:07:14 | Python | UTF-8 | Python | false | false | 5,063 | py | # coding: utf-8
"""
Blocwatch REST API
The premier API for blockchain analysis # noqa: E501
OpenAPI spec version: v1.0.0
Contact: support@blocwatch.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from blocwatch_v1.bitcoin.bitcoin_output import BitcoinOutput # noqa: F401,E501
from blocwatch_v1.bitcoin.bitcoin_transaction import BitcoinTransaction # noqa: F401,E501
from blocwatch_v1.bitcoin.page import Page # noqa: F401,E501
class ListTransactionOutputsResponse(object):
    """Swagger-codegen style model for the "list transaction outputs"
    API response.

    Carries the requested outputs, the pagination descriptor and the
    parent transaction, and follows the model conventions shared by the
    other generated classes in this package (``swagger_types`` /
    ``attribute_map`` / ``to_dict``).
    """

    # Attribute name -> declared swagger type, used by to_dict and the
    # generated (de)serialisation machinery.
    swagger_types = {
        'outputs': 'list[BitcoinOutput]',
        'page': 'Page',
        'transaction': 'BitcoinTransaction'
    }

    # Attribute name -> field name in the JSON wire format.
    attribute_map = {
        'outputs': 'outputs',
        'page': 'page',
        'transaction': 'transaction'
    }

    def __init__(self, outputs=None, page=None, transaction=None):  # noqa: E501
        """Create the model; only keyword arguments that are not ``None``
        are assigned (through the property setters)."""
        self._outputs = None
        self._page = None
        self._transaction = None
        self.discriminator = None
        if outputs is not None:
            self.outputs = outputs
        if page is not None:
            self.page = page
        if transaction is not None:
            self.transaction = transaction

    @property
    def outputs(self):
        """The requested outputs (``list[BitcoinOutput]``)."""
        return self._outputs

    @outputs.setter
    def outputs(self, outputs):
        """Set the requested outputs."""
        self._outputs = outputs

    @property
    def page(self):
        """The pagination descriptor (``Page``)."""
        return self._page

    @page.setter
    def page(self, page):
        """Set the pagination descriptor."""
        self._page = page

    @property
    def transaction(self):
        """The transaction that contains the listed outputs
        (``BitcoinTransaction``)."""
        return self._transaction

    @transaction.setter
    def transaction(self, transaction):
        """Set the transaction that contains the listed outputs."""
        self._transaction = transaction

    def to_dict(self):
        """Return the model's properties as a plain dict, converting
        nested models (anything exposing ``to_dict``) one level deep."""
        converted = {}
        for name in self.swagger_types:
            value = getattr(self, name)
            if isinstance(value, list):
                converted[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                converted[name] = value.to_dict()
            elif isinstance(value, dict):
                converted[name] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                converted[name] = value
        return converted

    def to_str(self):
        """Return the pretty-printed string form of :meth:`to_dict`."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Models are equal iff the other object is the same model type
        with equal attribute state."""
        if not isinstance(other, ListTransactionOutputsResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not self == other
| [
"adavis@airheadtech.com"
] | adavis@airheadtech.com |
20ad1684980e1542102953c8e662db1f0a500789 | fccb5989d902bebeb451807ccc9973048c8213b1 | /misc/examples/dijkstra_generate_example.py | 87ca97a0d2a0a3cb87de8c604ff562e876a3e3b5 | [] | no_license | Ginotuch/cs380-heap-structures | 8daa7d4cc8ed3112dcd8617a0a3469ec97f6d266 | 7084ba4e50a4db1235e70e45f8b3811c5e74b1b9 | refs/heads/master | 2023-01-10T18:07:09.456763 | 2020-11-11T22:22:01 | 2020-11-11T22:22:01 | 224,539,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,085 | py | """
Normal Graph:
----------
Generates a random fully connected graph to show the larger
average heap sizes. HeapQueue with tracking is on average
slower than without tracking because of dictionary access
time within the HeapQueue.swap method. Running a timing
profile on it shows that every method except HeapQueue.swap
is faster with tracking than without.
Bad Graph:
----------
This generates a specifically crafted graph and runs Dijkstra's
algorithm to traverse it. First using the new HeapQueue developed,
showing the average size of the heap and the amount of time it
takes. Then again with the standard library heapq implementation
which has a much larger heap size and takes much longer.
Example output with 10^6 nodes on an Intel i5-8250U (8 cores) @ 3.4GHz:
> Using the new HeapQueue
> Average size of heap: 1.999998
> Time taken: 6.92s
>
> Using standard library heapq
> Average size of heap: 500000.499999
> Time taken: 15.89s
"""
import time
from misc.debug_versions.dijkstra import Dijkstra
from misc.debug_versions.dijkstra_heapqueue_no_tracking import Dijkstra as Dijkstra_no_tracking
from misc.debug_versions.dijkstra_heapq import Dijkstra as Dijkstra_heapq
from dijkstra.generate_graph import RandomConnectedGraph
def run_each_heap(graph):
    """Run Dijkstra over ``graph`` once per heap implementation and
    print the average/largest heap size and the wall-clock time for
    each run."""
    implementations = [
        ("HeapQueue", Dijkstra),
        ("HeapQueue no tracking", Dijkstra_no_tracking),
        ("Heapq", Dijkstra_heapq),
    ]
    for label, dijkstra_cls in implementations:
        print("Using", label)
        runner = dijkstra_cls(adj_list=graph.adj_list)
        started = time.time()
        runner.start()
        print(" Average size of heap:", sum(runner.sizes) / len(runner.sizes))
        print(" Largest size of heap:", max(runner.sizes))
        print(" Time taken:", str(round(time.time() - started, 2)) + "s\n")
def main_normal():
    """Benchmark every heap implementation on a random fully connected
    graph (the 'normal' case with larger average heap sizes)."""
    print("Normal graph run")
    random_graph = RandomConnectedGraph(10 ** 5)
    random_graph.gen_graph()
    run_each_heap(random_graph)
def main_bad():
    """Benchmark every heap implementation on the specifically crafted
    adversarial graph (large heap sizes for the stdlib heapq)."""
    print("Bad graph run")
    adversarial = RandomConnectedGraph(10 ** 5)
    adversarial.get_bad_graph()
    run_each_heap(adversarial)
# Guard the script entry point so importing this module (e.g. for the
# graph helpers) does not launch the long-running benchmarks.
if __name__ == "__main__":
    main_normal()
    main_bad()
| [
"6973573+Ginotuch@users.noreply.github.com"
] | 6973573+Ginotuch@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.