blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
441621bfb233fecc2a9edaefe943e7ff39ecabd8 | 6ff58789c5ee87a4df2601f02e17df2c2cd4c1fb | /sudoku/sudoku.py | 9bb2c42c516457a729a257c1d9b859c2b33a8fda | [] | no_license | Checkmate50/6110-Assignments | 798d811d11bf7992e0c69ccdbcbe4fcb4c267163 | 7535cda5593a0fad861459af398b802d50df6d20 | refs/heads/master | 2021-01-16T19:32:33.434332 | 2015-05-05T11:07:08 | 2015-05-05T11:07:08 | 30,889,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,133 | py | """
Sudoku Problem Solver
Created by Dietrich Geisler
Python version 2.7
3/4/2015
"""
from z3 import *
import sys
import string
import math
def fileParser(fileName):
    """
    Parse a sudoku puzzle from a whitespace-separated text file.

    Each line holds one row of the puzzle; '.' marks an unknown cell and a
    digit marks a given clue.  There are no restrictions on file extension.

    :param fileName: path to the puzzle file
    :return: (data, length) where data is an NxN list of ints (0 = unknown)
             and length is N, the edge size of the grid
    """
    with open(fileName, 'r') as f:
        # The first line determines the grid size N, which must be a perfect
        # square for the inner-box constraints to make sense.
        length = len(f.readline().split())
        if int(math.sqrt(length) + 0.5)**2 != length:
            print("Sudoku grid edges must be a perfect square")
            exit(0)
        f.seek(0)
        data = [[0]*length for i in range(length)]  # 0 means "unknown cell"
        for j, line in enumerate(f):
            for i, char in enumerate(line.split()):
                if char != '.':
                    data[j][i] = int(char)
    return data, length
# Resolve the puzzle file name: either the first CLI argument or an
# interactive prompt (Python 2 raw_input).
toParse = ""
if len(sys.argv) == 1:
    toParse = raw_input("Please provide the name of a sudoku puzzle file: ")
else:
    toParse = sys.argv[1]
data, length = fileParser(toParse)
# Build one z3 Int variable per cell; names are 1-based "grid_row_col".
s = Solver()
grid = [ [ Int("grid_%s_%s" % (i+1, j+1)) for j in range(length) ] for i in range(length) ]
# Pin the given clues and bound every cell to 1..length.
for i in range(length):
    for j in range(length):
        if data[j][i] != 0:
            s.add(grid[j][i] == data[j][i])
        s.add(grid[j][i] <= length, grid[j][i] > 0)
# All-different constraints for every column and every row.
for i in range(length):
    for j in range(length):
        for k in range(j + 1, length):
            s.add(grid[j][i] != grid[k][i]) #Columns
            s.add(grid[i][j] != grid[i][k]) #Rows
        pass  # NOTE(review): redundant statement, left in place
# All-different constraints inside each sqrt(N) x sqrt(N) box.
# Bug fix: the original guard was "row1 != row1" (always False), so no
# inner-square constraint was ever added; it must compare row1 with row2.
sqrtl = int(math.sqrt(length))
for i in range(0, length, sqrtl):
    for j in range(0, length, sqrtl):
        for row1 in range(sqrtl):
            for col1 in range(sqrtl):
                for row2 in range(sqrtl):
                    for col2 in range(sqrtl):
                        # Only constrain distinct cell pairs within the box.
                        if row1 != row2 or col1 != col2:
                            s.add(grid[col1 + i][row1 + j] != grid[col2 + i][row2 + j]) #Inner Squares
# Solve and print the completed grid (Python 2 print statements below).
if str(s.check()) != "sat":
    print "Sudoku puzzle is not satisfiable (solvable)"
    exit(0)
# Parse the model's textual form, one "grid_R_C = V" entry per line.
modelValues = string.split(str(s.model()), "\n")
for value in modelValues:
    # NOTE(review): fixed character offsets (6, 8, 12) assume single-digit
    # row/column indices and values - confirm for grids of edge >= 10.
    data[int(value[6]) - 1][int(value[8]) - 1] = value[12]
print("")
for line in data:
    for item in line:
        print(str(item)),
print("") | [
"dgeisler50@gmail.com"
] | dgeisler50@gmail.com |
8afa0cd7031f562df73ad11688bdbae10d0f9bd4 | ad1233abfed0374329410eac8509631eed63dec6 | /PythagoreanTriangle.py | 9b53af267d36443e9546a1848a89a56f36ade45a | [] | no_license | yaayitsamber/Udacity-Python-Projects | 2d87b43d3843cdf02d4247cc25ba8f5908190a6a | eb6fc738c74b5b024b38c02374071fca87b49e73 | refs/heads/master | 2021-01-10T01:11:41.495792 | 2016-02-18T20:30:01 | 2016-02-18T20:30:01 | 50,958,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | side_1 = raw_input("Enter triangle side 1")
side_2 = raw_input("Enter triangle side 2")
side_3 = raw_input("Enter triangle side 3")
# NOTE(review): raw_input returns strings, so max/min below compare
# lexicographically (e.g. "9" > "10") - confirm inputs should be int()'d first.
hypotenuse = max(side_1,side_2,side_3)
a = min(side_1, side_2, side_3)
# NOTE(review): b is the plain difference of the two sides, not derived from
# the Pythagorean relation - verify this is intended.
b = (int(hypotenuse) - int(a))
print int(hypotenuse)**2
print int(a)**2
print int(b)**2
# NOTE(review): this condition tests hypotenuse/2 == 0 rather than the
# Pythagorean identity a^2 + b^2 == c^2 - looks like a bug; confirm.
if int(hypotenuse)/2 == 0 :
    print ("NOPE")
else:
    print ("Right on")
| [
"amberhilton06@gmail.com"
] | amberhilton06@gmail.com |
1f548af771d66714e4a97bb5c2e7fad0b12b860a | 69af3274563b3c488fec84903dd37bfb6546aefa | /AmbidexBot/Status.py | 1daade4977dd5a63348a805e9aa45b464b5d431e | [] | no_license | joaomlsantos/AmbidexBot | 5bd23a95be21bac41fc4c53a79240ce70515812a | 1f428e39b9ede17d01b1ff6d4585575b716b64ad | refs/heads/master | 2022-05-01T18:25:46.436282 | 2018-10-08T16:34:46 | 2018-10-08T16:34:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | from enum import Enum
class Status(Enum):
    """Life state of a game participant."""

    DEAD = 0   # eliminated from play
    ALIVE = 1  # still participating
| [
"joao.l.santos@tecnico.ulisboa.pt"
] | joao.l.santos@tecnico.ulisboa.pt |
fe8974fa7e751cfea487290d10694d7ad661d211 | 491f29501fa7d484a5860f64aef3fa89fb18ca3d | /examples/mechanics/GeometricPrimitives/disk_on_box.py | 275e8e9cb3d623f8b232906ba95792f7316f040e | [
"Apache-2.0"
] | permissive | siconos/siconos-tutorials | e7e6ffbaaea49add49eddd317c46760393e3ef9a | 0472c74e27090c76361d0b59283625ea88f80f4b | refs/heads/master | 2023-06-10T16:43:13.060120 | 2023-06-01T07:21:25 | 2023-06-01T07:21:25 | 152,255,663 | 7 | 2 | Apache-2.0 | 2021-04-08T12:00:39 | 2018-10-09T13:26:39 | Jupyter Notebook | UTF-8 | Python | false | false | 4,596 | py | #!/usr/bin/env python
#
# Example of one object under gravity with one contactor and a ground
# using the Siconos proposed mechanics API
#
from siconos.mechanics.collision.tools import Contactor
from siconos.io.mechanics_run import MechanicsHdf5Runner, MechanicsHdf5Runner_run_options
from siconos.mechanics.collision.bullet import SiconosBulletOptions, SICONOS_BULLET_2D
import siconos.numerics as sn
import siconos.kernel as sk
import math
# Set to True to skip (re)building the HDF5 scene and only re-run the
# simulation on an existing file.
restart=False
if not restart:
    # Creation of the hdf5 file for input/output
    with MechanicsHdf5Runner() as io:
        # Definition of a sphere
        io.add_primitive_shape('Disk', 'Disk', (2,),
                               insideMargin=0.0, outsideMargin=0.0)
        # Definition of the ground shape
        io.add_primitive_shape('Ground', 'Box2d', (20, 1),
                               insideMargin=0.0, outsideMargin=0.0)
        # Definition of a non smooth law. As no group ids are specified it
        # is between contactors of group id 0.
        io.add_Newton_impact_friction_nsl('contact', mu=0.1, e=0.5)
        # The sphere object made with an unique Contactor : the sphere shape.
        # As a mass is given, it is a dynamic system involved in contact
        # detection and in the simulation. With no group id specified the
        # Contactor belongs to group 0
        io.add_object('disk', [Contactor('Disk')],
                      translation=[-1, 2.],
                      orientation = [math.pi/4.0],
                      velocity=[0, 0, 0.0],
                      mass=1., inertia =2.0)
        # io.add_object('disk2', [Contactor('Disk')],
        #               translation=[0, 6.],
        #               velocity=[0, 0, -10.0],
        #               mass=1., inertia =2.0)
        # Second disk placed up-right of the first along a 45-degree line.
        io.add_object('disk2', [Contactor('Disk')],
                      translation=[4*math.sqrt(2)/2., 2+4*math.sqrt(2)/2.],
                      orientation = [math.pi/4.0],
                      velocity=[0, 0, 0.0],
                      mass=1., inertia =2.0)
        # Third disk starts with a downward angular velocity component.
        io.add_object('disk3', [Contactor('Disk')],
                      translation=[4*math.sqrt(2), 2.],
                      orientation = [math.pi/4.0],
                      velocity=[0, 0, -1.0],
                      mass=1., inertia =2.0)
        # the ground object made with the ground shape. As the mass is
        # not given, it is a static object only involved in contact
        # detection.
        io.add_object('ground', [Contactor('Ground')],
                      translation=[0, -.5])
# Run the simulation from the inputs previously defined and add
# results to the hdf5 file. The visualisation of the output may be done
# with the vview command.
# Bullet collision-engine configuration (2D mode, no perturbation points).
bullet_options = SiconosBulletOptions()
bullet_options.worldScale = 1.0
bullet_options.contactBreakingThreshold = 0.04
bullet_options.dimension = SICONOS_BULLET_2D
bullet_options.perturbationIterations = 0
bullet_options.minimumPointsPerturbationThreshold = 0
# NSGS friction solver with a tight tolerance and a high iteration cap.
options = sk.solver_options_create(sn.SICONOS_FRICTION_2D_NSGS)
options.iparam[sn.SICONOS_IPARAM_MAX_ITER] = 100000
options.dparam[sn.SICONOS_DPARAM_TOL] = 1e-8
# Simulation horizon and time step.
T=2.0
if restart:
    T=2.0
    #T=1*0.001
hstep=0.01
run_options=MechanicsHdf5Runner_run_options()
run_options['t0']=0
run_options['T']=T
run_options['h']=hstep
run_options['bullet_options']=bullet_options
run_options['solver_options']=options
run_options['constraint_activation_threshold']=1e-05
run_options['Newton_options']=sk.SICONOS_TS_LINEAR
run_options['osns_assembly_type']= sk.GLOBAL_REDUCED
run_options['osi']= sk.MoreauJeanGOSI
run_options['verbose']=True
run_options['with_timer']=True
run_options['explode_Newton_solve']=True
run_options['explode_computeOneStep']=True
#run_options['output_frequency']=output_frequency
# Re-open the scene file in read/write mode and run the simulation.
with MechanicsHdf5Runner(mode='r+') as io:
    # By default earth gravity is applied and the units are those
    # of the International System of Units.
    # io.run(verbose=True,
    #        with_timer=False,
    #        bullet_options=bullet_options,
    #        face_class=None,
    #        edge_class=None,
    #        t0=0,
    #        T=T,
    #        h=0.001,
    #        theta=0.50001,
    #        Newton_max_iter=1,
    #        set_external_forces=None,
    #        solver_options=options,
    #        numerics_verbose=True,
    #        output_frequency=None,
    #        Newton_options= sk.SICONOS_TS_LINEAR_IMPLICIT,
    #        constraint_activation_threshold=1e-5,
    #        osi=sk.MoreauJeanGOSI,
    #        osns_assembly_type= sk.GLOBAL_REDUCED
    #        )
    io.run(run_options)
| [
"vincent.acary@inria.fr"
] | vincent.acary@inria.fr |
128763e94d58774059e8218f401b3f0fd84cad73 | bc9ebb347af6804c1bce6e960148ece1fbb34a47 | /1_python/bronze/10870.py | d22572f825e791fd8b0ffbb53df9d5bdaad4045a | [] | no_license | mooncs/BOJ | 48416fec1a059197a72de61c8d6e72f7fc8b542b | 45d2d5a8a6bf0f10d026f3846b70009914aa90d3 | refs/heads/main | 2023-08-13T15:30:47.395359 | 2021-10-11T13:37:04 | 2021-10-11T13:37:04 | 392,885,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | # 피보나치 수 5
def fibo(x):
if x <= 1:
return x
return fibo(x-2) + fibo(x-1)
# Read the index from stdin and print the corresponding Fibonacci number.
n = int(input())
print(fibo(n))
# # for문
# def fibo(x):
# a, b = 0, 1
# for _ in range(x):
# a, b = b, a+b
# return(a)
# n = int(input())
# print(fibo(n))
| [
"mooncs528@gmail.com"
] | mooncs528@gmail.com |
40aa8ad79278c7537cdc7550405b8ad12b72d6e7 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4081/codes/1643_1055.py | 5bb3ece083af6c50c3772003c55246d4aea20a12 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # Teste seu código aos poucos.
# Não teste tudo no final, pois fica mais difícil de identificar erros.
# Use as mensagens de erro para corrigir seu código.
from math import*
v=float(input("velocidade inicial:"))
a=radians(float(input("angulos de tiro:")))
d=float(input("valor da distancia: "))
r=((v**2)*sin(2*a))/9.8
p=d-r
if(abs(p)<0.1):
print("sim")
else:
print("nao") | [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
a2f97a1b75ca67fd8ec9d7911b476c4bba4758f8 | c9829d3a9b787107a30ff43f929c48ccfeb38e5d | /P13_ContentAggregator/P13_Sumit/ScapeWeb/college.py | da28b98abb831150c8da0182681b10235716fa1e | [] | no_license | githarshp/GDTC_Hack-In | 3fc954943780ce9ccf926d6e5f10c89743b8cc00 | 1682b718f841371a3254d24151fbe7f014251478 | refs/heads/master | 2020-06-16T15:02:48.501256 | 2019-07-07T06:47:54 | 2019-07-07T06:47:54 | 195,095,352 | 1 | 0 | null | 2019-07-03T16:59:19 | 2019-07-03T16:59:18 | null | UTF-8 | Python | false | false | 838 | py | import requests
from bs4 import BeautifulSoup as bs
import sqlite3 as sq
def scapecollege(db):
    """Scrape the careers360 engineering-college ranking page and store
    (title, url) rows in the ``college`` table.

    :param db: database helper exposing ``connectcollege()`` which returns
               a sqlite3 connection (or None on failure)
    :return: True when the scrape + insert ran, False when no connection
    """
    url = "https://engineering.careers360.com/colleges/ranking"
    r = requests.get(url).content
    content = bs(r, "html.parser")
    con = db.connectcollege()
    if con is not None:
        # Iterate in reverse so the lowest-ranked entries are inserted first.
        for i in reversed(content.find_all('td',class_="colgName")):
            try:
                title = i.text.strip()
                url = i.a['href'].strip()
                cursor = con.cursor()
                cursor.execute("Insert into college(title, url) values(?,?)",(title, url))
            except sq.IntegrityError as e:
                # Duplicate row (unique constraint) - deliberately skipped.
                pass
            except Exception as e:
                print("Error : ", e)
        con.commit()
        con.close()
        return True
    else:
        return False
"noreply@github.com"
] | githarshp.noreply@github.com |
fdaf1fd7ce7eed92c8c1be4aced7eb9dd72466c1 | 0fb521f16a5933b07d4cd559d694607e4949b5c0 | /articles_app/migrations/0019_auto_20201105_0051.py | 4b5eda3643d0d53c239418cac4effc6a2b001d11 | [] | no_license | StanMey/Robotreporter | 9566c4391d503c88d673fad76243229451757184 | 5e62f96e541118ae924303b730f18d248022cac0 | refs/heads/main | 2023-03-01T10:59:00.972938 | 2021-01-22T15:50:04 | 2021-01-22T15:50:04 | 301,501,404 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | # Generated by Django 3.0.3 on 2020-11-05 00:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Articles.image_file a nullable ImageField."""
    dependencies = [
        ('articles_app', '0018_articles_image_file'),
    ]
    operations = [
        migrations.AlterField(
            model_name='articles',
            name='image_file',
            field=models.ImageField(null=True, upload_to='images/', verbose_name=''),
        ),
    ]
| [
"stan_1998@hotmail.com"
] | stan_1998@hotmail.com |
9562b7a6fd47559620ca4485e797a0115bcde11b | dbf11c1e3ee08b38d7406a3e353421bb1e969786 | /oppari_1.5/indicators.py | 3ecef8bd96b9f73885b7b7263867abb93cc70d80 | [] | no_license | Jounikononen/Projektit | 364771ca08fe5b6c4d4aa6dd7bbc0f18a74a6a67 | b4210162ff2f49368c100ad9abc14592fe0b123c | refs/heads/master | 2021-11-07T01:02:09.384089 | 2021-11-06T08:16:03 | 2021-11-06T08:16:03 | 181,548,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | #Indikaattorit (SMA)
#SMA50 ja SMA200 (Simple moving average)
def sma(lista):
    """Compute simple moving averages (SMA200 and SMA50) of a price series.

    The long average covers the whole list; the short average covers
    ``lista[:-150]`` (the first 50 entries of a 200-element list).
    NOTE(review): the original comment called these the 50 *newest* values -
    confirm the ordering of the input list.

    :return: tuple ``(sma200, sma50)``, each rounded to 6 decimals
    """
    long_avg = round(sum(lista) / len(lista), 6)
    short_window = lista[:-150]
    short_avg = round(sum(short_window) / len(short_window), 6)
    return long_avg, short_avg
| [
"noreply@github.com"
] | Jounikononen.noreply@github.com |
17d64d2adefc2f0e0933db8af78479fafc29000f | 9a30042300a9f027bdf87ae56af2b0bd36bd3243 | /articles/migrations/0003_article_author.py | bb072056a86f0cff6a5e50c9f5318c5e3b6a8608 | [] | no_license | tanvidhope/Blog | b64470137d07d885555144daa3db5a5045d704c4 | 675168552055836ac62fe544c6e58997c6327d0b | refs/heads/master | 2023-01-20T01:58:13.366400 | 2020-12-03T08:38:48 | 2020-12-03T08:38:48 | 318,098,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | # Generated by Django 2.2.1 on 2019-05-26 03:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: add an Article.author FK to the user model."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('articles', '0002_article_thumb'),
    ]
    operations = [
        migrations.AddField(
            model_name='article',
            name='author',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"tanvidhope@gmail.com"
] | tanvidhope@gmail.com |
fdb67800b59f72f0719f84fc5f29ecac8b6c4259 | bab5956b12f0a2836c81d01bb946d8a72f6d0e66 | /swea/d_3/0304_4751.py | 18375436c0d96a7f8eb5b494881530461066e971 | [] | no_license | haesungbang/Algorithm | 09513fbb483793a07cfe4db5b279685ce14b324b | b72d56abd7c61634f7905c81e115c35fc75c3604 | refs/heads/master | 2023-06-24T16:12:20.789595 | 2021-07-11T11:26:28 | 2021-07-11T11:26:28 | 330,677,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | # 4751. 다솔이의 다이아몬드 장식
T = int(input())
for t in range(1, T+1):
deco = input()
arr =['','','','','']
i = 0
for j in range(0, 4 * len(deco), 4):
arr[0] += '..#.'
arr[1] += '.#.#'
arr[2] += '#.%s.'%deco[i]
i += 1
arr[3] += '.#.#'
arr[4] += '..#.'
arr[0] += '.'
arr[1] += '.'
arr[2] += '#'
arr[3] += '.'
arr[4] += '.'
for i in arr:
print(i) | [
"haesunbang94@gmail.com"
] | haesunbang94@gmail.com |
c3376b68351381708517c87a7837279eff42a4b7 | 39338e8f068d15504a289609610c61ea60d2d2d5 | /vietnamese/local/get_dataset_info.py | f00dd881b595da5c8ac55612d7ccede0961bdc5c | [] | no_license | minhduc0711/kaldi-exps | a9fdbebf8ad3b6b793407556233123bbb20e591f | 7510591a4866fd1f796e0f79516a6fcb17d504bc | refs/heads/master | 2022-11-12T14:03:52.474011 | 2020-07-13T02:41:51 | 2020-07-13T02:41:51 | 268,758,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 615 | py | import argparse as ap
import wave
import contextlib
from pathlib import Path
def get_duration(fname):
    """Return the length of a WAV file in seconds (frames / sample rate)."""
    wav = wave.open(fname, 'r')
    try:
        return wav.getnframes() / float(wav.getframerate())
    finally:
        # Always release the file handle, as contextlib.closing did before.
        wav.close()
# CLI: sum up the duration of every .wav file under data_dir (recursive).
parser = ap.ArgumentParser()
parser.add_argument("data_dir", type=str)
args = parser.parse_args()
data_dir = Path(args.data_dir)
s = 0    # total duration in seconds
cnt = 0  # number of .wav files found
for fname in data_dir.glob("**/*.wav"):
    s += get_duration(str(fname))
    cnt += 1
hours = s / 3600
print(f"Number of utterances: {cnt}")
print(f"Total audio hours: {hours:.2f}h")
| [
"phamminhduc0711@gmail.com"
] | phamminhduc0711@gmail.com |
6c2b98a894099f068a128d68de56fc0ff0dcdde7 | 2b11e7aa28b84af2e2a7fd8719af89f5fffd8a5b | /tests/test_models/test_user.py | 4b734786dc7c17a6ae7e51cd396963dfe334a4dd | [] | no_license | nikolasribeiro/AirBnB_clone | 6a3e3d65314a0131252461757943468628394ced | 4529c56a706f0d956a238522d912cf6260f2fa28 | refs/heads/main | 2023-03-10T22:10:10.665939 | 2021-02-27T19:33:11 | 2021-02-27T19:33:11 | 338,063,410 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,112 | py | #!/usr/bin/python3
""" Module tests/test_models/test_user"""
import models
from models.base_model import BaseModel
import os
import unittest
class TestBase_Model(unittest.TestCase):
    """Unit tests for the BaseModel class."""
    def test_docstring(self):
        """The models.base_model module and BaseModel class have docstrings."""
        msj = "Módulo does not has docstring"
        self.assertIsNotNone(models.base_model.__doc__, msj)
        msj = "Clase does not has docstring"
        self.assertIsNotNone(BaseModel.__doc__, msj)
    def test_executable_file(self):
        """models/base_model.py is readable, writable and executable."""
        is_read_true = os.access("models/base_model.py", os.R_OK)
        self.assertTrue(is_read_true)
        is_write_true = os.access("models/base_model.py", os.W_OK)
        self.assertTrue(is_write_true)
        is_exec_true = os.access("models/base_model.py", os.X_OK)
        self.assertTrue(is_exec_true)
    def test_is_an_instance(self):
        """BaseModel() yields a BaseModel instance."""
        my_model = BaseModel()
        self.assertIsInstance(my_model, BaseModel)
    def test_id(self):
        """Two instances receive distinct ids."""
        my_model = BaseModel()
        my_model1 = BaseModel()
        self.assertNotEqual(my_model.id, my_model1.id)
    def test_save(self):
        """save() refreshes the updated_at timestamp."""
        my_model2 = BaseModel()
        first_updated = my_model2.updated_at
        my_model2.save()
        second_updated = my_model2.updated_at
        self.assertNotEqual(first_updated, second_updated)
    def test_to_dict(self):
        """to_dict() returns a dict with __class__ and string timestamps."""
        my_model3 = BaseModel()
        my_dict_model3 = my_model3.to_dict()
        self.assertIsInstance(my_dict_model3, dict)
        # NOTE(review): this loop re-checks the same __class__ condition once
        # per dict entry; a single assertion would suffice.
        for key, value in my_dict_model3.items():
            flag = 0
            if my_dict_model3["__class__"] == "BaseModel":
                flag += 1
            self.assertTrue(flag == 1)
        for key, value in my_dict_model3.items():
            if key == "created_at":
                self.assertIsInstance(value, str)
            if key == "updated_at":
                self.assertIsInstance(value, str)
| [
"nikolasribeiro2@outlook.com"
] | nikolasribeiro2@outlook.com |
44a4ae446de801e016da6b7bceabff7792398c22 | 136f086eef766450ccc0168bc3dd51cefeea6efc | /ansible/stats.py | 95fc78022ebeb1247147e0b3c5ca72460cc02fb5 | [] | no_license | status-im/infra-utils | da613493be8d314f129740105c4f546dbfc4dd5c | dc620816c11f7d12fa7c988915820bf346fa76ca | refs/heads/master | 2023-07-08T13:38:54.971966 | 2023-06-26T19:51:04 | 2023-06-26T19:51:04 | 141,190,565 | 7 | 7 | null | 2023-08-23T13:42:36 | 2018-07-16T20:25:04 | Python | UTF-8 | Python | false | false | 2,192 | py | #!/usr/bin/env python
import sys
import json
from requests import get
from optparse import OptionParser
HELP_DESCRIPTION = '''
Simple script to output counts of hosts in all fleets and all DCs.
'''.strip()
HELP_EXAMPLE = '''
Example: ./stats.py -u http://consul.example.org:8400
'''
def parse_opts(argv=None):
    """Parse command-line options.

    :param argv: optional argument list; defaults to sys.argv[1:], so the
                 existing no-argument call site keeps working.
    :return: (options, args) as produced by OptionParser.parse_args().
    """
    parser = OptionParser(description=HELP_DESCRIPTION, epilog=HELP_EXAMPLE)
    # Fixed help text: the original "Name of virtual network interface."
    # was a copy-paste leftover that did not describe this option.
    parser.add_option('-u', '--consul-url', default='http://localhost:8500',
                      help='Base URL of the Consul HTTP API.')
    return parser.parse_args(argv)
def main():
    """Collect Consul catalog nodes across all datacenters and print
    per-DC and per-environment host/vcpu/memory totals as JSON."""
    (opts, args) = parse_opts()
    nodes = []
    envs = {}
    fleets = {}
    dcs = {}
    # One catalog request per datacenter; nodes from all DCs are merged.
    data_centers = get('%s/v1/catalog/datacenters' % opts.consul_url)
    for dc in data_centers.json():
        node = get('%s/v1/catalog/nodes?dc=%s' % (opts.consul_url,dc))
        nodes.extend(node.json())
    # Bucket every node by datacenter, fleet ("env.stage") and environment.
    for node in nodes:
        meta = node['Meta']
        fleet_name = '%s.%s' % (meta['env'], meta['stage'])
        # Fix for inconsistency in fleet naming
        if meta['stage'] in ['hq', 'misc', 'office', 'bi', 'ci']:
            env = envs.setdefault(meta['stage'], [])
        else:
            env = envs.setdefault(meta['env'], [])
        dc = dcs.setdefault(node['Datacenter'], [])
        fleet = fleets.setdefault(fleet_name, [])
        dc.append(node)
        fleet.append(node)
        env.append(node)
    # Aggregate counts and hardware metadata (missing values count as 0).
    out = {
        'total': len(nodes),
        'dcs': {
            dc: {
                "count": len(hosts),
                "vcpus": sum(int(h['Meta'].get('hw_vcpu_count', 0)) for h in hosts),
                "memory": sum(int(h['Meta'].get('hw_memory_mb', 0)) for h in hosts),
            } for dc, hosts in dcs.items()
        },
        'envs': {
            env: {
                "count": len(hosts),
                "vcpus": sum(int(h['Meta'].get('hw_vcpu_count', 0)) for h in hosts),
                "memory": sum(int(h['Meta'].get('hw_memory_mb', 0)) for h in hosts),
            } for env, hosts in envs.items()
        },
        #'fleets': {fleet: len(hosts) for fleet, hosts in fleets.items()},
    }
    print(json.dumps(out, indent=2))
if __name__ == '__main__':
    main()
| [
"jakub@status.im"
] | jakub@status.im |
6b6b8cd7afdddc7025642855760432169b644d42 | d969dd50a978a303e3b0db5343295c855ec3928f | /models.py | 6cd236ebe56d2a360e9d7268da5e1e1c169c535e | [] | no_license | dennypradipta/travelbasic | 65d199858e6faa76395c4c914b7701b208db1536 | 3ff87e805508c35e665b7216ce561c3f601fb9e7 | refs/heads/master | 2020-03-30T09:48:06.525738 | 2018-10-01T13:27:03 | 2018-10-01T13:27:03 | 151,093,014 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | import datetime
from mongoengine import *
connect('travelbasic')
class Passports(Document):
    """MongoDB document holding one scanned passport record."""
    passport_num = StringField(required=True)
    name = StringField(required=True)
    issue = StringField(required=True)
    issue_n = StringField(required=True)
    nation = StringField(required=True)
    nation_n = StringField(required=True)
    birthdate = DateTimeField(required=True)
    sex = StringField(required=True)
    expirydate = DateTimeField(required=True)
    image = StringField(required=True)
    image_path = StringField(required=True)
    # Bug fix: pass the callable itself, not its result.  The original
    # datetime.datetime.now() was evaluated once at import time, so every
    # document shared the same frozen timestamp.
    created_at = DateTimeField(required=True, default=datetime.datetime.now)
    modified_at = DateTimeField(required=True, default=datetime.datetime.now)
class Users(Document):
    """Application login account.

    NOTE(review): password is a plain StringField - confirm it is hashed
    before being saved.
    """
    username = StringField(required=True)
    password = StringField(required=True)
    role = StringField(required=True)
| [
"denny.aditya.p@gmail.com"
] | denny.aditya.p@gmail.com |
0019911ca697decb0b65cbb82321771d64b09e04 | ef824a1954a8fd316f29dbb112f550936dc38b5f | /tools/remove_separators.py | e5c3fc869c7eaa7770a6a80c007eaeedfcc4126f | [] | no_license | sullerandras/terraria-hd-textures | de12d7ae206f1b3fbe0883d400866440507f9081 | 7c1c7fdcd444903ece2f91025ff2bcfdd04ffd09 | refs/heads/master | 2023-02-04T16:20:57.833496 | 2023-01-29T09:02:55 | 2023-01-29T09:02:55 | 59,391,213 | 46 | 13 | null | 2022-11-18T05:44:45 | 2016-05-22T02:31:42 | Python | UTF-8 | Python | false | false | 25,523 | py | from __future__ import print_function
import array
import glob
import ntpath
import pickle
import png
import re
import sys
import traceback
from Array3D import Array3D
TRANSPARENT = (0, 0, 0, 0)
def error(*objs):
    """Print the given objects to stderr with an "ERROR: " prefix."""
    print("ERROR: ", *objs, file=sys.stderr)
SEPARATORS = [
array.array('B', [0x60, 0x33, 0x57, 0xff]),
array.array('B', [0x6c, 0x4c, 0x6c, 0xff]),
array.array('B', [0x78, 0x37, 0x79, 0xff]),
array.array('B', [0x7e, 0x50, 0x7e, 0xff]),
array.array('B', [0x7f, 0x4b, 0x80, 0xff]),
array.array('B', [0x85, 0x57, 0x86, 0xff]),
array.array('B', [0x8b, 0x77, 0xf9, 0xff]),
array.array('B', [0x8f, 0x50, 0x90, 0xff]),
array.array('B', [0x92, 0x5c, 0x93, 0xff]),
array.array('B', [0x9a, 0x46, 0x9c, 0xff]),
array.array('B', [0xa0, 0x61, 0xa1, 0xff]),
array.array('B', [0xa7, 0x6d, 0xa8, 0xff]),
array.array('B', [0xae, 0x4f, 0xb0, 0xff]),
array.array('B', [0xaf, 0x73, 0xb0, 0xff]),
array.array('B', [0xb6, 0x6f, 0xb8, 0xff]),
array.array('B', [0xb9, 0x46, 0xb9, 0xff]),
array.array('B', [0xbd, 0x6a, 0xa2, 0xff]),
array.array('B', [0xc4, 0x9e, 0xe0, 0xff]),
array.array('B', [0xd5, 0xa8, 0xc2, 0xff]),
array.array('B', [0xd9, 0xc2, 0x98, 0xff]),
array.array('B', [0xdf, 0x77, 0xf9, 0xff]),
array.array('B', [0xe1, 0x55, 0xe5, 0xff]),
array.array('B', [0xe2, 0x8a, 0xee, 0xff]),
array.array('B', [0xe3, 0x36, 0xe8, 0xff]),
array.array('B', [0xe5, 0x8b, 0xe6, 0xff]),
array.array('B', [0xe8, 0xd5, 0x36, 0xff]),
array.array('B', [0xed, 0x77, 0xf9, 0xff]),
array.array('B', [0xf6, 0x77, 0xf9, 0xff]),
array.array('B', [0xf7, 0x73, 0xff, 0xff]),
array.array('B', [0xf7, 0x77, 0xf9, 0xff]),
array.array('B', [0xf8, 0x77, 0xf9, 0xff]),
array.array('B', [0xf8, 0x80, 0xf9, 0xff]),
array.array('B', [0xf9, 0x77, 0x8f, 0xff]),
array.array('B', [0xf9, 0x77, 0xc3, 0xff]),
array.array('B', [0xf9, 0xc5, 0xfa, 0xff]),
array.array('B', [0xfa, 0x6b, 0xfc, 0xff]),
array.array('B', [0xfd, 0x71, 0xff, 0xff]),
array.array('B', [0xff, 0x00, 0xd4, 0xff]),
array.array('B', [0xff, 0x71, 0xdb, 0xff]),
array.array('B', [137, 65, 138, 210]),
array.array('B', [167, 80, 168, 210]),
array.array('B', [185, 102, 200, 204]),
array.array('B', [196, 94, 198, 247]),
array.array('B', [199, 96, 201, 206]),
array.array('B', [203, 98, 205, 210]),
array.array('B', [21, 10, 22, 76]),
]
def SEP(grid_size):
    """Return a predicate that is True on the last index of each grid cell.

    For sheets whose cells are ``grid_size`` pixels wide/tall, the returned
    function tells whether coordinate ``x`` lies on a separator line.
    """
    def is_last_in_cell(x):
        return x % grid_size == grid_size - 1
    return is_last_in_cell
SEP9 = lambda x: x % 9 == 8
CLASS_SEPARATORS = {
'glowsnail-27x378': {
'column': [8, 17, 26],
'row': lambda x: x % 9 == 8
},
'jellyfishbowl-18x180': {
'column': [8, 17],
'row': lambda x: x % 9 == 8
},
'liquid-153x8': {
'column': lambda x: x % 9 == 8,
'row': []
},
'shroomtops-93x22': {
'column': lambda x: x % 31 == 30,
'row': [21]
},
'glow-144x198': SEP(9),
'glow-72x252': {
'column': SEP(9),
'row': lambda x: x % 28 == 27 or x % 28 == 17 or x % 28 == 8
},
'glow-65x108': {
'column': [],
'row': SEP(9),
},
'tilecracks-54x36': SEP(9),
'tiles-144x135': SEP(9),
'tiles-144x198': SEP(9),
'tiles-gross-144x198': SEP(9),
'tilesbeach-144x198': SEP(9),
'tiles-26x991': SEP(9),
'tiles-35x604': SEP(9),
'tiles-35x991': SEP(9),
'tiles-883x35': SEP(9),
'tiles-26x35': SEP(9),
'tiles-109x8': SEP(9),
'tiles-550x44': SEP(9),
'tiles-918x27': SEP(9),
'tiles-27x54': SEP(9),
'tiles-117x45': SEP(9),
'tiles-18x198': SEP(9),
'tiles-35x26': SEP(9),
'tiles-162x36': SEP(9),
'tiles-72x18': SEP(9),
'tiles-36x18': SEP(9),
'tiles-17x45': {
'column': [],
'row': [],
},
'tiles-36x684': SEP(9),
'tiles-27x18': SEP(9),
'tiles-54x36': SEP(9),
'tiles-270x54': SEP(9),
'tiles-35x17': SEP(9),
'tiles-63x108': SEP(9),
'tiles-954x36': SEP(9),
'tiles-945x18': SEP(9),
'tiles-945x36': SEP(9),
'tiles-243x293': SEP(9),
'tiles-198x252': SEP(9),
'tiles-108x243': SEP(9),
'tiles-53x26': SEP(9),
'tiles-162x162': SEP(9),
'tiles-27x90': SEP(9),
'tiles-27x36': SEP(9),
'tiles-27x270': SEP(9),
'tiles-27x108': SEP(9),
'tiles-27x81': SEP(9),
'tiles-27x189': SEP(9),
'tiles-243x36': SEP(9),
'tiles-54x18': SEP(9),
'tiles-17x72': SEP(9),
'tiles-207x9': SEP(9),
'tiles-972x54': SEP(9),
'tiles-36x243': SEP(9),
'tiles-107x971': SEP(9),
'tiles-27x162': SEP(9),
'tiles-54x108': SEP(9),
'tiles-126x27': SEP(9),
'tiles-26x400': SEP(9),
'tiles-27x216': SEP(9),
'tiles-90x108': SEP(9),
'tiles-162x45': SEP(9),
'tiles-117x90': SEP(9),
'tiles-54x594': SEP(9),
'tiles-54x486': SEP(9),
'tiles-54x648': SEP(9),
'tiles-54x513': SEP(9),
'tiles-54x612': SEP(9),
'tiles-18x306': SEP(9),
'tiles-27x378': SEP(9),
'tiles-18x288': SEP(9),
'tiles-18x9': SEP(9),
'tiles-27x432': SEP(9),
'tiles-27x360': SEP(9),
'tiles-27x135': SEP(9),
'tiles-27x324': SEP(9),
'tiles-54x432': SEP(9),
'tiles-35x35': SEP(9),
'tiles-27x342': SEP(9),
'tiles-72x72': SEP(9),
'tiles-18x180': SEP(9),
'tiles-18x27': SEP(9),
'tiles-117x360': SEP(9),
'tiles-54x27': SEP(9),
'tiles-117x180': SEP(9),
'tiles-648x27': SEP(9),
'tiles-53x1000': SEP(9),
'tiles-36x162': SEP(9),
'tiles-117x135': SEP(9),
'tiles-72x9': SEP(9),
'tiles-36x297': SEP(9),
'tiles-162x180': SEP(9),
'tiles-36x72': SEP(9),
'tiles-9x126': SEP(9),
'tiles-27x558': SEP(9),
'tiles-27x612': SEP(9),
'tiles-90x18': SEP(9),
'tiles-26x26': SEP(9),
'tiles-17x712': SEP(9),
'tiles-53x8': SEP(9),
'tiles-197x17': SEP(9),
'tiles-26x17': SEP(9),
'tiles-829x17': SEP(9),
'tiles-775x17': SEP(9),
'tiles-910x17': SEP(9),
'tiles-71x585': SEP(9),
'tiles-1000x81': SEP(9),
'tiles-18x53': SEP(9),
'tiles-18x856': SEP(9),
# ===================================
'tiles-207x11': { 'column': SEP9, 'row': [10] },
'tiles-72x17': { 'column': SEP9, 'row': [16] },
'tiles-27x20': { 'column': SEP9, 'row': [19] },
'tiles-162x19': { 'column': SEP9, 'row': [8, 18] },
'tiles-27x27': { 'column': SEP9, 'row': [] },
'tiles-17x17': {
'column': [8],
'row': [8]
},
'tiles-18x18': {
'column': [8, 17],
'row': [17] # this is tricky as 8 should be here for some images for example Tiles_142 and 143
},
'tiles-34x17': { 'column': SEP9, 'row': [8] },
'tiles-53x36': { 'column': SEP9, 'row': [8, 17, 26] },
'tiles-81x9': { 'column': SEP9, 'row': [8] },
'tiles-35x9': { 'column': SEP9, 'row': [] },
'tiles-36x10': { 'column': SEP9, 'row': [9] },
'tiles-45x11': { 'column': SEP9, 'row': [10] },
'tiles-189x17': { 'column': SEP9, 'row': [16] },
'tiles-153x17': { 'column': SEP9, 'row': [16] },
'tiles-8x100': { 'column': [], 'row': [] },
'tiles-32x64': { 'column': [], 'row': [] },
'tiles-8x8': { 'column': [], 'row': [] },
'tiles-9x9': { 'column': [], 'row': [] },
'tiles-9x18': { 'column': [8], 'row': [8, 17] },
'tiles-18x11': { 'column': [8, 17], 'row': [10] },
'tiles-18x760': {
'column': SEP9,
'row': lambda x: x % 20 >= 18 or x % 20 == 8
},
'tiles-26x228': {
'column': SEP9,
'row': [8, 18, 27, 37, 46, 56, 65, 75, 85, 94, 103, 113, 122, 132, 141, 151, 160, 170, 179, 189, 198, 208, 217, 227]
},
'tiles-18x651': {
'column': SEP9,
'row': lambda x: x % 19 == 18 or x % 19 == 8
},
'tiles-1000x57': {
'column': SEP9,
'row': lambda x: x % 19 == 18 or x % 19 == 8
},
'tiles-53x114': {
'column': SEP9,
'row': lambda x: x % 19 == 18 or x % 19 == 8
},
'tiles-964x18': {
'column': SEP9,
'row': [8]
},
'tiles-54x19': {
'column': SEP9,
'row': lambda x: x % 19 == 18 or x % 19 == 8
},
'tiles-54x76': {
'column': SEP9,
'row': lambda x: x % 19 == 18 or x % 19 == 8
},
'tiles-54x153': {
'column': SEP9,
'row': lambda x: x % 19 == 18 or x % 19 == 8
},
'tiles-36x19': {
'column': SEP9,
'row': lambda x: x % 19 == 18 or x % 19 == 8
},
'tiles-25x228': {
'column': SEP9,
'row': lambda x: x % 19 == 18 or x % 19 == 8
},
'tiles-72x605': {
'column': SEP9,
'row': lambda x: (x < 566 and x % 18 == 8) or (x >= 566 and x - 1 % 18 == 8) # row 567 is the empty row instead of 566
},
'tiles-27x196': {
'column': SEP9,
'row': lambda x: x % 28 == 27 or x % 28 == 17
},
'tiles-72x252': {
'column': SEP9,
'row': lambda x: x % 28 == 27 or x % 28 == 17 or x % 28 == 8
},
'tiles-65x108': {
'column': lambda x: x % 11 == 10,
'row': SEP9
},
'tiles-270x19': {
'column': SEP9,
'row': lambda x: x % 19 == 18
},
'tiles-63x27': {
'column': SEP9,
'row': []
},
'tiles-10x76': {
'column': [9],
'row': lambda x: x % 20 == 8
},
'tiles-204x20': {
'column': lambda x: x % 17 == 16,
'row': [19]
},
'tiles-121x44': {
'column': lambda x: x % 11 == 10,
'row': lambda x: x % 11 == 10
},
'tiles-32x22': {
'column': lambda x: x % 11 == 10,
'row': lambda x: x % 11 == 10
},
'tiles-32x22': {
'column': lambda x: x % 11 == 10,
'row': lambda x: x % 11 == 10
},
'tiles-88x132': {
'column': lambda x: x % 11 == 10,
'row': lambda x: x % 11 == 10
},
'tiles-78x14': {
'column': lambda x: x % 13 == 12,
'row': [13]
},
'tiles-63x11': {
'column': SEP9,
'row': [10]
},
'tiles-18x447': {
'column': SEP9,
'row': lambda x: x % 11 == 10
},
'tiles-9x11': {
'column': SEP9,
'row': lambda x: x % 11 == 10
},
'tiles-9x94': {
'column': SEP9,
'row': [9, 18, 27, 36, 46, 56, 65, 74, 83, 93]
},
'tiles-604x10': {
'column': SEP9,
'row': [9]
},
'tiles-66x176': {
'column': [10, 21, 32, 43, 54, 65],
'row': [10, 21, 32, 43, 54, 65, 76, 87, 98, 109, 120, 131, 142, 153, 164, 175]
},
'treebranches-42x63': SEP(21),
'treebranches-42x189': SEP(21),
'treetops-123x41': SEP(41),
'treetops-174x49': { 'column': SEP(58), 'row': [48] },
'treetops-123x164': SEP(41),
'treetops-369x71': { 'column': SEP(41), 'row': [70] },
'wall-234x90': lambda x: x % 18 >= 16,
'wall-234x720': lambda x: x % 18 >= 16,
'wall-234x810': lambda x: x % 18 >= 16,
'wall-234x180': lambda x: x % 18 >= 16,
'wall-234x126': lambda x: x % 18 >= 16,
'wires-45x36': SEP(9),
'wires-20x20': [],
'wires-16x16': [],
'wires-8x8': [],
'wiresnew-144x144': SEP(9),
'witchdoctordefault-25x728': SEP(28),
'wizarddefault-20x736': [],
'wizarddefaultparty-20x736': [],
'wraitheyes-13x100': [],
'xmas-32x64': [],
'xmas-198x65': { 'column': SEP(33), 'row': [64] },
'xmas-363x65': { 'column': SEP(33), 'row': [64] },
'xmas-132x65': { 'column': SEP(33), 'row': [64] },
'xmas-363x260': { 'column': SEP(33), 'row': SEP(65) },
'xmaslight-54x36': SEP(9),
}
def get_separators_for_class(clazz, width, height):
    """Look up the separator line positions for a sprite class and size.

    Returns a (row_indexes, column_indexes) tuple of pixel coordinates.
    CLASS_SEPARATORS entries may be index lists, predicate callables, or
    dicts with separate 'column'/'row' parts; callables are expanded into
    explicit index lists here.
    """
    key = '%s-%dx%d' % (clazz, width, height)
    if key not in CLASS_SEPARATORS:
        # NOTE(review): classes sorting lexicographically before 'wires'
        # (plus 'yzszgq') are silently treated as separator-free --
        # presumably already handled; confirm intent.
        if clazz < 'wires' or clazz == 'yzszgq':
            return ([], [])
        raise Exception('add "%s" to CLASS_SEPARATORS' % key)
    spec = CLASS_SEPARATORS[key]
    if isinstance(spec, dict):
        column, row = spec['column'], spec['row']
    else:
        column = row = spec
    if callable(column):
        column = [i for i in range(width) if column(i)]
    if callable(row):
        row = [i for i in range(height) if row(i)]
    return (row, column)
def is_separator_color(color):
    # A pixel counts as "separator" if it is fully transparent (alpha == 0)
    # or its RGBA value appears in the SEPARATORS set defined elsewhere in
    # this file.
    return color[3] == 0 or color in SEPARATORS
# def check_grid_size(pixelarray, gridx, gridy):
# if (pixelarray.width < gridx) or (pixelarray.height < gridy):
# return False # cannot be sure that the grid is matching
# total = 0
# match = 0
# for y in range(gridy - 1, pixelarray.height, gridy):
# for x in range(pixelarray.width):
# total += 1
# if is_separator_color(pixelarray.getPixelAt(x, y)):
# match += 1
# for x in range(gridx - 1, pixelarray.width, gridx):
# for yy in range(y - gridy + 1, y):
# total += 1
# if is_separator_color(pixelarray.getPixelAt(x, yy)):
# match += 1
# if (float(match) / float(total)) > 0.99:
# return True
# else:
# print("total: %s, match: %s (%.2f%%)" % (total, match, 100.0 * match / total))
# return False
# def detect_grid_size(pixelarray):
# for x in range(5, pixelarray.width):
# if pixelarray.isTransparent(x, 0):
# continue
# color = pixelarray.getPixelAt(x, 0)
# for y in range(pixelarray.height):
# good = True
# for xx in range(x, -1, -1):
# if color != pixelarray.getPixelAt(xx, y):
# good = False
# break
# if good:
# return (x + 1, y + 1)
# if xx == x:
# break
# # try 9:9 as grid size
# if check_grid_size(pixelarray, 9, 9):
# return (9, 9)
# return (0, 0)
def is_column_all_separator(pixelarray, x):
    """Return True when every pixel in column *x* is a separator color."""
    return all(is_separator_color(pixelarray.getPixelAt(x, y))
               for y in range(pixelarray.height))
def is_row_all_separator(pixelarray, y):
    """Return True when every pixel in row *y* is a separator color."""
    return all(is_separator_color(pixelarray.getPixelAt(x, y))
               for x in range(pixelarray.width))
def detect_and_clear_separators(pixelarray):
    """ Returns: (row_indexes, column_indexes) """
    # Flag every column that consists entirely of separator-colored pixels,
    # then wipe those columns to transparent.
    flags = [is_column_all_separator(pixelarray, x) for x in range(pixelarray.width)]
    for x in range(pixelarray.width):
        if flags[x]:
            for y in range(pixelarray.height):
                pixelarray.setPixelAt(x, y, TRANSPARENT)
    # Report only isolated separator columns (non-separator neighbors on
    # both sides); the index i - 1 is the separator's own position.
    # NOTE(review): a separator on the very last column has no right
    # neighbor and is never reported -- confirm that is intended.
    column_indexes = [i - 1 for i in range(2, len(flags)) if not flags[i - 2] and flags[i - 1] and not flags[i]]
    # Same two passes over the rows.
    flags = [is_row_all_separator(pixelarray, y) for y in range(pixelarray.height)]
    for y in range(pixelarray.height):
        if flags[y]:
            for x in range(pixelarray.width):
                pixelarray.setPixelAt(x, y, TRANSPARENT)
    row_indexes = [i - 1 for i in range(2, len(flags)) if not flags[i - 2] and flags[i - 1] and not flags[i]]
    return (row_indexes, column_indexes)
def remove_separators(input_filename, clazz, pixelarray):
    """Blank out the known separator rows/columns of *pixelarray* in place,
    then repaint them from the nearest non-separator pixels (except walls)."""
    # (row_indexes, column_indexes) = detect_and_clear_separators(pixelarray)
    (row_indexes, column_indexes) = get_separators_for_class(clazz, pixelarray.width, pixelarray.height)
    # clear separator lines first, because if 2 separator lines are next to
    # each other then both lines would remain the same color
    for x in column_indexes:
        pixels = set()
        for y in range(pixelarray.height):
            if not pixelarray.isTransparent(x, y):
                pixels.add(str(pixelarray.getPixelAt(x, y)))
            # 3+ distinct colors on a supposed separator line means the
            # index is wrong; tolerated only for a known-bad file list.
            if len(pixels) >= 3:
                if ntpath.basename(input_filename) in ['Tiles_11.png', 'Tiles_19.png', 'Tiles_73.png', 'Tiles_190.png', 'Tiles_198.png', 'Tiles_222.png', 'Tiles_229.png', 'Tiles_247.png', 'Tiles_274.png', 'Tree_Branches_3.png', 'Wall_60.png', 'Wall_73.png', 'Wall_136.png', 'Tiles_505.png']:
                    # print('Wrong column index in file %s: %s, colors: %s' % (ntpath.basename(input_filename), x, pixels))
                    pixels = set()
                else:
                    raise Exception('Wrong column index: %s, colors: %s' % (x, pixels))
            pixelarray.setPixelAt(x, y, TRANSPARENT)
    for y in row_indexes:
        pixels = set()
        for x in range(pixelarray.width):
            if not pixelarray.isTransparent(x, y):
                pixels.add(str(pixelarray.getPixelAt(x, y)))
            if len(pixels) >= 3:
                if ntpath.basename(input_filename) in ['Tiles_17.png', 'Tiles_73.png', 'Tiles_126.png', 'Tiles_203.png', 'Tiles_229.png', 'Tiles_274.png', 'Tiles_32.png', 'Tiles_352.png', 'Tiles_443.png', 'Tiles_69.png', 'Wall_60.png', 'Wall_73.png', 'Tiles_575.png', 'Tiles_80.png', 'Tiles_655.png']:
                    # print('Wrong row index in file %s: %s, colors: %s' % (ntpath.basename(input_filename), y, pixels))
                    pixels = set()
                else:
                    raise Exception('Wrong row index: %s, colors: %s' % (y, pixels))
            pixelarray.setPixelAt(x, y, TRANSPARENT)
    # fill the now empty separator lines with the nearest pixel
    if ntpath.basename(input_filename)[0 : 5] != 'Wall_': # don't fill for walls, it looks clunky
        for x in column_indexes:
            for y in range(pixelarray.height):
                pixelarray.setPixelAt(x, y, pixelarray.nearestNonSeparator(x, y, vertical=True))
        for y in row_indexes:
            for x in range(pixelarray.width):
                pixelarray.setPixelAt(x, y, pixelarray.nearestNonSeparator(x, y, vertical=False))
def remove_separators_from_file(input_filename, output_filename):
    """Read one PNG, wipe the grid separator lines for its sprite class,
    and write the cleaned image to *output_filename*."""
    img = png.Reader(filename=input_filename)
    (width, height, pixels, meta) = img.asRGBA8()
    # Materialize the row iterator before the reader is discarded.
    data = list(pixels)
    pixelarray = Array3D(data, width, height, depth=4)
    clazz = class_for_filename(ntpath.basename(input_filename))
    remove_separators(input_filename, clazz, pixelarray)
    writer = png.Writer(width=width, height=height, alpha=True, bitdepth=8,
                        greyscale=False, compression=9)
    print('writing file %s' % output_filename)
    # 'with' replaces the manual try/finally and guarantees the handle is
    # closed even if writing fails.
    with open(output_filename, 'wb') as f:
        writer.write(f, pixelarray.data)
# remove_separators_from_file(
# input_filename='/Users/andras/Downloads/Images_original_extracted-downsized/Tiles_1.png',
# output_filename='/Users/andras/Downloads/Tiles_1.png')
# remove_separators_from_file(
# input_filename='/Users/andras/Downloads/Images_original_extracted-downsized/Wall_87.png',
# output_filename='/Users/andras/Downloads/Wall_87.png')
def stats_for_file(input_filename):
    """Return (basename, width, height, row_separator_indexes,
    column_separator_indexes) for one PNG file."""
    img = png.Reader(filename=input_filename)
    (width, height, pixels, meta) = img.asRGBA()
    # Materialize the row iterator (plain list() instead of a copy
    # comprehension).
    data = list(pixels)
    pixelarray = Array3D(data, width, height, depth=4)
    column_indexes = [x for x in range(pixelarray.width) if is_column_all_separator(pixelarray, x)]
    row_indexes = [y for y in range(pixelarray.height) if is_row_all_separator(pixelarray, y)]
    return (ntpath.basename(input_filename), width, height, row_indexes, column_indexes)
def save_to_file(data, filename):
    """Serialize *data* to *filename* with pickle (binary mode)."""
    with open(filename, 'wb') as handle:
        pickle.dump(data, handle)
def load_from_file(filename):
    """Deserialize and return the pickled object stored in *filename*."""
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
def generate_stats_file(input_glob='/Users/andras/Downloads/Images_original_extracted-downsized/*.png',
                        output_filename='stats.pickle'):
    """Scan every PNG matched by *input_glob*, group the per-file separator
    stats by image size ('WxH' keys), and pickle the mapping to
    *output_filename*.

    The defaults preserve the previously hard-coded paths, so existing
    callers are unaffected.
    """
    stats = {}
    for input_filename in glob.glob(input_glob):
        print(input_filename)
        data = stats_for_file(input_filename=input_filename)
        key = '%dx%d' % (data[1], data[2])
        # setdefault replaces the manual "if key not in stats" dance.
        stats.setdefault(key, []).append(data)
    save_to_file(stats, output_filename)
def class_for_filename(filename):
    """Derive the sprite class from a file name.

    'Tiles_11.png' -> 'tiles': lower-case the name, drop the '.png' part,
    then strip every run of digits and underscores.
    """
    stem = filename.lower().replace('.png', '')
    return re.sub(r"[_0-9]+", "", stem)
def process_stats():
    """For every image size represented by more than one file in
    stats.pickle, print how often each separator row/column index and each
    sprite class occurs."""
    stats = load_from_file('stats.pickle')
    for key, values in sorted(stats.items()):
        # Guard clause: sizes with a single file carry no useful statistics.
        if len(values) <= 1:
            continue
        column_index_counts = {}
        row_index_counts = {}
        class_counts = {}
        for (filename, width, height, row_indexes, column_indexes) in values:
            # dict.get(..., 0) + 1 replaces the manual "init then increment"
            # pattern of the original.
            for i in column_indexes:
                column_index_counts[i] = column_index_counts.get(i, 0) + 1
            for i in row_indexes:
                row_index_counts[i] = row_index_counts.get(i, 0) + 1
            clazz = class_for_filename(filename)
            class_counts[clazz] = class_counts.get(clazz, 0) + 1
        if column_index_counts or row_index_counts:
            print(key, len(values))
            print('  column_index_counts: %s' % (sorted(column_index_counts.items()),))
            print('  row_index_counts: %s' % (sorted(row_index_counts.items()),))
            print('  types: %s' % (sorted(class_counts.items()),))
def process_grouped_stats():
    """Regroup stats.pickle by (class, size) and print, per group, the
    separator index histograms plus the indexes shared by >= 80% of files.

    The printed "common" lists are what CLASS_SEPARATORS entries are
    derived from.
    """
    stats = load_from_file('stats.pickle')
    # Flatten the size-keyed lists into one list of per-file tuples.
    values = [item for items in stats.values() for item in items]
    stats = {}
    for (filename, width, height, row_indexes, column_indexes) in values:
        clazz = class_for_filename(filename)
        key = "%s-%4dx%4d" % (clazz, width, height)
        if key not in stats:
            stats[key] = {
                'column_index_counts': {},
                'row_index_counts': {},
                'class_counts': {},
                'count': 0,
                'width': width,
                'height': height
            }
        stats[key]['count'] += 1
        for i in column_indexes:
            if i not in stats[key]['column_index_counts']:
                stats[key]['column_index_counts'][i] = 0
            stats[key]['column_index_counts'][i] += 1
        for i in row_indexes:
            if i not in stats[key]['row_index_counts']:
                stats[key]['row_index_counts'][i] = 0
            stats[key]['row_index_counts'][i] += 1
        clazz = class_for_filename(filename)
        if clazz not in stats[key]['class_counts']:
            stats[key]['class_counts'][clazz] = 0
        stats[key]['class_counts'][clazz] += 1
    for key, value in sorted(stats.items()):
        column_index_counts = value['column_index_counts']
        row_index_counts = value['row_index_counts']
        class_counts = value['class_counts']
        count = value['count']
        width = value['width']
        height = value['height']
        lencic = len(column_index_counts)
        lenric = len(row_index_counts)
        # Earlier filter kept for reference; currently every group is printed.
        # if count >= 10 and ((lencic > 0 and lencic*100/width < 50) or (lenric > 0 and lenric*100/height < 50)):
        if True:
            print('%s (count: %s)' % (key, count))
            print('    column_index_counts (%d): %s' % (lencic, sorted(column_index_counts.items()),))
            # "common" = indexes present in at least 80% of the group's files.
            common = [x for x in sorted(column_index_counts.items()) if x[1]>=0.8*count]
            print('    common column indexes (%d): %s' % (len(common), common,))
            print('    row_index_counts (%d): %s' % (lenric, sorted(row_index_counts.items()),))
            common = [x for x in sorted(row_index_counts.items()) if x[1]>=0.8*count]
            print('    common row indexes (%d): %s' % (len(common), common,))
            print('    types: %s' % (sorted(class_counts.items()),))
def remove_separators_from_all_tiles():
    """Batch-run remove_separators_from_file over the hard-coded download
    folder, logging and re-raising any failure."""
    for input_filename in glob.glob('/Users/andras/Downloads/Images_original_extracted-downsized/*.png'):
        output_filename = '/Users/andras/Downloads/Images-downsized-removed-separators/%s' % (ntpath.basename(input_filename))
        try:
            remove_separators_from_file(
                input_filename=input_filename,
                output_filename=output_filename)
        except Exception as err:
            print("Unexpected error while processing %s: %s" % (ntpath.basename(input_filename), str(err)))
            traceback.print_exc()
            # Bare 'raise' re-raises the active exception with its original
            # traceback intact ('raise err' would rebuild it from here).
            raise
def remove_separators_from_images_in_folder(input_dir, output_dir):
    """Process every PNG in *input_dir* (sorted) and write the cleaned copy
    with the same base name into *output_dir*."""
    for input_filename in sorted(glob.glob('%s/*.png' % (input_dir,))):
        output_filename = '%s/%s' % (output_dir, ntpath.basename(input_filename))
        try:
            remove_separators_from_file(input_filename=input_filename, output_filename=output_filename)
        except Exception as err:
            # NOTE(review): error() is not defined anywhere in this chunk --
            # presumably a logging helper defined earlier in the file;
            # confirm it exists, otherwise this handler raises NameError.
            error("Unexpected error while processing %s: %s" % (input_filename, str(err)))
            error(traceback.format_exc())
            # Bare 'raise' preserves the original traceback.
            raise
if __name__ == '__main__':
    # Guard the CLI entry point so importing this module no longer runs it.
    if len(sys.argv) < 3:
        print("Usage: python %s input_dir output_dir" % (sys.argv[0],))
        print("Remove separators from all images in input_dir and saves the result in output_dir (with the same name).")
    else:
        remove_separators_from_images_in_folder(sys.argv[1], sys.argv[2])
# stats_for_file('/Users/andras/Downloads/Images_original_extracted-downsized/Tiles_206.png')
# generate_stats_file()
# process_stats()
# process_grouped_stats()
# import cProfile
# cProfile.run('remove_separators_from_all_tiles()', sort=1)
| [
"suller.andras@gmail.com"
] | suller.andras@gmail.com |
7e64b95c3324accc910bd4ea98b00ab062f400d5 | 9886c4b77da76b6c60ac77fee46fa664891f60a0 | /lib/utils.py | 2794d1c4e187d27e5845e0bdfe18ae48711297cb | [
"MIT"
] | permissive | elias-gerbeth/wp-json-scraper | d2a650d3c6f1bb35700e1359ad029583c6ed9407 | 70995cd804625d44dbec7936ee41d14dff04cad1 | refs/heads/master | 2023-05-10T17:20:56.855080 | 2020-06-01T10:37:46 | 2020-06-01T10:37:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,869 | py | """
Copyright (c) 2018-2020 Mickaël "Kilawyn" Walter
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
from urllib.parse import urlsplit, urlunsplit
def get_by_id(value, id):
    """Return the first dict in *value* whose 'id' entry equals *id*.

    value: a list of dicts to search, or None
    id: the id to look up
    Returns None when *value* is None or when nothing matches.
    """
    if value is None:
        return None
    return next(
        (entry for entry in value if 'id' in entry and entry['id'] == id),
        None
    )
# Neat code part from https://codereview.stackexchange.com/questions/13027/joini
# ng-url-path-components-intelligently
def url_path_join(*parts):
    """Normalize url parts and join them with a slash."""
    # Split each part into its 5 URL components and transpose, so each
    # tuple below holds one component across all parts.
    schemes, netlocs, paths, queries, fragments = \
        zip(*(urlsplit(part) for part in parts))
    # For everything except the path, the first non-empty value wins.
    scheme = first(schemes)
    netloc = first(netlocs)
    # Paths are stripped of surrounding slashes and re-joined with single
    # slashes, dropping empty segments.
    path = '/'.join(x.strip('/') for x in paths if x)
    query = first(queries)
    fragment = first(fragments)
    return urlunsplit((scheme, netloc, path, query, fragment))
def first(sequence, default=''):
    """Return the first truthy item of *sequence*, or *default* if none."""
    for item in sequence:
        if item:
            return item
    return default
# Code from https://stackoverflow.com/questions/3173320/text-progress-bar-in-th
# e-console
def print_progress_bar(iteration, total, prefix='', suffix='', decimals=1,
                       length=100, fill='█'):
    """
    Call in a loop to create terminal progress bar
    @params:
        iteration   - Required  : current iteration (Int)
        total       - Required  : total iterations (Int)
        prefix      - Optional  : prefix string (Str)
        suffix      - Optional  : suffix string (Str)
        decimals    - Optional  : positive number of decimals in percent \
                                  complete (Int)
        length      - Optional  : character length of bar (Int)
        fill        - Optional  : bar fill character (Str)
    """
    # Nested format spec replaces the original string-concatenated format
    # template; output is identical.
    percent = "{0:.{1}f}".format(100 * (iteration / float(total)), decimals)
    filled_length = int(length * iteration // total)
    bar = fill * filled_length + '-' * (length - filled_length)
    # '\r' at both ends keeps the bar redrawing on a single terminal line.
    print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end='\r')
    # Print New Line on Complete
    if iteration == total:
        print()
def get_content_as_json(response_obj):
    """
    When a BOM is present (see issue #2), UTF-8 is not properly decoded by
    Response.json() method. This is a helper function that returns a json value
    even if a BOM is present in UTF-8 text
    @params:
        response_obj: a requests Response instance
    @returns: a decoded json object (list or dict), or {} when the body is
        not valid JSON
    """
    if response_obj.content[:3] == b'\xef\xbb\xbf':  # UTF-8 BOM
        # utf-8-sig strips the BOM before decoding
        content = response_obj.content.decode("utf-8-sig")
        return json.loads(content)
    else:
        try:
            return response_obj.json()
        except Exception:
            # was a bare 'except:', which would also swallow SystemExit and
            # KeyboardInterrupt; the empty-dict fallback is preserved
            return {}
"8983311+MickaelWalter@users.noreply.github.com"
] | 8983311+MickaelWalter@users.noreply.github.com |
592c37e7ec027c486244474774f990071fd77a65 | f77a1f9d4a860dfd5154666dc4117e24627d835d | /gecko_sdk_suite/v2.4/platform/hwconf_data/efr32mg14p/EFR32MG14P.py | d6004d9c602d13c483d843309e846f0748c16f48 | [] | no_license | eosnewyork/gecko_sdk_suite | 9e93da499173edede02c3a986bfb233cf7b528e2 | 9678065284fc0faa89fe8206d51dcde5dcf632c1 | refs/heads/master | 2020-05-18T02:36:02.504381 | 2019-04-29T15:56:40 | 2019-04-29T15:56:40 | 184,101,402 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,164 | py | import os
import glob
import time
import Studio.halConfig as hal
import efr32mg14p.PythonSnippet.RuntimeModel as RuntimeModel
import efr32mg14p.PythonSnippet.ExporterModel as ExporterModel
import efr32mg14p.PythonSnippet.Metadata as Metadata
import efr32mg14p.modules.PIN.PIN_Snippets as PIN_Snippets
import efr32mg14p.modules.PORTIO.PORTIO_Snippets as PORTIO_Snippets
import efr32mg14p.halconfig.halconfig_dependency as dep
import efr32mg14p.modules.ACMP0.ACMP_behavior as ACMP_behavior
import efr32mg14p.modules.ACMP1.ACMP_behavior as ACMP_behavior
import efr32mg14p.modules.ADC0.ADC_behavior as ADC_behavior
import efr32mg14p.modules.ANTDIV.ANTDIV_behavior as ANTDIV_behavior
import efr32mg14p.modules.BATTERYMON.BATTERYMON_behavior as BATTERYMON_behavior
import efr32mg14p.modules.BTL_BUTTON.BTL_BUTTON_behavior as BTL_BUTTON_behavior
import efr32mg14p.modules.BULBPWM.BULBPWM_behavior as BULBPWM_behavior
import efr32mg14p.modules.BULBPWM_COLOR.BULBPWM_COLOR_behavior as BULBPWM_COLOR_behavior
import efr32mg14p.modules.BUTTON.BUTTON_behavior as BUTTON_behavior
import efr32mg14p.modules.CMU.CMU_behavior as CMU_behavior
import efr32mg14p.modules.COEX.COEX_behavior as COEX_behavior
import efr32mg14p.modules.CS5463.CS5463_behavior as CS5463_behavior
import efr32mg14p.modules.DCDC.DCDC_behavior as DCDC_behavior
import efr32mg14p.modules.EMU.EMU_behavior as EMU_behavior
import efr32mg14p.modules.EXTFLASH.EXTFLASH_behavior as EXTFLASH_behavior
import efr32mg14p.modules.EZRADIOPRO.EZRADIOPRO_behavior as EZRADIOPRO_behavior
import efr32mg14p.modules.FEM.FEM_behavior as FEM_behavior
import efr32mg14p.modules.GPIO.GPIO_behavior as GPIO_behavior
import efr32mg14p.modules.I2C0.I2C_behavior as I2C_behavior
import efr32mg14p.modules.I2CSENSOR.I2CSENSOR_behavior as I2CSENSOR_behavior
import efr32mg14p.modules.IDAC0.IDAC_behavior as IDAC_behavior
import efr32mg14p.modules.IOEXP.IOEXP_behavior as IOEXP_behavior
import efr32mg14p.modules.LED.LED_behavior as LED_behavior
import efr32mg14p.modules.LEUART0.LEUART_behavior as LEUART_behavior
import efr32mg14p.modules.PA.PA_behavior as PA_behavior
import efr32mg14p.modules.PRS.PRS_behavior as PRS_behavior
import efr32mg14p.modules.PTI.PTI_behavior as PTI_behavior
import efr32mg14p.modules.PYD1698.PYD1698_behavior as PYD1698_behavior
import efr32mg14p.modules.SERIAL.SERIAL_behavior as SERIAL_behavior
import efr32mg14p.modules.SPIDISPLAY.SPIDISPLAY_behavior as SPIDISPLAY_behavior
import efr32mg14p.modules.SPINCP.SPINCP_behavior as SPINCP_behavior
import efr32mg14p.modules.TIMER0.TIMER_behavior as TIMER_behavior
import efr32mg14p.modules.TIMER1.TIMER_behavior as TIMER_behavior
import efr32mg14p.modules.UARTNCP.UARTNCP_behavior as UARTNCP_behavior
import efr32mg14p.modules.USART0.USART_behavior as USART_behavior
import efr32mg14p.modules.USART1.USART_behavior as USART_behavior
import efr32mg14p.modules.VCOM.VCOM_behavior as VCOM_behavior
import efr32mg14p.modules.VDAC0.VDAC_behavior as VDAC_behavior
import efr32mg14p.modules.VUART.VUART_behavior as VUART_behavior
import efr32mg14p.modules.WDOG.WDOG_behavior as WDOG_behavior
import efr32mg14p.upgrade as upgrade
import efr32mg14p.upgrade.upgradeDispatch as upgradeDispatch
# Global switch: when True, the document-load hook prints timing diagnostics.
PROFILE = True
@RuntimeModel.bind_document_upgrade
def onUpgrade(event, xmlDevice):
    """Run the upgrade pipeline on *xmlDevice* and return the list of
    Studio change events to apply (empty list when no upgrade applies)."""
    print("Triggering upgrade")
    upgradeResults = upgradeDispatch.upgradeMain(upgrade, xmlDevice)
    # return without xmlDevice if upgradeResults is empty
    # (isinstance replaces the non-idiomatic 'type(x) != tuple' check)
    if not isinstance(upgradeResults, tuple):
        return []
    (newXmlDevice, changeText) = upgradeResults
    evs = []
    change = hal.newXMLDeviceChange(newXmlDevice)
    try:
        change.setUpgradeText(changeText)
    except AttributeError:
        # Older Studio builds lack setUpgradeText; proceed without the text.
        print("Could not set upgrade text -- using old Studio?")
        print("Text was: {}".format(changeText))
    evs.append(change)
    return evs
@RuntimeModel.bind_document_load
def onLoad(state):
    """Build the halconfig model for every EFR32MG14P peripheral module,
    register each instance on *state*, then install the runtime hooks and
    honor any 'forceenable' properties.

    The 40 copy-pasted construction stanzas of the generated original are
    collapsed into one table-driven loop; creation order is unchanged.
    """
    # Prevent changed properties from enabling parent peripheral
    try:
        hal.registerDeviceOverride(hal.OVERRIDE_PERIPHERAL_AUTO_ENABLE, True)
    except Exception:
        # Fall back to misspelled version of the function argument
        try:
            hal.registerDeviceOverride(hal.OVERRIDE_PERIPHRAL_AUTO_ENABLE, True)
        except Exception:
            pass
    available_modules = Metadata.get_available_modules_for_family()
    if PROFILE:
        start = time.time()
    familyobj = dep.Family(state.device.name)
    # (constructor, instance name) pairs in the original creation order;
    # some behavior classes (ACMP, TIMER, USART) back several instances.
    module_specs = [
        (ACMP_behavior.ACMP, 'ACMP0'),
        (ACMP_behavior.ACMP, 'ACMP1'),
        (ADC_behavior.ADC, 'ADC0'),
        (ANTDIV_behavior.ANTDIV, 'ANTDIV'),
        (BATTERYMON_behavior.BATTERYMON, 'BATTERYMON'),
        (BTL_BUTTON_behavior.BTL_BUTTON, 'BTL_BUTTON'),
        (BULBPWM_behavior.BULBPWM, 'BULBPWM'),
        (BULBPWM_COLOR_behavior.BULBPWM_COLOR, 'BULBPWM_COLOR'),
        (BUTTON_behavior.BUTTON, 'BUTTON'),
        (CMU_behavior.CMU, 'CMU'),
        (COEX_behavior.COEX, 'COEX'),
        (CS5463_behavior.CS5463, 'CS5463'),
        (DCDC_behavior.DCDC, 'DCDC'),
        (EMU_behavior.EMU, 'EMU'),
        (EXTFLASH_behavior.EXTFLASH, 'EXTFLASH'),
        (EZRADIOPRO_behavior.EZRADIOPRO, 'EZRADIOPRO'),
        (FEM_behavior.FEM, 'FEM'),
        (GPIO_behavior.GPIO, 'GPIO'),
        (I2C_behavior.I2C, 'I2C0'),
        (I2CSENSOR_behavior.I2CSENSOR, 'I2CSENSOR'),
        (IDAC_behavior.IDAC, 'IDAC0'),
        (IOEXP_behavior.IOEXP, 'IOEXP'),
        (LED_behavior.LED, 'LED'),
        (LEUART_behavior.LEUART, 'LEUART0'),
        (PA_behavior.PA, 'PA'),
        (PRS_behavior.PRS, 'PRS'),
        (PTI_behavior.PTI, 'PTI'),
        (PYD1698_behavior.PYD1698, 'PYD1698'),
        (SERIAL_behavior.SERIAL, 'SERIAL'),
        (SPIDISPLAY_behavior.SPIDISPLAY, 'SPIDISPLAY'),
        (SPINCP_behavior.SPINCP, 'SPINCP'),
        (TIMER_behavior.TIMER, 'TIMER0'),
        (TIMER_behavior.TIMER, 'TIMER1'),
        (UARTNCP_behavior.UARTNCP, 'UARTNCP'),
        (USART_behavior.USART, 'USART0'),
        (USART_behavior.USART, 'USART1'),
        (VCOM_behavior.VCOM, 'VCOM'),
        (VDAC_behavior.VDAC, 'VDAC0'),
        (VUART_behavior.VUART, 'VUART'),
        (WDOG_behavior.WDOG, 'WDOG'),
    ]
    modules = []
    for ctor, name in module_specs:
        module_instance = ctor(name)
        module_instance.load_halconfig_model(available_modules, familyobj)
        state.set_module_object(name, module_instance)
        modules.append(module_instance)
    if PROFILE:
        stop = time.time()
        print("    construction of all modules completed in {:.3f} ms".format((stop - start) * 1000))
        start = time.time()
    # Do the hook installing after all modules have initialized
    PIN_Snippets.activate_runtime()
    PORTIO_Snippets.activate_runtime()
    for module_instance in modules:
        module_instance.activate_runtime(state)
        # A 'forceenable' property value of '1' pins the module checked
        # (read-only) in the Studio UI.
        force_enable_module = RuntimeModel.get_property_value(
            module_instance.get_property('forceenable'),
            RuntimeModel.get_studio_module_by_name(module_instance.name, state.mode),
            False
        )
        if force_enable_module == '1':
            RuntimeModel.set_module_checked(
                RuntimeModel.get_studio_module_by_name(module_instance.name, state.mode),
                True,
                state,
                readonly=True
            )
    PORTIO_Snippets.onLoad(state)
    if PROFILE:
        stop = time.time()
        print("    activate_runtime() for all modules completed in {:.3f} ms".format((stop - start) * 1000))
| [
"warrick@eosnewyork.io"
] | warrick@eosnewyork.io |
62484937f9bd92b8484eeacd00073556db5a90ab | 43de85543159d7c1a24d03a6fefa5f8a83947ad1 | /mod_scrap.py | 1f8d8ffd5514711f9cc3398ef0f524c72ed76fa6 | [] | no_license | ys-vitela/PIA_PC | 21bb1a123ede3adf717dafd16c70f8cada9efaeb | ff0250786122b39bf76f965f52d449ae815b9dd3 | refs/heads/main | 2023-02-01T03:49:49.351961 | 2020-12-09T01:33:59 | 2020-12-09T01:33:59 | 319,798,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,749 | py | #CODIGO PARA DESCARGAAR IMAGENES DE PAGINA WEB Y EL WHOIS
def mod_scrap():
    """Scrape every .jpg image referenced by the page at *dom* (the -s
    argument imported from main) into an ./images directory, then write the
    domain's WHOIS name servers to ip.txt."""
    import requests
    from bs4 import BeautifulSoup as bs
    import os
    import re
    import errno  # fix: errno was referenced below but never imported
    import whois
    from main import dom
    # Create the "images" directory where the downloaded files are stored.
    try:
        os.system("mkdir images")
    except OSError as e:
        # NOTE(review): os.system does not raise OSError on failure, so this
        # handler looks unreachable; kept for parity with the original.
        if e.errno != errno.EEXIST:
            raise
    # "dom" (imported from main) holds the URL passed via the -s argument.
    page = requests.get(dom)
    soup = bs(page.content, "lxml")
    # Collect the src of every <img> tag, keeping only .jpg addresses.
    imagenes = soup.findAll('img')
    direcciones = []
    for imagen in imagenes:
        try:
            direcciones.append(imagen['src'])
            imagenes = [x for x in direcciones if x.endswith('.jpg')]
        except KeyError:
            pass
    # Download each .jpg and save it (with a .png suffix) under images/.
    j = 1
    for x in imagenes:
        with open('images/%s'%x.split('/')[-1]+'.png', "wb") as f:
            page = requests.get(x)
            f.write(page.content)
            j = j+1
    # Helper that collapses runs of spaces in a string down to one space.
    def eliminaespacios(frase):
        frase = frase.strip()
        count = 0
        while True:
            spa = "  "
            for i in range (20):
                frase = frase.replace (spa, " ")
                spa +=" "
            count +=1
            if count == 10:
                break
        return frase
    # Fetch the WHOIS record for "dom"; the name servers come back as a
    # list, which is joined into one string and written to ip.txt.
    domain_info = whois.whois(dom)
    ip_list = (domain_info.name_servers)
    ip = " ".join([str(elem) for elem in ip_list])
    ip = eliminaespacios(ip)
    f = open("ip.txt", "w")
    f.write(ip)
    f.close()
| [
"noreply@github.com"
] | ys-vitela.noreply@github.com |
9315cc8bf5f6132cf366ce7e7d880acd7293cd3f | 88eeba6df8382687f36a4765bb298f76465c8e81 | /general/chainerrl/chainerrl/tests/links_tests/test_noisy_linear.py | 49b094838cec68f4f40aa91df7f9371a2755ba50 | [
"MIT"
] | permissive | daniellawson9999/quick_start | db0b6e382efd640754ca1e7800753c94e668423a | 947d61f118433dcd4cb845f27649ebfbc8062ecc | refs/heads/master | 2022-02-23T21:54:16.273530 | 2019-09-27T01:46:41 | 2019-09-27T01:46:41 | 197,873,032 | 0 | 0 | null | 2019-07-20T03:12:34 | 2019-07-20T03:12:31 | null | UTF-8 | Python | false | false | 2,798 | py | import unittest
import chainer
from chainer import cuda
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
import numpy
from chainerrl.links import noisy_linear
@testing.parameterize(*testing.product({
    'size_args': [
        (5,),  # uninitialized from Chainer v2
        (None, 5),  # uninitialized
        (6, 5),  # initialized
    ],
    'nobias': [False, True],
}))
class TestFactorizedNoisyLinear(unittest.TestCase):
    """Behavioural tests for noisy_linear.FactorizedNoisyLinear.

    Parameterized over the wrapped Linear's constructor arguments (lazily
    initialized vs. fully initialized) and over the ``nobias`` flag.
    """

    def setUp(self):
        # Wrap a plain Linear link so every test exercises the noisy wrapper.
        mu = chainer.links.Linear(*self.size_args, nobias=self.nobias)
        self.linear = noisy_linear.FactorizedNoisyLinear(mu)

    def _test_calls(self, xp):
        # The wrapper must accept Variables, raw arrays, and inputs whose
        # trailing dimensions still need flattening ((2, 3, 2) -> (2, 6)).
        x_data = xp.arange(12).astype(numpy.float32).reshape((2, 6))
        x = chainer.Variable(x_data)
        self.linear(x)
        self.linear(x_data + 1)
        self.linear(x_data.reshape((2, 3, 2)))

    def test_calls_cpu(self):
        self._test_calls(numpy)

    @attr.gpu
    def test_calls_gpu(self):
        self.linear.to_gpu(0)
        self._test_calls(cuda.cupy)

    @attr.gpu
    def test_calls_gpu_after_to_gpu(self):
        # Constructing the wrapper from an already-GPU-resident Linear must
        # also work.
        mu = self.linear.mu
        mu.to_gpu(0)
        self.linear = noisy_linear.FactorizedNoisyLinear(mu)
        self._test_calls(cuda.cupy)

    def _test_randomness(self, xp):
        # Two forward passes on the same input must differ, because fresh
        # noise is sampled on every call; the mean squared difference is
        # compared against the analytic value implied by sigma_scale = 0.4.
        x = xp.random.standard_normal((10, 6)).astype(numpy.float32)
        y1 = self.linear(x).array
        y2 = self.linear(x).array
        d = float(xp.mean(xp.square(y1 - y2)))

        # The parameter name suggests that
        # xp.sqrt(d / 2) is approx to sigma_scale = 0.4
        # In fact, (for each element _[i, j],) it holds:
        # \E[(y2 - y1) ** 2] = 2 * \Var(y) = (4 / pi) * sigma_scale ** 2
        target = (0.4 ** 2) * 2
        if self.nobias:
            target *= 2 / numpy.pi
        else:
            target *= 2 / numpy.pi + numpy.sqrt(2 / numpy.pi) / y1.shape[1]

        # Loose 3x bounds: the statistic is random, so the test only checks
        # the right order of magnitude (and is retried on flakiness below).
        self.assertGreater(d, target / 3.)
        self.assertLess(d, target * 3.)

    @condition.retry(3)
    def test_randomness_cpu(self):
        self._test_randomness(numpy)

    @attr.gpu
    @condition.retry(3)
    def test_randomness_gpu(self):
        self.linear.to_gpu(0)
        self._test_randomness(cuda.cupy)

    def _test_non_randomness(self, xp):
        # Noises should be the same in a batch
        x0 = xp.random.standard_normal((1, 6)).astype(numpy.float32)
        x = xp.broadcast_to(x0, (2, 6))
        y = self.linear(x).array
        xp.testing.assert_allclose(y[0], y[1], rtol=1e-4)

    def test_non_randomness_cpu(self):
        self._test_non_randomness(numpy)

    @attr.gpu
    def test_non_randomness_gpu(self):
        self.linear.to_gpu(0)
        self._test_non_randomness(cuda.cupy)
| [
"daniellawson9999@gmail.com"
] | daniellawson9999@gmail.com |
e68cd4e6bd9c58300783c58ef5af7d4a342b9a02 | a5fe2130ea434f958f6151cd4d8c92d43f1c1ca1 | /src/tests/test_foo.py | 58530c34370ed3e758e56350196a2c76cf087398 | [] | no_license | DavidArmendariz/django-movies-app | 44da33cc200773ef473ea21f67a1dfff57ea0e96 | b77f1f538bae4a906d0b00597fef8fef97ea409b | refs/heads/master | 2023-03-11T16:43:02.956765 | 2021-02-23T04:28:17 | 2021-02-23T04:28:17 | 338,206,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | def test_hello_world():
assert "hello_world" == "hello_world"
assert "foo" != "bar" | [
"darmendariz1998@outlook.com"
] | darmendariz1998@outlook.com |
a7d4d5bf7c36dad18109efd3495f3312e958580c | 931515a9fdd4404cb548fb6b80c91590f5d5e3c9 | /presalytics/client/presalytics_ooxml_automation/models/chart_column_collections.py | 91c556a70437a9633b3b183127aef59d065963d3 | [
"MIT"
] | permissive | presalytics/python-client | 2e2fbd617b493ed8be90b844e23b736f294065e3 | 5d80b78562126feeeb49af4738e2c1aed12dce3a | refs/heads/master | 2021-08-18T02:41:06.938468 | 2020-12-07T15:04:18 | 2020-12-07T15:04:18 | 203,414,411 | 4 | 1 | MIT | 2020-03-31T19:27:47 | 2019-08-20T16:31:57 | Python | UTF-8 | Python | false | false | 3,705 | py | # coding: utf-8
"""
OOXML Automation
This API helps users convert Excel and Powerpoint documents into rich, live dashboards and stories. # noqa: E501
The version of the OpenAPI document: 0.1.0-no-tags
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ChartColumnCollections(object):
    """API model for a chart column collection.

    Originally emitted by OpenAPI Generator; this is a hand-styled but
    behaviourally identical implementation.  Exposes two string fields,
    ``chart_data_id`` (required) and ``id`` (optional).
    """

    # attribute name -> declared OpenAPI type
    openapi_types = {
        'chart_data_id': 'str',
        'id': 'str'
    }

    # attribute name -> JSON field name in the API definition
    attribute_map = {
        'chart_data_id': 'chartDataId',
        'id': 'id'
    }

    def __init__(self, chart_data_id=None, id=None):  # noqa: E501
        """Create a ChartColumnCollections.

        :param chart_data_id: id of the owning chart data object
        :param id: this collection's id; left as None when not supplied
        """
        self._chart_data_id = None
        self._id = None
        self.discriminator = None
        # chart_data_id is always assigned (required field); id only when given.
        self.chart_data_id = chart_data_id
        if id is not None:
            self.id = id

    @property
    def chart_data_id(self):
        """str: identifier of the parent chart data object."""
        return self._chart_data_id

    @chart_data_id.setter
    def chart_data_id(self, chart_data_id):
        self._chart_data_id = chart_data_id

    @property
    def id(self):
        """str: identifier of this column collection."""
        return self._id

    @id.setter
    def id(self, id):
        self._id = id

    def to_dict(self):
        """Return the model's properties as a dict, recursing into nested models."""
        out = {}
        for name in self.openapi_types:
            val = getattr(self, name)
            if isinstance(val, list):
                out[name] = [item.to_dict() if hasattr(item, "to_dict") else item
                             for item in val]
            elif hasattr(val, "to_dict"):
                out[name] = val.to_dict()
            elif isinstance(val, dict):
                out[name] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                             for k, v in val.items()}
            else:
                out[name] = val
        return out

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Equality: same type and identical attribute dicts."""
        return isinstance(other, ChartColumnCollections) and \
            self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inequality: the negation of __eq__."""
        return not self == other
| [
"kevin@chart-a-lot.com"
] | kevin@chart-a-lot.com |
d2d50824723ac39e9f04079ab73ae5f3692d8239 | 42585e86010cf427ee92c98e88397e5b6a76eb5a | /wordpress_cd/main.py | 7a92cfc77aeb596b72b7f1ca1787473265d1cb32 | [] | no_license | rossigee/wordpress-cd | 3b04e57b4d897df62111bf86c24af0d0ac65e6e1 | ecc87770bfb17723b7383f2366756ff4e2aa95b9 | refs/heads/master | 2022-11-29T14:48:56.437324 | 2022-11-05T12:51:00 | 2022-11-05T12:51:00 | 126,587,798 | 3 | 2 | null | 2020-02-18T10:53:42 | 2018-03-24T10:14:46 | Python | UTF-8 | Python | false | false | 3,303 | py | #!/usr/bin/env python
import os
import os.path
import sys
import time
import argparse
import logging
_logger = logging.getLogger(__name__)
def usage():
    """Print the command-line usage summary for all wrapper commands.

    Serves as the fallback when the invoked wrapper name is not one of the
    known build-/test-/deploy- commands; always returns None.
    """
    for line in (
        "Usage:",
        " build-wp-site [-v] [-d] Build site artifacts using 'config.xml' in current directory.",
        " build-wp-plugin [-v] [-d] Build plugin found in current directory.",
        " build-wp-theme [-v] [-d] Build theme found in current directory.",
        " test-wp-site [-v] [-d] TODO: Run tests using artifacts from build directory.",
        " test-wp-plugin [-v] [-d] TODO: Run tests on plugin found in current directory.",
        " test-wp-theme [-v] [-d] TODO: Run tests on theme found in current directory.",
        " deploy-wp-site [-v] [-d] Deploy site artifacts to site specified via environment variables.",
        " deploy-wp-plugin [-v] [-d] Deploy plugin to site specified via environment variables..",
        " deploy-wp-theme [-v] [-d] Deploy theme to site specified via environment variables.",
        "Arguments:",
        " -v Be mildly verbose while running.",
        " -d Include debugging output.",
    ):
        print(line)
def main():
    """Entry point shared by all build-/test-/deploy- wrapper scripts.

    Dispatch is driven by the *name* the script was invoked under
    (``sys.argv[0]``), not by a sub-command argument: each console wrapper
    is a differently named entry-point to this function.  Unknown names
    (or a matching prefix with an unknown suffix) fall through to usage().
    """
    # Determine what mode we're running in based on the command line wrapper
    # that was invoked
    command_run = os.path.basename(sys.argv[0])

    # Read common command line arguments
    parser = argparse.ArgumentParser()
    #parser.add_argument('configfile', metavar='configfile', nargs=1,
    #    help='name of configuration file to use for this run')
    parser.add_argument('-v', dest='verbose', action='store_true')
    parser.add_argument('-d', dest='debug', action='store_true')
    args = parser.parse_args()
    #configfile = args.configfile[0]

    # Enable logging if verbosity requested
    log_level = logging.WARNING
    if args.debug:
        log_level = logging.DEBUG
    elif args.verbose:
        log_level = logging.INFO
    logging.basicConfig(
        format='%(asctime)s %(levelname)-8s %(message)s',
        level=log_level,
        datefmt='%Y-%m-%d %H:%M:%S')

    # Act according to command run.  The wordpress_cd.* stage modules are
    # imported lazily so each wrapper only pays for the stage it runs.
    if command_run[0:6] == 'build-':
        import wordpress_cd.build
        if command_run == 'build-wp-plugin':
            return wordpress_cd.build.build_plugin(args)
        elif command_run == 'build-wp-theme':
            return wordpress_cd.build.build_theme(args)
        elif command_run == 'build-wp-site':
            return wordpress_cd.build.build_site(args)
    elif command_run[0:5] == 'test-':
        import wordpress_cd.test
        if command_run == 'test-wp-plugin':
            return wordpress_cd.test.test_plugin(args)
        elif command_run == 'test-wp-theme':
            return wordpress_cd.test.test_theme(args)
        elif command_run == 'test-wp-site':
            return wordpress_cd.test.test_site(args)
    elif command_run[0:7] == 'deploy-':
        import wordpress_cd.deploy
        if command_run == 'deploy-wp-plugin':
            return wordpress_cd.deploy.deploy_plugin(args)
        elif command_run == 'deploy-wp-theme':
            return wordpress_cd.deploy.deploy_theme(args)
        elif command_run == 'deploy-wp-site':
            return wordpress_cd.deploy.deploy_site(args)

    # Unrecognised wrapper name: show the help text.
    return usage()
if __name__ == '__main__':
sys.exit(main())
| [
"ross@golder.org"
] | ross@golder.org |
1b0be4c7acdd39ae84b0a1c0fee0c2879a09e030 | a5845ef5aef8b65ede4bae5ce0f24f18c51619e0 | /marisatrie/__init__.py | 57fe875a20b1fc577f59e1587196862df36e1cd8 | [] | no_license | pombredanne/tries | b2b0f20782c50632053d215d58cbc0bbc2f58c5c | 0741f3d31ca19413bd2087312cc9a6cf9c42cc75 | refs/heads/master | 2021-01-22T07:23:30.742948 | 2013-06-27T08:21:20 | 2013-06-27T08:21:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py |
from .marisa_trie import *
| [
"pengkui.luo@gmail.com"
] | pengkui.luo@gmail.com |
7b91366dca5c3259a0892e8a148ebcc43cf4bc44 | 585f3c30f96ee172a35d3edf480a83a7d1316183 | /int2.py | d27b43fcbe148ef4dcaa39c4a72c8946a4a00b8a | [] | no_license | ochestra365/Rasberry_python | 14a0256f3b5242c99e6266a78671c47758d42162 | aea01b97b2b12033d30155d230efb494cfc9ebd4 | refs/heads/main | 2023-06-06T09:54:17.310625 | 2021-06-23T04:55:57 | 2021-06-23T04:55:57 | 371,236,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 543 | py | #-*-coding: utf-8-*-
import RPi.GPIO as GPIO
import time
switch = 6
flag = False
GPIO.setmode(GPIO.BCM)
GPIO.setup(switch, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
GPIO.setup(switch, GPIO.OUT, pull_up_down=GPIO.PUD_DOWN)
signal=True
def swBlink(channel):
try:
while True:
global flag
if GPIO.input(switch)==True:
print("interrupt")
GPIO.output(switch, signal)
flag = True
else:
flag=False
GPIO.add_event_detect(switch, GPIO.RISING, callback=swBlink)
try:
while True:
pass
except KeyboardInterrupt:
GPIO.cleanup()
| [
"ochestra365@naver.com"
] | ochestra365@naver.com |
5a0826ad9f7cbc75cb16320948b0a920328fccb2 | a0d6cbae196c24254fb6f1411d756da0029e092a | /trunk/src/appserver/apps/user_srv_d/main.py | e626d55800d34accfb379fe41899e7ed973f72ca | [] | no_license | newguangzhou/haha-cluster | 8101ee1cb5b5ddbf916268029a33336c6fa0b06d | 4cee4172f3bd7939e0369d46603a62087e206277 | refs/heads/master | 2021-05-16T10:21:38.245881 | 2017-09-25T03:36:07 | 2017-09-25T03:36:07 | 104,700,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,348 | py | # -*- coding: utf-8 -*-
import sys
sys.path.append("../../")
sys.path.append("../terminal_srv_d/")
reload(sys)
sys.setdefaultencoding('utf-8')
#import setproctitle
from tornado import ioloop, gen
from tornado.web import Application, url
import tornado.options
from tornado.options import define, options
from lib.console import Console
from lib.pyloader import PyLoader
from lib.auth_dao import AuthDAO
from lib.user_dao import UserDAO
from lib.pet_dao import PetDAO
from lib.global_dao import GlobalDAO
#from lib.device_dao import DeivceDAO
from lib.sys_config import SysConfig
from lib.new_device_dao import NewDeviceDAO
from lib.gid_rpc import GIDRPC
from lib.msg_rpc import MsgRPC
from lib.boradcast_rpc import BroadcastRPC
from lib import sys_config, discover_config
from lib.service_discovery import server_discoverer_worker
from lib.mongo_dao_base import GetMongoClientAndAuth
from concurrent.futures import ThreadPoolExecutor
from lib.service_discovery import server_discoverer_worker
from lib import discover_config
import logging
logger = logging.getLogger(__name__)
support_setptitle = True
try:
import setproctitle
except:
support_setptitle = False
import handlers
define("debug_mode", 0, int,
"Enable debug mode, 1 is local debug, 2 is test, 0 is disable")
define("port", 9100, int, "Listen port, default is 9100")
define("address", "0.0.0.0", str, "Bind address, default is 127.0.0.1")
define("console_port", 9110, int, "Console listen port, default is 9110")
# Parse commandline
tornado.options.parse_command_line()
max_thread_count = 30
# Init pyloader
pyloader = PyLoader("config")
conf = pyloader.ReloadInst("Config")
mongo_pyloader = PyLoader("configs.mongo_config")
mongo_conf = mongo_pyloader.ReloadInst("MongoConfig",
debug_mode=options.debug_mode)
# Set process title
if support_setptitle:
setproctitle.setproctitle(conf.proctitle)
#
worker = server_discoverer_worker.ServerDiscovererWorker()
msg_rpc = MsgRPC(worker.get_discover())
broadcast_rpc = BroadcastRPC(worker.get_discover())
#
thread_pool = ThreadPoolExecutor(max_thread_count)
mongo_client = GetMongoClientAndAuth(mongo_conf.default_meta)
# Init web application
webapp = Application(
[
(r"/user/get_verify_code", handlers.GetVerifyCode),
(r"/user/push_message_cmd", handlers.PushMessageCmd),
(r"/user/login", handlers.Login),
(r"/user/register", handlers.Register),
(r"/user/logout", handlers.Logout),
(r"/user/regen_token", handlers.RegenToken),
(r"/user/set_home_wifi", handlers.SetHomeWifi),
(r"/user/set_home_location", handlers.SetHomeLocation),
(r"/user/get_base_infomation", handlers.GetBaseInfo),
(r"/user/suggest", handlers.Suggest),
(r"/pet/location", handlers.PetLocation),
(r"/pet/location_test", handlers.PetLocation2),
(r"/pet/walk", handlers.PetWalk),
(r"/pet/find", handlers.PetFind),
(r"/pet/get_pet_type_info", handlers.PetTypeInfo),
(r"/pet/get_pet_info", handlers.GetPetInfo),
(r"/pet/get_pet_status", handlers.GetPetStatusInfo),
(r"/pet/add_pet_info", handlers.AddPetInfo),
(r"/pet/update_pet_info", handlers.UpdatePetInfo),
(r"/pet/healthy/get_activity_info", handlers.GetActivityInfo),
(r"/pet/healthy/get_sleep_info", handlers.GetSleepInfo),
(r"/pet/healthy/summary", handlers.Summary),
(r"/pet/healthy/set_sport_info", handlers.SetTargetStep),
(r"/pet/activity", handlers.PetActivity),
(r"/device/add_device_info", handlers.AddDeviceInfo),
(r"/device/get_info", handlers.GetDeviceInfo),
(r"/device/remove_device_info", handlers.RemoveDeviceInfo),
(r"/device/set_sim_info", handlers.SetSimInfo),
(r"/device/switch_light", handlers.SwitchLight),
(r"/device/get_light_status", handlers.GetDeviceSwitchLightStatus),
(r"/device/send_get_wifi_list_cmd", handlers.SendGetWifiListCmd),
(r"/device/get_wifi_list", handlers.GetWifiList),
(r"/device/reboot_device_cmd", handlers.RebootDeviceCmd),
(r"/user/agree_policy", handlers.AgreePolicy),
(r"/device/get_device_status", handlers.GetPetStatusInfo),
(r"/app/get_config", handlers.AppConfig),
(r"/user/set_outdoor_on_off", handlers.OutdoorOnOff),
(r"/user/set_outdoor_wifi", handlers.SetOutdoorWifi),
],
debug=True,
autoreload=True,
pyloader=pyloader,
user_dao=UserDAO.new(mongo_client, thread_pool),
global_dao=GlobalDAO.new(mongo_client, thread_pool),
auth_dao=AuthDAO.new(mongo_client, thread_pool),
pet_dao=PetDAO.new(mongo_client, thread_pool),
device_dao=NewDeviceDAO.new(mongo_client, thread_pool),
broadcast_rpc = broadcast_rpc,
msg_rpc=msg_rpc,
appconfig=conf, )
class _UserSrvConsole(Console):
    """Admin console for the user service.

    Understands a tiny command language over the console socket:
      quit              -- close this console session
      reload-config     -- re-import config.py and refresh webapp settings
      reload-sysconfig  -- reload the persisted SysConfig values
    """

    def handle_cmd(self, stream, address, cmd):
        # Returning False tells the Console loop to end this session;
        # True keeps it alive for the next command.
        if len(cmd) == 1 and cmd[0] == "quit":
            # NOTE(review): "Byte!" looks like a typo for "Bye!"; left
            # unchanged because it is a runtime string.
            self.send_response(stream, "Byte!")
            return False
        elif len(cmd) == 0:
            # Empty command line: ignore it.
            pass
        elif len(cmd) == 1 and cmd[0] == "reload-config":
            # Hot-reload the static config module and rebuild the GID RPC
            # client from the fresh URL.
            newconf = pyloader.ReloadInst("Config")
            webapp.settings["appconfig"] = newconf
            webapp.settings["gid_rpc"] = GIDRPC(newconf.gid_rpc_url)
            self.send_response(stream, "done")
        elif len(cmd) == 1 and cmd[0] == "reload-sysconfig":
            webapp.settings["sysconfig"].reload()
            self.send_response(stream, "done")
        else:
            self.send_response(stream, "Invalid command!")
        return True
# Init console
console = _UserSrvConsole()
console.bind(options.console_port, "127.0.0.1")
console.start()
# Init async
@gen.coroutine
def _async_init():
SysConfig.new(sys_config.DEFAULT_CATEGORY,mongo_client, thread_pool)
yield SysConfig.current().open()
webapp.settings["gid_rpc"] = GIDRPC(SysConfig.current().get(sys_config.SC_GID_RPC_URL))
try:
worker.register(discover_config.USER_SRV_D, options.port, 0, None)
worker.work()
except Exception, e:
print "worker register error exception:", e
logger.exception(e)
exit(0)
ioloop.IOLoop.current().run_sync(_async_init)
# Run web app loop
webapp.listen(options.port, options.address, xheaders=True)
ioloop.IOLoop.current().start()
| [
"bingodongtian@gmail.com"
] | bingodongtian@gmail.com |
9add2000816be03aa99b735ef6573cea1ec11c2f | d08ce5c9b1fdd200b1d4de757d9d44f9acc0a54b | /Message/Message/urls.py | 2955a86e25323baefa074cb02bca6099cfa6b638 | [] | no_license | smartwange/pygame | 82ae829f1448b877473428aa3b857d55af96588c | 75caf2cd360d4badfe57a6e4ff2a00c06f10d1f3 | refs/heads/master | 2022-05-24T14:13:37.825044 | 2020-05-11T03:48:11 | 2020-05-11T03:48:11 | 197,743,887 | 0 | 0 | null | 2022-04-22T23:18:02 | 2019-07-19T09:24:58 | Python | UTF-8 | Python | false | false | 841 | py | """Message URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from apps.message_form.views import message_form
urlpatterns = [
path('admin/', admin.site.urls),
path('message_form/', message_form),
]
| [
"smartwange@gmail.com"
] | smartwange@gmail.com |
be1abbc44622481b2fe3545cb7f02b36454ae323 | 49aed975589e6cbb5d7ba9f2cc3379f05cd0dc90 | /inception/train.py | 2b47719d68e45695783a6bec57481295de581cc3 | [] | no_license | gyuanfan/Tensorflow-Starter | e9ef53f7bece391f824b50fa27522fc56c09a7f1 | f2afe6309e5fce4e66db46f51181760e93b2c361 | refs/heads/master | 2021-09-01T07:00:21.861536 | 2017-12-25T15:35:43 | 2017-12-25T15:35:43 | 114,824,067 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,322 | py | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pkgutil
import numpy as np
import os
import datetime
import cv2
#import picpac
import tensorflow as tf
import tensorflow.contrib.slim as slim
import GS_split_299_resize
import sys
import random
import scipy.ndimage.interpolation
from scipy.ndimage import zoom
size=int(sys.argv[2])
max_epoch=200
batch=16
max_rotate=0
max_shift=60
max_scale=1.2
min_scale=0.9
def shift(img, shift_x, shift_y, the_min):
    """Shift a 2-D image by (shift_x, shift_y) pixels, padding with `the_min`.

    Positive shifts move content toward higher row/column indices
    (down/right); pixels vacated by the shift are filled with `the_min`.
    The result is always a float64 array (as before, via np.ones).

    Fixes two defects in the previous version:
    * the column pass read from and wrote to the *same* array through
      overlapping slices (`img` was rebound to `newimg`), which is unsafe
      with NumPy basic-slice assignment;
    * columns vacated by the y-shift kept stale values instead of being
      reset to `the_min`.

    :param img: 2-D numpy array
    :param shift_x: row shift (positive = down)
    :param shift_y: column shift (positive = right)
    :param the_min: fill value for vacated pixels
    :return: shifted 2-D float array of the same shape
    """
    (x, y) = img.shape
    # Row pass: copy the shifted rows into a fresh padding-filled buffer.
    rows = np.ones((x, y)) * the_min
    if shift_x >= 0:
        rows[shift_x:x, :] = img[0:(x - shift_x), :]
    else:
        rows[0:(x + shift_x), :] = img[-shift_x:x, :]
    # Column pass: again into a fresh buffer so no slice aliasing occurs.
    out = np.ones((x, y)) * the_min
    if shift_y >= 0:
        out[:, shift_y:y] = rows[:, 0:(y - shift_y)]
    else:
        out[:, 0:(y + shift_y)] = rows[:, -shift_y:y]
    return out
def scaleImage (image,scale):
    """Rescale a 2-D image by `scale` while keeping the original shape.

    Upscaled images are centre-cropped back to (x, y); downscaled images
    are pasted into a zero-filled canvas of the original size.

    NOTE(review): the crop/paste maths reuses the row offsets for the
    columns, so it assumes a square input -- confirm against the callers
    (which appear to pass square crops).  Also `int(round(x - x1) / 2)`
    rounds before halving, a probable off-by-one for odd differences.
    """
    [x,y]= image.shape
    x1=int(round(x*scale))
    y1=int(round(y*scale))
    # cv2.resize takes (width, height), hence the (y1, x1) order.
    image=cv2.resize(image,(y1,x1))
    new=np.zeros((x,y))
    if (x1>x):
        # Upscaled: centre-crop back down to the original size.
        start=int(round(x1/2-x/2))
        end=start+x
        new=image[start:end,start:end]
    else:
        # Downscaled (or unchanged): paste centred into a zero canvas.
        new_start=int(round(x-x1)/2)
        new_end=new_start+x1
        new[new_start:new_end,new_start:new_end]=image
    return new
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('opt','adam', '')
flags.DEFINE_string('mixin', None, 'database')
flags.DEFINE_integer('classes', '2', 'number of classes')
flags.DEFINE_integer('resize', None, '')
flags.DEFINE_integer('max_size', None, '')
flags.DEFINE_integer('channels', 2, '')
flags.DEFINE_string('net', 'inception_v3_mod.inception_v3', 'cnn architecture, e.g. vgg.vgg_a')
flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
flags.DEFINE_bool('decay', True, '')
flags.DEFINE_float('decay_rate', 0.95, '')
flags.DEFINE_float('decay_steps', 100, '')
flags.DEFINE_integer('test_steps', 1000, 'Number of steps to run evaluation.')
flags.DEFINE_integer('save_steps', 1000, 'Number of steps to run evaluation.')
flags.DEFINE_integer('max_steps', 600000, 'Number of steps to run trainer.')
flags.DEFINE_string('model', 'model', 'Directory to put the training data.')
flags.DEFINE_integer('split', 1, 'split into this number of parts for cross-validation')
flags.DEFINE_integer('split_fold', 1, 'part index for cross-validation')
flags.DEFINE_integer('max_to_keep', 20, '')
# load network architecture by name
def inference (inputs, num_classes):
    """Build the CNN named by FLAGS.net and return its logits tensor.

    FLAGS.net is a dotted path such as 'inception_v3_mod.inception_v3':
    everything before the last dot is imported as a module, and the last
    component is looked up in it as the network-constructor function.
    """
    full = FLAGS.net
    # e.g. full == 'tensorflow.contrib.slim.python.slim.nets.vgg.vgg_a'
    fs = full.split('.')
    loader = pkgutil.find_loader('.'.join(fs[:-1]))
    module = loader.load_module('')
    net = getattr(module, fs[-1])
    logits, _ = net(inputs, num_classes)
    #logits = tf.squeeze(logits, [1,2]) # resnet output is (N,1,1,C, remove the
    # Named identity so the logits tensor can be retrieved from the graph.
    return tf.identity(logits, name='logits')
def fcn_loss (logits, labels):
    """Return [mean softmax cross-entropy, mean top-1 accuracy] for a batch."""
    with tf.name_scope('loss'):
        labels = tf.to_int32(labels) # float from picpac
        xe = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='xentropy')
        hit = tf.cast(tf.nn.in_top_k(logits, labels, 1, name="accuracy"), tf.float32)
        return [tf.reduce_mean(xe, name='xentropy_mean'), tf.reduce_mean(hit, name='accuracy_total')]
    # unreachable; retained from the original source
    pass
def training (loss, rate):
    """Build the train op: optional exponential LR decay + Adam or SGD.

    NOTE(review): with --opt=adam the supplied rate is divided by 100
    before being handed to AdamOptimizer -- the effective Adam LR is
    rate/100, not rate.
    """
    #tf.scalar_summary(loss.op.name, loss)
    global_step = tf.Variable(0, name='global_step', trainable=False)
    if FLAGS.decay:
        rate = tf.train.exponential_decay(rate, global_step, FLAGS.decay_steps, FLAGS.decay_rate, staircase=True)
        #tf.summary.scalar('learning_rate', rate)
    if FLAGS.opt == 'adam':
        rate /= 100
        optimizer = tf.train.AdamOptimizer(rate)
        print('adam!')
    else:
        optimizer = tf.train.GradientDescentOptimizer(rate)
        print('gradient!')
        pass
    return optimizer.minimize(loss, global_step=global_step)
def run_training (train_input_var,train_label_var,test_input_var,test_label_var,fold):
    """Train FLAGS.net on the given fold split and checkpoint the best model.

    Trains for up to max_epoch epochs in mini-batches of `batch`, evaluating
    on (test_input_var, test_label_var) every 640 training images and saving
    the session to FLAGS.model/<fold> whenever the evaluation loss improves.

    Per-sample augmentation (training only): random rotation (max_rotate),
    random scale in [min_scale, max_scale], random shift up to max_shift
    pixels -- applied identically to both channels of each image.

    NOTE(review): `config['mixin']` / `config['max_size']` below refer to
    the *local* name `config`, which is only assigned later
    (`config = tf.ConfigProto()`); if --mixin or --max_size is ever passed,
    these lines raise UnboundLocalError.  Left unchanged pending a decision
    on the intended target dict.
    """
    try:
        os.makedirs(FLAGS.model)
    except:
        # directory already exists (or is not creatable) -- best effort
        pass
    if not FLAGS.mixin is None:
        config['mixin'] = FLAGS.mixin
        config['mixin_group_delta'] = 0
    if not FLAGS.max_size is None:
        config['max_size'] = FLAGS.max_size
    with tf.Graph().as_default():
        # Placeholders: NHWC images with FLAGS.channels channels, and a flat
        # vector of per-image labels.
        X = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels), name="images")
        Y_ = tf.placeholder(tf.float32, shape=(None,), name="labels")
        logits = inference(X, FLAGS.classes)
        loss, accuracy = fcn_loss(logits, Y_)
        train_op = training(loss, FLAGS.learning_rate)
        #summary_op = tf.merge_all_summaries()
        #summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, tf.get_default_graph())
        init = tf.global_variables_initializer()
        #graph_txt = tf.get_default_graph().as_graph_def().SerializeToString()
        #with open(os.path.join(FLAGS.train_dir, "graph"), "w") as f:
        #    f.write(graph_txt)
        #    pass
        saver = tf.train.Saver(max_to_keep=FLAGS.max_to_keep)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth=True
        #run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        #run_metadata = tf.RunMetadata()
        with tf.Session(config=config) as sess:
            sess.run(init)
            total_train_image=0
            # sentinel "best loss so far"; any real loss below 100 replaces it
            initial_test_loss=100
            for epoch in range(0,max_epoch):
                loss_sum = 0
                accuracy_sum = 0
                batch_sum = 0
                count=int(train_input_var.shape[0]/batch)
                for i in range(0, count):
                    # Assemble one augmented mini-batch.
                    tmp_input=[]
                    tmp_label=[]
                    jjj=i*batch
                    while (jjj<(i*batch+batch)):
                        img=train_input_var[jjj,:,:,:]
                        #img1=img[:,:,0].reshape(size,size)
                        #img2=img[:,:,1].reshape(size,size)
                        img1=img[:,:,0]
                        img2=img[:,:,1]
                        ### rotate
                        rrr=random.random()
                        rrr_rotate=rrr*max_rotate
                        img1=scipy.ndimage.interpolation.rotate(img1, rrr_rotate,reshape=False)
                        img2=scipy.ndimage.interpolation.rotate(img2, rrr_rotate,reshape=False)
                        ### scale
                        rrr=random.random()
                        rrr_scale=rrr*(max_scale-min_scale)+min_scale
                        img1=scaleImage(img1,rrr_scale)
                        img2=scaleImage(img2,rrr_scale)
                        # random shift in [-max_shift, max_shift] per axis
                        rrr=(random.random()-0.5)*2
                        rrr_shift_x=int(np.floor(rrr*max_shift))
                        rrr=(random.random()-0.5)*2
                        rrr_shift_y=int(np.floor(rrr*max_shift))
                        #img1=scipy.ndimage.interpolation.shift(img1, [rrr_shift_x,rrr_shift_y])
                        #img2=scipy.ndimage.interpolation.shift(img2, [rrr_shift_x,rrr_shift_y])
                        img1=shift(img1,rrr_shift_x,rrr_shift_y,img1.min())
                        img2=shift(img2,rrr_shift_x,rrr_shift_y,img2.min())
                        img=np.stack((img1,img2),axis=2)
                        tmp_input.append(img)
                        label=train_label_var[jjj]
                        tmp_label.append(label)
                        jjj=jjj+1
                        total_train_image=total_train_image+1
                    images=np.asarray(tmp_input)
                    labels=np.asarray(tmp_label)
                    #print(images.shape, labels.shape)
                    feed_dict = {X: images,
                                 Y_: labels}
                    _, loss_value, accuracy_value, ll = sess.run([train_op, loss, accuracy, logits], feed_dict=feed_dict)
                    # Running (per-image weighted) loss/accuracy for logging.
                    loss_sum += loss_value * batch
                    accuracy_sum += accuracy_value * batch
                    batch_sum += batch
                    if (total_train_image ) % 320 == 0:
                        # Periodic progress log, then reset the running sums.
                        print(datetime.datetime.now())
                        print('i %d: loss = %.4f, accuracy = %.4f' % (i+1, loss_sum/batch_sum, accuracy_sum/batch_sum))
                        loss_sum = 0
                        accuracy_sum = 0
                        batch_sum = 0
                    if (total_train_image ) % 640 == 0:
                        # Full pass over the held-out fold (no augmentation).
                        batch_sum2 = 0
                        loss_sum2 = 0
                        accuracy_sum2 = 0
                        count_test=int(test_input_var.shape[0]/batch)
                        for test_i in range(0, count_test):
                            tmp_input=[]
                            tmp_label=[]
                            jjj=test_i*batch
                            while (jjj<(test_i*batch+batch)):
                                img=test_input_var[jjj,:,:,:]
                                img1=img[:,:,0].reshape(size,size)
                                img2=img[:,:,1].reshape(size,size)
                                img=np.stack((img1,img2),axis=2)
                                tmp_input.append(img)
                                label=test_label_var[jjj]
                                tmp_label.append(label)
                                jjj=jjj+1
                            images=np.asarray(tmp_input)
                            labels=np.asarray(tmp_label)
                            feed_dict = {X: images,
                                         Y_: labels}
                            loss_value, accuracy_value,ll = sess.run([loss, accuracy,logits], feed_dict=feed_dict)
                            batch_sum2 += batch
                            loss_sum2 += loss_value * batch
                            accuracy_sum2 += accuracy_value * batch
                        print('total numer:%d, step:%d' % (batch_sum2, i+1))
                        print('evaluation: loss = %.4f, accuracy = %.4f, total_loss=%.4f, total_num=%.4f' % (loss_sum2/batch_sum2, accuracy_sum2/batch_sum2,loss_sum2,batch_sum2))
                        test_loss=loss_sum2/batch_sum2
                        # Checkpoint only when the evaluation loss improves.
                        if (test_loss<initial_test_loss):
                            #ckpt_path = '%s/%s' % (FLAGS.model, (epoch+ 1))
                            ckpt_path = '%s/%s' % (FLAGS.model, (fold))
                            saver.save(sess, ckpt_path)
                            initial_test_loss=test_loss
                            pass
                        pass
                    pass
                pass
            pass
def main (_):
    """Run 5-fold cross-validation training (invoked via tf.app.run()).

    Each round reloads the five pre-split folds from sys.argv[1], stacks
    four of them into the training set, holds one out for validation, and
    lets run_training() checkpoint the best model under FLAGS.model/<fold>.

    NOTE(review): the dataset is re-read from disk before every fold, which
    is wasteful but keeps only one copy resident between rounds.
    """
    # Hold out fold4
    (fold0_input, fold0_label, fold1_input, fold1_label, fold2_input, fold2_label, fold3_input, fold3_label, fold4_input, fold4_label)=GS_split_299_resize.GS_split_299_resize(sys.argv[1],size)
    train_input_var=np.vstack((fold0_input,fold1_input,fold2_input,fold3_input))
    train_label_var=np.concatenate((fold0_label,fold1_label,fold2_label,fold3_label))
    test_input_var=fold4_input
    test_label_var=fold4_label
    run_training(train_input_var,train_label_var,test_input_var,test_label_var,'fold4')
    # Hold out fold0
    (fold0_input, fold0_label, fold1_input, fold1_label, fold2_input, fold2_label, fold3_input, fold3_label, fold4_input, fold4_label)=GS_split_299_resize.GS_split_299_resize(sys.argv[1],size)
    train_input_var=np.vstack((fold4_input,fold1_input,fold2_input,fold3_input))
    train_label_var=np.concatenate((fold4_label,fold1_label,fold2_label,fold3_label))
    test_input_var=fold0_input
    test_label_var=fold0_label
    run_training(train_input_var,train_label_var,test_input_var,test_label_var,'fold0')
    # Hold out fold2
    (fold0_input, fold0_label, fold1_input, fold1_label, fold2_input, fold2_label, fold3_input, fold3_label, fold4_input, fold4_label)=GS_split_299_resize.GS_split_299_resize(sys.argv[1],size)
    train_input_var=np.vstack((fold4_input,fold1_input,fold0_input,fold3_input))
    train_label_var=np.concatenate((fold4_label,fold1_label,fold0_label,fold3_label))
    test_input_var=fold2_input
    test_label_var=fold2_label
    run_training(train_input_var,train_label_var,test_input_var,test_label_var,'fold2')
    # Hold out fold3
    (fold0_input, fold0_label, fold1_input, fold1_label, fold2_input, fold2_label, fold3_input, fold3_label, fold4_input, fold4_label)=GS_split_299_resize.GS_split_299_resize(sys.argv[1],size)
    train_input_var=np.vstack((fold4_input,fold1_input,fold0_input,fold2_input))
    train_label_var=np.concatenate((fold4_label,fold1_label,fold0_label,fold2_label))
    test_input_var=fold3_input
    test_label_var=fold3_label
    run_training(train_input_var,train_label_var,test_input_var,test_label_var,'fold3')
    # Hold out fold1
    (fold0_input, fold0_label, fold1_input, fold1_label, fold2_input, fold2_label, fold3_input, fold3_label, fold4_input, fold4_label)=GS_split_299_resize.GS_split_299_resize(sys.argv[1],size)
    train_input_var=np.vstack((fold4_input,fold3_input,fold0_input,fold2_input))
    train_label_var=np.concatenate((fold4_label,fold3_label,fold0_label,fold2_label))
    test_input_var=fold1_input
    test_label_var=fold1_label
    run_training(train_input_var,train_label_var,test_input_var,test_label_var,'fold1')
if __name__ == '__main__':
tf.app.run()
| [
"yuanfang.guan@gmail.com"
] | yuanfang.guan@gmail.com |
828855bc5a1f6617ef25c47b606649d873810864 | 1d49dcfe7a725ed9c21d5e614b7e61c81aae1c88 | /modules/critics/CentralV.py | 62f3043f15838fbfacbcde6b6d31b5066599a20e | [
"Apache-2.0"
] | permissive | xiaojize/SMAC-1 | c405aa22d30a7f176b4b2a29669ae82ea7f0b3c7 | 7aaf4673b0eecafc4ab25f381eea20fc762af56a | refs/heads/master | 2023-06-30T14:37:44.870652 | 2021-07-23T15:15:49 | 2021-07-23T15:15:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | import torch.nn as nn
import torch.nn.functional as F
class CentralV_Critic(nn.Module):
    """Centralized state-value critic: maps a state vector to a scalar V(s).

    A three-layer MLP (input_shape -> 128 -> 128 -> 1) with ReLU on the two
    hidden layers and a linear output head.
    """

    def __init__(self, input_shape, args):
        super(CentralV_Critic, self).__init__()
        # `args` is stored for interface parity with sibling critics; it is
        # not read anywhere in this class.
        self.args = args
        # NOTE: layer creation order is kept as-is so parameter
        # initialization is reproducible under a fixed RNG seed.
        self.fc1 = nn.Linear(input_shape, 128)
        self.fc2 = nn.Linear(128, 128)
        self.fc3 = nn.Linear(128, 1)

    def forward(self, state):
        """Return V(state) with shape (..., 1)."""
        hidden = F.relu(self.fc2(F.relu(self.fc1(state))))
        return self.fc3(hidden)
"jk96491@naver.com"
] | jk96491@naver.com |
e4fad6f8b529d255a2b5cc6fdd7905813c09dbcb | c1ae731c7132089d09404292b43c8b7789753d3d | /agent/agent/lib/agent_thread/startstop_service.py | 27e8003346904ad64d2081aacca7ce8ed8e2ab16 | [
"Apache-2.0"
] | permissive | isabella232/cronus-agent | 489c1326cb85f6d2a2163c5807b9a54a1e87c644 | f973ef9fd49d0d454a2b4d5e9f17d88943fda8fc | refs/heads/master | 2023-03-12T11:46:33.659523 | 2015-04-17T22:05:55 | 2015-04-17T22:05:55 | 329,880,667 | 0 | 0 | Apache-2.0 | 2021-02-24T03:56:47 | 2021-01-15T10:30:23 | null | UTF-8 | Python | false | false | 6,001 | py | #pylint: disable=W0703,R0912,R0915,R0904,W0105
'''
Copyright 2014 eBay Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
""" Thread to perform creation of a service """
import os
import traceback
from agent.lib.utils import islink
from agent.lib.utils import readlink
from agent.lib.errors import Errors
from agent.lib.errors import AgentException
import logging
from agent.lib.agent_thread.manifest_control import ManifestControl
from agent.lib import manifestutil
LOG = logging.getLogger(__name__)
class StartStopService(ManifestControl):
""" This thread will attempt to restart a service
This means going through each package in ACTIVE manifest
call the shutdown
call start
"""
ACTION_STARTUP = 'Startup'
ACTION_SHUTDOWN = 'Shutdown'
ACTION_RESTART = 'Restart'
ACTION_REBOOT = 'Reboot'
THREAD_NAME = 'service_lifecycle'
def __init__(self, threadMgr, service, action):
""" Constructor """
ManifestControl.__init__(self, threadMgr, service, manifest = None)
self.setName(StartStopService.THREAD_NAME)
self.__action = action
self.__LOG = manifestutil.getServiceLogger(self, logging.getLogger(__name__))
def doRun(self):
""" Main body of the thread """
errorMsg = ""
errorCode = None
failed = False
activeManifest = None
try:
activePath = manifestutil.manifestPath(self._service, 'active')
# make sure that the active path exists and it is a link
# Should we check this again since we already have a check in action controller
if not os.path.exists(activePath) or not islink(activePath):
raise AgentException(error = Errors.ACTIVEMANIFEST_MANIFEST_MISSING, errorMsg = 'No active manifest - cannot restart service')
activeManifest = os.path.basename(readlink(activePath))
if self.__action == StartStopService.ACTION_SHUTDOWN:
self.__shutdownManifest(self._service, activeManifest)
elif self.__action == StartStopService.ACTION_STARTUP:
self.__startupManifest(self._service, activeManifest)
elif self.__action == StartStopService.ACTION_RESTART:
self.__restartManifest(self._service, activeManifest)
elif self.__action == StartStopService.ACTION_REBOOT:
self.__rebootManifest(self._service, activeManifest)
else:
raise AgentException(error = Errors.INVALID_LIFECYCLE_ACTION, errorMsg = 'Invalid life cycle action - %s' % self.__action)
self.__LOG.info('Done: %s service for (%s/%s)' % (self.__action, self._service, activeManifest))
self._updateStatus(progress = 100)
except AgentException as exc:
failed = True
errorMsg = '%s Service - Agent Exception - %s' % (self.__action, exc.getMsg())
errorCode = exc.getCode()
except Exception as exc:
failed = True
errorMsg = '%s Service - Unknown error - (%s/%s) - %s - %s' \
% (self.__action, self._service, self._manifest, str(exc), traceback.format_exc(5))
errorCode = Errors.UNKNOWN_ERROR
finally:
if failed:
self.__LOG.error(errorMsg)
if not self._skipCleanupOnFailure() and self.__action != StartStopService.ACTION_SHUTDOWN and self._service and activeManifest:
try:
self.__LOG.info('%s Service %s failed, shutdown to cleanup' % (self.__action, self._service))
self.__shutdownManifest(self._service, activeManifest)
except BaseException as excep:
self.__LOG.error('Cleanup failed - %s' % str(excep))
self._updateStatus(httpStatus = 500, error = errorCode, errorMsg = errorMsg)
def __shutdownManifest(self, service, manifest):
""" shutdown a manifest. This means calling shutdown script on manifest packages
@param service - service of manifest to deactivate
"""
self.__LOG.info("Shutdown active Manifest %s" % (service))
self._execPackages('shutdown', service, manifest, 50, 90, activateFlow = False)
def __startupManifest(self, service, manifest):
""" startup a manifest. This means calling startup script on manifest packages
@param service - service of manifest to activate
"""
self.__LOG.info("Startup active Manifest %s" % (service))
self._execPackages('startup', service, manifest, 50, 90, activateFlow = False)
def __restartManifest(self, service, manifest):
""" restart a manifest. This means calling startup/lcm script on manifest packages
@param service - service of manifest to activate
"""
self.__LOG.info("restart active Manifest %s" % (service))
self._execPackages('restart', service, manifest, 10, 90, activateFlow = False)
def __rebootManifest(self, service, manifest):
""" reboot a manifest. This means calling startup/lcm script on manifest packages
@param service - service of manifest to activate
"""
self.__LOG.info("reboot active Manifest %s" % (service))
self._execPackages('reboot', service, manifest, 10, 90, activateFlow = False)
| [
"ypei@ebay.com"
] | ypei@ebay.com |
0762f01953113d7ff375bd9c7855d4a6bdac7408 | cbb40a3e417a22819e647b1fc0b418d2e5fec493 | /__about__.py | 64563cb77aa3e870546cb80a7038486620d64d65 | [
"MIT"
] | permissive | LowellObservatory/pwrusb | 40d8d0e688dc034c8a4e2bbf54bd30c16734c866 | fba72da5807a5a7f50a46a71628f5a54fe787582 | refs/heads/master | 2020-03-27T04:07:31.456442 | 2016-04-11T19:20:01 | 2016-04-11T19:20:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py |
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "pwrusb"
__summary__ = "A python wrapper for controlling pwrusb.com power strip outlets."
__uri__ = "https://github.com/henryroe/pwrusb"
# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
# pre-release of a version is, e.g. 0.2.1.dev1 (0.2.1 is *newer* than 0.2.1.dev1)
# post-release of a version is, e.g. 0.2.1-1 (0.2.1 is *older* than 0.2.1-1)
__version__ = "1.0.3"
__author__ = "Henry Roe"
__email__ = "hroe@hroe.me"
__license__ = "MIT License"
__copyright__ = "2016 %s" % __author__
| [
"hroe@hroe.me"
] | hroe@hroe.me |
41d503f2fd5dfb36331bba7d119e640f0115f4a9 | e0e86206df49708df564c44cd2e05a97fddea865 | /weather.py | 170749be0b480c3caa660ead7471328f906e64b3 | [] | no_license | Emory-ADS-Group/weather-api | 3bc67d5bde7216f7f6ddf820196cc132768fdf90 | 91abe4efa7edd814c829b41eb4e7a79c7199505d | refs/heads/master | 2021-01-10T17:16:10.056280 | 2016-02-21T20:27:45 | 2016-02-21T20:27:45 | 52,226,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,523 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 13 19:17:52 2016
@author: Mandar
"""
from urllib.request import urlopen
import json
import csv
Pincode = ['30322','30329','92799']
print(Pincode[0])
URL_Pre = 'https://api.weathersource.com/v1/75fdebce9e0417074545/''history_by_postal_code.json?period=day&postal_code_eq='
URL_Post = '&country_eq=US×tamp_between=2012-02-10T00:00:00-05:00,2012-02-12T00:00:00-05:00&fields=postal_code,country,timestamp,tempMax,tempAvg,tempMin,precip,snowfall,windSpdMax,windSpdAvg,windSpdMin,cldCvrMax,cldCvrAvg,cldCvrMin,dewPtMax,dewPtAvg,dewPtMin,feelsLikeMax,feelsLikeAvg,feelsLikeMin,relHumMax,relHumAvg,relHumMin,sfcPresMax,sfcPresAvg,sfcPresMin,spcHumMax,spcHumAvg,spcHumMin,wetBulbMax,wetBulbAvg,wetBulbMin'
URL_Fetch = ''
pin = ''
#URL_Fetch = 'https://api.weathersource.com/v1/75fdebce9e0417074545/''history_by_postal_code.json?period=day&postal_code_eq='+PinCode[0]+'&country_eq=US×tamp_between=2012-02-10T00:00:00-05:00,2012-02-15T00:00:00-05:00&fields=postal_code,country,timestamp,tempMax,tempAvg,tempMin,precip,snowfall,windSpdMax,windSpdAvg,windSpdMin,cldCvrMax,cldCvrAvg,cldCvrMin,dewPtMax,dewPtAvg,dewPtMin,feelsLikeMax,feelsLikeAvg,feelsLikeMin,relHumMax,relHumAvg,relHumMin,sfcPresMax,sfcPresAvg,sfcPresMin,spcHumMax,spcHumAvg,spcHumMin,wetBulbMax,wetBulbAvg,wetBulbMin'
weather_data = open('WeatherData.csv', 'a',newline = '')
csvwriter = csv.writer(weather_data)
count = 0
for i in Pincode:
URL_Fetch = URL_Pre + i + URL_Post
print(URL_Fetch)
response=urlopen(URL_Fetch).read().decode('utf-8')
with open('outputfile.json', 'a') as outf:
{ outf.write(response)}
json_parsed = json.loads(response)
for wh in json_parsed:
if count == 0:
header = wh.keys()
csvwriter.writerow(header)
count += 1
csvwriter.writerow(wh.values())
weather_data.close()
''' with open('outputfile.json') as json_file:
json_parsed = json.load(json_file)
print(json_parsed)
weather_data = open('WeatherData.csv', 'a',newline = '')
csvwriter = csv.writer(weather_data)
count = 0
for wh in json_parsed:
if count == 0:
header = wh.keys()
csvwriter.writerow(header)
count += 1
csvwriter.writerow(wh.values())
weather_data.close()
'''
| [
"mandarpra@gmail.com"
] | mandarpra@gmail.com |
6cc03fb54250c0b2f6556012d2bf83b75474b3f2 | 9d278285f2bc899ac93ec887b1c31880ed39bf56 | /ondoc/cart/migrations/0006_merge_20190326_1307.py | 67a383ce2f25bfc403a88150e01f3f911f341528 | [] | no_license | ronit29/docprime | 945c21f8787387b99e4916cb3ba1618bc2a85034 | 60d4caf6c52a8b70174a1f654bc792d825ba1054 | refs/heads/master | 2023-04-01T14:54:10.811765 | 2020-04-07T18:57:34 | 2020-04-07T18:57:34 | 353,953,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | # Generated by Django 2.0.5 on 2019-03-26 07:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cart', '0005_auto_20190315_1612'),
('cart', '0004_auto_20190318_1424'),
]
operations = [
]
| [
"sonamsinha@policybazaar.com"
] | sonamsinha@policybazaar.com |
aad4ce1ab32ecf2b0ac846d47ffe05836b87eeeb | 8c9d54d05a4c6211b784185db75f1ec600cca2ce | /decode_json.py | ee12dcf9774c5bc467a027cc8cb165f2e03d3771 | [] | no_license | cardi/retrofuture-client-tools | 1011bea4eaf3062cab157df6f6274fb89d2cafbf | c1acf651322d496c8e87e03062b51c8c570de29b | refs/heads/master | 2021-01-25T07:54:26.807994 | 2017-06-28T01:21:47 | 2017-06-28T01:21:47 | 93,679,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | #!/usr/bin/env python
# decode_json.py: reads data from stdin, outputs "results" to stdout decoded payload
import json
import sys
import base64
from pprint import pprint
jdata = sys.stdin.read()
# sometimes we have http headers before json
jdata = jdata[ jdata.index('{') : ]
# kind of annoying if we didn't format json properly
data = json.loads(jdata.replace("\'", '"'))
data['results'] = data['results'].decode('base64')
print data['results']
| [
"calvin@isi.edu"
] | calvin@isi.edu |
bbc9346e361617f40137e996c9caee2f66e94355 | 032a0c939d96d0e5307dbce86e11faf7060f4ed9 | /lte/gateway/python/magma/pipelined/tests/test_ipv6_prefix_mapper.py | d33410b7b423133760753874c76ffd7d50ae75a6 | [
"BSD-3-Clause"
] | permissive | radha0018/magma | cac9ff3491dd2661e5dc0aa1f9a304a5428e2d2a | 8436966a4bb3cf7fdc3f567704062b6f9568db25 | refs/heads/master | 2023-05-05T08:26:07.132969 | 2021-05-27T18:44:44 | 2021-05-27T18:44:44 | 371,097,174 | 0 | 2 | NOASSERTION | 2021-05-26T16:26:21 | 2021-05-26T16:15:53 | Go | UTF-8 | Python | false | false | 2,280 | py | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from magma.pipelined.ipv6_prefix_store import (
InterfaceIDToPrefixMapper,
get_ipv6_interface_id,
get_ipv6_prefix,
)
class InterfaceMappersTest(unittest.TestCase):
def setUp(self):
self._interface_to_prefix_mapper = InterfaceIDToPrefixMapper()
self._interface_to_prefix_mapper._prefix_by_interface = {}
def test_prefix_mapper_test(self):
ipv6_addrs = ['ba10:5:6c:9:9d21:4407:d337:1928',
'321b:534:6c:9:999:0:d337:1928',
'222b:5334:111c:111::d337:1928']
prefixes = [get_ipv6_prefix(ipv6_addrs[0]),
get_ipv6_prefix(ipv6_addrs[1])]
interfaces = [get_ipv6_interface_id(ipv6_addrs[0]),
get_ipv6_interface_id(ipv6_addrs[1]),
get_ipv6_interface_id(ipv6_addrs[2])]
self._interface_to_prefix_mapper.save_prefix(
interfaces[0], prefixes[0])
self.assertEqual(
self._interface_to_prefix_mapper.get_prefix(
interfaces[0]),
'ba10:5:6c:9::')
self._interface_to_prefix_mapper.save_prefix(
interfaces[1], prefixes[1])
self.assertEqual(interfaces[1], '::999:0:d337:1928')
self.assertEqual(
self._interface_to_prefix_mapper.get_prefix(
interfaces[1]),
prefixes[1])
self._interface_to_prefix_mapper.save_prefix(
interfaces[0], prefixes[1])
self.assertEqual(
self._interface_to_prefix_mapper.get_prefix(
interfaces[0]),
'321b:534:6c:9::')
self.assertEqual(
self._interface_to_prefix_mapper.get_prefix(
interfaces[2]),
None)
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | radha0018.noreply@github.com |
4ee6a8bdca790223ff50fd6e205515c6e1e5c9bf | 3265c15c9dfb588f204414db5336cc75819c9ae9 | /Code PIE/calculetrajevitementtronquee.py | a35ce851f6d64b0847155fd694d2786dde5fc877 | [] | no_license | QuentinTeix/PIE-Voiture-Autonome | e424f5d675a490ca690164a6e7c1a234dbe9047a | 17fd5542513c3f17adc33ce8fa59a59bff128e5d | refs/heads/main | 2023-08-13T08:44:21.791551 | 2021-10-08T09:21:34 | 2021-10-08T09:21:34 | 316,242,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,876 | py | from paramètres import *
from intersectionarc import *
from calculrayoncourbure import *
from intersection import *
from math import *
def calculetrajevitementtronquee(v,p,sgyp):
#calcule la trajectoire approchée par ligne brisée de la voiture
#revoie 0 si cette traj ne coupe pas la droite OP et trajectoire sous fourme de liste de segments
#renvoie 1 si cette traj coupe la droite OP et trajectoire tronquée (on garde que les segments avant celui qui coupe OP) sous fourme de liste de segments
[positioninit1,positioninit2,orientationinit1,orientationinit2,vinit1,vinit2,deltat,amaxlat,epsilonmax,amax,amin,tsb,l,larg,vmax,N,rv,m,alpha,lanti]=params()
Rminamaxlat=v**2/amaxlat
Rminepsilonmax=tsb*v**2/(epsilonmax*pi/180)+l/(epsilonmax*pi/180)
Rmin=max(Rminamaxlat,Rminepsilonmax)
Rmax=abs(calculrayoncourbure(p))/4
xp=p[0]
yp=p[1]
T=3 #horizont de prévision en secondes
nb=10 #nb de pts que comporte la trajectoire (plus on en a plus c'est précis)
deltaT=T/nb #incréments des temps entre deux pts de la trajectoire
ptstrajectoire=[] #contient les pts de la traj aux instants i*deltaT
#initialiation
vprim=max(v + amin*deltaT,1) #on veut pas de cas ou la vitesse devient trop basse ou mm négative on la règle à 1m/s au min de manière arbitraire
Rminamaxlatprim=vprim**2/amaxlat
Rminepsilonmaxprim=tsb*vprim**2/(epsilonmax*pi/180)+l/(epsilonmax*pi/180)
Rminprim=max(Rminamaxlatprim,Rminepsilonmaxprim)
theta=0
thetaparc=(vprim/Rminprim)*deltaT*sgyp
thetaprim=theta+thetaparc
yprimreft=sgyp*Rminprim*(1-cos(thetaparc)) #ordonnée du pt de traj à t+dt dans le ref de la voiture à t
xprimreft=(Rminprim-yprimreft)*sin(thetaparc) #abscisse du pt de traj à t+dt dans le ref de la voiture à t
rprim=sqrt(xprimreft**2+yprimreft**2)
x=0
y=0
xprim=x+rprim*cos(thetaprim)
yprim=x+rprim*sin(thetaprim)
ptstrajectoire.append([x,y])
ptstrajectoire.append([xprim,yprim])
#hérédité
i=1
while i<nb:
x,y=xprim,yprim
v=vprim
theta=thetaprim
vprim=max(v + amin*deltaT,1)
Rminamaxlatprim=vprim**2/amaxlat
Rminepsilonmaxprim=tsb*vprim**2/(epsilonmax*pi/180)+l/(epsilonmax*pi/180)
Rminprim=max(Rminamaxlatprim,Rminepsilonmaxprim)
thetaparc=(vprim/Rminprim)*deltaT*sgyp
thetaprim=theta+thetaparc
yprimreft=sgyp*Rminprim*(1-cos(thetaparc)) #ordonnée du pt de traj à t+dt dans le ref de la voiture à t
xprimreft=(Rminprim-yprimreft)*sin(thetaparc) #abscisse du pt de traj à t+dt dans le ref de la voiture à t
rprim=sqrt(xprimreft**2+yprimreft**2)
xprim=x+rprim*cos(thetaprim)
yprim=y+rprim*sin(thetaprim)
ptstrajectoire.append([xprim,yprim])
#print('vprim',vprim)
i+=1
#on à maintenant les pts constitutifs de la trajectoire, on approxime la trajectoire à la ligne brisée obtenue en liant les points sur la trajectoire par des segments
#on va chercher à savoir quand cette trajectoire nous ramène sur la ligne droite qui relie le pt ou l'on se trouve actuellement (0,0) au point cible p
trajectoire=[]
i=0
while i<(len(ptstrajectoire)-1):
trajectoire.append([ptstrajectoire[i][0],ptstrajectoire[i][1],ptstrajectoire[i+1][0],ptstrajectoire[i+1][1]])
i=i+1
xp=p[0]
yp=p[1]
j=0
while j<len(trajectoire) and intersection(p,trajectoire[j])!=1:
j+=1
if j==len(trajectoire): #alors aucun segment de la traj ne coupe OP
return(0,trajectoire)
return(1,trajectoire[:(j+2)])
| [
"noreply@github.com"
] | QuentinTeix.noreply@github.com |
b283a0051efe62913bc5c309405a700df0dd9373 | 578b9ff976429bf97bc944e457465f5e98f7973a | /ChuVietTay.py | 0bebe74bfaa9132a3e09b25c64b44d7526cd4013 | [] | no_license | hoangliem98/ChuVietTay | e224b2bdf74eac91d4cf4996c470142e8a4ff3f4 | 36a1864569f49d99d78e32818161e66f617d5872 | refs/heads/master | 2023-01-30T14:30:56.793735 | 2020-12-17T08:36:36 | 2020-12-17T08:36:36 | 322,231,398 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
img = cv.imread('D:\LN\Python\Projects\digits.png', 0);
#lấy chữ bằng cách cắt từ ảnh dùng để nhận diện, có thể thêm 1 ảnh chứa só khác để nhận diện
chuviet = [np.hsplit(row, 100) for row in np.vsplit(img, 50)]
x = np.array(chuviet)
trainData = x[:,:50].reshape(-1,400).astype(np.float32);
test = x[:,50:100].reshape(-1,400).astype(np.float32);
k = np.arange(10);
train_labels = np.repeat(k, 250)[:,np.newaxis];
knn = cv.ml.KNearest_create()
knn.train(trainData, cv.ml.ROW_SAMPLE, train_labels)
kq1, kq2, kq3, kq4 = knn.findNearest(test, 5)
print (kq2)
#print (train)
#cv.imwrite('c1c.jpg', train);
#cv.imwrite('c2c.jpg', trains);
#cv.waitKey(0)
#cv.destroyAllWindows()
| [
"1651012093liem@ou.edu.vn"
] | 1651012093liem@ou.edu.vn |
17261e523784bf2f34722edf40c070552af6fe36 | 3e77a86429ba0f6968f709e77e204cdfe920a041 | /python/python/src/python_problems/webAutomationTestScripts/testSuites/glbRestTests/getMasterBillingAccount.py | 5169d96ece8bece21ea979551aafa018b45765e0 | [] | no_license | ramyamango123/test | a2d9bb6cafe8d7406b76eba526ddded2acf2a3b2 | 85420c9406109a72e1b1d455ea29a5cae9def5a3 | refs/heads/master | 2020-06-07T23:34:08.344051 | 2014-03-12T05:19:05 | 2014-03-12T05:19:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,701 | py | #Get Master Plan Billing Account
#Includes both positive and negative test cases.
#Created by Tarja Rechsteiner on 12.01.09.
import sys
from testSuiteBase import TestSuiteBase
from selenium import selenium
import time
CLIENTIPADDRESS = '127.0.0.1'
FIRSTNAME = 'Tester'
LASTNAME = 'Dummy'
ADDRESS1 = '123 Fake Street'
CITY = 'San Mateo'
STATE = 'CA'
COUNTRY = 'US'
ZIPCODE = '94403'
PHONENUMBER = '555-555-5555'
CREDITCARDNO='378282246310005'
SECURENO='123'
CCMONTH='10'
CCYEAR='2011'
GAMEURL='http://gazillion.com'
PLANID='10003936'
class GetMasterBillingAccount(TestSuiteBase):
def setUp(self):
self.toolBox = self.getGlbToolbox()
self.selenium = selenium("localhost", 4444, "*firefox", "https://stage.ariasystems.net/webclients/dreamworksPay/Handler.php")
self.selenium.start()
self.selenium.window_maximize()
def tearDown(self):
self.selenium.close()
self.selenium.stop()
def test_validInfo(self):
'''Valid information -- TC1'''
userid, billingId = self.validAccountCreation()
result = self.toolBox.getMasterBillingAccount(userid)
self.assertTrue(result.httpStatus() == 200,\
"http status code: " + str(result.httpStatus()))
#structure check
self.assertTrue('masterAccount' in result, "No masterAccount found")
self.assertTrue('accountId' in result['masterAccount'], "No accountId found")
self.assertFalse('errors' in result, "Errors in success XML")
#values check
self.assertEqual(billingId, result['masterAccount']['accountId'], "values don't match")
self.toolBox.scriptOutput("getMasterBillingAccount valid info account", {"userid": userid, "billingId": billingId})
def test_validParentInfo(self):
'''Valid parent information -- TC2'''
userid, billingId = self.validAccountCreationParent()
result = self.toolBox.getMasterBillingAccount(userid)
self.assertTrue(result.httpStatus() == 200,\
"http status code: " + str(result.httpStatus()))
#structure check
self.assertTrue('masterAccount' in result, "No masterAccount found")
self.assertTrue('accountId' in result['masterAccount'], "No accountId found")
self.assertFalse('errors' in result, "Errors in success XML")
#values check
self.assertEqual(billingId, result['masterAccount']['accountId'], "values don't match")
self.toolBox.scriptOutput("getMasterBillingAccount valid parent account", {"userid": userid, "billingId": billingId})
def test_validPaypalInfo(self):
'''Valid Paypal information -- TC3'''
#Failing this testcase since Paypal flow is still inactive
self.fail()
userid, billingId = self.validPaypalAccountCreation()
result = self.toolBox.getMasterBillingAccount(userid)
self.assertTrue(result.httpStatus() == 200,\
"http status code: " + str(result.httpStatus()))
#structure check
self.assertTrue('masterAccount' in result, "No masterAccount found")
self.assertTrue('accountId' in result['masterAccount'], "No accountId found")
self.assertFalse('errors' in result, "Errors in success XML")
#values check
self.assertEqual(billingId, result['masterAccount']['accountId'], "values don't match")
self.toolBox.scriptOutput("getMasterBillingAccount valid paypal account", {"userid": userid, "billingId": billingId})
def test_validParentPaypalInfo(self):
'''Valid Paypal parent information -- TC4'''
#Failing this testcase since Paypal flow is still inactive
self.fail()
userid, billingId = self.validPaypalAccountCreationParent()
result = self.toolBox.getMasterBillingAccount(userid)
self.assertTrue(result.httpStatus() == 200,\
"http status code: " + str(result.httpStatus()))
#structure check
self.assertTrue('masterAccount' in result, "No masterAccount found")
self.assertTrue('accountId' in result['masterAccount'], "No accountId found")
self.assertFalse('errors' in result, "Errors in success XML")
#values check
self.assertEqual(billingId, result['masterAccount']['accountId'], "values don't match")
self.toolBox.scriptOutput("getMasterBillingAccount valid parent paypal account", {"userid": userid, "billingId": billingId})
def test_validInfoNoBilling(self):
'''Valid information with no billing account attached -- TC5'''
_, result = self.toolBox.registerNewUsername()
self.assertTrue('user' in result, "XML from register does not contain user")
userid = result['user']['id']
result = self.toolBox.getMasterBillingAccount(userid)
self.assertTrue(result.httpStatus() == 499,\
"http status code: " + str(result.httpStatus()))
self.failureCheck(result, ['No billing account exists for this user', '16032'])
self.infoFailCheck(result, userid)
self.toolBox.scriptOutput("getMasterBillingAccount no billing account", {"userid": userid})
def test_unvalidatedInfo(self):
'''Unvalidated CC user -- TC6'''
userid, billingId = self.invalidAccountCreation()
result = self.toolBox.getMasterBillingAccount(userid)
self.assertTrue(result.httpStatus() == 499,\
"http status code: " + str(result.httpStatus()))
self.failureCheck(result, ['No billing account exists for this user', '16032'])
self.infoFailCheck(result, userid)
self.toolBox.scriptOutput("getMasterBillingAccount invalid CC account", {"userid": userid, "billingId": billingId})
def test_unvalidatedPaypalInfo(self):
'''Unvalidated paypal user -- TC7'''
userid, billingId = self.invalidPaypalAccountCreation()
result = self.toolBox.getMasterBillingAccount(userid)
self.assertTrue(result.httpStatus() == 499,\
"http status code: " + str(result.httpStatus()))
self.failureCheck(result, ['No billing account exists for this user', '16032'])
self.infoFailCheck(result, userid)
self.toolBox.scriptOutput("getMasterBillingAccount invalid paypal account", {"userid": userid, "billingId": billingId})
def test_missingParams(self):
'''Missing information -- TC8'''
result = self.toolBox.blankGet('getMasterBillingAccount')
self.assertTrue(result.httpStatus() == 400,\
"http status code: " + str(result.httpStatus()))
self.failureCheck(result, ['Not enough parameters to satisfy request', '4000'])
def test_unexpectedValues(self):
'''Empty values -- TC9'''
result = self.toolBox.getMasterBillingAccount('')
self.assertTrue(result.httpStatus() == 499,\
"http status code: " + str(result.httpStatus()))
self.failureCheck(result, ['Parameter values are empty for the request', '4003'])
self.infoFailCheck(result, '')
def test_invalidInfo(self):
'''Invalid account id -- TC10'''
result = self.toolBox.getMasterBillingAccount('00000000000000000')
self.assertTrue(result.httpStatus() == 499,\
"http status code: " + str(result.httpStatus()))
self.failureCheck(result, ['Id does not match any records', '17000'])
self.infoFailCheck(result, '00000000000000000')
def test_invalidTitleCode(self):
'''Invalid title code -- TC11'''
userid, billingId = self.validAccountCreation()
self.toolBox.setTitleCodeParam('somejunk')
result = self.toolBox.getMasterBillingAccount(userid)
self.assertTrue(result.httpStatus() == 499,\
"http status code: " + str(result.httpStatus()))
self.failureCheck(result, ["Title code does not match any records", '17002'])
self.infoFailCheck(result, userid, 'somejunk')
self.toolBox.setTitleCodeParam('KFPW')
def test_emptyTitleCode(self):
'''Blank Title Code -- TC12'''
userid, billingId = self.validAccountCreation()
self.toolBox.setTitleCodeParam('')
result = self.toolBox.getMasterBillingAccount(userid)
self.assertTrue(result.httpStatus() == 499,\
"http status code: " + str(result.httpStatus()))
self.failureCheck(result, ["Parameter values are empty for the request", '4003'])
self.infoFailCheck(result, userid, '')
self.toolBox.setTitleCodeParam('KFPW')
def test_missingTitleCode(self):
'''No Title Code -- TC13'''
userid, billingId = self.validAccountCreation()
self.toolBox.setTitleCodeParam(None)
result = self.toolBox.getMasterBillingAccount(userid)
self.assertTrue(result.httpStatus() == 400,\
"http status code: " + str(result.httpStatus()))
self.failureCheck(result, ['Not enough parameters to satisfy request', '4000'])
self.infoFailCheck(result, userid, None)
self.toolBox.setTitleCodeParam('KFPW')
# Helper Methods #
def validAccountCreation(self):
'''Registers an account for the valid info test'''
username, result = self.toolBox.registerNewUsername()
self.assertTrue('user' in result, "XML from register does not contain user")
gameAcctId = self.toolBox.getGameIdFromUser(username)
id = result['user']['id']
billingType = '1'
result = self.toolBox.createBillingAcct(id,gameAcctId,billingType,CLIENTIPADDRESS,PLANID,firstName=FIRSTNAME,lastName=LASTNAME,
address1=ADDRESS1,city=CITY,state=STATE,country=COUNTRY,zipCode=ZIPCODE,gameUrl=GAMEURL)
self.assertTrue('account' in result, result)
sessionId = result['account']['inSessionID']
flowId = result['account']['flowID']
self.ariaHostedPage(sessionId, flowId)
return id, result['account']['accountId']
def validAccountCreationParent(self):
'''Registers an account for the valid parent info test'''
username, result = self.toolBox.registerNewParent()
self.assertTrue('user' in result, "XML from register does not contain user")
gameAcctId = self.toolBox.getGameIdFromUser(username)
id = result['user']['id']
billingType = '1'
result = self.toolBox.createBillingAcct(id,gameAcctId,billingType,CLIENTIPADDRESS,PLANID,firstName=FIRSTNAME,lastName=LASTNAME,
address1=ADDRESS1,city=CITY,state=STATE,country=COUNTRY,zipCode=ZIPCODE,gameUrl=GAMEURL)
self.assertTrue('account' in result, result)
sessionId = result['account']['inSessionID']
flowId = result['account']['flowID']
self.ariaHostedPage(sessionId, flowId)
return id, result['account']['accountId']
def validPaypalAccountCreation(self):
'''Registers a paypal account for the valid info test'''
username, result = self.toolBox.registerNewUsername()
self.assertTrue('user' in result, "XML from register does not contain user")
gameAcctId = self.toolBox.getGameIdFromUser(username)
id = result['user']['id']
billingType = '11'
result = self.toolBox.createBillingAcct(id,gameAcctId,billingType,CLIENTIPADDRESS,PLANID)
self.assertTrue('account' in result, result)
masterBillingAcctId = result['account']['accountId']
paypalResult = self.toolBox.startPaypalPlan(masterBillingAcctId)
paypalToken = paypalResult['paypal']['paypalToken']
paypalURL = paypalResult['paypal']['returnUrl']
URL = paypalURL + paypalToken
self.acceptPaypalAgreementUsingSelenium(URL)
paypalResult2 = self.toolBox.finishPaypalPlan(masterBillingAcctId, paypalToken)
return id, masterBillingAcctId
def validPaypalAccountCreationParent(self):
'''Registers a paypal account for the valid parent info test'''
username, result = self.toolBox.registerNewParent()
self.assertTrue('user' in result, "XML from register does not contain user")
gameAcctId = self.toolBox.getGameIdFromUser(username)
id = result['user']['id']
billingType = '11'
result = self.toolBox.createBillingAcct(id,gameAcctId,billingType,CLIENTIPADDRESS,PLANID)
self.assertTrue('account' in result, result)
masterBillingAcctId = result['account']['accountId']
paypalResult = self.toolBox.startPaypalPlan(masterBillingAcctId)
paypalToken = paypalResult['paypal']['paypalToken']
paypalURL = paypalResult['paypal']['returnUrl']
URL = paypalURL + paypalToken
self.acceptPaypalAgreementUsingSelenium(URL)
paypalResult2 = self.toolBox.finishPaypalPlan(masterBillingAcctId, paypalToken)
return id, masterBillingAcctId
def invalidAccountCreation(self):
'''Registers an invalid account for the valid info test'''
username, result = self.toolBox.registerNewUsername()
self.assertTrue('user' in result, "XML from register does not contain user")
gameAcctId = self.toolBox.getGameIdFromUser(username)
id = result['user']['id']
billingType = '1'
result = self.toolBox.createBillingAcct(id,gameAcctId,billingType,CLIENTIPADDRESS,PLANID,firstName=FIRSTNAME,lastName=LASTNAME,
address1=ADDRESS1,city=CITY,state=STATE,country=COUNTRY,zipCode=ZIPCODE,gameUrl=GAMEURL)
self.assertTrue('account' in result, result)
return id, result['account']['accountId']
def invalidPaypalAccountCreation(self):
'''Registers an invalid paypal account for the valid info test'''
username, result = self.toolBox.registerNewUsername()
self.assertTrue('user' in result, "XML from register does not contain user")
gameAcctId = self.toolBox.getGameIdFromUser(username)
id = result['user']['id']
billingType = '11'
result = self.toolBox.createBillingAcct(id,gameAcctId,billingType,CLIENTIPADDRESS,PLANID)
self.assertTrue('account' in result, result)
masterBillingAcctId = result['account']['accountId']
return id, masterBillingAcctId
def ariaHostedPage(self, sessionId, flowId):
'''Entering credit card information through selenium'''
sel = self.selenium
sel.open(r"file://///hq-fs01/dept/Dev/QA/Web/KungFuPandaWorld/Web_Services/DB/Web%20Services%20Test.html")
sel.select("wsUrl", "label=" + str(self.toolBox.webHost))
sel.click("//input[@value='set environment']")
sel.wait_for_page_to_load("30000")
sel.is_text_present("Current Environment: " + str(self.toolBox.webHost))
sel.type("ahp_inSessionID", sessionId)
sel.type("ahp_flowID", flowId)
sel.click("ahp_submit")
sel.wait_for_page_to_load("30000")
time.sleep(2)
sel.type("cc_number", "4111111111111111")
sel.click("cc_expire_mm")
sel.select("cc_expire_mm", "label=January")
sel.click("//option[@value='1']")
sel.click("cc_expire_yyyy")
sel.select("cc_expire_yyyy", "label=2012")
sel.click("//option[@value='2012']")
sel.click("cvv")
sel.type("cvv", "123")
sel.click("submitButton")
sel.wait_for_page_to_load("30000")
def acceptPaypalAgreementUsingSelenium(self, URL):
'''Entering paypal information through selenium'''
sel = self.selenium
sel.open("https://www.sandbox.paypal.com/cgi-bin/webscr?cmd=_express-checkout&token=/")
sel.click("link=PayPal Sandbox")
sel.wait_for_page_to_load("30000")
#login to paypal
sel.type("login_email", "sharmila.janardhanan@slipg8.com")
sel.type("login_password", "password")
sel.click("submit")
sel.wait_for_page_to_load("30000")
time.sleep(6)
sel.open(URL)
sel.wait_for_page_to_load("30000")
time.sleep(2)
#login to sandbox test account
sel.type("login_email", "sharmi_1263862208_per@slipg8.com")
sel.type("login_password", "gazillion")
sel.click("login.x")
sel.wait_for_page_to_load("30000")
sel.click("continue")
sel.wait_for_page_to_load("30000")
self.assertEqual("Paypal Callback", sel.get_title())
time.sleep(1)
def failureCheck(self, result, expected) :
    """Assert that *result* is a well-formed single-error failure response.

    result   -- parsed response (XML converted to nested dicts)
    expected -- pair: expected[0] is the error message, expected[1] the error code

    NOTE(review): despite the old docstring, this only validates the
    single-error shape; there is no multi-error branch here.
    """
    # checking for XML structure
    self.assertFalse('user' in result, "XML structure returned success XML")
    self.assertTrue('errors' in result, "XML structure failed, no errors")
    self.assertTrue('error' in result['errors'], "XML structure failed, no error")
    self.assertTrue('code' in result['errors']['error'], "XML structure failed, no code")
    self.assertTrue('message' in result['errors']['error'], "XML structure failed, no message")
    self.assertTrue('parameters' in result['errors']['error'], "XML structure failed, parameters")
    self.assertFalse('masterAccount' in result, "XML structure failed, masterAccount present")
    # Checks for messages
    self.assertEqual(result['errors']['error']['message'], expected[0], "Expected error message not found. Found: " + str(result['errors']['error']['message']) + " " + expected[0])
    self.assertEqual(result['errors']['error']['code'], expected[1], "Expected error code not found. Found: " + str(result['errors']['error']['code']))
def infoFailCheck(self, result, userId, titleCode='KFPW'):
    """Assert the error 'parameters' echo back the request that failed.

    result    -- parsed failure response (see failureCheck)
    userId    -- account id the request was made for
    titleCode -- title code sent with the request, or None when it was omitted
    """
    parameters = self.toolBox.httpParamToDict(result['errors']['error']['parameters'])
    self.assertTrue(len(parameters) != 0, "Parameters string did not resolve to pairs" + str(result))
    # assertEqual (rather than assertTrue on ==) reports both values on failure.
    self.assertEqual(parameters['accountId'], userId, "UserId returned not equal to userId given: " + userId + " " + str(parameters))
    self.assertEqual(parameters['service'], "getMasterBillingAccount", "Service returned not equal to service called: getMasterBillingAccount" + str(parameters))
    if titleCode is None:  # 'is None' is the correct identity check (was '== None')
        self.assertNotIn('titleCode', parameters, "titleCode not passed, but included in return XML: " + str(parameters))
    else:
        self.assertEqual(parameters['titleCode'], titleCode, "Title code returned not equal to title code called: " + titleCode + " " + str(parameters))
"ramya@Ramyas-MacBook-Air.local"
] | ramya@Ramyas-MacBook-Air.local |
5d7db1fb83ce424b4d6fd59de99f2307f69a15fb | a4843f673692e4c703d09b4a625ecf14d0c238ec | /bin/attic/mpest_consensus_tree.py | 7d5a2ef17dd937a7396cd8a6b38dda6498a8a8a5 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | carloliveros/phyluce | db0fd5f04b6eb1f362ce7a95ca9dde5514301ff8 | 1b227a51b655820154ecca4e9e52ac7722d7a71e | refs/heads/master | 2020-04-25T13:02:32.637099 | 2019-05-10T18:34:24 | 2019-05-10T18:34:24 | 136,492,179 | 0 | 0 | NOASSERTION | 2019-02-26T21:03:20 | 2018-06-07T14:52:12 | Python | UTF-8 | Python | false | false | 3,920 | py | #!/usr/bin/env python
# encoding: utf-8
"""
consensus_tree.py
Created by Nick Crawford on 2010-05-19.
Copyright (c) 2010
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses
The author may be contacted at ngcrawford@gmail.com
PURPOSE: compute the majority tree (> 0.5), given a list of trees on input.
If inputting mpest trees, will also take a control file and label the leaves
given the control file contents.
USAGE: python ../../consensus_tree.py --input=917_loci_19_species_mpest.tree \
--control-file=917_loci_19_species.control
--output=917_loci_19_species_mpest_consensus.tree
"""
import os
import sys
import glob
import optparse
import dendropy
import pdb
def interface():
    """Parse the command-line options.

    Returns the (options, args) pair from optparse.  Exits with status 2
    when --input is missing or does not point to an existing file.
    """
    usage = "usage: %prog [options]"
    p = optparse.OptionParser(usage)
    p.add_option('--input', dest='input', action='store',
                 type='string', default=None,
                 help='The path to the configuration file.', metavar='FILE')
    p.add_option('--control-file', dest='control', action='store',
                 type='string', default=None,
                 help='The path to the configuration file.', metavar='FILE')
    p.add_option('--output', dest='output', action='store',
                 type='string', default=None,
                 help='The path to the output file.', metavar='FILE')
    (options, arg) = p.parse_args()
    # Validate BEFORE normalising: os.path.expanduser(None) raises TypeError,
    # so the original "is --input missing?" check could never be reached.
    if not options.input:
        p.print_help()
        sys.exit(2)
    options.input = os.path.abspath(os.path.expanduser(options.input))
    if not os.path.isfile(options.input):
        print("You must provide a valid path to the configuration file.")
        p.print_help()
        sys.exit(2)
    # --output is optional; only normalise it when it was actually given
    # (the original crashed here whenever --output was omitted).
    if options.output:
        options.output = os.path.abspath(os.path.expanduser(options.output))
    return options, arg
def make_consensus(treefile, min_freq=0.5):
    """Build the majority-rule consensus of all newick trees in *treefile*.

    treefile -- path to a file of newick trees
    min_freq -- minimum split frequency for a clade to appear in the consensus
    """
    # Load every tree from the input file, then collapse them into a single
    # consensus topology at the requested minimum split frequency.
    trees = dendropy.TreeList()
    trees.read_from_path(treefile, "newick")
    return trees.consensus(min_freq)
def newick2nexus(in_dir):
    """Read every '*.tre' newick file in *in_dir* into one dendropy TreeList."""
    collected = dendropy.TreeList()
    pattern = os.path.join(in_dir, '*.tre')
    for tree_path in glob.glob(pattern):
        collected.read_from_path(tree_path, 'newick')
    return collected
def get_species_dict(control):
    """Map 1-based mpest taxon numbers to species names.

    Parses an mpest control file: skips the three header lines, then reads
    one species name (the first tab-separated field) per line until the
    terminating '0' line.

    control -- path to the mpest control file
    Returns {1: first_species, 2: second_species, ...}.
    """
    species = []
    # 'with' guarantees the handle is closed (the original leaked it);
    # plain 'r' mode replaces the deprecated py2 'rU'.
    with open(control, 'r') as control_file:
        for _ in range(3):  # discard the three-line header
            control_file.readline()
        for line in control_file:
            ls = line.strip().split('\t')
            if ls[0] == '0':  # sentinel row: end of the species table
                break
            species.append(ls[0])
    return dict(zip(range(1, len(species) + 1), species))
def main():
    """Entry point: build the consensus tree, relabel leaves, and emit it.

    With --control-file, mpest's numeric leaf labels (1, 2, 3, ...) are
    replaced by the species names from the control file.  The tree is
    written to --output when given, otherwise printed to stdout.
    """
    options, arg = interface()
    if options.control:
        # get a dict of the species in the file
        sp_dict = get_species_dict(options.control)
        cons = make_consensus(options.input)
        # rename the leaves with something other than 1,2,3
        for leaf in cons.leaf_nodes():
            current = leaf.taxon.label
            new = sp_dict[int(current)]
            leaf.taxon.label = new
    else:
        cons = make_consensus(options.input)
    if options.output:
        outf = open(options.output, 'w')
        outf.write(cons.as_string('newick'))
        outf.close()
    else:
        # Python 2 print statement, consistent with the rest of this script.
        print cons.as_string('newick')
if __name__ == '__main__':
main()
| [
"brant.faircloth@gmail.com"
] | brant.faircloth@gmail.com |
d1d6cce15c3f159ecea43f243c0fd9bec07ca85b | 7974fcc4dfe34f96ceace6f1f8288a21446c5268 | /pycamhd/native/__init__.py | b35f2864d013d4e6cf35eeb60bcb95f0db1200b2 | [
"MIT"
] | permissive | amarburg/pycamhd | 4240aba5c1924b3ec87f3517374da8322ac9ed3c | d7ba6f55ab7215a6095a5734eddf3d7864c4d925 | refs/heads/master | 2021-07-08T04:07:21.538237 | 2017-10-05T20:33:07 | 2017-10-05T20:33:07 | 105,929,491 | 0 | 0 | null | 2017-10-05T19:07:58 | 2017-10-05T19:07:58 | null | UTF-8 | Python | false | false | 102 | py | #!/usr/bin/env python
from .pycamhd import *
__version__ = '0.7.0dev-native'  # package version string for the native build
VERSION = __version__  # uppercase alias of __version__
| [
"amarburg@apl.washington.edu"
] | amarburg@apl.washington.edu |
15ef76fd1b0480e1a00ff6a183f756261b21306d | 6899f55b07bd6d49da2d331dfce217f92673ed34 | /Accounts/migrations/0021_auto_20201218_2042.py | 724846eecd1e6aa2cacb1d6ff9a966c2dc5e54cf | [
"MIT"
] | permissive | Khushiraikar1/sudhaksha_maxo | e72945f2d2e6ec985b27a67f2db4465cf3a72ce2 | ccaba5426b8fcac0d6772bdb78916cb0cd0c09e7 | refs/heads/main | 2023-02-11T12:09:35.046523 | 2021-01-15T16:37:55 | 2021-01-15T16:37:55 | 317,636,328 | 2 | 6 | MIT | 2021-01-15T15:40:49 | 2020-12-01T18:46:39 | HTML | UTF-8 | Python | false | false | 369 | py | # Generated by Django 3.1.3 on 2020-12-18 20:42
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the timetable field 'lass_obj' to 'clsobj'."""

    dependencies = [
        ('Accounts', '0020_auto_20201218_1916'),
    ]

    operations = [
        # Pure column rename; no data is modified.
        migrations.RenameField(
            model_name='timetable',
            old_name='lass_obj',
            new_name='clsobj',
        ),
    ]
| [
"anandajith911@gmail.com"
] | anandajith911@gmail.com |
fcaecbff0852a2ba436801351884668e1010e044 | d2d6886bde409f7631fc7acd2d72acd046355a0d | /alyBlog/utils/captcha/captcha.py | 63d8026a3f5bf9a9aed22a03af731bc9b4808c4e | [
"WTFPL"
] | permissive | Hx-someone/aly-blog | 611202828e4984050f6cf982114df4641524cd68 | e0205777d2ff1642fde5741a5b5c1b06ad675001 | refs/heads/master | 2021-04-16T08:24:13.695843 | 2020-04-05T06:39:02 | 2020-04-05T06:39:02 | 249,340,860 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,755 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# refer to `https://bitbucket.org/akorn/wheezy.captcha`
import random
import string
import os.path
from io import BytesIO
from PIL import Image
from PIL import ImageFilter
from PIL.ImageDraw import Draw
from PIL.ImageFont import truetype
class Bezier:
    """Caches Bernstein-polynomial coefficient tables for Bezier curves."""

    def __init__(self):
        # 21 evenly spaced parameter values t in [0, 1].
        self.tsequence = tuple(step / 20.0 for step in range(21))
        # Cache: n -> coefficient table computed by make_bezier(n).
        self.beziers = {}

    def pascal_row(self, n):
        """Return row *n* of Pascal's triangle (n + 1 binomial coefficients)."""
        row = [1]
        running, numerator = 1, n
        # Build the first half multiplicatively: C(n,k) = C(n,k-1) * (n-k+1) / k.
        for denominator in range(1, n // 2 + 1):
            running *= numerator
            running /= denominator
            numerator -= 1
            row.append(running)
        # Mirror the first half to complete the symmetric row.
        mirror = row[:-1] if n & 1 == 0 else row
        row.extend(reversed(mirror))
        return row

    def make_bezier(self, n):
        """Return (and cache) the Bernstein coefficient table for *n* control points.

        The table has one row per cached t value; row entries are
        C(n-1, i) * t**i * (1-t)**(n-1-i).
        """
        if n in self.beziers:
            return self.beziers[n]
        combinations = self.pascal_row(n - 1)
        table = []
        for t in self.tsequence:
            tpowers = [t ** i for i in range(n)]
            upowers = [(1 - t) ** i for i in range(n - 1, -1, -1)]
            table.append([c * a * b
                          for c, a, b in zip(combinations, tpowers, upowers)])
        self.beziers[n] = table
        return table
class Captcha(object):
    """Generates distorted-text captcha images with PIL.

    Used as a module-level singleton via Captcha.instance(); call
    generate_captcha() to obtain a (solution_text, image_bytes) pair.
    """

    def __init__(self):
        self._bezier = Bezier()
        # Directory of this module; font files are resolved relative to it.
        self._dir = os.path.dirname(__file__)
        # self._captcha_path = os.path.join(self._dir, '..', 'static', 'captcha')

    @staticmethod
    def instance():
        # Lazily create and reuse one shared Captcha object.
        if not hasattr(Captcha, "_instance"):
            Captcha._instance = Captcha()
        return Captcha._instance

    def initialize(self, width=200, height=75, color=None, text=None, fonts=None):
        """Reset per-captcha state: text, fonts, canvas size and ink color."""
        # self.image = Image.new('RGB', (width, height), (255, 255, 255))
        # Four characters sampled from uppercase letters (weighted double)
        # plus the digits 3-9.
        self._text = text if text else random.sample(string.ascii_uppercase + string.ascii_uppercase + '3456789', 4)
        self.fonts = fonts if fonts else \
            [os.path.join(self._dir, 'fonts', font) for font in ['Arial.ttf', 'Georgia.ttf', 'actionj.ttf']]
        self.width = width
        self.height = height
        # Dark-ish random ink with a high opacity channel.
        self._color = color if color else self.random_color(0, 200, random.randint(220, 255))

    @staticmethod
    def random_color(start, end, opacity=None):
        """Random RGB (or RGBA when *opacity* is given) with channels in [start, end]."""
        red = random.randint(start, end)
        green = random.randint(start, end)
        blue = random.randint(start, end)
        if opacity is None:
            return red, green, blue
        return red, green, blue, opacity

    # draw image
    def background(self, image):
        """Fill the whole canvas with a light random color."""
        Draw(image).rectangle([(0, 0), image.size], fill=self.random_color(238, 255))
        return image

    @staticmethod
    def smooth(image):
        """Apply a slight smoothing filter to soften aliasing artifacts."""
        return image.filter(ImageFilter.SMOOTH)

    def curve(self, image, width=4, number=6, color=None):
        """Draw a random Bezier stroke across the image as a distraction line."""
        dx, height = image.size
        dx /= number  # NOTE(review): true division under Python 3 gives float coords
        # Random control points spread evenly across the width.
        path = [(dx * i, random.randint(0, height))
                for i in range(1, number)]
        bcoefs = self._bezier.make_bezier(number - 1)
        points = []
        # Evaluate the Bezier polynomial at each cached parameter value.
        for coefs in bcoefs:
            points.append(tuple(sum([coef * p for coef, p in zip(coefs, ps)])
                                for ps in zip(*path)))
        Draw(image).line(points, fill=color if color else self._color, width=width)
        return image

    def noise(self, image, number=50, level=2, color=None):
        """Scatter *number* short horizontal dashes inside a 10% margin."""
        width, height = image.size
        dx = width / 10
        width -= dx
        dy = height / 10
        height -= dy
        draw = Draw(image)
        for i in range(number):
            x = int(random.uniform(dx, width))
            y = int(random.uniform(dy, height))
            draw.line(((x, y), (x + level, y)), fill=color if color else self._color, width=level)
        return image

    def text(self, image, fonts, font_sizes=None, drawings=None, squeeze_factor=0.75, color=None):
        """Render each captcha character, distort it, and paste it onto *image*.

        *drawings* names methods of this class (e.g. 'warp', 'rotate',
        'offset') applied to every character image; squeeze_factor < 1
        overlaps adjacent glyphs.
        """
        color = color if color else self._color
        # One PIL font object per (font file, size) combination.
        fonts = tuple([truetype(name, size)
                       for name in fonts
                       for size in font_sizes or (65, 70, 75)])
        draw = Draw(image)
        char_images = []
        for c in self._text:
            font = random.choice(fonts)
            # NOTE(review): Draw.textsize was removed in Pillow >= 10 -- confirm
            # the pinned Pillow version still provides it.
            c_width, c_height = draw.textsize(c, font=font)
            char_image = Image.new('RGB', (c_width, c_height), (0, 0, 0))
            char_draw = Draw(char_image)
            char_draw.text((0, 0), c, font=font, fill=color)
            char_image = char_image.crop(char_image.getbbox())
            # Apply each named distortion in order.
            for drawing in drawings:
                d = getattr(self, drawing)
                char_image = d(char_image)
            char_images.append(char_image)
        width, height = image.size
        # Horizontal start position so the squeezed glyph run is centered.
        offset = int((width - sum(int(i.size[0] * squeeze_factor)
                                  for i in char_images[:-1]) -
                      char_images[-1].size[0]) / 2)
        for char_image in char_images:
            c_width, c_height = char_image.size
            # Brightened grayscale copy doubles as the paste mask.
            mask = char_image.convert('L').point(lambda i: i * 1.97)
            image.paste(char_image,
                        (offset, int((height - c_height) / 2)),
                        mask)
            offset += int(c_width * squeeze_factor)
        return image

    # draw text
    @staticmethod
    def warp(image, dx_factor=0.27, dy_factor=0.21):
        """Random quadrilateral (shear/perspective-like) distortion of *image*."""
        width, height = image.size
        dx = width * dx_factor
        dy = height * dy_factor
        x1 = int(random.uniform(-dx, dx))
        y1 = int(random.uniform(-dy, dy))
        x2 = int(random.uniform(-dx, dx))
        y2 = int(random.uniform(-dy, dy))
        # Enlarged canvas so the warped quad cannot clip the glyph.
        image2 = Image.new('RGB',
                           (width + abs(x1) + abs(x2),
                            height + abs(y1) + abs(y2)))
        image2.paste(image, (abs(x1), abs(y1)))
        width2, height2 = image2.size
        return image2.transform(
            (width, height), Image.QUAD,
            (x1, y1,
             -x1, height2 - y2,
             width2 + x2, height2 + y2,
             width2 - x2, -y1))

    @staticmethod
    def offset(image, dx_factor=0.1, dy_factor=0.2):
        """Shift *image* right/down by a random fraction of its size."""
        width, height = image.size
        dx = int(random.random() * width * dx_factor)
        dy = int(random.random() * height * dy_factor)
        image2 = Image.new('RGB', (width + dx, height + dy))
        image2.paste(image, (dx, dy))
        return image2

    @staticmethod
    def rotate(image, angle=25):
        """Rotate by a random angle in [-angle, +angle] degrees."""
        return image.rotate(
            random.uniform(-angle, angle), Image.BILINEAR, expand=1)

    def captcha(self, path=None, fmt='JPEG'):
        """Compose the final captcha image.

        Args:
            path: unused in this implementation; kept for API compatibility.
            fmt: image format, PNG / JPEG.
        Returns:
            A tuple (text, bytes): the solution string and the encoded image.
        """
        image = Image.new('RGB', (self.width, self.height), (255, 255, 255))
        image = self.background(image)
        image = self.text(image, self.fonts, drawings=['warp', 'rotate', 'offset'])
        image = self.curve(image)
        image = self.noise(image)
        image = self.smooth(image)
        text = "".join(self._text)
        out = BytesIO()
        image.save(out, format=fmt)
        return text, out.getvalue()

    def generate_captcha(self):
        """Convenience wrapper: reset state and return (text, image_bytes)."""
        self.initialize()
        return self.captcha("")
captcha = Captcha.instance()
if __name__ == '__main__':
print(captcha.generate_captcha())
| [
"hx120841@163.com"
] | hx120841@163.com |
05d3a4af0a6b318c8e5709e5275ec443b604d5b8 | 6437af616b0752b24e1b62bc98d302b2e04a7c85 | /pagnition/getUrlMainPage.py | 02acf2acba9ee6934e0f89b3c250939f51aa5607 | [] | no_license | kangqiwang/imageWebCrawler | 4c7ebc0c93fd52b27f08a0f79302885d95f53a6e | 76fe21802a5a03638e324e6d18fe5698a69aba70 | refs/heads/master | 2022-05-31T00:51:39.649907 | 2019-08-28T15:06:37 | 2019-08-28T15:06:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,108 | py | import requests
from requests_html import HTMLSession
import sys
from urllib.parse import urlparse
import pandas as pd
from elasticsearch import Elasticsearch
import re
def getUrl():
    """Crawl same-host links up to three levels deep from https://sys.argv[1].

    At each level it keeps absolute links whose hostname matches the start
    URL, adds an https:// twin for every http:// link, de-duplicates while
    preserving order, and finally writes the union of all levels to
    pagnition/generateUrl/mattel.csv.
    """
    url = 'https://' + sys.argv[1]
    session = HTMLSession()
    response = session.get(url)
    # Level 1: links on the start page that stay on the same host.
    urltem = []
    for i in response.html.absolute_links:
        try:
            domain = urlparse(i).hostname
        except:  # NOTE(review): bare except hides real errors; narrow if possible
            domain = ''
        if domain == urlparse(url).hostname:
            urltem.append(i)
    # Add an https twin for every http link, then de-duplicate keeping order.
    urltem = urltem + [i.replace('http:', 'https:') for i in urltem if i.startswith('http:')]
    print(urltem)
    urltem = list(dict.fromkeys(urltem))
    print('finish the one ' + str(len(urltem)))
    # Level 2: fetch each level-1 page and harvest same-host links again.
    urldeep1 = []
    for i in urltem:
        if not i.startswith('//'):  # skip protocol-relative URLs
            response = session.get(i)
            for j in response.html.absolute_links:
                try:
                    domain = urlparse(j).hostname
                except:
                    domain = ''
                if domain == urlparse(url).hostname:
                    urldeep1.append(j)
    urldeep1 = urldeep1 + [i.replace('http:', 'https:') for i in urldeep1 if i.startswith('http:')]
    urldeep1 = list(dict.fromkeys(urldeep1))
    print('finish the two ' + str(len(urldeep1)))
    # Level 3: one more hop from the level-2 pages.
    urldeep2 = []
    for i in urldeep1:
        if not i.startswith('//'):
            response = session.get(i)
            for j in response.html.absolute_links:
                try:
                    domain = urlparse(j).hostname
                except:
                    domain = ''
                if domain == urlparse(url).hostname:
                    urldeep2.append(j)
    urldeep2 = urldeep2 + [i.replace('http:', 'https:') for i in urldeep2 if i.startswith('http:')]
    urldeep2 = list(dict.fromkeys(urldeep2))
    print('finish the three ' + str(len(urldeep2)))
    # Union of all three levels, order-preserving de-dup, saved as one CSV column.
    # NOTE(review): the output path is hard-coded.
    savedf = pd.Series(list(dict.fromkeys(urltem + urldeep1 + urldeep2)))
    savedf.to_csv("pagnition/generateUrl/mattel.csv", index=False)
def siteMap():
    """Expand a sitemap index into a flat list of page URLs and save it to CSV."""
    # NOTE(review): this Elasticsearch client is never used below.
    es = Elasticsearch([{
        'host' : 'localhost',
        'port' : '9200'
    }])
    urlCollection = []
    url = 'http://' + sys.argv[1] + '/robots.txt'
    # NOTE(review): the line above is immediately overwritten by this
    # hard-coded sitemap index -- looks like leftover debugging; confirm.
    url = 'http://www.petsathome.com/sm/sitemapindex.xml'
    session = HTMLSession()
    response = session.get(url)
    # Pull every <loc> URL out of the sitemap index.
    urlList = re.findall("(?P<url>https?://[^\s]+)</loc>", response.text)
    for i in urlList:
        if ".xml" in i:
            # Nested sitemap: fetch it and collect its <loc> entries too.
            session = HTMLSession()
            response = session.get(i)
            if response.status_code == 200:
                urlCollection = urlCollection + re.findall("(?P<url>https?://[^\s]+)</loc>", response.text)
        else:
            urlCollection.append(i)
    print(urlCollection)
    savedf = pd.Series(urlCollection)
    savedf.to_csv("pagnition/output/zazzle.csv", index=False)
# print(response.text)
# url = 'http://http://www.petsathome.com/' # url from to crawl
# logfile = 'errlog.log' # path to logfile
# oformat = 'xml' # output format
# crawl = pysitemap.Crawler(url=url, logfile=logfile, oformat=oformat)
# crawl.crawl()
siteMap()
# getUrl()
| [
"kang@sourcemogul.com"
] | kang@sourcemogul.com |
0a96b9e23f3959b90612fa56da5231472a211679 | fe2f683069e70cab827f86ddb6c880dce18e4249 | /chap04/polygon.py | b11ac6cf785097b6c6976527c25a8433983306f0 | [] | no_license | tchoetso/SoftwareDesign | 8f3b77944b47928807a233ce7ec9e7bcfaee719f | 4f8ba7f7950a68f0ba400b632ef23a55ba7af7ab | refs/heads/master | 2021-01-17T21:52:36.643619 | 2013-12-15T05:56:22 | 2013-12-15T05:56:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | #Author: Tenzin Choetso
# Chapter 4
from swampy.TurtleWorld import *
import math
world = TurtleWorld()
bob = Turtle()  # shared turtle; must exist at module scope for the calls below
bob.delay = 0.5  # pause between drawing steps (animation speed)
def square(t, length):
    """Draw a square with side *length* using turtle *t* (Python 2 / swampy)."""
    print t
    for i in range(4):
        fd(t, length)
        lt(t)  # default left turn closes each 90-degree corner of the square
def polygon(t,length,n):
print t
for i in range(n):
fd (t,length)
lt(t,360/n)
def circle(t, r):
    """Approximate a circle of radius *r* as an n-gon with n = r/2 sides."""
    print t
    n = int(r/2)
    length = (2*math.pi*r)/n  # side length = circumference / number of sides
    polygon(t, length, n)
def arc(t, r, angle):
    """Draw an arc of radius *r* spanning *angle* degrees with turtle *t*."""
    print t
    length = (2*math.pi*r)*angle/360  # arc length for the requested sweep
    n = int(length/3)+1               # one segment per ~3 pixels, at least one
    steplength = length/n
    stepangle = float(angle)/n        # float division: exact per-step turn
    for i in range(n):
        fd(t, steplength)
        lt(t, stepangle)
#arc(bob,50,180)
#circle(bob,50)
polygon(bob,75,6)
| [
"tchoetso920@gmail.com"
] | tchoetso920@gmail.com |
285c58cff6bab461a2a96c005792abe48c41134e | a713542969e64a251266e215f55ef7f753d83874 | /src/base/nets/vgg.py | 90c96d662c8a34a3a982255da29f7d2a93f35b27 | [] | no_license | czczup/MusesStyle | 874aeae17d1eb017c6f7de012839867371d0e0a6 | 0f6f767c5e58e16101fa63e4b524da6dffd2381c | refs/heads/master | 2022-04-16T12:29:28.051394 | 2020-04-08T13:45:42 | 2020-04-08T13:45:42 | 166,505,943 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,258 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains model definitions for versions of the Oxford VGG network.
These model definitions were introduced in the following technical report:
Very Deep Convolutional Networks For Large-Scale Image Recognition
Karen Simonyan and Andrew Zisserman
arXiv technical report, 2015
PDF: http://arxiv.org/pdf/1409.1556.pdf
ILSVRC 2014 Slides: http://www.robots.ox.ac.uk/~karen/pdf/ILSVRC_2014.pdf
CC-BY-4.0
More information can be obtained from the VGG website:
www.robots.ox.ac.uk/~vgg/research/very_deep/
Usage:
with slim.arg_scope(vgg.vgg_arg_scope()):
outputs, end_points = vgg.vgg_a(inputs)
with slim.arg_scope(vgg.vgg_arg_scope()):
outputs, end_points = vgg.vgg_16(inputs)
@@vgg_a
@@vgg_16
@@vgg_19
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
def vgg_arg_scope(weight_decay=0.0005):
    """Defines the VGG arg scope.

    Args:
      weight_decay: The l2 regularization coefficient.

    Returns:
      An arg_scope.
    """
    # All conv/fc layers share ReLU activations, L2 weight regularization and
    # zero-initialized biases; convolutions additionally use SAME padding.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        biases_initializer=tf.zeros_initializer()):
        with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:
            return arg_sc
def vgg_a(inputs,
          num_classes=1000,
          is_training=True,
          dropout_keep_prob=0.5,
          spatial_squeeze=True,
          scope='vgg_a'):
    """Oxford Net VGG 11-Layers version A Example.

    Note: All the fully_connected layers have been transformed to conv2d layers.
    To use in classification mode, resize input to 224x224.

    Args:
      inputs: a tensor of size [batch_size, height, width, channels].
      num_classes: number of predicted classes.
      is_training: whether or not the model is being trained.
      dropout_keep_prob: the probability that activations are kept in the dropout
        layers during training.
      spatial_squeeze: whether or not should squeeze the spatial dimensions of the
        outputs. Useful to remove unnecessary dimensions for classification.
      scope: Optional scope for the variables.

    Returns:
      the last op containing the log predictions and end_points dict.
    """
    with tf.variable_scope(scope, 'vgg_a', [inputs]) as sc:
        end_points_collection = sc.name+'_end_points'
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                            outputs_collections=end_points_collection):
            # Five 3x3-conv stages (1-1-2-2-2 layers), each ending in 2x2 max-pool.
            net = slim.repeat(inputs, 1, slim.conv2d, 64, [3, 3], scope='conv1')
            net = slim.max_pool2d(net, [2, 2], scope='pool1')
            net = slim.repeat(net, 1, slim.conv2d, 128, [3, 3], scope='conv2')
            net = slim.max_pool2d(net, [2, 2], scope='pool2')
            net = slim.repeat(net, 2, slim.conv2d, 256, [3, 3], scope='conv3')
            net = slim.max_pool2d(net, [2, 2], scope='pool3')
            net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv4')
            net = slim.max_pool2d(net, [2, 2], scope='pool4')
            net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv5')
            net = slim.max_pool2d(net, [2, 2], scope='pool5')
            # Use conv2d instead of fully_connected layers.
            net = slim.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
            net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                               scope='dropout6')
            net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
            net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                               scope='dropout7')
            # Classifier head: 1x1 conv with no activation or normalizer.
            net = slim.conv2d(net, num_classes, [1, 1],
                              activation_fn=None,
                              normalizer_fn=None,
                              scope='fc8')
            # Convert end_points_collection into a end_point dict.
            end_points = slim.utils.convert_collection_to_dict(end_points_collection)
            if spatial_squeeze:
                # Drop the 1x1 spatial dims so logits are [batch, num_classes].
                net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
                end_points[sc.name+'/fc8'] = net
            return net, end_points
vgg_a.default_image_size = 224
def vgg_16(inputs,
           num_classes=1000,
           is_training=True,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           scope='vgg_16'):
    """Oxford Net VGG 16-Layers version D Example.

    Note: All the fully_connected layers have been transformed to conv2d layers.
    To use in classification mode, resize input to 224x224.

    Args:
      inputs: a tensor of size [batch_size, height, width, channels].
      num_classes: number of predicted classes.
      is_training: whether or not the model is being trained.
      dropout_keep_prob: the probability that activations are kept in the dropout
        layers during training.
      spatial_squeeze: whether or not should squeeze the spatial dimensions of the
        outputs. Useful to remove unnecessary dimensions for classification.
      scope: Optional scope for the variables.

    Returns:
      the last op containing the log predictions and end_points dict.
    """
    with tf.variable_scope(scope, 'vgg_16', [inputs]) as sc:
        end_points_collection = sc.name+'_end_points'
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                            outputs_collections=end_points_collection):
            # Five 3x3-conv stages (2-2-3-3-3 layers), each ending in 2x2 max-pool.
            net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
            net = slim.max_pool2d(net, [2, 2], scope='pool1')
            net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
            net = slim.max_pool2d(net, [2, 2], scope='pool2')
            net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
            net = slim.max_pool2d(net, [2, 2], scope='pool3')
            net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
            net = slim.max_pool2d(net, [2, 2], scope='pool4')
            net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
            net = slim.max_pool2d(net, [2, 2], scope='pool5')
            # Use conv2d instead of fully_connected layers.
            net = slim.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
            net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                               scope='dropout6')
            net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
            net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                               scope='dropout7')
            # Classifier head: 1x1 conv with no activation or normalizer.
            net = slim.conv2d(net, num_classes, [1, 1],
                              activation_fn=None,
                              normalizer_fn=None,
                              scope='fc8')
            # Convert end_points_collection into a end_point dict.
            end_points = slim.utils.convert_collection_to_dict(end_points_collection)
            if spatial_squeeze:
                # Drop the 1x1 spatial dims so logits are [batch, num_classes].
                net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
                end_points[sc.name+'/fc8'] = net
            return net, end_points
vgg_16.default_image_size = 224
def vgg_19(inputs,
           num_classes=1000,
           is_training=True,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           scope='vgg_19'):
    """Oxford Net VGG 19-Layers version E Example.

    Note: All the fully_connected layers have been transformed to conv2d layers.
    To use in classification mode, resize input to 224x224.

    Args:
      inputs: a tensor of size [batch_size, height, width, channels].
      num_classes: number of predicted classes.
      is_training: whether or not the model is being trained.
      dropout_keep_prob: the probability that activations are kept in the dropout
        layers during training.
      spatial_squeeze: whether or not should squeeze the spatial dimensions of the
        outputs. Useful to remove unnecessary dimensions for classification.
      scope: Optional scope for the variables.

    Returns:
      the last op containing the log predictions and end_points dict.
    """
    with tf.variable_scope(scope, 'vgg_19', [inputs]) as sc:
        end_points_collection = sc.name+'_end_points'
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                            outputs_collections=end_points_collection):
            # Five 3x3-conv stages (2-2-4-4-4 layers), each ending in 2x2 max-pool.
            net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
            net = slim.max_pool2d(net, [2, 2], scope='pool1')
            net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
            net = slim.max_pool2d(net, [2, 2], scope='pool2')
            net = slim.repeat(net, 4, slim.conv2d, 256, [3, 3], scope='conv3')
            net = slim.max_pool2d(net, [2, 2], scope='pool3')
            net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv4')
            net = slim.max_pool2d(net, [2, 2], scope='pool4')
            net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv5')
            net = slim.max_pool2d(net, [2, 2], scope='pool5')
            # Use conv2d instead of fully_connected layers.
            net = slim.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
            net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                               scope='dropout6')
            net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
            net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                               scope='dropout7')
            # Classifier head: 1x1 conv with no activation or normalizer.
            net = slim.conv2d(net, num_classes, [1, 1],
                              activation_fn=None,
                              normalizer_fn=None,
                              scope='fc8')
            # Convert end_points_collection into a end_point dict.
            end_points = slim.utils.convert_collection_to_dict(end_points_collection)
            if spatial_squeeze:
                # Drop the 1x1 spatial dims so logits are [batch, num_classes].
                net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
                end_points[sc.name+'/fc8'] = net
            return net, end_points
vgg_19.default_image_size = 224
vgg_19.default_image_size = 224
# Alias
vgg_d = vgg_16
vgg_e = vgg_19
| [
"wztxy89@163.com"
] | wztxy89@163.com |
4e641a5c9104fc7eb202447df307f6e6b74bd146 | a441fda200674f0f98c6f1c27126a8f4ab8d0871 | /Algorithmic Toolbox/1.1 sum of digits.py | eacc6312815f77bc8be2e9e26c97dfdb79cacbf7 | [] | no_license | June24-Wu/Coursera-Bilibili_Course_Source_Code | 222d6d852ffe80df0b23bcb6ffd94d37c5864523 | 3ef9860daf508d96b4e7be4bc37b26c325f4c1b4 | refs/heads/master | 2023-04-23T17:12:29.885591 | 2021-05-06T03:36:00 | 2021-05-06T03:36:00 | 356,558,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52 | py | #Python 3
# Read one line with two whitespace-separated integers and print their sum.
left, right = input().split()
print(int(left) + int(right))
"932187556@qq.com"
] | 932187556@qq.com |
d581e305ac079f2af1725f50e4bd33b9987b30cf | 79b1d3d8ffbda5297fff6fefe2528e303bf2110a | /RSGGenFragment/RSToQQ/RSGravitonToQuarkQuark_W-0p1_M_3250_TuneCUETP8M1_13TeV_pythia8_cfi.py | 4743fb46d70ff29d63a01653fe65bceda8571ccf | [] | no_license | yguler/MCFragments-1 | 25745a043653d02be3a4c242c1a85af221fc34b3 | 7c4d10ee59e00f997221109bf006819fd645b92f | refs/heads/master | 2021-01-13T14:09:12.811554 | 2016-12-11T15:57:37 | 2016-12-11T15:57:37 | 76,184,433 | 0 | 0 | null | 2016-12-11T15:59:22 | 2016-12-11T15:59:22 | null | UTF-8 | Python | false | false | 1,323 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
# Pythia8 configuration: qqbar -> RS graviton (particle id 5100039) with
# m0 = 3250 GeV, forced to decay only to quark pairs (d, u, s, c, b).
generator = cms.EDFilter("Pythia8GeneratorFilter",
    comEnergy = cms.double(13000.0),  # 13 TeV centre-of-mass energy
    crossSection = cms.untracked.double(0.00000782),
    filterEfficiency = cms.untracked.double(1),
    maxEventsToPrint = cms.untracked.int32(0),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    pythiaPylistVerbosity = cms.untracked.int32(1),
    PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CUEP8M1SettingsBlock,
        processParameters = cms.vstring(
            'ExtraDimensionsG*:ffbar2G* = on',
            # Graviton coupling; presumably corresponds to the W-0p1 (width 0.1)
            # point in the sample name -- confirm against the request card.
            'ExtraDimensionsG*:kappaMG = 1.439532822',
            '5100039:m0 = 3250',
            '5100039:onMode = off',
            '5100039:onIfAny = 1 2 3 4 5'
        ),
        parameterSets = cms.vstring('pythia8CommonSettings',
                                    'pythia8CUEP8M1Settings',
                                    'processParameters',
        )
    )
)

ProductionFilterSequence = cms.Sequence(generator)
| [
"emine.gurpinar@cern.ch"
] | emine.gurpinar@cern.ch |
326f4d0adf929a5b6d06809845942caa89522e42 | 9cc882126c7cc511d758d567d5801bd3351a9769 | /notes/migrations/0002_auto_20180828_0947.py | d7d4c136ea9cf8db5fd7495970b5a3405c279871 | [] | no_license | bradencodes/Intro-Django | 5357cb3fcbc10c021c9314ef35620976cde3c277 | e31ad2d67b89db30c55c2ee7b99167816866c005 | refs/heads/master | 2020-03-27T09:16:46.035726 | 2018-09-06T20:18:13 | 2018-09-06T20:18:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | # Generated by Django 2.1 on 2018-08-28 16:47
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add created_at / last_modified timestamp fields to the Note model."""

    dependencies = [
        ('notes', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='note',
            name='created_at',
            # auto_now_add needs a one-off default for pre-existing rows;
            # Django injected timezone.now and marked it as not preserved.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='note',
            name='last_modified',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
| [
"divebombwalker@gmail.com"
] | divebombwalker@gmail.com |
59c831653985037d0c1a7ab69a9c140208d718d9 | 12fc465e9530efd7dd7fd3e0396371df21d43938 | /mailer/mail/migrations/0001_initial.py | 5c343e7fb02409473adc14d1caea43aaef501cd1 | [] | no_license | SMEETT/Mail-CS50Web | 5731953efd676ca01acac196e2e42989f06179f7 | b19d828ee7f48323f3a6ac533f0a2a9ad3b0734e | refs/heads/master | 2023-01-16T04:04:34.110179 | 2020-11-23T11:59:39 | 2020-11-23T11:59:39 | 284,594,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,874 | py | # Generated by Django 3.0.8 on 2020-08-03 15:48
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration for the mail app: creates a custom
    ``User`` model (standard ``AbstractUser`` fields) and an ``Email`` model
    with owner, sender and recipient relations."""

    initial = True
    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        migrations.CreateModel(
            name='Email',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subject', models.CharField(max_length=255)),
                ('body', models.TextField(blank=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('read', models.BooleanField(default=False)),
                ('archived', models.BooleanField(default=False)),
                # recipients/sender describe the message itself; `user` is the
                # mailbox owner, so the same message appears once per mailbox
                ('recipients', models.ManyToManyField(related_name='emails_received', to=settings.AUTH_USER_MODEL)),
                ('sender', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='emails_sent', to=settings.AUTH_USER_MODEL)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='emails', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"tbr@tutamail.com"
] | tbr@tutamail.com |
651829bd3adb0e1664849bd3a398cbaf87f47495 | 81f7d86117e5701673f2c302544c4081bcd66067 | /pytorch_forecasting/utils.py | 92c1bbbf8ede65c4045eb66799d10202a39f32cd | [
"MIT"
] | permissive | kashif/pytorch-forecasting | 3bc46cf4bdf7248201d35a17483927188cbce122 | a60367a5014c972cd648f901b7d1a7caa95e8cd1 | refs/heads/master | 2023-06-05T07:27:26.390368 | 2021-06-16T06:50:41 | 2021-06-16T07:21:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,877 | py | """
Helper functions for PyTorch forecasting
"""
from contextlib import redirect_stdout
import os
from typing import Any, Callable, Dict, List, Tuple, Union
import torch
from torch.fft import irfft, rfft
import torch.nn.functional as F
from torch.nn.utils import rnn
def integer_histogram(
    data: torch.LongTensor, min: Union[None, int] = None, max: Union[None, int] = None
) -> torch.Tensor:
    """
    Create a histogram of integers over a predefined range.

    Args:
        data: integer tensor to count
        min: lower bound of the histogram; inferred from ``data`` by default
        max: upper bound of the histogram; inferred from ``data`` by default

    Returns:
        histogram tensor of length ``max - min + 1``
    """
    values, value_counts = torch.unique(data, return_counts=True)
    lower = values.min() if min is None else min
    upper = values.max() if max is None else max
    # start from an all-zero histogram and write each unique value's count
    # at its offset relative to the lower bound
    empty = torch.zeros(upper - lower + 1, dtype=torch.long, device=data.device)
    return empty.scatter(dim=0, index=values - lower, src=value_counts)
def groupby_apply(
    keys: torch.Tensor, values: torch.Tensor, bins: int = 95, reduction: str = "mean", return_histogram: bool = False
) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
    """
    Groupby-apply for torch tensors.

    Args:
        keys: tensor of group ids (``0`` to ``bins``)
        values: values to aggregate - same size as keys
        bins: total number of groups
        reduction: either "mean" or "sum"
        return_histogram: if True, also return counts per group

    Returns:
        tensor of size ``bins`` with aggregated values, optionally together
        with the per-group counts
    """
    reducers = {"mean": torch.mean, "sum": torch.sum}
    if reduction not in reducers:
        raise ValueError(f"Unknown reduction '{reduction}'")
    reduce = reducers[reduction]
    uniques, counts = keys.unique(return_counts=True)
    # keys are assumed sorted by group so consecutive runs form the groups;
    # split values into per-group chunks and reduce each chunk
    chunks = torch.split_with_sizes(values, tuple(counts))
    aggregated = torch.stack([reduce(chunk) for chunk in chunks])
    reduced = torch.zeros(bins, dtype=values.dtype, device=values.device).scatter(dim=0, index=uniques, src=aggregated)
    if not return_histogram:
        return reduced
    hist = torch.zeros(bins, dtype=torch.long, device=values.device).scatter(dim=0, index=uniques, src=counts)
    return reduced, hist
def profile(function: Callable, profile_fname: str, filter: str = "", period=0.0001, **kwargs):
    """
    Profile a given function with ``vmprof``.
    Args:
        function (Callable): function to profile
        profile_fname (str): path where to save profile (`.txt` file will be saved with line profile)
        filter (str, optional): filter name (e.g. module name) to filter profile. Defaults to "".
        period (float, optional): frequency of calling profiler in seconds. Defaults to 0.0001.
        **kwargs: forwarded verbatim to ``function``
    """
    # vmprof is an optional third-party dependency - import lazily so that
    # the module can be used without it as long as profile() is not called
    import vmprof
    from vmprof.show import LinesPrinter
    # profiler config
    with open(profile_fname, "wb+") as fd:
        # start profiler
        vmprof.enable(fd.fileno(), lines=True, period=period)
        # run function
        function(**kwargs)
        # stop profiler
        vmprof.disable()
    # write report to disk
    # NOTE(review): "lines" is looked up in the same kwargs that were already
    # forwarded to ``function`` above - it doubles as a function argument and
    # a profiler flag; confirm this double-duty is intended
    if kwargs.get("lines", True):
        with open(f"{os.path.splitext(profile_fname)[0]}.txt", "w") as f:
            with redirect_stdout(f):
                LinesPrinter(filter=filter).show(profile_fname)
def get_embedding_size(n: int, max_size: int = 100) -> int:
    """
    Determine an empirically good embedding size (formula taken from fastai).

    Args:
        n (int): number of classes
        max_size (int, optional): maximum embedding size. Defaults to 100.

    Returns:
        int: embedding size
    """
    # tiny categoricals (binary or smaller) get a single dimension
    if n <= 2:
        return 1
    return min(round(1.6 * n ** 0.56), max_size)
def create_mask(size: int, lengths: torch.LongTensor, inverse: bool = False) -> torch.BoolTensor:
    """
    Create boolean masks of shape ``len(lengths) x size``.

    An entry at (i, j) is True if ``lengths[i] > j`` when ``inverse=True``,
    otherwise True where ``j >= lengths[i]``.

    Args:
        size (int): size of second dimension
        lengths (torch.LongTensor): tensor of lengths
        inverse (bool, optional): If true, boolean mask is inverted. Defaults to False.

    Returns:
        torch.BoolTensor: mask
    """
    # broadcast a row of positions (1 x size) against a column of
    # lengths (batch x 1)
    positions = torch.arange(size, device=lengths.device).unsqueeze(0)
    bounds = lengths.unsqueeze(-1)
    if inverse:  # mark positions that hold values
        return positions < bounds
    # mark padded positions (no values)
    return positions >= bounds
# memoizes next_fast_len results across calls
_NEXT_FAST_LEN = {}


def next_fast_len(size):
    """
    Returns the next largest number ``n >= size`` whose prime factors are all
    2, 3, or 5. These sizes are efficient for fast fourier transforms.
    Equivalent to :func:`scipy.fftpack.next_fast_len`.
    Implementation from pyro
    :param int size: A positive number.
    :returns: A possibly larger number.
    :rtype int:
    """
    cached_result = _NEXT_FAST_LEN.get(size)
    if cached_result is not None:
        return cached_result
    assert isinstance(size, int) and size > 0
    candidate = size
    while True:
        # strip out all factors of 2, 3 and 5; if nothing remains,
        # the candidate is 5-smooth and therefore FFT-friendly
        residue = candidate
        for factor in (2, 3, 5):
            while residue % factor == 0:
                residue //= factor
        if residue == 1:
            _NEXT_FAST_LEN[size] = candidate
            return candidate
        candidate += 1
def autocorrelation(input, dim=0):
    """
    Computes the autocorrelation of samples at dimension ``dim``.
    Reference: https://en.wikipedia.org/wiki/Autocorrelation#Efficient_computation
    Implementation copied form `pyro <https://github.com/pyro-ppl/pyro/blob/dev/pyro/ops/stats.py>`_.
    :param torch.Tensor input: the input tensor.
    :param int dim: the dimension to calculate autocorrelation.
    :returns torch.Tensor: autocorrelation of ``input``.
    """
    # Adapted from Stan implementation
    # https://github.com/stan-dev/math/blob/develop/stan/math/prim/mat/fun/autocorrelation.hpp
    N = input.size(dim)
    # pad to a 5-smooth length so the FFT is fast; 2*M avoids circular wrap-around
    M = next_fast_len(N)
    M2 = 2 * M
    # transpose dim with -1 for Fourier transform
    input = input.transpose(dim, -1)
    # centering and padding x
    centered_signal = input - input.mean(dim=-1, keepdim=True)
    # Fourier transform
    freqvec = torch.view_as_real(rfft(centered_signal, n=M2))
    # take square of magnitude of freqvec (or freqvec x freqvec*)
    freqvec_gram = freqvec.pow(2).sum(-1)
    # inverse Fourier transform (of the power spectrum -> autocovariance)
    autocorr = irfft(freqvec_gram, n=M2)
    # truncate and normalize the result, then transpose back to original shape
    autocorr = autocorr[..., :N]
    # divide each lag by the number of overlapping terms (N, N-1, ..., 1)
    autocorr = autocorr / torch.tensor(range(N, 0, -1), dtype=input.dtype, device=input.device)
    # normalize so that lag 0 equals 1
    autocorr = autocorr / autocorr[..., :1]
    return autocorr.transpose(dim, -1)
def unpack_sequence(sequence: Union[torch.Tensor, rnn.PackedSequence]) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Unpack an RNN sequence.

    Args:
        sequence (Union[torch.Tensor, rnn.PackedSequence]): RNN packed sequence or tensor of which
            first index are samples and second are timesteps

    Returns:
        Tuple[torch.Tensor, torch.Tensor]: tuple of unpacked sequence and length of samples
    """
    if not isinstance(sequence, rnn.PackedSequence):
        # plain tensor: every sample spans the full time dimension
        lengths = torch.ones(sequence.size(0), device=sequence.device, dtype=torch.long) * sequence.size(1)
        return sequence, lengths
    padded, lengths = rnn.pad_packed_sequence(sequence, batch_first=True)
    # batch sizes reside on the CPU by default -> we need to bring them to GPU
    return padded, lengths.to(padded.device)
def padded_stack(
    tensors: List[torch.Tensor], side: str = "right", mode: str = "constant", value: Union[int, float] = 0
) -> torch.Tensor:
    """
    Stack tensors along the first dimension, padding their last dimension to equal size.

    Args:
        tensors (List[torch.Tensor]): list of tensors to stack
        side (str): side on which to pad - "left" or "right". Defaults to "right".
        mode (str): 'constant', 'reflect', 'replicate' or 'circular'. Default: 'constant'
        value (Union[int, float]): value to use for constant padding

    Returns:
        torch.Tensor: stacked tensor
    """
    target_size = max(t.size(-1) for t in tensors)

    def _padded(t: torch.Tensor) -> torch.Tensor:
        # only pad tensors that are shorter than the longest one
        missing = target_size - t.size(-1)
        if missing <= 0:
            return t
        if side == "left":
            padding = (missing, 0)
        elif side == "right":
            padding = (0, missing)
        else:
            raise ValueError(f"side for padding '{side}' is unknown")
        return F.pad(t, padding, mode=mode, value=value)

    return torch.stack([_padded(t) for t in tensors], dim=0)
def to_list(value: Any) -> List[Any]:
    """
    Convert a value or a sequence of values to a list-like object.

    If ``value`` is already a list or tuple (but not a PackedSequence),
    it is returned unchanged; otherwise it is wrapped in a single-element list.

    Args:
        value (Any): value to convert

    Returns:
        List[Any]: list of values
    """
    is_plain_sequence = isinstance(value, (tuple, list)) and not isinstance(value, rnn.PackedSequence)
    return value if is_plain_sequence else [value]
def unsqueeze_like(tensor: torch.Tensor, like: torch.Tensor):
    """
    Unsqueeze trailing dimensions of ``tensor`` until it has as many
    dimensions as ``like``.

    Args:
        tensor (torch.Tensor): tensor to unsqueeze
        like (torch.Tensor): tensor whose number of dimensions to match
    """
    missing_dims = like.ndim - tensor.ndim
    if missing_dims < 0:
        raise ValueError(f"tensor.ndim={tensor.ndim} > like.ndim={like.ndim}")
    if missing_dims == 0:
        return tensor
    # append `missing_dims` singleton axes at the end
    return tensor[(...,) + (None,) * missing_dims]
def apply_to_list(obj: Union[List[Any], Any], func: Callable) -> Union[List[Any], Any]:
    """
    Apply ``func`` to each element of a list/tuple, or directly to ``obj``
    if it is not a list/tuple.

    Useful when a value may be either a collection whose elements need the
    function applied or a single object.

    Args:
        obj (Union[List[Any], Any]): list/tuple whose elements to transform,
            otherwise the object to transform directly
        func (Callable): function to apply

    Returns:
        Union[List[Any], Any]: list of results, or a single result, mirroring
        the shape of ``obj``
    """
    if isinstance(obj, rnn.PackedSequence) or not isinstance(obj, (list, tuple)):
        return func(obj)
    return [func(element) for element in obj]
class OutputMixIn:
    """
    Mix-in that gives a namedtuple some of the access capabilities of a dict.
    """

    def __getitem__(self, k):
        # string keys behave like attribute access; everything else
        # (e.g. integer indices) keeps the tuple behaviour
        return getattr(self, k) if isinstance(k, str) else super().__getitem__(k)

    def get(self, k, default=None):
        # dict-style lookup with a fallback value
        return getattr(self, k, default)

    def items(self):
        # (field name, value) pairs, like dict.items()
        return zip(self._fields, self)

    def keys(self):
        # field names, like dict.keys()
        return self._fields
def move_to_device(
    x: Union[
        Dict[str, Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]]],
        torch.Tensor,
        List[torch.Tensor],
        Tuple[torch.Tensor],
    ],
    device: Union[str, torch.DeviceObjType],
) -> Union[
    Dict[str, Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]]],
    torch.Tensor,
    List[torch.Tensor],
    Tuple[torch.Tensor],
]:
    """
    Move object to device.

    Args:
        x (dictionary of list of tensors): object (e.g. dictionary) of tensors to move to device
        device (Union[str, torch.DeviceObjType]): device, e.g. "cpu"

    Returns:
        x on targeted device
    """
    if isinstance(device, str):
        device = torch.device(device)
    if isinstance(x, dict):
        for name in x.keys():
            x[name] = move_to_device(x[name], device=device)
    elif isinstance(x, OutputMixIn):
        # NOTE(review): the return values are discarded here - tensors inside
        # the (immutable) namedtuple cannot be replaced; only nested mutable
        # containers (dicts/lists) are actually updated in place. Confirm this
        # is the intended behavior.
        for xi in x:
            move_to_device(xi, device=device)
        return x
    elif isinstance(x, torch.Tensor) and x.device != device:
        x = x.to(device)
    # guard against empty sequences: `x[0]` would raise IndexError;
    # the first element's device is used as a proxy for the whole sequence
    elif isinstance(x, (list, tuple)) and len(x) > 0 and x[0].device != device:
        x = [move_to_device(xi, device=device) for xi in x]
    return x
def detach(
    x: Union[
        Dict[str, Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]]],
        torch.Tensor,
        List[torch.Tensor],
        Tuple[torch.Tensor],
    ],
) -> Union[
    Dict[str, Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]]],
    torch.Tensor,
    List[torch.Tensor],
    Tuple[torch.Tensor],
]:
    """
    Recursively detach tensors inside an object from the computation graph.

    Args:
        x: object to detach

    Returns:
        detached object
    """
    if isinstance(x, torch.Tensor):
        return x.detach()
    if isinstance(x, (OutputMixIn, dict)):
        # note: an OutputMixIn namedtuple comes back as a plain dict
        return {key: detach(item) for key, item in x.items()}
    if isinstance(x, (list, tuple)):
        return [detach(item) for item in x]
    return x
| [
"beitner.jan@bcg.com"
] | beitner.jan@bcg.com |
fce04a514fffe8a02d5e89c07e2b5d7a96db6d0e | 2cdd241dbc7a1d6efa278e670ebeae149ea1ec05 | /node_modules/uws/build/config.gypi | ad839c7980968e6d3dbe9867ee5bc8b3b620c19b | [
"Zlib"
] | permissive | mngyuan/rollbot | 72b0f64c036216b5abc432720f70e87afe6c024c | 2c462de1ea7bbe41948b26b2bb4b46587dc382d9 | refs/heads/master | 2021-06-17T02:44:00.400543 | 2017-06-02T08:09:33 | 2017-06-02T08:09:33 | 93,142,976 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,460 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_file": "icudt58l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt58l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "58",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "false",
"node_module_version": 51,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local/Cellar/node/7.4.0",
"node_release_urlbase": "",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "51.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_inspector": "true",
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"want_separate_host_toolset_mkpeephole": 0,
"xcode_version": "8.0",
"nodedir": "/Users/phorust/.node-gyp/7.4.0",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"version_git_tag": "true",
"init_license": "MIT",
"registry": "https://registry.yarnpkg.com",
"lastUpdateCheck": "1496387170198",
"save_prefix": "^",
"strict_ssl": "true",
"version_git_message": "v%s",
"version_git_sign": "",
"ignore_scripts": "",
"user_agent": "yarn/0.19.1 npm/? node/v7.4.0 darwin x64",
"init_version": "1.0.0",
"ignore_optional": "",
"version_tag_prefix": "v"
}
}
| [
"kevinmlee.x@gmail.com"
] | kevinmlee.x@gmail.com |
846e4b78d7c886c44c4e431ff2909e85839f2292 | 3d1604af821eb83d907c7bd7b201d81b949d5875 | /src/iCalendarMIP.py | 87e93e21f846508b59db602ca6bb2e0a6bb19e1c | [] | no_license | alswl/ical-calculator | 1d00ca7ffc67a107a4f015e083a64bf5eb203b78 | cd1001e5f5274144a8b7279556d5e5f3174eddcf | refs/heads/master | 2021-01-20T10:38:46.906720 | 2014-01-21T14:04:24 | 2014-01-21T14:04:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,776 | py | # -*- coding:utf-8 -*-
"""iCalendar Message-Based Interoperability Protocol (iMIP) (RFC6048, RFC2447) library
Sends valid email invitations and receives them via SMTP / PoP account
Created on Feb 11, 2013
@author: Oberron
"""
import sys
sys.path.append("../../src/") #to overide previous installs
import icalendar #@UnresolvedImport
import datetime
import smtplib
SMTP_info_file = "C:/sw/misc/SMTP.txt"
""" above file needs to contain 2 lines: first one is the login, 2nd one is the password"""
class iMIP():
    """ Generates an email invite (and sends it via SMTP) containing valid iCalendar file
    """
    # populated from SMTP_info_file in __init__
    SMTP_login = ""
    SMTP_password = ""
    def __init__(self):
        # read credentials from SMTP_info_file (line 1 = login, line 2 = password)
        # NOTE(review): the file handle is never closed and missing/short files
        # raise IndexError - confirm acceptable for this script's usage
        SMTP = open(SMTP_info_file).readlines()
        self.SMTP_login = SMTP[0].replace("\n","").replace("\r","")
        self.SMTP_password = SMTP[1].replace("\n","").replace("\r","")
        self.ical = icalendar.ics()
    def CreateiCal(self):
        # stub - not implemented yet
        pass
    def writeMail(self):
        # stub - not implemented yet
        pass
    def sendEmail(self,from_address,to_address,email_as_string):
        """Send a pre-formatted email via Gmail SMTP with STARTTLS."""
        mailServer = smtplib.SMTP('smtp.gmail.com', 587)
        mailServer.ehlo()
        mailServer.starttls()
        mailServer.ehlo()
        mailServer.login(self.SMTP_login, self.SMTP_password)
        # mailServer.sendmail(fro, attendees, msg.as_string())
        mailServer.sendmail(from_address, to_address, email_as_string)
        mailServer.close()
        pass
myCal = iMIP()
#print myCal.SMTP_login
#print myCal.SMTP_password
dtstart = datetime.datetime(2013,4,25,9,0,0)
dtend = dtstart + datetime.timedelta(hours =1)
dtstamp= datetime.datetime(2013,2,6,10,11,00)
organizer = "test@test.com"
attendee = "test@test.com"
uid = "CALEVENT_TS090519840000000005"
description = "test"
summary = "test"
location = "Test"
sequence = 1
status = "CONFIRMED"
myCal.ical.events = [{"DTSTART":{"val":dtstart},"DTEND":{"val":dtend},"DTSTAMP":{"val":dtstamp},
"ORGANIZER":{"val":organizer,"prop":"CN=Test test"},
"ATTENDEE":{"val":attendee},"UID":{"val":uid},
"DESCRIPTION":{"val":description},"SUMMARY":{"val":summary},
"LOCATION":{"val":location},"SEQUENCE":{"val":sequence},
"STATUS":{"val":status}}]
mycal1 = myCal.ical.Gen_iCalendar(method="REQUEST",append=False)
print "mycal1 is compliant:",myCal.ical.isCalendarStringCompliant(mycal1)
myCal.ical.updateEvent(uid,{"DTSTART":{"val":dtstart+datetime.timedelta(hours=1)},"DTEND":{"val":dtend+datetime.timedelta(hours=1)}})
mycal2 = myCal.ical.Gen_iCalendar(method="REQUEST",append=False)
print "mycal2 is compliant:",myCal.ical.isCalendarStringCompliant(mycal1)
| [
"one.annum@gmail.com@e1cb2c10-74b4-a52f-94e4-9c71d713a321"
] | one.annum@gmail.com@e1cb2c10-74b4-a52f-94e4-9c71d713a321 |
fb5968a2eab69f9449d33ac5db3bf1925503160f | f26d7ecbe11ea33b0806944a5335209c2293a0c4 | /c1app/admin.py | 7e35286f495c52808ae1de924b34734c77a2d00c | [] | no_license | kwanHI/LFdjango10 | 64b177230d432d40b9b4ad4e901632b9e39cad5b | 6ec139677b86d82847ee88ba258f910aa4edb255 | refs/heads/master | 2021-05-13T13:01:41.257054 | 2018-01-08T15:39:38 | 2018-01-08T15:39:38 | 116,695,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | from django.contrib import admin
from .models import UserProfile, Client10
# Register your models here.
admin.site.register(UserProfile)
admin.site.register(Client10)
| [
"limpsangsriKwan@gmail.com"
] | limpsangsriKwan@gmail.com |
245891ab60390c902727c53b79d0e0632a0a26d4 | daef437c4cd7ed6ca4be3989f6c7b990ee1efcc7 | /blog/migrations/0002_comment.py | e925e461c50d8aa9a0b4c6fa60b7e99800b4ab0c | [] | no_license | YY-U/Myblog-web-app | b157624f5cd56b3b0b56d30f40fe5ae62973c78f | fd90497dc0aa7e6b3efb6e6d5a24c165fde6de33 | refs/heads/main | 2023-08-28T05:13:40.068384 | 2021-09-23T14:33:34 | 2021-09-23T14:33:34 | 401,692,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 936 | py | # Generated by Django 3.2.3 on 2021-08-14 10:10
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('approved_comment', models.BooleanField(default=False)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.post')),
],
),
]
| [
"chxgw030@yahoo.co.jp"
] | chxgw030@yahoo.co.jp |
0997db820df5512beb330aedeb592bcd7ec5f840 | cb7c3673ad937c282a39be74d0aee8628e75928d | /tests/test_utils/output/uriandcurie.py | 2c0bb6edc92ee3846661835fdd4a574c30b2da97 | [
"CC0-1.0"
] | permissive | bpow/linkml | 649d6d48f39a8c51efa92fba7eb25c1d8854b472 | ab83c0caee9c02457ea5a748e284dee6b547fcd6 | refs/heads/main | 2023-05-05T18:46:04.501897 | 2021-05-13T21:17:03 | 2021-05-13T21:17:03 | 371,163,928 | 0 | 0 | CC0-1.0 | 2021-05-26T20:42:13 | 2021-05-26T20:42:12 | null | UTF-8 | Python | false | false | 4,918 | py | # Auto generated from uriandcurie.yaml by pythongen.py version: 0.9.0
# Generation date: 2021-03-26 14:22
# Schema: uriandcurie
#
# id: http://example.org/test/uriandcurie
# description:
# license:
import dataclasses
import sys
import re
from typing import Optional, List, Union, Dict, ClassVar, Any
from dataclasses import dataclass
from linkml_model.meta import EnumDefinition, PermissibleValue, PvFormulaOptions
from linkml.utils.slot import Slot
from linkml.utils.metamodelcore import empty_list, empty_dict, bnode
from linkml.utils.yamlutils import YAMLRoot, extended_str, extended_float, extended_int
from linkml.utils.dataclass_extensions_376 import dataclasses_init_fn_with_kwargs
from linkml.utils.formatutils import camelcase, underscore, sfx
from linkml.utils.enumerations import EnumDefinitionImpl
from rdflib import Namespace, URIRef
from linkml.utils.curienamespace import CurieNamespace
from linkml.utils.metamodelcore import Curie, ElementIdentifier, NCName, NodeIdentifier, URI, URIorCURIE
metamodel_version = "1.7.0"
# Overwrite dataclasses _init_fn to add **kwargs in __init__
dataclasses._init_fn = dataclasses_init_fn_with_kwargs
# Namespaces
M = CurieNamespace('m', 'http://example.org/test/uriandcurie')
SHEX = CurieNamespace('shex', 'http://www.w3.org/ns/shex#')
XSD = CurieNamespace('xsd', 'http://www.w3.org/2001/XMLSchema#')
DEFAULT_ = M
# Types
class String(str):
""" A character string """
type_class_uri = XSD.string
type_class_curie = "xsd:string"
type_name = "string"
type_model_uri = M.String
class Uriorcurie(URIorCURIE):
""" a URI or a CURIE """
type_class_uri = XSD.anyURI
type_class_curie = "xsd:anyURI"
type_name = "uriorcurie"
type_model_uri = M.Uriorcurie
class Uri(URI):
""" a complete URI """
type_class_uri = XSD.anyURI
type_class_curie = "xsd:anyURI"
type_name = "uri"
type_model_uri = M.Uri
class Curie(Curie):
""" a CURIE """
type_class_uri = XSD.anyURI
type_class_curie = "xsd:anyURI"
type_name = "curie"
type_model_uri = M.Curie
class Ncname(NCName):
""" Prefix part of CURIE """
type_class_uri = XSD.string
type_class_curie = "xsd:string"
type_name = "ncname"
type_model_uri = M.Ncname
class Objectidentifier(ElementIdentifier):
""" A URI or CURIE that represents an object in the model. """
type_class_uri = SHEX.iri
type_class_curie = "shex:iri"
type_name = "objectidentifier"
type_model_uri = M.Objectidentifier
class Nodeidentifier(NodeIdentifier):
""" A URI, CURIE or BNODE that represents a node in a model. """
type_class_uri = SHEX.nonliteral
type_class_curie = "shex:nonliteral"
type_name = "nodeidentifier"
type_model_uri = M.Nodeidentifier
# Class references
class C1Id(ElementIdentifier):
pass
@dataclass
class C1(YAMLRoot):
    """Auto-generated linkml class exercising URI/CURIE/NCName slot types.

    ``id`` is the required identifier; the remaining slots are optional and
    are coerced to their declared types in ``__post_init__``.
    """
    _inherited_slots: ClassVar[List[str]] = []
    class_class_uri: ClassVar[URIRef] = M.C1
    class_class_curie: ClassVar[str] = "m:C1"
    class_name: ClassVar[str] = "c1"
    class_model_uri: ClassVar[URIRef] = M.C1
    id: Union[str, C1Id] = None
    hasCurie: Optional[Union[str, Curie]] = None
    hasURI: Optional[Union[str, URI]] = None
    hasNcName: Optional[Union[str, NCName]] = None
    id2: Optional[Union[str, NodeIdentifier]] = None
    def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
        # coerce raw strings into their declared slot types; only `id`
        # is mandatory
        if self.id is None:
            raise ValueError("id must be supplied")
        if not isinstance(self.id, C1Id):
            self.id = C1Id(self.id)
        if self.hasCurie is not None and not isinstance(self.hasCurie, Curie):
            self.hasCurie = Curie(self.hasCurie)
        if self.hasURI is not None and not isinstance(self.hasURI, URI):
            self.hasURI = URI(self.hasURI)
        if self.hasNcName is not None and not isinstance(self.hasNcName, NCName):
            self.hasNcName = NCName(self.hasNcName)
        if self.id2 is not None and not isinstance(self.id2, NodeIdentifier):
            self.id2 = NodeIdentifier(self.id2)
        super().__post_init__(**kwargs)
# Enumerations
# Slots
class slots:
pass
slots.id = Slot(uri=M.id, name="id", curie=M.curie('id'),
model_uri=M.id, domain=None, range=URIRef)
slots.hasCurie = Slot(uri=M.hasCurie, name="hasCurie", curie=M.curie('hasCurie'),
model_uri=M.hasCurie, domain=None, range=Optional[Union[str, Curie]])
slots.hasURI = Slot(uri=M.hasURI, name="hasURI", curie=M.curie('hasURI'),
model_uri=M.hasURI, domain=None, range=Optional[Union[str, URI]])
slots.hasNcName = Slot(uri=M.hasNcName, name="hasNcName", curie=M.curie('hasNcName'),
model_uri=M.hasNcName, domain=None, range=Optional[Union[str, NCName]])
slots.id2 = Slot(uri=M.id2, name="id2", curie=M.curie('id2'),
model_uri=M.id2, domain=None, range=Optional[Union[str, NodeIdentifier]]) | [
"action@github.com"
] | action@github.com |
7193bb2db7bcd806f09266312489de1e2d192446 | 30de2a2be2909f0aee882790af244432b8b071c9 | /sentiment classifier/splitSentiment.py | 73590477b724114208617a6856ccad16ead7a53c | [] | no_license | ctlu22/neuroticism | 126b5b360f8cf1564170bcfc4e87a4db5c7e72a2 | 530acc87638f5538be984ab60ffd5f44acf95bf5 | refs/heads/master | 2021-04-30T09:57:13.695690 | 2018-02-14T04:38:08 | 2018-02-14T04:38:08 | 111,462,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,979 | py | import sys
import re
import csv
if len(sys.argv) < 2:
print """USAGE: python splitSentiment.py [input_file]"""
sys.exit()
pos_file = open("positive.csv", 'w')
neg_file = open("negative.csv", 'w')
test_file = open("testset.csv", 'w')
emo_count_file = open("emojiCount.csv", 'w')
pos_out = csv.writer(pos_file, delimiter = '\t', quotechar = '', escapechar = '\\', quoting = csv.QUOTE_NONE)
neg_out = csv.writer(neg_file, delimiter = '\t', quotechar = '', escapechar = '\\', quoting = csv.QUOTE_NONE)
test_out = csv.writer(test_file, delimiter = '\t', quotechar = '', escapechar = '\\', quoting = csv.QUOTE_NONE)
emo_count_out = csv.writer(emo_count_file, delimiter = '\t', quotechar = '', escapechar = '\\', quoting = csv.QUOTE_NONE)
pos_count = 0
neg_count = 0
test_count = 0
sad = '[:=]-?\('
happy = '[:=]-?\)'
for index in range(1, len(sys.argv)):
doc = open(sys.argv[index], 'rU')
in_file = csv.reader(doc, delimiter = ',')
line_idx = 1
for row in in_file:
if (line_idx ==1):
row.extend(["positive_emo_count", "negative_emo_count", "net_emo_count"])
emo_count_out.writerow(row)
line_idx += 1
continue
if len(row) != 5:
print "Skipping: malformatted line: %i" %line_idx
line_ix += 1
continue
text = row[1]
emo_neg_count = len(re.findall(sad, text))
emo_pos_count = len(re.findall(happy, text))
row.extend([emo_neg_count, emo_pos_count, emo_pos_count - emo_neg_count])
if re.search(sad, text) != None and re.search(happy, text) == None:
neg_out.writerow(row)
neg_count += 1
elif re.search(happy, text) != None and re.search(sad, text) == None:
pos_out.writerow(row)
pos_count += 1
else:
test_out.writerow(row)
test_count += 1
emo_count_out.writerow(row)
line_idx += 1
doc.close()
neg_file.close()
pos_file.close()
test_file.close()
emo_count_file.close()
print "Splitting finished. Positive: " + str(pos_count) + " Negative:" + str(neg_count) + " Test:" + str(test_count)
| [
"lu.chunting2009@gmail.com"
] | lu.chunting2009@gmail.com |
94f4de8865ab5f6cc1d9d0e010fd2820a5ad9427 | 1e2a18bcbbaf95cc78f54a9486959dbb563a8bc7 | /payments/gateways/__init__.py | 23b95e7e45069031586b4dadb1f7a7f1a18ff8f1 | [] | no_license | arashsa07/bazinama_events | 326433db2e94edf091f7cfb41d08523abf7d72a7 | 7d25add58f50aa15aef61f780e32e4e7a2eff94e | refs/heads/master | 2020-06-19T18:22:55.508140 | 2019-07-14T10:40:06 | 2019-07-14T10:40:06 | 196,820,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | from .shahparak import verify_shaparak
from .saman import verify_saman
from .raad import verify_raad
from .bazaar import verify_bazaar | [
"arash.sa07@gmail.com"
] | arash.sa07@gmail.com |
f892964a9f41c2a1957bb3b6940a35faf1590b5a | 27a4619766e85a7297fc0e16c246866e5b0c14d3 | /tests/agents/test_energy_storage.py | a5c7a1705eedac39126189109872767c4c621a83 | [
"BSD-3-Clause"
] | permissive | sugar-215/PowerGridworld | 53b2cadfabd81a337a6159ed4424445741d712b3 | 9ed5ce3a0219e7730f102d2d3c8c40b07da1f084 | refs/heads/main | 2023-09-04T19:34:41.960545 | 2021-11-17T16:45:27 | 2021-11-17T16:45:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | from tests.conftest import single_agent_episode_runner
from tests.agents.conftest import energy_storage_config
from gridworld.agents.energy_storage import EnergyStorageEnv
def test_default_env(energy_storage_config):
    """Creates an env instance and runs it (sanity check)."""
    # energy_storage_config is a pytest fixture from tests.agents.conftest
    env = EnergyStorageEnv(**energy_storage_config)
    # runner is expected to return a truthy value on a successful episode
    assert single_agent_episode_runner(env)
| [
"dbiagon@nrel.gov"
] | dbiagon@nrel.gov |
b6e2ce22fb67076c267ba2e1fd71f0b24c1d2878 | 20dba145fd988d5901cfd335efe238c0dce8ac5b | /analytics/decorators/cache_dec.py | df12f6fbc3b61d39bd1710094aebd7c6bc2533c3 | [
"BSD-3-Clause"
] | permissive | ModelDBRepository/228604 | 10be01bf0eeea3ea07ef4c38ebb3b4c771000923 | 8f641f73bcac2700b476663fe656fcad7d63470d | refs/heads/master | 2020-05-29T18:25:57.095212 | 2019-05-31T03:47:54 | 2019-05-31T03:47:54 | 189,299,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,489 | py | """ a simple caching of function return values
using the decorator "cached", e.g.
@cached
def foo(a, b, c):
return a*b-c
will cache the result of the calculation foo does, which of course better not be this trivial.
works also for numpy arrays in the parameters.
should of course only be used on functions that do not depend on global parameters (as their state would not be cashed)
"""
import hashlib
import numpy as np
from functools import wraps
cache = {}
hits = 0
misses = 0
no_caching = False

def cached(func):
    """Decorator that memoizes FUNC's return values.

    The cache key combines the function name with a hash of every
    positional and keyword argument; numpy arrays are hashed via sha1 of
    their raw bytes because they are unhashable.  Results live in the
    module-level `cache` dict, `hits`/`misses` count lookups, and setting
    `no_caching` disables memoization globally.

    Only use this on functions that do not read global state, since that
    state is not part of the cache key.
    """
    def hashit(a):
        # builtin hash does weird things with complex numbers with integer
        # real (or imag?) part: hash(1.5j-1) == hash(1.5j-2); numpy arrays
        # are unhashable, so hash their buffer instead.
        return (a.__hash__() if not isinstance(a, np.ndarray) else hashlib.sha1(a).hexdigest())

    @wraps(func)
    def wrapper(*args, **kwargs):
        global misses, hits
        key = tuple([func.__name__]) + tuple(("", hashit(a)) for a in args) + tuple((k, hashit(v)) for k, v in sorted(kwargs.items()))
        if no_caching:
            return func(*args, **kwargs)
        elif key not in cache:
            # BUG FIX: dict.has_key() was removed in Python 3; use `in`.
            cache[key] = func(*args, **kwargs)
            misses += 1
        else:
            hits += 1
        return cache[key]
    return wrapper
def clear_cache():
    """Drop every memoized result and zero the hit/miss counters."""
    global cache, misses, hits
    hits = misses = 0
    cache = {}
| [
"tom.morse@yale.edu"
] | tom.morse@yale.edu |
097fb5d2aa77f9818e892b587791125d742bad04 | 18ba764b234cd3a4f2eebc3d7bb80219aa8854b6 | /rgkit/bots/team10.py | 9bc00557423b7dd8e45a19056a6567805a2ac4b0 | [
"Unlicense"
] | permissive | tomviner/rgkit | 74008bdda044b165a54570cf0d6c0b0714a3be80 | acd4517ca5213c712af0fa29ff16d4ff32e9f443 | refs/heads/master | 2021-05-16T01:35:30.031418 | 2017-10-26T19:46:53 | 2017-10-26T19:46:53 | 106,317,820 | 0 | 0 | null | 2017-10-09T18:04:54 | 2017-10-09T18:04:54 | null | UTF-8 | Python | false | false | 599 | py | import random
import rg
centre = 9, 9
class Robot:
    """Robot Game bot: each turn, pick a random safe neighbouring square
    and either move to it or attack it (coin flip)."""

    def act(self, game):
        x, y = self.location
        # Candidate squares in the same order as before: left, right, up, down.
        neighbours = [(x - 1, y), (x + 1, y), (x, y + 1), (x, y - 1)]
        ok_directions = [square for square in neighbours if safe(square)]
        new_loc = random.choice(ok_directions)
        return random.choice((['move', new_loc], ['attack', new_loc]))
def safe(location):
    """Return True if LOCATION is not an obstacle square.

    Fix: dropped the leftover debug print that ran once per candidate
    square every turn, spamming stdout and slowing the bot.
    """
    return 'obstacle' not in set(rg.loc_types(location))
| [
"sim.david@gmail.com"
] | sim.david@gmail.com |
f888d9b6ea8e0fb8f1ad03e16e336665189babce | 6c5a32e7b393803c0bada26fa8a705bfc88d9f0c | /vdditer.py | 6832571cc5bcf3dab99f51cb54391e3efab0f793 | [] | no_license | idesignitX/vddfit | 6ad32f42c84714e8302f582c5ba8437b4d1ef1f4 | 74e2a2da93cd958bbbabfffb7541b4f7d2738c9e | refs/heads/master | 2023-07-07T14:40:30.014088 | 2021-08-06T14:14:18 | 2021-08-06T14:14:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,516 | py | from numba import njit
import numpy as np
import matplotlib.pyplot as plt
@njit
def simulate_activation(tau, noise, std, smoothing, scale, tau_threshold, *args):
    """Yield the accumulator's activation at every time step.

    Each step, the previous activation is decayed by `smoothing`, then the
    arctan-squashed distance of tau[i] from tau_threshold plus Gaussian
    noise (scaled by `std`) is added.  Extra *args are accepted and
    ignored so a full parameter dict can be splatted in.
    """
    act = 0.0
    for i in range(len(tau)):
        act = act*smoothing + np.arctan((tau[i] - tau_threshold)*scale) + noise[i]*std
        yield act
@njit
def simulate_time(tau, noise, std, smoothing, scale, tau_threshold, act_threshold):
    """Return the fractional step index where activation first crosses
    act_threshold, or np.nan if it never does within the trial."""
    acts = simulate_activation(tau, noise, std, smoothing, scale, tau_threshold)
    prev = 0.0
    # An initial activation already above threshold counts as an immediate
    # response at t == 0 (unreachable with prev == 0.0 and a positive
    # threshold, but kept for safety).
    if prev > act_threshold:
        return 0.0
    for i, act in enumerate(acts):
        if act < act_threshold:
            prev = act
            continue
        # Linear interpolation between the last sub-threshold sample and
        # the first supra-threshold one.
        t = (act_threshold - prev)/(act - prev)
        return i + t
    return np.nan
@njit
def simulate_times(tau, noise_bank, std, smoothing, scale, tau_threshold, act_threshold):
    """Run simulate_time once per noise realisation in NOISE_BANK and
    return the crossing times as an array (np.nan marks non-responses)."""
    out = np.empty(len(noise_bank))
    for i in range(len(noise_bank)):
        out[i] = simulate_time(tau, noise_bank[i], std, smoothing, scale, tau_threshold, act_threshold)
    return out
@njit
def stdnormpdf(x):
    """Standard normal probability density evaluated at x."""
    return np.exp(-x**2/2)/np.sqrt(2*np.pi)
@njit
def sample_lik(vals, sample, dt):
    """Gaussian kernel-density likelihood of each value in VALS.

    SAMPLE holds simulated crossing times in step units; `dt` converts
    them to seconds.  Bandwidth follows an n**(-1/5)-style rule with a
    fixed base std of 0.1.
    """
    liks = np.empty_like(vals)
    n = len(sample)
    std = 0.1
    bw = std*n**(-1/(1+4))
    for i, val in enumerate(vals):
        lik = 0.0
        for s in sample:
            # TODO: Handle non-responders
            # TODO: Make a discrete PDF from the empirical CDF?
            lik += stdnormpdf((s*dt - val)/bw)
        liks[i] = lik/(n*bw)
    return liks
from kwopt import minimizer, logbarrier, logitbarrier
def vdd_loss(trials, dt, N=1000):
    """Build a negative log-likelihood function over the VDD parameters.

    trials -- iterable of (tau, response_times) pairs
    dt     -- sampling period of the tau signals
    N      -- number of noise realisations simulated per trial

    Negative taus are clamped to a huge value (1e5) so they act as "no
    stimulus".  The noise banks are drawn once up front, which makes the
    returned loss deterministic across optimiser iterations.
    """
    taus, rts = zip(*trials)
    hacktaus = []
    for tau in taus:
        hacktau = tau.copy()
        hacktau[hacktau < 0] = 1e5
        hacktaus.append(hacktau)
    noises = [np.random.randn(N, len(tau)) for (tau, rts) in trials]
    def loss(**kwargs):
        # Sum log-likelihood of the observed RTs under the simulated
        # RT distribution, per trial; return the negative for minimisers.
        lik = 0
        for tau, rt, noise in zip(hacktaus, rts, noises):
            sample = simulate_times(tau, noise, **kwargs)
            lik += np.sum(np.log(sample_lik(rt, sample, dt)))
        return -lik
    return loss
def fit_vdd(trials, dt, N=1000, init=None):
    """Fit the VDD model parameters to TRIALS by maximum likelihood.

    Uses kwopt's Nelder-Mead minimizer with log/logit barriers to keep
    each parameter in its valid range.  INIT may override the default
    starting point; returns whatever kwopt.minimizer returns.
    """
    if init is None:
        init = dict(
            std=1.0*np.sqrt(dt),
            smoothing=0.5,
            scale=1.0,
            tau_threshold=4.0,
            act_threshold=1.0
        )
    # Positive-only parameters get logbarrier; smoothing lives in (0, 1)
    # and gets logitbarrier.
    spec = dict(
        std= (init['std'], logbarrier),
        smoothing= (init['smoothing'], logitbarrier),
        scale= (init['scale'], logbarrier),
        tau_threshold= (init['tau_threshold'], logbarrier),
        act_threshold= (init['act_threshold'], logbarrier)
    )
    loss = vdd_loss(trials, dt, N)
    return minimizer(loss, method='nelder-mead')(**spec)
def gridtest():
def fittingtest():
    """Generate synthetic trials with known parameters and run fit_vdd on
    them; the recovered parameters are printed, not asserted."""
    N = 20
    dt = 1/90
    dur = 20
    ts = np.arange(0, dur, dt)
    param = dict(
        std=1.0*np.sqrt(dt),
        smoothing=0.5,
        scale=1.0,
        tau_threshold=2.0,
        act_threshold=1.0
    )
    trials = []
    for tau0 in (2.0, 3.0, 4.0, 5.0):
        # NOTE(review): the loop's tau0 is overwritten on the next line,
        # so all four trials use tau0 == 4.5 -- looks unintended; verify.
        tau0 = 4.5
        speed = 30.0
        dist = tau0*speed - ts*speed
        tau = dist/speed
        np.random.seed(0)
        noise_bank = np.random.randn(N, len(tau))
        # Clamp negative taus to a huge value, matching vdd_loss.
        hacktau = tau.copy()
        hacktau[hacktau < 0] = 1e5
        sample = simulate_times(hacktau, noise_bank, **param)*dt
        trials.append((tau, sample))
    result = fit_vdd(trials, dt)
    print(result)
    #plt.hist(sample)
    #plt.show()
def samplingtest():
    """Visual sanity check: simulate many response times for one constant
    approach and plot their histogram against the kernel-density fit."""
    N = 1000
    dt = 1/90
    dur = 20
    ts = np.arange(0, dur, dt)
    tau0 = 4.5
    speed = 30.0
    dist = tau0*speed - ts*speed
    tau = dist/speed
    np.random.seed(0)
    noise_bank = np.random.randn(N, len(tau))
    # Reset negative taus back to the starting value instead of the 1e5
    # clamp used elsewhere.
    tau[tau < 0] = 4.5
    param = dict(
        std=1.0*np.sqrt(dt),
        smoothing=0.5,
        scale=1.0,
        tau_threshold=4.0,
        act_threshold=1.0
    )
    #print("Simulating")
    sample = simulate_times(tau, noise_bank, **param)
    #responders = np.isfinite(sample)
    #print(np.sum(responders)/len(responders)*100)
    #plt.hist(sample[responders]*dt, bins=100)
    plt.hist(sample*dt, bins=100, density=True)
    est = sample_lik(ts, sample, dt)
    #from scipy.stats.kde import gaussian_kde
    #est = gaussian_kde(sample, bw_method=0.1)(ts)
    #plt.plot(ts, est, color='green')
    #plt.twinx()
    plt.plot(ts, est, color='red')
    plt.show()
if __name__ == '__main__':
    # Run the parameter-recovery test by default; the plotting demo is
    # kept around for manual use.
    fittingtest()
    #samplingtest()
| [
"jami.pekkanen@helsinki.fi"
] | jami.pekkanen@helsinki.fi |
cde37335c8fd46c394bd630f2b414f47ebcc8761 | e44b48cae7ac78938a05a69e5e5acd8836530ea0 | /App.py | 8d78c2021e89c3a059aadd04d9bc018dd09335fc | [] | no_license | vishwjeet3939/Ola_Prediction | 7522a234e3079608f26bdbe97b9f8eca28d3f355 | 0a822e7d65fc4eea7f77000b0e7de7ab6cc861d2 | refs/heads/master | 2022-11-23T16:49:51.081218 | 2020-07-30T11:11:14 | 2020-07-30T11:11:14 | 283,749,075 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | import numpy as np
from flask import Flask,request,jsonify,render_template
import pickle
import math
app = Flask(__name__)
model = pickle.load(open("uber.pkl","rb"))
@app.route('/')
def home():
    """Serve the landing page containing the prediction form."""
    return render_template('index.html')
@app.route('/predict', methods=['POST'])
def predict():
    """Run the pickled model on the submitted form values and render the
    predicted weekly ride count (floored) back into the template."""
    # NOTE(review): relies on the form fields arriving in the order the
    # model was trained on -- verify against the template's field order.
    int_features = [int(x) for x in request.form.values()]
    final_features = [np.array(int_features)]
    prediction = model.predict(final_features)
    output = round(prediction[0],2)
    return render_template('index.html', prediction_text="Number of Weekly Rides Should be {}".format(math.floor(output)))
if __name__ == '__main__':
app.run(debug=True)
| [
"noreply@github.com"
] | vishwjeet3939.noreply@github.com |
466d567c62831fb4ecedbcb1bfdcdc30fbba798a | 11fbcc55eecead7bb10c2e645925f912f6ee32c2 | /setup.py | a8cdd9c9fe270299ca02dfb6aa43d02399919cf9 | [
"BSD-3-Clause"
] | permissive | anandubey/randomstringtoken | b519af91c8a1604d68e83c76597fbc4a6c4c3dcc | b4bbc29fe7402252db3f20c9f8d5226baa0509c7 | refs/heads/main | 2023-05-04T14:28:22.403052 | 2021-05-27T16:26:07 | 2021-05-27T16:26:07 | 369,114,495 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | import pathlib
from setuptools import setup
# The directory containing this file
CURR_DIR = pathlib.Path(__file__).parent
# The text of the README file
README = (CURR_DIR / "README.md").read_text()
# This call to setup() does all the work
setup(
name="randomstringtoken",
version="0.2",
description="Generate random string token of specified length",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/p5ypher/randomstringtoken",
author="Anand Kumar Dubey",
author_email="dubey.anandkr@gmail.com",
license="BSD 3-Clause License",
classifiers=[
"License :: OSI Approved :: BSD 3-Clause License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.9",
"Development Status :: 4 - Beta",
],
packages=["randomstringtoken"],
include_package_data=False,
install_requires=[
],
entry_points={
"console_scripts": [
"randomstringtoken=randomstringtoken.__main__:main",
]
},
) | [
"dubey.anandkr@gmail.com"
] | dubey.anandkr@gmail.com |
baf032ea3d1791773949ff5bf09255d17efe7531 | 5b794066e7a4c9093cee207fcd5e4802e5a9b86f | /durin/admin.py | fa0e9b2cc111512a06f4a77fafa6f1287ef2a4df | [
"MIT"
] | permissive | iCodeIN/django-rest-durin | 286860d8dedbb49bc28293209949f493319aa49b | 260a7a6d992179dc91a4c389002880a270d97eab | refs/heads/main | 2023-03-01T14:16:28.077338 | 2021-02-09T21:24:54 | 2021-02-09T21:24:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,248 | py | from django.contrib import admin
from durin import models
@admin.register(models.AuthToken)
class AuthTokenAdmin(admin.ModelAdmin):
    """Django's ModelAdmin for AuthToken.\n
    In most cases, you would want to override this to make
    ``AuthTokenAdmin.raw_id_fields = ("user",)``
    """

    # token/expiry are generated, not hand-edited (see fieldsets note).
    exclude = ("token", "expiry")
    list_display = (
        "token",
        "client_name",
        "user",
        "created",
        "expires_in",
    )
    list_filter = ("client__name", "user")
    fieldsets = [
        (
            "API Auth Token",
            {
                "fields": ("user", "client"),
                "description": """
                <h3>Token will be auto-generated on save.</h3>
                <h3>Token will carry the same expiry as the
                selected client's token TTL.</h3>
                """,
            },
        ),
    ]

    def client_name(self, obj):
        # Column accessor for list_display.
        return obj.client.name

    def save_model(self, request, obj, form, change):
        # Route creation through the manager so the token value and expiry
        # are generated there instead of saving the bare form instance.
        return models.AuthToken.objects.create(obj.user, obj.client)
@admin.register(models.Client)
class ClientAdmin(admin.ModelAdmin):
    """
    Django's ModelAdmin for Client.
    """

    # Read-only overview columns in the changelist.
    list_display = ("id", "name", "token_ttl")
| [
"eshaan7bansal@gmail.com"
] | eshaan7bansal@gmail.com |
b608dd3e6c69f6ddd5710aa9ce505003bc56588a | c7c188dbd5cd67db196d5f5910806a78d3e8c62e | /ants.py | dd429eae431cea4e23191f97bf38da6e6a80e115 | [] | no_license | EricBWang/ants | 1f502ffebbb3183b4529aac832e9599e8352c2f3 | c18c7c6a10de0711365052204929149f6221316e | refs/heads/main | 2023-08-29T10:53:57.987523 | 2021-11-09T23:41:55 | 2021-11-09T23:41:55 | 426,417,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,890 | py | """CS 61A presents Ants Vs. SomeBees."""
import random
from ucb import main, interact, trace
from collections import OrderedDict
################
# Core Classes #
################
class Place(object):
    """A Place holds insects and has an exit to another Place."""

    def __init__(self, name, exit=None):
        """Create a Place with the given NAME and EXIT.

        name -- A string; the name of this Place.
        exit -- The Place reached by exiting this Place (may be None).
        """
        self.name = name
        self.exit = exit
        self.bees = []        # A list of Bees
        self.ant = None       # An Ant
        self.entrance = None  # A Place
        # Phase 1: Add an entrance to the exit
        # BEGIN Problem 2
        # Link both ways: the exit neighbour's entrance points back here.
        if self.exit:
            self.exit.entrance = self
        # END Problem 2

    def add_insect(self, insect):
        """Add an INSECT to this Place.

        There can be at most one Ant in a Place, unless exactly one of them is
        a BodyguardAnt (Phase 4), in which case there can be two. If add_insect
        tries to add more Ants than is allowed, an assertion error is raised.

        There can be any number of Bees in a Place.
        """
        if insect.is_ant:
            if self.ant is None:
                self.ant = insect
            else:
                # Phase 4: Special handling for BodyguardAnt
                # BEGIN Problem 7
                # Either the resident ant shelters the newcomer, or the
                # newcomer is a container that swallows the resident.
                if self.ant.can_contain(insect):
                    self.ant.contain_ant(insect)
                elif insect.can_contain(self.ant):
                    insect.contain_ant(self.ant)
                    self.ant = insect
                else:
                    assert self.ant is None, 'Two ants in {0}'.format(self)
                # END Problem 7
        else:
            self.bees.append(insect)
        insect.place = self

    def remove_insect(self, insect):
        """Remove an INSECT from this Place.

        A target Ant may either be directly in the Place, or be contained by a
        container Ant at this place. The true QueenAnt may not be removed. If
        remove_insect tries to remove an Ant that is not anywhere in this
        Place, an AssertionError is raised.

        A Bee is just removed from the list of Bees.
        """
        if insect.is_ant:
            # Phase 4: Special Handling for BodyguardAnt and QueenAnt
            # The true queen is irremovable; removal is silently ignored.
            if isinstance(insect, QueenAnt) and not insect.fake:
                return None
            elif self.ant is insect:
                # Removing a container exposes the ant it was protecting.
                if hasattr(self.ant, 'container') and self.ant.container:
                    self.ant = self.ant.ant
                else:
                    self.ant = None
            else:
                # Otherwise the target must be inside a container here.
                if hasattr(self.ant, 'container') and self.ant.container and self.ant.ant is insect:
                    self.ant.ant = None
                else:
                    assert False, '{0} is not in {1}'.format(insect, self)
        else:
            self.bees.remove(insect)

        insect.place = None

    def __str__(self):
        return self.name
class Insect(object):
    """Base class for Ants and Bees: tracks an armor total and a Place."""

    is_ant = False
    damage = 0
    watersafe = False

    def __init__(self, armor, place=None):
        """Create an Insect with ARMOR and an optional starting PLACE."""
        self.armor = armor
        # Maintained by Place.add_insect and Place.remove_insect.
        self.place = place

    def reduce_armor(self, amount):
        """Subtract AMOUNT from armor; at zero or below, leave the board.

        >>> test_insect = Insect(5)
        >>> test_insect.reduce_armor(2)
        >>> test_insect.armor
        3
        """
        remaining = self.armor - amount
        self.armor = remaining
        if remaining <= 0:
            self.place.remove_insect(self)

    def action(self, colony):
        """Perform this insect's once-per-turn action (no-op by default).

        colony -- The AntColony, used to access game state information.
        """

    def __repr__(self):
        return '{0}({1}, {2})'.format(type(self).__name__, self.armor, self.place)
class Bee(Insect):
    """A Bee advances toward the queen, stinging any Ant that blocks it."""

    name = 'Bee'
    damage = 1
    watersafe = True

    def sting(self, ant):
        """Deal this Bee's damage to ANT."""
        ant.reduce_armor(self.damage)

    def move_to(self, place):
        """Relocate from the current Place to PLACE."""
        self.place.remove_insect(self)
        place.add_insect(self)

    def blocked(self):
        """True when an ant here blocks the path (NinjaAnts do not)."""
        ant_here = self.place.ant
        return ant_here is not None and ant_here.blocks_path is True

    def action(self, colony):
        """Sting the blocking Ant, or else advance toward the exit.

        colony -- The AntColony, used to access game state information.
        """
        if self.blocked():
            self.sting(self.place.ant)
            return
        if self.armor > 0 and self.place.exit is not None:
            self.move_to(self.place.exit)
class Ant(Insect):
    """An Ant occupies a place and does work for the colony."""

    is_ant = True
    implemented = False  # only implemented Ant classes may be instantiated
    food_cost = 0
    blocks_path = True
    container = False
    watersafe = False

    def __init__(self, armor=1):
        """Create an Ant with ARMOR (default 1)."""
        super().__init__(armor)

    def can_contain(self, other):
        """True when this ant is an empty container and OTHER is not
        itself a container."""
        if not self.container:
            return False
        return self.ant is None and not other.container
class HarvesterAnt(Ant):
    """HarvesterAnt produces 1 additional food per turn for the colony."""

    name = 'Harvester'
    implemented = True
    food_cost = 2

    def action(self, colony):
        """Add one food to COLONY's stockpile.

        colony -- The AntColony, used to access game state information.
        """
        # BEGIN Problem 1
        colony.food = colony.food + 1
        # END Problem 1
class ThrowerAnt(Ant):
    """ThrowerAnt throws a leaf each turn at the nearest Bee in its range."""

    name = 'Thrower'
    implemented = True
    damage = 1
    food_cost = 3

    def nearest_bee(self, hive):
        """Return the nearest Bee in a Place that is not the HIVE, connected to
        the ThrowerAnt's Place by following entrances.

        This method returns None if there is no such Bee (or none in range).
        Subclasses narrow the search window by defining `min_range` and/or
        `max_range` class attributes, which are probed here via hasattr.
        """
        # BEGIN Problem 3B
        cur_place = self.place
        counter = 0
        max_r = float('inf')
        if hasattr(self,'max_range'):
            max_r = self.max_range
        if hasattr(self,'min_range'):
            # Skip over places closer than the minimum range.
            for _ in range(self.min_range):
                cur_place = cur_place.entrance
        # Walk toward the hive, returning the first place that has a bee.
        while cur_place != hive and not cur_place is None:
            bee = random_or_none(cur_place.bees)
            if bee:
                return bee
            else:
                if counter < max_r:
                    cur_place = cur_place.entrance
                    counter += 1
                else:
                    return
        # Fallback when the walk reached the hive: re-check our own place
        # (usually empty, so this yields None).
        return random_or_none(self.place.bees)
        # END Problem 3B

    def throw_at(self, target):
        """Throw a leaf at the TARGET Bee, reducing its armor."""
        if target is not None:
            target.reduce_armor(self.damage)

    def action(self, colony):
        """Throw a leaf at the nearest Bee in range."""
        self.throw_at(self.nearest_bee(colony.hive))
def random_or_none(s):
    """Return a random element of sequence S, or None when S is empty."""
    if not s:
        return None
    return random.choice(s)
##############
# Extensions #
##############
class Water(Place):
    """A Place that drowns every insect that is not watersafe."""

    def add_insect(self, insect):
        """Add INSECT; a non-watersafe insect immediately loses all armor."""
        # BEGIN Problem 3A
        super().add_insect(insect)
        if not insect.watersafe:
            insect.reduce_armor(insect.armor)
        # END Problem 3A
class FireAnt(Ant):
    """FireAnt cooks any Bee in its Place when it expires."""

    name = 'Fire'
    damage = 3
    # BEGIN Problem 4A
    food_cost = 5
    armor = 1
    implemented = True
    # END Problem 4A

    def reduce_armor(self, amount):
        """Reduce armor by AMOUNT, and remove the FireAnt from its place if it
        has no armor remaining. If the FireAnt dies, damage each of the bees in
        the current place.
        """
        # BEGIN Problem 4A
        # Splash the bees *before* the ant is removed, iterating over a
        # copy since reduce_armor may delete bees from place.bees.
        if self.armor - amount <= 0:
            for b in self.place.bees[:]:
                b.reduce_armor(self.damage)
        Ant.reduce_armor(self, amount)
        # END Problem 4A
class LongThrower(ThrowerAnt):
    """A ThrowerAnt that only throws leaves at Bees at least 5 places away."""

    name = 'Long'
    # BEGIN Problem 4B
    min_range = 5  # read by ThrowerAnt.nearest_bee via hasattr
    food_cost = 2
    implemented = True
    # END Problem 4B
class ShortThrower(ThrowerAnt):
    """A ThrowerAnt that only throws leaves at Bees at most 3 places away."""

    name = 'Short'
    # BEGIN Problem 4B
    max_range = 3  # read by ThrowerAnt.nearest_bee via hasattr
    food_cost = 2
    implemented = True
    # END Problem 4B
# BEGIN Problem 5A
class WallAnt(Ant):
    """A durable ant that does nothing but soak up damage."""

    name = 'Wall'
    implemented = True
    food_cost = 4

    def __init__(self):
        # Walls start with 4 armor instead of the default 1.
        super().__init__(4)
class NinjaAnt(Ant):
    """NinjaAnt does not block the path and damages all bees in its place."""

    name = 'Ninja'
    damage = 1
    blocks_path = False  # bees walk straight past (and cannot sting it)
    food_cost = 5
    implemented = True

    def action(self, colony):
        """Strike every bee currently sharing this place."""
        # BEGIN Problem 6A
        # Iterate over a copy: reduce_armor may remove bees from the list.
        for bee in list(self.place.bees):
            bee.reduce_armor(self.damage)
        # END Problem 6A
# BEGIN Problem 5B
class ScubaThrower(ThrowerAnt):
    """ScubaThrower is a ThrowerAnt that can be in a Water place.
    Consequently, it is more expensive to create."""

    name = 'Scuba'
    implemented = True
    watersafe = True  # survives Water.add_insect's drowning check
    food_cost = 6
# The ScubaThrower class
# END Problem 5B
class HungryAnt(Ant):
    """HungryAnt swallows a random local Bee whole, then spends three
    turns digesting it before it can eat again."""

    name = 'Hungry'
    food_cost = 4
    time_to_digest = 3
    implemented = True

    def __init__(self):
        # Turns of digestion remaining; 0 means ready to eat.
        self.digesting = 0
        super().__init__()

    def eat_bee(self, bee):
        """Instantly kill BEE (if any) and start the digestion timer."""
        if bee is None:
            return
        bee.reduce_armor(bee.armor)
        self.digesting = self.time_to_digest

    def action(self, colony):
        if self.digesting > 0:
            self.digesting -= 1
        else:
            self.eat_bee(random_or_none(self.place.bees))
class BodyguardAnt(Ant):
    """BodyguardAnt shelters another Ant and lets it act from safety."""

    name = 'Bodyguard'
    container = True
    food_cost = 4
    implemented = True

    def __init__(self):
        super().__init__(2)
        self.ant = None  # the Ant hidden inside this bodyguard

    def contain_ant(self, ant):
        """Take ANT under this bodyguard's protection."""
        self.ant = ant

    def action(self, colony):
        """Delegate the turn to the protected ant, if there is one."""
        if self.ant is not None:
            self.ant.action(colony)
class TankAnt(BodyguardAnt):
    """A bodyguard that also damages every bee sharing its place."""

    name = 'Tank'
    damage = 1
    food_cost = 6
    implemented = True

    def action(self, colony):
        # Let the contained ant act first, then strike the local bees
        # (copy the list: reduce_armor may remove bees from it).
        super().action(colony)
        for bee in list(self.place.bees):
            bee.reduce_armor(self.damage)
class QueenAnt(ScubaThrower): # You should change this line
    """The Queen of the colony. The game is over if a bee enters her place."""

    name = 'Queen'
    # BEGIN Problem 9
    food_cost = 7
    fake = False       # instances after the first are impostors
    num_queens = 0     # class-level count of queens ever constructed
    implemented = True
    # END Problem 9

    def __init__(self):
        # BEGIN Problem 9
        Ant.__init__(self)
        # Only the first queen constructed is real; later ones are fakes
        # that die on their first action.
        if QueenAnt.num_queens > 0:
            self.fake = True
        self.doubled = []  # ants whose damage this queen already doubled
        QueenAnt.num_queens += 1
        # END Problem 9

    def action(self, colony):
        """A queen ant throws a leaf, but also doubles the damage of ants
        in her tunnel.

        Impostor queens do only one thing: reduce their own armor to 0.
        """
        # BEGIN Problem 9
        if self.fake:
            self.reduce_armor(self.armor)
            return None
        else:
            ScubaThrower.action(self, colony)
            # Walk from the queen back along the exits, doubling each
            # ant's damage exactly once (tracked in self.doubled).
            behind = self.place
            while behind.exit is not None:
                behind = behind.exit
                backant = behind.ant
                if backant is not None and not backant in self.doubled:
                    backant.damage *= 2
                    self.doubled.append(backant)
                if isinstance(backant, BodyguardAnt):
                    # Also double the ant sheltered inside a bodyguard.
                    if backant.ant is not None and not backant.ant in self.doubled:
                        backant.ant.damage *= 2
                        self.doubled.append(backant.ant)
        # END Problem 9

    def reduce_armor(self, amount):
        """Reduce armor by AMOUNT, and if the True QueenAnt has no armor
        remaining, signal the end of the game.
        """
        # BEGIN Problem 9
        ScubaThrower.reduce_armor(self, amount)
        if self.armor <= 0 and not self.fake:
            bees_win()
        # END Problem 9
class AntRemover(Ant):
    """GUI-only pseudo-ant used to remove ants from the board."""

    name = 'Remover'
    implemented = False

    def __init__(self):
        # Zero armor: it never actually occupies the board.
        super().__init__(0)
##################
# Status Effects #
##################
def make_slow(action):
    """Wrap ACTION so that, while the effect lasts, it only runs when the
    colony time is even; once the duration expires it behaves normally.

    action -- An action method of some Bee
    """
    def timekeeper(t):
        def slow(colony):
            nonlocal t
            if t > 0:
                t -= 1
                if colony.time % 2 == 0:
                    return action(colony)
                return None
            return action(colony)
        return slow
    return timekeeper
def make_stun(action):
    """Wrap ACTION so it does nothing while the stun duration lasts.

    action -- An action method of some Bee
    """
    def timekeeper(t):
        def stun(colony):
            nonlocal t
            if t <= 0:
                return action(colony)
            t -= 1
            return None
        return stun
    return timekeeper
def apply_effect(effect, bee, duration):
    """Replace BEE's action with EFFECT applied to it for DURATION turns."""
    wrapped = effect(bee.action)
    bee.action = wrapped(duration)
class SlowThrower(ThrowerAnt):
    """ThrowerAnt whose leaves slow a Bee for three turns."""

    name = 'Slow'
    food_cost = 4
    implemented = True

    def throw_at(self, target):
        if target is None:
            return
        apply_effect(make_slow, target, 3)
class StunThrower(ThrowerAnt):
    """ThrowerAnt whose leaves stun a Bee for one turn."""

    name = 'Stun'
    food_cost = 6
    implemented = True

    def throw_at(self, target):
        if target is None:
            return
        apply_effect(make_stun, target, 1)
##################
# Bees Extension #
##################
class Wasp(Bee):
    """Class of Bee that has higher damage."""
    name = 'Wasp'
    damage = 2  # twice an ordinary Bee's sting
class Hornet(Bee):
    """Class of bee that is capable of taking two actions per turn, although
    its overall damage output is lower. Immune to status effects.
    """
    name = 'Hornet'
    damage = 0.25

    def action(self, colony):
        # Acts twice per turn, re-checking armor between the two actions.
        for i in range(2):
            if self.armor > 0:
                super().action(colony)

    def __setattr__(self, name, value):
        # Silently drop assignments to 'action': apply_effect implements
        # slow/stun by rebinding bee.action, so refusing that write makes
        # the Hornet immune to those status effects.
        if name != 'action':
            object.__setattr__(self, name, value)
class NinjaBee(Bee):
    """A Bee that cannot be blocked. Is capable of moving past all defenses to
    assassinate the Queen.
    """
    name = 'NinjaBee'

    def blocked(self):
        # Never blocked, so Bee.action always advances instead of stinging.
        return False
class Boss(Wasp, Hornet):
    """The leader of the bees. Combines the high damage of the Wasp along with
    status effect immunity of Hornets. Damage to the boss is capped up to 8
    damage by a single attack.
    """
    name = 'Boss'
    damage_cap = 8
    action = Wasp.action

    def reduce_armor(self, amount):
        # Apply diminishing returns before delegating to Bee's armor logic.
        super().reduce_armor(self.damage_modifier(amount))

    def damage_modifier(self, amount):
        # amount * cap/(cap+amount): monotone increasing, asymptotically
        # approaching damage_cap as amount grows.
        return amount * self.damage_cap/(self.damage_cap + amount)
class Hive(Place):
    """The Place from which the Bees launch their assault.

    assault_plan -- An AssaultPlan; when & where bees enter the colony.
    """

    def __init__(self, assault_plan):
        self.name = 'Hive'
        self.assault_plan = assault_plan
        self.bees = []
        for bee in assault_plan.all_bees:
            self.add_insect(bee)
        # The following attributes are always None for a Hive
        self.entrance = None
        self.ant = None
        self.exit = None

    def strategy(self, colony):
        """Release this turn's scheduled wave into random tunnel ends."""
        # Places whose entrance is this hive are the bee entry points.
        exits = [p for p in colony.places.values() if p.entrance is self]
        for bee in self.assault_plan.get(colony.time, []):
            bee.move_to(random.choice(exits))
            colony.active_bees.append(bee)
class AntColony(object):
    """An ant collective that manages global game state and simulates time.

    Attributes:
    time -- elapsed time
    food -- the colony's available food total
    queen -- the place where the queen resides
    places -- A list of all places in the colony (including a Hive)
    bee_entrances -- A list of places that bees can enter
    """

    def __init__(self, strategy, hive, ant_types, create_places, dimensions, food=2):
        """Create an AntColony for simulating a game.

        Arguments:
        strategy -- a function to deploy ants to places
        hive -- a Hive full of bees
        ant_types -- a list of ant constructors
        create_places -- a function that creates the set of places
        dimensions -- a pair containing the dimensions of the game layout
        """
        self.time = 0
        self.food = food
        self.strategy = strategy
        self.hive = hive
        self.ant_types = OrderedDict((a.name, a) for a in ant_types)
        self.dimensions = dimensions
        self.active_bees = []
        self.configure(hive, create_places)

    def configure(self, hive, create_places):
        """Configure the places in the colony."""
        self.queen = QueenPlace('AntQueen')
        self.places = OrderedDict()
        self.bee_entrances = []
        def register_place(place, is_bee_entrance):
            # Callback handed to the layout function so every created
            # place gets indexed here; entrances link back to the hive.
            self.places[place.name] = place
            if is_bee_entrance:
                place.entrance = hive
                self.bee_entrances.append(place)
        register_place(self.hive, False)
        create_places(self.queen, register_place, self.dimensions[0], self.dimensions[1])

    def simulate(self):
        """Simulate an attack on the ant colony (i.e., play the game).

        Returns True when the ants win, False when the bees do; the end of
        the game is signalled via the GameOver exceptions below.
        """
        num_bees = len(self.bees)
        try:
            while True:
                self.hive.strategy(self) # Bees invade
                self.strategy(self) # Ants deploy
                for ant in self.ants: # Ants take actions
                    if ant.armor > 0:
                        ant.action(self)
                for bee in self.active_bees[:]: # Bees take actions
                    if bee.armor > 0:
                        bee.action(self)
                    if bee.armor <= 0:
                        num_bees -= 1
                        self.active_bees.remove(bee)
                if num_bees == 0:
                    raise AntsWinException()
                self.time += 1
        except AntsWinException:
            print('All bees are vanquished. You win!')
            return True
        except BeesWinException:
            print('The ant queen has perished. Please try again.')
            return False

    def deploy_ant(self, place_name, ant_type_name):
        """Place an ant if enough food is available.

        This method is called by the current strategy to deploy ants.
        """
        constructor = self.ant_types[ant_type_name]
        if self.food < constructor.food_cost:
            print('Not enough food remains to place ' + ant_type_name)
        else:
            ant = constructor()
            self.places[place_name].add_insect(ant)
            self.food -= constructor.food_cost
            return ant

    def remove_ant(self, place_name):
        """Remove an Ant from the Colony."""
        place = self.places[place_name]
        if place.ant is not None:
            place.remove_insect(place.ant)

    @property
    def ants(self):
        # All ants currently on the board (container contents excluded).
        return [p.ant for p in self.places.values() if p.ant is not None]

    @property
    def bees(self):
        # Every bee anywhere, including those still waiting in the hive.
        return [b for p in self.places.values() for b in p.bees]

    @property
    def insects(self):
        return self.ants + self.bees

    def __str__(self):
        status = ' (Food: {0}, Time: {1})'.format(self.food, self.time)
        return str([str(i) for i in self.ants + self.bees]) + status
class QueenPlace(Place):
    """QueenPlace at the end of the tunnel, where the queen resides."""

    def add_insect(self, insect):
        """Add an Insect to this Place.

        Can't actually add Ants to a QueenPlace. However, if a Bee attempts to
        enter the QueenPlace, a BeesWinException is raised, signaling the end
        of a game.
        """
        assert not insect.is_ant, 'Cannot add {0} to QueenPlace'
        raise BeesWinException()
def ants_win():
    """Signal that Ants win (caught by AntColony.simulate)."""
    raise AntsWinException()
def bees_win():
    """Signal that Bees win (caught by AntColony.simulate)."""
    raise BeesWinException()
def ant_types():
    """Return a list of all implemented Ant classes."""
    discovered = []
    frontier = [Ant]
    # Breadth-first walk of the subclass tree, one generation at a time.
    while frontier:
        frontier = [sub for cls in frontier for sub in cls.__subclasses__()]
        discovered.extend(frontier)
    return [t for t in discovered if t.implemented]
# Control-flow exceptions: AntColony.simulate ends the game by catching
# one of these two subclasses.
class GameOverException(Exception):
    """Base game over Exception."""
    pass

class AntsWinException(GameOverException):
    """Exception to signal that the ants win."""
    pass

class BeesWinException(GameOverException):
    """Exception to signal that the bees win."""
    pass
def interactive_strategy(colony):
    """A strategy that starts an interactive session and lets the user make
    changes to the colony.

    For example, one might deploy a ThrowerAnt to the first tunnel by invoking
    colony.deploy_ant('tunnel_0_0', 'Thrower')
    """
    print('colony: ' + str(colony))
    # interact (from ucb) drops into a REPL with the caller's frame.
    msg = '<Control>-D (<Control>-Z <Enter> on Windows) completes a turn.\n'
    interact(msg)
def start_with_strategy(args, strategy):
    """Reads command-line arguments and starts a game with those options."""
    import argparse
    parser = argparse.ArgumentParser(description="Play Ants vs. SomeBees")
    parser.add_argument('-d', type=str, metavar='DIFFICULTY',
                        help='sets difficulty of game (easy/medium/hard/insane)')
    parser.add_argument('-w', '--water', action='store_true',
                        help='loads a full layout with water')
    parser.add_argument('--food', type=int,
                        help='number of food to start with when testing', default=2)
    # NOTE(review): the ARGS parameter is shadowed here, so the caller's
    # value is ignored and sys.argv is always parsed instead.
    args = parser.parse_args()

    assault_plan = make_test_assault_plan()
    layout = dry_layout
    tunnel_length = 9
    num_tunnels = 1
    food = args.food

    if args.water:
        layout = wet_layout
    # Difficulty selects both the assault plan and the tunnel count.
    if args.d in ['e', 'easy']:
        assault_plan = make_easy_assault_plan()
        num_tunnels = 2
    elif args.d in ['n', 'normal']:
        assault_plan = make_normal_assault_plan()
        num_tunnels = 3
    elif args.d in ['h', 'hard']:
        assault_plan = make_hard_assault_plan()
        num_tunnels = 4
    elif args.d in ['i', 'insane']:
        assault_plan = make_insane_assault_plan()
        num_tunnels = 4

    hive = Hive(assault_plan)
    dimensions = (num_tunnels, tunnel_length)
    return AntColony(strategy, hive, ant_types(), layout, dimensions, food).simulate()
###########
# Layouts #
###########
def wet_layout(queen, register_place, tunnels=3, length=9, moat_frequency=3):
    """Register TUNNELS rows of LENGTH places each, placing Water every
    MOAT_FREQUENCY steps (0 disables water entirely)."""
    for row in range(tunnels):
        prev = queen
        for step in range(length):
            wet = moat_frequency != 0 and (step + 1) % moat_frequency == 0
            kind = Water if wet else Place
            prefix = 'water' if wet else 'tunnel'
            prev = kind('{0}_{1}_{2}'.format(prefix, row, step), prev)
            # The last place of each row is a bee entrance.
            register_place(prev, step == length - 1)
def dry_layout(queen, register_place, tunnels=3, length=9):
    """Register tunnels containing no water at all."""
    wet_layout(queen, register_place, tunnels, length, moat_frequency=0)
#################
# Assault Plans #
#################
class AssaultPlan(dict):
    """The Bees' plan of attack for the Colony: a dict mapping each time
    (int) to the wave (list of Bees) that enters the colony then."""

    def add_wave(self, bee_type, bee_armor, time, count):
        """Schedule COUNT Bees of BEE_TYPE with BEE_ARMOR armor at TIME.

        Returns self so that waves can be chained fluently.
        """
        wave = self.setdefault(time, [])
        wave.extend(bee_type(bee_armor) for _ in range(count))
        return self

    @property
    def all_bees(self):
        """Every scheduled Bee, flattened across all waves."""
        return [bee for wave in self.values() for bee in wave]
def make_test_assault_plan():
    """A tiny plan for testing: one armor-3 Bee at time 2 and one at time 3."""
    plan = AssaultPlan()
    for time in (2, 3):
        plan.add_wave(Bee, 3, time, 1)
    return plan
def make_easy_assault_plan():
    """Easy difficulty: single armor-3 Bees every other turn plus one of
    each special bee and a Boss at time 16."""
    plan = AssaultPlan()
    for time in range(3, 16, 2):
        plan.add_wave(Bee, 3, time, 1)
    specials = ((Wasp, 3, 4), (NinjaBee, 3, 8), (Hornet, 3, 12), (Boss, 15, 16))
    for bee_type, armor, time in specials:
        plan.add_wave(bee_type, armor, time, 1)
    return plan
def make_normal_assault_plan():
    """Normal difficulty: pairs of armor-3 Bees and specials, followed by a
    heavier Boss stage ending with one armor-20 Boss at time 30."""
    plan = AssaultPlan()
    for time in range(3, 16, 2):
        plan.add_wave(Bee, 3, time, 2)
    for bee_type, time, count in ((Wasp, 4, 1), (NinjaBee, 8, 1),
                                  (Hornet, 12, 1), (Wasp, 16, 1)):
        plan.add_wave(bee_type, 3, time, count)
    # Boss Stage
    for time in range(21, 30, 2):
        plan.add_wave(Bee, 3, time, 2)
    for bee_type, time in ((Wasp, 22), (Hornet, 24), (NinjaBee, 26), (Hornet, 28)):
        plan.add_wave(bee_type, 3, time, 2)
    plan.add_wave(Boss, 20, 30, 1)
    return plan
def make_hard_assault_plan():
    """Hard difficulty: armor-4 bees throughout, triple Bee waves in the
    Boss stage, and an armor-30 Boss at time 30."""
    plan = AssaultPlan()
    for time in range(3, 16, 2):
        plan.add_wave(Bee, 4, time, 2)
    for bee_type, time in ((Hornet, 4), (Wasp, 8), (NinjaBee, 12), (Wasp, 16)):
        plan.add_wave(bee_type, 4, time, 2)
    # Boss Stage
    for time in range(21, 30, 2):
        plan.add_wave(Bee, 4, time, 3)
    for bee_type, time in ((Wasp, 22), (Hornet, 24), (NinjaBee, 26), (Hornet, 28)):
        plan.add_wave(bee_type, 4, time, 2)
    plan.add_wave(Boss, 30, 30, 1)
    return plan
def make_insane_assault_plan():
    """Insane difficulty: an early Hornet rush at time 2, armor-5 bees
    everywhere, and TWO armor-30 Bosses at time 30."""
    plan = AssaultPlan()
    plan.add_wave(Hornet, 5, 2, 2)
    for time in range(3, 16, 2):
        plan.add_wave(Bee, 5, time, 2)
    for bee_type, time in ((Hornet, 4), (Wasp, 8), (NinjaBee, 12), (Wasp, 16)):
        plan.add_wave(bee_type, 5, time, 2)
    # Boss Stage
    for time in range(21, 30, 2):
        plan.add_wave(Bee, 5, time, 3)
    for bee_type, time in ((Wasp, 22), (Hornet, 24), (NinjaBee, 26), (Hornet, 28)):
        plan.add_wave(bee_type, 5, time, 2)
    plan.add_wave(Boss, 30, 30, 2)
    return plan
from utils import *
@main
def run(*args):
    # NOTE(review): @main comes from `utils` (star-imported above); it
    # presumably invokes this function when the module is run as a script --
    # confirm against utils.main.
    # Wrap Insect.reduce_armor so expired insects are reported before each
    # armor reduction (debug/monitoring hook), then start an interactive game.
    Insect.reduce_armor = class_method_wrapper(Insect.reduce_armor,
                                               pre=print_expired_insects)
    start_with_strategy(args, interactive_strategy)
| [
"noreply@github.com"
] | EricBWang.noreply@github.com |
f21994842ce2f26db2b2a11ae1b482049221632b | e5fc020acb66afa4ab1f3aae088e1d85633c058a | /config.py | ef05c76c0bf2847ebb59c335137a9a0925deb016 | [
"MIT"
] | permissive | TipsyApple/odd_GAN | 614aec5195f97d2801ed119767a5c6f71824e439 | ab80311b7c9b8555f36048720e8bed85d8c76ab5 | refs/heads/master | 2020-03-17T22:03:53.566148 | 2018-05-18T17:57:41 | 2018-05-18T17:57:41 | 133,986,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | import os
import numpy as np
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
# Global configuration object; module-internal name is __C, public alias cfg.
__C = edict()
# Consumers can get config by:
#   from fast_rcnn_config import cfg
cfg = __C
#
# Training options
#
__C.TRAIN = edict()
# Minibatch size
__C.TRAIN.BATCH_SIZE = 4
# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
# they were trained with
# NOTE (translated from the original Chinese comment): questionable -- these
# values were copied from someone else's code for image mean subtraction and
# still need to be verified.
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# Data Path -- machine-specific absolute path to the VOC2007 dataset root.
__C.DATA_PATH = '/Users/martin/Documents/code/DataSet/VOC2007'
# Image Set Path (ImageSets/Main split files)
__C.IMAGE_SET_PATH = os.path.join(__C.DATA_PATH, 'ImageSets', 'Main')
# Annotations Path (per-image XML annotations)
__C.ANNOTATION_PATH = os.path.join(__C.DATA_PATH, 'Annotations')
# JPEG image directory
__C.JPEG_IMAGE_PATH = os.path.join(__C.DATA_PATH, 'JPEGImages')
| [
"TipsyApple@users.noreply.github.com"
] | TipsyApple@users.noreply.github.com |
b556458049379afa354aa2a80ad214a3013613bc | 5c472a661ef18a4fef46eb8ce589305c30d91b50 | /TALN/check_lyrics.py | cfe9acd6bdc8c15570a3bd968163737c51826721 | [] | no_license | caroubier/ESGI-5a-TALN | d19d8967c19609ba57ed03fdad5c717be78f4861 | 0748c438901456bb2ab3c3ec6d7467f0653393ea | refs/heads/master | 2020-12-14T16:11:38.407174 | 2020-01-23T16:38:21 | 2020-01-23T16:38:21 | 234,802,693 | 1 | 0 | null | 2020-09-02T21:06:25 | 2020-01-18T21:51:35 | Jupyter Notebook | UTF-8 | Python | false | false | 1,021 | py | import spacy
from spacy_langdetect import LanguageDetector
# Location of the lyrics sample analysed when this module is run as a script.
PATH="dataset/"
FILE="sample_lyrics.csv"
PATHFILE=PATH+FILE
def load_nlp_model():
    """Load the spaCy 'en_core_web_md' model used for language detection.

    :return: the loaded spaCy Language pipeline
    """
    print("loading en_core_web_md")
    model = spacy.load('en_core_web_md')
    print("core loaded")
    return model
def is_english(nlp, text):
    """
    Decide whether *text* is written in English.

    :param nlp: spaCy Language pipeline (see load_nlp_model)
    :param text: str, text to analyse
    :return: True iff the detected language is "en" with score > 0.90
    """
    # Bug fix: the language detector pipe used to be added unconditionally,
    # so a second call on the same pipeline raised an error because spaCy
    # forbids duplicate pipe names.  Only add it if it is not present yet.
    if 'language_detector' not in nlp.pipe_names:
        nlp.add_pipe(LanguageDetector(), name='language_detector', last=True)
    doc = nlp(text)
    lang = doc._.language["language"]
    score = doc._.language["score"]
    if lang == "en" and score > 0.90:
        print("CORPUS IS ENGLISH")
        return True
    else:
        print("CORPUS IS NOT ENGLISH")
        return False
if __name__ == '__main__':
    # Load the model once, read the whole lyrics sample from disk, and
    # report (via stdout and the return value) whether the corpus is English.
    nlp = load_nlp_model()
    with open(PATHFILE,"r") as ff:
        text = ff.read()
    is_english(nlp, text)
"noreply@github.com"
] | caroubier.noreply@github.com |
2654b97aa05a1e2827ee87d67123214fc44fead8 | a1f5016e58e3a1781a204728c86a6586ccf6a6ca | /yield_data/migrations/0001_initial.py | cc727e04dc4db4883ff43a257794c6887cdb40ec | [] | no_license | UpstatePedro/datavis_proj | 0e5d3038cf1bb6e3c1ac9f4236112604630a1d5e | cd447ee269955eaee2a5692d670a4a8e87801aca | refs/heads/master | 2021-05-04T10:26:53.824673 | 2018-07-08T20:06:12 | 2018-07-08T20:06:12 | 52,166,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,076 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-03-23 08:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 1.9, see file header).
    # Creates the CountyCropYield and StateCropYield tables; each row holds
    # one crop-yield observation and a foreign key to a boundary record in
    # the geo_borders app.

    initial = True

    dependencies = [
        ('geo_borders', '0002_usstateborder'),
    ]

    operations = [
        migrations.CreateModel(
            name='CountyCropYield',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('year', models.IntegerField()),
                ('crop_name', models.CharField(max_length=20)),
                ('region_name', models.CharField(max_length=50)),
                ('region_category', models.CharField(max_length=50)),
                ('yield_value', models.FloatField()),
                ('yield_unit', models.CharField(max_length=50)),
                ('county_code', models.IntegerField()),
                ('county_boundary_ref', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='geo_borders.UsCountyBorder')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='StateCropYield',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('year', models.IntegerField()),
                ('crop_name', models.CharField(max_length=20)),
                ('region_name', models.CharField(max_length=50)),
                ('region_category', models.CharField(max_length=50)),
                ('yield_value', models.FloatField()),
                ('yield_unit', models.CharField(max_length=50)),
                ('state_fips_code', models.IntegerField()),
                ('state_boundary_ref', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='geo_borders.UsStateBorder')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"piers.m.holden@gmail.com"
] | piers.m.holden@gmail.com |
6d7e6826915f4d4b7165da380fb427715c956736 | 64b92c3da1a58c723b8e98b7de66d0491066fe43 | /validation/validate_hanoi.py | ed17a392cc7935efac9d04813ba1d0e9415b211d | [
"MIT"
] | permissive | geektoni/AlphaNPI2 | c63c9d104fd106477b9196a9e85658b4437ccc8f | 044163cab76950af6cbcc3b9c714c3637c4ab814 | refs/heads/master | 2023-08-04T09:08:38.489587 | 2020-05-13T17:52:06 | 2020-05-13T17:52:06 | 228,867,563 | 3 | 1 | MIT | 2023-07-23T00:44:06 | 2019-12-18T15:21:33 | Python | UTF-8 | Python | false | false | 5,167 | py | from environments.hanoi_env import HanoiEnv, HanoiEnvEncoder
from core.policy import Policy
from core.network_only import NetworkOnly
import core.config as conf
import torch
import argparse
from core.mcts import MCTS
import numpy as np
import time
if __name__ == "__main__":
    # Validate a trained AlphaNPI policy on Towers of Hanoi instances of
    # increasing size, comparing MCTS-guided execution against the raw
    # network, and optionally logging results to ../results/.

    # Path to load policy
    #default_load_path = '../models/hanoi_npi_2019_5_17-11_45_25-1.pth'
    default_load_path = '../models/hanoi_npi_2019_5_17-11_45_25-1.pth'
    # Get command line params
    parser = argparse.ArgumentParser()
    parser.add_argument("--seed", help='random seed', default=1, type=int)
    parser.add_argument("--load-path", help='path to model to validate', default=default_load_path)
    parser.add_argument('--verbose', help='print training monitoring in console', action='store_true')
    parser.add_argument('--save-results', help='save training progress in .txt file', action='store_true')
    parser.add_argument('--num-cpus', help='number of cpus to use', default=8, type=int)
    args = parser.parse_args()
    # Get arguments
    seed = args.seed
    verbose = args.verbose
    save_results = args.save_results
    load_path = args.load_path
    num_cpus = args.num_cpus
    # Set number of cpus used
    torch.set_num_threads(num_cpus)
    # Set random seed (both numpy and torch, for reproducible validation)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if save_results:
        # get date and time, used to build a unique results file name
        ts = time.localtime(time.time())
        date_time = '{}_{}_{}-{}_{}_{}'.format(ts[0], ts[1], ts[2], ts[3], ts[4], ts[5])
        results_save_path = '../results/validation_hanoi_npi_{}-{}.txt'.format(date_time, seed)
        results_file = open(results_save_path, 'w')
    # Load environment constants from a throwaway 5-disk environment
    env_tmp = HanoiEnv(n=5, encoding_dim=conf.encoding_dim)
    num_programs = env_tmp.get_num_programs()
    num_non_primary_programs = env_tmp.get_num_non_primary_programs()
    observation_dim = env_tmp.get_observation_dim()
    programs_library = env_tmp.programs_library
    # Load Alpha-NPI policy and restore the trained weights
    encoder = HanoiEnvEncoder(env_tmp.get_observation_dim(), conf.encoding_dim)
    indices_non_primary_programs = [p['index'] for _, p in programs_library.items() if p['level'] > 0]
    policy = Policy(encoder, conf.hidden_size, num_programs, num_non_primary_programs, conf.program_embedding_dim,
                    conf.encoding_dim, indices_non_primary_programs, conf.learning_rate)
    policy.load_state_dict(torch.load(load_path))
    # Start Validation
    if verbose:
        print('Start validation for model: {}'.format(load_path))
    if save_results:
        results_file.write('Validation on model: {}'.format(load_path) + ' \n')
    t_i = time.time()
    # Evaluate on puzzles with 2..12 disks (a single episode per size).
    for n in [2, 3, 4, 5, 10, 12]:
        mcts_rewards_normalized = []
        mcts_rewards = []
        network_only_rewards = []
        max_depth_dict = {1: 8}
        mcts_test_params = {'number_of_simulations': conf.number_of_simulations_for_validation,
                            'max_depth_dict': max_depth_dict, 'temperature': conf.temperature,
                            'c_puct': conf.c_puct, 'exploit': True, 'level_closeness_coeff': conf.level_closeness_coeff,
                            'gamma': conf.gamma}
        for _ in range(1):
            env = HanoiEnv(n=n, encoding_dim=conf.encoding_dim)
            hanoi_index = env.programs_library['HANOI']['index']
            # Test with MCTS
            mcts = MCTS(policy, env, hanoi_index, **mcts_test_params)
            res = mcts.sample_execution_trace()
            mcts_reward = res[7]
            mcts_rewards.append(mcts_reward)
            if mcts_reward > 0:
                mcts_rewards_normalized.append(1.0)
            else:
                mcts_rewards_normalized.append(0.0)
            env = HanoiEnv(n=n, encoding_dim=conf.encoding_dim)
            hanoi_index = env.programs_library['HANOI']['index']
            # Test with network alone
            network_only = NetworkOnly(policy, env, max_depth_dict)
            netonly_reward, _ = network_only.play(hanoi_index)
            network_only_rewards.append(netonly_reward)
        mcts_rewards_normalized_mean = np.mean(np.array(mcts_rewards_normalized))
        mcts_rewards_mean = np.mean(np.array(mcts_rewards))
        network_only_rewards_mean = np.mean(np.array(network_only_rewards))
        if verbose:
            print('n: {}, mcts mean reward: {}, mcts mean normalized reward: {}, '
                  'network only mean reward: {}'.format(n, mcts_rewards_mean, mcts_rewards_normalized_mean,
                                                        network_only_rewards_mean))
        if save_results:
            # NOTE(review): 'str' shadows the builtin here; harmless within
            # this loop body but worth renaming.
            str = 'n: {}, mcts mean reward: {}, mcts mean normalized reward: {}, ' \
                  'network only mean reward: {}'.format(n, mcts_rewards_mean, mcts_rewards_normalized_mean,
                                                        network_only_rewards_mean)
            results_file.write(str + ' \n')
    t_f = time.time()
    if verbose:
        print('End of Validation!')
        duration = t_f - t_i
        print('Duration: {} minutes'.format(duration / 60))
    if save_results:
        results_file.close()
| [
"g.ligner@instadeep.com"
] | g.ligner@instadeep.com |
62d00cfb7dc7b0d89bbbda7fea717146394c477a | 14cf672eb0fb9a4a675beee6d6cdef66ded107b6 | /manage.py | d4168d23cfac0e83f254ebab45e23aed630161e1 | [] | no_license | mateuszkowalke/muzyka_niezalezna | e8abdc57974c45f813e6bc54b7bb02671b918729 | 7d9244d7e0e7e03ad26affae30750f3ae124a7cb | refs/heads/master | 2022-12-19T02:14:11.051839 | 2020-09-09T06:51:50 | 2020-09-09T06:51:50 | 286,457,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks for the muzyka_niezalezna project."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'muzyka_niezalezna.settings')
    try:
        import django.core.management as management
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    management.execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"mateusz.kowalke@gmail.com"
] | mateusz.kowalke@gmail.com |
4bc7058242c4bde692b83e1127a6d785f6ac088d | 3252907f13eb34741fb51b7288abf43bebecc312 | /getAlignmentRegions.py | 5ebeb002adeb6e622e0bed6b1000ab561b0ce8f0 | [] | no_license | NarlikarLab/exoDIVERSITY | 5ef3d8570ac22629009fcec25021351b60137155 | d646fc655d0cf686a11ce29de4731f32fb7a4f6d | refs/heads/master | 2023-08-14T01:34:11.678808 | 2021-10-05T10:35:49 | 2021-10-05T10:35:49 | 310,792,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,321 | py | import numpy as np
import re
import commands
import os
def revcomp(seq):
    """Return the reverse complement of a DNA sequence.

    Case is preserved; 'N'/'n' complement to themselves.  Any other
    character raises KeyError.
    """
    comp = {'A': 'T', 'a': 't', 'C': 'G', 'c': 'g',
            'G': 'C', 'g': 'c', 'T': 'A', 't': 'a',
            'N': 'N', 'n': 'n'}  # 'n' added: lowercase masked bases used to raise
    return ''.join(comp[base] for base in reversed(seq))
def getSeq(chrom,regionStart,regionEnd,twobitfile,stpref,reportedSt,seqlen):
    """
    Fetch chrom:regionStart-regionEnd from a .2bit genome by shelling out to
    the UCSC twoBitToFa tool, and orient the sequence to the preferred strand.

    stpref is the mode's strand preference; reportedSt is the strand the motif
    occurrence was reported on.  When they disagree the sequence is
    reverse-complemented.  Exits the whole program if the fetched sequence is
    not exactly seqlen bases long.

    NOTE: Python 2 code (print statements, 'commands' module).
    """
    (status,op) = commands.getstatusoutput('twoBitToFa -seq='+chrom+' -start='+str(regionStart)+' -end='+str(regionEnd)+' '+twobitfile+' stdout')
    seq = op.split('\n')
    header = seq[0][1:]  # FASTA header line without the leading '>'
    seq = ''.join(seq[1:]) # made single line
    if len(seq)!=seqlen:
        print 'Sequence length is not equal to ',seqlen
        print header,seq
        exit()
    # Reverse-complement whenever the reported strand disagrees with the
    # mode's strand preference; otherwise leave the sequence untouched.
    if stpref == '+':
        if reportedSt == '+':
            pass
        else:
            seq = ''.join(revcomp(seq))
    elif stpref =='-':
        if reportedSt == '+':
            seq = ''.join(revcomp(seq))
        else:
            pass
    return seq
def getInfo(infofile):
    """Parse a motif info file.

    The first line is a header and is skipped.  Each remaining tab-separated
    line has: column 0 'chrom:start-end', column 1 the mode id, column 2 the
    motif start offset within the region, column 5 the strand, and a last
    column that is either a numeric motif width (short string) or the motif
    sequence itself (length >= 6).

    Returns (motifmidCoords, motifWidths, count): motifmidCoords maps each
    mode to [chrom, motif-midpoint, strand, sequence-index] entries,
    motifWidths maps each mode to its motif width, and count is the number
    of data lines read.
    """
    midsByMode = {}
    widthByMode = {}
    seqIndex = 0
    with open(infofile, 'r') as handle:
        handle.readline()  # discard the header line
        for line in handle:
            fields = line.strip().split('\t')
            region = re.split(r'[:|-]', fields[0])
            chrom = region[0]
            regionStart = int(region[1])
            int(region[2])  # validate the region end field, as the original did
            mode = int(fields[1])
            motstart = regionStart + int(fields[2])
            last = fields[-1]
            # A short final field is a numeric width; a longer one is the
            # motif sequence, whose length is the width.
            width = int(last) if len(last) < 6 else len(last)
            entry = [chrom, motstart + width / 2, fields[5], seqIndex]
            if mode in widthByMode:
                midsByMode[mode].append(entry)
            else:
                widthByMode[mode] = width
                midsByMode[mode] = [entry]
            seqIndex += 1
    return (midsByMode, widthByMode, seqIndex)
def createPlots(outdir, modeOrder):
    """Render the aligned-motif heatmap by invoking the draw1.r R script.

    When modeOrder is non-empty its modes are appended to the command line
    so the script can order the rows accordingly.
    """
    cmd = 'Rscript draw1.r {0}/fastaMatrix.txt {0}/alignedMotifs.png'.format(outdir)
    if modeOrder != []:
        cmd += ' ' + ' '.join(map(str, modeOrder))
    os.system(cmd)
def getRegions(infofile,shifts,refMode,side,twobitfile,outdir):
    """
    Re-centre every motif occurrence on the reference mode's alignment and
    write the aligned sequences to outdir/alignedMotifs.txt plus a numeric
    fastaMatrix.txt (A/C/G/T/N encoded as 0-4) for plotting.

    shifts maps each mode to (direction 'L'/'R', offset, strand preference);
    refMode is the mode all others are aligned against; side is the number of
    bases kept on each side of the motif midpoint.

    NOTE(review): the midpoint arithmetic relies on integer division
    (motifWidths[m]/2 etc.); this script targets Python 2 (see getSeq).
    """
    modeWiseMotifmid, motifWidths,noOfSeqs = getInfo(infofile)
    ## Shift the motif mids accordingly
    refpwmWidth = motifWidths[refMode]
    for m in modeWiseMotifmid.keys():
        if m == refMode:
            continue
        if m == -1: continue
        # Three cases depending on how this mode's width compares with the
        # reference width; within each, 'L'/'R' give the shift direction and
        # the per-occurrence strand decides the sign of the move.
        if motifWidths[m] < refpwmWidth:
            if shifts[m][0] == 'L': # For left shift
                dist = abs(abs(shifts[m][1]) - motifWidths[m]/2)
                moveBy = (refpwmWidth/2) - dist
                if shifts[m][2] == '-':
                    moveBy = -1*moveBy
                for i in range(len(modeWiseMotifmid[m])):
                    if modeWiseMotifmid[m][i][2] == '+':
                        modeWiseMotifmid[m][i][1] += moveBy
                    elif modeWiseMotifmid[m][i][2] == '-':
                        modeWiseMotifmid[m][i][1] -= moveBy
            elif shifts[m][0] == 'R': # For right shift
                dist = abs(shifts[m][1]) + motifWidths[m]/2
                moveBy = (refpwmWidth/2) - dist
                if shifts[m][2] == '-':
                    moveBy = -1*moveBy
                for i in range(len(modeWiseMotifmid[m])):
                    if modeWiseMotifmid[m][i][2] == '+':
                        modeWiseMotifmid[m][i][1] += moveBy
                    elif modeWiseMotifmid[m][i][2] == '-':
                        modeWiseMotifmid[m][i][1] -= moveBy
        elif motifWidths[m] > refpwmWidth:
            if shifts[m][0] == 'L': # For left shift
                dist = abs(motifWidths[m]/2 - refpwmWidth/2)
                moveBy = abs(shifts[m][1]) - dist
                if shifts[m][2] == '-':
                    moveBy = -1*moveBy
                for i in range(len(modeWiseMotifmid[m])):
                    if modeWiseMotifmid[m][i][2] == '+':
                        modeWiseMotifmid[m][i][1] += moveBy
                    elif modeWiseMotifmid[m][i][2] == '-':
                        modeWiseMotifmid[m][i][1] -= moveBy
            elif shifts[m][0] == 'R': # For right shift
                dist = abs(motifWidths[m]/2 - refpwmWidth/2)
                moveBy = abs(shifts[m][1]) + dist
                if shifts[m][2] =='-':
                    moveBy = -1*moveBy
                for i in range(len(modeWiseMotifmid[m])):
                    if modeWiseMotifmid[m][i][2] == '+':
                        modeWiseMotifmid[m][i][1] -= moveBy
                    elif modeWiseMotifmid[m][i][2] == '-':
                        modeWiseMotifmid[m][i][1] += moveBy
        else: # motifWidths[m] == refpwmwidth
            # NOTE(review): this branch mutates shifts[m][1] in place, so
            # calling getRegions twice with the same shifts dict would
            # double-negate '-' preference modes.
            if shifts[m][2] == '-':
                shifts[m][1] *= -1
            if shifts[m][0] == 'L':
                for i in range(len(modeWiseMotifmid[m])):
                    if modeWiseMotifmid[m][i][2] == '+':
                        modeWiseMotifmid[m][i][1] += shifts[m][1]
                    elif modeWiseMotifmid[m][i][2] == '-':
                        modeWiseMotifmid[m][i][1] -= shifts[m][1]
            elif shifts[m][0] == 'R':
                for i in range(len(modeWiseMotifmid[m])):
                    if modeWiseMotifmid[m][i][2] == '+':
                        modeWiseMotifmid[m][i][1] -= shifts[m][1]
                    elif modeWiseMotifmid[m][i][2] == '-':
                        modeWiseMotifmid[m][i][1] += shifts[m][1]
    '''
    If strand preference is +, keep + strand regions as it is and flip the - strand regions
    If strand preference is -, flip the + strand regions and keep the - strand regions as it is
    '''
    alignedSeqs = {}
    fmVec = np.zeros(noOfSeqs,dtype=object)
    # Base -> digit encoding used for the numeric matrix consumed by R.
    d = {'A':'0','a':'0','C':'1','c':'1','G':'2','g':'2','T':'3','t':'3','N':'4'}
    for m in modeWiseMotifmid.keys():
        if m==-1: continue
        motifwidth = motifWidths[m]
        st = shifts[m][2]
        for i in range(len(modeWiseMotifmid[m])):
            chrom = modeWiseMotifmid[m][i][0]
            motifmid = modeWiseMotifmid[m][i][1]
            reportedSt = modeWiseMotifmid[m][i][2]
            counter = modeWiseMotifmid[m][i][3]
            # Even-width motifs take a symmetric window; odd-width motifs
            # fetch one extra base and drop the trailing one.
            if motifwidth%2==0:
                regionStart = motifmid-side
                regionEnd = motifmid+side
                seqlen = 2*side
                seq = getSeq(chrom,regionStart,regionEnd,twobitfile,st,reportedSt,seqlen)
            else:
                regionStart = motifmid-side
                regionEnd = motifmid+side+1
                seqlen = side*2 + 1
                seq = getSeq(chrom,regionStart,regionEnd,twobitfile,st,reportedSt,seqlen)
                seq = seq[:-1]
            header = chrom + '\t' +str(regionStart)+'\t'+str(regionEnd)
            alignedSeqs[counter] = (header,str(m),reportedSt,str(motifwidth),seq)
            fmVec[counter] = '\t'.join([str(m)]+[d[x] for x in seq])+'\n'
    # Emit both outputs in original sequence order.
    out = open(outdir+'/alignedMotifs.txt','w')
    fm = open(outdir+'/fastaMatrix.txt','w')
    for i in range(noOfSeqs):
        out.write('\t'.join(alignedSeqs[i])+'\n')
        fm.write(fmVec[i])
    out.close()
    fm.close()
| [
"anushua.biswas@gmail.com"
] | anushua.biswas@gmail.com |
52a9cfb92f3c0dad0659e22d6a6cb0ad3a802dd1 | e37a8fbfad0172f5e952219d77f9cac4525ded5f | /doctr/__init__.py | 20b78857e757acf3aefefd7d7fa5d8ff77f9275e | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | kapitsa2811/doctr | 4c4cce4b6c5852d2fe811e8b8484ecf99d36d9d2 | 63b1ceec7a1532c9218351234c23eda6f210d5fb | refs/heads/main | 2023-04-22T14:55:56.568309 | 2021-04-28T16:51:40 | 2021-04-28T16:51:40 | 362,700,910 | 1 | 0 | Apache-2.0 | 2021-04-29T05:31:07 | 2021-04-29T05:31:06 | null | UTF-8 | Python | false | false | 98 | py | from .version import __version__ # noqa: F401
from . import documents, models, transforms, utils
| [
"noreply@github.com"
] | kapitsa2811.noreply@github.com |
cc5fe338f1609b34d3796858f25ec9987dcc2422 | 29e878f459426d89f22891685fe7d2fed82d4b32 | /net_base.py | 04411f7e2780795622885de8e109ed5a9efd7c37 | [] | no_license | gjtjx/tf-deeplab-v3 | 8400f174f2198f1e18c7aaf29c188776c6608702 | aa7029d378fd62adb38c138eba0bc2821b1c9f9f | refs/heads/master | 2021-08-18T21:20:42.434082 | 2017-11-23T22:15:30 | 2017-11-23T22:15:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,001 | py | import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib.learn import ModeKeys
class BaseModel(object):
    """Base class collecting TF1-style layer builders for DeepLab-like nets.

    Subclasses compose the _xxx_block helpers to build conv / atrous-conv /
    residual-add blocks.  All batch_norm layers take their train/inference
    behaviour from self.training.
    """

    def __init__(self, mode):
        # True when running under tf.contrib.learn TRAIN mode.
        self.training = ModeKeys.TRAIN == mode
        # Variables created by _aconv_layer are collected here
        # (presumably for saving/regularization -- confirm with callers).
        self.weights = []
        self.biases = []

    def _create_weight(self, name, shape):
        # Xavier-initialized conv weight variable.
        init = layers.xavier_initializer_conv2d(dtype=tf.float32)
        return tf.Variable(init(shape=shape), name=name)

    def _create_bias(self, name, shape):
        # Zero-initialized bias variable.
        init = tf.constant_initializer(value=0.0, dtype=tf.float32)
        return tf.Variable(init(shape=shape), name=name)

    def _a_branch(self, inbound, output_size, kernel_size, stride, scope):
        # Conv + batch-norm, no activation on either.
        # NOTE(review): the 'scope' argument is accepted but unused here.
        conv = layers.conv2d(inbound, output_size, kernel_size, stride, activation_fn=None)
        bn = layers.batch_norm(conv, activation_fn=None, is_training=self.training)
        return bn

    def _aconv_layer(self, inbound, output_size, kernel_size, rate):
        # Atrous (dilated) conv with explicitly created weight/bias variables,
        # which are also recorded in self.weights / self.biases.
        aconv_shape = [kernel_size, kernel_size, inbound.get_shape()[3].value, output_size]
        weight = self._create_weight('weights', aconv_shape)
        bias = self._create_bias('biases', [output_size])
        atrous = tf.nn.atrous_conv2d(inbound, weight, rate, padding='SAME')
        atrous = tf.nn.bias_add(atrous, bias)
        self.weights.append(weight)
        self.biases.append(bias)
        return atrous

    def _aconv_block(self, inbound, output_sizes, kernel_sizes, strides, scope):
        # conv -> atrous conv -> conv, each batch-normed; ReLU after the
        # first two norms only.  Note strides[1] is passed as the dilation
        # *rate* of the atrous layer, not as a spatial stride.
        with tf.variable_scope(scope):
            conv = layers.conv2d(inbound, output_sizes[0], kernel_sizes[0], strides[0], activation_fn=None)
            bn = layers.batch_norm(conv, activation_fn=tf.nn.relu, is_training=self.training)
            atrous = self._aconv_layer(bn, output_sizes[1], kernel_sizes[1], strides[1])
            bn = layers.batch_norm(atrous, activation_fn=tf.nn.relu, is_training=self.training)
            conv = layers.conv2d(bn, output_sizes[2], kernel_sizes[2], strides[2], activation_fn=None)
            bn = layers.batch_norm(conv, activation_fn=None, is_training=self.training)
            return bn

    def _conv_block(self, inbound, output_sizes, kernel_sizes, strides, scope):
        # Three conv + batch-norm stages; ReLU after the first two norms,
        # no activation on the last (callers can add residuals first).
        with tf.variable_scope(scope):
            conv = layers.conv2d(inbound, output_sizes[0], kernel_sizes[0], strides[0], activation_fn=None)
            bn = layers.batch_norm(conv, activation_fn=tf.nn.relu, is_training=self.training)
            conv = layers.conv2d(bn, output_sizes[1], kernel_sizes[1], strides[1], activation_fn=None)
            bn = layers.batch_norm(conv, activation_fn=tf.nn.relu, is_training=self.training)
            conv = layers.conv2d(bn, output_sizes[2], kernel_sizes[2], strides[2], activation_fn=None)
            bn = layers.batch_norm(conv, activation_fn=None, is_training=self.training)
            return bn

    def _add_block(self, first, second, scope):
        # Element-wise add followed by ReLU (residual join).
        with tf.variable_scope(scope):
            add = tf.add(first, second)
            relu = tf.nn.relu(add, name='relu')
            return relu
"tuckerdarby@gmail.com"
] | tuckerdarby@gmail.com |
119a8220aff09fff6e4a24259634f20681f8b04d | 238e46a903cf7fac4f83fa8681094bf3c417d22d | /output/python37/Lib/test/test_smtplib.py | 06168e1cb07685c76cf317c32142c808ca20aef4 | [
"BSD-3-Clause",
"bzip2-1.0.6",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-newlib-historical",
"OpenSSL",
"Python-2.0",
"TCL"
] | permissive | baojunli/FastCAE | da1277f90e584084d461590a3699b941d8c4030b | a3f99f6402da564df87fcef30674ce5f44379962 | refs/heads/master | 2023-02-25T20:25:31.815729 | 2021-02-01T03:17:33 | 2021-02-01T03:17:33 | 268,390,180 | 1 | 0 | BSD-3-Clause | 2020-06-01T00:39:31 | 2020-06-01T00:39:31 | null | UTF-8 | Python | false | false | 52,178 | py | import asyncore
import base64
import email.mime.text
from email.message import EmailMessage
from email.base64mime import body_encode as encode_base64
import email.utils
import hmac
import socket
import smtpd
import smtplib
import io
import re
import sys
import time
import select
import errno
import textwrap
import threading
import unittest
from test import support, mock_socket
from test.support import HOST, HOSTv4, HOSTv6
if sys.platform == 'darwin':
    # select.poll returns a select.POLLHUP at the end of the tests
    # on darwin, so just ignore it
    # (monkeypatch SMTPChannel so the exceptional-condition handler is a no-op)
    def handle_expt(self):
        pass
    smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
    """Accept one connection on *serv* and drip-feed *buf* to it.

    *evt* is set once when the server starts listening and again after it
    shuts down; at most 500 select/send rounds are attempted so the thread
    cannot spin forever.
    """
    serv.listen()
    evt.set()
    try:
        conn, addr = serv.accept()
    except socket.timeout:
        # No client connected within the socket timeout; just shut down.
        pass
    else:
        n = 500
        while buf and n > 0:
            r, w, e = select.select([], [conn], [])
            if w:
                sent = conn.send(buf)
                buf = buf[sent:]
            n -= 1
        conn.close()
    finally:
        serv.close()
        evt.set()
class GeneralTests(unittest.TestCase):
    """Tests of smtplib.SMTP driven entirely by mock_socket (no network)."""

    def setUp(self):
        # Swap the socket module used by smtplib for the scripted mock.
        smtplib.socket = mock_socket
        self.port = 25

    def tearDown(self):
        smtplib.socket = socket

    # This method is no longer used but is retained for backward compatibility,
    # so test to make sure it still works.
    def testQuoteData(self):
        teststr = "abc\n.jkl\rfoo\r\n..blue"
        expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
        self.assertEqual(expected, smtplib.quotedata(teststr))

    def testBasic1(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # connects
        smtp = smtplib.SMTP(HOST, self.port)
        smtp.close()

    def testSourceAddress(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # connects
        smtp = smtplib.SMTP(HOST, self.port,
                            source_address=('127.0.0.1',19876))
        self.assertEqual(smtp.source_address, ('127.0.0.1', 19876))
        smtp.close()

    def testBasic2(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # connects, include port in host name
        smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
        smtp.close()

    def testLocalHostName(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # check that supplied local_hostname is used
        smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
        self.assertEqual(smtp.local_hostname, "testhost")
        smtp.close()

    def testTimeoutDefault(self):
        # The mock module's default timeout should propagate to the socket.
        mock_socket.reply_with(b"220 Hola mundo")
        self.assertIsNone(mock_socket.getdefaulttimeout())
        mock_socket.setdefaulttimeout(30)
        self.assertEqual(mock_socket.getdefaulttimeout(), 30)
        try:
            smtp = smtplib.SMTP(HOST, self.port)
        finally:
            mock_socket.setdefaulttimeout(None)
        self.assertEqual(smtp.sock.gettimeout(), 30)
        smtp.close()

    def testTimeoutNone(self):
        # An explicit timeout=None must override the global default.
        mock_socket.reply_with(b"220 Hola mundo")
        self.assertIsNone(socket.getdefaulttimeout())
        socket.setdefaulttimeout(30)
        try:
            smtp = smtplib.SMTP(HOST, self.port, timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertIsNone(smtp.sock.gettimeout())
        smtp.close()

    def testTimeoutValue(self):
        mock_socket.reply_with(b"220 Hola mundo")
        smtp = smtplib.SMTP(HOST, self.port, timeout=30)
        self.assertEqual(smtp.sock.gettimeout(), 30)
        smtp.close()

    def test_debuglevel(self):
        # debuglevel 1 prints plain "connect:" trace lines to stderr.
        mock_socket.reply_with(b"220 Hello world")
        smtp = smtplib.SMTP()
        smtp.set_debuglevel(1)
        with support.captured_stderr() as stderr:
            smtp.connect(HOST, self.port)
        smtp.close()
        expected = re.compile(r"^connect:", re.MULTILINE)
        self.assertRegex(stderr.getvalue(), expected)

    def test_debuglevel_2(self):
        # debuglevel 2 prefixes each trace line with a timestamp.
        mock_socket.reply_with(b"220 Hello world")
        smtp = smtplib.SMTP()
        smtp.set_debuglevel(2)
        with support.captured_stderr() as stderr:
            smtp.connect(HOST, self.port)
        smtp.close()
        expected = re.compile(r"^\d{2}:\d{2}:\d{2}\.\d{6} connect: ",
                              re.MULTILINE)
        self.assertRegex(stderr.getvalue(), expected)
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
    """Drive *serv* with an asyncore polling loop until the client finishes.

    serv_evt is set once at startup and again after shutdown; setting
    client_evt tells the loop the client conversation is over.  At most
    1000 poll rounds are run so the thread cannot hang forever.
    """
    serv_evt.set()

    try:
        if hasattr(select, 'poll'):
            poll_fun = asyncore.poll2
        else:
            poll_fun = asyncore.poll

        n = 1000
        while asyncore.socket_map and n > 0:
            poll_fun(0.01, asyncore.socket_map)

            # when the client conversation is finished, it will
            # set client_evt, and it's then ok to kill the server
            if client_evt.is_set():
                serv.close()
                break

            n -= 1

    except socket.timeout:
        pass
    finally:
        if not client_evt.is_set():
            # allow some time for the client to read the result
            time.sleep(0.5)
        serv.close()
        asyncore.close_all()
        serv_evt.set()
# Sentinel lines smtpd.DebuggingServer prints around each received message.
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
class DebuggingServerTests(unittest.TestCase):
maxDiff = None
    def setUp(self):
        # Make getfqdn deterministic and capture the DebuggingServer's
        # stdout / DEBUGSTREAM output for assertions.
        self.real_getfqdn = socket.getfqdn
        socket.getfqdn = mock_socket.getfqdn
        # temporarily replace sys.stdout to capture DebuggingServer output
        self.old_stdout = sys.stdout
        self.output = io.StringIO()
        sys.stdout = self.output

        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Capture SMTPChannel debug output
        self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
        smtpd.DEBUGSTREAM = io.StringIO()
        # Pick a random unused port by passing 0 for the port number
        self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
                                          decode_data=True)
        # Keep a note of what server host and port were assigned
        self.host, self.port = self.serv.socket.getsockname()[:2]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()

        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()

    def tearDown(self):
        socket.getfqdn = self.real_getfqdn
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        self.thread.join()
        # restore sys.stdout
        sys.stdout = self.old_stdout
        # restore DEBUGSTREAM
        smtpd.DEBUGSTREAM.close()
        smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM

    def get_output_without_xpeer(self):
        # Strip the X-Peer header (its value varies per run) from the
        # captured server output before comparison.
        test_output = self.output.getvalue()
        return re.sub(r'(.*?)^X-Peer:\s*\S+\n(.*)', r'\1\2',
                      test_output, flags=re.MULTILINE|re.DOTALL)
    def testBasic(self):
        # connect
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.quit()

    def testSourceAddress(self):
        # connect, binding the client side to a specific source port
        src_port = support.find_unused_port()
        try:
            smtp = smtplib.SMTP(self.host, self.port, local_hostname='localhost',
                                timeout=3, source_address=(self.host, src_port))
            self.assertEqual(smtp.source_address, (self.host, src_port))
            self.assertEqual(smtp.local_hostname, 'localhost')
            smtp.quit()
        except OSError as e:
            if e.errno == errno.EADDRINUSE:
                self.skipTest("couldn't bind to source port %d" % src_port)
            raise

    def testNOOP(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        expected = (250, b'OK')
        self.assertEqual(smtp.noop(), expected)
        smtp.quit()

    def testRSET(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        expected = (250, b'OK')
        self.assertEqual(smtp.rset(), expected)
        smtp.quit()

    def testELHO(self):
        # EHLO isn't implemented in DebuggingServer
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        expected = (250, b'\nSIZE 33554432\nHELP')
        self.assertEqual(smtp.ehlo(), expected)
        smtp.quit()

    def testEXPNNotImplemented(self):
        # EXPN isn't implemented in DebuggingServer
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        expected = (502, b'EXPN not implemented')
        smtp.putcmd('EXPN')
        self.assertEqual(smtp.getreply(), expected)
        smtp.quit()

    def testVRFY(self):
        # DebuggingServer answers VRFY with the generic 252 response.
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        expected = (252, b'Cannot VRFY user, but will accept message ' + \
                    b'and attempt delivery')
        self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
        self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
        smtp.quit()

    def testSecondHELO(self):
        # check that a second HELO returns a message that it's a duplicate
        # (this behavior is specific to smtpd.SMTPChannel)
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.helo()
        expected = (503, b'Duplicate HELO/EHLO')
        self.assertEqual(smtp.helo(), expected)
        smtp.quit()

    def testHELP(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
                                      b'RCPT DATA RSET NOOP QUIT VRFY')
        smtp.quit()
    def testSend(self):
        """A message sent with sendmail() must appear verbatim between the
        BEGIN/END markers in the server's captured debug output."""
        # connect and send mail
        m = 'A test message'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.sendmail('John', 'Sally', m)
        # XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
        # in asyncore.  This sleep might help, but should really be fixed
        # properly by using an Event variable.
        time.sleep(0.01)
        smtp.quit()

        # Hand control to the server thread and wait for it to finish
        # writing the captured message.
        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
    def testSendBinary(self):
        """sendmail() must accept a bytes payload; the server records it
        decoded as ASCII."""
        m = b'A test message'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.sendmail('John', 'Sally', m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
    def testSendNeedingDotQuote(self):
        """Leading dots must survive SMTP dot-stuffing round trip."""
        # Issue 12283
        m = '.A test\n.mes.sage.'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.sendmail('John', 'Sally', m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
    def testSendNullSender(self):
        """The null reverse-path '<>' must be accepted and recorded as the
        envelope sender."""
        m = 'A test message'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.sendmail('<>', 'Sally', m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
        # The server's debug stream must show the empty sender.
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: <>$", re.MULTILINE)
        self.assertRegex(debugout, sender)
    def testSendMessage(self):
        """send_message() with explicit from/to must deliver the serialized
        MIME message."""
        m = email.mime.text.MIMEText('A test message')
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.send_message(m, from_addr='John', to_addrs='Sally')
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Remove the X-Peer header that DebuggingServer adds as figuring out
        # exactly what IP address format is put there is not easy (and
        # irrelevant to our test).  Typically 127.0.0.1 or ::1, but it is
        # not always the same as socket.gethostbyname(HOST). :(
        test_output = self.get_output_without_xpeer()
        del m['X-Peer']
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(test_output, mexpect)
    def testSendMessageWithAddresses(self):
        """Recipients must be collected from To/CC/Bcc headers; Bcc must be
        stripped from the transmitted message but kept on the original."""
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John'
        m['CC'] = 'Sally, Fred'
        m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        # make sure the Bcc header is still in the message.
        self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
                                    '<warped@silly.walks.com>')

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Remove the X-Peer header that DebuggingServer adds.
        test_output = self.get_output_without_xpeer()
        del m['X-Peer']
        # The Bcc header should not be transmitted.
        del m['Bcc']
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(test_output, mexpect)
        # Envelope sender and every addressee must show up in the debug log.
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ('John', 'Sally', 'Fred', 'root@localhost',
                     'warped@silly.walks.com'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertRegex(debugout, to_addr)
    def testSendMessageWithSomeAddresses(self):
        """send_message() must work when only some of To/CC/Bcc exist."""
        # Make sure nothing breaks if not all of the three 'to' headers exist
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John, Dinsdale'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Remove the X-Peer header that DebuggingServer adds.
        test_output = self.get_output_without_xpeer()
        del m['X-Peer']
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(test_output, mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ('John', 'Dinsdale'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertRegex(debugout, to_addr)
    def testSendMessageWithSpecifiedAddresses(self):
        """Explicit from_addr/to_addrs must override the message headers
        for the envelope."""
        # Make sure addresses specified in call override those in message.
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John, Dinsdale'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.send_message(m, from_addr='joe@example.com', to_addrs='foo@example.net')
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Remove the X-Peer header that DebuggingServer adds.
        test_output = self.get_output_without_xpeer()
        del m['X-Peer']
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(test_output, mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: joe@example.com$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        # The header addressees must NOT appear in the envelope recipients.
        for addr in ('John', 'Dinsdale'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertNotRegex(debugout, to_addr)
        recip = re.compile(r"^recips: .*'foo@example.net'.*$", re.MULTILINE)
        self.assertRegex(debugout, recip)
    def testSendMessageWithMultipleFrom(self):
        """With multiple From addresses, the Sender header must be used as
        the envelope sender."""
        # Sender overrides From
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'Bernard, Bianca'
        m['Sender'] = 'the_rescuers@Rescue-Aid-Society.com'
        m['To'] = 'John, Dinsdale'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Remove the X-Peer header that DebuggingServer adds.
        test_output = self.get_output_without_xpeer()
        del m['X-Peer']
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(test_output, mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: the_rescuers@Rescue-Aid-Society.com$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ('John', 'Dinsdale'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertRegex(debugout, to_addr)
    def testSendMessageResent(self):
        """When Resent-* headers are present, the envelope must be built
        from them (Resent-From/To/Bcc) rather than the originals."""
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John'
        m['CC'] = 'Sally, Fred'
        m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
        m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
        m['Resent-From'] = 'holy@grail.net'
        m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
        m['Resent-Bcc'] = 'doe@losthope.net'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # The Resent-Bcc headers are deleted before serialization.
        del m['Bcc']
        del m['Resent-Bcc']
        # Remove the X-Peer header that DebuggingServer adds.
        test_output = self.get_output_without_xpeer()
        del m['X-Peer']
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(test_output, mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: holy@grail.net$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ('my_mom@great.cooker.com', 'Jeff', 'doe@losthope.net'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertRegex(debugout, to_addr)
    def testSendMessageMultipleResentRaises(self):
        """Two sets of Resent-* headers are ambiguous: send_message() must
        raise ValueError."""
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John'
        m['CC'] = 'Sally, Fred'
        m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
        m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
        m['Resent-From'] = 'holy@grail.net'
        m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
        m['Resent-Bcc'] = 'doe@losthope.net'
        # Second Resent block makes the message ambiguous.
        m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
        m['Resent-To'] = 'holy@grail.net'
        m['Resent-From'] = 'Martha <my_mom@great.cooker.com>, Jeff'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
        with self.assertRaises(ValueError):
            smtp.send_message(m)
        smtp.close()
class NonConnectingTests(unittest.TestCase):
    """Tests that never establish a connection to any server."""

    def testNotConnected(self):
        # Test various operations on an unconnected SMTP object that
        # should raise exceptions (at present the attempt in SMTP.send
        # to reference the nonexistent 'sock' attribute of the SMTP object
        # causes an AttributeError)
        smtp = smtplib.SMTP()
        self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
        self.assertRaises(smtplib.SMTPServerDisconnected,
                          smtp.send, 'test msg')

    def testNonnumericPort(self):
        # check that non-numeric port raises OSError
        self.assertRaises(OSError, smtplib.SMTP,
                          "localhost", "bogus")
        self.assertRaises(OSError, smtplib.SMTP,
                          "localhost:bogus")
# test response of client to a non-successful HELO message
class BadHELOServerTests(unittest.TestCase):
    """Connect through mock_socket to a server whose greeting/HELO fails."""

    def setUp(self):
        # Patch the socket module used by smtplib with the mock and make
        # every reply a non-2xx code.
        smtplib.socket = mock_socket
        mock_socket.reply_with(b"199 no hello for you!")
        self.old_stdout = sys.stdout
        self.output = io.StringIO()
        sys.stdout = self.output
        self.port = 25

    def tearDown(self):
        smtplib.socket = socket
        sys.stdout = self.old_stdout

    def testFailingHELO(self):
        # The constructor connects and greets, so it must raise directly.
        self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
                          HOST, self.port, 'localhost', 3)
class TooLongLineTests(unittest.TestCase):
    """A reply line longer than smtplib._MAXLINE must abort the session."""

    # Reply exceeding the client-side line-length limit.
    respdata = b'250 OK' + (b'.' * smtplib._MAXLINE * 2) + b'\n'

    def setUp(self):
        self.old_stdout = sys.stdout
        self.output = io.StringIO()
        sys.stdout = self.output

        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(15)
        self.port = support.bind_port(self.sock)
        servargs = (self.evt, self.respdata, self.sock)
        thread = threading.Thread(target=server, args=servargs)
        thread.start()
        self.addCleanup(thread.join)
        self.evt.wait()
        self.evt.clear()

    def tearDown(self):
        self.evt.wait()
        sys.stdout = self.old_stdout

    def testLineTooLong(self):
        self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
                          HOST, self.port, 'localhost', 3)
# Address book consulted by the simulated server's VRFY/EXPN handlers.
sim_users = {'Mr.A@somewhere.com':'John A',
             'Ms.B@xn--fo-fka.com':'Sally B',
             'Mrs.C@somewhereesle.com':'Ruth C',
            }

# (user, password) pair accepted by the simulated AUTH handlers.
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
# Fixed base64-encoded CRAM-MD5 challenge the simulated server issues.
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
                          'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
# Mailing lists known to the simulated EXPN handler.
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
             'list-2':['Ms.B@xn--fo-fka.com',],
            }
# Simulated SMTP channel & server
class ResponseException(Exception):
    # Raised by the simulated AUTH handlers to abort processing; callers
    # read .smtp_code / .smtp_error off the instance (see found_terminator).
    pass
class SimSMTPChannel(smtpd.SMTPChannel):
    """smtpd.SMTPChannel with canned-response hooks and simulated AUTH.

    The class attributes below are per-test knobs: a test assigns a canned
    reply string (a list of them, for RCPT) to override the default
    behavior of the corresponding SMTP verb.
    """

    quit_response = None    # canned reply for QUIT, or None for default
    mail_response = None    # canned reply for MAIL, or None for default
    rcpt_response = None    # list of canned replies for successive RCPTs
    data_response = None    # canned reply for DATA, or None for default
    rcpt_count = 0          # number of RCPT commands handled so far
    rset_count = 0          # number of RSET commands handled so far
    disconnect = 0          # when truthy, drop the link after a canned MAIL
    AUTH = 99  # Add protocol state to enable auth testing.
    authenticated_user = None

    def __init__(self, extra_features, *args, **kw):
        # Pre-render the extra EHLO capability lines advertised to clients.
        self._extrafeatures = ''.join(
            [ "250-{0}\r\n".format(x) for x in extra_features ])
        super(SimSMTPChannel, self).__init__(*args, **kw)

    # AUTH related stuff.  It would be nice if support for this were in smtpd.
    def found_terminator(self):
        # While in the AUTH state, feed each complete line to the active
        # auth handler instead of the normal command dispatcher.
        if self.smtp_state == self.AUTH:
            line = self._emptystring.join(self.received_lines)
            print('Data:', repr(line), file=smtpd.DEBUGSTREAM)
            self.received_lines = []
            try:
                self.auth_object(line)
            except ResponseException as e:
                # The handler aborted with a specific code/error reply.
                self.smtp_state = self.COMMAND
                self.push('%s %s' % (e.smtp_code, e.smtp_error))
                return
        # NOTE(review): on a successful auth step this falls through to
        # super() with received_lines already cleared — confirm intended.
        super().found_terminator()

    def smtp_AUTH(self, arg):
        """Dispatch AUTH <mechanism> [initial-response] to an _auth_* method."""
        if not self.seen_greeting:
            self.push('503 Error: send EHLO first')
            return
        if not self.extended_smtp or 'AUTH' not in self._extrafeatures:
            self.push('500 Error: command "AUTH" not recognized')
            return
        if self.authenticated_user is not None:
            self.push(
                '503 Bad sequence of commands: already authenticated')
            return
        args = arg.split()
        if len(args) not in [1, 2]:
            self.push('501 Syntax: AUTH <mechanism> [initial-response]')
            return
        # Map e.g. "CRAM-MD5" to the _auth_cram_md5 handler method.
        auth_object_name = '_auth_%s' % args[0].lower().replace('-', '_')
        try:
            self.auth_object = getattr(self, auth_object_name)
        except AttributeError:
            self.push('504 Command parameter not implemented: unsupported '
                      ' authentication mechanism {!r}'.format(auth_object_name))
            return
        self.smtp_state = self.AUTH
        self.auth_object(args[1] if len(args) == 2 else None)

    def _authenticated(self, user, valid):
        # Record the outcome and return the channel to the COMMAND state.
        if valid:
            self.authenticated_user = user
            self.push('235 Authentication Succeeded')
        else:
            self.push('535 Authentication credentials invalid')
        self.smtp_state = self.COMMAND

    def _decode_base64(self, string):
        return base64.decodebytes(string.encode('ascii')).decode('utf-8')

    def _auth_plain(self, arg=None):
        """AUTH PLAIN: "\\0user\\0password", base64 encoded."""
        if arg is None:
            self.push('334 ')
        else:
            logpass = self._decode_base64(arg)
            try:
                *_, user, password = logpass.split('\0')
            except ValueError as e:
                self.push('535 Splitting response {!r} into user and password'
                          ' failed: {}'.format(logpass, e))
                return
            self._authenticated(user, password == sim_auth[1])

    def _auth_login(self, arg=None):
        """AUTH LOGIN: username and password arrive in two separate steps."""
        if arg is None:
            # base64 encoded 'Username:'
            self.push('334 VXNlcm5hbWU6')
        elif not hasattr(self, '_auth_login_user'):
            self._auth_login_user = self._decode_base64(arg)
            # base64 encoded 'Password:'
            self.push('334 UGFzc3dvcmQ6')
        else:
            password = self._decode_base64(arg)
            self._authenticated(self._auth_login_user, password == sim_auth[1])
            del self._auth_login_user

    def _auth_cram_md5(self, arg=None):
        """AUTH CRAM-MD5: reply is "user hex-hmac-md5-of-challenge"."""
        if arg is None:
            self.push('334 {}'.format(sim_cram_md5_challenge))
        else:
            logpass = self._decode_base64(arg)
            try:
                user, hashed_pass = logpass.split()
            except ValueError as e:
                self.push('535 Splitting response {!r} into user and password'
                          'failed: {}'.format(logpass, e))
                return False
            valid_hashed_pass = hmac.HMAC(
                sim_auth[1].encode('ascii'),
                self._decode_base64(sim_cram_md5_challenge).encode('ascii'),
                'md5').hexdigest()
            self._authenticated(user, hashed_pass == valid_hashed_pass)
    # end AUTH related stuff.

    def smtp_EHLO(self, arg):
        # Advertise the fixed capability set plus any test-added features.
        resp = ('250-testhost\r\n'
                '250-EXPN\r\n'
                '250-SIZE 20000000\r\n'
                '250-STARTTLS\r\n'
                '250-DELIVERBY\r\n')
        resp = resp + self._extrafeatures + '250 HELP'
        self.push(resp)
        self.seen_greeting = arg
        self.extended_smtp = True

    def smtp_VRFY(self, arg):
        # For max compatibility smtplib should be sending the raw address.
        if arg in sim_users:
            self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
        else:
            self.push('550 No such user: %s' % arg)

    def smtp_EXPN(self, arg):
        list_name = arg.lower()
        if list_name in sim_lists:
            user_list = sim_lists[list_name]
            for n, user_email in enumerate(user_list):
                quoted_addr = smtplib.quoteaddr(user_email)
                # Intermediate lines of a multiline reply use "250-", the
                # final line "250 ".
                if n < len(user_list) - 1:
                    self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
                else:
                    self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
        else:
            self.push('550 No access for you!')

    def smtp_QUIT(self, arg):
        if self.quit_response is None:
            super(SimSMTPChannel, self).smtp_QUIT(arg)
        else:
            self.push(self.quit_response)
            self.close_when_done()

    def smtp_MAIL(self, arg):
        if self.mail_response is None:
            super().smtp_MAIL(arg)
        else:
            self.push(self.mail_response)
            if self.disconnect:
                self.close_when_done()

    def smtp_RCPT(self, arg):
        if self.rcpt_response is None:
            super().smtp_RCPT(arg)
            return

        # Serve the next canned RCPT reply in order.
        self.rcpt_count += 1
        self.push(self.rcpt_response[self.rcpt_count-1])

    def smtp_RSET(self, arg):
        self.rset_count += 1
        super().smtp_RSET(arg)

    def smtp_DATA(self, arg):
        if self.data_response is None:
            super().smtp_DATA(arg)
        else:
            self.push(self.data_response)

    def handle_error(self):
        # Let asyncore errors propagate so test failures are visible.
        raise
class SimSMTPServer(smtpd.SMTPServer):
    """smtpd.SMTPServer speaking through SimSMTPChannel; records envelopes."""

    channel_class = SimSMTPChannel

    def __init__(self, *args, **kw):
        self._extra_features = []   # EHLO feature lines handed to the channel
        self._addresses = {}        # last envelope seen: {'from': ..., 'tos': [...]}
        smtpd.SMTPServer.__init__(self, *args, **kw)

    def handle_accepted(self, conn, addr):
        self._SMTPchannel = self.channel_class(
            self._extra_features, self, conn, addr,
            decode_data=self._decode_data)

    def process_message(self, peer, mailfrom, rcpttos, data):
        # Record the envelope so tests can inspect what was submitted.
        self._addresses['from'] = mailfrom
        self._addresses['tos'] = rcpttos

    def add_feature(self, feature):
        """Advertise an extra EHLO feature (e.g. "AUTH PLAIN")."""
        self._extra_features.append(feature)

    def handle_error(self):
        # Let asyncore errors propagate so test failures are visible.
        raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
class SMTPSimTests(unittest.TestCase):
    """Tests that drive a SimSMTPServer running in a background thread."""

    def setUp(self):
        # getfqdn can do slow DNS lookups; stub it out for the tests.
        self.real_getfqdn = socket.getfqdn
        socket.getfqdn = mock_socket.getfqdn
        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Pick a random unused port by passing 0 for the port number
        self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
        # Keep a note of what port was assigned
        self.port = self.serv.socket.getsockname()[1]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()

        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()

    def tearDown(self):
        socket.getfqdn = self.real_getfqdn
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        self.thread.join()

    def testBasic(self):
        # smoke test
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        smtp.quit()

    def testEHLO(self):
        """EHLO must populate esmtp_features with the advertised set."""
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)

        # no features should be present before the EHLO
        self.assertEqual(smtp.esmtp_features, {})

        # features expected from the test server
        expected_features = {'expn':'',
                             'size': '20000000',
                             'starttls': '',
                             'deliverby': '',
                             'help': '',
                             }

        smtp.ehlo()
        self.assertEqual(smtp.esmtp_features, expected_features)
        for k in expected_features:
            self.assertTrue(smtp.has_extn(k))
        self.assertFalse(smtp.has_extn('unsupported-feature'))
        smtp.quit()

    def testVRFY(self):
        """VRFY must resolve every simulated user and reject unknowns."""
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)

        for addr_spec, name in sim_users.items():
            expected_known = (250, bytes('%s %s' %
                                         (name, smtplib.quoteaddr(addr_spec)),
                                         "ascii"))
            self.assertEqual(smtp.vrfy(addr_spec), expected_known)

        u = 'nobody@nowhere.com'
        expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
        self.assertEqual(smtp.vrfy(u), expected_unknown)
        smtp.quit()

    def testEXPN(self):
        """EXPN must expand each simulated list and reject unknown lists."""
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)

        for listname, members in sim_lists.items():
            users = []
            for m in members:
                users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
            expected_known = (250, bytes('\n'.join(users), "ascii"))
            self.assertEqual(smtp.expn(listname), expected_known)

        u = 'PSU-Members-List'
        expected_unknown = (550, b'No access for you!')
        self.assertEqual(smtp.expn(u), expected_unknown)
        smtp.quit()

    def testAUTH_PLAIN(self):
        self.serv.add_feature("AUTH PLAIN")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        resp = smtp.login(sim_auth[0], sim_auth[1])
        self.assertEqual(resp, (235, b'Authentication Succeeded'))
        smtp.close()

    def testAUTH_LOGIN(self):
        self.serv.add_feature("AUTH LOGIN")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        resp = smtp.login(sim_auth[0], sim_auth[1])
        self.assertEqual(resp, (235, b'Authentication Succeeded'))
        smtp.close()

    def testAUTH_CRAM_MD5(self):
        self.serv.add_feature("AUTH CRAM-MD5")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        resp = smtp.login(sim_auth[0], sim_auth[1])
        self.assertEqual(resp, (235, b'Authentication Succeeded'))
        smtp.close()

    def testAUTH_multiple(self):
        # Test that multiple authentication methods are tried.
        self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        resp = smtp.login(sim_auth[0], sim_auth[1])
        self.assertEqual(resp, (235, b'Authentication Succeeded'))
        smtp.close()

    def test_auth_function(self):
        """SMTP.auth() must work with each supported authobject directly."""
        supported = {'CRAM-MD5', 'PLAIN', 'LOGIN'}
        for mechanism in supported:
            self.serv.add_feature("AUTH {}".format(mechanism))
        for mechanism in supported:
            with self.subTest(mechanism=mechanism):
                smtp = smtplib.SMTP(HOST, self.port,
                                    local_hostname='localhost', timeout=15)
                smtp.ehlo('foo')
                smtp.user, smtp.password = sim_auth[0], sim_auth[1]
                method = 'auth_' + mechanism.lower().replace('-', '_')
                resp = smtp.auth(mechanism, getattr(smtp, method))
                self.assertEqual(resp, (235, b'Authentication Succeeded'))
                smtp.close()

    def test_quit_resets_greeting(self):
        """quit() must forget EHLO state; a reconnect must re-greet."""
        smtp = smtplib.SMTP(HOST, self.port,
                            local_hostname='localhost',
                            timeout=15)
        code, message = smtp.ehlo()
        self.assertEqual(code, 250)
        self.assertIn('size', smtp.esmtp_features)
        smtp.quit()
        self.assertNotIn('size', smtp.esmtp_features)
        smtp.connect(HOST, self.port)
        self.assertNotIn('size', smtp.esmtp_features)
        smtp.ehlo_or_helo_if_needed()
        self.assertIn('size', smtp.esmtp_features)
        smtp.quit()

    def test_with_statement(self):
        with smtplib.SMTP(HOST, self.port) as smtp:
            code, message = smtp.noop()
            self.assertEqual(code, 250)
        self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
        with smtplib.SMTP(HOST, self.port) as smtp:
            smtp.close()
        self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')

    def test_with_statement_QUIT_failure(self):
        """A failing QUIT on context exit must surface as an exception."""
        with self.assertRaises(smtplib.SMTPResponseException) as error:
            with smtplib.SMTP(HOST, self.port) as smtp:
                smtp.noop()
                self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
        self.assertEqual(error.exception.smtp_code, 421)
        self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')

    #TODO: add tests for correct AUTH method fallback now that the
    #test infrastructure can support it.

    # Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception
    def test__rest_from_mail_cmd(self):
        # ("rest" [sic] — name kept as-is so the test id stays stable)
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        smtp.noop()
        self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
        self.serv._SMTPchannel.disconnect = True
        with self.assertRaises(smtplib.SMTPSenderRefused):
            smtp.sendmail('John', 'Sally', 'test message')
        self.assertIsNone(smtp.sock)

    # Issue 5713: make sure close, not rset, is called if we get a 421 error
    def test_421_from_mail_cmd(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        smtp.noop()
        self.serv._SMTPchannel.mail_response = '421 closing connection'
        with self.assertRaises(smtplib.SMTPSenderRefused):
            smtp.sendmail('John', 'Sally', 'test message')
        self.assertIsNone(smtp.sock)
        self.assertEqual(self.serv._SMTPchannel.rset_count, 0)

    def test_421_from_rcpt_cmd(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        smtp.noop()
        # Second recipient triggers the 421; the first was accepted.
        self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
        with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
            smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
        self.assertIsNone(smtp.sock)
        self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
        self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})

    def test_421_from_data_cmd(self):
        class MySimSMTPChannel(SimSMTPChannel):
            def found_terminator(self):
                if self.smtp_state == self.DATA:
                    self.push('421 closing')
                else:
                    super().found_terminator()
        self.serv.channel_class = MySimSMTPChannel
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
        smtp.noop()
        with self.assertRaises(smtplib.SMTPDataError):
            smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
        self.assertIsNone(smtp.sock)
        self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)

    def test_smtputf8_NotSupportedError_if_no_server_support(self):
        """SMTPUTF8 mail options must be refused if the server lacks the
        extension."""
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        smtp.ehlo()
        self.assertTrue(smtp.does_esmtp)
        self.assertFalse(smtp.has_extn('smtputf8'))
        self.assertRaises(
            smtplib.SMTPNotSupportedError,
            smtp.sendmail,
            'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
        self.assertRaises(
            smtplib.SMTPNotSupportedError,
            smtp.mail, 'John', options=['BODY=8BITMIME', 'SMTPUTF8'])

    def test_send_unicode_without_SMTPUTF8(self):
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', '')
        self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice')

    def test_name_field_not_included_in_envelop_addresses(self):
        """Only the addr-spec, not the display name, goes on the envelope."""
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3
        )
        self.addCleanup(smtp.close)

        message = EmailMessage()
        message['From'] = email.utils.formataddr(('Michaël', 'michael@example.com'))
        message['To'] = email.utils.formataddr(('René', 'rene@example.com'))

        self.assertDictEqual(smtp.send_message(message), {})

        self.assertEqual(self.serv._addresses['from'], 'michael@example.com')
        self.assertEqual(self.serv._addresses['tos'], ['rene@example.com'])
class SimSMTPUTF8Server(SimSMTPServer):
    """SimSMTPServer that advertises SMTPUTF8/8BITMIME and keeps the full
    envelope (including mail/rcpt options) of the last message."""

    def __init__(self, *args, **kw):
        # The base SMTP server turns these on automatically, but our test
        # server is set up to munge the EHLO response, so we need to provide
        # them as well.  And yes, the call is to SMTPServer not SimSMTPServer.
        self._extra_features = ['SMTPUTF8', '8BITMIME']
        smtpd.SMTPServer.__init__(self, *args, **kw)

    def handle_accepted(self, conn, addr):
        self._SMTPchannel = self.channel_class(
            self._extra_features, self, conn, addr,
            decode_data=self._decode_data,
            enable_SMTPUTF8=self.enable_SMTPUTF8,
        )

    def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None,
                        rcpt_options=None):
        # Record everything about the last delivery for test inspection.
        self.last_peer = peer
        self.last_mailfrom = mailfrom
        self.last_rcpttos = rcpttos
        self.last_message = data
        self.last_mail_options = mail_options
        self.last_rcpt_options = rcpt_options
class SMTPUTF8SimTests(unittest.TestCase):
    """Tests against a SimSMTPUTF8Server advertising SMTPUTF8/8BITMIME."""

    maxDiff = None

    def setUp(self):
        # getfqdn can do slow DNS lookups; stub it out for the tests.
        self.real_getfqdn = socket.getfqdn
        socket.getfqdn = mock_socket.getfqdn
        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Pick a random unused port by passing 0 for the port number
        self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1),
                                      decode_data=False,
                                      enable_SMTPUTF8=True)
        # Keep a note of what port was assigned
        self.port = self.serv.socket.getsockname()[1]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()

        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()

    def tearDown(self):
        socket.getfqdn = self.real_getfqdn
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        self.thread.join()

    def test_test_server_supports_extensions(self):
        """Sanity check: the simulated server must advertise smtputf8."""
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        smtp.ehlo()
        self.assertTrue(smtp.does_esmtp)
        self.assertTrue(smtp.has_extn('smtputf8'))

    def test_send_unicode_with_SMTPUTF8_via_sendmail(self):
        """sendmail() with SMTPUTF8 must pass non-ASCII envelope addresses
        and options through to the server."""
        m = '¡a test message containing unicode!'.encode('utf-8')
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        smtp.sendmail('Jőhn', 'Sálly', m,
                      mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
        self.assertEqual(self.serv.last_mailfrom, 'Jőhn')
        self.assertEqual(self.serv.last_rcpttos, ['Sálly'])
        self.assertEqual(self.serv.last_message, m)
        self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
        self.assertIn('SMTPUTF8', self.serv.last_mail_options)
        self.assertEqual(self.serv.last_rcpt_options, [])

    def test_send_unicode_with_SMTPUTF8_via_low_level_API(self):
        """The mail()/rcpt()/data() sequence must accept non-ASCII
        addresses when SMTPUTF8 is requested."""
        m = '¡a test message containing unicode!'.encode('utf-8')
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        smtp.ehlo()
        self.assertEqual(
            smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']),
            (250, b'OK'))
        self.assertEqual(smtp.rcpt('János'), (250, b'OK'))
        self.assertEqual(smtp.data(m), (250, b'OK'))
        self.assertEqual(self.serv.last_mailfrom, 'Jő')
        self.assertEqual(self.serv.last_rcpttos, ['János'])
        self.assertEqual(self.serv.last_message, m)
        self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
        self.assertIn('SMTPUTF8', self.serv.last_mail_options)
        self.assertEqual(self.serv.last_rcpt_options, [])

    def test_send_message_uses_smtputf8_if_addrs_non_ascii(self):
        """send_message() must opt in to SMTPUTF8 automatically when the
        envelope addresses are non-ASCII."""
        msg = EmailMessage()
        msg['From'] = "Páolo <főo@bar.com>"
        msg['To'] = 'Dinsdale'
        msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
        # XXX I don't know why I need two \n's here, but this is an existing
        # bug (if it is one) and not a problem with the new functionality.
        msg.set_content("oh là là, know what I mean, know what I mean?\n\n")
        # XXX smtpd converts received /r/n to /n, so we can't easily test that
        # we are successfully sending /r/n :(.
        expected = textwrap.dedent("""\
            From: Páolo <főo@bar.com>
            To: Dinsdale
            Subject: Nudge nudge, wink, wink \u1F609
            Content-Type: text/plain; charset="utf-8"
            Content-Transfer-Encoding: 8bit
            MIME-Version: 1.0

            oh là là, know what I mean, know what I mean?
            """)
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        self.assertEqual(smtp.send_message(msg), {})
        self.assertEqual(self.serv.last_mailfrom, 'főo@bar.com')
        self.assertEqual(self.serv.last_rcpttos, ['Dinsdale'])
        self.assertEqual(self.serv.last_message.decode(), expected)
        self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
        self.assertIn('SMTPUTF8', self.serv.last_mail_options)
        self.assertEqual(self.serv.last_rcpt_options, [])

    def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self):
        """Non-ASCII envelope addresses must raise when the server lacks
        SMTPUTF8 support."""
        msg = EmailMessage()
        msg['From'] = "Páolo <főo@bar.com>"
        msg['To'] = 'Dinsdale'
        msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
        smtp = smtplib.SMTP(
            HOST, self.port, local_hostname='localhost', timeout=3)
        self.addCleanup(smtp.close)
        # BUG FIX: the original passed smtp.send_message(msg) -- i.e. the
        # *result* of an eager call -- to assertRaises instead of the
        # callable plus its argument, so the assertion never ran correctly.
        # NOTE(review): this class's server *does* advertise SMTPUTF8 (see
        # SimSMTPUTF8Server), so the expected raise may belong in
        # SMTPSimTests instead -- TODO confirm.
        self.assertRaises(smtplib.SMTPNotSupportedError,
                          smtp.send_message, msg)
# Base64 AUTH PLAIN initial-response for user 'psu' / password
# 'doesnotexist' (the "\0user\0password" form from RFC 4954).
EXPECTED_RESPONSE = encode_base64(b'\0psu\0doesnotexist', eol='')
class SimSMTPAUTHInitialResponseChannel(SimSMTPChannel):
    """Channel accepting only AUTH PLAIN with the hard-coded
    initial-response (EXPECTED_RESPONSE); everything else gets a 571."""

    def smtp_AUTH(self, arg):
        # RFC 4954's AUTH command allows for an optional initial-response.
        # Not all AUTH methods support this; some require a challenge.  AUTH
        # PLAIN does those, so test that here.  See issue #15014.
        args = arg.split()
        if args[0].lower() == 'plain':
            if len(args) == 2:
                # AUTH PLAIN <initial-response> with the response base 64
                # encoded.  Hard code the expected response for the test.
                if args[1] == EXPECTED_RESPONSE:
                    self.push('235 Ok')
                    return
        self.push('571 Bad authentication')
class SimSMTPAUTHInitialResponseServer(SimSMTPServer):
    # Same server, but with the initial-response-only AUTH channel.
    channel_class = SimSMTPAUTHInitialResponseChannel
class SMTPAUTHInitialResponseSimTests(unittest.TestCase):
    """AUTH PLAIN initial-response tests against the dedicated server."""

    def setUp(self):
        # getfqdn can do slow DNS lookups; stub it out for the tests.
        self.real_getfqdn = socket.getfqdn
        socket.getfqdn = mock_socket.getfqdn
        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Pick a random unused port by passing 0 for the port number
        self.serv = SimSMTPAUTHInitialResponseServer(
            (HOST, 0), ('nowhere', -1), decode_data=True)
        # Keep a note of what port was assigned
        self.port = self.serv.socket.getsockname()[1]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()

        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()

    def tearDown(self):
        socket.getfqdn = self.real_getfqdn
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        self.thread.join()

    def testAUTH_PLAIN_initial_response_login(self):
        """login() must succeed using the AUTH PLAIN initial-response."""
        self.serv.add_feature('AUTH PLAIN')
        smtp = smtplib.SMTP(HOST, self.port,
                            local_hostname='localhost', timeout=15)
        smtp.login('psu', 'doesnotexist')
        smtp.close()

    def testAUTH_PLAIN_initial_response_auth(self):
        """Direct auth() with initial_response_ok must get a 235 reply."""
        self.serv.add_feature('AUTH PLAIN')
        smtp = smtplib.SMTP(HOST, self.port,
                            local_hostname='localhost', timeout=15)
        smtp.user = 'psu'
        smtp.password = 'doesnotexist'
        code, response = smtp.auth('plain', smtp.auth_plain)
        smtp.close()
        self.assertEqual(code, 235)
@support.reap_threads
def test_main(verbose=None):
support.run_unittest(
BadHELOServerTests,
DebuggingServerTests,
GeneralTests,
NonConnectingTests,
SMTPAUTHInitialResponseSimTests,
SMTPSimTests,
TooLongLineTests,
)
if __name__ == '__main__':
test_main()
| [
"l”ibaojunqd@foxmail.com“"
] | l”ibaojunqd@foxmail.com“ |
f297086489c311f8cd1c11d0a836bbc0882f82df | f3a3d3c684f215cb8943cf873f9f71cc2f18e820 | /Python_Ders5.py | 5f7e997b20f4f03a8cf08f7d61d74de9925d93a8 | [] | no_license | yunuseminyazici/python_exercise | 665a8c35dcb24c2662f725f6ab7ce80072f9827e | d48cde05952299f2fac0fc6fae9b82fef69d0f1e | refs/heads/main | 2023-04-15T12:01:40.798386 | 2021-05-06T16:54:07 | 2021-05-06T16:54:07 | 364,978,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | """
#For kullanımı
liste=["Elma","Armut","Kiraz"]
for i in liste:
print(i)
"""
# print(*range(10)) #0-9 yazdırma
# print(*range(2,10,2)) #2den başlayarak iki atlayarak yazdırma
for i in range(10):
print(i*"*")
| [
"noreply@github.com"
] | yunuseminyazici.noreply@github.com |
58af6debfeba26a436bd2c0dc77060d5fa098f93 | d686c57e7dd391b22f68ad6302b8f32c37ef3843 | /discord_bind/__init__.py | 0734afceacdf9b801860b7199bb3b4625e99cf49 | [
"MIT"
] | permissive | AndyTempel/django-discord-bind | 6d91c4a6509d71529100930624c50ee8a3bd0763 | 11b305de0762a691776a967b46a1b02f9a63a925 | refs/heads/master | 2021-05-05T17:07:25.918551 | 2020-04-07T16:55:24 | 2020-04-07T16:55:24 | 117,344,262 | 1 | 0 | MIT | 2018-06-23T09:57:40 | 2018-01-13T13:07:32 | Python | UTF-8 | Python | false | false | 1,192 | py | """
The MIT License (MIT)
Copyright (c) 2016, Mark Rogaski
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# following PEP 386
__version__ = '0.2.0'
default_app_config = 'discord_bind.apps.DiscordBindConfig'
| [
"mrogaski@pobox.com"
] | mrogaski@pobox.com |
6666d2af38738b566c7be68cf876e62332b2f5df | 6c4285fd1780a21b66c79975168ba26e496c8c41 | /homeassistant/components/control4/light.py | e9226cc3716b2056fde62a2d7b3fd3096a7b0b7f | [
"Apache-2.0"
] | permissive | asbach/home-assistant | 802268867df70cb2f0dd242daa1066be06d3a2f7 | 04fc3a1f25e387103ee8a6e86f6dc3b037787428 | refs/heads/dev | 2023-08-25T19:00:15.045852 | 2022-04-05T12:00:45 | 2022-04-05T12:00:45 | 79,063,913 | 0 | 0 | Apache-2.0 | 2023-09-13T06:04:21 | 2017-01-15T22:30:37 | Python | UTF-8 | Python | false | false | 8,186 | py | """Platform for Control4 Lights."""
from __future__ import annotations
import asyncio
from datetime import timedelta
import logging
from pyControl4.error_handling import C4Exception
from pyControl4.light import C4Light
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_TRANSITION,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_ONOFF,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_SCAN_INTERVAL
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from . import Control4Entity, get_items_of_category
from .const import CONF_DIRECTOR, CONTROL4_ENTITY_TYPE, DOMAIN
from .director_utils import director_update_data
_LOGGER = logging.getLogger(__name__)
CONTROL4_CATEGORY = "lights"
CONTROL4_NON_DIMMER_VAR = "LIGHT_STATE"
CONTROL4_DIMMER_VAR = "LIGHT_LEVEL"
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up Control4 lights from a config entry."""
entry_data = hass.data[DOMAIN][entry.entry_id]
scan_interval = entry_data[CONF_SCAN_INTERVAL]
_LOGGER.debug(
"Scan interval = %s",
scan_interval,
)
async def async_update_data_non_dimmer():
"""Fetch data from Control4 director for non-dimmer lights."""
try:
return await director_update_data(hass, entry, CONTROL4_NON_DIMMER_VAR)
except C4Exception as err:
raise UpdateFailed(f"Error communicating with API: {err}") from err
async def async_update_data_dimmer():
"""Fetch data from Control4 director for dimmer lights."""
try:
return await director_update_data(hass, entry, CONTROL4_DIMMER_VAR)
except C4Exception as err:
raise UpdateFailed(f"Error communicating with API: {err}") from err
non_dimmer_coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="light",
update_method=async_update_data_non_dimmer,
update_interval=timedelta(seconds=scan_interval),
)
dimmer_coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="light",
update_method=async_update_data_dimmer,
update_interval=timedelta(seconds=scan_interval),
)
# Fetch initial data so we have data when entities subscribe
await non_dimmer_coordinator.async_refresh()
await dimmer_coordinator.async_refresh()
items_of_category = await get_items_of_category(hass, entry, CONTROL4_CATEGORY)
entity_list = []
for item in items_of_category:
try:
if item["type"] == CONTROL4_ENTITY_TYPE:
item_name = item["name"]
item_id = item["id"]
item_parent_id = item["parentId"]
item_manufacturer = None
item_device_name = None
item_model = None
for parent_item in items_of_category:
if parent_item["id"] == item_parent_id:
item_manufacturer = parent_item["manufacturer"]
item_device_name = parent_item["name"]
item_model = parent_item["model"]
else:
continue
except KeyError:
_LOGGER.exception(
"Unknown device properties received from Control4: %s",
item,
)
continue
if item_id in dimmer_coordinator.data:
item_is_dimmer = True
item_coordinator = dimmer_coordinator
elif item_id in non_dimmer_coordinator.data:
item_is_dimmer = False
item_coordinator = non_dimmer_coordinator
else:
director = entry_data[CONF_DIRECTOR]
item_variables = await director.getItemVariables(item_id)
_LOGGER.warning(
"Couldn't get light state data for %s, skipping setup. Available variables from Control4: %s",
item_name,
item_variables,
)
continue
entity_list.append(
Control4Light(
entry_data,
item_coordinator,
item_name,
item_id,
item_device_name,
item_manufacturer,
item_model,
item_parent_id,
item_is_dimmer,
)
)
async_add_entities(entity_list, True)
class Control4Light(Control4Entity, LightEntity):
"""Control4 light entity."""
def __init__(
self,
entry_data: dict,
coordinator: DataUpdateCoordinator,
name: str,
idx: int,
device_name: str | None,
device_manufacturer: str | None,
device_model: str | None,
device_id: int,
is_dimmer: bool,
) -> None:
"""Initialize Control4 light entity."""
super().__init__(
entry_data,
coordinator,
name,
idx,
device_name,
device_manufacturer,
device_model,
device_id,
)
self._is_dimmer = is_dimmer
if is_dimmer:
self._attr_color_mode = COLOR_MODE_BRIGHTNESS
self._attr_supported_color_modes = {COLOR_MODE_BRIGHTNESS}
else:
self._attr_color_mode = COLOR_MODE_ONOFF
self._attr_supported_color_modes = {COLOR_MODE_ONOFF}
def create_api_object(self):
"""Create a pyControl4 device object.
This exists so the director token used is always the latest one, without needing to re-init the entire entity.
"""
return C4Light(self.entry_data[CONF_DIRECTOR], self._idx)
@property
def is_on(self):
"""Return whether this light is on or off."""
return self.coordinator.data[self._idx]["value"] > 0
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
if self._is_dimmer:
return round(self.coordinator.data[self._idx]["value"] * 2.55)
return None
@property
def supported_features(self) -> int:
"""Flag supported features."""
if self._is_dimmer:
return SUPPORT_TRANSITION
return 0
async def async_turn_on(self, **kwargs) -> None:
"""Turn the entity on."""
c4_light = self.create_api_object()
if self._is_dimmer:
if ATTR_TRANSITION in kwargs:
transition_length = kwargs[ATTR_TRANSITION] * 1000
else:
transition_length = 0
if ATTR_BRIGHTNESS in kwargs:
brightness = (kwargs[ATTR_BRIGHTNESS] / 255) * 100
else:
brightness = 100
await c4_light.rampToLevel(brightness, transition_length)
else:
transition_length = 0
await c4_light.setLevel(100)
if transition_length == 0:
transition_length = 1000
delay_time = (transition_length / 1000) + 0.7
_LOGGER.debug("Delaying light update by %s seconds", delay_time)
await asyncio.sleep(delay_time)
await self.coordinator.async_request_refresh()
async def async_turn_off(self, **kwargs) -> None:
"""Turn the entity off."""
c4_light = self.create_api_object()
if self._is_dimmer:
if ATTR_TRANSITION in kwargs:
transition_length = kwargs[ATTR_TRANSITION] * 1000
else:
transition_length = 0
await c4_light.rampToLevel(0, transition_length)
else:
transition_length = 0
await c4_light.setLevel(0)
if transition_length == 0:
transition_length = 1500
delay_time = (transition_length / 1000) + 0.7
_LOGGER.debug("Delaying light update by %s seconds", delay_time)
await asyncio.sleep(delay_time)
await self.coordinator.async_request_refresh()
| [
"noreply@github.com"
] | asbach.noreply@github.com |
07a1175b2a7015339344497fa2f6692e5aec6bd5 | 866631deca871e7bc0e0fc59cbddc6c4082c9759 | /Code/Extras/New_SO_Full/Decision_Tree_Load.py | 6fae3454a840e72d634ef4c7852aee68711709a6 | [] | no_license | LCS2-IIITD/DiffQue-TIST | e6630641cb58ef13e9788d31d1e9737aada46009 | 90e0e83f6d1c3c1710c74aa38ea04825f1c67e98 | refs/heads/master | 2020-05-27T13:35:03.488111 | 2020-01-22T18:09:52 | 2020-01-22T18:09:52 | 188,641,831 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,954 | py | import networkx as nx
import pickle
from sklearn.svm import LinearSVR
from sklearn.neural_network import MLPClassifier
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import f1_score
from sklearn import svm
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
st1 = "edges.txt"
st2 = "merger2.txt"
st3 = "rank_nodes_pagerank_without_weight.txt"
st4 = "inverse_rankings.txt"
st5 = "time_diff.txt"
st6 = "ans_count.txt"
nooffeatures = 10
directed_graph = nx.DiGraph()
undirected_graph = nx.DiGraph()
pagerank_dict = {}
leader_fol_dict = {}
bw_centrality = {}
accepted_answer_id = {}
time_diff_of_accepted = {}
degree = {}
graph_file = open(st1)
for line in graph_file:
v1 = int(line.split(" ")[0])
v2 = int(line.split(" ")[1])
directed_graph.add_edge(v1, v2)
if( v1 in degree ):
degree[v1] += 1
else:
degree[v1] = 1
if( v2 in degree ):
degree[v2] += 1
else:
degree[v2] = 1
pagerank_file = open(st3)
for line in pagerank_file:
v1 = int(line.split(" ")[0])
pagerankk = float(line.split(" ")[1])
#pagerankk = "%.8f" % ( float(line.split(" ")[1]) )
pagerank_dict[ v1 ] = pagerankk
max1 = 0
leader_follower_file = open(st4)
for line in leader_follower_file:
v1 = int(line.split(" ")[0])
leader_fol_score = float(line.split(" ")[1])
max1 = max(max1, leader_fol_score)
leader_follower_file = open(st4)
for line in leader_follower_file:
v1 = int(line.split(" ")[0])
leader_fol_score = float(line.split(" ")[1])
leader_fol_dict[ v1 ] = float(leader_fol_score)/max1
max1 = 0
time_diff_acc = open(st5)
for line in time_diff_acc:
v1 = int(line.split(" ")[0])
v2 = float(line.split(" ")[1])
max1 = max(max1, v2)
time_diff_acc = open(st5)
for line in time_diff_acc:
v1 = int(line.split(" ")[0])
v2 = float(line.split(" ")[1])
time_diff_of_accepted[ v1 ] = float(v2)/max1
max1 = 0
answer_count = open(st6)
for line in answer_count:
v1 = int(line.split(" ")[0])
v2 = int(line.split(" ")[1])
max1 = max(max1, v2)
answer_count = open(st6)
for line in answer_count:
v1 = int(line.split(" ")[0])
v2 = int(line.split(" ")[1])
accepted_answer_id[ v1 ] = float(v2)/max1
def check2( vert ):
if( vert in accepted_answer_id ):
return accepted_answer_id[vert]
return 0
def check3( vert ):
if( vert in time_diff_of_accepted ):
return time_diff_of_accepted[vert]
return 1
def getfeatures(vert1, vert2):
vect = []
#vect.append( bw_centrality.get(vert1) )
#vect.append( bw_centrality.get(vert2) )
if( vert1 in leader_fol_dict ):
vect.append( leader_fol_dict.get(vert1) )
else:
vect.append(0)
if( vert2 in leader_fol_dict ):
vect.append( leader_fol_dict.get(vert2) )
else:
vect.append(0)
if( vert1 in pagerank_dict ):
vect.append( pagerank_dict.get(vert1) )
else:
vect.append(0)
if( vert2 in pagerank_dict ):
vect.append( pagerank_dict.get(vert2) )
else:
vect.append(0)
if( vert1 is degree ):
vect.append( degree[vert1] )
else:
vect.append(0)
if( vert2 is degree ):
vect.append( degree[vert2] )
else:
vect.append(0)
vect.append( check2(vert1) )
vect.append( check2(vert2) )
vect.append( check3(vert1) )
vect.append( check3(vert2) )
return vect
def preprocess():
train_x = []
train_y = []
print('hi')
for ed in directed_graph.edges():
v1 = ed[0]
v2 = ed[1]
lab = 1
img = getfeatures(v1, v2)
if( None not in img ):
train_x.append( img )
train_y.append( lab )
print('hi2')
for ed in directed_graph.edges():
v1 = ed[1]
v2 = ed[0]
lab = 0
img = getfeatures(v1, v2)
if( None not in img ):
train_x.append( img )
train_y.append( lab )
return train_x, train_y
train_x, train_y = preprocess()
trainvector = np.reshape( train_x, (len(train_x), nooffeatures) )
trainlabel = np.reshape( train_y, (len(train_y), 1) )
print('Defining')
clf2 = svm.SVC(kernel='linear', C = 1.0)
with open("SVM_Classifier.dump", "rb") as fp: # Unpickling
clf2 = pickle.load(fp)
#clf2 = KNeighborsClassifier(n_neighbors=12)
#clf2 = GaussianNB()
#clf2 = svm.SVC(kernel='rbf') # 0.554045444893
#clf2 = DecisionTreeClassifier(criterion = "gini")
#clf2 = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(15,15,), random_state=1) # 0.554045444893
#print('Training')
#clf2.fit(trainvector, trainlabel)
print('Checking')
correct = 0
incorrect = 0
noofval = 0
rejected = 0
t1 = []
t2 = []
tested_graph_file2 = open(st2)
for line in tested_graph_file2:
v1 = int(line.split(" ")[0])
v2 = int(line.split(" ")[1])
v3 = int(line.split(" ")[2])
img = getfeatures(v1, v2)
try:
labell = clf2.predict([img])
if( v3==v2 ):
t1.append(1)
if( labell==1 ):
t2.append(1)
correct += 1
else:
t2.append(0)
incorrect += 1
print(v1, " ", v2, " ", v3)
else:
t1.append(0)
if( labell==0 ):
t2.append(0)
correct += 1
else:
t2.append(1)
incorrect += 1
print(v1, " ", v2, " ", v3)
except Exception:
rejected += 1
pass
noofval += 1
print( correct, noofval, rejected, incorrect )
print( (float(correct)/noofval) )
print( f1_score(t1, t2, average='macro') )
with open("SVM_Classifier.dump", "wb") as fp1: #Pickling
pickle.dump(clf2, fp1)
| [
"deepakthukral0004@gmail.com"
] | deepakthukral0004@gmail.com |
a108d8f0631873f4b65550ed4b7d482f12e3e8a6 | 02422812b5e93225f6c842ec57aae601cb939a8d | /tests/client/internal_messaging/test_producer.py | fc80a258c21a90b32dbe40386e709df21e14b6aa | [
"Apache-2.0"
] | permissive | gcollard/lightbus | 1af20564bb05df76ed7302f6eb93487c5b17592d | d04deeda8ccef5a582b79255725ca2025a085c02 | refs/heads/master | 2022-12-27T01:02:45.505846 | 2020-10-02T02:18:05 | 2020-10-02T02:18:05 | 300,042,306 | 0 | 0 | Apache-2.0 | 2020-10-02T02:18:06 | 2020-09-30T19:44:52 | Python | UTF-8 | Python | false | false | 2,372 | py | import asyncio
import logging
import pytest
from _pytest.logging import LogCaptureFixture
from lightbus.client.internal_messaging.producer import InternalProducer
pytestmark = pytest.mark.unit
@pytest.mark.asyncio
async def test_queue_monitor(producer: InternalProducer, caplog: LogCaptureFixture, fake_coroutine):
"""Ensure the queue monitor logs as we expect
Note that something we implicitly test for here is that the monitor
does not log lots of duplicate lines. Rather it only logs when
something changes.
"""
producer.size_warning = 3
producer.monitor_interval = 0.01
caplog.set_level(logging.WARNING)
# Start the producer running
producer.start()
# No logging yet
assert not caplog.records
# Add a couple of items to the queue (still under size_warning)
producer.queue.put_nowait(None)
producer.queue.put_nowait(None)
await asyncio.sleep(0.05)
# Still no logging yet
assert not caplog.records
# One more gets us up to the warning level
producer.queue.put_nowait(None)
await asyncio.sleep(0.05)
# Now we have logging
assert len(caplog.records) == 1
assert caplog.records[0].getMessage() == "Queue in InternalProducer now has 3 commands."
caplog.clear() # Clear the log messages
# Let's check we get another messages when the queue gets bigger again
producer.queue.put_nowait(None)
await asyncio.sleep(0.05)
assert len(caplog.records) == 1
assert caplog.records[0].getMessage() == "Queue in InternalProducer now has 4 commands."
caplog.clear() # Clear the log messages
# Now check we get logging when the queue shrinks, but is still above the warning level
producer.queue.get_nowait()
await asyncio.sleep(0.05)
assert len(caplog.records) == 1
assert caplog.records[0].getMessage() == (
"Queue in InternalProducer has shrunk back down to 3 commands."
)
caplog.clear() # Clear the log messages
# Now check we get logging when the queue shrinks to BELOW the warning level
producer.queue.get_nowait()
await asyncio.sleep(0.05)
assert len(caplog.records) == 1
assert caplog.records[0].getMessage() == (
"Queue in InternalProducer has shrunk back down to 2 commands. "
"Queue is now at an OK size again."
)
caplog.clear() # Clear the log messages
| [
"adam@adamcharnock.com"
] | adam@adamcharnock.com |
3b5adac9dca8f817319ada3b9e7cefa9ca0912f5 | 8f9ea3f14bdf2187de759939b2bbc87fe68ccfc0 | /tensorflow/python/training/optimizer.py | a9287a0f0d0391cc6e0b297cce18eebaf9f64291 | [
"Apache-2.0"
] | permissive | davidstanke/bazel-mvn-demo | 4ea43f0ba293a28b916a27eab5f0812e9b753c2c | cff14dddce15ea7152988da576673bd15bab6c6e | refs/heads/master | 2022-10-20T07:52:29.651851 | 2018-11-22T13:17:51 | 2018-11-22T13:17:51 | 157,782,756 | 2 | 0 | Apache-2.0 | 2022-10-04T23:47:05 | 2018-11-15T22:54:09 | C++ | UTF-8 | Python | false | false | 48,470 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for optimizers."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import slot_creator
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
def get_filtered_grad_fn(grad_fn):
# `distributed_context.join()` requires that its arguments are parallel
# across threads, and in particular that `grads_and_vars` has the same
# variables in the same order.
# When computing gradients in eager mode with multiple threads, you
# can get extra variables with a gradient of `None`. This happens when
# those variables are accessed in another thread during the gradient
# computation. To get a consistent set of variables, we filter out
# those with `None` gradients.
def filtered_grad_fn(x=None):
return [(g, v) for g, v in grad_fn(x) if g is not None]
return filtered_grad_fn
def _deduplicate_indexed_slices(values, indices):
"""Sums `values` associated with any non-unique `indices`.
Args:
values: A `Tensor` with rank >= 1.
indices: A one-dimensional integer `Tensor`, indexing into the first
dimension of `values` (as in an IndexedSlices object).
Returns:
A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a
de-duplicated version of `indices` and `summed_values` contains the sum of
`values` slices associated with each unique index.
"""
unique_indices, new_index_positions = array_ops.unique(indices)
summed_values = math_ops.unsorted_segment_sum(
values, new_index_positions,
array_ops.shape(unique_indices)[0])
return (summed_values, unique_indices)
def _var_key(var):
if context.executing_eagerly():
return var._unique_id # pylint: disable=protected-access
return (var.op.graph, var.op.name)
class _OptimizableVariable(object):
"""Interface for abstracting over variables in the optimizers."""
@abc.abstractmethod
def target(self):
"""Returns the optimization target for this variable."""
raise NotImplementedError("Calling an abstract method.")
@abc.abstractmethod
def update_op(self, optimizer, g):
"""Returns the update ops for updating the variable."""
raise NotImplementedError("Calling an abstract method.")
class _RefVariableProcessor(_OptimizableVariable):
"""Processor for Variable."""
def __init__(self, v):
self._v = v
def __str__(self):
return "<_RefVariableProcessor(%s)>" % self._v
def target(self):
return self._v._ref() # pylint: disable=protected-access
def update_op(self, optimizer, g):
if isinstance(g, ops.Tensor):
update_op = optimizer._apply_dense(g, self._v) # pylint: disable=protected-access
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
else:
assert isinstance(g, ops.IndexedSlices), ("Gradient ", g, " is neither a "
"tensor nor IndexedSlices.")
if self._v.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
# pylint: disable=protected-access
return optimizer._apply_sparse_duplicate_indices(g, self._v)
class _DenseReadResourceVariableProcessor(_OptimizableVariable):
"""Processor for dense ResourceVariables."""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g):
# pylint: disable=protected-access
update_op = optimizer._resource_apply_dense(g, self._v.op.inputs[0])
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
class _DenseResourceVariableProcessor(_OptimizableVariable):
"""Processor for dense ResourceVariables."""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g):
# pylint: disable=protected-access
if isinstance(g, ops.IndexedSlices):
if self._v.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
return optimizer._resource_apply_sparse_duplicate_indices(
g.values, self._v, g.indices)
update_op = optimizer._resource_apply_dense(g, self._v)
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
class _TensorProcessor(_OptimizableVariable):
"""Processor for ordinary Tensors.
Even though a Tensor can't really be updated, sometimes it is useful to
compute the gradients with respect to a Tensor using the optimizer. Updating
the Tensor is, of course, unsupported.
"""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g):
raise NotImplementedError("Trying to update a Tensor ", self._v)
def _get_processor(v):
"""The processor of v."""
if context.executing_eagerly():
if isinstance(v, ops.Tensor):
return _TensorProcessor(v)
else:
return _DenseResourceVariableProcessor(v)
if isinstance(
v, resource_variable_ops.ResourceVariable) and not v._in_graph_mode: # pylint: disable=protected-access
# True if and only if `v` was initialized eagerly.
return _DenseResourceVariableProcessor(v)
if v.op.type == "VarHandleOp":
return _DenseResourceVariableProcessor(v)
if isinstance(v, variables.Variable):
return _RefVariableProcessor(v)
if isinstance(v, ops.Tensor):
return _TensorProcessor(v)
raise NotImplementedError("Trying to optimize unsupported type ", v)
@tf_export("train.Optimizer")
class Optimizer(
# Optimizers inherit from CheckpointableBase rather than Checkpointable
# since they do most of their dependency management themselves (slot
# variables are special-cased, and non-slot variables are keyed to graphs).
checkpointable.CheckpointableBase):
"""Base class for optimizers.
This class defines the API to add Ops to train a model. You never use this
class directly, but instead instantiate one of its subclasses such as
`GradientDescentOptimizer`, `AdagradOptimizer`, or `MomentumOptimizer`.
### Usage
```python
# Create an optimizer with the desired parameters.
opt = GradientDescentOptimizer(learning_rate=0.1)
# Add Ops to the graph to minimize a cost by updating a list of variables.
# "cost" is a Tensor, and the list of variables contains tf.Variable
# objects.
opt_op = opt.minimize(cost, var_list=<list of variables>)
```
In the training program you will just have to run the returned Op.
```python
# Execute opt_op to do one step of training:
opt_op.run()
```
### Processing gradients before applying them.
Calling `minimize()` takes care of both computing the gradients and
applying them to the variables. If you want to process the gradients
before applying them you can instead use the optimizer in three steps:
1. Compute the gradients with `compute_gradients()`.
2. Process the gradients as you wish.
3. Apply the processed gradients with `apply_gradients()`.
Example:
```python
# Create an optimizer.
opt = GradientDescentOptimizer(learning_rate=0.1)
# Compute the gradients for a list of variables.
grads_and_vars = opt.compute_gradients(loss, <list of variables>)
# grads_and_vars is a list of tuples (gradient, variable). Do whatever you
# need to the 'gradient' part, for example cap them, etc.
capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars]
# Ask the optimizer to apply the capped gradients.
opt.apply_gradients(capped_grads_and_vars)
```
### Gating Gradients
Both `minimize()` and `compute_gradients()` accept a `gate_gradients`
argument that controls the degree of parallelism during the application of
the gradients.
The possible values are: `GATE_NONE`, `GATE_OP`, and `GATE_GRAPH`.
<b>`GATE_NONE`</b>: Compute and apply gradients in parallel. This provides
the maximum parallelism in execution, at the cost of some non-reproducibility
in the results. For example the two gradients of `matmul` depend on the input
values: With `GATE_NONE` one of the gradients could be applied to one of the
inputs _before_ the other gradient is computed resulting in non-reproducible
results.
<b>`GATE_OP`</b>: For each Op, make sure all gradients are computed before
they are used. This prevents race conditions for Ops that generate gradients
for multiple inputs where the gradients depend on the inputs.
<b>`GATE_GRAPH`</b>: Make sure all gradients for all variables are computed
before any one of them is used. This provides the least parallelism but can
be useful if you want to process all gradients before applying any of them.
### Slots
Some optimizer subclasses, such as `MomentumOptimizer` and `AdagradOptimizer`
allocate and manage additional variables associated with the variables to
train. These are called <i>Slots</i>. Slots have names and you can ask the
optimizer for the names of the slots that it uses. Once you have a slot name
you can ask the optimizer for the variable it created to hold the slot value.
This can be useful if you want to log debug a training algorithm, report stats
about the slots, etc.
"""
# Values for gate_gradients.
GATE_NONE = 0
GATE_OP = 1
GATE_GRAPH = 2
def __init__(self, use_locking, name):
"""Create a new Optimizer.
This must be called by the constructors of subclasses.
Args:
use_locking: Bool. If True apply use locks to prevent concurrent updates
to variables.
name: A non-empty string. The name to use for accumulators created
for the optimizer.
Raises:
ValueError: If name is malformed.
"""
if not name:
raise ValueError("Must specify the optimizer name")
self._use_locking = use_locking
self._name = name
# Dictionary of slots.
# {slot_name :
# {_var_key(variable_to_train): slot_for_the_variable, ... },
# ... }
self._slots = {}
self._non_slot_dict = {}
# For implementing Checkpointable. Stores information about how to restore
# slot variables which have not yet been created
# (checkpointable._CheckpointPosition objects).
# {slot_name :
# {_var_key(variable_to_train): [checkpoint_position, ... ], ... },
# ... }
self._deferred_slot_restorations = {}
# TODO(isaprykin): When using a DistributionStrategy, and when an
# optimizer is created in each tower, it might be dangerous to
# rely on some Optimer methods. When such methods are called on a
# per-tower optimizer, an exception needs to be thrown. We do
# allow creation per-tower optimizers however, because the
# compute_gradients()->apply_gradients() sequence is safe.
def get_name(self):
return self._name
def minimize(self, loss, global_step=None, var_list=None,
gate_gradients=GATE_OP, aggregation_method=None,
colocate_gradients_with_ops=False, name=None,
grad_loss=None):
"""Add operations to minimize `loss` by updating `var_list`.
This method simply combines calls `compute_gradients()` and
`apply_gradients()`. If you want to process the gradient before applying
them call `compute_gradients()` and `apply_gradients()` explicitly instead
of using this function.
Args:
loss: A `Tensor` containing the value to minimize.
global_step: Optional `Variable` to increment by one after the
variables have been updated.
var_list: Optional list or tuple of `Variable` objects to update to
minimize `loss`. Defaults to the list of variables collected in
the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
name: Optional name for the returned operation.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
Returns:
An Operation that updates the variables in `var_list`. If `global_step`
was not `None`, that operation also increments `global_step`.
Raises:
ValueError: If some of the variables are not `Variable` objects.
@compatibility(eager)
When eager execution is enabled, `loss` should be a Python function that
takes elements of `var_list` as arguments and computes the value to be
minimized. If `var_list` is None, `loss` should take no arguments.
Minimization (and gradient computation) is done with respect to the
elements of `var_list` if not None, else with respect to any trainable
variables created during the execution of the `loss` function.
`gate_gradients`, `aggregation_method`, `colocate_gradients_with_ops` and
`grad_loss` are ignored when eager execution is enabled.
@end_compatibility
"""
grads_and_vars = self.compute_gradients(
loss, var_list=var_list, gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
vars_with_grad = [v for g, v in grads_and_vars if g is not None]
if not vars_with_grad:
raise ValueError(
"No gradients provided for any variable, check your graph for ops"
" that do not support gradients, between variables %s and loss %s." %
([str(v) for _, v in grads_and_vars], loss))
return self.apply_gradients(grads_and_vars, global_step=global_step,
name=name)
  def compute_gradients(self, loss, var_list=None,
                        gate_gradients=GATE_OP,
                        aggregation_method=None,
                        colocate_gradients_with_ops=False,
                        grad_loss=None):
    """Compute gradients of `loss` for the variables in `var_list`.
    This is the first part of `minimize()`.  It returns a list
    of (gradient, variable) pairs where "gradient" is the gradient
    for "variable".  Note that "gradient" can be a `Tensor`, an
    `IndexedSlices`, or `None` if there is no gradient for the
    given variable.
    Args:
      loss: A Tensor containing the value to minimize or a callable taking
        no arguments which returns the value to minimize. When eager execution
        is enabled it must be a callable.
      var_list: Optional list or tuple of `tf.Variable` to update to minimize
        `loss`.  Defaults to the list of variables collected in the graph
        under the key `GraphKeys.TRAINABLE_VARIABLES`.
      gate_gradients: How to gate the computation of gradients.  Can be
        `GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
      aggregation_method: Specifies the method used to combine gradient terms.
        Valid values are defined in the class `AggregationMethod`.
      colocate_gradients_with_ops: If True, try colocating gradients with
        the corresponding op.
      grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
    Returns:
      A list of (gradient, variable) pairs. Variable is always present, but
      gradient can be `None`.
    Raises:
      TypeError: If `var_list` contains anything else than `Variable` objects.
      ValueError: If some arguments are invalid.
      RuntimeError: If called with eager execution enabled and `loss` is
        not callable.
    @compatibility(eager)
    When eager execution is enabled, `gate_gradients`, `aggregation_method`,
    and `colocate_gradients_with_ops` are ignored.
    @end_compatibility
    """
    # Callable loss: differentiate with a GradientTape (eager-friendly path).
    if callable(loss):
      with backprop.GradientTape() as tape:
        if var_list is not None:
          tape.watch(var_list)
        loss_value = loss()
        # Scale loss if using a "mean" loss reduction and multiple towers.
        # Have to be careful to call distribute_lib.get_loss_reduction()
        # *after* loss() is evaluated, so we know what loss reduction it uses.
        # TODO(josh11b): Test that we handle weight decay in a reasonable way.
        if distribute_lib.get_loss_reduction() == "mean":
          num_towers = distribute_lib.get_distribution_strategy().num_towers
          if num_towers > 1:
            loss_value *= (1. / num_towers)
      if var_list is None:
        # Nothing was watched explicitly; fall back to whatever variables
        # the tape recorded while executing `loss()`.
        var_list = tape.watched_variables()
      grads = tape.gradient(loss_value, var_list, grad_loss)
      return list(zip(grads, var_list))
    # Non-callable/Tensor loss case
    if context.executing_eagerly():
      raise RuntimeError(
          "`loss` passed to Optimizer.compute_gradients should "
          "be a function when eager execution is enabled.")
    # Scale loss if using a "mean" loss reduction and multiple towers.
    if distribute_lib.get_loss_reduction() == "mean":
      num_towers = distribute_lib.get_distribution_strategy().num_towers
      if num_towers > 1:
        loss *= (1. / num_towers)
    if gate_gradients not in [Optimizer.GATE_NONE, Optimizer.GATE_OP,
                              Optimizer.GATE_GRAPH]:
      raise ValueError("gate_gradients must be one of: Optimizer.GATE_NONE, "
                       "Optimizer.GATE_OP, Optimizer.GATE_GRAPH. Not %s" %
                       gate_gradients)
    self._assert_valid_dtypes([loss])
    if grad_loss is not None:
      self._assert_valid_dtypes([grad_loss])
    if var_list is None:
      var_list = (
          variables.trainable_variables() +
          ops.get_collection(ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
    else:
      var_list = nest.flatten(var_list)
    # pylint: disable=protected-access
    var_list += ops.get_collection(ops.GraphKeys._STREAMING_MODEL_PORTS)
    # pylint: enable=protected-access
    # Processors wrap each variable kind (ref variable, resource variable,
    # streaming port) behind a uniform update interface.
    processors = [_get_processor(v) for v in var_list]
    if not var_list:
      raise ValueError("No variables to optimize.")
    var_refs = [p.target() for p in processors]
    grads = gradients.gradients(
        loss, var_refs, grad_ys=grad_loss,
        gate_gradients=(gate_gradients == Optimizer.GATE_OP),
        aggregation_method=aggregation_method,
        colocate_gradients_with_ops=colocate_gradients_with_ops)
    if gate_gradients == Optimizer.GATE_GRAPH:
      # GATE_GRAPH: no gradient may be used before all are computed.
      grads = control_flow_ops.tuple(grads)
    grads_and_vars = list(zip(grads, var_list))
    # Resource-dtype handles are skipped: their dtype is `resource`, not the
    # numeric dtype of the underlying value.
    self._assert_valid_dtypes(
        [v for g, v in grads_and_vars
         if g is not None and v.dtype != dtypes.resource])
    return grads_and_vars
  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Apply gradients to variables.
    This is the second part of `minimize()`. It returns an `Operation` that
    applies gradients.
    Args:
      grads_and_vars: List of (gradient, variable) pairs as returned by
        `compute_gradients()`.
      global_step: Optional `Variable` to increment by one after the
        variables have been updated.
      name: Optional name for the returned operation. Default to the
        name passed to the `Optimizer` constructor.
    Returns:
      An `Operation` that applies the specified gradients. If `global_step`
      was not None, that operation also increments `global_step`.
    Raises:
      TypeError: If `grads_and_vars` is malformed.
      ValueError: If none of the variables have gradients.
      RuntimeError: If you should use `_distributed_apply()` instead.
    """
    # This is a default implementation of apply_gradients() that can be shared
    # by most optimizers. It relies on the subclass implementing the following
    # methods: _create_slots(), _prepare(), _apply_dense(), and _apply_sparse().
    # Handle DistributionStrategy case.
    if distribute_lib.get_cross_tower_context():
      raise RuntimeError("Use `_distributed_apply()` instead of "
                         "`apply_gradients()` in a cross-tower context.")
    # TODO(isaprykin): Get rid of `has_distribution_strategy()` check by
    # always calling _distributed_apply(), using the default distribution
    # as needed.
    if distribute_lib.has_distribution_strategy():
      grads_and_vars = get_filtered_grad_fn(lambda _: grads_and_vars)()
      return distribute_lib.get_tower_context().merge_call(
          self._distributed_apply, grads_and_vars, global_step, name)
    # No DistributionStrategy case.
    grads_and_vars = tuple(grads_and_vars)  # Make sure repeat iteration works.
    if not grads_and_vars:
      raise ValueError("No variables provided.")
    converted_grads_and_vars = []
    for g, v in grads_and_vars:
      if g is not None:
        try:
          # Convert the grad to Tensor or IndexedSlices if necessary.
          g = ops.convert_to_tensor_or_indexed_slices(g)
        except TypeError:
          raise TypeError(
              "Gradient must be convertible to a Tensor"
              " or IndexedSlices, or None: %s" % g)
        if not isinstance(g, (ops.Tensor, ops.IndexedSlices)):
          raise TypeError(
              "Gradient must be a Tensor, IndexedSlices, or None: %s" % g)
      p = _get_processor(v)
      converted_grads_and_vars.append((g, v, p))
    converted_grads_and_vars = tuple(converted_grads_and_vars)
    var_list = [v for g, v, _ in converted_grads_and_vars if g is not None]
    if not var_list:
      raise ValueError("No gradients provided for any variable: %s." %
                       ([str(v) for _, _, v in converted_grads_and_vars],))
    # Slots are created in an init_scope so they are lifted out of any
    # function-building graph into the outermost (eager/graph) context.
    with ops.init_scope():
      self._create_slots(var_list)
    update_ops = []
    with ops.name_scope(name, self._name) as name:
      self._prepare()
      for grad, var, processor in converted_grads_and_vars:
        if grad is None:
          continue
        # We colocate all ops created in _apply_dense or _apply_sparse
        # on the same device as the variable.
        # TODO(apassos): figure out how to get the variable name here.
        # NOTE(review): due to precedence this reads as
        # `eager or (isinstance(...) and not var._in_graph_mode)` — i.e. the
        # empty scope name is used in eager mode or for eagerly-created
        # resource variables.
        if context.executing_eagerly() or isinstance(
            var,
            resource_variable_ops.ResourceVariable) and not var._in_graph_mode:  # pylint: disable=protected-access
          scope_name = ""
        else:
          scope_name = var.op.name
        with ops.name_scope("update_" + scope_name), ops.colocate_with(var):
          update_ops.append(processor.update_op(self, grad))
      if global_step is None:
        apply_updates = self._finish(update_ops, name)
      else:
        # The global_step increment must run strictly after all updates.
        with ops.control_dependencies([self._finish(update_ops, "update")]):
          with ops.colocate_with(global_step):
            if isinstance(global_step, resource_variable_ops.ResourceVariable):
              # TODO(apassos): the implicit read in assign_add is slow; consider
              # making it less so.
              apply_updates = resource_variable_ops.assign_add_variable_op(
                  global_step.handle,
                  ops.convert_to_tensor(1, dtype=global_step.dtype),
                  name=name)
            else:
              apply_updates = state_ops.assign_add(global_step, 1, name=name)
      if not context.executing_eagerly():
        if isinstance(apply_updates, ops.Tensor):
          apply_updates = apply_updates.op
        # Register the op in the TRAIN_OP collection exactly once.
        train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
        if apply_updates not in train_op:
          train_op.append(apply_updates)
      return apply_updates
  def _distributed_apply(self,
                         distribution,
                         grads_and_vars,
                         global_step=None,
                         name=None):
    """A version of `apply_gradients` for cross-tower context.
    This is a version of `apply_gradients()` for when you are using a
    `DistributionStrategy` and are in a cross-tower context. If in a
    tower context, use `apply_gradients()` as normal.
    Args:
      distribution: A `DistributionStrategy` object.
      grads_and_vars: List of (gradient, variable) pairs as returned by
        `compute_gradients()`, and then aggregated across towers.
      global_step: Optional (mirrored) `Variable` to increment by one
        after the variables have been updated.
      name: Optional name for the returned operation. Default to the
        name passed to the `Optimizer` constructor.
    Returns:
      An `Operation` that applies the specified gradients across all
      towers. If `global_step` was not None, that operation also
      increments `global_step`.
    """
    # Sum each gradient across towers before applying it.
    reduced_grads = distribution.batch_reduce("sum", grads_and_vars)
    var_list = [v for _, v in grads_and_vars]
    grads_and_vars = zip(reduced_grads, var_list)
    # Note that this is called in a cross-tower context.
    self._create_slots(var_list)
    def update(v, g):
      """Apply gradients to a replica variable."""
      assert v is not None
      try:
        # Convert the grad to Tensor or IndexedSlices if necessary.
        g = ops.convert_to_tensor_or_indexed_slices(g)
      except TypeError:
        raise TypeError("Gradient must be convertible to a Tensor"
                        " or IndexedSlices, or None: %s" % g)
      if not isinstance(g, (ops.Tensor, ops.IndexedSlices)):
        raise TypeError(
            "Gradient must be a Tensor, IndexedSlices, or None: %s" % g)
      p = _get_processor(v)
      scope_name = "" if context.executing_eagerly() else v.op.name
      # device_policy is set because non-mirrored tensors will be read in
      # `update_op`. `_resource_apply_dense`, `lr_t`, `beta1_t` and `beta2_t`
      # is an example.
      with ops.name_scope("update_" + scope_name):
        return p.update_op(self, g)
    with ops.name_scope(name, self._name) as name:
      self._prepare()
      # `distribution.update` runs `update` once per replica of each variable;
      # `unwrap` flattens the per-device results into individual ops.
      update_ops = [
          op
          for grad, var in grads_and_vars
          for op in distribution.unwrap(distribution.update(var, update, grad))
      ]
      def finish(self, update_ops):
        return self._finish(update_ops, "update")
      non_slot_devices = distribution.non_slot_devices(var_list)
      finish_updates = distribution.update_non_slot(
          non_slot_devices, finish, self, update_ops)
      if global_step is None:
        apply_updates = distribution.group(finish_updates, name=name)
      else:
        # Increment global_step only after all finish updates have run.
        with ops.control_dependencies(distribution.unwrap(finish_updates)):
          apply_updates = distribution.group(distribution.update(
              global_step, state_ops.assign_add, 1, name=name))
      if not context.executing_eagerly():
        if isinstance(apply_updates, ops.Tensor):
          apply_updates = apply_updates.op
        train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
        if apply_updates not in train_op:
          train_op.append(apply_updates)
      return apply_updates
  def get_slot(self, var, name):
    """Return a slot named `name` created for `var` by the Optimizer.
    Some `Optimizer` subclasses use additional variables.  For example
    `Momentum` and `Adagrad` use variables to accumulate updates.  This method
    gives access to these `Variable` objects if for some reason you need them.
    Use `get_slot_names()` to get the list of slot names created by the
    `Optimizer`.
    Args:
      var: A variable passed to `minimize()` or `apply_gradients()`.
      name: A string.
    Returns:
      The `Variable` for the slot if it was created, `None` otherwise.
    """
    # pylint: disable=protected-access
    named_slots = self._slots.get(name, None)
    if not named_slots:
      return None
    if hasattr(var, "_mirrored_container"):
      # Mirrored variable: slots are keyed by the container, not the
      # per-device variable itself.
      # NOTE: If this isn't patched, then there is no `handle` in
      # `_resource_apply_dense`.
      mirrored_container = var._mirrored_container()
      assert mirrored_container is not None
      if context.executing_eagerly():
        key = mirrored_container._unique_id
      else:
        key = (mirrored_container.graph, mirrored_container._shared_name)
      # pylint: enable=protected-access
      mirrored_slot = named_slots.get(key, None)
      if mirrored_slot is None: return None
      # Return the slot replica living on the same device as `var`.
      return mirrored_slot.get(device=var.device)
    return named_slots.get(_var_key(var), None)
def get_slot_names(self):
"""Return a list of the names of slots created by the `Optimizer`.
See `get_slot()`.
Returns:
A list of strings.
"""
return sorted(self._slots.keys())
def variables(self):
"""A list of variables which encode the current state of `Optimizer`.
Includes slot variables and additional global variables created by the
optimizer in the current default graph.
Returns:
A list of variables.
"""
executing_eagerly = context.executing_eagerly()
current_graph = ops.get_default_graph()
def _from_current_graph(variable):
if executing_eagerly:
# No variable.op in eager mode. We don't expect lots of eager graphs,
# but behavior should be consistent with graph mode.
return variable._graph_key == current_graph._graph_key # pylint: disable=protected-access
else:
return variable.op.graph is current_graph
optimizer_variables = [v for v in self._non_slot_variables()
if _from_current_graph(v)]
for _, variable_dict in self._slots.items():
for _, slot_for_variable in variable_dict.items():
if _from_current_graph(slot_for_variable):
optimizer_variables.append(slot_for_variable)
# Sort variables by name so that the return is deterministic.
return sorted(optimizer_variables, key=lambda v: v.name)
  def _create_non_slot_variable(self, initial_value, name, colocate_with):
    """Add an extra variable, not associated with a slot."""
    # Recommendation: Use OptimizerV2 if your optimizer uses non-slot variables.
    eager = context.executing_eagerly()
    graph = None if eager else colocate_with.graph
    # Key by (name, graph) so the same optimizer can own one such variable
    # per graph.
    key = (name, graph)
    v = self._non_slot_dict.get(key, None)
    if v is None:
      self._maybe_initialize_checkpointable()
      distribution_strategy = distribute_lib.get_distribution_strategy()
      with distribution_strategy.colocate_vars_with(colocate_with):
        if eager:
          # In eager mode, initialize directly from a pending checkpoint
          # restore if one exists.
          restored_initial_value = self._preload_simple_restoration(
              name=name, shape=None)
          if restored_initial_value is not None:
            initial_value = restored_initial_value
        v = variable_scope.variable(initial_value, name=name, trainable=False)
      # Restore this variable by name if necessary, but don't add a
      # Checkpointable dependency. Optimizers return the current graph's
      # non-slot variables from _checkpoint_dependencies explicitly rather
      # than unconditionally adding dependencies (since there may be multiple
      # non-slot variables with the same name in different graphs, trying to
      # save all of them would result in errors).
      self._handle_deferred_dependencies(name=name, checkpointable=v)
      self._non_slot_dict[key] = v
    return v
  @property
  def _checkpoint_dependencies(self):
    """From Checkpointable. Gather graph-specific non-slot variables to save."""
    current_graph_non_slot_variables = []
    current_graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
    # Sort by variable name only (item[0][0]) for a deterministic order.
    for (name, _), variable_object in sorted(self._non_slot_dict.items(),
                                             # Avoid comparing graphs
                                             key=lambda item: item[0][0]):
      if variable_object._graph_key == current_graph_key:  # pylint: disable=protected-access
        current_graph_non_slot_variables.append(
            checkpointable.CheckpointableReference(
                name=name, ref=variable_object))
    # Combine the base class's unconditional dependencies with this graph's
    # non-slot variables.
    return (super(Optimizer, self)._checkpoint_dependencies
            + current_graph_non_slot_variables)
def _lookup_dependency(self, name):
"""From Checkpointable. Find a non-slot variable in the current graph."""
unconditional = super(Optimizer, self)._lookup_dependency(name)
if unconditional is not None:
return unconditional
graph = None if context.executing_eagerly() else ops.get_default_graph()
return self._get_non_slot_variable(name, graph=graph)
def _get_non_slot_variable(self, name, graph=None):
non_slot = self._non_slot_dict.get((name, graph), None)
if hasattr(non_slot, "_mirrored_container"):
# This is a mirrored non-slot. In order to enable code like `_finish`
# to assign to a non-slot, return the current context replica.
return non_slot.get()
else:
return non_slot
  def _non_slot_variables(self):
    """Additional variables created by the `Optimizer`.
    Returns:
      A list or tuple of variables.
    """
    # Note: this is a dict view, recomputed lazily from _non_slot_dict.
    return self._non_slot_dict.values()
def _assert_valid_dtypes(self, tensors):
"""Asserts tensors are all valid types (see `_valid_dtypes`).
Args:
tensors: Tensors to check.
Raises:
ValueError: If any tensor is not a valid type.
"""
valid_dtypes = self._valid_dtypes()
for t in tensors:
dtype = t.dtype.base_dtype
if dtype not in valid_dtypes:
raise ValueError(
"Invalid type %r for %s, expected: %s." % (
dtype, t.name, [v for v in valid_dtypes]))
# --------------
# Methods to be implemented by subclasses if they want to use the
# inherited implementation of apply_gradients() or compute_gradients().
# --------------
def _valid_dtypes(self):
"""Valid types for loss, variables and gradients.
Subclasses should override to allow other float types.
Returns:
Valid types for loss, variables and gradients.
"""
return set(
[dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64])
  def _create_slots(self, var_list):
    """Create all slots needed by the variables.
    Args:
      var_list: A list of `Variable` objects.
    """
    # No slots needed by default
    # Subclasses with per-variable state (e.g. momentum accumulators)
    # override this hook.
    pass
  def _prepare(self):
    """Create all needed tensors before applying gradients.
    This is called with the name_scope using the "name" that
    users have chosen for the application of gradients.
    """
    # Default: nothing to prepare; subclasses typically build hyperparameter
    # tensors (learning rate, etc.) here.
    pass
  def _apply_dense(self, grad, var):
    """Add ops to apply dense gradients to `var`.
    Args:
      grad: A `Tensor`.
      var: A `Variable` object.
    Returns:
      An `Operation`.
    """
    # Must be overridden by subclasses that support dense ref-variable updates.
    raise NotImplementedError()
  def _resource_apply_dense(self, grad, handle):
    """Add ops to apply dense gradients to the variable `handle`.
    Args:
      grad: a `Tensor` representing the gradient.
      handle: a `Tensor` of dtype `resource` which points to the variable
       to be updated.
    Returns:
      An `Operation` which updates the value of the variable.
    """
    # Must be overridden by subclasses that support resource variables.
    raise NotImplementedError()
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
"""Add ops to apply sparse gradients to `handle`, with repeated indices.
Optimizers which override this method must deal with repeated indices. See
the docstring of `_apply_sparse_duplicate_indices` for details. By default
the correct behavior, to sum non-unique indices and their associated
gradients, is enforced by first pre-processing `grad` and `indices` and
passing them on to `_resource_apply_sparse`. Optimizers which deal correctly
with duplicate indices may instead override this method to avoid the
overhead of summing.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
indices: a `Tensor` of integral type representing the indices for
which the gradient is nonzero. Indices may be repeated.
Returns:
An `Operation` which updates the value of the variable.
"""
summed_grad, unique_indices = _deduplicate_indexed_slices(
values=grad, indices=indices)
return self._resource_apply_sparse(summed_grad, handle, unique_indices)
  def _resource_apply_sparse(self, grad, handle, indices):
    """Add ops to apply sparse gradients to the variable `handle`.
    Similar to `_apply_sparse`, the `indices` argument to this method has been
    de-duplicated. Optimizers which deal correctly with non-unique indices may
    instead override `_resource_apply_sparse_duplicate_indices` to avoid this
    overhead.
    Args:
      grad: a `Tensor` representing the gradient for the affected indices.
      handle: a `Tensor` of dtype `resource` which points to the variable
       to be updated.
      indices: a `Tensor` of integral type representing the indices for
       which the gradient is nonzero. Indices are unique.
    Returns:
      An `Operation` which updates the value of the variable.
    """
    # Must be overridden by subclasses that support sparse resource updates.
    raise NotImplementedError()
def _apply_sparse_duplicate_indices(self, grad, var):
"""Add ops to apply sparse gradients to `var`, with repeated sparse indices.
Optimizers which override this method must deal with IndexedSlices objects
such as the following:
IndexedSlicesValue(values=[1, 1], indices=[0, 0], dense_shape=[1])
The correct interpretation is:
IndexedSlicesValue(values=[2], indices=[0], dense_shape=[1])
Many optimizers deal incorrectly with repeated indices when updating based
on sparse gradients (e.g. summing squares rather than squaring the sum, or
applying momentum terms multiple times). Adding first is always the correct
behavior, so this is enforced here by reconstructing the IndexedSlices to
have only unique indices, then calling _apply_sparse.
Optimizers which deal correctly with repeated indices may instead override
this method to avoid the overhead of summing indices.
Args:
grad: `IndexedSlices`.
var: A `Variable` object.
Returns:
An `Operation`.
"""
summed_values, unique_indices = _deduplicate_indexed_slices(
values=grad.values, indices=grad.indices)
gradient_no_duplicate_indices = ops.IndexedSlices(
indices=unique_indices,
values=summed_values,
dense_shape=grad.dense_shape)
return self._apply_sparse(gradient_no_duplicate_indices, var)
  def _apply_sparse(self, grad, var):
    """Add ops to apply sparse gradients to `var`.
    The IndexedSlices object passed to `grad` in this function is by default
    pre-processed in `_apply_sparse_duplicate_indices` to remove duplicate
    indices (see its docstring for details). Optimizers which can tolerate or
    have correct special cases for duplicate sparse indices may override
    `_apply_sparse_duplicate_indices` instead of this function, avoiding that
    overhead.
    Args:
      grad: `IndexedSlices`, with no repeated indices.
      var: A `Variable` object.
    Returns:
      An `Operation`.
    """
    # Must be overridden by subclasses that support sparse ref-variable updates.
    raise NotImplementedError()
  def _finish(self, update_ops, name_scope):
    """Do what is needed to finish the update.
    This is called with the `name_scope` using the "name" that
    users have chosen for the application of gradients.
    Args:
      update_ops: List of `Operation` objects to update variables.  This list
        contains the values returned by the `_apply_dense()` and
        `_apply_sparse()` calls.
      name_scope: String.  Name to use for the returned operation.
    Returns:
      The operation to apply updates.
    """
    # Default: group all per-variable updates into a single no-output op.
    return control_flow_ops.group(*update_ops, name=name_scope)
# --------------
# Utility methods for subclasses.
# --------------
def _slot_dict(self, slot_name):
"""Returns a dict for caching slots created under the given name.
Args:
slot_name: Name for the slot.
Returns:
A dict that maps primary `Variable` objects to the slot created
for that variable, under the given slot name.
"""
named_slots = self._slots.get(slot_name, None)
if named_slots is None:
named_slots = {}
self._slots[slot_name] = named_slots
return named_slots
def _get_or_make_slot(self, var, val, slot_name, op_name):
"""Find or create a slot for a variable.
Args:
var: A `Variable` object.
val: A `Tensor`. The initial value of the slot.
slot_name: Name for the slot.
op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
if _var_key(var) not in named_slots:
new_slot_variable = slot_creator.create_slot(var, val, op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[_var_key(var)] = new_slot_variable
return named_slots[_var_key(var)]
def _get_or_make_slot_with_initializer(self, var, initializer, shape, dtype,
slot_name, op_name):
"""Find or create a slot for a variable, using an Initializer.
Args:
var: A `Variable` object.
initializer: An `Initializer`. The initial value of the slot.
shape: Shape of the initial value of the slot.
dtype: Type of the value of the slot.
slot_name: Name for the slot.
op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
if _var_key(var) not in named_slots:
new_slot_variable = slot_creator.create_slot_with_initializer(
var, initializer, shape, dtype, op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[_var_key(var)] = new_slot_variable
return named_slots[_var_key(var)]
def _zeros_slot(self, var, slot_name, op_name):
"""Find or create a slot initialized with 0.0.
Args:
var: A `Variable` object.
slot_name: Name for the slot.
op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
if _var_key(var) not in named_slots:
new_slot_variable = slot_creator.create_zeros_slot(var, op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[_var_key(var)] = new_slot_variable
return named_slots[_var_key(var)]
# --------------
# For implementing the Checkpointable interface.
# --------------
def _restore_slot_variable(self, slot_name, variable, slot_variable):
"""Restore a newly created slot variable's value."""
variable_key = _var_key(variable)
deferred_restorations = self._deferred_slot_restorations.get(
slot_name, {}).pop(variable_key, [])
# Iterate over restores, highest restore UID first to minimize the number
# of assignments.
deferred_restorations.sort(key=lambda position: position.restore_uid,
reverse=True)
for checkpoint_position in deferred_restorations:
checkpoint_position.restore(slot_variable)
  def _create_or_restore_slot_variable(
      self, slot_variable_position, slot_name, variable):
    """Restore a slot variable's value, possibly creating it.
    Called when a variable which has an associated slot variable is created or
    restored. When executing eagerly, we create the slot variable with a
    restoring initializer.
    No new variables are created when graph building. Instead,
    _restore_slot_variable catches these after normal creation and adds restore
    ops to the graph. This method is nonetheless important when graph building
    for the case when a slot variable has already been created but `variable`
    has just been added to a dependency graph (causing us to realize that the
    slot variable needs to be restored).
    Args:
      slot_variable_position: A `checkpointable._CheckpointPosition` object
        indicating the slot variable `Checkpointable` object to be restored.
      slot_name: The name of this `Optimizer`'s slot to restore into.
      variable: The variable object this slot is being created for.
    """
    named_slots = self._slot_dict(slot_name)
    variable_key = _var_key(variable)
    slot_variable = named_slots.get(variable_key, None)
    # Eager-only eager restore path: create the slot directly from the
    # checkpointed value when it does not exist yet.
    if (slot_variable is None and context.executing_eagerly() and
        slot_variable_position.is_simple_variable()
        # Defer slot variable creation if there is an active variable creator
        # scope. Generally we'd like to eagerly create/restore slot variables
        # when possible, but this may mean that scopes intended to catch
        # `variable` also catch its eagerly created slot variable
        # unintentionally (specifically make_template would add a dependency on
        # a slot variable if not for this case). Deferring is mostly harmless
        # (aside from double initialization), and makes variable creator scopes
        # behave the same way they do when graph building.
        and not ops.get_default_graph()._variable_creator_stack):  # pylint: disable=protected-access
      initializer = checkpointable.CheckpointInitialValue(
          checkpoint_position=slot_variable_position)
      slot_variable = self._get_or_make_slot(
          var=variable,
          val=initializer,
          slot_name=slot_name,
          op_name=self._name)
    # Slot variables are not owned by any one object (because we don't want to
    # save the slot variable if the optimizer is saved without the non-slot
    # variable, or if the non-slot variable is saved without the optimizer;
    # it's a dependency hypergraph with edges of the form (optimizer, non-slot
    # variable, variable)). So we don't _track_ slot variables anywhere, and
    # instead special-case this dependency and otherwise pretend it's a normal
    # graph.
    if slot_variable is not None:
      # If we've either made this slot variable, or if we've pulled out an
      # existing slot variable, we should restore it.
      slot_variable_position.restore(slot_variable)
    else:
      # We didn't make the slot variable. Defer restoring until it gets created
      # normally. We keep a list rather than the one with the highest restore
      # UID in case slot variables have their own dependencies, in which case
      # those could differ between restores.
      self._deferred_slot_restorations.setdefault(
          slot_name, {}).setdefault(variable_key, []).append(
              slot_variable_position)
| [
"davidstanke@gmail.com"
] | davidstanke@gmail.com |
cb2c66246218d18c73711d4760222ad0c1230cb8 | 571a89f94f3ebd9ec8e6b618cddb7d05811e0d62 | /chokudai_S001/h/main.py | dee5983b58febe63c07ef1f8bf5b7db686e13a53 | [] | no_license | ryu19-1/atcoder_python | 57de9e1db8ff13a107b5861f8f6a231e40366313 | cc24b3c2895aad71d40cefbb8e2893dc397b8f4f | refs/heads/master | 2023-05-10T05:32:16.507207 | 2021-05-19T17:48:10 | 2021-05-19T17:48:10 | 368,954,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | #!/usr/bin/env python3
import sys
from collections import deque, Counter
from heapq import heappop, heappush
from bisect import bisect_left
from itertools import accumulate
sys.setrecursionlimit(10**6)
INF = 10**12
m = 10**9 + 7
def main():
    """Read N and a sequence, then print the length of its longest
    strictly increasing subsequence (O(N log N) patience-sorting trick)."""
    N = int(input())
    a = list(map(int, input().split()))
    # tails[k] = smallest possible tail of an increasing subsequence of
    # length k+1 seen so far; INF marks still-unused slots.
    tails = [INF] * N
    for i in range(N):
        pos = bisect_left(tails, a[i])
        tails[pos] = a[i]
    # Number of occupied slots == LIS length.
    print(bisect_left(tails, INF))


if __name__ == "__main__":
    main()
| [
"ryu1007kami@gmail.com"
] | ryu1007kami@gmail.com |
eac93448f682961cac9392c005e6e93abf7cac29 | e5664b40c9d0a828c009b30ed8fe62666d04bf62 | /falcon_marshmallow/_version.py | ceaa700e54e94982b6e19e2fb7dede45e5f07725 | [
"MIT"
] | permissive | evilr00t/falcon-marshmallow | 9eb348fd68e1b0c85927e77f62bc02fc093ad28e | 97f169c78f11a638b1f21b3a977bb5df8d071be5 | refs/heads/master | 2022-02-23T05:04:37.315682 | 2019-10-12T19:37:11 | 2019-10-12T19:37:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | # -*- coding: utf-8 -*-
"""
version.py module
The version set here will be automatically incorporated into setup.py
and also set as the __version__ attribute for the package.
"dev", "rc", and other verison tags should be added using the
``setup.py egg_info`` command when creating distributions.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
__version_info__ = (0, 4, 0)
__version__ = ".".join([str(ver) for ver in __version_info__])
| [
"msplanchard@gmail.com"
] | msplanchard@gmail.com |
13b853a6cd8d40e6ac83da2a30b3fc8150df4029 | 2f39059afaa7208ac922cdd0527ccd502af8f2c2 | /Python_serwer_rpc/rpc_serwer.py | 11626407f0c97bf59fc92a49b5f88f6461f6cfbd | [] | no_license | endablju/TAS_Ksiegarnia | 3ff736819916a51194af025f92bc9921d98b605f | 6490a24dabf44921a72007bef3fd67c7312aa346 | refs/heads/master | 2020-05-18T00:30:40.228021 | 2015-01-27T13:34:21 | 2015-01-27T13:34:21 | 25,406,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,910 | py | # -*- coding: utf-8 -*-
from SimpleXMLRPCServer import SimpleXMLRPCServer
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler
import MySQLdb
class RequestHandler(SimpleXMLRPCRequestHandler):
    # Restrict XML-RPC requests to the conventional /RPC2 path.
    rpc_paths = ('/RPC2',)
# Listen locally on port 8001; allow_none lets helpers marshal Python None.
server = SimpleXMLRPCServer(("localhost", 8001),
                            requestHandler=RequestHandler,
                            allow_none=True)
server.register_introspection_functions()
#trzeba napisać 4 metody pobieranie Book_object_all, book_object_get, category_object_all, category_object_get
#sqlalchemy
def add_book(title,autor,slug,text,price,quantity):
    """Insert a new book row; prints a status message and returns "TRUE"/"FALSE"."""
    sql = "INSERT INTO books_book (title,slug,text,autor,price,quantity)" \
          "VALUES (%s,%s,%s,%s,%s,%s )"
    try:
        db = MySQLdb.connect("db4free.net", "ksiegarnia", "tas_projekt", "tasksiegarnia")
        cursor = db.cursor()
        cursor.execute(sql,(title,slug,text,autor,price,quantity))
        db.commit()
        print "Dodałem książke"
        return "TRUE"
    except:
        # NOTE(review): bare except swallows the real error, and the rollback
        # below is unreachable because it follows the return statement.
        print "Nie dodałem książki. Błąd"
        return "FALSE"
        db.rollback()
    finally:
        # NOTE(review): if connect() itself failed, cursor/db are undefined
        # here and this raises NameError.
        cursor.close()
        db.close()
def delete_book(id):
    """Delete the book with the given primary key; returns "TRUE"/"FALSE"."""
    sql = "DELETE FROM books_book WHERE id=%s"
    try:
        db = MySQLdb.connect("db4free.net", "ksiegarnia", "tas_projekt", "tasksiegarnia")
        cursor = db.cursor()
        cursor.execute(sql,(id,))
        db.commit()
        print "Usunąlem książke"
        return "TRUE"
    except:
        # NOTE(review): bare except; the real error is swallowed.
        print "Nie udało się usunąć książki. Błąd"
        return "FALSE"
    finally:
        # NOTE(review): cursor/db are undefined here if connect() failed.
        cursor.close()
        db.close()
def add_category(name):
    """Insert a new category row; returns "TRUE" on success, "FALSE" on failure."""
    sql = "INSERT INTO books_category (name) VALUES (%s)"
    try:
        db = MySQLdb.connect("db4free.net", "ksiegarnia", "tas_projekt", "tasksiegarnia")
        cursor = db.cursor()
        cursor.execute(sql,(name,))
        db.commit()
        print "Dodałem kategorie"
        return "TRUE"
    except:
        # NOTE(review): the rollback below is unreachable (after return).
        print "Nie dodałem kategorii. Błąd"
        return "FALSE"
        db.rollback()
    finally:
        cursor.close()
        db.close()
def update_book(title,autor,slug,text,price,quantity,id):
sql = "UPDATE books_book SET title=%s, autor=%s, slug=%s, text=%s, price=%s, quantity=%s WHERE id=%s"
try:
db = MySQLdb.connect("db4free.net", "ksiegarnia", "tas_projekt", "tasksiegarnia")
cursor = db.cursor()
cursor.execute(sql,(title,autor,slug,text,price,quantity,id,))
print "Edytowałem"
return "TRUE"
except:
print "Nie udało się zedytować książki. Błąd"
return "FALSE"
db.rollback()
finally:
cursor.close()
db.close()
# Expose the CRUD helpers over XML-RPC and serve until interrupted.
server.register_function(add_book)
server.register_function(delete_book)
server.register_function(add_category)
server.register_function(update_book)
server.serve_forever()
| [
"simaosrz@gmail.com"
] | simaosrz@gmail.com |
c73e8d7f7187a80ad2438cd73fc2b30a9529daaf | 9dc6f8d91dc56523b9688990d4ae413b0bcbd4e1 | /examples/agf2/04-restart.py | 9bc238f22c51f8a26903e67bf64ba74da0fb1b8c | [
"Apache-2.0"
] | permissive | sunqm/pyscf | 566bc2447d8072cff442d143891c12e6414de01c | dd179a802f0a35e72d8522503172f16977c8d974 | refs/heads/master | 2023-08-15T18:09:58.195953 | 2023-03-27T21:02:03 | 2023-03-27T21:02:03 | 159,149,096 | 80 | 26 | Apache-2.0 | 2022-02-05T00:19:24 | 2018-11-26T10:10:23 | Python | UTF-8 | Python | false | false | 1,027 | py | #!/usr/bin/env python
#
# Author: Oliver J. Backhouse <olbackhouse@gmail.com>
# George H. Booth <george.booth@kcl.ac.uk>
#
'''
An example of restarting an AGF2 calculation.
The agf2.chkfile module provides similar functionality to the existing
chkfile utilities in pyscf, but prevents failure during MPI runs.
'''
import numpy
from pyscf import gto, scf, agf2, lib
# Build the water molecule and run a tightly converged RHF reference.
mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='cc-pvdz', verbose=5)
mf = scf.RHF(mol)
mf.conv_tol = 1e-12
# if we are using MPI, we only want pyscf to save a chkfile on the root process:
if agf2.mpi_helper.rank == 0:
    mf.chkfile = 'agf2.chk'
mf.run()
# Run an AGF2 calculation:
# (max_cycle=3 deliberately stops it early so the restart below has work left)
gf2 = agf2.AGF2(mf)
gf2.conv_tol = 1e-7
gf2.max_cycle = 3
gf2.run()
# Restore the Mole and SCF first
mol = agf2.chkfile.load_mol('agf2.chk')
mf = scf.RHF(mol)
mf.__dict__.update(agf2.chkfile.load('agf2.chk', 'scf'))
# Restore the AGF2 calculation
gf2a = agf2.AGF2(mf)
gf2a.__dict__.update(agf2.chkfile.load_agf2('agf2.chk')[1])
# Resume the interrupted solver with a full iteration budget.
gf2a.max_cycle = 50
gf2a.run()
| [
"olbackhouse@gmail.com"
] | olbackhouse@gmail.com |
f542f6bc362ff402f8d81483959310e99e676cfc | f3d41b3053a4a1976ce3c9d675b5d552a96fcabc | /pytactoe.py | 025c69903a894ddf94738f56fc3dffb713a1c4fc | [] | no_license | fortjosef/pytactoe | ad68d5eb99576b3c28ab82f76d06baff79926dd9 | 892f9a195ea58f365500e44327f0b4aa25cb50de | refs/heads/master | 2020-12-23T17:06:52.690881 | 2020-03-05T14:38:17 | 2020-03-05T14:38:17 | 237,212,009 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,726 | py | import curses
import sys
import math
from curses import wrapper
def translatePlayer(playerAsInt):
    """Map the internal player code (-1 or 1) to its board symbol.

    Returns "X" for -1, "Y" for 1, and "" for anything else (empty cell).
    """
    symbols = {-1: "X", 1: "Y"}
    return symbols.get(playerAsInt, "")
def checkForWin(playField):
    """Score the 3x3 board for completed lines.

    Cells hold -1 (X), 1 (Y) or 0 (empty), so a row, column or diagonal sums
    to -3/+3 exactly when one player owns all three of its cells.

    Returns -1 if X has more winning lines, 1 if Y has more, 2 when both
    have the same non-zero number of wins (draw), and 0 when nobody has won.
    """
    line_sums = [sum(row) for row in playField]
    line_sums += [sum(playField[r][c] for r in range(3)) for c in range(3)]
    line_sums.append(sum(playField[i][i] for i in range(3)))
    line_sums.append(sum(playField[i][2 - i] for i in range(3)))
    winsForX = line_sums.count(-3)
    winsForY = line_sums.count(3)
    if winsForX == winsForY:
        return 2 if winsForX > 0 else 0
    return -1 if winsForX > winsForY else 1
def game(stdscr):
    """Run one interactive tic-tac-toe session in the given curses window.

    Arrow keys move the cursor, Return places the current player's piece,
    'q' returns to the caller.  Board cells hold -1 (X), 1 (Y) or 0 (empty).
    NOTE(review): after a win is announced the loop keeps running and the
    board is not reset, so further placements are still accepted.
    """
    stdscr.clear()
    quit = False
    drawScreen = True
    computerOpponent = False
    curses.curs_set(True)
    playField = [
        [0,0,0],
        [0,0,0],
        [0,0,0]
    ]
    cursorX = 0
    cursorY = 0
    currentPlayer = -1
    stdscr.addstr(0, 0, "Arrow Keys to move, Return to place piece")
    stdscr.addstr(1, 0, "Q to quit")
    #KEY_LEFT, KEY_UP, KEY_RIGHT, KEY_DOWN
    while quit == False:
        rows, cols = stdscr.getmaxyx()
        # Centre the 5-character-wide board horizontally, 5 rows down.
        playFieldXOrigin = math.floor((cols - 5) / 2)
        playFieldYOrigin = 5
        stdscr.addstr(2, 0, translatePlayer(currentPlayer) + " to move")
        stdscr.move(playFieldYOrigin + (cursorY * 2), playFieldXOrigin + (cursorX * 2))
        if drawScreen == True:
            # Odd screen rows/cols are grid lines; even ones show the pieces.
            for y in range(0,5):
                if y == 1 or y == 3:
                    stdscr.addstr(playFieldYOrigin + y, playFieldXOrigin, "-----")
                else:
                    for x in range(0,5):
                        if x == 1 or x == 3:
                            stdscr.addstr(playFieldYOrigin + y, playFieldXOrigin + x, "|")
                        else:
                            stdscr.addstr(playFieldYOrigin + y, playFieldXOrigin + x, translatePlayer(playField[int(y / 2)][int(x / 2)]))
            stdscr.move(playFieldYOrigin + (cursorY * 2), playFieldXOrigin + (cursorX * 2))
            drawScreen = False
        try:
            mykey = stdscr.getkey()
        except curses.error:
            #combine this with below, would wan other curses errors coming thorugh
            pass
        except:
            type, value, traceback = sys.exc_info()
            stdscr.addstr(30, 10, "{} {}".format(str(type), str(value)))
        else:
            if mykey == 'q':
                quit = True
                continue
            elif mykey == 'KEY_UP':
                if cursorY > 0:
                    cursorY -= 1
                    stdscr.move(playFieldYOrigin + (cursorY * 2), playFieldXOrigin + (cursorX * 2))
            elif mykey == 'KEY_DOWN':
                if cursorY < 2:
                    cursorY += 1
                    stdscr.move(playFieldYOrigin + (cursorY * 2), playFieldXOrigin + (cursorX * 2))
            elif mykey == 'KEY_RIGHT':
                if cursorX < 2:
                    cursorX += 1
                    stdscr.move(playFieldYOrigin + (cursorY * 2), playFieldXOrigin + (cursorX * 2))
            elif mykey == 'KEY_LEFT':
                if cursorX > 0:
                    cursorX -= 1
                    stdscr.move(playFieldYOrigin + (cursorY * 2), playFieldXOrigin + (cursorX * 2))
            elif mykey == '\n':
                if playField[cursorY][cursorX] == 0:
                    playField[cursorY][cursorX] = currentPlayer
                    drawScreen = True
                    winResult = checkForWin(playField)
                    stdscr.addstr(10, 0, str(winResult))
                    if winResult != 0:
                        #will need to add handling for a draw
                        #will also need to add handling for a cat
                        stdscr.addstr(0, 0, translatePlayer(winResult) + " WINS!!!")
                    else:
                        # No winner yet: hand the turn to the other player.
                        if currentPlayer == -1:
                            currentPlayer = 1
                        else:
                            currentPlayer = -1
                else:
                    # Cell already occupied.
                    curses.beep()
            elif mykey == 'KEY_RESIZE':
                #stdscr.addstr(31, 10, 'caught a resize')
                #rows, cols = stdscr.getmaxyx()
                #stdscr.resize(rows, cols)
                #stdscr.addstr(40, 10, "{} Rows {} Columns".format(str(rows), str(cols)))
                continue
def debugUi(stdscr):
    """Curses debugging screen.

    Draws colour-pair samples, then echoes the last key pressed and the
    current terminal size until 'q' is pressed.
    """
    stdscr.clear()
    curses.init_pair(1, curses.COLOR_RED, curses.COLOR_BLACK)
    # Paint a 10x10 grid of digits, one colour pair per row.
    for x in range(0, 10):
        for y in range(0, 10):
            stdscr.addstr(10 + x, 10 + y, str(y), curses.color_pair(x))
    oldlen = 0
    stdscr.refresh()
    stdscr.nodelay(True)
    curses.curs_set(True)
    origrows, origcols = stdscr.getmaxyx()
    quit = False
    while quit == False:
        rows, cols = stdscr.getmaxyx()
        # NOTE(review): `cols != origrows` compares columns against the
        # original ROW count — probably meant origcols.
        if rows != origrows or cols != origrows:
            stdscr.addstr(rows - 1, 10, 'caught a resize')
            stdscr.resize(rows, cols)
            stdscr.addstr(rows - 1, 10, "{} Rows {} Columns".format(str(rows), str(cols)))
        try:
            mykey = stdscr.getkey()
        except curses.error:
            #combine this with below, would wan other curses errors coming thorugh
            pass
        except:
            type, value, traceback = sys.exc_info()
            stdscr.addstr(30, 10, "{} {}".format(str(type), str(value)))
        else:
            if mykey == 'q':
                quit = True
                continue
            #elif mykey == 'KEY_RESIZE':
            #stdscr.addstr(31, 10, 'caught a resize')
            #rows, cols = stdscr.getmaxyx()
            #stdscr.resize(rows, cols)
            #stdscr.addstr(40, 10, "{} Rows {} Columns".format(str(rows), str(cols)))
            #continue
            #try:
            rows, cols = stdscr.getmaxyx()
            #try:
            stdscr.addstr(rows - 1, 10, "{} Rows {} Columns".format(str(rows), str(cols)))
            #except:
            #type, value, traceback = sys.exc_info()
            #stdscr.addstr(30, 10, "{} {}".format(str(type), str(value)))
            # Pad with spaces so a shorter key name overwrites the longer one.
            addin = ''
            if len(mykey) < oldlen:
                addin = ' ' * (oldlen - len(mykey))
            stdscr.addstr(rows - 2, 10, mykey + addin)
            oldlen = len(mykey)
            #except curses.error:
            #pass
#pass
def main(stdscr):
    """Top-level curses menu: play a game, open the debug screen, or quit."""
    stdscr.nodelay(True)
    origrows, origcols = stdscr.getmaxyx()
    quit = False
    drawScreen = True
    title = 'PyTacToe'
    curses.curs_set(False)
    while quit == False:
        if drawScreen == True:
            stdscr.clear()
            # Centre the title on the first row.
            stdscr.addstr(1, int((origcols - len(title)) / 2), title)
            stdscr.addstr(8, 10, '1) Play a game')
            stdscr.addstr(9, 10, '2) Debugging interface')
            stdscr.addstr(10,10, 'q) Quit' )
            drawScreen = False
        try:
            mykey = stdscr.getkey()
        except curses.error:
            #combine this with below, would wan other curses errors coming thorugh
            pass
        except:
            type, value, traceback = sys.exc_info()
            stdscr.addstr(30, 10, "{} {}".format(str(type), str(value)))
        else:
            if mykey == 'q':
                quit = True
                continue
            elif mykey == '1':
                game(stdscr)
                drawScreen = True
            elif mykey == '2':
                debugUi(stdscr)
                drawScreen = True
            elif mykey == 'KEY_RESIZE':
                #stdscr.addstr(31, 10, 'caught a resize')
                #rows, cols = stdscr.getmaxyx()
                #stdscr.resize(rows, cols)
                #stdscr.addstr(40, 10, "{} Rows {} Columns".format(str(rows), str(cols)))
                continue
#stdscr.addstr(30, 10, "f00")
# Let curses manage terminal setup/teardown around the menu loop.
wrapper(main)
| [
"dethboy@macintosh.home"
] | dethboy@macintosh.home |
93c094ec3ff67c2547a4273d6b6d7dd5b2d36e17 | 528c811306faa4a34bf51fca7955b7a24ac2e30c | /Python/Number of Islands II.py | ea9b85418e2cf1f4baca66002f08cbad1d4cd15e | [] | no_license | ganjingcatherine/LeetCode-1 | 1addbd7e4d9254a146601f9d5e28b8becb8235a6 | 488782d3f1e759da2d32b4e82dbf55b96c431244 | refs/heads/master | 2021-05-11T03:15:16.810035 | 2016-02-06T06:19:18 | 2016-02-06T06:19:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,429 | py | """
A 2d grid map of m rows and n columns is initially filled with water. We may perform an addLand operation which turns the water at position (row, col) into a land. Given a list of positions to operate, count the number of islands after each addLand operation. An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. You may assume all four edges of the grid are all surrounded by water.
Example:
Given m = 3, n = 3, positions = [[0,0], [0,1], [1,2], [2,1]].
Initially, the 2d grid grid is filled with water. (Assume 0 represents water and 1 represents land).
0 0 0
0 0 0
0 0 0
Operation #1: addLand(0, 0) turns the water at grid[0][0] into a land.
1 0 0
0 0 0 Number of islands = 1
0 0 0
Operation #2: addLand(0, 1) turns the water at grid[0][1] into a land.
1 1 0
0 0 0 Number of islands = 1
0 0 0
Operation #3: addLand(1, 2) turns the water at grid[1][2] into a land.
1 1 0
0 0 1 Number of islands = 2
0 0 0
Operation #4: addLand(2, 1) turns the water at grid[2][1] into a land.
1 1 0
0 0 1 Number of islands = 3
0 1 0
We return the result as an array: [1, 1, 2, 3]
"""
class union_find:
    """Disjoint-set (union-find) over the cells of an m x n grid.

    Each cell (x, y) is flattened to the integer id x * n + y; ``father``
    maps every id to its parent, with roots pointing at themselves.
    """
    def __init__(self, m, n):
        self.father = {}
        self.m = m
        self.n = n
        # Initially every cell is its own singleton component.
        for cell in range(m * n):
            self.father[cell] = cell

    def find(self, x, y):
        """Return the root id of the component containing cell (x, y)."""
        node = self.father[self.convert_to_id(x, y)]
        while node != self.father[node]:
            node = self.father[node]
        return node

    def compressed_find(self, x, y):
        """Like find, but also repoints the traversed chain at the root."""
        root = self.father[self.convert_to_id(x, y)]
        while root != self.father[root]:
            root = self.father[root]
        # Second pass: path compression along the same chain.
        walker = self.father[self.convert_to_id(x, y)]
        while walker != self.father[walker]:
            walker, self.father[walker] = self.father[walker], root
        return root

    def union(self, x1, y1, x2, y2):
        """Merge the components containing the two cells (no-op if same)."""
        root_a = self.find(x1, y1)
        root_b = self.find(x2, y2)
        if root_a != root_b:
            self.father[root_a] = root_b

    def convert_to_id(self, x, y):
        """Flatten grid coordinates to a single integer id."""
        return x * self.n + y
class Solution(object):
    def numIslands2(self, m, n, positions):
        """
        :type m: int
        :type n: int
        :type positions: List[List[int]]
        :rtype: List[int]

        Incrementally turn cells into land; after each operation record the
        current island count.  Each new land cell adds one island, and every
        merge with a distinct already-land 4-neighbour component removes one.
        """
        if m == 0 or n == 0:
            return []
        if not positions or len(positions) == 0:
            return []
        island = [[False for _ in range(n)] for _ in range(m)]
        directions = [[0, -1], [0, 1], [1, 0], [-1, 0]]
        count, uf, result = 0, union_find(m, n), []
        for position in positions:
            x, y = position[0], position[1]
            if not island[x][y]:
                count += 1
                island[x][y] = True
                for i in range(4):
                    nx, ny = x + directions[i][0], y + directions[i][1]
                    if 0 <= nx < m and 0 <= ny < n and island[nx][ny]:
                        position_father = uf.find(x, y)
                        now_father = uf.find(nx, ny)
                        if position_father != now_father:
                            # Two distinct components merge into one island.
                            count -= 1
                            uf.union(x, y, nx, ny)
            result.append(count)
        return result
| [
"anthonyjin0619@gmail.com"
] | anthonyjin0619@gmail.com |
4c3197760ec47ac171e62a4268e307b1c0550183 | 9979f353c690df9c0858530d244bf8b63725100f | /SlowProtScanning.py | 6658c8e52bf229c96772e670cc1213e4362f1891 | [] | no_license | samshimoni/Port-scanning-password-gusting- | 6a08fabf6080be87ffb4a24529935ae3dcdb5b5b | bb187129050616aa98646c09dd29f84f8a7b2189 | refs/heads/master | 2020-03-17T21:09:09.830856 | 2018-05-18T11:31:43 | 2018-05-18T11:31:43 | 133,945,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,774 | py | #!/usr/bin/env python
import socket
import subprocess
import sys
import time
import random
from datetime import datetime
class SlowScan(object):
    """TCP connect() port scan meant to pause between groups of ports,
    writing results to SlowScanFile.txt (Python 2 code)."""
    @classmethod
    def slowScanning(self,ip):
        # NOTE(review): Python 2 input() evaluates the typed text.
        secondForWait = input("Enter a number of seconds are waiting between each group of porters: ")
        remoteServer=ip
        remoteServerIP = socket.gethostbyname(remoteServer)
        print "-" * 60
        print "Please wait, scanning remote host", remoteServerIP
        print "-" * 60
        HashMapOfPorts = {}
        try:
            # NOTE(review): range(22,23) scans only port 22, and
            # `port%10==10` can never be true so the sleep never fires —
            # presumably meant a wider range and `port % 10 == 0`.
            for port in range(22,23):
                if port%10==10:
                    time.sleep(secondForWait)
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                # connect_ex returns 0 when the connection succeeds (port open).
                result = sock.connect_ex((remoteServerIP, port))
                if result == 0:
                    HashMapOfPorts[port] = "Open"
                    print "Port {}: Open".format(port)
                else:
                    HashMapOfPorts[port] = "Close"
                    print "Port {}: Close".format(port)
                sock.close()
            # Dump the per-port results as a simple text table.
            with open("SlowScanFile.txt", "w") as f:
                f.write('| Port number | Status |\n')
                for key, value in HashMapOfPorts.items():
                    f.write('| %s | %s |\n' % (key, value))
        except KeyboardInterrupt:
            print "You pressed Ctrl+C"
            sys.exit()
        except socket.gaierror:
            print 'Hostname could not be resolved. Exiting'
            sys.exit()
        except socket.error:
            print "Couldn't connect to server"
            sys.exit()
| [
"noreply@github.com"
] | samshimoni.noreply@github.com |
19dfeb0326b1f4add9e56705427a624d3a376b3f | 284386a55a5620fbb77e15f4a065284cb93cf867 | /env_deep_learning/bin/tensorboard | 3668e3b6f6c5d455bc3ccd50a757fe9edf9de013 | [] | no_license | doctown/ICME_DL_workshop | 4dd7a9a0991a43a1328eac716ba2451389fd9ce7 | 2200bf7ef0eaa21c7185e62947e177aeab18c94c | refs/heads/master | 2021-01-22T18:08:27.942177 | 2018-07-13T21:48:47 | 2018-07-13T21:48:47 | 100,748,225 | 0 | 0 | null | 2017-08-18T20:34:06 | 2017-08-18T20:34:06 | null | UTF-8 | Python | false | false | 309 | #!/Users/davido/Documents/personal/code/deep-learning-workshop/ICME_DL_workshop/env_deep_learning/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from tensorboard.main import main
if __name__ == '__main__':
    # Strip setuptools' "-script.py"/".exe" wrapper suffix from argv[0]
    # before delegating to TensorBoard's real entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"david.ogor@brightbytes.net"
] | david.ogor@brightbytes.net | |
5291920b22a0288cb3146261c5e2febf47a16f15 | 77902311f65e02fbcd3830aa57cd997383349af3 | /digital_calculation.py | 8a6010a86bf4b61ffd5dfd1786593da1bd21eda3 | [] | no_license | LeFuGang/ml | 3caeb458b57e10928462469762e25df820de706e | 748293b4eb5d6a72f794909167b3e4a20fa683d5 | refs/heads/master | 2020-03-17T00:41:22.165295 | 2019-04-10T09:04:47 | 2019-04-10T09:04:47 | 133,126,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,293 | py | import re
def jia(*args):
    '''
    Addition parser: each argument is a sub-expression that may still contain
    '-', '*' and '/'; split on '-' and delegate to the subtraction parser.
    :param args: operand expressions produced by splitting the input on '+'
    :return: numeric sum of all arguments
    '''
    res = 0
    for i in args:
        # Handle the '*-' and '/-' cases (hidden behind '#'/'@' placeholders)
        #i = i.replace('*-','#').replace('/-','@')
        jian_list = i.split('-')
        # Splitting on '-' must cope with unary minus: empty strings produced
        # by the split become '0', and the '*-'/'/-' placeholders ('#'/'@')
        # are restored in every other piece.
        for id,j in enumerate(jian_list):
            if j=='':
                jian_list[id] = '0'
            else:
                jian_list[id] = j.replace('#','*-').replace('@','/-')
        res += jian(*jian_list)
    return res
def jian(*args):
    '''
    Subtraction parser: evaluate the first term, then subtract each of the
    remaining terms from it (each term goes to the multiplication parser).
    :param args: operand expressions produced by splitting on '-'
    :return: numeric difference
    '''
    # Subtraction must evaluate the first expression first, then subtract
    # the following ones from it.
    res = args[0:1]
    res = chen(*res[0].split('*'))
    for i in args[1:]:
        chen_list = i.split('*')
        res -= chen(*chen_list)
    return res
def chen(*args):
    '''
    Multiplication parser: multiply the values of all factors; each factor
    is handed to the division parser.
    :param args: operand expressions produced by splitting on '*'
    :return: numeric product
    '''
    res = 1
    for i in args:
        chu_list = i.split('/')
        res *= chu(*chu_list)
    return res
def chu(*args):
    '''
    Division parser: evaluate args[0] / args[1] / ... left to right.
    :param args: numeric operands given as strings
    :return: float quotient
    '''
    quotient = float(args[0])
    for divisor in args[1:]:
        quotient /= float(divisor)
    return quotient
def simpleCalc(input_str):
    '''
    Evaluate a bracket-free arithmetic expression.
    :param input_str: expression, possibly wrapped in one pair of parentheses
    :return: numeric value
    '''
    # Strip the outermost parentheses
    input_str = input_str.strip('()')
    # Normalise '--' and '+-'; hide '*-' and '/-' behind '#'/'@' placeholders
    # so the later split on '-' does not break them.
    input_str = input_str.replace(' ','').replace('--','+').replace('+-','-').replace('*-','#').replace('/-','@')
    #print(input_str)
    # Split on '+' and let the addition parser drive the +-*/ recursion.
    jia_list = input_str.split('+')
    res = jia(*jia_list)
    return res
    #return str(eval(input_str))
def calc(input_str):
    '''
    Calculator entry point: repeatedly evaluate the innermost parenthesised
    sub-expression and substitute its value back until no brackets remain.
    :param input_str: full arithmetic expression
    :return: result as a string
    '''
    if len(input_str) == 0:
        print('Wrong input')
        exit(0)
    #print(input_str)
    # Look for an innermost (bracket-free) parenthesised group
    m = re.search('\([^()]+\)', input_str)
    brackets_exists = False
    #print(m)
    if m == None:  # no brackets left: evaluate the expression directly
        simple_calc_str = input_str  # the whole expression is the operand
    else:  # brackets remain: evaluate the innermost group first
        brackets_exists = True
        simple_calc_str = m.group()  # text inside (and including) the brackets
    simple_res = str(simpleCalc(simple_calc_str))
    if brackets_exists:  # substitute the value for the group and recurse
        return calc(input_str.replace(simple_calc_str,simple_res,1))
    else:  # no brackets: return the computed value
        return simple_res
if __name__ == '__main__':
    # Earlier test expressions, kept for reference:
    # input_str = '3 * 4 + (-4 / 2 - 8 - 3 * 2 + ( 4 - 5 / 2 + 11 - ( 2 * 3 - 9 ) - 12 )) + 20 - 3 * 2 - ( 5 + 8 / 4)'
    # input_str = '3/(-1) - (4*-2)/(1+1)/(1+1)'
    input_str = '1-2*((60-30+(-40/5)*(9-2*5/3+7/3*99/4*2998+10*568/14))-(-4*3)/(16-3*2))'
    # NOTE(review): the line above is immediately overwritten by this one.
    input_str = '2 + 2 * 3'
    result = calc(input_str)
    print(result)
| [
"noreply@github.com"
] | LeFuGang.noreply@github.com |
6f25be5166faf3e615f05f7d0bdfac883634ea64 | 9f7e04a479cc3f1ec20784b07f61bfcab71abbb2 | /main.py | 17643ef9320c91159c364c6c3f001f12b4c10bf9 | [] | no_license | tiriplicamihai/tagging | 2859ca576f83039f0f937fc307d42a467f1fb451 | 35563e7ccd9496136afe6fdb0f22165a8c30162a | refs/heads/master | 2020-12-02T10:02:54.676822 | 2017-08-04T09:36:40 | 2017-08-04T09:36:40 | 96,682,018 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,216 | py | from collections import defaultdict
from copy import deepcopy
import json
import re
from bs4 import BeautifulSoup
import requests
from sklearn.externals import joblib
def main():
    """Classify each ad's URL with the pickled text classifier, bucket the
    ads by predicted class, then interactively suggest targeted ads for
    user-entered URLs (Python 2 code)."""
    text_clf = joblib.load('clf.pkl')
    with open('categories_mapping.json', 'r') as f:
        categories_mapping = json.load(f)
    with open('ads.json', 'r') as f:
        ads = json.load(f)
    # category name -> id in the file; invert to id -> name for display.
    inverse_categories_mapping = {v:k for k, v in categories_mapping.items()}
    ads_class_mapping = defaultdict(list)
    for ad in ads['ads']:
        ad_class = get_class_for_url(text_clf, ad['url'], description=ad['text'])
        print 'URL %s has class %s' % (ad['url'], inverse_categories_mapping[ad_class])
        ads_class_mapping[ad_class].append(ad)
    while True:
        url = raw_input('URL: ')
        probabilites = get_class_for_url(text_clf, url, proba=True)
        # Sort (class_id, probability) pairs by descending probability.
        tuples = list(enumerate(probabilites))
        tuples.sort(key=lambda x: x[1], reverse=True)
        candidate1 = tuples[0]
        candidate2 = tuples[1]
        targeted_ads = deepcopy(ads_class_mapping[candidate1[0]])
        # Classes are near which means they are related.
        if candidate1[1] - candidate2[1] < 0.2:
            targeted_ads.extend(deepcopy(ads_class_mapping[candidate2[0]]))
        print targeted_ads
def get_class_for_url(text_clf, url, description='', proba=False):
    """Fetch the page at ``url``, build a document from its visible text
    (plus the optional description) and classify it.

    Returns the predicted class id, or the per-class probability vector
    when ``proba`` is true.
    """
    response = requests.get(url)
    if response.status_code != 200:
        # NOTE(review): only warns — parsing still proceeds on the error body.
        print 'Unable to retrieve URL'
    soup = BeautifulSoup(response.content, 'html.parser')
    texts = soup.findAll(text=True)
    visible_texts = [text for text in texts if _visible(text)]
    if description:
        visible_texts.append(description)
    doc = ' '.join(visible_texts)
    if not proba:
        return text_clf.predict([doc])[0]
    return text_clf.predict_proba([doc])[0]
def _visible(element):
    """Method used to filter visible text elements. """
    # Drop text that lives inside non-rendered tags.
    if element.parent.name in ['style', 'script', '[document]', 'head', 'link']:
        return False
    elif re.match('<!--.*-->', element.encode('utf-8')):
        # Drop HTML comments (Python 2: encode() yields a str here).
        return False
    value = element.encode('utf-8')
    # Keep only elements with non-whitespace content.
    return not value.isspace()
if __name__ == '__main__':
    main()
| [
"tiriplica.mihai@gmail.com"
] | tiriplica.mihai@gmail.com |
207eed8b7b95177d0fb13820653cdb7e3542dac8 | 4a4916bc4d3d02a5382972212671251dcb8e104b | /PAC/main.py | 74b5c57a1bb2dc40999919972a46e0e9a092c7d3 | [] | no_license | SaintDevilS/LearningModels | 2181620236be823ef8e7268b28f8ad4ada097ed1 | e4af052fc0f6d8dee1f60ae8037d9cddb0bd5baf | refs/heads/master | 2021-01-18T02:53:33.817158 | 2015-10-26T22:19:12 | 2015-10-26T22:19:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | from LearningModels.PAC.PAC_Algorithm import PAC_Algorithm
from LearningModels.PAC.PAC_oracle import PAC_Oracle
from LearningModels.conjunction import Conjunction
from LearningModels.uniform_distribution import UniformDistribution
__author__ = 'yben_000'
def main():
    """Demo: PAC-learn a random length-10 conjunction under the uniform
    distribution and print the target function next to the learned one."""
    length = 10
    concept = Conjunction(length)
    function = concept.get_ideal_function()
    distribution = UniformDistribution(concept, length)
    pac_oracle = PAC_Oracle(concept, distribution)
    pac_alg = PAC_Algorithm(pac_oracle, length)
    # presumably (epsilon, delta) = (0.5, 0.1) — confirm in PAC_Algorithm
    hypo = pac_alg.learn_ideal_function(0.5, 0.1)
    print "FUNC IS: " + str(function)
    print "HYPO IS: " + str(hypo)
if __name__ == "__main__":
main() | [
"mooncake91@gmail.com"
] | mooncake91@gmail.com |
4d7511f0ec644a213d561887e362f39a9060e767 | 68180bf037f4b60b5cdc577ec758c5c970a75f41 | /practice.py | b1986c6ccbf890e8cedb862a0b83e24faf6aea25 | [] | no_license | nanaka-777/practice | 23e8c19a8ca82d9fb80efa3572a2f598eed2c8de | 69a9ad9626b26003b879ca4cd795100557ee1512 | refs/heads/master | 2020-11-27T08:27:00.717516 | 2019-12-21T03:31:28 | 2019-12-21T03:31:28 | 229,370,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19 | py | print(hello)
| [
"nanaka.sato.777@gmail.com"
] | nanaka.sato.777@gmail.com |
87faac5d8aa072a875a49cfb322ef246d6cd4fa9 | d96a3f09cf2c229047d27768b4f92a236ebea86b | /main_code/preprocessing.py | 90762dc1463a083f0774e6ba6fd182976fef7ea8 | [] | no_license | sbakas/PStrip | 436c0ddc50587edd655f2c160ea14f442b73cac0 | e608925e29267508a8af03eb2315c3efa1a309da | refs/heads/master | 2020-03-15T17:28:24.005663 | 2019-01-02T04:52:01 | 2019-01-02T04:52:01 | 132,261,763 | 1 | 1 | null | 2018-05-05T15:43:01 | 2018-05-05T15:43:00 | null | UTF-8 | Python | false | false | 6,775 | py | import sys
import subprocess
import os
import SimpleITK as sitk
import numpy as np
import warnings
sys.path.append(os.path.join(sys.path[0], '..', 'func_code'))
from niftyreg import *
from operations import *
from argparse import Namespace
def create_temp_image(image_path, temp_folder):
    """Copy the input image into temp_folder as float32 temp_image.nii.gz."""
    print 'creating temp image'
    temp_image_file = temp_folder + '/temp_image.nii.gz'
    # using float32
    temp_image = sitk.Cast(sitk.ReadImage(image_path), sitk.sitkFloat32)
    sitk.WriteImage(temp_image, temp_image_file)
    del temp_image
def affine_to_atlas(atlas_wo_skull_file, atlas_w_skull_file, atlas_mask_file, temp_folder, args):
    """Affinely register the normalised image to the atlas in two niftyreg
    steps (with-skull first, then skull-stripped with a mask), composite the
    two transforms and compute the inverse.  Runs niftyreg via the shell."""
    # Two steps of affine registration
    print 'performing affine registration'
    # input image
    norm_file = temp_folder + '/norm_output.nii.gz'
    # affine output/trans 1
    affine_file1 = temp_folder + '/affine_output1.nii.gz'
    affine_trans1 = temp_folder + '/affine_trans1.txt'
    # affine output/trans 2
    affine_file2 = temp_folder + '/affine_output2.nii.gz'
    affine_trans2 = temp_folder + '/affine_trans2.txt'
    # affine trans/inv_trans
    affine_trans = temp_folder + '/affine_trans.txt'
    invaff_trans = temp_folder + '/affine_invtrans.txt'
    affine_log = temp_folder + '/pre_affine.log'
    log = open(affine_log, 'w')
    cmd = ""
    # reg input -> atlas_w_skull, no mask
    cmd += '\n' + nifty_reg_affine(ref=atlas_w_skull_file, flo=norm_file, res=affine_file1, aff=affine_trans1, symmetric=False, init = 'cog')
    # reg affine output 1 -> atlas_wo_skull, mask on dilate
    cmd += '\n' + nifty_reg_affine(ref=atlas_wo_skull_file, flo=affine_file1, res=affine_file2, aff=affine_trans2, rmask=atlas_mask_file, symmetric=False)
    # composite trans
    cmd += '\n' + nifty_reg_transform(comp1=affine_trans2, comp2=affine_trans1, comp3=affine_trans)
    # get inverse
    cmd += '\n' + nifty_reg_transform(invAff1=affine_trans, invAff2=invaff_trans)
    if args.verbose == True:
        print cmd
    process = subprocess.Popen(cmd, shell=True, stdout=log)
    process.wait()
    log.close()
    #remove_nan(affine_file2)
def bias_correction(atlas_erode_mask_file, temp_folder):
    """N4 bias-field correction of the affine-registered image, restricted to
    the eroded atlas mask; writes bias_output.nii.gz."""
    print 'performing bias correction'
    # input/output file name
    affine_file = temp_folder + '/affine_output2.nii.gz'
    bias_file = temp_folder + '/bias_output.nii.gz'
    # input image
    affine_img = sitk.ReadImage(affine_file)
    mask_img = sitk.ReadImage(atlas_erode_mask_file)
    bias_img = sitk.N4BiasFieldCorrection(image=affine_img, maskImage=mask_img)
    sitk.WriteImage(bias_img, bias_file)
    del affine_img, bias_img
def intensity_normalization(temp_folder):
    """Linearly rescale intensities so the 1st/99th percentiles map to
    100/900, then clamp to [0, 1000]; writes norm_output.nii.gz."""
    print 'performing intensity normalization'
    # input/output file name
    temp_file = temp_folder + '/temp_image.nii.gz'
    norm_file = temp_folder + '/norm_output.nii.gz'
    temp_img = sitk.ReadImage(temp_file)
    temp_arr = sitk.GetArrayFromImage(temp_img)
    # calculate 99th and 1st percentile
    intensities = temp_arr.reshape(-1)
    i_max = np.percentile(intensities, 99)
    i_min = np.percentile(intensities, 1)
    # map i_max -> 900, i_min -> 100, affine tranform on intensities, then cutoff [0, 1]
    # y = a(x+b)
    b = (i_max-9*i_min)/8
    a = 800/(i_max-i_min)
    norm_img_pre = sitk.ShiftScale(image1=temp_img, shift=b, scale=a)
    # cutoff at 0, 1000
    norm_img = sitk.IntensityWindowing(norm_img_pre, windowMinimum=0.0, windowMaximum=1000.0, outputMinimum=0.0, outputMaximum=1000.0)
    sitk.WriteImage(norm_img, norm_file)
    del temp_img, norm_img_pre, norm_img
def histogram_matching(pca_mean_file, atlas_mask_file, temp_folder):
    """Match the bias-corrected image's intensity histogram (inside the atlas
    mask) to the PCA mean image's histogram via interpolated CDF mapping;
    writes match_output.nii.gz."""
    print 'performing histogram matching'
    bias_file = temp_folder + '/bias_output.nii.gz'
    match_file = temp_folder + '/match_output.nii.gz'
    bias_img = sitk.ReadImage(bias_file)
    mean_img = sitk.ReadImage(pca_mean_file)
    mask_img = sitk.ReadImage(atlas_mask_file)
    bias_mask_img = sitk.Mask(bias_img, mask_img)
    mean_mask_img = sitk.Mask(mean_img, mask_img)
    bias_mask_arr = sitk.GetArrayFromImage(bias_mask_img)
    mean_mask_arr = sitk.GetArrayFromImage(mean_mask_img)
    bias_arr = sitk.GetArrayFromImage(bias_img)
    img_shape = bias_arr.shape
    bias_vec = bias_arr.reshape(-1)
    bias_mask_vec = bias_mask_arr.reshape(-1)
    mean_mask_vec = mean_mask_arr.reshape(-1)
    unique_b, inverse_b, counts_b = np.unique(bias_vec, return_inverse=True, return_counts=True)
    unique_bm, counts_bm = np.unique(bias_mask_vec, return_counts=True)
    unique_mm, counts_mm = np.unique(mean_mask_vec, return_counts=True)
    #match im with rm
    # Empirical CDFs (quantiles) of the masked input and reference images.
    im_cum = np.cumsum(counts_bm).astype(np.float32)
    im_qtl = im_cum/im_cum[-1]
    rm_cum = np.cumsum(counts_mm).astype(np.float32)
    rm_qtl = rm_cum/rm_cum[-1]
    # Quantile-to-quantile lookup: input quantiles -> reference intensities.
    interp_unique_bm = np.interp(im_qtl, rm_qtl, unique_mm)
    # Extend the mapping from masked intensities to every intensity present.
    interp_unique_b = np.interp(unique_b, unique_bm, interp_unique_bm)
    match_arr = interp_unique_b[inverse_b].reshape(img_shape)
    match_img = sitk.GetImageFromArray(match_arr)
    match_img.CopyInformation(bias_img)
    sitk.WriteImage(match_img, match_file)
    del bias_img, mean_img, mask_img, bias_mask_img, mean_mask_img, bias_mask_arr, mean_mask_arr, bias_arr, match_arr, match_img
def preprocessing(args):
    """Full preprocessing pipeline for args.input_image: cast/copy, intensity
    normalisation, two-step affine registration to the atlas, N4 bias
    correction and histogram matching.  Intermediates are written to
    tmp_res/temp_<image_name>/."""
    image_path = args.input_image
    image_file = os.path.basename(image_path)
    image_name = image_file.split('.')[0]
    root_folder = os.path.join(sys.path[0], '..')
    atlas_w_skull_file = root_folder + '/data/atlas/atlas_w_skull.nii'
    atlas_wo_skull_file = root_folder + '/data/atlas/atlas_wo_skull.nii'
    atlas_mask_file = root_folder + '/data/atlas/atlas_mask.nii'
    atlas_dilate_mask_file = root_folder + '/data/atlas/atlas_mask_dilate.nii'
    atlas_erode_mask_file = root_folder + '/data/atlas/atlas_mask_erode.nii'
    pca_mean_file = root_folder + '/data/pca/pca_100/pca_warped_mean_100.nii'
    temp_folder = os.path.join(os.sys.path[0], '..', 'tmp_res', 'temp_'+image_name)
    # Re-running on the same image overwrites the previous intermediates.
    if os.path.exists(temp_folder):
        msg = 'The temp folder exists. You may have tried to extract the brain from this image. The previous results will be overwrited!'
        warnings.warn(message = msg, category=Warning)
    else:
        os.system('mkdir ' + temp_folder)
    create_temp_image(image_path, temp_folder)
    intensity_normalization(temp_folder)
    affine_to_atlas(atlas_wo_skull_file, atlas_w_skull_file, atlas_dilate_mask_file, temp_folder, args)
    bias_correction(atlas_erode_mask_file, temp_folder)
    histogram_matching(pca_mean_file, atlas_erode_mask_file, temp_folder)
if __name__ == '__main__':
    # CLI usage: python preprocessing.py <input_image>
    image_path = sys.argv[1]
    args = Namespace(input_image=image_path, debug=2, verbose=True)
    preprocessing(args)
| [
"xhs400@cs.unc.edu"
] | xhs400@cs.unc.edu |
7fd2976a96b7c65afda200b53acff70df4e20a74 | fc3f6ef338c7969e4519b7b95eddd792b874c209 | /Flask/venv/bin/mako-render | c8e19cbbd73e3429d69acc3be40d9798d2a52d30 | [] | no_license | xjvelazquez/Where2Next | f5a965545dcdddd9b0f604a2fdfca04e6ae44132 | bdb85f646aa7e4627fd97eb71f86bdb76c15d849 | refs/heads/master | 2023-03-26T01:43:54.026689 | 2021-03-27T02:11:33 | 2021-03-27T02:11:33 | 283,160,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | #!/mnt/c/Users/Jaime/Desktop/Where2Next/venv/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'Mako==1.1.4','console_scripts','mako-render'
__requires__ = 'Mako==1.1.4'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip setuptools' "-script.py"/".exe" wrapper suffix from argv[0],
    # then run the mako-render console entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('Mako==1.1.4', 'console_scripts', 'mako-render')()
    )
| [
"jaimevelazquez22@yahoo.com"
] | jaimevelazquez22@yahoo.com | |
7827f38f5e2c123e14f1dadd1cc05a72aad97a39 | 5d9653ca3d77a1e79f73ece726ee70e4fa8761f5 | /venv/Scripts/project0/Part4.py | ad8de811ba7a9f56aaf4c55fa4fb500dc929f3c9 | [] | no_license | LordNecromancer/data_structures_and_algorithem_course_projects | 3665567075f19ab71fb1bbd6f8192aed4a67cc96 | 36ec0289306327b2ddab96beee20ba6f91391e62 | refs/heads/master | 2023-03-02T01:19:44.770512 | 2021-02-12T22:10:40 | 2021-02-12T22:10:40 | 325,384,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | import math
import time
import matplotlib.pyplot as plt
# n=int(input())
# nums=input('').split(' ')
# For inputs 1..20, count the elementary operations of the prefix-sum
# maximum-subarray algorithm and plot the counts against input size.
value=[]
elementNum=[]
for l in range(1,21):
    #start = time.clock() *1000
    p=[]
    # NOTE(review): `str`, `file` and `max` shadow Python builtins below.
    str="C:/Users/lordNecromancer/PycharmProjects/ds_project1/in/input{}.txt"
    str=str.format(l)
    file=open(str,'rt')
    nums=[int(r) for r in file.read().replace('\n',' ').split(' ')]
    prev=0
    max=0
    counter=0
    # p[i] = prefix sum of nums[0..i]; counter tallies elementary operations.
    for i in range(len(nums)):
        p.append(prev+int(nums[i]))
        prev=prev+int(nums[i])
        counter+=2
    m = 0
    # Best subarray sum = max over i of p[i] minus the minimum prefix so far.
    for i in range(len(nums)):
        counter+=1
        if(p[i]-m>max):
            max=p[i]-m
            counter += 1
        m=min(p[i],m)
    #end = (time.clock()*1000 )
    value.append(counter)
    elementNum.append(len(nums))
print(value)
print(elementNum)
plt.plot(elementNum,value,'ro')
plt.show()
| [
"mmdp313@gmail.com"
] | mmdp313@gmail.com |
0df67edff7e3522c8bcefb1bdbb749ecf4b7f58d | 15cf010f36acda5074fb3700c43005ceea507bc5 | /calculate_trigger_efficiencies.py | ee5380e7f720d7e56e7c4704bf34a1a448b7dd5b | [] | no_license | mburkart/TriggerStudies | 0e87146b5ffcc9930b9bdda095c2d562074495d4 | ecd9c24c206031fe7235cca4e90b3c74d33c2378 | refs/heads/master | 2020-05-13T23:56:22.266153 | 2019-04-16T12:24:50 | 2019-04-16T12:24:50 | 181,676,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,886 | py |
import logging
import argparse
from TauTriggerEfficiency import TauLegEfficiencies
def parse_args():
parser = argparse.ArgumentParser(description="To be filled.")
parser.add_argument("-w", "--working-points", type=str,
nargs="+", default=["all"],
choices=["all", "vloose", "loose", "medium",
"tight", "vtight", "vvtight"],
help="The MVA tau Id working points the"
" efficiencies should be calculated for.")
parser.add_argument("-i", "--input-file", type=str, required=True,
help="The input file containing the ntuples.")
parser.add_argument("-o", "--output-file", type=str, default="output.root",
help="The output file. Defaults to %(default)s")
parser.add_argument("-f", "--file-types", type=str, nargs="+",
choices=["DATA", "MC", "EMB"],
default=["DATA", "MC", "EMB"],
help="The sample types to be processed.")
args = parser.parse_args()
if "all" in args.working_points:
args.working_points = ["vloose", "loose", "medium",
"tight", "vtight", "vvtight"]
return args
def setup_logging(level=logging.WARNING):
logging.basicConfig(level=level)
def main(args):
wps = args.working_points
triggers = ["MuTau", "ETau", "diTau"]
filetypes = args.file_types
eff = TauLegEfficiencies(
args.output_file,
args.input_file)
for wp in wps:
eff.add_wp(wp)
for trg in triggers:
eff.add_trigger_name(trg)
for ft in filetypes:
eff.add_filetype(ft)
eff.create_efficiencies()
return
if __name__ == "__main__":
setup_logging(logging.INFO)
args = parse_args()
main(args)
| [
"maxiburkart@gmail.com"
] | maxiburkart@gmail.com |
8018544975d51c6528a74b50ee6788ef7b69c41f | 7441bc8c4779e85e7f1221ee239f51341ebcdd28 | /elijah_ros_dev/catkin_ws/build/kp07_v11_manipulation/catkin_generated/pkg.develspace.context.pc.py | 0b12a1b9544e691a288f8b95c8b9dc72c41885f2 | [] | no_license | riverray121/Kinoped-Command-and-Control | eb4e2b9cbfa5314009a8ab6e0a802856aab85d79 | 7944c2dd1e9bfce5582de6f9f707991d27b01b45 | refs/heads/main | 2023-04-28T11:21:07.386126 | 2021-05-17T21:41:08 | 2021-05-17T21:41:08 | 368,317,745 | 0 | 0 | null | 2021-05-17T21:41:09 | 2021-05-17T20:45:55 | null | UTF-8 | Python | false | false | 751 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/elijah/catkin_ws/src/kp07_v11_manipulation/include/kp07_v11_manipulation".split(';') if "/home/elijah/catkin_ws/src/kp07_v11_manipulation/include/kp07_v11_manipulation" != "" else []
PROJECT_CATKIN_DEPENDS = "geometric_shapes;moveit_core;moveit_ros_planning;moveit_ros_planning_interface;moveit_visual_tools;pluginlib;roscpp;std_msgs;tf2_eigen;genmsg;message_generation;rospy".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lkp07_v11_manipulation".split(';') if "-lkp07_v11_manipulation" != "" else []
PROJECT_NAME = "kp07_v11_manipulation"
PROJECT_SPACE_DIR = "/home/elijah/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"eliriver@gmail.com"
] | eliriver@gmail.com |
70b79c8ebe347e46b42b18e851e6c9a24feec667 | 154eb136cd287303cda71b326b8d7542539f2b2f | /mysite/polls/migrations/0016_auto_20181210_1544.py | 320857450d333f7c789d844224e23fe408343899 | [] | no_license | mjohns12/CMSC447Group6BE | 913f146a625643d0ad219c7462f5d65f57d2b7b4 | 4133c4e26f9eb0db9d918ed667d31dc6f8f4c8f9 | refs/heads/master | 2020-04-09T20:39:52.505792 | 2018-12-19T03:37:59 | 2018-12-19T03:37:59 | 160,580,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,308 | py | # Generated by Django 2.1.3 on 2018-12-10 20:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('polls', '0015_auto_20181210_1502'),
]
operations = [
migrations.CreateModel(
name='RequiredEquipment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.PositiveIntegerField()),
],
),
migrations.RemoveField(
model_name='equipment',
name='events',
),
migrations.RemoveField(
model_name='equipment',
name='quantity',
),
migrations.RemoveField(
model_name='event',
name='mission',
),
migrations.AddField(
model_name='requiredequipment',
name='equipment_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Equipment'),
),
migrations.AddField(
model_name='requiredequipment',
name='event_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Event'),
),
]
| [
"noreply@github.com"
] | mjohns12.noreply@github.com |
77de718380e45fd22369ee5d50e95868582d615b | f321337033e25e85ba35fa0526d4c898592fc790 | /blog/migrations/0002_auto_20200718_1528.py | 7b2b5348450d4f77f78de4ff1cd3f5f4dadd1e11 | [] | no_license | kuss123/django-assignment-1 | 02da1876b8a08830dce4eee0db8480b83537a907 | ec46e6620616815152b2abd72edadd07757f0647 | refs/heads/master | 2022-11-16T13:31:49.530341 | 2020-07-19T15:25:18 | 2020-07-19T15:25:18 | 280,893,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | # Generated by Django 3.0.6 on 2020-07-18 09:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='blogtable',
name='updated_at',
field=models.DateField(blank=True),
),
]
| [
"halkstar12@gmail.com"
] | halkstar12@gmail.com |
9ce650e54cafe85121117010d69ef0d196a2a17a | 3eb1143458ea1abe618267b3254abdd3c23b1fc3 | /secchallenge/crypto/prerecorded/solution/util.py | a9e867898e4bc959c1445cd14fb6f7d46f46b5b2 | [] | no_license | encse/secchallenge2021 | cfb9e3d6883200871c9e23d21b3eb3ac8eeb8c5b | d88fbdbd4bd726de95b00bd6bcb4b1bb3a5a80e7 | refs/heads/master | 2023-04-14T19:57:06.612546 | 2021-05-02T19:31:06 | 2021-05-02T19:31:06 | 345,049,189 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,874 | py | # https://www.josephsurin.me/posts/2020-11-30-hitcon-ctf-2020-100-pins-writeup
import struct
from decimal import *
import os
from z3 import *
MAX_UNUSED_THREADS = 2
# Calculates xs128p (XorShift128Plus)
def xs128p(state0, state1):
s1 = state0 & 0xFFFFFFFFFFFFFFFF
s0 = state1 & 0xFFFFFFFFFFFFFFFF
s1 ^= (s1 << 23) & 0xFFFFFFFFFFFFFFFF
s1 ^= (s1 >> 17) & 0xFFFFFFFFFFFFFFFF
s1 ^= s0 & 0xFFFFFFFFFFFFFFFF
s1 ^= (s0 >> 26) & 0xFFFFFFFFFFFFFFFF
state0 = state1 & 0xFFFFFFFFFFFFFFFF
state1 = s1 & 0xFFFFFFFFFFFFFFFF
generated = state0 & 0xFFFFFFFFFFFFFFFF
return state0, state1, generated
def sym_xs128p(sym_state0, sym_state1):
# Symbolically represent xs128p
s1 = sym_state0
s0 = sym_state1
s1 ^= (s1 << 23)
s1 ^= LShR(s1, 17)
s1 ^= s0
s1 ^= LShR(s0, 26)
sym_state0 = sym_state1
sym_state1 = s1
# end symbolic execution
return sym_state0, sym_state1
# Symbolic execution of xs128p
def sym_floor_random(slvr, sym_state0, sym_state1, generated, multiple):
sym_state0, sym_state1 = sym_xs128p(sym_state0, sym_state1)
# "::ToDouble"
calc = LShR(sym_state0, 12)
"""
Symbolically compatible Math.floor expression.
Here's how it works:
64-bit floating point numbers are represented using IEEE 754 (https://en.wikipedia.org/wiki/Double-precision_floating-point_format) which describes how
bit vectors represent decimal values. In our specific case, we're dealing with a function (Math.random) that only generates numbers in the range [0, 1).
This allows us to make some assumptions in how we deal with floating point numbers (like ignoring parts of the bitvector entirely).
The 64bit floating point is laid out as follows
[1 bit sign][11 bit expr][52 bit "mantissa"]
The formula to calculate the value is as follows: (-1)^sign * (1 + Sigma_{i=1 -> 52}(M_{52 - i} * 2^-i)) * 2^(expr - 1023)
Therefore 0_01111111111_1100000000000000000000000000000000000000000000000000 is equal to "1.75"
sign => 0 => ((-1) ^ 0) => 1
expr => 1023 => 2^(expr - 1023) => 1
mantissa => <bitstring> => (1 + sum(M_{52 - i} * 2^-i) => 1.75
1 * 1 * 1.75 = 1.75 :)
Clearly we can ignore the sign as our numbers are entirely non-negative.
Additionally, we know that our values are between 0 and 1 (exclusive) and therefore the expr MUST be, at most, 1023, always.
What about the expr?
"""
lower = from_double(Decimal(generated) / Decimal(multiple))
upper = from_double((Decimal(generated) + 1) / Decimal(multiple))
lower_mantissa = (lower & 0x000FFFFFFFFFFFFF)
upper_mantissa = (upper & 0x000FFFFFFFFFFFFF)
upper_expr = (upper >> 52) & 0x7FF
slvr.add(And(lower_mantissa <= calc, Or(upper_mantissa >= calc, upper_expr == 1024)))
return sym_state0, sym_state1
def solve_instance(points, multiple, unknown_leading=False):
# setup symbolic state for xorshift128+
ostate0, ostate1 = BitVecs('ostate0 ostate1', 64)
sym_state0 = ostate0
sym_state1 = ostate1
set_option("parallel.enable", True)
set_option("parallel.threads.max", (
max(os.cpu_count() - MAX_UNUSED_THREADS, 1))) # will use max or max cpu thread support, whatever is smaller
slvr = SolverFor(
"QF_BV") # This type of problem is much faster computed using QF_BV (also, if branching happens, we can use parallelization)
# run symbolic xorshift128+ algorithm for three iterations
# using the recovered numbers as constraints
if unknown_leading:
# we want to try to predict one value ahead so let's slide one unknown into the calculation
sym_state0, sym_state1 = sym_xs128p(sym_state0, sym_state1)
for point in points:
sym_state0, sym_state1 = sym_floor_random(slvr, sym_state0, sym_state1, point, multiple)
if slvr.check() == sat:
# get a solved state
m = slvr.model()
state0 = m[ostate0].as_long()
state1 = m[ostate1].as_long()
return state0, state1
else:
print("Failed to find a valid solution")
return None, None
def solve(points, multiple, lead):
if lead > 0:
last_state0 = None
last_state1 = None
for i in range(0, int(lead)):
last_state0, last_state1 = solve_instance(points, multiple, True)
state0, state1, output = xs128p(last_state0, last_state1)
new_point = math.floor(multiple * to_double(output))
points = [new_point] + points
return last_state0, last_state1
else:
return solve_instance(points, multiple)
def to_double(value):
"""
https://github.com/v8/v8/blob/master/src/base/utils/random-number-generator.h#L111
"""
double_bits = (value >> 12) | 0x3FF0000000000000
return struct.unpack('d', struct.pack('<Q', double_bits))[0] - 1
def from_double(dbl):
"""
https://github.com/v8/v8/blob/master/src/base/utils/random-number-generator.h#L111
This function acts as the inverse to @to_double. The main difference is that we
use 0x7fffffffffffffff as our mask as this ensures the result _must_ be not-negative
but makes no other assumptions about the underlying value.
That being said, it should be safe to change the flag to 0x3ff...
"""
return struct.unpack('<Q', struct.pack('d', dbl + 1))[0] & 0x7FFFFFFFFFFFFFFF
def reverse17(val):
return val ^ (val >> 17) ^ (val >> 34) ^ (val >> 51)
def reverse23(val):
mask = 0xFFFFFFFFFFFFFFFF
return (val ^ (val << 23) ^ (val << 46)) & mask
def xs128p_backward(state0, state1):
prev_state1 = state0
prev_state0 = state1 ^ (state0 >> 26)
prev_state0 = prev_state0 ^ state0
prev_state0 = reverse17(prev_state0)
prev_state0 = reverse23(prev_state0)
generated = prev_state0
return prev_state0, prev_state1, generated
| [
"encse@csokavar.hu"
] | encse@csokavar.hu |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.