blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
954c06428955e07811496bfdf9a35f31248191f9 | Python | youridv1/ProgrammingYouriDeVorHU | /venv/Les7/7_4.py | UTF-8 | 443 | 2.96875 | 3 | [] | no_license | import datetime
# Append a runner's name with the current date and time to hardlopers.txt.
vandaag = datetime.datetime.today()
s = vandaag.strftime("%a %d %b %Y")  # e.g. "Mon 01 Jan 2024"
t = vandaag.strftime("%I:%M:%S")     # 12-hour clock time

# Normalise whatever nonsense the user types: stripped, first letter upper,
# rest lowercase (capitalize() already lowercases the tail, so the previous
# separate .lower() call was redundant).
naam = input("naam: ").strip().capitalize()

# Open only once the input succeeded; the context manager guarantees the
# file is closed even if the write fails (the old code leaked the handle
# on any exception between open() and close()).
with open('hardlopers.txt', 'a') as infile:
    infile.write(s + ", " + t + ", " + naam + "\n")
infile.close() | true |
9035cbd95deba57a8b130158ba7bf0f649cfa70b | Python | bastienreyne/cecidomyie | /drafts/testnsga.py | UTF-8 | 717 | 2.703125 | 3 | [] | no_license |
from pyOpt import Optimization, NSGA2
def objfunc(x):
    """Evaluate the bi-objective TP37-style problem at point x.

    Returns (f, h, g, fail): the two objective values, the list of two
    inequality-constraint values, and a failure flag (always 0).
    """
    a, b, c = x[0], x[1], x[2]
    first_objective = -a * b * c      # maximise the product a*b*c
    second_objective = -a * c         # maximise the product a*c
    constraints = [
        a + 2.0 * b + 2.0 * c - 72.0,
        -(a + 2.0 * b + 2.0 * c),
    ]
    return first_objective, second_objective, constraints, 0
# Instantiate Optimization Problem: two objectives (f, h) from objfunc and
# two inequality constraints, solved with the NSGA-II genetic algorithm.
opt_prob = Optimization('TP37 Constrained Problem',objfunc)
# Three continuous ('c') design variables, each bounded to [0, 42],
# starting from 10.0.
opt_prob.addVar('x1','c',lower=0.0,upper=42.0,value=10.0)
opt_prob.addVar('x2','c',lower=0.0,upper=42.0,value=10.0)
opt_prob.addVar('x3','c',lower=0.0,upper=42.0,value=10.0)
# Register the two objective values returned by objfunc.
opt_prob.addObj('f')
opt_prob.addObj('h')
# Register the two constraints as inequality ('i') constraints.
opt_prob.addCon('g1','i')
opt_prob.addCon('g2','i')
print( opt_prob )
# Run NSGA-II silently (PrintOut=0) and show the first Pareto solution.
nsga2 = NSGA2()
nsga2.setOption('PrintOut',0)
nsga2(opt_prob)
print( opt_prob.solution(0))
| true |
f1410e1013d175a7396be96a2836e44b783b66ff | Python | nilamkk/Online-Attendance-System-Project | /WebApp/face_rec_web.py | UTF-8 | 3,085 | 2.921875 | 3 | [] | no_license | # cmake, dlib, facereco, cv2, numpy
# for now data base= dictionary { scholarId: encoding(array of 128) }
# later it will be stored in a csv file
import cv2
import os
import numpy as np
import face_recognition
from csv import writer, reader
# In-memory registry used by checkPresence: {scholar_id: 128-float encoding}.
# (Persistent registrations go to registeredStds.csv instead; see regImg.)
peopleDict = {}
def getImg2Enc(image):
    """Return the 128-float face encoding of the single face in *image*.

    :param image: BGR image as read by cv2
    :return: array : encoding (128 values)
    """
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # The caller guarantees exactly one face in the image, so take index 0.
    return face_recognition.face_encodings(rgb)[0]
def findIdentity(tarImgEnc, dictP):
    """Return the scholar id whose stored encoding best matches the target.

    :param tarImgEnc: targeted image encoding : array of 128 * 1
    :param dictP: dict of encodings : {scholar id: encoding}
    :return: most similar scholar id, or the string '-1' when no match
    """
    schIds = list(dictP.keys())
    encodings = list(dictP.values())
    # Pick the candidate with the smallest face distance, then only accept
    # it if compare_faces also flags that candidate as a match.
    distances = face_recognition.face_distance(encodings, tarImgEnc)
    best = np.argmin(distances)
    if face_recognition.compare_faces(encodings, tarImgEnc)[best]:
        return schIds[best]
    return '-1'
def getPresStds(img):
    """Return the scholar ids of every registered student visible in *img*.

    :param img: BGR image read by cv2 (numpy array)
    :return: list of present students' ids; ['-1'] when no registration
             file exists yet
    """
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    encodes = face_recognition.face_encodings(rgb)
    if not os.path.exists('registeredStds.csv'):
        return ['-1']
    # Rebuild the {scholar id: encoding} mapping from the CSV on disk:
    # column 0 is the id, the remaining columns are the 128 encoding floats.
    regDict = {}
    with open('registeredStds.csv', 'r') as fObj:
        for row in reader(fObj):
            regDict[row[0]] = [float(value) for value in row[1:]]
    # Keep every face that resolves to a known identity.
    presStds = []
    for enc in encodes:
        res = findIdentity(enc, regDict)
        if res != '-1':
            presStds.append(res)
    return presStds
def checkPresence(imgPath):
    """Print whether the face in the image at *imgPath* is a known person.

    :param imgPath: str: image path
    :return: None
    """
    img = cv2.imread(imgPath)
    imgEnc = getImg2Enc(img)
    res = findIdentity(imgEnc, peopleDict)
    # BUG FIX: findIdentity signals "no match" with the STRING '-1';
    # the old comparison against the integer -1 could never be true, so
    # "Not found in DB" was unreachable.
    if res == '-1':
        print("Not found in DB")
    else:
        print(res + " is present.")
def regImg(name, img):
    """Append a face registration row to registeredStds.csv.

    :param name: str : scholar id / name to register
    :param img: image read by cv2 (numpy array) containing one face
    :return: True once the row has been written
    """
    # get the encodings
    imgEnc = getImg2Enc(img)
    regFilePath = 'registeredStds.csv'
    # Opening with mode 'a' creates the file if it does not exist, so the
    # old explicit open(path, 'a').close() "touch" was redundant; the
    # with-block also makes the extra fObj.close() unnecessary.
    to_save = [name] + imgEnc.tolist()
    with open(regFilePath, 'a', newline='') as fObj:
        writer(fObj).writerow(to_save)
    return True
def imageShow(path):
    """Display the image at *path* in a window until a key is pressed.

    :param path: str: image path
    :return: None
    """
    print(path)
    img = cv2.imread(path)
    if img is None:
        # cv2.imread returns None (no exception) on unreadable paths.
        print("Image cant be read !!! ")
        return
    cv2.imshow('image', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| true |
473700b0015a9a0ff26d199ce960919170b548ba | Python | HungryAppOrganization/DatabaseAndDataCollector | /db_setup.py | UTF-8 | 1,114 | 2.8125 | 3 | [] | no_license | import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
class RestaurantDB(Base):
    """Declarative ORM model for one restaurant row ('restaurant' table)."""
    __tablename__ = 'restaurant'
    id = Column(Integer,primary_key=True)
    address = Column(String(250))
    name = Column(String(250))
    number = Column(String(250))  # presumably the phone number -- TODO confirm against data loader
    city = Column(String(250))
    hours = Column(String(250))   # opening hours stored as free-form text
class MenuItemDB(Base):
    """Declarative ORM model for one menu item ('menuitem' table); each
    row belongs to a restaurant via the restaurant_id foreign key."""
    __tablename__ = 'menuitem'
    id = Column(Integer, primary_key=True)
    name = Column(String(250))
    cost = Column(Integer)        # price as an integer; units (cents vs whole currency) not specified here -- confirm with caller
    picture = Column(String(250))  # presumably a URL/path to an image -- TODO confirm
    calorie = Column(Integer)
    restaurant_id = Column(Integer, ForeignKey('restaurant.id'))
    restaurant = relationship(RestaurantDB)
# Create an engine backed by the local SQLite file data/csv_test2.db.
# (The previous comment named sqlalchemy_example.db, which did not match
# the actual connection string below.)
engine = create_engine('sqlite:///data/csv_test2.db')
# Create all tables in the engine. This is equivalent to "Create Table"
# statements in raw SQL.
Base.metadata.create_all(engine) | true |
e2f6e7f2c797f27f38b8130aa1b64824042567d0 | Python | mido/mido | /tests/test_syx.py | UTF-8 | 1,372 | 2.8125 | 3 | [
"MIT",
"CC-BY-4.0"
] | permissive | # SPDX-FileCopyrightText: 2017 Ole Martin Bjorndalen <ombdalen@gmail.com>
#
# SPDX-License-Identifier: MIT
from pytest import raises
from mido.messages import Message
from mido.syx import read_syx_file, write_syx_file
def test_read(tmpdir):
    """read_syx_file accepts binary and hex .syx files and rejects garbage."""
    path = tmpdir.join("test.syx").strpath
    msg = Message('sysex', data=(1, 2, 3))

    def write_file(mode, payload):
        with open(path, mode) as outfile:
            outfile.write(payload)

    # Raw binary sysex bytes.
    write_file('wb', msg.bin())
    assert read_syx_file(path) == [msg]

    # Plaintext hex representation.
    write_file('w', msg.hex())
    assert read_syx_file(path) == [msg]

    # Anything that is not valid hex must raise.
    write_file('w', 'NOT HEX')
    with raises(ValueError):
        read_syx_file(path)
def test_handle_any_whitespace(tmpdir):
    """Tabs, newlines and stray spaces between hex bytes must be tolerated."""
    path = tmpdir.join("test.syx").strpath
    with open(path, 'w') as outfile:
        outfile.write('F0 01 02 \t F7\n F0 03 04 F7\n')
    expected = [Message('sysex', data=[1, 2]),
                Message('sysex', data=[3, 4])]
    assert read_syx_file(path) == expected
def test_write(tmpdir):
    """write_syx_file emits raw bytes by default and hex with plaintext=True."""
    path = tmpdir.join("test.syx").strpath
    msg = Message('sysex', data=(1, 2, 3))

    # Default mode writes the binary encoding.
    write_syx_file(path, [msg])
    with open(path, 'rb') as infile:
        assert infile.read() == msg.bin()

    # plaintext=True writes the hex representation instead.
    write_syx_file(path, [msg], plaintext=True)
    with open(path) as infile:
        assert infile.read().strip() == msg.hex()
| true |
b4be5ea1447cc05c8c54852114d1fffe073692ff | Python | sarathsankar3690/Djangopython | /Flow controls/Looping/no of even and odd upto 50.py | UTF-8 | 225 | 3.5625 | 4 | [] | no_license | limit=int(input("Enter the limit"))
def count_even_odd(limit):
    """Return (even_count, odd_count) for the integers 1..limit inclusive.

    BUG FIX: the original while-loop never incremented its counter, so it
    spun forever for any limit >= 1; it also printed the even count twice
    and never reported the odd count.
    """
    even_count = 0
    odd_count = 0
    for i in range(1, limit + 1):
        if i % 2 == 0:
            even_count += 1
        else:
            odd_count += 1
    return even_count, odd_count


if __name__ == "__main__":
    # 'limit' is read from the user at the top of the script.
    evens, odds = count_even_odd(limit)
    print(evens)
    print(odds)
| true |
22a3f6cf07d67f341b3173259eb35ac3638aa49f | Python | brozzi24/socialTestProject | /social_app/accounts/tests/test_views.py | UTF-8 | 3,588 | 2.515625 | 3 | [
"MIT"
] | permissive | from django.test import TestCase, Client
from django.urls import reverse
from django.contrib.auth.models import User
from django.contrib import auth
class TestAccountViews(TestCase):
    """Integration tests for the register / signIn / signOut views."""

    def setUp(self):
        self.client = Client()
        self.register_url = reverse("register")
        self.signIn_url = reverse("signIn")
        self.signOut_url = reverse("signOut")
        self.user = User.objects.create_user(username="testuser", password="justatest")

    # ------------------------------------------------------------------
    # REGISTER VIEW
    # ------------------------------------------------------------------

    def test_register_not_signed_in(self):
        """An anonymous user gets the registration form."""
        response = self.client.get(self.register_url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "accounts/register.html")

    def test_register_signed_in(self):
        """An authenticated user is redirected away from registration."""
        self.client.login(username="testuser", password="justatest")
        response = self.client.get(self.register_url)
        self.assertRedirects(response, "/", status_code=302)

    def test_register_valid_form(self):
        """POSTing a valid form creates the account and redirects to sign-in."""
        response = self.client.post(
            self.register_url,
            {
                "email": "jedi@force.com",
                "username": "babyyoda24",
                "password1": "testthisis",
                "password2": "testthisis",
            },
        )
        self.assertRedirects(response, "/accounts/signIn", status_code=302)

    def test_register_invalid_form(self):
        """Mismatched passwords re-render the registration form."""
        response = self.client.post(
            self.register_url,
            {
                "email": "jedi@force.com",
                "username": "babyyoda24",
                "password1": "testthisis",
                "password2": "faildYouHave",
            },
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "accounts/register.html")

    # ------------------------------------------------------------------
    # SIGNIN VIEW
    # ------------------------------------------------------------------

    def test_signIn_already_signed_in(self):
        """A logged-in user hitting sign-in is bounced to the home page."""
        self.client.login(username="testuser", password="justatest")
        response = self.client.get(self.signIn_url)
        self.assertRedirects(response, "/", status_code=302)

    def test_signIn_valid_form(self):
        """Valid credentials log the user in and redirect home."""
        response = self.client.post(
            self.signIn_url, {"username": "testuser", "password": "justatest"}
        )
        user = auth.get_user(self.client)
        self.assertEqual(self.user, user)
        self.assertRedirects(response, "/", status_code=302)

    def test_signIn_invalid_form(self):
        """A wrong password re-renders the sign-in form.

        BUG FIX: the original posted {"username": ..., "username": ...};
        the duplicated key silently dropped the first value and no
        password field was ever sent.
        """
        response = self.client.post(
            self.signIn_url, {"username": "testuser", "password": "wrongpassword"}
        )
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "accounts/signIn.html")

    # ------------------------------------------------------------------
    # SIGNOUT VIEW
    # ------------------------------------------------------------------

    def test_signOut_user(self):
        """Signing out clears the session and redirects to sign-in."""
        self.client.login(username="testuser", password="justatest")
        response = self.client.get(self.signOut_url)
        user = auth.get_user(self.client)
        self.assertNotEqual(self.user, user)
        self.assertRedirects(response, "/accounts/signIn", status_code=302)

    def test_signOut_not_signed_in(self):
        """Anonymous users are simply redirected to sign-in."""
        response = self.client.get(self.signOut_url)
        self.assertRedirects(response, "/accounts/signIn", status_code=302)
| true |
eb85ed24aba1f20e23b6e6e1f7fc4c05301f5d43 | Python | singhalok641/kaggle-Bengali.AI | /src/models.py | UTF-8 | 1,106 | 2.625 | 3 | [] | no_license | import pretrainedmodels
import torch.nn as nn
from torch.nn import functional as F
class ResNet34(nn.Module):
    """ResNet-34 backbone with three linear classification heads, one per
    target of the Bengali.AI grapheme task (grapheme root, vowel
    diacritic, consonant diacritic)."""
    def __init__(self, pretrained):
        # pretrained: if True, load ImageNet weights for the backbone.
        super(ResNet34, self).__init__()
        if pretrained is True:
            self.model = pretrainedmodels.__dict__["resnet34"](pretrained="imagenet")
        else:
            self.model = pretrainedmodels.__dict__["resnet34"](pretrained=None)
        # (last_linear): Linear(in_features=512, out_features=1000, bias=True)
        # resnet34 has 512 input features and 1000 output features;
        # we bypass last_linear and attach three task-specific heads
        # on top of the 512-d pooled features instead.
        # 168 outputs for grapheme_root
        self.l0 = nn.Linear(512, 168)
        # 11 outputs for vowel_diacritic
        self.l1 = nn.Linear(512, 11)
        # 7 outputs for consonant diacritic
        self.l2 = nn.Linear(512, 7)
    def forward(self, x):
        """Return (root_logits, vowel_logits, consonant_logits) for a batch x."""
        bs, _, _, _ = x.shape
        # Backbone conv features, then global average pooling to (bs, 512).
        x = self.model.features(x)
        x = F.adaptive_avg_pool2d(x, 1).reshape(bs, -1)
        l0 = self.l0(x)
        l1 = self.l1(x)
        l2 = self.l2(x)
        return l0,l1,l2
| true |
50713fa809cb59aebf2a81d70bb8d22f628fe0e5 | Python | empbetty/vocass | /vocass-v2.0/vocass.app/Contents/Resources/vocass.py | UTF-8 | 4,845 | 2.828125 | 3 | [] | no_license | import Tkinter
from tkFileDialog import askopenfilename, askdirectory
import re
from docx import Document
class vocass(Tkinter.Tk):
    """Tkinter GUI: given a known-word list and a text document, write a
    .docx table of every document word NOT in the list with its count."""
    def __init__(self,parent):
        Tkinter.Tk.__init__(self,parent)
        self.parent = parent
        self.initialize()
    def initialize(self):
        """Build the four-row layout: word-list picker, document picker,
        output-directory picker and the Run button, each with a status
        label bound to a StringVar."""
        self.grid()
        frame_wl = Tkinter.Frame(self,bd=2,width=600,height=40,relief="sunken")
        frame_wl.pack(fill="both",expand=1)
        frame_wl.pack_propagate(0)
        frame_doc = Tkinter.Frame(self,bd=2,width=600,height=40,relief="sunken")
        frame_doc.pack(fill="both",expand=1)
        frame_doc.pack_propagate(0)
        frame_output = Tkinter.Frame(self,bd=2,width=600,height=40,relief="sunken")
        frame_output.pack(fill="both",expand=1)
        frame_output.pack_propagate(0)
        frame_run = Tkinter.Frame(self,bd=2,width=600,height=40,relief="sunken")
        frame_run.pack(fill="both",expand=1)
        frame_run.pack_propagate(0)
        # Row 1: word-list file chooser + current selection label.
        button_wl = Tkinter.Button(frame_wl,width=20,text=u"Select A Word List",command=self.selectWordList)
        button_wl.pack(side="left")
        self.wordlistselected = Tkinter.StringVar()
        label_wl = Tkinter.Label(frame_wl,textvariable=self.wordlistselected,anchor="w",fg="red")
        self.wordlistselected.set("No File Selected")
        label_wl.pack(side="left")
        # Row 2: document file chooser + current selection label.
        button_doc = Tkinter.Button(frame_doc,width=20,text=u"Select A Document",command=self.selectDocument)
        button_doc.pack(side="left")
        self.documentselected = Tkinter.StringVar()
        label_doc = Tkinter.Label(frame_doc,textvariable=self.documentselected,anchor="w",fg="red")
        self.documentselected.set("No File Selected")
        label_doc.pack(side="left")
        # Row 3: output-directory chooser + current selection label.
        button_output = Tkinter.Button(frame_output,width=20,text=u"Select Output Directory",command=self.selectDirectory)
        button_output.pack(side="left")
        self.directoryselected = Tkinter.StringVar()
        label_output = Tkinter.Label(frame_output,textvariable=self.directoryselected,anchor="w",fg="red")
        self.directoryselected.set("No Directory Selected")
        label_output.pack(side="left")
        # Row 4: Run button + status message.
        button_run = Tkinter.Button(frame_run,width=20,text=u"Run!",command=self.run)
        button_run.pack(side="left")
        self.message = Tkinter.StringVar()
        label_run = Tkinter.Label(frame_run,textvariable=self.message,anchor="w",fg="red")
        self.message.set("Nothing Generated")
        label_run.pack(side="left")
    def selectWordList(self):
        """Ask for a .txt word-list file and remember the chosen path."""
        filename = askopenfilename(filetypes=[("Text Files", "*.txt")])
        if (len(filename) >= 1):
            self.wordlistselected.set(filename)
    def selectDocument(self):
        """Ask for the .txt document to analyse and remember its path."""
        filename = askopenfilename(filetypes=[("Text Files", "*.txt")])
        if (len(filename) >= 1):
            self.documentselected.set(filename)
    def selectDirectory(self):
        """Ask for the directory where output.docx will be written."""
        directoryname = askdirectory()
        if (len(directoryname) >= 1):
            self.directoryselected.set(directoryname)
    def run(self):
        """Count document words absent from the word list and save a
        .docx table (word, count, empty difficulty column).

        NOTE(review): assumes all three paths were selected first --
        there is no validation before opening the files.
        """
        # Load the known-vocabulary words into a flat list.
        f = open(self.wordlistselected.get())
        voc = []
        for line in f:
            for word in line.split():
                voc.append(word)
        f.close()
        # Read the document and strip punctuation/sentence-final dots so
        # that splitting yields bare lowercase words.
        f = open(self.documentselected.get())
        dic = {}
        text = f.read().lower()
        # content = f.read()
        # text = re.sub('[,.!?*\(\)\"\'|#&$\[\];\{\}_:/]', " ", text)
        text = re.sub('\. ', " ", text)
        text = re.sub('\.\)', " ", text)
        text = re.sub('\.\"', " ", text)
        text = re.sub('\"\'', " ", text)
        text = re.sub('\.\'', " ", text)
        text = re.sub(' \'', " ", text)
        text = re.sub('\' ', " ", text)
        text = re.sub('\.\\n', " ", text)
        text = re.sub('\.\\r\\n', " ", text)
        text = re.sub('--', " ", text)
        text = re.sub('[^a-z\'\-\.]+', " ", text)
        # text = text.translate(None, string.punctuation)
        # Tally every word that is not in the known-vocabulary list.
        for word in text.split():
            if ((word in voc) == False):
                i = dic.get(word)
                if (i == None):
                    dic[word] = 1
                else:
                    dic[word] = dic[word] + 1
        f.close()
        # Emit the results as a 3-column table in output.docx; the
        # "Difficulty level" column is left blank for manual grading.
        document = Document()
        table = document.add_table(rows=1, cols=3)
        hdr_cells = table.rows[0].cells
        hdr_cells[0].text = 'Word'
        hdr_cells[1].text = '# of times'
        hdr_cells[2].text = 'Difficulty level'
        for word in sorted(dic):
            row_cells = table.add_row().cells
            row_cells[0].text = str(word)
            row_cells[1].text = str(dic[word])
        document.save(self.directoryselected.get() + '/output.docx')
        self.message.set("output.docx Successfully Generated!")
# Launch the GUI when executed as a script.
if __name__ == "__main__":
    application = vocass(None)
    application.title('Vocabulary Assessor')
application.mainloop() | true |
4f4c2e0d7f1114ba157679ae21f4d1a60373d8e0 | Python | clement/django | /tests/regressiontests/file_storage/models.py | UTF-8 | 2,751 | 2.609375 | 3 | [
"BSD-3-Clause"
] | permissive | import os
import tempfile
import shutil
from django.db import models
from django.core.files.storage import FileSystemStorage
from django.core.files.base import ContentFile
# Test for correct behavior of width_field/height_field.
# Of course, we can't run this without PIL.
try:
    # Checking for the existence of Image is enough for CPython, but
    # for PyPy, you need to check for the underlying modules
    from PIL import Image, _imaging
except ImportError:
    Image = None
# If we have PIL, do these tests
if Image:
    # Scratch storage for uploaded test files; the doctests below remove
    # the directory when they finish.
    temp_storage_dir = tempfile.mkdtemp()
    temp_storage = FileSystemStorage(temp_storage_dir)
    class Person(models.Model):
        """Regression model: ImageField keeps mug_height/mug_width in
        sync with the stored image via height_field/width_field."""
        name = models.CharField(max_length=50)
        mugshot = models.ImageField(storage=temp_storage, upload_to='tests',
                                    height_field='mug_height',
                                    width_field='mug_width')
        mug_height = models.PositiveSmallIntegerField()
        mug_width = models.PositiveSmallIntegerField()
__test__ = {'API_TESTS': """
>>> from django.core.files import File
>>> image_data = open(os.path.join(os.path.dirname(__file__), "test.png"), 'rb').read()
>>> p = Person(name="Joe")
>>> p.mugshot.save("mug", ContentFile(image_data))
>>> p.mugshot.width
16
>>> p.mugshot.height
16
>>> p.mug_height
16
>>> p.mug_width
16
# Bug #9786: Ensure '==' and '!=' work correctly.
>>> image_data = open(os.path.join(os.path.dirname(__file__), "test1.png"), 'rb').read()
>>> p1 = Person(name="Bob")
>>> p1.mugshot.save("mug", ContentFile(image_data))
>>> p2 = Person.objects.get(name="Joe")
>>> p.mugshot == p2.mugshot
True
>>> p.mugshot != p2.mugshot
False
>>> p.mugshot != p1.mugshot
True
Bug #9508: Similarly to the previous test, make sure hash() works as expected
(equal items must hash to the same value).
>>> hash(p.mugshot) == hash(p2.mugshot)
True
# Bug #8175: correctly delete files that have been removed off the file system.
>>> import os
>>> p2 = Person(name="Fred")
>>> p2.mugshot.save("shot", ContentFile(image_data))
>>> os.remove(p2.mugshot.path)
>>> p2.delete()
# Bug #8534: FileField.size should not leave the file open.
>>> p3 = Person(name="Joan")
>>> p3.mugshot.save("shot", ContentFile(image_data))
# Get a "clean" model instance
>>> p3 = Person.objects.get(name="Joan")
# It won't have an opened file.
>>> p3.mugshot.closed
True
# After asking for the size, the file should still be closed.
>>> _ = p3.mugshot.size
>>> p3.mugshot.closed
True
# Make sure that wrapping the file in a file still works
>>> p3.mugshot.file.open()
>>> p = Person.objects.create(name="Bob The Builder", mugshot=File(p3.mugshot.file))
>>> p.save()
>>> p3.mugshot.file.close()
# Delete all test files
>>> shutil.rmtree(temp_storage_dir)
"""}
| true |
8e47610f69042afd28066712333557ac979abb91 | Python | ministryofjustice/analytics-platform-ops | /scripts/auth0connections/auth0connections.py | UTF-8 | 5,158 | 2.734375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
import base64
import functools
from collections import defaultdict
from pathlib import Path
from pprint import pprint
import sys
import click
import yaml
from auth0.v3.management import Auth0
from environs import Env
from jinja2 import Environment
env = Env()
env.read_env()  # read .env file, if it exists
jinja_env = Environment()
# Template filter: URL-safe base64-encode a string; available to the
# connection templates as "| base64enc".
jinja_env.filters["base64enc"] = lambda x: base64.urlsafe_b64encode(
    x.encode("utf8")
).decode()
# Auth0 management-API credentials; both environment variables must be set.
AUTH0_TOKEN = env("AUTH0_TOKEN")
AUTH0_DOMAIN = env("AUTH0_DOMAIN")
@functools.lru_cache(maxsize=1)
def get_client():
    """Return the Auth0 management-API client (built once, then cached)."""
    return Auth0(AUTH0_DOMAIN, AUTH0_TOKEN)
def render_local_connections(config):
    '''Returns full connection dicts, from the connections listed in the config
    file, and rendered using the template specified.

    Uses a template specified in the connection config
    e.g. 'template_name: hmpps-auth' in the connection configuration
    means use the template in: ./connection_templates/hmmps-auth/

    Exits the process (status 1) when a connection is missing its
    'connection_template' key or names a template directory that
    does not exist.
    '''
    connections = {}
    # Map of template name -> directory under ./connection_templates/.
    template_root = Path(__file__).cwd() / Path("connection_templates")
    template_dirs = dict((entry.stem, entry) for entry in template_root.iterdir() if entry.is_dir())
    for connection_name in config:
        connection = config[connection_name]
        # Make the connection's own name available to its templates.
        connection['name'] = connection_name
        try:
            template_name = connection['connection_template']
        except KeyError:
            click.echo(f'ERROR: Connection YAML "{connection_name}": missing key "connection_template"')
            sys.exit(1)
        try:
            template_path = template_dirs[template_name]
        except KeyError:
            click.echo(f'ERROR: template_name: "{template_name}" is specified in the config, but no such template exists in {template_root}')
            sys.exit(1)
        # render the scripts (every *.js file in the template directory,
        # keyed by its stem)
        scripts = template_path.glob("*.js")
        script_templates = {
            x.stem: jinja_env.from_string(x.open(encoding="utf8").read())
            for x in scripts
        }
        scripts_rendered = {}
        for name, script_template in script_templates.items():
            scripts_rendered[name] = script_template.render(**connection)
        # render the main connection template
        with (template_path / Path("config.yaml")).open("r") as config_yaml_file:
            yaml_rendered = jinja_env.from_string(config_yaml_file.read()).render(
                **connection
            )
        # An empty/blank config.yaml yields None; fall back to a dict so
        # the "options" assignment below cannot fail.
        body = yaml.safe_load(yaml_rendered) or defaultdict(dict)
        # add in the rendered scripts
        body["options"]["scripts"] = scripts_rendered
        connections[connection_name] = body
    return connections
@click.group()
@click.pass_context
@click.option("-f", "--config-file", type=click.File("r"), required=True)
def cli(ctx, config_file):
    """Load the YAML config file into the click context for subcommands."""
    ctx.ensure_object(dict)
    ctx.obj["config_file"] = yaml.safe_load(config_file)
    config_file.close()
@cli.command()
@click.option('--names', '-n', help='Only print each connection\'s name', is_flag=True)
def remote(names):
    """
    Show a list of existing connections on auth0

    (-f is ignored)
    """
    click.echo("Remote connections:")
    # Fetch everything once; with --names reduce to just the name fields.
    connections = get_client().connections.all()
    if names:
        connections = [connection['name'] for connection in connections]
    click.echo(yaml.safe_dump(connections))
@cli.command()
@click.pass_context
def local(ctx):
    """
    Show defined connections
    """
    click.echo("Local connections:")
    # Render every connection from the loaded config and dump as YAML.
    rendered = render_local_connections(ctx.obj["config_file"])
    click.echo(yaml.safe_dump(rendered))
@cli.command()
@click.pass_context
def create(ctx):
    '''
    Creates on Auth0 the connections that are defined locally, using the
    Auth0 management API.

    Does not overwrite connections of the same name - delete a connection if you
    wish to overwrite it.
    '''
    click.echo("Creating connections:")
    rendered_connections = render_local_connections(ctx.obj["config_file"])
    client = get_client()
    # Names of the connections that already exist remotely.
    remote_connections = [x["name"] for x in client.connections.all()]
    for connection_name, body in rendered_connections.items():
        if not connection_name in remote_connections:
            click.echo(f"Creating {connection_name}")
            resp = client.connections.create(body)
            if resp:
                click.echo(pprint(resp))
        else:
            # Existing connections are never overwritten (see docstring).
            click.echo(
                f"Skipping: {connection_name} as it already exists. Delete it "
                f"from auth0 if you want this script to recreate it"
            )
@cli.command()
@click.pass_context
@click.argument('name')
def delete(ctx, name):
    """Delete the remote Auth0 connection called NAME (exits 1 if absent)."""
    click.echo(f"Deleting connection {name}")
    client = get_client()
    # Look up the connection id by name from the remote listing.
    remote_connections = dict((c["name"], c) for c in client.connections.all())
    try:
        connection_id = remote_connections[name]['id']
    except KeyError:
        click.echo(f"Error: Connection {name} does not exist (remotely)", err=True)
        sys.exit(1)
    resp = client.connections.delete(connection_id)
    if resp:
        click.echo(pprint(resp))


if __name__ == "__main__":
    cli()
| true |
d844404166ce936ed8af3828af93f350d0d3bb0c | Python | deepakantony/miscellaneous | /Tutorials/ProgrammingLanguages/Python/PythonCourse/IntroductionToThreads/ThreadModule.py | UTF-8 | 802 | 3.765625 | 4 | [] | no_license | import thread
def gcd(a, b):
    """Return the greatest common divisor of a and b (Euclid's algorithm,
    iterative form)."""
    while b:
        a, b = b, a % b
    return a
# Shared state: count of finished worker threads, guarded by `lock`.
threadComplete = 0
lock = thread.allocate_lock()
def printgcd(a, b):
    """ If you're lucky this will print neatly with all thread calls."""
    # Runs in a worker thread: compute the gcd, then take the lock so the
    # two print statements and the counter update are not interleaved
    # with output from the other threads.
    global threadComplete
    g = gcd(a,b)
    lock.acquire()
    print "GCD of %d and %d is %d" % ( a, b, g)
    print "Second print"
    threadComplete += 1
    lock.release()
def mainForThreads():
    """Spawn three printgcd worker threads and wait until all have finished.

    The completion check polls the shared `threadComplete` counter; the
    original `while ...: pass` busy-wait pinned a CPU core at 100%, so a
    short sleep is inserted between polls.
    """
    import time
    thread.start_new_thread(printgcd, (12, 16))
    thread.start_new_thread(printgcd, (20, 16))
    thread.start_new_thread(printgcd, (8, 16))
    while threadComplete != 3:
        time.sleep(0.01)  # yield instead of spinning flat-out
    raw_input("Enter anything to exit...")
# Run the threading demo when executed directly.
if __name__ == "__main__":
    mainForThreads()
| true |
dfc2f97191e319c3a084ab371dc1fbbfac137dd9 | Python | Yuvanshankar21/yuvan | /greatest.py | UTF-8 | 284 | 2.984375 | 3 | [] | no_license | n1=int(input(""))
def largest(a, b, c):
    """Return the largest of three numbers.

    Replaces the original comparison chain, which duplicated itself inside
    a redundant special case for "all three negative" and mixed >= with >
    inconsistently; the built-in max() gives the same result for any signs.
    """
    return max(a, b, c)


if __name__ == "__main__":
    # n1 is read from the user at the top of the script.
    n2 = int(input(""))
    n3 = int(input(""))
    print(largest(n1, n2, n3))
| true |
4195a5c477a78d5b3d4a1eb47b977dedbba48905 | Python | lilrob33/The-Tech-Academy-Python-Coding-Projects | /dirGui_main.py | UTF-8 | 1,872 | 3.40625 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Python Ver: 3.8.1
#
# Author: Pedro Suarez
#
# Purpose: For this drill, you will need to write a script that
# creates a GUI with a button widget and a text widget.
# Your script will also include a function that when
# it is called will invoke a dialog modal which will
# allow users with the ability to select a folder directory
# from their system. Finally, your script will show the
# user’s selected directory path into the text field.
#
# Tested OS: This code was written and tested to work with Windows 10.
from tkinter import *
import tkinter as tk
from tkinter import filedialog
class ParentWindow(Frame):
    """Main window: a text field plus a SEARCH button that opens a
    directory-picker dialog and inserts the chosen path into the field."""
    def __init__(self, master):
        Frame.__init__(self)
        # Button callback: ask for a directory and, if one was picked,
        # insert its path into the text widget.
        # NOTE(review): uses the module-global `root` as the dialog parent
        # rather than self.master -- works only when constructed from the
        # __main__ block below; confirm before reusing this class.
        def dirSearch():
            dir_path=filedialog.askdirectory(parent=root,initialdir="/",title='Pick a directory')
            if dir_path:
                self.txt_dPath.insert(INSERT,dir_path)
        # Fix the window at 600x250 (min == max disables resizing).
        self.master = master
        self.master.minsize(600,250)
        self.master.maxsize(600,250)
        self.master.title("Directory Search")
        self.master.configure(bg="#F0F0F0")
        # Caption label above the path field.
        self.lbl_dPath = Label(self.master, text='Directory Path:', font=("Helvetica",16), fg='black')
        self.lbl_dPath.pack(side=TOP, pady=15)
        # Text widget that receives the selected directory path.
        self.txt_dPath = Text(self.master, height=1.3, font=("Helvetica",14), fg='black', bg='white')
        self.txt_dPath.pack(padx=10, pady=20)
        # SEARCH button wired to the dirSearch callback above.
        self.btn_Search = Button(self.master, text='SEARCH', width=13, height=2, font=("Helvetica",14), fg='black', bg='lightgrey',command=dirSearch)
        self.btn_Search.pack(pady=20)
# Create the Tk root and run the event loop when executed as a script.
if __name__ == "__main__":
    root = Tk()
    App = ParentWindow(root)
    root.mainloop()
| true |
fbf240384f2815ecb084b7365beab81392b29919 | Python | litiblue/letmescrape | /tests/test_processors.py | UTF-8 | 723 | 2.734375 | 3 | [] | no_license | from scrapy.http import HtmlResponse, Request
from letmescrape.processors import *
def test_extract_price():
    """extract_price pulls a numeric price out of '$'-prefixed text,
    returning None when there is no dollar sign."""
    assert extract_price(' $13 ') == 13
    assert extract_price(' $13.1 ') == 13.1
    assert extract_price(' $13.11 ') == 13.11
    assert extract_price(' 13.11 ') is None
def test_html2text():
    """html2text strips the markup, leaving only the inner text."""
    converted = html2text('<html> hello world! </html>')
    assert converted.strip() == 'hello world!'
def test_get_absolute_url():
    """get_absolute_url resolves a relative path against the response URL."""
    base_url = 'http://www.test.com'
    relative_url = 'path/to/resource'
    expected = base_url + '/' + relative_url
    response = HtmlResponse(base_url, request=Request(base_url))
    assert get_absolute_url(relative_url, {'response': response}) == expected
95ed1fe5ecae1316740dcfdd0ebff72042e6987e | Python | shahid1725/PythonDocuments | /FUNCTIONS/prime numbers.py | UTF-8 | 800 | 3.5 | 4 | [] | no_license | #prime numbers-2,3,5,7,11........
#a prime number can be divided evenly only by 1 and by itself
# num=int(input("enter a number :"))
# flag=0
# if num>1:
# for i in range(2,num):
# if num%i==0:/
# break
# else:
# flag=1
# if flag==1:
# print("prime number")
# else:
# print("not prime number")
#
# min=int(input("enter a minimum number :"))
# max=int(input("enter a maximum number :"))
# fla=0
# if min>1:
#
# for i in range(min,max):
# if max%i==0:
# break
# else:
# fla=1
#
# min=int(input("enter a minimum number :"))
# max=int(input("enter a maximum number :"))
# for a in range(min,max+1):
# if a>1:
# for i in range(2,a):
# if a%i==0:
# break
# else:
# print(a)
#
| true |
328708139b8c2ca6c89b14e4c4e12e330e5fdc3b | Python | HOZH/leetCode | /leetCodePython2020/138.copy-list-with-random-pointer.py | UTF-8 | 1,101 | 3.234375 | 3 | [] | no_license | #
# @lc app=leetcode id=138 lang=python3
#
# [138] Copy List with Random Pointer
#
# @lc code=start
"""
# Definition for a Node.
class Node:
def __init__(self, x: int, next: 'Node' = None, random: 'Node' = None):
self.val = int(x)
self.next = next
self.random = random
"""
class Solution:
    def copyRandomList(self, head: 'Node') -> 'Node':
        """Deep-copy a linked list whose nodes carry an extra `random` pointer.

        Three O(n) passes with O(1) extra space:
          1. interleave a copy after every original node (A -> A' -> B -> B'),
          2. wire each copy's `random` through its original's random.next,
          3. un-weave the two lists.

        BUG FIX: the previous separation pass only relinked the copies and
        left every original node's `next` pointing into the copied list;
        this version restores the original list intact.
        """
        if not head:
            return None
        # Pass 1: insert a copy directly after each original node.
        node = head
        while node:
            node.next = Node(node.val, node.next)
            node = node.next.next
        # Pass 2: copy.random is the node after original.random (its copy).
        node = head
        while node:
            node.next.random = node.random.next if node.random else None
            node = node.next.next
        # Pass 3: detach the copies and restore the original links.
        copy_head = head.next
        node = head
        while node:
            copy = node.next
            node.next = copy.next                       # restore original link
            copy.next = copy.next.next if copy.next else None
            node = node.next
        return copy_head
# @lc code=end
| true |
ca49f1ca562341ac71930dbad40eefad79134154 | Python | hrfrahn/functprog | /Chapter 8 programming projects/ch8_3.py | UTF-8 | 2,112 | 4 | 4 | [] | no_license | # harrison frahn
# period 2
# chapter 8.3
# expected input: a prime and an integer
def isprime(num):
    """Primality test by trial division.

    Matches the original's quirks: non-integer values are never prime,
    and the sign is ignored (isprime(-7) is True because of abs()).
    """
    if num % 1 != 0:          # reject non-integer values
        return False
    num = abs(num)
    if num < 2:               # 0 and 1 aren't prime
        return False
    if num % 2 == 0:          # 2 is the only even prime
        return num == 2
    divisor = 3
    while divisor * divisor <= num:
        if num % divisor == 0:
            return False
        divisor += 2
    return True
def main():
    """Interactively demonstrate Fermat's little theorem: for a prime p and
    an integer a, a**p - a is an integer multiple of p.

    Loops until the user answers something other than 'y'; bad inputs are
    reported via the RuntimeError / NameError / ... handlers below.
    """
    again = True
    try:
        while again:
            print('This program demonstrates Fermat\'s little theorem.')
            # NOTE: eval() on user input executes arbitrary code; it is kept
            # because the program deliberately accepts numeric expressions,
            # but never use this pattern on untrusted input.
            p = eval(input("Enter a prime: "))
            a = eval(input("enter an integer: "))
            if isprime(p) and a%1==0:
                mult = a**p
                fact = (mult - a)//p
                print('a =',a,'\np =',p)
                print(a,'^',p,'=',int(mult))
                print('(',int(mult),'-',a,') /',p,'=',int(fact))
                print(mult-a,'is an integer multiple of',p,'so the theorem works! YAY!')
            elif isprime(p)==False and a%1!=0:
                raise RuntimeError("You didn't enter a prime number or an integer!")
            elif isprime(p)==False:
                raise RuntimeError("You didn't enter a prime number!")
            elif a%1!=0:
                raise RuntimeError("You didn't enter an integer!")
            a = input('Demonstrate the theorem again, with different values?(y/n)')
            if a[0].lower() != 'y':
                again = False
            if a[0].lower() != 'y' and a[0].lower() != 'n':
                raise RuntimeError("You didn't enter y or n!")
    except RuntimeError as err:
        print(err.args)
    except NameError:
        print("You entered letter(s), not a number!")
    except TypeError:
        print("You entered more than 1 number!")
    except SyntaxError:
        print("You entered the number wrong!")
    except (EOFError, KeyboardInterrupt):
        # BUG FIX: "except EOFError or KeyboardInterrupt:" evaluated the
        # `or` expression first and therefore only ever caught EOFError.
        print('\n')
    except Exception:
        print("Something went wrong!")


if __name__ == "__main__":
    main()
| true |
f2e1ae4b3884a313db7242d370985f86d511a301 | Python | gndelgad/TopOpt | /Freefem_level_set_conformal_2D/MeshCorrect.py | UTF-8 | 624 | 2.546875 | 3 | [] | no_license | import fileinput
import sys
import re
def replace(file, pattern, subst):
    """Replace every regex occurrence of *pattern* in *file* with *subst*,
    rewriting the file in place.

    The whole file is read as one string, so (multiline) regexes work.
    BUG FIX: the original opened the file in binary mode ('rb'/'wb') but
    applied str patterns, which raises TypeError on Python 3; text mode
    fixes that, and the context managers guarantee the handles close.
    """
    # Read contents from file as a single string.
    with open(file, 'r') as file_handle:
        file_string = file_handle.read()
    # Use the re module so (multiline) regex replacement is supported.
    file_string = re.sub(pattern, subst, file_string)
    # Write contents back; mode 'w' truncates the file first.
    with open(file, 'w') as file_handle:
        file_handle.write(file_string)
replace("Th.LevelSet.mesh","MeshVersionFormatted 0","MeshVersionFormatted 1") | true |
39c265bd19e3bc8b4947539b970d780a8f1a40e8 | Python | quoimec/Carrot | /Agent/Carrot.py | UTF-8 | 5,316 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pymongo
from keras import models as km
from keras import layers as kl
from keras import optimizers as ko
from functools import reduce
class Carrot:
# Model V1
# - Takes 0.0 -> 1.0 input from 7 facial classes, an observation time in seconds and a session count.
def __init__(self):
self.cosmos = pymongo.MongoClient("mongodb://remi-mongo:tZdoiZvlNhzLlFvCfB6vApJfLNvJBDYUPoRKIn0p3ujFPbVfLnlLouQlCDKgHEaJvN5mnkzglBl5e5UjsjjLTg==@remi-mongo.documents.azure.com:10255/?ssl=true&replicaSet=globaldb")
self.database = self.cosmos.carrot.results
self.model = self.build(learning = 0.2)
def build(self, *, learning, weights = None):
input = kl.Input(shape = (15,))
connection = kl.Dense(256, activation = "relu") (input)
connection = kl.Dense(512, activation = "linear") (connection)
connection = kl.Dense(256, activation = "relu") (connection)
output = kl.Dense(2, activation = "softmax") (connection)
model = km.Model(inputs = [input], outputs = [output])
model.compile(optimizer = ko.Adam(lr = learning), loss = "mse")
if weights != None:
model.set_weights(weights)
return model
def format(self, object):
""" Data Formatter
Emoji Agent Input: Vector[15]
-> Session Count: Float, Representing the number of games the player has played
-> Win Ratio: Float, Representing the proportion of the time that the Agent has won
-> Player One: Float, 0.0 for Human, 1.0 for Agent
-> Happy Count: Float, Normalised emotions, between 0.0 and 1.0
-> Neutral Count: Float, Normalised emotions, between 0.0 and 1.0
-> Disgust Count: Float, Normalised emotions, between 0.0 and 1.0
-> Fear Count: Float, Normalised emotions, between 0.0 and 1.0
-> Anger Count: Float, Normalised emotions, between 0.0 and 1.0
-> Surprised Count: Float, Normalised emotions, between 0.0 and 1.0
-> Sad Count: Float, Normalised emotions, between 0.0 and 1.0
-> Game Seconds: Float, The number of seconds that the game was played for before the agent needed to make a decision
-> Player Decision: Int, -1 for Swap, 1 for Keep and 0 if the the agent is player one
-> Emoji One: Int, Encoded value between 0 and 6
-> Emoji Two: Int, Encoded value between 0 and 6
-> Emoji Three: Int, Encoded value between 0 and 6
Emoji Agent Actual: Vector[2]
-> Agent Swap: Int, 1 if True, 0 if False
-> Agent Keep: Int, 1 if True, 0 if False
"""
total = lambda data: reduce(lambda a, b: a + b[1], data.items(), 0.0)
normalise = lambda data: dict(map(lambda a: (a[0], a[1] / total(data)), data.items()))
emojiIndex = lambda a: ["😡", "🤢", "😨", "😀", "😒", "😮", "😐"].index(a)
emotionKeys = ["happyCount", "neutralCount", "disgustCount", "fearCount", "angerCount", "surprisedCount", "sadCount"]
normalisedEmotions = normalise(dict(filter(lambda a: a[0] in emotionKeys, object.items())))
input = [
float(object["sessionCount"]),
(1 + object["humanLost"]) / (object["humanWon"] + object["humanLost"] + 2),
0.0 if object["playerOne"] == "Human" else 1.0,
normalisedEmotions["happyCount"],
normalisedEmotions["neutralCount"],
normalisedEmotions["disgustCount"],
normalisedEmotions["fearCount"],
normalisedEmotions["angerCount"],
normalisedEmotions["surprisedCount"],
normalisedEmotions["sadCount"],
normalisedEmotions["gameSeconds"],
0 if object["playerOne"] == "Agent" else (-1 if object["playerDecision"] == "Swap" else 1),
emojiIndex(object["emojiOne"]),
emojiIndex(object["emojiTwo"]),
emojiIndex(object["emojiThree"])
]
agentSwap = lambda carrot, decision: decision == "Keep" if carrot == "Human" else decision == "Swap"
output = [
int(agentSwap(object["carrotStart"], object["playerDecision"])),
int(not agentSwap(object["carrotStart"], object["playerDecision"]))
]
return input, output
def train(self, epochs = 10):
inputs, outputs = []
for record in self.database.find({}):
input, output = self.format(record)
inputs.append(input)
outputs.append(output)
# inputs, outputs = list(zip(*list(map(lambda a: tuple(self.format(a)), self.database.find({})))))
self.model.fit(x = inputs, y = outputs, batch_size = 1, epochs = epochs, verbose = 2)
self.model.save(filepath = "carrot-model.hdf5")
def test(self):
inputs, outputs = list(zip(*list(map(lambda a: tuple(self.format(a)), self.database.find({})))))
print(inputs[0:2])
print(outputs[0:2])
carrot = Carrot()
# carrot.test()
carrot.train()
| true |
b6f828d3b4e252549cc920f7e281a873f521ee55 | Python | sarareginaff/cashbackapp | /cashback/controllers/helper.py | UTF-8 | 1,446 | 2.640625 | 3 | [] | no_license | import datetime
from flask import request, jsonify
from functools import wraps
from cashback.db import auth_db
from cashback.models import auth_model
def token_required(f):
"""
Create decorator to request token.
:Headers:
- Content-Type: application/json
- Authorization (string): token of user.
:Returns:
- current_user: Current user data
:author: sarareginaff
:creation: Sep/2020
"""
@wraps(f)
def decorated(*args, **kwargs):
token = request.headers.get('Authorization')
if not token:
return jsonify({'message': 'Esta faltando o token'}), 401
else:
not_allowed_tokens = auth_db.get_not_allowed_tokens()
if not_allowed_tokens and token in not_allowed_tokens:
return jsonify({'message': '''Token nao esta habilitado.
Usuario foi deslogado'''}), 401
try:
data = auth_model.decode_token(token)
if data['exp'] < datetime.datetime.now().timestamp():
return jsonify({'message': 'Token expirado'}), 401
else:
current_user = auth_db.get_user_by_cpf(cpf=data['cpf'])
except:
return jsonify({'message': 'Token invalido'}), 401
return f(current_user, *args, **kwargs)
return decorated | true |
2fb8324def1b2b17ce1dc9528bd29716b7f49de2 | Python | BhargavasRamus/AI_ML-assignment_1 | /1_b.py | UTF-8 | 701 | 3.265625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
# Number of training samples
N = 10
# Generate equispaced floats in the interval [0, 2pi]
x = np.linspace(0, 2*np.pi, N)
# Generate noise
mean = 0
std = 0.05
# Generate some numbers from the sine function
y = np.sin(x)
# Add noise
y += np.random.normal(mean, std, N)
x=x.reshape((-1,1))
# transpose of x
print "enter the highest degree of the poynomial="
m=input()
m= int(m)
# order of polynomial
X=np.full(np.shape(x),1)
for i in range(1,m,1)
X_=x**i
X=np.column_stack((X,X_))
print X
# basis function
W = np.zeros((N,N))
W = np.matmul(np.linalg.pinv(X),y)
print "W="
print W)
Y=np.matmul(X,W)
# y'=xw
plt.plot(x,y,'*',X,Y,'-')
plt.show()
| true |
115d766a50be52494b1edcb3633416ccfe94eeb4 | Python | gibbs212521/Kata | /lib/auxilliary/csv/base_parser.py | UTF-8 | 1,752 | 3.15625 | 3 | [
"MIT"
] | permissive | from os import path
from platform import system as CURRENT_OS
class BaseParser():
''' Base Parser Class. Abstract'''
def __init__(self):
### OS Check for filepath purposes ###
OS_TYPE = CURRENT_OS()
if 'Windows' in OS_TYPE:
self.path_separator = '\\'
self.anti_path_separator = '/'
else: # Linux or Darwin
self.path_separator = '/'
self.anti_path_separator = '\\'
def setPathSeparators(self, filepath):
'''
Reconfigures filepath to OS specific conventions and automatically
runs troubleshooting measures.
Returns corrected strings of both the filepath and directory.
'''
if self.anti_path_separator in filepath:
filepath = filepath.replace(
self.anti_path_separator, self.path_separator
)
path_list = filepath.split(self.path_separator)
path_dir = ''
for seg in path_list[:-1]:
path_dir += seg + self.path_separator
if path.isdir(path_dir):
return [filepath, path_dir]
# Check for landing error #
print(path_list)
if '.' not in path_dir[0] or ':' not in path_dir[0:3]:
LandingChecks = ['.', '.' + self.path_separator]
for LandingCheck in LandingChecks:
if path.isdir(path_dir):
continue
if path.isdir(LandingCheck + path_dir):
path_dir = LandingCheck + path_dir
filepath = path_dir + path_list[-1]
if not path.isdir(path_dir):
raise FileNotFoundError('Could not locate directory %s'% path_dir)
return [filepath, path_dir]
| true |
520b0ff4c9bb56ff11e80c4c6539b4b4690bad53 | Python | woaidapaopao/JData | /trainset.py | UTF-8 | 2,298 | 2.75 | 3 | [] | no_license | # -*- coding:utf-8 -*-
#这个文件不用直接调用,可以在clean中调用
import pandas as pd
import numpy as np
import datetime as dt
def getlabel(label_start,label_end):
#提取标签信息
TRAIN_FILE = 'TrainDataAll.csv'
train = pd.read_csv(TRAIN_FILE)
train['time'] = pd.to_datetime(train['time'])
train = train[(train['time'] >= label_start) & (train['time'] <= label_end)]
train = train[train['type'] == 4]
train = train[['user_id', 'sku_id']]
train.drop_duplicates()
train['label'] = 1
return train
def train_set(train_start1,train_end1,label_start1,label_end1):
train_end = dt.datetime.strptime(train_end1, "%Y-%m-%d")
train_start = dt.datetime.strptime(train_start1, "%Y-%m-%d")#训练数据开始时间
label_start = dt.datetime.strptime(label_start1, "%Y-%m-%d")
label_end = dt.datetime.strptime(label_end1, "%Y-%m-%d")#标签结束时间
userf = pd.read_csv('./feature/user_feature%s_%s.csv' % (train_start, train_end))
prof = pd.read_csv('./feature/product_feature%s_%s.csv' % (train_start, train_end))
uicf = pd.read_csv('./feature/user_product_cate_feature%s_%s.csv' % (train_start, train_end))
trainact = pd.merge(uicf, userf, how='left', on='user_id')
trainact = pd.merge(trainact, prof, how='left', on='sku_id')
label = getlabel(label_start, label_end)
trainact = pd.merge(trainact, label, how='left', on=['user_id', 'sku_id'])#这时的label部分只有1,其余为nan,这个在clean中处理
trainact.to_csv('./uncleanData/traindata%s_%s.csv' % (train_start1, label_end1), index=None)
def test_set(test_start1,test_end1):
test_start = dt.datetime.strptime(test_start1, "%Y-%m-%d")#测试数据开始
test_end = dt.datetime.strptime(test_end1, "%Y-%m-%d")#测试数据结束
userf = pd.read_csv('./feature/user_feature%s_%s.csv' % (test_start, test_end))
prof = pd.read_csv('./feature/product_feature%s_%s.csv' % (test_start, test_end))
uicf = pd.read_csv('./feature/user_product_cate_feature%s_%s.csv' % (test_start, test_end))
testact = pd.merge(uicf, userf, how='left', on='user_id')
testact = pd.merge(testact, prof, how='left', on='sku_id')
testact.to_csv('./uncleanData/testdata%s_%s.csv' % (test_start1, test_end1), index=None)
| true |
027a8444e00deb44a38c3848b19776565b92cbf0 | Python | backwardn/bao | /tests/test_bao.py | UTF-8 | 13,532 | 2.640625 | 3 | [
"CC0-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | #! /usr/bin/env python3
# Run this file using pytest, either in this folder or at the root of the
# project. Since test_vectors.json is generated from bao.py, it's slightly
# cheating to then test bao.py against its own output. But at least this helps
# us notice changes, since the vectors are checked in rather than generated
# every time. Testing the Rust implementation against the same test vectors
# gives us some confidence that they're correct.
from binascii import hexlify, unhexlify
import io
import json
from pathlib import Path
import subprocess
import tempfile
# Imports from this directory.
import bao
import generate_input
HERE = Path(__file__).parent
BAO_PATH = HERE / "bao.py"
VECTORS_PATH = HERE / "test_vectors.json"
VECTORS = json.load(VECTORS_PATH.open())
# Wrapper functions
# =================
#
# Most of the functions in bao.py (except bao_encode) work with streams. These
# wrappers work with bytes, and return hashes as strings, which makes them
# easier to test.
def bao_hash(content):
return hexlify(bao.bao_hash(io.BytesIO(content))).decode("utf-8")
def blake3(b):
return bao_hash(b)
def bao_encode(content):
# Note that unlike the other functions, this one already takes bytes.
encoded, hash_ = bao.bao_encode(content, outboard=False)
return encoded, hash_.hex()
def bao_encode_outboard(content):
# Note that unlike the other functions, this one already takes bytes.
outboard, hash_ = bao.bao_encode(content, outboard=True)
return outboard, hash_.hex()
def bao_decode(hash, encoded):
hashbytes = unhexlify(hash)
output = io.BytesIO()
bao.bao_decode(io.BytesIO(encoded), output, hashbytes)
return output.getvalue()
def bao_decode_outboard(hash, content, outboard):
hashbytes = unhexlify(hash)
output = io.BytesIO()
bao.bao_decode(io.BytesIO(content),
output,
hashbytes,
outboard_stream=io.BytesIO(outboard))
return output.getvalue()
def bao_slice(encoded, slice_start, slice_len):
output = io.BytesIO()
bao.bao_slice(io.BytesIO(encoded), output, slice_start, slice_len)
return output.getvalue()
def bao_slice_outboard(content, outboard, slice_start, slice_len):
output = io.BytesIO()
bao.bao_slice(io.BytesIO(content),
output,
slice_start,
slice_len,
outboard_stream=io.BytesIO(outboard))
return output.getvalue()
def bao_decode_slice(slice_bytes, hash, slice_start, slice_len):
hashbytes = unhexlify(hash)
output = io.BytesIO()
bao.bao_decode_slice(io.BytesIO(slice_bytes), output, hashbytes,
slice_start, slice_len)
return output.getvalue()
# Tests
# =====
def test_hashes():
for case in VECTORS["hash"]:
input_len = case["input_len"]
input_bytes = generate_input.input_bytes(input_len)
expected_hash = case["bao_hash"]
computed_hash = bao_hash(input_bytes)
assert expected_hash == computed_hash
def bao_cli(*args, input=None, should_fail=False):
output = subprocess.run(
["python3", str(BAO_PATH), *args],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL if should_fail else None,
input=input,
)
cmd = " ".join(["bao.py"] + list(args))
if should_fail:
assert output.returncode != 0, "`{}` should've failed".format(cmd)
else:
assert output.returncode == 0, "`{}` failed".format(cmd)
return output.stdout
def test_hash_cli():
# CLI tests just use the final (largest) test vector in each set, to avoid
# shelling out hundreds of times. There's no need to exhaustively test the
# implementation via the CLI, because it's tested on its own above.
# Instead, we just need to verify once that it's hooked up properly.
case = VECTORS["hash"][-1]
input_len = case["input_len"]
input_bytes = generate_input.input_bytes(input_len)
expected_hash = case["bao_hash"]
computed_hash = bao_cli("hash", input=input_bytes).decode().strip()
assert expected_hash == computed_hash
def assert_decode_failure(f, *args):
try:
f(*args)
except (AssertionError, IOError):
pass
else:
raise AssertionError("failure expected, but no exception raised")
def test_encoded():
for case in VECTORS["encode"]:
input_len = case["input_len"]
input_bytes = generate_input.input_bytes(input_len)
output_len = case["output_len"]
expected_bao_hash = case["bao_hash"]
encoded_blake3 = case["encoded_blake3"]
corruptions = case["corruptions"]
# First make sure the encoded output is what it's supposed to be.
encoded, hash_ = bao_encode(input_bytes)
assert expected_bao_hash == hash_
assert output_len == len(encoded)
assert encoded_blake3 == blake3(encoded)
# Now test decoding.
output = bao_decode(hash_, encoded)
assert input_bytes == output
# Make sure decoding with the wrong hash fails.
wrong_hash = "0" * len(hash_)
assert_decode_failure(bao_decode, wrong_hash, encoded)
# Make sure each of the corruption points causes decoding to fail.
for c in corruptions:
corrupted = bytearray(encoded)
corrupted[c] ^= 1
assert_decode_failure(bao_decode, hash_, corrupted)
def make_tempfile(b=b""):
f = tempfile.NamedTemporaryFile()
f.write(b)
f.flush()
f.seek(0)
return f
def test_encoded_cli():
case = VECTORS["encode"][-1]
input_len = case["input_len"]
input_bytes = generate_input.input_bytes(input_len)
output_len = case["output_len"]
expected_bao_hash = case["bao_hash"]
encoded_blake3 = case["encoded_blake3"]
# First make sure the encoded output is what it's supposed to be.
input_file = make_tempfile(input_bytes)
encoded_file = make_tempfile()
bao_cli("encode", input_file.name, encoded_file.name)
encoded = encoded_file.read()
assert output_len == len(encoded)
assert encoded_blake3 == blake3(encoded)
# Now test decoding.
output = bao_cli("decode", expected_bao_hash, encoded_file.name)
assert input_bytes == output
# Make sure decoding with the wrong hash fails.
wrong_hash = "0" * len(expected_bao_hash)
bao_cli("decode", wrong_hash, encoded_file.name, should_fail=True)
def test_outboard():
for case in VECTORS["outboard"]:
input_len = case["input_len"]
input_bytes = generate_input.input_bytes(input_len)
output_len = case["output_len"]
expected_bao_hash = case["bao_hash"]
encoded_blake3 = case["encoded_blake3"]
outboard_corruptions = case["outboard_corruptions"]
input_corruptions = case["input_corruptions"]
# First make sure the encoded output is what it's supposed to be.
outboard, hash_ = bao_encode_outboard(input_bytes)
assert expected_bao_hash == hash_
assert output_len == len(outboard)
assert encoded_blake3 == blake3(outboard)
# Now test decoding.
output = bao_decode_outboard(hash_, input_bytes, outboard)
assert input_bytes == output
# Make sure decoding with the wrong hash fails.
wrong_hash = "0" * len(hash_)
assert_decode_failure(bao_decode_outboard, wrong_hash, input_bytes,
outboard)
# Make sure each of the outboard corruption points causes decoding to
# fail.
for c in outboard_corruptions:
corrupted = bytearray(outboard)
corrupted[c] ^= 1
assert_decode_failure(bao_decode_outboard, hash_, input_bytes,
corrupted)
# Make sure each of the input corruption points causes decoding to
# fail.
for c in input_corruptions:
corrupted = bytearray(input_bytes)
corrupted[c] ^= 1
assert_decode_failure(bao_decode_outboard, hash_, corrupted,
outboard)
def test_outboard_cli():
case = VECTORS["outboard"][-1]
input_len = case["input_len"]
input_bytes = generate_input.input_bytes(input_len)
output_len = case["output_len"]
expected_bao_hash = case["bao_hash"]
encoded_blake3 = case["encoded_blake3"]
# First make sure the encoded output is what it's supposed to be.
input_file = make_tempfile(input_bytes)
outboard_file = make_tempfile()
bao_cli("encode", input_file.name, "--outboard", outboard_file.name)
outboard = outboard_file.read()
assert output_len == len(outboard)
assert encoded_blake3 == blake3(outboard)
# Now test decoding.
output = bao_cli("decode", expected_bao_hash, input_file.name,
"--outboard", outboard_file.name)
assert input_bytes == output
# Make sure decoding with the wrong hash fails.
wrong_hash = "0" * len(expected_bao_hash)
output = bao_cli("decode",
wrong_hash,
input_file.name,
"--outboard",
outboard_file.name,
should_fail=True)
def test_slices():
for case in VECTORS["slice"]:
input_len = case["input_len"]
input_bytes = generate_input.input_bytes(input_len)
expected_bao_hash = case["bao_hash"]
slices = case["slices"]
encoded, hash_ = bao_encode(input_bytes)
outboard, hash_outboard = bao_encode_outboard(input_bytes)
assert expected_bao_hash == hash_
assert expected_bao_hash == hash_outboard
for slice_case in slices:
slice_start = slice_case["start"]
slice_len = slice_case["len"]
output_len = slice_case["output_len"]
output_blake3 = slice_case["output_blake3"]
corruptions = slice_case["corruptions"]
# Make sure the slice output is what it should be.
slice_bytes = bao_slice(encoded, slice_start, slice_len)
assert output_len == len(slice_bytes)
assert output_blake3 == blake3(slice_bytes)
# Make sure slicing an outboard tree is the same.
outboard_slice_bytes = bao_slice_outboard(input_bytes, outboard,
slice_start, slice_len)
assert slice_bytes == outboard_slice_bytes
# Test decoding the slice, and compare it to the input. Note that
# slicing a byte array in Python allows indices past the end of the
# array, and sort of silently caps them.
input_slice = input_bytes[slice_start:][:slice_len]
output = bao_decode_slice(slice_bytes, hash_, slice_start,
slice_len)
assert input_slice == output
# Make sure decoding with the wrong hash fails.
wrong_hash = "0" * len(hash_)
assert_decode_failure(bao_decode_slice, slice_bytes, wrong_hash,
slice_start, slice_len)
# Make sure each of the slice corruption points causes decoding to
# fail.
for c in corruptions:
corrupted = bytearray(slice_bytes)
corrupted[c] ^= 1
assert_decode_failure(bao_decode_slice, corrupted, hash_,
slice_start, slice_len)
def test_slices_cli():
case = VECTORS["slice"][-1]
input_len = case["input_len"]
input_bytes = generate_input.input_bytes(input_len)
expected_bao_hash = case["bao_hash"]
slices = case["slices"]
input_file = make_tempfile(input_bytes)
encoded_file = make_tempfile()
bao_cli("encode", input_file.name, encoded_file.name)
outboard_file = make_tempfile()
bao_cli("encode", input_file.name, "--outboard", outboard_file.name)
# Use the first slice in the list. Currently they're all the same length.
slice_case = slices[0]
slice_start = slice_case["start"]
slice_len = slice_case["len"]
output_len = slice_case["output_len"]
output_blake3 = slice_case["output_blake3"]
# Make sure the slice output is what it should be.
slice_bytes = bao_cli("slice", str(slice_start), str(slice_len),
encoded_file.name)
assert output_len == len(slice_bytes)
assert output_blake3 == blake3(slice_bytes)
# Make sure slicing an outboard tree is the same.
outboard_slice_bytes = bao_cli("slice", str(slice_start), str(slice_len),
input_file.name, "--outboard",
outboard_file.name)
assert slice_bytes == outboard_slice_bytes
# Test decoding the slice, and compare it to the input. Note that
# slicing a byte array in Python allows indices past the end of the
# array, and sort of silently caps them.
input_slice = input_bytes[slice_start:][:slice_len]
output = bao_cli("decode-slice",
expected_bao_hash,
str(slice_start),
str(slice_len),
input=slice_bytes)
assert input_slice == output
# Make sure decoding with the wrong hash fails.
wrong_hash = "0" * len(expected_bao_hash)
bao_cli("decode-slice",
wrong_hash,
str(slice_start),
str(slice_len),
input=slice_bytes,
should_fail=True)
| true |
b44592df15c5d513efa1a946e137bfff70168d59 | Python | kpraveen2412/test_task_02a_Saidni | /test_02/test_database_coll_data.py | UTF-8 | 585 | 2.6875 | 3 | [] | no_license | #retriving test_database collection data
from pymongo import MongoClient
client=MongoClient()
mydb=client['test_database']
coll_list=mydb.list_collection_names()
print('Collections available in test_database are: ', coll_list)
api_users=mydb['api_users']
client_user=mydb['client_user']
roles=mydb['roles']
permissions=mydb['permissions']
routes= mydb['routes']
cost_centers=mydb['cost_centers']
#filename is collection name in mongodb
def test_db_data(file_name):
coll_data=[i for i in file_name.find()]
return coll_data
test_db_data(api_users) | true |
08f80b181ae2a54e653ea9decbca61cbd2575ba3 | Python | SunnyMarkLiu/LeetCode | /601-700/686. Repeated String Match.py | UTF-8 | 706 | 3.34375 | 3 | [] | no_license | #!/home/sunnymarkliu/software/miniconda2/bin/python
# _*_ coding: utf-8 _*_
"""
@author: MarkLiu
@time : 17-11-28 下午9:20
"""
class Solution(object):
def repeatedStringMatch(self, A, B):
"""
:type A: str
:type B: str
:rtype: int
"""
# 粗粒度判断重复次数为 B 的长度除以 A 的长度向上取整
times = -(-len(B) // len(A)) # // 操作实现向下取整, 利用负数相除的技巧实现 ceil
# 细粒度判断 times 还是 times + 1
for i in range(2):
if B in (A * (times + i)):
return times + i
return -1
print Solution().repeatedStringMatch('abababaaba', 'aabaaba')
| true |
95654fc1ad9209897454369ad9cfef2110c24bd4 | Python | SuperVoice-OAKLAND/superVoice | /silence_remove.py | UTF-8 | 4,368 | 2.828125 | 3 | [] | no_license | #! /usr/bin/env python
# encoding: utf-8
###########################################################################
# This vad will be applied to low sample rate data: 16kHz and high sample rate data: 192kHz
###########################################################################
import numpy
import scipy.io.wavfile as wf
import sys
import glob
import pathlib
class VoiceActivityDetection:
def __init__(self):
self.__step = 400
self.__buffer_size = 400
self.__buffer = numpy.array([], dtype=numpy.int16)
self.__out_buffer = numpy.array([], dtype=numpy.int16)
self.__hstep = 4800 # designed for high frequency data
self.__hbuffer_size = 4800
self.__hbuffer = numpy.array([], dtype=numpy.int16)
self.__hout_buffer = numpy.array([], dtype=numpy.int16)
self.__n = 0
self.__VADthd = 0.
self.__VADn = 0.
self.__silence_counter = 0
self.__iframe = 0
# Voice Activity Detection
# Adaptive threshold
def vad(self, _frame):
frame = numpy.array(_frame) ** 2.
# frame = numpy.array(_frame)
result = True
threshold = 0.25
thd = numpy.min(frame) + numpy.ptp(frame) * threshold
self.__VADthd = (self.__VADn * self.__VADthd + thd) / float(self.__VADn + 1.)
self.__VADn += 1.
# print("Mean is {}\n thd is {}".format(numpy.mean(frame), self.__VADthd))
if numpy.mean(frame) <= self.__VADthd:
self.__silence_counter += 1
else:
self.__silence_counter = 0
if self.__silence_counter > 25:
result = False
return result
# Push new audio samples into the buffer.
def add_samples(self, data, hdata):
self.__buffer = numpy.append(self.__buffer, data)
self.__hbuffer = numpy.append(self.__hbuffer, hdata)
result = len(self.__buffer) >= self.__buffer_size
# print('__buffer size %i'%self.__buffer.size)
return result
# Pull a portion of the buffer to process
# (pulled samples are deleted after being
# processed
def get_frame(self):
window = self.__buffer[:self.__buffer_size]
self.__buffer = self.__buffer[self.__step:]
self.__iframe = self.__iframe + 1
# print('__buffer size %i'%self.__buffer.size)
hwindow = self.__hbuffer[:self.__hbuffer_size]
self.__hbuffer = self.__hbuffer[self.__hstep:]
# self.__iframe = self.__iframe + 1
return window, hwindow
# Adds new audio samples to the internal
# buffer and process them
def process(self, data, hdata):
if self.add_samples(data, hdata):
while len(self.__buffer) >= self.__buffer_size:
# Framing
window, hwindow = self.get_frame()
# print('window size %i'%window.size)
result = self.vad(window)
if result:
self.__out_buffer = numpy.append(self.__out_buffer, window)
self.__hout_buffer = numpy.append(self.__hout_buffer, hwindow)
# remove high frequency data only based on low frequency results
# else:
# print("current frame is {}".format(self.__iframe))
# if self.vad(window): # speech frame
# self.__out_buffer = numpy.append(self.__out_buffer, window)
# print('__out_buffer size %i'%self.__out_buffer.size)
def get_voice_samples(self):
return self.__out_buffer, self.__hout_buffer
if __name__ == '__main__':
low_files = glob.glob("../oakland-dataset/lowfrequency/*/*.wav")
high_files = glob.glob("../oakland-dataset/dataset_1/*/*.wav")
print(low_files)
print(high_files)
i=0
for low_file, high_file in zip(low_files, high_files):
wav = wf.read(low_file)
hwav = wf.read(high_file)
sr = wav[0]
c0 = wav[1]
hsr = hwav[0]
hc0 = hwav[1]
vad = VoiceActivityDetection()
vad.process(c0, hc0)
voice_samples, hvoice_samples = vad.get_voice_samples()
outfile = low_file[:-4] + "cut.wav"
wf.write(outfile, sr, voice_samples)
houtfile = high_file[:-4] + "highcut.wav"
wf.write(houtfile, hsr, hvoice_samples)
i = i + 1
| true |
7f1da96805ab9d23476a2625f06bf0314faa66e2 | Python | ujjwal-raizada/A-Star-on-maps | /main-using-database.py | UTF-8 | 1,957 | 2.703125 | 3 | [] | no_license | import requests
import pprint
import env
import json
import pickle
import math
import flexpolyline as fp
from gmplot import gmplot
START_LAT = 17.5449
START_LNG = 78.5718
DEST_LAT = 17.2403
DEST_LNG = 78.4294
VIA_LAT = 17.4033
VIA_LNG = 78.4707
INFINITY = 10000000
with open("distance_matrix_db", "rb+") as file:
distance_matrix = pickle.load(file)
with open("graph_db", "rb+") as file:
graph = pickle.load(file)
with open("gmap_db", "rb+") as file:
gmap = pickle.load(file)
def h_dist_dest(a):
if distance_matrix.get(a) != None:
return distance_matrix[a]
return INFINITY
def distance(a, b):
p1, p2 = a
p3, p4 = b
return math.sqrt(((p3 - p1) ** 2) + ((p4 - p2) ** 2))
# a star implementation
open_list = []
closed_list = []
g = {}
g[(START_LAT, START_LNG)] = 0
parent = {}
open_list.append((0, (START_LAT, START_LNG)))
while len(open_list) > 0:
node = min(open_list)
print(node)
open_list.remove(node)
closed_list.append(node)
node = node[1]
gmap.marker(node[0], node[1], 'brown')
if (node == (DEST_LAT, DEST_LNG)):
print("found")
break
for child in graph[node]:
print("parent: {}, child: {}".format(node, child))
g_temp = g[node] + distance(node, child)
gv = INFINITY
if g.get(child) != None:
gv = g[child]
if gv > g_temp:
parent[child] = node
g[child] = g_temp
# f = g[child] + h_dist_dest(child)
f = g[child] + distance(child, (DEST_LAT, DEST_LNG))
if child not in closed_list:
open_list.append((f, child))
pprint.pprint(parent)
path = []
temp_node = (DEST_LAT, DEST_LNG)
while temp_node != (START_LAT, START_LNG):
path.append(temp_node)
temp_node = parent[temp_node]
lat_list2, lng_list2 = zip(*path)
gmap.plot(lat_list2, lng_list2, 'darkgreen', edge_width=10)
gmap.draw("my_map.html") | true |
923fce2a3c1e7b43d145d0bafc6c2dc9f4b43e82 | Python | jiangxiaosheng/Pic2Pic | /cv/feature/utils/distance.py | UTF-8 | 724 | 3.03125 | 3 | [] | no_license | import numpy as np
from scipy.spatial.distance import mahalanobis, cityblock, euclidean, chebyshev, cosine, correlation
def distance(v1, v2, method):
if method == 'cityblock':
return cityblock(v1, v2)
elif method == 'euclidean':
return euclidean(v1, v2)
elif method == 'cosine':
return cosine(v1, v2)
elif method == 'chebyshev':
return chebyshev(v1, v2)
elif method == 'correlation':
return correlation(v1, v2)
else:
return None
def distance_mahala(v1, v2, samples):
v = np.vstack(samples)
S = np.cov(v)
SI = np.linalg.inv(S)
try:
return mahalanobis(v1, v2, SI)
except:
return distance(v1, v2, method='cosine')
| true |
0dff0ca390b7dbe5623452539dd8c3e6cb55306b | Python | ianoti/Andela_Camp | /fizz_buzz.py | UTF-8 | 253 | 3.75 | 4 | [] | no_license | def fizz_buzz(arg):
while type(arg) == int:
if arg % 3 == 0 and arg % 5 == 0:
return "FizzBuzz"
elif arg % 3 == 0:
return "Fizz"
elif arg % 5 == 0:
return "Buzz"
else:
return arg
else:
return "please give only integers as input"
| true |
4fefeb4b05fb3e43802e749282fc72b5329ee20b | Python | chujiang/DAOCloud_DEMO | /application.py | UTF-8 | 678 | 2.875 | 3 | [] | no_license | import os
import flask
import redis
application = flask.Flask(__name__)
application.debug = True
@application.route('/')
def hello_world():
storage = Storage()
storage.populate()
score = storage.score()
return "Hello world, %s!" % score
class Storage():
def __init__(self):
self.redis = redis.Redis(
host=os.getenv('REDIS_PORT_6379_TCP_ADDR', 'localhost'),
port=int(os.getenv('REDIS_PORT_6379_TCP_PORT', '6379')),
password=os.getenv('REDIS_PASSWORD', ''))
def populate(self):
self.redis.set('score', '1234')
def score(self):
return self.redis.get('score')
if __name__ == "__main__":
application.run(host='0.0.0.0', port=3000)
| true |
c5b75c708242a2aebea73c1a79eaa78bcdb11642 | Python | Lukazovic/Learning-Python | /CursoEmVideo/exercicio050 - impares digitados.py | UTF-8 | 345 | 4.1875 | 4 | [] | no_license | # Soma dos ímpares digitados pelo usuário
print('\n---Soma dos números ímpares---')
somaImpares = 0
for i in range(0, 6):
numeroDigitado = int(input('Digite um número inteiro: '))
if numeroDigitado % 2 != 0:
somaImpares += numeroDigitado
print('\nA soma dos números ímpares digitador por você deu: {} \n' .format(somaImpares)) | true |
b196618a83786684605b9ada9f4061eb1646dcc5 | Python | mo-mo-666/AtCoder | /legacy/KEYENCE2019/A1.py | UTF-8 | 125 | 3.125 | 3 | [
"MIT"
] | permissive | n = list(map(int, input().split()))
ans = 'NO'
if 1 in n and 9 in n and 7 in n and 4 in n:
ans = 'YES'
print(ans) | true |
bdcfe1fc61e298260e21bc36f157118a6c5865ac | Python | pks3kor/For_GitHub | /Learn_Python/Python_Training/Batch-3/01_Apr_2018/test4.py | UTF-8 | 150 | 3.171875 | 3 | [] | no_license | #~ print [x for x in range(50) if x%2==0 and x >22 and x<44]
print [x*x for x in range(50)]
#~ a = range(10)
#~ print [a[x]*a[x] for x in range(10) ]
| true |
59b8eebaa904d5c12ff37205a58a82e2a4c3d0d1 | Python | MobProgramming/MobTimer.Python | /Infrastructure/TipsManager.py | UTF-8 | 984 | 3 | 3 | [
"MIT"
] | permissive | import os, random
import sys
from Infrastructure.FileUtilities import FileUtilities
from Infrastructure.PathUtility import PathUtility
class TipsManager(object):
def __init__(self, seed=None, root_directory=sys.argv[0]):
self.file_utility = FileUtilities()
self.root_directory = self.file_utility.go_up_dirs(root_directory, 1)
if seed is not None:
random.seed(seed)
def get_random_tip(self):
tips_folder = self.root_directory + "/Tips"
random_file = random.choice(os.listdir("%s" % tips_folder))
random_file_path = tips_folder + "\\" + random_file
return "{}: {}" .format(random_file, TipsManager.random_line(random_file_path))
@staticmethod
def random_line(file_name):
with open(file_name) as a_file:
line = next(a_file)
for num, aline in enumerate(a_file):
if random.randrange(num + 2): continue
line = aline
return line | true |
763904588326a731bfe8cbc8ea8ac747cd5b226b | Python | kai-anderson/GamestonkTerminal | /gamestonk_terminal/forex/av_view.py | UTF-8 | 1,970 | 2.796875 | 3 | [
"MIT"
] | permissive | """AlphaVantage Forex View"""
__docformat__ = "numpy"
import pandas as pd
from tabulate import tabulate
import mplfinance as mpf
import matplotlib.pyplot as plt
from gamestonk_terminal.forex import av_model
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.helper_funcs import plot_autoscale
def display_quote(to_symbol: str, from_symbol: str):
"""Display current forex pair exchange rate
Parameters
----------
to_symbol : str
To symbol
from_symbol : str
From forex symbol
"""
quote = av_model.get_quote(to_symbol, from_symbol)
if not quote:
print("Quote not pulled from AlphaVantage. Check API key.")
return
df = pd.DataFrame.from_dict(quote)
df.index = df.index.to_series().apply(lambda x: x[3:]).values
df = df.iloc[[0, 2, 5, 4, 7, 8]]
if gtff.USE_TABULATE_DF:
print(tabulate(df, tablefmt="fancy_grid"))
else:
print(df.to_string())
print("")
def display_candle(data: pd.DataFrame, to_symbol: str, from_symbol: str):
"""Show candle plot for fx data
Parameters
----------
data : pd.DataFrame
Loaded fx historical data
to_symbol : str
To forex symbol
from_symbol : str
From forex symbol
"""
mc = mpf.make_marketcolors(
up="green",
down="red",
edge="black",
wick="black",
volume="in",
ohlc="i",
)
s = mpf.make_mpf_style(marketcolors=mc, gridstyle=":", y_on_right=True)
mpf.plot(
data,
type="candle",
mav=(20, 50),
volume=False,
title=f"\n{to_symbol}/{from_symbol}",
xrotation=10,
style=s,
figratio=(10, 7),
figscale=1.10,
figsize=(plot_autoscale()),
update_width_config=dict(
candle_linewidth=1.0,
candle_width=0.8,
),
)
if gtff.USE_ION:
plt.ion()
plt.show()
print("")
| true |
319f7b2578042fbd1ea15353e238323d8d4fda85 | Python | Aka-Ikenga/Word-Cookies | /Cookies.py | UTF-8 | 753 | 3.59375 | 4 | [] | no_license | import itertools as it
import pprint as pp
def word_cookies(cookie):
with open('Common English Words.txt', 'r') as fh:
fh = fh.readlines()
valid_words = {i.strip() for i in fh} # a set containing all the words
cookies = {}
for i in range(3, len(cookie)+1):
# permutation produces invalid words, an intersection with the set of valid words will return
# the valid words produced by the permutation operation
words = {''.join(i) for i in it.permutations(cookie, i)} & valid_words
# there might be no valid words and permutation will return an empty set.
# no need to add them to the dictionary of cookies
if words != set():
cookies[f'{i} Letter Words'] = words
pp.pprint(cookies, indent=2)
word_cookies('Excellent')
| true |
525d1d5e4b4e89f8f061b920228d51ea3b59678d | Python | gfcarvalho2304/PythonExercicios | /JogoForca/ForcaPoo/Adicionar.py | UTF-8 | 3,004 | 3.6875 | 4 | [
"MIT"
] | permissive | from formata import * #importa pacote de formatação
from cadastro import * #importa o pacote de cadastro das palavras
############################################
# CLASSE CADASTRO DE PALAVRAS #
############################################
class Cadastrar:
def __init__(self):
############################################
# VARIÁVEIS DAS CLASSE #
############################################
palavras = 'palavras.txt' #Arquivo das palavras do jogo
dicionario = 'dicionario.txt'
palavrasValidas = []
listaPalavras = [] # Inicializa lista que vai receber as palavras cadastradas
############################################################
# ABRE O ARQUIVO TEXTO E CARREGA AS PALAVRAS EM UMA LISTA #
############################################################
if not arquivoExiste(palavras): # se o arquivo texto não existir ele é criado
criarArquivo(palavras)
# a = open(palavras, 'rt')
with open(palavras, 'rt') as a:
for l in a:
listaPalavras.append(l.replace('\n', ''))
############################################################
# ABRE O ARQUIVO DO DICIONÁRIO DE PALAVRAS VÁLIDAS #
############################################################
# d = open(dicionario, 'rt', encoding="UTF-8") #necessário parâmetro encoding para reconhecer as palavras do arquivo
with open(dicionario, 'rt', encoding="UTF-8") as d:
for x in d:
palavrasValidas.append(x.replace('\n', '').upper())
while True:
palavra = str(leia('Digite a palavra que gostaria de cadastrar:'))
if palavra.upper() in listaPalavras: # verifica se a palavra já existe na lista
erro('ERRO! A palavra digitada já existe na lista de plavras.')
elif palavra.upper() not in palavrasValidas:
erro('ERRO! Palavra inválida!')
elif palavra.isalpha() and len(palavra) > 1: # verifica se a palavra é do tipo alpha
# (não numerico ou chars especiais)
cadastrarPalavra(palavras, palavra.replace) # armazena palavra no arquivo texto
listaPalavras.append(palavra.upper()) # adiciona a palavra na lista gerada no início do programa
continuar = str(leia('Deseja continuar cadastrando?')) # verifica se o usuário deseja continuar
while continuar not in 'nNsS': # certifica que apenas s ou n será digitado
erro('ERRO! Digite apenas S ou N')
continuar = str(leia('Deseja continuar cadastrando?'))
if continuar in 'nN':
break
else:
erro('ERRO! Cadastre apenas palavras válidas!')
titulo('Bem vindo ao jogo de Forca!') | true |
74d5ae74fb9dae2be5899469b4756cfebc9f5db0 | Python | FilipLe/DailyInterviewPro-Unsolved | /Convert to Base Two (SOLVED)/ConvertToBaseTwo.py | UTF-8 | 280 | 3.671875 | 4 | [] | no_license | def base_2(n):
# Fill this in.
binary = ""
while n > 0:
remainder = n % 2
n = (n - remainder)/2
if remainder == 0:
binary = "0" + binary
else:
binary = "1" + binary
return binary
print(base_2(123))
# 1111011 | true |
ce7ebff6d17632682924f49472640883067b87e7 | Python | genzj/goTApaper | /contrib/gen_iconset.py | UTF-8 | 2,566 | 2.6875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
# coding: utf-8
# credit: https://retifrav.github.io/blog/2018/10/09/macos-convert-png-to-icns/
# Given proper (512x512x300 dpi/1024x1024x300 dpi) .png image, generate
# iconset folder and .icns icon.
# Ported Python 3 specific code to Python 2.7.10 for Apple Python support
# VikingOSX, 2019-04-04, Apple Support Communities, No Warranty at all.
import subprocess
import os
import sys
class IconParameters():
width = 0
scale = 1
def __init__(self, width, scale, ext):
self.width = width
self.scale = scale
self.ext = ext
def getIconName(self):
if self.scale != 1:
return "icon_{}x{}{}".format(self.width, self.width, self.ext)
else:
return "icon_{}x{}@2x{}".format(self.width // 2, self.width // 2,
self.ext)
def main():
if len(sys.argv) < 2:
print("No path to original / hi-res icon provided")
raise SystemExit
if len(sys.argv) > 2:
print("Too many arguments")
raise SystemExit
originalPicture = sys.argv[1]
if not (os.path.isfile(originalPicture)):
print("There is no such file: {}\n".format(sys.argv[1]))
raise SystemExit
fname, ext = os.path.splitext(originalPicture)
destDir = os.path.dirname(originalPicture)
iconsetDir = os.path.join(destDir, "{}.iconset".format(fname))
if not (os.path.exists(iconsetDir)):
os.mkdir(iconsetDir, 0o0755)
ListOfIconParameters = [
IconParameters(16, 1, ext),
IconParameters(16, 2, ext),
IconParameters(32, 1, ext),
IconParameters(32, 2, ext),
IconParameters(64, 1, ext),
IconParameters(64, 2, ext),
IconParameters(128, 1, ext),
IconParameters(128, 2, ext),
IconParameters(256, 1, ext),
IconParameters(256, 2, ext),
IconParameters(512, 1, ext),
IconParameters(512, 2, ext),
IconParameters(1024, 1, ext),
IconParameters(1024, 2, ext)
]
# generate iconset
for ip in ListOfIconParameters:
subprocess.call(["sips", "-z", str(ip.width), str(ip.width),
originalPicture, "--out",
os.path.join(iconsetDir, ip.getIconName())])
# print("Generated: {}\n".format(ip.getIconName()))
# convert iconset to icns file
subprocess.call(["iconutil", "-c", "icns", iconsetDir, "-o",
os.path.join(destDir, "{}.icns".format(fname))])
if __name__ == '__main__':
sys.exit(main())
| true |
1d3b22855a982a336fd933040ce9afa25165ad07 | Python | Humility-K/Rosalind | /040_MMCH/040_MMCH.py | UTF-8 | 994 | 2.984375 | 3 | [] | no_license | '''
My solution to Rosalind Bioinformatics Problem 040
Title: Maximum Matchings and RNA Secondary Structures
Rosalind ID: MMCH
Rosalind #: 040
URL: http://rosalind.info/problems/mmch
Goal to return the number of possible maximum matchings of
basepair edges in a given rna sequence.
'''
from Bio import SeqIO
from math import factorial
f = open("/Rosalind/data/rosalind_mmch.txt", 'r')
raw = SeqIO.read(f, "fasta")
f.close()
seq = str(raw.seq)
# also in my combination functions script in 'scripts'
def nPr(n, r):
return factorial(n)/factorial(n-r)
AU = []
for nt in 'AU':
AU.append(seq.count(nt))
GC = []
for nt in 'GC':
GC.append(seq.count(nt))
''' Since we don't care about overlapping edges, we can just
look at the number of permutations. They can only be
a length of the minimum between complementary base pairs.
'''
num_matches = nPr(max(AU), min(AU))*nPr(max(GC), min(GC))
o = open("/Rosalind/output/040_MMCH.txt", 'w')
o.write(str(num_matches))
o.close()
| true |
56be9032535118c2cf9ed39e8f1ea5539b03d61e | Python | WildStriker/advent_of_code_2019 | /advent_of_code/day_14/commands.py | UTF-8 | 260 | 2.515625 | 3 | [] | no_license | """day 14 group set up"""
import click
from day_14.part_01 import part_01
from day_14.part_02 import part_02
@click.group()
def day_14():
"""Day 14: Space Stoichiometry"""
# add individual parts
day_14.add_command(part_01)
day_14.add_command(part_02)
| true |
919e0fcd2eec8ad52f8b5ce2ae7c81e9aea63ed5 | Python | Dyrnwyn/3dFutureProgramm | /count_cut_element/gui.py | UTF-8 | 3,340 | 2.546875 | 3 | [] | no_license | import sys
from PyQt5.QtWidgets import (QApplication, QWidget, QPushButton, QFileDialog,
QLineEdit, QGridLayout, QProgressBar, QMessageBox,
QGroupBox, QMenuBar)
import os
import combinator
class guiInterface(QWidget):
"""docstring for guiInterface"""
def __init__(self):
super().__init__()
self.initUi()
def initUi(self):
self.setGeometry(0, 0, 400, 100)
self.setWindowTitle("Вырезка элемента")
self.move(650, 490)
self.grpboxPath = QGroupBox("Выберите объект", self)
self.grpboxObjectName = QGroupBox("Cюда не смотрим, вообще, просто тыкаем на кнопки", self)
self.btn = QPushButton('Начать', self)
self.rbtn = QPushButton('Обзор', self)
self.qbtn = QPushButton('Выход', self)
self.le = QLineEdit(self)
self.leObjectName = QLineEdit(self)
self.prgBar = QProgressBar(self)
self.prgBar.setMaximum(100)
self.rbtn.clicked.connect(self.getPath)
self.qbtn.clicked.connect(self.close)
self.btn.clicked.connect(self.generatePreview)
self.le.textChanged.connect(self.getObjectName)
self.msgBox = QMessageBox(self)
gridPath = QGridLayout()
gridPath.addWidget(self.le, 1, 0)
gridPath.addWidget(self.rbtn, 1, 1)
self.grpboxPath.setLayout(gridPath)
gridObjectName = QGridLayout()
gridObjectName.addWidget(self.leObjectName, 2, 0)
self.grpboxObjectName.setLayout(gridObjectName)
grid = QGridLayout()
grid.setSpacing(10)
grid.addWidget(self.prgBar, 2, 0, 1, 2)
grid.addWidget(self.grpboxPath, 3, 0, 1, 2)
grid.addWidget(self.grpboxObjectName, 4, 0, 1, 2)
grid.addWidget(self.btn, 7, 0)
grid.addWidget(self.qbtn, 7, 1)
self.setLayout(grid)
self.show()
def setTextMsg(self):
self.msgBox.setText("Количество шаблонов с вырезкой: " + str(combinator.count))
combinator.count = 0
def getPath(self):
path = QFileDialog.getExistingDirectory()
self.le.setText(path)
self.leObjectName.setText(path.split("/")[-1])
def getObjectName(self):
path = self.le.text()
self.leObjectName.setText(path.split("\\")[-1])
def generatePreview(self):
folder = self.le.text()
os.chdir(folder)
#combinator.createCupDir()
self.prgBar.setValue(5)
psdFl = combinator.searchFl("psd", folder)
self.prgBar.setValue(10)
count = combinator.findCutTemplate(psdFl)
self.setTextMsg()
self.prgBar.setValue(15)
# combinator.convertPsd(cupFl, combinator.cup_dir)
# self.prgBar.setValue(60)
# pngCupFl = combinator.searchFl("png", combinator.cup_dir)
# self.prgBar.setValue(70)
# dictOfClass = combinator.createDictClass(pngCupFl)
# self.prgBar.setValue(80)
# combinator.generatePngForPrint(dictOfClass, self.leObjectName.text())
# self.prgBar.setValue(95)
# combinator.delOldPng(pngCupFl)
self.prgBar.setValue(100)
self.msgBox.exec_()
| true |
cff11302f34e82f919f749f5fc63040640670035 | Python | jaycody/mypy | /miscpy/math_prac.py | UTF-8 | 3,501 | 3.578125 | 4 | [] | no_license | #!/usr/bin/env python -tt
"""jstephens - pyfu - 2014 july
Variety of number manipulations
"""
import sys
import math
# my home made module
import file_reader
def create_fileRef(filename):
"""Create and return a file object
"""
fileRef = open(filename)
return fileRef
def evens(fileRef):
"""Create a list of even numbers from 0 - n.
fileRef: file object passed from main()
"""
# Make a list where each element is a single line from file object
allLines = fileRef.readlines()
# extract the number n from each line of the text
for line in allLines:
# strip white space
n = line.rstrip('\n')
### what's the difference between foo.rstrip() and foo.strip()??"
#print type(n)
n = int(n)
# only print the evens
if n % 2 == 0 or n == 0:
print n
fileRef.close()
def odds():
"""Use a list of numbers from an external file to create a list of odd nums
"""
fileRef = file_reader.get_fileRef('numbers.txt')
allLines = fileRef.readlines()
for line in allLines:
n = line.rstrip('\n')
n = int(n)
if n % 2 == 1:
print n
fileRef.close()
def generator(maxNum):
"""Prints a list of numbers of specified length
"""
#fileRef = open('morenums.txt', 'r+')
n = 0
while True:
print n
n = n +1
if n > maxNum:
break
def isPrime(n):
"""Determine if n is prime
"""
'''
# Hhahahahah === the wrong way
for n in range(maxNum):
for divisor in range(n):
if divisor != 0:
if divisor != 1:
if divisor != n:
if n % divisor == 0:
print "%d is prime " % (n)
#print '%d is not prime. Dividable by %d' % (n, divisor)
break
else:
print '%d is not prime. Dividable by %d' % (n, divisor)
'''
if n < 2:
print "%d is not prime" % n
return False
possibleDivisor = 2
# One number of the 2 multiples needs to be less than the square root
#while possibleDivisor <= math.sqrt(n)
#isPrime = True
for i in range(possibleDivisor):
if n % possibleDivisor == 0:
print "%d is not prime. Divisible by %d" % (n, possibleDivisor)
return False
possibleDivisor += 1
print "%d is prime." % n
return True
#fileRef = file_reader.get_fileRef('numbers.txt')
# Define Prime: divisible by 1 and by itself only.
#for i in range(100):
def recursive_isPrime(n, possibleDivisor):
"""Determine if a given number is prime using recursive method
[ ] Start this one from scratch after a full nights rest...........
"""
if n < 2:
print "%d is not prime." % n
return False
if possibleDivisor == n:
print "%d is prime." % n
return True
if n % possibleDivisor == 0:
print "%d is not prime." % n
return False
if possibleDivisor <= math.sqrt(n):
print "%d is prime." % n
return True
return recursive_isPrime(n, possibleDivisor + 1)
def main():
"""Handle command line args.
"""
lenInput = len(sys.argv)
if lenInput > 4:
print "usage: ./math_exercises.py [--function name] [--input filename | NULL]"
sys.exit(1)
function = sys.argv[1]
filename = sys.argv[2]
# Create a file object from specified file containing list of numbers
fileRef = create_fileRef('numbers.txt')
if function == '--evens':
evens(fileRef)
if function == '--odds':
odds()
if function == '--generator':
generator(int(sys.argv[2]))
if function == '--isPrime':
isPrime(int(sys.argv[2]))
if function == '--recursive_isPrime':
recursive_isPrime(int(sys.argv[2]), int(sys.argv[3]))
else:
print "unknown function: " + function
sys.exit(1)
if __name__ == '__main__':
main() | true |
a066ba3c08d259dc992e0042ac86e78aa2e0f6c5 | Python | woodychang0611/PortfolioDRL | /Model.py | UTF-8 | 7,599 | 2.75 | 3 | [] | no_license | import pandas as pd
import os
import numpy as np
import math
import datetime
import gym
import csv
import matplotlib.pyplot as plt
import logging
from random import randint
def next_year(year,month):
year,month = (year+1,1) if (month ==12) else (year,month+1)
return (year,month)
def acc_return(profits):
return (1+profits).cumprod()-1
def maxdrawdown(x,display=False):
e = np.argmax(np.maximum.accumulate(x) - x) # end of the period
s = e if (e==0) else np.argmax(x[:e])
if (display):
plt.plot(x)
plt.plot([e, s], [x[e], x[s]], 'o', color='Red', markersize=10)
plt.show()
return (x[e]-x[s])/(1+x[s])
def get_cagr(profits):
return math.pow(1+acc_return(profits)[-1],12.0/len(profits))-1
def get_score(cagr,mdd):
#use double sigmoid
b= 1+1/(1+np.exp(-(mdd-0.15)*200))+1/(1+np.exp(-(mdd-0.2)*200))
return 100*cagr/b
def portfolios_to_csv(portfolios,start_year,start_month,file):
ids,data = np.unique([item[0] for sublist in portfolios for item in sublist]),{}
data['DAYM']=[]
year,month = start_year,start_month
for id in ids:
data[id]=[]
for portfolio in portfolios:
data[id].append(next(iter([i[1] for i in portfolio if i[0]==id]),'NA'))
for portfolio in portfolios:
data['DAYM'].append(datetime.datetime(year,month,1))
year,month = next_year(year,month)
pd.DataFrame(data=data).to_csv(file,index=False)
class FundData:
def __init__(self,data_src: str):
self.data = None
parse_dates = ['DATAYM']
self.data=pd.read_csv(data_src,parse_dates=parse_dates).groupby(['ISINCODE','DATAYM'])
def get_risk_types(self):
return self.data['RISKTYPENAME'].unique()
def get_perofrmence_id(self,risk_type=None):
src = self.data[self.data['RISKTYPENAME']==risk_type] if (risk_type!=None) else self.data
return src['ISINCODE'].unique()
def fund_return(self,isin_code,date):
try:
#ret = self.data[(self.data.ISINCODE==isin_code)& (self.data.DATAYM ==date)]['RET1M'].values
data = self.data.get_group((isin_code,date))
ret = data['RET1M'].values
#return 0 for non-existing data
return ret[0] if(len(ret)==1 and not np.isnan(ret[0])) else 0
except:
return 0
def portfolios_return(self,portfolios,start_year,start_month):
profits = []
year,month = start_year,start_month
for portfolio in portfolios:
profit =0
for id, weight in portfolio:
ret = self.fund_return(id,datetime.datetime(year,month,1))
profit += weight*ret
year,month = next_year(year,month)
profits.append(profit)
profits=np.array(profits)
transfer_count=0
cagr = get_cagr(profits)
mdd = maxdrawdown(acc_return(profits))
return cagr,mdd,transfer_count
class Market_Env():
def __init__ (self,feature_src,fund_map_src,fund_return_src,
equity_limit=0.75,episode_limit=30,validation=False):
self.fund_data = FundData(fund_return_src)
self.feature_data = pd.read_csv(feature_src,parse_dates=['Date'])
self.fund_map = pd.read_csv(fund_map_src)
self.funds = self.fund_map['ISINCODE'].values
self.state_dim =self.feature_data.shape[1]-1 #skip Date
self.max_action = 1
self.action_dim = len(self.funds)+1 #one more action for risk off
self.equity_limit=equity_limit
self.episode_limit=episode_limit
logging.info(f'model par: state:{self.state_dim} action:{self.action_dim}')
@property
def state(self):
date=datetime.datetime(self.year,self.month,1)
state = self.feature_data[self.feature_data['Date']==date].to_numpy()[0][1:].astype(float)
return state
@property
def done(self):
return True if (self.episode>=self.episode_limit) else False
def create_portfolio(self,inputs,max_fund_count=6):
funds = self.funds
if(len(inputs)!=len(funds)+1):
logging.warning(f"size of inputs and funds does not match should be {len(funds)+1}")
return None
rand_list = 0.001*np.random.rand(len(inputs))
#replace NaN = 0
#inputs = [0 if np.isnan(i) else i for i in inputs]
inputs = inputs *(1+ rand_list)
inputs = inputs + 1 + rand_list
#print(inputs)
threshold = inputs[np.argsort(inputs[:-1])[-max_fund_count]]
weights = [i if i >= threshold else 0 for i in inputs[:-1]]
if(sum(weights)==0):
print(f"weights sum is zero !!!!")
print(f'rand_list {rand_list}')
print(f"inputs {inputs}")
print(f"weights {weights}")
print(f"threshold{threshold}")
weights = np.random.rand(len(funds))
weights = weights/sum(weights)
weights = self.adjust_weight(weights)
portfolio =[]
for batch_id, weight in enumerate(weights):
if(not np.isclose(weight,0)):
portfolio.append((self.funds[batch_id],weight))
return portfolio
def adjust_weight(self,weights):
#adjust equity to be under equity_limit
equity_sum = 0
for batch_id, weight in enumerate(weights):
fund = self.funds[batch_id]
risk_type = self.get_fund_risk_type(fund)
equity_sum += (weight if risk_type=='Equity' else 0)
logging.debug(f'equity_weight_sum:{equity_sum}')
if(equity_sum>self.equity_limit):
ratio = (self.equity_limit-equity_sum*self.equity_limit) / (equity_sum-equity_sum*self.equity_limit)
for batch_id, _ in enumerate(weights):
fund = self.funds[batch_id]
risk_type = self.get_fund_risk_type(fund)
if risk_type=='Equity':
weights[batch_id] = weights[batch_id]*ratio
weights = weights/sum(weights)
equity_sum=0
for batch_id, weight in enumerate(weights):
fund = self.funds[batch_id]
risk_type = self.get_fund_risk_type(fund)
equity_sum += (weight if risk_type=='Equity' else 0)
logging.debug(f'equity_weight_sum after adjust:{equity_sum}')
return weights
def get_fund_risk_type(self,fund):
return self.fund_map[self.fund_map['ISINCODE']==fund]['RISKTYPENAME'].values[0]
def seed(self,seed):
pass
def reset(self,validation=False):
self.episode=0
self.start_year, self.start_month = (2014,7) if (validation) else (randint(1998,2011), randint(1,12))
self.year,self.month = self.start_year, self.start_month
self.portfolios=[]
self.score =0
return self.state
def step(self,action):
reward=0
portfolio = self.create_portfolio(action)
self.portfolios.append(portfolio)
old_score = self.score
cagr,mdd,_ = self.fund_data.portfolios_return(self.portfolios,self.start_year,self.start_month)
self.score = get_score(cagr,mdd)
reward= self.score - old_score
if(not self.done):
self.episode+=1
self.year, self.month = next_year(self.year, self.month)
return self.state, reward, self.done
| true |
e21d6a237e50d3eec0d94c6479a3b253a06bec65 | Python | Jesus10RC/APIE_CAPM_NormalizedReturns_PlotTimeSeries | /stream_classes.py | UTF-8 | 5,823 | 2.96875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Create classes
"""
import numpy as np
import pandas as pd
import matplotlib as mpl
import scipy
import importlib
import matplotlib.pyplot as plt
from scipy.stats import skew, kurtosis, chi2, linregress
# Import our own Function and Class files and Reload
import stream_functions
importlib.reload(stream_functions)
class jarque_bera_test():
def __init__(self, x, x_str):
self.returns = x
self.str_name = x_str
self.size = len(x) #Size of returns
self.round_digits = 4
self.mean = 0.0
self.stdev = 0.0
self.skew = 0.0
self.kurt = 0.0
self.median = 0.0
self.var_95 = 0.0
self.cvar_95 = 0.0
self.jarque_bera = 0.0
self.p_value = 0.0
self.is_normal = 0.0
def compute(self):
self.mean = np.mean(self.returns)
self.stdev = np.std(self.returns) #Volatility
self.skew = skew(self.returns)
self.kurt = kurtosis(self.returns) # excess kurtosis
self.sharpe = self. mean / self.stdev * np.sqrt(252)
self.median = np.median(self.returns)
self.var_95 = np.percentile(self.returns,5)
self.cvar_95 = np.mean(self.returns[self.returns <= self.var_95])
self.jarque_bera = self.size/6*(self.skew**2 + 1/4*self.kurt**2)
self.p_value = 1 - chi2.cdf(self.jarque_bera, df=2) #Degree Freedom
self.is_normal = (self.p_value > 0.05 ) #Equivalenty x_jarque_bera < 6
def __str__(self):
str_self = self.str_name + ' / size ' + str(self.size) + '\n' + self.plot_str()
return str_self
def plot_str(self):
# Print Metrics in Graph
round_digits = 4
plot_str = 'mean ' + str(np.round(self.mean,round_digits))\
+ ' / std dev ' + str(np.round(self.stdev,round_digits))\
+ ' / skewness ' + str(np.round(self.skew,round_digits))\
+ ' / kurtosis ' + str(np.round(self.kurt,round_digits))\
+ ' / Sharpe ratio ' + str(np.round(self.sharpe,round_digits)) + '\n'\
+ 'VaR 95% ' + str(np.round(self.var_95,round_digits))\
+ ' / CVaR 95% ' + str(np.round(self.cvar_95,round_digits))\
+ ' / Jarque_Bera ' + str(np.round(self.jarque_bera,round_digits))\
+ ' / p_value ' + str(np.round(self.p_value,round_digits))\
+ ' / is_normal ' + str(self.is_normal)
return plot_str
class capm_manager():
def __init__(self, ric, benchmark):
self.nb_decimals = 4
self.ric = ric
self.benchmark = benchmark
self.x = []
self.y = []
self.t = []
self.beta = 0.0
self.alpha = 0.0
self.p_value = 0.0
self.null_hypothesis = False
self.r_value = 0.0
self.r_squared = 0.0
self.predictor_linreg = []
def __str__(self):
str_self = 'linear regression | ric ' + self.ric\
+ '| benchmark ' + self.benchmark + '\n'\
+ 'alpha (intercept)' + str(self.alpha)\
+ '| beta (slope) ' + str(self.beta) + '\n'\
+ 'p-value ' + str(self.p_value)\
+ '| null hypothesis ' + str(self.null_hypothesis) + '\n'\
+ 'r-value ' + str(self.r_value)\
+ ' | r_squared ' + str(self.r_squared)
return str_self
def load_timeseries(self):
#Load timeseries and synchronize
self.x, self.y, self.t = stream_functions.synchronize_timeseries(self.ric, self.benchmark)
def compute(self):
# Linear Regression of ric with respect to benchmark
slope, intercept, r_value, p_value, std_err = linregress(self.x,self.y)
self.beta = np.round(slope, self.nb_decimals)
self.alpha = np.round(intercept, self.nb_decimals)
self.p_value = np.round(p_value, self.nb_decimals)
self.null_hypothesis = p_value > 0.05 #p_value<0.05 Reject null hypothesis
self.r_value = np.round(r_value, self.nb_decimals) #Correlation Coefficient
self.r_squared = np.round(r_value**2, self.nb_decimals) # Pct of Variance of "y" explained by "x"
self.predictor_linreg = self.alpha + self.beta*self.x
def scatterplot(self):
#Scatterplot of returns
str_title = 'Scaterplot of returns' + '\n' + self.__str__()
plt.figure()
plt.title(str_title)
plt.scatter(self.x, self.y)
plt.plot(self.x, self.predictor_linreg, color='green')
plt.ylabel(self.ric)
plt.xlabel(self.benchmark)
plt.grid()
plt.show()
def plot_normalized(self):
price_ric = self.t['price_1']
price_benchmark = self.t['price_2']
plt.figure(figsize=(12,5))
plt.title('Time series of price | Normalized at 100')
plt.xlabel('Time')
plt.ylabel('Normalized Prices')
price_ric = 100 * price_ric / price_ric[0]
price_benchmark = 100 * price_benchmark / price_benchmark[0]
plt.plot(price_ric, color='blue', label=self.ric)
plt.plot(price_benchmark, color='red', label=self.benchmark)
plt.legend(loc=0)
plt.grid()
plt.show()
def plot_dual_axes(self):
plt.figure(figsize=(12,5))
plt.title('Time Series of Prices')
plt.xlabel('Time')
plt.ylabel('Price')
ax1 = self.t['price_1'].plot(color='blue', grid=True, label= self.ric)
ax2 = self.t['price_2'].plot(color='red', grid=True, secondary_y=True, label= self.benchmark)
ax1.legend(loc=2)
ax2.legend(loc=1)
plt.show() | true |
d96e5f774f7b0895938bfa30928fbd971c9d0cff | Python | sz376/chatroom | /pokemon_parser.py | UTF-8 | 729 | 2.6875 | 3 | [] | no_license | from os.path import join, dirname
from dotenv import load_dotenv
import os
import requests
import re
dotenv_path = join(dirname(__file__), "sql.env")
load_dotenv(dotenv_path)
def pokefacts(pokename):
url = f"https://pokeapi.co/api/v2/pokemon-species/{pokename}"
res = requests.get(url)
data = res.json()
for i in range(len(data["flavor_text_entries"])):
if data["flavor_text_entries"][i]["language"]["name"] == "en":
flavor_text = data["flavor_text_entries"][i]["flavor_text"]
text = ""
for char in flavor_text:
if char == "\n" or char == "\x0c":
text += " "
else:
text += char
return text
| true |
2428051c795e4443aa7b25604427b1ae679c7b9a | Python | Chrvasq/cs-module-project-recursive-sorting | /src/searching/searching.py | UTF-8 | 1,621 | 4.0625 | 4 | [] | no_license | # TO-DO: Implement a recursive implementation of binary search
def binary_search(arr, target, start, end):
# Your code here
start = start
end = end
while start <= end:
middle = (start + end) // 2
guess = arr[middle]
if guess is target:
return middle
elif guess > target:
return binary_search(arr, target, start, middle - 1)
else:
return binary_search(arr, target, middle + 1, end)
return -1
# STRETCH: implement an order-agnostic binary search
# This version of binary search should correctly find
# the target regardless of whether the input array is
# sorted in ascending order or in descending order
# You can implement this function either recursively
# or iteratively
def agnostic_binary_search(arr, target, start=0, end=None):
start = start
end = len(arr) - 1 if end is None else end
first_value = arr[0]
last_value = arr[-1]
middle = (start + end) // 2
guess = arr[middle]
while start <= end:
middle = (start + end) // 2
guess = arr[middle]
if guess is target:
return middle
elif first_value < last_value:
if guess > target:
return agnostic_binary_search(arr, target, start, middle - 1)
else:
return agnostic_binary_search(arr, target, middle + 1, end)
else:
if guess < target:
return agnostic_binary_search(arr, target, start, middle - 1)
else:
return agnostic_binary_search(arr, target, middle + 1, end)
return -1 | true |
df67087ea9df4eb41719166f081173e86a9e7e62 | Python | fooyou/Exercise | /python/sort.py | UTF-8 | 767 | 2.984375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: joshua
# @Date: 2014-11-17 15:14:51
# @Last Modified by: joshua
# @Last Modified time: 2014-11-18 20:09:04
import os
import re
# files = os.listdir('res')
# print(files)
# files.sort()
# for fl in files:
# print(fl)
fileslist = ['0.0-52.73.wav.txt', '102.14-152.32.wav.txt', '152.32-180.0.wav.txt', '52.73-102.14.wav.txt']
fileslist.sort(key=lambda x : float(re.split('\-', x)[0]))
for fl in fileslist:
# times = re.split('\-', fl)
print(fl, re.split('\-', fl)[0])
# def make_repeater (n):
# return lambda s: s*n
# twice = make_repeater(2)
# print(twice('word'))
# print(twice(5))
strtest = '0.0-52.73.wav.txt'
get_float = lambda x : re.split('\-', x)[0]
print(get_float(strtest)) | true |
ada3037f666e529d7bbf5b97473a046709616f0d | Python | FrankFacundo/ReadingMood | /rapport/annexes/code/src/PythonTest/Book.py | UTF-8 | 757 | 2.984375 | 3 | [] | no_license | import Atmosphere
class Livre(object) :
def __init__(self,title,author,summary,epub):
self.title = title
self.author = author
self.summary = summary
self.text = epub
self.ambiance
self.currentText
def get_items(self):
# Returns all the datas of the book
return self.title, self.author, self.summary, self.text
def get_ambiance(self):
# Returns the ambiance spread by this text, a piece of the book
return self.ambiance
def set_ambiance(self, newAmbiance):
# Changes the atmosphere
self.ambiance = newAmbiance
def get_currentText(self):
# Returns the current text that is read by the user
return self.currentText
| true |
a9df2d5b0616f6d9a525ec67033a377f161f3840 | Python | ahmadfaig/UdemyML | /Machine Learning A-Z Template Folder/Other/Passenger/poly.py | UTF-8 | 1,037 | 2.875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def parse(x):
return x[9:]
# Importing the dataset
dataset = pd.read_csv('data.csv', header = None)
X_temp = dataset.iloc[:,0].values
X = np.reshape(np.array([parse(x) for x in X_temp], int), (-1, 1))
y = dataset.iloc[:, 1].values
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree = 4)
X_poly = poly_reg.fit_transform(X)
X_poly2 = X_poly[50:57,:]
y2 = y[50:57]
poly_reg.fit(X_poly, y)
regressor = LinearRegression()
regressor.fit(X_poly, y)
X_res = np.reshape(np.array([i for i in range(61,73)]), (-1, 1))
X_poly_res = poly_reg.fit_transform(X_res)
y_act = [1563178,1312558,1501793,1388316,1325942,1410769,687396,1493945,1161128,590382,1082215,1416327]
plt.plot(X, y, color = 'red')
plt.plot(X, regressor.predict(poly_reg.fit_transform(X)), color = 'blue')
plt.plot(X_res, regressor.predict(X_poly_res), color = 'blue')
plt.plot(X_res, y_act, color = 'green')
| true |
09fa8d95b93fcd3b25906989664b8dbf75c5ff91 | Python | matthewmccullough/pythonreplit1 | /main.py | UTF-8 | 133 | 2.6875 | 3 | [] | no_license | import matplotlib as mpl
import matplotlib.pyplot as plt
# Smoke-test script: print a greeting, plot powers of two against their
# index, and save the figure to disk (no interactive window is opened).
print("Hello World!")
plt.plot([1, 2, 4, 8, 16])
plt.savefig('plot.png')
| true |
27cbd595efc383ad4515d8807cc485a772e68cd7 | Python | martiniblack/Movement-Representation | /FS_CEM.py | UTF-8 | 3,116 | 3 | 3 | [] | no_license | import numpy as np
from numpy import linalg as LA
import matplotlib.pyplot as plt
from scipy.io import loadmat # loading data from matlab
from scipy import fftpack # calculate the fft
# Parameters
nbData = 200 # Number of data points in a trajectory
nbStates = 10 # Number of basis functions
# Load handwriting data
letter = 'S' # choose a letter in the alphabet
datapath = '2Dletters/'
data = loadmat(datapath + '%s.mat' % letter)
demos = [d['pos'][0][0].T for d in data['demos'][0]] # cleaning awful matlab data
data = np.array(demos[1])
t = np.linspace(0, 1, nbData)
x = np.reshape(data, nbData * 2)
# Compute reward function
def rewardEval(p):
r = np.zeros((1, p.shape[1]))
for i in range(p.shape[1]):
w = p[:, i]
xr = np.dot(Psi, w) # Eq.(29)
r[:, i] = - np.sum(np.absolute(xr - x)) # The reward is the negative distance between xr and x
return r
# Compute basis functions Psi and activation weights w
phi = np.zeros((nbData, nbStates))
for i in range(nbStates):
xTmp = np.zeros((1, nbData))
xTmp[:, i] = 1
phi[:, i] = fftpack.idct(xTmp) # Discrete cosine transform
Psi = np.kron(phi, np.eye(2)) # Eq.(27)
# Parameters for CEM
nbVar = 2 * nbStates # Dimension of datapoints
nbEpisods = 2000 # Number of exploration iterations
nbE = 100 # Number of initial points (for the first iteration)
nbPointsRegr = 50 # Number of points with highest rewards considered at each iteration (importance sampling)
minSigma = np.eye(nbVar) * 1 # Minimum exploration covariance matrix
Sigma = np.eye(nbVar) * 50 # Initial exploration noise
p = np.empty((nbVar, 0)) # Storing tested parameters (initialized as empty)
r = np.empty((1, 0)) # Storing associated rewards (initialized as empty)
# Initialise the w parameter
w = np.zeros(nbVar) + 3
# EM-based stochastic optimization
for i in range(0, nbEpisods):
# Generate noisy data with variable exploration noise
D, V = LA.eig(Sigma)
pNoisy = np.tile(w.reshape(nbVar, 1), (1, nbE)) + np.dot(np.dot(V, np.diag(D ** 0.5)), np.random.randn(nbVar, nbE))
nbE = 1 # nbE=1 for the next iterations
# Compute associated rewards
rNoisy = rewardEval(pNoisy)
# Add new points to dataset
p = np.append(p, pNoisy, axis=1)
r = np.append(r, rNoisy, axis=1)
# Keep the nbPointsRegr points with highest rewards
rSrt, idSrt = np.flip(np.sort(r), 1), np.squeeze(np.flip(np.argsort(r), 1))
nbP = min(idSrt.shape[0], nbPointsRegr)
pTmp = p[:, idSrt[:nbP]]
rTmp = np.squeeze(rSrt[:, :nbP])
# Compute error term
eTmp = pTmp - np.tile(w.reshape(nbVar, 1), (1, nbP))
# CEM update of mean and covariance (exploration noise)
w = np.mean(pTmp, axis=1).reshape(nbVar, 1)
Sigma0 = (eTmp.dot(eTmp.conj().T)) / nbP
# Add minimal exploration noise
Sigma = Sigma0 + minSigma
xr = np.dot(Psi, w) # Eq.(29)
xr = xr.reshape(nbData, 2) # Reshape the data to 2D coordination
x = x.reshape(nbData, 2) # Reshape the data to 2D coordination
plt.figure()
plt.plot(xr[:, 0], xr[:, 1], '-r', alpha=1)
plt.plot(x[:, 0], x[:, 1], '.b', alpha=0.3)
plt.show()
| true |
6426277b1fdca182314337b86ef8716744f99fdf | Python | sbmlteam/libCombine | /examples/python/createArchiveExample.py | UTF-8 | 1,742 | 2.890625 | 3 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | #!/usr/bin/env python
"""
Create example archive.
"""
from __future__ import print_function
import sys
from libcombine import *
def createArchiveExample(fileName):
""" Creates Combine Archive containing the given file.
:param fileName: file to include in the archive
:return: None
"""
print('*' * 80)
print('Create archive')
print('*' * 80)
archive = CombineArchive()
archive.addFile(
fileName, # filename
"./models/model.xml", # target file name
KnownFormats.lookupFormat("sbml"), # look up identifier for SBML models
True # mark file as master
)
# add metadata to the archive itself
description = OmexDescription()
description.setAbout(".")
description.setDescription("Simple test archive including one SBML model")
description.setCreated(OmexDescription.getCurrentDateAndTime())
creator = VCard()
creator.setFamilyName("Bergmann")
creator.setGivenName("Frank")
creator.setEmail("fbergman@caltech.edu")
creator.setOrganization("Caltech")
description.addCreator(creator)
archive.addMetadata(".", description)
# add metadata to the added file
location = "./models/model.xml"
description = OmexDescription()
description.setAbout(location)
description.setDescription("SBML model")
description.setCreated(OmexDescription.getCurrentDateAndTime())
archive.addMetadata(location, description)
# write the archive
out_file = "out.omex"
archive.writeToFile(out_file)
print('Archive created:', out_file)
if __name__ == "__main__":
if len(sys.argv) < 2:
print("usage: python createArchiveExample.py sbml-file")
sys.exit(1)
createArchiveExample(sys.argv[1])
| true |
53fad847afcdd057171e13d1cd5b68ec1af516fd | Python | bangalorebyte-cohort11/Functional-Programming-II | /decorator_practical.py | UTF-8 | 305 | 3.203125 | 3 | [] | no_license | #!/usr/bin/env python
# Application for logging
from time import ctime, sleep
#print(ctime())
def tsfunc(func):
    """Decorator that prints a timestamped '<name>() called' line per call.

    Improvements over the original: the wrapper now forwards arbitrary
    positional/keyword arguments (the old one accepted none, breaking any
    decorated function with parameters) and uses functools.wraps so the
    wrapped function keeps its __name__/__doc__.
    """
    import functools  # local import: module header only imports from time

    @functools.wraps(func)
    def wrappedFunc(*args, **kwargs):
        # Same log format as before: "[<ctime>] <name>() called"
        print('[%s] %s() called' % (ctime(), func.__name__))
        return func(*args, **kwargs)
    return wrappedFunc
@tsfunc
def foo():
    # No-op target used only to demonstrate the timestamping decorator.
    pass
sleep(4)
# Invoke foo twice, one second apart, producing two timestamped log lines.
for i in range(2):
    sleep(1)
    foo()
d5c83769a43c0ae521528da86470e1a7915a3a86 | Python | wangyum/Anaconda | /lib/python2.7/site-packages/numba/tests/cache_usecases.py | UTF-8 | 2,141 | 2.78125 | 3 | [
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | """
This file will be copied to a temporary directory in order to
exercise caching compiled Numba functions.
See test_dispatcher.py.
"""
import numpy as np
from numba import jit, generated_jit, types
from numba.tests.ctypes_usecases import c_sin
@jit(cache=True, nopython=True)
def add_usecase(x, y):
return x + y + Z
@jit(cache=True, forceobj=True)
def add_objmode_usecase(x, y):
object()
return x + y + Z
@jit(nopython=True)
def add_nocache_usecase(x, y):
return x + y + Z
@generated_jit(cache=True, nopython=True)
def generated_usecase(x, y):
if isinstance(x, types.Complex):
def impl(x, y):
return x + y
else:
def impl(x, y):
return x - y
return impl
@jit(cache=True, nopython=True)
def inner(x, y):
return x + y + Z
@jit(cache=True, nopython=True)
def outer(x, y):
return inner(-y, x)
@jit(cache=False, nopython=True)
def outer_uncached(x, y):
return inner(-y, x)
@jit(cache=True, forceobj=True)
def looplifted(n):
object()
res = 0
for i in range(n):
res = res + i
return res
@jit(cache=True, nopython=True)
def use_c_sin(x):
return c_sin(x)
@jit(cache=True, nopython=True)
def ambiguous_function(x):
return x + 2
renamed_function1 = ambiguous_function
@jit(cache=True, nopython=True)
def ambiguous_function(x):
return x + 6
renamed_function2 = ambiguous_function
def make_closure(x):
@jit(cache=True, nopython=True)
def closure(y):
return x + y
return closure
closure1 = make_closure(3)
closure2 = make_closure(5)
Z = 1
# Exercise returning a record instance. This used to hardcode the dtype
# pointer's value in the bitcode.
packed_record_type = np.dtype([('a', np.int8), ('b', np.float64)])
aligned_record_type = np.dtype([('a', np.int8), ('b', np.float64)], align=True)
packed_arr = np.empty(2, dtype=packed_record_type)
for i in range(packed_arr.size):
packed_arr[i]['a'] = i + 1
packed_arr[i]['b'] = i + 42.5
aligned_arr = np.array(packed_arr, dtype=aligned_record_type)
@jit(cache=True, nopython=True)
def record_return(ary, i):
return ary[i]
| true |
784ea4c92b3660462472808e07c75df70150d298 | Python | RahulAnanda/Team_4max | /apps/predict.py | UTF-8 | 2,063 | 2.890625 | 3 | [] | no_license | import streamlit as st
import pickle
import numpy as np
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
from sklearn.impute import SimpleImputer
from sklearn import preprocessing
from sklearn import svm
from sklearn.model_selection import cross_val_score
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
def app():
df = pd.read_csv('./dataset/corona_tested_individuals_preprocessed.csv')
X = df.drop(['corona_result'], axis = 1)
Y = df['corona_result']
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 66)
logmodel = LogisticRegression()
logmodel.fit(X_train, y_train)
y_pred = logmodel.predict(X_test)
accuracy_lr = logmodel.score(X_test, y_test)
st.info('Select 1 for Yes and 0 for No')
cough = st.radio(
"Do you have cough?",
(1, 0))
fever = st.radio(
"Do you have fever?",
(1, 0))
sore_throat = st.radio(
"Do you have a sore throat?",
(1, 0))
shortness_of_breath = st.radio(
"Are you suffering from shortness of breath?",
(1, 0))
head_ache = st.radio(
"Do you have a head ache?",
(1, 0))
age_60_and_above = st.radio(
"Are you above the age of 60?",
(1, 0))
test_indication = st.radio(
"Have you been tested positive before?",
(1, 0))
if st.button("Predict the diagnosis"):
st.write("The accuracy of our model's diagnosis is:",accuracy_lr)
output = logmodel.predict([[cough,fever,sore_throat,shortness_of_breath,
head_ache,age_60_and_above,test_indication]])
st.success('The diagnosis is {}'.format(output))
if output == 1:
st.error("You are Covid positive")
elif output == 0:
st.success("You are Covid negative")
| true |
1a8d4a55530c3be08318a77397c95e0d689d9b56 | Python | itsolutionscorp/AutoStyle-Clustering | /all_data/exercism_data/python/series/1b3f2889821744a9b795c93b58cdfae6.py | UTF-8 | 429 | 3.265625 | 3 | [] | no_license | from itertools import combinations
def slices(string, n):
    """Return every contiguous run of n digits of *string* as a list of ints.

    :param string: a string of decimal digits
    :param n: window length; must satisfy 1 <= n <= len(string)
    :raises ValueError: if n is out of range (the original additionally
        looped forever / raised IndexError for negative n; n <= 0 is now
        rejected uniformly)
    """
    digits = [int(ch) for ch in string]
    if n <= 0 or n > len(digits):
        raise ValueError('slice length must be between 1 and len(string)')
    # One window per starting index, taken by slicing instead of the
    # original O(n^2) pop-from-front loop.
    return [digits[i:i + n] for i in range(len(digits) - n + 1)]
| true |
1c83c23b75687a3e25b262e952f270398120ea5a | Python | mattfister/freezedraw | /freezedraw/image.py | UTF-8 | 344 | 3.1875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
class Image(object):
    """A w-by-h RGB raster backed by a float array, initially all white."""

    def __init__(self, w, h):
        # Pixel data is row-major with shape (height, width, 3), all ones.
        self.w = w
        self.h = h
        self.data = np.full((h, w, 3), 1.0)

    def set_pixel(self, x, y, rgb):
        """Assign the RGB triple at column *x*, row *y*."""
        self.data[y, x] = rgb

    def show(self):
        """Render the raster with matplotlib."""
        plt.imshow(self.data, interpolation='nearest')
        plt.show()
| true |
693ea8a9af5de02bf275efb087d181f687e45917 | Python | rlongocode/Portfolio | /Python/Computer Graphics Design/GengarPokemonDraw in Python/pokemonshadeRicardo.py | UTF-8 | 3,125 | 2.9375 | 3 | [] | no_license | from graphics import *
import math
import random
def main():
win = GraphWin("My Window",500,500)
win.setCoords(0,200,200,0)
# win.setBackground( 'blue' )
i=int(input("enter color, 0 for red, 1 for green"))
if( i==0):
win.setBackground( color_rgb(255,0,0 ))
else:
win.setBackground( color_rgb(0,255,0 ))
colval = []
pt1 = Point(75,115)
pt2 = Point(115,73)
rec=Rectangle(pt1,pt2)
rec.setFill(color_rgb(133,0,208 ))
rec.draw(win)
# Shade interior of this robotic component
nlev = 0
for i in range(0,512):
nlev = nlev +1
if(nlev <128):
a = 255
b = (nlev-1)
c = (nlev-1)
elif (nlev>=128 and nlev < 384):
a = (nlev-128)
b = 255
c = 255
else:
a = 0
b = (128 + nlev-384)
c = (128 + nlev-384)
red = a
green = b
blue = c
colval.append( red + green *256 + blue *(256**2))
for gg in range(1,201):
hh = (gg-1)/200*(115-73)
pt1 = Point(75+hh,115-hh)
pt2 = Point(115-hh,73 +hh)
rec= Rectangle( pt1,pt2)
ind = int(hh/(115-73)*len(colval)) -1
# if(gg == 50):
# print(ind)
color = colval[ind]
red = color % 256
col1 = int((color-red)/256)
green = col1 %256
blue = int((col1 - green)/256)
red = int(red)
blue = int(blue)
green = int(green)
if(gg == 50):
print(red,green,blue)
cond1 = (red>=0 and red<256)
cond2 = (green>=0 and green<256)
cond3 = (blue>=0 and blue<256)
if(cond1 and cond2 and cond3):
rec.setOutline(color_rgb(red,green,blue) )
rec.draw(win)
#Arms
pt1 = Point(68,115)
pt2 = Point(73,93)
rec=Rectangle(pt1,pt2)
rec.setFill(color_rgb(133,0,208 ))
rec.draw(win)
pt1 = Point(117,115)
pt2 = Point(122,93)
rec=Rectangle(pt1,pt2)
rec.setFill(color_rgb(133,0,208 ))
rec.draw(win)
# Legs
pt1 = Point(85,135)
pt2 = Point(95,117)
rec=Rectangle(pt1,pt2)
rec.setFill(color_rgb(133,0,208 ))
rec.draw(win)
pt1 = Point(97,135)
pt2 = Point(107,117)
rec=Rectangle(pt1,pt2)
rec.setFill(color_rgb(133,0,208 ))
rec.draw(win)
pt1 = Point(83,98)
pt2 = Point(108,92)
rec=Rectangle(pt1,pt2)
rec.setFill(color_rgb(250,250,250 ))
rec.draw(win)
pt1 = Point(87,70)
pt2 = Point(77,53)
rec=Rectangle(pt1,pt2)
rec.setFill(color_rgb(133,0,208 ))
rec.draw(win)
pt1 = Point(112,70)
pt2 = Point(102,53)
rec=Rectangle(pt1,pt2)
rec.setFill(color_rgb(133,0,208 ))
rec.draw(win)
# components of snowman
# draw circle2
pt= Point(90,80)
cir = Circle(pt,3)
cir.setFill(color_rgb(255,0,0 ))
cir.draw(win)
pt= Point(100,80)
cir = Circle(pt,3)
cir.setFill(color_rgb(255,0,0 ))
cir.draw(win)
win.getMouse()
win.close()
main() | true |
13d76722594f76b87389dd3d53f8133a09066226 | Python | rsling/cow | /src/de/cow16-ospl.py | UTF-8 | 1,996 | 2.8125 | 3 | [
"BSD-2-Clause"
] | permissive | # -*- coding: utf-8 -*-
# Takes as input a COW-XML file with sentences delimited by <s> ... </s>
# Delets all XML-tags, produces a one-sentence-per-line format,
# appropriate as input for the Berkeley parser.
# (Also puts non-sentence material on a single line.)
import sys
import codecs
import re
import gzip
import argparse
import os
def cleanup(s):
    """Normalise one sentence line for the parser: collapse runs of spaces,
    turn round brackets into square ones (the parser treats parentheses as
    delimiters), and decode the common XML entities.

    Bug fix: the original called ``re.sub(u' +', r' ', s, re.UNICODE)``,
    passing ``re.UNICODE`` (== 32) as the positional *count* argument, so
    only the first 32 space runs per line were collapsed. It is now passed
    as *flags*.
    """
    s = re.sub(u' +', r' ', s, flags=re.UNICODE)
    s = s.replace('(', '[').replace(')', ']')
    s = s.replace('&quot;', '"').replace('&lt;', '<').replace('&gt;', '>').replace('&apos;', "'")
    s = s.replace('&amp;', '&')
    return s
def main():
parser = argparse.ArgumentParser()
parser.add_argument('infile', help='input from Marmot (NO gzip)')
parser.add_argument('outfile', help='output file name (gzip)')
parser.add_argument("--erase", action='store_true', help="erase outout files if present")
args = parser.parse_args()
# Check input files.
infiles = [args.infile]
for fn in infiles:
if not os.path.exists(fn):
sys.exit("Input file does not exist: " + fn)
# Check (potentially erase) output files.
outfiles = [args.outfile]
for fn in outfiles:
if fn is not None and os.path.exists(fn):
if args.erase:
try:
os.remove(fn)
except:
sys.exit("Cannot delete pre-existing output file: " + fn)
else:
sys.exit("Output file already exists: " + fn)
ofh = gzip.open(args.outfile, 'wb')
ifh = gzip.open(args.infile, 'r')
c_sent = list()
while True:
l = ifh.readline()
if not l:
if len(c_sent) > 0:
ofh.write(cleanup(" ".join(c_sent)).encode('utf-8') + '\n')
c_sent = list()
break
l = l.decode('utf-8')
l = l.strip()
if not l:
if len(c_sent) > 0:
ofh.write(cleanup(" ".join(c_sent)).encode('utf-8') + '\n')
c_sent = list()
else:
c_sent = c_sent + [l]
ofh.close()
ifh.close()
if __name__ == "__main__":
main()
| true |
2ba2c4b9c0d49def40b52fbe9d20c1c2be9b7a60 | Python | dylsugar/dns_resolver | /resolve.py | UTF-8 | 7,269 | 2.609375 | 3 | [] | no_license | """
resolve.py: a recursive resolver built using dnspython
"""
import argparse
import dns.message
import dns.name
import dns.query
import dns.rdata
import dns.rdataclass
import dns.rdatatype
from dns.exception import DNSException, Timeout
FORMATS = (('CNAME', '{alias} is an alias for {name}'), ('A',
'{name} has address {address}'), ('AAAA',
'{name} has IPv6 address {address}'), ('MX',
'{name} mail is handled by {preference} {exchange}'))
# current as of 19 October 2020
ROOT_SERVERS = ("198.41.0.4",
"199.9.14.201",
"192.33.4.12",
"199.7.91.13",
"192.203.230.10",
"192.5.5.241",
"192.112.36.4",
"198.97.190.53",
"192.36.148.17",
"192.58.128.30",
"193.0.14.129",
"199.7.83.42",
"202.12.27.33")
# Cache Helper
class my_dictionary(dict):
    """A plain dict with an explicit add() helper, used as a per-run cache.

    Bug fix: the original ``__init__`` did ``self = dict()``, which only
    rebinds the local name and has no effect on the instance; it now
    delegates to dict's own initialiser.
    """

    def __init__(self):
        super(my_dictionary, self).__init__()

    def add(self, key, value):
        """Store *value* under *key*."""
        self[key] = value
def collect_results(name: str, domaincache) -> dict:
"""
This function parses final answers into the proper data structure that
print_results requires. The main work is done within the `lookup` function.
"""
full_response = {}
target_name = dns.name.from_text(name)
# lookup CNAME
response = lookup(target_name, dns.rdatatype.CNAME, domaincache)
cnames = []
for answers in response.answer:
for answer in answers:
cnames.append({"name": answer, "alias": name})
# lookup A
response = lookup(target_name, dns.rdatatype.A, domaincache)
arecords = []
for answers in response.answer:
a_name = answers.name
for answer in answers:
if answer.rdtype == 1: # A record
arecords.append({"name": a_name, "address": str(answer)})
# lookup AAAA
response = lookup(target_name, dns.rdatatype.AAAA, domaincache)
aaaarecords = []
for answers in response.answer:
aaaa_name = answers.name
for answer in answers:
if answer.rdtype == 28: # AAAA record
aaaarecords.append({"name": aaaa_name, "address": str(answer)})
# lookup MX
response = lookup(target_name, dns.rdatatype.MX, domaincache)
mxrecords = []
for answers in response.answer:
mx_name = answers.name
for answer in answers:
if answer.rdtype == 15: # MX record
mxrecords.append({"name": mx_name,
"preference": answer.preference,
"exchange": str(answer.exchange)})
full_response["CNAME"] = cnames
full_response["A"] = arecords
full_response["AAAA"] = aaaarecords
full_response["MX"] = mxrecords
domaincache.key = name
domaincache.value = full_response
domaincache.add(domaincache.key, domaincache.value)
return full_response
def lookup(target_name: dns.name.Name,
qtype: dns.rdata.Rdata,
dc) -> dns.message.Message:
"""
Find relevant root server that
contains the name server ip referral needed to recurse
"""
found = False
i = 0
for server in ROOT_SERVERS:
response, found = lookup_iter(target_name,
qtype, server, found, dc)
if response.answer:
cname = dns.rdatatype.CNAME
# change answer type based on referral
top_resp = response.answer[0].rdtype
if top_resp == cname and qtype != cname:
found = False
first_resp = str(response.answer[0][0])
target_name = dns.name.from_text(first_resp)
response = lookup(target_name, qtype, dc)
return response
elif response.authority:
# start of authority record. irrelevant
if response.authority[0].rdtype == dns.rdatatype.SOA:
break
return response
def lookup_iter(target_name: dns.name.Name,
qtype: dns.rdata.Rdata, server, found, dc):
"""
Retrieves end answer from referral queries that we find
by recursing down the server tree
"""
outbound_query = dns.message.make_query(target_name, qtype)
try:
response = dns.query.udp(outbound_query, server, 3)
if response.rcode() != dns.rcode.NOERROR:
response = dns.rcode.NXDOMAIN
if response.answer:
found = True
return response, found
elif response.additional:
# parse through additional section
for add in response.additional:
# each ip response listed
for addx in add:
if addx.rdtype == dns.rdatatype.A:
response, found = lookup_iter(target_name,
qtype, str(addx),
found, dc)
if found:
break
if found:
break
elif response.authority and not found:
# parse through authority section
for auth in response.authority:
# each response listed
for authx in auth:
if authx.rdtype == dns.rdatatype.NS:
ns_response = lookup(str(authx), dns.rdatatype.A, dc)
ns_addr = str(ns_response.answer[0][0])
response, found = lookup_iter(target_name,
qtype, ns_addr,
found, dc)
elif authx.rdtype == dns.rdatatype.SOA:
found = True
break
if found:
break
return response, found
except Timeout:
print("Search longer than 3 seconds...waiting...")
return dns.message.Message(), False
except DNSException:
print("Invalid query...")
return dns.message.Message(), False
def print_results(results: dict) -> None:
    """Print the records gathered by `lookup` in host(1)-style lines."""
    for record_type, template in FORMATS:
        for entry in results.get(record_type, []):
            line = template.format(**entry)
            print(line)
def main():
"""
if run from the command line, take args and call
printresults(lookup(hostname))
"""
dict_obj = my_dictionary()
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument("name", nargs="+",
help="DNS name(s) to look up")
argument_parser.add_argument("-v", "--verbose",
help="increase output verbosity",
action="store_true")
program_args = argument_parser.parse_args()
for a_domain_name in program_args.name:
cache = dict_obj.get(a_domain_name)
if cache:
print_results(cache)
else:
print_results(collect_results(a_domain_name, dict_obj))
if __name__ == "__main__":
main()
| true |
5bd89b927940411b783ba8be864f62a42957f828 | Python | kornerc/opentracing-logging-python | /tests/util.py | UTF-8 | 2,711 | 2.828125 | 3 | [
"MIT"
] | permissive | import logging
from typing import Dict, List
from logging_opentracing import OpenTracingHandler
from opentracing import Tracer
from opentracing.mocktracer import MockTracer
import pytest
def check_finished_spans(tracer: MockTracer, operation_names_expected: List[str],
logs_expected: Dict[str, List[Dict[str, str]]]):
"""
Helper function to check if the finished spans of the tracer are as expected and the logs have also been passed
correctly
:param tracer: Instance of the MockTracer
:param operation_names_expected: The operation names of the spans which are order in the same order as they have
been created
:param logs_expected: Expected logs for each ``operation_names_expected``. The keys of the outer dictionary are the
names defined in ``operation_names``. For each operation name a list of logs must be provided which ordered
correctly.
"""
finished_spans = tracer.finished_spans()
assert len(operation_names_expected) == len(finished_spans), \
f'{len(operation_names_expected)} finished spans are expected but only {len(finished_spans)} have ' \
f'been registred'
# internally the the Mock tracer saves the traces in veversed order
operation_names_expected.reverse()
for span, operation_name_expected in zip(finished_spans, operation_names_expected):
assert operation_name_expected == span.operation_name, \
f'The expected operation name is "{operation_names_expected}", however, the operation name is ' \
f'"{span.operation_name}"'
logs = span.logs
span_logs_expected = logs_expected[operation_name_expected]
assert len(span_logs_expected) == len(logs), \
f'For the span "{operation_names_expected}" {len(span_logs_expected)} logs are expected but {len(logs)} ' \
f'are available'
for log, key_values_expected in zip(logs, span_logs_expected):
assert str(key_values_expected) == str(log.key_values), \
f'For the span "{operation_names_expected}" a log "{key_values_expected}" is expected, however, the ' \
f'log is "{log.key_values}"'
@pytest.fixture
def tracer() -> Tracer:
"""
Get a MockTracer
"""
return MockTracer()
@pytest.fixture
def logger(tracer):
"""
Get a logger with an initialized OpenTracingHandler
"""
logger = logging.getLogger('Test')
logger.setLevel(logging.DEBUG)
# this fixture is called multiple times and we have to remove the handlers added from the previous fixture call
logger.handlers.clear()
logger.addHandler(OpenTracingHandler(tracer=tracer))
return logger
| true |
8f3683fe5a52503309c9e1f138b9fddb81366e6c | Python | Daipuwei/YOLO-tf2 | /model/yolov3_tiny.py | UTF-8 | 6,105 | 2.53125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# @Time : 2021/9/19 下午5:43
# @Author : DaiPuWei
# @Email : 771830171@qq.com
# @File : yolov3_tiny.py
# @Software: PyCharm
"""
这是YOLOv3-tiny模型的定义脚本
"""
import os
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Lambda
from tensorflow.keras.layers import Concatenate
from tensorflow.keras.layers import UpSampling2D
from tensorflow.keras.layers import MaxPooling2D
from utils.model_utils import compose
from utils.model_utils import get_anchors
from utils.model_utils import get_classes
from model.loss import yolo_loss
from model.layer.yolo_common import yolo_eval
from model.backbone.darknet import DarknetConv2D
from model.backbone.darknet import DarknetConv2D_BN_Leaky
def yolov3_tiny_body(image_input, num_anchors, num_classes):
    """Build the YOLOv3-tiny backbone plus its two detection heads.

    :param image_input: input image tensor of shape (H, W, 3)
    :param num_anchors: number of anchors per detection head
    :param num_classes: number of object classes
    :return: [y1, y2] -- the coarse detection map and the upsampled/concat
        detection map, each with num_anchors*(num_classes+5) channels

    Fix: removed a stray debug ``print()`` that emitted a blank line on
    every model build.
    """
    # Shared trunk: five conv/maxpool stages ending in a 256-channel map.
    x1 = compose(DarknetConv2D_BN_Leaky(16, (3,3)),
                 MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
                 DarknetConv2D_BN_Leaky(32, (3,3)),
                 MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
                 DarknetConv2D_BN_Leaky(64, (3,3)),
                 MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
                 DarknetConv2D_BN_Leaky(128, (3,3)),
                 MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
                 DarknetConv2D_BN_Leaky(256, (3,3)))(x1 if False else image_input)
    # Deeper path feeding the first (coarse) head; note the stride-1 pool.
    x2 = compose(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
                 DarknetConv2D_BN_Leaky(512, (3,3)),
                 MaxPooling2D(pool_size=(2,2), strides=(1,1), padding='same'),
                 DarknetConv2D_BN_Leaky(1024, (3,3)),
                 DarknetConv2D_BN_Leaky(256, (1,1)))(x1)
    # First detection head.
    y1 = compose(DarknetConv2D_BN_Leaky(512, (3,3)),
                 DarknetConv2D(num_anchors*(num_classes+5), (1,1)))(x2)
    # Upsample and fuse with the trunk feature map for the second head.
    x2 = compose(DarknetConv2D_BN_Leaky(128, (1,1)),
                 UpSampling2D(2))(x2)
    y2 = compose(Concatenate(),
                 DarknetConv2D_BN_Leaky(256, (3,3)),
                 DarknetConv2D(num_anchors*(num_classes+5), (1,1)))([x2,x1])
    return [y1,y2]
def build_yolov3_tiny_train(cfg):
"""
这是搭建训练阶段YOLOv3-tiny的函数
:param cfg: 参数配置类
:return:
"""
# 初始化anchor和classes
anchors = get_anchors(cfg.DATASET.ANCHORS_PATH)
classes = get_classes(cfg.DATASET.CLASSES_PATH)
num_anchors = len(anchors)
num_classes = len(classes)
# 搭建YOLOv3-tiny主干
image_input = Input(shape=(None, None, 3), name='image_input')
print('Create YOLOv3-tiny model with {} anchors and {} classes.'.format(num_anchors, num_classes))
yolov3_tiny_outputs = yolov3_tiny_body(image_input, num_anchors//2, num_classes)
yolov3_tiny_body_model = Model(image_input, yolov3_tiny_outputs)
if cfg.MODEL.MODEL_PATH is not None:
yolov3_tiny_body_model.load_weights(os.path.abspath(cfg.MODEL.MODEL_PATH), by_name=True, skip_mismatch=True)
print('Load weights from: {}.'.format(os.path.abspath(cfg.MODEL.MODEL_PATH)))
yolov3_tiny_body_model.summary()
# 搭建训练阶段YOLOv3-tiny
y_true = [Input(shape=(None, None, num_anchors//2, num_classes + 5)) for l in range(2)]
loss_input = [*yolov3_tiny_body_model.output, *y_true]
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors,
'num_classes': num_classes,
'label_smoothing': cfg.LOSS.USE_LABEL_SMOOTHING,
'use_giou_loss': cfg.LOSS.USE_GIOU_LOSS,
'use_diou_loss': cfg.LOSS.USE_DIOU_LOSS,
'use_ciou_loss': cfg.LOSS.USE_CIOU_LOSS,
'model_name': cfg.MODEL.MODEL_NAME})(loss_input)
yolov3_tiny_train_model = Model([image_input, *y_true], model_loss)
return yolov3_tiny_body_model, yolov3_tiny_train_model
def build_yolov3_tiny_eval(cfg):
"""
这是搭建评估阶段YOLOv3(-spp)的函数
:param cfg: 参数配置类
:return:
"""
# 初始化anchor和classes
anchors = get_anchors(cfg.DATASET.ANCHORS_PATH)
classes = get_classes(cfg.DATASET.CLASSES_PATH)
num_anchors = len(anchors)
num_classes = len(classes)
# 搭建YOLOv3-tiny主干
image_input = Input(shape=(None, None, 3), name='image_input')
print('Create YOLOv3-tiny model with {} anchors and {} classes.'.format(num_anchors, num_classes))
yolov3_tiny_outputs = yolov3_tiny_body(image_input, num_anchors//2, num_classes)
yolov3_tiny_body_model = Model(image_input, yolov3_tiny_outputs)
if cfg.MODEL.MODEL_PATH is not None:
yolov3_tiny_body_model.load_weights(os.path.abspath(cfg.MODEL.MODEL_PATH), by_name=True, skip_mismatch=True)
print('Load weights from: {}.'.format(os.path.abspath(cfg.MODEL.MODEL_PATH)))
yolov3_tiny_body_model.summary()
# 搭建评估阶段YOLOv3-tiny
input_image_shape = Input(shape=(2,), batch_size=1, name='input_image_shape')
inputs = [*yolov3_tiny_body_model.output, input_image_shape]
outputs = Lambda(yolo_eval, output_shape=(1,), name='yolov3_tiny_preds',
arguments={'anchors': anchors,
'num_classes': num_classes,
'max_boxes': cfg.DATASET.MAX_BOXES,
'score_threshold': cfg.DATASET.SCORE_THRESHOLD,
'iou_threshold': cfg.DATASET.IOU_THRESHOLD,
'letterbox_image': cfg.DATASET.LETTERBOX_IMAGE,
'model_name': cfg.MODEL.MODEL_NAME})(inputs)
yolov3_tiny_eval_model = Model([image_input, input_image_shape], outputs)
return yolov3_tiny_eval_model | true |
8b8f73b1f3f2a612e43f0a71860753ba7b7e70ea | Python | rosalexander/CloudForce_Frontend | /json_to_sql.py | UTF-8 | 2,192 | 2.671875 | 3 | [] | no_license | import requests
import datetime
import sqlite3
DATABASE = 'database.db'
def add_readings_to_route(route_id):
    """Pull IoT sensor readings from the remote endpoint and insert any not
    yet stored for *route_id* into the local `readings` table.

    Returns True if at least one new row was inserted, else False.
    Side effects: network GET, sqlite writes, and a debug print per row.
    """
    conn = sqlite3.connect(DATABASE)
    cur = conn.cursor()
    # Assumes the endpoint returns a JSON list of objects with keys
    # Date, Time, Humidity, Light, Temperature -- TODO confirm API schema.
    iotJson = requests.get('http://132.145.129.245:5000/iot').json()
    modified = False
    for row in iotJson:
        # Reformat "YYYY/MM/DD HH:MM:SS" into the ISO-ish form used as the
        # de-duplication key in create_date.
        date_string = row['Date'] + " " + row['Time']
        date_unformatted = datetime.datetime.strptime(date_string, "%Y/%m/%d %H:%M:%S")
        date_formatted = datetime.date.strftime(date_unformatted, "%Y-%m-%d %H:%M:%S")
        # Skip readings already stored for this route/timestamp pair.
        cur.execute("SELECT EXISTS(SELECT 1 FROM readings WHERE route_id = ? AND create_date = ?)", (route_id, date_formatted, ))
        id_exists = cur.fetchone()
        print(id_exists[0])
        if id_exists[0] != 1:
            humidity = row['Humidity']
            lux = row['Light']
            temperature = row['Temperature']
            try:
                # Truncate "12.34"-style strings to their integer part.
                humidity = int(humidity.split('.')[0])
                lux = int(lux.split('.')[0])
                temperature = int(temperature.split('.')[0])
            except ValueError:
                # NOTE(review): on conversion failure the raw strings are
                # inserted as-is below -- confirm this is intended.
                print("Type error")
                pass
            cur.execute("INSERT INTO readings(route_id, temperature, humidity, lux, create_date) VALUES(?, ?, ?, ?, ?);", (route_id, temperature, humidity, lux, date_formatted))
            modified = True
    conn.commit()
    cur.close()
    conn.close()
    return modified
def add_nfc_to_product(product_id):
conn = sqlite3.connect(DATABASE)
cur = conn.cursor()
nfcJson = requests.get('http://132.145.129.245:5000/nfc').json()
for row in nfcJson:
if int(row['ID']) == product_id:
date_string = "20" + row['Date Hatched'] + " " + row['Time']
date_unformatted = datetime.datetime.strptime(date_string, "%Y/%m/%d %H:%M:%S")
date_formatted = datetime.date.strftime(date_unformatted, "%Y-%m-%d %H:%M:%S")
cur.execute("SELECT EXISTS(SELECT 1 FROM nfc WHERE product_id = ? AND scan_date = ?)", (product_id, date_formatted, ))
id_exists = cur.fetchone()
if id_exists[0] != 1:
lon = row['Longitude']
lat = row['Latitude']
try:
lon = float(lon)
lat = float(lat)
except ValueError:
print("Type error")
pass
cur.execute("INSERT INTO nfc(product_id, lon, lat, scan_date) VALUES(?, ?, ?, ?)", (product_id, lon, lat, date_formatted))
conn.commit()
cur.close()
conn.close()
| true |
aea0613bd6d0d68d01ce8fc9b63dd8b65f89fc51 | Python | u-tokyo-gps-tanaka-lab/gpw2016 | /projects/curling/python/dcl_counter.py | UTF-8 | 2,346 | 2.640625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# dcl_counter.py
# Katsuki Ohto
import argparse
import glob
import numpy as np
def analyze_logs(logs):
# result survey
wl = {}
scores = {}
# simulation survey
errors = [[], []]
for f in glob.glob(logs):
# get data
name = ["", ""]
chosen = (0, 0)
run = (0, 0)
score = [-1, -1]
flip = 1
for line in open(f, 'r'):
data = line.split()
if 'First=' in data[0]:
name[0] = data[0][6:]
elif 'Second=' in data[0]:
name[1] = data[0][7:]
elif 'BESTSHOT' in data[0]:
chosen = (float(data[1]), float(data[2]))
elif 'RUNSHOT' in data[0]:
run = (float(data[1]), float(data[2]))
errors[0].append(run[0] - chosen[0])
errors[1].append(run[1] - chosen[1])
elif 'TOTALSCORE' in data[0]:
score[0] = int(data[1])
score[1] = int(data[2])
elif 'SCORE' in data[0]:
if flip * int(data[1]) < 0:
flip = -flip
for i in range(2):
if name[i] not in wl:
wl[name[i]] = [[0, 0, 0, 0], [0, 0, 0, 0]]
scores[name[i]] = [[0, 0], [0, 0]]
for i in range(2):
for c in range(2):
scores[name[i]][i][i ^ c] += score[c]
if score[0] > score[1]:
wl[name[0]][0][0] += 1
wl[name[1]][1][3] += 1
elif score[0] < score[1]:
wl[name[0]][0][3] += 1
wl[name[1]][1][0] += 1
else:
if flip == -1:
wl[name[0]][0][1] += 1
wl[name[1]][1][2] += 1
else:
wl[name[0]][0][2] += 1
wl[name[1]][1][1] += 1
print(wl)
print(scores)
print("error in Vx : mean = %f stddev = %f" % (np.mean(errors[0]), np.std(errors[0])))
print("error in Vy : mean = %f stddev = %f" % (np.mean(errors[1]), np.std(errors[1])))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('--logs', required=True)
args = parser.parse_args()
analyze_logs(args.logs)
| true |
8c048e9d79bc54df72373a2d522f0fac2508a260 | Python | 2bora/Cancer_conquest | /src/Preprocessing.py | UTF-8 | 5,371 | 2.75 | 3 | [] | no_license | import pandas as pd
import numpy as np
import os
from keras.utils import to_categorical
# data processing
def preprcossing_data(filename, followup_years):
base_df = pd.read_csv("../data/"+str(filename)+".csv")
print "raw input data shape = ", str(base_df.shape)
base_df["years_to_followup"] =(base_df["days_to_followup"]/(30*12))
for i in range(len(base_df)):
base_df.iloc[i,-1] = round(base_df.iloc[i,-1],3)
base_df_row =[]
for i, row in base_df.iterrows():
if int(row["years_to_followup"]) <= followup_years and row["Censored_flag"] == 1:
pass
else :
base_df_row.append(row.tolist())
base_df = pd.DataFrame(base_df_row, columns = base_df.columns.tolist())
print " Followup years max, min = " , (base_df["years_to_followup"].max(), base_df["years_to_followup"].min())
# year max = 18, time interval = 1 year
#0th = 0 -1, ... , 17th = 17 - 18
return base_df
## make censored df
def makeTimeintervalCensored(base_df):
header = []
max_interval = int(base_df["years_to_followup"].max()) + 1
for i in range(0,max_interval):
header.append("t_interval_"+str(i))
censored_df_pre = []
for i,row in base_df.iterrows():
censored_row = []
sy = int(row["years_to_followup"])
d = 0
if row['Event_flag'] == 1 :#dead, censored = 0
for l in range(0,max_interval):
censored_row.append(0)
elif row['Event_flag'] == 0 :#Censored, censored = 1
for j in range(0,sy):
censored_row.append(0)
for k in range(sy,max_interval):
censored_row.append(1)
censored_df_pre.append(censored_row)
Censored_df = pd.DataFrame(censored_df_pre,columns = header) # whether censored or not
print "Shape of Censored_by_t_df = ", Censored_df.shape
return Censored_df
## make Outputdf (timeinterval, binary, multitask)
def makeTimeintervalOutput(base_df):
header = []
max_interval = int(base_df["years_to_followup"].max()) + 1
for i in range(0,max_interval):
header.append("t_interval_"+str(i))
out_df_pre = []
for i,row in base_df.iterrows():
out_row =[]
sy = int(row["years_to_followup"])
d = 0
if row['Event_flag'] == 1 :#dead, censored = 0
for j in range(0,sy):
out_row.append(0)
for k in range(sy,max_interval):
out_row.append(1)
elif row['Event_flag'] == 0 :#Censored, censored = 1
for j in range(0,sy):
out_row.append(0)
for k in range(sy,max_interval):
n = len(base_df.loc[base_df.years_to_followup >= k])
d_p = base_df.loc[base_df.Censored_flag == 1]
d_p.reindex(range(0,len(d_p)))
for l in range(len(d_p)):
if k<d_p.iloc[l, -1] and d_p.iloc[l,-1]<k+1:
d += 1
if n != 0:
if d != 0 :
out_row.append(round(float(d)/n,4))
else :
out_row.append(round(float(0.0001),5))
else :
out_row.append(round(float(0.0001),5))
d = 0
out_df_pre.append(out_row)
Output_df=pd.DataFrame(out_df_pre,columns = header) # hazard function
Output_df=1-Output_df # survival probability
print "Shape of Output_df = ", Output_df.shape
return Output_df
def makeBinaryOutput(base_df, followup_years): # survival = 0, event(death) = 1
out_df_row = []
for i, row in base_df.iterrows():
if int(row["years_to_followup"]) <= followup_years and row["Event_flag"] == 1:
out_df_row.append(0)
else :
out_df_row.append(1)
Output_df = pd.DataFrame(out_df_row, columns =[str(followup_years)+'yr survival bianry'])
return Output_df
## make input df
def makeInput(base_df):
output_related_f = ['Censored_flag','Event_flag','years_to_followup',"days_to_followup","PID"]
Input_df = base_df.drop(output_related_f,axis =1)
print "Shape of Iutput_df = ", Input_df.shape
return Input_df
def makeAllData(filename, method, followup_years):
# Prepare base dataframes
base_df=preprcossing_data(filename, followup_years)
auc_df = makeBinaryOutput(base_df, followup_years)
test_binary_out = pd.concat([base_df,auc_df], axis =1 )
test_binary_out.to_csv("../data/binary_output_"+str(followup_years)+".csv")
auc_df = auc_df.values.astype(float)
if method =='time':
output_df = makeTimeintervalOutput(base_df)
output_df = output_df.values.astype(float)
censored_df = makeTimeintervalCensored(base_df)
censored_df = censored_df.values.astype(float)
elif method == 'binary':
output_df = auc_df
censored_df = makeTimeintervalCensored(base_df)
censored_df = censored_df.values.astype(float)
followup_time = np.array(base_df["years_to_followup"].tolist())
censored_flag = np.array(base_df["Censored_flag"].tolist())
input_df = makeInput(base_df)
feature_list = input_df.columns.tolist()
input_df = input_df.values.astype(float)
return auc_df, output_df, censored_df, followup_time, censored_flag, input_df, feature_list
| true |
dedf72c3bd6c8a5cf16aea8808531cc804309f75 | Python | SMS-NED16/lpthw-3 | /drills/drill_39_1.py | UTF-8 | 1,899 | 4.28125 | 4 | [] | no_license | """Programme which uses dictionaries to store states and citites in Pakistan"""
# Dictionaries of states mapped to their abbreviations
states = {
'Sindh': 'SN',
'Punjab': 'PN',
'Balochistan': 'BST',
'Khyber Pakthunkhwa': 'KPK',
'Islamabad and Capital Territories': 'ISB',
'Federally Administered Tribal Areas': 'FATA',
}
# Dictionaries of cities mapped to each state
cities = {
'SN' : ['Karachi', 'Hyderabad', 'Larkana'],
'PN': ['Lahore', 'Multan', 'Faisalabad'],
'BST': ['Quetta', 'Gwadar', 'Zhob'],
'KPK': ['Peshawar', 'Mardan'],
'ISB': ['Islamabad'],
'FATA': ['Swat', 'Mianwali', 'Rahimyar Khan']
}
# print some states
print('-' * 20)
print("Sindh's abbreviation is: ", states['Sindh'])
print("Balochistan's abbreviation is: ", states['Balochistan'])
# print some cities
print('-' * 20)
print("SN state has: ", cities['SN'])
print("BST state has: ", cities['BST'])
# do it by using the state then cities dict
print('-' * 20)
print("Sindh has: ", cities[states['Sindh']])
print("Balochistan has ", cities[states['Balochistan']])
# print every state's abbrevation
print('-' * 20)
for state, abbrev in states.items():
print(f"{state} is abbreviated {abbrev}.")
# print every city in state
print('-' * 20)
for state, city_list in cities.items():
print(f"{state} has cities.")
for city in city_list:
print(f"\t- {city}")
# now do both at the same time
print('-' * 10)
for state, abbrev in list(states.items()):
print(f"{state} state is abbreviated {abbrev}.")
print(f"and has the following cities.")
for city in cities[abbrev]:
print(f"\t- {city}")
# safely get abbrevation by state that might not be there
state = states.get('Muhajir Sooba')
if not state:
print("Sorry, no Muhajir Sooba. Marsun Marsun, Sindh na desun")
# get a city with a default value
city = cities.get('Muhajir Sooba', 'Does not exist')
print(f"The city for the state 'Muhajir Sooba' is: {city}.") | true |
4d166cb02ce6415ec462424403ffa852702d3fd0 | Python | YunyLee/BaekJoon | /6. 함수/함수_셀프넘버.py | UTF-8 | 390 | 3.4375 | 3 | [] | no_license | # 백준 단계별 풀이 - 함수 - 4673 문제
# 생성자가 없는 셀프넘버를 출력하는 프로그램
natural_number = set(range(1,10001))
generated_number = set()
for i in natural_number:
sum_number = 0
for j in str(i):
sum_number += int(j)
generated_number.add(i+sum_number)
result = sorted(natural_number-generated_number)
for i in result:
print(i)
| true |
0b9e154dfc9dc04fa6320c3ea906f0867175eb4c | Python | abhijeet3922/Predict-the-Happiness-HackerEarth-Challenge | /sentiment_detector_review.py | UTF-8 | 1,605 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 24 17:47:57 2017
@author: abhijeet
"""
import json
import numpy as np
import keras.preprocessing.text as kpt
from keras.preprocessing.text import Tokenizer
from keras.models import model_from_json
import pandas as pd
def convert_text_to_index_array(text):
words = kpt.text_to_word_sequence(text)
wordIndices = []
for word in words:
if word in dictionary:
wordIndices.append(dictionary[word])
return wordIndices
# Load the dictionary
labels = ['happy','not_happy']
with open('dictionary.json', 'r') as dictionary_file:
dictionary = json.load(dictionary_file)
# Load trained model
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
model.load_weights('model.h5')
testset = pd.read_csv("./test.csv")
cLen = len(testset['Description'])
tokenizer = Tokenizer(num_words=10000)
# Predict happiness for each review in test.csv
y_pred = []
for i in range(0,cLen):
review = testset['Description'][i]
testArr = convert_text_to_index_array(review)
input = tokenizer.sequences_to_matrix([testArr], mode='binary')
pred = model.predict(input)
#print pred[0][np.argmax(pred)] * 100, labels[np.argmax(pred)]
y_pred.append(labels[np.argmax(pred)])
# Write the results in submission csv file
raw_data = {'User_ID': testset['User_ID'],
'Is_Response': y_pred}
df = pd.DataFrame(raw_data, columns = ['User_ID', 'Is_Response'])
df.to_csv('submission_model1.csv', sep=',',index=False) | true |
a933c706c494c38c443d8e92b68472801600610c | Python | accus84/python_bootcamp_28032020 | /moje_skrypty/nauka/01_podstawy/011_tabliczka_mnozenia.py | UTF-8 | 1,190 | 4.125 | 4 | [] | no_license | print(" ", end="") #printowanie w jednej linii pięciu pustych znaków zakończonych pustym znakiem zamiast znakiem nowej linii
for i in range(10): #...dzieki temu ta pętla wykona się w tej samej linii z printem
print(f"{i:4}", end="") #printuj i z odstępem 4, a ta pętla też kończy się bez znaku nowej linii więc też wykona się w tej samej
print() #ten zank kończy to co było w jednej linii
print() #a ten powoduje już, że jest pusta linia
for i in range(10):
print(i, end=" ") #wyprintowanie pierwszej kolumny 0,1,2,3,4,5,6,7,8,9 ale ponieważ każdy numerek nie kończy się znakiem nowej linii to kolejny print będzie się wykonywał dla każdego numerka w tej samej linii
for j in range(10): #...czyli dla linii 0 wykona się 10 obrotów, dla 1 wykona się 10 obrotów, dla 2 wykona się 10 obrotów
print(f"{i*j:4}", end="") #które składają się z 0*0, 0*1, 0*2, 0*3 itd, 1*0, 1*1, 1*2, 1*3 itd, 2*0, 2*1, 2*2, 2*3 itd i każdy taki obrót w tej samej linii co i
print()
print()
| true |
bc7f6c24aef04c3c96d348fd37140feda7a4c3e5 | Python | slavaglasha/eptm2 | /places/models.py | UTF-8 | 1,244 | 2.515625 | 3 | [] | no_license | from django.db import models
# Create your models here.
class Places(models.Model):
name = models.CharField(max_length=200, null=True, blank=True, verbose_name='Название станции', help_text='Если у объекта есть названеи (станция)' )
adres = models.CharField(max_length=200, null=True, unique=False, blank=True)
geo_point = models.DecimalField(max_digits=5, decimal_places=2,null=True, blank=True)
note = models.CharField(max_length=500, null=True, blank=True)
to_Place = models.ForeignKey('self',related_name='places', verbose_name='Относится к станции', null=True, blank=True)
def __str__(self):
if self.adres is not None:
return "%s %s " %(self.name,self.adres)
else:
return "%s " % (self.name)
class Meta:
verbose_name = "Место"
verbose_name_plural = "Обьекты"
@property
def to_dict(selfe):
return{'id':selfe.id,
'name':selfe.name,
'adres':selfe.adres,
'geo_point':selfe.geo_point,
'note':selfe.note,
'to_place':'' if selfe.to_Place==None else selfe.to_Place.name
}
| true |
2f7b15c79ee184139d4af6f34579bd34f8e0cb10 | Python | didiwai/tianchi_recommend | /prodata.py | UTF-8 | 2,066 | 2.5625 | 3 | [] | no_license | import numpy as np
from sklearn.preprocessing import StandardScaler
def analysisDataFeature(fname1, fname2):
feat1 = dict()
with open(fname1, "r") as f:
for row in f:
row = row.strip().split(',')
feat = [float(i) for i in row[2:]]
for j in range(len(row[2:])):
feat1.setdefault(j, []).append(feat[j])
with open(fname2, "w") as fw:
for key in feat1:
#fw.write("feature "+str(key)+"\n")
feat = feat1[key]
minnum = min(feat); maxnum = max(feat); avgnum = np.mean(feat)
#fw.write("minnum: "+str(minnum)+"\t"+"maxnum: "+str(maxnum)+"\t"+"avgnum: "+str(avgnum)+"\n")
fw.write(str(avgnum)+"\n")
def processFeatureData(fname1,fname2):
feat1 = dict()
with open(fname1, "r") as f:
for row in f:
row = row.strip().split(',')
feat = [float(i) for i in row[2:]]
for j in range(len(row[2:])):
feat1.setdefault(j, []).append(feat[j])
col = len(feat1)
newfeat = dict()
scaler = StandardScaler()
for key in feat1:
newfeat[key] = scaler.fit_transform(feat1[key])
with open(fname2,"w") as fw:
with open(fname1,"r") as f:
number = 0
for row in f:
row = row.strip().split(',')
templist = list()
for t in range(col):
templist.append(newfeat[t][number])
fw.write(row[0]+","+row[1]+","+",".join([str(nf) for nf in templist])+"\n")
number += 1
def proOriData(filename1, filename2):
with open(filename2,"w") as fw:
with open(filename1,"r") as f:
for row in f:
line = row.strip().split(',')
feat = [float(n) for n in line[2:]]
tempnum = feat[4]
if tempnum < 500 :
fw.write(row)
if __name__ == "__main__":
proOriData("traindata_feature_offline_add.csv","traindata_feature_offline_add_1.csv")
proOriData("testdata_feature_offline_add.csv","testdata_feature_offline_add_1.csv")
#analysisDataFeature("data/traindata_feature_online.csv", "feature_analysis.txt")
processFeatureData("traindata_feature_offline_add_1.csv","traindata_feature_offline_add_post.csv")
processFeatureData("testdata_feature_offline_add_1.csv","testdata_feature_offline_add_post.csv")
| true |
bfd6c65d778be337e3c1bdf3d0b59d1ba591de0a | Python | otaviobertucini/Clinic-Aid | /clinic/extras.py | UTF-8 | 219 | 2.734375 | 3 | [] | no_license | from datetime import datetime, timedelta
def check_date(date):
next_date = datetime.strptime(date.replace('-', ''), "%Y%m%d").date()
if next_date > datetime.now().date():
return True
return False
| true |
ed0518219cb629b8a946e6f1866cfec59eaa4493 | Python | mynameischaos/Machine-Learning | /Regression/test.py | UTF-8 | 624 | 2.6875 | 3 | [] | no_license | # encoding=utf-8
import regression
from numpy import *
xArr, yArr = regression.loadDataSet("filename")
ws = regression.standRegres(xArr, yArr)
'''
# 绘图
xMat = mat(xArr)
yMat = mat(yArr)
yHat = xMat * ws
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(xMat[:, 1].flatten().A[0], yMat.T[:, 0].flatten().A[0])
xCopy = xMat.copy()
xCopy.sort(0)
yHat = xCopy * ws
ax.plot(xCopy[:, 1], yHat)
plt.show()
#相关系数:用来衡量预测值和真实值的匹配程序
corrcoef(yHat.T, yMat)
'''
# 得到数据集中所有点的估计
yHat = regression.lwlrTest(xArr, xArr, yArr, 0.003)
| true |
a61734568b170f3c42cf2f05d3294a428aaaee03 | Python | nareshkodimala/Python | /Functions/using filter function in lambda.py | UTF-8 | 109 | 3.234375 | 3 | [] | no_license | #using filter function in lambda
res=list(filter((lambda x:x%2==0),[1,2,3,4,5,6,7,8,9,10]))
print(res)
| true |
2a424a689da449b89d337a0def55041d4096482c | Python | Maelstrom6/COVID19 | /Models.py | UTF-8 | 18,990 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | """
This class contains all parameters for all models for different countries.
It contains methods to obtain observed data.
It also contains the common methods to use the model itself.
"""
import numpy as np
import pandas as pd
import math
from Communication import Database
np.set_printoptions(suppress=True)
# Inspiration for the model:
# https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3182455/
# https://www.researchgate.net/publication/334695153_Complete_maximum_likelihood_estimation_for_SEIR_epidemic_models_theoretical_development
# I got up to the first sample path diagrams before I got completely lost
class Parameters:
"""
A single class to store all model parameters.
See model_1 comments for information on these parameters
"""
def __init__(self, m, n, Ep, Ip, alpha, beta, offset, country, is_country, N):
self.m = m
self.n = n
self.Ep = Ep
self.Ip = Ip
self.alpha = alpha
self.beta = beta
self.offset = offset
self.country = country
self.is_country = is_country
self.N = N
def unpack(self):
"""
:return: all the model parameters as a tuple
"""
return self.m, self.n, self.Ep, self.Ip, self.alpha, self.beta, self.offset, \
self.country, self.is_country, self.N
def choose(n, r):
return (math.factorial(n)) / math.factorial(r) / math.factorial(n - r)
def binomial(x, n, p):
return choose(n, x) * p ** x * (1 - p) ** (n - x)
def binomial_dist(n, p):
dist = []
for x in range(n + 1):
dist.append(binomial(x, n, p))
return np.array(dist)
def neg_bin(x, k, p):
return choose(x + k - 1, k - 1) * p ** k * (1 - p) ** x
def neg_bin_dist(k, p, length):
dist = []
for x in range(length):
dist.append(neg_bin(x, k, p))
return np.array(dist)
def get_observed_I_and_R(country: str, is_country: bool = True):
"""
Gets the data for the number of confirmed cases and the number of recovered.
:param country: The country or province for which to get the cases
:param is_country: Whether the country variable is for a country or for a province
:return: A tuple of 2 arrays: one for cumulative infected and one for cumulative recovered
"""
country = country.replace("'", "")
database = Database("Data\\CombinedData.accdb")
if is_country:
# Source: https://data.humdata.org/dataset/novel-coronavirus-2019-ncov-cases
# And https://www.worldometers.info/coronavirus/
data_dict = database.select("SELECT IRTbl.Country, IRTbl.Day, Sum(IRTbl.Infected) AS SumOfInfected, "
"Sum([IRTbl].[Recovered]+[IRTbl].[Dead]) AS SumOfRecoveredAndDead\n"
"FROM IRTbl\n"
"GROUP BY IRTbl.Country, IRTbl.Day\n"
"HAVING (((IRTbl.Country)='{}') AND ((Sum(IRTbl.Infected))>0))\n"
"ORDER BY IRTbl.Day;".format(country),
["SumOfInfected", "SumOfRecoveredAndDead"])
else:
print("Warning: Worldometer (the data source since 2020/03/25) does not specify provinces in their data.\n"
" It is best to rather specify a country than a province due to this.")
data_dict = database.select("SELECT IRTbl.Province, IRTbl.Day, Sum(IRTbl.Infected) AS SumOfInfected, "
"Sum([IRTbl].[Recovered]+[IRTbl].[Dead]) AS SumOfRecoveredAndDead\n"
"FROM IRTbl\n"
"GROUP BY IRTbl.Province, IRTbl.Day\n"
"HAVING (((IRTbl.Province)='{}') AND ((Sum([IRTbl].[Infected]))>0))\n"
"ORDER BY IRTbl.Day;".format(country),
["SumOfInfected", "SumOfRecoveredAndDead"])
data = pd.DataFrame(data=data_dict)
data.columns = data.columns.str.replace('/', '') # Just a safety precaution
I_tot_observed = data["SumOfInfected"].to_numpy()
R_tot_observed = data["SumOfRecoveredAndDead"].to_numpy()
return I_tot_observed, R_tot_observed
def model_SA():
"""
Gets model parameters for the first type of model that seems to work.
Here we assume a discrete SEIR model with m substates in E and n substates in I.
We assume the number of days in state E follows Negative Binomial(m, Ep)
And the number of days in state I follows a Negative Binomial(n, Ip)
Where Ip and Ep are chosen so as to give the desired means.
Assuming SA's lock down is as effective as China's.
Assuming the reported cases for both China and SA are accurate representations
of the number of infected people that are showing symptoms.
Assuming no one will be infectious after the lock down.
Note that this model can easily become a binomial recover time and exposed time.
One needs to simply change the transition state matrix so that in each sub state, Ip and Ep are 1.
Also note the Ip and Ep calculations need to change.
:return: All the parameters as a tuple
"""
m = 7 # The parameter within the distribution of E
n = 7 # The parameter within the distribution of I
mean_exposed_days = 6
mean_infectious_days = 20
# The result is that the median time in I is ~15 days while the mean time is 20
# Source: https://ourworldindata.org/coronavirus
Ep = m / (m + mean_exposed_days) # The p needed to give the desired expected value
Ip = n / (n + mean_infectious_days) # The p needed to give the desired expected value
# If one wants the binomial assumption
# Ep = mean_exposed_days/m, m=20
# Ip = mean_infectious_days/n, n=20
offset = 5 # The offset time on the graph between theoretical and observed I
# Source: https://en.wikipedia.org/wiki/2020_Hubei_lockdowns
lock_down = 21 + offset
# the average number of susceptible people exposed per day per person exposed immediately
alpha = lambda t: 0.6 / (t + 1) ** 0.2 if t < lock_down else 0
# the average number of susceptible people exposed per day per person infected
beta = lambda t: 1.5 / (t + 1) if t < lock_down else 0
# The number of days since patient 0 until the country goes into lock down
country = "South Africa"
is_country = True
N = 58_000_000 # The total number of people in the country
# NOTE: other countries such as China need a province specified instead
# and you mush change the function get_observed_I_and_R to match.
return Parameters(m, n, Ep, Ip, alpha, beta, offset, country, is_country, N)
def model_Hubei():
"""
This is the model for Hubei that I tried to match to South Africa.
Since the data does not contain information on patient 0 and the
epidemic is underway already, the initial state vector must have people
already in the pipeline.
:return: All the parameters as a tuple
"""
m = 7 # The parameter within the distribution of E
n = 7 # The parameter within the distribution of I
mean_exposed_days = 6
mean_infectious_days = 20
Ep = m / (m + mean_exposed_days) # The p needed to give the desired expected value
Ip = n / (n + mean_infectious_days) # The p needed to give the desired expected value
offset = 5 # The offset time on the graph between theoretical and observed I
lock_down = 5 + offset
# Number of days since patient 0 until the province goes into lock down
# the average number of susceptible people exposed per day per person exposed immediately
a, b, c, d, e, f = [102.94127581, 59.02443664, 0.21857432, 11.55142024, 1.06625104, 7.73675499]
alpha = lambda t: abs(c * b / a * (t / a) ** (b - 1) * math.exp(-(t / a) ** b)) + \
abs(f * e / d * (t / d) ** (e - 1) * math.exp(-(t / d) ** e)) # + abs(g)
# the average number of susceptible people exposed per day per person infected
beta = lambda t: 0.2 * alpha(t)
# the average number of susceptible people exposed per day per person exposed immediately
# alpha = lambda t: 1.61 / (0.5 * t + 1) if t < lock_down else 0
# the average number of susceptible people exposed per day per person infected
# beta = lambda t: 1.5 / (t + 1) if t < lock_down else 0
# This should be 5
province = "Hubei"
is_country = False
N = 58_000_000 # The total number of people in the province
return Parameters(m, n, Ep, Ip, alpha, beta, offset, province, is_country, N)
def model_SouthKorea():
"""
This is a model for South Korea.
Due to how much testing they do, it is probably more accurate
to try match observations to E instead of I.
"""
m = 4 # The parameter within the distribution of E
n = 7 # The parameter within the distribution of I
mean_exposed_days = 11
mean_infectious_days = 14
Ep = m / (m + mean_exposed_days) # The p needed to give the desired expected value
Ip = n / (n + mean_infectious_days) # The p needed to give the desired expected value
offset = 2 # The offset time on the graph between theoretical and observed I
lock_down = 28 + offset
# Number of days since patient 0 until the province goes into lock down
# the average number of susceptible people exposed per day per person exposed immediately
a, b, c, d, e, f = [3.14391294, 7.8549395, 2.02250233, 29.93859138, 2.42269529, 10.44748158]
alpha = lambda t: abs(c * b / a * (t / a) ** (b - 1) * math.exp(-(t / a) ** b)) + \
abs(f * e / d * (t / d) ** (e - 1) * math.exp(-(t / d) ** e)) # + abs(g)
# the average number of susceptible people exposed per day per person infected
beta = lambda t: 0.2 * alpha(t)
# the average number of susceptible people exposed per day per person exposed immediately
# alpha = lambda t: 1.1 / (t + 1) if t < lock_down else 1.7 / (0.9 * (t - 28) + 1)
# the average number of susceptible people exposed per day per person infected
# beta = alpha
# This should be 5
country = "Korea, South"
is_country = True
N = 51_000_000 # The total number of people in the province
print("Note for South Korea, since they do so much testing, it is best to match observed cases with Exposed.")
return Parameters(m, n, Ep, Ip, alpha, beta, offset, country, is_country, N)
def model_Italy():
"""
A model of Italy
"""
m = 7 # The parameter within the distribution of E
n = 7 # The parameter within the distribution of I
mean_exposed_days = 6
mean_infectious_days = 14
Ep = m / (m + mean_exposed_days) # The p needed to give the desired expected value
Ip = n / (n + mean_infectious_days) # The p needed to give the desired expected value
offset = 0 # The offset time on the graph between theoretical and observed I
lock_down = 0 + offset
# Number of days since patient 0 until the province goes into lock down
# the average number of susceptible people exposed per day per person exposed immediately
a, b, c, d, e, f = [20.90247103, 1.45905646, 9.1360014, 38.84609153, 2.16521914, 8.27952718]
alpha = lambda t: abs(c * b / a * (t / a) ** (b - 1) * math.exp(-(t / a) ** b)) + \
abs(f * e / d * (t / d) ** (e - 1) * math.exp(-(t / d) ** e)) # + abs(g)
# the average number of susceptible people exposed per day per person infected
beta = lambda t: 0.2 * alpha(t)
# the average number of susceptible people exposed per day per person exposed immediately
# alpha = lambda t: 1.1 / (t + 1) if t < lock_down else 1.7 / (0.9 * (t - 28) + 1)
# the average number of susceptible people exposed per day per person infected
# beta = alpha
# This should be 5
country = "Italy"
is_country = True
N = 60_000_000 # The total number of people in the province
return Parameters(m, n, Ep, Ip, alpha, beta, offset, country, is_country, N)
def create_transition_matrix(m, n, Ep, Ip):
"""
Sets up the initial transition matrix.
:param m: See model_1 comments
:param n: See model_1 comments
:param Ep: See model_1 comments
:param Ip: See model_1 comments
:return: The one-step transition matrix
"""
# This will change within the loop but most of the values will remain the same
P = np.zeros((m + n + 2, m + n + 2))
P[0, 0] = 1
for row in range(1, m + 1):
P[row, row] = 1 - Ep
P[row, row + 1] = Ep
P[m, m + 1:m + n + 2] = Ep * np.array(binomial_dist(n, Ip))
for row in range(m + 1, m + n + 1):
P[row, row] = 1 - Ip
P[row, row + 1] = Ip
P[m + n + 1, m + n + 1] = 1
return P
def __create_transition_matrix_binomial(m, n, Ep, Ip):
"""
Sets up the initial transition matrix under the binomial assumption.
This method is not advised.
:param m: See model_1 comments
:param n: See model_1 comments
:param Ep: See model_1 comments
:param Ip: See model_1 comments
:return: The one-step transition matrix
"""
# This will change within the loop but most of the values will remain the same
P = np.zeros((m + n + 2, m + n + 2))
P[0, 0] = 1
for row in range(1, m + 1):
P[row, row + 1] = 1
P[m, m + 1:m + n + 2] = Ep * np.array(binomial_dist(n, Ip))
for row in range(m + 1, m + n + 1):
P[row, row + 1] = 1
P[m + n + 1, m + n + 1] = 1
return P
def create_initial_state_vector(m, n, N, N0, NR):
"""
Creates the initial state vector for the system.
:param NR: Number of people who have recovered
:param m: See model_1 comments
:param n: See model_1 comments
:param N: The number of people in the homogenous group
:param N0: The number of "patient 0's"
:return: a numpy 1 by m+n+2 matrix
"""
pt = np.zeros((1, m + n + 2))
pt[0, 0] = 1 - 2 * N0 / N - NR / N
pt[0, 1:m + 1] = N0 / N / m
pt[0, m + 1:m + n + 1] = N0 / N / n
pt[0, m + n + 1] = NR / N
return pt
def get_modelled_time_series(params: Parameters, N0, NR, max_T, return_R_0=False):
"""
Runs the model using the model parameters
:param return_R_0: Whether or not the function should also return a numpy array of R_0
:param max_T: The number of time steps (up to an excluding) for the model
:param params: The model parameters
:param N0: The number of "patient 0's" at time 0
:param NR: The number of already recovered at time 0
:return: a tuple of numpy arrays of S,E,I,R,I_tot,R_tot
"""
m, n, Ep, Ip, alpha, beta, _, country, is_country, N = params.unpack()
P = create_transition_matrix(m, n, Ep, Ip)
pt = create_initial_state_vector(m, n, N, N0, NR)
# Set up the time series variables
T = np.arange(0, max_T)
S = [] # The proportion of people in the susceptible stage at time t
E = [] # The proportion of people in the exposed stage at time t
I = [] # The proportion of people in the infectious stage at time t
I_tot = [] # The total number of people infected since time 0 up until time t
E_tot = []
R = [] # The proportion of people in the recovered stage at time t
R_0 = [] # the basic reproduction number
length = 25 # the maximum value of the random variable T_E to sum up to
# This can't be large since the factorials in the negative binomial distribution get massive
for t in T:
# Append the current state of the system
S.append(np.sum(pt[0, 0]))
E.append(np.sum(pt[0, 1:m + 1]))
I.append(np.sum(pt[0, m + 1:m + n + 1]))
R.append(np.sum(pt[0, m + n + 1]))
I_tot.append(I[-1] + R[-1])
E_tot.append(E[-1] + I_tot[-1])
# Estimate R_0 using an arbitrary alpha and beta
# Warning: this slows down the program drastically due to sums in multiple loops
# This is by far the most time consuming process in the calculation
if return_R_0:
ET_E = round(m * (1 - Ep) / Ep) # E[T_E]
total = 0
for t_val in range(length): # run through all possible values of T_E and T_I
for s in range(1, t_val + 1): # sum from 1 up to the value of T_E inclusive
total += alpha(t + s) * neg_bin(t_val, m, Ep)
total += beta(t + ET_E + s) * neg_bin(t_val, n, Ip)
R_0.append(total)
# Estimate R_0 using alpha_int and beta_int as Weibull anti-derivatives or sums up to n
# This is faster but needs alpha and beta to be defined in specific ways.
# a, b, c, d, e, f = [30.73504421, 2.5255927, 6.15897371, 22.99668146, 1.00000064, 12.86532984]
# alpha_int = lambda w: abs(c * math.exp(-(w / a) ** b)) + abs(f * math.exp(-(w / d) ** e))
# beta_int = lambda w: 0.2*alpha(w)
#
# ET_E = round(m * (1 - Ep) / Ep) # E[T_E]
# T_E = np.arange(length) # Possible values for T_E and T_I to take on in the sum
# summand = lambda s: (alpha_int(t) - alpha_int(t + s)) * neg_bin(s, m, Ep) + \
# (beta_int(t + ET_E) - beta_int(t + ET_E + s)) * neg_bin(s, n, Ip)
# R_0.append(np.sum(np.array(list(map(summand, T_E)))))
# Adjust the transition matrix
P_adjusted = P.copy()
P_adjusted[0, 0] = 1 - alpha(t) * E[-1] - beta(t) * I[-1]
P_adjusted[0, 1:m + 2] = alpha(t) * E[-1] * np.array(binomial_dist(m, Ep)) \
+ beta(t) * I[-1] * np.array(binomial_dist(m, Ep)) # the conditional binomial
# Refresh the time t state vector
pt = np.matmul(pt, P_adjusted)
# Turn everything into a numpy array and make them numbers instead of proportions
S = np.array(S) * N
E = np.array(E) * N
I = np.array(I) * N
R = np.array(R) * N
I_tot = np.array(I_tot) * N
E_tot = np.array(E_tot) * N
R_0 = np.array(R_0)
if return_R_0:
return S, E, I, R, I_tot, E_tot, R_0
else:
return S, E, I, R, I_tot, E_tot
def get_mse(params: Parameters, N0, NR, I_tot_observed):
"""
Calculates the Mean Square Error between the modelled data and the observed data.
Note for South Korea this must change. All instances of I_tot must change to E_tot.
:param params: The model parameters
:param N0: The number of "patient 0's" at time 0
:param NR: The number of already recovered at time 0
:param I_tot_observed: The observed total number in the infectious state
:return: The MSE
"""
_, _, _, _, I_tot, E_tot = get_modelled_time_series(params, N0, NR, len(I_tot_observed) + params.offset, False)
I_tot = I_tot[params.offset:]
mse = np.sum((I_tot - I_tot_observed) ** 2) / len(I_tot_observed)
# Note for South Korea, by the number of tests that they are doing,
# it is best to calculate MSE by E_tot - I_tot_observed
return mse
| true |
bb5520d82aac613dcf92e05a4dd7233b23e9cc3f | Python | Monstarrr/KG_dataManage | /remove_duplicate.py | UTF-8 | 1,085 | 2.625 | 3 | [] | no_license | import pandas as pd
EachMessage=["award","AwardInstrument","Organization","ProgramOfficer","Investigator","Institution","ProgramOfficer","Investigator","Institution","ProgramElement","Directorate","Division"]
rfile=open("data.txt","r")
award1=[]
AwardInstrument1=[]
Organization1=[]
ProgramOfficer1=[]
Investigator1=[]
Institution1=[]
ProgramElement1=[]
Directorate1=[]
Division1 = []
# exec("{}1=pd.DataFrame(eval(rflie.readline))")
for i in EachMessage:
exec("{}1=pd.DataFrame(eval(rfile.readline().lower()))".format(i))
exec("{}1.drop_duplicates(subset=None,keep='first',inplace=True)".format(i))
exec("{}1.to_csv(\"/home/jytang/KG/graph/data/{}.txt\",sep=\"\t\",index=False)".format(i,i))
print("{}done".format(i))
# for di in eval(rfile.readline()):
# exec("({}1).append(str(di))".format(i))
# # print(di)
# exec("({}1).sort()".format(i))
# print(eval("{}1".format(i)))
# print("done.")
# exec("({}1)=rfile.readline()".format(i))
# for e in eval("{}1".format(i)):
# e=str(e)
| true |
dcbc476e381e3533edaca0d640d0c463c66e8af4 | Python | asilva009/Curso-Python | /Seção 8/EX_11_secao8.py | UTF-8 | 265 | 3.375 | 3 | [] | no_license | def notas_aluno(nota1, nota2, nota3, media='A'):
if media == 'P':
media_p = (nota1*5 + nota2*3 + nota3*2) / 10.0
return f'Média Ponderada = {media_p}'
media_a = (nota1 + nota2 + nota3) / 3.0
return f'Média Aritmética = {media_a:.1f}'
| true |
f0ad1fb98c780387b1ba5196fc0cda6db436e36e | Python | daveclouds/Python_Pildoras | /11 - Lista_Exercise.py | UTF-8 | 517 | 4 | 4 | [] | no_license | name = input("Enter your name: ")
addrees = input("Enter your address: ")
phone = input("Enter your phone: ")
# Store the three answers in a list and echo them back on one line.
datauser = [name, addrees, phone]
print("Your information is: " + datauser[0] + " " + datauser[1] + " " + datauser[2])
# Exercise statement (translated from Spanish): write a program that asks for
# "Name", "Address" and "Phone" from the keyboard.  The three values must be
# stored in a list, and the console must show the message "The personal data
# are: name surname phone" with the values that were entered.
869f8a034b5edbf24b1ecdb9d97380d65a177eae | Python | pwinston/napari | /napari/_vispy/utils_gl.py | UTF-8 | 1,666 | 2.859375 | 3 | [
"BSD-3-Clause"
] | permissive | """OpenGL Utilities.
"""
from contextlib import contextmanager
from functools import lru_cache
from typing import Tuple
from vispy.app import Canvas
from vispy.gloo import gl
from vispy.gloo.context import get_current_canvas
@contextmanager
def _opengl_context():
    """Guarantee that a valid OpenGL context exists inside the block.

    A hidden, throwaway Canvas is created only when no context is
    currently active, and it is closed again on exit.  Opening/closing a
    Canvas makes vispy pump Qt events, which can cause problems, so
    ideally call this only after the application's first real Canvas
    already exists (it works either way).
    """
    temp_canvas = None
    if get_current_canvas() is None:
        temp_canvas = Canvas(show=False)
    try:
        yield
    finally:
        if temp_canvas is not None:
            temp_canvas.close()
@lru_cache()
def get_max_texture_sizes() -> Tuple[int, int]:
    """Return the maximum texture sizes for (2D, 3D) rendering.

    The GL query runs inside a temporary OpenGL context when none is
    current, so this is safe to call before any Canvas exists.  Because
    of ``lru_cache`` the query runs only once per process; later calls
    return the cached values.

    Returns
    -------
    Tuple[int, int]
        The max texture sizes for (2d, 3d) rendering.
    """
    with _opengl_context():
        size_2d = gl.glGetParameter(gl.GL_MAX_TEXTURE_SIZE)
    # An empty tuple means the query produced no usable value.
    if size_2d == ():
        size_2d = None
    # vispy does not expose GL_MAX_3D_TEXTURE_SIZE, so the 3D limit is
    # hard coded for now.
    return size_2d, 2048
| true |
8cd2957eaf5abbe68db99e2a8b84e5000ccd0140 | Python | dero24/task_manager | /TaskManager/users.py | UTF-8 | 1,662 | 2.828125 | 3 | [] | no_license | class Users:
    def __init__(self, shared):
        """Cache references to the shared app state (tasks + save config)."""
        self.shared = shared
        # user_tasks maps user name -> list of that user's tasks.
        self.user_tasks = self.shared.user_tasks
        # save_config is unpacked elsewhere as (save_file, serializer, _, write_mode).
        self.save_config = self.shared.save_config
        print("USERS:",self.user_tasks)
def add_user(self):
save_file,serializer,_,w = self.save_config
while True:
try:
input_name = str(input("Please enter your user name: "))
# if current user
for name,tasks in self.user_tasks.items():
if input_name == name:
print("Loading user data..")
print("add_user:",self.user_tasks)
return name
print("Creating new user")
self.user_tasks[input_name] = []
serializer.dump(self.user_tasks, open(save_file, w))
return input_name
except ValueError as e:
print(e)
def load_user(self):
self.save()
self.read_users()
while True:
input_name = input("Which user would you like to load?")
for name,tasks in self.user_tasks.items():
if str(input_name) == name:
return name
else:
print("Please enter a user name from above\n")
def read_users(self):
for name,tasks in self.user_tasks.items():
print(name)
    def save(self):
        """Serialize the full user->tasks mapping to the configured save file."""
        # save_config = (path, serializer module, <unused>, write mode).
        save_file,serializer,_,w = self.save_config
        # NOTE(review): the file handle returned by open() is never closed here.
        serializer.dump(self.user_tasks, open(save_file, w))
| true |
0a6d07826add814f5a246a45c27adc2822b7759c | Python | JIdigit/Part4-Task8 | /task8.py | UTF-8 | 143 | 2.796875 | 3 | [] | no_license | import moneyftm
cash = moneyftm.MoneyFmt(12345.1231212)
print(cash.dollar)
print(cash.update(11413.1232))
print(cash.repr())
print(cash.str()) | true |
f697bb5533f9f32bc12d30955120bd280a89e54d | Python | alexandraback/datacollection | /solutions_1674486_0/Python/20100/A.py | UTF-8 | 178 | 3.015625 | 3 | [] | no_license |
T=int(input())
for i in range(T):
l=input()
print("Case #"+str(i+1)+": "+l.translate(str.maketrans('aozyemljpkdrisbtvguhbwfxcnq','ykqaolguristdnhwpvjxhfcmebz')))
| true |
506ccfdc9503c0bad0c5abbb54c75f44c7f5f258 | Python | oguzhandikici/Python-Exercises | /Exercises/class/BankAccount(CHALLENGING).py | UTF-8 | 1,277 | 3.859375 | 4 | [] | no_license | class Account:
def __init__(self,owner='Oguzhan',balance=4000):
self.owner=owner
self.balance=balance
print('\nAccount Created!\nOwner: {}\nBalance: {}'.format(self.owner,self.balance))
def deposit(self,amount):
self.balance=self.balance+amount
print('\nDeposited the amount {}!\nCurrent Balance: {}'.format(amount,self.balance))
def withdraw(self,amount):
if self.balance>=amount:
self.balance=self.balance-amount
print('Withdrawn the amount {}!\nCurrent Balance: {}'.format(amount,self.balance))
else:
print('Amount Exceeds Balance!')
##Check
myacc=Account(input('Hesap ismi girin: '),int(input('Hesapta olacak tutari girin: ')))
while True:
choice=int(input('\nNe yapmak istersiniz? (1: Para yatirma, 2: Para cekme): '))
if choice==1:
myacc.deposit(float(input('\nYatirilacak miktari girin: ')))
another=int(input('\nBaska islem yapmak ister misiniz? (1:Evet, 2:Hayir): '))
if another==1:
continue
elif another==2:
break
elif choice==2:
myacc.withdraw(float(input('\nCekilecek miktari girin: ')))
another=int(input('\nBaska islem yapmak ister misiniz? (1:Evet, 2:Hayir): '))
if another==1:
continue
elif another==2:
break
else:
print('\nHatali giris. Tekrar deneyin.')
input('Cikmak icin [ENTER] basin.') | true |
3912e4f22d9aa32ad095dbfed422f62b479058ca | Python | philip-morlier/udp_holepuching_messenger | /src/hole_punching_messenger.py | UTF-8 | 1,991 | 2.765625 | 3 | [
"MIT"
] | permissive | import argparse
import socket
import time
import threading
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
class UdpServerAndClient:
def __init__(self, args):
self.user = args.user_name
self.target_ip = args.target_ip
self.target_port = args.target_port
self.local_port = args.local_port
self.away = (str(self.target_ip), int(self.target_port))
self.home = ('0.0.0.0', int(self.local_port))
sock.bind(self.home)
def ping(self):
ping_message_string = f'0 {self.user} {self.target_ip} {self.target_port}'
ping_message = ping_message_string.encode()
while True:
time.sleep(.5)
sock.sendto(ping_message, self.away)
def receive_and_interpret(self):
while True:
data, port = sock.recvfrom(55000)
if data[:1] == b'0':
pass
else:
print(data.decode('utf-8'))
def send_message(self):
while True:
user_input = input()
package = f'{self.user} - {user_input}'
if user_input:
message = package.encode()
sock.sendto(message, self.away)
def run(self):
t1 = threading.Thread(target=self.ping)
t2 = threading.Thread(target=self.receive_and_interpret)
t3 = threading.Thread(target=self.send_message)
t1.start()
t2.start()
t3.start()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--user_name', help="local user name", nargs='?')
parser.add_argument('--target_ip', help="ip address for remote peer", nargs='?')
parser.add_argument('--target_port', help="target port for remote peer", nargs='?')
parser.add_argument('--local_port', help="local port for udp server / client", nargs='?')
args = parser.parse_args()
UdpServerAndClient(args).run()
| true |
f2bfbb0807b5f060d5222a84b0476e9f594cbf8b | Python | Mungage/Kattis | /aaah/aaah.py | UTF-8 | 1,767 | 3.859375 | 4 | [] | no_license | """ Jon Marius shouted too much at the recent Justin Bieber concert,
and now needs to go to the doctor because of his sore throat.
The doctor’s instructions are to say “aaah”. Unfortunately,
the doctors sometimes need Jon Marius to say “aaah” for a while,
which Jon Marius has never been good at. Each doctor requires a certain level of “aah” – some require “aaaaaah”,
while others can actually diagnose his throat with just an “h”. (They often diagnose wrongly,
but that is beyond the scope of this problem.) Since Jon Marius does not want to go to a doctor and have his time wasted,
he wants to compare how long he manages to hold the “aaah” with the doctor’s requirements.
(After all, who wants to be all like “aaah” when the doctor wants you to go “aaaaaah”?)
Each day Jon Marius calls up a different doctor and asks them how long his “aaah” has to be.
Find out if Jon Marius would waste his time going to the given doctor.
Input: The input consists of two lines. The first line is the “aaah” Jon Marius is able to say that day. he second line is the “aah” the doctor wants to hear.
Only lowercase ’a’ and ’h’ will be used in the input, and each line will contain between 0 and 999 ’a’s, inclusive, followed by a single ’h’.
Output: Output “go” if Jon Marius can go to that doctor, and output “no” otherwise.
"""
import sys
# Read the whole of stdin and split on whitespace: the first token is Jon's
# "aaah", the second is the doctor's required "aah".
doctor_list = [str(item) for item in sys.stdin.read().split()]
jon = doctor_list[0]
requirement = doctor_list[1]
# Defensive check (the problem guarantees the trailing 'h'): reject input
# whose last pronounced letter is not an "h".
if jon[-1] != "h" or requirement[-1] != "h" :
    print("The last pronounced letter is not an h")
elif jon.count("a") >= requirement.count("a"):
    # Jon can hold the "aaah" at least as long as the doctor requires.
    print("go")
else:
    print("no")
| true |
db76883eb845ca5abcffbf48a98a7750d06e62b5 | Python | hemuke/python | /09_func/02_func/02_lambda.py | UTF-8 | 657 | 3.6875 | 4 | [
"Apache-2.0"
] | permissive | #! /root/anaconda3/bin/python
from collections.abc import Iterator
from collections.abc import Iterable
# 例子一
def add(num1, num2):
    """Return the sum of num1 and num2."""
    total = num1 + num2
    return total
print(add(1, 2))
print((lambda num1, num2: num1 + num2)(3, 4))
# Example 2: a one-line def is equivalent to the lambda above.
def le(num1, num2): return num1 + num2
print(le(5, 6))
# Example 3: map() returns a lazy iterator; each next() pulls one square.
result = map(lambda x: x * x, [1, 2, 3])
print(next(result))
print(next(result))
print(next(result))
print(result)
# A map object is both an Iterator and an Iterable.
print(isinstance(result, Iterator))
print(isinstance(result, Iterable))
# The iterator above is exhausted, so build a fresh one before list().
result = map(lambda x: x * x, [1, 2, 3])
print(list(result))
# 例子四
def do_sth():
    """Return a fresh function that adds its two arguments."""
    def _adder(num1, num2):
        return num1 + num2
    return _adder
print(do_sth()(7, 8))
| true |
1d93bf4c513a0ad27acdb03587f75e717368b962 | Python | Sheyma83/LPTHW | /ex13.py | UTF-8 | 249 | 2.875 | 3 | [] | no_license | # - - coding: utf- 8 - -
# Python 2 script (LPTHW ex13): echo the script name and three positional
# command-line arguments.
from sys import argv
# Unpacking requires exactly three arguments after the script name.
script, first, second, third = argv
print "The script is called :", script
print "your first variable is:", first
print "Your second variable is:", second
print "Your third varible is:", third | true |
fd1c55d0694e11949f78597a3bab3f6afdd642e2 | Python | hawksong/pythontest | /pylearn/src/test/testclassmethod.py | UTF-8 | 573 | 3.328125 | 3 | [] | no_license | '''
Created on 2017年12月14日
@author: user
'''
'''
def get_no_of_instances(cls_obj):
return cls_obj.no_inst
class Kls(object):
no_inst = 0
def __init__(self):
Kls.no_inst = Kls.no_inst + 1
ik1 = Kls()
ik2 = Kls()
print(get_no_of_instances(Kls))
'''
class Kls(object):
    """Tiny demo class that counts how many instances have been created."""

    # Shared, class-level counter of constructed instances.
    no_inst = 0

    def __init__(self):
        Kls.no_inst += 1

    @classmethod
    def get_no_of_instance(cls):
        """Return the number of Kls instances created so far."""
        return cls.no_inst
# Create two instances, then read the shared counter both via an instance
# and via the class itself (both print 2).
ik1 = Kls()
ik2 = Kls()
print(ik1.get_no_of_instance())
print(Kls.get_no_of_instance())
| true |
0b48a92d6cf6bb0720dfc0d720377332e156e66b | Python | Stay-Ugly/TelegramBot | /sample_simple_bot.py | UTF-8 | 1,056 | 2.734375 | 3 | [] | no_license | from pyrogram import Client
import credentials
import config
# install lib pytogram with:
# pip install -u pyrogram
# create in https://my.telegram.org/apps
api_id = -000 # integer variable. Not string.
api_hash = ''
# Get in t.me/MissRose_bot
# 1. Add her into a group.
# 2. And send the command: /id
chat_id = ''
# create in t.me/BotFather
# Is the code that they said after the text:
# "Use this token to access the HTTP API:"
bot_token = ''
def send_file(file_path, file_name_mask, caption):
    """Upload a document to the configured Telegram chat.

    Opens a short-lived Pyrogram bot session and sends the file at
    *file_path* to the module-level ``chat_id``, renamed to
    *file_name_mask* and annotated with *caption*.
    """
    with Client("bot", api_id, api_hash,
                bot_token=bot_token) as app:
        app.send_document(chat_id,
                          file_path,
                          file_name=file_name_mask,
                          caption=caption)
# Let's test send a file into a group!
# 1.: Add your bot into a group.
# The BotFather will say the link after 'You will find it at'
file_path=''
file_name_mask='' # with extension!
caption=''
send_file(file_path, file_name_mask, caption) | true |
f80e099320f1e41db40fd56b65cc046f98683a0c | Python | lieutenantHertha/python | /generator_practice.py | UTF-8 | 1,013 | 3.515625 | 4 | [] | no_license | # This is a practicing script of generator expression.
import collections
import csv
# One record per CSV row; field order must match the file's columns.
City = collections.namedtuple('City', ['city_name', 'country', 'population', 'latitude', 'longtitude'])
with open('work_stack/worldcities.csv', 'r') as data_file:
    csv_reader = csv.DictReader(data_file)
    # Generator expression: rows are converted to City tuples lazily.
    city_generator = (City(*single_city.values()) for single_city in csv_reader)
    city_counter = 0
    for city_info in city_generator:
        # Skip rows with an empty population field.
        if city_info.population:
            # Western hemisphere (negative longitude) and population > 5M.
            if float(city_info.longtitude) < 0.0 and int(city_info.population) > 5000000:
                city_counter += 1
                print('{:20}{:15}{:10.4f}{:10.4f}'.format(city_info.city_name,
                                                          city_info.population,
                                                          float(city_info.latitude),
                                                          float(city_info.longtitude)))
    print('There are {} cities satisfied your criteria.'.format(city_counter))
| true |
5a32f084258a09e270c9103a8029d56c142c880b | Python | daniel-reich/ubiquitous-fiesta | /Pf2kDoCRvEL8qzKTs_7.py | UTF-8 | 297 | 2.921875 | 3 | [] | no_license |
def order_people(l,p):
  """Seat p people in an l[0] x l[1] grid, filling rows boustrophedon.

  Seats are numbered 1..rows*cols row by row, with every odd row reversed;
  seat numbers greater than p (unoccupied) become 0.  Returns the grid,
  or the string "overcrowded" if p exceeds the grid's capacity.
  """
  rows, cols = l[0], l[1]
  if rows * cols < p:
    return "overcrowded"
  grid = []
  for r in range(rows):
    row = list(range(r * cols + 1, (r + 1) * cols + 1))
    if r % 2 == 1:
      row.reverse()
    grid.append([seat if seat <= p else 0 for seat in row])
  return grid
| true |
0fd381073e7295281c3f8a310d92188fde9e6535 | Python | DanoDataScientist/real-time-visual-respiration-rate-estimation-with-dynamic-sene-adaptation | /src/module.py | UTF-8 | 2,912 | 2.640625 | 3 | [] | no_license | import cv2
import matplotlib.pyplot as plt
import numpy as np
def Computeflow(image_new, image_old, scale):
    """Estimate a dense normal-flow field between two single-channel frames.

    flow = (I_old - I_new) * grad(I_new) / |grad(I_new)|^2, with weak
    gradients suppressed, then the 2-channel field is resized to *scale*
    (an OpenCV (width, height) pair).
    """
    def ComputeSpatialDerivative(image, axis):
        # Forward difference along *axis*, zero-padded back to the
        # original shape so dx/dy stay aligned with the input image.
        derivative = np.diff(image, axis=axis)
        if(axis == 0):
            padding = np.zeros((1, derivative.shape[1]), dtype=np.float32)
        else:
            padding = np.zeros((derivative.shape[0], 1), dtype=np.float32)
        derivative = np.concatenate([padding, derivative], axis=axis)
        return derivative
    dx = ComputeSpatialDerivative(image_new, axis=1)
    dy = ComputeSpatialDerivative(image_new, axis=0)
    dx = np.expand_dims(dx, axis=2)
    dy = np.expand_dims(dy, axis=2)
    gradients = np.concatenate([dx, dy], axis=2)
    magnitude = (gradients ** 2).sum(axis=2)
    # Squared-gradient threshold: pixels with weak gradients get infinite
    # magnitude, so their flow contribution below becomes zero.
    # (np.inf replaces np.Inf, an alias removed in NumPy 2.0.)
    magnitude[magnitude < 9] = np.inf
    ImageDiff = image_old - image_new
    flowField = ImageDiff / magnitude
    flowField = gradients * np.expand_dims(flowField, axis=2)
    flowField = cv2.resize(flowField, scale)
    return flowField
def GetImage(cap, scale):
    """Grab frames from *cap*, return a blurred grayscale image of size *scale*.

    Blends three consecutive reads with weights 1/2 + 1/4 + 1/2 (they sum
    to 1.25 — presumably a deliberate brightness boost; TODO confirm),
    converts to luma-weighted grayscale, Gaussian-blurs, then resizes.
    """
    frame = cap.read()[1] / 2 + cap.read()[1] / 4 + cap.read()[1] / 2
    frame = frame[..., 0] * 0.29 + frame[..., 1] * 0.59 + frame[..., 2] * 0.11
    frame = cv2.GaussianBlur(frame, (25, 25), 2)
    frame = cv2.resize(frame, scale)
    return frame
def main():
    """Estimate a respiration signal from the webcam and log it to output.txt.

    Per frame: compute optical flow, maintain a decayed accumulated-flow
    field (tflow) and a sign-aligned direction field (dflow), project one
    onto the other for a respiration velocity, and low-pass it into the
    output signal.
    """
    with open("output.txt", "w") as file:
        capture = cv2.VideoCapture(0)
        scale = (120, 160)
        N = 100
        lambda_ = 0.8
        MA = 25
        respiration_signal = [0]
        image_old = GetImage(capture, scale)
        image_new = image_old
        for i in range(N):
            if(i > 0):
                image_new = GetImage(capture, scale)
            flow = Computeflow(image_new, image_old, (30, 40))
            image_old = image_new
            if(i == 0):
                tflow = flow
                dflow = flow
            else:
                # Exponentially decayed accumulated flow, clamped to norm MA.
                tflow = lambda_ * tflow + flow
                magnitude = np.linalg.norm(tflow)
                if(magnitude > MA):
                    tflow = tflow * MA / magnitude
                # dflow accumulates flow with a sign flip so it stays
                # aligned with the dominant motion direction.
                if((dflow * flow).sum() > 0):
                    dflow = lambda_ * dflow + flow
                else:
                    dflow = lambda_ * dflow - flow
                magnitude = np.linalg.norm(dflow)
                if(magnitude > MA):
                    dflow = dflow * MA / magnitude
                    magnitude = MA
                # Scalar projection of tflow onto the dflow direction.
                respiration_velocity = (dflow * tflow).sum() / magnitude
                if(not np.isnan(respiration_velocity)):
                    # First-order low-pass filter of the velocity samples.
                    respiration_signal.append(
                        0.9 * respiration_signal[i - 1] + respiration_velocity)
                    file.write(str(respiration_signal[len(respiration_signal) - 1]) + "\n")
                else:
                    respiration_signal.append(0)
                    file.write("0\n")
if(__name__ == "__main__"):
main()
| true |
c6b50517665822118aaf67f2b2e2e6a1ca49ddc3 | Python | ArtemPl-lab/game_py | /player.py | UTF-8 | 4,076 | 2.65625 | 3 | [] | no_license | from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
class player:
    """Player token on a tile map rendered into a Qt grid layout.

    ``map_`` is a 2-D list where 0 marks a wall and any other value is
    walkable.  Every successful move pushes the previous position onto
    ``player_stack`` so the travelled path can be redrawn later.
    """
    def __init__(self, grid, map_,pl_pos, finish_pos, image_):
        super().__init__()
        self.grid = grid                # Qt grid layout the sprites are placed in
        self.map_ = map_                # 2-D walkability map (0 == wall)
        self.player_posX = pl_pos[0]
        self.player_posY = pl_pos[1]
        self.image = image_             # pixmap used for the player sprite
        self.player_stack = []          # trail of previously visited cells
        self.finish_pos = finish_pos    # (x, y) of the goal cell
    def _try_move(self, new_x, new_y):
        """Shared helper behind the four go_* methods: move to (new_x,
        new_y) if the cell is inside the map and not a wall; return True
        when the move happened, False otherwise.

        NOTE(review): bounds use strict `> 0`, so row/column 0 is never
        enterable — preserved from the original code.
        """
        if not (0 < new_x < len(self.map_[0]) and 0 < new_y < len(self.map_)):
            return False
        if self.map_[new_y][new_x] == 0:
            return False
        self.player_stack.append([self.player_posX, self.player_posY])
        self.player_posX = new_x
        self.player_posY = new_y
        self.update()
        return True
    def go_left(self):
        """Step one cell left; return True on success."""
        return self._try_move(self.player_posX - 1, self.player_posY)
    def go_right(self):
        """Step one cell right; return True on success."""
        return self._try_move(self.player_posX + 1, self.player_posY)
    def go_up(self):
        """Step one cell up; return True on success."""
        return self._try_move(self.player_posX, self.player_posY - 1)
    def go_down(self):
        """Step one cell down; return True on success."""
        return self._try_move(self.player_posX, self.player_posY + 1)
    def update(self):
        """Redraw the player sprite, the trail cell just vacated, and the
        finish marker when the player reaches the goal."""
        if (self.player_posX > 0) and (self.player_posX < len(self.map_[0])) and (self.player_posY > 0) and (self.player_posY < len(self.map_)):
            lb = QLabel()
            lb.setPixmap(self.image)
            self.grid.addWidget(lb, self.player_posY, self.player_posX)
            if len(self.player_stack) != 0:
                # Repaint the most recently vacated cell with the floor sprite.
                st_pos = self.player_stack[-1]
                lb2 = QLabel()
                pic = QPixmap('sprite.jpg__resize_sprite.png')
                lb2.setPixmap(pic)
                self.grid.addWidget(lb2, st_pos[1], st_pos[0])
            if self.player_posX == self.finish_pos[0] and self.player_posY == self.finish_pos[1]:
                print("finished!")
                finish_pic = QPixmap('finish.png')
                flb = QLabel()
                flb.setPixmap(finish_pic)
                self.grid.addWidget(flb, self.finish_pos[1], self.finish_pos[0])
    def draw_line(self, fl):
        """Repaint every cell of the travelled path: with the line sprite
        when *fl* is truthy, otherwise with the floor sprite."""
        if fl:
            pic = QPixmap('line.png')
        else:
            pic = QPixmap('sprite.jpg__resize_sprite.png')
        for cell in self.player_stack:
            lab = QLabel()
            lab.setPixmap(pic)
            self.grid.addWidget(lab, cell[1], cell[0])
    def teleported(self, x, y):
        """Jump straight to (x, y) if it is inside the map.  Walls are not
        checked here — presumably teleport targets are trusted; confirm."""
        if (x > 0) and (x < len(self.map_[0])) and (y > 0) and (y < len(self.map_)):
            self.player_stack.append([self.player_posX, self.player_posY])
            self.player_posX = x
            self.player_posY = y
            self.update()
    def isTeleported(self, tp):
        """Fire any teleporter whose entry cell the player stands on.

        *tp* is a list of [[src_x, src_y], [dst_x, dst_y]] pairs; a used
        teleporter is removed from the list.
        """
        # Iterate over a snapshot: removing from the list while iterating
        # it directly would skip the element after the removed one.
        for pad in list(tp):
            if self.player_posX == pad[0][0] and self.player_posY == pad[0][1]:
                # Bug fix: the destination y-coordinate was pad[1][0]
                # (the x value, passed twice) in the original code.
                self.teleported(pad[1][0], pad[1][1])
                tp.remove(pad)
5b9d72075a2bf89248f3c799b379de80108930c2 | Python | lyfewithcode/python-v1 | /latihan 12 - class.py | UTF-8 | 370 | 3.75 | 4 | [] | no_license | class Teman:
    # __init__ is the method that runs first, as soon as the Teman class
    # is instantiated; `self` refers to the instance being initialised.
    def __init__(self, nama, usia):
        """Store the name (nama) and age (usia) on the instance."""
        # usia is string-concatenated in panggil(), so it is expected to
        # be a string here.
        self.nama = nama
        self.usia = usia
    def panggil(self):
        """Print a self-introduction ("My name is ... my age is ...")."""
        print("Namaku "+ self.nama + " usiaku " + self.usia)
t = Teman("Andi", "20")
t.panggil() | true |
fd38dc024e6266e2fa393a2adcb790593192dcdc | Python | adamgignac/seagull | /seagull/seagull/PickleStore.py | UTF-8 | 792 | 2.78125 | 3 | [
"BSD-3-Clause"
] | permissive | from gi.repository import Gtk
def _new_picklestore(cls, columnTypes, rows):
    """Module-level factory used by PickleStore.__reduce__ to rebuild a
    store on unpickling: recreate the ListStore with the recorded column
    types, then re-append every saved row."""
    instance = cls.__new__(cls)
    instance.__init__(*columnTypes)
    for row in rows:
        instance.append(row)
    return instance
class PickleStore(Gtk.ListStore):
    """A Gtk.ListStore that supports pickling via __reduce__."""
    def __reduce__(self):
        """Serialise as (factory, (class, column types, row data))."""
        rows = [list(row) for row in self]
        # Column types are inferred from the first row; an empty store
        # falls back to a single str column (usage-specific hack).
        coltypes = [type(c) for c in rows[0]] if len(rows)>0 else [str] #Usage-specific hack
        return _new_picklestore, (self.__class__, coltypes, rows)
if __name__ == "__main__":
    # Smoke test (Python 2: cPickle and print-statement syntax): pickle a
    # two-column store to a temp file, load it back, and dump the rows.
    import cPickle as pickle
    ps = PickleStore(str, int)
    ps.append(["a", 1])
    ps.append(["b", 2])
    with open("temp", 'w') as f:
        pickle.dump(ps, f)
    with open("temp", 'r') as f:
        loaded = pickle.load(f)
    for row in loaded:
        print row[:]
| true |
0a22e85ab50404a0b21eb27133375d6b4d737681 | Python | mohitgureja/DSA_Nanodegree-Python | /Project_3-Basic Algorithms/problem_3.py | UTF-8 | 2,007 | 4.03125 | 4 | [] | no_license | def rearrange_digits(input_list):
    # Degenerate inputs: nothing to split into two numbers.
    if len(input_list) == 0 or len(input_list) == 1:
        return input_list
    # Sort the digits high-to-low (mergesort here sorts in descending order).
    input_list = mergesort(input_list)
    result = []
    first_num = ""
    second_num = ""
    # Alternately deal the descending digits into the two numbers; this
    # greedy split maximises the sum of the resulting pair.
    for i in range(0,len(input_list)):
        # Even positions go to the first number...
        if i % 2 == 0:
            first_num += str(input_list[i])
        # ...odd positions to the second number.
        else:
            second_num += str(input_list[i])
    result.append(int(first_num))
    result.append(int(second_num))
    return result
def mergesort(input_list):
    """Return the elements of *input_list* sorted in descending order."""
    if len(input_list) <= 1:
        return input_list
    middle = len(input_list) // 2
    # Sort each half recursively, then combine with the descending merge.
    left_sorted = mergesort(input_list[:middle])
    right_sorted = mergesort(input_list[middle:])
    return merge(left_sorted, right_sorted)
def merge(left, right):
    """Merge two descending-sorted lists into one descending-sorted list."""
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        # Take the larger head so the output stays descending; ties go to
        # the right-hand list, matching the original ordering.
        if left[i] > right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # One side is exhausted; the other's remainder is already sorted.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def test_function(test_case):
    """Run rearrange_digits on test_case[0] and compare the SUM of the
    result against the sum of the expected pair test_case[1]."""
    output = rearrange_digits(test_case[0])
    print (output)
    solution = test_case[1]
    # Only the sums are compared, so any pair with the maximal sum passes.
    if sum(output) == sum(solution):
        print("Pass")
    else:
        print("Fail")
#Test Cases
test_function([[1, 2, 3, 4, 5], [531, 42]])
test_function([[4, 6, 2, 5, 9, 8], [964, 852]])
test_function([[],[]])
test_function([[1],[1]])
test_function([[1,2],[2,1]]) | true |
9524be7d2df19626a1a36480c0f86b3fdce6b3e4 | Python | OvenO/BlueDat | /EC/1DECAndSin1DEnsbl/SingPltTrag/Beta_pnt_1/stability.py | UTF-8 | 9,969 | 2.84375 | 3 | [] | no_license | import os
import pylab as pl
from scipy.integrate import odeint
from scipy.integrate import ode
import numpy
class surfCentreLineApx(object):
    """Centre-line approximation of a driven, damped particle, plus the
    Jacobian machinery needed for a stability analysis of a periodic
    orbit.  State vectors are ordered [xdot, ydot, x, y]."""
    def __init__(self,coef,k,w,drgCoef,dt):
        # Field coefficient, wave number, drive frequency, drag, time step.
        self.dt = dt
        self.coef = coef
        self.k = k
        self.w = w
        self.drg = drgCoef
        self.sol = pl.array([])
    def set_sol(self,sol):
        """Attach a previously computed trajectory; J() and mw() index into it."""
        self.sol=sol
    # Plain methods (not lambdas) so they can be handed straight to odeint.
    def f(self,xarr,t):
        """Right-hand side of the equations of motion, for odeint."""
        temp = 0.0
        # Sum the force contributions of the two drive phases (i*pi shift).
        for i in range(2):
            temp+=pl.sin(self.k*xarr[2]-i*pl.pi)*pl.cos(self.w*t-i*pl.pi)/(pl.cosh(self.k*xarr[3])-pl.cos(self.k*xarr[2]-i*pl.pi))
        temp = temp*self.coef
        temp -= self.drg*xarr[0]
        x1dot = temp
        x2dot = 0.0
        x3dot = xarr[0]
        x4dot = 0.0
        return [x1dot,x2dot,x3dot,x4dot]
    # Returns individual matrix elements of the time-dependent Jacobian.
    # set_sol() must already have been called for this to work.
    def J(self,which_M,t):
        """Return the Jacobian element named by which_M ("M11".."M22") at time t."""
        # The solution index for time t is t/dt (rounded); this is why the
        # object needs dt and why single (non-array) t values work.
        x1 = self.sol[int(t/self.dt+.5),2]
        y = self.sol[int(t/self.dt+.5),3]
        #print(t/self.dt)
        # Matrix elements of the time-dependent Jacobian.
        M11 = 0.0
        M12 = 1.0
        #M21 = self.coef*pl.cos(x1)*pl.cosh(y)*pl.cos(t)*(pl.cos(2.0*x1)+pl.cosh(2.0*y)-2.0)/(pl.cos(x1)**2-pl.cosh(y)**2)**2
        M21 = -2.0*self.coef*pl.cos(x1)*pl.cosh(y)*pl.cos(t)*(pl.cos(x1)**2-pl.cosh(y)**2+2.0*pl.sin(x1)**2)/(pl.cos(x1)**2-pl.cosh(y)**2)**2
        M22 = -self.drg
        if (which_M == "M11"):
            return M11
        if (which_M == "M12"):
            return M12
        if (which_M == "M21"):
            return M21
        if (which_M == "M22"):
            return M22
    def mw(self,warr,t):
        """Right-hand side of the matrix ODE dW/dt = J(t) W, for odeint,
        with W flattened row-major as [W11, W12, W21, W22]."""
        dotW11 = warr[0]*self.J("M11",t)+warr[2]*self.J("M12",t)
        dotW12 = warr[1]*self.J("M11",t)+warr[3]*self.J("M12",t)
        dotW21 = warr[0]*self.J("M21",t)+warr[2]*self.J("M22",t)
        dotW22 = warr[1]*self.J("M21",t)+warr[3]*self.J("M22",t)
        return [dotW11,dotW12,dotW21,dotW22]
# Tests whether the current point is still OUTSIDE the threshold radius of
# the first point, measured in the (x, xdot) phase plane.
# Returns True while the points are farther apart than thresh,
# False once the orbit has closed back onto the starting point.
def not_close(first_pnt,curnt_pnt,thresh):
    start = pl.array([first_pnt[0], first_pnt[2]])
    current = pl.array([curnt_pnt[0], curnt_pnt[2]])
    gap = start - current
    r = pl.sqrt(gap[0]**2 + gap[1]**2)
    print("r is: "+str(r))
    return bool(r > thresh)
# Find a single closed loop of the limit cycle (it may be periodic over
# more than one drive cycle).  Returns the slice of `sol` that covers the
# loop.  NOTE(review): despite the original comment, the periodicity itself
# is not returned.
# `thresh` is a distance in the phase plane: if no start/end pair lies
# within it, the search retries recursively with a widened threshold.
def find_one_full_closed(sol,thresh,dt):
    not_found = False
    # Work backwards from the last time value to find the last period:
    # first locate the last time that is a multiple of 2*pi.
    loc = len(sol[:,2])
    while ((loc*dt)%(2*pl.pi)>dt):
        loc-=1
    first_loc = loc
    first_pnt = sol[first_loc,:]
    loc-=1
    # Now find the next point (going backward) where the orbit closes;
    # orbits advance in multiples of 2*pi, so only those times are checked.
    while ((loc*dt)%(2*pl.pi)>dt):
        loc-=1
    curnt_pnt = sol[loc,:]
    loc -= int(pl.pi/4.0/dt)
    while (not_close(first_pnt,curnt_pnt,thresh)):
        if (loc == 0):
            print("Point in threshold not found!!")
            not_found =True
            break
        while ((loc*dt)%(2*pl.pi)>dt):
            loc-=1
        curnt_pnt = sol[loc,:]
        secnd_loc = loc
        loc-=1
    secnd_pnt = curnt_pnt
    if not_found:
        # Retry with a slightly looser threshold.
        final = find_one_full_closed(sol,thresh+.003,dt)
    else:
        final = sol[secnd_loc:first_loc+1,:]
    return final
def main():
    """Integrate the centre-line model, isolate one closed orbit, solve the
    monodromy (W) matrix ODE along it, print its eigenvalues (Floquet
    multipliers), and write diagnostic plots plus data/info files."""
    dt = .001
    # total number of iterations to perform
    totIter = 500000
    totTime = totIter*dt
    time = pl.arange(0.0,totTime,dt)
    surf = 1.0
    coef = .21
    k = 1.0
    w = 1.0
    damp = .1
    g = .1
    # how many cells is till periodicity use x = n*pi/k (n must be even #) modNum = 2*pl.pi/k
    modNum = 2.0*pl.pi
    # initial conditions
    initx = 3.4653
    inity = 1.0
    initvx = -0.0242
    initvy = 0.0
    # initial conditions vector
    # set up: [xdot,ydot,x,y]
    x0 = pl.array([initvx,initvy,initx,inity])
    apx = surfCentreLineApx(coef,k,w,damp,dt)
    sol = odeint(apx.f,x0,time)
    # Wrap x into one spatial period.
    sol[:,2]=sol[:,2]%(2*pl.pi)
    # Isolate one closed loop of the limit cycle; thresh is a distance in
    # the phase plane (the helper widens it if nothing matches).
    thresh = .005
    loop = find_one_full_closed(sol,thresh,dt)
    loop_t = pl.arange(0.0,(len(loop))*dt,dt)
    apx.set_sol(loop)
    # Integrate dW/dt = J(t) W from the identity over one period.
    w0 = pl.array([1.0,0.0,0.0,1.0])
    w_of_t = odeint(apx.mw,w0,loop_t,hmax=dt,hmin=dt)
    #w_of_t = odeint(apx.mw,w0,loop_t)
    print("len w_of_t: " + str(len(w_of_t)))
    # make the matrix form of w_of_t
    matrix = w_of_t[-1,:].reshape(2,2)
    # Eigenvalues of W(t=q), q the orbit period: the Floquet multipliers.
    print(numpy.linalg.eig(matrix))
    # Test the W solution matrix by re-deriving the solution with it.
    initial_x = loop[0,2]
    initial_vx = loop[0,0]
    ut = w_of_t[:,0]*initial_x + w_of_t[:,1]*initial_vx
    vt = w_of_t[:,2]*initial_x + w_of_t[:,3]*initial_vx
    fig4 = pl.figure()
    ax4 = fig4.add_subplot(111)
    #ax1.plot(sol[-int(.5*len(sol[:,0])):,2],sol[-int(.5*len(sol[:,0])):,0])
    ax4.plot(ut,vt)
    ax4.scatter([0.0,pl.pi,2*pl.pi],[0.0,0.0,0.0],color = "Red", marker="o",label="Electrodes")
    #ax1.set_title("Time Slice")
    #ax1.legend(loc = "best")
    ax4.set_xlabel("$x$",fontsize="30")
    ax4.set_ylabel("$\dot{x}$",fontsize="30")
    fig4.savefig("try_w_sol.png")
    os.system("open try_w_sol.png")
    fig2,ax2 = pl.subplots(2,sharex=True)
    #ax2[0].plot(loop_t,ut+loop[:,2])
    #ax2[1].plot(loop_t,vt+loop[:,0])
    ax2[0].plot(loop_t,ut)
    ax2[1].plot(loop_t,vt)
    ax2[1].set_xlabel("$t$")
    fig2.savefig("test_sol_m.png")
    os.system("open test_sol_m.png")
    # Plot the four W components against sample index.
    fig3,axarr = pl.subplots(4,sharex=True)
    axarr[0].plot(pl.arange(0,len(w_of_t)),w_of_t[:,0])
    axarr[1].plot(pl.arange(0,len(w_of_t)),w_of_t[:,1])
    axarr[2].plot(pl.arange(0,len(w_of_t)),w_of_t[:,2])
    axarr[3].plot(pl.arange(0,len(w_of_t)),w_of_t[:,3])
    axarr[0].set_ylabel("0")
    axarr[1].set_ylabel("1")
    axarr[2].set_ylabel("2")
    axarr[3].set_ylabel("3")
    fig3.savefig("w_of_t.png")
    os.system("open w_of_t.png")
    print("loopj_t[-1]: " +str(loop_t[-1]))
    print("loop_t")
    print(len(loop_t))
    print(len(loop))
    # Phase-plane plot of the isolated loop.
    fig1 = pl.figure()
    ax1 = fig1.add_subplot(111)
    #ax1.plot(sol[-int(.5*len(sol[:,0])):,2],sol[-int(.5*len(sol[:,0])):,0])
    ax1.plot(loop[:,2],loop[:,0])
    ax1.scatter([0.0,pl.pi,2*pl.pi],[0.0,0.0,0.0],color = "Red", marker="o",label="Electrodes")
    #ax1.set_title("Time Slice")
    #ax1.legend(loc = "best")
    ax1.set_xlabel("$x$",fontsize="30")
    ax1.set_ylabel("$\dot{x}$",fontsize="30")
    fig1.savefig("loop_plot.png")
    os.system("open loop_plot.png")
    print("w matrix")
    print([w_of_t[-1,0],w_of_t[-1,1]])
    print([w_of_t[-1,2],w_of_t[-1,3]])
    # Disabled per-frame animation dump of the full trajectory:
    #for a in range(len(sol[:,0])):
    #    sol[a,2] = sol[a,2]%modNum
    #    sol[a,3] = sol[a,3]+.063
    #    fig1 = pl.figure()
    #    ax1 = fig1.add_subplot(111)
    #    ax1.scatter(sol[a,2],abs(sol[a,3]-1.0)+1.0,marker = "o", s=100,color="Grey")
    #    surf_arr = pl.arange(0.0,10,.2)
    #    pl.axis([0.0,2.0*pl.pi,-.5,3.0])
    #    ax1.fill_between(surf_arr,pl.zeros([len(surf_arr)])+1,pl.zeros([len(surf_arr)])-.5,where=None,color="Black")
    #    ax1.set_xlabel("$x$",fontsize=36)
    #    ax1.set_ylabel("$y$",fontsize=36)
    #    ax1.tick_params(axis="both",labelsize=15)
    #    ax1.scatter([0.0,pl.pi,2*pl.pi],[0.0,0.0,0.0],s=70,color = "White", marker="o",label="Electrodes")
    #    pl.savefig(str(a)+".png")
    # Dump the full trajectory, one "vx vy x y" row per sample.
    dat_file = open("data.txt","w")
    for i in range(len(sol[:,2])):
        dat_file.write(str(sol[i,0])+" "+str(sol[i,1])+" "+str(sol[i,2])+" "+str(sol[i,3])+"\n")
    dat_file.close()
    # make text file with all extra information
    outFile = open("info.dat","w")
    outFile.write("Info \n coefficient: " + str(coef) \
            + "\nwave number: " +str(k)\
            + "\nomega: " + str(w)\
            + "\ndamping: " + str(damp)\
            + "\ng: " + str(g)\
            + "\ntime step: " + str(dt)\
            + "\ntotal time: " + str(dt*totIter)\
            + "\ntotal iterations: " + str(totIter)\
            + "\nInitial Conditions: \n" +
            "initial x: " +str(initx) \
            +"\ninitial y: " +str(inity) \
            +"\ninitial vx: " +str(initvx)\
            +"\ninitial vy: " +str(initvy) )
    outFile.close()
if __name__ == '__main__':
main()
| true |